VirtualBox

source: vbox/trunk/src/VBox/Main/testcase/tstCollector.cpp@43629

Last change on this file since 43629 was 43629, checked in by vboxsync, 12 years ago

Main/Metrics: Linux fs/disk metrics, VBoxManage filtering + minor fixes (#6345)

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 16.8 KB
 
/* $Id: tstCollector.cpp 43629 2012-10-12 09:26:07Z vboxsync $ */

/** @file
 *
 * Collector classes test cases.
 */

/*
 * Copyright (C) 2008 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

#ifdef RT_OS_DARWIN
# include "../src-server/darwin/PerformanceDarwin.cpp"
#endif
#ifdef RT_OS_FREEBSD
# include "../src-server/freebsd/PerformanceFreeBSD.cpp"
#endif
#ifdef RT_OS_LINUX
# include "../src-server/linux/PerformanceLinux.cpp"
#endif
#ifdef RT_OS_OS2
# include "../src-server/os2/PerformanceOS2.cpp"
#endif
#ifdef RT_OS_SOLARIS
# include "../src-server/solaris/PerformanceSolaris.cpp"
#endif
#ifdef RT_OS_WINDOWS
# define _WIN32_DCOM
# include <objidl.h>
# include <objbase.h>
# include "../src-server/win/PerformanceWin.cpp"
#endif

#include <iprt/initterm.h>
#include <iprt/stream.h>
#include <iprt/env.h>
#include <iprt/err.h>
#include <iprt/process.h>
#include <iprt/thread.h>
#include <iprt/time.h>

#define RUN_TIME_MS 1000

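/*
 * N_CALLS(n, fn) invokes collector->fn n times, keeping the last status in
 * 'rc' and printing a message on failure. Inside the loop body the current
 * iteration is available as 'call', which the callers below use to index the
 * fake-VM process array. For example, N_CALLS(2, getHostCpuMHz(&tmp))
 * roughly expands to:
 *
 *   for (int call = 0; call < 2; ++call)
 *       rc = collector->getHostCpuMHz(&tmp);
 *   if (RT_FAILURE(rc))
 *       RTPrintf("tstCollector: getHostCpuMHz(&tmp) -> %Rrc\n", rc);
 */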
#define N_CALLS(n, fn) \
    for (int call = 0; call < n; ++call) \
        rc = collector->fn; \
    if (RT_FAILURE(rc)) \
        RTPrintf("tstCollector: "#fn" -> %Rrc\n", rc)

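/*
 * CALLS_PER_SECOND(fn) is a tiny throughput benchmark: it calls
 * collector->fn repeatedly for RUN_TIME_MS milliseconds (one second), counts
 * the completed calls in 'nCalls' and prints the resulting calls-per-second
 * figure. It expects 'rc', 'nCalls' and 'start' to be declared by the caller,
 * as measurePerformance() does below.
 */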
#define CALLS_PER_SECOND(fn) \
    nCalls = 0; \
    start = RTTimeMilliTS(); \
    do { \
        rc = collector->fn; \
        if (RT_FAILURE(rc)) \
            break; \
        ++nCalls; \
    } while (RTTimeMilliTS() - start < RUN_TIME_MS); \
    if (RT_FAILURE(rc)) \
    { \
        RTPrintf("tstCollector: "#fn" -> %Rrc\n", rc); \
    } \
    else \
        RTPrintf("%70s -- %u calls per second\n", #fn, nCalls)

void measurePerformance(pm::CollectorHAL *collector, const char *pszName, int cVMs)
{

    static const char * const args[] = { pszName, "-child", NULL };
    pm::CollectorHints hints;
    std::vector<RTPROCESS> processes;

    hints.collectHostCpuLoad();
    hints.collectHostRamUsage();
    /* Start fake VMs */
    for (int i = 0; i < cVMs; ++i)
    {
        RTPROCESS pid;
        int rc = RTProcCreate(pszName, args, RTENV_DEFAULT, 0, &pid);
        if (RT_FAILURE(rc))
        {
            hints.getProcesses(processes);
            std::for_each(processes.begin(), processes.end(), std::ptr_fun(RTProcTerminate));
            RTPrintf("tstCollector: RTProcCreate() -> %Rrc\n", rc);
            return;
        }
        hints.collectProcessCpuLoad(pid);
        hints.collectProcessRamUsage(pid);
    }

    hints.getProcesses(processes);
    RTThreadSleep(30000); // Let children settle for half a minute

    int rc;
    ULONG tmp;
    uint64_t tmp64;
    uint64_t start;
    unsigned int nCalls;
    /* Pre-collect */
    CALLS_PER_SECOND(preCollect(hints, 0));
    /* Host CPU load */
    CALLS_PER_SECOND(getRawHostCpuLoad(&tmp64, &tmp64, &tmp64));
    /* Process CPU load */
    CALLS_PER_SECOND(getRawProcessCpuLoad(processes[nCalls%cVMs], &tmp64, &tmp64, &tmp64));
    /* Host CPU speed */
    CALLS_PER_SECOND(getHostCpuMHz(&tmp));
    /* Host RAM usage */
    CALLS_PER_SECOND(getHostMemoryUsage(&tmp, &tmp, &tmp));
    /* Process RAM usage */
    CALLS_PER_SECOND(getProcessMemoryUsage(processes[nCalls%cVMs], &tmp));

    start = RTTimeNanoTS();

    int times;
    for (times = 0; times < 100; times++)
    {
        /* Pre-collect */
        N_CALLS(1, preCollect(hints, 0));
        /* Host CPU load */
        N_CALLS(1, getRawHostCpuLoad(&tmp64, &tmp64, &tmp64));
        /* Host CPU speed */
        N_CALLS(1, getHostCpuMHz(&tmp));
        /* Host RAM usage */
        N_CALLS(1, getHostMemoryUsage(&tmp, &tmp, &tmp));
        /* Process CPU load */
        N_CALLS(cVMs, getRawProcessCpuLoad(processes[call], &tmp64, &tmp64, &tmp64));
        /* Process RAM usage */
        N_CALLS(cVMs, getProcessMemoryUsage(processes[call], &tmp));
    }
    printf("\n%u VMs -- %.2f%% of CPU time\n", cVMs, (RTTimeNanoTS() - start) / 10000000. / times);

    /* Shut down fake VMs */
    std::for_each(processes.begin(), processes.end(), std::ptr_fun(RTProcTerminate));
}

#ifdef RT_OS_SOLARIS
#define NETIFNAME "net0"
#else
#define NETIFNAME "eth0"
#endif
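/*
 * Network load is sampled the same way as the other "raw" metrics: take a
 * counter snapshot, sleep for a known interval, take a second snapshot and
 * divide the delta by the interval. Note that NETIFNAME is hard-coded above;
 * on a host whose primary interface is named differently the calls below are
 * expected to fail.
 */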
int testNetwork(pm::CollectorHAL *collector)
{
    pm::CollectorHints hints;
    uint64_t hostRxStart, hostTxStart;
    uint64_t hostRxStop, hostTxStop, speed = 125000000; /* Assume 1Gbit/s */

    RTPrintf("tstCollector: TESTING - Network load, sleeping for 5 sec...\n");

    int rc = collector->preCollect(hints, 0);
    if (RT_FAILURE(rc))
    {
        RTPrintf("tstCollector: preCollect() -> %Rrc\n", rc);
        return 1;
    }
    rc = collector->getRawHostNetworkLoad(NETIFNAME, &hostRxStart, &hostTxStart);
    if (RT_FAILURE(rc))
    {
        RTPrintf("tstCollector: getRawHostNetworkLoad() -> %Rrc\n", rc);
        return 1;
    }

    RTThreadSleep(5000); // Sleep for five seconds

    rc = collector->preCollect(hints, 0);
    if (RT_FAILURE(rc))
    {
        RTPrintf("tstCollector: preCollect() -> %Rrc\n", rc);
        return 1;
    }
    rc = collector->getRawHostNetworkLoad(NETIFNAME, &hostRxStop, &hostTxStop);
    if (RT_FAILURE(rc))
    {
        RTPrintf("tstCollector: getRawHostNetworkLoad() -> %Rrc\n", rc);
        return 1;
    }
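    /*
     * The percentage prints below use integer fixed-point math: x * 100 / base
     * gives the whole percent and x * 10000 / base % 100 the next two decimal
     * digits. E.g. for 31250000 bytes received in 5 sec on a 1 Gbit/s link:
     * 31250000 * 100 / 625000000 = 5 and 31250000 * 10000 / 625000000 % 100 = 0,
     * i.e. "5.0 %". Since the fractional part is printed with plain %u it is
     * not zero-padded, so seven hundredths prints as ".7" rather than ".07".
     */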
    RTPrintf("tstCollector: host network speed = %llu bytes/sec (%llu mbit/sec)\n",
             speed, speed/(1000000/8));
    RTPrintf("tstCollector: host network rx = %llu bytes/sec (%llu mbit/sec, %u.%u %%)\n",
             (hostRxStop - hostRxStart)/5, (hostRxStop - hostRxStart)/(5000000/8),
             (hostRxStop - hostRxStart) * 100 / (speed * 5),
             (hostRxStop - hostRxStart) * 10000 / (speed * 5) % 100);
    RTPrintf("tstCollector: host network tx = %llu bytes/sec (%llu mbit/sec, %u.%u %%)\n\n",
             (hostTxStop - hostTxStart)/5, (hostTxStop - hostTxStart)/(5000000/8),
             (hostTxStop - hostTxStart) * 100 / (speed * 5),
             (hostTxStop - hostTxStart) * 10000 / (speed * 5) % 100);

    return 0;
}

#define FSNAME "/"
int testFsUsage(pm::CollectorHAL *collector)
{
    RTPrintf("tstCollector: TESTING - File system usage\n");

    ULONG total, used, available;

    int rc = collector->getHostFilesystemUsage(FSNAME, &total, &used, &available);
    if (RT_FAILURE(rc))
    {
        RTPrintf("tstCollector: getHostFilesystemUsage() -> %Rrc\n", rc);
        return 1;
    }
    RTPrintf("tstCollector: host root fs total = %lu MB\n", total);
    RTPrintf("tstCollector: host root fs used = %lu MB\n", used);
    RTPrintf("tstCollector: host root fs available = %lu MB\n\n", available);
    return 0;
}

int testDisk(pm::CollectorHAL *collector)
{
    pm::CollectorHints hints;
    uint64_t diskMsStart, totalMsStart;
    uint64_t diskMsStop, totalMsStop;

    std::list<RTCString> disks;
    int rc = pm::getDiskListByFs(FSNAME, disks);
    if (RT_FAILURE(rc))
    {
        RTPrintf("tstCollector: getDiskListByFs(%s) -> %Rrc\n", FSNAME, rc);
        return 1;
    }
    if (disks.empty())
    {
        RTPrintf("tstCollector: getDiskListByFs(%s) returned empty list\n", FSNAME);
        return 1;
    }

    RTPrintf("tstCollector: TESTING - Disk utilization, sleeping for 5 sec...\n");

    hints.collectHostCpuLoad();
    rc = collector->preCollect(hints, 0);
    if (RT_FAILURE(rc))
    {
        RTPrintf("tstCollector: preCollect() -> %Rrc\n", rc);
        return 1;
    }
    rc = collector->getRawHostDiskLoad(disks.front().c_str(), &diskMsStart, &totalMsStart);
    if (RT_FAILURE(rc))
    {
        RTPrintf("tstCollector: getRawHostDiskLoad() -> %Rrc\n", rc);
        return 1;
    }

    RTThreadSleep(5000); // Sleep for five seconds

    rc = collector->preCollect(hints, 0);
    if (RT_FAILURE(rc))
    {
        RTPrintf("tstCollector: preCollect() -> %Rrc\n", rc);
        return 1;
    }
    rc = collector->getRawHostDiskLoad(disks.front().c_str(), &diskMsStop, &totalMsStop);
    if (RT_FAILURE(rc))
    {
        RTPrintf("tstCollector: getRawHostDiskLoad() -> %Rrc\n", rc);
        return 1;
    }
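    /*
     * Utilization is the share of the interval the disk spent busy:
     * (diskMsStop - diskMsStart) out of (totalMsStop - totalMsStart) elapsed
     * milliseconds, printed with the same two-digit fixed-point trick as the
     * network percentages above.
     */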
    RTPrintf("tstCollector: host disk util = %llu msec (%u.%u %%), total = %llu msec\n\n",
             (diskMsStop - diskMsStart),
             (unsigned)((diskMsStop - diskMsStart) * 100 / (totalMsStop - totalMsStart)),
             (unsigned)((diskMsStop - diskMsStart) * 10000 / (totalMsStop - totalMsStart) % 100),
             totalMsStop - totalMsStart);

    return 0;
}



int main(int argc, char *argv[])
{
    /*
     * Initialize the VBox runtime without loading
     * the support driver.
     */
    int rc = RTR3InitExe(argc, &argv, 0);
    if (RT_FAILURE(rc))
    {
        RTPrintf("tstCollector: RTR3InitExe() -> %d\n", rc);
        return 1;
    }
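    /*
     * measurePerformance() re-executes this binary with the "-child" argument
     * to create dummy processes that stand in for VMs; such a child simply
     * sleeps for a very long time until the parent terminates it.
     */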
    if (argc > 1 && !strcmp(argv[1], "-child"))
    {
        /* We have spawned ourselves as a child process -- scratch the leg */
        RTThreadSleep(1000000);
        return 1;
    }
#ifdef RT_OS_WINDOWS
    HRESULT hRes = CoInitialize(NULL);
    /*
     * Need to initialize security to access performance enumerators.
     */
    hRes = CoInitializeSecurity(
        NULL,
        -1,
        NULL,
        NULL,
        RPC_C_AUTHN_LEVEL_NONE,
        RPC_C_IMP_LEVEL_IMPERSONATE,
        NULL, EOAC_NONE, 0);
#endif

    pm::CollectorHAL *collector = pm::createHAL();
    if (!collector)
    {
        RTPrintf("tstCollector: createHAL() failed\n");
        return 1;
    }
#if 1
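    /*
     * Collection is a two-phase affair: the hints tell the HAL which metrics
     * are about to be queried so preCollect() can do any batched or expensive
     * gathering up front, and the individual getter calls then retrieve the
     * values for the current sample.
     */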
    pm::CollectorHints hints;
    hints.collectHostCpuLoad();
    hints.collectHostRamUsage();
    hints.collectProcessCpuLoad(RTProcSelf());
    hints.collectProcessRamUsage(RTProcSelf());

    uint64_t start;

    uint64_t hostUserStart, hostKernelStart, hostIdleStart;
    uint64_t hostUserStop, hostKernelStop, hostIdleStop, hostTotal;

    uint64_t processUserStart, processKernelStart, processTotalStart;
    uint64_t processUserStop, processKernelStop, processTotalStop;

    RTPrintf("tstCollector: TESTING - CPU load, sleeping for 5 sec\n");

    rc = collector->preCollect(hints, 0);
    if (RT_FAILURE(rc))
    {
        RTPrintf("tstCollector: preCollect() -> %Rrc\n", rc);
        return 1;
    }
    rc = collector->getRawHostCpuLoad(&hostUserStart, &hostKernelStart, &hostIdleStart);
    if (RT_FAILURE(rc))
    {
        RTPrintf("tstCollector: getRawHostCpuLoad() -> %Rrc\n", rc);
        return 1;
    }
    rc = collector->getRawProcessCpuLoad(RTProcSelf(), &processUserStart, &processKernelStart, &processTotalStart);
    if (RT_FAILURE(rc))
    {
        RTPrintf("tstCollector: getRawProcessCpuLoad() -> %Rrc\n", rc);
        return 1;
    }

    RTThreadSleep(5000); // Sleep for 5 seconds

    rc = collector->preCollect(hints, 0);
    if (RT_FAILURE(rc))
    {
        RTPrintf("tstCollector: preCollect() -> %Rrc\n", rc);
        return 1;
    }
    rc = collector->getRawHostCpuLoad(&hostUserStop, &hostKernelStop, &hostIdleStop);
    if (RT_FAILURE(rc))
    {
        RTPrintf("tstCollector: getRawHostCpuLoad() -> %Rrc\n", rc);
        return 1;
    }
    rc = collector->getRawProcessCpuLoad(RTProcSelf(), &processUserStop, &processKernelStop, &processTotalStop);
    if (RT_FAILURE(rc))
    {
        RTPrintf("tstCollector: getRawProcessCpuLoad() -> %Rrc\n", rc);
        return 1;
    }
    hostTotal = hostUserStop - hostUserStart
        + hostKernelStop - hostKernelStart
        + hostIdleStop - hostIdleStart;
    /*printf("tstCollector: host cpu user = %f sec\n", (hostUserStop - hostUserStart) / 10000000.);
    printf("tstCollector: host cpu kernel = %f sec\n", (hostKernelStop - hostKernelStart) / 10000000.);
    printf("tstCollector: host cpu idle = %f sec\n", (hostIdleStop - hostIdleStart) / 10000000.);
    printf("tstCollector: host cpu total = %f sec\n", hostTotal / 10000000.);*/
    RTPrintf("tstCollector: host cpu user = %u.%u %%\n",
             (unsigned)((hostUserStop - hostUserStart) * 100 / hostTotal),
             (unsigned)((hostUserStop - hostUserStart) * 10000 / hostTotal % 100));
    RTPrintf("tstCollector: host cpu kernel = %u.%u %%\n",
             (unsigned)((hostKernelStop - hostKernelStart) * 100 / hostTotal),
             (unsigned)((hostKernelStop - hostKernelStart) * 10000 / hostTotal % 100));
    RTPrintf("tstCollector: host cpu idle = %u.%u %%\n",
             (unsigned)((hostIdleStop - hostIdleStart) * 100 / hostTotal),
             (unsigned)((hostIdleStop - hostIdleStart) * 10000 / hostTotal % 100));
    RTPrintf("tstCollector: process cpu user = %u.%u %%\n",
             (unsigned)((processUserStop - processUserStart) * 100 / (processTotalStop - processTotalStart)),
             (unsigned)((processUserStop - processUserStart) * 10000 / (processTotalStop - processTotalStart) % 100));
    RTPrintf("tstCollector: process cpu kernel = %u.%u %%\n\n",
             (unsigned)((processKernelStop - processKernelStart) * 100 / (processTotalStop - processTotalStart)),
             (unsigned)((processKernelStop - processKernelStart) * 10000 / (processTotalStop - processTotalStart) % 100));

    RTPrintf("tstCollector: TESTING - CPU load, looping for 5 sec\n");
    rc = collector->preCollect(hints, 0);
    if (RT_FAILURE(rc))
    {
        RTPrintf("tstCollector: preCollect() -> %Rrc\n", rc);
        return 1;
    }
    rc = collector->getRawHostCpuLoad(&hostUserStart, &hostKernelStart, &hostIdleStart);
    if (RT_FAILURE(rc))
    {
        RTPrintf("tstCollector: getRawHostCpuLoad() -> %Rrc\n", rc);
        return 1;
    }
    rc = collector->getRawProcessCpuLoad(RTProcSelf(), &processUserStart, &processKernelStart, &processTotalStart);
    if (RT_FAILURE(rc))
    {
        RTPrintf("tstCollector: getRawProcessCpuLoad() -> %Rrc\n", rc);
        return 1;
    }
    start = RTTimeMilliTS();
    while (RTTimeMilliTS() - start < 5000)
        ; // Loop for 5 seconds
    rc = collector->preCollect(hints, 0);
    if (RT_FAILURE(rc))
    {
        RTPrintf("tstCollector: preCollect() -> %Rrc\n", rc);
        return 1;
    }
    rc = collector->getRawHostCpuLoad(&hostUserStop, &hostKernelStop, &hostIdleStop);
    if (RT_FAILURE(rc))
    {
        RTPrintf("tstCollector: getRawHostCpuLoad() -> %Rrc\n", rc);
        return 1;
    }
    rc = collector->getRawProcessCpuLoad(RTProcSelf(), &processUserStop, &processKernelStop, &processTotalStop);
    if (RT_FAILURE(rc))
    {
        RTPrintf("tstCollector: getRawProcessCpuLoad() -> %Rrc\n", rc);
        return 1;
    }
    hostTotal = hostUserStop - hostUserStart
        + hostKernelStop - hostKernelStart
        + hostIdleStop - hostIdleStart;
    RTPrintf("tstCollector: host cpu user = %u.%u %%\n",
             (unsigned)((hostUserStop - hostUserStart) * 100 / hostTotal),
             (unsigned)((hostUserStop - hostUserStart) * 10000 / hostTotal % 100));
    RTPrintf("tstCollector: host cpu kernel = %u.%u %%\n",
             (unsigned)((hostKernelStop - hostKernelStart) * 100 / hostTotal),
             (unsigned)((hostKernelStop - hostKernelStart) * 10000 / hostTotal % 100));
    RTPrintf("tstCollector: host cpu idle = %u.%u %%\n",
             (unsigned)((hostIdleStop - hostIdleStart) * 100 / hostTotal),
             (unsigned)((hostIdleStop - hostIdleStart) * 10000 / hostTotal % 100));
    RTPrintf("tstCollector: process cpu user = %u.%u %%\n",
             (unsigned)((processUserStop - processUserStart) * 100 / (processTotalStop - processTotalStart)),
             (unsigned)((processUserStop - processUserStart) * 10000 / (processTotalStop - processTotalStart) % 100));
    RTPrintf("tstCollector: process cpu kernel = %u.%u %%\n\n",
             (unsigned)((processKernelStop - processKernelStart) * 100 / (processTotalStop - processTotalStart)),
             (unsigned)((processKernelStop - processKernelStart) * 10000 / (processTotalStop - processTotalStart) % 100));

    RTPrintf("tstCollector: TESTING - Memory usage\n");

    ULONG total, used, available, processUsed;

    rc = collector->getHostMemoryUsage(&total, &used, &available);
    if (RT_FAILURE(rc))
    {
        RTPrintf("tstCollector: getHostMemoryUsage() -> %Rrc\n", rc);
        return 1;
    }
    rc = collector->getProcessMemoryUsage(RTProcSelf(), &processUsed);
    if (RT_FAILURE(rc))
    {
        RTPrintf("tstCollector: getProcessMemoryUsage() -> %Rrc\n", rc);
        return 1;
    }
    RTPrintf("tstCollector: host mem total = %lu kB\n", total);
    RTPrintf("tstCollector: host mem used = %lu kB\n", used);
    RTPrintf("tstCollector: host mem available = %lu kB\n", available);
    RTPrintf("tstCollector: process mem used = %lu kB\n\n", processUsed);
#endif
#if 1
    rc = testNetwork(collector);
#endif
#if 1
    rc = testFsUsage(collector);
#endif
#if 1
    rc = testDisk(collector);
#endif
#if 1
    RTPrintf("tstCollector: TESTING - Performance\n\n");

    measurePerformance(collector, argv[0], 100);
#endif

    delete collector;

    printf("\ntstCollector FINISHED.\n");

    return rc;
}

