VirtualBox

source: vbox/trunk/src/VBox/Main/testcase/tstCollector.cpp@43538

Last change on this file since 43538 was 43538, checked in by vboxsync, 12 years ago

Main/Metrics: Host network interface metrics for Solaris (#6345)

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 13.0 KB
 
/* $Id: tstCollector.cpp 43538 2012-10-04 12:24:20Z vboxsync $ */

/** @file
 *
 * Collector classes test cases.
 */

/*
 * Copyright (C) 2008 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

#ifdef RT_OS_DARWIN
# include "../src-server/darwin/PerformanceDarwin.cpp"
#endif
#ifdef RT_OS_FREEBSD
# include "../src-server/freebsd/PerformanceFreeBSD.cpp"
#endif
#ifdef RT_OS_LINUX
# include "../src-server/linux/PerformanceLinux.cpp"
#endif
#ifdef RT_OS_OS2
# include "../src-server/os2/PerformanceOS2.cpp"
#endif
#ifdef RT_OS_SOLARIS
# include "../src-server/solaris/PerformanceSolaris.cpp"
#endif
#ifdef RT_OS_WINDOWS
# define _WIN32_DCOM
# include <objidl.h>
# include <objbase.h>
# include "../src-server/win/PerformanceWin.cpp"
#endif

#include <iprt/initterm.h>
#include <iprt/stream.h>
#include <iprt/env.h>
#include <iprt/err.h>
#include <iprt/process.h>
#include <iprt/thread.h>
#include <iprt/time.h>

#define RUN_TIME_MS 1000

#define N_CALLS(n, fn) \
    for (int call = 0; call < n; ++call) \
        rc = collector->fn; \
    if (RT_FAILURE(rc)) \
        RTPrintf("tstCollector: "#fn" -> %Rrc\n", rc)

#define CALLS_PER_SECOND(fn) \
    nCalls = 0; \
    start = RTTimeMilliTS(); \
    do { \
        rc = collector->fn; \
        if (RT_FAILURE(rc)) \
            break; \
        ++nCalls; \
    } while(RTTimeMilliTS() - start < RUN_TIME_MS); \
    if (RT_FAILURE(rc)) \
    { \
        RTPrintf("tstCollector: "#fn" -> %Rrc\n", rc); \
    } \
    else \
        RTPrintf("%70s -- %u calls per second\n", #fn, nCalls)

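/* Note on the macros above: N_CALLS expands to an unbraced for-loop, so only
 * the collector call is repeated n times and the failure check runs once
 * afterwards.  CALLS_PER_SECOND keeps invoking the given CollectorHAL method
 * for RUN_TIME_MS milliseconds and prints the achieved call rate.  Both rely
 * on locals ('rc', and for CALLS_PER_SECOND also 'nCalls' and 'start')
 * declared at the expansion site, see measurePerformance() below. */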
void measurePerformance(pm::CollectorHAL *collector, const char *pszName, int cVMs)
{

    static const char * const args[] = { pszName, "-child", NULL };
    pm::CollectorHints hints;
    std::vector<RTPROCESS> processes;

    hints.collectHostCpuLoad();
    hints.collectHostRamUsage();
    /* Start fake VMs */
    for (int i = 0; i < cVMs; ++i)
    {
        RTPROCESS pid;
        int rc = RTProcCreate(pszName, args, RTENV_DEFAULT, 0, &pid);
        if (RT_FAILURE(rc))
        {
            hints.getProcesses(processes);
            std::for_each(processes.begin(), processes.end(), std::ptr_fun(RTProcTerminate));
            RTPrintf("tstCollector: RTProcCreate() -> %Rrc\n", rc);
            return;
        }
        hints.collectProcessCpuLoad(pid);
        hints.collectProcessRamUsage(pid);
    }

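    /* The fake VMs started above are just copies of this test binary launched
     * with the "-child" option (see main()); each child merely sleeps while
     * its CPU and RAM usage is being sampled. */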
    hints.getProcesses(processes);
    RTThreadSleep(30000); // Let children settle for half a minute

    int rc;
    ULONG tmp;
    uint64_t tmp64;
    uint64_t start;
    unsigned int nCalls;
    /* Pre-collect */
    CALLS_PER_SECOND(preCollect(hints, 0));
    /* Host CPU load */
    CALLS_PER_SECOND(getRawHostCpuLoad(&tmp64, &tmp64, &tmp64));
    /* Process CPU load */
    CALLS_PER_SECOND(getRawProcessCpuLoad(processes[nCalls%cVMs], &tmp64, &tmp64, &tmp64));
    /* Host CPU speed */
    CALLS_PER_SECOND(getHostCpuMHz(&tmp));
    /* Host RAM usage */
    CALLS_PER_SECOND(getHostMemoryUsage(&tmp, &tmp, &tmp));
    /* Process RAM usage */
    CALLS_PER_SECOND(getProcessMemoryUsage(processes[nCalls%cVMs], &tmp));

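    /* Simulate 100 collection passes: each pass refreshes host CPU load, CPU
     * speed and RAM usage once, plus per-process CPU load and RAM usage for
     * every fake VM.  The figure printed below is the average wall-clock cost
     * of one pass expressed as a percentage of one second, i.e. roughly the
     * overhead of collecting these metrics once per second. */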
    start = RTTimeNanoTS();

    int times;
    for (times = 0; times < 100; times++)
    {
        /* Pre-collect */
        N_CALLS(1, preCollect(hints, 0));
        /* Host CPU load */
        N_CALLS(1, getRawHostCpuLoad(&tmp64, &tmp64, &tmp64));
        /* Host CPU speed */
        N_CALLS(1, getHostCpuMHz(&tmp));
        /* Host RAM usage */
        N_CALLS(1, getHostMemoryUsage(&tmp, &tmp, &tmp));
        /* Process CPU load */
        N_CALLS(cVMs, getRawProcessCpuLoad(processes[call], &tmp64, &tmp64, &tmp64));
        /* Process RAM usage */
        N_CALLS(cVMs, getProcessMemoryUsage(processes[call], &tmp));
    }
    printf("\n%u VMs -- %.2f%% of CPU time\n", cVMs, (RTTimeNanoTS() - start) / 10000000. / times);

    /* Shut down fake VMs */
    std::for_each(processes.begin(), processes.end(), std::ptr_fun(RTProcTerminate));
}

#ifdef RT_OS_SOLARIS
#define NETIFNAME "net0"
#else
#define NETIFNAME "eth0"
#endif
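/* Sample the raw host network counters of a hard-coded interface (net0 on
 * Solaris, eth0 elsewhere), sleep for five seconds, sample again, and report
 * the average rx/tx rates relative to an assumed 1 Gbit/s link speed. */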
int testNetwork(pm::CollectorHAL *collector)
{
    pm::CollectorHints hints;
    uint64_t hostRxStart, hostTxStart;
    uint64_t hostRxStop, hostTxStop, speed = 125000000; /* Assume 1Gbit/s */

    RTPrintf("\ntstCollector: TESTING - Network load, sleeping for 5 sec...\n");

    int rc = collector->preCollect(hints, 0);
    if (RT_FAILURE(rc))
    {
        RTPrintf("tstCollector: preCollect() -> %Rrc\n", rc);
        return 1;
    }
    rc = collector->getRawHostNetworkLoad(NETIFNAME, &hostRxStart, &hostTxStart);
    if (RT_FAILURE(rc))
    {
        RTPrintf("tstCollector: getRawHostNetworkLoad() -> %Rrc\n", rc);
        return 1;
    }

    RTThreadSleep(5000); // Sleep for five seconds

    rc = collector->preCollect(hints, 0);
    if (RT_FAILURE(rc))
    {
        RTPrintf("tstCollector: preCollect() -> %Rrc\n", rc);
        return 1;
    }
    rc = collector->getRawHostNetworkLoad(NETIFNAME, &hostRxStop, &hostTxStop);
    if (RT_FAILURE(rc))
    {
        RTPrintf("tstCollector: getRawHostNetworkLoad() -> %Rrc\n", rc);
        return 1;
    }
    RTPrintf("tstCollector: host network speed = %llu bytes/sec (%llu mbit/sec)\n",
             speed, speed/(1000000/8));
    RTPrintf("tstCollector: host network rx = %llu bytes/sec (%llu mbit/sec, %d %%*100)\n",
             (hostRxStop - hostRxStart)/5, (hostRxStop - hostRxStart)/(5000000/8),
             (hostRxStop - hostRxStart) * 10000 / (speed * 5));
    RTPrintf("tstCollector: host network tx = %llu bytes/sec (%llu mbit/sec, %d %%*100)\n",
             (hostTxStop - hostTxStart)/5, (hostTxStop - hostTxStart)/(5000000/8),
             (hostTxStop - hostTxStart) * 10000 / (speed * 5));

    return 0;
}

int main(int argc, char *argv[])
{
    /*
     * Initialize the VBox runtime without loading
     * the support driver.
     */
    int rc = RTR3InitExe(argc, &argv, 0);
    if (RT_FAILURE(rc))
    {
        RTPrintf("tstCollector: RTR3InitExe() -> %d\n", rc);
        return 1;
    }
    if (argc > 1 && !strcmp(argv[1], "-child"))
    {
        /* We have spawned ourselves as a child process -- scratch the leg */
        RTThreadSleep(1000000);
        return 1;
    }
#ifdef RT_OS_WINDOWS
    HRESULT hRes = CoInitialize(NULL);
    /*
     * Need to initialize security to access performance enumerators.
     */
    hRes = CoInitializeSecurity(
        NULL,
        -1,
        NULL,
        NULL,
        RPC_C_AUTHN_LEVEL_NONE,
        RPC_C_IMP_LEVEL_IMPERSONATE,
        NULL, EOAC_NONE, 0);
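    /* Note: the HRESULT values above are not checked; if COM or COM security
     * initialization fails, the Windows performance queries made by the
     * collector below are expected to fail as well. */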
#endif

    pm::CollectorHAL *collector = pm::createHAL();
    if (!collector)
    {
        RTPrintf("tstCollector: createMetricFactory() failed\n", rc);
        return 1;
    }
#if 1
    pm::CollectorHints hints;
    hints.collectHostCpuLoad();
    hints.collectHostRamUsage();
    hints.collectProcessCpuLoad(RTProcSelf());
    hints.collectProcessRamUsage(RTProcSelf());

    uint64_t start;

    uint64_t hostUserStart, hostKernelStart, hostIdleStart;
    uint64_t hostUserStop, hostKernelStop, hostIdleStop, hostTotal;

    uint64_t processUserStart, processKernelStart, processTotalStart;
    uint64_t processUserStop, processKernelStop, processTotalStop;

    RTPrintf("tstCollector: TESTING - CPU load, sleeping for 5 sec\n");

    rc = collector->preCollect(hints, 0);
    if (RT_FAILURE(rc))
    {
        RTPrintf("tstCollector: preCollect() -> %Rrc\n", rc);
        return 1;
    }
    rc = collector->getRawHostCpuLoad(&hostUserStart, &hostKernelStart, &hostIdleStart);
    if (RT_FAILURE(rc))
    {
        RTPrintf("tstCollector: getRawHostCpuLoad() -> %Rrc\n", rc);
        return 1;
    }
    rc = collector->getRawProcessCpuLoad(RTProcSelf(), &processUserStart, &processKernelStart, &processTotalStart);
    if (RT_FAILURE(rc))
    {
        RTPrintf("tstCollector: getRawProcessCpuLoad() -> %Rrc\n", rc);
        return 1;
    }

    RTThreadSleep(5000); // Sleep for 5 seconds

    rc = collector->preCollect(hints, 0);
    if (RT_FAILURE(rc))
    {
        RTPrintf("tstCollector: preCollect() -> %Rrc\n", rc);
        return 1;
    }
    rc = collector->getRawHostCpuLoad(&hostUserStop, &hostKernelStop, &hostIdleStop);
    if (RT_FAILURE(rc))
    {
        RTPrintf("tstCollector: getRawHostCpuLoad() -> %Rrc\n", rc);
        return 1;
    }
    rc = collector->getRawProcessCpuLoad(RTProcSelf(), &processUserStop, &processKernelStop, &processTotalStop);
    if (RT_FAILURE(rc))
    {
        RTPrintf("tstCollector: getRawProcessCpuLoad() -> %Rrc\n", rc);
        return 1;
    }
    hostTotal = hostUserStop - hostUserStart
        + hostKernelStop - hostKernelStart
        + hostIdleStop - hostIdleStart;
    /*printf("tstCollector: host cpu user = %f sec\n", (hostUserStop - hostUserStart) / 10000000.);
    printf("tstCollector: host cpu kernel = %f sec\n", (hostKernelStop - hostKernelStart) / 10000000.);
    printf("tstCollector: host cpu idle = %f sec\n", (hostIdleStop - hostIdleStart) / 10000000.);
    printf("tstCollector: host cpu total = %f sec\n", hostTotal / 10000000.);*/
    RTPrintf("tstCollector: host cpu user = %llu %%\n", (hostUserStop - hostUserStart) * 100 / hostTotal);
    RTPrintf("tstCollector: host cpu kernel = %llu %%\n", (hostKernelStop - hostKernelStart) * 100 / hostTotal);
    RTPrintf("tstCollector: host cpu idle = %llu %%\n", (hostIdleStop - hostIdleStart) * 100 / hostTotal);
    RTPrintf("tstCollector: process cpu user = %llu %%\n", (processUserStop - processUserStart) * 100 / (processTotalStop - processTotalStart));
    RTPrintf("tstCollector: process cpu kernel = %llu %%\n\n", (processKernelStop - processKernelStart) * 100 / (processTotalStop - processTotalStart));

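    /* Repeat the measurement, this time burning CPU in a busy loop for five
     * seconds instead of sleeping; the user-mode share of this process should
     * now be much higher. */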
    RTPrintf("tstCollector: TESTING - CPU load, looping for 5 sec\n");
    rc = collector->preCollect(hints, 0);
    if (RT_FAILURE(rc))
    {
        RTPrintf("tstCollector: preCollect() -> %Rrc\n", rc);
        return 1;
    }
    rc = collector->getRawHostCpuLoad(&hostUserStart, &hostKernelStart, &hostIdleStart);
    if (RT_FAILURE(rc))
    {
        RTPrintf("tstCollector: getRawHostCpuLoad() -> %Rrc\n", rc);
        return 1;
    }
    rc = collector->getRawProcessCpuLoad(RTProcSelf(), &processUserStart, &processKernelStart, &processTotalStart);
    if (RT_FAILURE(rc))
    {
        RTPrintf("tstCollector: getRawProcessCpuLoad() -> %Rrc\n", rc);
        return 1;
    }
    start = RTTimeMilliTS();
    while(RTTimeMilliTS() - start < 5000)
        ; // Loop for 5 seconds
    rc = collector->preCollect(hints, 0);
    if (RT_FAILURE(rc))
    {
        RTPrintf("tstCollector: preCollect() -> %Rrc\n", rc);
        return 1;
    }
    rc = collector->getRawHostCpuLoad(&hostUserStop, &hostKernelStop, &hostIdleStop);
    if (RT_FAILURE(rc))
    {
        RTPrintf("tstCollector: getRawHostCpuLoad() -> %Rrc\n", rc);
        return 1;
    }
    rc = collector->getRawProcessCpuLoad(RTProcSelf(), &processUserStop, &processKernelStop, &processTotalStop);
    if (RT_FAILURE(rc))
    {
        RTPrintf("tstCollector: getRawProcessCpuLoad() -> %Rrc\n", rc);
        return 1;
    }
    hostTotal = hostUserStop - hostUserStart
        + hostKernelStop - hostKernelStart
        + hostIdleStop - hostIdleStart;
    RTPrintf("tstCollector: host cpu user = %llu %%\n", (hostUserStop - hostUserStart) * 100 / hostTotal);
    RTPrintf("tstCollector: host cpu kernel = %llu %%\n", (hostKernelStop - hostKernelStart) * 100 / hostTotal);
    RTPrintf("tstCollector: host cpu idle = %llu %%\n", (hostIdleStop - hostIdleStart) * 100 / hostTotal);
    RTPrintf("tstCollector: process cpu user = %llu %%\n", (processUserStop - processUserStart) * 100 / (processTotalStop - processTotalStart));
    RTPrintf("tstCollector: process cpu kernel = %llu %%\n\n", (processKernelStop - processKernelStart) * 100 / (processTotalStop - processTotalStart));

    RTPrintf("tstCollector: TESTING - Memory usage\n");

    ULONG total, used, available, processUsed;

    rc = collector->getHostMemoryUsage(&total, &used, &available);
    if (RT_FAILURE(rc))
    {
        RTPrintf("tstCollector: getHostMemoryUsage() -> %Rrc\n", rc);
        return 1;
    }
    rc = collector->getProcessMemoryUsage(RTProcSelf(), &processUsed);
    if (RT_FAILURE(rc))
    {
        RTPrintf("tstCollector: getProcessMemoryUsage() -> %Rrc\n", rc);
        return 1;
    }
    RTPrintf("tstCollector: host mem total = %lu kB\n", total);
    RTPrintf("tstCollector: host mem used = %lu kB\n", used);
    RTPrintf("tstCollector: host mem available = %lu kB\n", available);
    RTPrintf("tstCollector: process mem used = %lu kB\n", processUsed);
#endif
#if 1
    rc = testNetwork(collector);
#endif
#if 0
    RTPrintf("\ntstCollector: TESTING - Performance\n\n");

    measurePerformance(collector, argv[0], 100);
#endif

    delete collector;

    printf ("\ntstCollector FINISHED.\n");

    return rc;
}

