VirtualBox

source: vbox/trunk/src/VBox/Devices/Graphics/DevVGA_VDMA.cpp@ 50831

最後變更 在這個檔案從50831是 50831,由 vboxsync 提交於 11 年 前

crOpenGL: new command submission continued

  • 屬性 svn:eol-style 設為 native
  • 屬性 svn:keywords 設為 Author Date Id Revision
檔案大小: 85.4 KB
 
1/** @file
2 * Video DMA (VDMA) support.
3 */
4
5/*
6 * Copyright (C) 2006-2012 Oracle Corporation
7 *
8 * This file is part of VirtualBox Open Source Edition (OSE), as
9 * available from http://www.virtualbox.org. This file is free software;
10 * you can redistribute it and/or modify it under the terms of the GNU
11 * General Public License (GPL) as published by the Free Software
12 * Foundation, in version 2 as it comes in the "COPYING" file of the
13 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
14 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
15 */
16//#include <VBox/VMMDev.h>
17#include <VBox/vmm/pdmdev.h>
18#include <VBox/VBoxVideo.h>
19#include <iprt/semaphore.h>
20#include <iprt/thread.h>
21#include <iprt/mem.h>
22#include <iprt/asm.h>
23#include <iprt/list.h>
24#include <iprt/param.h>
25
26#include "DevVGA.h"
27#include "HGSMI/SHGSMIHost.h"
28
29#include <VBox/VBoxVideo3D.h>
30#include <VBox/VBoxVideoHost3D.h>
31
32#ifdef DEBUG_misha
33# define VBOXVDBG_MEMCACHE_DISABLE
34#endif
35
36#ifndef VBOXVDBG_MEMCACHE_DISABLE
37# include <iprt/memcache.h>
38#endif
39
#ifdef DEBUG_misha
/* In misha's debug builds a warning also raises an assertion (breakpoint). */
# define WARN_BP() do { AssertFailed(); } while (0)
#else
/* In all other builds WARN_BP is a no-op. */
# define WARN_BP() do { } while (0)
#endif
/* Logs a release-log warning and, in DEBUG_misha builds, breaks into the
 * debugger.  _msg must be a parenthesized LogRel argument list, e.g.
 * WARN(("failed %d\n", rc)). */
#define WARN(_msg) do { \
        LogRel(_msg); \
        WARN_BP(); \
    } while (0)
49
/* Worker thread life-cycle states (VBOXVDMATHREAD::u32State). */
#define VBOXVDMATHREAD_STATE_TERMINATED 0
#define VBOXVDMATHREAD_STATE_CREATED 1
#define VBOXVDMATHREAD_STATE_TERMINATING 2

/* State of the VDMA worker thread plus the events used to synchronize with it. */
typedef struct VBOXVDMATHREAD
{
    RTTHREAD hWorkerThread;      /* worker thread handle */
    RTSEMEVENT hEvent;           /* signaled to wake the worker (work/termination) */
    RTSEMEVENT hClientEvent;     /* signaled by the worker to report construction success/failure */
    volatile uint32_t u32State;  /* VBOXVDMATHREAD_STATE_* */
} VBOXVDMATHREAD, *PVBOXVDMATHREAD;
61
62
/* state transformations:
 *
 * submitter | processor
 *
 * LISTENING ---> PROCESSING
 *
 * */
#define VBVAEXHOSTCONTEXT_STATE_LISTENING 0
#define VBVAEXHOSTCONTEXT_STATE_PROCESSING 1

/* Enable states, ordered: disabled < paused < enabled. */
#define VBVAEXHOSTCONTEXT_ESTATE_DISABLED -1
#define VBVAEXHOSTCONTEXT_ESTATE_PAUSED 0
#define VBVAEXHOSTCONTEXT_ESTATE_ENABLED 1

/* Host-side context for the guest/host shared VBVA command buffer. */
typedef struct VBVAEXHOSTCONTEXT
{
    VBVABUFFER *pVBVA;               /* shared ring buffer; NULL until enabled */
    volatile int32_t i32State;       /* VBVAEXHOSTCONTEXT_STATE_* (processor ownership) */
    volatile int32_t i32EnableState; /* VBVAEXHOSTCONTEXT_ESTATE_* */
    volatile uint32_t u32cCtls;      /* number of entries queued on the two ctl lists */
    /* critical section for accessing ctl lists */
    RTCRITSECT CltCritSect;
    RTLISTANCHOR GuestCtlList;       /* pending guest-submitted VBVAEXHOSTCTLs */
    RTLISTANCHOR HostCtlList;        /* pending host-submitted VBVAEXHOSTCTLs */
#ifndef VBOXVDBG_MEMCACHE_DISABLE
    RTMEMCACHE CtlCache;             /* allocator for VBVAEXHOSTCTL entries */
#endif
} VBVAEXHOSTCONTEXT;
91
/* Control command types.  HH_* are host-originated; GHH_*/GH_* involve the guest. */
typedef enum
{
    VBVAEXHOSTCTL_TYPE_UNDEFINED = 0,
    VBVAEXHOSTCTL_TYPE_HH_INTERNAL_PAUSE,  /* handled in-place by the processor: pauses VBVA */
    VBVAEXHOSTCTL_TYPE_HH_INTERNAL_RESUME, /* handled in-place by the processor: resumes a paused VBVA */
    VBVAEXHOSTCTL_TYPE_HH_ENABLE,
    VBVAEXHOSTCTL_TYPE_HH_TERM,
    VBVAEXHOSTCTL_TYPE_HH_RESET,
    VBVAEXHOSTCTL_TYPE_HH_SAVESTATE,
    VBVAEXHOSTCTL_TYPE_HH_LOADSTATE,
    VBVAEXHOSTCTL_TYPE_HH_BE_OPAQUE,
    VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE,
    VBVAEXHOSTCTL_TYPE_GH_ENABLE_DISABLE
} VBVAEXHOSTCTL_TYPE;

struct VBVAEXHOSTCTL;

/* Completion callback, invoked by VBoxVBVAExHPDataCompleteCtl when set. */
typedef DECLCALLBACKPTR(void, PFNVBVAEXHOSTCTL_COMPLETE)(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl, int rc, void *pvComplete);

/* A queued control command. */
typedef struct VBVAEXHOSTCTL
{
    RTLISTNODE Node;               /* linkage on GuestCtlList/HostCtlList */
    VBVAEXHOSTCTL_TYPE enmType;
    union
    {
        struct
        {
            uint8_t * pu8Cmd;      /* opaque command buffer */
            uint32_t cbCmd;        /* size of the buffer in bytes */
        } cmd;

        struct
        {
            PSSMHANDLE pSSM;       /* saved-state handle */
            uint32_t u32Version;   /* saved-state version */
        } state;
    } u;
    PFNVBVAEXHOSTCTL_COMPLETE pfnComplete; /* optional; when NULL the ctl is freed on completion */
    void *pvComplete;              /* context for pfnComplete */
} VBVAEXHOSTCTL;
132
/* VBoxVBVAExHP**, i.e. processor functions, can NOT be called concurrently with each other,
 * but can be called with other VBoxVBVAExS** (submitter) functions except Init/Start/Term apparently.
 * Can only be called by the processor, i.e. the entity that acquired the processor state by a direct or indirect call to VBoxVBVAExHSCheckCommands.
 * See more detailed comments in headers for function definitions. */
typedef enum
{
    VBVAEXHOST_DATA_TYPE_NO_DATA = 0, /* nothing to process */
    VBVAEXHOST_DATA_TYPE_CMD,         /* command fetched from the VBVA ring buffer */
    VBVAEXHOST_DATA_TYPE_HOSTCTL,     /* host-submitted VBVAEXHOSTCTL */
    VBVAEXHOST_DATA_TYPE_GUESTCTL     /* guest-submitted VBVAEXHOSTCTL */
} VBVAEXHOST_DATA_TYPE;
static VBVAEXHOST_DATA_TYPE VBoxVBVAExHPDataGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd);

static void VBoxVBVAExHPDataCompleteCmd(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint32_t cbCmd);
static void VBoxVBVAExHPDataCompleteCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL *pCtl, int rc);

/* Submitter functions can be called concurrently with each other as well as with
 * other VBoxVBVAEx** functions except Init/Start/Term apparently. */
static int VBoxVBVAExHSCheckCommands(struct VBVAEXHOSTCONTEXT *pCmdVbva);

static int VBoxVBVAExHSInit(struct VBVAEXHOSTCONTEXT *pCmdVbva);
static int VBoxVBVAExHSEnable(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVABUFFER *pVBVA);
static int VBoxVBVAExHSDisable(struct VBVAEXHOSTCONTEXT *pCmdVbva);
static void VBoxVBVAExHSTerm(struct VBVAEXHOSTCONTEXT *pCmdVbva);
static int VBoxVBVAExHSSaveState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM);
static int VBoxVBVAExHSLoadState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version);
159
160static VBVAEXHOSTCTL* VBoxVBVAExHCtlAlloc(VBVAEXHOSTCONTEXT *pCmdVbva)
161{
162#ifndef VBOXVDBG_MEMCACHE_DISABLE
163 return (VBVAEXHOSTCTL*)RTMemCacheAlloc(pCmdVbva->CtlCache);
164#else
165 return (VBVAEXHOSTCTL*)RTMemAlloc(sizeof (VBVAEXHOSTCTL));
166#endif
167}
168
169static void VBoxVBVAExHCtlFree(VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL *pCtl)
170{
171#ifndef VBOXVDBG_MEMCACHE_DISABLE
172 RTMemCacheFree(pCmdVbva->CtlCache, pCtl);
173#else
174 RTMemFree(pCtl);
175#endif
176}
177
178static VBVAEXHOSTCTL* VBoxVBVAExHCtlCreate(VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL_TYPE enmType)
179{
180 VBVAEXHOSTCTL* pCtl = VBoxVBVAExHCtlAlloc(pCmdVbva);
181 if (!pCtl)
182 {
183 WARN(("VBoxVBVAExHCtlAlloc failed\n"));
184 return NULL;
185 }
186
187 pCtl->enmType = enmType;
188 return pCtl;
189}
190
191static int vboxVBVAExHSProcessorAcquire(struct VBVAEXHOSTCONTEXT *pCmdVbva)
192{
193 Assert(pCmdVbva->i32State >= VBVAEXHOSTCONTEXT_STATE_LISTENING);
194
195 if (ASMAtomicCmpXchgS32(&pCmdVbva->i32State, VBVAEXHOSTCONTEXT_STATE_PROCESSING, VBVAEXHOSTCONTEXT_STATE_LISTENING))
196 return VINF_SUCCESS;
197 return VERR_SEM_BUSY;
198}
199
/* Dequeues the next pending ctl, host ctls taking priority over guest ctls.
 * Processor-only.
 * @param pfHostCtl     set to true when the returned ctl came from the host list
 * @param fHostOnlyMode when true only the host list is considered (used on disable)
 * @returns the detached ctl, or NULL when nothing (eligible) is queued */
static VBVAEXHOSTCTL* vboxVBVAExHPCheckCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, bool *pfHostCtl, bool fHostOnlyMode)
{
    Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);

    /* lock-free fast path: nothing queued at all */
    if(!fHostOnlyMode && !ASMAtomicUoReadU32(&pCmdVbva->u32cCtls))
        return NULL;

    int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
    if (RT_SUCCESS(rc))
    {
        VBVAEXHOSTCTL* pCtl = RTListGetFirst(&pCmdVbva->HostCtlList, VBVAEXHOSTCTL, Node);
        if (pCtl)
            *pfHostCtl = true;
        else if (!fHostOnlyMode)
        {
            /* guest ctls are only handed out while not paused/disabled */
            if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) > VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
            {
                pCtl = RTListGetFirst(&pCmdVbva->GuestCtlList, VBVAEXHOSTCTL, Node);
                /* pCtl can not be null here since pCmdVbva->u32cCtls is not null,
                 * and there are no HostCtl commands*/
                Assert(pCtl);
                *pfHostCtl = false;
            }
        }

        if (pCtl)
        {
            /* detach under the lock and keep the queued counter in sync */
            RTListNodeRemove(&pCtl->Node);
            ASMAtomicDecU32(&pCmdVbva->u32cCtls);
        }

        RTCritSectLeave(&pCmdVbva->CltCritSect);

        return pCtl;
    }
    else
        WARN(("RTCritSectEnter failed %d\n", rc));

    return NULL;
}
240
241static VBVAEXHOSTCTL* VBoxVBVAExHPCheckHostCtlOnDisable(struct VBVAEXHOSTCONTEXT *pCmdVbva)
242{
243 bool fHostCtl;
244 return vboxVBVAExHPCheckCtl(pCmdVbva, &fHostCtl, true);
245}
246
247
248static bool vboxVBVAExHPCheckProcessCtlInternal(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL* pCtl)
249{
250 switch (pCtl->enmType)
251 {
252 case VBVAEXHOSTCTL_TYPE_HH_INTERNAL_PAUSE:
253 if (pCmdVbva->i32EnableState > VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
254 ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_PAUSED);
255 return true;
256 case VBVAEXHOSTCTL_TYPE_HH_INTERNAL_RESUME:
257 if (pCmdVbva->i32EnableState == VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
258 ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_ENABLED);
259 return true;
260 default:
261 return false;
262 }
263}
264
265static void vboxVBVAExHPProcessorRelease(struct VBVAEXHOSTCONTEXT *pCmdVbva)
266{
267 Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
268
269 ASMAtomicWriteS32(&pCmdVbva->i32State, VBVAEXHOSTCONTEXT_STATE_LISTENING);
270}
271
272static void vboxVBVAExHPHgEventSet(struct VBVAEXHOSTCONTEXT *pCmdVbva)
273{
274 Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
275 if (pCmdVbva->pVBVA)
276 ASMAtomicOrU32(&pCmdVbva->pVBVA->hostFlags.u32HostEvents, VBVA_F_STATE_PROCESSING);
277}
278
279static void vboxVBVAExHPHgEventClear(struct VBVAEXHOSTCONTEXT *pCmdVbva)
280{
281 Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
282 if (pCmdVbva->pVBVA)
283 ASMAtomicAndU32(&pCmdVbva->pVBVA->hostFlags.u32HostEvents, ~VBVA_F_STATE_PROCESSING);
284}
285
/* Fetches the next completed record from the shared ring buffer without
 * consuming it (consumption is done later via VBoxVBVAExHPDataCompleteCmd).
 * Processor-only; the VBVA must not be paused or disabled.
 * @returns VINF_SUCCESS with *ppCmd/*pcbCmd pointing into the ring buffer,
 *          VINF_EOF when no records are pending (outputs untouched),
 *          VINF_TRY_AGAIN while the guest is still writing the record,
 *          VERR_INVALID_STATE when the command wraps the buffer boundary. */
static int vboxVBVAExHPCmdGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd)
{
    Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
    Assert(pCmdVbva->i32EnableState > VBVAEXHOSTCONTEXT_ESTATE_PAUSED);

    VBVABUFFER *pVBVA = pCmdVbva->pVBVA;

    uint32_t indexRecordFirst = pVBVA->indexRecordFirst;
    uint32_t indexRecordFree = pVBVA->indexRecordFree;

    Log(("first = %d, free = %d\n",
         indexRecordFirst, indexRecordFree));

    if (indexRecordFirst == indexRecordFree)
    {
        /* No records to process. Return without assigning output variables. */
        return VINF_EOF;
    }

    uint32_t cbRecordCurrent = ASMAtomicReadU32(&pVBVA->aRecords[indexRecordFirst].cbRecord);

    /* A new record need to be processed. */
    if (cbRecordCurrent & VBVA_F_RECORD_PARTIAL)
    {
        /* the record is being recorded, try again */
        return VINF_TRY_AGAIN;
    }

    uint32_t cbRecord = cbRecordCurrent & ~VBVA_F_RECORD_PARTIAL;

    if (!cbRecord)
    {
        /* the record is being recorded, try again */
        return VINF_TRY_AGAIN;
    }

    /* we should not get partial commands here actually */
    Assert(cbRecord);

    /* The size of the largest contiguous chunk in the ring buffer. */
    uint32_t u32BytesTillBoundary = pVBVA->cbData - pVBVA->off32Data;

    /* The pointer to data in the ring buffer. */
    uint8_t *pSrc = &pVBVA->au8Data[pVBVA->off32Data];

    /* Fetch or point the data. */
    if (u32BytesTillBoundary >= cbRecord)
    {
        /* The command does not cross buffer boundary. Return address in the buffer. */
        *ppCmd = pSrc;
        *pcbCmd = cbRecord;
        return VINF_SUCCESS;
    }

    LogRel(("CmdVbva: cross-bound writes unsupported\n"));
    return VERR_INVALID_STATE;
}
343
344static void VBoxVBVAExHPDataCompleteCmd(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint32_t cbCmd)
345{
346 VBVABUFFER *pVBVA = pCmdVbva->pVBVA;
347 pVBVA->off32Data = (pVBVA->off32Data + cbCmd) % pVBVA->cbData;
348
349 pVBVA->indexRecordFirst = (pVBVA->indexRecordFirst + 1) % RT_ELEMENTS(pVBVA->aRecords);
350}
351
352static void VBoxVBVAExHPDataCompleteCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL *pCtl, int rc)
353{
354 if (pCtl->pfnComplete)
355 pCtl->pfnComplete(pCmdVbva, pCtl, rc, pCtl->pvComplete);
356 else
357 VBoxVBVAExHCtlFree(pCmdVbva, pCtl);
358}
359
/* Worker for VBoxVBVAExHPDataGet: fetches the next item for the processor —
 * a host ctl, a guest ctl, or a command from the ring buffer.
 * Internal pause/resume ctls are executed in place and never returned. */
static VBVAEXHOST_DATA_TYPE vboxVBVAExHPDataGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd)
{
    Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
    VBVAEXHOSTCTL*pCtl;
    bool fHostClt;

    for(;;)
    {
        pCtl = vboxVBVAExHPCheckCtl(pCmdVbva, &fHostClt, false);
        if (pCtl)
        {
            if (fHostClt)
            {
                /* pause/resume are consumed here; everything else is returned */
                if (!vboxVBVAExHPCheckProcessCtlInternal(pCmdVbva, pCtl))
                {
                    *ppCmd = (uint8_t*)pCtl;
                    *pcbCmd = sizeof (*pCtl);
                    return VBVAEXHOST_DATA_TYPE_HOSTCTL;
                }
            }
            else
            {
                *ppCmd = (uint8_t*)pCtl;
                *pcbCmd = sizeof (*pCtl);
                return VBVAEXHOST_DATA_TYPE_GUESTCTL;
            }
        }

        /* no ring commands are handed out while paused or disabled */
        if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) <= VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
            return VBVAEXHOST_DATA_TYPE_NO_DATA;

        int rc = vboxVBVAExHPCmdGet(pCmdVbva, ppCmd, pcbCmd);
        switch (rc)
        {
            case VINF_SUCCESS:
                return VBVAEXHOST_DATA_TYPE_CMD;
            case VINF_EOF:
                return VBVAEXHOST_DATA_TYPE_NO_DATA;
            case VINF_TRY_AGAIN:
                /* the guest is still writing the record; back off briefly */
                RTThreadSleep(1);
                continue;
            default:
                /* this is something really unexpected, i.e. most likely guest has written something incorrect to the VBVA buffer */
                WARN(("Warning: vboxVBVAExHCmdGet returned unexpected status %d\n", rc));
                return VBVAEXHOST_DATA_TYPE_NO_DATA;
        }
    }

    /* not reached: the loop above only exits via return */
    WARN(("Warning: VBoxVBVAExHCmdGet unexpected state\n"));
    return VBVAEXHOST_DATA_TYPE_NO_DATA;
}
411
/* Public processor entry point: fetches the next item to process.
 * On NO_DATA the PROCESSING flag is cleared and the processor role released,
 * followed by a re-check that closes the race against concurrent submission
 * (see the inline comment below). */
static VBVAEXHOST_DATA_TYPE VBoxVBVAExHPDataGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd)
{
    VBVAEXHOST_DATA_TYPE enmType = vboxVBVAExHPDataGet(pCmdVbva, ppCmd, pcbCmd);
    if (enmType == VBVAEXHOST_DATA_TYPE_NO_DATA)
    {
        vboxVBVAExHPHgEventClear(pCmdVbva);
        vboxVBVAExHPProcessorRelease(pCmdVbva);
        /* we need to prevent racing between us clearing the flag and command check/submission thread, i.e.
         * 1. we check the queue -> and it is empty
         * 2. submitter adds command to the queue
         * 3. submitter checks the "processing" -> and it is true , thus it does not submit a notification
         * 4. we clear the "processing" state
         * 5. ->here we need to re-check the queue state to ensure we do not leak the notification of the above command
         * 6. if the queue appears to be not-empty set the "processing" state back to "true"
         **/
        int rc = vboxVBVAExHSProcessorAcquire(pCmdVbva);
        if (RT_SUCCESS(rc))
        {
            /* we are the processor now */
            enmType = vboxVBVAExHPDataGet(pCmdVbva, ppCmd, pcbCmd);
            if (enmType == VBVAEXHOST_DATA_TYPE_NO_DATA)
            {
                vboxVBVAExHPProcessorRelease(pCmdVbva);
                return VBVAEXHOST_DATA_TYPE_NO_DATA;
            }

            vboxVBVAExHPHgEventSet(pCmdVbva);
        }
    }

    return enmType;
}
444
445DECLINLINE(bool) vboxVBVAExHSHasCommands(struct VBVAEXHOSTCONTEXT *pCmdVbva)
446{
447 VBVABUFFER *pVBVA = pCmdVbva->pVBVA;
448
449 if (pVBVA)
450 {
451 uint32_t indexRecordFirst = pVBVA->indexRecordFirst;
452 uint32_t indexRecordFree = pVBVA->indexRecordFree;
453
454 if (indexRecordFirst != indexRecordFree)
455 return true;
456 }
457
458 return !!ASMAtomicReadU32(&pCmdVbva->u32cCtls);
459}
460
/* Checks whether new commands are ready for processing
 * @returns
 * VINF_SUCCESS - there are commands in the queue, and the calling thread is now the processor (i.e. typically it would delegate processing to a worker thread)
 * VINF_EOF - no commands in the queue
 * VINF_ALREADY_INITIALIZED - another thread is already processing the commands
 * VERR_INVALID_STATE - the VBVA is paused or pausing */
static int VBoxVBVAExHSCheckCommands(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    int rc = vboxVBVAExHSProcessorAcquire(pCmdVbva);
    if (RT_SUCCESS(rc))
    {
        /* we are the processor now */
        if (vboxVBVAExHSHasCommands(pCmdVbva))
        {
            /* advertise "processing" to the guest and keep the processor role */
            vboxVBVAExHPHgEventSet(pCmdVbva);
            return VINF_SUCCESS;
        }

        vboxVBVAExHPProcessorRelease(pCmdVbva);
        return VINF_EOF;
    }
    if (rc == VERR_SEM_BUSY)
        return VINF_ALREADY_INITIALIZED;
    return VERR_INVALID_STATE;
}
486
/* Initializes the context: zeroes it, creates the ctl-list critical section
 * and (unless compiled out) the ctl memory cache, and initializes both lists.
 * On success i32State is PROCESSING and the enable state is DISABLED. */
static int VBoxVBVAExHSInit(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    memset(pCmdVbva, 0, sizeof (*pCmdVbva));
    int rc = RTCritSectInit(&pCmdVbva->CltCritSect);
    if (RT_SUCCESS(rc))
    {
#ifndef VBOXVDBG_MEMCACHE_DISABLE
        rc = RTMemCacheCreate(&pCmdVbva->CtlCache, sizeof (VBVAEXHOSTCTL),
                                0, /* size_t cbAlignment */
                                UINT32_MAX, /* uint32_t cMaxObjects */
                                NULL, /* PFNMEMCACHECTOR pfnCtor*/
                                NULL, /* PFNMEMCACHEDTOR pfnDtor*/
                                NULL, /* void *pvUser*/
                                0 /* uint32_t fFlags*/
                                );
        if (RT_SUCCESS(rc))
#endif
        {
            RTListInit(&pCmdVbva->GuestCtlList);
            RTListInit(&pCmdVbva->HostCtlList);
            /* NOTE(review): the initializing thread apparently starts out
             * holding the processor role (state = PROCESSING) — confirm the
             * caller is expected to release it. */
            pCmdVbva->i32State = VBVAEXHOSTCONTEXT_STATE_PROCESSING;
            pCmdVbva->i32EnableState = VBVAEXHOSTCONTEXT_ESTATE_DISABLED;
            return VINF_SUCCESS;
        }
#ifndef VBOXVDBG_MEMCACHE_DISABLE
        else
            WARN(("RTMemCacheCreate failed %d\n", rc));
#endif
    }
    else
        WARN(("RTCritSectInit failed %d\n", rc));

    return rc;
}
521
522DECLINLINE(bool) VBoxVBVAExHSIsEnabled(struct VBVAEXHOSTCONTEXT *pCmdVbva)
523{
524 return (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) >= VBVAEXHOSTCONTEXT_ESTATE_PAUSED);
525}
526
527static int VBoxVBVAExHSEnable(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVABUFFER *pVBVA)
528{
529 if (VBoxVBVAExHSIsEnabled(pCmdVbva))
530 return VINF_ALREADY_INITIALIZED;
531
532 pCmdVbva->pVBVA = pVBVA;
533 pCmdVbva->pVBVA->hostFlags.u32HostEvents = 0;
534 ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_ENABLED);
535 return VINF_SUCCESS;
536}
537
538static int VBoxVBVAExHSDisable(struct VBVAEXHOSTCONTEXT *pCmdVbva)
539{
540 if (!VBoxVBVAExHSIsEnabled(pCmdVbva))
541 return VINF_SUCCESS;
542
543 ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_DISABLED);
544 return VINF_SUCCESS;
545}
546
/* Tears the context down: clears the shared host-event flags, deletes the
 * critical section and ctl cache, and zeroes the structure.
 * Both ctl lists must already be empty and no processor may be active. */
static void VBoxVBVAExHSTerm(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    /* ensure the processor is stopped */
    Assert(pCmdVbva->i32State >= VBVAEXHOSTCONTEXT_STATE_LISTENING);

    /* ensure no one tries to submit the command */
    if (pCmdVbva->pVBVA)
        pCmdVbva->pVBVA->hostFlags.u32HostEvents = 0;

    Assert(RTListIsEmpty(&pCmdVbva->GuestCtlList));
    Assert(RTListIsEmpty(&pCmdVbva->HostCtlList));

    RTCritSectDelete(&pCmdVbva->CltCritSect);

#ifndef VBOXVDBG_MEMCACHE_DISABLE
    RTMemCacheDestroy(pCmdVbva->CtlCache);
#endif

    memset(pCmdVbva, 0, sizeof (*pCmdVbva));
}
567
/* Saves the command VBVA state: writes the VRAM byte offset of the VBVA
 * buffer, or 0xffffffff (UINT32_MAX) as a marker when the VBVA is disabled.
 * @returns VINF_SUCCESS; VERR_INVALID_STATE when the VBVA is enabled but not
 *          paused; SSMR3PutU32 status code on write failure. */
static int VBoxVBVAExHSSaveState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM)
{
    int rc;

    int32_t i32EnableState = ASMAtomicUoReadS32(&pCmdVbva->i32EnableState);
    if (i32EnableState >= VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
    {
        if (i32EnableState != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
        {
            /* saving is only legal while paused */
            WARN(("vbva not paused\n"));
            return VERR_INVALID_STATE;
        }

        /* store the buffer's byte offset within VRAM */
        rc = SSMR3PutU32(pSSM, (uint32_t)(((uint8_t*)pCmdVbva->pVBVA) - pu8VramBase));
        AssertRCReturn(rc, rc);
        return VINF_SUCCESS;
    }

    /* disabled: store the marker value */
    rc = SSMR3PutU32(pSSM, 0xffffffff);
    AssertRCReturn(rc, rc);

    return VINF_SUCCESS;
}
594
/* Origin of a submitted ctl; selects the target list and the enable-state
 * checks in VBoxVBVAExHCtlSubmit. */
typedef enum
{
    VBVAEXHOSTCTL_SOURCE_GUEST = 0,   /* guest submission -> GuestCtlList */
    VBVAEXHOSTCTL_SOURCE_HOST_ANY,    /* host submission, allowed in any state */
    VBVAEXHOSTCTL_SOURCE_HOST_ENABLED /* host submission, requires enabled VBVA */
} VBVAEXHOSTCTL_SOURCE;
601
602
/* Queues a ctl for processing and kicks the command check.
 * Host-sourced ctls go to HostCtlList, guest ones to GuestCtlList; for
 * VBVAEXHOSTCTL_SOURCE_HOST_ENABLED the VBVA must be enabled (pre-checked
 * lock-free, then re-checked under the ctl-list lock).
 * @returns the VBoxVBVAExHSCheckCommands status on success. */
static int VBoxVBVAExHCtlSubmit(VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL* pCtl, VBVAEXHOSTCTL_SOURCE enmSource, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
{
    if ((enmSource == VBVAEXHOSTCTL_SOURCE_HOST_ENABLED) && !VBoxVBVAExHSIsEnabled(pCmdVbva))
    {
        WARN(("cmd vbva not enabled\n"));
        return VERR_INVALID_STATE;
    }

    pCtl->pfnComplete = pfnComplete;
    pCtl->pvComplete = pvComplete;

    int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
    if (RT_SUCCESS(rc))
    {
        if (enmSource > VBVAEXHOSTCTL_SOURCE_GUEST)
        {
            /* re-check under the lock: the enable state may have changed */
            if ((enmSource == VBVAEXHOSTCTL_SOURCE_HOST_ENABLED) && !VBoxVBVAExHSIsEnabled(pCmdVbva))
            {
                WARN(("cmd vbva not enabled\n"));
                RTCritSectLeave(&pCmdVbva->CltCritSect);
                return VERR_INVALID_STATE;
            }
            RTListAppend(&pCmdVbva->HostCtlList, &pCtl->Node);
        }
        else
            RTListAppend(&pCmdVbva->GuestCtlList, &pCtl->Node);

        ASMAtomicIncU32(&pCmdVbva->u32cCtls);

        RTCritSectLeave(&pCmdVbva->CltCritSect);

        rc = VBoxVBVAExHSCheckCommands(pCmdVbva);
    }
    else
        WARN(("RTCritSectEnter failed %d\n", rc));

    return rc;
}
641
642
643/* Loads state
644 * @returns - same as VBoxVBVAExHSCheckCommands, or failure on load state fail
645 */
646static int VBoxVBVAExHSLoadState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version)
647{
648 AssertMsgFailed(("implement!\n"));
649 uint32_t u32;
650 int rc = SSMR3GetU32(pSSM, &u32);
651 AssertRCReturn(rc, rc);
652 if (u32 != 0xffffffff)
653 {
654 VBVABUFFER *pVBVA = (VBVABUFFER*)pu8VramBase + u32;
655 rc = VBoxVBVAExHSEnable(pCmdVbva, pVBVA);
656 AssertRCReturn(rc, rc);
657 return VBoxVBVAExHSCheckCommands(pCmdVbva);
658 }
659
660 return VINF_SUCCESS;
661}
662
/* Top-level VDMA host state. */
typedef struct VBOXVDMAHOST
{
    PHGSMIINSTANCE pHgsmi;       /* HGSMI instance, used to translate guest offsets */
    PVGASTATE pVGAState;         /* owning VGA device state */
    VBVAEXHOSTCONTEXT CmdVbva;   /* command VBVA context */
    VBOXVDMATHREAD Thread;       /* worker thread */
    VBOXCRCMD_SVRINFO CrSrvInfo; /* chromium command server entry points */
    VBVAEXHOSTCTL* pCurRemainingHostCtl; /* host ctl currently handed out via the
                                          * "remaining host command" callback */
#ifdef VBOX_VDMA_WITH_WATCHDOG
    PTMTIMERR3 WatchDogTimer;
#endif
} VBOXVDMAHOST, *PVBOXVDMAHOST;
675
676int VBoxVDMAThreadNotifyConstructSucceeded(PVBOXVDMATHREAD pThread)
677{
678 Assert(pThread->u32State == VBOXVDMATHREAD_STATE_TERMINATED);
679 int rc = RTSemEventSignal(pThread->hClientEvent);
680 AssertRC(rc);
681 if (RT_SUCCESS(rc))
682 {
683 pThread->u32State = VBOXVDMATHREAD_STATE_CREATED;
684 return VINF_SUCCESS;
685 }
686 return rc;
687}
688
689int VBoxVDMAThreadNotifyConstructFailed(PVBOXVDMATHREAD pThread)
690{
691 Assert(pThread->u32State == VBOXVDMATHREAD_STATE_TERMINATED);
692 int rc = RTSemEventSignal(pThread->hClientEvent);
693 AssertRC(rc);
694 if (RT_SUCCESS(rc))
695 return VINF_SUCCESS;
696 return rc;
697}
698
699DECLINLINE(bool) VBoxVDMAThreadIsTerminating(PVBOXVDMATHREAD pThread)
700{
701 return ASMAtomicUoReadU32(&pThread->u32State) == VBOXVDMATHREAD_STATE_TERMINATING;
702}
703
/* Creates and starts the VDMA worker thread, then waits for the thread routine
 * to report construction success or failure via VBoxVDMAThreadNotifyConstruct*.
 * All partially created resources are rolled back on failure. */
int VBoxVDMAThreadCreate(PVBOXVDMATHREAD pThread, PFNRTTHREAD pfnThread, void *pvThread)
{
    int rc = RTSemEventCreate(&pThread->hEvent);
    if (RT_SUCCESS(rc))
    {
        rc = RTSemEventCreate(&pThread->hClientEvent);
        if (RT_SUCCESS(rc))
        {
            pThread->u32State = VBOXVDMATHREAD_STATE_TERMINATED;
            rc = RTThreadCreate(&pThread->hWorkerThread, pfnThread, pvThread, 0, RTTHREADTYPE_IO, RTTHREADFLAGS_WAITABLE, "VDMA");
            if (RT_SUCCESS(rc))
            {
                /* block until the thread routine reports its construction status */
                rc = RTSemEventWait(pThread->hClientEvent, RT_INDEFINITE_WAIT);
                if (RT_SUCCESS(rc))
                {
                    /* CREATED is only set by VBoxVDMAThreadNotifyConstructSucceeded */
                    if (pThread->u32State == VBOXVDMATHREAD_STATE_CREATED)
                        return VINF_SUCCESS;
                    WARN(("thread routine failed the initialization\n"));
                    rc = VERR_INVALID_STATE;
                }
                else
                    WARN(("RTSemEventWait failed %d\n", rc));

                RTThreadWait(pThread->hWorkerThread, RT_INDEFINITE_WAIT, NULL);
            }
            else
                WARN(("RTThreadCreate failed %d\n", rc));

            RTSemEventDestroy(pThread->hClientEvent);
        }
        else
            WARN(("RTSemEventCreate failed %d\n", rc));

        RTSemEventDestroy(pThread->hEvent);
    }
    else
        WARN(("RTSemEventCreate failed %d\n", rc));

    return rc;
}
744
745DECLINLINE(int) VBoxVDMAThreadEventNotify(PVBOXVDMATHREAD pThread)
746{
747 int rc = RTSemEventSignal(pThread->hEvent);
748 AssertRC(rc);
749 return rc;
750}
751
752DECLINLINE(int) VBoxVDMAThreadEventWait(PVBOXVDMATHREAD pThread, RTMSINTERVAL cMillies)
753{
754 int rc = RTSemEventWait(pThread->hEvent, cMillies);
755 AssertRC(rc);
756 return rc;
757}
758
759void VBoxVDMAThreadMarkTerminating(PVBOXVDMATHREAD pThread)
760{
761 Assert(pThread->u32State == VBOXVDMATHREAD_STATE_CREATED);
762 ASMAtomicWriteU32(&pThread->u32State, VBOXVDMATHREAD_STATE_TERMINATING);
763}
764
/* Stops the worker thread: marks it terminating (waking it if necessary),
 * waits for it to exit, then destroys both event semaphores. */
void VBoxVDMAThreadTerm(PVBOXVDMATHREAD pThread)
{
    int rc;
    if (ASMAtomicReadU32(&pThread->u32State) != VBOXVDMATHREAD_STATE_TERMINATING)
    {
        VBoxVDMAThreadMarkTerminating(pThread);
        /* wake the worker so it can notice the state change */
        rc = VBoxVDMAThreadEventNotify(pThread);
        AssertRC(rc);
    }
    rc = RTThreadWait(pThread->hWorkerThread, RT_INDEFINITE_WAIT, NULL);
    AssertRC(rc);
    RTSemEventDestroy(pThread->hClientEvent);
    RTSemEventDestroy(pThread->hEvent);
}
779
static int vdmaVBVACtlSubmitSync(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL* pCtl, VBVAEXHOSTCTL_SOURCE enmSource);

#ifdef VBOX_WITH_CRHGSMI

/* Completion callback type for chromium ctl commands posted via vboxVDMACrCtlPost*. */
typedef DECLCALLBACK(void) FNVBOXVDMACRCTL_CALLBACK(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, void* pvContext);
typedef FNVBOXVDMACRCTL_CALLBACK *PFNVBOXVDMACRCTL_CALLBACK;

/* Reference-counted private header allocated in front of every chromium ctl
 * command (see vboxVDMACrCtlCreate). */
typedef struct VBOXVDMACMD_CHROMIUM_CTL_PRIVATE
{
    uint32_t cRefs;                          /* reference count (vboxVDMACrCtlRetain/Release) */
    int32_t rc;                              /* completion status; VERR_NOT_IMPLEMENTED until completed */
    PFNVBOXVDMACRCTL_CALLBACK pfnCompletion; /* completion callback */
    void *pvCompletion;                      /* callback context */
    VBOXVDMACMD_CHROMIUM_CTL Cmd;            /* public command; last member, payload follows */
} VBOXVDMACMD_CHROMIUM_CTL_PRIVATE, *PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE;

/* Maps a public VBOXVDMACMD_CHROMIUM_CTL pointer back to its private header. */
#define VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(_p) ((PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE)(((uint8_t*)(_p)) - RT_OFFSETOF(VBOXVDMACMD_CHROMIUM_CTL_PRIVATE, Cmd)))
797
798static PVBOXVDMACMD_CHROMIUM_CTL vboxVDMACrCtlCreate(VBOXVDMACMD_CHROMIUM_CTL_TYPE enmCmd, uint32_t cbCmd)
799{
800 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = (PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE)RTMemAllocZ(cbCmd + RT_OFFSETOF(VBOXVDMACMD_CHROMIUM_CTL_PRIVATE, Cmd));
801 Assert(pHdr);
802 if (pHdr)
803 {
804 pHdr->cRefs = 1;
805 pHdr->rc = VERR_NOT_IMPLEMENTED;
806 pHdr->Cmd.enmType = enmCmd;
807 pHdr->Cmd.cbCmd = cbCmd;
808 return &pHdr->Cmd;
809 }
810
811 return NULL;
812}
813
814DECLINLINE(void) vboxVDMACrCtlRelease (PVBOXVDMACMD_CHROMIUM_CTL pCmd)
815{
816 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
817 uint32_t cRefs = ASMAtomicDecU32(&pHdr->cRefs);
818 if(!cRefs)
819 {
820 RTMemFree(pHdr);
821 }
822}
823
824DECLINLINE(void) vboxVDMACrCtlRetain (PVBOXVDMACMD_CHROMIUM_CTL pCmd)
825{
826 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
827 ASMAtomicIncU32(&pHdr->cRefs);
828}
829
830DECLINLINE(int) vboxVDMACrCtlGetRc (PVBOXVDMACMD_CHROMIUM_CTL pCmd)
831{
832 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
833 return pHdr->rc;
834}
835
836static DECLCALLBACK(void) vboxVDMACrCtlCbSetEvent(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, void* pvContext)
837{
838 RTSemEventSignal((RTSEMEVENT)pvContext);
839}
840
841static DECLCALLBACK(void) vboxVDMACrCtlCbReleaseCmd(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, void* pvContext)
842{
843 vboxVDMACrCtlRelease(pCmd);
844}
845
846
/* Posts a chromium ctl command to the display driver for asynchronous
 * processing; pfnCompletion(pvCompletion) is invoked on completion.
 * @returns VERR_NOT_SUPPORTED when the driver or its
 *          pfnCrHgsmiControlProcess entry point is unavailable. */
static int vboxVDMACrCtlPostAsync (PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, uint32_t cbCmd, PFNVBOXVDMACRCTL_CALLBACK pfnCompletion, void *pvCompletion)
{
    if (   pVGAState->pDrv
        && pVGAState->pDrv->pfnCrHgsmiControlProcess)
    {
        PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
        pHdr->pfnCompletion = pfnCompletion;
        pHdr->pvCompletion = pvCompletion;
        pVGAState->pDrv->pfnCrHgsmiControlProcess(pVGAState->pDrv, pCmd, cbCmd);
        return VINF_SUCCESS;
    }
#ifdef DEBUG_misha
    Assert(0);
#endif
    return VERR_NOT_SUPPORTED;
}
863
/* Posts a chromium ctl command and synchronously waits for its completion.
 * @returns post/wait status; the command's own status must be read separately
 *          via vboxVDMACrCtlGetRc(). */
static int vboxVDMACrCtlPost(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, uint32_t cbCmd)
{
    RTSEMEVENT hComplEvent;
    int rc = RTSemEventCreate(&hComplEvent);
    AssertRC(rc);
    if(RT_SUCCESS(rc))
    {
        rc = vboxVDMACrCtlPostAsync(pVGAState, pCmd, cbCmd, vboxVDMACrCtlCbSetEvent, (void*)hComplEvent);
#ifdef DEBUG_misha
        AssertRC(rc);
#endif
        if (RT_SUCCESS(rc))
        {
            rc = RTSemEventWaitNoResume(hComplEvent, RT_INDEFINITE_WAIT);
            AssertRC(rc);
            if(RT_SUCCESS(rc))
            {
                RTSemEventDestroy(hComplEvent);
            }
            /* NOTE(review): when the wait fails the semaphore is NOT destroyed
             * — presumably to keep the still-pending async completion from
             * signalling a dead handle, at the cost of leaking it; confirm
             * this is intended. */
        }
        else
        {
            /* the command is completed */
            RTSemEventDestroy(hComplEvent);
        }
    }
    return rc;
}
892
/* Context for synchronous HGCM ctl submission (the name "CYNC" is a historical
 * typo for "SYNC", kept since the identifier is used elsewhere). */
typedef struct VDMA_VBVA_CTL_CYNC_COMPLETION
{
    int rc;            /* command completion status, set by the completion callback */
    RTSEMEVENT hEvent; /* signalled when the command completes */
} VDMA_VBVA_CTL_CYNC_COMPLETION;
898
899static DECLCALLBACK(void) vboxVDMACrHgcmSubmitSyncCompletion(struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd, int rc, void *pvCompletion)
900{
901 VDMA_VBVA_CTL_CYNC_COMPLETION *pData = (VDMA_VBVA_CTL_CYNC_COMPLETION*)pvCompletion;
902 pData->rc = rc;
903 rc = RTSemEventSignal(pData->hEvent);
904 if (!RT_SUCCESS(rc))
905 WARN(("RTSemEventSignal failed %d\n", rc));
906}
907
/* Submits an HGCM ctl and blocks until its completion callback fires.
 * @returns submission/wait status; on success, the command's own completion
 *          status. */
static int vboxVDMACrHgcmSubmitSync(struct VBOXVDMAHOST *pVdma, VBOXCRCMDCTL* pCtl, uint32_t cbCtl)
{
    VDMA_VBVA_CTL_CYNC_COMPLETION Data;
    Data.rc = VERR_NOT_IMPLEMENTED; /* overwritten by the completion callback */
    int rc = RTSemEventCreate(&Data.hEvent);
    if (!RT_SUCCESS(rc))
    {
        WARN(("RTSemEventCreate failed %d\n", rc));
        return rc;
    }

    PVGASTATE pVGAState = pVdma->pVGAState;
    rc = pVGAState->pDrv->pfnCrHgcmCtlSubmit(pVGAState->pDrv, pCtl, cbCtl, vboxVDMACrHgcmSubmitSyncCompletion, &Data);
    if (RT_SUCCESS(rc))
    {
        rc = RTSemEventWait(Data.hEvent, RT_INDEFINITE_WAIT);
        if (RT_SUCCESS(rc))
        {
            /* propagate the command's completion status */
            rc = Data.rc;
            if (!RT_SUCCESS(rc))
            {
                WARN(("pfnCrHgcmCtlSubmit command failed %d\n", rc));
            }

        }
        else
            WARN(("RTSemEventWait failed %d\n", rc));
    }
    else
        WARN(("pfnCrHgcmCtlSubmit failed %d\n", rc));


    RTSemEventDestroy(Data.hEvent);

    return rc;
}
944
/* "Remaining host command" callback registered with VBOXCRCMDCTL_TYPE_ENABLE.
 * Completes the previously handed-out host ctl (with prevCmdRc) if any, then
 * returns the next pending host ctl's buffer.  When the host ctl queue is
 * drained it disables VBVA and returns NULL with *pcbCtl = 0. */
static DECLCALLBACK(uint8_t*) vboxVDMACrHgcmHandleEnableRemainingHostCommand(HVBOXCRCMDCTL_REMAINING_HOST_COMMAND hClient, uint32_t *pcbCtl, int prevCmdRc)
{
    struct VBOXVDMAHOST *pVdma = hClient;
    if (!pVdma->pCurRemainingHostCtl)
    {
        /* disable VBVA, all subsequent host commands will go HGCM way */
        VBoxVBVAExHSDisable(&pVdma->CmdVbva);
    }
    else
    {
        VBoxVBVAExHPDataCompleteCtl(&pVdma->CmdVbva, pVdma->pCurRemainingHostCtl, prevCmdRc);
    }

    pVdma->pCurRemainingHostCtl = VBoxVBVAExHPCheckHostCtlOnDisable(&pVdma->CmdVbva);
    if (pVdma->pCurRemainingHostCtl)
    {
        *pcbCtl = pVdma->pCurRemainingHostCtl->u.cmd.cbCmd;
        return pVdma->pCurRemainingHostCtl->u.cmd.pu8Cmd;
    }

    *pcbCtl = 0;
    return NULL;
}
968
/* Sends VBOXCRCMDCTL_TYPE_ENABLE over HGCM; during processing the callback
 * above drains the pending host ctls, leaving the VBVA disabled on success. */
static int vboxVDMACrHgcmHandleEnable(struct VBOXVDMAHOST *pVdma)
{
    VBOXCRCMDCTL_ENABLE Enable;
    Enable.Hdr.enmType = VBOXCRCMDCTL_TYPE_ENABLE;
    Enable.hRHCmd = pVdma;
    Enable.pfnRHCmd = vboxVDMACrHgcmHandleEnableRemainingHostCommand;

    int rc = vboxVDMACrHgcmSubmitSync(pVdma, &Enable.Hdr, sizeof (Enable));
    Assert(!pVdma->pCurRemainingHostCtl);
    if (RT_SUCCESS(rc))
    {
        /* the drain callback must have flipped the VBVA to disabled */
        Assert(!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva));
        return VINF_SUCCESS;
    }

    Assert(VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva));
    WARN(("vboxVDMACrHgcmSubmitSync failed %d\n", rc));

    return rc;
}
989
/* Enables the command VBVA at the given VRAM offset and switches the chromium
 * command server into VBVA mode; rolls everything back on failure. */
static int vdmaVBVAEnableProcess(struct VBOXVDMAHOST *pVdma, uint32_t u32Offset)
{
    if (VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
    {
        WARN(("vdma VBVA is already enabled\n"));
        return VERR_INVALID_STATE;
    }

    VBVABUFFER *pVBVA = (VBVABUFFER *)HGSMIOffsetToPointerHost(pVdma->pHgsmi, u32Offset);
    if (!pVBVA)
    {
        WARN(("invalid offset %d\n", u32Offset));
        return VERR_INVALID_PARAMETER;
    }

    if (!pVdma->CrSrvInfo.pfnEnable)
    {
        /* NOTE(review): this guard rejects a NULL pfnEnable only in
         * DEBUG_misha builds; other builds fall through and would call the
         * NULL pointer below — confirm this is intended. */
#ifdef DEBUG_misha
        WARN(("pfnEnable is NULL\n"));
        return VERR_NOT_SUPPORTED;
#endif
    }

    int rc = VBoxVBVAExHSEnable(&pVdma->CmdVbva, pVBVA);
    if (RT_SUCCESS(rc))
    {
        /* first switch HGCM ctl processing off */
        VBOXCRCMDCTL Ctl;
        Ctl.enmType = VBOXCRCMDCTL_TYPE_DISABLE;
        rc = vboxVDMACrHgcmSubmitSync(pVdma, &Ctl, sizeof (Ctl));
        if (RT_SUCCESS(rc))
        {
            PVGASTATE pVGAState = pVdma->pVGAState;
            VBOXCRCMD_SVRENABLE_INFO Info;
            Info.hCltScr = pVGAState->pDrv;
            Info.pfnCltScrUpdateBegin = pVGAState->pDrv->pfnVBVAUpdateBegin;
            Info.pfnCltScrUpdateProcess = pVGAState->pDrv->pfnVBVAUpdateProcess;
            Info.pfnCltScrUpdateEnd = pVGAState->pDrv->pfnVBVAUpdateEnd;
            rc = pVdma->CrSrvInfo.pfnEnable(pVdma->CrSrvInfo.hSvr, &Info);
            if (RT_SUCCESS(rc))
                return VINF_SUCCESS;
            else
                WARN(("pfnEnable failed %d\n", rc));

            /* roll back: re-enable HGCM ctl processing */
            vboxVDMACrHgcmHandleEnable(pVdma);
        }
        else
            WARN(("vboxVDMACrHgcmSubmitSync failed %d\n", rc));

        VBoxVBVAExHSDisable(&pVdma->CmdVbva);
    }
    else
        WARN(("VBoxVBVAExHSEnable failed %d\n", rc));

    return rc;
}
1045
1046static int vdmaVBVADisableProcess(struct VBOXVDMAHOST *pVdma)
1047{
1048 if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
1049 {
1050 Log(("vdma VBVA is already disabled\n"));
1051 return VINF_SUCCESS;
1052 }
1053
1054 int rc = pVdma->CrSrvInfo.pfnDisable(pVdma->CrSrvInfo.hSvr);
1055 if (RT_SUCCESS(rc))
1056 {
1057 /* disable is a bit tricky
1058 * we need to ensure the host ctl commands do not come out of order
1059 * and do not come over HGCM channel until after it is enabled */
1060 rc = vboxVDMACrHgcmHandleEnable(pVdma);
1061 if (RT_SUCCESS(rc))
1062 return rc;
1063
1064 PVGASTATE pVGAState = pVdma->pVGAState;
1065 VBOXCRCMD_SVRENABLE_INFO Info;
1066 Info.hCltScr = pVGAState->pDrv;
1067 Info.pfnCltScrUpdateBegin = pVGAState->pDrv->pfnVBVAUpdateBegin;
1068 Info.pfnCltScrUpdateProcess = pVGAState->pDrv->pfnVBVAUpdateProcess;
1069 Info.pfnCltScrUpdateEnd = pVGAState->pDrv->pfnVBVAUpdateEnd;
1070 pVdma->CrSrvInfo.pfnEnable(pVdma->CrSrvInfo.hSvr, &Info);
1071 }
1072 else
1073 WARN(("pfnDisable failed %d\n", rc));
1074
1075 return rc;
1076}
1077
1078static int vboxVDMACrHostCtlProcess(struct VBOXVDMAHOST *pVdma, VBVAEXHOSTCTL *pCmd)
1079{
1080 switch (pCmd->enmType)
1081 {
1082 case VBVAEXHOSTCTL_TYPE_HH_SAVESTATE:
1083 if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
1084 {
1085 WARN(("VBVAEXHOSTCTL_TYPE_HH_SAVESTATE for disabled vdma VBVA\n"));
1086 return VERR_INVALID_STATE;
1087 }
1088 return pVdma->CrSrvInfo.pfnSaveState(pVdma->CrSrvInfo.hSvr, pCmd->u.state.pSSM);
1089 case VBVAEXHOSTCTL_TYPE_HH_LOADSTATE:
1090 if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
1091 {
1092 WARN(("VBVAEXHOSTCTL_TYPE_HH_LOADSTATE for disabled vdma VBVA\n"));
1093 return VERR_INVALID_STATE;
1094 }
1095 return pVdma->CrSrvInfo.pfnLoadState(pVdma->CrSrvInfo.hSvr, pCmd->u.state.pSSM, pCmd->u.state.u32Version);
1096 case VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE:
1097 if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
1098 {
1099 WARN(("VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE for disabled vdma VBVA\n"));
1100 return VERR_INVALID_STATE;
1101 }
1102 return pVdma->CrSrvInfo.pfnHostCtl(pVdma->CrSrvInfo.hSvr, pCmd->u.cmd.pu8Cmd, pCmd->u.cmd.cbCmd);
1103 case VBVAEXHOSTCTL_TYPE_HH_TERM:
1104 {
1105 int rc = vdmaVBVADisableProcess(pVdma);
1106 if (!RT_SUCCESS(rc))
1107 {
1108 WARN(("vdmaVBVADisableProcess failed %d\n", rc));
1109 return rc;
1110 }
1111
1112 VBoxVDMAThreadMarkTerminating(&pVdma->Thread);
1113 return VINF_SUCCESS;
1114 }
1115 case VBVAEXHOSTCTL_TYPE_HH_RESET:
1116 {
1117 int rc = vdmaVBVADisableProcess(pVdma);
1118 if (!RT_SUCCESS(rc))
1119 {
1120 WARN(("vdmaVBVADisableProcess failed %d\n", rc));
1121 return rc;
1122 }
1123 return VINF_SUCCESS;
1124 }
1125 default:
1126 WARN(("unexpected host ctl type %d\n", pCmd->enmType));
1127 return VERR_INVALID_PARAMETER;
1128 }
1129}
1130
1131static int vboxVDMACrGuestCtlProcess(struct VBOXVDMAHOST *pVdma, VBVAEXHOSTCTL *pCmd)
1132{
1133 switch (pCmd->enmType)
1134 {
1135 case VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE:
1136 if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
1137 {
1138 WARN(("VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE for disabled vdma VBVA\n"));
1139 return VERR_INVALID_STATE;
1140 }
1141 return pVdma->CrSrvInfo.pfnGuestCtl(pVdma->CrSrvInfo.hSvr, pCmd->u.cmd.pu8Cmd, pCmd->u.cmd.cbCmd);
1142 case VBVAEXHOSTCTL_TYPE_GH_ENABLE_DISABLE:
1143 {
1144 VBVAENABLE *pEnable = (VBVAENABLE *)pCmd->u.cmd.pu8Cmd;
1145 Assert(pCmd->u.cmd.cbCmd == sizeof (VBVAENABLE));
1146 if ((pEnable->u32Flags & (VBVA_F_ENABLE | VBVA_F_DISABLE)) == VBVA_F_ENABLE)
1147 {
1148 uint32_t u32Offset = pEnable->u32Offset;
1149 return vdmaVBVAEnableProcess(pVdma, u32Offset);
1150 }
1151
1152 return vdmaVBVADisableProcess(pVdma);
1153 }
1154 default:
1155 WARN(("unexpected ctl type %d\n", pCmd->enmType));
1156 return VERR_INVALID_PARAMETER;
1157 }
1158}
1159
1160/**
1161 * @param fIn - whether this is a page in or out op.
1162 * the direction is VRA#M - related, so fIn == true - transfer to VRAM); false - transfer from VRAM
1163 */
1164static int vboxVDMACrCmdVbvaProcessPagingEl(PPDMDEVINS pDevIns, const VBOXCMDVBVA_SYSMEMEL *pMemEl, uint8_t *pu8Vram, uint8_t *pu8VramMax, uint8_t **ppu8VramNext, bool fIn)
1165{
1166 uint32_t u32Cpages = pMemEl->cPagesAfterFirst + 1;
1167 RTGCPHYS phPage = (pMemEl->iPage1 | (pMemEl->iPage2 << 20));
1168 PGMPAGEMAPLOCK Lock;
1169 uint32_t cbCopy = u32Cpages * PAGE_SIZE;
1170 uint8_t* pu8VramNext = pu8Vram + cbCopy;
1171
1172 if (pu8VramNext <= pu8Vram && pu8VramNext > pu8VramMax)
1173 {
1174 WARN(("invalid copy size"));
1175 return VERR_INVALID_PARAMETER;
1176 }
1177
1178 int rc;
1179 if (fIn)
1180 {
1181 for (uint32_t i = 0; i < u32Cpages; ++i)
1182 {
1183 const void * pvPage;
1184 rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phPage, 0, &pvPage, &Lock);
1185 if (!RT_SUCCESS(rc))
1186 {
1187 WARN(("PDMDevHlpPhysGCPhys2CCPtrReadOnly failed %d", rc));
1188 return rc;
1189 }
1190
1191 memcpy(pu8Vram, pvPage, PAGE_SIZE);
1192
1193 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
1194
1195 pu8Vram += PAGE_SIZE;
1196 }
1197 }
1198 else
1199 {
1200 for (uint32_t i = 0; i < u32Cpages; ++i)
1201 {
1202 void * pvPage;
1203 rc = PDMDevHlpPhysGCPhys2CCPtr(pDevIns, phPage, 0, &pvPage, &Lock);
1204 if (!RT_SUCCESS(rc))
1205 {
1206 WARN(("PDMDevHlpPhysGCPhys2CCPtr failed %d", rc));
1207 return rc;
1208 }
1209
1210 memcpy(pvPage, pu8Vram, PAGE_SIZE);
1211
1212 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
1213
1214 pu8Vram += PAGE_SIZE;
1215 }
1216 }
1217
1218 if (ppu8VramNext)
1219 *ppu8VramNext = pu8VramNext;
1220
1221 return VINF_SUCCESS;
1222}
1223
1224static int vboxVDMACrCmdVbvaProcessPagingEls(PPDMDEVINS pDevIns, const VBOXCMDVBVA_SYSMEMEL *pMemEl, uint32_t cMemEls, uint8_t *pu8Vram, uint8_t *pu8VramMax, uint8_t **ppu8VramNext, bool fIn)
1225{
1226 uint8_t *pu8VramNext = pu8Vram;
1227 for (uint32_t i = 0; i < cMemEls; ++i, ++pMemEl)
1228 {
1229 int rc = vboxVDMACrCmdVbvaProcessPagingEl(pDevIns, pMemEl, pu8Vram, pu8VramMax, &pu8VramNext, fIn);
1230 if (!RT_SUCCESS(rc))
1231 {
1232 WARN(("vboxVDMACrCmdVbvaProcessPagingEl failed %d", rc));
1233 return rc;
1234 }
1235 }
1236
1237 if (ppu8VramNext)
1238 *ppu8VramNext = pu8VramNext;
1239
1240 return VINF_SUCCESS;
1241}
1242
1243static int8_t vboxVDMACrCmdVbvaPagingDataInit(PVGASTATE pVGAState, const VBOXCMDVBVA_HDR *pCmd, uint32_t cbCmd,
1244 const VBOXCMDVBVA_SYSMEMEL **ppSysMem, uint32_t *pcSysMem,
1245 uint8_t **ppu8Vram, uint8_t **ppu8VramMax, bool *pfIn)
1246{
1247 if (cbCmd < sizeof (VBOXCMDVBVA_PAGING_TRANSFER))
1248 {
1249 WARN(("cmd too small"));
1250 return -1;
1251 }
1252
1253 uint32_t cSysMem = cbCmd - RT_OFFSETOF(VBOXCMDVBVA_PAGING_TRANSFER, aSysMem);
1254 if (cSysMem % sizeof (VBOXCMDVBVA_SYSMEMEL))
1255 {
1256 WARN(("invalid cmd size"));
1257 return -1;
1258 }
1259 cSysMem /= sizeof (VBOXCMDVBVA_SYSMEMEL);
1260
1261 VBOXCMDVBVA_PAGING_TRANSFER *pTransfer = (VBOXCMDVBVA_PAGING_TRANSFER*)pCmd;
1262 VBOXCMDVBVAOFFSET offVRAM = pTransfer->Alloc.u.offVRAM;
1263 if (offVRAM & PAGE_OFFSET_MASK)
1264 {
1265 WARN(("offVRAM address is not on page boundary\n"));
1266 return -1;
1267 }
1268 const VBOXCMDVBVA_SYSMEMEL *pSysMem = pTransfer->aSysMem;
1269
1270 uint8_t * pu8VramBase = pVGAState->vram_ptrR3;
1271 uint8_t *pu8VramMax = pu8VramBase + pVGAState->vram_size;
1272 if (pTransfer->Alloc.u.offVRAM >= pVGAState->vram_size)
1273 {
1274 WARN(("invalid vram offset"));
1275 return -1;
1276 }
1277
1278 uint8_t *pu8Vram = pu8VramBase + pTransfer->Alloc.u.offVRAM;
1279 bool fIn = (pTransfer->Hdr.u8Flags & VBOXCMDVBVA_OPF_PAGING_TRANSFER_IN);
1280
1281 *ppSysMem = pSysMem;
1282 *pcSysMem = cSysMem;
1283 *ppu8Vram = pu8Vram;
1284 *ppu8VramMax = pu8VramMax;
1285 *pfIn = fIn;
1286 return 0;
1287}
1288
1289static int8_t vboxVDMACrCmdVbvaProcessCmdData(struct VBOXVDMAHOST *pVdma, const VBOXCMDVBVA_HDR *pCmd, uint32_t cbCmd)
1290{
1291 switch (pCmd->u8OpCode)
1292 {
1293 case VBOXCMDVBVA_OPTYPE_NOPCMD:
1294 return 0;
1295 case VBOXCMDVBVA_OPTYPE_PAGING_TRANSFER:
1296 {
1297 PVGASTATE pVGAState = pVdma->pVGAState;
1298 const VBOXCMDVBVA_SYSMEMEL *pSysMem;
1299 uint32_t cSysMem;
1300 uint8_t *pu8Vram;
1301 uint8_t *pu8VramMax;
1302 bool fIn;
1303 int8_t i8Result = vboxVDMACrCmdVbvaPagingDataInit(pVGAState, pCmd, cbCmd,
1304 &pSysMem, &cSysMem,
1305 &pu8Vram, &pu8VramMax, &fIn);
1306 if (i8Result < 0)
1307 {
1308 WARN(("vboxVDMACrCmdVbvaPagingDataInit failed %d", i8Result));
1309 return i8Result;
1310 }
1311
1312 PPDMDEVINS pDevIns = pVGAState->pDevInsR3;
1313 int rc = vboxVDMACrCmdVbvaProcessPagingEls(pDevIns, pSysMem, cSysMem, pu8Vram, pu8VramMax, &pu8Vram, fIn);
1314 if (!RT_SUCCESS(rc))
1315 {
1316 WARN(("vboxVDMACrCmdVbvaProcessPagingEls failed %d", rc));
1317 return -1;
1318 }
1319
1320 return 0;
1321 }
1322 case VBOXCMDVBVA_OPTYPE_PAGING_FILL:
1323 WARN(("VBOXCMDVBVA_OPTYPE_PAGING_FILL not implemented"));
1324 return -1;
1325 default:
1326 return pVdma->CrSrvInfo.pfnCmd(pVdma->CrSrvInfo.hSvr, pCmd, cbCmd);
1327 }
1328}
1329
1330#if 0
1331typedef struct VBOXCMDVBVA_PAGING_TRANSFER
1332{
1333 VBOXCMDVBVA_HDR Hdr;
1334 /* for now can only contain offVRAM.
1335 * paging transfer can NOT be initiated for allocations having host 3D object (hostID) associated */
1336 VBOXCMDVBVA_ALLOCINFO Alloc;
1337 uint32_t u32Reserved;
1338 VBOXCMDVBVA_SYSMEMEL aSysMem[1];
1339} VBOXCMDVBVA_PAGING_TRANSFER;
1340#endif
1341
/* Compile-time layout checks: the sysmem-command parsing code relies on these
 * exact structure sizes and on sysmem elements packing evenly into a page. */
AssertCompile(sizeof (VBOXCMDVBVA_HDR) == 8);
AssertCompile(sizeof (VBOXCMDVBVA_ALLOCINFO) == 4);
AssertCompile(sizeof (VBOXCMDVBVA_SYSMEMEL) == 8);
AssertCompile(!(PAGE_SIZE % sizeof (VBOXCMDVBVA_SYSMEMEL)));
AssertCompile(!(sizeof (VBOXCMDVBVA_PAGING_TRANSFER) % 8));

/* Number of VBOXCMDVBVA_SYSMEMEL elements fitting into one whole guest page. */
#define VBOXCMDVBVA_NUM_SYSMEMEL_PER_PAGE (PAGE_SIZE / sizeof (VBOXCMDVBVA_SYSMEMEL))
1349
1350static int8_t vboxVDMACrCmdVbvaProcess(struct VBOXVDMAHOST *pVdma, const VBOXCMDVBVA_HDR *pCmd, uint32_t cbCmd)
1351{
1352 switch (pCmd->u8OpCode)
1353 {
1354 case VBOXCMDVBVA_OPTYPE_SYSMEMCMD:
1355 {
1356 VBOXCMDVBVA_SYSMEMCMD *pSysmemCmd = (VBOXCMDVBVA_SYSMEMCMD*)pCmd;
1357 const VBOXCMDVBVA_HDR *pRealCmd;
1358 uint32_t cbRealCmd = pCmd->u8Flags;
1359 cbRealCmd |= (pCmd->u.u8PrimaryID << 8);
1360 if (cbRealCmd < sizeof (VBOXCMDVBVA_HDR))
1361 {
1362 WARN(("invalid sysmem cmd size"));
1363 return -1;
1364 }
1365
1366 RTGCPHYS phPage = pSysmemCmd->phSysMem;
1367 if (phPage & PAGE_OFFSET_MASK)
1368 {
1369 WARN(("cmd address is not on page boundary\n"));
1370 return -1;
1371 }
1372
1373 PGMPAGEMAPLOCK Lock;
1374 PVGASTATE pVGAState = pVdma->pVGAState;
1375 PPDMDEVINS pDevIns = pVGAState->pDevInsR3;
1376 const void * pvCmd;
1377 int rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phPage, 0, &pvCmd, &Lock);
1378 if (!RT_SUCCESS(rc))
1379 {
1380 WARN(("PDMDevHlpPhysGCPhys2CCPtrReadOnly failed %d\n", rc));
1381 return -1;
1382 }
1383
1384 pRealCmd = (const VBOXCMDVBVA_HDR *)pvCmd;
1385
1386 if (cbRealCmd <= PAGE_SIZE)
1387 {
1388 uint8_t i8Result = vboxVDMACrCmdVbvaProcessCmdData(pVdma, pRealCmd, cbRealCmd);
1389 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
1390 return i8Result;
1391 }
1392
1393 int8_t i8Result = 0;
1394
1395 switch (pRealCmd->u8OpCode)
1396 {
1397 case VBOXCMDVBVA_OPTYPE_PAGING_TRANSFER:
1398 {
1399 const VBOXCMDVBVA_SYSMEMEL *pSysMem;
1400 uint32_t cSysMem;
1401 uint8_t *pu8Vram;
1402 uint8_t *pu8VramMax;
1403 bool fIn;
1404 i8Result = vboxVDMACrCmdVbvaPagingDataInit(pVGAState, pCmd, cbCmd,
1405 &pSysMem, &cSysMem,
1406 &pu8Vram, &pu8VramMax, &fIn);
1407 if (i8Result < 0)
1408 {
1409 WARN(("vboxVDMACrCmdVbvaPagingDataInit failed %d", i8Result));
1410 return i8Result;
1411 }
1412
1413 uint32_t cCurSysMem = PAGE_SIZE - RT_OFFSETOF(VBOXCMDVBVA_PAGING_TRANSFER, aSysMem);
1414 cCurSysMem /= sizeof (VBOXCMDVBVA_SYSMEMEL);
1415 Assert(cCurSysMem < cSysMem);
1416
1417 do
1418 {
1419 rc = vboxVDMACrCmdVbvaProcessPagingEls(pDevIns, pSysMem, cCurSysMem, pu8Vram, pu8VramMax, &pu8Vram, fIn);
1420 if (!RT_SUCCESS(rc))
1421 {
1422 WARN(("vboxVDMACrCmdVbvaProcessPagingEls failed %d", rc));
1423 i8Result = -1;
1424 break;
1425 }
1426
1427 Assert(cSysMem >= cCurSysMem);
1428 cSysMem -= cCurSysMem;
1429
1430 if (!cSysMem)
1431 break;
1432
1433 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
1434
1435 phPage += PAGE_SIZE;
1436
1437 rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phPage, 0, &pvCmd, &Lock);
1438 if (!RT_SUCCESS(rc))
1439 {
1440 WARN(("PDMDevHlpPhysGCPhys2CCPtrReadOnly failed %d\n", rc));
1441 return -1;
1442 }
1443
1444 if (cSysMem > VBOXCMDVBVA_NUM_SYSMEMEL_PER_PAGE)
1445 cCurSysMem = VBOXCMDVBVA_NUM_SYSMEMEL_PER_PAGE;
1446 else
1447 cCurSysMem = cSysMem;
1448 } while (1);
1449 break;
1450 }
1451 default:
1452 WARN(("command can not be splitted"));
1453 i8Result = -1;
1454 break;
1455 }
1456
1457 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
1458 return i8Result;
1459 }
1460 default:
1461 return vboxVDMACrCmdVbvaProcessCmdData(pVdma, pCmd, cbCmd);
1462 }
1463}
1464
1465static void vboxVDMACrCmdProcess(struct VBOXVDMAHOST *pVdma, uint8_t* pu8Cmd, uint32_t cbCmd)
1466{
1467 if (*pu8Cmd == VBOXCMDVBVA_OPTYPE_NOP)
1468 return;
1469
1470 if (cbCmd < sizeof (VBOXCMDVBVA_HDR))
1471 {
1472 WARN(("invalid command size"));
1473 return;
1474 }
1475
1476 PVBOXCMDVBVA_HDR pCmd = (PVBOXCMDVBVA_HDR)pu8Cmd;
1477
1478 /* check if the command is cancelled */
1479 if (!ASMAtomicCmpXchgU8(&pCmd->u8State, VBOXCMDVBVA_STATE_IN_PROGRESS, VBOXCMDVBVA_STATE_SUBMITTED))
1480 {
1481 Assert(pCmd->u8State == VBOXCMDVBVA_STATE_CANCELLED);
1482 return;
1483 }
1484
1485 pCmd->u.i8Result = vboxVDMACrCmdVbvaProcess(pVdma, pCmd, cbCmd);
1486}
1487
1488static int vboxVDMACrCtlHgsmiSetup(struct VBOXVDMAHOST *pVdma)
1489{
1490 PVBOXVDMACMD_CHROMIUM_CTL_CRHGSMI_SETUP pCmd = (PVBOXVDMACMD_CHROMIUM_CTL_CRHGSMI_SETUP)
1491 vboxVDMACrCtlCreate (VBOXVDMACMD_CHROMIUM_CTL_TYPE_CRHGSMI_SETUP, sizeof (*pCmd));
1492 int rc = VERR_NO_MEMORY;
1493 if (pCmd)
1494 {
1495 PVGASTATE pVGAState = pVdma->pVGAState;
1496 pCmd->pvVRamBase = pVGAState->vram_ptrR3;
1497 pCmd->cbVRam = pVGAState->vram_size;
1498 rc = vboxVDMACrCtlPost(pVGAState, &pCmd->Hdr, sizeof (*pCmd));
1499 if (RT_SUCCESS(rc))
1500 {
1501 rc = vboxVDMACrCtlGetRc(&pCmd->Hdr);
1502 if (RT_SUCCESS(rc))
1503 pVdma->CrSrvInfo = pCmd->CrCmdServerInfo;
1504 else if (rc != VERR_NOT_SUPPORTED)
1505 WARN(("vboxVDMACrCtlGetRc returned %d\n", rc));
1506 }
1507 else
1508 WARN(("vboxVDMACrCtlPost failed %d\n", rc));
1509
1510 vboxVDMACrCtlRelease(&pCmd->Hdr);
1511 }
1512
1513 if (!RT_SUCCESS(rc))
1514 memset(&pVdma->CrSrvInfo, 0, sizeof (pVdma->CrSrvInfo));
1515
1516 return rc;
1517}
1518
1519static int vboxVDMACmdExecBpbTransfer(PVBOXVDMAHOST pVdma, const PVBOXVDMACMD_DMA_BPB_TRANSFER pTransfer, uint32_t cbBuffer);
1520
/* check if this is external cmd to be passed to chromium backend */
/**
 * Inspects a submitted DMA command descriptor and handles the command types
 * that bypass the generic path (chromium commands and BPB transfers).
 *
 * @returns VINF_NOT_SUPPORTED when the command was not consumed here and the
 *          caller must process it normally; any other status when handled
 *          (possibly asynchronously) or rejected.
 * @param   pVdma    The VDMA host state.
 * @param   pCmdDr   The command descriptor from the guest.
 * @param   cbCmdDr  Size of the descriptor including any inline payload.
 */
static int vboxVDMACmdCheckCrCmd(struct VBOXVDMAHOST *pVdma, PVBOXVDMACBUF_DR pCmdDr, uint32_t cbCmdDr)
{
    PVBOXVDMACMD pDmaCmd = NULL;
    uint32_t cbDmaCmd = 0;
    uint8_t * pvRam = pVdma->pVGAState->vram_ptrR3;
    int rc = VINF_NOT_SUPPORTED; /* default: not consumed here */

    cbDmaCmd = pCmdDr->cbBuf;

    /* locate the embedded VBOXVDMACMD: either inline after the descriptor ... */
    if (pCmdDr->fFlags & VBOXVDMACBUF_FLAG_BUF_FOLLOWS_DR)
    {
        if (cbCmdDr < sizeof (*pCmdDr) + VBOXVDMACMD_HEADER_SIZE())
        {
            AssertMsgFailed(("invalid buffer data!"));
            return VERR_INVALID_PARAMETER;
        }

        if (cbDmaCmd < cbCmdDr - sizeof (*pCmdDr) - VBOXVDMACMD_HEADER_SIZE())
        {
            AssertMsgFailed(("invalid command buffer data!"));
            return VERR_INVALID_PARAMETER;
        }

        pDmaCmd = VBOXVDMACBUF_DR_TAIL(pCmdDr, VBOXVDMACMD);
    }
    /* ... or at an offset into VRAM */
    else if (pCmdDr->fFlags & VBOXVDMACBUF_FLAG_BUF_VRAM_OFFSET)
    {
        VBOXVIDEOOFFSET offBuf = pCmdDr->Location.offVramBuf;
        if (offBuf + cbDmaCmd > pVdma->pVGAState->vram_size)
        {
            AssertMsgFailed(("invalid command buffer data from offset!"));
            return VERR_INVALID_PARAMETER;
        }
        pDmaCmd = (VBOXVDMACMD*)(pvRam + offBuf);
    }

    if (pDmaCmd)
    {
        Assert(cbDmaCmd >= VBOXVDMACMD_HEADER_SIZE());
        uint32_t cbBody = VBOXVDMACMD_BODY_SIZE(cbDmaCmd);

        switch (pDmaCmd->enmType)
        {
            case VBOXVDMACMD_TYPE_CHROMIUM_CMD:
            {
                PVBOXVDMACMD_CHROMIUM_CMD pCrCmd = VBOXVDMACMD_BODY(pDmaCmd, VBOXVDMACMD_CHROMIUM_CMD);
                if (cbBody < sizeof (*pCrCmd))
                {
                    AssertMsgFailed(("invalid chromium command buffer size!"));
                    return VERR_INVALID_PARAMETER;
                }
                PVGASTATE pVGAState = pVdma->pVGAState;
                rc = VINF_SUCCESS;
                if (pVGAState->pDrv->pfnCrHgsmiCommandProcess)
                {
                    /* completion happens asynchronously via
                     * vboxVDMACrHgsmiCommandCompleteAsync once the backend is
                     * done - mark the command accordingly and hand it over */
                    VBoxSHGSMICommandMarkAsynchCompletion(pCmdDr);
                    pVGAState->pDrv->pfnCrHgsmiCommandProcess(pVGAState->pDrv, pCrCmd, cbBody);
                    break;
                }
                else
                {
                    Assert(0);
                }

                /* no backend handler - complete the command immediately */
                int tmpRc = VBoxSHGSMICommandComplete (pVdma->pHgsmi, pCmdDr);
                AssertRC(tmpRc);
                break;
            }
            case VBOXVDMACMD_TYPE_DMA_BPB_TRANSFER:
            {
                PVBOXVDMACMD_DMA_BPB_TRANSFER pTransfer = VBOXVDMACMD_BODY(pDmaCmd, VBOXVDMACMD_DMA_BPB_TRANSFER);
                if (cbBody < sizeof (*pTransfer))
                {
                    AssertMsgFailed(("invalid bpb transfer buffer size!"));
                    return VERR_INVALID_PARAMETER;
                }

                /* execute synchronously and complete on success */
                rc = vboxVDMACmdExecBpbTransfer(pVdma, pTransfer, sizeof (*pTransfer));
                AssertRC(rc);
                if (RT_SUCCESS(rc))
                {
                    pCmdDr->rc = VINF_SUCCESS;
                    rc = VBoxSHGSMICommandComplete (pVdma->pHgsmi, pCmdDr);
                    AssertRC(rc);
                    rc = VINF_SUCCESS;
                }
                break;
            }
            default:
                /* any other type keeps rc == VINF_NOT_SUPPORTED */
                break;
        }
    }
    return rc;
}
1616
1617int vboxVDMACrHgsmiCommandCompleteAsync(PPDMIDISPLAYVBVACALLBACKS pInterface, PVBOXVDMACMD_CHROMIUM_CMD pCmd, int rc)
1618{
1619 PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
1620 PHGSMIINSTANCE pIns = pVGAState->pHGSMI;
1621 VBOXVDMACMD *pDmaHdr = VBOXVDMACMD_FROM_BODY(pCmd);
1622 VBOXVDMACBUF_DR *pDr = VBOXVDMACBUF_DR_FROM_TAIL(pDmaHdr);
1623 AssertRC(rc);
1624 pDr->rc = rc;
1625
1626 Assert(pVGAState->fGuestCaps & VBVACAPS_COMPLETEGCMD_BY_IOREAD);
1627 rc = VBoxSHGSMICommandComplete(pIns, pDr);
1628 AssertRC(rc);
1629 return rc;
1630}
1631
1632int vboxVDMACrHgsmiControlCompleteAsync(PPDMIDISPLAYVBVACALLBACKS pInterface, PVBOXVDMACMD_CHROMIUM_CTL pCmd, int rc)
1633{
1634 PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
1635 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pCmdPrivate = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
1636 pCmdPrivate->rc = rc;
1637 if (pCmdPrivate->pfnCompletion)
1638 {
1639 pCmdPrivate->pfnCompletion(pVGAState, pCmd, pCmdPrivate->pvCompletion);
1640 }
1641 return VINF_SUCCESS;
1642}
1643
1644#endif
1645
1646#ifdef VBOX_VDMA_WITH_WORKERTHREAD
1647/* to simplify things and to avoid extra backend if modifications we assume the VBOXVDMA_RECTL is the same as VBVACMDHDR */
1648AssertCompile(sizeof(VBOXVDMA_RECTL) == sizeof(VBVACMDHDR));
1649AssertCompile(RT_SIZEOFMEMB(VBOXVDMA_RECTL, left) == RT_SIZEOFMEMB(VBVACMDHDR, x));
1650AssertCompile(RT_SIZEOFMEMB(VBOXVDMA_RECTL, top) == RT_SIZEOFMEMB(VBVACMDHDR, y));
1651AssertCompile(RT_SIZEOFMEMB(VBOXVDMA_RECTL, width) == RT_SIZEOFMEMB(VBVACMDHDR, w));
1652AssertCompile(RT_SIZEOFMEMB(VBOXVDMA_RECTL, height) == RT_SIZEOFMEMB(VBVACMDHDR, h));
1653AssertCompile(RT_OFFSETOF(VBOXVDMA_RECTL, left) == RT_OFFSETOF(VBVACMDHDR, x));
1654AssertCompile(RT_OFFSETOF(VBOXVDMA_RECTL, top) == RT_OFFSETOF(VBVACMDHDR, y));
1655AssertCompile(RT_OFFSETOF(VBOXVDMA_RECTL, width) == RT_OFFSETOF(VBVACMDHDR, w));
1656AssertCompile(RT_OFFSETOF(VBOXVDMA_RECTL, height) == RT_OFFSETOF(VBVACMDHDR, h));
1657
1658static int vboxVDMANotifyPrimaryUpdate (PVGASTATE pVGAState, unsigned uScreenId, const VBOXVDMA_RECTL * pRectl)
1659{
1660 pVGAState->pDrv->pfnVBVAUpdateBegin (pVGAState->pDrv, uScreenId);
1661
1662 /* Updates the rectangle and sends the command to the VRDP server. */
1663 pVGAState->pDrv->pfnVBVAUpdateProcess (pVGAState->pDrv, uScreenId,
1664 (const PVBVACMDHDR)pRectl /* <- see above AssertCompile's and comments */,
1665 sizeof (VBOXVDMA_RECTL));
1666
1667 pVGAState->pDrv->pfnVBVAUpdateEnd (pVGAState->pDrv, uScreenId, pRectl->left, pRectl->top,
1668 pRectl->width, pRectl->height);
1669
1670 return VINF_SUCCESS;
1671}
1672#endif
1673
1674static int vboxVDMACmdExecBltPerform(PVBOXVDMAHOST pVdma,
1675 uint8_t *pvDstSurf, const uint8_t *pvSrcSurf,
1676 const PVBOXVDMA_SURF_DESC pDstDesc, const PVBOXVDMA_SURF_DESC pSrcDesc,
1677 const VBOXVDMA_RECTL * pDstRectl, const VBOXVDMA_RECTL * pSrcRectl)
1678{
1679 /* we do not support color conversion */
1680 Assert(pDstDesc->format == pSrcDesc->format);
1681 /* we do not support stretching */
1682 Assert(pDstRectl->height == pSrcRectl->height);
1683 Assert(pDstRectl->width == pSrcRectl->width);
1684 if (pDstDesc->format != pSrcDesc->format)
1685 return VERR_INVALID_FUNCTION;
1686 if (pDstDesc->width == pDstRectl->width
1687 && pSrcDesc->width == pSrcRectl->width
1688 && pSrcDesc->width == pDstDesc->width)
1689 {
1690 Assert(!pDstRectl->left);
1691 Assert(!pSrcRectl->left);
1692 uint32_t cbOff = pDstDesc->pitch * pDstRectl->top;
1693 uint32_t cbSize = pDstDesc->pitch * pDstRectl->height;
1694 memcpy(pvDstSurf + cbOff, pvSrcSurf + cbOff, cbSize);
1695 }
1696 else
1697 {
1698 uint32_t offDstLineStart = pDstRectl->left * pDstDesc->bpp >> 3;
1699 uint32_t offDstLineEnd = ((pDstRectl->left * pDstDesc->bpp + 7) >> 3) + ((pDstDesc->bpp * pDstRectl->width + 7) >> 3);
1700 uint32_t cbDstLine = offDstLineEnd - offDstLineStart;
1701 uint32_t offDstStart = pDstDesc->pitch * pDstRectl->top + offDstLineStart;
1702 Assert(cbDstLine <= pDstDesc->pitch);
1703 uint32_t cbDstSkip = pDstDesc->pitch;
1704 uint8_t * pvDstStart = pvDstSurf + offDstStart;
1705
1706 uint32_t offSrcLineStart = pSrcRectl->left * pSrcDesc->bpp >> 3;
1707 uint32_t offSrcLineEnd = ((pSrcRectl->left * pSrcDesc->bpp + 7) >> 3) + ((pSrcDesc->bpp * pSrcRectl->width + 7) >> 3);
1708 uint32_t cbSrcLine = offSrcLineEnd - offSrcLineStart;
1709 uint32_t offSrcStart = pSrcDesc->pitch * pSrcRectl->top + offSrcLineStart;
1710 Assert(cbSrcLine <= pSrcDesc->pitch);
1711 uint32_t cbSrcSkip = pSrcDesc->pitch;
1712 const uint8_t * pvSrcStart = pvSrcSurf + offSrcStart;
1713
1714 Assert(cbDstLine == cbSrcLine);
1715
1716 for (uint32_t i = 0; ; ++i)
1717 {
1718 memcpy (pvDstStart, pvSrcStart, cbDstLine);
1719 if (i == pDstRectl->height)
1720 break;
1721 pvDstStart += cbDstSkip;
1722 pvSrcStart += cbSrcSkip;
1723 }
1724 }
1725 return VINF_SUCCESS;
1726}
1727
1728static void vboxVDMARectlUnite(VBOXVDMA_RECTL * pRectl1, const VBOXVDMA_RECTL * pRectl2)
1729{
1730 if (!pRectl1->width)
1731 *pRectl1 = *pRectl2;
1732 else
1733 {
1734 int16_t x21 = pRectl1->left + pRectl1->width;
1735 int16_t x22 = pRectl2->left + pRectl2->width;
1736 if (pRectl1->left > pRectl2->left)
1737 {
1738 pRectl1->left = pRectl2->left;
1739 pRectl1->width = x21 < x22 ? x22 - pRectl1->left : x21 - pRectl1->left;
1740 }
1741 else if (x21 < x22)
1742 pRectl1->width = x22 - pRectl1->left;
1743
1744 x21 = pRectl1->top + pRectl1->height;
1745 x22 = pRectl2->top + pRectl2->height;
1746 if (pRectl1->top > pRectl2->top)
1747 {
1748 pRectl1->top = pRectl2->top;
1749 pRectl1->height = x21 < x22 ? x22 - pRectl1->top : x21 - pRectl1->top;
1750 }
1751 else if (x21 < x22)
1752 pRectl1->height = x22 - pRectl1->top;
1753 }
1754}
1755
1756/*
1757 * @return on success the number of bytes the command contained, otherwise - VERR_xxx error code
1758 */
1759static int vboxVDMACmdExecBlt(PVBOXVDMAHOST pVdma, const PVBOXVDMACMD_DMA_PRESENT_BLT pBlt, uint32_t cbBuffer)
1760{
1761 const uint32_t cbBlt = VBOXVDMACMD_BODY_FIELD_OFFSET(uint32_t, VBOXVDMACMD_DMA_PRESENT_BLT, aDstSubRects[pBlt->cDstSubRects]);
1762 Assert(cbBlt <= cbBuffer);
1763 if (cbBuffer < cbBlt)
1764 return VERR_INVALID_FUNCTION;
1765
1766 /* we do not support stretching for now */
1767 Assert(pBlt->srcRectl.width == pBlt->dstRectl.width);
1768 Assert(pBlt->srcRectl.height == pBlt->dstRectl.height);
1769 if (pBlt->srcRectl.width != pBlt->dstRectl.width)
1770 return VERR_INVALID_FUNCTION;
1771 if (pBlt->srcRectl.height != pBlt->dstRectl.height)
1772 return VERR_INVALID_FUNCTION;
1773 Assert(pBlt->cDstSubRects);
1774
1775 uint8_t * pvRam = pVdma->pVGAState->vram_ptrR3;
1776 VBOXVDMA_RECTL updateRectl = {0, 0, 0, 0};
1777
1778 if (pBlt->cDstSubRects)
1779 {
1780 VBOXVDMA_RECTL dstRectl, srcRectl;
1781 const VBOXVDMA_RECTL *pDstRectl, *pSrcRectl;
1782 for (uint32_t i = 0; i < pBlt->cDstSubRects; ++i)
1783 {
1784 pDstRectl = &pBlt->aDstSubRects[i];
1785 if (pBlt->dstRectl.left || pBlt->dstRectl.top)
1786 {
1787 dstRectl.left = pDstRectl->left + pBlt->dstRectl.left;
1788 dstRectl.top = pDstRectl->top + pBlt->dstRectl.top;
1789 dstRectl.width = pDstRectl->width;
1790 dstRectl.height = pDstRectl->height;
1791 pDstRectl = &dstRectl;
1792 }
1793
1794 pSrcRectl = &pBlt->aDstSubRects[i];
1795 if (pBlt->srcRectl.left || pBlt->srcRectl.top)
1796 {
1797 srcRectl.left = pSrcRectl->left + pBlt->srcRectl.left;
1798 srcRectl.top = pSrcRectl->top + pBlt->srcRectl.top;
1799 srcRectl.width = pSrcRectl->width;
1800 srcRectl.height = pSrcRectl->height;
1801 pSrcRectl = &srcRectl;
1802 }
1803
1804 int rc = vboxVDMACmdExecBltPerform(pVdma, pvRam + pBlt->offDst, pvRam + pBlt->offSrc,
1805 &pBlt->dstDesc, &pBlt->srcDesc,
1806 pDstRectl,
1807 pSrcRectl);
1808 AssertRC(rc);
1809 if (!RT_SUCCESS(rc))
1810 return rc;
1811
1812 vboxVDMARectlUnite(&updateRectl, pDstRectl);
1813 }
1814 }
1815 else
1816 {
1817 int rc = vboxVDMACmdExecBltPerform(pVdma, pvRam + pBlt->offDst, pvRam + pBlt->offSrc,
1818 &pBlt->dstDesc, &pBlt->srcDesc,
1819 &pBlt->dstRectl,
1820 &pBlt->srcRectl);
1821 AssertRC(rc);
1822 if (!RT_SUCCESS(rc))
1823 return rc;
1824
1825 vboxVDMARectlUnite(&updateRectl, &pBlt->dstRectl);
1826 }
1827
1828#ifdef VBOX_VDMA_WITH_WORKERTHREAD
1829 int iView = 0;
1830 /* @todo: fixme: check if update is needed and get iView */
1831 vboxVDMANotifyPrimaryUpdate (pVdma->pVGAState, iView, &updateRectl);
1832#endif
1833
1834 return cbBlt;
1835}
1836
/**
 * Executes a DMA_BPB_TRANSFER command: copies cbTransferSize bytes between
 * two locations, each of which is either an offset into VRAM or guest
 * physical memory. Guest-memory legs are mapped and copied in chunks of at
 * most one page per loop iteration.
 *
 * @returns sizeof(*pTransfer) (bytes consumed) on success, VBox error code
 *          otherwise.
 * @param   pVdma      The VDMA host state.
 * @param   pTransfer  The transfer description.
 * @param   cbBuffer   Remaining buffer size; must cover *pTransfer.
 */
static int vboxVDMACmdExecBpbTransfer(PVBOXVDMAHOST pVdma, const PVBOXVDMACMD_DMA_BPB_TRANSFER pTransfer, uint32_t cbBuffer)
{
    if (cbBuffer < sizeof (*pTransfer))
        return VERR_INVALID_PARAMETER;

    PVGASTATE pVGAState = pVdma->pVGAState;
    uint8_t * pvRam = pVGAState->vram_ptrR3;
    PGMPAGEMAPLOCK SrcLock;
    PGMPAGEMAPLOCK DstLock;
    PPDMDEVINS pDevIns = pVdma->pVGAState->pDevInsR3;
    const void * pvSrc;
    void * pvDst;
    int rc = VINF_SUCCESS;
    uint32_t cbTransfer = pTransfer->cbTransferSize; /* bytes left to copy */
    uint32_t cbTransfered = 0;                       /* bytes copied so far */
    bool bSrcLocked = false;
    bool bDstLocked = false;
    do
    {
        uint32_t cbSubTransfer = cbTransfer;
        /* resolve the source: VRAM offset or guest physical memory.
         * NOTE(review): the VRAM-offset legs are not bounds-checked against
         * vram_size here - confirm callers validate the offsets. */
        if (pTransfer->fFlags & VBOXVDMACMD_DMA_BPB_TRANSFER_F_SRC_VRAMOFFSET)
        {
            pvSrc = pvRam + pTransfer->Src.offVramBuf + cbTransfered;
        }
        else
        {
            RTGCPHYS phPage = pTransfer->Src.phBuf;
            phPage += cbTransfered;
            rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phPage, 0, &pvSrc, &SrcLock);
            AssertRC(rc);
            if (RT_SUCCESS(rc))
            {
                bSrcLocked = true;
                /* NOTE(review): chunk capped at 0x1000 bytes from phPage; if
                 * phBuf is not page aligned the chunk may cross a page
                 * boundary - confirm the guest guarantees alignment. */
                cbSubTransfer = RT_MIN(cbSubTransfer, 0x1000);
            }
            else
            {
                break;
            }
        }

        /* resolve the destination the same way */
        if (pTransfer->fFlags & VBOXVDMACMD_DMA_BPB_TRANSFER_F_DST_VRAMOFFSET)
        {
            pvDst = pvRam + pTransfer->Dst.offVramBuf + cbTransfered;
        }
        else
        {
            RTGCPHYS phPage = pTransfer->Dst.phBuf;
            phPage += cbTransfered;
            rc = PDMDevHlpPhysGCPhys2CCPtr(pDevIns, phPage, 0, &pvDst, &DstLock);
            AssertRC(rc);
            if (RT_SUCCESS(rc))
            {
                bDstLocked = true;
                cbSubTransfer = RT_MIN(cbSubTransfer, 0x1000);
            }
            else
            {
                break;
            }
        }

        if (RT_SUCCESS(rc))
        {
            memcpy(pvDst, pvSrc, cbSubTransfer);
            cbTransfer -= cbSubTransfer;
            cbTransfered += cbSubTransfer;
        }
        else
        {
            cbTransfer = 0; /* to break */
        }

        /* release per-iteration page mappings before the next chunk */
        if (bSrcLocked)
            PDMDevHlpPhysReleasePageMappingLock(pDevIns, &SrcLock);
        if (bDstLocked)
            PDMDevHlpPhysReleasePageMappingLock(pDevIns, &DstLock);
    } while (cbTransfer);

    if (RT_SUCCESS(rc))
        return sizeof (*pTransfer);
    return rc;
}
1920
1921static int vboxVDMACmdExec(PVBOXVDMAHOST pVdma, const uint8_t *pvBuffer, uint32_t cbBuffer)
1922{
1923 do
1924 {
1925 Assert(pvBuffer);
1926 Assert(cbBuffer >= VBOXVDMACMD_HEADER_SIZE());
1927
1928 if (!pvBuffer)
1929 return VERR_INVALID_PARAMETER;
1930 if (cbBuffer < VBOXVDMACMD_HEADER_SIZE())
1931 return VERR_INVALID_PARAMETER;
1932
1933 PVBOXVDMACMD pCmd = (PVBOXVDMACMD)pvBuffer;
1934 uint32_t cbCmd = 0;
1935 switch (pCmd->enmType)
1936 {
1937 case VBOXVDMACMD_TYPE_CHROMIUM_CMD:
1938 {
1939#ifdef VBOXWDDM_TEST_UHGSMI
1940 static int count = 0;
1941 static uint64_t start, end;
1942 if (count==0)
1943 {
1944 start = RTTimeNanoTS();
1945 }
1946 ++count;
1947 if (count==100000)
1948 {
1949 end = RTTimeNanoTS();
1950 float ems = (end-start)/1000000.f;
1951 LogRel(("100000 calls took %i ms, %i cps\n", (int)ems, (int)(100000.f*1000.f/ems) ));
1952 }
1953#endif
1954 /* todo: post the buffer to chromium */
1955 return VINF_SUCCESS;
1956 }
1957 case VBOXVDMACMD_TYPE_DMA_PRESENT_BLT:
1958 {
1959 const PVBOXVDMACMD_DMA_PRESENT_BLT pBlt = VBOXVDMACMD_BODY(pCmd, VBOXVDMACMD_DMA_PRESENT_BLT);
1960 int cbBlt = vboxVDMACmdExecBlt(pVdma, pBlt, cbBuffer);
1961 Assert(cbBlt >= 0);
1962 Assert((uint32_t)cbBlt <= cbBuffer);
1963 if (cbBlt >= 0)
1964 {
1965 if ((uint32_t)cbBlt == cbBuffer)
1966 return VINF_SUCCESS;
1967 else
1968 {
1969 cbBuffer -= (uint32_t)cbBlt;
1970 pvBuffer -= cbBlt;
1971 }
1972 }
1973 else
1974 return cbBlt; /* error */
1975 break;
1976 }
1977 case VBOXVDMACMD_TYPE_DMA_BPB_TRANSFER:
1978 {
1979 const PVBOXVDMACMD_DMA_BPB_TRANSFER pTransfer = VBOXVDMACMD_BODY(pCmd, VBOXVDMACMD_DMA_BPB_TRANSFER);
1980 int cbTransfer = vboxVDMACmdExecBpbTransfer(pVdma, pTransfer, cbBuffer);
1981 Assert(cbTransfer >= 0);
1982 Assert((uint32_t)cbTransfer <= cbBuffer);
1983 if (cbTransfer >= 0)
1984 {
1985 if ((uint32_t)cbTransfer == cbBuffer)
1986 return VINF_SUCCESS;
1987 else
1988 {
1989 cbBuffer -= (uint32_t)cbTransfer;
1990 pvBuffer -= cbTransfer;
1991 }
1992 }
1993 else
1994 return cbTransfer; /* error */
1995 break;
1996 }
1997 case VBOXVDMACMD_TYPE_DMA_NOP:
1998 return VINF_SUCCESS;
1999 case VBOXVDMACMD_TYPE_CHILD_STATUS_IRQ:
2000 return VINF_SUCCESS;
2001 default:
2002 AssertBreakpoint();
2003 return VERR_INVALID_FUNCTION;
2004 }
2005 } while (1);
2006
2007 /* we should not be here */
2008 AssertBreakpoint();
2009 return VERR_INVALID_STATE;
2010}
2011
2012static DECLCALLBACK(int) vboxVDMAWorkerThread(RTTHREAD ThreadSelf, void *pvUser)
2013{
2014 PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)pvUser;
2015 PVGASTATE pVGAState = pVdma->pVGAState;
2016 VBVAEXHOSTCONTEXT *pCmdVbva = &pVdma->CmdVbva;
2017 PHGSMIINSTANCE pHgsmi = pVdma->pHgsmi;
2018 uint8_t *pCmd;
2019 uint32_t cbCmd;
2020
2021 int rc = VBoxVDMAThreadNotifyConstructSucceeded(&pVdma->Thread);
2022 if (!RT_SUCCESS(rc))
2023 {
2024 WARN(("VBoxVDMAThreadNotifyConstructSucceeded failed %d\n", rc));
2025 return rc;
2026 }
2027
2028 while (!VBoxVDMAThreadIsTerminating(&pVdma->Thread))
2029 {
2030 VBVAEXHOST_DATA_TYPE enmType = VBoxVBVAExHPDataGet(pCmdVbva, &pCmd, &cbCmd);
2031 switch (enmType)
2032 {
2033 case VBVAEXHOST_DATA_TYPE_CMD:
2034 vboxVDMACrCmdProcess(pVdma, pCmd, cbCmd);
2035 VBoxVBVAExHPDataCompleteCmd(pCmdVbva, cbCmd);
2036 VBVARaiseIrqNoWait(pVGAState, 0);
2037 break;
2038 case VBVAEXHOST_DATA_TYPE_HOSTCTL:
2039 rc = vboxVDMACrHostCtlProcess(pVdma, (VBVAEXHOSTCTL*)pCmd);
2040 VBoxVBVAExHPDataCompleteCtl(pCmdVbva, (VBVAEXHOSTCTL*)pCmd, rc);
2041 break;
2042 case VBVAEXHOST_DATA_TYPE_GUESTCTL:
2043 rc = vboxVDMACrGuestCtlProcess(pVdma, (VBVAEXHOSTCTL*)pCmd);
2044 VBoxVBVAExHPDataCompleteCtl(pCmdVbva, (VBVAEXHOSTCTL*)pCmd, rc);
2045 break;
2046 case VBVAEXHOST_DATA_TYPE_NO_DATA:
2047 rc = VBoxVDMAThreadEventWait(&pVdma->Thread, RT_INDEFINITE_WAIT);
2048 AssertRC(rc);
2049 break;
2050 default:
2051 WARN(("unexpected type %d\n", enmType));
2052 break;
2053 }
2054 }
2055
2056 return VINF_SUCCESS;
2057}
2058
/**
 * Processes one guest DMA command buffer and completes it over SHGSMI.
 *
 * The payload location is selected by pCmd->fFlags:
 *  - VBOXVDMACBUF_FLAG_BUF_FOLLOWS_DR: payload directly follows the descriptor;
 *  - VBOXVDMACBUF_FLAG_BUF_VRAM_OFFSET: payload lives in VRAM at offVramBuf;
 *  - otherwise: payload is in guest physical memory and is temporarily mapped
 *    read-only (currently restricted to a single 4K page).
 *
 * @param   pVdma   The VDMA host state.
 * @param   pCmd    Command descriptor; pCmd->rc receives the result.
 * @param   cbCmd   Size of the descriptor (unused; pCmd->cbBuf drives processing).
 */
static void vboxVDMACommandProcess(PVBOXVDMAHOST pVdma, PVBOXVDMACBUF_DR pCmd, uint32_t cbCmd)
{
    PHGSMIINSTANCE pHgsmi = pVdma->pHgsmi;
    const uint8_t * pvBuf;
    PGMPAGEMAPLOCK Lock;
    int rc;
    bool bReleaseLocked = false; /* set when the guest-page mapping must be released below */

    do
    {
        PPDMDEVINS pDevIns = pVdma->pVGAState->pDevInsR3;

        if (pCmd->fFlags & VBOXVDMACBUF_FLAG_BUF_FOLLOWS_DR)
            pvBuf = VBOXVDMACBUF_DR_TAIL(pCmd, const uint8_t);
        else if (pCmd->fFlags & VBOXVDMACBUF_FLAG_BUF_VRAM_OFFSET)
        {
            uint8_t * pvRam = pVdma->pVGAState->vram_ptrR3;
            pvBuf = pvRam + pCmd->Location.offVramBuf;
        }
        else
        {
            /* Guest-physical buffer: split address into page base + in-page offset. */
            RTGCPHYS phPage = pCmd->Location.phBuf & ~0xfffULL;
            uint32_t offset = pCmd->Location.phBuf & 0xfff;
            Assert(offset + pCmd->cbBuf <= 0x1000);
            if (offset + pCmd->cbBuf > 0x1000)
            {
                /* @todo: more advanced mechanism of command buffer proc is actually needed */
                rc = VERR_INVALID_PARAMETER;
                break;
            }

            const void * pvPageBuf;
            rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phPage, 0, &pvPageBuf, &Lock);
            AssertRC(rc);
            if (!RT_SUCCESS(rc))
            {
                /* @todo: if (rc == VERR_PGM_PHYS_PAGE_RESERVED) -> fall back on using PGMPhysRead ?? */
                break;
            }

            pvBuf = (const uint8_t *)pvPageBuf;
            pvBuf += offset;

            bReleaseLocked = true;
        }

        rc = vboxVDMACmdExec(pVdma, pvBuf, pCmd->cbBuf);
        AssertRC(rc);

        /* Release the page mapping (only taken on the guest-physical path). */
        if (bReleaseLocked)
            PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
    } while (0);

    pCmd->rc = rc;

    rc = VBoxSHGSMICommandComplete (pHgsmi, pCmd);
    AssertRC(rc);
}
2117
2118static void vboxVDMAControlProcess(PVBOXVDMAHOST pVdma, PVBOXVDMA_CTL pCmd)
2119{
2120 PHGSMIINSTANCE pHgsmi = pVdma->pHgsmi;
2121 pCmd->i32Result = VINF_SUCCESS;
2122 int rc = VBoxSHGSMICommandComplete (pHgsmi, pCmd);
2123 AssertRC(rc);
2124}
2125
#ifdef VBOX_VDMA_WITH_WATCHDOG
/**
 * Watchdog timer callback: raises the HGSMI watchdog IRQ towards the guest.
 */
static DECLCALLBACK(void) vboxVDMAWatchDogTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
{
    VBOXVDMAHOST *pVdma = (VBOXVDMAHOST *)pvUser;
    PVGASTATE pVGAState = pVdma->pVGAState;
    VBVARaiseIrq(pVGAState, HGSMIHOSTFLAGS_WATCHDOG);
}

/**
 * Arms (cMillis != 0) or stops (cMillis == 0) the VDMA watchdog timer.
 *
 * @returns VINF_SUCCESS.
 * @param   pVdma   The VDMA host state owning the timer.
 * @param   cMillis Timeout in milliseconds, or zero to stop the watchdog.
 */
static int vboxVDMAWatchDogCtl(struct VBOXVDMAHOST *pVdma, uint32_t cMillis)
{
    /* The timer handle is all we need here; the previously fetched device
     * instance pointer was unused and has been dropped. */
    if (cMillis)
        TMTimerSetMillies(pVdma->WatchDogTimer, cMillis);
    else
        TMTimerStop(pVdma->WatchDogTimer);
    return VINF_SUCCESS;
}
#endif
2144
2145int vboxVDMAConstruct(PVGASTATE pVGAState, uint32_t cPipeElements)
2146{
2147 int rc;
2148#ifdef VBOX_VDMA_WITH_WORKERTHREAD
2149 PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)RTMemAllocZ(RT_OFFSETOF(VBOXVDMAHOST, CmdPool.aCmds[cPipeElements]));
2150#else
2151 PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)RTMemAllocZ(sizeof(*pVdma));
2152#endif
2153 Assert(pVdma);
2154 if (pVdma)
2155 {
2156 pVdma->pHgsmi = pVGAState->pHGSMI;
2157 pVdma->pVGAState = pVGAState;
2158
2159#ifdef VBOX_VDMA_WITH_WATCHDOG
2160 rc = PDMDevHlpTMTimerCreate(pVGAState->pDevInsR3, TMCLOCK_REAL, vboxVDMAWatchDogTimer,
2161 pVdma, TMTIMER_FLAGS_NO_CRIT_SECT,
2162 "VDMA WatchDog Timer", &pVdma->WatchDogTimer);
2163 AssertRC(rc);
2164#endif
2165 rc = VBoxVBVAExHSInit(&pVdma->CmdVbva);
2166 if (RT_SUCCESS(rc))
2167 {
2168 rc = VBoxVDMAThreadCreate(&pVdma->Thread, vboxVDMAWorkerThread, pVdma);
2169 if (RT_SUCCESS(rc))
2170 {
2171 pVGAState->pVdma = pVdma;
2172#ifdef VBOX_WITH_CRHGSMI
2173 int rcIgnored = vboxVDMACrCtlHgsmiSetup(pVdma); NOREF(rcIgnored); /** @todo is this ignoring intentional? */
2174#endif
2175 return VINF_SUCCESS;
2176 }
2177 else
2178 WARN(("VBoxVDMAThreadCreate faile %d\n", rc));
2179
2180 VBoxVBVAExHSTerm(&pVdma->CmdVbva);
2181 }
2182 else
2183 WARN(("VBoxVBVAExHSInit faile %d\n", rc));
2184
2185 RTMemFree(pVdma);
2186 }
2187 else
2188 rc = VERR_OUT_OF_RESOURCES;
2189
2190 return rc;
2191}
2192
2193int vboxVDMAReset(struct VBOXVDMAHOST *pVdma)
2194{
2195 VBVAEXHOSTCTL Ctl;
2196 Ctl.enmType = VBVAEXHOSTCTL_TYPE_HH_RESET;
2197 int rc = vdmaVBVACtlSubmitSync(pVdma, &Ctl, VBVAEXHOSTCTL_SOURCE_HOST_ANY);
2198 if (!RT_SUCCESS(rc))
2199 {
2200 WARN(("vdmaVBVACtlSubmitSync failed %d\n", rc));
2201 return rc;
2202 }
2203 return VINF_SUCCESS;
2204}
2205
2206int vboxVDMADestruct(struct VBOXVDMAHOST *pVdma)
2207{
2208 VBVAEXHOSTCTL Ctl;
2209 Ctl.enmType = VBVAEXHOSTCTL_TYPE_HH_TERM;
2210 int rc = vdmaVBVACtlSubmitSync(pVdma, &Ctl, VBVAEXHOSTCTL_SOURCE_HOST_ANY);
2211 if (!RT_SUCCESS(rc))
2212 {
2213 WARN(("vdmaVBVACtlSubmitSync failed %d\n", rc));
2214 return rc;
2215 }
2216 VBoxVDMAThreadTerm(&pVdma->Thread);
2217 VBoxVBVAExHSTerm(&pVdma->CmdVbva);
2218 RTMemFree(pVdma);
2219 return VINF_SUCCESS;
2220}
2221
2222int vboxVDMASaveStateExecPrep(struct VBOXVDMAHOST *pVdma, PSSMHANDLE pSSM)
2223{
2224#ifdef VBOX_WITH_CRHGSMI
2225 PVGASTATE pVGAState = pVdma->pVGAState;
2226 PVBOXVDMACMD_CHROMIUM_CTL pCmd = (PVBOXVDMACMD_CHROMIUM_CTL)vboxVDMACrCtlCreate(
2227 VBOXVDMACMD_CHROMIUM_CTL_TYPE_SAVESTATE_BEGIN, sizeof (*pCmd));
2228 Assert(pCmd);
2229 if (pCmd)
2230 {
2231 int rc = vboxVDMACrCtlPost(pVGAState, pCmd, sizeof (*pCmd));
2232 AssertRC(rc);
2233 if (RT_SUCCESS(rc))
2234 {
2235 rc = vboxVDMACrCtlGetRc(pCmd);
2236 }
2237 vboxVDMACrCtlRelease(pCmd);
2238 return rc;
2239 }
2240 return VERR_NO_MEMORY;
2241#else
2242 return VINF_SUCCESS;
2243#endif
2244}
2245
2246int vboxVDMASaveStateExecDone(struct VBOXVDMAHOST *pVdma, PSSMHANDLE pSSM)
2247{
2248#ifdef VBOX_WITH_CRHGSMI
2249 PVGASTATE pVGAState = pVdma->pVGAState;
2250 PVBOXVDMACMD_CHROMIUM_CTL pCmd = (PVBOXVDMACMD_CHROMIUM_CTL)vboxVDMACrCtlCreate(
2251 VBOXVDMACMD_CHROMIUM_CTL_TYPE_SAVESTATE_END, sizeof (*pCmd));
2252 Assert(pCmd);
2253 if (pCmd)
2254 {
2255 int rc = vboxVDMACrCtlPost(pVGAState, pCmd, sizeof (*pCmd));
2256 AssertRC(rc);
2257 if (RT_SUCCESS(rc))
2258 {
2259 rc = vboxVDMACrCtlGetRc(pCmd);
2260 }
2261 vboxVDMACrCtlRelease(pCmd);
2262 return rc;
2263 }
2264 return VERR_NO_MEMORY;
2265#else
2266 return VINF_SUCCESS;
2267#endif
2268}
2269
/**
 * Handles a VBOXVDMA_CTL control request from the guest and completes it
 * synchronously over SHGSMI.
 *
 * ENABLE/DISABLE/FLUSH are accepted as no-ops (result VINF_SUCCESS); WATCHDOG
 * (when built in) arms or stops the watchdog timer; anything else is rejected
 * with VERR_NOT_SUPPORTED.
 *
 * @param   pVdma   The VDMA host state.
 * @param   pCmd    The control command; i32Result receives the outcome.
 * @param   cbCmd   Size of the control command (unused).
 */
void vboxVDMAControl(struct VBOXVDMAHOST *pVdma, PVBOXVDMA_CTL pCmd, uint32_t cbCmd)
{
#if 1
    PHGSMIINSTANCE pIns = pVdma->pHgsmi;

    switch (pCmd->enmCtl)
    {
        case VBOXVDMA_CTL_TYPE_ENABLE:
            pCmd->i32Result = VINF_SUCCESS;
            break;
        case VBOXVDMA_CTL_TYPE_DISABLE:
            pCmd->i32Result = VINF_SUCCESS;
            break;
        case VBOXVDMA_CTL_TYPE_FLUSH:
            pCmd->i32Result = VINF_SUCCESS;
            break;
#ifdef VBOX_VDMA_WITH_WATCHDOG
        case VBOXVDMA_CTL_TYPE_WATCHDOG:
            pCmd->i32Result = vboxVDMAWatchDogCtl(pVdma, pCmd->u32Offset);
            break;
#endif
        default:
            AssertBreakpoint();
            pCmd->i32Result = VERR_NOT_SUPPORTED;
    }

    int rc = VBoxSHGSMICommandComplete (pIns, pCmd);
    AssertRC(rc);
#else
    /* disabled test path: exercise asynchronous completion via the worker pipe */
    VBOXVDMACMD_SUBMIT_CONTEXT Context;
    Context.pVdma = pVdma;
    Context.Cmd.enmType = VBOXVDMAPIPE_CMD_TYPE_DMACTL;
    Context.Cmd.u.pCtl = pCmd;

    int rc = vboxVDMAPipeModifyClient(&pVdma->Pipe, vboxVDMACommandSubmitCb, &Context);
    AssertRC(rc);
    if (RT_SUCCESS(rc))
    {
        Assert(Context.bQueued);
        if (Context.bQueued)
        {
            /* success */
            return;
        }
        rc = VERR_OUT_OF_RESOURCES;
    }

    /* failure */
    Assert(RT_FAILURE(rc));
    PHGSMIINSTANCE pIns = pVdma->pHgsmi;
    pCmd->i32Result = rc;
    int tmpRc = VBoxSHGSMICommandComplete (pIns, pCmd);
    AssertRC(tmpRc);

#endif
}
2327
/**
 * Entry point for guest-submitted VDMA command buffers.
 *
 * Chromium commands are dispatched to the chromium HGCM thread first; other
 * commands are processed synchronously (or via the worker pipe in
 * VBOX_VDMA_WITH_WORKERTHREAD builds) and completed over SHGSMI.
 *
 * @param   pVdma   The VDMA host state.
 * @param   pCmd    The command descriptor.
 * @param   cbCmd   Size of the descriptor.
 */
void vboxVDMACommand(struct VBOXVDMAHOST *pVdma, PVBOXVDMACBUF_DR pCmd, uint32_t cbCmd)
{
    int rc = VERR_NOT_IMPLEMENTED;

#ifdef VBOX_WITH_CRHGSMI
    /* chromium commands are processed by the chromium hgcm thread independently from our internal cmd processing pipeline
     * this is why we process them specially */
    rc = vboxVDMACmdCheckCrCmd(pVdma, pCmd, cbCmd);
    if (rc == VINF_SUCCESS)
        return; /* handed off to chromium; completion happens there */

    if (RT_FAILURE(rc))
    {
        pCmd->rc = rc;
        rc = VBoxSHGSMICommandComplete (pVdma->pHgsmi, pCmd);
        AssertRC(rc);
        return;
    }
#endif

#ifndef VBOX_VDMA_WITH_WORKERTHREAD
    vboxVDMACommandProcess(pVdma, pCmd, cbCmd);
#else

# ifdef DEBUG_misha
    Assert(0);
# endif

    VBOXVDMACMD_SUBMIT_CONTEXT Context;
    Context.pVdma = pVdma;
    Context.Cmd.enmType = VBOXVDMAPIPE_CMD_TYPE_DMACMD;
    Context.Cmd.u.pDr = pCmd;

    rc = vboxVDMAPipeModifyClient(&pVdma->Pipe, vboxVDMACommandSubmitCb, &Context);
    AssertRC(rc);
    if (RT_SUCCESS(rc))
    {
        Assert(Context.bQueued);
        if (Context.bQueued)
        {
            /* success */
            return;
        }
        rc = VERR_OUT_OF_RESOURCES;
    }
    /* failure: complete the command here with the error code */
    Assert(RT_FAILURE(rc));
    PHGSMIINSTANCE pIns = pVdma->pHgsmi;
    pCmd->rc = rc;
    int tmpRc = VBoxSHGSMICommandComplete (pIns, pCmd);
    AssertRC(tmpRc);
#endif
}
2381
2382/**/
2383
2384static int vdmaVBVACtlSubmit(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL* pCtl, VBVAEXHOSTCTL_SOURCE enmSource, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
2385{
2386 int rc = VBoxVBVAExHCtlSubmit(&pVdma->CmdVbva, pCtl, enmSource, pfnComplete, pvComplete);
2387 if (RT_SUCCESS(rc))
2388 {
2389 if (rc == VINF_SUCCESS)
2390 return VBoxVDMAThreadEventNotify(&pVdma->Thread);
2391 else
2392 Assert(rc == VINF_ALREADY_INITIALIZED);
2393 }
2394 else
2395 WARN(("VBoxVBVAExHCtlSubmit failed %d\n", rc));
2396
2397 return rc;
2398}
2399
/**
 * Completion callback for guest-originated opaque controls submitted via
 * vdmaVBVACtlOpaqueGuestSubmit().
 *
 * Recovers the original VBOXCMDVBVA_CTL header (which immediately precedes
 * the opaque payload), stores the result, completes the HGSMI command, and
 * frees the host control wrapper.
 */
static DECLCALLBACK(void) vboxCmdVBVACmdCtlGuestCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl, int rc, void *pvContext)
{
    PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)pvContext;
    /* pu8Cmd points just past the VBOXCMDVBVA_CTL header; step back to it. */
    VBOXCMDVBVA_CTL *pGCtl = (VBOXCMDVBVA_CTL*)(pCtl->u.cmd.pu8Cmd - sizeof (VBOXCMDVBVA_CTL));
    AssertRC(rc);
    pGCtl->i32Result = rc;

    Assert(pVdma->pVGAState->fGuestCaps & VBVACAPS_COMPLETEGCMD_BY_IOREAD);
    rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, pGCtl);
    AssertRC(rc);

    VBoxVBVAExHCtlFree(pVbva, pCtl);
}
2413
2414static int vdmaVBVACtlOpaqueSubmit(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL_SOURCE enmSource, uint8_t* pu8Cmd, uint32_t cbCmd, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
2415{
2416 VBVAEXHOSTCTL* pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva, VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE);
2417 if (!pHCtl)
2418 {
2419 WARN(("VBoxVBVAExHCtlCreate failed\n"));
2420 return VERR_NO_MEMORY;
2421 }
2422
2423 pHCtl->u.cmd.pu8Cmd = pu8Cmd;
2424 pHCtl->u.cmd.cbCmd = cbCmd;
2425 int rc = vdmaVBVACtlSubmit(pVdma, pHCtl, enmSource, pfnComplete, pvComplete);
2426 if (!RT_SUCCESS(rc))
2427 {
2428 WARN(("vdmaVBVACtlSubmit failed rc %d\n", rc));
2429 return rc;;
2430 }
2431 return VINF_SUCCESS;
2432}
2433
2434static int vdmaVBVACtlOpaqueGuestSubmit(PVBOXVDMAHOST pVdma, VBOXCMDVBVA_CTL *pCtl, uint32_t cbCtl)
2435{
2436 Assert(cbCtl >= sizeof (VBOXCMDVBVA_CTL));
2437 VBoxSHGSMICommandMarkAsynchCompletion(pCtl);
2438 int rc = vdmaVBVACtlOpaqueSubmit(pVdma, VBVAEXHOSTCTL_SOURCE_GUEST, (uint8_t*)(pCtl+1), cbCtl - sizeof (VBOXCMDVBVA_CTL), vboxCmdVBVACmdCtlGuestCompletion, pVdma);
2439 if (RT_SUCCESS(rc))
2440 return VINF_SUCCESS;
2441
2442 WARN(("vdmaVBVACtlOpaqueSubmit failed %d\n", rc));
2443 pCtl->i32Result = rc;
2444 rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, pCtl);
2445 AssertRC(rc);
2446 return VINF_SUCCESS;
2447}
2448
2449static DECLCALLBACK(void) vboxCmdVBVACmdCtlHostCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl, int rc, void *pvCompletion)
2450{
2451 VBOXCRCMDCTL* pVboxCtl = (VBOXCRCMDCTL*)pCtl->u.cmd.pu8Cmd;
2452 if (pVboxCtl->pfnInternal)
2453 ((PFNCRCTLCOMPLETION)pVboxCtl->pfnInternal)(pVboxCtl, pCtl->u.cmd.cbCmd, rc, pvCompletion);
2454 VBoxVBVAExHCtlFree(pVbva, pCtl);
2455}
2456
2457static int vdmaVBVACtlOpaqueHostSubmit(PVBOXVDMAHOST pVdma, struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd,
2458 PFNCRCTLCOMPLETION pfnCompletion,
2459 void *pvCompletion)
2460{
2461 pCmd->pfnInternal = (void(*)())pfnCompletion;
2462 int rc = vdmaVBVACtlOpaqueSubmit(pVdma, VBVAEXHOSTCTL_SOURCE_HOST_ENABLED, (uint8_t*)pCmd, cbCmd, vboxCmdVBVACmdCtlHostCompletion, pvCompletion);
2463 if (!RT_SUCCESS(rc))
2464 {
2465 if (rc == VERR_INVALID_STATE)
2466 {
2467 pCmd->pfnInternal = NULL;
2468 PVGASTATE pVGAState = pVdma->pVGAState;
2469 rc = pVGAState->pDrv->pfnCrHgcmCtlSubmit(pVGAState->pDrv, pCmd, cbCmd, pfnCompletion, pvCompletion);
2470 if (!RT_SUCCESS(rc))
2471 WARN(("pfnCrHgsmiControlProcess failed %d\n", rc));
2472
2473 return rc;
2474 }
2475 WARN(("vdmaVBVACtlOpaqueSubmit failed %d\n", rc));
2476 return rc;
2477 }
2478
2479 return VINF_SUCCESS;
2480}
2481
2482static int vdmaVBVACtlEnableDisableSubmitInternal(PVBOXVDMAHOST pVdma, VBVAENABLE *pEnable, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
2483{
2484 VBVAEXHOSTCTL* pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva, VBVAEXHOSTCTL_TYPE_GH_ENABLE_DISABLE);
2485 if (!pHCtl)
2486 {
2487 WARN(("VBoxVBVAExHCtlCreate failed\n"));
2488 return VERR_NO_MEMORY;
2489 }
2490
2491 pHCtl->u.cmd.pu8Cmd = (uint8_t*)pEnable;
2492 pHCtl->u.cmd.cbCmd = sizeof (*pEnable);
2493 int rc = vdmaVBVACtlSubmit(pVdma, pHCtl, VBVAEXHOSTCTL_SOURCE_GUEST, pfnComplete, pvComplete);
2494 if (!RT_SUCCESS(rc))
2495 {
2496 WARN(("vdmaVBVACtlSubmit failed rc %d\n", rc));
2497 return rc;;
2498 }
2499 return VINF_SUCCESS;
2500}
2501
2502static int vdmaVBVACtlEnableDisableSubmit(PVBOXVDMAHOST pVdma, VBOXCMDVBVA_CTL_ENABLE *pEnable)
2503{
2504 VBoxSHGSMICommandMarkAsynchCompletion(&pEnable->Hdr);
2505 int rc = vdmaVBVACtlEnableDisableSubmitInternal(pVdma, &pEnable->Enable, vboxCmdVBVACmdCtlGuestCompletion, pVdma);
2506 if (RT_SUCCESS(rc))
2507 return VINF_SUCCESS;
2508
2509 WARN(("vdmaVBVACtlEnableDisableSubmitInternal failed %d\n", rc));
2510 pEnable->Hdr.i32Result = rc;
2511 rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, &pEnable->Hdr);
2512 AssertRC(rc);
2513 return VINF_SUCCESS;
2514}
2515
2516static DECLCALLBACK(void) vdmaVBVACtlSubmitSyncCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl, int rc, void *pvContext)
2517{
2518 VDMA_VBVA_CTL_CYNC_COMPLETION *pData = (VDMA_VBVA_CTL_CYNC_COMPLETION*)pvContext;
2519 pData->rc = rc;
2520 rc = RTSemEventSignal(pData->hEvent);
2521 if (!RT_SUCCESS(rc))
2522 WARN(("RTSemEventSignal failed %d\n", rc));
2523}
2524
2525static int vdmaVBVACtlSubmitSync(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL* pCtl, VBVAEXHOSTCTL_SOURCE enmSource)
2526{
2527 VDMA_VBVA_CTL_CYNC_COMPLETION Data;
2528 Data.rc = VERR_NOT_IMPLEMENTED;
2529 int rc = RTSemEventCreate(&Data.hEvent);
2530 if (!RT_SUCCESS(rc))
2531 {
2532 WARN(("RTSemEventCreate failed %d\n", rc));
2533 return rc;
2534 }
2535
2536 rc = vdmaVBVACtlSubmit(pVdma, pCtl, enmSource, vdmaVBVACtlSubmitSyncCompletion, &Data);
2537 if (RT_SUCCESS(rc))
2538 {
2539 rc = RTSemEventWait(Data.hEvent, RT_INDEFINITE_WAIT);
2540 if (RT_SUCCESS(rc))
2541 {
2542 rc = Data.rc;
2543 if (!RT_SUCCESS(rc))
2544 WARN(("vdmaVBVACtlSubmitSyncCompletion returned %d\n", rc));
2545 }
2546 else
2547 WARN(("RTSemEventWait failed %d\n", rc));
2548 }
2549 else
2550 WARN(("vdmaVBVACtlSubmit failed %d\n", rc));
2551
2552 RTSemEventDestroy(Data.hEvent);
2553
2554 return rc;
2555}
2556
2557static int vdmaVBVAPause(PVBOXVDMAHOST pVdma)
2558{
2559 VBVAEXHOSTCTL Ctl;
2560 Ctl.enmType = VBVAEXHOSTCTL_TYPE_HH_INTERNAL_PAUSE;
2561 return vdmaVBVACtlSubmitSync(pVdma, &Ctl, VBVAEXHOSTCTL_SOURCE_HOST_ANY);
2562}
2563
2564static int vdmaVBVAResume(PVBOXVDMAHOST pVdma)
2565{
2566 VBVAEXHOSTCTL Ctl;
2567 Ctl.enmType = VBVAEXHOSTCTL_TYPE_HH_INTERNAL_RESUME;
2568 return vdmaVBVACtlSubmitSync(pVdma, &Ctl, VBVAEXHOSTCTL_SOURCE_HOST_ANY);
2569}
2570
2571static int vboxVDMACmdSubmitPerform(struct VBOXVDMAHOST *pVdma)
2572{
2573 int rc = VBoxVBVAExHSCheckCommands(&pVdma->CmdVbva);
2574 switch (rc)
2575 {
2576 case VINF_SUCCESS:
2577 return VBoxVDMAThreadEventNotify(&pVdma->Thread);
2578 case VINF_ALREADY_INITIALIZED:
2579 case VINF_EOF:
2580 case VERR_INVALID_STATE:
2581 return VINF_SUCCESS;
2582 default:
2583 Assert(!RT_FAILURE(rc));
2584 return RT_FAILURE(rc) ? rc : VERR_INTERNAL_ERROR;
2585 }
2586}
2587
2588
2589int vboxCmdVBVACmdHostCtl(PPDMIDISPLAYVBVACALLBACKS pInterface,
2590 struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd,
2591 PFNCRCTLCOMPLETION pfnCompletion,
2592 void *pvCompletion)
2593{
2594 PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
2595 struct VBOXVDMAHOST *pVdma = pVGAState->pVdma;
2596 return vdmaVBVACtlOpaqueHostSubmit(pVdma, pCmd, cbCmd, pfnCompletion, pvCompletion);
2597}
2598
2599int vboxCmdVBVACmdCtl(PVGASTATE pVGAState, VBOXCMDVBVA_CTL *pCtl, uint32_t cbCtl)
2600{
2601 struct VBOXVDMAHOST *pVdma = pVGAState->pVdma;
2602 int rc = VINF_SUCCESS;
2603 switch (pCtl->u32Type)
2604 {
2605 case VBOXCMDVBVACTL_TYPE_3DCTL:
2606 return vdmaVBVACtlOpaqueGuestSubmit(pVdma, pCtl, cbCtl);
2607 case VBOXCMDVBVACTL_TYPE_ENABLE:
2608 if (cbCtl != sizeof (VBOXCMDVBVA_CTL_ENABLE))
2609 {
2610 WARN(("incorrect enable size\n"));
2611 rc = VERR_INVALID_PARAMETER;
2612 break;
2613 }
2614 return vdmaVBVACtlEnableDisableSubmit(pVdma, (VBOXCMDVBVA_CTL_ENABLE*)pCtl);
2615 default:
2616 WARN(("unsupported type\n"));
2617 rc = VERR_INVALID_PARAMETER;
2618 break;
2619 }
2620
2621 pCtl->i32Result = rc;
2622 rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, pCtl);
2623 AssertRC(rc);
2624 return VINF_SUCCESS;
2625}
2626
2627int vboxCmdVBVACmdSubmit(PVGASTATE pVGAState)
2628{
2629 if (!VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva))
2630 {
2631 WARN(("vdma VBVA is disabled\n"));
2632 return VERR_INVALID_STATE;
2633 }
2634
2635 return vboxVDMACmdSubmitPerform(pVGAState->pVdma);
2636}
2637
2638int vboxCmdVBVACmdFlush(PVGASTATE pVGAState)
2639{
2640 WARN(("flush\n"));
2641 if (!VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva))
2642 {
2643 WARN(("vdma VBVA is disabled\n"));
2644 return VERR_INVALID_STATE;
2645 }
2646 return vboxVDMACmdSubmitPerform(pVGAState->pVdma);
2647}
2648
2649void vboxCmdVBVACmdTimer(PVGASTATE pVGAState)
2650{
2651 if (!VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva))
2652 return;
2653 vboxVDMACmdSubmitPerform(pVGAState->pVdma);
2654}
注意: 瀏覽 TracBrowser 來幫助您使用儲存庫瀏覽器

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette