VirtualBox

source: vbox/trunk/src/VBox/Devices/Graphics/DevVGA_VDMA.cpp@ 51349

最後變更 在這個檔案從51349是 51349,由 vboxsync 提交於 11 年 前

crOpenGL: saved state fixes, misc fixes

  • 屬性 svn:eol-style 設為 native
  • 屬性 svn:keywords 設為 Author Date Id Revision
檔案大小: 107.8 KB
 
1/** @file
2 * Video DMA (VDMA) support.
3 */
4
5/*
6 * Copyright (C) 2006-2012 Oracle Corporation
7 *
8 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
10 * you can redistribute it and/or modify it under the terms of the GNU
11 * General Public License (GPL) as published by the Free Software
12 * Foundation, in version 2 as it comes in the "COPYING" file of the
13 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
14 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
15 */
16#include <VBox/VMMDev.h>
17#include <VBox/vmm/pdmdev.h>
18#include <VBox/VBoxVideo.h>
19#include <iprt/semaphore.h>
20#include <iprt/thread.h>
21#include <iprt/mem.h>
22#include <iprt/asm.h>
23#include <iprt/list.h>
24#include <iprt/param.h>
25
26#include "DevVGA.h"
27#include "HGSMI/SHGSMIHost.h"
28
29#include <VBox/VBoxVideo3D.h>
30#include <VBox/VBoxVideoHost3D.h>
31
32#ifdef DEBUG_misha
33# define VBOXVDBG_MEMCACHE_DISABLE
34#endif
35
36#ifndef VBOXVDBG_MEMCACHE_DISABLE
37# include <iprt/memcache.h>
38#endif
39
40#ifdef DEBUG_misha
41#define WARN_BP() do { AssertFailed(); } while (0)
42#else
43#define WARN_BP() do { } while (0)
44#endif
45#define WARN(_msg) do { \
46 LogRel(_msg); \
47 WARN_BP(); \
48 } while (0)
49
50#define VBOXVDMATHREAD_STATE_TERMINATED 0
51#define VBOXVDMATHREAD_STATE_CREATING 1
52#define VBOXVDMATHREAD_STATE_CREATED 3
53#define VBOXVDMATHREAD_STATE_TERMINATING 4
54
55struct VBOXVDMATHREAD;
56
57typedef DECLCALLBACKPTR(void, PFNVBOXVDMATHREAD_CHANGED)(struct VBOXVDMATHREAD *pThread, int rc, void *pvThreadContext, void *pvChangeContext);
58
59typedef struct VBOXVDMATHREAD
60{
61 RTTHREAD hWorkerThread;
62 RTSEMEVENT hEvent;
63 volatile uint32_t u32State;
64 PFNVBOXVDMATHREAD_CHANGED pfnChanged;
65 void *pvChanged;
66} VBOXVDMATHREAD, *PVBOXVDMATHREAD;
67
68
69/* state transformations:
70 *
71 * submitter | processor
72 *
73 * LISTENING ---> PROCESSING
74 *
75 * */
76#define VBVAEXHOSTCONTEXT_STATE_LISTENING 0
77#define VBVAEXHOSTCONTEXT_STATE_PROCESSING 1
78
79#define VBVAEXHOSTCONTEXT_ESTATE_DISABLED -1
80#define VBVAEXHOSTCONTEXT_ESTATE_PAUSED 0
81#define VBVAEXHOSTCONTEXT_ESTATE_ENABLED 1
82
83typedef struct VBVAEXHOSTCONTEXT
84{
85 VBVABUFFER *pVBVA;
86 volatile int32_t i32State;
87 volatile int32_t i32EnableState;
88 volatile uint32_t u32cCtls;
89 /* critical section for accessing ctl lists */
90 RTCRITSECT CltCritSect;
91 RTLISTANCHOR GuestCtlList;
92 RTLISTANCHOR HostCtlList;
93#ifndef VBOXVDBG_MEMCACHE_DISABLE
94 RTMEMCACHE CtlCache;
95#endif
96} VBVAEXHOSTCONTEXT;
97
98typedef enum
99{
100 VBVAEXHOSTCTL_TYPE_UNDEFINED = 0,
101 VBVAEXHOSTCTL_TYPE_HH_INTERNAL_PAUSE,
102 VBVAEXHOSTCTL_TYPE_HH_INTERNAL_RESUME,
103 VBVAEXHOSTCTL_TYPE_HH_SAVESTATE,
104 VBVAEXHOSTCTL_TYPE_HH_LOADSTATE,
105 VBVAEXHOSTCTL_TYPE_HH_LOADSTATE_DONE,
106 VBVAEXHOSTCTL_TYPE_HH_BE_OPAQUE,
107 VBVAEXHOSTCTL_TYPE_HH_ON_HGCM_UNLOAD,
108 VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE,
109 VBVAEXHOSTCTL_TYPE_GHH_ENABLE,
110 VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED,
111 VBVAEXHOSTCTL_TYPE_GHH_DISABLE,
112 VBVAEXHOSTCTL_TYPE_GHH_RESIZE
113} VBVAEXHOSTCTL_TYPE;
114
115struct VBVAEXHOSTCTL;
116
117typedef DECLCALLBACKPTR(void, PFNVBVAEXHOSTCTL_COMPLETE)(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl, int rc, void *pvComplete);
118
119typedef struct VBVAEXHOSTCTL
120{
121 RTLISTNODE Node;
122 VBVAEXHOSTCTL_TYPE enmType;
123 union
124 {
125 struct
126 {
127 uint8_t * pu8Cmd;
128 uint32_t cbCmd;
129 } cmd;
130
131 struct
132 {
133 PSSMHANDLE pSSM;
134 uint32_t u32Version;
135 } state;
136 } u;
137 PFNVBVAEXHOSTCTL_COMPLETE pfnComplete;
138 void *pvComplete;
139} VBVAEXHOSTCTL;
140
141/* VBoxVBVAExHP**, i.e. processor functions, can NOT be called concurrently with each other,
142 * but can be called with other VBoxVBVAExS** (submitter) functions except Init/Start/Term aparently.
143 * Can only be called be the processor, i.e. the entity that acquired the processor state by direct or indirect call to the VBoxVBVAExHSCheckCommands
144 * see mor edetailed comments in headers for function definitions */
145typedef enum
146{
147 VBVAEXHOST_DATA_TYPE_NO_DATA = 0,
148 VBVAEXHOST_DATA_TYPE_CMD,
149 VBVAEXHOST_DATA_TYPE_HOSTCTL,
150 VBVAEXHOST_DATA_TYPE_GUESTCTL
151} VBVAEXHOST_DATA_TYPE;
152static VBVAEXHOST_DATA_TYPE VBoxVBVAExHPDataGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd);
153
154static void VBoxVBVAExHPDataCompleteCmd(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint32_t cbCmd);
155static void VBoxVBVAExHPDataCompleteCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL *pCtl, int rc);
156
157/* VBoxVBVAExHP**, i.e. processor functions, can NOT be called concurrently with each other,
158 * can be called concurrently with istelf as well as with other VBoxVBVAEx** functions except Init/Start/Term aparently */
159static int VBoxVBVAExHSCheckCommands(struct VBVAEXHOSTCONTEXT *pCmdVbva);
160
161static int VBoxVBVAExHSInit(struct VBVAEXHOSTCONTEXT *pCmdVbva);
162static int VBoxVBVAExHSEnable(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVABUFFER *pVBVA);
163static int VBoxVBVAExHSDisable(struct VBVAEXHOSTCONTEXT *pCmdVbva);
164static void VBoxVBVAExHSTerm(struct VBVAEXHOSTCONTEXT *pCmdVbva);
165static int VBoxVBVAExHSSaveState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM);
166static int VBoxVBVAExHSLoadState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version);
167
168static VBVAEXHOSTCTL* VBoxVBVAExHCtlAlloc(VBVAEXHOSTCONTEXT *pCmdVbva)
169{
170#ifndef VBOXVDBG_MEMCACHE_DISABLE
171 return (VBVAEXHOSTCTL*)RTMemCacheAlloc(pCmdVbva->CtlCache);
172#else
173 return (VBVAEXHOSTCTL*)RTMemAlloc(sizeof (VBVAEXHOSTCTL));
174#endif
175}
176
177static void VBoxVBVAExHCtlFree(VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL *pCtl)
178{
179#ifndef VBOXVDBG_MEMCACHE_DISABLE
180 RTMemCacheFree(pCmdVbva->CtlCache, pCtl);
181#else
182 RTMemFree(pCtl);
183#endif
184}
185
186static VBVAEXHOSTCTL* VBoxVBVAExHCtlCreate(VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL_TYPE enmType)
187{
188 VBVAEXHOSTCTL* pCtl = VBoxVBVAExHCtlAlloc(pCmdVbva);
189 if (!pCtl)
190 {
191 WARN(("VBoxVBVAExHCtlAlloc failed\n"));
192 return NULL;
193 }
194
195 pCtl->enmType = enmType;
196 return pCtl;
197}
198
/**
 * Tries to acquire the "processor" role for the command VBVA context by
 * atomically flipping i32State LISTENING -> PROCESSING.
 *
 * @returns VINF_SUCCESS when the caller is now the (only) processor,
 *          VERR_SEM_BUSY when another thread already holds the role.
 */
static int vboxVBVAExHSProcessorAcquire(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    Assert(pCmdVbva->i32State >= VBVAEXHOSTCONTEXT_STATE_LISTENING);

    if (ASMAtomicCmpXchgS32(&pCmdVbva->i32State, VBVAEXHOSTCONTEXT_STATE_PROCESSING, VBVAEXHOSTCONTEXT_STATE_LISTENING))
        return VINF_SUCCESS;
    return VERR_SEM_BUSY;
}
207
/**
 * Dequeues the next pending control command, host controls taking priority
 * over guest controls.  Must be called by the current processor
 * (i32State == PROCESSING).
 *
 * @param pCmdVbva      the command VBVA context.
 * @param pfHostCtl     out: true if the returned control came from the host
 *                      list, false if from the guest list.  Only written when
 *                      a control is returned.
 * @param fHostOnlyMode when true only the host control list is examined and
 *                      the lock-free emptiness fast path is skipped.
 * @returns the dequeued control, or NULL (nothing pending / lock failure).
 */
static VBVAEXHOSTCTL* vboxVBVAExHPCheckCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, bool *pfHostCtl, bool fHostOnlyMode)
{
    Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);

    /* Fast path: u32cCtls counts entries of both lists, so zero means nothing queued. */
    if (!fHostOnlyMode && !ASMAtomicUoReadU32(&pCmdVbva->u32cCtls))
        return NULL;

    int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
    if (RT_SUCCESS(rc))
    {
        VBVAEXHOSTCTL* pCtl = RTListGetFirst(&pCmdVbva->HostCtlList, VBVAEXHOSTCTL, Node);
        if (pCtl)
            *pfHostCtl = true;
        else if (!fHostOnlyMode)
        {
            /* Guest controls are not handed out while the context is paused. */
            if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
            {
                pCtl = RTListGetFirst(&pCmdVbva->GuestCtlList, VBVAEXHOSTCTL, Node);
                /* pCtl can not be null here since pCmdVbva->u32cCtls is not null,
                 * and there are no HostCtl commands */
                Assert(pCtl);
                *pfHostCtl = false;
            }
        }

        if (pCtl)
        {
            /* Unlink and decrement under the lock so the count stays in sync with the lists. */
            RTListNodeRemove(&pCtl->Node);
            ASMAtomicDecU32(&pCmdVbva->u32cCtls);
        }

        RTCritSectLeave(&pCmdVbva->CltCritSect);

        return pCtl;
    }
    else
        WARN(("RTCritSectEnter failed %d\n", rc));

    return NULL;
}
248
249static VBVAEXHOSTCTL* VBoxVBVAExHPCheckHostCtlOnDisable(struct VBVAEXHOSTCONTEXT *pCmdVbva)
250{
251 bool fHostCtl = false;
252 VBVAEXHOSTCTL* pCtl = vboxVBVAExHPCheckCtl(pCmdVbva, &fHostCtl, true);
253 Assert(!pCtl || fHostCtl);
254 return pCtl;
255}
256
257static int VBoxVBVAExHPPause(struct VBVAEXHOSTCONTEXT *pCmdVbva)
258{
259 if (pCmdVbva->i32EnableState < VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
260 {
261 WARN(("Invalid state\n"));
262 return VERR_INVALID_STATE;
263 }
264
265 ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_PAUSED);
266 return VINF_SUCCESS;
267}
268
269static int VBoxVBVAExHPResume(struct VBVAEXHOSTCONTEXT *pCmdVbva)
270{
271 if (pCmdVbva->i32EnableState != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
272 {
273 WARN(("Invalid state\n"));
274 return VERR_INVALID_STATE;
275 }
276
277 ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_ENABLED);
278 return VINF_SUCCESS;
279}
280
281
282static bool vboxVBVAExHPCheckProcessCtlInternal(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL* pCtl)
283{
284 switch (pCtl->enmType)
285 {
286 case VBVAEXHOSTCTL_TYPE_HH_INTERNAL_PAUSE:
287 {
288 int rc = VBoxVBVAExHPPause(pCmdVbva);
289 VBoxVBVAExHPDataCompleteCtl(pCmdVbva, pCtl, VINF_SUCCESS);
290 return true;
291 }
292 case VBVAEXHOSTCTL_TYPE_HH_INTERNAL_RESUME:
293 {
294 int rc = VBoxVBVAExHPResume(pCmdVbva);
295 VBoxVBVAExHPDataCompleteCtl(pCmdVbva, pCtl, VINF_SUCCESS);
296 return true;
297 }
298 default:
299 return false;
300 }
301}
302
/**
 * Releases the processor role: flips i32State back to LISTENING so another
 * thread may acquire it via vboxVBVAExHSProcessorAcquire().
 */
static void vboxVBVAExHPProcessorRelease(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);

    ASMAtomicWriteS32(&pCmdVbva->i32State, VBVAEXHOSTCONTEXT_STATE_LISTENING);
}
309
/**
 * Sets the VBVA_F_STATE_PROCESSING bit in the shared host-event flags, telling
 * the guest side that a processor is active.  Processor-only; no-op when no
 * VBVA buffer is attached.
 */
static void vboxVBVAExHPHgEventSet(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
    if (pCmdVbva->pVBVA)
        ASMAtomicOrU32(&pCmdVbva->pVBVA->hostFlags.u32HostEvents, VBVA_F_STATE_PROCESSING);
}
316
/**
 * Clears the VBVA_F_STATE_PROCESSING bit in the shared host-event flags.
 * Processor-only; no-op when no VBVA buffer is attached.
 */
static void vboxVBVAExHPHgEventClear(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
    if (pCmdVbva->pVBVA)
        ASMAtomicAndU32(&pCmdVbva->pVBVA->hostFlags.u32HostEvents, ~VBVA_F_STATE_PROCESSING);
}
323
/**
 * Fetches the next guest command record from the VBVA ring buffer.
 *
 * Caller must be the processor and the context must be enabled (not paused).
 * On VINF_SUCCESS, *ppCmd points directly into the ring buffer (no copy) and
 * *pcbCmd is the command size; the record is retired later via
 * VBoxVBVAExHPDataCompleteCmd().
 *
 * @returns VINF_SUCCESS       - command available, outputs set.
 *          VINF_EOF           - ring empty, outputs untouched.
 *          VINF_TRY_AGAIN     - record still being written by the guest.
 *          VERR_INVALID_STATE - command wraps the buffer end (unsupported).
 */
static int vboxVBVAExHPCmdGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd)
{
    Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
    Assert(pCmdVbva->i32EnableState > VBVAEXHOSTCONTEXT_ESTATE_PAUSED);

    VBVABUFFER *pVBVA = pCmdVbva->pVBVA;

    uint32_t indexRecordFirst = pVBVA->indexRecordFirst;
    uint32_t indexRecordFree = pVBVA->indexRecordFree;

    Log(("first = %d, free = %d\n",
         indexRecordFirst, indexRecordFree));

    if (indexRecordFirst == indexRecordFree)
    {
        /* No records to process. Return without assigning output variables. */
        return VINF_EOF;
    }

    uint32_t cbRecordCurrent = ASMAtomicReadU32(&pVBVA->aRecords[indexRecordFirst].cbRecord);

    /* A new record needs to be processed. */
    if (cbRecordCurrent & VBVA_F_RECORD_PARTIAL)
    {
        /* the record is being recorded, try again */
        return VINF_TRY_AGAIN;
    }

    uint32_t cbRecord = cbRecordCurrent & ~VBVA_F_RECORD_PARTIAL;

    if (!cbRecord)
    {
        /* the record is being recorded, try again */
        return VINF_TRY_AGAIN;
    }

    /* we should not get partial commands here actually */
    Assert(cbRecord);

    /* The size of the largest contiguous chunk in the ring buffer. */
    uint32_t u32BytesTillBoundary = pVBVA->cbData - pVBVA->off32Data;

    /* The pointer to data in the ring buffer. */
    uint8_t *pSrc = &pVBVA->au8Data[pVBVA->off32Data];

    /* Fetch or point the data. */
    if (u32BytesTillBoundary >= cbRecord)
    {
        /* The command does not cross buffer boundary. Return address in the buffer. */
        *ppCmd = pSrc;
        *pcbCmd = cbRecord;
        return VINF_SUCCESS;
    }

    LogRel(("CmdVbva: cross-bound writes unsupported\n"));
    return VERR_INVALID_STATE;
}
381
/**
 * Retires the first pending ring-buffer record: advances the data offset by
 * cbCmd (wrapping at cbData) and moves indexRecordFirst to the next slot.
 */
static void VBoxVBVAExHPDataCompleteCmd(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint32_t cbCmd)
{
    VBVABUFFER *pVBVA = pCmdVbva->pVBVA;
    pVBVA->off32Data = (pVBVA->off32Data + cbCmd) % pVBVA->cbData;

    pVBVA->indexRecordFirst = (pVBVA->indexRecordFirst + 1) % RT_ELEMENTS(pVBVA->aRecords);
}
389
390static void VBoxVBVAExHPDataCompleteCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL *pCtl, int rc)
391{
392 if (pCtl->pfnComplete)
393 pCtl->pfnComplete(pCmdVbva, pCtl, rc, pCtl->pvComplete);
394 else
395 VBoxVBVAExHCtlFree(pCmdVbva, pCtl);
396}
397
/**
 * Worker for VBoxVBVAExHPDataGet: fetches the next piece of work for the
 * processor — a host control, a guest control, or a ring-buffer command.
 *
 * Host-internal controls (pause/resume) are consumed inline and the loop
 * continues; a paused/disabled context yields NO_DATA without touching the
 * ring.  On VINF_TRY_AGAIN from the ring the thread sleeps 1ms and retries.
 *
 * @returns VBVAEXHOST_DATA_TYPE_* describing what *ppCmd/*pcbCmd refer to.
 */
static VBVAEXHOST_DATA_TYPE vboxVBVAExHPDataGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd)
{
    Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
    VBVAEXHOSTCTL *pCtl;
    bool fHostClt;

    for (;;)
    {
        pCtl = vboxVBVAExHPCheckCtl(pCmdVbva, &fHostClt, false);
        if (pCtl)
        {
            if (fHostClt)
            {
                if (!vboxVBVAExHPCheckProcessCtlInternal(pCmdVbva, pCtl))
                {
                    /* Not an internal control: hand it to the caller. */
                    *ppCmd = (uint8_t*)pCtl;
                    *pcbCmd = sizeof (*pCtl);
                    return VBVAEXHOST_DATA_TYPE_HOSTCTL;
                }
                /* Internal control handled inline; look for the next item. */
                continue;
            }
            else
            {
                *ppCmd = (uint8_t*)pCtl;
                *pcbCmd = sizeof (*pCtl);
                return VBVAEXHOST_DATA_TYPE_GUESTCTL;
            }
        }

        /* Paused or disabled: guest commands must not be fetched. */
        if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) <= VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
            return VBVAEXHOST_DATA_TYPE_NO_DATA;

        int rc = vboxVBVAExHPCmdGet(pCmdVbva, ppCmd, pcbCmd);
        switch (rc)
        {
            case VINF_SUCCESS:
                return VBVAEXHOST_DATA_TYPE_CMD;
            case VINF_EOF:
                return VBVAEXHOST_DATA_TYPE_NO_DATA;
            case VINF_TRY_AGAIN:
                /* Guest is still writing the record; back off briefly. */
                RTThreadSleep(1);
                continue;
            default:
                /* this is something really unexpected, i.e. most likely guest has written something incorrect to the VBVA buffer */
                WARN(("Warning: vboxVBVAExHCmdGet returned unexpected status %d\n", rc));
                return VBVAEXHOST_DATA_TYPE_NO_DATA;
        }
    }

    /* Not reachable: the loop only exits via return. */
    WARN(("Warning: VBoxVBVAExHCmdGet unexpected state\n"));
    return VBVAEXHOST_DATA_TYPE_NO_DATA;
}
450
/**
 * Public processor entry: fetches the next work item, and when the queue is
 * drained releases the processor role while carefully avoiding a lost-wakeup
 * race with the submitter (see the inline comment).
 *
 * @returns VBVAEXHOST_DATA_TYPE_* (NO_DATA means the processor role was released).
 */
static VBVAEXHOST_DATA_TYPE VBoxVBVAExHPDataGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd)
{
    VBVAEXHOST_DATA_TYPE enmType = vboxVBVAExHPDataGet(pCmdVbva, ppCmd, pcbCmd);
    if (enmType == VBVAEXHOST_DATA_TYPE_NO_DATA)
    {
        vboxVBVAExHPHgEventClear(pCmdVbva);
        vboxVBVAExHPProcessorRelease(pCmdVbva);
        /* we need to prevent racing between us clearing the flag and command check/submission thread, i.e.
         * 1. we check the queue -> and it is empty
         * 2. submitter adds command to the queue
         * 3. submitter checks the "processing" -> and it is true , thus it does not submit a notification
         * 4. we clear the "processing" state
         * 5. ->here we need to re-check the queue state to ensure we do not leak the notification of the above command
         * 6. if the queue appears to be not-empty set the "processing" state back to "true"
         **/
        int rc = vboxVBVAExHSProcessorAcquire(pCmdVbva);
        if (RT_SUCCESS(rc))
        {
            /* we are the processor now */
            enmType = vboxVBVAExHPDataGet(pCmdVbva, ppCmd, pcbCmd);
            if (enmType == VBVAEXHOST_DATA_TYPE_NO_DATA)
            {
                /* Still empty: release again and report no data. */
                vboxVBVAExHPProcessorRelease(pCmdVbva);
                return VBVAEXHOST_DATA_TYPE_NO_DATA;
            }

            /* New work arrived in the race window; keep the role and re-set the flag. */
            vboxVBVAExHPHgEventSet(pCmdVbva);
        }
    }

    return enmType;
}
483
484DECLINLINE(bool) vboxVBVAExHSHasCommands(struct VBVAEXHOSTCONTEXT *pCmdVbva)
485{
486 VBVABUFFER *pVBVA = pCmdVbva->pVBVA;
487
488 if (pVBVA)
489 {
490 uint32_t indexRecordFirst = pVBVA->indexRecordFirst;
491 uint32_t indexRecordFree = pVBVA->indexRecordFree;
492
493 if (indexRecordFirst != indexRecordFree)
494 return true;
495 }
496
497 return !!ASMAtomicReadU32(&pCmdVbva->u32cCtls);
498}
499
/* Checks whether new commands are ready for processing
 * @returns
 * VINF_SUCCESS - there are commands in the queue, and the calling thread is now the processor (i.e. typically it would delegate processing to a worker thread)
 * VINF_EOF - no commands in the queue
 * VINF_ALREADY_INITIALIZED - another thread is already processing the commands
 * VERR_INVALID_STATE - the VBVA is paused or pausing */
static int VBoxVBVAExHSCheckCommands(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    int rc = vboxVBVAExHSProcessorAcquire(pCmdVbva);
    if (RT_SUCCESS(rc))
    {
        /* we are the processor now */
        if (vboxVBVAExHSHasCommands(pCmdVbva))
        {
            /* Keep the role, advertise "processing" to the guest. */
            vboxVBVAExHPHgEventSet(pCmdVbva);
            return VINF_SUCCESS;
        }

        /* Nothing to do: give the role back immediately. */
        vboxVBVAExHPProcessorRelease(pCmdVbva);
        return VINF_EOF;
    }
    if (rc == VERR_SEM_BUSY)
        return VINF_ALREADY_INITIALIZED;
    return VERR_INVALID_STATE;
}
525
/**
 * Initializes a command VBVA context: zeroes it, creates the control-list
 * critical section and (unless disabled) the control memcache, and initializes
 * both control lists.  Starts in state PROCESSING / DISABLED.
 *
 * @returns VBox status code; on failure the context is left partially torn down.
 */
static int VBoxVBVAExHSInit(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    memset(pCmdVbva, 0, sizeof (*pCmdVbva));
    int rc = RTCritSectInit(&pCmdVbva->CltCritSect);
    if (RT_SUCCESS(rc))
    {
#ifndef VBOXVDBG_MEMCACHE_DISABLE
        rc = RTMemCacheCreate(&pCmdVbva->CtlCache, sizeof (VBVAEXHOSTCTL),
                              0, /* size_t cbAlignment */
                              UINT32_MAX, /* uint32_t cMaxObjects */
                              NULL, /* PFNMEMCACHECTOR pfnCtor*/
                              NULL, /* PFNMEMCACHEDTOR pfnDtor*/
                              NULL, /* void *pvUser*/
                              0 /* uint32_t fFlags*/
                              );
        if (RT_SUCCESS(rc))
#endif
        {
            RTListInit(&pCmdVbva->GuestCtlList);
            RTListInit(&pCmdVbva->HostCtlList);
            pCmdVbva->i32State = VBVAEXHOSTCONTEXT_STATE_PROCESSING;
            pCmdVbva->i32EnableState = VBVAEXHOSTCONTEXT_ESTATE_DISABLED;
            return VINF_SUCCESS;
        }
#ifndef VBOXVDBG_MEMCACHE_DISABLE
        else
            WARN(("RTMemCacheCreate failed %d\n", rc));
#endif
    }
    else
        WARN(("RTCritSectInit failed %d\n", rc));

    return rc;
}
560
561DECLINLINE(bool) VBoxVBVAExHSIsEnabled(struct VBVAEXHOSTCONTEXT *pCmdVbva)
562{
563 return (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) >= VBVAEXHOSTCONTEXT_ESTATE_PAUSED);
564}
565
566DECLINLINE(bool) VBoxVBVAExHSIsDisabled(struct VBVAEXHOSTCONTEXT *pCmdVbva)
567{
568 return (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) == VBVAEXHOSTCONTEXT_ESTATE_DISABLED);
569}
570
571static int VBoxVBVAExHSEnable(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVABUFFER *pVBVA)
572{
573 if (VBoxVBVAExHSIsEnabled(pCmdVbva))
574 {
575 WARN(("VBVAEx is enabled already\n"));
576 return VERR_INVALID_STATE;
577 }
578
579 pCmdVbva->pVBVA = pVBVA;
580 pCmdVbva->pVBVA->hostFlags.u32HostEvents = 0;
581 ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_ENABLED);
582 return VINF_SUCCESS;
583}
584
585static int VBoxVBVAExHSDisable(struct VBVAEXHOSTCONTEXT *pCmdVbva)
586{
587 if (VBoxVBVAExHSIsDisabled(pCmdVbva))
588 return VINF_SUCCESS;
589
590 ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_DISABLED);
591 return VINF_SUCCESS;
592}
593
/**
 * Tears down a command VBVA context initialized by VBoxVBVAExHSInit().
 * Both control lists are expected to be empty by now; the whole structure is
 * zeroed at the end.
 */
static void VBoxVBVAExHSTerm(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    /* ensure the processor is stopped */
    Assert(pCmdVbva->i32State >= VBVAEXHOSTCONTEXT_STATE_LISTENING);

    /* ensure no one tries to submit the command */
    if (pCmdVbva->pVBVA)
        pCmdVbva->pVBVA->hostFlags.u32HostEvents = 0;

    Assert(RTListIsEmpty(&pCmdVbva->GuestCtlList));
    Assert(RTListIsEmpty(&pCmdVbva->HostCtlList));

    RTCritSectDelete(&pCmdVbva->CltCritSect);

#ifndef VBOXVDBG_MEMCACHE_DISABLE
    RTMemCacheDestroy(pCmdVbva->CtlCache);
#endif

    memset(pCmdVbva, 0, sizeof (*pCmdVbva));
}
614
615static int vboxVBVAExHSSaveGuestCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL* pCtl, uint8_t* pu8VramBase, PSSMHANDLE pSSM)
616{
617 int rc = SSMR3PutU32(pSSM, pCtl->enmType);
618 AssertRCReturn(rc, rc);
619 rc = SSMR3PutU32(pSSM, pCtl->u.cmd.cbCmd);
620 AssertRCReturn(rc, rc);
621 rc = SSMR3PutU32(pSSM, (uint32_t)(pCtl->u.cmd.pu8Cmd - pu8VramBase));
622 AssertRCReturn(rc, rc);
623
624 return VINF_SUCCESS;
625}
626
/**
 * Saves all queued guest controls (caller holds CltCritSect); the context must
 * be PAUSED.  A terminating zero u32 marks the end of the list in the stream.
 *
 * @returns VBox status code.
 */
static int vboxVBVAExHSSaveStateLocked(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM)
{
    if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
    {
        WARN(("vbva not paused\n"));
        return VERR_INVALID_STATE;
    }

    VBVAEXHOSTCTL* pCtl;
    int rc;
    RTListForEach(&pCmdVbva->GuestCtlList, pCtl, VBVAEXHOSTCTL, Node)
    {
        rc = vboxVBVAExHSSaveGuestCtl(pCmdVbva, pCtl, pu8VramBase, pSSM);
        AssertRCReturn(rc, rc);
    }

    /* Zero type value terminates the list (see vboxVBVAExHSLoadGuestCtl). */
    rc = SSMR3PutU32(pSSM, 0);
    AssertRCReturn(rc, rc);

    return VINF_SUCCESS;
}
/* Saves the command VBVA state (the queued guest controls) to the SSM stream.
 * Takes CltCritSect and delegates to vboxVBVAExHSSaveStateLocked.
 * @returns VBox status code (VERR_INVALID_STATE when the VBVA is not paused).
 */
static int VBoxVBVAExHSSaveState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM)
{
    int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
    if (RT_FAILURE(rc))
    {
        WARN(("RTCritSectEnter failed %d\n", rc));
        return rc;
    }

    rc = vboxVBVAExHSSaveStateLocked(pCmdVbva, pu8VramBase, pSSM);
    if (RT_FAILURE(rc))
        WARN(("vboxVBVAExHSSaveStateLocked failed %d\n", rc));

    RTCritSectLeave(&pCmdVbva->CltCritSect);

    return rc;
}
668
/**
 * Loads one guest control from the SSM stream (counterpart of
 * vboxVBVAExHSSaveGuestCtl) and appends it to the guest control list.
 *
 * @returns VINF_EOF when the zero list terminator is read, VINF_SUCCESS when a
 *          control was loaded, failure otherwise.
 */
static int vboxVBVAExHSLoadGuestCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version)
{
    uint32_t u32;
    int rc = SSMR3GetU32(pSSM, &u32);
    AssertRCReturn(rc, rc);

    /* Zero type marks the end of the saved list. */
    if (!u32)
        return VINF_EOF;

    VBVAEXHOSTCTL* pHCtl = VBoxVBVAExHCtlCreate(pCmdVbva, (VBVAEXHOSTCTL_TYPE)u32);
    if (!pHCtl)
    {
        WARN(("VBoxVBVAExHCtlCreate failed\n"));
        return VERR_NO_MEMORY;
    }

    rc = SSMR3GetU32(pSSM, &u32);
    AssertRCReturn(rc, rc);
    pHCtl->u.cmd.cbCmd = u32;

    /* Saved as a VRAM-relative offset; rebase against the current VRAM mapping. */
    rc = SSMR3GetU32(pSSM, &u32);
    AssertRCReturn(rc, rc);
    pHCtl->u.cmd.pu8Cmd = pu8VramBase + u32;

    RTListAppend(&pCmdVbva->GuestCtlList, &pHCtl->Node);
    ++pCmdVbva->u32cCtls;

    return VINF_SUCCESS;
}
698
699
/**
 * Loads guest controls until the stream terminator (caller holds CltCritSect);
 * the context must be PAUSED.  AssertRCReturn propagates hard failures; the
 * loop ends on the VINF_EOF terminator.
 *
 * @returns VBox status code.
 */
static int vboxVBVAExHSLoadStateLocked(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version)
{
    if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
    {
        WARN(("vbva not stopped\n"));
        return VERR_INVALID_STATE;
    }

    int rc;

    do {
        rc = vboxVBVAExHSLoadGuestCtl(pCmdVbva, pu8VramBase, pSSM, u32Version);
        AssertRCReturn(rc, rc);
    } while (VINF_EOF != rc);

    return VINF_SUCCESS;
}
717
718/* Loads state
719 * @returns - same as VBoxVBVAExHSCheckCommands, or failure on load state fail
720 */
721static int VBoxVBVAExHSLoadState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version)
722{
723 Assert(VGA_SAVEDSTATE_VERSION_3D <= u32Version);
724 int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
725 if (RT_FAILURE(rc))
726 {
727 WARN(("RTCritSectEnter failed %d\n", rc));
728 return rc;
729 }
730
731 rc = vboxVBVAExHSLoadStateLocked(pCmdVbva, pu8VramBase, pSSM, u32Version);
732 if (RT_FAILURE(rc))
733 WARN(("vboxVBVAExHSSaveStateLocked failed %d\n", rc));
734
735 RTCritSectLeave(&pCmdVbva->CltCritSect);
736
737 return rc;
738}
739
740typedef enum
741{
742 VBVAEXHOSTCTL_SOURCE_GUEST = 0,
743 VBVAEXHOSTCTL_SOURCE_HOST
744} VBVAEXHOSTCTL_SOURCE;
745
746
/**
 * Queues a control for processing and kicks the command check.
 *
 * The enabled-state is checked twice: once lock-free as a fast reject, and
 * again under CltCritSect so a concurrent disable cannot race the enqueue.
 *
 * @param enmSource  VBVAEXHOSTCTL_SOURCE_HOST controls go to the host list,
 *                   guest controls to the guest list.
 * @returns status of VBoxVBVAExHSCheckCommands on success (see its docs),
 *          VERR_INVALID_STATE when disabled, critsect failure otherwise.
 */
static int VBoxVBVAExHCtlSubmit(VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL* pCtl, VBVAEXHOSTCTL_SOURCE enmSource, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
{
    if (!VBoxVBVAExHSIsEnabled(pCmdVbva))
    {
        Log(("cmd vbva not enabled\n"));
        return VERR_INVALID_STATE;
    }

    pCtl->pfnComplete = pfnComplete;
    pCtl->pvComplete = pvComplete;

    int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
    if (RT_SUCCESS(rc))
    {
        /* Re-check under the lock: disable may have raced us. */
        if (!VBoxVBVAExHSIsEnabled(pCmdVbva))
        {
            Log(("cmd vbva not enabled\n"));
            RTCritSectLeave(&pCmdVbva->CltCritSect);
            return VERR_INVALID_STATE;
        }

        if (enmSource > VBVAEXHOSTCTL_SOURCE_GUEST)
        {
            RTListAppend(&pCmdVbva->HostCtlList, &pCtl->Node);
        }
        else
            RTListAppend(&pCmdVbva->GuestCtlList, &pCtl->Node);

        ASMAtomicIncU32(&pCmdVbva->u32cCtls);

        RTCritSectLeave(&pCmdVbva->CltCritSect);

        /* Try to become the processor / notify the existing one. */
        rc = VBoxVBVAExHSCheckCommands(pCmdVbva);
    }
    else
        WARN(("RTCritSectEnter failed %d\n", rc));

    return rc;
}
786
787#ifdef VBOX_WITH_CRHGSMI
/** Per-source info: screen configuration plus the target-screen bitmap. */
typedef struct VBOXVDMA_SOURCE
{
    VBVAINFOSCREEN Screen;                            /**< screen configuration */
    VBOXCMDVBVA_SCREENMAP_DECL(uint32_t, aTargetMap); /**< bitmap of target screen indices */
} VBOXVDMA_SOURCE;
793#endif
794
/** The VDMA host state: HGSMI/VGA back-links plus the command-VBVA machinery. */
typedef struct VBOXVDMAHOST
{
    PHGSMIINSTANCE pHgsmi;                  /**< HGSMI instance used for command submission/completion */
    PVGASTATE pVGAState;                    /**< owning VGA device state */
#ifdef VBOX_WITH_CRHGSMI
    VBVAEXHOSTCONTEXT CmdVbva;              /**< command VBVA context (see VBVAEXHOSTCONTEXT) */
    VBOXVDMATHREAD Thread;                  /**< worker thread tracking */
    VBOXCRCMD_SVRINFO CrSrvInfo;            /**< chromium command server info */
    VBVAEXHOSTCTL* pCurRemainingHostCtl;    /**< host control still being processed */
    RTSEMEVENTMULTI HostCrCtlCompleteEvent; /**< signalled when host CrCtl processing completes */
    int32_t volatile i32cHostCrCtlCompleted; /**< completion counter, accessed atomically */
//    VBOXVDMA_SOURCE aSources[VBOX_VIDEO_MAX_SCREENS];
#endif
#ifdef VBOX_VDMA_WITH_WATCHDOG
    PTMTIMERR3 WatchDogTimer;               /**< watchdog timer handle */
#endif
} VBOXVDMAHOST, *PVBOXVDMAHOST;
812
813#ifdef VBOX_WITH_CRHGSMI
814
/**
 * Transitions the thread state CREATING -> CREATED and fires the pending
 * pfnChanged callback (if any) with VINF_SUCCESS.
 *
 * The callback fields are snapshotted and cleared BEFORE the state write so a
 * waiter observing CREATED never sees stale callback data.
 */
void VBoxVDMAThreadNotifyConstructSucceeded(PVBOXVDMATHREAD pThread, void *pvThreadContext)
{
    Assert(pThread->u32State == VBOXVDMATHREAD_STATE_CREATING);
    PFNVBOXVDMATHREAD_CHANGED pfnChanged = pThread->pfnChanged;
    void *pvChanged = pThread->pvChanged;

    pThread->pfnChanged = NULL;
    pThread->pvChanged = NULL;

    ASMAtomicWriteU32(&pThread->u32State, VBOXVDMATHREAD_STATE_CREATED);

    if (pfnChanged)
        pfnChanged(pThread, VINF_SUCCESS, pvThreadContext, pvChanged);
}
829
/**
 * Fires the pending pfnChanged callback (if any) once termination is under
 * way.  Unlike the construct variant, the state is left at TERMINATING — the
 * final TERMINATED transition happens in VBoxVDMAThreadCleanup().
 */
void VBoxVDMAThreadNotifyTerminatingSucceeded(PVBOXVDMATHREAD pThread, void *pvThreadContext)
{
    Assert(pThread->u32State == VBOXVDMATHREAD_STATE_TERMINATING);
    PFNVBOXVDMATHREAD_CHANGED pfnChanged = pThread->pfnChanged;
    void *pvChanged = pThread->pvChanged;

    pThread->pfnChanged = NULL;
    pThread->pvChanged = NULL;

    if (pfnChanged)
        pfnChanged(pThread, VINF_SUCCESS, pvThreadContext, pvChanged);
}
842
843DECLINLINE(bool) VBoxVDMAThreadIsTerminating(PVBOXVDMATHREAD pThread)
844{
845 return ASMAtomicUoReadU32(&pThread->u32State) == VBOXVDMATHREAD_STATE_TERMINATING;
846}
847
848void VBoxVDMAThreadInit(PVBOXVDMATHREAD pThread)
849{
850 memset(pThread, 0, sizeof (*pThread));
851 pThread->u32State = VBOXVDMATHREAD_STATE_TERMINATED;
852}
853
/**
 * Finalizes a terminating worker thread: waits for it to exit, destroys its
 * event semaphore and marks the state TERMINATED.  A no-op for an already
 * terminated thread; invalid for a created/creating one.
 *
 * @returns VINF_SUCCESS, RTThreadWait failure, or VERR_INVALID_STATE.
 */
int VBoxVDMAThreadCleanup(PVBOXVDMATHREAD pThread)
{
    uint32_t u32State = ASMAtomicUoReadU32(&pThread->u32State);
    switch (u32State)
    {
        case VBOXVDMATHREAD_STATE_TERMINATED:
            return VINF_SUCCESS;
        case VBOXVDMATHREAD_STATE_TERMINATING:
        {
            int rc = RTThreadWait(pThread->hWorkerThread, RT_INDEFINITE_WAIT, NULL);
            if (!RT_SUCCESS(rc))
            {
                WARN(("RTThreadWait failed %d\n", rc));
                return rc;
            }

            RTSemEventDestroy(pThread->hEvent);

            ASMAtomicWriteU32(&pThread->u32State, VBOXVDMATHREAD_STATE_TERMINATED);
            return VINF_SUCCESS;
        }
        default:
            WARN(("invalid state"));
            return VERR_INVALID_STATE;
    }
}
880
/**
 * Creates and starts the VDMA worker thread.
 *
 * pfnCreated/pvCreated are stashed in the tracking structure; they are expected
 * to be invoked once creation completes (see
 * VBoxVDMAThreadNotifyConstructSucceeded) — confirm against the worker routine.
 *
 * @returns VBox status code; on failure all resources are released and the
 *          state rolls back to TERMINATED.
 */
int VBoxVDMAThreadCreate(PVBOXVDMATHREAD pThread, PFNRTTHREAD pfnThread, void *pvThread, PFNVBOXVDMATHREAD_CHANGED pfnCreated, void*pvCreated)
{
    /* Finish off any previous worker first. */
    int rc = VBoxVDMAThreadCleanup(pThread);
    if (RT_FAILURE(rc))
    {
        WARN(("VBoxVDMAThreadCleanup failed %d\n", rc));
        return rc;
    }

    rc = RTSemEventCreate(&pThread->hEvent);
    if (RT_SUCCESS(rc))
    {
        pThread->u32State = VBOXVDMATHREAD_STATE_CREATING;
        pThread->pfnChanged = pfnCreated;
        pThread->pvChanged = pvCreated;
        rc = RTThreadCreate(&pThread->hWorkerThread, pfnThread, pvThread, 0, RTTHREADTYPE_IO, RTTHREADFLAGS_WAITABLE, "VDMA");
        if (RT_SUCCESS(rc))
            return VINF_SUCCESS;
        else
            WARN(("RTThreadCreate failed %d\n", rc));

        RTSemEventDestroy(pThread->hEvent);
    }
    else
        WARN(("RTSemEventCreate failed %d\n", rc));

    /* Roll back to the terminated state on any failure. */
    pThread->u32State = VBOXVDMATHREAD_STATE_TERMINATED;

    return rc;
}
911
912DECLINLINE(int) VBoxVDMAThreadEventNotify(PVBOXVDMATHREAD pThread)
913{
914 int rc = RTSemEventSignal(pThread->hEvent);
915 AssertRC(rc);
916 return rc;
917}
918
919DECLINLINE(int) VBoxVDMAThreadEventWait(PVBOXVDMATHREAD pThread, RTMSINTERVAL cMillies)
920{
921 int rc = RTSemEventWait(pThread->hEvent, cMillies);
922 AssertRC(rc);
923 return rc;
924}
925
926int VBoxVDMAThreadTerm(PVBOXVDMATHREAD pThread, PFNVBOXVDMATHREAD_CHANGED pfnTerminated, void*pvTerminated, bool fNotify)
927{
928 int rc;
929 do
930 {
931 uint32_t u32State = ASMAtomicUoReadU32(&pThread->u32State);
932 switch (u32State)
933 {
934 case VBOXVDMATHREAD_STATE_CREATED:
935 pThread->pfnChanged = pfnTerminated;
936 pThread->pvChanged = pvTerminated;
937 ASMAtomicWriteU32(&pThread->u32State, VBOXVDMATHREAD_STATE_TERMINATING);
938 if (fNotify)
939 {
940 rc = VBoxVDMAThreadEventNotify(pThread);
941 AssertRC(rc);
942 }
943 return VINF_SUCCESS;
944 case VBOXVDMATHREAD_STATE_TERMINATING:
945 case VBOXVDMATHREAD_STATE_TERMINATED:
946 {
947 WARN(("thread is marked to termination or terminated\nn"));
948 return VERR_INVALID_STATE;
949 }
950 case VBOXVDMATHREAD_STATE_CREATING:
951 {
952 /* wait till the thread creation is completed */
953 WARN(("concurrent thread create/destron\n"));
954 RTThreadYield();
955 continue;
956 }
957 default:
958 WARN(("invalid state"));
959 return VERR_INVALID_STATE;
960 }
961 } while (1);
962
963 WARN(("should never be here\n"));
964 return VERR_INTERNAL_ERROR;
965}
966
967static int vdmaVBVACtlSubmitSync(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL* pCtl, VBVAEXHOSTCTL_SOURCE enmSource);
968
/** Completion callback type for chromium control commands. */
typedef DECLCALLBACK(void) FNVBOXVDMACRCTL_CALLBACK(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, void* pvContext);
typedef FNVBOXVDMACRCTL_CALLBACK *PFNVBOXVDMACRCTL_CALLBACK;

/**
 * Private header prepended to every chromium control command allocated via
 * vboxVDMACrCtlCreate: reference count, result code and completion callback.
 * The public VBOXVDMACMD_CHROMIUM_CTL lives embedded at the tail (Cmd).
 */
typedef struct VBOXVDMACMD_CHROMIUM_CTL_PRIVATE
{
    uint32_t cRefs;                        /**< reference count, managed atomically (see vboxVDMACrCtlRetain/Release) */
    int32_t rc;                            /**< command result, VERR_NOT_IMPLEMENTED until completed */
    PFNVBOXVDMACRCTL_CALLBACK pfnCompletion; /**< completion callback, set at submission time */
    void *pvCompletion;                    /**< user context for pfnCompletion */
    VBOXVDMACMD_CHROMIUM_CTL Cmd;          /**< the public command, must be last (variable size) */
} VBOXVDMACMD_CHROMIUM_CTL_PRIVATE, *PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE;

/** Recovers the private header from a public command pointer (container-of). */
#define VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(_p) ((PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE)(((uint8_t*)(_p)) - RT_OFFSETOF(VBOXVDMACMD_CHROMIUM_CTL_PRIVATE, Cmd)))
982
983static PVBOXVDMACMD_CHROMIUM_CTL vboxVDMACrCtlCreate(VBOXVDMACMD_CHROMIUM_CTL_TYPE enmCmd, uint32_t cbCmd)
984{
985 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = (PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE)RTMemAllocZ(cbCmd + RT_OFFSETOF(VBOXVDMACMD_CHROMIUM_CTL_PRIVATE, Cmd));
986 Assert(pHdr);
987 if (pHdr)
988 {
989 pHdr->cRefs = 1;
990 pHdr->rc = VERR_NOT_IMPLEMENTED;
991 pHdr->Cmd.enmType = enmCmd;
992 pHdr->Cmd.cbCmd = cbCmd;
993 return &pHdr->Cmd;
994 }
995
996 return NULL;
997}
998
999DECLINLINE(void) vboxVDMACrCtlRelease (PVBOXVDMACMD_CHROMIUM_CTL pCmd)
1000{
1001 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
1002 uint32_t cRefs = ASMAtomicDecU32(&pHdr->cRefs);
1003 if(!cRefs)
1004 {
1005 RTMemFree(pHdr);
1006 }
1007}
1008
1009DECLINLINE(void) vboxVDMACrCtlRetain (PVBOXVDMACMD_CHROMIUM_CTL pCmd)
1010{
1011 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
1012 ASMAtomicIncU32(&pHdr->cRefs);
1013}
1014
1015DECLINLINE(int) vboxVDMACrCtlGetRc (PVBOXVDMACMD_CHROMIUM_CTL pCmd)
1016{
1017 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
1018 return pHdr->rc;
1019}
1020
1021static DECLCALLBACK(void) vboxVDMACrCtlCbSetEvent(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, void* pvContext)
1022{
1023 RTSemEventSignal((RTSEMEVENT)pvContext);
1024}
1025
1026static DECLCALLBACK(void) vboxVDMACrCtlCbReleaseCmd(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, void* pvContext)
1027{
1028 vboxVDMACrCtlRelease(pCmd);
1029}
1030
1031
/**
 * Posts a chromium control command to the display driver asynchronously.
 * pfnCompletion/pvCompletion are stored in the private header for the driver
 * to invoke on completion.
 *
 * @returns VINF_SUCCESS when handed to the driver,
 *          VERR_NOT_SUPPORTED when no driver / no CrHgsmi control support.
 */
static int vboxVDMACrCtlPostAsync (PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, uint32_t cbCmd, PFNVBOXVDMACRCTL_CALLBACK pfnCompletion, void *pvCompletion)
{
    if (   pVGAState->pDrv
        && pVGAState->pDrv->pfnCrHgsmiControlProcess)
    {
        PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
        pHdr->pfnCompletion = pfnCompletion;
        pHdr->pvCompletion = pvCompletion;
        pVGAState->pDrv->pfnCrHgsmiControlProcess(pVGAState->pDrv, pCmd, cbCmd);
        return VINF_SUCCESS;
    }
#ifdef DEBUG_misha
    Assert(0);
#endif
    return VERR_NOT_SUPPORTED;
}
1048
/* Submits a chromium control command and blocks until it completes.
 * Returns the submission status; the command's own result must be queried
 * separately via vboxVDMACrCtlGetRc(). */
static int vboxVDMACrCtlPost(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, uint32_t cbCmd)
{
    RTSEMEVENT hComplEvent;
    int rc = RTSemEventCreate(&hComplEvent);
    AssertRC(rc);
    if(RT_SUCCESS(rc))
    {
        /* the completion callback signals hComplEvent when the backend is done */
        rc = vboxVDMACrCtlPostAsync(pVGAState, pCmd, cbCmd, vboxVDMACrCtlCbSetEvent, (void*)hComplEvent);
#ifdef DEBUG_misha
        AssertRC(rc);
#endif
        if (RT_SUCCESS(rc))
        {
            rc = RTSemEventWaitNoResume(hComplEvent, RT_INDEFINITE_WAIT);
            AssertRC(rc);
            if(RT_SUCCESS(rc))
            {
                RTSemEventDestroy(hComplEvent);
            }
            /* NOTE(review): when the wait fails the event is deliberately NOT
             * destroyed - the completion callback may still signal it later,
             * so the handle is leaked rather than risking a use-after-free. */
        }
        else
        {
            /* the command is completed */
            RTSemEventDestroy(hComplEvent);
        }
    }
    return rc;
}
1077
/* Context for a synchronous VBVA ctl submission: the completion callback
 * stores the command status in rc and signals hEvent to wake the waiter.
 * ("CYNC" is a historical typo for "SYNC", kept to avoid breaking references.) */
typedef struct VDMA_VBVA_CTL_CYNC_COMPLETION
{
    int rc;         /* command completion status, valid once hEvent is signalled */
    RTSEMEVENT hEvent;
} VDMA_VBVA_CTL_CYNC_COMPLETION;
1083
/* Completion callback for vboxVDMACrHgcmSubmitSync: records the command
 * status, then wakes the waiter. The rc store must happen BEFORE the signal -
 * the waiting thread may tear down *pData right after waking up. */
static DECLCALLBACK(void) vboxVDMACrHgcmSubmitSyncCompletion(struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd, int rc, void *pvCompletion)
{
    VDMA_VBVA_CTL_CYNC_COMPLETION *pData = (VDMA_VBVA_CTL_CYNC_COMPLETION*)pvCompletion;
    pData->rc = rc;
    rc = RTSemEventSignal(pData->hEvent);
    if (!RT_SUCCESS(rc))
        WARN(("RTSemEventSignal failed %d\n", rc));
}
1092
/* Submits a VBOXCRCMDCTL to the chromium HGCM service and blocks until the
 * completion callback fires. Returns the submit failure status, or on
 * successful submission the command's own completion status. */
static int vboxVDMACrHgcmSubmitSync(struct VBOXVDMAHOST *pVdma, VBOXCRCMDCTL* pCtl, uint32_t cbCtl)
{
    VDMA_VBVA_CTL_CYNC_COMPLETION Data;
    Data.rc = VERR_NOT_IMPLEMENTED;
    int rc = RTSemEventCreate(&Data.hEvent);
    if (!RT_SUCCESS(rc))
    {
        WARN(("RTSemEventCreate failed %d\n", rc));
        return rc;
    }

    PVGASTATE pVGAState = pVdma->pVGAState;
    rc = pVGAState->pDrv->pfnCrHgcmCtlSubmit(pVGAState->pDrv, pCtl, cbCtl, vboxVDMACrHgcmSubmitSyncCompletion, &Data);
    if (RT_SUCCESS(rc))
    {
        rc = RTSemEventWait(Data.hEvent, RT_INDEFINITE_WAIT);
        if (RT_SUCCESS(rc))
        {
            /* propagate the command's own completion status */
            rc = Data.rc;
            if (!RT_SUCCESS(rc))
            {
                WARN(("pfnCrHgcmCtlSubmit command failed %d\n", rc));
            }

        }
        else
            WARN(("RTSemEventWait failed %d\n", rc));
    }
    else
        WARN(("pfnCrHgcmCtlSubmit failed %d\n", rc));


    RTSemEventDestroy(Data.hEvent);

    return rc;
}
1129
1130static int vdmaVBVACtlDisableSync(PVBOXVDMAHOST pVdma)
1131{
1132 VBVAEXHOSTCTL HCtl;
1133 HCtl.enmType = VBVAEXHOSTCTL_TYPE_GHH_DISABLE;
1134 return vdmaVBVACtlSubmitSync(pVdma, &HCtl, VBVAEXHOSTCTL_SOURCE_HOST);
1135}
1136
/* Iterator callback used while re-enabling the HGCM path: the first call
 * disables VBVA, subsequent calls complete the previously handed-out command
 * with prevCmdRc. Each call returns the next pending host ctl command from
 * the VBVA queue (and its size via *pcbCtl), or NULL when none remain. */
static DECLCALLBACK(uint8_t*) vboxVDMACrHgcmHandleEnableRemainingHostCommand(HVBOXCRCMDCTL_REMAINING_HOST_COMMAND hClient, uint32_t *pcbCtl, int prevCmdRc)
{
    struct VBOXVDMAHOST *pVdma = hClient;
    if (!pVdma->pCurRemainingHostCtl)
    {
        /* disable VBVA, all subsequent host commands will go HGCM way */
        VBoxVBVAExHSDisable(&pVdma->CmdVbva);
    }
    else
    {
        /* complete the command returned by the previous iteration */
        VBoxVBVAExHPDataCompleteCtl(&pVdma->CmdVbva, pVdma->pCurRemainingHostCtl, prevCmdRc);
    }

    pVdma->pCurRemainingHostCtl = VBoxVBVAExHPCheckHostCtlOnDisable(&pVdma->CmdVbva);
    if (pVdma->pCurRemainingHostCtl)
    {
        *pcbCtl = pVdma->pCurRemainingHostCtl->u.cmd.cbCmd;
        return pVdma->pCurRemainingHostCtl->u.cmd.pu8Cmd;
    }

    *pcbCtl = 0;
    return NULL;
}
1160
1161static DECLCALLBACK(void) vboxVDMACrHgcmNotifyTerminatingDoneCb(HVBOXCRCMDCTL_NOTIFY_TERMINATING hClient)
1162{
1163 struct VBOXVDMAHOST *pVdma = hClient;
1164 Assert(pVdma->CmdVbva.i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
1165 Assert(pVdma->Thread.u32State == VBOXVDMATHREAD_STATE_TERMINATING);
1166}
1167
1168static DECLCALLBACK(int) vboxVDMACrHgcmNotifyTerminatingCb(HVBOXCRCMDCTL_NOTIFY_TERMINATING hClient, VBOXCRCMDCTL_HGCMENABLE_DATA *pHgcmEnableData)
1169{
1170 struct VBOXVDMAHOST *pVdma = hClient;
1171 VBVAEXHOSTCTL HCtl;
1172 HCtl.enmType = VBVAEXHOSTCTL_TYPE_HH_ON_HGCM_UNLOAD;
1173 int rc = vdmaVBVACtlSubmitSync(pVdma, &HCtl, VBVAEXHOSTCTL_SOURCE_HOST);
1174
1175 pHgcmEnableData->hRHCmd = pVdma;
1176 pHgcmEnableData->pfnRHCmd = vboxVDMACrHgcmHandleEnableRemainingHostCommand;
1177
1178 if (RT_FAILURE(rc))
1179 {
1180 if (rc == VERR_INVALID_STATE)
1181 rc = VINF_SUCCESS;
1182 else
1183 WARN(("vdmaVBVACtlSubmitSync failed %d\n", rc));
1184 }
1185
1186 return rc;
1187}
1188
/* Re-enables the HGCM command path. The ENABLE ctl makes the backend drain
 * any remaining VBVA host commands through the ...RemainingHostCommand
 * iterator, which also disables VBVA on its first invocation. */
static int vboxVDMACrHgcmHandleEnable(struct VBOXVDMAHOST *pVdma)
{
    VBOXCRCMDCTL_ENABLE Enable;
    Enable.Hdr.enmType = VBOXCRCMDCTL_TYPE_ENABLE;
    Enable.Data.hRHCmd = pVdma;
    Enable.Data.pfnRHCmd = vboxVDMACrHgcmHandleEnableRemainingHostCommand;

    int rc = vboxVDMACrHgcmSubmitSync(pVdma, &Enable.Hdr, sizeof (Enable));
    Assert(!pVdma->pCurRemainingHostCtl);
    if (RT_SUCCESS(rc))
    {
        /* on success the iterator must have run to completion and disabled VBVA */
        Assert(!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva));
        return VINF_SUCCESS;
    }

    Assert(VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva));
    WARN(("vboxVDMACrHgcmSubmitSync failed %d\n", rc));

    return rc;
}
1209
1210static int vdmaVBVAEnableProcess(struct VBOXVDMAHOST *pVdma, uint32_t u32Offset)
1211{
1212 if (VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
1213 {
1214 WARN(("vdma VBVA is already enabled\n"));
1215 return VERR_INVALID_STATE;
1216 }
1217
1218 VBVABUFFER *pVBVA = (VBVABUFFER *)HGSMIOffsetToPointerHost(pVdma->pHgsmi, u32Offset);
1219 if (!pVBVA)
1220 {
1221 WARN(("invalid offset %d\n", u32Offset));
1222 return VERR_INVALID_PARAMETER;
1223 }
1224
1225 if (!pVdma->CrSrvInfo.pfnEnable)
1226 {
1227#ifdef DEBUG_misha
1228 WARN(("pfnEnable is NULL\n"));
1229 return VERR_NOT_SUPPORTED;
1230#endif
1231 }
1232
1233 int rc = VBoxVBVAExHSEnable(&pVdma->CmdVbva, pVBVA);
1234 if (RT_SUCCESS(rc))
1235 {
1236 VBOXCRCMDCTL_DISABLE Disable;
1237 Disable.Hdr.enmType = VBOXCRCMDCTL_TYPE_DISABLE;
1238 Disable.Data.hNotifyTerm = pVdma;
1239 Disable.Data.pfnNotifyTerm = vboxVDMACrHgcmNotifyTerminatingCb;
1240 Disable.Data.pfnNotifyTermDone = vboxVDMACrHgcmNotifyTerminatingDoneCb;
1241 rc = vboxVDMACrHgcmSubmitSync(pVdma, &Disable.Hdr, sizeof (Disable));
1242 if (RT_SUCCESS(rc))
1243 {
1244 PVGASTATE pVGAState = pVdma->pVGAState;
1245 VBOXCRCMD_SVRENABLE_INFO Info;
1246 Info.hCltScr = pVGAState->pDrv;
1247 Info.pfnCltScrUpdateBegin = pVGAState->pDrv->pfnVBVAUpdateBegin;
1248 Info.pfnCltScrUpdateProcess = pVGAState->pDrv->pfnVBVAUpdateProcess;
1249 Info.pfnCltScrUpdateEnd = pVGAState->pDrv->pfnVBVAUpdateEnd;
1250 rc = pVdma->CrSrvInfo.pfnEnable(pVdma->CrSrvInfo.hSvr, &Info);
1251 if (RT_SUCCESS(rc))
1252 return VINF_SUCCESS;
1253 else
1254 WARN(("pfnEnable failed %d\n", rc));
1255
1256 vboxVDMACrHgcmHandleEnable(pVdma);
1257 }
1258 else
1259 WARN(("vboxVDMACrHgcmSubmitSync failed %d\n", rc));
1260
1261 VBoxVBVAExHSDisable(&pVdma->CmdVbva);
1262 }
1263 else
1264 WARN(("VBoxVBVAExHSEnable failed %d\n", rc));
1265
1266 return rc;
1267}
1268
/* Disables the VBVA command path; when fDoHgcmEnable is set the HGCM path is
 * re-enabled so host commands keep flowing (in order) over HGCM. */
static int vdmaVBVADisableProcess(struct VBOXVDMAHOST *pVdma, bool fDoHgcmEnable)
{
    if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
    {
        Log(("vdma VBVA is already disabled\n"));
        return VINF_SUCCESS;
    }

    /* NOTE(review): pfnDisable is called unconditionally here, while
     * vdmaVBVAEnableProcess treats a NULL pfnEnable as possible - confirm the
     * server callback table is always populated when VBVA is enabled. */
    int rc = pVdma->CrSrvInfo.pfnDisable(pVdma->CrSrvInfo.hSvr);
    if (RT_SUCCESS(rc))
    {
        if (fDoHgcmEnable)
        {
            /* disable is a bit tricky
             * we need to ensure the host ctl commands do not come out of order
             * and do not come over HGCM channel until after it is enabled */
            rc = vboxVDMACrHgcmHandleEnable(pVdma);
            if (RT_SUCCESS(rc))
                return rc;

            /* HGCM enable failed - roll the server back to VBVA mode; the
             * pfnEnable status is intentionally ignored (best effort). */
            PVGASTATE pVGAState = pVdma->pVGAState;
            VBOXCRCMD_SVRENABLE_INFO Info;
            Info.hCltScr = pVGAState->pDrv;
            Info.pfnCltScrUpdateBegin = pVGAState->pDrv->pfnVBVAUpdateBegin;
            Info.pfnCltScrUpdateProcess = pVGAState->pDrv->pfnVBVAUpdateProcess;
            Info.pfnCltScrUpdateEnd = pVGAState->pDrv->pfnVBVAUpdateEnd;
            pVdma->CrSrvInfo.pfnEnable(pVdma->CrSrvInfo.hSvr, &Info);
        }
    }
    else
        WARN(("pfnDisable failed %d\n", rc));

    return rc;
}
1303
1304static int vboxVDMACrHostCtlProcess(struct VBOXVDMAHOST *pVdma, VBVAEXHOSTCTL *pCmd, bool *pfContinue)
1305{
1306 *pfContinue = true;
1307
1308 switch (pCmd->enmType)
1309 {
1310 case VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE:
1311 {
1312 if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
1313 {
1314 WARN(("VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE for disabled vdma VBVA\n"));
1315 return VERR_INVALID_STATE;
1316 }
1317 return pVdma->CrSrvInfo.pfnHostCtl(pVdma->CrSrvInfo.hSvr, pCmd->u.cmd.pu8Cmd, pCmd->u.cmd.cbCmd);
1318 }
1319 case VBVAEXHOSTCTL_TYPE_GHH_DISABLE:
1320 {
1321 int rc = vdmaVBVADisableProcess(pVdma, true);
1322 if (RT_FAILURE(rc))
1323 {
1324 WARN(("vdmaVBVADisableProcess failed %d\n", rc));
1325 return rc;
1326 }
1327
1328 return VBoxVDMAThreadTerm(&pVdma->Thread, NULL, NULL, false);
1329 }
1330 case VBVAEXHOSTCTL_TYPE_HH_ON_HGCM_UNLOAD:
1331 {
1332 int rc = vdmaVBVADisableProcess(pVdma, false);
1333 if (RT_FAILURE(rc))
1334 {
1335 WARN(("vdmaVBVADisableProcess failed %d\n", rc));
1336 return rc;
1337 }
1338
1339 rc = VBoxVDMAThreadTerm(&pVdma->Thread, NULL, NULL, true);
1340 if (RT_FAILURE(rc))
1341 {
1342 WARN(("VBoxVDMAThreadTerm failed %d\n", rc));
1343 return rc;
1344 }
1345
1346 *pfContinue = false;
1347 return VINF_SUCCESS;
1348 }
1349 case VBVAEXHOSTCTL_TYPE_HH_SAVESTATE:
1350 {
1351 PVGASTATE pVGAState = pVdma->pVGAState;
1352 uint8_t * pu8VramBase = pVGAState->vram_ptrR3;
1353 int rc = VBoxVBVAExHSSaveState(&pVdma->CmdVbva, pu8VramBase, pCmd->u.state.pSSM);
1354 if (RT_FAILURE(rc))
1355 {
1356 WARN(("VBoxVBVAExHSSaveState failed %d\n", rc));
1357 return rc;
1358 }
1359 return pVdma->CrSrvInfo.pfnSaveState(pVdma->CrSrvInfo.hSvr, pCmd->u.state.pSSM);
1360 }
1361 case VBVAEXHOSTCTL_TYPE_HH_LOADSTATE:
1362 {
1363 PVGASTATE pVGAState = pVdma->pVGAState;
1364 uint8_t * pu8VramBase = pVGAState->vram_ptrR3;
1365
1366 int rc = VBoxVBVAExHSLoadState(&pVdma->CmdVbva, pu8VramBase, pCmd->u.state.pSSM, pCmd->u.state.u32Version);
1367 if (RT_FAILURE(rc))
1368 {
1369 WARN(("VBoxVBVAExHSSaveState failed %d\n", rc));
1370 return rc;
1371 }
1372
1373 rc = pVdma->CrSrvInfo.pfnLoadState(pVdma->CrSrvInfo.hSvr, pCmd->u.state.pSSM, pCmd->u.state.u32Version);
1374 if (RT_FAILURE(rc))
1375 {
1376 WARN(("pfnLoadState failed %d\n", rc));
1377 return rc;
1378 }
1379
1380 return VINF_SUCCESS;
1381 }
1382 case VBVAEXHOSTCTL_TYPE_HH_LOADSTATE_DONE:
1383 {
1384 PVGASTATE pVGAState = pVdma->pVGAState;
1385
1386 for (uint32_t i = 0; i < pVGAState->cMonitors; ++i)
1387 {
1388 VBVAINFOSCREEN CurScreen;
1389 VBVAINFOVIEW CurView;
1390
1391 int rc = VBVAGetInfoViewAndScreen(pVGAState, i, &CurView, &CurScreen);
1392 if (RT_FAILURE(rc))
1393 {
1394 WARN(("VBVAGetInfoViewAndScreen failed %d\n", rc));
1395 return rc;
1396 }
1397
1398 rc = VBVAInfoScreen(pVGAState, &CurScreen);
1399 if (RT_FAILURE(rc))
1400 {
1401 WARN(("VBVAInfoScreen failed %d\n", rc));
1402 return rc;
1403 }
1404 }
1405
1406 return VINF_SUCCESS;
1407 }
1408 default:
1409 WARN(("unexpected host ctl type %d\n", pCmd->enmType));
1410 return VERR_INVALID_PARAMETER;
1411 }
1412}
1413
/* Applies a single resize entry: updates the view/screen configuration for
 * every monitor set in the entry's target map and forwards the resize to the
 * chromium server. */
static int vboxVDMACrGuestCtlResizeEntryProcess(struct VBOXVDMAHOST *pVdma, VBOXCMDVBVA_RESIZE_ENTRY *pEntry)
{
    PVGASTATE pVGAState = pVdma->pVGAState;
    VBVAINFOSCREEN Screen = pEntry->Screen;
    VBVAINFOVIEW View;
    VBOXCMDVBVA_SCREENMAP_DECL(uint32_t, aTargetMap);
    uint32_t u32ViewIndex = Screen.u32ViewIndex;
    uint16_t u16Flags = Screen.u16Flags;
    bool fDisable = false;

    /* work on a local copy of the target bitmap, clipped to the real monitor count */
    memcpy(aTargetMap, pEntry->aTargetMap, sizeof (aTargetMap));

    ASMBitClearRange(aTargetMap, pVGAState->cMonitors, VBOX_VIDEO_MAX_SCREENS);

    if (u16Flags & VBVA_SCREEN_F_DISABLED)
    {
        /* for a disable request only the view index is meaningful */
        fDisable = true;
        memset(&Screen, 0, sizeof (Screen));
        Screen.u32ViewIndex = u32ViewIndex;
        Screen.u16Flags = VBVA_SCREEN_F_ACTIVE | VBVA_SCREEN_F_DISABLED;
    }

    /* NOTE(review): the comparison is '>', so u32ViewIndex == cMonitors slips
     * through - looks like it should be '>='; confirm against the callers. */
    if (u32ViewIndex > pVGAState->cMonitors)
    {
        if (u32ViewIndex != 0xffffffff)
        {
            WARN(("invalid view index\n"));
            return VERR_INVALID_PARAMETER;
        }
        else if (!fDisable)
        {
            WARN(("0xffffffff view index only valid for disable requests\n"));
            return VERR_INVALID_PARAMETER;
        }
    }

    View.u32ViewOffset = 0;
    View.u32ViewSize = Screen.u32LineSize * Screen.u32Height + Screen.u32StartOffset;
    View.u32MaxScreenSize = View.u32ViewSize + Screen.u32Width + 1; /* <- make VBVAInfoScreen logic (offEnd < pView->u32MaxScreenSize) happy */

    int rc = VINF_SUCCESS;

    /* apply the new configuration to every monitor set in the target map */
    for (int i = ASMBitFirstSet(aTargetMap, pVGAState->cMonitors);
            i >= 0;
            i = ASMBitNextSet(aTargetMap, pVGAState->cMonitors, i))
    {
        Screen.u32ViewIndex = i;

        VBVAINFOSCREEN CurScreen;
        VBVAINFOVIEW CurView;

        rc = VBVAGetInfoViewAndScreen(pVGAState, i, &CurView, &CurScreen);
        AssertRC(rc);

        /* skip monitors that already have the requested configuration */
        if (!memcmp(&Screen, &CurScreen, sizeof (CurScreen)))
            continue;

        if (!fDisable || !CurView.u32ViewSize)
        {
            View.u32ViewIndex = Screen.u32ViewIndex;

            rc = VBVAInfoView(pVGAState, &View);
            if (RT_FAILURE(rc))
            {
                WARN(("VBVAInfoView failed %d\n", rc));
                break;
            }
        }

        rc = VBVAInfoScreen(pVGAState, &Screen);
        if (RT_FAILURE(rc))
        {
            WARN(("VBVAInfoScreen failed %d\n", rc));
            break;
        }
    }

    if (RT_FAILURE(rc))
        return rc;

    /* forward the resize (with the original view index) to the chromium server */
    Screen.u32ViewIndex = u32ViewIndex;

    rc = pVdma->CrSrvInfo.pfnResize(pVdma->CrSrvInfo.hSvr, &Screen, aTargetMap);
    if (RT_FAILURE(rc))
        WARN(("pfnResize failed %d\n", rc));

    return rc;
}
1502
1503static int vboxVDMACrGuestCtlProcess(struct VBOXVDMAHOST *pVdma, VBVAEXHOSTCTL *pCmd)
1504{
1505 VBVAEXHOSTCTL_TYPE enmType = pCmd->enmType;
1506 switch (enmType)
1507 {
1508 case VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE:
1509 {
1510 if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
1511 {
1512 WARN(("VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE for disabled vdma VBVA\n"));
1513 return VERR_INVALID_STATE;
1514 }
1515 return pVdma->CrSrvInfo.pfnGuestCtl(pVdma->CrSrvInfo.hSvr, pCmd->u.cmd.pu8Cmd, pCmd->u.cmd.cbCmd);
1516 }
1517 case VBVAEXHOSTCTL_TYPE_GHH_RESIZE:
1518 {
1519 if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
1520 {
1521 WARN(("VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE for disabled vdma VBVA\n"));
1522 return VERR_INVALID_STATE;
1523 }
1524
1525 uint32_t cbCmd = pCmd->u.cmd.cbCmd;
1526
1527 if (cbCmd % sizeof (VBOXCMDVBVA_RESIZE_ENTRY))
1528 {
1529 WARN(("invalid buffer size\n"));
1530 return VERR_INVALID_PARAMETER;
1531 }
1532
1533 uint32_t cElements = cbCmd / sizeof (VBOXCMDVBVA_RESIZE_ENTRY);
1534 if (!cElements)
1535 {
1536 WARN(("invalid buffer size\n"));
1537 return VERR_INVALID_PARAMETER;
1538 }
1539
1540 VBOXCMDVBVA_RESIZE *pResize = (VBOXCMDVBVA_RESIZE*)pCmd->u.cmd.pu8Cmd;
1541
1542 int rc = VINF_SUCCESS;
1543
1544 for (uint32_t i = 0; i < cElements; ++i)
1545 {
1546 VBOXCMDVBVA_RESIZE_ENTRY *pEntry = &pResize->aEntries[i];
1547 rc = vboxVDMACrGuestCtlResizeEntryProcess(pVdma, pEntry);
1548 if (RT_FAILURE(rc))
1549 {
1550 WARN(("vboxVDMACrGuestCtlResizeEntryProcess failed %d\n", rc));
1551 break;
1552 }
1553 }
1554 return rc;
1555 }
1556 case VBVAEXHOSTCTL_TYPE_GHH_ENABLE:
1557 case VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED:
1558 {
1559 VBVAENABLE *pEnable = (VBVAENABLE *)pCmd->u.cmd.pu8Cmd;
1560 Assert(pCmd->u.cmd.cbCmd == sizeof (VBVAENABLE));
1561 uint32_t u32Offset = pEnable->u32Offset;
1562 int rc = vdmaVBVAEnableProcess(pVdma, u32Offset);
1563 if (!RT_SUCCESS(rc))
1564 {
1565 WARN(("vdmaVBVAEnableProcess failed %d\n", rc));
1566 return rc;
1567 }
1568
1569 if (enmType == VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED)
1570 {
1571 rc = VBoxVBVAExHPPause(&pVdma->CmdVbva);
1572 if (!RT_SUCCESS(rc))
1573 {
1574 WARN(("VBoxVBVAExHPPause failed %d\n", rc));
1575 return rc;
1576 }
1577 }
1578
1579 return VINF_SUCCESS;
1580 }
1581 case VBVAEXHOSTCTL_TYPE_GHH_DISABLE:
1582 {
1583 int rc = vdmaVBVADisableProcess(pVdma, true);
1584 if (RT_FAILURE(rc))
1585 {
1586 WARN(("vdmaVBVADisableProcess failed %d\n", rc));
1587 return rc;
1588 }
1589
1590 return VBoxVDMAThreadTerm(&pVdma->Thread, NULL, NULL, false);
1591 }
1592 default:
1593 WARN(("unexpected ctl type %d\n", pCmd->enmType));
1594 return VERR_INVALID_PARAMETER;
1595 }
1596}
1597
1598/**
1599 * @param fIn - whether this is a page in or out op.
1600 * the direction is VRA#M - related, so fIn == true - transfer to VRAM); false - transfer from VRAM
1601 */
1602static int vboxVDMACrCmdVbvaProcessPagingEl(PPDMDEVINS pDevIns, VBOXCMDVBVAPAGEIDX iPage, uint8_t *pu8Vram, bool fIn)
1603{
1604 RTGCPHYS phPage = (RTGCPHYS)(iPage << PAGE_SHIFT);
1605 PGMPAGEMAPLOCK Lock;
1606 int rc;
1607
1608 if (fIn)
1609 {
1610 const void * pvPage;
1611 rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phPage, 0, &pvPage, &Lock);
1612 if (!RT_SUCCESS(rc))
1613 {
1614 WARN(("PDMDevHlpPhysGCPhys2CCPtrReadOnly failed %d", rc));
1615 return rc;
1616 }
1617
1618 memcpy(pu8Vram, pvPage, PAGE_SIZE);
1619
1620 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
1621 }
1622 else
1623 {
1624 void * pvPage;
1625 rc = PDMDevHlpPhysGCPhys2CCPtr(pDevIns, phPage, 0, &pvPage, &Lock);
1626 if (!RT_SUCCESS(rc))
1627 {
1628 WARN(("PDMDevHlpPhysGCPhys2CCPtr failed %d", rc));
1629 return rc;
1630 }
1631
1632 memcpy(pvPage, pu8Vram, PAGE_SIZE);
1633
1634 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
1635 }
1636
1637 return VINF_SUCCESS;
1638}
1639
1640static int vboxVDMACrCmdVbvaProcessPagingEls(PPDMDEVINS pDevIns, const VBOXCMDVBVAPAGEIDX *piPages, uint32_t cPages, uint8_t *pu8Vram, bool fIn)
1641{
1642 for (uint32_t i = 0; i < cPages; ++i, pu8Vram += PAGE_SIZE)
1643 {
1644 int rc = vboxVDMACrCmdVbvaProcessPagingEl(pDevIns, piPages[i], pu8Vram, fIn);
1645 if (!RT_SUCCESS(rc))
1646 {
1647 WARN(("vboxVDMACrCmdVbvaProcessPagingEl failed %d", rc));
1648 return rc;
1649 }
1650 }
1651
1652 return VINF_SUCCESS;
1653}
1654
1655static int8_t vboxVDMACrCmdVbvaPagingDataInit(PVGASTATE pVGAState, const VBOXCMDVBVA_HDR *pHdr, const VBOXCMDVBVA_PAGING_TRANSFER_DATA *pData, uint32_t cbCmd,
1656 const VBOXCMDVBVAPAGEIDX **ppPages, VBOXCMDVBVAPAGEIDX *pcPages,
1657 uint8_t **ppu8Vram, bool *pfIn)
1658{
1659 if (cbCmd < sizeof (VBOXCMDVBVA_PAGING_TRANSFER))
1660 {
1661 WARN(("cmd too small"));
1662 return -1;
1663 }
1664
1665 VBOXCMDVBVAPAGEIDX cPages = cbCmd - RT_OFFSETOF(VBOXCMDVBVA_PAGING_TRANSFER, Data.aPageNumbers);
1666 if (cPages % sizeof (VBOXCMDVBVAPAGEIDX))
1667 {
1668 WARN(("invalid cmd size"));
1669 return -1;
1670 }
1671 cPages /= sizeof (VBOXCMDVBVAPAGEIDX);
1672
1673 VBOXCMDVBVAOFFSET offVRAM = pData->Alloc.u.offVRAM;
1674 if (offVRAM & PAGE_OFFSET_MASK)
1675 {
1676 WARN(("offVRAM address is not on page boundary\n"));
1677 return -1;
1678 }
1679 const VBOXCMDVBVAPAGEIDX *pPages = pData->aPageNumbers;
1680
1681 uint8_t * pu8VramBase = pVGAState->vram_ptrR3;
1682 uint8_t *pu8VramMax = pu8VramBase + pVGAState->vram_size;
1683 if (offVRAM >= pVGAState->vram_size)
1684 {
1685 WARN(("invalid vram offset"));
1686 return -1;
1687 }
1688
1689 if (offVRAM + (cPages << PAGE_SHIFT) >= pVGAState->vram_size)
1690 {
1691 WARN(("invalid cPages"));
1692 return -1;
1693 }
1694
1695 uint8_t *pu8Vram = pu8VramBase + offVRAM;
1696 bool fIn = !!(pHdr->u8Flags & VBOXCMDVBVA_OPF_PAGING_TRANSFER_IN);
1697
1698 *ppPages = pPages;
1699 *pcPages = cPages;
1700 *ppu8Vram = pu8Vram;
1701 *pfIn = fIn;
1702 return 0;
1703}
1704
1705static int8_t vboxVDMACrCmdVbvaPagingFill(PVGASTATE pVGAState, VBOXCMDVBVA_PAGING_FILL *pFill)
1706{
1707 VBOXCMDVBVAOFFSET offVRAM = pFill->offVRAM;
1708 if (offVRAM & PAGE_OFFSET_MASK)
1709 {
1710 WARN(("offVRAM address is not on page boundary\n"));
1711 return -1;
1712 }
1713
1714 uint8_t * pu8VramBase = pVGAState->vram_ptrR3;
1715 uint8_t *pu8VramMax = pu8VramBase + pVGAState->vram_size;
1716 if (offVRAM >= pVGAState->vram_size)
1717 {
1718 WARN(("invalid vram offset"));
1719 return -1;
1720 }
1721
1722 uint32_t cbFill = pFill->u32CbFill;
1723
1724 if (offVRAM + cbFill >= pVGAState->vram_size)
1725 {
1726 WARN(("invalid cPages"));
1727 return -1;
1728 }
1729
1730 uint32_t *pu32Vram = (uint32_t*)(pu8VramBase + offVRAM);
1731 uint32_t u32Color = pFill->u32Pattern;
1732
1733 Assert(!(cbFill % 4));
1734 for (uint32_t i = 0; i < cbFill / 4; ++i)
1735 {
1736 pu32Vram[i] = u32Color;
1737 }
1738
1739 return 0;
1740}
1741
/* Executes a fully assembled (contiguous) VBVA command. Returns 0 on
 * success, -1 on failure; opcodes not handled here go to the chromium server. */
static int8_t vboxVDMACrCmdVbvaProcessCmdData(struct VBOXVDMAHOST *pVdma, const VBOXCMDVBVA_HDR *pCmd, uint32_t cbCmd)
{
    switch (pCmd->u8OpCode)
    {
        case VBOXCMDVBVA_OPTYPE_NOPCMD:
            /* nothing to do */
            return 0;
        case VBOXCMDVBVA_OPTYPE_PAGING_TRANSFER:
        {
            /* copy pages between guest system memory and VRAM */
            PVGASTATE pVGAState = pVdma->pVGAState;
            const VBOXCMDVBVAPAGEIDX *pPages;
            uint32_t cPages;
            uint8_t *pu8Vram;
            bool fIn;
            int8_t i8Result = vboxVDMACrCmdVbvaPagingDataInit(pVGAState, pCmd, &((VBOXCMDVBVA_PAGING_TRANSFER*)pCmd)->Data, cbCmd,
                                                    &pPages, &cPages,
                                                    &pu8Vram, &fIn);
            if (i8Result < 0)
            {
                WARN(("vboxVDMACrCmdVbvaPagingDataInit failed %d", i8Result));
                return i8Result;
            }

            PPDMDEVINS pDevIns = pVGAState->pDevInsR3;
            int rc = vboxVDMACrCmdVbvaProcessPagingEls(pDevIns, pPages, cPages, pu8Vram, fIn);
            if (!RT_SUCCESS(rc))
            {
                WARN(("vboxVDMACrCmdVbvaProcessPagingEls failed %d", rc));
                return -1;
            }

            return 0;
        }
        case VBOXCMDVBVA_OPTYPE_PAGING_FILL:
        {
            /* fill a VRAM range with a constant pattern */
            PVGASTATE pVGAState = pVdma->pVGAState;
            if (cbCmd != sizeof (VBOXCMDVBVA_PAGING_FILL))
            {
                WARN(("cmd too small"));
                return -1;
            }

            return vboxVDMACrCmdVbvaPagingFill(pVGAState, (VBOXCMDVBVA_PAGING_FILL*)pCmd);
        }
        default:
            /* anything else is handled by the chromium server */
            return pVdma->CrSrvInfo.pfnCmd(pVdma->CrSrvInfo.hSvr, pCmd, cbCmd);
    }
}
1789
1790#if 0
1791typedef struct VBOXCMDVBVA_PAGING_TRANSFER
1792{
1793 VBOXCMDVBVA_HDR Hdr;
1794 /* for now can only contain offVRAM.
1795 * paging transfer can NOT be initiated for allocations having host 3D object (hostID) associated */
1796 VBOXCMDVBVA_ALLOCINFO Alloc;
1797 uint32_t u32Reserved;
1798 VBOXCMDVBVA_SYSMEMEL aSysMem[1];
1799} VBOXCMDVBVA_PAGING_TRANSFER;
1800#endif
1801
1802AssertCompile(sizeof (VBOXCMDVBVA_HDR) == 8);
1803AssertCompile(sizeof (VBOXCMDVBVA_ALLOCINFO) == 4);
1804AssertCompile(sizeof (VBOXCMDVBVAPAGEIDX) == 4);
1805AssertCompile(!(PAGE_SIZE % sizeof (VBOXCMDVBVAPAGEIDX)));
1806
1807#define VBOXCMDVBVA_NUM_SYSMEMEL_PER_PAGE (PAGE_SIZE / sizeof (VBOXCMDVBVA_SYSMEMEL))
1808
1809static int8_t vboxVDMACrCmdVbvaProcess(struct VBOXVDMAHOST *pVdma, const VBOXCMDVBVA_HDR *pCmd, uint32_t cbCmd)
1810{
1811 switch (pCmd->u8OpCode)
1812 {
1813 case VBOXCMDVBVA_OPTYPE_SYSMEMCMD:
1814 {
1815 if (cbCmd < sizeof (VBOXCMDVBVA_SYSMEMCMD))
1816 {
1817 WARN(("invalid command size"));
1818 return -1;
1819 }
1820 VBOXCMDVBVA_SYSMEMCMD *pSysmemCmd = (VBOXCMDVBVA_SYSMEMCMD*)pCmd;
1821 const VBOXCMDVBVA_HDR *pRealCmdHdr;
1822 uint32_t cbRealCmd = pCmd->u8Flags;
1823 cbRealCmd |= (pCmd->u.u8PrimaryID << 8);
1824 if (cbRealCmd < sizeof (VBOXCMDVBVA_HDR))
1825 {
1826 WARN(("invalid sysmem cmd size"));
1827 return -1;
1828 }
1829
1830 RTGCPHYS phCmd = (RTGCPHYS)pSysmemCmd->phCmd;
1831
1832 PGMPAGEMAPLOCK Lock;
1833 PVGASTATE pVGAState = pVdma->pVGAState;
1834 PPDMDEVINS pDevIns = pVGAState->pDevInsR3;
1835 const void * pvCmd;
1836 int rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phCmd, 0, &pvCmd, &Lock);
1837 if (!RT_SUCCESS(rc))
1838 {
1839 WARN(("PDMDevHlpPhysGCPhys2CCPtrReadOnly failed %d\n", rc));
1840 return -1;
1841 }
1842
1843 Assert((phCmd & PAGE_OFFSET_MASK) == (((uintptr_t)pvCmd) & PAGE_OFFSET_MASK));
1844
1845 uint32_t cbCmdPart = PAGE_SIZE - (((uintptr_t)pvCmd) & PAGE_OFFSET_MASK);
1846
1847 if (cbRealCmd <= cbCmdPart)
1848 {
1849 pRealCmdHdr = (const VBOXCMDVBVA_HDR *)pvCmd;
1850 uint8_t i8Result = vboxVDMACrCmdVbvaProcessCmdData(pVdma, pRealCmdHdr, cbRealCmd);
1851 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
1852 return i8Result;
1853 }
1854
1855 VBOXCMDVBVA_HDR Hdr;
1856 const void *pvCurCmdTail;
1857 uint32_t cbCurCmdTail;
1858 if (cbCmdPart >= sizeof (*pRealCmdHdr))
1859 {
1860 pRealCmdHdr = (const VBOXCMDVBVA_HDR *)pvCmd;
1861 pvCurCmdTail = (const void*)(pRealCmdHdr + 1);
1862 cbCurCmdTail = cbCmdPart - sizeof (*pRealCmdHdr);
1863 }
1864 else
1865 {
1866 memcpy(&Hdr, pvCmd, cbCmdPart);
1867 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
1868 phCmd += cbCmdPart;
1869 Assert(!(phCmd & PAGE_OFFSET_MASK));
1870 rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phCmd, 0, &pvCmd, &Lock);
1871 if (!RT_SUCCESS(rc))
1872 {
1873 WARN(("PDMDevHlpPhysGCPhys2CCPtrReadOnly failed %d\n", rc));
1874 return -1;
1875 }
1876
1877 cbCmdPart = sizeof (*pRealCmdHdr) - cbCmdPart;
1878 memcpy(((uint8_t*)(&Hdr)) + cbCmdPart, pvCmd, cbCmdPart);
1879 pvCurCmdTail = (const void*)(((uint8_t*)pvCmd) + cbCmdPart);
1880 cbCurCmdTail = PAGE_SIZE - cbCmdPart;
1881 }
1882
1883 if (cbCurCmdTail > cbRealCmd - sizeof (*pRealCmdHdr))
1884 cbCurCmdTail = cbRealCmd - sizeof (*pRealCmdHdr);
1885
1886 int8_t i8Result = 0;
1887
1888 switch (pRealCmdHdr->u8OpCode)
1889 {
1890 case VBOXCMDVBVA_OPTYPE_PAGING_TRANSFER:
1891 {
1892 const uint32_t *pPages;
1893 uint32_t cPages;
1894 uint8_t *pu8Vram;
1895 bool fIn;
1896 i8Result = vboxVDMACrCmdVbvaPagingDataInit(pVGAState, pRealCmdHdr, (const VBOXCMDVBVA_PAGING_TRANSFER_DATA*)pvCurCmdTail, cbRealCmd,
1897 &pPages, &cPages,
1898 &pu8Vram, &fIn);
1899 if (i8Result < 0)
1900 {
1901 WARN(("vboxVDMACrCmdVbvaPagingDataInit failed %d", i8Result));
1902 /* we need to break, not return, to ensure currently locked page is released */
1903 break;
1904 }
1905
1906 if (cbCurCmdTail & 3)
1907 {
1908 WARN(("command is not alligned properly %d", cbCurCmdTail));
1909 i8Result = -1;
1910 /* we need to break, not return, to ensure currently locked page is released */
1911 break;
1912 }
1913
1914 uint32_t cCurPages = cbCurCmdTail / sizeof (VBOXCMDVBVAPAGEIDX);
1915 Assert(cCurPages < cPages);
1916
1917 do
1918 {
1919 rc = vboxVDMACrCmdVbvaProcessPagingEls(pDevIns, pPages, cCurPages, pu8Vram, fIn);
1920 if (!RT_SUCCESS(rc))
1921 {
1922 WARN(("vboxVDMACrCmdVbvaProcessPagingEls failed %d", rc));
1923 i8Result = -1;
1924 /* we need to break, not return, to ensure currently locked page is released */
1925 break;
1926 }
1927
1928 Assert(cPages >= cCurPages);
1929 cPages -= cCurPages;
1930
1931 if (!cPages)
1932 break;
1933
1934 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
1935
1936 Assert(!(phCmd & PAGE_OFFSET_MASK));
1937
1938 phCmd += PAGE_SIZE;
1939 pu8Vram += (cCurPages << PAGE_SHIFT);
1940
1941 rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phCmd, 0, &pvCmd, &Lock);
1942 if (!RT_SUCCESS(rc))
1943 {
1944 WARN(("PDMDevHlpPhysGCPhys2CCPtrReadOnly failed %d\n", rc));
1945 /* the page is not locked, return */
1946 return -1;
1947 }
1948
1949 cCurPages = PAGE_SIZE / sizeof (VBOXCMDVBVAPAGEIDX);
1950 if (cCurPages > cPages)
1951 cCurPages = cPages;
1952 } while (1);
1953 break;
1954 }
1955 default:
1956 WARN(("command can not be splitted"));
1957 i8Result = -1;
1958 break;
1959 }
1960
1961 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
1962 return i8Result;
1963 }
1964 case VBOXCMDVBVA_OPTYPE_COMPLEXCMD:
1965 {
1966 Assert(cbCmd >= sizeof (VBOXCMDVBVA_HDR));
1967 ++pCmd;
1968 cbCmd -= sizeof (*pCmd);
1969 uint32_t cbCurCmd = 0;
1970 for ( ; cbCmd; cbCmd -= cbCurCmd, pCmd = (VBOXCMDVBVA_HDR*)(((uint8_t*)pCmd) + cbCurCmd))
1971 {
1972 if (cbCmd < sizeof (VBOXCMDVBVA_HDR))
1973 {
1974 WARN(("invalid command size"));
1975 return -1;
1976 }
1977
1978 cbCurCmd = pCmd->u2.complexCmdEl.u16CbCmdHost;
1979 if (cbCmd < cbCurCmd)
1980 {
1981 WARN(("invalid command size"));
1982 return -1;
1983 }
1984
1985 int8_t i8Result = vboxVDMACrCmdVbvaProcess(pVdma, pCmd, cbCurCmd);
1986 if (i8Result < 0)
1987 {
1988 WARN(("vboxVDMACrCmdVbvaProcess failed"));
1989 return i8Result;
1990 }
1991 }
1992 return 0;
1993 }
1994 default:
1995 return vboxVDMACrCmdVbvaProcessCmdData(pVdma, pCmd, cbCmd);
1996 }
1997}
1998
/* Entry point for a single VBVA command buffer coming from the guest ring:
 * claims the command, dispatches it, and stores the result in the header. */
static void vboxVDMACrCmdProcess(struct VBOXVDMAHOST *pVdma, uint8_t* pu8Cmd, uint32_t cbCmd)
{
    /* a lone NOP byte is valid and needs no full header */
    if (*pu8Cmd == VBOXCMDVBVA_OPTYPE_NOP)
        return;

    if (cbCmd < sizeof (VBOXCMDVBVA_HDR))
    {
        WARN(("invalid command size"));
        return;
    }

    PVBOXCMDVBVA_HDR pCmd = (PVBOXCMDVBVA_HDR)pu8Cmd;

    /* check if the command is cancelled */
    if (!ASMAtomicCmpXchgU8(&pCmd->u8State, VBOXCMDVBVA_STATE_IN_PROGRESS, VBOXCMDVBVA_STATE_SUBMITTED))
    {
        Assert(pCmd->u8State == VBOXCMDVBVA_STATE_CANCELLED);
        return;
    }

    pCmd->u.i8Result = vboxVDMACrCmdVbvaProcess(pVdma, pCmd, cbCmd);
}
2021
/* Sends the CRHGSMI_SETUP control to the chromium backend and captures the
 * server callback table (CrSrvInfo) from the reply; zeroes it on any failure. */
static int vboxVDMACrCtlHgsmiSetup(struct VBOXVDMAHOST *pVdma)
{
    PVBOXVDMACMD_CHROMIUM_CTL_CRHGSMI_SETUP pCmd = (PVBOXVDMACMD_CHROMIUM_CTL_CRHGSMI_SETUP)
            vboxVDMACrCtlCreate (VBOXVDMACMD_CHROMIUM_CTL_TYPE_CRHGSMI_SETUP, sizeof (*pCmd));
    int rc = VERR_NO_MEMORY;
    if (pCmd)
    {
        PVGASTATE pVGAState = pVdma->pVGAState;
        /* tell the backend where VRAM lives and which LED to drive */
        pCmd->pvVRamBase = pVGAState->vram_ptrR3;
        pCmd->cbVRam = pVGAState->vram_size;
        pCmd->pLed = &pVGAState->Led3D;
        rc = vboxVDMACrCtlPost(pVGAState, &pCmd->Hdr, sizeof (*pCmd));
        if (RT_SUCCESS(rc))
        {
            rc = vboxVDMACrCtlGetRc(&pCmd->Hdr);
            if (RT_SUCCESS(rc))
                pVdma->CrSrvInfo = pCmd->CrCmdServerInfo;
            else if (rc != VERR_NOT_SUPPORTED)
                WARN(("vboxVDMACrCtlGetRc returned %d\n", rc));
        }
        else
            WARN(("vboxVDMACrCtlPost failed %d\n", rc));

        vboxVDMACrCtlRelease(&pCmd->Hdr);
    }

    if (!RT_SUCCESS(rc))
        memset(&pVdma->CrSrvInfo, 0, sizeof (pVdma->CrSrvInfo));

    return rc;
}
2053
2054static int vboxVDMACmdExecBpbTransfer(PVBOXVDMAHOST pVdma, const PVBOXVDMACMD_DMA_BPB_TRANSFER pTransfer, uint32_t cbBuffer);
2055
/* check if this is external cmd to be passed to chromium backend */
static int vboxVDMACmdCheckCrCmd(struct VBOXVDMAHOST *pVdma, PVBOXVDMACBUF_DR pCmdDr, uint32_t cbCmdDr)
{
    PVBOXVDMACMD pDmaCmd = NULL;
    uint32_t cbDmaCmd = 0;
    uint8_t * pvRam = pVdma->pVGAState->vram_ptrR3;
    /* VINF_NOT_SUPPORTED tells the caller to process the command itself */
    int rc = VINF_NOT_SUPPORTED;

    cbDmaCmd = pCmdDr->cbBuf;

    if (pCmdDr->fFlags & VBOXVDMACBUF_FLAG_BUF_FOLLOWS_DR)
    {
        /* the DMA command is embedded right after the descriptor */
        if (cbCmdDr < sizeof (*pCmdDr) + VBOXVDMACMD_HEADER_SIZE())
        {
            AssertMsgFailed(("invalid buffer data!"));
            return VERR_INVALID_PARAMETER;
        }

        /* NOTE(review): this bound compares cbDmaCmd against
         * cbCmdDr - sizeof(*pCmdDr) - header with '<' - verify the intended
         * invariant (declared buffer size vs. descriptor payload) is really
         * this way round. */
        if (cbDmaCmd < cbCmdDr - sizeof (*pCmdDr) - VBOXVDMACMD_HEADER_SIZE())
        {
            AssertMsgFailed(("invalid command buffer data!"));
            return VERR_INVALID_PARAMETER;
        }

        pDmaCmd = VBOXVDMACBUF_DR_TAIL(pCmdDr, VBOXVDMACMD);
    }
    else if (pCmdDr->fFlags & VBOXVDMACBUF_FLAG_BUF_VRAM_OFFSET)
    {
        /* the DMA command lives in VRAM at the given offset */
        VBOXVIDEOOFFSET offBuf = pCmdDr->Location.offVramBuf;
        if (offBuf + cbDmaCmd > pVdma->pVGAState->vram_size)
        {
            AssertMsgFailed(("invalid command buffer data from offset!"));
            return VERR_INVALID_PARAMETER;
        }
        pDmaCmd = (VBOXVDMACMD*)(pvRam + offBuf);
    }

    if (pDmaCmd)
    {
        Assert(cbDmaCmd >= VBOXVDMACMD_HEADER_SIZE());
        uint32_t cbBody = VBOXVDMACMD_BODY_SIZE(cbDmaCmd);

        switch (pDmaCmd->enmType)
        {
            case VBOXVDMACMD_TYPE_CHROMIUM_CMD:
            {
                PVBOXVDMACMD_CHROMIUM_CMD pCrCmd = VBOXVDMACMD_BODY(pDmaCmd, VBOXVDMACMD_CHROMIUM_CMD);
                if (cbBody < sizeof (*pCrCmd))
                {
                    AssertMsgFailed(("invalid chromium command buffer size!"));
                    return VERR_INVALID_PARAMETER;
                }
                PVGASTATE pVGAState = pVdma->pVGAState;
                rc = VINF_SUCCESS;
                if (pVGAState->pDrv->pfnCrHgsmiCommandProcess)
                {
                    /* the backend completes the descriptor asynchronously */
                    VBoxSHGSMICommandMarkAsynchCompletion(pCmdDr);
                    pVGAState->pDrv->pfnCrHgsmiCommandProcess(pVGAState->pDrv, pCrCmd, cbBody);
                    break;
                }
                else
                {
                    Assert(0);
                }

                /* no backend - complete the descriptor immediately */
                int tmpRc = VBoxSHGSMICommandComplete (pVdma->pHgsmi, pCmdDr);
                AssertRC(tmpRc);
                break;
            }
            case VBOXVDMACMD_TYPE_DMA_BPB_TRANSFER:
            {
                PVBOXVDMACMD_DMA_BPB_TRANSFER pTransfer = VBOXVDMACMD_BODY(pDmaCmd, VBOXVDMACMD_DMA_BPB_TRANSFER);
                if (cbBody < sizeof (*pTransfer))
                {
                    AssertMsgFailed(("invalid bpb transfer buffer size!"));
                    return VERR_INVALID_PARAMETER;
                }

                /* execute the transfer here and complete the descriptor on success */
                rc = vboxVDMACmdExecBpbTransfer(pVdma, pTransfer, sizeof (*pTransfer));
                AssertRC(rc);
                if (RT_SUCCESS(rc))
                {
                    pCmdDr->rc = VINF_SUCCESS;
                    rc = VBoxSHGSMICommandComplete (pVdma->pHgsmi, pCmdDr);
                    AssertRC(rc);
                    rc = VINF_SUCCESS;
                }
                break;
            }
            default:
                break;
        }
    }
    return rc;
}
2151
2152int vboxVDMACrHgsmiCommandCompleteAsync(PPDMIDISPLAYVBVACALLBACKS pInterface, PVBOXVDMACMD_CHROMIUM_CMD pCmd, int rc)
2153{
2154 PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
2155 PHGSMIINSTANCE pIns = pVGAState->pHGSMI;
2156 VBOXVDMACMD *pDmaHdr = VBOXVDMACMD_FROM_BODY(pCmd);
2157 VBOXVDMACBUF_DR *pDr = VBOXVDMACBUF_DR_FROM_TAIL(pDmaHdr);
2158 AssertRC(rc);
2159 pDr->rc = rc;
2160
2161 Assert(pVGAState->fGuestCaps & VBVACAPS_COMPLETEGCMD_BY_IOREAD);
2162 rc = VBoxSHGSMICommandComplete(pIns, pDr);
2163 AssertRC(rc);
2164 return rc;
2165}
2166
2167int vboxVDMACrHgsmiControlCompleteAsync(PPDMIDISPLAYVBVACALLBACKS pInterface, PVBOXVDMACMD_CHROMIUM_CTL pCmd, int rc)
2168{
2169 PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
2170 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pCmdPrivate = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
2171 pCmdPrivate->rc = rc;
2172 if (pCmdPrivate->pfnCompletion)
2173 {
2174 pCmdPrivate->pfnCompletion(pVGAState, pCmd, pCmdPrivate->pvCompletion);
2175 }
2176 return VINF_SUCCESS;
2177}
2178
2179static int vboxVDMACmdExecBltPerform(PVBOXVDMAHOST pVdma,
2180 uint8_t *pvDstSurf, const uint8_t *pvSrcSurf,
2181 const PVBOXVDMA_SURF_DESC pDstDesc, const PVBOXVDMA_SURF_DESC pSrcDesc,
2182 const VBOXVDMA_RECTL * pDstRectl, const VBOXVDMA_RECTL * pSrcRectl)
2183{
2184 /* we do not support color conversion */
2185 Assert(pDstDesc->format == pSrcDesc->format);
2186 /* we do not support stretching */
2187 Assert(pDstRectl->height == pSrcRectl->height);
2188 Assert(pDstRectl->width == pSrcRectl->width);
2189 if (pDstDesc->format != pSrcDesc->format)
2190 return VERR_INVALID_FUNCTION;
2191 if (pDstDesc->width == pDstRectl->width
2192 && pSrcDesc->width == pSrcRectl->width
2193 && pSrcDesc->width == pDstDesc->width)
2194 {
2195 Assert(!pDstRectl->left);
2196 Assert(!pSrcRectl->left);
2197 uint32_t cbOff = pDstDesc->pitch * pDstRectl->top;
2198 uint32_t cbSize = pDstDesc->pitch * pDstRectl->height;
2199 memcpy(pvDstSurf + cbOff, pvSrcSurf + cbOff, cbSize);
2200 }
2201 else
2202 {
2203 uint32_t offDstLineStart = pDstRectl->left * pDstDesc->bpp >> 3;
2204 uint32_t offDstLineEnd = ((pDstRectl->left * pDstDesc->bpp + 7) >> 3) + ((pDstDesc->bpp * pDstRectl->width + 7) >> 3);
2205 uint32_t cbDstLine = offDstLineEnd - offDstLineStart;
2206 uint32_t offDstStart = pDstDesc->pitch * pDstRectl->top + offDstLineStart;
2207 Assert(cbDstLine <= pDstDesc->pitch);
2208 uint32_t cbDstSkip = pDstDesc->pitch;
2209 uint8_t * pvDstStart = pvDstSurf + offDstStart;
2210
2211 uint32_t offSrcLineStart = pSrcRectl->left * pSrcDesc->bpp >> 3;
2212 uint32_t offSrcLineEnd = ((pSrcRectl->left * pSrcDesc->bpp + 7) >> 3) + ((pSrcDesc->bpp * pSrcRectl->width + 7) >> 3);
2213 uint32_t cbSrcLine = offSrcLineEnd - offSrcLineStart;
2214 uint32_t offSrcStart = pSrcDesc->pitch * pSrcRectl->top + offSrcLineStart;
2215 Assert(cbSrcLine <= pSrcDesc->pitch);
2216 uint32_t cbSrcSkip = pSrcDesc->pitch;
2217 const uint8_t * pvSrcStart = pvSrcSurf + offSrcStart;
2218
2219 Assert(cbDstLine == cbSrcLine);
2220
2221 for (uint32_t i = 0; ; ++i)
2222 {
2223 memcpy (pvDstStart, pvSrcStart, cbDstLine);
2224 if (i == pDstRectl->height)
2225 break;
2226 pvDstStart += cbDstSkip;
2227 pvSrcStart += cbSrcSkip;
2228 }
2229 }
2230 return VINF_SUCCESS;
2231}
2232
2233static void vboxVDMARectlUnite(VBOXVDMA_RECTL * pRectl1, const VBOXVDMA_RECTL * pRectl2)
2234{
2235 if (!pRectl1->width)
2236 *pRectl1 = *pRectl2;
2237 else
2238 {
2239 int16_t x21 = pRectl1->left + pRectl1->width;
2240 int16_t x22 = pRectl2->left + pRectl2->width;
2241 if (pRectl1->left > pRectl2->left)
2242 {
2243 pRectl1->left = pRectl2->left;
2244 pRectl1->width = x21 < x22 ? x22 - pRectl1->left : x21 - pRectl1->left;
2245 }
2246 else if (x21 < x22)
2247 pRectl1->width = x22 - pRectl1->left;
2248
2249 x21 = pRectl1->top + pRectl1->height;
2250 x22 = pRectl2->top + pRectl2->height;
2251 if (pRectl1->top > pRectl2->top)
2252 {
2253 pRectl1->top = pRectl2->top;
2254 pRectl1->height = x21 < x22 ? x22 - pRectl1->top : x21 - pRectl1->top;
2255 }
2256 else if (x21 < x22)
2257 pRectl1->height = x22 - pRectl1->top;
2258 }
2259}
2260
2261/*
2262 * @return on success the number of bytes the command contained, otherwise - VERR_xxx error code
2263 */
2264static int vboxVDMACmdExecBlt(PVBOXVDMAHOST pVdma, const PVBOXVDMACMD_DMA_PRESENT_BLT pBlt, uint32_t cbBuffer)
2265{
2266 const uint32_t cbBlt = VBOXVDMACMD_BODY_FIELD_OFFSET(uint32_t, VBOXVDMACMD_DMA_PRESENT_BLT, aDstSubRects[pBlt->cDstSubRects]);
2267 Assert(cbBlt <= cbBuffer);
2268 if (cbBuffer < cbBlt)
2269 return VERR_INVALID_FUNCTION;
2270
2271 /* we do not support stretching for now */
2272 Assert(pBlt->srcRectl.width == pBlt->dstRectl.width);
2273 Assert(pBlt->srcRectl.height == pBlt->dstRectl.height);
2274 if (pBlt->srcRectl.width != pBlt->dstRectl.width)
2275 return VERR_INVALID_FUNCTION;
2276 if (pBlt->srcRectl.height != pBlt->dstRectl.height)
2277 return VERR_INVALID_FUNCTION;
2278 Assert(pBlt->cDstSubRects);
2279
2280 uint8_t * pvRam = pVdma->pVGAState->vram_ptrR3;
2281 VBOXVDMA_RECTL updateRectl = {0, 0, 0, 0};
2282
2283 if (pBlt->cDstSubRects)
2284 {
2285 VBOXVDMA_RECTL dstRectl, srcRectl;
2286 const VBOXVDMA_RECTL *pDstRectl, *pSrcRectl;
2287 for (uint32_t i = 0; i < pBlt->cDstSubRects; ++i)
2288 {
2289 pDstRectl = &pBlt->aDstSubRects[i];
2290 if (pBlt->dstRectl.left || pBlt->dstRectl.top)
2291 {
2292 dstRectl.left = pDstRectl->left + pBlt->dstRectl.left;
2293 dstRectl.top = pDstRectl->top + pBlt->dstRectl.top;
2294 dstRectl.width = pDstRectl->width;
2295 dstRectl.height = pDstRectl->height;
2296 pDstRectl = &dstRectl;
2297 }
2298
2299 pSrcRectl = &pBlt->aDstSubRects[i];
2300 if (pBlt->srcRectl.left || pBlt->srcRectl.top)
2301 {
2302 srcRectl.left = pSrcRectl->left + pBlt->srcRectl.left;
2303 srcRectl.top = pSrcRectl->top + pBlt->srcRectl.top;
2304 srcRectl.width = pSrcRectl->width;
2305 srcRectl.height = pSrcRectl->height;
2306 pSrcRectl = &srcRectl;
2307 }
2308
2309 int rc = vboxVDMACmdExecBltPerform(pVdma, pvRam + pBlt->offDst, pvRam + pBlt->offSrc,
2310 &pBlt->dstDesc, &pBlt->srcDesc,
2311 pDstRectl,
2312 pSrcRectl);
2313 AssertRC(rc);
2314 if (!RT_SUCCESS(rc))
2315 return rc;
2316
2317 vboxVDMARectlUnite(&updateRectl, pDstRectl);
2318 }
2319 }
2320 else
2321 {
2322 int rc = vboxVDMACmdExecBltPerform(pVdma, pvRam + pBlt->offDst, pvRam + pBlt->offSrc,
2323 &pBlt->dstDesc, &pBlt->srcDesc,
2324 &pBlt->dstRectl,
2325 &pBlt->srcRectl);
2326 AssertRC(rc);
2327 if (!RT_SUCCESS(rc))
2328 return rc;
2329
2330 vboxVDMARectlUnite(&updateRectl, &pBlt->dstRectl);
2331 }
2332
2333 return cbBlt;
2334}
2335
/* Execute a BPB transfer command: copy cbTransferSize bytes between two
 * locations, each of which is either a VRAM offset or a guest physical buffer.
 * Guest physical buffers are mapped and copied in chunks of at most 0x1000
 * bytes; VRAM is addressed directly.
 *
 * @returns sizeof(*pTransfer) (bytes consumed from the command stream) on
 *          success, otherwise a VERR_xxx status. */
static int vboxVDMACmdExecBpbTransfer(PVBOXVDMAHOST pVdma, const PVBOXVDMACMD_DMA_BPB_TRANSFER pTransfer, uint32_t cbBuffer)
{
    if (cbBuffer < sizeof (*pTransfer))
        return VERR_INVALID_PARAMETER;

    PVGASTATE pVGAState = pVdma->pVGAState;
    uint8_t * pvRam = pVGAState->vram_ptrR3;
    PGMPAGEMAPLOCK SrcLock;
    PGMPAGEMAPLOCK DstLock;
    PPDMDEVINS pDevIns = pVdma->pVGAState->pDevInsR3;
    const void * pvSrc;
    void * pvDst;
    int rc = VINF_SUCCESS;
    uint32_t cbTransfer = pTransfer->cbTransferSize; /* bytes still to copy */
    uint32_t cbTransfered = 0;                       /* bytes copied so far */
    bool bSrcLocked = false;
    bool bDstLocked = false;
    do
    {
        uint32_t cbSubTransfer = cbTransfer;
        /* Resolve the source for this chunk: VRAM offset or mapped guest page. */
        if (pTransfer->fFlags & VBOXVDMACMD_DMA_BPB_TRANSFER_F_SRC_VRAMOFFSET)
        {
            /* NOTE(review): offVramBuf + cbTransfered is not range-checked
             * against vram_size here -- confirm callers validate it. */
            pvSrc  = pvRam + pTransfer->Src.offVramBuf + cbTransfered;
        }
        else
        {
            RTGCPHYS phPage = pTransfer->Src.phBuf;
            phPage += cbTransfered;
            rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phPage, 0, &pvSrc, &SrcLock);
            AssertRC(rc);
            if (RT_SUCCESS(rc))
            {
                bSrcLocked = true;
                /* NOTE(review): the chunk is capped at 0x1000 without
                 * subtracting the offset of phPage within its page; if phBuf is
                 * not page aligned the copy could cross the mapped page --
                 * confirm alignment guarantees. */
                cbSubTransfer = RT_MIN(cbSubTransfer, 0x1000);
            }
            else
            {
                break;
            }
        }

        /* Resolve the destination for this chunk the same way. */
        if (pTransfer->fFlags & VBOXVDMACMD_DMA_BPB_TRANSFER_F_DST_VRAMOFFSET)
        {
            pvDst  = pvRam + pTransfer->Dst.offVramBuf + cbTransfered;
        }
        else
        {
            RTGCPHYS phPage = pTransfer->Dst.phBuf;
            phPage += cbTransfered;
            rc = PDMDevHlpPhysGCPhys2CCPtr(pDevIns, phPage, 0, &pvDst, &DstLock);
            AssertRC(rc);
            if (RT_SUCCESS(rc))
            {
                bDstLocked = true;
                cbSubTransfer = RT_MIN(cbSubTransfer, 0x1000);
            }
            else
            {
                break;
            }
        }

        if (RT_SUCCESS(rc))
        {
            memcpy(pvDst, pvSrc, cbSubTransfer);
            cbTransfer -= cbSubTransfer;
            cbTransfered += cbSubTransfer;
        }
        else
        {
            cbTransfer = 0; /* to break */
        }

        /* Release any page mapping locks taken for this chunk before looping. */
        if (bSrcLocked)
            PDMDevHlpPhysReleasePageMappingLock(pDevIns, &SrcLock);
        if (bDstLocked)
            PDMDevHlpPhysReleasePageMappingLock(pDevIns, &DstLock);
    } while (cbTransfer);

    if (RT_SUCCESS(rc))
        return sizeof (*pTransfer);
    return rc;
}
2419
2420static int vboxVDMACmdExec(PVBOXVDMAHOST pVdma, const uint8_t *pvBuffer, uint32_t cbBuffer)
2421{
2422 do
2423 {
2424 Assert(pvBuffer);
2425 Assert(cbBuffer >= VBOXVDMACMD_HEADER_SIZE());
2426
2427 if (!pvBuffer)
2428 return VERR_INVALID_PARAMETER;
2429 if (cbBuffer < VBOXVDMACMD_HEADER_SIZE())
2430 return VERR_INVALID_PARAMETER;
2431
2432 PVBOXVDMACMD pCmd = (PVBOXVDMACMD)pvBuffer;
2433 uint32_t cbCmd = 0;
2434 switch (pCmd->enmType)
2435 {
2436 case VBOXVDMACMD_TYPE_CHROMIUM_CMD:
2437 {
2438#ifdef VBOXWDDM_TEST_UHGSMI
2439 static int count = 0;
2440 static uint64_t start, end;
2441 if (count==0)
2442 {
2443 start = RTTimeNanoTS();
2444 }
2445 ++count;
2446 if (count==100000)
2447 {
2448 end = RTTimeNanoTS();
2449 float ems = (end-start)/1000000.f;
2450 LogRel(("100000 calls took %i ms, %i cps\n", (int)ems, (int)(100000.f*1000.f/ems) ));
2451 }
2452#endif
2453 /* todo: post the buffer to chromium */
2454 return VINF_SUCCESS;
2455 }
2456 case VBOXVDMACMD_TYPE_DMA_PRESENT_BLT:
2457 {
2458 const PVBOXVDMACMD_DMA_PRESENT_BLT pBlt = VBOXVDMACMD_BODY(pCmd, VBOXVDMACMD_DMA_PRESENT_BLT);
2459 int cbBlt = vboxVDMACmdExecBlt(pVdma, pBlt, cbBuffer);
2460 Assert(cbBlt >= 0);
2461 Assert((uint32_t)cbBlt <= cbBuffer);
2462 if (cbBlt >= 0)
2463 {
2464 if ((uint32_t)cbBlt == cbBuffer)
2465 return VINF_SUCCESS;
2466 else
2467 {
2468 cbBuffer -= (uint32_t)cbBlt;
2469 pvBuffer -= cbBlt;
2470 }
2471 }
2472 else
2473 return cbBlt; /* error */
2474 break;
2475 }
2476 case VBOXVDMACMD_TYPE_DMA_BPB_TRANSFER:
2477 {
2478 const PVBOXVDMACMD_DMA_BPB_TRANSFER pTransfer = VBOXVDMACMD_BODY(pCmd, VBOXVDMACMD_DMA_BPB_TRANSFER);
2479 int cbTransfer = vboxVDMACmdExecBpbTransfer(pVdma, pTransfer, cbBuffer);
2480 Assert(cbTransfer >= 0);
2481 Assert((uint32_t)cbTransfer <= cbBuffer);
2482 if (cbTransfer >= 0)
2483 {
2484 if ((uint32_t)cbTransfer == cbBuffer)
2485 return VINF_SUCCESS;
2486 else
2487 {
2488 cbBuffer -= (uint32_t)cbTransfer;
2489 pvBuffer -= cbTransfer;
2490 }
2491 }
2492 else
2493 return cbTransfer; /* error */
2494 break;
2495 }
2496 case VBOXVDMACMD_TYPE_DMA_NOP:
2497 return VINF_SUCCESS;
2498 case VBOXVDMACMD_TYPE_CHILD_STATUS_IRQ:
2499 return VINF_SUCCESS;
2500 default:
2501 AssertBreakpoint();
2502 return VERR_INVALID_FUNCTION;
2503 }
2504 } while (1);
2505
2506 /* we should not be here */
2507 AssertBreakpoint();
2508 return VERR_INVALID_STATE;
2509}
2510
/* The VDMA worker thread: pulls commands and controls off the shared command
 * VBVA context and dispatches them until the thread is asked to terminate. */
static DECLCALLBACK(int) vboxVDMAWorkerThread(RTTHREAD ThreadSelf, void *pvUser)
{
    PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)pvUser;
    PVGASTATE pVGAState = pVdma->pVGAState;
    VBVAEXHOSTCONTEXT *pCmdVbva = &pVdma->CmdVbva;
    PHGSMIINSTANCE pHgsmi = pVdma->pHgsmi;
    uint8_t *pCmd;
    uint32_t cbCmd;
    int rc;

    VBoxVDMAThreadNotifyConstructSucceeded(&pVdma->Thread, pvUser);

    while (!VBoxVDMAThreadIsTerminating(&pVdma->Thread))
    {
        VBVAEXHOST_DATA_TYPE enmType = VBoxVBVAExHPDataGet(pCmdVbva, &pCmd, &cbCmd);
        switch (enmType)
        {
            case VBVAEXHOST_DATA_TYPE_CMD:
                /* Guest command: process, complete, and raise the IRQ. */
                vboxVDMACrCmdProcess(pVdma, pCmd, cbCmd);
                VBoxVBVAExHPDataCompleteCmd(pCmdVbva, cbCmd);
                VBVARaiseIrqNoWait(pVGAState, 0);
                break;
            case VBVAEXHOST_DATA_TYPE_GUESTCTL:
                rc = vboxVDMACrGuestCtlProcess(pVdma, (VBVAEXHOSTCTL*)pCmd);
                VBoxVBVAExHPDataCompleteCtl(pCmdVbva, (VBVAEXHOSTCTL*)pCmd, rc);
                break;
            case VBVAEXHOST_DATA_TYPE_HOSTCTL:
            {
                bool fContinue = true;
                rc = vboxVDMACrHostCtlProcess(pVdma, (VBVAEXHOSTCTL*)pCmd, &fContinue);
                VBoxVBVAExHPDataCompleteCtl(pCmdVbva, (VBVAEXHOSTCTL*)pCmd, rc);
                if (fContinue)
                    break;
            }
            /* deliberate fallthrough: when fContinue was cleared the thread
             * parks on the event, exactly like the no-data case below */
            case VBVAEXHOST_DATA_TYPE_NO_DATA:
                rc = VBoxVDMAThreadEventWait(&pVdma->Thread, RT_INDEFINITE_WAIT);
                AssertRC(rc);
                break;
            default:
                WARN(("unexpected type %d\n", enmType));
                break;
        }
    }

    VBoxVDMAThreadNotifyTerminatingSucceeded(&pVdma->Thread, pvUser);

    return VINF_SUCCESS;
}
2559
/* Execute a generic VDMA command buffer synchronously and complete the
 * descriptor record back to the guest via SHGSMI.
 *
 * The command payload either follows the DR, lives at a VRAM offset, or is
 * referenced by guest physical address; in the latter case it is mapped
 * read-only for the duration of the call and must not cross a page boundary. */
static void vboxVDMACommandProcess(PVBOXVDMAHOST pVdma, PVBOXVDMACBUF_DR pCmd, uint32_t cbCmd)
{
    PHGSMIINSTANCE pHgsmi = pVdma->pHgsmi;
    const uint8_t * pvBuf;
    PGMPAGEMAPLOCK Lock;
    int rc;
    bool bReleaseLocked = false;

    do
    {
        PPDMDEVINS pDevIns = pVdma->pVGAState->pDevInsR3;

        if (pCmd->fFlags & VBOXVDMACBUF_FLAG_BUF_FOLLOWS_DR)
            pvBuf = VBOXVDMACBUF_DR_TAIL(pCmd, const uint8_t);
        else if (pCmd->fFlags & VBOXVDMACBUF_FLAG_BUF_VRAM_OFFSET)
        {
            /* NOTE(review): offVramBuf/cbBuf are not range-checked against
             * vram_size here -- confirm the caller validated them. */
            uint8_t * pvRam = pVdma->pVGAState->vram_ptrR3;
            pvBuf = pvRam + pCmd->Location.offVramBuf;
        }
        else
        {
            /* Split the guest physical address into page and in-page offset. */
            RTGCPHYS phPage = pCmd->Location.phBuf & ~0xfffULL;
            uint32_t offset = pCmd->Location.phBuf & 0xfff;
            Assert(offset + pCmd->cbBuf <= 0x1000);
            if (offset + pCmd->cbBuf > 0x1000)
            {
                /* @todo: more advanced mechanism of command buffer proc is actually needed */
                rc = VERR_INVALID_PARAMETER;
                break;
            }

            const void * pvPageBuf;
            rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phPage, 0, &pvPageBuf, &Lock);
            AssertRC(rc);
            if (!RT_SUCCESS(rc))
            {
                /* @todo: if (rc == VERR_PGM_PHYS_PAGE_RESERVED) -> fall back on using PGMPhysRead ?? */
                break;
            }

            pvBuf = (const uint8_t *)pvPageBuf;
            pvBuf += offset;

            bReleaseLocked = true;
        }

        rc = vboxVDMACmdExec(pVdma, pvBuf, pCmd->cbBuf);
        AssertRC(rc);

        /* Drop the page mapping lock taken for the guest-physical case. */
        if (bReleaseLocked)
            PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
    } while (0);

    pCmd->rc = rc;

    rc = VBoxSHGSMICommandComplete (pHgsmi, pCmd);
    AssertRC(rc);
}
2618
2619static void vboxVDMAControlProcess(PVBOXVDMAHOST pVdma, PVBOXVDMA_CTL pCmd)
2620{
2621 PHGSMIINSTANCE pHgsmi = pVdma->pHgsmi;
2622 pCmd->i32Result = VINF_SUCCESS;
2623 int rc = VBoxSHGSMICommandComplete (pHgsmi, pCmd);
2624 AssertRC(rc);
2625}
2626
2627#endif /* #ifdef VBOX_WITH_CRHGSMI */
2628
2629#ifdef VBOX_VDMA_WITH_WATCHDOG
2630static DECLCALLBACK(void) vboxVDMAWatchDogTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
2631{
2632 VBOXVDMAHOST *pVdma = (VBOXVDMAHOST *)pvUser;
2633 PVGASTATE pVGAState = pVdma->pVGAState;
2634 VBVARaiseIrq(pVGAState, HGSMIHOSTFLAGS_WATCHDOG);
2635}
2636
2637static int vboxVDMAWatchDogCtl(struct VBOXVDMAHOST *pVdma, uint32_t cMillis)
2638{
2639 PPDMDEVINS pDevIns = pVdma->pVGAState->pDevInsR3;
2640 if (cMillis)
2641 TMTimerSetMillies(pVdma->WatchDogTimer, cMillis);
2642 else
2643 TMTimerStop(pVdma->WatchDogTimer);
2644 return VINF_SUCCESS;
2645}
2646#endif
2647
/* Construct the VDMA host state for the given VGA device instance.
 *
 * Allocates and wires up VBOXVDMAHOST, creates the watchdog timer (when built
 * in) and, with VBOX_WITH_CRHGSMI, the completion event and the shared command
 * VBVA state.  On success pVGAState->pVdma points at the new instance.
 *
 * NOTE(review): cPipeElements is not used by this implementation -- confirm
 * whether callers still need the parameter. */
int vboxVDMAConstruct(PVGASTATE pVGAState, uint32_t cPipeElements)
{
    int rc;
    PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)RTMemAllocZ(sizeof(*pVdma));
    Assert(pVdma);
    if (pVdma)
    {
        pVdma->pHgsmi = pVGAState->pHGSMI;
        pVdma->pVGAState = pVGAState;

#ifdef VBOX_VDMA_WITH_WATCHDOG
        rc = PDMDevHlpTMTimerCreate(pVGAState->pDevInsR3, TMCLOCK_REAL, vboxVDMAWatchDogTimer,
                                    pVdma, TMTIMER_FLAGS_NO_CRIT_SECT,
                                    "VDMA WatchDog Timer", &pVdma->WatchDogTimer);
        AssertRC(rc);
#endif

#ifdef VBOX_WITH_CRHGSMI
        VBoxVDMAThreadInit(&pVdma->Thread);

        rc = RTSemEventMultiCreate(&pVdma->HostCrCtlCompleteEvent);
        if (RT_SUCCESS(rc))
        {
            rc = VBoxVBVAExHSInit(&pVdma->CmdVbva);
            if (RT_SUCCESS(rc))
            {
                pVGAState->pVdma = pVdma;
                int rcIgnored = vboxVDMACrCtlHgsmiSetup(pVdma); NOREF(rcIgnored); /** @todo is this ignoring intentional? */
                return VINF_SUCCESS;

                /* Unreachable: kept as the tail of the error-unwind ladder below. */
                VBoxVBVAExHSTerm(&pVdma->CmdVbva);
            }
            else
                WARN(("VBoxVBVAExHSInit failed %d\n", rc));

            RTSemEventMultiDestroy(pVdma->HostCrCtlCompleteEvent);
        }
        else
            WARN(("RTSemEventMultiCreate failed %d\n", rc));


        RTMemFree(pVdma);
#else
        pVGAState->pVdma = pVdma;
        return VINF_SUCCESS;
#endif
    }
    else
        rc = VERR_OUT_OF_RESOURCES;

    return rc;
}
2700
/* Reset handler: synchronously disable command VBVA processing (CRHGSMI builds
 * only); otherwise there is nothing to reset. */
int vboxVDMAReset(struct VBOXVDMAHOST *pVdma)
{
#ifdef VBOX_WITH_CRHGSMI
    vdmaVBVACtlDisableSync(pVdma);
#endif
    return VINF_SUCCESS;
}
2708
/* Destroy the VDMA host state.  Note the teardown order: disable command VBVA
 * first, then stop the worker thread, then free the shared VBVA state and the
 * completion event, and finally the instance memory itself. */
int vboxVDMADestruct(struct VBOXVDMAHOST *pVdma)
{
#ifdef VBOX_WITH_CRHGSMI
    vdmaVBVACtlDisableSync(pVdma);
    VBoxVDMAThreadCleanup(&pVdma->Thread);
    VBoxVBVAExHSTerm(&pVdma->CmdVbva);
    RTSemEventMultiDestroy(pVdma->HostCrCtlCompleteEvent);
#endif
    RTMemFree(pVdma);
    return VINF_SUCCESS;
}
2720
2721void vboxVDMAControl(struct VBOXVDMAHOST *pVdma, PVBOXVDMA_CTL pCmd, uint32_t cbCmd)
2722{
2723 PHGSMIINSTANCE pIns = pVdma->pHgsmi;
2724
2725 switch (pCmd->enmCtl)
2726 {
2727 case VBOXVDMA_CTL_TYPE_ENABLE:
2728 pCmd->i32Result = VINF_SUCCESS;
2729 break;
2730 case VBOXVDMA_CTL_TYPE_DISABLE:
2731 pCmd->i32Result = VINF_SUCCESS;
2732 break;
2733 case VBOXVDMA_CTL_TYPE_FLUSH:
2734 pCmd->i32Result = VINF_SUCCESS;
2735 break;
2736#ifdef VBOX_VDMA_WITH_WATCHDOG
2737 case VBOXVDMA_CTL_TYPE_WATCHDOG:
2738 pCmd->i32Result = vboxVDMAWatchDogCtl(pVdma, pCmd->u32Offset);
2739 break;
2740#endif
2741 default:
2742 WARN(("cmd not supported"));
2743 pCmd->i32Result = VERR_NOT_SUPPORTED;
2744 }
2745
2746 int rc = VBoxSHGSMICommandComplete (pIns, pCmd);
2747 AssertRC(rc);
2748}
2749
/* Entry point for a guest-submitted VDMA command buffer (VBOXVDMACBUF_DR).
 *
 * With CRHGSMI the command is first offered to the chromium backend; commands
 * it accepts are handled (possibly asynchronously) there.  Everything else is
 * executed via vboxVDMACommandProcess() or failed back to the guest. */
void vboxVDMACommand(struct VBOXVDMAHOST *pVdma, PVBOXVDMACBUF_DR pCmd, uint32_t cbCmd)
{
    int rc = VERR_NOT_IMPLEMENTED;

#ifdef VBOX_WITH_CRHGSMI
    /* chromium commands are processed by crhomium hgcm thread independently from our internal cmd processing pipeline
     * this is why we process them specially */
    rc = vboxVDMACmdCheckCrCmd(pVdma, pCmd, cbCmd);
    if (rc == VINF_SUCCESS)
        return; /* handled (or being handled asynchronously) by the cr backend */

    if (RT_FAILURE(rc))
    {
        /* Malformed command: report the error to the guest right away. */
        pCmd->rc = rc;
        rc = VBoxSHGSMICommandComplete (pVdma->pHgsmi, pCmd);
        AssertRC(rc);
        return;
    }

    /* VINF_NOT_SUPPORTED: run it through the generic executor. */
    vboxVDMACommandProcess(pVdma, pCmd, cbCmd);
#else
    pCmd->rc = rc;
    rc = VBoxSHGSMICommandComplete (pVdma->pHgsmi, pCmd);
    AssertRC(rc);
#endif
}
2776
2777/**/
2778#ifdef VBOX_WITH_CRHGSMI
2779
2780static DECLCALLBACK(void) vdmaVBVACtlSubmitSyncCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl, int rc, void *pvContext);
2781
2782static int vdmaVBVACtlSubmit(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL* pCtl, VBVAEXHOSTCTL_SOURCE enmSource, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
2783{
2784 int rc = VBoxVBVAExHCtlSubmit(&pVdma->CmdVbva, pCtl, enmSource, pfnComplete, pvComplete);
2785 if (RT_SUCCESS(rc))
2786 {
2787 if (rc == VINF_SUCCESS)
2788 return VBoxVDMAThreadEventNotify(&pVdma->Thread);
2789 else
2790 Assert(rc == VINF_ALREADY_INITIALIZED);
2791 }
2792 else
2793 Log(("VBoxVBVAExHCtlSubmit failed %d\n", rc));
2794
2795 return rc;
2796}
2797
/* Completion callback for guest-submitted VBVA controls.
 *
 * pu8Cmd points just past the VBOXCMDVBVA_CTL header (see
 * vdmaVBVACtlGenericGuestSubmit), so stepping back sizeof(VBOXCMDVBVA_CTL)
 * recovers the original guest control, which is then completed via SHGSMI. */
static DECLCALLBACK(void) vboxCmdVBVACmdCtlGuestCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl, int rc, void *pvContext)
{
    PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)pvContext;
    VBOXCMDVBVA_CTL *pGCtl = (VBOXCMDVBVA_CTL*)(pCtl->u.cmd.pu8Cmd - sizeof (VBOXCMDVBVA_CTL));
    AssertRC(rc);
    pGCtl->i32Result = rc;

    Assert(pVdma->pVGAState->fGuestCaps & VBVACAPS_COMPLETEGCMD_BY_IOREAD);
    rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, pGCtl);
    AssertRC(rc);

    /* The control wrapper itself is no longer needed. */
    VBoxVBVAExHCtlFree(pVbva, pCtl);
}
2811
2812static int vdmaVBVACtlGenericSubmit(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL_SOURCE enmSource, VBVAEXHOSTCTL_TYPE enmType, uint8_t* pu8Cmd, uint32_t cbCmd, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
2813{
2814 VBVAEXHOSTCTL* pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva, enmType);
2815 if (!pHCtl)
2816 {
2817 WARN(("VBoxVBVAExHCtlCreate failed\n"));
2818 return VERR_NO_MEMORY;
2819 }
2820
2821 pHCtl->u.cmd.pu8Cmd = pu8Cmd;
2822 pHCtl->u.cmd.cbCmd = cbCmd;
2823 int rc = vdmaVBVACtlSubmit(pVdma, pHCtl, enmSource, pfnComplete, pvComplete);
2824 if (RT_FAILURE(rc))
2825 {
2826 VBoxVBVAExHCtlFree(&pVdma->CmdVbva, pHCtl);
2827 Log(("vdmaVBVACtlSubmit failed rc %d\n", rc));
2828 return rc;;
2829 }
2830 return VINF_SUCCESS;
2831}
2832
/* Submit a guest VBOXCMDVBVA_CTL as a VBVA control of the given type; the
 * control payload is the data following the VBOXCMDVBVA_CTL header.
 *
 * NOTE(review): cbCtl >= sizeof(VBOXCMDVBVA_CTL) is only asserted, not checked
 * at runtime -- confirm the caller validates the guest supplied size. */
static int vdmaVBVACtlGenericGuestSubmit(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL_TYPE enmType, VBOXCMDVBVA_CTL *pCtl, uint32_t cbCtl)
{
    Assert(cbCtl >= sizeof (VBOXCMDVBVA_CTL));
    VBoxSHGSMICommandMarkAsynchCompletion(pCtl);
    int rc = vdmaVBVACtlGenericSubmit(pVdma, VBVAEXHOSTCTL_SOURCE_GUEST, enmType, (uint8_t*)(pCtl+1), cbCtl - sizeof (VBOXCMDVBVA_CTL), vboxCmdVBVACmdCtlGuestCompletion, pVdma);
    if (RT_SUCCESS(rc))
        return VINF_SUCCESS;

    /* Submission failed: complete the control back to the guest with the error. */
    WARN(("vdmaVBVACtlGenericSubmit failed %d\n", rc));
    pCtl->i32Result = rc;
    rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, pCtl);
    AssertRC(rc);
    return VINF_SUCCESS;
}
2847
2848static DECLCALLBACK(void) vboxCmdVBVACmdCtlHostCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl, int rc, void *pvCompletion)
2849{
2850 VBOXCRCMDCTL* pVboxCtl = (VBOXCRCMDCTL*)pCtl->u.cmd.pu8Cmd;
2851 if (pVboxCtl->u.pfnInternal)
2852 ((PFNCRCTLCOMPLETION)pVboxCtl->u.pfnInternal)(pVboxCtl, pCtl->u.cmd.cbCmd, rc, pvCompletion);
2853 VBoxVBVAExHCtlFree(pVbva, pCtl);
2854}
2855
2856static int vdmaVBVACtlOpaqueHostSubmit(PVBOXVDMAHOST pVdma, struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd,
2857 PFNCRCTLCOMPLETION pfnCompletion,
2858 void *pvCompletion)
2859{
2860 pCmd->u.pfnInternal = (void(*)())pfnCompletion;
2861 int rc = vdmaVBVACtlGenericSubmit(pVdma, VBVAEXHOSTCTL_SOURCE_HOST, VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE, (uint8_t*)pCmd, cbCmd, vboxCmdVBVACmdCtlHostCompletion, pvCompletion);
2862 if (RT_FAILURE(rc))
2863 {
2864 if (rc == VERR_INVALID_STATE)
2865 {
2866 pCmd->u.pfnInternal = NULL;
2867 PVGASTATE pVGAState = pVdma->pVGAState;
2868 rc = pVGAState->pDrv->pfnCrHgcmCtlSubmit(pVGAState->pDrv, pCmd, cbCmd, pfnCompletion, pvCompletion);
2869 if (!RT_SUCCESS(rc))
2870 WARN(("pfnCrHgsmiControlProcess failed %d\n", rc));
2871
2872 return rc;
2873 }
2874 WARN(("vdmaVBVACtlGenericSubmit failed %d\n", rc));
2875 return rc;
2876 }
2877
2878 return VINF_SUCCESS;
2879}
2880
2881static DECLCALLBACK(int) vdmaVBVANotifyEnable(PVGASTATE pVGAState)
2882{
2883 for (uint32_t i = 0; i < pVGAState->cMonitors; i++)
2884 {
2885 int rc = pVGAState->pDrv->pfnVBVAEnable (pVGAState->pDrv, i, NULL, true);
2886 if (!RT_SUCCESS(rc))
2887 {
2888 WARN(("pfnVBVAEnable failed %d\n", rc));
2889 for (uint32_t j = 0; j < i; j++)
2890 {
2891 pVGAState->pDrv->pfnVBVADisable (pVGAState->pDrv, j);
2892 }
2893
2894 return rc;
2895 }
2896 }
2897 return VINF_SUCCESS;
2898}
2899
2900static DECLCALLBACK(int) vdmaVBVANotifyDisable(PVGASTATE pVGAState)
2901{
2902 for (uint32_t i = 0; i < pVGAState->cMonitors; i++)
2903 {
2904 pVGAState->pDrv->pfnVBVADisable (pVGAState->pDrv, i);
2905 }
2906 return VINF_SUCCESS;
2907}
2908
/* Callback invoked once the worker thread has been created (or failed to be).
 *
 * Processes the pending enable control on that thread and, when the state
 * change actually took place, notifies Main about the VBVA enable/disable;
 * finally completes the control with the resulting status. */
static DECLCALLBACK(void) vdmaVBVACtlThreadCreatedEnable(struct VBOXVDMATHREAD *pThread, int rc, void *pvThreadContext, void *pvContext)
{
    PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)pvThreadContext;
    VBVAEXHOSTCTL* pHCtl = (VBVAEXHOSTCTL*)pvContext;

    if (RT_SUCCESS(rc))
    {
        rc = vboxVDMACrGuestCtlProcess(pVdma, pHCtl);
        /* rc == VINF_SUCCESS would mean the actual state change has occcured */
        if (rc == VINF_SUCCESS)
        {
            /* we need to inform Main about VBVA enable/disable
             * main expects notifications to be done from the main thread
             * submit it there */
            PVGASTATE pVGAState = pVdma->pVGAState;

            if (VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
                vdmaVBVANotifyEnable(pVGAState);
            else
                vdmaVBVANotifyDisable(pVGAState);
        }
        else if (RT_FAILURE(rc))
            WARN(("vboxVDMACrGuestCtlProcess failed %d\n", rc));
    }
    else
        WARN(("vdmaVBVACtlThreadCreatedEnable is passed %d\n", rc));

    VBoxVBVAExHPDataCompleteCtl(&pVdma->CmdVbva, pHCtl, rc);
}
2938
2939static int vdmaVBVACtlEnableSubmitInternal(PVBOXVDMAHOST pVdma, VBVAENABLE *pEnable, bool fPaused, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
2940{
2941 int rc;
2942 VBVAEXHOSTCTL* pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva, fPaused ? VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED : VBVAEXHOSTCTL_TYPE_GHH_ENABLE);
2943 if (pHCtl)
2944 {
2945 pHCtl->u.cmd.pu8Cmd = (uint8_t*)pEnable;
2946 pHCtl->u.cmd.cbCmd = sizeof (*pEnable);
2947 pHCtl->pfnComplete = pfnComplete;
2948 pHCtl->pvComplete = pvComplete;
2949
2950 rc = VBoxVDMAThreadCreate(&pVdma->Thread, vboxVDMAWorkerThread, pVdma, vdmaVBVACtlThreadCreatedEnable, pHCtl);
2951 if (RT_SUCCESS(rc))
2952 return VINF_SUCCESS;
2953 else
2954 WARN(("VBoxVDMAThreadCreate failed %d\n", rc));
2955
2956 VBoxVBVAExHCtlFree(&pVdma->CmdVbva, pHCtl);
2957 }
2958 else
2959 {
2960 WARN(("VBoxVBVAExHCtlCreate failed\n"));
2961 rc = VERR_NO_MEMORY;
2962 }
2963
2964 return rc;
2965}
2966
2967static int vdmaVBVACtlEnableSubmitSync(PVBOXVDMAHOST pVdma, uint32_t offVram, bool fPaused)
2968{
2969 VBVAENABLE Enable = {0};
2970 Enable.u32Flags = VBVA_F_ENABLE;
2971 Enable.u32Offset = offVram;
2972
2973 VDMA_VBVA_CTL_CYNC_COMPLETION Data;
2974 Data.rc = VERR_NOT_IMPLEMENTED;
2975 int rc = RTSemEventCreate(&Data.hEvent);
2976 if (!RT_SUCCESS(rc))
2977 {
2978 WARN(("RTSemEventCreate failed %d\n", rc));
2979 return rc;
2980 }
2981
2982 rc = vdmaVBVACtlEnableSubmitInternal(pVdma, &Enable, fPaused, vdmaVBVACtlSubmitSyncCompletion, &Data);
2983 if (RT_SUCCESS(rc))
2984 {
2985 rc = RTSemEventWait(Data.hEvent, RT_INDEFINITE_WAIT);
2986 if (RT_SUCCESS(rc))
2987 {
2988 rc = Data.rc;
2989 if (!RT_SUCCESS(rc))
2990 WARN(("vdmaVBVACtlSubmitSyncCompletion returned %d\n", rc));
2991 }
2992 else
2993 WARN(("RTSemEventWait failed %d\n", rc));
2994 }
2995 else
2996 WARN(("vdmaVBVACtlSubmit failed %d\n", rc));
2997
2998 RTSemEventDestroy(Data.hEvent);
2999
3000 return rc;
3001}
3002
3003static int vdmaVBVACtlDisableSubmitInternal(PVBOXVDMAHOST pVdma, VBVAENABLE *pEnable, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
3004{
3005 int rc;
3006 VBVAEXHOSTCTL* pHCtl;
3007 if (VBoxVBVAExHSIsDisabled(&pVdma->CmdVbva))
3008 {
3009 WARN(("VBoxVBVAExHSIsDisabled: disabled"));
3010 return VINF_SUCCESS;
3011 }
3012
3013 pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva, VBVAEXHOSTCTL_TYPE_GHH_DISABLE);
3014 if (!pHCtl)
3015 {
3016 WARN(("VBoxVBVAExHCtlCreate failed\n"));
3017 return VERR_NO_MEMORY;
3018 }
3019
3020 pHCtl->u.cmd.pu8Cmd = (uint8_t*)pEnable;
3021 pHCtl->u.cmd.cbCmd = sizeof (*pEnable);
3022 rc = vdmaVBVACtlSubmit(pVdma, pHCtl, VBVAEXHOSTCTL_SOURCE_GUEST, pfnComplete, pvComplete);
3023 if (RT_SUCCESS(rc))
3024 return VINF_SUCCESS;
3025
3026 WARN(("vdmaVBVACtlSubmit failed rc %d\n", rc));
3027 VBoxVBVAExHCtlFree(&pVdma->CmdVbva, pHCtl);
3028 return rc;
3029}
3030
3031static int vdmaVBVACtlEnableDisableSubmitInternal(PVBOXVDMAHOST pVdma, VBVAENABLE *pEnable, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
3032{
3033 bool fEnable = ((pEnable->u32Flags & (VBVA_F_ENABLE | VBVA_F_DISABLE)) == VBVA_F_ENABLE);
3034 if (fEnable)
3035 return vdmaVBVACtlEnableSubmitInternal(pVdma, pEnable, false, pfnComplete, pvComplete);
3036 return vdmaVBVACtlDisableSubmitInternal(pVdma, pEnable, pfnComplete, pvComplete);
3037}
3038
3039static int vdmaVBVACtlEnableDisableSubmit(PVBOXVDMAHOST pVdma, VBOXCMDVBVA_CTL_ENABLE *pEnable)
3040{
3041 VBoxSHGSMICommandMarkAsynchCompletion(&pEnable->Hdr);
3042 int rc = vdmaVBVACtlEnableDisableSubmitInternal(pVdma, &pEnable->Enable, vboxCmdVBVACmdCtlGuestCompletion, pVdma);
3043 if (RT_SUCCESS(rc))
3044 return VINF_SUCCESS;
3045
3046 WARN(("vdmaVBVACtlEnableDisableSubmitInternal failed %d\n", rc));
3047 pEnable->Hdr.i32Result = rc;
3048 rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, &pEnable->Hdr);
3049 AssertRC(rc);
3050 return VINF_SUCCESS;
3051}
3052
3053static DECLCALLBACK(void) vdmaVBVACtlSubmitSyncCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl, int rc, void *pvContext)
3054{
3055 VDMA_VBVA_CTL_CYNC_COMPLETION *pData = (VDMA_VBVA_CTL_CYNC_COMPLETION*)pvContext;
3056 pData->rc = rc;
3057 rc = RTSemEventSignal(pData->hEvent);
3058 if (!RT_SUCCESS(rc))
3059 WARN(("RTSemEventSignal failed %d\n", rc));
3060}
3061
3062static int vdmaVBVACtlSubmitSync(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL* pCtl, VBVAEXHOSTCTL_SOURCE enmSource)
3063{
3064 VDMA_VBVA_CTL_CYNC_COMPLETION Data;
3065 Data.rc = VERR_NOT_IMPLEMENTED;
3066 int rc = RTSemEventCreate(&Data.hEvent);
3067 if (!RT_SUCCESS(rc))
3068 {
3069 WARN(("RTSemEventCreate failed %d\n", rc));
3070 return rc;
3071 }
3072
3073 rc = vdmaVBVACtlSubmit(pVdma, pCtl, enmSource, vdmaVBVACtlSubmitSyncCompletion, &Data);
3074 if (RT_SUCCESS(rc))
3075 {
3076 rc = RTSemEventWait(Data.hEvent, RT_INDEFINITE_WAIT);
3077 if (RT_SUCCESS(rc))
3078 {
3079 rc = Data.rc;
3080 if (!RT_SUCCESS(rc))
3081 WARN(("vdmaVBVACtlSubmitSyncCompletion returned %d\n", rc));
3082 }
3083 else
3084 WARN(("RTSemEventWait failed %d\n", rc));
3085 }
3086 else
3087 Log(("vdmaVBVACtlSubmit failed %d\n", rc));
3088
3089 RTSemEventDestroy(Data.hEvent);
3090
3091 return rc;
3092}
3093
3094static int vdmaVBVAPause(PVBOXVDMAHOST pVdma)
3095{
3096 VBVAEXHOSTCTL Ctl;
3097 Ctl.enmType = VBVAEXHOSTCTL_TYPE_HH_INTERNAL_PAUSE;
3098 return vdmaVBVACtlSubmitSync(pVdma, &Ctl, VBVAEXHOSTCTL_SOURCE_HOST);
3099}
3100
3101static int vdmaVBVAResume(PVBOXVDMAHOST pVdma)
3102{
3103 VBVAEXHOSTCTL Ctl;
3104 Ctl.enmType = VBVAEXHOSTCTL_TYPE_HH_INTERNAL_RESUME;
3105 return vdmaVBVACtlSubmitSync(pVdma, &Ctl, VBVAEXHOSTCTL_SOURCE_HOST);
3106}
3107
3108static int vboxVDMACmdSubmitPerform(struct VBOXVDMAHOST *pVdma)
3109{
3110 int rc = VBoxVBVAExHSCheckCommands(&pVdma->CmdVbva);
3111 switch (rc)
3112 {
3113 case VINF_SUCCESS:
3114 return VBoxVDMAThreadEventNotify(&pVdma->Thread);
3115 case VINF_ALREADY_INITIALIZED:
3116 case VINF_EOF:
3117 case VERR_INVALID_STATE:
3118 return VINF_SUCCESS;
3119 default:
3120 Assert(!RT_FAILURE(rc));
3121 return RT_FAILURE(rc) ? rc : VERR_INTERNAL_ERROR;
3122 }
3123}
3124
3125
3126int vboxCmdVBVACmdHostCtl(PPDMIDISPLAYVBVACALLBACKS pInterface,
3127 struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd,
3128 PFNCRCTLCOMPLETION pfnCompletion,
3129 void *pvCompletion)
3130{
3131 PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
3132 struct VBOXVDMAHOST *pVdma = pVGAState->pVdma;
3133 return vdmaVBVACtlOpaqueHostSubmit(pVdma, pCmd, cbCmd, pfnCompletion, pvCompletion);
3134}
3135
/* Context shared between vboxCmdVBVACmdHostCtlSync() and its completion
 * callback vboxCmdVBVACmdHostCtlSyncCb(). */
typedef struct VBOXCMDVBVA_CMDHOSTCTL_SYNC
{
    struct VBOXVDMAHOST *pVdma; /* owning VDMA host instance */
    uint32_t fProcessing;       /* 1 while the command is in flight; cleared by the completion callback */
    int rc;                     /* result delivered by the completion callback */
} VBOXCMDVBVA_CMDHOSTCTL_SYNC;
3142
3143static DECLCALLBACK(void) vboxCmdVBVACmdHostCtlSyncCb(struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd, int rc, void *pvCompletion)
3144{
3145 VBOXCMDVBVA_CMDHOSTCTL_SYNC *pData = (VBOXCMDVBVA_CMDHOSTCTL_SYNC*)pvCompletion;
3146
3147 pData->rc = rc;
3148 pData->fProcessing = 0;
3149
3150 struct VBOXVDMAHOST *pVdma = pData->pVdma;
3151
3152 ASMAtomicIncS32(&pVdma->i32cHostCrCtlCompleted);
3153
3154 RTSemEventMultiSignal(pVdma->HostCrCtlCompleteEvent);
3155}
3156
3157int vboxCmdVBVACmdHostCtlSync(PPDMIDISPLAYVBVACALLBACKS pInterface,
3158 struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd)
3159{
3160 PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
3161 struct VBOXVDMAHOST *pVdma = pVGAState->pVdma;
3162 VBOXCMDVBVA_CMDHOSTCTL_SYNC Data;
3163 Data.pVdma = pVdma;
3164 Data.fProcessing = 1;
3165 Data.rc = VERR_INTERNAL_ERROR;
3166 int rc = vdmaVBVACtlOpaqueHostSubmit(pVdma, pCmd, cbCmd, vboxCmdVBVACmdHostCtlSyncCb, &Data);
3167 if (!RT_SUCCESS(rc))
3168 {
3169 WARN(("vdmaVBVACtlOpaqueHostSubmit failed %d", rc));
3170 return rc;
3171 }
3172
3173 while (Data.fProcessing)
3174 {
3175 /* Poll infrequently to make sure no completed message has been missed. */
3176 RTSemEventMultiWait(pVdma->HostCrCtlCompleteEvent, 500);
3177
3178 if (Data.fProcessing)
3179 RTThreadYield();
3180 }
3181
3182 /* 'Our' message has been processed, so should reset the semaphore.
3183 * There is still possible that another message has been processed
3184 * and the semaphore has been signalled again.
3185 * Reset only if there are no other messages completed.
3186 */
3187 int32_t c = ASMAtomicDecS32(&pVdma->i32cHostCrCtlCompleted);
3188 Assert(c >= 0);
3189 if (!c)
3190 RTSemEventMultiReset(pVdma->HostCrCtlCompleteEvent);
3191
3192 rc = Data.rc;
3193 if (!RT_SUCCESS(rc))
3194 WARN(("host call failed %d", rc));
3195
3196 return rc;
3197}
3198
3199int vboxCmdVBVACmdCtl(PVGASTATE pVGAState, VBOXCMDVBVA_CTL *pCtl, uint32_t cbCtl)
3200{
3201 struct VBOXVDMAHOST *pVdma = pVGAState->pVdma;
3202 int rc = VINF_SUCCESS;
3203 switch (pCtl->u32Type)
3204 {
3205 case VBOXCMDVBVACTL_TYPE_3DCTL:
3206 return vdmaVBVACtlGenericGuestSubmit(pVdma, VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE, pCtl, cbCtl);
3207 case VBOXCMDVBVACTL_TYPE_RESIZE:
3208 return vdmaVBVACtlGenericGuestSubmit(pVdma, VBVAEXHOSTCTL_TYPE_GHH_RESIZE, pCtl, cbCtl);
3209 case VBOXCMDVBVACTL_TYPE_ENABLE:
3210 if (cbCtl != sizeof (VBOXCMDVBVA_CTL_ENABLE))
3211 {
3212 WARN(("incorrect enable size\n"));
3213 rc = VERR_INVALID_PARAMETER;
3214 break;
3215 }
3216 return vdmaVBVACtlEnableDisableSubmit(pVdma, (VBOXCMDVBVA_CTL_ENABLE*)pCtl);
3217 default:
3218 WARN(("unsupported type\n"));
3219 rc = VERR_INVALID_PARAMETER;
3220 break;
3221 }
3222
3223 pCtl->i32Result = rc;
3224 rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, pCtl);
3225 AssertRC(rc);
3226 return VINF_SUCCESS;
3227}
3228
3229int vboxCmdVBVACmdSubmit(PVGASTATE pVGAState)
3230{
3231 if (!VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva))
3232 {
3233 WARN(("vdma VBVA is disabled\n"));
3234 return VERR_INVALID_STATE;
3235 }
3236
3237 return vboxVDMACmdSubmitPerform(pVGAState->pVdma);
3238}
3239
3240int vboxCmdVBVACmdFlush(PVGASTATE pVGAState)
3241{
3242 WARN(("flush\n"));
3243 if (!VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva))
3244 {
3245 WARN(("vdma VBVA is disabled\n"));
3246 return VERR_INVALID_STATE;
3247 }
3248 return vboxVDMACmdSubmitPerform(pVGAState->pVdma);
3249}
3250
3251void vboxCmdVBVACmdTimer(PVGASTATE pVGAState)
3252{
3253 if (!VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva))
3254 return;
3255 vboxVDMACmdSubmitPerform(pVGAState->pVdma);
3256}
3257
3258bool vboxCmdVBVAIsEnabled(PVGASTATE pVGAState)
3259{
3260 return VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva);
3261}
3262#endif
3263
3264int vboxVDMASaveStateExecPrep(struct VBOXVDMAHOST *pVdma, PSSMHANDLE pSSM)
3265{
3266#ifdef VBOX_WITH_CRHGSMI
3267 int rc = vdmaVBVAPause(pVdma);
3268 if (RT_SUCCESS(rc))
3269 return VINF_SUCCESS;
3270
3271 if (rc != VERR_INVALID_STATE)
3272 {
3273 WARN(("vdmaVBVAPause failed %d\n", rc));
3274 return rc;
3275 }
3276
3277#ifdef DEBUG_misha
3278 WARN(("debug prep"));
3279#endif
3280
3281 PVGASTATE pVGAState = pVdma->pVGAState;
3282 PVBOXVDMACMD_CHROMIUM_CTL pCmd = (PVBOXVDMACMD_CHROMIUM_CTL)vboxVDMACrCtlCreate(
3283 VBOXVDMACMD_CHROMIUM_CTL_TYPE_SAVESTATE_BEGIN, sizeof (*pCmd));
3284 Assert(pCmd);
3285 if (pCmd)
3286 {
3287 rc = vboxVDMACrCtlPost(pVGAState, pCmd, sizeof (*pCmd));
3288 AssertRC(rc);
3289 if (RT_SUCCESS(rc))
3290 {
3291 rc = vboxVDMACrCtlGetRc(pCmd);
3292 }
3293 vboxVDMACrCtlRelease(pCmd);
3294 return rc;
3295 }
3296 return VERR_NO_MEMORY;
3297#else
3298 return VINF_SUCCESS;
3299#endif
3300}
3301
3302int vboxVDMASaveStateExecDone(struct VBOXVDMAHOST *pVdma, PSSMHANDLE pSSM)
3303{
3304#ifdef VBOX_WITH_CRHGSMI
3305 int rc = vdmaVBVAResume(pVdma);
3306 if (RT_SUCCESS(rc))
3307 return VINF_SUCCESS;
3308
3309 if (rc != VERR_INVALID_STATE)
3310 {
3311 WARN(("vdmaVBVAResume failed %d\n", rc));
3312 return rc;
3313 }
3314
3315#ifdef DEBUG_misha
3316 WARN(("debug done"));
3317#endif
3318
3319 PVGASTATE pVGAState = pVdma->pVGAState;
3320 PVBOXVDMACMD_CHROMIUM_CTL pCmd = (PVBOXVDMACMD_CHROMIUM_CTL)vboxVDMACrCtlCreate(
3321 VBOXVDMACMD_CHROMIUM_CTL_TYPE_SAVESTATE_END, sizeof (*pCmd));
3322 Assert(pCmd);
3323 if (pCmd)
3324 {
3325 rc = vboxVDMACrCtlPost(pVGAState, pCmd, sizeof (*pCmd));
3326 AssertRC(rc);
3327 if (RT_SUCCESS(rc))
3328 {
3329 rc = vboxVDMACrCtlGetRc(pCmd);
3330 }
3331 vboxVDMACrCtlRelease(pCmd);
3332 return rc;
3333 }
3334 return VERR_NO_MEMORY;
3335#else
3336 return VINF_SUCCESS;
3337#endif
3338}
3339
/**
 * Saves the command-VBVA state to the saved-state stream.
 *
 * Writes 0xffffffff as a "disabled" marker when command VBVA is not enabled
 * (or not compiled in).  Otherwise writes the guest VBVA buffer's offset
 * within VRAM and submits a synchronous HH_SAVESTATE control so the worker
 * thread streams the remaining state.
 *
 * @returns VBox status code.
 * @param   pVdma   The VDMA host state.
 * @param   pSSM    The saved-state handle.
 */
int vboxVDMASaveStateExecPerform(struct VBOXVDMAHOST *pVdma, PSSMHANDLE pSSM)
{
    int rc;

    /* NOTE: without VBOX_WITH_CRHGSMI the 'if' below is compiled out and the
     * marker block always executes. */
#ifdef VBOX_WITH_CRHGSMI
    if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
#endif
    {
        /* Nothing to save: store the "disabled" marker. */
        rc = SSMR3PutU32(pSSM, 0xffffffff);
        AssertRCReturn(rc, rc);
        return VINF_SUCCESS;
    }

#ifdef VBOX_WITH_CRHGSMI
    PVGASTATE pVGAState = pVdma->pVGAState;
    uint8_t * pu8VramBase = pVGAState->vram_ptrR3;

    /* Save the VBVA buffer location as an offset relative to the VRAM base. */
    rc = SSMR3PutU32(pSSM, (uint32_t)(((uint8_t*)pVdma->CmdVbva.pVBVA) - pu8VramBase));
    AssertRCReturn(rc, rc);

    /* Let the worker thread serialize the rest of the state synchronously. */
    VBVAEXHOSTCTL HCtl;
    HCtl.enmType = VBVAEXHOSTCTL_TYPE_HH_SAVESTATE;
    HCtl.u.state.pSSM = pSSM;
    HCtl.u.state.u32Version = 0;
    return vdmaVBVACtlSubmitSync(pVdma, &HCtl, VBVAEXHOSTCTL_SOURCE_HOST);
#endif
}
3367
/**
 * Loads the command-VBVA state from the saved-state stream.
 *
 * Reads the marker written by vboxVDMASaveStateExecPerform: 0xffffffff means
 * command VBVA was disabled at save time and nothing more follows.  Otherwise
 * the value is the VBVA buffer offset within VRAM; command VBVA is re-enabled,
 * the remaining state is streamed in via a synchronous HH_LOADSTATE control,
 * and processing is resumed.
 *
 * @returns VBox status code.
 * @param   pVdma       The VDMA host state.
 * @param   pSSM        The saved-state handle.
 * @param   u32Version  The saved-state unit version.
 */
int vboxVDMASaveLoadExecPerform(struct VBOXVDMAHOST *pVdma, PSSMHANDLE pSSM, uint32_t u32Version)
{
    uint32_t u32;
    int rc = SSMR3GetU32(pSSM, &u32);
    AssertRCReturn(rc, rc);

    if (u32 != 0xffffffff)
    {
#ifdef VBOX_WITH_CRHGSMI
        /* Re-enable command VBVA at the saved VRAM offset; the 'true' flag
         * presumably requests the paused state (the assertion below expects
         * it) so no commands run while the state is being loaded. */
        rc = vdmaVBVACtlEnableSubmitSync(pVdma, u32, true);
        AssertRCReturn(rc, rc);

        Assert(pVdma->CmdVbva.i32State == VBVAEXHOSTCONTEXT_ESTATE_PAUSED);

        /* Let the worker thread deserialize the rest of the state. */
        VBVAEXHOSTCTL HCtl;
        HCtl.enmType = VBVAEXHOSTCTL_TYPE_HH_LOADSTATE;
        HCtl.u.state.pSSM = pSSM;
        HCtl.u.state.u32Version = u32Version;
        rc = vdmaVBVACtlSubmitSync(pVdma, &HCtl, VBVAEXHOSTCTL_SOURCE_HOST);
        AssertRCReturn(rc, rc);

        rc = vdmaVBVAResume(pVdma);
        AssertRCReturn(rc, rc);

        return VINF_SUCCESS;
#else
        /* Saved state contains CRHGSMI data this build cannot interpret. */
        WARN(("Unsupported VBVACtl info!\n"));
        return VERR_VERSION_MISMATCH;
#endif
    }

    return VINF_SUCCESS;
}
3401
3402int vboxVDMASaveLoadDone(struct VBOXVDMAHOST *pVdma)
3403{
3404 VBVAEXHOSTCTL* pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva, VBVAEXHOSTCTL_TYPE_HH_LOADSTATE_DONE);
3405 if (!pHCtl)
3406 {
3407 WARN(("VBoxVBVAExHCtlCreate failed\n"));
3408 return VERR_NO_MEMORY;
3409 }
3410
3411 /* sanity */
3412 pHCtl->u.cmd.pu8Cmd = NULL;
3413 pHCtl->u.cmd.cbCmd = 0;
3414
3415 /* NULL completion will just free the ctl up */
3416 int rc = vdmaVBVACtlSubmit(pVdma, pHCtl, VBVAEXHOSTCTL_SOURCE_HOST, NULL, NULL);
3417 if (RT_FAILURE(rc))
3418 {
3419 Log(("vdmaVBVACtlSubmit failed rc %d\n", rc));
3420 VBoxVBVAExHCtlFree(&pVdma->CmdVbva, pHCtl);
3421 return rc;
3422 }
3423
3424 return VINF_SUCCESS;
3425}
注意: 瀏覽 TracBrowser 來幫助您使用儲存庫瀏覽器

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette