VirtualBox

source: vbox/trunk/src/VBox/Devices/Graphics/DevVGA_VDMA.cpp@ 51362

最後變更：此檔案最新的修訂為 51362，由 vboxsync 於 11 年前提交

Disabled broken vboxVDMASaveLoadDone code (VERR_INVALID_STATE).

  • 屬性 svn:eol-style 設為 native
  • 屬性 svn:keywords 設為 Author Date Id Revision
檔案大小: 108.3 KB
 
1/** @file
2 * Video DMA (VDMA) support.
3 */
4
5/*
6 * Copyright (C) 2006-2012 Oracle Corporation
7 *
8 * This file is part of VirtualBox Open Source Edition (OSE), as
9 * available from http://www.alldomusa.eu.org. This file is free software;
10 * you can redistribute it and/or modify it under the terms of the GNU
11 * General Public License (GPL) as published by the Free Software
12 * Foundation, in version 2 as it comes in the "COPYING" file of the
13 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
14 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
15 */
16#include <VBox/VMMDev.h>
17#include <VBox/vmm/pdmdev.h>
18#include <VBox/VBoxVideo.h>
19#include <iprt/semaphore.h>
20#include <iprt/thread.h>
21#include <iprt/mem.h>
22#include <iprt/asm.h>
23#include <iprt/list.h>
24#include <iprt/param.h>
25
26#include "DevVGA.h"
27#include "HGSMI/SHGSMIHost.h"
28
29#include <VBox/VBoxVideo3D.h>
30#include <VBox/VBoxVideoHost3D.h>
31
32#ifdef DEBUG_misha
33# define VBOXVDBG_MEMCACHE_DISABLE
34#endif
35
36#ifndef VBOXVDBG_MEMCACHE_DISABLE
37# include <iprt/memcache.h>
38#endif
39
#ifdef DEBUG_misha
/* In misha's debug builds a warning also triggers an assertion/breakpoint. */
#define WARN_BP() do { AssertFailed(); } while (0)
#else
#define WARN_BP() do { } while (0)
#endif
/** Log a release-log warning and (in debug builds) break into the debugger. */
#define WARN(_msg) do { \
        LogRel(_msg); \
        WARN_BP(); \
    } while (0)
49
/* Worker-thread life-cycle states (VBOXVDMATHREAD::u32State). */
#define VBOXVDMATHREAD_STATE_TERMINATED 0
#define VBOXVDMATHREAD_STATE_CREATING 1
#define VBOXVDMATHREAD_STATE_CREATED 3
#define VBOXVDMATHREAD_STATE_TERMINATING 4

struct VBOXVDMATHREAD;

/** One-shot callback invoked on thread state transitions (created / terminating done). */
typedef DECLCALLBACKPTR(void, PFNVBOXVDMATHREAD_CHANGED)(struct VBOXVDMATHREAD *pThread, int rc, void *pvThreadContext, void *pvChangeContext);

/** Book-keeping for the VDMA worker thread. */
typedef struct VBOXVDMATHREAD
{
    RTTHREAD hWorkerThread;                 /* worker thread handle */
    RTSEMEVENT hEvent;                      /* event the worker is notified through */
    volatile uint32_t u32State;             /* one of VBOXVDMATHREAD_STATE_XXX */
    PFNVBOXVDMATHREAD_CHANGED pfnChanged;   /* one-shot state-change callback, cleared before invocation */
    void *pvChanged;                        /* user context for pfnChanged */
} VBOXVDMATHREAD, *PVBOXVDMATHREAD;
67
68
/* state transformations:
 *
 * submitter | processor
 *
 * LISTENING ---> PROCESSING
 *
 * */
#define VBVAEXHOSTCONTEXT_STATE_LISTENING 0
#define VBVAEXHOSTCONTEXT_STATE_PROCESSING 1

/* Enable states are ordered: disabled < paused < enabled. */
#define VBVAEXHOSTCONTEXT_ESTATE_DISABLED -1
#define VBVAEXHOSTCONTEXT_ESTATE_PAUSED 0
#define VBVAEXHOSTCONTEXT_ESTATE_ENABLED 1

/** Extended host VBVA context: the guest-shared ring buffer plus queued control commands. */
typedef struct VBVAEXHOSTCONTEXT
{
    VBVABUFFER *pVBVA;               /* guest-shared VBVA buffer; NULL until enabled */
    volatile int32_t i32State;       /* LISTENING/PROCESSING, see diagram above */
    volatile int32_t i32EnableState; /* VBVAEXHOSTCONTEXT_ESTATE_XXX */
    volatile uint32_t u32cCtls;      /* number of queued controls (both lists together) */
    /* critical section for accessing ctl lists */
    RTCRITSECT CltCritSect;
    RTLISTANCHOR GuestCtlList;       /* guest-submitted controls */
    RTLISTANCHOR HostCtlList;        /* host-submitted controls (served with priority) */
#ifndef VBOXVDBG_MEMCACHE_DISABLE
    RTMEMCACHE CtlCache;             /* allocator cache for VBVAEXHOSTCTL instances */
#endif
} VBVAEXHOSTCONTEXT;
97
/** Control command types.
 *  NOTE(review): the HH_/GHH_ prefixes presumably distinguish host-originated
 *  from guest-originated (host-handled) controls — confirm against callers. */
typedef enum
{
    VBVAEXHOSTCTL_TYPE_UNDEFINED = 0,
    VBVAEXHOSTCTL_TYPE_HH_INTERNAL_PAUSE,   /* handled inline: pauses command processing */
    VBVAEXHOSTCTL_TYPE_HH_INTERNAL_RESUME,  /* handled inline: resumes command processing */
    VBVAEXHOSTCTL_TYPE_HH_SAVESTATE,
    VBVAEXHOSTCTL_TYPE_HH_LOADSTATE,
    VBVAEXHOSTCTL_TYPE_HH_LOADSTATE_DONE,
    VBVAEXHOSTCTL_TYPE_HH_BE_OPAQUE,
    VBVAEXHOSTCTL_TYPE_HH_ON_HGCM_UNLOAD,
    VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE,
    VBVAEXHOSTCTL_TYPE_GHH_ENABLE,
    VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED,
    VBVAEXHOSTCTL_TYPE_GHH_DISABLE,
    VBVAEXHOSTCTL_TYPE_GHH_RESIZE
} VBVAEXHOSTCTL_TYPE;
114
struct VBVAEXHOSTCTL;

/** Completion callback for a control command. */
typedef DECLCALLBACKPTR(void, PFNVBVAEXHOSTCTL_COMPLETE)(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl, int rc, void *pvComplete);

/** A queued control command, linked into GuestCtlList or HostCtlList. */
typedef struct VBVAEXHOSTCTL
{
    RTLISTNODE Node;            /* list linkage */
    VBVAEXHOSTCTL_TYPE enmType; /* what kind of control this is */
    union
    {
        struct
        {
            uint8_t * pu8Cmd;   /* command payload; for saved guest ctls this points into VRAM */
            uint32_t cbCmd;     /* payload size in bytes */
        } cmd;

        struct
        {
            PSSMHANDLE pSSM;    /* saved-state handle */
            uint32_t u32Version;/* saved-state version */
        } state;
    } u;
    PFNVBVAEXHOSTCTL_COMPLETE pfnComplete; /* completion callback; when NULL the ctl is simply freed on completion */
    void *pvComplete;           /* user context for pfnComplete */
} VBVAEXHOSTCTL;
140
/* VBoxVBVAExHP**, i.e. processor functions, can NOT be called concurrently with each other,
 * but can be called with other VBoxVBVAExS** (submitter) functions except Init/Start/Term apparently.
 * Can only be called by the processor, i.e. the entity that acquired the processor state by direct or indirect call to the VBoxVBVAExHSCheckCommands
 * see more detailed comments in headers for function definitions */
typedef enum
{
    VBVAEXHOST_DATA_TYPE_NO_DATA = 0, /* nothing pending */
    VBVAEXHOST_DATA_TYPE_CMD,         /* a guest VBVA ring-buffer command */
    VBVAEXHOST_DATA_TYPE_HOSTCTL,     /* a host control (cast to VBVAEXHOSTCTL) */
    VBVAEXHOST_DATA_TYPE_GUESTCTL     /* a guest control (cast to VBVAEXHOSTCTL) */
} VBVAEXHOST_DATA_TYPE;
152
/* Forward declarations. */
static DECLCALLBACK(int) vdmaVBVANotifyDisable(PVGASTATE pVGAState);


static VBVAEXHOST_DATA_TYPE VBoxVBVAExHPDataGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd);

static void VBoxVBVAExHPDataCompleteCmd(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint32_t cbCmd);
static void VBoxVBVAExHPDataCompleteCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL *pCtl, int rc);

/* VBoxVBVAExHP**, i.e. processor functions, can NOT be called concurrently with each other,
 * can be called concurrently with itself as well as with other VBoxVBVAEx** functions except Init/Start/Term apparently */
static int VBoxVBVAExHSCheckCommands(struct VBVAEXHOSTCONTEXT *pCmdVbva);

static int VBoxVBVAExHSInit(struct VBVAEXHOSTCONTEXT *pCmdVbva);
static int VBoxVBVAExHSEnable(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVABUFFER *pVBVA);
static int VBoxVBVAExHSDisable(struct VBVAEXHOSTCONTEXT *pCmdVbva);
static void VBoxVBVAExHSTerm(struct VBVAEXHOSTCONTEXT *pCmdVbva);
static int VBoxVBVAExHSSaveState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM);
static int VBoxVBVAExHSLoadState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version);
171
172static VBVAEXHOSTCTL* VBoxVBVAExHCtlAlloc(VBVAEXHOSTCONTEXT *pCmdVbva)
173{
174#ifndef VBOXVDBG_MEMCACHE_DISABLE
175 return (VBVAEXHOSTCTL*)RTMemCacheAlloc(pCmdVbva->CtlCache);
176#else
177 return (VBVAEXHOSTCTL*)RTMemAlloc(sizeof (VBVAEXHOSTCTL));
178#endif
179}
180
181static void VBoxVBVAExHCtlFree(VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL *pCtl)
182{
183#ifndef VBOXVDBG_MEMCACHE_DISABLE
184 RTMemCacheFree(pCmdVbva->CtlCache, pCtl);
185#else
186 RTMemFree(pCtl);
187#endif
188}
189
190static VBVAEXHOSTCTL* VBoxVBVAExHCtlCreate(VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL_TYPE enmType)
191{
192 VBVAEXHOSTCTL* pCtl = VBoxVBVAExHCtlAlloc(pCmdVbva);
193 if (!pCtl)
194 {
195 WARN(("VBoxVBVAExHCtlAlloc failed\n"));
196 return NULL;
197 }
198
199 pCtl->enmType = enmType;
200 return pCtl;
201}
202
203static int vboxVBVAExHSProcessorAcquire(struct VBVAEXHOSTCONTEXT *pCmdVbva)
204{
205 Assert(pCmdVbva->i32State >= VBVAEXHOSTCONTEXT_STATE_LISTENING);
206
207 if (ASMAtomicCmpXchgS32(&pCmdVbva->i32State, VBVAEXHOSTCONTEXT_STATE_PROCESSING, VBVAEXHOSTCONTEXT_STATE_LISTENING))
208 return VINF_SUCCESS;
209 return VERR_SEM_BUSY;
210}
211
/** Fetches and detaches the next pending control, host controls taking priority.
 *
 * Must be called by the current processor (i32State == PROCESSING).
 *
 * @param pCmdVbva     The VBVA context.
 * @param pfHostCtl    Where to return whether the control came from the host
 *                     list; only written when a control is returned.
 * @param fHostOnlyMode When true only the host control list is consulted.
 * @returns the detached control, or NULL when none is available (or the
 *          critical section could not be entered).
 */
static VBVAEXHOSTCTL* vboxVBVAExHPCheckCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, bool *pfHostCtl, bool fHostOnlyMode)
{
    Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);

    /* Lock-free fast path: nothing queued and caller accepts guest ctls too. */
    if(!fHostOnlyMode && !ASMAtomicUoReadU32(&pCmdVbva->u32cCtls))
        return NULL;

    int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
    if (RT_SUCCESS(rc))
    {
        /* Host controls first. */
        VBVAEXHOSTCTL* pCtl = RTListGetFirst(&pCmdVbva->HostCtlList, VBVAEXHOSTCTL, Node);
        if (pCtl)
            *pfHostCtl = true;
        else if (!fHostOnlyMode)
        {
            /* Guest controls are only served while not paused. */
            if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
            {
                pCtl = RTListGetFirst(&pCmdVbva->GuestCtlList, VBVAEXHOSTCTL, Node);
                /* pCtl can not be null here since pCmdVbva->u32cCtls is not null,
                 * and there are no HostCtl commands*/
                Assert(pCtl);
                *pfHostCtl = false;
            }
        }

        if (pCtl)
        {
            /* Detach from the list before handing it to the caller. */
            RTListNodeRemove(&pCtl->Node);
            ASMAtomicDecU32(&pCmdVbva->u32cCtls);
        }

        RTCritSectLeave(&pCmdVbva->CltCritSect);

        return pCtl;
    }
    else
        WARN(("RTCritSectEnter failed %d\n", rc));

    return NULL;
}
252
253static VBVAEXHOSTCTL* VBoxVBVAExHPCheckHostCtlOnDisable(struct VBVAEXHOSTCONTEXT *pCmdVbva)
254{
255 bool fHostCtl = false;
256 VBVAEXHOSTCTL* pCtl = vboxVBVAExHPCheckCtl(pCmdVbva, &fHostCtl, true);
257 Assert(!pCtl || fHostCtl);
258 return pCtl;
259}
260
261static int VBoxVBVAExHPPause(struct VBVAEXHOSTCONTEXT *pCmdVbva)
262{
263 if (pCmdVbva->i32EnableState < VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
264 {
265 WARN(("Invalid state\n"));
266 return VERR_INVALID_STATE;
267 }
268
269 ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_PAUSED);
270 return VINF_SUCCESS;
271}
272
273static int VBoxVBVAExHPResume(struct VBVAEXHOSTCONTEXT *pCmdVbva)
274{
275 if (pCmdVbva->i32EnableState != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
276 {
277 WARN(("Invalid state\n"));
278 return VERR_INVALID_STATE;
279 }
280
281 ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_ENABLED);
282 return VINF_SUCCESS;
283}
284
285
286static bool vboxVBVAExHPCheckProcessCtlInternal(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL* pCtl)
287{
288 switch (pCtl->enmType)
289 {
290 case VBVAEXHOSTCTL_TYPE_HH_INTERNAL_PAUSE:
291 {
292 int rc = VBoxVBVAExHPPause(pCmdVbva);
293 VBoxVBVAExHPDataCompleteCtl(pCmdVbva, pCtl, VINF_SUCCESS);
294 return true;
295 }
296 case VBVAEXHOSTCTL_TYPE_HH_INTERNAL_RESUME:
297 {
298 int rc = VBoxVBVAExHPResume(pCmdVbva);
299 VBoxVBVAExHPDataCompleteCtl(pCmdVbva, pCtl, VINF_SUCCESS);
300 return true;
301 }
302 default:
303 return false;
304 }
305}
306
307static void vboxVBVAExHPProcessorRelease(struct VBVAEXHOSTCONTEXT *pCmdVbva)
308{
309 Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
310
311 ASMAtomicWriteS32(&pCmdVbva->i32State, VBVAEXHOSTCONTEXT_STATE_LISTENING);
312}
313
314static void vboxVBVAExHPHgEventSet(struct VBVAEXHOSTCONTEXT *pCmdVbva)
315{
316 Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
317 if (pCmdVbva->pVBVA)
318 ASMAtomicOrU32(&pCmdVbva->pVBVA->hostFlags.u32HostEvents, VBVA_F_STATE_PROCESSING);
319}
320
321static void vboxVBVAExHPHgEventClear(struct VBVAEXHOSTCONTEXT *pCmdVbva)
322{
323 Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
324 if (pCmdVbva->pVBVA)
325 ASMAtomicAndU32(&pCmdVbva->pVBVA->hostFlags.u32HostEvents, ~VBVA_F_STATE_PROCESSING);
326}
327
/** Peeks at the next completed command record in the guest VBVA ring buffer.
 *
 * Caller must be the processor and the context must not be paused/disabled.
 * The record is NOT retired here; VBoxVBVAExHPDataCompleteCmd does that.
 *
 * @returns VINF_SUCCESS with *ppCmd/*pcbCmd set,
 *          VINF_EOF when no records are pending (outputs untouched),
 *          VINF_TRY_AGAIN when the first record is still being written,
 *          VERR_INVALID_STATE for commands crossing the buffer boundary
 *          (not supported).
 */
static int vboxVBVAExHPCmdGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd)
{
    Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
    Assert(pCmdVbva->i32EnableState > VBVAEXHOSTCONTEXT_ESTATE_PAUSED);

    VBVABUFFER *pVBVA = pCmdVbva->pVBVA;

    uint32_t indexRecordFirst = pVBVA->indexRecordFirst;
    uint32_t indexRecordFree = pVBVA->indexRecordFree;

    Log(("first = %d, free = %d\n",
         indexRecordFirst, indexRecordFree));

    if (indexRecordFirst == indexRecordFree)
    {
        /* No records to process. Return without assigning output variables. */
        return VINF_EOF;
    }

    uint32_t cbRecordCurrent = ASMAtomicReadU32(&pVBVA->aRecords[indexRecordFirst].cbRecord);

    /* A new record need to be processed. */
    if (cbRecordCurrent & VBVA_F_RECORD_PARTIAL)
    {
        /* the record is being recorded, try again */
        return VINF_TRY_AGAIN;
    }

    uint32_t cbRecord = cbRecordCurrent & ~VBVA_F_RECORD_PARTIAL;

    if (!cbRecord)
    {
        /* the record is being recorded, try again */
        return VINF_TRY_AGAIN;
    }

    /* we should not get partial commands here actually */
    Assert(cbRecord);

    /* The size of largest contiguous chunk in the ring buffer. */
    uint32_t u32BytesTillBoundary = pVBVA->cbData - pVBVA->off32Data;

    /* The pointer to data in the ring buffer. */
    uint8_t *pSrc = &pVBVA->au8Data[pVBVA->off32Data];

    /* Fetch or point the data. */
    if (u32BytesTillBoundary >= cbRecord)
    {
        /* The command does not cross buffer boundary. Return address in the buffer. */
        *ppCmd = pSrc;
        *pcbCmd = cbRecord;
        return VINF_SUCCESS;
    }

    /* Commands wrapping around the ring buffer end are not supported. */
    LogRel(("CmdVbva: cross-bound writes unsupported\n"));
    return VERR_INVALID_STATE;
}
385
386static void VBoxVBVAExHPDataCompleteCmd(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint32_t cbCmd)
387{
388 VBVABUFFER *pVBVA = pCmdVbva->pVBVA;
389 pVBVA->off32Data = (pVBVA->off32Data + cbCmd) % pVBVA->cbData;
390
391 pVBVA->indexRecordFirst = (pVBVA->indexRecordFirst + 1) % RT_ELEMENTS(pVBVA->aRecords);
392}
393
394static void VBoxVBVAExHPDataCompleteCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL *pCtl, int rc)
395{
396 if (pCtl->pfnComplete)
397 pCtl->pfnComplete(pCmdVbva, pCtl, rc, pCtl->pvComplete);
398 else
399 VBoxVBVAExHCtlFree(pCmdVbva, pCtl);
400}
401
/** Worker for VBoxVBVAExHPDataGet: fetches the next piece of work.
 *
 * Controls are served first (host before guest, see vboxVBVAExHPCheckCtl);
 * internal pause/resume controls are executed inline and the loop continues.
 * Ring-buffer commands are only served while fully enabled.
 *
 * @returns the type of data placed in *ppCmd/*pcbCmd, or
 *          VBVAEXHOST_DATA_TYPE_NO_DATA when there is nothing to process.
 */
static VBVAEXHOST_DATA_TYPE vboxVBVAExHPDataGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd)
{
    Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
    VBVAEXHOSTCTL*pCtl;
    bool fHostClt;

    for(;;)
    {
        pCtl = vboxVBVAExHPCheckCtl(pCmdVbva, &fHostClt, false);
        if (pCtl)
        {
            if (fHostClt)
            {
                if (!vboxVBVAExHPCheckProcessCtlInternal(pCmdVbva, pCtl))
                {
                    /* Not an internal control: hand it to the caller. */
                    *ppCmd = (uint8_t*)pCtl;
                    *pcbCmd = sizeof (*pCtl);
                    return VBVAEXHOST_DATA_TYPE_HOSTCTL;
                }
                /* Internal control handled inline; look for more work. */
                continue;
            }
            else
            {
                *ppCmd = (uint8_t*)pCtl;
                *pcbCmd = sizeof (*pCtl);
                return VBVAEXHOST_DATA_TYPE_GUESTCTL;
            }
        }

        /* Paused or disabled: do not touch the ring buffer. */
        if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) <= VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
            return VBVAEXHOST_DATA_TYPE_NO_DATA;

        int rc = vboxVBVAExHPCmdGet(pCmdVbva, ppCmd, pcbCmd);
        switch (rc)
        {
            case VINF_SUCCESS:
                return VBVAEXHOST_DATA_TYPE_CMD;
            case VINF_EOF:
                return VBVAEXHOST_DATA_TYPE_NO_DATA;
            case VINF_TRY_AGAIN:
                /* Record still being written by the guest; back off briefly. */
                RTThreadSleep(1);
                continue;
            default:
                /* this is something really unexpected, i.e. most likely guest has written something incorrect to the VBVA buffer */
                WARN(("Warning: vboxVBVAExHCmdGet returned unexpected status %d\n", rc));
                return VBVAEXHOST_DATA_TYPE_NO_DATA;
        }
    }

    /* Not reachable: the loop above always returns or continues. */
    WARN(("Warning: VBoxVBVAExHCmdGet unexpected state\n"));
    return VBVAEXHOST_DATA_TYPE_NO_DATA;
}
454
/** Fetches the next piece of work for the processor, managing the
 *  "processing" guest-visible flag and the processor ownership.
 *
 * When the queue turns out to be empty the processor role is released and the
 * queue is re-checked to close the race with submitters (see comment below).
 *
 * @returns the type of data placed in *ppCmd/*pcbCmd.
 */
static VBVAEXHOST_DATA_TYPE VBoxVBVAExHPDataGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd)
{
    VBVAEXHOST_DATA_TYPE enmType = vboxVBVAExHPDataGet(pCmdVbva, ppCmd, pcbCmd);
    if (enmType == VBVAEXHOST_DATA_TYPE_NO_DATA)
    {
        vboxVBVAExHPHgEventClear(pCmdVbva);
        vboxVBVAExHPProcessorRelease(pCmdVbva);
        /* we need to prevent racing between us clearing the flag and command check/submission thread, i.e.
         * 1. we check the queue -> and it is empty
         * 2. submitter adds command to the queue
         * 3. submitter checks the "processing" -> and it is true , thus it does not submit a notification
         * 4. we clear the "processing" state
         * 5. ->here we need to re-check the queue state to ensure we do not leak the notification of the above command
         * 6. if the queue appears to be not-empty set the "processing" state back to "true"
         **/
        int rc = vboxVBVAExHSProcessorAcquire(pCmdVbva);
        if (RT_SUCCESS(rc))
        {
            /* we are the processor now */
            enmType = vboxVBVAExHPDataGet(pCmdVbva, ppCmd, pcbCmd);
            if (enmType == VBVAEXHOST_DATA_TYPE_NO_DATA)
            {
                /* Still empty: release again and report no data. */
                vboxVBVAExHPProcessorRelease(pCmdVbva);
                return VBVAEXHOST_DATA_TYPE_NO_DATA;
            }

            /* Work appeared after all: re-advertise the processing flag. */
            vboxVBVAExHPHgEventSet(pCmdVbva);
        }
    }

    return enmType;
}
487
488DECLINLINE(bool) vboxVBVAExHSHasCommands(struct VBVAEXHOSTCONTEXT *pCmdVbva)
489{
490 VBVABUFFER *pVBVA = pCmdVbva->pVBVA;
491
492 if (pVBVA)
493 {
494 uint32_t indexRecordFirst = pVBVA->indexRecordFirst;
495 uint32_t indexRecordFree = pVBVA->indexRecordFree;
496
497 if (indexRecordFirst != indexRecordFree)
498 return true;
499 }
500
501 return !!ASMAtomicReadU32(&pCmdVbva->u32cCtls);
502}
503
504/* Checks whether the new commands are ready for processing
505 * @returns
506 * VINF_SUCCESS - there are commands are in a queue, and the given thread is now the processor (i.e. typically it would delegate processing to a worker thread)
507 * VINF_EOF - no commands in a queue
508 * VINF_ALREADY_INITIALIZED - another thread already processing the commands
509 * VERR_INVALID_STATE - the VBVA is paused or pausing */
510static int VBoxVBVAExHSCheckCommands(struct VBVAEXHOSTCONTEXT *pCmdVbva)
511{
512 int rc = vboxVBVAExHSProcessorAcquire(pCmdVbva);
513 if (RT_SUCCESS(rc))
514 {
515 /* we are the processor now */
516 if (vboxVBVAExHSHasCommands(pCmdVbva))
517 {
518 vboxVBVAExHPHgEventSet(pCmdVbva);
519 return VINF_SUCCESS;
520 }
521
522 vboxVBVAExHPProcessorRelease(pCmdVbva);
523 return VINF_EOF;
524 }
525 if (rc == VERR_SEM_BUSY)
526 return VINF_ALREADY_INITIALIZED;
527 return VERR_INVALID_STATE;
528}
529
530static int VBoxVBVAExHSInit(struct VBVAEXHOSTCONTEXT *pCmdVbva)
531{
532 memset(pCmdVbva, 0, sizeof (*pCmdVbva));
533 int rc = RTCritSectInit(&pCmdVbva->CltCritSect);
534 if (RT_SUCCESS(rc))
535 {
536#ifndef VBOXVDBG_MEMCACHE_DISABLE
537 rc = RTMemCacheCreate(&pCmdVbva->CtlCache, sizeof (VBVAEXHOSTCTL),
538 0, /* size_t cbAlignment */
539 UINT32_MAX, /* uint32_t cMaxObjects */
540 NULL, /* PFNMEMCACHECTOR pfnCtor*/
541 NULL, /* PFNMEMCACHEDTOR pfnDtor*/
542 NULL, /* void *pvUser*/
543 0 /* uint32_t fFlags*/
544 );
545 if (RT_SUCCESS(rc))
546#endif
547 {
548 RTListInit(&pCmdVbva->GuestCtlList);
549 RTListInit(&pCmdVbva->HostCtlList);
550 pCmdVbva->i32State = VBVAEXHOSTCONTEXT_STATE_PROCESSING;
551 pCmdVbva->i32EnableState = VBVAEXHOSTCONTEXT_ESTATE_DISABLED;
552 return VINF_SUCCESS;
553 }
554#ifndef VBOXVDBG_MEMCACHE_DISABLE
555 else
556 WARN(("RTMemCacheCreate failed %d\n", rc));
557#endif
558 }
559 else
560 WARN(("RTCritSectInit failed %d\n", rc));
561
562 return rc;
563}
564
565DECLINLINE(bool) VBoxVBVAExHSIsEnabled(struct VBVAEXHOSTCONTEXT *pCmdVbva)
566{
567 return (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) >= VBVAEXHOSTCONTEXT_ESTATE_PAUSED);
568}
569
570DECLINLINE(bool) VBoxVBVAExHSIsDisabled(struct VBVAEXHOSTCONTEXT *pCmdVbva)
571{
572 return (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) == VBVAEXHOSTCONTEXT_ESTATE_DISABLED);
573}
574
575static int VBoxVBVAExHSEnable(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVABUFFER *pVBVA)
576{
577 if (VBoxVBVAExHSIsEnabled(pCmdVbva))
578 {
579 WARN(("VBVAEx is enabled already\n"));
580 return VERR_INVALID_STATE;
581 }
582
583 pCmdVbva->pVBVA = pVBVA;
584 pCmdVbva->pVBVA->hostFlags.u32HostEvents = 0;
585 ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_ENABLED);
586 return VINF_SUCCESS;
587}
588
589static int VBoxVBVAExHSDisable(struct VBVAEXHOSTCONTEXT *pCmdVbva)
590{
591 if (VBoxVBVAExHSIsDisabled(pCmdVbva))
592 return VINF_SUCCESS;
593
594 ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_DISABLED);
595 return VINF_SUCCESS;
596}
597
/** Tears down the extended VBVA context; counterpart of VBoxVBVAExHSInit.
 *  Both control lists must already be empty. */
static void VBoxVBVAExHSTerm(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    /* ensure the processor is stopped */
    Assert(pCmdVbva->i32State >= VBVAEXHOSTCONTEXT_STATE_LISTENING);

    /* ensure no one tries to submit the command */
    if (pCmdVbva->pVBVA)
        pCmdVbva->pVBVA->hostFlags.u32HostEvents = 0;

    Assert(RTListIsEmpty(&pCmdVbva->GuestCtlList));
    Assert(RTListIsEmpty(&pCmdVbva->HostCtlList));

    RTCritSectDelete(&pCmdVbva->CltCritSect);

#ifndef VBOXVDBG_MEMCACHE_DISABLE
    RTMemCacheDestroy(pCmdVbva->CtlCache);
#endif

    /* Scrub the structure so stale pointers cannot be misused. */
    memset(pCmdVbva, 0, sizeof (*pCmdVbva));
}
618
619static int vboxVBVAExHSSaveGuestCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL* pCtl, uint8_t* pu8VramBase, PSSMHANDLE pSSM)
620{
621 int rc = SSMR3PutU32(pSSM, pCtl->enmType);
622 AssertRCReturn(rc, rc);
623 rc = SSMR3PutU32(pSSM, pCtl->u.cmd.cbCmd);
624 AssertRCReturn(rc, rc);
625 rc = SSMR3PutU32(pSSM, (uint32_t)(pCtl->u.cmd.pu8Cmd - pu8VramBase));
626 AssertRCReturn(rc, rc);
627
628 return VINF_SUCCESS;
629}
630
/** Worker for VBoxVBVAExHSSaveState; caller holds CltCritSect.
 *  Writes every queued guest control followed by a terminating zero type
 *  (consumed by vboxVBVAExHSLoadGuestCtl). */
static int vboxVBVAExHSSaveStateLocked(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM)
{
    /* Saving is only valid while the VBVA is paused. */
    if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
    {
        WARN(("vbva not paused\n"));
        return VERR_INVALID_STATE;
    }

    VBVAEXHOSTCTL* pCtl;
    int rc;
    RTListForEach(&pCmdVbva->GuestCtlList, pCtl, VBVAEXHOSTCTL, Node)
    {
        rc = vboxVBVAExHSSaveGuestCtl(pCmdVbva, pCtl, pu8VramBase, pSSM);
        AssertRCReturn(rc, rc);
    }

    /* Zero type marks the end of the control list. */
    rc = SSMR3PutU32(pSSM, 0);
    AssertRCReturn(rc, rc);

    return VINF_SUCCESS;
}
/* Saves the (paused) VBVA control queue to the saved-state stream.
 * @returns VINF_SUCCESS, or a failure status on save error.
 * (Comment fixed: this is the save path, not load.)
 */
static int VBoxVBVAExHSSaveState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM)
{
    int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
    if (RT_FAILURE(rc))
    {
        WARN(("RTCritSectEnter failed %d\n", rc));
        return rc;
    }

    rc = vboxVBVAExHSSaveStateLocked(pCmdVbva, pu8VramBase, pSSM);
    if (RT_FAILURE(rc))
        WARN(("vboxVBVAExHSSaveStateLocked failed %d\n", rc));

    RTCritSectLeave(&pCmdVbva->CltCritSect);

    return rc;
}
672
673static int vboxVBVAExHSLoadGuestCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version)
674{
675 uint32_t u32;
676 int rc = SSMR3GetU32(pSSM, &u32);
677 AssertRCReturn(rc, rc);
678
679 if (!u32)
680 return VINF_EOF;
681
682 VBVAEXHOSTCTL* pHCtl = VBoxVBVAExHCtlCreate(pCmdVbva, (VBVAEXHOSTCTL_TYPE)u32);
683 if (!pHCtl)
684 {
685 WARN(("VBoxVBVAExHCtlCreate failed\n"));
686 return VERR_NO_MEMORY;
687 }
688
689 rc = SSMR3GetU32(pSSM, &u32);
690 AssertRCReturn(rc, rc);
691 pHCtl->u.cmd.cbCmd = u32;
692
693 rc = SSMR3GetU32(pSSM, &u32);
694 AssertRCReturn(rc, rc);
695 pHCtl->u.cmd.pu8Cmd = pu8VramBase + u32;
696
697 RTListAppend(&pCmdVbva->GuestCtlList, &pHCtl->Node);
698 ++pCmdVbva->u32cCtls;
699
700 return VINF_SUCCESS;
701}
702
703
704static int vboxVBVAExHSLoadStateLocked(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version)
705{
706 if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
707 {
708 WARN(("vbva not stopped\n"));
709 return VERR_INVALID_STATE;
710 }
711
712 int rc;
713
714 do {
715 rc = vboxVBVAExHSLoadGuestCtl(pCmdVbva, pu8VramBase, pSSM, u32Version);
716 AssertRCReturn(rc, rc);
717 } while (VINF_EOF != rc);
718
719 return VINF_SUCCESS;
720}
721
722/* Loads state
723 * @returns - same as VBoxVBVAExHSCheckCommands, or failure on load state fail
724 */
725static int VBoxVBVAExHSLoadState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version)
726{
727 Assert(VGA_SAVEDSTATE_VERSION_3D <= u32Version);
728 int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
729 if (RT_FAILURE(rc))
730 {
731 WARN(("RTCritSectEnter failed %d\n", rc));
732 return rc;
733 }
734
735 rc = vboxVBVAExHSLoadStateLocked(pCmdVbva, pu8VramBase, pSSM, u32Version);
736 if (RT_FAILURE(rc))
737 WARN(("vboxVBVAExHSSaveStateLocked failed %d\n", rc));
738
739 RTCritSectLeave(&pCmdVbva->CltCritSect);
740
741 return rc;
742}
743
/** Where a submitted control originates from; selects the target list in
 *  VBoxVBVAExHCtlSubmit. */
typedef enum
{
    VBVAEXHOSTCTL_SOURCE_GUEST = 0,
    VBVAEXHOSTCTL_SOURCE_HOST
} VBVAEXHOSTCTL_SOURCE;
749
750
751static int VBoxVBVAExHCtlSubmit(VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL* pCtl, VBVAEXHOSTCTL_SOURCE enmSource, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
752{
753 if (!VBoxVBVAExHSIsEnabled(pCmdVbva))
754 {
755 Log(("cmd vbva not enabled\n"));
756 return VERR_INVALID_STATE;
757 }
758
759 pCtl->pfnComplete = pfnComplete;
760 pCtl->pvComplete = pvComplete;
761
762 int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
763 if (RT_SUCCESS(rc))
764 {
765 if (!VBoxVBVAExHSIsEnabled(pCmdVbva))
766 {
767 Log(("cmd vbva not enabled\n"));
768 RTCritSectLeave(&pCmdVbva->CltCritSect);
769 return VERR_INVALID_STATE;
770 }
771
772 if (enmSource > VBVAEXHOSTCTL_SOURCE_GUEST)
773 {
774 RTListAppend(&pCmdVbva->HostCtlList, &pCtl->Node);
775 }
776 else
777 RTListAppend(&pCmdVbva->GuestCtlList, &pCtl->Node);
778
779 ASMAtomicIncU32(&pCmdVbva->u32cCtls);
780
781 RTCritSectLeave(&pCmdVbva->CltCritSect);
782
783 rc = VBoxVBVAExHSCheckCommands(pCmdVbva);
784 }
785 else
786 WARN(("RTCritSectEnter failed %d\n", rc));
787
788 return rc;
789}
790
#ifdef VBOX_WITH_CRHGSMI
/** Per-source screen information and its target map. */
typedef struct VBOXVDMA_SOURCE
{
    VBVAINFOSCREEN Screen;
    VBOXCMDVBVA_SCREENMAP_DECL(uint32_t, aTargetMap);
} VBOXVDMA_SOURCE;
#endif

/** The VDMA host instance data. */
typedef struct VBOXVDMAHOST
{
    PHGSMIINSTANCE pHgsmi;  /* HGSMI instance used for command transport */
    PVGASTATE pVGAState;    /* owning VGA device state */
#ifdef VBOX_WITH_CRHGSMI
    VBVAEXHOSTCONTEXT CmdVbva;  /* extended VBVA command context */
    VBOXVDMATHREAD Thread;      /* worker thread */
    VBOXCRCMD_SVRINFO CrSrvInfo;
    VBVAEXHOSTCTL* pCurRemainingHostCtl; /* NOTE(review): semantics inferred from name (host ctl left over mid-processing) — confirm against users */
    RTSEMEVENTMULTI HostCrCtlCompleteEvent;
    int32_t volatile i32cHostCrCtlCompleted;
// VBOXVDMA_SOURCE aSources[VBOX_VIDEO_MAX_SCREENS];
#endif
#ifdef VBOX_VDMA_WITH_WATCHDOG
    PTMTIMERR3 WatchDogTimer;   /* optional watchdog timer */
#endif
} VBOXVDMAHOST, *PVBOXVDMAHOST;
816
817#ifdef VBOX_WITH_CRHGSMI
818
819void VBoxVDMAThreadNotifyConstructSucceeded(PVBOXVDMATHREAD pThread, void *pvThreadContext)
820{
821 Assert(pThread->u32State == VBOXVDMATHREAD_STATE_CREATING);
822 PFNVBOXVDMATHREAD_CHANGED pfnChanged = pThread->pfnChanged;
823 void *pvChanged = pThread->pvChanged;
824
825 pThread->pfnChanged = NULL;
826 pThread->pvChanged = NULL;
827
828 ASMAtomicWriteU32(&pThread->u32State, VBOXVDMATHREAD_STATE_CREATED);
829
830 if (pfnChanged)
831 pfnChanged(pThread, VINF_SUCCESS, pvThreadContext, pvChanged);
832}
833
834void VBoxVDMAThreadNotifyTerminatingSucceeded(PVBOXVDMATHREAD pThread, void *pvThreadContext)
835{
836 Assert(pThread->u32State == VBOXVDMATHREAD_STATE_TERMINATING);
837 PFNVBOXVDMATHREAD_CHANGED pfnChanged = pThread->pfnChanged;
838 void *pvChanged = pThread->pvChanged;
839
840 pThread->pfnChanged = NULL;
841 pThread->pvChanged = NULL;
842
843 if (pfnChanged)
844 pfnChanged(pThread, VINF_SUCCESS, pvThreadContext, pvChanged);
845}
846
847DECLINLINE(bool) VBoxVDMAThreadIsTerminating(PVBOXVDMATHREAD pThread)
848{
849 return ASMAtomicUoReadU32(&pThread->u32State) == VBOXVDMATHREAD_STATE_TERMINATING;
850}
851
/** Initializes the thread wrapper to the TERMINATED (idle) state. */
void VBoxVDMAThreadInit(PVBOXVDMATHREAD pThread)
{
    memset(pThread, 0, sizeof (*pThread));
    pThread->u32State = VBOXVDMATHREAD_STATE_TERMINATED;
}
857
858int VBoxVDMAThreadCleanup(PVBOXVDMATHREAD pThread)
859{
860 uint32_t u32State = ASMAtomicUoReadU32(&pThread->u32State);
861 switch (u32State)
862 {
863 case VBOXVDMATHREAD_STATE_TERMINATED:
864 return VINF_SUCCESS;
865 case VBOXVDMATHREAD_STATE_TERMINATING:
866 {
867 int rc = RTThreadWait(pThread->hWorkerThread, RT_INDEFINITE_WAIT, NULL);
868 if (!RT_SUCCESS(rc))
869 {
870 WARN(("RTThreadWait failed %d\n", rc));
871 return rc;
872 }
873
874 RTSemEventDestroy(pThread->hEvent);
875
876 ASMAtomicWriteU32(&pThread->u32State, VBOXVDMATHREAD_STATE_TERMINATED);
877 return VINF_SUCCESS;
878 }
879 default:
880 WARN(("invalid state"));
881 return VERR_INVALID_STATE;
882 }
883}
884
885int VBoxVDMAThreadCreate(PVBOXVDMATHREAD pThread, PFNRTTHREAD pfnThread, void *pvThread, PFNVBOXVDMATHREAD_CHANGED pfnCreated, void*pvCreated)
886{
887 int rc = VBoxVDMAThreadCleanup(pThread);
888 if (RT_FAILURE(rc))
889 {
890 WARN(("VBoxVDMAThreadCleanup failed %d\n", rc));
891 return rc;
892 }
893
894 rc = RTSemEventCreate(&pThread->hEvent);
895 if (RT_SUCCESS(rc))
896 {
897 pThread->u32State = VBOXVDMATHREAD_STATE_CREATING;
898 pThread->pfnChanged = pfnCreated;
899 pThread->pvChanged = pvCreated;
900 rc = RTThreadCreate(&pThread->hWorkerThread, pfnThread, pvThread, 0, RTTHREADTYPE_IO, RTTHREADFLAGS_WAITABLE, "VDMA");
901 if (RT_SUCCESS(rc))
902 return VINF_SUCCESS;
903 else
904 WARN(("RTThreadCreate failed %d\n", rc));
905
906 RTSemEventDestroy(pThread->hEvent);
907 }
908 else
909 WARN(("RTSemEventCreate failed %d\n", rc));
910
911 pThread->u32State = VBOXVDMATHREAD_STATE_TERMINATED;
912
913 return rc;
914}
915
916DECLINLINE(int) VBoxVDMAThreadEventNotify(PVBOXVDMATHREAD pThread)
917{
918 int rc = RTSemEventSignal(pThread->hEvent);
919 AssertRC(rc);
920 return rc;
921}
922
923DECLINLINE(int) VBoxVDMAThreadEventWait(PVBOXVDMATHREAD pThread, RTMSINTERVAL cMillies)
924{
925 int rc = RTSemEventWait(pThread->hEvent, cMillies);
926 AssertRC(rc);
927 return rc;
928}
929
930int VBoxVDMAThreadTerm(PVBOXVDMATHREAD pThread, PFNVBOXVDMATHREAD_CHANGED pfnTerminated, void*pvTerminated, bool fNotify)
931{
932 int rc;
933 do
934 {
935 uint32_t u32State = ASMAtomicUoReadU32(&pThread->u32State);
936 switch (u32State)
937 {
938 case VBOXVDMATHREAD_STATE_CREATED:
939 pThread->pfnChanged = pfnTerminated;
940 pThread->pvChanged = pvTerminated;
941 ASMAtomicWriteU32(&pThread->u32State, VBOXVDMATHREAD_STATE_TERMINATING);
942 if (fNotify)
943 {
944 rc = VBoxVDMAThreadEventNotify(pThread);
945 AssertRC(rc);
946 }
947 return VINF_SUCCESS;
948 case VBOXVDMATHREAD_STATE_TERMINATING:
949 case VBOXVDMATHREAD_STATE_TERMINATED:
950 {
951 WARN(("thread is marked to termination or terminated\nn"));
952 return VERR_INVALID_STATE;
953 }
954 case VBOXVDMATHREAD_STATE_CREATING:
955 {
956 /* wait till the thread creation is completed */
957 WARN(("concurrent thread create/destron\n"));
958 RTThreadYield();
959 continue;
960 }
961 default:
962 WARN(("invalid state"));
963 return VERR_INVALID_STATE;
964 }
965 } while (1);
966
967 WARN(("should never be here\n"));
968 return VERR_INTERNAL_ERROR;
969}
970
971static int vdmaVBVACtlSubmitSync(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL* pCtl, VBVAEXHOSTCTL_SOURCE enmSource);
972
973typedef DECLCALLBACK(void) FNVBOXVDMACRCTL_CALLBACK(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, void* pvContext);
974typedef FNVBOXVDMACRCTL_CALLBACK *PFNVBOXVDMACRCTL_CALLBACK;
975
/** Private header prepended to every VBOXVDMACMD_CHROMIUM_CTL allocation. */
typedef struct VBOXVDMACMD_CHROMIUM_CTL_PRIVATE
{
    uint32_t cRefs;                          /* reference count; freed when it reaches zero */
    int32_t rc;                              /* completion status; VERR_NOT_IMPLEMENTED until completed */
    PFNVBOXVDMACRCTL_CALLBACK pfnCompletion; /* completion callback */
    void *pvCompletion;                      /* user context for pfnCompletion */
    VBOXVDMACMD_CHROMIUM_CTL Cmd;            /* public command; must be last (payload follows) */
} VBOXVDMACMD_CHROMIUM_CTL_PRIVATE, *PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE;

/** Maps a public command pointer back to its private header. */
#define VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(_p) ((PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE)(((uint8_t*)(_p)) - RT_OFFSETOF(VBOXVDMACMD_CHROMIUM_CTL_PRIVATE, Cmd)))
986
987static PVBOXVDMACMD_CHROMIUM_CTL vboxVDMACrCtlCreate(VBOXVDMACMD_CHROMIUM_CTL_TYPE enmCmd, uint32_t cbCmd)
988{
989 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = (PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE)RTMemAllocZ(cbCmd + RT_OFFSETOF(VBOXVDMACMD_CHROMIUM_CTL_PRIVATE, Cmd));
990 Assert(pHdr);
991 if (pHdr)
992 {
993 pHdr->cRefs = 1;
994 pHdr->rc = VERR_NOT_IMPLEMENTED;
995 pHdr->Cmd.enmType = enmCmd;
996 pHdr->Cmd.cbCmd = cbCmd;
997 return &pHdr->Cmd;
998 }
999
1000 return NULL;
1001}
1002
1003DECLINLINE(void) vboxVDMACrCtlRelease (PVBOXVDMACMD_CHROMIUM_CTL pCmd)
1004{
1005 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
1006 uint32_t cRefs = ASMAtomicDecU32(&pHdr->cRefs);
1007 if(!cRefs)
1008 {
1009 RTMemFree(pHdr);
1010 }
1011}
1012
1013DECLINLINE(void) vboxVDMACrCtlRetain (PVBOXVDMACMD_CHROMIUM_CTL pCmd)
1014{
1015 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
1016 ASMAtomicIncU32(&pHdr->cRefs);
1017}
1018
1019DECLINLINE(int) vboxVDMACrCtlGetRc (PVBOXVDMACMD_CHROMIUM_CTL pCmd)
1020{
1021 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
1022 return pHdr->rc;
1023}
1024
1025static DECLCALLBACK(void) vboxVDMACrCtlCbSetEvent(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, void* pvContext)
1026{
1027 RTSemEventSignal((RTSEMEVENT)pvContext);
1028}
1029
1030static DECLCALLBACK(void) vboxVDMACrCtlCbReleaseCmd(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, void* pvContext)
1031{
1032 vboxVDMACrCtlRelease(pCmd);
1033}
1034
1035
1036static int vboxVDMACrCtlPostAsync (PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, uint32_t cbCmd, PFNVBOXVDMACRCTL_CALLBACK pfnCompletion, void *pvCompletion)
1037{
1038 if ( pVGAState->pDrv
1039 && pVGAState->pDrv->pfnCrHgsmiControlProcess)
1040 {
1041 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
1042 pHdr->pfnCompletion = pfnCompletion;
1043 pHdr->pvCompletion = pvCompletion;
1044 pVGAState->pDrv->pfnCrHgsmiControlProcess(pVGAState->pDrv, pCmd, cbCmd);
1045 return VINF_SUCCESS;
1046 }
1047#ifdef DEBUG_misha
1048 Assert(0);
1049#endif
1050 return VERR_NOT_SUPPORTED;
1051}
1052
/**
 * Posts a chromium control command and blocks until it completes.
 *
 * If the asynchronous submit fails, the command is treated as completed and
 * the event semaphore is destroyed immediately.  If the wait itself fails
 * (e.g. interrupted), the semaphore is deliberately NOT destroyed: the
 * completion callback may still fire later and signal it, so destroying it
 * here would risk signalling a dead handle; the handle is leaked instead.
 */
static int vboxVDMACrCtlPost(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, uint32_t cbCmd)
{
    RTSEMEVENT hComplEvent;
    int rc = RTSemEventCreate(&hComplEvent);
    AssertRC(rc);
    if(RT_SUCCESS(rc))
    {
        /* the completion callback just signals hComplEvent */
        rc = vboxVDMACrCtlPostAsync(pVGAState, pCmd, cbCmd, vboxVDMACrCtlCbSetEvent, (void*)hComplEvent);
#ifdef DEBUG_misha
        AssertRC(rc);
#endif
        if (RT_SUCCESS(rc))
        {
            rc = RTSemEventWaitNoResume(hComplEvent, RT_INDEFINITE_WAIT);
            AssertRC(rc);
            if(RT_SUCCESS(rc))
            {
                RTSemEventDestroy(hComplEvent);
            }
            /* else: semaphore intentionally leaked — see function comment */
        }
        else
        {
            /* the command is completed */
            RTSemEventDestroy(hComplEvent);
        }
    }
    return rc;
}
1081
/* Context for a synchronous HGCM control submission: the completion callback
 * records the command status in rc and signals hEvent.
 * ("CYNC" is a historical typo for "SYNC"; kept because other code in this
 * file references the name.) */
typedef struct VDMA_VBVA_CTL_CYNC_COMPLETION
{
    int rc;            /* command status stored by the completion callback */
    RTSEMEVENT hEvent; /* signalled when the command completes */
} VDMA_VBVA_CTL_CYNC_COMPLETION;
1087
1088static DECLCALLBACK(void) vboxVDMACrHgcmSubmitSyncCompletion(struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd, int rc, void *pvCompletion)
1089{
1090 VDMA_VBVA_CTL_CYNC_COMPLETION *pData = (VDMA_VBVA_CTL_CYNC_COMPLETION*)pvCompletion;
1091 pData->rc = rc;
1092 rc = RTSemEventSignal(pData->hEvent);
1093 if (!RT_SUCCESS(rc))
1094 WARN(("RTSemEventSignal failed %d\n", rc));
1095}
1096
1097static int vboxVDMACrHgcmSubmitSync(struct VBOXVDMAHOST *pVdma, VBOXCRCMDCTL* pCtl, uint32_t cbCtl)
1098{
1099 VDMA_VBVA_CTL_CYNC_COMPLETION Data;
1100 Data.rc = VERR_NOT_IMPLEMENTED;
1101 int rc = RTSemEventCreate(&Data.hEvent);
1102 if (!RT_SUCCESS(rc))
1103 {
1104 WARN(("RTSemEventCreate failed %d\n", rc));
1105 return rc;
1106 }
1107
1108 PVGASTATE pVGAState = pVdma->pVGAState;
1109 rc = pVGAState->pDrv->pfnCrHgcmCtlSubmit(pVGAState->pDrv, pCtl, cbCtl, vboxVDMACrHgcmSubmitSyncCompletion, &Data);
1110 if (RT_SUCCESS(rc))
1111 {
1112 rc = RTSemEventWait(Data.hEvent, RT_INDEFINITE_WAIT);
1113 if (RT_SUCCESS(rc))
1114 {
1115 rc = Data.rc;
1116 if (!RT_SUCCESS(rc))
1117 {
1118 WARN(("pfnCrHgcmCtlSubmit command failed %d\n", rc));
1119 }
1120
1121 }
1122 else
1123 WARN(("RTSemEventWait failed %d\n", rc));
1124 }
1125 else
1126 WARN(("pfnCrHgcmCtlSubmit failed %d\n", rc));
1127
1128
1129 RTSemEventDestroy(Data.hEvent);
1130
1131 return rc;
1132}
1133
/** Synchronously submits a host-sourced DISABLE control to the VBVA command
 *  processing state. */
static int vdmaVBVACtlDisableSync(PVBOXVDMAHOST pVdma)
{
    VBVAEXHOSTCTL HCtl;
    HCtl.enmType = VBVAEXHOSTCTL_TYPE_GHH_DISABLE; /* only enmType is consumed for this control type */
    return vdmaVBVACtlSubmitSync(pVdma, &HCtl, VBVAEXHOSTCTL_SOURCE_HOST);
}
1140
/**
 * "Remaining host command" iterator used while switching host controls back
 * to the HGCM channel.
 *
 * Called repeatedly by the backend.  On the first call
 * (pCurRemainingHostCtl == NULL) VBVA is disabled so subsequent host commands
 * go the HGCM way; on later calls the previously returned control is
 * completed with @a prevCmdRc.  Each call then fetches the next pending host
 * control and returns its buffer, or NULL (with *pcbCtl = 0) when drained.
 */
static DECLCALLBACK(uint8_t*) vboxVDMACrHgcmHandleEnableRemainingHostCommand(HVBOXCRCMDCTL_REMAINING_HOST_COMMAND hClient, uint32_t *pcbCtl, int prevCmdRc)
{
    struct VBOXVDMAHOST *pVdma = hClient;
    if (!pVdma->pCurRemainingHostCtl)
    {
        /* disable VBVA, all subsequent host commands will go HGCM way */
        VBoxVBVAExHSDisable(&pVdma->CmdVbva);
    }
    else
    {
        VBoxVBVAExHPDataCompleteCtl(&pVdma->CmdVbva, pVdma->pCurRemainingHostCtl, prevCmdRc);
    }

    pVdma->pCurRemainingHostCtl = VBoxVBVAExHPCheckHostCtlOnDisable(&pVdma->CmdVbva);
    if (pVdma->pCurRemainingHostCtl)
    {
        *pcbCtl = pVdma->pCurRemainingHostCtl->u.cmd.cbCmd;
        return pVdma->pCurRemainingHostCtl->u.cmd.pu8Cmd;
    }

    *pcbCtl = 0;
    return NULL;
}
1164
/**
 * "Termination done" notification from the HGCM service; only sanity-checks
 * that the VBVA context is still processing and the worker thread is in its
 * terminating state.
 */
static DECLCALLBACK(void) vboxVDMACrHgcmNotifyTerminatingDoneCb(HVBOXCRCMDCTL_NOTIFY_TERMINATING hClient)
{
    struct VBOXVDMAHOST *pVdma = hClient;
    Assert(pVdma->CmdVbva.i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
    Assert(pVdma->Thread.u32State == VBOXVDMATHREAD_STATE_TERMINATING);
}
1171
/**
 * "Terminating" notification from the HGCM service (service unload).
 *
 * Pushes an HH_ON_HGCM_UNLOAD control through the host control path
 * synchronously and fills in @a pHgcmEnableData so the backend can drain
 * remaining host controls via the iterator above.  VERR_INVALID_STATE from
 * the submit is translated to success — presumably VBVA was not enabled in
 * the first place (TODO(review): confirm this is the only source of that
 * status here).
 */
static DECLCALLBACK(int) vboxVDMACrHgcmNotifyTerminatingCb(HVBOXCRCMDCTL_NOTIFY_TERMINATING hClient, VBOXCRCMDCTL_HGCMENABLE_DATA *pHgcmEnableData)
{
    struct VBOXVDMAHOST *pVdma = hClient;
    VBVAEXHOSTCTL HCtl;
    HCtl.enmType = VBVAEXHOSTCTL_TYPE_HH_ON_HGCM_UNLOAD;
    int rc = vdmaVBVACtlSubmitSync(pVdma, &HCtl, VBVAEXHOSTCTL_SOURCE_HOST);

    pHgcmEnableData->hRHCmd = pVdma;
    pHgcmEnableData->pfnRHCmd = vboxVDMACrHgcmHandleEnableRemainingHostCommand;

    if (RT_FAILURE(rc))
    {
        if (rc == VERR_INVALID_STATE)
            rc = VINF_SUCCESS;
        else
            WARN(("vdmaVBVACtlSubmitSync failed %d\n", rc));
    }

    return rc;
}
1192
1193static int vboxVDMACrHgcmHandleEnable(struct VBOXVDMAHOST *pVdma)
1194{
1195 VBOXCRCMDCTL_ENABLE Enable;
1196 Enable.Hdr.enmType = VBOXCRCMDCTL_TYPE_ENABLE;
1197 Enable.Data.hRHCmd = pVdma;
1198 Enable.Data.pfnRHCmd = vboxVDMACrHgcmHandleEnableRemainingHostCommand;
1199
1200 int rc = vboxVDMACrHgcmSubmitSync(pVdma, &Enable.Hdr, sizeof (Enable));
1201 Assert(!pVdma->pCurRemainingHostCtl);
1202 if (RT_SUCCESS(rc))
1203 {
1204 Assert(!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva));
1205 return VINF_SUCCESS;
1206 }
1207
1208 Assert(VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva));
1209 WARN(("vboxVDMACrHgcmSubmitSync failed %d\n", rc));
1210
1211 return rc;
1212}
1213
1214static int vdmaVBVAEnableProcess(struct VBOXVDMAHOST *pVdma, uint32_t u32Offset)
1215{
1216 if (VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
1217 {
1218 WARN(("vdma VBVA is already enabled\n"));
1219 return VERR_INVALID_STATE;
1220 }
1221
1222 VBVABUFFER *pVBVA = (VBVABUFFER *)HGSMIOffsetToPointerHost(pVdma->pHgsmi, u32Offset);
1223 if (!pVBVA)
1224 {
1225 WARN(("invalid offset %d\n", u32Offset));
1226 return VERR_INVALID_PARAMETER;
1227 }
1228
1229 if (!pVdma->CrSrvInfo.pfnEnable)
1230 {
1231#ifdef DEBUG_misha
1232 WARN(("pfnEnable is NULL\n"));
1233 return VERR_NOT_SUPPORTED;
1234#endif
1235 }
1236
1237 int rc = VBoxVBVAExHSEnable(&pVdma->CmdVbva, pVBVA);
1238 if (RT_SUCCESS(rc))
1239 {
1240 VBOXCRCMDCTL_DISABLE Disable;
1241 Disable.Hdr.enmType = VBOXCRCMDCTL_TYPE_DISABLE;
1242 Disable.Data.hNotifyTerm = pVdma;
1243 Disable.Data.pfnNotifyTerm = vboxVDMACrHgcmNotifyTerminatingCb;
1244 Disable.Data.pfnNotifyTermDone = vboxVDMACrHgcmNotifyTerminatingDoneCb;
1245 rc = vboxVDMACrHgcmSubmitSync(pVdma, &Disable.Hdr, sizeof (Disable));
1246 if (RT_SUCCESS(rc))
1247 {
1248 PVGASTATE pVGAState = pVdma->pVGAState;
1249 VBOXCRCMD_SVRENABLE_INFO Info;
1250 Info.hCltScr = pVGAState->pDrv;
1251 Info.pfnCltScrUpdateBegin = pVGAState->pDrv->pfnVBVAUpdateBegin;
1252 Info.pfnCltScrUpdateProcess = pVGAState->pDrv->pfnVBVAUpdateProcess;
1253 Info.pfnCltScrUpdateEnd = pVGAState->pDrv->pfnVBVAUpdateEnd;
1254 rc = pVdma->CrSrvInfo.pfnEnable(pVdma->CrSrvInfo.hSvr, &Info);
1255 if (RT_SUCCESS(rc))
1256 return VINF_SUCCESS;
1257 else
1258 WARN(("pfnEnable failed %d\n", rc));
1259
1260 vboxVDMACrHgcmHandleEnable(pVdma);
1261 }
1262 else
1263 WARN(("vboxVDMACrHgcmSubmitSync failed %d\n", rc));
1264
1265 VBoxVBVAExHSDisable(&pVdma->CmdVbva);
1266 }
1267 else
1268 WARN(("VBoxVBVAExHSEnable failed %d\n", rc));
1269
1270 return rc;
1271}
1272
/**
 * Disables command VBVA processing.
 *
 * @param fDoHgcmEnable  true for a guest-initiated disable: host commands are
 *                       switched back to the HGCM channel here; false when the
 *                       HGCM service itself is unloading and handles the switch.
 *
 * NOTE(review): pfnDisable is called without a NULL check, while
 * vdmaVBVAEnableProcess tolerates a NULL pfnEnable — confirm CrSrvInfo is
 * always populated whenever VBVA is enabled.
 */
static int vdmaVBVADisableProcess(struct VBOXVDMAHOST *pVdma, bool fDoHgcmEnable)
{
    if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
    {
        Log(("vdma VBVA is already disabled\n"));
        return VINF_SUCCESS;
    }

    int rc = pVdma->CrSrvInfo.pfnDisable(pVdma->CrSrvInfo.hSvr);
    if (RT_SUCCESS(rc))
    {
        if (fDoHgcmEnable)
        {
            PVGASTATE pVGAState = pVdma->pVGAState;

            /* disable is a bit tricky
             * we need to ensure the host ctl commands do not come out of order
             * and do not come over HGCM channel until after it is enabled */
            rc = vboxVDMACrHgcmHandleEnable(pVdma);
            if (RT_SUCCESS(rc))
            {
                vdmaVBVANotifyDisable(pVGAState);
                return VINF_SUCCESS;
            }

            /* switching to HGCM failed: roll the chromium server back to the
             * enabled state so processing can continue */
            VBOXCRCMD_SVRENABLE_INFO Info;
            Info.hCltScr = pVGAState->pDrv;
            Info.pfnCltScrUpdateBegin = pVGAState->pDrv->pfnVBVAUpdateBegin;
            Info.pfnCltScrUpdateProcess = pVGAState->pDrv->pfnVBVAUpdateProcess;
            Info.pfnCltScrUpdateEnd = pVGAState->pDrv->pfnVBVAUpdateEnd;
            pVdma->CrSrvInfo.pfnEnable(pVdma->CrSrvInfo.hSvr, &Info);
        }
    }
    else
        WARN(("pfnDisable failed %d\n", rc));

    return rc;
}
1311
1312static int vboxVDMACrHostCtlProcess(struct VBOXVDMAHOST *pVdma, VBVAEXHOSTCTL *pCmd, bool *pfContinue)
1313{
1314 *pfContinue = true;
1315
1316 switch (pCmd->enmType)
1317 {
1318 case VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE:
1319 {
1320 if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
1321 {
1322 WARN(("VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE for disabled vdma VBVA\n"));
1323 return VERR_INVALID_STATE;
1324 }
1325 return pVdma->CrSrvInfo.pfnHostCtl(pVdma->CrSrvInfo.hSvr, pCmd->u.cmd.pu8Cmd, pCmd->u.cmd.cbCmd);
1326 }
1327 case VBVAEXHOSTCTL_TYPE_GHH_DISABLE:
1328 {
1329 int rc = vdmaVBVADisableProcess(pVdma, true);
1330 if (RT_FAILURE(rc))
1331 {
1332 WARN(("vdmaVBVADisableProcess failed %d\n", rc));
1333 return rc;
1334 }
1335
1336 return VBoxVDMAThreadTerm(&pVdma->Thread, NULL, NULL, false);
1337 }
1338 case VBVAEXHOSTCTL_TYPE_HH_ON_HGCM_UNLOAD:
1339 {
1340 int rc = vdmaVBVADisableProcess(pVdma, false);
1341 if (RT_FAILURE(rc))
1342 {
1343 WARN(("vdmaVBVADisableProcess failed %d\n", rc));
1344 return rc;
1345 }
1346
1347 rc = VBoxVDMAThreadTerm(&pVdma->Thread, NULL, NULL, true);
1348 if (RT_FAILURE(rc))
1349 {
1350 WARN(("VBoxVDMAThreadTerm failed %d\n", rc));
1351 return rc;
1352 }
1353
1354 *pfContinue = false;
1355 return VINF_SUCCESS;
1356 }
1357 case VBVAEXHOSTCTL_TYPE_HH_SAVESTATE:
1358 {
1359 PVGASTATE pVGAState = pVdma->pVGAState;
1360 uint8_t * pu8VramBase = pVGAState->vram_ptrR3;
1361 int rc = VBoxVBVAExHSSaveState(&pVdma->CmdVbva, pu8VramBase, pCmd->u.state.pSSM);
1362 if (RT_FAILURE(rc))
1363 {
1364 WARN(("VBoxVBVAExHSSaveState failed %d\n", rc));
1365 return rc;
1366 }
1367 return pVdma->CrSrvInfo.pfnSaveState(pVdma->CrSrvInfo.hSvr, pCmd->u.state.pSSM);
1368 }
1369 case VBVAEXHOSTCTL_TYPE_HH_LOADSTATE:
1370 {
1371 PVGASTATE pVGAState = pVdma->pVGAState;
1372 uint8_t * pu8VramBase = pVGAState->vram_ptrR3;
1373
1374 int rc = VBoxVBVAExHSLoadState(&pVdma->CmdVbva, pu8VramBase, pCmd->u.state.pSSM, pCmd->u.state.u32Version);
1375 if (RT_FAILURE(rc))
1376 {
1377 WARN(("VBoxVBVAExHSSaveState failed %d\n", rc));
1378 return rc;
1379 }
1380
1381 rc = pVdma->CrSrvInfo.pfnLoadState(pVdma->CrSrvInfo.hSvr, pCmd->u.state.pSSM, pCmd->u.state.u32Version);
1382 if (RT_FAILURE(rc))
1383 {
1384 WARN(("pfnLoadState failed %d\n", rc));
1385 return rc;
1386 }
1387
1388 return VINF_SUCCESS;
1389 }
1390 case VBVAEXHOSTCTL_TYPE_HH_LOADSTATE_DONE:
1391 {
1392 PVGASTATE pVGAState = pVdma->pVGAState;
1393
1394 for (uint32_t i = 0; i < pVGAState->cMonitors; ++i)
1395 {
1396 VBVAINFOSCREEN CurScreen;
1397 VBVAINFOVIEW CurView;
1398
1399 int rc = VBVAGetInfoViewAndScreen(pVGAState, i, &CurView, &CurScreen);
1400 if (RT_FAILURE(rc))
1401 {
1402 WARN(("VBVAGetInfoViewAndScreen failed %d\n", rc));
1403 return rc;
1404 }
1405
1406 rc = VBVAInfoScreen(pVGAState, &CurScreen);
1407 if (RT_FAILURE(rc))
1408 {
1409 WARN(("VBVAInfoScreen failed %d\n", rc));
1410 return rc;
1411 }
1412 }
1413
1414 return VINF_SUCCESS;
1415 }
1416 default:
1417 WARN(("unexpected host ctl type %d\n", pCmd->enmType));
1418 return VERR_INVALID_PARAMETER;
1419 }
1420}
1421
/**
 * Applies a single guest resize entry to the targeted screens and forwards
 * the resulting screen configuration to the chromium server.
 *
 * The entry carries a screen descriptor plus a bitmap of target screens; bits
 * beyond the configured monitor count are cleared before use.  A
 * VBVA_SCREEN_F_DISABLED request is normalized to a zeroed screen record.
 */
static int vboxVDMACrGuestCtlResizeEntryProcess(struct VBOXVDMAHOST *pVdma, VBOXCMDVBVA_RESIZE_ENTRY *pEntry)
{
    PVGASTATE pVGAState = pVdma->pVGAState;
    VBVAINFOSCREEN Screen = pEntry->Screen;
    VBVAINFOVIEW View;
    VBOXCMDVBVA_SCREENMAP_DECL(uint32_t, aTargetMap);
    uint32_t u32ViewIndex = Screen.u32ViewIndex;
    uint16_t u16Flags = Screen.u16Flags;
    bool fDisable = false;

    memcpy(aTargetMap, pEntry->aTargetMap, sizeof (aTargetMap));

    /* ignore bits for screens we do not have */
    ASMBitClearRange(aTargetMap, pVGAState->cMonitors, VBOX_VIDEO_MAX_SCREENS);

    if (u16Flags & VBVA_SCREEN_F_DISABLED)
    {
        fDisable = true;
        memset(&Screen, 0, sizeof (Screen));
        Screen.u32ViewIndex = u32ViewIndex;
        Screen.u16Flags = VBVA_SCREEN_F_ACTIVE | VBVA_SCREEN_F_DISABLED;
    }

    /* NOTE(review): '>' lets u32ViewIndex == cMonitors through — confirm
     * whether this was meant to be '>='. */
    if (u32ViewIndex > pVGAState->cMonitors)
    {
        if (u32ViewIndex != 0xffffffff)
        {
            WARN(("invalid view index\n"));
            return VERR_INVALID_PARAMETER;
        }
        else if (!fDisable)
        {
            WARN(("0xffffffff view index only valid for disable requests\n"));
            return VERR_INVALID_PARAMETER;
        }
    }

    View.u32ViewOffset = 0;
    View.u32ViewSize = Screen.u32LineSize * Screen.u32Height + Screen.u32StartOffset;
    View.u32MaxScreenSize = View.u32ViewSize + Screen.u32Width + 1; /* <- make VBVAInfoScreen logic (offEnd < pView->u32MaxScreenSize) happy */

    int rc = VINF_SUCCESS;

    /* iterate over the set bits of the (already clipped) target map */
    for (int i = ASMBitFirstSet(aTargetMap, pVGAState->cMonitors);
            i >= 0;
            i = ASMBitNextSet(aTargetMap, pVGAState->cMonitors, i))
    {
        Screen.u32ViewIndex = i;

        VBVAINFOSCREEN CurScreen;
        VBVAINFOVIEW CurView;

        rc = VBVAGetInfoViewAndScreen(pVGAState, i, &CurView, &CurScreen);
        AssertRC(rc);

        /* skip screens that are already in the requested configuration */
        if (!memcmp(&Screen, &CurScreen, sizeof (CurScreen)))
            continue;

        if (!fDisable || !CurView.u32ViewSize)
        {
            View.u32ViewIndex = Screen.u32ViewIndex;

            rc = VBVAInfoView(pVGAState, &View);
            if (RT_FAILURE(rc))
            {
                WARN(("VBVAInfoView failed %d\n", rc));
                break;
            }
        }

        rc = VBVAInfoScreen(pVGAState, &Screen);
        if (RT_FAILURE(rc))
        {
            WARN(("VBVAInfoScreen failed %d\n", rc));
            break;
        }
    }

    if (RT_FAILURE(rc))
        return rc;

    Screen.u32ViewIndex = u32ViewIndex;

    rc = pVdma->CrSrvInfo.pfnResize(pVdma->CrSrvInfo.hSvr, &Screen, aTargetMap);
    if (RT_FAILURE(rc))
        WARN(("pfnResize failed %d\n", rc));

    return rc;
}
1510
1511static int vboxVDMACrGuestCtlProcess(struct VBOXVDMAHOST *pVdma, VBVAEXHOSTCTL *pCmd)
1512{
1513 VBVAEXHOSTCTL_TYPE enmType = pCmd->enmType;
1514 switch (enmType)
1515 {
1516 case VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE:
1517 {
1518 if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
1519 {
1520 WARN(("VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE for disabled vdma VBVA\n"));
1521 return VERR_INVALID_STATE;
1522 }
1523 return pVdma->CrSrvInfo.pfnGuestCtl(pVdma->CrSrvInfo.hSvr, pCmd->u.cmd.pu8Cmd, pCmd->u.cmd.cbCmd);
1524 }
1525 case VBVAEXHOSTCTL_TYPE_GHH_RESIZE:
1526 {
1527 if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
1528 {
1529 WARN(("VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE for disabled vdma VBVA\n"));
1530 return VERR_INVALID_STATE;
1531 }
1532
1533 uint32_t cbCmd = pCmd->u.cmd.cbCmd;
1534
1535 if (cbCmd % sizeof (VBOXCMDVBVA_RESIZE_ENTRY))
1536 {
1537 WARN(("invalid buffer size\n"));
1538 return VERR_INVALID_PARAMETER;
1539 }
1540
1541 uint32_t cElements = cbCmd / sizeof (VBOXCMDVBVA_RESIZE_ENTRY);
1542 if (!cElements)
1543 {
1544 WARN(("invalid buffer size\n"));
1545 return VERR_INVALID_PARAMETER;
1546 }
1547
1548 VBOXCMDVBVA_RESIZE *pResize = (VBOXCMDVBVA_RESIZE*)pCmd->u.cmd.pu8Cmd;
1549
1550 int rc = VINF_SUCCESS;
1551
1552 for (uint32_t i = 0; i < cElements; ++i)
1553 {
1554 VBOXCMDVBVA_RESIZE_ENTRY *pEntry = &pResize->aEntries[i];
1555 rc = vboxVDMACrGuestCtlResizeEntryProcess(pVdma, pEntry);
1556 if (RT_FAILURE(rc))
1557 {
1558 WARN(("vboxVDMACrGuestCtlResizeEntryProcess failed %d\n", rc));
1559 break;
1560 }
1561 }
1562 return rc;
1563 }
1564 case VBVAEXHOSTCTL_TYPE_GHH_ENABLE:
1565 case VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED:
1566 {
1567 VBVAENABLE *pEnable = (VBVAENABLE *)pCmd->u.cmd.pu8Cmd;
1568 Assert(pCmd->u.cmd.cbCmd == sizeof (VBVAENABLE));
1569 uint32_t u32Offset = pEnable->u32Offset;
1570 int rc = vdmaVBVAEnableProcess(pVdma, u32Offset);
1571 if (!RT_SUCCESS(rc))
1572 {
1573 WARN(("vdmaVBVAEnableProcess failed %d\n", rc));
1574 return rc;
1575 }
1576
1577 if (enmType == VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED)
1578 {
1579 rc = VBoxVBVAExHPPause(&pVdma->CmdVbva);
1580 if (!RT_SUCCESS(rc))
1581 {
1582 WARN(("VBoxVBVAExHPPause failed %d\n", rc));
1583 return rc;
1584 }
1585 }
1586
1587 return VINF_SUCCESS;
1588 }
1589 case VBVAEXHOSTCTL_TYPE_GHH_DISABLE:
1590 {
1591 int rc = vdmaVBVADisableProcess(pVdma, true);
1592 if (RT_FAILURE(rc))
1593 {
1594 WARN(("vdmaVBVADisableProcess failed %d\n", rc));
1595 return rc;
1596 }
1597
1598 return VBoxVDMAThreadTerm(&pVdma->Thread, NULL, NULL, false);
1599 }
1600 default:
1601 WARN(("unexpected ctl type %d\n", pCmd->enmType));
1602 return VERR_INVALID_PARAMETER;
1603 }
1604}
1605
1606/**
1607 * @param fIn - whether this is a page in or out op.
1608 * the direction is VRA#M - related, so fIn == true - transfer to VRAM); false - transfer from VRAM
1609 */
1610static int vboxVDMACrCmdVbvaProcessPagingEl(PPDMDEVINS pDevIns, VBOXCMDVBVAPAGEIDX iPage, uint8_t *pu8Vram, bool fIn)
1611{
1612 RTGCPHYS phPage = (RTGCPHYS)(iPage << PAGE_SHIFT);
1613 PGMPAGEMAPLOCK Lock;
1614 int rc;
1615
1616 if (fIn)
1617 {
1618 const void * pvPage;
1619 rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phPage, 0, &pvPage, &Lock);
1620 if (!RT_SUCCESS(rc))
1621 {
1622 WARN(("PDMDevHlpPhysGCPhys2CCPtrReadOnly failed %d", rc));
1623 return rc;
1624 }
1625
1626 memcpy(pu8Vram, pvPage, PAGE_SIZE);
1627
1628 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
1629 }
1630 else
1631 {
1632 void * pvPage;
1633 rc = PDMDevHlpPhysGCPhys2CCPtr(pDevIns, phPage, 0, &pvPage, &Lock);
1634 if (!RT_SUCCESS(rc))
1635 {
1636 WARN(("PDMDevHlpPhysGCPhys2CCPtr failed %d", rc));
1637 return rc;
1638 }
1639
1640 memcpy(pvPage, pu8Vram, PAGE_SIZE);
1641
1642 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
1643 }
1644
1645 return VINF_SUCCESS;
1646}
1647
1648static int vboxVDMACrCmdVbvaProcessPagingEls(PPDMDEVINS pDevIns, const VBOXCMDVBVAPAGEIDX *piPages, uint32_t cPages, uint8_t *pu8Vram, bool fIn)
1649{
1650 for (uint32_t i = 0; i < cPages; ++i, pu8Vram += PAGE_SIZE)
1651 {
1652 int rc = vboxVDMACrCmdVbvaProcessPagingEl(pDevIns, piPages[i], pu8Vram, fIn);
1653 if (!RT_SUCCESS(rc))
1654 {
1655 WARN(("vboxVDMACrCmdVbvaProcessPagingEl failed %d", rc));
1656 return rc;
1657 }
1658 }
1659
1660 return VINF_SUCCESS;
1661}
1662
1663static int8_t vboxVDMACrCmdVbvaPagingDataInit(PVGASTATE pVGAState, const VBOXCMDVBVA_HDR *pHdr, const VBOXCMDVBVA_PAGING_TRANSFER_DATA *pData, uint32_t cbCmd,
1664 const VBOXCMDVBVAPAGEIDX **ppPages, VBOXCMDVBVAPAGEIDX *pcPages,
1665 uint8_t **ppu8Vram, bool *pfIn)
1666{
1667 if (cbCmd < sizeof (VBOXCMDVBVA_PAGING_TRANSFER))
1668 {
1669 WARN(("cmd too small"));
1670 return -1;
1671 }
1672
1673 VBOXCMDVBVAPAGEIDX cPages = cbCmd - RT_OFFSETOF(VBOXCMDVBVA_PAGING_TRANSFER, Data.aPageNumbers);
1674 if (cPages % sizeof (VBOXCMDVBVAPAGEIDX))
1675 {
1676 WARN(("invalid cmd size"));
1677 return -1;
1678 }
1679 cPages /= sizeof (VBOXCMDVBVAPAGEIDX);
1680
1681 VBOXCMDVBVAOFFSET offVRAM = pData->Alloc.u.offVRAM;
1682 if (offVRAM & PAGE_OFFSET_MASK)
1683 {
1684 WARN(("offVRAM address is not on page boundary\n"));
1685 return -1;
1686 }
1687 const VBOXCMDVBVAPAGEIDX *pPages = pData->aPageNumbers;
1688
1689 uint8_t * pu8VramBase = pVGAState->vram_ptrR3;
1690 uint8_t *pu8VramMax = pu8VramBase + pVGAState->vram_size;
1691 if (offVRAM >= pVGAState->vram_size)
1692 {
1693 WARN(("invalid vram offset"));
1694 return -1;
1695 }
1696
1697 if (offVRAM + (cPages << PAGE_SHIFT) >= pVGAState->vram_size)
1698 {
1699 WARN(("invalid cPages"));
1700 return -1;
1701 }
1702
1703 uint8_t *pu8Vram = pu8VramBase + offVRAM;
1704 bool fIn = !!(pHdr->u8Flags & VBOXCMDVBVA_OPF_PAGING_TRANSFER_IN);
1705
1706 *ppPages = pPages;
1707 *pcPages = cPages;
1708 *ppu8Vram = pu8Vram;
1709 *pfIn = fIn;
1710 return 0;
1711}
1712
1713static int8_t vboxVDMACrCmdVbvaPagingFill(PVGASTATE pVGAState, VBOXCMDVBVA_PAGING_FILL *pFill)
1714{
1715 VBOXCMDVBVAOFFSET offVRAM = pFill->offVRAM;
1716 if (offVRAM & PAGE_OFFSET_MASK)
1717 {
1718 WARN(("offVRAM address is not on page boundary\n"));
1719 return -1;
1720 }
1721
1722 uint8_t * pu8VramBase = pVGAState->vram_ptrR3;
1723 uint8_t *pu8VramMax = pu8VramBase + pVGAState->vram_size;
1724 if (offVRAM >= pVGAState->vram_size)
1725 {
1726 WARN(("invalid vram offset"));
1727 return -1;
1728 }
1729
1730 uint32_t cbFill = pFill->u32CbFill;
1731
1732 if (offVRAM + cbFill >= pVGAState->vram_size)
1733 {
1734 WARN(("invalid cPages"));
1735 return -1;
1736 }
1737
1738 uint32_t *pu32Vram = (uint32_t*)(pu8VramBase + offVRAM);
1739 uint32_t u32Color = pFill->u32Pattern;
1740
1741 Assert(!(cbFill % 4));
1742 for (uint32_t i = 0; i < cbFill / 4; ++i)
1743 {
1744 pu32Vram[i] = u32Color;
1745 }
1746
1747 return 0;
1748}
1749
1750static int8_t vboxVDMACrCmdVbvaProcessCmdData(struct VBOXVDMAHOST *pVdma, const VBOXCMDVBVA_HDR *pCmd, uint32_t cbCmd)
1751{
1752 switch (pCmd->u8OpCode)
1753 {
1754 case VBOXCMDVBVA_OPTYPE_NOPCMD:
1755 return 0;
1756 case VBOXCMDVBVA_OPTYPE_PAGING_TRANSFER:
1757 {
1758 PVGASTATE pVGAState = pVdma->pVGAState;
1759 const VBOXCMDVBVAPAGEIDX *pPages;
1760 uint32_t cPages;
1761 uint8_t *pu8Vram;
1762 bool fIn;
1763 int8_t i8Result = vboxVDMACrCmdVbvaPagingDataInit(pVGAState, pCmd, &((VBOXCMDVBVA_PAGING_TRANSFER*)pCmd)->Data, cbCmd,
1764 &pPages, &cPages,
1765 &pu8Vram, &fIn);
1766 if (i8Result < 0)
1767 {
1768 WARN(("vboxVDMACrCmdVbvaPagingDataInit failed %d", i8Result));
1769 return i8Result;
1770 }
1771
1772 PPDMDEVINS pDevIns = pVGAState->pDevInsR3;
1773 int rc = vboxVDMACrCmdVbvaProcessPagingEls(pDevIns, pPages, cPages, pu8Vram, fIn);
1774 if (!RT_SUCCESS(rc))
1775 {
1776 WARN(("vboxVDMACrCmdVbvaProcessPagingEls failed %d", rc));
1777 return -1;
1778 }
1779
1780 return 0;
1781 }
1782 case VBOXCMDVBVA_OPTYPE_PAGING_FILL:
1783 {
1784 PVGASTATE pVGAState = pVdma->pVGAState;
1785 if (cbCmd != sizeof (VBOXCMDVBVA_PAGING_FILL))
1786 {
1787 WARN(("cmd too small"));
1788 return -1;
1789 }
1790
1791 return vboxVDMACrCmdVbvaPagingFill(pVGAState, (VBOXCMDVBVA_PAGING_FILL*)pCmd);
1792 }
1793 default:
1794 return pVdma->CrSrvInfo.pfnCmd(pVdma->CrSrvInfo.hSvr, pCmd, cbCmd);
1795 }
1796}
1797
1798#if 0
1799typedef struct VBOXCMDVBVA_PAGING_TRANSFER
1800{
1801 VBOXCMDVBVA_HDR Hdr;
1802 /* for now can only contain offVRAM.
1803 * paging transfer can NOT be initiated for allocations having host 3D object (hostID) associated */
1804 VBOXCMDVBVA_ALLOCINFO Alloc;
1805 uint32_t u32Reserved;
1806 VBOXCMDVBVA_SYSMEMEL aSysMem[1];
1807} VBOXCMDVBVA_PAGING_TRANSFER;
1808#endif
1809
/* Layout invariants the page-crossing command parsing below relies on. */
AssertCompile(sizeof (VBOXCMDVBVA_HDR) == 8);
AssertCompile(sizeof (VBOXCMDVBVA_ALLOCINFO) == 4);
AssertCompile(sizeof (VBOXCMDVBVAPAGEIDX) == 4);
AssertCompile(!(PAGE_SIZE % sizeof (VBOXCMDVBVAPAGEIDX)));

/* number of sysmem elements fitting into a single guest page */
#define VBOXCMDVBVA_NUM_SYSMEMEL_PER_PAGE (PAGE_SIZE / sizeof (VBOXCMDVBVA_SYSMEMEL))
1816
1817static int8_t vboxVDMACrCmdVbvaProcess(struct VBOXVDMAHOST *pVdma, const VBOXCMDVBVA_HDR *pCmd, uint32_t cbCmd)
1818{
1819 switch (pCmd->u8OpCode)
1820 {
1821 case VBOXCMDVBVA_OPTYPE_SYSMEMCMD:
1822 {
1823 if (cbCmd < sizeof (VBOXCMDVBVA_SYSMEMCMD))
1824 {
1825 WARN(("invalid command size"));
1826 return -1;
1827 }
1828 VBOXCMDVBVA_SYSMEMCMD *pSysmemCmd = (VBOXCMDVBVA_SYSMEMCMD*)pCmd;
1829 const VBOXCMDVBVA_HDR *pRealCmdHdr;
1830 uint32_t cbRealCmd = pCmd->u8Flags;
1831 cbRealCmd |= (pCmd->u.u8PrimaryID << 8);
1832 if (cbRealCmd < sizeof (VBOXCMDVBVA_HDR))
1833 {
1834 WARN(("invalid sysmem cmd size"));
1835 return -1;
1836 }
1837
1838 RTGCPHYS phCmd = (RTGCPHYS)pSysmemCmd->phCmd;
1839
1840 PGMPAGEMAPLOCK Lock;
1841 PVGASTATE pVGAState = pVdma->pVGAState;
1842 PPDMDEVINS pDevIns = pVGAState->pDevInsR3;
1843 const void * pvCmd;
1844 int rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phCmd, 0, &pvCmd, &Lock);
1845 if (!RT_SUCCESS(rc))
1846 {
1847 WARN(("PDMDevHlpPhysGCPhys2CCPtrReadOnly failed %d\n", rc));
1848 return -1;
1849 }
1850
1851 Assert((phCmd & PAGE_OFFSET_MASK) == (((uintptr_t)pvCmd) & PAGE_OFFSET_MASK));
1852
1853 uint32_t cbCmdPart = PAGE_SIZE - (((uintptr_t)pvCmd) & PAGE_OFFSET_MASK);
1854
1855 if (cbRealCmd <= cbCmdPart)
1856 {
1857 pRealCmdHdr = (const VBOXCMDVBVA_HDR *)pvCmd;
1858 uint8_t i8Result = vboxVDMACrCmdVbvaProcessCmdData(pVdma, pRealCmdHdr, cbRealCmd);
1859 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
1860 return i8Result;
1861 }
1862
1863 VBOXCMDVBVA_HDR Hdr;
1864 const void *pvCurCmdTail;
1865 uint32_t cbCurCmdTail;
1866 if (cbCmdPart >= sizeof (*pRealCmdHdr))
1867 {
1868 pRealCmdHdr = (const VBOXCMDVBVA_HDR *)pvCmd;
1869 pvCurCmdTail = (const void*)(pRealCmdHdr + 1);
1870 cbCurCmdTail = cbCmdPart - sizeof (*pRealCmdHdr);
1871 }
1872 else
1873 {
1874 memcpy(&Hdr, pvCmd, cbCmdPart);
1875 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
1876 phCmd += cbCmdPart;
1877 Assert(!(phCmd & PAGE_OFFSET_MASK));
1878 rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phCmd, 0, &pvCmd, &Lock);
1879 if (!RT_SUCCESS(rc))
1880 {
1881 WARN(("PDMDevHlpPhysGCPhys2CCPtrReadOnly failed %d\n", rc));
1882 return -1;
1883 }
1884
1885 cbCmdPart = sizeof (*pRealCmdHdr) - cbCmdPart;
1886 memcpy(((uint8_t*)(&Hdr)) + cbCmdPart, pvCmd, cbCmdPart);
1887 pvCurCmdTail = (const void*)(((uint8_t*)pvCmd) + cbCmdPart);
1888 cbCurCmdTail = PAGE_SIZE - cbCmdPart;
1889 }
1890
1891 if (cbCurCmdTail > cbRealCmd - sizeof (*pRealCmdHdr))
1892 cbCurCmdTail = cbRealCmd - sizeof (*pRealCmdHdr);
1893
1894 int8_t i8Result = 0;
1895
1896 switch (pRealCmdHdr->u8OpCode)
1897 {
1898 case VBOXCMDVBVA_OPTYPE_PAGING_TRANSFER:
1899 {
1900 const uint32_t *pPages;
1901 uint32_t cPages;
1902 uint8_t *pu8Vram;
1903 bool fIn;
1904 i8Result = vboxVDMACrCmdVbvaPagingDataInit(pVGAState, pRealCmdHdr, (const VBOXCMDVBVA_PAGING_TRANSFER_DATA*)pvCurCmdTail, cbRealCmd,
1905 &pPages, &cPages,
1906 &pu8Vram, &fIn);
1907 if (i8Result < 0)
1908 {
1909 WARN(("vboxVDMACrCmdVbvaPagingDataInit failed %d", i8Result));
1910 /* we need to break, not return, to ensure currently locked page is released */
1911 break;
1912 }
1913
1914 if (cbCurCmdTail & 3)
1915 {
1916 WARN(("command is not alligned properly %d", cbCurCmdTail));
1917 i8Result = -1;
1918 /* we need to break, not return, to ensure currently locked page is released */
1919 break;
1920 }
1921
1922 uint32_t cCurPages = cbCurCmdTail / sizeof (VBOXCMDVBVAPAGEIDX);
1923 Assert(cCurPages < cPages);
1924
1925 do
1926 {
1927 rc = vboxVDMACrCmdVbvaProcessPagingEls(pDevIns, pPages, cCurPages, pu8Vram, fIn);
1928 if (!RT_SUCCESS(rc))
1929 {
1930 WARN(("vboxVDMACrCmdVbvaProcessPagingEls failed %d", rc));
1931 i8Result = -1;
1932 /* we need to break, not return, to ensure currently locked page is released */
1933 break;
1934 }
1935
1936 Assert(cPages >= cCurPages);
1937 cPages -= cCurPages;
1938
1939 if (!cPages)
1940 break;
1941
1942 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
1943
1944 Assert(!(phCmd & PAGE_OFFSET_MASK));
1945
1946 phCmd += PAGE_SIZE;
1947 pu8Vram += (cCurPages << PAGE_SHIFT);
1948
1949 rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phCmd, 0, &pvCmd, &Lock);
1950 if (!RT_SUCCESS(rc))
1951 {
1952 WARN(("PDMDevHlpPhysGCPhys2CCPtrReadOnly failed %d\n", rc));
1953 /* the page is not locked, return */
1954 return -1;
1955 }
1956
1957 cCurPages = PAGE_SIZE / sizeof (VBOXCMDVBVAPAGEIDX);
1958 if (cCurPages > cPages)
1959 cCurPages = cPages;
1960 } while (1);
1961 break;
1962 }
1963 default:
1964 WARN(("command can not be splitted"));
1965 i8Result = -1;
1966 break;
1967 }
1968
1969 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
1970 return i8Result;
1971 }
1972 case VBOXCMDVBVA_OPTYPE_COMPLEXCMD:
1973 {
1974 Assert(cbCmd >= sizeof (VBOXCMDVBVA_HDR));
1975 ++pCmd;
1976 cbCmd -= sizeof (*pCmd);
1977 uint32_t cbCurCmd = 0;
1978 for ( ; cbCmd; cbCmd -= cbCurCmd, pCmd = (VBOXCMDVBVA_HDR*)(((uint8_t*)pCmd) + cbCurCmd))
1979 {
1980 if (cbCmd < sizeof (VBOXCMDVBVA_HDR))
1981 {
1982 WARN(("invalid command size"));
1983 return -1;
1984 }
1985
1986 cbCurCmd = pCmd->u2.complexCmdEl.u16CbCmdHost;
1987 if (cbCmd < cbCurCmd)
1988 {
1989 WARN(("invalid command size"));
1990 return -1;
1991 }
1992
1993 int8_t i8Result = vboxVDMACrCmdVbvaProcess(pVdma, pCmd, cbCurCmd);
1994 if (i8Result < 0)
1995 {
1996 WARN(("vboxVDMACrCmdVbvaProcess failed"));
1997 return i8Result;
1998 }
1999 }
2000 return 0;
2001 }
2002 default:
2003 return vboxVDMACrCmdVbvaProcessCmdData(pVdma, pCmd, cbCmd);
2004 }
2005}
2006
/**
 * Processes one guest command fetched from the command VBVA ring.
 *
 * Atomically transitions the command SUBMITTED -> IN_PROGRESS first so a
 * concurrent guest-side cancellation is honoured; the int8_t result is
 * stored back into the command header.
 */
static void vboxVDMACrCmdProcess(struct VBOXVDMAHOST *pVdma, uint8_t* pu8Cmd, uint32_t cbCmd)
{
    /* a single-byte NOP needs no state handling */
    if (*pu8Cmd == VBOXCMDVBVA_OPTYPE_NOP)
        return;

    if (cbCmd < sizeof (VBOXCMDVBVA_HDR))
    {
        WARN(("invalid command size"));
        return;
    }

    PVBOXCMDVBVA_HDR pCmd = (PVBOXCMDVBVA_HDR)pu8Cmd;

    /* check if the command is cancelled */
    if (!ASMAtomicCmpXchgU8(&pCmd->u8State, VBOXCMDVBVA_STATE_IN_PROGRESS, VBOXCMDVBVA_STATE_SUBMITTED))
    {
        Assert(pCmd->u8State == VBOXCMDVBVA_STATE_CANCELLED);
        return;
    }

    pCmd->u.i8Result = vboxVDMACrCmdVbvaProcess(pVdma, pCmd, cbCmd);
}
2029
2030static int vboxVDMACrCtlHgsmiSetup(struct VBOXVDMAHOST *pVdma)
2031{
2032 PVBOXVDMACMD_CHROMIUM_CTL_CRHGSMI_SETUP pCmd = (PVBOXVDMACMD_CHROMIUM_CTL_CRHGSMI_SETUP)
2033 vboxVDMACrCtlCreate (VBOXVDMACMD_CHROMIUM_CTL_TYPE_CRHGSMI_SETUP, sizeof (*pCmd));
2034 int rc = VERR_NO_MEMORY;
2035 if (pCmd)
2036 {
2037 PVGASTATE pVGAState = pVdma->pVGAState;
2038 pCmd->pvVRamBase = pVGAState->vram_ptrR3;
2039 pCmd->cbVRam = pVGAState->vram_size;
2040 pCmd->pLed = &pVGAState->Led3D;
2041 rc = vboxVDMACrCtlPost(pVGAState, &pCmd->Hdr, sizeof (*pCmd));
2042 if (RT_SUCCESS(rc))
2043 {
2044 rc = vboxVDMACrCtlGetRc(&pCmd->Hdr);
2045 if (RT_SUCCESS(rc))
2046 pVdma->CrSrvInfo = pCmd->CrCmdServerInfo;
2047 else if (rc != VERR_NOT_SUPPORTED)
2048 WARN(("vboxVDMACrCtlGetRc returned %d\n", rc));
2049 }
2050 else
2051 WARN(("vboxVDMACrCtlPost failed %d\n", rc));
2052
2053 vboxVDMACrCtlRelease(&pCmd->Hdr);
2054 }
2055
2056 if (!RT_SUCCESS(rc))
2057 memset(&pVdma->CrSrvInfo, 0, sizeof (pVdma->CrSrvInfo));
2058
2059 return rc;
2060}
2061
2062static int vboxVDMACmdExecBpbTransfer(PVBOXVDMAHOST pVdma, const PVBOXVDMACMD_DMA_BPB_TRANSFER pTransfer, uint32_t cbBuffer);
2063
2064/* check if this is external cmd to be passed to chromium backend */
/**
 * Checks whether a submitted DMA command must be routed to the chromium
 * backend and dispatches it there when so.
 *
 * @returns VINF_NOT_SUPPORTED when the command is not for the backend (the
 *          caller then processes it as an ordinary VDMA command); otherwise
 *          the processing status.
 */
static int vboxVDMACmdCheckCrCmd(struct VBOXVDMAHOST *pVdma, PVBOXVDMACBUF_DR pCmdDr, uint32_t cbCmdDr)
{
    PVBOXVDMACMD pDmaCmd = NULL;
    uint32_t cbDmaCmd = 0;
    uint8_t * pvRam = pVdma->pVGAState->vram_ptrR3;
    int rc = VINF_NOT_SUPPORTED;

    cbDmaCmd = pCmdDr->cbBuf;

    if (pCmdDr->fFlags & VBOXVDMACBUF_FLAG_BUF_FOLLOWS_DR)
    {
        /* the DMA command is embedded right after the buffer descriptor */
        if (cbCmdDr < sizeof (*pCmdDr) + VBOXVDMACMD_HEADER_SIZE())
        {
            AssertMsgFailed(("invalid buffer data!"));
            return VERR_INVALID_PARAMETER;
        }

        /* NOTE(review): this compares the claimed buffer size against the
         * space remaining after the descriptor and header — confirm the
         * intended direction of this check ('<' looks inverted). */
        if (cbDmaCmd < cbCmdDr - sizeof (*pCmdDr) - VBOXVDMACMD_HEADER_SIZE())
        {
            AssertMsgFailed(("invalid command buffer data!"));
            return VERR_INVALID_PARAMETER;
        }

        pDmaCmd = VBOXVDMACBUF_DR_TAIL(pCmdDr, VBOXVDMACMD);
    }
    else if (pCmdDr->fFlags & VBOXVDMACBUF_FLAG_BUF_VRAM_OFFSET)
    {
        /* the DMA command lives in VRAM at the given offset */
        VBOXVIDEOOFFSET offBuf = pCmdDr->Location.offVramBuf;
        /* NOTE(review): confirm offBuf + cbDmaCmd cannot wrap for
         * guest-controlled values. */
        if (offBuf + cbDmaCmd > pVdma->pVGAState->vram_size)
        {
            AssertMsgFailed(("invalid command buffer data from offset!"));
            return VERR_INVALID_PARAMETER;
        }
        pDmaCmd = (VBOXVDMACMD*)(pvRam + offBuf);
    }

    if (pDmaCmd)
    {
        Assert(cbDmaCmd >= VBOXVDMACMD_HEADER_SIZE());
        uint32_t cbBody = VBOXVDMACMD_BODY_SIZE(cbDmaCmd);

        switch (pDmaCmd->enmType)
        {
            case VBOXVDMACMD_TYPE_CHROMIUM_CMD:
            {
                PVBOXVDMACMD_CHROMIUM_CMD pCrCmd = VBOXVDMACMD_BODY(pDmaCmd, VBOXVDMACMD_CHROMIUM_CMD);
                if (cbBody < sizeof (*pCrCmd))
                {
                    AssertMsgFailed(("invalid chromium command buffer size!"));
                    return VERR_INVALID_PARAMETER;
                }
                PVGASTATE pVGAState = pVdma->pVGAState;
                rc = VINF_SUCCESS;
                if (pVGAState->pDrv->pfnCrHgsmiCommandProcess)
                {
                    /* completion is reported asynchronously by the backend */
                    VBoxSHGSMICommandMarkAsynchCompletion(pCmdDr);
                    pVGAState->pDrv->pfnCrHgsmiCommandProcess(pVGAState->pDrv, pCrCmd, cbBody);
                    break;
                }
                else
                {
                    Assert(0);
                }

                /* no backend entry point: complete the command right away */
                int tmpRc = VBoxSHGSMICommandComplete (pVdma->pHgsmi, pCmdDr);
                AssertRC(tmpRc);
                break;
            }
            case VBOXVDMACMD_TYPE_DMA_BPB_TRANSFER:
            {
                PVBOXVDMACMD_DMA_BPB_TRANSFER pTransfer = VBOXVDMACMD_BODY(pDmaCmd, VBOXVDMACMD_DMA_BPB_TRANSFER);
                if (cbBody < sizeof (*pTransfer))
                {
                    AssertMsgFailed(("invalid bpb transfer buffer size!"));
                    return VERR_INVALID_PARAMETER;
                }

                rc = vboxVDMACmdExecBpbTransfer(pVdma, pTransfer, sizeof (*pTransfer));
                AssertRC(rc);
                if (RT_SUCCESS(rc))
                {
                    pCmdDr->rc = VINF_SUCCESS;
                    rc = VBoxSHGSMICommandComplete (pVdma->pHgsmi, pCmdDr);
                    AssertRC(rc);
                    rc = VINF_SUCCESS;
                }
                break;
            }
            default:
                break;
        }
    }
    return rc;
}
2159
2160int vboxVDMACrHgsmiCommandCompleteAsync(PPDMIDISPLAYVBVACALLBACKS pInterface, PVBOXVDMACMD_CHROMIUM_CMD pCmd, int rc)
2161{
2162 PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
2163 PHGSMIINSTANCE pIns = pVGAState->pHGSMI;
2164 VBOXVDMACMD *pDmaHdr = VBOXVDMACMD_FROM_BODY(pCmd);
2165 VBOXVDMACBUF_DR *pDr = VBOXVDMACBUF_DR_FROM_TAIL(pDmaHdr);
2166 AssertRC(rc);
2167 pDr->rc = rc;
2168
2169 Assert(pVGAState->fGuestCaps & VBVACAPS_COMPLETEGCMD_BY_IOREAD);
2170 rc = VBoxSHGSMICommandComplete(pIns, pDr);
2171 AssertRC(rc);
2172 return rc;
2173}
2174
2175int vboxVDMACrHgsmiControlCompleteAsync(PPDMIDISPLAYVBVACALLBACKS pInterface, PVBOXVDMACMD_CHROMIUM_CTL pCmd, int rc)
2176{
2177 PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
2178 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pCmdPrivate = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
2179 pCmdPrivate->rc = rc;
2180 if (pCmdPrivate->pfnCompletion)
2181 {
2182 pCmdPrivate->pfnCompletion(pVGAState, pCmd, pCmdPrivate->pvCompletion);
2183 }
2184 return VINF_SUCCESS;
2185}
2186
2187static int vboxVDMACmdExecBltPerform(PVBOXVDMAHOST pVdma,
2188 uint8_t *pvDstSurf, const uint8_t *pvSrcSurf,
2189 const PVBOXVDMA_SURF_DESC pDstDesc, const PVBOXVDMA_SURF_DESC pSrcDesc,
2190 const VBOXVDMA_RECTL * pDstRectl, const VBOXVDMA_RECTL * pSrcRectl)
2191{
2192 /* we do not support color conversion */
2193 Assert(pDstDesc->format == pSrcDesc->format);
2194 /* we do not support stretching */
2195 Assert(pDstRectl->height == pSrcRectl->height);
2196 Assert(pDstRectl->width == pSrcRectl->width);
2197 if (pDstDesc->format != pSrcDesc->format)
2198 return VERR_INVALID_FUNCTION;
2199 if (pDstDesc->width == pDstRectl->width
2200 && pSrcDesc->width == pSrcRectl->width
2201 && pSrcDesc->width == pDstDesc->width)
2202 {
2203 Assert(!pDstRectl->left);
2204 Assert(!pSrcRectl->left);
2205 uint32_t cbOff = pDstDesc->pitch * pDstRectl->top;
2206 uint32_t cbSize = pDstDesc->pitch * pDstRectl->height;
2207 memcpy(pvDstSurf + cbOff, pvSrcSurf + cbOff, cbSize);
2208 }
2209 else
2210 {
2211 uint32_t offDstLineStart = pDstRectl->left * pDstDesc->bpp >> 3;
2212 uint32_t offDstLineEnd = ((pDstRectl->left * pDstDesc->bpp + 7) >> 3) + ((pDstDesc->bpp * pDstRectl->width + 7) >> 3);
2213 uint32_t cbDstLine = offDstLineEnd - offDstLineStart;
2214 uint32_t offDstStart = pDstDesc->pitch * pDstRectl->top + offDstLineStart;
2215 Assert(cbDstLine <= pDstDesc->pitch);
2216 uint32_t cbDstSkip = pDstDesc->pitch;
2217 uint8_t * pvDstStart = pvDstSurf + offDstStart;
2218
2219 uint32_t offSrcLineStart = pSrcRectl->left * pSrcDesc->bpp >> 3;
2220 uint32_t offSrcLineEnd = ((pSrcRectl->left * pSrcDesc->bpp + 7) >> 3) + ((pSrcDesc->bpp * pSrcRectl->width + 7) >> 3);
2221 uint32_t cbSrcLine = offSrcLineEnd - offSrcLineStart;
2222 uint32_t offSrcStart = pSrcDesc->pitch * pSrcRectl->top + offSrcLineStart;
2223 Assert(cbSrcLine <= pSrcDesc->pitch);
2224 uint32_t cbSrcSkip = pSrcDesc->pitch;
2225 const uint8_t * pvSrcStart = pvSrcSurf + offSrcStart;
2226
2227 Assert(cbDstLine == cbSrcLine);
2228
2229 for (uint32_t i = 0; ; ++i)
2230 {
2231 memcpy (pvDstStart, pvSrcStart, cbDstLine);
2232 if (i == pDstRectl->height)
2233 break;
2234 pvDstStart += cbDstSkip;
2235 pvSrcStart += cbSrcSkip;
2236 }
2237 }
2238 return VINF_SUCCESS;
2239}
2240
2241static void vboxVDMARectlUnite(VBOXVDMA_RECTL * pRectl1, const VBOXVDMA_RECTL * pRectl2)
2242{
2243 if (!pRectl1->width)
2244 *pRectl1 = *pRectl2;
2245 else
2246 {
2247 int16_t x21 = pRectl1->left + pRectl1->width;
2248 int16_t x22 = pRectl2->left + pRectl2->width;
2249 if (pRectl1->left > pRectl2->left)
2250 {
2251 pRectl1->left = pRectl2->left;
2252 pRectl1->width = x21 < x22 ? x22 - pRectl1->left : x21 - pRectl1->left;
2253 }
2254 else if (x21 < x22)
2255 pRectl1->width = x22 - pRectl1->left;
2256
2257 x21 = pRectl1->top + pRectl1->height;
2258 x22 = pRectl2->top + pRectl2->height;
2259 if (pRectl1->top > pRectl2->top)
2260 {
2261 pRectl1->top = pRectl2->top;
2262 pRectl1->height = x21 < x22 ? x22 - pRectl1->top : x21 - pRectl1->top;
2263 }
2264 else if (x21 < x22)
2265 pRectl1->height = x22 - pRectl1->top;
2266 }
2267}
2268
2269/*
2270 * @return on success the number of bytes the command contained, otherwise - VERR_xxx error code
2271 */
2272static int vboxVDMACmdExecBlt(PVBOXVDMAHOST pVdma, const PVBOXVDMACMD_DMA_PRESENT_BLT pBlt, uint32_t cbBuffer)
2273{
2274 const uint32_t cbBlt = VBOXVDMACMD_BODY_FIELD_OFFSET(uint32_t, VBOXVDMACMD_DMA_PRESENT_BLT, aDstSubRects[pBlt->cDstSubRects]);
2275 Assert(cbBlt <= cbBuffer);
2276 if (cbBuffer < cbBlt)
2277 return VERR_INVALID_FUNCTION;
2278
2279 /* we do not support stretching for now */
2280 Assert(pBlt->srcRectl.width == pBlt->dstRectl.width);
2281 Assert(pBlt->srcRectl.height == pBlt->dstRectl.height);
2282 if (pBlt->srcRectl.width != pBlt->dstRectl.width)
2283 return VERR_INVALID_FUNCTION;
2284 if (pBlt->srcRectl.height != pBlt->dstRectl.height)
2285 return VERR_INVALID_FUNCTION;
2286 Assert(pBlt->cDstSubRects);
2287
2288 uint8_t * pvRam = pVdma->pVGAState->vram_ptrR3;
2289 VBOXVDMA_RECTL updateRectl = {0, 0, 0, 0};
2290
2291 if (pBlt->cDstSubRects)
2292 {
2293 VBOXVDMA_RECTL dstRectl, srcRectl;
2294 const VBOXVDMA_RECTL *pDstRectl, *pSrcRectl;
2295 for (uint32_t i = 0; i < pBlt->cDstSubRects; ++i)
2296 {
2297 pDstRectl = &pBlt->aDstSubRects[i];
2298 if (pBlt->dstRectl.left || pBlt->dstRectl.top)
2299 {
2300 dstRectl.left = pDstRectl->left + pBlt->dstRectl.left;
2301 dstRectl.top = pDstRectl->top + pBlt->dstRectl.top;
2302 dstRectl.width = pDstRectl->width;
2303 dstRectl.height = pDstRectl->height;
2304 pDstRectl = &dstRectl;
2305 }
2306
2307 pSrcRectl = &pBlt->aDstSubRects[i];
2308 if (pBlt->srcRectl.left || pBlt->srcRectl.top)
2309 {
2310 srcRectl.left = pSrcRectl->left + pBlt->srcRectl.left;
2311 srcRectl.top = pSrcRectl->top + pBlt->srcRectl.top;
2312 srcRectl.width = pSrcRectl->width;
2313 srcRectl.height = pSrcRectl->height;
2314 pSrcRectl = &srcRectl;
2315 }
2316
2317 int rc = vboxVDMACmdExecBltPerform(pVdma, pvRam + pBlt->offDst, pvRam + pBlt->offSrc,
2318 &pBlt->dstDesc, &pBlt->srcDesc,
2319 pDstRectl,
2320 pSrcRectl);
2321 AssertRC(rc);
2322 if (!RT_SUCCESS(rc))
2323 return rc;
2324
2325 vboxVDMARectlUnite(&updateRectl, pDstRectl);
2326 }
2327 }
2328 else
2329 {
2330 int rc = vboxVDMACmdExecBltPerform(pVdma, pvRam + pBlt->offDst, pvRam + pBlt->offSrc,
2331 &pBlt->dstDesc, &pBlt->srcDesc,
2332 &pBlt->dstRectl,
2333 &pBlt->srcRectl);
2334 AssertRC(rc);
2335 if (!RT_SUCCESS(rc))
2336 return rc;
2337
2338 vboxVDMARectlUnite(&updateRectl, &pBlt->dstRectl);
2339 }
2340
2341 return cbBlt;
2342}
2343
2344static int vboxVDMACmdExecBpbTransfer(PVBOXVDMAHOST pVdma, const PVBOXVDMACMD_DMA_BPB_TRANSFER pTransfer, uint32_t cbBuffer)
2345{
2346 if (cbBuffer < sizeof (*pTransfer))
2347 return VERR_INVALID_PARAMETER;
2348
2349 PVGASTATE pVGAState = pVdma->pVGAState;
2350 uint8_t * pvRam = pVGAState->vram_ptrR3;
2351 PGMPAGEMAPLOCK SrcLock;
2352 PGMPAGEMAPLOCK DstLock;
2353 PPDMDEVINS pDevIns = pVdma->pVGAState->pDevInsR3;
2354 const void * pvSrc;
2355 void * pvDst;
2356 int rc = VINF_SUCCESS;
2357 uint32_t cbTransfer = pTransfer->cbTransferSize;
2358 uint32_t cbTransfered = 0;
2359 bool bSrcLocked = false;
2360 bool bDstLocked = false;
2361 do
2362 {
2363 uint32_t cbSubTransfer = cbTransfer;
2364 if (pTransfer->fFlags & VBOXVDMACMD_DMA_BPB_TRANSFER_F_SRC_VRAMOFFSET)
2365 {
2366 pvSrc = pvRam + pTransfer->Src.offVramBuf + cbTransfered;
2367 }
2368 else
2369 {
2370 RTGCPHYS phPage = pTransfer->Src.phBuf;
2371 phPage += cbTransfered;
2372 rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phPage, 0, &pvSrc, &SrcLock);
2373 AssertRC(rc);
2374 if (RT_SUCCESS(rc))
2375 {
2376 bSrcLocked = true;
2377 cbSubTransfer = RT_MIN(cbSubTransfer, 0x1000);
2378 }
2379 else
2380 {
2381 break;
2382 }
2383 }
2384
2385 if (pTransfer->fFlags & VBOXVDMACMD_DMA_BPB_TRANSFER_F_DST_VRAMOFFSET)
2386 {
2387 pvDst = pvRam + pTransfer->Dst.offVramBuf + cbTransfered;
2388 }
2389 else
2390 {
2391 RTGCPHYS phPage = pTransfer->Dst.phBuf;
2392 phPage += cbTransfered;
2393 rc = PDMDevHlpPhysGCPhys2CCPtr(pDevIns, phPage, 0, &pvDst, &DstLock);
2394 AssertRC(rc);
2395 if (RT_SUCCESS(rc))
2396 {
2397 bDstLocked = true;
2398 cbSubTransfer = RT_MIN(cbSubTransfer, 0x1000);
2399 }
2400 else
2401 {
2402 break;
2403 }
2404 }
2405
2406 if (RT_SUCCESS(rc))
2407 {
2408 memcpy(pvDst, pvSrc, cbSubTransfer);
2409 cbTransfer -= cbSubTransfer;
2410 cbTransfered += cbSubTransfer;
2411 }
2412 else
2413 {
2414 cbTransfer = 0; /* to break */
2415 }
2416
2417 if (bSrcLocked)
2418 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &SrcLock);
2419 if (bDstLocked)
2420 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &DstLock);
2421 } while (cbTransfer);
2422
2423 if (RT_SUCCESS(rc))
2424 return sizeof (*pTransfer);
2425 return rc;
2426}
2427
2428static int vboxVDMACmdExec(PVBOXVDMAHOST pVdma, const uint8_t *pvBuffer, uint32_t cbBuffer)
2429{
2430 do
2431 {
2432 Assert(pvBuffer);
2433 Assert(cbBuffer >= VBOXVDMACMD_HEADER_SIZE());
2434
2435 if (!pvBuffer)
2436 return VERR_INVALID_PARAMETER;
2437 if (cbBuffer < VBOXVDMACMD_HEADER_SIZE())
2438 return VERR_INVALID_PARAMETER;
2439
2440 PVBOXVDMACMD pCmd = (PVBOXVDMACMD)pvBuffer;
2441 uint32_t cbCmd = 0;
2442 switch (pCmd->enmType)
2443 {
2444 case VBOXVDMACMD_TYPE_CHROMIUM_CMD:
2445 {
2446#ifdef VBOXWDDM_TEST_UHGSMI
2447 static int count = 0;
2448 static uint64_t start, end;
2449 if (count==0)
2450 {
2451 start = RTTimeNanoTS();
2452 }
2453 ++count;
2454 if (count==100000)
2455 {
2456 end = RTTimeNanoTS();
2457 float ems = (end-start)/1000000.f;
2458 LogRel(("100000 calls took %i ms, %i cps\n", (int)ems, (int)(100000.f*1000.f/ems) ));
2459 }
2460#endif
2461 /* todo: post the buffer to chromium */
2462 return VINF_SUCCESS;
2463 }
2464 case VBOXVDMACMD_TYPE_DMA_PRESENT_BLT:
2465 {
2466 const PVBOXVDMACMD_DMA_PRESENT_BLT pBlt = VBOXVDMACMD_BODY(pCmd, VBOXVDMACMD_DMA_PRESENT_BLT);
2467 int cbBlt = vboxVDMACmdExecBlt(pVdma, pBlt, cbBuffer);
2468 Assert(cbBlt >= 0);
2469 Assert((uint32_t)cbBlt <= cbBuffer);
2470 if (cbBlt >= 0)
2471 {
2472 if ((uint32_t)cbBlt == cbBuffer)
2473 return VINF_SUCCESS;
2474 else
2475 {
2476 cbBuffer -= (uint32_t)cbBlt;
2477 pvBuffer -= cbBlt;
2478 }
2479 }
2480 else
2481 return cbBlt; /* error */
2482 break;
2483 }
2484 case VBOXVDMACMD_TYPE_DMA_BPB_TRANSFER:
2485 {
2486 const PVBOXVDMACMD_DMA_BPB_TRANSFER pTransfer = VBOXVDMACMD_BODY(pCmd, VBOXVDMACMD_DMA_BPB_TRANSFER);
2487 int cbTransfer = vboxVDMACmdExecBpbTransfer(pVdma, pTransfer, cbBuffer);
2488 Assert(cbTransfer >= 0);
2489 Assert((uint32_t)cbTransfer <= cbBuffer);
2490 if (cbTransfer >= 0)
2491 {
2492 if ((uint32_t)cbTransfer == cbBuffer)
2493 return VINF_SUCCESS;
2494 else
2495 {
2496 cbBuffer -= (uint32_t)cbTransfer;
2497 pvBuffer -= cbTransfer;
2498 }
2499 }
2500 else
2501 return cbTransfer; /* error */
2502 break;
2503 }
2504 case VBOXVDMACMD_TYPE_DMA_NOP:
2505 return VINF_SUCCESS;
2506 case VBOXVDMACMD_TYPE_CHILD_STATUS_IRQ:
2507 return VINF_SUCCESS;
2508 default:
2509 AssertBreakpoint();
2510 return VERR_INVALID_FUNCTION;
2511 }
2512 } while (1);
2513
2514 /* we should not be here */
2515 AssertBreakpoint();
2516 return VERR_INVALID_STATE;
2517}
2518
/**
 * The VDMA worker thread loop: pulls commands and controls off the shared
 * VBVA ring and dispatches them until termination is requested.
 *
 * @returns VINF_SUCCESS (thread exit code).
 * @param   ThreadSelf  The IPRT thread handle (unused).
 * @param   pvUser      Pointer to the VBOXVDMAHOST instance.
 */
static DECLCALLBACK(int) vboxVDMAWorkerThread(RTTHREAD ThreadSelf, void *pvUser)
{
    PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)pvUser;
    PVGASTATE pVGAState = pVdma->pVGAState;
    VBVAEXHOSTCONTEXT *pCmdVbva = &pVdma->CmdVbva;
    PHGSMIINSTANCE pHgsmi = pVdma->pHgsmi; /* currently unused in this loop */
    uint8_t *pCmd;
    uint32_t cbCmd;
    int rc;

    /* Tell the creator that the thread is up and running. */
    VBoxVDMAThreadNotifyConstructSucceeded(&pVdma->Thread, pvUser);

    while (!VBoxVDMAThreadIsTerminating(&pVdma->Thread))
    {
        VBVAEXHOST_DATA_TYPE enmType = VBoxVBVAExHPDataGet(pCmdVbva, &pCmd, &cbCmd);
        switch (enmType)
        {
            case VBVAEXHOST_DATA_TYPE_CMD:
                /* Regular command: process, complete, and poke the guest IRQ. */
                vboxVDMACrCmdProcess(pVdma, pCmd, cbCmd);
                VBoxVBVAExHPDataCompleteCmd(pCmdVbva, cbCmd);
                VBVARaiseIrqNoWait(pVGAState, 0);
                break;
            case VBVAEXHOST_DATA_TYPE_GUESTCTL:
                rc = vboxVDMACrGuestCtlProcess(pVdma, (VBVAEXHOSTCTL*)pCmd);
                VBoxVBVAExHPDataCompleteCtl(pCmdVbva, (VBVAEXHOSTCTL*)pCmd, rc);
                break;
            case VBVAEXHOST_DATA_TYPE_HOSTCTL:
            {
                bool fContinue = true;
                rc = vboxVDMACrHostCtlProcess(pVdma, (VBVAEXHOSTCTL*)pCmd, &fContinue);
                VBoxVBVAExHPDataCompleteCtl(pCmdVbva, (VBVAEXHOSTCTL*)pCmd, rc);
                if (fContinue)
                    break;
            }
            /* deliberate fall through when !fContinue: park like the no-data case */
            case VBVAEXHOST_DATA_TYPE_NO_DATA:
                /* Nothing queued: block until new work is signalled. */
                rc = VBoxVDMAThreadEventWait(&pVdma->Thread, RT_INDEFINITE_WAIT);
                AssertRC(rc);
                break;
            default:
                WARN(("unexpected type %d\n", enmType));
                break;
        }
    }

    /* Acknowledge termination to whoever requested it. */
    VBoxVDMAThreadNotifyTerminatingSucceeded(&pVdma->Thread, pvUser);

    return VINF_SUCCESS;
}
2567
2568static void vboxVDMACommandProcess(PVBOXVDMAHOST pVdma, PVBOXVDMACBUF_DR pCmd, uint32_t cbCmd)
2569{
2570 PHGSMIINSTANCE pHgsmi = pVdma->pHgsmi;
2571 const uint8_t * pvBuf;
2572 PGMPAGEMAPLOCK Lock;
2573 int rc;
2574 bool bReleaseLocked = false;
2575
2576 do
2577 {
2578 PPDMDEVINS pDevIns = pVdma->pVGAState->pDevInsR3;
2579
2580 if (pCmd->fFlags & VBOXVDMACBUF_FLAG_BUF_FOLLOWS_DR)
2581 pvBuf = VBOXVDMACBUF_DR_TAIL(pCmd, const uint8_t);
2582 else if (pCmd->fFlags & VBOXVDMACBUF_FLAG_BUF_VRAM_OFFSET)
2583 {
2584 uint8_t * pvRam = pVdma->pVGAState->vram_ptrR3;
2585 pvBuf = pvRam + pCmd->Location.offVramBuf;
2586 }
2587 else
2588 {
2589 RTGCPHYS phPage = pCmd->Location.phBuf & ~0xfffULL;
2590 uint32_t offset = pCmd->Location.phBuf & 0xfff;
2591 Assert(offset + pCmd->cbBuf <= 0x1000);
2592 if (offset + pCmd->cbBuf > 0x1000)
2593 {
2594 /* @todo: more advanced mechanism of command buffer proc is actually needed */
2595 rc = VERR_INVALID_PARAMETER;
2596 break;
2597 }
2598
2599 const void * pvPageBuf;
2600 rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, phPage, 0, &pvPageBuf, &Lock);
2601 AssertRC(rc);
2602 if (!RT_SUCCESS(rc))
2603 {
2604 /* @todo: if (rc == VERR_PGM_PHYS_PAGE_RESERVED) -> fall back on using PGMPhysRead ?? */
2605 break;
2606 }
2607
2608 pvBuf = (const uint8_t *)pvPageBuf;
2609 pvBuf += offset;
2610
2611 bReleaseLocked = true;
2612 }
2613
2614 rc = vboxVDMACmdExec(pVdma, pvBuf, pCmd->cbBuf);
2615 AssertRC(rc);
2616
2617 if (bReleaseLocked)
2618 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
2619 } while (0);
2620
2621 pCmd->rc = rc;
2622
2623 rc = VBoxSHGSMICommandComplete (pHgsmi, pCmd);
2624 AssertRC(rc);
2625}
2626
2627static void vboxVDMAControlProcess(PVBOXVDMAHOST pVdma, PVBOXVDMA_CTL pCmd)
2628{
2629 PHGSMIINSTANCE pHgsmi = pVdma->pHgsmi;
2630 pCmd->i32Result = VINF_SUCCESS;
2631 int rc = VBoxSHGSMICommandComplete (pHgsmi, pCmd);
2632 AssertRC(rc);
2633}
2634
2635#endif /* #ifdef VBOX_WITH_CRHGSMI */
2636
2637#ifdef VBOX_VDMA_WITH_WATCHDOG
2638static DECLCALLBACK(void) vboxVDMAWatchDogTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
2639{
2640 VBOXVDMAHOST *pVdma = (VBOXVDMAHOST *)pvUser;
2641 PVGASTATE pVGAState = pVdma->pVGAState;
2642 VBVARaiseIrq(pVGAState, HGSMIHOSTFLAGS_WATCHDOG);
2643}
2644
2645static int vboxVDMAWatchDogCtl(struct VBOXVDMAHOST *pVdma, uint32_t cMillis)
2646{
2647 PPDMDEVINS pDevIns = pVdma->pVGAState->pDevInsR3;
2648 if (cMillis)
2649 TMTimerSetMillies(pVdma->WatchDogTimer, cMillis);
2650 else
2651 TMTimerStop(pVdma->WatchDogTimer);
2652 return VINF_SUCCESS;
2653}
2654#endif
2655
/**
 * Allocates and initializes the VDMA host state and hooks it up to the VGA
 * device instance.
 *
 * @returns VBox status code.
 * @param   pVGAState      The VGA device state to attach to.
 * @param   cPipeElements  Historic pipe-depth parameter; not used by the
 *                         current implementation.
 */
int vboxVDMAConstruct(PVGASTATE pVGAState, uint32_t cPipeElements)
{
    int rc;
    PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)RTMemAllocZ(sizeof(*pVdma));
    Assert(pVdma);
    if (pVdma)
    {
        pVdma->pHgsmi = pVGAState->pHGSMI;
        pVdma->pVGAState = pVGAState;

#ifdef VBOX_VDMA_WITH_WATCHDOG
        rc = PDMDevHlpTMTimerCreate(pVGAState->pDevInsR3, TMCLOCK_REAL, vboxVDMAWatchDogTimer,
                                        pVdma, TMTIMER_FLAGS_NO_CRIT_SECT,
                                        "VDMA WatchDog Timer", &pVdma->WatchDogTimer);
        AssertRC(rc);
#endif

#ifdef VBOX_WITH_CRHGSMI
        VBoxVDMAThreadInit(&pVdma->Thread);

        rc = RTSemEventMultiCreate(&pVdma->HostCrCtlCompleteEvent);
        if (RT_SUCCESS(rc))
        {
            rc = VBoxVBVAExHSInit(&pVdma->CmdVbva);
            if (RT_SUCCESS(rc))
            {
                pVGAState->pVdma = pVdma;
                int rcIgnored = vboxVDMACrCtlHgsmiSetup(pVdma); NOREF(rcIgnored); /** @todo is this ignoring intentional? */
                return VINF_SUCCESS;

                /* NOTE(review): unreachable - dead cleanup kept in place for
                 * when the early return above is removed. */
                VBoxVBVAExHSTerm(&pVdma->CmdVbva);
            }
            else
                WARN(("VBoxVBVAExHSInit failed %d\n", rc));

            RTSemEventMultiDestroy(pVdma->HostCrCtlCompleteEvent);
        }
        else
            WARN(("RTSemEventMultiCreate failed %d\n", rc));


        RTMemFree(pVdma);
#else
        pVGAState->pVdma = pVdma;
        return VINF_SUCCESS;
#endif
    }
    else
        rc = VERR_OUT_OF_RESOURCES;

    return rc;
}
2708
/**
 * Device reset handler: synchronously disables the VBVA command submission
 * machinery (when chromium HGSMI support is compiled in).
 *
 * @returns VINF_SUCCESS.
 */
int vboxVDMAReset(struct VBOXVDMAHOST *pVdma)
{
#ifdef VBOX_WITH_CRHGSMI
    vdmaVBVACtlDisableSync(pVdma);
#endif
    return VINF_SUCCESS;
}
2716
/**
 * Tears down the VDMA host state: disables VBVA, stops the worker thread,
 * terminates the VBVA context, destroys the completion event, and finally
 * frees the instance itself.
 *
 * @returns VINF_SUCCESS.
 */
int vboxVDMADestruct(struct VBOXVDMAHOST *pVdma)
{
#ifdef VBOX_WITH_CRHGSMI
    vdmaVBVACtlDisableSync(pVdma);
    VBoxVDMAThreadCleanup(&pVdma->Thread);
    VBoxVBVAExHSTerm(&pVdma->CmdVbva);
    RTSemEventMultiDestroy(pVdma->HostCrCtlCompleteEvent);
#endif
    RTMemFree(pVdma);
    return VINF_SUCCESS;
}
2728
2729void vboxVDMAControl(struct VBOXVDMAHOST *pVdma, PVBOXVDMA_CTL pCmd, uint32_t cbCmd)
2730{
2731 PHGSMIINSTANCE pIns = pVdma->pHgsmi;
2732
2733 switch (pCmd->enmCtl)
2734 {
2735 case VBOXVDMA_CTL_TYPE_ENABLE:
2736 pCmd->i32Result = VINF_SUCCESS;
2737 break;
2738 case VBOXVDMA_CTL_TYPE_DISABLE:
2739 pCmd->i32Result = VINF_SUCCESS;
2740 break;
2741 case VBOXVDMA_CTL_TYPE_FLUSH:
2742 pCmd->i32Result = VINF_SUCCESS;
2743 break;
2744#ifdef VBOX_VDMA_WITH_WATCHDOG
2745 case VBOXVDMA_CTL_TYPE_WATCHDOG:
2746 pCmd->i32Result = vboxVDMAWatchDogCtl(pVdma, pCmd->u32Offset);
2747 break;
2748#endif
2749 default:
2750 WARN(("cmd not supported"));
2751 pCmd->i32Result = VERR_NOT_SUPPORTED;
2752 }
2753
2754 int rc = VBoxSHGSMICommandComplete (pIns, pCmd);
2755 AssertRC(rc);
2756}
2757
/**
 * Entry point for VDMA command buffers submitted by the guest over HGSMI.
 *
 * Chromium commands are detected by vboxVDMACmdCheckCrCmd() and routed to the
 * chromium HGCM thread; everything else is executed synchronously and
 * completed back to the guest.
 */
void vboxVDMACommand(struct VBOXVDMAHOST *pVdma, PVBOXVDMACBUF_DR pCmd, uint32_t cbCmd)
{
    int rc = VERR_NOT_IMPLEMENTED;

#ifdef VBOX_WITH_CRHGSMI
    /* chromium commands are processed by crhomium hgcm thread independently from our internal cmd processing pipeline
     * this is why we process them specially */
    rc = vboxVDMACmdCheckCrCmd(pVdma, pCmd, cbCmd);
    if (rc == VINF_SUCCESS)
        return; /* handled (possibly completing asynchronously) */

    if (RT_FAILURE(rc))
    {
        /* Malformed command: report the failure status to the guest. */
        pCmd->rc = rc;
        rc = VBoxSHGSMICommandComplete (pVdma->pHgsmi, pCmd);
        AssertRC(rc);
        return;
    }

    /* VINF_NOT_SUPPORTED: a regular command, execute it inline. */
    vboxVDMACommandProcess(pVdma, pCmd, cbCmd);
#else
    pCmd->rc = rc;
    rc = VBoxSHGSMICommandComplete (pVdma->pHgsmi, pCmd);
    AssertRC(rc);
#endif
}
2784
2785/**/
2786#ifdef VBOX_WITH_CRHGSMI
2787
2788static DECLCALLBACK(void) vdmaVBVACtlSubmitSyncCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl, int rc, void *pvContext);
2789
2790static int vdmaVBVACtlSubmit(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL* pCtl, VBVAEXHOSTCTL_SOURCE enmSource, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
2791{
2792 int rc = VBoxVBVAExHCtlSubmit(&pVdma->CmdVbva, pCtl, enmSource, pfnComplete, pvComplete);
2793 if (RT_SUCCESS(rc))
2794 {
2795 if (rc == VINF_SUCCESS)
2796 return VBoxVDMAThreadEventNotify(&pVdma->Thread);
2797 else
2798 Assert(rc == VINF_ALREADY_INITIALIZED);
2799 }
2800 else
2801 Log(("VBoxVBVAExHCtlSubmit failed %d\n", rc));
2802
2803 return rc;
2804}
2805
2806static DECLCALLBACK(void) vboxCmdVBVACmdCtlGuestCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl, int rc, void *pvContext)
2807{
2808 PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)pvContext;
2809 VBOXCMDVBVA_CTL *pGCtl = (VBOXCMDVBVA_CTL*)(pCtl->u.cmd.pu8Cmd - sizeof (VBOXCMDVBVA_CTL));
2810 AssertRC(rc);
2811 pGCtl->i32Result = rc;
2812
2813 Assert(pVdma->pVGAState->fGuestCaps & VBVACAPS_COMPLETEGCMD_BY_IOREAD);
2814 rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, pGCtl);
2815 AssertRC(rc);
2816
2817 VBoxVBVAExHCtlFree(pVbva, pCtl);
2818}
2819
2820static int vdmaVBVACtlGenericSubmit(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL_SOURCE enmSource, VBVAEXHOSTCTL_TYPE enmType, uint8_t* pu8Cmd, uint32_t cbCmd, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
2821{
2822 VBVAEXHOSTCTL* pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva, enmType);
2823 if (!pHCtl)
2824 {
2825 WARN(("VBoxVBVAExHCtlCreate failed\n"));
2826 return VERR_NO_MEMORY;
2827 }
2828
2829 pHCtl->u.cmd.pu8Cmd = pu8Cmd;
2830 pHCtl->u.cmd.cbCmd = cbCmd;
2831 int rc = vdmaVBVACtlSubmit(pVdma, pHCtl, enmSource, pfnComplete, pvComplete);
2832 if (RT_FAILURE(rc))
2833 {
2834 VBoxVBVAExHCtlFree(&pVdma->CmdVbva, pHCtl);
2835 Log(("vdmaVBVACtlSubmit failed rc %d\n", rc));
2836 return rc;;
2837 }
2838 return VINF_SUCCESS;
2839}
2840
2841static int vdmaVBVACtlGenericGuestSubmit(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL_TYPE enmType, VBOXCMDVBVA_CTL *pCtl, uint32_t cbCtl)
2842{
2843 Assert(cbCtl >= sizeof (VBOXCMDVBVA_CTL));
2844 VBoxSHGSMICommandMarkAsynchCompletion(pCtl);
2845 int rc = vdmaVBVACtlGenericSubmit(pVdma, VBVAEXHOSTCTL_SOURCE_GUEST, enmType, (uint8_t*)(pCtl+1), cbCtl - sizeof (VBOXCMDVBVA_CTL), vboxCmdVBVACmdCtlGuestCompletion, pVdma);
2846 if (RT_SUCCESS(rc))
2847 return VINF_SUCCESS;
2848
2849 WARN(("vdmaVBVACtlGenericSubmit failed %d\n", rc));
2850 pCtl->i32Result = rc;
2851 rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, pCtl);
2852 AssertRC(rc);
2853 return VINF_SUCCESS;
2854}
2855
2856static DECLCALLBACK(void) vboxCmdVBVACmdCtlHostCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl, int rc, void *pvCompletion)
2857{
2858 VBOXCRCMDCTL* pVboxCtl = (VBOXCRCMDCTL*)pCtl->u.cmd.pu8Cmd;
2859 if (pVboxCtl->u.pfnInternal)
2860 ((PFNCRCTLCOMPLETION)pVboxCtl->u.pfnInternal)(pVboxCtl, pCtl->u.cmd.cbCmd, rc, pvCompletion);
2861 VBoxVBVAExHCtlFree(pVbva, pCtl);
2862}
2863
2864static int vdmaVBVACtlOpaqueHostSubmit(PVBOXVDMAHOST pVdma, struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd,
2865 PFNCRCTLCOMPLETION pfnCompletion,
2866 void *pvCompletion)
2867{
2868 pCmd->u.pfnInternal = (void(*)())pfnCompletion;
2869 int rc = vdmaVBVACtlGenericSubmit(pVdma, VBVAEXHOSTCTL_SOURCE_HOST, VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE, (uint8_t*)pCmd, cbCmd, vboxCmdVBVACmdCtlHostCompletion, pvCompletion);
2870 if (RT_FAILURE(rc))
2871 {
2872 if (rc == VERR_INVALID_STATE)
2873 {
2874 pCmd->u.pfnInternal = NULL;
2875 PVGASTATE pVGAState = pVdma->pVGAState;
2876 rc = pVGAState->pDrv->pfnCrHgcmCtlSubmit(pVGAState->pDrv, pCmd, cbCmd, pfnCompletion, pvCompletion);
2877 if (!RT_SUCCESS(rc))
2878 WARN(("pfnCrHgsmiControlProcess failed %d\n", rc));
2879
2880 return rc;
2881 }
2882 WARN(("vdmaVBVACtlGenericSubmit failed %d\n", rc));
2883 return rc;
2884 }
2885
2886 return VINF_SUCCESS;
2887}
2888
2889static DECLCALLBACK(int) vdmaVBVANotifyEnable(PVGASTATE pVGAState)
2890{
2891 for (uint32_t i = 0; i < pVGAState->cMonitors; i++)
2892 {
2893 int rc = pVGAState->pDrv->pfnVBVAEnable (pVGAState->pDrv, i, NULL, true);
2894 if (!RT_SUCCESS(rc))
2895 {
2896 WARN(("pfnVBVAEnable failed %d\n", rc));
2897 for (uint32_t j = 0; j < i; j++)
2898 {
2899 pVGAState->pDrv->pfnVBVADisable (pVGAState->pDrv, j);
2900 }
2901
2902 return rc;
2903 }
2904 }
2905 return VINF_SUCCESS;
2906}
2907
2908static DECLCALLBACK(int) vdmaVBVANotifyDisable(PVGASTATE pVGAState)
2909{
2910 for (uint32_t i = 0; i < pVGAState->cMonitors; i++)
2911 {
2912 pVGAState->pDrv->pfnVBVADisable (pVGAState->pDrv, i);
2913 }
2914 return VINF_SUCCESS;
2915}
2916
2917static DECLCALLBACK(void) vdmaVBVACtlThreadCreatedEnable(struct VBOXVDMATHREAD *pThread, int rc, void *pvThreadContext, void *pvContext)
2918{
2919 PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)pvThreadContext;
2920 VBVAEXHOSTCTL* pHCtl = (VBVAEXHOSTCTL*)pvContext;
2921
2922 if (RT_SUCCESS(rc))
2923 {
2924 rc = vboxVDMACrGuestCtlProcess(pVdma, pHCtl);
2925 /* rc == VINF_SUCCESS would mean the actual state change has occcured */
2926 if (rc == VINF_SUCCESS)
2927 {
2928 /* we need to inform Main about VBVA enable/disable
2929 * main expects notifications to be done from the main thread
2930 * submit it there */
2931 PVGASTATE pVGAState = pVdma->pVGAState;
2932
2933 if (VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
2934 vdmaVBVANotifyEnable(pVGAState);
2935 else
2936 vdmaVBVANotifyDisable(pVGAState);
2937 }
2938 else if (RT_FAILURE(rc))
2939 WARN(("vboxVDMACrGuestCtlProcess failed %d\n", rc));
2940 }
2941 else
2942 WARN(("vdmaVBVACtlThreadCreatedEnable is passed %d\n", rc));
2943
2944 VBoxVBVAExHPDataCompleteCtl(&pVdma->CmdVbva, pHCtl, rc);
2945}
2946
2947static int vdmaVBVACtlEnableSubmitInternal(PVBOXVDMAHOST pVdma, VBVAENABLE *pEnable, bool fPaused, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
2948{
2949 int rc;
2950 VBVAEXHOSTCTL* pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva, fPaused ? VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED : VBVAEXHOSTCTL_TYPE_GHH_ENABLE);
2951 if (pHCtl)
2952 {
2953 pHCtl->u.cmd.pu8Cmd = (uint8_t*)pEnable;
2954 pHCtl->u.cmd.cbCmd = sizeof (*pEnable);
2955 pHCtl->pfnComplete = pfnComplete;
2956 pHCtl->pvComplete = pvComplete;
2957
2958 rc = VBoxVDMAThreadCreate(&pVdma->Thread, vboxVDMAWorkerThread, pVdma, vdmaVBVACtlThreadCreatedEnable, pHCtl);
2959 if (RT_SUCCESS(rc))
2960 return VINF_SUCCESS;
2961 else
2962 WARN(("VBoxVDMAThreadCreate failed %d\n", rc));
2963
2964 VBoxVBVAExHCtlFree(&pVdma->CmdVbva, pHCtl);
2965 }
2966 else
2967 {
2968 WARN(("VBoxVBVAExHCtlCreate failed\n"));
2969 rc = VERR_NO_MEMORY;
2970 }
2971
2972 return rc;
2973}
2974
2975static int vdmaVBVACtlEnableSubmitSync(PVBOXVDMAHOST pVdma, uint32_t offVram, bool fPaused)
2976{
2977 VBVAENABLE Enable = {0};
2978 Enable.u32Flags = VBVA_F_ENABLE;
2979 Enable.u32Offset = offVram;
2980
2981 VDMA_VBVA_CTL_CYNC_COMPLETION Data;
2982 Data.rc = VERR_NOT_IMPLEMENTED;
2983 int rc = RTSemEventCreate(&Data.hEvent);
2984 if (!RT_SUCCESS(rc))
2985 {
2986 WARN(("RTSemEventCreate failed %d\n", rc));
2987 return rc;
2988 }
2989
2990 rc = vdmaVBVACtlEnableSubmitInternal(pVdma, &Enable, fPaused, vdmaVBVACtlSubmitSyncCompletion, &Data);
2991 if (RT_SUCCESS(rc))
2992 {
2993 rc = RTSemEventWait(Data.hEvent, RT_INDEFINITE_WAIT);
2994 if (RT_SUCCESS(rc))
2995 {
2996 rc = Data.rc;
2997 if (!RT_SUCCESS(rc))
2998 WARN(("vdmaVBVACtlSubmitSyncCompletion returned %d\n", rc));
2999 }
3000 else
3001 WARN(("RTSemEventWait failed %d\n", rc));
3002 }
3003 else
3004 WARN(("vdmaVBVACtlSubmit failed %d\n", rc));
3005
3006 RTSemEventDestroy(Data.hEvent);
3007
3008 return rc;
3009}
3010
3011static int vdmaVBVACtlDisableSubmitInternal(PVBOXVDMAHOST pVdma, VBVAENABLE *pEnable, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
3012{
3013 int rc;
3014 VBVAEXHOSTCTL* pHCtl;
3015 if (VBoxVBVAExHSIsDisabled(&pVdma->CmdVbva))
3016 {
3017 WARN(("VBoxVBVAExHSIsDisabled: disabled"));
3018 return VINF_SUCCESS;
3019 }
3020
3021 pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva, VBVAEXHOSTCTL_TYPE_GHH_DISABLE);
3022 if (!pHCtl)
3023 {
3024 WARN(("VBoxVBVAExHCtlCreate failed\n"));
3025 return VERR_NO_MEMORY;
3026 }
3027
3028 pHCtl->u.cmd.pu8Cmd = (uint8_t*)pEnable;
3029 pHCtl->u.cmd.cbCmd = sizeof (*pEnable);
3030 rc = vdmaVBVACtlSubmit(pVdma, pHCtl, VBVAEXHOSTCTL_SOURCE_GUEST, pfnComplete, pvComplete);
3031 if (RT_SUCCESS(rc))
3032 return VINF_SUCCESS;
3033
3034 WARN(("vdmaVBVACtlSubmit failed rc %d\n", rc));
3035 VBoxVBVAExHCtlFree(&pVdma->CmdVbva, pHCtl);
3036 return rc;
3037}
3038
3039static int vdmaVBVACtlEnableDisableSubmitInternal(PVBOXVDMAHOST pVdma, VBVAENABLE *pEnable, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
3040{
3041 bool fEnable = ((pEnable->u32Flags & (VBVA_F_ENABLE | VBVA_F_DISABLE)) == VBVA_F_ENABLE);
3042 if (fEnable)
3043 return vdmaVBVACtlEnableSubmitInternal(pVdma, pEnable, false, pfnComplete, pvComplete);
3044 return vdmaVBVACtlDisableSubmitInternal(pVdma, pEnable, pfnComplete, pvComplete);
3045}
3046
3047static int vdmaVBVACtlEnableDisableSubmit(PVBOXVDMAHOST pVdma, VBOXCMDVBVA_CTL_ENABLE *pEnable)
3048{
3049 VBoxSHGSMICommandMarkAsynchCompletion(&pEnable->Hdr);
3050 int rc = vdmaVBVACtlEnableDisableSubmitInternal(pVdma, &pEnable->Enable, vboxCmdVBVACmdCtlGuestCompletion, pVdma);
3051 if (RT_SUCCESS(rc))
3052 return VINF_SUCCESS;
3053
3054 WARN(("vdmaVBVACtlEnableDisableSubmitInternal failed %d\n", rc));
3055 pEnable->Hdr.i32Result = rc;
3056 rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, &pEnable->Hdr);
3057 AssertRC(rc);
3058 return VINF_SUCCESS;
3059}
3060
3061static DECLCALLBACK(void) vdmaVBVACtlSubmitSyncCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl, int rc, void *pvContext)
3062{
3063 VDMA_VBVA_CTL_CYNC_COMPLETION *pData = (VDMA_VBVA_CTL_CYNC_COMPLETION*)pvContext;
3064 pData->rc = rc;
3065 rc = RTSemEventSignal(pData->hEvent);
3066 if (!RT_SUCCESS(rc))
3067 WARN(("RTSemEventSignal failed %d\n", rc));
3068}
3069
3070static int vdmaVBVACtlSubmitSync(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL* pCtl, VBVAEXHOSTCTL_SOURCE enmSource)
3071{
3072 VDMA_VBVA_CTL_CYNC_COMPLETION Data;
3073 Data.rc = VERR_NOT_IMPLEMENTED;
3074 int rc = RTSemEventCreate(&Data.hEvent);
3075 if (!RT_SUCCESS(rc))
3076 {
3077 WARN(("RTSemEventCreate failed %d\n", rc));
3078 return rc;
3079 }
3080
3081 rc = vdmaVBVACtlSubmit(pVdma, pCtl, enmSource, vdmaVBVACtlSubmitSyncCompletion, &Data);
3082 if (RT_SUCCESS(rc))
3083 {
3084 rc = RTSemEventWait(Data.hEvent, RT_INDEFINITE_WAIT);
3085 if (RT_SUCCESS(rc))
3086 {
3087 rc = Data.rc;
3088 if (!RT_SUCCESS(rc))
3089 WARN(("vdmaVBVACtlSubmitSyncCompletion returned %d\n", rc));
3090 }
3091 else
3092 WARN(("RTSemEventWait failed %d\n", rc));
3093 }
3094 else
3095 Log(("vdmaVBVACtlSubmit failed %d\n", rc));
3096
3097 RTSemEventDestroy(Data.hEvent);
3098
3099 return rc;
3100}
3101
3102static int vdmaVBVAPause(PVBOXVDMAHOST pVdma)
3103{
3104 VBVAEXHOSTCTL Ctl;
3105 Ctl.enmType = VBVAEXHOSTCTL_TYPE_HH_INTERNAL_PAUSE;
3106 return vdmaVBVACtlSubmitSync(pVdma, &Ctl, VBVAEXHOSTCTL_SOURCE_HOST);
3107}
3108
3109static int vdmaVBVAResume(PVBOXVDMAHOST pVdma)
3110{
3111 VBVAEXHOSTCTL Ctl;
3112 Ctl.enmType = VBVAEXHOSTCTL_TYPE_HH_INTERNAL_RESUME;
3113 return vdmaVBVACtlSubmitSync(pVdma, &Ctl, VBVAEXHOSTCTL_SOURCE_HOST);
3114}
3115
3116static int vboxVDMACmdSubmitPerform(struct VBOXVDMAHOST *pVdma)
3117{
3118 int rc = VBoxVBVAExHSCheckCommands(&pVdma->CmdVbva);
3119 switch (rc)
3120 {
3121 case VINF_SUCCESS:
3122 return VBoxVDMAThreadEventNotify(&pVdma->Thread);
3123 case VINF_ALREADY_INITIALIZED:
3124 case VINF_EOF:
3125 case VERR_INVALID_STATE:
3126 return VINF_SUCCESS;
3127 default:
3128 Assert(!RT_FAILURE(rc));
3129 return RT_FAILURE(rc) ? rc : VERR_INTERNAL_ERROR;
3130 }
3131}
3132
3133
3134int vboxCmdVBVACmdHostCtl(PPDMIDISPLAYVBVACALLBACKS pInterface,
3135 struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd,
3136 PFNCRCTLCOMPLETION pfnCompletion,
3137 void *pvCompletion)
3138{
3139 PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
3140 struct VBOXVDMAHOST *pVdma = pVGAState->pVdma;
3141 return vdmaVBVACtlOpaqueHostSubmit(pVdma, pCmd, cbCmd, pfnCompletion, pvCompletion);
3142}
3143
/**
 * Completion context shared between vboxCmdVBVACmdHostCtlSync (the waiter)
 * and vboxCmdVBVACmdHostCtlSyncCb (the completion callback).
 */
typedef struct VBOXCMDVBVA_CMDHOSTCTL_SYNC
{
    struct VBOXVDMAHOST *pVdma; /**< VDMA host state providing the completion event/counter. */
    uint32_t fProcessing;       /**< Busy flag: set to 1 on submit, cleared by the callback; polled by the waiter. */
    int rc;                     /**< Command completion status written by the callback. */
} VBOXCMDVBVA_CMDHOSTCTL_SYNC;
3150
3151static DECLCALLBACK(void) vboxCmdVBVACmdHostCtlSyncCb(struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd, int rc, void *pvCompletion)
3152{
3153 VBOXCMDVBVA_CMDHOSTCTL_SYNC *pData = (VBOXCMDVBVA_CMDHOSTCTL_SYNC*)pvCompletion;
3154
3155 pData->rc = rc;
3156 pData->fProcessing = 0;
3157
3158 struct VBOXVDMAHOST *pVdma = pData->pVdma;
3159
3160 ASMAtomicIncS32(&pVdma->i32cHostCrCtlCompleted);
3161
3162 RTSemEventMultiSignal(pVdma->HostCrCtlCompleteEvent);
3163}
3164
/**
 * Submits a host chromium control command and blocks until it completes.
 *
 * Completion is detected by polling Data.fProcessing, which
 * vboxCmdVBVACmdHostCtlSyncCb clears; the multi-event semaphore is shared by
 * all in-flight sync commands, so a wake-up does not imply *this* command
 * finished.
 *
 * @returns VBox status code: submit failure, or the command's completion rc.
 * @param   pInterface  The display VBVA callbacks interface.
 * @param   pCmd        The chromium control command.
 * @param   cbCmd       Size of the command in bytes.
 */
int vboxCmdVBVACmdHostCtlSync(PPDMIDISPLAYVBVACALLBACKS pInterface,
                              struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd)
{
    PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
    struct VBOXVDMAHOST *pVdma = pVGAState->pVdma;
    VBOXCMDVBVA_CMDHOSTCTL_SYNC Data;
    Data.pVdma = pVdma;
    Data.fProcessing = 1;
    Data.rc = VERR_INTERNAL_ERROR;
    int rc = vdmaVBVACtlOpaqueHostSubmit(pVdma, pCmd, cbCmd, vboxCmdVBVACmdHostCtlSyncCb, &Data);
    if (!RT_SUCCESS(rc))
    {
        WARN(("vdmaVBVACtlOpaqueHostSubmit failed %d", rc));
        return rc;
    }

    /* NOTE(review): Data.fProcessing is read without atomics/barriers here;
     * this relies on the timed re-poll below to eventually observe the
     * callback's store — confirm on weakly ordered targets. */
    while (Data.fProcessing)
    {
        /* Poll infrequently to make sure no completed message has been missed. */
        RTSemEventMultiWait(pVdma->HostCrCtlCompleteEvent, 500);

        if (Data.fProcessing)
            RTThreadYield();
    }

    /* 'Our' message has been processed, so should reset the semaphore.
     * There is still possible that another message has been processed
     * and the semaphore has been signalled again.
     * Reset only if there are no other messages completed.
     */
    int32_t c = ASMAtomicDecS32(&pVdma->i32cHostCrCtlCompleted);
    Assert(c >= 0);
    if (!c)
        RTSemEventMultiReset(pVdma->HostCrCtlCompleteEvent);

    rc = Data.rc;
    if (!RT_SUCCESS(rc))
        WARN(("host call failed %d", rc));

    return rc;
}
3206
3207int vboxCmdVBVACmdCtl(PVGASTATE pVGAState, VBOXCMDVBVA_CTL *pCtl, uint32_t cbCtl)
3208{
3209 struct VBOXVDMAHOST *pVdma = pVGAState->pVdma;
3210 int rc = VINF_SUCCESS;
3211 switch (pCtl->u32Type)
3212 {
3213 case VBOXCMDVBVACTL_TYPE_3DCTL:
3214 return vdmaVBVACtlGenericGuestSubmit(pVdma, VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE, pCtl, cbCtl);
3215 case VBOXCMDVBVACTL_TYPE_RESIZE:
3216 return vdmaVBVACtlGenericGuestSubmit(pVdma, VBVAEXHOSTCTL_TYPE_GHH_RESIZE, pCtl, cbCtl);
3217 case VBOXCMDVBVACTL_TYPE_ENABLE:
3218 if (cbCtl != sizeof (VBOXCMDVBVA_CTL_ENABLE))
3219 {
3220 WARN(("incorrect enable size\n"));
3221 rc = VERR_INVALID_PARAMETER;
3222 break;
3223 }
3224 return vdmaVBVACtlEnableDisableSubmit(pVdma, (VBOXCMDVBVA_CTL_ENABLE*)pCtl);
3225 default:
3226 WARN(("unsupported type\n"));
3227 rc = VERR_INVALID_PARAMETER;
3228 break;
3229 }
3230
3231 pCtl->i32Result = rc;
3232 rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, pCtl);
3233 AssertRC(rc);
3234 return VINF_SUCCESS;
3235}
3236
3237int vboxCmdVBVACmdSubmit(PVGASTATE pVGAState)
3238{
3239 if (!VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva))
3240 {
3241 WARN(("vdma VBVA is disabled\n"));
3242 return VERR_INVALID_STATE;
3243 }
3244
3245 return vboxVDMACmdSubmitPerform(pVGAState->pVdma);
3246}
3247
3248int vboxCmdVBVACmdFlush(PVGASTATE pVGAState)
3249{
3250 WARN(("flush\n"));
3251 if (!VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva))
3252 {
3253 WARN(("vdma VBVA is disabled\n"));
3254 return VERR_INVALID_STATE;
3255 }
3256 return vboxVDMACmdSubmitPerform(pVGAState->pVdma);
3257}
3258
3259void vboxCmdVBVACmdTimer(PVGASTATE pVGAState)
3260{
3261 if (!VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva))
3262 return;
3263 vboxVDMACmdSubmitPerform(pVGAState->pVdma);
3264}
3265
3266bool vboxCmdVBVAIsEnabled(PVGASTATE pVGAState)
3267{
3268 return VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva);
3269}
3270#endif
3271
3272int vboxVDMASaveStateExecPrep(struct VBOXVDMAHOST *pVdma, PSSMHANDLE pSSM)
3273{
3274#ifdef VBOX_WITH_CRHGSMI
3275 int rc = vdmaVBVAPause(pVdma);
3276 if (RT_SUCCESS(rc))
3277 return VINF_SUCCESS;
3278
3279 if (rc != VERR_INVALID_STATE)
3280 {
3281 WARN(("vdmaVBVAPause failed %d\n", rc));
3282 return rc;
3283 }
3284
3285#ifdef DEBUG_misha
3286 WARN(("debug prep"));
3287#endif
3288
3289 PVGASTATE pVGAState = pVdma->pVGAState;
3290 PVBOXVDMACMD_CHROMIUM_CTL pCmd = (PVBOXVDMACMD_CHROMIUM_CTL)vboxVDMACrCtlCreate(
3291 VBOXVDMACMD_CHROMIUM_CTL_TYPE_SAVESTATE_BEGIN, sizeof (*pCmd));
3292 Assert(pCmd);
3293 if (pCmd)
3294 {
3295 rc = vboxVDMACrCtlPost(pVGAState, pCmd, sizeof (*pCmd));
3296 AssertRC(rc);
3297 if (RT_SUCCESS(rc))
3298 {
3299 rc = vboxVDMACrCtlGetRc(pCmd);
3300 }
3301 vboxVDMACrCtlRelease(pCmd);
3302 return rc;
3303 }
3304 return VERR_NO_MEMORY;
3305#else
3306 return VINF_SUCCESS;
3307#endif
3308}
3309
3310int vboxVDMASaveStateExecDone(struct VBOXVDMAHOST *pVdma, PSSMHANDLE pSSM)
3311{
3312#ifdef VBOX_WITH_CRHGSMI
3313 int rc = vdmaVBVAResume(pVdma);
3314 if (RT_SUCCESS(rc))
3315 return VINF_SUCCESS;
3316
3317 if (rc != VERR_INVALID_STATE)
3318 {
3319 WARN(("vdmaVBVAResume failed %d\n", rc));
3320 return rc;
3321 }
3322
3323#ifdef DEBUG_misha
3324 WARN(("debug done"));
3325#endif
3326
3327 PVGASTATE pVGAState = pVdma->pVGAState;
3328 PVBOXVDMACMD_CHROMIUM_CTL pCmd = (PVBOXVDMACMD_CHROMIUM_CTL)vboxVDMACrCtlCreate(
3329 VBOXVDMACMD_CHROMIUM_CTL_TYPE_SAVESTATE_END, sizeof (*pCmd));
3330 Assert(pCmd);
3331 if (pCmd)
3332 {
3333 rc = vboxVDMACrCtlPost(pVGAState, pCmd, sizeof (*pCmd));
3334 AssertRC(rc);
3335 if (RT_SUCCESS(rc))
3336 {
3337 rc = vboxVDMACrCtlGetRc(pCmd);
3338 }
3339 vboxVDMACrCtlRelease(pCmd);
3340 return rc;
3341 }
3342 return VERR_NO_MEMORY;
3343#else
3344 return VINF_SUCCESS;
3345#endif
3346}
3347
/**
 * Writes the VDMA VBVA state to the saved-state stream.
 *
 * Format: a single U32 that is either 0xffffffff (command VBVA disabled or
 * not compiled in) or the VRAM offset of the guest VBVA buffer, followed by
 * the worker thread's serialized state.
 *
 * @returns VBox status code.
 * @param   pVdma   The VDMA host state.
 * @param   pSSM    Saved state handle.
 */
int vboxVDMASaveStateExecPerform(struct VBOXVDMAHOST *pVdma, PSSMHANDLE pSSM)
{
    int rc;

#ifdef VBOX_WITH_CRHGSMI
    if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
#endif
    {
        /* Disabled (or CRHGSMI not built): store the 0xffffffff marker only. */
        rc = SSMR3PutU32(pSSM, 0xffffffff);
        AssertRCReturn(rc, rc);
        return VINF_SUCCESS;
    }

#ifdef VBOX_WITH_CRHGSMI
    PVGASTATE pVGAState = pVdma->pVGAState;
    uint8_t * pu8VramBase = pVGAState->vram_ptrR3;

    /* Save the guest VBVA buffer location as an offset into VRAM. */
    rc = SSMR3PutU32(pSSM, (uint32_t)(((uint8_t*)pVdma->CmdVbva.pVBVA) - pu8VramBase));
    AssertRCReturn(rc, rc);

    /* Hand the SSM handle to the worker thread to serialize the rest of the
     * state synchronously. */
    VBVAEXHOSTCTL HCtl;
    HCtl.enmType = VBVAEXHOSTCTL_TYPE_HH_SAVESTATE;
    HCtl.u.state.pSSM = pSSM;
    HCtl.u.state.u32Version = 0;
    return vdmaVBVACtlSubmitSync(pVdma, &HCtl, VBVAEXHOSTCTL_SOURCE_HOST);
#endif
}
3375
/**
 * Restores the VDMA VBVA state from the saved-state stream.
 *
 * Reads the marker/offset U32 written by vboxVDMASaveStateExecPerform; when
 * it is not 0xffffffff, re-enables command VBVA at that VRAM offset (paused),
 * lets the worker thread load its state, and resumes processing.
 *
 * @returns VBox status code; VERR_VERSION_MISMATCH when the stream contains
 *          VBVA state but CRHGSMI support is not compiled in.
 * @param   pVdma       The VDMA host state.
 * @param   pSSM        Saved state handle.
 * @param   u32Version  Saved state unit version.
 */
int vboxVDMASaveLoadExecPerform(struct VBOXVDMAHOST *pVdma, PSSMHANDLE pSSM, uint32_t u32Version)
{
    uint32_t u32;
    int rc = SSMR3GetU32(pSSM, &u32);
    AssertRCReturn(rc, rc);

    if (u32 != 0xffffffff)
    {
#ifdef VBOX_WITH_CRHGSMI
        /* Re-enable command VBVA at the saved VRAM offset; fPaused=true so no
         * commands are processed until the state below is loaded. */
        rc = vdmaVBVACtlEnableSubmitSync(pVdma, u32, true);
        AssertRCReturn(rc, rc);

        Assert(pVdma->CmdVbva.i32State == VBVAEXHOSTCONTEXT_ESTATE_PAUSED);

        /* Let the worker thread deserialize its state synchronously. */
        VBVAEXHOSTCTL HCtl;
        HCtl.enmType = VBVAEXHOSTCTL_TYPE_HH_LOADSTATE;
        HCtl.u.state.pSSM = pSSM;
        HCtl.u.state.u32Version = u32Version;
        rc = vdmaVBVACtlSubmitSync(pVdma, &HCtl, VBVAEXHOSTCTL_SOURCE_HOST);
        AssertRCReturn(rc, rc);

        rc = vdmaVBVAResume(pVdma);
        AssertRCReturn(rc, rc);

        return VINF_SUCCESS;
#else
        WARN(("Unsupported VBVACtl info!\n"));
        return VERR_VERSION_MISMATCH;
#endif
    }

    return VINF_SUCCESS;
}
3409
/**
 * Post-load completion hook for VDMA.
 *
 * The LOADSTATE_DONE notification below is disabled (see the @todo); with it
 * compiled out this function is a no-op that always succeeds.
 *
 * @returns VINF_SUCCESS.
 * @param   pVdma   The VDMA host state.
 */
int vboxVDMASaveLoadDone(struct VBOXVDMAHOST *pVdma)
{
#ifdef VBOX_WITH_CRHGSMI
#if 0 /** @todo r=bird: This code is still busted. Getting VERR_INVALID_STATE when restoring a trunk state taken by
       * 4.3.53 r92948 win.amd64. BTW. would be great if you put in a couple of comments here and there explaining what
       * the purpose of this code is. */
    VBVAEXHOSTCTL* pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva, VBVAEXHOSTCTL_TYPE_HH_LOADSTATE_DONE);
    if (!pHCtl)
    {
        WARN(("VBoxVBVAExHCtlCreate failed\n"));
        return VERR_NO_MEMORY;
    }

    /* sanity */
    pHCtl->u.cmd.pu8Cmd = NULL;
    pHCtl->u.cmd.cbCmd = 0;

    /* NULL completion will just free the ctl up */
    int rc = vdmaVBVACtlSubmit(pVdma, pHCtl, VBVAEXHOSTCTL_SOURCE_HOST, NULL, NULL);
    if (RT_FAILURE(rc))
    {
        Log(("vdmaVBVACtlSubmit failed rc %d\n", rc));
        VBoxVBVAExHCtlFree(&pVdma->CmdVbva, pHCtl);
        return rc;
    }
#endif
#endif
    return VINF_SUCCESS;
}
注意: 瀏覽 TracBrowser 來幫助您使用儲存庫瀏覽器

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette