VirtualBox

source: vbox/trunk/src/VBox/Devices/Graphics/DevVGA_VDMA.cpp@ 70596

Last change on this file since 70596 was 70596, checked in by vboxsync, 7 years ago

DevVGA: cleanup in progress

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 120.6 KB
 
1/* $Id: DevVGA_VDMA.cpp 70596 2018-01-15 22:46:29Z vboxsync $ */
2/** @file
3 * Video DMA (VDMA) support.
4 */
5
6/*
7 * Copyright (C) 2006-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_DEV_VGA
23#include <VBox/VMMDev.h>
24#include <VBox/vmm/pdmdev.h>
25#include <VBox/vmm/pgm.h>
26#include <VBoxVideo.h>
27#include <iprt/semaphore.h>
28#include <iprt/thread.h>
29#include <iprt/mem.h>
30#include <iprt/asm.h>
31#include <iprt/list.h>
32#include <iprt/param.h>
33
34#include "DevVGA.h"
35#include "HGSMI/SHGSMIHost.h"
36
37#include <VBoxVideo3D.h>
38#include <VBoxVideoHost3D.h>
39
40#ifdef DEBUG_misha
41# define VBOXVDBG_MEMCACHE_DISABLE
42#endif
43
44#ifndef VBOXVDBG_MEMCACHE_DISABLE
45# include <iprt/memcache.h>
46#endif
47
48
49/*********************************************************************************************************************************
50* Defined Constants And Macros *
51*********************************************************************************************************************************/
52#ifdef DEBUG_misha
53# define WARN_BP() do { AssertFailed(); } while (0)
54#else
55# define WARN_BP() do { } while (0)
56#endif
57#define WARN(_msg) do { \
58 LogRel(_msg); \
59 WARN_BP(); \
60 } while (0)
61
62#define VBOXVDMATHREAD_STATE_TERMINATED 0
63#define VBOXVDMATHREAD_STATE_CREATING 1
64#define VBOXVDMATHREAD_STATE_CREATED 3
65#define VBOXVDMATHREAD_STATE_TERMINATING 4
66
67
68/*********************************************************************************************************************************
69* Structures and Typedefs *
70*********************************************************************************************************************************/
71struct VBOXVDMATHREAD;
72
73typedef DECLCALLBACKPTR(void, PFNVBOXVDMATHREAD_CHANGED)(struct VBOXVDMATHREAD *pThread, int rc, void *pvThreadContext, void *pvChangeContext);
74
75#ifdef VBOX_WITH_CRHGSMI
76static DECLCALLBACK(int) vboxCmdVBVACmdCallout(struct VBOXVDMAHOST *pVdma, struct VBOXCRCMDCTL* pCmd, VBOXCRCMDCTL_CALLOUT_LISTENTRY *pEntry, PFNVBOXCRCMDCTL_CALLOUT_CB pfnCb);
77#endif
78
79
80typedef struct VBOXVDMATHREAD
81{
82 RTTHREAD hWorkerThread;
83 RTSEMEVENT hEvent;
84 volatile uint32_t u32State;
85 PFNVBOXVDMATHREAD_CHANGED pfnChanged;
86 void *pvChanged;
87} VBOXVDMATHREAD, *PVBOXVDMATHREAD;
88
89
90/* state transformations:
91 *
92 * submitter | processor
93 *
94 * LISTENING ---> PROCESSING
95 *
96 * */
97#define VBVAEXHOSTCONTEXT_STATE_LISTENING 0
98#define VBVAEXHOSTCONTEXT_STATE_PROCESSING 1
99
100#define VBVAEXHOSTCONTEXT_ESTATE_DISABLED -1
101#define VBVAEXHOSTCONTEXT_ESTATE_PAUSED 0
102#define VBVAEXHOSTCONTEXT_ESTATE_ENABLED 1
103
104typedef struct VBVAEXHOSTCONTEXT
105{
106 VBVABUFFER *pVBVA;
107 volatile int32_t i32State;
108 volatile int32_t i32EnableState;
109 volatile uint32_t u32cCtls;
110 /* critical section for accessing ctl lists */
111 RTCRITSECT CltCritSect;
112 RTLISTANCHOR GuestCtlList;
113 RTLISTANCHOR HostCtlList;
114#ifndef VBOXVDBG_MEMCACHE_DISABLE
115 RTMEMCACHE CtlCache;
116#endif
117} VBVAEXHOSTCONTEXT;
118
119typedef enum
120{
121 VBVAEXHOSTCTL_TYPE_UNDEFINED = 0,
122 VBVAEXHOSTCTL_TYPE_HH_INTERNAL_PAUSE,
123 VBVAEXHOSTCTL_TYPE_HH_INTERNAL_RESUME,
124 VBVAEXHOSTCTL_TYPE_HH_SAVESTATE,
125 VBVAEXHOSTCTL_TYPE_HH_LOADSTATE,
126 VBVAEXHOSTCTL_TYPE_HH_LOADSTATE_DONE,
127 VBVAEXHOSTCTL_TYPE_HH_BE_OPAQUE,
128 VBVAEXHOSTCTL_TYPE_HH_ON_HGCM_UNLOAD,
129 VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE,
130 VBVAEXHOSTCTL_TYPE_GHH_ENABLE,
131 VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED,
132 VBVAEXHOSTCTL_TYPE_GHH_DISABLE,
133 VBVAEXHOSTCTL_TYPE_GHH_RESIZE
134} VBVAEXHOSTCTL_TYPE;
135
136struct VBVAEXHOSTCTL;
137
138typedef DECLCALLBACK(void) FNVBVAEXHOSTCTL_COMPLETE(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl, int rc, void *pvComplete);
139typedef FNVBVAEXHOSTCTL_COMPLETE *PFNVBVAEXHOSTCTL_COMPLETE;
140
141typedef struct VBVAEXHOSTCTL
142{
143 RTLISTNODE Node;
144 VBVAEXHOSTCTL_TYPE enmType;
145 union
146 {
147 struct
148 {
149 uint8_t * pu8Cmd;
150 uint32_t cbCmd;
151 } cmd;
152
153 struct
154 {
155 PSSMHANDLE pSSM;
156 uint32_t u32Version;
157 } state;
158 } u;
159 PFNVBVAEXHOSTCTL_COMPLETE pfnComplete;
160 void *pvComplete;
161} VBVAEXHOSTCTL;
162
163/* VBoxVBVAExHP**, i.e. processor functions, can NOT be called concurrently with each other,
164 * but can be called together with other VBoxVBVAExS** (submitter) functions except Init/Start/Term, apparently.
165 * They may only be called by the processor, i.e. the entity that acquired the processor state by a direct or indirect call to VBoxVBVAExHSCheckCommands;
166 * see more detailed comments in the headers for the function definitions. */
167typedef enum
168{
169 VBVAEXHOST_DATA_TYPE_NO_DATA = 0,
170 VBVAEXHOST_DATA_TYPE_CMD,
171 VBVAEXHOST_DATA_TYPE_HOSTCTL,
172 VBVAEXHOST_DATA_TYPE_GUESTCTL
173} VBVAEXHOST_DATA_TYPE;
174
175
176#ifdef VBOX_WITH_CRHGSMI
177typedef struct VBOXVDMA_SOURCE
178{
179 VBVAINFOSCREEN Screen;
180 VBOXCMDVBVA_SCREENMAP_DECL(uint32_t, aTargetMap);
181} VBOXVDMA_SOURCE;
182#endif
183
184typedef struct VBOXVDMAHOST
185{
186 PHGSMIINSTANCE pHgsmi;
187 PVGASTATE pVGAState;
188#ifdef VBOX_WITH_CRHGSMI
189 VBVAEXHOSTCONTEXT CmdVbva;
190 VBOXVDMATHREAD Thread;
191 VBOXCRCMD_SVRINFO CrSrvInfo;
192 VBVAEXHOSTCTL* pCurRemainingHostCtl;
193 RTSEMEVENTMULTI HostCrCtlCompleteEvent;
194 int32_t volatile i32cHostCrCtlCompleted;
195 RTCRITSECT CalloutCritSect;
196// VBOXVDMA_SOURCE aSources[VBOX_VIDEO_MAX_SCREENS];
197#endif
198#ifdef VBOX_VDMA_WITH_WATCHDOG
199 PTMTIMERR3 WatchDogTimer;
200#endif
201} VBOXVDMAHOST, *PVBOXVDMAHOST;
202
203
204/*********************************************************************************************************************************
205* Internal Functions *
206*********************************************************************************************************************************/
207#ifdef VBOX_WITH_CRHGSMI
208static int vdmaVBVANotifyDisable(PVGASTATE pVGAState);
209
210static void VBoxVBVAExHPDataCompleteCmd(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint32_t cbCmd);
211static void VBoxVBVAExHPDataCompleteCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL *pCtl, int rc);
212
213/* VBoxVBVAExHP**, i.e. processor functions, can NOT be called concurrently with each other,
214 * but can be called concurrently with itself as well as with other VBoxVBVAEx** functions except Init/Start/Term, apparently. */
215static int VBoxVBVAExHSCheckCommands(struct VBVAEXHOSTCONTEXT *pCmdVbva);
216
217static int VBoxVBVAExHSInit(struct VBVAEXHOSTCONTEXT *pCmdVbva);
218static int VBoxVBVAExHSEnable(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVABUFFER *pVBVA);
219static int VBoxVBVAExHSDisable(struct VBVAEXHOSTCONTEXT *pCmdVbva);
220static void VBoxVBVAExHSTerm(struct VBVAEXHOSTCONTEXT *pCmdVbva);
221static int VBoxVBVAExHSSaveState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM);
222static int VBoxVBVAExHSLoadState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version);
223
224#endif /* VBOX_WITH_CRHGSMI */
225
226
227
228#ifdef VBOX_WITH_CRHGSMI
229
230static VBVAEXHOSTCTL* VBoxVBVAExHCtlAlloc(VBVAEXHOSTCONTEXT *pCmdVbva)
231{
232# ifndef VBOXVDBG_MEMCACHE_DISABLE
233 return (VBVAEXHOSTCTL*)RTMemCacheAlloc(pCmdVbva->CtlCache);
234# else
235 return (VBVAEXHOSTCTL*)RTMemAlloc(sizeof (VBVAEXHOSTCTL));
236# endif
237}
238
239static void VBoxVBVAExHCtlFree(VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL *pCtl)
240{
241# ifndef VBOXVDBG_MEMCACHE_DISABLE
242 RTMemCacheFree(pCmdVbva->CtlCache, pCtl);
243# else
244 RTMemFree(pCtl);
245# endif
246}
247
248static VBVAEXHOSTCTL *VBoxVBVAExHCtlCreate(VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL_TYPE enmType)
249{
250 VBVAEXHOSTCTL* pCtl = VBoxVBVAExHCtlAlloc(pCmdVbva);
251 if (!pCtl)
252 {
253 WARN(("VBoxVBVAExHCtlAlloc failed\n"));
254 return NULL;
255 }
256
257 pCtl->enmType = enmType;
258 return pCtl;
259}
260
261static int vboxVBVAExHSProcessorAcquire(struct VBVAEXHOSTCONTEXT *pCmdVbva)
262{
263 Assert(pCmdVbva->i32State >= VBVAEXHOSTCONTEXT_STATE_LISTENING);
264
265 if (ASMAtomicCmpXchgS32(&pCmdVbva->i32State, VBVAEXHOSTCONTEXT_STATE_PROCESSING, VBVAEXHOSTCONTEXT_STATE_LISTENING))
266 return VINF_SUCCESS;
267 return VERR_SEM_BUSY;
268}
269
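/* Illustrative sketch (not part of the original file): how the CAS-based
 * acquire above is meant to be paired with vboxVBVAExHPProcessorRelease()
 * (defined further down). Exactly one thread can hold the PROCESSING role;
 * a loser of the CAS simply backs off on VERR_SEM_BUSY.
 */
#if 0 /* example only */
static void vbvaExampleProcessOnce(struct VBVAEXHOSTCONTEXT *pCmdVbva)
{
    if (RT_SUCCESS(vboxVBVAExHSProcessorAcquire(pCmdVbva)))
    {
        /* This thread is now the single processor; do the work here... */
        vboxVBVAExHPProcessorRelease(pCmdVbva);
    }
    /* else VERR_SEM_BUSY: another thread is processing; nothing to do. */
}
#endif
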
270static VBVAEXHOSTCTL* vboxVBVAExHPCheckCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, bool *pfHostCtl, bool fHostOnlyMode)
271{
272 Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
273
274 if (!fHostOnlyMode && !ASMAtomicUoReadU32(&pCmdVbva->u32cCtls))
275 return NULL;
276
277 int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
278 if (RT_SUCCESS(rc))
279 {
280 VBVAEXHOSTCTL* pCtl = RTListGetFirst(&pCmdVbva->HostCtlList, VBVAEXHOSTCTL, Node);
281 if (pCtl)
282 *pfHostCtl = true;
283 else if (!fHostOnlyMode)
284 {
285 if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
286 {
287 pCtl = RTListGetFirst(&pCmdVbva->GuestCtlList, VBVAEXHOSTCTL, Node);
288 /* pCtl cannot be NULL here since pCmdVbva->u32cCtls is non-zero
289 * and there are no HostCtl commands. */
290 Assert(pCtl);
291 *pfHostCtl = false;
292 }
293 }
294
295 if (pCtl)
296 {
297 RTListNodeRemove(&pCtl->Node);
298 ASMAtomicDecU32(&pCmdVbva->u32cCtls);
299 }
300
301 RTCritSectLeave(&pCmdVbva->CltCritSect);
302
303 return pCtl;
304 }
305 else
306 WARN(("RTCritSectEnter failed %d\n", rc));
307
308 return NULL;
309}
310
311static VBVAEXHOSTCTL* VBoxVBVAExHPCheckHostCtlOnDisable(struct VBVAEXHOSTCONTEXT *pCmdVbva)
312{
313 bool fHostCtl = false;
314 VBVAEXHOSTCTL* pCtl = vboxVBVAExHPCheckCtl(pCmdVbva, &fHostCtl, true);
315 Assert(!pCtl || fHostCtl);
316 return pCtl;
317}
318
319static int VBoxVBVAExHPPause(struct VBVAEXHOSTCONTEXT *pCmdVbva)
320{
321 if (pCmdVbva->i32EnableState < VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
322 {
323 WARN(("Invalid state\n"));
324 return VERR_INVALID_STATE;
325 }
326
327 ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_PAUSED);
328 return VINF_SUCCESS;
329}
330
331static int VBoxVBVAExHPResume(struct VBVAEXHOSTCONTEXT *pCmdVbva)
332{
333 if (pCmdVbva->i32EnableState != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
334 {
335 WARN(("Invalid state\n"));
336 return VERR_INVALID_STATE;
337 }
338
339 ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_ENABLED);
340 return VINF_SUCCESS;
341}
342
343static bool vboxVBVAExHPCheckProcessCtlInternal(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL* pCtl)
344{
345 switch (pCtl->enmType)
346 {
347 case VBVAEXHOSTCTL_TYPE_HH_INTERNAL_PAUSE:
348 {
349 VBoxVBVAExHPPause(pCmdVbva);
350 VBoxVBVAExHPDataCompleteCtl(pCmdVbva, pCtl, VINF_SUCCESS);
351 return true;
352 }
353 case VBVAEXHOSTCTL_TYPE_HH_INTERNAL_RESUME:
354 {
355 VBoxVBVAExHPResume(pCmdVbva);
356 VBoxVBVAExHPDataCompleteCtl(pCmdVbva, pCtl, VINF_SUCCESS);
357 return true;
358 }
359 default:
360 return false;
361 }
362}
363
364static void vboxVBVAExHPProcessorRelease(struct VBVAEXHOSTCONTEXT *pCmdVbva)
365{
366 Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
367
368 ASMAtomicWriteS32(&pCmdVbva->i32State, VBVAEXHOSTCONTEXT_STATE_LISTENING);
369}
370
371static void vboxVBVAExHPHgEventSet(struct VBVAEXHOSTCONTEXT *pCmdVbva)
372{
373 Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
374 if (pCmdVbva->pVBVA)
375 ASMAtomicOrU32(&pCmdVbva->pVBVA->hostFlags.u32HostEvents, VBVA_F_STATE_PROCESSING);
376}
377
378static void vboxVBVAExHPHgEventClear(struct VBVAEXHOSTCONTEXT *pCmdVbva)
379{
380 Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
381 if (pCmdVbva->pVBVA)
382 ASMAtomicAndU32(&pCmdVbva->pVBVA->hostFlags.u32HostEvents, ~VBVA_F_STATE_PROCESSING);
383}
384
385/**
386 * Worker for vboxVBVAExHPDataGet.
387 * @thread VDMA
388 * @todo r=bird: revalidate this code.
389 */
390static int vboxVBVAExHPCmdGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd)
391{
392 Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
393 Assert(pCmdVbva->i32EnableState > VBVAEXHOSTCONTEXT_ESTATE_PAUSED);
394
395 VBVABUFFER *pVBVA = pCmdVbva->pVBVA;
396
397 uint32_t indexRecordFirst = pVBVA->indexRecordFirst;
398 uint32_t indexRecordFree = pVBVA->indexRecordFree;
399
400 Log(("first = %d, free = %d\n", indexRecordFirst, indexRecordFree));
401
402 if (indexRecordFirst == indexRecordFree)
403 {
404 /* No records to process. Return without assigning output variables. */
405 return VINF_EOF;
406 }
407
408 uint32_t cbRecordCurrent = ASMAtomicReadU32(&pVBVA->aRecords[indexRecordFirst].cbRecord);
409
410 /* A new record needs to be processed. */
411 if (cbRecordCurrent & VBVA_F_RECORD_PARTIAL)
412 {
413 /* the record is being recorded, try again */
414 return VINF_TRY_AGAIN;
415 }
416
417 uint32_t cbRecord = cbRecordCurrent & ~VBVA_F_RECORD_PARTIAL;
418
419 if (!cbRecord)
420 {
421 /* the record is being recorded, try again */
422 return VINF_TRY_AGAIN;
423 }
424
425 /* We should not actually get partial commands here. */
426 Assert(cbRecord);
427
428 /* The size of the largest contiguous chunk in the ring buffer. */
429 uint32_t u32BytesTillBoundary = pVBVA->cbData - pVBVA->off32Data;
430
431 /* The pointer to data in the ring buffer. */
432 uint8_t *pSrc = &pVBVA->au8Data[pVBVA->off32Data];
433
434 /* Return a pointer to the data if it is contiguous. */
435 if (u32BytesTillBoundary >= cbRecord)
436 {
437 /* The command does not cross buffer boundary. Return address in the buffer. */
438 *ppCmd = pSrc;
439 *pcbCmd = cbRecord;
440 return VINF_SUCCESS;
441 }
442
443 LogRel(("CmdVbva: cross-bound writes unsupported\n"));
444 return VERR_INVALID_STATE;
445}
446
447static void VBoxVBVAExHPDataCompleteCmd(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint32_t cbCmd)
448{
449 VBVABUFFER *pVBVA = pCmdVbva->pVBVA;
450 pVBVA->off32Data = (pVBVA->off32Data + cbCmd) % pVBVA->cbData;
451
452 pVBVA->indexRecordFirst = (pVBVA->indexRecordFirst + 1) % RT_ELEMENTS(pVBVA->aRecords);
453}
454
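/* Worked example (illustrative, not part of the original file) of the ring
 * arithmetic above: with cbData = 4096 and off32Data = 4000, completing a
 * cbCmd = 200 command wraps the data offset to (4000 + 200) % 4096 = 104,
 * while indexRecordFirst advances one slot modulo RT_ELEMENTS(aRecords).
 */
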
455static void VBoxVBVAExHPDataCompleteCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL *pCtl, int rc)
456{
457 if (pCtl->pfnComplete)
458 pCtl->pfnComplete(pCmdVbva, pCtl, rc, pCtl->pvComplete);
459 else
460 VBoxVBVAExHCtlFree(pCmdVbva, pCtl);
461}
462
463
464/**
465 * Worker for VBoxVBVAExHPDataGet.
466 * @thread VDMA
467 */
468static VBVAEXHOST_DATA_TYPE vboxVBVAExHPDataGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd)
469{
470 Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
471 VBVAEXHOSTCTL *pCtl;
472 bool fHostCtl;
473
474 for (;;)
475 {
476 pCtl = vboxVBVAExHPCheckCtl(pCmdVbva, &fHostCtl, false);
477 if (pCtl)
478 {
479 if (fHostCtl)
480 {
481 if (!vboxVBVAExHPCheckProcessCtlInternal(pCmdVbva, pCtl))
482 {
483 *ppCmd = (uint8_t*)pCtl;
484 *pcbCmd = sizeof (*pCtl);
485 return VBVAEXHOST_DATA_TYPE_HOSTCTL;
486 }
487 continue;
488 }
489 *ppCmd = (uint8_t*)pCtl;
490 *pcbCmd = sizeof (*pCtl);
491 return VBVAEXHOST_DATA_TYPE_GUESTCTL;
492 }
493
494 if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) <= VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
495 return VBVAEXHOST_DATA_TYPE_NO_DATA;
496
497 int rc = vboxVBVAExHPCmdGet(pCmdVbva, ppCmd, pcbCmd);
498 switch (rc)
499 {
500 case VINF_SUCCESS:
501 return VBVAEXHOST_DATA_TYPE_CMD;
502 case VINF_EOF:
503 return VBVAEXHOST_DATA_TYPE_NO_DATA;
504 case VINF_TRY_AGAIN:
505 RTThreadSleep(1);
506 continue;
507 default:
508 /* This is something really unexpected, i.e. most likely the guest has written something incorrect to the VBVA buffer. */
509 WARN(("Warning: vboxVBVAExHPCmdGet returned unexpected status %d\n", rc));
510 return VBVAEXHOST_DATA_TYPE_NO_DATA;
511 }
512 }
513 /* not reached */
514}
515
516/**
517 * Called by vboxVDMAWorkerThread to get the next command to process.
518 * @thread VDMA
519 */
520static VBVAEXHOST_DATA_TYPE VBoxVBVAExHPDataGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd)
521{
522 VBVAEXHOST_DATA_TYPE enmType = vboxVBVAExHPDataGet(pCmdVbva, ppCmd, pcbCmd);
523 if (enmType == VBVAEXHOST_DATA_TYPE_NO_DATA)
524 {
525 vboxVBVAExHPHgEventClear(pCmdVbva);
526 vboxVBVAExHPProcessorRelease(pCmdVbva);
527
528 /*
529 * We need to prevent racing between us clearing the flag and command check/submission thread, i.e.
530 * 1. we check the queue -> and it is empty
531 * 2. submitter adds command to the queue
532 * 3. submitter checks the "processing" -> and it is true, thus it does not submit a notification
533 * 4. we clear the "processing" state
534 * 5. ->here we need to re-check the queue state to ensure we do not leak the notification of the above command
535 * 6. if the queue appears to be not-empty set the "processing" state back to "true"
536 */
537 int rc = vboxVBVAExHSProcessorAcquire(pCmdVbva);
538 if (RT_SUCCESS(rc))
539 {
540 /* we are the processor now */
541 enmType = vboxVBVAExHPDataGet(pCmdVbva, ppCmd, pcbCmd);
542 if (enmType == VBVAEXHOST_DATA_TYPE_NO_DATA)
543 {
544 vboxVBVAExHPProcessorRelease(pCmdVbva);
545 return VBVAEXHOST_DATA_TYPE_NO_DATA;
546 }
547
548 vboxVBVAExHPHgEventSet(pCmdVbva);
549 }
550 }
551
552 return enmType;
553}
554
555DECLINLINE(bool) vboxVBVAExHSHasCommands(struct VBVAEXHOSTCONTEXT *pCmdVbva)
556{
557 VBVABUFFER *pVBVA = pCmdVbva->pVBVA;
558
559 if (pVBVA)
560 {
561 uint32_t indexRecordFirst = pVBVA->indexRecordFirst;
562 uint32_t indexRecordFree = pVBVA->indexRecordFree;
563
564 if (indexRecordFirst != indexRecordFree)
565 return true;
566 }
567
568 return !!ASMAtomicReadU32(&pCmdVbva->u32cCtls);
569}
570
571/** Checks whether new commands are ready for processing.
572 * @returns
573 * VINF_SUCCESS - there are commands in the queue, and the calling thread is now the processor (i.e. typically it would delegate processing to a worker thread)
574 * VINF_EOF - no commands in the queue
575 * VINF_ALREADY_INITIALIZED - another thread is already processing the commands
576 * VERR_INVALID_STATE - the VBVA is paused or pausing */
577static int VBoxVBVAExHSCheckCommands(struct VBVAEXHOSTCONTEXT *pCmdVbva)
578{
579 int rc = vboxVBVAExHSProcessorAcquire(pCmdVbva);
580 if (RT_SUCCESS(rc))
581 {
582 /* we are the processor now */
583 if (vboxVBVAExHSHasCommands(pCmdVbva))
584 {
585 vboxVBVAExHPHgEventSet(pCmdVbva);
586 return VINF_SUCCESS;
587 }
588
589 vboxVBVAExHPProcessorRelease(pCmdVbva);
590 return VINF_EOF;
591 }
592 if (rc == VERR_SEM_BUSY)
593 return VINF_ALREADY_INITIALIZED;
594 return VERR_INVALID_STATE;
595}
596
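/* Illustrative caller sketch (not part of the original file): a typical
 * submitter-side reaction to the status codes documented above. The name
 * vbvaExampleKickWorker is made up; VBoxVDMAThreadEventNotify is the real
 * notification helper defined further down in this file.
 */
#if 0 /* example only */
static void vbvaExampleKickWorker(PVBOXVDMAHOST pVdma)
{
    int rc = VBoxVBVAExHSCheckCommands(&pVdma->CmdVbva);
    if (rc == VINF_SUCCESS)                        /* we became the processor */
        VBoxVDMAThreadEventNotify(&pVdma->Thread); /* so wake the worker thread */
    /* VINF_EOF: queue empty; VINF_ALREADY_INITIALIZED: someone else is on it. */
}
#endif
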
597static int VBoxVBVAExHSInit(struct VBVAEXHOSTCONTEXT *pCmdVbva)
598{
599 memset(pCmdVbva, 0, sizeof (*pCmdVbva));
600 int rc = RTCritSectInit(&pCmdVbva->CltCritSect);
601 if (RT_SUCCESS(rc))
602 {
603# ifndef VBOXVDBG_MEMCACHE_DISABLE
604 rc = RTMemCacheCreate(&pCmdVbva->CtlCache, sizeof (VBVAEXHOSTCTL),
605 0, /* size_t cbAlignment */
606 UINT32_MAX, /* uint32_t cMaxObjects */
607 NULL, /* PFNMEMCACHECTOR pfnCtor*/
608 NULL, /* PFNMEMCACHEDTOR pfnDtor*/
609 NULL, /* void *pvUser*/
610 0 /* uint32_t fFlags*/
611 );
612 if (RT_SUCCESS(rc))
613# endif
614 {
615 RTListInit(&pCmdVbva->GuestCtlList);
616 RTListInit(&pCmdVbva->HostCtlList);
617 pCmdVbva->i32State = VBVAEXHOSTCONTEXT_STATE_PROCESSING;
618 pCmdVbva->i32EnableState = VBVAEXHOSTCONTEXT_ESTATE_DISABLED;
619 return VINF_SUCCESS;
620 }
621# ifndef VBOXVDBG_MEMCACHE_DISABLE
622 else
623 WARN(("RTMemCacheCreate failed %d\n", rc));
624# endif
625 }
626 else
627 WARN(("RTCritSectInit failed %d\n", rc));
628
629 return rc;
630}
631
632DECLINLINE(bool) VBoxVBVAExHSIsEnabled(struct VBVAEXHOSTCONTEXT *pCmdVbva)
633{
634 return (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) >= VBVAEXHOSTCONTEXT_ESTATE_PAUSED);
635}
636
637DECLINLINE(bool) VBoxVBVAExHSIsDisabled(struct VBVAEXHOSTCONTEXT *pCmdVbva)
638{
639 return (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) == VBVAEXHOSTCONTEXT_ESTATE_DISABLED);
640}
641
642static int VBoxVBVAExHSEnable(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVABUFFER *pVBVA)
643{
644 if (VBoxVBVAExHSIsEnabled(pCmdVbva))
645 {
646 WARN(("VBVAEx is enabled already\n"));
647 return VERR_INVALID_STATE;
648 }
649
650 pCmdVbva->pVBVA = pVBVA;
651 pCmdVbva->pVBVA->hostFlags.u32HostEvents = 0;
652 ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_ENABLED);
653 return VINF_SUCCESS;
654}
655
656static int VBoxVBVAExHSDisable(struct VBVAEXHOSTCONTEXT *pCmdVbva)
657{
658 if (VBoxVBVAExHSIsDisabled(pCmdVbva))
659 return VINF_SUCCESS;
660
661 ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_DISABLED);
662 return VINF_SUCCESS;
663}
664
665static void VBoxVBVAExHSTerm(struct VBVAEXHOSTCONTEXT *pCmdVbva)
666{
667 /* ensure the processor is stopped */
668 Assert(pCmdVbva->i32State >= VBVAEXHOSTCONTEXT_STATE_LISTENING);
669
670 /* ensure no one tries to submit the command */
671 if (pCmdVbva->pVBVA)
672 pCmdVbva->pVBVA->hostFlags.u32HostEvents = 0;
673
674 Assert(RTListIsEmpty(&pCmdVbva->GuestCtlList));
675 Assert(RTListIsEmpty(&pCmdVbva->HostCtlList));
676
677 RTCritSectDelete(&pCmdVbva->CltCritSect);
678
679# ifndef VBOXVDBG_MEMCACHE_DISABLE
680 RTMemCacheDestroy(pCmdVbva->CtlCache);
681# endif
682
683 memset(pCmdVbva, 0, sizeof (*pCmdVbva));
684}
685
686static int vboxVBVAExHSSaveGuestCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL* pCtl, uint8_t* pu8VramBase, PSSMHANDLE pSSM)
687{
688 RT_NOREF(pCmdVbva);
689 int rc = SSMR3PutU32(pSSM, pCtl->enmType);
690 AssertRCReturn(rc, rc);
691 rc = SSMR3PutU32(pSSM, pCtl->u.cmd.cbCmd);
692 AssertRCReturn(rc, rc);
693 rc = SSMR3PutU32(pSSM, (uint32_t)(pCtl->u.cmd.pu8Cmd - pu8VramBase));
694 AssertRCReturn(rc, rc);
695
696 return VINF_SUCCESS;
697}
698
699static int vboxVBVAExHSSaveStateLocked(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM)
700{
701 if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
702 {
703 WARN(("vbva not paused\n"));
704 return VERR_INVALID_STATE;
705 }
706
707 int rc;
708 VBVAEXHOSTCTL* pCtl;
709 RTListForEach(&pCmdVbva->GuestCtlList, pCtl, VBVAEXHOSTCTL, Node)
710 {
711 rc = vboxVBVAExHSSaveGuestCtl(pCmdVbva, pCtl, pu8VramBase, pSSM);
712 AssertRCReturn(rc, rc);
713 }
714
715 rc = SSMR3PutU32(pSSM, 0);
716 AssertRCReturn(rc, rc);
717
718 return VINF_SUCCESS;
719}
720
721
722/** Saves the state.
723 * @returns VBox status code; failure if saving the state failed.
724 */
725static int VBoxVBVAExHSSaveState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM)
726{
727 int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
728 if (RT_FAILURE(rc))
729 {
730 WARN(("RTCritSectEnter failed %d\n", rc));
731 return rc;
732 }
733
734 rc = vboxVBVAExHSSaveStateLocked(pCmdVbva, pu8VramBase, pSSM);
735 if (RT_FAILURE(rc))
736 WARN(("vboxVBVAExHSSaveStateLocked failed %d\n", rc));
737
738 RTCritSectLeave(&pCmdVbva->CltCritSect);
739
740 return rc;
741}
742
743static int vboxVBVAExHSLoadGuestCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version)
744{
745 RT_NOREF(u32Version);
746 uint32_t u32;
747 int rc = SSMR3GetU32(pSSM, &u32);
748 AssertLogRelRCReturn(rc, rc);
749
750 if (!u32)
751 return VINF_EOF;
752
753 VBVAEXHOSTCTL* pHCtl = VBoxVBVAExHCtlCreate(pCmdVbva, (VBVAEXHOSTCTL_TYPE)u32);
754 if (!pHCtl)
755 {
756 WARN(("VBoxVBVAExHCtlCreate failed\n"));
757 return VERR_NO_MEMORY;
758 }
759
760 rc = SSMR3GetU32(pSSM, &u32);
761 AssertLogRelRCReturn(rc, rc);
762 pHCtl->u.cmd.cbCmd = u32;
763
764 rc = SSMR3GetU32(pSSM, &u32);
765 AssertLogRelRCReturn(rc, rc);
766 pHCtl->u.cmd.pu8Cmd = pu8VramBase + u32;
767
768 RTListAppend(&pCmdVbva->GuestCtlList, &pHCtl->Node);
769 ++pCmdVbva->u32cCtls;
770
771 return VINF_SUCCESS;
772}
773
774
775static int vboxVBVAExHSLoadStateLocked(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version)
776{
777 if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
778 {
779 WARN(("vbva not stopped\n"));
780 return VERR_INVALID_STATE;
781 }
782
783 int rc;
784
785 do {
786 rc = vboxVBVAExHSLoadGuestCtl(pCmdVbva, pu8VramBase, pSSM, u32Version);
787 AssertLogRelRCReturn(rc, rc);
788 } while (VINF_EOF != rc);
789
790 return VINF_SUCCESS;
791}
792
793/** Loads the state.
794 * @returns VBox status code; failure if loading the state failed.
795 */
796static int VBoxVBVAExHSLoadState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version)
797{
798 Assert(VGA_SAVEDSTATE_VERSION_3D <= u32Version);
799 int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
800 if (RT_FAILURE(rc))
801 {
802 WARN(("RTCritSectEnter failed %d\n", rc));
803 return rc;
804 }
805
806 rc = vboxVBVAExHSLoadStateLocked(pCmdVbva, pu8VramBase, pSSM, u32Version);
807 if (RT_FAILURE(rc))
808 WARN(("vboxVBVAExHSSaveStateLocked failed %d\n", rc));
809
810 RTCritSectLeave(&pCmdVbva->CltCritSect);
811
812 return rc;
813}
814
815typedef enum
816{
817 VBVAEXHOSTCTL_SOURCE_GUEST = 0,
818 VBVAEXHOSTCTL_SOURCE_HOST
819} VBVAEXHOSTCTL_SOURCE;
820
821
822static int VBoxVBVAExHCtlSubmit(VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL* pCtl, VBVAEXHOSTCTL_SOURCE enmSource,
823 PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
824{
825 if (!VBoxVBVAExHSIsEnabled(pCmdVbva))
826 {
827 Log(("cmd vbva not enabled\n"));
828 return VERR_INVALID_STATE;
829 }
830
831 pCtl->pfnComplete = pfnComplete;
832 pCtl->pvComplete = pvComplete;
833
834 int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
835 if (RT_SUCCESS(rc))
836 {
837 if (!VBoxVBVAExHSIsEnabled(pCmdVbva))
838 {
839 Log(("cmd vbva not enabled\n"));
840 RTCritSectLeave(&pCmdVbva->CltCritSect);
841 return VERR_INVALID_STATE;
842 }
843
844 if (enmSource > VBVAEXHOSTCTL_SOURCE_GUEST)
845 RTListAppend(&pCmdVbva->HostCtlList, &pCtl->Node);
846 else
847 RTListAppend(&pCmdVbva->GuestCtlList, &pCtl->Node);
848
849 ASMAtomicIncU32(&pCmdVbva->u32cCtls);
850
851 RTCritSectLeave(&pCmdVbva->CltCritSect);
852
853 rc = VBoxVBVAExHSCheckCommands(pCmdVbva);
854 }
855 else
856 WARN(("RTCritSectEnter failed %d\n", rc));
857
858 return rc;
859}
860
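/* Illustrative sketch (not part of the original file): allocating a control
 * record and submitting it with a completion callback. On submit failure the
 * record was never queued, so the caller still owns it and must free it.
 */
#if 0 /* example only */
static int vbvaExampleSubmitHostCtl(VBVAEXHOSTCONTEXT *pCmdVbva,
                                    PFNVBVAEXHOSTCTL_COMPLETE pfnDone, void *pvDone)
{
    VBVAEXHOSTCTL *pCtl = VBoxVBVAExHCtlCreate(pCmdVbva, VBVAEXHOSTCTL_TYPE_HH_BE_OPAQUE);
    if (!pCtl)
        return VERR_NO_MEMORY;
    int rc = VBoxVBVAExHCtlSubmit(pCmdVbva, pCtl, VBVAEXHOSTCTL_SOURCE_HOST, pfnDone, pvDone);
    if (RT_FAILURE(rc))
        VBoxVBVAExHCtlFree(pCmdVbva, pCtl);
    return rc;
}
#endif
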
861/**
862 * Call VDMA thread creation notification callback.
863 */
864void VBoxVDMAThreadNotifyConstructSucceeded(PVBOXVDMATHREAD pThread, void *pvThreadContext)
865{
866 Assert(pThread->u32State == VBOXVDMATHREAD_STATE_CREATING);
867 PFNVBOXVDMATHREAD_CHANGED pfnChanged = pThread->pfnChanged;
868 void *pvChanged = pThread->pvChanged;
869
870 pThread->pfnChanged = NULL;
871 pThread->pvChanged = NULL;
872
873 ASMAtomicWriteU32(&pThread->u32State, VBOXVDMATHREAD_STATE_CREATED);
874
875 if (pfnChanged)
876 pfnChanged(pThread, VINF_SUCCESS, pvThreadContext, pvChanged);
877}
878
879/**
880 * Call VDMA thread termination notification callback.
881 */
882void VBoxVDMAThreadNotifyTerminatingSucceeded(PVBOXVDMATHREAD pThread, void *pvThreadContext)
883{
884 Assert(pThread->u32State == VBOXVDMATHREAD_STATE_TERMINATING);
885 PFNVBOXVDMATHREAD_CHANGED pfnChanged = pThread->pfnChanged;
886 void *pvChanged = pThread->pvChanged;
887
888 pThread->pfnChanged = NULL;
889 pThread->pvChanged = NULL;
890
891 if (pfnChanged)
892 pfnChanged(pThread, VINF_SUCCESS, pvThreadContext, pvChanged);
893}
894
895/**
896 * Check if VDMA thread is terminating.
897 */
898DECLINLINE(bool) VBoxVDMAThreadIsTerminating(PVBOXVDMATHREAD pThread)
899{
900 return ASMAtomicUoReadU32(&pThread->u32State) == VBOXVDMATHREAD_STATE_TERMINATING;
901}
902
903/**
904 * Init VDMA thread.
905 */
906void VBoxVDMAThreadInit(PVBOXVDMATHREAD pThread)
907{
908 memset(pThread, 0, sizeof (*pThread));
909 pThread->u32State = VBOXVDMATHREAD_STATE_TERMINATED;
910}
911
912/**
913 * Clean up VDMA thread.
914 */
915int VBoxVDMAThreadCleanup(PVBOXVDMATHREAD pThread)
916{
917 uint32_t u32State = ASMAtomicUoReadU32(&pThread->u32State);
918 switch (u32State)
919 {
920 case VBOXVDMATHREAD_STATE_TERMINATED:
921 return VINF_SUCCESS;
922 case VBOXVDMATHREAD_STATE_TERMINATING:
923 {
924 int rc = RTThreadWait(pThread->hWorkerThread, RT_INDEFINITE_WAIT, NULL);
925 if (!RT_SUCCESS(rc))
926 {
927 WARN(("RTThreadWait failed %d\n", rc));
928 return rc;
929 }
930
931 RTSemEventDestroy(pThread->hEvent);
932
933 ASMAtomicWriteU32(&pThread->u32State, VBOXVDMATHREAD_STATE_TERMINATED);
934 return VINF_SUCCESS;
935 }
936 default:
937 WARN(("invalid state"));
938 return VERR_INVALID_STATE;
939 }
940}
941
942/**
943 * Start VDMA thread.
944 */
945int VBoxVDMAThreadCreate(PVBOXVDMATHREAD pThread, PFNRTTHREAD pfnThread, void *pvThread,
946 PFNVBOXVDMATHREAD_CHANGED pfnCreated, void *pvCreated)
947{
948 int rc = VBoxVDMAThreadCleanup(pThread);
949 if (RT_FAILURE(rc))
950 {
951 WARN(("VBoxVDMAThreadCleanup failed %d\n", rc));
952 return rc;
953 }
954
955 rc = RTSemEventCreate(&pThread->hEvent);
956 if (RT_SUCCESS(rc))
957 {
958 pThread->u32State = VBOXVDMATHREAD_STATE_CREATING;
959 pThread->pfnChanged = pfnCreated;
960 pThread->pvChanged = pvCreated;
961 rc = RTThreadCreate(&pThread->hWorkerThread, pfnThread, pvThread, 0, RTTHREADTYPE_IO, RTTHREADFLAGS_WAITABLE, "VDMA");
962 if (RT_SUCCESS(rc))
963 return VINF_SUCCESS;
964 WARN(("RTThreadCreate failed %d\n", rc));
965
966 RTSemEventDestroy(pThread->hEvent);
967 }
968 else
969 WARN(("RTSemEventCreate failed %d\n", rc));
970
971 pThread->u32State = VBOXVDMATHREAD_STATE_TERMINATED;
972
973 return rc;
974}
975
976DECLINLINE(int) VBoxVDMAThreadEventNotify(PVBOXVDMATHREAD pThread)
977{
978 int rc = RTSemEventSignal(pThread->hEvent);
979 AssertRC(rc);
980 return rc;
981}
982
983DECLINLINE(int) VBoxVDMAThreadEventWait(PVBOXVDMATHREAD pThread, RTMSINTERVAL cMillies)
984{
985 int rc = RTSemEventWait(pThread->hEvent, cMillies);
986 AssertRC(rc);
987 return rc;
988}
989
990int VBoxVDMAThreadTerm(PVBOXVDMATHREAD pThread, PFNVBOXVDMATHREAD_CHANGED pfnTerminated, void*pvTerminated, bool fNotify)
991{
992 int rc;
993 do
994 {
995 uint32_t u32State = ASMAtomicUoReadU32(&pThread->u32State);
996 switch (u32State)
997 {
998 case VBOXVDMATHREAD_STATE_CREATED:
999 pThread->pfnChanged = pfnTerminated;
1000 pThread->pvChanged = pvTerminated;
1001 ASMAtomicWriteU32(&pThread->u32State, VBOXVDMATHREAD_STATE_TERMINATING);
1002 if (fNotify)
1003 {
1004 rc = VBoxVDMAThreadEventNotify(pThread);
1005 AssertRC(rc);
1006 }
1007 return VINF_SUCCESS;
1008 case VBOXVDMATHREAD_STATE_TERMINATING:
1009 case VBOXVDMATHREAD_STATE_TERMINATED:
1010 {
1011 WARN(("thread is marked to termination or terminated\nn"));
1012 return VERR_INVALID_STATE;
1013 }
1014 case VBOXVDMATHREAD_STATE_CREATING:
1015 {
1016 /* wait till the thread creation is completed */
1017 WARN(("concurrent thread create/destron\n"));
1018 RTThreadYield();
1019 continue;
1020 }
1021 default:
1022 WARN(("invalid state"));
1023 return VERR_INVALID_STATE;
1024 }
1025 } while (1);
1026
1027 WARN(("should never be here\n"));
1028 return VERR_INTERNAL_ERROR;
1029}
1030
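/* Illustrative lifecycle sketch (not part of the original file): the intended
 * TERMINATED -> CREATING -> CREATED -> TERMINATING -> TERMINATED sequence,
 * using only the thread helpers defined above. The function name is made up,
 * and the worker itself is expected to invoke the
 * VBoxVDMAThreadNotifyConstructSucceeded/TerminatingSucceeded notifications.
 */
#if 0 /* example only */
static int vdmaExampleThreadLifecycle(PVBOXVDMATHREAD pThread, PFNRTTHREAD pfnWorker, void *pvUser)
{
    VBoxVDMAThreadInit(pThread);                                           /* TERMINATED */
    int rc = VBoxVDMAThreadCreate(pThread, pfnWorker, pvUser, NULL, NULL); /* -> CREATING */
    if (RT_SUCCESS(rc))
    {
        /* ... the worker moves the state to CREATED ... */
        rc = VBoxVDMAThreadTerm(pThread, NULL, NULL, true /*fNotify*/);    /* -> TERMINATING */
        if (RT_SUCCESS(rc))
            rc = VBoxVDMAThreadCleanup(pThread);   /* waits; back to TERMINATED */
    }
    return rc;
}
#endif
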
1031static int vdmaVBVACtlSubmitSync(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL *pCtl, VBVAEXHOSTCTL_SOURCE enmSource);
1032
1033typedef DECLCALLBACK(void) FNVBOXVDMACRCTL_CALLBACK(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, void* pvContext);
1034typedef FNVBOXVDMACRCTL_CALLBACK *PFNVBOXVDMACRCTL_CALLBACK;
1035
1036typedef struct VBOXVDMACMD_CHROMIUM_CTL_PRIVATE
1037{
1038 uint32_t cRefs;
1039 int32_t rc;
1040 PFNVBOXVDMACRCTL_CALLBACK pfnCompletion;
1041 void *pvCompletion;
1042 VBOXVDMACMD_CHROMIUM_CTL Cmd;
1043} VBOXVDMACMD_CHROMIUM_CTL_PRIVATE, *PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE;
1044
1045# define VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(_p) ((PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE)(((uint8_t*)(_p)) - RT_OFFSETOF(VBOXVDMACMD_CHROMIUM_CTL_PRIVATE, Cmd)))
1046
1047static PVBOXVDMACMD_CHROMIUM_CTL vboxVDMACrCtlCreate(VBOXVDMACMD_CHROMIUM_CTL_TYPE enmCmd, uint32_t cbCmd)
1048{
1049 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = (PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE)RTMemAllocZ(cbCmd + RT_OFFSETOF(VBOXVDMACMD_CHROMIUM_CTL_PRIVATE, Cmd));
1050 Assert(pHdr);
1051 if (pHdr)
1052 {
1053 pHdr->cRefs = 1;
1054 pHdr->rc = VERR_NOT_IMPLEMENTED;
1055 pHdr->Cmd.enmType = enmCmd;
1056 pHdr->Cmd.cbCmd = cbCmd;
1057 return &pHdr->Cmd;
1058 }
1059
1060 return NULL;
1061}
1062
1063DECLINLINE(void) vboxVDMACrCtlRelease (PVBOXVDMACMD_CHROMIUM_CTL pCmd)
1064{
1065 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
1066 uint32_t cRefs = ASMAtomicDecU32(&pHdr->cRefs);
1067 if (!cRefs)
1068 RTMemFree(pHdr);
1069}
1070
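/* Illustrative sketch (not part of the original file): the FROM_CTL macro
 * above is the classic container_of pattern; it recovers the private header
 * that vboxVDMACrCtlCreate() allocated in front of the public command.
 */
#if 0 /* example only */
static void vdmaExampleContainerOf(PVBOXVDMACMD_CHROMIUM_CTL pCmd)
{
    PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
    Assert(&pHdr->Cmd == pCmd);  /* the public part is embedded in the private header */
    vboxVDMACrCtlRelease(pCmd);  /* drops a reference; frees pHdr when it reaches zero */
}
#endif
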
1071#if 0 /* unused */
1072DECLINLINE(void) vboxVDMACrCtlRetain(PVBOXVDMACMD_CHROMIUM_CTL pCmd)
1073{
1074 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
1075 ASMAtomicIncU32(&pHdr->cRefs);
1076}
1077#endif /* unused */
1078
1079DECLINLINE(int) vboxVDMACrCtlGetRc (PVBOXVDMACMD_CHROMIUM_CTL pCmd)
1080{
1081 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
1082 return pHdr->rc;
1083}
1084
1085static DECLCALLBACK(void) vboxVDMACrCtlCbSetEvent(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, void* pvContext)
1086{
1087 RT_NOREF(pVGAState, pCmd);
1088 RTSemEventSignal((RTSEMEVENT)pvContext);
1089}
1090
1091# if 0 /** @todo vboxVDMACrCtlCbReleaseCmd is unused */
1092static DECLCALLBACK(void) vboxVDMACrCtlCbReleaseCmd(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, void* pvContext)
1093{
1094 RT_NOREF(pVGAState, pvContext);
1095 vboxVDMACrCtlRelease(pCmd);
1096}
1097# endif
1098
1099static int vboxVDMACrCtlPostAsync (PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, uint32_t cbCmd, PFNVBOXVDMACRCTL_CALLBACK pfnCompletion, void *pvCompletion)
1100{
1101 if ( pVGAState->pDrv
1102 && pVGAState->pDrv->pfnCrHgsmiControlProcess)
1103 {
1104 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
1105 pHdr->pfnCompletion = pfnCompletion;
1106 pHdr->pvCompletion = pvCompletion;
1107 pVGAState->pDrv->pfnCrHgsmiControlProcess(pVGAState->pDrv, pCmd, cbCmd);
1108 return VINF_SUCCESS;
1109 }
1110# ifdef DEBUG_misha
1111 Assert(0);
1112# endif
1113 return VERR_NOT_SUPPORTED;
1114}
1115
1116/**
1117 * Posts a control command and waits for its completion.
1118 */
1119static int vboxVDMACrCtlPost(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, uint32_t cbCmd)
1120{
1121 RTSEMEVENT hComplEvent;
1122 int rc = RTSemEventCreate(&hComplEvent);
1123 AssertRC(rc);
1124 if (RT_SUCCESS(rc))
1125 {
1126 rc = vboxVDMACrCtlPostAsync(pVGAState, pCmd, cbCmd, vboxVDMACrCtlCbSetEvent, (void*)hComplEvent);
1127# ifdef DEBUG_misha
1128 AssertRC(rc);
1129# endif
1130 if (RT_SUCCESS(rc))
1131 {
1132 rc = RTSemEventWaitNoResume(hComplEvent, RT_INDEFINITE_WAIT);
1133 AssertRC(rc);
1134 if (RT_SUCCESS(rc))
1135 {
1136 RTSemEventDestroy(hComplEvent);
1137 }
1138 }
1139 else
1140 {
1141 /* the posting failed, so nobody will signal the event; destroy it right away */
1142 RTSemEventDestroy(hComplEvent);
1143 }
1144 }
1145 return rc;
1146}
1147
1148typedef struct VDMA_VBVA_CTL_CYNC_COMPLETION
1149{
1150 int rc;
1151 RTSEMEVENT hEvent;
1152} VDMA_VBVA_CTL_CYNC_COMPLETION;
1153
1154static DECLCALLBACK(void) vboxVDMACrHgcmSubmitSyncCompletion(struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd, int rc, void *pvCompletion)
1155{
1156 RT_NOREF(pCmd, cbCmd);
1157 VDMA_VBVA_CTL_CYNC_COMPLETION *pData = (VDMA_VBVA_CTL_CYNC_COMPLETION*)pvCompletion;
1158 pData->rc = rc;
1159 rc = RTSemEventSignal(pData->hEvent);
1160 if (!RT_SUCCESS(rc))
1161 WARN(("RTSemEventSignal failed %d\n", rc));
1162}
1163
1164static int vboxVDMACrHgcmSubmitSync(struct VBOXVDMAHOST *pVdma, VBOXCRCMDCTL* pCtl, uint32_t cbCtl)
1165{
1166 VDMA_VBVA_CTL_CYNC_COMPLETION Data;
1167 Data.rc = VERR_NOT_IMPLEMENTED;
1168 int rc = RTSemEventCreate(&Data.hEvent);
1169 if (!RT_SUCCESS(rc))
1170 {
1171 WARN(("RTSemEventCreate failed %d\n", rc));
1172 return rc;
1173 }
1174
1175 pCtl->CalloutList.List.pNext = NULL;
1176
1177 PVGASTATE pVGAState = pVdma->pVGAState;
1178 rc = pVGAState->pDrv->pfnCrHgcmCtlSubmit(pVGAState->pDrv, pCtl, cbCtl, vboxVDMACrHgcmSubmitSyncCompletion, &Data);
1179 if (RT_SUCCESS(rc))
1180 {
1181 rc = RTSemEventWait(Data.hEvent, RT_INDEFINITE_WAIT);
1182 if (RT_SUCCESS(rc))
1183 {
1184 rc = Data.rc;
1185 if (!RT_SUCCESS(rc))
1186 {
1187 WARN(("pfnCrHgcmCtlSubmit command failed %d\n", rc));
1188 }
1189
1190 }
1191 else
1192 WARN(("RTSemEventWait failed %d\n", rc));
1193 }
1194 else
1195 WARN(("pfnCrHgcmCtlSubmit failed %d\n", rc));
1196
1197
1198 RTSemEventDestroy(Data.hEvent);
1199
1200 return rc;
1201}
1202
1203static int vdmaVBVACtlDisableSync(PVBOXVDMAHOST pVdma)
1204{
1205 VBVAEXHOSTCTL HCtl;
1206 HCtl.enmType = VBVAEXHOSTCTL_TYPE_GHH_DISABLE;
1207 int rc = vdmaVBVACtlSubmitSync(pVdma, &HCtl, VBVAEXHOSTCTL_SOURCE_HOST);
1208 if (RT_FAILURE(rc))
1209 {
1210 Log(("vdmaVBVACtlSubmitSync failed %d\n", rc));
1211 return rc;
1212 }
1213
1214 vgaUpdateDisplayAll(pVdma->pVGAState, /* fFailOnResize = */ false);
1215
1216 return VINF_SUCCESS;
1217}
1218
1219static DECLCALLBACK(uint8_t*) vboxVDMACrHgcmHandleEnableRemainingHostCommand(HVBOXCRCMDCTL_REMAINING_HOST_COMMAND hClient, uint32_t *pcbCtl, int prevCmdRc)
1220{
1221 struct VBOXVDMAHOST *pVdma = hClient;
1222 if (!pVdma->pCurRemainingHostCtl)
1223 {
1224 /* disable VBVA; all subsequent host commands will go the HGCM way */
1225 VBoxVBVAExHSDisable(&pVdma->CmdVbva);
1226 }
1227 else
1228 {
1229 VBoxVBVAExHPDataCompleteCtl(&pVdma->CmdVbva, pVdma->pCurRemainingHostCtl, prevCmdRc);
1230 }
1231
1232 pVdma->pCurRemainingHostCtl = VBoxVBVAExHPCheckHostCtlOnDisable(&pVdma->CmdVbva);
1233 if (pVdma->pCurRemainingHostCtl)
1234 {
1235 *pcbCtl = pVdma->pCurRemainingHostCtl->u.cmd.cbCmd;
1236 return pVdma->pCurRemainingHostCtl->u.cmd.pu8Cmd;
1237 }
1238
1239 *pcbCtl = 0;
1240 return NULL;
1241}
1242
1243static DECLCALLBACK(void) vboxVDMACrHgcmNotifyTerminatingDoneCb(HVBOXCRCMDCTL_NOTIFY_TERMINATING hClient)
1244{
1245# ifdef VBOX_STRICT
1246 struct VBOXVDMAHOST *pVdma = hClient;
1247 Assert(pVdma->CmdVbva.i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
1248 Assert(pVdma->Thread.u32State == VBOXVDMATHREAD_STATE_TERMINATING);
1249# else
1250 RT_NOREF(hClient);
1251# endif
1252}
1253
1254static DECLCALLBACK(int) vboxVDMACrHgcmNotifyTerminatingCb(HVBOXCRCMDCTL_NOTIFY_TERMINATING hClient, VBOXCRCMDCTL_HGCMENABLE_DATA *pHgcmEnableData)
1255{
1256 struct VBOXVDMAHOST *pVdma = hClient;
1257 VBVAEXHOSTCTL HCtl;
1258 HCtl.enmType = VBVAEXHOSTCTL_TYPE_HH_ON_HGCM_UNLOAD;
1259 int rc = vdmaVBVACtlSubmitSync(pVdma, &HCtl, VBVAEXHOSTCTL_SOURCE_HOST);
1260
1261 pHgcmEnableData->hRHCmd = pVdma;
1262 pHgcmEnableData->pfnRHCmd = vboxVDMACrHgcmHandleEnableRemainingHostCommand;
1263
1264 if (RT_FAILURE(rc))
1265 {
1266 if (rc == VERR_INVALID_STATE)
1267 rc = VINF_SUCCESS;
1268 else
1269 WARN(("vdmaVBVACtlSubmitSync failed %d\n", rc));
1270 }
1271
1272 return rc;
1273}
1274
1275static int vboxVDMACrHgcmHandleEnable(struct VBOXVDMAHOST *pVdma)
1276{
1277 VBOXCRCMDCTL_ENABLE Enable;
1278 Enable.Hdr.enmType = VBOXCRCMDCTL_TYPE_ENABLE;
1279 Enable.Data.hRHCmd = pVdma;
1280 Enable.Data.pfnRHCmd = vboxVDMACrHgcmHandleEnableRemainingHostCommand;
1281
1282 int rc = vboxVDMACrHgcmSubmitSync(pVdma, &Enable.Hdr, sizeof (Enable));
1283 Assert(!pVdma->pCurRemainingHostCtl);
1284 if (RT_SUCCESS(rc))
1285 {
1286 Assert(!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva));
1287 return VINF_SUCCESS;
1288 }
1289
1290 Assert(VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva));
1291 WARN(("vboxVDMACrHgcmSubmitSync failed %d\n", rc));
1292
1293 return rc;
1294}
1295
1296static int vdmaVBVAEnableProcess(struct VBOXVDMAHOST *pVdma, uint32_t u32Offset)
1297{
1298 if (VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
1299 {
1300 WARN(("vdma VBVA is already enabled\n"));
1301 return VERR_INVALID_STATE;
1302 }
1303
1304 VBVABUFFER *pVBVA = (VBVABUFFER *)HGSMIOffsetToPointerHost(pVdma->pHgsmi, u32Offset);
1305 if (!pVBVA)
1306 {
1307 WARN(("invalid offset %d\n", u32Offset));
1308 return VERR_INVALID_PARAMETER;
1309 }
1310
1311 int rc = VBoxVBVAExHSEnable(&pVdma->CmdVbva, pVBVA);
1312 if (RT_SUCCESS(rc))
1313 {
1314 if (!pVdma->CrSrvInfo.pfnEnable)
1315 {
1316 /* "HGCM-less" mode. All inited. */
1317 return VINF_SUCCESS;
1318 }
1319
1320 VBOXCRCMDCTL_DISABLE Disable;
1321 Disable.Hdr.enmType = VBOXCRCMDCTL_TYPE_DISABLE;
1322 Disable.Data.hNotifyTerm = pVdma;
1323 Disable.Data.pfnNotifyTerm = vboxVDMACrHgcmNotifyTerminatingCb;
1324 Disable.Data.pfnNotifyTermDone = vboxVDMACrHgcmNotifyTerminatingDoneCb;
1325 rc = vboxVDMACrHgcmSubmitSync(pVdma, &Disable.Hdr, sizeof (Disable));
1326 if (RT_SUCCESS(rc))
1327 {
1328 PVGASTATE pVGAState = pVdma->pVGAState;
1329 VBOXCRCMD_SVRENABLE_INFO Info;
1330 Info.hCltScr = pVGAState->pDrv;
1331 Info.pfnCltScrUpdateBegin = pVGAState->pDrv->pfnVBVAUpdateBegin;
1332 Info.pfnCltScrUpdateProcess = pVGAState->pDrv->pfnVBVAUpdateProcess;
1333 Info.pfnCltScrUpdateEnd = pVGAState->pDrv->pfnVBVAUpdateEnd;
1334 rc = pVdma->CrSrvInfo.pfnEnable(pVdma->CrSrvInfo.hSvr, &Info);
1335 if (RT_SUCCESS(rc))
1336 return VINF_SUCCESS;
1337 else
1338 WARN(("pfnEnable failed %d\n", rc));
1339
1340 vboxVDMACrHgcmHandleEnable(pVdma);
1341 }
1342 else
1343 WARN(("vboxVDMACrHgcmSubmitSync failed %d\n", rc));
1344
1345 VBoxVBVAExHSDisable(&pVdma->CmdVbva);
1346 }
1347 else
1348 WARN(("VBoxVBVAExHSEnable failed %d\n", rc));
1349
1350 return rc;
1351}
1352
1353static int vdmaVBVADisableProcess(struct VBOXVDMAHOST *pVdma, bool fDoHgcmEnable)
1354{
1355 if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
1356 {
1357 Log(("vdma VBVA is already disabled\n"));
1358 return VINF_SUCCESS;
1359 }
1360
1361 if (!pVdma->CrSrvInfo.pfnDisable)
1362 {
1363 /* "HGCM-less" mode. Just undo what vdmaVBVAEnableProcess did. */
1364 VBoxVBVAExHSDisable(&pVdma->CmdVbva);
1365 return VINF_SUCCESS;
1366 }
1367
1368 int rc = pVdma->CrSrvInfo.pfnDisable(pVdma->CrSrvInfo.hSvr);
1369 if (RT_SUCCESS(rc))
1370 {
1371 if (fDoHgcmEnable)
1372 {
1373 PVGASTATE pVGAState = pVdma->pVGAState;
1374
1375 /* Disabling is a bit tricky:
1376 * we need to ensure the host ctl commands do not arrive out of order
1377 * and do not come over the HGCM channel until after it is enabled. */
1378 rc = vboxVDMACrHgcmHandleEnable(pVdma);
1379 if (RT_SUCCESS(rc))
1380 {
1381 vdmaVBVANotifyDisable(pVGAState);
1382 return VINF_SUCCESS;
1383 }
1384
1385 VBOXCRCMD_SVRENABLE_INFO Info;
1386 Info.hCltScr = pVGAState->pDrv;
1387 Info.pfnCltScrUpdateBegin = pVGAState->pDrv->pfnVBVAUpdateBegin;
1388 Info.pfnCltScrUpdateProcess = pVGAState->pDrv->pfnVBVAUpdateProcess;
1389 Info.pfnCltScrUpdateEnd = pVGAState->pDrv->pfnVBVAUpdateEnd;
1390 pVdma->CrSrvInfo.pfnEnable(pVdma->CrSrvInfo.hSvr, &Info);
1391 }
1392 }
1393 else
1394 WARN(("pfnDisable failed %d\n", rc));
1395
1396 return rc;
1397}
1398
1399static int vboxVDMACrHostCtlProcess(struct VBOXVDMAHOST *pVdma, VBVAEXHOSTCTL *pCmd, bool *pfContinue)
1400{
1401 *pfContinue = true;
1402
1403 switch (pCmd->enmType)
1404 {
1405 case VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE:
1406 {
1407 if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
1408 {
1409 WARN(("VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE for disabled vdma VBVA\n"));
1410 return VERR_INVALID_STATE;
1411 }
1412 if (!pVdma->CrSrvInfo.pfnHostCtl)
1413 {
1414 /* Should not happen. */
1415 WARN(("VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE for HGCM-less mode\n"));
1416 return VERR_INVALID_STATE;
1417 }
1418 return pVdma->CrSrvInfo.pfnHostCtl(pVdma->CrSrvInfo.hSvr, pCmd->u.cmd.pu8Cmd, pCmd->u.cmd.cbCmd);
1419 }
1420 case VBVAEXHOSTCTL_TYPE_GHH_DISABLE:
1421 {
1422 int rc = vdmaVBVADisableProcess(pVdma, true);
1423 if (RT_FAILURE(rc))
1424 {
1425 WARN(("vdmaVBVADisableProcess failed %d\n", rc));
1426 return rc;
1427 }
1428
1429 return VBoxVDMAThreadTerm(&pVdma->Thread, NULL, NULL, false);
1430 }
1431 case VBVAEXHOSTCTL_TYPE_HH_ON_HGCM_UNLOAD:
1432 {
1433 int rc = vdmaVBVADisableProcess(pVdma, false);
1434 if (RT_FAILURE(rc))
1435 {
1436 WARN(("vdmaVBVADisableProcess failed %d\n", rc));
1437 return rc;
1438 }
1439
1440 rc = VBoxVDMAThreadTerm(&pVdma->Thread, NULL, NULL, true);
1441 if (RT_FAILURE(rc))
1442 {
1443 WARN(("VBoxVDMAThreadTerm failed %d\n", rc));
1444 return rc;
1445 }
1446
1447 *pfContinue = false;
1448 return VINF_SUCCESS;
1449 }
1450 case VBVAEXHOSTCTL_TYPE_HH_SAVESTATE:
1451 {
1452 PVGASTATE pVGAState = pVdma->pVGAState;
1453 uint8_t * pu8VramBase = pVGAState->vram_ptrR3;
1454 int rc = VBoxVBVAExHSSaveState(&pVdma->CmdVbva, pu8VramBase, pCmd->u.state.pSSM);
1455 if (RT_FAILURE(rc))
1456 {
1457 WARN(("VBoxVBVAExHSSaveState failed %d\n", rc));
1458 return rc;
1459 }
1460 VGA_SAVED_STATE_PUT_MARKER(pCmd->u.state.pSSM, 4);
1461
1462 if (!pVdma->CrSrvInfo.pfnSaveState)
1463 {
1464 /* Done. */
1465 return VINF_SUCCESS;
1466 }
1467
1468 return pVdma->CrSrvInfo.pfnSaveState(pVdma->CrSrvInfo.hSvr, pCmd->u.state.pSSM);
1469 }
1470 case VBVAEXHOSTCTL_TYPE_HH_LOADSTATE:
1471 {
1472 PVGASTATE pVGAState = pVdma->pVGAState;
1473 uint8_t * pu8VramBase = pVGAState->vram_ptrR3;
1474
1475 int rc = VBoxVBVAExHSLoadState(&pVdma->CmdVbva, pu8VramBase, pCmd->u.state.pSSM, pCmd->u.state.u32Version);
1476 if (RT_FAILURE(rc))
1477 {
1478 WARN(("VBoxVBVAExHSSaveState failed %d\n", rc));
1479 return rc;
1480 }
1481
1482 VGA_SAVED_STATE_GET_MARKER_RETURN_ON_MISMATCH(pCmd->u.state.pSSM, pCmd->u.state.u32Version, 4);
1483 if (!pVdma->CrSrvInfo.pfnLoadState)
1484 {
1485 /* Done. */
1486 return VINF_SUCCESS;
1487 }
1488
1489 rc = pVdma->CrSrvInfo.pfnLoadState(pVdma->CrSrvInfo.hSvr, pCmd->u.state.pSSM, pCmd->u.state.u32Version);
1490 if (RT_FAILURE(rc))
1491 {
1492 WARN(("pfnLoadState failed %d\n", rc));
1493 return rc;
1494 }
1495
1496 return VINF_SUCCESS;
1497 }
1498 case VBVAEXHOSTCTL_TYPE_HH_LOADSTATE_DONE:
1499 {
1500 PVGASTATE pVGAState = pVdma->pVGAState;
1501
1502 for (uint32_t i = 0; i < pVGAState->cMonitors; ++i)
1503 {
1504 VBVAINFOSCREEN CurScreen;
1505 VBVAINFOVIEW CurView;
1506
1507 int rc = VBVAGetInfoViewAndScreen(pVGAState, i, &CurView, &CurScreen);
1508 if (RT_FAILURE(rc))
1509 {
1510 WARN(("VBVAGetInfoViewAndScreen failed %d\n", rc));
1511 return rc;
1512 }
1513
1514 rc = VBVAInfoScreen(pVGAState, &CurScreen);
1515 if (RT_FAILURE(rc))
1516 {
1517 WARN(("VBVAInfoScreen failed %d\n", rc));
1518 return rc;
1519 }
1520 }
1521
1522 return VINF_SUCCESS;
1523 }
1524 default:
1525 WARN(("unexpected host ctl type %d\n", pCmd->enmType));
1526 return VERR_INVALID_PARAMETER;
1527 }
1528}
1529
1530static int vboxVDMASetupScreenInfo(PVGASTATE pVGAState, VBVAINFOSCREEN *pScreen)
1531{
1532 const uint32_t u32ViewIndex = pScreen->u32ViewIndex;
1533 const uint16_t u16Flags = pScreen->u16Flags;
1534
1535 if (u16Flags & VBVA_SCREEN_F_DISABLED)
1536 {
1537 if ( u32ViewIndex < pVGAState->cMonitors
1538 || u32ViewIndex == UINT32_C(0xFFFFFFFF))
1539 {
1540 RT_ZERO(*pScreen);
1541 pScreen->u32ViewIndex = u32ViewIndex;
1542 pScreen->u16Flags = VBVA_SCREEN_F_ACTIVE | VBVA_SCREEN_F_DISABLED;
1543 return VINF_SUCCESS;
1544 }
1545 }
1546 else
1547 {
1548 if (u16Flags & VBVA_SCREEN_F_BLANK2)
1549 {
1550 if ( u32ViewIndex >= pVGAState->cMonitors
1551 && u32ViewIndex != UINT32_C(0xFFFFFFFF))
1552 {
1553 return VERR_INVALID_PARAMETER;
1554 }
1555
1556 /* Special case for blanking using current video mode.
1557 * Only the 'u16Flags' and 'u32ViewIndex' fields are relevant.
1558 */
1559 RT_ZERO(*pScreen);
1560 pScreen->u32ViewIndex = u32ViewIndex;
1561 pScreen->u16Flags = u16Flags;
1562 return VINF_SUCCESS;
1563 }
1564
1565 if ( u32ViewIndex < pVGAState->cMonitors
1566 && pScreen->u16BitsPerPixel <= 32
1567 && pScreen->u32Width <= UINT16_MAX
1568 && pScreen->u32Height <= UINT16_MAX
1569 && pScreen->u32LineSize <= UINT16_MAX * 4)
1570 {
1571 const uint32_t u32BytesPerPixel = (pScreen->u16BitsPerPixel + 7) / 8;
1572 if (pScreen->u32Width <= pScreen->u32LineSize / (u32BytesPerPixel? u32BytesPerPixel: 1))
1573 {
1574 const uint64_t u64ScreenSize = (uint64_t)pScreen->u32LineSize * pScreen->u32Height;
1575 if ( pScreen->u32StartOffset <= pVGAState->vram_size
1576 && u64ScreenSize <= pVGAState->vram_size
1577 && pScreen->u32StartOffset <= pVGAState->vram_size - (uint32_t)u64ScreenSize)
1578 {
1579 return VINF_SUCCESS;
1580 }
1581 }
1582 }
1583 }
1584
1585 return VERR_INVALID_PARAMETER;
1586}
1587
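/* Worked example (illustrative, not part of the original file) of the checks
 * above: a 1920x1080 screen at 32 bpp gives u32BytesPerPixel = 4, so
 * u32LineSize must be at least 1920 * 4 = 7680 bytes and the screen occupies
 * 7680 * 1080 = 8294400 bytes, which together with u32StartOffset must fit
 * within vram_size.
 */
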
1588static int vboxVDMACrGuestCtlResizeEntryProcess(struct VBOXVDMAHOST *pVdma, VBOXCMDVBVA_RESIZE_ENTRY *pEntry)
1589{
1590 PVGASTATE pVGAState = pVdma->pVGAState;
1591 VBVAINFOSCREEN Screen = pEntry->Screen;
1592
1593 /* Verify and cleanup local copy of the input data. */
1594 int rc = vboxVDMASetupScreenInfo(pVGAState, &Screen);
1595 if (RT_FAILURE(rc))
1596 {
1597 WARN(("invalid screen data\n"));
1598 return rc;
1599 }
1600
1601 VBOXCMDVBVA_SCREENMAP_DECL(uint32_t, aTargetMap);
1602 memcpy(aTargetMap, pEntry->aTargetMap, sizeof(aTargetMap));
1603 ASMBitClearRange(aTargetMap, pVGAState->cMonitors, VBOX_VIDEO_MAX_SCREENS);
1604
1605 if (pVdma->CrSrvInfo.pfnResize)
1606 {
1607 /* Also inform the HGCM service, if it is there. */
1608 rc = pVdma->CrSrvInfo.pfnResize(pVdma->CrSrvInfo.hSvr, &Screen, aTargetMap);
1609 if (RT_FAILURE(rc))
1610 {
1611 WARN(("pfnResize failed %d\n", rc));
1612 return rc;
1613 }
1614 }
1615
1616 /* A fake view which contains the current screen for the 2D VBVAInfoView. */
1617 VBVAINFOVIEW View;
1618 View.u32ViewOffset = 0;
1619 View.u32ViewSize = Screen.u32LineSize * Screen.u32Height + Screen.u32StartOffset;
1620 View.u32MaxScreenSize = Screen.u32LineSize * Screen.u32Height;
1621
1622 const bool fDisable = RT_BOOL(Screen.u16Flags & VBVA_SCREEN_F_DISABLED);
1623
1624 for (int i = ASMBitFirstSet(aTargetMap, pVGAState->cMonitors);
1625 i >= 0;
1626 i = ASMBitNextSet(aTargetMap, pVGAState->cMonitors, i))
1627 {
1628 Screen.u32ViewIndex = i;
1629
1630 VBVAINFOSCREEN CurScreen;
1631 VBVAINFOVIEW CurView;
1632
1633 rc = VBVAGetInfoViewAndScreen(pVGAState, i, &CurView, &CurScreen);
1634 AssertRC(rc);
1635
1636 if (!memcmp(&Screen, &CurScreen, sizeof (CurScreen)))
1637 continue;
1638
1639 /* The view does not change if _BLANK2 is set. */
1640 if ( (!fDisable || !CurView.u32ViewSize)
1641 && !RT_BOOL(Screen.u16Flags & VBVA_SCREEN_F_BLANK2))
1642 {
1643 View.u32ViewIndex = Screen.u32ViewIndex;
1644
1645 rc = VBVAInfoView(pVGAState, &View);
1646 if (RT_FAILURE(rc))
1647 {
1648 WARN(("VBVAInfoView failed %d\n", rc));
1649 break;
1650 }
1651 }
1652
1653 rc = VBVAInfoScreen(pVGAState, &Screen);
1654 if (RT_FAILURE(rc))
1655 {
1656 WARN(("VBVAInfoScreen failed %d\n", rc));
1657 break;
1658 }
1659 }
1660
1661 return rc;
1662}
1663
1664static int vboxVDMACrGuestCtlProcess(struct VBOXVDMAHOST *pVdma, VBVAEXHOSTCTL *pCmd)
1665{
1666 VBVAEXHOSTCTL_TYPE enmType = pCmd->enmType;
1667 switch (enmType)
1668 {
1669 case VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE:
1670 {
1671 if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
1672 {
1673 WARN(("VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE for disabled vdma VBVA\n"));
1674 return VERR_INVALID_STATE;
1675 }
1676 if (!pVdma->CrSrvInfo.pfnGuestCtl)
1677 {
1678 /* Unexpected. */
1679 WARN(("VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE in HGCM-less mode\n"));
1680 return VERR_INVALID_STATE;
1681 }
1682 return pVdma->CrSrvInfo.pfnGuestCtl(pVdma->CrSrvInfo.hSvr, pCmd->u.cmd.pu8Cmd, pCmd->u.cmd.cbCmd);
1683 }
1684 case VBVAEXHOSTCTL_TYPE_GHH_RESIZE:
1685 {
1686 if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
1687 {
1688 WARN(("VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE for disabled vdma VBVA\n"));
1689 return VERR_INVALID_STATE;
1690 }
1691
1692 uint32_t cbCmd = pCmd->u.cmd.cbCmd;
1693
1694 if (cbCmd % sizeof (VBOXCMDVBVA_RESIZE_ENTRY))
1695 {
1696 WARN(("invalid buffer size\n"));
1697 return VERR_INVALID_PARAMETER;
1698 }
1699
1700 uint32_t cElements = cbCmd / sizeof (VBOXCMDVBVA_RESIZE_ENTRY);
1701 if (!cElements)
1702 {
1703 WARN(("invalid buffer size\n"));
1704 return VERR_INVALID_PARAMETER;
1705 }
1706
1707 VBOXCMDVBVA_RESIZE *pResize = (VBOXCMDVBVA_RESIZE*)pCmd->u.cmd.pu8Cmd;
1708
1709 int rc = VINF_SUCCESS;
1710
1711 for (uint32_t i = 0; i < cElements; ++i)
1712 {
1713 VBOXCMDVBVA_RESIZE_ENTRY *pEntry = &pResize->aEntries[i];
1714 rc = vboxVDMACrGuestCtlResizeEntryProcess(pVdma, pEntry);
1715 if (RT_FAILURE(rc))
1716 {
1717 WARN(("vboxVDMACrGuestCtlResizeEntryProcess failed %d\n", rc));
1718 break;
1719 }
1720 }
1721 return rc;
1722 }
1723 case VBVAEXHOSTCTL_TYPE_GHH_ENABLE:
1724 case VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED:
1725 {
1726 VBVAENABLE *pEnable = (VBVAENABLE *)pCmd->u.cmd.pu8Cmd;
1727 Assert(pCmd->u.cmd.cbCmd == sizeof (VBVAENABLE));
1728 uint32_t u32Offset = pEnable->u32Offset;
1729 int rc = vdmaVBVAEnableProcess(pVdma, u32Offset);
1730 if (!RT_SUCCESS(rc))
1731 {
1732 WARN(("vdmaVBVAEnableProcess failed %d\n", rc));
1733 return rc;
1734 }
1735
1736 if (enmType == VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED)
1737 {
1738 rc = VBoxVBVAExHPPause(&pVdma->CmdVbva);
1739 if (!RT_SUCCESS(rc))
1740 {
1741 WARN(("VBoxVBVAExHPPause failed %d\n", rc));
1742 return rc;
1743 }
1744 }
1745
1746 return VINF_SUCCESS;
1747 }
1748 case VBVAEXHOSTCTL_TYPE_GHH_DISABLE:
1749 {
1750 int rc = vdmaVBVADisableProcess(pVdma, true);
1751 if (RT_FAILURE(rc))
1752 {
1753 WARN(("vdmaVBVADisableProcess failed %d\n", rc));
1754 return rc;
1755 }
1756
1757 /* do vgaUpdateDisplayAll right away */
1758 VMR3ReqCallNoWait(PDMDevHlpGetVM(pVdma->pVGAState->pDevInsR3), VMCPUID_ANY,
1759 (PFNRT)vgaUpdateDisplayAll, 2, pVdma->pVGAState, /* fFailOnResize = */ false);
1760
1761 return VBoxVDMAThreadTerm(&pVdma->Thread, NULL, NULL, false);
1762 }
1763 default:
1764 WARN(("unexpected ctl type %d\n", pCmd->enmType));
1765 return VERR_INVALID_PARAMETER;
1766 }
1767}
1768
1769
1770/**
1771 * Copies one page in a VBOXCMDVBVA_OPTYPE_PAGING_TRANSFER command.
1772 *
1773 * @param fIn - whether this is a page in or out op.
1774 * @thread VDMA
1775 *
1776 * The direction is relative to VRAM: fIn == true means a transfer to VRAM; false means a transfer from VRAM.
1777 */
1778static int vboxVDMACrCmdVbvaProcessPagingEl(PPDMDEVINS pDevIns, VBOXCMDVBVAPAGEIDX uPageNo, uint8_t *pbVram, bool fIn)
1779{
1780 RTGCPHYS GCPhysPage = (RTGCPHYS)uPageNo << X86_PAGE_SHIFT;
1781 PGMPAGEMAPLOCK Lock;
1782 int rc;
1783
1784 if (fIn)
1785 {
1786 const void *pvPage;
1787 rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, GCPhysPage, 0, &pvPage, &Lock);
1788 if (RT_SUCCESS(rc))
1789 {
1790 memcpy(pbVram, pvPage, PAGE_SIZE);
1791 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
1792 }
1793 else
1794 WARN(("PDMDevHlpPhysGCPhys2CCPtrReadOnly failed %d", rc));
1795 }
1796 else
1797 {
1798 void *pvPage;
1799 rc = PDMDevHlpPhysGCPhys2CCPtr(pDevIns, GCPhysPage, 0, &pvPage, &Lock);
1800 if (RT_SUCCESS(rc))
1801 {
1802 memcpy(pvPage, pbVram, PAGE_SIZE);
1803 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
1804 }
1805 else
1806 WARN(("PDMDevHlpPhysGCPhys2CCPtr failed %d", rc));
1807 }
1808
1809 return rc;
1810}
1811
1812/**
1813 * Handles a VBOXCMDVBVA_OPTYPE_PAGING_TRANSFER command.
1814 *
1815 * @return 0 on success, -1 on failure.
1816 *
1817 * @thread VDMA
1818 */
1819static int8_t vboxVDMACrCmdVbvaPageTransfer(PVGASTATE pVGAState, VBOXCMDVBVA_HDR const volatile *pHdr, uint32_t cbCmd,
1820 const VBOXCMDVBVA_PAGING_TRANSFER_DATA *pData)
1821{
1822 /*
1823 * Extract and validate information.
1824 */
1825 AssertMsgReturn(cbCmd >= sizeof(VBOXCMDVBVA_PAGING_TRANSFER), ("%#x\n", cbCmd), -1);
1826
1827 bool const fIn = RT_BOOL(pHdr->u8Flags & VBOXCMDVBVA_OPF_PAGING_TRANSFER_IN);
1828
1829 uint32_t cbPageNumbers = cbCmd - RT_OFFSETOF(VBOXCMDVBVA_PAGING_TRANSFER, Data.aPageNumbers);
1830 AssertMsgReturn(!(cbPageNumbers % sizeof(VBOXCMDVBVAPAGEIDX)), ("%#x\n", cbPageNumbers), -1);
1831 VBOXCMDVBVAPAGEIDX const cPages = cbPageNumbers / sizeof(VBOXCMDVBVAPAGEIDX);
1832
1833 VBOXCMDVBVAOFFSET offVRam = pData->Alloc.u.offVRAM;
1834 AssertMsgReturn(!(offVRam & X86_PAGE_OFFSET_MASK), ("%#x\n", offVRam), -1);
1835 AssertMsgReturn(offVRam < pVGAState->vram_size, ("%#x vs %#x\n", offVRam, pVGAState->vram_size), -1);
1836 uint32_t cVRamPages = (pVGAState->vram_size - offVRam) >> X86_PAGE_SHIFT;
1837 AssertMsgReturn(cPages <= cVRamPages, ("cPages=%#x vs cVRamPages=%#x @ offVRam=%#x\n", cPages, cVRamPages, offVRam), -1);
1838
1839 /*
1840 * Execute the command.
1841 */
1842 uint8_t *pbVRam = (uint8_t *)pVGAState->vram_ptrR3 + offVRam;
1843 for (uint32_t iPage = 0; iPage < cPages; iPage++, pbVRam += X86_PAGE_SIZE)
1844 {
1845 uint32_t uPageNo = pData->aPageNumbers[iPage];
1846 int rc = vboxVDMACrCmdVbvaProcessPagingEl(pVGAState->pDevInsR3, uPageNo, pbVRam, fIn);
1847 AssertMsgReturn(RT_SUCCESS(rc), ("#%#x: uPageNo=%#x rc=%Rrc\n", iPage, uPageNo, rc), -1);
1848 }
1849 return 0;
1850}
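
/* Worked size example (hypothetical numbers): page indices are 4 bytes each
 * (see the AssertCompile checks further down), so a 4096 byte command whose
 * aPageNumbers array starts at byte offset 12 would carry
 * (4096 - 12) / 4 = 1021 indices, i.e. up to roughly 4 MB per command; the
 * actual start offset is RT_OFFSETOF(VBOXCMDVBVA_PAGING_TRANSFER,
 * Data.aPageNumbers) as used above. */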
1851
1852
1853/**
1854 * Handles VBOXCMDVBVA_OPTYPE_PAGING_FILL.
1855 *
1856 * @returns 0 on success, -1 on failure.
1857 * @param pVGAState The VGA state.
1858 * @param pFill The fill command (volatile).
1859 *
1860 * @thread VDMA
1861 */
1862static int8_t vboxVDMACrCmdVbvaPagingFill(PVGASTATE pVGAState, VBOXCMDVBVA_PAGING_FILL *pFill)
1863{
1864 VBOXCMDVBVA_PAGING_FILL FillSafe = *pFill;
1865 VBOXCMDVBVAOFFSET offVRAM = FillSafe.offVRAM;
1866 if (!(offVRAM & X86_PAGE_OFFSET_MASK))
1867 {
1868 if (offVRAM <= pVGAState->vram_size)
1869 {
1870 uint32_t cbFill = FillSafe.u32CbFill;
1871 AssertStmt(!(cbFill & 3), cbFill &= ~(uint32_t)3);
1872
1873 if ( cbFill < pVGAState->vram_size
1874 && offVRAM <= pVGAState->vram_size - cbFill)
1875 {
1876 uint32_t *pu32Vram = (uint32_t *)((uint8_t *)pVGAState->vram_ptrR3 + offVRAM);
1877 uint32_t const u32Color = FillSafe.u32Pattern;
1878
1879 uint32_t cLoops = cbFill / 4;
1880 while (cLoops-- > 0)
1881 pu32Vram[cLoops] = u32Color;
1882
1883 return 0;
1884 }
1885 else
1886 WARN(("invalid cbFill"));
1887 }
1888 else
1889 WARN(("invalid vram offset"));
1890 }
1891 else
1892 WARN(("offVRAM address is not on page boundary\n"));
1893 return -1;
1896}
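
/* Example (hypothetical values): offVRAM = 0x10000, u32CbFill = 0x100 and
 * u32Pattern = 0xff0000ff writes 0x100 / 4 = 64 dwords of the pattern over
 * VRAM bytes 0x10000..0x100ff. */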
1897
1898/**
1899 * Process command data.
1900 *
1901 * @returns Zero or positive on success, negative on failure.
1902 * @param pVdma The VDMA channel.
1903 * @param pCmd The command data to process. Assume volatile.
1904 * @param cbCmd The amount of command data.
1905 *
1906 * @thread VDMA
1907 */
1908static int8_t vboxVDMACrCmdVbvaProcessCmdData(struct VBOXVDMAHOST *pVdma, const VBOXCMDVBVA_HDR *pCmd, uint32_t cbCmd)
1909{
1910 uint8_t bOpCode = pCmd->u8OpCode;
1911 switch (bOpCode)
1912 {
1913 case VBOXCMDVBVA_OPTYPE_NOPCMD:
1914 return 0;
1915
1916 case VBOXCMDVBVA_OPTYPE_PAGING_TRANSFER:
1917 return vboxVDMACrCmdVbvaPageTransfer(pVdma->pVGAState, pCmd, cbCmd, &((VBOXCMDVBVA_PAGING_TRANSFER *)pCmd)->Data);
1918
1919 case VBOXCMDVBVA_OPTYPE_PAGING_FILL:
1920 if (cbCmd == sizeof(VBOXCMDVBVA_PAGING_FILL))
1921 return vboxVDMACrCmdVbvaPagingFill(pVdma->pVGAState, (VBOXCMDVBVA_PAGING_FILL *)pCmd);
1922 WARN(("cmd too small"));
1923 return -1;
1924
1925 default:
1926 if (pVdma->CrSrvInfo.pfnCmd)
1927 return pVdma->CrSrvInfo.pfnCmd(pVdma->CrSrvInfo.hSvr, pCmd, cbCmd);
1928 /* Unexpected. */
1929 WARN(("no HGCM"));
1930 return -1;
1931 }
1932}
1933
1934# if 0
1935typedef struct VBOXCMDVBVA_PAGING_TRANSFER
1936{
1937 VBOXCMDVBVA_HDR Hdr;
1938 /* for now can only contain offVRAM.
1939 * paging transfer can NOT be initiated for allocations having host 3D object (hostID) associated */
1940 VBOXCMDVBVA_ALLOCINFO Alloc;
1941 uint32_t u32Reserved;
1942 VBOXCMDVBVA_SYSMEMEL aSysMem[1];
1943} VBOXCMDVBVA_PAGING_TRANSFER;
1944# endif
1945
1946AssertCompile(sizeof (VBOXCMDVBVA_HDR) == 8);
1947AssertCompile(sizeof (VBOXCMDVBVA_ALLOCINFO) == 4);
1948AssertCompile(sizeof (VBOXCMDVBVAPAGEIDX) == 4);
1949AssertCompile(!(X86_PAGE_SIZE % sizeof (VBOXCMDVBVAPAGEIDX)));
1950
1951# define VBOXCMDVBVA_NUM_SYSMEMEL_PER_PAGE (X86_PAGE_SIZE / sizeof (VBOXCMDVBVA_SYSMEMEL))
1952
1953/**
1954 * Worker for vboxVDMACrCmdProcess.
1955 *
1956 * @returns 8-bit result.
1957 * @param pVdma The VDMA channel.
1958 * @param pCmd The command. Consider volatile!
1959 * @param cbCmd The size of what @a pCmd points to. At least
1960 * sizeof(VBOXCMDVBVA_HDR).
1961 * @param fRecursion Set if recursive call, false if not.
1962 *
1963 * @thread VDMA
1964 */
1965static int8_t vboxVDMACrCmdVbvaProcess(struct VBOXVDMAHOST *pVdma, const VBOXCMDVBVA_HDR *pCmd, uint32_t cbCmd, bool fRecursion)
1966{
1967 int8_t i8Result = 0;
1968 uint8_t const bOpCode = pCmd->u8OpCode;
1969 LogRelFlow(("VDMA: vboxVDMACrCmdVbvaProcess: ENTER, bOpCode=%u\n", bOpCode));
1970 switch (bOpCode)
1971 {
1972 case VBOXCMDVBVA_OPTYPE_SYSMEMCMD:
1973 {
1974 /*
1975 * Extract the command physical address and size.
1976 */
1977 AssertMsgReturn(cbCmd >= sizeof(VBOXCMDVBVA_SYSMEMCMD), ("%#x\n", cbCmd), -1);
1978 RTGCPHYS GCPhysCmd = ((VBOXCMDVBVA_SYSMEMCMD *)pCmd)->phCmd;
1979 uint32_t cbCmdPart = X86_PAGE_SIZE - (uint32_t)(GCPhysCmd & X86_PAGE_OFFSET_MASK);
1980
1981 uint32_t cbRealCmd = pCmd->u8Flags;
1982 cbRealCmd |= (uint32_t)pCmd->u.u8PrimaryID << 8;
1983 AssertMsgReturn(cbRealCmd >= sizeof(VBOXCMDVBVA_HDR), ("%#x\n", cbRealCmd), -1);
1984 AssertMsgReturn(cbRealCmd <= _1M, ("%#x\n", cbRealCmd), -1);
1985
1986 /*
1987 * Lock down the first page of the memory specified by the command.
1988 */
1989 PGMPAGEMAPLOCK Lock;
1990 PVGASTATE pVGAState = pVdma->pVGAState;
1991 PPDMDEVINS pDevIns = pVGAState->pDevInsR3;
1992 VBOXCMDVBVA_HDR const *pRealCmdHdr = NULL;
1993 int rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, GCPhysCmd, 0, (const void **)&pRealCmdHdr, &Lock);
1994 if (!RT_SUCCESS(rc))
1995 {
1996 WARN(("PDMDevHlpPhysGCPhys2CCPtrReadOnly failed %d\n", rc));
1997 return -1;
1998 }
1999 Assert((GCPhysCmd & PAGE_OFFSET_MASK) == (((uintptr_t)pRealCmdHdr) & PAGE_OFFSET_MASK));
2000
2001 /*
2002 * All fits within one page? We can handle that pretty efficiently.
2003 */
2004 if (cbRealCmd <= cbCmdPart)
2005 {
2006 i8Result = vboxVDMACrCmdVbvaProcessCmdData(pVdma, pRealCmdHdr, cbRealCmd);
2007 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
2008 }
2009 else
2010 {
2011 /*
2012 * To keep things damn simple, just double buffer cross page or
2013 * multipage requests.
2014 */
2015 uint8_t *pbCmdBuf = (uint8_t *)RTMemTmpAllocZ(RT_ALIGN_Z(cbRealCmd, 16));
2016 if (pbCmdBuf)
2017 {
2018 memcpy(pbCmdBuf, pRealCmdHdr, cbCmdPart);
2019 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
2020 pRealCmdHdr = NULL;
2021
2022 rc = PDMDevHlpPhysRead(pDevIns, GCPhysCmd + cbCmdPart, &pbCmdBuf[cbCmdPart], cbRealCmd - cbCmdPart);
2023 if (RT_SUCCESS(rc))
2024 i8Result = vboxVDMACrCmdVbvaProcessCmdData(pVdma, (VBOXCMDVBVA_HDR const *)pbCmdBuf, cbRealCmd);
2025 else
2026 LogRelMax(200, ("VDMA: Error reading %#x bytes of guest memory %#RGp!\n", cbRealCmd, GCPhysCmd));
2027 RTMemTmpFree(pbCmdBuf);
2028 }
2029 else
2030 {
2031 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
2032 LogRelMax(200, ("VDMA: Out of temporary memory! %#x\n", cbRealCmd));
2033 i8Result = -1;
2034 }
2035 }
2036 return i8Result;
2037 }
2038
2039 case VBOXCMDVBVA_OPTYPE_COMPLEXCMD:
2040 {
2041 Assert(cbCmd >= sizeof(VBOXCMDVBVA_HDR)); /* caller already checked this */
2042 AssertReturn(!fRecursion, -1);
2043
2044 /* Skip current command. */
2045 cbCmd -= sizeof(*pCmd);
2046 pCmd++;
2047
2048 /* Process subcommands. */
2049 while (cbCmd > 0)
2050 {
2051 AssertMsgReturn(cbCmd >= sizeof(VBOXCMDVBVA_HDR), ("%#x\n", cbCmd), -1);
2052
2053 uint16_t cbCurCmd = pCmd->u2.complexCmdEl.u16CbCmdHost;
2054 AssertMsgReturn(cbCurCmd <= cbCmd, ("cbCurCmd=%#x, cbCmd=%#x\n", cbCurCmd, cbCmd), -1);
2055
2056 i8Result = vboxVDMACrCmdVbvaProcess(pVdma, pCmd, cbCurCmd, true /*fRecursion*/);
2057 if (i8Result < 0)
2058 {
2059 WARN(("vboxVDMACrCmdVbvaProcess failed"));
2060 return i8Result;
2061 }
2062
2063 /* Advance to the next command. */
2064 pCmd = (VBOXCMDVBVA_HDR *)((uintptr_t)pCmd + cbCurCmd);
2065 cbCmd -= cbCurCmd;
2066 }
2067 return 0;
2068 }
2069
2070 default:
2071 i8Result = vboxVDMACrCmdVbvaProcessCmdData(pVdma, pCmd, cbCmd);
2072 LogRelFlow(("VDMA: vboxVDMACrCmdVbvaProcess: LEAVE, opCode(%i)\n", pCmd->u8OpCode));
2073 return i8Result;
2074 }
2075}
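
/* Illustrative sketch only: the rough shape of a two element COMPLEXCMD buffer
 * as the parser above expects it. Field usage is inferred from the parsing
 * code; the helper name is made up and this is not a supported command
 * building API. */
#if 0
static void vboxVDMASketchComplexCmd(VBOXCMDVBVA_HDR paCmds[3])
{
    paCmds[0].u8OpCode = VBOXCMDVBVA_OPTYPE_COMPLEXCMD; /* outer header; submit with cbCmd = 3 * sizeof(VBOXCMDVBVA_HDR) */
    paCmds[1].u8OpCode = VBOXCMDVBVA_OPTYPE_NOPCMD;     /* first sub command, a NOP */
    paCmds[1].u2.complexCmdEl.u16CbCmdHost = sizeof(VBOXCMDVBVA_HDR);
    paCmds[2].u8OpCode = VBOXCMDVBVA_OPTYPE_NOPCMD;     /* second sub command, a NOP */
    paCmds[2].u2.complexCmdEl.u16CbCmdHost = sizeof(VBOXCMDVBVA_HDR);
}
#endif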
2076
2077/**
2078 * Worker for vboxVDMAWorkerThread handling VBVAEXHOST_DATA_TYPE_CMD.
2079 *
2080 * @thread VDMA
2081 */
2082static void vboxVDMACrCmdProcess(struct VBOXVDMAHOST *pVdma, uint8_t* pbCmd, uint32_t cbCmd)
2083{
2084 if ( cbCmd > 0
2085 && *pbCmd == VBOXCMDVBVA_OPTYPE_NOP)
2086 { /* nop */ }
2087 else if (cbCmd >= sizeof(VBOXCMDVBVA_HDR))
2088 {
2089 PVBOXCMDVBVA_HDR pCmd = (PVBOXCMDVBVA_HDR)pbCmd;
2090
2091 /* check if the command is cancelled */
2092 if (ASMAtomicCmpXchgU8(&pCmd->u8State, VBOXCMDVBVA_STATE_IN_PROGRESS, VBOXCMDVBVA_STATE_SUBMITTED))
2093 {
2094 /* Process it. */
2095 pCmd->u.i8Result = vboxVDMACrCmdVbvaProcess(pVdma, pCmd, cbCmd, false /*fRecursion*/);
2096 }
2097 else
2098 Assert(pCmd->u8State == VBOXCMDVBVA_STATE_CANCELLED);
2099 }
2100 else
2101 WARN(("invalid command size"));
2102
2103}
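
/* Command state handshake used above: the guest submits commands in the
 * VBOXCMDVBVA_STATE_SUBMITTED state. The compare-and-swap either claims the
 * command (SUBMITTED -> IN_PROGRESS) so that i8Result can be written, or it
 * fails because the guest has already moved the command to
 * VBOXCMDVBVA_STATE_CANCELLED, in which case the command is left untouched. */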
2104
2105static int vboxVDMACrCtlHgsmiSetup(struct VBOXVDMAHOST *pVdma)
2106{
2107 PVBOXVDMACMD_CHROMIUM_CTL_CRHGSMI_SETUP pCmd = (PVBOXVDMACMD_CHROMIUM_CTL_CRHGSMI_SETUP)
2108 vboxVDMACrCtlCreate (VBOXVDMACMD_CHROMIUM_CTL_TYPE_CRHGSMI_SETUP, sizeof (*pCmd));
2109 int rc = VERR_NO_MEMORY;
2110 if (pCmd)
2111 {
2112 PVGASTATE pVGAState = pVdma->pVGAState;
2113 pCmd->pvVRamBase = pVGAState->vram_ptrR3;
2114 pCmd->cbVRam = pVGAState->vram_size;
2115 pCmd->pLed = &pVGAState->Led3D;
2116 pCmd->CrClientInfo.hClient = pVdma;
2117 pCmd->CrClientInfo.pfnCallout = vboxCmdVBVACmdCallout;
2118 rc = vboxVDMACrCtlPost(pVGAState, &pCmd->Hdr, sizeof (*pCmd));
2119 if (RT_SUCCESS(rc))
2120 {
2121 rc = vboxVDMACrCtlGetRc(&pCmd->Hdr);
2122 if (RT_SUCCESS(rc))
2123 pVdma->CrSrvInfo = pCmd->CrCmdServerInfo;
2124 else if (rc != VERR_NOT_SUPPORTED)
2125 WARN(("vboxVDMACrCtlGetRc returned %d\n", rc));
2126 }
2127 else
2128 WARN(("vboxVDMACrCtlPost failed %d\n", rc));
2129
2130 vboxVDMACrCtlRelease(&pCmd->Hdr);
2131 }
2132
2133 if (!RT_SUCCESS(rc))
2134 memset(&pVdma->CrSrvInfo, 0, sizeof (pVdma->CrSrvInfo));
2135
2136 return rc;
2137}
2138
2139static int vboxVDMACmdExecBpbTransfer(PVBOXVDMAHOST pVdma, const VBOXVDMACMD_DMA_BPB_TRANSFER *pTransfer, uint32_t cbBuffer);
2140
2141/**
2142 * Check if this is an external command to be passed to the chromium backend.
2143 *
2144 * @retval VINF_NOT_SUPPORTED if not a chromium command.
2145 *
2146 * @note cbCmdDr is at least sizeof(VBOXVDMACBUF_DR).
2147 */
2148static int vboxVDMACmdCheckCrCmd(struct VBOXVDMAHOST *pVdma, PVBOXVDMACBUF_DR pCmdDr, uint32_t cbCmdDr)
2149{
2150 uint32_t cbDmaCmd = 0;
2151 uint8_t *pbRam = pVdma->pVGAState->vram_ptrR3;
2152 int rc = VINF_NOT_SUPPORTED;
2153
2154 cbDmaCmd = pCmdDr->cbBuf;
2155
2156 PVBOXVDMACMD pDmaCmd;
2157 if (pCmdDr->fFlags & VBOXVDMACBUF_FLAG_BUF_FOLLOWS_DR)
2158 {
2159 AssertReturn(cbCmdDr >= sizeof(*pCmdDr) + VBOXVDMACMD_HEADER_SIZE(), VERR_INVALID_PARAMETER);
2160 AssertReturn(cbDmaCmd >= cbCmdDr - sizeof(*pCmdDr) - VBOXVDMACMD_HEADER_SIZE(), VERR_INVALID_PARAMETER);
2161
2162 pDmaCmd = VBOXVDMACBUF_DR_TAIL(pCmdDr, VBOXVDMACMD);
2163 }
2164 else if (pCmdDr->fFlags & VBOXVDMACBUF_FLAG_BUF_VRAM_OFFSET)
2165 {
2166 VBOXVIDEOOFFSET offBuf = pCmdDr->Location.offVramBuf;
2167 AssertReturn( cbDmaCmd <= pVdma->pVGAState->vram_size
2168 && offBuf <= pVdma->pVGAState->vram_size - cbDmaCmd, VERR_INVALID_PARAMETER);
2169 pDmaCmd = (VBOXVDMACMD *)(pbRam + offBuf);
2170 }
2171 else
2172 pDmaCmd = NULL;
2173 if (pDmaCmd)
2174 {
2175 Assert(cbDmaCmd >= VBOXVDMACMD_HEADER_SIZE());
2176 uint32_t cbBody = VBOXVDMACMD_BODY_SIZE(cbDmaCmd);
2177
2178 switch (pDmaCmd->enmType)
2179 {
2180 case VBOXVDMACMD_TYPE_CHROMIUM_CMD:
2181 {
2182 PVBOXVDMACMD_CHROMIUM_CMD pCrCmd = VBOXVDMACMD_BODY(pDmaCmd, VBOXVDMACMD_CHROMIUM_CMD);
2183 AssertReturn(cbBody >= sizeof(*pCrCmd), VERR_INVALID_PARAMETER);
2184
2185 PVGASTATE pVGAState = pVdma->pVGAState;
2186 rc = VINF_SUCCESS;
2187 if (pVGAState->pDrv->pfnCrHgsmiCommandProcess)
2188 {
2189 VBoxSHGSMICommandMarkAsynchCompletion(pCmdDr);
2190 pVGAState->pDrv->pfnCrHgsmiCommandProcess(pVGAState->pDrv, pCrCmd, cbBody);
2191 break;
2192 }
2193
2194 AssertFailed();
2195 int tmpRc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, pCmdDr);
2196 AssertRC(tmpRc);
2197 break;
2198 }
2199
2200 case VBOXVDMACMD_TYPE_DMA_BPB_TRANSFER:
2201 {
2202 PVBOXVDMACMD_DMA_BPB_TRANSFER pTransfer = VBOXVDMACMD_BODY(pDmaCmd, VBOXVDMACMD_DMA_BPB_TRANSFER);
2203 AssertReturn(cbBody >= sizeof(*pTransfer), VERR_INVALID_PARAMETER);
2204
2205 rc = vboxVDMACmdExecBpbTransfer(pVdma, pTransfer, sizeof (*pTransfer));
2206 AssertRC(rc);
2207 if (RT_SUCCESS(rc))
2208 {
2209 pCmdDr->rc = VINF_SUCCESS;
2210 rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, pCmdDr);
2211 AssertRC(rc);
2212 rc = VINF_SUCCESS;
2213 }
2214 break;
2215 }
2216
2217 default:
2218 break;
2219 }
2220 }
2221 return rc;
2222}
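
/* How the caller (vboxVDMACommand) interprets the result: VINF_SUCCESS means
 * the command was consumed (or completed) here, VINF_NOT_SUPPORTED means it is
 * not a chromium related command and must go through the generic
 * vboxVDMACommandProcess path, and a failure completes the command with that
 * error. */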
2223
2224int vboxVDMACrHgsmiCommandCompleteAsync(PPDMIDISPLAYVBVACALLBACKS pInterface, PVBOXVDMACMD_CHROMIUM_CMD pCmd, int rc)
2225{
2226 PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
2227 PHGSMIINSTANCE pIns = pVGAState->pHGSMI;
2228 VBOXVDMACMD *pDmaHdr = VBOXVDMACMD_FROM_BODY(pCmd);
2229 VBOXVDMACBUF_DR *pDr = VBOXVDMACBUF_DR_FROM_TAIL(pDmaHdr);
2230 AssertRC(rc);
2231 pDr->rc = rc;
2232
2233 Assert(pVGAState->fGuestCaps & VBVACAPS_COMPLETEGCMD_BY_IOREAD);
2234 rc = VBoxSHGSMICommandComplete(pIns, pDr);
2235 AssertRC(rc);
2236 return rc;
2237}
2238
2239int vboxVDMACrHgsmiControlCompleteAsync(PPDMIDISPLAYVBVACALLBACKS pInterface, PVBOXVDMACMD_CHROMIUM_CTL pCmd, int rc)
2240{
2241 PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
2242 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pCmdPrivate = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
2243 pCmdPrivate->rc = rc;
2244 if (pCmdPrivate->pfnCompletion)
2245 {
2246 pCmdPrivate->pfnCompletion(pVGAState, pCmd, pCmdPrivate->pvCompletion);
2247 }
2248 return VINF_SUCCESS;
2249}
2250
2251/**
2252 * Worker for vboxVDMACmdExecBlt().
2253 */
2254static int vboxVDMACmdExecBltPerform(PVBOXVDMAHOST pVdma, const VBOXVIDEOOFFSET offDst, const VBOXVIDEOOFFSET offSrc,
2255 const PVBOXVDMA_SURF_DESC pDstDesc, const PVBOXVDMA_SURF_DESC pSrcDesc,
2256 const VBOXVDMA_RECTL *pDstRectl, const VBOXVDMA_RECTL *pSrcRectl)
2257{
2258 /*
2259 * We do not support color conversion.
2260 */
2261 AssertReturn(pDstDesc->format == pSrcDesc->format, VERR_INVALID_FUNCTION);
2262
2263 /* we do not support stretching (checked by caller) */
2264 Assert(pDstRectl->height == pSrcRectl->height);
2265 Assert(pDstRectl->width == pSrcRectl->width);
2266
2267 uint8_t *pbRam = pVdma->pVGAState->vram_ptrR3;
2268 AssertCompileSize(pVdma->pVGAState->vram_size, sizeof(uint32_t));
2269 uint32_t cbVRamSize = pVdma->pVGAState->vram_size;
2270 uint8_t *pbDstSurf = pbRam + offDst;
2271 uint8_t *pbSrcSurf = pbRam + offSrc;
2272
2273 if ( pDstDesc->width == pDstRectl->width
2274 && pSrcDesc->width == pSrcRectl->width
2275 && pSrcDesc->width == pDstDesc->width
2276 && pSrcDesc->pitch == pDstDesc->pitch)
2277 {
2278 Assert(!pDstRectl->left);
2279 Assert(!pSrcRectl->left);
2280 uint32_t offBoth = pDstDesc->pitch * pDstRectl->top;
2281 uint32_t cbToCopy = pDstDesc->pitch * pDstRectl->height;
2282
2283 if ( cbToCopy <= cbVRamSize
2284 && (uintptr_t)(pbDstSurf + offBoth) - (uintptr_t)pbRam <= cbVRamSize - cbToCopy
2285 && (uintptr_t)(pbSrcSurf + offBoth) - (uintptr_t)pbRam <= cbVRamSize - cbToCopy)
2286 memcpy(pbDstSurf + offBoth, pbSrcSurf + offBoth, cbToCopy);
2287 else
2288 return VERR_INVALID_PARAMETER;
2289 }
2290 else
2291 {
2292 uint32_t offDstLineStart = pDstRectl->left * pDstDesc->bpp >> 3;
2293 uint32_t offDstLineEnd = ((pDstRectl->left * pDstDesc->bpp + 7) >> 3) + ((pDstDesc->bpp * pDstRectl->width + 7) >> 3);
2294 uint32_t cbDstLine = offDstLineEnd - offDstLineStart;
2295 uint32_t offDstStart = pDstDesc->pitch * pDstRectl->top + offDstLineStart;
2296 Assert(cbDstLine <= pDstDesc->pitch);
2297 uint32_t cbDstSkip = pDstDesc->pitch;
2298 uint8_t *pbDstStart = pbDstSurf + offDstStart;
2299
2300 uint32_t offSrcLineStart = pSrcRectl->left * pSrcDesc->bpp >> 3;
2301# ifdef VBOX_STRICT
2302 uint32_t offSrcLineEnd = ((pSrcRectl->left * pSrcDesc->bpp + 7) >> 3) + ((pSrcDesc->bpp * pSrcRectl->width + 7) >> 3);
2303 uint32_t cbSrcLine = offSrcLineEnd - offSrcLineStart;
2304# endif
2305 uint32_t offSrcStart = pSrcDesc->pitch * pSrcRectl->top + offSrcLineStart;
2306 Assert(cbSrcLine <= pSrcDesc->pitch);
2307 uint32_t cbSrcSkip = pSrcDesc->pitch;
2308 const uint8_t *pbSrcStart = pbSrcSurf + offSrcStart;
2309
2310 Assert(cbDstLine == cbSrcLine);
2311
2312 for (uint32_t i = 0; ; ++i)
2313 {
2314 if ( cbDstLine <= cbVRamSize
2315 && (uintptr_t)pbDstStart - (uintptr_t)pbRam <= cbVRamSize - cbDstLine
2316 && (uintptr_t)pbSrcStart - (uintptr_t)pbRam <= cbVRamSize - cbDstLine)
2317 memcpy(pbDstStart, pbSrcStart, cbDstLine);
2318 else
2319 return VERR_INVALID_PARAMETER;
2320 if (i == pDstRectl->height)
2321 break;
2322 pbDstStart += cbDstSkip;
2323 pbSrcStart += cbSrcSkip;
2324 }
2325 }
2326 return VINF_SUCCESS;
2327}
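
/* Worked example for the per-line path above (hypothetical values):
 * left = 10, width = 100, bpp = 32 gives
 *   offDstLineStart = (10 * 32) >> 3 = 40 bytes,
 *   offDstLineEnd = ((10 * 32 + 7) >> 3) + ((32 * 100 + 7) >> 3) = 40 + 400 = 440,
 * so each iteration copies cbDstLine = 400 bytes and then advances both
 * pointers by their surface pitch. */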
2328
2329#if 0 /* unused */
2330static void vboxVDMARectlUnite(VBOXVDMA_RECTL * pRectl1, const VBOXVDMA_RECTL * pRectl2)
2331{
2332 if (!pRectl1->width)
2333 *pRectl1 = *pRectl2;
2334 else
2335 {
2336 int16_t x21 = pRectl1->left + pRectl1->width;
2337 int16_t x22 = pRectl2->left + pRectl2->width;
2338 if (pRectl1->left > pRectl2->left)
2339 {
2340 pRectl1->left = pRectl2->left;
2341 pRectl1->width = x21 < x22 ? x22 - pRectl1->left : x21 - pRectl1->left;
2342 }
2343 else if (x21 < x22)
2344 pRectl1->width = x22 - pRectl1->left;
2345
2346 x21 = pRectl1->top + pRectl1->height;
2347 x22 = pRectl2->top + pRectl2->height;
2348 if (pRectl1->top > pRectl2->top)
2349 {
2350 pRectl1->top = pRectl2->top;
2351 pRectl1->height = x21 < x22 ? x22 - pRectl1->top : x21 - pRectl1->top;
2352 }
2353 else if (x21 < x22)
2354 pRectl1->height = x22 - pRectl1->top;
2355 }
2356}
2357#endif /* unused */
2358
2359/**
2360 * Handles VBOXVDMACMD_TYPE_DMA_PRESENT_BLT for vboxVDMACmdExec().
2361 *
2362 * @returns number of bytes (positive) of the full command on success,
2363 * otherwise a negative error status (VERR_XXX).
2364 *
2365 * @param pVdma The VDMA channel.
2366 * @param pBlt Blit command buffer. This is to be considered
2367 * volatile!
2368 * @param cbBuffer Number of bytes accessible at @a pBlt.
2369 */
2370static int vboxVDMACmdExecBlt(PVBOXVDMAHOST pVdma, const PVBOXVDMACMD_DMA_PRESENT_BLT pBlt, uint32_t cbBuffer)
2371{
2372 /*
2373 * Validate and make a local copy of the blt command up to the rectangle array.
2374 */
2375 AssertReturn(cbBuffer >= RT_UOFFSETOF(VBOXVDMACMD_DMA_PRESENT_BLT, aDstSubRects), VERR_INVALID_PARAMETER);
2376 VBOXVDMACMD_DMA_PRESENT_BLT BltSafe;
2377 memcpy(&BltSafe, pBlt, RT_UOFFSETOF(VBOXVDMACMD_DMA_PRESENT_BLT, aDstSubRects));
2378
2379 AssertReturn(BltSafe.cDstSubRects < _8M, VERR_INVALID_PARAMETER);
2380 uint32_t const cbBlt = RT_UOFFSETOF(VBOXVDMACMD_DMA_PRESENT_BLT, aDstSubRects[BltSafe.cDstSubRects]);
2381 AssertReturn(cbBuffer >= cbBlt, VERR_INVALID_PARAMETER);
2382
2383
2384 /*
2385 * We do not support stretching.
2386 */
2387 AssertReturn(BltSafe.srcRectl.width == BltSafe.dstRectl.width, VERR_INVALID_FUNCTION);
2388 AssertReturn(BltSafe.srcRectl.height == BltSafe.dstRectl.height, VERR_INVALID_FUNCTION);
2389
2390 Assert(BltSafe.cDstSubRects);
2391
2392 //VBOXVDMA_RECTL updateRectl = {0, 0, 0, 0}; - pointless
2393
2394 if (BltSafe.cDstSubRects)
2395 {
2396 for (uint32_t i = 0; i < BltSafe.cDstSubRects; ++i)
2397 {
2398 VBOXVDMA_RECTL dstSubRectl = pBlt->aDstSubRects[i];
2399 VBOXVDMA_RECTL srcSubRectl = dstSubRectl;
2400
2401 dstSubRectl.left += BltSafe.dstRectl.left;
2402 dstSubRectl.top += BltSafe.dstRectl.top;
2403
2404 srcSubRectl.left += BltSafe.srcRectl.left;
2405 srcSubRectl.top += BltSafe.srcRectl.top;
2406
2407 int rc = vboxVDMACmdExecBltPerform(pVdma, BltSafe.offDst, BltSafe.offSrc, &BltSafe.dstDesc, &BltSafe.srcDesc,
2408 &dstSubRectl, &srcSubRectl);
2409 AssertRCReturn(rc, rc);
2410
2411 //vboxVDMARectlUnite(&updateRectl, &dstSubRectl); - pointless
2412 }
2413 }
2414 else
2415 {
2416 int rc = vboxVDMACmdExecBltPerform(pVdma, BltSafe.offDst, BltSafe.offSrc, &BltSafe.dstDesc, &BltSafe.srcDesc,
2417 &BltSafe.dstRectl, &BltSafe.srcRectl);
2418 AssertRCReturn(rc, rc);
2419
2420 //vboxVDMARectlUnite(&updateRectl, &BltSafe.dstRectl); - pointless
2421 }
2422
2423 return cbBlt;
2424}
2425
2426
2427/**
2428 * Handles VBOXVDMACMD_TYPE_DMA_BPB_TRANSFER for vboxVDMACmdCheckCrCmd() and
2429 * vboxVDMACmdExec().
2430 *
2431 * @returns number of bytes (positive) of the full command on success,
2432 * otherwise a negative error status (VERR_XXX).
2433 *
2434 * @param pVdma The VDMA channel.
2435 * @param pTransfer Transfer command buffer. This is to be considered
2436 * volatile!
2437 * @param cbBuffer Number of bytes accessible at @a pTransfer.
2438 */
2439static int vboxVDMACmdExecBpbTransfer(PVBOXVDMAHOST pVdma, const VBOXVDMACMD_DMA_BPB_TRANSFER *pTransfer, uint32_t cbBuffer)
2440{
2441 /*
2442 * Make a copy of the command (it's volatile).
2443 */
2444 AssertReturn(cbBuffer >= sizeof(*pTransfer), VERR_INVALID_PARAMETER);
2445 VBOXVDMACMD_DMA_BPB_TRANSFER const TransferSafeCopy = *pTransfer;
2446 pTransfer = &TransferSafeCopy;
2447
2448 PVGASTATE pVGAState = pVdma->pVGAState;
2449 PPDMDEVINS pDevIns = pVGAState->pDevInsR3;
2450 uint8_t *pbRam = pVGAState->vram_ptrR3;
2451 uint32_t cbTransfer = TransferSafeCopy.cbTransferSize;
2452
2453 /*
2454 * Validate VRAM offset.
2455 */
2456 if (TransferSafeCopy.fFlags & VBOXVDMACMD_DMA_BPB_TRANSFER_F_SRC_VRAMOFFSET)
2457 AssertReturn( cbTransfer <= pVGAState->vram_size
2458 && TransferSafeCopy.Src.offVramBuf <= pVGAState->vram_size - cbTransfer,
2459 VERR_INVALID_PARAMETER);
2460
2461 if (TransferSafeCopy.fFlags & VBOXVDMACMD_DMA_BPB_TRANSFER_F_DST_VRAMOFFSET)
2462 AssertReturn( cbTransfer <= pVGAState->vram_size
2463 && TransferSafeCopy.Dst.offVramBuf <= pVGAState->vram_size - cbTransfer,
2464 VERR_INVALID_PARAMETER);
2465
2466 /*
2467 * Transfer loop.
2468 */
2469 uint32_t cbTransfered = 0;
2470 int rc = VINF_SUCCESS;
2471 do
2472 {
2473 uint32_t cbSubTransfer = cbTransfer;
2474
2475 const void *pvSrc;
2476 bool fSrcLocked = false;
2477 PGMPAGEMAPLOCK SrcLock;
2478 if (TransferSafeCopy.fFlags & VBOXVDMACMD_DMA_BPB_TRANSFER_F_SRC_VRAMOFFSET)
2479 pvSrc = pbRam + TransferSafeCopy.Src.offVramBuf + cbTransfered;
2480 else
2481 {
2482 RTGCPHYS GCPhysSrcPage = TransferSafeCopy.Src.phBuf + cbTransfered;
2483 rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, GCPhysSrcPage, 0, &pvSrc, &SrcLock);
2484 AssertRC(rc);
2485 if (RT_SUCCESS(rc))
2486 {
2487 fSrcLocked = true;
2488 cbSubTransfer = RT_MIN(cbSubTransfer, X86_PAGE_SIZE - (uint32_t)(GCPhysSrcPage & X86_PAGE_OFFSET_MASK));
2489 }
2490 else
2491 break;
2492 }
2493
2494 void *pvDst;
2495 PGMPAGEMAPLOCK DstLock;
2496 bool fDstLocked = false;
2497 if (TransferSafeCopy.fFlags & VBOXVDMACMD_DMA_BPB_TRANSFER_F_DST_VRAMOFFSET)
2498 pvDst = pbRam + TransferSafeCopy.Dst.offVramBuf + cbTransfered;
2499 else
2500 {
2501 RTGCPHYS GCPhysDstPage = TransferSafeCopy.Dst.phBuf + cbTransfered;
2502 rc = PDMDevHlpPhysGCPhys2CCPtr(pDevIns, GCPhysDstPage, 0, &pvDst, &DstLock);
2503 AssertRC(rc);
2504 if (RT_SUCCESS(rc))
2505 {
2506 fDstLocked = true;
2507 cbSubTransfer = RT_MIN(cbSubTransfer, X86_PAGE_SIZE - (uint32_t)(GCPhysDstPage & X86_PAGE_OFFSET_MASK));
2508 }
2509 }
2510
2511 if (RT_SUCCESS(rc))
2512 {
2513 memcpy(pvDst, pvSrc, cbSubTransfer);
2514 cbTransfered += cbSubTransfer;
2515 cbTransfer -= cbSubTransfer;
2516 }
2517 else
2518 cbTransfer = 0; /* force break below */
2519
2520 if (fSrcLocked)
2521 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &SrcLock);
2522 if (fDstLocked)
2523 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &DstLock);
2524 } while (cbTransfer);
2525
2526 if (RT_SUCCESS(rc))
2527 return sizeof(TransferSafeCopy);
2528 return rc;
2529}
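
/* Chunking example for the loop above (hypothetical addresses; destination in
 * VRAM, so only the source limits the chunk size): a 0x3000 byte transfer from
 * guest physical 0x10f00 is split at page boundaries into sub-transfers of
 * 0x100 bytes (to the end of the first page), then 0x1000, 0x1000 and finally
 * 0xf00 bytes, each with its own map/copy/unmap cycle. */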
2530
2531/**
2532 * Worker for vboxVDMACommandProcess().
2533 *
2534 * @param pVdma The VDMA channel.
2535 * @param pbBuffer Command buffer, considered volatile.
2536 * @param cbBuffer The number of bytes at @a pbBuffer.
2537 */
2538static int vboxVDMACmdExec(PVBOXVDMAHOST pVdma, const uint8_t *pbBuffer, uint32_t cbBuffer)
2539{
2540 AssertReturn(pbBuffer, VERR_INVALID_POINTER);
2541
2542 for (;;)
2543 {
2544 AssertReturn(cbBuffer >= VBOXVDMACMD_HEADER_SIZE(), VERR_INVALID_PARAMETER);
2545
2546 VBOXVDMACMD const *pCmd = (VBOXVDMACMD const *)pbBuffer;
2547 VBOXVDMACMD_TYPE enmCmdType = pCmd->enmType;
2548 int cbProcessed;
2549 switch (enmCmdType)
2550 {
2551 case VBOXVDMACMD_TYPE_CHROMIUM_CMD:
2552 {
2553# ifdef VBOXWDDM_TEST_UHGSMI
2554 static int count = 0;
2555 static uint64_t start, end;
2556 if (count==0)
2557 {
2558 start = RTTimeNanoTS();
2559 }
2560 ++count;
2561 if (count==100000)
2562 {
2563 end = RTTimeNanoTS();
2564 float ems = (end-start)/1000000.f;
2565 LogRel(("100000 calls took %i ms, %i cps\n", (int)ems, (int)(100000.f*1000.f/ems) ));
2566 }
2567# endif
2568 /** @todo post the buffer to chromium */
2569 return VINF_SUCCESS;
2570 }
2571
2572 case VBOXVDMACMD_TYPE_DMA_PRESENT_BLT:
2573 {
2574 const PVBOXVDMACMD_DMA_PRESENT_BLT pBlt = VBOXVDMACMD_BODY(pCmd, VBOXVDMACMD_DMA_PRESENT_BLT);
2575 cbProcessed = vboxVDMACmdExecBlt(pVdma, pBlt, cbBuffer - VBOXVDMACMD_HEADER_SIZE());
2576 Assert(cbProcessed >= 0);
2577 break;
2578 }
2579
2580 case VBOXVDMACMD_TYPE_DMA_BPB_TRANSFER:
2581 {
2582 const PVBOXVDMACMD_DMA_BPB_TRANSFER pTransfer = VBOXVDMACMD_BODY(pCmd, VBOXVDMACMD_DMA_BPB_TRANSFER);
2583 cbProcessed = vboxVDMACmdExecBpbTransfer(pVdma, pTransfer, cbBuffer - VBOXVDMACMD_HEADER_SIZE());
2584 Assert(cbProcessed >= 0);
2585 break;
2586 }
2587
2588 case VBOXVDMACMD_TYPE_DMA_NOP:
2589 return VINF_SUCCESS;
2590
2591 case VBOXVDMACMD_TYPE_CHILD_STATUS_IRQ:
2592 return VINF_SUCCESS;
2593
2594 default:
2595 AssertFailedReturn(VERR_INVALID_FUNCTION);
2596 }
2597
2598 /* Advance buffer or return. */
2599 if (cbProcessed >= 0)
2600 {
2601 Assert(cbProcessed > 0);
2602 cbProcessed += VBOXVDMACMD_HEADER_SIZE();
2603 if ((uint32_t)cbProcessed >= cbBuffer)
2604 {
2605 Assert((uint32_t)cbProcessed == cbBuffer);
2606 return VINF_SUCCESS;
2607 }
2608
2609 cbBuffer -= cbProcessed;
2610 pbBuffer += cbProcessed;
2611 }
2612 else
2613 return cbProcessed; /* error status */
2614
2615 }
2616}
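
/* The loop above thus walks a packed sequence of [VBOXVDMACMD header][body]
 * records: each handler returns the body size it consumed so the cursor can
 * advance, while the CHROMIUM_CMD, DMA_NOP and CHILD_STATUS_IRQ cases end the
 * walk and implicitly discard whatever follows in the buffer. */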
2617
2618/**
2619 * VDMA worker thread procedure, see vdmaVBVACtlEnableSubmitInternal().
2620 *
2621 * @thread VDMA
2622 */
2623static DECLCALLBACK(int) vboxVDMAWorkerThread(RTTHREAD hThreadSelf, void *pvUser)
2624{
2625 RT_NOREF(hThreadSelf);
2626 PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)pvUser;
2627 PVGASTATE pVGAState = pVdma->pVGAState;
2628 VBVAEXHOSTCONTEXT *pCmdVbva = &pVdma->CmdVbva;
2629 int rc;
2630
2631 VBoxVDMAThreadNotifyConstructSucceeded(&pVdma->Thread, pvUser);
2632
2633 while (!VBoxVDMAThreadIsTerminating(&pVdma->Thread))
2634 {
2635 uint8_t *pbCmd = NULL;
2636 uint32_t cbCmd = 0;
2637 VBVAEXHOST_DATA_TYPE enmType = VBoxVBVAExHPDataGet(pCmdVbva, &pbCmd, &cbCmd);
2638 switch (enmType)
2639 {
2640 case VBVAEXHOST_DATA_TYPE_CMD:
2641 vboxVDMACrCmdProcess(pVdma, pbCmd, cbCmd);
2642 VBoxVBVAExHPDataCompleteCmd(pCmdVbva, cbCmd);
2643 VBVARaiseIrq(pVGAState, 0);
2644 break;
2645
2646 case VBVAEXHOST_DATA_TYPE_GUESTCTL:
2647 rc = vboxVDMACrGuestCtlProcess(pVdma, (VBVAEXHOSTCTL *)pbCmd);
2648 VBoxVBVAExHPDataCompleteCtl(pCmdVbva, (VBVAEXHOSTCTL *)pbCmd, rc);
2649 break;
2650
2651 case VBVAEXHOST_DATA_TYPE_HOSTCTL:
2652 {
2653 bool fContinue = true;
2654 rc = vboxVDMACrHostCtlProcess(pVdma, (VBVAEXHOSTCTL *)pbCmd, &fContinue);
2655 VBoxVBVAExHPDataCompleteCtl(pCmdVbva, (VBVAEXHOSTCTL *)pbCmd, rc);
2656 if (fContinue)
2657 break;
2658 }
2659 RT_FALL_THRU();
2660 case VBVAEXHOST_DATA_TYPE_NO_DATA:
2661 rc = VBoxVDMAThreadEventWait(&pVdma->Thread, RT_INDEFINITE_WAIT);
2662 AssertRC(rc);
2663 break;
2664
2665 default:
2666 WARN(("unexpected type %d\n", enmType));
2667 break;
2668 }
2669 }
2670
2671 VBoxVDMAThreadNotifyTerminatingSucceeded(&pVdma->Thread, pvUser);
2672
2673 return VINF_SUCCESS;
2674}
2675
2676/**
2677 * Worker for vboxVDMACommand.
2678 *
2679 * @param pCmd The command to process. Consider content volatile.
2680 * @param cbCmd Number of valid bytes at @a pCmd. This is at least
2681 * sizeof(VBOXVDMACBUF_DR).
2682 */
2683static void vboxVDMACommandProcess(PVBOXVDMAHOST pVdma, PVBOXVDMACBUF_DR pCmd, uint32_t cbCmd)
2684{
2685 PHGSMIINSTANCE pHgsmi = pVdma->pHgsmi;
2686 int rc;
2687
2688 do /* break loop */
2689 {
2690 /*
2691 * Get the command buffer (volatile).
2692 */
2693 uint16_t const cbCmdBuf = pCmd->cbBuf;
2694 const uint8_t *pbCmdBuf;
2695 PGMPAGEMAPLOCK Lock;
2696 bool bReleaseLocked = false;
2697 if (pCmd->fFlags & VBOXVDMACBUF_FLAG_BUF_FOLLOWS_DR)
2698 {
2699 pbCmdBuf = VBOXVDMACBUF_DR_TAIL(pCmd, const uint8_t);
2700 rc = VINF_SUCCESS;
2701 AssertBreakStmt((uintptr_t)&pbCmdBuf[cbCmdBuf] <= (uintptr_t)&((uint8_t *)pCmd)[cbCmd],
2702 rc = VERR_INVALID_PARAMETER);
2703 }
2704 else if (pCmd->fFlags & VBOXVDMACBUF_FLAG_BUF_VRAM_OFFSET)
2705 {
2706 uint64_t offVRam = pCmd->Location.offVramBuf;
2707 pbCmdBuf = (uint8_t const *)pVdma->pVGAState->vram_ptrR3 + offVRam;
2708 rc = VINF_SUCCESS;
2709 AssertBreakStmt( offVRam <= pVdma->pVGAState->vram_size
2710 && offVRam + cbCmdBuf <= pVdma->pVGAState->vram_size,
2711 rc = VERR_INVALID_PARAMETER);
2712 }
2713 else
2714 {
2715 /* Make sure it doesn't cross a page. */
2716 RTGCPHYS GCPhysBuf = pCmd->Location.phBuf;
2717 AssertBreakStmt((uint32_t)(GCPhysBuf & X86_PAGE_OFFSET_MASK) + cbCmdBuf <= (uint32_t)X86_PAGE_SIZE,
2718 rc = VERR_INVALID_PARAMETER);
2719
2720 rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pVdma->pVGAState->pDevInsR3, GCPhysBuf, 0 /*fFlags*/,
2721 (const void **)&pbCmdBuf, &Lock);
2722 AssertRCBreak(rc); /* if (rc == VERR_PGM_PHYS_PAGE_RESERVED) -> fall back on using PGMPhysRead ?? */
2723 bReleaseLocked = true;
2724 }
2725
2726 /*
2727 * Process the command.
2728 */
2729 rc = vboxVDMACmdExec(pVdma, pbCmdBuf, cbCmdBuf);
2730 AssertRC(rc);
2731
2732 /* Clean up the command buffer. */
2733 if (bReleaseLocked)
2734 PDMDevHlpPhysReleasePageMappingLock(pVdma->pVGAState->pDevInsR3, &Lock);
2735
2736 } while (0);
2737
2738 /*
2739 * Complete the command.
2740 */
2741 pCmd->rc = rc;
2742 rc = VBoxSHGSMICommandComplete(pHgsmi, pCmd);
2743 AssertRC(rc);
2744}
2745
2746# if 0 /** @todo vboxVDMAControlProcess is unused */
2747static void vboxVDMAControlProcess(PVBOXVDMAHOST pVdma, PVBOXVDMA_CTL pCmd)
2748{
2749 PHGSMIINSTANCE pHgsmi = pVdma->pHgsmi;
2750 pCmd->i32Result = VINF_SUCCESS;
2751 int rc = VBoxSHGSMICommandComplete (pHgsmi, pCmd);
2752 AssertRC(rc);
2753}
2754# endif
2755
2756#endif /* VBOX_WITH_CRHGSMI */
2757#ifdef VBOX_VDMA_WITH_WATCHDOG
2758
2759/**
2760 * @callback_method_impl{TMTIMER, VDMA watchdog timer.}
2761 */
2762static DECLCALLBACK(void) vboxVDMAWatchDogTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
2763{
2764 VBOXVDMAHOST *pVdma = (VBOXVDMAHOST *)pvUser;
2765 PVGASTATE pVGAState = pVdma->pVGAState;
2766 VBVARaiseIrq(pVGAState, HGSMIHOSTFLAGS_WATCHDOG);
2767}
2768
2769/**
2770 * Handles VBOXVDMA_CTL_TYPE_WATCHDOG for vboxVDMAControl.
2771 */
2772static int vboxVDMAWatchDogCtl(struct VBOXVDMAHOST *pVdma, uint32_t cMillis)
2773{
2775 if (cMillis)
2776 TMTimerSetMillies(pVdma->WatchDogTimer, cMillis);
2777 else
2778 TMTimerStop(pVdma->WatchDogTimer);
2779 return VINF_SUCCESS;
2780}
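
/* Note: the guest passes the timeout in milliseconds through the generic
 * VBOXVDMA_CTL u32Offset field (see the VBOXVDMA_CTL_TYPE_WATCHDOG case in
 * vboxVDMAControl below); a zero value stops the watchdog. */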
2781
2782#endif /* VBOX_VDMA_WITH_WATCHDOG */
2783
2784/**
2785 * Called by vgaR3Construct() to initialize the state.
2786 *
2787 * @returns VBox status code.
2788 */
2789int vboxVDMAConstruct(PVGASTATE pVGAState, uint32_t cPipeElements)
2790{
2791 RT_NOREF(cPipeElements);
2792 int rc;
2793 PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)RTMemAllocZ(sizeof(*pVdma));
2794 Assert(pVdma);
2795 if (pVdma)
2796 {
2797 pVdma->pHgsmi = pVGAState->pHGSMI;
2798 pVdma->pVGAState = pVGAState;
2799
2800#ifdef VBOX_VDMA_WITH_WATCHDOG
2801 rc = PDMDevHlpTMTimerCreate(pVGAState->pDevInsR3, TMCLOCK_REAL, vboxVDMAWatchDogTimer,
2802 pVdma, TMTIMER_FLAGS_NO_CRIT_SECT,
2803 "VDMA WatchDog Timer", &pVdma->WatchDogTimer);
2804 AssertRC(rc);
2805#else
2806 rc = VINF_SUCCESS;
2807#endif
2808 if (RT_SUCCESS(rc))
2809 {
2810#ifdef VBOX_WITH_CRHGSMI
2811 VBoxVDMAThreadInit(&pVdma->Thread);
2812
2813 rc = RTSemEventMultiCreate(&pVdma->HostCrCtlCompleteEvent);
2814 if (RT_SUCCESS(rc))
2815 {
2816 rc = VBoxVBVAExHSInit(&pVdma->CmdVbva);
2817 if (RT_SUCCESS(rc))
2818 {
2819 rc = RTCritSectInit(&pVdma->CalloutCritSect);
2820 if (RT_SUCCESS(rc))
2821 {
2822#endif
2823 pVGAState->pVdma = pVdma;
2824
2825 /* No HGCM service if VMSVGA is enabled. */
2826 if (!pVGAState->fVMSVGAEnabled)
2827 {
2828 int rcIgnored = vboxVDMACrCtlHgsmiSetup(pVdma); NOREF(rcIgnored); /** @todo is this ignoring intentional? */
2829 }
2830 return VINF_SUCCESS;
2831
2832#ifdef VBOX_WITH_CRHGSMI
2833 }
2834
2835 WARN(("RTCritSectInit failed %d\n", rc));
2836 VBoxVBVAExHSTerm(&pVdma->CmdVbva);
2837 }
2838 else
2839 WARN(("VBoxVBVAExHSInit failed %d\n", rc));
2840 RTSemEventMultiDestroy(pVdma->HostCrCtlCompleteEvent);
2841 }
2842 else
2843 WARN(("RTSemEventMultiCreate failed %d\n", rc));
2844#endif
2845 /* the timer is cleaned up automatically */
2846 }
2847 RTMemFree(pVdma);
2848 }
2849 else
2850 rc = VERR_OUT_OF_RESOURCES;
2851 return rc;
2852}
2853
2854/**
2855 * Called by vgaR3Reset() to do reset.
2856 */
2857void vboxVDMAReset(struct VBOXVDMAHOST *pVdma)
2858{
2859#ifdef VBOX_WITH_CRHGSMI
2860 vdmaVBVACtlDisableSync(pVdma);
2861#else
2862 RT_NOREF(pVdma);
2863#endif
2864}
2865
2866/**
2867 * Called by vgaR3Destruct() to do cleanup.
2868 */
2869void vboxVDMADestruct(struct VBOXVDMAHOST *pVdma)
2870{
2871 if (!pVdma)
2872 return;
2873#ifdef VBOX_WITH_CRHGSMI
2874 if (pVdma->pVGAState->fVMSVGAEnabled)
2875 VBoxVBVAExHSDisable(&pVdma->CmdVbva);
2876 else
2877 {
2878 /** @todo Remove. It does nothing because pVdma->CmdVbva is already disabled at this point
2879 * as the result of the SharedOpenGL HGCM service unloading.
2880 */
2881 vdmaVBVACtlDisableSync(pVdma);
2882 }
2883 VBoxVDMAThreadCleanup(&pVdma->Thread);
2884 VBoxVBVAExHSTerm(&pVdma->CmdVbva);
2885 RTSemEventMultiDestroy(pVdma->HostCrCtlCompleteEvent);
2886 RTCritSectDelete(&pVdma->CalloutCritSect);
2887#endif
2888 RTMemFree(pVdma);
2889}
2890
2891/**
2892 * Handle VBVA_VDMA_CTL, see vbvaChannelHandler().
2893 *
2894 * @param pVdma The VDMA channel.
2895 * @param pCmd The control command to handle. Considered volatile.
2896 * @param cbCmd The size of the command. At least sizeof(VBOXVDMA_CTL).
2897 */
2898void vboxVDMAControl(struct VBOXVDMAHOST *pVdma, PVBOXVDMA_CTL pCmd, uint32_t cbCmd)
2899{
2900 RT_NOREF(cbCmd);
2901 PHGSMIINSTANCE pIns = pVdma->pHgsmi;
2902
2903 VBOXVDMA_CTL_TYPE enmCtl = pCmd->enmCtl;
2904 switch (enmCtl)
2905 {
2906 case VBOXVDMA_CTL_TYPE_ENABLE:
2907 pCmd->i32Result = VINF_SUCCESS;
2908 break;
2909 case VBOXVDMA_CTL_TYPE_DISABLE:
2910 pCmd->i32Result = VINF_SUCCESS;
2911 break;
2912 case VBOXVDMA_CTL_TYPE_FLUSH:
2913 pCmd->i32Result = VINF_SUCCESS;
2914 break;
2915#ifdef VBOX_VDMA_WITH_WATCHDOG
2916 case VBOXVDMA_CTL_TYPE_WATCHDOG:
2917 pCmd->i32Result = vboxVDMAWatchDogCtl(pVdma, pCmd->u32Offset);
2918 break;
2919#endif
2920 default:
2921 WARN(("cmd not supported"));
2922 pCmd->i32Result = VERR_NOT_SUPPORTED;
2923 break;
2924 }
2925
2926 int rc = VBoxSHGSMICommandComplete(pIns, pCmd);
2927 AssertRC(rc);
2928}
2929
2930/**
2931 * Handle VBVA_VDMA_CMD, see vbvaChannelHandler().
2932 *
2933 * @param pVdma The VDMA channel.
2934 * @param pCmd The command to handle. Considered volatile.
2935 * @param cbCmd The size of the command. At least sizeof(VBOXVDMACBUF_DR).
2936 */
2937void vboxVDMACommand(struct VBOXVDMAHOST *pVdma, PVBOXVDMACBUF_DR pCmd, uint32_t cbCmd)
2938{
2939#ifdef VBOX_WITH_CRHGSMI
2940 /* Chromium commands are processed by the chromium HGCM thread independently of our
2941 * internal command processing pipeline, which is why we process them specially. */
2942 int rc = vboxVDMACmdCheckCrCmd(pVdma, pCmd, cbCmd);
2943 if (rc == VINF_SUCCESS)
2944 return;
2945
2946 if (RT_FAILURE(rc))
2947 {
2948 pCmd->rc = rc;
2949 rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, pCmd);
2950 AssertRC(rc);
2951 return;
2952 }
2953
2954 vboxVDMACommandProcess(pVdma, pCmd, cbCmd);
2955
2956#else
2957 RT_NOREF(cbCmd);
2958 pCmd->rc = VERR_NOT_IMPLEMENTED;
2959 int rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, pCmd);
2960 AssertRC(rc);
2961#endif
2962}
2963
2964#ifdef VBOX_WITH_CRHGSMI
2965
2966static DECLCALLBACK(void) vdmaVBVACtlSubmitSyncCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl, int rc, void *pvContext);
2967
2968static int vdmaVBVACtlSubmit(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL* pCtl, VBVAEXHOSTCTL_SOURCE enmSource,
2969 PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
2970{
2971 int rc = VBoxVBVAExHCtlSubmit(&pVdma->CmdVbva, pCtl, enmSource, pfnComplete, pvComplete);
2972 if (RT_SUCCESS(rc))
2973 {
2974 if (rc == VINF_SUCCESS)
2975 return VBoxVDMAThreadEventNotify(&pVdma->Thread);
2976 Assert(rc == VINF_ALREADY_INITIALIZED);
2977 }
2978 else
2979 Log(("VBoxVBVAExHCtlSubmit failed %d\n", rc));
2980
2981 return rc;
2982}
2983
2984/**
2985 * @callback_method_impl{FNVBVAEXHOSTCTL_COMPLETE,
2986 * Used by vdmaVBVACtlGenericGuestSubmit() and vdmaVBVACtlEnableDisableSubmit() }
2987 */
2988static DECLCALLBACK(void) vboxCmdVBVACmdCtlGuestCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl,
2989 int rc, void *pvContext)
2990{
2991 PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)pvContext;
2992 VBOXCMDVBVA_CTL *pGCtl = (VBOXCMDVBVA_CTL*)(pCtl->u.cmd.pu8Cmd - sizeof (VBOXCMDVBVA_CTL));
2993 AssertRC(rc);
2994 pGCtl->i32Result = rc;
2995
2996 Assert(pVdma->pVGAState->fGuestCaps & VBVACAPS_COMPLETEGCMD_BY_IOREAD);
2997 rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, pGCtl);
2998 AssertRC(rc);
2999
3000 VBoxVBVAExHCtlFree(pVbva, pCtl);
3001}
3002
3003/**
3004 * Worker for vdmaVBVACtlGenericGuestSubmit() and vdmaVBVACtlOpaqueHostSubmit().
3005 */
3006static int vdmaVBVACtlGenericSubmit(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL_SOURCE enmSource, VBVAEXHOSTCTL_TYPE enmType,
3007 uint8_t* pu8Cmd, uint32_t cbCmd, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
3008{
3009 VBVAEXHOSTCTL *pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva, enmType);
3010 if (!pHCtl)
3011 {
3012 WARN(("VBoxVBVAExHCtlCreate failed\n"));
3013 return VERR_NO_MEMORY;
3014 }
3015
3016 pHCtl->u.cmd.pu8Cmd = pu8Cmd;
3017 pHCtl->u.cmd.cbCmd = cbCmd;
3018 int rc = vdmaVBVACtlSubmit(pVdma, pHCtl, enmSource, pfnComplete, pvComplete);
3019 if (RT_FAILURE(rc))
3020 {
3021 VBoxVBVAExHCtlFree(&pVdma->CmdVbva, pHCtl);
3022 Log(("vdmaVBVACtlSubmit failed rc %d\n", rc));
3023 return rc;
3024 }
3025 return VINF_SUCCESS;
3026}
3027
3028/**
3029 * Handler for vboxCmdVBVACmdCtl()/VBOXCMDVBVACTL_TYPE_3DCTL.
3030 */
3031static int vdmaVBVACtlGenericGuestSubmit(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL_TYPE enmType, VBOXCMDVBVA_CTL *pCtl, uint32_t cbCtl)
3032{
3033 Assert(cbCtl >= sizeof (VBOXCMDVBVA_CTL));
3034 VBoxSHGSMICommandMarkAsynchCompletion(pCtl);
3035 int rc = vdmaVBVACtlGenericSubmit(pVdma, VBVAEXHOSTCTL_SOURCE_GUEST, enmType, (uint8_t*)(pCtl+1),
3036 cbCtl - sizeof (VBOXCMDVBVA_CTL), vboxCmdVBVACmdCtlGuestCompletion, pVdma);
3037 if (RT_SUCCESS(rc))
3038 return VINF_SUCCESS;
3039
3040 WARN(("vdmaVBVACtlGenericSubmit failed %d\n", rc));
3041 pCtl->i32Result = rc;
3042 rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, pCtl);
3043 AssertRC(rc);
3044 return VINF_SUCCESS;
3045}
3046
3047/**
3048 * @callback_method_impl{FNVBVAEXHOSTCTL_COMPLETE, Used by vdmaVBVACtlOpaqueHostSubmit()}
3049 */
3050static DECLCALLBACK(void) vboxCmdVBVACmdCtlHostCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl,
3051 int rc, void *pvCompletion)
3052{
3053 VBOXCRCMDCTL* pVboxCtl = (VBOXCRCMDCTL*)pCtl->u.cmd.pu8Cmd;
3054 if (pVboxCtl->u.pfnInternal)
3055 ((PFNCRCTLCOMPLETION)pVboxCtl->u.pfnInternal)(pVboxCtl, pCtl->u.cmd.cbCmd, rc, pvCompletion);
3056 VBoxVBVAExHCtlFree(pVbva, pCtl);
3057}
3058
3059/**
3060 * Worker for vboxCmdVBVACmdHostCtl() and vboxCmdVBVACmdHostCtlSync().
3061 */
3062static int vdmaVBVACtlOpaqueHostSubmit(PVBOXVDMAHOST pVdma, struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd,
3063 PFNCRCTLCOMPLETION pfnCompletion, void *pvCompletion)
3064{
3065 pCmd->u.pfnInternal = (PFNRT)pfnCompletion;
3066 int rc = vdmaVBVACtlGenericSubmit(pVdma, VBVAEXHOSTCTL_SOURCE_HOST, VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE,
3067 (uint8_t *)pCmd, cbCmd, vboxCmdVBVACmdCtlHostCompletion, pvCompletion);
3068 if (RT_FAILURE(rc))
3069 {
3070 if (rc == VERR_INVALID_STATE)
3071 {
3072 pCmd->u.pfnInternal = NULL;
3073 PVGASTATE pVGAState = pVdma->pVGAState;
3074 rc = pVGAState->pDrv->pfnCrHgcmCtlSubmit(pVGAState->pDrv, pCmd, cbCmd, pfnCompletion, pvCompletion);
3075 if (!RT_SUCCESS(rc))
3076 WARN(("pfnCrHgsmiControlProcess failed %d\n", rc));
3077
3078 return rc;
3079 }
3080 WARN(("vdmaVBVACtlGenericSubmit failed %d\n", rc));
3081 return rc;
3082 }
3083
3084 return VINF_SUCCESS;
3085}
3086
3087/**
3088 * Called from vdmaVBVACtlThreadCreatedEnable().
3089 */
3090static int vdmaVBVANotifyEnable(PVGASTATE pVGAState)
3091{
3092 for (uint32_t i = 0; i < pVGAState->cMonitors; i++)
3093 {
3094 int rc = pVGAState->pDrv->pfnVBVAEnable (pVGAState->pDrv, i, NULL, true);
3095 if (!RT_SUCCESS(rc))
3096 {
3097 WARN(("pfnVBVAEnable failed %d\n", rc));
3098 for (uint32_t j = 0; j < i; j++)
3099 {
3100 pVGAState->pDrv->pfnVBVADisable (pVGAState->pDrv, j);
3101 }
3102
3103 return rc;
3104 }
3105 }
3106 return VINF_SUCCESS;
3107}
3108
3109/**
3110 * Called from vdmaVBVACtlThreadCreatedEnable() and vdmaVBVADisableProcess().
3111 */
3112static int vdmaVBVANotifyDisable(PVGASTATE pVGAState)
3113{
3114 for (uint32_t i = 0; i < pVGAState->cMonitors; i++)
3115 pVGAState->pDrv->pfnVBVADisable(pVGAState->pDrv, i);
3116 return VINF_SUCCESS;
3117}
3118
3119/**
3120 * Hook that is called by vboxVDMAWorkerThread when it starts.
3121 */
3122static DECLCALLBACK(void) vdmaVBVACtlThreadCreatedEnable(struct VBOXVDMATHREAD *pThread, int rc,
3123 void *pvThreadContext, void *pvContext)
3124{
3125 RT_NOREF(pThread);
3126 PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)pvThreadContext;
3127 VBVAEXHOSTCTL* pHCtl = (VBVAEXHOSTCTL*)pvContext;
3128
3129 if (RT_SUCCESS(rc))
3130 {
3131 rc = vboxVDMACrGuestCtlProcess(pVdma, pHCtl);
3132 /* rc == VINF_SUCCESS would mean the actual state change has occurred */
3133 if (rc == VINF_SUCCESS)
3134 {
3135 /* We need to inform Main about the VBVA enable/disable. Main expects
3136 * notifications to be done from the main thread, so submit the
3137 * notification there. */
3138 PVGASTATE pVGAState = pVdma->pVGAState;
3139
3140 if (VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
3141 vdmaVBVANotifyEnable(pVGAState);
3142 else
3143 vdmaVBVANotifyDisable(pVGAState);
3144 }
3145 else if (RT_FAILURE(rc))
3146 WARN(("vboxVDMACrGuestCtlProcess failed %d\n", rc));
3147 }
3148 else
3149 WARN(("vdmaVBVACtlThreadCreatedEnable is passed %d\n", rc));
3150
3151 VBoxVBVAExHPDataCompleteCtl(&pVdma->CmdVbva, pHCtl, rc);
3152}
3153
3154/**
3155 * Worker for vdmaVBVACtlEnableDisableSubmitInternal() and vdmaVBVACtlEnableSubmitSync().
3156 */
3157static int vdmaVBVACtlEnableSubmitInternal(PVBOXVDMAHOST pVdma, VBVAENABLE *pEnable, bool fPaused, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
3158{
3159 int rc;
3160 VBVAEXHOSTCTL *pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva,
3161 fPaused ? VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED : VBVAEXHOSTCTL_TYPE_GHH_ENABLE);
3162 if (pHCtl)
3163 {
3164 pHCtl->u.cmd.pu8Cmd = (uint8_t*)pEnable;
3165 pHCtl->u.cmd.cbCmd = sizeof (*pEnable);
3166 pHCtl->pfnComplete = pfnComplete;
3167 pHCtl->pvComplete = pvComplete;
3168
3169 rc = VBoxVDMAThreadCreate(&pVdma->Thread, vboxVDMAWorkerThread, pVdma, vdmaVBVACtlThreadCreatedEnable, pHCtl);
3170 if (RT_SUCCESS(rc))
3171 return VINF_SUCCESS;
3172 WARN(("VBoxVDMAThreadCreate failed %d\n", rc));
3173
3174 VBoxVBVAExHCtlFree(&pVdma->CmdVbva, pHCtl);
3175 }
3176 else
3177 {
3178 WARN(("VBoxVBVAExHCtlCreate failed\n"));
3179 rc = VERR_NO_MEMORY;
3180 }
3181
3182 return rc;
3183}
3184
3185/**
3186 * Worker for vboxVDMASaveLoadExecPerform().
3187 */
3188static int vdmaVBVACtlEnableSubmitSync(PVBOXVDMAHOST pVdma, uint32_t offVram, bool fPaused)
3189{
3190 VBVAENABLE Enable = {0};
3191 Enable.u32Flags = VBVA_F_ENABLE;
3192 Enable.u32Offset = offVram;
3193
3194 VDMA_VBVA_CTL_CYNC_COMPLETION Data;
3195 Data.rc = VERR_NOT_IMPLEMENTED;
3196 int rc = RTSemEventCreate(&Data.hEvent);
3197 if (!RT_SUCCESS(rc))
3198 {
3199 WARN(("RTSemEventCreate failed %d\n", rc));
3200 return rc;
3201 }
3202
3203 rc = vdmaVBVACtlEnableSubmitInternal(pVdma, &Enable, fPaused, vdmaVBVACtlSubmitSyncCompletion, &Data);
3204 if (RT_SUCCESS(rc))
3205 {
3206 rc = RTSemEventWait(Data.hEvent, RT_INDEFINITE_WAIT);
3207 if (RT_SUCCESS(rc))
3208 {
3209 rc = Data.rc;
3210 if (!RT_SUCCESS(rc))
3211 WARN(("vdmaVBVACtlSubmitSyncCompletion returned %d\n", rc));
3212 }
3213 else
3214 WARN(("RTSemEventWait failed %d\n", rc));
3215 }
3216 else
3217 WARN(("vdmaVBVACtlSubmit failed %d\n", rc));
3218
3219 RTSemEventDestroy(Data.hEvent);
3220
3221 return rc;
3222}
3223
3224/**
3225 * Worker for vdmaVBVACtlEnableDisableSubmitInternal().
3226 */
3227static int vdmaVBVACtlDisableSubmitInternal(PVBOXVDMAHOST pVdma, VBVAENABLE *pEnable,
3228 PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
3229{
3230 int rc;
3231 VBVAEXHOSTCTL* pHCtl;
3232 if (VBoxVBVAExHSIsDisabled(&pVdma->CmdVbva))
3233 {
3234 WARN(("VBoxVBVAExHSIsDisabled: disabled"));
3235 return VINF_SUCCESS;
3236 }
3237
3238 pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva, VBVAEXHOSTCTL_TYPE_GHH_DISABLE);
3239 if (!pHCtl)
3240 {
3241 WARN(("VBoxVBVAExHCtlCreate failed\n"));
3242 return VERR_NO_MEMORY;
3243 }
3244
3245 pHCtl->u.cmd.pu8Cmd = (uint8_t*)pEnable;
3246 pHCtl->u.cmd.cbCmd = sizeof (*pEnable);
3247 rc = vdmaVBVACtlSubmit(pVdma, pHCtl, VBVAEXHOSTCTL_SOURCE_GUEST, pfnComplete, pvComplete);
3248 if (RT_SUCCESS(rc))
3249 return VINF_SUCCESS;
3250
3251 WARN(("vdmaVBVACtlSubmit failed rc %d\n", rc));
3252 VBoxVBVAExHCtlFree(&pVdma->CmdVbva, pHCtl);
3253 return rc;
3254}
3255
3256/**
3257 * Worker for vdmaVBVACtlEnableDisableSubmit().
3258 */
3259static int vdmaVBVACtlEnableDisableSubmitInternal(PVBOXVDMAHOST pVdma, VBVAENABLE *pEnable,
3260 PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
3261{
3262 bool fEnable = (pEnable->u32Flags & (VBVA_F_ENABLE | VBVA_F_DISABLE)) == VBVA_F_ENABLE;
3263 if (fEnable)
3264 return vdmaVBVACtlEnableSubmitInternal(pVdma, pEnable, false, pfnComplete, pvComplete);
3265 return vdmaVBVACtlDisableSubmitInternal(pVdma, pEnable, pfnComplete, pvComplete);
3266}
3267
3268/**
3269 * Handler for vboxCmdVBVACmdCtl/VBOXCMDVBVACTL_TYPE_ENABLE.
3270 */
3271static int vdmaVBVACtlEnableDisableSubmit(PVBOXVDMAHOST pVdma, VBOXCMDVBVA_CTL_ENABLE *pEnable)
3272{
3273 VBoxSHGSMICommandMarkAsynchCompletion(&pEnable->Hdr);
3274 int rc = vdmaVBVACtlEnableDisableSubmitInternal(pVdma, &pEnable->Enable, vboxCmdVBVACmdCtlGuestCompletion, pVdma);
3275 if (RT_SUCCESS(rc))
3276 return VINF_SUCCESS;
3277
3278 WARN(("vdmaVBVACtlEnableDisableSubmitInternal failed %d\n", rc));
3279 pEnable->Hdr.i32Result = rc;
3280 rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, &pEnable->Hdr);
3281 AssertRC(rc);
3282 return VINF_SUCCESS;
3283}
3284
3285/**
3286 * @callback_method_impl{FNVBVAEXHOSTCTL_COMPLETE,
3287 * Used by vdmaVBVACtlSubmitSync() and vdmaVBVACtlEnableSubmitSync().}
3288 */
3289static DECLCALLBACK(void) vdmaVBVACtlSubmitSyncCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl,
3290 int rc, void *pvContext)
3291{
3292 RT_NOREF(pVbva, pCtl);
3293 VDMA_VBVA_CTL_CYNC_COMPLETION *pData = (VDMA_VBVA_CTL_CYNC_COMPLETION *)pvContext;
3294 pData->rc = rc;
3295 rc = RTSemEventSignal(pData->hEvent);
3296 if (!RT_SUCCESS(rc))
3297 WARN(("RTSemEventSignal failed %d\n", rc));
3298}
3299
3300
3301static int vdmaVBVACtlSubmitSync(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL* pCtl, VBVAEXHOSTCTL_SOURCE enmSource)
3302{
3303 VDMA_VBVA_CTL_CYNC_COMPLETION Data;
3304 Data.rc = VERR_NOT_IMPLEMENTED;
3305 int rc = RTSemEventCreate(&Data.hEvent);
3306 if (!RT_SUCCESS(rc))
3307 {
3308 WARN(("RTSemEventCreate failed %d\n", rc));
3309 return rc;
3310 }
3311
3312 rc = vdmaVBVACtlSubmit(pVdma, pCtl, enmSource, vdmaVBVACtlSubmitSyncCompletion, &Data);
3313 if (RT_SUCCESS(rc))
3314 {
3315 rc = RTSemEventWait(Data.hEvent, RT_INDEFINITE_WAIT);
3316 if (RT_SUCCESS(rc))
3317 {
3318 rc = Data.rc;
3319 if (!RT_SUCCESS(rc))
3320 WARN(("vdmaVBVACtlSubmitSyncCompletion returned %d\n", rc));
3321 }
3322 else
3323 WARN(("RTSemEventWait failed %d\n", rc));
3324 }
3325 else
3326 Log(("vdmaVBVACtlSubmit failed %d\n", rc));
3327
3328 RTSemEventDestroy(Data.hEvent);
3329
3330 return rc;
3331}
3332
3333/**
3334 * Worker for vboxVDMASaveStateExecPrep().
3335 */
3336static int vdmaVBVAPause(PVBOXVDMAHOST pVdma)
3337{
3338 VBVAEXHOSTCTL Ctl;
3339 Ctl.enmType = VBVAEXHOSTCTL_TYPE_HH_INTERNAL_PAUSE;
3340 return vdmaVBVACtlSubmitSync(pVdma, &Ctl, VBVAEXHOSTCTL_SOURCE_HOST);
3341}
3342
3343/**
3344 * Worker for vboxVDMASaveLoadExecPerform() and vboxVDMASaveStateExecDone().
3345 */
3346static int vdmaVBVAResume(PVBOXVDMAHOST pVdma)
3347{
3348 VBVAEXHOSTCTL Ctl;
3349 Ctl.enmType = VBVAEXHOSTCTL_TYPE_HH_INTERNAL_RESUME;
3350 return vdmaVBVACtlSubmitSync(pVdma, &Ctl, VBVAEXHOSTCTL_SOURCE_HOST);
3351}
3352
3353/**
3354 * Worker for vboxCmdVBVACmdSubmit(), vboxCmdVBVACmdFlush() and vboxCmdVBVATimerRefresh().
3355 */
3356static int vboxVDMACmdSubmitPerform(struct VBOXVDMAHOST *pVdma)
3357{
3358 int rc = VBoxVBVAExHSCheckCommands(&pVdma->CmdVbva);
3359 switch (rc)
3360 {
3361 case VINF_SUCCESS:
3362 return VBoxVDMAThreadEventNotify(&pVdma->Thread);
3363 case VINF_ALREADY_INITIALIZED:
3364 case VINF_EOF:
3365 case VERR_INVALID_STATE:
3366 return VINF_SUCCESS;
3367 default:
3368 Assert(!RT_FAILURE(rc));
3369 return RT_FAILURE(rc) ? rc : VERR_INTERNAL_ERROR;
3370 }
3371}
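
/* Reading of the VBoxVBVAExHSCheckCommands contract as handled above:
 * VINF_SUCCESS means new commands arrived and the worker thread needs a poke;
 * VINF_ALREADY_INITIALIZED, VINF_EOF and VERR_INVALID_STATE all mean there is
 * nothing (new) to schedule and are treated as success; anything else is an
 * internal error. */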
3372
3373
3374/**
3375 * @interface_method_impl{PDMIDISPLAYVBVACALLBACKS,pfnCrCtlSubmit}
3376 */
3377int vboxCmdVBVACmdHostCtl(PPDMIDISPLAYVBVACALLBACKS pInterface,
3378 struct VBOXCRCMDCTL *pCmd,
3379 uint32_t cbCmd,
3380 PFNCRCTLCOMPLETION pfnCompletion,
3381 void *pvCompletion)
3382{
3383 PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
3384 struct VBOXVDMAHOST *pVdma = pVGAState->pVdma;
3385 if (pVdma == NULL)
3386 return VERR_INVALID_STATE;
3387 pCmd->CalloutList.List.pNext = NULL;
3388 return vdmaVBVACtlOpaqueHostSubmit(pVdma, pCmd, cbCmd, pfnCompletion, pvCompletion);
3389}
3390
3391/**
3392 * Argument package from vboxCmdVBVACmdHostCtlSync to vboxCmdVBVACmdHostCtlSyncCb.
3393 */
3394typedef struct VBOXCMDVBVA_CMDHOSTCTL_SYNC
3395{
3396 struct VBOXVDMAHOST *pVdma;
3397 uint32_t fProcessing;
3398 int rc;
3399} VBOXCMDVBVA_CMDHOSTCTL_SYNC;
3400
3401/**
3402 * @interface_method_impl{FNCRCTLCOMPLETION, Used by vboxCmdVBVACmdHostCtlSync.}
3403 */
3404static DECLCALLBACK(void) vboxCmdVBVACmdHostCtlSyncCb(struct VBOXCRCMDCTL *pCmd, uint32_t cbCmd, int rc, void *pvCompletion)
3405{
3406 RT_NOREF(pCmd, cbCmd);
3407 VBOXCMDVBVA_CMDHOSTCTL_SYNC *pData = (VBOXCMDVBVA_CMDHOSTCTL_SYNC *)pvCompletion;
3408
3409 pData->rc = rc;
3410
3411 struct VBOXVDMAHOST *pVdma = pData->pVdma;
3412
3413 ASMAtomicIncS32(&pVdma->i32cHostCrCtlCompleted);
3414
3415 pData->fProcessing = 0;
3416
3417 RTSemEventMultiSignal(pVdma->HostCrCtlCompleteEvent);
3418}
3419
3420/**
3421 * @callback_method_impl{FNVBOXCRCLIENT_CALLOUT, Worker for vboxVDMACrCtlHgsmiSetup }
3422 *
3423 * @note r=bird: not to be confused with the callout function below. sigh.
3424 */
3425static DECLCALLBACK(int) vboxCmdVBVACmdCallout(struct VBOXVDMAHOST *pVdma, struct VBOXCRCMDCTL* pCmd,
3426 VBOXCRCMDCTL_CALLOUT_LISTENTRY *pEntry, PFNVBOXCRCMDCTL_CALLOUT_CB pfnCb)
3427{
3428 pEntry->pfnCb = pfnCb;
3429 int rc = RTCritSectEnter(&pVdma->CalloutCritSect);
3430 if (RT_SUCCESS(rc))
3431 {
3432 RTListAppend(&pCmd->CalloutList.List, &pEntry->Node);
3433 RTCritSectLeave(&pVdma->CalloutCritSect);
3434
3435 RTSemEventMultiSignal(pVdma->HostCrCtlCompleteEvent);
3436 }
3437 else
3438 WARN(("RTCritSectEnter failed %d\n", rc));
3439
3440 return rc;
3441}
3442
3443
3444/**
3445 * Worker for vboxCmdVBVACmdHostCtlSync.
3446 */
3447static int vboxCmdVBVACmdCalloutProcess(struct VBOXVDMAHOST *pVdma, struct VBOXCRCMDCTL* pCmd)
3448{
3449 int rc = VINF_SUCCESS;
3450 for (;;)
3451 {
3452 rc = RTCritSectEnter(&pVdma->CalloutCritSect);
3453 if (RT_SUCCESS(rc))
3454 {
3455 VBOXCRCMDCTL_CALLOUT_LISTENTRY* pEntry = RTListGetFirst(&pCmd->CalloutList.List, VBOXCRCMDCTL_CALLOUT_LISTENTRY, Node);
3456 if (pEntry)
3457 RTListNodeRemove(&pEntry->Node);
3458 RTCritSectLeave(&pVdma->CalloutCritSect);
3459
3460 if (!pEntry)
3461 break;
3462
3463 pEntry->pfnCb(pEntry);
3464 }
3465 else
3466 {
3467 WARN(("RTCritSectEnter failed %d\n", rc));
3468 break;
3469 }
3470 }
3471
3472 return rc;
3473}
3474
3475/**
3476 * @interface_method_impl{PDMIDISPLAYVBVACALLBACKS,pfnCrCtlSubmitSync}
3477 */
3478DECLCALLBACK(int) vboxCmdVBVACmdHostCtlSync(PPDMIDISPLAYVBVACALLBACKS pInterface, struct VBOXCRCMDCTL *pCmd, uint32_t cbCmd)
3479{
3480 PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
3481 struct VBOXVDMAHOST *pVdma = pVGAState->pVdma;
3482 if (pVdma == NULL)
3483 return VERR_INVALID_STATE;
3484
3485 VBOXCMDVBVA_CMDHOSTCTL_SYNC Data;
3486 Data.pVdma = pVdma;
3487 Data.fProcessing = 1;
3488 Data.rc = VERR_INTERNAL_ERROR;
3489 RTListInit(&pCmd->CalloutList.List);
3490 int rc = vdmaVBVACtlOpaqueHostSubmit(pVdma, pCmd, cbCmd, vboxCmdVBVACmdHostCtlSyncCb, &Data);
3491 if (!RT_SUCCESS(rc))
3492 {
3493 WARN(("vdmaVBVACtlOpaqueHostSubmit failed %d", rc));
3494 return rc;
3495 }
3496
3497 while (Data.fProcessing)
3498 {
3499 /* Poll infrequently to make sure no completed message has been missed. */
3500 RTSemEventMultiWait(pVdma->HostCrCtlCompleteEvent, 500);
3501
3502 vboxCmdVBVACmdCalloutProcess(pVdma, pCmd);
3503
3504 if (Data.fProcessing)
3505 RTThreadYield();
3506 }
3507
3508 /* One extra callout pass in case an entry was queued right before completion. */
3509 vboxCmdVBVACmdCalloutProcess(pVdma, pCmd);
3510
3511 /* 'Our' message has been processed, so we should reset the semaphore.
3512 * It is still possible that another message has been processed
3513 * and the semaphore has been signalled again.
3514 * Reset only if there are no other messages completed.
3515 */
3516 int32_t c = ASMAtomicDecS32(&pVdma->i32cHostCrCtlCompleted);
3517 Assert(c >= 0);
3518 if (!c)
3519 RTSemEventMultiReset(pVdma->HostCrCtlCompleteEvent);
3520
3521 rc = Data.rc;
3522 if (!RT_SUCCESS(rc))
3523 WARN(("host call failed %d", rc));
3524
3525 return rc;
3526}
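
/* Synchronization pattern used above: each completion callback increments
 * i32cHostCrCtlCompleted and signals the multi-event semaphore; a synchronous
 * waiter decrements the counter when done and resets the semaphore only when
 * no other completion is pending, so concurrent waiters cannot lose a
 * signal. */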
3527
3528/**
3529 * Handler for VBVA_CMDVBVA_CTL, see vbvaChannelHandler().
3530 */
3531int vboxCmdVBVACmdCtl(PVGASTATE pVGAState, VBOXCMDVBVA_CTL *pCtl, uint32_t cbCtl)
3532{
3533 struct VBOXVDMAHOST *pVdma = pVGAState->pVdma;
3534 switch (pCtl->u32Type)
3535 {
3536 case VBOXCMDVBVACTL_TYPE_3DCTL:
3537 return vdmaVBVACtlGenericGuestSubmit(pVdma, VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE, pCtl, cbCtl);
3538 case VBOXCMDVBVACTL_TYPE_RESIZE:
3539 return vdmaVBVACtlGenericGuestSubmit(pVdma, VBVAEXHOSTCTL_TYPE_GHH_RESIZE, pCtl, cbCtl);
3540 case VBOXCMDVBVACTL_TYPE_ENABLE:
3541 if (cbCtl == sizeof(VBOXCMDVBVA_CTL_ENABLE))
3542 return vdmaVBVACtlEnableDisableSubmit(pVdma, (VBOXCMDVBVA_CTL_ENABLE *)pCtl);
3543 WARN(("incorrect enable size\n"));
3544 break;
3545 default:
3546 WARN(("unsupported type\n"));
3547 break;
3548 }
3549
3550 pCtl->i32Result = VERR_INVALID_PARAMETER;
3551 int rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, pCtl);
3552 AssertRC(rc);
3553 return VINF_SUCCESS;
3554}
3555
3556/**
3557 * Handler for VBVA_CMDVBVA_SUBMIT, see vbvaChannelHandler().
3558 */
3559int vboxCmdVBVACmdSubmit(PVGASTATE pVGAState)
3560{
3561 if (!VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva))
3562 {
3563 WARN(("vdma VBVA is disabled\n"));
3564 return VERR_INVALID_STATE;
3565 }
3566
3567 return vboxVDMACmdSubmitPerform(pVGAState->pVdma);
3568}
3569
3570/**
3571 * Handler for VBVA_CMDVBVA_FLUSH, see vbvaChannelHandler().
3572 */
3573int vboxCmdVBVACmdFlush(PVGASTATE pVGAState)
3574{
3575 WARN(("flush\n"));
3576 if (!VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva))
3577 {
3578 WARN(("vdma VBVA is disabled\n"));
3579 return VERR_INVALID_STATE;
3580 }
3581 return vboxVDMACmdSubmitPerform(pVGAState->pVdma);
3582}
3583
3584/**
3585 * Called from vgaTimerRefresh().
3586 */
3587void vboxCmdVBVATimerRefresh(PVGASTATE pVGAState)
3588{
3589 if (!VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva))
3590 return;
3591 vboxVDMACmdSubmitPerform(pVGAState->pVdma);
3592}
3593
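/**
 * Checks whether the command VBVA channel is currently enabled.
 */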
3594bool vboxCmdVBVAIsEnabled(PVGASTATE pVGAState)
3595{
3596 return VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva);
3597}
3598
3599#endif /* VBOX_WITH_CRHGSMI */
3600
3601
3602/*
3603 *
3604 *
3605 * Saved state.
3606 * Saved state.
3607 * Saved state.
3608 *
3609 *
3610 */
3611
int vboxVDMASaveStateExecPrep(struct VBOXVDMAHOST *pVdma)
{
#ifdef VBOX_WITH_CRHGSMI
    int rc = vdmaVBVAPause(pVdma);
    if (RT_SUCCESS(rc))
        return VINF_SUCCESS;

    if (rc != VERR_INVALID_STATE)
    {
        WARN(("vdmaVBVAPause failed %d\n", rc));
        return rc;
    }

# ifdef DEBUG_misha
    WARN(("debug prep"));
# endif

    PVGASTATE pVGAState = pVdma->pVGAState;
    PVBOXVDMACMD_CHROMIUM_CTL pCmd = (PVBOXVDMACMD_CHROMIUM_CTL)vboxVDMACrCtlCreate(
        VBOXVDMACMD_CHROMIUM_CTL_TYPE_SAVESTATE_BEGIN, sizeof(*pCmd));
    Assert(pCmd);
    if (pCmd)
    {
        rc = vboxVDMACrCtlPost(pVGAState, pCmd, sizeof(*pCmd));
        AssertRC(rc);
        if (RT_SUCCESS(rc))
            rc = vboxVDMACrCtlGetRc(pCmd);
        vboxVDMACrCtlRelease(pCmd);
        return rc;
    }
    return VERR_NO_MEMORY;
#else
    RT_NOREF(pVdma);
    return VINF_SUCCESS;
#endif
}

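/**
 * Counterpart to vboxVDMASaveStateExecPrep: resumes command VBVA processing,
 * or, when command VBVA is not active, notifies the 3D backend with a
 * SAVESTATE_END control instead.
 */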
int vboxVDMASaveStateExecDone(struct VBOXVDMAHOST *pVdma)
{
#ifdef VBOX_WITH_CRHGSMI
    int rc = vdmaVBVAResume(pVdma);
    if (RT_SUCCESS(rc))
        return VINF_SUCCESS;

    if (rc != VERR_INVALID_STATE)
    {
        WARN(("vdmaVBVAResume failed %d\n", rc));
        return rc;
    }

# ifdef DEBUG_misha
    WARN(("debug done"));
# endif

    PVGASTATE pVGAState = pVdma->pVGAState;
    PVBOXVDMACMD_CHROMIUM_CTL pCmd = (PVBOXVDMACMD_CHROMIUM_CTL)vboxVDMACrCtlCreate(
        VBOXVDMACMD_CHROMIUM_CTL_TYPE_SAVESTATE_END, sizeof(*pCmd));
    Assert(pCmd);
    if (pCmd)
    {
        rc = vboxVDMACrCtlPost(pVGAState, pCmd, sizeof(*pCmd));
        AssertRC(rc);
        if (RT_SUCCESS(rc))
            rc = vboxVDMACrCtlGetRc(pCmd);
        vboxVDMACrCtlRelease(pCmd);
        return rc;
    }
    return VERR_NO_MEMORY;
#else
    RT_NOREF(pVdma);
    return VINF_SUCCESS;
#endif
}

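/**
 * Saves the VDMA state to @a pSSM.  Writes UINT32_MAX if command VBVA is not
 * enabled; otherwise writes the VBVA offset into VRAM and lets the worker
 * thread save the rest (VBVAEXHOSTCTL_TYPE_HH_SAVESTATE).
 */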
int vboxVDMASaveStateExecPerform(struct VBOXVDMAHOST *pVdma, PSSMHANDLE pSSM)
{
    int rc;
#ifndef VBOX_WITH_CRHGSMI
    RT_NOREF(pVdma, pSSM);

#else
    if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
#endif
    {
        /* Not enabled (or no CrHgsmi support): store the no-state marker. */
        rc = SSMR3PutU32(pSSM, UINT32_MAX);
        AssertRCReturn(rc, rc);
        return VINF_SUCCESS;
    }

#ifdef VBOX_WITH_CRHGSMI
    PVGASTATE pVGAState = pVdma->pVGAState;
    uint8_t *pu8VramBase = pVGAState->vram_ptrR3;

    rc = SSMR3PutU32(pSSM, (uint32_t)(((uint8_t *)pVdma->CmdVbva.pVBVA) - pu8VramBase));
    AssertRCReturn(rc, rc);

    VBVAEXHOSTCTL HCtl;
    HCtl.enmType = VBVAEXHOSTCTL_TYPE_HH_SAVESTATE;
    HCtl.u.state.pSSM = pSSM;
    HCtl.u.state.u32Version = 0;
    return vdmaVBVACtlSubmitSync(pVdma, &HCtl, VBVAEXHOSTCTL_SOURCE_HOST);
#endif
}

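/**
 * Loads the VDMA state saved by vboxVDMASaveStateExecPerform, re-enabling
 * command VBVA at the restored VRAM offset and handing the rest to the
 * worker thread (VBVAEXHOSTCTL_TYPE_HH_LOADSTATE).
 */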
int vboxVDMASaveLoadExecPerform(struct VBOXVDMAHOST *pVdma, PSSMHANDLE pSSM, uint32_t u32Version)
{
    uint32_t u32;
    int rc = SSMR3GetU32(pSSM, &u32);
    AssertLogRelRCReturn(rc, rc);

    if (u32 != UINT32_MAX)
    {
#ifdef VBOX_WITH_CRHGSMI
        rc = vdmaVBVACtlEnableSubmitSync(pVdma, u32, true);
        AssertLogRelRCReturn(rc, rc);

        Assert(pVdma->CmdVbva.i32State == VBVAEXHOSTCONTEXT_ESTATE_PAUSED);

        VBVAEXHOSTCTL HCtl;
        HCtl.enmType = VBVAEXHOSTCTL_TYPE_HH_LOADSTATE;
        HCtl.u.state.pSSM = pSSM;
        HCtl.u.state.u32Version = u32Version;
        rc = vdmaVBVACtlSubmitSync(pVdma, &HCtl, VBVAEXHOSTCTL_SOURCE_HOST);
        AssertLogRelRCReturn(rc, rc);

        rc = vdmaVBVAResume(pVdma);
        AssertLogRelRCReturn(rc, rc);

        return VINF_SUCCESS;
#else
        RT_NOREF(pVdma, u32Version);
        WARN(("Unsupported VBVACtl info!\n"));
        return VERR_VERSION_MISMATCH;
#endif
    }

    return VINF_SUCCESS;
}

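/**
 * Called when loading the saved state is done; queues a LOADSTATE_DONE
 * control on the worker thread (presumably to kick off processing of any
 * commands restored with the saved state).
 */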
int vboxVDMASaveLoadDone(struct VBOXVDMAHOST *pVdma)
{
#ifdef VBOX_WITH_CRHGSMI
    if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
        return VINF_SUCCESS;

/** @todo r=bird: BTW. would be great if you put in a couple of comments here and there explaining what
 * the purpose of this code is. */
    VBVAEXHOSTCTL *pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva, VBVAEXHOSTCTL_TYPE_HH_LOADSTATE_DONE);
    if (!pHCtl)
    {
        WARN(("VBoxVBVAExHCtlCreate failed\n"));
        return VERR_NO_MEMORY;
    }

    /* Sanity: this control carries no command payload. */
    pHCtl->u.cmd.pu8Cmd = NULL;
    pHCtl->u.cmd.cbCmd = 0;

    /* A NULL completion callback just frees the control when it is done. */
    int rc = vdmaVBVACtlSubmit(pVdma, pHCtl, VBVAEXHOSTCTL_SOURCE_HOST, NULL, NULL);
    if (RT_FAILURE(rc))
    {
        Log(("vdmaVBVACtlSubmit failed rc %d\n", rc));
        VBoxVBVAExHCtlFree(&pVdma->CmdVbva, pHCtl);
        return rc;
    }
#else
    RT_NOREF(pVdma);
#endif
    return VINF_SUCCESS;
}
