VirtualBox

source: vbox/trunk/src/VBox/Devices/Graphics/DevVGA_VDMA.cpp@70600

Last change on this file since 70600 was 70600, checked in by vboxsync, 7 years ago

DevVGA: cleanup in progress.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 128.3 KB
 
1/* $Id: DevVGA_VDMA.cpp 70600 2018-01-16 15:56:12Z vboxsync $ */
2/** @file
3 * Video DMA (VDMA) support.
4 */
5
6/*
7 * Copyright (C) 2006-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_DEV_VGA
23#include <VBox/VMMDev.h>
24#include <VBox/vmm/pdmdev.h>
25#include <VBox/vmm/pgm.h>
26#include <VBoxVideo.h>
27#include <iprt/semaphore.h>
28#include <iprt/thread.h>
29#include <iprt/mem.h>
30#include <iprt/asm.h>
31#include <iprt/list.h>
32#include <iprt/param.h>
33
34#include "DevVGA.h"
35#include "HGSMI/SHGSMIHost.h"
36
37#include <VBoxVideo3D.h>
38#include <VBoxVideoHost3D.h>
39
40#ifdef DEBUG_misha
41# define VBOXVDBG_MEMCACHE_DISABLE
42#endif
43
44#ifndef VBOXVDBG_MEMCACHE_DISABLE
45# include <iprt/memcache.h>
46#endif
47
48
49/*********************************************************************************************************************************
50* Defined Constants And Macros *
51*********************************************************************************************************************************/
52#ifdef DEBUG_misha
53# define WARN_BP() do { AssertFailed(); } while (0)
54#else
55# define WARN_BP() do { } while (0)
56#endif
57#define WARN(_msg) do { \
58 LogRel(_msg); \
59 WARN_BP(); \
60 } while (0)
61
62#define VBOXVDMATHREAD_STATE_TERMINATED 0
63#define VBOXVDMATHREAD_STATE_CREATING 1
64#define VBOXVDMATHREAD_STATE_CREATED 3
65#define VBOXVDMATHREAD_STATE_TERMINATING 4
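/* Illustrative sketch (not part of the original file): the worker thread
 * lifecycle implied by the defines above, as driven by the functions below:
 *
 *   TERMINATED  --VBoxVDMAThreadCreate()------------------->  CREATING
 *   CREATING    --VBoxVDMAThreadNotifyConstructSucceeded()->  CREATED
 *   CREATED     --VBoxVDMAThreadTerm()--------------------->  TERMINATING
 *   TERMINATING --VBoxVDMAThreadCleanup()------------------>  TERMINATED
 */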
66
67
68/*********************************************************************************************************************************
69* Structures and Typedefs *
70*********************************************************************************************************************************/
71struct VBOXVDMATHREAD;
72
73typedef DECLCALLBACKPTR(void, PFNVBOXVDMATHREAD_CHANGED)(struct VBOXVDMATHREAD *pThread, int rc, void *pvThreadContext, void *pvChangeContext);
74
75#ifdef VBOX_WITH_CRHGSMI
76static DECLCALLBACK(int) vboxCmdVBVACmdCallout(struct VBOXVDMAHOST *pVdma, struct VBOXCRCMDCTL* pCmd, VBOXCRCMDCTL_CALLOUT_LISTENTRY *pEntry, PFNVBOXCRCMDCTL_CALLOUT_CB pfnCb);
77#endif
78
79
80typedef struct VBOXVDMATHREAD
81{
82 RTTHREAD hWorkerThread;
83 RTSEMEVENT hEvent;
84 volatile uint32_t u32State;
85 PFNVBOXVDMATHREAD_CHANGED pfnChanged;
86 void *pvChanged;
87} VBOXVDMATHREAD, *PVBOXVDMATHREAD;
88
89
90/* State transitions:
91 *
92 * submitter | processor
93 *
94 * LISTENING ---> PROCESSING
95 *
96 * */
97#define VBVAEXHOSTCONTEXT_STATE_LISTENING 0
98#define VBVAEXHOSTCONTEXT_STATE_PROCESSING 1
99
100#define VBVAEXHOSTCONTEXT_ESTATE_DISABLED -1
101#define VBVAEXHOSTCONTEXT_ESTATE_PAUSED 0
102#define VBVAEXHOSTCONTEXT_ESTATE_ENABLED 1
103
104typedef struct VBVAEXHOSTCONTEXT
105{
106 VBVABUFFER *pVBVA;
107 volatile int32_t i32State;
108 volatile int32_t i32EnableState;
109 volatile uint32_t u32cCtls;
110 /* critical section for accessing ctl lists */
111 RTCRITSECT CltCritSect;
112 RTLISTANCHOR GuestCtlList;
113 RTLISTANCHOR HostCtlList;
114#ifndef VBOXVDBG_MEMCACHE_DISABLE
115 RTMEMCACHE CtlCache;
116#endif
117} VBVAEXHOSTCONTEXT;
118
119typedef enum
120{
121 VBVAEXHOSTCTL_TYPE_UNDEFINED = 0,
122 VBVAEXHOSTCTL_TYPE_HH_INTERNAL_PAUSE,
123 VBVAEXHOSTCTL_TYPE_HH_INTERNAL_RESUME,
124 VBVAEXHOSTCTL_TYPE_HH_SAVESTATE,
125 VBVAEXHOSTCTL_TYPE_HH_LOADSTATE,
126 VBVAEXHOSTCTL_TYPE_HH_LOADSTATE_DONE,
127 VBVAEXHOSTCTL_TYPE_HH_BE_OPAQUE,
128 VBVAEXHOSTCTL_TYPE_HH_ON_HGCM_UNLOAD,
129 VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE,
130 VBVAEXHOSTCTL_TYPE_GHH_ENABLE,
131 VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED,
132 VBVAEXHOSTCTL_TYPE_GHH_DISABLE,
133 VBVAEXHOSTCTL_TYPE_GHH_RESIZE
134} VBVAEXHOSTCTL_TYPE;
135
136struct VBVAEXHOSTCTL;
137
138typedef DECLCALLBACK(void) FNVBVAEXHOSTCTL_COMPLETE(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl, int rc, void *pvComplete);
139typedef FNVBVAEXHOSTCTL_COMPLETE *PFNVBVAEXHOSTCTL_COMPLETE;
140
141typedef struct VBVAEXHOSTCTL
142{
143 RTLISTNODE Node;
144 VBVAEXHOSTCTL_TYPE enmType;
145 union
146 {
147 struct
148 {
149 uint8_t * pu8Cmd;
150 uint32_t cbCmd;
151 } cmd;
152
153 struct
154 {
155 PSSMHANDLE pSSM;
156 uint32_t u32Version;
157 } state;
158 } u;
159 PFNVBVAEXHOSTCTL_COMPLETE pfnComplete;
160 void *pvComplete;
161} VBVAEXHOSTCTL;
162
163/* VBoxVBVAExHP**, i.e. processor functions, can NOT be called concurrently with each other,
164 * but can be called with other VBoxVBVAExS** (submitter) functions except Init/Start/Term, apparently.
165 * They may only be called by the processor, i.e. the entity that acquired the processor state by a direct or indirect call to VBoxVBVAExHSCheckCommands.
166 * See more detailed comments in the headers for the function definitions. */
167typedef enum
168{
169 VBVAEXHOST_DATA_TYPE_NO_DATA = 0,
170 VBVAEXHOST_DATA_TYPE_CMD,
171 VBVAEXHOST_DATA_TYPE_HOSTCTL,
172 VBVAEXHOST_DATA_TYPE_GUESTCTL
173} VBVAEXHOST_DATA_TYPE;
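/* Illustrative usage sketch (an assumption, not from the original file; the
 * payload pu8Cmd/cbCmd and the completion pair pfnComplete/pvComplete are
 * hypothetical): a submitter allocates a control command, fills it in and
 * queues it; the processor later fetches it via VBoxVBVAExHPDataGet() and
 * completes it with VBoxVBVAExHPDataCompleteCtl():
 *
 *   VBVAEXHOSTCTL *pCtl = VBoxVBVAExHCtlCreate(pCmdVbva, VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE);
 *   if (pCtl)
 *   {
 *       pCtl->u.cmd.pu8Cmd = pu8Cmd;
 *       pCtl->u.cmd.cbCmd  = cbCmd;
 *       int rc = VBoxVBVAExHCtlSubmit(pCmdVbva, pCtl, VBVAEXHOSTCTL_SOURCE_GUEST,
 *                                     pfnComplete, pvComplete);
 *   }
 */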
174
175
176#ifdef VBOX_WITH_CRHGSMI
177typedef struct VBOXVDMA_SOURCE
178{
179 VBVAINFOSCREEN Screen;
180 VBOXCMDVBVA_SCREENMAP_DECL(uint32_t, aTargetMap);
181} VBOXVDMA_SOURCE;
182#endif
183
184typedef struct VBOXVDMAHOST
185{
186 PHGSMIINSTANCE pHgsmi;
187 PVGASTATE pVGAState;
188#ifdef VBOX_WITH_CRHGSMI
189 VBVAEXHOSTCONTEXT CmdVbva;
190 VBOXVDMATHREAD Thread;
191 VBOXCRCMD_SVRINFO CrSrvInfo;
192 VBVAEXHOSTCTL* pCurRemainingHostCtl;
193 RTSEMEVENTMULTI HostCrCtlCompleteEvent;
194 int32_t volatile i32cHostCrCtlCompleted;
195 RTCRITSECT CalloutCritSect;
196// VBOXVDMA_SOURCE aSources[VBOX_VIDEO_MAX_SCREENS];
197#endif
198#ifdef VBOX_VDMA_WITH_WATCHDOG
199 PTMTIMERR3 WatchDogTimer;
200#endif
201} VBOXVDMAHOST, *PVBOXVDMAHOST;
202
203
204/**
205 * List selector for VBoxVBVAExHCtlSubmit(), vdmaVBVACtlSubmit().
206 */
207typedef enum
208{
209 VBVAEXHOSTCTL_SOURCE_GUEST = 0,
210 VBVAEXHOSTCTL_SOURCE_HOST
211} VBVAEXHOSTCTL_SOURCE;
212
213
214/*********************************************************************************************************************************
215* Internal Functions *
216*********************************************************************************************************************************/
217#ifdef VBOX_WITH_CRHGSMI
218static int vdmaVBVANotifyDisable(PVGASTATE pVGAState);
219static void VBoxVBVAExHPDataCompleteCmd(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint32_t cbCmd);
220static void VBoxVBVAExHPDataCompleteCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL *pCtl, int rc);
221static int VBoxVDMAThreadEventNotify(PVBOXVDMATHREAD pThread);
222static int vboxVDMACmdExecBpbTransfer(PVBOXVDMAHOST pVdma, const VBOXVDMACMD_DMA_BPB_TRANSFER *pTransfer, uint32_t cbBuffer);
223static int vdmaVBVACtlSubmitSync(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL *pCtl, VBVAEXHOSTCTL_SOURCE enmSource);
224static DECLCALLBACK(void) vdmaVBVACtlSubmitSyncCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl,
225 int rc, void *pvContext);
226
227/* VBoxVBVAExHP**, i.e. processor functions, can NOT be called concurrently with each other,
228 * but can be called concurrently with the submitter (VBoxVBVAExS**) functions, except Init/Start/Term, apparently. */
229#endif /* VBOX_WITH_CRHGSMI */
230
231
232
233#ifdef VBOX_WITH_CRHGSMI
234
235/**
236 * Creates a host control command.
237 */
238static VBVAEXHOSTCTL *VBoxVBVAExHCtlCreate(VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL_TYPE enmType)
239{
240# ifndef VBOXVDBG_MEMCACHE_DISABLE
241 VBVAEXHOSTCTL *pCtl = (VBVAEXHOSTCTL*)RTMemCacheAlloc(pCmdVbva->CtlCache);
242# else
243 VBVAEXHOSTCTL *pCtl = (VBVAEXHOSTCTL*)RTMemAlloc(sizeof(VBVAEXHOSTCTL));
244# endif
245 if (pCtl)
246 {
247 RT_ZERO(*pCtl);
248 pCtl->enmType = enmType;
249 }
250 else
251 WARN(("VBoxVBVAExHCtlAlloc failed\n"));
252 return pCtl;
253}
254
255/**
256 * Destroys a host control command.
257 */
258static void VBoxVBVAExHCtlFree(VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL *pCtl)
259{
260# ifndef VBOXVDBG_MEMCACHE_DISABLE
261 RTMemCacheFree(pCmdVbva->CtlCache, pCtl);
262# else
263 RTMemFree(pCtl);
264# endif
265}
266
267
268
269/**
270 * Works the VBVA state.
271 */
272static int vboxVBVAExHSProcessorAcquire(struct VBVAEXHOSTCONTEXT *pCmdVbva)
273{
274 Assert(pCmdVbva->i32State >= VBVAEXHOSTCONTEXT_STATE_LISTENING);
275
276 if (ASMAtomicCmpXchgS32(&pCmdVbva->i32State, VBVAEXHOSTCONTEXT_STATE_PROCESSING, VBVAEXHOSTCONTEXT_STATE_LISTENING))
277 return VINF_SUCCESS;
278 return VERR_SEM_BUSY;
279}
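/* Illustrative sketch (not from the original file) of the acquire/work/release
 * cycle built on the compare-and-swap above; VBoxVBVAExHSCheckCommands() and
 * VBoxVBVAExHPDataGet() below follow exactly this pattern:
 *
 *   if (RT_SUCCESS(vboxVBVAExHSProcessorAcquire(pCmdVbva)))
 *   {
 *       // We are the one and only processor now; fetch and handle data.
 *       vboxVBVAExHPProcessorRelease(pCmdVbva);
 *   }
 *   // else VERR_SEM_BUSY: another thread is processing, nothing to do.
 */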
280
281/**
282 * Worker for vboxVBVAExHPDataGet() and VBoxVBVAExHPCheckHostCtlOnDisable() that
283 * gets the next control command.
284 *
285 * @returns Pointer to command if found, NULL if not.
286 * @param pCmdVbva The VBVA command context.
287 * @param pfHostCtl Where to indicate whether it's a host or guest
288 * control command.
289 * @param fHostOnlyMode Whether to only fetch host commands, or both.
290 */
291static VBVAEXHOSTCTL *vboxVBVAExHPCheckCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, bool *pfHostCtl, bool fHostOnlyMode)
292{
293 Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
294
295 if (!fHostOnlyMode && !ASMAtomicUoReadU32(&pCmdVbva->u32cCtls))
296 return NULL;
297
298 int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
299 if (RT_SUCCESS(rc))
300 {
301 VBVAEXHOSTCTL *pCtl = RTListGetFirst(&pCmdVbva->HostCtlList, VBVAEXHOSTCTL, Node);
302 if (pCtl)
303 *pfHostCtl = true;
304 else if (!fHostOnlyMode)
305 {
306 if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
307 {
308 pCtl = RTListGetFirst(&pCmdVbva->GuestCtlList, VBVAEXHOSTCTL, Node);
309 /* pCtl cannot be NULL here since pCmdVbva->u32cCtls is non-zero
310 * and there are no HostCtl commands. */
311 Assert(pCtl);
312 *pfHostCtl = false;
313 }
314 }
315
316 if (pCtl)
317 {
318 RTListNodeRemove(&pCtl->Node);
319 ASMAtomicDecU32(&pCmdVbva->u32cCtls);
320 }
321
322 RTCritSectLeave(&pCmdVbva->CltCritSect);
323
324 return pCtl;
325 }
326 else
327 WARN(("RTCritSectEnter failed %Rrc\n", rc));
328
329 return NULL;
330}
331
332/**
333 * Worker for vboxVDMACrHgcmHandleEnableRemainingHostCommand().
334 */
335static VBVAEXHOSTCTL *VBoxVBVAExHPCheckHostCtlOnDisable(struct VBVAEXHOSTCONTEXT *pCmdVbva)
336{
337 bool fHostCtl = false;
338 VBVAEXHOSTCTL *pCtl = vboxVBVAExHPCheckCtl(pCmdVbva, &fHostCtl, true);
339 Assert(!pCtl || fHostCtl);
340 return pCtl;
341}
342
343/**
344 * Worker for vboxVBVAExHPCheckProcessCtlInternal() and
345 * vboxVDMACrGuestCtlProcess() / VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED.
346 */
347static int VBoxVBVAExHPPause(struct VBVAEXHOSTCONTEXT *pCmdVbva)
348{
349 if (pCmdVbva->i32EnableState < VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
350 {
351 WARN(("Invalid state\n"));
352 return VERR_INVALID_STATE;
353 }
354
355 ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_PAUSED);
356 return VINF_SUCCESS;
357}
358
359/**
360 * Works the VBVA state in response to VBVAEXHOSTCTL_TYPE_HH_INTERNAL_RESUME.
361 */
362static int VBoxVBVAExHPResume(struct VBVAEXHOSTCONTEXT *pCmdVbva)
363{
364 if (pCmdVbva->i32EnableState != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
365 {
366 WARN(("Invalid state\n"));
367 return VERR_INVALID_STATE;
368 }
369
370 ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_ENABLED);
371 return VINF_SUCCESS;
372}
373
374/**
375 * Worker for vboxVBVAExHPDataGet that processes PAUSE and RESUME requests.
376 *
377 * Unclear why these cannot be handled the normal way.
378 *
379 * @returns true if handled, false if not.
380 * @param pCmdVbva The VBVA context.
381 * @param pCtl The host control command.
382 */
383static bool vboxVBVAExHPCheckProcessCtlInternal(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL* pCtl)
384{
385 switch (pCtl->enmType)
386 {
387 case VBVAEXHOSTCTL_TYPE_HH_INTERNAL_PAUSE:
388 {
389 VBoxVBVAExHPPause(pCmdVbva);
390 VBoxVBVAExHPDataCompleteCtl(pCmdVbva, pCtl, VINF_SUCCESS);
391 return true;
392 }
393 case VBVAEXHOSTCTL_TYPE_HH_INTERNAL_RESUME:
394 {
395 VBoxVBVAExHPResume(pCmdVbva);
396 VBoxVBVAExHPDataCompleteCtl(pCmdVbva, pCtl, VINF_SUCCESS);
397 return true;
398 }
399 default:
400 return false;
401 }
402}
403
404/**
405 * Works the VBVA state.
406 */
407static void vboxVBVAExHPProcessorRelease(struct VBVAEXHOSTCONTEXT *pCmdVbva)
408{
409 Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
410
411 ASMAtomicWriteS32(&pCmdVbva->i32State, VBVAEXHOSTCONTEXT_STATE_LISTENING);
412}
413
414/**
415 * Works the VBVA state.
416 */
417static void vboxVBVAExHPHgEventSet(struct VBVAEXHOSTCONTEXT *pCmdVbva)
418{
419 Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
420 if (pCmdVbva->pVBVA)
421 ASMAtomicOrU32(&pCmdVbva->pVBVA->hostFlags.u32HostEvents, VBVA_F_STATE_PROCESSING);
422}
423
424/**
425 * Works the VBVA state.
426 */
427static void vboxVBVAExHPHgEventClear(struct VBVAEXHOSTCONTEXT *pCmdVbva)
428{
429 Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
430 if (pCmdVbva->pVBVA)
431 ASMAtomicAndU32(&pCmdVbva->pVBVA->hostFlags.u32HostEvents, ~VBVA_F_STATE_PROCESSING);
432}
433
434/**
435 * Worker for vboxVBVAExHPDataGet.
436 * @thread VDMA
437 * @todo r=bird: revalidate this code.
438 */
439static int vboxVBVAExHPCmdGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd)
440{
441 Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
442 Assert(pCmdVbva->i32EnableState > VBVAEXHOSTCONTEXT_ESTATE_PAUSED);
443
444 VBVABUFFER *pVBVA = pCmdVbva->pVBVA;
445
446 uint32_t indexRecordFirst = pVBVA->indexRecordFirst;
447 uint32_t indexRecordFree = pVBVA->indexRecordFree;
448
449 Log(("first = %d, free = %d\n", indexRecordFirst, indexRecordFree));
450
451 if (indexRecordFirst == indexRecordFree)
452 {
453 /* No records to process. Return without assigning output variables. */
454 return VINF_EOF;
455 }
456
457 uint32_t cbRecordCurrent = ASMAtomicReadU32(&pVBVA->aRecords[indexRecordFirst].cbRecord);
458
459 /* A new record needs to be processed. */
460 if (cbRecordCurrent & VBVA_F_RECORD_PARTIAL)
461 {
462 /* the record is being recorded, try again */
463 return VINF_TRY_AGAIN;
464 }
465
466 uint32_t cbRecord = cbRecordCurrent & ~VBVA_F_RECORD_PARTIAL;
467
468 if (!cbRecord)
469 {
470 /* the record is being recorded, try again */
471 return VINF_TRY_AGAIN;
472 }
473
474 /* we should not get partial commands here actually */
475 Assert(cbRecord);
476
477 /* The size of the largest contiguous chunk in the ring buffer. */
478 uint32_t u32BytesTillBoundary = pVBVA->cbData - pVBVA->off32Data;
479
480 /* The pointer to data in the ring buffer. */
481 uint8_t *pSrc = &pVBVA->au8Data[pVBVA->off32Data];
482
483 /* Fetch or point the data. */
484 if (u32BytesTillBoundary >= cbRecord)
485 {
486 /* The command does not cross buffer boundary. Return address in the buffer. */
487 *ppCmd = pSrc;
488 *pcbCmd = cbRecord;
489 return VINF_SUCCESS;
490 }
491
492 LogRel(("CmdVbva: cross-bound writes unsupported\n"));
493 return VERR_INVALID_STATE;
494}
495
496static void VBoxVBVAExHPDataCompleteCmd(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint32_t cbCmd)
497{
498 VBVABUFFER *pVBVA = pCmdVbva->pVBVA;
499 pVBVA->off32Data = (pVBVA->off32Data + cbCmd) % pVBVA->cbData;
500
501 pVBVA->indexRecordFirst = (pVBVA->indexRecordFirst + 1) % RT_ELEMENTS(pVBVA->aRecords);
502}
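/* Worked example (illustrative numbers): with cbData = 4096 and
 * off32Data = 3896, completing a 200-byte command gives
 * (3896 + 200) % 4096 = 0, i.e. the read offset wraps back to the start of
 * the ring exactly at the boundary; vboxVBVAExHPCmdGet() above rejects any
 * record that would straddle the boundary, so a partial wrap cannot occur.
 */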
503
504/**
505 * Control command completion routine used by many.
506 */
507static void VBoxVBVAExHPDataCompleteCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL *pCtl, int rc)
508{
509 if (pCtl->pfnComplete)
510 pCtl->pfnComplete(pCmdVbva, pCtl, rc, pCtl->pvComplete);
511 else
512 VBoxVBVAExHCtlFree(pCmdVbva, pCtl);
513}
514
515
516/**
517 * Worker for VBoxVBVAExHPDataGet.
518 * @thread VDMA
519 */
520static VBVAEXHOST_DATA_TYPE vboxVBVAExHPDataGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd)
521{
522 Assert(pCmdVbva->i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
523 VBVAEXHOSTCTL *pCtl;
524 bool fHostClt;
525
526 for (;;)
527 {
528 pCtl = vboxVBVAExHPCheckCtl(pCmdVbva, &fHostClt, false);
529 if (pCtl)
530 {
531 if (fHostClt)
532 {
533 if (!vboxVBVAExHPCheckProcessCtlInternal(pCmdVbva, pCtl))
534 {
535 *ppCmd = (uint8_t*)pCtl;
536 *pcbCmd = sizeof (*pCtl);
537 return VBVAEXHOST_DATA_TYPE_HOSTCTL;
538 }
539 continue; /* Processed by vboxVBVAExHPCheckProcessCtlInternal, get next. */
540 }
541 *ppCmd = (uint8_t*)pCtl;
542 *pcbCmd = sizeof (*pCtl);
543 return VBVAEXHOST_DATA_TYPE_GUESTCTL;
544 }
545
546 if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) <= VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
547 return VBVAEXHOST_DATA_TYPE_NO_DATA;
548
549 int rc = vboxVBVAExHPCmdGet(pCmdVbva, ppCmd, pcbCmd);
550 switch (rc)
551 {
552 case VINF_SUCCESS:
553 return VBVAEXHOST_DATA_TYPE_CMD;
554 case VINF_EOF:
555 return VBVAEXHOST_DATA_TYPE_NO_DATA;
556 case VINF_TRY_AGAIN:
557 RTThreadSleep(1);
558 continue;
559 default:
560 /* This is something really unexpected, i.e. most likely the guest has written something incorrect to the VBVA buffer. */
561 WARN(("Warning: vboxVBVAExHPCmdGet returned unexpected status %Rrc\n", rc));
562 return VBVAEXHOST_DATA_TYPE_NO_DATA;
563 }
564 }
565 /* not reached */
566}
567
568/**
569 * Called by vboxVDMAWorkerThread to get the next command to process.
570 * @thread VDMA
571 */
572static VBVAEXHOST_DATA_TYPE VBoxVBVAExHPDataGet(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t **ppCmd, uint32_t *pcbCmd)
573{
574 VBVAEXHOST_DATA_TYPE enmType = vboxVBVAExHPDataGet(pCmdVbva, ppCmd, pcbCmd);
575 if (enmType == VBVAEXHOST_DATA_TYPE_NO_DATA)
576 {
577 vboxVBVAExHPHgEventClear(pCmdVbva);
578 vboxVBVAExHPProcessorRelease(pCmdVbva);
579
580 /*
581 * We need to prevent racing between us clearing the flag and command check/submission thread, i.e.
582 * 1. we check the queue -> and it is empty
583 * 2. submitter adds command to the queue
584 * 3. submitter checks the "processing" -> and it is true , thus it does not submit a notification
585 * 4. we clear the "processing" state
586 * 5. ->here we need to re-check the queue state to ensure we do not leak the notification of the above command
587 * 6. if the queue appears to be not-empty set the "processing" state back to "true"
588 */
589 int rc = vboxVBVAExHSProcessorAcquire(pCmdVbva);
590 if (RT_SUCCESS(rc))
591 {
592 /* we are the processor now */
593 enmType = vboxVBVAExHPDataGet(pCmdVbva, ppCmd, pcbCmd);
594 if (enmType == VBVAEXHOST_DATA_TYPE_NO_DATA)
595 {
596 vboxVBVAExHPProcessorRelease(pCmdVbva);
597 return VBVAEXHOST_DATA_TYPE_NO_DATA;
598 }
599
600 vboxVBVAExHPHgEventSet(pCmdVbva);
601 }
602 }
603
604 return enmType;
605}
606
607/**
608 * Checks for pending VBVA command or (internal) control command.
609 */
610DECLINLINE(bool) vboxVBVAExHSHasCommands(struct VBVAEXHOSTCONTEXT *pCmdVbva)
611{
612 VBVABUFFER *pVBVA = pCmdVbva->pVBVA;
613 if (pVBVA)
614 {
615 uint32_t indexRecordFirst = pVBVA->indexRecordFirst;
616 uint32_t indexRecordFree = pVBVA->indexRecordFree;
617
618 if (indexRecordFirst != indexRecordFree)
619 return true;
620 }
621
622 return ASMAtomicReadU32(&pCmdVbva->u32cCtls) > 0;
623}
624
625/** Checks whether new commands are ready for processing.
626 * @returns
627 * VINF_SUCCESS - there are commands in the queue, and the calling thread is now the processor (i.e. typically it would delegate processing to a worker thread)
628 * VINF_EOF - no commands in the queue
629 * VINF_ALREADY_INITIALIZED - another thread is already processing the commands
630 * VERR_INVALID_STATE - the VBVA is paused or pausing */
631static int VBoxVBVAExHSCheckCommands(struct VBVAEXHOSTCONTEXT *pCmdVbva)
632{
633 int rc = vboxVBVAExHSProcessorAcquire(pCmdVbva);
634 if (RT_SUCCESS(rc))
635 {
636 /* we are the processor now */
637 if (vboxVBVAExHSHasCommands(pCmdVbva))
638 {
639 vboxVBVAExHPHgEventSet(pCmdVbva);
640 return VINF_SUCCESS;
641 }
642
643 vboxVBVAExHPProcessorRelease(pCmdVbva);
644 return VINF_EOF;
645 }
646 if (rc == VERR_SEM_BUSY)
647 return VINF_ALREADY_INITIALIZED;
648 return VERR_INVALID_STATE;
649}
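/* Caller-side sketch (illustrative; vdmaVBVACtlSubmit() further down does
 * exactly this, and pThread here is a hypothetical worker-thread handle):
 * only the caller that actually became the processor kicks the worker thread,
 * everybody else relies on the current processor to drain the queue:
 *
 *   int rc = VBoxVBVAExHSCheckCommands(pCmdVbva);
 *   if (rc == VINF_SUCCESS)
 *       VBoxVDMAThreadEventNotify(pThread);  // we own the processor state
 *   // VINF_ALREADY_INITIALIZED: someone else is processing; VINF_EOF: idle.
 */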
650
651/**
652 * Worker for vboxVDMAConstruct() that initializes the given VBVA host context.
653 */
654static int VBoxVBVAExHSInit(struct VBVAEXHOSTCONTEXT *pCmdVbva)
655{
656 RT_ZERO(*pCmdVbva);
657 int rc = RTCritSectInit(&pCmdVbva->CltCritSect);
658 if (RT_SUCCESS(rc))
659 {
660# ifndef VBOXVDBG_MEMCACHE_DISABLE
661 rc = RTMemCacheCreate(&pCmdVbva->CtlCache, sizeof (VBVAEXHOSTCTL),
662 0, /* size_t cbAlignment */
663 UINT32_MAX, /* uint32_t cMaxObjects */
664 NULL, /* PFNMEMCACHECTOR pfnCtor*/
665 NULL, /* PFNMEMCACHEDTOR pfnDtor*/
666 NULL, /* void *pvUser*/
667 0 /* uint32_t fFlags*/
668 );
669 if (RT_SUCCESS(rc))
670# endif
671 {
672 RTListInit(&pCmdVbva->GuestCtlList);
673 RTListInit(&pCmdVbva->HostCtlList);
674 pCmdVbva->i32State = VBVAEXHOSTCONTEXT_STATE_PROCESSING;
675 pCmdVbva->i32EnableState = VBVAEXHOSTCONTEXT_ESTATE_DISABLED;
676 return VINF_SUCCESS;
677 }
678# ifndef VBOXVDBG_MEMCACHE_DISABLE
679 WARN(("RTMemCacheCreate failed %Rrc\n", rc));
680# endif
681 }
682 else
683 WARN(("RTCritSectInit failed %Rrc\n", rc));
684
685 return rc;
686}
687
688/**
689 * Checks if VBVA state is some form of enabled.
690 */
691DECLINLINE(bool) VBoxVBVAExHSIsEnabled(struct VBVAEXHOSTCONTEXT *pCmdVbva)
692{
693 return ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) >= VBVAEXHOSTCONTEXT_ESTATE_PAUSED;
694}
695
696/**
697 * Checks if VBVA state is disabled.
698 */
699DECLINLINE(bool) VBoxVBVAExHSIsDisabled(struct VBVAEXHOSTCONTEXT *pCmdVbva)
700{
701 return ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) == VBVAEXHOSTCONTEXT_ESTATE_DISABLED;
702}
703
704/**
705 * Worker for vdmaVBVAEnableProcess().
706 *
707 * @thread VDMA
708 */
709static int VBoxVBVAExHSEnable(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVABUFFER *pVBVA)
710{
711 if (VBoxVBVAExHSIsEnabled(pCmdVbva))
712 {
713 WARN(("VBVAEx is enabled already\n"));
714 return VERR_INVALID_STATE;
715 }
716
717 pCmdVbva->pVBVA = pVBVA;
718 pCmdVbva->pVBVA->hostFlags.u32HostEvents = 0;
719 ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_ENABLED);
720 return VINF_SUCCESS;
721}
722
723/**
724 * Works the enable state.
725 * @thread VDMA, CR, EMT, ...
726 */
727static int VBoxVBVAExHSDisable(struct VBVAEXHOSTCONTEXT *pCmdVbva)
728{
729 if (VBoxVBVAExHSIsDisabled(pCmdVbva))
730 return VINF_SUCCESS;
731
732 ASMAtomicWriteS32(&pCmdVbva->i32EnableState, VBVAEXHOSTCONTEXT_ESTATE_DISABLED);
733 return VINF_SUCCESS;
734}
735
736/**
737 * Worker for vboxVDMADestruct() and vboxVDMAConstruct().
738 */
739static void VBoxVBVAExHSTerm(struct VBVAEXHOSTCONTEXT *pCmdVbva)
740{
741 /* ensure the processor is stopped */
742 Assert(pCmdVbva->i32State >= VBVAEXHOSTCONTEXT_STATE_LISTENING);
743
744 /* ensure no one tries to submit the command */
745 if (pCmdVbva->pVBVA)
746 pCmdVbva->pVBVA->hostFlags.u32HostEvents = 0;
747
748 Assert(RTListIsEmpty(&pCmdVbva->GuestCtlList));
749 Assert(RTListIsEmpty(&pCmdVbva->HostCtlList));
750
751 RTCritSectDelete(&pCmdVbva->CltCritSect);
752
753# ifndef VBOXVDBG_MEMCACHE_DISABLE
754 RTMemCacheDestroy(pCmdVbva->CtlCache);
755# endif
756
757 RT_ZERO(*pCmdVbva);
758}
759
760
761/**
762 * Worker for vboxVBVAExHSSaveStateLocked().
763 * @thread VDMA
764 */
765static int vboxVBVAExHSSaveGuestCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL* pCtl, uint8_t* pu8VramBase, PSSMHANDLE pSSM)
766{
767 RT_NOREF(pCmdVbva);
768 int rc = SSMR3PutU32(pSSM, pCtl->enmType);
769 AssertRCReturn(rc, rc);
770 rc = SSMR3PutU32(pSSM, pCtl->u.cmd.cbCmd);
771 AssertRCReturn(rc, rc);
772 rc = SSMR3PutU32(pSSM, (uint32_t)(pCtl->u.cmd.pu8Cmd - pu8VramBase));
773 AssertRCReturn(rc, rc);
774
775 return VINF_SUCCESS;
776}
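/* Layout sketch (derived from the Put/Get calls above and in
 * vboxVBVAExHSLoadGuestCtl() below, not separately documented) of one saved
 * guest control record:
 *
 *   uint32_t enmType;   // VBVAEXHOSTCTL_TYPE; a 0 terminates the list
 *   uint32_t cbCmd;     // payload size in bytes
 *   uint32_t offCmd;    // pu8Cmd expressed as an offset from the VRAM base
 */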
777
778/**
779 * Worker for VBoxVBVAExHSSaveState().
780 * @thread VDMA
781 */
782static int vboxVBVAExHSSaveStateLocked(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM)
783{
784 if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
785 {
786 WARN(("vbva not paused\n"));
787 return VERR_INVALID_STATE;
788 }
789
790 int rc;
791 VBVAEXHOSTCTL* pCtl;
792 RTListForEach(&pCmdVbva->GuestCtlList, pCtl, VBVAEXHOSTCTL, Node)
793 {
794 rc = vboxVBVAExHSSaveGuestCtl(pCmdVbva, pCtl, pu8VramBase, pSSM);
795 AssertRCReturn(rc, rc);
796 }
797
798 rc = SSMR3PutU32(pSSM, 0);
799 AssertRCReturn(rc, rc);
800
801 return VINF_SUCCESS;
802}
803
804/**
805 * Handles VBVAEXHOSTCTL_TYPE_HH_SAVESTATE for vboxVDMACrHostCtlProcess, saving
806 * state on the VDMA thread.
807 *
808 * @returns - same as VBoxVBVAExHSCheckCommands, or failure if saving the state fails
809 * @thread VDMA
810 */
811static int VBoxVBVAExHSSaveState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM)
812{
813 int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
814 AssertRCReturn(rc, rc);
815
816 rc = vboxVBVAExHSSaveStateLocked(pCmdVbva, pu8VramBase, pSSM);
817 if (RT_FAILURE(rc))
818 WARN(("vboxVBVAExHSSaveStateLocked failed %Rrc\n", rc));
819
820 RTCritSectLeave(&pCmdVbva->CltCritSect);
821 return rc;
822}
823
824
825/**
826 * Worker for vboxVBVAExHSLoadStateLocked.
827 * @retval VINF_EOF if there is nothing more to load.
828 * @thread VDMA
829 */
830static int vboxVBVAExHSLoadGuestCtl(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version)
831{
832 RT_NOREF(u32Version);
833 uint32_t u32;
834 int rc = SSMR3GetU32(pSSM, &u32);
835 AssertLogRelRCReturn(rc, rc);
836
837 if (!u32)
838 return VINF_EOF;
839
840 VBVAEXHOSTCTL *pHCtl = VBoxVBVAExHCtlCreate(pCmdVbva, (VBVAEXHOSTCTL_TYPE)u32);
841 if (!pHCtl)
842 {
843 WARN(("VBoxVBVAExHCtlCreate failed\n"));
844 return VERR_NO_MEMORY;
845 }
846
847 rc = SSMR3GetU32(pSSM, &u32);
848 AssertLogRelRCReturn(rc, rc);
849 pHCtl->u.cmd.cbCmd = u32;
850
851 rc = SSMR3GetU32(pSSM, &u32);
852 AssertLogRelRCReturn(rc, rc);
853 pHCtl->u.cmd.pu8Cmd = pu8VramBase + u32;
854
855 RTListAppend(&pCmdVbva->GuestCtlList, &pHCtl->Node);
856 ++pCmdVbva->u32cCtls;
857
858 return VINF_SUCCESS;
859}
860
861/**
862 * Worker for VBoxVBVAExHSLoadState.
863 * @thread VDMA
864 */
865static int vboxVBVAExHSLoadStateLocked(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version)
866{
867 if (ASMAtomicUoReadS32(&pCmdVbva->i32EnableState) != VBVAEXHOSTCONTEXT_ESTATE_PAUSED)
868 {
869 WARN(("vbva not stopped\n"));
870 return VERR_INVALID_STATE;
871 }
872
873 int rc;
874 do
875 {
876 rc = vboxVBVAExHSLoadGuestCtl(pCmdVbva, pu8VramBase, pSSM, u32Version);
877 AssertLogRelRCReturn(rc, rc);
878 } while (rc != VINF_EOF);
879
880 return VINF_SUCCESS;
881}
882
883/**
884 * Handles VBVAEXHOSTCTL_TYPE_HH_LOADSTATE for vboxVDMACrHostCtlProcess(),
885 * loading state on the VDMA thread.
886 *
887 * @returns - same as VBoxVBVAExHSCheckCommands, or failure if loading the state fails
888 * @thread VDMA
889 */
890static int VBoxVBVAExHSLoadState(struct VBVAEXHOSTCONTEXT *pCmdVbva, uint8_t* pu8VramBase, PSSMHANDLE pSSM, uint32_t u32Version)
891{
892 Assert(VGA_SAVEDSTATE_VERSION_3D <= u32Version);
893 int rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
894 AssertRCReturn(rc, rc);
895
896 rc = vboxVBVAExHSLoadStateLocked(pCmdVbva, pu8VramBase, pSSM, u32Version);
897 if (RT_FAILURE(rc))
898 WARN(("vboxVBVAExHSSaveStateLocked failed %Rrc\n", rc));
899
900 RTCritSectLeave(&pCmdVbva->CltCritSect);
901 return rc;
902}
903
904
905
906/**
907 * Queues a control command to the VDMA worker thread.
908 *
909 * The @a enmSource argument decides which list (guest/host) it's queued on.
910 *
911 */
912static int VBoxVBVAExHCtlSubmit(VBVAEXHOSTCONTEXT *pCmdVbva, VBVAEXHOSTCTL* pCtl, VBVAEXHOSTCTL_SOURCE enmSource,
913 PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
914{
915 int rc;
916 if (VBoxVBVAExHSIsEnabled(pCmdVbva))
917 {
918 pCtl->pfnComplete = pfnComplete;
919 pCtl->pvComplete = pvComplete;
920
921 rc = RTCritSectEnter(&pCmdVbva->CltCritSect);
922 if (RT_SUCCESS(rc))
923 {
924 /* Recheck that we're enabled after we've got the lock. */
925 if (VBoxVBVAExHSIsEnabled(pCmdVbva))
926 {
927 /* Queue it. */
928 if (enmSource > VBVAEXHOSTCTL_SOURCE_GUEST)
929 RTListAppend(&pCmdVbva->HostCtlList, &pCtl->Node);
930 else
931 RTListAppend(&pCmdVbva->GuestCtlList, &pCtl->Node);
932 ASMAtomicIncU32(&pCmdVbva->u32cCtls);
933
934 RTCritSectLeave(&pCmdVbva->CltCritSect);
935
936 /* Work the state or something. */
937 rc = VBoxVBVAExHSCheckCommands(pCmdVbva);
938 }
939 else
940 {
941 RTCritSectLeave(&pCmdVbva->CltCritSect);
942 Log(("cmd vbva not enabled (race)\n"));
943 rc = VERR_INVALID_STATE;
944 }
945 }
946 else
947 AssertRC(rc);
948 }
949 else
950 {
951 Log(("cmd vbva not enabled\n"));
952 rc = VERR_INVALID_STATE;
953 }
954 return rc;
955}
956
957/**
958 * Submits the control command and notifies the VDMA thread.
959 */
960static int vdmaVBVACtlSubmit(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL *pCtl, VBVAEXHOSTCTL_SOURCE enmSource,
961 PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
962{
963 int rc = VBoxVBVAExHCtlSubmit(&pVdma->CmdVbva, pCtl, enmSource, pfnComplete, pvComplete);
964 if (RT_SUCCESS(rc))
965 {
966 if (rc == VINF_SUCCESS)
967 return VBoxVDMAThreadEventNotify(&pVdma->Thread);
968 Assert(rc == VINF_ALREADY_INITIALIZED);
969 }
970 else
971 Log(("VBoxVBVAExHCtlSubmit failed %Rrc\n", rc));
972
973 return rc;
974}
975
976
977/**
978 * Call VDMA thread creation notification callback.
979 */
980void VBoxVDMAThreadNotifyConstructSucceeded(PVBOXVDMATHREAD pThread, void *pvThreadContext)
981{
982 Assert(pThread->u32State == VBOXVDMATHREAD_STATE_CREATING);
983 PFNVBOXVDMATHREAD_CHANGED pfnChanged = pThread->pfnChanged;
984 void *pvChanged = pThread->pvChanged;
985
986 pThread->pfnChanged = NULL;
987 pThread->pvChanged = NULL;
988
989 ASMAtomicWriteU32(&pThread->u32State, VBOXVDMATHREAD_STATE_CREATED);
990
991 if (pfnChanged)
992 pfnChanged(pThread, VINF_SUCCESS, pvThreadContext, pvChanged);
993}
994
995/**
996 * Call VDMA thread termination notification callback.
997 */
998void VBoxVDMAThreadNotifyTerminatingSucceeded(PVBOXVDMATHREAD pThread, void *pvThreadContext)
999{
1000 Assert(pThread->u32State == VBOXVDMATHREAD_STATE_TERMINATING);
1001 PFNVBOXVDMATHREAD_CHANGED pfnChanged = pThread->pfnChanged;
1002 void *pvChanged = pThread->pvChanged;
1003
1004 pThread->pfnChanged = NULL;
1005 pThread->pvChanged = NULL;
1006
1007 if (pfnChanged)
1008 pfnChanged(pThread, VINF_SUCCESS, pvThreadContext, pvChanged);
1009}
1010
1011/**
1012 * Check if VDMA thread is terminating.
1013 */
1014DECLINLINE(bool) VBoxVDMAThreadIsTerminating(PVBOXVDMATHREAD pThread)
1015{
1016 return ASMAtomicUoReadU32(&pThread->u32State) == VBOXVDMATHREAD_STATE_TERMINATING;
1017}
1018
1019/**
1020 * Init VDMA thread.
1021 */
1022void VBoxVDMAThreadInit(PVBOXVDMATHREAD pThread)
1023{
1024 RT_ZERO(*pThread);
1025 pThread->u32State = VBOXVDMATHREAD_STATE_TERMINATED;
1026}
1027
1028/**
1029 * Clean up VDMA thread.
1030 */
1031int VBoxVDMAThreadCleanup(PVBOXVDMATHREAD pThread)
1032{
1033 uint32_t u32State = ASMAtomicUoReadU32(&pThread->u32State);
1034 switch (u32State)
1035 {
1036 case VBOXVDMATHREAD_STATE_TERMINATED:
1037 return VINF_SUCCESS;
1038
1039 case VBOXVDMATHREAD_STATE_TERMINATING:
1040 {
1041 int rc = RTThreadWait(pThread->hWorkerThread, RT_INDEFINITE_WAIT, NULL);
1042 if (RT_SUCCESS(rc))
1043 {
1044 RTSemEventDestroy(pThread->hEvent);
1045 pThread->hEvent = NIL_RTSEMEVENT;
1046 pThread->hWorkerThread = NIL_RTTHREAD;
1047 ASMAtomicWriteU32(&pThread->u32State, VBOXVDMATHREAD_STATE_TERMINATED);
1048 }
1049 else
1050 WARN(("RTThreadWait failed %Rrc\n", rc));
1051 return rc;
1052 }
1053
1054 default:
1055 WARN(("invalid state"));
1056 return VERR_INVALID_STATE;
1057 }
1058}
1059
1060/**
1061 * Start VDMA thread.
1062 */
1063int VBoxVDMAThreadCreate(PVBOXVDMATHREAD pThread, PFNRTTHREAD pfnThread, void *pvThread,
1064 PFNVBOXVDMATHREAD_CHANGED pfnCreated, void *pvCreated)
1065{
1066 int rc = VBoxVDMAThreadCleanup(pThread);
1067 if (RT_SUCCESS(rc))
1068 {
1069 rc = RTSemEventCreate(&pThread->hEvent); AssertRCReturn(rc, rc);
1070 pThread->u32State = VBOXVDMATHREAD_STATE_CREATING;
1071 pThread->pfnChanged = pfnCreated;
1072 pThread->pvChanged = pvCreated;
1073 rc = RTThreadCreate(&pThread->hWorkerThread, pfnThread, pvThread, 0, RTTHREADTYPE_IO, RTTHREADFLAGS_WAITABLE, "VDMA");
1074 if (RT_SUCCESS(rc))
1075 return VINF_SUCCESS;
1076
1077 WARN(("RTThreadCreate failed %Rrc\n", rc));
1078 RTSemEventDestroy(pThread->hEvent);
1079 pThread->hEvent = NIL_RTSEMEVENT;
1080 pThread->hWorkerThread = NIL_RTTHREAD;
1081 pThread->u32State = VBOXVDMATHREAD_STATE_TERMINATED;
1082 }
1083 else
1084 WARN(("VBoxVDMAThreadCleanup failed %Rrc\n", rc));
1085 return rc;
1086}
1087
1088/**
1089 * Notifies the VDMA thread.
1090 * @thread !VDMA
1091 */
1092static int VBoxVDMAThreadEventNotify(PVBOXVDMATHREAD pThread)
1093{
1094 int rc = RTSemEventSignal(pThread->hEvent);
1095 AssertRC(rc);
1096 return rc;
1097}
1098
1099/**
1100 * State worker for VBVAEXHOSTCTL_TYPE_HH_ON_HGCM_UNLOAD &
1101 * VBVAEXHOSTCTL_TYPE_GHH_DISABLE in vboxVDMACrHostCtlProcess(), and
1102 * VBVAEXHOSTCTL_TYPE_GHH_DISABLE in vboxVDMACrGuestCtlProcess().
1103 *
1104 * @thread VDMA
1105 */
1106static int VBoxVDMAThreadTerm(PVBOXVDMATHREAD pThread, PFNVBOXVDMATHREAD_CHANGED pfnTerminated, void *pvTerminated, bool fNotify)
1107{
1108 for (;;)
1109 {
1110 uint32_t u32State = ASMAtomicUoReadU32(&pThread->u32State);
1111 switch (u32State)
1112 {
1113 case VBOXVDMATHREAD_STATE_CREATED:
1114 pThread->pfnChanged = pfnTerminated;
1115 pThread->pvChanged = pvTerminated;
1116 ASMAtomicWriteU32(&pThread->u32State, VBOXVDMATHREAD_STATE_TERMINATING);
1117 if (fNotify)
1118 {
1119 int rc = VBoxVDMAThreadEventNotify(pThread);
1120 AssertRC(rc);
1121 }
1122 return VINF_SUCCESS;
1123
1124 case VBOXVDMATHREAD_STATE_TERMINATING:
1125 case VBOXVDMATHREAD_STATE_TERMINATED:
1126 WARN(("thread is marked to termination or terminated\nn"));
1127 return VERR_INVALID_STATE;
1128
1129 case VBOXVDMATHREAD_STATE_CREATING:
1130 /* wait until the thread creation has completed */
1131 WARN(("concurrent thread create/destroy\n"));
1132 RTThreadYield();
1133 continue;
1134
1135 default:
1136 WARN(("invalid state"));
1137 return VERR_INVALID_STATE;
1138 }
1139 }
1140}
1141
1142
1143
1144/*
1145 *
1146 *
1147 * vboxVDMACrCtlPost / vboxVDMACrCtlPostAsync
1148 * vboxVDMACrCtlPost / vboxVDMACrCtlPostAsync
1149 * vboxVDMACrCtlPost / vboxVDMACrCtlPostAsync
1150 *
1151 *
1152 */
1153
1154/** Completion callback for vboxVDMACrCtlPostAsync(). */
1155typedef DECLCALLBACK(void) FNVBOXVDMACRCTL_CALLBACK(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, void* pvContext);
1156/** Pointer to a vboxVDMACrCtlPostAsync completion callback. */
1157typedef FNVBOXVDMACRCTL_CALLBACK *PFNVBOXVDMACRCTL_CALLBACK;
1158
1159/**
1160 * Private wrapper around VBOXVDMACMD_CHROMIUM_CTL.
1161 */
1162typedef struct VBOXVDMACMD_CHROMIUM_CTL_PRIVATE
1163{
1164 uint32_t uMagic; /**< VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_MAGIC */
1165 uint32_t cRefs;
1166 int32_t volatile rc;
1167 PFNVBOXVDMACRCTL_CALLBACK pfnCompletion;
1168 void *pvCompletion;
1169 RTSEMEVENT hEvtDone;
1170 VBOXVDMACMD_CHROMIUM_CTL Cmd;
1171} VBOXVDMACMD_CHROMIUM_CTL_PRIVATE, *PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE;
1172/** Magic number for VBOXVDMACMD_CHROMIUM_CTL_PRIVATE (Michael Wolff). */
1173# define VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_MAGIC UINT32_C(0x19530827)
1174
1175/** Converts from a VBOXVDMACMD_CHROMIUM_CTL::Cmd pointer to a pointer to the
1176 * containing structure. */
1177# define VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(_p) RT_FROM_MEMBER((_p), VBOXVDMACMD_CHROMIUM_CTL_PRIVATE, Cmd)
1178
1179/**
1180 * Creates a VBOXVDMACMD_CHROMIUM_CTL_PRIVATE instance.
1181 */
1182static PVBOXVDMACMD_CHROMIUM_CTL vboxVDMACrCtlCreate(VBOXVDMACMD_CHROMIUM_CTL_TYPE enmCmd, uint32_t cbCmd)
1183{
1184 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr;
1185 pHdr = (PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE)RTMemAllocZ(cbCmd + RT_OFFSETOF(VBOXVDMACMD_CHROMIUM_CTL_PRIVATE, Cmd));
1186 if (pHdr)
1187 {
1188 pHdr->uMagic = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_MAGIC;
1189 pHdr->cRefs = 1;
1190 pHdr->rc = VERR_NOT_IMPLEMENTED;
1191 pHdr->hEvtDone = NIL_RTSEMEVENT;
1192 pHdr->Cmd.enmType = enmCmd;
1193 pHdr->Cmd.cbCmd = cbCmd;
1194 return &pHdr->Cmd;
1195 }
1196 return NULL;
1197}
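/* Memory layout sketch (illustrative): the RT_OFFSETOF() allocation above
 * places the private header and the variable-sized command in one block:
 *
 *   +---------------------------------------+ <- pHdr (RTMemAllocZ result)
 *   | VBOXVDMACMD_CHROMIUM_CTL_PRIVATE hdr  |
 *   +---------------------------------------+ <- &pHdr->Cmd (returned)
 *   | VBOXVDMACMD_CHROMIUM_CTL, cbCmd bytes |
 *   +---------------------------------------+
 *
 * Callers only see &pHdr->Cmd; the header is recovered again via
 * VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL().
 */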
1198
1199/**
1200 * Releases a reference to a VBOXVDMACMD_CHROMIUM_CTL_PRIVATE instance.
1201 */
1202DECLINLINE(void) vboxVDMACrCtlRelease(PVBOXVDMACMD_CHROMIUM_CTL pCmd)
1203{
1204 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
1205 Assert(pHdr->uMagic == VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_MAGIC);
1206
1207 uint32_t cRefs = ASMAtomicDecU32(&pHdr->cRefs);
1208 if (!cRefs)
1209 {
1210 pHdr->uMagic = ~VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_MAGIC;
1211 if (pHdr->hEvtDone != NIL_RTSEMEVENT)
1212 {
1213 RTSemEventDestroy(pHdr->hEvtDone);
1214 pHdr->hEvtDone = NIL_RTSEMEVENT;
1215 }
1216 RTMemFree(pHdr);
1217 }
1218}
1219
1220/**
1221 * Retains a reference to a VBOXVDMACMD_CHROMIUM_CTL_PRIVATE instance.
1222 */
1223DECLINLINE(void) vboxVDMACrCtlRetain(PVBOXVDMACMD_CHROMIUM_CTL pCmd)
1224{
1225 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
1226 Assert(pHdr->uMagic == VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_MAGIC);
1227
1228 uint32_t cRefs = ASMAtomicIncU32(&pHdr->cRefs);
1229 Assert(cRefs > 1);
1230 Assert(cRefs < _1K);
1231 RT_NOREF_PV(cRefs);
1232}
1233
1234/**
1235 * Gets the result from our private chromium control command.
1236 *
1237 * @returns status code.
1238 * @param pCmd The command.
1239 */
1240DECLINLINE(int) vboxVDMACrCtlGetRc(PVBOXVDMACMD_CHROMIUM_CTL pCmd)
1241{
1242 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
1243 Assert(pHdr->uMagic == VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_MAGIC);
1244 return pHdr->rc;
1245}
1246
1247/**
1248 * @interface_method_impl{PDMIDISPLAYVBVACALLBACKS,pfnCrHgsmiControlCompleteAsync,
1249 * Some indirect completion magic, you gotta love this code! }
1250 */
1251DECLCALLBACK(int) vboxVDMACrHgsmiControlCompleteAsync(PPDMIDISPLAYVBVACALLBACKS pInterface, PVBOXVDMACMD_CHROMIUM_CTL pCmd, int rc)
1252{
1253 PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
1254 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
1255 Assert(pHdr->uMagic == VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_MAGIC);
1256
1257 pHdr->rc = rc;
1258 if (pHdr->pfnCompletion)
1259 pHdr->pfnCompletion(pVGAState, pCmd, pHdr->pvCompletion);
1260 return VINF_SUCCESS;
1261}
1262
1263/**
1264 * @callback_method_impl{FNCRCTLCOMPLETION,
1265 * Completion callback for vboxVDMACrCtlPost. }
1266 */
1267static DECLCALLBACK(void) vboxVDMACrCtlCbSetEvent(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, void *pvContext)
1268{
1269 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = (PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE)pvContext;
1270 Assert(pHdr == VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd));
1271 Assert(pHdr->uMagic == VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_MAGIC);
1272 RT_NOREF(pVGAState, pCmd);
1273
1274 int rc = RTSemEventSignal(pHdr->hEvtDone);
1275 AssertRC(rc);
1276
1277 vboxVDMACrCtlRelease(&pHdr->Cmd);
1278}
1279
1280/**
1281 * Worker for vboxVDMACrCtlPost().
1282 */
1283static int vboxVDMACrCtlPostAsync(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, uint32_t cbCmd,
1284 PFNVBOXVDMACRCTL_CALLBACK pfnCompletion, void *pvCompletion)
1285{
1286 if ( pVGAState->pDrv
1287 && pVGAState->pDrv->pfnCrHgsmiControlProcess)
1288 {
1289 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
1290 pHdr->pfnCompletion = pfnCompletion;
1291 pHdr->pvCompletion = pvCompletion;
1292 pVGAState->pDrv->pfnCrHgsmiControlProcess(pVGAState->pDrv, pCmd, cbCmd);
1293 return VINF_SUCCESS;
1294 }
1295 return VERR_NOT_SUPPORTED;
1296}
1297
1298/**
1299 * Posts stuff and waits.
1300 */
1301static int vboxVDMACrCtlPost(PVGASTATE pVGAState, PVBOXVDMACMD_CHROMIUM_CTL pCmd, uint32_t cbCmd)
1302{
1303 PVBOXVDMACMD_CHROMIUM_CTL_PRIVATE pHdr = VBOXVDMACMD_CHROMIUM_CTL_PRIVATE_FROM_CTL(pCmd);
1304
1305 /* Allocate the semaphore. */
1306 Assert(pHdr->hEvtDone == NIL_RTSEMEVENT);
1307 int rc = RTSemEventCreate(&pHdr->hEvtDone);
1308 AssertRCReturn(rc, rc);
1309
1310 /* Grab a reference for the completion routine. */
1311 vboxVDMACrCtlRetain(&pHdr->Cmd);
1312
1313 /* Submit and wait for it. */
1314 rc = vboxVDMACrCtlPostAsync(pVGAState, pCmd, cbCmd, vboxVDMACrCtlCbSetEvent, pHdr);
1315 AssertRC(rc);
1316 if (RT_SUCCESS(rc))
1317 rc = RTSemEventWaitNoResume(pHdr->hEvtDone, RT_INDEFINITE_WAIT);
1318 else
1319 vboxVDMACrCtlRelease(pCmd);
1320 return rc;
1321}
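/* Usage sketch (illustrative; enmCmd and cbCmd are hypothetical values) of
 * the synchronous create/post/get-rc/release cycle these helpers implement:
 *
 *   PVBOXVDMACMD_CHROMIUM_CTL pCmd = vboxVDMACrCtlCreate(enmCmd, cbCmd);
 *   if (pCmd)
 *   {
 *       int rc = vboxVDMACrCtlPost(pVGAState, pCmd, cbCmd);
 *       if (RT_SUCCESS(rc))
 *           rc = vboxVDMACrCtlGetRc(pCmd);
 *       vboxVDMACrCtlRelease(pCmd);
 *   }
 */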
1322
1323
1324/**
1325 * Structure for passing data between vboxVDMACrHgcmSubmitSync() and the
1326 * completion routine vboxVDMACrHgcmSubmitSyncCompletion().
1327 */
1328typedef struct VDMA_VBVA_CTL_CYNC_COMPLETION
1329{
1330 int volatile rc;
1331 RTSEMEVENT hEvent;
1332} VDMA_VBVA_CTL_CYNC_COMPLETION;
1333
1334/**
1335 * @callback_method_impl{FNCRCTLCOMPLETION,
1336 * Completion callback for vboxVDMACrHgcmSubmitSync() that signals the
1337 * waiting thread.}
1338 */
1339static DECLCALLBACK(void) vboxVDMACrHgcmSubmitSyncCompletion(struct VBOXCRCMDCTL *pCmd, uint32_t cbCmd, int rc, void *pvCompletion)
1340{
1341 VDMA_VBVA_CTL_CYNC_COMPLETION *pData = (VDMA_VBVA_CTL_CYNC_COMPLETION*)pvCompletion;
1342 pData->rc = rc;
1343 rc = RTSemEventSignal(pData->hEvent);
1344 AssertLogRelRC(rc);
1345
1346 RT_NOREF(pCmd, cbCmd);
1347}
1348
1349/**
1350 * Worker for vboxVDMACrHgcmHandleEnable() and vdmaVBVAEnableProcess() that
1351 * works pVGAState->pDrv->pfnCrHgcmCtlSubmit.
1352 *
1353 * @thread VDMA
1354 */
1355static int vboxVDMACrHgcmSubmitSync(struct VBOXVDMAHOST *pVdma, VBOXCRCMDCTL* pCtl, uint32_t cbCtl)
1356{
1357 VDMA_VBVA_CTL_CYNC_COMPLETION Data;
1358 Data.rc = VERR_NOT_IMPLEMENTED;
1359 int rc = RTSemEventCreate(&Data.hEvent);
1360 if (!RT_SUCCESS(rc))
1361 {
1362 WARN(("RTSemEventCreate failed %Rrc\n", rc));
1363 return rc;
1364 }
1365
1366 pCtl->CalloutList.List.pNext = NULL;
1367
1368 PVGASTATE pVGAState = pVdma->pVGAState;
1369 rc = pVGAState->pDrv->pfnCrHgcmCtlSubmit(pVGAState->pDrv, pCtl, cbCtl, vboxVDMACrHgcmSubmitSyncCompletion, &Data);
1370 if (RT_SUCCESS(rc))
1371 {
1372 rc = RTSemEventWait(Data.hEvent, RT_INDEFINITE_WAIT);
1373 if (RT_SUCCESS(rc))
1374 {
1375 rc = Data.rc;
1376 if (!RT_SUCCESS(rc))
1377 {
1378 WARN(("pfnCrHgcmCtlSubmit command failed %Rrc\n", rc));
1379 }
1380
1381 }
1382 else
1383 WARN(("RTSemEventWait failed %Rrc\n", rc));
1384 }
1385 else
1386 WARN(("pfnCrHgcmCtlSubmit failed %Rrc\n", rc));
1387
1388
1389 RTSemEventDestroy(Data.hEvent);
1390
1391 return rc;
1392}
1393
1394
1395/**
1396 * Worker for vboxVDMAReset().
1397 */
1398static int vdmaVBVACtlDisableSync(PVBOXVDMAHOST pVdma)
1399{
1400 VBVAEXHOSTCTL HCtl;
1401 RT_ZERO(HCtl);
1402 HCtl.enmType = VBVAEXHOSTCTL_TYPE_GHH_DISABLE;
1403 int rc = vdmaVBVACtlSubmitSync(pVdma, &HCtl, VBVAEXHOSTCTL_SOURCE_HOST);
1404 if (RT_SUCCESS(rc))
1405 vgaUpdateDisplayAll(pVdma->pVGAState, /* fFailOnResize = */ false);
1406 else
1407 Log(("vdmaVBVACtlSubmitSync failed %Rrc\n", rc));
1408 return rc;
1409}
1410
1411
1412/**
1413 * @interface_method_impl{VBOXCRCMDCTL_HGCMENABLE_DATA,pfnRHCmd,
1414 * Used by vboxVDMACrHgcmNotifyTerminatingCb() and called by
1415 * crVBoxServerCrCmdDisablePostProcess() during crServerTearDown() to drain
1416 * command queues or something.}
1417 */
1418static DECLCALLBACK(uint8_t *)
1419vboxVDMACrHgcmHandleEnableRemainingHostCommand(HVBOXCRCMDCTL_REMAINING_HOST_COMMAND hClient, uint32_t *pcbCtl, int prevCmdRc)
1420{
1421 struct VBOXVDMAHOST *pVdma = hClient;
1422
1423 if (!pVdma->pCurRemainingHostCtl)
1424 VBoxVBVAExHSDisable(&pVdma->CmdVbva); /* disable VBVA, all subsequent host commands will go HGCM way */
1425 else
1426 VBoxVBVAExHPDataCompleteCtl(&pVdma->CmdVbva, pVdma->pCurRemainingHostCtl, prevCmdRc);
1427
1428 pVdma->pCurRemainingHostCtl = VBoxVBVAExHPCheckHostCtlOnDisable(&pVdma->CmdVbva);
1429 if (pVdma->pCurRemainingHostCtl)
1430 {
1431 *pcbCtl = pVdma->pCurRemainingHostCtl->u.cmd.cbCmd;
1432 return pVdma->pCurRemainingHostCtl->u.cmd.pu8Cmd;
1433 }
1434
1435 *pcbCtl = 0;
1436 return NULL;
1437}
1438
1439/**
1440 * @interface_method_impl{VBOXCRCMDCTL_HGCMDISABLE_DATA,pfnNotifyTermDone,
1441 * Called by crServerTearDown().}
1442 */
1443static DECLCALLBACK(void) vboxVDMACrHgcmNotifyTerminatingDoneCb(HVBOXCRCMDCTL_NOTIFY_TERMINATING hClient)
1444{
1445# ifdef VBOX_STRICT
1446 struct VBOXVDMAHOST *pVdma = hClient;
1447 Assert(pVdma->CmdVbva.i32State == VBVAEXHOSTCONTEXT_STATE_PROCESSING);
1448 Assert(pVdma->Thread.u32State == VBOXVDMATHREAD_STATE_TERMINATING);
1449# else
1450 RT_NOREF(hClient);
1451# endif
1452}
1453
1454/**
1455 * @interface_method_impl{VBOXCRCMDCTL_HGCMDISABLE_DATA,pfnNotifyTerm,
1456 * Called by crServerTearDown().}
1457 */
1458static DECLCALLBACK(int) vboxVDMACrHgcmNotifyTerminatingCb(HVBOXCRCMDCTL_NOTIFY_TERMINATING hClient,
1459 VBOXCRCMDCTL_HGCMENABLE_DATA *pHgcmEnableData)
1460{
1461 struct VBOXVDMAHOST *pVdma = hClient;
1462
1463 VBVAEXHOSTCTL HCtl;
1464 RT_ZERO(HCtl);
1465 HCtl.enmType = VBVAEXHOSTCTL_TYPE_HH_ON_HGCM_UNLOAD;
1466 int rc = vdmaVBVACtlSubmitSync(pVdma, &HCtl, VBVAEXHOSTCTL_SOURCE_HOST);
1467
1468 pHgcmEnableData->hRHCmd = pVdma;
1469 pHgcmEnableData->pfnRHCmd = vboxVDMACrHgcmHandleEnableRemainingHostCommand;
1470
1471 if (rc == VERR_INVALID_STATE)
1472 rc = VINF_SUCCESS;
1473 else if (RT_FAILURE(rc))
1474 WARN(("vdmaVBVACtlSubmitSync failed %Rrc\n", rc));
1475
1476 return rc;
1477}
1478
1479/**
1480 * Worker for vdmaVBVAEnableProcess() and vdmaVBVADisableProcess().
1481 *
1482 * @thread VDMA
1483 */
1484static int vboxVDMACrHgcmHandleEnable(struct VBOXVDMAHOST *pVdma)
1485{
1486 VBOXCRCMDCTL_ENABLE Enable;
1487 RT_ZERO(Enable);
1488 Enable.Hdr.enmType = VBOXCRCMDCTL_TYPE_ENABLE;
1489 Enable.Data.hRHCmd = pVdma;
1490 Enable.Data.pfnRHCmd = vboxVDMACrHgcmHandleEnableRemainingHostCommand;
1491
1492 int rc = vboxVDMACrHgcmSubmitSync(pVdma, &Enable.Hdr, sizeof (Enable));
1493 Assert(!pVdma->pCurRemainingHostCtl);
1494 if (RT_SUCCESS(rc))
1495 {
1496 Assert(!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva));
1497 return VINF_SUCCESS;
1498 }
1499
1500 Assert(VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva));
1501 WARN(("vboxVDMACrHgcmSubmitSync failed %Rrc\n", rc));
1502 return rc;
1503}
1504
1505/**
1506 * Handles VBVAEXHOSTCTL_TYPE_GHH_ENABLE and VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED
1507 * for vboxVDMACrGuestCtlProcess().
1508 *
1509 * @thread VDMA
1510 */
1511static int vdmaVBVAEnableProcess(struct VBOXVDMAHOST *pVdma, uint32_t u32Offset)
1512{
1513 if (VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
1514 {
1515 WARN(("vdma VBVA is already enabled\n"));
1516 return VERR_INVALID_STATE;
1517 }
1518
1519/** @todo r=bird: This needs a closer look! */
1520 VBVABUFFER *pVBVA = (VBVABUFFER *)HGSMIOffsetToPointerHost(pVdma->pHgsmi, u32Offset);
1521 if (!pVBVA)
1522 {
1523 WARN(("invalid offset %d (%#x)\n", u32Offset, u32Offset));
1524 return VERR_INVALID_PARAMETER;
1525 }
1526
1527 int rc = VBoxVBVAExHSEnable(&pVdma->CmdVbva, pVBVA);
1528 if (RT_SUCCESS(rc))
1529 {
1530 if (!pVdma->CrSrvInfo.pfnEnable)
1531 {
1532 /* "HGCM-less" mode. All inited. */
1533 return VINF_SUCCESS;
1534 }
1535
1536 VBOXCRCMDCTL_DISABLE Disable;
1537 Disable.Hdr.enmType = VBOXCRCMDCTL_TYPE_DISABLE;
1538 Disable.Data.hNotifyTerm = pVdma;
1539 Disable.Data.pfnNotifyTerm = vboxVDMACrHgcmNotifyTerminatingCb;
1540 Disable.Data.pfnNotifyTermDone = vboxVDMACrHgcmNotifyTerminatingDoneCb;
1541 rc = vboxVDMACrHgcmSubmitSync(pVdma, &Disable.Hdr, sizeof (Disable));
1542 if (RT_SUCCESS(rc))
1543 {
1544 PVGASTATE pVGAState = pVdma->pVGAState;
1545 VBOXCRCMD_SVRENABLE_INFO Info;
1546 Info.hCltScr = pVGAState->pDrv;
1547 Info.pfnCltScrUpdateBegin = pVGAState->pDrv->pfnVBVAUpdateBegin;
1548 Info.pfnCltScrUpdateProcess = pVGAState->pDrv->pfnVBVAUpdateProcess;
1549 Info.pfnCltScrUpdateEnd = pVGAState->pDrv->pfnVBVAUpdateEnd;
1550 rc = pVdma->CrSrvInfo.pfnEnable(pVdma->CrSrvInfo.hSvr, &Info);
1551 if (RT_SUCCESS(rc))
1552 return VINF_SUCCESS;
1553
1554 WARN(("pfnEnable failed %Rrc\n", rc));
1555 vboxVDMACrHgcmHandleEnable(pVdma);
1556 }
1557 else
1558 WARN(("vboxVDMACrHgcmSubmitSync failed %Rrc\n", rc));
1559
1560 VBoxVBVAExHSDisable(&pVdma->CmdVbva);
1561 }
1562 else
1563 WARN(("VBoxVBVAExHSEnable failed %Rrc\n", rc));
1564
1565 return rc;
1566}
1567
1568/**
1569 * Worker for several vboxVDMACrHostCtlProcess() commands.
1570 *
1571 * @returns IPRT status code.
1572 * @param pVdma The VDMA channel.
1573 * @param fDoHgcmEnable ???
1574 * @thread VDMA
1575 */
1576static int vdmaVBVADisableProcess(struct VBOXVDMAHOST *pVdma, bool fDoHgcmEnable)
1577{
1578 if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
1579 {
1580 Log(("vdma VBVA is already disabled\n"));
1581 return VINF_SUCCESS;
1582 }
1583
1584 if (!pVdma->CrSrvInfo.pfnDisable)
1585 {
1586 /* "HGCM-less" mode. Just undo what vdmaVBVAEnableProcess did. */
1587 VBoxVBVAExHSDisable(&pVdma->CmdVbva);
1588 return VINF_SUCCESS;
1589 }
1590
1591 int rc = pVdma->CrSrvInfo.pfnDisable(pVdma->CrSrvInfo.hSvr);
1592 if (RT_SUCCESS(rc))
1593 {
1594 if (fDoHgcmEnable)
1595 {
1596 PVGASTATE pVGAState = pVdma->pVGAState;
1597
1598 /* Disable is a bit tricky: we need to ensure the host ctl commands
1599 * do not arrive out of order and do not go over the HGCM channel
1600 * until after it has been enabled. */
1601 rc = vboxVDMACrHgcmHandleEnable(pVdma);
1602 if (RT_SUCCESS(rc))
1603 {
1604 vdmaVBVANotifyDisable(pVGAState);
1605 return VINF_SUCCESS;
1606 }
1607
1608 VBOXCRCMD_SVRENABLE_INFO Info;
1609 Info.hCltScr = pVGAState->pDrv;
1610 Info.pfnCltScrUpdateBegin = pVGAState->pDrv->pfnVBVAUpdateBegin;
1611 Info.pfnCltScrUpdateProcess = pVGAState->pDrv->pfnVBVAUpdateProcess;
1612 Info.pfnCltScrUpdateEnd = pVGAState->pDrv->pfnVBVAUpdateEnd;
1613 pVdma->CrSrvInfo.pfnEnable(pVdma->CrSrvInfo.hSvr, &Info); /** @todo ignoring return code */
1614 }
1615 }
1616 else
1617 WARN(("pfnDisable failed %Rrc\n", rc));
1618
1619 return rc;
1620}
1621
1622/**
1623 * Handles VBVAEXHOST_DATA_TYPE_HOSTCTL for vboxVDMAWorkerThread.
1624 *
1625 * @returns VBox status code.
1626 * @param pVdma The VDMA channel.
1627 * @param pCmd The control command to process. Should be
1628 * safe, i.e. not shared with guest.
1629 * @param pfContinue Where to return whether to continue or not.
1630 * @thread VDMA
1631 */
1632static int vboxVDMACrHostCtlProcess(struct VBOXVDMAHOST *pVdma, VBVAEXHOSTCTL *pCmd, bool *pfContinue)
1633{
1634 *pfContinue = true;
1635
1636 int rc;
1637 switch (pCmd->enmType)
1638 {
1639 /*
1640 * See vdmaVBVACtlOpaqueHostSubmit() and its callers.
1641 */
1642 case VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE:
1643 if (VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
1644 {
1645 if (pVdma->CrSrvInfo.pfnHostCtl)
1646 return pVdma->CrSrvInfo.pfnHostCtl(pVdma->CrSrvInfo.hSvr, pCmd->u.cmd.pu8Cmd, pCmd->u.cmd.cbCmd);
1647 WARN(("VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE for disabled vdma VBVA\n"));
1648 }
1649 else
1650 WARN(("VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE for HGCM-less mode\n"));
1651 return VERR_INVALID_STATE;
1652
1653 /*
1654 * See vdmaVBVACtlDisableSync().
1655 */
1656 case VBVAEXHOSTCTL_TYPE_GHH_DISABLE:
1657 rc = vdmaVBVADisableProcess(pVdma, true /* fDoHgcmEnable */);
1658 if (RT_SUCCESS(rc))
1659 rc = VBoxVDMAThreadTerm(&pVdma->Thread, NULL, NULL, false /* fNotify */ );
1660 else
1661 WARN(("vdmaVBVADisableProcess failed %Rrc\n", rc));
1662 return rc;
1663
1664 /*
1665 * See vboxVDMACrHgcmNotifyTerminatingCb().
1666 */
1667 case VBVAEXHOSTCTL_TYPE_HH_ON_HGCM_UNLOAD:
1668 rc = vdmaVBVADisableProcess(pVdma, false /* fDoHgcmEnable */);
1669 if (RT_SUCCESS(rc))
1670 {
1671 rc = VBoxVDMAThreadTerm(&pVdma->Thread, NULL, NULL, true /* fNotify */);
1672 if (RT_SUCCESS(rc))
1673 *pfContinue = false;
1674 else
1675 WARN(("VBoxVDMAThreadTerm failed %Rrc\n", rc));
1676 }
1677 else
1678 WARN(("vdmaVBVADisableProcess failed %Rrc\n", rc));
1679 return rc;
1680
1681 /*
1682 * See vboxVDMASaveStateExecPerform().
1683 */
1684 case VBVAEXHOSTCTL_TYPE_HH_SAVESTATE:
1685 rc = VBoxVBVAExHSSaveState(&pVdma->CmdVbva, pVdma->pVGAState->vram_ptrR3, pCmd->u.state.pSSM);
1686 if (RT_SUCCESS(rc))
1687 {
1688 VGA_SAVED_STATE_PUT_MARKER(pCmd->u.state.pSSM, 4);
1689 if (pVdma->CrSrvInfo.pfnSaveState)
1690 rc = pVdma->CrSrvInfo.pfnSaveState(pVdma->CrSrvInfo.hSvr, pCmd->u.state.pSSM);
1691 }
1692 else
1693 WARN(("VBoxVBVAExHSSaveState failed %Rrc\n", rc));
1694 return rc;
1695
1696 /*
1697 * See vboxVDMASaveLoadExecPerform().
1698 */
1699 case VBVAEXHOSTCTL_TYPE_HH_LOADSTATE:
1700 rc = VBoxVBVAExHSLoadState(&pVdma->CmdVbva, pVdma->pVGAState->vram_ptrR3, pCmd->u.state.pSSM, pCmd->u.state.u32Version);
1701 if (RT_SUCCESS(rc))
1702 {
1703 VGA_SAVED_STATE_GET_MARKER_RETURN_ON_MISMATCH(pCmd->u.state.pSSM, pCmd->u.state.u32Version, 4);
1704 if (pVdma->CrSrvInfo.pfnLoadState)
1705 {
1706 rc = pVdma->CrSrvInfo.pfnLoadState(pVdma->CrSrvInfo.hSvr, pCmd->u.state.pSSM, pCmd->u.state.u32Version);
1707 if (RT_FAILURE(rc))
1708 WARN(("pfnLoadState failed %Rrc\n", rc));
1709 }
1710 }
1711 else
1712 WARN(("VBoxVBVAExHSLoadState failed %Rrc\n", rc));
1713 return rc;
1714
1715 /*
1716 * See vboxVDMASaveLoadDone().
1717 */
1718 case VBVAEXHOSTCTL_TYPE_HH_LOADSTATE_DONE:
1719 {
1720 PVGASTATE pVGAState = pVdma->pVGAState;
1721 for (uint32_t i = 0; i < pVGAState->cMonitors; ++i)
1722 {
1723 VBVAINFOSCREEN CurScreen;
1724 VBVAINFOVIEW CurView;
1725 rc = VBVAGetInfoViewAndScreen(pVGAState, i, &CurView, &CurScreen);
1726 AssertLogRelMsgRCReturn(rc, ("VBVAGetInfoViewAndScreen [screen #%u] -> %#x\n", i, rc), rc);
1727
1728 rc = VBVAInfoScreen(pVGAState, &CurScreen);
1729 AssertLogRelMsgRCReturn(rc, ("VBVAInfoScreen [screen #%u] -> %#x\n", i, rc), rc);
1730 }
1731
1732 return VINF_SUCCESS;
1733 }
1734
1735 default:
1736 WARN(("unexpected host ctl type %d\n", pCmd->enmType));
1737 return VERR_INVALID_PARAMETER;
1738 }
1739}
1740
1741/**
1742 * Worker for vboxVDMACrGuestCtlResizeEntryProcess.
1743 *
1744 * @returns VINF_SUCCESS or VERR_INVALID_PARAMETER.
1745 * @param pVGAState The VGA device state.
1746 * @param pScreen The screen info (safe copy).
1747 */
1748static int vboxVDMASetupScreenInfo(PVGASTATE pVGAState, VBVAINFOSCREEN *pScreen)
1749{
1750 const uint32_t idxView = pScreen->u32ViewIndex;
1751 const uint16_t fFlags = pScreen->u16Flags;
1752
1753 if (fFlags & VBVA_SCREEN_F_DISABLED)
1754 {
1755 if ( idxView < pVGAState->cMonitors
1756 || idxView == UINT32_C(0xFFFFFFFF))
1757 {
1758 RT_ZERO(*pScreen);
1759 pScreen->u32ViewIndex = idxView;
1760 pScreen->u16Flags = VBVA_SCREEN_F_ACTIVE | VBVA_SCREEN_F_DISABLED;
1761 return VINF_SUCCESS;
1762 }
1763 }
1764 else
1765 {
1766 if (fFlags & VBVA_SCREEN_F_BLANK2)
1767 {
1768 if ( idxView >= pVGAState->cMonitors
1769 && idxView != UINT32_C(0xFFFFFFFF))
1770 return VERR_INVALID_PARAMETER;
1771
1772 /* Special case for blanking using current video mode.
1773 * Only the 'u16Flags' and 'u32ViewIndex' fields are relevant.
1774 */
1775 RT_ZERO(*pScreen);
1776 pScreen->u32ViewIndex = idxView;
1777 pScreen->u16Flags = fFlags;
1778 return VINF_SUCCESS;
1779 }
1780
1781 if ( idxView < pVGAState->cMonitors
1782 && pScreen->u16BitsPerPixel <= 32
1783 && pScreen->u32Width <= UINT16_MAX
1784 && pScreen->u32Height <= UINT16_MAX
1785 && pScreen->u32LineSize <= UINT16_MAX * 4)
1786 {
1787 const uint32_t u32BytesPerPixel = (pScreen->u16BitsPerPixel + 7) / 8;
1788 if (pScreen->u32Width <= pScreen->u32LineSize / (u32BytesPerPixel? u32BytesPerPixel: 1))
1789 {
1790 const uint64_t u64ScreenSize = (uint64_t)pScreen->u32LineSize * pScreen->u32Height;
1791 if ( pScreen->u32StartOffset <= pVGAState->vram_size
1792 && u64ScreenSize <= pVGAState->vram_size
1793 && pScreen->u32StartOffset <= pVGAState->vram_size - (uint32_t)u64ScreenSize)
1794 return VINF_SUCCESS;
1795 }
1796 }
1797 }
1798
1799 LogFunc(("Failed\n"));
1800 return VERR_INVALID_PARAMETER;
1801}
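/* Worked example (illustrative numbers): a 1920x1080 screen at 32 bpp gives
 * u32BytesPerPixel = (32 + 7) / 8 = 4, so u32LineSize must be at least
 * 1920 * 4 = 7680 and u64ScreenSize = 7680 * 1080 = 8294400 bytes (~7.9 MB);
 * the checks above then require u32StartOffset + 8294400 to fit within
 * pVGAState->vram_size.
 */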
1802
1803/**
1804 * Handles on entry in a VBVAEXHOSTCTL_TYPE_GHH_RESIZE command.
1805 *
1806 * @returns IPRT status code.
1807 * @param pVdma The VDMA channel
1808 * @param pEntry The entry to handle. Considered volatile.
1809 *
1810 * @thread VDMA
1811 */
1812static int vboxVDMACrGuestCtlResizeEntryProcess(struct VBOXVDMAHOST *pVdma, VBOXCMDVBVA_RESIZE_ENTRY *pEntry)
1813{
1814 PVGASTATE pVGAState = pVdma->pVGAState;
1815 VBVAINFOSCREEN Screen = pEntry->Screen;
1816
1817 /* Verify and cleanup local copy of the input data. */
1818 int rc = vboxVDMASetupScreenInfo(pVGAState, &Screen);
1819 if (RT_FAILURE(rc))
1820 {
1821 WARN(("invalid screen data\n"));
1822 return rc;
1823 }
1824
1825 VBOXCMDVBVA_SCREENMAP_DECL(uint32_t, aTargetMap);
1826 memcpy(aTargetMap, pEntry->aTargetMap, sizeof(aTargetMap));
1827 ASMBitClearRange(aTargetMap, pVGAState->cMonitors, VBOX_VIDEO_MAX_SCREENS);
1828
1829 if (pVdma->CrSrvInfo.pfnResize)
1830 {
1831 /* Also inform the HGCM service, if it is there. */
1832 rc = pVdma->CrSrvInfo.pfnResize(pVdma->CrSrvInfo.hSvr, &Screen, aTargetMap);
1833 if (RT_FAILURE(rc))
1834 {
1835 WARN(("pfnResize failed %Rrc\n", rc));
1836 return rc;
1837 }
1838 }
1839
1840 /* A fake view which contains the current screen for the 2D VBVAInfoView. */
1841 VBVAINFOVIEW View;
1842 View.u32ViewOffset = 0;
1843 View.u32ViewSize = Screen.u32LineSize * Screen.u32Height + Screen.u32StartOffset;
1844 View.u32MaxScreenSize = Screen.u32LineSize * Screen.u32Height;
1845
1846 const bool fDisable = RT_BOOL(Screen.u16Flags & VBVA_SCREEN_F_DISABLED);
1847
1848 for (int i = ASMBitFirstSet(aTargetMap, pVGAState->cMonitors);
1849 i >= 0;
1850 i = ASMBitNextSet(aTargetMap, pVGAState->cMonitors, i))
1851 {
1852 Screen.u32ViewIndex = i;
1853
1854 VBVAINFOSCREEN CurScreen;
1855 VBVAINFOVIEW CurView;
1856
1857 rc = VBVAGetInfoViewAndScreen(pVGAState, i, &CurView, &CurScreen);
1858 AssertRC(rc);
1859
1860 if (!memcmp(&Screen, &CurScreen, sizeof (CurScreen)))
1861 continue;
1862
1863 /* The view does not change if _BLANK2 is set. */
1864 if ( (!fDisable || !CurView.u32ViewSize)
1865 && !RT_BOOL(Screen.u16Flags & VBVA_SCREEN_F_BLANK2))
1866 {
1867 View.u32ViewIndex = Screen.u32ViewIndex;
1868
1869 rc = VBVAInfoView(pVGAState, &View);
1870 if (RT_FAILURE(rc))
1871 {
1872 WARN(("VBVAInfoView failed %Rrc\n", rc));
1873 break;
1874 }
1875 }
1876
1877 rc = VBVAInfoScreen(pVGAState, &Screen);
1878 if (RT_FAILURE(rc))
1879 {
1880 WARN(("VBVAInfoScreen failed %Rrc\n", rc));
1881 break;
1882 }
1883 }
1884
1885 return rc;
1886}
1887
1888
1889/**
1890 * Processes VBVAEXHOST_DATA_TYPE_GUESTCTL for vboxVDMAWorkerThread and
1891 * vdmaVBVACtlThreadCreatedEnable.
1892 *
1893 * @returns VBox status code.
1894 * @param pVdma The VDMA channel.
1895 * @param pCmd The command to process. May be safe (not shared
1896 * with the guest).
1897 *
1898 * @thread VDMA
1899 */
1900static int vboxVDMACrGuestCtlProcess(struct VBOXVDMAHOST *pVdma, VBVAEXHOSTCTL *pCmd)
1901{
1902 VBVAEXHOSTCTL_TYPE enmType = pCmd->enmType;
1903 switch (enmType)
1904 {
1905 /*
1906 * See handling of VBOXCMDVBVACTL_TYPE_3DCTL in vboxCmdVBVACmdCtl().
1907 */
1908 case VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE:
1909 if (VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
1910 {
1911 if (pVdma->CrSrvInfo.pfnGuestCtl)
1912 return pVdma->CrSrvInfo.pfnGuestCtl(pVdma->CrSrvInfo.hSvr, pCmd->u.cmd.pu8Cmd, pCmd->u.cmd.cbCmd);
1913
1914 /* Unexpected. */
1915 WARN(("VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE in HGCM-less mode\n"));
1916 }
1917 else
1918 WARN(("VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE for disabled vdma VBVA\n"));
1919 return VERR_INVALID_STATE;
1920
1921 /*
1922 * See handling of VBOXCMDVBVACTL_TYPE_RESIZE in vboxCmdVBVACmdCtl().
1923 */
1924 case VBVAEXHOSTCTL_TYPE_GHH_RESIZE:
1925 if (VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
1926 {
1927 uint32_t cbCmd = pCmd->u.cmd.cbCmd;
1928 if ( !(cbCmd % sizeof(VBOXCMDVBVA_RESIZE_ENTRY))
1929 && cbCmd > 0)
1930 {
1931 uint32_t cElements = cbCmd / sizeof(VBOXCMDVBVA_RESIZE_ENTRY);
1932 VBOXCMDVBVA_RESIZE *pResize = (VBOXCMDVBVA_RESIZE *)pCmd->u.cmd.pu8Cmd;
1933 for (uint32_t i = 0; i < cElements; ++i)
1934 {
1935 VBOXCMDVBVA_RESIZE_ENTRY *pEntry = &pResize->aEntries[i];
1936 int rc = vboxVDMACrGuestCtlResizeEntryProcess(pVdma, pEntry);
1937 if (RT_FAILURE(rc))
1938 {
1939 WARN(("vboxVDMACrGuestCtlResizeEntryProcess failed %Rrc\n", rc));
1940 return rc;
1941 }
1942 }
1943 return VINF_SUCCESS;
1944 }
1945 else
1946 WARN(("invalid buffer size: cbCmd=%#x\n", cbCmd));
1947 return VERR_INVALID_PARAMETER;
1948 }
1949 WARN(("VBVAEXHOSTCTL_TYPE_GHH_RESIZE for disabled vdma VBVA\n"));
1950 return VERR_INVALID_STATE;
1951
1952 /*
1953 * See vdmaVBVACtlEnableSubmitInternal().
1954 */
1955 case VBVAEXHOSTCTL_TYPE_GHH_ENABLE:
1956 case VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED:
1957 {
1958 VBVAENABLE *pEnable = (VBVAENABLE *)pCmd->u.cmd.pu8Cmd;
1959 Assert(pCmd->u.cmd.cbCmd == sizeof(VBVAENABLE));
1960
1961 uint32_t u32Offset = pEnable->u32Offset;
1962 int rc = vdmaVBVAEnableProcess(pVdma, u32Offset);
1963 if (RT_SUCCESS(rc))
1964 {
1965 if (enmType != VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED)
1966 return VINF_SUCCESS;
1967
1968 rc = VBoxVBVAExHPPause(&pVdma->CmdVbva);
1969 if (RT_SUCCESS(rc))
1970 return VINF_SUCCESS;
1971 WARN(("VBoxVBVAExHPPause failed %Rrc\n", rc));
1972 }
1973 else
1974 WARN(("vdmaVBVAEnableProcess failed %Rrc\n", rc));
1975 return rc;
1976 }
1977
1978 /*
1979 * See vdmaVBVACtlDisableSubmitInternal().
1980 */
1981 case VBVAEXHOSTCTL_TYPE_GHH_DISABLE:
1982 {
1983 int rc = vdmaVBVADisableProcess(pVdma, true /* fDoHgcmEnable */);
1984 if (RT_FAILURE(rc))
1985 {
1986 WARN(("vdmaVBVADisableProcess failed %Rrc\n", rc));
1987 return rc;
1988 }
1989
1990 /* do vgaUpdateDisplayAll right away */
1991 VMR3ReqCallNoWait(PDMDevHlpGetVM(pVdma->pVGAState->pDevInsR3), VMCPUID_ANY,
1992 (PFNRT)vgaUpdateDisplayAll, 2, pVdma->pVGAState, /* fFailOnResize = */ false);
1993
1994 return VBoxVDMAThreadTerm(&pVdma->Thread, NULL, NULL, false /* fNotify */);
1995 }
1996
1997 default:
1998 WARN(("unexpected ctl type %Rrc\n", pCmd->enmType));
1999 return VERR_INVALID_PARAMETER;
2000 }
2001}
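
/* A minimal sketch (hypothetical values, guest-side view; not from the source)
 * of the VBVAENABLE payload consumed by the VBVAEXHOSTCTL_TYPE_GHH_ENABLE case
 * above; only u32Offset, the offset of the VBVA buffer within VRAM, is used there. */
#if 0 /* illustrative sketch */
    VBVAENABLE Enable;
    RT_ZERO(Enable);
    Enable.u32Flags  = VBVA_F_ENABLE;
    Enable.u32Offset = 0x400000; /* hypothetical VRAM offset of the VBVA buffer */
#endif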
2002
2003
2004/**
2005 * Copies one page in a VBOXCMDVBVA_OPTYPE_PAGING_TRANSFER command.
2006 *
2007 * @param fIn Whether this is a page-in or page-out operation. The direction
2008 * is VRAM-relative: fIn == true means a transfer to VRAM, false a
2009 * transfer from VRAM.
2010 * @thread VDMA
2011 */
2012static int vboxVDMACrCmdVbvaProcessPagingEl(PPDMDEVINS pDevIns, VBOXCMDVBVAPAGEIDX uPageNo, uint8_t *pbVram, bool fIn)
2013{
2014 RTGCPHYS GCPhysPage = (RTGCPHYS)uPageNo << X86_PAGE_SHIFT;
2015 PGMPAGEMAPLOCK Lock;
2016 int rc;
2017
2018 if (fIn)
2019 {
2020 const void *pvPage;
2021 rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, GCPhysPage, 0, &pvPage, &Lock);
2022 if (RT_SUCCESS(rc))
2023 {
2024 memcpy(pbVram, pvPage, PAGE_SIZE);
2025 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
2026 }
2027 else
2028 WARN(("PDMDevHlpPhysGCPhys2CCPtrReadOnly failed %Rrc", rc));
2029 }
2030 else
2031 {
2032 void *pvPage;
2033 rc = PDMDevHlpPhysGCPhys2CCPtr(pDevIns, GCPhysPage, 0, &pvPage, &Lock);
2034 if (RT_SUCCESS(rc))
2035 {
2036 memcpy(pvPage, pbVram, PAGE_SIZE);
2037 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
2038 }
2039 else
2040 WARN(("PDMDevHlpPhysGCPhys2CCPtr failed %Rrc", rc));
2041 }
2042
2043 return rc;
2044}
2045
2046/**
2047 * Handles a VBOXCMDVBVA_OPTYPE_PAGING_TRANSFER command.
2048 *
2049 * @return 0 on success, -1 on failure.
2050 *
2051 * @thread VDMA
2052 */
2053static int8_t vboxVDMACrCmdVbvaPageTransfer(PVGASTATE pVGAState, VBOXCMDVBVA_HDR const volatile *pHdr, uint32_t cbCmd,
2054 const VBOXCMDVBVA_PAGING_TRANSFER_DATA *pData)
2055{
2056 /*
2057 * Extract and validate information.
2058 */
2059 AssertMsgReturn(cbCmd >= sizeof(VBOXCMDVBVA_PAGING_TRANSFER), ("%#x\n", cbCmd), -1);
2060
2061 bool const fIn = RT_BOOL(pHdr->u8Flags & VBOXCMDVBVA_OPF_PAGING_TRANSFER_IN);
2062
2063 uint32_t cbPageNumbers = cbCmd - RT_OFFSETOF(VBOXCMDVBVA_PAGING_TRANSFER, Data.aPageNumbers);
2064 AssertMsgReturn(!(cbPageNumbers % sizeof(VBOXCMDVBVAPAGEIDX)), ("%#x\n", cbPageNumbers), -1);
2065 VBOXCMDVBVAPAGEIDX const cPages = cbPageNumbers / sizeof(VBOXCMDVBVAPAGEIDX);
2066
2067 VBOXCMDVBVAOFFSET offVRam = pData->Alloc.u.offVRAM;
2068 AssertMsgReturn(!(offVRam & X86_PAGE_OFFSET_MASK), ("%#x\n", offVRam), -1);
2069 AssertMsgReturn(offVRam < pVGAState->vram_size, ("%#x vs %#x\n", offVRam, pVGAState->vram_size), -1);
2070 uint32_t cVRamPages = (pVGAState->vram_size - offVRam) >> X86_PAGE_SHIFT;
2071 AssertMsgReturn(cPages <= cVRamPages, ("cPages=%#x vs cVRamPages=%#x @ offVRam=%#x\n", cPages, cVRamPages, offVRam), -1);
2072
2073 /*
2074 * Execute the command.
2075 */
2076 uint8_t *pbVRam = (uint8_t *)pVGAState->vram_ptrR3 + offVRam;
2077 for (uint32_t iPage = 0; iPage < cPages; iPage++, pbVRam += X86_PAGE_SIZE)
2078 {
2079 uint32_t uPageNo = pData->aPageNumbers[iPage];
2080 int rc = vboxVDMACrCmdVbvaProcessPagingEl(pVGAState->pDevInsR3, uPageNo, pbVRam, fIn);
2081 AssertMsgReturn(RT_SUCCESS(rc), ("#%#x: uPageNo=%#x rc=%Rrc\n", iPage, uPageNo, rc), -1);
2082 }
2083 return 0;
2084}
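
/* Illustrative size arithmetic (hypothetical count; not from the source) for
 * the validation above: the page-number array simply fills whatever space
 * follows the fixed part of VBOXCMDVBVA_PAGING_TRANSFER, and the handler
 * recovers the page count from cbCmd by reversing this computation. */
#if 0 /* illustrative sketch */
    uint32_t const cPages = 3; /* hypothetical */
    uint32_t const cbCmd  = RT_OFFSETOF(VBOXCMDVBVA_PAGING_TRANSFER, Data.aPageNumbers)
                          + cPages * sizeof(VBOXCMDVBVAPAGEIDX);
#endif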
2085
2086
2087/**
2088 * Handles VBOXCMDVBVA_OPTYPE_PAGING_FILL.
2089 *
2090 * @returns 0 on success, -1 on failure.
2091 * @param pVGAState The VGA state.
2092 * @param pFill The fill command (volatile).
2093 *
2094 * @thread VDMA
2095 */
2096static int8_t vboxVDMACrCmdVbvaPagingFill(PVGASTATE pVGAState, VBOXCMDVBVA_PAGING_FILL *pFill)
2097{
2098 VBOXCMDVBVA_PAGING_FILL FillSafe = *pFill;
2099 VBOXCMDVBVAOFFSET offVRAM = FillSafe.offVRAM;
2100 if (!(offVRAM & X86_PAGE_OFFSET_MASK))
2101 {
2102 if (offVRAM <= pVGAState->vram_size)
2103 {
2104 uint32_t cbFill = FillSafe.u32CbFill;
2105 AssertStmt(!(cbFill & 3), cbFill &= ~(uint32_t)3);
2106
2107 if ( cbFill < pVGAState->vram_size
2108 && offVRAM <= pVGAState->vram_size - cbFill)
2109 {
2110 uint32_t *pu32Vram = (uint32_t *)((uint8_t *)pVGAState->vram_ptrR3 + offVRAM);
2111 uint32_t const u32Color = FillSafe.u32Pattern;
2112
2113 uint32_t cLoops = cbFill / 4;
2114 while (cLoops-- > 0)
2115 pu32Vram[cLoops] = u32Color;
2116
2117 return 0;
2118
2119 }
2120 else
2121 WARN(("invalid cbFill"));
2122
2123 }
2124 WARN(("invalid vram offset"));
2125
2126 }
2127 else
2128 WARN(("offVRAM address is not on page boundary\n"));
2129 return -1;
2130}
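
/* A minimal sketch (hypothetical values; not from the source) of a paging
 * fill request as validated above: the VRAM offset must be page aligned and
 * the byte count a multiple of four, since the pattern is written as 32-bit
 * words. */
#if 0 /* illustrative sketch */
    VBOXCMDVBVA_PAGING_FILL Fill;
    RT_ZERO(Fill);
    Fill.offVRAM    = 0x10000;     /* hypothetical, page aligned */
    Fill.u32CbFill  = 0x1000;      /* multiple of 4 */
    Fill.u32Pattern = 0xff000000;  /* hypothetical fill color */
#endif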
2131
2132/**
2133 * Process command data.
2134 *
2135 * @returns Zero or positive on success, negative on failure.
2136 * @param pVdma The VDMA channel.
2137 * @param pCmd The command data to process. Assumed volatile.
2138 * @param cbCmd The amount of command data.
2139 *
2140 * @thread VDMA
2141 */
2142static int8_t vboxVDMACrCmdVbvaProcessCmdData(struct VBOXVDMAHOST *pVdma, const VBOXCMDVBVA_HDR *pCmd, uint32_t cbCmd)
2143{
2144 uint8_t bOpCode = pCmd->u8OpCode;
2145 switch (bOpCode)
2146 {
2147 case VBOXCMDVBVA_OPTYPE_NOPCMD:
2148 return 0;
2149
2150 case VBOXCMDVBVA_OPTYPE_PAGING_TRANSFER:
2151 return vboxVDMACrCmdVbvaPageTransfer(pVdma->pVGAState, pCmd, cbCmd, &((VBOXCMDVBVA_PAGING_TRANSFER *)pCmd)->Data);
2152
2153 case VBOXCMDVBVA_OPTYPE_PAGING_FILL:
2154 if (cbCmd == sizeof(VBOXCMDVBVA_PAGING_FILL))
2155 return vboxVDMACrCmdVbvaPagingFill(pVdma->pVGAState, (VBOXCMDVBVA_PAGING_FILL *)pCmd);
2156 WARN(("cmd too small"));
2157 return -1;
2158
2159 default:
2160 if (pVdma->CrSrvInfo.pfnCmd)
2161 return pVdma->CrSrvInfo.pfnCmd(pVdma->CrSrvInfo.hSvr, pCmd, cbCmd);
2162 /* Unexpected. */
2163 WARN(("no HGCM"));
2164 return -1;
2165 }
2166}
2167
2168# if 0
2169typedef struct VBOXCMDVBVA_PAGING_TRANSFER
2170{
2171 VBOXCMDVBVA_HDR Hdr;
2172 /* for now can only contain offVRAM.
2173 * paging transfer can NOT be initiated for allocations having host 3D object (hostID) associated */
2174 VBOXCMDVBVA_ALLOCINFO Alloc;
2175 uint32_t u32Reserved;
2176 VBOXCMDVBVA_SYSMEMEL aSysMem[1];
2177} VBOXCMDVBVA_PAGING_TRANSFER;
2178# endif
2179
2180AssertCompile(sizeof (VBOXCMDVBVA_HDR) == 8);
2181AssertCompile(sizeof (VBOXCMDVBVA_ALLOCINFO) == 4);
2182AssertCompile(sizeof (VBOXCMDVBVAPAGEIDX) == 4);
2183AssertCompile(!(X86_PAGE_SIZE % sizeof (VBOXCMDVBVAPAGEIDX)));
2184
2185# define VBOXCMDVBVA_NUM_SYSMEMEL_PER_PAGE (X86_PAGE_SIZE / sizeof (VBOXCMDVBVA_SYSMEMEL))
2186
2187/**
2188 * Worker for vboxVDMACrCmdProcess.
2189 *
2190 * @returns 8-bit result: zero or positive on success, negative on failure.
2191 * @param pVdma The VDMA channel.
2192 * @param pCmd The command. Consider volatile!
2193 * @param cbCmd The size of what @a pCmd points to. At least
2194 * sizeof(VBOXCMDVBVA_HDR).
2195 * @param fRecursion Set if recursive call, false if not.
2196 *
2197 * @thread VDMA
2198 */
2199static int8_t vboxVDMACrCmdVbvaProcess(struct VBOXVDMAHOST *pVdma, const VBOXCMDVBVA_HDR *pCmd, uint32_t cbCmd, bool fRecursion)
2200{
2201 int8_t i8Result = 0;
2202 uint8_t const bOpCode = pCmd->u8OpCode;
2203 LogRelFlow(("VDMA: vboxVDMACrCmdVbvaProcess: ENTER, bOpCode=%u\n", bOpCode));
2204 switch (bOpCode)
2205 {
2206 case VBOXCMDVBVA_OPTYPE_SYSMEMCMD:
2207 {
2208 /*
2209 * Extract the command physical address and size.
2210 */
2211 AssertMsgReturn(cbCmd >= sizeof(VBOXCMDVBVA_SYSMEMCMD), ("%#x\n", cbCmd), -1);
2212 RTGCPHYS GCPhysCmd = ((VBOXCMDVBVA_SYSMEMCMD *)pCmd)->phCmd;
2213 uint32_t cbCmdPart = X86_PAGE_SIZE - (uint32_t)(GCPhysCmd & X86_PAGE_OFFSET_MASK);
2214
2215 uint32_t cbRealCmd = pCmd->u8Flags;
2216 cbRealCmd |= (uint32_t)pCmd->u.u8PrimaryID << 8;
2217 AssertMsgReturn(cbRealCmd >= sizeof(VBOXCMDVBVA_HDR), ("%#x\n", cbRealCmd), -1);
2218 AssertMsgReturn(cbRealCmd <= _1M, ("%#x\n", cbRealCmd), -1);
2219
2220 /*
2221 * Lock down the first page of the memory specified by the command.
2222 */
2223 PGMPAGEMAPLOCK Lock;
2224 PVGASTATE pVGAState = pVdma->pVGAState;
2225 PPDMDEVINS pDevIns = pVGAState->pDevInsR3;
2226 VBOXCMDVBVA_HDR const *pRealCmdHdr = NULL;
2227 int rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, GCPhysCmd, 0, (const void **)&pRealCmdHdr, &Lock);
2228 if (!RT_SUCCESS(rc))
2229 {
2230 WARN(("PDMDevHlpPhysGCPhys2CCPtrReadOnly failed %Rrc\n", rc));
2231 return -1;
2232 }
2233 Assert((GCPhysCmd & PAGE_OFFSET_MASK) == (((uintptr_t)pRealCmdHdr) & PAGE_OFFSET_MASK));
2234
2235 /*
2236 * All fits within one page? We can handle that pretty efficiently.
2237 */
2238 if (cbRealCmd <= cbCmdPart)
2239 {
2240 i8Result = vboxVDMACrCmdVbvaProcessCmdData(pVdma, pRealCmdHdr, cbRealCmd);
2241 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
2242 }
2243 else
2244 {
2245 /*
2246 * To keep things damn simple, just double buffer cross page or
2247 * multipage requests.
2248 */
2249 uint8_t *pbCmdBuf = (uint8_t *)RTMemTmpAllocZ(RT_ALIGN_Z(cbRealCmd, 16));
2250 if (pbCmdBuf)
2251 {
2252 memcpy(pbCmdBuf, pRealCmdHdr, cbCmdPart);
2253 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
2254 pRealCmdHdr = NULL;
2255
2256 rc = PDMDevHlpPhysRead(pDevIns, GCPhysCmd + cbCmdPart, &pbCmdBuf[cbCmdPart], cbRealCmd - cbCmdPart);
2257 if (RT_SUCCESS(rc))
2258 i8Result = vboxVDMACrCmdVbvaProcessCmdData(pVdma, (VBOXCMDVBVA_HDR const *)pbCmdBuf, cbRealCmd);
2259 else
2260 LogRelMax(200, ("VDMA: Error reading %#x bytes of guest memory %#RGp!\n", cbRealCmd, GCPhysCmd));
2261 RTMemTmpFree(pbCmdBuf);
2262 }
2263 else
2264 {
2265 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &Lock);
2266 LogRelMax(200, ("VDMA: Out of temporary memory! %#x\n", cbRealCmd));
2267 i8Result = -1;
2268 }
2269 }
2270 return i8Result;
2271 }
2272
2273 case VBOXCMDVBVA_OPTYPE_COMPLEXCMD:
2274 {
2275 Assert(cbCmd >= sizeof(VBOXCMDVBVA_HDR)); /* caller already checked this */
2276 AssertReturn(!fRecursion, -1);
2277
2278 /* Skip current command. */
2279 cbCmd -= sizeof(*pCmd);
2280 pCmd++;
2281
2282 /* Process subcommands. */
2283 while (cbCmd > 0)
2284 {
2285 AssertMsgReturn(cbCmd >= sizeof(VBOXCMDVBVA_HDR), ("%#x\n", cbCmd), -1);
2286
2287 uint16_t cbCurCmd = pCmd->u2.complexCmdEl.u16CbCmdHost;
2288 AssertMsgReturn(cbCurCmd <= cbCmd, ("cbCurCmd=%#x, cbCmd=%#x\n", cbCurCmd, cbCmd), -1);
2289
2290 i8Result = vboxVDMACrCmdVbvaProcess(pVdma, pCmd, cbCurCmd, true /*fRecursion*/);
2291 if (i8Result < 0)
2292 {
2293 WARN(("vboxVDMACrCmdVbvaProcess failed"));
2294 return i8Result;
2295 }
2296
2297 /* Advance to the next command. */
2298 pCmd = (VBOXCMDVBVA_HDR *)((uintptr_t)pCmd + cbCurCmd);
2299 cbCmd -= cbCurCmd;
2300 }
2301 return 0;
2302 }
2303
2304 default:
2305 i8Result = vboxVDMACrCmdVbvaProcessCmdData(pVdma, pCmd, cbCmd);
2306 LogRelFlow(("VDMA: vboxVDMACrCmdVbvaProcess: LEAVE, opCode(%i)\n", pCmd->u8OpCode));
2307 return i8Result;
2308 }
2309}
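
/* Illustrative layout (not from the source) of a VBOXCMDVBVA_OPTYPE_COMPLEXCMD
 * buffer as parsed above: the leading header is followed by back-to-back
 * subcommands, each starting with its own VBOXCMDVBVA_HDR whose
 * u2.complexCmdEl.u16CbCmdHost gives the full size of that element:
 *
 *   [HDR COMPLEXCMD][HDR subcmd #1 | body][HDR subcmd #2 | body]...
 *
 * Recursion is limited to one level, so a subcommand may not itself be a
 * COMPLEXCMD. */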
2310
2311/**
2312 * Worker for vboxVDMAWorkerThread handling VBVAEXHOST_DATA_TYPE_CMD.
2313 *
2314 * @thread VDMA
2315 */
2316static void vboxVDMACrCmdProcess(struct VBOXVDMAHOST *pVdma, uint8_t* pbCmd, uint32_t cbCmd)
2317{
2318 if ( cbCmd > 0
2319 && *pbCmd == VBOXCMDVBVA_OPTYPE_NOP)
2320 { /* nop */ }
2321 else if (cbCmd >= sizeof(VBOXCMDVBVA_HDR))
2322 {
2323 PVBOXCMDVBVA_HDR pCmd = (PVBOXCMDVBVA_HDR)pbCmd;
2324
2325 /* check if the command is cancelled */
2326 if (ASMAtomicCmpXchgU8(&pCmd->u8State, VBOXCMDVBVA_STATE_IN_PROGRESS, VBOXCMDVBVA_STATE_SUBMITTED))
2327 {
2328 /* Process it. */
2329 pCmd->u.i8Result = vboxVDMACrCmdVbvaProcess(pVdma, pCmd, cbCmd, false /*fRecursion*/);
2330 }
2331 else
2332 Assert(pCmd->u8State == VBOXCMDVBVA_STATE_CANCELLED);
2333 }
2334 else
2335 WARN(("invalid command size"));
2336
2337}
2338
2339/**
2340 * Worker for vboxVDMAConstruct().
2341 */
2342static int vboxVDMACrCtlHgsmiSetup(struct VBOXVDMAHOST *pVdma)
2343{
2344 PVBOXVDMACMD_CHROMIUM_CTL_CRHGSMI_SETUP pCmd;
2345 pCmd = (PVBOXVDMACMD_CHROMIUM_CTL_CRHGSMI_SETUP)vboxVDMACrCtlCreate(VBOXVDMACMD_CHROMIUM_CTL_TYPE_CRHGSMI_SETUP, sizeof(*pCmd));
2346 int rc;
2347 if (pCmd)
2348 {
2349 PVGASTATE pVGAState = pVdma->pVGAState;
2350 pCmd->pvVRamBase = pVGAState->vram_ptrR3;
2351 pCmd->cbVRam = pVGAState->vram_size;
2352 pCmd->pLed = &pVGAState->Led3D;
2353 pCmd->CrClientInfo.hClient = pVdma;
2354 pCmd->CrClientInfo.pfnCallout = vboxCmdVBVACmdCallout;
2355 rc = vboxVDMACrCtlPost(pVGAState, &pCmd->Hdr, sizeof (*pCmd));
2356 if (RT_SUCCESS(rc))
2357 {
2358 rc = vboxVDMACrCtlGetRc(&pCmd->Hdr);
2359 if (RT_SUCCESS(rc))
2360 pVdma->CrSrvInfo = pCmd->CrCmdServerInfo;
2361 else if (rc != VERR_NOT_SUPPORTED)
2362 WARN(("vboxVDMACrCtlGetRc returned %Rrc\n", rc));
2363 }
2364 else
2365 WARN(("vboxVDMACrCtlPost failed %Rrc\n", rc));
2366
2367 vboxVDMACrCtlRelease(&pCmd->Hdr);
2368 }
2369 else
2370 rc = VERR_NO_MEMORY;
2371
2372 if (!RT_SUCCESS(rc))
2373 memset(&pVdma->CrSrvInfo, 0, sizeof (pVdma->CrSrvInfo));
2374
2375 return rc;
2376}
2377
2378/**
2379 * Check if this is an external command to be passed to the chromium backend.
2380 *
2381 * @retval VINF_NOT_SUPPORTED if not a chromium command.
2382 *
2383 * @note cbCmdDr is at least sizeof(VBOXVDMACBUF_DR).
2384 */
2385static int vboxVDMACmdCheckCrCmd(struct VBOXVDMAHOST *pVdma, PVBOXVDMACBUF_DR pCmdDr, uint32_t cbCmdDr)
2386{
2387 uint32_t cbDmaCmd = 0;
2388 uint8_t *pbRam = pVdma->pVGAState->vram_ptrR3;
2389 int rc = VINF_NOT_SUPPORTED;
2390
2391 cbDmaCmd = pCmdDr->cbBuf;
2392
2393 PVBOXVDMACMD pDmaCmd;
2394 if (pCmdDr->fFlags & VBOXVDMACBUF_FLAG_BUF_FOLLOWS_DR)
2395 {
2396 AssertReturn(cbCmdDr >= sizeof(*pCmdDr) + VBOXVDMACMD_HEADER_SIZE(), VERR_INVALID_PARAMETER);
2397 AssertReturn(cbDmaCmd >= cbCmdDr - sizeof(*pCmdDr) - VBOXVDMACMD_HEADER_SIZE(), VERR_INVALID_PARAMETER);
2398
2399 pDmaCmd = VBOXVDMACBUF_DR_TAIL(pCmdDr, VBOXVDMACMD);
2400 }
2401 else if (pCmdDr->fFlags & VBOXVDMACBUF_FLAG_BUF_VRAM_OFFSET)
2402 {
2403 VBOXVIDEOOFFSET offBuf = pCmdDr->Location.offVramBuf;
2404 AssertReturn( cbDmaCmd <= pVdma->pVGAState->vram_size
2405 && offBuf <= pVdma->pVGAState->vram_size - cbDmaCmd, VERR_INVALID_PARAMETER);
2406 pDmaCmd = (VBOXVDMACMD *)(pbRam + offBuf);
2407 }
2408 else
2409 pDmaCmd = NULL;
2410 if (pDmaCmd)
2411 {
2412 Assert(cbDmaCmd >= VBOXVDMACMD_HEADER_SIZE());
2413 uint32_t cbBody = VBOXVDMACMD_BODY_SIZE(cbDmaCmd);
2414
2415 switch (pDmaCmd->enmType)
2416 {
2417 case VBOXVDMACMD_TYPE_CHROMIUM_CMD:
2418 {
2419 PVBOXVDMACMD_CHROMIUM_CMD pCrCmd = VBOXVDMACMD_BODY(pDmaCmd, VBOXVDMACMD_CHROMIUM_CMD);
2420 AssertReturn(cbBody >= sizeof(*pCrCmd), VERR_INVALID_PARAMETER);
2421
2422 PVGASTATE pVGAState = pVdma->pVGAState;
2423 rc = VINF_SUCCESS;
2424 if (pVGAState->pDrv->pfnCrHgsmiCommandProcess)
2425 {
2426 VBoxSHGSMICommandMarkAsynchCompletion(pCmdDr);
2427 pVGAState->pDrv->pfnCrHgsmiCommandProcess(pVGAState->pDrv, pCrCmd, cbBody);
2428 break;
2429 }
2430
2431 AssertFailed();
2432 int tmpRc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, pCmdDr);
2433 AssertRC(tmpRc);
2434 break;
2435 }
2436
2437 case VBOXVDMACMD_TYPE_DMA_BPB_TRANSFER:
2438 {
2439 PVBOXVDMACMD_DMA_BPB_TRANSFER pTransfer = VBOXVDMACMD_BODY(pDmaCmd, VBOXVDMACMD_DMA_BPB_TRANSFER);
2440 AssertReturn(cbBody >= sizeof(*pTransfer), VERR_INVALID_PARAMETER);
2441
2442 rc = vboxVDMACmdExecBpbTransfer(pVdma, pTransfer, sizeof (*pTransfer));
2443 AssertRC(rc);
2444 if (RT_SUCCESS(rc))
2445 {
2446 pCmdDr->rc = VINF_SUCCESS;
2447 rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, pCmdDr);
2448 AssertRC(rc);
2449 rc = VINF_SUCCESS;
2450 }
2451 break;
2452 }
2453
2454 default:
2455 break;
2456 }
2457 }
2458 return rc;
2459}
2460
2461/**
2462 * @interface_method_impl{PDMIDISPLAYVBVACALLBACKS,pfnCrHgsmiControlCompleteAsync,
2463 * Some indirect completion magic, you gotta love this code! }
2464 */
2465DECLCALLBACK(int) vboxVDMACrHgsmiCommandCompleteAsync(PPDMIDISPLAYVBVACALLBACKS pInterface, PVBOXVDMACMD_CHROMIUM_CMD pCmd, int rc)
2466{
2467 PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
2468 PHGSMIINSTANCE pIns = pVGAState->pHGSMI;
2469 VBOXVDMACMD *pDmaHdr = VBOXVDMACMD_FROM_BODY(pCmd);
2470 VBOXVDMACBUF_DR *pDr = VBOXVDMACBUF_DR_FROM_TAIL(pDmaHdr);
2471
2472 AssertRC(rc);
2473 pDr->rc = rc;
2474
2475 Assert(pVGAState->fGuestCaps & VBVACAPS_COMPLETEGCMD_BY_IOREAD);
2476 rc = VBoxSHGSMICommandComplete(pIns, pDr);
2477 AssertRC(rc);
2478
2479 return rc;
2480}
2481
2482/**
2483 * Worker for vboxVDMACmdExecBlt().
2484 */
2485static int vboxVDMACmdExecBltPerform(PVBOXVDMAHOST pVdma, const VBOXVIDEOOFFSET offDst, const VBOXVIDEOOFFSET offSrc,
2486 const PVBOXVDMA_SURF_DESC pDstDesc, const PVBOXVDMA_SURF_DESC pSrcDesc,
2487 const VBOXVDMA_RECTL *pDstRectl, const VBOXVDMA_RECTL *pSrcRectl)
2488{
2489 /*
2490 * We do not support color conversion.
2491 */
2492 AssertReturn(pDstDesc->format == pSrcDesc->format, VERR_INVALID_FUNCTION);
2493
2494 /* we do not support stretching (checked by caller) */
2495 Assert(pDstRectl->height == pSrcRectl->height);
2496 Assert(pDstRectl->width == pSrcRectl->width);
2497
2498 uint8_t *pbRam = pVdma->pVGAState->vram_ptrR3;
2499 AssertCompileSize(pVdma->pVGAState->vram_size, sizeof(uint32_t));
2500 uint32_t cbVRamSize = pVdma->pVGAState->vram_size;
2501 uint8_t *pbDstSurf = pbRam + offDst;
2502 uint8_t *pbSrcSurf = pbRam + offSrc;
2503
2504 if ( pDstDesc->width == pDstRectl->width
2505 && pSrcDesc->width == pSrcRectl->width
2506 && pSrcDesc->width == pDstDesc->width
2507 && pSrcDesc->pitch == pDstDesc->pitch)
2508 {
2509 Assert(!pDstRectl->left);
2510 Assert(!pSrcRectl->left);
2511 uint32_t offBoth = pDstDesc->pitch * pDstRectl->top;
2512 uint32_t cbToCopy = pDstDesc->pitch * pDstRectl->height;
2513
2514 if ( cbToCopy <= cbVRamSize
2515 && (uintptr_t)(pbDstSurf + offBoth) - (uintptr_t)pbRam <= cbVRamSize - cbToCopy
2516 && (uintptr_t)(pbSrcSurf + offBoth) - (uintptr_t)pbRam <= cbVRamSize - cbToCopy)
2517 memcpy(pbDstSurf + offBoth, pbSrcSurf + offBoth, cbToCopy);
2518 else
2519 return VERR_INVALID_PARAMETER;
2520 }
2521 else
2522 {
2523 uint32_t offDstLineStart = pDstRectl->left * pDstDesc->bpp >> 3;
2524 uint32_t offDstLineEnd = ((pDstRectl->left * pDstDesc->bpp + 7) >> 3) + ((pDstDesc->bpp * pDstRectl->width + 7) >> 3);
2525 uint32_t cbDstLine = offDstLineEnd - offDstLineStart;
2526 uint32_t offDstStart = pDstDesc->pitch * pDstRectl->top + offDstLineStart;
2527 Assert(cbDstLine <= pDstDesc->pitch);
2528 uint32_t cbDstSkip = pDstDesc->pitch;
2529 uint8_t *pbDstStart = pbDstSurf + offDstStart;
2530
2531 uint32_t offSrcLineStart = pSrcRectl->left * pSrcDesc->bpp >> 3;
2532# ifdef VBOX_STRICT
2533 uint32_t offSrcLineEnd = ((pSrcRectl->left * pSrcDesc->bpp + 7) >> 3) + ((pSrcDesc->bpp * pSrcRectl->width + 7) >> 3);
2534 uint32_t cbSrcLine = offSrcLineEnd - offSrcLineStart;
2535# endif
2536 uint32_t offSrcStart = pSrcDesc->pitch * pSrcRectl->top + offSrcLineStart;
2537 Assert(cbSrcLine <= pSrcDesc->pitch);
2538 uint32_t cbSrcSkip = pSrcDesc->pitch;
2539 const uint8_t *pbSrcStart = pbSrcSurf + offSrcStart;
2540
2541 Assert(cbDstLine == cbSrcLine);
2542
2543 for (uint32_t i = 0; i < pDstRectl->height; ++i)
2544 {
2545 if ( cbDstLine <= cbVRamSize
2546 && (uintptr_t)pbDstStart - (uintptr_t)pbRam <= cbVRamSize - cbDstLine
2547 && (uintptr_t)pbSrcStart - (uintptr_t)pbRam <= cbVRamSize - cbDstLine)
2548 memcpy(pbDstStart, pbSrcStart, cbDstLine);
2549 else
2550 return VERR_INVALID_PARAMETER;
2551 pbDstStart += cbDstSkip;
2552 pbSrcStart += cbSrcSkip;
2553 }
2556 }
2557 return VINF_SUCCESS;
2558}
2559
2560#if 0 /* unused */
2561static void vboxVDMARectlUnite(VBOXVDMA_RECTL * pRectl1, const VBOXVDMA_RECTL * pRectl2)
2562{
2563 if (!pRectl1->width)
2564 *pRectl1 = *pRectl2;
2565 else
2566 {
2567 int16_t x21 = pRectl1->left + pRectl1->width;
2568 int16_t x22 = pRectl2->left + pRectl2->width;
2569 if (pRectl1->left > pRectl2->left)
2570 {
2571 pRectl1->left = pRectl2->left;
2572 pRectl1->width = x21 < x22 ? x22 - pRectl1->left : x21 - pRectl1->left;
2573 }
2574 else if (x21 < x22)
2575 pRectl1->width = x22 - pRectl1->left;
2576
2577 x21 = pRectl1->top + pRectl1->height;
2578 x22 = pRectl2->top + pRectl2->height;
2579 if (pRectl1->top > pRectl2->top)
2580 {
2581 pRectl1->top = pRectl2->top;
2582 pRectl1->height = x21 < x22 ? x22 - pRectl1->top : x21 - pRectl1->top;
2583 }
2584 else if (x21 < x22)
2585 pRectl1->height = x22 - pRectl1->top;
2586 }
2587}
2588#endif /* unused */
2589
2590/**
2591 * Handles VBOXVDMACMD_TYPE_DMA_PRESENT_BLT for vboxVDMACmdExec().
2592 *
2593 * @returns number of bytes (positive) of the full command on success,
2594 * otherwise a negative error status (VERR_XXX).
2595 *
2596 * @param pVdma The VDMA channel.
2597 * @param pBlt Blit command buffer. This is to be considered
2598 * volatile!
2599 * @param cbBuffer Number of bytes accessible at @a pBlt.
2600 */
2601static int vboxVDMACmdExecBlt(PVBOXVDMAHOST pVdma, const PVBOXVDMACMD_DMA_PRESENT_BLT pBlt, uint32_t cbBuffer)
2602{
2603 /*
2604 * Validate and make a local copy of the blt command up to the rectangle array.
2605 */
2606 AssertReturn(cbBuffer >= RT_UOFFSETOF(VBOXVDMACMD_DMA_PRESENT_BLT, aDstSubRects), VERR_INVALID_PARAMETER);
2607 VBOXVDMACMD_DMA_PRESENT_BLT BltSafe;
2608 memcpy(&BltSafe, pBlt, RT_UOFFSETOF(VBOXVDMACMD_DMA_PRESENT_BLT, aDstSubRects));
2609
2610 AssertReturn(BltSafe.cDstSubRects < _8M, VERR_INVALID_PARAMETER);
2611 uint32_t const cbBlt = RT_UOFFSETOF(VBOXVDMACMD_DMA_PRESENT_BLT, aDstSubRects[BltSafe.cDstSubRects]);
2612 AssertReturn(cbBuffer >= cbBlt, VERR_INVALID_PARAMETER);
2613
2614
2615 /*
2616 * We do not support stretching.
2617 */
2618 AssertReturn(BltSafe.srcRectl.width == BltSafe.dstRectl.width, VERR_INVALID_FUNCTION);
2619 AssertReturn(BltSafe.srcRectl.height == BltSafe.dstRectl.height, VERR_INVALID_FUNCTION);
2620
2621 Assert(BltSafe.cDstSubRects);
2622
2623 //VBOXVDMA_RECTL updateRectl = {0, 0, 0, 0}; - pointless
2624
2625 if (BltSafe.cDstSubRects)
2626 {
2627 for (uint32_t i = 0; i < BltSafe.cDstSubRects; ++i)
2628 {
2629 VBOXVDMA_RECTL dstSubRectl = pBlt->aDstSubRects[i];
2630 VBOXVDMA_RECTL srcSubRectl = dstSubRectl;
2631
2632 dstSubRectl.left += BltSafe.dstRectl.left;
2633 dstSubRectl.top += BltSafe.dstRectl.top;
2634
2635 srcSubRectl.left += BltSafe.srcRectl.left;
2636 srcSubRectl.top += BltSafe.srcRectl.top;
2637
2638 int rc = vboxVDMACmdExecBltPerform(pVdma, BltSafe.offDst, BltSafe.offSrc, &BltSafe.dstDesc, &BltSafe.srcDesc,
2639 &dstSubRectl, &srcSubRectl);
2640 AssertRCReturn(rc, rc);
2641
2642 //vboxVDMARectlUnite(&updateRectl, &dstSubRectl); - pointless
2643 }
2644 }
2645 else
2646 {
2647 int rc = vboxVDMACmdExecBltPerform(pVdma, BltSafe.offDst, BltSafe.offSrc, &BltSafe.dstDesc, &BltSafe.srcDesc,
2648 &BltSafe.dstRectl, &BltSafe.srcRectl);
2649 AssertRCReturn(rc, rc);
2650
2651 //vboxVDMARectlUnite(&updateRectl, &BltSafe.dstRectl); - pointless
2652 }
2653
2654 return cbBlt;
2655}
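
/* Illustrative size arithmetic (hypothetical count; not from the source) for
 * the variable-length command above: with two sub-rectangles the full command
 * size, which is also the value returned to the buffer-advance loop in
 * vboxVDMACmdExec(), is the fixed part plus two VBOXVDMA_RECTL entries. */
#if 0 /* illustrative sketch */
    uint32_t const cRects = 2; /* hypothetical */
    uint32_t const cbBlt  = RT_UOFFSETOF(VBOXVDMACMD_DMA_PRESENT_BLT, aDstSubRects[cRects]);
#endif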
2656
2657
2658/**
2659 * Handles VBOXVDMACMD_TYPE_DMA_BPB_TRANSFER for vboxVDMACmdCheckCrCmd() and
2660 * vboxVDMACmdExec().
2661 *
2662 * @returns number of bytes (positive) of the full command on success,
2663 * otherwise a negative error status (VERR_XXX).
2664 *
2665 * @param pVdma The VDMA channel.
2666 * @param pTransfer Transfer command buffer. This is to be considered
2667 * volatile!
2668 * @param cbBuffer Number of bytes accessible at @a pTransfer.
2669 */
2670static int vboxVDMACmdExecBpbTransfer(PVBOXVDMAHOST pVdma, const VBOXVDMACMD_DMA_BPB_TRANSFER *pTransfer, uint32_t cbBuffer)
2671{
2672 /*
2673 * Make a copy of the command (it's volatile).
2674 */
2675 AssertReturn(cbBuffer >= sizeof(*pTransfer), VERR_INVALID_PARAMETER);
2676 VBOXVDMACMD_DMA_BPB_TRANSFER const TransferSafeCopy = *pTransfer;
2677 pTransfer = &TransferSafeCopy;
2678
2679 PVGASTATE pVGAState = pVdma->pVGAState;
2680 PPDMDEVINS pDevIns = pVGAState->pDevInsR3;
2681 uint8_t *pbRam = pVGAState->vram_ptrR3;
2682 uint32_t cbTransfer = TransferSafeCopy.cbTransferSize;
2683
2684 /*
2685 * Validate VRAM offset.
2686 */
2687 if (TransferSafeCopy.fFlags & VBOXVDMACMD_DMA_BPB_TRANSFER_F_SRC_VRAMOFFSET)
2688 AssertReturn( cbTransfer <= pVGAState->vram_size
2689 && TransferSafeCopy.Src.offVramBuf <= pVGAState->vram_size - cbTransfer,
2690 VERR_INVALID_PARAMETER);
2691
2692 if (TransferSafeCopy.fFlags & VBOXVDMACMD_DMA_BPB_TRANSFER_F_DST_VRAMOFFSET)
2693 AssertReturn( cbTransfer <= pVGAState->vram_size
2694 && TransferSafeCopy.Dst.offVramBuf <= pVGAState->vram_size - cbTransfer,
2695 VERR_INVALID_PARAMETER);
2696
2697 /*
2698 * Transfer loop.
2699 */
2700 uint32_t cbTransfered = 0;
2701 int rc = VINF_SUCCESS;
2702 do
2703 {
2704 uint32_t cbSubTransfer = cbTransfer;
2705
2706 const void *pvSrc;
2707 bool fSrcLocked = false;
2708 PGMPAGEMAPLOCK SrcLock;
2709 if (TransferSafeCopy.fFlags & VBOXVDMACMD_DMA_BPB_TRANSFER_F_SRC_VRAMOFFSET)
2710 pvSrc = pbRam + TransferSafeCopy.Src.offVramBuf + cbTransfered;
2711 else
2712 {
2713 RTGCPHYS GCPhysSrcPage = TransferSafeCopy.Src.phBuf + cbTransfered;
2714 rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pDevIns, GCPhysSrcPage, 0, &pvSrc, &SrcLock);
2715 AssertRC(rc);
2716 if (RT_SUCCESS(rc))
2717 {
2718 fSrcLocked = true;
2719 cbSubTransfer = RT_MIN(cbSubTransfer, X86_PAGE_SIZE - (uint32_t)(GCPhysSrcPage & X86_PAGE_OFFSET_MASK));
2720 }
2721 else
2722 break;
2723 }
2724
2725 void *pvDst;
2726 PGMPAGEMAPLOCK DstLock;
2727 bool fDstLocked = false;
2728 if (TransferSafeCopy.fFlags & VBOXVDMACMD_DMA_BPB_TRANSFER_F_DST_VRAMOFFSET)
2729 pvDst = pbRam + TransferSafeCopy.Dst.offVramBuf + cbTransfered;
2730 else
2731 {
2732 RTGCPHYS GCPhysDstPage = TransferSafeCopy.Dst.phBuf + cbTransfered;
2733 rc = PDMDevHlpPhysGCPhys2CCPtr(pDevIns, GCPhysDstPage, 0, &pvDst, &DstLock);
2734 AssertRC(rc);
2735 if (RT_SUCCESS(rc))
2736 {
2737 fDstLocked = true;
2738 cbSubTransfer = RT_MIN(cbSubTransfer, X86_PAGE_SIZE - (uint32_t)(GCPhysDstPage & X86_PAGE_OFFSET_MASK));
2739 }
2740 }
2741
2742 if (RT_SUCCESS(rc))
2743 {
2744 memcpy(pvDst, pvSrc, cbSubTransfer);
2745 cbTransfered += cbSubTransfer;
2746 cbTransfer -= cbSubTransfer;
2747 }
2748 else
2749 cbTransfer = 0; /* force break below */
2750
2751 if (fSrcLocked)
2752 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &SrcLock);
2753 if (fDstLocked)
2754 PDMDevHlpPhysReleasePageMappingLock(pDevIns, &DstLock);
2755 } while (cbTransfer);
2756
2757 if (RT_SUCCESS(rc))
2758 return sizeof(TransferSafeCopy);
2759 return rc;
2760}
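
/* A minimal sketch (hypothetical values; not from the source) of a BPB
 * transfer descriptor as consumed above: copy cbTransferSize bytes from a
 * guest physical buffer into VRAM. The loop above walks it in page-sized (or
 * smaller) chunks because guest memory is only mapped one page at a time. */
#if 0 /* illustrative sketch */
    RTGCPHYS const GCPhysSrc = 0x12340000; /* hypothetical guest address */
    VBOXVDMACMD_DMA_BPB_TRANSFER Xfer;
    RT_ZERO(Xfer);
    Xfer.cbTransferSize = _64K;     /* hypothetical */
    Xfer.fFlags         = VBOXVDMACMD_DMA_BPB_TRANSFER_F_DST_VRAMOFFSET;
    Xfer.Dst.offVramBuf = 0x100000; /* hypothetical VRAM offset */
    Xfer.Src.phBuf      = GCPhysSrc;
#endif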
2761
2762/**
2763 * Worker for vboxVDMACommandProcess().
2764 *
2765 * @param pVdma The VDMA channel.
2766 * @param pbBuffer Command buffer, considered volatile.
2767 * @param cbBuffer The number of bytes at @a pbBuffer.
2768 */
2769static int vboxVDMACmdExec(PVBOXVDMAHOST pVdma, const uint8_t *pbBuffer, uint32_t cbBuffer)
2770{
2771 AssertReturn(pbBuffer, VERR_INVALID_POINTER);
2772
2773 for (;;)
2774 {
2775 AssertReturn(cbBuffer >= VBOXVDMACMD_HEADER_SIZE(), VERR_INVALID_PARAMETER);
2776
2777 VBOXVDMACMD const *pCmd = (VBOXVDMACMD const *)pbBuffer;
2778 VBOXVDMACMD_TYPE enmCmdType = pCmd->enmType;
2779 int cbProcessed;
2780 switch (enmCmdType)
2781 {
2782 case VBOXVDMACMD_TYPE_CHROMIUM_CMD:
2783 {
2784# ifdef VBOXWDDM_TEST_UHGSMI
2785 static int count = 0;
2786 static uint64_t start, end;
2787 if (count==0)
2788 {
2789 start = RTTimeNanoTS();
2790 }
2791 ++count;
2792 if (count==100000)
2793 {
2794 end = RTTimeNanoTS();
2795 float ems = (end-start)/1000000.f;
2796 LogRel(("100000 calls took %i ms, %i cps\n", (int)ems, (int)(100000.f*1000.f/ems) ));
2797 }
2798# endif
2799 /** @todo post the buffer to chromium */
2800 return VINF_SUCCESS;
2801 }
2802
2803 case VBOXVDMACMD_TYPE_DMA_PRESENT_BLT:
2804 {
2805 const PVBOXVDMACMD_DMA_PRESENT_BLT pBlt = VBOXVDMACMD_BODY(pCmd, VBOXVDMACMD_DMA_PRESENT_BLT);
2806 cbProcessed = vboxVDMACmdExecBlt(pVdma, pBlt, cbBuffer - VBOXVDMACMD_HEADER_SIZE());
2807 Assert(cbProcessed >= 0);
2808 break;
2809 }
2810
2811 case VBOXVDMACMD_TYPE_DMA_BPB_TRANSFER:
2812 {
2813 const PVBOXVDMACMD_DMA_BPB_TRANSFER pTransfer = VBOXVDMACMD_BODY(pCmd, VBOXVDMACMD_DMA_BPB_TRANSFER);
2814 cbProcessed = vboxVDMACmdExecBpbTransfer(pVdma, pTransfer, cbBuffer - VBOXVDMACMD_HEADER_SIZE());
2815 Assert(cbProcessed >= 0);
2816 break;
2817 }
2818
2819 case VBOXVDMACMD_TYPE_DMA_NOP:
2820 return VINF_SUCCESS;
2821
2822 case VBOXVDMACMD_TYPE_CHILD_STATUS_IRQ:
2823 return VINF_SUCCESS;
2824
2825 default:
2826 AssertFailedReturn(VERR_INVALID_FUNCTION);
2827 }
2828
2829 /* Advance buffer or return. */
2830 if (cbProcessed >= 0)
2831 {
2832 Assert(cbProcessed > 0);
2833 cbProcessed += VBOXVDMACMD_HEADER_SIZE();
2834 if ((uint32_t)cbProcessed >= cbBuffer)
2835 {
2836 Assert((uint32_t)cbProcessed == cbBuffer);
2837 return VINF_SUCCESS;
2838 }
2839
2840 cbBuffer -= cbProcessed;
2841 pbBuffer += cbProcessed;
2842 }
2843 else
2844 return cbProcessed; /* error status */
2845
2846 }
2847}
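
/* Illustrative layout (not from the source) of the buffer walked above:
 * commands are laid out back to back, each advanced past by
 * VBOXVDMACMD_HEADER_SIZE() plus the byte count the handler reports, e.g.:
 *
 *   [HDR | PRESENT_BLT body][HDR | BPB_TRANSFER body][HDR | ...]
 *
 * CHROMIUM_CMD, DMA_NOP and CHILD_STATUS_IRQ terminate processing of the
 * buffer immediately. */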
2848
2849/**
2850 * VDMA worker thread procedure, see vdmaVBVACtlEnableSubmitInternal().
2851 *
2852 * @thread VDMA
2853 */
2854static DECLCALLBACK(int) vboxVDMAWorkerThread(RTTHREAD hThreadSelf, void *pvUser)
2855{
2856 RT_NOREF(hThreadSelf);
2857 PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)pvUser;
2858 PVGASTATE pVGAState = pVdma->pVGAState;
2859 VBVAEXHOSTCONTEXT *pCmdVbva = &pVdma->CmdVbva;
2860 int rc;
2861
2862 VBoxVDMAThreadNotifyConstructSucceeded(&pVdma->Thread, pvUser);
2863
2864 while (!VBoxVDMAThreadIsTerminating(&pVdma->Thread))
2865 {
2866 uint8_t *pbCmd = NULL;
2867 uint32_t cbCmd = 0;
2868 VBVAEXHOST_DATA_TYPE enmType = VBoxVBVAExHPDataGet(pCmdVbva, &pbCmd, &cbCmd);
2869 switch (enmType)
2870 {
2871 case VBVAEXHOST_DATA_TYPE_CMD:
2872 vboxVDMACrCmdProcess(pVdma, pbCmd, cbCmd);
2873 VBoxVBVAExHPDataCompleteCmd(pCmdVbva, cbCmd);
2874 VBVARaiseIrq(pVGAState, 0);
2875 break;
2876
2877 case VBVAEXHOST_DATA_TYPE_GUESTCTL:
2878 rc = vboxVDMACrGuestCtlProcess(pVdma, (VBVAEXHOSTCTL *)pbCmd);
2879 VBoxVBVAExHPDataCompleteCtl(pCmdVbva, (VBVAEXHOSTCTL *)pbCmd, rc);
2880 break;
2881
2882 case VBVAEXHOST_DATA_TYPE_HOSTCTL:
2883 {
2884 bool fContinue = true;
2885 rc = vboxVDMACrHostCtlProcess(pVdma, (VBVAEXHOSTCTL *)pbCmd, &fContinue);
2886 VBoxVBVAExHPDataCompleteCtl(pCmdVbva, (VBVAEXHOSTCTL *)pbCmd, rc);
2887 if (fContinue)
2888 break;
2889 }
2890 RT_FALL_THRU();
2891
2892 case VBVAEXHOST_DATA_TYPE_NO_DATA:
2893 rc = RTSemEventWaitNoResume(pVdma->Thread.hEvent, RT_INDEFINITE_WAIT);
2894 AssertMsg(RT_SUCCESS(rc) || rc == VERR_INTERRUPTED, ("%Rrc\n", rc));
2895 break;
2896
2897 default:
2898 WARN(("unexpected type %d\n", enmType));
2899 break;
2900 }
2901 }
2902
2903 VBoxVDMAThreadNotifyTerminatingSucceeded(&pVdma->Thread, pvUser);
2904
2905 return VINF_SUCCESS;
2906}
2907
2908/**
2909 * Worker for vboxVDMACommand.
2910 *
2911 * @param pCmd The command to process. Consider content volatile.
2912 * @param cbCmd Number of valid bytes at @a pCmd. This is at least
2913 * sizeof(VBOXVDMACBUF_DR).
2914 * @thread VDMA
2915 */
2916static void vboxVDMACommandProcess(PVBOXVDMAHOST pVdma, PVBOXVDMACBUF_DR pCmd, uint32_t cbCmd)
2917{
2918 PHGSMIINSTANCE pHgsmi = pVdma->pHgsmi;
2919 int rc;
2920
2921 do /* break loop */
2922 {
2923 /*
2924 * Get the command buffer (volatile).
2925 */
2926 uint16_t const cbCmdBuf = pCmd->cbBuf;
2927 const uint8_t *pbCmdBuf;
2928 PGMPAGEMAPLOCK Lock;
2929 bool bReleaseLocked = false;
2930 if (pCmd->fFlags & VBOXVDMACBUF_FLAG_BUF_FOLLOWS_DR)
2931 {
2932 pbCmdBuf = VBOXVDMACBUF_DR_TAIL(pCmd, const uint8_t);
2933 rc = VINF_SUCCESS;
2934 AssertBreakStmt((uintptr_t)&pbCmdBuf[cbCmdBuf] <= (uintptr_t)&((uint8_t *)pCmd)[cbCmd],
2935 rc = VERR_INVALID_PARAMETER);
2936 }
2937 else if (pCmd->fFlags & VBOXVDMACBUF_FLAG_BUF_VRAM_OFFSET)
2938 {
2939 uint64_t offVRam = pCmd->Location.offVramBuf;
2940 pbCmdBuf = (uint8_t const *)pVdma->pVGAState->vram_ptrR3 + offVRam;
2941 rc = VINF_SUCCESS;
2942 AssertBreakStmt( offVRam <= pVdma->pVGAState->vram_size
2943 && offVRam + cbCmdBuf <= pVdma->pVGAState->vram_size,
2944 rc = VERR_INVALID_PARAMETER);
2945 }
2946 else
2947 {
2948 /* Make sure it doesn't cross a page. */
2949 RTGCPHYS GCPhysBuf = pCmd->Location.phBuf;
2950 AssertBreakStmt((uint32_t)(GCPhysBuf & X86_PAGE_OFFSET_MASK) + cbCmdBuf <= (uint32_t)X86_PAGE_SIZE,
2951 rc = VERR_INVALID_PARAMETER);
2952
2953 rc = PDMDevHlpPhysGCPhys2CCPtrReadOnly(pVdma->pVGAState->pDevInsR3, GCPhysBuf, 0 /*fFlags*/,
2954 (const void **)&pbCmdBuf, &Lock);
2955 AssertRCBreak(rc); /* if (rc == VERR_PGM_PHYS_PAGE_RESERVED) -> fall back on using PGMPhysRead ?? */
2956 bReleaseLocked = true;
2957 }
2958
2959 /*
2960 * Process the command.
2961 */
2962 rc = vboxVDMACmdExec(pVdma, pbCmdBuf, cbCmdBuf);
2963 AssertRC(rc);
2964
2965 /* Clean up the command buffer. */
2966 if (bReleaseLocked)
2967 PDMDevHlpPhysReleasePageMappingLock(pVdma->pVGAState->pDevInsR3, &Lock);
2968
2969 } while (0);
2970
2971 /*
2972 * Complete the command.
2973 */
2974 pCmd->rc = rc;
2975 rc = VBoxSHGSMICommandComplete(pHgsmi, pCmd);
2976 AssertRC(rc);
2977}
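
/* Summary (illustrative; restating the checks above) of the three ways the
 * guest can hand over the command buffer:
 *   VBOXVDMACBUF_FLAG_BUF_FOLLOWS_DR  - body appended to the descriptor itself;
 *   VBOXVDMACBUF_FLAG_BUF_VRAM_OFFSET - body lives in VRAM at offVramBuf;
 *   neither flag                      - body at a guest physical address and
 *                                       must not cross a page boundary. */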
2978
2979# if 0 /** @todo vboxVDMAControlProcess is unused */
2980static void vboxVDMAControlProcess(PVBOXVDMAHOST pVdma, PVBOXVDMA_CTL pCmd)
2981{
2982 PHGSMIINSTANCE pHgsmi = pVdma->pHgsmi;
2983 pCmd->i32Result = VINF_SUCCESS;
2984 int rc = VBoxSHGSMICommandComplete (pHgsmi, pCmd);
2985 AssertRC(rc);
2986}
2987# endif
2988
2989#endif /* VBOX_WITH_CRHGSMI */
2990#ifdef VBOX_VDMA_WITH_WATCHDOG
2991
2992/**
2993 * @callback_method_impl{TMTIMER, VDMA watchdog timer.}
2994 */
2995static DECLCALLBACK(void) vboxVDMAWatchDogTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
2996{
2997 RT_NOREF(pDevIns, pTimer);
2998 VBOXVDMAHOST *pVdma = (VBOXVDMAHOST *)pvUser;
2998 PVGASTATE pVGAState = pVdma->pVGAState;
2999 VBVARaiseIrq(pVGAState, HGSMIHOSTFLAGS_WATCHDOG);
3000}
3001
3002/**
3003 * Handles VBOXVDMA_CTL_TYPE_WATCHDOG for vboxVDMAControl.
3004 */
3005static int vboxVDMAWatchDogCtl(struct VBOXVDMAHOST *pVdma, uint32_t cMillis)
3006{
3007 PPDMDEVINS pDevIns = pVdma->pVGAState->pDevInsR3;
3008 if (cMillis)
3009 TMTimerSetMillies(pVdma->WatchDogTimer, cMillis);
3010 else
3011 TMTimerStop(pVdma->WatchDogTimer);
3012 return VINF_SUCCESS;
3013}
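
/* A minimal sketch (hypothetical value; not from the source) of arming the
 * watchdog through VBVA_VDMA_CTL: the u32Offset field of VBOXVDMA_CTL is
 * reused as the timeout in milliseconds, zero disarms the timer, and expiry
 * raises HGSMIHOSTFLAGS_WATCHDOG as seen in vboxVDMAWatchDogTimer() above. */
#if 0 /* illustrative sketch */
    VBOXVDMA_CTL Ctl;
    RT_ZERO(Ctl);
    Ctl.enmCtl    = VBOXVDMA_CTL_TYPE_WATCHDOG;
    Ctl.u32Offset = 1000; /* hypothetical: bark after one second */
#endif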
3014
3015#endif /* VBOX_VDMA_WITH_WATCHDOG */
3016
3017/**
3018 * Called by vgaR3Construct() to initialize the state.
3019 *
3020 * @returns VBox status code.
3021 */
3022int vboxVDMAConstruct(PVGASTATE pVGAState, uint32_t cPipeElements)
3023{
3024 RT_NOREF(cPipeElements);
3025 int rc;
3026 PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)RTMemAllocZ(sizeof(*pVdma));
3027 Assert(pVdma);
3028 if (pVdma)
3029 {
3030 pVdma->pHgsmi = pVGAState->pHGSMI;
3031 pVdma->pVGAState = pVGAState;
3032
3033#ifdef VBOX_VDMA_WITH_WATCHDOG
3034 rc = PDMDevHlpTMTimerCreate(pVGAState->pDevInsR3, TMCLOCK_REAL, vboxVDMAWatchDogTimer,
3035 pVdma, TMTIMER_FLAGS_NO_CRIT_SECT,
3036 "VDMA WatchDog Timer", &pVdma->WatchDogTimer);
3037 AssertRC(rc);
3038#else
3039 rc = VINF_SUCCESS;
3040#endif
3041 if (RT_SUCCESS(rc))
3042 {
3043#ifdef VBOX_WITH_CRHGSMI
3044 VBoxVDMAThreadInit(&pVdma->Thread);
3045
3046 rc = RTSemEventMultiCreate(&pVdma->HostCrCtlCompleteEvent);
3047 if (RT_SUCCESS(rc))
3048 {
3049 rc = VBoxVBVAExHSInit(&pVdma->CmdVbva);
3050 if (RT_SUCCESS(rc))
3051 {
3052 rc = RTCritSectInit(&pVdma->CalloutCritSect);
3053 if (RT_SUCCESS(rc))
3054 {
3055#endif
3056 pVGAState->pVdma = pVdma;
3057
3058 /* No HGCM service if VMSVGA is enabled. */
3059 if (!pVGAState->fVMSVGAEnabled)
3060 {
3061 int rcIgnored = vboxVDMACrCtlHgsmiSetup(pVdma); NOREF(rcIgnored); /** @todo is this ignoring intentional? */
3062 }
3063 return VINF_SUCCESS;
3064
3065#ifdef VBOX_WITH_CRHGSMI
3066 }
3067
3068 WARN(("RTCritSectInit failed %Rrc\n", rc));
3069 VBoxVBVAExHSTerm(&pVdma->CmdVbva);
3070 }
3071 else
3072 WARN(("VBoxVBVAExHSInit failed %Rrc\n", rc));
3073 RTSemEventMultiDestroy(pVdma->HostCrCtlCompleteEvent);
3074 }
3075 else
3076 WARN(("RTSemEventMultiCreate failed %Rrc\n", rc));
3077#endif
3078 /* the timer is cleaned up automatically */
3079 }
3080 RTMemFree(pVdma);
3081 }
3082 else
3083 rc = VERR_OUT_OF_RESOURCES;
3084 return rc;
3085}
3086
3087/**
3088 * Called by vgaR3Reset() to do reset.
3089 */
3090void vboxVDMAReset(struct VBOXVDMAHOST *pVdma)
3091{
3092#ifdef VBOX_WITH_CRHGSMI
3093 vdmaVBVACtlDisableSync(pVdma);
3094#else
3095 RT_NOREF(pVdma);
3096#endif
3097}
3098
3099/**
3100 * Called by vgaR3Destruct() to do cleanup.
3101 */
3102void vboxVDMADestruct(struct VBOXVDMAHOST *pVdma)
3103{
3104 if (!pVdma)
3105 return;
3106#ifdef VBOX_WITH_CRHGSMI
3107 if (pVdma->pVGAState->fVMSVGAEnabled)
3108 VBoxVBVAExHSDisable(&pVdma->CmdVbva);
3109 else
3110 {
3111 /** @todo Remove. It does nothing because pVdma->CmdVbva is already disabled at this point
3112 * as the result of the SharedOpenGL HGCM service unloading.
3113 */
3114 vdmaVBVACtlDisableSync(pVdma);
3115 }
3116 VBoxVDMAThreadCleanup(&pVdma->Thread);
3117 VBoxVBVAExHSTerm(&pVdma->CmdVbva);
3118 RTSemEventMultiDestroy(pVdma->HostCrCtlCompleteEvent);
3119 RTCritSectDelete(&pVdma->CalloutCritSect);
3120#endif
3121 RTMemFree(pVdma);
3122}
3123
3124/**
3125 * Handle VBVA_VDMA_CTL, see vbvaChannelHandler().
3126 *
3127 * @param pVdma The VDMA channel.
3128 * @param pCmd The control command to handle. Considered volatile.
3129 * @param cbCmd The size of the command. At least sizeof(VBOXVDMA_CTL).
3130 */
3131void vboxVDMAControl(struct VBOXVDMAHOST *pVdma, PVBOXVDMA_CTL pCmd, uint32_t cbCmd)
3132{
3133 RT_NOREF(cbCmd);
3134 PHGSMIINSTANCE pIns = pVdma->pHgsmi;
3135
3136 VBOXVDMA_CTL_TYPE enmCtl = pCmd->enmCtl;
3137 switch (enmCtl)
3138 {
3139 case VBOXVDMA_CTL_TYPE_ENABLE:
3140 case VBOXVDMA_CTL_TYPE_DISABLE:
3141 case VBOXVDMA_CTL_TYPE_FLUSH:
3142 pCmd->i32Result = VINF_SUCCESS;
3143 break;
3148#ifdef VBOX_VDMA_WITH_WATCHDOG
3149 case VBOXVDMA_CTL_TYPE_WATCHDOG:
3150 pCmd->i32Result = vboxVDMAWatchDogCtl(pVdma, pCmd->u32Offset);
3151 break;
3152#endif
3153 default:
3154 WARN(("cmd not supported"));
3155 pCmd->i32Result = VERR_NOT_SUPPORTED;
3156 break;
3157 }
3158
3159 int rc = VBoxSHGSMICommandComplete(pIns, pCmd);
3160 AssertRC(rc);
3161}
3162
3163/**
3164 * Handle VBVA_VDMA_CMD, see vbvaChannelHandler().
3165 *
3166 * @param pVdma The VDMA channel.
3167 * @param pCmd The command to handle. Considered volatile.
3168 * @param cbCmd The size of the command. At least sizeof(VBOXVDMACBUF_DR).
3169 */
3170void vboxVDMACommand(struct VBOXVDMAHOST *pVdma, PVBOXVDMACBUF_DR pCmd, uint32_t cbCmd)
3171{
3172#ifdef VBOX_WITH_CRHGSMI
3173 /* Chromium commands are processed by the chromium HGCM thread independently of our
3174 * internal command processing pipeline, which is why we handle them specially here. */
3175 int rc = vboxVDMACmdCheckCrCmd(pVdma, pCmd, cbCmd);
3176 if (rc == VINF_SUCCESS)
3177 return;
3178
3179 if (RT_FAILURE(rc))
3180 {
3181 pCmd->rc = rc;
3182 rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, pCmd);
3183 AssertRC(rc);
3184 return;
3185 }
3186
3187 vboxVDMACommandProcess(pVdma, pCmd, cbCmd);
3188
3189#else
3190 RT_NOREF(cbCmd);
3191 pCmd->rc = VERR_NOT_IMPLEMENTED;
3192 int rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, pCmd);
3193 AssertRC(rc);
3194#endif
3195}
3196
3197#ifdef VBOX_WITH_CRHGSMI
3198
3199/**
3200 * @callback_method_impl{FNVBVAEXHOSTCTL_COMPLETE,
3201 * Used by vdmaVBVACtlGenericGuestSubmit() and vdmaVBVACtlEnableDisableSubmit() }
3202 */
3203static DECLCALLBACK(void) vboxCmdVBVACmdCtlGuestCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl,
3204 int rc, void *pvContext)
3205{
3206 PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)pvContext;
3207 VBOXCMDVBVA_CTL *pGCtl = (VBOXCMDVBVA_CTL*)(pCtl->u.cmd.pu8Cmd - sizeof (VBOXCMDVBVA_CTL));
3208 AssertRC(rc);
3209 pGCtl->i32Result = rc;
3210
3211 Assert(pVdma->pVGAState->fGuestCaps & VBVACAPS_COMPLETEGCMD_BY_IOREAD);
3212 rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, pGCtl);
3213 AssertRC(rc);
3214
3215 VBoxVBVAExHCtlFree(pVbva, pCtl);
3216}
3217
3218/**
3219 * Worker for vdmaVBVACtlGenericGuestSubmit() and vdmaVBVACtlOpaqueHostSubmit().
3220 */
3221static int vdmaVBVACtlGenericSubmit(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL_SOURCE enmSource, VBVAEXHOSTCTL_TYPE enmType,
3222 uint8_t* pu8Cmd, uint32_t cbCmd, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
3223{
3224 int rc;
3225 VBVAEXHOSTCTL *pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva, enmType);
3226 if (pHCtl)
3227 {
3228 pHCtl->u.cmd.pu8Cmd = pu8Cmd;
3229 pHCtl->u.cmd.cbCmd = cbCmd;
3230 rc = vdmaVBVACtlSubmit(pVdma, pHCtl, enmSource, pfnComplete, pvComplete);
3231 if (RT_SUCCESS(rc))
3232 return VINF_SUCCESS;
3233
3234 VBoxVBVAExHCtlFree(&pVdma->CmdVbva, pHCtl);
3235 Log(("vdmaVBVACtlSubmit failed %Rrc\n", rc));
3236 }
3237 else
3238 {
3239 WARN(("VBoxVBVAExHCtlCreate failed\n"));
3240 rc = VERR_NO_MEMORY;
3241 }
3242 return rc;
3243}
3244
3245/**
3246 * Handler for vboxCmdVBVACmdCtl()/VBOXCMDVBVACTL_TYPE_3DCTL.
3247 */
3248static int vdmaVBVACtlGenericGuestSubmit(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL_TYPE enmType, VBOXCMDVBVA_CTL *pCtl, uint32_t cbCtl)
3249{
3250 Assert(cbCtl >= sizeof(VBOXCMDVBVA_CTL)); /* Checked by the caller's caller, vbvaChannelHandler(). */
3251
3252 VBoxSHGSMICommandMarkAsynchCompletion(pCtl);
3253 int rc = vdmaVBVACtlGenericSubmit(pVdma, VBVAEXHOSTCTL_SOURCE_GUEST, enmType, (uint8_t *)(pCtl + 1),
3254 cbCtl - sizeof(VBOXCMDVBVA_CTL), vboxCmdVBVACmdCtlGuestCompletion, pVdma);
3255 if (RT_SUCCESS(rc))
3256 return VINF_SUCCESS;
3257
3258 WARN(("vdmaVBVACtlGenericSubmit failed %Rrc\n", rc));
3259 pCtl->i32Result = rc;
3260 rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, pCtl);
3261 AssertRC(rc);
3262 return VINF_SUCCESS;
3263}
3264
3265/**
3266 * @callback_method_impl{FNVBVAEXHOSTCTL_COMPLETE, Used by vdmaVBVACtlOpaqueHostSubmit()}
3267 */
3268static DECLCALLBACK(void) vboxCmdVBVACmdCtlHostCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl,
3269 int rc, void *pvCompletion)
3270{
3271 VBOXCRCMDCTL* pVboxCtl = (VBOXCRCMDCTL*)pCtl->u.cmd.pu8Cmd;
3272 if (pVboxCtl->u.pfnInternal)
3273 ((PFNCRCTLCOMPLETION)pVboxCtl->u.pfnInternal)(pVboxCtl, pCtl->u.cmd.cbCmd, rc, pvCompletion);
3274 VBoxVBVAExHCtlFree(pVbva, pCtl);
3275}
3276
3277/**
3278 * Worker for vboxCmdVBVACmdHostCtl() and vboxCmdVBVACmdHostCtlSync().
3279 */
3280static int vdmaVBVACtlOpaqueHostSubmit(PVBOXVDMAHOST pVdma, struct VBOXCRCMDCTL* pCmd, uint32_t cbCmd,
3281 PFNCRCTLCOMPLETION pfnCompletion, void *pvCompletion)
3282{
3283 pCmd->u.pfnInternal = (PFNRT)pfnCompletion;
3284 int rc = vdmaVBVACtlGenericSubmit(pVdma, VBVAEXHOSTCTL_SOURCE_HOST, VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE,
3285 (uint8_t *)pCmd, cbCmd, vboxCmdVBVACmdCtlHostCompletion, pvCompletion);
3286 if (RT_FAILURE(rc))
3287 {
3288 if (rc == VERR_INVALID_STATE)
3289 {
3290 pCmd->u.pfnInternal = NULL;
3291 PVGASTATE pVGAState = pVdma->pVGAState;
3292 rc = pVGAState->pDrv->pfnCrHgcmCtlSubmit(pVGAState->pDrv, pCmd, cbCmd, pfnCompletion, pvCompletion);
3293 if (!RT_SUCCESS(rc))
3294 WARN(("pfnCrHgsmiControlProcess failed %Rrc\n", rc));
3295
3296 return rc;
3297 }
3298 WARN(("vdmaVBVACtlGenericSubmit failed %Rrc\n", rc));
3299 return rc;
3300 }
3301
3302 return VINF_SUCCESS;
3303}
3304
3305/**
3306 * Called from vdmaVBVACtlThreadCreatedEnable().
3307 */
3308static int vdmaVBVANotifyEnable(PVGASTATE pVGAState)
3309{
3310 for (uint32_t i = 0; i < pVGAState->cMonitors; i++)
3311 {
3312 int rc = pVGAState->pDrv->pfnVBVAEnable (pVGAState->pDrv, i, NULL, true);
3313 if (!RT_SUCCESS(rc))
3314 {
3315 WARN(("pfnVBVAEnable failed %Rrc\n", rc));
3316 for (uint32_t j = 0; j < i; j++)
3317 {
3318 pVGAState->pDrv->pfnVBVADisable (pVGAState->pDrv, j);
3319 }
3320
3321 return rc;
3322 }
3323 }
3324 return VINF_SUCCESS;
3325}
3326
3327/**
3328 * Called from vdmaVBVACtlThreadCreatedEnable() and vdmaVBVADisableProcess().
3329 */
3330static int vdmaVBVANotifyDisable(PVGASTATE pVGAState)
3331{
3332 for (uint32_t i = 0; i < pVGAState->cMonitors; i++)
3333 pVGAState->pDrv->pfnVBVADisable(pVGAState->pDrv, i);
3334 return VINF_SUCCESS;
3335}
3336
3337/**
3338 * Hook that is called by vboxVDMAWorkerThread when it starts.
3339 *
3340 * @thread VDMA
3341 */
3342static DECLCALLBACK(void) vdmaVBVACtlThreadCreatedEnable(struct VBOXVDMATHREAD *pThread, int rc,
3343 void *pvThreadContext, void *pvContext)
3344{
3345 RT_NOREF(pThread);
3346 PVBOXVDMAHOST pVdma = (PVBOXVDMAHOST)pvThreadContext;
3347 VBVAEXHOSTCTL* pHCtl = (VBVAEXHOSTCTL*)pvContext;
3348
3349 if (RT_SUCCESS(rc))
3350 {
3351 rc = vboxVDMACrGuestCtlProcess(pVdma, pHCtl);
3352 /* rc == VINF_SUCCESS would mean the actual state change has occurred. */
3353 if (rc == VINF_SUCCESS)
3354 {
3355 /* We need to inform Main about the VBVA enable/disable. Main expects
3356 * notifications to be done from the main thread, so submit the
3357 * notification there. */
3358 PVGASTATE pVGAState = pVdma->pVGAState;
3359
3360 if (VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
3361 vdmaVBVANotifyEnable(pVGAState);
3362 else
3363 vdmaVBVANotifyDisable(pVGAState);
3364 }
3365 else if (RT_FAILURE(rc))
3366 WARN(("vboxVDMACrGuestCtlProcess failed %Rrc\n", rc));
3367 }
3368 else
3369 WARN(("vdmaVBVACtlThreadCreatedEnable is passed %Rrc\n", rc));
3370
3371 VBoxVBVAExHPDataCompleteCtl(&pVdma->CmdVbva, pHCtl, rc);
3372}
3373
3374/**
3375 * Worker for vdmaVBVACtlEnableDisableSubmitInternal() and vdmaVBVACtlEnableSubmitSync().
3376 */
3377static int vdmaVBVACtlEnableSubmitInternal(PVBOXVDMAHOST pVdma, VBVAENABLE *pEnable, bool fPaused, PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
3378{
3379 int rc;
3380 VBVAEXHOSTCTL *pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva,
3381 fPaused ? VBVAEXHOSTCTL_TYPE_GHH_ENABLE_PAUSED : VBVAEXHOSTCTL_TYPE_GHH_ENABLE);
3382 if (pHCtl)
3383 {
3384 pHCtl->u.cmd.pu8Cmd = (uint8_t *)pEnable;
3385 pHCtl->u.cmd.cbCmd = sizeof(*pEnable);
3386 pHCtl->pfnComplete = pfnComplete;
3387 pHCtl->pvComplete = pvComplete;
3388
3389 rc = VBoxVDMAThreadCreate(&pVdma->Thread, vboxVDMAWorkerThread, pVdma, vdmaVBVACtlThreadCreatedEnable, pHCtl);
3390 if (RT_SUCCESS(rc))
3391 return VINF_SUCCESS;
3392 WARN(("VBoxVDMAThreadCreate failed %d\n", rc));
3393
3394 VBoxVBVAExHCtlFree(&pVdma->CmdVbva, pHCtl);
3395 }
3396 else
3397 {
3398 WARN(("VBoxVBVAExHCtlCreate failed\n"));
3399 rc = VERR_NO_MEMORY;
3400 }
3401
3402 return rc;
3403}
3404
3405/**
3406 * Worker for vboxVDMASaveLoadExecPerform().
3407 */
3408static int vdmaVBVACtlEnableSubmitSync(PVBOXVDMAHOST pVdma, uint32_t offVram, bool fPaused)
3409{
3410 VBVAENABLE Enable = {0};
3411 Enable.u32Flags = VBVA_F_ENABLE;
3412 Enable.u32Offset = offVram;
3413
3414 VDMA_VBVA_CTL_CYNC_COMPLETION Data;
3415 Data.rc = VERR_NOT_IMPLEMENTED;
3416 int rc = RTSemEventCreate(&Data.hEvent);
3417 if (!RT_SUCCESS(rc))
3418 {
3419 WARN(("RTSemEventCreate failed %Rrc\n", rc));
3420 return rc;
3421 }
3422
3423 rc = vdmaVBVACtlEnableSubmitInternal(pVdma, &Enable, fPaused, vdmaVBVACtlSubmitSyncCompletion, &Data);
3424 if (RT_SUCCESS(rc))
3425 {
3426 rc = RTSemEventWait(Data.hEvent, RT_INDEFINITE_WAIT);
3427 if (RT_SUCCESS(rc))
3428 {
3429 rc = Data.rc;
3430 if (!RT_SUCCESS(rc))
3431 WARN(("vdmaVBVACtlSubmitSyncCompletion returned %Rrc\n", rc));
3432 }
3433 else
3434 WARN(("RTSemEventWait failed %Rrc\n", rc));
3435 }
3436 else
3437 WARN(("vdmaVBVACtlSubmit failed %Rrc\n", rc));
3438
3439 RTSemEventDestroy(Data.hEvent);
3440
3441 return rc;
3442}
3443
3444/**
3445 * Worker for vdmaVBVACtlEnableDisableSubmitInternal().
3446 */
3447static int vdmaVBVACtlDisableSubmitInternal(PVBOXVDMAHOST pVdma, VBVAENABLE *pEnable,
3448 PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
3449{
3450 int rc;
3451 VBVAEXHOSTCTL* pHCtl;
3452 if (VBoxVBVAExHSIsDisabled(&pVdma->CmdVbva))
3453 {
3454 WARN(("VBoxVBVAExHSIsDisabled: disabled"));
3455 return VINF_SUCCESS;
3456 }
3457
3458 pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva, VBVAEXHOSTCTL_TYPE_GHH_DISABLE);
3459 if (!pHCtl)
3460 {
3461 WARN(("VBoxVBVAExHCtlCreate failed\n"));
3462 return VERR_NO_MEMORY;
3463 }
3464
3465 pHCtl->u.cmd.pu8Cmd = (uint8_t*)pEnable;
3466 pHCtl->u.cmd.cbCmd = sizeof (*pEnable);
3467 rc = vdmaVBVACtlSubmit(pVdma, pHCtl, VBVAEXHOSTCTL_SOURCE_GUEST, pfnComplete, pvComplete);
3468 if (RT_SUCCESS(rc))
3469 return VINF_SUCCESS;
3470
3471 WARN(("vdmaVBVACtlSubmit failed rc %Rrc\n", rc));
3472 VBoxVBVAExHCtlFree(&pVdma->CmdVbva, pHCtl);
3473 return rc;
3474}
3475
3476/**
3477 * Worker for vdmaVBVACtlEnableDisableSubmit().
3478 */
3479static int vdmaVBVACtlEnableDisableSubmitInternal(PVBOXVDMAHOST pVdma, VBVAENABLE *pEnable,
3480 PFNVBVAEXHOSTCTL_COMPLETE pfnComplete, void *pvComplete)
3481{
3482 bool fEnable = (pEnable->u32Flags & (VBVA_F_ENABLE | VBVA_F_DISABLE)) == VBVA_F_ENABLE;
3483 if (fEnable)
3484 return vdmaVBVACtlEnableSubmitInternal(pVdma, pEnable, false, pfnComplete, pvComplete);
3485 return vdmaVBVACtlDisableSubmitInternal(pVdma, pEnable, pfnComplete, pvComplete);
3486}
3487
3488/**
3489 * Handler for vboxCmdVBVACmdCtl/VBOXCMDVBVACTL_TYPE_ENABLE.
3490 */
3491static int vdmaVBVACtlEnableDisableSubmit(PVBOXVDMAHOST pVdma, VBOXCMDVBVA_CTL_ENABLE *pEnable)
3492{
3493 VBoxSHGSMICommandMarkAsynchCompletion(&pEnable->Hdr);
3494 int rc = vdmaVBVACtlEnableDisableSubmitInternal(pVdma, &pEnable->Enable, vboxCmdVBVACmdCtlGuestCompletion, pVdma);
3495 if (RT_SUCCESS(rc))
3496 return VINF_SUCCESS;
3497
3498 WARN(("vdmaVBVACtlEnableDisableSubmitInternal failed %Rrc\n", rc));
3499 pEnable->Hdr.i32Result = rc;
3500 rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, &pEnable->Hdr);
3501 AssertRC(rc);
3502 return VINF_SUCCESS;
3503}
3504
3505/**
3506 * @callback_method_impl{FNVBVAEXHOSTCTL_COMPLETE,
3507 * Used by vdmaVBVACtlSubmitSync() and vdmaVBVACtlEnableSubmitSync().}
3508 */
3509static DECLCALLBACK(void) vdmaVBVACtlSubmitSyncCompletion(VBVAEXHOSTCONTEXT *pVbva, struct VBVAEXHOSTCTL *pCtl,
3510 int rc, void *pvContext)
3511{
3512 RT_NOREF(pVbva, pCtl);
3513 VDMA_VBVA_CTL_CYNC_COMPLETION *pData = (VDMA_VBVA_CTL_CYNC_COMPLETION *)pvContext;
3514 pData->rc = rc;
3515 rc = RTSemEventSignal(pData->hEvent);
3516 if (!RT_SUCCESS(rc))
3517 WARN(("RTSemEventSignal failed %Rrc\n", rc));
3518}
3519
3520/** Submits a control command and waits synchronously for its completion. */
3521static int vdmaVBVACtlSubmitSync(PVBOXVDMAHOST pVdma, VBVAEXHOSTCTL* pCtl, VBVAEXHOSTCTL_SOURCE enmSource)
3522{
3523 VDMA_VBVA_CTL_CYNC_COMPLETION Data;
3524 Data.rc = VERR_NOT_IMPLEMENTED;
3525 int rc = RTSemEventCreate(&Data.hEvent);
3526 if (!RT_SUCCESS(rc))
3527 {
3528 WARN(("RTSemEventCreate failed %Rrc\n", rc));
3529 return rc;
3530 }
3531
3532 rc = vdmaVBVACtlSubmit(pVdma, pCtl, enmSource, vdmaVBVACtlSubmitSyncCompletion, &Data);
3533 if (RT_SUCCESS(rc))
3534 {
3535 rc = RTSemEventWait(Data.hEvent, RT_INDEFINITE_WAIT);
3536 if (RT_SUCCESS(rc))
3537 {
3538 rc = Data.rc;
3539 if (!RT_SUCCESS(rc))
3540 WARN(("vdmaVBVACtlSubmitSyncCompletion returned %Rrc\n", rc));
3541 }
3542 else
3543 WARN(("RTSemEventWait failed %Rrc\n", rc));
3544 }
3545 else
3546 Log(("vdmaVBVACtlSubmit failed %Rrc\n", rc));
3547
3548 RTSemEventDestroy(Data.hEvent);
3549
3550 return rc;
3551}
3552
3553/**
3554 * Worker for vboxVDMASaveStateExecPrep().
3555 */
3556static int vdmaVBVAPause(PVBOXVDMAHOST pVdma)
3557{
3558 VBVAEXHOSTCTL Ctl;
3559 Ctl.enmType = VBVAEXHOSTCTL_TYPE_HH_INTERNAL_PAUSE;
3560 return vdmaVBVACtlSubmitSync(pVdma, &Ctl, VBVAEXHOSTCTL_SOURCE_HOST);
3561}
3562
3563/**
3564 * Worker for vboxVDMASaveLoadExecPerform() and vboxVDMASaveStateExecDone().
3565 */
3566static int vdmaVBVAResume(PVBOXVDMAHOST pVdma)
3567{
3568 VBVAEXHOSTCTL Ctl;
3569 Ctl.enmType = VBVAEXHOSTCTL_TYPE_HH_INTERNAL_RESUME;
3570 return vdmaVBVACtlSubmitSync(pVdma, &Ctl, VBVAEXHOSTCTL_SOURCE_HOST);
3571}
3572
3573/**
3574 * Worker for vboxCmdVBVACmdSubmit(), vboxCmdVBVACmdFlush() and vboxCmdVBVATimerRefresh().
3575 */
3576static int vboxVDMACmdSubmitPerform(struct VBOXVDMAHOST *pVdma)
3577{
3578 int rc = VBoxVBVAExHSCheckCommands(&pVdma->CmdVbva);
3579 switch (rc)
3580 {
3581 case VINF_SUCCESS:
3582 return VBoxVDMAThreadEventNotify(&pVdma->Thread);
3583 case VINF_ALREADY_INITIALIZED:
3584 case VINF_EOF:
3585 case VERR_INVALID_STATE:
3586 return VINF_SUCCESS;
3587 default:
3588 Assert(!RT_FAILURE(rc));
3589 return RT_FAILURE(rc) ? rc : VERR_INTERNAL_ERROR;
3590 }
3591}
3592
3593
3594/**
3595 * @interface_method_impl{PDMIDISPLAYVBVACALLBACKS,pfnCrCtlSubmit}
3596 */
3597int vboxCmdVBVACmdHostCtl(PPDMIDISPLAYVBVACALLBACKS pInterface,
3598 struct VBOXCRCMDCTL *pCmd,
3599 uint32_t cbCmd,
3600 PFNCRCTLCOMPLETION pfnCompletion,
3601 void *pvCompletion)
3602{
3603 PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
3604 struct VBOXVDMAHOST *pVdma = pVGAState->pVdma;
3605 if (pVdma == NULL)
3606 return VERR_INVALID_STATE;
3607 pCmd->CalloutList.List.pNext = NULL;
3608 return vdmaVBVACtlOpaqueHostSubmit(pVdma, pCmd, cbCmd, pfnCompletion, pvCompletion);
3609}
3610
3611/**
3612 * Argument package from vboxCmdVBVACmdHostCtlSync to vboxCmdVBVACmdHostCtlSyncCb.
3613 */
3614typedef struct VBOXCMDVBVA_CMDHOSTCTL_SYNC
3615{
3616 struct VBOXVDMAHOST *pVdma;
3617 uint32_t fProcessing;
3618 int rc;
3619} VBOXCMDVBVA_CMDHOSTCTL_SYNC;
3620
3621/**
3622 * @callback_method_impl{FNCRCTLCOMPLETION, Used by vboxCmdVBVACmdHostCtlSync.}
3623 */
3624static DECLCALLBACK(void) vboxCmdVBVACmdHostCtlSyncCb(struct VBOXCRCMDCTL *pCmd, uint32_t cbCmd, int rc, void *pvCompletion)
3625{
3626 RT_NOREF(pCmd, cbCmd);
3627 VBOXCMDVBVA_CMDHOSTCTL_SYNC *pData = (VBOXCMDVBVA_CMDHOSTCTL_SYNC *)pvCompletion;
3628
3629 pData->rc = rc;
3630
3631 struct VBOXVDMAHOST *pVdma = pData->pVdma;
3632
3633 ASMAtomicIncS32(&pVdma->i32cHostCrCtlCompleted);
3634
3635 pData->fProcessing = 0;
3636
3637 RTSemEventMultiSignal(pVdma->HostCrCtlCompleteEvent);
3638}
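/* Note the ordering above: fProcessing is cleared before the multi event is
 * signalled, and the synchronous caller below re-checks the flag after each
 * timed wait, so a missed wakeup costs at most one 500 ms poll interval. */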
3639
3640/**
3641 * @callback_method_impl{FNVBOXCRCLIENT_CALLOUT, Worker for vboxVDMACrCtlHgsmiSetup }
3642 *
3643 * @note r=bird: not to be confused with the callout function below. sigh.
3644 */
3645static DECLCALLBACK(int) vboxCmdVBVACmdCallout(struct VBOXVDMAHOST *pVdma, struct VBOXCRCMDCTL* pCmd,
3646 VBOXCRCMDCTL_CALLOUT_LISTENTRY *pEntry, PFNVBOXCRCMDCTL_CALLOUT_CB pfnCb)
3647{
3648 pEntry->pfnCb = pfnCb;
3649 int rc = RTCritSectEnter(&pVdma->CalloutCritSect);
3650 if (RT_SUCCESS(rc))
3651 {
3652 RTListAppend(&pCmd->CalloutList.List, &pEntry->Node);
3653 RTCritSectLeave(&pVdma->CalloutCritSect);
3654
3655 RTSemEventMultiSignal(pVdma->HostCrCtlCompleteEvent);
3656 }
3657 else
3658 WARN(("RTCritSectEnter failed %Rrc\n", rc));
3659
3660 return rc;
3661}
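/* Callout flow sketch (illustrative; pfnMyCalloutCb is a placeholder): the
 * worker thread queues an entry on the command's callout list and wakes the
 * synchronous caller, which drains the list and invokes the callback on its
 * own thread in vboxCmdVBVACmdCalloutProcess() below.
 *
 * @code
 *      VBOXCRCMDCTL_CALLOUT_LISTENTRY Entry;
 *      int rc = vboxCmdVBVACmdCallout(pVdma, pCmd, &Entry, pfnMyCalloutCb);
 * @endcode
 */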
3662
3663
3664/**
3665 * Worker for vboxCmdVBVACmdHostCtlSync.
3666 */
3667static int vboxCmdVBVACmdCalloutProcess(struct VBOXVDMAHOST *pVdma, struct VBOXCRCMDCTL* pCmd)
3668{
3669 int rc = VINF_SUCCESS;
3670 for (;;)
3671 {
3672 rc = RTCritSectEnter(&pVdma->CalloutCritSect);
3673 if (RT_SUCCESS(rc))
3674 {
3675 VBOXCRCMDCTL_CALLOUT_LISTENTRY* pEntry = RTListGetFirst(&pCmd->CalloutList.List, VBOXCRCMDCTL_CALLOUT_LISTENTRY, Node);
3676 if (pEntry)
3677 RTListNodeRemove(&pEntry->Node);
3678 RTCritSectLeave(&pVdma->CalloutCritSect);
3679
3680 if (!pEntry)
3681 break;
3682
3683 pEntry->pfnCb(pEntry);
3684 }
3685 else
3686 {
3687 WARN(("RTCritSectEnter failed %Rrc\n", rc));
3688 break;
3689 }
3690 }
3691
3692 return rc;
3693}
3694
3695/**
3696 * @interface_method_impl{PDMIDISPLAYVBVACALLBACKS,pfnCrCtlSubmitSync}
3697 */
3698DECLCALLBACK(int) vboxCmdVBVACmdHostCtlSync(PPDMIDISPLAYVBVACALLBACKS pInterface, struct VBOXCRCMDCTL *pCmd, uint32_t cbCmd)
3699{
3700 PVGASTATE pVGAState = PPDMIDISPLAYVBVACALLBACKS_2_PVGASTATE(pInterface);
3701 struct VBOXVDMAHOST *pVdma = pVGAState->pVdma;
3702 if (pVdma == NULL)
3703 return VERR_INVALID_STATE;
3704
3705 VBOXCMDVBVA_CMDHOSTCTL_SYNC Data;
3706 Data.pVdma = pVdma;
3707 Data.fProcessing = 1;
3708 Data.rc = VERR_INTERNAL_ERROR;
3709 RTListInit(&pCmd->CalloutList.List);
3710 int rc = vdmaVBVACtlOpaqueHostSubmit(pVdma, pCmd, cbCmd, vboxCmdVBVACmdHostCtlSyncCb, &Data);
3711 if (!RT_SUCCESS(rc))
3712 {
3713 WARN(("vdmaVBVACtlOpaqueHostSubmit failed %Rrc", rc));
3714 return rc;
3715 }
3716
3717 while (Data.fProcessing)
3718 {
3719 /* Poll infrequently to make sure no completed message has been missed. */
3720 RTSemEventMultiWait(pVdma->HostCrCtlCompleteEvent, 500);
3721
3722 vboxCmdVBVACmdCalloutProcess(pVdma, pCmd);
3723
3724 if (Data.fProcessing)
3725 RTThreadYield();
3726 }
3727
3728 /* Process any callouts that were added after the last wait. */
3729 vboxCmdVBVACmdCalloutProcess(pVdma, pCmd);
3730
3731 /* 'Our' message has been processed, so we should reset the semaphore.
3732 * It is still possible that another message has been processed
3733 * and the semaphore has been signalled again,
3734 * so reset only if no other messages have completed.
3735 */
3736 int32_t c = ASMAtomicDecS32(&pVdma->i32cHostCrCtlCompleted);
3737 Assert(c >= 0);
3738 if (!c)
3739 RTSemEventMultiReset(pVdma->HostCrCtlCompleteEvent);
3740
3741 rc = Data.rc;
3742 if (!RT_SUCCESS(rc))
3743 WARN(("host call failed %Rrc", rc));
3744
3745 return rc;
3746}
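/* Hedged caller-side sketch (the control type constant and its setup are
 * assumptions for illustration, not taken from this file):
 *
 * @code
 *      // pInterface is the PDMIDISPLAYVBVACALLBACKS the caller resolved.
 *      VBOXCRCMDCTL Ctl;
 *      RT_ZERO(Ctl);
 *      Ctl.enmType = VBOXCRCMDCTL_TYPE_HGCM; // assumed type constant
 *      int rc = pInterface->pfnCrCtlSubmitSync(pInterface, &Ctl, sizeof(Ctl));
 * @endcode
 */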
3747
3748/**
3749 * Handler for VBVA_CMDVBVA_CTL, see vbvaChannelHandler().
3750 *
3751 * @returns VBox status code
3752 * @param pVGAState The VGA state.
3753 * @param pCtl The control command.
3754 * @param cbCtl The size of it. This is at least
3755 * sizeof(VBOXCMDVBVA_CTL).
3756 */
3757int vboxCmdVBVACmdCtl(PVGASTATE pVGAState, VBOXCMDVBVA_CTL *pCtl, uint32_t cbCtl)
3758{
3759 struct VBOXVDMAHOST *pVdma = pVGAState->pVdma;
3760 switch (pCtl->u32Type)
3761 {
3762 case VBOXCMDVBVACTL_TYPE_3DCTL:
3763 return vdmaVBVACtlGenericGuestSubmit(pVdma, VBVAEXHOSTCTL_TYPE_GHH_BE_OPAQUE, pCtl, cbCtl);
3764
3765 case VBOXCMDVBVACTL_TYPE_RESIZE:
3766 return vdmaVBVACtlGenericGuestSubmit(pVdma, VBVAEXHOSTCTL_TYPE_GHH_RESIZE, pCtl, cbCtl);
3767
3768 case VBOXCMDVBVACTL_TYPE_ENABLE:
3769 if (cbCtl == sizeof(VBOXCMDVBVA_CTL_ENABLE))
3770 return vdmaVBVACtlEnableDisableSubmit(pVdma, (VBOXCMDVBVA_CTL_ENABLE *)pCtl);
3771 WARN(("incorrect enable size\n"));
3772 break;
3773 default:
3774 WARN(("unsupported type\n"));
3775 break;
3776 }
3777
3778 pCtl->i32Result = VERR_INVALID_PARAMETER;
3779 int rc = VBoxSHGSMICommandComplete(pVdma->pHgsmi, pCtl);
3780 AssertRC(rc);
3781 return VINF_SUCCESS;
3782}
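/* Guest-side sketch of the enable request handled above (hedged; offVRam is a
 * placeholder for the ring-buffer offset the guest allocated in VRAM):
 *
 * @code
 *      VBOXCMDVBVA_CTL_ENABLE EnableCtl;
 *      RT_ZERO(EnableCtl);
 *      EnableCtl.Hdr.u32Type = VBOXCMDVBVACTL_TYPE_ENABLE;
 *      EnableCtl.Enable.u32Flags = VBVA_F_ENABLE;
 *      EnableCtl.Enable.u32Offset = offVRam;
 * @endcode
 */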
3783
3784/**
3785 * Handler for VBVA_CMDVBVA_SUBMIT, see vbvaChannelHandler().
3786 */
3787int vboxCmdVBVACmdSubmit(PVGASTATE pVGAState)
3788{
3789 if (!VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva))
3790 {
3791 WARN(("vdma VBVA is disabled\n"));
3792 return VERR_INVALID_STATE;
3793 }
3794
3795 return vboxVDMACmdSubmitPerform(pVGAState->pVdma);
3796}
3797
3798/**
3799 * Handler for VBVA_CMDVBVA_FLUSH, see vbvaChannelHandler().
3800 */
3801int vboxCmdVBVACmdFlush(PVGASTATE pVGAState)
3802{
3803 WARN(("flush\n"));
3804 if (!VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva))
3805 {
3806 WARN(("vdma VBVA is disabled\n"));
3807 return VERR_INVALID_STATE;
3808 }
3809 return vboxVDMACmdSubmitPerform(pVGAState->pVdma);
3810}
3811
3812/**
3813 * Called from vgaTimerRefresh().
3814 */
3815void vboxCmdVBVATimerRefresh(PVGASTATE pVGAState)
3816{
3817 if (!VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva))
3818 return;
3819 vboxVDMACmdSubmitPerform(pVGAState->pVdma);
3820}
3821
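/**
 * Checks whether command VBVA (CmdVbva) is enabled.
 */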
3822bool vboxCmdVBVAIsEnabled(PVGASTATE pVGAState)
3823{
3824 return VBoxVBVAExHSIsEnabled(&pVGAState->pVdma->CmdVbva);
3825}
3826
3827#endif /* VBOX_WITH_CRHGSMI */
3828
3829
3830/*
3831 *
3832 *
3833 * Saved state.
3834 * Saved state.
3835 * Saved state.
3836 *
3837 *
3838 */
3839
3840int vboxVDMASaveStateExecPrep(struct VBOXVDMAHOST *pVdma)
3841{
3842#ifdef VBOX_WITH_CRHGSMI
3843 int rc = vdmaVBVAPause(pVdma);
3844 if (RT_SUCCESS(rc))
3845 return VINF_SUCCESS;
3846
3847 if (rc != VERR_INVALID_STATE)
3848 {
3849 WARN(("vdmaVBVAPause failed %Rrc\n", rc));
3850 return rc;
3851 }
3852
3853# ifdef DEBUG_misha
3854 WARN(("debug prep"));
3855# endif
3856
3857 PVGASTATE pVGAState = pVdma->pVGAState;
3858 PVBOXVDMACMD_CHROMIUM_CTL pCmd;
3859 pCmd = (PVBOXVDMACMD_CHROMIUM_CTL)vboxVDMACrCtlCreate(VBOXVDMACMD_CHROMIUM_CTL_TYPE_SAVESTATE_BEGIN, sizeof(*pCmd));
3860 if (pCmd)
3861 {
3862 rc = vboxVDMACrCtlPost(pVGAState, pCmd, sizeof (*pCmd));
3863 AssertRC(rc);
3864 if (RT_SUCCESS(rc))
3865 rc = vboxVDMACrCtlGetRc(pCmd);
3866 vboxVDMACrCtlRelease(pCmd);
3867 return rc;
3868 }
3869 return VERR_NO_MEMORY;
3870#else
3871 RT_NOREF(pVdma);
3872 return VINF_SUCCESS;
3873#endif
3874}
3875
3876int vboxVDMASaveStateExecDone(struct VBOXVDMAHOST *pVdma)
3877{
3878#ifdef VBOX_WITH_CRHGSMI
3879 int rc = vdmaVBVAResume(pVdma);
3880 if (RT_SUCCESS(rc))
3881 return VINF_SUCCESS;
3882
3883 if (rc != VERR_INVALID_STATE)
3884 {
3885 WARN(("vdmaVBVAResume failed %Rrc\n", rc));
3886 return rc;
3887 }
3888
3889# ifdef DEBUG_misha
3890 WARN(("debug done"));
3891# endif
3892
3893 PVGASTATE pVGAState = pVdma->pVGAState;
3894 PVBOXVDMACMD_CHROMIUM_CTL pCmd;
3895 pCmd = (PVBOXVDMACMD_CHROMIUM_CTL)vboxVDMACrCtlCreate(VBOXVDMACMD_CHROMIUM_CTL_TYPE_SAVESTATE_END, sizeof(*pCmd));
3896 Assert(pCmd);
3897 if (pCmd)
3898 {
3899 rc = vboxVDMACrCtlPost(pVGAState, pCmd, sizeof (*pCmd));
3900 AssertRC(rc);
3901 if (RT_SUCCESS(rc))
3902 rc = vboxVDMACrCtlGetRc(pCmd);
3903 vboxVDMACrCtlRelease(pCmd);
3904 return rc;
3905 }
3906 return VERR_NO_MEMORY;
3907#else
3908 RT_NOREF(pVdma);
3909 return VINF_SUCCESS;
3910#endif
3911}
3912
3913int vboxVDMASaveStateExecPerform(struct VBOXVDMAHOST *pVdma, PSSMHANDLE pSSM)
3914{
3915 int rc;
3916#ifndef VBOX_WITH_CRHGSMI
3917 RT_NOREF(pVdma, pSSM);
3918
3919#else
3920 if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
3921#endif
3922 {
3923 rc = SSMR3PutU32(pSSM, UINT32_MAX);
3924 AssertRCReturn(rc, rc);
3925 return VINF_SUCCESS;
3926 }
3927
3928#ifdef VBOX_WITH_CRHGSMI
3929 PVGASTATE pVGAState = pVdma->pVGAState;
3930 uint8_t * pu8VramBase = pVGAState->vram_ptrR3;
3931
3932 rc = SSMR3PutU32(pSSM, (uint32_t)(((uint8_t*)pVdma->CmdVbva.pVBVA) - pu8VramBase));
3933 AssertRCReturn(rc, rc);
3934
3935 VBVAEXHOSTCTL HCtl;
3936 HCtl.enmType = VBVAEXHOSTCTL_TYPE_HH_SAVESTATE;
3937 HCtl.u.state.pSSM = pSSM;
3938 HCtl.u.state.u32Version = 0;
3939 return vdmaVBVACtlSubmitSync(pVdma, &HCtl, VBVAEXHOSTCTL_SOURCE_HOST);
3940#endif
3941}
3942
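/**
 * Load-state counterpart of vboxVDMASaveStateExecPerform(): the first u32 is
 * UINT32_MAX when command VBVA was disabled at save time, and otherwise the
 * offset of the VBVA buffer within VRAM.
 */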
3943int vboxVDMASaveLoadExecPerform(struct VBOXVDMAHOST *pVdma, PSSMHANDLE pSSM, uint32_t u32Version)
3944{
3945 uint32_t u32;
3946 int rc = SSMR3GetU32(pSSM, &u32);
3947 AssertLogRelRCReturn(rc, rc);
3948
3949 if (u32 != UINT32_MAX)
3950 {
3951#ifdef VBOX_WITH_CRHGSMI
3952 rc = vdmaVBVACtlEnableSubmitSync(pVdma, u32, true);
3953 AssertLogRelRCReturn(rc, rc);
3954
3955 Assert(pVdma->CmdVbva.i32State == VBVAEXHOSTCONTEXT_ESTATE_PAUSED);
3956
3957 VBVAEXHOSTCTL HCtl;
3958 HCtl.enmType = VBVAEXHOSTCTL_TYPE_HH_LOADSTATE;
3959 HCtl.u.state.pSSM = pSSM;
3960 HCtl.u.state.u32Version = u32Version;
3961 rc = vdmaVBVACtlSubmitSync(pVdma, &HCtl, VBVAEXHOSTCTL_SOURCE_HOST);
3962 AssertLogRelRCReturn(rc, rc);
3963
3964 rc = vdmaVBVAResume(pVdma);
3965 AssertLogRelRCReturn(rc, rc);
3966
3967 return VINF_SUCCESS;
3968#else
3969 RT_NOREF(pVdma, u32Version);
3970 WARN(("Unsupported VBVACtl info!\n"));
3971 return VERR_VERSION_MISMATCH;
3972#endif
3973 }
3974
3975 return VINF_SUCCESS;
3976}
3977
3978int vboxVDMASaveLoadDone(struct VBOXVDMAHOST *pVdma)
3979{
3980#ifdef VBOX_WITH_CRHGSMI
3981 if (!VBoxVBVAExHSIsEnabled(&pVdma->CmdVbva))
3982 return VINF_SUCCESS;
3983
3984/** @todo r=bird: BTW. would be great if you put in a couple of comments here and there explaining what
3985 * the purpose of this code is. */
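/* Presumably this tells the worker thread that state loading has completed so
 * normal command processing can resume; the control carries no payload and,
 * with the NULL completion callback below, is simply freed once handled. */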
3986 VBVAEXHOSTCTL *pHCtl = VBoxVBVAExHCtlCreate(&pVdma->CmdVbva, VBVAEXHOSTCTL_TYPE_HH_LOADSTATE_DONE);
3987 if (!pHCtl)
3988 {
3989 WARN(("VBoxVBVAExHCtlCreate failed\n"));
3990 return VERR_NO_MEMORY;
3991 }
3992
3993 /* sanity */
3994 pHCtl->u.cmd.pu8Cmd = NULL;
3995 pHCtl->u.cmd.cbCmd = 0;
3996
3997 /* NULL completion will just free the ctl up */
3998 int rc = vdmaVBVACtlSubmit(pVdma, pHCtl, VBVAEXHOSTCTL_SOURCE_HOST, NULL, NULL);
3999 if (RT_FAILURE(rc))
4000 {
4001 Log(("vdmaVBVACtlSubmit failed %Rrc\n", rc));
4002 VBoxVBVAExHCtlFree(&pVdma->CmdVbva, pHCtl);
4003 return rc;
4004 }
4005#else
4006 RT_NOREF(pVdma);
4007#endif
4008 return VINF_SUCCESS;
4009}
4010