VirtualBox

source: vbox/trunk/src/VBox/Devices/Storage/VmdkHDDCore.cpp@27651

Last change on this file since 27651 was 27623, checked in by vboxsync, 15 years ago

VMDK: Fix OVF export and import

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 225.0 KB
1/* $Id: VmdkHDDCore.cpp 27623 2010-03-23 12:06:34Z vboxsync $ */
2/** @file
3 * VMDK Disk image, Core Code.
4 */
5
6/*
7 * Copyright (C) 2006-2010 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22/*******************************************************************************
23* Header Files *
24*******************************************************************************/
25#define LOG_GROUP LOG_GROUP_VD_VMDK
26#include <VBox/VBoxHDD-Plugin.h>
27#include <VBox/err.h>
28
29#include <VBox/log.h>
30#include <iprt/assert.h>
31#include <iprt/alloc.h>
32#include <iprt/uuid.h>
33#include <iprt/file.h>
34#include <iprt/path.h>
35#include <iprt/string.h>
36#include <iprt/rand.h>
37#include <iprt/zip.h>
38
39
40/*******************************************************************************
41* Constants And Macros, Structures and Typedefs *
42*******************************************************************************/
43
44/** Maximum encoded string size (including NUL) we allow for VMDK images.
45 * Deliberately not set high to avoid running out of descriptor space. */
46#define VMDK_ENCODED_COMMENT_MAX 1024
47
48/** VMDK descriptor DDB entry for PCHS cylinders. */
49#define VMDK_DDB_GEO_PCHS_CYLINDERS "ddb.geometry.cylinders"
50
51/** VMDK descriptor DDB entry for PCHS heads. */
52#define VMDK_DDB_GEO_PCHS_HEADS "ddb.geometry.heads"
53
54/** VMDK descriptor DDB entry for PCHS sectors. */
55#define VMDK_DDB_GEO_PCHS_SECTORS "ddb.geometry.sectors"
56
57/** VMDK descriptor DDB entry for LCHS cylinders. */
58#define VMDK_DDB_GEO_LCHS_CYLINDERS "ddb.geometry.biosCylinders"
59
60/** VMDK descriptor DDB entry for LCHS heads. */
61#define VMDK_DDB_GEO_LCHS_HEADS "ddb.geometry.biosHeads"
62
63/** VMDK descriptor DDB entry for LCHS sectors. */
64#define VMDK_DDB_GEO_LCHS_SECTORS "ddb.geometry.biosSectors"
65
66/** VMDK descriptor DDB entry for image UUID. */
67#define VMDK_DDB_IMAGE_UUID "ddb.uuid.image"
68
69/** VMDK descriptor DDB entry for image modification UUID. */
70#define VMDK_DDB_MODIFICATION_UUID "ddb.uuid.modification"
71
72/** VMDK descriptor DDB entry for parent image UUID. */
73#define VMDK_DDB_PARENT_UUID "ddb.uuid.parent"
74
75/** VMDK descriptor DDB entry for parent image modification UUID. */
76#define VMDK_DDB_PARENT_MODIFICATION_UUID "ddb.uuid.parentmodification"
77
78/** No compression for streamOptimized files. */
79#define VMDK_COMPRESSION_NONE 0
80
81/** Deflate compression for streamOptimized files. */
82#define VMDK_COMPRESSION_DEFLATE 1
83
84/** Marker that the actual GD value is stored in the footer. */
85#define VMDK_GD_AT_END 0xffffffffffffffffULL
86
87/** Marker for end-of-stream in streamOptimized images. */
88#define VMDK_MARKER_EOS 0
89
90/** Marker for grain table block in streamOptimized images. */
91#define VMDK_MARKER_GT 1
92
93/** Marker for grain directory block in streamOptimized images. */
94#define VMDK_MARKER_GD 2
95
96/** Marker for footer in streamOptimized images. */
97#define VMDK_MARKER_FOOTER 3
98
99/** Dummy marker for "don't check the marker value". */
100#define VMDK_MARKER_IGNORE 0xffffffffU
101
102/**
103 * Magic number for hosted images created by VMware Workstation 4, VMware
104 * Workstation 5, VMware Server or VMware Player. Not necessarily sparse.
105 */
106#define VMDK_SPARSE_MAGICNUMBER 0x564d444b /* 'V' 'M' 'D' 'K' */
107
108/**
109 * VMDK hosted binary extent header. The "Sparse" is a total misnomer, as
110 * this header is also used for monolithic flat images.
111 */
112#pragma pack(1)
113typedef struct SparseExtentHeader
114{
115 uint32_t magicNumber;
116 uint32_t version;
117 uint32_t flags;
118 uint64_t capacity;
119 uint64_t grainSize;
120 uint64_t descriptorOffset;
121 uint64_t descriptorSize;
122 uint32_t numGTEsPerGT;
123 uint64_t rgdOffset;
124 uint64_t gdOffset;
125 uint64_t overHead;
126 bool uncleanShutdown;
127 char singleEndLineChar;
128 char nonEndLineChar;
129 char doubleEndLineChar1;
130 char doubleEndLineChar2;
131 uint16_t compressAlgorithm;
132 uint8_t pad[433];
133} SparseExtentHeader;
134#pragma pack()
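/* Illustrative sketch, not part of the original source: how a hosted sparse
 * header read from the start of an extent could be validated. All on-disk
 * fields are little-endian, so they pass through RT_LE2H_* before use, just
 * like the real open path further below. "File" stands for an already open
 * RTFILE handle and is only assumed here for the example.
 *
 *     SparseExtentHeader Hdr;
 *     int rc = RTFileReadAt(File, 0, &Hdr, sizeof(Hdr), NULL);
 *     if (RT_SUCCESS(rc))
 *     {
 *         if (RT_LE2H_U32(Hdr.magicNumber) != VMDK_SPARSE_MAGICNUMBER)
 *             rc = VERR_VD_VMDK_INVALID_HEADER;   // not 'K','D','M','V' on disk
 *         else if (RT_LE2H_U32(Hdr.version) != 1)
 *             rc = VERR_VD_VMDK_INVALID_HEADER;   // only version 1 handled here
 *     }
 */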
135
136/** VMDK capacity for a single chunk when 2G splitting is turned on. Should be
137 * divisible by the default grain size (64K) */
138#define VMDK_2G_SPLIT_SIZE (2047 * 1024 * 1024)
139
140/** VMDK streamOptimized file format marker. The type field may or may not
141 * be actually valid, but there's always data to read there. */
142#pragma pack(1)
143typedef struct VMDKMARKER
144{
145 uint64_t uSector;
146 uint32_t cbSize;
147 uint32_t uType;
148} VMDKMARKER;
149#pragma pack()
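/* Illustrative sketch, not part of the original source: how such a marker is
 * interpreted when scanning a streamOptimized extent. A non-zero cbSize means
 * a compressed grain whose deflated data follows the 12 byte uSector/cbSize
 * prefix directly; cbSize == 0 means a metadata marker whose type is given by
 * uType and whose payload starts at the next 512 byte boundary. This is what
 * vmdkFileInflateAt() below implements.
 *
 *     VMDKMARKER Marker;
 *     rc = vmdkFileReadAt(pVmdkFile, uOffset, &Marker, sizeof(Marker), NULL);
 *     if (RT_SUCCESS(rc))
 *     {
 *         if (RT_LE2H_U32(Marker.cbSize) != 0)
 *             uDataOffset = uOffset + 12;      // compressed grain data
 *         else if (RT_LE2H_U32(Marker.uType) == VMDK_MARKER_GT)
 *             uDataOffset = uOffset + 512;     // grain table block
 *     }
 */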
150
151
152#ifdef VBOX_WITH_VMDK_ESX
153
154/** @todo the ESX code is not tested, not used, and lacks error messages. */
155
156/**
157 * Magic number for images created by VMware GSX Server 3 or ESX Server 3.
158 */
159#define VMDK_ESX_SPARSE_MAGICNUMBER 0x44574f43 /* 'C' 'O' 'W' 'D' */
160
161#pragma pack(1)
162typedef struct COWDisk_Header
163{
164 uint32_t magicNumber;
165 uint32_t version;
166 uint32_t flags;
167 uint32_t numSectors;
168 uint32_t grainSize;
169 uint32_t gdOffset;
170 uint32_t numGDEntries;
171 uint32_t freeSector;
172 /* The spec incompletely documents quite a few further fields, but states
173 * that they are unused by the current format. Replace them by padding. */
174 char reserved1[1604];
175 uint32_t savedGeneration;
176 char reserved2[8];
177 uint32_t uncleanShutdown;
178 char padding[396];
179} COWDisk_Header;
180#pragma pack()
181#endif /* VBOX_WITH_VMDK_ESX */
182
183
184/** Convert sector number/size to byte offset/size. */
185#define VMDK_SECTOR2BYTE(u) ((uint64_t)(u) << 9)
186
187/** Convert byte offset/size to sector number/size. */
188#define VMDK_BYTE2SECTOR(u) ((u) >> 9)
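/* Worked example, not part of the original source: both macros assume
 * 512 byte sectors, i.e. a shift by 9 bits in either direction.
 *
 *     uint64_t offGrain = VMDK_SECTOR2BYTE(128);   // 128 sectors -> 65536 bytes (one 64K grain)
 *     uint64_t cSectors = VMDK_BYTE2SECTOR(65536); // 65536 bytes -> 128 sectors
 *
 * VMDK_BYTE2SECTOR truncates, so byte values which are not a multiple of 512
 * lose their remainder; callers are expected to pass aligned sizes. */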
189
190/**
191 * VMDK extent type.
192 */
193typedef enum VMDKETYPE
194{
195 /** Hosted sparse extent. */
196 VMDKETYPE_HOSTED_SPARSE = 1,
197 /** Flat extent. */
198 VMDKETYPE_FLAT,
199 /** Zero extent. */
200 VMDKETYPE_ZERO,
201 /** VMFS extent, used by ESX. */
202 VMDKETYPE_VMFS
203#ifdef VBOX_WITH_VMDK_ESX
204 ,
205 /** ESX sparse extent. */
206 VMDKETYPE_ESX_SPARSE
207#endif /* VBOX_WITH_VMDK_ESX */
208} VMDKETYPE, *PVMDKETYPE;
209
210/**
211 * VMDK access type for an extent.
212 */
213typedef enum VMDKACCESS
214{
215 /** No access allowed. */
216 VMDKACCESS_NOACCESS = 0,
217 /** Read-only access. */
218 VMDKACCESS_READONLY,
219 /** Read-write access. */
220 VMDKACCESS_READWRITE
221} VMDKACCESS, *PVMDKACCESS;
222
223/** Forward declaration for PVMDKIMAGE. */
224typedef struct VMDKIMAGE *PVMDKIMAGE;
225
226/**
227 * Extent file entry. Used for opening a particular file only once.
228 */
229typedef struct VMDKFILE
230{
231 /** Pointer to filename. Local copy. */
232 const char *pszFilename;
233 /** File open flags for consistency checking. */
234 unsigned fOpen;
235 /** File handle. */
236 RTFILE File;
237 /** Handle for asynchronous access if requested. */
238 void *pStorage;
239 /** Flag whether to use File or pStorage. */
240 bool fAsyncIO;
241 /** Reference counter. */
242 unsigned uReferences;
243 /** Flag whether the file should be deleted on last close. */
244 bool fDelete;
245 /** Pointer to the image we belong to. */
246 PVMDKIMAGE pImage;
247 /** Pointer to next file descriptor. */
248 struct VMDKFILE *pNext;
249 /** Pointer to the previous file descriptor. */
250 struct VMDKFILE *pPrev;
251} VMDKFILE, *PVMDKFILE;
252
253/**
254 * VMDK extent data structure.
255 */
256typedef struct VMDKEXTENT
257{
258 /** File handle. */
259 PVMDKFILE pFile;
260 /** Base name of the image extent. */
261 const char *pszBasename;
262 /** Full name of the image extent. */
263 const char *pszFullname;
264 /** Number of sectors in this extent. */
265 uint64_t cSectors;
266 /** Number of sectors per block (grain in VMDK speak). */
267 uint64_t cSectorsPerGrain;
268 /** Starting sector number of descriptor. */
269 uint64_t uDescriptorSector;
270 /** Size of descriptor in sectors. */
271 uint64_t cDescriptorSectors;
272 /** Starting sector number of grain directory. */
273 uint64_t uSectorGD;
274 /** Starting sector number of redundant grain directory. */
275 uint64_t uSectorRGD;
276 /** Total number of metadata sectors. */
277 uint64_t cOverheadSectors;
278 /** Nominal size (i.e. as described by the descriptor) of this extent. */
279 uint64_t cNominalSectors;
280 /** Sector offset (i.e. as described by the descriptor) of this extent. */
281 uint64_t uSectorOffset;
282 /** Number of entries in a grain table. */
283 uint32_t cGTEntries;
284 /** Number of sectors reachable via a grain directory entry. */
285 uint32_t cSectorsPerGDE;
286 /** Number of entries in the grain directory. */
287 uint32_t cGDEntries;
288 /** Pointer to the next free sector. Legacy information. Do not use. */
289 uint32_t uFreeSector;
290 /** Number of this extent in the list of images. */
291 uint32_t uExtent;
292 /** Pointer to the descriptor (NULL if no descriptor in this extent). */
293 char *pDescData;
294 /** Pointer to the grain directory. */
295 uint32_t *pGD;
296 /** Pointer to the redundant grain directory. */
297 uint32_t *pRGD;
298 /** VMDK version of this extent. 1=1.0/1.1 */
299 uint32_t uVersion;
300 /** Type of this extent. */
301 VMDKETYPE enmType;
302 /** Access to this extent. */
303 VMDKACCESS enmAccess;
304 /** Flag whether this extent is marked as unclean. */
305 bool fUncleanShutdown;
306 /** Flag whether the metadata in the extent header needs to be updated. */
307 bool fMetaDirty;
308 /** Flag whether there is a footer in this extent. */
309 bool fFooter;
310 /** Compression type for this extent. */
311 uint16_t uCompression;
312 /** Last grain which has been written to. Only for streamOptimized extents. */
313 uint32_t uLastGrainWritten;
314 /** Sector number of last grain which has been written to. Only for
315 * streamOptimized extents. */
316 uint32_t uLastGrainSector;
317 /** Data size of last grain which has been written to. Only for
318 * streamOptimized extents. */
319 uint32_t cbLastGrainWritten;
320 /** Starting sector of the decompressed grain buffer. */
321 uint32_t uGrainSector;
322 /** Decompressed grain buffer for streamOptimized extents. */
323 void *pvGrain;
324 /** Reference to the image in which this extent is used. Do not use this
325 * on a regular basis to avoid passing pImage references to functions
326 * explicitly. */
327 struct VMDKIMAGE *pImage;
328} VMDKEXTENT, *PVMDKEXTENT;
329
330/**
331 * Grain table cache size. Allocated per image.
332 */
333#define VMDK_GT_CACHE_SIZE 256
334
335/**
336 * Grain table block size. Smaller than an actual grain table block to allow
337 * more grain table blocks to be cached without having to allocate excessive
338 * amounts of memory for the cache.
339 */
340#define VMDK_GT_CACHELINE_SIZE 128
341
342
343/**
344 * Maximum number of lines in a descriptor file. Not worth the effort of
345 * making it variable. Descriptor files are generally very short (~20 lines),
346 * with the exception of sparse files split into 2G chunks, which at the
347 * maximum size (almost 2T) need exactly 1025 lines for the disk database.
348 */
349#define VMDK_DESCRIPTOR_LINES_MAX 1100U
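/* Worked example, not part of the original source: a maximum-size split image
 * is just under 2T and every chunk holds VMDK_2G_SPLIT_SIZE = 2047M of data,
 * so ceil(2T / 2047M) = 1025 lines are needed to describe all chunks.
 * Together with the header, disk descriptor and disk database sections
 * (roughly 20 further lines) this stays well below the 1100 line limit. */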
350
351/**
352 * Parsed descriptor information. Allows easy access and update of the
353 * descriptor (whether separate file or not). Free form text files suck.
354 */
355typedef struct VMDKDESCRIPTOR
356{
357 /** Line number of first entry of the disk descriptor. */
358 unsigned uFirstDesc;
359 /** Line number of first entry in the extent description. */
360 unsigned uFirstExtent;
361 /** Line number of first disk database entry. */
362 unsigned uFirstDDB;
363 /** Total number of lines. */
364 unsigned cLines;
365 /** Total amount of memory available for the descriptor. */
366 size_t cbDescAlloc;
367 /** Set if descriptor has been changed and not yet written to disk. */
368 bool fDirty;
369 /** Array of pointers to the data in the descriptor. */
370 char *aLines[VMDK_DESCRIPTOR_LINES_MAX];
371 /** Array of line indices pointing to the next non-comment line. */
372 unsigned aNextLines[VMDK_DESCRIPTOR_LINES_MAX];
373} VMDKDESCRIPTOR, *PVMDKDESCRIPTOR;
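/* Illustrative example, not part of the original source (all values made up):
 * a minimal monolithicSparse descriptor as it ends up in the structure above.
 * uFirstDesc points at the first disk descriptor entry ("version=1"),
 * uFirstExtent at the "RW ..." line and uFirstDDB at the first "ddb." line;
 * aNextLines[] chains the entries of each section while skipping comments.
 *
 *     # Disk DescriptorFile
 *     version=1
 *     CID=fffffffe
 *     parentCID=ffffffff
 *     createType="monolithicSparse"
 *
 *     # Extent description
 *     RW 2097152 SPARSE "test.vmdk"
 *
 *     # The Disk Data Base
 *     ddb.virtualHWVersion = "4"
 *     ddb.geometry.cylinders = "1044"
 *     ddb.geometry.heads = "16"
 *     ddb.geometry.sectors = "63"
 *     ddb.adapterType = "ide"
 */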
374
375
376/**
377 * Cache entry for translating extent/sector to a sector number in that
378 * extent.
379 */
380typedef struct VMDKGTCACHEENTRY
381{
382 /** Extent number for which this entry is valid. */
383 uint32_t uExtent;
384 /** GT data block number. */
385 uint64_t uGTBlock;
386 /** Data part of the cache entry. */
387 uint32_t aGTData[VMDK_GT_CACHELINE_SIZE];
388} VMDKGTCACHEENTRY, *PVMDKGTCACHEENTRY;
389
390/**
391 * Cache data structure for blocks of grain table entries. For now this is a
392 * fixed size direct mapping cache, but this should be adapted to the size of
393 * the sparse image and maybe converted to a set-associative cache. The
394 * implementation below implements a write-through cache with write allocate.
395 */
396typedef struct VMDKGTCACHE
397{
398 /** Cache entries. */
399 VMDKGTCACHEENTRY aGTCache[VMDK_GT_CACHE_SIZE];
400 /** Number of cache entries (currently unused). */
401 unsigned cEntries;
402} VMDKGTCACHE, *PVMDKGTCACHE;
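/* Illustrative sketch, not part of the original source: the cache above acts
 * as a direct mapped cache of VMDK_GT_CACHE_SIZE lines, each holding
 * VMDK_GT_CACHELINE_SIZE consecutive grain table entries. A lookup roughly
 * works as follows (the real code further below also mixes the extent number
 * into the hash):
 *
 *     uint64_t uGTBlock = uSector / (pExtent->cSectorsPerGrain * VMDK_GT_CACHELINE_SIZE);
 *     unsigned uGTHash  = (unsigned)(uGTBlock % VMDK_GT_CACHE_SIZE);
 *     PVMDKGTCACHEENTRY pGTCacheEntry = &pCache->aGTCache[uGTHash];
 *     if (   pGTCacheEntry->uExtent  != pExtent->uExtent
 *         || pGTCacheEntry->uGTBlock != uGTBlock)
 *     {
 *         // miss: read the 128-entry block from the grain table on disk and
 *         // refill the slot (write-through cache with write allocate)
 *     }
 */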
403
404/**
405 * Complete VMDK image data structure. Mainly a collection of extents and a few
406 * extra global data fields.
407 */
408typedef struct VMDKIMAGE
409{
410 /** Pointer to the image extents. */
411 PVMDKEXTENT pExtents;
412 /** Number of image extents. */
413 unsigned cExtents;
414 /** Pointer to the files list, for opening a file referenced multiple
415 * times only once (happens mainly with raw partition access). */
416 PVMDKFILE pFiles;
417
418 /** Base image name. */
419 const char *pszFilename;
420 /** Descriptor file if applicable. */
421 PVMDKFILE pFile;
422
423 /** Pointer to the per-disk VD interface list. */
424 PVDINTERFACE pVDIfsDisk;
425
426 /** Error interface. */
427 PVDINTERFACE pInterfaceError;
428 /** Error interface callbacks. */
429 PVDINTERFACEERROR pInterfaceErrorCallbacks;
430
431 /** Async I/O interface. */
432 PVDINTERFACE pInterfaceAsyncIO;
433 /** Async I/O interface callbacks. */
434 PVDINTERFACEASYNCIO pInterfaceAsyncIOCallbacks;
435 /**
436 * Pointer to an array of segment entries for async I/O.
437 * This is an optimization because the number of tasks to submit is not known
438 * and allocating/freeing an array in the read/write functions every time
439 * is too expensive.
440 */
441 PPDMDATASEG paSegments;
442 /** Entries available in the segments array. */
443 unsigned cSegments;
444
445 /** Open flags passed by VBoxHD layer. */
446 unsigned uOpenFlags;
447 /** Image flags defined during creation or determined during open. */
448 unsigned uImageFlags;
449 /** Total size of the image. */
450 uint64_t cbSize;
451 /** Physical geometry of this image. */
452 PDMMEDIAGEOMETRY PCHSGeometry;
453 /** Logical geometry of this image. */
454 PDMMEDIAGEOMETRY LCHSGeometry;
455 /** Image UUID. */
456 RTUUID ImageUuid;
457 /** Image modification UUID. */
458 RTUUID ModificationUuid;
459 /** Parent image UUID. */
460 RTUUID ParentUuid;
461 /** Parent image modification UUID. */
462 RTUUID ParentModificationUuid;
463
464 /** Pointer to grain table cache, if this image contains sparse extents. */
465 PVMDKGTCACHE pGTCache;
466 /** Pointer to the descriptor (NULL if no separate descriptor file). */
467 char *pDescData;
468 /** Allocation size of the descriptor file. */
469 size_t cbDescAlloc;
470 /** Parsed descriptor file content. */
471 VMDKDESCRIPTOR Descriptor;
472} VMDKIMAGE;
473
474
475/** State for the input callout of the inflate reader. */
476typedef struct VMDKINFLATESTATE
477{
478 /* File where the data is stored. */
479 PVMDKFILE File;
480 /* Total size of the data to read. */
481 size_t cbSize;
482 /* Offset in the file to read. */
483 uint64_t uFileOffset;
484 /* Current read position. */
485 ssize_t iOffset;
486} VMDKINFLATESTATE;
487
488/** State for the output callout of the deflate writer. */
489typedef struct VMDKDEFLATESTATE
490{
491 /* File where the data is to be stored. */
492 PVMDKFILE File;
493 /* Offset in the file to write at. */
494 uint64_t uFileOffset;
495 /* Current write position. */
496 ssize_t iOffset;
497} VMDKDEFLATESTATE;
498
499/*******************************************************************************
500 * Static Variables *
501 *******************************************************************************/
502
503/** NULL-terminated array of supported file extensions. */
504static const char *const s_apszVmdkFileExtensions[] =
505{
506 "vmdk",
507 NULL
508};
509
510/*******************************************************************************
511* Internal Functions *
512*******************************************************************************/
513
514static void vmdkFreeGrainDirectory(PVMDKEXTENT pExtent);
515
516static void vmdkFreeExtentData(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
517 bool fDelete);
518
519static int vmdkCreateExtents(PVMDKIMAGE pImage, unsigned cExtents);
520static int vmdkFlushImage(PVMDKIMAGE pImage);
521static int vmdkSetImageComment(PVMDKIMAGE pImage, const char *pszComment);
522static void vmdkFreeImage(PVMDKIMAGE pImage, bool fDelete);
523
524
525/**
526 * Internal: signal an error to the frontend.
527 */
528DECLINLINE(int) vmdkError(PVMDKIMAGE pImage, int rc, RT_SRC_POS_DECL,
529 const char *pszFormat, ...)
530{
531 va_list va;
532 va_start(va, pszFormat);
533 if (pImage->pInterfaceError && pImage->pInterfaceErrorCallbacks)
534 pImage->pInterfaceErrorCallbacks->pfnError(pImage->pInterfaceError->pvUser, rc, RT_SRC_POS_ARGS,
535 pszFormat, va);
536 va_end(va);
537 return rc;
538}
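/* Illustrative usage, not part of the original source: callers pass the
 * failure code together with RT_SRC_POS and a printf-style message, e.g.
 *
 *     return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS,
 *                      N_("VMDK: invalid header in '%s'"), pImage->pszFilename);
 *
 * The passed-in rc is always returned unchanged, so the call can sit directly
 * in a return statement even when no error interface is registered. */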
539
540/**
541 * Internal: open a file (using a file descriptor cache to ensure each file
542 * is only opened once - anything else can cause locking problems).
543 */
544static int vmdkFileOpen(PVMDKIMAGE pImage, PVMDKFILE *ppVmdkFile,
545 const char *pszFilename, unsigned fOpen, bool fAsyncIO)
546{
547 int rc = VINF_SUCCESS;
548 PVMDKFILE pVmdkFile;
549
550 for (pVmdkFile = pImage->pFiles;
551 pVmdkFile != NULL;
552 pVmdkFile = pVmdkFile->pNext)
553 {
554 if (!strcmp(pszFilename, pVmdkFile->pszFilename))
555 {
556 Assert(fOpen == pVmdkFile->fOpen);
557 pVmdkFile->uReferences++;
558
559 *ppVmdkFile = pVmdkFile;
560
561 return rc;
562 }
563 }
564
565 /* If we get here, there's no matching entry in the cache. */
566 pVmdkFile = (PVMDKFILE)RTMemAllocZ(sizeof(VMDKFILE));
567 if (!VALID_PTR(pVmdkFile))
568 {
569 *ppVmdkFile = NULL;
570 return VERR_NO_MEMORY;
571 }
572
573 pVmdkFile->pszFilename = RTStrDup(pszFilename);
574 if (!VALID_PTR(pVmdkFile->pszFilename))
575 {
576 RTMemFree(pVmdkFile);
577 *ppVmdkFile = NULL;
578 return VERR_NO_MEMORY;
579 }
580 pVmdkFile->fOpen = fOpen;
581
582#ifndef VBOX_WITH_NEW_IO_CODE
583 if ((pImage->uOpenFlags & VD_OPEN_FLAGS_ASYNC_IO) && (fAsyncIO))
584 {
585 rc = pImage->pInterfaceAsyncIOCallbacks->pfnOpen(pImage->pInterfaceAsyncIO->pvUser,
586 pszFilename,
587 pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY
588 ? VD_INTERFACEASYNCIO_OPEN_FLAGS_READONLY
589 : 0,
590 NULL,
591 pImage->pVDIfsDisk,
592 &pVmdkFile->pStorage);
593 pVmdkFile->fAsyncIO = true;
594 }
595 else
596 {
597 rc = RTFileOpen(&pVmdkFile->File, pszFilename, fOpen);
598 pVmdkFile->fAsyncIO = false;
599 }
600#else
601 unsigned uOpenFlags = 0;
602
603 if ((fOpen & RTFILE_O_ACCESS_MASK) == RTFILE_O_READ)
604 uOpenFlags |= VD_INTERFACEASYNCIO_OPEN_FLAGS_READONLY;
605 if ((fOpen & RTFILE_O_ACTION_MASK) == RTFILE_O_CREATE)
606 uOpenFlags |= VD_INTERFACEASYNCIO_OPEN_FLAGS_CREATE;
607
608 rc = pImage->pInterfaceAsyncIOCallbacks->pfnOpen(pImage->pInterfaceAsyncIO->pvUser,
609 pszFilename,
610 uOpenFlags,
611 NULL,
612 pImage->pVDIfsDisk,
613 &pVmdkFile->pStorage);
614#endif
615 if (RT_SUCCESS(rc))
616 {
617 pVmdkFile->uReferences = 1;
618 pVmdkFile->pImage = pImage;
619 pVmdkFile->pNext = pImage->pFiles;
620 if (pImage->pFiles)
621 pImage->pFiles->pPrev = pVmdkFile;
622 pImage->pFiles = pVmdkFile;
623 *ppVmdkFile = pVmdkFile;
624 }
625 else
626 {
627 RTStrFree((char *)(void *)pVmdkFile->pszFilename);
628 RTMemFree(pVmdkFile);
629 *ppVmdkFile = NULL;
630 }
631
632 return rc;
633}
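/* Illustrative usage, not part of the original source (file name and flags
 * made up): opening the same path twice returns the same VMDKFILE with
 * uReferences == 2, so a descriptor and an extent referring to the same raw
 * file share one OS handle. Note the Assert above: repeated opens must use
 * identical flags.
 *
 *     PVMDKFILE pFile1, pFile2;
 *     vmdkFileOpen(pImage, &pFile1, "raw.vmdk",
 *                  RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE, false);
 *     vmdkFileOpen(pImage, &pFile2, "raw.vmdk",
 *                  RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE, false);
 *     // pFile1 == pFile2, uReferences == 2
 *     vmdkFileClose(pImage, &pFile2, false);   // reference dropped, still open
 *     vmdkFileClose(pImage, &pFile1, false);   // handle really closed here
 */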
634
635/**
636 * Internal: close a file, updating the file descriptor cache.
637 */
638static int vmdkFileClose(PVMDKIMAGE pImage, PVMDKFILE *ppVmdkFile, bool fDelete)
639{
640 int rc = VINF_SUCCESS;
641 PVMDKFILE pVmdkFile = *ppVmdkFile;
642
643 AssertPtr(pVmdkFile);
644
645 pVmdkFile->fDelete |= fDelete;
646 Assert(pVmdkFile->uReferences);
647 pVmdkFile->uReferences--;
648 if (pVmdkFile->uReferences == 0)
649 {
650 PVMDKFILE pPrev;
651 PVMDKFILE pNext;
652
653 /* Unchain the element from the list. */
654 pPrev = pVmdkFile->pPrev;
655 pNext = pVmdkFile->pNext;
656
657 if (pNext)
658 pNext->pPrev = pPrev;
659 if (pPrev)
660 pPrev->pNext = pNext;
661 else
662 pImage->pFiles = pNext;
663
664#ifndef VBOX_WITH_NEW_IO_CODE
665 if (pVmdkFile->fAsyncIO)
666 {
667 rc = pImage->pInterfaceAsyncIOCallbacks->pfnClose(pImage->pInterfaceAsyncIO->pvUser,
668 pVmdkFile->pStorage);
669 }
670 else
671 {
672 rc = RTFileClose(pVmdkFile->File);
673 }
674#else
675 rc = pImage->pInterfaceAsyncIOCallbacks->pfnClose(pImage->pInterfaceAsyncIO->pvUser,
676 pVmdkFile->pStorage);
677#endif
678 if (RT_SUCCESS(rc) && pVmdkFile->fDelete)
679 rc = RTFileDelete(pVmdkFile->pszFilename);
680 RTStrFree((char *)(void *)pVmdkFile->pszFilename);
681 RTMemFree(pVmdkFile);
682 }
683
684 *ppVmdkFile = NULL;
685 return rc;
686}
687
688/**
689 * Internal: read from a file distinguishing between async and normal operation
690 */
691DECLINLINE(int) vmdkFileReadAt(PVMDKFILE pVmdkFile,
692 uint64_t uOffset, void *pvBuf,
693 size_t cbToRead, size_t *pcbRead)
694{
695 PVMDKIMAGE pImage = pVmdkFile->pImage;
696
697#ifndef VBOX_WITH_NEW_IO_CODE
698 if (pVmdkFile->fAsyncIO)
699 return pImage->pInterfaceAsyncIOCallbacks->pfnReadSync(pImage->pInterfaceAsyncIO->pvUser,
700 pVmdkFile->pStorage, uOffset,
701 cbToRead, pvBuf, pcbRead);
702 else
703 return RTFileReadAt(pVmdkFile->File, uOffset, pvBuf, cbToRead, pcbRead);
704#else
705 return pImage->pInterfaceAsyncIOCallbacks->pfnReadSync(pImage->pInterfaceAsyncIO->pvUser,
706 pVmdkFile->pStorage, uOffset,
707 cbToRead, pvBuf, pcbRead);
708#endif
709}
710
711/**
712 * Internal: write to a file distinguishing between async and normal operation
713 */
714DECLINLINE(int) vmdkFileWriteAt(PVMDKFILE pVmdkFile,
715 uint64_t uOffset, const void *pvBuf,
716 size_t cbToWrite, size_t *pcbWritten)
717{
718 PVMDKIMAGE pImage = pVmdkFile->pImage;
719
720#ifndef VBOX_WITH_NEW_IO_CODE
721 if (pVmdkFile->fAsyncIO)
722 return pImage->pInterfaceAsyncIOCallbacks->pfnWriteSync(pImage->pInterfaceAsyncIO->pvUser,
723 pVmdkFile->pStorage, uOffset,
724 cbToWrite, pvBuf, pcbWritten);
725 else
726 return RTFileWriteAt(pVmdkFile->File, uOffset, pvBuf, cbToWrite, pcbWritten);
727#else
728 return pImage->pInterfaceAsyncIOCallbacks->pfnWriteSync(pImage->pInterfaceAsyncIO->pvUser,
729 pVmdkFile->pStorage, uOffset,
730 cbToWrite, pvBuf, pcbWritten);
731#endif
732}
733
734/**
735 * Internal: get the size of a file distinguishing between async and normal operation
736 */
737DECLINLINE(int) vmdkFileGetSize(PVMDKFILE pVmdkFile, uint64_t *pcbSize)
738{
739 PVMDKIMAGE pImage = pVmdkFile->pImage;
740
741#ifndef VBOX_WITH_NEW_IO_CODE
742 if (pVmdkFile->fAsyncIO)
743 {
744 return pImage->pInterfaceAsyncIOCallbacks->pfnGetSize(pImage->pInterfaceAsyncIO->pvUser,
745 pVmdkFile->pStorage,
746 pcbSize);
747 }
748 else
749 return RTFileGetSize(pVmdkFile->File, pcbSize);
750#else
751 return pImage->pInterfaceAsyncIOCallbacks->pfnGetSize(pImage->pInterfaceAsyncIO->pvUser,
752 pVmdkFile->pStorage,
753 pcbSize);
754#endif
755}
756
757/**
758 * Internal: set the size of a file distinguishing between async and normal operation
759 */
760DECLINLINE(int) vmdkFileSetSize(PVMDKFILE pVmdkFile, uint64_t cbSize)
761{
762 PVMDKIMAGE pImage = pVmdkFile->pImage;
763
764#ifndef VBOX_WITH_NEW_IO_CODE
765 if (pVmdkFile->fAsyncIO)
766 {
767 return pImage->pInterfaceAsyncIOCallbacks->pfnSetSize(pImage->pInterfaceAsyncIO->pvUser,
768 pVmdkFile->pStorage,
769 cbSize);
770 }
771 else
772 return RTFileSetSize(pVmdkFile->File, cbSize);
773#else
774 return pImage->pInterfaceAsyncIOCallbacks->pfnSetSize(pImage->pInterfaceAsyncIO->pvUser,
775 pVmdkFile->pStorage,
776 cbSize);
777#endif
778}
779
780/**
781 * Internal: flush a file distinguishing between async and normal operation
782 */
783DECLINLINE(int) vmdkFileFlush(PVMDKFILE pVmdkFile)
784{
785 PVMDKIMAGE pImage = pVmdkFile->pImage;
786
787#ifndef VBOX_WITH_NEW_IO_CODE
788 if (pVmdkFile->fAsyncIO)
789 return pImage->pInterfaceAsyncIOCallbacks->pfnFlushSync(pImage->pInterfaceAsyncIO->pvUser,
790 pVmdkFile->pStorage);
791 else
792 return RTFileFlush(pVmdkFile->File);
793#else
794 return pImage->pInterfaceAsyncIOCallbacks->pfnFlushSync(pImage->pInterfaceAsyncIO->pvUser,
795 pVmdkFile->pStorage);
796#endif
797}
798
799
800static DECLCALLBACK(int) vmdkFileInflateHelper(void *pvUser, void *pvBuf, size_t cbBuf, size_t *pcbBuf)
801{
802 VMDKINFLATESTATE *pInflateState = (VMDKINFLATESTATE *)pvUser;
803
804 Assert(cbBuf);
805 if (pInflateState->iOffset < 0)
806 {
807 *(uint8_t *)pvBuf = RTZIPTYPE_ZLIB;
808 if (pcbBuf)
809 *pcbBuf = 1;
810 pInflateState->iOffset = 0;
811 return VINF_SUCCESS;
812 }
813 cbBuf = RT_MIN(cbBuf, pInflateState->cbSize);
814 int rc = vmdkFileReadAt(pInflateState->File, pInflateState->uFileOffset, pvBuf, cbBuf, NULL);
815 if (RT_FAILURE(rc))
816 return rc;
817 pInflateState->uFileOffset += cbBuf;
818 pInflateState->iOffset += cbBuf;
819 pInflateState->cbSize -= cbBuf;
820 Assert(pcbBuf);
821 *pcbBuf = cbBuf;
822 return VINF_SUCCESS;
823}
824
825/**
826 * Internal: read from a file and inflate the compressed data,
827 * distinguishing between async and normal operation
828 */
829DECLINLINE(int) vmdkFileInflateAt(PVMDKFILE pVmdkFile,
830 uint64_t uOffset, void *pvBuf,
831 size_t cbToRead, unsigned uMarker,
832 uint64_t *puLBA, uint32_t *pcbMarkerData)
833{
834 if (pVmdkFile->fAsyncIO)
835 {
836 AssertMsgFailed(("TODO\n"));
837 return VERR_NOT_SUPPORTED;
838 }
839 else
840 {
841 int rc;
842 PRTZIPDECOMP pZip = NULL;
843 VMDKMARKER Marker;
844 uint64_t uCompOffset, cbComp;
845 VMDKINFLATESTATE InflateState;
846 size_t cbActuallyRead;
847 size_t cbMarker = sizeof(Marker);
848
849 if (uMarker == VMDK_MARKER_IGNORE)
850 cbMarker -= sizeof(Marker.uType);
851 rc = vmdkFileReadAt(pVmdkFile, uOffset, &Marker, cbMarker, NULL);
852 if (RT_FAILURE(rc))
853 return rc;
854 Marker.uSector = RT_LE2H_U64(Marker.uSector);
855 Marker.cbSize = RT_LE2H_U32(Marker.cbSize);
856 if ( uMarker != VMDK_MARKER_IGNORE
857 && ( RT_LE2H_U32(Marker.uType) != uMarker
858 || Marker.cbSize != 0))
859 return VERR_VD_VMDK_INVALID_FORMAT;
860 if (Marker.cbSize != 0)
861 {
862 /* Compressed grain marker. Data follows immediately. */
863 uCompOffset = uOffset + 12;
864 cbComp = Marker.cbSize;
865 if (puLBA)
866 *puLBA = Marker.uSector;
867 if (pcbMarkerData)
868 *pcbMarkerData = cbComp + 12;
869 }
870 else
871 {
872 Marker.uType = RT_LE2H_U32(Marker.uType);
873 if (Marker.uType == VMDK_MARKER_EOS)
874 {
875 Assert(uMarker != VMDK_MARKER_EOS);
876 return VERR_VD_VMDK_INVALID_FORMAT;
877 }
878 else if ( Marker.uType == VMDK_MARKER_GT
879 || Marker.uType == VMDK_MARKER_GD
880 || Marker.uType == VMDK_MARKER_FOOTER)
881 {
882 uCompOffset = uOffset + 512;
883 cbComp = VMDK_SECTOR2BYTE(Marker.uSector);
884 if (pcbMarkerData)
885 *pcbMarkerData = cbComp + 512;
886 }
887 else
888 {
889 AssertMsgFailed(("VMDK: unknown marker type %u\n", Marker.uType));
890 return VERR_VD_VMDK_INVALID_FORMAT;
891 }
892 }
893 InflateState.File = pVmdkFile;
894 InflateState.cbSize = cbComp;
895 InflateState.uFileOffset = uCompOffset;
896 InflateState.iOffset = -1;
897 /* Sanity check - the expansion ratio should be much less than 2. */
898 Assert(cbComp < 2 * cbToRead);
899 if (cbComp >= 2 * cbToRead)
900 return VERR_VD_VMDK_INVALID_FORMAT;
901
902 rc = RTZipDecompCreate(&pZip, &InflateState, vmdkFileInflateHelper);
903 if (RT_FAILURE(rc))
904 return rc;
905 rc = RTZipDecompress(pZip, pvBuf, cbToRead, &cbActuallyRead);
906 RTZipDecompDestroy(pZip);
907 if (RT_FAILURE(rc))
908 return rc;
909 if (cbActuallyRead != cbToRead)
910 rc = VERR_VD_VMDK_INVALID_FORMAT;
911 return rc;
912 }
913}
914
915static DECLCALLBACK(int) vmdkFileDeflateHelper(void *pvUser, const void *pvBuf, size_t cbBuf)
916{
917 VMDKDEFLATESTATE *pDeflateState = (VMDKDEFLATESTATE *)pvUser;
918
919 Assert(cbBuf);
920 if (pDeflateState->iOffset < 0)
921 {
922 pvBuf = (const uint8_t *)pvBuf + 1;
923 cbBuf--;
924 pDeflateState->iOffset = 0;
925 }
926 if (!cbBuf)
927 return VINF_SUCCESS;
928 int rc = vmdkFileWriteAt(pDeflateState->File, pDeflateState->uFileOffset, pvBuf, cbBuf, NULL);
929 if (RT_FAILURE(rc))
930 return rc;
931 pDeflateState->uFileOffset += cbBuf;
932 pDeflateState->iOffset += cbBuf;
933 return VINF_SUCCESS;
934}
935
936/**
937 * Internal: deflate the uncompressed data and write to a file,
938 * distinguishing between async and normal operation
939 */
940DECLINLINE(int) vmdkFileDeflateAt(PVMDKFILE pVmdkFile,
941 uint64_t uOffset, const void *pvBuf,
942 size_t cbToWrite, unsigned uMarker,
943 uint64_t uLBA, uint32_t *pcbMarkerData)
944{
945 if (pVmdkFile->fAsyncIO)
946 {
947 AssertMsgFailed(("TODO\n"));
948 return VERR_NOT_SUPPORTED;
949 }
950 else
951 {
952 int rc;
953 PRTZIPCOMP pZip = NULL;
954 VMDKMARKER Marker;
955 uint64_t uCompOffset, cbDecomp;
956 VMDKDEFLATESTATE DeflateState;
957
958 Marker.uSector = RT_H2LE_U64(uLBA);
959 Marker.cbSize = RT_H2LE_U32((uint32_t)cbToWrite);
960 if (uMarker == VMDK_MARKER_IGNORE)
961 {
962 /* Compressed grain marker. Data follows immediately. */
963 uCompOffset = uOffset + 12;
964 cbDecomp = cbToWrite;
965 }
966 else
967 {
968 /** @todo implement creating the other marker types */
969 return VERR_NOT_IMPLEMENTED;
970 }
971 DeflateState.File = pVmdkFile;
972 DeflateState.uFileOffset = uCompOffset;
973 DeflateState.iOffset = -1;
974
975 rc = RTZipCompCreate(&pZip, &DeflateState, vmdkFileDeflateHelper, RTZIPTYPE_ZLIB, RTZIPLEVEL_DEFAULT);
976 if (RT_FAILURE(rc))
977 return rc;
978 rc = RTZipCompress(pZip, pvBuf, cbDecomp);
979 if (RT_SUCCESS(rc))
980 rc = RTZipCompFinish(pZip);
981 RTZipCompDestroy(pZip);
982 if (RT_SUCCESS(rc))
983 {
984 if (pcbMarkerData)
985 *pcbMarkerData = 12 + DeflateState.iOffset;
986 /* Set the file size to remove old garbage in case the block is
987 * rewritten. Cannot cause data loss as the code calling this
988 * guarantees that data gets only appended. */
989 Assert(DeflateState.uFileOffset > uCompOffset);
990 rc = vmdkFileSetSize(pVmdkFile, DeflateState.uFileOffset);
991
992 if (uMarker == VMDK_MARKER_IGNORE)
993 {
994 /* Compressed grain marker. */
995 Marker.cbSize = RT_H2LE_U32(DeflateState.iOffset);
996 rc = vmdkFileWriteAt(pVmdkFile, uOffset, &Marker, 12, NULL);
997 if (RT_FAILURE(rc))
998 return rc;
999 }
1000 else
1001 {
1002 /** @todo implement creating the other marker types */
1003 return VERR_NOT_IMPLEMENTED;
1004 }
1005 }
1006 return rc;
1007 }
1008}
1009
1010/**
1011 * Internal: check if all files are closed, prevent leaking resources.
1012 */
1013static int vmdkFileCheckAllClose(PVMDKIMAGE pImage)
1014{
1015 int rc = VINF_SUCCESS, rc2;
1016 PVMDKFILE pVmdkFile;
1017
1018 Assert(pImage->pFiles == NULL);
1019 for (pVmdkFile = pImage->pFiles;
1020 pVmdkFile != NULL;
1021 pVmdkFile = pVmdkFile->pNext)
1022 {
1023 LogRel(("VMDK: leaking reference to file \"%s\"\n",
1024 pVmdkFile->pszFilename));
1025 pImage->pFiles = pVmdkFile->pNext;
1026
1027 if (pImage->uOpenFlags & VD_OPEN_FLAGS_ASYNC_IO)
1028 rc2 = pImage->pInterfaceAsyncIOCallbacks->pfnClose(pImage->pInterfaceAsyncIO->pvUser,
1029 pVmdkFile->pStorage);
1030 else
1031 rc2 = vmdkFileClose(pImage, &pVmdkFile, pVmdkFile->fDelete);
1032
1033 if (RT_SUCCESS(rc))
1034 rc = rc2;
1035 }
1036 return rc;
1037}
1038
1039/**
1040 * Internal: truncate a string (at a UTF8 code point boundary) and encode the
1041 * critical non-ASCII characters.
1042 */
1043static char *vmdkEncodeString(const char *psz)
1044{
1045 char szEnc[VMDK_ENCODED_COMMENT_MAX + 3];
1046 char *pszDst = szEnc;
1047
1048 AssertPtr(psz);
1049
1050 for (; *psz; psz = RTStrNextCp(psz))
1051 {
1052 char *pszDstPrev = pszDst;
1053 RTUNICP Cp = RTStrGetCp(psz);
1054 if (Cp == '\\')
1055 {
1056 pszDst = RTStrPutCp(pszDst, Cp);
1057 pszDst = RTStrPutCp(pszDst, Cp);
1058 }
1059 else if (Cp == '\n')
1060 {
1061 pszDst = RTStrPutCp(pszDst, '\\');
1062 pszDst = RTStrPutCp(pszDst, 'n');
1063 }
1064 else if (Cp == '\r')
1065 {
1066 pszDst = RTStrPutCp(pszDst, '\\');
1067 pszDst = RTStrPutCp(pszDst, 'r');
1068 }
1069 else
1070 pszDst = RTStrPutCp(pszDst, Cp);
1071 if (pszDst - szEnc >= VMDK_ENCODED_COMMENT_MAX - 1)
1072 {
1073 pszDst = pszDstPrev;
1074 break;
1075 }
1076 }
1077 *pszDst = '\0';
1078 return RTStrDup(szEnc);
1079}
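/* Illustrative example, not part of the original source: the loop above
 * doubles backslashes and replaces line breaks, so a comment containing
 *
 *     first line<LF>second \ line
 *
 * is stored in the descriptor as the single line
 *
 *     first line\nsecond \\ line
 *
 * vmdkDecodeString() below performs the inverse mapping; the encoded form is
 * truncated at a code point boundary once VMDK_ENCODED_COMMENT_MAX would be
 * exceeded. */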
1080
1081/**
1082 * Internal: decode a string and store it into the specified string.
1083 */
1084static int vmdkDecodeString(const char *pszEncoded, char *psz, size_t cb)
1085{
1086 int rc = VINF_SUCCESS;
1087 char szBuf[4];
1088
1089 if (!cb)
1090 return VERR_BUFFER_OVERFLOW;
1091
1092 AssertPtr(psz);
1093
1094 for (; *pszEncoded; pszEncoded = RTStrNextCp(pszEncoded))
1095 {
1096 char *pszDst = szBuf;
1097 RTUNICP Cp = RTStrGetCp(pszEncoded);
1098 if (Cp == '\\')
1099 {
1100 pszEncoded = RTStrNextCp(pszEncoded);
1101 RTUNICP CpQ = RTStrGetCp(pszEncoded);
1102 if (CpQ == 'n')
1103 RTStrPutCp(pszDst, '\n');
1104 else if (CpQ == 'r')
1105 RTStrPutCp(pszDst, '\r');
1106 else if (CpQ == '\0')
1107 {
1108 rc = VERR_VD_VMDK_INVALID_HEADER;
1109 break;
1110 }
1111 else
1112 RTStrPutCp(pszDst, CpQ);
1113 }
1114 else
1115 pszDst = RTStrPutCp(pszDst, Cp);
1116
1117 /* Need to leave space for terminating NUL. */
1118 if ((size_t)(pszDst - szBuf) + 1 >= cb)
1119 {
1120 rc = VERR_BUFFER_OVERFLOW;
1121 break;
1122 }
1123 memcpy(psz, szBuf, pszDst - szBuf);
1124 psz += pszDst - szBuf;
1125 }
1126 *psz = '\0';
1127 return rc;
1128}
1129
1130static int vmdkReadGrainDirectory(PVMDKEXTENT pExtent)
1131{
1132 int rc = VINF_SUCCESS;
1133 unsigned i;
1134 uint32_t *pGD = NULL, *pRGD = NULL, *pGDTmp, *pRGDTmp;
1135 size_t cbGD = pExtent->cGDEntries * sizeof(uint32_t);
1136
1137 if (pExtent->enmType != VMDKETYPE_HOSTED_SPARSE)
1138 goto out;
1139
1140 pGD = (uint32_t *)RTMemAllocZ(cbGD);
1141 if (!pGD)
1142 {
1143 rc = VERR_NO_MEMORY;
1144 goto out;
1145 }
1146 pExtent->pGD = pGD;
1147 /* The VMDK 1.1 spec talks about compressed grain directories, but real
1148 * life files don't have them. The spec is wrong in creative ways. */
1149 rc = vmdkFileReadAt(pExtent->pFile, VMDK_SECTOR2BYTE(pExtent->uSectorGD),
1150 pGD, cbGD, NULL);
1151 AssertRC(rc);
1152 if (RT_FAILURE(rc))
1153 {
1154 rc = vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: could not read grain directory in '%s': %Rrc"), pExtent->pszFullname);
1155 goto out;
1156 }
1157 for (i = 0, pGDTmp = pGD; i < pExtent->cGDEntries; i++, pGDTmp++)
1158 *pGDTmp = RT_LE2H_U32(*pGDTmp);
1159
1160 if (pExtent->uSectorRGD)
1161 {
1162 pRGD = (uint32_t *)RTMemAllocZ(cbGD);
1163 if (!pRGD)
1164 {
1165 rc = VERR_NO_MEMORY;
1166 goto out;
1167 }
1168 pExtent->pRGD = pRGD;
1169 /* The VMDK 1.1 spec talks about compressed grain directories, but real
1170 * life files don't have them. The spec is wrong in creative ways. */
1171 rc = vmdkFileReadAt(pExtent->pFile, VMDK_SECTOR2BYTE(pExtent->uSectorRGD),
1172 pRGD, cbGD, NULL);
1173 AssertRC(rc);
1174 if (RT_FAILURE(rc))
1175 {
1176 rc = vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: could not read redundant grain directory in '%s'"), pExtent->pszFullname);
1177 goto out;
1178 }
1179 for (i = 0, pRGDTmp = pRGD; i < pExtent->cGDEntries; i++, pRGDTmp++)
1180 *pRGDTmp = RT_LE2H_U32(*pRGDTmp);
1181
1182 /* Check grain table and redundant grain table for consistency. */
1183 size_t cbGT = pExtent->cGTEntries * sizeof(uint32_t);
1184 uint32_t *pTmpGT1 = (uint32_t *)RTMemTmpAlloc(cbGT);
1185 if (!pTmpGT1)
1186 {
1187 rc = VERR_NO_MEMORY;
1188 goto out;
1189 }
1190 uint32_t *pTmpGT2 = (uint32_t *)RTMemTmpAlloc(cbGT);
1191 if (!pTmpGT2)
1192 {
1193 RTMemTmpFree(pTmpGT1);
1194 rc = VERR_NO_MEMORY;
1195 goto out;
1196 }
1197
1198 for (i = 0, pGDTmp = pGD, pRGDTmp = pRGD;
1199 i < pExtent->cGDEntries;
1200 i++, pGDTmp++, pRGDTmp++)
1201 {
1202 /* If no grain table is allocated skip the entry. */
1203 if (*pGDTmp == 0 && *pRGDTmp == 0)
1204 continue;
1205
1206 if (*pGDTmp == 0 || *pRGDTmp == 0 || *pGDTmp == *pRGDTmp)
1207 {
1208 /* Just one grain directory entry refers to a not yet allocated
1209 * grain table or both grain directory copies refer to the same
1210 * grain table. Not allowed. */
1211 RTMemTmpFree(pTmpGT1);
1212 RTMemTmpFree(pTmpGT2);
1213 rc = vmdkError(pExtent->pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: inconsistent references to grain directory in '%s'"), pExtent->pszFullname);
1214 goto out;
1215 }
1216 /* The VMDK 1.1 spec talks about compressed grain tables, but real
1217 * life files don't have them. The spec is wrong in creative ways. */
1218 rc = vmdkFileReadAt(pExtent->pFile, VMDK_SECTOR2BYTE(*pGDTmp),
1219 pTmpGT1, cbGT, NULL);
1220 if (RT_FAILURE(rc))
1221 {
1222 rc = vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: error reading grain table in '%s'"), pExtent->pszFullname);
1223 RTMemTmpFree(pTmpGT1);
1224 RTMemTmpFree(pTmpGT2);
1225 goto out;
1226 }
1227 /* The VMDK 1.1 spec talks about compressed grain tables, but real
1228 * life files don't have them. The spec is wrong in creative ways. */
1229 rc = vmdkFileReadAt(pExtent->pFile, VMDK_SECTOR2BYTE(*pRGDTmp),
1230 pTmpGT2, cbGT, NULL);
1231 if (RT_FAILURE(rc))
1232 {
1233 rc = vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: error reading backup grain table in '%s'"), pExtent->pszFullname);
1234 RTMemTmpFree(pTmpGT1);
1235 RTMemTmpFree(pTmpGT2);
1236 goto out;
1237 }
1238 if (memcmp(pTmpGT1, pTmpGT2, cbGT))
1239 {
1240 RTMemTmpFree(pTmpGT1);
1241 RTMemTmpFree(pTmpGT2);
1242 rc = vmdkError(pExtent->pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: inconsistency between grain table and backup grain table in '%s'"), pExtent->pszFullname);
1243 goto out;
1244 }
1245 }
1246
1247 /** @todo figure out what to do for unclean VMDKs. */
1248 RTMemTmpFree(pTmpGT1);
1249 RTMemTmpFree(pTmpGT2);
1250 }
1251
1252 if (pExtent->pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
1253 {
1254 uint32_t uLastGrainWritten = 0;
1255 uint32_t uLastGrainSector = 0;
1256 size_t cbGT = pExtent->cGTEntries * sizeof(uint32_t);
1257 uint32_t *pTmpGT = (uint32_t *)RTMemTmpAlloc(cbGT);
1258 if (!pTmpGT)
1259 {
1260 rc = VERR_NO_MEMORY;
1261 goto out;
1262 }
1263 for (i = 0, pGDTmp = pGD; i < pExtent->cGDEntries; i++, pGDTmp++)
1264 {
1265 /* If no grain table is allocated skip the entry. */
1266 if (*pGDTmp == 0)
1267 continue;
1268
1269 /* The VMDK 1.1 spec talks about compressed grain tables, but real
1270 * life files don't have them. The spec is wrong in creative ways. */
1271 rc = vmdkFileReadAt(pExtent->pFile, VMDK_SECTOR2BYTE(*pGDTmp),
1272 pTmpGT, cbGT, NULL);
1273 if (RT_FAILURE(rc))
1274 {
1275 rc = vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: error reading grain table in '%s'"), pExtent->pszFullname);
1276 RTMemTmpFree(pTmpGT);
1277 goto out;
1278 }
1279 uint32_t j;
1280 uint32_t *pGTTmp;
1281 for (j = 0, pGTTmp = pTmpGT; j < pExtent->cGTEntries; j++, pGTTmp++)
1282 {
1283 uint32_t uGTTmp = RT_LE2H_U32(*pGTTmp);
1284
1285 /* If no grain is allocated skip the entry. */
1286 if (uGTTmp == 0)
1287 continue;
1288
1289 if (uLastGrainSector && uLastGrainSector >= uGTTmp)
1290 {
1291 rc = vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: grain table in '%s' contains a violation of the ordering assumptions"), pExtent->pszFullname);
1292 RTMemTmpFree(pTmpGT);
1293 goto out;
1294 }
1295 uLastGrainSector = uGTTmp;
1296 uLastGrainWritten = i * pExtent->cGTEntries + j;
1297 }
1298 }
1299 RTMemTmpFree(pTmpGT);
1300
1301 /* streamOptimized extents need a grain decompress buffer. */
1302 pExtent->pvGrain = RTMemAlloc(VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
1303 if (!pExtent->pvGrain)
1304 {
1305 rc = VERR_NO_MEMORY;
1306 goto out;
1307 }
1308
1309 if (uLastGrainSector)
1310 {
1311 uint64_t uLBA = 0;
1312 uint32_t cbMarker = 0;
1313 rc = vmdkFileInflateAt(pExtent->pFile, VMDK_SECTOR2BYTE(uLastGrainSector),
1314 pExtent->pvGrain, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain), VMDK_MARKER_IGNORE, &uLBA, &cbMarker);
1315 if (RT_FAILURE(rc))
1316 goto out;
1317
1318 Assert(uLBA == uLastGrainWritten * pExtent->cSectorsPerGrain);
1319 pExtent->uGrainSector = uLastGrainSector;
1320 pExtent->cbLastGrainWritten = RT_ALIGN(cbMarker, 512);
1321 }
1322 pExtent->uLastGrainWritten = uLastGrainWritten;
1323 pExtent->uLastGrainSector = uLastGrainSector;
1324 }
1325
1326out:
1327 if (RT_FAILURE(rc))
1328 vmdkFreeGrainDirectory(pExtent);
1329 return rc;
1330}
1331
1332static int vmdkCreateGrainDirectory(PVMDKEXTENT pExtent, uint64_t uStartSector,
1333 bool fPreAlloc)
1334{
1335 int rc = VINF_SUCCESS;
1336 unsigned i;
1337 uint32_t *pGD = NULL, *pRGD = NULL;
1338 size_t cbGD = pExtent->cGDEntries * sizeof(uint32_t);
1339 size_t cbGDRounded = RT_ALIGN_64(pExtent->cGDEntries * sizeof(uint32_t), 512);
1340 size_t cbGTRounded;
1341 uint64_t cbOverhead;
1342
1343 if (fPreAlloc)
1344 cbGTRounded = RT_ALIGN_64(pExtent->cGDEntries * pExtent->cGTEntries * sizeof(uint32_t), 512);
1345 else
1346 cbGTRounded = 0;
1347
1348 pGD = (uint32_t *)RTMemAllocZ(cbGD);
1349 if (!pGD)
1350 {
1351 rc = VERR_NO_MEMORY;
1352 goto out;
1353 }
1354 pExtent->pGD = pGD;
1355 pRGD = (uint32_t *)RTMemAllocZ(cbGD);
1356 if (!pRGD)
1357 {
1358 rc = VERR_NO_MEMORY;
1359 goto out;
1360 }
1361 pExtent->pRGD = pRGD;
1362
1363 cbOverhead = RT_ALIGN_64(VMDK_SECTOR2BYTE(uStartSector) + 2 * (cbGDRounded + cbGTRounded), VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
1364 /* For streamOptimized extents put the end-of-stream marker at the end. */
1365 if (pExtent->pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
1366 rc = vmdkFileSetSize(pExtent->pFile, cbOverhead + 512);
1367 else
1368 rc = vmdkFileSetSize(pExtent->pFile, cbOverhead);
1369 if (RT_FAILURE(rc))
1370 goto out;
1371 pExtent->uSectorRGD = uStartSector;
1372 pExtent->uSectorGD = uStartSector + VMDK_BYTE2SECTOR(cbGDRounded + cbGTRounded);
1373
1374 if (fPreAlloc)
1375 {
1376 uint32_t uGTSectorLE;
1377 uint64_t uOffsetSectors;
1378
1379 uOffsetSectors = pExtent->uSectorRGD + VMDK_BYTE2SECTOR(cbGDRounded);
1380 for (i = 0; i < pExtent->cGDEntries; i++)
1381 {
1382 pRGD[i] = uOffsetSectors;
1383 uGTSectorLE = RT_H2LE_U64(uOffsetSectors);
1384 /* Write the redundant grain directory entry to disk. */
1385 rc = vmdkFileWriteAt(pExtent->pFile,
1386 VMDK_SECTOR2BYTE(pExtent->uSectorRGD) + i * sizeof(uGTSectorLE),
1387 &uGTSectorLE, sizeof(uGTSectorLE), NULL);
1388 if (RT_FAILURE(rc))
1389 {
1390 rc = vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write new redundant grain directory entry in '%s'"), pExtent->pszFullname);
1391 goto out;
1392 }
1393 uOffsetSectors += VMDK_BYTE2SECTOR(pExtent->cGTEntries * sizeof(uint32_t));
1394 }
1395
1396 uOffsetSectors = pExtent->uSectorGD + VMDK_BYTE2SECTOR(cbGDRounded);
1397 for (i = 0; i < pExtent->cGDEntries; i++)
1398 {
1399 pGD[i] = uOffsetSectors;
1400 uGTSectorLE = RT_H2LE_U64(uOffsetSectors);
1401 /* Write the grain directory entry to disk. */
1402 rc = vmdkFileWriteAt(pExtent->pFile,
1403 VMDK_SECTOR2BYTE(pExtent->uSectorGD) + i * sizeof(uGTSectorLE),
1404 &uGTSectorLE, sizeof(uGTSectorLE), NULL);
1405 if (RT_FAILURE(rc))
1406 {
1407 rc = vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write new grain directory entry in '%s'"), pExtent->pszFullname);
1408 goto out;
1409 }
1410 uOffsetSectors += VMDK_BYTE2SECTOR(pExtent->cGTEntries * sizeof(uint32_t));
1411 }
1412 }
1413 pExtent->cOverheadSectors = VMDK_BYTE2SECTOR(cbOverhead);
1414
1415 /* streamOptimized extents need a grain decompress buffer. */
1416 if (pExtent->pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
1417 {
1418 pExtent->pvGrain = RTMemAlloc(VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
1419 if (!pExtent->pvGrain)
1420 {
1421 rc = VERR_NO_MEMORY;
1422 goto out;
1423 }
1424 }
1425
1426out:
1427 if (RT_FAILURE(rc))
1428 vmdkFreeGrainDirectory(pExtent);
1429 return rc;
1430}
1431
1432static void vmdkFreeGrainDirectory(PVMDKEXTENT pExtent)
1433{
1434 if (pExtent->pGD)
1435 {
1436 RTMemFree(pExtent->pGD);
1437 pExtent->pGD = NULL;
1438 }
1439 if (pExtent->pRGD)
1440 {
1441 RTMemFree(pExtent->pRGD);
1442 pExtent->pRGD = NULL;
1443 }
1444}
1445
1446static int vmdkStringUnquote(PVMDKIMAGE pImage, const char *pszStr,
1447 char **ppszUnquoted, char **ppszNext)
1448{
1449 char *pszQ;
1450 char *pszUnquoted;
1451
1452 /* Skip over whitespace. */
1453 while (*pszStr == ' ' || *pszStr == '\t')
1454 pszStr++;
1455
1456 if (*pszStr != '"')
1457 {
1458 pszQ = (char *)pszStr;
1459 while (*pszQ && *pszQ != ' ' && *pszQ != '\t')
1460 pszQ++;
1461 }
1462 else
1463 {
1464 pszStr++;
1465 pszQ = (char *)strchr(pszStr, '"');
1466 if (pszQ == NULL)
1467 return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrectly quoted value in descriptor in '%s'"), pImage->pszFilename);
1468 }
1469
1470 pszUnquoted = (char *)RTMemTmpAlloc(pszQ - pszStr + 1);
1471 if (!pszUnquoted)
1472 return VERR_NO_MEMORY;
1473 memcpy(pszUnquoted, pszStr, pszQ - pszStr);
1474 pszUnquoted[pszQ - pszStr] = '\0';
1475 *ppszUnquoted = pszUnquoted;
1476 if (ppszNext)
1477 *ppszNext = pszQ + 1;
1478 return VINF_SUCCESS;
1479}
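/* Illustrative example, not part of the original source (value made up): for
 * an input pointing at the value part of a descriptor line,
 *
 *     pszStr : "monolithicSparse" trailing
 *     result : *ppszUnquoted = monolithicSparse (RTMemTmpAlloc'ed, freed by
 *              the caller with RTMemTmpFree), *ppszNext -> " trailing"
 *
 * Unquoted values are instead terminated by the first blank or tab. */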
1480
1481static int vmdkDescInitStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1482 const char *pszLine)
1483{
1484 char *pEnd = pDescriptor->aLines[pDescriptor->cLines];
1485 ssize_t cbDiff = strlen(pszLine) + 1;
1486
1487 if ( pDescriptor->cLines >= VMDK_DESCRIPTOR_LINES_MAX - 1
1488 && pEnd - pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff)
1489 return vmdkError(pImage, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
1490
1491 memcpy(pEnd, pszLine, cbDiff);
1492 pDescriptor->cLines++;
1493 pDescriptor->aLines[pDescriptor->cLines] = pEnd + cbDiff;
1494 pDescriptor->fDirty = true;
1495
1496 return VINF_SUCCESS;
1497}
1498
1499static bool vmdkDescGetStr(PVMDKDESCRIPTOR pDescriptor, unsigned uStart,
1500 const char *pszKey, const char **ppszValue)
1501{
1502 size_t cbKey = strlen(pszKey);
1503 const char *pszValue;
1504
1505 while (uStart != 0)
1506 {
1507 if (!strncmp(pDescriptor->aLines[uStart], pszKey, cbKey))
1508 {
1509 /* Key matches, check for a '=' (preceded by whitespace). */
1510 pszValue = pDescriptor->aLines[uStart] + cbKey;
1511 while (*pszValue == ' ' || *pszValue == '\t')
1512 pszValue++;
1513 if (*pszValue == '=')
1514 {
1515 *ppszValue = pszValue + 1;
1516 break;
1517 }
1518 }
1519 uStart = pDescriptor->aNextLines[uStart];
1520 }
1521 return !!uStart;
1522}
1523
1524static int vmdkDescSetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1525 unsigned uStart,
1526 const char *pszKey, const char *pszValue)
1527{
1528 char *pszTmp;
1529 size_t cbKey = strlen(pszKey);
1530 unsigned uLast = 0;
1531
1532 while (uStart != 0)
1533 {
1534 if (!strncmp(pDescriptor->aLines[uStart], pszKey, cbKey))
1535 {
1536 /* Key matches, check for a '=' (preceded by whitespace). */
1537 pszTmp = pDescriptor->aLines[uStart] + cbKey;
1538 while (*pszTmp == ' ' || *pszTmp == '\t')
1539 pszTmp++;
1540 if (*pszTmp == '=')
1541 {
1542 pszTmp++;
1543 while (*pszTmp == ' ' || *pszTmp == '\t')
1544 pszTmp++;
1545 break;
1546 }
1547 }
1548 if (!pDescriptor->aNextLines[uStart])
1549 uLast = uStart;
1550 uStart = pDescriptor->aNextLines[uStart];
1551 }
1552 if (uStart)
1553 {
1554 if (pszValue)
1555 {
1556 /* Key already exists, replace existing value. */
1557 size_t cbOldVal = strlen(pszTmp);
1558 size_t cbNewVal = strlen(pszValue);
1559 ssize_t cbDiff = cbNewVal - cbOldVal;
1560 /* Check for buffer overflow. */
1561 if ( pDescriptor->aLines[pDescriptor->cLines]
1562 - pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff)
1563 return vmdkError(pImage, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
1564
1565 memmove(pszTmp + cbNewVal, pszTmp + cbOldVal,
1566 pDescriptor->aLines[pDescriptor->cLines] - pszTmp - cbOldVal);
1567 memcpy(pszTmp, pszValue, cbNewVal + 1);
1568 for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
1569 pDescriptor->aLines[i] += cbDiff;
1570 }
1571 else
1572 {
1573 memmove(pDescriptor->aLines[uStart], pDescriptor->aLines[uStart+1],
1574 pDescriptor->aLines[pDescriptor->cLines] - pDescriptor->aLines[uStart+1] + 1);
1575 for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
1576 {
1577 pDescriptor->aLines[i-1] = pDescriptor->aLines[i];
1578 if (pDescriptor->aNextLines[i])
1579 pDescriptor->aNextLines[i-1] = pDescriptor->aNextLines[i] - 1;
1580 else
1581 pDescriptor->aNextLines[i-1] = 0;
1582 }
1583 pDescriptor->cLines--;
1584 /* Adjust starting line numbers of following descriptor sections. */
1585 if (uStart < pDescriptor->uFirstExtent)
1586 pDescriptor->uFirstExtent--;
1587 if (uStart < pDescriptor->uFirstDDB)
1588 pDescriptor->uFirstDDB--;
1589 }
1590 }
1591 else
1592 {
1593 /* Key doesn't exist, append after the last entry in this category. */
1594 if (!pszValue)
1595 {
1596 /* Key doesn't exist, and it should be removed. Simply a no-op. */
1597 return VINF_SUCCESS;
1598 }
1599 cbKey = strlen(pszKey);
1600 size_t cbValue = strlen(pszValue);
1601 ssize_t cbDiff = cbKey + 1 + cbValue + 1;
1602 /* Check for buffer overflow. */
1603 if ( (pDescriptor->cLines >= VMDK_DESCRIPTOR_LINES_MAX - 1)
1604 || ( pDescriptor->aLines[pDescriptor->cLines]
1605 - pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff))
1606 return vmdkError(pImage, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
1607 for (unsigned i = pDescriptor->cLines + 1; i > uLast + 1; i--)
1608 {
1609 pDescriptor->aLines[i] = pDescriptor->aLines[i - 1];
1610 if (pDescriptor->aNextLines[i - 1])
1611 pDescriptor->aNextLines[i] = pDescriptor->aNextLines[i - 1] + 1;
1612 else
1613 pDescriptor->aNextLines[i] = 0;
1614 }
1615 uStart = uLast + 1;
1616 pDescriptor->aNextLines[uLast] = uStart;
1617 pDescriptor->aNextLines[uStart] = 0;
1618 pDescriptor->cLines++;
1619 pszTmp = pDescriptor->aLines[uStart];
1620 memmove(pszTmp + cbDiff, pszTmp,
1621 pDescriptor->aLines[pDescriptor->cLines] - pszTmp);
1622 memcpy(pDescriptor->aLines[uStart], pszKey, cbKey);
1623 pDescriptor->aLines[uStart][cbKey] = '=';
1624 memcpy(pDescriptor->aLines[uStart] + cbKey + 1, pszValue, cbValue + 1);
1625 for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
1626 pDescriptor->aLines[i] += cbDiff;
1627
1628 /* Adjust starting line numbers of following descriptor sections. */
1629 if (uStart <= pDescriptor->uFirstExtent)
1630 pDescriptor->uFirstExtent++;
1631 if (uStart <= pDescriptor->uFirstDDB)
1632 pDescriptor->uFirstDDB++;
1633 }
1634 pDescriptor->fDirty = true;
1635 return VINF_SUCCESS;
1636}
1637
1638static int vmdkDescBaseGetU32(PVMDKDESCRIPTOR pDescriptor, const char *pszKey,
1639 uint32_t *puValue)
1640{
1641 const char *pszValue;
1642
1643 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDesc, pszKey,
1644 &pszValue))
1645 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1646 return RTStrToUInt32Ex(pszValue, NULL, 10, puValue);
1647}
1648
1649static int vmdkDescBaseGetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1650 const char *pszKey, const char **ppszValue)
1651{
1652 const char *pszValue;
1653 char *pszValueUnquoted;
1654
1655 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDesc, pszKey,
1656 &pszValue))
1657 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1658 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1659 if (RT_FAILURE(rc))
1660 return rc;
1661 *ppszValue = pszValueUnquoted;
1662 return rc;
1663}
1664
1665static int vmdkDescBaseSetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1666 const char *pszKey, const char *pszValue)
1667{
1668 char *pszValueQuoted;
1669
1670 int rc = RTStrAPrintf(&pszValueQuoted, "\"%s\"", pszValue);
1671 if (RT_FAILURE(rc))
1672 return rc;
1673 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDesc, pszKey,
1674 pszValueQuoted);
1675 RTStrFree(pszValueQuoted);
1676 return rc;
1677}
1678
1679static void vmdkDescExtRemoveDummy(PVMDKIMAGE pImage,
1680 PVMDKDESCRIPTOR pDescriptor)
1681{
1682 unsigned uEntry = pDescriptor->uFirstExtent;
1683 ssize_t cbDiff;
1684
1685 if (!uEntry)
1686 return;
1687
1688 cbDiff = strlen(pDescriptor->aLines[uEntry]) + 1;
1689 /* Move everything including \0 in the entry marking the end of buffer. */
1690 memmove(pDescriptor->aLines[uEntry], pDescriptor->aLines[uEntry + 1],
1691 pDescriptor->aLines[pDescriptor->cLines] - pDescriptor->aLines[uEntry + 1] + 1);
1692 for (unsigned i = uEntry + 1; i <= pDescriptor->cLines; i++)
1693 {
1694 pDescriptor->aLines[i - 1] = pDescriptor->aLines[i] - cbDiff;
1695 if (pDescriptor->aNextLines[i])
1696 pDescriptor->aNextLines[i - 1] = pDescriptor->aNextLines[i] - 1;
1697 else
1698 pDescriptor->aNextLines[i - 1] = 0;
1699 }
1700 pDescriptor->cLines--;
1701 if (pDescriptor->uFirstDDB)
1702 pDescriptor->uFirstDDB--;
1703
1704 return;
1705}
1706
1707static int vmdkDescExtInsert(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1708 VMDKACCESS enmAccess, uint64_t cNominalSectors,
1709 VMDKETYPE enmType, const char *pszBasename,
1710 uint64_t uSectorOffset)
1711{
1712 static const char *apszAccess[] = { "NOACCESS", "RDONLY", "RW" };
1713 static const char *apszType[] = { "", "SPARSE", "FLAT", "ZERO", "VMFS" };
1714 char *pszTmp;
1715 unsigned uStart = pDescriptor->uFirstExtent, uLast = 0;
1716 char szExt[1024];
1717 ssize_t cbDiff;
1718
1719 Assert((unsigned)enmAccess < RT_ELEMENTS(apszAccess));
1720 Assert((unsigned)enmType < RT_ELEMENTS(apszType));
1721
1722 /* Find last entry in extent description. */
1723 while (uStart)
1724 {
1725 if (!pDescriptor->aNextLines[uStart])
1726 uLast = uStart;
1727 uStart = pDescriptor->aNextLines[uStart];
1728 }
1729
1730 if (enmType == VMDKETYPE_ZERO)
1731 {
1732 RTStrPrintf(szExt, sizeof(szExt), "%s %llu %s ", apszAccess[enmAccess],
1733 cNominalSectors, apszType[enmType]);
1734 }
1735 else if (enmType == VMDKETYPE_FLAT)
1736 {
1737 RTStrPrintf(szExt, sizeof(szExt), "%s %llu %s \"%s\" %llu",
1738 apszAccess[enmAccess], cNominalSectors,
1739 apszType[enmType], pszBasename, uSectorOffset);
1740 }
1741 else
1742 {
1743 RTStrPrintf(szExt, sizeof(szExt), "%s %llu %s \"%s\"",
1744 apszAccess[enmAccess], cNominalSectors,
1745 apszType[enmType], pszBasename);
1746 }
1747 cbDiff = strlen(szExt) + 1;
1748
1749 /* Check for buffer overflow. */
1750 if ( (pDescriptor->cLines >= VMDK_DESCRIPTOR_LINES_MAX - 1)
1751 || ( pDescriptor->aLines[pDescriptor->cLines]
1752 - pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff))
1753 return vmdkError(pImage, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
1754
1755 for (unsigned i = pDescriptor->cLines + 1; i > uLast + 1; i--)
1756 {
1757 pDescriptor->aLines[i] = pDescriptor->aLines[i - 1];
1758 if (pDescriptor->aNextLines[i - 1])
1759 pDescriptor->aNextLines[i] = pDescriptor->aNextLines[i - 1] + 1;
1760 else
1761 pDescriptor->aNextLines[i] = 0;
1762 }
1763 uStart = uLast + 1;
1764 pDescriptor->aNextLines[uLast] = uStart;
1765 pDescriptor->aNextLines[uStart] = 0;
1766 pDescriptor->cLines++;
1767 pszTmp = pDescriptor->aLines[uStart];
1768 memmove(pszTmp + cbDiff, pszTmp,
1769 pDescriptor->aLines[pDescriptor->cLines] - pszTmp);
1770 memcpy(pDescriptor->aLines[uStart], szExt, cbDiff);
1771 for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
1772 pDescriptor->aLines[i] += cbDiff;
1773
1774 /* Adjust starting line numbers of following descriptor sections. */
1775 if (uStart <= pDescriptor->uFirstDDB)
1776 pDescriptor->uFirstDDB++;
1777
1778 pDescriptor->fDirty = true;
1779 return VINF_SUCCESS;
1780}
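/* Illustrative example, not part of the original source (sector counts and
 * file names made up): depending on the extent type the function above
 * appends one line in one of these forms
 *
 *     RW 4192256 SPARSE "test-s001.vmdk"
 *     RW 2097152 FLAT "test-flat.vmdk" 0
 *     RW 8192 ZERO
 *
 * to the extent description section and shifts the start of the following
 * disk database section down by one line. */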
1781
1782static int vmdkDescDDBGetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1783 const char *pszKey, const char **ppszValue)
1784{
1785 const char *pszValue;
1786 char *pszValueUnquoted;
1787
1788 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDDB, pszKey,
1789 &pszValue))
1790 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1791 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1792 if (RT_FAILURE(rc))
1793 return rc;
1794 *ppszValue = pszValueUnquoted;
1795 return rc;
1796}
1797
1798static int vmdkDescDDBGetU32(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1799 const char *pszKey, uint32_t *puValue)
1800{
1801 const char *pszValue;
1802 char *pszValueUnquoted;
1803
1804 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDDB, pszKey,
1805 &pszValue))
1806 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1807 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1808 if (RT_FAILURE(rc))
1809 return rc;
1810 rc = RTStrToUInt32Ex(pszValueUnquoted, NULL, 10, puValue);
1811 RTMemTmpFree(pszValueUnquoted);
1812 return rc;
1813}
1814
1815static int vmdkDescDDBGetUuid(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1816 const char *pszKey, PRTUUID pUuid)
1817{
1818 const char *pszValue;
1819 char *pszValueUnquoted;
1820
1821 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDDB, pszKey,
1822 &pszValue))
1823 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1824 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1825 if (RT_FAILURE(rc))
1826 return rc;
1827 rc = RTUuidFromStr(pUuid, pszValueUnquoted);
1828 RTMemTmpFree(pszValueUnquoted);
1829 return rc;
1830}
1831
1832static int vmdkDescDDBSetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1833 const char *pszKey, const char *pszVal)
1834{
1835 int rc;
1836 char *pszValQuoted;
1837
1838 if (pszVal)
1839 {
1840 rc = RTStrAPrintf(&pszValQuoted, "\"%s\"", pszVal);
1841 if (RT_FAILURE(rc))
1842 return rc;
1843 }
1844 else
1845 pszValQuoted = NULL;
1846 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDDB, pszKey,
1847 pszValQuoted);
1848 if (pszValQuoted)
1849 RTStrFree(pszValQuoted);
1850 return rc;
1851}
1852
1853static int vmdkDescDDBSetUuid(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1854 const char *pszKey, PCRTUUID pUuid)
1855{
1856 char *pszUuid;
1857
1858 int rc = RTStrAPrintf(&pszUuid, "\"%RTuuid\"", pUuid);
1859 if (RT_FAILURE(rc))
1860 return rc;
1861 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDDB, pszKey,
1862 pszUuid);
1863 RTStrFree(pszUuid);
1864 return rc;
1865}
1866
1867static int vmdkDescDDBSetU32(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1868 const char *pszKey, uint32_t uValue)
1869{
1870 char *pszValue;
1871
1872 int rc = RTStrAPrintf(&pszValue, "\"%d\"", uValue);
1873 if (RT_FAILURE(rc))
1874 return rc;
1875 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDDB, pszKey,
1876 pszValue);
1877 RTStrFree(pszValue);
1878 return rc;
1879}
1880
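/**
 * Internal: split the raw descriptor text into lines and locate the three
 * sections (header keys, extent descriptions, disk data base). For
 * orientation only, a minimal hand-written descriptor of the shape this
 * parser accepts (not taken from any real image) would be:
 *
 *   # Disk DescriptorFile
 *   version=1
 *   CID=a1b2c3d4
 *   parentCID=ffffffff
 *   createType="monolithicSparse"
 *
 *   RW 2048 SPARSE "example.vmdk"
 *
 *   ddb.virtualHWVersion = "4"
 *   ddb.geometry.cylinders = "1"
 *   ddb.geometry.heads = "16"
 *   ddb.geometry.sectors = "63"
 */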
1881static int vmdkPreprocessDescriptor(PVMDKIMAGE pImage, char *pDescData,
1882 size_t cbDescData,
1883 PVMDKDESCRIPTOR pDescriptor)
1884{
1885 int rc = VINF_SUCCESS;
1886 unsigned cLine = 0, uLastNonEmptyLine = 0;
1887 char *pTmp = pDescData;
1888
1889 pDescriptor->cbDescAlloc = cbDescData;
1890 while (*pTmp != '\0')
1891 {
1892 pDescriptor->aLines[cLine++] = pTmp;
1893 if (cLine >= VMDK_DESCRIPTOR_LINES_MAX)
1894 {
1895 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
1896 goto out;
1897 }
1898
1899 while (*pTmp != '\0' && *pTmp != '\n')
1900 {
1901 if (*pTmp == '\r')
1902 {
1903 if (*(pTmp + 1) != '\n')
1904 {
1905 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: unsupported end of line in descriptor in '%s'"), pImage->pszFilename);
1906 goto out;
1907 }
1908 else
1909 {
1910 /* Get rid of CR character. */
1911 *pTmp = '\0';
1912 }
1913 }
1914 pTmp++;
1915 }
1916 /* Get rid of LF character. */
1917 if (*pTmp == '\n')
1918 {
1919 *pTmp = '\0';
1920 pTmp++;
1921 }
1922 }
1923 pDescriptor->cLines = cLine;
1924 /* Pointer right after the end of the used part of the buffer. */
1925 pDescriptor->aLines[cLine] = pTmp;
1926
1927 if ( strcmp(pDescriptor->aLines[0], "# Disk DescriptorFile")
1928 && strcmp(pDescriptor->aLines[0], "# Disk Descriptor File"))
1929 {
1930 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: descriptor does not start as expected in '%s'"), pImage->pszFilename);
1931 goto out;
1932 }
1933
1934 /* Initialize these section indices, because we need to be able to reopen an image. */
1935 pDescriptor->uFirstDesc = 0;
1936 pDescriptor->uFirstExtent = 0;
1937 pDescriptor->uFirstDDB = 0;
1938 for (unsigned i = 0; i < cLine; i++)
1939 {
1940 if (*pDescriptor->aLines[i] != '#' && *pDescriptor->aLines[i] != '\0')
1941 {
1942 if ( !strncmp(pDescriptor->aLines[i], "RW", 2)
1943 || !strncmp(pDescriptor->aLines[i], "RDONLY", 6)
1944 || !strncmp(pDescriptor->aLines[i], "NOACCESS", 8) )
1945 {
1946 /* An extent descriptor. */
1947 if (!pDescriptor->uFirstDesc || pDescriptor->uFirstDDB)
1948 {
1949 /* Incorrect ordering of entries. */
1950 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrect ordering of entries in descriptor in '%s'"), pImage->pszFilename);
1951 goto out;
1952 }
1953 if (!pDescriptor->uFirstExtent)
1954 {
1955 pDescriptor->uFirstExtent = i;
1956 uLastNonEmptyLine = 0;
1957 }
1958 }
1959 else if (!strncmp(pDescriptor->aLines[i], "ddb.", 4))
1960 {
1961 /* A disk database entry. */
1962 if (!pDescriptor->uFirstDesc || !pDescriptor->uFirstExtent)
1963 {
1964 /* Incorrect ordering of entries. */
1965 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrect ordering of entries in descriptor in '%s'"), pImage->pszFilename);
1966 goto out;
1967 }
1968 if (!pDescriptor->uFirstDDB)
1969 {
1970 pDescriptor->uFirstDDB = i;
1971 uLastNonEmptyLine = 0;
1972 }
1973 }
1974 else
1975 {
1976 /* A normal entry. */
1977 if (pDescriptor->uFirstExtent || pDescriptor->uFirstDDB)
1978 {
1979 /* Incorrect ordering of entries. */
1980 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrect ordering of entries in descriptor in '%s'"), pImage->pszFilename);
1981 goto out;
1982 }
1983 if (!pDescriptor->uFirstDesc)
1984 {
1985 pDescriptor->uFirstDesc = i;
1986 uLastNonEmptyLine = 0;
1987 }
1988 }
1989 if (uLastNonEmptyLine)
1990 pDescriptor->aNextLines[uLastNonEmptyLine] = i;
1991 uLastNonEmptyLine = i;
1992 }
1993 }
1994
1995out:
1996 return rc;
1997}
1998
1999static int vmdkDescSetPCHSGeometry(PVMDKIMAGE pImage,
2000 PCPDMMEDIAGEOMETRY pPCHSGeometry)
2001{
2002 int rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2003 VMDK_DDB_GEO_PCHS_CYLINDERS,
2004 pPCHSGeometry->cCylinders);
2005 if (RT_FAILURE(rc))
2006 return rc;
2007 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2008 VMDK_DDB_GEO_PCHS_HEADS,
2009 pPCHSGeometry->cHeads);
2010 if (RT_FAILURE(rc))
2011 return rc;
2012 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2013 VMDK_DDB_GEO_PCHS_SECTORS,
2014 pPCHSGeometry->cSectors);
2015 return rc;
2016}
2017
2018static int vmdkDescSetLCHSGeometry(PVMDKIMAGE pImage,
2019 PCPDMMEDIAGEOMETRY pLCHSGeometry)
2020{
2021 int rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2022 VMDK_DDB_GEO_LCHS_CYLINDERS,
2023 pLCHSGeometry->cCylinders);
2024 if (RT_FAILURE(rc))
2025 return rc;
2026 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2027 VMDK_DDB_GEO_LCHS_HEADS,
2028 pLCHSGeometry->cHeads);
2029 if (RT_FAILURE(rc))
2030 return rc;
2031 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2032 VMDK_DDB_GEO_LCHS_SECTORS,
2033 pLCHSGeometry->cSectors);
2034 return rc;
2035}
2036
2037static int vmdkCreateDescriptor(PVMDKIMAGE pImage, char *pDescData,
2038 size_t cbDescData, PVMDKDESCRIPTOR pDescriptor)
2039{
2040 int rc;
2041
2042 pDescriptor->uFirstDesc = 0;
2043 pDescriptor->uFirstExtent = 0;
2044 pDescriptor->uFirstDDB = 0;
2045 pDescriptor->cLines = 0;
2046 pDescriptor->cbDescAlloc = cbDescData;
2047 pDescriptor->fDirty = false;
2048 pDescriptor->aLines[pDescriptor->cLines] = pDescData;
2049 memset(pDescriptor->aNextLines, '\0', sizeof(pDescriptor->aNextLines));
2050
2051 rc = vmdkDescInitStr(pImage, pDescriptor, "# Disk DescriptorFile");
2052 if (RT_FAILURE(rc))
2053 goto out;
2054 rc = vmdkDescInitStr(pImage, pDescriptor, "version=1");
2055 if (RT_FAILURE(rc))
2056 goto out;
2057 pDescriptor->uFirstDesc = pDescriptor->cLines - 1;
2058 rc = vmdkDescInitStr(pImage, pDescriptor, "");
2059 if (RT_FAILURE(rc))
2060 goto out;
2061 rc = vmdkDescInitStr(pImage, pDescriptor, "# Extent description");
2062 if (RT_FAILURE(rc))
2063 goto out;
2064 rc = vmdkDescInitStr(pImage, pDescriptor, "NOACCESS 0 ZERO ");
2065 if (RT_FAILURE(rc))
2066 goto out;
2067 pDescriptor->uFirstExtent = pDescriptor->cLines - 1;
2068 rc = vmdkDescInitStr(pImage, pDescriptor, "");
2069 if (RT_FAILURE(rc))
2070 goto out;
2071 /* The trailing space is created by VMware, too. */
2072 rc = vmdkDescInitStr(pImage, pDescriptor, "# The disk Data Base ");
2073 if (RT_FAILURE(rc))
2074 goto out;
2075 rc = vmdkDescInitStr(pImage, pDescriptor, "#DDB");
2076 if (RT_FAILURE(rc))
2077 goto out;
2078 rc = vmdkDescInitStr(pImage, pDescriptor, "");
2079 if (RT_FAILURE(rc))
2080 goto out;
2081 rc = vmdkDescInitStr(pImage, pDescriptor, "ddb.virtualHWVersion = \"4\"");
2082 if (RT_FAILURE(rc))
2083 goto out;
2084 pDescriptor->uFirstDDB = pDescriptor->cLines - 1;
2085
2086 /* Now that the framework is in place, use the normal functions to insert
2087 * the remaining keys. */
2088 char szBuf[9];
2089 RTStrPrintf(szBuf, sizeof(szBuf), "%08x", RTRandU32());
2090 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDesc,
2091 "CID", szBuf);
2092 if (RT_FAILURE(rc))
2093 goto out;
2094 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDesc,
2095 "parentCID", "ffffffff");
2096 if (RT_FAILURE(rc))
2097 goto out;
2098
2099 rc = vmdkDescDDBSetStr(pImage, pDescriptor, "ddb.adapterType", "ide");
2100 if (RT_FAILURE(rc))
2101 goto out;
2102
2103out:
2104 return rc;
2105}
2106
2107static int vmdkParseDescriptor(PVMDKIMAGE pImage, char *pDescData,
2108 size_t cbDescData)
2109{
2110 int rc;
2111 unsigned cExtents;
2112 unsigned uLine;
2113 unsigned i;
2114
2115 rc = vmdkPreprocessDescriptor(pImage, pDescData, cbDescData,
2116 &pImage->Descriptor);
2117 if (RT_FAILURE(rc))
2118 return rc;
2119
2120 /* Check version, must be 1. */
2121 uint32_t uVersion;
2122 rc = vmdkDescBaseGetU32(&pImage->Descriptor, "version", &uVersion);
2123 if (RT_FAILURE(rc))
2124 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error finding key 'version' in descriptor in '%s'"), pImage->pszFilename);
2125 if (uVersion != 1)
2126 return vmdkError(pImage, VERR_VD_VMDK_UNSUPPORTED_VERSION, RT_SRC_POS, N_("VMDK: unsupported format version in descriptor in '%s'"), pImage->pszFilename);
2127
2128 /* Get image creation type and determine image flags. */
2129 const char *pszCreateType = NULL; /* initialized to make gcc shut up */
2130 rc = vmdkDescBaseGetStr(pImage, &pImage->Descriptor, "createType",
2131 &pszCreateType);
2132 if (RT_FAILURE(rc))
2133 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot get image type from descriptor in '%s'"), pImage->pszFilename);
2134 if ( !strcmp(pszCreateType, "twoGbMaxExtentSparse")
2135 || !strcmp(pszCreateType, "twoGbMaxExtentFlat"))
2136 pImage->uImageFlags |= VD_VMDK_IMAGE_FLAGS_SPLIT_2G;
2137 else if ( !strcmp(pszCreateType, "partitionedDevice")
2138 || !strcmp(pszCreateType, "fullDevice"))
2139 pImage->uImageFlags |= VD_VMDK_IMAGE_FLAGS_RAWDISK;
2140 else if (!strcmp(pszCreateType, "streamOptimized"))
2141 pImage->uImageFlags |= VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED;
2142 else if (!strcmp(pszCreateType, "vmfs"))
2143 pImage->uImageFlags |= VD_IMAGE_FLAGS_FIXED | VD_VMDK_IMAGE_FLAGS_ESX;
2144 RTStrFree((char *)(void *)pszCreateType);
2145
2146 /* Count the number of extent config entries. */
2147 for (uLine = pImage->Descriptor.uFirstExtent, cExtents = 0;
2148 uLine != 0;
2149 uLine = pImage->Descriptor.aNextLines[uLine], cExtents++)
2150 /* nothing */;
2151
2152 if (!pImage->pDescData && cExtents != 1)
2153 {
2154 /* Monolithic image, must have only one extent (already opened). */
2155 return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: monolithic image may only have one extent in '%s'"), pImage->pszFilename);
2156 }
2157
2158 if (pImage->pDescData)
2159 {
2160 /* Non-monolithic image, extents need to be allocated. */
2161 rc = vmdkCreateExtents(pImage, cExtents);
2162 if (RT_FAILURE(rc))
2163 return rc;
2164 }
2165
2166 for (i = 0, uLine = pImage->Descriptor.uFirstExtent;
2167 i < cExtents; i++, uLine = pImage->Descriptor.aNextLines[uLine])
2168 {
2169 char *pszLine = pImage->Descriptor.aLines[uLine];
2170
2171 /* Access type of the extent. */
2172 if (!strncmp(pszLine, "RW", 2))
2173 {
2174 pImage->pExtents[i].enmAccess = VMDKACCESS_READWRITE;
2175 pszLine += 2;
2176 }
2177 else if (!strncmp(pszLine, "RDONLY", 6))
2178 {
2179 pImage->pExtents[i].enmAccess = VMDKACCESS_READONLY;
2180 pszLine += 6;
2181 }
2182 else if (!strncmp(pszLine, "NOACCESS", 8))
2183 {
2184 pImage->pExtents[i].enmAccess = VMDKACCESS_NOACCESS;
2185 pszLine += 8;
2186 }
2187 else
2188 return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2189 if (*pszLine++ != ' ')
2190 return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2191
2192 /* Nominal size of the extent. */
2193 rc = RTStrToUInt64Ex(pszLine, &pszLine, 10,
2194 &pImage->pExtents[i].cNominalSectors);
2195 if (RT_FAILURE(rc))
2196 return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2197 if (*pszLine++ != ' ')
2198 return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2199
2200 /* Type of the extent. */
2201#ifdef VBOX_WITH_VMDK_ESX
2202 /** @todo Add the ESX extent types. Not necessary for now because
2203 * the ESX extent types are only used inside an ESX server. They are
2204 * automatically converted if the VMDK is exported. */
2205#endif /* VBOX_WITH_VMDK_ESX */
2206 if (!strncmp(pszLine, "SPARSE", 6))
2207 {
2208 pImage->pExtents[i].enmType = VMDKETYPE_HOSTED_SPARSE;
2209 pszLine += 6;
2210 }
2211 else if (!strncmp(pszLine, "FLAT", 4))
2212 {
2213 pImage->pExtents[i].enmType = VMDKETYPE_FLAT;
2214 pszLine += 4;
2215 }
2216 else if (!strncmp(pszLine, "ZERO", 4))
2217 {
2218 pImage->pExtents[i].enmType = VMDKETYPE_ZERO;
2219 pszLine += 4;
2220 }
2221 else if (!strncmp(pszLine, "VMFS", 4))
2222 {
2223 pImage->pExtents[i].enmType = VMDKETYPE_VMFS;
2224 pszLine += 4;
2225 }
2226 else
2227 return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2228 if (pImage->pExtents[i].enmType == VMDKETYPE_ZERO)
2229 {
2230 /* This one has no basename or offset. */
2231 if (*pszLine == ' ')
2232 pszLine++;
2233 if (*pszLine != '\0')
2234 return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2235 pImage->pExtents[i].pszBasename = NULL;
2236 }
2237 else
2238 {
2239 /* All other extent types have basename and optional offset. */
2240 if (*pszLine++ != ' ')
2241 return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2242
2243 /* Basename of the image. Surrounded by quotes. */
2244 char *pszBasename;
2245 rc = vmdkStringUnquote(pImage, pszLine, &pszBasename, &pszLine);
2246 if (RT_FAILURE(rc))
2247 return rc;
2248 pImage->pExtents[i].pszBasename = pszBasename;
2249 if (*pszLine == ' ')
2250 {
2251 pszLine++;
2252 if (*pszLine != '\0')
2253 {
2254 /* Optional offset in extent specified. */
2255 rc = RTStrToUInt64Ex(pszLine, &pszLine, 10,
2256 &pImage->pExtents[i].uSectorOffset);
2257 if (RT_FAILURE(rc))
2258 return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2259 }
2260 }
2261
2262 if (*pszLine != '\0')
2263 return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2264 }
2265 }
2266
2267 /* Determine PCHS geometry (autogenerate if necessary). */
2268 rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
2269 VMDK_DDB_GEO_PCHS_CYLINDERS,
2270 &pImage->PCHSGeometry.cCylinders);
2271 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2272 pImage->PCHSGeometry.cCylinders = 0;
2273 else if (RT_FAILURE(rc))
2274 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting PCHS geometry from extent description in '%s'"), pImage->pszFilename);
2275 rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
2276 VMDK_DDB_GEO_PCHS_HEADS,
2277 &pImage->PCHSGeometry.cHeads);
2278 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2279 pImage->PCHSGeometry.cHeads = 0;
2280 else if (RT_FAILURE(rc))
2281 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting PCHS geometry from extent description in '%s'"), pImage->pszFilename);
2282 rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
2283 VMDK_DDB_GEO_PCHS_SECTORS,
2284 &pImage->PCHSGeometry.cSectors);
2285 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2286 pImage->PCHSGeometry.cSectors = 0;
2287 else if (RT_FAILURE(rc))
2288 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting PCHS geometry from extent description in '%s'"), pImage->pszFilename);
2289 if ( pImage->PCHSGeometry.cCylinders == 0
2290 || pImage->PCHSGeometry.cHeads == 0
2291 || pImage->PCHSGeometry.cHeads > 16
2292 || pImage->PCHSGeometry.cSectors == 0
2293 || pImage->PCHSGeometry.cSectors > 63)
2294 {
2295 /* Mark PCHS geometry as not yet valid (can't do the calculation here
2296 * as the total image size isn't known yet). */
2297 pImage->PCHSGeometry.cCylinders = 0;
2298 pImage->PCHSGeometry.cHeads = 16;
2299 pImage->PCHSGeometry.cSectors = 63;
2300 }
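 /* Note: 16 heads and 63 sectors per track are the classic BIOS/ATA CHS
  * limits; the cylinder count is recomputed from the total image size later
  * (see the PCHS handling in vmdkOpenImage), once that size is known. */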
2301
2302 /* Determine LCHS geometry (set to 0 if not specified). */
2303 rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
2304 VMDK_DDB_GEO_LCHS_CYLINDERS,
2305 &pImage->LCHSGeometry.cCylinders);
2306 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2307 pImage->LCHSGeometry.cCylinders = 0;
2308 else if (RT_FAILURE(rc))
2309 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting LCHS geometry from extent description in '%s'"), pImage->pszFilename);
2310 rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
2311 VMDK_DDB_GEO_LCHS_HEADS,
2312 &pImage->LCHSGeometry.cHeads);
2313 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2314 pImage->LCHSGeometry.cHeads = 0;
2315 else if (RT_FAILURE(rc))
2316 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting LCHS geometry from extent description in '%s'"), pImage->pszFilename);
2317 rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
2318 VMDK_DDB_GEO_LCHS_SECTORS,
2319 &pImage->LCHSGeometry.cSectors);
2320 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2321 pImage->LCHSGeometry.cSectors = 0;
2322 else if (RT_FAILURE(rc))
2323 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting LCHS geometry from extent description in '%s'"), pImage->pszFilename);
2324 if ( pImage->LCHSGeometry.cCylinders == 0
2325 || pImage->LCHSGeometry.cHeads == 0
2326 || pImage->LCHSGeometry.cSectors == 0)
2327 {
2328 pImage->LCHSGeometry.cCylinders = 0;
2329 pImage->LCHSGeometry.cHeads = 0;
2330 pImage->LCHSGeometry.cSectors = 0;
2331 }
2332
2333 /* Get image UUID. */
2334 rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor, VMDK_DDB_IMAGE_UUID,
2335 &pImage->ImageUuid);
2336 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2337 {
2338 /* Image without UUID. Probably created by VMware and not yet used
2339 * by VirtualBox. Can only be added for images opened in read/write
2340 * mode, so don't bother producing a sensible UUID otherwise. */
2341 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2342 RTUuidClear(&pImage->ImageUuid);
2343 else
2344 {
2345 rc = RTUuidCreate(&pImage->ImageUuid);
2346 if (RT_FAILURE(rc))
2347 return rc;
2348 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
2349 VMDK_DDB_IMAGE_UUID, &pImage->ImageUuid);
2350 if (RT_FAILURE(rc))
2351 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing image UUID in descriptor in '%s'"), pImage->pszFilename);
2352 }
2353 }
2354 else if (RT_FAILURE(rc))
2355 return rc;
2356
2357 /* Get image modification UUID. */
2358 rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor,
2359 VMDK_DDB_MODIFICATION_UUID,
2360 &pImage->ModificationUuid);
2361 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2362 {
2363 /* Image without UUID. Probably created by VMware and not yet used
2364 * by VirtualBox. Can only be added for images opened in read/write
2365 * mode, so don't bother producing a sensible UUID otherwise. */
2366 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2367 RTUuidClear(&pImage->ModificationUuid);
2368 else
2369 {
2370 rc = RTUuidCreate(&pImage->ModificationUuid);
2371 if (RT_FAILURE(rc))
2372 return rc;
2373 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
2374 VMDK_DDB_MODIFICATION_UUID,
2375 &pImage->ModificationUuid);
2376 if (RT_FAILURE(rc))
2377 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing image modification UUID in descriptor in '%s'"), pImage->pszFilename);
2378 }
2379 }
2380 else if (RT_FAILURE(rc))
2381 return rc;
2382
2383 /* Get UUID of parent image. */
2384 rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor, VMDK_DDB_PARENT_UUID,
2385 &pImage->ParentUuid);
2386 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2387 {
2388 /* Image without UUID. Probably created by VMware and not yet used
2389 * by VirtualBox. Can only be added for images opened in read/write
2390 * mode, so don't bother producing a sensible UUID otherwise. */
2391 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2392 RTUuidClear(&pImage->ParentUuid);
2393 else
2394 {
2395 rc = RTUuidClear(&pImage->ParentUuid);
2396 if (RT_FAILURE(rc))
2397 return rc;
2398 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
2399 VMDK_DDB_PARENT_UUID, &pImage->ParentUuid);
2400 if (RT_FAILURE(rc))
2401 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing parent UUID in descriptor in '%s'"), pImage->pszFilename);
2402 }
2403 }
2404 else if (RT_FAILURE(rc))
2405 return rc;
2406
2407 /* Get parent image modification UUID. */
2408 rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor,
2409 VMDK_DDB_PARENT_MODIFICATION_UUID,
2410 &pImage->ParentModificationUuid);
2411 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2412 {
2413 /* Image without UUID. Probably created by VMware and not yet used
2414 * by VirtualBox. Can only be added for images opened in read/write
2415 * mode, so don't bother producing a sensible UUID otherwise. */
2416 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2417 RTUuidClear(&pImage->ParentModificationUuid);
2418 else
2419 {
2420 rc = RTUuidCreate(&pImage->ParentModificationUuid);
2421 if (RT_FAILURE(rc))
2422 return rc;
2423 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
2424 VMDK_DDB_PARENT_MODIFICATION_UUID,
2425 &pImage->ParentModificationUuid);
2426 if (RT_FAILURE(rc))
2427 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing parent modification UUID in descriptor in '%s'"), pImage->pszFilename);
2428 }
2429 }
2430 else if (RT_FAILURE(rc))
2431 return rc;
2432
2433 return VINF_SUCCESS;
2434}
2435
2436/**
2437 * Internal: write/update the descriptor part of the image.
2438 */
2439static int vmdkWriteDescriptor(PVMDKIMAGE pImage)
2440{
2441 int rc = VINF_SUCCESS;
2442 uint64_t cbLimit;
2443 uint64_t uOffset;
2444 PVMDKFILE pDescFile;
2445
2446 if (pImage->pDescData)
2447 {
2448 /* Separate descriptor file. */
2449 uOffset = 0;
2450 cbLimit = 0;
2451 pDescFile = pImage->pFile;
2452 }
2453 else
2454 {
2455 /* Embedded descriptor file. */
2456 uOffset = VMDK_SECTOR2BYTE(pImage->pExtents[0].uDescriptorSector);
2457 cbLimit = VMDK_SECTOR2BYTE(pImage->pExtents[0].cDescriptorSectors);
2458 pDescFile = pImage->pExtents[0].pFile;
2459 }
2460 /* Bail out if there is no file to write to. */
2461 if (pDescFile == NULL)
2462 return VERR_INVALID_PARAMETER;
2463
2464 /*
2465 * Allocate a temporary descriptor buffer.
2466 * If there is no size limit, allocate a default size
2467 * and grow it if required.
2468 */
2469 size_t cbDescriptor = cbLimit ? cbLimit : 4 * _1K;
2470 char *pszDescriptor = (char *)RTMemAllocZ(cbDescriptor);
2471 unsigned offDescriptor = 0;
2472
2473 if (!pszDescriptor)
2474 return VERR_NO_MEMORY;
2475
2476 for (unsigned i = 0; i < pImage->Descriptor.cLines; i++)
2477 {
2478 const char *psz = pImage->Descriptor.aLines[i];
2479 size_t cb = strlen(psz);
2480
2481 /*
2482 * Grow the descriptor buffer if there is no size limit and
2483 * not enough room is left for this line.
2484 */
2485 if (offDescriptor + cb + 1 > cbDescriptor)
2486 {
2487 if (cbLimit)
2488 {
2489 rc = vmdkError(pImage, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too long in '%s'"), pImage->pszFilename);
2490 break;
2491 }
2492 else
2493 {
2494 char *pszDescriptorNew = NULL;
2495 LogFlow(("Increasing descriptor cache\n"));
2496
2497 pszDescriptorNew = (char *)RTMemRealloc(pszDescriptor, cbDescriptor + cb + 4 * _1K);
2498 if (!pszDescriptorNew)
2499 {
2500 rc = VERR_NO_MEMORY;
2501 break;
2502 }
2503 pszDescriptor = pszDescriptorNew;
2504 cbDescriptor += cb + 4 * _1K;
2505 }
2506 }
2507
2508 if (cb > 0)
2509 {
2510 memcpy(pszDescriptor + offDescriptor, psz, cb);
2511 offDescriptor += cb;
2512 }
2513
2514 memcpy(pszDescriptor + offDescriptor, "\n", 1);
2515 offDescriptor++;
2516 }
2517
2518 if (RT_SUCCESS(rc))
2519 {
2520 rc = vmdkFileWriteAt(pDescFile, uOffset, pszDescriptor, cbLimit ? cbLimit : offDescriptor, NULL);
2521 if (RT_FAILURE(rc))
2522 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error writing descriptor in '%s'"), pImage->pszFilename);
2523 }
2524
2525 if (RT_SUCCESS(rc) && !cbLimit)
2526 {
2527 rc = vmdkFileSetSize(pDescFile, offDescriptor);
2528 if (RT_FAILURE(rc))
2529 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error truncating descriptor in '%s'"), pImage->pszFilename);
2530 }
2531
2532 if (RT_SUCCESS(rc))
2533 pImage->Descriptor.fDirty = false;
2534
2535 RTMemFree(pszDescriptor);
2536 return rc;
2537}
2538
2539/**
2540 * Internal: validate the consistency check values in a binary header.
2541 */
2542static int vmdkValidateHeader(PVMDKIMAGE pImage, PVMDKEXTENT pExtent, const SparseExtentHeader *pHeader)
2543{
2544 int rc = VINF_SUCCESS;
2545 if (RT_LE2H_U32(pHeader->magicNumber) != VMDK_SPARSE_MAGICNUMBER)
2546 {
2547 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrect magic in sparse extent header in '%s'"), pExtent->pszFullname);
2548 return rc;
2549 }
2550 if (RT_LE2H_U32(pHeader->version) != 1 && RT_LE2H_U32(pHeader->version) != 3)
2551 {
2552 rc = vmdkError(pImage, VERR_VD_VMDK_UNSUPPORTED_VERSION, RT_SRC_POS, N_("VMDK: incorrect version in sparse extent header in '%s', not a VMDK 1.0/1.1 conforming file"), pExtent->pszFullname);
2553 return rc;
2554 }
2555 if ( (RT_LE2H_U32(pHeader->flags) & 1)
2556 && ( pHeader->singleEndLineChar != '\n'
2557 || pHeader->nonEndLineChar != ' '
2558 || pHeader->doubleEndLineChar1 != '\r'
2559 || pHeader->doubleEndLineChar2 != '\n') )
2560 {
2561 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: corrupted by CR/LF translation in '%s'"), pExtent->pszFullname);
2562 return rc;
2563 }
2564 return rc;
2565}
2566
2567/**
2568 * Internal: read metadata belonging to an extent with binary header, i.e.
2569 * as found in monolithic files.
2570 */
2571static int vmdkReadBinaryMetaExtent(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
2572{
2573 SparseExtentHeader Header;
2574 uint64_t cSectorsPerGDE;
2575
2576 int rc = vmdkFileReadAt(pExtent->pFile, 0, &Header, sizeof(Header), NULL);
2577 AssertRC(rc);
2578 if (RT_FAILURE(rc))
2579 {
2580 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error reading extent header in '%s'"), pExtent->pszFullname);
2581 goto out;
2582 }
2583 rc = vmdkValidateHeader(pImage, pExtent, &Header);
2584 if (RT_FAILURE(rc))
2585 goto out;
2586 if ( (RT_LE2H_U32(Header.flags) & RT_BIT(17))
2587 && RT_LE2H_U64(Header.gdOffset) == VMDK_GD_AT_END)
2588 {
2589 /* Read the footer, which isn't compressed and comes before the
2590 * end-of-stream marker. This is bending the VMDK 1.1 spec, but that's
2591 * VMware reality. Theory and practice have very little in common. */
2592 uint64_t cbSize;
2593 rc = vmdkFileGetSize(pExtent->pFile, &cbSize);
2594 AssertRC(rc);
2595 if (RT_FAILURE(rc))
2596 {
2597 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot get size of '%s'"), pExtent->pszFullname);
2598 goto out;
2599 }
2600 cbSize = RT_ALIGN_64(cbSize, 512);
2601 rc = vmdkFileReadAt(pExtent->pFile, cbSize - 2*512, &Header, sizeof(Header), NULL);
2602 AssertRC(rc);
2603 if (RT_FAILURE(rc))
2604 {
2605 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error reading extent footer in '%s'"), pExtent->pszFullname);
2606 goto out;
2607 }
2608 rc = vmdkValidateHeader(pImage, pExtent, &Header);
2609 if (RT_FAILURE(rc))
2610 goto out;
2611 pExtent->fFooter = true;
2612 }
2613 pExtent->uVersion = RT_LE2H_U32(Header.version);
2614 pExtent->enmType = VMDKETYPE_HOSTED_SPARSE; /* Just dummy value, changed later. */
2615 pExtent->cSectors = RT_LE2H_U64(Header.capacity);
2616 pExtent->cSectorsPerGrain = RT_LE2H_U64(Header.grainSize);
2617 pExtent->uDescriptorSector = RT_LE2H_U64(Header.descriptorOffset);
2618 pExtent->cDescriptorSectors = RT_LE2H_U64(Header.descriptorSize);
2619 if (pExtent->uDescriptorSector && !pExtent->cDescriptorSectors)
2620 {
2621 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: inconsistent embedded descriptor config in '%s'"), pExtent->pszFullname);
2622 goto out;
2623 }
2624 pExtent->cGTEntries = RT_LE2H_U32(Header.numGTEsPerGT);
2625 if (RT_LE2H_U32(Header.flags) & RT_BIT(1))
2626 {
2627 pExtent->uSectorRGD = RT_LE2H_U64(Header.rgdOffset);
2628 pExtent->uSectorGD = RT_LE2H_U64(Header.gdOffset);
2629 }
2630 else
2631 {
2632 pExtent->uSectorGD = RT_LE2H_U64(Header.gdOffset);
2633 pExtent->uSectorRGD = 0;
2634 }
2635 if (pExtent->uSectorGD == VMDK_GD_AT_END || pExtent->uSectorRGD == VMDK_GD_AT_END)
2636 {
2637 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: cannot resolve grain directory offset in '%s'"), pExtent->pszFullname);
2638 goto out;
2639 }
2640 pExtent->cOverheadSectors = RT_LE2H_U64(Header.overHead);
2641 pExtent->fUncleanShutdown = !!Header.uncleanShutdown;
2642 pExtent->uCompression = RT_LE2H_U16(Header.compressAlgorithm);
2643 cSectorsPerGDE = pExtent->cGTEntries * pExtent->cSectorsPerGrain;
2644 if (!cSectorsPerGDE || cSectorsPerGDE > UINT32_MAX)
2645 {
2646 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrect grain directory size in '%s'"), pExtent->pszFullname);
2647 goto out;
2648 }
2649 pExtent->cSectorsPerGDE = cSectorsPerGDE;
2650 pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE;
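 /* Worked example, for orientation only: with the common hosted sparse
  * defaults of 512 GT entries per grain table and a grain size of 128
  * sectors (64K), one grain directory entry covers 512 * 128 = 65536
  * sectors, i.e. 32M of virtual disk, so a 2G extent needs 64 GD entries. */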
2651
2652 /* Fix up the number of descriptor sectors, as some flat images have
2653 * really just one, and this causes failures when inserting the UUID
2654 * values and other extra information. */
2655 if (pExtent->cDescriptorSectors != 0 && pExtent->cDescriptorSectors < 4)
2656 {
2657 /* Do it the easy way - just fix it for flat images which have no
2658 * other complicated metadata which needs space too. */
2659 if ( pExtent->uDescriptorSector + 4 < pExtent->cOverheadSectors
2660 && pExtent->cGTEntries * pExtent->cGDEntries == 0)
2661 pExtent->cDescriptorSectors = 4;
2662 }
2663
2664out:
2665 if (RT_FAILURE(rc))
2666 vmdkFreeExtentData(pImage, pExtent, false);
2667
2668 return rc;
2669}
2670
2671/**
2672 * Internal: read additional metadata belonging to an extent. For those
2673 * extents which have no additional metadata just verify the information.
2674 */
2675static int vmdkReadMetaExtent(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
2676{
2677 int rc = VINF_SUCCESS;
2678 uint64_t cbExtentSize;
2679
2680 /* The image must be a multiple of a sector in size and contain the data
2681 * area (flat images only). If not, it means the image is at least
2682 * truncated, or even seriously garbled. */
2683 rc = vmdkFileGetSize(pExtent->pFile, &cbExtentSize);
2684 if (RT_FAILURE(rc))
2685 {
2686 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting size in '%s'"), pExtent->pszFullname);
2687 goto out;
2688 }
2689/* disabled the size check again as there are too many too short vmdks out there */
2690#ifdef VBOX_WITH_VMDK_STRICT_SIZE_CHECK
2691 if ( cbExtentSize != RT_ALIGN_64(cbExtentSize, 512)
2692 && (pExtent->enmType != VMDKETYPE_FLAT || pExtent->cNominalSectors + pExtent->uSectorOffset > VMDK_BYTE2SECTOR(cbExtentSize)))
2693 {
2694 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: file size is not a multiple of 512 in '%s', file is truncated or otherwise garbled"), pExtent->pszFullname);
2695 goto out;
2696 }
2697#endif /* VBOX_WITH_VMDK_STRICT_SIZE_CHECK */
2698 if (pExtent->enmType != VMDKETYPE_HOSTED_SPARSE)
2699 goto out;
2700
2701 /* The spec says that this must be a power of two and greater than 8,
2702 * but probably they meant not less than 8. */
2703 if ( (pExtent->cSectorsPerGrain & (pExtent->cSectorsPerGrain - 1))
2704 || pExtent->cSectorsPerGrain < 8)
2705 {
2706 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: invalid extent grain size %u in '%s'"), pExtent->cSectorsPerGrain, pExtent->pszFullname);
2707 goto out;
2708 }
2709
2710 /* This code requires that a grain table must hold a power of two multiple
2711 * of the number of entries per GT cache entry. */
2712 if ( (pExtent->cGTEntries & (pExtent->cGTEntries - 1))
2713 || pExtent->cGTEntries < VMDK_GT_CACHELINE_SIZE)
2714 {
2715 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: grain table cache size problem in '%s'"), pExtent->pszFullname);
2716 goto out;
2717 }
2718
2719 rc = vmdkReadGrainDirectory(pExtent);
2720
2721out:
2722 if (RT_FAILURE(rc))
2723 vmdkFreeExtentData(pImage, pExtent, false);
2724
2725 return rc;
2726}
2727
2728/**
2729 * Internal: write/update the metadata for a sparse extent.
2730 */
2731static int vmdkWriteMetaSparseExtent(PVMDKEXTENT pExtent, uint64_t uOffset)
2732{
2733 SparseExtentHeader Header;
2734
2735 memset(&Header, '\0', sizeof(Header));
2736 Header.magicNumber = RT_H2LE_U32(VMDK_SPARSE_MAGICNUMBER);
2737 Header.version = RT_H2LE_U32(pExtent->uVersion);
2738 Header.flags = RT_H2LE_U32(RT_BIT(0));
2739 if (pExtent->pRGD)
2740 Header.flags |= RT_H2LE_U32(RT_BIT(1));
2741 if (pExtent->pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
2742 Header.flags |= RT_H2LE_U32(RT_BIT(16) | RT_BIT(17));
2743 Header.capacity = RT_H2LE_U64(pExtent->cSectors);
2744 Header.grainSize = RT_H2LE_U64(pExtent->cSectorsPerGrain);
2745 Header.descriptorOffset = RT_H2LE_U64(pExtent->uDescriptorSector);
2746 Header.descriptorSize = RT_H2LE_U64(pExtent->cDescriptorSectors);
2747 Header.numGTEsPerGT = RT_H2LE_U32(pExtent->cGTEntries);
2748 if (pExtent->fFooter && uOffset == 0)
2749 {
2750 if (pExtent->pRGD)
2751 {
2752 Assert(pExtent->uSectorRGD);
2753 Header.rgdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
2754 Header.gdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
2755 }
2756 else
2757 {
2758 Header.gdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
2759 }
2760 }
2761 else
2762 {
2763 if (pExtent->pRGD)
2764 {
2765 Assert(pExtent->uSectorRGD);
2766 Header.rgdOffset = RT_H2LE_U64(pExtent->uSectorRGD);
2767 Header.gdOffset = RT_H2LE_U64(pExtent->uSectorGD);
2768 }
2769 else
2770 {
2771 Header.gdOffset = RT_H2LE_U64(pExtent->uSectorGD);
2772 }
2773 }
2774 Header.overHead = RT_H2LE_U64(pExtent->cOverheadSectors);
2775 Header.uncleanShutdown = pExtent->fUncleanShutdown;
2776 Header.singleEndLineChar = '\n';
2777 Header.nonEndLineChar = ' ';
2778 Header.doubleEndLineChar1 = '\r';
2779 Header.doubleEndLineChar2 = '\n';
2780 Header.compressAlgorithm = RT_H2LE_U16(pExtent->uCompression);
2781
2782 int rc = vmdkFileWriteAt(pExtent->pFile, uOffset, &Header, sizeof(Header), NULL);
2783 AssertRC(rc);
2784 if (RT_FAILURE(rc))
2785 rc = vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: error writing extent header in '%s'"), pExtent->pszFullname);
2786 return rc;
2787}
2788
2789#ifdef VBOX_WITH_VMDK_ESX
2790/**
2791 * Internal: unused code to read the metadata of a sparse ESX extent.
2792 *
2793 * Such extents never leave an ESX server, so this code isn't ever used.
2794 */
2795static int vmdkReadMetaESXSparseExtent(PVMDKEXTENT pExtent)
2796{
2797 COWDisk_Header Header;
2798 uint64_t cSectorsPerGDE;
2799
2800 int rc = vmdkFileReadAt(pExtent->pFile, 0, &Header, sizeof(Header), NULL);
2801 AssertRC(rc);
2802 if (RT_FAILURE(rc))
2803 goto out;
2804 if ( RT_LE2H_U32(Header.magicNumber) != VMDK_ESX_SPARSE_MAGICNUMBER
2805 || RT_LE2H_U32(Header.version) != 1
2806 || RT_LE2H_U32(Header.flags) != 3)
2807 {
2808 rc = VERR_VD_VMDK_INVALID_HEADER;
2809 goto out;
2810 }
2811 pExtent->enmType = VMDKETYPE_ESX_SPARSE;
2812 pExtent->cSectors = RT_LE2H_U32(Header.numSectors);
2813 pExtent->cSectorsPerGrain = RT_LE2H_U32(Header.grainSize);
2814 /* The spec says that this must be between 1 sector and 1MB. This code
2815 * assumes it's a power of two, so check that requirement, too. */
2816 if ( (pExtent->cSectorsPerGrain & (pExtent->cSectorsPerGrain - 1))
2817 || pExtent->cSectorsPerGrain == 0
2818 || pExtent->cSectorsPerGrain > 2048)
2819 {
2820 rc = VERR_VD_VMDK_INVALID_HEADER;
2821 goto out;
2822 }
2823 pExtent->uDescriptorSector = 0;
2824 pExtent->cDescriptorSectors = 0;
2825 pExtent->uSectorGD = RT_LE2H_U32(Header.gdOffset);
2826 pExtent->uSectorRGD = 0;
2827 pExtent->cOverheadSectors = 0;
2828 pExtent->cGTEntries = 4096;
2829 cSectorsPerGDE = pExtent->cGTEntries * pExtent->cSectorsPerGrain;
2830 if (!cSectorsPerGDE || cSectorsPerGDE > UINT32_MAX)
2831 {
2832 rc = VERR_VD_VMDK_INVALID_HEADER;
2833 goto out;
2834 }
2835 pExtent->cSectorsPerGDE = cSectorsPerGDE;
2836 pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE;
2837 if (pExtent->cGDEntries != RT_LE2H_U32(Header.numGDEntries))
2838 {
2839 /* Inconsistency detected. Computed number of GD entries doesn't match
2840 * stored value. Better be safe than sorry. */
2841 rc = VERR_VD_VMDK_INVALID_HEADER;
2842 goto out;
2843 }
2844 pExtent->uFreeSector = RT_LE2H_U32(Header.freeSector);
2845 pExtent->fUncleanShutdown = !!Header.uncleanShutdown;
2846
2847 rc = vmdkReadGrainDirectory(pExtent);
2848
2849out:
2850 if (RT_FAILURE(rc))
2851 vmdkFreeExtentData(pExtent->pImage, pExtent, false);
2852
2853 return rc;
2854}
2855#endif /* VBOX_WITH_VMDK_ESX */
2856
2857/**
2858 * Internal: free the memory used by the extent data structure, optionally
2859 * deleting the referenced files.
2860 */
2861static void vmdkFreeExtentData(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
2862 bool fDelete)
2863{
2864 vmdkFreeGrainDirectory(pExtent);
2865 if (pExtent->pDescData)
2866 {
2867 RTMemFree(pExtent->pDescData);
2868 pExtent->pDescData = NULL;
2869 }
2870 if (pExtent->pFile != NULL)
2871 {
2872 /* Do not delete raw extents, these have full and base names equal. */
2873 vmdkFileClose(pImage, &pExtent->pFile,
2874 fDelete
2875 && pExtent->pszFullname
2876 && strcmp(pExtent->pszFullname, pExtent->pszBasename));
2877 }
2878 if (pExtent->pszBasename)
2879 {
2880 RTMemTmpFree((void *)pExtent->pszBasename);
2881 pExtent->pszBasename = NULL;
2882 }
2883 if (pExtent->pszFullname)
2884 {
2885 RTStrFree((char *)(void *)pExtent->pszFullname);
2886 pExtent->pszFullname = NULL;
2887 }
2888 if (pExtent->pvGrain)
2889 {
2890 RTMemFree(pExtent->pvGrain);
2891 pExtent->pvGrain = NULL;
2892 }
2893}
2894
2895/**
2896 * Internal: allocate grain table cache if necessary for this image.
2897 */
2898static int vmdkAllocateGrainTableCache(PVMDKIMAGE pImage)
2899{
2900 PVMDKEXTENT pExtent;
2901
2902 /* Allocate grain table cache if any sparse extent is present. */
2903 for (unsigned i = 0; i < pImage->cExtents; i++)
2904 {
2905 pExtent = &pImage->pExtents[i];
2906 if ( pExtent->enmType == VMDKETYPE_HOSTED_SPARSE
2907#ifdef VBOX_WITH_VMDK_ESX
2908 || pExtent->enmType == VMDKETYPE_ESX_SPARSE
2909#endif /* VBOX_WITH_VMDK_ESX */
2910 )
2911 {
2912 /* Allocate grain table cache. */
2913 pImage->pGTCache = (PVMDKGTCACHE)RTMemAllocZ(sizeof(VMDKGTCACHE));
2914 if (!pImage->pGTCache)
2915 return VERR_NO_MEMORY;
2916 for (unsigned j = 0; j < VMDK_GT_CACHE_SIZE; j++)
2917 {
2918 PVMDKGTCACHEENTRY pGCE = &pImage->pGTCache->aGTCache[j];
2919 pGCE->uExtent = UINT32_MAX;
2920 }
2921 pImage->pGTCache->cEntries = VMDK_GT_CACHE_SIZE;
2922 break;
2923 }
2924 }
2925
2926 return VINF_SUCCESS;
2927}
2928
2929/**
2930 * Internal: allocate the given number of extents.
2931 */
2932static int vmdkCreateExtents(PVMDKIMAGE pImage, unsigned cExtents)
2933{
2934 int rc = VINF_SUCCESS;
2935 PVMDKEXTENT pExtents = (PVMDKEXTENT)RTMemAllocZ(cExtents * sizeof(VMDKEXTENT));
2936 if (pExtents)
2937 {
2938 for (unsigned i = 0; i < cExtents; i++)
2939 {
2940 pExtents[i].pFile = NULL;
2941 pExtents[i].pszBasename = NULL;
2942 pExtents[i].pszFullname = NULL;
2943 pExtents[i].pGD = NULL;
2944 pExtents[i].pRGD = NULL;
2945 pExtents[i].pDescData = NULL;
2946 pExtents[i].uVersion = 1;
2947 pExtents[i].uCompression = VMDK_COMPRESSION_NONE;
2948 pExtents[i].uExtent = i;
2949 pExtents[i].pImage = pImage;
2950 }
2951 pImage->pExtents = pExtents;
2952 pImage->cExtents = cExtents;
2953 }
2954 else
2955 rc = VERR_NO_MEMORY;
2956
2957 return rc;
2958}
2959
2960/**
2961 * Internal: Open an image, constructing all necessary data structures.
2962 */
2963static int vmdkOpenImage(PVMDKIMAGE pImage, unsigned uOpenFlags)
2964{
2965 int rc;
2966 uint32_t u32Magic;
2967 PVMDKFILE pFile;
2968 PVMDKEXTENT pExtent;
2969
2970 pImage->uOpenFlags = uOpenFlags;
2971
2972 /* Try to get error interface. */
2973 pImage->pInterfaceError = VDInterfaceGet(pImage->pVDIfsDisk, VDINTERFACETYPE_ERROR);
2974 if (pImage->pInterfaceError)
2975 pImage->pInterfaceErrorCallbacks = VDGetInterfaceError(pImage->pInterfaceError);
2976
2977 /* Try to get async I/O interface. */
2978 pImage->pInterfaceAsyncIO = VDInterfaceGet(pImage->pVDIfsDisk, VDINTERFACETYPE_ASYNCIO);
2979 if (pImage->pInterfaceAsyncIO)
2980 pImage->pInterfaceAsyncIOCallbacks = VDGetInterfaceAsyncIO(pImage->pInterfaceAsyncIO);
2981
2982 /*
2983 * Open the image.
2984 * We don't have to check for asynchronous access because
2985 * we only support raw access and the opened file is a descriptor
2986 * file where no data is stored.
2987 */
2988 rc = vmdkFileOpen(pImage, &pFile, pImage->pszFilename,
2989 uOpenFlags & VD_OPEN_FLAGS_READONLY
2990 ? RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE
2991 : RTFILE_O_READWRITE | RTFILE_O_OPEN | RTFILE_O_DENY_WRITE, false);
2992 if (RT_FAILURE(rc))
2993 {
2994 /* Do NOT signal an appropriate error here, as the VD layer has the
2995 * choice of retrying the open if it failed. */
2996 goto out;
2997 }
2998 pImage->pFile = pFile;
2999
3000 /* Read magic (if present). */
3001 rc = vmdkFileReadAt(pFile, 0, &u32Magic, sizeof(u32Magic), NULL);
3002 if (RT_FAILURE(rc))
3003 {
3004 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error reading the magic number in '%s'"), pImage->pszFilename);
3005 goto out;
3006 }
3007
3008 /* Handle the file according to its magic number. */
3009 if (RT_LE2H_U32(u32Magic) == VMDK_SPARSE_MAGICNUMBER)
3010 {
3011 /* Async I/O is not supported with these files yet, so fail if opened in async I/O mode. */
3012 if (uOpenFlags & VD_OPEN_FLAGS_ASYNC_IO)
3013 {
3014 rc = VERR_NOT_SUPPORTED;
3015 goto out;
3016 }
3017
3018 /* It's a hosted single-extent image. */
3019 rc = vmdkCreateExtents(pImage, 1);
3020 if (RT_FAILURE(rc))
3021 goto out;
3022 /* The opened file is passed to the extent. No separate descriptor
3023 * file, so no need to keep anything open for the image. */
3024 pExtent = &pImage->pExtents[0];
3025 pExtent->pFile = pFile;
3026 pImage->pFile = NULL;
3027 pExtent->pszFullname = RTPathAbsDup(pImage->pszFilename);
3028 if (!pExtent->pszFullname)
3029 {
3030 rc = VERR_NO_MEMORY;
3031 goto out;
3032 }
3033 rc = vmdkReadBinaryMetaExtent(pImage, pExtent);
3034 if (RT_FAILURE(rc))
3035 goto out;
3036
3037 /* As we're dealing with a monolithic image here, there must
3038 * be a descriptor embedded in the image file. */
3039 if (!pExtent->uDescriptorSector || !pExtent->cDescriptorSectors)
3040 {
3041 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: monolithic image without descriptor in '%s'"), pImage->pszFilename);
3042 goto out;
3043 }
3044 /* HACK: extend the descriptor if it is unusually small and it fits in
3045 * the unused space after the image header. Allows opening VMDK files
3046 * with extremely small descriptor in read/write mode. */
3047 if ( !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
3048 && pExtent->cDescriptorSectors < 3
3049 && (int64_t)pExtent->uSectorGD - pExtent->uDescriptorSector >= 4
3050 && (!pExtent->uSectorRGD || (int64_t)pExtent->uSectorRGD - pExtent->uDescriptorSector >= 4))
3051 {
3052 pExtent->cDescriptorSectors = 4;
3053 pExtent->fMetaDirty = true;
3054 }
3055 /* Read the descriptor from the extent. */
3056 pExtent->pDescData = (char *)RTMemAllocZ(VMDK_SECTOR2BYTE(pExtent->cDescriptorSectors));
3057 if (!pExtent->pDescData)
3058 {
3059 rc = VERR_NO_MEMORY;
3060 goto out;
3061 }
3062 rc = vmdkFileReadAt(pExtent->pFile,
3063 VMDK_SECTOR2BYTE(pExtent->uDescriptorSector),
3064 pExtent->pDescData,
3065 VMDK_SECTOR2BYTE(pExtent->cDescriptorSectors), NULL);
3066 AssertRC(rc);
3067 if (RT_FAILURE(rc))
3068 {
3069 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: read error for descriptor in '%s'"), pExtent->pszFullname);
3070 goto out;
3071 }
3072
3073 rc = vmdkParseDescriptor(pImage, pExtent->pDescData,
3074 VMDK_SECTOR2BYTE(pExtent->cDescriptorSectors));
3075 if (RT_FAILURE(rc))
3076 goto out;
3077
3078 rc = vmdkReadMetaExtent(pImage, pExtent);
3079 if (RT_FAILURE(rc))
3080 goto out;
3081
3082 /* Mark the extent as unclean if opened in read-write mode. */
3083 if (!(uOpenFlags & VD_OPEN_FLAGS_READONLY))
3084 {
3085 pExtent->fUncleanShutdown = true;
3086 pExtent->fMetaDirty = true;
3087 }
3088 }
3089 else
3090 {
3091 /* Allocate at least 10K, and make sure that there is 5K of free space
3092 * in case new entries need to be added to the descriptor. Never
3093 * allocate more than 128K, because that is not a valid descriptor file,
3094 * and capping the buffer triggers the correct "truncated read" error handling. */
3095 uint64_t cbSize;
3096 rc = vmdkFileGetSize(pFile, &cbSize);
3097 if (RT_FAILURE(rc))
3098 goto out;
3099 if (cbSize % VMDK_SECTOR2BYTE(10))
3100 cbSize += VMDK_SECTOR2BYTE(20) - cbSize % VMDK_SECTOR2BYTE(10);
3101 else
3102 cbSize += VMDK_SECTOR2BYTE(10);
3103 cbSize = RT_MIN(cbSize, _128K);
3104 pImage->cbDescAlloc = RT_MAX(VMDK_SECTOR2BYTE(20), cbSize);
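 /* Worked example, for orientation only: a 3K descriptor file is rounded up
  * to a 10K buffer (3K + (10K - 3K)), leaving more than the required 5K of
  * slack; any size of 128K or more is clamped to a 128K buffer. */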
3105 pImage->pDescData = (char *)RTMemAllocZ(pImage->cbDescAlloc);
3106 if (!pImage->pDescData)
3107 {
3108 rc = VERR_NO_MEMORY;
3109 goto out;
3110 }
3111
3112 size_t cbRead;
3113 rc = vmdkFileReadAt(pImage->pFile, 0, pImage->pDescData,
3114 pImage->cbDescAlloc, &cbRead);
3115 if (RT_FAILURE(rc))
3116 {
3117 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: read error for descriptor in '%s'"), pImage->pszFilename);
3118 goto out;
3119 }
3120 if (cbRead == pImage->cbDescAlloc)
3121 {
3122 /* Likely the read is truncated. Better fail a bit too early
3123 * (normally the descriptor is much smaller than our buffer). */
3124 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: cannot read descriptor in '%s'"), pImage->pszFilename);
3125 goto out;
3126 }
3127
3128 rc = vmdkParseDescriptor(pImage, pImage->pDescData,
3129 pImage->cbDescAlloc);
3130 if (RT_FAILURE(rc))
3131 goto out;
3132
3133 /*
3134 * We have to check for the asynchronous open flag. The
3135 * extents are parsed and the types of all of them are known now.
3136 * Check that every extent is either FLAT, VMFS or ZERO.
3137 */
3138 if (uOpenFlags & VD_OPEN_FLAGS_ASYNC_IO)
3139 {
3140 unsigned cFlatExtents = 0;
3141
3142 for (unsigned i = 0; i < pImage->cExtents; i++)
3143 {
3144 pExtent = &pImage->pExtents[i];
3145
3146 if (( pExtent->enmType != VMDKETYPE_FLAT
3147 && pExtent->enmType != VMDKETYPE_ZERO
3148 && pExtent->enmType != VMDKETYPE_VMFS)
3149 || ((pImage->pExtents[i].enmType == VMDKETYPE_FLAT) && (cFlatExtents > 0)))
3150 {
3151 /*
3152 * The opened image contains at least one extent that is neither flat
3153 * nor zero (or more than one flat extent). Return an error but do not
3154 * set an error message, as the caller can retry in non-async I/O mode.
3155 */
3156 rc = VERR_NOT_SUPPORTED;
3157 goto out;
3158 }
3159 if (pExtent->enmType == VMDKETYPE_FLAT)
3160 cFlatExtents++;
3161 }
3162 }
3163
3164 for (unsigned i = 0; i < pImage->cExtents; i++)
3165 {
3166 pExtent = &pImage->pExtents[i];
3167
3168 if (pExtent->pszBasename)
3169 {
3170 /* Hack to figure out whether the specified name in the
3171 * extent descriptor is absolute. Doesn't always work, but
3172 * should be good enough for now. */
3173 char *pszFullname;
3174 /** @todo implement proper path absolute check. */
3175 if (pExtent->pszBasename[0] == RTPATH_SLASH)
3176 {
3177 pszFullname = RTStrDup(pExtent->pszBasename);
3178 if (!pszFullname)
3179 {
3180 rc = VERR_NO_MEMORY;
3181 goto out;
3182 }
3183 }
3184 else
3185 {
3186 size_t cbDirname;
3187 char *pszDirname = RTStrDup(pImage->pszFilename);
3188 if (!pszDirname)
3189 {
3190 rc = VERR_NO_MEMORY;
3191 goto out;
3192 }
3193 RTPathStripFilename(pszDirname);
3194 cbDirname = strlen(pszDirname);
3195 rc = RTStrAPrintf(&pszFullname, "%s%c%s", pszDirname,
3196 RTPATH_SLASH, pExtent->pszBasename);
3197 RTStrFree(pszDirname);
3198 if (RT_FAILURE(rc))
3199 goto out;
3200 }
3201 pExtent->pszFullname = pszFullname;
3202 }
3203 else
3204 pExtent->pszFullname = NULL;
3205
3206 switch (pExtent->enmType)
3207 {
3208 case VMDKETYPE_HOSTED_SPARSE:
3209 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
3210 uOpenFlags & VD_OPEN_FLAGS_READONLY
3211 ? RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE
3212 : RTFILE_O_READWRITE | RTFILE_O_OPEN | RTFILE_O_DENY_WRITE, false);
3213 if (RT_FAILURE(rc))
3214 {
3215 /* Do NOT signal an appropriate error here, as the VD
3216 * layer has the choice of retrying the open if it
3217 * failed. */
3218 goto out;
3219 }
3220 rc = vmdkReadBinaryMetaExtent(pImage, pExtent);
3221 if (RT_FAILURE(rc))
3222 goto out;
3223 rc = vmdkReadMetaExtent(pImage, pExtent);
3224 if (RT_FAILURE(rc))
3225 goto out;
3226
3227 /* Mark extent as unclean if opened in read-write mode. */
3228 if (!(uOpenFlags & VD_OPEN_FLAGS_READONLY))
3229 {
3230 pExtent->fUncleanShutdown = true;
3231 pExtent->fMetaDirty = true;
3232 }
3233 break;
3234 case VMDKETYPE_VMFS:
3235 case VMDKETYPE_FLAT:
3236 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
3237 uOpenFlags & VD_OPEN_FLAGS_READONLY
3238 ? RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE
3239 : RTFILE_O_READWRITE | RTFILE_O_OPEN | RTFILE_O_DENY_WRITE, true);
3240 if (RT_FAILURE(rc))
3241 {
3242 /* Do NOT signal an appropriate error here, as the VD
3243 * layer has the choice of retrying the open if it
3244 * failed. */
3245 goto out;
3246 }
3247 break;
3248 case VMDKETYPE_ZERO:
3249 /* Nothing to do. */
3250 break;
3251 default:
3252 AssertMsgFailed(("unknown vmdk extent type %d\n", pExtent->enmType));
3253 }
3254 }
3255 }
3256
3257 /* Make sure this is not reached accidentally with an error status. */
3258 AssertRC(rc);
3259
3260 /* Determine PCHS geometry if not set. */
3261 if (pImage->PCHSGeometry.cCylinders == 0)
3262 {
3263 uint64_t cCylinders = VMDK_BYTE2SECTOR(pImage->cbSize)
3264 / pImage->PCHSGeometry.cHeads
3265 / pImage->PCHSGeometry.cSectors;
3266 pImage->PCHSGeometry.cCylinders = (unsigned)RT_MIN(cCylinders, 16383);
3267 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
3268 {
3269 rc = vmdkDescSetPCHSGeometry(pImage, &pImage->PCHSGeometry);
3270 AssertRC(rc);
3271 }
3272 }
3273
3274 /* Update the image metadata now in case it has changed. */
3275 rc = vmdkFlushImage(pImage);
3276 if (RT_FAILURE(rc))
3277 goto out;
3278
3279 /* Figure out a few per-image constants from the extents. */
3280 pImage->cbSize = 0;
3281 for (unsigned i = 0; i < pImage->cExtents; i++)
3282 {
3283 pExtent = &pImage->pExtents[i];
3284 if ( pExtent->enmType == VMDKETYPE_HOSTED_SPARSE
3285#ifdef VBOX_WITH_VMDK_ESX
3286 || pExtent->enmType == VMDKETYPE_ESX_SPARSE
3287#endif /* VBOX_WITH_VMDK_ESX */
3288 )
3289 {
3290 /* Here used to be a check whether the nominal size of an extent
3291 * is a multiple of the grain size. The spec says that this is
3292 * always the case, but unfortunately some files out there in the
3293 * wild violate the spec (e.g. ReactOS 0.3.1). */
3294 }
3295 pImage->cbSize += VMDK_SECTOR2BYTE(pExtent->cNominalSectors);
3296 }
3297
3298 for (unsigned i = 0; i < pImage->cExtents; i++)
3299 {
3300 pExtent = &pImage->pExtents[i];
3301 if ( pImage->pExtents[i].enmType == VMDKETYPE_FLAT
3302 || pImage->pExtents[i].enmType == VMDKETYPE_ZERO)
3303 {
3304 pImage->uImageFlags |= VD_IMAGE_FLAGS_FIXED;
3305 break;
3306 }
3307 }
3308
3309 rc = vmdkAllocateGrainTableCache(pImage);
3310 if (RT_FAILURE(rc))
3311 goto out;
3312
3313out:
3314 if (RT_FAILURE(rc))
3315 vmdkFreeImage(pImage, false);
3316 return rc;
3317}
3318
3319/**
3320 * Internal: create VMDK images for raw disk/partition access.
3321 */
3322static int vmdkCreateRawImage(PVMDKIMAGE pImage, const PVBOXHDDRAW pRaw,
3323 uint64_t cbSize)
3324{
3325 int rc = VINF_SUCCESS;
3326 PVMDKEXTENT pExtent;
3327
3328 if (pRaw->fRawDisk)
3329 {
3330 /* Full raw disk access. This requires setting up a descriptor
3331 * file and opening the (flat) raw disk. */
3332 rc = vmdkCreateExtents(pImage, 1);
3333 if (RT_FAILURE(rc))
3334 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
3335 pExtent = &pImage->pExtents[0];
3336 /* Create raw disk descriptor file. */
3337 rc = vmdkFileOpen(pImage, &pImage->pFile, pImage->pszFilename,
3338 RTFILE_O_READWRITE | RTFILE_O_CREATE | RTFILE_O_DENY_WRITE | RTFILE_O_NOT_CONTENT_INDEXED,
3339 false);
3340 if (RT_FAILURE(rc))
3341 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pImage->pszFilename);
3342
3343 /* Set up basename for extent description. Cannot use StrDup. */
3344 size_t cbBasename = strlen(pRaw->pszRawDisk) + 1;
3345 char *pszBasename = (char *)RTMemTmpAlloc(cbBasename);
3346 if (!pszBasename)
3347 return VERR_NO_MEMORY;
3348 memcpy(pszBasename, pRaw->pszRawDisk, cbBasename);
3349 pExtent->pszBasename = pszBasename;
3350 /* For raw disks the full name is identical to the base name. */
3351 pExtent->pszFullname = RTStrDup(pszBasename);
3352 if (!pExtent->pszFullname)
3353 return VERR_NO_MEMORY;
3354 pExtent->enmType = VMDKETYPE_FLAT;
3355 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbSize);
3356 pExtent->uSectorOffset = 0;
3357 pExtent->enmAccess = VMDKACCESS_READWRITE;
3358 pExtent->fMetaDirty = false;
3359
3360 /* Open flat image, the raw disk. */
3361 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
3362 RTFILE_O_READWRITE | RTFILE_O_OPEN | RTFILE_O_DENY_WRITE, false);
3363 if (RT_FAILURE(rc))
3364 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not open raw disk file '%s'"), pExtent->pszFullname);
3365 }
3366 else
3367 {
3368 /* Raw partition access. This requires setting up a descriptor
3369 * file, writing the partition information to a flat extent and
3370 * opening all the (flat) raw disk partitions. */
3371
3372 /* First pass over the partitions to determine how many
3373 * extents we need. One partition can require up to 4 extents.
3374 * One to skip over unpartitioned space, one for the
3375 * partitioning data, one to skip over unpartitioned space
3376 * and one for the partition data. */
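 /* Hypothetical example: a disk whose partitioning data occupies the very
  * first sectors and which has a single partition starting at 1M needs three
  * extents from this counting: one for the partitioning data, one to skip
  * the gap up to 1M, and one for the partition itself; a final extent to
  * fill up to the disk size is only added if the partition stops short of
  * the end. */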
3377 unsigned cExtents = 0;
3378 uint64_t uStart = 0;
3379 for (unsigned i = 0; i < pRaw->cPartitions; i++)
3380 {
3381 PVBOXHDDRAWPART pPart = &pRaw->pPartitions[i];
3382 if (pPart->cbPartitionData)
3383 {
3384 if (uStart > pPart->uPartitionDataStart)
3385 return vmdkError(pImage, VERR_INVALID_PARAMETER, RT_SRC_POS, N_("VMDK: cannot go backwards for partitioning information in '%s'"), pImage->pszFilename);
3386 else if (uStart != pPart->uPartitionDataStart)
3387 cExtents++;
3388 uStart = pPart->uPartitionDataStart + pPart->cbPartitionData;
3389 cExtents++;
3390 }
3391 if (pPart->cbPartition)
3392 {
3393 if (uStart > pPart->uPartitionStart)
3394 return vmdkError(pImage, VERR_INVALID_PARAMETER, RT_SRC_POS, N_("VMDK: cannot go backwards for partition data in '%s'"), pImage->pszFilename);
3395 else if (uStart != pPart->uPartitionStart)
3396 cExtents++;
3397 uStart = pPart->uPartitionStart + pPart->cbPartition;
3398 cExtents++;
3399 }
3400 }
3401 /* Another extent for filling up the rest of the image. */
3402 if (uStart != cbSize)
3403 cExtents++;
3404
3405 rc = vmdkCreateExtents(pImage, cExtents);
3406 if (RT_FAILURE(rc))
3407 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
3408
3409 /* Create raw partition descriptor file. */
3410 rc = vmdkFileOpen(pImage, &pImage->pFile, pImage->pszFilename,
3411 RTFILE_O_READWRITE | RTFILE_O_CREATE | RTFILE_O_DENY_WRITE | RTFILE_O_NOT_CONTENT_INDEXED,
3412 false);
3413 if (RT_FAILURE(rc))
3414 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pImage->pszFilename);
3415
3416 /* Create base filename for the partition table extent. */
3417 /** @todo remove fixed buffer without creating memory leaks. */
3418 char pszPartition[1024];
3419 const char *pszBase = RTPathFilename(pImage->pszFilename);
3420 const char *pszExt = RTPathExt(pszBase);
3421 if (pszExt == NULL)
3422             return vmdkError(pImage, VERR_INVALID_PARAMETER, RT_SRC_POS, N_("VMDK: invalid filename '%s'"), pImage->pszFilename);
3423 char *pszBaseBase = RTStrDup(pszBase);
3424 if (!pszBaseBase)
3425 return VERR_NO_MEMORY;
3426 RTPathStripExt(pszBaseBase);
3427 RTStrPrintf(pszPartition, sizeof(pszPartition), "%s-pt%s",
3428 pszBaseBase, pszExt);
3429 RTStrFree(pszBaseBase);
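        /* All partitioning data (e.g. MBR/EBR sectors) from every partition is
         * collected in this single "-pt" flat extent; uPartOffset below tracks
         * the next free sector within that file. */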
3430
3431 /* Second pass over the partitions, now define all extents. */
3432 uint64_t uPartOffset = 0;
3433 cExtents = 0;
3434 uStart = 0;
3435 for (unsigned i = 0; i < pRaw->cPartitions; i++)
3436 {
3437 PVBOXHDDRAWPART pPart = &pRaw->pPartitions[i];
3438 if (pPart->cbPartitionData)
3439 {
3440 if (uStart != pPart->uPartitionDataStart)
3441 {
3442 pExtent = &pImage->pExtents[cExtents++];
3443 pExtent->pszBasename = NULL;
3444 pExtent->pszFullname = NULL;
3445 pExtent->enmType = VMDKETYPE_ZERO;
3446 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->uPartitionDataStart - uStart);
3447 pExtent->uSectorOffset = 0;
3448 pExtent->enmAccess = VMDKACCESS_READWRITE;
3449 pExtent->fMetaDirty = false;
3450 }
3451 uStart = pPart->uPartitionDataStart + pPart->cbPartitionData;
3452 pExtent = &pImage->pExtents[cExtents++];
3453 /* Set up basename for extent description. Can't use StrDup. */
3454 size_t cbBasename = strlen(pszPartition) + 1;
3455 char *pszBasename = (char *)RTMemTmpAlloc(cbBasename);
3456 if (!pszBasename)
3457 return VERR_NO_MEMORY;
3458 memcpy(pszBasename, pszPartition, cbBasename);
3459 pExtent->pszBasename = pszBasename;
3460
3461 /* Set up full name for partition extent. */
3462 size_t cbDirname;
3463 char *pszDirname = RTStrDup(pImage->pszFilename);
3464 if (!pszDirname)
3465 return VERR_NO_MEMORY;
3466 RTPathStripFilename(pszDirname);
3467 cbDirname = strlen(pszDirname);
3468 char *pszFullname;
3469 rc = RTStrAPrintf(&pszFullname, "%s%c%s", pszDirname,
3470 RTPATH_SLASH, pExtent->pszBasename);
3471 RTStrFree(pszDirname);
3472 if (RT_FAILURE(rc))
3473 return rc;
3474 pExtent->pszFullname = pszFullname;
3475 pExtent->enmType = VMDKETYPE_FLAT;
3476 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->cbPartitionData);
3477 pExtent->uSectorOffset = uPartOffset;
3478 pExtent->enmAccess = VMDKACCESS_READWRITE;
3479 pExtent->fMetaDirty = false;
3480
3481 /* Create partition table flat image. */
3482 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
3483 RTFILE_O_READWRITE | RTFILE_O_CREATE | RTFILE_O_DENY_WRITE | RTFILE_O_NOT_CONTENT_INDEXED,
3484 false);
3485 if (RT_FAILURE(rc))
3486 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new partition data file '%s'"), pExtent->pszFullname);
3487 rc = vmdkFileWriteAt(pExtent->pFile,
3488 VMDK_SECTOR2BYTE(uPartOffset),
3489 pPart->pvPartitionData,
3490 pPart->cbPartitionData, NULL);
3491 if (RT_FAILURE(rc))
3492 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not write partition data to '%s'"), pExtent->pszFullname);
3493 uPartOffset += VMDK_BYTE2SECTOR(pPart->cbPartitionData);
3494 }
3495 if (pPart->cbPartition)
3496 {
3497 if (uStart != pPart->uPartitionStart)
3498 {
3499 pExtent = &pImage->pExtents[cExtents++];
3500 pExtent->pszBasename = NULL;
3501 pExtent->pszFullname = NULL;
3502 pExtent->enmType = VMDKETYPE_ZERO;
3503 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->uPartitionStart - uStart);
3504 pExtent->uSectorOffset = 0;
3505 pExtent->enmAccess = VMDKACCESS_READWRITE;
3506 pExtent->fMetaDirty = false;
3507 }
3508 uStart = pPart->uPartitionStart + pPart->cbPartition;
3509 pExtent = &pImage->pExtents[cExtents++];
3510 if (pPart->pszRawDevice)
3511 {
3512 /* Set up basename for extent descr. Can't use StrDup. */
3513 size_t cbBasename = strlen(pPart->pszRawDevice) + 1;
3514 char *pszBasename = (char *)RTMemTmpAlloc(cbBasename);
3515 if (!pszBasename)
3516 return VERR_NO_MEMORY;
3517 memcpy(pszBasename, pPart->pszRawDevice, cbBasename);
3518 pExtent->pszBasename = pszBasename;
3519 /* For raw disks full name is identical to base name. */
3520 pExtent->pszFullname = RTStrDup(pszBasename);
3521 if (!pExtent->pszFullname)
3522 return VERR_NO_MEMORY;
3523 pExtent->enmType = VMDKETYPE_FLAT;
3524 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->cbPartition);
3525 pExtent->uSectorOffset = VMDK_BYTE2SECTOR(pPart->uPartitionStartOffset);
3526 pExtent->enmAccess = VMDKACCESS_READWRITE;
3527 pExtent->fMetaDirty = false;
3528
3529 /* Open flat image, the raw partition. */
3530 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
3531 RTFILE_O_READWRITE | RTFILE_O_OPEN | RTFILE_O_DENY_WRITE,
3532 false);
3533 if (RT_FAILURE(rc))
3534 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not open raw partition file '%s'"), pExtent->pszFullname);
3535 }
3536 else
3537 {
3538 pExtent->pszBasename = NULL;
3539 pExtent->pszFullname = NULL;
3540 pExtent->enmType = VMDKETYPE_ZERO;
3541 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->cbPartition);
3542 pExtent->uSectorOffset = 0;
3543 pExtent->enmAccess = VMDKACCESS_READWRITE;
3544 pExtent->fMetaDirty = false;
3545 }
3546 }
3547 }
3548 /* Another extent for filling up the rest of the image. */
3549 if (uStart != cbSize)
3550 {
3551 pExtent = &pImage->pExtents[cExtents++];
3552 pExtent->pszBasename = NULL;
3553 pExtent->pszFullname = NULL;
3554 pExtent->enmType = VMDKETYPE_ZERO;
3555 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbSize - uStart);
3556 pExtent->uSectorOffset = 0;
3557 pExtent->enmAccess = VMDKACCESS_READWRITE;
3558 pExtent->fMetaDirty = false;
3559 }
3560 }
3561
3562 rc = vmdkDescBaseSetStr(pImage, &pImage->Descriptor, "createType",
3563 pRaw->fRawDisk ?
3564 "fullDevice" : "partitionedDevice");
3565 if (RT_FAILURE(rc))
3566 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not set the image type in '%s'"), pImage->pszFilename);
3567 return rc;
3568}
3569
3570/**
3571 * Internal: create a regular (i.e. file-backed) VMDK image.
3572 */
3573static int vmdkCreateRegularImage(PVMDKIMAGE pImage, uint64_t cbSize,
3574 unsigned uImageFlags,
3575 PFNVDPROGRESS pfnProgress, void *pvUser,
3576 unsigned uPercentStart, unsigned uPercentSpan)
3577{
3578 int rc = VINF_SUCCESS;
3579 unsigned cExtents = 1;
3580 uint64_t cbOffset = 0;
3581 uint64_t cbRemaining = cbSize;
3582
3583 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G)
3584 {
3585 cExtents = cbSize / VMDK_2G_SPLIT_SIZE;
3586 /* Do proper extent computation: need one smaller extent if the total
3587 * size isn't evenly divisible by the split size. */
3588 if (cbSize % VMDK_2G_SPLIT_SIZE)
3589 cExtents++;
3590 }
3591 rc = vmdkCreateExtents(pImage, cExtents);
3592 if (RT_FAILURE(rc))
3593 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
3594
3595 /* Basename strings needed for constructing the extent names. */
3596 char *pszBasenameSubstr = RTPathFilename(pImage->pszFilename);
3597 AssertPtr(pszBasenameSubstr);
3598 size_t cbBasenameSubstr = strlen(pszBasenameSubstr) + 1;
3599
3600     /* Create separate descriptor file if necessary. */
3601 if (cExtents != 1 || (uImageFlags & VD_IMAGE_FLAGS_FIXED))
3602 {
3603 rc = vmdkFileOpen(pImage, &pImage->pFile, pImage->pszFilename,
3604 RTFILE_O_READWRITE | RTFILE_O_CREATE | RTFILE_O_DENY_WRITE | RTFILE_O_NOT_CONTENT_INDEXED,
3605 false);
3606 if (RT_FAILURE(rc))
3607 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new sparse descriptor file '%s'"), pImage->pszFilename);
3608 }
3609 else
3610 pImage->pFile = NULL;
3611
3612 /* Set up all extents. */
3613 for (unsigned i = 0; i < cExtents; i++)
3614 {
3615 PVMDKEXTENT pExtent = &pImage->pExtents[i];
3616 uint64_t cbExtent = cbRemaining;
3617
3618         /* Set up fullname/basename for extent description. Cannot use RTStrDup
3619          * for the basename, as the memory must be freeable with RTMemTmpFree;
3620          * other code paths allocate the basename with RTMemTmpAlloc, so
3621          * RTStrDup-allocated memory is not usable here. */
3622 if (cExtents == 1 && !(uImageFlags & VD_IMAGE_FLAGS_FIXED))
3623 {
3624 char *pszBasename = (char *)RTMemTmpAlloc(cbBasenameSubstr);
3625 if (!pszBasename)
3626 return VERR_NO_MEMORY;
3627 memcpy(pszBasename, pszBasenameSubstr, cbBasenameSubstr);
3628 pExtent->pszBasename = pszBasename;
3629 }
3630 else
3631 {
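            /* Use the usual VMDK extent naming scheme: "-flat" for a single
             * fixed extent, "-fNNN" for split fixed extents and "-sNNN" for
             * split sparse extents. */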
3632 char *pszBasenameExt = RTPathExt(pszBasenameSubstr);
3633 char *pszBasenameBase = RTStrDup(pszBasenameSubstr);
3634 RTPathStripExt(pszBasenameBase);
3635 char *pszTmp;
3636 size_t cbTmp;
3637 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
3638 {
3639 if (cExtents == 1)
3640 rc = RTStrAPrintf(&pszTmp, "%s-flat%s", pszBasenameBase,
3641 pszBasenameExt);
3642 else
3643 rc = RTStrAPrintf(&pszTmp, "%s-f%03d%s", pszBasenameBase,
3644 i+1, pszBasenameExt);
3645 }
3646 else
3647 rc = RTStrAPrintf(&pszTmp, "%s-s%03d%s", pszBasenameBase, i+1,
3648 pszBasenameExt);
3649 RTStrFree(pszBasenameBase);
3650 if (RT_FAILURE(rc))
3651 return rc;
3652 cbTmp = strlen(pszTmp) + 1;
3653 char *pszBasename = (char *)RTMemTmpAlloc(cbTmp);
3654 if (!pszBasename)
3655 return VERR_NO_MEMORY;
3656 memcpy(pszBasename, pszTmp, cbTmp);
3657 RTStrFree(pszTmp);
3658 pExtent->pszBasename = pszBasename;
3659 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G)
3660 cbExtent = RT_MIN(cbRemaining, VMDK_2G_SPLIT_SIZE);
3661 }
3662 char *pszBasedirectory = RTStrDup(pImage->pszFilename);
3663 RTPathStripFilename(pszBasedirectory);
3664 char *pszFullname;
3665 rc = RTStrAPrintf(&pszFullname, "%s%c%s", pszBasedirectory,
3666 RTPATH_SLASH, pExtent->pszBasename);
3667 RTStrFree(pszBasedirectory);
3668 if (RT_FAILURE(rc))
3669 return rc;
3670 pExtent->pszFullname = pszFullname;
3671
3672 /* Create file for extent. */
3673 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
3674 RTFILE_O_READWRITE | RTFILE_O_CREATE | RTFILE_O_DENY_WRITE | RTFILE_O_NOT_CONTENT_INDEXED,
3675 false);
3676 if (RT_FAILURE(rc))
3677 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pExtent->pszFullname);
3678 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
3679 {
3680 rc = vmdkFileSetSize(pExtent->pFile, cbExtent);
3681 if (RT_FAILURE(rc))
3682 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not set size of new file '%s'"), pExtent->pszFullname);
3683
3684 /* Fill image with zeroes. We do this for every fixed-size image since on some systems
3685 * (for example Windows Vista), it takes ages to write a block near the end of a sparse
3686 * file and the guest could complain about an ATA timeout. */
3687
3688             /** @todo Starting with Linux 2.6.23, there is a fallocate() system call.
3689              * Currently supported file systems are ext4 and ocfs2. */
3690
3691 /* Allocate a temporary zero-filled buffer. Use a bigger block size to optimize writing */
3692 const size_t cbBuf = 128 * _1K;
3693 void *pvBuf = RTMemTmpAllocZ(cbBuf);
3694 if (!pvBuf)
3695 return VERR_NO_MEMORY;
3696
3697 uint64_t uOff = 0;
3698 /* Write data to all image blocks. */
3699 while (uOff < cbExtent)
3700 {
3701                 unsigned cbChunk = (unsigned)RT_MIN(cbExtent - uOff, cbBuf); /* don't write past the end of the extent */
3702
3703 rc = vmdkFileWriteAt(pExtent->pFile, uOff, pvBuf, cbChunk, NULL);
3704 if (RT_FAILURE(rc))
3705 {
3706                     RTMemTmpFree(pvBuf);
3707 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: writing block failed for '%s'"), pImage->pszFilename);
3708 }
3709
3710 uOff += cbChunk;
3711
3712 if (pfnProgress)
3713 {
3714 rc = pfnProgress(pvUser,
3715 uPercentStart + uOff * uPercentSpan / cbExtent);
3716 if (RT_FAILURE(rc))
3717 {
3718                         RTMemTmpFree(pvBuf);
3719 return rc;
3720 }
3721 }
3722 }
3723 RTMemTmpFree(pvBuf);
3724 }
3725
3726 /* Place descriptor file information (where integrated). */
3727 if (cExtents == 1 && !(uImageFlags & VD_IMAGE_FLAGS_FIXED))
3728 {
3729 pExtent->uDescriptorSector = 1;
3730 pExtent->cDescriptorSectors = VMDK_BYTE2SECTOR(pImage->cbDescAlloc);
3731 /* The descriptor is part of the (only) extent. */
3732 pExtent->pDescData = pImage->pDescData;
3733 pImage->pDescData = NULL;
3734 }
3735
3736 if (!(uImageFlags & VD_IMAGE_FLAGS_FIXED))
3737 {
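            /* Sparse extent layout: 64K grains (128 sectors) with 512 grain table
             * entries per table, so each grain table covers 32MB of guest data;
             * the grain directory gets one entry per such 32MB chunk. */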
3738 uint64_t cSectorsPerGDE, cSectorsPerGD;
3739 pExtent->enmType = VMDKETYPE_HOSTED_SPARSE;
3740 pExtent->cSectors = VMDK_BYTE2SECTOR(RT_ALIGN_64(cbExtent, 65536));
3741 pExtent->cSectorsPerGrain = VMDK_BYTE2SECTOR(65536);
3742 pExtent->cGTEntries = 512;
3743 cSectorsPerGDE = pExtent->cGTEntries * pExtent->cSectorsPerGrain;
3744 pExtent->cSectorsPerGDE = cSectorsPerGDE;
3745 pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE;
3746 cSectorsPerGD = (pExtent->cGDEntries + (512 / sizeof(uint32_t) - 1)) / (512 / sizeof(uint32_t));
3747 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
3748 {
3749                 /* The spec says version is 1 for all VMDKs, but the vast
3750                  * majority of streamOptimized VMDKs actually contain
3751                  * version 3 - so go with the majority. Both are accepted. */
3752 pExtent->uVersion = 3;
3753 pExtent->uCompression = VMDK_COMPRESSION_DEFLATE;
3754 }
3755 }
3756 else
3757 {
3758 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX)
3759 pExtent->enmType = VMDKETYPE_VMFS;
3760 else
3761 pExtent->enmType = VMDKETYPE_FLAT;
3762 }
3763
3764 pExtent->enmAccess = VMDKACCESS_READWRITE;
3765 pExtent->fUncleanShutdown = true;
3766 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbExtent);
3767 pExtent->uSectorOffset = 0;
3768 pExtent->fMetaDirty = true;
3769
3770 if (!(uImageFlags & VD_IMAGE_FLAGS_FIXED))
3771 {
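            /* Place the grain directory right after the embedded descriptor (if
             * any); without an embedded descriptor it starts at sector 1, just
             * past the sparse extent header. */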
3772 rc = vmdkCreateGrainDirectory(pExtent,
3773 RT_MAX( pExtent->uDescriptorSector
3774 + pExtent->cDescriptorSectors,
3775 1),
3776 true);
3777 if (RT_FAILURE(rc))
3778 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new grain directory in '%s'"), pExtent->pszFullname);
3779 }
3780
3781 if (RT_SUCCESS(rc) && pfnProgress)
3782 pfnProgress(pvUser, uPercentStart + i * uPercentSpan / cExtents);
3783
3784 cbRemaining -= cbExtent;
3785 cbOffset += cbExtent;
3786 }
3787
3788 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX)
3789 {
3790         /* VirtualBox doesn't care, but VMware ESX freaks out if the wrong
3791 * controller type is set in an image. */
3792 rc = vmdkDescDDBSetStr(pImage, &pImage->Descriptor, "ddb.adapterType", "lsilogic");
3793 if (RT_FAILURE(rc))
3794 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not set controller type to lsilogic in '%s'"), pImage->pszFilename);
3795 }
3796
3797 const char *pszDescType = NULL;
3798 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
3799 {
3800 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX)
3801 pszDescType = "vmfs";
3802 else
3803 pszDescType = (cExtents == 1)
3804 ? "monolithicFlat" : "twoGbMaxExtentFlat";
3805 }
3806 else
3807 {
3808 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
3809 pszDescType = "streamOptimized";
3810 else
3811 {
3812 pszDescType = (cExtents == 1)
3813 ? "monolithicSparse" : "twoGbMaxExtentSparse";
3814 }
3815 }
3816 rc = vmdkDescBaseSetStr(pImage, &pImage->Descriptor, "createType",
3817 pszDescType);
3818 if (RT_FAILURE(rc))
3819 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not set the image type in '%s'"), pImage->pszFilename);
3820 return rc;
3821}
3822
3823/**
3824 * Internal: The actual code for creating any VMDK variant currently in
3825 * existence on hosted environments.
3826 */
3827static int vmdkCreateImage(PVMDKIMAGE pImage, uint64_t cbSize,
3828 unsigned uImageFlags, const char *pszComment,
3829 PCPDMMEDIAGEOMETRY pPCHSGeometry,
3830 PCPDMMEDIAGEOMETRY pLCHSGeometry, PCRTUUID pUuid,
3831 PFNVDPROGRESS pfnProgress, void *pvUser,
3832 unsigned uPercentStart, unsigned uPercentSpan)
3833{
3834 int rc;
3835
3836 pImage->uImageFlags = uImageFlags;
3837
3838 /* Try to get error interface. */
3839 pImage->pInterfaceError = VDInterfaceGet(pImage->pVDIfsDisk, VDINTERFACETYPE_ERROR);
3840 if (pImage->pInterfaceError)
3841 pImage->pInterfaceErrorCallbacks = VDGetInterfaceError(pImage->pInterfaceError);
3842
3843 /* Try to get async I/O interface. */
3844 pImage->pInterfaceAsyncIO = VDInterfaceGet(pImage->pVDIfsDisk, VDINTERFACETYPE_ASYNCIO);
3845 if (pImage->pInterfaceAsyncIO)
3846 pImage->pInterfaceAsyncIOCallbacks = VDGetInterfaceAsyncIO(pImage->pInterfaceAsyncIO);
3847
3848 rc = vmdkCreateDescriptor(pImage, pImage->pDescData, pImage->cbDescAlloc,
3849 &pImage->Descriptor);
3850 if (RT_FAILURE(rc))
3851 {
3852 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new descriptor in '%s'"), pImage->pszFilename);
3853 goto out;
3854 }
3855
3856 if ( (uImageFlags & VD_IMAGE_FLAGS_FIXED)
3857 && (uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK))
3858 {
3859 /* Raw disk image (includes raw partition). */
3860 const PVBOXHDDRAW pRaw = (const PVBOXHDDRAW)pszComment;
3861 /* As the comment is misused, zap it so that no garbage comment
3862 * is set below. */
3863 pszComment = NULL;
3864 rc = vmdkCreateRawImage(pImage, pRaw, cbSize);
3865 }
3866 else
3867 {
3868 /* Regular fixed or sparse image (monolithic or split). */
3869 rc = vmdkCreateRegularImage(pImage, cbSize, uImageFlags,
3870 pfnProgress, pvUser, uPercentStart,
3871 uPercentSpan * 95 / 100);
3872 }
3873
3874 if (RT_FAILURE(rc))
3875 goto out;
3876
3877 if (RT_SUCCESS(rc) && pfnProgress)
3878 pfnProgress(pvUser, uPercentStart + uPercentSpan * 98 / 100);
3879
3880 pImage->cbSize = cbSize;
3881
3882 for (unsigned i = 0; i < pImage->cExtents; i++)
3883 {
3884 PVMDKEXTENT pExtent = &pImage->pExtents[i];
3885
3886 rc = vmdkDescExtInsert(pImage, &pImage->Descriptor, pExtent->enmAccess,
3887 pExtent->cNominalSectors, pExtent->enmType,
3888 pExtent->pszBasename, pExtent->uSectorOffset);
3889 if (RT_FAILURE(rc))
3890 {
3891 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not insert the extent list into descriptor in '%s'"), pImage->pszFilename);
3892 goto out;
3893 }
3894 }
3895 vmdkDescExtRemoveDummy(pImage, &pImage->Descriptor);
3896
3897 if ( pPCHSGeometry->cCylinders != 0
3898 && pPCHSGeometry->cHeads != 0
3899 && pPCHSGeometry->cSectors != 0)
3900 {
3901 rc = vmdkDescSetPCHSGeometry(pImage, pPCHSGeometry);
3902 if (RT_FAILURE(rc))
3903 goto out;
3904 }
3905 if ( pLCHSGeometry->cCylinders != 0
3906 && pLCHSGeometry->cHeads != 0
3907 && pLCHSGeometry->cSectors != 0)
3908 {
3909 rc = vmdkDescSetLCHSGeometry(pImage, pLCHSGeometry);
3910 if (RT_FAILURE(rc))
3911 goto out;
3912 }
3913
3914 pImage->LCHSGeometry = *pLCHSGeometry;
3915 pImage->PCHSGeometry = *pPCHSGeometry;
3916
3917 pImage->ImageUuid = *pUuid;
3918 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
3919 VMDK_DDB_IMAGE_UUID, &pImage->ImageUuid);
3920 if (RT_FAILURE(rc))
3921 {
3922 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing image UUID in new descriptor in '%s'"), pImage->pszFilename);
3923 goto out;
3924 }
3925 RTUuidClear(&pImage->ParentUuid);
3926 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
3927 VMDK_DDB_PARENT_UUID, &pImage->ParentUuid);
3928 if (RT_FAILURE(rc))
3929 {
3930 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing parent image UUID in new descriptor in '%s'"), pImage->pszFilename);
3931 goto out;
3932 }
3933 RTUuidClear(&pImage->ModificationUuid);
3934 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
3935 VMDK_DDB_MODIFICATION_UUID,
3936 &pImage->ModificationUuid);
3937 if (RT_FAILURE(rc))
3938 {
3939 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing modification UUID in new descriptor in '%s'"), pImage->pszFilename);
3940 goto out;
3941 }
3942 RTUuidClear(&pImage->ParentModificationUuid);
3943 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
3944 VMDK_DDB_PARENT_MODIFICATION_UUID,
3945 &pImage->ParentModificationUuid);
3946 if (RT_FAILURE(rc))
3947 {
3948 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing parent modification UUID in new descriptor in '%s'"), pImage->pszFilename);
3949 goto out;
3950 }
3951
3952 rc = vmdkAllocateGrainTableCache(pImage);
3953 if (RT_FAILURE(rc))
3954 goto out;
3955
3956 rc = vmdkSetImageComment(pImage, pszComment);
3957 if (RT_FAILURE(rc))
3958 {
3959 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot set image comment in '%s'"), pImage->pszFilename);
3960 goto out;
3961 }
3962
3963 if (RT_SUCCESS(rc) && pfnProgress)
3964 pfnProgress(pvUser, uPercentStart + uPercentSpan * 99 / 100);
3965
3966 rc = vmdkFlushImage(pImage);
3967
3968out:
3969 if (RT_SUCCESS(rc) && pfnProgress)
3970 pfnProgress(pvUser, uPercentStart + uPercentSpan);
3971
3972 if (RT_FAILURE(rc))
3973 vmdkFreeImage(pImage, rc != VERR_ALREADY_EXISTS);
3974 return rc;
3975}
3976
3977/**
3978 * Internal: Update image comment.
3979 */
3980static int vmdkSetImageComment(PVMDKIMAGE pImage, const char *pszComment)
3981{
3982 char *pszCommentEncoded;
3983 if (pszComment)
3984 {
3985 pszCommentEncoded = vmdkEncodeString(pszComment);
3986 if (!pszCommentEncoded)
3987 return VERR_NO_MEMORY;
3988 }
3989 else
3990 pszCommentEncoded = NULL;
3991 int rc = vmdkDescDDBSetStr(pImage, &pImage->Descriptor,
3992 "ddb.comment", pszCommentEncoded);
3993 if (pszComment)
3994 RTStrFree(pszCommentEncoded);
3995 if (RT_FAILURE(rc))
3996 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing image comment in descriptor in '%s'"), pImage->pszFilename);
3997 return VINF_SUCCESS;
3998}
3999
4000/**
4001 * Internal. Free all allocated space for representing an image, and optionally
4002 * delete the image from disk.
4003 */
4004static void vmdkFreeImage(PVMDKIMAGE pImage, bool fDelete)
4005{
4006 AssertPtr(pImage);
4007
4008 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
4009 {
4010 /* Mark all extents as clean. */
4011 for (unsigned i = 0; i < pImage->cExtents; i++)
4012 {
4013 if (( pImage->pExtents[i].enmType == VMDKETYPE_HOSTED_SPARSE
4014#ifdef VBOX_WITH_VMDK_ESX
4015 || pImage->pExtents[i].enmType == VMDKETYPE_ESX_SPARSE
4016#endif /* VBOX_WITH_VMDK_ESX */
4017 )
4018 && pImage->pExtents[i].fUncleanShutdown)
4019 {
4020 pImage->pExtents[i].fUncleanShutdown = false;
4021 pImage->pExtents[i].fMetaDirty = true;
4022 }
4023 }
4024 }
4025 (void)vmdkFlushImage(pImage);
4026
4027 if (pImage->pExtents != NULL)
4028 {
4029 for (unsigned i = 0 ; i < pImage->cExtents; i++)
4030 vmdkFreeExtentData(pImage, &pImage->pExtents[i], fDelete);
4031 RTMemFree(pImage->pExtents);
4032 pImage->pExtents = NULL;
4033 }
4034 pImage->cExtents = 0;
4035 if (pImage->pFile != NULL)
4036 vmdkFileClose(pImage, &pImage->pFile, fDelete);
4037 vmdkFileCheckAllClose(pImage);
4038 if (pImage->pGTCache)
4039 {
4040 RTMemFree(pImage->pGTCache);
4041 pImage->pGTCache = NULL;
4042 }
4043 if (pImage->pDescData)
4044 {
4045 RTMemFree(pImage->pDescData);
4046 pImage->pDescData = NULL;
4047 }
4048}
4049
4050/**
4051 * Internal. Flush image data (and metadata) to disk.
4052 */
4053static int vmdkFlushImage(PVMDKIMAGE pImage)
4054{
4055 PVMDKEXTENT pExtent;
4056 int rc = VINF_SUCCESS;
4057
4058 /* Update descriptor if changed. */
4059 if (pImage->Descriptor.fDirty)
4060 {
4061 rc = vmdkWriteDescriptor(pImage);
4062 if (RT_FAILURE(rc))
4063 goto out;
4064 }
4065
4066 for (unsigned i = 0; i < pImage->cExtents; i++)
4067 {
4068 pExtent = &pImage->pExtents[i];
4069 if (pExtent->pFile != NULL && pExtent->fMetaDirty)
4070 {
4071 switch (pExtent->enmType)
4072 {
4073 case VMDKETYPE_HOSTED_SPARSE:
4074 rc = vmdkWriteMetaSparseExtent(pExtent, 0);
4075 if (RT_FAILURE(rc))
4076 goto out;
4077 if (pExtent->fFooter)
4078 {
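                        /* The footer (a second copy of the sparse header) lives in
                         * the second-to-last sector; the very last sector holds the
                         * end-of-stream marker, hence cbSize - 2*512. */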
4079 uint64_t cbSize;
4080 rc = vmdkFileGetSize(pExtent->pFile, &cbSize);
4081 if (RT_FAILURE(rc))
4082 goto out;
4083 cbSize = RT_ALIGN_64(cbSize, 512);
4084 rc = vmdkWriteMetaSparseExtent(pExtent, cbSize - 2*512);
4085 if (RT_FAILURE(rc))
4086 goto out;
4087 }
4088 break;
4089#ifdef VBOX_WITH_VMDK_ESX
4090 case VMDKETYPE_ESX_SPARSE:
4091 /** @todo update the header. */
4092 break;
4093#endif /* VBOX_WITH_VMDK_ESX */
4094 case VMDKETYPE_VMFS:
4095 case VMDKETYPE_FLAT:
4096 /* Nothing to do. */
4097 break;
4098 case VMDKETYPE_ZERO:
4099 default:
4100 AssertMsgFailed(("extent with type %d marked as dirty\n",
4101 pExtent->enmType));
4102 break;
4103 }
4104 }
4105 switch (pExtent->enmType)
4106 {
4107 case VMDKETYPE_HOSTED_SPARSE:
4108#ifdef VBOX_WITH_VMDK_ESX
4109 case VMDKETYPE_ESX_SPARSE:
4110#endif /* VBOX_WITH_VMDK_ESX */
4111 case VMDKETYPE_VMFS:
4112 case VMDKETYPE_FLAT:
4113 /** @todo implement proper path absolute check. */
4114 if ( pExtent->pFile != NULL
4115 && !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
4116 && !(pExtent->pszBasename[0] == RTPATH_SLASH))
4117 rc = vmdkFileFlush(pExtent->pFile);
4118 break;
4119 case VMDKETYPE_ZERO:
4120 /* No need to do anything for this extent. */
4121 break;
4122 default:
4123 AssertMsgFailed(("unknown extent type %d\n", pExtent->enmType));
4124 break;
4125 }
4126 }
4127
4128out:
4129 return rc;
4130}
4131
4132/**
4133 * Internal. Find extent corresponding to the sector number in the disk.
4134 */
4135static int vmdkFindExtent(PVMDKIMAGE pImage, uint64_t offSector,
4136 PVMDKEXTENT *ppExtent, uint64_t *puSectorInExtent)
4137{
4138 PVMDKEXTENT pExtent = NULL;
4139 int rc = VINF_SUCCESS;
4140
4141 for (unsigned i = 0; i < pImage->cExtents; i++)
4142 {
4143 if (offSector < pImage->pExtents[i].cNominalSectors)
4144 {
4145 pExtent = &pImage->pExtents[i];
4146 *puSectorInExtent = offSector + pImage->pExtents[i].uSectorOffset;
4147 break;
4148 }
4149 offSector -= pImage->pExtents[i].cNominalSectors;
4150 }
4151
4152 if (pExtent)
4153 *ppExtent = pExtent;
4154 else
4155 rc = VERR_IO_SECTOR_NOT_FOUND;
4156
4157 return rc;
4158}
4159
4160/**
4161 * Internal. Hash function for placing the grain table hash entries.
4162 */
4163static uint32_t vmdkGTCacheHash(PVMDKGTCACHE pCache, uint64_t uSector,
4164 unsigned uExtent)
4165{
4166 /** @todo this hash function is quite simple, maybe use a better one which
4167 * scrambles the bits better. */
4168 return (uSector + uExtent) % pCache->cEntries;
4169}
4170
4171/**
4172 * Internal. Get sector number in the extent file from the relative sector
4173 * number in the extent.
4174 */
4175static int vmdkGetSector(PVMDKGTCACHE pCache, PVMDKEXTENT pExtent,
4176 uint64_t uSector, uint64_t *puExtentSector)
4177{
4178 uint64_t uGDIndex, uGTSector, uGTBlock;
4179 uint32_t uGTHash, uGTBlockIndex;
4180 PVMDKGTCACHEENTRY pGTCacheEntry;
4181 uint32_t aGTDataTmp[VMDK_GT_CACHELINE_SIZE];
4182 int rc;
4183
4184 uGDIndex = uSector / pExtent->cSectorsPerGDE;
4185 if (uGDIndex >= pExtent->cGDEntries)
4186 return VERR_OUT_OF_RANGE;
4187 uGTSector = pExtent->pGD[uGDIndex];
4188 if (!uGTSector)
4189 {
4190 /* There is no grain table referenced by this grain directory
4191 * entry. So there is absolutely no data in this area. */
4192 *puExtentSector = 0;
4193 return VINF_SUCCESS;
4194 }
4195
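    /* The grain table cache stores lines of VMDK_GT_CACHELINE_SIZE consecutive
     * GT entries; uGTBlock identifies which cache line (of which extent) the
     * requested sector falls into. */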
4196 uGTBlock = uSector / (pExtent->cSectorsPerGrain * VMDK_GT_CACHELINE_SIZE);
4197 uGTHash = vmdkGTCacheHash(pCache, uGTBlock, pExtent->uExtent);
4198 pGTCacheEntry = &pCache->aGTCache[uGTHash];
4199 if ( pGTCacheEntry->uExtent != pExtent->uExtent
4200 || pGTCacheEntry->uGTBlock != uGTBlock)
4201 {
4202 /* Cache miss, fetch data from disk. */
4203 rc = vmdkFileReadAt(pExtent->pFile,
4204 VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
4205 aGTDataTmp, sizeof(aGTDataTmp), NULL);
4206 if (RT_FAILURE(rc))
4207 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot read grain table entry in '%s'"), pExtent->pszFullname);
4208 pGTCacheEntry->uExtent = pExtent->uExtent;
4209 pGTCacheEntry->uGTBlock = uGTBlock;
4210 for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
4211 pGTCacheEntry->aGTData[i] = RT_LE2H_U32(aGTDataTmp[i]);
4212 }
4213 uGTBlockIndex = (uSector / pExtent->cSectorsPerGrain) % VMDK_GT_CACHELINE_SIZE;
4214 uint32_t uGrainSector = pGTCacheEntry->aGTData[uGTBlockIndex];
4215 if (uGrainSector)
4216 *puExtentSector = uGrainSector + uSector % pExtent->cSectorsPerGrain;
4217 else
4218 *puExtentSector = 0;
4219 return VINF_SUCCESS;
4220}
4221
4222/**
4223 * Internal. Allocates a new grain table (if necessary), writes the grain
4224 * and updates the grain table. The cache is also updated by this operation.
4225 * This is separate from vmdkGetSector, because that should be as fast as
4226 * possible. Most code from vmdkGetSector also appears here.
4227 */
4228static int vmdkAllocGrain(PVMDKGTCACHE pCache, PVMDKEXTENT pExtent,
4229 uint64_t uSector, const void *pvBuf,
4230 uint64_t cbWrite)
4231{
4232 uint64_t uGDIndex, uGTSector, uRGTSector, uGTBlock;
4233 uint64_t cbExtentSize;
4234 uint32_t uGTHash, uGTBlockIndex;
4235 PVMDKGTCACHEENTRY pGTCacheEntry;
4236 uint32_t aGTDataTmp[VMDK_GT_CACHELINE_SIZE];
4237 int rc;
4238
4239 uGDIndex = uSector / pExtent->cSectorsPerGDE;
4240 if (uGDIndex >= pExtent->cGDEntries)
4241 return VERR_OUT_OF_RANGE;
4242 uGTSector = pExtent->pGD[uGDIndex];
4243 if (pExtent->pRGD)
4244 uRGTSector = pExtent->pRGD[uGDIndex];
4245 else
4246 uRGTSector = 0; /**< avoid compiler warning */
4247 if (!uGTSector)
4248 {
4249 /* There is no grain table referenced by this grain directory
4250 * entry. So there is absolutely no data in this area. Allocate
4251 * a new grain table and put the reference to it in the GDs. */
4252 rc = vmdkFileGetSize(pExtent->pFile, &cbExtentSize);
4253 if (RT_FAILURE(rc))
4254 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: error getting size in '%s'"), pExtent->pszFullname);
4255 Assert(!(cbExtentSize % 512));
4256 cbExtentSize = RT_ALIGN_64(cbExtentSize, 512);
4257 uGTSector = VMDK_BYTE2SECTOR(cbExtentSize);
4258         /* For writable streamOptimized extents the final sector is the
4259          * end-of-stream marker (preceded by the footer, if present). The new
4260          * grain table overwrites them; both are re-written after it below. */
4261 if (pExtent->pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
4262 {
4263 uint64_t uEOSOff = 0;
4264 uGTSector--;
4265 if (pExtent->fFooter)
4266 {
4267 uGTSector--;
4268 uEOSOff = 512;
4269 rc = vmdkWriteMetaSparseExtent(pExtent, VMDK_SECTOR2BYTE(uGTSector) + pExtent->cGTEntries * sizeof(uint32_t));
4270 if (RT_FAILURE(rc))
4271 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write footer after grain table in '%s'"), pExtent->pszFullname);
4272 }
4273 pExtent->uLastGrainSector = 0;
4274 uint8_t aEOS[512];
4275 memset(aEOS, '\0', sizeof(aEOS));
4276 rc = vmdkFileWriteAt(pExtent->pFile,
4277 VMDK_SECTOR2BYTE(uGTSector) + pExtent->cGTEntries * sizeof(uint32_t) + uEOSOff,
4278 aEOS, sizeof(aEOS), NULL);
4279 if (RT_FAILURE(rc))
4280                 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write end-of-stream marker after grain table in '%s'"), pExtent->pszFullname);
4281 }
4282 /* Normally the grain table is preallocated for hosted sparse extents
4283 * that support more than 32 bit sector numbers. So this shouldn't
4284 * ever happen on a valid extent. */
4285 if (uGTSector > UINT32_MAX)
4286 return VERR_VD_VMDK_INVALID_HEADER;
4287 /* Write grain table by writing the required number of grain table
4288 * cache chunks. Avoids dynamic memory allocation, but is a bit
4289 * slower. But as this is a pretty infrequently occurring case it
4290 * should be acceptable. */
4291 memset(aGTDataTmp, '\0', sizeof(aGTDataTmp));
4292 for (unsigned i = 0;
4293 i < pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE;
4294 i++)
4295 {
4296 rc = vmdkFileWriteAt(pExtent->pFile,
4297 VMDK_SECTOR2BYTE(uGTSector) + i * sizeof(aGTDataTmp),
4298 aGTDataTmp, sizeof(aGTDataTmp), NULL);
4299 if (RT_FAILURE(rc))
4300 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write grain table allocation in '%s'"), pExtent->pszFullname);
4301 }
4302 if (pExtent->pRGD)
4303 {
4304 AssertReturn(!uRGTSector, VERR_VD_VMDK_INVALID_HEADER);
4305 rc = vmdkFileGetSize(pExtent->pFile, &cbExtentSize);
4306 if (RT_FAILURE(rc))
4307 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: error getting size in '%s'"), pExtent->pszFullname);
4308 Assert(!(cbExtentSize % 512));
4309 uRGTSector = VMDK_BYTE2SECTOR(cbExtentSize);
4310             /* For writable streamOptimized extents the final sector is the
4311              * end-of-stream marker (preceded by the footer, if present). The new
4312              * redundant grain table overwrites them; both are re-written after it below. */
4313 if (pExtent->pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
4314 {
4315 uint64_t uEOSOff = 0;
4316 uRGTSector--;
4317 if (pExtent->fFooter)
4318 {
4319 uRGTSector--;
4320 uEOSOff = 512;
4321 rc = vmdkWriteMetaSparseExtent(pExtent, VMDK_SECTOR2BYTE(uRGTSector) + pExtent->cGTEntries * sizeof(uint32_t));
4322 if (RT_FAILURE(rc))
4323 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write footer after redundant grain table in '%s'"), pExtent->pszFullname);
4324 }
4325 pExtent->uLastGrainSector = 0;
4326 uint8_t aEOS[512];
4327 memset(aEOS, '\0', sizeof(aEOS));
4328 rc = vmdkFileWriteAt(pExtent->pFile,
4329 VMDK_SECTOR2BYTE(uRGTSector) + pExtent->cGTEntries * sizeof(uint32_t) + uEOSOff,
4330 aEOS, sizeof(aEOS), NULL);
4331 if (RT_FAILURE(rc))
4332                 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write end-of-stream marker after redundant grain table in '%s'"), pExtent->pszFullname);
4333 }
4334 /* Normally the redundant grain table is preallocated for hosted
4335 * sparse extents that support more than 32 bit sector numbers. So
4336 * this shouldn't ever happen on a valid extent. */
4337 if (uRGTSector > UINT32_MAX)
4338 return VERR_VD_VMDK_INVALID_HEADER;
4339 /* Write backup grain table by writing the required number of grain
4340 * table cache chunks. Avoids dynamic memory allocation, but is a
4341 * bit slower. But as this is a pretty infrequently occurring case
4342 * it should be acceptable. */
4343 for (unsigned i = 0;
4344 i < pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE;
4345 i++)
4346 {
4347 rc = vmdkFileWriteAt(pExtent->pFile,
4348 VMDK_SECTOR2BYTE(uRGTSector) + i * sizeof(aGTDataTmp),
4349 aGTDataTmp, sizeof(aGTDataTmp), NULL);
4350 if (RT_FAILURE(rc))
4351 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write backup grain table allocation in '%s'"), pExtent->pszFullname);
4352 }
4353 }
4354
4355         /* Update the grain directory on disk (doing it before writing the
4356          * grain table would result in a garbled extent if the operation is
4357          * aborted for some reason; this way the worst that can happen is
4358          * some unused sectors in the extent). */
4359 uint32_t uGTSectorLE = RT_H2LE_U64(uGTSector);
4360 rc = vmdkFileWriteAt(pExtent->pFile,
4361 VMDK_SECTOR2BYTE(pExtent->uSectorGD) + uGDIndex * sizeof(uGTSectorLE),
4362 &uGTSectorLE, sizeof(uGTSectorLE), NULL);
4363 if (RT_FAILURE(rc))
4364 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write grain directory entry in '%s'"), pExtent->pszFullname);
4365 if (pExtent->pRGD)
4366 {
4367 uint32_t uRGTSectorLE = RT_H2LE_U64(uRGTSector);
4368 rc = vmdkFileWriteAt(pExtent->pFile,
4369 VMDK_SECTOR2BYTE(pExtent->uSectorRGD) + uGDIndex * sizeof(uRGTSectorLE),
4370 &uRGTSectorLE, sizeof(uRGTSectorLE), NULL);
4371 if (RT_FAILURE(rc))
4372 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write backup grain directory entry in '%s'"), pExtent->pszFullname);
4373 }
4374
4375 /* As the final step update the in-memory copy of the GDs. */
4376 pExtent->pGD[uGDIndex] = uGTSector;
4377 if (pExtent->pRGD)
4378 pExtent->pRGD[uGDIndex] = uRGTSector;
4379 }
4380
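    /* New grains are always appended at the current end of the extent file; for
     * streamOptimized extents the exact write offset is adjusted below so the
     * grain either overwrites the end-of-stream marker (and footer, if present)
     * or follows the last grain written. */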
4381 rc = vmdkFileGetSize(pExtent->pFile, &cbExtentSize);
4382 if (RT_FAILURE(rc))
4383 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: error getting size in '%s'"), pExtent->pszFullname);
4384 Assert(!(cbExtentSize % 512));
4385
4386 /* Write the data. Always a full grain, or we're in big trouble. */
4387 if (pExtent->pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
4388 {
4389 /* For streamOptimized extents this is a little more difficult, as the
4390 * cached data also needs to be updated, to handle updating the last
4391 * written block properly. Also we're trying to avoid unnecessary gaps.
4392 * Additionally the end-of-stream marker needs to be written. */
4393 if (!pExtent->uLastGrainSector)
4394 {
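            /* No last-written grain is cached: the file currently ends with the
             * end-of-stream marker (preceded by the footer, if present). The new
             * grain overwrites them; both are re-appended after the grain data. */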
4395 cbExtentSize -= 512;
4396 if (pExtent->fFooter)
4397 cbExtentSize -= 512;
4398 }
4399 else
4400 cbExtentSize = VMDK_SECTOR2BYTE(pExtent->uLastGrainSector) + pExtent->cbLastGrainWritten;
4401 Assert(cbWrite == VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
4402 uint32_t cbGrain = 0;
4403 rc = vmdkFileDeflateAt(pExtent->pFile, cbExtentSize,
4404 pvBuf, cbWrite, VMDK_MARKER_IGNORE, uSector, &cbGrain);
4405 if (RT_FAILURE(rc))
4406 {
4407 pExtent->uGrainSector = 0;
4408 pExtent->uLastGrainSector = 0;
4409 AssertRC(rc);
4410 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write allocated compressed data block in '%s'"), pExtent->pszFullname);
4411 }
4412 cbGrain = RT_ALIGN(cbGrain, 512);
4413 pExtent->uLastGrainSector = VMDK_BYTE2SECTOR(cbExtentSize);
4414 pExtent->uLastGrainWritten = uSector / pExtent->cSectorsPerGrain;
4415 pExtent->cbLastGrainWritten = cbGrain;
4416 memcpy(pExtent->pvGrain, pvBuf, cbWrite);
4417 pExtent->uGrainSector = uSector;
4418
4419 uint64_t uEOSOff = 0;
4420 if (pExtent->fFooter)
4421 {
4422 uEOSOff = 512;
4423 rc = vmdkWriteMetaSparseExtent(pExtent, cbExtentSize + RT_ALIGN(cbGrain, 512));
4424 if (RT_FAILURE(rc))
4425 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write footer after allocated data block in '%s'"), pExtent->pszFullname);
4426 }
4427 uint8_t aEOS[512];
4428 memset(aEOS, '\0', sizeof(aEOS));
4429 rc = vmdkFileWriteAt(pExtent->pFile, cbExtentSize + RT_ALIGN(cbGrain, 512) + uEOSOff,
4430 aEOS, sizeof(aEOS), NULL);
4431 if (RT_FAILURE(rc))
4432         return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write end-of-stream marker after allocated data block in '%s'"), pExtent->pszFullname);
4433 }
4434 else
4435 {
4436 rc = vmdkFileWriteAt(pExtent->pFile, cbExtentSize, pvBuf, cbWrite, NULL);
4437 if (RT_FAILURE(rc))
4438 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write allocated data block in '%s'"), pExtent->pszFullname);
4439 }
4440
4441 /* Update the grain table (and the cache). */
4442 uGTBlock = uSector / (pExtent->cSectorsPerGrain * VMDK_GT_CACHELINE_SIZE);
4443 uGTHash = vmdkGTCacheHash(pCache, uGTBlock, pExtent->uExtent);
4444 pGTCacheEntry = &pCache->aGTCache[uGTHash];
4445 if ( pGTCacheEntry->uExtent != pExtent->uExtent
4446 || pGTCacheEntry->uGTBlock != uGTBlock)
4447 {
4448 /* Cache miss, fetch data from disk. */
4449 rc = vmdkFileReadAt(pExtent->pFile,
4450 VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
4451 aGTDataTmp, sizeof(aGTDataTmp), NULL);
4452 if (RT_FAILURE(rc))
4453 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot read allocated grain table entry in '%s'"), pExtent->pszFullname);
4454 pGTCacheEntry->uExtent = pExtent->uExtent;
4455 pGTCacheEntry->uGTBlock = uGTBlock;
4456 for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
4457 pGTCacheEntry->aGTData[i] = RT_LE2H_U32(aGTDataTmp[i]);
4458 }
4459 else
4460 {
4461 /* Cache hit. Convert grain table block back to disk format, otherwise
4462 * the code below will write garbage for all but the updated entry. */
4463 for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
4464 aGTDataTmp[i] = RT_H2LE_U32(pGTCacheEntry->aGTData[i]);
4465 }
4466 uGTBlockIndex = (uSector / pExtent->cSectorsPerGrain) % VMDK_GT_CACHELINE_SIZE;
4467 aGTDataTmp[uGTBlockIndex] = RT_H2LE_U32(VMDK_BYTE2SECTOR(cbExtentSize));
4468 pGTCacheEntry->aGTData[uGTBlockIndex] = VMDK_BYTE2SECTOR(cbExtentSize);
4469 /* Update grain table on disk. */
4470 rc = vmdkFileWriteAt(pExtent->pFile,
4471 VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
4472 aGTDataTmp, sizeof(aGTDataTmp), NULL);
4473 if (RT_FAILURE(rc))
4474 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write updated grain table in '%s'"), pExtent->pszFullname);
4475 if (pExtent->pRGD)
4476 {
4477 /* Update backup grain table on disk. */
4478 rc = vmdkFileWriteAt(pExtent->pFile,
4479 VMDK_SECTOR2BYTE(uRGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
4480 aGTDataTmp, sizeof(aGTDataTmp), NULL);
4481 if (RT_FAILURE(rc))
4482 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write updated backup grain table in '%s'"), pExtent->pszFullname);
4483 }
4484#ifdef VBOX_WITH_VMDK_ESX
4485 if (RT_SUCCESS(rc) && pExtent->enmType == VMDKETYPE_ESX_SPARSE)
4486 {
4487 pExtent->uFreeSector = uGTSector + VMDK_BYTE2SECTOR(cbWrite);
4488 pExtent->fMetaDirty = true;
4489 }
4490#endif /* VBOX_WITH_VMDK_ESX */
4491 return rc;
4492}
4493
4494
4495/** @copydoc VBOXHDDBACKEND::pfnCheckIfValid */
4496static int vmdkCheckIfValid(const char *pszFilename, PVDINTERFACE pVDIfsDisk)
4497{
4498 LogFlowFunc(("pszFilename=\"%s\"\n", pszFilename));
4499 int rc = VINF_SUCCESS;
4500 PVMDKIMAGE pImage;
4501
4502 if ( !pszFilename
4503 || !*pszFilename
4504 || strchr(pszFilename, '"'))
4505 {
4506 rc = VERR_INVALID_PARAMETER;
4507 goto out;
4508 }
4509
4510 pImage = (PVMDKIMAGE)RTMemAllocZ(sizeof(VMDKIMAGE));
4511 if (!pImage)
4512 {
4513 rc = VERR_NO_MEMORY;
4514 goto out;
4515 }
4516 pImage->pszFilename = pszFilename;
4517 pImage->pFile = NULL;
4518 pImage->pExtents = NULL;
4519 pImage->pFiles = NULL;
4520 pImage->pGTCache = NULL;
4521 pImage->pDescData = NULL;
4522 pImage->pVDIfsDisk = pVDIfsDisk;
4523 /** @todo speed up this test open (VD_OPEN_FLAGS_INFO) by skipping as
4524 * much as possible in vmdkOpenImage. */
4525 rc = vmdkOpenImage(pImage, VD_OPEN_FLAGS_INFO | VD_OPEN_FLAGS_READONLY);
4526 vmdkFreeImage(pImage, false);
4527 RTMemFree(pImage);
4528
4529out:
4530 LogFlowFunc(("returns %Rrc\n", rc));
4531 return rc;
4532}
4533
4534/** @copydoc VBOXHDDBACKEND::pfnOpen */
4535static int vmdkOpen(const char *pszFilename, unsigned uOpenFlags,
4536 PVDINTERFACE pVDIfsDisk, PVDINTERFACE pVDIfsImage,
4537 void **ppBackendData)
4538{
4539 LogFlowFunc(("pszFilename=\"%s\" uOpenFlags=%#x pVDIfsDisk=%#p pVDIfsImage=%#p ppBackendData=%#p\n", pszFilename, uOpenFlags, pVDIfsDisk, pVDIfsImage, ppBackendData));
4540 int rc;
4541 PVMDKIMAGE pImage;
4542
4543 /* Check open flags. All valid flags are supported. */
4544 if (uOpenFlags & ~VD_OPEN_FLAGS_MASK)
4545 {
4546 rc = VERR_INVALID_PARAMETER;
4547 goto out;
4548 }
4549
4550 /* Check remaining arguments. */
4551 if ( !VALID_PTR(pszFilename)
4552 || !*pszFilename
4553 || strchr(pszFilename, '"'))
4554 {
4555 rc = VERR_INVALID_PARAMETER;
4556 goto out;
4557 }
4558
4559
4560 pImage = (PVMDKIMAGE)RTMemAllocZ(sizeof(VMDKIMAGE));
4561 if (!pImage)
4562 {
4563 rc = VERR_NO_MEMORY;
4564 goto out;
4565 }
4566 pImage->pszFilename = pszFilename;
4567 pImage->pFile = NULL;
4568 pImage->pExtents = NULL;
4569 pImage->pFiles = NULL;
4570 pImage->pGTCache = NULL;
4571 pImage->pDescData = NULL;
4572 pImage->pVDIfsDisk = pVDIfsDisk;
4573
4574 rc = vmdkOpenImage(pImage, uOpenFlags);
4575 if (RT_SUCCESS(rc))
4576 *ppBackendData = pImage;
4577
4578out:
4579 LogFlowFunc(("returns %Rrc (pBackendData=%#p)\n", rc, *ppBackendData));
4580 return rc;
4581}
4582
4583/** @copydoc VBOXHDDBACKEND::pfnCreate */
4584static int vmdkCreate(const char *pszFilename, uint64_t cbSize,
4585 unsigned uImageFlags, const char *pszComment,
4586 PCPDMMEDIAGEOMETRY pPCHSGeometry,
4587 PCPDMMEDIAGEOMETRY pLCHSGeometry, PCRTUUID pUuid,
4588 unsigned uOpenFlags, unsigned uPercentStart,
4589 unsigned uPercentSpan, PVDINTERFACE pVDIfsDisk,
4590 PVDINTERFACE pVDIfsImage, PVDINTERFACE pVDIfsOperation,
4591 void **ppBackendData)
4592{
4593     LogFlowFunc(("pszFilename=\"%s\" cbSize=%llu uImageFlags=%#x pszComment=\"%s\" pPCHSGeometry=%#p pLCHSGeometry=%#p Uuid=%RTuuid uOpenFlags=%#x uPercentStart=%u uPercentSpan=%u pVDIfsDisk=%#p pVDIfsImage=%#p pVDIfsOperation=%#p ppBackendData=%#p\n", pszFilename, cbSize, uImageFlags, pszComment, pPCHSGeometry, pLCHSGeometry, pUuid, uOpenFlags, uPercentStart, uPercentSpan, pVDIfsDisk, pVDIfsImage, pVDIfsOperation, ppBackendData));
4594 int rc;
4595 PVMDKIMAGE pImage;
4596
4597 PFNVDPROGRESS pfnProgress = NULL;
4598 void *pvUser = NULL;
4599 PVDINTERFACE pIfProgress = VDInterfaceGet(pVDIfsOperation,
4600 VDINTERFACETYPE_PROGRESS);
4601 PVDINTERFACEPROGRESS pCbProgress = NULL;
4602 if (pIfProgress)
4603 {
4604 pCbProgress = VDGetInterfaceProgress(pIfProgress);
4605 pfnProgress = pCbProgress->pfnProgress;
4606 pvUser = pIfProgress->pvUser;
4607 }
4608
4609 /* Check open flags. All valid flags are supported. */
4610 if (uOpenFlags & ~VD_OPEN_FLAGS_MASK)
4611 {
4612 rc = VERR_INVALID_PARAMETER;
4613 goto out;
4614 }
4615
4616 /* Check size. Maximum 2TB-64K for sparse images, otherwise unlimited. */
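    /* (Grain tables hold 32-bit sector numbers, which limits sparse images to
     * roughly 2TB; the 64K of slack presumably keeps the final grain in range.) */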
4617 if ( !cbSize
4618 || (!(uImageFlags & VD_IMAGE_FLAGS_FIXED) && cbSize >= _1T * 2 - _64K))
4619 {
4620 rc = VERR_VD_INVALID_SIZE;
4621 goto out;
4622 }
4623
4624 /* Check remaining arguments. */
4625 if ( !VALID_PTR(pszFilename)
4626 || !*pszFilename
4627 || strchr(pszFilename, '"')
4628 || !VALID_PTR(pPCHSGeometry)
4629 || !VALID_PTR(pLCHSGeometry)
4630#ifndef VBOX_WITH_VMDK_ESX
4631 || ( uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX
4632 && !(uImageFlags & VD_IMAGE_FLAGS_FIXED))
4633#endif
4634 || ( (uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
4635 && (uImageFlags & ~(VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED | VD_IMAGE_FLAGS_DIFF))))
4636 {
4637 rc = VERR_INVALID_PARAMETER;
4638 goto out;
4639 }
4640
4641 pImage = (PVMDKIMAGE)RTMemAllocZ(sizeof(VMDKIMAGE));
4642 if (!pImage)
4643 {
4644 rc = VERR_NO_MEMORY;
4645 goto out;
4646 }
4647 pImage->pszFilename = pszFilename;
4648 pImage->pFile = NULL;
4649 pImage->pExtents = NULL;
4650 pImage->pFiles = NULL;
4651 pImage->pGTCache = NULL;
4652 pImage->pDescData = NULL;
4653 pImage->pVDIfsDisk = pVDIfsDisk;
4654 /* Descriptors for split images can be pretty large, especially if the
4655 * filename is long. So prepare for the worst, and allocate quite some
4656 * memory for the descriptor in this case. */
4657 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G)
4658 pImage->cbDescAlloc = VMDK_SECTOR2BYTE(200);
4659 else
4660 pImage->cbDescAlloc = VMDK_SECTOR2BYTE(20);
4661 pImage->pDescData = (char *)RTMemAllocZ(pImage->cbDescAlloc);
4662 if (!pImage->pDescData)
4663 {
4664 rc = VERR_NO_MEMORY;
4665 goto out;
4666 }
4667
4668 rc = vmdkCreateImage(pImage, cbSize, uImageFlags, pszComment,
4669 pPCHSGeometry, pLCHSGeometry, pUuid,
4670 pfnProgress, pvUser, uPercentStart, uPercentSpan);
4671 if (RT_SUCCESS(rc))
4672 {
4673 /* So far the image is opened in read/write mode. Make sure the
4674 * image is opened in read-only mode if the caller requested that. */
4675 if (uOpenFlags & VD_OPEN_FLAGS_READONLY)
4676 {
4677 vmdkFreeImage(pImage, false);
4678 rc = vmdkOpenImage(pImage, uOpenFlags);
4679 if (RT_FAILURE(rc))
4680 goto out;
4681 }
4682 *ppBackendData = pImage;
4683 }
4684 else
4685 {
4686 RTMemFree(pImage->pDescData);
4687 RTMemFree(pImage);
4688 }
4689
4690out:
4691 LogFlowFunc(("returns %Rrc (pBackendData=%#p)\n", rc, *ppBackendData));
4692 return rc;
4693}
4694
4695/**
4696 * Replaces a fragment of a string with the specified string.
4697 *
4698 * @returns Pointer to the allocated UTF-8 string.
4699 * @param pszWhere UTF-8 string to search in.
4700 * @param pszWhat UTF-8 string to search for.
4701 * @param pszByWhat UTF-8 string to replace the found string with.
4702 */
4703static char * vmdkStrReplace(const char *pszWhere, const char *pszWhat, const char *pszByWhat)
4704{
4705 AssertPtr(pszWhere);
4706 AssertPtr(pszWhat);
4707 AssertPtr(pszByWhat);
4708 const char *pszFoundStr = strstr(pszWhere, pszWhat);
4709 if (!pszFoundStr)
4710 return NULL;
4711 size_t cFinal = strlen(pszWhere) + 1 + strlen(pszByWhat) - strlen(pszWhat);
4712 char *pszNewStr = (char *)RTMemAlloc(cFinal);
4713 if (pszNewStr)
4714 {
4715 char *pszTmp = pszNewStr;
4716 memcpy(pszTmp, pszWhere, pszFoundStr - pszWhere);
4717 pszTmp += pszFoundStr - pszWhere;
4718 memcpy(pszTmp, pszByWhat, strlen(pszByWhat));
4719 pszTmp += strlen(pszByWhat);
4720 strcpy(pszTmp, pszFoundStr + strlen(pszWhat));
4721 }
4722 return pszNewStr;
4723}
4724
4725/** @copydoc VBOXHDDBACKEND::pfnRename */
4726static int vmdkRename(void *pBackendData, const char *pszFilename)
4727{
4728 LogFlowFunc(("pBackendData=%#p pszFilename=%#p\n", pBackendData, pszFilename));
4729
4730 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
4731 int rc = VINF_SUCCESS;
4732 char **apszOldName = NULL;
4733 char **apszNewName = NULL;
4734 char **apszNewLines = NULL;
4735 char *pszOldDescName = NULL;
4736 bool fImageFreed = false;
4737 bool fEmbeddedDesc = false;
4738 unsigned cExtents = pImage->cExtents;
4739 char *pszNewBaseName = NULL;
4740 char *pszOldBaseName = NULL;
4741 char *pszNewFullName = NULL;
4742 char *pszOldFullName = NULL;
4743 const char *pszOldImageName;
4744 unsigned i, line;
4745 VMDKDESCRIPTOR DescriptorCopy;
4746 VMDKEXTENT ExtentCopy;
4747
4748 memset(&DescriptorCopy, 0, sizeof(DescriptorCopy));
4749
4750 /* Check arguments. */
4751 if ( !pImage
4752 || (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK)
4753 || !VALID_PTR(pszFilename)
4754 || !*pszFilename)
4755 {
4756 rc = VERR_INVALID_PARAMETER;
4757 goto out;
4758 }
4759
4760     /*
4761      * Allocate arrays to store both the old and new names of renamed files
4762      * in case we have to roll back the changes. The arrays are initialized
4763      * with zeros; entries are only filled in when and if a file is changed.
4764      */
4765 apszOldName = (char **)RTMemTmpAllocZ((cExtents + 1) * sizeof(char*));
4766 apszNewName = (char **)RTMemTmpAllocZ((cExtents + 1) * sizeof(char*));
4767 apszNewLines = (char **)RTMemTmpAllocZ((cExtents) * sizeof(char*));
4768 if (!apszOldName || !apszNewName || !apszNewLines)
4769 {
4770 rc = VERR_NO_MEMORY;
4771 goto out;
4772 }
4773
4774 /* Save the descriptor size and position. */
4775 if (pImage->pDescData)
4776 {
4777 /* Separate descriptor file. */
4778 fEmbeddedDesc = false;
4779 }
4780 else
4781 {
4782 /* Embedded descriptor file. */
4783 ExtentCopy = pImage->pExtents[0];
4784 fEmbeddedDesc = true;
4785 }
4786 /* Save the descriptor content. */
4787 DescriptorCopy.cLines = pImage->Descriptor.cLines;
4788 for (i = 0; i < DescriptorCopy.cLines; i++)
4789 {
4790 DescriptorCopy.aLines[i] = RTStrDup(pImage->Descriptor.aLines[i]);
4791 if (!DescriptorCopy.aLines[i])
4792 {
4793 rc = VERR_NO_MEMORY;
4794 goto out;
4795 }
4796 }
4797
4798 /* Prepare both old and new base names used for string replacement. */
4799 pszNewBaseName = RTStrDup(RTPathFilename(pszFilename));
4800 RTPathStripExt(pszNewBaseName);
4801 pszOldBaseName = RTStrDup(RTPathFilename(pImage->pszFilename));
4802 RTPathStripExt(pszOldBaseName);
4803 /* Prepare both old and new full names used for string replacement. */
4804 pszNewFullName = RTStrDup(pszFilename);
4805 RTPathStripExt(pszNewFullName);
4806 pszOldFullName = RTStrDup(pImage->pszFilename);
4807 RTPathStripExt(pszOldFullName);
4808
4809 /* --- Up to this point we have not done any damage yet. --- */
4810
4811 /* Save the old name for easy access to the old descriptor file. */
4812 pszOldDescName = RTStrDup(pImage->pszFilename);
4813 /* Save old image name. */
4814 pszOldImageName = pImage->pszFilename;
4815
4816 /* Update the descriptor with modified extent names. */
4817 for (i = 0, line = pImage->Descriptor.uFirstExtent;
4818 i < cExtents;
4819 i++, line = pImage->Descriptor.aNextLines[line])
4820 {
4821 /* Assume that vmdkStrReplace will fail. */
4822 rc = VERR_NO_MEMORY;
4823 /* Update the descriptor. */
4824 apszNewLines[i] = vmdkStrReplace(pImage->Descriptor.aLines[line],
4825 pszOldBaseName, pszNewBaseName);
4826 if (!apszNewLines[i])
4827 goto rollback;
4828 pImage->Descriptor.aLines[line] = apszNewLines[i];
4829 }
4830 /* Make sure the descriptor gets written back. */
4831 pImage->Descriptor.fDirty = true;
4832 /* Flush the descriptor now, in case it is embedded. */
4833 (void)vmdkFlushImage(pImage);
4834
4835 /* Close and rename/move extents. */
4836 for (i = 0; i < cExtents; i++)
4837 {
4838 PVMDKEXTENT pExtent = &pImage->pExtents[i];
4839 /* Compose new name for the extent. */
4840 apszNewName[i] = vmdkStrReplace(pExtent->pszFullname,
4841 pszOldFullName, pszNewFullName);
4842 if (!apszNewName[i])
4843 goto rollback;
4844 /* Close the extent file. */
4845 vmdkFileClose(pImage, &pExtent->pFile, false);
4846 /* Rename the extent file. */
4847 rc = RTFileMove(pExtent->pszFullname, apszNewName[i], 0);
4848 if (RT_FAILURE(rc))
4849 goto rollback;
4850 /* Remember the old name. */
4851 apszOldName[i] = RTStrDup(pExtent->pszFullname);
4852 }
4853 /* Release all old stuff. */
4854 vmdkFreeImage(pImage, false);
4855
4856 fImageFreed = true;
4857
4858     /* The last elements of the new/old name arrays are reserved for
4859      * the descriptor file names.
4860      */
4861 apszNewName[cExtents] = RTStrDup(pszFilename);
4862 /* Rename the descriptor file if it's separate. */
4863 if (!fEmbeddedDesc)
4864 {
4865 rc = RTFileMove(pImage->pszFilename, apszNewName[cExtents], 0);
4866 if (RT_FAILURE(rc))
4867 goto rollback;
4868 /* Save old name only if we may need to change it back. */
4869 apszOldName[cExtents] = RTStrDup(pszFilename);
4870 }
4871
4872 /* Update pImage with the new information. */
4873 pImage->pszFilename = pszFilename;
4874
4875 /* Open the new image. */
4876 rc = vmdkOpenImage(pImage, pImage->uOpenFlags);
4877 if (RT_SUCCESS(rc))
4878 goto out;
4879
4880rollback:
4881 /* Roll back all changes in case of failure. */
4882 if (RT_FAILURE(rc))
4883 {
4884 int rrc;
4885 if (!fImageFreed)
4886 {
4887 /*
4888 * Some extents may have been closed, close the rest. We will
4889 * re-open the whole thing later.
4890 */
4891 vmdkFreeImage(pImage, false);
4892 }
4893 /* Rename files back. */
4894 for (i = 0; i <= cExtents; i++)
4895 {
4896 if (apszOldName[i])
4897 {
4898 rrc = RTFileMove(apszNewName[i], apszOldName[i], 0);
4899 AssertRC(rrc);
4900 }
4901 }
4902 /* Restore the old descriptor. */
4903 PVMDKFILE pFile;
4904 rrc = vmdkFileOpen(pImage, &pFile, pszOldDescName,
4905 RTFILE_O_READWRITE | RTFILE_O_OPEN | RTFILE_O_DENY_WRITE, false);
4906 AssertRC(rrc);
4907 if (fEmbeddedDesc)
4908 {
4909 ExtentCopy.pFile = pFile;
4910 pImage->pExtents = &ExtentCopy;
4911 }
4912 else
4913 {
4914             /* Must not be NULL for a separate descriptor; the actual
4915              * content is never accessed here.
4916              */
4917 pImage->pDescData = pszOldDescName;
4918 pImage->pFile = pFile;
4919 }
4920 pImage->Descriptor = DescriptorCopy;
4921 vmdkWriteDescriptor(pImage);
4922 vmdkFileClose(pImage, &pFile, false);
4923 /* Get rid of the stuff we implanted. */
4924 pImage->pExtents = NULL;
4925 pImage->pFile = NULL;
4926 pImage->pDescData = NULL;
4927 /* Re-open the image back. */
4928 pImage->pszFilename = pszOldImageName;
4929 rrc = vmdkOpenImage(pImage, pImage->uOpenFlags);
4930 AssertRC(rrc);
4931 }
4932
4933out:
4934 for (i = 0; i < DescriptorCopy.cLines; i++)
4935 if (DescriptorCopy.aLines[i])
4936 RTStrFree(DescriptorCopy.aLines[i]);
4937 if (apszOldName)
4938 {
4939 for (i = 0; i <= cExtents; i++)
4940 if (apszOldName[i])
4941 RTStrFree(apszOldName[i]);
4942 RTMemTmpFree(apszOldName);
4943 }
4944 if (apszNewName)
4945 {
4946 for (i = 0; i <= cExtents; i++)
4947 if (apszNewName[i])
4948 RTStrFree(apszNewName[i]);
4949 RTMemTmpFree(apszNewName);
4950 }
4951 if (apszNewLines)
4952 {
4953 for (i = 0; i < cExtents; i++)
4954 if (apszNewLines[i])
4955 RTStrFree(apszNewLines[i]);
4956 RTMemTmpFree(apszNewLines);
4957 }
4958 if (pszOldDescName)
4959 RTStrFree(pszOldDescName);
4960 if (pszOldBaseName)
4961 RTStrFree(pszOldBaseName);
4962 if (pszNewBaseName)
4963 RTStrFree(pszNewBaseName);
4964 if (pszOldFullName)
4965 RTStrFree(pszOldFullName);
4966 if (pszNewFullName)
4967 RTStrFree(pszNewFullName);
4968 LogFlowFunc(("returns %Rrc\n", rc));
4969 return rc;
4970}
4971
4972/** @copydoc VBOXHDDBACKEND::pfnClose */
4973static int vmdkClose(void *pBackendData, bool fDelete)
4974{
4975 LogFlowFunc(("pBackendData=%#p fDelete=%d\n", pBackendData, fDelete));
4976 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
4977 int rc = VINF_SUCCESS;
4978
4979 /* Freeing a never allocated image (e.g. because the open failed) is
4980 * not signalled as an error. After all nothing bad happens. */
4981 if (pImage)
4982 {
4983 vmdkFreeImage(pImage, fDelete);
4984 RTMemFree(pImage);
4985 }
4986
4987 LogFlowFunc(("returns %Rrc\n", rc));
4988 return rc;
4989}
4990
4991/** @copydoc VBOXHDDBACKEND::pfnRead */
4992static int vmdkRead(void *pBackendData, uint64_t uOffset, void *pvBuf,
4993 size_t cbToRead, size_t *pcbActuallyRead)
4994{
4995 LogFlowFunc(("pBackendData=%#p uOffset=%llu pvBuf=%#p cbToRead=%zu pcbActuallyRead=%#p\n", pBackendData, uOffset, pvBuf, cbToRead, pcbActuallyRead));
4996 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
4997 PVMDKEXTENT pExtent;
4998 uint64_t uSectorExtentRel;
4999 uint64_t uSectorExtentAbs;
5000 int rc;
5001
5002 AssertPtr(pImage);
5003 Assert(uOffset % 512 == 0);
5004 Assert(cbToRead % 512 == 0);
5005
5006 if ( uOffset + cbToRead > pImage->cbSize
5007 || cbToRead == 0)
5008 {
5009 rc = VERR_INVALID_PARAMETER;
5010 goto out;
5011 }
5012
5013 rc = vmdkFindExtent(pImage, VMDK_BYTE2SECTOR(uOffset),
5014 &pExtent, &uSectorExtentRel);
5015 if (RT_FAILURE(rc))
5016 goto out;
5017
5018 /* Check access permissions as defined in the extent descriptor. */
5019 if (pExtent->enmAccess == VMDKACCESS_NOACCESS)
5020 {
5021 rc = VERR_VD_VMDK_INVALID_STATE;
5022 goto out;
5023 }
5024
5025 /* Clip read range to remain in this extent. */
5026 cbToRead = RT_MIN(cbToRead, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
5027
5028 /* Handle the read according to the current extent type. */
5029 switch (pExtent->enmType)
5030 {
5031 case VMDKETYPE_HOSTED_SPARSE:
5032#ifdef VBOX_WITH_VMDK_ESX
5033 case VMDKETYPE_ESX_SPARSE:
5034#endif /* VBOX_WITH_VMDK_ESX */
5035 rc = vmdkGetSector(pImage->pGTCache, pExtent, uSectorExtentRel,
5036 &uSectorExtentAbs);
5037 if (RT_FAILURE(rc))
5038 goto out;
5039 /* Clip read range to at most the rest of the grain. */
5040 cbToRead = RT_MIN(cbToRead, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain - uSectorExtentRel % pExtent->cSectorsPerGrain));
5041 Assert(!(cbToRead % 512));
5042 if (uSectorExtentAbs == 0)
5043 rc = VERR_VD_BLOCK_FREE;
5044 else
5045 {
5046 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
5047 {
5048 uint32_t uSectorInGrain = uSectorExtentRel % pExtent->cSectorsPerGrain;
5049 uSectorExtentAbs -= uSectorInGrain;
5050 uint64_t uLBA;
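                /* pvGrain caches the most recently inflated grain, identified by
                 * uGrainSector; only re-read and decompress when a different grain
                 * is requested. */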
5051 if (pExtent->uGrainSector != uSectorExtentAbs)
5052 {
5053 rc = vmdkFileInflateAt(pExtent->pFile, VMDK_SECTOR2BYTE(uSectorExtentAbs),
5054 pExtent->pvGrain, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain), VMDK_MARKER_IGNORE, &uLBA, NULL);
5055 if (RT_FAILURE(rc))
5056 {
5057 pExtent->uGrainSector = 0;
5058 AssertRC(rc);
5059 goto out;
5060 }
5061 pExtent->uGrainSector = uSectorExtentAbs;
5062 Assert(uLBA == uSectorExtentRel);
5063 }
5064 memcpy(pvBuf, (uint8_t *)pExtent->pvGrain + VMDK_SECTOR2BYTE(uSectorInGrain), cbToRead);
5065 }
5066 else
5067 {
5068 rc = vmdkFileReadAt(pExtent->pFile,
5069 VMDK_SECTOR2BYTE(uSectorExtentAbs),
5070 pvBuf, cbToRead, NULL);
5071 }
5072 }
5073 break;
5074 case VMDKETYPE_VMFS:
5075 case VMDKETYPE_FLAT:
5076 rc = vmdkFileReadAt(pExtent->pFile,
5077 VMDK_SECTOR2BYTE(uSectorExtentRel),
5078 pvBuf, cbToRead, NULL);
5079 break;
5080 case VMDKETYPE_ZERO:
5081 memset(pvBuf, '\0', cbToRead);
5082 break;
5083 }
5084 if (pcbActuallyRead)
5085 *pcbActuallyRead = cbToRead;
5086
5087out:
5088 LogFlowFunc(("returns %Rrc\n", rc));
5089 return rc;
5090}
5091
5092/** @copydoc VBOXHDDBACKEND::pfnWrite */
5093static int vmdkWrite(void *pBackendData, uint64_t uOffset, const void *pvBuf,
5094 size_t cbToWrite, size_t *pcbWriteProcess,
5095 size_t *pcbPreRead, size_t *pcbPostRead, unsigned fWrite)
5096{
5097 LogFlowFunc(("pBackendData=%#p uOffset=%llu pvBuf=%#p cbToWrite=%zu pcbWriteProcess=%#p pcbPreRead=%#p pcbPostRead=%#p\n", pBackendData, uOffset, pvBuf, cbToWrite, pcbWriteProcess, pcbPreRead, pcbPostRead));
5098 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5099 PVMDKEXTENT pExtent;
5100 uint64_t uSectorExtentRel;
5101 uint64_t uSectorExtentAbs;
5102 int rc;
5103
5104 AssertPtr(pImage);
5105 Assert(uOffset % 512 == 0);
5106 Assert(cbToWrite % 512 == 0);
5107
5108 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
5109 {
5110 rc = VERR_VD_IMAGE_READ_ONLY;
5111 goto out;
5112 }
5113
5114 if (cbToWrite == 0)
5115 {
5116 rc = VERR_INVALID_PARAMETER;
5117 goto out;
5118 }
5119
5120 /* No size check here, will do that later when the extent is located.
5121 * There are sparse images out there which according to the spec are
5122 * invalid, because the total size is not a multiple of the grain size.
5123 * Also for sparse images which are stitched together in odd ways (not at
5124 * grain boundaries, and with the nominal size not being a multiple of the
5125 * grain size), this would prevent writing to the last grain. */
5126
5127 rc = vmdkFindExtent(pImage, VMDK_BYTE2SECTOR(uOffset),
5128 &pExtent, &uSectorExtentRel);
5129 if (RT_FAILURE(rc))
5130 goto out;
5131
5132 /* Check access permissions as defined in the extent descriptor. */
5133 if (pExtent->enmAccess != VMDKACCESS_READWRITE)
5134 {
5135 rc = VERR_VD_VMDK_INVALID_STATE;
5136 goto out;
5137 }
5138
5139 /* Handle the write according to the current extent type. */
5140 switch (pExtent->enmType)
5141 {
5142 case VMDKETYPE_HOSTED_SPARSE:
5143#ifdef VBOX_WITH_VMDK_ESX
5144 case VMDKETYPE_ESX_SPARSE:
5145#endif /* VBOX_WITH_VMDK_ESX */
5146 rc = vmdkGetSector(pImage->pGTCache, pExtent, uSectorExtentRel,
5147 &uSectorExtentAbs);
5148 if (RT_FAILURE(rc))
5149 goto out;
5150 /* Clip write range to at most the rest of the grain. */
5151 cbToWrite = RT_MIN(cbToWrite, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain - uSectorExtentRel % pExtent->cSectorsPerGrain));
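            /* streamOptimized images only allow sequential writes: reject any
             * write into a grain that precedes the last grain written. */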
5152 if ( pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED
5153 && uSectorExtentRel < (uint64_t)pExtent->uLastGrainWritten * pExtent->cSectorsPerGrain)
5154 {
5155 rc = VERR_VD_VMDK_INVALID_WRITE;
5156 goto out;
5157 }
5158 if (uSectorExtentAbs == 0)
5159 {
5160 if (cbToWrite == VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain))
5161 {
5162 /* Full block write to a previously unallocated block.
5163 * Check if the caller wants to avoid the automatic alloc. */
5164 if (!(fWrite & VD_WRITE_NO_ALLOC))
5165 {
5166 /* Allocate GT and find out where to store the grain. */
5167 rc = vmdkAllocGrain(pImage->pGTCache, pExtent,
5168 uSectorExtentRel, pvBuf, cbToWrite);
5169 }
5170 else
5171 rc = VERR_VD_BLOCK_FREE;
5172 *pcbPreRead = 0;
5173 *pcbPostRead = 0;
5174 }
5175 else
5176 {
5177 /* Clip write range to remain in this extent. */
5178 cbToWrite = RT_MIN(cbToWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
5179 *pcbPreRead = VMDK_SECTOR2BYTE(uSectorExtentRel % pExtent->cSectorsPerGrain);
5180 *pcbPostRead = VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain) - cbToWrite - *pcbPreRead;
5181 rc = VERR_VD_BLOCK_FREE;
5182 }
5183 }
5184 else
5185 {
5186 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
5187 {
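                    /* Compressed grain: inflate the existing grain (unless it is
                     * already cached), merge in the new data, deflate it back in
                     * place and re-emit the footer (if the extent uses one) plus
                     * the end-of-stream marker behind the freshly written grain. */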
5188 uint32_t uSectorInGrain = uSectorExtentRel % pExtent->cSectorsPerGrain;
5189 uSectorExtentAbs -= uSectorInGrain;
5190 uint64_t uLBA = uSectorExtentRel;
5191 if ( pExtent->uGrainSector != uSectorExtentAbs
5192 || pExtent->uGrainSector != pExtent->uLastGrainSector)
5193 {
5194 rc = vmdkFileInflateAt(pExtent->pFile, VMDK_SECTOR2BYTE(uSectorExtentAbs),
5195 pExtent->pvGrain, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain), VMDK_MARKER_IGNORE, &uLBA, NULL);
5196 if (RT_FAILURE(rc))
5197 {
5198 pExtent->uGrainSector = 0;
5199 pExtent->uLastGrainSector = 0;
5200 AssertRC(rc);
5201 goto out;
5202 }
5203 pExtent->uGrainSector = uSectorExtentAbs;
5204 pExtent->uLastGrainSector = uSectorExtentAbs;
5205 Assert(uLBA == uSectorExtentRel);
5206 }
5207 memcpy((uint8_t *)pExtent->pvGrain + VMDK_SECTOR2BYTE(uSectorInGrain), pvBuf, cbToWrite);
5208 uint32_t cbGrain = 0;
5209 rc = vmdkFileDeflateAt(pExtent->pFile,
5210 VMDK_SECTOR2BYTE(uSectorExtentAbs),
5211 pExtent->pvGrain, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain),
5212 VMDK_MARKER_IGNORE, uLBA, &cbGrain);
5213 if (RT_FAILURE(rc))
5214 {
5215 pExtent->uGrainSector = 0;
5216 pExtent->uLastGrainSector = 0;
5217 AssertRC(rc);
5218 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write compressed data block in '%s'"), pExtent->pszFullname);
5219 }
5220 cbGrain = RT_ALIGN(cbGrain, 512);
5221 pExtent->uLastGrainSector = uSectorExtentAbs;
5222 pExtent->uLastGrainWritten = uSectorExtentRel / pExtent->cSectorsPerGrain;
5223 pExtent->cbLastGrainWritten = cbGrain;
5224
5225 uint64_t uEOSOff = 0;
5226 if (pExtent->fFooter)
5227 {
5228 uEOSOff = 512;
5229 rc = vmdkWriteMetaSparseExtent(pExtent, VMDK_SECTOR2BYTE(uSectorExtentAbs) + RT_ALIGN(cbGrain, 512));
5230 if (RT_FAILURE(rc))
5231 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write footer after data block in '%s'"), pExtent->pszFullname);
5232 }
5233 uint8_t aEOS[512];
5234 memset(aEOS, '\0', sizeof(aEOS));
5235 rc = vmdkFileWriteAt(pExtent->pFile,
5236 VMDK_SECTOR2BYTE(uSectorExtentAbs) + RT_ALIGN(cbGrain, 512) + uEOSOff,
5237 aEOS, sizeof(aEOS), NULL);
5238 if (RT_FAILURE(rc))
5239                         return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write end-of-stream marker after data block in '%s'"), pExtent->pszFullname);
5240 }
5241 else
5242 {
5243 rc = vmdkFileWriteAt(pExtent->pFile,
5244 VMDK_SECTOR2BYTE(uSectorExtentAbs),
5245 pvBuf, cbToWrite, NULL);
5246 }
5247 }
5248 break;
5249 case VMDKETYPE_VMFS:
5250 case VMDKETYPE_FLAT:
5251 /* Clip write range to remain in this extent. */
5252 cbToWrite = RT_MIN(cbToWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
5253 rc = vmdkFileWriteAt(pExtent->pFile,
5254 VMDK_SECTOR2BYTE(uSectorExtentRel),
5255 pvBuf, cbToWrite, NULL);
5256 break;
5257 case VMDKETYPE_ZERO:
5258 /* Clip write range to remain in this extent. */
5259 cbToWrite = RT_MIN(cbToWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
5260 break;
5261 }
5262 if (pcbWriteProcess)
5263 *pcbWriteProcess = cbToWrite;
5264
5265out:
5266 LogFlowFunc(("returns %Rrc\n", rc));
5267 return rc;
5268}
5269
5270/** @copydoc VBOXHDDBACKEND::pfnFlush */
5271static int vmdkFlush(void *pBackendData)
5272{
5273 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
5274 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5275 int rc;
5276
5277 AssertPtr(pImage);
5278
5279 rc = vmdkFlushImage(pImage);
5280 LogFlowFunc(("returns %Rrc\n", rc));
5281 return rc;
5282}
5283
5284/** @copydoc VBOXHDDBACKEND::pfnGetVersion */
5285static unsigned vmdkGetVersion(void *pBackendData)
5286{
5287 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
5288 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5289
5290 AssertPtr(pImage);
5291
5292 if (pImage)
5293 return VMDK_IMAGE_VERSION;
5294 else
5295 return 0;
5296}
5297
5298/** @copydoc VBOXHDDBACKEND::pfnGetSize */
5299static uint64_t vmdkGetSize(void *pBackendData)
5300{
5301 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
5302 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5303
5304 AssertPtr(pImage);
5305
5306 if (pImage)
5307 return pImage->cbSize;
5308 else
5309 return 0;
5310}
5311
5312/** @copydoc VBOXHDDBACKEND::pfnGetFileSize */
5313static uint64_t vmdkGetFileSize(void *pBackendData)
5314{
5315 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
5316 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5317 uint64_t cb = 0;
5318
5319 AssertPtr(pImage);
5320
5321 if (pImage)
5322 {
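        /* Accumulate the on-disk size of the descriptor file (if separate) and of
         * every extent file. */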
5323 uint64_t cbFile;
5324 if (pImage->pFile != NULL)
5325 {
5326 int rc = vmdkFileGetSize(pImage->pFile, &cbFile);
5327 if (RT_SUCCESS(rc))
5328 cb += cbFile;
5329 }
5330 for (unsigned i = 0; i < pImage->cExtents; i++)
5331 {
5332 if (pImage->pExtents[i].pFile != NULL)
5333 {
5334 int rc = vmdkFileGetSize(pImage->pExtents[i].pFile, &cbFile);
5335 if (RT_SUCCESS(rc))
5336 cb += cbFile;
5337 }
5338 }
5339 }
5340
5341 LogFlowFunc(("returns %lld\n", cb));
5342 return cb;
5343}
5344
5345/** @copydoc VBOXHDDBACKEND::pfnGetPCHSGeometry */
5346static int vmdkGetPCHSGeometry(void *pBackendData,
5347 PPDMMEDIAGEOMETRY pPCHSGeometry)
5348{
5349 LogFlowFunc(("pBackendData=%#p pPCHSGeometry=%#p\n", pBackendData, pPCHSGeometry));
5350 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5351 int rc;
5352
5353 AssertPtr(pImage);
5354
5355 if (pImage)
5356 {
5357 if (pImage->PCHSGeometry.cCylinders)
5358 {
5359 *pPCHSGeometry = pImage->PCHSGeometry;
5360 rc = VINF_SUCCESS;
5361 }
5362 else
5363 rc = VERR_VD_GEOMETRY_NOT_SET;
5364 }
5365 else
5366 rc = VERR_VD_NOT_OPENED;
5367
5368 LogFlowFunc(("returns %Rrc (PCHS=%u/%u/%u)\n", rc, pPCHSGeometry->cCylinders, pPCHSGeometry->cHeads, pPCHSGeometry->cSectors));
5369 return rc;
5370}
5371
5372/** @copydoc VBOXHDDBACKEND::pfnSetPCHSGeometry */
5373static int vmdkSetPCHSGeometry(void *pBackendData,
5374 PCPDMMEDIAGEOMETRY pPCHSGeometry)
5375{
5376 LogFlowFunc(("pBackendData=%#p pPCHSGeometry=%#p PCHS=%u/%u/%u\n", pBackendData, pPCHSGeometry, pPCHSGeometry->cCylinders, pPCHSGeometry->cHeads, pPCHSGeometry->cSectors));
5377 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5378 int rc;
5379
5380 AssertPtr(pImage);
5381
5382 if (pImage)
5383 {
5384 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
5385 {
5386 rc = VERR_VD_IMAGE_READ_ONLY;
5387 goto out;
5388 }
5389 rc = vmdkDescSetPCHSGeometry(pImage, pPCHSGeometry);
5390 if (RT_FAILURE(rc))
5391 goto out;
5392
5393 pImage->PCHSGeometry = *pPCHSGeometry;
5394 rc = VINF_SUCCESS;
5395 }
5396 else
5397 rc = VERR_VD_NOT_OPENED;
5398
5399out:
5400 LogFlowFunc(("returns %Rrc\n", rc));
5401 return rc;
5402}
5403
5404/** @copydoc VBOXHDDBACKEND::pfnGetLCHSGeometry */
5405static int vmdkGetLCHSGeometry(void *pBackendData,
5406 PPDMMEDIAGEOMETRY pLCHSGeometry)
5407{
5408 LogFlowFunc(("pBackendData=%#p pLCHSGeometry=%#p\n", pBackendData, pLCHSGeometry));
5409 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5410 int rc;
5411
5412 AssertPtr(pImage);
5413
5414 if (pImage)
5415 {
5416 if (pImage->LCHSGeometry.cCylinders)
5417 {
5418 *pLCHSGeometry = pImage->LCHSGeometry;
5419 rc = VINF_SUCCESS;
5420 }
5421 else
5422 rc = VERR_VD_GEOMETRY_NOT_SET;
5423 }
5424 else
5425 rc = VERR_VD_NOT_OPENED;
5426
5427 LogFlowFunc(("returns %Rrc (LCHS=%u/%u/%u)\n", rc, pLCHSGeometry->cCylinders, pLCHSGeometry->cHeads, pLCHSGeometry->cSectors));
5428 return rc;
5429}
5430
5431/** @copydoc VBOXHDDBACKEND::pfnSetLCHSGeometry */
5432static int vmdkSetLCHSGeometry(void *pBackendData,
5433 PCPDMMEDIAGEOMETRY pLCHSGeometry)
5434{
5435 LogFlowFunc(("pBackendData=%#p pLCHSGeometry=%#p LCHS=%u/%u/%u\n", pBackendData, pLCHSGeometry, pLCHSGeometry->cCylinders, pLCHSGeometry->cHeads, pLCHSGeometry->cSectors));
5436 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5437 int rc;
5438
5439 AssertPtr(pImage);
5440
5441 if (pImage)
5442 {
5443 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
5444 {
5445 rc = VERR_VD_IMAGE_READ_ONLY;
5446 goto out;
5447 }
5448 rc = vmdkDescSetLCHSGeometry(pImage, pLCHSGeometry);
5449 if (RT_FAILURE(rc))
5450 goto out;
5451
5452 pImage->LCHSGeometry = *pLCHSGeometry;
5453 rc = VINF_SUCCESS;
5454 }
5455 else
5456 rc = VERR_VD_NOT_OPENED;
5457
5458out:
5459 LogFlowFunc(("returns %Rrc\n", rc));
5460 return rc;
5461}
5462
5463/** @copydoc VBOXHDDBACKEND::pfnGetImageFlags */
5464static unsigned vmdkGetImageFlags(void *pBackendData)
5465{
5466 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
5467 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5468 unsigned uImageFlags;
5469
5470 AssertPtr(pImage);
5471
5472 if (pImage)
5473 uImageFlags = pImage->uImageFlags;
5474 else
5475 uImageFlags = 0;
5476
5477 LogFlowFunc(("returns %#x\n", uImageFlags));
5478 return uImageFlags;
5479}
5480
5481/** @copydoc VBOXHDDBACKEND::pfnGetOpenFlags */
5482static unsigned vmdkGetOpenFlags(void *pBackendData)
5483{
5484 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
5485 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5486 unsigned uOpenFlags;
5487
5488 AssertPtr(pImage);
5489
5490 if (pImage)
5491 uOpenFlags = pImage->uOpenFlags;
5492 else
5493 uOpenFlags = 0;
5494
5495 LogFlowFunc(("returns %#x\n", uOpenFlags));
5496 return uOpenFlags;
5497}
5498
5499/** @copydoc VBOXHDDBACKEND::pfnSetOpenFlags */
5500static int vmdkSetOpenFlags(void *pBackendData, unsigned uOpenFlags)
5501{
5502     LogFlowFunc(("pBackendData=%#p uOpenFlags=%#x\n", pBackendData, uOpenFlags));
5503 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5504 int rc;
5505
5506     /* Image must be opened and the new flags must be valid. Only the
5507      * read-only, info and async I/O flags are supported. */
5508 if (!pImage || (uOpenFlags & ~(VD_OPEN_FLAGS_READONLY | VD_OPEN_FLAGS_INFO | VD_OPEN_FLAGS_ASYNC_IO)))
5509 {
5510 rc = VERR_INVALID_PARAMETER;
5511 goto out;
5512 }
5513
5514 /* Implement this operation via reopening the image. */
5515 vmdkFreeImage(pImage, false);
5516 rc = vmdkOpenImage(pImage, uOpenFlags);
5517
5518out:
5519 LogFlowFunc(("returns %Rrc\n", rc));
5520 return rc;
5521}
5522
5523/** @copydoc VBOXHDDBACKEND::pfnGetComment */
5524static int vmdkGetComment(void *pBackendData, char *pszComment,
5525 size_t cbComment)
5526{
5527 LogFlowFunc(("pBackendData=%#p pszComment=%#p cbComment=%zu\n", pBackendData, pszComment, cbComment));
5528 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5529 int rc;
5530
5531 AssertPtr(pImage);
5532
5533 if (pImage)
5534 {
5535 const char *pszCommentEncoded = NULL;
5536 rc = vmdkDescDDBGetStr(pImage, &pImage->Descriptor,
5537 "ddb.comment", &pszCommentEncoded);
5538 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
5539 pszCommentEncoded = NULL;
5540 else if (RT_FAILURE(rc))
5541 goto out;
5542
5543 if (pszComment && pszCommentEncoded)
5544 rc = vmdkDecodeString(pszCommentEncoded, pszComment, cbComment);
5545 else
5546 {
5547 if (pszComment)
5548 *pszComment = '\0';
5549 rc = VINF_SUCCESS;
5550 }
5551 if (pszCommentEncoded)
5552 RTStrFree((char *)(void *)pszCommentEncoded);
5553 }
5554 else
5555 rc = VERR_VD_NOT_OPENED;
5556
5557out:
5558 LogFlowFunc(("returns %Rrc comment='%s'\n", rc, pszComment));
5559 return rc;
5560}
5561
5562/** @copydoc VBOXHDDBACKEND::pfnSetComment */
5563static int vmdkSetComment(void *pBackendData, const char *pszComment)
5564{
5565 LogFlowFunc(("pBackendData=%#p pszComment=\"%s\"\n", pBackendData, pszComment));
5566 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5567 int rc;
5568
5569 AssertPtr(pImage);
5570
5571 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
5572 {
5573 rc = VERR_VD_IMAGE_READ_ONLY;
5574 goto out;
5575 }
5576
5577 if (pImage)
5578 rc = vmdkSetImageComment(pImage, pszComment);
5579 else
5580 rc = VERR_VD_NOT_OPENED;
5581
5582out:
5583 LogFlowFunc(("returns %Rrc\n", rc));
5584 return rc;
5585}
5586
5587/** @copydoc VBOXHDDBACKEND::pfnGetUuid */
5588static int vmdkGetUuid(void *pBackendData, PRTUUID pUuid)
5589{
5590 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
5591 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5592 int rc;
5593
5594 AssertPtr(pImage);
5595
5596 if (pImage)
5597 {
5598 *pUuid = pImage->ImageUuid;
5599 rc = VINF_SUCCESS;
5600 }
5601 else
5602 rc = VERR_VD_NOT_OPENED;
5603
5604 LogFlowFunc(("returns %Rrc (%RTuuid)\n", rc, pUuid));
5605 return rc;
5606}
5607
5608/** @copydoc VBOXHDDBACKEND::pfnSetUuid */
5609static int vmdkSetUuid(void *pBackendData, PCRTUUID pUuid)
5610{
5611 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
5612 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5613 int rc;
5614
5615 LogFlowFunc(("%RTuuid\n", pUuid));
5616 AssertPtr(pImage);
5617
5618 if (pImage)
5619 {
5620 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
5621 {
5622 pImage->ImageUuid = *pUuid;
5623 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
5624 VMDK_DDB_IMAGE_UUID, pUuid);
5625 if (RT_FAILURE(rc))
5626 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing image UUID in descriptor in '%s'"), pImage->pszFilename);
5627 rc = VINF_SUCCESS;
5628 }
5629 else
5630 rc = VERR_VD_IMAGE_READ_ONLY;
5631 }
5632 else
5633 rc = VERR_VD_NOT_OPENED;
5634
5635 LogFlowFunc(("returns %Rrc\n", rc));
5636 return rc;
5637}
5638
5639/** @copydoc VBOXHDDBACKEND::pfnGetModificationUuid */
5640static int vmdkGetModificationUuid(void *pBackendData, PRTUUID pUuid)
5641{
5642 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
5643 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5644 int rc;
5645
5646 AssertPtr(pImage);
5647
5648 if (pImage)
5649 {
5650 *pUuid = pImage->ModificationUuid;
5651 rc = VINF_SUCCESS;
5652 }
5653 else
5654 rc = VERR_VD_NOT_OPENED;
5655
5656 LogFlowFunc(("returns %Rrc (%RTuuid)\n", rc, pUuid));
5657 return rc;
5658}
5659
5660/** @copydoc VBOXHDDBACKEND::pfnSetModificationUuid */
5661static int vmdkSetModificationUuid(void *pBackendData, PCRTUUID pUuid)
5662{
5663 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
5664 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5665 int rc;
5666
5667 AssertPtr(pImage);
5668
5669 if (pImage)
5670 {
5671 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
5672 {
5673 /*
5674 * Only change the modification uuid if it changed.
5675              * Avoids a lot of unnecessary 1-byte writes during
5676 * vmdkFlush.
5677 */
5678 if (RTUuidCompare(&pImage->ModificationUuid, pUuid))
5679 {
5680 pImage->ModificationUuid = *pUuid;
5681 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
5682 VMDK_DDB_MODIFICATION_UUID, pUuid);
5683 if (RT_FAILURE(rc))
5684 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing modification UUID in descriptor in '%s'"), pImage->pszFilename);
5685 }
5686 rc = VINF_SUCCESS;
5687 }
5688 else
5689 rc = VERR_VD_IMAGE_READ_ONLY;
5690 }
5691 else
5692 rc = VERR_VD_NOT_OPENED;
5693
5694 LogFlowFunc(("returns %Rrc\n", rc));
5695 return rc;
5696}
5697
5698/** @copydoc VBOXHDDBACKEND::pfnGetParentUuid */
5699static int vmdkGetParentUuid(void *pBackendData, PRTUUID pUuid)
5700{
5701 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
5702 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5703 int rc;
5704
5705 AssertPtr(pImage);
5706
5707 if (pImage)
5708 {
5709 *pUuid = pImage->ParentUuid;
5710 rc = VINF_SUCCESS;
5711 }
5712 else
5713 rc = VERR_VD_NOT_OPENED;
5714
5715 LogFlowFunc(("returns %Rrc (%RTuuid)\n", rc, pUuid));
5716 return rc;
5717}
5718
5719/** @copydoc VBOXHDDBACKEND::pfnSetParentUuid */
5720static int vmdkSetParentUuid(void *pBackendData, PCRTUUID pUuid)
5721{
5722 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
5723 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5724 int rc;
5725
5726 AssertPtr(pImage);
5727
5728 if (pImage)
5729 {
5730 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
5731 {
5732 pImage->ParentUuid = *pUuid;
5733 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
5734 VMDK_DDB_PARENT_UUID, pUuid);
5735 if (RT_FAILURE(rc))
5736 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing parent image UUID in descriptor in '%s'"), pImage->pszFilename);
5737 rc = VINF_SUCCESS;
5738 }
5739 else
5740 rc = VERR_VD_IMAGE_READ_ONLY;
5741 }
5742 else
5743 rc = VERR_VD_NOT_OPENED;
5744
5745 LogFlowFunc(("returns %Rrc\n", rc));
5746 return rc;
5747}
5748
5749/** @copydoc VBOXHDDBACKEND::pfnGetParentModificationUuid */
5750static int vmdkGetParentModificationUuid(void *pBackendData, PRTUUID pUuid)
5751{
5752 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
5753 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5754 int rc;
5755
5756 AssertPtr(pImage);
5757
5758 if (pImage)
5759 {
5760 *pUuid = pImage->ParentModificationUuid;
5761 rc = VINF_SUCCESS;
5762 }
5763 else
5764 rc = VERR_VD_NOT_OPENED;
5765
5766 LogFlowFunc(("returns %Rrc (%RTuuid)\n", rc, pUuid));
5767 return rc;
5768}
5769
5770/** @copydoc VBOXHDDBACKEND::pfnSetParentModificationUuid */
5771static int vmdkSetParentModificationUuid(void *pBackendData, PCRTUUID pUuid)
5772{
5773 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
5774 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5775 int rc;
5776
5777 AssertPtr(pImage);
5778
5779 if (pImage)
5780 {
5781 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
5782 {
5783 pImage->ParentModificationUuid = *pUuid;
5784 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
5785 VMDK_DDB_PARENT_MODIFICATION_UUID, pUuid);
5786 if (RT_FAILURE(rc))
5787                 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing parent modification UUID in descriptor in '%s'"), pImage->pszFilename);
5788 rc = VINF_SUCCESS;
5789 }
5790 else
5791 rc = VERR_VD_IMAGE_READ_ONLY;
5792 }
5793 else
5794 rc = VERR_VD_NOT_OPENED;
5795
5796 LogFlowFunc(("returns %Rrc\n", rc));
5797 return rc;
5798}
5799
5800/** @copydoc VBOXHDDBACKEND::pfnDump */
5801static void vmdkDump(void *pBackendData)
5802{
5803 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5804
5805 AssertPtr(pImage);
5806 if (pImage)
5807 {
5808 pImage->pInterfaceErrorCallbacks->pfnMessage(pImage->pInterfaceError->pvUser, "Header: Geometry PCHS=%u/%u/%u LCHS=%u/%u/%u cbSector=%llu\n",
5809 pImage->PCHSGeometry.cCylinders, pImage->PCHSGeometry.cHeads, pImage->PCHSGeometry.cSectors,
5810 pImage->LCHSGeometry.cCylinders, pImage->LCHSGeometry.cHeads, pImage->LCHSGeometry.cSectors,
5811 VMDK_BYTE2SECTOR(pImage->cbSize));
5812 pImage->pInterfaceErrorCallbacks->pfnMessage(pImage->pInterfaceError->pvUser, "Header: uuidCreation={%RTuuid}\n", &pImage->ImageUuid);
5813 pImage->pInterfaceErrorCallbacks->pfnMessage(pImage->pInterfaceError->pvUser, "Header: uuidModification={%RTuuid}\n", &pImage->ModificationUuid);
5814 pImage->pInterfaceErrorCallbacks->pfnMessage(pImage->pInterfaceError->pvUser, "Header: uuidParent={%RTuuid}\n", &pImage->ParentUuid);
5815 pImage->pInterfaceErrorCallbacks->pfnMessage(pImage->pInterfaceError->pvUser, "Header: uuidParentModification={%RTuuid}\n", &pImage->ParentModificationUuid);
5816 }
5817}
5818
5819
5820static int vmdkGetTimeStamp(void *pvBackendData, PRTTIMESPEC pTimeStamp)
5821{
5822 int rc = VERR_NOT_IMPLEMENTED;
5823 LogFlow(("%s: returned %Rrc\n", __FUNCTION__, rc));
5824 return rc;
5825}
5826
5827static int vmdkGetParentTimeStamp(void *pvBackendData, PRTTIMESPEC pTimeStamp)
5828{
5829 int rc = VERR_NOT_IMPLEMENTED;
5830 LogFlow(("%s: returned %Rrc\n", __FUNCTION__, rc));
5831 return rc;
5832}
5833
5834static int vmdkSetParentTimeStamp(void *pvBackendData, PCRTTIMESPEC pTimeStamp)
5835{
5836 int rc = VERR_NOT_IMPLEMENTED;
5837 LogFlow(("%s: returned %Rrc\n", __FUNCTION__, rc));
5838 return rc;
5839}
5840
5841static int vmdkGetParentFilename(void *pvBackendData, char **ppszParentFilename)
5842{
5843 int rc = VERR_NOT_IMPLEMENTED;
5844 LogFlow(("%s: returned %Rrc\n", __FUNCTION__, rc));
5845 return rc;
5846}
5847
5848static int vmdkSetParentFilename(void *pvBackendData, const char *pszParentFilename)
5849{
5850 int rc = VERR_NOT_IMPLEMENTED;
5851 LogFlow(("%s: returned %Rrc\n", __FUNCTION__, rc));
5852 return rc;
5853}
5854
5855static bool vmdkIsAsyncIOSupported(void *pvBackendData)
5856{
5857 PVMDKIMAGE pImage = (PVMDKIMAGE)pvBackendData;
5858 bool fAsyncIOSupported = false;
5859
5860 if (pImage)
5861 {
5862 unsigned cFlatExtents = 0;
5863
5864         /* Async I/O is only supported if the image consists solely of FLAT, VMFS or ZERO extents.
5865          *
5866          * @todo: At the moment async I/O is only supported if there is at most one FLAT extent;
5867          * more than one doesn't work yet with the async I/O interface.
5868          */
5869 fAsyncIOSupported = true;
5870 for (unsigned i = 0; i < pImage->cExtents; i++)
5871 {
5872 if (( pImage->pExtents[i].enmType != VMDKETYPE_FLAT
5873 && pImage->pExtents[i].enmType != VMDKETYPE_ZERO
5874 && pImage->pExtents[i].enmType != VMDKETYPE_VMFS)
5875 || ((pImage->pExtents[i].enmType == VMDKETYPE_FLAT) && (cFlatExtents > 0)))
5876 {
5877 fAsyncIOSupported = false;
5878 break; /* Stop search */
5879 }
5880 if (pImage->pExtents[i].enmType == VMDKETYPE_FLAT)
5881 cFlatExtents++;
5882 }
5883 }
5884
5885 return fAsyncIOSupported;
5886}
5887
5888static int vmdkAsyncRead(void *pvBackendData, uint64_t uOffset, size_t cbRead,
5889 PPDMDATASEG paSeg, unsigned cSeg, void *pvUser)
5890{
5891 PVMDKIMAGE pImage = (PVMDKIMAGE)pvBackendData;
5892 PVMDKEXTENT pExtent = NULL;
5893 int rc = VINF_SUCCESS;
5894 unsigned cSegments = 0;
5895 PPDMDATASEG paSegCurrent = paSeg;
5896 size_t cbLeftInCurrentSegment = paSegCurrent->cbSeg;
5897 size_t uOffsetInCurrentSegment = 0;
5898 size_t cbReadLeft = cbRead;
5899 uint64_t uOffCurr = uOffset;
5900
5901 AssertPtr(pImage);
5902 Assert(uOffset % 512 == 0);
5903 Assert(cbRead % 512 == 0);
5904
5905 if ( uOffset + cbRead > pImage->cbSize
5906 || cbRead == 0)
5907 {
5908 rc = VERR_INVALID_PARAMETER;
5909 goto out;
5910 }
5911
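    /* Walk the request, splitting it at extent and segment boundaries, and collect
     * the chunks falling into FLAT/VMFS extents in pImage->paSegments as a
     * scatter/gather list for the async I/O backend; ZERO extents add no entries. */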
5912 while (cbReadLeft && cSeg)
5913 {
5914 size_t cbToRead;
5915 uint64_t uSectorExtentRel;
5916
5917 rc = vmdkFindExtent(pImage, VMDK_BYTE2SECTOR(uOffCurr),
5918 &pExtent, &uSectorExtentRel);
5919 if (RT_FAILURE(rc))
5920 goto out;
5921
5922 /* Check access permissions as defined in the extent descriptor. */
5923 if (pExtent->enmAccess == VMDKACCESS_NOACCESS)
5924 {
5925 rc = VERR_VD_VMDK_INVALID_STATE;
5926 goto out;
5927 }
5928
5929 /* Clip read range to remain in this extent. */
5930 cbToRead = RT_MIN(cbRead, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
5931         /* Clip read range to remain within the current data segment. */
5932 cbToRead = RT_MIN(cbToRead, cbLeftInCurrentSegment);
5933
5934 switch (pExtent->enmType)
5935 {
5936 case VMDKETYPE_VMFS:
5937 case VMDKETYPE_FLAT:
5938 {
5939 /* Check for enough room first. */
5940 if (RT_UNLIKELY(cSegments >= pImage->cSegments))
5941 {
5942 /* We reached maximum, resize array. Try to realloc memory first. */
5943 PPDMDATASEG paSegmentsNew = (PPDMDATASEG)RTMemRealloc(pImage->paSegments, (cSegments + 10)*sizeof(PDMDATASEG));
5944
5945 if (!paSegmentsNew)
5946 {
5947 /* We failed. Allocate completely new. */
5948 paSegmentsNew = (PPDMDATASEG)RTMemAllocZ((cSegments + 10)* sizeof(PDMDATASEG));
5949 if (!paSegmentsNew)
5950 {
5951 /* Damn, we are out of memory. */
5952 rc = VERR_NO_MEMORY;
5953 goto out;
5954 }
5955
5956 /* Copy task handles over. */
5957 for (unsigned i = 0; i < cSegments; i++)
5958 paSegmentsNew[i] = pImage->paSegments[i];
5959
5960 /* Free old memory. */
5961 RTMemFree(pImage->paSegments);
5962 }
5963
5964 pImage->cSegments = cSegments + 10;
5965 pImage->paSegments = paSegmentsNew;
5966 }
5967
5968 pImage->paSegments[cSegments].cbSeg = cbToRead;
5969 pImage->paSegments[cSegments].pvSeg = (uint8_t *)paSegCurrent->pvSeg + uOffsetInCurrentSegment;
5970 cSegments++;
5971 break;
5972 }
5973 case VMDKETYPE_ZERO:
5974 /* Nothing left to do. */
5975 break;
5976 default:
5977 AssertMsgFailed(("Unsupported extent type %u\n", pExtent->enmType));
5978 }
5979
5980 cbReadLeft -= cbToRead;
5981 uOffCurr += cbToRead;
5982 cbLeftInCurrentSegment -= cbToRead;
5983 uOffsetInCurrentSegment += cbToRead;
5984 /* Go to next extent if there is no space left in current one. */
5985 if (!cbLeftInCurrentSegment)
5986 {
5987 uOffsetInCurrentSegment = 0;
5988 paSegCurrent++;
5989 cSeg--;
5990 cbLeftInCurrentSegment = paSegCurrent->cbSeg;
5991 }
5992 }
5993
5994     AssertMsg(cbReadLeft == 0, ("No segment left but there is still data to read\n"));
5995
5996 if (cSegments == 0)
5997 {
5998         /* The request was completely within a ZERO extent, nothing to do. */
5999 rc = VINF_VD_ASYNC_IO_FINISHED;
6000 }
6001 else
6002 {
6003         /* Start the read */
6004 void *pTask;
6005 rc = pImage->pInterfaceAsyncIOCallbacks->pfnReadAsync(pImage->pInterfaceAsyncIO->pvUser,
6006 pExtent->pFile->pStorage, uOffset,
6007 pImage->paSegments, cSegments, cbRead,
6008 pvUser, &pTask);
6009 }
6010
6011out:
6012 LogFlowFunc(("returns %Rrc\n", rc));
6013 return rc;
6014}
6015
6016static int vmdkAsyncWrite(void *pvBackendData, uint64_t uOffset, size_t cbWrite,
6017 PPDMDATASEG paSeg, unsigned cSeg, void *pvUser)
6018{
6019 PVMDKIMAGE pImage = (PVMDKIMAGE)pvBackendData;
6020 PVMDKEXTENT pExtent = NULL;
6021 int rc = VINF_SUCCESS;
6022 unsigned cSegments = 0;
6023 PPDMDATASEG paSegCurrent = paSeg;
6024 size_t cbLeftInCurrentSegment = paSegCurrent->cbSeg;
6025 size_t uOffsetInCurrentSegment = 0;
6026 size_t cbWriteLeft = cbWrite;
6027 uint64_t uOffCurr = uOffset;
6028
6029 AssertPtr(pImage);
6030 Assert(uOffset % 512 == 0);
6031 Assert(cbWrite % 512 == 0);
6032
6033 if ( uOffset + cbWrite > pImage->cbSize
6034 || cbWrite == 0)
6035 {
6036 rc = VERR_INVALID_PARAMETER;
6037 goto out;
6038 }
6039
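    /* Same splitting as in vmdkAsyncRead: build a scatter/gather list covering the
     * FLAT/VMFS parts of the request; chunks hitting ZERO extents are simply skipped. */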
6040 while (cbWriteLeft && cSeg)
6041 {
6042 size_t cbToWrite;
6043 uint64_t uSectorExtentRel;
6044
6045 rc = vmdkFindExtent(pImage, VMDK_BYTE2SECTOR(uOffCurr),
6046 &pExtent, &uSectorExtentRel);
6047 if (RT_FAILURE(rc))
6048 goto out;
6049
6050 /* Check access permissions as defined in the extent descriptor. */
6051 if (pExtent->enmAccess == VMDKACCESS_NOACCESS)
6052 {
6053 rc = VERR_VD_VMDK_INVALID_STATE;
6054 goto out;
6055 }
6056
6057 /* Clip write range to remain in this extent. */
6058 cbToWrite = RT_MIN(cbWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
6059         /* Clip write range to remain within the current data segment. */
6060 cbToWrite = RT_MIN(cbToWrite, cbLeftInCurrentSegment);
6061
6062 switch (pExtent->enmType)
6063 {
6064 case VMDKETYPE_VMFS:
6065 case VMDKETYPE_FLAT:
6066 {
6067 /* Check for enough room first. */
6068 if (RT_UNLIKELY(cSegments >= pImage->cSegments))
6069 {
6070 /* We reached maximum, resize array. Try to realloc memory first. */
6071 PPDMDATASEG paSegmentsNew = (PPDMDATASEG)RTMemRealloc(pImage->paSegments, (cSegments + 10)*sizeof(PDMDATASEG));
6072
6073 if (!paSegmentsNew)
6074 {
6075 /* We failed. Allocate completely new. */
6076 paSegmentsNew = (PPDMDATASEG)RTMemAllocZ((cSegments + 10)* sizeof(PDMDATASEG));
6077 if (!paSegmentsNew)
6078 {
6079 /* Damn, we are out of memory. */
6080 rc = VERR_NO_MEMORY;
6081 goto out;
6082 }
6083
6084 /* Copy task handles over. */
6085 for (unsigned i = 0; i < cSegments; i++)
6086 paSegmentsNew[i] = pImage->paSegments[i];
6087
6088 /* Free old memory. */
6089 RTMemFree(pImage->paSegments);
6090 }
6091
6092 pImage->cSegments = cSegments + 10;
6093 pImage->paSegments = paSegmentsNew;
6094 }
6095
6096 pImage->paSegments[cSegments].cbSeg = cbToWrite;
6097 pImage->paSegments[cSegments].pvSeg = (uint8_t *)paSegCurrent->pvSeg + uOffsetInCurrentSegment;
6098 cSegments++;
6099 break;
6100 }
6101 case VMDKETYPE_ZERO:
6102 /* Nothing left to do. */
6103 break;
6104 default:
6105 AssertMsgFailed(("Unsupported extent type %u\n", pExtent->enmType));
6106 }
6107
6108 cbWriteLeft -= cbToWrite;
6109 uOffCurr += cbToWrite;
6110 cbLeftInCurrentSegment -= cbToWrite;
6111 uOffsetInCurrentSegment += cbToWrite;
6112 /* Go to next extent if there is no space left in current one. */
6113 if (!cbLeftInCurrentSegment)
6114 {
6115 uOffsetInCurrentSegment = 0;
6116 paSegCurrent++;
6117 cSeg--;
6118 cbLeftInCurrentSegment = paSegCurrent->cbSeg;
6119 }
6120 }
6121
6122 AssertMsg(cbWriteLeft == 0, ("No segment left but there is still data to write\n"));
6123
6124 if (cSegments == 0)
6125 {
6126         /* The request was completely within a ZERO extent, nothing to do. */
6127 rc = VINF_VD_ASYNC_IO_FINISHED;
6128 }
6129 else
6130 {
6131 /* Start the write */
6132 void *pTask;
6133 rc = pImage->pInterfaceAsyncIOCallbacks->pfnWriteAsync(pImage->pInterfaceAsyncIO->pvUser,
6134 pExtent->pFile->pStorage, uOffset,
6135 pImage->paSegments, cSegments, cbWrite,
6136 pvUser, &pTask);
6137 }
6138
6139out:
6140 LogFlowFunc(("returns %Rrc\n", rc));
6141 return rc;
6142}
6143
6144
6145VBOXHDDBACKEND g_VmdkBackend =
6146{
6147 /* pszBackendName */
6148 "VMDK",
6149 /* cbSize */
6150 sizeof(VBOXHDDBACKEND),
6151 /* uBackendCaps */
6152 VD_CAP_UUID | VD_CAP_CREATE_FIXED | VD_CAP_CREATE_DYNAMIC
6153     | VD_CAP_CREATE_SPLIT_2G | VD_CAP_DIFF | VD_CAP_FILE | VD_CAP_ASYNC,
6154 /* papszFileExtensions */
6155 s_apszVmdkFileExtensions,
6156 /* paConfigInfo */
6157 NULL,
6158 /* hPlugin */
6159 NIL_RTLDRMOD,
6160 /* pfnCheckIfValid */
6161 vmdkCheckIfValid,
6162 /* pfnOpen */
6163 vmdkOpen,
6164 /* pfnCreate */
6165 vmdkCreate,
6166 /* pfnRename */
6167 vmdkRename,
6168 /* pfnClose */
6169 vmdkClose,
6170 /* pfnRead */
6171 vmdkRead,
6172 /* pfnWrite */
6173 vmdkWrite,
6174 /* pfnFlush */
6175 vmdkFlush,
6176 /* pfnGetVersion */
6177 vmdkGetVersion,
6178 /* pfnGetSize */
6179 vmdkGetSize,
6180 /* pfnGetFileSize */
6181 vmdkGetFileSize,
6182 /* pfnGetPCHSGeometry */
6183 vmdkGetPCHSGeometry,
6184 /* pfnSetPCHSGeometry */
6185 vmdkSetPCHSGeometry,
6186 /* pfnGetLCHSGeometry */
6187 vmdkGetLCHSGeometry,
6188 /* pfnSetLCHSGeometry */
6189 vmdkSetLCHSGeometry,
6190 /* pfnGetImageFlags */
6191 vmdkGetImageFlags,
6192 /* pfnGetOpenFlags */
6193 vmdkGetOpenFlags,
6194 /* pfnSetOpenFlags */
6195 vmdkSetOpenFlags,
6196 /* pfnGetComment */
6197 vmdkGetComment,
6198 /* pfnSetComment */
6199 vmdkSetComment,
6200 /* pfnGetUuid */
6201 vmdkGetUuid,
6202 /* pfnSetUuid */
6203 vmdkSetUuid,
6204 /* pfnGetModificationUuid */
6205 vmdkGetModificationUuid,
6206 /* pfnSetModificationUuid */
6207 vmdkSetModificationUuid,
6208 /* pfnGetParentUuid */
6209 vmdkGetParentUuid,
6210 /* pfnSetParentUuid */
6211 vmdkSetParentUuid,
6212 /* pfnGetParentModificationUuid */
6213 vmdkGetParentModificationUuid,
6214 /* pfnSetParentModificationUuid */
6215 vmdkSetParentModificationUuid,
6216 /* pfnDump */
6217 vmdkDump,
6218 /* pfnGetTimeStamp */
6219 vmdkGetTimeStamp,
6220 /* pfnGetParentTimeStamp */
6221 vmdkGetParentTimeStamp,
6222 /* pfnSetParentTimeStamp */
6223 vmdkSetParentTimeStamp,
6224 /* pfnGetParentFilename */
6225 vmdkGetParentFilename,
6226 /* pfnSetParentFilename */
6227 vmdkSetParentFilename,
6228 /* pfnIsAsyncIOSupported */
6229 vmdkIsAsyncIOSupported,
6230 /* pfnAsyncRead */
6231 vmdkAsyncRead,
6232 /* pfnAsyncWrite */
6233 vmdkAsyncWrite,
6234 /* pfnComposeLocation */
6235 genericFileComposeLocation,
6236 /* pfnComposeName */
6237 genericFileComposeName
6238};