VirtualBox

source: vbox/trunk/src/VBox/Devices/Storage/VmdkHDDCore.cpp@27735

Last change on this file since 27735 was 27735, checked in by vboxsync, 15 years ago

VMDK: Don't try to read more bytes from the descriptor than the filesize

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 225.1 KB
 
1/* $Id: VmdkHDDCore.cpp 27735 2010-03-26 12:56:00Z vboxsync $ */
2/** @file
3 * VMDK Disk image, Core Code.
4 */
5
6/*
7 * Copyright (C) 2006-2010 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22/*******************************************************************************
23* Header Files *
24*******************************************************************************/
25#define LOG_GROUP LOG_GROUP_VD_VMDK
26#include <VBox/VBoxHDD-Plugin.h>
27#include <VBox/err.h>
28
29#include <VBox/log.h>
30#include <iprt/assert.h>
31#include <iprt/alloc.h>
32#include <iprt/uuid.h>
33#include <iprt/file.h>
34#include <iprt/path.h>
35#include <iprt/string.h>
36#include <iprt/rand.h>
37#include <iprt/zip.h>
38
39
40/*******************************************************************************
41* Constants And Macros, Structures and Typedefs *
42*******************************************************************************/
43
44/** Maximum encoded string size (including NUL) we allow for VMDK images.
45 * Deliberately not set high to avoid running out of descriptor space. */
46#define VMDK_ENCODED_COMMENT_MAX 1024
47
48/** VMDK descriptor DDB entry for PCHS cylinders. */
49#define VMDK_DDB_GEO_PCHS_CYLINDERS "ddb.geometry.cylinders"
50
51/** VMDK descriptor DDB entry for PCHS heads. */
52#define VMDK_DDB_GEO_PCHS_HEADS "ddb.geometry.heads"
53
54/** VMDK descriptor DDB entry for PCHS sectors. */
55#define VMDK_DDB_GEO_PCHS_SECTORS "ddb.geometry.sectors"
56
57/** VMDK descriptor DDB entry for LCHS cylinders. */
58#define VMDK_DDB_GEO_LCHS_CYLINDERS "ddb.geometry.biosCylinders"
59
60/** VMDK descriptor DDB entry for LCHS heads. */
61#define VMDK_DDB_GEO_LCHS_HEADS "ddb.geometry.biosHeads"
62
63/** VMDK descriptor DDB entry for LCHS sectors. */
64#define VMDK_DDB_GEO_LCHS_SECTORS "ddb.geometry.biosSectors"
65
66/** VMDK descriptor DDB entry for image UUID. */
67#define VMDK_DDB_IMAGE_UUID "ddb.uuid.image"
68
69/** VMDK descriptor DDB entry for image modification UUID. */
70#define VMDK_DDB_MODIFICATION_UUID "ddb.uuid.modification"
71
72/** VMDK descriptor DDB entry for parent image UUID. */
73#define VMDK_DDB_PARENT_UUID "ddb.uuid.parent"
74
75/** VMDK descriptor DDB entry for parent image modification UUID. */
76#define VMDK_DDB_PARENT_MODIFICATION_UUID "ddb.uuid.parentmodification"
77
78/** No compression for streamOptimized files. */
79#define VMDK_COMPRESSION_NONE 0
80
81/** Deflate compression for streamOptimized files. */
82#define VMDK_COMPRESSION_DEFLATE 1
83
84/** Marker that the actual GD value is stored in the footer. */
85#define VMDK_GD_AT_END 0xffffffffffffffffULL
86
87/** Marker for end-of-stream in streamOptimized images. */
88#define VMDK_MARKER_EOS 0
89
90/** Marker for grain table block in streamOptimized images. */
91#define VMDK_MARKER_GT 1
92
93/** Marker for grain directory block in streamOptimized images. */
94#define VMDK_MARKER_GD 2
95
96/** Marker for footer in streamOptimized images. */
97#define VMDK_MARKER_FOOTER 3
98
99/** Dummy marker for "don't check the marker value". */
100#define VMDK_MARKER_IGNORE 0xffffffffU
101
102/**
103 * Magic number for hosted images created by VMware Workstation 4, VMware
104 * Workstation 5, VMware Server or VMware Player. Not necessarily sparse.
105 */
106#define VMDK_SPARSE_MAGICNUMBER 0x564d444b /* 'V' 'M' 'D' 'K' */
107
108/**
109 * VMDK hosted binary extent header. The "Sparse" is a total misnomer, as
110 * this header is also used for monolithic flat images.
111 */
112#pragma pack(1)
113typedef struct SparseExtentHeader
114{
115 uint32_t magicNumber;
116 uint32_t version;
117 uint32_t flags;
118 uint64_t capacity;
119 uint64_t grainSize;
120 uint64_t descriptorOffset;
121 uint64_t descriptorSize;
122 uint32_t numGTEsPerGT;
123 uint64_t rgdOffset;
124 uint64_t gdOffset;
125 uint64_t overHead;
126 bool uncleanShutdown;
127 char singleEndLineChar;
128 char nonEndLineChar;
129 char doubleEndLineChar1;
130 char doubleEndLineChar2;
131 uint16_t compressAlgorithm;
132 uint8_t pad[433];
133} SparseExtentHeader;
134#pragma pack()
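
A minimal sketch of reading and sanity-checking such a header, using the VMDKFILE handle and the vmdkFileReadAt() helper defined further below (the function name is made up for illustration; the on-disk fields are little-endian, hence the RT_LE2H_* conversions):

static int vmdkSketchReadSparseHeader(PVMDKFILE pFile, SparseExtentHeader *pHeader,
                                      bool *pfGDInFooter)
{
    /* The hosted sparse header sits at the very beginning of the extent file. */
    int rc = vmdkFileReadAt(pFile, 0, pHeader, sizeof(*pHeader), NULL);
    if (RT_FAILURE(rc))
        return rc;
    if (RT_LE2H_U32(pHeader->magicNumber) != VMDK_SPARSE_MAGICNUMBER)
        return VERR_VD_VMDK_INVALID_HEADER;
    /* VMDK_GD_AT_END means the real grain directory offset must be taken
     * from the footer of a streamOptimized image. */
    *pfGDInFooter = RT_LE2H_U64(pHeader->gdOffset) == VMDK_GD_AT_END;
    return VINF_SUCCESS;
}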
135
136/** VMDK capacity for a single chunk when 2G splitting is turned on. Should be
137 * divisible by the default grain size (64K) */
138#define VMDK_2G_SPLIT_SIZE (2047 * 1024 * 1024)
139
140/** VMDK streamOptimized file format marker. The type field may or may not
141 * be actually valid, but there's always data to read there. */
142#pragma pack(1)
143typedef struct VMDKMARKER
144{
145 uint64_t uSector;
146 uint32_t cbSize;
147 uint32_t uType;
148} VMDKMARKER;
149#pragma pack()
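
In a streamOptimized extent this marker precedes every block. For a data grain, uSector holds the guest LBA and cbSize the number of compressed bytes, with the compressed payload following immediately at byte offset 12; for GT/GD/footer metadata blocks cbSize is 0 and uType identifies the block. A rough on-disk picture (payload size invented for illustration):

/*
 *  offset  0   uint64_t uSector    grain LBA in 512-byte sectors
 *  offset  8   uint32_t cbSize     e.g. 987 compressed bytes
 *  offset 12   uint8_t  data[987]  deflate-compressed grain payload
 *  offset 999  zero padding up to the next 512-byte boundary
 *
 * vmdkFileInflateAt()/vmdkFileDeflateAt() below read and write this layout.
 */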
150
151
152#ifdef VBOX_WITH_VMDK_ESX
153
154/** @todo the ESX code is not tested, not used, and lacks error messages. */
155
156/**
157 * Magic number for images created by VMware GSX Server 3 or ESX Server 3.
158 */
159#define VMDK_ESX_SPARSE_MAGICNUMBER 0x44574f43 /* 'C' 'O' 'W' 'D' */
160
161#pragma pack(1)
162typedef struct COWDisk_Header
163{
164 uint32_t magicNumber;
165 uint32_t version;
166 uint32_t flags;
167 uint32_t numSectors;
168 uint32_t grainSize;
169 uint32_t gdOffset;
170 uint32_t numGDEntries;
171 uint32_t freeSector;
172 /* The spec incompletely documents quite a few further fields, but states
173 * that they are unused by the current format. Replace them by padding. */
174 char reserved1[1604];
175 uint32_t savedGeneration;
176 char reserved2[8];
177 uint32_t uncleanShutdown;
178 char padding[396];
179} COWDisk_Header;
180#pragma pack()
181#endif /* VBOX_WITH_VMDK_ESX */
182
183
184/** Convert sector number/size to byte offset/size. */
185#define VMDK_SECTOR2BYTE(u) ((uint64_t)(u) << 9)
186
187/** Convert byte offset/size to sector number/size. */
188#define VMDK_BYTE2SECTOR(u) ((u) >> 9)
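
Both macros assume the fixed 512-byte sector size VMDK uses everywhere, for example:

uint64_t cSectors = VMDK_BYTE2SECTOR(_1M);      /* 1 MiB       -> 2048 sectors   */
uint64_t cbBytes  = VMDK_SECTOR2BYTE(cSectors); /* 2048 sectors -> 1048576 bytes */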
189
190/**
191 * VMDK extent type.
192 */
193typedef enum VMDKETYPE
194{
195 /** Hosted sparse extent. */
196 VMDKETYPE_HOSTED_SPARSE = 1,
197 /** Flat extent. */
198 VMDKETYPE_FLAT,
199 /** Zero extent. */
200 VMDKETYPE_ZERO,
201 /** VMFS extent, used by ESX. */
202 VMDKETYPE_VMFS
203#ifdef VBOX_WITH_VMDK_ESX
204 ,
205 /** ESX sparse extent. */
206 VMDKETYPE_ESX_SPARSE
207#endif /* VBOX_WITH_VMDK_ESX */
208} VMDKETYPE, *PVMDKETYPE;
209
210/**
211 * VMDK access type for an extent.
212 */
213typedef enum VMDKACCESS
214{
215 /** No access allowed. */
216 VMDKACCESS_NOACCESS = 0,
217 /** Read-only access. */
218 VMDKACCESS_READONLY,
219 /** Read-write access. */
220 VMDKACCESS_READWRITE
221} VMDKACCESS, *PVMDKACCESS;
222
223/** Forward declaration for PVMDKIMAGE. */
224typedef struct VMDKIMAGE *PVMDKIMAGE;
225
226/**
227 * Extent file entry. Used for opening a particular file only once.
228 */
229typedef struct VMDKFILE
230{
231 /** Pointer to filename. Local copy. */
232 const char *pszFilename;
233 /** File open flags for consistency checking. */
234 unsigned fOpen;
235 /** File handle. */
236 RTFILE File;
237 /** Handle for asynchronous access if requested. */
238 void *pStorage;
239 /** Flag whether to use File or pStorage. */
240 bool fAsyncIO;
241 /** Reference counter. */
242 unsigned uReferences;
243 /** Flag whether the file should be deleted on last close. */
244 bool fDelete;
245 /** Pointer to the image we belong to. */
246 PVMDKIMAGE pImage;
247 /** Pointer to next file descriptor. */
248 struct VMDKFILE *pNext;
249 /** Pointer to the previous file descriptor. */
250 struct VMDKFILE *pPrev;
251} VMDKFILE, *PVMDKFILE;
252
253/**
254 * VMDK extent data structure.
255 */
256typedef struct VMDKEXTENT
257{
258 /** File handle. */
259 PVMDKFILE pFile;
260 /** Base name of the image extent. */
261 const char *pszBasename;
262 /** Full name of the image extent. */
263 const char *pszFullname;
264 /** Number of sectors in this extent. */
265 uint64_t cSectors;
266 /** Number of sectors per block (grain in VMDK speak). */
267 uint64_t cSectorsPerGrain;
268 /** Starting sector number of descriptor. */
269 uint64_t uDescriptorSector;
270 /** Size of descriptor in sectors. */
271 uint64_t cDescriptorSectors;
272 /** Starting sector number of grain directory. */
273 uint64_t uSectorGD;
274 /** Starting sector number of redundant grain directory. */
275 uint64_t uSectorRGD;
276 /** Total number of metadata sectors. */
277 uint64_t cOverheadSectors;
278 /** Nominal size (i.e. as described by the descriptor) of this extent. */
279 uint64_t cNominalSectors;
280 /** Sector offset (i.e. as described by the descriptor) of this extent. */
281 uint64_t uSectorOffset;
282 /** Number of entries in a grain table. */
283 uint32_t cGTEntries;
284 /** Number of sectors reachable via a grain directory entry. */
285 uint32_t cSectorsPerGDE;
286 /** Number of entries in the grain directory. */
287 uint32_t cGDEntries;
288 /** Pointer to the next free sector. Legacy information. Do not use. */
289 uint32_t uFreeSector;
290 /** Number of this extent in the list of images. */
291 uint32_t uExtent;
292 /** Pointer to the descriptor (NULL if no descriptor in this extent). */
293 char *pDescData;
294 /** Pointer to the grain directory. */
295 uint32_t *pGD;
296 /** Pointer to the redundant grain directory. */
297 uint32_t *pRGD;
298 /** VMDK version of this extent. 1=1.0/1.1 */
299 uint32_t uVersion;
300 /** Type of this extent. */
301 VMDKETYPE enmType;
302 /** Access to this extent. */
303 VMDKACCESS enmAccess;
304 /** Flag whether this extent is marked as unclean. */
305 bool fUncleanShutdown;
306 /** Flag whether the metadata in the extent header needs to be updated. */
307 bool fMetaDirty;
308 /** Flag whether there is a footer in this extent. */
309 bool fFooter;
310 /** Compression type for this extent. */
311 uint16_t uCompression;
312 /** Last grain which has been written to. Only for streamOptimized extents. */
313 uint32_t uLastGrainWritten;
314 /** Sector number of last grain which has been written to. Only for
315 * streamOptimized extents. */
316 uint32_t uLastGrainSector;
317 /** Data size of last grain which has been written to. Only for
318 * streamOptimized extents. */
319 uint32_t cbLastGrainWritten;
320 /** Starting sector of the decompressed grain buffer. */
321 uint32_t uGrainSector;
322 /** Decompressed grain buffer for streamOptimized extents. */
323 void *pvGrain;
324 /** Reference to the image in which this extent is used. Do not use this
325 * on a regular basis to avoid passing pImage references to functions
326 * explicitly. */
327 struct VMDKIMAGE *pImage;
328} VMDKEXTENT, *PVMDKEXTENT;
329
330/**
331 * Grain table cache size. Allocated per image.
332 */
333#define VMDK_GT_CACHE_SIZE 256
334
335/**
336 * Grain table block size. Smaller than an actual grain table block to allow
337 * more grain table blocks to be cached without having to allocate excessive
338 * amounts of memory for the cache.
339 */
340#define VMDK_GT_CACHELINE_SIZE 128
341
342
343/**
344 * Maximum number of lines in a descriptor file. Not worth the effort of
345 * making it variable. Descriptor files are generally very short (~20 lines),
346 * with the exception of sparse files split in 2G chunks, which need for the
347 * maximum size (almost 2T) exactly 1025 lines for the disk database.
348 */
349#define VMDK_DESCRIPTOR_LINES_MAX 1100U
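
The 1025 figure follows from the chunk size defined above: ceil(2 TiB / 2047 MiB) = ceil(2097152 / 2047) = 1025 extent lines, so a limit of 1100 leaves comfortable room for the header, DDB and comment lines around them.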
350
351/**
352 * Parsed descriptor information. Allows easy access and update of the
353 * descriptor (whether separate file or not). Free form text files suck.
354 */
355typedef struct VMDKDESCRIPTOR
356{
357 /** Line number of first entry of the disk descriptor. */
358 unsigned uFirstDesc;
359 /** Line number of first entry in the extent description. */
360 unsigned uFirstExtent;
361 /** Line number of first disk database entry. */
362 unsigned uFirstDDB;
363 /** Total number of lines. */
364 unsigned cLines;
365 /** Total amount of memory available for the descriptor. */
366 size_t cbDescAlloc;
367 /** Set if descriptor has been changed and not yet written to disk. */
368 bool fDirty;
369 /** Array of pointers to the data in the descriptor. */
370 char *aLines[VMDK_DESCRIPTOR_LINES_MAX];
371 /** Array of line indices pointing to the next non-comment line. */
372 unsigned aNextLines[VMDK_DESCRIPTOR_LINES_MAX];
373} VMDKDESCRIPTOR, *PVMDKDESCRIPTOR;
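
The aNextLines array turns the flat line array into per-section singly linked lists that the vmdkDescGetStr()/vmdkDescSetStr() helpers below can walk without re-scanning comments. A hypothetical minimal descriptor illustrates the idea (indices and values are invented):

/*
 *  aLines[1] = "# Disk DescriptorFile"            (comment, not chained)
 *  aLines[2] = "version=1"                        uFirstDesc   = 2, aNextLines[2] = 3
 *  aLines[3] = "createType=\"monolithicSparse\""  aNextLines[3] = 0 (end of section)
 *  aLines[4] = "RW 2048 SPARSE \"test.vmdk\""     uFirstExtent = 4
 *  aLines[5] = "ddb.geometry.cylinders=\"2\""     uFirstDDB    = 5
 */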
374
375
376/**
377 * Cache entry for translating extent/sector to a sector number in that
378 * extent.
379 */
380typedef struct VMDKGTCACHEENTRY
381{
382 /** Extent number for which this entry is valid. */
383 uint32_t uExtent;
384 /** GT data block number. */
385 uint64_t uGTBlock;
386 /** Data part of the cache entry. */
387 uint32_t aGTData[VMDK_GT_CACHELINE_SIZE];
388} VMDKGTCACHEENTRY, *PVMDKGTCACHEENTRY;
389
390/**
391 * Cache data structure for blocks of grain table entries. For now this is a
392 * fixed size direct mapping cache, but this should be adapted to the size of
393 * the sparse image and maybe converted to a set-associative cache. The
394 * implementation below implements a write-through cache with write allocate.
395 */
396typedef struct VMDKGTCACHE
397{
398 /** Cache entries. */
399 VMDKGTCACHEENTRY aGTCache[VMDK_GT_CACHE_SIZE];
400 /** Number of cache entries (currently unused). */
401 unsigned cEntries;
402} VMDKGTCACHE, *PVMDKGTCACHE;
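
A sketch of the resulting lookup, assuming the straightforward modulo mapping implied by the fixed cache size (the real cache helpers appear further down in this file):

/* Hypothetical hit test: map a grain table block to its slot and check
 * whether that slot currently caches this block of this extent. */
static bool vmdkSketchGTCacheHit(PVMDKGTCACHE pCache, uint32_t uExtent, uint64_t uGTBlock)
{
    PVMDKGTCACHEENTRY pEntry = &pCache->aGTCache[uGTBlock % VMDK_GT_CACHE_SIZE];
    return pEntry->uExtent == uExtent && pEntry->uGTBlock == uGTBlock;
}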
403
404/**
405 * Complete VMDK image data structure. Mainly a collection of extents and a few
406 * extra global data fields.
407 */
408typedef struct VMDKIMAGE
409{
410 /** Pointer to the image extents. */
411 PVMDKEXTENT pExtents;
412 /** Number of image extents. */
413 unsigned cExtents;
414 /** Pointer to the files list, for opening a file referenced multiple
415 * times only once (happens mainly with raw partition access). */
416 PVMDKFILE pFiles;
417
418 /** Base image name. */
419 const char *pszFilename;
420 /** Descriptor file if applicable. */
421 PVMDKFILE pFile;
422
423 /** Pointer to the per-disk VD interface list. */
424 PVDINTERFACE pVDIfsDisk;
425
426 /** Error interface. */
427 PVDINTERFACE pInterfaceError;
428 /** Error interface callbacks. */
429 PVDINTERFACEERROR pInterfaceErrorCallbacks;
430
431 /** Async I/O interface. */
432 PVDINTERFACE pInterfaceAsyncIO;
433 /** Async I/O interface callbacks. */
434 PVDINTERFACEASYNCIO pInterfaceAsyncIOCallbacks;
435 /**
436 * Pointer to an array of segment entries for async I/O.
437 * This is an optimization because the task number to submit is not known
438 * and allocating/freeing an array in the read/write functions every time
439 * is too expensive.
440 */
441 PPDMDATASEG paSegments;
442 /** Entries available in the segments array. */
443 unsigned cSegments;
444
445 /** Open flags passed by VBoxHD layer. */
446 unsigned uOpenFlags;
447 /** Image flags defined during creation or determined during open. */
448 unsigned uImageFlags;
449 /** Total size of the image. */
450 uint64_t cbSize;
451 /** Physical geometry of this image. */
452 PDMMEDIAGEOMETRY PCHSGeometry;
453 /** Logical geometry of this image. */
454 PDMMEDIAGEOMETRY LCHSGeometry;
455 /** Image UUID. */
456 RTUUID ImageUuid;
457 /** Image modification UUID. */
458 RTUUID ModificationUuid;
459 /** Parent image UUID. */
460 RTUUID ParentUuid;
461 /** Parent image modification UUID. */
462 RTUUID ParentModificationUuid;
463
464 /** Pointer to grain table cache, if this image contains sparse extents. */
465 PVMDKGTCACHE pGTCache;
466 /** Pointer to the descriptor (NULL if no separate descriptor file). */
467 char *pDescData;
468 /** Allocation size of the descriptor file. */
469 size_t cbDescAlloc;
470 /** Parsed descriptor file content. */
471 VMDKDESCRIPTOR Descriptor;
472} VMDKIMAGE;
473
474
475/** State for the input callout of the inflate reader. */
476typedef struct VMDKINFLATESTATE
477{
478 /* File where the data is stored. */
479 PVMDKFILE File;
480 /* Total size of the data to read. */
481 size_t cbSize;
482 /* Offset in the file to read. */
483 uint64_t uFileOffset;
484 /* Current read position. */
485 ssize_t iOffset;
486} VMDKINFLATESTATE;
487
488/** State for the output callout of the deflate writer. */
489typedef struct VMDKDEFLATESTATE
490{
491 /* File where the data is to be stored. */
492 PVMDKFILE File;
493 /* Offset in the file to write at. */
494 uint64_t uFileOffset;
495 /* Current write position. */
496 ssize_t iOffset;
497} VMDKDEFLATESTATE;
498
499/*******************************************************************************
500 * Static Variables *
501 *******************************************************************************/
502
503/** NULL-terminated array of supported file extensions. */
504static const char *const s_apszVmdkFileExtensions[] =
505{
506 "vmdk",
507 NULL
508};
509
510/*******************************************************************************
511* Internal Functions *
512*******************************************************************************/
513
514static void vmdkFreeGrainDirectory(PVMDKEXTENT pExtent);
515
516static void vmdkFreeExtentData(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
517 bool fDelete);
518
519static int vmdkCreateExtents(PVMDKIMAGE pImage, unsigned cExtents);
520static int vmdkFlushImage(PVMDKIMAGE pImage);
521static int vmdkSetImageComment(PVMDKIMAGE pImage, const char *pszComment);
522static void vmdkFreeImage(PVMDKIMAGE pImage, bool fDelete);
523
524
525/**
526 * Internal: signal an error to the frontend.
527 */
528DECLINLINE(int) vmdkError(PVMDKIMAGE pImage, int rc, RT_SRC_POS_DECL,
529 const char *pszFormat, ...)
530{
531 va_list va;
532 va_start(va, pszFormat);
533 if (pImage->pInterfaceError && pImage->pInterfaceErrorCallbacks)
534 pImage->pInterfaceErrorCallbacks->pfnError(pImage->pInterfaceError->pvUser, rc, RT_SRC_POS_ARGS,
535 pszFormat, va);
536 va_end(va);
537 return rc;
538}
539
540/**
541 * Internal: open a file (using a file descriptor cache to ensure each file
542 * is only opened once - anything else can cause locking problems).
543 */
544static int vmdkFileOpen(PVMDKIMAGE pImage, PVMDKFILE *ppVmdkFile,
545 const char *pszFilename, unsigned fOpen, bool fAsyncIO)
546{
547 int rc = VINF_SUCCESS;
548 PVMDKFILE pVmdkFile;
549
550 for (pVmdkFile = pImage->pFiles;
551 pVmdkFile != NULL;
552 pVmdkFile = pVmdkFile->pNext)
553 {
554 if (!strcmp(pszFilename, pVmdkFile->pszFilename))
555 {
556 Assert(fOpen == pVmdkFile->fOpen);
557 pVmdkFile->uReferences++;
558
559 *ppVmdkFile = pVmdkFile;
560
561 return rc;
562 }
563 }
564
565 /* If we get here, there's no matching entry in the cache. */
566 pVmdkFile = (PVMDKFILE)RTMemAllocZ(sizeof(VMDKFILE));
567 if (!VALID_PTR(pVmdkFile))
568 {
569 *ppVmdkFile = NULL;
570 return VERR_NO_MEMORY;
571 }
572
573 pVmdkFile->pszFilename = RTStrDup(pszFilename);
574 if (!VALID_PTR(pVmdkFile->pszFilename))
575 {
576 RTMemFree(pVmdkFile);
577 *ppVmdkFile = NULL;
578 return VERR_NO_MEMORY;
579 }
580 pVmdkFile->fOpen = fOpen;
581
582#ifndef VBOX_WITH_NEW_IO_CODE
583 if ((pImage->uOpenFlags & VD_OPEN_FLAGS_ASYNC_IO) && (fAsyncIO))
584 {
585 rc = pImage->pInterfaceAsyncIOCallbacks->pfnOpen(pImage->pInterfaceAsyncIO->pvUser,
586 pszFilename,
587 pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY
588 ? VD_INTERFACEASYNCIO_OPEN_FLAGS_READONLY
589 : 0,
590 NULL,
591 pImage->pVDIfsDisk,
592 &pVmdkFile->pStorage);
593 pVmdkFile->fAsyncIO = true;
594 }
595 else
596 {
597 rc = RTFileOpen(&pVmdkFile->File, pszFilename, fOpen);
598 pVmdkFile->fAsyncIO = false;
599 }
600#else
601 unsigned uOpenFlags = 0;
602
603 if ((fOpen & RTFILE_O_ACCESS_MASK) == RTFILE_O_READ)
604 uOpenFlags |= VD_INTERFACEASYNCIO_OPEN_FLAGS_READONLY;
605 if ((fOpen & RTFILE_O_ACTION_MASK) == RTFILE_O_CREATE)
606 uOpenFlags |= VD_INTERFACEASYNCIO_OPEN_FLAGS_CREATE;
607
608 rc = pImage->pInterfaceAsyncIOCallbacks->pfnOpen(pImage->pInterfaceAsyncIO->pvUser,
609 pszFilename,
610 uOpenFlags,
611 NULL,
612 pImage->pVDIfsDisk,
613 &pVmdkFile->pStorage);
614#endif
615 if (RT_SUCCESS(rc))
616 {
617 pVmdkFile->uReferences = 1;
618 pVmdkFile->pImage = pImage;
619 pVmdkFile->pNext = pImage->pFiles;
620 if (pImage->pFiles)
621 pImage->pFiles->pPrev = pVmdkFile;
622 pImage->pFiles = pVmdkFile;
623 *ppVmdkFile = pVmdkFile;
624 }
625 else
626 {
627 RTStrFree((char *)(void *)pVmdkFile->pszFilename);
628 RTMemFree(pVmdkFile);
629 *ppVmdkFile = NULL;
630 }
631
632 return rc;
633}
634
635/**
636 * Internal: close a file, updating the file descriptor cache.
637 */
638static int vmdkFileClose(PVMDKIMAGE pImage, PVMDKFILE *ppVmdkFile, bool fDelete)
639{
640 int rc = VINF_SUCCESS;
641 PVMDKFILE pVmdkFile = *ppVmdkFile;
642
643 AssertPtr(pVmdkFile);
644
645 pVmdkFile->fDelete |= fDelete;
646 Assert(pVmdkFile->uReferences);
647 pVmdkFile->uReferences--;
648 if (pVmdkFile->uReferences == 0)
649 {
650 PVMDKFILE pPrev;
651 PVMDKFILE pNext;
652
653 /* Unchain the element from the list. */
654 pPrev = pVmdkFile->pPrev;
655 pNext = pVmdkFile->pNext;
656
657 if (pNext)
658 pNext->pPrev = pPrev;
659 if (pPrev)
660 pPrev->pNext = pNext;
661 else
662 pImage->pFiles = pNext;
663
664#ifndef VBOX_WITH_NEW_IO_CODE
665 if (pVmdkFile->fAsyncIO)
666 {
667 rc = pImage->pInterfaceAsyncIOCallbacks->pfnClose(pImage->pInterfaceAsyncIO->pvUser,
668 pVmdkFile->pStorage);
669 }
670 else
671 {
672 rc = RTFileClose(pVmdkFile->File);
673 }
674#else
675 rc = pImage->pInterfaceAsyncIOCallbacks->pfnClose(pImage->pInterfaceAsyncIO->pvUser,
676 pVmdkFile->pStorage);
677#endif
678 if (RT_SUCCESS(rc) && pVmdkFile->fDelete)
679 rc = RTFileDelete(pVmdkFile->pszFilename);
680 RTStrFree((char *)(void *)pVmdkFile->pszFilename);
681 RTMemFree(pVmdkFile);
682 }
683
684 *ppVmdkFile = NULL;
685 return rc;
686}
687
688/**
689 * Internal: read from a file distinguishing between async and normal operation
690 */
691DECLINLINE(int) vmdkFileReadAt(PVMDKFILE pVmdkFile,
692 uint64_t uOffset, void *pvBuf,
693 size_t cbToRead, size_t *pcbRead)
694{
695 PVMDKIMAGE pImage = pVmdkFile->pImage;
696
697#ifndef VBOX_WITH_NEW_IO_CODE
698 if (pVmdkFile->fAsyncIO)
699 return pImage->pInterfaceAsyncIOCallbacks->pfnReadSync(pImage->pInterfaceAsyncIO->pvUser,
700 pVmdkFile->pStorage, uOffset,
701 cbToRead, pvBuf, pcbRead);
702 else
703 return RTFileReadAt(pVmdkFile->File, uOffset, pvBuf, cbToRead, pcbRead);
704#else
705 return pImage->pInterfaceAsyncIOCallbacks->pfnReadSync(pImage->pInterfaceAsyncIO->pvUser,
706 pVmdkFile->pStorage, uOffset,
707 cbToRead, pvBuf, pcbRead);
708#endif
709}
710
711/**
712 * Internal: write to a file distinguishing between async and normal operation
713 */
714DECLINLINE(int) vmdkFileWriteAt(PVMDKFILE pVmdkFile,
715 uint64_t uOffset, const void *pvBuf,
716 size_t cbToWrite, size_t *pcbWritten)
717{
718 PVMDKIMAGE pImage = pVmdkFile->pImage;
719
720#ifndef VBOX_WITH_NEW_IO_CODE
721 if (pVmdkFile->fAsyncIO)
722 return pImage->pInterfaceAsyncIOCallbacks->pfnWriteSync(pImage->pInterfaceAsyncIO->pvUser,
723 pVmdkFile->pStorage, uOffset,
724 cbToWrite, pvBuf, pcbWritten);
725 else
726 return RTFileWriteAt(pVmdkFile->File, uOffset, pvBuf, cbToWrite, pcbWritten);
727#else
728 return pImage->pInterfaceAsyncIOCallbacks->pfnWriteSync(pImage->pInterfaceAsyncIO->pvUser,
729 pVmdkFile->pStorage, uOffset,
730 cbToWrite, pvBuf, pcbWritten);
731#endif
732}
733
734/**
735 * Internal: get the size of a file distinguishing between async and normal operation
736 */
737DECLINLINE(int) vmdkFileGetSize(PVMDKFILE pVmdkFile, uint64_t *pcbSize)
738{
739 PVMDKIMAGE pImage = pVmdkFile->pImage;
740
741#ifndef VBOX_WITH_NEW_IO_CODE
742 if (pVmdkFile->fAsyncIO)
743 {
744 return pImage->pInterfaceAsyncIOCallbacks->pfnGetSize(pImage->pInterfaceAsyncIO->pvUser,
745 pVmdkFile->pStorage,
746 pcbSize);
747 }
748 else
749 return RTFileGetSize(pVmdkFile->File, pcbSize);
750#else
751 return pImage->pInterfaceAsyncIOCallbacks->pfnGetSize(pImage->pInterfaceAsyncIO->pvUser,
752 pVmdkFile->pStorage,
753 pcbSize);
754#endif
755}
756
757/**
758 * Internal: set the size of a file distinguishing between async and normal operation
759 */
760DECLINLINE(int) vmdkFileSetSize(PVMDKFILE pVmdkFile, uint64_t cbSize)
761{
762 PVMDKIMAGE pImage = pVmdkFile->pImage;
763
764#ifndef VBOX_WITH_NEW_IO_CODE
765 if (pVmdkFile->fAsyncIO)
766 {
767 return pImage->pInterfaceAsyncIOCallbacks->pfnSetSize(pImage->pInterfaceAsyncIO->pvUser,
768 pVmdkFile->pStorage,
769 cbSize);
770 }
771 else
772 return RTFileSetSize(pVmdkFile->File, cbSize);
773#else
774 return pImage->pInterfaceAsyncIOCallbacks->pfnSetSize(pImage->pInterfaceAsyncIO->pvUser,
775 pVmdkFile->pStorage,
776 cbSize);
777#endif
778}
779
780/**
781 * Internal: flush a file distinguishing between async and normal operation
782 */
783DECLINLINE(int) vmdkFileFlush(PVMDKFILE pVmdkFile)
784{
785 PVMDKIMAGE pImage = pVmdkFile->pImage;
786
787#ifndef VBOX_WITH_NEW_IO_CODE
788 if (pVmdkFile->fAsyncIO)
789 return pImage->pInterfaceAsyncIOCallbacks->pfnFlushSync(pImage->pInterfaceAsyncIO->pvUser,
790 pVmdkFile->pStorage);
791 else
792 return RTFileFlush(pVmdkFile->File);
793#else
794 return pImage->pInterfaceAsyncIOCallbacks->pfnFlushSync(pImage->pInterfaceAsyncIO->pvUser,
795 pVmdkFile->pStorage);
796#endif
797}
798
799
800static DECLCALLBACK(int) vmdkFileInflateHelper(void *pvUser, void *pvBuf, size_t cbBuf, size_t *pcbBuf)
801{
802 VMDKINFLATESTATE *pInflateState = (VMDKINFLATESTATE *)pvUser;
803
804 Assert(cbBuf);
805 if (pInflateState->iOffset < 0)
806 {
807 *(uint8_t *)pvBuf = RTZIPTYPE_ZLIB;
808 if (pcbBuf)
809 *pcbBuf = 1;
810 pInflateState->iOffset = 0;
811 return VINF_SUCCESS;
812 }
813 cbBuf = RT_MIN(cbBuf, pInflateState->cbSize);
814 int rc = vmdkFileReadAt(pInflateState->File, pInflateState->uFileOffset, pvBuf, cbBuf, NULL);
815 if (RT_FAILURE(rc))
816 return rc;
817 pInflateState->uFileOffset += cbBuf;
818 pInflateState->iOffset += cbBuf;
819 pInflateState->cbSize -= cbBuf;
820 Assert(pcbBuf);
821 *pcbBuf = cbBuf;
822 return VINF_SUCCESS;
823}
824
825/**
826 * Internal: read from a file and inflate the compressed data,
827 * distinguishing between async and normal operation
828 */
829DECLINLINE(int) vmdkFileInflateAt(PVMDKFILE pVmdkFile,
830 uint64_t uOffset, void *pvBuf,
831 size_t cbToRead, unsigned uMarker,
832 uint64_t *puLBA, uint32_t *pcbMarkerData)
833{
834 if (pVmdkFile->fAsyncIO)
835 {
836 AssertMsgFailed(("TODO\n"));
837 return VERR_NOT_SUPPORTED;
838 }
839 else
840 {
841 int rc;
842 PRTZIPDECOMP pZip = NULL;
843 VMDKMARKER Marker;
844 uint64_t uCompOffset, cbComp;
845 VMDKINFLATESTATE InflateState;
846 size_t cbActuallyRead;
847 size_t cbMarker = sizeof(Marker);
848
849 if (uMarker == VMDK_MARKER_IGNORE)
850 cbMarker -= sizeof(Marker.uType);
851 rc = vmdkFileReadAt(pVmdkFile, uOffset, &Marker, cbMarker, NULL);
852 if (RT_FAILURE(rc))
853 return rc;
854 Marker.uSector = RT_LE2H_U64(Marker.uSector);
855 Marker.cbSize = RT_LE2H_U32(Marker.cbSize);
856 if ( uMarker != VMDK_MARKER_IGNORE
857 && ( RT_LE2H_U32(Marker.uType) != uMarker
858 || Marker.cbSize != 0))
859 return VERR_VD_VMDK_INVALID_FORMAT;
860 if (Marker.cbSize != 0)
861 {
862 /* Compressed grain marker. Data follows immediately. */
863 uCompOffset = uOffset + 12;
864 cbComp = Marker.cbSize;
865 if (puLBA)
866 *puLBA = Marker.uSector;
867 if (pcbMarkerData)
868 *pcbMarkerData = cbComp + 12;
869 }
870 else
871 {
872 Marker.uType = RT_LE2H_U32(Marker.uType);
873 if (Marker.uType == VMDK_MARKER_EOS)
874 {
875 Assert(uMarker != VMDK_MARKER_EOS);
876 return VERR_VD_VMDK_INVALID_FORMAT;
877 }
878 else if ( Marker.uType == VMDK_MARKER_GT
879 || Marker.uType == VMDK_MARKER_GD
880 || Marker.uType == VMDK_MARKER_FOOTER)
881 {
882 uCompOffset = uOffset + 512;
883 cbComp = VMDK_SECTOR2BYTE(Marker.uSector);
884 if (pcbMarkerData)
885 *pcbMarkerData = cbComp + 512;
886 }
887 else
888 {
889 AssertMsgFailed(("VMDK: unknown marker type %u\n", Marker.uType));
890 return VERR_VD_VMDK_INVALID_FORMAT;
891 }
892 }
893 InflateState.File = pVmdkFile;
894 InflateState.cbSize = cbComp;
895 InflateState.uFileOffset = uCompOffset;
896 InflateState.iOffset = -1;
897 /* Sanity check - the expansion ratio should be much less than 2. */
898 Assert(cbComp < 2 * cbToRead);
899 if (cbComp >= 2 * cbToRead)
900 return VERR_VD_VMDK_INVALID_FORMAT;
901
902 rc = RTZipDecompCreate(&pZip, &InflateState, vmdkFileInflateHelper);
903 if (RT_FAILURE(rc))
904 return rc;
905 rc = RTZipDecompress(pZip, pvBuf, cbToRead, &cbActuallyRead);
906 RTZipDecompDestroy(pZip);
907 if (RT_FAILURE(rc))
908 return rc;
909 if (cbActuallyRead != cbToRead)
910 rc = VERR_VD_VMDK_INVALID_FORMAT;
911 return rc;
912 }
913}
914
915static DECLCALLBACK(int) vmdkFileDeflateHelper(void *pvUser, const void *pvBuf, size_t cbBuf)
916{
917 VMDKDEFLATESTATE *pDeflateState = (VMDKDEFLATESTATE *)pvUser;
918
919 Assert(cbBuf);
920 if (pDeflateState->iOffset < 0)
921 {
922 pvBuf = (const uint8_t *)pvBuf + 1;
923 cbBuf--;
924 pDeflateState->iOffset = 0;
925 }
926 if (!cbBuf)
927 return VINF_SUCCESS;
928 int rc = vmdkFileWriteAt(pDeflateState->File, pDeflateState->uFileOffset, pvBuf, cbBuf, NULL);
929 if (RT_FAILURE(rc))
930 return rc;
931 pDeflateState->uFileOffset += cbBuf;
932 pDeflateState->iOffset += cbBuf;
933 return VINF_SUCCESS;
934}
935
936/**
937 * Internal: deflate the uncompressed data and write to a file,
938 * distinguishing between async and normal operation
939 */
940DECLINLINE(int) vmdkFileDeflateAt(PVMDKFILE pVmdkFile,
941 uint64_t uOffset, const void *pvBuf,
942 size_t cbToWrite, unsigned uMarker,
943 uint64_t uLBA, uint32_t *pcbMarkerData)
944{
945 if (pVmdkFile->fAsyncIO)
946 {
947 AssertMsgFailed(("TODO\n"));
948 return VERR_NOT_SUPPORTED;
949 }
950 else
951 {
952 int rc;
953 PRTZIPCOMP pZip = NULL;
954 VMDKMARKER Marker;
955 uint64_t uCompOffset, cbDecomp;
956 VMDKDEFLATESTATE DeflateState;
957
958 Marker.uSector = RT_H2LE_U64(uLBA);
959 Marker.cbSize = RT_H2LE_U32((uint32_t)cbToWrite);
960 if (uMarker == VMDK_MARKER_IGNORE)
961 {
962 /* Compressed grain marker. Data follows immediately. */
963 uCompOffset = uOffset + 12;
964 cbDecomp = cbToWrite;
965 }
966 else
967 {
968 /** @todo implement creating the other marker types */
969 return VERR_NOT_IMPLEMENTED;
970 }
971 DeflateState.File = pVmdkFile;
972 DeflateState.uFileOffset = uCompOffset;
973 DeflateState.iOffset = -1;
974
975 rc = RTZipCompCreate(&pZip, &DeflateState, vmdkFileDeflateHelper, RTZIPTYPE_ZLIB, RTZIPLEVEL_DEFAULT);
976 if (RT_FAILURE(rc))
977 return rc;
978 rc = RTZipCompress(pZip, pvBuf, cbDecomp);
979 if (RT_SUCCESS(rc))
980 rc = RTZipCompFinish(pZip);
981 RTZipCompDestroy(pZip);
982 if (RT_SUCCESS(rc))
983 {
984 if (pcbMarkerData)
985 *pcbMarkerData = 12 + DeflateState.iOffset;
986 /* Set the file size to remove old garbage in case the block is
987 * rewritten. Cannot cause data loss as the code calling this
988 * guarantees that data gets only appended. */
989 Assert(DeflateState.uFileOffset > uCompOffset);
990 rc = vmdkFileSetSize(pVmdkFile, DeflateState.uFileOffset);
991
992 if (uMarker == VMDK_MARKER_IGNORE)
993 {
994 /* Compressed grain marker. */
995 Marker.cbSize = RT_H2LE_U32(DeflateState.iOffset);
996 rc = vmdkFileWriteAt(pVmdkFile, uOffset, &Marker, 12, NULL);
997 if (RT_FAILURE(rc))
998 return rc;
999 }
1000 else
1001 {
1002 /** @todo implement creating the other marker types */
1003 return VERR_NOT_IMPLEMENTED;
1004 }
1005 }
1006 return rc;
1007 }
1008}
1009
1010/**
1011 * Internal: check if all files are closed, prevent leaking resources.
1012 */
1013static int vmdkFileCheckAllClose(PVMDKIMAGE pImage)
1014{
1015 int rc = VINF_SUCCESS, rc2;
1016 PVMDKFILE pVmdkFile;
1017
1018 Assert(pImage->pFiles == NULL);
1019 for (pVmdkFile = pImage->pFiles;
1020 pVmdkFile != NULL;
1021 pVmdkFile = pVmdkFile->pNext)
1022 {
1023 LogRel(("VMDK: leaking reference to file \"%s\"\n",
1024 pVmdkFile->pszFilename));
1025 pImage->pFiles = pVmdkFile->pNext;
1026
1027 if (pImage->uOpenFlags & VD_OPEN_FLAGS_ASYNC_IO)
1028 rc2 = pImage->pInterfaceAsyncIOCallbacks->pfnClose(pImage->pInterfaceAsyncIO->pvUser,
1029 pVmdkFile->pStorage);
1030 else
1031 rc2 = vmdkFileClose(pImage, &pVmdkFile, pVmdkFile->fDelete);
1032
1033 if (RT_SUCCESS(rc))
1034 rc = rc2;
1035 }
1036 return rc;
1037}
1038
1039/**
1040 * Internal: truncate a string (at a UTF8 code point boundary) and encode the
1041 * critical non-ASCII characters.
1042 */
1043static char *vmdkEncodeString(const char *psz)
1044{
1045 char szEnc[VMDK_ENCODED_COMMENT_MAX + 3];
1046 char *pszDst = szEnc;
1047
1048 AssertPtr(psz);
1049
1050 for (; *psz; psz = RTStrNextCp(psz))
1051 {
1052 char *pszDstPrev = pszDst;
1053 RTUNICP Cp = RTStrGetCp(psz);
1054 if (Cp == '\\')
1055 {
1056 pszDst = RTStrPutCp(pszDst, Cp);
1057 pszDst = RTStrPutCp(pszDst, Cp);
1058 }
1059 else if (Cp == '\n')
1060 {
1061 pszDst = RTStrPutCp(pszDst, '\\');
1062 pszDst = RTStrPutCp(pszDst, 'n');
1063 }
1064 else if (Cp == '\r')
1065 {
1066 pszDst = RTStrPutCp(pszDst, '\\');
1067 pszDst = RTStrPutCp(pszDst, 'r');
1068 }
1069 else
1070 pszDst = RTStrPutCp(pszDst, Cp);
1071 if (pszDst - szEnc >= VMDK_ENCODED_COMMENT_MAX - 1)
1072 {
1073 pszDst = pszDstPrev;
1074 break;
1075 }
1076 }
1077 *pszDst = '\0';
1078 return RTStrDup(szEnc);
1079}
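
A concrete round trip (input invented for illustration):

/*
 *  original comment : line one<newline>C:\tmp
 *  encoded (1 line) : line one\nC:\\tmp
 *  vmdkDecodeString() below reverses the mapping; overly long input is
 *  truncated at a UTF-8 code point boundary to honour VMDK_ENCODED_COMMENT_MAX.
 */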
1080
1081/**
1082 * Internal: decode a string and store it into the specified string.
1083 */
1084static int vmdkDecodeString(const char *pszEncoded, char *psz, size_t cb)
1085{
1086 int rc = VINF_SUCCESS;
1087 char szBuf[4];
1088
1089 if (!cb)
1090 return VERR_BUFFER_OVERFLOW;
1091
1092 AssertPtr(psz);
1093
1094 for (; *pszEncoded; pszEncoded = RTStrNextCp(pszEncoded))
1095 {
1096 char *pszDst = szBuf;
1097 RTUNICP Cp = RTStrGetCp(pszEncoded);
1098 if (Cp == '\\')
1099 {
1100 pszEncoded = RTStrNextCp(pszEncoded);
1101 RTUNICP CpQ = RTStrGetCp(pszEncoded);
1102 if (CpQ == 'n')
1103 RTStrPutCp(pszDst, '\n');
1104 else if (CpQ == 'r')
1105 RTStrPutCp(pszDst, '\r');
1106 else if (CpQ == '\0')
1107 {
1108 rc = VERR_VD_VMDK_INVALID_HEADER;
1109 break;
1110 }
1111 else
1112 RTStrPutCp(pszDst, CpQ);
1113 }
1114 else
1115 pszDst = RTStrPutCp(pszDst, Cp);
1116
1117 /* Need to leave space for terminating NUL. */
1118 if ((size_t)(pszDst - szBuf) + 1 >= cb)
1119 {
1120 rc = VERR_BUFFER_OVERFLOW;
1121 break;
1122 }
1123 memcpy(psz, szBuf, pszDst - szBuf);
1124 psz += pszDst - szBuf;
1125 }
1126 *psz = '\0';
1127 return rc;
1128}
1129
1130static int vmdkReadGrainDirectory(PVMDKEXTENT pExtent)
1131{
1132 int rc = VINF_SUCCESS;
1133 unsigned i;
1134 uint32_t *pGD = NULL, *pRGD = NULL, *pGDTmp, *pRGDTmp;
1135 size_t cbGD = pExtent->cGDEntries * sizeof(uint32_t);
1136
1137 if (pExtent->enmType != VMDKETYPE_HOSTED_SPARSE)
1138 goto out;
1139
1140 pGD = (uint32_t *)RTMemAllocZ(cbGD);
1141 if (!pGD)
1142 {
1143 rc = VERR_NO_MEMORY;
1144 goto out;
1145 }
1146 pExtent->pGD = pGD;
1147 /* The VMDK 1.1 spec talks about compressed grain directories, but real
1148 * life files don't have them. The spec is wrong in creative ways. */
1149 rc = vmdkFileReadAt(pExtent->pFile, VMDK_SECTOR2BYTE(pExtent->uSectorGD),
1150 pGD, cbGD, NULL);
1151 AssertRC(rc);
1152 if (RT_FAILURE(rc))
1153 {
1154 rc = vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: could not read grain directory in '%s': %Rrc"), pExtent->pszFullname);
1155 goto out;
1156 }
1157 for (i = 0, pGDTmp = pGD; i < pExtent->cGDEntries; i++, pGDTmp++)
1158 *pGDTmp = RT_LE2H_U32(*pGDTmp);
1159
1160 if (pExtent->uSectorRGD)
1161 {
1162 pRGD = (uint32_t *)RTMemAllocZ(cbGD);
1163 if (!pRGD)
1164 {
1165 rc = VERR_NO_MEMORY;
1166 goto out;
1167 }
1168 pExtent->pRGD = pRGD;
1169 /* The VMDK 1.1 spec talks about compressed grain directories, but real
1170 * life files don't have them. The spec is wrong in creative ways. */
1171 rc = vmdkFileReadAt(pExtent->pFile, VMDK_SECTOR2BYTE(pExtent->uSectorRGD),
1172 pRGD, cbGD, NULL);
1173 AssertRC(rc);
1174 if (RT_FAILURE(rc))
1175 {
1176 rc = vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: could not read redundant grain directory in '%s'"), pExtent->pszFullname);
1177 goto out;
1178 }
1179 for (i = 0, pRGDTmp = pRGD; i < pExtent->cGDEntries; i++, pRGDTmp++)
1180 *pRGDTmp = RT_LE2H_U32(*pRGDTmp);
1181
1182 /* Check grain table and redundant grain table for consistency. */
1183 size_t cbGT = pExtent->cGTEntries * sizeof(uint32_t);
1184 uint32_t *pTmpGT1 = (uint32_t *)RTMemTmpAlloc(cbGT);
1185 if (!pTmpGT1)
1186 {
1187 rc = VERR_NO_MEMORY;
1188 goto out;
1189 }
1190 uint32_t *pTmpGT2 = (uint32_t *)RTMemTmpAlloc(cbGT);
1191 if (!pTmpGT2)
1192 {
1193 RTMemTmpFree(pTmpGT1);
1194 rc = VERR_NO_MEMORY;
1195 goto out;
1196 }
1197
1198 for (i = 0, pGDTmp = pGD, pRGDTmp = pRGD;
1199 i < pExtent->cGDEntries;
1200 i++, pGDTmp++, pRGDTmp++)
1201 {
1202 /* If no grain table is allocated skip the entry. */
1203 if (*pGDTmp == 0 && *pRGDTmp == 0)
1204 continue;
1205
1206 if (*pGDTmp == 0 || *pRGDTmp == 0 || *pGDTmp == *pRGDTmp)
1207 {
1208 /* Just one grain directory entry refers to a not yet allocated
1209 * grain table or both grain directory copies refer to the same
1210 * grain table. Not allowed. */
1211 RTMemTmpFree(pTmpGT1);
1212 RTMemTmpFree(pTmpGT2);
1213 rc = vmdkError(pExtent->pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: inconsistent references to grain directory in '%s'"), pExtent->pszFullname);
1214 goto out;
1215 }
1216 /* The VMDK 1.1 spec talks about compressed grain tables, but real
1217 * life files don't have them. The spec is wrong in creative ways. */
1218 rc = vmdkFileReadAt(pExtent->pFile, VMDK_SECTOR2BYTE(*pGDTmp),
1219 pTmpGT1, cbGT, NULL);
1220 if (RT_FAILURE(rc))
1221 {
1222 rc = vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: error reading grain table in '%s'"), pExtent->pszFullname);
1223 RTMemTmpFree(pTmpGT1);
1224 RTMemTmpFree(pTmpGT2);
1225 goto out;
1226 }
1227 /* The VMDK 1.1 spec talks about compressed grain tables, but real
1228 * life files don't have them. The spec is wrong in creative ways. */
1229 rc = vmdkFileReadAt(pExtent->pFile, VMDK_SECTOR2BYTE(*pRGDTmp),
1230 pTmpGT2, cbGT, NULL);
1231 if (RT_FAILURE(rc))
1232 {
1233 rc = vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: error reading backup grain table in '%s'"), pExtent->pszFullname);
1234 RTMemTmpFree(pTmpGT1);
1235 RTMemTmpFree(pTmpGT2);
1236 goto out;
1237 }
1238 if (memcmp(pTmpGT1, pTmpGT2, cbGT))
1239 {
1240 RTMemTmpFree(pTmpGT1);
1241 RTMemTmpFree(pTmpGT2);
1242 rc = vmdkError(pExtent->pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: inconsistency between grain table and backup grain table in '%s'"), pExtent->pszFullname);
1243 goto out;
1244 }
1245 }
1246
1247 /** @todo figure out what to do for unclean VMDKs. */
1248 RTMemTmpFree(pTmpGT1);
1249 RTMemTmpFree(pTmpGT2);
1250 }
1251
1252 if (pExtent->pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
1253 {
1254 uint32_t uLastGrainWritten = 0;
1255 uint32_t uLastGrainSector = 0;
1256 size_t cbGT = pExtent->cGTEntries * sizeof(uint32_t);
1257 uint32_t *pTmpGT = (uint32_t *)RTMemTmpAlloc(cbGT);
1258 if (!pTmpGT)
1259 {
1260 rc = VERR_NO_MEMORY;
1261 goto out;
1262 }
1263 for (i = 0, pGDTmp = pGD; i < pExtent->cGDEntries; i++, pGDTmp++)
1264 {
1265 /* If no grain table is allocated skip the entry. */
1266 if (*pGDTmp == 0)
1267 continue;
1268
1269 /* The VMDK 1.1 spec talks about compressed grain tables, but real
1270 * life files don't have them. The spec is wrong in creative ways. */
1271 rc = vmdkFileReadAt(pExtent->pFile, VMDK_SECTOR2BYTE(*pGDTmp),
1272 pTmpGT, cbGT, NULL);
1273 if (RT_FAILURE(rc))
1274 {
1275 rc = vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: error reading grain table in '%s'"), pExtent->pszFullname);
1276 RTMemTmpFree(pTmpGT);
1277 goto out;
1278 }
1279 uint32_t j;
1280 uint32_t *pGTTmp;
1281 for (j = 0, pGTTmp = pTmpGT; j < pExtent->cGTEntries; j++, pGTTmp++)
1282 {
1283 uint32_t uGTTmp = RT_LE2H_U32(*pGTTmp);
1284
1285 /* If no grain is allocated skip the entry. */
1286 if (uGTTmp == 0)
1287 continue;
1288
1289 if (uLastGrainSector && uLastGrainSector >= uGTTmp)
1290 {
1291 rc = vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: grain table in '%s' contains a violation of the ordering assumptions"), pExtent->pszFullname);
1292 RTMemTmpFree(pTmpGT);
1293 goto out;
1294 }
1295 uLastGrainSector = uGTTmp;
1296 uLastGrainWritten = i * pExtent->cGTEntries + j;
1297 }
1298 }
1299 RTMemTmpFree(pTmpGT);
1300
1301 /* streamOptimized extents need a grain decompress buffer. */
1302 pExtent->pvGrain = RTMemAlloc(VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
1303 if (!pExtent->pvGrain)
1304 {
1305 rc = VERR_NO_MEMORY;
1306 goto out;
1307 }
1308
1309 if (uLastGrainSector)
1310 {
1311 uint64_t uLBA = 0;
1312 uint32_t cbMarker = 0;
1313 rc = vmdkFileInflateAt(pExtent->pFile, VMDK_SECTOR2BYTE(uLastGrainSector),
1314 pExtent->pvGrain, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain), VMDK_MARKER_IGNORE, &uLBA, &cbMarker);
1315 if (RT_FAILURE(rc))
1316 goto out;
1317
1318 Assert(uLBA == uLastGrainWritten * pExtent->cSectorsPerGrain);
1319 pExtent->uGrainSector = uLastGrainSector;
1320 pExtent->cbLastGrainWritten = RT_ALIGN(cbMarker, 512);
1321 }
1322 pExtent->uLastGrainWritten = uLastGrainWritten;
1323 pExtent->uLastGrainSector = uLastGrainSector;
1324 }
1325
1326out:
1327 if (RT_FAILURE(rc))
1328 vmdkFreeGrainDirectory(pExtent);
1329 return rc;
1330}
1331
1332static int vmdkCreateGrainDirectory(PVMDKEXTENT pExtent, uint64_t uStartSector,
1333 bool fPreAlloc)
1334{
1335 int rc = VINF_SUCCESS;
1336 unsigned i;
1337 uint32_t *pGD = NULL, *pRGD = NULL;
1338 size_t cbGD = pExtent->cGDEntries * sizeof(uint32_t);
1339 size_t cbGDRounded = RT_ALIGN_64(pExtent->cGDEntries * sizeof(uint32_t), 512);
1340 size_t cbGTRounded;
1341 uint64_t cbOverhead;
1342
1343 if (fPreAlloc)
1344 cbGTRounded = RT_ALIGN_64(pExtent->cGDEntries * pExtent->cGTEntries * sizeof(uint32_t), 512);
1345 else
1346 cbGTRounded = 0;
1347
1348 pGD = (uint32_t *)RTMemAllocZ(cbGD);
1349 if (!pGD)
1350 {
1351 rc = VERR_NO_MEMORY;
1352 goto out;
1353 }
1354 pExtent->pGD = pGD;
1355 pRGD = (uint32_t *)RTMemAllocZ(cbGD);
1356 if (!pRGD)
1357 {
1358 rc = VERR_NO_MEMORY;
1359 goto out;
1360 }
1361 pExtent->pRGD = pRGD;
1362
1363 cbOverhead = RT_ALIGN_64(VMDK_SECTOR2BYTE(uStartSector) + 2 * (cbGDRounded + cbGTRounded), VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
1364 /* For streamOptimized extents put the end-of-stream marker at the end. */
1365 if (pExtent->pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
1366 rc = vmdkFileSetSize(pExtent->pFile, cbOverhead + 512);
1367 else
1368 rc = vmdkFileSetSize(pExtent->pFile, cbOverhead);
1369 if (RT_FAILURE(rc))
1370 goto out;
1371 pExtent->uSectorRGD = uStartSector;
1372 pExtent->uSectorGD = uStartSector + VMDK_BYTE2SECTOR(cbGDRounded + cbGTRounded);
1373
1374 if (fPreAlloc)
1375 {
1376 uint32_t uGTSectorLE;
1377 uint64_t uOffsetSectors;
1378
1379 uOffsetSectors = pExtent->uSectorRGD + VMDK_BYTE2SECTOR(cbGDRounded);
1380 for (i = 0; i < pExtent->cGDEntries; i++)
1381 {
1382 pRGD[i] = uOffsetSectors;
1383 uGTSectorLE = RT_H2LE_U64(uOffsetSectors);
1384 /* Write the redundant grain directory entry to disk. */
1385 rc = vmdkFileWriteAt(pExtent->pFile,
1386 VMDK_SECTOR2BYTE(pExtent->uSectorRGD) + i * sizeof(uGTSectorLE),
1387 &uGTSectorLE, sizeof(uGTSectorLE), NULL);
1388 if (RT_FAILURE(rc))
1389 {
1390 rc = vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write new redundant grain directory entry in '%s'"), pExtent->pszFullname);
1391 goto out;
1392 }
1393 uOffsetSectors += VMDK_BYTE2SECTOR(pExtent->cGTEntries * sizeof(uint32_t));
1394 }
1395
1396 uOffsetSectors = pExtent->uSectorGD + VMDK_BYTE2SECTOR(cbGDRounded);
1397 for (i = 0; i < pExtent->cGDEntries; i++)
1398 {
1399 pGD[i] = uOffsetSectors;
1400 uGTSectorLE = RT_H2LE_U64(uOffsetSectors);
1401 /* Write the grain directory entry to disk. */
1402 rc = vmdkFileWriteAt(pExtent->pFile,
1403 VMDK_SECTOR2BYTE(pExtent->uSectorGD) + i * sizeof(uGTSectorLE),
1404 &uGTSectorLE, sizeof(uGTSectorLE), NULL);
1405 if (RT_FAILURE(rc))
1406 {
1407 rc = vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write new grain directory entry in '%s'"), pExtent->pszFullname);
1408 goto out;
1409 }
1410 uOffsetSectors += VMDK_BYTE2SECTOR(pExtent->cGTEntries * sizeof(uint32_t));
1411 }
1412 }
1413 pExtent->cOverheadSectors = VMDK_BYTE2SECTOR(cbOverhead);
1414
1415 /* streamOptimized extents need a grain decompress buffer. */
1416 if (pExtent->pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
1417 {
1418 pExtent->pvGrain = RTMemAlloc(VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
1419 if (!pExtent->pvGrain)
1420 {
1421 rc = VERR_NO_MEMORY;
1422 goto out;
1423 }
1424 }
1425
1426out:
1427 if (RT_FAILURE(rc))
1428 vmdkFreeGrainDirectory(pExtent);
1429 return rc;
1430}
1431
1432static void vmdkFreeGrainDirectory(PVMDKEXTENT pExtent)
1433{
1434 if (pExtent->pGD)
1435 {
1436 RTMemFree(pExtent->pGD);
1437 pExtent->pGD = NULL;
1438 }
1439 if (pExtent->pRGD)
1440 {
1441 RTMemFree(pExtent->pRGD);
1442 pExtent->pRGD = NULL;
1443 }
1444}
1445
1446static int vmdkStringUnquote(PVMDKIMAGE pImage, const char *pszStr,
1447 char **ppszUnquoted, char **ppszNext)
1448{
1449 char *pszQ;
1450 char *pszUnquoted;
1451
1452 /* Skip over whitespace. */
1453 while (*pszStr == ' ' || *pszStr == '\t')
1454 pszStr++;
1455
1456 if (*pszStr != '"')
1457 {
1458 pszQ = (char *)pszStr;
1459 while (*pszQ && *pszQ != ' ' && *pszQ != '\t')
1460 pszQ++;
1461 }
1462 else
1463 {
1464 pszStr++;
1465 pszQ = (char *)strchr(pszStr, '"');
1466 if (pszQ == NULL)
1467 return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrectly quoted value in descriptor in '%s'"), pImage->pszFilename);
1468 }
1469
1470 pszUnquoted = (char *)RTMemTmpAlloc(pszQ - pszStr + 1);
1471 if (!pszUnquoted)
1472 return VERR_NO_MEMORY;
1473 memcpy(pszUnquoted, pszStr, pszQ - pszStr);
1474 pszUnquoted[pszQ - pszStr] = '\0';
1475 *ppszUnquoted = pszUnquoted;
1476 if (ppszNext)
1477 *ppszNext = pszQ + 1;
1478 return VINF_SUCCESS;
1479}
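
For example, handed the tail of a createType line (value invented), the function behaves like this:

/*
 *  input pszStr  :  "monolithicSparse" trailing...   (leading blank, quoted value)
 *  *ppszUnquoted : monolithicSparse                  (RTMemTmpAlloc'ed; released later
 *                                                     with RTMemTmpFree)
 *  *ppszNext     : points just past the closing quote, at " trailing..."
 */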
1480
1481static int vmdkDescInitStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1482 const char *pszLine)
1483{
1484 char *pEnd = pDescriptor->aLines[pDescriptor->cLines];
1485 ssize_t cbDiff = strlen(pszLine) + 1;
1486
1487 if ( pDescriptor->cLines >= VMDK_DESCRIPTOR_LINES_MAX - 1
1488 && pEnd - pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff)
1489 return vmdkError(pImage, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
1490
1491 memcpy(pEnd, pszLine, cbDiff);
1492 pDescriptor->cLines++;
1493 pDescriptor->aLines[pDescriptor->cLines] = pEnd + cbDiff;
1494 pDescriptor->fDirty = true;
1495
1496 return VINF_SUCCESS;
1497}
1498
1499static bool vmdkDescGetStr(PVMDKDESCRIPTOR pDescriptor, unsigned uStart,
1500 const char *pszKey, const char **ppszValue)
1501{
1502 size_t cbKey = strlen(pszKey);
1503 const char *pszValue;
1504
1505 while (uStart != 0)
1506 {
1507 if (!strncmp(pDescriptor->aLines[uStart], pszKey, cbKey))
1508 {
1509 /* Key matches, check for a '=' (preceded by whitespace). */
1510 pszValue = pDescriptor->aLines[uStart] + cbKey;
1511 while (*pszValue == ' ' || *pszValue == '\t')
1512 pszValue++;
1513 if (*pszValue == '=')
1514 {
1515 *ppszValue = pszValue + 1;
1516 break;
1517 }
1518 }
1519 uStart = pDescriptor->aNextLines[uStart];
1520 }
1521 return !!uStart;
1522}
1523
1524static int vmdkDescSetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1525 unsigned uStart,
1526 const char *pszKey, const char *pszValue)
1527{
1528 char *pszTmp;
1529 size_t cbKey = strlen(pszKey);
1530 unsigned uLast = 0;
1531
1532 while (uStart != 0)
1533 {
1534 if (!strncmp(pDescriptor->aLines[uStart], pszKey, cbKey))
1535 {
1536 /* Key matches, check for a '=' (preceded by whitespace). */
1537 pszTmp = pDescriptor->aLines[uStart] + cbKey;
1538 while (*pszTmp == ' ' || *pszTmp == '\t')
1539 pszTmp++;
1540 if (*pszTmp == '=')
1541 {
1542 pszTmp++;
1543 while (*pszTmp == ' ' || *pszTmp == '\t')
1544 pszTmp++;
1545 break;
1546 }
1547 }
1548 if (!pDescriptor->aNextLines[uStart])
1549 uLast = uStart;
1550 uStart = pDescriptor->aNextLines[uStart];
1551 }
1552 if (uStart)
1553 {
1554 if (pszValue)
1555 {
1556 /* Key already exists, replace existing value. */
1557 size_t cbOldVal = strlen(pszTmp);
1558 size_t cbNewVal = strlen(pszValue);
1559 ssize_t cbDiff = cbNewVal - cbOldVal;
1560 /* Check for buffer overflow. */
1561 if ( pDescriptor->aLines[pDescriptor->cLines]
1562 - pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff)
1563 return vmdkError(pImage, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
1564
1565 memmove(pszTmp + cbNewVal, pszTmp + cbOldVal,
1566 pDescriptor->aLines[pDescriptor->cLines] - pszTmp - cbOldVal);
1567 memcpy(pszTmp, pszValue, cbNewVal + 1);
1568 for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
1569 pDescriptor->aLines[i] += cbDiff;
1570 }
1571 else
1572 {
1573 memmove(pDescriptor->aLines[uStart], pDescriptor->aLines[uStart+1],
1574 pDescriptor->aLines[pDescriptor->cLines] - pDescriptor->aLines[uStart+1] + 1);
1575 for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
1576 {
1577 pDescriptor->aLines[i-1] = pDescriptor->aLines[i];
1578 if (pDescriptor->aNextLines[i])
1579 pDescriptor->aNextLines[i-1] = pDescriptor->aNextLines[i] - 1;
1580 else
1581 pDescriptor->aNextLines[i-1] = 0;
1582 }
1583 pDescriptor->cLines--;
1584 /* Adjust starting line numbers of following descriptor sections. */
1585 if (uStart < pDescriptor->uFirstExtent)
1586 pDescriptor->uFirstExtent--;
1587 if (uStart < pDescriptor->uFirstDDB)
1588 pDescriptor->uFirstDDB--;
1589 }
1590 }
1591 else
1592 {
1593 /* Key doesn't exist, append after the last entry in this category. */
1594 if (!pszValue)
1595 {
1596 /* Key doesn't exist, and it should be removed. Simply a no-op. */
1597 return VINF_SUCCESS;
1598 }
1599 cbKey = strlen(pszKey);
1600 size_t cbValue = strlen(pszValue);
1601 ssize_t cbDiff = cbKey + 1 + cbValue + 1;
1602 /* Check for buffer overflow. */
1603 if ( (pDescriptor->cLines >= VMDK_DESCRIPTOR_LINES_MAX - 1)
1604 || ( pDescriptor->aLines[pDescriptor->cLines]
1605 - pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff))
1606 return vmdkError(pImage, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
1607 for (unsigned i = pDescriptor->cLines + 1; i > uLast + 1; i--)
1608 {
1609 pDescriptor->aLines[i] = pDescriptor->aLines[i - 1];
1610 if (pDescriptor->aNextLines[i - 1])
1611 pDescriptor->aNextLines[i] = pDescriptor->aNextLines[i - 1] + 1;
1612 else
1613 pDescriptor->aNextLines[i] = 0;
1614 }
1615 uStart = uLast + 1;
1616 pDescriptor->aNextLines[uLast] = uStart;
1617 pDescriptor->aNextLines[uStart] = 0;
1618 pDescriptor->cLines++;
1619 pszTmp = pDescriptor->aLines[uStart];
1620 memmove(pszTmp + cbDiff, pszTmp,
1621 pDescriptor->aLines[pDescriptor->cLines] - pszTmp);
1622 memcpy(pDescriptor->aLines[uStart], pszKey, cbKey);
1623 pDescriptor->aLines[uStart][cbKey] = '=';
1624 memcpy(pDescriptor->aLines[uStart] + cbKey + 1, pszValue, cbValue + 1);
1625 for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
1626 pDescriptor->aLines[i] += cbDiff;
1627
1628 /* Adjust starting line numbers of following descriptor sections. */
1629 if (uStart <= pDescriptor->uFirstExtent)
1630 pDescriptor->uFirstExtent++;
1631 if (uStart <= pDescriptor->uFirstDDB)
1632 pDescriptor->uFirstDDB++;
1633 }
1634 pDescriptor->fDirty = true;
1635 return VINF_SUCCESS;
1636}
1637
1638static int vmdkDescBaseGetU32(PVMDKDESCRIPTOR pDescriptor, const char *pszKey,
1639 uint32_t *puValue)
1640{
1641 const char *pszValue;
1642
1643 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDesc, pszKey,
1644 &pszValue))
1645 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1646 return RTStrToUInt32Ex(pszValue, NULL, 10, puValue);
1647}
1648
1649static int vmdkDescBaseGetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1650 const char *pszKey, const char **ppszValue)
1651{
1652 const char *pszValue;
1653 char *pszValueUnquoted;
1654
1655 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDesc, pszKey,
1656 &pszValue))
1657 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1658 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1659 if (RT_FAILURE(rc))
1660 return rc;
1661 *ppszValue = pszValueUnquoted;
1662 return rc;
1663}
1664
1665static int vmdkDescBaseSetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1666 const char *pszKey, const char *pszValue)
1667{
1668 char *pszValueQuoted;
1669
1670 int rc = RTStrAPrintf(&pszValueQuoted, "\"%s\"", pszValue);
1671 if (RT_FAILURE(rc))
1672 return rc;
1673 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDesc, pszKey,
1674 pszValueQuoted);
1675 RTStrFree(pszValueQuoted);
1676 return rc;
1677}
1678
1679static void vmdkDescExtRemoveDummy(PVMDKIMAGE pImage,
1680 PVMDKDESCRIPTOR pDescriptor)
1681{
1682 unsigned uEntry = pDescriptor->uFirstExtent;
1683 ssize_t cbDiff;
1684
1685 if (!uEntry)
1686 return;
1687
1688 cbDiff = strlen(pDescriptor->aLines[uEntry]) + 1;
1689 /* Move everything including \0 in the entry marking the end of buffer. */
1690 memmove(pDescriptor->aLines[uEntry], pDescriptor->aLines[uEntry + 1],
1691 pDescriptor->aLines[pDescriptor->cLines] - pDescriptor->aLines[uEntry + 1] + 1);
1692 for (unsigned i = uEntry + 1; i <= pDescriptor->cLines; i++)
1693 {
1694 pDescriptor->aLines[i - 1] = pDescriptor->aLines[i] - cbDiff;
1695 if (pDescriptor->aNextLines[i])
1696 pDescriptor->aNextLines[i - 1] = pDescriptor->aNextLines[i] - 1;
1697 else
1698 pDescriptor->aNextLines[i - 1] = 0;
1699 }
1700 pDescriptor->cLines--;
1701 if (pDescriptor->uFirstDDB)
1702 pDescriptor->uFirstDDB--;
1703
1704 return;
1705}
1706
1707static int vmdkDescExtInsert(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1708 VMDKACCESS enmAccess, uint64_t cNominalSectors,
1709 VMDKETYPE enmType, const char *pszBasename,
1710 uint64_t uSectorOffset)
1711{
1712 static const char *apszAccess[] = { "NOACCESS", "RDONLY", "RW" };
1713 static const char *apszType[] = { "", "SPARSE", "FLAT", "ZERO", "VMFS" };
1714 char *pszTmp;
1715 unsigned uStart = pDescriptor->uFirstExtent, uLast = 0;
1716 char szExt[1024];
1717 ssize_t cbDiff;
1718
1719 Assert((unsigned)enmAccess < RT_ELEMENTS(apszAccess));
1720 Assert((unsigned)enmType < RT_ELEMENTS(apszType));
1721
1722 /* Find last entry in extent description. */
1723 while (uStart)
1724 {
1725 if (!pDescriptor->aNextLines[uStart])
1726 uLast = uStart;
1727 uStart = pDescriptor->aNextLines[uStart];
1728 }
1729
1730 if (enmType == VMDKETYPE_ZERO)
1731 {
1732 RTStrPrintf(szExt, sizeof(szExt), "%s %llu %s ", apszAccess[enmAccess],
1733 cNominalSectors, apszType[enmType]);
1734 }
1735 else if (enmType == VMDKETYPE_FLAT)
1736 {
1737 RTStrPrintf(szExt, sizeof(szExt), "%s %llu %s \"%s\" %llu",
1738 apszAccess[enmAccess], cNominalSectors,
1739 apszType[enmType], pszBasename, uSectorOffset);
1740 }
1741 else
1742 {
1743 RTStrPrintf(szExt, sizeof(szExt), "%s %llu %s \"%s\"",
1744 apszAccess[enmAccess], cNominalSectors,
1745 apszType[enmType], pszBasename);
1746 }
1747 cbDiff = strlen(szExt) + 1;
1748
1749 /* Check for buffer overflow. */
1750 if ( (pDescriptor->cLines >= VMDK_DESCRIPTOR_LINES_MAX - 1)
1751 || ( pDescriptor->aLines[pDescriptor->cLines]
1752 - pDescriptor->aLines[0] > (ptrdiff_t)pDescriptor->cbDescAlloc - cbDiff))
1753 return vmdkError(pImage, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
1754
1755 for (unsigned i = pDescriptor->cLines + 1; i > uLast + 1; i--)
1756 {
1757 pDescriptor->aLines[i] = pDescriptor->aLines[i - 1];
1758 if (pDescriptor->aNextLines[i - 1])
1759 pDescriptor->aNextLines[i] = pDescriptor->aNextLines[i - 1] + 1;
1760 else
1761 pDescriptor->aNextLines[i] = 0;
1762 }
1763 uStart = uLast + 1;
1764 pDescriptor->aNextLines[uLast] = uStart;
1765 pDescriptor->aNextLines[uStart] = 0;
1766 pDescriptor->cLines++;
1767 pszTmp = pDescriptor->aLines[uStart];
1768 memmove(pszTmp + cbDiff, pszTmp,
1769 pDescriptor->aLines[pDescriptor->cLines] - pszTmp);
1770 memcpy(pDescriptor->aLines[uStart], szExt, cbDiff);
1771 for (unsigned i = uStart + 1; i <= pDescriptor->cLines; i++)
1772 pDescriptor->aLines[i] += cbDiff;
1773
1774 /* Adjust starting line numbers of following descriptor sections. */
1775 if (uStart <= pDescriptor->uFirstDDB)
1776 pDescriptor->uFirstDDB++;
1777
1778 pDescriptor->fDirty = true;
1779 return VINF_SUCCESS;
1780}
1781
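/**
 * Internal: get a string value from the disk database section of the
 * descriptor. Returns an unquoted copy which the caller must free.
 */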
1782static int vmdkDescDDBGetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1783 const char *pszKey, const char **ppszValue)
1784{
1785 const char *pszValue;
1786 char *pszValueUnquoted;
1787
1788 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDDB, pszKey,
1789 &pszValue))
1790 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1791 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1792 if (RT_FAILURE(rc))
1793 return rc;
1794 *ppszValue = pszValueUnquoted;
1795 return rc;
1796}
1797
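/**
 * Internal: get an unsigned 32-bit value (stored as a decimal string)
 * from the disk database section of the descriptor.
 */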
1798static int vmdkDescDDBGetU32(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1799 const char *pszKey, uint32_t *puValue)
1800{
1801 const char *pszValue;
1802 char *pszValueUnquoted;
1803
1804 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDDB, pszKey,
1805 &pszValue))
1806 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1807 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1808 if (RT_FAILURE(rc))
1809 return rc;
1810 rc = RTStrToUInt32Ex(pszValueUnquoted, NULL, 10, puValue);
1811 RTMemTmpFree(pszValueUnquoted);
1812 return rc;
1813}
1814
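/**
 * Internal: get a UUID value from the disk database section of the
 * descriptor.
 */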
1815static int vmdkDescDDBGetUuid(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1816 const char *pszKey, PRTUUID pUuid)
1817{
1818 const char *pszValue;
1819 char *pszValueUnquoted;
1820
1821 if (!vmdkDescGetStr(pDescriptor, pDescriptor->uFirstDDB, pszKey,
1822 &pszValue))
1823 return VERR_VD_VMDK_VALUE_NOT_FOUND;
1824 int rc = vmdkStringUnquote(pImage, pszValue, &pszValueUnquoted, NULL);
1825 if (RT_FAILURE(rc))
1826 return rc;
1827 rc = RTUuidFromStr(pUuid, pszValueUnquoted);
1828 RTMemTmpFree(pszValueUnquoted);
1829 return rc;
1830}
1831
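/**
 * Internal: set a string value in the disk database section of the
 * descriptor. The value is written with surrounding quotes.
 */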
1832static int vmdkDescDDBSetStr(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1833 const char *pszKey, const char *pszVal)
1834{
1835 int rc;
1836 char *pszValQuoted;
1837
1838 if (pszVal)
1839 {
1840 rc = RTStrAPrintf(&pszValQuoted, "\"%s\"", pszVal);
1841 if (RT_FAILURE(rc))
1842 return rc;
1843 }
1844 else
1845 pszValQuoted = NULL;
1846 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDDB, pszKey,
1847 pszValQuoted);
1848 if (pszValQuoted)
1849 RTStrFree(pszValQuoted);
1850 return rc;
1851}
1852
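/**
 * Internal: set a UUID value in the disk database section of the
 * descriptor, stored as a quoted string.
 */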
1853static int vmdkDescDDBSetUuid(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1854 const char *pszKey, PCRTUUID pUuid)
1855{
1856 char *pszUuid;
1857
1858 int rc = RTStrAPrintf(&pszUuid, "\"%RTuuid\"", pUuid);
1859 if (RT_FAILURE(rc))
1860 return rc;
1861 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDDB, pszKey,
1862 pszUuid);
1863 RTStrFree(pszUuid);
1864 return rc;
1865}
1866
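/**
 * Internal: set an unsigned 32-bit value in the disk database section of
 * the descriptor, stored as a quoted decimal string.
 */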
1867static int vmdkDescDDBSetU32(PVMDKIMAGE pImage, PVMDKDESCRIPTOR pDescriptor,
1868 const char *pszKey, uint32_t uValue)
1869{
1870 char *pszValue;
1871
1872 int rc = RTStrAPrintf(&pszValue, "\"%u\"", uValue);
1873 if (RT_FAILURE(rc))
1874 return rc;
1875 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDDB, pszKey,
1876 pszValue);
1877 RTStrFree(pszValue);
1878 return rc;
1879}
1880
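/**
 * Internal: split the raw descriptor data into lines (in place), check the
 * descriptor header line and record where the descriptor, extent
 * description and disk database sections start.
 */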
1881static int vmdkPreprocessDescriptor(PVMDKIMAGE pImage, char *pDescData,
1882 size_t cbDescData,
1883 PVMDKDESCRIPTOR pDescriptor)
1884{
1885 int rc = VINF_SUCCESS;
1886 unsigned cLine = 0, uLastNonEmptyLine = 0;
1887 char *pTmp = pDescData;
1888
1889 pDescriptor->cbDescAlloc = cbDescData;
1890 while (*pTmp != '\0')
1891 {
1892 pDescriptor->aLines[cLine++] = pTmp;
1893 if (cLine >= VMDK_DESCRIPTOR_LINES_MAX)
1894 {
1895 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
1896 goto out;
1897 }
1898
1899 while (*pTmp != '\0' && *pTmp != '\n')
1900 {
1901 if (*pTmp == '\r')
1902 {
1903 if (*(pTmp + 1) != '\n')
1904 {
1905 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: unsupported end of line in descriptor in '%s'"), pImage->pszFilename);
1906 goto out;
1907 }
1908 else
1909 {
1910 /* Get rid of CR character. */
1911 *pTmp = '\0';
1912 }
1913 }
1914 pTmp++;
1915 }
1916 /* Get rid of LF character. */
1917 if (*pTmp == '\n')
1918 {
1919 *pTmp = '\0';
1920 pTmp++;
1921 }
1922 }
1923 pDescriptor->cLines = cLine;
1924 /* Pointer right after the end of the used part of the buffer. */
1925 pDescriptor->aLines[cLine] = pTmp;
1926
1927 if ( strcmp(pDescriptor->aLines[0], "# Disk DescriptorFile")
1928 && strcmp(pDescriptor->aLines[0], "# Disk Descriptor File"))
1929 {
1930 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: descriptor does not start as expected in '%s'"), pImage->pszFilename);
1931 goto out;
1932 }
1933
1934 /* Initialize the section start indices, because we need to be able to reopen an image. */
1935 pDescriptor->uFirstDesc = 0;
1936 pDescriptor->uFirstExtent = 0;
1937 pDescriptor->uFirstDDB = 0;
1938 for (unsigned i = 0; i < cLine; i++)
1939 {
1940 if (*pDescriptor->aLines[i] != '#' && *pDescriptor->aLines[i] != '\0')
1941 {
1942 if ( !strncmp(pDescriptor->aLines[i], "RW", 2)
1943 || !strncmp(pDescriptor->aLines[i], "RDONLY", 6)
1944 || !strncmp(pDescriptor->aLines[i], "NOACCESS", 8) )
1945 {
1946 /* An extent descriptor. */
1947 if (!pDescriptor->uFirstDesc || pDescriptor->uFirstDDB)
1948 {
1949 /* Incorrect ordering of entries. */
1950 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrect ordering of entries in descriptor in '%s'"), pImage->pszFilename);
1951 goto out;
1952 }
1953 if (!pDescriptor->uFirstExtent)
1954 {
1955 pDescriptor->uFirstExtent = i;
1956 uLastNonEmptyLine = 0;
1957 }
1958 }
1959 else if (!strncmp(pDescriptor->aLines[i], "ddb.", 4))
1960 {
1961 /* A disk database entry. */
1962 if (!pDescriptor->uFirstDesc || !pDescriptor->uFirstExtent)
1963 {
1964 /* Incorrect ordering of entries. */
1965 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrect ordering of entries in descriptor in '%s'"), pImage->pszFilename);
1966 goto out;
1967 }
1968 if (!pDescriptor->uFirstDDB)
1969 {
1970 pDescriptor->uFirstDDB = i;
1971 uLastNonEmptyLine = 0;
1972 }
1973 }
1974 else
1975 {
1976 /* A normal entry. */
1977 if (pDescriptor->uFirstExtent || pDescriptor->uFirstDDB)
1978 {
1979 /* Incorrect ordering of entries. */
1980 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrect ordering of entries in descriptor in '%s'"), pImage->pszFilename);
1981 goto out;
1982 }
1983 if (!pDescriptor->uFirstDesc)
1984 {
1985 pDescriptor->uFirstDesc = i;
1986 uLastNonEmptyLine = 0;
1987 }
1988 }
1989 if (uLastNonEmptyLine)
1990 pDescriptor->aNextLines[uLastNonEmptyLine] = i;
1991 uLastNonEmptyLine = i;
1992 }
1993 }
1994
1995out:
1996 return rc;
1997}
1998
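/**
 * Internal: store the physical CHS geometry in the disk database section
 * of the descriptor.
 */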
1999static int vmdkDescSetPCHSGeometry(PVMDKIMAGE pImage,
2000 PCPDMMEDIAGEOMETRY pPCHSGeometry)
2001{
2002 int rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2003 VMDK_DDB_GEO_PCHS_CYLINDERS,
2004 pPCHSGeometry->cCylinders);
2005 if (RT_FAILURE(rc))
2006 return rc;
2007 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2008 VMDK_DDB_GEO_PCHS_HEADS,
2009 pPCHSGeometry->cHeads);
2010 if (RT_FAILURE(rc))
2011 return rc;
2012 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2013 VMDK_DDB_GEO_PCHS_SECTORS,
2014 pPCHSGeometry->cSectors);
2015 return rc;
2016}
2017
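/**
 * Internal: store the logical (BIOS) CHS geometry in the disk database
 * section of the descriptor.
 */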
2018static int vmdkDescSetLCHSGeometry(PVMDKIMAGE pImage,
2019 PCPDMMEDIAGEOMETRY pLCHSGeometry)
2020{
2021 int rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2022 VMDK_DDB_GEO_LCHS_CYLINDERS,
2023 pLCHSGeometry->cCylinders);
2024 if (RT_FAILURE(rc))
2025 return rc;
2026 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2027 VMDK_DDB_GEO_LCHS_HEADS,
2028 pLCHSGeometry->cHeads);
2029 if (RT_FAILURE(rc))
2030 return rc;
2031 rc = vmdkDescDDBSetU32(pImage, &pImage->Descriptor,
2032 VMDK_DDB_GEO_LCHS_SECTORS,
2033 pLCHSGeometry->cSectors);
2034 return rc;
2035}
2036
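/**
 * Internal: create a fresh descriptor from scratch: header with version,
 * random CID and parentCID=ffffffff, a dummy "NOACCESS 0 ZERO" extent line
 * and the initial disk database entries (virtualHWVersion, adapterType).
 * The remaining keys are filled in later through the regular setters.
 */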
2037static int vmdkCreateDescriptor(PVMDKIMAGE pImage, char *pDescData,
2038 size_t cbDescData, PVMDKDESCRIPTOR pDescriptor)
2039{
2040 int rc;
2041
2042 pDescriptor->uFirstDesc = 0;
2043 pDescriptor->uFirstExtent = 0;
2044 pDescriptor->uFirstDDB = 0;
2045 pDescriptor->cLines = 0;
2046 pDescriptor->cbDescAlloc = cbDescData;
2047 pDescriptor->fDirty = false;
2048 pDescriptor->aLines[pDescriptor->cLines] = pDescData;
2049 memset(pDescriptor->aNextLines, '\0', sizeof(pDescriptor->aNextLines));
2050
2051 rc = vmdkDescInitStr(pImage, pDescriptor, "# Disk DescriptorFile");
2052 if (RT_FAILURE(rc))
2053 goto out;
2054 rc = vmdkDescInitStr(pImage, pDescriptor, "version=1");
2055 if (RT_FAILURE(rc))
2056 goto out;
2057 pDescriptor->uFirstDesc = pDescriptor->cLines - 1;
2058 rc = vmdkDescInitStr(pImage, pDescriptor, "");
2059 if (RT_FAILURE(rc))
2060 goto out;
2061 rc = vmdkDescInitStr(pImage, pDescriptor, "# Extent description");
2062 if (RT_FAILURE(rc))
2063 goto out;
2064 rc = vmdkDescInitStr(pImage, pDescriptor, "NOACCESS 0 ZERO ");
2065 if (RT_FAILURE(rc))
2066 goto out;
2067 pDescriptor->uFirstExtent = pDescriptor->cLines - 1;
2068 rc = vmdkDescInitStr(pImage, pDescriptor, "");
2069 if (RT_FAILURE(rc))
2070 goto out;
2071 /* The trailing space is created by VMware, too. */
2072 rc = vmdkDescInitStr(pImage, pDescriptor, "# The disk Data Base ");
2073 if (RT_FAILURE(rc))
2074 goto out;
2075 rc = vmdkDescInitStr(pImage, pDescriptor, "#DDB");
2076 if (RT_FAILURE(rc))
2077 goto out;
2078 rc = vmdkDescInitStr(pImage, pDescriptor, "");
2079 if (RT_FAILURE(rc))
2080 goto out;
2081 rc = vmdkDescInitStr(pImage, pDescriptor, "ddb.virtualHWVersion = \"4\"");
2082 if (RT_FAILURE(rc))
2083 goto out;
2084 pDescriptor->uFirstDDB = pDescriptor->cLines - 1;
2085
2086 /* Now that the framework is in place, use the normal functions to insert
2087 * the remaining keys. */
2088 char szBuf[9];
2089 RTStrPrintf(szBuf, sizeof(szBuf), "%08x", RTRandU32());
2090 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDesc,
2091 "CID", szBuf);
2092 if (RT_FAILURE(rc))
2093 goto out;
2094 rc = vmdkDescSetStr(pImage, pDescriptor, pDescriptor->uFirstDesc,
2095 "parentCID", "ffffffff");
2096 if (RT_FAILURE(rc))
2097 goto out;
2098
2099 rc = vmdkDescDDBSetStr(pImage, pDescriptor, "ddb.adapterType", "ide");
2100 if (RT_FAILURE(rc))
2101 goto out;
2102
2103out:
2104 return rc;
2105}
2106
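/**
 * Internal: parse the descriptor: check the version, derive the image
 * flags from createType, read the extent lines into the extent array and
 * fetch geometry and UUID information from the disk database.
 */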
2107static int vmdkParseDescriptor(PVMDKIMAGE pImage, char *pDescData,
2108 size_t cbDescData)
2109{
2110 int rc;
2111 unsigned cExtents;
2112 unsigned uLine;
2113 unsigned i;
2114
2115 rc = vmdkPreprocessDescriptor(pImage, pDescData, cbDescData,
2116 &pImage->Descriptor);
2117 if (RT_FAILURE(rc))
2118 return rc;
2119
2120 /* Check version, must be 1. */
2121 uint32_t uVersion;
2122 rc = vmdkDescBaseGetU32(&pImage->Descriptor, "version", &uVersion);
2123 if (RT_FAILURE(rc))
2124 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error finding key 'version' in descriptor in '%s'"), pImage->pszFilename);
2125 if (uVersion != 1)
2126 return vmdkError(pImage, VERR_VD_VMDK_UNSUPPORTED_VERSION, RT_SRC_POS, N_("VMDK: unsupported format version in descriptor in '%s'"), pImage->pszFilename);
2127
2128 /* Get image creation type and determine image flags. */
2129 const char *pszCreateType = NULL; /* initialized to make gcc shut up */
2130 rc = vmdkDescBaseGetStr(pImage, &pImage->Descriptor, "createType",
2131 &pszCreateType);
2132 if (RT_FAILURE(rc))
2133 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot get image type from descriptor in '%s'"), pImage->pszFilename);
2134 if ( !strcmp(pszCreateType, "twoGbMaxExtentSparse")
2135 || !strcmp(pszCreateType, "twoGbMaxExtentFlat"))
2136 pImage->uImageFlags |= VD_VMDK_IMAGE_FLAGS_SPLIT_2G;
2137 else if ( !strcmp(pszCreateType, "partitionedDevice")
2138 || !strcmp(pszCreateType, "fullDevice"))
2139 pImage->uImageFlags |= VD_VMDK_IMAGE_FLAGS_RAWDISK;
2140 else if (!strcmp(pszCreateType, "streamOptimized"))
2141 pImage->uImageFlags |= VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED;
2142 else if (!strcmp(pszCreateType, "vmfs"))
2143 pImage->uImageFlags |= VD_IMAGE_FLAGS_FIXED | VD_VMDK_IMAGE_FLAGS_ESX;
2144 RTStrFree((char *)(void *)pszCreateType);
2145
2146 /* Count the number of extent config entries. */
2147 for (uLine = pImage->Descriptor.uFirstExtent, cExtents = 0;
2148 uLine != 0;
2149 uLine = pImage->Descriptor.aNextLines[uLine], cExtents++)
2150 /* nothing */;
2151
2152 if (!pImage->pDescData && cExtents != 1)
2153 {
2154 /* Monolithic image, must have only one extent (already opened). */
2155 return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: monolithic image may only have one extent in '%s'"), pImage->pszFilename);
2156 }
2157
2158 if (pImage->pDescData)
2159 {
2160 /* Non-monolithic image, extents need to be allocated. */
2161 rc = vmdkCreateExtents(pImage, cExtents);
2162 if (RT_FAILURE(rc))
2163 return rc;
2164 }
2165
2166 for (i = 0, uLine = pImage->Descriptor.uFirstExtent;
2167 i < cExtents; i++, uLine = pImage->Descriptor.aNextLines[uLine])
2168 {
2169 char *pszLine = pImage->Descriptor.aLines[uLine];
2170
2171 /* Access type of the extent. */
2172 if (!strncmp(pszLine, "RW", 2))
2173 {
2174 pImage->pExtents[i].enmAccess = VMDKACCESS_READWRITE;
2175 pszLine += 2;
2176 }
2177 else if (!strncmp(pszLine, "RDONLY", 6))
2178 {
2179 pImage->pExtents[i].enmAccess = VMDKACCESS_READONLY;
2180 pszLine += 6;
2181 }
2182 else if (!strncmp(pszLine, "NOACCESS", 8))
2183 {
2184 pImage->pExtents[i].enmAccess = VMDKACCESS_NOACCESS;
2185 pszLine += 8;
2186 }
2187 else
2188 return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2189 if (*pszLine++ != ' ')
2190 return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2191
2192 /* Nominal size of the extent. */
2193 rc = RTStrToUInt64Ex(pszLine, &pszLine, 10,
2194 &pImage->pExtents[i].cNominalSectors);
2195 if (RT_FAILURE(rc))
2196 return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2197 if (*pszLine++ != ' ')
2198 return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2199
2200 /* Type of the extent. */
2201#ifdef VBOX_WITH_VMDK_ESX
2202 /** @todo Add the ESX extent types. Not necessary for now because
2203 * the ESX extent types are only used inside an ESX server. They are
2204 * automatically converted if the VMDK is exported. */
2205#endif /* VBOX_WITH_VMDK_ESX */
2206 if (!strncmp(pszLine, "SPARSE", 6))
2207 {
2208 pImage->pExtents[i].enmType = VMDKETYPE_HOSTED_SPARSE;
2209 pszLine += 6;
2210 }
2211 else if (!strncmp(pszLine, "FLAT", 4))
2212 {
2213 pImage->pExtents[i].enmType = VMDKETYPE_FLAT;
2214 pszLine += 4;
2215 }
2216 else if (!strncmp(pszLine, "ZERO", 4))
2217 {
2218 pImage->pExtents[i].enmType = VMDKETYPE_ZERO;
2219 pszLine += 4;
2220 }
2221 else if (!strncmp(pszLine, "VMFS", 4))
2222 {
2223 pImage->pExtents[i].enmType = VMDKETYPE_VMFS;
2224 pszLine += 4;
2225 }
2226 else
2227 return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2228 if (pImage->pExtents[i].enmType == VMDKETYPE_ZERO)
2229 {
2230 /* This one has no basename or offset. */
2231 if (*pszLine == ' ')
2232 pszLine++;
2233 if (*pszLine != '\0')
2234 return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2235 pImage->pExtents[i].pszBasename = NULL;
2236 }
2237 else
2238 {
2239 /* All other extent types have basename and optional offset. */
2240 if (*pszLine++ != ' ')
2241 return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2242
2243 /* Basename of the image. Surrounded by quotes. */
2244 char *pszBasename;
2245 rc = vmdkStringUnquote(pImage, pszLine, &pszBasename, &pszLine);
2246 if (RT_FAILURE(rc))
2247 return rc;
2248 pImage->pExtents[i].pszBasename = pszBasename;
2249 if (*pszLine == ' ')
2250 {
2251 pszLine++;
2252 if (*pszLine != '\0')
2253 {
2254 /* Optional offset in extent specified. */
2255 rc = RTStrToUInt64Ex(pszLine, &pszLine, 10,
2256 &pImage->pExtents[i].uSectorOffset);
2257 if (RT_FAILURE(rc))
2258 return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2259 }
2260 }
2261
2262 if (*pszLine != '\0')
2263 return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
2264 }
2265 }
2266
2267 /* Determine PCHS geometry (autogenerate if necessary). */
2268 rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
2269 VMDK_DDB_GEO_PCHS_CYLINDERS,
2270 &pImage->PCHSGeometry.cCylinders);
2271 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2272 pImage->PCHSGeometry.cCylinders = 0;
2273 else if (RT_FAILURE(rc))
2274 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting PCHS geometry from extent description in '%s'"), pImage->pszFilename);
2275 rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
2276 VMDK_DDB_GEO_PCHS_HEADS,
2277 &pImage->PCHSGeometry.cHeads);
2278 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2279 pImage->PCHSGeometry.cHeads = 0;
2280 else if (RT_FAILURE(rc))
2281 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting PCHS geometry from extent description in '%s'"), pImage->pszFilename);
2282 rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
2283 VMDK_DDB_GEO_PCHS_SECTORS,
2284 &pImage->PCHSGeometry.cSectors);
2285 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2286 pImage->PCHSGeometry.cSectors = 0;
2287 else if (RT_FAILURE(rc))
2288 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting PCHS geometry from extent description in '%s'"), pImage->pszFilename);
2289 if ( pImage->PCHSGeometry.cCylinders == 0
2290 || pImage->PCHSGeometry.cHeads == 0
2291 || pImage->PCHSGeometry.cHeads > 16
2292 || pImage->PCHSGeometry.cSectors == 0
2293 || pImage->PCHSGeometry.cSectors > 63)
2294 {
2295 /* Mark PCHS geometry as not yet valid (can't do the calculation here
2296 * as the total image size isn't known yet). */
2297 pImage->PCHSGeometry.cCylinders = 0;
2298 pImage->PCHSGeometry.cHeads = 16;
2299 pImage->PCHSGeometry.cSectors = 63;
2300 }
2301
2302 /* Determine LCHS geometry (set to 0 if not specified). */
2303 rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
2304 VMDK_DDB_GEO_LCHS_CYLINDERS,
2305 &pImage->LCHSGeometry.cCylinders);
2306 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2307 pImage->LCHSGeometry.cCylinders = 0;
2308 else if (RT_FAILURE(rc))
2309 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting LCHS geometry from extent description in '%s'"), pImage->pszFilename);
2310 rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
2311 VMDK_DDB_GEO_LCHS_HEADS,
2312 &pImage->LCHSGeometry.cHeads);
2313 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2314 pImage->LCHSGeometry.cHeads = 0;
2315 else if (RT_FAILURE(rc))
2316 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting LCHS geometry from extent description in '%s'"), pImage->pszFilename);
2317 rc = vmdkDescDDBGetU32(pImage, &pImage->Descriptor,
2318 VMDK_DDB_GEO_LCHS_SECTORS,
2319 &pImage->LCHSGeometry.cSectors);
2320 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2321 pImage->LCHSGeometry.cSectors = 0;
2322 else if (RT_FAILURE(rc))
2323 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting LCHS geometry from extent description in '%s'"), pImage->pszFilename);
2324 if ( pImage->LCHSGeometry.cCylinders == 0
2325 || pImage->LCHSGeometry.cHeads == 0
2326 || pImage->LCHSGeometry.cSectors == 0)
2327 {
2328 pImage->LCHSGeometry.cCylinders = 0;
2329 pImage->LCHSGeometry.cHeads = 0;
2330 pImage->LCHSGeometry.cSectors = 0;
2331 }
2332
2333 /* Get image UUID. */
2334 rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor, VMDK_DDB_IMAGE_UUID,
2335 &pImage->ImageUuid);
2336 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2337 {
2338 /* Image without UUID. Probably created by VMware and not yet used
2339 * by VirtualBox. Can only be added for images opened in read/write
2340 * mode, so don't bother producing a sensible UUID otherwise. */
2341 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2342 RTUuidClear(&pImage->ImageUuid);
2343 else
2344 {
2345 rc = RTUuidCreate(&pImage->ImageUuid);
2346 if (RT_FAILURE(rc))
2347 return rc;
2348 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
2349 VMDK_DDB_IMAGE_UUID, &pImage->ImageUuid);
2350 if (RT_FAILURE(rc))
2351 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing image UUID in descriptor in '%s'"), pImage->pszFilename);
2352 }
2353 }
2354 else if (RT_FAILURE(rc))
2355 return rc;
2356
2357 /* Get image modification UUID. */
2358 rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor,
2359 VMDK_DDB_MODIFICATION_UUID,
2360 &pImage->ModificationUuid);
2361 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2362 {
2363 /* Image without UUID. Probably created by VMware and not yet used
2364 * by VirtualBox. Can only be added for images opened in read/write
2365 * mode, so don't bother producing a sensible UUID otherwise. */
2366 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2367 RTUuidClear(&pImage->ModificationUuid);
2368 else
2369 {
2370 rc = RTUuidCreate(&pImage->ModificationUuid);
2371 if (RT_FAILURE(rc))
2372 return rc;
2373 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
2374 VMDK_DDB_MODIFICATION_UUID,
2375 &pImage->ModificationUuid);
2376 if (RT_FAILURE(rc))
2377 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing image modification UUID in descriptor in '%s'"), pImage->pszFilename);
2378 }
2379 }
2380 else if (RT_FAILURE(rc))
2381 return rc;
2382
2383 /* Get UUID of parent image. */
2384 rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor, VMDK_DDB_PARENT_UUID,
2385 &pImage->ParentUuid);
2386 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2387 {
2388 /* Image without UUID. Probably created by VMware and not yet used
2389 * by VirtualBox. Can only be added for images opened in read/write
2390 * mode, so don't bother producing a sensible UUID otherwise. */
2391 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2392 RTUuidClear(&pImage->ParentUuid);
2393 else
2394 {
2395 rc = RTUuidClear(&pImage->ParentUuid);
2396 if (RT_FAILURE(rc))
2397 return rc;
2398 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
2399 VMDK_DDB_PARENT_UUID, &pImage->ParentUuid);
2400 if (RT_FAILURE(rc))
2401 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing parent UUID in descriptor in '%s'"), pImage->pszFilename);
2402 }
2403 }
2404 else if (RT_FAILURE(rc))
2405 return rc;
2406
2407 /* Get parent image modification UUID. */
2408 rc = vmdkDescDDBGetUuid(pImage, &pImage->Descriptor,
2409 VMDK_DDB_PARENT_MODIFICATION_UUID,
2410 &pImage->ParentModificationUuid);
2411 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
2412 {
2413 /* Image without UUID. Probably created by VMware and not yet used
2414 * by VirtualBox. Can only be added for images opened in read/write
2415 * mode, so don't bother producing a sensible UUID otherwise. */
2416 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
2417 RTUuidClear(&pImage->ParentModificationUuid);
2418 else
2419 {
2420 rc = RTUuidCreate(&pImage->ParentModificationUuid);
2421 if (RT_FAILURE(rc))
2422 return rc;
2423 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
2424 VMDK_DDB_PARENT_MODIFICATION_UUID,
2425 &pImage->ParentModificationUuid);
2426 if (RT_FAILURE(rc))
2427 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing parent modification UUID in descriptor in '%s'"), pImage->pszFilename);
2428 }
2429 }
2430 else if (RT_FAILURE(rc))
2431 return rc;
2432
2433 return VINF_SUCCESS;
2434}
2435
2436/**
2437 * Internal: write/update the descriptor part of the image.
2438 */
2439static int vmdkWriteDescriptor(PVMDKIMAGE pImage)
2440{
2441 int rc = VINF_SUCCESS;
2442 uint64_t cbLimit;
2443 uint64_t uOffset;
2444 PVMDKFILE pDescFile;
2445
2446 if (pImage->pDescData)
2447 {
2448 /* Separate descriptor file. */
2449 uOffset = 0;
2450 cbLimit = 0;
2451 pDescFile = pImage->pFile;
2452 }
2453 else
2454 {
2455 /* Embedded descriptor file. */
2456 uOffset = VMDK_SECTOR2BYTE(pImage->pExtents[0].uDescriptorSector);
2457 cbLimit = VMDK_SECTOR2BYTE(pImage->pExtents[0].cDescriptorSectors);
2458 pDescFile = pImage->pExtents[0].pFile;
2459 }
2460 /* Bail out if there is no file to write to. */
2461 if (pDescFile == NULL)
2462 return VERR_INVALID_PARAMETER;
2463
2464 /*
2465 * Allocate temporary descriptor buffer.
2466 * In case there is no limit allocate a default
2467 * and increase if required.
2468 */
2469 size_t cbDescriptor = cbLimit ? cbLimit : 4 * _1K;
2470 char *pszDescriptor = (char *)RTMemAllocZ(cbDescriptor);
2471 unsigned offDescriptor = 0;
2472
2473 if (!pszDescriptor)
2474 return VERR_NO_MEMORY;
2475
2476 for (unsigned i = 0; i < pImage->Descriptor.cLines; i++)
2477 {
2478 const char *psz = pImage->Descriptor.aLines[i];
2479 size_t cb = strlen(psz);
2480
2481 /*
2482 * Increase the descriptor if there is no limit and
2483 * there is not enough room left for this line.
2484 */
2485 if (offDescriptor + cb + 1 > cbDescriptor)
2486 {
2487 if (cbLimit)
2488 {
2489 rc = vmdkError(pImage, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too long in '%s'"), pImage->pszFilename);
2490 break;
2491 }
2492 else
2493 {
2494 char *pszDescriptorNew = NULL;
2495 LogFlow(("Increasing descriptor cache\n"));
2496
2497 pszDescriptorNew = (char *)RTMemRealloc(pszDescriptor, cbDescriptor + cb + 4 * _1K);
2498 if (!pszDescriptorNew)
2499 {
2500 rc = VERR_NO_MEMORY;
2501 break;
2502 }
2503 pszDescriptor = pszDescriptorNew;
2504 cbDescriptor += cb + 4 * _1K;
2505 }
2506 }
2507
2508 if (cb > 0)
2509 {
2510 memcpy(pszDescriptor + offDescriptor, psz, cb);
2511 offDescriptor += cb;
2512 }
2513
2514 memcpy(pszDescriptor + offDescriptor, "\n", 1);
2515 offDescriptor++;
2516 }
2517
2518 if (RT_SUCCESS(rc))
2519 {
2520 rc = vmdkFileWriteAt(pDescFile, uOffset, pszDescriptor, cbLimit ? cbLimit : offDescriptor, NULL);
2521 if (RT_FAILURE(rc))
2522 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error writing descriptor in '%s'"), pImage->pszFilename);
2523 }
2524
2525 if (RT_SUCCESS(rc) && !cbLimit)
2526 {
2527 rc = vmdkFileSetSize(pDescFile, offDescriptor);
2528 if (RT_FAILURE(rc))
2529 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error truncating descriptor in '%s'"), pImage->pszFilename);
2530 }
2531
2532 if (RT_SUCCESS(rc))
2533 pImage->Descriptor.fDirty = false;
2534
2535 RTMemFree(pszDescriptor);
2536 return rc;
2537}
2538
2539/**
2540 * Internal: validate the consistency check values in a binary header.
2541 */
2542static int vmdkValidateHeader(PVMDKIMAGE pImage, PVMDKEXTENT pExtent, const SparseExtentHeader *pHeader)
2543{
2544 int rc = VINF_SUCCESS;
2545 if (RT_LE2H_U32(pHeader->magicNumber) != VMDK_SPARSE_MAGICNUMBER)
2546 {
2547 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrect magic in sparse extent header in '%s'"), pExtent->pszFullname);
2548 return rc;
2549 }
2550 if (RT_LE2H_U32(pHeader->version) != 1 && RT_LE2H_U32(pHeader->version) != 3)
2551 {
2552 rc = vmdkError(pImage, VERR_VD_VMDK_UNSUPPORTED_VERSION, RT_SRC_POS, N_("VMDK: incorrect version in sparse extent header in '%s', not a VMDK 1.0/1.1 conforming file"), pExtent->pszFullname);
2553 return rc;
2554 }
2555 if ( (RT_LE2H_U32(pHeader->flags) & 1)
2556 && ( pHeader->singleEndLineChar != '\n'
2557 || pHeader->nonEndLineChar != ' '
2558 || pHeader->doubleEndLineChar1 != '\r'
2559 || pHeader->doubleEndLineChar2 != '\n') )
2560 {
2561 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: corrupted by CR/LF translation in '%s'"), pExtent->pszFullname);
2562 return rc;
2563 }
2564 return rc;
2565}
2566
2567/**
2568 * Internal: read metadata belonging to an extent with binary header, i.e.
2569 * as found in monolithic files.
2570 */
2571static int vmdkReadBinaryMetaExtent(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
2572{
2573 SparseExtentHeader Header;
2574 uint64_t cSectorsPerGDE;
2575
2576 int rc = vmdkFileReadAt(pExtent->pFile, 0, &Header, sizeof(Header), NULL);
2577 AssertRC(rc);
2578 if (RT_FAILURE(rc))
2579 {
2580 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error reading extent header in '%s'"), pExtent->pszFullname);
2581 goto out;
2582 }
2583 rc = vmdkValidateHeader(pImage, pExtent, &Header);
2584 if (RT_FAILURE(rc))
2585 goto out;
2586 if ( (RT_LE2H_U32(Header.flags) & RT_BIT(17))
2587 && RT_LE2H_U64(Header.gdOffset) == VMDK_GD_AT_END)
2588 {
2589 /* Read the footer, which isn't compressed and comes before the
2590 * end-of-stream marker. This is bending the VMDK 1.1 spec, but that's
2591 * VMware reality. Theory and practice have very little in common. */
2592 uint64_t cbSize;
2593 rc = vmdkFileGetSize(pExtent->pFile, &cbSize);
2594 AssertRC(rc);
2595 if (RT_FAILURE(rc))
2596 {
2597 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot get size of '%s'"), pExtent->pszFullname);
2598 goto out;
2599 }
2600 cbSize = RT_ALIGN_64(cbSize, 512);
2601 rc = vmdkFileReadAt(pExtent->pFile, cbSize - 2*512, &Header, sizeof(Header), NULL);
2602 AssertRC(rc);
2603 if (RT_FAILURE(rc))
2604 {
2605 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error reading extent footer in '%s'"), pExtent->pszFullname);
2606 goto out;
2607 }
2608 rc = vmdkValidateHeader(pImage, pExtent, &Header);
2609 if (RT_FAILURE(rc))
2610 goto out;
2611 pExtent->fFooter = true;
2612 }
2613 pExtent->uVersion = RT_LE2H_U32(Header.version);
2614 pExtent->enmType = VMDKETYPE_HOSTED_SPARSE; /* Just dummy value, changed later. */
2615 pExtent->cSectors = RT_LE2H_U64(Header.capacity);
2616 pExtent->cSectorsPerGrain = RT_LE2H_U64(Header.grainSize);
2617 pExtent->uDescriptorSector = RT_LE2H_U64(Header.descriptorOffset);
2618 pExtent->cDescriptorSectors = RT_LE2H_U64(Header.descriptorSize);
2619 if (pExtent->uDescriptorSector && !pExtent->cDescriptorSectors)
2620 {
2621 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: inconsistent embedded descriptor config in '%s'"), pExtent->pszFullname);
2622 goto out;
2623 }
2624 pExtent->cGTEntries = RT_LE2H_U32(Header.numGTEsPerGT);
2625 if (RT_LE2H_U32(Header.flags) & RT_BIT(1))
2626 {
2627 pExtent->uSectorRGD = RT_LE2H_U64(Header.rgdOffset);
2628 pExtent->uSectorGD = RT_LE2H_U64(Header.gdOffset);
2629 }
2630 else
2631 {
2632 pExtent->uSectorGD = RT_LE2H_U64(Header.gdOffset);
2633 pExtent->uSectorRGD = 0;
2634 }
2635 if (pExtent->uSectorGD == VMDK_GD_AT_END || pExtent->uSectorRGD == VMDK_GD_AT_END)
2636 {
2637 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: cannot resolve grain directory offset in '%s'"), pExtent->pszFullname);
2638 goto out;
2639 }
2640 pExtent->cOverheadSectors = RT_LE2H_U64(Header.overHead);
2641 pExtent->fUncleanShutdown = !!Header.uncleanShutdown;
2642 pExtent->uCompression = RT_LE2H_U16(Header.compressAlgorithm);
2643 cSectorsPerGDE = pExtent->cGTEntries * pExtent->cSectorsPerGrain;
2644 if (!cSectorsPerGDE || cSectorsPerGDE > UINT32_MAX)
2645 {
2646 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrect grain directory size in '%s'"), pExtent->pszFullname);
2647 goto out;
2648 }
2649 pExtent->cSectorsPerGDE = cSectorsPerGDE;
2650 pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE;
2651
2652 /* Fix up the number of descriptor sectors, as some flat images have
2653 * really just one, and this causes failures when inserting the UUID
2654 * values and other extra information. */
2655 if (pExtent->cDescriptorSectors != 0 && pExtent->cDescriptorSectors < 4)
2656 {
2657 /* Do it the easy way - just fix it for flat images which have no
2658 * other complicated metadata which needs space too. */
2659 if ( pExtent->uDescriptorSector + 4 < pExtent->cOverheadSectors
2660 && pExtent->cGTEntries * pExtent->cGDEntries == 0)
2661 pExtent->cDescriptorSectors = 4;
2662 }
2663
2664out:
2665 if (RT_FAILURE(rc))
2666 vmdkFreeExtentData(pImage, pExtent, false);
2667
2668 return rc;
2669}
2670
2671/**
2672 * Internal: read additional metadata belonging to an extent. For those
2673 * extents which have no additional metadata just verify the information.
2674 */
2675static int vmdkReadMetaExtent(PVMDKIMAGE pImage, PVMDKEXTENT pExtent)
2676{
2677 int rc = VINF_SUCCESS;
2678 uint64_t cbExtentSize;
2679
2680 /* The image must be a multiple of a sector in size and contain the data
2681 * area (flat images only). If not, it means the image is at least
2682 * truncated, or even seriously garbled. */
2683 rc = vmdkFileGetSize(pExtent->pFile, &cbExtentSize);
2684 if (RT_FAILURE(rc))
2685 {
2686 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting size in '%s'"), pExtent->pszFullname);
2687 goto out;
2688 }
2689/* The size check is disabled again as there are too many VMDK images out there which are too short. */
2690#ifdef VBOX_WITH_VMDK_STRICT_SIZE_CHECK
2691 if ( cbExtentSize != RT_ALIGN_64(cbExtentSize, 512)
2692 && (pExtent->enmType != VMDKETYPE_FLAT || pExtent->cNominalSectors + pExtent->uSectorOffset > VMDK_BYTE2SECTOR(cbExtentSize)))
2693 {
2694 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: file size is not a multiple of 512 in '%s', file is truncated or otherwise garbled"), pExtent->pszFullname);
2695 goto out;
2696 }
2697#endif /* VBOX_WITH_VMDK_STRICT_SIZE_CHECK */
2698 if (pExtent->enmType != VMDKETYPE_HOSTED_SPARSE)
2699 goto out;
2700
2701 /* The spec says that this must be a power of two and greater than 8,
2702 * but probably they meant not less than 8. */
2703 if ( (pExtent->cSectorsPerGrain & (pExtent->cSectorsPerGrain - 1))
2704 || pExtent->cSectorsPerGrain < 8)
2705 {
2706 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: invalid extent grain size %u in '%s'"), pExtent->cSectorsPerGrain, pExtent->pszFullname);
2707 goto out;
2708 }
2709
2710 /* This code requires that a grain table must hold a power of two multiple
2711 * of the number of entries per GT cache entry. */
2712 if ( (pExtent->cGTEntries & (pExtent->cGTEntries - 1))
2713 || pExtent->cGTEntries < VMDK_GT_CACHELINE_SIZE)
2714 {
2715 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: grain table cache size problem in '%s'"), pExtent->pszFullname);
2716 goto out;
2717 }
2718
2719 rc = vmdkReadGrainDirectory(pExtent);
2720
2721out:
2722 if (RT_FAILURE(rc))
2723 vmdkFreeExtentData(pImage, pExtent, false);
2724
2725 return rc;
2726}
2727
2728/**
2729 * Internal: write/update the metadata for a sparse extent.
2730 */
2731static int vmdkWriteMetaSparseExtent(PVMDKEXTENT pExtent, uint64_t uOffset)
2732{
2733 SparseExtentHeader Header;
2734
2735 memset(&Header, '\0', sizeof(Header));
2736 Header.magicNumber = RT_H2LE_U32(VMDK_SPARSE_MAGICNUMBER);
2737 Header.version = RT_H2LE_U32(pExtent->uVersion);
2738 Header.flags = RT_H2LE_U32(RT_BIT(0));
2739 if (pExtent->pRGD)
2740 Header.flags |= RT_H2LE_U32(RT_BIT(1));
2741 if (pExtent->pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
2742 Header.flags |= RT_H2LE_U32(RT_BIT(16) | RT_BIT(17));
2743 Header.capacity = RT_H2LE_U64(pExtent->cSectors);
2744 Header.grainSize = RT_H2LE_U64(pExtent->cSectorsPerGrain);
2745 Header.descriptorOffset = RT_H2LE_U64(pExtent->uDescriptorSector);
2746 Header.descriptorSize = RT_H2LE_U64(pExtent->cDescriptorSectors);
2747 Header.numGTEsPerGT = RT_H2LE_U32(pExtent->cGTEntries);
2748 if (pExtent->fFooter && uOffset == 0)
2749 {
2750 if (pExtent->pRGD)
2751 {
2752 Assert(pExtent->uSectorRGD);
2753 Header.rgdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
2754 Header.gdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
2755 }
2756 else
2757 {
2758 Header.gdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
2759 }
2760 }
2761 else
2762 {
2763 if (pExtent->pRGD)
2764 {
2765 Assert(pExtent->uSectorRGD);
2766 Header.rgdOffset = RT_H2LE_U64(pExtent->uSectorRGD);
2767 Header.gdOffset = RT_H2LE_U64(pExtent->uSectorGD);
2768 }
2769 else
2770 {
2771 Header.gdOffset = RT_H2LE_U64(pExtent->uSectorGD);
2772 }
2773 }
2774 Header.overHead = RT_H2LE_U64(pExtent->cOverheadSectors);
2775 Header.uncleanShutdown = pExtent->fUncleanShutdown;
2776 Header.singleEndLineChar = '\n';
2777 Header.nonEndLineChar = ' ';
2778 Header.doubleEndLineChar1 = '\r';
2779 Header.doubleEndLineChar2 = '\n';
2780 Header.compressAlgorithm = RT_H2LE_U16(pExtent->uCompression);
2781
2782 int rc = vmdkFileWriteAt(pExtent->pFile, uOffset, &Header, sizeof(Header), NULL);
2783 AssertRC(rc);
2784 if (RT_FAILURE(rc))
2785 rc = vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: error writing extent header in '%s'"), pExtent->pszFullname);
2786 return rc;
2787}
2788
2789#ifdef VBOX_WITH_VMDK_ESX
2790/**
2791 * Internal: unused code to read the metadata of a sparse ESX extent.
2792 *
2793 * Such extents never leave ESX server, so this isn't ever used.
2794 */
2795static int vmdkReadMetaESXSparseExtent(PVMDKEXTENT pExtent)
2796{
2797 COWDisk_Header Header;
2798 uint64_t cSectorsPerGDE;
2799
2800 int rc = vmdkFileReadAt(pExtent->pFile, 0, &Header, sizeof(Header), NULL);
2801 AssertRC(rc);
2802 if (RT_FAILURE(rc))
2803 goto out;
2804 if ( RT_LE2H_U32(Header.magicNumber) != VMDK_ESX_SPARSE_MAGICNUMBER
2805 || RT_LE2H_U32(Header.version) != 1
2806 || RT_LE2H_U32(Header.flags) != 3)
2807 {
2808 rc = VERR_VD_VMDK_INVALID_HEADER;
2809 goto out;
2810 }
2811 pExtent->enmType = VMDKETYPE_ESX_SPARSE;
2812 pExtent->cSectors = RT_LE2H_U32(Header.numSectors);
2813 pExtent->cSectorsPerGrain = RT_LE2H_U32(Header.grainSize);
2814 /* The spec says that this must be between 1 sector and 1MB. This code
2815 * assumes it's a power of two, so check that requirement, too. */
2816 if ( (pExtent->cSectorsPerGrain & (pExtent->cSectorsPerGrain - 1))
2817 || pExtent->cSectorsPerGrain == 0
2818 || pExtent->cSectorsPerGrain > 2048)
2819 {
2820 rc = VERR_VD_VMDK_INVALID_HEADER;
2821 goto out;
2822 }
2823 pExtent->uDescriptorSector = 0;
2824 pExtent->cDescriptorSectors = 0;
2825 pExtent->uSectorGD = RT_LE2H_U32(Header.gdOffset);
2826 pExtent->uSectorRGD = 0;
2827 pExtent->cOverheadSectors = 0;
2828 pExtent->cGTEntries = 4096;
2829 cSectorsPerGDE = pExtent->cGTEntries * pExtent->cSectorsPerGrain;
2830 if (!cSectorsPerGDE || cSectorsPerGDE > UINT32_MAX)
2831 {
2832 rc = VERR_VD_VMDK_INVALID_HEADER;
2833 goto out;
2834 }
2835 pExtent->cSectorsPerGDE = cSectorsPerGDE;
2836 pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE;
2837 if (pExtent->cGDEntries != RT_LE2H_U32(Header.numGDEntries))
2838 {
2839 /* Inconsistency detected. Computed number of GD entries doesn't match
2840 * stored value. Better be safe than sorry. */
2841 rc = VERR_VD_VMDK_INVALID_HEADER;
2842 goto out;
2843 }
2844 pExtent->uFreeSector = RT_LE2H_U32(Header.freeSector);
2845 pExtent->fUncleanShutdown = !!Header.uncleanShutdown;
2846
2847 rc = vmdkReadGrainDirectory(pExtent);
2848
2849out:
2850 if (RT_FAILURE(rc))
2851 vmdkFreeExtentData(pExtent->pImage, pExtent, false);
2852
2853 return rc;
2854}
2855#endif /* VBOX_WITH_VMDK_ESX */
2856
2857/**
2858 * Internal: free the memory used by the extent data structure, optionally
2859 * deleting the referenced files.
2860 */
2861static void vmdkFreeExtentData(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
2862 bool fDelete)
2863{
2864 vmdkFreeGrainDirectory(pExtent);
2865 if (pExtent->pDescData)
2866 {
2867 RTMemFree(pExtent->pDescData);
2868 pExtent->pDescData = NULL;
2869 }
2870 if (pExtent->pFile != NULL)
2871 {
2872 /* Do not delete raw extents, these have full and base names equal. */
2873 vmdkFileClose(pImage, &pExtent->pFile,
2874 fDelete
2875 && pExtent->pszFullname
2876 && strcmp(pExtent->pszFullname, pExtent->pszBasename));
2877 }
2878 if (pExtent->pszBasename)
2879 {
2880 RTMemTmpFree((void *)pExtent->pszBasename);
2881 pExtent->pszBasename = NULL;
2882 }
2883 if (pExtent->pszFullname)
2884 {
2885 RTStrFree((char *)(void *)pExtent->pszFullname);
2886 pExtent->pszFullname = NULL;
2887 }
2888 if (pExtent->pvGrain)
2889 {
2890 RTMemFree(pExtent->pvGrain);
2891 pExtent->pvGrain = NULL;
2892 }
2893}
2894
2895/**
2896 * Internal: allocate grain table cache if necessary for this image.
2897 */
2898static int vmdkAllocateGrainTableCache(PVMDKIMAGE pImage)
2899{
2900 PVMDKEXTENT pExtent;
2901
2902 /* Allocate grain table cache if any sparse extent is present. */
2903 for (unsigned i = 0; i < pImage->cExtents; i++)
2904 {
2905 pExtent = &pImage->pExtents[i];
2906 if ( pExtent->enmType == VMDKETYPE_HOSTED_SPARSE
2907#ifdef VBOX_WITH_VMDK_ESX
2908 || pExtent->enmType == VMDKETYPE_ESX_SPARSE
2909#endif /* VBOX_WITH_VMDK_ESX */
2910 )
2911 {
2912 /* Allocate grain table cache. */
2913 pImage->pGTCache = (PVMDKGTCACHE)RTMemAllocZ(sizeof(VMDKGTCACHE));
2914 if (!pImage->pGTCache)
2915 return VERR_NO_MEMORY;
2916 for (unsigned j = 0; j < VMDK_GT_CACHE_SIZE; j++)
2917 {
2918 PVMDKGTCACHEENTRY pGCE = &pImage->pGTCache->aGTCache[j];
2919 pGCE->uExtent = UINT32_MAX;
2920 }
2921 pImage->pGTCache->cEntries = VMDK_GT_CACHE_SIZE;
2922 break;
2923 }
2924 }
2925
2926 return VINF_SUCCESS;
2927}
2928
2929/**
2930 * Internal: allocate the given number of extents.
2931 */
2932static int vmdkCreateExtents(PVMDKIMAGE pImage, unsigned cExtents)
2933{
2934 int rc = VINF_SUCCESS;
2935 PVMDKEXTENT pExtents = (PVMDKEXTENT)RTMemAllocZ(cExtents * sizeof(VMDKEXTENT));
2936 if (pExtents)
2937 {
2938 for (unsigned i = 0; i < cExtents; i++)
2939 {
2940 pExtents[i].pFile = NULL;
2941 pExtents[i].pszBasename = NULL;
2942 pExtents[i].pszFullname = NULL;
2943 pExtents[i].pGD = NULL;
2944 pExtents[i].pRGD = NULL;
2945 pExtents[i].pDescData = NULL;
2946 pExtents[i].uVersion = 1;
2947 pExtents[i].uCompression = VMDK_COMPRESSION_NONE;
2948 pExtents[i].uExtent = i;
2949 pExtents[i].pImage = pImage;
2950 }
2951 pImage->pExtents = pExtents;
2952 pImage->cExtents = cExtents;
2953 }
2954 else
2955 rc = VERR_NO_MEMORY;
2956
2957 return rc;
2958}
2959
2960/**
2961 * Internal: Open an image, constructing all necessary data structures.
2962 */
2963static int vmdkOpenImage(PVMDKIMAGE pImage, unsigned uOpenFlags)
2964{
2965 int rc;
2966 uint32_t u32Magic;
2967 PVMDKFILE pFile;
2968 PVMDKEXTENT pExtent;
2969
2970 pImage->uOpenFlags = uOpenFlags;
2971
2972 /* Try to get error interface. */
2973 pImage->pInterfaceError = VDInterfaceGet(pImage->pVDIfsDisk, VDINTERFACETYPE_ERROR);
2974 if (pImage->pInterfaceError)
2975 pImage->pInterfaceErrorCallbacks = VDGetInterfaceError(pImage->pInterfaceError);
2976
2977 /* Try to get async I/O interface. */
2978 pImage->pInterfaceAsyncIO = VDInterfaceGet(pImage->pVDIfsDisk, VDINTERFACETYPE_ASYNCIO);
2979 if (pImage->pInterfaceAsyncIO)
2980 pImage->pInterfaceAsyncIOCallbacks = VDGetInterfaceAsyncIO(pImage->pInterfaceAsyncIO);
2981
2982 /*
2983 * Open the image.
2984 * We don't have to check for asynchronous access because
2985 * we only support raw access and the opened file is a description
2986 * file where no data is stored.
2987 */
2988 rc = vmdkFileOpen(pImage, &pFile, pImage->pszFilename,
2989 uOpenFlags & VD_OPEN_FLAGS_READONLY
2990 ? RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE
2991 : RTFILE_O_READWRITE | RTFILE_O_OPEN | RTFILE_O_DENY_WRITE, false);
2992 if (RT_FAILURE(rc))
2993 {
2994 /* Do NOT signal an appropriate error here, as the VD layer has the
2995 * choice of retrying the open if it failed. */
2996 goto out;
2997 }
2998 pImage->pFile = pFile;
2999
3000 /* Read magic (if present). */
3001 rc = vmdkFileReadAt(pFile, 0, &u32Magic, sizeof(u32Magic), NULL);
3002 if (RT_FAILURE(rc))
3003 {
3004 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error reading the magic number in '%s'"), pImage->pszFilename);
3005 goto out;
3006 }
3007
3008 /* Handle the file according to its magic number. */
3009 if (RT_LE2H_U32(u32Magic) == VMDK_SPARSE_MAGICNUMBER)
3010 {
3011 /* Async I/O is not supported with these files yet. So fail if opened in async I/O mode. */
3012 if (uOpenFlags & VD_OPEN_FLAGS_ASYNC_IO)
3013 {
3014 rc = VERR_NOT_SUPPORTED;
3015 goto out;
3016 }
3017
3018 /* It's a hosted single-extent image. */
3019 rc = vmdkCreateExtents(pImage, 1);
3020 if (RT_FAILURE(rc))
3021 goto out;
3022 /* The opened file is passed to the extent. No separate descriptor
3023 * file, so no need to keep anything open for the image. */
3024 pExtent = &pImage->pExtents[0];
3025 pExtent->pFile = pFile;
3026 pImage->pFile = NULL;
3027 pExtent->pszFullname = RTPathAbsDup(pImage->pszFilename);
3028 if (!pExtent->pszFullname)
3029 {
3030 rc = VERR_NO_MEMORY;
3031 goto out;
3032 }
3033 rc = vmdkReadBinaryMetaExtent(pImage, pExtent);
3034 if (RT_FAILURE(rc))
3035 goto out;
3036
3037 /* As we're dealing with a monolithic image here, there must
3038 * be a descriptor embedded in the image file. */
3039 if (!pExtent->uDescriptorSector || !pExtent->cDescriptorSectors)
3040 {
3041 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: monolithic image without descriptor in '%s'"), pImage->pszFilename);
3042 goto out;
3043 }
3044 /* HACK: extend the descriptor if it is unusually small and it fits in
3045 * the unused space after the image header. Allows opening VMDK files
3046 * with extremely small descriptor in read/write mode. */
3047 if ( !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
3048 && pExtent->cDescriptorSectors < 3
3049 && (int64_t)pExtent->uSectorGD - pExtent->uDescriptorSector >= 4
3050 && (!pExtent->uSectorRGD || (int64_t)pExtent->uSectorRGD - pExtent->uDescriptorSector >= 4))
3051 {
3052 pExtent->cDescriptorSectors = 4;
3053 pExtent->fMetaDirty = true;
3054 }
3055 /* Read the descriptor from the extent. */
3056 pExtent->pDescData = (char *)RTMemAllocZ(VMDK_SECTOR2BYTE(pExtent->cDescriptorSectors));
3057 if (!pExtent->pDescData)
3058 {
3059 rc = VERR_NO_MEMORY;
3060 goto out;
3061 }
3062 rc = vmdkFileReadAt(pExtent->pFile,
3063 VMDK_SECTOR2BYTE(pExtent->uDescriptorSector),
3064 pExtent->pDescData,
3065 VMDK_SECTOR2BYTE(pExtent->cDescriptorSectors), NULL);
3066 AssertRC(rc);
3067 if (RT_FAILURE(rc))
3068 {
3069 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: read error for descriptor in '%s'"), pExtent->pszFullname);
3070 goto out;
3071 }
3072
3073 rc = vmdkParseDescriptor(pImage, pExtent->pDescData,
3074 VMDK_SECTOR2BYTE(pExtent->cDescriptorSectors));
3075 if (RT_FAILURE(rc))
3076 goto out;
3077
3078 rc = vmdkReadMetaExtent(pImage, pExtent);
3079 if (RT_FAILURE(rc))
3080 goto out;
3081
3082 /* Mark the extent as unclean if opened in read-write mode. */
3083 if (!(uOpenFlags & VD_OPEN_FLAGS_READONLY))
3084 {
3085 pExtent->fUncleanShutdown = true;
3086 pExtent->fMetaDirty = true;
3087 }
3088 }
3089 else
3090 {
3091 /* Allocate at least 10K, and make sure that there is 5K free space
3092 * in case new entries need to be added to the descriptor. Never
3093 * allocate more than 128K, because that is not a valid descriptor file
3094 * and will result in the correct "truncated read" error handling. */
3095 uint64_t cbFileSize;
3096 rc = vmdkFileGetSize(pFile, &cbFileSize);
3097 if (RT_FAILURE(rc))
3098 goto out;
3099
3100 uint64_t cbSize = cbFileSize;
3101 if (cbSize % VMDK_SECTOR2BYTE(10))
3102 cbSize += VMDK_SECTOR2BYTE(20) - cbSize % VMDK_SECTOR2BYTE(10);
3103 else
3104 cbSize += VMDK_SECTOR2BYTE(10);
3105 cbSize = RT_MIN(cbSize, _128K);
3106 pImage->cbDescAlloc = RT_MAX(VMDK_SECTOR2BYTE(20), cbSize);
3107 pImage->pDescData = (char *)RTMemAllocZ(pImage->cbDescAlloc);
3108 if (!pImage->pDescData)
3109 {
3110 rc = VERR_NO_MEMORY;
3111 goto out;
3112 }
3113
3114 size_t cbRead;
3115 rc = vmdkFileReadAt(pImage->pFile, 0, pImage->pDescData,
3116 RT_MIN(pImage->cbDescAlloc, cbFileSize),
3117 &cbRead);
3118 if (RT_FAILURE(rc))
3119 {
3120 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: read error for descriptor in '%s'"), pImage->pszFilename);
3121 goto out;
3122 }
3123 if (cbRead == pImage->cbDescAlloc)
3124 {
3125 /* Likely the read is truncated. Better fail a bit too early
3126 * (normally the descriptor is much smaller than our buffer). */
3127 rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: cannot read descriptor in '%s'"), pImage->pszFilename);
3128 goto out;
3129 }
3130
3131 rc = vmdkParseDescriptor(pImage, pImage->pDescData,
3132 pImage->cbDescAlloc);
3133 if (RT_FAILURE(rc))
3134 goto out;
3135
3136 /*
3137 * We have to check for the asynchronous open flag. The
3138 * extents are parsed and the type of all are known now.
3139 * Check if every extent is either FLAT or ZERO.
3140 */
3141 if (uOpenFlags & VD_OPEN_FLAGS_ASYNC_IO)
3142 {
3143 unsigned cFlatExtents = 0;
3144
3145 for (unsigned i = 0; i < pImage->cExtents; i++)
3146 {
3147 pExtent = &pImage->pExtents[i];
3148
3149 if (( pExtent->enmType != VMDKETYPE_FLAT
3150 && pExtent->enmType != VMDKETYPE_ZERO
3151 && pExtent->enmType != VMDKETYPE_VMFS)
3152 || ((pImage->pExtents[i].enmType == VMDKETYPE_FLAT) && (cFlatExtents > 0)))
3153 {
3154 /*
3155 * The opened image contains at least one extent which is neither flat
3156 * nor zero, or more than one flat extent. Return an error, but don't
3157 * set an error message, as the caller can still open in non-async I/O mode.
3158 */
3159 rc = VERR_NOT_SUPPORTED;
3160 goto out;
3161 }
3162 if (pExtent->enmType == VMDKETYPE_FLAT)
3163 cFlatExtents++;
3164 }
3165 }
3166
3167 for (unsigned i = 0; i < pImage->cExtents; i++)
3168 {
3169 pExtent = &pImage->pExtents[i];
3170
3171 if (pExtent->pszBasename)
3172 {
3173 /* Hack to figure out whether the specified name in the
3174 * extent descriptor is absolute. Doesn't always work, but
3175 * should be good enough for now. */
3176 char *pszFullname;
3177 /** @todo implement proper path absolute check. */
3178 if (pExtent->pszBasename[0] == RTPATH_SLASH)
3179 {
3180 pszFullname = RTStrDup(pExtent->pszBasename);
3181 if (!pszFullname)
3182 {
3183 rc = VERR_NO_MEMORY;
3184 goto out;
3185 }
3186 }
3187 else
3188 {
3189 size_t cbDirname;
3190 char *pszDirname = RTStrDup(pImage->pszFilename);
3191 if (!pszDirname)
3192 {
3193 rc = VERR_NO_MEMORY;
3194 goto out;
3195 }
3196 RTPathStripFilename(pszDirname);
3197 cbDirname = strlen(pszDirname);
3198 rc = RTStrAPrintf(&pszFullname, "%s%c%s", pszDirname,
3199 RTPATH_SLASH, pExtent->pszBasename);
3200 RTStrFree(pszDirname);
3201 if (RT_FAILURE(rc))
3202 goto out;
3203 }
3204 pExtent->pszFullname = pszFullname;
3205 }
3206 else
3207 pExtent->pszFullname = NULL;
3208
3209 switch (pExtent->enmType)
3210 {
3211 case VMDKETYPE_HOSTED_SPARSE:
3212 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
3213 uOpenFlags & VD_OPEN_FLAGS_READONLY
3214 ? RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE
3215 : RTFILE_O_READWRITE | RTFILE_O_OPEN | RTFILE_O_DENY_WRITE, false);
3216 if (RT_FAILURE(rc))
3217 {
3218 /* Do NOT signal an appropriate error here, as the VD
3219 * layer has the choice of retrying the open if it
3220 * failed. */
3221 goto out;
3222 }
3223 rc = vmdkReadBinaryMetaExtent(pImage, pExtent);
3224 if (RT_FAILURE(rc))
3225 goto out;
3226 rc = vmdkReadMetaExtent(pImage, pExtent);
3227 if (RT_FAILURE(rc))
3228 goto out;
3229
3230 /* Mark extent as unclean if opened in read-write mode. */
3231 if (!(uOpenFlags & VD_OPEN_FLAGS_READONLY))
3232 {
3233 pExtent->fUncleanShutdown = true;
3234 pExtent->fMetaDirty = true;
3235 }
3236 break;
3237 case VMDKETYPE_VMFS:
3238 case VMDKETYPE_FLAT:
3239 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
3240 uOpenFlags & VD_OPEN_FLAGS_READONLY
3241 ? RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE
3242 : RTFILE_O_READWRITE | RTFILE_O_OPEN | RTFILE_O_DENY_WRITE, true);
3243 if (RT_FAILURE(rc))
3244 {
3245 /* Do NOT signal an appropriate error here, as the VD
3246 * layer has the choice of retrying the open if it
3247 * failed. */
3248 goto out;
3249 }
3250 break;
3251 case VMDKETYPE_ZERO:
3252 /* Nothing to do. */
3253 break;
3254 default:
3255 AssertMsgFailed(("unknown vmdk extent type %d\n", pExtent->enmType));
3256 }
3257 }
3258 }
3259
3260 /* Make sure this is not reached accidentally with an error status. */
3261 AssertRC(rc);
3262
3263 /* Determine PCHS geometry if not set. */
3264 if (pImage->PCHSGeometry.cCylinders == 0)
3265 {
3266 uint64_t cCylinders = VMDK_BYTE2SECTOR(pImage->cbSize)
3267 / pImage->PCHSGeometry.cHeads
3268 / pImage->PCHSGeometry.cSectors;
3269 pImage->PCHSGeometry.cCylinders = (unsigned)RT_MIN(cCylinders, 16383);
3270 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
3271 {
3272 rc = vmdkDescSetPCHSGeometry(pImage, &pImage->PCHSGeometry);
3273 AssertRC(rc);
3274 }
3275 }
3276
3277 /* Update the image metadata now in case it has changed. */
3278 rc = vmdkFlushImage(pImage);
3279 if (RT_FAILURE(rc))
3280 goto out;
3281
3282 /* Figure out a few per-image constants from the extents. */
3283 pImage->cbSize = 0;
3284 for (unsigned i = 0; i < pImage->cExtents; i++)
3285 {
3286 pExtent = &pImage->pExtents[i];
3287 if ( pExtent->enmType == VMDKETYPE_HOSTED_SPARSE
3288#ifdef VBOX_WITH_VMDK_ESX
3289 || pExtent->enmType == VMDKETYPE_ESX_SPARSE
3290#endif /* VBOX_WITH_VMDK_ESX */
3291 )
3292 {
3293 /* Here used to be a check whether the nominal size of an extent
3294 * is a multiple of the grain size. The spec says that this is
3295 * always the case, but unfortunately some files out there in the
3296 * wild violate the spec (e.g. ReactOS 0.3.1). */
3297 }
3298 pImage->cbSize += VMDK_SECTOR2BYTE(pExtent->cNominalSectors);
3299 }
3300
3301 for (unsigned i = 0; i < pImage->cExtents; i++)
3302 {
3303 pExtent = &pImage->pExtents[i];
3304 if ( pImage->pExtents[i].enmType == VMDKETYPE_FLAT
3305 || pImage->pExtents[i].enmType == VMDKETYPE_ZERO)
3306 {
3307 pImage->uImageFlags |= VD_IMAGE_FLAGS_FIXED;
3308 break;
3309 }
3310 }
3311
3312 rc = vmdkAllocateGrainTableCache(pImage);
3313 if (RT_FAILURE(rc))
3314 goto out;
3315
3316out:
3317 if (RT_FAILURE(rc))
3318 vmdkFreeImage(pImage, false);
3319 return rc;
3320}
3321
3322/**
3323 * Internal: create VMDK images for raw disk/partition access.
3324 */
3325static int vmdkCreateRawImage(PVMDKIMAGE pImage, const PVBOXHDDRAW pRaw,
3326 uint64_t cbSize)
3327{
3328 int rc = VINF_SUCCESS;
3329 PVMDKEXTENT pExtent;
3330
3331 if (pRaw->fRawDisk)
3332 {
3333 /* Full raw disk access. This requires setting up a descriptor
3334 * file and open the (flat) raw disk. */
3335 rc = vmdkCreateExtents(pImage, 1);
3336 if (RT_FAILURE(rc))
3337 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
3338 pExtent = &pImage->pExtents[0];
3339 /* Create raw disk descriptor file. */
3340 rc = vmdkFileOpen(pImage, &pImage->pFile, pImage->pszFilename,
3341 RTFILE_O_READWRITE | RTFILE_O_CREATE | RTFILE_O_DENY_WRITE | RTFILE_O_NOT_CONTENT_INDEXED,
3342 false);
3343 if (RT_FAILURE(rc))
3344 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pImage->pszFilename);
3345
3346 /* Set up basename for extent description. Cannot use StrDup. */
3347 size_t cbBasename = strlen(pRaw->pszRawDisk) + 1;
3348 char *pszBasename = (char *)RTMemTmpAlloc(cbBasename);
3349 if (!pszBasename)
3350 return VERR_NO_MEMORY;
3351 memcpy(pszBasename, pRaw->pszRawDisk, cbBasename);
3352 pExtent->pszBasename = pszBasename;
3353 /* For raw disks the full name is identical to the base name. */
3354 pExtent->pszFullname = RTStrDup(pszBasename);
3355 if (!pExtent->pszFullname)
3356 return VERR_NO_MEMORY;
3357 pExtent->enmType = VMDKETYPE_FLAT;
3358 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbSize);
3359 pExtent->uSectorOffset = 0;
3360 pExtent->enmAccess = VMDKACCESS_READWRITE;
3361 pExtent->fMetaDirty = false;
3362
3363 /* Open flat image, the raw disk. */
3364 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
3365 RTFILE_O_READWRITE | RTFILE_O_OPEN | RTFILE_O_DENY_WRITE, false);
3366 if (RT_FAILURE(rc))
3367 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not open raw disk file '%s'"), pExtent->pszFullname);
3368 }
3369 else
3370 {
3371 /* Raw partition access. This requires setting up a descriptor
3372 * file, write the partition information to a flat extent and
3373 * open all the (flat) raw disk partitions. */
3374
3375 /* First pass over the partitions to determine how many
3376 * extents we need. One partition can require up to 4 extents.
3377 * One to skip over unpartitioned space, one for the
3378 * partitioning data, one to skip over unpartitioned space
3379 * and one for the partition data. */
3380 unsigned cExtents = 0;
3381 uint64_t uStart = 0;
3382 for (unsigned i = 0; i < pRaw->cPartitions; i++)
3383 {
3384 PVBOXHDDRAWPART pPart = &pRaw->pPartitions[i];
3385 if (pPart->cbPartitionData)
3386 {
3387 if (uStart > pPart->uPartitionDataStart)
3388 return vmdkError(pImage, VERR_INVALID_PARAMETER, RT_SRC_POS, N_("VMDK: cannot go backwards for partitioning information in '%s'"), pImage->pszFilename);
3389 else if (uStart != pPart->uPartitionDataStart)
3390 cExtents++;
3391 uStart = pPart->uPartitionDataStart + pPart->cbPartitionData;
3392 cExtents++;
3393 }
3394 if (pPart->cbPartition)
3395 {
3396 if (uStart > pPart->uPartitionStart)
3397 return vmdkError(pImage, VERR_INVALID_PARAMETER, RT_SRC_POS, N_("VMDK: cannot go backwards for partition data in '%s'"), pImage->pszFilename);
3398 else if (uStart != pPart->uPartitionStart)
3399 cExtents++;
3400 uStart = pPart->uPartitionStart + pPart->cbPartition;
3401 cExtents++;
3402 }
3403 }
3404 /* Another extent for filling up the rest of the image. */
3405 if (uStart != cbSize)
3406 cExtents++;
3407
3408 rc = vmdkCreateExtents(pImage, cExtents);
3409 if (RT_FAILURE(rc))
3410 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
3411
3412 /* Create raw partition descriptor file. */
3413 rc = vmdkFileOpen(pImage, &pImage->pFile, pImage->pszFilename,
3414 RTFILE_O_READWRITE | RTFILE_O_CREATE | RTFILE_O_DENY_WRITE | RTFILE_O_NOT_CONTENT_INDEXED,
3415 false);
3416 if (RT_FAILURE(rc))
3417 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pImage->pszFilename);
3418
3419 /* Create base filename for the partition table extent. */
3420 /** @todo remove fixed buffer without creating memory leaks. */
3421 char pszPartition[1024];
3422 const char *pszBase = RTPathFilename(pImage->pszFilename);
3423 const char *pszExt = RTPathExt(pszBase);
3424 if (pszExt == NULL)
3425 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: invalid filename '%s'"), pImage->pszFilename);
3426 char *pszBaseBase = RTStrDup(pszBase);
3427 if (!pszBaseBase)
3428 return VERR_NO_MEMORY;
3429 RTPathStripExt(pszBaseBase);
3430 RTStrPrintf(pszPartition, sizeof(pszPartition), "%s-pt%s",
3431 pszBaseBase, pszExt);
3432 RTStrFree(pszBaseBase);
3433
3434 /* Second pass over the partitions, now define all extents. */
3435 uint64_t uPartOffset = 0;
3436 cExtents = 0;
3437 uStart = 0;
3438 for (unsigned i = 0; i < pRaw->cPartitions; i++)
3439 {
3440 PVBOXHDDRAWPART pPart = &pRaw->pPartitions[i];
3441 if (pPart->cbPartitionData)
3442 {
3443 if (uStart != pPart->uPartitionDataStart)
3444 {
3445 pExtent = &pImage->pExtents[cExtents++];
3446 pExtent->pszBasename = NULL;
3447 pExtent->pszFullname = NULL;
3448 pExtent->enmType = VMDKETYPE_ZERO;
3449 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->uPartitionDataStart - uStart);
3450 pExtent->uSectorOffset = 0;
3451 pExtent->enmAccess = VMDKACCESS_READWRITE;
3452 pExtent->fMetaDirty = false;
3453 }
3454 uStart = pPart->uPartitionDataStart + pPart->cbPartitionData;
3455 pExtent = &pImage->pExtents[cExtents++];
3456 /* Set up basename for extent description. Can't use StrDup. */
3457 size_t cbBasename = strlen(pszPartition) + 1;
3458 char *pszBasename = (char *)RTMemTmpAlloc(cbBasename);
3459 if (!pszBasename)
3460 return VERR_NO_MEMORY;
3461 memcpy(pszBasename, pszPartition, cbBasename);
3462 pExtent->pszBasename = pszBasename;
3463
3464 /* Set up full name for partition extent. */
3465 size_t cbDirname;
3466 char *pszDirname = RTStrDup(pImage->pszFilename);
3467 if (!pszDirname)
3468 return VERR_NO_MEMORY;
3469 RTPathStripFilename(pszDirname);
3470 cbDirname = strlen(pszDirname);
3471 char *pszFullname;
3472 rc = RTStrAPrintf(&pszFullname, "%s%c%s", pszDirname,
3473 RTPATH_SLASH, pExtent->pszBasename);
3474 RTStrFree(pszDirname);
3475 if (RT_FAILURE(rc))
3476 return rc;
3477 pExtent->pszFullname = pszFullname;
3478 pExtent->enmType = VMDKETYPE_FLAT;
3479 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->cbPartitionData);
3480 pExtent->uSectorOffset = uPartOffset;
3481 pExtent->enmAccess = VMDKACCESS_READWRITE;
3482 pExtent->fMetaDirty = false;
3483
3484 /* Create partition table flat image. */
3485 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
3486 RTFILE_O_READWRITE | RTFILE_O_CREATE | RTFILE_O_DENY_WRITE | RTFILE_O_NOT_CONTENT_INDEXED,
3487 false);
3488 if (RT_FAILURE(rc))
3489 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new partition data file '%s'"), pExtent->pszFullname);
3490 rc = vmdkFileWriteAt(pExtent->pFile,
3491 VMDK_SECTOR2BYTE(uPartOffset),
3492 pPart->pvPartitionData,
3493 pPart->cbPartitionData, NULL);
3494 if (RT_FAILURE(rc))
3495 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not write partition data to '%s'"), pExtent->pszFullname);
3496 uPartOffset += VMDK_BYTE2SECTOR(pPart->cbPartitionData);
3497 }
3498 if (pPart->cbPartition)
3499 {
3500 if (uStart != pPart->uPartitionStart)
3501 {
3502 pExtent = &pImage->pExtents[cExtents++];
3503 pExtent->pszBasename = NULL;
3504 pExtent->pszFullname = NULL;
3505 pExtent->enmType = VMDKETYPE_ZERO;
3506 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->uPartitionStart - uStart);
3507 pExtent->uSectorOffset = 0;
3508 pExtent->enmAccess = VMDKACCESS_READWRITE;
3509 pExtent->fMetaDirty = false;
3510 }
3511 uStart = pPart->uPartitionStart + pPart->cbPartition;
3512 pExtent = &pImage->pExtents[cExtents++];
3513 if (pPart->pszRawDevice)
3514 {
3515 /* Set up basename for extent descr. Can't use StrDup. */
3516 size_t cbBasename = strlen(pPart->pszRawDevice) + 1;
3517 char *pszBasename = (char *)RTMemTmpAlloc(cbBasename);
3518 if (!pszBasename)
3519 return VERR_NO_MEMORY;
3520 memcpy(pszBasename, pPart->pszRawDevice, cbBasename);
3521 pExtent->pszBasename = pszBasename;
3522 /* For raw disks full name is identical to base name. */
3523 pExtent->pszFullname = RTStrDup(pszBasename);
3524 if (!pExtent->pszFullname)
3525 return VERR_NO_MEMORY;
3526 pExtent->enmType = VMDKETYPE_FLAT;
3527 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->cbPartition);
3528 pExtent->uSectorOffset = VMDK_BYTE2SECTOR(pPart->uPartitionStartOffset);
3529 pExtent->enmAccess = VMDKACCESS_READWRITE;
3530 pExtent->fMetaDirty = false;
3531
3532 /* Open flat image, the raw partition. */
3533 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
3534 RTFILE_O_READWRITE | RTFILE_O_OPEN | RTFILE_O_DENY_WRITE,
3535 false);
3536 if (RT_FAILURE(rc))
3537 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not open raw partition file '%s'"), pExtent->pszFullname);
3538 }
3539 else
3540 {
3541 pExtent->pszBasename = NULL;
3542 pExtent->pszFullname = NULL;
3543 pExtent->enmType = VMDKETYPE_ZERO;
3544 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(pPart->cbPartition);
3545 pExtent->uSectorOffset = 0;
3546 pExtent->enmAccess = VMDKACCESS_READWRITE;
3547 pExtent->fMetaDirty = false;
3548 }
3549 }
3550 }
3551 /* Another extent for filling up the rest of the image. */
3552 if (uStart != cbSize)
3553 {
3554 pExtent = &pImage->pExtents[cExtents++];
3555 pExtent->pszBasename = NULL;
3556 pExtent->pszFullname = NULL;
3557 pExtent->enmType = VMDKETYPE_ZERO;
3558 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbSize - uStart);
3559 pExtent->uSectorOffset = 0;
3560 pExtent->enmAccess = VMDKACCESS_READWRITE;
3561 pExtent->fMetaDirty = false;
3562 }
3563 }
3564
3565 rc = vmdkDescBaseSetStr(pImage, &pImage->Descriptor, "createType",
3566 pRaw->fRawDisk ?
3567 "fullDevice" : "partitionedDevice");
3568 if (RT_FAILURE(rc))
3569 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not set the image type in '%s'"), pImage->pszFilename);
3570 return rc;
3571}
3572
3573/**
3574 * Internal: create a regular (i.e. file-backed) VMDK image.
3575 */
3576static int vmdkCreateRegularImage(PVMDKIMAGE pImage, uint64_t cbSize,
3577 unsigned uImageFlags,
3578 PFNVDPROGRESS pfnProgress, void *pvUser,
3579 unsigned uPercentStart, unsigned uPercentSpan)
3580{
3581 int rc = VINF_SUCCESS;
3582 unsigned cExtents = 1;
3583 uint64_t cbOffset = 0;
3584 uint64_t cbRemaining = cbSize;
3585
3586 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G)
3587 {
3588 cExtents = cbSize / VMDK_2G_SPLIT_SIZE;
3589 /* Do proper extent computation: need one smaller extent if the total
3590 * size isn't evenly divisible by the split size. */
3591 if (cbSize % VMDK_2G_SPLIT_SIZE)
3592 cExtents++;
3593 }
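    /* E.g. a request slightly larger than twice the split size results in
     * three extents: two full-size ones plus a smaller remainder extent. The
     * split size is VMDK_2G_SPLIT_SIZE (nominally about 2G, as the flag name
     * suggests). */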
3594 rc = vmdkCreateExtents(pImage, cExtents);
3595 if (RT_FAILURE(rc))
3596 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
3597
3598 /* Basename strings needed for constructing the extent names. */
3599 char *pszBasenameSubstr = RTPathFilename(pImage->pszFilename);
3600 AssertPtr(pszBasenameSubstr);
3601 size_t cbBasenameSubstr = strlen(pszBasenameSubstr) + 1;
3602
3603    /* Create separate descriptor file if necessary. */
3604 if (cExtents != 1 || (uImageFlags & VD_IMAGE_FLAGS_FIXED))
3605 {
3606 rc = vmdkFileOpen(pImage, &pImage->pFile, pImage->pszFilename,
3607 RTFILE_O_READWRITE | RTFILE_O_CREATE | RTFILE_O_DENY_WRITE | RTFILE_O_NOT_CONTENT_INDEXED,
3608 false);
3609 if (RT_FAILURE(rc))
3610 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new sparse descriptor file '%s'"), pImage->pszFilename);
3611 }
3612 else
3613 pImage->pFile = NULL;
3614
3615 /* Set up all extents. */
3616 for (unsigned i = 0; i < cExtents; i++)
3617 {
3618 PVMDKEXTENT pExtent = &pImage->pExtents[i];
3619 uint64_t cbExtent = cbRemaining;
3620
3621        /* Set up fullname/basename for extent description. Cannot use StrDup
3622         * for the basename, as its memory is later freed with RTMemTmpFree;
3623         * other code paths allocate the basename with RTMemTmpAlloc, so
3624         * memory from StrDup is not usable here. */
3625 if (cExtents == 1 && !(uImageFlags & VD_IMAGE_FLAGS_FIXED))
3626 {
3627 char *pszBasename = (char *)RTMemTmpAlloc(cbBasenameSubstr);
3628 if (!pszBasename)
3629 return VERR_NO_MEMORY;
3630 memcpy(pszBasename, pszBasenameSubstr, cbBasenameSubstr);
3631 pExtent->pszBasename = pszBasename;
3632 }
3633 else
3634 {
3635 char *pszBasenameExt = RTPathExt(pszBasenameSubstr);
3636 char *pszBasenameBase = RTStrDup(pszBasenameSubstr);
3637 RTPathStripExt(pszBasenameBase);
3638 char *pszTmp;
3639 size_t cbTmp;
3640 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
3641 {
3642 if (cExtents == 1)
3643 rc = RTStrAPrintf(&pszTmp, "%s-flat%s", pszBasenameBase,
3644 pszBasenameExt);
3645 else
3646 rc = RTStrAPrintf(&pszTmp, "%s-f%03d%s", pszBasenameBase,
3647 i+1, pszBasenameExt);
3648 }
3649 else
3650 rc = RTStrAPrintf(&pszTmp, "%s-s%03d%s", pszBasenameBase, i+1,
3651 pszBasenameExt);
3652 RTStrFree(pszBasenameBase);
3653 if (RT_FAILURE(rc))
3654 return rc;
3655 cbTmp = strlen(pszTmp) + 1;
3656 char *pszBasename = (char *)RTMemTmpAlloc(cbTmp);
3657 if (!pszBasename)
3658 return VERR_NO_MEMORY;
3659 memcpy(pszBasename, pszTmp, cbTmp);
3660 RTStrFree(pszTmp);
3661 pExtent->pszBasename = pszBasename;
3662 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G)
3663 cbExtent = RT_MIN(cbRemaining, VMDK_2G_SPLIT_SIZE);
3664 }
3665 char *pszBasedirectory = RTStrDup(pImage->pszFilename);
3666 RTPathStripFilename(pszBasedirectory);
3667 char *pszFullname;
3668 rc = RTStrAPrintf(&pszFullname, "%s%c%s", pszBasedirectory,
3669 RTPATH_SLASH, pExtent->pszBasename);
3670 RTStrFree(pszBasedirectory);
3671 if (RT_FAILURE(rc))
3672 return rc;
3673 pExtent->pszFullname = pszFullname;
3674
3675 /* Create file for extent. */
3676 rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
3677 RTFILE_O_READWRITE | RTFILE_O_CREATE | RTFILE_O_DENY_WRITE | RTFILE_O_NOT_CONTENT_INDEXED,
3678 false);
3679 if (RT_FAILURE(rc))
3680 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pExtent->pszFullname);
3681 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
3682 {
3683 rc = vmdkFileSetSize(pExtent->pFile, cbExtent);
3684 if (RT_FAILURE(rc))
3685 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not set size of new file '%s'"), pExtent->pszFullname);
3686
3687 /* Fill image with zeroes. We do this for every fixed-size image since on some systems
3688 * (for example Windows Vista), it takes ages to write a block near the end of a sparse
3689 * file and the guest could complain about an ATA timeout. */
3690
3691            /** @todo Starting with Linux 2.6.23, there is a fallocate() system call.
3692 * Currently supported file systems are ext4 and ocfs2. */
3693
3694 /* Allocate a temporary zero-filled buffer. Use a bigger block size to optimize writing */
3695 const size_t cbBuf = 128 * _1K;
3696 void *pvBuf = RTMemTmpAllocZ(cbBuf);
3697 if (!pvBuf)
3698 return VERR_NO_MEMORY;
3699
3700 uint64_t uOff = 0;
3701 /* Write data to all image blocks. */
3702 while (uOff < cbExtent)
3703 {
3704                unsigned cbChunk = (unsigned)RT_MIN(cbExtent - uOff, cbBuf); /* do not write past the end of the extent */
3705
3706 rc = vmdkFileWriteAt(pExtent->pFile, uOff, pvBuf, cbChunk, NULL);
3707 if (RT_FAILURE(rc))
3708 {
3709                    RTMemTmpFree(pvBuf);
3710 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: writing block failed for '%s'"), pImage->pszFilename);
3711 }
3712
3713 uOff += cbChunk;
3714
3715 if (pfnProgress)
3716 {
3717 rc = pfnProgress(pvUser,
3718 uPercentStart + uOff * uPercentSpan / cbExtent);
3719 if (RT_FAILURE(rc))
3720 {
3721                        RTMemTmpFree(pvBuf);
3722 return rc;
3723 }
3724 }
3725 }
3726 RTMemTmpFree(pvBuf);
3727 }
3728
3729 /* Place descriptor file information (where integrated). */
3730 if (cExtents == 1 && !(uImageFlags & VD_IMAGE_FLAGS_FIXED))
3731 {
3732 pExtent->uDescriptorSector = 1;
3733 pExtent->cDescriptorSectors = VMDK_BYTE2SECTOR(pImage->cbDescAlloc);
3734 /* The descriptor is part of the (only) extent. */
3735 pExtent->pDescData = pImage->pDescData;
3736 pImage->pDescData = NULL;
3737 }
3738
3739 if (!(uImageFlags & VD_IMAGE_FLAGS_FIXED))
3740 {
3741 uint64_t cSectorsPerGDE, cSectorsPerGD;
3742 pExtent->enmType = VMDKETYPE_HOSTED_SPARSE;
3743 pExtent->cSectors = VMDK_BYTE2SECTOR(RT_ALIGN_64(cbExtent, 65536));
3744 pExtent->cSectorsPerGrain = VMDK_BYTE2SECTOR(65536);
3745 pExtent->cGTEntries = 512;
3746 cSectorsPerGDE = pExtent->cGTEntries * pExtent->cSectorsPerGrain;
3747 pExtent->cSectorsPerGDE = cSectorsPerGDE;
3748 pExtent->cGDEntries = (pExtent->cSectors + cSectorsPerGDE - 1) / cSectorsPerGDE;
3749 cSectorsPerGD = (pExtent->cGDEntries + (512 / sizeof(uint32_t) - 1)) / (512 / sizeof(uint32_t));
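            /* With the values above (64K grains, 512 grain table entries per
             * table, 128 grain directory entries per 512-byte sector) one grain
             * directory entry covers 512 * 128 = 65536 sectors, i.e. 32M of
             * virtual disk, so cGDEntries is roughly cbExtent / 32M, rounded up. */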
3750 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
3751 {
3752 /* The spec says version is 1 for all VMDKs, but the vast
3753 * majority of streamOptimized VMDKs actually contain
3754                 * version 3 - so go with the majority. Both are accepted. */
3755 pExtent->uVersion = 3;
3756 pExtent->uCompression = VMDK_COMPRESSION_DEFLATE;
3757 }
3758 }
3759 else
3760 {
3761 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX)
3762 pExtent->enmType = VMDKETYPE_VMFS;
3763 else
3764 pExtent->enmType = VMDKETYPE_FLAT;
3765 }
3766
3767 pExtent->enmAccess = VMDKACCESS_READWRITE;
3768 pExtent->fUncleanShutdown = true;
3769 pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbExtent);
3770 pExtent->uSectorOffset = 0;
3771 pExtent->fMetaDirty = true;
3772
3773 if (!(uImageFlags & VD_IMAGE_FLAGS_FIXED))
3774 {
3775 rc = vmdkCreateGrainDirectory(pExtent,
3776 RT_MAX( pExtent->uDescriptorSector
3777 + pExtent->cDescriptorSectors,
3778 1),
3779 true);
3780 if (RT_FAILURE(rc))
3781 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new grain directory in '%s'"), pExtent->pszFullname);
3782 }
3783
3784 if (RT_SUCCESS(rc) && pfnProgress)
3785 pfnProgress(pvUser, uPercentStart + i * uPercentSpan / cExtents);
3786
3787 cbRemaining -= cbExtent;
3788 cbOffset += cbExtent;
3789 }
3790
3791 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX)
3792 {
3793        /* VirtualBox doesn't care, but VMware ESX freaks out if the wrong
3794 * controller type is set in an image. */
3795 rc = vmdkDescDDBSetStr(pImage, &pImage->Descriptor, "ddb.adapterType", "lsilogic");
3796 if (RT_FAILURE(rc))
3797 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not set controller type to lsilogic in '%s'"), pImage->pszFilename);
3798 }
3799
3800 const char *pszDescType = NULL;
3801 if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
3802 {
3803 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX)
3804 pszDescType = "vmfs";
3805 else
3806 pszDescType = (cExtents == 1)
3807 ? "monolithicFlat" : "twoGbMaxExtentFlat";
3808 }
3809 else
3810 {
3811 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
3812 pszDescType = "streamOptimized";
3813 else
3814 {
3815 pszDescType = (cExtents == 1)
3816 ? "monolithicSparse" : "twoGbMaxExtentSparse";
3817 }
3818 }
3819 rc = vmdkDescBaseSetStr(pImage, &pImage->Descriptor, "createType",
3820 pszDescType);
3821 if (RT_FAILURE(rc))
3822 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not set the image type in '%s'"), pImage->pszFilename);
3823 return rc;
3824}
3825
3826/**
3827 * Internal: The actual code for creating any VMDK variant currently in
3828 * existence on hosted environments.
3829 */
3830static int vmdkCreateImage(PVMDKIMAGE pImage, uint64_t cbSize,
3831 unsigned uImageFlags, const char *pszComment,
3832 PCPDMMEDIAGEOMETRY pPCHSGeometry,
3833 PCPDMMEDIAGEOMETRY pLCHSGeometry, PCRTUUID pUuid,
3834 PFNVDPROGRESS pfnProgress, void *pvUser,
3835 unsigned uPercentStart, unsigned uPercentSpan)
3836{
3837 int rc;
3838
3839 pImage->uImageFlags = uImageFlags;
3840
3841 /* Try to get error interface. */
3842 pImage->pInterfaceError = VDInterfaceGet(pImage->pVDIfsDisk, VDINTERFACETYPE_ERROR);
3843 if (pImage->pInterfaceError)
3844 pImage->pInterfaceErrorCallbacks = VDGetInterfaceError(pImage->pInterfaceError);
3845
3846 /* Try to get async I/O interface. */
3847 pImage->pInterfaceAsyncIO = VDInterfaceGet(pImage->pVDIfsDisk, VDINTERFACETYPE_ASYNCIO);
3848 if (pImage->pInterfaceAsyncIO)
3849 pImage->pInterfaceAsyncIOCallbacks = VDGetInterfaceAsyncIO(pImage->pInterfaceAsyncIO);
3850
3851 rc = vmdkCreateDescriptor(pImage, pImage->pDescData, pImage->cbDescAlloc,
3852 &pImage->Descriptor);
3853 if (RT_FAILURE(rc))
3854 {
3855 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new descriptor in '%s'"), pImage->pszFilename);
3856 goto out;
3857 }
3858
3859 if ( (uImageFlags & VD_IMAGE_FLAGS_FIXED)
3860 && (uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK))
3861 {
3862 /* Raw disk image (includes raw partition). */
3863 const PVBOXHDDRAW pRaw = (const PVBOXHDDRAW)pszComment;
3864 /* As the comment is misused, zap it so that no garbage comment
3865 * is set below. */
3866 pszComment = NULL;
3867 rc = vmdkCreateRawImage(pImage, pRaw, cbSize);
3868 }
3869 else
3870 {
3871 /* Regular fixed or sparse image (monolithic or split). */
3872 rc = vmdkCreateRegularImage(pImage, cbSize, uImageFlags,
3873 pfnProgress, pvUser, uPercentStart,
3874 uPercentSpan * 95 / 100);
3875 }
3876
3877 if (RT_FAILURE(rc))
3878 goto out;
3879
3880 if (RT_SUCCESS(rc) && pfnProgress)
3881 pfnProgress(pvUser, uPercentStart + uPercentSpan * 98 / 100);
3882
3883 pImage->cbSize = cbSize;
3884
3885 for (unsigned i = 0; i < pImage->cExtents; i++)
3886 {
3887 PVMDKEXTENT pExtent = &pImage->pExtents[i];
3888
3889 rc = vmdkDescExtInsert(pImage, &pImage->Descriptor, pExtent->enmAccess,
3890 pExtent->cNominalSectors, pExtent->enmType,
3891 pExtent->pszBasename, pExtent->uSectorOffset);
3892 if (RT_FAILURE(rc))
3893 {
3894 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not insert the extent list into descriptor in '%s'"), pImage->pszFilename);
3895 goto out;
3896 }
3897 }
3898 vmdkDescExtRemoveDummy(pImage, &pImage->Descriptor);
3899
3900 if ( pPCHSGeometry->cCylinders != 0
3901 && pPCHSGeometry->cHeads != 0
3902 && pPCHSGeometry->cSectors != 0)
3903 {
3904 rc = vmdkDescSetPCHSGeometry(pImage, pPCHSGeometry);
3905 if (RT_FAILURE(rc))
3906 goto out;
3907 }
3908 if ( pLCHSGeometry->cCylinders != 0
3909 && pLCHSGeometry->cHeads != 0
3910 && pLCHSGeometry->cSectors != 0)
3911 {
3912 rc = vmdkDescSetLCHSGeometry(pImage, pLCHSGeometry);
3913 if (RT_FAILURE(rc))
3914 goto out;
3915 }
3916
3917 pImage->LCHSGeometry = *pLCHSGeometry;
3918 pImage->PCHSGeometry = *pPCHSGeometry;
3919
3920 pImage->ImageUuid = *pUuid;
3921 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
3922 VMDK_DDB_IMAGE_UUID, &pImage->ImageUuid);
3923 if (RT_FAILURE(rc))
3924 {
3925 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing image UUID in new descriptor in '%s'"), pImage->pszFilename);
3926 goto out;
3927 }
3928 RTUuidClear(&pImage->ParentUuid);
3929 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
3930 VMDK_DDB_PARENT_UUID, &pImage->ParentUuid);
3931 if (RT_FAILURE(rc))
3932 {
3933 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing parent image UUID in new descriptor in '%s'"), pImage->pszFilename);
3934 goto out;
3935 }
3936 RTUuidClear(&pImage->ModificationUuid);
3937 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
3938 VMDK_DDB_MODIFICATION_UUID,
3939 &pImage->ModificationUuid);
3940 if (RT_FAILURE(rc))
3941 {
3942 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing modification UUID in new descriptor in '%s'"), pImage->pszFilename);
3943 goto out;
3944 }
3945 RTUuidClear(&pImage->ParentModificationUuid);
3946 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
3947 VMDK_DDB_PARENT_MODIFICATION_UUID,
3948 &pImage->ParentModificationUuid);
3949 if (RT_FAILURE(rc))
3950 {
3951 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing parent modification UUID in new descriptor in '%s'"), pImage->pszFilename);
3952 goto out;
3953 }
3954
3955 rc = vmdkAllocateGrainTableCache(pImage);
3956 if (RT_FAILURE(rc))
3957 goto out;
3958
3959 rc = vmdkSetImageComment(pImage, pszComment);
3960 if (RT_FAILURE(rc))
3961 {
3962 rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot set image comment in '%s'"), pImage->pszFilename);
3963 goto out;
3964 }
3965
3966 if (RT_SUCCESS(rc) && pfnProgress)
3967 pfnProgress(pvUser, uPercentStart + uPercentSpan * 99 / 100);
3968
3969 rc = vmdkFlushImage(pImage);
3970
3971out:
3972 if (RT_SUCCESS(rc) && pfnProgress)
3973 pfnProgress(pvUser, uPercentStart + uPercentSpan);
3974
3975 if (RT_FAILURE(rc))
3976 vmdkFreeImage(pImage, rc != VERR_ALREADY_EXISTS);
3977 return rc;
3978}
3979
3980/**
3981 * Internal: Update image comment.
3982 */
3983static int vmdkSetImageComment(PVMDKIMAGE pImage, const char *pszComment)
3984{
3985 char *pszCommentEncoded;
3986 if (pszComment)
3987 {
3988 pszCommentEncoded = vmdkEncodeString(pszComment);
3989 if (!pszCommentEncoded)
3990 return VERR_NO_MEMORY;
3991 }
3992 else
3993 pszCommentEncoded = NULL;
3994 int rc = vmdkDescDDBSetStr(pImage, &pImage->Descriptor,
3995 "ddb.comment", pszCommentEncoded);
3996 if (pszComment)
3997 RTStrFree(pszCommentEncoded);
3998 if (RT_FAILURE(rc))
3999 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing image comment in descriptor in '%s'"), pImage->pszFilename);
4000 return VINF_SUCCESS;
4001}
4002
4003/**
4004 * Internal. Free all allocated space for representing an image, and optionally
4005 * delete the image from disk.
4006 */
4007static void vmdkFreeImage(PVMDKIMAGE pImage, bool fDelete)
4008{
4009 AssertPtr(pImage);
4010
4011 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
4012 {
4013 /* Mark all extents as clean. */
4014 for (unsigned i = 0; i < pImage->cExtents; i++)
4015 {
4016 if (( pImage->pExtents[i].enmType == VMDKETYPE_HOSTED_SPARSE
4017#ifdef VBOX_WITH_VMDK_ESX
4018 || pImage->pExtents[i].enmType == VMDKETYPE_ESX_SPARSE
4019#endif /* VBOX_WITH_VMDK_ESX */
4020 )
4021 && pImage->pExtents[i].fUncleanShutdown)
4022 {
4023 pImage->pExtents[i].fUncleanShutdown = false;
4024 pImage->pExtents[i].fMetaDirty = true;
4025 }
4026 }
4027 }
4028 (void)vmdkFlushImage(pImage);
4029
4030 if (pImage->pExtents != NULL)
4031 {
4032 for (unsigned i = 0 ; i < pImage->cExtents; i++)
4033 vmdkFreeExtentData(pImage, &pImage->pExtents[i], fDelete);
4034 RTMemFree(pImage->pExtents);
4035 pImage->pExtents = NULL;
4036 }
4037 pImage->cExtents = 0;
4038 if (pImage->pFile != NULL)
4039 vmdkFileClose(pImage, &pImage->pFile, fDelete);
4040 vmdkFileCheckAllClose(pImage);
4041 if (pImage->pGTCache)
4042 {
4043 RTMemFree(pImage->pGTCache);
4044 pImage->pGTCache = NULL;
4045 }
4046 if (pImage->pDescData)
4047 {
4048 RTMemFree(pImage->pDescData);
4049 pImage->pDescData = NULL;
4050 }
4051}
4052
4053/**
4054 * Internal. Flush image data (and metadata) to disk.
4055 */
4056static int vmdkFlushImage(PVMDKIMAGE pImage)
4057{
4058 PVMDKEXTENT pExtent;
4059 int rc = VINF_SUCCESS;
4060
4061 /* Update descriptor if changed. */
4062 if (pImage->Descriptor.fDirty)
4063 {
4064 rc = vmdkWriteDescriptor(pImage);
4065 if (RT_FAILURE(rc))
4066 goto out;
4067 }
4068
4069 for (unsigned i = 0; i < pImage->cExtents; i++)
4070 {
4071 pExtent = &pImage->pExtents[i];
4072 if (pExtent->pFile != NULL && pExtent->fMetaDirty)
4073 {
4074 switch (pExtent->enmType)
4075 {
4076 case VMDKETYPE_HOSTED_SPARSE:
4077 rc = vmdkWriteMetaSparseExtent(pExtent, 0);
4078 if (RT_FAILURE(rc))
4079 goto out;
4080 if (pExtent->fFooter)
4081 {
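                        /* The footer occupies the second-to-last sector of the
                         * file; the last sector holds the end-of-stream marker. */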
4082 uint64_t cbSize;
4083 rc = vmdkFileGetSize(pExtent->pFile, &cbSize);
4084 if (RT_FAILURE(rc))
4085 goto out;
4086 cbSize = RT_ALIGN_64(cbSize, 512);
4087 rc = vmdkWriteMetaSparseExtent(pExtent, cbSize - 2*512);
4088 if (RT_FAILURE(rc))
4089 goto out;
4090 }
4091 break;
4092#ifdef VBOX_WITH_VMDK_ESX
4093 case VMDKETYPE_ESX_SPARSE:
4094 /** @todo update the header. */
4095 break;
4096#endif /* VBOX_WITH_VMDK_ESX */
4097 case VMDKETYPE_VMFS:
4098 case VMDKETYPE_FLAT:
4099 /* Nothing to do. */
4100 break;
4101 case VMDKETYPE_ZERO:
4102 default:
4103 AssertMsgFailed(("extent with type %d marked as dirty\n",
4104 pExtent->enmType));
4105 break;
4106 }
4107 }
4108 switch (pExtent->enmType)
4109 {
4110 case VMDKETYPE_HOSTED_SPARSE:
4111#ifdef VBOX_WITH_VMDK_ESX
4112 case VMDKETYPE_ESX_SPARSE:
4113#endif /* VBOX_WITH_VMDK_ESX */
4114 case VMDKETYPE_VMFS:
4115 case VMDKETYPE_FLAT:
4116                /** @todo implement a proper absolute path check. */
4117 if ( pExtent->pFile != NULL
4118 && !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
4119 && !(pExtent->pszBasename[0] == RTPATH_SLASH))
4120 rc = vmdkFileFlush(pExtent->pFile);
4121 break;
4122 case VMDKETYPE_ZERO:
4123 /* No need to do anything for this extent. */
4124 break;
4125 default:
4126 AssertMsgFailed(("unknown extent type %d\n", pExtent->enmType));
4127 break;
4128 }
4129 }
4130
4131out:
4132 return rc;
4133}
4134
4135/**
4136 * Internal. Find extent corresponding to the sector number in the disk.
4137 */
4138static int vmdkFindExtent(PVMDKIMAGE pImage, uint64_t offSector,
4139 PVMDKEXTENT *ppExtent, uint64_t *puSectorInExtent)
4140{
4141 PVMDKEXTENT pExtent = NULL;
4142 int rc = VINF_SUCCESS;
4143
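    /* Walk the extents in order, subtracting each extent's nominal size until
     * the requested sector falls inside one. The returned in-extent sector
     * additionally includes the extent's uSectorOffset, which matters for flat
     * extents that start at an offset within their backing file or device. */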
4144 for (unsigned i = 0; i < pImage->cExtents; i++)
4145 {
4146 if (offSector < pImage->pExtents[i].cNominalSectors)
4147 {
4148 pExtent = &pImage->pExtents[i];
4149 *puSectorInExtent = offSector + pImage->pExtents[i].uSectorOffset;
4150 break;
4151 }
4152 offSector -= pImage->pExtents[i].cNominalSectors;
4153 }
4154
4155 if (pExtent)
4156 *ppExtent = pExtent;
4157 else
4158 rc = VERR_IO_SECTOR_NOT_FOUND;
4159
4160 return rc;
4161}
4162
4163/**
4164 * Internal. Hash function for placing the grain table hash entries.
4165 */
4166static uint32_t vmdkGTCacheHash(PVMDKGTCACHE pCache, uint64_t uSector,
4167 unsigned uExtent)
4168{
4169 /** @todo this hash function is quite simple, maybe use a better one which
4170 * scrambles the bits better. */
4171 return (uSector + uExtent) % pCache->cEntries;
4172}
4173
4174/**
4175 * Internal. Get sector number in the extent file from the relative sector
4176 * number in the extent.
4177 */
4178static int vmdkGetSector(PVMDKGTCACHE pCache, PVMDKEXTENT pExtent,
4179 uint64_t uSector, uint64_t *puExtentSector)
4180{
4181 uint64_t uGDIndex, uGTSector, uGTBlock;
4182 uint32_t uGTHash, uGTBlockIndex;
4183 PVMDKGTCACHEENTRY pGTCacheEntry;
4184 uint32_t aGTDataTmp[VMDK_GT_CACHELINE_SIZE];
4185 int rc;
4186
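    /* The translation is a two-level lookup: the grain directory entry at
     * index uSector / cSectorsPerGDE gives the start sector of a grain table,
     * and the grain table entry at index (uSector / cSectorsPerGrain) modulo
     * cGTEntries holds the absolute sector of the grain, 0 meaning that the
     * grain is not allocated. Only a VMDK_GT_CACHELINE_SIZE chunk of the
     * grain table is read and cached at a time. */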
4187 uGDIndex = uSector / pExtent->cSectorsPerGDE;
4188 if (uGDIndex >= pExtent->cGDEntries)
4189 return VERR_OUT_OF_RANGE;
4190 uGTSector = pExtent->pGD[uGDIndex];
4191 if (!uGTSector)
4192 {
4193 /* There is no grain table referenced by this grain directory
4194 * entry. So there is absolutely no data in this area. */
4195 *puExtentSector = 0;
4196 return VINF_SUCCESS;
4197 }
4198
4199 uGTBlock = uSector / (pExtent->cSectorsPerGrain * VMDK_GT_CACHELINE_SIZE);
4200 uGTHash = vmdkGTCacheHash(pCache, uGTBlock, pExtent->uExtent);
4201 pGTCacheEntry = &pCache->aGTCache[uGTHash];
4202 if ( pGTCacheEntry->uExtent != pExtent->uExtent
4203 || pGTCacheEntry->uGTBlock != uGTBlock)
4204 {
4205 /* Cache miss, fetch data from disk. */
4206 rc = vmdkFileReadAt(pExtent->pFile,
4207 VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
4208 aGTDataTmp, sizeof(aGTDataTmp), NULL);
4209 if (RT_FAILURE(rc))
4210 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot read grain table entry in '%s'"), pExtent->pszFullname);
4211 pGTCacheEntry->uExtent = pExtent->uExtent;
4212 pGTCacheEntry->uGTBlock = uGTBlock;
4213 for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
4214 pGTCacheEntry->aGTData[i] = RT_LE2H_U32(aGTDataTmp[i]);
4215 }
4216 uGTBlockIndex = (uSector / pExtent->cSectorsPerGrain) % VMDK_GT_CACHELINE_SIZE;
4217 uint32_t uGrainSector = pGTCacheEntry->aGTData[uGTBlockIndex];
4218 if (uGrainSector)
4219 *puExtentSector = uGrainSector + uSector % pExtent->cSectorsPerGrain;
4220 else
4221 *puExtentSector = 0;
4222 return VINF_SUCCESS;
4223}
4224
4225/**
4226 * Internal. Allocates a new grain table (if necessary), writes the grain
4227 * and updates the grain table. The cache is also updated by this operation.
4228 * This is separate from vmdkGetSector, because that should be as fast as
4229 * possible. Most code from vmdkGetSector also appears here.
4230 */
4231static int vmdkAllocGrain(PVMDKGTCACHE pCache, PVMDKEXTENT pExtent,
4232 uint64_t uSector, const void *pvBuf,
4233 uint64_t cbWrite)
4234{
4235 uint64_t uGDIndex, uGTSector, uRGTSector, uGTBlock;
4236 uint64_t cbExtentSize;
4237 uint32_t uGTHash, uGTBlockIndex;
4238 PVMDKGTCACHEENTRY pGTCacheEntry;
4239 uint32_t aGTDataTmp[VMDK_GT_CACHELINE_SIZE];
4240 int rc;
4241
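    /* Rough flow: if the grain directory entry is still empty, append a
     * zero-filled grain table (and its redundant copy, if the extent has one)
     * to the file and hook it into the grain directory; then append the grain
     * data at the current end of the file, and finally update the grain table
     * entry on disk and in the grain table cache to point at the new grain. */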
4242 uGDIndex = uSector / pExtent->cSectorsPerGDE;
4243 if (uGDIndex >= pExtent->cGDEntries)
4244 return VERR_OUT_OF_RANGE;
4245 uGTSector = pExtent->pGD[uGDIndex];
4246 if (pExtent->pRGD)
4247 uRGTSector = pExtent->pRGD[uGDIndex];
4248 else
4249 uRGTSector = 0; /**< avoid compiler warning */
4250 if (!uGTSector)
4251 {
4252 /* There is no grain table referenced by this grain directory
4253 * entry. So there is absolutely no data in this area. Allocate
4254 * a new grain table and put the reference to it in the GDs. */
4255 rc = vmdkFileGetSize(pExtent->pFile, &cbExtentSize);
4256 if (RT_FAILURE(rc))
4257 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: error getting size in '%s'"), pExtent->pszFullname);
4258 Assert(!(cbExtentSize % 512));
4259 cbExtentSize = RT_ALIGN_64(cbExtentSize, 512);
4260 uGTSector = VMDK_BYTE2SECTOR(cbExtentSize);
4261 /* For writable streamOptimized extents the final sector is the
4262 * end-of-stream marker. Will be re-added after the grain table.
4263 * If the file has a footer it also will be re-added before EOS. */
4264 if (pExtent->pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
4265 {
4266 uint64_t uEOSOff = 0;
4267 uGTSector--;
4268 if (pExtent->fFooter)
4269 {
4270 uGTSector--;
4271 uEOSOff = 512;
4272 rc = vmdkWriteMetaSparseExtent(pExtent, VMDK_SECTOR2BYTE(uGTSector) + pExtent->cGTEntries * sizeof(uint32_t));
4273 if (RT_FAILURE(rc))
4274 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write footer after grain table in '%s'"), pExtent->pszFullname);
4275 }
4276 pExtent->uLastGrainSector = 0;
4277 uint8_t aEOS[512];
4278 memset(aEOS, '\0', sizeof(aEOS));
4279 rc = vmdkFileWriteAt(pExtent->pFile,
4280 VMDK_SECTOR2BYTE(uGTSector) + pExtent->cGTEntries * sizeof(uint32_t) + uEOSOff,
4281 aEOS, sizeof(aEOS), NULL);
4282 if (RT_FAILURE(rc))
4283                return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write end-of-stream marker after grain table in '%s'"), pExtent->pszFullname);
4284 }
4285 /* Normally the grain table is preallocated for hosted sparse extents
4286 * that support more than 32 bit sector numbers. So this shouldn't
4287 * ever happen on a valid extent. */
4288 if (uGTSector > UINT32_MAX)
4289 return VERR_VD_VMDK_INVALID_HEADER;
4290        /* Write grain table by writing the required number of grain table
4291         * cache chunks. This avoids dynamic memory allocation, but is a bit
4292         * slower; as this is a pretty infrequent case it should be
4293         * acceptable. */
4294 memset(aGTDataTmp, '\0', sizeof(aGTDataTmp));
4295 for (unsigned i = 0;
4296 i < pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE;
4297 i++)
4298 {
4299 rc = vmdkFileWriteAt(pExtent->pFile,
4300 VMDK_SECTOR2BYTE(uGTSector) + i * sizeof(aGTDataTmp),
4301 aGTDataTmp, sizeof(aGTDataTmp), NULL);
4302 if (RT_FAILURE(rc))
4303 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write grain table allocation in '%s'"), pExtent->pszFullname);
4304 }
4305 if (pExtent->pRGD)
4306 {
4307 AssertReturn(!uRGTSector, VERR_VD_VMDK_INVALID_HEADER);
4308 rc = vmdkFileGetSize(pExtent->pFile, &cbExtentSize);
4309 if (RT_FAILURE(rc))
4310 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: error getting size in '%s'"), pExtent->pszFullname);
4311 Assert(!(cbExtentSize % 512));
4312 uRGTSector = VMDK_BYTE2SECTOR(cbExtentSize);
4313 /* For writable streamOptimized extents the final sector is the
4314 * end-of-stream marker. Will be re-added after the grain table.
4315 * If the file has a footer it also will be re-added before EOS. */
4316 if (pExtent->pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
4317 {
4318 uint64_t uEOSOff = 0;
4319 uRGTSector--;
4320 if (pExtent->fFooter)
4321 {
4322 uRGTSector--;
4323 uEOSOff = 512;
4324 rc = vmdkWriteMetaSparseExtent(pExtent, VMDK_SECTOR2BYTE(uRGTSector) + pExtent->cGTEntries * sizeof(uint32_t));
4325 if (RT_FAILURE(rc))
4326 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write footer after redundant grain table in '%s'"), pExtent->pszFullname);
4327 }
4328 pExtent->uLastGrainSector = 0;
4329 uint8_t aEOS[512];
4330 memset(aEOS, '\0', sizeof(aEOS));
4331 rc = vmdkFileWriteAt(pExtent->pFile,
4332 VMDK_SECTOR2BYTE(uRGTSector) + pExtent->cGTEntries * sizeof(uint32_t) + uEOSOff,
4333 aEOS, sizeof(aEOS), NULL);
4334 if (RT_FAILURE(rc))
4335                return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write end-of-stream marker after redundant grain table in '%s'"), pExtent->pszFullname);
4336 }
4337 /* Normally the redundant grain table is preallocated for hosted
4338 * sparse extents that support more than 32 bit sector numbers. So
4339 * this shouldn't ever happen on a valid extent. */
4340 if (uRGTSector > UINT32_MAX)
4341 return VERR_VD_VMDK_INVALID_HEADER;
4342            /* Write backup grain table by writing the required number of grain
4343             * table cache chunks. This avoids dynamic memory allocation, but is
4344             * a bit slower; as this is a pretty infrequent case it should be
4345             * acceptable. */
4346 for (unsigned i = 0;
4347 i < pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE;
4348 i++)
4349 {
4350 rc = vmdkFileWriteAt(pExtent->pFile,
4351 VMDK_SECTOR2BYTE(uRGTSector) + i * sizeof(aGTDataTmp),
4352 aGTDataTmp, sizeof(aGTDataTmp), NULL);
4353 if (RT_FAILURE(rc))
4354 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write backup grain table allocation in '%s'"), pExtent->pszFullname);
4355 }
4356 }
4357
4358        /* Update the grain directory on disk (doing it before writing the
4359         * grain table would result in a garbled extent if the operation is
4360         * aborted for some reason; this way the worst that can happen is
4361         * some unused sectors in the extent). */
4362        uint32_t uGTSectorLE = RT_H2LE_U32((uint32_t)uGTSector); /* checked against UINT32_MAX above */
4363 rc = vmdkFileWriteAt(pExtent->pFile,
4364 VMDK_SECTOR2BYTE(pExtent->uSectorGD) + uGDIndex * sizeof(uGTSectorLE),
4365 &uGTSectorLE, sizeof(uGTSectorLE), NULL);
4366 if (RT_FAILURE(rc))
4367 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write grain directory entry in '%s'"), pExtent->pszFullname);
4368 if (pExtent->pRGD)
4369 {
4370            uint32_t uRGTSectorLE = RT_H2LE_U32((uint32_t)uRGTSector); /* checked against UINT32_MAX above */
4371 rc = vmdkFileWriteAt(pExtent->pFile,
4372 VMDK_SECTOR2BYTE(pExtent->uSectorRGD) + uGDIndex * sizeof(uRGTSectorLE),
4373 &uRGTSectorLE, sizeof(uRGTSectorLE), NULL);
4374 if (RT_FAILURE(rc))
4375 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write backup grain directory entry in '%s'"), pExtent->pszFullname);
4376 }
4377
4378 /* As the final step update the in-memory copy of the GDs. */
4379 pExtent->pGD[uGDIndex] = uGTSector;
4380 if (pExtent->pRGD)
4381 pExtent->pRGD[uGDIndex] = uRGTSector;
4382 }
4383
4384 rc = vmdkFileGetSize(pExtent->pFile, &cbExtentSize);
4385 if (RT_FAILURE(rc))
4386 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: error getting size in '%s'"), pExtent->pszFullname);
4387 Assert(!(cbExtentSize % 512));
4388
4389 /* Write the data. Always a full grain, or we're in big trouble. */
4390 if (pExtent->pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
4391 {
4392 /* For streamOptimized extents this is a little more difficult, as the
4393 * cached data also needs to be updated, to handle updating the last
4394 * written block properly. Also we're trying to avoid unnecessary gaps.
4395 * Additionally the end-of-stream marker needs to be written. */
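        /* Note that the file currently ends with the end-of-stream marker,
         * preceded by the footer if one is used; the new grain overwrites
         * these trailing sectors and they are re-appended below, right after
         * the compressed grain data. */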
4396 if (!pExtent->uLastGrainSector)
4397 {
4398 cbExtentSize -= 512;
4399 if (pExtent->fFooter)
4400 cbExtentSize -= 512;
4401 }
4402 else
4403 cbExtentSize = VMDK_SECTOR2BYTE(pExtent->uLastGrainSector) + pExtent->cbLastGrainWritten;
4404 Assert(cbWrite == VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
4405 uint32_t cbGrain = 0;
4406 rc = vmdkFileDeflateAt(pExtent->pFile, cbExtentSize,
4407 pvBuf, cbWrite, VMDK_MARKER_IGNORE, uSector, &cbGrain);
4408 if (RT_FAILURE(rc))
4409 {
4410 pExtent->uGrainSector = 0;
4411 pExtent->uLastGrainSector = 0;
4412 AssertRC(rc);
4413 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write allocated compressed data block in '%s'"), pExtent->pszFullname);
4414 }
4415 cbGrain = RT_ALIGN(cbGrain, 512);
4416 pExtent->uLastGrainSector = VMDK_BYTE2SECTOR(cbExtentSize);
4417 pExtent->uLastGrainWritten = uSector / pExtent->cSectorsPerGrain;
4418 pExtent->cbLastGrainWritten = cbGrain;
4419 memcpy(pExtent->pvGrain, pvBuf, cbWrite);
4420 pExtent->uGrainSector = uSector;
4421
4422 uint64_t uEOSOff = 0;
4423 if (pExtent->fFooter)
4424 {
4425 uEOSOff = 512;
4426 rc = vmdkWriteMetaSparseExtent(pExtent, cbExtentSize + RT_ALIGN(cbGrain, 512));
4427 if (RT_FAILURE(rc))
4428 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write footer after allocated data block in '%s'"), pExtent->pszFullname);
4429 }
4430 uint8_t aEOS[512];
4431 memset(aEOS, '\0', sizeof(aEOS));
4432 rc = vmdkFileWriteAt(pExtent->pFile, cbExtentSize + RT_ALIGN(cbGrain, 512) + uEOSOff,
4433 aEOS, sizeof(aEOS), NULL);
4434 if (RT_FAILURE(rc))
4435            return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write end-of-stream marker after allocated data block in '%s'"), pExtent->pszFullname);
4436 }
4437 else
4438 {
4439 rc = vmdkFileWriteAt(pExtent->pFile, cbExtentSize, pvBuf, cbWrite, NULL);
4440 if (RT_FAILURE(rc))
4441 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write allocated data block in '%s'"), pExtent->pszFullname);
4442 }
4443
4444 /* Update the grain table (and the cache). */
4445 uGTBlock = uSector / (pExtent->cSectorsPerGrain * VMDK_GT_CACHELINE_SIZE);
4446 uGTHash = vmdkGTCacheHash(pCache, uGTBlock, pExtent->uExtent);
4447 pGTCacheEntry = &pCache->aGTCache[uGTHash];
4448 if ( pGTCacheEntry->uExtent != pExtent->uExtent
4449 || pGTCacheEntry->uGTBlock != uGTBlock)
4450 {
4451 /* Cache miss, fetch data from disk. */
4452 rc = vmdkFileReadAt(pExtent->pFile,
4453 VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
4454 aGTDataTmp, sizeof(aGTDataTmp), NULL);
4455 if (RT_FAILURE(rc))
4456 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot read allocated grain table entry in '%s'"), pExtent->pszFullname);
4457 pGTCacheEntry->uExtent = pExtent->uExtent;
4458 pGTCacheEntry->uGTBlock = uGTBlock;
4459 for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
4460 pGTCacheEntry->aGTData[i] = RT_LE2H_U32(aGTDataTmp[i]);
4461 }
4462 else
4463 {
4464 /* Cache hit. Convert grain table block back to disk format, otherwise
4465 * the code below will write garbage for all but the updated entry. */
4466 for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
4467 aGTDataTmp[i] = RT_H2LE_U32(pGTCacheEntry->aGTData[i]);
4468 }
4469 uGTBlockIndex = (uSector / pExtent->cSectorsPerGrain) % VMDK_GT_CACHELINE_SIZE;
4470 aGTDataTmp[uGTBlockIndex] = RT_H2LE_U32(VMDK_BYTE2SECTOR(cbExtentSize));
4471 pGTCacheEntry->aGTData[uGTBlockIndex] = VMDK_BYTE2SECTOR(cbExtentSize);
4472 /* Update grain table on disk. */
4473 rc = vmdkFileWriteAt(pExtent->pFile,
4474 VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
4475 aGTDataTmp, sizeof(aGTDataTmp), NULL);
4476 if (RT_FAILURE(rc))
4477 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write updated grain table in '%s'"), pExtent->pszFullname);
4478 if (pExtent->pRGD)
4479 {
4480 /* Update backup grain table on disk. */
4481 rc = vmdkFileWriteAt(pExtent->pFile,
4482 VMDK_SECTOR2BYTE(uRGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
4483 aGTDataTmp, sizeof(aGTDataTmp), NULL);
4484 if (RT_FAILURE(rc))
4485 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write updated backup grain table in '%s'"), pExtent->pszFullname);
4486 }
4487#ifdef VBOX_WITH_VMDK_ESX
4488 if (RT_SUCCESS(rc) && pExtent->enmType == VMDKETYPE_ESX_SPARSE)
4489 {
4490 pExtent->uFreeSector = uGTSector + VMDK_BYTE2SECTOR(cbWrite);
4491 pExtent->fMetaDirty = true;
4492 }
4493#endif /* VBOX_WITH_VMDK_ESX */
4494 return rc;
4495}
4496
4497
4498/** @copydoc VBOXHDDBACKEND::pfnCheckIfValid */
4499static int vmdkCheckIfValid(const char *pszFilename, PVDINTERFACE pVDIfsDisk)
4500{
4501 LogFlowFunc(("pszFilename=\"%s\"\n", pszFilename));
4502 int rc = VINF_SUCCESS;
4503 PVMDKIMAGE pImage;
4504
4505 if ( !pszFilename
4506 || !*pszFilename
4507 || strchr(pszFilename, '"'))
4508 {
4509 rc = VERR_INVALID_PARAMETER;
4510 goto out;
4511 }
4512
4513 pImage = (PVMDKIMAGE)RTMemAllocZ(sizeof(VMDKIMAGE));
4514 if (!pImage)
4515 {
4516 rc = VERR_NO_MEMORY;
4517 goto out;
4518 }
4519 pImage->pszFilename = pszFilename;
4520 pImage->pFile = NULL;
4521 pImage->pExtents = NULL;
4522 pImage->pFiles = NULL;
4523 pImage->pGTCache = NULL;
4524 pImage->pDescData = NULL;
4525 pImage->pVDIfsDisk = pVDIfsDisk;
4526 /** @todo speed up this test open (VD_OPEN_FLAGS_INFO) by skipping as
4527 * much as possible in vmdkOpenImage. */
4528 rc = vmdkOpenImage(pImage, VD_OPEN_FLAGS_INFO | VD_OPEN_FLAGS_READONLY);
4529 vmdkFreeImage(pImage, false);
4530 RTMemFree(pImage);
4531
4532out:
4533 LogFlowFunc(("returns %Rrc\n", rc));
4534 return rc;
4535}
4536
4537/** @copydoc VBOXHDDBACKEND::pfnOpen */
4538static int vmdkOpen(const char *pszFilename, unsigned uOpenFlags,
4539 PVDINTERFACE pVDIfsDisk, PVDINTERFACE pVDIfsImage,
4540 void **ppBackendData)
4541{
4542 LogFlowFunc(("pszFilename=\"%s\" uOpenFlags=%#x pVDIfsDisk=%#p pVDIfsImage=%#p ppBackendData=%#p\n", pszFilename, uOpenFlags, pVDIfsDisk, pVDIfsImage, ppBackendData));
4543 int rc;
4544 PVMDKIMAGE pImage;
4545
4546 /* Check open flags. All valid flags are supported. */
4547 if (uOpenFlags & ~VD_OPEN_FLAGS_MASK)
4548 {
4549 rc = VERR_INVALID_PARAMETER;
4550 goto out;
4551 }
4552
4553 /* Check remaining arguments. */
4554 if ( !VALID_PTR(pszFilename)
4555 || !*pszFilename
4556 || strchr(pszFilename, '"'))
4557 {
4558 rc = VERR_INVALID_PARAMETER;
4559 goto out;
4560 }
4561
4562
4563 pImage = (PVMDKIMAGE)RTMemAllocZ(sizeof(VMDKIMAGE));
4564 if (!pImage)
4565 {
4566 rc = VERR_NO_MEMORY;
4567 goto out;
4568 }
4569 pImage->pszFilename = pszFilename;
4570 pImage->pFile = NULL;
4571 pImage->pExtents = NULL;
4572 pImage->pFiles = NULL;
4573 pImage->pGTCache = NULL;
4574 pImage->pDescData = NULL;
4575 pImage->pVDIfsDisk = pVDIfsDisk;
4576
4577 rc = vmdkOpenImage(pImage, uOpenFlags);
4578 if (RT_SUCCESS(rc))
4579 *ppBackendData = pImage;
4580
4581out:
4582 LogFlowFunc(("returns %Rrc (pBackendData=%#p)\n", rc, *ppBackendData));
4583 return rc;
4584}
4585
4586/** @copydoc VBOXHDDBACKEND::pfnCreate */
4587static int vmdkCreate(const char *pszFilename, uint64_t cbSize,
4588 unsigned uImageFlags, const char *pszComment,
4589 PCPDMMEDIAGEOMETRY pPCHSGeometry,
4590 PCPDMMEDIAGEOMETRY pLCHSGeometry, PCRTUUID pUuid,
4591 unsigned uOpenFlags, unsigned uPercentStart,
4592 unsigned uPercentSpan, PVDINTERFACE pVDIfsDisk,
4593 PVDINTERFACE pVDIfsImage, PVDINTERFACE pVDIfsOperation,
4594 void **ppBackendData)
4595{
4596 LogFlowFunc(("pszFilename=\"%s\" cbSize=%llu uImageFlags=%#x pszComment=\"%s\" pPCHSGeometry=%#p pLCHSGeometry=%#p Uuid=%RTuuid uOpenFlags=%#x uPercentStart=%u uPercentSpan=%u pVDIfsDisk=%#p pVDIfsImage=%#p pVDIfsOperation=%#p ppBackendData=%#p", pszFilename, cbSize, uImageFlags, pszComment, pPCHSGeometry, pLCHSGeometry, pUuid, uOpenFlags, uPercentStart, uPercentSpan, pVDIfsDisk, pVDIfsImage, pVDIfsOperation, ppBackendData));
4597 int rc;
4598 PVMDKIMAGE pImage;
4599
4600 PFNVDPROGRESS pfnProgress = NULL;
4601 void *pvUser = NULL;
4602 PVDINTERFACE pIfProgress = VDInterfaceGet(pVDIfsOperation,
4603 VDINTERFACETYPE_PROGRESS);
4604 PVDINTERFACEPROGRESS pCbProgress = NULL;
4605 if (pIfProgress)
4606 {
4607 pCbProgress = VDGetInterfaceProgress(pIfProgress);
4608 pfnProgress = pCbProgress->pfnProgress;
4609 pvUser = pIfProgress->pvUser;
4610 }
4611
4612 /* Check open flags. All valid flags are supported. */
4613 if (uOpenFlags & ~VD_OPEN_FLAGS_MASK)
4614 {
4615 rc = VERR_INVALID_PARAMETER;
4616 goto out;
4617 }
4618
4619 /* Check size. Maximum 2TB-64K for sparse images, otherwise unlimited. */
4620 if ( !cbSize
4621 || (!(uImageFlags & VD_IMAGE_FLAGS_FIXED) && cbSize >= _1T * 2 - _64K))
4622 {
4623 rc = VERR_VD_INVALID_SIZE;
4624 goto out;
4625 }
4626
4627 /* Check remaining arguments. */
4628 if ( !VALID_PTR(pszFilename)
4629 || !*pszFilename
4630 || strchr(pszFilename, '"')
4631 || !VALID_PTR(pPCHSGeometry)
4632 || !VALID_PTR(pLCHSGeometry)
4633#ifndef VBOX_WITH_VMDK_ESX
4634 || ( uImageFlags & VD_VMDK_IMAGE_FLAGS_ESX
4635 && !(uImageFlags & VD_IMAGE_FLAGS_FIXED))
4636#endif
4637 || ( (uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
4638 && (uImageFlags & ~(VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED | VD_IMAGE_FLAGS_DIFF))))
4639 {
4640 rc = VERR_INVALID_PARAMETER;
4641 goto out;
4642 }
4643
4644 pImage = (PVMDKIMAGE)RTMemAllocZ(sizeof(VMDKIMAGE));
4645 if (!pImage)
4646 {
4647 rc = VERR_NO_MEMORY;
4648 goto out;
4649 }
4650 pImage->pszFilename = pszFilename;
4651 pImage->pFile = NULL;
4652 pImage->pExtents = NULL;
4653 pImage->pFiles = NULL;
4654 pImage->pGTCache = NULL;
4655 pImage->pDescData = NULL;
4656 pImage->pVDIfsDisk = pVDIfsDisk;
4657 /* Descriptors for split images can be pretty large, especially if the
4658 * filename is long. So prepare for the worst, and allocate quite some
4659 * memory for the descriptor in this case. */
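    /* (200 sectors = 100K for the split case vs. 20 sectors = 10K otherwise.) */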
4660 if (uImageFlags & VD_VMDK_IMAGE_FLAGS_SPLIT_2G)
4661 pImage->cbDescAlloc = VMDK_SECTOR2BYTE(200);
4662 else
4663 pImage->cbDescAlloc = VMDK_SECTOR2BYTE(20);
4664 pImage->pDescData = (char *)RTMemAllocZ(pImage->cbDescAlloc);
4665 if (!pImage->pDescData)
4666 {
4667 rc = VERR_NO_MEMORY;
4668 goto out;
4669 }
4670
4671 rc = vmdkCreateImage(pImage, cbSize, uImageFlags, pszComment,
4672 pPCHSGeometry, pLCHSGeometry, pUuid,
4673 pfnProgress, pvUser, uPercentStart, uPercentSpan);
4674 if (RT_SUCCESS(rc))
4675 {
4676 /* So far the image is opened in read/write mode. Make sure the
4677 * image is opened in read-only mode if the caller requested that. */
4678 if (uOpenFlags & VD_OPEN_FLAGS_READONLY)
4679 {
4680 vmdkFreeImage(pImage, false);
4681 rc = vmdkOpenImage(pImage, uOpenFlags);
4682 if (RT_FAILURE(rc))
4683 goto out;
4684 }
4685 *ppBackendData = pImage;
4686 }
4687 else
4688 {
4689 RTMemFree(pImage->pDescData);
4690 RTMemFree(pImage);
4691 }
4692
4693out:
4694 LogFlowFunc(("returns %Rrc (pBackendData=%#p)\n", rc, *ppBackendData));
4695 return rc;
4696}
4697
4698/**
4699 * Replaces a fragment of a string with the specified string.
4700 *
4701 * @returns Pointer to the allocated UTF-8 string.
4702 * @param pszWhere UTF-8 string to search in.
4703 * @param pszWhat UTF-8 string to search for.
4704 * @param pszByWhat UTF-8 string to replace the found string with.
4705 */
4706static char * vmdkStrReplace(const char *pszWhere, const char *pszWhat, const char *pszByWhat)
4707{
4708 AssertPtr(pszWhere);
4709 AssertPtr(pszWhat);
4710 AssertPtr(pszByWhat);
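    /* Note: only the first occurrence of pszWhat is replaced, e.g. replacing
     * "image" by "renamed" in "image-s001.vmdk" yields "renamed-s001.vmdk".
     * NULL is returned both when pszWhat does not occur in pszWhere and when
     * the allocation fails. */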
4711 const char *pszFoundStr = strstr(pszWhere, pszWhat);
4712 if (!pszFoundStr)
4713 return NULL;
4714 size_t cFinal = strlen(pszWhere) + 1 + strlen(pszByWhat) - strlen(pszWhat);
4715 char *pszNewStr = (char *)RTMemAlloc(cFinal);
4716 if (pszNewStr)
4717 {
4718 char *pszTmp = pszNewStr;
4719 memcpy(pszTmp, pszWhere, pszFoundStr - pszWhere);
4720 pszTmp += pszFoundStr - pszWhere;
4721 memcpy(pszTmp, pszByWhat, strlen(pszByWhat));
4722 pszTmp += strlen(pszByWhat);
4723 strcpy(pszTmp, pszFoundStr + strlen(pszWhat));
4724 }
4725 return pszNewStr;
4726}
4727
4728/** @copydoc VBOXHDDBACKEND::pfnRename */
4729static int vmdkRename(void *pBackendData, const char *pszFilename)
4730{
4731 LogFlowFunc(("pBackendData=%#p pszFilename=%#p\n", pBackendData, pszFilename));
4732
4733 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
4734 int rc = VINF_SUCCESS;
4735 char **apszOldName = NULL;
4736 char **apszNewName = NULL;
4737 char **apszNewLines = NULL;
4738 char *pszOldDescName = NULL;
4739 bool fImageFreed = false;
4740 bool fEmbeddedDesc = false;
4741 unsigned cExtents = pImage->cExtents;
4742 char *pszNewBaseName = NULL;
4743 char *pszOldBaseName = NULL;
4744 char *pszNewFullName = NULL;
4745 char *pszOldFullName = NULL;
4746 const char *pszOldImageName;
4747 unsigned i, line;
4748 VMDKDESCRIPTOR DescriptorCopy;
4749 VMDKEXTENT ExtentCopy;
4750
4751 memset(&DescriptorCopy, 0, sizeof(DescriptorCopy));
4752
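    /* Renaming works by rewriting the extent lines of the in-memory
     * descriptor, flushing it, moving every extent file (and the separate
     * descriptor file, if any) to its new name, and finally re-opening the
     * image under the new name. On any failure all renames are undone and the
     * saved descriptor content is written back. */
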
4753 /* Check arguments. */
4754 if ( !pImage
4755 || (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_RAWDISK)
4756 || !VALID_PTR(pszFilename)
4757 || !*pszFilename)
4758 {
4759 rc = VERR_INVALID_PARAMETER;
4760 goto out;
4761 }
4762
4763 /*
4764 * Allocate an array to store both old and new names of renamed files
4765 * in case we have to roll back the changes. Arrays are initialized
4766 * with zeros. We actually save stuff when and if we change it.
4767 */
4768 apszOldName = (char **)RTMemTmpAllocZ((cExtents + 1) * sizeof(char*));
4769 apszNewName = (char **)RTMemTmpAllocZ((cExtents + 1) * sizeof(char*));
4770 apszNewLines = (char **)RTMemTmpAllocZ((cExtents) * sizeof(char*));
4771 if (!apszOldName || !apszNewName || !apszNewLines)
4772 {
4773 rc = VERR_NO_MEMORY;
4774 goto out;
4775 }
4776
4777 /* Save the descriptor size and position. */
4778 if (pImage->pDescData)
4779 {
4780 /* Separate descriptor file. */
4781 fEmbeddedDesc = false;
4782 }
4783 else
4784 {
4785 /* Embedded descriptor file. */
4786 ExtentCopy = pImage->pExtents[0];
4787 fEmbeddedDesc = true;
4788 }
4789 /* Save the descriptor content. */
4790 DescriptorCopy.cLines = pImage->Descriptor.cLines;
4791 for (i = 0; i < DescriptorCopy.cLines; i++)
4792 {
4793 DescriptorCopy.aLines[i] = RTStrDup(pImage->Descriptor.aLines[i]);
4794 if (!DescriptorCopy.aLines[i])
4795 {
4796 rc = VERR_NO_MEMORY;
4797 goto out;
4798 }
4799 }
4800
4801 /* Prepare both old and new base names used for string replacement. */
4802 pszNewBaseName = RTStrDup(RTPathFilename(pszFilename));
4803 RTPathStripExt(pszNewBaseName);
4804 pszOldBaseName = RTStrDup(RTPathFilename(pImage->pszFilename));
4805 RTPathStripExt(pszOldBaseName);
4806 /* Prepare both old and new full names used for string replacement. */
4807 pszNewFullName = RTStrDup(pszFilename);
4808 RTPathStripExt(pszNewFullName);
4809 pszOldFullName = RTStrDup(pImage->pszFilename);
4810 RTPathStripExt(pszOldFullName);
4811
4812 /* --- Up to this point we have not done any damage yet. --- */
4813
4814 /* Save the old name for easy access to the old descriptor file. */
4815 pszOldDescName = RTStrDup(pImage->pszFilename);
4816 /* Save old image name. */
4817 pszOldImageName = pImage->pszFilename;
4818
4819 /* Update the descriptor with modified extent names. */
4820 for (i = 0, line = pImage->Descriptor.uFirstExtent;
4821 i < cExtents;
4822 i++, line = pImage->Descriptor.aNextLines[line])
4823 {
4824 /* Assume that vmdkStrReplace will fail. */
4825 rc = VERR_NO_MEMORY;
4826 /* Update the descriptor. */
4827 apszNewLines[i] = vmdkStrReplace(pImage->Descriptor.aLines[line],
4828 pszOldBaseName, pszNewBaseName);
4829 if (!apszNewLines[i])
4830 goto rollback;
4831 pImage->Descriptor.aLines[line] = apszNewLines[i];
4832 }
4833 /* Make sure the descriptor gets written back. */
4834 pImage->Descriptor.fDirty = true;
4835 /* Flush the descriptor now, in case it is embedded. */
4836 (void)vmdkFlushImage(pImage);
4837
4838 /* Close and rename/move extents. */
4839 for (i = 0; i < cExtents; i++)
4840 {
4841 PVMDKEXTENT pExtent = &pImage->pExtents[i];
4842 /* Compose new name for the extent. */
4843 apszNewName[i] = vmdkStrReplace(pExtent->pszFullname,
4844 pszOldFullName, pszNewFullName);
4845 if (!apszNewName[i])
4846 goto rollback;
4847 /* Close the extent file. */
4848 vmdkFileClose(pImage, &pExtent->pFile, false);
4849 /* Rename the extent file. */
4850 rc = RTFileMove(pExtent->pszFullname, apszNewName[i], 0);
4851 if (RT_FAILURE(rc))
4852 goto rollback;
4853 /* Remember the old name. */
4854 apszOldName[i] = RTStrDup(pExtent->pszFullname);
4855 }
4856 /* Release all old stuff. */
4857 vmdkFreeImage(pImage, false);
4858
4859 fImageFreed = true;
4860
4861 /* Last elements of new/old name arrays are intended for
4862 * storing descriptor's names.
4863 */
4864 apszNewName[cExtents] = RTStrDup(pszFilename);
4865 /* Rename the descriptor file if it's separate. */
4866 if (!fEmbeddedDesc)
4867 {
4868 rc = RTFileMove(pImage->pszFilename, apszNewName[cExtents], 0);
4869 if (RT_FAILURE(rc))
4870 goto rollback;
4871 /* Save old name only if we may need to change it back. */
4872        apszOldName[cExtents] = RTStrDup(pImage->pszFilename); /* the old descriptor name, needed for rollback */
4873 }
4874
4875 /* Update pImage with the new information. */
4876 pImage->pszFilename = pszFilename;
4877
4878 /* Open the new image. */
4879 rc = vmdkOpenImage(pImage, pImage->uOpenFlags);
4880 if (RT_SUCCESS(rc))
4881 goto out;
4882
4883rollback:
4884 /* Roll back all changes in case of failure. */
4885 if (RT_FAILURE(rc))
4886 {
4887 int rrc;
4888 if (!fImageFreed)
4889 {
4890 /*
4891 * Some extents may have been closed, close the rest. We will
4892 * re-open the whole thing later.
4893 */
4894 vmdkFreeImage(pImage, false);
4895 }
4896 /* Rename files back. */
4897 for (i = 0; i <= cExtents; i++)
4898 {
4899 if (apszOldName[i])
4900 {
4901 rrc = RTFileMove(apszNewName[i], apszOldName[i], 0);
4902 AssertRC(rrc);
4903 }
4904 }
4905 /* Restore the old descriptor. */
4906 PVMDKFILE pFile;
4907 rrc = vmdkFileOpen(pImage, &pFile, pszOldDescName,
4908 RTFILE_O_READWRITE | RTFILE_O_OPEN | RTFILE_O_DENY_WRITE, false);
4909 AssertRC(rrc);
4910 if (fEmbeddedDesc)
4911 {
4912 ExtentCopy.pFile = pFile;
4913 pImage->pExtents = &ExtentCopy;
4914 }
4915 else
4916 {
4917 /* Shouldn't be null for separate descriptor.
4918 * There will be no access to the actual content.
4919 */
4920 pImage->pDescData = pszOldDescName;
4921 pImage->pFile = pFile;
4922 }
4923 pImage->Descriptor = DescriptorCopy;
4924 vmdkWriteDescriptor(pImage);
4925 vmdkFileClose(pImage, &pFile, false);
4926 /* Get rid of the stuff we implanted. */
4927 pImage->pExtents = NULL;
4928 pImage->pFile = NULL;
4929 pImage->pDescData = NULL;
4930 /* Re-open the image back. */
4931 pImage->pszFilename = pszOldImageName;
4932 rrc = vmdkOpenImage(pImage, pImage->uOpenFlags);
4933 AssertRC(rrc);
4934 }
4935
4936out:
4937 for (i = 0; i < DescriptorCopy.cLines; i++)
4938 if (DescriptorCopy.aLines[i])
4939 RTStrFree(DescriptorCopy.aLines[i]);
4940 if (apszOldName)
4941 {
4942 for (i = 0; i <= cExtents; i++)
4943 if (apszOldName[i])
4944 RTStrFree(apszOldName[i]);
4945 RTMemTmpFree(apszOldName);
4946 }
4947 if (apszNewName)
4948 {
4949 for (i = 0; i <= cExtents; i++)
4950 if (apszNewName[i])
4951 RTStrFree(apszNewName[i]);
4952 RTMemTmpFree(apszNewName);
4953 }
4954 if (apszNewLines)
4955 {
4956 for (i = 0; i < cExtents; i++)
4957 if (apszNewLines[i])
4958 RTStrFree(apszNewLines[i]);
4959 RTMemTmpFree(apszNewLines);
4960 }
4961 if (pszOldDescName)
4962 RTStrFree(pszOldDescName);
4963 if (pszOldBaseName)
4964 RTStrFree(pszOldBaseName);
4965 if (pszNewBaseName)
4966 RTStrFree(pszNewBaseName);
4967 if (pszOldFullName)
4968 RTStrFree(pszOldFullName);
4969 if (pszNewFullName)
4970 RTStrFree(pszNewFullName);
4971 LogFlowFunc(("returns %Rrc\n", rc));
4972 return rc;
4973}
4974
4975/** @copydoc VBOXHDDBACKEND::pfnClose */
4976static int vmdkClose(void *pBackendData, bool fDelete)
4977{
4978 LogFlowFunc(("pBackendData=%#p fDelete=%d\n", pBackendData, fDelete));
4979 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
4980 int rc = VINF_SUCCESS;
4981
4982 /* Freeing a never allocated image (e.g. because the open failed) is
4983 * not signalled as an error. After all nothing bad happens. */
4984 if (pImage)
4985 {
4986 vmdkFreeImage(pImage, fDelete);
4987 RTMemFree(pImage);
4988 }
4989
4990 LogFlowFunc(("returns %Rrc\n", rc));
4991 return rc;
4992}
4993
4994/** @copydoc VBOXHDDBACKEND::pfnRead */
4995static int vmdkRead(void *pBackendData, uint64_t uOffset, void *pvBuf,
4996 size_t cbToRead, size_t *pcbActuallyRead)
4997{
4998 LogFlowFunc(("pBackendData=%#p uOffset=%llu pvBuf=%#p cbToRead=%zu pcbActuallyRead=%#p\n", pBackendData, uOffset, pvBuf, cbToRead, pcbActuallyRead));
4999 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5000 PVMDKEXTENT pExtent;
5001 uint64_t uSectorExtentRel;
5002 uint64_t uSectorExtentAbs;
5003 int rc;
5004
5005 AssertPtr(pImage);
5006 Assert(uOffset % 512 == 0);
5007 Assert(cbToRead % 512 == 0);
5008
5009 if ( uOffset + cbToRead > pImage->cbSize
5010 || cbToRead == 0)
5011 {
5012 rc = VERR_INVALID_PARAMETER;
5013 goto out;
5014 }
5015
5016 rc = vmdkFindExtent(pImage, VMDK_BYTE2SECTOR(uOffset),
5017 &pExtent, &uSectorExtentRel);
5018 if (RT_FAILURE(rc))
5019 goto out;
5020
5021 /* Check access permissions as defined in the extent descriptor. */
5022 if (pExtent->enmAccess == VMDKACCESS_NOACCESS)
5023 {
5024 rc = VERR_VD_VMDK_INVALID_STATE;
5025 goto out;
5026 }
5027
5028 /* Clip read range to remain in this extent. */
5029 cbToRead = RT_MIN(cbToRead, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
5030
5031 /* Handle the read according to the current extent type. */
5032 switch (pExtent->enmType)
5033 {
5034 case VMDKETYPE_HOSTED_SPARSE:
5035#ifdef VBOX_WITH_VMDK_ESX
5036 case VMDKETYPE_ESX_SPARSE:
5037#endif /* VBOX_WITH_VMDK_ESX */
5038 rc = vmdkGetSector(pImage->pGTCache, pExtent, uSectorExtentRel,
5039 &uSectorExtentAbs);
5040 if (RT_FAILURE(rc))
5041 goto out;
5042 /* Clip read range to at most the rest of the grain. */
5043 cbToRead = RT_MIN(cbToRead, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain - uSectorExtentRel % pExtent->cSectorsPerGrain));
5044 Assert(!(cbToRead % 512));
5045 if (uSectorExtentAbs == 0)
5046 rc = VERR_VD_BLOCK_FREE;
5047 else
5048 {
5049 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
5050 {
5051 uint32_t uSectorInGrain = uSectorExtentRel % pExtent->cSectorsPerGrain;
5052 uSectorExtentAbs -= uSectorInGrain;
5053 uint64_t uLBA;
5054 if (pExtent->uGrainSector != uSectorExtentAbs)
5055 {
5056 rc = vmdkFileInflateAt(pExtent->pFile, VMDK_SECTOR2BYTE(uSectorExtentAbs),
5057 pExtent->pvGrain, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain), VMDK_MARKER_IGNORE, &uLBA, NULL);
5058 if (RT_FAILURE(rc))
5059 {
5060 pExtent->uGrainSector = 0;
5061 AssertRC(rc);
5062 goto out;
5063 }
5064 pExtent->uGrainSector = uSectorExtentAbs;
5065 Assert(uLBA == uSectorExtentRel);
5066 }
5067 memcpy(pvBuf, (uint8_t *)pExtent->pvGrain + VMDK_SECTOR2BYTE(uSectorInGrain), cbToRead);
5068 }
5069 else
5070 {
5071 rc = vmdkFileReadAt(pExtent->pFile,
5072 VMDK_SECTOR2BYTE(uSectorExtentAbs),
5073 pvBuf, cbToRead, NULL);
5074 }
5075 }
5076 break;
5077 case VMDKETYPE_VMFS:
5078 case VMDKETYPE_FLAT:
5079 rc = vmdkFileReadAt(pExtent->pFile,
5080 VMDK_SECTOR2BYTE(uSectorExtentRel),
5081 pvBuf, cbToRead, NULL);
5082 break;
5083 case VMDKETYPE_ZERO:
5084 memset(pvBuf, '\0', cbToRead);
5085 break;
5086 }
5087 if (pcbActuallyRead)
5088 *pcbActuallyRead = cbToRead;
5089
5090out:
5091 LogFlowFunc(("returns %Rrc\n", rc));
5092 return rc;
5093}
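/* Annotation (worked example, assuming the common 128-sector / 64 KiB grain
 * size): once vmdkFindExtent() has yielded an extent-relative sector, say
 * uSectorExtentRel = 2056, the sparse-read path above effectively computes
 *
 *   grain index    = 2056 / 128 = 16   (resolved via vmdkGetSector)
 *   uSectorInGrain = 2056 % 128 = 8    (offset of the data inside the grain)
 *
 * A grain-table entry of 0 means the grain was never allocated and the read
 * reports VERR_VD_BLOCK_FREE; otherwise the data is read at uSectorExtentAbs.
 * For streamOptimized images the whole compressed grain is inflated first and
 * cached in pExtent->pvGrain (keyed by pExtent->uGrainSector).
 */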
5094
5095/** @copydoc VBOXHDDBACKEND::pfnWrite */
5096static int vmdkWrite(void *pBackendData, uint64_t uOffset, const void *pvBuf,
5097 size_t cbToWrite, size_t *pcbWriteProcess,
5098 size_t *pcbPreRead, size_t *pcbPostRead, unsigned fWrite)
5099{
5100 LogFlowFunc(("pBackendData=%#p uOffset=%llu pvBuf=%#p cbToWrite=%zu pcbWriteProcess=%#p pcbPreRead=%#p pcbPostRead=%#p\n", pBackendData, uOffset, pvBuf, cbToWrite, pcbWriteProcess, pcbPreRead, pcbPostRead));
5101 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5102 PVMDKEXTENT pExtent;
5103 uint64_t uSectorExtentRel;
5104 uint64_t uSectorExtentAbs;
5105 int rc;
5106
5107 AssertPtr(pImage);
5108 Assert(uOffset % 512 == 0);
5109 Assert(cbToWrite % 512 == 0);
5110
5111 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
5112 {
5113 rc = VERR_VD_IMAGE_READ_ONLY;
5114 goto out;
5115 }
5116
5117 if (cbToWrite == 0)
5118 {
5119 rc = VERR_INVALID_PARAMETER;
5120 goto out;
5121 }
5122
5123 /* No size check here, will do that later when the extent is located.
5124 * There are sparse images out there which according to the spec are
5125 * invalid, because the total size is not a multiple of the grain size.
5126 * Also for sparse images which are stitched together in odd ways (not at
5127 * grain boundaries, and with the nominal size not being a multiple of the
5128 * grain size), this would prevent writing to the last grain. */
5129
5130 rc = vmdkFindExtent(pImage, VMDK_BYTE2SECTOR(uOffset),
5131 &pExtent, &uSectorExtentRel);
5132 if (RT_FAILURE(rc))
5133 goto out;
5134
5135 /* Check access permissions as defined in the extent descriptor. */
5136 if (pExtent->enmAccess != VMDKACCESS_READWRITE)
5137 {
5138 rc = VERR_VD_VMDK_INVALID_STATE;
5139 goto out;
5140 }
5141
5142 /* Handle the write according to the current extent type. */
5143 switch (pExtent->enmType)
5144 {
5145 case VMDKETYPE_HOSTED_SPARSE:
5146#ifdef VBOX_WITH_VMDK_ESX
5147 case VMDKETYPE_ESX_SPARSE:
5148#endif /* VBOX_WITH_VMDK_ESX */
5149 rc = vmdkGetSector(pImage->pGTCache, pExtent, uSectorExtentRel,
5150 &uSectorExtentAbs);
5151 if (RT_FAILURE(rc))
5152 goto out;
5153 /* Clip write range to at most the rest of the grain. */
5154 cbToWrite = RT_MIN(cbToWrite, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain - uSectorExtentRel % pExtent->cSectorsPerGrain));
5155 if ( pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED
5156 && uSectorExtentRel < (uint64_t)pExtent->uLastGrainWritten * pExtent->cSectorsPerGrain)
5157 {
5158 rc = VERR_VD_VMDK_INVALID_WRITE;
5159 goto out;
5160 }
5161 if (uSectorExtentAbs == 0)
5162 {
5163 if (cbToWrite == VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain))
5164 {
5165 /* Full block write to a previously unallocated block.
5166 * Check if the caller wants to avoid the automatic alloc. */
5167 if (!(fWrite & VD_WRITE_NO_ALLOC))
5168 {
5169 /* Allocate GT and find out where to store the grain. */
5170 rc = vmdkAllocGrain(pImage->pGTCache, pExtent,
5171 uSectorExtentRel, pvBuf, cbToWrite);
5172 }
5173 else
5174 rc = VERR_VD_BLOCK_FREE;
5175 *pcbPreRead = 0;
5176 *pcbPostRead = 0;
5177 }
5178 else
5179 {
5180 /* Clip write range to remain in this extent. */
5181 cbToWrite = RT_MIN(cbToWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
5182 *pcbPreRead = VMDK_SECTOR2BYTE(uSectorExtentRel % pExtent->cSectorsPerGrain);
5183 *pcbPostRead = VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain) - cbToWrite - *pcbPreRead;
5184 rc = VERR_VD_BLOCK_FREE;
5185 }
5186 }
5187 else
5188 {
5189 if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
5190 {
5191 uint32_t uSectorInGrain = uSectorExtentRel % pExtent->cSectorsPerGrain;
5192 uSectorExtentAbs -= uSectorInGrain;
5193 uint64_t uLBA = uSectorExtentRel;
5194 if ( pExtent->uGrainSector != uSectorExtentAbs
5195 || pExtent->uGrainSector != pExtent->uLastGrainSector)
5196 {
5197 rc = vmdkFileInflateAt(pExtent->pFile, VMDK_SECTOR2BYTE(uSectorExtentAbs),
5198 pExtent->pvGrain, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain), VMDK_MARKER_IGNORE, &uLBA, NULL);
5199 if (RT_FAILURE(rc))
5200 {
5201 pExtent->uGrainSector = 0;
5202 pExtent->uLastGrainSector = 0;
5203 AssertRC(rc);
5204 goto out;
5205 }
5206 pExtent->uGrainSector = uSectorExtentAbs;
5207 pExtent->uLastGrainSector = uSectorExtentAbs;
5208 Assert(uLBA == uSectorExtentRel);
5209 }
5210 memcpy((uint8_t *)pExtent->pvGrain + VMDK_SECTOR2BYTE(uSectorInGrain), pvBuf, cbToWrite);
5211 uint32_t cbGrain = 0;
5212 rc = vmdkFileDeflateAt(pExtent->pFile,
5213 VMDK_SECTOR2BYTE(uSectorExtentAbs),
5214 pExtent->pvGrain, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain),
5215 VMDK_MARKER_IGNORE, uLBA, &cbGrain);
5216 if (RT_FAILURE(rc))
5217 {
5218 pExtent->uGrainSector = 0;
5219 pExtent->uLastGrainSector = 0;
5220 AssertRC(rc);
5221 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write compressed data block in '%s'"), pExtent->pszFullname);
5222 }
5223 cbGrain = RT_ALIGN(cbGrain, 512);
5224 pExtent->uLastGrainSector = uSectorExtentAbs;
5225 pExtent->uLastGrainWritten = uSectorExtentRel / pExtent->cSectorsPerGrain;
5226 pExtent->cbLastGrainWritten = cbGrain;
5227
5228 uint64_t uEOSOff = 0;
5229 if (pExtent->fFooter)
5230 {
5231 uEOSOff = 512;
5232 rc = vmdkWriteMetaSparseExtent(pExtent, VMDK_SECTOR2BYTE(uSectorExtentAbs) + RT_ALIGN(cbGrain, 512));
5233 if (RT_FAILURE(rc))
5234 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write footer after data block in '%s'"), pExtent->pszFullname);
5235 }
5236 uint8_t aEOS[512];
5237 memset(aEOS, '\0', sizeof(aEOS));
5238 rc = vmdkFileWriteAt(pExtent->pFile,
5239 VMDK_SECTOR2BYTE(uSectorExtentAbs) + RT_ALIGN(cbGrain, 512) + uEOSOff,
5240 aEOS, sizeof(aEOS), NULL);
5241 if (RT_FAILURE(rc))
5242 return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write end-of-stream marker after data block in '%s'"), pExtent->pszFullname);
5243 }
5244 else
5245 {
5246 rc = vmdkFileWriteAt(pExtent->pFile,
5247 VMDK_SECTOR2BYTE(uSectorExtentAbs),
5248 pvBuf, cbToWrite, NULL);
5249 }
5250 }
5251 break;
5252 case VMDKETYPE_VMFS:
5253 case VMDKETYPE_FLAT:
5254 /* Clip write range to remain in this extent. */
5255 cbToWrite = RT_MIN(cbToWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
5256 rc = vmdkFileWriteAt(pExtent->pFile,
5257 VMDK_SECTOR2BYTE(uSectorExtentRel),
5258 pvBuf, cbToWrite, NULL);
5259 break;
5260 case VMDKETYPE_ZERO:
5261 /* Clip write range to remain in this extent. */
5262 cbToWrite = RT_MIN(cbToWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
5263 break;
5264 }
5265 if (pcbWriteProcess)
5266 *pcbWriteProcess = cbToWrite;
5267
5268out:
5269 LogFlowFunc(("returns %Rrc\n", rc));
5270 return rc;
5271}
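/* Annotation: writes that only partially cover an unallocated grain are not
 * completed here.  The function returns VERR_VD_BLOCK_FREE and fills
 * *pcbPreRead/*pcbPostRead with the number of bytes missing before and after
 * the written range, so the caller can perform the read-modify-write of the
 * full grain itself.  For streamOptimized images writes must also be
 * append-only: a write below uLastGrainWritten * cSectorsPerGrain is rejected
 * with VERR_VD_VMDK_INVALID_WRITE, and every compressed grain is followed by
 * an end-of-stream marker (plus a footer when pExtent->fFooter is set).
 */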
5272
5273/** @copydoc VBOXHDDBACKEND::pfnFlush */
5274static int vmdkFlush(void *pBackendData)
5275{
5276 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
5277 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5278 int rc;
5279
5280 AssertPtr(pImage);
5281
5282 rc = vmdkFlushImage(pImage);
5283 LogFlowFunc(("returns %Rrc\n", rc));
5284 return rc;
5285}
5286
5287/** @copydoc VBOXHDDBACKEND::pfnGetVersion */
5288static unsigned vmdkGetVersion(void *pBackendData)
5289{
5290 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
5291 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5292
5293 AssertPtr(pImage);
5294
5295 if (pImage)
5296 return VMDK_IMAGE_VERSION;
5297 else
5298 return 0;
5299}
5300
5301/** @copydoc VBOXHDDBACKEND::pfnGetSize */
5302static uint64_t vmdkGetSize(void *pBackendData)
5303{
5304 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
5305 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5306
5307 AssertPtr(pImage);
5308
5309 if (pImage)
5310 return pImage->cbSize;
5311 else
5312 return 0;
5313}
5314
5315/** @copydoc VBOXHDDBACKEND::pfnGetFileSize */
5316static uint64_t vmdkGetFileSize(void *pBackendData)
5317{
5318 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
5319 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5320 uint64_t cb = 0;
5321
5322 AssertPtr(pImage);
5323
5324 if (pImage)
5325 {
5326 uint64_t cbFile;
5327 if (pImage->pFile != NULL)
5328 {
5329 int rc = vmdkFileGetSize(pImage->pFile, &cbFile);
5330 if (RT_SUCCESS(rc))
5331 cb += cbFile;
5332 }
5333 for (unsigned i = 0; i < pImage->cExtents; i++)
5334 {
5335 if (pImage->pExtents[i].pFile != NULL)
5336 {
5337 int rc = vmdkFileGetSize(pImage->pExtents[i].pFile, &cbFile);
5338 if (RT_SUCCESS(rc))
5339 cb += cbFile;
5340 }
5341 }
5342 }
5343
5344 LogFlowFunc(("returns %lld\n", cb));
5345 return cb;
5346}
5347
5348/** @copydoc VBOXHDDBACKEND::pfnGetPCHSGeometry */
5349static int vmdkGetPCHSGeometry(void *pBackendData,
5350 PPDMMEDIAGEOMETRY pPCHSGeometry)
5351{
5352 LogFlowFunc(("pBackendData=%#p pPCHSGeometry=%#p\n", pBackendData, pPCHSGeometry));
5353 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5354 int rc;
5355
5356 AssertPtr(pImage);
5357
5358 if (pImage)
5359 {
5360 if (pImage->PCHSGeometry.cCylinders)
5361 {
5362 *pPCHSGeometry = pImage->PCHSGeometry;
5363 rc = VINF_SUCCESS;
5364 }
5365 else
5366 rc = VERR_VD_GEOMETRY_NOT_SET;
5367 }
5368 else
5369 rc = VERR_VD_NOT_OPENED;
5370
5371 LogFlowFunc(("returns %Rrc (PCHS=%u/%u/%u)\n", rc, pPCHSGeometry->cCylinders, pPCHSGeometry->cHeads, pPCHSGeometry->cSectors));
5372 return rc;
5373}
5374
5375/** @copydoc VBOXHDDBACKEND::pfnSetPCHSGeometry */
5376static int vmdkSetPCHSGeometry(void *pBackendData,
5377 PCPDMMEDIAGEOMETRY pPCHSGeometry)
5378{
5379 LogFlowFunc(("pBackendData=%#p pPCHSGeometry=%#p PCHS=%u/%u/%u\n", pBackendData, pPCHSGeometry, pPCHSGeometry->cCylinders, pPCHSGeometry->cHeads, pPCHSGeometry->cSectors));
5380 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5381 int rc;
5382
5383 AssertPtr(pImage);
5384
5385 if (pImage)
5386 {
5387 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
5388 {
5389 rc = VERR_VD_IMAGE_READ_ONLY;
5390 goto out;
5391 }
5392 rc = vmdkDescSetPCHSGeometry(pImage, pPCHSGeometry);
5393 if (RT_FAILURE(rc))
5394 goto out;
5395
5396 pImage->PCHSGeometry = *pPCHSGeometry;
5397 rc = VINF_SUCCESS;
5398 }
5399 else
5400 rc = VERR_VD_NOT_OPENED;
5401
5402out:
5403 LogFlowFunc(("returns %Rrc\n", rc));
5404 return rc;
5405}
5406
5407/** @copydoc VBOXHDDBACKEND::pfnGetLCHSGeometry */
5408static int vmdkGetLCHSGeometry(void *pBackendData,
5409 PPDMMEDIAGEOMETRY pLCHSGeometry)
5410{
5411 LogFlowFunc(("pBackendData=%#p pLCHSGeometry=%#p\n", pBackendData, pLCHSGeometry));
5412 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5413 int rc;
5414
5415 AssertPtr(pImage);
5416
5417 if (pImage)
5418 {
5419 if (pImage->LCHSGeometry.cCylinders)
5420 {
5421 *pLCHSGeometry = pImage->LCHSGeometry;
5422 rc = VINF_SUCCESS;
5423 }
5424 else
5425 rc = VERR_VD_GEOMETRY_NOT_SET;
5426 }
5427 else
5428 rc = VERR_VD_NOT_OPENED;
5429
5430 LogFlowFunc(("returns %Rrc (LCHS=%u/%u/%u)\n", rc, pLCHSGeometry->cCylinders, pLCHSGeometry->cHeads, pLCHSGeometry->cSectors));
5431 return rc;
5432}
5433
5434/** @copydoc VBOXHDDBACKEND::pfnSetLCHSGeometry */
5435static int vmdkSetLCHSGeometry(void *pBackendData,
5436 PCPDMMEDIAGEOMETRY pLCHSGeometry)
5437{
5438 LogFlowFunc(("pBackendData=%#p pLCHSGeometry=%#p LCHS=%u/%u/%u\n", pBackendData, pLCHSGeometry, pLCHSGeometry->cCylinders, pLCHSGeometry->cHeads, pLCHSGeometry->cSectors));
5439 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5440 int rc;
5441
5442 AssertPtr(pImage);
5443
5444 if (pImage)
5445 {
5446 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
5447 {
5448 rc = VERR_VD_IMAGE_READ_ONLY;
5449 goto out;
5450 }
5451 rc = vmdkDescSetLCHSGeometry(pImage, pLCHSGeometry);
5452 if (RT_FAILURE(rc))
5453 goto out;
5454
5455 pImage->LCHSGeometry = *pLCHSGeometry;
5456 rc = VINF_SUCCESS;
5457 }
5458 else
5459 rc = VERR_VD_NOT_OPENED;
5460
5461out:
5462 LogFlowFunc(("returns %Rrc\n", rc));
5463 return rc;
5464}
5465
5466/** @copydoc VBOXHDDBACKEND::pfnGetImageFlags */
5467static unsigned vmdkGetImageFlags(void *pBackendData)
5468{
5469 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
5470 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5471 unsigned uImageFlags;
5472
5473 AssertPtr(pImage);
5474
5475 if (pImage)
5476 uImageFlags = pImage->uImageFlags;
5477 else
5478 uImageFlags = 0;
5479
5480 LogFlowFunc(("returns %#x\n", uImageFlags));
5481 return uImageFlags;
5482}
5483
5484/** @copydoc VBOXHDDBACKEND::pfnGetOpenFlags */
5485static unsigned vmdkGetOpenFlags(void *pBackendData)
5486{
5487 LogFlowFunc(("pBackendData=%#p\n", pBackendData));
5488 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5489 unsigned uOpenFlags;
5490
5491 AssertPtr(pImage);
5492
5493 if (pImage)
5494 uOpenFlags = pImage->uOpenFlags;
5495 else
5496 uOpenFlags = 0;
5497
5498 LogFlowFunc(("returns %#x\n", uOpenFlags));
5499 return uOpenFlags;
5500}
5501
5502/** @copydoc VBOXHDDBACKEND::pfnSetOpenFlags */
5503static int vmdkSetOpenFlags(void *pBackendData, unsigned uOpenFlags)
5504{
5505 LogFlowFunc(("pBackendData=%#p uOpenFlags=%#x\n", pBackendData, uOpenFlags));
5506 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5507 int rc;
5508
5509 /* Image must be opened and the new flags must be valid. Only the
5510 * read-only, info and async I/O flags are supported. */
5511 if (!pImage || (uOpenFlags & ~(VD_OPEN_FLAGS_READONLY | VD_OPEN_FLAGS_INFO | VD_OPEN_FLAGS_ASYNC_IO)))
5512 {
5513 rc = VERR_INVALID_PARAMETER;
5514 goto out;
5515 }
5516
5517 /* Implement this operation via reopening the image. */
5518 vmdkFreeImage(pImage, false);
5519 rc = vmdkOpenImage(pImage, uOpenFlags);
5520
5521out:
5522 LogFlowFunc(("returns %Rrc\n", rc));
5523 return rc;
5524}
5525
5526/** @copydoc VBOXHDDBACKEND::pfnGetComment */
5527static int vmdkGetComment(void *pBackendData, char *pszComment,
5528 size_t cbComment)
5529{
5530 LogFlowFunc(("pBackendData=%#p pszComment=%#p cbComment=%zu\n", pBackendData, pszComment, cbComment));
5531 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5532 int rc;
5533
5534 AssertPtr(pImage);
5535
5536 if (pImage)
5537 {
5538 const char *pszCommentEncoded = NULL;
5539 rc = vmdkDescDDBGetStr(pImage, &pImage->Descriptor,
5540 "ddb.comment", &pszCommentEncoded);
5541 if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
5542 pszCommentEncoded = NULL;
5543 else if (RT_FAILURE(rc))
5544 goto out;
5545
5546 if (pszComment && pszCommentEncoded)
5547 rc = vmdkDecodeString(pszCommentEncoded, pszComment, cbComment);
5548 else
5549 {
5550 if (pszComment)
5551 *pszComment = '\0';
5552 rc = VINF_SUCCESS;
5553 }
5554 if (pszCommentEncoded)
5555 RTStrFree((char *)(void *)pszCommentEncoded);
5556 }
5557 else
5558 rc = VERR_VD_NOT_OPENED;
5559
5560out:
5561 LogFlowFunc(("returns %Rrc comment='%s'\n", rc, pszComment));
5562 return rc;
5563}
5564
5565/** @copydoc VBOXHDDBACKEND::pfnSetComment */
5566static int vmdkSetComment(void *pBackendData, const char *pszComment)
5567{
5568 LogFlowFunc(("pBackendData=%#p pszComment=\"%s\"\n", pBackendData, pszComment));
5569 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5570 int rc;
5571
5572 AssertPtr(pImage);
5573
5574 if (!pImage)
5575 {
5576 rc = VERR_VD_NOT_OPENED;
5577 goto out;
5578 }
5579
5580 if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
5581 rc = VERR_VD_IMAGE_READ_ONLY;
5582 else
5583 rc = vmdkSetImageComment(pImage, pszComment);
5584
5585out:
5586 LogFlowFunc(("returns %Rrc\n", rc));
5587 return rc;
5588}
5589
5590/** @copydoc VBOXHDDBACKEND::pfnGetUuid */
5591static int vmdkGetUuid(void *pBackendData, PRTUUID pUuid)
5592{
5593 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
5594 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5595 int rc;
5596
5597 AssertPtr(pImage);
5598
5599 if (pImage)
5600 {
5601 *pUuid = pImage->ImageUuid;
5602 rc = VINF_SUCCESS;
5603 }
5604 else
5605 rc = VERR_VD_NOT_OPENED;
5606
5607 LogFlowFunc(("returns %Rrc (%RTuuid)\n", rc, pUuid));
5608 return rc;
5609}
5610
5611/** @copydoc VBOXHDDBACKEND::pfnSetUuid */
5612static int vmdkSetUuid(void *pBackendData, PCRTUUID pUuid)
5613{
5614 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
5615 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5616 int rc;
5617
5618 LogFlowFunc(("%RTuuid\n", pUuid));
5619 AssertPtr(pImage);
5620
5621 if (pImage)
5622 {
5623 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
5624 {
5625 pImage->ImageUuid = *pUuid;
5626 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
5627 VMDK_DDB_IMAGE_UUID, pUuid);
5628 if (RT_FAILURE(rc))
5629 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing image UUID in descriptor in '%s'"), pImage->pszFilename);
5630 rc = VINF_SUCCESS;
5631 }
5632 else
5633 rc = VERR_VD_IMAGE_READ_ONLY;
5634 }
5635 else
5636 rc = VERR_VD_NOT_OPENED;
5637
5638 LogFlowFunc(("returns %Rrc\n", rc));
5639 return rc;
5640}
5641
5642/** @copydoc VBOXHDDBACKEND::pfnGetModificationUuid */
5643static int vmdkGetModificationUuid(void *pBackendData, PRTUUID pUuid)
5644{
5645 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
5646 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5647 int rc;
5648
5649 AssertPtr(pImage);
5650
5651 if (pImage)
5652 {
5653 *pUuid = pImage->ModificationUuid;
5654 rc = VINF_SUCCESS;
5655 }
5656 else
5657 rc = VERR_VD_NOT_OPENED;
5658
5659 LogFlowFunc(("returns %Rrc (%RTuuid)\n", rc, pUuid));
5660 return rc;
5661}
5662
5663/** @copydoc VBOXHDDBACKEND::pfnSetModificationUuid */
5664static int vmdkSetModificationUuid(void *pBackendData, PCRTUUID pUuid)
5665{
5666 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
5667 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5668 int rc;
5669
5670 AssertPtr(pImage);
5671
5672 if (pImage)
5673 {
5674 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
5675 {
5676 /*
5677 * Only update the modification UUID if it actually changed.
5678 * This avoids a lot of unnecessary 1-byte writes during
5679 * vmdkFlush.
5680 */
5681 if (RTUuidCompare(&pImage->ModificationUuid, pUuid))
5682 {
5683 pImage->ModificationUuid = *pUuid;
5684 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
5685 VMDK_DDB_MODIFICATION_UUID, pUuid);
5686 if (RT_FAILURE(rc))
5687 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing modification UUID in descriptor in '%s'"), pImage->pszFilename);
5688 }
5689 rc = VINF_SUCCESS;
5690 }
5691 else
5692 rc = VERR_VD_IMAGE_READ_ONLY;
5693 }
5694 else
5695 rc = VERR_VD_NOT_OPENED;
5696
5697 LogFlowFunc(("returns %Rrc\n", rc));
5698 return rc;
5699}
5700
5701/** @copydoc VBOXHDDBACKEND::pfnGetParentUuid */
5702static int vmdkGetParentUuid(void *pBackendData, PRTUUID pUuid)
5703{
5704 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
5705 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5706 int rc;
5707
5708 AssertPtr(pImage);
5709
5710 if (pImage)
5711 {
5712 *pUuid = pImage->ParentUuid;
5713 rc = VINF_SUCCESS;
5714 }
5715 else
5716 rc = VERR_VD_NOT_OPENED;
5717
5718 LogFlowFunc(("returns %Rrc (%RTuuid)\n", rc, pUuid));
5719 return rc;
5720}
5721
5722/** @copydoc VBOXHDDBACKEND::pfnSetParentUuid */
5723static int vmdkSetParentUuid(void *pBackendData, PCRTUUID pUuid)
5724{
5725 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
5726 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5727 int rc;
5728
5729 AssertPtr(pImage);
5730
5731 if (pImage)
5732 {
5733 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
5734 {
5735 pImage->ParentUuid = *pUuid;
5736 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
5737 VMDK_DDB_PARENT_UUID, pUuid);
5738 if (RT_FAILURE(rc))
5739 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing parent image UUID in descriptor in '%s'"), pImage->pszFilename);
5740 rc = VINF_SUCCESS;
5741 }
5742 else
5743 rc = VERR_VD_IMAGE_READ_ONLY;
5744 }
5745 else
5746 rc = VERR_VD_NOT_OPENED;
5747
5748 LogFlowFunc(("returns %Rrc\n", rc));
5749 return rc;
5750}
5751
5752/** @copydoc VBOXHDDBACKEND::pfnGetParentModificationUuid */
5753static int vmdkGetParentModificationUuid(void *pBackendData, PRTUUID pUuid)
5754{
5755 LogFlowFunc(("pBackendData=%#p pUuid=%#p\n", pBackendData, pUuid));
5756 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5757 int rc;
5758
5759 AssertPtr(pImage);
5760
5761 if (pImage)
5762 {
5763 *pUuid = pImage->ParentModificationUuid;
5764 rc = VINF_SUCCESS;
5765 }
5766 else
5767 rc = VERR_VD_NOT_OPENED;
5768
5769 LogFlowFunc(("returns %Rrc (%RTuuid)\n", rc, pUuid));
5770 return rc;
5771}
5772
5773/** @copydoc VBOXHDDBACKEND::pfnSetParentModificationUuid */
5774static int vmdkSetParentModificationUuid(void *pBackendData, PCRTUUID pUuid)
5775{
5776 LogFlowFunc(("pBackendData=%#p Uuid=%RTuuid\n", pBackendData, pUuid));
5777 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5778 int rc;
5779
5780 AssertPtr(pImage);
5781
5782 if (pImage)
5783 {
5784 if (!(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
5785 {
5786 pImage->ParentModificationUuid = *pUuid;
5787 rc = vmdkDescDDBSetUuid(pImage, &pImage->Descriptor,
5788 VMDK_DDB_PARENT_MODIFICATION_UUID, pUuid);
5789 if (RT_FAILURE(rc))
5790 return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing parent image modification UUID in descriptor in '%s'"), pImage->pszFilename);
5791 rc = VINF_SUCCESS;
5792 }
5793 else
5794 rc = VERR_VD_IMAGE_READ_ONLY;
5795 }
5796 else
5797 rc = VERR_VD_NOT_OPENED;
5798
5799 LogFlowFunc(("returns %Rrc\n", rc));
5800 return rc;
5801}
5802
5803/** @copydoc VBOXHDDBACKEND::pfnDump */
5804static void vmdkDump(void *pBackendData)
5805{
5806 PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
5807
5808 AssertPtr(pImage);
5809 if (pImage)
5810 {
5811 pImage->pInterfaceErrorCallbacks->pfnMessage(pImage->pInterfaceError->pvUser, "Header: Geometry PCHS=%u/%u/%u LCHS=%u/%u/%u cbSector=%llu\n",
5812 pImage->PCHSGeometry.cCylinders, pImage->PCHSGeometry.cHeads, pImage->PCHSGeometry.cSectors,
5813 pImage->LCHSGeometry.cCylinders, pImage->LCHSGeometry.cHeads, pImage->LCHSGeometry.cSectors,
5814 VMDK_BYTE2SECTOR(pImage->cbSize));
5815 pImage->pInterfaceErrorCallbacks->pfnMessage(pImage->pInterfaceError->pvUser, "Header: uuidCreation={%RTuuid}\n", &pImage->ImageUuid);
5816 pImage->pInterfaceErrorCallbacks->pfnMessage(pImage->pInterfaceError->pvUser, "Header: uuidModification={%RTuuid}\n", &pImage->ModificationUuid);
5817 pImage->pInterfaceErrorCallbacks->pfnMessage(pImage->pInterfaceError->pvUser, "Header: uuidParent={%RTuuid}\n", &pImage->ParentUuid);
5818 pImage->pInterfaceErrorCallbacks->pfnMessage(pImage->pInterfaceError->pvUser, "Header: uuidParentModification={%RTuuid}\n", &pImage->ParentModificationUuid);
5819 }
5820}
5821
5822
5823static int vmdkGetTimeStamp(void *pvBackendData, PRTTIMESPEC pTimeStamp)
5824{
5825 int rc = VERR_NOT_IMPLEMENTED;
5826 LogFlow(("%s: returned %Rrc\n", __FUNCTION__, rc));
5827 return rc;
5828}
5829
5830static int vmdkGetParentTimeStamp(void *pvBackendData, PRTTIMESPEC pTimeStamp)
5831{
5832 int rc = VERR_NOT_IMPLEMENTED;
5833 LogFlow(("%s: returned %Rrc\n", __FUNCTION__, rc));
5834 return rc;
5835}
5836
5837static int vmdkSetParentTimeStamp(void *pvBackendData, PCRTTIMESPEC pTimeStamp)
5838{
5839 int rc = VERR_NOT_IMPLEMENTED;
5840 LogFlow(("%s: returned %Rrc\n", __FUNCTION__, rc));
5841 return rc;
5842}
5843
5844static int vmdkGetParentFilename(void *pvBackendData, char **ppszParentFilename)
5845{
5846 int rc = VERR_NOT_IMPLEMENTED;
5847 LogFlow(("%s: returned %Rrc\n", __FUNCTION__, rc));
5848 return rc;
5849}
5850
5851static int vmdkSetParentFilename(void *pvBackendData, const char *pszParentFilename)
5852{
5853 int rc = VERR_NOT_IMPLEMENTED;
5854 LogFlow(("%s: returned %Rrc\n", __FUNCTION__, rc));
5855 return rc;
5856}
5857
5858static bool vmdkIsAsyncIOSupported(void *pvBackendData)
5859{
5860 PVMDKIMAGE pImage = (PVMDKIMAGE)pvBackendData;
5861 bool fAsyncIOSupported = false;
5862
5863 if (pImage)
5864 {
5865 unsigned cFlatExtents = 0;
5866
5867 /* Async I/O is only supported if the image consists solely of FLAT, VMFS or ZERO extents.
5868 *
5869 * @todo: At the moment async I/O additionally requires at most one FLAT extent;
5870 * more than one doesn't work yet with the async I/O interface.
5871 */
5872 fAsyncIOSupported = true;
5873 for (unsigned i = 0; i < pImage->cExtents; i++)
5874 {
5875 if (( pImage->pExtents[i].enmType != VMDKETYPE_FLAT
5876 && pImage->pExtents[i].enmType != VMDKETYPE_ZERO
5877 && pImage->pExtents[i].enmType != VMDKETYPE_VMFS)
5878 || ((pImage->pExtents[i].enmType == VMDKETYPE_FLAT) && (cFlatExtents > 0)))
5879 {
5880 fAsyncIOSupported = false;
5881 break; /* Stop search */
5882 }
5883 if (pImage->pExtents[i].enmType == VMDKETYPE_FLAT)
5884 cFlatExtents++;
5885 }
5886 }
5887
5888 return fAsyncIOSupported;
5889}
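/* Annotation (descriptor syntax as in the VMDK spec, not taken from this
 * file): async I/O would be reported for an image whose extents are e.g.
 *
 *   RW 4192256 FLAT "disk-flat.vmdk" 0
 *   RW 2048 ZERO
 *
 * but not for one containing any SPARSE extent or more than one FLAT extent.
 */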
5890
5891static int vmdkAsyncRead(void *pvBackendData, uint64_t uOffset, size_t cbRead,
5892 PPDMDATASEG paSeg, unsigned cSeg, void *pvUser)
5893{
5894 PVMDKIMAGE pImage = (PVMDKIMAGE)pvBackendData;
5895 PVMDKEXTENT pExtent = NULL;
5896 int rc = VINF_SUCCESS;
5897 unsigned cSegments = 0;
5898 PPDMDATASEG paSegCurrent = paSeg;
5899 size_t cbLeftInCurrentSegment = paSegCurrent->cbSeg;
5900 size_t uOffsetInCurrentSegment = 0;
5901 size_t cbReadLeft = cbRead;
5902 uint64_t uOffCurr = uOffset;
5903
5904 AssertPtr(pImage);
5905 Assert(uOffset % 512 == 0);
5906 Assert(cbRead % 512 == 0);
5907
5908 if ( uOffset + cbRead > pImage->cbSize
5909 || cbRead == 0)
5910 {
5911 rc = VERR_INVALID_PARAMETER;
5912 goto out;
5913 }
5914
5915 while (cbReadLeft && cSeg)
5916 {
5917 size_t cbToRead;
5918 uint64_t uSectorExtentRel;
5919
5920 rc = vmdkFindExtent(pImage, VMDK_BYTE2SECTOR(uOffCurr),
5921 &pExtent, &uSectorExtentRel);
5922 if (RT_FAILURE(rc))
5923 goto out;
5924
5925 /* Check access permissions as defined in the extent descriptor. */
5926 if (pExtent->enmAccess == VMDKACCESS_NOACCESS)
5927 {
5928 rc = VERR_VD_VMDK_INVALID_STATE;
5929 goto out;
5930 }
5931
5932 /* Clip read range to remain in this extent. */
5933 cbToRead = RT_MIN(cbRead, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
5934 /* Clip read range to remain in the current data segment. */
5935 cbToRead = RT_MIN(cbToRead, cbLeftInCurrentSegment);
5936
5937 switch (pExtent->enmType)
5938 {
5939 case VMDKETYPE_VMFS:
5940 case VMDKETYPE_FLAT:
5941 {
5942 /* Check for enough room first. */
5943 if (RT_UNLIKELY(cSegments >= pImage->cSegments))
5944 {
5945 /* We reached maximum, resize array. Try to realloc memory first. */
5946 PPDMDATASEG paSegmentsNew = (PPDMDATASEG)RTMemRealloc(pImage->paSegments, (cSegments + 10)*sizeof(PDMDATASEG));
5947
5948 if (!paSegmentsNew)
5949 {
5950 /* We failed. Allocate completely new. */
5951 paSegmentsNew = (PPDMDATASEG)RTMemAllocZ((cSegments + 10)* sizeof(PDMDATASEG));
5952 if (!paSegmentsNew)
5953 {
5954 /* Damn, we are out of memory. */
5955 rc = VERR_NO_MEMORY;
5956 goto out;
5957 }
5958
5959 /* Copy the existing segment entries over. */
5960 for (unsigned i = 0; i < cSegments; i++)
5961 paSegmentsNew[i] = pImage->paSegments[i];
5962
5963 /* Free old memory. */
5964 RTMemFree(pImage->paSegments);
5965 }
5966
5967 pImage->cSegments = cSegments + 10;
5968 pImage->paSegments = paSegmentsNew;
5969 }
5970
5971 pImage->paSegments[cSegments].cbSeg = cbToRead;
5972 pImage->paSegments[cSegments].pvSeg = (uint8_t *)paSegCurrent->pvSeg + uOffsetInCurrentSegment;
5973 cSegments++;
5974 break;
5975 }
5976 case VMDKETYPE_ZERO:
5977 /* Nothing left to do. */
5978 break;
5979 default:
5980 AssertMsgFailed(("Unsupported extent type %u\n", pExtent->enmType));
5981 }
5982
5983 cbReadLeft -= cbToRead;
5984 uOffCurr += cbToRead;
5985 cbLeftInCurrentSegment -= cbToRead;
5986 uOffsetInCurrentSegment += cbToRead;
5987 /* Go to the next segment if there is no space left in the current one. */
5988 if (!cbLeftInCurrentSegment)
5989 {
5990 uOffsetInCurrentSegment = 0;
5991 paSegCurrent++;
5992 cSeg--;
5993 cbLeftInCurrentSegment = paSegCurrent->cbSeg;
5994 }
5995 }
5996
5997 AssertMsg(cbReadLeft == 0, ("No segment left but there is still data to read\n"));
5998
5999 if (cSegments == 0)
6000 {
6001 /* The request was completely within a ZERO extent; nothing to do. */
6002 rc = VINF_VD_ASYNC_IO_FINISHED;
6003 }
6004 else
6005 {
6006 /* Start the asynchronous read. */
6007 void *pTask;
6008 rc = pImage->pInterfaceAsyncIOCallbacks->pfnReadAsync(pImage->pInterfaceAsyncIO->pvUser,
6009 pExtent->pFile->pStorage, uOffset,
6010 pImage->paSegments, cSegments, cbRead,
6011 pvUser, &pTask);
6012 }
6013
6014out:
6015 LogFlowFunc(("returns %Rrc\n", rc));
6016 return rc;
6017}
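/* Annotation: the loop above re-slices the caller's scatter/gather segments
 * on extent and segment boundaries.  VMFS/FLAT pieces are collected into
 * pImage->paSegments (grown in steps of 10 entries as needed); ZERO pieces
 * contribute no I/O.  If nothing was collected the request is reported as
 * already finished (VINF_VD_ASYNC_IO_FINISHED), otherwise the whole batch is
 * submitted with a single pfnReadAsync call.  vmdkAsyncWrite below follows
 * the same pattern for writes.
 */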
6018
6019static int vmdkAsyncWrite(void *pvBackendData, uint64_t uOffset, size_t cbWrite,
6020 PPDMDATASEG paSeg, unsigned cSeg, void *pvUser)
6021{
6022 PVMDKIMAGE pImage = (PVMDKIMAGE)pvBackendData;
6023 PVMDKEXTENT pExtent = NULL;
6024 int rc = VINF_SUCCESS;
6025 unsigned cSegments = 0;
6026 PPDMDATASEG paSegCurrent = paSeg;
6027 size_t cbLeftInCurrentSegment = paSegCurrent->cbSeg;
6028 size_t uOffsetInCurrentSegment = 0;
6029 size_t cbWriteLeft = cbWrite;
6030 uint64_t uOffCurr = uOffset;
6031
6032 AssertPtr(pImage);
6033 Assert(uOffset % 512 == 0);
6034 Assert(cbWrite % 512 == 0);
6035
6036 if ( uOffset + cbWrite > pImage->cbSize
6037 || cbWrite == 0)
6038 {
6039 rc = VERR_INVALID_PARAMETER;
6040 goto out;
6041 }
6042
6043 while (cbWriteLeft && cSeg)
6044 {
6045 size_t cbToWrite;
6046 uint64_t uSectorExtentRel;
6047
6048 rc = vmdkFindExtent(pImage, VMDK_BYTE2SECTOR(uOffCurr),
6049 &pExtent, &uSectorExtentRel);
6050 if (RT_FAILURE(rc))
6051 goto out;
6052
6053 /* Check access permissions as defined in the extent descriptor. */
6054 if (pExtent->enmAccess == VMDKACCESS_NOACCESS)
6055 {
6056 rc = VERR_VD_VMDK_INVALID_STATE;
6057 goto out;
6058 }
6059
6060 /* Clip write range to remain in this extent. */
6061 cbToWrite = RT_MIN(cbWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
6062 /* Clip write range to remain in the current data segment. */
6063 cbToWrite = RT_MIN(cbToWrite, cbLeftInCurrentSegment);
6064
6065 switch (pExtent->enmType)
6066 {
6067 case VMDKETYPE_VMFS:
6068 case VMDKETYPE_FLAT:
6069 {
6070 /* Check for enough room first. */
6071 if (RT_UNLIKELY(cSegments >= pImage->cSegments))
6072 {
6073 /* We reached maximum, resize array. Try to realloc memory first. */
6074 PPDMDATASEG paSegmentsNew = (PPDMDATASEG)RTMemRealloc(pImage->paSegments, (cSegments + 10)*sizeof(PDMDATASEG));
6075
6076 if (!paSegmentsNew)
6077 {
6078 /* We failed. Allocate completely new. */
6079 paSegmentsNew = (PPDMDATASEG)RTMemAllocZ((cSegments + 10)* sizeof(PDMDATASEG));
6080 if (!paSegmentsNew)
6081 {
6082 /* Damn, we are out of memory. */
6083 rc = VERR_NO_MEMORY;
6084 goto out;
6085 }
6086
6087 /* Copy the existing segment entries over. */
6088 for (unsigned i = 0; i < cSegments; i++)
6089 paSegmentsNew[i] = pImage->paSegments[i];
6090
6091 /* Free old memory. */
6092 RTMemFree(pImage->paSegments);
6093 }
6094
6095 pImage->cSegments = cSegments + 10;
6096 pImage->paSegments = paSegmentsNew;
6097 }
6098
6099 pImage->paSegments[cSegments].cbSeg = cbToWrite;
6100 pImage->paSegments[cSegments].pvSeg = (uint8_t *)paSegCurrent->pvSeg + uOffsetInCurrentSegment;
6101 cSegments++;
6102 break;
6103 }
6104 case VMDKETYPE_ZERO:
6105 /* Nothing left to do. */
6106 break;
6107 default:
6108 AssertMsgFailed(("Unsupported extent type %u\n", pExtent->enmType));
6109 }
6110
6111 cbWriteLeft -= cbToWrite;
6112 uOffCurr += cbToWrite;
6113 cbLeftInCurrentSegment -= cbToWrite;
6114 uOffsetInCurrentSegment += cbToWrite;
6115 /* Go to the next segment if there is no space left in the current one. */
6116 if (!cbLeftInCurrentSegment)
6117 {
6118 uOffsetInCurrentSegment = 0;
6119 paSegCurrent++;
6120 cSeg--;
6121 cbLeftInCurrentSegment = paSegCurrent->cbSeg;
6122 }
6123 }
6124
6125 AssertMsg(cbWriteLeft == 0, ("No segment left but there is still data to write\n"));
6126
6127 if (cSegments == 0)
6128 {
6129 /* The request was completely within a ZERO extent; nothing to do. */
6130 rc = VINF_VD_ASYNC_IO_FINISHED;
6131 }
6132 else
6133 {
6134 /* Start the asynchronous write. */
6135 void *pTask;
6136 rc = pImage->pInterfaceAsyncIOCallbacks->pfnWriteAsync(pImage->pInterfaceAsyncIO->pvUser,
6137 pExtent->pFile->pStorage, uOffset,
6138 pImage->paSegments, cSegments, cbWrite,
6139 pvUser, &pTask);
6140 }
6141
6142out:
6143 LogFlowFunc(("returns %Rrc\n", rc));
6144 return rc;
6145}
6146
6147
6148VBOXHDDBACKEND g_VmdkBackend =
6149{
6150 /* pszBackendName */
6151 "VMDK",
6152 /* cbSize */
6153 sizeof(VBOXHDDBACKEND),
6154 /* uBackendCaps */
6155 VD_CAP_UUID | VD_CAP_CREATE_FIXED | VD_CAP_CREATE_DYNAMIC
6156 | VD_CAP_CREATE_SPLIT_2G | VD_CAP_DIFF | VD_CAP_FILE | VD_CAP_ASYNC,
6157 /* papszFileExtensions */
6158 s_apszVmdkFileExtensions,
6159 /* paConfigInfo */
6160 NULL,
6161 /* hPlugin */
6162 NIL_RTLDRMOD,
6163 /* pfnCheckIfValid */
6164 vmdkCheckIfValid,
6165 /* pfnOpen */
6166 vmdkOpen,
6167 /* pfnCreate */
6168 vmdkCreate,
6169 /* pfnRename */
6170 vmdkRename,
6171 /* pfnClose */
6172 vmdkClose,
6173 /* pfnRead */
6174 vmdkRead,
6175 /* pfnWrite */
6176 vmdkWrite,
6177 /* pfnFlush */
6178 vmdkFlush,
6179 /* pfnGetVersion */
6180 vmdkGetVersion,
6181 /* pfnGetSize */
6182 vmdkGetSize,
6183 /* pfnGetFileSize */
6184 vmdkGetFileSize,
6185 /* pfnGetPCHSGeometry */
6186 vmdkGetPCHSGeometry,
6187 /* pfnSetPCHSGeometry */
6188 vmdkSetPCHSGeometry,
6189 /* pfnGetLCHSGeometry */
6190 vmdkGetLCHSGeometry,
6191 /* pfnSetLCHSGeometry */
6192 vmdkSetLCHSGeometry,
6193 /* pfnGetImageFlags */
6194 vmdkGetImageFlags,
6195 /* pfnGetOpenFlags */
6196 vmdkGetOpenFlags,
6197 /* pfnSetOpenFlags */
6198 vmdkSetOpenFlags,
6199 /* pfnGetComment */
6200 vmdkGetComment,
6201 /* pfnSetComment */
6202 vmdkSetComment,
6203 /* pfnGetUuid */
6204 vmdkGetUuid,
6205 /* pfnSetUuid */
6206 vmdkSetUuid,
6207 /* pfnGetModificationUuid */
6208 vmdkGetModificationUuid,
6209 /* pfnSetModificationUuid */
6210 vmdkSetModificationUuid,
6211 /* pfnGetParentUuid */
6212 vmdkGetParentUuid,
6213 /* pfnSetParentUuid */
6214 vmdkSetParentUuid,
6215 /* pfnGetParentModificationUuid */
6216 vmdkGetParentModificationUuid,
6217 /* pfnSetParentModificationUuid */
6218 vmdkSetParentModificationUuid,
6219 /* pfnDump */
6220 vmdkDump,
6221 /* pfnGetTimeStamp */
6222 vmdkGetTimeStamp,
6223 /* pfnGetParentTimeStamp */
6224 vmdkGetParentTimeStamp,
6225 /* pfnSetParentTimeStamp */
6226 vmdkSetParentTimeStamp,
6227 /* pfnGetParentFilename */
6228 vmdkGetParentFilename,
6229 /* pfnSetParentFilename */
6230 vmdkSetParentFilename,
6231 /* pfnIsAsyncIOSupported */
6232 vmdkIsAsyncIOSupported,
6233 /* pfnAsyncRead */
6234 vmdkAsyncRead,
6235 /* pfnAsyncWrite */
6236 vmdkAsyncWrite,
6237 /* pfnComposeLocation */
6238 genericFileComposeLocation,
6239 /* pfnComposeName */
6240 genericFileComposeName
6241};
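/* Annotation: the generic VD layer dispatches through this table, so e.g. a
 * read issued through the public VD API lands in vmdkRead() above, while
 * asynchronous requests are only routed to vmdkAsyncRead()/vmdkAsyncWrite()
 * for images where vmdkIsAsyncIOSupported() returned true.
 */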