VirtualBox

source: vbox/trunk/src/VBox/ValidationKit/tests/storage/storagecfg.py@ 102100

Last change on this file since 102100 was 98103, checked in by vboxsync, 2 years ago

Copyright year updates by scm.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 22.8 KB
 
1# -*- coding: utf-8 -*-
2# $Id: storagecfg.py 98103 2023-01-17 14:15:46Z vboxsync $
3
4"""
5VirtualBox Validation Kit - Storage test configuration API.
6"""
7
8__copyright__ = \
9"""
10Copyright (C) 2016-2023 Oracle and/or its affiliates.
11
12This file is part of VirtualBox base platform packages, as
13available from https://www.alldomusa.eu.org.
14
15This program is free software; you can redistribute it and/or
16modify it under the terms of the GNU General Public License
17as published by the Free Software Foundation, in version 3 of the
18License.
19
20This program is distributed in the hope that it will be useful, but
21WITHOUT ANY WARRANTY; without even the implied warranty of
22MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
23General Public License for more details.
24
25You should have received a copy of the GNU General Public License
26along with this program; if not, see <https://www.gnu.org/licenses>.
27
28The contents of this file may alternatively be used under the terms
29of the Common Development and Distribution License Version 1.0
30(CDDL), a copy of it is provided in the "COPYING.CDDL" file included
31in the VirtualBox distribution, in which case the provisions of the
32CDDL are applicable instead of those of the GPL.
33
34You may elect to license modified versions of this file under the
35terms and conditions of either the GPL or the CDDL or both.
36
37SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0
38"""
39__version__ = "$Revision: 98103 $"
40
41# Standard Python imports.
42import os;
43import re;
44
45
class StorageDisk(object):
    """
    A single host disk that can be handed out for testing.
    """

    def __init__(self, sPath, fRamDisk = False):
        self.sPath    = sPath     # Device path of the disk.
        self.fUsed    = False     # Whether the disk is currently claimed.
        self.fRamDisk = fRamDisk  # Whether the disk is RAM backed.

    def getPath(self):
        """
        Returns the device path of the disk.
        """
        return self.sPath

    def isUsed(self):
        """
        Returns True when the disk is currently claimed, False otherwise.
        """
        return self.fUsed

    def isRamDisk(self):
        """
        Returns True when the disk object has a RAM backing.
        """
        return self.fRamDisk

    def setUsed(self, fUsed):
        """
        Marks the disk as used or unused.

        Returns False when trying to claim a disk that is already in use,
        True on any other transition.
        """
        if not fUsed:
            # Releasing always succeeds.
            self.fUsed = False
            return True
        if self.fUsed:
            return False  # Already claimed by somebody else.
        self.fUsed = True
        return True
class StorageConfigOs(object):
    """
    Base class for a single host's OS storage configuration.
    """

    def _getDisksMatchingRegExpWithPath(self, sPath, sRegExp):
        """
        Returns a list of StorageDisk objects for every entry in sPath whose
        basename matches the given regular expression and which still exists.
        """
        oRegExp  = re.compile(sRegExp)
        lstDisks = []
        for sEntry in os.listdir(sPath):
            sFullPath = sPath + '/' + sEntry
            # Match on the basename only; re-check existence to skip stale entries.
            if oRegExp.match(os.path.basename(sEntry)) and os.path.exists(sFullPath):
                lstDisks.append(StorageDisk(sFullPath))
        return lstDisks
class StorageConfigOsSolaris(StorageConfigOs):
    """
    Class implementing the Solaris specifics for a storage configuration.
    """

    def __init__(self):
        StorageConfigOs.__init__(self)
        self.idxRamDisk = 0  # Monotonic counter used to name ramdisk devices.

    def _getActivePoolsStartingWith(self, oExec, sPoolIdStart):
        """
        Returns a list of pools starting with the given ID or None on failure.
        """
        fRc, sOutput, _ = oExec.execBinary('zpool', ('list', '-H'))
        if not fRc:
            return None
        # 'zpool list -H' emits one tab-separated record per pool; the first
        # field is the pool name.
        return [sLine.split('\t')[0]
                for sLine in sOutput.splitlines() if sLine.startswith(sPoolIdStart)]

    def _getActiveVolumesInPoolStartingWith(self, oExec, sPool, sVolumeIdStart):
        """
        Returns a list of active volumes for the given pool starting with the
        given identifier or None on failure.
        """
        fRc, sOutput, _ = oExec.execBinary('zfs', ('list', '-H'))
        if not fRc:
            return None
        sPrefix = sPool + '/' + sVolumeIdStart
        return [sLine.split('\t')[0]
                for sLine in sOutput.splitlines() if sLine.startswith(sPrefix)]

    def getDisksMatchingRegExp(self, sRegExp):
        """
        Returns a list of disks matching the regular expression.
        """
        return self._getDisksMatchingRegExpWithPath('/dev/dsk', sRegExp)

    def getMntBase(self):
        """
        Returns the mountpoint base for the host.
        """
        return '/pools'

    def createStoragePool(self, oExec, sPool, asDisks, sRaidLvl):
        """
        Creates a new storage pool with the given disks and the given RAID level.
        """
        asArgs = ['create', '-f', sPool]
        # Multiple disks with raid5 (or no explicit level) become a raidz vdev.
        if len(asDisks) > 1 and (sRaidLvl == 'raid5' or sRaidLvl is None):
            asArgs.append('raidz')
        return oExec.execBinaryNoStdOut('zpool', tuple(asArgs) + tuple(asDisks))

    def createVolume(self, oExec, sPool, sVol, sMountPoint, cbVol = None):
        """
        Creates and mounts a filesystem at the given mountpoint using the
        given pool and volume IDs.
        """
        asArgs = ['create', '-o', 'mountpoint=' + sMountPoint]
        if cbVol is not None:
            asArgs.extend(['-V', cbVol])  # Sized zvol rather than a plain dataset.
        asArgs.append(sPool + '/' + sVol)
        fRc = oExec.execBinaryNoStdOut('zfs', tuple(asArgs))

        # @todo Add proper parameters to set proper owner:group ownership, the testcase broke in r133060 for Solaris
        #       because creating directories is now done using the python mkdir API instead of calling 'sudo mkdir...'.
        #       No one noticed though because testboxstor1 went out of action before...
        #       Will get fixed as soon as I'm back home.
        if fRc:
            fRc = oExec.execBinaryNoStdOut('chmod', ('777', sMountPoint))
        return fRc

    def destroyVolume(self, oExec, sPool, sVol):
        """
        Destroys the given volume.
        """
        return oExec.execBinaryNoStdOut('zfs', ('destroy', sPool + '/' + sVol))

    def destroyPool(self, oExec, sPool):
        """
        Destroys the given storage pool.
        """
        return oExec.execBinaryNoStdOut('zpool', ('destroy', sPool))

    def cleanupPoolsAndVolumes(self, oExec, sPoolIdStart, sVolIdStart):
        """
        Cleans up any pools and volumes starting with the name in the given
        parameters.  Returns False when any enumeration or destroy step failed.
        """
        lstPools = self._getActivePoolsStartingWith(oExec, sPoolIdStart)
        if lstPools is None:
            return False

        fRc = True
        for sPool in lstPools:
            lstVolumes = self._getActiveVolumesInPoolStartingWith(oExec, sPool, sVolIdStart)
            if lstVolumes is None:
                # Could not enumerate the volumes; leave this pool alone.
                fRc = False
                continue
            # Destroy all the volumes first, then the pool itself.
            for sVolume in lstVolumes:
                if not oExec.execBinaryNoStdOut('zfs', ('destroy', sVolume)):
                    fRc = False
            if not self.destroyPool(oExec, sPool):
                fRc = False
        return fRc

    def createRamDisk(self, oExec, cbRamDisk):
        """
        Creates a RAM backed disk with the given size, returning a StorageDisk
        object on success and None on failure.
        """
        sRamDiskName = 'ramdisk%u' % (self.idxRamDisk,)
        fRc, _, _ = oExec.execBinary('ramdiskadm', ('-a', sRamDiskName, str(cbRamDisk)))
        if not fRc:
            return None
        self.idxRamDisk += 1
        return StorageDisk('/dev/ramdisk/%s' % (sRamDiskName, ), True)

    def destroyRamDisk(self, oExec, oDisk):
        """
        Destroys the given ramdisk object.
        """
        return oExec.execBinaryNoStdOut('ramdiskadm', ('-d', os.path.basename(oDisk.getPath())))
class StorageConfigOsLinux(StorageConfigOs):
    """
    Class implementing the Linux specifics for a storage configuration.
    """

    def __init__(self):
        StorageConfigOs.__init__(self)
        self.dSimplePools = { }  # Simple storage pools which don't use lvm (just one partition).
        self.dMounts      = { }  # Pool/Volume to mountpoint mapping.

    def _getDmRaidLevelFromLvl(self, sRaidLvl):
        """
        Converts our raid level indicators to something mdadm can understand.
        """
        if sRaidLvl is None or sRaidLvl == 'raid0':
            return 'stripe'
        if sRaidLvl == 'raid5':
            return '5'
        if sRaidLvl == 'raid1':
            return 'mirror'
        return 'stripe'  # Fall back to striping for unknown levels.

    def getDisksMatchingRegExp(self, sRegExp):
        """
        Returns a list of disks matching the regular expression.
        """
        return self._getDisksMatchingRegExpWithPath('/dev/', sRegExp)

    def getMntBase(self):
        """
        Returns the mountpoint base for the host.
        """
        return '/mnt'

    def createStoragePool(self, oExec, sPool, asDisks, sRaidLvl):
        """
        Creates a new storage pool with the given disks and the given RAID level.

        A single disk without a RAID level is tracked as a "simple pool" and used
        directly; everything else becomes an LVM volume group, optionally layered
        on top of an mdadm RAID device.
        """
        fRc = True
        if len(asDisks) == 1 and sRaidLvl is None:
            # Doesn't require LVM, put into the simple pools dictionary so we can
            # use it when creating a volume later.
            self.dSimplePools[sPool] = asDisks[0]
        else:
            # If a RAID is required use dm-raid first to create one.
            asLvmPvDisks = asDisks
            fRc = oExec.execBinaryNoStdOut('mdadm', ('--create', '/dev/md0', '--assume-clean',
                                                     '--level=' + self._getDmRaidLevelFromLvl(sRaidLvl),
                                                     '--raid-devices=' + str(len(asDisks))) + tuple(asDisks))
            if fRc:
                # /dev/md0 is the only block device to use for our volume group.
                asLvmPvDisks = [ '/dev/md0' ]

            # Create a physical volume on every disk first.
            for sLvmPvDisk in asLvmPvDisks:
                fRc = oExec.execBinaryNoStdOut('pvcreate', (sLvmPvDisk, ))
                if not fRc:
                    break

            if fRc:
                # Create volume group with all physical volumes included.
                fRc = oExec.execBinaryNoStdOut('vgcreate', (sPool, ) + tuple(asLvmPvDisks))
        return fRc

    def createVolume(self, oExec, sPool, sVol, sMountPoint, cbVol = None):
        """
        Creates and mounts an ext4 filesystem at the given mountpoint using the
        given pool and volume IDs.
        """
        fRc = True
        sBlkDev = None
        if sPool in self.dSimplePools:
            sDiskPath = self.dSimplePools.get(sPool)
            if sDiskPath.find('zram') != -1:
                # zram devices are used as-is, no partitioning.
                sBlkDev = sDiskPath
            else:
                # Create a partition with the requested size.
                sFdiskScript = ';\n'  # Single partition filling everything.
                if cbVol is not None:
                    sFdiskScript = ',' + str(cbVol // 512) + '\n'  # Get number of sectors.
                fRc = oExec.execBinaryNoStdOut('sfdisk', ('--no-reread', '--wipe', 'always', '-q', '-f', sDiskPath), \
                                               sFdiskScript)
                if fRc:
                    # NVMe namespaces use a 'p' separator before the partition number.
                    if sDiskPath.find('nvme') != -1:
                        sBlkDev = sDiskPath + 'p1'
                    else:
                        sBlkDev = sDiskPath + '1'
        else:
            if cbVol is None:
                fRc = oExec.execBinaryNoStdOut('lvcreate', ('-l', '100%FREE', '-n', sVol, sPool))
            else:
                fRc = oExec.execBinaryNoStdOut('lvcreate', ('-L', str(cbVol), '-n', sVol, sPool))
            if fRc:
                # Bugfix: device-mapper nodes live under /dev/mapper/<vg>-<lv>;
                # the previous code was missing the '/' after 'mapper'.
                sBlkDev = '/dev/mapper/' + sPool + '-' + sVol

        if fRc is True and sBlkDev is not None:
            # Create a filesystem and mount it.
            fRc = oExec.execBinaryNoStdOut('mkfs.ext4', ('-F', '-F', sBlkDev,))
            fRc = fRc and oExec.mkDir(sMountPoint)
            fRc = fRc and oExec.execBinaryNoStdOut('mount', (sBlkDev, sMountPoint))
            if fRc:
                self.dMounts[sPool + '/' + sVol] = sMountPoint
        return fRc

    def destroyVolume(self, oExec, sPool, sVol):
        """
        Destroys the given volume (unmounting it and removing the mountpoint first).
        """
        # Unmount first.
        sMountPoint = self.dMounts[sPool + '/' + sVol]
        fRc = oExec.execBinaryNoStdOut('umount', (sMountPoint,))
        self.dMounts.pop(sPool + '/' + sVol)
        oExec.rmDir(sMountPoint)
        if sPool in self.dSimplePools:
            # Wipe partition table (zram devices were never partitioned).
            sDiskPath = self.dSimplePools.get(sPool)
            if sDiskPath.find('zram') == -1:
                fRc = oExec.execBinaryNoStdOut('sfdisk', ('--no-reread', '--wipe', 'always', '-q', '-f', '--delete', \
                                                          sDiskPath))
        else:
            fRc = oExec.execBinaryNoStdOut('lvremove', (sPool + '/' + sVol,))
        return fRc

    def destroyPool(self, oExec, sPool):
        """
        Destroys the given storage pool.
        """
        fRc = True
        if sPool in self.dSimplePools:
            self.dSimplePools.pop(sPool)  # Nothing to tear down for simple pools.
        else:
            fRc = oExec.execBinaryNoStdOut('vgremove', (sPool,))
        return fRc

    def cleanupPoolsAndVolumes(self, oExec, sPoolIdStart, sVolIdStart):
        """
        Cleans up any pools and volumes starting with the name in the given
        parameters.
        """
        # @todo: Needs implementation, for LVM based configs a similar approach can be used
        #        as for Solaris.
        _ = oExec
        _ = sPoolIdStart
        _ = sVolIdStart
        return True

    def createRamDisk(self, oExec, cbRamDisk):
        """
        Creates a RAM backed disk with the given size, returning a StorageDisk
        object on success and None on failure.
        """
        # Make sure the ZRAM module is loaded.
        oDisk = None
        fRc = oExec.execBinaryNoStdOut('modprobe', ('zram',))
        if fRc:
            # zramctl prints the device node it allocated on stdout.
            fRc, sOut, _ = oExec.execBinary('zramctl', ('--raw', '-f', '-s', str(cbRamDisk)))
            if fRc:
                oDisk = StorageDisk(sOut.rstrip(), True)
        return oDisk

    def destroyRamDisk(self, oExec, oDisk):
        """
        Destroys the given ramdisk object.
        """
        return oExec.execBinaryNoStdOut('zramctl', ('-r', oDisk.getPath()))
## @name Host disk config types.
## Values stored in DiskCfg.sCfgType; selects how DiskCfg.oDisks is interpreted.
## @{
g_ksDiskCfgStatic = 'StaticDir'; # oDisks is a directory path used directly for testing.
g_ksDiskCfgRegExp = 'RegExp';    # oDisks is a regular expression matched against device names.
g_ksDiskCfgList = 'DiskList';    # oDisks is an explicit list of disk paths.
## @}
431
class DiskCfg(object):
    """
    Host disk configuration.
    """

    def __init__(self, sTargetOs, sCfgType, oDisks):
        self.sTargetOs = sTargetOs  # Target host OS identifier.
        self.sCfgType  = sCfgType   # One of the g_ksDiskCfg* constants.
        self.oDisks    = oDisks     # Type dependent disk descriptor.

    def getTargetOs(self):
        """Returns the target OS identifier."""
        return self.sTargetOs

    def getCfgType(self):
        """Returns the configuration type constant."""
        return self.sCfgType

    def isCfgStaticDir(self):
        """Returns True for a static directory configuration."""
        return self.sCfgType == g_ksDiskCfgStatic

    def isCfgRegExp(self):
        """Returns True for a regular expression based configuration."""
        return self.sCfgType == g_ksDiskCfgRegExp

    def isCfgList(self):
        """Returns True for an explicit disk list configuration."""
        return self.sCfgType == g_ksDiskCfgList

    def getDisks(self):
        """Returns the disk descriptor (directory, regexp or list, depending on type)."""
        return self.oDisks
class StorageCfg(object):
    """
    Storage configuration helper class taking care of the different host OS.
    """

    def __init__(self, oExec, oDiskCfg):
        self.oExec    = oExec
        self.lstDisks = [ ]   # List of disks present in the system.
        self.dPools   = { }   # Dictionary of storage pools.
        self.dVols    = { }   # Dictionary of volumes.
        self.iPoolId  = 0
        self.iVolId   = 0
        self.oDiskCfg = oDiskCfg
        # Always present so cleanup()/__del__ work even when construction fails
        # for an unknown target OS below.
        self.oStorOs  = None

        fRc = True
        oStorOs = None
        if oDiskCfg.getTargetOs() == 'solaris':
            oStorOs = StorageConfigOsSolaris()
        elif oDiskCfg.getTargetOs() == 'linux':
            oStorOs = StorageConfigOsLinux() # pylint: disable=redefined-variable-type
        elif not oDiskCfg.isCfgStaticDir():
            # For unknown hosts only allow a static testing directory we don't care about setting up.
            fRc = False

        if fRc:
            self.oStorOs = oStorOs
            if oDiskCfg.isCfgRegExp():
                self.lstDisks = oStorOs.getDisksMatchingRegExp(oDiskCfg.getDisks())
            elif oDiskCfg.isCfgList():
                # Assume a list of disks and add.
                for sDisk in oDiskCfg.getDisks():
                    self.lstDisks.append(StorageDisk(sDisk))
            elif oDiskCfg.isCfgStaticDir():
                if not os.path.exists(oDiskCfg.getDisks()):
                    self.oExec.mkDir(oDiskCfg.getDisks(), 0o700)

    def __del__(self):
        self.cleanup()
        self.oDiskCfg = None

    def cleanup(self):
        """
        Cleans up any created storage configs.
        """
        # Guard against partially constructed/already finalized instances.
        if self.oDiskCfg is not None and not self.oDiskCfg.isCfgStaticDir():
            # Destroy all volumes first.
            for sMountPoint in list(self.dVols.keys()): # pylint: disable=consider-iterating-dictionary
                self.destroyVolume(sMountPoint)

            # Destroy all pools.
            for sPool in list(self.dPools.keys()): # pylint: disable=consider-iterating-dictionary
                self.destroyStoragePool(sPool)

        self.dVols.clear()
        self.dPools.clear()
        self.iPoolId = 0
        self.iVolId = 0

    def getRawDisk(self):
        """
        Returns a raw disk device from the list of free devices for use,
        or None when all disks are in use.
        """
        for oDisk in self.lstDisks:
            if oDisk.isUsed() is False:
                oDisk.setUsed(True)
                return oDisk.getPath()
        return None

    def getUnusedDiskCount(self):
        """
        Returns the number of unused disks.
        """
        cDisksUnused = 0
        for oDisk in self.lstDisks:
            if not oDisk.isUsed():
                cDisksUnused += 1
        return cDisksUnused

    def createStoragePool(self, cDisks = 0, sRaidLvl = None,
                          cbPool = None, fRamDisk = False):
        """
        Creates a new storage pool from unused disks (or a fresh ramdisk).

        Returns a (fRc, sPool) tuple.  Claimed disks are released again (and
        any ramdisk destroyed) on failure.  For a static dir config the dummy
        pool name 'StaticDummy' is returned without doing anything.
        """
        lstDisks = [ ]
        fRc = True
        sPool = None

        if not self.oDiskCfg.isCfgStaticDir():
            if fRamDisk:
                oDisk = self.oStorOs.createRamDisk(self.oExec, cbPool)
                if oDisk is not None:
                    lstDisks.append(oDisk)
                    cDisks = 1
            else:
                if cDisks == 0:
                    cDisks = self.getUnusedDiskCount()

                # Claim the first cDisks unused disks.
                for oDisk in self.lstDisks:
                    if not oDisk.isUsed():
                        oDisk.setUsed(True)
                        lstDisks.append(oDisk)
                        if len(lstDisks) == cDisks:
                            break

            # Enough drives to satisfy the request?
            if len(lstDisks) == cDisks:
                # Create a list of all device paths.
                lstDiskPaths = [oDisk.getPath() for oDisk in lstDisks]

                # Find a name for the pool.
                sPool = 'pool' + str(self.iPoolId)
                self.iPoolId += 1

                fRc = self.oStorOs.createStoragePool(self.oExec, sPool, lstDiskPaths, sRaidLvl)
                if fRc:
                    self.dPools[sPool] = lstDisks
                else:
                    self.iPoolId -= 1
            else:
                fRc = False

            # Cleanup in case of error.
            if not fRc:
                for oDisk in lstDisks:
                    oDisk.setUsed(False)
                    if oDisk.isRamDisk():
                        self.oStorOs.destroyRamDisk(self.oExec, oDisk)
        else:
            sPool = 'StaticDummy'

        return fRc, sPool

    def destroyStoragePool(self, sPool):
        """
        Destroys the storage pool with the given ID, releasing its disks.
        """
        fRc = True

        if not self.oDiskCfg.isCfgStaticDir():
            lstDisks = self.dPools.get(sPool)
            if lstDisks is not None:
                fRc = self.oStorOs.destroyPool(self.oExec, sPool)
                if fRc:
                    # Mark disks as unused.
                    self.dPools.pop(sPool)
                    for oDisk in lstDisks:
                        oDisk.setUsed(False)
                        if oDisk.isRamDisk():
                            self.oStorOs.destroyRamDisk(self.oExec, oDisk)
            else:
                fRc = False

        return fRc

    def createVolume(self, sPool, cbVol = None):
        """
        Creates a new volume from the given pool, returning a
        (fRc, sMountPoint) tuple.
        """
        fRc = True
        sMountPoint = None
        if not self.oDiskCfg.isCfgStaticDir():
            if sPool in self.dPools:
                sVol = 'vol' + str(self.iVolId)
                sMountPoint = self.oStorOs.getMntBase() + '/' + sVol
                self.iVolId += 1
                fRc = self.oStorOs.createVolume(self.oExec, sPool, sVol, sMountPoint, cbVol)
                if fRc:
                    self.dVols[sMountPoint] = (sVol, sPool)
                else:
                    self.iVolId -= 1
            else:
                fRc = False
        else:
            sMountPoint = self.oDiskCfg.getDisks()

        return fRc, sMountPoint

    def destroyVolume(self, sMountPoint):
        """
        Destroy the volume at the given mount point.

        Returns False for an unknown mount point.
        """
        fRc = True
        if not self.oDiskCfg.isCfgStaticDir():
            # Bugfix: the previous code unpacked dVols.get() before checking for
            # None, raising TypeError for unknown mount points instead of
            # returning False.
            tVol = self.dVols.get(sMountPoint)
            if tVol is not None:
                sVol, sPool = tVol
                fRc = self.oStorOs.destroyVolume(self.oExec, sPool, sVol)
                if fRc:
                    self.dVols.pop(sMountPoint)
            else:
                fRc = False

        return fRc

    def mkDirOnVolume(self, sMountPoint, sDir, fMode = 0o700):
        """
        Creates a new directory on the volume pointed to by the given mount point.
        """
        return self.oExec.mkDir(sMountPoint + '/' + sDir, fMode)

    def cleanupLeftovers(self):
        """
        Tries to cleanup any leftover pools and volumes from a failed previous run.
        """
        if not self.oDiskCfg.isCfgStaticDir():
            return self.oStorOs.cleanupPoolsAndVolumes(self.oExec, 'pool', 'vol')

        # Static dir: just wipe everything inside the testing directory.
        fRc = True
        if os.path.exists(self.oDiskCfg.getDisks()):
            for sEntry in os.listdir(self.oDiskCfg.getDisks()):
                fRc = fRc and self.oExec.rmTree(os.path.join(self.oDiskCfg.getDisks(), sEntry))

        return fRc
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette