VirtualBox

source: vbox/trunk/src/VBox/Additions/linux/sharedfolders/regops.c @ 30153

Last change on this file since 30153 was 30153, checked in by vboxsync, 15 years ago

Linux shared folders: spaces

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision

File size: 19.9 KB
/** @file
 *
 * vboxsf -- VirtualBox Guest Additions for Linux:
 * Regular file inode and file operations
 */

/*
 * Copyright (C) 2006-2007 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

/*
 * Limitations: only COW memory mapping is supported
 */

#include "vfsmod.h"

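/**
 * Allocate a physically contiguous bounce buffer for host I/O and return
 * its size and physical address. Tries a 16K buffer first and falls back
 * to a single page; callers must loop over the transfer in *tmp_sizep
 * sized chunks.
 */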
static void *alloc_bounce_buffer(size_t *tmp_sizep, PRTCCPHYS physp, size_t xfer_size, const char *caller)
{
    size_t tmp_size;
    void *tmp;

    /* Try for a big buffer first. */
    tmp_size = RT_ALIGN_Z(xfer_size, PAGE_SIZE);
    if (tmp_size > 16U*_1K)
        tmp_size = 16U*_1K;
    tmp = kmalloc(tmp_size, GFP_KERNEL);
    if (!tmp) {
        /* Fall back on a page sized buffer. */
        tmp = kmalloc(PAGE_SIZE, GFP_KERNEL);
        if (!tmp) {
            LogRel(("%s: could not allocate bounce buffer for xfer_size=%zu\n", caller, xfer_size));
            return NULL;
        }
        tmp_size = PAGE_SIZE;
    }

    *tmp_sizep = tmp_size;
    *physp = virt_to_phys(tmp);
    return tmp;
}

static void free_bounce_buffer(void *tmp)
{
    kfree(tmp);
}


/* fops */
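/**
 * Issue a read request to the shared folders host service and map any
 * failure to -EPROTO. The buffer must be kmap()'ed or kmalloc()'ed (see
 * the @todo below).
 */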
static int
sf_reg_read_aux(const char *caller, struct sf_glob_info *sf_g,
                struct sf_reg_info *sf_r, void *buf, uint32_t *nread,
                uint64_t pos)
{
    /** @todo bird: yes, kmap() and kmalloc() input only. Since the buffer is
     *        contiguous in physical memory (kmalloc or single page), we should
     *        use a physical address here to speed things up. */
    int rc = vboxCallRead(&client_handle, &sf_g->map, sf_r->handle,
                          pos, nread, buf, false /* already locked? */);
    if (RT_FAILURE(rc)) {
        LogFunc(("vboxCallRead failed. caller=%s, rc=%Rrc\n",
                 caller, rc));
        return -EPROTO;
    }
    return 0;
}

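/**
 * Issue a write request to the shared folders host service and map any
 * failure to -EPROTO.
 */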
static int
sf_reg_write_aux(const char *caller, struct sf_glob_info *sf_g,
                 struct sf_reg_info *sf_r, void *buf, uint32_t *nwritten,
                 uint64_t pos)
{
    /** @todo bird: yes, kmap() and kmalloc() input only. Since the buffer is
     *        contiguous in physical memory (kmalloc or single page), we should
     *        use a physical address here to speed things up. */
    int rc = vboxCallWrite(&client_handle, &sf_g->map, sf_r->handle,
                           pos, nwritten, buf, false /* already locked? */);
    if (RT_FAILURE(rc)) {
        LogFunc(("vboxCallWrite failed. caller=%s, rc=%Rrc\n",
                 caller, rc));
        return -EPROTO;
    }
    return 0;
}

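/**
 * Read from a regular file (the ->read method of sf_reg_fops). Transfers
 * the data in bounce buffer sized chunks: host read into the buffer, then
 * copy_to_user() into the caller's buffer. A short host read ends the loop.
 */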
static ssize_t
sf_reg_read(struct file *file, char *buf, size_t size, loff_t *off)
{
    int err;
    void *tmp;
    RTCCPHYS tmp_phys;
    size_t tmp_size;
    size_t left = size;
    ssize_t total_bytes_read = 0;
    struct inode *inode = file->f_dentry->d_inode;
    struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
    struct sf_reg_info *sf_r = file->private_data;
    loff_t pos = *off;

    TRACE();
    if (!S_ISREG(inode->i_mode)) {
        LogFunc(("read from non-regular file %d\n", inode->i_mode));
        return -EINVAL;
    }

    /** XXX Check read permission according to inode->i_mode! */

    if (!size) {
        return 0;
    }

    tmp = alloc_bounce_buffer(&tmp_size, &tmp_phys, size, __PRETTY_FUNCTION__);
    if (!tmp)
        return -ENOMEM;

    while (left) {
        uint32_t to_read, nread;

        to_read = tmp_size;
        if (to_read > left) {
            to_read = (uint32_t) left;
        }
        nread = to_read;

        err = sf_reg_read_aux(__func__, sf_g, sf_r, tmp, &nread, pos);
        if (err)
            goto fail;

        if (copy_to_user(buf, tmp, nread)) {
            err = -EFAULT;
            goto fail;
        }

        pos += nread;
        left -= nread;
        buf += nread;
        total_bytes_read += nread;
        if (nread != to_read) {
            break;
        }
    }

    *off += total_bytes_read;
    free_bounce_buffer(tmp);
    return total_bytes_read;

fail:
    free_bounce_buffer(tmp);
    return err;
}

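/**
 * Write to a regular file (the ->write method of sf_reg_fops). The mirror
 * image of sf_reg_read(): copy_from_user() into the bounce buffer, then
 * hand each chunk to the host, using the buffer's physical address directly
 * when the host supports physical page lists. Updates i_size and forces a
 * restat of the cached attributes.
 */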
static ssize_t
sf_reg_write(struct file *file, const char *buf, size_t size, loff_t *off)
{
    int err;
    void *tmp;
    RTCCPHYS tmp_phys;
    size_t tmp_size;
    size_t left = size;
    ssize_t total_bytes_written = 0;
    struct inode *inode = file->f_dentry->d_inode;
    struct sf_inode_info *sf_i = GET_INODE_INFO(inode);
    struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
    struct sf_reg_info *sf_r = file->private_data;
    loff_t pos;

    TRACE();
    BUG_ON(!sf_i);
    BUG_ON(!sf_g);
    BUG_ON(!sf_r);

    if (!S_ISREG(inode->i_mode)) {
        LogFunc(("write to non-regular file %d\n", inode->i_mode));
        return -EINVAL;
    }

    pos = *off;
    if (file->f_flags & O_APPEND) {
        pos = inode->i_size;
        *off = pos;
    }

    /** XXX Check write permission according to inode->i_mode! */

    if (!size)
        return 0;

    tmp = alloc_bounce_buffer(&tmp_size, &tmp_phys, size, __PRETTY_FUNCTION__);
    if (!tmp)
        return -ENOMEM;

    while (left) {
        uint32_t to_write, nwritten;

        to_write = tmp_size;
        if (to_write > left) {
            to_write = (uint32_t) left;
        }
        nwritten = to_write;

        if (copy_from_user(tmp, buf, to_write)) {
            err = -EFAULT;
            goto fail;
        }

#if 1
        if (VbglR0CanUsePhysPageList()) {
            err = VbglR0SfWritePhysCont(&client_handle, &sf_g->map, sf_r->handle,
                                        pos, &nwritten, tmp_phys);
            err = RT_FAILURE(err) ? -EPROTO : 0;
        } else
#endif
            err = sf_reg_write_aux(__func__, sf_g, sf_r, tmp, &nwritten, pos);
        if (err)
            goto fail;

        pos += nwritten;
        left -= nwritten;
        buf += nwritten;
        total_bytes_written += nwritten;
        if (nwritten != to_write)
            break;
    }

    *off += total_bytes_written;
    if (*off > inode->i_size)
        inode->i_size = *off;

    sf_i->force_restat = 1;
    free_bounce_buffer(tmp);
    return total_bytes_written;

fail:
    free_bounce_buffer(tmp);
    return err;
}

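/**
 * Open a regular file. Translates the Linux open flags (O_CREAT, O_TRUNC,
 * O_APPEND and the access mode) into SHFL_CF_* create flags and asks the
 * host to open the file. If the inode was just created by sf_create_aux(),
 * the handle stashed in the inode info is reused instead of making a
 * second host call.
 */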
static int
sf_reg_open(struct inode *inode, struct file *file)
{
    int rc, rc_linux = 0;
    struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
    struct sf_inode_info *sf_i = GET_INODE_INFO(inode);
    struct sf_reg_info *sf_r;
    SHFLCREATEPARMS params;

    TRACE();
    BUG_ON(!sf_g);
    BUG_ON(!sf_i);

    LogFunc(("open %s\n", sf_i->path->String.utf8));

    sf_r = kmalloc(sizeof(*sf_r), GFP_KERNEL);
    if (!sf_r) {
        LogRelFunc(("could not allocate reg info\n"));
        return -ENOMEM;
    }

    /* Already open? */
    if (sf_i->handle != SHFL_HANDLE_NIL) {
        /*
         * This inode was created with sf_create_aux(). Check the CreateFlags:
         * O_CREAT, O_TRUNC: inherently true (the file was just created). Not
         * sure about the access flags (SHFL_CF_ACCESS_*).
         */
        sf_i->force_restat = 1;
        sf_r->handle = sf_i->handle;
        sf_i->handle = SHFL_HANDLE_NIL;
        sf_i->file = file;
        file->private_data = sf_r;
        return 0;
    }

    RT_ZERO(params);
    params.Handle = SHFL_HANDLE_NIL;
    /* We check the value of params.Handle afterwards to find out if
     * the call succeeded or failed, as the API does not seem to cleanly
     * distinguish error and informational messages.
     *
     * Furthermore, we must set params.Handle to SHFL_HANDLE_NIL to
     * make the shared folders host service use our fMode parameter. */

    if (file->f_flags & O_CREAT) {
        LogFunc(("O_CREAT set\n"));
        params.CreateFlags |= SHFL_CF_ACT_CREATE_IF_NEW;
        /* We ignore O_EXCL, as the Linux kernel seems to call create
           beforehand itself, so O_EXCL should always fail. */
        if (file->f_flags & O_TRUNC) {
            LogFunc(("O_TRUNC set\n"));
            params.CreateFlags |= (  SHFL_CF_ACT_OVERWRITE_IF_EXISTS
                                   | SHFL_CF_ACCESS_WRITE);
        }
        else {
            params.CreateFlags |= SHFL_CF_ACT_OPEN_IF_EXISTS;
        }
    }
    else {
        params.CreateFlags |= SHFL_CF_ACT_FAIL_IF_NEW;
        if (file->f_flags & O_TRUNC) {
            LogFunc(("O_TRUNC set\n"));
            params.CreateFlags |= (  SHFL_CF_ACT_OVERWRITE_IF_EXISTS
                                   | SHFL_CF_ACCESS_WRITE);
        }
    }

    if (!(params.CreateFlags & SHFL_CF_ACCESS_READWRITE)) {
        switch (file->f_flags & O_ACCMODE) {
        case O_RDONLY:
            params.CreateFlags |= SHFL_CF_ACCESS_READ;
            break;

        case O_WRONLY:
            params.CreateFlags |= SHFL_CF_ACCESS_WRITE;
            break;

        case O_RDWR:
            params.CreateFlags |= SHFL_CF_ACCESS_READWRITE;
            break;

        default:
            BUG();
        }
    }

    if (file->f_flags & O_APPEND) {
        LogFunc(("O_APPEND set\n"));
        params.CreateFlags |= SHFL_CF_ACCESS_APPEND;
    }

    params.Info.Attr.fMode = inode->i_mode;
    LogFunc(("sf_reg_open: calling vboxCallCreate, file %s, flags=%#x, %#x\n",
             sf_i->path->String.utf8, file->f_flags, params.CreateFlags));
    rc = vboxCallCreate(&client_handle, &sf_g->map, sf_i->path, &params);

    if (RT_FAILURE(rc)) {
        LogFunc(("vboxCallCreate failed flags=%d,%#x rc=%Rrc\n",
                 file->f_flags, params.CreateFlags, rc));
        kfree(sf_r);
        return -RTErrConvertToErrno(rc);
    }

    if (SHFL_HANDLE_NIL == params.Handle) {
        switch (params.Result) {
        case SHFL_PATH_NOT_FOUND:
        case SHFL_FILE_NOT_FOUND:
            rc_linux = -ENOENT;
            break;
        case SHFL_FILE_EXISTS:
            rc_linux = -EEXIST;
            break;
        default:
            break;
        }
    }

    sf_i->force_restat = 1;
    sf_r->handle = params.Handle;
    sf_i->file = file;
    file->private_data = sf_r;
    return rc_linux;
}

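/**
 * Release a regular file (last close): close the host handle and free the
 * per-open sf_reg_info.
 */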
static int
sf_reg_release(struct inode *inode, struct file *file)
{
    int rc;
    struct sf_reg_info *sf_r;
    struct sf_glob_info *sf_g;
    struct sf_inode_info *sf_i = GET_INODE_INFO(inode);

    TRACE();
    sf_g = GET_GLOB_INFO(inode->i_sb);
    sf_r = file->private_data;

    BUG_ON(!sf_g);
    BUG_ON(!sf_r);

    rc = vboxCallClose(&client_handle, &sf_g->map, sf_r->handle);
    if (RT_FAILURE(rc)) {
        LogFunc(("vboxCallClose failed rc=%Rrc\n", rc));
    }

    kfree(sf_r);
    sf_i->file = NULL;
    sf_i->handle = SHFL_HANDLE_NIL;
    file->private_data = NULL;
    return 0;
}

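/**
 * Page fault handler for private (COW) mappings: allocate a fresh page,
 * fill it from the host and zero-pad the tail. The three prototypes cover
 * the ->fault API (> 2.6.25), the typed ->nopage API (2.6.x) and the old
 * untyped ->nopage API (2.4.x).
 */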
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
static int
sf_reg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
static struct page *
sf_reg_nopage(struct vm_area_struct *vma, unsigned long vaddr, int *type)
# define SET_TYPE(t) *type = (t)
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) */
static struct page *
sf_reg_nopage(struct vm_area_struct *vma, unsigned long vaddr, int unused)
# define SET_TYPE(t)
#endif
{
    struct page *page;
    char *buf;
    loff_t off;
    uint32_t nread = PAGE_SIZE;
    int err;
    struct file *file = vma->vm_file;
    struct inode *inode = file->f_dentry->d_inode;
    struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
    struct sf_reg_info *sf_r = file->private_data;

    TRACE();
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
    if (vmf->pgoff > vma->vm_end)
        return VM_FAULT_SIGBUS;
#else
    if (vaddr > vma->vm_end) {
        SET_TYPE(VM_FAULT_SIGBUS);
        return NOPAGE_SIGBUS;
    }
#endif

    page = alloc_page(GFP_HIGHUSER);
    if (!page) {
        LogRelFunc(("failed to allocate page\n"));
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
        return VM_FAULT_OOM;
#else
        SET_TYPE(VM_FAULT_OOM);
        return NOPAGE_OOM;
#endif
    }

    buf = kmap(page);
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
    off = (vmf->pgoff << PAGE_SHIFT);
#else
    off = (vaddr - vma->vm_start) + (vma->vm_pgoff << PAGE_SHIFT);
#endif
    err = sf_reg_read_aux(__func__, sf_g, sf_r, buf, &nread, off);
    if (err) {
        kunmap(page);
        put_page(page);
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
        return VM_FAULT_SIGBUS;
#else
        SET_TYPE(VM_FAULT_SIGBUS);
        return NOPAGE_SIGBUS;
#endif
    }

    BUG_ON(nread > PAGE_SIZE);
    if (!nread) {
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
        clear_user_page(page_address(page), vmf->pgoff, page);
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
        clear_user_page(page_address(page), vaddr, page);
#else
        clear_user_page(page_address(page), vaddr);
#endif
    }
    else {
        memset(buf + nread, 0, PAGE_SIZE - nread);
    }

    flush_dcache_page(page);
    kunmap(page);
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
    vmf->page = page;
    return 0;
#else
    SET_TYPE(VM_FAULT_MAJOR);
    return page;
#endif
}

static struct vm_operations_struct sf_vma_ops = {
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
    .fault = sf_reg_fault
#else
    .nopage = sf_reg_nopage
#endif
};

static int
sf_reg_mmap(struct file *file, struct vm_area_struct *vma)
{
    TRACE();
    if (vma->vm_flags & VM_SHARED) {
        LogFunc(("shared mmapping not available\n"));
        return -EINVAL;
    }

    vma->vm_ops = &sf_vma_ops;
    return 0;
}

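/** File operations for regular files on a shared folder. */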
struct file_operations sf_reg_fops = {
    .read = sf_reg_read,
    .open = sf_reg_open,
    .write = sf_reg_write,
    .release = sf_reg_release,
    .mmap = sf_reg_mmap,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 23)
    .splice_read = generic_file_splice_read,
# else
    .sendfile = generic_file_sendfile,
# endif
    .aio_read = generic_file_aio_read,
    .aio_write = generic_file_aio_write,
# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
    .fsync = noop_fsync,
# else
    .fsync = simple_sync_file,
# endif
    .llseek = generic_file_llseek,
#endif
};

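/** Inode operations for regular files on a shared folder. */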
struct inode_operations sf_reg_iops = {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
    .revalidate = sf_inode_revalidate
#else
    .getattr = sf_getattr,
    .setattr = sf_setattr
#endif
};


#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
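/**
 * Read one page cache page from the host, zero-padding the tail, then mark
 * it up to date and unlock it.
 */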
static int
sf_readpage(struct file *file, struct page *page)
{
    struct inode *inode = file->f_dentry->d_inode;
    struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
    struct sf_reg_info *sf_r = file->private_data;
    uint32_t nread = PAGE_SIZE;
    char *buf;
    loff_t off = ((loff_t)page->index) << PAGE_SHIFT;
    int ret;

    TRACE();

    buf = kmap(page);
    ret = sf_reg_read_aux(__func__, sf_g, sf_r, buf, &nread, off);
    if (ret) {
        kunmap(page);
        if (PageLocked(page))
            unlock_page(page);
        return ret;
    }
    BUG_ON(nread > PAGE_SIZE);
    memset(&buf[nread], 0, PAGE_SIZE - nread);
    flush_dcache_page(page);
    kunmap(page);
    SetPageUptodate(page);
    unlock_page(page);
    return 0;
}

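/**
 * Write one dirty page cache page back to the host. For the last, partial
 * page of the file only the bytes up to i_size are written.
 */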
static int
sf_writepage(struct page *page, struct writeback_control *wbc)
{
    struct address_space *mapping = page->mapping;
    struct inode *inode = mapping->host;
    struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
    struct sf_inode_info *sf_i = GET_INODE_INFO(inode);
    struct file *file = sf_i->file;
    struct sf_reg_info *sf_r = file->private_data;
    char *buf;
    uint32_t nwritten = PAGE_SIZE;
    int end_index = inode->i_size >> PAGE_SHIFT;
    loff_t off = ((loff_t)page->index) << PAGE_SHIFT;
    int err;

    TRACE();

    if (page->index >= end_index)
        nwritten = inode->i_size & (PAGE_SIZE - 1);

    buf = kmap(page);

    err = sf_reg_write_aux(__func__, sf_g, sf_r, buf, &nwritten, off);
    if (err < 0) {
        ClearPageUptodate(page);
        goto out;
    }

    if (off > inode->i_size)
        inode->i_size = off;

    if (PageError(page))
        ClearPageError(page);
    err = 0;

out:
    kunmap(page);

    unlock_page(page);
    return err;
}

# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
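/*
 * ->write_begin simply delegates to simple_write_begin(); ->write_end
 * pushes the copied bytes straight through to the host and updates i_size,
 * i.e. writes go write-through rather than being cached for later writeback.
 */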
int
sf_write_begin(struct file *file, struct address_space *mapping, loff_t pos,
               unsigned len, unsigned flags, struct page **pagep, void **fsdata)
{
    TRACE();

    return simple_write_begin(file, mapping, pos, len, flags, pagep, fsdata);
}

int
sf_write_end(struct file *file, struct address_space *mapping, loff_t pos,
             unsigned len, unsigned copied, struct page *page, void *fsdata)
{
    struct inode *inode = mapping->host;
    struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
    struct sf_reg_info *sf_r = file->private_data;
    void *buf;
    unsigned from = pos & (PAGE_SIZE - 1);
    uint32_t nwritten = len;
    int err;

    TRACE();

    buf = kmap(page);
    err = sf_reg_write_aux(__func__, sf_g, sf_r, buf + from, &nwritten, pos);
    kunmap(page);

    /* sf_reg_write_aux() returns 0 or a negative errno, so test the byte
     * count when deciding whether the whole page is now up to date. */
    if (!PageUptodate(page) && nwritten == PAGE_SIZE)
        SetPageUptodate(page);

    if (err >= 0) {
        pos += nwritten;
        if (pos > inode->i_size)
            inode->i_size = pos;
    }

    unlock_page(page);
    page_cache_release(page);

    return nwritten;
}

# endif /* KERNEL_VERSION >= 2.6.24 */

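/** Address space operations, wiring the page cache to the host service. */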
struct address_space_operations sf_reg_aops = {
    .readpage = sf_readpage,
    .writepage = sf_writepage,
# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
    .write_begin = sf_write_begin,
    .write_end = sf_write_end,
# else
    .prepare_write = simple_prepare_write,
    .commit_write = simple_commit_write,
# endif
};
#endif