VirtualBox

source: vbox/trunk/src/VBox/Additions/linux/drm/vbox_main.c@ 66523

最後變更 在這個檔案從66523是 66523,由 vboxsync 提交於 8 年 前

bugref:8834: Makeself installers/Linux: indicate problems better in the exit codes
[PATCH] additions/linux/drm: Cleanup vram mapping and offset
calculating code

Trying to put everything in one pci_iomap_range mapped area leads to
hard to read code and has no advantages. It actually causes us to map
more memory than we need, with the result that part of the memory intended
for generic use is also mapped.

This commit splits the mapping up into two pci_iomap_range calls, one
for the guest_heap + host-flags and another one for the vbva buffers,
which is not done until we actually know how many buffers we need.

Signed-off-by: Hans de Goede <hdegoede@…>
Minor adjustment: GUEST_HEAP_OFFSET -> GUEST_HEAP_OFFSET(vbox)

  • 屬性 svn:eol-style 設為 native
  • 屬性 svn:keywords 設為 Author Date Id Revision
檔案大小: 16.3 KB
 
1/* $Id: vbox_main.c 66523 2017-04-12 10:22:50Z vboxsync $ */
2/** @file
3 * VirtualBox Additions Linux kernel video driver
4 */
5
6/*
7 * Copyright (C) 2013-2016 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.alldomusa.eu.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 * --------------------------------------------------------------------
17 *
18 * This code is based on
19 * ast_main.c
20 * with the following copyright and permission notice:
21 *
22 * Copyright 2012 Red Hat Inc.
23 *
24 * Permission is hereby granted, free of charge, to any person obtaining a
25 * copy of this software and associated documentation files (the
26 * "Software"), to deal in the Software without restriction, including
27 * without limitation the rights to use, copy, modify, merge, publish,
28 * distribute, sub license, and/or sell copies of the Software, and to
29 * permit persons to whom the Software is furnished to do so, subject to
30 * the following conditions:
31 *
32 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
33 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
34 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
35 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
36 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
37 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
38 * USE OR OTHER DEALINGS IN THE SOFTWARE.
39 *
40 * The above copyright notice and this permission notice (including the
41 * next paragraph) shall be included in all copies or substantial portions
42 * of the Software.
43 *
44 */
45/*
46 * Authors: Dave Airlie <[email protected]>
47 */
48#include "vbox_drv.h"
49
50#include <VBoxVideoGuest.h>
51#include <VBoxVideoVBE.h>
52
53#include <drm/drm_fb_helper.h>
54#include <drm/drm_crtc_helper.h>
55
56static void vbox_user_framebuffer_destroy(struct drm_framebuffer *fb)
57{
58 struct vbox_framebuffer *vbox_fb = to_vbox_framebuffer(fb);
59 if (vbox_fb->obj)
60 drm_gem_object_unreference_unlocked(vbox_fb->obj);
61
62 drm_framebuffer_cleanup(fb);
63 kfree(fb);
64}
65
66void vbox_enable_accel(struct vbox_private *vbox)
67{
68 unsigned i;
69 struct VBVABUFFER *vbva;
70
71 if (!vbox->vbva_info || !vbox->vbva_buffers) { /* Should never happen... */
72 printk(KERN_ERR "vboxvideo: failed to set up VBVA.\n");
73 return;
74 }
75 for (i = 0; i < vbox->num_crtcs; ++i) {
76 if (vbox->vbva_info[i].pVBVA == NULL) {
77 vbva = (struct VBVABUFFER *) ((u8 *)vbox->vbva_buffers
78 + i * VBVA_MIN_BUFFER_SIZE);
79 if (!VBoxVBVAEnable(&vbox->vbva_info[i], &vbox->submit_info, vbva, i)) {
80 /* very old host or driver error. */
81 printk(KERN_ERR "vboxvideo: VBoxVBVAEnable failed - heap allocation error.\n");
82 return;
83 }
84 }
85 }
86}
87
88void vbox_disable_accel(struct vbox_private *vbox)
89{
90 unsigned i;
91
92 for (i = 0; i < vbox->num_crtcs; ++i)
93 VBoxVBVADisable(&vbox->vbva_info[i], &vbox->submit_info, i);
94}
95
96void vbox_report_caps(struct vbox_private *vbox)
97{
98 uint32_t caps = VBVACAPS_DISABLE_CURSOR_INTEGRATION
99 | VBVACAPS_IRQ
100 | VBVACAPS_USE_VBVA_ONLY;
101 if (vbox->initial_mode_queried)
102 caps |= VBVACAPS_VIDEO_MODE_HINTS;
103 VBoxHGSMISendCapsInfo(&vbox->submit_info, caps);
104}
105
/** Send information about dirty rectangles to VBVA.  If necessary we enable
 * VBVA first, as this is normally disabled after a change of master in case
 * the new master does not send dirty rectangle information (is this even
 * allowed?)
 *
 * @param fb        framebuffer the rectangles refer to
 * @param rects     dirty regions in framebuffer coordinates
 * @param num_rects number of entries in @a rects
 */
void vbox_framebuffer_dirty_rectangles(struct drm_framebuffer *fb,
                                       struct drm_clip_rect *rects,
                                       unsigned num_rects)
{
    struct vbox_private *vbox = fb->dev->dev_private;
    struct drm_crtc *crtc;
    unsigned i;

    mutex_lock(&vbox->hw_mutex);
    list_for_each_entry(crtc, &fb->dev->mode_config.crtc_list, head) {
        /* Only report for CRTCs currently scanning out this framebuffer. */
        if (CRTC_FB(crtc) == fb) {
            vbox_enable_accel(vbox);
            for (i = 0; i < num_rects; ++i)
            {
                unsigned crtc_id = to_vbox_crtc(crtc)->crtc_id;
                VBVACMDHDR cmd_hdr;

                /* Skip rectangles lying entirely outside this CRTC's
                 * viewport. */
                if ( rects[i].x1 > crtc->x
                   + crtc->hwmode.hdisplay
                   || rects[i].y1 > crtc->y
                   + crtc->hwmode.vdisplay
                   || rects[i].x2 < crtc->x
                   || rects[i].y2 < crtc->y)
                    continue;
                cmd_hdr.x = (int16_t)rects[i].x1;
                cmd_hdr.y = (int16_t)rects[i].y1;
                /* NOTE(review): the cast binds to x2 only, not to the
                 * difference; harmless if the destination field is 16-bit
                 * (assignment truncates either way), but
                 * (uint16_t)(x2 - x1) would read clearer -- confirm. */
                cmd_hdr.w = (uint16_t)rects[i].x2 - rects[i].x1;
                cmd_hdr.h = (uint16_t)rects[i].y2 - rects[i].y1;
                if (VBoxVBVABufferBeginUpdate(&vbox->vbva_info[crtc_id],
                                              &vbox->submit_info))
                {
                    VBoxVBVAWrite(&vbox->vbva_info[crtc_id], &vbox->submit_info, &cmd_hdr,
                                  sizeof(cmd_hdr));
                    VBoxVBVABufferEndUpdate(&vbox->vbva_info[crtc_id]);
                }
            }
        }
    }
    mutex_unlock(&vbox->hw_mutex);
}
150
/** DRM ->dirty callback: forward to the common dirty-rectangle handler.
 * The flags and colour arguments are not used by this driver. */
static int vbox_user_framebuffer_dirty(struct drm_framebuffer *fb,
				       struct drm_file *file_priv,
				       unsigned flags, unsigned color,
				       struct drm_clip_rect *rects,
				       unsigned num_rects)
{
	vbox_framebuffer_dirty_rectangles(fb, rects, num_rects);

	return 0;
}
160
/** Framebuffer callback table passed to drm_framebuffer_init(). */
static const struct drm_framebuffer_funcs vbox_fb_funcs = {
	.destroy = vbox_user_framebuffer_destroy,
	.dirty = vbox_user_framebuffer_dirty,
};
165
166
167int vbox_framebuffer_init(struct drm_device *dev,
168 struct vbox_framebuffer *vbox_fb,
169#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)
170 const
171#endif
172 struct DRM_MODE_FB_CMD *mode_cmd,
173 struct drm_gem_object *obj)
174{
175 int ret;
176
177#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
178 drm_helper_mode_fill_fb_struct(dev, &vbox_fb->base, mode_cmd);
179#else
180 drm_helper_mode_fill_fb_struct(&vbox_fb->base, mode_cmd);
181#endif
182 vbox_fb->obj = obj;
183 ret = drm_framebuffer_init(dev, &vbox_fb->base, &vbox_fb_funcs);
184 if (ret) {
185 DRM_ERROR("framebuffer init failed %d\n", ret);
186 return ret;
187 }
188 return 0;
189}
190
191static struct drm_framebuffer *
192vbox_user_framebuffer_create(struct drm_device *dev,
193 struct drm_file *filp,
194#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)
195 const
196#endif
197 struct drm_mode_fb_cmd2 *mode_cmd)
198{
199 struct drm_gem_object *obj;
200 struct vbox_framebuffer *vbox_fb;
201 int ret;
202
203#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0)
204 obj = drm_gem_object_lookup(filp, mode_cmd->handles[0]);
205#else
206 obj = drm_gem_object_lookup(dev, filp, mode_cmd->handles[0]);
207#endif
208 if (obj == NULL)
209 return ERR_PTR(-ENOENT);
210
211 vbox_fb = kzalloc(sizeof(*vbox_fb), GFP_KERNEL);
212 if (!vbox_fb) {
213 drm_gem_object_unreference_unlocked(obj);
214 return ERR_PTR(-ENOMEM);
215 }
216
217 ret = vbox_framebuffer_init(dev, vbox_fb, mode_cmd, obj);
218 if (ret) {
219 drm_gem_object_unreference_unlocked(obj);
220 kfree(vbox_fb);
221 return ERR_PTR(ret);
222 }
223 return &vbox_fb->base;
224}
225
/** Mode-config callbacks: only user framebuffer creation is implemented. */
static const struct drm_mode_config_funcs vbox_mode_funcs = {
	.fb_create = vbox_user_framebuffer_create,
};
229
/** Undo vbox_accel_init(): disable VBVA, free the per-CRTC contexts and
 * unmap the VBVA buffer region.  Safe to call on partially set up state. */
static void vbox_accel_fini(struct vbox_private *vbox)
{
	if (vbox->vbva_info)
	{
		vbox_disable_accel(vbox);
		kfree(vbox->vbva_info);
		vbox->vbva_info = NULL;
	}
	/* Unmap the region mapped by vbox_accel_init(). */
	if (vbox->vbva_buffers) {
		pci_iounmap(vbox->dev->pdev, vbox->vbva_buffers);
		vbox->vbva_buffers = NULL;
	}
}
243
/** Allocate the per-CRTC VBVA contexts and map the VBVA command buffers,
 * which live at the end of usable VRAM (one VBVA_MIN_BUFFER_SIZE slice per
 * CRTC).  The guest heap has its own, separate mapping -- see
 * vbox_hw_init().
 *
 * @return 0 on success, -ENOMEM on allocation or mapping failure.  On
 *         failure any partial state is released by vbox_accel_fini() via
 *         the caller's unwind path (vbox_driver_load() -> out_free). */
static int vbox_accel_init(struct vbox_private *vbox)
{
	unsigned i;

	vbox->vbva_info = kcalloc(vbox->num_crtcs, sizeof(*vbox->vbva_info),
				  GFP_KERNEL);
	if (!vbox->vbva_info)
		return -ENOMEM;

	/* Take a command buffer for each screen from the end of usable VRAM. */
	vbox->available_vram_size -= vbox->num_crtcs * VBVA_MIN_BUFFER_SIZE;

	/* Map only the VBVA buffer area, not the whole of VRAM. */
	vbox->vbva_buffers = pci_iomap_range(vbox->dev->pdev, 0,
					     vbox->available_vram_size,
					     vbox->num_crtcs * VBVA_MIN_BUFFER_SIZE);
	if (!vbox->vbva_buffers)
		return -ENOMEM;

	for (i = 0; i < vbox->num_crtcs; ++i)
		VBoxVBVASetupBufferContext(&vbox->vbva_info[i],
					   vbox->available_vram_size + i * VBVA_MIN_BUFFER_SIZE,
					   VBVA_MIN_BUFFER_SIZE);
	return 0;
}
268
269/** Allocation function for the HGSMI heap and data. */
270static DECLCALLBACK(void *) alloc_hgsmi_environ(void *environ, HGSMISIZE size)
271{
272 NOREF(environ);
273 return kmalloc(size, GFP_KERNEL);
274}
275
276
277/** Free function for the HGSMI heap and data. */
278static DECLCALLBACK(void) free_hgsmi_environ(void *environ, void *ptr)
279{
280 NOREF(environ);
281 kfree(ptr);
282}
283
284
/** Pointers to the HGSMI heap and data manipulation functions.
 * Field order follows the HGSMIENV declaration: context pointer (unused
 * here, hence NULL), allocator, deallocator -- presumably; verify against
 * the HGSMIENV definition. */
static HGSMIENV hgsmi_environ =
{
	NULL,
	alloc_hgsmi_environ,
	free_hgsmi_environ
};
292
293
294/** Do we support the 4.3 plus mode hint reporting interface? */
295static bool have_hgsmi_mode_hints(struct vbox_private *vbox)
296{
297 uint32_t have_hints, have_cursor;
298
299 return RT_SUCCESS(VBoxQueryConfHGSMI(&vbox->submit_info, VBOX_VBVA_CONF32_MODE_HINT_REPORTING, &have_hints))
300 && RT_SUCCESS(VBoxQueryConfHGSMI(&vbox->submit_info, VBOX_VBVA_CONF32_GUEST_CURSOR_REPORTING, &have_cursor))
301 && have_hints == VINF_SUCCESS
302 && have_cursor == VINF_SUCCESS;
303}
304
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)
/* pci_iomap_range() is not available before kernel 4.0; fall back to a
 * plain ioremap() of the BAR region.
 * NOTE(review): this compat macro is defined *after* vbox_accel_init()
 * above, which also calls pci_iomap_range() -- confirm pre-4.0 kernels
 * still build, or move this definition above the first use. */
# define pci_iomap_range(dev, bar, offset, maxlen) \
 ioremap(pci_resource_start(dev, bar) + offset, maxlen)
#endif
309
310/** Set up our heaps and data exchange buffers in VRAM before handing the rest
311 * to the memory manager. */
312static int vbox_hw_init(struct vbox_private *vbox)
313{
314 vbox->full_vram_size = VBoxVideoGetVRAMSize();
315 vbox->any_pitch = VBoxVideoAnyWidthAllowed();
316 DRM_INFO("VRAM %08x\n", vbox->full_vram_size);
317
318 /* Map guest-heap at end of vram */
319 vbox->guest_heap = pci_iomap_range(vbox->dev->pdev, 0, GUEST_HEAP_OFFSET(vbox),
320 GUEST_HEAP_SIZE);
321 if (!vbox->guest_heap)
322 return -ENOMEM;
323
324 if (RT_FAILURE(VBoxHGSMISetupGuestContext(&vbox->submit_info, vbox->guest_heap,
325 GUEST_HEAP_USABLE_SIZE, GUEST_HEAP_OFFSET(vbox),
326 &hgsmi_environ)))
327 return -ENOMEM;
328 /* Reduce available VRAM size to reflect the guest heap. */
329 vbox->available_vram_size = GUEST_HEAP_OFFSET(vbox);
330 /* Linux drm represents monitors as a 32-bit array. */
331 vbox->num_crtcs = min(VBoxHGSMIGetMonitorCount(&vbox->submit_info),
332 (uint32_t)VBOX_MAX_SCREENS);
333 if (!have_hgsmi_mode_hints(vbox))
334 return -ENOTSUPP;
335 vbox->last_mode_hints = kzalloc(sizeof(VBVAMODEHINT) * vbox->num_crtcs, GFP_KERNEL);
336 if (!vbox->last_mode_hints)
337 return -ENOMEM;
338 return vbox_accel_init(vbox);
339}
340
341static void vbox_hw_fini(struct vbox_private *vbox)
342{
343 vbox_accel_fini(vbox);
344 if (vbox->last_mode_hints)
345 kfree(vbox->last_mode_hints);
346 vbox->last_mode_hints = NULL;
347}
348
/** Main initialisation entry point, called by the DRM core when the driver
 * is bound.
 *
 * All failure paths funnel through the single out_free label, which calls
 * vbox_driver_unload() to unwind whatever was set up so far.
 * NOTE(review): that relies on every fini routine coping with stages that
 * were never initialised; vbox_driver_unload() guards mode_config.funcs and
 * guest_heap explicitly, but vbox_fbdev_fini()/vbox_irq_fini() are called
 * unconditionally -- confirm they tolerate a partial load. */
int vbox_driver_load(struct drm_device *dev, unsigned long flags)
{
	struct vbox_private *vbox;
	int ret = 0;

	if (!VBoxHGSMIIsSupported())
		return -ENODEV;
	vbox = kzalloc(sizeof(struct vbox_private), GFP_KERNEL);
	if (!vbox)
		return -ENOMEM;

	dev->dev_private = vbox;
	vbox->dev = dev;

	mutex_init(&vbox->hw_mutex);

	ret = vbox_hw_init(vbox);
	if (ret)
		goto out_free;

	ret = vbox_mm_init(vbox);
	if (ret)
		goto out_free;

	drm_mode_config_init(dev);

	/* Cast drops const -- presumably for kernels where .funcs was not
	 * declared const; confirm before removing. */
	dev->mode_config.funcs = (void *)&vbox_mode_funcs;
	dev->mode_config.min_width = 64;
	dev->mode_config.min_height = 64;
	dev->mode_config.preferred_depth = 24;
	dev->mode_config.max_width = VBE_DISPI_MAX_XRES;
	dev->mode_config.max_height = VBE_DISPI_MAX_YRES;

	ret = vbox_mode_init(dev);
	if (ret)
		goto out_free;

	ret = vbox_irq_init(vbox);
	if (ret)
		goto out_free;

	ret = vbox_fbdev_init(dev);
	if (ret)
		goto out_free;
	return 0;
out_free:
	vbox_driver_unload(dev);
	return ret;
}
398
/** Driver unload callback.  Also used by vbox_driver_load() to unwind a
 * partially completed load, so teardown steps that might not have run at
 * load time are guarded.  The DRM core changed the callback's return type
 * to void in kernel 4.11, hence the conditional signature. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
void vbox_driver_unload(struct drm_device *dev)
#else
int vbox_driver_unload(struct drm_device *dev)
#endif
{
	struct vbox_private *vbox = dev->dev_private;

	vbox_fbdev_fini(dev);
	vbox_irq_fini(vbox);
	vbox_mode_fini(dev);
	/* funcs is only set after drm_mode_config_init() ran in
	 * vbox_driver_load(), so this guards against a partial load. */
	if (dev->mode_config.funcs)
		drm_mode_config_cleanup(dev);

	vbox_hw_fini(vbox);
	vbox_mm_fini(vbox);
	/* Release the guest heap mapping created in vbox_hw_init(). */
	if (vbox->guest_heap)
		pci_iounmap(dev->pdev, vbox->guest_heap);
	kfree(vbox);
	dev->dev_private = NULL;
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
	return 0;
#endif
}
423
/** @note this is described in the DRM framework documentation.  AST does not
 * have it, but we get an oops on driver unload if it is not present.
 *
 * Restores the fbdev console mode when the last userspace client closes. */
void vbox_driver_lastclose(struct drm_device *dev)
{
	struct vbox_private *vbox = dev->dev_private;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
	/* The _unlocked helper (3.16+) takes the modeset locks itself. */
	if (vbox->fbdev)
		drm_fb_helper_restore_fbdev_mode_unlocked(&vbox->fbdev->helper);
#else
	drm_modeset_lock_all(dev);
	if (vbox->fbdev)
		drm_fb_helper_restore_fbdev_mode(&vbox->fbdev->helper);
	drm_modeset_unlock_all(dev);
#endif
}
440
441int vbox_gem_create(struct drm_device *dev,
442 u32 size, bool iskernel,
443 struct drm_gem_object **obj)
444{
445 struct vbox_bo *vboxbo;
446 int ret;
447
448 *obj = NULL;
449
450 size = roundup(size, PAGE_SIZE);
451 if (size == 0)
452 return -EINVAL;
453
454 ret = vbox_bo_create(dev, size, 0, 0, &vboxbo);
455 if (ret) {
456 if (ret != -ERESTARTSYS)
457 DRM_ERROR("failed to allocate GEM object\n");
458 return ret;
459 }
460 *obj = &vboxbo->gem;
461 return 0;
462}
463
464int vbox_dumb_create(struct drm_file *file,
465 struct drm_device *dev,
466 struct drm_mode_create_dumb *args)
467{
468 int ret;
469 struct drm_gem_object *gobj;
470 u32 handle;
471
472 args->pitch = args->width * ((args->bpp + 7) / 8);
473 args->size = args->pitch * args->height;
474
475 ret = vbox_gem_create(dev, args->size, false,
476 &gobj);
477 if (ret)
478 return ret;
479
480 ret = drm_gem_handle_create(file, gobj, &handle);
481 drm_gem_object_unreference_unlocked(gobj);
482 if (ret)
483 return ret;
484
485 args->handle = handle;
486 return 0;
487}
488
489#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
490int vbox_dumb_destroy(struct drm_file *file,
491 struct drm_device *dev,
492 uint32_t handle)
493{
494 return drm_gem_handle_delete(file, handle);
495}
496#endif
497
/** Drop one TTM reference on *bo; NULL-safe. */
static void vbox_bo_unref(struct vbox_bo **bo)
{
	struct ttm_buffer_object *tbo;

	if ((*bo) == NULL)
		return;

	tbo = &((*bo)->bo);
	ttm_bo_unref(&tbo);
	/* NOTE(review): ttm_bo_unref() appears to always NULL its argument,
	 * which would make this condition always true and clear the caller's
	 * pointer unconditionally -- confirm against the targeted TTM
	 * versions. */
	if (tbo == NULL)
		*bo = NULL;

}
/** GEM free callback: release our buffer-object reference. */
void vbox_gem_free_object(struct drm_gem_object *obj)
{
	struct vbox_bo *bo = gem_to_vbox_bo(obj);

	vbox_bo_unref(&bo);
}
517
518
/** Return the fake offset userspace passes to mmap() to map this buffer
 * object.  The field holding it moved into the drm_vma_node in kernel
 * 3.12, hence the conditional. */
static inline u64 vbox_bo_mmap_offset(struct vbox_bo *bo)
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
	return bo->bo.addr_space_offset;
#else
	return drm_vma_node_offset_addr(&bo->bo.vma_node);
#endif
}
/**
 * Look up the buffer object behind a dumb-buffer handle and report the
 * fake offset userspace must pass to mmap().
 *
 * @param file   calling DRM file whose handle namespace is searched
 * @param dev    the DRM device
 * @param handle GEM handle previously returned by vbox_dumb_create()
 * @param offset out: mmap fake offset for the object
 * @return 0 on success, -ENOENT if the handle does not resolve
 */
int
vbox_dumb_mmap_offset(struct drm_file *file,
		      struct drm_device *dev,
		      uint32_t handle,
		      uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret;
	struct vbox_bo *bo;

	mutex_lock(&dev->struct_mutex);
	/* drm_gem_object_lookup() lost its drm_device parameter in 4.7. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0)
	obj = drm_gem_object_lookup(file, handle);
#else
	obj = drm_gem_object_lookup(dev, file, handle);
#endif
	if (obj == NULL) {
		ret = -ENOENT;
		goto out_unlock;
	}

	bo = gem_to_vbox_bo(obj);
	*offset = vbox_bo_mmap_offset(bo);

	/* Locked unreference variant: struct_mutex is still held here. */
	drm_gem_object_unreference(obj);
	ret = 0;
out_unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;

}
注意: 瀏覽 TracBrowser 來幫助您使用儲存庫瀏覽器

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette