VirtualBox

source: vbox/trunk/src/VBox/Additions/linux/drm/vbox_main.c@ 66509

最後變更 在這個檔案從66509是 66509,由 vboxsync 提交於 8 年 前

bugref:8524: Additions/linux: play nicely with distribution-installed Additions

additions/linux/drm: Remove unnecessary check from accel_init

accel_init gets called exactly once per instance, so there is no need
to check if vbva_info has already been allocated.

While at it also switch to kcalloc.

Signed-off-by: Hans de Goede <hdegoede@…>.

  • 屬性 svn:eol-style 設為 native
  • 屬性 svn:keywords 設為 Author Date Id Revision
檔案大小: 16.7 KB
 
1/* $Id: vbox_main.c 66509 2017-04-11 11:19:10Z vboxsync $ */
2/** @file
3 * VirtualBox Additions Linux kernel video driver
4 */
5
6/*
7 * Copyright (C) 2013-2016 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.alldomusa.eu.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 * --------------------------------------------------------------------
17 *
18 * This code is based on
19 * ast_main.c
20 * with the following copyright and permission notice:
21 *
22 * Copyright 2012 Red Hat Inc.
23 *
24 * Permission is hereby granted, free of charge, to any person obtaining a
25 * copy of this software and associated documentation files (the
26 * "Software"), to deal in the Software without restriction, including
27 * without limitation the rights to use, copy, modify, merge, publish,
28 * distribute, sub license, and/or sell copies of the Software, and to
29 * permit persons to whom the Software is furnished to do so, subject to
30 * the following conditions:
31 *
32 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
33 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
34 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
35 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
36 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
37 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
38 * USE OR OTHER DEALINGS IN THE SOFTWARE.
39 *
40 * The above copyright notice and this permission notice (including the
41 * next paragraph) shall be included in all copies or substantial portions
42 * of the Software.
43 *
44 */
45/*
46 * Authors: Dave Airlie <[email protected]>
47 */
48#include "vbox_drv.h"
49
50#include <VBoxVideoGuest.h>
51#include <VBoxVideoVBE.h>
52
53#include <drm/drm_fb_helper.h>
54#include <drm/drm_crtc_helper.h>
55
56static void vbox_user_framebuffer_destroy(struct drm_framebuffer *fb)
57{
58 struct vbox_framebuffer *vbox_fb = to_vbox_framebuffer(fb);
59 if (vbox_fb->obj)
60 drm_gem_object_unreference_unlocked(vbox_fb->obj);
61
62 drm_framebuffer_cleanup(fb);
63 kfree(fb);
64}
65
66void vbox_enable_accel(struct vbox_private *vbox)
67{
68 unsigned i;
69 struct VBVABUFFER *vbva;
70 uint32_t vram_map_offset = vbox->available_vram_size - vbox->vram_map_start;
71
72 if (vbox->vbva_info == NULL) { /* Should never happen... */
73 printk(KERN_ERR "vboxvideo: failed to set up VBVA.\n");
74 return;
75 }
76 for (i = 0; i < vbox->num_crtcs; ++i) {
77 if (vbox->vbva_info[i].pVBVA == NULL) {
78 vbva = (struct VBVABUFFER *) ( ((uint8_t *)vbox->mapped_vram)
79 + vram_map_offset
80 + i * VBVA_MIN_BUFFER_SIZE);
81 if (!VBoxVBVAEnable(&vbox->vbva_info[i], &vbox->submit_info, vbva, i)) {
82 /* very old host or driver error. */
83 printk(KERN_ERR "vboxvideo: VBoxVBVAEnable failed - heap allocation error.\n");
84 return;
85 }
86 }
87 }
88}
89
90void vbox_disable_accel(struct vbox_private *vbox)
91{
92 unsigned i;
93
94 for (i = 0; i < vbox->num_crtcs; ++i)
95 VBoxVBVADisable(&vbox->vbva_info[i], &vbox->submit_info, i);
96}
97
98void vbox_report_caps(struct vbox_private *vbox)
99{
100 uint32_t caps = VBVACAPS_DISABLE_CURSOR_INTEGRATION
101 | VBVACAPS_IRQ
102 | VBVACAPS_USE_VBVA_ONLY;
103 if (vbox->initial_mode_queried)
104 caps |= VBVACAPS_VIDEO_MODE_HINTS;
105 VBoxHGSMISendCapsInfo(&vbox->submit_info, caps);
106}
107
/** Send information about dirty rectangles to VBVA.  If necessary we enable
 * VBVA first, as this is normally disabled after a change of master in case
 * the new master does not send dirty rectangle information (is this even
 * allowed?)
 * @param fb        the framebuffer the rectangles refer to
 * @param rects     array of dirty rectangles in framebuffer coordinates
 * @param num_rects number of entries in @a rects */
void vbox_framebuffer_dirty_rectangles(struct drm_framebuffer *fb,
				       struct drm_clip_rect *rects,
				       unsigned num_rects)
{
	struct vbox_private *vbox = fb->dev->dev_private;
	struct drm_crtc *crtc;
	unsigned i;

	mutex_lock(&vbox->hw_mutex);
	list_for_each_entry(crtc, &fb->dev->mode_config.crtc_list, head) {
		/* Only report rectangles for CRTCs scanning out this fb. */
		if (CRTC_FB(crtc) == fb) {
			vbox_enable_accel(vbox);
			for (i = 0; i < num_rects; ++i)
			{
				unsigned crtc_id = to_vbox_crtc(crtc)->crtc_id;
				VBVACMDHDR cmd_hdr;

				/* Skip rectangles entirely outside this
				 * CRTC's viewport. */
				if (   rects[i].x1 > crtc->x
					       + crtc->hwmode.hdisplay
				    || rects[i].y1 > crtc->y
					       + crtc->hwmode.vdisplay
				    || rects[i].x2 < crtc->x
				    || rects[i].y2 < crtc->y)
					continue;
				cmd_hdr.x = (int16_t)rects[i].x1;
				cmd_hdr.y = (int16_t)rects[i].y1;
				/* NOTE(review): the casts below bind to
				 * x2/y2 only, not to the difference;
				 * (uint16_t)(x2 - x1) would be clearer,
				 * though the stored result is the same. */
				cmd_hdr.w = (uint16_t)rects[i].x2 - rects[i].x1;
				cmd_hdr.h = (uint16_t)rects[i].y2 - rects[i].y1;
				if (VBoxVBVABufferBeginUpdate(&vbox->vbva_info[crtc_id],
							      &vbox->submit_info))
				{
					VBoxVBVAWrite(&vbox->vbva_info[crtc_id],
						      &vbox->submit_info,
						      &cmd_hdr,
						      sizeof(cmd_hdr));
					VBoxVBVABufferEndUpdate(&vbox->vbva_info[crtc_id]);
				}
			}
		}
	}
	mutex_unlock(&vbox->hw_mutex);
}
152
/** DRM dirty callback: hand the rectangles on to the VBVA reporting code.
 * The flags and colour parameters are not used by this driver. */
static int vbox_user_framebuffer_dirty(struct drm_framebuffer *fb,
				       struct drm_file *file_priv,
				       unsigned flags, unsigned color,
				       struct drm_clip_rect *rects,
				       unsigned num_rects)
{
	vbox_framebuffer_dirty_rectangles(fb, rects, num_rects);

	return 0;
}
162
/** Framebuffer operations: destruction and dirty-rectangle reporting. */
static const struct drm_framebuffer_funcs vbox_fb_funcs = {
	.destroy = vbox_user_framebuffer_destroy,
	.dirty = vbox_user_framebuffer_dirty,
};
167
168
169int vbox_framebuffer_init(struct drm_device *dev,
170 struct vbox_framebuffer *vbox_fb,
171#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)
172 const
173#endif
174 struct DRM_MODE_FB_CMD *mode_cmd,
175 struct drm_gem_object *obj)
176{
177 int ret;
178
179#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
180 drm_helper_mode_fill_fb_struct(dev, &vbox_fb->base, mode_cmd);
181#else
182 drm_helper_mode_fill_fb_struct(&vbox_fb->base, mode_cmd);
183#endif
184 vbox_fb->obj = obj;
185 ret = drm_framebuffer_init(dev, &vbox_fb->base, &vbox_fb_funcs);
186 if (ret) {
187 DRM_ERROR("framebuffer init failed %d\n", ret);
188 return ret;
189 }
190 return 0;
191}
192
193static struct drm_framebuffer *
194vbox_user_framebuffer_create(struct drm_device *dev,
195 struct drm_file *filp,
196#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)
197 const
198#endif
199 struct drm_mode_fb_cmd2 *mode_cmd)
200{
201 struct drm_gem_object *obj;
202 struct vbox_framebuffer *vbox_fb;
203 int ret;
204
205#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0)
206 obj = drm_gem_object_lookup(filp, mode_cmd->handles[0]);
207#else
208 obj = drm_gem_object_lookup(dev, filp, mode_cmd->handles[0]);
209#endif
210 if (obj == NULL)
211 return ERR_PTR(-ENOENT);
212
213 vbox_fb = kzalloc(sizeof(*vbox_fb), GFP_KERNEL);
214 if (!vbox_fb) {
215 drm_gem_object_unreference_unlocked(obj);
216 return ERR_PTR(-ENOMEM);
217 }
218
219 ret = vbox_framebuffer_init(dev, vbox_fb, mode_cmd, obj);
220 if (ret) {
221 drm_gem_object_unreference_unlocked(obj);
222 kfree(vbox_fb);
223 return ERR_PTR(ret);
224 }
225 return &vbox_fb->base;
226}
227
/** Mode configuration operations: user-space framebuffer creation. */
static const struct drm_mode_config_funcs vbox_mode_funcs = {
	.fb_create = vbox_user_framebuffer_create,
};
231
232static void vbox_accel_fini(struct vbox_private *vbox)
233{
234 if (vbox->vbva_info)
235 {
236 vbox_disable_accel(vbox);
237 kfree(vbox->vbva_info);
238 vbox->vbva_info = NULL;
239 }
240}
241
242static int vbox_accel_init(struct vbox_private *vbox)
243{
244 unsigned i;
245
246 vbox->vbva_info = kcalloc(vbox->num_crtcs, sizeof(*vbox->vbva_info),
247 GFP_KERNEL);
248 if (!vbox->vbva_info)
249 return -ENOMEM;
250
251 /* Take a command buffer for each screen from the end of usable VRAM. */
252 vbox->available_vram_size -= vbox->num_crtcs * VBVA_MIN_BUFFER_SIZE;
253 for (i = 0; i < vbox->num_crtcs; ++i)
254 VBoxVBVASetupBufferContext(&vbox->vbva_info[i],
255 vbox->available_vram_size + i * VBVA_MIN_BUFFER_SIZE,
256 VBVA_MIN_BUFFER_SIZE);
257 return 0;
258}
259
/** Allocation function for the HGSMI heap and data.  The environment
 * context pointer is unused; memory comes from the kernel heap. */
static DECLCALLBACK(void *) alloc_hgsmi_environ(void *environ, HGSMISIZE size)
{
	NOREF(environ);
	return kmalloc(size, GFP_KERNEL);
}
266
267
/** Free function for the HGSMI heap and data, the counterpart to
 * alloc_hgsmi_environ().  The environment context pointer is unused. */
static DECLCALLBACK(void) free_hgsmi_environ(void *environ, void *ptr)
{
	NOREF(environ);
	kfree(ptr);
}
274
275
/** Pointers to the HGSMI heap and data manipulation functions. */
static HGSMIENV hgsmi_environ =
{
	NULL,			/* context passed back to the callbacks (unused by them) */
	alloc_hgsmi_environ,	/* allocator */
	free_hgsmi_environ	/* deallocator */
};
283
284
285/** Do we support the 4.3 plus mode hint reporting interface? */
286static bool have_hgsmi_mode_hints(struct vbox_private *vbox)
287{
288 uint32_t have_hints, have_cursor;
289
290 return RT_SUCCESS(VBoxQueryConfHGSMI(&vbox->submit_info, VBOX_VBVA_CONF32_MODE_HINT_REPORTING, &have_hints))
291 && RT_SUCCESS(VBoxQueryConfHGSMI(&vbox->submit_info, VBOX_VBVA_CONF32_GUEST_CURSOR_REPORTING, &have_cursor))
292 && have_hints == VINF_SUCCESS
293 && have_cursor == VINF_SUCCESS;
294}
295
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)
/* Fallback for kernels without pci_iomap_range(): map the requested part of
 * the BAR directly with ioremap(). */
# define pci_iomap_range(dev, bar, offset, maxlen) \
	ioremap(pci_resource_start(dev, bar) + offset, maxlen)
#endif
300
/** Set up our heaps and data exchange buffers in VRAM before handing the rest
 * to the memory manager.
 * @returns 0 on success, -ENOMEM on allocation/mapping failure, -ENOTSUPP
 *          if the host lacks the mode hint reporting interface. */
static int vbox_hw_init(struct vbox_private *vbox)
{
	uint32_t base_offset, map_start, guest_heap_offset, guest_heap_size,
		 host_flags_offset;
	void *guest_heap;

	vbox->full_vram_size = VBoxVideoGetVRAMSize();
	vbox->any_pitch = VBoxVideoAnyWidthAllowed();
	DRM_INFO("VRAM %08x\n", vbox->full_vram_size);

	/* Ask where inside VRAM the guest heap and host flags are located. */
	VBoxHGSMIGetBaseMappingInfo(vbox->full_vram_size, &base_offset, NULL,
				    &guest_heap_offset, &guest_heap_size,
				    &host_flags_offset);
	/* Map from where the per-screen VBVA buffers will start (just below
	 * base_offset, clamped to zero) up to the end of VRAM. */
	map_start = (uint32_t)max((int)base_offset
				  - VBOX_MAX_SCREENS * VBVA_MIN_BUFFER_SIZE, 0);
	vbox->mapped_vram = pci_iomap_range(vbox->dev->pdev, 0, map_start,
					    vbox->full_vram_size - map_start);
	if (!vbox->mapped_vram)
		return -ENOMEM;
	vbox->vram_map_start = map_start;

	/* Offsets from the host are relative to the start of VRAM; adjust
	 * them to our partial mapping. */
	guest_heap = ((uint8_t *)vbox->mapped_vram) + base_offset - map_start
		     + guest_heap_offset;
	vbox->host_flags_offset = base_offset - map_start + host_flags_offset;
	if (RT_FAILURE(VBoxHGSMISetupGuestContext(&vbox->submit_info, guest_heap,
						  guest_heap_size,
						  base_offset + guest_heap_offset,
						  &hgsmi_environ)))
		return -ENOMEM;

	/* Reduce available VRAM size to reflect the guest heap. */
	vbox->available_vram_size = base_offset;
	/* Linux drm represents monitors as a 32-bit array. */
	vbox->num_crtcs = min(VBoxHGSMIGetMonitorCount(&vbox->submit_info),
			      (uint32_t)VBOX_MAX_SCREENS);
	if (!have_hgsmi_mode_hints(vbox))
		return -ENOTSUPP;
	vbox->last_mode_hints = kzalloc(sizeof(VBVAMODEHINT) * vbox->num_crtcs, GFP_KERNEL);
	if (!vbox->last_mode_hints)
		return -ENOMEM;
	return vbox_accel_init(vbox);
}
340
341static void vbox_hw_fini(struct vbox_private *vbox)
342{
343 vbox_accel_fini(vbox);
344 if (vbox->last_mode_hints)
345 kfree(vbox->last_mode_hints);
346 vbox->last_mode_hints = NULL;
347}
348
/** Driver load callback: allocate and initialise the device-private data
 * and bring up the hardware, memory manager, mode setting, interrupt
 * handling and fbdev emulation in that order.
 * @returns 0 on success or a negative errno value. */
int vbox_driver_load(struct drm_device *dev, unsigned long flags)
{
	struct vbox_private *vbox;
	int ret = 0;

	if (!VBoxHGSMIIsSupported())
		return -ENODEV;
	vbox = kzalloc(sizeof(struct vbox_private), GFP_KERNEL);
	if (!vbox)
		return -ENOMEM;

	dev->dev_private = vbox;
	vbox->dev = dev;

	mutex_init(&vbox->hw_mutex);

	ret = vbox_hw_init(vbox);
	if (ret)
		goto out_free;

	ret = vbox_mm_init(vbox);
	if (ret)
		goto out_free;

	drm_mode_config_init(dev);

	dev->mode_config.funcs = (void *)&vbox_mode_funcs;
	/* Smallest mode size we will report to user space. */
	dev->mode_config.min_width = 64;
	dev->mode_config.min_height = 64;
	dev->mode_config.preferred_depth = 24;
	dev->mode_config.max_width = VBE_DISPI_MAX_XRES;
	dev->mode_config.max_height = VBE_DISPI_MAX_YRES;

	ret = vbox_mode_init(dev);
	if (ret)
		goto out_free;

	ret = vbox_irq_init(vbox);
	if (ret)
		goto out_free;

	ret = vbox_fbdev_init(dev);
	if (ret)
		goto out_free;
	return 0;
out_free:
	/* Unload handles partially initialised state. */
	vbox_driver_unload(dev);
	return ret;
}
398
/** Driver unload callback: tear down in reverse order of vbox_driver_load().
 * Also used as that function's error path, so each step must cope with the
 * corresponding init step not having run.  The return type changed to void
 * in kernel 4.11. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
void vbox_driver_unload(struct drm_device *dev)
#else
int vbox_driver_unload(struct drm_device *dev)
#endif
{
	struct vbox_private *vbox = dev->dev_private;

	vbox_fbdev_fini(dev);
	vbox_irq_fini(vbox);
	vbox_mode_fini(dev);
	/* mode_config.funcs is only set after drm_mode_config_init() ran. */
	if (dev->mode_config.funcs)
		drm_mode_config_cleanup(dev);

	vbox_hw_fini(vbox);
	vbox_mm_fini(vbox);
	if (vbox->mapped_vram)
		pci_iounmap(dev->pdev, vbox->mapped_vram);
	kfree(vbox);
	dev->dev_private = NULL;
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
	/* Older kernels expect an int return; we always succeed. */
	return 0;
#endif
}
423
/** @note this is described in the DRM framework documentation.  AST does not
 * have it, but we get an oops on driver unload if it is not present.
 * Restores the fbdev console mode when the last user closes the device. */
void vbox_driver_lastclose(struct drm_device *dev)
{
	struct vbox_private *vbox = dev->dev_private;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
	if (vbox->fbdev)
		drm_fb_helper_restore_fbdev_mode_unlocked(&vbox->fbdev->helper);
#else
	/* Pre-3.16 kernels have no _unlocked variant: take the modeset
	 * locks ourselves. */
	drm_modeset_lock_all(dev);
	if (vbox->fbdev)
		drm_fb_helper_restore_fbdev_mode(&vbox->fbdev->helper);
	drm_modeset_unlock_all(dev);
#endif
}
440
441int vbox_gem_create(struct drm_device *dev,
442 u32 size, bool iskernel,
443 struct drm_gem_object **obj)
444{
445 struct vbox_bo *vboxbo;
446 int ret;
447
448 *obj = NULL;
449
450 size = roundup(size, PAGE_SIZE);
451 if (size == 0)
452 return -EINVAL;
453
454 ret = vbox_bo_create(dev, size, 0, 0, &vboxbo);
455 if (ret) {
456 if (ret != -ERESTARTSYS)
457 DRM_ERROR("failed to allocate GEM object\n");
458 return ret;
459 }
460 *obj = &vboxbo->gem;
461 return 0;
462}
463
/** DRM dumb-buffer creation callback: compute pitch and size for the
 * requested width/height/bpp, allocate a GEM object of that size and
 * return a handle to it in @a args. */
int vbox_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	int ret;
	struct drm_gem_object *gobj;
	u32 handle;

	/* Bytes per scan-line: width times bpp rounded up to whole bytes. */
	args->pitch = args->width * ((args->bpp + 7) / 8);
	/* NOTE(review): pitch * height is evaluated in 32 bits and could wrap
	 * for very large user-supplied dimensions - TODO confirm the upper
	 * layers bound width/height first. */
	args->size = args->pitch * args->height;

	ret = vbox_gem_create(dev, args->size, false,
			      &gobj);
	if (ret)
		return ret;

	ret = drm_gem_handle_create(file, gobj, &handle);
	/* The handle holds its own reference now; drop ours. */
	drm_gem_object_unreference_unlocked(gobj);
	if (ret)
		return ret;

	args->handle = handle;
	return 0;
}
488
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
/** Dumb-buffer destroy callback, only needed on kernels before 3.12:
 * simply drops the GEM handle. */
int vbox_dumb_destroy(struct drm_file *file,
		      struct drm_device *dev,
		      uint32_t handle)
{
	return drm_gem_handle_delete(file, handle);
}
#endif
497
/** Drop a reference to a vbox buffer object via TTM, clearing the caller's
 * pointer if TTM cleared its own. */
static void vbox_bo_unref(struct vbox_bo **bo)
{
	struct ttm_buffer_object *tbo;

	if ((*bo) == NULL)
		return;

	tbo = &((*bo)->bo);
	ttm_bo_unref(&tbo);
	/* NOTE(review): ttm_bo_unref() appears to NULL its argument
	 * unconditionally, which would make this check always true -
	 * confirm against the TTM version in use. */
	if (tbo == NULL)
		*bo = NULL;

}
/** GEM free callback: release our buffer-object reference, which frees the
 * object once the last reference is gone. */
void vbox_gem_free_object(struct drm_gem_object *obj)
{
	struct vbox_bo *bo = gem_to_vbox_bo(obj);

	vbox_bo_unref(&bo);
}
517
518
/** Return the fake offset user space passes to mmap() to map this buffer
 * object; the field holding it moved in kernel 3.12. */
static inline u64 vbox_bo_mmap_offset(struct vbox_bo *bo)
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
	return bo->bo.addr_space_offset;
#else
	return drm_vma_node_offset_addr(&bo->bo.vma_node);
#endif
}
527int
528vbox_dumb_mmap_offset(struct drm_file *file,
529 struct drm_device *dev,
530 uint32_t handle,
531 uint64_t *offset)
532{
533 struct drm_gem_object *obj;
534 int ret;
535 struct vbox_bo *bo;
536
537 mutex_lock(&dev->struct_mutex);
538#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0)
539 obj = drm_gem_object_lookup(file, handle);
540#else
541 obj = drm_gem_object_lookup(dev, file, handle);
542#endif
543 if (obj == NULL) {
544 ret = -ENOENT;
545 goto out_unlock;
546 }
547
548 bo = gem_to_vbox_bo(obj);
549 *offset = vbox_bo_mmap_offset(bo);
550
551 drm_gem_object_unreference(obj);
552 ret = 0;
553out_unlock:
554 mutex_unlock(&dev->struct_mutex);
555 return ret;
556
557}
注意: 瀏覽 TracBrowser 來幫助您使用儲存庫瀏覽器

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette