VirtualBox

source: vbox/trunk/src/VBox/HostDrivers/VBoxNetFlt/linux/VBoxNetFlt-linux.c @ 19114

Last change on this file since 19114 was 19114, checked in by vboxsync, 16 years ago

Additional logging in netfilter.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 36.8 KB

/* $Id: VBoxNetFlt-linux.c 19114 2009-04-22 15:10:36Z vboxsync $ */
/** @file
 * VBoxNetFlt - Network Filter Driver (Host), Linux Specific Code.
 */

/*
 * Copyright (C) 2006-2008 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */

/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#include "the-linux-kernel.h"
#include "version-generated.h"
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <linux/miscdevice.h>

#define LOG_GROUP LOG_GROUP_NET_FLT_DRV
#include <VBox/log.h>
#include <VBox/err.h>
#include <iprt/alloca.h>
#include <iprt/assert.h>
#include <iprt/spinlock.h>
#include <iprt/semaphore.h>
#include <iprt/initterm.h>
#include <iprt/process.h>
#include <iprt/mem.h>
#include <iprt/log.h>
#include <iprt/mp.h>
#include <iprt/mem.h>
#include <iprt/time.h>

#define VBOXNETFLT_OS_SPECFIC 1
#include "../VBoxNetFltInternal.h"

#define VBOX_FLT_NB_TO_INST(pNB)    ((PVBOXNETFLTINS)((uint8_t *)pNB - \
                                     RT_OFFSETOF(VBOXNETFLTINS, u.s.Notifier)))
#define VBOX_FLT_PT_TO_INST(pPT)    ((PVBOXNETFLTINS)((uint8_t *)pPT - \
                                     RT_OFFSETOF(VBOXNETFLTINS, u.s.PacketType)))
#define VBOX_FLT_XT_TO_INST(pXT)    ((PVBOXNETFLTINS)((uint8_t *)pXT - \
                                     RT_OFFSETOF(VBOXNETFLTINS, u.s.XmitTask)))

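/*
 * The three *_TO_INST macros above are instances of the classic container-of
 * pattern: given a pointer to a member embedded inside VBOXNETFLTINS, subtract
 * the member's offset to recover the owning instance. An illustrative sketch
 * of the same idiom with hypothetical types (not part of this driver):
 *
 *     struct owner { int x; struct member m; };
 *     #define MEMBER_TO_OWNER(pM) \
 *         ((struct owner *)((uint8_t *)(pM) - RT_OFFSETOF(struct owner, m)))
 *
 * This is how the packet handler, the netdevice notifier and the transmit
 * task below each get back to their VBOXNETFLTINS from the embedded
 * PacketType, Notifier and XmitTask members the kernel hands them.
 */
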
#define VBOX_GET_PCOUNT(pDev)   (pDev->promiscuity)

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
# define VBOX_SKB_RESET_NETWORK_HDR(skb) skb_reset_network_header(skb)
# define VBOX_SKB_RESET_MAC_HDR(skb)     skb_reset_mac_header(skb)
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 22) */
# define VBOX_SKB_RESET_NETWORK_HDR(skb) skb->nh.raw = skb->data
# define VBOX_SKB_RESET_MAC_HDR(skb)     skb->mac.raw = skb->data
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 22) */

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
# define VBOX_SKB_CHECKSUM_HELP(skb) skb_checksum_help(skb)
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19) */
# define CHECKSUM_PARTIAL CHECKSUM_HW
# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 10)
#  define VBOX_SKB_CHECKSUM_HELP(skb) skb_checksum_help(skb, 0)
# else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 10) */
#  if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 7)
#   define VBOX_SKB_CHECKSUM_HELP(skb) skb_checksum_help(&skb, 0)
#  else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 7) */
#   define VBOX_SKB_CHECKSUM_HELP(skb) (!skb_checksum_help(skb))
#  endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 7) */
# endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 10) */
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19) */

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18)
# define VBOX_SKB_IS_GSO(skb) skb_is_gso(skb)
  /* No features, very dumb device */
# define VBOX_SKB_GSO_SEGMENT(skb) skb_gso_segment(skb, 0)
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) */
# define VBOX_SKB_IS_GSO(skb) false
# define VBOX_SKB_GSO_SEGMENT(skb) NULL
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) */

#ifndef NET_IP_ALIGN
# define NET_IP_ALIGN 2
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 12)
unsigned dev_get_flags(const struct net_device *dev)
{
    unsigned flags;

    flags = (dev->flags & ~(IFF_PROMISC |
                            IFF_ALLMULTI |
                            IFF_RUNNING)) |
            (dev->gflags & (IFF_PROMISC |
                            IFF_ALLMULTI));

    if (netif_running(dev) && netif_carrier_ok(dev))
        flags |= IFF_RUNNING;

    return flags;
}
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 12) */

/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
static int  VBoxNetFltLinuxInit(void);
static void VBoxNetFltLinuxUnload(void);


/*******************************************************************************
*   Global Variables                                                           *
*******************************************************************************/
#ifdef RT_ARCH_AMD64
/**
 * Memory for the executable memory heap (in IPRT).
 */
extern uint8_t g_abExecMemory[4096]; /* cannot donate less than one page */
__asm__(".section execmemory, \"awx\", @progbits\n\t"
        ".align 32\n\t"
        ".globl g_abExecMemory\n"
        "g_abExecMemory:\n\t"
        ".zero 4096\n\t"
        ".type g_abExecMemory, @object\n\t"
        ".size g_abExecMemory, 4096\n\t"
        ".text\n\t");
#endif

/**
 * The (common) global data.
 */
static VBOXNETFLTGLOBALS g_VBoxNetFltGlobals;

module_init(VBoxNetFltLinuxInit);
module_exit(VBoxNetFltLinuxUnload);

MODULE_AUTHOR("Sun Microsystems, Inc.");
MODULE_DESCRIPTION("VirtualBox Network Filter Driver");
MODULE_LICENSE("GPL");
#ifdef MODULE_VERSION
# define xstr(s) str(s)
# define str(s) #s
MODULE_VERSION(VBOX_VERSION_STRING " (" xstr(INTNETTRUNKIFPORT_VERSION) ")");
#endif
/**
 * Initialize module.
 *
 * @returns appropriate status code.
 */
static int __init VBoxNetFltLinuxInit(void)
{
    int rc;
    /*
     * Initialize IPRT.
     */
    rc = RTR0Init(0);
    if (RT_SUCCESS(rc))
    {
#ifdef RT_ARCH_AMD64
        rc = RTR0MemExecDonate(&g_abExecMemory[0], sizeof(g_abExecMemory));
        printk("VBoxNetFlt: dbg - g_abExecMemory=%p\n", (void *)&g_abExecMemory[0]);
        if (RT_FAILURE(rc))
        {
            printk("VBoxNetFlt: failed to donate exec memory, no logging will be available.\n");
        }
#endif
        Log(("VBoxNetFltLinuxInit\n"));

        /*
         * Initialize the globals and connect to the support driver.
         *
         * This will call back vboxNetFltOsOpenSupDrv (and maybe vboxNetFltOsCloseSupDrv)
         * for establishing the connection to the support driver.
         */
        memset(&g_VBoxNetFltGlobals, 0, sizeof(g_VBoxNetFltGlobals));
        rc = vboxNetFltInitGlobalsAndIdc(&g_VBoxNetFltGlobals);
        if (RT_SUCCESS(rc))
        {
            LogRel(("VBoxNetFlt: Successfully started.\n"));
            return 0;
        }
        else
            LogRel(("VBoxNetFlt: failed to initialize device extension (rc=%d)\n", rc));
        RTR0Term();
    }
    else
        LogRel(("VBoxNetFlt: failed to initialize IPRT (rc=%d)\n", rc));

    memset(&g_VBoxNetFltGlobals, 0, sizeof(g_VBoxNetFltGlobals));
    return -RTErrConvertToErrno(rc);
}


/**
 * Unload the module.
 *
 * @todo We have to prevent this if we're busy!
 */
static void __exit VBoxNetFltLinuxUnload(void)
{
    int rc;
    Log(("VBoxNetFltLinuxUnload\n"));
    Assert(vboxNetFltCanUnload(&g_VBoxNetFltGlobals));

    /*
     * Undo the work done during start (in reverse order).
     */
    rc = vboxNetFltTryDeleteIdcAndGlobals(&g_VBoxNetFltGlobals);
    AssertRC(rc); NOREF(rc);

    RTR0Term();

    memset(&g_VBoxNetFltGlobals, 0, sizeof(g_VBoxNetFltGlobals));

    Log(("VBoxNetFltLinuxUnload - done\n"));
}


/**
 * Reads and retains the host interface handle.
 *
 * @returns The handle, NULL if detached.
 * @param   pThis       The instance.
 */
DECLINLINE(struct net_device *) vboxNetFltLinuxRetainNetDev(PVBOXNETFLTINS pThis)
{
#if 0
    RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
    struct net_device *pDev = NULL;

    Log(("vboxNetFltLinuxRetainNetDev\n"));
    /*
     * Be careful here to avoid problems racing the detached callback.
     */
    RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
    if (!ASMAtomicUoReadBool(&pThis->fDisconnectedFromHost))
    {
        pDev = (struct net_device *)ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev);
        if (pDev)
        {
            dev_hold(pDev);
            Log(("vboxNetFltLinuxRetainNetDev: Device %p(%s) retained. ref=%d\n", pDev, pDev->name, atomic_read(&pDev->refcnt)));
        }
    }
    RTSpinlockRelease(pThis->hSpinlock, &Tmp);

    Log(("vboxNetFltLinuxRetainNetDev - done\n"));
    return pDev;
#else
    return (struct net_device *)ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev);
#endif
}


/**
 * Release the host interface handle previously retained
 * by vboxNetFltLinuxRetainNetDev.
 *
 * @param   pThis       The instance.
 * @param   pDev        The vboxNetFltLinuxRetainNetDev
 *                      return value, NULL is fine.
 */
DECLINLINE(void) vboxNetFltLinuxReleaseNetDev(PVBOXNETFLTINS pThis, struct net_device *pDev)
{
#if 0
    Log(("vboxNetFltLinuxReleaseNetDev\n"));
    NOREF(pThis);
    if (pDev)
    {
        dev_put(pDev);
        Log(("vboxNetFltLinuxReleaseNetDev: Device %p(%s) released. ref=%d\n", pDev, pDev->name, atomic_read(&pDev->refcnt)));
    }
    Log(("vboxNetFltLinuxReleaseNetDev - done\n"));
#endif
}

#define VBOXNETFLT_CB_TAG(skb)  (0xA1C90000 | (skb->dev->ifindex & 0xFFFF))
#define VBOXNETFLT_SKB_TAG(skb) (*(uint32_t*)&((skb)->cb[sizeof((skb)->cb)-sizeof(uint32_t)]))

/**
 * Checks whether this is an sk_buff created by vboxNetFltLinuxSkBufFromSG,
 * i.e. a buffer which we're pushing and should be ignored by the filter callbacks.
 *
 * @returns true / false accordingly.
 * @param   pBuf        The sk_buff.
 */
DECLINLINE(bool) vboxNetFltLinuxSkBufIsOur(struct sk_buff *pBuf)
{
    return VBOXNETFLT_SKB_TAG(pBuf) == VBOXNETFLT_CB_TAG(pBuf);
}

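/*
 * The tagging scheme above uses the last four bytes of the sk_buff control
 * buffer (skb->cb, a scratch area owned by whichever layer currently holds
 * the buffer) to mark packets this driver itself injects, so that the packet
 * handler can recognize its own injections and drop them instead of looping
 * them back into the internal network. A usage sketch, mirroring code found
 * elsewhere in this file:
 *
 *     VBOXNETFLT_SKB_TAG(pPkt) = VBOXNETFLT_CB_TAG(pPkt);   // mark on injection
 *     ...
 *     if (vboxNetFltLinuxSkBufIsOur(pBuf))                  // recognize in the handler
 *     {
 *         dev_kfree_skb(pBuf);
 *         return 0;
 *     }
 *
 * Folding the interface index into the tag keeps a value set for one
 * interface from matching a stale tag seen on another.
 */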

/**
 * Internal worker that creates a linux sk_buff for a
 * (scatter/)gather list.
 *
 * @returns Pointer to the sk_buff.
 * @param   pThis       The instance.
 * @param   pSG         The (scatter/)gather list.
 * @param   fDstWire    Set if the frame will be pushed onto the wire
 *                      (restores the ethernet header).
 */
static struct sk_buff *vboxNetFltLinuxSkBufFromSG(PVBOXNETFLTINS pThis, PINTNETSG pSG, bool fDstWire)
{
    struct sk_buff *pPkt;
    struct net_device *pDev;
    /*
     * Because we're lazy, we will ASSUME that all SGs coming from INTNET
     * will only contain one single segment.
     */
    if (pSG->cSegsUsed != 1 || pSG->cbTotal != pSG->aSegs[0].cb)
    {
        LogRel(("VBoxNetFlt: Dropped multi-segment(%d) packet coming from internal network.\n", pSG->cSegsUsed));
        return NULL;
    }
    if (pSG->cbTotal == 0)
    {
        LogRel(("VBoxNetFlt: Dropped empty packet coming from internal network.\n"));
        return NULL;
    }

    /*
     * Allocate a packet and copy over the data.
     */
    pDev = (struct net_device *)ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev);
    pPkt = dev_alloc_skb(pSG->cbTotal + NET_IP_ALIGN);
    if (pPkt)
    {
        pPkt->dev = pDev;
        /* Align IP header on 16-byte boundary: 2 + 14 (ethernet hdr size). */
        skb_reserve(pPkt, NET_IP_ALIGN);
        skb_put(pPkt, pSG->cbTotal);
        memcpy(pPkt->data, pSG->aSegs[0].pv, pSG->cbTotal);
        /* Set protocol and packet_type fields. */
        pPkt->protocol = eth_type_trans(pPkt, pDev);
        pPkt->ip_summed = CHECKSUM_NONE;
        if (fDstWire)
        {
            VBOX_SKB_RESET_NETWORK_HDR(pPkt);
            /* Restore ethernet header back. */
            skb_push(pPkt, ETH_HLEN);
            VBOX_SKB_RESET_MAC_HDR(pPkt);
        }
        VBOXNETFLT_SKB_TAG(pPkt) = VBOXNETFLT_CB_TAG(pPkt);

        return pPkt;
    }
    else
        Log(("vboxNetFltLinuxSkBufFromSG: Failed to allocate sk_buff(%u).\n", pSG->cbTotal));
    pSG->pvUserData = NULL;

    return NULL;
}

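/*
 * A sketch of the buffer layout vboxNetFltLinuxSkBufFromSG produces for the
 * single-segment case:
 *
 *     | NET_IP_ALIGN | ethernet header (ETH_HLEN) | payload ...        |
 *                    ^ mac header                 ^ network header
 *
 * eth_type_trans() consumes the ethernet header, advancing skb->data by
 * ETH_HLEN. For the wire direction the header is pushed back (skb_push) so
 * that dev_queue_xmit() transmits a complete frame, while for the host
 * direction netif_rx_ni() expects skb->data to point at the network header,
 * just as after a normal receive.
 */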

/**
 * Initializes a SG list from an sk_buff.
 *
 * @param   pThis       The instance.
 * @param   pBuf        The sk_buff.
 * @param   pSG         The SG.
 * @param   cSegs       The number of segments allocated for the SG.
 *                      This should match the number in the sk_buff exactly!
 * @param   fSrc        The source of the frame.
 */
DECLINLINE(void) vboxNetFltLinuxSkBufToSG(PVBOXNETFLTINS pThis, struct sk_buff *pBuf, PINTNETSG pSG, unsigned cSegs, uint32_t fSrc)
{
    int i;
    NOREF(pThis);

    Assert(!skb_shinfo(pBuf)->frag_list);
    pSG->pvOwnerData = NULL;
    pSG->pvUserData = NULL;
    pSG->pvUserData2 = NULL;
    pSG->cUsers = 1;
    pSG->fFlags = INTNETSG_FLAGS_TEMP;
    pSG->cSegsAlloc = cSegs;

    if (fSrc & INTNETTRUNKDIR_WIRE)
    {
        /*
         * The packet came from wire, ethernet header was removed by device driver.
         * Restore it.
         */
        skb_push(pBuf, ETH_HLEN);
    }
    pSG->cbTotal = pBuf->len;
#ifdef VBOXNETFLT_SG_SUPPORT
    pSG->aSegs[0].cb = skb_headlen(pBuf);
    pSG->aSegs[0].pv = pBuf->data;
    pSG->aSegs[0].Phys = NIL_RTHCPHYS;

    for (i = 0; i < skb_shinfo(pBuf)->nr_frags; i++)
    {
        skb_frag_t *pFrag = &skb_shinfo(pBuf)->frags[i];
        pSG->aSegs[i+1].cb = pFrag->size;
        pSG->aSegs[i+1].pv = kmap(pFrag->page);
        printk("%p = kmap()\n", pSG->aSegs[i+1].pv);
        pSG->aSegs[i+1].Phys = NIL_RTHCPHYS;
    }
    pSG->cSegsUsed = ++i;
#else
    pSG->aSegs[0].cb = pBuf->len;
    pSG->aSegs[0].pv = pBuf->data;
    pSG->aSegs[0].Phys = NIL_RTHCPHYS;
    pSG->cSegsUsed = i = 1;
#endif

#ifdef PADD_RUNT_FRAMES_FROM_HOST
    /*
     * Add a trailer if the frame is too small.
     *
     * Since we're getting to the packet before it is framed, it has not
     * yet been padded. The current solution is to add a segment pointing
     * to a buffer containing all zeros and pray that works for all frames...
     */
    if (pSG->cbTotal < 60 && (fSrc & INTNETTRUNKDIR_HOST))
    {
        static uint8_t const s_abZero[128] = {0};

        AssertReturnVoid(i < cSegs);

        pSG->aSegs[i].Phys = NIL_RTHCPHYS;
        pSG->aSegs[i].pv = (void *)&s_abZero[0];
        pSG->aSegs[i].cb = 60 - pSG->cbTotal;
        pSG->cbTotal = 60;
        pSG->cSegsUsed++;
    }
#endif
    Log4(("vboxNetFltLinuxSkBufToSG: allocated=%d, segments=%d frags=%d next=%p frag_list=%p pkt_type=%x fSrc=%x\n",
          pSG->cSegsAlloc, pSG->cSegsUsed, skb_shinfo(pBuf)->nr_frags, pBuf->next, skb_shinfo(pBuf)->frag_list, pBuf->pkt_type, fSrc));
    for (i = 0; i < pSG->cSegsUsed; i++)
        Log4(("vboxNetFltLinuxSkBufToSG: #%d: cb=%d pv=%p\n",
              i, pSG->aSegs[i].cb, pSG->aSegs[i].pv));
}

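/*
 * Worked example for the PADD_RUNT_FRAMES_FROM_HOST trailer above: a 42-byte
 * ARP request from the host (14-byte ethernet header + 28-byte ARP payload)
 * would be described as
 *
 *     aSegs[0]: cb = 42, pv = pBuf->data         (the frame itself)
 *     aSegs[1]: cb = 60 - 42 = 18, pv = s_abZero (zero padding)
 *     cbTotal  = 60                              (minimum ethernet frame, excluding FCS)
 *
 * which is why vboxNetFltLinuxSGSegments() below reserves one extra segment
 * for any sk_buff shorter than 60 bytes.
 */
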
/**
 * Packet handler, called for every packet the attached interface sees.
 *
 * @returns 0 (the return value is ignored by the kernel).
 * @param   pBuf        The sk_buff.
 * @param   pSkbDev     The device the packet arrived on.
 * @param   pPacketType The packet_type instance this handler was registered with.
 * @param   pOrigDev    The original receiving device (kernels 2.6.14 and later).
 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14)
static int vboxNetFltLinuxPacketHandler(struct sk_buff *pBuf,
                                        struct net_device *pSkbDev,
                                        struct packet_type *pPacketType,
                                        struct net_device *pOrigDev)
#else
static int vboxNetFltLinuxPacketHandler(struct sk_buff *pBuf,
                                        struct net_device *pSkbDev,
                                        struct packet_type *pPacketType)
#endif
{
    PVBOXNETFLTINS pThis;
    struct net_device *pDev;
    LogFlow(("vboxNetFltLinuxPacketHandler: pBuf=%p pSkbDev=%p pPacketType=%p\n",
             pBuf, pSkbDev, pPacketType));
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18)
    Log3(("vboxNetFltLinuxPacketHandler: skb len=%u data_len=%u truesize=%u next=%p nr_frags=%u gso_size=%u gso_seqs=%u gso_type=%x frag_list=%p pkt_type=%x\n",
          pBuf->len, pBuf->data_len, pBuf->truesize, pBuf->next, skb_shinfo(pBuf)->nr_frags, skb_shinfo(pBuf)->gso_size, skb_shinfo(pBuf)->gso_segs, skb_shinfo(pBuf)->gso_type, skb_shinfo(pBuf)->frag_list, pBuf->pkt_type));
#else
    Log3(("vboxNetFltLinuxPacketHandler: skb len=%u data_len=%u truesize=%u next=%p nr_frags=%u tso_size=%u tso_seqs=%u frag_list=%p pkt_type=%x\n",
          pBuf->len, pBuf->data_len, pBuf->truesize, pBuf->next, skb_shinfo(pBuf)->nr_frags, skb_shinfo(pBuf)->tso_size, skb_shinfo(pBuf)->tso_segs, skb_shinfo(pBuf)->frag_list, pBuf->pkt_type));
#endif
    /*
     * Drop it immediately?
     */
    if (!pBuf)
        return 0;

    pThis = VBOX_FLT_PT_TO_INST(pPacketType);
    pDev = (struct net_device *)ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev);
    if (pThis->u.s.pDev != pSkbDev)
    {
        Log(("vboxNetFltLinuxPacketHandler: Devices do not match, pThis may be wrong! pThis=%p\n", pThis));
        return 0;
    }

    Log4(("vboxNetFltLinuxPacketHandler: pBuf->cb dump:\n%.*Rhxd\n", sizeof(pBuf->cb), pBuf->cb));
    if (vboxNetFltLinuxSkBufIsOur(pBuf))
    {
        Log2(("vboxNetFltLinuxPacketHandler: got our own sk_buff, drop it.\n"));
        dev_kfree_skb(pBuf);
        return 0;
    }

#ifndef VBOXNETFLT_SG_SUPPORT
    {
        /*
         * Get rid of fragmented packets, they cause too much trouble.
         */
        struct sk_buff *pCopy = skb_copy(pBuf, GFP_ATOMIC);
        kfree_skb(pBuf);
        if (!pCopy)
        {
            LogRel(("VBoxNetFlt: Failed to allocate packet buffer, dropping the packet.\n"));
            return 0;
        }
        pBuf = pCopy;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18)
        Log3(("vboxNetFltLinuxPacketHandler: skb copy len=%u data_len=%u truesize=%u next=%p nr_frags=%u gso_size=%u gso_seqs=%u gso_type=%x frag_list=%p pkt_type=%x\n",
              pBuf->len, pBuf->data_len, pBuf->truesize, pBuf->next, skb_shinfo(pBuf)->nr_frags, skb_shinfo(pBuf)->gso_size, skb_shinfo(pBuf)->gso_segs, skb_shinfo(pBuf)->gso_type, skb_shinfo(pBuf)->frag_list, pBuf->pkt_type));
#else
        Log3(("vboxNetFltLinuxPacketHandler: skb copy len=%u data_len=%u truesize=%u next=%p nr_frags=%u tso_size=%u tso_seqs=%u frag_list=%p pkt_type=%x\n",
              pBuf->len, pBuf->data_len, pBuf->truesize, pBuf->next, skb_shinfo(pBuf)->nr_frags, skb_shinfo(pBuf)->tso_size, skb_shinfo(pBuf)->tso_segs, skb_shinfo(pBuf)->frag_list, pBuf->pkt_type));
#endif
    }
#endif

    /* Add the packet to transmit queue and schedule the bottom half. */
    skb_queue_tail(&pThis->u.s.XmitQueue, pBuf);
    schedule_work(&pThis->u.s.XmitTask);
    Log4(("vboxNetFltLinuxPacketHandler: scheduled work %p for sk_buff %p\n",
          &pThis->u.s.XmitTask, pBuf));
    /* It does not really matter what we return, it is ignored by the kernel. */
    return 0;
}

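/*
 * The handler above is invoked from the protocol tap installed with
 * dev_add_pack() (see vboxNetFltLinuxAttachToInterface below) and therefore
 * runs in atomic context, where it must not block. Hence the queue-and-defer
 * pattern: sk_buff_head has its own internal spinlock, so queueing is safe
 * here, and the real work happens later in process context. A minimal sketch
 * of the pattern with hypothetical names:
 *
 *     static int hook(struct sk_buff *pSkb, ...)
 *     {
 *         skb_queue_tail(&Queue, pSkb);   // safe in atomic context
 *         schedule_work(&Task);           // defer to the events workqueue
 *         return 0;
 *     }
 */
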
static unsigned vboxNetFltLinuxSGSegments(PVBOXNETFLTINS pThis, struct sk_buff *pBuf)
{
#ifdef VBOXNETFLT_SG_SUPPORT
    unsigned cSegs = 1 + skb_shinfo(pBuf)->nr_frags;
#else
    unsigned cSegs = 1;
#endif
#ifdef PADD_RUNT_FRAMES_FROM_HOST
    /*
     * Add a trailer if the frame is too small.
     */
    if (pBuf->len < 60)
        cSegs++;
#endif
    return cSegs;
}

/* WARNING! This function should only be called after vboxNetFltLinuxSkBufToSG()! */
static void vboxNetFltLinuxFreeSkBuff(struct sk_buff *pBuf, PINTNETSG pSG)
{
#ifdef VBOXNETFLT_SG_SUPPORT
    int i;

    for (i = 0; i < skb_shinfo(pBuf)->nr_frags; i++)
    {
        printk("kunmap(%p)\n", pSG->aSegs[i+1].pv);
        kunmap(pSG->aSegs[i+1].pv);
    }
#endif

    dev_kfree_skb(pBuf);
}

#ifndef LOG_ENABLED
#define vboxNetFltDumpPacket(a, b, c, d)
#else
static void vboxNetFltDumpPacket(PINTNETSG pSG, bool fEgress, const char *pszWhere, int iIncrement)
{
    uint8_t *pInt, *pExt;
    static int iPacketNo = 1;
    iPacketNo += iIncrement;
    if (fEgress)
    {
        pExt = pSG->aSegs[0].pv;
        pInt = pExt + 6;
    }
    else
    {
        pInt = pSG->aSegs[0].pv;
        pExt = pInt + 6;
    }
    Log(("VBoxNetFlt: (int)%02x:%02x:%02x:%02x:%02x:%02x"
         " %s (%s)%02x:%02x:%02x:%02x:%02x:%02x (%u bytes) packet #%u\n",
         pInt[0], pInt[1], pInt[2], pInt[3], pInt[4], pInt[5],
         fEgress ? "-->" : "<--", pszWhere,
         pExt[0], pExt[1], pExt[2], pExt[3], pExt[4], pExt[5],
         pSG->cbTotal, iPacketNo));
    Log3(("%.*Rhxd\n", pSG->aSegs[0].cb, pSG->aSegs[0].pv));
}
#endif

static int vboxNetFltLinuxForwardSegment(PVBOXNETFLTINS pThis, struct sk_buff *pBuf, uint32_t fSrc)
{
    unsigned cSegs = vboxNetFltLinuxSGSegments(pThis, pBuf);
    if (cSegs < MAX_SKB_FRAGS)
    {
        uint8_t *pTmp;
        PINTNETSG pSG = (PINTNETSG)alloca(RT_OFFSETOF(INTNETSG, aSegs[cSegs]));
        if (!pSG)
        {
            Log(("VBoxNetFlt: Failed to allocate SG buffer.\n"));
            return VERR_NO_MEMORY;
        }
        vboxNetFltLinuxSkBufToSG(pThis, pBuf, pSG, cSegs, fSrc);

        pTmp = pSG->aSegs[0].pv;
        vboxNetFltDumpPacket(pSG, false, (fSrc & INTNETTRUNKDIR_HOST) ? "host" : "wire", 1);
        pThis->pSwitchPort->pfnRecv(pThis->pSwitchPort, pSG, fSrc);
        Log4(("VBoxNetFlt: Dropping the sk_buff.\n"));
        vboxNetFltLinuxFreeSkBuff(pBuf, pSG);
    }

    return VINF_SUCCESS;
}

/*
 * Create a (scatter/)gather list for the sk_buff and feed it to the internal network.
 */
static void vboxNetFltLinuxForwardToIntNet(PVBOXNETFLTINS pThis, struct sk_buff *pBuf)
{
    uint32_t fSrc = pBuf->pkt_type == PACKET_OUTGOING ? INTNETTRUNKDIR_HOST : INTNETTRUNKDIR_WIRE;

    if (VBOX_SKB_IS_GSO(pBuf))
    {
        /* Need to segment the packet */
        struct sk_buff *pNext, *pSegment;
        //Log2(("vboxNetFltLinuxForwardToIntNet: cb=%u gso_size=%u gso_segs=%u gso_type=%u\n",
        //      pBuf->len, skb_shinfo(pBuf)->gso_size, skb_shinfo(pBuf)->gso_segs, skb_shinfo(pBuf)->gso_type));

        for (pSegment = VBOX_SKB_GSO_SEGMENT(pBuf); pSegment; pSegment = pNext)
        {
            pNext = pSegment->next;
            pSegment->next = 0;
            vboxNetFltLinuxForwardSegment(pThis, pSegment, fSrc);
        }
        dev_kfree_skb(pBuf);
    }
    else
    {
        if (pBuf->ip_summed == CHECKSUM_PARTIAL)
            if (VBOX_SKB_CHECKSUM_HELP(pBuf))
            {
                LogRel(("VBoxNetFlt: Failed to compute checksum, dropping the packet.\n"));
                dev_kfree_skb(pBuf);
                return;
            }
        vboxNetFltLinuxForwardSegment(pThis, pBuf, fSrc);
    }
}

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
static void vboxNetFltLinuxXmitTask(struct work_struct *pWork)
#else
static void vboxNetFltLinuxXmitTask(void *pWork)
#endif
{
    struct sk_buff *pBuf;
    bool fActive;
    PVBOXNETFLTINS pThis;
    RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;

    Log4(("vboxNetFltLinuxXmitTask: Got work %p.\n", pWork));
    pThis = VBOX_FLT_XT_TO_INST(pWork);
    /*
     * Active? Retain the instance and increment the busy counter.
     */
    RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
    fActive = ASMAtomicUoReadBool(&pThis->fActive);
    if (fActive)
        vboxNetFltRetain(pThis, true /* fBusy */);
    RTSpinlockRelease(pThis->hSpinlock, &Tmp);
    if (!fActive)
        return;

    while ((pBuf = skb_dequeue(&pThis->u.s.XmitQueue)) != 0)
        vboxNetFltLinuxForwardToIntNet(pThis, pBuf);

    vboxNetFltRelease(pThis, true /* fBusy */);
}

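/*
 * vboxNetFltLinuxXmitTask is the process-context half of the pattern noted
 * after the packet handler: it drains the queue the handler fills in atomic
 * context. The retain/release pair brackets the drain so the instance cannot
 * be torn down while buffers are still in flight; schematically:
 *
 *     if (instance still active)
 *         retain(pThis);                            // busy reference
 *     while ((pBuf = skb_dequeue(&Queue)) != NULL)
 *         forward(pBuf);                            // may block; we are in a workqueue
 *     release(pThis);
 */
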
/**
 * Internal worker for the netdevice notifier callback; attaches the filter
 * to the given device.
 *
 * @returns VBox status code.
 * @param   pThis       The instance.
 * @param   pDev        The device to attach to.
 */
static int vboxNetFltLinuxAttachToInterface(PVBOXNETFLTINS pThis, struct net_device *pDev)
{
    struct packet_type *pt;
    RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;

    LogFlow(("vboxNetFltLinuxAttachToInterface: pThis=%p (%s)\n", pThis, pThis->szName));

    if (!pDev)
    {
        Log(("VBoxNetFlt: failed to find device '%s'\n", pThis->szName));
        return VERR_INTNET_FLT_IF_NOT_FOUND;
    }

    dev_hold(pDev);
    RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
    ASMAtomicUoWritePtr((void * volatile *)&pThis->u.s.pDev, pDev);
    RTSpinlockRelease(pThis->hSpinlock, &Tmp);

    Log(("vboxNetFltLinuxAttachToInterface: Device %p(%s) retained. ref=%d\n", pDev, pDev->name, atomic_read(&pDev->refcnt)));
    Log(("vboxNetFltLinuxAttachToInterface: Got pDev=%p pThis=%p pThis->u.s.pDev=%p\n", pDev, pThis, ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev)));
    /*
     * Get the mac address while we still have a valid ifnet reference.
     */
    memcpy(&pThis->u.s.Mac, pDev->dev_addr, sizeof(pThis->u.s.Mac));

    pt = &pThis->u.s.PacketType;
    pt->type = __constant_htons(ETH_P_ALL);
    pt->dev  = pDev;
    pt->func = vboxNetFltLinuxPacketHandler;
    dev_add_pack(pt);
    RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
    pDev = (struct net_device *)ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev);
    if (pDev)
    {
        ASMAtomicUoWriteBool(&pThis->fDisconnectedFromHost, false);
        ASMAtomicUoWriteBool(&pThis->u.s.fRegistered, true);
        pDev = NULL; /* don't dereference it */
    }
    RTSpinlockRelease(pThis->hSpinlock, &Tmp);
    Log(("vboxNetFltLinuxAttachToInterface: this=%p: Packet handler installed.\n", pThis));

    /* Release the interface on failure. */
    if (pDev)
    {
        RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
        ASMAtomicUoWritePtr((void * volatile *)&pThis->u.s.pDev, NULL);
        RTSpinlockRelease(pThis->hSpinlock, &Tmp);
        dev_put(pDev);
        Log(("vboxNetFltLinuxAttachToInterface: Device %p(%s) released. ref=%d\n", pDev, pDev->name, atomic_read(&pDev->refcnt)));
    }

    LogRel(("VBoxNetFlt: attached to '%s' / %.*Rhxs\n", pThis->szName, sizeof(pThis->u.s.Mac), &pThis->u.s.Mac));
    return VINF_SUCCESS;
}


static int vboxNetFltLinuxUnregisterDevice(PVBOXNETFLTINS pThis, struct net_device *pDev)
{
    RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;

    Assert(!pThis->fDisconnectedFromHost);
    RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
    ASMAtomicWriteBool(&pThis->u.s.fRegistered, false);
    ASMAtomicWriteBool(&pThis->fDisconnectedFromHost, true);
    ASMAtomicUoWritePtr((void * volatile *)&pThis->u.s.pDev, NULL);
    RTSpinlockRelease(pThis->hSpinlock, &Tmp);

    dev_remove_pack(&pThis->u.s.PacketType);
    skb_queue_purge(&pThis->u.s.XmitQueue);
    Log(("vboxNetFltLinuxUnregisterDevice: this=%p: Packet handler removed, xmit queue purged.\n", pThis));
    Log(("vboxNetFltLinuxUnregisterDevice: Device %p(%s) released. ref=%d\n", pDev, pDev->name, atomic_read(&pDev->refcnt)));
    dev_put(pDev);

    return NOTIFY_OK;
}

static int vboxNetFltLinuxDeviceIsUp(PVBOXNETFLTINS pThis, struct net_device *pDev)
{
    /* Check if we are not suspended and promiscuous mode has not been set. */
    if (ASMAtomicUoReadBool(&pThis->fActive) && !ASMAtomicUoReadBool(&pThis->u.s.fPromiscuousSet))
    {
        /* Note that there is no need for locking as the kernel got hold of the lock already. */
        dev_set_promiscuity(pDev, 1);
        ASMAtomicWriteBool(&pThis->u.s.fPromiscuousSet, true);
        Log(("vboxNetFltLinuxDeviceIsUp: enabled promiscuous mode on %s (%d)\n", pThis->szName, VBOX_GET_PCOUNT(pDev)));
    }
    else
        Log(("vboxNetFltLinuxDeviceIsUp: no need to enable promiscuous mode on %s (%d)\n", pThis->szName, VBOX_GET_PCOUNT(pDev)));
    return NOTIFY_OK;
}

static int vboxNetFltLinuxDeviceGoingDown(PVBOXNETFLTINS pThis, struct net_device *pDev)
{
    /* Undo promiscuous mode if we set it. */
    if (ASMAtomicUoReadBool(&pThis->u.s.fPromiscuousSet))
    {
        /* Note that there is no need for locking as the kernel got hold of the lock already. */
        dev_set_promiscuity(pDev, -1);
        ASMAtomicWriteBool(&pThis->u.s.fPromiscuousSet, false);
        Log(("vboxNetFltLinuxDeviceGoingDown: disabled promiscuous mode on %s (%d)\n", pThis->szName, VBOX_GET_PCOUNT(pDev)));
    }
    else
        Log(("vboxNetFltLinuxDeviceGoingDown: no need to disable promiscuous mode on %s (%d)\n", pThis->szName, VBOX_GET_PCOUNT(pDev)));
    return NOTIFY_OK;
}

static int vboxNetFltLinuxNotifierCallback(struct notifier_block *self, unsigned long ulEventType, void *ptr)
{
    int rc = NOTIFY_OK;
#ifdef DEBUG
    char *pszEvent = "<unknown>";
#endif
    struct net_device *pDev = (struct net_device *)ptr;
    PVBOXNETFLTINS pThis = VBOX_FLT_NB_TO_INST(self);

#ifdef DEBUG
    switch (ulEventType)
    {
        case NETDEV_REGISTER:   pszEvent = "NETDEV_REGISTER"; break;
        case NETDEV_UNREGISTER: pszEvent = "NETDEV_UNREGISTER"; break;
        case NETDEV_UP:         pszEvent = "NETDEV_UP"; break;
        case NETDEV_DOWN:       pszEvent = "NETDEV_DOWN"; break;
        case NETDEV_REBOOT:     pszEvent = "NETDEV_REBOOT"; break;
        case NETDEV_CHANGENAME: pszEvent = "NETDEV_CHANGENAME"; break;
        case NETDEV_CHANGE:     pszEvent = "NETDEV_CHANGE"; break;
        case NETDEV_CHANGEMTU:  pszEvent = "NETDEV_CHANGEMTU"; break;
        case NETDEV_CHANGEADDR: pszEvent = "NETDEV_CHANGEADDR"; break;
        case NETDEV_GOING_DOWN: pszEvent = "NETDEV_GOING_DOWN"; break;
    }
    Log(("VBoxNetFlt: got event %s(0x%lx) on %s, pDev=%p pThis=%p pThis->u.s.pDev=%p\n",
         pszEvent, ulEventType, pDev->name, pDev, pThis, ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev)));
#endif
    if (ulEventType == NETDEV_REGISTER && !strcmp(pDev->name, pThis->szName))
    {
        vboxNetFltLinuxAttachToInterface(pThis, pDev);
    }
    else
    {
        pDev = (struct net_device *)ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev);
        if (pDev != ptr)
            return NOTIFY_OK;
        rc = NOTIFY_OK;
        switch (ulEventType)
        {
            case NETDEV_UNREGISTER:
                rc = vboxNetFltLinuxUnregisterDevice(pThis, pDev);
                break;
            case NETDEV_UP:
                rc = vboxNetFltLinuxDeviceIsUp(pThis, pDev);
                break;
            case NETDEV_GOING_DOWN:
                rc = vboxNetFltLinuxDeviceGoingDown(pThis, pDev);
                break;
            case NETDEV_CHANGENAME:
                break;
        }
    }

    return rc;
}

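/*
 * The callback above is wired up in vboxNetFltOsInitInstance() via
 * register_netdevice_notifier(). Registration replays NETDEV_REGISTER (and
 * NETDEV_UP for devices that are already up) for every existing interface,
 * which is how this driver attaches to an interface that existed before the
 * notifier was installed, without a separate lookup. A minimal sketch:
 *
 *     struct notifier_block Nb = { .notifier_call = callback };
 *     register_netdevice_notifier(&Nb);   // may invoke callback synchronously
 *     ...
 *     unregister_netdevice_notifier(&Nb);
 */
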
bool vboxNetFltOsMaybeRediscovered(PVBOXNETFLTINS pThis)
{
    return !ASMAtomicUoReadBool(&pThis->fDisconnectedFromHost);
}

int vboxNetFltPortOsXmit(PVBOXNETFLTINS pThis, PINTNETSG pSG, uint32_t fDst)
{
    struct net_device *pDev;
    int err;
    int rc = VINF_SUCCESS;

    LogFlow(("vboxNetFltPortOsXmit: pThis=%p (%s)\n", pThis, pThis->szName));

    pDev = vboxNetFltLinuxRetainNetDev(pThis);
    if (pDev)
    {
        /*
         * Create a sk_buff for the gather list and push it onto the wire.
         */
        if (fDst & INTNETTRUNKDIR_WIRE)
        {
            struct sk_buff *pBuf = vboxNetFltLinuxSkBufFromSG(pThis, pSG, true);
            if (pBuf)
            {
                vboxNetFltDumpPacket(pSG, true, "wire", 1);
                Log4(("vboxNetFltPortOsXmit: pBuf->cb dump:\n%.*Rhxd\n", sizeof(pBuf->cb), pBuf->cb));
                Log4(("vboxNetFltPortOsXmit: dev_queue_xmit(%p)\n", pBuf));
                err = dev_queue_xmit(pBuf);
                if (err)
                    rc = RTErrConvertFromErrno(err);
            }
            else
                rc = VERR_NO_MEMORY;
        }

        /*
         * Create a sk_buff for the gather list and push it onto the host stack.
         */
        if (fDst & INTNETTRUNKDIR_HOST)
        {
            struct sk_buff *pBuf = vboxNetFltLinuxSkBufFromSG(pThis, pSG, false);
            if (pBuf)
            {
                vboxNetFltDumpPacket(pSG, true, "host", (fDst & INTNETTRUNKDIR_WIRE) ? 0 : 1);
                Log4(("vboxNetFltPortOsXmit: pBuf->cb dump:\n%.*Rhxd\n", sizeof(pBuf->cb), pBuf->cb));
                Log4(("vboxNetFltPortOsXmit: netif_rx_ni(%p)\n", pBuf));
                err = netif_rx_ni(pBuf);
                if (err)
                    rc = RTErrConvertFromErrno(err);
            }
            else
                rc = VERR_NO_MEMORY;
        }

        vboxNetFltLinuxReleaseNetDev(pThis, pDev);
    }

    return rc;
}

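/*
 * Note the asymmetry in vboxNetFltPortOsXmit above: frames bound for the
 * wire leave through the driver transmit path, while frames bound for the
 * host are handed to the stack as if they had just been received:
 *
 *     if (fDst & INTNETTRUNKDIR_WIRE)  dev_queue_xmit(pBuf);  // out the NIC
 *     if (fDst & INTNETTRUNKDIR_HOST)  netif_rx_ni(pBuf);     // up the stack
 *
 * Both copies were tagged via VBOXNETFLT_SKB_TAG inside
 * vboxNetFltLinuxSkBufFromSG, so the packet handler drops them on sight
 * rather than reflecting them back into the internal network.
 */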

bool vboxNetFltPortOsIsPromiscuous(PVBOXNETFLTINS pThis)
{
    bool fRc = false;
    struct net_device *pDev = vboxNetFltLinuxRetainNetDev(pThis);
    if (pDev)
    {
        fRc = !!(pDev->promiscuity - (ASMAtomicUoReadBool(&pThis->u.s.fPromiscuousSet) & 1));
        LogFlow(("vboxNetFltPortOsIsPromiscuous: returns %d, pDev->promiscuity=%d, fPromiscuousSet=%d\n",
                 fRc, pDev->promiscuity, pThis->u.s.fPromiscuousSet));
        vboxNetFltLinuxReleaseNetDev(pThis, pDev);
    }
    return fRc;
}


void vboxNetFltPortOsGetMacAddress(PVBOXNETFLTINS pThis, PRTMAC pMac)
{
    *pMac = pThis->u.s.Mac;
}


bool vboxNetFltPortOsIsHostMac(PVBOXNETFLTINS pThis, PCRTMAC pMac)
{
    /* ASSUMES that the MAC address never changes. */
    return pThis->u.s.Mac.au16[0] == pMac->au16[0]
        && pThis->u.s.Mac.au16[1] == pMac->au16[1]
        && pThis->u.s.Mac.au16[2] == pMac->au16[2];
}


void vboxNetFltPortOsSetActive(PVBOXNETFLTINS pThis, bool fActive)
{
    struct net_device *pDev;

    LogFlow(("vboxNetFltPortOsSetActive: pThis=%p (%s), fActive=%s, fDisablePromiscuous=%s\n",
             pThis, pThis->szName, fActive ? "true" : "false",
             pThis->fDisablePromiscuous ? "true" : "false"));

    if (pThis->fDisablePromiscuous)
        return;

    pDev = vboxNetFltLinuxRetainNetDev(pThis);
    if (pDev)
    {
        /*
         * This api is a bit weird, the best reference is the code.
         *
         * Also, we have a bit of a race condition wrt the maintenance of
         * the host interface promiscuity for vboxNetFltPortOsIsPromiscuous.
         */
#ifdef LOG_ENABLED
        u_int16_t fIf;
        unsigned const cPromiscBefore = VBOX_GET_PCOUNT(pDev);
#endif
        if (fActive)
        {
            Assert(!pThis->u.s.fPromiscuousSet);

            rtnl_lock();
            dev_set_promiscuity(pDev, 1);
            rtnl_unlock();
            pThis->u.s.fPromiscuousSet = true;
            Log(("vboxNetFltPortOsSetActive: enabled promiscuous mode on %s (%d)\n", pThis->szName, VBOX_GET_PCOUNT(pDev)));
        }
        else
        {
            if (pThis->u.s.fPromiscuousSet)
            {
                rtnl_lock();
                dev_set_promiscuity(pDev, -1);
                rtnl_unlock();
                Log(("vboxNetFltPortOsSetActive: disabled promiscuous mode on %s (%d)\n", pThis->szName, VBOX_GET_PCOUNT(pDev)));
            }
            pThis->u.s.fPromiscuousSet = false;

#ifdef LOG_ENABLED
            fIf = dev_get_flags(pDev);
            Log(("VBoxNetFlt: fIf=%#x; %d->%d\n", fIf, cPromiscBefore, VBOX_GET_PCOUNT(pDev)));
#endif
        }

        vboxNetFltLinuxReleaseNetDev(pThis, pDev);
    }
}

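/*
 * Two details worth calling out in vboxNetFltPortOsSetActive above:
 * dev_set_promiscuity() adjusts a reference count (pDev->promiscuity), not a
 * boolean flag, which is why activation passes +1 and deactivation -1, and
 * why vboxNetFltPortOsIsPromiscuous subtracts this driver's own contribution
 * before deciding whether anyone else holds the interface promiscuous.
 * And unlike the notifier callbacks, which run with the RTNL already held by
 * the kernel, this path must take it explicitly; a sketch:
 *
 *     rtnl_lock();
 *     dev_set_promiscuity(pDev, fActive ? 1 : -1);
 *     rtnl_unlock();
 */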

int vboxNetFltOsDisconnectIt(PVBOXNETFLTINS pThis)
{
    /* Nothing to do here. */
    return VINF_SUCCESS;
}


int vboxNetFltOsConnectIt(PVBOXNETFLTINS pThis)
{
    /* Nothing to do here. */
    return VINF_SUCCESS;
}


void vboxNetFltOsDeleteInstance(PVBOXNETFLTINS pThis)
{
    struct net_device *pDev;
    bool fRegistered;
    RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;

    RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
    pDev = (struct net_device *)ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev);
    fRegistered = ASMAtomicUoReadBool(&pThis->u.s.fRegistered);
    RTSpinlockRelease(pThis->hSpinlock, &Tmp);
    if (fRegistered)
    {
        dev_remove_pack(&pThis->u.s.PacketType);
        skb_queue_purge(&pThis->u.s.XmitQueue);
        Log(("vboxNetFltOsDeleteInstance: this=%p: Packet handler removed, xmit queue purged.\n", pThis));
        Log(("vboxNetFltOsDeleteInstance: Device %p(%s) released. ref=%d\n", pDev, pDev->name, atomic_read(&pDev->refcnt)));
        dev_put(pDev);
    }
    Log(("vboxNetFltOsDeleteInstance: this=%p: Notifier removed.\n", pThis));
    unregister_netdevice_notifier(&pThis->u.s.Notifier);
    module_put(THIS_MODULE);
}


int vboxNetFltOsInitInstance(PVBOXNETFLTINS pThis, void *pvContext)
{
    int err;
    NOREF(pvContext);

    pThis->u.s.Notifier.notifier_call = vboxNetFltLinuxNotifierCallback;
    err = register_netdevice_notifier(&pThis->u.s.Notifier);
    if (err)
        return VERR_INTNET_FLT_IF_FAILED;
    if (!pThis->u.s.fRegistered)
    {
        unregister_netdevice_notifier(&pThis->u.s.Notifier);
        LogRel(("VBoxNetFlt: failed to find %s.\n", pThis->szName));
        return VERR_INTNET_FLT_IF_NOT_FOUND;
    }

    Log(("vboxNetFltOsInitInstance: this=%p: Notifier installed.\n", pThis));
    if (   pThis->fDisconnectedFromHost
        || !try_module_get(THIS_MODULE))
        return VERR_INTNET_FLT_IF_FAILED;

    return VINF_SUCCESS;
}

int vboxNetFltOsPreInitInstance(PVBOXNETFLTINS pThis)
{
    /*
     * Init the linux specific members.
     */
    pThis->u.s.pDev = NULL;
    pThis->u.s.fRegistered = false;
    pThis->u.s.fPromiscuousSet = false;
    memset(&pThis->u.s.PacketType, 0, sizeof(pThis->u.s.PacketType));
    skb_queue_head_init(&pThis->u.s.XmitQueue);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
    INIT_WORK(&pThis->u.s.XmitTask, vboxNetFltLinuxXmitTask);
#else
    INIT_WORK(&pThis->u.s.XmitTask, vboxNetFltLinuxXmitTask, &pThis->u.s.XmitTask);
#endif

    return VINF_SUCCESS;
}

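/*
 * The INIT_WORK distinction above covers the work-queue API change in kernel
 * 2.6.20: older kernels passed an opaque data pointer (here the work item
 * itself) to the work function, while newer kernels pass the work_struct and
 * expect the handler to recover its context container-of style, which is
 * exactly what VBOX_FLT_XT_TO_INST does in vboxNetFltLinuxXmitTask. A sketch
 * of the >= 2.6.20 side:
 *
 *     static void task(struct work_struct *pWork)
 *     {
 *         PVBOXNETFLTINS pThis = VBOX_FLT_XT_TO_INST(pWork);
 *         ...
 *     }
 *     INIT_WORK(&pThis->u.s.XmitTask, task);
 */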