VirtualBox

source: vbox/trunk/src/VBox/HostDrivers/VBoxNetFlt/linux/VBoxNetFlt-linux.c @ 18810

Last change to this file since revision 18810 was revision 18706, committed by vboxsync, 16 years ago:

TAP -> NetAdp rename for linux NetFlt

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 38.2 KB
 
1/* $Id: VBoxNetFlt-linux.c 18706 2009-04-03 17:25:02Z vboxsync $ */
2/** @file
3 * VBoxNetFlt - Network Filter Driver (Host), Linux Specific Code.
4 */
5
6/*
7 * Copyright (C) 2006-2008 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22/*******************************************************************************
23* Header Files *
24*******************************************************************************/
25#include "the-linux-kernel.h"
26#include "version-generated.h"
27#include <linux/netdevice.h>
28#include <linux/etherdevice.h>
29#include <linux/rtnetlink.h>
30
31#define LOG_GROUP LOG_GROUP_NET_FLT_DRV
32#include <VBox/log.h>
33#include <VBox/err.h>
34#include <iprt/alloca.h>
35#include <iprt/assert.h>
36#include <iprt/spinlock.h>
37#include <iprt/semaphore.h>
38#include <iprt/initterm.h>
39#include <iprt/process.h>
40#include <iprt/mem.h>
41#include <iprt/log.h>
42#include <iprt/mp.h>
43#include <iprt/mem.h>
44#include <iprt/time.h>
45
46#define VBOXNETFLT_OS_SPECFIC 1
47#include "../VBoxNetFltInternal.h"
48
/* Recover the owning VBOXNETFLTINS instance from a pointer to one of the
   structures embedded in its u.s member (container_of-style arithmetic). */
#define VBOX_FLT_NB_TO_INST(pNB) ((PVBOXNETFLTINS)((uint8_t *)pNB - \
                                  RT_OFFSETOF(VBOXNETFLTINS, u.s.Notifier)))
#define VBOX_FLT_PT_TO_INST(pPT) ((PVBOXNETFLTINS)((uint8_t *)pPT - \
                                  RT_OFFSETOF(VBOXNETFLTINS, u.s.PacketType)))
#define VBOX_FLT_XT_TO_INST(pXT) ((PVBOXNETFLTINS)((uint8_t *)pXT - \
                                  RT_OFFSETOF(VBOXNETFLTINS, u.s.XmitTask)))

/* Current promiscuity reference count of a net_device. */
#define VBOX_GET_PCOUNT(pDev) (pDev->promiscuity)

/* The skb header accessors were reworked in 2.6.22; provide a uniform API. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
# define VBOX_SKB_RESET_NETWORK_HDR(skb) skb_reset_network_header(skb)
# define VBOX_SKB_RESET_MAC_HDR(skb) skb_reset_mac_header(skb)
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 22) */
# define VBOX_SKB_RESET_NETWORK_HDR(skb) skb->nh.raw = skb->data
# define VBOX_SKB_RESET_MAC_HDR(skb) skb->mac.raw = skb->data
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 22) */

/* skb_checksum_help() changed its signature several times; smooth it over. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
# define VBOX_SKB_CHECKSUM_HELP(skb) skb_checksum_help(skb)
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19) */
# define CHECKSUM_PARTIAL CHECKSUM_HW
# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 10)
#  define VBOX_SKB_CHECKSUM_HELP(skb) skb_checksum_help(skb, 0)
# else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 10) */
#  if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 7)
#   define VBOX_SKB_CHECKSUM_HELP(skb) skb_checksum_help(&skb, 0)
#  else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 7) */
#   define VBOX_SKB_CHECKSUM_HELP(skb) (!skb_checksum_help(skb))
#  endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 7) */
# endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 10) */
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19) */

/* GSO support appeared in 2.6.18; on older kernels pretend no skb is GSO. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18)
# define VBOX_SKB_IS_GSO(skb) skb_is_gso(skb)
  /* No features, very dumb device */
# define VBOX_SKB_GSO_SEGMENT(skb) skb_gso_segment(skb, 0)
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) */
# define VBOX_SKB_IS_GSO(skb) false
# define VBOX_SKB_GSO_SEGMENT(skb) NULL
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) */

/* Fallback for kernels that do not define NET_IP_ALIGN. */
#ifndef NET_IP_ALIGN
# define NET_IP_ALIGN 2
#endif
93
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 12)
/**
 * Backport of the kernel's dev_get_flags() for kernels older than 2.6.12,
 * which do not export it.
 *
 * Combines dev->flags with the user-requested promiscuity/allmulti bits kept
 * in dev->gflags, and synthesizes IFF_RUNNING from the operational state.
 *
 * @returns The effective interface flags.
 * @param   dev     The network device to query.
 */
unsigned dev_get_flags(const struct net_device *dev)
{
    unsigned flags;

    /* Mask out the separately-maintained bits, then merge in the
       user-space requested values from gflags. */
    flags = (dev->flags & ~(IFF_PROMISC |
                            IFF_ALLMULTI |
                            IFF_RUNNING)) |
            (dev->gflags & (IFF_PROMISC |
                            IFF_ALLMULTI));

    /* IFF_RUNNING means: administratively up and carrier present. */
    if (netif_running(dev) && netif_carrier_ok(dev))
        flags |= IFF_RUNNING;

    return flags;
}
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 12) */
111
112/*******************************************************************************
113* Internal Functions *
114*******************************************************************************/
115static int VBoxNetFltLinuxInit(void);
116static void VBoxNetFltLinuxUnload(void);
117
118
119/*******************************************************************************
120* Global Variables *
121*******************************************************************************/
#ifdef RT_ARCH_AMD64
/**
 * Memory for the executable memory heap (in IPRT).
 *
 * Reserved in a dedicated "awx" section and donated to IPRT via
 * RTR0MemExecDonate() in VBoxNetFltLinuxInit(). (NOTE(review): presumably
 * needed because IPRT requires executable memory near the module image on
 * AMD64 — confirm against the IPRT documentation.)
 */
extern uint8_t g_abExecMemory[4096]; /* cannot donate less than one page */
__asm__(".section execmemory, \"awx\", @progbits\n\t"
        ".align 32\n\t"
        ".globl g_abExecMemory\n"
        "g_abExecMemory:\n\t"
        ".zero 4096\n\t"
        ".type g_abExecMemory, @object\n\t"
        ".size g_abExecMemory, 4096\n\t"
        ".text\n\t");
#endif
139
/** The (common) global data. */
static VBOXNETFLTGLOBALS g_VBoxNetFltGlobals;

/* Module entry and exit points. */
module_init(VBoxNetFltLinuxInit);
module_exit(VBoxNetFltLinuxUnload);

MODULE_AUTHOR("Sun Microsystems, Inc.");
MODULE_DESCRIPTION("VirtualBox Network Filter Driver");
MODULE_LICENSE("GPL");
#ifdef MODULE_VERSION
/* Stringification helpers for composing the module version string. */
# define xstr(s) str(s)
# define str(s) #s
MODULE_VERSION(VBOX_VERSION_STRING " (" xstr(INTNETTRUNKIFPORT_VERSION) ")");
#endif
153
/* Note: a duplicate tentative definition of g_VBoxNetFltGlobals used to live
 * here; it has been removed — the variable is defined once earlier in this
 * file, next to the module_init()/module_exit() hooks. */
158
159
/*
 * NetAdp-related part: the virtual "vboxnet%d" host network adapter.
 */

/** Name template handed to alloc_netdev() for interface naming. */
#define VBOX_NETADP_NAME "vboxnet%d"

/** The single vboxnet device created by this module; set by
 *  vboxNetAdpRegisterNetDev() and cleared by vboxNetAdpUnregisterNetDev(). */
struct net_device *g_pNetDev;

/** Private per-device state: just the statistics reported to the kernel. */
struct VBoxNetAdpPriv
{
    struct net_device_stats Stats;
};
typedef struct VBoxNetAdpPriv VBOXNETADPPRIV;
typedef VBOXNETADPPRIV *PVBOXNETADPPRIV;
174
175static int vboxNetAdpOpen(struct net_device *pNetDev)
176{
177 netif_start_queue(pNetDev);
178 printk("vboxNetAdpOpen returns 0\n");
179 return 0;
180}
181
/**
 * net_device close (ifconfig down) callback: stop accepting packets.
 *
 * @returns 0, as required by the net_device interface.
 * @param   pNetDev     The device being closed.
 */
static int vboxNetAdpStop(struct net_device *pNetDev)
{
    netif_stop_queue(pNetDev);
    return 0;
}
187
188static int vboxNetAdpXmit(struct sk_buff *pSkb, struct net_device *pNetDev)
189{
190 PVBOXNETADPPRIV pPriv = netdev_priv(pNetDev);
191
192 /* Update the stats. */
193 pPriv->Stats.tx_packets++;
194 pPriv->Stats.tx_bytes += pSkb->len;
195 /* Update transmission time stamp. */
196 pNetDev->trans_start = jiffies;
197 /* Nothing else to do, just free the sk_buff. */
198 dev_kfree_skb(pSkb);
199 return 0;
200}
201
202struct net_device_stats *vboxNetAdpGetStats(struct net_device *pNetDev)
203{
204 PVBOXNETADPPRIV pPriv = netdev_priv(pNetDev);
205 return &pPriv->Stats;
206}
207
208/* Currently not referenced in vboxNetAdpNetDevInit
209static int vboxNetAdpValidateAddr(struct net_device *dev)
210{
211 Log(("vboxNetAdpValidateAddr: %02x:%02x:%02x:%02x:%02x:%02x\n",
212 dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
213 dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]));
214 return -EADDRNOTAVAIL;
215} */
216
217static void vboxNetAdpNetDevInit(struct net_device *pNetDev)
218{
219 PVBOXNETADPPRIV pPriv;
220
221 ether_setup(pNetDev);
222 /// @todo Use Sun vendor id
223 memcpy(pNetDev->dev_addr, "\0vbnet", ETH_ALEN);
224 Log(("vboxNetAdpNetDevInit: pNetDev->dev_addr = %.6Rhxd\n", pNetDev->dev_addr));
225 pNetDev->open = vboxNetAdpOpen;
226 pNetDev->stop = vboxNetAdpStop;
227 pNetDev->hard_start_xmit = vboxNetAdpXmit;
228 pNetDev->get_stats = vboxNetAdpGetStats;
229 //pNetDev->validate_addr = vboxNetAdpValidateAddr;
230/* pNetDev-> = vboxNetAdp;
231 pNetDev-> = vboxNetAdp;
232 pNetDev-> = vboxNetAdp;
233 pNetDev-> = vboxNetAdp;
234 pNetDev-> = vboxNetAdp;*/
235
236 pPriv = netdev_priv(pNetDev);
237 memset(pPriv, 0, sizeof(*pPriv));
238}
239
240static int vboxNetAdpRegisterNetDev(void)
241{
242 int rc = VINF_SUCCESS;
243 struct net_device *pNetDev;
244
245 /* No need for private data. */
246 pNetDev = alloc_netdev(sizeof(VBOXNETADPPRIV), VBOX_NETADP_NAME, vboxNetAdpNetDevInit);
247 if (pNetDev)
248 {
249 int err = register_netdev(pNetDev);
250 if (!err)
251 {
252 g_pNetDev = pNetDev;
253 return VINF_SUCCESS;
254 }
255 free_netdev(pNetDev);
256 rc = RTErrConvertFromErrno(err);
257 }
258 return rc;
259}
260
261static int vboxNetAdpUnregisterNetDev(void)
262{
263 unregister_netdev(g_pNetDev);
264 free_netdev(g_pNetDev);
265 g_pNetDev = NULL;
266 return VINF_SUCCESS;
267}
268
/**
 * Initialize module.
 *
 * Initializes IPRT, connects to the support driver (IDC) and registers the
 * vboxnet network adapter. On failure everything done so far is undone and
 * a negated errno is returned to the kernel.
 *
 * @returns 0 on success, negative errno on failure.
 */
static int __init VBoxNetFltLinuxInit(void)
{
    int rc;
    /*
     * Initialize IPRT.
     */
    rc = RTR0Init(0);
    if (RT_SUCCESS(rc))
    {
#ifdef RT_ARCH_AMD64
        /* Donate the reserved chunk to IPRT's executable memory heap. */
        rc = RTR0MemExecDonate(&g_abExecMemory[0], sizeof(g_abExecMemory));
        printk("VBoxNetFlt: dbg - g_abExecMemory=%p\n", (void *)&g_abExecMemory[0]);
        if (RT_FAILURE(rc))
        {
            /* Not fatal: only the logging facility is lost. */
            printk("VBoxNetFlt: failed to donate exec memory, no logging will be available.\n");
        }
#endif
        Log(("VBoxNetFltLinuxInit\n"));

        /*
         * Initialize the globals and connect to the support driver.
         *
         * This will call back vboxNetFltOsOpenSupDrv (and maybe vboxNetFltOsCloseSupDrv)
         * for establishing the connect to the support driver.
         */
        memset(&g_VBoxNetFltGlobals, 0, sizeof(g_VBoxNetFltGlobals));
        rc = vboxNetFltInitGlobalsAndIdc(&g_VBoxNetFltGlobals);
        if (RT_SUCCESS(rc))
        {
            rc = vboxNetAdpRegisterNetDev();
            if (RT_SUCCESS(rc))
            {
                LogRel(("VBoxNetFlt: Successfully started.\n"));
                return 0;
            }
            else
                /* NOTE(review): the IDC connection established above is not
                   torn down on this path before RTR0Term() — verify whether
                   vboxNetFltTryDeleteIdcAndGlobals() should be called here. */
                LogRel(("VBoxNetFlt: failed to register device (rc=%d)\n", rc));
        }
        else
            LogRel(("VBoxNetFlt: failed to initialize device extension (rc=%d)\n", rc));
        RTR0Term();
    }
    else
        LogRel(("VBoxNetFlt: failed to initialize IPRT (rc=%d)\n", rc));

    /* Failure: scrub the globals and translate the status code for Linux. */
    memset(&g_VBoxNetFltGlobals, 0, sizeof(g_VBoxNetFltGlobals));
    return -RTErrConvertToErrno(rc);
}
322
323
/**
 * Unload the module.
 *
 * Tears down everything VBoxNetFltLinuxInit() set up, in reverse order:
 * unregisters the vboxnet device, disconnects from the support driver and
 * terminates IPRT.
 *
 * @todo We have to prevent this if we're busy!
 */
static void __exit VBoxNetFltLinuxUnload(void)
{
    int rc;
    Log(("VBoxNetFltLinuxUnload\n"));
    Assert(vboxNetFltCanUnload(&g_VBoxNetFltGlobals));

    /*
     * Undo the work done during start (in reverse order).
     */
    rc = vboxNetAdpUnregisterNetDev();
    AssertRC(rc);
    rc = vboxNetFltTryDeleteIdcAndGlobals(&g_VBoxNetFltGlobals);
    AssertRC(rc); NOREF(rc);

    RTR0Term();

    /* Scrub the globals so stale pointers cannot be misused. */
    memset(&g_VBoxNetFltGlobals, 0, sizeof(g_VBoxNetFltGlobals));

    Log(("VBoxNetFltLinuxUnload - done\n"));
}
349
350
/**
 * Reads and retains the host interface handle.
 *
 * @returns The handle, NULL if detached.
 * @param   pThis   The instance.
 */
DECLINLINE(struct net_device *) vboxNetFltLinuxRetainNetDev(PVBOXNETFLTINS pThis)
{
#if 0
    /* Disabled careful variant: takes a dev_hold() reference under the
       instance spinlock to avoid racing the detach callback. */
    RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
    struct net_device *pDev = NULL;

    Log(("vboxNetFltLinuxRetainNetDev\n"));
    /*
     * Be careful here to avoid problems racing the detached callback.
     */
    RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
    if (!ASMAtomicUoReadBool(&pThis->fDisconnectedFromHost))
    {
        pDev = (struct net_device *)ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev);
        if (pDev)
        {
            dev_hold(pDev);
            Log(("vboxNetFltLinuxRetainNetDev: Device %p(%s) retained. ref=%d\n", pDev, pDev->name, atomic_read(&pDev->refcnt)));
        }
    }
    RTSpinlockRelease(pThis->hSpinlock, &Tmp);

    Log(("vboxNetFltLinuxRetainNetDev - done\n"));
    return pDev;
#else
    /* Active variant: a plain unserialized read; no reference is taken, and
       vboxNetFltLinuxReleaseNetDev() is a matching no-op. */
    return (struct net_device *)ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev);
#endif
}
385
386
/**
 * Release the host interface handle previously retained
 * by vboxNetFltLinuxRetainNetDev.
 *
 * Currently a no-op, matching the fact that the retain side does not
 * dev_hold() (see the disabled code in both functions).
 *
 * @param   pThis   The instance.
 * @param   pDev    The vboxNetFltLinuxRetainNetDev
 *                  return value, NULL is fine.
 */
DECLINLINE(void) vboxNetFltLinuxReleaseNetDev(PVBOXNETFLTINS pThis, struct net_device *pDev)
{
#if 0
    /* Disabled counterpart of the dev_hold() in the retain function. */
    Log(("vboxNetFltLinuxReleaseNetDev\n"));
    NOREF(pThis);
    if (pDev)
    {
        dev_put(pDev);
        Log(("vboxNetFltLinuxReleaseNetDev: Device %p(%s) released. ref=%d\n", pDev, pDev->name, atomic_read(&pDev->refcnt)));
    }
    Log(("vboxNetFltLinuxReleaseNetDev - done\n"));
#endif
}
408
/* Tag used to mark buffers we injected ourselves: a magic constant combined
   with the low 16 bits of the device's ifindex. */
#define VBOXNETFLT_CB_TAG(skb) (0xA1C90000 | (skb->dev->ifindex & 0xFFFF))
/* Accessor for the last 4 bytes of skb->cb, where the tag is stored. */
#define VBOXNETFLT_SKB_TAG(skb) (*(uint32_t*)&((skb)->cb[sizeof((skb)->cb)-sizeof(uint32_t)]))

/**
 * Checks whether this is an sk_buff created by vboxNetFltLinuxSkBufFromSG,
 * i.e. a buffer which we're pushing and should be ignored by the filter callbacks.
 *
 * @returns true / false accordingly.
 * @param   pBuf    The sk_buff.
 */
DECLINLINE(bool) vboxNetFltLinuxSkBufIsOur(struct sk_buff *pBuf)
{
    return VBOXNETFLT_SKB_TAG(pBuf) == VBOXNETFLT_CB_TAG(pBuf);
}
423
424
/**
 * Internal worker that creates a linux sk_buff for a
 * (scatter/)gather list.
 *
 * Only single-segment SGs are supported; the data is copied into a freshly
 * allocated sk_buff which is tagged (VBOXNETFLT_SKB_TAG) so that our own
 * packet handler recognizes and ignores it.
 *
 * @returns Pointer to the sk_buff, NULL on failure (bad SG or allocation).
 * @param   pThis       The instance.
 * @param   pSG         The (scatter/)gather list.
 * @param   fDstWire    Set when the buffer is destined for the wire (the
 *                      ethernet header is pushed back on), clear for the
 *                      host stack.
 */
static struct sk_buff *vboxNetFltLinuxSkBufFromSG(PVBOXNETFLTINS pThis, PINTNETSG pSG, bool fDstWire)
{
    struct sk_buff *pPkt;
    struct net_device *pDev;
    /*
     * Because we're lazy, we will ASSUME that all SGs coming from INTNET
     * will only contain one single segment.
     */
    if (pSG->cSegsUsed != 1 || pSG->cbTotal != pSG->aSegs[0].cb)
    {
        LogRel(("VBoxNetFlt: Dropped multi-segment(%d) packet coming from internal network.\n", pSG->cSegsUsed));
        return NULL;
    }
    if (pSG->cbTotal == 0)
    {
        LogRel(("VBoxNetFlt: Dropped empty packet coming from internal network.\n"));
        return NULL;
    }

    /*
     * Allocate a packet and copy over the data.
     *
     */
    pDev = (struct net_device *)ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev);
    pPkt = dev_alloc_skb(pSG->cbTotal + NET_IP_ALIGN);
    if (pPkt)
    {
        pPkt->dev = pDev;
        /* Align IP header on 16-byte boundary: 2 + 14 (ethernet hdr size). */
        skb_reserve(pPkt, NET_IP_ALIGN);
        skb_put(pPkt, pSG->cbTotal);
        memcpy(pPkt->data, pSG->aSegs[0].pv, pSG->cbTotal);
        /* Set protocol and packet_type fields. */
        pPkt->protocol = eth_type_trans(pPkt, pDev);
        pPkt->ip_summed = CHECKSUM_NONE;
        if (fDstWire)
        {
            VBOX_SKB_RESET_NETWORK_HDR(pPkt);
            /* Restore ethernet header back (eth_type_trans pulled it). */
            skb_push(pPkt, ETH_HLEN);
            VBOX_SKB_RESET_MAC_HDR(pPkt);
        }
        /* Tag it so vboxNetFltLinuxSkBufIsOur() can identify the buffer. */
        VBOXNETFLT_SKB_TAG(pPkt) = VBOXNETFLT_CB_TAG(pPkt);

        return pPkt;
    }
    else
        Log(("vboxNetFltLinuxSkBufFromSG: Failed to allocate sk_buff(%u).\n", pSG->cbTotal));
    pSG->pvUserData = NULL;

    return NULL;
}
485
486
/**
 * Initializes a SG list from an sk_buff.
 *
 * @param   pThis   The instance.
 * @param   pBuf    The sk_buff.
 * @param   pSG     The SG list to fill in.
 * @param   cSegs   The number of segments allocated for the SG.
 *                  This should match the count from
 *                  vboxNetFltLinuxSGSegments() exactly!
 * @param   fSrc    The source of the frame (INTNETTRUNKDIR_*).
 */
DECLINLINE(void) vboxNetFltLinuxSkBufToSG(PVBOXNETFLTINS pThis, struct sk_buff *pBuf, PINTNETSG pSG, unsigned cSegs, uint32_t fSrc)
{
    int i;
    NOREF(pThis);

    /* Chained (frag_list) buffers are not supported here. */
    Assert(!skb_shinfo(pBuf)->frag_list);
    pSG->pvOwnerData = NULL;
    pSG->pvUserData = NULL;
    pSG->pvUserData2 = NULL;
    pSG->cUsers = 1;
    pSG->fFlags = INTNETSG_FLAGS_TEMP;
    pSG->cSegsAlloc = cSegs;

    if (fSrc & INTNETTRUNKDIR_WIRE)
    {
        /*
         * The packet came from wire, ethernet header was removed by device driver.
         * Restore it.
         */
        skb_push(pBuf, ETH_HLEN);
    }
    pSG->cbTotal = pBuf->len;
#ifdef VBOXNETFLT_SG_SUPPORT
    /* Segment 0 is the linear head; one extra segment per page fragment. */
    pSG->aSegs[0].cb = skb_headlen(pBuf);
    pSG->aSegs[0].pv = pBuf->data;
    pSG->aSegs[0].Phys = NIL_RTHCPHYS;

    for (i = 0; i < skb_shinfo(pBuf)->nr_frags; i++)
    {
        skb_frag_t *pFrag = &skb_shinfo(pBuf)->frags[i];
        pSG->aSegs[i+1].cb = pFrag->size;
        /* Map the fragment page; vboxNetFltLinuxFreeSkBuff() must unmap it. */
        pSG->aSegs[i+1].pv = kmap(pFrag->page);
        printk("%p = kmap()\n", pSG->aSegs[i+1].pv); /* NOTE(review): debug leftover */
        pSG->aSegs[i+1].Phys = NIL_RTHCPHYS;
    }
    pSG->cSegsUsed = ++i;
#else
    /* No SG support: the whole (linearized) buffer is one segment. */
    pSG->aSegs[0].cb = pBuf->len;
    pSG->aSegs[0].pv = pBuf->data;
    pSG->aSegs[0].Phys = NIL_RTHCPHYS;
    pSG->cSegsUsed = i = 1;
#endif


#ifdef PADD_RUNT_FRAMES_FROM_HOST
    /*
     * Add a trailer if the frame is too small.
     *
     * Since we're getting to the packet before it is framed, it has not
     * yet been padded. The current solution is to add a segment pointing
     * to a buffer containing all zeros and pray that works for all frames...
     */
    if (pSG->cbTotal < 60 && (fSrc & INTNETTRUNKDIR_HOST))
    {
        static uint8_t const s_abZero[128] = {0};

        AssertReturnVoid(i < cSegs);

        pSG->aSegs[i].Phys = NIL_RTHCPHYS;
        pSG->aSegs[i].pv = (void *)&s_abZero[0];
        pSG->aSegs[i].cb = 60 - pSG->cbTotal;
        pSG->cbTotal = 60;
        pSG->cSegsUsed++;
    }
#endif
    Log4(("vboxNetFltLinuxSkBufToSG: allocated=%d, segments=%d frags=%d next=%p frag_list=%p pkt_type=%x fSrc=%x\n",
          pSG->cSegsAlloc, pSG->cSegsUsed, skb_shinfo(pBuf)->nr_frags, pBuf->next, skb_shinfo(pBuf)->frag_list, pBuf->pkt_type, fSrc));
    for (i = 0; i < pSG->cSegsUsed; i++)
        Log4(("vboxNetFltLinuxSkBufToSG: #%d: cb=%d pv=%p\n",
              i, pSG->aSegs[i].cb, pSG->aSegs[i].pv));
}
570
/**
 * Packet handler: invoked by the protocol layer for every frame seen on the
 * interface we hooked via dev_add_pack().
 *
 * Buffers we injected ourselves are recognized by their tag and dropped;
 * real traffic is (optionally copied to get rid of fragments,) queued and
 * handed to the transmit work item for forwarding to the internal network.
 *
 * @returns 0 (the return value is ignored by the kernel).
 * @param   pBuf        The buffer.
 * @param   pSkbDev     The device the buffer arrived on.
 * @param   pPacketType Our packet_type structure (embedded in the instance).
 * @param   pOrigDev    The original device (kernels >= 2.6.14 only).
 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14)
static int vboxNetFltLinuxPacketHandler(struct sk_buff *pBuf,
                                        struct net_device *pSkbDev,
                                        struct packet_type *pPacketType,
                                        struct net_device *pOrigDev)
#else
static int vboxNetFltLinuxPacketHandler(struct sk_buff *pBuf,
                                        struct net_device *pSkbDev,
                                        struct packet_type *pPacketType)
#endif
{
    PVBOXNETFLTINS pThis;
    struct net_device *pDev;
    LogFlow(("vboxNetFltLinuxPacketHandler: pBuf=%p pSkbDev=%p pPacketType=%p\n",
             pBuf, pSkbDev, pPacketType));
    /*
     * Drop it immediately?
     */
    if (!pBuf)
        return 0;

    /* Recover the owning instance from the embedded packet_type struct. */
    pThis = VBOX_FLT_PT_TO_INST(pPacketType);
    /* NOTE(review): pDev is read here but the comparison below uses
       pThis->u.s.pDev directly and pDev is otherwise unused — verify. */
    pDev = (struct net_device *)ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev);
    if (pThis->u.s.pDev != pSkbDev)
    {
        Log(("vboxNetFltLinuxPacketHandler: Devices do not match, pThis may be wrong! pThis=%p\n", pThis));
        return 0;
    }

    Log4(("vboxNetFltLinuxPacketHandler: pBuf->cb dump:\n%.*Rhxd\n", sizeof(pBuf->cb), pBuf->cb));
    if (vboxNetFltLinuxSkBufIsOur(pBuf))
    {
        Log2(("vboxNetFltLinuxPacketHandler: got our own sk_buff, drop it.\n"));
        dev_kfree_skb(pBuf);
        return 0;
    }

#ifndef VBOXNETFLT_SG_SUPPORT
    {
        /*
         * Get rid of fragmented packets, they cause too much trouble.
         */
        struct sk_buff *pCopy = skb_copy(pBuf, GFP_ATOMIC);
        kfree_skb(pBuf);
        if (!pCopy)
        {
            LogRel(("VBoxNetFlt: Failed to allocate packet buffer, dropping the packet.\n"));
            return 0;
        }
        pBuf = pCopy;
    }
#endif

    /* Add the packet to transmit queue and schedule the bottom half. */
    skb_queue_tail(&pThis->u.s.XmitQueue, pBuf);
    schedule_work(&pThis->u.s.XmitTask);
    Log4(("vboxNetFltLinuxPacketHandler: scheduled work %p for sk_buff %p\n",
          &pThis->u.s.XmitTask, pBuf));
    /* It does not really matter what we return, it is ignored by the kernel. */
    return 0;
}
642
643static unsigned vboxNetFltLinuxSGSegments(PVBOXNETFLTINS pThis, struct sk_buff *pBuf)
644{
645#ifdef VBOXNETFLT_SG_SUPPORT
646 unsigned cSegs = 1 + skb_shinfo(pBuf)->nr_frags;
647#else
648 unsigned cSegs = 1;
649#endif
650#ifdef PADD_RUNT_FRAMES_FROM_HOST
651 /*
652 * Add a trailer if the frame is too small.
653 */
654 if (pBuf->len < 60)
655 cSegs++;
656#endif
657 return cSegs;
658}
659
660/* WARNING! This function should only be called after vboxNetFltLinuxSkBufToSG()! */
661static void vboxNetFltLinuxFreeSkBuff(struct sk_buff *pBuf, PINTNETSG pSG)
662{
663#ifdef VBOXNETFLT_SG_SUPPORT
664 int i;
665
666 for (i = 0; i < skb_shinfo(pBuf)->nr_frags; i++)
667 {
668 printk("kunmap(%p)\n", pSG->aSegs[i+1].pv);
669 kunmap(pSG->aSegs[i+1].pv);
670 }
671#endif
672
673 dev_kfree_skb(pBuf);
674}
675
#ifndef LOG_ENABLED
/* Compiled out entirely when logging is disabled. */
#define vboxNetFltDumpPacket(a, b, c, d)
#else
/**
 * Logs a one-line summary (MAC addresses, direction, capture point, size,
 * running packet number) of a packet, plus a full hex dump at log level 3.
 *
 * @param   pSG         The packet as an SG list; segment 0 must contain the
 *                      ethernet header.
 * @param   fEgress     Set for the internal-network-to-outside direction.
 * @param   pszWhere    Short tag naming the capture point ("host"/"wire").
 * @param   iIncrement  Amount to advance the static packet counter by.
 */
static void vboxNetFltDumpPacket(PINTNETSG pSG, bool fEgress, const char *pszWhere, int iIncrement)
{
    uint8_t *pInt, *pExt;
    static int iPacketNo = 1;
    iPacketNo += iIncrement;
    /* The destination MAC is first on the wire, the source at offset 6;
       which one is the "internal" side depends on the direction. */
    if (fEgress)
    {
        pExt = pSG->aSegs[0].pv;
        pInt = pExt + 6;
    }
    else
    {
        pInt = pSG->aSegs[0].pv;
        pExt = pInt + 6;
    }
    Log(("VBoxNetFlt: (int)%02x:%02x:%02x:%02x:%02x:%02x"
         " %s (%s)%02x:%02x:%02x:%02x:%02x:%02x (%u bytes) packet #%u\n",
         pInt[0], pInt[1], pInt[2], pInt[3], pInt[4], pInt[5],
         fEgress ? "-->" : "<--", pszWhere,
         pExt[0], pExt[1], pExt[2], pExt[3], pExt[4], pExt[5],
         pSG->cbTotal, iPacketNo));
    Log3(("%.*Rhxd\n", pSG->aSegs[0].cb, pSG->aSegs[0].pv));
}
#endif
703
704static int vboxNetFltLinuxForwardSegment(PVBOXNETFLTINS pThis, struct sk_buff *pBuf, uint32_t fSrc)
705{
706 unsigned cSegs = vboxNetFltLinuxSGSegments(pThis, pBuf);
707 if (cSegs < MAX_SKB_FRAGS)
708 {
709 uint8_t *pTmp;
710 PINTNETSG pSG = (PINTNETSG)alloca(RT_OFFSETOF(INTNETSG, aSegs[cSegs]));
711 if (!pSG)
712 {
713 Log(("VBoxNetFlt: Failed to allocate SG buffer.\n"));
714 return VERR_NO_MEMORY;
715 }
716 vboxNetFltLinuxSkBufToSG(pThis, pBuf, pSG, cSegs, fSrc);
717
718 pTmp = pSG->aSegs[0].pv;
719 vboxNetFltDumpPacket(pSG, false, (fSrc & INTNETTRUNKDIR_HOST) ? "host" : "wire", 1);
720 pThis->pSwitchPort->pfnRecv(pThis->pSwitchPort, pSG, fSrc);
721 Log4(("VBoxNetFlt: Dropping the sk_buff.\n"));
722 vboxNetFltLinuxFreeSkBuff(pBuf, pSG);
723 }
724
725 return VINF_SUCCESS;
726}
727
/**
 * Creates a (scatter/)gather list for the sk_buff and feeds it to the
 * internal network, segmenting GSO buffers and completing deferred
 * checksums first.
 *
 * @param   pThis   The instance.
 * @param   pBuf    The buffer to forward; consumed on all paths.
 */
static void vboxNetFltLinuxForwardToIntNet(PVBOXNETFLTINS pThis, struct sk_buff *pBuf)
{
    uint32_t fSrc = pBuf->pkt_type == PACKET_OUTGOING ? INTNETTRUNKDIR_HOST : INTNETTRUNKDIR_WIRE;

    if (VBOX_SKB_IS_GSO(pBuf))
    {
        /* Need to segment the packet */
        struct sk_buff *pNext, *pSegment;
        //Log2(("vboxNetFltLinuxForwardToIntNet: cb=%u gso_size=%u gso_segs=%u gso_type=%u\n",
        //    pBuf->len, skb_shinfo(pBuf)->gso_size, skb_shinfo(pBuf)->gso_segs, skb_shinfo(pBuf)->gso_type));

        /* Forward each resulting segment on its own; unlink them so the
           forwarding code sees stand-alone buffers. */
        for (pSegment = VBOX_SKB_GSO_SEGMENT(pBuf); pSegment; pSegment = pNext)
        {
            pNext = pSegment->next;
            pSegment->next = 0;
            vboxNetFltLinuxForwardSegment(pThis, pSegment, fSrc);
        }
        /* The original GSO buffer is no longer needed. */
        dev_kfree_skb(pBuf);
    }
    else
    {
        /* Complete any checksum the hardware was supposed to finish. */
        if (pBuf->ip_summed == CHECKSUM_PARTIAL)
            if (VBOX_SKB_CHECKSUM_HELP(pBuf))
            {
                LogRel(("VBoxNetFlt: Failed to compute checksum, dropping the packet.\n"));
                dev_kfree_skb(pBuf);
                return;
            }
        vboxNetFltLinuxForwardSegment(pThis, pBuf, fSrc);
    }
}
762
/**
 * Work item (bottom half) that drains the instance's transmit queue, filled
 * by the packet handler, and forwards each sk_buff to the internal network.
 *
 * @param   pWork   Pointer to the XmitTask member embedded in the instance
 *                  (a void* on kernels before the 2.6.20 workqueue rework).
 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
static void vboxNetFltLinuxXmitTask(struct work_struct *pWork)
#else
static void vboxNetFltLinuxXmitTask(void *pWork)
#endif
{
    struct sk_buff *pBuf;
    bool fActive;
    PVBOXNETFLTINS pThis;
    RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;

    Log4(("vboxNetFltLinuxXmitTask: Got work %p.\n", pWork));
    pThis = VBOX_FLT_XT_TO_INST(pWork);
    /*
     * Active? Retain the instance and increment the busy counter.
     */
    RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
    fActive = ASMAtomicUoReadBool(&pThis->fActive);
    if (fActive)
        vboxNetFltRetain(pThis, true /* fBusy */);
    RTSpinlockRelease(pThis->hSpinlock, &Tmp);
    if (!fActive)
        return;

    /* Buffers left queued when inactive are purged by
       vboxNetFltLinuxUnregisterDevice(). */
    while ((pBuf = skb_dequeue(&pThis->u.s.XmitQueue)) != 0)
        vboxNetFltLinuxForwardToIntNet(pThis, pBuf);

    vboxNetFltRelease(pThis, true /* fBusy */);
}
792
/**
 * Internal worker that attaches the filter instance to the given host
 * interface; called from the netdevice notifier on NETDEV_REGISTER for the
 * interface named in pThis->szName.
 *
 * @returns VBox status code.
 * @param   pThis   The instance.
 * @param   pDev    The device to attach to; NULL means "not found".
 */
static int vboxNetFltLinuxAttachToInterface(PVBOXNETFLTINS pThis, struct net_device *pDev)
{
    struct packet_type *pt;
    RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;

    LogFlow(("vboxNetFltLinuxAttachToInterface: pThis=%p (%s)\n", pThis, pThis->szName));

    if (!pDev)
    {
        Log(("VBoxNetFlt: failed to find device '%s'\n", pThis->szName));
        return VERR_INTNET_FLT_IF_NOT_FOUND;
    }

    /* Take a device reference and publish the pointer under the spinlock. */
    dev_hold(pDev);
    RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
    ASMAtomicUoWritePtr((void * volatile *)&pThis->u.s.pDev, pDev);
    RTSpinlockRelease(pThis->hSpinlock, &Tmp);

    Log(("vboxNetFltLinuxAttachToInterface: Device %p(%s) retained. ref=%d\n", pDev, pDev->name, atomic_read(&pDev->refcnt)));
    Log(("vboxNetFltLinuxAttachToInterface: Got pDev=%p pThis=%p pThis->u.s.pDev=%p\n", pDev, pThis, ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev)));
    /*
     * Get the mac address while we still have a valid ifnet reference.
     */
    memcpy(&pThis->u.s.Mac, pDev->dev_addr, sizeof(pThis->u.s.Mac));

    /* Hook our packet handler into the stack for all protocols on this device. */
    pt = &pThis->u.s.PacketType;
    pt->type = __constant_htons(ETH_P_ALL);
    pt->dev = pDev;
    pt->func = vboxNetFltLinuxPacketHandler;
    dev_add_pack(pt);
    /* Re-check under the lock that we are still attached (the detach
       callback may have raced us) and mark the instance registered. */
    RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
    pDev = (struct net_device *)ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev);
    if (pDev)
    {
        ASMAtomicUoWriteBool(&pThis->fDisconnectedFromHost, false);
        ASMAtomicUoWriteBool(&pThis->u.s.fRegistered, true);
        pDev = NULL; /* don't dereference it */
    }
    RTSpinlockRelease(pThis->hSpinlock, &Tmp);
    Log(("vboxNetFltLinuxAttachToInterface: this=%p: Packet handler installed.\n", pThis));

    /* Release the interface on failure. */
    /* NOTE(review): pDev is NULL on both branches of the re-check above, so
       this cleanup block looks unreachable — verify the intended logic. */
    if (pDev)
    {
        RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
        ASMAtomicUoWritePtr((void * volatile *)&pThis->u.s.pDev, NULL);
        RTSpinlockRelease(pThis->hSpinlock, &Tmp);
        dev_put(pDev);
        Log(("vboxNetFltLinuxAttachToInterface: Device %p(%s) released. ref=%d\n", pDev, pDev->name, atomic_read(&pDev->refcnt)));
    }

    LogRel(("VBoxNetFlt: attached to '%s' / %.*Rhxs\n", pThis->szName, sizeof(pThis->u.s.Mac), &pThis->u.s.Mac));
    return VINF_SUCCESS;
}
855
856
/**
 * Handles NETDEV_UNREGISTER: marks the instance detached, removes our packet
 * handler, purges queued packets and drops the attach-time device reference.
 *
 * @returns NOTIFY_OK.
 * @param   pThis   The instance.
 * @param   pDev    The device being unregistered.
 */
static int vboxNetFltLinuxUnregisterDevice(PVBOXNETFLTINS pThis, struct net_device *pDev)
{
    RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;

    Assert(!pThis->fDisconnectedFromHost);
    /* Clear the published state under the lock so readers see a consistent
       "detached" snapshot. */
    RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
    ASMAtomicWriteBool(&pThis->u.s.fRegistered, false);
    ASMAtomicWriteBool(&pThis->fDisconnectedFromHost, true);
    ASMAtomicUoWritePtr((void * volatile *)&pThis->u.s.pDev, NULL);
    RTSpinlockRelease(pThis->hSpinlock, &Tmp);

    dev_remove_pack(&pThis->u.s.PacketType);
    /* Drop anything still waiting for the transmit work item. */
    skb_queue_purge(&pThis->u.s.XmitQueue);
    Log(("vboxNetFltLinuxUnregisterDevice: this=%p: Packet handler removed, xmit queue purged.\n", pThis));
    Log(("vboxNetFltLinuxUnregisterDevice: Device %p(%s) released. ref=%d\n", pDev, pDev->name, atomic_read(&pDev->refcnt)));
    dev_put(pDev);

    return NOTIFY_OK;
}
876
877static int vboxNetFltLinuxDeviceIsUp(PVBOXNETFLTINS pThis, struct net_device *pDev)
878{
879 /* Check if we are not suspended and promiscuous mode has not been set. */
880 if (ASMAtomicUoReadBool(&pThis->fActive) && !ASMAtomicUoReadBool(&pThis->u.s.fPromiscuousSet))
881 {
882 /* Note that there is no need for locking as the kernel got hold of the lock already. */
883 dev_set_promiscuity(pDev, 1);
884 ASMAtomicWriteBool(&pThis->u.s.fPromiscuousSet, true);
885 Log(("vboxNetFltLinuxDeviceIsUp: enabled promiscuous mode on %s (%d)\n", pThis->szName, VBOX_GET_PCOUNT(pDev)));
886 }
887 else
888 Log(("vboxNetFltLinuxDeviceIsUp: no need to enable promiscuous mode on %s (%d)\n", pThis->szName, VBOX_GET_PCOUNT(pDev)));
889 return NOTIFY_OK;
890}
891
892static int vboxNetFltLinuxDeviceGoingDown(PVBOXNETFLTINS pThis, struct net_device *pDev)
893{
894 /* Undo promiscuous mode if we has set it. */
895 if (ASMAtomicUoReadBool(&pThis->u.s.fPromiscuousSet))
896 {
897 /* Note that there is no need for locking as the kernel got hold of the lock already. */
898 dev_set_promiscuity(pDev, -1);
899 ASMAtomicWriteBool(&pThis->u.s.fPromiscuousSet, false);
900 Log(("vboxNetFltLinuxDeviceGoingDown: disabled promiscuous mode on %s (%d)\n", pThis->szName, VBOX_GET_PCOUNT(pDev)));
901 }
902 else
903 Log(("vboxNetFltLinuxDeviceGoingDown: no need to disable promiscuous mode on %s (%d)\n", pThis->szName, VBOX_GET_PCOUNT(pDev)));
904 return NOTIFY_OK;
905}
906
/**
 * Netdevice event notifier callback.
 *
 * Attaches when the interface configured in pThis->szName gets registered,
 * and dispatches unregister/up/going-down events for the device we are
 * currently attached to.
 *
 * @returns NOTIFY_OK (or the dispatched handler's result).
 * @param   self        Our notifier_block, embedded in the instance.
 * @param   ulEventType The NETDEV_* event code.
 * @param   ptr         The struct net_device the event concerns.
 */
static int vboxNetFltLinuxNotifierCallback(struct notifier_block *self, unsigned long ulEventType, void *ptr)

{
    int rc = NOTIFY_OK;
#ifdef DEBUG
    char *pszEvent = "<unknown>";
#endif
    struct net_device *pDev = (struct net_device *)ptr;
    PVBOXNETFLTINS pThis = VBOX_FLT_NB_TO_INST(self);

#ifdef DEBUG
    /* Pretty-print the event for the debug log. */
    switch (ulEventType)
    {
        case NETDEV_REGISTER: pszEvent = "NETDEV_REGISTER"; break;
        case NETDEV_UNREGISTER: pszEvent = "NETDEV_UNREGISTER"; break;
        case NETDEV_UP: pszEvent = "NETDEV_UP"; break;
        case NETDEV_DOWN: pszEvent = "NETDEV_DOWN"; break;
        case NETDEV_REBOOT: pszEvent = "NETDEV_REBOOT"; break;
        case NETDEV_CHANGENAME: pszEvent = "NETDEV_CHANGENAME"; break;
        case NETDEV_CHANGE: pszEvent = "NETDEV_CHANGE"; break;
        case NETDEV_CHANGEMTU: pszEvent = "NETDEV_CHANGEMTU"; break;
        case NETDEV_CHANGEADDR: pszEvent = "NETDEV_CHANGEADDR"; break;
        case NETDEV_GOING_DOWN: pszEvent = "NETDEV_GOING_DOWN"; break;
    }
    Log(("VBoxNetFlt: got event %s(0x%lx) on %s, pDev=%p pThis=%p pThis->u.s.pDev=%p\n",
         pszEvent, ulEventType, pDev->name, pDev, pThis, ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev)));
#endif
    /* A device with "our" name appeared: attach to it. */
    if (ulEventType == NETDEV_REGISTER && !strcmp(pDev->name, pThis->szName))
    {
        vboxNetFltLinuxAttachToInterface(pThis, pDev);
    }
    else
    {
        /* Only react to events concerning the device we are attached to. */
        pDev = (struct net_device *)ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev);
        if (pDev != ptr)
            return NOTIFY_OK;
        rc = NOTIFY_OK;
        switch (ulEventType)
        {
            case NETDEV_UNREGISTER:
                rc = vboxNetFltLinuxUnregisterDevice(pThis, pDev);
                break;
            case NETDEV_UP:
                rc = vboxNetFltLinuxDeviceIsUp(pThis, pDev);
                break;
            case NETDEV_GOING_DOWN:
                rc = vboxNetFltLinuxDeviceGoingDown(pThis, pDev);
                break;
            case NETDEV_CHANGENAME:
                break;
        }
    }

    return rc;
}
962
963bool vboxNetFltOsMaybeRediscovered(PVBOXNETFLTINS pThis)
964{
965 return !ASMAtomicUoReadBool(&pThis->fDisconnectedFromHost);
966}
967
/**
 * Transmits a frame from the internal network to the wire and/or the host
 * network stack, converting the SG list into sk_buffs.
 *
 * @returns VBox status code; the last error wins when both directions are
 *          requested.
 * @param   pThis   The instance.
 * @param   pSG     The frame to transmit (single-segment SG expected, see
 *                  vboxNetFltLinuxSkBufFromSG).
 * @param   fDst    Destination mask: INTNETTRUNKDIR_WIRE and/or
 *                  INTNETTRUNKDIR_HOST.
 */
int vboxNetFltPortOsXmit(PVBOXNETFLTINS pThis, PINTNETSG pSG, uint32_t fDst)
{
    struct net_device * pDev;
    int err;
    int rc = VINF_SUCCESS;

    LogFlow(("vboxNetFltPortOsXmit: pThis=%p (%s)\n", pThis, pThis->szName));

    pDev = vboxNetFltLinuxRetainNetDev(pThis);
    if (pDev)
    {
        /*
         * Create a sk_buff for the gather list and push it onto the wire.
         */
        if (fDst & INTNETTRUNKDIR_WIRE)
        {
            struct sk_buff *pBuf = vboxNetFltLinuxSkBufFromSG(pThis, pSG, true);
            if (pBuf)
            {
                vboxNetFltDumpPacket(pSG, true, "wire", 1);
                Log4(("vboxNetFltPortOsXmit: pBuf->cb dump:\n%.*Rhxd\n", sizeof(pBuf->cb), pBuf->cb));
                Log4(("vboxNetFltPortOsXmit: dev_queue_xmit(%p)\n", pBuf));
                err = dev_queue_xmit(pBuf);
                if (err)
                    rc = RTErrConvertFromErrno(err);
            }
            else
                rc = VERR_NO_MEMORY;
        }

        /*
         * Create a sk_buff for the gather list and push it onto the host stack.
         */
        if (fDst & INTNETTRUNKDIR_HOST)
        {
            struct sk_buff *pBuf = vboxNetFltLinuxSkBufFromSG(pThis, pSG, false);
            if (pBuf)
            {
                vboxNetFltDumpPacket(pSG, true, "host", (fDst & INTNETTRUNKDIR_WIRE) ? 0 : 1);
                Log4(("vboxNetFltPortOsXmit: pBuf->cb dump:\n%.*Rhxd\n", sizeof(pBuf->cb), pBuf->cb));
                Log4(("vboxNetFltPortOsXmit: netif_rx_ni(%p)\n", pBuf));
                err = netif_rx_ni(pBuf);
                if (err)
                    rc = RTErrConvertFromErrno(err);
            }
            else
                rc = VERR_NO_MEMORY;
        }

        vboxNetFltLinuxReleaseNetDev(pThis, pDev);
    }

    return rc;
}
1022
1023
1024bool vboxNetFltPortOsIsPromiscuous(PVBOXNETFLTINS pThis)
1025{
1026 bool fRc = false;
1027 struct net_device * pDev = vboxNetFltLinuxRetainNetDev(pThis);
1028 if (pDev)
1029 {
1030 fRc = !!(pDev->promiscuity - (ASMAtomicUoReadBool(&pThis->u.s.fPromiscuousSet) & 1));
1031 LogFlow(("vboxNetFltPortOsIsPromiscuous: returns %d, pDev->promiscuity=%d, fPromiscuousSet=%d\n",
1032 fRc, pDev->promiscuity, pThis->u.s.fPromiscuousSet));
1033 vboxNetFltLinuxReleaseNetDev(pThis, pDev);
1034 }
1035 return fRc;
1036}
1037
1038
1039void vboxNetFltPortOsGetMacAddress(PVBOXNETFLTINS pThis, PRTMAC pMac)
1040{
1041 *pMac = pThis->u.s.Mac;
1042}
1043
1044
1045bool vboxNetFltPortOsIsHostMac(PVBOXNETFLTINS pThis, PCRTMAC pMac)
1046{
1047 /* ASSUMES that the MAC address never changes. */
1048 return pThis->u.s.Mac.au16[0] == pMac->au16[0]
1049 && pThis->u.s.Mac.au16[1] == pMac->au16[1]
1050 && pThis->u.s.Mac.au16[2] == pMac->au16[2];
1051}
1052
1053
/**
 * Activates or deactivates the instance by toggling promiscuous mode on the
 * host interface, unless promiscuous mode has been disabled by configuration.
 *
 * @param   pThis       The filter instance.
 * @param   fActive     true to go active (enable promiscuous mode),
 *                      false to go idle (drop our promiscuity reference).
 */
void vboxNetFltPortOsSetActive(PVBOXNETFLTINS pThis, bool fActive)
{
    struct net_device * pDev;

    LogFlow(("vboxNetFltPortOsSetActive: pThis=%p (%s), fActive=%s, fDisablePromiscuous=%s\n",
             pThis, pThis->szName, fActive?"true":"false",
             pThis->fDisablePromiscuous?"true":"false"));

    /* Honour the "don't touch promiscuous mode" configuration option. */
    if (pThis->fDisablePromiscuous)
        return;

    pDev = vboxNetFltLinuxRetainNetDev(pThis);
    if (pDev)
    {
        /*
         * This API is a bit weird, the best reference is the code.
         *
         * Also, there is a bit of a race condition wrt the maintenance of the
         * host interface's promiscuity for vboxNetFltPortOsIsPromiscuous:
         * fPromiscuousSet is updated outside the rtnl lock.
         */
#ifdef LOG_ENABLED
        u_int16_t fIf;
        unsigned const cPromiscBefore = VBOX_GET_PCOUNT(pDev);
#endif
        if (fActive)
        {
            Assert(!pThis->u.s.fPromiscuousSet);

            /* dev_set_promiscuity must be serialized by the rtnl lock. */
            rtnl_lock();
            dev_set_promiscuity(pDev, 1);
            rtnl_unlock();
            pThis->u.s.fPromiscuousSet = true;
            Log(("vboxNetFltPortOsSetActive: enabled promiscuous mode on %s (%d)\n", pThis->szName, VBOX_GET_PCOUNT(pDev)));
        }
        else
        {
            /* Only drop the reference if we actually took one earlier. */
            if (pThis->u.s.fPromiscuousSet)
            {
                rtnl_lock();
                dev_set_promiscuity(pDev, -1);
                rtnl_unlock();
                Log(("vboxNetFltPortOsSetActive: disabled promiscuous mode on %s (%d)\n", pThis->szName, VBOX_GET_PCOUNT(pDev)));
            }
            pThis->u.s.fPromiscuousSet = false;

#ifdef LOG_ENABLED
            fIf = dev_get_flags(pDev);
            Log(("VBoxNetFlt: fIf=%#x; %d->%d\n", fIf, cPromiscBefore, VBOX_GET_PCOUNT(pDev)));
#endif
        }

        vboxNetFltLinuxReleaseNetDev(pThis, pDev);
    }
}
1108
1109
1110int vboxNetFltOsDisconnectIt(PVBOXNETFLTINS pThis)
1111{
1112 /* Nothing to do here. */
1113 return VINF_SUCCESS;
1114}
1115
1116
1117int vboxNetFltOsConnectIt(PVBOXNETFLTINS pThis)
1118{
1119 /* Nothing to do here. */
1120 return VINF_SUCCESS;
1121}
1122
1123
/**
 * Tears down the Linux-specific state of an instance: detaches the packet
 * handler, purges the transmit queue, releases the net_device reference and
 * finally removes the netdevice notifier and module reference.
 *
 * @param   pThis   The instance being destroyed.
 */
void vboxNetFltOsDeleteInstance(PVBOXNETFLTINS pThis)
{
    struct net_device *pDev;
    bool fRegistered;
    RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;

    /* Take a consistent snapshot of the device pointer and the registration
       state under the spinlock before tearing anything down. */
    RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
    pDev = (struct net_device *)ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev);
    fRegistered = ASMAtomicUoReadBool(&pThis->u.s.fRegistered);
    RTSpinlockRelease(pThis->hSpinlock, &Tmp);
    if (fRegistered)
    {
        /* Stop receiving packets first, then flush anything still queued,
           then drop the reference taken when we attached to the device.
           NOTE(review): assumes pDev is non-NULL whenever fRegistered is
           true — confirm against the attach path (not visible here). */
        dev_remove_pack(&pThis->u.s.PacketType);
        skb_queue_purge(&pThis->u.s.XmitQueue);
        Log(("vboxNetFltOsDeleteInstance: this=%p: Packet handler removed, xmit queue purged.\n", pThis));
        Log(("vboxNetFltOsDeleteInstance: Device %p(%s) released. ref=%d\n", pDev, pDev->name, atomic_read(&pDev->refcnt)));
        dev_put(pDev);
    }
    Log(("vboxNetFltOsDeleteInstance: this=%p: Notifier removed.\n", pThis));
    /* Unregister the notifier last so no new attach can race the teardown,
       then release the module reference taken in vboxNetFltOsInitInstance. */
    unregister_netdevice_notifier(&pThis->u.s.Notifier);
    module_put(THIS_MODULE);
}
1146
1147
/**
 * Initializes the Linux-specific parts of an instance by installing a
 * netdevice notifier which attaches to the interface named pThis->szName.
 *
 * @returns VBox status code: VERR_INTNET_FLT_IF_FAILED if the notifier could
 *          not be registered (or the final checks fail),
 *          VERR_INTNET_FLT_IF_NOT_FOUND if the named interface was not found.
 * @param   pThis       The instance.
 * @param   pvContext   Unused.
 */
int vboxNetFltOsInitInstance(PVBOXNETFLTINS pThis, void *pvContext)
{
    int err;
    NOREF(pvContext);

    pThis->u.s.Notifier.notifier_call = vboxNetFltLinuxNotifierCallback;
    err = register_netdevice_notifier(&pThis->u.s.Notifier);
    if (err)
        return VERR_INTNET_FLT_IF_FAILED;
    /* Presumably register_netdevice_notifier replays NETDEV_REGISTER for
       already-existing devices, so the callback has had a chance to attach
       and set fRegistered by the time it returns — TODO confirm for the
       supported kernel range. */
    if (!pThis->u.s.fRegistered)
    {
        unregister_netdevice_notifier(&pThis->u.s.Notifier);
        LogRel(("VBoxNetFlt: failed to find %s.\n", pThis->szName));
        return VERR_INTNET_FLT_IF_NOT_FOUND;
    }

    Log(("vboxNetFltOsInitInstance: this=%p: Notifier installed.\n", pThis));
    /* NOTE(review): on this failure path the notifier stays registered and no
       module reference is held; this relies on the common code calling
       vboxNetFltOsDeleteInstance (which unregisters and does module_put) —
       verify that contract, otherwise this leaks the notifier and
       unbalances the module refcount. */
    if (    pThis->fDisconnectedFromHost
        ||  !try_module_get(THIS_MODULE))
        return VERR_INTNET_FLT_IF_FAILED;

    return VINF_SUCCESS;
}
1171
1172int vboxNetFltOsPreInitInstance(PVBOXNETFLTINS pThis)
1173{
1174 /*
1175 * Init the linux specific members.
1176 */
1177 pThis->u.s.pDev = NULL;
1178 pThis->u.s.fRegistered = false;
1179 pThis->u.s.fPromiscuousSet = false;
1180 memset(&pThis->u.s.PacketType, 0, sizeof(pThis->u.s.PacketType));
1181 skb_queue_head_init(&pThis->u.s.XmitQueue);
1182#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
1183 INIT_WORK(&pThis->u.s.XmitTask, vboxNetFltLinuxXmitTask);
1184#else
1185 INIT_WORK(&pThis->u.s.XmitTask, vboxNetFltLinuxXmitTask, &pThis->u.s.XmitTask);
1186#endif
1187
1188 return VINF_SUCCESS;
1189}
1190
注意: 瀏覽 TracBrowser 來幫助您使用儲存庫瀏覽器

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette