VirtualBox

source: vbox/trunk/src/VBox/HostDrivers/VBoxNetFlt/linux/VBoxNetFlt-linux.c@ 18973

最後變更:此檔案的最新修訂為 18973,由 vboxsync 於 16 年前提交

#2954: Dynamic add/remove for virtual adapters. Moved all adapter-related code to vboxnetadp.ko

  • 屬性 svn:eol-style 設為 native
  • 屬性 svn:keywords 設為 Author Date Id Revision
檔案大小: 35.2 KB
 
1/* $Id: VBoxNetFlt-linux.c 18973 2009-04-17 06:59:16Z vboxsync $ */
2/** @file
3 * VBoxNetFlt - Network Filter Driver (Host), Linux Specific Code.
4 */
5
6/*
7 * Copyright (C) 2006-2008 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.alldomusa.eu.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22/*******************************************************************************
23* Header Files *
24*******************************************************************************/
25#include "the-linux-kernel.h"
26#include "version-generated.h"
27#include <linux/netdevice.h>
28#include <linux/etherdevice.h>
29#include <linux/rtnetlink.h>
30#include <linux/miscdevice.h>
31
32#define LOG_GROUP LOG_GROUP_NET_FLT_DRV
33#include <VBox/log.h>
34#include <VBox/err.h>
35#include <iprt/alloca.h>
36#include <iprt/assert.h>
37#include <iprt/spinlock.h>
38#include <iprt/semaphore.h>
39#include <iprt/initterm.h>
40#include <iprt/process.h>
41#include <iprt/mem.h>
42#include <iprt/log.h>
43#include <iprt/mp.h>
44#include <iprt/mem.h>
45#include <iprt/time.h>
46
47#define VBOXNETFLT_OS_SPECFIC 1
48#include "../VBoxNetFltInternal.h"
49
/** Recovers the VBOXNETFLTINS from the notifier_block embedded at
 *  u.s.Notifier (container_of-style pointer arithmetic). */
#define VBOX_FLT_NB_TO_INST(pNB) ((PVBOXNETFLTINS)((uint8_t *)pNB - \
                                  RT_OFFSETOF(VBOXNETFLTINS, u.s.Notifier)))
/** Recovers the VBOXNETFLTINS from the packet_type embedded at u.s.PacketType. */
#define VBOX_FLT_PT_TO_INST(pPT) ((PVBOXNETFLTINS)((uint8_t *)pPT - \
                                  RT_OFFSETOF(VBOXNETFLTINS, u.s.PacketType)))
/** Recovers the VBOXNETFLTINS from the work item embedded at u.s.XmitTask. */
#define VBOX_FLT_XT_TO_INST(pXT) ((PVBOXNETFLTINS)((uint8_t *)pXT - \
                                  RT_OFFSETOF(VBOXNETFLTINS, u.s.XmitTask)))

/** Reads the device's promiscuity reference count. */
#define VBOX_GET_PCOUNT(pDev) (pDev->promiscuity)

/* The skb header-reset helpers appeared in 2.6.22; on older kernels poke the
   raw header pointers directly. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
# define VBOX_SKB_RESET_NETWORK_HDR(skb) skb_reset_network_header(skb)
# define VBOX_SKB_RESET_MAC_HDR(skb) skb_reset_mac_header(skb)
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 22) */
# define VBOX_SKB_RESET_NETWORK_HDR(skb) skb->nh.raw = skb->data
# define VBOX_SKB_RESET_MAC_HDR(skb) skb->mac.raw = skb->data
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 22) */

/* skb_checksum_help() changed its signature (and return convention before
   2.6.7) several times; normalize the differences behind one macro. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
# define VBOX_SKB_CHECKSUM_HELP(skb) skb_checksum_help(skb)
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19) */
# define CHECKSUM_PARTIAL CHECKSUM_HW
# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 10)
#  define VBOX_SKB_CHECKSUM_HELP(skb) skb_checksum_help(skb, 0)
# else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 10) */
#  if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 7)
#   define VBOX_SKB_CHECKSUM_HELP(skb) skb_checksum_help(&skb, 0)
#  else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 7) */
#   define VBOX_SKB_CHECKSUM_HELP(skb) (!skb_checksum_help(skb))
#  endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 7) */
# endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 10) */
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19) */

/* GSO support appeared in 2.6.18; treat everything as non-GSO before that. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18)
# define VBOX_SKB_IS_GSO(skb) skb_is_gso(skb)
  /* No features, very dumb device */
# define VBOX_SKB_GSO_SEGMENT(skb) skb_gso_segment(skb, 0)
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) */
# define VBOX_SKB_IS_GSO(skb) false
# define VBOX_SKB_GSO_SEGMENT(skb) NULL
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) */

/* Fallback for kernels that do not define the IP alignment padding. */
#ifndef NET_IP_ALIGN
# define NET_IP_ALIGN 2
#endif
94
95#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 12)
/* Backport of the kernel's dev_get_flags() for kernels older than 2.6.12;
 * combines the real flags with the user-requested gflags and synthesizes
 * IFF_RUNNING from the carrier/running state. Keep identical to the kernel
 * implementation. */
unsigned dev_get_flags(const struct net_device *dev)
{
    unsigned flags;

    flags = (dev->flags & ~(IFF_PROMISC |
                            IFF_ALLMULTI |
                            IFF_RUNNING)) |
            (dev->gflags & (IFF_PROMISC |
                            IFF_ALLMULTI));

    if (netif_running(dev) && netif_carrier_ok(dev))
        flags |= IFF_RUNNING;

    return flags;
}
111#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 12) */
112
113/*******************************************************************************
114* Internal Functions *
115*******************************************************************************/
116static int VBoxNetFltLinuxInit(void);
117static void VBoxNetFltLinuxUnload(void);
118
119
120/*******************************************************************************
121* Global Variables *
122*******************************************************************************/
123/**
124 * The (common) global data.
125 */
126#ifdef RT_ARCH_AMD64
127/**
128 * Memory for the executable memory heap (in IPRT).
129 */
130extern uint8_t g_abExecMemory[4096]; /* cannot donate less than one page */
131__asm__(".section execmemory, \"awx\", @progbits\n\t"
132 ".align 32\n\t"
133 ".globl g_abExecMemory\n"
134 "g_abExecMemory:\n\t"
135 ".zero 4096\n\t"
136 ".type g_abExecMemory, @object\n\t"
137 ".size g_abExecMemory, 4096\n\t"
138 ".text\n\t");
139#endif
140
141static VBOXNETFLTGLOBALS g_VBoxNetFltGlobals;
142
143module_init(VBoxNetFltLinuxInit);
144module_exit(VBoxNetFltLinuxUnload);
145
146MODULE_AUTHOR("Sun Microsystems, Inc.");
147MODULE_DESCRIPTION("VirtualBox Network Filter Driver");
148MODULE_LICENSE("GPL");
149#ifdef MODULE_VERSION
150# define xstr(s) str(s)
151# define str(s) #s
152MODULE_VERSION(VBOX_VERSION_STRING " (" xstr(INTNETTRUNKIFPORT_VERSION) ")");
153#endif
154
155/**
156 * The (common) global data.
157 */
158static VBOXNETFLTGLOBALS g_VBoxNetFltGlobals;
159
160/**
161 * Initialize module.
162 *
163 * @returns appropriate status code.
164 */
165static int __init VBoxNetFltLinuxInit(void)
166{
167 int rc;
168 /*
169 * Initialize IPRT.
170 */
171 rc = RTR0Init(0);
172 if (RT_SUCCESS(rc))
173 {
174#ifdef RT_ARCH_AMD64
175 rc = RTR0MemExecDonate(&g_abExecMemory[0], sizeof(g_abExecMemory));
176 printk("VBoxNetFlt: dbg - g_abExecMemory=%p\n", (void *)&g_abExecMemory[0]);
177 if (RT_FAILURE(rc))
178 {
179 printk("VBoxNetFlt: failed to donate exec memory, no logging will be available.\n");
180 }
181#endif
182 Log(("VBoxNetFltLinuxInit\n"));
183
184 /*
185 * Initialize the globals and connect to the support driver.
186 *
187 * This will call back vboxNetFltOsOpenSupDrv (and maybe vboxNetFltOsCloseSupDrv)
188 * for establishing the connect to the support driver.
189 */
190 memset(&g_VBoxNetFltGlobals, 0, sizeof(g_VBoxNetFltGlobals));
191 rc = vboxNetFltInitGlobalsAndIdc(&g_VBoxNetFltGlobals);
192 if (RT_SUCCESS(rc))
193 {
194 LogRel(("VBoxNetFlt: Successfully started.\n"));
195 return 0;
196 }
197 else
198 LogRel(("VBoxNetFlt: failed to initialize device extension (rc=%d)\n", rc));
199 RTR0Term();
200 }
201 else
202 LogRel(("VBoxNetFlt: failed to initialize IPRT (rc=%d)\n", rc));
203
204 memset(&g_VBoxNetFltGlobals, 0, sizeof(g_VBoxNetFltGlobals));
205 return -RTErrConvertToErrno(rc);
206}
207
208
209/**
210 * Unload the module.
211 *
212 * @todo We have to prevent this if we're busy!
213 */
214static void __exit VBoxNetFltLinuxUnload(void)
215{
216 int rc;
217 Log(("VBoxNetFltLinuxUnload\n"));
218 Assert(vboxNetFltCanUnload(&g_VBoxNetFltGlobals));
219
220 /*
221 * Undo the work done during start (in reverse order).
222 */
223 rc = vboxNetFltTryDeleteIdcAndGlobals(&g_VBoxNetFltGlobals);
224 AssertRC(rc); NOREF(rc);
225
226 RTR0Term();
227
228 memset(&g_VBoxNetFltGlobals, 0, sizeof(g_VBoxNetFltGlobals));
229
230 Log(("VBoxNetFltLinuxUnload - done\n"));
231}
232
233
/**
 * Reads and retains the host interface handle.
 *
 * @returns The handle, NULL if detached.
 * @param   pThis   The instance.
 */
DECLINLINE(struct net_device *) vboxNetFltLinuxRetainNetDev(PVBOXNETFLTINS pThis)
{
#if 0
    /* Disabled variant: takes the spinlock and dev_hold()s the device so it
       cannot vanish while in use; kept for reference.  NOTE(review): the live
       code below takes NO reference - presumably the callers rely on other
       lifetime guarantees; confirm before re-enabling this. */
    RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
    struct net_device *pDev = NULL;

    Log(("vboxNetFltLinuxRetainNetDev\n"));
    /*
     * Be careful here to avoid problems racing the detached callback.
     */
    RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
    if (!ASMAtomicUoReadBool(&pThis->fDisconnectedFromHost))
    {
        pDev = (struct net_device *)ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev);
        if (pDev)
        {
            dev_hold(pDev);
            Log(("vboxNetFltLinuxRetainNetDev: Device %p(%s) retained. ref=%d\n", pDev, pDev->name, atomic_read(&pDev->refcnt)));
        }
    }
    RTSpinlockRelease(pThis->hSpinlock, &Tmp);

    Log(("vboxNetFltLinuxRetainNetDev - done\n"));
    return pDev;
#else
    /* Unsynchronized read of the cached device pointer; no reference taken. */
    return (struct net_device *)ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev);
#endif
}
268
269
/**
 * Release the host interface handle previously retained
 * by vboxNetFltLinuxRetainNetDev.
 *
 * @param   pThis   The instance.
 * @param   pDev    The vboxNetFltLinuxRetainNetDev
 *                  return value, NULL is fine.
 *
 * @note    Currently a no-op: the matching retain does not dev_hold() the
 *          device (see the #if 0 there), so there is nothing to release.
 */
DECLINLINE(void) vboxNetFltLinuxReleaseNetDev(PVBOXNETFLTINS pThis, struct net_device *pDev)
{
#if 0
    Log(("vboxNetFltLinuxReleaseNetDev\n"));
    NOREF(pThis);
    if (pDev)
    {
        dev_put(pDev);
        Log(("vboxNetFltLinuxReleaseNetDev: Device %p(%s) released. ref=%d\n", pDev, pDev->name, atomic_read(&pDev->refcnt)));
    }
    Log(("vboxNetFltLinuxReleaseNetDev - done\n"));
#endif
}
291
/** Tag value for sk_buffs we inject ourselves: a constant marker in the upper
 *  16 bits plus the originating device's ifindex in the lower 16 bits. */
#define VBOXNETFLT_CB_TAG(skb) (0xA1C90000 | (skb->dev->ifindex & 0xFFFF))
/** Accesses the tag slot stored in the last 4 bytes of the sk_buff's control
 *  buffer (cb). */
#define VBOXNETFLT_SKB_TAG(skb) (*(uint32_t*)&((skb)->cb[sizeof((skb)->cb)-sizeof(uint32_t)]))
294
295/**
296 * Checks whether this is an mbuf created by vboxNetFltLinuxMBufFromSG,
297 * i.e. a buffer which we're pushing and should be ignored by the filter callbacks.
298 *
299 * @returns true / false accordingly.
300 * @param pBuf The sk_buff.
301 */
302DECLINLINE(bool) vboxNetFltLinuxSkBufIsOur(struct sk_buff *pBuf)
303{
304 return VBOXNETFLT_SKB_TAG(pBuf) == VBOXNETFLT_CB_TAG(pBuf);
305}
306
307
/**
 * Internal worker that create a linux sk_buff for a
 * (scatter/)gather list.
 *
 * @returns Pointer to the sk_buff on success, NULL on failure (multi-segment
 *          or empty SGs are rejected; allocation may also fail).
 * @param   pThis       The instance.
 * @param   pSG         The (scatter/)gather list.
 * @param   fDstWire    Set when the frame goes to the wire: the ethernet
 *                      header (consumed by eth_type_trans) is pushed back and
 *                      the header offsets are reset.
 */
static struct sk_buff *vboxNetFltLinuxSkBufFromSG(PVBOXNETFLTINS pThis, PINTNETSG pSG, bool fDstWire)
{
    struct sk_buff *pPkt;
    struct net_device *pDev;
    /*
     * Because we're lazy, we will ASSUME that all SGs coming from INTNET
     * will only contain one single segment.
     */
    if (pSG->cSegsUsed != 1 || pSG->cbTotal != pSG->aSegs[0].cb)
    {
        LogRel(("VBoxNetFlt: Dropped multi-segment(%d) packet coming from internal network.\n", pSG->cSegsUsed));
        return NULL;
    }
    if (pSG->cbTotal == 0)
    {
        LogRel(("VBoxNetFlt: Dropped empty packet coming from internal network.\n"));
        return NULL;
    }

    /*
     * Allocate a packet and copy over the data.
     */
    /* NOTE(review): pDev is not checked for NULL before being handed to
       eth_type_trans below - presumably we cannot get here while detached;
       confirm. */
    pDev = (struct net_device *)ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev);
    pPkt = dev_alloc_skb(pSG->cbTotal + NET_IP_ALIGN);
    if (pPkt)
    {
        pPkt->dev = pDev;
        /* Align IP header on 16-byte boundary: 2 + 14 (ethernet hdr size). */
        skb_reserve(pPkt, NET_IP_ALIGN);
        skb_put(pPkt, pSG->cbTotal);
        memcpy(pPkt->data, pSG->aSegs[0].pv, pSG->cbTotal);
        /* Set protocol and packet_type fields. */
        pPkt->protocol = eth_type_trans(pPkt, pDev);
        pPkt->ip_summed = CHECKSUM_NONE;
        if (fDstWire)
        {
            VBOX_SKB_RESET_NETWORK_HDR(pPkt);
            /* Restore ethernet header back. */
            skb_push(pPkt, ETH_HLEN);
            VBOX_SKB_RESET_MAC_HDR(pPkt);
        }
        /* Tag it so the packet handler recognizes (and drops) our own frames. */
        VBOXNETFLT_SKB_TAG(pPkt) = VBOXNETFLT_CB_TAG(pPkt);

        return pPkt;
    }
    else
        Log(("vboxNetFltLinuxSkBufFromSG: Failed to allocate sk_buff(%u).\n", pSG->cbTotal));
    pSG->pvUserData = NULL;

    return NULL;
}
368
369
/**
 * Initializes a SG list from an sk_buff.
 *
 * Results are written to *pSG (despite the old "returns number of segments"
 * doc this returns nothing).  When the frame arrived from the wire the
 * stripped ethernet header is pushed back onto the sk_buff as a side effect.
 *
 * @param   pThis   The instance (unused).
 * @param   pBuf    The sk_buff.
 * @param   pSG     The SG.
 * @param   cSegs   The number of segments allocated for the SG.
 *                  This should match the number in the sk_buff exactly!
 * @param   fSrc    The source of the frame (INTNETTRUNKDIR_*).
 */
DECLINLINE(void) vboxNetFltLinuxSkBufToSG(PVBOXNETFLTINS pThis, struct sk_buff *pBuf, PINTNETSG pSG, unsigned cSegs, uint32_t fSrc)
{
    int i;
    NOREF(pThis);

    /* Chained sk_buffs (frag_list) are not supported here. */
    Assert(!skb_shinfo(pBuf)->frag_list);
    pSG->pvOwnerData = NULL;
    pSG->pvUserData = NULL;
    pSG->pvUserData2 = NULL;
    pSG->cUsers = 1;
    pSG->fFlags = INTNETSG_FLAGS_TEMP;
    pSG->cSegsAlloc = cSegs;

    if (fSrc & INTNETTRUNKDIR_WIRE)
    {
        /*
         * The packet came from wire, ethernet header was removed by device driver.
         * Restore it.
         */
        skb_push(pBuf, ETH_HLEN);
    }
    pSG->cbTotal = pBuf->len;
#ifdef VBOXNETFLT_SG_SUPPORT
    /* Linear part first... */
    pSG->aSegs[0].cb = skb_headlen(pBuf);
    pSG->aSegs[0].pv = pBuf->data;
    pSG->aSegs[0].Phys = NIL_RTHCPHYS;

    /* ...then one segment per page fragment, kmap()ed here and kunmap()ed
       later by vboxNetFltLinuxFreeSkBuff(). */
    for (i = 0; i < skb_shinfo(pBuf)->nr_frags; i++)
    {
        skb_frag_t *pFrag = &skb_shinfo(pBuf)->frags[i];
        pSG->aSegs[i+1].cb = pFrag->size;
        pSG->aSegs[i+1].pv = kmap(pFrag->page);
        printk("%p = kmap()\n", pSG->aSegs[i+1].pv); /* NOTE(review): debug leftover; floods the kernel log. */
        pSG->aSegs[i+1].Phys = NIL_RTHCPHYS;
    }
    pSG->cSegsUsed = ++i; /* linear part + nr_frags */
#else
    pSG->aSegs[0].cb = pBuf->len;
    pSG->aSegs[0].pv = pBuf->data;
    pSG->aSegs[0].Phys = NIL_RTHCPHYS;
    pSG->cSegsUsed = i = 1;
#endif


#ifdef PADD_RUNT_FRAMES_FROM_HOST
    /*
     * Add a trailer if the frame is too small.
     *
     * Since we're getting to the packet before it is framed, it has not
     * yet been padded. The current solution is to add a segment pointing
     * to a buffer containing all zeros and pray that works for all frames...
     */
    if (pSG->cbTotal < 60 && (fSrc & INTNETTRUNKDIR_HOST))
    {
        static uint8_t const s_abZero[128] = {0};

        AssertReturnVoid(i < cSegs);

        pSG->aSegs[i].Phys = NIL_RTHCPHYS;
        pSG->aSegs[i].pv = (void *)&s_abZero[0];
        pSG->aSegs[i].cb = 60 - pSG->cbTotal;
        pSG->cbTotal = 60;
        pSG->cSegsUsed++;
    }
#endif
    Log4(("vboxNetFltLinuxSkBufToSG: allocated=%d, segments=%d frags=%d next=%p frag_list=%p pkt_type=%x fSrc=%x\n",
          pSG->cSegsAlloc, pSG->cSegsUsed, skb_shinfo(pBuf)->nr_frags, pBuf->next, skb_shinfo(pBuf)->frag_list, pBuf->pkt_type, fSrc));
    for (i = 0; i < pSG->cSegsUsed; i++)
        Log4(("vboxNetFltLinuxSkBufToSG: #%d: cb=%d pv=%p\n",
              i, pSG->aSegs[i].cb, pSG->aSegs[i].pv));
}
453
/**
 * Packet handler; called by the kernel for every frame seen on the attached
 * interface (installed via dev_add_pack with type ETH_P_ALL).
 *
 * Frames we injected ourselves (tag check) are dropped; without SG support
 * fragmented frames are flattened via skb_copy; everything else is queued for
 * the bottom-half worker (vboxNetFltLinuxXmitTask).
 *
 * @returns 0 (ignored by the kernel).
 * @param   pBuf        The sk_buff; consumed in all paths (queued, copied and
 *                      freed, or freed).
 * @param   pSkbDev     The device the frame was seen on.
 * @param   pPacketType Our packet_type structure; used to recover the instance.
 * @param   pOrigDev    The original device (>= 2.6.14 only; unused).
 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14)
static int vboxNetFltLinuxPacketHandler(struct sk_buff *pBuf,
                                        struct net_device *pSkbDev,
                                        struct packet_type *pPacketType,
                                        struct net_device *pOrigDev)
#else
static int vboxNetFltLinuxPacketHandler(struct sk_buff *pBuf,
                                        struct net_device *pSkbDev,
                                        struct packet_type *pPacketType)
#endif
{
    PVBOXNETFLTINS pThis;
    struct net_device *pDev;
    LogFlow(("vboxNetFltLinuxPacketHandler: pBuf=%p pSkbDev=%p pPacketType=%p\n",
             pBuf, pSkbDev, pPacketType));
    /*
     * Drop it immediately?
     */
    if (!pBuf)
        return 0;

    /* Recover the instance from the embedded packet_type and sanity-check
       that the frame really belongs to the device we attached to. */
    pThis = VBOX_FLT_PT_TO_INST(pPacketType);
    pDev = (struct net_device *)ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev);
    if (pThis->u.s.pDev != pSkbDev)
    {
        Log(("vboxNetFltLinuxPacketHandler: Devices do not match, pThis may be wrong! pThis=%p\n", pThis));
        return 0;
    }

    Log4(("vboxNetFltLinuxPacketHandler: pBuf->cb dump:\n%.*Rhxd\n", sizeof(pBuf->cb), pBuf->cb));
    /* Don't loop back frames we pushed onto this interface ourselves. */
    if (vboxNetFltLinuxSkBufIsOur(pBuf))
    {
        Log2(("vboxNetFltLinuxPacketHandler: got our own sk_buff, drop it.\n"));
        dev_kfree_skb(pBuf);
        return 0;
    }

#ifndef VBOXNETFLT_SG_SUPPORT
    {
        /*
         * Get rid of fragmented packets, they cause too much trouble.
         */
        struct sk_buff *pCopy = skb_copy(pBuf, GFP_ATOMIC);
        kfree_skb(pBuf);
        if (!pCopy)
        {
            LogRel(("VBoxNetFlt: Failed to allocate packet buffer, dropping the packet.\n"));
            return 0;
        }
        pBuf = pCopy;
    }
#endif

    /* Add the packet to transmit queue and schedule the bottom half. */
    skb_queue_tail(&pThis->u.s.XmitQueue, pBuf);
    schedule_work(&pThis->u.s.XmitTask);
    Log4(("vboxNetFltLinuxPacketHandler: scheduled work %p for sk_buff %p\n",
          &pThis->u.s.XmitTask, pBuf));
    /* It does not really matter what we return, it is ignored by the kernel. */
    return 0;
}
525
526static unsigned vboxNetFltLinuxSGSegments(PVBOXNETFLTINS pThis, struct sk_buff *pBuf)
527{
528#ifdef VBOXNETFLT_SG_SUPPORT
529 unsigned cSegs = 1 + skb_shinfo(pBuf)->nr_frags;
530#else
531 unsigned cSegs = 1;
532#endif
533#ifdef PADD_RUNT_FRAMES_FROM_HOST
534 /*
535 * Add a trailer if the frame is too small.
536 */
537 if (pBuf->len < 60)
538 cSegs++;
539#endif
540 return cSegs;
541}
542
/* WARNING! This function should only be called after vboxNetFltLinuxSkBufToSG()!
 * It undoes the per-fragment kmap() done there and then frees the sk_buff. */
static void vboxNetFltLinuxFreeSkBuff(struct sk_buff *pBuf, PINTNETSG pSG)
{
#ifdef VBOXNETFLT_SG_SUPPORT
    int i;

    for (i = 0; i < skb_shinfo(pBuf)->nr_frags; i++)
    {
        printk("kunmap(%p)\n", pSG->aSegs[i+1].pv); /* NOTE(review): debug leftover; floods the kernel log. */
        /* NOTE(review): kunmap() normally takes the struct page, not the
           mapped address stored in aSegs[].pv - verify before enabling
           VBOXNETFLT_SG_SUPPORT. */
        kunmap(pSG->aSegs[i+1].pv);
    }
#endif

    dev_kfree_skb(pBuf);
}
558
#ifndef LOG_ENABLED
#define vboxNetFltDumpPacket(a, b, c, d)
#else
/**
 * Logs a one-line summary (MAC addresses, direction, size, running packet
 * number) of a frame crossing the filter, plus a full hex dump at Log3 level.
 *
 * @param   pSG         The frame as a (scatter/)gather list.
 * @param   fEgress     true when the frame leaves the internal network.
 * @param   pszWhere    Short tag naming the destination/source ("wire"/"host").
 * @param   iIncrement  Amount to advance the packet counter by.
 */
static void vboxNetFltDumpPacket(PINTNETSG pSG, bool fEgress, const char *pszWhere, int iIncrement)
{
    static int iPacketNo = 1;
    uint8_t *pbFrame = (uint8_t *)pSG->aSegs[0].pv;
    /* On egress the destination (external) MAC leads; on ingress it is ours. */
    uint8_t *pInt = fEgress ? pbFrame + 6 : pbFrame;
    uint8_t *pExt = fEgress ? pbFrame : pbFrame + 6;
    iPacketNo += iIncrement;
    Log(("VBoxNetFlt: (int)%02x:%02x:%02x:%02x:%02x:%02x"
         " %s (%s)%02x:%02x:%02x:%02x:%02x:%02x (%u bytes) packet #%u\n",
         pInt[0], pInt[1], pInt[2], pInt[3], pInt[4], pInt[5],
         fEgress ? "-->" : "<--", pszWhere,
         pExt[0], pExt[1], pExt[2], pExt[3], pExt[4], pExt[5],
         pSG->cbTotal, iPacketNo));
    Log3(("%.*Rhxd\n", pSG->aSegs[0].cb, pSG->aSegs[0].pv));
}
#endif
586
587static int vboxNetFltLinuxForwardSegment(PVBOXNETFLTINS pThis, struct sk_buff *pBuf, uint32_t fSrc)
588{
589 unsigned cSegs = vboxNetFltLinuxSGSegments(pThis, pBuf);
590 if (cSegs < MAX_SKB_FRAGS)
591 {
592 uint8_t *pTmp;
593 PINTNETSG pSG = (PINTNETSG)alloca(RT_OFFSETOF(INTNETSG, aSegs[cSegs]));
594 if (!pSG)
595 {
596 Log(("VBoxNetFlt: Failed to allocate SG buffer.\n"));
597 return VERR_NO_MEMORY;
598 }
599 vboxNetFltLinuxSkBufToSG(pThis, pBuf, pSG, cSegs, fSrc);
600
601 pTmp = pSG->aSegs[0].pv;
602 vboxNetFltDumpPacket(pSG, false, (fSrc & INTNETTRUNKDIR_HOST) ? "host" : "wire", 1);
603 pThis->pSwitchPort->pfnRecv(pThis->pSwitchPort, pSG, fSrc);
604 Log4(("VBoxNetFlt: Dropping the sk_buff.\n"));
605 vboxNetFltLinuxFreeSkBuff(pBuf, pSG);
606 }
607
608 return VINF_SUCCESS;
609}
610
/**
 * Forwards an sk_buff captured from the host to the internal network,
 * segmenting GSO packets and completing pending checksums first.
 *
 * @param   pThis   The instance.
 * @param   pBuf    The sk_buff; consumed in all paths.
 */
static void vboxNetFltLinuxForwardToIntNet(PVBOXNETFLTINS pThis, struct sk_buff *pBuf)
{
    /* PACKET_OUTGOING means the host stack sent it; anything else came off the wire. */
    uint32_t fSrc = pBuf->pkt_type == PACKET_OUTGOING ? INTNETTRUNKDIR_HOST : INTNETTRUNKDIR_WIRE;

    if (VBOX_SKB_IS_GSO(pBuf))
    {
        /* Need to segment the packet */
        struct sk_buff *pNext, *pSegment;
        //Log2(("vboxNetFltLinuxForwardToIntNet: cb=%u gso_size=%u gso_segs=%u gso_type=%u\n",
        //    pBuf->len, skb_shinfo(pBuf)->gso_size, skb_shinfo(pBuf)->gso_segs, skb_shinfo(pBuf)->gso_type));

        /* Hand each resulting segment over individually, unlinking as we go. */
        for (pSegment = VBOX_SKB_GSO_SEGMENT(pBuf); pSegment; pSegment = pNext)
        {
            pNext = pSegment->next;
            pSegment->next = 0;
            vboxNetFltLinuxForwardSegment(pThis, pSegment, fSrc);
        }
        /* The original (unsegmented) sk_buff is no longer needed. */
        dev_kfree_skb(pBuf);
    }
    else
    {
        /* Finish a deferred (partial) checksum before the guests see the frame. */
        if (pBuf->ip_summed == CHECKSUM_PARTIAL)
            if (VBOX_SKB_CHECKSUM_HELP(pBuf))
            {
                LogRel(("VBoxNetFlt: Failed to compute checksum, dropping the packet.\n"));
                dev_kfree_skb(pBuf);
                return;
            }
        vboxNetFltLinuxForwardSegment(pThis, pBuf, fSrc);
    }
}
645
/**
 * Bottom-half work item: drains the transmit queue filled by the packet
 * handler and forwards each sk_buff to the internal network.
 *
 * @param   pWork   The work structure embedded in the instance (u.s.XmitTask);
 *                  a plain void pointer on kernels before 2.6.20.
 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
static void vboxNetFltLinuxXmitTask(struct work_struct *pWork)
#else
static void vboxNetFltLinuxXmitTask(void *pWork)
#endif
{
    struct sk_buff *pBuf;
    bool fActive;
    PVBOXNETFLTINS pThis;
    RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;

    Log4(("vboxNetFltLinuxXmitTask: Got work %p.\n", pWork));
    pThis = VBOX_FLT_XT_TO_INST(pWork);
    /*
     * Active? Retain the instance and increment the busy counter.
     * The check and retain happen atomically under the spinlock so we cannot
     * race deactivation.
     */
    RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
    fActive = ASMAtomicUoReadBool(&pThis->fActive);
    if (fActive)
        vboxNetFltRetain(pThis, true /* fBusy */);
    RTSpinlockRelease(pThis->hSpinlock, &Tmp);
    if (!fActive)
        return; /* Queued sk_buffs stay on XmitQueue; they are purged on unregister/delete. */

    while ((pBuf = skb_dequeue(&pThis->u.s.XmitQueue)) != 0)
        vboxNetFltLinuxForwardToIntNet(pThis, pBuf);

    /* Drop the busy reference taken above. */
    vboxNetFltRelease(pThis, true /* fBusy */);
}
675
/**
 * Internal worker for vboxNetFltOsInitInstance and the NETDEV_REGISTER
 * notification: attaches the instance to the given host interface.
 *
 * Retains and publishes the device, copies its MAC address, and installs the
 * ETH_P_ALL packet handler.
 *
 * @returns VBox status code.
 * @param   pThis   The instance.
 * @param   pDev    The host net_device to attach to; NULL fails the call.
 */
static int vboxNetFltLinuxAttachToInterface(PVBOXNETFLTINS pThis, struct net_device *pDev)
{
    struct packet_type *pt;
    RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;

    LogFlow(("vboxNetFltLinuxAttachToInterface: pThis=%p (%s)\n", pThis, pThis->szName));

    if (!pDev)
    {
        Log(("VBoxNetFlt: failed to find device '%s'\n", pThis->szName));
        return VERR_INTNET_FLT_IF_NOT_FOUND;
    }

    /* Take a reference and publish the device pointer under the spinlock. */
    dev_hold(pDev);
    RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
    ASMAtomicUoWritePtr((void * volatile *)&pThis->u.s.pDev, pDev);
    RTSpinlockRelease(pThis->hSpinlock, &Tmp);

    Log(("vboxNetFltLinuxAttachToInterface: Device %p(%s) retained. ref=%d\n", pDev, pDev->name, atomic_read(&pDev->refcnt)));
    Log(("vboxNetFltLinuxAttachToInterface: Got pDev=%p pThis=%p pThis->u.s.pDev=%p\n", pDev, pThis, ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev)));
    /*
     * Get the mac address while we still have a valid ifnet reference.
     */
    memcpy(&pThis->u.s.Mac, pDev->dev_addr, sizeof(pThis->u.s.Mac));

    /* Capture all ethernet traffic on this device. */
    pt = &pThis->u.s.PacketType;
    pt->type = __constant_htons(ETH_P_ALL);
    pt->dev = pDev;
    pt->func = vboxNetFltLinuxPacketHandler;
    dev_add_pack(pt);
    /* Re-check under the lock that we were not detached concurrently.  If the
       pointer is still set, mark the instance registered and clear the LOCAL
       pDev so the failure path below is skipped; otherwise the local pDev
       keeps the value to release. */
    RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
    pDev = (struct net_device *)ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev);
    if (pDev)
    {
        ASMAtomicUoWriteBool(&pThis->fDisconnectedFromHost, false);
        ASMAtomicUoWriteBool(&pThis->u.s.fRegistered, true);
        pDev = NULL; /* don't dereference it */
    }
    RTSpinlockRelease(pThis->hSpinlock, &Tmp);
    Log(("vboxNetFltLinuxAttachToInterface: this=%p: Packet handler installed.\n", pThis));

    /* Release the interface on failure. */
    /* NOTE(review): even on this concurrent-detach path the function still
       logs "attached" below and returns VINF_SUCCESS - confirm intended. */
    if (pDev)
    {
        RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
        ASMAtomicUoWritePtr((void * volatile *)&pThis->u.s.pDev, NULL);
        RTSpinlockRelease(pThis->hSpinlock, &Tmp);
        dev_put(pDev);
        Log(("vboxNetFltLinuxAttachToInterface: Device %p(%s) released. ref=%d\n", pDev, pDev->name, atomic_read(&pDev->refcnt)));
    }

    LogRel(("VBoxNetFlt: attached to '%s' / %.*Rhxs\n", pThis->szName, sizeof(pThis->u.s.Mac), &pThis->u.s.Mac));
    return VINF_SUCCESS;
}
738
739
/**
 * NETDEV_UNREGISTER handler: detaches the instance from the vanishing device.
 *
 * Marks the instance disconnected and clears the cached device pointer under
 * the spinlock, then removes the packet handler, purges queued frames and
 * drops the reference taken in vboxNetFltLinuxAttachToInterface.
 *
 * @returns NOTIFY_OK.
 * @param   pThis   The instance.
 * @param   pDev    The device being unregistered.
 */
static int vboxNetFltLinuxUnregisterDevice(PVBOXNETFLTINS pThis, struct net_device *pDev)
{
    RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;

    Assert(!pThis->fDisconnectedFromHost);
    RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
    ASMAtomicWriteBool(&pThis->u.s.fRegistered, false);
    ASMAtomicWriteBool(&pThis->fDisconnectedFromHost, true);
    ASMAtomicUoWritePtr((void * volatile *)&pThis->u.s.pDev, NULL);
    RTSpinlockRelease(pThis->hSpinlock, &Tmp);

    dev_remove_pack(&pThis->u.s.PacketType);
    skb_queue_purge(&pThis->u.s.XmitQueue);
    Log(("vboxNetFltLinuxUnregisterDevice: this=%p: Packet handler removed, xmit queue purged.\n", pThis));
    Log(("vboxNetFltLinuxUnregisterDevice: Device %p(%s) released. ref=%d\n", pDev, pDev->name, atomic_read(&pDev->refcnt)));
    dev_put(pDev);

    return NOTIFY_OK;
}
759
760static int vboxNetFltLinuxDeviceIsUp(PVBOXNETFLTINS pThis, struct net_device *pDev)
761{
762 /* Check if we are not suspended and promiscuous mode has not been set. */
763 if (ASMAtomicUoReadBool(&pThis->fActive) && !ASMAtomicUoReadBool(&pThis->u.s.fPromiscuousSet))
764 {
765 /* Note that there is no need for locking as the kernel got hold of the lock already. */
766 dev_set_promiscuity(pDev, 1);
767 ASMAtomicWriteBool(&pThis->u.s.fPromiscuousSet, true);
768 Log(("vboxNetFltLinuxDeviceIsUp: enabled promiscuous mode on %s (%d)\n", pThis->szName, VBOX_GET_PCOUNT(pDev)));
769 }
770 else
771 Log(("vboxNetFltLinuxDeviceIsUp: no need to enable promiscuous mode on %s (%d)\n", pThis->szName, VBOX_GET_PCOUNT(pDev)));
772 return NOTIFY_OK;
773}
774
775static int vboxNetFltLinuxDeviceGoingDown(PVBOXNETFLTINS pThis, struct net_device *pDev)
776{
777 /* Undo promiscuous mode if we has set it. */
778 if (ASMAtomicUoReadBool(&pThis->u.s.fPromiscuousSet))
779 {
780 /* Note that there is no need for locking as the kernel got hold of the lock already. */
781 dev_set_promiscuity(pDev, -1);
782 ASMAtomicWriteBool(&pThis->u.s.fPromiscuousSet, false);
783 Log(("vboxNetFltLinuxDeviceGoingDown: disabled promiscuous mode on %s (%d)\n", pThis->szName, VBOX_GET_PCOUNT(pDev)));
784 }
785 else
786 Log(("vboxNetFltLinuxDeviceGoingDown: no need to disable promiscuous mode on %s (%d)\n", pThis->szName, VBOX_GET_PCOUNT(pDev)));
787 return NOTIFY_OK;
788}
789
/**
 * Netdevice event notifier callback.
 *
 * Attaches when the interface matching pThis->szName registers; for the device
 * we are attached to it dispatches unregister/up/going-down events.  Events
 * for other devices are ignored.
 *
 * @returns NOTIFY_OK (or the handler's return value).
 * @param   self        Our notifier_block (embedded in the instance).
 * @param   ulEventType The NETDEV_* event code.
 * @param   ptr         The struct net_device the event concerns.
 */
static int vboxNetFltLinuxNotifierCallback(struct notifier_block *self, unsigned long ulEventType, void *ptr)

{
    int rc = NOTIFY_OK;
#ifdef DEBUG
    char *pszEvent = "<unknown>";
#endif
    struct net_device *pDev = (struct net_device *)ptr;
    PVBOXNETFLTINS pThis = VBOX_FLT_NB_TO_INST(self);

#ifdef DEBUG
    /* Pretty-print the event for the debug log. */
    switch (ulEventType)
    {
        case NETDEV_REGISTER: pszEvent = "NETDEV_REGISTER"; break;
        case NETDEV_UNREGISTER: pszEvent = "NETDEV_UNREGISTER"; break;
        case NETDEV_UP: pszEvent = "NETDEV_UP"; break;
        case NETDEV_DOWN: pszEvent = "NETDEV_DOWN"; break;
        case NETDEV_REBOOT: pszEvent = "NETDEV_REBOOT"; break;
        case NETDEV_CHANGENAME: pszEvent = "NETDEV_CHANGENAME"; break;
        case NETDEV_CHANGE: pszEvent = "NETDEV_CHANGE"; break;
        case NETDEV_CHANGEMTU: pszEvent = "NETDEV_CHANGEMTU"; break;
        case NETDEV_CHANGEADDR: pszEvent = "NETDEV_CHANGEADDR"; break;
        case NETDEV_GOING_DOWN: pszEvent = "NETDEV_GOING_DOWN"; break;
    }
    Log(("VBoxNetFlt: got event %s(0x%lx) on %s, pDev=%p pThis=%p pThis->u.s.pDev=%p\n",
         pszEvent, ulEventType, pDev->name, pDev, pThis, ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev)));
#endif
    if (ulEventType == NETDEV_REGISTER && !strcmp(pDev->name, pThis->szName))
    {
        vboxNetFltLinuxAttachToInterface(pThis, pDev);
    }
    else
    {
        /* Only care about events for the device we are attached to. */
        pDev = (struct net_device *)ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev);
        if (pDev != ptr)
            return NOTIFY_OK;
        rc = NOTIFY_OK;
        switch (ulEventType)
        {
            case NETDEV_UNREGISTER:
                rc = vboxNetFltLinuxUnregisterDevice(pThis, pDev);
                break;
            case NETDEV_UP:
                rc = vboxNetFltLinuxDeviceIsUp(pThis, pDev);
                break;
            case NETDEV_GOING_DOWN:
                rc = vboxNetFltLinuxDeviceGoingDown(pThis, pDev);
                break;
            case NETDEV_CHANGENAME:
                break;
        }
    }

    return rc;
}
845
846bool vboxNetFltOsMaybeRediscovered(PVBOXNETFLTINS pThis)
847{
848 return !ASMAtomicUoReadBool(&pThis->fDisconnectedFromHost);
849}
850
/**
 * Transmits a frame coming from the internal network to the wire and/or the
 * host network stack.
 *
 * @returns VBox status code; when both destinations are requested the last
 *          failure wins.
 * @param   pThis   The instance.
 * @param   pSG     The (scatter/)gather list describing the frame.
 * @param   fDst    Destination mask: INTNETTRUNKDIR_WIRE and/or
 *                  INTNETTRUNKDIR_HOST.
 */
int vboxNetFltPortOsXmit(PVBOXNETFLTINS pThis, PINTNETSG pSG, uint32_t fDst)
{
    struct net_device * pDev;
    int err;
    int rc = VINF_SUCCESS;

    LogFlow(("vboxNetFltPortOsXmit: pThis=%p (%s)\n", pThis, pThis->szName));

    pDev = vboxNetFltLinuxRetainNetDev(pThis);
    if (pDev)
    {
        /*
         * Create a sk_buff for the gather list and push it onto the wire.
         */
        if (fDst & INTNETTRUNKDIR_WIRE)
        {
            struct sk_buff *pBuf = vboxNetFltLinuxSkBufFromSG(pThis, pSG, true);
            if (pBuf)
            {
                vboxNetFltDumpPacket(pSG, true, "wire", 1);
                Log4(("vboxNetFltPortOsXmit: pBuf->cb dump:\n%.*Rhxd\n", sizeof(pBuf->cb), pBuf->cb));
                Log4(("vboxNetFltPortOsXmit: dev_queue_xmit(%p)\n", pBuf));
                err = dev_queue_xmit(pBuf);
                if (err)
                    rc = RTErrConvertFromErrno(err);
            }
            else
                rc = VERR_NO_MEMORY;
        }

        /*
         * Create a sk_buff for the gather list and push it onto the host stack.
         */
        if (fDst & INTNETTRUNKDIR_HOST)
        {
            struct sk_buff *pBuf = vboxNetFltLinuxSkBufFromSG(pThis, pSG, false);
            if (pBuf)
            {
                vboxNetFltDumpPacket(pSG, true, "host", (fDst & INTNETTRUNKDIR_WIRE) ? 0 : 1);
                Log4(("vboxNetFltPortOsXmit: pBuf->cb dump:\n%.*Rhxd\n", sizeof(pBuf->cb), pBuf->cb));
                Log4(("vboxNetFltPortOsXmit: netif_rx_ni(%p)\n", pBuf));
                err = netif_rx_ni(pBuf);
                if (err)
                    rc = RTErrConvertFromErrno(err);
            }
            else
                rc = VERR_NO_MEMORY;
        }

        vboxNetFltLinuxReleaseNetDev(pThis, pDev);
    }

    return rc;
}
905
906
/**
 * Checks whether the host interface is in promiscuous mode for reasons other
 * than our own doing.
 *
 * @returns true if someone besides us raised the promiscuity count.
 * @param   pThis   The instance.
 */
bool vboxNetFltPortOsIsPromiscuous(PVBOXNETFLTINS pThis)
{
    bool fRc = false;
    struct net_device * pDev = vboxNetFltLinuxRetainNetDev(pThis);
    if (pDev)
    {
        /* Subtract our own contribution (bool & 1 yields 0 or 1) from the
           device's promiscuity reference count. */
        fRc = !!(pDev->promiscuity - (ASMAtomicUoReadBool(&pThis->u.s.fPromiscuousSet) & 1));
        LogFlow(("vboxNetFltPortOsIsPromiscuous: returns %d, pDev->promiscuity=%d, fPromiscuousSet=%d\n",
                 fRc, pDev->promiscuity, pThis->u.s.fPromiscuousSet));
        vboxNetFltLinuxReleaseNetDev(pThis, pDev);
    }
    return fRc;
}
920
921
922void vboxNetFltPortOsGetMacAddress(PVBOXNETFLTINS pThis, PRTMAC pMac)
923{
924 *pMac = pThis->u.s.Mac;
925}
926
927
928bool vboxNetFltPortOsIsHostMac(PVBOXNETFLTINS pThis, PCRTMAC pMac)
929{
930 /* ASSUMES that the MAC address never changes. */
931 return pThis->u.s.Mac.au16[0] == pMac->au16[0]
932 && pThis->u.s.Mac.au16[1] == pMac->au16[1]
933 && pThis->u.s.Mac.au16[2] == pMac->au16[2];
934}
935
936
/**
 * Toggles promiscuous mode on the host interface when the trunk is
 * (de)activated, unless the instance was configured not to.
 *
 * @param   pThis   The instance.
 * @param   fActive Whether the trunk is becoming active.
 */
void vboxNetFltPortOsSetActive(PVBOXNETFLTINS pThis, bool fActive)
{
    struct net_device * pDev;

    LogFlow(("vboxNetFltPortOsSetActive: pThis=%p (%s), fActive=%s, fDisablePromiscuous=%s\n",
             pThis, pThis->szName, fActive?"true":"false",
             pThis->fDisablePromiscuous?"true":"false"));

    if (pThis->fDisablePromiscuous)
        return;

    pDev = vboxNetFltLinuxRetainNetDev(pThis);
    if (pDev)
    {
        /*
         * This api is a bit weird, the best reference is the code.
         *
         * Also, there are some race conditions wrt the maintenance of the
         * host interface promiscuity for vboxNetFltPortOsIsPromiscuous.
         */
#ifdef LOG_ENABLED
        u_int16_t fIf;
        unsigned const cPromiscBefore = VBOX_GET_PCOUNT(pDev);
#endif
        if (fActive)
        {
            Assert(!pThis->u.s.fPromiscuousSet);

            /* dev_set_promiscuity requires the rtnl lock when called outside
               a notifier context. */
            rtnl_lock();
            dev_set_promiscuity(pDev, 1);
            rtnl_unlock();
            pThis->u.s.fPromiscuousSet = true;
            Log(("vboxNetFltPortOsSetActive: enabled promiscuous mode on %s (%d)\n", pThis->szName, VBOX_GET_PCOUNT(pDev)));
        }
        else
        {
            if (pThis->u.s.fPromiscuousSet)
            {
                rtnl_lock();
                dev_set_promiscuity(pDev, -1);
                rtnl_unlock();
                Log(("vboxNetFltPortOsSetActive: disabled promiscuous mode on %s (%d)\n", pThis->szName, VBOX_GET_PCOUNT(pDev)));
            }
            pThis->u.s.fPromiscuousSet = false;

#ifdef LOG_ENABLED
            fIf = dev_get_flags(pDev);
            Log(("VBoxNetFlt: fIf=%#x; %d->%d\n", fIf, cPromiscBefore, VBOX_GET_PCOUNT(pDev)));
#endif
        }

        vboxNetFltLinuxReleaseNetDev(pThis, pDev);
    }
}
991
992
993int vboxNetFltOsDisconnectIt(PVBOXNETFLTINS pThis)
994{
995 /* Nothing to do here. */
996 return VINF_SUCCESS;
997}
998
999
1000int vboxNetFltOsConnectIt(PVBOXNETFLTINS pThis)
1001{
1002 /* Nothing to do here. */
1003 return VINF_SUCCESS;
1004}
1005
1006
/**
 * Destroys the Linux-specific parts of an instance.
 *
 * If still registered: removes the packet handler, purges queued frames and
 * drops the device reference; then unhooks the netdevice notifier and unpins
 * the module.
 *
 * @param   pThis   The instance.
 */
void vboxNetFltOsDeleteInstance(PVBOXNETFLTINS pThis)
{
    struct net_device *pDev;
    bool fRegistered;
    RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;

    /* Snapshot the device pointer and registration state under the lock. */
    RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
    pDev = (struct net_device *)ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev);
    fRegistered = ASMAtomicUoReadBool(&pThis->u.s.fRegistered);
    RTSpinlockRelease(pThis->hSpinlock, &Tmp);
    if (fRegistered)
    {
        dev_remove_pack(&pThis->u.s.PacketType);
        skb_queue_purge(&pThis->u.s.XmitQueue);
        Log(("vboxNetFltOsDeleteInstance: this=%p: Packet handler removed, xmit queue purged.\n", pThis));
        Log(("vboxNetFltOsDeleteInstance: Device %p(%s) released. ref=%d\n", pDev, pDev->name, atomic_read(&pDev->refcnt)));
        dev_put(pDev);
    }
    Log(("vboxNetFltOsDeleteInstance: this=%p: Notifier removed.\n", pThis));
    unregister_netdevice_notifier(&pThis->u.s.Notifier);
    module_put(THIS_MODULE);
}
1029
1030
/**
 * Creates the Linux-specific parts of an instance: registers the netdevice
 * notifier (which attaches to the interface via NETDEV_REGISTER handling) and
 * pins the module for the lifetime of the instance.
 *
 * @returns VBox status code.
 * @param   pThis       The instance.
 * @param   pvContext   Unused.
 */
int vboxNetFltOsInitInstance(PVBOXNETFLTINS pThis, void *pvContext)
{
    int err;
    NOREF(pvContext);

    pThis->u.s.Notifier.notifier_call = vboxNetFltLinuxNotifierCallback;
    err = register_netdevice_notifier(&pThis->u.s.Notifier);
    if (err)
        return VERR_INTNET_FLT_IF_FAILED;
    /* NOTE(review): fRegistered is read without the spinlock; presumably the
       notifier callback already ran for an existing interface during
       registration so this is the "interface not found" check - confirm. */
    if (!pThis->u.s.fRegistered)
    {
        unregister_netdevice_notifier(&pThis->u.s.Notifier);
        LogRel(("VBoxNetFlt: failed to find %s.\n", pThis->szName));
        return VERR_INTNET_FLT_IF_NOT_FOUND;
    }

    Log(("vboxNetFltOsInitInstance: this=%p: Notifier installed.\n", pThis));
    if (   pThis->fDisconnectedFromHost
        || !try_module_get(THIS_MODULE))
        return VERR_INTNET_FLT_IF_FAILED;

    return VINF_SUCCESS;
}
1054
/**
 * Initializes the Linux-specific members of a freshly allocated instance.
 *
 * No resources are acquired here; that happens in vboxNetFltOsInitInstance.
 *
 * @returns VINF_SUCCESS.
 * @param   pThis   The instance.
 */
int vboxNetFltOsPreInitInstance(PVBOXNETFLTINS pThis)
{
    /*
     * Init the linux specific members.
     */
    pThis->u.s.pDev = NULL;
    pThis->u.s.fRegistered = false;
    pThis->u.s.fPromiscuousSet = false;
    memset(&pThis->u.s.PacketType, 0, sizeof(pThis->u.s.PacketType));
    skb_queue_head_init(&pThis->u.s.XmitQueue);
    /* The work-item API changed in 2.6.20: the data pointer moved into
       work_struct itself, so older kernels need it passed explicitly. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
    INIT_WORK(&pThis->u.s.XmitTask, vboxNetFltLinuxXmitTask);
#else
    INIT_WORK(&pThis->u.s.XmitTask, vboxNetFltLinuxXmitTask, &pThis->u.s.XmitTask);
#endif

    return VINF_SUCCESS;
}
1073
注意: 瀏覽 TracBrowser 來幫助您使用儲存庫瀏覽器

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette