VirtualBox

source: vbox/trunk/src/VBox/Devices/Network/DevE1000.cpp

Last change on this file was 107792, checked in by vboxsync, 2 months ago

Devices/Network/DevE1000.cpp: Convert redundant condition in if into a debug assertion, cbFrame is at least 14 bytes at this point due to a check before, bugref:3409

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 339.5 KB
 
1/* $Id: DevE1000.cpp 107792 2025-01-15 16:15:30Z vboxsync $ */
2/** @file
3 * DevE1000 - Intel 82540EM Ethernet Controller Emulation.
4 *
5 * Implemented in accordance with the specification:
6 *
7 * PCI/PCI-X Family of Gigabit Ethernet Controllers Software Developer's Manual
8 * 82540EP/EM, 82541xx, 82544GC/EI, 82545GM/EM, 82546GB/EB, and 82547xx
9 *
10 * 317453-002 Revision 3.5
11 *
12 * @todo IPv6 checksum offloading support
13 * @todo Flexible Filter / Wakeup (optional?)
14 */
15
16/*
17 * Copyright (C) 2007-2024 Oracle and/or its affiliates.
18 *
19 * This file is part of VirtualBox base platform packages, as
20 * available from https://www.virtualbox.org.
21 *
22 * This program is free software; you can redistribute it and/or
23 * modify it under the terms of the GNU General Public License
24 * as published by the Free Software Foundation, in version 3 of the
25 * License.
26 *
27 * This program is distributed in the hope that it will be useful, but
28 * WITHOUT ANY WARRANTY; without even the implied warranty of
29 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
30 * General Public License for more details.
31 *
32 * You should have received a copy of the GNU General Public License
33 * along with this program; if not, see <https://www.gnu.org/licenses>.
34 *
35 * SPDX-License-Identifier: GPL-3.0-only
36 */
37
38
39/*********************************************************************************************************************************
40* Header Files *
41*********************************************************************************************************************************/
42#define LOG_GROUP LOG_GROUP_DEV_E1000
43#include <iprt/crc.h>
44#include <iprt/ctype.h>
45#include <iprt/net.h>
46#include <iprt/semaphore.h>
47#include <iprt/string.h>
48#include <iprt/time.h>
49#include <iprt/uuid.h>
50#include <VBox/vmm/pdmdev.h>
51#include <VBox/vmm/pdmnetifs.h>
52#include <VBox/vmm/pdmnetinline.h>
53#include <VBox/param.h>
54#include "VBoxDD.h"
55
56#include "DevEEPROM.h"
57#include "DevE1000Phy.h"
58
59
60/*********************************************************************************************************************************
61* Defined Constants And Macros *
62*********************************************************************************************************************************/
63/** @name E1000 Build Options
64 * @{ */
65/** @def E1K_INIT_RA0
66 * E1K_INIT_RA0 forces E1000 to set the first entry of the Receive Address filter
67 * table to the MAC address obtained from CFGM. Most guests read the MAC address from
68 * the EEPROM and write it to RA[0] explicitly, but Mac OS X seems to depend on it
69 * being already set (see @bugref{4657}).
70 */
71#define E1K_INIT_RA0
72/** @def E1K_LSC_ON_RESET
73 * E1K_LSC_ON_RESET causes e1000 to generate a Link Status Change
74 * interrupt after a hard reset. This makes the E1K_LSC_ON_SLU option unnecessary.
75 * With the cable unplugged, LSC is triggered for 82543GC only.
76 */
77#define E1K_LSC_ON_RESET
78/** @def E1K_LSC_ON_SLU
79 * E1K_LSC_ON_SLU causes E1000 to generate a Link Status Change interrupt when
80 * the guest driver brings up the link via the STATUS.LU bit. Again, the only guest
81 * that requires it is Mac OS X (see @bugref{4657}).
82 */
83//#define E1K_LSC_ON_SLU
84/** @def E1K_INIT_LINKUP_DELAY
85 * E1K_INIT_LINKUP_DELAY prevents the link from going up while the driver is
86 * still initializing (see @bugref{8624}).
87 */
88#define E1K_INIT_LINKUP_DELAY_US (2000 * 1000)
89/** @def E1K_IMS_INT_DELAY_NS
90 * E1K_IMS_INT_DELAY_NS prevents interrupt storms in Windows guests on enabling
91 * interrupts (see @bugref{8624}).
92 */
93#define E1K_IMS_INT_DELAY_NS 100
94/** @def E1K_TX_DELAY
95 * E1K_TX_DELAY aims to improve the guest-host transfer rate for TCP streams by
96 * preventing packets from being sent immediately. It allows several packets
97 * to be sent in a batch, reducing the number of acknowledgments. Note that it
98 * effectively disables the R0 TX path, forcing sending in R3.
99 */
100//#define E1K_TX_DELAY 150
101/** @def E1K_USE_TX_TIMERS
102 * E1K_USE_TX_TIMERS aims to reduce the number of generated TX interrupts if a
103 * guest driver sets the delays via the Transmit Interrupt Delay Value (TIDV)
104 * register. Enabling it showed no positive effect on existing guests, so it
105 * stays disabled. See sections 3.2.7.1 and 3.4.3.1 in "8254x Family of Gigabit
106 * Ethernet Controllers Software Developer’s Manual" for more detailed
107 * explanation.
108 */
109//#define E1K_USE_TX_TIMERS
110/** @def E1K_NO_TAD
111 * E1K_NO_TAD disables one of two timers enabled by E1K_USE_TX_TIMERS, the
112 * Transmit Absolute Delay time. This timer sets the maximum time interval
113 * during which TX interrupts can be postponed (delayed). It has no effect
114 * if E1K_USE_TX_TIMERS is not defined.
115 */
116//#define E1K_NO_TAD
117/** @def E1K_REL_DEBUG
118 * E1K_REL_DEBUG enables debug logging of l1, l2, l3 in release builds.
119 */
120//#define E1K_REL_DEBUG
121/** @def E1K_INT_STATS
122 * E1K_INT_STATS enables collection of internal statistics used for
123 * debugging of delayed interrupts, etc.
124 */
125#define E1K_INT_STATS
126/** @def E1K_WITH_MSI
127 * E1K_WITH_MSI enables rudimentary MSI support. Not implemented.
128 */
129//#define E1K_WITH_MSI
130/** @def E1K_WITH_TX_CS
131 * E1K_WITH_TX_CS protects e1kXmitPending with a critical section.
132 */
133#define E1K_WITH_TX_CS
134/** @def E1K_WITH_TXD_CACHE
135 * E1K_WITH_TXD_CACHE causes E1000 to fetch multiple TX descriptors in a
136 * single physical memory read (or two if it wraps around the end of the TX
137 * descriptor ring). It is required for proper functioning of bandwidth
138 * resource control, as it makes it possible to compute the exact sizes of
139 * packets prior to allocating their buffers (see @bugref{5582}).
140 */
141#define E1K_WITH_TXD_CACHE
142/** @def E1K_WITH_RXD_CACHE
143 * E1K_WITH_RXD_CACHE causes E1000 to fetch multiple RX descriptors in a
144 * single physical memory read (or two if it wraps around the end of the RX
145 * descriptor ring). Intel's packet driver for DOS needs this option in
146 * order to work properly (see @bugref{6217}).
147 */
148#define E1K_WITH_RXD_CACHE
149/** @def E1K_WITH_PREREG_MMIO
150 * E1K_WITH_PREREG_MMIO enables a new-style MMIO registration and is
151 * currently only used for testing the related PDM, IOM and PGM code. */
152//#define E1K_WITH_PREREG_MMIO
153/* @} */
154/* End of Options ************************************************************/
155
156#ifdef E1K_WITH_TXD_CACHE
157/**
158 * E1K_TXD_CACHE_SIZE specifies the maximum number of TX descriptors stored
159 * in the state structure. It limits the number of descriptors loaded in one
160 * batch read. For example, a Linux guest may use up to 20 descriptors per
161 * TSE packet. The largest TSE packet seen (from a Windows guest) used 45 descriptors.
162 */
163# define E1K_TXD_CACHE_SIZE 64u
164#endif /* E1K_WITH_TXD_CACHE */
165
166#ifdef E1K_WITH_RXD_CACHE
167/**
168 * E1K_RXD_CACHE_SIZE specifies the maximum number of RX descriptors stored
169 * in the state structure. It limits the number of descriptors loaded in one
170 * batch read. For example, a Windows XP guest adds 15 RX descriptors at a time.
171 */
172# define E1K_RXD_CACHE_SIZE 16u
173#endif /* E1K_WITH_RXD_CACHE */
174
175
176/* Little helpers ************************************************************/
177#undef htons
178#undef ntohs
179#undef htonl
180#undef ntohl
181#define htons(x) ((((x) & 0xff00) >> 8) | (((x) & 0x00ff) << 8))
182#define ntohs(x) htons(x)
183#define htonl(x) ASMByteSwapU32(x)
184#define ntohl(x) htonl(x)
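/*
 * Illustrative note (not part of the original source): the helpers above swap
 * bytes unconditionally, e.g. htons(0x1234) evaluates to 0x3412, so they match
 * network byte order only under the assumption of a little-endian host.
 */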
185
186#ifndef DEBUG
187# ifdef E1K_REL_DEBUG
188# define DEBUG
189# define E1kLog(a) LogRel(a)
190# define E1kLog2(a) LogRel(a)
191# define E1kLog3(a) LogRel(a)
192# define E1kLogX(x, a) LogRel(a)
193//# define E1kLog3(a) do {} while (0)
194# else
195# define E1kLog(a) do {} while (0)
196# define E1kLog2(a) do {} while (0)
197# define E1kLog3(a) do {} while (0)
198# define E1kLogX(x, a) do {} while (0)
199# endif
200#else
201# define E1kLog(a) Log(a)
202# define E1kLog2(a) Log2(a)
203# define E1kLog3(a) Log3(a)
204# define E1kLogX(x, a) LogIt(x, LOG_GROUP, a)
205//# define E1kLog(a) do {} while (0)
206//# define E1kLog2(a) do {} while (0)
207//# define E1kLog3(a) do {} while (0)
208#endif
209
210#if 0
211# define LOG_ENABLED
212# define E1kLogRel(a) LogRel(a)
213# undef Log6
214# define Log6(a) LogRel(a)
215#else
216# define E1kLogRel(a) do { } while (0)
217#endif
218
219//#undef DEBUG
220
221#define E1K_RELOCATE(p, o) *(RTHCUINTPTR *)&p += o
222
223#define E1K_INC_CNT32(cnt) \
224do { \
225 if (cnt < UINT32_MAX) \
226 cnt++; \
227} while (0)
228
229#define E1K_ADD_CNT64(cntLo, cntHi, val) \
230do { \
231 uint64_t u64Cnt = RT_MAKE_U64(cntLo, cntHi); \
232 uint64_t tmp = u64Cnt; \
233 u64Cnt += val; \
234 if (tmp > u64Cnt ) \
235 u64Cnt = UINT64_MAX; \
236 cntLo = (uint32_t)u64Cnt; \
237 cntHi = (uint32_t)(u64Cnt >> 32); \
238} while (0)
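/*
 * Illustrative use of the counter helpers above (a sketch, not taken verbatim
 * from this file; it assumes a local pThis as required by the register macros
 * below): bumping the Good Packets Received counter and adding a received
 * frame's size to the 64-bit GORCL/GORCH pair, which saturates at UINT64_MAX
 * instead of wrapping.
 *
 *     E1K_INC_CNT32(GPRC);
 *     E1K_ADD_CNT64(GORCL, GORCH, cbFrame);
 */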
239
240#ifdef E1K_INT_STATS
241# define E1K_INC_ISTAT_CNT(cnt) do { ++cnt; } while (0)
242#else /* E1K_INT_STATS */
243# define E1K_INC_ISTAT_CNT(cnt) do { } while (0)
244#endif /* E1K_INT_STATS */
245
246
247/*****************************************************************************/
248
249typedef uint32_t E1KCHIP;
250#define E1K_CHIP_82540EM 0
251#define E1K_CHIP_82543GC 1
252#define E1K_CHIP_82545EM 2
253
254#ifdef IN_RING3
255/** Different E1000 chips. */
256static const struct E1kChips
257{
258 uint16_t uPCIVendorId;
259 uint16_t uPCIDeviceId;
260 uint16_t uPCISubsystemVendorId;
261 uint16_t uPCISubsystemId;
262 const char *pcszName;
263} g_aChips[] =
264{
265 /* Vendor Device SSVendor SubSys Name */
266 { 0x8086,
267 /* Temporary code, as MSI-aware drivers dislike 0x100E. How to do that right? */
268# ifdef E1K_WITH_MSI
269 0x105E,
270# else
271 0x100E,
272# endif
273 0x8086, 0x001E, "82540EM" }, /* Intel 82540EM-A in Intel PRO/1000 MT Desktop */
274 { 0x8086, 0x1004, 0x8086, 0x1004, "82543GC" }, /* Intel 82543GC in Intel PRO/1000 T Server */
275 { 0x8086, 0x100F, 0x15AD, 0x0750, "82545EM" } /* Intel 82545EM-A in VMWare Network Adapter */
276};
277#endif /* IN_RING3 */
278
279
280/* The size of the register area mapped to I/O space */
281#define E1K_IOPORT_SIZE 0x8
282/* The size of the memory-mapped register area */
283#define E1K_MM_SIZE 0x20000
284
285#define E1K_MAX_TX_PKT_SIZE 16288
286#define E1K_MAX_RX_PKT_SIZE 16384
287
288/*****************************************************************************/
289
290#ifndef VBOX_DEVICE_STRUCT_TESTCASE
291/** Gets the specified bits from the register. */
292#define GET_BITS(reg, bits) ((reg & reg##_##bits##_MASK) >> reg##_##bits##_SHIFT)
293#define GET_BITS_V(val, reg, bits) ((val & reg##_##bits##_MASK) >> reg##_##bits##_SHIFT)
294#define BITS(reg, bits, bitval) (bitval << reg##_##bits##_SHIFT)
295#define SET_BITS(reg, bits, bitval) do { reg = (reg & ~reg##_##bits##_MASK) | (bitval << reg##_##bits##_SHIFT); } while (0)
296#define SET_BITS_V(val, reg, bits, bitval) do { val = (val & ~reg##_##bits##_MASK) | (bitval << reg##_##bits##_SHIFT); } while (0)
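/*
 * Illustrative expansion (a sketch, not part of the original source): with the
 * RCTL_BSIZE_MASK/_SHIFT definitions below, GET_BITS(RCTL, BSIZE) expands to
 *     ((RCTL & RCTL_BSIZE_MASK) >> RCTL_BSIZE_SHIFT)
 * while SET_BITS(RCTL, BSIZE, 1) rewrites only that field and leaves the
 * remaining RCTL bits untouched.
 */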
297
298#define CTRL_SLU UINT32_C(0x00000040)
299#define CTRL_MDIO UINT32_C(0x00100000)
300#define CTRL_MDC UINT32_C(0x00200000)
301#define CTRL_MDIO_DIR UINT32_C(0x01000000)
302#define CTRL_MDC_DIR UINT32_C(0x02000000)
303#define CTRL_RESET UINT32_C(0x04000000)
304#define CTRL_VME UINT32_C(0x40000000)
305
306#define STATUS_LU UINT32_C(0x00000002)
307#define STATUS_TXOFF UINT32_C(0x00000010)
308
309#define EECD_EE_WIRES UINT32_C(0x0F)
310#define EECD_EE_REQ UINT32_C(0x40)
311#define EECD_EE_GNT UINT32_C(0x80)
312
313#define EERD_START UINT32_C(0x00000001)
314#define EERD_DONE UINT32_C(0x00000010)
315#define EERD_DATA_MASK UINT32_C(0xFFFF0000)
316#define EERD_DATA_SHIFT 16
317#define EERD_ADDR_MASK UINT32_C(0x0000FF00)
318#define EERD_ADDR_SHIFT 8
319
320#define MDIC_DATA_MASK UINT32_C(0x0000FFFF)
321#define MDIC_DATA_SHIFT 0
322#define MDIC_REG_MASK UINT32_C(0x001F0000)
323#define MDIC_REG_SHIFT 16
324#define MDIC_PHY_MASK UINT32_C(0x03E00000)
325#define MDIC_PHY_SHIFT 21
326#define MDIC_OP_WRITE UINT32_C(0x04000000)
327#define MDIC_OP_READ UINT32_C(0x08000000)
328#define MDIC_READY UINT32_C(0x10000000)
329#define MDIC_INT_EN UINT32_C(0x20000000)
330#define MDIC_ERROR UINT32_C(0x40000000)
331
332#define TCTL_EN UINT32_C(0x00000002)
333#define TCTL_PSP UINT32_C(0x00000008)
334
335#define RCTL_EN UINT32_C(0x00000002)
336#define RCTL_UPE UINT32_C(0x00000008)
337#define RCTL_MPE UINT32_C(0x00000010)
338#define RCTL_LPE UINT32_C(0x00000020)
339#define RCTL_LBM_MASK UINT32_C(0x000000C0)
340#define RCTL_LBM_SHIFT 6
341#define RCTL_RDMTS_MASK UINT32_C(0x00000300)
342#define RCTL_RDMTS_SHIFT 8
343#define RCTL_LBM_TCVR UINT32_C(3) /**< PHY or external SerDes loopback. */
344#define RCTL_MO_MASK UINT32_C(0x00003000)
345#define RCTL_MO_SHIFT 12
346#define RCTL_BAM UINT32_C(0x00008000)
347#define RCTL_BSIZE_MASK UINT32_C(0x00030000)
348#define RCTL_BSIZE_SHIFT 16
349#define RCTL_VFE UINT32_C(0x00040000)
350#define RCTL_CFIEN UINT32_C(0x00080000)
351#define RCTL_CFI UINT32_C(0x00100000)
352#define RCTL_BSEX UINT32_C(0x02000000)
353#define RCTL_SECRC UINT32_C(0x04000000)
354
355#define ICR_TXDW UINT32_C(0x00000001)
356#define ICR_TXQE UINT32_C(0x00000002)
357#define ICR_LSC UINT32_C(0x00000004)
358#define ICR_RXDMT0 UINT32_C(0x00000010)
359#define ICR_RXT0 UINT32_C(0x00000080)
360#define ICR_TXD_LOW UINT32_C(0x00008000)
361#define RDTR_FPD UINT32_C(0x80000000)
362
363#define PBA_st ((PBAST*)(pThis->auRegs + PBA_IDX))
364typedef struct
365{
366 unsigned rxa : 7;
367 unsigned rxa_r : 9;
368 unsigned txa : 16;
369} PBAST;
370AssertCompileSize(PBAST, 4);
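/*
 * Illustrative use of the overlay above (a sketch, assuming a local pThis of
 * type PE1KSTATE as with the register macros below): PBA_st->txa reads the
 * transmit allocation part of the Packet Buffer Allocation register through
 * the bit-field view instead of shifting and masking PBA by hand.
 */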
371
372#define TXDCTL_WTHRESH_MASK 0x003F0000
373#define TXDCTL_WTHRESH_SHIFT 16
374#define TXDCTL_LWTHRESH_MASK 0xFE000000
375#define TXDCTL_LWTHRESH_SHIFT 25
376
377#define RXCSUM_PCSS_MASK UINT32_C(0x000000FF)
378#define RXCSUM_PCSS_SHIFT 0
379
380/** @name Register access macros
381 * @remarks These ASSUME a local variable @a pThis of type PE1KSTATE.
382 * @{ */
383#define CTRL pThis->auRegs[CTRL_IDX]
384#define STATUS pThis->auRegs[STATUS_IDX]
385#define EECD pThis->auRegs[EECD_IDX]
386#define EERD pThis->auRegs[EERD_IDX]
387#define CTRL_EXT pThis->auRegs[CTRL_EXT_IDX]
388#define FLA pThis->auRegs[FLA_IDX]
389#define MDIC pThis->auRegs[MDIC_IDX]
390#define FCAL pThis->auRegs[FCAL_IDX]
391#define FCAH pThis->auRegs[FCAH_IDX]
392#define FCT pThis->auRegs[FCT_IDX]
393#define VET pThis->auRegs[VET_IDX]
394#define ICR pThis->auRegs[ICR_IDX]
395#define ITR pThis->auRegs[ITR_IDX]
396#define ICS pThis->auRegs[ICS_IDX]
397#define IMS pThis->auRegs[IMS_IDX]
398#define IMC pThis->auRegs[IMC_IDX]
399#define RCTL pThis->auRegs[RCTL_IDX]
400#define FCTTV pThis->auRegs[FCTTV_IDX]
401#define TXCW pThis->auRegs[TXCW_IDX]
402#define RXCW pThis->auRegs[RXCW_IDX]
403#define TCTL pThis->auRegs[TCTL_IDX]
404#define TIPG pThis->auRegs[TIPG_IDX]
405#define AIFS pThis->auRegs[AIFS_IDX]
406#define LEDCTL pThis->auRegs[LEDCTL_IDX]
407#define PBA pThis->auRegs[PBA_IDX]
408#define FCRTL pThis->auRegs[FCRTL_IDX]
409#define FCRTH pThis->auRegs[FCRTH_IDX]
410#define RDFH pThis->auRegs[RDFH_IDX]
411#define RDFT pThis->auRegs[RDFT_IDX]
412#define RDFHS pThis->auRegs[RDFHS_IDX]
413#define RDFTS pThis->auRegs[RDFTS_IDX]
414#define RDFPC pThis->auRegs[RDFPC_IDX]
415#define RDBAL pThis->auRegs[RDBAL_IDX]
416#define RDBAH pThis->auRegs[RDBAH_IDX]
417#define RDLEN pThis->auRegs[RDLEN_IDX]
418#define RDH pThis->auRegs[RDH_IDX]
419#define RDT pThis->auRegs[RDT_IDX]
420#define RDTR pThis->auRegs[RDTR_IDX]
421#define RXDCTL pThis->auRegs[RXDCTL_IDX]
422#define RADV pThis->auRegs[RADV_IDX]
423#define RSRPD pThis->auRegs[RSRPD_IDX]
424#define TXDMAC pThis->auRegs[TXDMAC_IDX]
425#define TDFH pThis->auRegs[TDFH_IDX]
426#define TDFT pThis->auRegs[TDFT_IDX]
427#define TDFHS pThis->auRegs[TDFHS_IDX]
428#define TDFTS pThis->auRegs[TDFTS_IDX]
429#define TDFPC pThis->auRegs[TDFPC_IDX]
430#define TDBAL pThis->auRegs[TDBAL_IDX]
431#define TDBAH pThis->auRegs[TDBAH_IDX]
432#define TDLEN pThis->auRegs[TDLEN_IDX]
433#define TDH pThis->auRegs[TDH_IDX]
434#define TDT pThis->auRegs[TDT_IDX]
435#define TIDV pThis->auRegs[TIDV_IDX]
436#define TXDCTL pThis->auRegs[TXDCTL_IDX]
437#define TADV pThis->auRegs[TADV_IDX]
438#define TSPMT pThis->auRegs[TSPMT_IDX]
439#define CRCERRS pThis->auRegs[CRCERRS_IDX]
440#define ALGNERRC pThis->auRegs[ALGNERRC_IDX]
441#define SYMERRS pThis->auRegs[SYMERRS_IDX]
442#define RXERRC pThis->auRegs[RXERRC_IDX]
443#define MPC pThis->auRegs[MPC_IDX]
444#define SCC pThis->auRegs[SCC_IDX]
445#define ECOL pThis->auRegs[ECOL_IDX]
446#define MCC pThis->auRegs[MCC_IDX]
447#define LATECOL pThis->auRegs[LATECOL_IDX]
448#define COLC pThis->auRegs[COLC_IDX]
449#define DC pThis->auRegs[DC_IDX]
450#define TNCRS pThis->auRegs[TNCRS_IDX]
451/* #define SEC pThis->auRegs[SEC_IDX] Conflict with sys/time.h */
452#define CEXTERR pThis->auRegs[CEXTERR_IDX]
453#define RLEC pThis->auRegs[RLEC_IDX]
454#define XONRXC pThis->auRegs[XONRXC_IDX]
455#define XONTXC pThis->auRegs[XONTXC_IDX]
456#define XOFFRXC pThis->auRegs[XOFFRXC_IDX]
457#define XOFFTXC pThis->auRegs[XOFFTXC_IDX]
458#define FCRUC pThis->auRegs[FCRUC_IDX]
459#define PRC64 pThis->auRegs[PRC64_IDX]
460#define PRC127 pThis->auRegs[PRC127_IDX]
461#define PRC255 pThis->auRegs[PRC255_IDX]
462#define PRC511 pThis->auRegs[PRC511_IDX]
463#define PRC1023 pThis->auRegs[PRC1023_IDX]
464#define PRC1522 pThis->auRegs[PRC1522_IDX]
465#define GPRC pThis->auRegs[GPRC_IDX]
466#define BPRC pThis->auRegs[BPRC_IDX]
467#define MPRC pThis->auRegs[MPRC_IDX]
468#define GPTC pThis->auRegs[GPTC_IDX]
469#define GORCL pThis->auRegs[GORCL_IDX]
470#define GORCH pThis->auRegs[GORCH_IDX]
471#define GOTCL pThis->auRegs[GOTCL_IDX]
472#define GOTCH pThis->auRegs[GOTCH_IDX]
473#define RNBC pThis->auRegs[RNBC_IDX]
474#define RUC pThis->auRegs[RUC_IDX]
475#define RFC pThis->auRegs[RFC_IDX]
476#define ROC pThis->auRegs[ROC_IDX]
477#define RJC pThis->auRegs[RJC_IDX]
478#define MGTPRC pThis->auRegs[MGTPRC_IDX]
479#define MGTPDC pThis->auRegs[MGTPDC_IDX]
480#define MGTPTC pThis->auRegs[MGTPTC_IDX]
481#define TORL pThis->auRegs[TORL_IDX]
482#define TORH pThis->auRegs[TORH_IDX]
483#define TOTL pThis->auRegs[TOTL_IDX]
484#define TOTH pThis->auRegs[TOTH_IDX]
485#define TPR pThis->auRegs[TPR_IDX]
486#define TPT pThis->auRegs[TPT_IDX]
487#define PTC64 pThis->auRegs[PTC64_IDX]
488#define PTC127 pThis->auRegs[PTC127_IDX]
489#define PTC255 pThis->auRegs[PTC255_IDX]
490#define PTC511 pThis->auRegs[PTC511_IDX]
491#define PTC1023 pThis->auRegs[PTC1023_IDX]
492#define PTC1522 pThis->auRegs[PTC1522_IDX]
493#define MPTC pThis->auRegs[MPTC_IDX]
494#define BPTC pThis->auRegs[BPTC_IDX]
495#define TSCTC pThis->auRegs[TSCTC_IDX]
496#define TSCTFC pThis->auRegs[TSCTFC_IDX]
497#define RXCSUM pThis->auRegs[RXCSUM_IDX]
498#define WUC pThis->auRegs[WUC_IDX]
499#define WUFC pThis->auRegs[WUFC_IDX]
500#define WUS pThis->auRegs[WUS_IDX]
501#define MANC pThis->auRegs[MANC_IDX]
502#define IPAV pThis->auRegs[IPAV_IDX]
503#define WUPL pThis->auRegs[WUPL_IDX]
504/** @} */
505#endif /* VBOX_DEVICE_STRUCT_TESTCASE */
506
507/**
508 * Indices of memory-mapped registers in register table.
509 */
510typedef enum
511{
512 CTRL_IDX,
513 STATUS_IDX,
514 EECD_IDX,
515 EERD_IDX,
516 CTRL_EXT_IDX,
517 FLA_IDX,
518 MDIC_IDX,
519 FCAL_IDX,
520 FCAH_IDX,
521 FCT_IDX,
522 VET_IDX,
523 ICR_IDX,
524 ITR_IDX,
525 ICS_IDX,
526 IMS_IDX,
527 IMC_IDX,
528 RCTL_IDX,
529 FCTTV_IDX,
530 TXCW_IDX,
531 RXCW_IDX,
532 TCTL_IDX,
533 TIPG_IDX,
534 AIFS_IDX,
535 LEDCTL_IDX,
536 PBA_IDX,
537 FCRTL_IDX,
538 FCRTH_IDX,
539 RDFH_IDX,
540 RDFT_IDX,
541 RDFHS_IDX,
542 RDFTS_IDX,
543 RDFPC_IDX,
544 RDBAL_IDX,
545 RDBAH_IDX,
546 RDLEN_IDX,
547 RDH_IDX,
548 RDT_IDX,
549 RDTR_IDX,
550 RXDCTL_IDX,
551 RADV_IDX,
552 RSRPD_IDX,
553 TXDMAC_IDX,
554 TDFH_IDX,
555 TDFT_IDX,
556 TDFHS_IDX,
557 TDFTS_IDX,
558 TDFPC_IDX,
559 TDBAL_IDX,
560 TDBAH_IDX,
561 TDLEN_IDX,
562 TDH_IDX,
563 TDT_IDX,
564 TIDV_IDX,
565 TXDCTL_IDX,
566 TADV_IDX,
567 TSPMT_IDX,
568 CRCERRS_IDX,
569 ALGNERRC_IDX,
570 SYMERRS_IDX,
571 RXERRC_IDX,
572 MPC_IDX,
573 SCC_IDX,
574 ECOL_IDX,
575 MCC_IDX,
576 LATECOL_IDX,
577 COLC_IDX,
578 DC_IDX,
579 TNCRS_IDX,
580 SEC_IDX,
581 CEXTERR_IDX,
582 RLEC_IDX,
583 XONRXC_IDX,
584 XONTXC_IDX,
585 XOFFRXC_IDX,
586 XOFFTXC_IDX,
587 FCRUC_IDX,
588 PRC64_IDX,
589 PRC127_IDX,
590 PRC255_IDX,
591 PRC511_IDX,
592 PRC1023_IDX,
593 PRC1522_IDX,
594 GPRC_IDX,
595 BPRC_IDX,
596 MPRC_IDX,
597 GPTC_IDX,
598 GORCL_IDX,
599 GORCH_IDX,
600 GOTCL_IDX,
601 GOTCH_IDX,
602 RNBC_IDX,
603 RUC_IDX,
604 RFC_IDX,
605 ROC_IDX,
606 RJC_IDX,
607 MGTPRC_IDX,
608 MGTPDC_IDX,
609 MGTPTC_IDX,
610 TORL_IDX,
611 TORH_IDX,
612 TOTL_IDX,
613 TOTH_IDX,
614 TPR_IDX,
615 TPT_IDX,
616 PTC64_IDX,
617 PTC127_IDX,
618 PTC255_IDX,
619 PTC511_IDX,
620 PTC1023_IDX,
621 PTC1522_IDX,
622 MPTC_IDX,
623 BPTC_IDX,
624 TSCTC_IDX,
625 TSCTFC_IDX,
626 RXCSUM_IDX,
627 WUC_IDX,
628 WUFC_IDX,
629 WUS_IDX,
630 MANC_IDX,
631 IPAV_IDX,
632 WUPL_IDX,
633 MTA_IDX,
634 RA_IDX,
635 VFTA_IDX,
636 IP4AT_IDX,
637 IP6AT_IDX,
638 WUPM_IDX,
639 FFLT_IDX,
640 FFMT_IDX,
641 FFVT_IDX,
642 PBM_IDX,
643 RA_82542_IDX,
644 MTA_82542_IDX,
645 VFTA_82542_IDX,
646 E1K_NUM_OF_REGS
647} E1kRegIndex;
648
649#define E1K_NUM_OF_32BIT_REGS MTA_IDX
650/** The number of registers with strictly increasing offset. */
651#define E1K_NUM_OF_BINARY_SEARCHABLE (WUPL_IDX + 1)
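/*
 * A minimal lookup sketch (assumption: offReg is a byte offset into the MMIO
 * window) showing how the constant above allows a binary search over the
 * strictly ordered head of g_aE1kRegMap defined further down; entries past it
 * would need a linear scan:
 *
 *     int iLo = 0, iHi = E1K_NUM_OF_BINARY_SEARCHABLE;
 *     while (iLo < iHi)
 *     {
 *         int iMid = (iLo + iHi) / 2;
 *         if (offReg < g_aE1kRegMap[iMid].offset)
 *             iHi = iMid;
 *         else if (offReg >= g_aE1kRegMap[iMid].offset + g_aE1kRegMap[iMid].size)
 *             iLo = iMid + 1;
 *         else
 *             return iMid; // offReg falls within this register
 *     }
 */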
652
653
654/**
655 * Define E1000-specific EEPROM layout.
656 */
657struct E1kEEPROM
658{
659 public:
660 EEPROM93C46 eeprom;
661
662#ifdef IN_RING3
663 /**
664 * Initialize EEPROM content.
665 *
666 * @param macAddr MAC address of E1000.
667 */
668 void init(RTMAC &macAddr)
669 {
670 eeprom.init();
671 memcpy(eeprom.m_au16Data, macAddr.au16, sizeof(macAddr.au16));
672 eeprom.m_au16Data[0x04] = 0xFFFF;
673 /*
674 * bit 3 - full support for power management
675 * bit 10 - full duplex
676 */
677 eeprom.m_au16Data[0x0A] = 0x4408;
678 eeprom.m_au16Data[0x0B] = 0x001E;
679 eeprom.m_au16Data[0x0C] = 0x8086;
680 eeprom.m_au16Data[0x0D] = 0x100E;
681 eeprom.m_au16Data[0x0E] = 0x8086;
682 eeprom.m_au16Data[0x0F] = 0x3040;
683 eeprom.m_au16Data[0x21] = 0x7061;
684 eeprom.m_au16Data[0x22] = 0x280C;
685 eeprom.m_au16Data[0x23] = 0x00C8;
686 eeprom.m_au16Data[0x24] = 0x00C8;
687 eeprom.m_au16Data[0x2F] = 0x0602;
688 updateChecksum();
689 };
690
691 /**
692 * Compute the checksum as required by E1000 and store it
693 * in the last word.
694 */
695 void updateChecksum()
696 {
697 uint16_t u16Checksum = 0;
698
699 for (int i = 0; i < eeprom.SIZE-1; i++)
700 u16Checksum += eeprom.m_au16Data[i];
701 eeprom.m_au16Data[eeprom.SIZE-1] = 0xBABA - u16Checksum;
702 };
703
704 /**
705 * The first 6 bytes of the EEPROM contain the MAC address.
706 *
707 * @param pMac Where to store the MAC address of the E1000.
708 */
709 void getMac(PRTMAC pMac)
710 {
711 memcpy(pMac->au16, eeprom.m_au16Data, sizeof(pMac->au16));
712 };
713
714 uint32_t read()
715 {
716 return eeprom.read();
717 }
718
719 void write(uint32_t u32Wires)
720 {
721 eeprom.write(u32Wires);
722 }
723
724 bool readWord(uint32_t u32Addr, uint16_t *pu16Value)
725 {
726 return eeprom.readWord(u32Addr, pu16Value);
727 }
728
729 int load(PCPDMDEVHLPR3 pHlp, PSSMHANDLE pSSM)
730 {
731 return eeprom.load(pHlp, pSSM);
732 }
733
734 void save(PCPDMDEVHLPR3 pHlp, PSSMHANDLE pSSM)
735 {
736 eeprom.save(pHlp, pSSM);
737 }
738#endif /* IN_RING3 */
739};
740
741
742#define E1K_SPEC_VLAN(s) (s & 0xFFF)
743#define E1K_SPEC_CFI(s) (!!((s>>12) & 0x1))
744#define E1K_SPEC_PRI(s) ((s>>13) & 0x7)
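/*
 * Illustrative decoding (a sketch, not part of the original source): for a
 * "special" field value s = 0x7123 the helpers above yield
 * E1K_SPEC_VLAN(s) = 0x123 (VLAN id), E1K_SPEC_CFI(s) = 1 and
 * E1K_SPEC_PRI(s) = 3 (user priority).
 */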
745
746struct E1kRxDStatus
747{
748 /** @name Descriptor Status field (3.2.3.1)
749 * @{ */
750 unsigned fDD : 1; /**< Descriptor Done. */
751 unsigned fEOP : 1; /**< End of packet. */
752 unsigned fIXSM : 1; /**< Ignore checksum indication. */
753 unsigned fVP : 1; /**< VLAN, matches VET. */
754 unsigned : 1;
755 unsigned fTCPCS : 1; /**< TCP Checksum calculated on the packet. */
756 unsigned fIPCS : 1; /**< IP Checksum calculated on the packet. */
757 unsigned fPIF : 1; /**< Passed in-exact filter */
758 /** @} */
759 /** @name Descriptor Errors field (3.2.3.2)
760 * (Only valid when fEOP and fDD are set.)
761 * @{ */
762 unsigned fCE : 1; /**< CRC or alignment error. */
763 unsigned : 4; /**< Reserved, varies with different models... */
764 unsigned fTCPE : 1; /**< TCP/UDP checksum error. */
765 unsigned fIPE : 1; /**< IP Checksum error. */
766 unsigned fRXE : 1; /**< RX Data error. */
767 /** @} */
768 /** @name Descriptor Special field (3.2.3.3)
769 * @{ */
770 unsigned u16Special : 16; /**< VLAN: Id, Canonical form, Priority. */
771 /** @} */
772};
773typedef struct E1kRxDStatus E1KRXDST;
774
775struct E1kRxDesc_st
776{
777 uint64_t u64BufAddr; /**< Address of data buffer */
778 uint16_t u16Length; /**< Length of data in buffer */
779 uint16_t u16Checksum; /**< Packet checksum */
780 E1KRXDST status;
781};
782typedef struct E1kRxDesc_st E1KRXDESC;
783AssertCompileSize(E1KRXDESC, 16);
784
785#define E1K_DTYP_LEGACY -1
786#define E1K_DTYP_CONTEXT 0
787#define E1K_DTYP_DATA 1
788#define E1K_DTYP_INVALID 2
789
790struct E1kTDLegacy
791{
792 uint64_t u64BufAddr; /**< Address of data buffer */
793 struct TDLCmd_st
794 {
795 unsigned u16Length : 16;
796 unsigned u8CSO : 8;
797 /* CMD field : 8 */
798 unsigned fEOP : 1;
799 unsigned fIFCS : 1;
800 unsigned fIC : 1;
801 unsigned fRS : 1;
802 unsigned fRPS : 1;
803 unsigned fDEXT : 1;
804 unsigned fVLE : 1;
805 unsigned fIDE : 1;
806 } cmd;
807 struct TDLDw3_st
808 {
809 /* STA field */
810 unsigned fDD : 1;
811 unsigned fEC : 1;
812 unsigned fLC : 1;
813 unsigned fTURSV : 1;
814 /* RSV field */
815 unsigned u4RSV : 4;
816 /* CSS field */
817 unsigned u8CSS : 8;
818 /* Special field*/
819 unsigned u16Special: 16;
820 } dw3;
821};
822
823/**
824 * TCP/IP Context Transmit Descriptor, section 3.3.6.
825 */
826struct E1kTDContext
827{
828 struct CheckSum_st
829 {
830 /** TSE: Header start. !TSE: Checksum start. */
831 unsigned u8CSS : 8;
832 /** Checksum offset - where to store it. */
833 unsigned u8CSO : 8;
834 /** Checksum ending (inclusive) offset, 0 = end of packet. */
835 unsigned u16CSE : 16;
836 } ip;
837 struct CheckSum_st tu;
838 struct TDCDw2_st
839 {
840 /** TSE: The total number of payload bytes for this context. Sans header. */
841 unsigned u20PAYLEN : 20;
842 /** The descriptor type - E1K_DTYP_CONTEXT (0). */
843 unsigned u4DTYP : 4;
844 /** TUCMD field, 8 bits
845 * @{ */
846 /** TSE: TCP (set) or UDP (clear). */
847 unsigned fTCP : 1;
848 /** TSE: IPv4 (set) or IPv6 (clear) - for finding the payload length field in
849 * the IP header. Does not affect the checksumming.
850 * @remarks 82544GC/EI interprets a cleared field differently. */
851 unsigned fIP : 1;
852 /** TSE: TCP segmentation enable. When clear, the context only describes checksum offloading. */
853 unsigned fTSE : 1;
854 /** Report status (only applies to dw3.fDD for here). */
855 unsigned fRS : 1;
856 /** Reserved, MBZ. */
857 unsigned fRSV1 : 1;
858 /** Descriptor extension, must be set for this descriptor type. */
859 unsigned fDEXT : 1;
860 /** Reserved, MBZ. */
861 unsigned fRSV2 : 1;
862 /** Interrupt delay enable. */
863 unsigned fIDE : 1;
864 /** @} */
865 } dw2;
866 struct TDCDw3_st
867 {
868 /** Descriptor Done. */
869 unsigned fDD : 1;
870 /** Reserved, MBZ. */
871 unsigned u7RSV : 7;
872 /** TSO: The header (prototype) length (Ethernet[, VLAN tag], IP, TCP/UDP). */
873 unsigned u8HDRLEN : 8;
874 /** TSO: Maximum segment size. */
875 unsigned u16MSS : 16;
876 } dw3;
877};
878typedef struct E1kTDContext E1KTXCTX;
879
880/**
881 * TCP/IP Data Transmit Descriptor, section 3.3.7.
882 */
883struct E1kTDData
884{
885 uint64_t u64BufAddr; /**< Address of data buffer */
886 struct TDDCmd_st
887 {
888 /** The total length of data pointed to by this descriptor. */
889 unsigned u20DTALEN : 20;
890 /** The descriptor type - E1K_DTYP_DATA (1). */
891 unsigned u4DTYP : 4;
892 /** @name DCMD field, 8 bits (3.3.7.1).
893 * @{ */
894 /** End of packet. Note TSCTFC update. */
895 unsigned fEOP : 1;
896 /** Insert Ethernet FCS/CRC (requires fEOP to be set). */
897 unsigned fIFCS : 1;
898 /** Use the TSE context when set and the normal when clear. */
899 unsigned fTSE : 1;
900 /** Report status (dw3.STA). */
901 unsigned fRS : 1;
902 /** Reserved. 82544GC/EI defines this as report packet sent (RPS). */
903 unsigned fRPS : 1;
904 /** Descriptor extension, must be set for this descriptor type. */
905 unsigned fDEXT : 1;
906 /** VLAN enable, requires CTRL.VME, auto enables FCS/CRC.
907 * Insert dw3.SPECIAL after ethernet header. */
908 unsigned fVLE : 1;
909 /** Interrupt delay enable. */
910 unsigned fIDE : 1;
911 /** @} */
912 } cmd;
913 struct TDDDw3_st
914 {
915 /** @name STA field (3.3.7.2)
916 * @{ */
917 unsigned fDD : 1; /**< Descriptor done. */
918 unsigned fEC : 1; /**< Excess collision. */
919 unsigned fLC : 1; /**< Late collision. */
920 /** Reserved, except for the usual oddball (82544GC/EI) where it's called TU. */
921 unsigned fTURSV : 1;
922 /** @} */
923 unsigned u4RSV : 4; /**< Reserved field, MBZ. */
924 /** @name POPTS (Packet Option) field (3.3.7.3)
925 * @{ */
926 unsigned fIXSM : 1; /**< Insert IP checksum. */
927 unsigned fTXSM : 1; /**< Insert TCP/UDP checksum. */
928 unsigned u6RSV : 6; /**< Reserved, MBZ. */
929 /** @} */
930 /** @name SPECIAL field - VLAN tag to be inserted after ethernet header.
931 * Requires fEOP, fVLE and CTRL.VME to be set.
932 * @{ */
933 unsigned u16Special: 16; /**< VLAN: Id, Canonical form, Priority. */
934 /** @} */
935 } dw3;
936};
937typedef struct E1kTDData E1KTXDAT;
938
939union E1kTxDesc
940{
941 struct E1kTDLegacy legacy;
942 struct E1kTDContext context;
943 struct E1kTDData data;
944};
945typedef union E1kTxDesc E1KTXDESC;
946AssertCompileSize(E1KTXDESC, 16);
947
948#define RA_CTL_AS 0x0003
949#define RA_CTL_AV 0x8000
950
951union E1kRecAddr
952{
953 uint32_t au32[32];
954 struct RAArray
955 {
956 uint8_t addr[6];
957 uint16_t ctl;
958 } array[16];
959};
960typedef struct E1kRecAddr::RAArray E1KRAELEM;
961typedef union E1kRecAddr E1KRA;
962AssertCompileSize(E1KRA, 8*16);
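/*
 * Illustrative filter check (a sketch, not part of the original source): a
 * receive address entry in the array above only participates in address
 * matching when its Address Valid bit is set, i.e.
 * (aRecAddr.array[i].ctl & RA_CTL_AV) != 0, while RA_CTL_AS selects which
 * address the entry is compared against.
 */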
963
964#define E1K_IP_RF UINT16_C(0x8000) /**< reserved fragment flag */
965#define E1K_IP_DF UINT16_C(0x4000) /**< dont fragment flag */
966#define E1K_IP_MF UINT16_C(0x2000) /**< more fragments flag */
967#define E1K_IP_OFFMASK UINT16_C(0x1fff) /**< mask for fragmenting bits */
968
969/** @todo use+extend RTNETIPV4 */
970struct E1kIpHeader
971{
972 /* type of service / version / header length */
973 uint16_t tos_ver_hl;
974 /* total length */
975 uint16_t total_len;
976 /* identification */
977 uint16_t ident;
978 /* fragment offset field */
979 uint16_t offset;
980 /* time to live / protocol*/
981 uint16_t ttl_proto;
982 /* checksum */
983 uint16_t chksum;
984 /* source IP address */
985 uint32_t src;
986 /* destination IP address */
987 uint32_t dest;
988};
989AssertCompileSize(struct E1kIpHeader, 20);
990
991#define E1K_TCP_FIN UINT16_C(0x01)
992#define E1K_TCP_SYN UINT16_C(0x02)
993#define E1K_TCP_RST UINT16_C(0x04)
994#define E1K_TCP_PSH UINT16_C(0x08)
995#define E1K_TCP_ACK UINT16_C(0x10)
996#define E1K_TCP_URG UINT16_C(0x20)
997#define E1K_TCP_ECE UINT16_C(0x40)
998#define E1K_TCP_CWR UINT16_C(0x80)
999#define E1K_TCP_FLAGS UINT16_C(0x3f)
1000
1001/** @todo use+extend RTNETTCP */
1002struct E1kTcpHeader
1003{
1004 uint16_t src;
1005 uint16_t dest;
1006 uint32_t seqno;
1007 uint32_t ackno;
1008 uint16_t hdrlen_flags;
1009 uint16_t wnd;
1010 uint16_t chksum;
1011 uint16_t urgp;
1012};
1013AssertCompileSize(struct E1kTcpHeader, 20);
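/*
 * Illustrative decoding of the combined hdrlen_flags word (a sketch), using
 * the local ntohs() helper defined above: the TCP header length in bytes is
 * 4 * (ntohs(hdrlen_flags) >> 12) and the classic flag bits are
 * ntohs(hdrlen_flags) & E1K_TCP_FLAGS (see the E1K_TCP_* masks above).
 */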
1014
1015
1016#ifdef E1K_WITH_TXD_CACHE
1017/** The current Saved state version. */
1018# define E1K_SAVEDSTATE_VERSION 4
1019/** Saved state version for VirtualBox 4.2 with VLAN tag fields. */
1020# define E1K_SAVEDSTATE_VERSION_VBOX_42_VTAG 3
1021#else /* !E1K_WITH_TXD_CACHE */
1022/** The current Saved state version. */
1023# define E1K_SAVEDSTATE_VERSION 3
1024#endif /* !E1K_WITH_TXD_CACHE */
1025/** Saved state version for VirtualBox 4.1 and earlier.
1026 * These did not include VLAN tag fields. */
1027#define E1K_SAVEDSTATE_VERSION_VBOX_41 2
1028/** Saved state version for VirtualBox 3.0 and earlier.
1029 * This did not include the configuration part nor the E1kEEPROM. */
1030#define E1K_SAVEDSTATE_VERSION_VBOX_30 1
1031
1032/**
1033 * E1000 shared device state.
1034 *
1035 * This is shared between ring-0 and ring-3.
1036 */
1037typedef struct E1KSTATE
1038{
1039 char szPrf[8]; /**< Log prefix, e.g. E1000#1. */
1040
1041 /** Handle to PCI region \#0, the MMIO region. */
1042 IOMMMIOHANDLE hMmioRegion;
1043 /** Handle to PCI region \#2, the I/O ports. */
1044 IOMIOPORTHANDLE hIoPorts;
1045
1046 /** Receive Interrupt Delay Timer. */
1047 TMTIMERHANDLE hRIDTimer;
1048 /** Receive Absolute Delay Timer. */
1049 TMTIMERHANDLE hRADTimer;
1050 /** Transmit Interrupt Delay Timer. */
1051 TMTIMERHANDLE hTIDTimer;
1052 /** Transmit Absolute Delay Timer. */
1053 TMTIMERHANDLE hTADTimer;
1054 /** Transmit Delay Timer. */
1055 TMTIMERHANDLE hTXDTimer;
1056 /** Late Interrupt Timer. */
1057 TMTIMERHANDLE hIntTimer;
1058 /** Link Up(/Restore) Timer. */
1059 TMTIMERHANDLE hLUTimer;
1060
1061 /** Transmit task. */
1062 PDMTASKHANDLE hTxTask;
1063
1064 /** Critical section - what is it protecting? */
1065 PDMCRITSECT cs;
1066 /** RX Critical section. */
1067 PDMCRITSECT csRx;
1068#ifdef E1K_WITH_TX_CS
1069 /** TX Critical section. */
1070 PDMCRITSECT csTx;
1071#endif /* E1K_WITH_TX_CS */
1072 /** MAC address obtained from the configuration. */
1073 RTMAC macConfigured;
1074 uint16_t u16Padding0;
1075 /** EMT: Last time the interrupt was acknowledged. */
1076 uint64_t u64AckedAt;
1077 /** All: Used for eliminating spurious interrupts. */
1078 bool fIntRaised;
1079 /** EMT: false if the cable is disconnected by the GUI. */
1080 bool fCableConnected;
1081 /** true if the device is attached to a driver. */
1082 bool fIsAttached;
1083 /** EMT: Compute Ethernet CRC for RX packets. */
1084 bool fEthernetCRC;
1085 /** All: throttle interrupts. */
1086 bool fItrEnabled;
1087 /** All: throttle RX interrupts. */
1088 bool fItrRxEnabled;
1089 /** All: Delay TX interrupts using TIDV/TADV. */
1090 bool fTidEnabled;
1091 bool afPadding[2];
1092 /** Link up delay (in milliseconds). */
1093 uint32_t cMsLinkUpDelay;
1094
1095 /** All: Device register storage. */
1096 uint32_t auRegs[E1K_NUM_OF_32BIT_REGS];
1097 /** TX/RX: Status LED. */
1098 PDMLED led;
1099 /** TX/RX: Number of the packet being sent/received, shown in the debug log. */
1100 uint32_t u32PktNo;
1101
1102 /** EMT: Offset of the register to be read via IO. */
1103 uint32_t uSelectedReg;
1104 /** EMT: Multicast Table Array. */
1105 uint32_t auMTA[128];
1106 /** EMT: Receive Address registers. */
1107 E1KRA aRecAddr;
1108 /** EMT: VLAN filter table array. */
1109 uint32_t auVFTA[128];
1110 /** EMT: Receive buffer size. */
1111 uint16_t u16RxBSize;
1112 /** EMT: Locked state -- no state alteration possible. */
1113 bool fLocked;
1114 /** EMT: */
1115 bool fDelayInts;
1116 /** All: */
1117 bool fIntMaskUsed;
1118
1119 /** N/A: */
1120 bool volatile fMaybeOutOfSpace;
1121 /** EMT: Gets signalled when more RX descriptors become available. */
1122 SUPSEMEVENT hEventMoreRxDescAvail;
1123#ifdef E1K_WITH_RXD_CACHE
1124 /** RX: Fetched RX descriptors. */
1125 E1KRXDESC aRxDescriptors[E1K_RXD_CACHE_SIZE];
1126 //uint64_t aRxDescAddr[E1K_RXD_CACHE_SIZE];
1127 /** RX: Actual number of fetched RX descriptors. */
1128 uint32_t nRxDFetched;
1129 /** RX: Index in cache of RX descriptor being processed. */
1130 uint32_t iRxDCurrent;
1131#endif /* E1K_WITH_RXD_CACHE */
1132
1133 /** TX: Context used for TCP segmentation packets. */
1134 E1KTXCTX contextTSE;
1135 /** TX: Context used for ordinary packets. */
1136 E1KTXCTX contextNormal;
1137#ifdef E1K_WITH_TXD_CACHE
1138 /** TX: Fetched TX descriptors. */
1139 E1KTXDESC aTxDescriptors[E1K_TXD_CACHE_SIZE];
1140 /** TX: Validity of TX descriptors. Set by e1kLocateTxPacket, used by e1kXmitPacket. */
1141 bool afTxDValid[E1K_TXD_CACHE_SIZE];
1142 /** TX: Actual number of fetched TX descriptors. */
1143 uint8_t nTxDFetched;
1144 /** TX: Index in cache of TX descriptor being processed. */
1145 uint8_t iTxDCurrent;
1146 /** TX: Will this frame be sent as GSO. */
1147 bool fGSO;
1148 /** Alignment padding. */
1149 bool fReserved;
1150 /** TX: Number of bytes in next packet. */
1151 uint32_t cbTxAlloc;
1152
1153#endif /* E1K_WITH_TXD_CACHE */
1154 /** GSO context. u8Type is set to PDMNETWORKGSOTYPE_INVALID when not
1155 * applicable to the current TSE mode. */
1156 PDMNETWORKGSO GsoCtx;
1157 /** Scratch space for holding the loopback / fallback scatter / gather
1158 * descriptor. */
1159 union
1160 {
1161 PDMSCATTERGATHER Sg;
1162 uint8_t padding[8 * sizeof(RTUINTPTR)];
1163 } uTxFallback;
1164 /** TX: Transmit packet buffer used for TSE fallback and loopback. */
1165 uint8_t aTxPacketFallback[E1K_MAX_TX_PKT_SIZE];
1166 /** TX: Number of bytes assembled in TX packet buffer. */
1167 uint16_t u16TxPktLen;
1168 /** TX: When false, forces segmentation in e1000 instead of sending frames as GSO. */
1169 bool fGSOEnabled;
1170 /** TX: IP checksum has to be inserted if true. */
1171 bool fIPcsum;
1172 /** TX: TCP/UDP checksum has to be inserted if true. */
1173 bool fTCPcsum;
1174 /** TX: VLAN tag has to be inserted if true. */
1175 bool fVTag;
1176 /** TX: TCI part of VLAN tag to be inserted. */
1177 uint16_t u16VTagTCI;
1178 /** TX TSE fallback: Number of payload bytes remaining in TSE context. */
1179 uint32_t u32PayRemain;
1180 /** TX TSE fallback: Number of header bytes remaining in TSE context. */
1181 uint16_t u16HdrRemain;
1182 /** TX TSE fallback: Flags from template header. */
1183 uint16_t u16SavedFlags;
1184 /** TX TSE fallback: Partial checksum from template header. */
1185 uint32_t u32SavedCsum;
1186 /** ?: Emulated controller type. */
1187 E1KCHIP eChip;
1188
1189 /** EMT: Physical interface emulation. */
1190 PHY phy;
1191
1192#if 0
1193 /** Alignment padding. */
1194 uint8_t Alignment[HC_ARCH_BITS == 64 ? 8 : 4];
1195#endif
1196
1197 STAMCOUNTER StatReceiveBytes;
1198 STAMCOUNTER StatTransmitBytes;
1199#if defined(VBOX_WITH_STATISTICS)
1200 STAMPROFILEADV StatMMIOReadRZ;
1201 STAMPROFILEADV StatMMIOReadR3;
1202 STAMPROFILEADV StatMMIOWriteRZ;
1203 STAMPROFILEADV StatMMIOWriteR3;
1204 STAMPROFILEADV StatEEPROMRead;
1205 STAMPROFILEADV StatEEPROMWrite;
1206 STAMPROFILEADV StatIOReadRZ;
1207 STAMPROFILEADV StatIOReadR3;
1208 STAMPROFILEADV StatIOWriteRZ;
1209 STAMPROFILEADV StatIOWriteR3;
1210 STAMPROFILEADV StatLateIntTimer;
1211 STAMCOUNTER StatLateInts;
1212 STAMCOUNTER StatIntsRaised;
1213 STAMCOUNTER StatIntsPrevented;
1214 STAMPROFILEADV StatReceive;
1215 STAMPROFILEADV StatReceiveCRC;
1216 STAMPROFILEADV StatReceiveFilter;
1217 STAMPROFILEADV StatReceiveStore;
1218 STAMPROFILEADV StatTransmitRZ;
1219 STAMPROFILEADV StatTransmitR3;
1220 STAMPROFILE StatTransmitSendRZ;
1221 STAMPROFILE StatTransmitSendR3;
1222 STAMPROFILE StatRxOverflow;
1223 STAMCOUNTER StatRxOverflowWakeupRZ;
1224 STAMCOUNTER StatRxOverflowWakeupR3;
1225 STAMCOUNTER StatTxDescCtxNormal;
1226 STAMCOUNTER StatTxDescCtxTSE;
1227 STAMCOUNTER StatTxDescLegacy;
1228 STAMCOUNTER StatTxDescData;
1229 STAMCOUNTER StatTxDescTSEData;
1230 STAMCOUNTER StatTxPathFallback;
1231 STAMCOUNTER StatTxPathGSO;
1232 STAMCOUNTER StatTxPathRegular;
1233 STAMCOUNTER StatPHYAccesses;
1234 STAMCOUNTER aStatRegWrites[E1K_NUM_OF_REGS];
1235 STAMCOUNTER aStatRegReads[E1K_NUM_OF_REGS];
1236#endif /* VBOX_WITH_STATISTICS */
1237
1238#ifdef E1K_INT_STATS
1239 /* Internal stats */
1240 uint64_t u64ArmedAt;
1241 uint64_t uStatMaxTxDelay;
1242 uint32_t uStatInt;
1243 uint32_t uStatIntTry;
1244 uint32_t uStatIntLower;
1245 uint32_t uStatNoIntICR;
1246 int32_t iStatIntLost;
1247 int32_t iStatIntLostOne;
1248 uint32_t uStatIntIMS;
1249 uint32_t uStatIntSkip;
1250 uint32_t uStatIntLate;
1251 uint32_t uStatIntMasked;
1252 uint32_t uStatIntEarly;
1253 uint32_t uStatIntRx;
1254 uint32_t uStatIntTx;
1255 uint32_t uStatIntICS;
1256 uint32_t uStatIntRDTR;
1257 uint32_t uStatIntRXDMT0;
1258 uint32_t uStatIntTXQE;
1259 uint32_t uStatTxNoRS;
1260 uint32_t uStatTxIDE;
1261 uint32_t uStatTxDelayed;
1262 uint32_t uStatTxDelayExp;
1263 uint32_t uStatTAD;
1264 uint32_t uStatTID;
1265 uint32_t uStatRAD;
1266 uint32_t uStatRID;
1267 uint32_t uStatRxFrm;
1268 uint32_t uStatTxFrm;
1269 uint32_t uStatDescCtx;
1270 uint32_t uStatDescDat;
1271 uint32_t uStatDescLeg;
1272 uint32_t uStatTx1514;
1273 uint32_t uStatTx2962;
1274 uint32_t uStatTx4410;
1275 uint32_t uStatTx5858;
1276 uint32_t uStatTx7306;
1277 uint32_t uStatTx8754;
1278 uint32_t uStatTx16384;
1279 uint32_t uStatTx32768;
1280 uint32_t uStatTxLarge;
1281 uint32_t uStatAlign;
1282#endif /* E1K_INT_STATS */
1283} E1KSTATE;
1284/** Pointer to the E1000 device state. */
1285typedef E1KSTATE *PE1KSTATE;
1286
1287/**
1288 * E1000 ring-3 device state
1289 *
1290 * @implements PDMINETWORKDOWN
1291 * @implements PDMINETWORKCONFIG
1292 * @implements PDMILEDPORTS
1293 */
1294typedef struct E1KSTATER3
1295{
1296 PDMIBASE IBase;
1297 PDMINETWORKDOWN INetworkDown;
1298 PDMINETWORKCONFIG INetworkConfig;
1299 /** LED interface */
1300 PDMILEDPORTS ILeds;
1301 /** Attached network driver. */
1302 R3PTRTYPE(PPDMIBASE) pDrvBase;
1303 R3PTRTYPE(PPDMILEDCONNECTORS) pLedsConnector;
1304
1305 /** Pointer to the shared state. */
1306 R3PTRTYPE(PE1KSTATE) pShared;
1307
1308 /** Device instance. */
1309 PPDMDEVINSR3 pDevInsR3;
1310 /** Attached network driver. */
1311 PPDMINETWORKUPR3 pDrvR3;
1312 /** The scatter / gather buffer used for the current outgoing packet. */
1313 R3PTRTYPE(PPDMSCATTERGATHER) pTxSgR3;
1314
1315 /** EMT: EEPROM emulation */
1316 E1kEEPROM eeprom;
1317} E1KSTATER3;
1318/** Pointer to the E1000 ring-3 device state. */
1319typedef E1KSTATER3 *PE1KSTATER3;
1320
1321
1322/**
1323 * E1000 ring-0 device state
1324 */
1325typedef struct E1KSTATER0
1326{
1327 /** Device instance. */
1328 PPDMDEVINSR0 pDevInsR0;
1329 /** Attached network driver. */
1330 PPDMINETWORKUPR0 pDrvR0;
1331 /** The scatter / gather buffer used for the current outgoing packet - R0. */
1332 R0PTRTYPE(PPDMSCATTERGATHER) pTxSgR0;
1333} E1KSTATER0;
1334/** Pointer to the E1000 ring-0 device state. */
1335typedef E1KSTATER0 *PE1KSTATER0;
1336
1337
1338/**
1339 * E1000 raw-mode device state
1340 */
1341typedef struct E1KSTATERC
1342{
1343 /** Device instance. */
1344 PPDMDEVINSRC pDevInsRC;
1345 /** Attached network driver. */
1346 PPDMINETWORKUPRC pDrvRC;
1347 /** The scatter / gather buffer used for the current outgoing packet. */
1348 RCPTRTYPE(PPDMSCATTERGATHER) pTxSgRC;
1349} E1KSTATERC;
1350/** Pointer to the E1000 raw-mode device state. */
1351typedef E1KSTATERC *PE1KSTATERC;
1352
1353
1354/** @def PE1KSTATECC
1355 * Pointer to the instance data for the current context. */
1356#ifdef IN_RING3
1357typedef E1KSTATER3 E1KSTATECC;
1358typedef PE1KSTATER3 PE1KSTATECC;
1359#elif defined(IN_RING0)
1360typedef E1KSTATER0 E1KSTATECC;
1361typedef PE1KSTATER0 PE1KSTATECC;
1362#elif defined(IN_RC)
1363typedef E1KSTATERC E1KSTATECC;
1364typedef PE1KSTATERC PE1KSTATECC;
1365#else
1366# error "Not IN_RING3, IN_RING0 or IN_RC"
1367#endif
1368
1369
1370#ifndef VBOX_DEVICE_STRUCT_TESTCASE
1371
1372/* Forward declarations ******************************************************/
1373static int e1kXmitPending(PPDMDEVINS pDevIns, PE1KSTATE pThis, bool fOnWorkerThread);
1374
1375/**
1376 * E1000 register read handler.
1377 */
1378typedef int (FNE1KREGREAD)(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1379/**
1380 * E1000 register write handler.
1381 */
1382typedef int (FNE1KREGWRITE)(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1383
1384static FNE1KREGREAD e1kRegReadUnimplemented;
1385static FNE1KREGWRITE e1kRegWriteUnimplemented;
1386static FNE1KREGREAD e1kRegReadAutoClear;
1387static FNE1KREGREAD e1kRegReadDefault;
1388static FNE1KREGWRITE e1kRegWriteDefault;
1389#if 0 /* unused */
1390static FNE1KREGREAD e1kRegReadCTRL;
1391#endif
1392static FNE1KREGWRITE e1kRegWriteCTRL;
1393static FNE1KREGREAD e1kRegReadEECD;
1394static FNE1KREGWRITE e1kRegWriteEECD;
1395static FNE1KREGWRITE e1kRegWriteEERD;
1396static FNE1KREGWRITE e1kRegWriteMDIC;
1397static FNE1KREGREAD e1kRegReadICR;
1398static FNE1KREGWRITE e1kRegWriteICR;
1399static FNE1KREGREAD e1kRegReadICS;
1400static FNE1KREGWRITE e1kRegWriteICS;
1401static FNE1KREGWRITE e1kRegWriteIMS;
1402static FNE1KREGWRITE e1kRegWriteIMC;
1403static FNE1KREGWRITE e1kRegWriteRCTL;
1404static FNE1KREGWRITE e1kRegWritePBA;
1405static FNE1KREGWRITE e1kRegWriteRDT;
1406static FNE1KREGWRITE e1kRegWriteRDTR;
1407static FNE1KREGWRITE e1kRegWriteTDT;
1408static FNE1KREGREAD e1kRegReadMTA;
1409static FNE1KREGWRITE e1kRegWriteMTA;
1410static FNE1KREGREAD e1kRegReadRA;
1411static FNE1KREGWRITE e1kRegWriteRA;
1412static FNE1KREGREAD e1kRegReadVFTA;
1413static FNE1KREGWRITE e1kRegWriteVFTA;
1414
1415/**
1416 * Register map table.
1417 *
1418 * Override pfnRead and pfnWrite to get register-specific behavior.
1419 */
1420static const struct E1kRegMap_st
1421{
1422 /** Register offset in the register space. */
1423 uint32_t offset;
1424 /** Size in bytes. Registers of size > 4 are in fact tables. */
1425 uint32_t size;
1426 /** Readable bits. */
1427 uint32_t readable;
1428 /** Writable bits. */
1429 uint32_t writable;
1430 /** Read callback. */
1431 FNE1KREGREAD *pfnRead;
1432 /** Write callback. */
1433 FNE1KREGWRITE *pfnWrite;
1434 /** Abbreviated name. */
1435 const char *abbrev;
1436 /** Full name. */
1437 const char *name;
1438} g_aE1kRegMap[E1K_NUM_OF_REGS] =
1439{
1440 /* offset size read mask write mask read callback write callback abbrev full name */
1441 /*------- ------- ---------- ---------- ----------------------- ------------------------ ---------- ------------------------------*/
1442 { 0x00000, 0x00004, 0xDBF31BE9, 0xDBF31BE9, e1kRegReadDefault , e1kRegWriteCTRL , "CTRL" , "Device Control" },
1443 { 0x00008, 0x00004, 0x0000FDFF, 0x00000000, e1kRegReadDefault , e1kRegWriteUnimplemented, "STATUS" , "Device Status" },
1444 { 0x00010, 0x00004, 0x000027F0, 0x00000070, e1kRegReadEECD , e1kRegWriteEECD , "EECD" , "EEPROM/Flash Control/Data" },
1445 { 0x00014, 0x00004, 0xFFFFFF10, 0xFFFFFF00, e1kRegReadDefault , e1kRegWriteEERD , "EERD" , "EEPROM Read" },
1446 { 0x00018, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "CTRL_EXT", "Extended Device Control" },
1447 { 0x0001c, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FLA" , "Flash Access (N/A)" },
1448 { 0x00020, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteMDIC , "MDIC" , "MDI Control" },
1449 { 0x00028, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCAL" , "Flow Control Address Low" },
1450 { 0x0002c, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCAH" , "Flow Control Address High" },
1451 { 0x00030, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCT" , "Flow Control Type" },
1452 { 0x00038, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "VET" , "VLAN EtherType" },
1453 { 0x000c0, 0x00004, 0x0001F6DF, 0x0001F6DF, e1kRegReadICR , e1kRegWriteICR , "ICR" , "Interrupt Cause Read" },
1454 { 0x000c4, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "ITR" , "Interrupt Throttling" },
1455 { 0x000c8, 0x00004, 0x0001F6DF, 0xFFFFFFFF, e1kRegReadICS , e1kRegWriteICS , "ICS" , "Interrupt Cause Set" },
1456 { 0x000d0, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteIMS , "IMS" , "Interrupt Mask Set/Read" },
1457 { 0x000d8, 0x00004, 0x00000000, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteIMC , "IMC" , "Interrupt Mask Clear" },
1458 { 0x00100, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteRCTL , "RCTL" , "Receive Control" },
1459 { 0x00170, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCTTV" , "Flow Control Transmit Timer Value" },
1460 { 0x00178, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TXCW" , "Transmit Configuration Word (N/A)" },
1461 { 0x00180, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RXCW" , "Receive Configuration Word (N/A)" },
1462 { 0x00400, 0x00004, 0x017FFFFA, 0x017FFFFA, e1kRegReadDefault , e1kRegWriteDefault , "TCTL" , "Transmit Control" },
1463 { 0x00410, 0x00004, 0x3FFFFFFF, 0x3FFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TIPG" , "Transmit IPG" },
1464 { 0x00458, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "AIFS" , "Adaptive IFS Throttle - AIT" },
1465 { 0x00e00, 0x00004, 0xCFCFCFCF, 0xCFCFCFCF, e1kRegReadDefault , e1kRegWriteDefault , "LEDCTL" , "LED Control" },
1466 { 0x01000, 0x00004, 0xFFFF007F, 0x0000007F, e1kRegReadDefault , e1kRegWritePBA , "PBA" , "Packet Buffer Allocation" },
1467 { 0x02160, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCRTL" , "Flow Control Receive Threshold Low" },
1468 { 0x02168, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCRTH" , "Flow Control Receive Threshold High" },
1469 { 0x02410, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFH" , "Receive Data FIFO Head" },
1470 { 0x02418, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFT" , "Receive Data FIFO Tail" },
1471 { 0x02420, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFHS" , "Receive Data FIFO Head Saved Register" },
1472 { 0x02428, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFTS" , "Receive Data FIFO Tail Saved Register" },
1473 { 0x02430, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFPC" , "Receive Data FIFO Packet Count" },
1474 { 0x02800, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDBAL" , "Receive Descriptor Base Low" },
1475 { 0x02804, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDBAH" , "Receive Descriptor Base High" },
1476 { 0x02808, 0x00004, 0x000FFF80, 0x000FFF80, e1kRegReadDefault , e1kRegWriteDefault , "RDLEN" , "Receive Descriptor Length" },
1477 { 0x02810, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDH" , "Receive Descriptor Head" },
1478 { 0x02818, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteRDT , "RDT" , "Receive Descriptor Tail" },
1479 { 0x02820, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteRDTR , "RDTR" , "Receive Delay Timer" },
1480 { 0x02828, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RXDCTL" , "Receive Descriptor Control" },
1481 { 0x0282c, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "RADV" , "Receive Interrupt Absolute Delay Timer" },
1482 { 0x02c00, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RSRPD" , "Receive Small Packet Detect Interrupt" },
1483 { 0x03000, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TXDMAC" , "TX DMA Control (N/A)" },
1484 { 0x03410, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFH" , "Transmit Data FIFO Head" },
1485 { 0x03418, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFT" , "Transmit Data FIFO Tail" },
1486 { 0x03420, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFHS" , "Transmit Data FIFO Head Saved Register" },
1487 { 0x03428, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFTS" , "Transmit Data FIFO Tail Saved Register" },
1488 { 0x03430, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFPC" , "Transmit Data FIFO Packet Count" },
1489 { 0x03800, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDBAL" , "Transmit Descriptor Base Low" },
1490 { 0x03804, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDBAH" , "Transmit Descriptor Base High" },
1491 { 0x03808, 0x00004, 0x000FFF80, 0x000FFF80, e1kRegReadDefault , e1kRegWriteDefault , "TDLEN" , "Transmit Descriptor Length" },
1492 { 0x03810, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDH" , "Transmit Descriptor Head" },
1493 { 0x03818, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteTDT , "TDT" , "Transmit Descriptor Tail" },
1494 { 0x03820, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "TIDV" , "Transmit Interrupt Delay Value" },
1495 { 0x03828, 0x00004, 0xFF3F3F3F, 0xFF3F3F3F, e1kRegReadDefault , e1kRegWriteDefault , "TXDCTL" , "Transmit Descriptor Control" },
1496 { 0x0382c, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "TADV" , "Transmit Absolute Interrupt Delay Timer" },
1497 { 0x03830, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TSPMT" , "TCP Segmentation Pad and Threshold" },
1498 { 0x04000, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "CRCERRS" , "CRC Error Count" },
1499 { 0x04004, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "ALGNERRC", "Alignment Error Count" },
1500 { 0x04008, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "SYMERRS" , "Symbol Error Count" },
1501 { 0x0400c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RXERRC" , "RX Error Count" },
1502 { 0x04010, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MPC" , "Missed Packets Count" },
1503 { 0x04014, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "SCC" , "Single Collision Count" },
1504 { 0x04018, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "ECOL" , "Excessive Collisions Count" },
1505 { 0x0401c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MCC" , "Multiple Collision Count" },
1506 { 0x04020, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "LATECOL" , "Late Collisions Count" },
1507 { 0x04028, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "COLC" , "Collision Count" },
1508 { 0x04030, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "DC" , "Defer Count" },
1509 { 0x04034, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TNCRS" , "Transmit - No CRS" },
1510 { 0x04038, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "SEC" , "Sequence Error Count" },
1511 { 0x0403c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "CEXTERR" , "Carrier Extension Error Count" },
1512 { 0x04040, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RLEC" , "Receive Length Error Count" },
1513 { 0x04048, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XONRXC" , "XON Received Count" },
1514 { 0x0404c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XONTXC" , "XON Transmitted Count" },
1515 { 0x04050, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XOFFRXC" , "XOFF Received Count" },
1516 { 0x04054, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XOFFTXC" , "XOFF Transmitted Count" },
1517 { 0x04058, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCRUC" , "FC Received Unsupported Count" },
1518 { 0x0405c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC64" , "Packets Received (64 Bytes) Count" },
1519 { 0x04060, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC127" , "Packets Received (65-127 Bytes) Count" },
1520 { 0x04064, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC255" , "Packets Received (128-255 Bytes) Count" },
1521 { 0x04068, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC511" , "Packets Received (256-511 Bytes) Count" },
1522 { 0x0406c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC1023" , "Packets Received (512-1023 Bytes) Count" },
1523 { 0x04070, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC1522" , "Packets Received (1024-Max Bytes)" },
1524 { 0x04074, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GPRC" , "Good Packets Received Count" },
1525 { 0x04078, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "BPRC" , "Broadcast Packets Received Count" },
1526 { 0x0407c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "MPRC" , "Multicast Packets Received Count" },
1527 { 0x04080, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GPTC" , "Good Packets Transmitted Count" },
1528 { 0x04088, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GORCL" , "Good Octets Received Count (Low)" },
1529 { 0x0408c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GORCH" , "Good Octets Received Count (Hi)" },
1530 { 0x04090, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GOTCL" , "Good Octets Transmitted Count (Low)" },
1531 { 0x04094, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GOTCH" , "Good Octets Transmitted Count (Hi)" },
1532 { 0x040a0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RNBC" , "Receive No Buffers Count" },
1533 { 0x040a4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RUC" , "Receive Undersize Count" },
1534 { 0x040a8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RFC" , "Receive Fragment Count" },
1535 { 0x040ac, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "ROC" , "Receive Oversize Count" },
1536 { 0x040b0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RJC" , "Receive Jabber Count" },
1537 { 0x040b4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MGTPRC" , "Management Packets Received Count" },
1538 { 0x040b8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MGTPDC" , "Management Packets Dropped Count" },
1539 { 0x040bc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MGTPTC" , "Management Pkts Transmitted Count" },
1540 { 0x040c0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TORL" , "Total Octets Received (Lo)" },
1541 { 0x040c4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TORH" , "Total Octets Received (Hi)" },
1542 { 0x040c8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TOTL" , "Total Octets Transmitted (Lo)" },
1543 { 0x040cc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TOTH" , "Total Octets Transmitted (Hi)" },
1544 { 0x040d0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TPR" , "Total Packets Received" },
1545 { 0x040d4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TPT" , "Total Packets Transmitted" },
1546 { 0x040d8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC64" , "Packets Transmitted (64 Bytes) Count" },
1547 { 0x040dc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC127" , "Packets Transmitted (65-127 Bytes) Count" },
1548 { 0x040e0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC255" , "Packets Transmitted (128-255 Bytes) Count" },
1549 { 0x040e4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC511" , "Packets Transmitted (256-511 Bytes) Count" },
1550 { 0x040e8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC1023" , "Packets Transmitted (512-1023 Bytes) Count" },
1551 { 0x040ec, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC1522" , "Packets Transmitted (1024 Bytes or Greater) Count" },
1552 { 0x040f0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "MPTC" , "Multicast Packets Transmitted Count" },
1553 { 0x040f4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "BPTC" , "Broadcast Packets Transmitted Count" },
1554 { 0x040f8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TSCTC" , "TCP Segmentation Context Transmitted Count" },
1555 { 0x040fc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TSCTFC" , "TCP Segmentation Context Tx Fail Count" },
1556 { 0x05000, 0x00004, 0x000007FF, 0x000007FF, e1kRegReadDefault , e1kRegWriteDefault , "RXCSUM" , "Receive Checksum Control" },
1557 { 0x05800, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUC" , "Wakeup Control" },
1558 { 0x05808, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUFC" , "Wakeup Filter Control" },
1559 { 0x05810, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUS" , "Wakeup Status" },
1560 { 0x05820, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "MANC" , "Management Control" },
1561 { 0x05838, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "IPAV" , "IP Address Valid" },
1562 { 0x05900, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUPL" , "Wakeup Packet Length" },
1563 { 0x05200, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadMTA , e1kRegWriteMTA , "MTA" , "Multicast Table Array (n)" },
1564 { 0x05400, 0x00080, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadRA , e1kRegWriteRA , "RA" , "Receive Address (64-bit) (n)" },
1565 { 0x05600, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadVFTA , e1kRegWriteVFTA , "VFTA" , "VLAN Filter Table Array (n)" },
1566 { 0x05840, 0x0001c, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "IP4AT" , "IPv4 Address Table" },
1567 { 0x05880, 0x00010, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "IP6AT" , "IPv6 Address Table" },
1568 { 0x05a00, 0x00080, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUPM" , "Wakeup Packet Memory" },
1569 { 0x05f00, 0x0001c, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FFLT" , "Flexible Filter Length Table" },
1570 { 0x09000, 0x003fc, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FFMT" , "Flexible Filter Mask Table" },
1571 { 0x09800, 0x003fc, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FFVT" , "Flexible Filter Value Table" },
1572 { 0x10000, 0x10000, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "PBM" , "Packet Buffer Memory (n)" },
1573 { 0x00040, 0x00080, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadRA , e1kRegWriteRA , "RA82542" , "Receive Address (64-bit) (n) (82542)" },
1574 { 0x00200, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadMTA , e1kRegWriteMTA , "MTA82542", "Multicast Table Array (n) (82542)" },
1575 { 0x00600, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadVFTA , e1kRegWriteVFTA , "VFTA82542", "VLAN Filter Table Array (n) (82542)" }
1576};
1577
1578#ifdef LOG_ENABLED
1579
1580/**
1581 * Convert U32 value to hex string. Masked bytes are replaced with dots.
1582 *
1583  * @remarks The mask has half-byte (not bit) granularity (e.g. 0000000F).
1584 *
1585 * @returns The buffer.
1586 *
1587 * @param u32 The word to convert into string.
1588  * @param mask Selects which half-bytes (nibbles) to convert.
1589 * @param buf Where to put the result.
1590 */
1591static char *e1kU32toHex(uint32_t u32, uint32_t mask, char *buf)
1592{
1593 for (char *ptr = buf + 7; ptr >= buf; --ptr, u32 >>=4, mask >>=4)
1594 {
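 /* Map each unmasked nibble to '0'..'9' or 'A'..'F'; adding '7' (i.e. 'A' - 10) handles digits above 9. */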
1595 if (mask & 0xF)
1596 *ptr = (u32 & 0xF) + ((u32 & 0xF) > 9 ? '7' : '0');
1597 else
1598 *ptr = '.';
1599 }
1600 buf[8] = 0;
1601 return buf;
1602}
1603
1604/**
1605 * Returns timer name for debug purposes.
1606 *
1607 * @returns The timer name.
1608 *
1609 * @param pThis The device state structure.
1610 * @param hTimer The timer to name.
1611 */
1612DECLINLINE(const char *) e1kGetTimerName(PE1KSTATE pThis, TMTIMERHANDLE hTimer)
1613{
1614 if (hTimer == pThis->hTIDTimer)
1615 return "TID";
1616 if (hTimer == pThis->hTADTimer)
1617 return "TAD";
1618 if (hTimer == pThis->hRIDTimer)
1619 return "RID";
1620 if (hTimer == pThis->hRADTimer)
1621 return "RAD";
1622 if (hTimer == pThis->hIntTimer)
1623 return "Int";
1624 if (hTimer == pThis->hTXDTimer)
1625 return "TXD";
1626 if (hTimer == pThis->hLUTimer)
1627 return "LinkUp";
1628 return "unknown";
1629}
1630
1631#endif /* LOG_ENABLED */
1632
1633/**
1634 * Arm a timer.
1635 *
1636 * @param pDevIns The device instance.
1637 * @param pThis Pointer to the device state structure.
1638 * @param hTimer The timer to arm.
1639 * @param uExpireIn Expiration interval in microseconds.
1640 */
1641DECLINLINE(void) e1kArmTimer(PPDMDEVINS pDevIns, PE1KSTATE pThis, TMTIMERHANDLE hTimer, uint32_t uExpireIn)
1642{
1643 if (pThis->fLocked)
1644 return;
1645
1646 E1kLog2(("%s Arming %s timer to fire in %d usec...\n",
1647 pThis->szPrf, e1kGetTimerName(pThis, hTimer), uExpireIn));
1648 int rc = PDMDevHlpTimerSetMicro(pDevIns, hTimer, uExpireIn);
1649 AssertRC(rc);
1650}
1651
1652#ifdef IN_RING3
1653/**
1654 * Cancel a timer.
1655 *
1656 * @param pDevIns The device instance.
1657 * @param pThis Pointer to the device state structure.
1658  * @param hTimer The handle of the timer to cancel.
1659 */
1660DECLINLINE(void) e1kCancelTimer(PPDMDEVINS pDevIns, PE1KSTATE pThis, TMTIMERHANDLE hTimer)
1661{
1662 E1kLog2(("%s Stopping %s timer...\n",
1663 pThis->szPrf, e1kGetTimerName(pThis, hTimer)));
1664 int rc = PDMDevHlpTimerStop(pDevIns, hTimer);
1665 if (RT_FAILURE(rc))
1666 E1kLog2(("%s e1kCancelTimer: TMTimerStop(%s) failed with %Rrc\n",
1667 pThis->szPrf, e1kGetTimerName(pThis, hTimer), rc));
1668 RT_NOREF_PV(pThis);
1669}
1670#endif /* IN_RING3 */
1671
1672
1673#define e1kCsEnter(ps, rcBusy) PDMDevHlpCritSectEnter(pDevIns, &(ps)->cs, (rcBusy))
1674#define e1kCsEnterReturn(ps, rcBusy) do { \
1675 int const rcLock = PDMDevHlpCritSectEnter(pDevIns, &(ps)->cs, (rcBusy)); \
1676 if (rcLock == VINF_SUCCESS) { /* likely */ } \
1677 else return rcLock; \
1678 } while (0)
1679#define e1kR3CsEnterAsserted(ps) do { \
1680 int const rcLock = PDMDevHlpCritSectEnter(pDevIns, &(ps)->cs, VERR_SEM_BUSY); \
1681 PDM_CRITSECT_RELEASE_ASSERT_RC_DEV(pDevIns, &(ps)->cs, rcLock); \
1682 } while (0)
1683#define e1kCsLeave(ps) PDMDevHlpCritSectLeave(pDevIns, &(ps)->cs)
1684
1685
1686#define e1kCsRxEnter(ps, rcBusy) PDMDevHlpCritSectEnter(pDevIns, &(ps)->csRx, (rcBusy))
1687#define e1kCsRxEnterReturn(ps) do { \
1688 int const rcLock = PDMDevHlpCritSectEnter(pDevIns, &(ps)->csRx, VERR_SEM_BUSY); \
1689 AssertRCReturn(rcLock, rcLock); \
1690 } while (0)
1691#define e1kR3CsRxEnterAsserted(ps) do { \
1692 int const rcLock = PDMDevHlpCritSectEnter(pDevIns, &(ps)->csRx, VERR_SEM_BUSY); \
1693 PDM_CRITSECT_RELEASE_ASSERT_RC_DEV(pDevIns, &(ps)->csRx, rcLock); \
1694 } while (0)
1695#define e1kCsRxLeave(ps) PDMDevHlpCritSectLeave(pDevIns, &(ps)->csRx)
1696#define e1kCsRxIsOwner(ps) PDMDevHlpCritSectIsOwner(pDevIns, &(ps)->csRx)
1697
1698
1699#ifndef E1K_WITH_TX_CS
1700# define e1kCsTxEnter(ps, rcBusy) VINF_SUCCESS
1701# define e1kR3CsTxEnterAsserted(ps) do { } while (0)
1702# define e1kCsTxLeave(ps) do { } while (0)
1703#else /* E1K_WITH_TX_CS */
1704# define e1kCsTxEnter(ps, rcBusy) PDMDevHlpCritSectEnter(pDevIns, &(ps)->csTx, (rcBusy))
1705# define e1kR3CsTxEnterAsserted(ps) do { \
1706 int const rcLock = PDMDevHlpCritSectEnter(pDevIns, &(ps)->csTx, VERR_SEM_BUSY); \
1707 PDM_CRITSECT_RELEASE_ASSERT_RC_DEV(pDevIns, &(ps)->csTx, rcLock); \
1708 } while (0)
1709# define e1kCsTxLeave(ps) PDMDevHlpCritSectLeave(pDevIns, &(ps)->csTx)
1710# define e1kCsTxIsOwner(ps) PDMDevHlpCritSectIsOwner(pDevIns, &(ps)->csTx)
1711#endif /* E1K_WITH_TX_CS */
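/* Illustrative usage of the locking helpers (hypothetical caller, not taken verbatim from this file;
 * all of these macros expect a local pDevIns to be in scope):
 *     e1kCsEnterReturn(pThis, VERR_SEM_BUSY);  // bail out of the calling function if the section cannot be entered
 *     ... touch state shared between EMT and the RX/TX threads ...
 *     e1kCsLeave(pThis);
 */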
1712
1713
1714#ifdef E1K_WITH_TXD_CACHE
1715/*
1716 * Transmit Descriptor Register Context
1717 */
1718struct E1kTxDContext
1719{
1720 uint32_t tdlen;
1721 uint32_t tdh;
1722 uint32_t tdt;
1723 uint8_t nextPacket;
1724};
1725typedef struct E1kTxDContext E1KTXDC, *PE1KTXDC;
1726
1727DECLINLINE(bool) e1kUpdateTxDContext(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KTXDC pContext)
1728{
1729 Assert(e1kCsTxIsOwner(pThis));
1730 if (!e1kCsTxIsOwner(pThis))
1731 {
1732 memset(pContext, 0, sizeof(E1KTXDC));
1733 return false;
1734 }
1735 pContext->tdlen = TDLEN;
1736 pContext->tdh = TDH;
1737 pContext->tdt = TDT;
1738 uint32_t cTxRingSize = pContext->tdlen / sizeof(E1KTXDESC);
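 /* The spec requires TDLEN to be a multiple of 128 bytes (8 descriptors), so this division is exact. */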
1739#ifdef DEBUG
1740 if (pContext->tdh >= cTxRingSize)
1741 {
1742 Log(("%s e1kUpdateTxDContext: will return false because TDH too big (%u >= %u)\n",
1743 pThis->szPrf, pContext->tdh, cTxRingSize));
1744 return false;
1745 }
1746 if (pContext->tdt >= cTxRingSize)
1747 {
1748 Log(("%s e1kUpdateTxDContext: will return false because TDT too big (%u >= %u)\n",
1749 pThis->szPrf, pContext->tdt, cTxRingSize));
1750 return false;
1751 }
1752#endif /* DEBUG */
1753 return pContext->tdh < cTxRingSize && pContext->tdt < cTxRingSize;
1754}
1755#endif /* E1K_WITH_TXD_CACHE */
1756#ifdef E1K_WITH_RXD_CACHE
1757/*
1758 * Receive Descriptor Register Context
1759 */
1760struct E1kRxDContext
1761{
1762 uint32_t rdlen;
1763 uint32_t rdh;
1764 uint32_t rdt;
1765};
1766typedef struct E1kRxDContext E1KRXDC, *PE1KRXDC;
1767
1768DECLINLINE(bool) e1kUpdateRxDContext(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KRXDC pContext, const char *pcszCallee)
1769{
1770 Assert(e1kCsRxIsOwner(pThis));
1771 if (!e1kCsRxIsOwner(pThis))
1772 return false;
1773 pContext->rdlen = RDLEN;
1774 pContext->rdh = RDH;
1775 pContext->rdt = RDT;
1776 uint32_t cRxRingSize = pContext->rdlen / sizeof(E1KRXDESC);
1777 /*
1778 * Note that the checks for RDT are a bit different. Some guests, OS/2 for
1779 * example, intend to use all descriptors in RX ring, so they point RDT
1780 * right beyond the last descriptor in the ring. While this is not
1781 * acceptable for other registers, it works out fine for RDT.
1782 */
1783#ifdef DEBUG
1784 if (pContext->rdh >= cRxRingSize)
1785 {
1786 Log(("%s e1kUpdateRxDContext: called from %s, will return false because RDH too big (%u >= %u)\n",
1787 pThis->szPrf, pcszCallee, pContext->rdh, cRxRingSize));
1788 return false;
1789 }
1790 if (pContext->rdt > cRxRingSize)
1791 {
1792 Log(("%s e1kUpdateRxDContext: called from %s, will return false because RDT too big (%u > %u)\n",
1793 pThis->szPrf, pcszCallee, pContext->rdt, cRxRingSize));
1794 return false;
1795 }
1796#else /* !DEBUG */
1797 RT_NOREF(pcszCallee);
1798#endif /* !DEBUG */
1799 return pContext->rdh < cRxRingSize && pContext->rdt <= cRxRingSize; // && (RCTL & RCTL_EN);
1800}
1801#endif /* E1K_WITH_RXD_CACHE */
1802
1803/**
1804 * Wakeup the RX thread.
1805 */
1806static void e1kWakeupReceive(PPDMDEVINS pDevIns, PE1KSTATE pThis)
1807{
1808 if ( pThis->fMaybeOutOfSpace
1809 && pThis->hEventMoreRxDescAvail != NIL_SUPSEMEVENT)
1810 {
1811 STAM_COUNTER_INC(&pThis->CTX_SUFF_Z(StatRxOverflowWakeup));
1812 E1kLog(("%s Waking up Out-of-RX-space semaphore\n", pThis->szPrf));
1813 int rc = PDMDevHlpSUPSemEventSignal(pDevIns, pThis->hEventMoreRxDescAvail);
1814 AssertRC(rc);
1815 }
1816}
1817
1818#ifdef IN_RING3
1819
1820/**
1821 * Hardware reset. Revert all registers to initial values.
1822 *
1823 * @param pDevIns The device instance.
1824 * @param pThis The device state structure.
1825 * @param pThisCC The current context instance data.
1826 */
1827static void e1kR3HardReset(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KSTATECC pThisCC)
1828{
1829 E1kLog(("%s Hard reset triggered\n", pThis->szPrf));
1830 /* No interrupts should survive device reset, see @bugref{9556}. */
1831 if (pThis->fIntRaised)
1832 {
1833 /* Lower(0) INTA(0) */
1834 PDMDevHlpPCISetIrq(pDevIns, 0, 0);
1835 pThis->fIntRaised = false;
1836 E1kLog(("%s e1kR3HardReset: Lowered IRQ: ICR=%08x\n", pThis->szPrf, ICR));
1837 }
1838 memset(pThis->auRegs, 0, sizeof(pThis->auRegs));
1839 memset(pThis->aRecAddr.au32, 0, sizeof(pThis->aRecAddr.au32));
1840# ifdef E1K_INIT_RA0
1841 memcpy(pThis->aRecAddr.au32, pThis->macConfigured.au8,
1842 sizeof(pThis->macConfigured.au8));
1843 pThis->aRecAddr.array[0].ctl |= RA_CTL_AV;
1844# endif /* E1K_INIT_RA0 */
1845 STATUS = 0x0081; /* SPEED=10b (1000 Mb/s), FD=1b (Full Duplex) */
1846 EECD = 0x0100; /* EE_PRES=1b (EEPROM present) */
1847 CTRL = 0x0a09; /* FRCSPD=1b SPEED=10b LRST=1b FD=1b */
1848 TSPMT = 0x01000400;/* TSMT=0400h TSPBP=0100h */
1849 Assert(GET_BITS(RCTL, BSIZE) == 0);
1850 pThis->u16RxBSize = 2048;
1851
1852 uint16_t u16LedCtl = 0x0602; /* LED0/LINK_UP#, LED2/LINK100# */
1853 pThisCC->eeprom.readWord(0x2F, &u16LedCtl); /* Read LEDCTL defaults from EEPROM */
1854 LEDCTL = 0x07008300 | (((uint32_t)u16LedCtl & 0xCF00) << 8) | (u16LedCtl & 0xCF); /* Only LED0 and LED2 defaults come from EEPROM */
1855
1856 /* Reset promiscuous mode */
1857 if (pThisCC->pDrvR3)
1858 pThisCC->pDrvR3->pfnSetPromiscuousMode(pThisCC->pDrvR3, false);
1859
1860# ifdef E1K_WITH_TXD_CACHE
1861 e1kR3CsTxEnterAsserted(pThis);
1862 pThis->nTxDFetched = 0;
1863 pThis->iTxDCurrent = 0;
1864 pThis->fGSO = false;
1865 pThis->cbTxAlloc = 0;
1866 e1kCsTxLeave(pThis);
1867# endif /* E1K_WITH_TXD_CACHE */
1868# ifdef E1K_WITH_RXD_CACHE
1869 e1kR3CsRxEnterAsserted(pThis);
1870 pThis->iRxDCurrent = pThis->nRxDFetched = 0;
1871 e1kCsRxLeave(pThis);
1872# endif /* E1K_WITH_RXD_CACHE */
1873# ifdef E1K_LSC_ON_RESET
1874 E1kLog(("%s Will trigger LSC in %d seconds...\n",
1875 pThis->szPrf, pThis->cMsLinkUpDelay / 1000));
1876 e1kArmTimer(pDevIns, pThis, pThis->hLUTimer, pThis->cMsLinkUpDelay * 1000);
1877# endif /* E1K_LSC_ON_RESET */
1878}
1879
1880#endif /* IN_RING3 */
1881
1882/**
1883 * Compute Internet checksum.
1884 *
1885 * @remarks Refer to http://www.netfor2.com/checksum.html for short intro.
1886 *
1887 * @param pvBuf The buffer holding the data to checksum.
1888 * @param cb The size of the buffer in bytes.
1891 *
1892 * @return The 1's complement of the 1's complement sum.
1893 *
1894 * @thread E1000_TX
1895 */
1896static uint16_t e1kCSum16(const void *pvBuf, size_t cb)
1897{
1898 uint32_t csum = 0;
1899 uint16_t *pu16 = (uint16_t *)pvBuf;
1900
1901 while (cb > 1)
1902 {
1903 csum += *pu16++;
1904 cb -= 2;
1905 }
1906 if (cb)
1907 csum += *(uint8_t*)pu16;
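 /* Fold any carries out of the low 16 bits back in (end-around carry of one's complement addition, cf. RFC 1071). */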
1908 while (csum >> 16)
1909 csum = (csum >> 16) + (csum & 0xFFFF);
1910 Assert(csum < 65536);
1911 return (uint16_t)~csum;
1912}
1913
1914/**
1915 * Dump a packet to debug log.
1916 *
1917 * @param pDevIns The device instance.
1918 * @param pThis The device state structure.
1919 * @param cpPacket The packet.
1920 * @param cb The size of the packet.
1921 * @param pszText A string denoting direction of packet transfer.
1922 * @thread E1000_TX
1923 */
1924DECLINLINE(void) e1kPacketDump(PPDMDEVINS pDevIns, PE1KSTATE pThis, const uint8_t *cpPacket, size_t cb, const char *pszText)
1925{
1926#ifdef DEBUG
1927 if (RT_LIKELY(e1kCsEnter(pThis, VERR_SEM_BUSY) == VINF_SUCCESS))
1928 {
1929 Log4(("%s --- %s packet #%d: %RTmac => %RTmac (%d bytes) ---\n",
1930 pThis->szPrf, pszText, ++pThis->u32PktNo, cpPacket+6, cpPacket, cb));
1931 if (ntohs(*(uint16_t*)(cpPacket+12)) == 0x86DD)
1932 {
1933 Log4(("%s --- IPv6: %RTnaipv6 => %RTnaipv6\n",
1934 pThis->szPrf, cpPacket+14+8, cpPacket+14+24));
1935 if (*(cpPacket+14+6) == 0x6)
1936 Log4(("%s --- TCP: seq=%x ack=%x\n", pThis->szPrf,
1937 ntohl(*(uint32_t*)(cpPacket+14+40+4)), ntohl(*(uint32_t*)(cpPacket+14+40+8))));
1938 }
1939 else if (ntohs(*(uint16_t*)(cpPacket+12)) == 0x800)
1940 {
1941 Log4(("%s --- IPv4: %RTnaipv4 => %RTnaipv4\n",
1942 pThis->szPrf, *(uint32_t*)(cpPacket+14+12), *(uint32_t*)(cpPacket+14+16)));
1943 if (*(cpPacket+14+6) == 0x6)
1944 Log4(("%s --- TCP: seq=%x ack=%x\n", pThis->szPrf,
1945 ntohl(*(uint32_t*)(cpPacket+14+20+4)), ntohl(*(uint32_t*)(cpPacket+14+20+8))));
1946 }
1947 E1kLog3(("%.*Rhxd\n", cb, cpPacket));
1948 e1kCsLeave(pThis);
1949 }
1950#else
1951 if (RT_LIKELY(e1kCsEnter(pThis, VERR_SEM_BUSY) == VINF_SUCCESS))
1952 {
1953 if (ntohs(*(uint16_t*)(cpPacket+12)) == 0x86DD)
1954 E1kLogRel(("E1000: %s packet #%d, %RTmac => %RTmac, %RTnaipv6 => %RTnaipv6, seq=%x ack=%x\n",
1955 pszText, ++pThis->u32PktNo, cpPacket+6, cpPacket, cpPacket+14+8, cpPacket+14+24,
1956 ntohl(*(uint32_t*)(cpPacket+14+40+4)), ntohl(*(uint32_t*)(cpPacket+14+40+8))));
1957 else
1958 E1kLogRel(("E1000: %s packet #%d, %RTmac => %RTmac, %RTnaipv4 => %RTnaipv4, seq=%x ack=%x\n",
1959 pszText, ++pThis->u32PktNo, cpPacket+6, cpPacket,
1960 *(uint32_t*)(cpPacket+14+12), *(uint32_t*)(cpPacket+14+16),
1961 ntohl(*(uint32_t*)(cpPacket+14+20+4)), ntohl(*(uint32_t*)(cpPacket+14+20+8))));
1962 e1kCsLeave(pThis);
1963 }
1964 RT_NOREF2(cb, pszText);
1965#endif
1966}
1967
1968/**
1969 * Determine the type of transmit descriptor.
1970 *
1971 * @returns Descriptor type. See E1K_DTYP_XXX defines.
1972 *
1973 * @param pDesc Pointer to descriptor union.
1974 * @thread E1000_TX
1975 */
1976DECLINLINE(int) e1kGetDescType(E1KTXDESC *pDesc)
1977{
1978 if (pDesc->legacy.cmd.fDEXT)
1979 return pDesc->context.dw2.u4DTYP;
1980 return E1K_DTYP_LEGACY;
1981}
1982
1983
1984#ifdef E1K_WITH_RXD_CACHE
1985/**
1986 * Return the number of RX descriptors that belong to the hardware.
1987 *
1988 * @returns the number of available descriptors in RX ring.
1989 * @param pRxdc The receive descriptor register context.
1990 * @thread ???
1991 */
1992DECLINLINE(uint32_t) e1kGetRxLen(PE1KRXDC pRxdc)
1993{
1994 /**
1995 * Make sure RDT won't change during computation. EMT may modify RDT at
1996 * any moment.
1997 */
1998 uint32_t rdt = pRxdc->rdt;
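 /* Example: an 8-descriptor ring with RDH=6 and RDT=2 yields 8 - 6 + 2 = 4 descriptors owned by hardware;
  * RDH == RDT means no descriptors are available. */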
1999 return (pRxdc->rdh > rdt ? pRxdc->rdlen/sizeof(E1KRXDESC) : 0) + rdt - pRxdc->rdh;
2000}
2001
2002DECLINLINE(unsigned) e1kRxDInCache(PE1KSTATE pThis)
2003{
2004 return pThis->nRxDFetched > pThis->iRxDCurrent ?
2005 pThis->nRxDFetched - pThis->iRxDCurrent : 0;
2006}
2007
2008DECLINLINE(unsigned) e1kRxDIsCacheEmpty(PE1KSTATE pThis)
2009{
2010 return pThis->iRxDCurrent >= pThis->nRxDFetched;
2011}
2012
2013/**
2014 * Load receive descriptors from guest memory. The caller needs to be in Rx
2015 * critical section.
2016 *
2017 * We need two physical reads in case the tail wrapped around the end of RX
2018 * descriptor ring.
2019 *
2020 * @returns the actual number of descriptors fetched.
2021 * @param pDevIns The device instance.
2022 * @param pThis The device state structure.
2023 * @thread EMT, RX
2024 */
2025DECLINLINE(unsigned) e1kRxDPrefetch(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KRXDC pRxdc)
2026{
2027 E1kLog3(("%s e1kRxDPrefetch: RDH=%x RDT=%x RDLEN=%x "
2028 "iRxDCurrent=%x nRxDFetched=%x\n",
2029 pThis->szPrf, pRxdc->rdh, pRxdc->rdt, pRxdc->rdlen, pThis->iRxDCurrent, pThis->nRxDFetched));
2030 /* We've already loaded pThis->nRxDFetched descriptors past RDH. */
2031 unsigned nDescsAvailable = e1kGetRxLen(pRxdc) - e1kRxDInCache(pThis);
2032 unsigned nDescsToFetch = RT_MIN(nDescsAvailable, E1K_RXD_CACHE_SIZE - pThis->nRxDFetched);
2033 unsigned nDescsTotal = pRxdc->rdlen / sizeof(E1KRXDESC);
2034 Assert(nDescsTotal != 0);
2035 if (nDescsTotal == 0)
2036 return 0;
2037 unsigned nFirstNotLoaded = (pRxdc->rdh + e1kRxDInCache(pThis)) % nDescsTotal;
2038 unsigned nDescsInSingleRead = RT_MIN(nDescsToFetch, nDescsTotal - nFirstNotLoaded);
2039 E1kLog3(("%s e1kRxDPrefetch: nDescsAvailable=%u nDescsToFetch=%u "
2040 "nDescsTotal=%u nFirstNotLoaded=0x%x nDescsInSingleRead=%u\n",
2041 pThis->szPrf, nDescsAvailable, nDescsToFetch, nDescsTotal,
2042 nFirstNotLoaded, nDescsInSingleRead));
2043 if (nDescsToFetch == 0)
2044 return 0;
2045 E1KRXDESC* pFirstEmptyDesc = &pThis->aRxDescriptors[pThis->nRxDFetched];
2046 PDMDevHlpPCIPhysRead(pDevIns,
2047 ((uint64_t)RDBAH << 32) + RDBAL + nFirstNotLoaded * sizeof(E1KRXDESC),
2048 pFirstEmptyDesc, nDescsInSingleRead * sizeof(E1KRXDESC));
2049 // uint64_t addrBase = ((uint64_t)RDBAH << 32) + RDBAL;
2050 // unsigned i, j;
2051 // for (i = pThis->nRxDFetched; i < pThis->nRxDFetched + nDescsInSingleRead; ++i)
2052 // {
2053 // pThis->aRxDescAddr[i] = addrBase + (nFirstNotLoaded + i - pThis->nRxDFetched) * sizeof(E1KRXDESC);
2054 // E1kLog3(("%s aRxDescAddr[%d] = %p\n", pThis->szPrf, i, pThis->aRxDescAddr[i]));
2055 // }
2056 E1kLog3(("%s Fetched %u RX descriptors at %08x%08x(0x%x), RDLEN=%08x, RDH=%08x, RDT=%08x\n",
2057 pThis->szPrf, nDescsInSingleRead,
2058 RDBAH, RDBAL + pRxdc->rdh * sizeof(E1KRXDESC),
2059 nFirstNotLoaded, pRxdc->rdlen, pRxdc->rdh, pRxdc->rdt));
2060 if (nDescsToFetch > nDescsInSingleRead)
2061 {
2062 PDMDevHlpPCIPhysRead(pDevIns,
2063 ((uint64_t)RDBAH << 32) + RDBAL,
2064 pFirstEmptyDesc + nDescsInSingleRead,
2065 (nDescsToFetch - nDescsInSingleRead) * sizeof(E1KRXDESC));
2066 // Assert(i == pThis->nRxDFetched + nDescsInSingleRead);
2067 // for (j = 0; i < pThis->nRxDFetched + nDescsToFetch; ++i, ++j)
2068 // {
2069 // pThis->aRxDescAddr[i] = addrBase + j * sizeof(E1KRXDESC);
2070 // E1kLog3(("%s aRxDescAddr[%d] = %p\n", pThis->szPrf, i, pThis->aRxDescAddr[i]));
2071 // }
2072 E1kLog3(("%s Fetched %u RX descriptors at %08x%08x\n",
2073 pThis->szPrf, nDescsToFetch - nDescsInSingleRead,
2074 RDBAH, RDBAL));
2075 }
2076 pThis->nRxDFetched += nDescsToFetch;
2077 return nDescsToFetch;
2078}
2079
2080# ifdef IN_RING3 /* currently only used in ring-3 due to stack space requirements of the caller */
2081/**
2082 * Dump receive descriptor to debug log.
2083 *
2084 * @param pThis The device state structure.
2085 * @param pDesc Pointer to the descriptor.
2086 * @thread E1000_RX
2087 */
2088static void e1kPrintRDesc(PE1KSTATE pThis, E1KRXDESC *pDesc)
2089{
2090 RT_NOREF2(pThis, pDesc);
2091 E1kLog2(("%s <-- Receive Descriptor (%d bytes):\n", pThis->szPrf, pDesc->u16Length));
2092 E1kLog2((" Address=%16LX Length=%04X Csum=%04X\n",
2093 pDesc->u64BufAddr, pDesc->u16Length, pDesc->u16Checksum));
2094 E1kLog2((" STA: %s %s %s %s %s %s %s ERR: %s %s %s %s SPECIAL: %s VLAN=%03x PRI=%x\n",
2095 pDesc->status.fPIF ? "PIF" : "pif",
2096 pDesc->status.fIPCS ? "IPCS" : "ipcs",
2097 pDesc->status.fTCPCS ? "TCPCS" : "tcpcs",
2098 pDesc->status.fVP ? "VP" : "vp",
2099 pDesc->status.fIXSM ? "IXSM" : "ixsm",
2100 pDesc->status.fEOP ? "EOP" : "eop",
2101 pDesc->status.fDD ? "DD" : "dd",
2102 pDesc->status.fRXE ? "RXE" : "rxe",
2103 pDesc->status.fIPE ? "IPE" : "ipe",
2104 pDesc->status.fTCPE ? "TCPE" : "tcpe",
2105 pDesc->status.fCE ? "CE" : "ce",
2106 E1K_SPEC_CFI(pDesc->status.u16Special) ? "CFI" :"cfi",
2107 E1K_SPEC_VLAN(pDesc->status.u16Special),
2108 E1K_SPEC_PRI(pDesc->status.u16Special)));
2109}
2110# endif /* IN_RING3 */
2111#endif /* E1K_WITH_RXD_CACHE */
2112
2113/**
2114 * Dump transmit descriptor to debug log.
2115 *
2116 * @param pThis The device state structure.
2117 * @param pDesc Pointer to descriptor union.
2118 * @param pszDir A string denoting direction of descriptor transfer
2119 * @thread E1000_TX
2120 */
2121static void e1kPrintTDesc(PE1KSTATE pThis, E1KTXDESC *pDesc, const char *pszDir,
2122 unsigned uLevel = RTLOGGRPFLAGS_LEVEL_2)
2123{
2124 RT_NOREF4(pThis, pDesc, pszDir, uLevel);
2125
2126 /*
2127 * Unfortunately we cannot use our format handler here, we want R0 logging
2128 * as well.
2129 */
2130 switch (e1kGetDescType(pDesc))
2131 {
2132 case E1K_DTYP_CONTEXT:
2133 E1kLogX(uLevel, ("%s %s Context Transmit Descriptor %s\n",
2134 pThis->szPrf, pszDir, pszDir));
2135 E1kLogX(uLevel, (" IPCSS=%02X IPCSO=%02X IPCSE=%04X TUCSS=%02X TUCSO=%02X TUCSE=%04X\n",
2136 pDesc->context.ip.u8CSS, pDesc->context.ip.u8CSO, pDesc->context.ip.u16CSE,
2137 pDesc->context.tu.u8CSS, pDesc->context.tu.u8CSO, pDesc->context.tu.u16CSE));
2138 E1kLogX(uLevel, (" TUCMD:%s%s%s %s %s PAYLEN=%04x HDRLEN=%04x MSS=%04x STA: %s\n",
2139 pDesc->context.dw2.fIDE ? " IDE":"",
2140 pDesc->context.dw2.fRS ? " RS" :"",
2141 pDesc->context.dw2.fTSE ? " TSE":"",
2142 pDesc->context.dw2.fIP ? "IPv4":"IPv6",
2143 pDesc->context.dw2.fTCP ? "TCP":"UDP",
2144 pDesc->context.dw2.u20PAYLEN,
2145 pDesc->context.dw3.u8HDRLEN,
2146 pDesc->context.dw3.u16MSS,
2147 pDesc->context.dw3.fDD?"DD":""));
2148 break;
2149 case E1K_DTYP_DATA:
2150 E1kLogX(uLevel, ("%s %s Data Transmit Descriptor (%d bytes) %s\n",
2151 pThis->szPrf, pszDir, pDesc->data.cmd.u20DTALEN, pszDir));
2152 E1kLogX(uLevel, (" Address=%16LX DTALEN=%05X\n",
2153 pDesc->data.u64BufAddr,
2154 pDesc->data.cmd.u20DTALEN));
2155 E1kLogX(uLevel, (" DCMD:%s%s%s%s%s%s%s STA:%s%s%s POPTS:%s%s SPECIAL:%s VLAN=%03x PRI=%x\n",
2156 pDesc->data.cmd.fIDE ? " IDE" :"",
2157 pDesc->data.cmd.fVLE ? " VLE" :"",
2158 pDesc->data.cmd.fRPS ? " RPS" :"",
2159 pDesc->data.cmd.fRS ? " RS" :"",
2160 pDesc->data.cmd.fTSE ? " TSE" :"",
2161 pDesc->data.cmd.fIFCS? " IFCS":"",
2162 pDesc->data.cmd.fEOP ? " EOP" :"",
2163 pDesc->data.dw3.fDD ? " DD" :"",
2164 pDesc->data.dw3.fEC ? " EC" :"",
2165 pDesc->data.dw3.fLC ? " LC" :"",
2166 pDesc->data.dw3.fTXSM? " TXSM":"",
2167 pDesc->data.dw3.fIXSM? " IXSM":"",
2168 E1K_SPEC_CFI(pDesc->data.dw3.u16Special) ? "CFI" :"cfi",
2169 E1K_SPEC_VLAN(pDesc->data.dw3.u16Special),
2170 E1K_SPEC_PRI(pDesc->data.dw3.u16Special)));
2171 break;
2172 case E1K_DTYP_LEGACY:
2173 E1kLogX(uLevel, ("%s %s Legacy Transmit Descriptor (%d bytes) %s\n",
2174 pThis->szPrf, pszDir, pDesc->legacy.cmd.u16Length, pszDir));
2175 E1kLogX(uLevel, (" Address=%16LX DTALEN=%05X\n",
2176 pDesc->data.u64BufAddr,
2177 pDesc->legacy.cmd.u16Length));
2178 E1kLogX(uLevel, (" CMD:%s%s%s%s%s%s%s STA:%s%s%s CSO=%02x CSS=%02x SPECIAL:%s VLAN=%03x PRI=%x\n",
2179 pDesc->legacy.cmd.fIDE ? " IDE" :"",
2180 pDesc->legacy.cmd.fVLE ? " VLE" :"",
2181 pDesc->legacy.cmd.fRPS ? " RPS" :"",
2182 pDesc->legacy.cmd.fRS ? " RS" :"",
2183 pDesc->legacy.cmd.fIC ? " IC" :"",
2184 pDesc->legacy.cmd.fIFCS? " IFCS":"",
2185 pDesc->legacy.cmd.fEOP ? " EOP" :"",
2186 pDesc->legacy.dw3.fDD ? " DD" :"",
2187 pDesc->legacy.dw3.fEC ? " EC" :"",
2188 pDesc->legacy.dw3.fLC ? " LC" :"",
2189 pDesc->legacy.cmd.u8CSO,
2190 pDesc->legacy.dw3.u8CSS,
2191 E1K_SPEC_CFI(pDesc->legacy.dw3.u16Special) ? "CFI" :"cfi",
2192 E1K_SPEC_VLAN(pDesc->legacy.dw3.u16Special),
2193 E1K_SPEC_PRI(pDesc->legacy.dw3.u16Special)));
2194 break;
2195 default:
2196 E1kLog(("%s %s Invalid Transmit Descriptor %s\n",
2197 pThis->szPrf, pszDir, pszDir));
2198 break;
2199 }
2200}
2201
2202/**
2203 * Raise an interrupt later.
2204 *
2205 * @param pDevIns The device instance.
 * @param pThis The device state structure.
 * @param nsDeadline How long to wait before raising the interrupt, in nanoseconds.
2206 */
2207DECLINLINE(void) e1kPostponeInterrupt(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint64_t nsDeadline)
2208{
2209 if (!PDMDevHlpTimerIsActive(pDevIns, pThis->hIntTimer))
2210 PDMDevHlpTimerSetNano(pDevIns, pThis->hIntTimer, nsDeadline);
2211}
2212
2213/**
2214 * Raise interrupt if not masked.
2215 *
2216 * @param pDevIns The device instance.
 * @param pThis The device state structure.
 * @param rcBusy Status code to return when the device critical section is busy.
 * @param u32IntCause The interrupt cause bit(s) to set in ICR.
2217 */
2218static int e1kRaiseInterrupt(PPDMDEVINS pDevIns, PE1KSTATE pThis, int rcBusy, uint32_t u32IntCause)
2219{
2220 /* Do NOT use e1kCsEnterReturn here as most callers don't check the
2221 status code. They'll pass a negative rcBusy. */
2222 int rc = e1kCsEnter(pThis, rcBusy);
2223 if (RT_LIKELY(rc == VINF_SUCCESS))
2224 { /* likely */ }
2225 else
2226 {
2227 PDM_CRITSECT_RELEASE_ASSERT_RC_DEV(pDevIns, &pThis->cs, rc);
2228 return rc;
2229 }
2230
2231 E1K_INC_ISTAT_CNT(pThis->uStatIntTry);
2232 ICR |= u32IntCause;
2233 if (ICR & IMS)
2234 {
2235 if (pThis->fIntRaised)
2236 {
2237 E1K_INC_ISTAT_CNT(pThis->uStatIntSkip);
2238 E1kLog2(("%s e1kRaiseInterrupt: Already raised, skipped. ICR&IMS=%08x\n",
2239 pThis->szPrf, ICR & IMS));
2240 }
2241 else
2242 {
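 /* ITR is expressed in 256 ns units; if the previous interrupt was acknowledged less than
  * ITR * 256 ns ago, postpone this one (interrupt throttling). */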
2243 uint64_t tsNow = PDMDevHlpTimerGet(pDevIns, pThis->hIntTimer);
2244 if (!!ITR && tsNow - pThis->u64AckedAt < ITR * 256
2245 && pThis->fItrEnabled && (pThis->fItrRxEnabled || !(ICR & ICR_RXT0)))
2246 {
2247 E1K_INC_ISTAT_CNT(pThis->uStatIntEarly);
2248 E1kLog2(("%s e1kRaiseInterrupt: Too early to raise again: %d ns < %d ns.\n",
2249 pThis->szPrf, (uint32_t)(tsNow - pThis->u64AckedAt), ITR * 256));
2250 e1kPostponeInterrupt(pDevIns, pThis, ITR * 256);
2251 }
2252 else
2253 {
2254
2255 /* Since we are delivering the interrupt now
2256 * there is no need to do it later -- stop the timer.
2257 */
2258 PDMDevHlpTimerStop(pDevIns, pThis->hIntTimer);
2259 E1K_INC_ISTAT_CNT(pThis->uStatInt);
2260 STAM_COUNTER_INC(&pThis->StatIntsRaised);
2261 /* Got at least one unmasked interrupt cause */
2262 pThis->fIntRaised = true;
2263 /* Raise(1) INTA(0) */
2264 E1kLogRel(("E1000: irq RAISED icr&mask=0x%x, icr=0x%x\n", ICR & IMS, ICR));
2265 PDMDevHlpPCISetIrq(pDevIns, 0, 1);
2266 E1kLog(("%s e1kRaiseInterrupt: Raised. ICR&IMS=%08x\n",
2267 pThis->szPrf, ICR & IMS));
2268 }
2269 }
2270 }
2271 else
2272 {
2273 E1K_INC_ISTAT_CNT(pThis->uStatIntMasked);
2274 E1kLog2(("%s e1kRaiseInterrupt: Not raising, ICR=%08x, IMS=%08x\n",
2275 pThis->szPrf, ICR, IMS));
2276 }
2277 e1kCsLeave(pThis);
2278 return VINF_SUCCESS;
2279}
2280
2281/**
2282 * Compute the physical address of the descriptor.
2283 *
2284 * @returns the physical address of the descriptor.
2285 *
2286 * @param baseHigh High-order 32 bits of descriptor table address.
2287 * @param baseLow Low-order 32 bits of descriptor table address.
2288 * @param idxDesc The descriptor index in the table.
2289 */
2290DECLINLINE(RTGCPHYS) e1kDescAddr(uint32_t baseHigh, uint32_t baseLow, uint32_t idxDesc)
2291{
2292 AssertCompile(sizeof(E1KRXDESC) == sizeof(E1KTXDESC));
2293 return ((uint64_t)baseHigh << 32) + baseLow + idxDesc * sizeof(E1KRXDESC);
2294}
2295
2296#ifdef IN_RING3 /* currently only used in ring-3 due to stack space requirements of the caller */
2297/**
2298 * Advance the head pointer of the receive descriptor queue.
2299 *
2300 * @remarks RDH always points to the next available RX descriptor.
2301 *
2302 * @param pDevIns The device instance.
2303 * @param pThis The device state structure.
2304 */
2305DECLINLINE(void) e1kAdvanceRDH(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KRXDC pRxdc)
2306{
2307 Assert(e1kCsRxIsOwner(pThis));
2308 //e1kR3CsEnterAsserted(pThis);
2309 if (++pRxdc->rdh * sizeof(E1KRXDESC) >= pRxdc->rdlen)
2310 pRxdc->rdh = 0;
2311 RDH = pRxdc->rdh; /* Sync the actual register and RXDC */
2312#ifdef E1K_WITH_RXD_CACHE
2313 /*
2314 * We need to fetch descriptors now as the guest may advance RDT all the way
2315 * to RDH as soon as we generate RXDMT0 interrupt. This is mostly to provide
2316 * compatibility with Phar Lap ETS, see @bugref{7346}. Note that we do not
2317 * check if the receiver is enabled. It must be, otherwise we won't get here
2318 * in the first place.
2319 *
2320 * Note that we should have moved both RDH and iRxDCurrent by now.
2321 */
2322 if (e1kRxDIsCacheEmpty(pThis))
2323 {
2324 /* Cache is empty, reset it and check if we can fetch more. */
2325 pThis->iRxDCurrent = pThis->nRxDFetched = 0;
2326 E1kLog3(("%s e1kAdvanceRDH: Rx cache is empty, RDH=%x RDT=%x "
2327 "iRxDCurrent=%x nRxDFetched=%x\n",
2328 pThis->szPrf, pRxdc->rdh, pRxdc->rdt, pThis->iRxDCurrent, pThis->nRxDFetched));
2329 e1kRxDPrefetch(pDevIns, pThis, pRxdc);
2330 }
2331#endif /* E1K_WITH_RXD_CACHE */
2332 /*
2333 * Compute current receive queue length and fire RXDMT0 interrupt
2334 * if we are low on receive buffers
2335 */
2336 uint32_t uRQueueLen = pRxdc->rdh>pRxdc->rdt ? pRxdc->rdlen/sizeof(E1KRXDESC)-pRxdc->rdh+pRxdc->rdt : pRxdc->rdt-pRxdc->rdh;
2337 /*
2338 * The minimum threshold is controlled by RDMTS bits of RCTL:
2339 * 00 = 1/2 of RDLEN
2340 * 01 = 1/4 of RDLEN
2341 * 10 = 1/8 of RDLEN
2342 * 11 = reserved
2343 */
2344 uint32_t uMinRQThreshold = pRxdc->rdlen / sizeof(E1KRXDESC) / (2 << GET_BITS(RCTL, RDMTS));
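 /* Example: RDLEN=4096 (256 descriptors) with RDMTS=01b gives a threshold of 256 / 4 = 64 descriptors. */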
2345 if (uRQueueLen <= uMinRQThreshold)
2346 {
2347 E1kLogRel(("E1000: low on RX descriptors, RDH=%x RDT=%x len=%x threshold=%x\n", pRxdc->rdh, pRxdc->rdt, uRQueueLen, uMinRQThreshold));
2348 E1kLog2(("%s Low on RX descriptors, RDH=%x RDT=%x len=%x threshold=%x, raise an interrupt\n",
2349 pThis->szPrf, pRxdc->rdh, pRxdc->rdt, uRQueueLen, uMinRQThreshold));
2350 E1K_INC_ISTAT_CNT(pThis->uStatIntRXDMT0);
2351 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_RXDMT0);
2352 }
2353 E1kLog2(("%s e1kAdvanceRDH: at exit RDH=%x RDT=%x len=%x\n",
2354 pThis->szPrf, pRxdc->rdh, pRxdc->rdt, uRQueueLen));
2355 //e1kCsLeave(pThis);
2356}
2357#endif /* IN_RING3 */
2358
2359#ifdef E1K_WITH_RXD_CACHE
2360
2361# ifdef IN_RING3 /* currently only used in ring-3 due to stack space requirements of the caller */
2362
2363/**
2364 * Obtain the next RX descriptor from RXD cache, fetching descriptors from the
2365 * RX ring if the cache is empty.
2366 *
2367 * Note that we cannot advance the cache pointer (iRxDCurrent) yet as it will
2368 * go out of sync with RDH which will cause trouble when EMT checks if the
2369 * cache is empty to do pre-fetch @bugref{6217}.
2370 *
2371 * @param pDevIns The device instance.
2372 * @param pThis The device state structure.
2373 * @thread RX
2374 */
2375DECLINLINE(E1KRXDESC *) e1kRxDGet(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KRXDC pRxdc)
2376{
2377 Assert(e1kCsRxIsOwner(pThis));
2378 /* Check the cache first. */
2379 if (pThis->iRxDCurrent < pThis->nRxDFetched)
2380 return &pThis->aRxDescriptors[pThis->iRxDCurrent];
2381 /* Cache is empty, reset it and check if we can fetch more. */
2382 pThis->iRxDCurrent = pThis->nRxDFetched = 0;
2383 if (e1kRxDPrefetch(pDevIns, pThis, pRxdc))
2384 return &pThis->aRxDescriptors[pThis->iRxDCurrent];
2385 /* Out of Rx descriptors. */
2386 return NULL;
2387}
2388
2389
2390/**
2391 * Return the RX descriptor obtained with e1kRxDGet() and advance the cache
2392 * pointer. The descriptor gets written back to the RXD ring.
2393 *
2394 * @param pDevIns The device instance.
2395 * @param pThis The device state structure.
2396 * @param pDesc The descriptor being "returned" to the RX ring.
2397 * @thread RX
2398 */
2399DECLINLINE(void) e1kRxDPut(PPDMDEVINS pDevIns, PE1KSTATE pThis, E1KRXDESC* pDesc, PE1KRXDC pRxdc)
2400{
2401 Assert(e1kCsRxIsOwner(pThis));
2402 pThis->iRxDCurrent++;
2403 // Assert(pDesc >= pThis->aRxDescriptors);
2404 // Assert(pDesc < pThis->aRxDescriptors + E1K_RXD_CACHE_SIZE);
2405 // uint64_t addr = e1kDescAddr(RDBAH, RDBAL, RDH);
2406 // uint32_t rdh = RDH;
2407 // Assert(pThis->aRxDescAddr[pDesc - pThis->aRxDescriptors] == addr);
2408 PDMDevHlpPCIPhysWrite(pDevIns, e1kDescAddr(RDBAH, RDBAL, pRxdc->rdh), pDesc, sizeof(E1KRXDESC));
2409 /*
2410 * We need to print the descriptor before advancing RDH as it may fetch new
2411 * descriptors into the cache.
2412 */
2413 e1kPrintRDesc(pThis, pDesc);
2414 e1kAdvanceRDH(pDevIns, pThis, pRxdc);
2415}
2416
2417/**
2418 * Store a fragment of a received packet at the specified address.
2419 *
2420 * @param pDevIns The device instance.
2421 * @param pThis The device state structure.
2422 * @param pDesc The next available RX descriptor.
2423 * @param pvBuf The fragment.
2424 * @param cb The size of the fragment.
2425 */
2426static void e1kStoreRxFragment(PPDMDEVINS pDevIns, PE1KSTATE pThis, E1KRXDESC *pDesc, const void *pvBuf, size_t cb)
2427{
2428 STAM_PROFILE_ADV_START(&pThis->StatReceiveStore, a);
2429 E1kLog2(("%s e1kStoreRxFragment: store fragment of %04X at %016LX, EOP=%d\n",
2430 pThis->szPrf, cb, pDesc->u64BufAddr, pDesc->status.fEOP));
2431 PDMDevHlpPCIPhysWrite(pDevIns, pDesc->u64BufAddr, pvBuf, cb);
2432 pDesc->u16Length = (uint16_t)cb;
2433 Assert(pDesc->u16Length == cb);
2434 STAM_PROFILE_ADV_STOP(&pThis->StatReceiveStore, a);
2435 RT_NOREF(pThis);
2436}
2437
2438# endif /* IN_RING3 */
2439
2440#else /* !E1K_WITH_RXD_CACHE */
2441
2442/**
2443 * Store a fragment of a received packet that fits into the next available RX
2444 * buffer.
2445 *
2446 * @remarks Trigger the RXT0 interrupt if it is the last fragment of the packet.
2447 *
2448 * @param pDevIns The device instance.
2449 * @param pThis The device state structure.
2450 * @param pDesc The next available RX descriptor.
2451 * @param pvBuf The fragment.
2452 * @param cb The size of the fragment.
2453 */
2454static void e1kStoreRxFragment(PPDMDEVINS pDevIns, PE1KSTATE pThis, E1KRXDESC *pDesc, const void *pvBuf, size_t cb)
2455{
2456 STAM_PROFILE_ADV_START(&pThis->StatReceiveStore, a);
2457 E1kLog2(("%s e1kStoreRxFragment: store fragment of %04X at %016LX, EOP=%d\n", pThis->szPrf, cb, pDesc->u64BufAddr, pDesc->status.fEOP));
2458 PDMDevHlpPCIPhysWrite(pDevIns, pDesc->u64BufAddr, pvBuf, cb);
2459 pDesc->u16Length = (uint16_t)cb; Assert(pDesc->u16Length == cb);
2460 /* Write back the descriptor */
2461 PDMDevHlpPCIPhysWrite(pDevIns, e1kDescAddr(RDBAH, RDBAL, RDH), pDesc, sizeof(E1KRXDESC));
2462 e1kPrintRDesc(pThis, pDesc);
2463 E1kLogRel(("E1000: Wrote back RX desc, RDH=%x\n", RDH));
2464 /* Advance head */
2465 e1kAdvanceRDH(pDevIns, pThis);
2466 //E1kLog2(("%s e1kStoreRxFragment: EOP=%d RDTR=%08X RADV=%08X\n", pThis->szPrf, pDesc->fEOP, RDTR, RADV));
2467 if (pDesc->status.fEOP)
2468 {
2469 /* Complete packet has been stored -- it is time to let the guest know. */
2470#ifdef E1K_USE_RX_TIMERS
2471 if (RDTR)
2472 {
2473 /* Arm the timer to fire in RDTR usec (discard .024) */
2474 e1kArmTimer(pDevIns, pThis, pThis->hRIDTimer, RDTR);
2475 /* If absolute timer delay is enabled and the timer is not running yet, arm it. */
2476 if (RADV != 0 && !PDMDevHlpTimerIsActive(pDevIns, pThis->hRADTimer))
2477 e1kArmTimer(pDevIns, pThis, pThis->hRADTimer, RADV);
2478 }
2479 else
2480 {
2481#endif
2482 /* 0 delay means immediate interrupt */
2483 E1K_INC_ISTAT_CNT(pThis->uStatIntRx);
2484 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_RXT0);
2485#ifdef E1K_USE_RX_TIMERS
2486 }
2487#endif
2488 }
2489 STAM_PROFILE_ADV_STOP(&pThis->StatReceiveStore, a);
2490}
2491
2492#endif /* !E1K_WITH_RXD_CACHE */
2493
2494/**
2495 * Returns true if it is a broadcast packet.
2496 *
2497 * @returns true if destination address indicates broadcast.
2498 * @param pvBuf The ethernet packet.
2499 */
2500DECLINLINE(bool) e1kIsBroadcast(const void *pvBuf)
2501{
2502 static const uint8_t s_abBcastAddr[] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
2503 return memcmp(pvBuf, s_abBcastAddr, sizeof(s_abBcastAddr)) == 0;
2504}
2505
2506/**
2507 * Returns true if it is a multicast packet.
2508 *
2509 * @remarks returns true for broadcast packets as well.
2510 * @returns true if destination address indicates multicast.
2511 * @param pvBuf The ethernet packet.
2512 */
2513DECLINLINE(bool) e1kIsMulticast(const void *pvBuf)
2514{
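 /* The least significant bit of the first destination MAC octet is the I/G (group) bit. */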
2515 return (*(char*)pvBuf) & 1;
2516}
2517
2518#ifdef IN_RING3 /* currently only used in ring-3 due to stack space requirements of the caller */
2519/**
2520 * Set IXSM, IPCS and TCPCS flags according to the packet type.
2521 *
2522 * @remarks We emulate checksum offloading for major packet types only.
2523 *
2524 * @returns VBox status code.
2525 * @param pThis The device state structure.
2526 * @param pFrame The available data.
2527 * @param cb Number of bytes available in the buffer.
2528 * @param pStatus Bit fields containing status info.
2529 */
2530static int e1kRxChecksumOffload(PE1KSTATE pThis, const uint8_t *pFrame, size_t cb, E1KRXDST *pStatus)
2531{
2532 /** @todo
2533 * It is not safe to bypass checksum verification for packets coming
2534 * from the real wire. We are currently unable to tell where packets are
2535 * coming from, so we tell the driver to ignore our checksum flags
2536 * and do the verification in software.
2537 */
2538# if 0
2539 uint16_t uEtherType = ntohs(*(uint16_t*)(pFrame + 12));
2540
2541 E1kLog2(("%s e1kRxChecksumOffload: EtherType=%x\n", pThis->szPrf, uEtherType));
2542
2543 switch (uEtherType)
2544 {
2545 case 0x800: /* IPv4 */
2546 {
2547 pStatus->fIXSM = false;
2548 pStatus->fIPCS = true;
2549 PRTNETIPV4 pIpHdr4 = (PRTNETIPV4)(pFrame + 14);
2550 /* TCP/UDP checksum offloading works with TCP and UDP only */
2551 pStatus->fTCPCS = pIpHdr4->ip_p == 6 || pIpHdr4->ip_p == 17;
2552 break;
2553 }
2554 case 0x86DD: /* IPv6 */
2555 pStatus->fIXSM = false;
2556 pStatus->fIPCS = false;
2557 pStatus->fTCPCS = true;
2558 break;
2559 default: /* ARP, VLAN, etc. */
2560 pStatus->fIXSM = true;
2561 break;
2562 }
2563# else
2564 pStatus->fIXSM = true;
2565 RT_NOREF_PV(pThis); RT_NOREF_PV(pFrame); RT_NOREF_PV(cb);
2566# endif
2567 return VINF_SUCCESS;
2568}
2569#endif /* IN_RING3 */
2570
2571/**
2572 * Pad and store received packet.
2573 *
2574 * @remarks Make sure that the packet appears to the upper layer as one coming
2575 * from real Ethernet: pad it and insert FCS.
2576 *
2577 * @returns VBox status code.
2578 * @param pDevIns The device instance.
2579 * @param pThis The device state structure.
2580 * @param pvBuf The available data.
2581 * @param cb Number of bytes available in the buffer.
2582 * @param status Bit fields containing status info.
2583 */
2584static int e1kHandleRxPacket(PPDMDEVINS pDevIns, PE1KSTATE pThis, const void *pvBuf, size_t cb, E1KRXDST status)
2585{
2586#if defined(IN_RING3) /** @todo Remove this extra copying, it's gonna make us run out of kernel / hypervisor stack! */
2587 uint8_t rxPacket[E1K_MAX_RX_PKT_SIZE];
2588 uint8_t *ptr = rxPacket;
2589# ifdef E1K_WITH_RXD_CACHE
2590 E1KRXDC rxdc;
2591# endif /* E1K_WITH_RXD_CACHE */
2592
2593 e1kCsRxEnterReturn(pThis);
2594# ifdef E1K_WITH_RXD_CACHE
2595 if (RT_UNLIKELY(!e1kUpdateRxDContext(pDevIns, pThis, &rxdc, "e1kHandleRxPacket")))
2596 {
2597 e1kCsRxLeave(pThis);
2598 E1kLog(("%s e1kHandleRxPacket: failed to update Rx context, returning VINF_SUCCESS\n", pThis->szPrf));
2599 return VINF_SUCCESS;
2600 }
2601# endif /* E1K_WITH_RXD_CACHE */
2602
2603 if (cb > 70) /* unqualified guess */
2604 pThis->led.Asserted.s.fReading = pThis->led.Actual.s.fReading = 1;
2605
2606 Assert(cb <= E1K_MAX_RX_PKT_SIZE);
2607 Assert(cb > 16);
2608 size_t cbMax = ((RCTL & RCTL_LPE) ? E1K_MAX_RX_PKT_SIZE - 4 : 1518) - (status.fVP ? 0 : 4);
2609 E1kLog3(("%s Max RX packet size is %u\n", pThis->szPrf, cbMax));
2610 if (status.fVP)
2611 {
2612 /* VLAN packet -- strip VLAN tag in VLAN mode */
2613 if ((CTRL & CTRL_VME) && cb > 16)
2614 {
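 /* The 802.1Q tag occupies bytes 12..15: the TPID (0x8100) at 12..13 and the TCI at 14..15
  * (u16Ptr[7]); stripping it shortens the frame by 4 bytes. */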
2615 uint16_t *u16Ptr = (uint16_t*)pvBuf;
2616 memcpy(rxPacket, pvBuf, 12); /* Copy src and dst addresses */
2617 status.u16Special = RT_BE2H_U16(u16Ptr[7]); /* Extract VLAN tag */
2618 memcpy(rxPacket + 12, (uint8_t*)pvBuf + 16, cb - 16); /* Copy the rest of the packet */
2619 cb -= 4;
2620 E1kLog3(("%s Stripped tag for VLAN %u (cb=%u)\n",
2621 pThis->szPrf, status.u16Special, cb));
2622 }
2623 else
2624 {
2625 status.fVP = false; /* Set VP only if we stripped the tag */
2626 memcpy(rxPacket, pvBuf, cb);
2627 }
2628 }
2629 else
2630 memcpy(rxPacket, pvBuf, cb);
2631 /* Pad short packets */
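 /* (The minimum Ethernet frame is 64 bytes including the 4-byte FCS, i.e. 60 bytes before the FCS is appended below.) */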
2632 if (cb < 60)
2633 {
2634 memset(rxPacket + cb, 0, 60 - cb);
2635 cb = 60;
2636 }
2637 if (!(RCTL & RCTL_SECRC) && cb <= cbMax)
2638 {
2639 STAM_PROFILE_ADV_START(&pThis->StatReceiveCRC, a);
2640 /*
2641 * Add FCS if CRC stripping is not enabled. Since the value of CRC
2642 * is ignored by most drivers, we may as well save ourselves the trouble
2643 * of calculating it (see EthernetCRC CFGM parameter).
2644 */
2645 if (pThis->fEthernetCRC)
2646 *(uint32_t*)(rxPacket + cb) = RTCrc32(rxPacket, cb);
2647 cb += sizeof(uint32_t);
2648 STAM_PROFILE_ADV_STOP(&pThis->StatReceiveCRC, a);
2649 E1kLog3(("%s Added FCS (cb=%u)\n", pThis->szPrf, cb));
2650 }
2651 /* Compute checksum of complete packet */
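 /* RXCSUM.PCSS selects the byte offset at which packet checksumming starts; RT_MIN clamps it to the frame length. */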
2652 size_t cbCSumStart = RT_MIN(GET_BITS(RXCSUM, PCSS), cb);
2653 uint16_t checksum = e1kCSum16(rxPacket + cbCSumStart, cb - cbCSumStart);
2654 e1kRxChecksumOffload(pThis, rxPacket, cb, &status);
2655
2656 /* Update stats */
2657 E1K_INC_CNT32(GPRC);
2658 if (e1kIsBroadcast(pvBuf))
2659 E1K_INC_CNT32(BPRC);
2660 else if (e1kIsMulticast(pvBuf))
2661 E1K_INC_CNT32(MPRC);
2662 /* Update octet receive counter */
2663 E1K_ADD_CNT64(GORCL, GORCH, cb);
2664 STAM_REL_COUNTER_ADD(&pThis->StatReceiveBytes, cb);
2665 if (cb == 64)
2666 E1K_INC_CNT32(PRC64);
2667 else if (cb < 128)
2668 E1K_INC_CNT32(PRC127);
2669 else if (cb < 256)
2670 E1K_INC_CNT32(PRC255);
2671 else if (cb < 512)
2672 E1K_INC_CNT32(PRC511);
2673 else if (cb < 1024)
2674 E1K_INC_CNT32(PRC1023);
2675 else
2676 E1K_INC_CNT32(PRC1522);
2677
2678 E1K_INC_ISTAT_CNT(pThis->uStatRxFrm);
2679
2680# ifdef E1K_WITH_RXD_CACHE
2681 while (cb > 0)
2682 {
2683 E1KRXDESC *pDesc = e1kRxDGet(pDevIns, pThis, &rxdc);
2684
2685 if (pDesc == NULL)
2686 {
2687 E1kLog(("%s Out of receive buffers, dropping the packet "
2688 "(cb=%u, in_cache=%u, RDH=%x RDT=%x)\n",
2689 pThis->szPrf, cb, e1kRxDInCache(pThis), rxdc.rdh, rxdc.rdt));
2690 break;
2691 }
2692# else /* !E1K_WITH_RXD_CACHE */
2693 if (RDH == RDT)
2694 {
2695 E1kLog(("%s Out of receive buffers, dropping the packet\n",
2696 pThis->szPrf));
2697 }
2698 /* Store the packet to receive buffers */
2699 while (RDH != RDT)
2700 {
2701 /* Load the descriptor pointed by head */
2702 E1KRXDESC desc, *pDesc = &desc;
2703 PDMDevHlpPCIPhysRead(pDevIns, e1kDescAddr(RDBAH, RDBAL, RDH), &desc, sizeof(desc));
2704# endif /* !E1K_WITH_RXD_CACHE */
2705 if (pDesc->u64BufAddr)
2706 {
2707 uint16_t u16RxBufferSize = pThis->u16RxBSize; /* see @bugref{9427} */
2708
2709 /* Update descriptor */
2710 pDesc->status = status;
2711 pDesc->u16Checksum = checksum;
2712 pDesc->status.fDD = true;
2713
2714 /*
2715 * We need to leave Rx critical section here or we risk deadlocking
2716 * with EMT in e1kRegWriteRDT when the write is to an unallocated
2717 * page or has an access handler associated with it.
2718 * Note that it is safe to leave the critical section here since
2719 * e1kRegWriteRDT() never modifies RDH. It never touches already
2720 * fetched RxD cache entries either.
2721 */
2722 if (cb > u16RxBufferSize)
2723 {
2724 pDesc->status.fEOP = false;
2725 e1kCsRxLeave(pThis);
2726 e1kStoreRxFragment(pDevIns, pThis, pDesc, ptr, u16RxBufferSize);
2727 e1kCsRxEnterReturn(pThis);
2728# ifdef E1K_WITH_RXD_CACHE
2729 if (RT_UNLIKELY(!e1kUpdateRxDContext(pDevIns, pThis, &rxdc, "e1kHandleRxPacket")))
2730 {
2731 e1kCsRxLeave(pThis);
2732 E1kLog(("%s e1kHandleRxPacket: failed to update Rx context, returning VINF_SUCCESS\n", pThis->szPrf));
2733 return VINF_SUCCESS;
2734 }
2735# endif /* E1K_WITH_RXD_CACHE */
2736 ptr += u16RxBufferSize;
2737 cb -= u16RxBufferSize;
2738 }
2739 else
2740 {
2741 pDesc->status.fEOP = true;
2742 e1kCsRxLeave(pThis);
2743 e1kStoreRxFragment(pDevIns, pThis, pDesc, ptr, cb);
2744# ifdef E1K_WITH_RXD_CACHE
2745 e1kCsRxEnterReturn(pThis);
2746 if (RT_UNLIKELY(!e1kUpdateRxDContext(pDevIns, pThis, &rxdc, "e1kHandleRxPacket")))
2747 {
2748 e1kCsRxLeave(pThis);
2749 E1kLog(("%s e1kHandleRxPacket: failed to update Rx context, returning VINF_SUCCESS\n", pThis->szPrf));
2750 return VINF_SUCCESS;
2751 }
2752 cb = 0;
2753# else /* !E1K_WITH_RXD_CACHE */
2754 pThis->led.Actual.s.fReading = 0;
2755 return VINF_SUCCESS;
2756# endif /* !E1K_WITH_RXD_CACHE */
2757 }
2758 /*
2759 * Note: RDH is advanced by e1kStoreRxFragment if E1K_WITH_RXD_CACHE
2760 * is not defined.
2761 */
2762 }
2763# ifdef E1K_WITH_RXD_CACHE
2764 /* Write back the descriptor. */
2765 pDesc->status.fDD = true;
2766 e1kRxDPut(pDevIns, pThis, pDesc, &rxdc);
2767# else /* !E1K_WITH_RXD_CACHE */
2768 else
2769 {
2770 /* Write back the descriptor. */
2771 pDesc->status.fDD = true;
2772 PDMDevHlpPCIPhysWrite(pDevIns, e1kDescAddr(RDBAH, RDBAL, RDH), pDesc, sizeof(E1KRXDESC));
2773 e1kAdvanceRDH(pDevIns, pThis);
2774 }
2775# endif /* !E1K_WITH_RXD_CACHE */
2776 }
2777
2778 if (cb > 0)
2779 E1kLog(("%s Out of receive buffers, dropping %u bytes\n", pThis->szPrf, cb));
2780
2781 pThis->led.Actual.s.fReading = 0;
2782
2783 e1kCsRxLeave(pThis);
2784# ifdef E1K_WITH_RXD_CACHE
2785 /* Complete packet has been stored -- it is time to let the guest know. */
2786# ifdef E1K_USE_RX_TIMERS
2787 if (RDTR)
2788 {
2789 /* Arm the timer to fire in RDTR usec (discard .024) */
2790 e1kArmTimer(pDevIns, pThis, pThis->hRIDTimer, RDTR);
2791 /* If absolute timer delay is enabled and the timer is not running yet, arm it. */
2792 if (RADV != 0 && !PDMDevHlpTimerIsActive(pDevIns, pThis->hRADTimer))
2793 e1kArmTimer(pDevIns, pThis, pThis->hRADTimer, RADV);
2794 }
2795 else
2796 {
2797# endif /* E1K_USE_RX_TIMERS */
2798 /* 0 delay means immediate interrupt */
2799 E1K_INC_ISTAT_CNT(pThis->uStatIntRx);
2800 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_RXT0);
2801# ifdef E1K_USE_RX_TIMERS
2802 }
2803# endif /* E1K_USE_RX_TIMERS */
2804# endif /* E1K_WITH_RXD_CACHE */
2805
2806 return VINF_SUCCESS;
2807#else /* !IN_RING3 */
2808 RT_NOREF(pDevIns, pThis, pvBuf, cb, status);
2809 return VERR_INTERNAL_ERROR_2;
2810#endif /* !IN_RING3 */
2811}
2812
2813
2814#ifdef IN_RING3
2815/**
2816 * Bring the link up after the configured delay, 5 seconds by default.
2817 *
2818 * @param pDevIns The device instance.
2819 * @param pThis The device state structure.
2820 * @thread any
2821 */
2822DECLINLINE(void) e1kBringLinkUpDelayed(PPDMDEVINS pDevIns, PE1KSTATE pThis)
2823{
2824 E1kLog(("%s Will bring up the link in %d seconds...\n",
2825 pThis->szPrf, pThis->cMsLinkUpDelay / 1000));
2826 e1kArmTimer(pDevIns, pThis, pThis->hLUTimer, pThis->cMsLinkUpDelay * 1000);
2827}
2828
2829/**
2830 * Bring up the link immediately.
2831 *
2832 * @param pDevIns The device instance.
2833 * @param pThis The device state structure.
2834 * @param pThisCC The current context instance data.
2835 */
2836DECLINLINE(void) e1kR3LinkUp(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KSTATECC pThisCC)
2837{
2838 E1kLog(("%s Link is up\n", pThis->szPrf));
2839 STATUS |= STATUS_LU;
2840 Phy::setLinkStatus(&pThis->phy, true);
2841 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_LSC);
2842 if (pThisCC->pDrvR3)
2843 pThisCC->pDrvR3->pfnNotifyLinkChanged(pThisCC->pDrvR3, PDMNETWORKLINKSTATE_UP);
2844 /* Trigger processing of pending TX descriptors (see @bugref{8942}). */
2845 PDMDevHlpTaskTrigger(pDevIns, pThis->hTxTask);
2846}
2847
2848/**
2849 * Bring down the link immediately.
2850 *
2851 * @param pDevIns The device instance.
2852 * @param pThis The device state structure.
2853 * @param pThisCC The current context instance data.
2854 */
2855DECLINLINE(void) e1kR3LinkDown(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KSTATECC pThisCC)
2856{
2857 E1kLog(("%s Link is down\n", pThis->szPrf));
2858 STATUS &= ~STATUS_LU;
2859#ifdef E1K_LSC_ON_RESET
2860 Phy::setLinkStatus(&pThis->phy, false);
2861#endif /* E1K_LSC_ON_RESET */
2862 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_LSC);
2863 if (pThisCC->pDrvR3)
2864 pThisCC->pDrvR3->pfnNotifyLinkChanged(pThisCC->pDrvR3, PDMNETWORKLINKSTATE_DOWN);
2865}
2866
2867/**
2868 * Bring down the link temporarily.
2869 *
2870 * @param pDevIns The device instance.
2871 * @param pThis The device state structure.
2872 * @param pThisCC The current context instance data.
2873 */
2874DECLINLINE(void) e1kR3LinkDownTemp(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KSTATECC pThisCC)
2875{
2876 E1kLog(("%s Link is down temporarily\n", pThis->szPrf));
2877 STATUS &= ~STATUS_LU;
2878 Phy::setLinkStatus(&pThis->phy, false);
2879 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_LSC);
2880 /*
2881 * Notifying the associated driver that the link went down (even temporarily)
2882 * seems to be the right thing, but it was not done before. This may cause
2883 * a regression if the driver does not expect the link to go down as a result
2884 * of sending PDMNETWORKLINKSTATE_DOWN_RESUME to this device. Earlier versions
2885 * of code notified the driver that the link was up! See @bugref{7057}.
2886 */
2887 if (pThisCC->pDrvR3)
2888 pThisCC->pDrvR3->pfnNotifyLinkChanged(pThisCC->pDrvR3, PDMNETWORKLINKSTATE_DOWN);
2889 e1kBringLinkUpDelayed(pDevIns, pThis);
2890}
2891#endif /* IN_RING3 */
2892
2893#if 0 /* unused */
2894/**
2895 * Read handler for Device Status register.
2896 *
2897 * Get the link status from PHY.
2898 *
2899 * @returns VBox status code.
2900 *
2901 * @param pThis The device state structure.
2902 * @param offset Register offset in memory-mapped frame.
2903 * @param index Register index in register array.
2904 * @param mask Used to implement partial reads (8 and 16-bit).
2905 */
2906static int e1kRegReadCTRL(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
2907{
2908 E1kLog(("%s e1kRegReadCTRL: mdio dir=%s mdc dir=%s mdc=%d\n",
2909 pThis->szPrf, (CTRL & CTRL_MDIO_DIR)?"OUT":"IN ",
2910 (CTRL & CTRL_MDC_DIR)?"OUT":"IN ", !!(CTRL & CTRL_MDC)));
2911 if ((CTRL & CTRL_MDIO_DIR) == 0 && (CTRL & CTRL_MDC))
2912 {
2913 /* MDC is high and MDIO pin is used for input, read MDIO pin from PHY */
2914 if (Phy::readMDIO(&pThis->phy))
2915 *pu32Value = CTRL | CTRL_MDIO;
2916 else
2917 *pu32Value = CTRL & ~CTRL_MDIO;
2918 E1kLog(("%s e1kRegReadCTRL: Phy::readMDIO(%d)\n",
2919 pThis->szPrf, !!(*pu32Value & CTRL_MDIO)));
2920 }
2921 else
2922 {
2923 /* MDIO pin is used for output, ignore it */
2924 *pu32Value = CTRL;
2925 }
2926 return VINF_SUCCESS;
2927}
2928#endif /* unused */
2929
2930/**
2931 * A helper function to detect the link state to the other side of "the wire".
2932 *
2933 * When deciding to bring up the link we need to take into account both whether
2934 * the cable is connected and whether our device is actually connected to the
2935 * outside world. If no driver is attached we won't be able to allocate TX
2936 * buffers, which prevents us from processing TX descriptors and eventually
2937 * results in a "TX unit hang" in the guest.
2938 *
2939 * @returns true if the device is connected to something.
2940 *
2941 * @param pDevIns The device instance.
2942 */
2943DECLINLINE(bool) e1kIsConnected(PPDMDEVINS pDevIns)
2944{
2945 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
2946 return pThis->fCableConnected && pThis->fIsAttached;
2947}
2948
2949/**
2950 * A callback used by the PHY to indicate that the link needs to be updated due
2951 * to a PHY reset.
2952 *
2953 * @param pDevIns The device instance.
2954 * @thread any
2955 */
2956void e1kPhyLinkResetCallback(PPDMDEVINS pDevIns)
2957{
2958 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
2959
2960 /* Make sure we have cable connected and MAC can talk to PHY */
2961 if (e1kIsConnected(pDevIns) && (CTRL & CTRL_SLU))
2962 e1kArmTimer(pDevIns, pThis, pThis->hLUTimer, E1K_INIT_LINKUP_DELAY_US);
2963 else
2964 Log(("%s PHY link reset callback ignored (cable %sconnected, driver %stached, CTRL_SLU=%u)\n", pThis->szPrf,
2965 pThis->fCableConnected ? "" : "dis", pThis->fIsAttached ? "at" : "de", CTRL & CTRL_SLU ? 1 : 0));
2966}
2967
2968/**
2969 * Write handler for Device Control register.
2970 *
2971 * Handles reset.
2972 *
2973 * @param pThis The device state structure.
2974 * @param offset Register offset in memory-mapped frame.
2975 * @param index Register index in register array.
2976 * @param value The value to store.
2977 * @param mask Used to implement partial writes (8 and 16-bit).
2978 * @thread EMT
2979 */
2980static int e1kRegWriteCTRL(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2981{
2982 int rc = VINF_SUCCESS;
2983
2984 if (value & CTRL_RESET)
2985 { /* RST */
2986#ifndef IN_RING3
2987 return VINF_IOM_R3_MMIO_WRITE;
2988#else
2989 e1kR3HardReset(pDevIns, pThis, PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC));
2990#endif
2991 }
2992 else
2993 {
2994#ifdef E1K_LSC_ON_SLU
2995 /*
2996 * When the guest changes 'Set Link Up' bit from 0 to 1 we check if
2997 * the link is down and the cable is connected, and if they are we
2998 * bring the link up, see @bugref{8624}.
2999 */
3000 if ( (value & CTRL_SLU)
3001 && !(CTRL & CTRL_SLU)
3002 && pThis->fCableConnected
3003 && !(STATUS & STATUS_LU))
3004 {
3005 /* It should take about 2 seconds for the link to come up */
3006 e1kArmTimer(pDevIns, pThis, pThis->hLUTimer, E1K_INIT_LINKUP_DELAY_US);
3007 }
3008#else /* !E1K_LSC_ON_SLU */
3009 if ( (value & CTRL_SLU)
3010 && !(CTRL & CTRL_SLU)
3011 && e1kIsConnected(pDevIns)
3012 && !PDMDevHlpTimerIsActive(pDevIns, pThis->hLUTimer))
3013 {
3014 /* PXE does not use LSC interrupts, see @bugref{9113}. */
3015 STATUS |= STATUS_LU;
3016 }
3017#endif /* !E1K_LSC_ON_SLU */
3018 if ((value & CTRL_VME) != (CTRL & CTRL_VME))
3019 {
3020 E1kLog(("%s VLAN Mode %s\n", pThis->szPrf, (value & CTRL_VME) ? "Enabled" : "Disabled"));
3021 }
3022 Log7(("%s e1kRegWriteCTRL: mdio dir=%s mdc dir=%s mdc=%s mdio=%d\n",
3023 pThis->szPrf, (value & CTRL_MDIO_DIR)?"OUT":"IN ",
3024 (value & CTRL_MDC_DIR)?"OUT":"IN ", (value & CTRL_MDC)?"HIGH":"LOW ", !!(value & CTRL_MDIO)));
3025 if (value & CTRL_MDC)
3026 {
3027 if (value & CTRL_MDIO_DIR)
3028 {
3029 Log7(("%s e1kRegWriteCTRL: Phy::writeMDIO(%d)\n", pThis->szPrf, !!(value & CTRL_MDIO)));
3030 /* MDIO direction pin is set to output and MDC is high, write MDIO pin value to PHY */
3031 Phy::writeMDIO(&pThis->phy, !!(value & CTRL_MDIO), pDevIns);
3032 }
3033 else
3034 {
3035 if (Phy::readMDIO(&pThis->phy))
3036 value |= CTRL_MDIO;
3037 else
3038 value &= ~CTRL_MDIO;
3039 Log7(("%s e1kRegWriteCTRL: Phy::readMDIO(%d)\n", pThis->szPrf, !!(value & CTRL_MDIO)));
3040 }
3041 }
3042 rc = e1kRegWriteDefault(pDevIns, pThis, offset, index, value);
3043 }
3044
3045 return rc;
3046}
3047
3048/**
3049 * Write handler for EEPROM/Flash Control/Data register.
3050 *
3051 * Handles EEPROM access requests; forwards writes to EEPROM device if access has been granted.
3052 *
3053 * @param pThis The device state structure.
3054 * @param offset Register offset in memory-mapped frame.
3055 * @param index Register index in register array.
3056 * @param value The value to store.
3057 * @param mask Used to implement partial writes (8 and 16-bit).
3058 * @thread EMT
3059 */
3060static int e1kRegWriteEECD(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3061{
3062 RT_NOREF(pDevIns, offset, index);
3063#ifdef IN_RING3
3064 /* So far we are only concerned with the lower byte */
3065 if ((EECD & EECD_EE_GNT) || pThis->eChip == E1K_CHIP_82543GC)
3066 {
3067 /* Access to EEPROM granted -- forward 4-wire bits to EEPROM device */
3068 /* Note: 82543GC does not need to request EEPROM access */
3069 STAM_PROFILE_ADV_START(&pThis->StatEEPROMWrite, a);
3070 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
3071 pThisCC->eeprom.write(value & EECD_EE_WIRES);
3072 STAM_PROFILE_ADV_STOP(&pThis->StatEEPROMWrite, a);
3073 }
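 /* Software requests direct EEPROM access via EECD.REQ; the emulation grants it
  * immediately (EECD.GNT) and revokes the grant as soon as REQ is cleared. */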
3074 if (value & EECD_EE_REQ)
3075 EECD |= EECD_EE_REQ|EECD_EE_GNT;
3076 else
3077 EECD &= ~EECD_EE_GNT;
3078 //e1kRegWriteDefault(pThis, offset, index, value );
3079
3080 return VINF_SUCCESS;
3081#else /* !IN_RING3 */
3082 RT_NOREF(pThis, value);
3083 return VINF_IOM_R3_MMIO_WRITE;
3084#endif /* !IN_RING3 */
3085}
3086
3087/**
3088 * Read handler for EEPROM/Flash Control/Data register.
3089 *
3090 * Lower 4 bits come from EEPROM device if EEPROM access has been granted.
3091 *
3092 * @returns VBox status code.
3093 *
3094 * @param pThis The device state structure.
3095 * @param offset Register offset in memory-mapped frame.
3096 * @param index Register index in register array.
3097 * @param pu32Value Where to store the value of the register.
3098 * @thread EMT
3099 */
3100static int e1kRegReadEECD(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
3101{
3102#ifdef IN_RING3
3103 uint32_t value = 0; /* Get rid of false positive in parfait. */
3104 int rc = e1kRegReadDefault(pDevIns, pThis, offset, index, &value);
3105 if (RT_SUCCESS(rc))
3106 {
3107 if ((value & EECD_EE_GNT) || pThis->eChip == E1K_CHIP_82543GC)
3108 {
3109 /* Note: 82543GC does not need to request EEPROM access */
3110 /* Access to EEPROM granted -- get 4-wire bits from the EEPROM device */
3111 STAM_PROFILE_ADV_START(&pThis->StatEEPROMRead, a);
3112 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
3113 value |= pThisCC->eeprom.read();
3114 STAM_PROFILE_ADV_STOP(&pThis->StatEEPROMRead, a);
3115 }
3116 *pu32Value = value;
3117 }
3118
3119 return rc;
3120#else /* !IN_RING3 */
3121 RT_NOREF_PV(pDevIns); RT_NOREF_PV(pThis); RT_NOREF_PV(offset); RT_NOREF_PV(index); RT_NOREF_PV(pu32Value);
3122 return VINF_IOM_R3_MMIO_READ;
3123#endif /* !IN_RING3 */
3124}
3125
3126/**
3127 * Write handler for EEPROM Read register.
3128 *
3129 * Handles EEPROM word access requests, reads EEPROM and stores the result
3130 * into DATA field.
3131 *
3132 * @param pThis The device state structure.
3133 * @param offset Register offset in memory-mapped frame.
3134 * @param index Register index in register array.
3135 * @param value The value to store.
3136 * @param mask Used to implement partial writes (8 and 16-bit).
3137 * @thread EMT
3138 */
3139static int e1kRegWriteEERD(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3140{
3141#ifdef IN_RING3
3142 /* Make use of 'writable' and 'readable' masks. */
3143 e1kRegWriteDefault(pDevIns, pThis, offset, index, value);
3144 /* DONE and DATA are set only if read was triggered by START. */
3145 if (value & EERD_START)
3146 {
3147 STAM_PROFILE_ADV_START(&pThis->StatEEPROMRead, a);
3148 uint16_t tmp;
3149 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
3150 if (pThisCC->eeprom.readWord(GET_BITS_V(value, EERD, ADDR), &tmp))
3151 SET_BITS(EERD, DATA, tmp);
3152 EERD |= EERD_DONE;
3153 STAM_PROFILE_ADV_STOP(&pThis->StatEEPROMRead, a);
3154 }
3155
3156 return VINF_SUCCESS;
3157#else /* !IN_RING3 */
3158 RT_NOREF_PV(pDevIns); RT_NOREF_PV(pThis); RT_NOREF_PV(offset); RT_NOREF_PV(index); RT_NOREF_PV(value);
3159 return VINF_IOM_R3_MMIO_WRITE;
3160#endif /* !IN_RING3 */
3161}
3162
3163
3164/**
3165 * Write handler for MDI Control register.
3166 *
3167 * Handles PHY read/write requests; forwards requests to internal PHY device.
3168 *
3169 * @param pThis The device state structure.
3170 * @param offset Register offset in memory-mapped frame.
3171 * @param index Register index in register array.
3172 * @param value The value to store.
3173 * @param mask Used to implement partial writes (8 and 16-bit).
3174 * @thread EMT
3175 */
3176static int e1kRegWriteMDIC(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3177{
3178 if (value & MDIC_INT_EN)
3179 {
3180 E1kLog(("%s ERROR! Interrupt at the end of an MDI cycle is not supported yet.\n",
3181 pThis->szPrf));
3182 }
3183 else if (value & MDIC_READY)
3184 {
3185 E1kLog(("%s ERROR! Ready bit is not reset by software during write operation.\n",
3186 pThis->szPrf));
3187 }
3188 else if (GET_BITS_V(value, MDIC, PHY) != 1)
3189 {
3190 E1kLog(("%s WARNING! Access to invalid PHY detected, phy=%d.\n",
3191 pThis->szPrf, GET_BITS_V(value, MDIC, PHY)));
3192 /*
3193 * Some drivers scan the MDIO bus for a PHY. We can work with these
3194 * drivers if we set MDIC_READY and MDIC_ERROR when there isn't a PHY
3195 * at the requested address, see @bugref{7346}.
3196 */
3197 MDIC = MDIC_READY | MDIC_ERROR;
3198 }
3199 else
3200 {
3201 /* Store the value */
3202 e1kRegWriteDefault(pDevIns, pThis, offset, index, value);
3203 STAM_COUNTER_INC(&pThis->StatPHYAccesses);
3204 /* Forward op to PHY */
3205 if (value & MDIC_OP_READ)
3206 SET_BITS(MDIC, DATA, Phy::readRegister(&pThis->phy, GET_BITS_V(value, MDIC, REG), pDevIns));
3207 else
3208 Phy::writeRegister(&pThis->phy, GET_BITS_V(value, MDIC, REG), value & MDIC_DATA_MASK, pDevIns);
3209 /* Let software know that we are done */
3210 MDIC |= MDIC_READY;
3211 }
3212
3213 return VINF_SUCCESS;
3214}
3215
3216/**
3217 * Write handler for Interrupt Cause Read register.
3218 *
3219 * Bits corresponding to 1s in 'value' will be cleared in ICR register.
3220 *
3221 * @param pThis The device state structure.
3222 * @param offset Register offset in memory-mapped frame.
3223 * @param index Register index in register array.
3224 * @param value The value to store.
3225 * @param mask Used to implement partial writes (8 and 16-bit).
3226 * @thread EMT
3227 */
3228static int e1kRegWriteICR(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3229{
3230 ICR &= ~value;
3231
3232 RT_NOREF_PV(pDevIns); RT_NOREF_PV(pThis); RT_NOREF_PV(offset); RT_NOREF_PV(index);
3233 return VINF_SUCCESS;
3234}
3235
3236/**
3237 * Read handler for Interrupt Cause Read register.
3238 *
3239 * Reading this register acknowledges all interrupts.
3240 *
3241 * @returns VBox status code.
3242 *
3243 * @param pThis The device state structure.
3244 * @param offset Register offset in memory-mapped frame.
3245 * @param index Register index in register array.
3246 * @param pu32Value Where to store the value of the register.
3247 * @thread EMT
3248 */
3249static int e1kRegReadICR(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
3250{
3251 e1kCsEnterReturn(pThis, VINF_IOM_R3_MMIO_READ);
3252
3253 uint32_t value = 0;
3254 int rc = e1kRegReadDefault(pDevIns, pThis, offset, index, &value);
3255 if (RT_SUCCESS(rc))
3256 {
3257 if (value)
3258 {
3259 if (!pThis->fIntRaised)
3260 E1K_INC_ISTAT_CNT(pThis->uStatNoIntICR);
3261 /*
3262 * Not clearing ICR causes QNX to hang as it reads ICR in a loop
3263 * with disabled interrupts.
3264 */
3265 //if (IMS)
3266 if (1)
3267 {
3268 /*
3269 * Interrupts were enabled -- we are presumably at the very
3270 * beginning of the guest's interrupt handler
3271 */
3272 E1kLogRel(("E1000: irq lowered, icr=0x%x\n", ICR));
3273 E1kLog(("%s e1kRegReadICR: Lowered IRQ (%08x)\n", pThis->szPrf, ICR));
3274 /* Clear all pending interrupts */
3275 ICR = 0;
3276 pThis->fIntRaised = false;
3277 /* Lower(0) INTA(0) */
3278 PDMDevHlpPCISetIrq(pDevIns, 0, 0);
3279
3280 pThis->u64AckedAt = PDMDevHlpTimerGet(pDevIns, pThis->hIntTimer);
3281 if (pThis->fIntMaskUsed)
3282 pThis->fDelayInts = true;
3283 }
3284 else
3285 {
3286 /*
3287 * Interrupts are disabled -- in Windows guests the ICR read is done
3288 * just before re-enabling interrupts
3289 */
3290 E1kLog(("%s e1kRegReadICR: Suppressing auto-clear due to disabled interrupts (%08x)\n", pThis->szPrf, ICR));
3291 }
3292 }
3293 *pu32Value = value;
3294 }
3295 e1kCsLeave(pThis);
3296
3297 return rc;
3298}
3299
3300/**
3301 * Read handler for Interrupt Cause Set register.
3302 *
3303 * VxWorks driver uses this undocumented feature of real H/W to read ICR without acknowledging interrupts.
3304 *
3305 * @returns VBox status code.
3306 *
3307 * @param pThis The device state structure.
3308 * @param offset Register offset in memory-mapped frame.
3309 * @param index Register index in register array.
3310 * @param pu32Value Where to store the value of the register.
3311 * @thread EMT
3312 */
3313static int e1kRegReadICS(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
3314{
3315 RT_NOREF_PV(index);
3316 return e1kRegReadDefault(pDevIns, pThis, offset, ICR_IDX, pu32Value);
3317}
3318
3319/**
3320 * Write handler for Interrupt Cause Set register.
3321 *
3322 * Bits corresponding to 1s in 'value' will be set in ICR register.
3323 *
3324 * @param pThis The device state structure.
3325 * @param offset Register offset in memory-mapped frame.
3326 * @param index Register index in register array.
3327 * @param value The value to store.
3328 * @param mask Used to implement partial writes (8 and 16-bit).
3329 * @thread EMT
3330 */
3331static int e1kRegWriteICS(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3332{
3333 RT_NOREF_PV(offset); RT_NOREF_PV(index);
3334 E1K_INC_ISTAT_CNT(pThis->uStatIntICS);
3335 return e1kRaiseInterrupt(pDevIns, pThis, VINF_IOM_R3_MMIO_WRITE, value & g_aE1kRegMap[ICS_IDX].writable);
3336}
3337
3338/**
3339 * Write handler for Interrupt Mask Set register.
3340 *
3341 * Will trigger pending interrupts.
3342 *
3343 * @param pThis The device state structure.
3344 * @param offset Register offset in memory-mapped frame.
3345 * @param index Register index in register array.
3346 * @param value The value to store.
3347 * @param mask Used to implement partial writes (8 and 16-bit).
3348 * @thread EMT
3349 */
3350static int e1kRegWriteIMS(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3351{
3352 RT_NOREF_PV(offset); RT_NOREF_PV(index);
3353
3354 IMS |= value;
3355 E1kLogRel(("E1000: irq enabled, RDH=%x RDT=%x TDH=%x TDT=%x\n", RDH, RDT, TDH, TDT));
3356 E1kLog(("%s e1kRegWriteIMS: IRQ enabled\n", pThis->szPrf));
3357 /*
3358 * We cannot raise the interrupt immediately here as that occasionally causes an interrupt
3359 * storm in Windows guests (see @bugref{8624}, @bugref{5023}); postpone it instead.
3360 */
3361 if ((ICR & IMS) && !pThis->fLocked)
3362 {
3363 E1K_INC_ISTAT_CNT(pThis->uStatIntIMS);
3364 e1kPostponeInterrupt(pDevIns, pThis, E1K_IMS_INT_DELAY_NS);
3365 }
3366
3367 return VINF_SUCCESS;
3368}
3369
3370/**
3371 * Write handler for Interrupt Mask Clear register.
3372 *
3373 * Bits corresponding to 1s in 'value' will be cleared in IMS register.
3374 *
3375 * @param pThis The device state structure.
3376 * @param offset Register offset in memory-mapped frame.
3377 * @param index Register index in register array.
3378 * @param value The value to store.
3379 * @param mask Used to implement partial writes (8 and 16-bit).
3380 * @thread EMT
3381 */
3382static int e1kRegWriteIMC(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3383{
3384 RT_NOREF_PV(offset); RT_NOREF_PV(index);
3385
3386 e1kCsEnterReturn(pThis, VINF_IOM_R3_MMIO_WRITE);
3387 if (pThis->fIntRaised)
3388 {
3389 /*
3390 * Technically we should reset fIntRaised in the ICR read handler, but that would cause
3391 * Windows to freeze since it may receive an interrupt while still at the very beginning
3392 * of its interrupt handler.
3393 */
3394 E1K_INC_ISTAT_CNT(pThis->uStatIntLower);
3395 STAM_COUNTER_INC(&pThis->StatIntsPrevented);
3396 E1kLogRel(("E1000: irq lowered (IMC), icr=0x%x\n", ICR));
3397 /* Lower(0) INTA(0) */
3398 PDMDevHlpPCISetIrq(pDevIns, 0, 0);
3399 pThis->fIntRaised = false;
3400 E1kLog(("%s e1kRegWriteIMC: Lowered IRQ: ICR=%08x\n", pThis->szPrf, ICR));
3401 }
3402 IMS &= ~value;
3403 E1kLog(("%s e1kRegWriteIMC: IRQ disabled\n", pThis->szPrf));
3404 e1kCsLeave(pThis);
3405
3406 return VINF_SUCCESS;
3407}
3408
3409/**
3410 * Write handler for Receive Control register.
3411 *
3412 * @param pThis The device state structure.
3413 * @param offset Register offset in memory-mapped frame.
3414 * @param index Register index in register array.
3415 * @param value The value to store.
3416 * @param mask Used to implement partial writes (8 and 16-bit).
3417 * @thread EMT
3418 */
3419static int e1kRegWriteRCTL(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3420{
3421 /* Update promiscuous mode */
3422 bool fBecomePromiscous = !!(value & (RCTL_UPE | RCTL_MPE));
3423 if (fBecomePromiscous != !!( RCTL & (RCTL_UPE | RCTL_MPE)))
3424 {
3425 /* Promiscuity has changed, pass the knowledge on. */
3426#ifndef IN_RING3
3427 return VINF_IOM_R3_MMIO_WRITE;
3428#else
3429 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
3430 if (pThisCC->pDrvR3)
3431 pThisCC->pDrvR3->pfnSetPromiscuousMode(pThisCC->pDrvR3, fBecomePromiscous);
3432#endif
3433 }
3434
3435 /* Adjust receive buffer size */
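 /* RCTL.BSIZE selects a buffer of 2048 >> BSIZE bytes and RCTL.BSEX scales that by 16;
  * the result is clamped to E1K_MAX_RX_PKT_SIZE below. */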
3436 unsigned cbRxBuf = 2048 >> GET_BITS_V(value, RCTL, BSIZE);
3437 if (value & RCTL_BSEX)
3438 cbRxBuf *= 16;
3439 if (cbRxBuf > E1K_MAX_RX_PKT_SIZE)
3440 cbRxBuf = E1K_MAX_RX_PKT_SIZE;
3441 if (cbRxBuf != pThis->u16RxBSize)
3442 E1kLog2(("%s e1kRegWriteRCTL: Setting receive buffer size to %d (old %d)\n",
3443 pThis->szPrf, cbRxBuf, pThis->u16RxBSize));
3444 Assert(cbRxBuf < 65536);
3445 pThis->u16RxBSize = (uint16_t)cbRxBuf;
3446
3447 /* Update the register */
3448 return e1kRegWriteDefault(pDevIns, pThis, offset, index, value);
3449}
3450
3451/**
3452 * Write handler for Packet Buffer Allocation register.
3453 *
3454 * TXA = 64 - RXA.
3455 *
3456 * @param pThis The device state structure.
3457 * @param offset Register offset in memory-mapped frame.
3458 * @param index Register index in register array.
3459 * @param value The value to store.
3460 * @param mask Used to implement partial writes (8 and 16-bit).
3461 * @thread EMT
3462 */
3463static int e1kRegWritePBA(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3464{
3465 e1kRegWriteDefault(pDevIns, pThis, offset, index, value);
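 /* The packet buffer comprises 64 units in total; whatever the guest does not
  * allocate to rxa implicitly belongs to txa. */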
3466 PBA_st->txa = 64 - PBA_st->rxa;
3467
3468 return VINF_SUCCESS;
3469}
3470
3471/**
3472 * Write handler for Receive Descriptor Tail register.
3473 *
3474 * @remarks Write into RDT forces switch to HC and signal to
3475 * e1kR3NetworkDown_WaitReceiveAvail().
3476 *
3477 * @returns VBox status code.
3478 *
3479 * @param pThis The device state structure.
3480 * @param offset Register offset in memory-mapped frame.
3481 * @param index Register index in register array.
3482 * @param value The value to store.
3483 * @param mask Used to implement partial writes (8 and 16-bit).
3484 * @thread EMT
3485 */
3486static int e1kRegWriteRDT(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3487{
3488#ifndef IN_RING3
3489 /* XXX */
3490// return VINF_IOM_R3_MMIO_WRITE;
3491#endif
3492 int rc = e1kCsRxEnter(pThis, VINF_IOM_R3_MMIO_WRITE);
3493 if (RT_LIKELY(rc == VINF_SUCCESS))
3494 {
3495 E1kLog(("%s e1kRegWriteRDT\n", pThis->szPrf));
3496#ifndef E1K_WITH_RXD_CACHE
3497 /*
3498 * Some drivers advance RDT too far, so that it equals RDH. This
3499 * somehow manages to work with real hardware but not with this
3500 * emulated device. We can work with these drivers if we just
3501 * write 1 less when we see a driver writing RDT equal to RDH,
3502 * see @bugref{7346}.
3503 */
3504 if (value == RDH)
3505 {
3506 if (RDH == 0)
3507 value = (RDLEN / sizeof(E1KRXDESC)) - 1;
3508 else
3509 value = RDH - 1;
3510 }
3511#endif /* !E1K_WITH_RXD_CACHE */
3512 rc = e1kRegWriteDefault(pDevIns, pThis, offset, index, value);
3513#ifdef E1K_WITH_RXD_CACHE
3514 E1KRXDC rxdc;
3515 if (RT_UNLIKELY(!e1kUpdateRxDContext(pDevIns, pThis, &rxdc, "e1kRegWriteRDT")))
3516 {
3517 e1kCsRxLeave(pThis);
3518 E1kLog(("%s e1kRegWriteRDT: failed to update Rx context, returning VINF_SUCCESS\n", pThis->szPrf));
3519 return VINF_SUCCESS;
3520 }
3521 /*
3522 * We need to fetch descriptors now as RDT may go a whole circle
3523 * around the ring before we attempt to store a received packet. For
3524 * example, Intel's DOS drivers use 2 (!) RX descriptors with the
3525 * total ring size being only 8 descriptors! Note that we fetch
3526 * descriptors only when the cache is empty to reduce the number of
3527 * memory reads in case of frequent RDT writes. Don't fetch anything
3528 * when the receiver is disabled either, as RDH, RDT, and RDLEN may
3529 * be in an inconsistent state then.
3530 * Note that even though the cache may seem empty, meaning that there
3531 * are no more available descriptors in it, it may still be in use by
3532 * the RX thread, which has not yet written the last descriptor back
3533 * but has temporarily released the RX lock in order to write the
3534 * packet body to the descriptor's buffer. At this point we are still
3535 * going to do the prefetch, but it won't actually fetch anything if
3536 * there are no unused slots in our "empty" cache
3537 * (nRxDFetched==E1K_RXD_CACHE_SIZE). We must not reset the cache here
3538 * even if it appears empty; it will be reset later, in e1kRxDGet().
3539 */
3540 if (e1kRxDIsCacheEmpty(pThis) && (RCTL & RCTL_EN))
3541 e1kRxDPrefetch(pDevIns, pThis, &rxdc);
3542#endif /* E1K_WITH_RXD_CACHE */
3543 e1kCsRxLeave(pThis);
3544 if (RT_SUCCESS(rc))
3545 {
3546 /* Signal that we have more receive descriptors available. */
3547 e1kWakeupReceive(pDevIns, pThis);
3548 }
3549 }
3550 return rc;
3551}
3552
3553/**
3554 * Write handler for Receive Delay Timer register.
3555 *
3556 * @param pThis The device state structure.
3557 * @param offset Register offset in memory-mapped frame.
3558 * @param index Register index in register array.
3559 * @param value The value to store.
3560 * @param mask Used to implement partial writes (8 and 16-bit).
3561 * @thread EMT
3562 */
3563static int e1kRegWriteRDTR(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3564{
3565 e1kRegWriteDefault(pDevIns, pThis, offset, index, value);
3566 if (value & RDTR_FPD)
3567 {
3568 /* Flush requested, cancel both timers and raise interrupt */
3569#ifdef E1K_USE_RX_TIMERS
3570 e1kCancelTimer(pDevIns, pThis, pThis->hRIDTimer);
3571 e1kCancelTimer(pDevIns, pThis, pThis->hRADTimer);
3572#endif
3573 E1K_INC_ISTAT_CNT(pThis->uStatIntRDTR);
3574 return e1kRaiseInterrupt(pDevIns, pThis, VINF_IOM_R3_MMIO_WRITE, ICR_RXT0);
3575 }
3576
3577 return VINF_SUCCESS;
3578}
3579
3580DECLINLINE(uint32_t) e1kGetTxLen(PE1KTXDC pTxdc)
3581{
3582 /*
3583 * Make sure TDT won't change during computation. EMT may modify TDT at
3584 * any moment.
3585 */
3586 uint32_t tdt = pTxdc->tdt;
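 /* Count descriptors owned by hardware: the distance from TDH to TDT in the circular
  * ring. If the head is past the tail, the tail has wrapped, so the ring size
  * (TDLEN / sizeof(E1KTXDESC)) is added to keep the result non-negative. */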
3587 return (pTxdc->tdh > tdt ? pTxdc->tdlen/sizeof(E1KTXDESC) : 0) + tdt - pTxdc->tdh;
3588}
3589
3590#ifdef IN_RING3
3591
3592# ifdef E1K_TX_DELAY
3593/**
3594 * @callback_method_impl{FNTMTIMERDEV, Transmit Delay Timer handler.}
3595 */
3596static DECLCALLBACK(void) e1kR3TxDelayTimer(PPDMDEVINS pDevIns, TMTIMERHANDLE hTimer, void *pvUser)
3597{
3598 PE1KSTATE pThis = (PE1KSTATE)pvUser;
3599 Assert(PDMDevHlpCritSectIsOwner(pDevIns, &pThis->csTx));
3600 RT_NOREF(hTimer);
3601
3602 E1K_INC_ISTAT_CNT(pThis->uStatTxDelayExp);
3603# ifdef E1K_INT_STATS
3604 uint64_t u64Elapsed = RTTimeNanoTS() - pThis->u64ArmedAt;
3605 if (u64Elapsed > pThis->uStatMaxTxDelay)
3606 pThis->uStatMaxTxDelay = u64Elapsed;
3607# endif
3608 int rc = e1kXmitPending(pDevIns, pThis, false /*fOnWorkerThread*/);
3609 AssertMsg(RT_SUCCESS(rc) || rc == VERR_TRY_AGAIN, ("%Rrc\n", rc));
3610}
3611# endif /* E1K_TX_DELAY */
3612
3613//# ifdef E1K_USE_TX_TIMERS
3614
3615/**
3616 * @callback_method_impl{FNTMTIMERDEV, Transmit Interrupt Delay Timer handler.}
3617 */
3618static DECLCALLBACK(void) e1kR3TxIntDelayTimer(PPDMDEVINS pDevIns, TMTIMERHANDLE hTimer, void *pvUser)
3619{
3620 PE1KSTATE pThis = (PE1KSTATE)pvUser;
3621 Assert(hTimer == pThis->hTIDTimer); RT_NOREF(hTimer);
3622
3623 E1K_INC_ISTAT_CNT(pThis->uStatTID);
3624 /* Cancel absolute delay timer as we have already got attention */
3625# ifndef E1K_NO_TAD
3626 e1kCancelTimer(pDevIns, pThis, pThis->hTADTimer);
3627# endif
3628 e1kRaiseInterrupt(pDevIns, pThis, VERR_IGNORED, ICR_TXDW);
3629}
3630
3631/**
3632 * @callback_method_impl{FNTMTIMERDEV, Transmit Absolute Delay Timer handler.}
3633 */
3634static DECLCALLBACK(void) e1kR3TxAbsDelayTimer(PPDMDEVINS pDevIns, TMTIMERHANDLE hTimer, void *pvUser)
3635{
3636 PE1KSTATE pThis = (PE1KSTATE)pvUser;
3637 Assert(hTimer == pThis->hTADTimer); RT_NOREF(hTimer);
3638
3639 E1K_INC_ISTAT_CNT(pThis->uStatTAD);
3640 /* Cancel interrupt delay timer as we have already got attention */
3641 e1kCancelTimer(pDevIns, pThis, pThis->hTIDTimer);
3642 e1kRaiseInterrupt(pDevIns, pThis, VERR_IGNORED, ICR_TXDW);
3643}
3644
3645//# endif /* E1K_USE_TX_TIMERS */
3646# ifdef E1K_USE_RX_TIMERS
3647
3648/**
3649 * @callback_method_impl{FNTMTIMERDEV, Receive Interrupt Delay Timer handler.}
3650 */
3651static DECLCALLBACK(void) e1kR3RxIntDelayTimer(PPDMDEVINS pDevIns, TMTIMERHANDLE hTimer, void *pvUser)
3652{
3653 PE1KSTATE pThis = (PE1KSTATE)pvUser;
3654 Assert(hTimer == pThis->hRIDTimer); RT_NOREF(hTimer);
3655
3656 E1K_INC_ISTAT_CNT(pThis->uStatRID);
3657 /* Cancel absolute delay timer as we have already got attention */
3658 e1kCancelTimer(pDevIns, pThis, pThis->hRADTimer);
3659 e1kRaiseInterrupt(pDevIns, pThis, VERR_IGNORED, ICR_RXT0);
3660}
3661
3662/**
3663 * @callback_method_impl{FNTMTIMERDEV, Receive Absolute Delay Timer handler.}
3664 */
3665static DECLCALLBACK(void) e1kR3RxAbsDelayTimer(PPDMDEVINS pDevIns, TMTIMERHANDLE hTimer, void *pvUser)
3666{
3667 PE1KSTATE pThis = (PE1KSTATE)pvUser;
3668 Assert(hTimer == pThis->hRADTimer); RT_NOREF(hTimer);
3669
3670 E1K_INC_ISTAT_CNT(pThis->uStatRAD);
3671 /* Cancel interrupt delay timer as we have already got attention */
3672 e1kCancelTimer(pDevIns, pThis, pThis->hRIDTimer);
3673 e1kRaiseInterrupt(pDevIns, pThis, VERR_IGNORED, ICR_RXT0);
3674}
3675
3676# endif /* E1K_USE_RX_TIMERS */
3677
3678/**
3679 * @callback_method_impl{FNTMTIMERDEV, Late Interrupt Timer handler.}
3680 */
3681static DECLCALLBACK(void) e1kR3LateIntTimer(PPDMDEVINS pDevIns, TMTIMERHANDLE hTimer, void *pvUser)
3682{
3683 PE1KSTATE pThis = (PE1KSTATE)pvUser;
3684 Assert(hTimer == pThis->hIntTimer); RT_NOREF(hTimer);
3686
3687 STAM_PROFILE_ADV_START(&pThis->StatLateIntTimer, a);
3688 STAM_COUNTER_INC(&pThis->StatLateInts);
3689 E1K_INC_ISTAT_CNT(pThis->uStatIntLate);
3690# if 0
3691 if (pThis->iStatIntLost > -100)
3692 pThis->iStatIntLost--;
3693# endif
3694 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, 0);
3695 STAM_PROFILE_ADV_STOP(&pThis->StatLateIntTimer, a);
3696}
3697
3698/**
3699 * @callback_method_impl{FNTMTIMERDEV, Link Up Timer handler.}
3700 */
3701static DECLCALLBACK(void) e1kR3LinkUpTimer(PPDMDEVINS pDevIns, TMTIMERHANDLE hTimer, void *pvUser)
3702{
3703 PE1KSTATE pThis = (PE1KSTATE)pvUser;
3704 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
3705 Assert(hTimer == pThis->hLUTimer); RT_NOREF(hTimer);
3706
3707 /*
3708 * The link may be down at this point: this happens if we set the link status to
3709 * down while the link-up timer was already armed (shortly after e1kR3LoadDone()),
3710 * or if the cable was disconnected and re-connected very quickly. Moreover, 82543GC
3711 * triggers LSC on reset even if the cable is unplugged (see @bugref{8942}).
3712 */
3713 if (e1kIsConnected(pDevIns))
3714 {
3715 /* 82543GC does not have an internal PHY */
3716 if (pThis->eChip == E1K_CHIP_82543GC || (CTRL & CTRL_SLU))
3717 e1kR3LinkUp(pDevIns, pThis, pThisCC);
3718 }
3719# ifdef E1K_LSC_ON_RESET
3720 else if (pThis->eChip == E1K_CHIP_82543GC)
3721 e1kR3LinkDown(pDevIns, pThis, pThisCC);
3722# endif /* E1K_LSC_ON_RESET */
3723}
3724
3725#endif /* IN_RING3 */
3726
3727/**
3728 * Sets up the GSO context according to the TSE new context descriptor.
3729 *
3730 * @param pGso The GSO context to setup.
3731 * @param pCtx The context descriptor.
3732 */
3733DECLINLINE(bool) e1kSetupGsoCtx(PPDMNETWORKGSO pGso, E1KTXCTX const *pCtx)
3734{
3735 pGso->u8Type = PDMNETWORKGSOTYPE_INVALID;
3736
3737 /*
3738 * See if the context descriptor describes something that could be TCP or
3739 * UDP over IPv[46].
3740 */
3741 /* Check the header ordering and spacing: 1. Ethernet, 2. IP, 3. TCP/UDP. */
3742 if (RT_UNLIKELY( pCtx->ip.u8CSS < sizeof(RTNETETHERHDR) ))
3743 {
3744 E1kLog(("e1kSetupGsoCtx: IPCSS=%#x\n", pCtx->ip.u8CSS));
3745 return false;
3746 }
3747 if (RT_UNLIKELY( pCtx->tu.u8CSS < (size_t)pCtx->ip.u8CSS + (pCtx->dw2.fIP ? RTNETIPV4_MIN_LEN : RTNETIPV6_MIN_LEN) ))
3748 {
3749 E1kLog(("e1kSetupGsoCtx: TUCSS=%#x\n", pCtx->tu.u8CSS));
3750 return false;
3751 }
3752 if (RT_UNLIKELY( pCtx->dw2.fTCP
3753 ? pCtx->dw3.u8HDRLEN < (size_t)pCtx->tu.u8CSS + RTNETTCP_MIN_LEN
3754 : pCtx->dw3.u8HDRLEN != (size_t)pCtx->tu.u8CSS + RTNETUDP_MIN_LEN ))
3755 {
3756 E1kLog(("e1kSetupGsoCtx: HDRLEN=%#x TCP=%d\n", pCtx->dw3.u8HDRLEN, pCtx->dw2.fTCP));
3757 return false;
3758 }
3759
3760 /* The TCP/UDP checksum range should end at the end of the packet, or at least past the headers. */
3761 if (RT_UNLIKELY( pCtx->tu.u16CSE > 0 && pCtx->tu.u16CSE <= pCtx->dw3.u8HDRLEN ))
3762 {
3763 E1kLog(("e1kSetupGsoCtx: TUCSE=%#x HDRLEN=%#x\n", pCtx->tu.u16CSE, pCtx->dw3.u8HDRLEN));
3764 return false;
3765 }
3766
3767 /* IPv4 checksum offset. */
3768 if (RT_UNLIKELY( pCtx->dw2.fIP && (size_t)pCtx->ip.u8CSO - pCtx->ip.u8CSS != RT_UOFFSETOF(RTNETIPV4, ip_sum) ))
3769 {
3770 E1kLog(("e1kSetupGsoCtx: IPCSO=%#x IPCSS=%#x\n", pCtx->ip.u8CSO, pCtx->ip.u8CSS));
3771 return false;
3772 }
3773
3774 /* TCP/UDP checksum offsets. */
3775 if (RT_UNLIKELY( (size_t)pCtx->tu.u8CSO - pCtx->tu.u8CSS
3776 != ( pCtx->dw2.fTCP
3777 ? RT_UOFFSETOF(RTNETTCP, th_sum)
3778 : RT_UOFFSETOF(RTNETUDP, uh_sum) ) ))
3779 {
3780 E1kLog(("e1kSetupGsoCtx: TUCSO=%#x TUCSS=%#x TCP=%d\n", pCtx->tu.u8CSO, pCtx->tu.u8CSS, pCtx->dw2.fTCP));
3781 return false;
3782 }
3783
3784 /*
3785 * Because of internal networking using a 16-bit size field for GSO context
3786 * plus frame, we have to make sure we don't exceed this.
3787 */
3788 if (RT_UNLIKELY( pCtx->dw3.u8HDRLEN + pCtx->dw2.u20PAYLEN > VBOX_MAX_GSO_SIZE ))
3789 {
3790 E1kLog(("e1kSetupGsoCtx: HDRLEN(=%#x) + PAYLEN(=%#x) = %#x, max is %#x\n",
3791 pCtx->dw3.u8HDRLEN, pCtx->dw2.u20PAYLEN, pCtx->dw3.u8HDRLEN + pCtx->dw2.u20PAYLEN, VBOX_MAX_GSO_SIZE));
3792 return false;
3793 }
3794
3795 /*
3796 * We're good for now - we'll do more checks when seeing the data.
3797 * So, figure the type of offloading and setup the context.
3798 */
3799 if (pCtx->dw2.fIP)
3800 {
3801 if (pCtx->dw2.fTCP)
3802 {
3803 pGso->u8Type = PDMNETWORKGSOTYPE_IPV4_TCP;
3804 pGso->cbHdrsSeg = pCtx->dw3.u8HDRLEN;
3805 }
3806 else
3807 {
3808 pGso->u8Type = PDMNETWORKGSOTYPE_IPV4_UDP;
3809 pGso->cbHdrsSeg = pCtx->tu.u8CSS; /* IP header only */
3810 }
3811 /** @todo Detect IPv4-IPv6 tunneling (need test setup since linux doesn't do
3812 * this yet it seems)... */
3813 }
3814 else
3815 {
3816 pGso->cbHdrsSeg = pCtx->dw3.u8HDRLEN; /** @todo IPv6 UFO */
3817 if (pCtx->dw2.fTCP)
3818 pGso->u8Type = PDMNETWORKGSOTYPE_IPV6_TCP;
3819 else
3820 pGso->u8Type = PDMNETWORKGSOTYPE_IPV6_UDP;
3821 }
3822 pGso->offHdr1 = pCtx->ip.u8CSS;
3823 pGso->offHdr2 = pCtx->tu.u8CSS;
3824 pGso->cbHdrsTotal = pCtx->dw3.u8HDRLEN;
3825 pGso->cbMaxSeg = pCtx->dw3.u16MSS + (pGso->u8Type == PDMNETWORKGSOTYPE_IPV4_UDP ? pGso->offHdr2 : 0);
3826 Assert(PDMNetGsoIsValid(pGso, sizeof(*pGso), pGso->cbMaxSeg * 5));
3827 E1kLog2(("e1kSetupGsoCtx: mss=%#x hdr=%#x hdrseg=%#x hdr1=%#x hdr2=%#x %s\n",
3828 pGso->cbMaxSeg, pGso->cbHdrsTotal, pGso->cbHdrsSeg, pGso->offHdr1, pGso->offHdr2, PDMNetGsoTypeName((PDMNETWORKGSOTYPE)pGso->u8Type) ));
3829 return PDMNetGsoIsValid(pGso, sizeof(*pGso), pGso->cbMaxSeg * 5);
3830}
3831
3832/**
3833 * Checks if we can use GSO processing for the current TSE frame.
3834 *
3835 * @param pThis The device state structure.
3836 * @param pGso The GSO context.
3837 * @param pData The first data descriptor of the frame.
3838 * @param pCtx The TSO context descriptor.
3839 */
3840DECLINLINE(bool) e1kCanDoGso(PE1KSTATE pThis, PCPDMNETWORKGSO pGso, E1KTXDAT const *pData, E1KTXCTX const *pCtx)
3841{
3842 if (!pData->cmd.fTSE)
3843 {
3844 E1kLog2(("e1kCanDoGso: !TSE\n"));
3845 return false;
3846 }
3847 if (pData->cmd.fVLE) /** @todo VLAN tagging. */
3848 {
3849 E1kLog(("e1kCanDoGso: VLE\n"));
3850 return false;
3851 }
3852 if (RT_UNLIKELY(!pThis->fGSOEnabled))
3853 {
3854 E1kLog3(("e1kCanDoGso: GSO disabled via CFGM\n"));
3855 return false;
3856 }
3857
3858 switch ((PDMNETWORKGSOTYPE)pGso->u8Type)
3859 {
3860 case PDMNETWORKGSOTYPE_IPV4_TCP:
3861 case PDMNETWORKGSOTYPE_IPV4_UDP:
3862 if (!pData->dw3.fIXSM)
3863 {
3864 E1kLog(("e1kCanDoGso: !IXSM (IPv4)\n"));
3865 return false;
3866 }
3867 if (!pData->dw3.fTXSM)
3868 {
3869 E1kLog(("e1kCanDoGso: !TXSM (IPv4)\n"));
3870 return false;
3871 }
3872 /** @todo what more checks should we perform here? Ethernet frame type? */
3873 E1kLog2(("e1kCanDoGso: OK, IPv4\n"));
3874 return true;
3875
3876 case PDMNETWORKGSOTYPE_IPV6_TCP:
3877 case PDMNETWORKGSOTYPE_IPV6_UDP:
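 /* IPv6 has no header checksum, so a request to insert one (IXSM with a
  * checksum offset) is not something we can offload here. */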
3878 if (pData->dw3.fIXSM && pCtx->ip.u8CSO)
3879 {
3880 E1kLog(("e1kCanDoGso: IXSM (IPv6)\n"));
3881 return false;
3882 }
3883 if (!pData->dw3.fTXSM)
3884 {
3885 E1kLog(("e1kCanDoGso: !TXSM (IPv6)\n"));
3886 return false;
3887 }
3888 /** @todo what more checks should we perform here? Ethernet frame type? */
3889 E1kLog2(("e1kCanDoGso: OK, IPv6\n"));
3890 return true;
3891
3892 default:
3893 Assert(pGso->u8Type == PDMNETWORKGSOTYPE_INVALID);
3894 E1kLog2(("e1kCanDoGso: e1kSetupGsoCtx failed\n"));
3895 return false;
3896 }
3897}
3898
3899/**
3900 * Frees the current xmit buffer.
3901 *
3902 * @param pThis The device state structure.
3903 */
3904static void e1kXmitFreeBuf(PE1KSTATE pThis, PE1KSTATECC pThisCC)
3905{
3906 PPDMSCATTERGATHER pSg = pThisCC->CTX_SUFF(pTxSg);
3907 if (pSg)
3908 {
3909 pThisCC->CTX_SUFF(pTxSg) = NULL;
3910
3911 if (pSg->pvAllocator != pThis)
3912 {
3913 PPDMINETWORKUP pDrv = pThisCC->CTX_SUFF(pDrv);
3914 if (pDrv)
3915 pDrv->pfnFreeBuf(pDrv, pSg);
3916 }
3917 else
3918 {
3919 /* loopback */
3920 AssertCompileMemberSize(E1KSTATE, uTxFallback.Sg, 8 * sizeof(size_t));
3921 Assert(pSg->fFlags == (PDMSCATTERGATHER_FLAGS_MAGIC | PDMSCATTERGATHER_FLAGS_OWNER_3));
3922 pSg->fFlags = 0;
3923 pSg->pvAllocator = NULL;
3924 }
3925 }
3926}
3927
3928#ifndef E1K_WITH_TXD_CACHE
3929/**
3930 * Allocates an xmit buffer.
3931 *
3932 * @returns See PDMINETWORKUP::pfnAllocBuf.
3933 * @param pThis The device state structure.
3934 * @param cbMin The minimum frame size.
3935 * @param fExactSize Whether cbMin is exact or if we have to max it
3936 * out to the max MTU size.
3937 * @param fGso Whether this is a GSO frame or not.
3938 */
3939DECLINLINE(int) e1kXmitAllocBuf(PE1KSTATE pThis, PE1KSTATECC pThisCC, size_t cbMin, bool fExactSize, bool fGso)
3940{
3941 /* Adjust cbMin if necessary. */
3942 if (!fExactSize)
3943 cbMin = RT_MAX(cbMin, E1K_MAX_TX_PKT_SIZE);
3944
3945 /* Deal with existing buffer (descriptor screw up, reset, etc). */
3946 if (RT_UNLIKELY(pThisCC->CTX_SUFF(pTxSg)))
3947 e1kXmitFreeBuf(pThis, pThisCC);
3948 Assert(pThisCC->CTX_SUFF(pTxSg) == NULL);
3949
3950 /*
3951 * Allocate the buffer.
3952 */
3953 PPDMSCATTERGATHER pSg;
3954 if (RT_LIKELY(GET_BITS(RCTL, LBM) != RCTL_LBM_TCVR))
3955 {
3956 PPDMINETWORKUP pDrv = pThisCC->CTX_SUFF(pDrv);
3957 if (RT_UNLIKELY(!pDrv))
3958 return VERR_NET_DOWN;
3959 int rc = pDrv->pfnAllocBuf(pDrv, cbMin, fGso ? &pThis->GsoCtx : NULL, &pSg);
3960 if (RT_FAILURE(rc))
3961 {
3962 /* Suspend TX as we are out of buffers atm */
3963 STATUS |= STATUS_TXOFF;
3964 return rc;
3965 }
3966 }
3967 else
3968 {
3969 /* Create a loopback using the fallback buffer and preallocated SG. */
3970 AssertCompileMemberSize(E1KSTATE, uTxFallback.Sg, 8 * sizeof(size_t));
3971 pSg = &pThis->uTxFallback.Sg;
3972 pSg->fFlags = PDMSCATTERGATHER_FLAGS_MAGIC | PDMSCATTERGATHER_FLAGS_OWNER_3;
3973 pSg->cbUsed = 0;
3974 pSg->cbAvailable = 0;
3975 pSg->pvAllocator = pThis;
3976 pSg->pvUser = NULL; /* No GSO here. */
3977 pSg->cSegs = 1;
3978 pSg->aSegs[0].pvSeg = pThis->aTxPacketFallback;
3979 pSg->aSegs[0].cbSeg = sizeof(pThis->aTxPacketFallback);
3980 }
3981
3982 pThisCC->CTX_SUFF(pTxSg) = pSg;
3983 return VINF_SUCCESS;
3984}
3985#else /* E1K_WITH_TXD_CACHE */
3986/**
3987 * Allocates an xmit buffer.
3988 *
3989 * @returns See PDMINETWORKUP::pfnAllocBuf.
3990 * @param pThis The device state structure.
3991 * @param cbMin The minimum frame size.
3992 * @param fExactSize Whether cbMin is exact or if we have to max it
3993 * out to the max MTU size.
3994 * @param fGso Whether this is a GSO frame or not.
3995 */
3996DECLINLINE(int) e1kXmitAllocBuf(PE1KSTATE pThis, PE1KSTATECC pThisCC, bool fGso)
3997{
3998 /* Deal with existing buffer (descriptor screw up, reset, etc). */
3999 if (RT_UNLIKELY(pThisCC->CTX_SUFF(pTxSg)))
4000 e1kXmitFreeBuf(pThis, pThisCC);
4001 Assert(pThisCC->CTX_SUFF(pTxSg) == NULL);
4002
4003 /*
4004 * Allocate the buffer.
4005 */
4006 PPDMSCATTERGATHER pSg;
4007 if (RT_LIKELY(GET_BITS(RCTL, LBM) != RCTL_LBM_TCVR))
4008 {
4009 if (pThis->cbTxAlloc == 0)
4010 {
4011 /* Zero packet, no need for the buffer */
4012 return VINF_SUCCESS;
4013 }
4014 if (fGso && pThis->GsoCtx.u8Type == PDMNETWORKGSOTYPE_INVALID)
4015 {
4016 E1kLog3(("Invalid GSO context, won't allocate this packet, cb=%u %s%s\n",
4017 pThis->cbTxAlloc, pThis->fVTag ? "VLAN " : "", pThis->fGSO ? "GSO " : ""));
4018 /* No valid GSO context is available, ignore this packet. */
4019 pThis->cbTxAlloc = 0;
4020 return VINF_SUCCESS;
4021 }
4022
4023 PPDMINETWORKUP pDrv = pThisCC->CTX_SUFF(pDrv);
4024 if (RT_UNLIKELY(!pDrv))
4025 return VERR_NET_DOWN;
4026 int rc = pDrv->pfnAllocBuf(pDrv, pThis->cbTxAlloc, fGso ? &pThis->GsoCtx : NULL, &pSg);
4027 if (RT_FAILURE(rc))
4028 {
4029 /* Suspend TX as we are out of buffers atm */
4030 STATUS |= STATUS_TXOFF;
4031 return rc;
4032 }
4033 E1kLog3(("%s Allocated buffer for TX packet: cb=%u %s%s\n",
4034 pThis->szPrf, pThis->cbTxAlloc,
4035 pThis->fVTag ? "VLAN " : "",
4036 pThis->fGSO ? "GSO " : ""));
4037 }
4038 else
4039 {
4040 /* Create a loopback using the fallback buffer and preallocated SG. */
4041 AssertCompileMemberSize(E1KSTATE, uTxFallback.Sg, 8 * sizeof(size_t));
4042 pSg = &pThis->uTxFallback.Sg;
4043 pSg->fFlags = PDMSCATTERGATHER_FLAGS_MAGIC | PDMSCATTERGATHER_FLAGS_OWNER_3;
4044 pSg->cbUsed = 0;
4045 pSg->cbAvailable = sizeof(pThis->aTxPacketFallback);
4046 pSg->pvAllocator = pThis;
4047 pSg->pvUser = NULL; /* No GSO here. */
4048 pSg->cSegs = 1;
4049 pSg->aSegs[0].pvSeg = pThis->aTxPacketFallback;
4050 pSg->aSegs[0].cbSeg = sizeof(pThis->aTxPacketFallback);
4051 }
4052 pThis->cbTxAlloc = 0;
4053
4054 pThisCC->CTX_SUFF(pTxSg) = pSg;
4055 return VINF_SUCCESS;
4056}
4057#endif /* E1K_WITH_TXD_CACHE */
4058
4059/**
4060 * Checks if it's a GSO buffer or not.
4061 *
4062 * @returns true / false.
4063 * @param pTxSg The scatter / gather buffer.
4064 */
4065DECLINLINE(bool) e1kXmitIsGsoBuf(PDMSCATTERGATHER const *pTxSg)
4066{
4067#if 0
4068 if (!pTxSg)
4069 E1kLog(("e1kXmitIsGsoBuf: pTxSG is NULL\n"));
4070 if (pTxSg && !pTxSg->pvUser)
4071 E1kLog(("e1kXmitIsGsoBuf: pvUser is NULL\n"));
4072#endif
4073 return pTxSg && pTxSg->pvUser /* GSO indicator */;
4074}
4075
4076#ifndef E1K_WITH_TXD_CACHE
4077/**
4078 * Load transmit descriptor from guest memory.
4079 *
4080 * @param pDevIns The device instance.
4081 * @param pDesc Pointer to descriptor union.
4082 * @param addr Physical address in guest context.
4083 * @thread E1000_TX
4084 */
4085DECLINLINE(void) e1kLoadDesc(PPDMDEVINS pDevIns, E1KTXDESC *pDesc, RTGCPHYS addr)
4086{
4087 PDMDevHlpPCIPhysRead(pDevIns, addr, pDesc, sizeof(E1KTXDESC));
4088}
4089#else /* E1K_WITH_TXD_CACHE */
4090/**
4091 * Load transmit descriptors from guest memory.
4092 *
4093 * We need two physical reads in case the tail wrapped around the end of TX
4094 * descriptor ring.
4095 *
4096 * @returns the actual number of descriptors fetched.
4097 * @param pDevIns The device instance.
4098 * @param pThis The device state structure.
4099 * @thread E1000_TX
4100 */
4101DECLINLINE(unsigned) e1kTxDLoadMore(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KTXDC pTxdc)
4102{
4103 Assert(pThis->iTxDCurrent == 0);
4104 /* We've already loaded pThis->nTxDFetched descriptors past TDH. */
4105 unsigned nDescsAvailable = e1kGetTxLen(pTxdc) - pThis->nTxDFetched;
4106 /* The following two lines ensure that pThis->nTxDFetched never overflows. */
4107 AssertCompile(E1K_TXD_CACHE_SIZE < (256 * sizeof(pThis->nTxDFetched)));
4108 unsigned nDescsToFetch = RT_MIN(nDescsAvailable, E1K_TXD_CACHE_SIZE - pThis->nTxDFetched);
4109 unsigned nDescsTotal = pTxdc->tdlen / sizeof(E1KTXDESC);
4110 Assert(nDescsTotal != 0);
4111 if (nDescsTotal == 0)
4112 return 0;
4113 unsigned nFirstNotLoaded = (pTxdc->tdh + pThis->nTxDFetched) % nDescsTotal;
4114 unsigned nDescsInSingleRead = RT_MIN(nDescsToFetch, nDescsTotal - nFirstNotLoaded);
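 /* The first read below fetches descriptors up to the physical end of the ring;
  * if more were requested, a second read starting at the ring base handles the wrap-around. */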
4115 E1kLog3(("%s e1kTxDLoadMore: nDescsAvailable=%u nDescsToFetch=%u nDescsTotal=%u nFirstNotLoaded=0x%x nDescsInSingleRead=%u\n",
4116 pThis->szPrf, nDescsAvailable, nDescsToFetch, nDescsTotal,
4117 nFirstNotLoaded, nDescsInSingleRead));
4118 if (nDescsToFetch == 0)
4119 return 0;
4120 E1KTXDESC* pFirstEmptyDesc = &pThis->aTxDescriptors[pThis->nTxDFetched];
4121 PDMDevHlpPCIPhysRead(pDevIns,
4122 ((uint64_t)TDBAH << 32) + TDBAL + nFirstNotLoaded * sizeof(E1KTXDESC),
4123 pFirstEmptyDesc, nDescsInSingleRead * sizeof(E1KTXDESC));
4124 E1kLog3(("%s Fetched %u TX descriptors at %08x%08x(0x%x), TDLEN=%08x, TDH=%08x, TDT=%08x\n",
4125 pThis->szPrf, nDescsInSingleRead,
4126 TDBAH, TDBAL + nFirstNotLoaded * sizeof(E1KTXDESC),
4127 nFirstNotLoaded, pTxdc->tdlen, pTxdc->tdh, pTxdc->tdt));
4128 if (nDescsToFetch > nDescsInSingleRead)
4129 {
4130 PDMDevHlpPCIPhysRead(pDevIns,
4131 ((uint64_t)TDBAH << 32) + TDBAL,
4132 pFirstEmptyDesc + nDescsInSingleRead,
4133 (nDescsToFetch - nDescsInSingleRead) * sizeof(E1KTXDESC));
4134 E1kLog3(("%s Fetched %u TX descriptors at %08x%08x\n",
4135 pThis->szPrf, nDescsToFetch - nDescsInSingleRead,
4136 TDBAH, TDBAL));
4137 }
4138 pThis->nTxDFetched += (uint8_t)nDescsToFetch;
4139 return nDescsToFetch;
4140}
4141
4142/**
4143 * Load transmit descriptors from guest memory only if there are no loaded
4144 * descriptors.
4145 *
4146 * @returns true if there are descriptors in cache.
4147 * @param pDevIns The device instance.
4148 * @param pThis The device state structure.
4149 * @thread E1000_TX
4150 */
4151DECLINLINE(bool) e1kTxDLazyLoad(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KTXDC pTxdc)
4152{
4153 if (pThis->nTxDFetched == 0)
4154 return e1kTxDLoadMore(pDevIns, pThis, pTxdc) != 0;
4155 return true;
4156}
4157#endif /* E1K_WITH_TXD_CACHE */
4158
4159/**
4160 * Write back transmit descriptor to guest memory.
4161 *
4162 * @param pDevIns The device instance.
4163 * @param pThis The device state structure.
4164 * @param pDesc Pointer to descriptor union.
4165 * @param addr Physical address in guest context.
4166 * @thread E1000_TX
4167 */
4168DECLINLINE(void) e1kWriteBackDesc(PPDMDEVINS pDevIns, PE1KSTATE pThis, E1KTXDESC *pDesc, RTGCPHYS addr)
4169{
4170 /* Only the last half of the descriptor actually has to be written back, but we write back the whole descriptor. */
4171 e1kPrintTDesc(pThis, pDesc, "^^^");
4172 PDMDevHlpPCIPhysWrite(pDevIns, addr, pDesc, sizeof(E1KTXDESC));
4173}
4174
4175/**
4176 * Transmit complete frame.
4177 *
4178 * @remarks We skip the FCS since we're not responsible for sending anything to
4179 * a real ethernet wire.
4180 *
4181 * @param pDevIns The device instance.
4182 * @param pThis The device state structure.
4183 * @param pThisCC The current context instance data.
4184 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4185 * @thread E1000_TX
4186 */
4187static void e1kTransmitFrame(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KSTATECC pThisCC, bool fOnWorkerThread)
4188{
4189 PPDMSCATTERGATHER pSg = pThisCC->CTX_SUFF(pTxSg);
4190 uint32_t cbFrame = pSg ? (uint32_t)pSg->cbUsed : 0;
4191 Assert(!pSg || pSg->cSegs == 1);
4192
4193 if (cbFrame < 14)
4194 {
4195 Log(("%s Ignoring invalid frame (%u bytes)\n", pThis->szPrf, cbFrame));
4196 return;
4197 }
4198 if (cbFrame > 70) /* unqualified guess */
4199 pThis->led.Asserted.s.fWriting = pThis->led.Actual.s.fWriting = 1;
4200
4201#ifdef E1K_INT_STATS
4202 if (cbFrame <= 1514)
4203 E1K_INC_ISTAT_CNT(pThis->uStatTx1514);
4204 else if (cbFrame <= 2962)
4205 E1K_INC_ISTAT_CNT(pThis->uStatTx2962);
4206 else if (cbFrame <= 4410)
4207 E1K_INC_ISTAT_CNT(pThis->uStatTx4410);
4208 else if (cbFrame <= 5858)
4209 E1K_INC_ISTAT_CNT(pThis->uStatTx5858);
4210 else if (cbFrame <= 7306)
4211 E1K_INC_ISTAT_CNT(pThis->uStatTx7306);
4212 else if (cbFrame <= 8754)
4213 E1K_INC_ISTAT_CNT(pThis->uStatTx8754);
4214 else if (cbFrame <= 16384)
4215 E1K_INC_ISTAT_CNT(pThis->uStatTx16384);
4216 else if (cbFrame <= 32768)
4217 E1K_INC_ISTAT_CNT(pThis->uStatTx32768);
4218 else
4219 E1K_INC_ISTAT_CNT(pThis->uStatTxLarge);
4220#endif /* E1K_INT_STATS */
4221
4222 /* Add VLAN tag */
4223 if (pThis->fVTag && pSg->cbUsed + 4 <= pSg->cbAvailable)
4224 {
4225 Assert(cbFrame > 12);
4226
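 /* Shift everything after the two 6-byte MAC addresses (offset 12) up by 4 bytes
  * and insert the 802.1Q tag (VET + TCI) in the gap. */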
4227 E1kLog3(("%s Inserting VLAN tag %08x\n",
4228 pThis->szPrf, RT_BE2H_U16((uint16_t)VET) | (RT_BE2H_U16(pThis->u16VTagTCI) << 16)));
4229 memmove((uint8_t*)pSg->aSegs[0].pvSeg + 16, (uint8_t*)pSg->aSegs[0].pvSeg + 12, cbFrame - 12);
4230 *((uint32_t*)pSg->aSegs[0].pvSeg + 3) = RT_BE2H_U16((uint16_t)VET) | (RT_BE2H_U16(pThis->u16VTagTCI) << 16);
4231 pSg->cbUsed += 4;
4232 cbFrame += 4;
4233 Assert(pSg->cbUsed == cbFrame);
4234 Assert(pSg->cbUsed <= pSg->cbAvailable);
4235 }
4236/* E1kLog2(("%s < < < Outgoing packet. Dump follows: > > >\n"
4237 "%.*Rhxd\n"
4238 "%s < < < < < < < < < < < < < End of dump > > > > > > > > > > > >\n",
4239 pThis->szPrf, cbFrame, pSg->aSegs[0].pvSeg, pThis->szPrf));*/
4240
4241 /* Update the stats */
4242 E1K_INC_CNT32(TPT);
4243 E1K_ADD_CNT64(TOTL, TOTH, cbFrame);
4244 E1K_INC_CNT32(GPTC);
4245 if (pSg && e1kIsBroadcast(pSg->aSegs[0].pvSeg))
4246 E1K_INC_CNT32(BPTC);
4247 else if (pSg && e1kIsMulticast(pSg->aSegs[0].pvSeg))
4248 E1K_INC_CNT32(MPTC);
4249 /* Update octet transmit counter */
4250 E1K_ADD_CNT64(GOTCL, GOTCH, cbFrame);
4251 if (pThisCC->CTX_SUFF(pDrv))
4252 STAM_REL_COUNTER_ADD(&pThis->StatTransmitBytes, cbFrame);
4253 if (cbFrame == 64)
4254 E1K_INC_CNT32(PTC64);
4255 else if (cbFrame < 128)
4256 E1K_INC_CNT32(PTC127);
4257 else if (cbFrame < 256)
4258 E1K_INC_CNT32(PTC255);
4259 else if (cbFrame < 512)
4260 E1K_INC_CNT32(PTC511);
4261 else if (cbFrame < 1024)
4262 E1K_INC_CNT32(PTC1023);
4263 else
4264 E1K_INC_CNT32(PTC1522);
4265
4266 E1K_INC_ISTAT_CNT(pThis->uStatTxFrm);
4267
4268 /*
4269 * Dump and send the packet.
4270 */
4271 int rc = VERR_NET_DOWN;
4272 if (pSg && pSg->pvAllocator != pThis)
4273 {
4274 e1kPacketDump(pDevIns, pThis, (uint8_t const *)pSg->aSegs[0].pvSeg, cbFrame, "--> Outgoing");
4275
4276 pThisCC->CTX_SUFF(pTxSg) = NULL;
4277 PPDMINETWORKUP pDrv = pThisCC->CTX_SUFF(pDrv);
4278 if (pDrv)
4279 {
4280 /* Release critical section to avoid deadlock in CanReceive */
4281 //e1kCsLeave(pThis);
4282 STAM_PROFILE_START(&pThis->CTX_SUFF_Z(StatTransmitSend), a);
4283 rc = pDrv->pfnSendBuf(pDrv, pSg, fOnWorkerThread);
4284 STAM_PROFILE_STOP(&pThis->CTX_SUFF_Z(StatTransmitSend), a);
4285 //e1kR3CsEnterAsserted(pThis);
4286 }
4287 }
4288 else if (pSg)
4289 {
4290 Assert(pSg->aSegs[0].pvSeg == pThis->aTxPacketFallback);
4291 e1kPacketDump(pDevIns, pThis, (uint8_t const *)pSg->aSegs[0].pvSeg, cbFrame, "--> Loopback");
4292
4293 /** @todo do we actually need to check that we're in loopback mode here? */
4294 if (GET_BITS(RCTL, LBM) == RCTL_LBM_TCVR)
4295 {
4296 E1KRXDST status;
4297 RT_ZERO(status);
4298 status.fPIF = true;
4299 e1kHandleRxPacket(pDevIns, pThis, pSg->aSegs[0].pvSeg, cbFrame, status);
4300 rc = VINF_SUCCESS;
4301 }
4302 e1kXmitFreeBuf(pThis, pThisCC);
4303 }
4304 else
4305 rc = VERR_NET_DOWN;
4306 if (RT_FAILURE(rc))
4307 {
4308 E1kLogRel(("E1000: ERROR! pfnSend returned %Rrc\n", rc));
4309 /** @todo handle VERR_NET_DOWN and VERR_NET_NO_BUFFER_SPACE. Signal error ? */
4310 }
4311
4312 pThis->led.Actual.s.fWriting = 0;
4313}
4314
4315/**
4316 * Compute and write internet checksum (e1kCSum16) at the specified offset.
4317 *
4318 * @param pThis The device state structure.
4319 * @param pPkt Pointer to the packet.
4320 * @param u16PktLen Total length of the packet.
4321 * @param cso Offset in packet to write checksum at.
4322 * @param css Offset in packet to start computing
4323 * checksum from.
4324 * @param cse Offset in packet to stop computing
4325 * checksum at.
4326 * @param fUdp Replace 0 checksum with all 1s.
4327 * @thread E1000_TX
4328 */
4329static void e1kInsertChecksum(PE1KSTATE pThis, uint8_t *pPkt, uint16_t u16PktLen, uint8_t cso, uint8_t css, uint16_t cse, bool fUdp = false)
4330{
4331 RT_NOREF1(pThis);
4332
4333 if (css >= u16PktLen)
4334 {
4335 E1kLog2(("%s css(%X) is not less than packet length(%X), checksum is not inserted\n",
4336 pThis->szPrf, css, u16PktLen));
4337 return;
4338 }
4339
4340 if (cso >= u16PktLen - 1)
4341 {
4342 E1kLog2(("%s cso(%X) is greater than packet length-2(%X), checksum is not inserted\n",
4343 pThis->szPrf, cso, u16PktLen));
4344 return;
4345 }
4346
4347 if (cse == 0 || cse >= u16PktLen)
4348 cse = u16PktLen - 1;
4349 else if (cse < css)
4350 {
4351 E1kLog2(("%s css(%X) is greater than cse(%X), checksum is not inserted\n",
4352 pThis->szPrf, css, cse));
4353 return;
4354 }
4355
4356 uint16_t u16ChkSum = e1kCSum16(pPkt + css, cse - css + 1);
4357 if (fUdp && u16ChkSum == 0)
4358 u16ChkSum = ~u16ChkSum; /* 0 means no checksum computed in case of UDP (see @bugref{9883}) */
4359 E1kLog2(("%s Inserting csum: %04X at %02X, old value: %04X\n", pThis->szPrf,
4360 u16ChkSum, cso, *(uint16_t*)(pPkt + cso)));
4361 *(uint16_t*)(pPkt + cso) = u16ChkSum;
4362}
4363
4364/**
4365 * Add a part of descriptor's buffer to transmit frame.
4366 *
4367 * @remarks data.u64BufAddr is used unconditionally for both data
4368 * and legacy descriptors since it is identical to
4369 * legacy.u64BufAddr.
4370 *
4371 * @param pDevIns The device instance.
4372 * @param pThis The device state structure.
4373 * @param pDesc Pointer to the descriptor to transmit.
4374 * @param u16Len Length of buffer to the end of segment.
4375 * @param fSend Force packet sending.
4376 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4377 * @thread E1000_TX
4378 */
4379#ifndef E1K_WITH_TXD_CACHE
4380static void e1kFallbackAddSegment(PPDMDEVINS pDevIns, PE1KSTATE pThis, RTGCPHYS PhysAddr, uint16_t u16Len, bool fSend, bool fOnWorkerThread)
4381{
4382 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
4383 /* TCP header being transmitted */
4384 struct E1kTcpHeader *pTcpHdr = (struct E1kTcpHeader *)(pThis->aTxPacketFallback + pThis->contextTSE.tu.u8CSS);
4385 /* IP header being transmitted */
4386 struct E1kIpHeader *pIpHdr = (struct E1kIpHeader *)(pThis->aTxPacketFallback + pThis->contextTSE.ip.u8CSS);
4387
4388 E1kLog3(("%s e1kFallbackAddSegment: Length=%x, remaining payload=%x, header=%x, send=%RTbool\n",
4389 pThis->szPrf, u16Len, pThis->u32PayRemain, pThis->u16HdrRemain, fSend));
4390 Assert(pThis->u32PayRemain + pThis->u16HdrRemain > 0);
4391
4392 PDMDevHlpPCIPhysRead(pDevIns, PhysAddr, pThis->aTxPacketFallback + pThis->u16TxPktLen, u16Len);
4393 E1kLog3(("%s Dump of the segment:\n"
4394 "%.*Rhxd\n"
4395 "%s --- End of dump ---\n",
4396 pThis->szPrf, u16Len, pThis->aTxPacketFallback + pThis->u16TxPktLen, pThis->szPrf));
4397 pThis->u16TxPktLen += u16Len;
4398 E1kLog3(("%s e1kFallbackAddSegment: pThis->u16TxPktLen=%x\n",
4399 pThis->szPrf, pThis->u16TxPktLen));
4400 if (pThis->u16HdrRemain > 0)
4401 {
4402 /* The header was not complete, check if it is now */
4403 if (u16Len >= pThis->u16HdrRemain)
4404 {
4405 /* The rest is payload */
4406 u16Len -= pThis->u16HdrRemain;
4407 pThis->u16HdrRemain = 0;
4408 /* Save partial checksum and flags */
4409 pThis->u32SavedCsum = pTcpHdr->chksum;
4410 pThis->u16SavedFlags = pTcpHdr->hdrlen_flags;
4411 /* Clear FIN and PSH flags now and set them only in the last segment */
4412 pTcpHdr->hdrlen_flags &= ~htons(E1K_TCP_FIN | E1K_TCP_PSH);
4413 }
4414 else
4415 {
4416 /* Still not */
4417 pThis->u16HdrRemain -= u16Len;
4418 E1kLog3(("%s e1kFallbackAddSegment: Header is still incomplete, 0x%x bytes remain.\n",
4419 pThis->szPrf, pThis->u16HdrRemain));
4420 return;
4421 }
4422 }
4423
4424 pThis->u32PayRemain -= u16Len;
4425
4426 if (fSend)
4427 {
4428 /* Leave ethernet header intact */
4429 /* IP Total Length = payload + headers - ethernet header */
4430 pIpHdr->total_len = htons(pThis->u16TxPktLen - pThis->contextTSE.ip.u8CSS);
4431 E1kLog3(("%s e1kFallbackAddSegment: End of packet, pIpHdr->total_len=%x\n",
4432 pThis->szPrf, ntohs(pIpHdr->total_len)));
4433 /* Update IP Checksum */
4434 pIpHdr->chksum = 0;
4435 e1kInsertChecksum(pThis, pThis->aTxPacketFallback, pThis->u16TxPktLen,
4436 pThis->contextTSE.ip.u8CSO,
4437 pThis->contextTSE.ip.u8CSS,
4438 pThis->contextTSE.ip.u16CSE);
4439
4440 /* Update TCP flags */
4441 /* Restore original FIN and PSH flags for the last segment */
4442 if (pThis->u32PayRemain == 0)
4443 {
4444 pTcpHdr->hdrlen_flags = pThis->u16SavedFlags;
4445 E1K_INC_CNT32(TSCTC);
4446 }
4447 /* Add TCP length to partial pseudo header sum */
4448 uint32_t csum = pThis->u32SavedCsum
4449 + htons(pThis->u16TxPktLen - pThis->contextTSE.tu.u8CSS);
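 /* Fold any carries back into the low 16 bits (one's complement addition). */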
4450 while (csum >> 16)
4451 csum = (csum >> 16) + (csum & 0xFFFF);
4452 pTcpHdr->chksum = csum;
4453 /* Compute final checksum */
4454 e1kInsertChecksum(pThis, pThis->aTxPacketFallback, pThis->u16TxPktLen,
4455 pThis->contextTSE.tu.u8CSO,
4456 pThis->contextTSE.tu.u8CSS,
4457 pThis->contextTSE.tu.u16CSE);
4458
4459 /*
4460 * Transmit it. If we've used the SG already, allocate a new one before
4461 * we copy the data.
4462 */
4463 PPDMSCATTERGATHER pTxSg = pThisCC->CTX_SUFF(pTxSg);
4464 if (!pTxSg)
4465 {
4466 e1kXmitAllocBuf(pThis, pThisCC, pThis->u16TxPktLen + (pThis->fVTag ? 4 : 0), true /*fExactSize*/, false /*fGso*/);
4467 pTxSg = pThisCC->CTX_SUFF(pTxSg);
4468 }
4469 if (pTxSg)
4470 {
4471 Assert(pThis->u16TxPktLen <= pThisCC->CTX_SUFF(pTxSg)->cbAvailable);
4472 Assert(pTxSg->cSegs == 1);
4473 if (pTxSg->aSegs[0].pvSeg != pThis->aTxPacketFallback)
4474 memcpy(pTxSg->aSegs[0].pvSeg, pThis->aTxPacketFallback, pThis->u16TxPktLen);
4475 pTxSg->cbUsed = pThis->u16TxPktLen;
4476 pTxSg->aSegs[0].cbSeg = pThis->u16TxPktLen;
4477 }
4478 e1kTransmitFrame(pDevIns, pThis, pThisCC, fOnWorkerThread);
4479
4480 /* Update Sequence Number */
4481 pTcpHdr->seqno = htonl(ntohl(pTcpHdr->seqno) + pThis->u16TxPktLen
4482 - pThis->contextTSE.dw3.u8HDRLEN);
4483 /* Increment IP identification */
4484 pIpHdr->ident = htons(ntohs(pIpHdr->ident) + 1);
4485 }
4486}
4487#else /* E1K_WITH_TXD_CACHE */
4488static int e1kFallbackAddSegment(PPDMDEVINS pDevIns, PE1KSTATE pThis, RTGCPHYS PhysAddr, uint16_t u16Len, bool fSend, bool fOnWorkerThread)
4489{
4490 int rc = VINF_SUCCESS;
4491 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
4492 /* TCP header being transmitted */
4493 struct E1kTcpHeader *pTcpHdr = (struct E1kTcpHeader *)(pThis->aTxPacketFallback + pThis->contextTSE.tu.u8CSS);
4494 /* IP header being transmitted */
4495 struct E1kIpHeader *pIpHdr = (struct E1kIpHeader *)(pThis->aTxPacketFallback + pThis->contextTSE.ip.u8CSS);
4496
4497 E1kLog3(("%s e1kFallbackAddSegment: Length=%x, remaining payload=%x, header=%x, send=%RTbool\n",
4498 pThis->szPrf, u16Len, pThis->u32PayRemain, pThis->u16HdrRemain, fSend));
4499 AssertReturn(pThis->u32PayRemain + pThis->u16HdrRemain > 0, VINF_SUCCESS);
4500
4501 if (pThis->u16TxPktLen + u16Len <= sizeof(pThis->aTxPacketFallback))
4502 PDMDevHlpPCIPhysRead(pDevIns, PhysAddr, pThis->aTxPacketFallback + pThis->u16TxPktLen, u16Len);
4503 else
4504 E1kLog(("%s e1kFallbackAddSegment: writing beyond aTxPacketFallback, u16TxPktLen=%d(0x%x) + u16Len=%d(0x%x) > %d\n",
4505 pThis->szPrf, pThis->u16TxPktLen, pThis->u16TxPktLen, u16Len, u16Len, sizeof(pThis->aTxPacketFallback)));
4506 E1kLog3(("%s Dump of the segment:\n"
4507 "%.*Rhxd\n"
4508 "%s --- End of dump ---\n",
4509 pThis->szPrf, u16Len, pThis->aTxPacketFallback + pThis->u16TxPktLen, pThis->szPrf));
4510 pThis->u16TxPktLen += u16Len;
4511 E1kLog3(("%s e1kFallbackAddSegment: pThis->u16TxPktLen=%x\n",
4512 pThis->szPrf, pThis->u16TxPktLen));
4513 if (pThis->u16HdrRemain > 0)
4514 {
4515 /* The header was not complete, check if it is now */
4516 if (u16Len >= pThis->u16HdrRemain)
4517 {
4518 /* The rest is payload */
4519 u16Len -= pThis->u16HdrRemain;
4520 pThis->u16HdrRemain = 0;
4521 /* Save partial checksum and flags */
4522 pThis->u32SavedCsum = pTcpHdr->chksum;
4523 pThis->u16SavedFlags = pTcpHdr->hdrlen_flags;
4524 /* Clear FIN and PSH flags now and set them only in the last segment */
4525 pTcpHdr->hdrlen_flags &= ~htons(E1K_TCP_FIN | E1K_TCP_PSH);
4526 }
4527 else
4528 {
4529 /* Still not */
4530 pThis->u16HdrRemain -= u16Len;
4531 E1kLog3(("%s e1kFallbackAddSegment: Header is still incomplete, 0x%x bytes remain.\n",
4532 pThis->szPrf, pThis->u16HdrRemain));
4533 return rc;
4534 }
4535 }
4536
4537 if (u16Len > pThis->u32PayRemain)
4538 pThis->u32PayRemain = 0;
4539 else
4540 pThis->u32PayRemain -= u16Len;
4541
4542 if (fSend)
4543 {
4544 /* Leave ethernet header intact */
4545 /* IP Total Length = payload + headers - ethernet header */
4546 pIpHdr->total_len = htons(pThis->u16TxPktLen - pThis->contextTSE.ip.u8CSS);
4547 E1kLog3(("%s e1kFallbackAddSegment: End of packet, pIpHdr->total_len=%x\n",
4548 pThis->szPrf, ntohs(pIpHdr->total_len)));
4549 /* Update IP Checksum */
4550 pIpHdr->chksum = 0;
4551 e1kInsertChecksum(pThis, pThis->aTxPacketFallback, pThis->u16TxPktLen,
4552 pThis->contextTSE.ip.u8CSO,
4553 pThis->contextTSE.ip.u8CSS,
4554 pThis->contextTSE.ip.u16CSE);
4555
4556 /* Update TCP flags */
4557 /* Restore original FIN and PSH flags for the last segment */
4558 if (pThis->u32PayRemain == 0)
4559 {
4560 pTcpHdr->hdrlen_flags = pThis->u16SavedFlags;
4561 E1K_INC_CNT32(TSCTC);
4562 }
4563 /* Add TCP length to partial pseudo header sum */
4564 uint32_t csum = pThis->u32SavedCsum
4565 + htons(pThis->u16TxPktLen - pThis->contextTSE.tu.u8CSS);
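/* Fold any carry back into the low 16 bits (one's complement wrap-around); e.g. 0x12345 folds to 0x0001 + 0x2345 = 0x2346. */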
4566 while (csum >> 16)
4567 csum = (csum >> 16) + (csum & 0xFFFF);
4568 Assert(csum < 65536);
4569 pTcpHdr->chksum = (uint16_t)csum;
4570 /* Compute final checksum */
4571 e1kInsertChecksum(pThis, pThis->aTxPacketFallback, pThis->u16TxPktLen,
4572 pThis->contextTSE.tu.u8CSO,
4573 pThis->contextTSE.tu.u8CSS,
4574 pThis->contextTSE.tu.u16CSE);
4575
4576 /*
4577 * Transmit it.
4578 */
4579 PPDMSCATTERGATHER pTxSg = pThisCC->CTX_SUFF(pTxSg);
4580 if (pTxSg)
4581 {
4582 /* Make sure the packet fits into the allocated buffer */
4583 size_t cbCopy = RT_MIN(pThis->u16TxPktLen, pThisCC->CTX_SUFF(pTxSg)->cbAvailable);
4584#ifdef DEBUG
4585 if (pThis->u16TxPktLen > pTxSg->cbAvailable)
4586 E1kLog(("%s e1kFallbackAddSegment: truncating packet, u16TxPktLen=%d(0x%x) > cbAvailable=%d(0x%x)\n",
4587 pThis->szPrf, pThis->u16TxPktLen, pThis->u16TxPktLen, pTxSg->cbAvailable, pTxSg->cbAvailable));
4588#endif /* DEBUG */
4589 Assert(pTxSg->cSegs == 1);
4590 if (pTxSg->aSegs[0].pvSeg != pThis->aTxPacketFallback)
4591 memcpy(pTxSg->aSegs[0].pvSeg, pThis->aTxPacketFallback, cbCopy);
4592 pTxSg->cbUsed = cbCopy;
4593 pTxSg->aSegs[0].cbSeg = cbCopy;
4594 }
4595 e1kTransmitFrame(pDevIns, pThis, pThisCC, fOnWorkerThread);
4596
4597 /* Update Sequence Number */
4598 pTcpHdr->seqno = htonl(ntohl(pTcpHdr->seqno) + pThis->u16TxPktLen
4599 - pThis->contextTSE.dw3.u8HDRLEN);
4600 /* Increment IP identification */
4601 pIpHdr->ident = htons(ntohs(pIpHdr->ident) + 1);
4602
4603 /* Allocate new buffer for the next segment. */
4604 if (pThis->u32PayRemain)
4605 {
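/* The next segment carries at most one MSS worth of payload plus another copy of the headers (u8HDRLEN), so size the allocation accordingly. */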
4606 pThis->cbTxAlloc = RT_MIN(pThis->u32PayRemain,
4607 pThis->contextTSE.dw3.u16MSS)
4608 + pThis->contextTSE.dw3.u8HDRLEN;
4609 /* Do not add VLAN tags to empty packets. */
4610 if (pThis->fVTag && pThis->cbTxAlloc > 0)
4611 pThis->cbTxAlloc += 4;
4612 rc = e1kXmitAllocBuf(pThis, pThisCC, false /* fGSO */);
4613 }
4614 }
4615
4616 return rc;
4617}
4618#endif /* E1K_WITH_TXD_CACHE */
4619
4620#ifndef E1K_WITH_TXD_CACHE
4621/**
4622 * TCP segmentation offloading fallback: Add descriptor's buffer to transmit
4623 * frame.
4624 *
4625 * We construct the frame in the fallback buffer first and then copy it to the SG
4626 * buffer before passing it down to the network driver code.
4627 *
4628 * @returns true if the frame should be transmitted, false if not.
4629 *
 * @param pDevIns The device instance.
4630 * @param pThis The device state structure.
4631 * @param pDesc Pointer to the descriptor to transmit.
4632 * @param cbFragment Length of descriptor's buffer.
4633 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4634 * @thread E1000_TX
4635 */
4636 static bool e1kFallbackAddToFrame(PPDMDEVINS pDevIns, PE1KSTATE pThis, E1KTXDESC *pDesc, uint32_t cbFragment, bool fOnWorkerThread)
4637{
4638 PPDMSCATTERGATHER pTxSg = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC)->CTX_SUFF(pTxSg);
4639 Assert(e1kGetDescType(pDesc) == E1K_DTYP_DATA);
4640 Assert(pDesc->data.cmd.fTSE);
4641 Assert(!e1kXmitIsGsoBuf(pTxSg));
4642
4643 uint16_t u16MaxPktLen = pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw3.u16MSS;
4644 Assert(u16MaxPktLen != 0);
4645 Assert(u16MaxPktLen < E1K_MAX_TX_PKT_SIZE);
4646
4647 /*
4648 * Carve out segments.
4649 */
4650 do
4651 {
4652 /* Calculate how many bytes we have left in this TCP segment */
4653 uint32_t cb = u16MaxPktLen - pThis->u16TxPktLen;
4654 if (cb > cbFragment)
4655 {
4656 /* This descriptor fits completely into current segment */
4657 cb = cbFragment;
4658 e1kFallbackAddSegment(pDevIns, pThis, pDesc->data.u64BufAddr, cb, pDesc->data.cmd.fEOP /*fSend*/, fOnWorkerThread);
4659 }
4660 else
4661 {
4662 e1kFallbackAddSegment(pDevIns, pThis, pDesc->data.u64BufAddr, cb, true /*fSend*/, fOnWorkerThread);
4663 /*
4664 * Rewind the packet tail pointer to the beginning of payload,
4665 * so we continue writing right beyond the header.
4666 */
4667 pThis->u16TxPktLen = pThis->contextTSE.dw3.u8HDRLEN;
4668 }
4669
4670 pDesc->data.u64BufAddr += cb;
4671 cbFragment -= cb;
4672 } while (cbFragment > 0);
4673
4674 if (pDesc->data.cmd.fEOP)
4675 {
4676 /* End of packet, next segment will contain header. */
4677 if (pThis->u32PayRemain != 0)
4678 E1K_INC_CNT32(TSCTFC);
4679 pThis->u16TxPktLen = 0;
4680 e1kXmitFreeBuf(pThis, PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC));
4681 }
4682
4683 return false;
4684}
4685#else /* E1K_WITH_TXD_CACHE */
4686/**
4687 * TCP segmentation offloading fallback: Add descriptor's buffer to transmit
4688 * frame.
4689 *
4690 * We construct the frame in the fallback buffer first and then copy it to the SG
4691 * buffer before passing it down to the network driver code.
4692 *
4693 * @returns error code
4694 *
4695 * @param pDevIns The device instance.
4696 * @param pThis The device state structure.
4697 * @param pDesc Pointer to the descriptor to transmit.
4699 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4700 * @thread E1000_TX
4701 */
4702static int e1kFallbackAddToFrame(PPDMDEVINS pDevIns, PE1KSTATE pThis, E1KTXDESC *pDesc, bool fOnWorkerThread)
4703{
4704#ifdef VBOX_STRICT
4705 PPDMSCATTERGATHER pTxSg = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC)->CTX_SUFF(pTxSg);
4706 Assert(e1kGetDescType(pDesc) == E1K_DTYP_DATA);
4707 Assert(pDesc->data.cmd.fTSE);
4708 Assert(!e1kXmitIsGsoBuf(pTxSg));
4709#endif
4710
4711 uint16_t u16MaxPktLen = pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw3.u16MSS;
4712 /* We cannot produce empty packets, ignore all TX descriptors (see @bugref{9571}) */
4713 if (u16MaxPktLen == 0)
4714 return VINF_SUCCESS;
4715
4716 /*
4717 * Carve out segments.
4718 */
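/* Each iteration fills the current segment up to u16MaxPktLen (headers plus MSS); for example, with HDRLEN 54 and MSS 1460 a segment carries at most 1514 bytes, and a descriptor larger than the remaining room is split across segments. */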
4719 int rc = VINF_SUCCESS;
4720 do
4721 {
4722 /* Calculate how many bytes we have left in this TCP segment */
4723 uint16_t cb = u16MaxPktLen - pThis->u16TxPktLen;
4724 if (cb > pDesc->data.cmd.u20DTALEN)
4725 {
4726 /* This descriptor fits completely into current segment */
4727 cb = (uint16_t)pDesc->data.cmd.u20DTALEN; /* u20DTALEN at this point is guaranteed to fit into 16 bits. */
4728 rc = e1kFallbackAddSegment(pDevIns, pThis, pDesc->data.u64BufAddr, cb, pDesc->data.cmd.fEOP /*fSend*/, fOnWorkerThread);
4729 }
4730 else
4731 {
4732 rc = e1kFallbackAddSegment(pDevIns, pThis, pDesc->data.u64BufAddr, cb, true /*fSend*/, fOnWorkerThread);
4733 /*
4734 * Rewind the packet tail pointer to the beginning of payload,
4735 * so we continue writing right beyond the header.
4736 */
4737 pThis->u16TxPktLen = pThis->contextTSE.dw3.u8HDRLEN;
4738 }
4739
4740 pDesc->data.u64BufAddr += cb;
4741 pDesc->data.cmd.u20DTALEN -= cb;
4742 } while (pDesc->data.cmd.u20DTALEN > 0 && RT_SUCCESS(rc));
4743
4744 if (pDesc->data.cmd.fEOP)
4745 {
4746 /* End of packet, next segment will contain header. */
4747 if (pThis->u32PayRemain != 0)
4748 E1K_INC_CNT32(TSCTFC);
4749 pThis->u16TxPktLen = 0;
4750 e1kXmitFreeBuf(pThis, PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC));
4751 }
4752
4753 return VINF_SUCCESS; /// @todo consider rc;
4754}
4755#endif /* E1K_WITH_TXD_CACHE */
4756
4757
4758/**
4759 * Add descriptor's buffer to transmit frame.
4760 *
4761 * This deals with GSO and normal frames, e1kFallbackAddToFrame deals with the
4762 * TSE frames we cannot handle as GSO.
4763 *
4764 * @returns true on success, false on failure.
4765 *
4766 * @param pDevIns The device instance.
4767 * @param pThis The device state structure.
4768 * @param pThisCC The current context instance data.
4769 * @param PhysAddr The physical address of the descriptor buffer.
4770 * @param cbFragment Length of descriptor's buffer.
4771 * @thread E1000_TX
4772 */
4773static bool e1kAddToFrame(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KSTATECC pThisCC, RTGCPHYS PhysAddr, uint32_t cbFragment)
4774{
4775 PPDMSCATTERGATHER pTxSg = pThisCC->CTX_SUFF(pTxSg);
4776 bool const fGso = e1kXmitIsGsoBuf(pTxSg);
4777 uint32_t const cbNewPkt = cbFragment + pThis->u16TxPktLen;
4778
4779 LogFlow(("%s e1kAddToFrame: ENTER cbFragment=%d u16TxPktLen=%d cbUsed=%d cbAvailable=%d fGSO=%s\n",
4780 pThis->szPrf, cbFragment, pThis->u16TxPktLen, pTxSg->cbUsed, pTxSg->cbAvailable,
4781 fGso ? "true" : "false"));
4782 PCPDMNETWORKGSO pGso = (PCPDMNETWORKGSO)pTxSg->pvUser;
4783 if (pGso)
4784 {
4785 if (RT_UNLIKELY(pGso->cbMaxSeg == 0))
4786 {
4787 E1kLog(("%s zero-sized fragments are not allowed\n", pThis->szPrf));
4788 return false;
4789 }
4790 if (RT_UNLIKELY(pGso->u8Type == PDMNETWORKGSOTYPE_IPV4_UDP))
4791 {
4792 E1kLog(("%s UDP fragmentation is no longer supported\n", pThis->szPrf));
4793 return false;
4794 }
4795 }
4796 if (RT_UNLIKELY( !fGso && cbNewPkt > E1K_MAX_TX_PKT_SIZE ))
4797 {
4798 E1kLog(("%s Transmit packet is too large: %u > %u(max)\n", pThis->szPrf, cbNewPkt, E1K_MAX_TX_PKT_SIZE));
4799 return false;
4800 }
4801 if (RT_UNLIKELY( cbNewPkt > pTxSg->cbAvailable ))
4802 {
4803 E1kLog(("%s Transmit packet is too large: %u > %u(available)\n", pThis->szPrf, cbNewPkt, pTxSg->cbAvailable));
4804 return false;
4805 }
4806
4807 if (RT_LIKELY(pTxSg))
4808 {
4809 Assert(pTxSg->cSegs == 1);
4810 if (pTxSg->cbUsed != pThis->u16TxPktLen)
4811 E1kLog(("%s e1kAddToFrame: pTxSg->cbUsed=%d(0x%x) != u16TxPktLen=%d(0x%x)\n",
4812 pThis->szPrf, pTxSg->cbUsed, pTxSg->cbUsed, pThis->u16TxPktLen, pThis->u16TxPktLen));
4813
4814 PDMDevHlpPCIPhysRead(pDevIns, PhysAddr, (uint8_t *)pTxSg->aSegs[0].pvSeg + pThis->u16TxPktLen, cbFragment);
4815
4816 pTxSg->cbUsed = cbNewPkt;
4817 }
4818 pThis->u16TxPktLen = cbNewPkt;
4819
4820 return true;
4821}
4822
4823
4824/**
4825 * Write the descriptor back to guest memory and notify the guest.
4826 *
4827 * @param pThis The device state structure.
4828 * @param pDesc Pointer to the descriptor that has been transmitted.
4829 * @param addr Physical address of the descriptor in guest memory.
4830 * @thread E1000_TX
4831 */
4832static void e1kDescReport(PPDMDEVINS pDevIns, PE1KSTATE pThis, E1KTXDESC *pDesc, RTGCPHYS addr)
4833{
4834 /*
4835 * We fake descriptor write-back bursting. Descriptors are written back as they are
4836 * processed.
4837 */
4838 /* Let's pretend we process descriptors. Write back with DD set. */
4839 /*
4840 * Prior to r71586 we tried to accommodate the case when write-back bursts
4841 * are enabled without actually implementing bursting by writing back all
4842 * descriptors, even the ones that do not have RS set. This caused kernel
4843 * panics with Linux SMP kernels, as the e1000 driver tried to free the skb
4844 * associated with a written-back descriptor if it happened to be a context
4845 * descriptor, since context descriptors do not have an skb associated with them.
4846 * Starting from r71586 we write back only the descriptors with RS set,
4847 * which is a little bit different from what the real hardware does in
4848 * case there is a chain of data descriptors where some of them have RS set
4849 * and others do not. It is a very uncommon scenario, imho.
4850 * We need to check RPS as well since some legacy drivers use it instead of
4851 * RS even with newer cards.
4852 */
4853 if (pDesc->legacy.cmd.fRS || pDesc->legacy.cmd.fRPS)
4854 {
4855 pDesc->legacy.dw3.fDD = 1; /* Descriptor Done */
4856 e1kWriteBackDesc(pDevIns, pThis, pDesc, addr);
4857 if (pDesc->legacy.cmd.fEOP)
4858 {
4859//#ifdef E1K_USE_TX_TIMERS
4860 if (pThis->fTidEnabled && pDesc->legacy.cmd.fIDE)
4861 {
4862 E1K_INC_ISTAT_CNT(pThis->uStatTxIDE);
4863 //if (pThis->fIntRaised)
4864 //{
4865 // /* Interrupt is already pending, no need for timers */
4866 // ICR |= ICR_TXDW;
4867 //}
4868 //else {
4869 /* Arm the timer to fire in TIDV usec (discard .024) */
4870 e1kArmTimer(pDevIns, pThis, pThis->hTIDTimer, TIDV);
4871# ifndef E1K_NO_TAD
4872 /* If absolute timer delay is enabled and the timer is not running yet, arm it. */
4873 E1kLog2(("%s Checking if TAD timer is running\n",
4874 pThis->szPrf));
4875 if (TADV != 0 && !PDMDevHlpTimerIsActive(pDevIns, pThis->hTADTimer))
4876 e1kArmTimer(pDevIns, pThis, pThis->hTADTimer, TADV);
4877# endif /* E1K_NO_TAD */
4878 }
4879 else
4880 {
4881 if (pThis->fTidEnabled)
4882 {
4883 E1kLog2(("%s No IDE set, cancel TAD timer and raise interrupt\n",
4884 pThis->szPrf));
4885 /* Cancel both timers if armed and fire immediately. */
4886# ifndef E1K_NO_TAD
4887 PDMDevHlpTimerStop(pDevIns, pThis->hTADTimer);
4888# endif
4889 PDMDevHlpTimerStop(pDevIns, pThis->hTIDTimer);
4890 }
4891//#endif /* E1K_USE_TX_TIMERS */
4892 E1K_INC_ISTAT_CNT(pThis->uStatIntTx);
4893 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_TXDW);
4894//#ifdef E1K_USE_TX_TIMERS
4895 }
4896//#endif /* E1K_USE_TX_TIMERS */
4897 }
4898 }
4899 else
4900 {
4901 E1K_INC_ISTAT_CNT(pThis->uStatTxNoRS);
4902 }
4903}
4904
4905#ifndef E1K_WITH_TXD_CACHE
4906
4907/**
4908 * Process Transmit Descriptor.
4909 *
4910 * E1000 supports three types of transmit descriptors:
4911 * - legacy: data descriptors of the older, context-less format.
4912 * - data: same as legacy but providing new offloading capabilities.
4913 * - context: sets up the context for the following data descriptors.
4914 *
4915 * @param pDevIns The device instance.
4916 * @param pThis The device state structure.
4917 * @param pThisCC The current context instance data.
4918 * @param pDesc Pointer to descriptor union.
4919 * @param addr Physical address of descriptor in guest memory.
4920 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4921 * @thread E1000_TX
4922 */
4923static int e1kXmitDesc(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KSTATECC pThisCC, E1KTXDESC *pDesc,
4924 RTGCPHYS addr, bool fOnWorkerThread)
4925{
4926 int rc = VINF_SUCCESS;
4927 uint32_t cbVTag = 0;
4928
4929 e1kPrintTDesc(pThis, pDesc, "vvv");
4930
4931//#ifdef E1K_USE_TX_TIMERS
4932 if (pThis->fTidEnabled)
4933 e1kCancelTimer(pDevIns, pThis, pThis->hTIDTimer);
4934//#endif /* E1K_USE_TX_TIMERS */
4935
4936 switch (e1kGetDescType(pDesc))
4937 {
4938 case E1K_DTYP_CONTEXT:
4939 if (pDesc->context.dw2.fTSE)
4940 {
4941 pThis->contextTSE = pDesc->context;
4942 pThis->u32PayRemain = pDesc->context.dw2.u20PAYLEN;
4943 pThis->u16HdrRemain = pDesc->context.dw3.u8HDRLEN;
4944 e1kSetupGsoCtx(&pThis->GsoCtx, &pDesc->context);
4945 STAM_COUNTER_INC(&pThis->StatTxDescCtxTSE);
4946 }
4947 else
4948 {
4949 pThis->contextNormal = pDesc->context;
4950 STAM_COUNTER_INC(&pThis->StatTxDescCtxNormal);
4951 }
4952 E1kLog2(("%s %s context updated: IP CSS=%02X, IP CSO=%02X, IP CSE=%04X"
4953 ", TU CSS=%02X, TU CSO=%02X, TU CSE=%04X\n", pThis->szPrf,
4954 pDesc->context.dw2.fTSE ? "TSE" : "Normal",
4955 pDesc->context.ip.u8CSS,
4956 pDesc->context.ip.u8CSO,
4957 pDesc->context.ip.u16CSE,
4958 pDesc->context.tu.u8CSS,
4959 pDesc->context.tu.u8CSO,
4960 pDesc->context.tu.u16CSE));
4961 E1K_INC_ISTAT_CNT(pThis->uStatDescCtx);
4962 e1kDescReport(pDevIns, pThis, pDesc, addr);
4963 break;
4964
4965 case E1K_DTYP_DATA:
4966 {
4967 if (pDesc->data.cmd.u20DTALEN == 0 || pDesc->data.u64BufAddr == 0)
4968 {
4969 E1kLog2(("%s Empty data descriptor, skipped.\n", pThis->szPrf));
4970 /** @todo Same as legacy when !TSE. See below. */
4971 break;
4972 }
4973 STAM_COUNTER_INC(pDesc->data.cmd.fTSE?
4974 &pThis->StatTxDescTSEData:
4975 &pThis->StatTxDescData);
4976 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
4977 E1K_INC_ISTAT_CNT(pThis->uStatDescDat);
4978
4979 /*
4980 * The last descriptor of non-TSE packet must contain VLE flag.
4981 * TSE packets have VLE flag in the first descriptor. The later
4982 * case is taken care of a bit later when cbVTag gets assigned.
4983 *
4984 * 1) pDesc->data.cmd.fEOP && !pDesc->data.cmd.fTSE
4985 */
4986 if (pDesc->data.cmd.fEOP && !pDesc->data.cmd.fTSE)
4987 {
4988 pThis->fVTag = pDesc->data.cmd.fVLE;
4989 pThis->u16VTagTCI = pDesc->data.dw3.u16Special;
4990 }
4991 /*
4992 * First fragment: Allocate new buffer and save the IXSM and TXSM
4993 * packet options as these are only valid in the first fragment.
4994 */
4995 if (pThis->u16TxPktLen == 0)
4996 {
4997 pThis->fIPcsum = pDesc->data.dw3.fIXSM;
4998 pThis->fTCPcsum = pDesc->data.dw3.fTXSM;
4999 E1kLog2(("%s Saving checksum flags:%s%s; \n", pThis->szPrf,
5000 pThis->fIPcsum ? " IP" : "",
5001 pThis->fTCPcsum ? " TCP/UDP" : ""));
5002 if (pDesc->data.cmd.fTSE)
5003 {
5004 /* 2) pDesc->data.cmd.fTSE && pThis->u16TxPktLen == 0 */
5005 pThis->fVTag = pDesc->data.cmd.fVLE;
5006 pThis->u16VTagTCI = pDesc->data.dw3.u16Special;
5007 cbVTag = pThis->fVTag ? 4 : 0;
5008 }
5009 else if (pDesc->data.cmd.fEOP)
5010 cbVTag = pDesc->data.cmd.fVLE ? 4 : 0;
5011 else
5012 cbVTag = 4;
5013 E1kLog3(("%s About to allocate TX buffer: cbVTag=%u\n", pThis->szPrf, cbVTag));
5014 if (e1kCanDoGso(pThis, &pThis->GsoCtx, &pDesc->data, &pThis->contextTSE))
5015 rc = e1kXmitAllocBuf(pThis, pThisCC, pThis->contextTSE.dw2.u20PAYLEN + pThis->contextTSE.dw3.u8HDRLEN + cbVTag,
5016 true /*fExactSize*/, true /*fGso*/);
5017 else if (pDesc->data.cmd.fTSE)
5018 rc = e1kXmitAllocBuf(pThis, pThisCC, pThis->contextTSE.dw3.u16MSS + pThis->contextTSE.dw3.u8HDRLEN + cbVTag,
5019 pDesc->data.cmd.fTSE /*fExactSize*/, false /*fGso*/);
5020 else
5021 rc = e1kXmitAllocBuf(pThis, pThisCC, pDesc->data.cmd.u20DTALEN + cbVTag,
5022 pDesc->data.cmd.fEOP /*fExactSize*/, false /*fGso*/);
5023
5024 /**
5025 * @todo Perhaps it is not that simple for GSO packets! We may
5026 * need to unwind some changes.
5027 */
5028 if (RT_FAILURE(rc))
5029 {
5030 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
5031 break;
5032 }
5033 /** @todo Is there any way of indicating errors other than collisions? Like
5034 * VERR_NET_DOWN. */
5035 }
5036
5037 /*
5038 * Add the descriptor data to the frame. If the frame is complete,
5039 * transmit it and reset the u16TxPktLen field.
5040 */
5041 if (e1kXmitIsGsoBuf(pThisCC->CTX_SUFF(pTxSg)))
5042 {
5043 STAM_COUNTER_INC(&pThis->StatTxPathGSO);
5044 bool fRc = e1kAddToFrame(pDevIns, pThis, pThisCC, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
5045 if (pDesc->data.cmd.fEOP)
5046 {
5047 if ( fRc
5048 && pThisCC->CTX_SUFF(pTxSg)
5049 && pThisCC->CTX_SUFF(pTxSg)->cbUsed == (size_t)pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw2.u20PAYLEN)
5050 {
5051 e1kTransmitFrame(pDevIns, pThis, pThisCC, fOnWorkerThread);
5052 E1K_INC_CNT32(TSCTC);
5053 }
5054 else
5055 {
5056 if (fRc)
5057 E1kLog(("%s bad GSO/TSE %p or %u < %u\n" , pThis->szPrf,
5058 pThisCC->CTX_SUFF(pTxSg), pThisCC->CTX_SUFF(pTxSg) ? pThisCC->CTX_SUFF(pTxSg)->cbUsed : 0,
5059 pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw2.u20PAYLEN));
5060 e1kXmitFreeBuf(pThis, pThisCC);
5061 E1K_INC_CNT32(TSCTFC);
5062 }
5063 pThis->u16TxPktLen = 0;
5064 }
5065 }
5066 else if (!pDesc->data.cmd.fTSE)
5067 {
5068 STAM_COUNTER_INC(&pThis->StatTxPathRegular);
5069 bool fRc = e1kAddToFrame(pDevIns, pThis, pThisCC, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
5070 if (pDesc->data.cmd.fEOP)
5071 {
5072 if (fRc && pThisCC->CTX_SUFF(pTxSg))
5073 {
5074 Assert(pThisCC->CTX_SUFF(pTxSg)->cSegs == 1);
5075 if (pThis->fIPcsum)
5076 e1kInsertChecksum(pThis, (uint8_t *)pThisCC->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->u16TxPktLen,
5077 pThis->contextNormal.ip.u8CSO,
5078 pThis->contextNormal.ip.u8CSS,
5079 pThis->contextNormal.ip.u16CSE);
5080 if (pThis->fTCPcsum)
5081 e1kInsertChecksum(pThis, (uint8_t *)pThisCC->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->u16TxPktLen,
5082 pThis->contextNormal.tu.u8CSO,
5083 pThis->contextNormal.tu.u8CSS,
5084 pThis->contextNormal.tu.u16CSE,
5085 !pThis->contextNormal.dw2.fTCP);
5086 e1kTransmitFrame(pDevIns, pThis, pThisCC, fOnWorkerThread);
5087 }
5088 else
5089 e1kXmitFreeBuf(pThis, pThisCC);
5090 pThis->u16TxPktLen = 0;
5091 }
5092 }
5093 else
5094 {
5095 STAM_COUNTER_INC(&pThis->StatTxPathFallback);
5096 e1kFallbackAddToFrame(pDevIns, pThis, pDesc, pDesc->data.cmd.u20DTALEN, fOnWorkerThread);
5097 }
5098
5099 e1kDescReport(pDevIns, pThis, pDesc, addr);
5100 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
5101 break;
5102 }
5103
5104 case E1K_DTYP_LEGACY:
5105 if (pDesc->legacy.cmd.u16Length == 0 || pDesc->legacy.u64BufAddr == 0)
5106 {
5107 E1kLog(("%s Empty legacy descriptor, skipped.\n", pThis->szPrf));
5108 /** @todo 3.3.3, Length/Buffer Address: RS set -> write DD when processing. */
5109 break;
5110 }
5111 STAM_COUNTER_INC(&pThis->StatTxDescLegacy);
5112 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
5113
5114 /* First fragment: allocate new buffer. */
5115 if (pThis->u16TxPktLen == 0)
5116 {
5117 if (pDesc->legacy.cmd.fEOP)
5118 cbVTag = pDesc->legacy.cmd.fVLE ? 4 : 0;
5119 else
5120 cbVTag = 4;
5121 E1kLog3(("%s About to allocate TX buffer: cbVTag=%u\n", pThis->szPrf, cbVTag));
5122 /** @todo reset status bits? */
5123 rc = e1kXmitAllocBuf(pThis, pThisCC, pDesc->legacy.cmd.u16Length + cbVTag, pDesc->legacy.cmd.fEOP, false /*fGso*/);
5124 if (RT_FAILURE(rc))
5125 {
5126 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
5127 break;
5128 }
5129
5130 /** @todo Is there any way of indicating errors other than collisions? Like
5131 * VERR_NET_DOWN. */
5132 }
5133
5134 /* Add fragment to frame. */
5135 if (e1kAddToFrame(pDevIns, pThis, pThisCC, pDesc->data.u64BufAddr, pDesc->legacy.cmd.u16Length))
5136 {
5137 E1K_INC_ISTAT_CNT(pThis->uStatDescLeg);
5138
5139 /* Last fragment: Transmit and reset the packet storage counter. */
5140 if (pDesc->legacy.cmd.fEOP)
5141 {
5142 pThis->fVTag = pDesc->legacy.cmd.fVLE;
5143 pThis->u16VTagTCI = pDesc->legacy.dw3.u16Special;
5144 /** @todo Offload processing goes here. */
5145 e1kTransmitFrame(pDevIns, pThis, pThisCC, fOnWorkerThread);
5146 pThis->u16TxPktLen = 0;
5147 }
5148 }
5149 /* Last fragment + failure: free the buffer and reset the storage counter. */
5150 else if (pDesc->legacy.cmd.fEOP)
5151 {
5152 e1kXmitFreeBuf(pThis, pThisCC);
5153 pThis->u16TxPktLen = 0;
5154 }
5155
5156 e1kDescReport(pDevIns, pThis, pDesc, addr);
5157 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
5158 break;
5159
5160 default:
5161 E1kLog(("%s ERROR Unsupported transmit descriptor type: 0x%04x\n",
5162 pThis->szPrf, e1kGetDescType(pDesc)));
5163 break;
5164 }
5165
5166 return rc;
5167}
5168
5169#else /* E1K_WITH_TXD_CACHE */
5170
5171/**
5172 * Process Transmit Descriptor.
5173 *
5174 * E1000 supports three types of transmit descriptors:
5175 * - legacy: data descriptors of the older, context-less format.
5176 * - data: same as legacy but providing new offloading capabilities.
5177 * - context: sets up the context for the following data descriptors.
5178 *
5179 * @param pDevIns The device instance.
5180 * @param pThis The device state structure.
5181 * @param pThisCC The current context instance data.
5182 * @param pDesc Pointer to descriptor union.
5183 * @param addr Physical address of descriptor in guest memory.
5184 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
5186 * @thread E1000_TX
5187 */
5188static int e1kXmitDesc(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KSTATECC pThisCC, E1KTXDESC *pDesc,
5189 RTGCPHYS addr, bool fOnWorkerThread)
5190{
5191 int rc = VINF_SUCCESS;
5192
5193 e1kPrintTDesc(pThis, pDesc, "vvv");
5194
5195//#ifdef E1K_USE_TX_TIMERS
5196 if (pThis->fTidEnabled)
5197 PDMDevHlpTimerStop(pDevIns, pThis->hTIDTimer);
5198//#endif /* E1K_USE_TX_TIMERS */
5199
5200 switch (e1kGetDescType(pDesc))
5201 {
5202 case E1K_DTYP_CONTEXT:
5203 /* The caller has already updated the context. */
5204 E1K_INC_ISTAT_CNT(pThis->uStatDescCtx);
5205 e1kDescReport(pDevIns, pThis, pDesc, addr);
5206 break;
5207
5208 case E1K_DTYP_DATA:
5209 {
5210 STAM_COUNTER_INC(pDesc->data.cmd.fTSE?
5211 &pThis->StatTxDescTSEData:
5212 &pThis->StatTxDescData);
5213 E1K_INC_ISTAT_CNT(pThis->uStatDescDat);
5214 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
5215 if (pDesc->data.cmd.u20DTALEN == 0 || pDesc->data.u64BufAddr == 0)
5216 {
5217 E1kLog2(("%s Empty data descriptor, skipped.\n", pThis->szPrf));
5218 if (pDesc->data.cmd.fEOP)
5219 {
5220 e1kTransmitFrame(pDevIns, pThis, pThisCC, fOnWorkerThread);
5221 pThis->u16TxPktLen = 0;
5222 }
5223 }
5224 else
5225 {
5226 /*
5227 * Add the descriptor data to the frame. If the frame is complete,
5228 * transmit it and reset the u16TxPktLen field.
5229 */
5230 if (e1kXmitIsGsoBuf(pThisCC->CTX_SUFF(pTxSg)))
5231 {
5232 STAM_COUNTER_INC(&pThis->StatTxPathGSO);
5233 bool fRc = e1kAddToFrame(pDevIns, pThis, pThisCC, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
5234 if (pDesc->data.cmd.fEOP)
5235 {
5236 if ( fRc
5237 && pThisCC->CTX_SUFF(pTxSg)
5238 && pThisCC->CTX_SUFF(pTxSg)->cbUsed == (size_t)pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw2.u20PAYLEN)
5239 {
5240 e1kTransmitFrame(pDevIns, pThis, pThisCC, fOnWorkerThread);
5241 E1K_INC_CNT32(TSCTC);
5242 }
5243 else
5244 {
5245 if (fRc)
5246 E1kLog(("%s bad GSO/TSE %p or %u < %u\n" , pThis->szPrf,
5247 pThisCC->CTX_SUFF(pTxSg), pThisCC->CTX_SUFF(pTxSg) ? pThisCC->CTX_SUFF(pTxSg)->cbUsed : 0,
5248 pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw2.u20PAYLEN));
5249 e1kXmitFreeBuf(pThis, pThisCC);
5250 E1K_INC_CNT32(TSCTFC);
5251 }
5252 pThis->u16TxPktLen = 0;
5253 }
5254 }
5255 else if (!pDesc->data.cmd.fTSE)
5256 {
5257 STAM_COUNTER_INC(&pThis->StatTxPathRegular);
5258 bool fRc = e1kAddToFrame(pDevIns, pThis, pThisCC, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
5259 if (pDesc->data.cmd.fEOP)
5260 {
5261 if (fRc && pThisCC->CTX_SUFF(pTxSg))
5262 {
5263 Assert(pThisCC->CTX_SUFF(pTxSg)->cSegs == 1);
5264 if (pThis->fIPcsum)
5265 e1kInsertChecksum(pThis, (uint8_t *)pThisCC->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->u16TxPktLen,
5266 pThis->contextNormal.ip.u8CSO,
5267 pThis->contextNormal.ip.u8CSS,
5268 pThis->contextNormal.ip.u16CSE);
5269 if (pThis->fTCPcsum)
5270 e1kInsertChecksum(pThis, (uint8_t *)pThisCC->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->u16TxPktLen,
5271 pThis->contextNormal.tu.u8CSO,
5272 pThis->contextNormal.tu.u8CSS,
5273 pThis->contextNormal.tu.u16CSE,
5274 !pThis->contextNormal.dw2.fTCP);
5275 e1kTransmitFrame(pDevIns, pThis, pThisCC, fOnWorkerThread);
5276 }
5277 else
5278 e1kXmitFreeBuf(pThis, pThisCC);
5279 pThis->u16TxPktLen = 0;
5280 }
5281 }
5282 else
5283 {
5284 STAM_COUNTER_INC(&pThis->StatTxPathFallback);
5285 rc = e1kFallbackAddToFrame(pDevIns, pThis, pDesc, fOnWorkerThread);
5286 }
5287 }
5288 e1kDescReport(pDevIns, pThis, pDesc, addr);
5289 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
5290 break;
5291 }
5292
5293 case E1K_DTYP_LEGACY:
5294 STAM_COUNTER_INC(&pThis->StatTxDescLegacy);
5295 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
5296 if (pDesc->legacy.cmd.u16Length == 0 || pDesc->legacy.u64BufAddr == 0)
5297 {
5298 E1kLog(("%s Empty legacy descriptor, skipped.\n", pThis->szPrf));
5299 if (pDesc->data.cmd.fEOP)
5300 {
5301 e1kTransmitFrame(pDevIns, pThis, pThisCC, fOnWorkerThread);
5302 pThis->u16TxPktLen = 0;
5303 }
5304 }
5305 else
5306 {
5307 /* Add fragment to frame. */
5308 if (e1kAddToFrame(pDevIns, pThis, pThisCC, pDesc->data.u64BufAddr, pDesc->legacy.cmd.u16Length))
5309 {
5310 E1K_INC_ISTAT_CNT(pThis->uStatDescLeg);
5311
5312 /* Last fragment: Transmit and reset the packet storage counter. */
5313 if (pDesc->legacy.cmd.fEOP)
5314 {
5315 if (pDesc->legacy.cmd.fIC)
5316 {
5317 e1kInsertChecksum(pThis,
5318 (uint8_t *)pThisCC->CTX_SUFF(pTxSg)->aSegs[0].pvSeg,
5319 pThis->u16TxPktLen,
5320 pDesc->legacy.cmd.u8CSO,
5321 pDesc->legacy.dw3.u8CSS,
5322 0);
5323 }
5324 e1kTransmitFrame(pDevIns, pThis, pThisCC, fOnWorkerThread);
5325 pThis->u16TxPktLen = 0;
5326 }
5327 }
5328 /* Last fragment + failure: free the buffer and reset the storage counter. */
5329 else if (pDesc->legacy.cmd.fEOP)
5330 {
5331 e1kXmitFreeBuf(pThis, pThisCC);
5332 pThis->u16TxPktLen = 0;
5333 }
5334 }
5335 e1kDescReport(pDevIns, pThis, pDesc, addr);
5336 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
5337 break;
5338
5339 default:
5340 E1kLog(("%s ERROR Unsupported transmit descriptor type: 0x%04x\n",
5341 pThis->szPrf, e1kGetDescType(pDesc)));
5342 break;
5343 }
5344
5345 return rc;
5346}
5347
5348DECLINLINE(bool) e1kUpdateTxContext(PE1KSTATE pThis, E1KTXDESC *pDesc)
5349{
5350 if (pDesc->context.dw2.fTSE)
5351 {
5352 if (!e1kSetupGsoCtx(&pThis->GsoCtx, &pDesc->context))
5353 {
5354 pThis->contextTSE.dw2.u4DTYP = E1K_DTYP_INVALID;
5355 return false;
5356 }
5357 pThis->contextTSE = pDesc->context;
5358 uint32_t cbMaxSegmentSize = pThis->contextTSE.dw3.u16MSS + pThis->contextTSE.dw3.u8HDRLEN + 4; /*VTAG*/
5359 if (RT_UNLIKELY(cbMaxSegmentSize > E1K_MAX_TX_PKT_SIZE))
5360 {
5361 pThis->contextTSE.dw3.u16MSS = E1K_MAX_TX_PKT_SIZE - pThis->contextTSE.dw3.u8HDRLEN - 4; /*VTAG*/
5362 LogRelMax(10, ("%s: Transmit packet is too large: %u > %u(max). Adjusted MSS to %u.\n",
5363 pThis->szPrf, cbMaxSegmentSize, E1K_MAX_TX_PKT_SIZE, pThis->contextTSE.dw3.u16MSS));
5364 }
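/* Note: clamping MSS here keeps each produced segment (HDRLEN + MSS + VLAN tag) within E1K_MAX_TX_PKT_SIZE. */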
5365 pThis->u32PayRemain = pThis->contextTSE.dw2.u20PAYLEN;
5366 pThis->u16HdrRemain = pThis->contextTSE.dw3.u8HDRLEN;
5367 e1kSetupGsoCtx(&pThis->GsoCtx, &pThis->contextTSE);
5368 STAM_COUNTER_INC(&pThis->StatTxDescCtxTSE);
5369 }
5370 else
5371 {
5372 pThis->contextNormal = pDesc->context;
5373 STAM_COUNTER_INC(&pThis->StatTxDescCtxNormal);
5374 }
5375 E1kLog2(("%s %s context updated: IP CSS=%02X, IP CSO=%02X, IP CSE=%04X"
5376 ", TU CSS=%02X, TU CSO=%02X, TU CSE=%04X\n", pThis->szPrf,
5377 pDesc->context.dw2.fTSE ? "TSE" : "Normal",
5378 pDesc->context.ip.u8CSS,
5379 pDesc->context.ip.u8CSO,
5380 pDesc->context.ip.u16CSE,
5381 pDesc->context.tu.u8CSS,
5382 pDesc->context.tu.u8CSO,
5383 pDesc->context.tu.u16CSE));
5384 return true; /* Consider returning false for invalid descriptors */
5385}
5386
5387enum E1kPacketType
5388{
5389 E1K_PACKET_NONE = 0,
5390 E1K_PACKET_LEGACY,
5391 E1K_PACKET_NORMAL,
5392 E1K_PACKET_TSE
5393};
5394
5395 static bool e1kLocateTxPacket(PE1KSTATE pThis, PE1KTXDC pTxdc)
5396{
5397 LogFlow(("%s e1kLocateTxPacket: ENTER cbTxAlloc=%d\n",
5398 pThis->szPrf, pThis->cbTxAlloc));
5399 /* Check if we have located the packet already. */
5400 if (pThis->cbTxAlloc)
5401 {
5402 LogFlow(("%s e1kLocateTxPacket: RET true cbTxAlloc=%d\n",
5403 pThis->szPrf, pThis->cbTxAlloc));
5404 return true;
5405 }
5406
5407 pThis->fGSO = false;
5408 pThis->fVTag = false;
5409 pThis->fIPcsum = false;
5410 pThis->fTCPcsum = false;
5411 pThis->u16TxPktLen = 0;
5412
5413 enum E1kPacketType packetType = E1K_PACKET_NONE;
5414 enum E1kPacketType expectedPacketType = E1K_PACKET_NONE;
5415 /*
5416 * Valid packets start with 1 or 0 context descriptors, followed by 1 or
5417 * more data descriptors of the same type: legacy, normal or TSE. Note
5418 * that legacy descriptors belong to neither normal nor segmentation
5419 * contexts, rendering the sequence (context_descriptor, legacy_descriptor)
5420 * invalid, but the context descriptor will still be applied and the legacy
5421 * descriptor will be treated as the beginning of the next packet.
5422 */
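/* For example, CONTEXT(TSE), DATA(TSE), DATA(TSE,EOP) forms a single TSE packet and LEGACY(EOP) alone forms a legacy packet, while mixing legacy and data descriptors within one packet marks it as invalid. */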
5423 bool fInvalidPacket = false;
5424 bool fTSE = false;
5425 uint32_t cbPacket = 0;
5426
5427 /* Since we process one packet at a time we will only mark current packet's descriptors as valid */
5428 memset(pThis->afTxDValid, 0, sizeof(pThis->afTxDValid));
5429 for (int i = pThis->iTxDCurrent; i < pThis->nTxDFetched; ++i)
5430 {
5431 E1KTXDESC *pDesc = &pThis->aTxDescriptors[i];
5432
5433 switch (e1kGetDescType(pDesc))
5434 {
5435 case E1K_DTYP_CONTEXT:
5436 /* There can be only one context per packet. Each context descriptor starts a new packet. */
5437 if (packetType != E1K_PACKET_NONE)
5438 {
5439 fInvalidPacket = true;
5440 break;
5441 }
5442 packetType = (pDesc->context.dw2.fTSE) ? E1K_PACKET_TSE : E1K_PACKET_NORMAL;
5443 if (cbPacket == 0)
5444 pThis->afTxDValid[i] = e1kUpdateTxContext(pThis, pDesc);
5445 else
5446 E1kLog(("%s e1kLocateTxPacket: ignoring a context descriptor in the middle of a packet, cbPacket=%d\n",
5447 pThis->szPrf, cbPacket));
5448 continue;
5449 case E1K_DTYP_LEGACY:
5450 if (packetType != E1K_PACKET_NONE && packetType != E1K_PACKET_LEGACY)
5451 {
5452 fInvalidPacket = true;
5453 break;
5454 }
5455 packetType = E1K_PACKET_LEGACY;
5456 /* Skip invalid descriptors. */
5457 if (cbPacket > 0 && (pThis->fGSO || fTSE))
5458 {
5459 E1kLog(("%s e1kLocateTxPacket: ignoring a legacy descriptor in the segmentation context, cbPacket=%d\n",
5460 pThis->szPrf, cbPacket));
5461 continue;
5462 }
5463 pThis->afTxDValid[i] = true; /* Passed all checks, process it */
5464
5465 /* Skip empty descriptors. */
5466 if (!pDesc->legacy.u64BufAddr || !pDesc->legacy.cmd.u16Length)
5467 break;
5468 cbPacket += pDesc->legacy.cmd.u16Length;
5469 pThis->fGSO = false;
5470 break;
5471 case E1K_DTYP_DATA:
5472 expectedPacketType = pDesc->data.cmd.fTSE ? E1K_PACKET_TSE : E1K_PACKET_NORMAL;
5473 if (packetType != E1K_PACKET_NONE && packetType != expectedPacketType)
5474 {
5475 fInvalidPacket = true;
5476 break;
5477 }
5478 /* Skip invalid descriptors. */
5479 if (pDesc->data.cmd.fTSE)
5480 {
5481 if (pThis->contextTSE.dw2.u4DTYP == E1K_DTYP_INVALID)
5482 {
5483 E1kLog(("%s e1kLocateTxPacket: ignoring TSE descriptor in invalid segmentation context, cbPacket=%d\n",
5484 pThis->szPrf, cbPacket));
5485 continue;
5486 }
5487 }
5488 else /* !TSE */
5489 {
5490 if (pThis->contextNormal.dw2.u4DTYP == E1K_DTYP_INVALID)
5491 {
5492 E1kLog(("%s e1kLocateTxPacket: ignoring non-TSE descriptor in invalid normal context, cbPacket=%d\n",
5493 pThis->szPrf, cbPacket));
5494 continue;
5495 }
5496 }
5497 if (cbPacket > 0 && (bool)pDesc->data.cmd.fTSE != fTSE)
5498 {
5499 E1kLog(("%s e1kLocateTxPacket: ignoring %sTSE descriptor in the %ssegmentation context, cbPacket=%d\n",
5500 pThis->szPrf, pDesc->data.cmd.fTSE ? "" : "non-", fTSE ? "" : "non-", cbPacket));
5501 continue;
5502 }
5503 pThis->afTxDValid[i] = true; /* Passed all checks, process it */
5504
5505 /* Skip empty descriptors. */
5506 if (!pDesc->data.u64BufAddr || !pDesc->data.cmd.u20DTALEN)
5507 break;
5508 if (cbPacket == 0)
5509 {
5510 /*
5511 * The first fragment: save IXSM and TXSM options
5512 * as these are only valid in the first fragment.
5513 */
5514 pThis->fIPcsum = pDesc->data.dw3.fIXSM;
5515 pThis->fTCPcsum = pDesc->data.dw3.fTXSM;
5516 fTSE = pDesc->data.cmd.fTSE;
5517 /*
5518 * TSE descriptors have VLE bit properly set in
5519 * the first fragment.
5520 */
5521 if (fTSE)
5522 {
5523 pThis->fVTag = pDesc->data.cmd.fVLE;
5524 pThis->u16VTagTCI = pDesc->data.dw3.u16Special;
5525 }
5526 pThis->fGSO = e1kCanDoGso(pThis, &pThis->GsoCtx, &pDesc->data, &pThis->contextTSE);
5527 }
5528 cbPacket += pDesc->data.cmd.u20DTALEN;
5529 break;
5530 default:
5531 AssertMsgFailed(("Impossible descriptor type!"));
5532 continue;
5533 }
5534 if (fInvalidPacket)
5535 {
5536 for (int index = pThis->iTxDCurrent; index < i; ++index)
5537 pThis->afTxDValid[index] = false; /* Make sure all descriptors for this packet are skipped by processing */
5538 LogFlow(("%s e1kLocateTxPacket: marked %d descriptors as invalid\n", pThis->szPrf, i - pThis->iTxDCurrent));
5539 LogFlow(("%s e1kLocateTxPacket: RET true cbTxAlloc=%d cbPacket=%d%s%s\n",
5540 pThis->szPrf, pThis->cbTxAlloc, cbPacket,
5541 pThis->fGSO ? " GSO" : "", fTSE ? " TSE" : ""));
5542 pTxdc->nextPacket = i;
5543 return true;
5544 }
5545 if (pDesc->legacy.cmd.fEOP)
5546 {
5547 /*
5548 * Non-TSE descriptors have VLE bit properly set in
5549 * the last fragment.
5550 */
5551 if (!fTSE)
5552 {
5553 pThis->fVTag = pDesc->data.cmd.fVLE;
5554 pThis->u16VTagTCI = pDesc->data.dw3.u16Special;
5555 }
5556 /*
5557 * Compute the required buffer size. If we cannot do GSO but still
5558 * have to do segmentation we allocate the first segment only.
5559 */
5560 pThis->cbTxAlloc = (!fTSE || pThis->fGSO) ?
5561 cbPacket :
5562 RT_MIN(cbPacket, pThis->contextTSE.dw3.u16MSS + pThis->contextTSE.dw3.u8HDRLEN);
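/* E.g. a 4000-byte TSE packet with MSS=1460 and HDRLEN=54 that cannot be sent as GSO gets a 1514-byte buffer for the first segment only, while a GSO-capable packet gets the full cbPacket. */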
5563 /* Do not add VLAN tags to empty packets. */
5564 if (pThis->fVTag && pThis->cbTxAlloc > 0)
5565 pThis->cbTxAlloc += 4;
5566 LogFlow(("%s e1kLocateTxPacket: RET true cbTxAlloc=%d cbPacket=%d%s%s\n",
5567 pThis->szPrf, pThis->cbTxAlloc, cbPacket,
5568 pThis->fGSO ? " GSO" : "", fTSE ? " TSE" : ""));
5569 pTxdc->nextPacket = i + 1;
5570 return true;
5571 }
5572 }
5573
5574 if (cbPacket == 0 && pThis->nTxDFetched - pThis->iTxDCurrent > 0)
5575 {
5576 /* All descriptors were empty, we need to process them as a dummy packet */
5577 LogFlow(("%s e1kLocateTxPacket: RET true cbTxAlloc=%d, zero packet!\n",
5578 pThis->szPrf, pThis->cbTxAlloc));
5579 pTxdc->nextPacket = pThis->nTxDFetched;
5580 return true;
5581 }
5582 LogFlow(("%s e1kLocateTxPacket: RET false cbTxAlloc=%d cbPacket=%d\n",
5583 pThis->szPrf, pThis->cbTxAlloc, cbPacket));
5584 return false;
5585}
5586
5587static int e1kXmitPacket(PPDMDEVINS pDevIns, PE1KSTATE pThis, bool fOnWorkerThread, PE1KTXDC pTxdc)
5588{
5589 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
5590 int rc = VINF_SUCCESS;
5591
5592 LogFlow(("%s e1kXmitPacket: ENTER current=%d fetched=%d\n",
5593 pThis->szPrf, pThis->iTxDCurrent, pThis->nTxDFetched));
5594
5595 while (pThis->iTxDCurrent < pTxdc->nextPacket && pThis->iTxDCurrent < pThis->nTxDFetched)
5596 {
5597 E1KTXDESC *pDesc = &pThis->aTxDescriptors[pThis->iTxDCurrent];
5598 E1kLog3(("%s About to process new TX descriptor at %08x%08x, TDLEN=%08x, TDH=%08x, TDT=%08x\n",
5599 pThis->szPrf, TDBAH, TDBAL + pTxdc->tdh * sizeof(E1KTXDESC), pTxdc->tdlen, pTxdc->tdh, pTxdc->tdt));
5600 if (!pThis->afTxDValid[pThis->iTxDCurrent])
5601 {
5602 e1kPrintTDesc(pThis, pDesc, "vvv");
5603 E1kLog(("%s e1kXmitDesc: skipping bad descriptor ^^^\n", pThis->szPrf));
5604 e1kDescReport(pDevIns, pThis, pDesc, e1kDescAddr(TDBAH, TDBAL, pTxdc->tdh));
5605 rc = VINF_SUCCESS;
5606 }
5607 else
5608 rc = e1kXmitDesc(pDevIns, pThis, pThisCC, pDesc, e1kDescAddr(TDBAH, TDBAL, pTxdc->tdh), fOnWorkerThread);
5609 if (RT_FAILURE(rc))
5610 break;
5611 if (++pTxdc->tdh * sizeof(E1KTXDESC) >= pTxdc->tdlen)
5612 pTxdc->tdh = 0;
5613 TDH = pTxdc->tdh; /* Sync the actual register and TXDC */
5614 uint32_t uLowThreshold = GET_BITS(TXDCTL, LWTHRESH)*8;
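/* LWTHRESH is expressed in units of 8 descriptors, hence the multiplication by 8 to obtain a descriptor count. */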
5615 if (uLowThreshold != 0 && e1kGetTxLen(pTxdc) <= uLowThreshold)
5616 {
5617 E1kLog2(("%s Low on transmit descriptors, raise ICR.TXD_LOW, len=%x thresh=%x\n",
5618 pThis->szPrf, e1kGetTxLen(pTxdc), GET_BITS(TXDCTL, LWTHRESH)*8));
5619 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_TXD_LOW);
5620 }
5621 ++pThis->iTxDCurrent;
5622 if (e1kGetDescType(pDesc) != E1K_DTYP_CONTEXT && pDesc->legacy.cmd.fEOP)
5623 break;
5624 }
5625
5626 LogFlow(("%s e1kXmitPacket: RET %Rrc current=%d fetched=%d\n",
5627 pThis->szPrf, rc, pThis->iTxDCurrent, pThis->nTxDFetched));
5628 return rc;
5629}
5630
5631#endif /* E1K_WITH_TXD_CACHE */
5632#ifndef E1K_WITH_TXD_CACHE
5633
5634/**
5635 * Transmit pending descriptors.
5636 *
5637 * @returns VBox status code. VERR_TRY_AGAIN is returned if we're busy.
5638 *
5639 * @param pDevIns The device instance.
5640 * @param pThis The E1000 state.
5641 * @param fOnWorkerThread Whether we're on a worker thread or on an EMT.
5642 */
5643static int e1kXmitPending(PPDMDEVINS pDevIns, PE1KSTATE pThis, bool fOnWorkerThread)
5644{
5645 int rc = VINF_SUCCESS;
5646 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
5647
5648 /* Check if transmitter is enabled. */
5649 if (!(TCTL & TCTL_EN))
5650 return VINF_SUCCESS;
5651 /*
5652 * Grab the xmit lock of the driver as well as the E1K device state.
5653 */
5654 rc = e1kCsTxEnter(pThis, VERR_SEM_BUSY);
5655 if (RT_LIKELY(rc == VINF_SUCCESS))
5656 {
5657 PPDMINETWORKUP pDrv = pThis->CTX_SUFF(pDrv);
5658 if (pDrv)
5659 {
5660 rc = pDrv->pfnBeginXmit(pDrv, fOnWorkerThread);
5661 if (RT_FAILURE(rc))
5662 {
5663 e1kCsTxLeave(pThis);
5664 return rc;
5665 }
5666 }
5667 /*
5668 * Process all pending descriptors.
5669 * Note! Do not process descriptors in locked state
5670 */
5671 while (TDH != TDT && !pThis->fLocked)
5672 {
5673 E1KTXDESC desc;
5674 E1kLog3(("%s About to process new TX descriptor at %08x%08x, TDLEN=%08x, TDH=%08x, TDT=%08x\n",
5675 pThis->szPrf, TDBAH, TDBAL + TDH * sizeof(desc), TDLEN, TDH, TDT));
5676
5677 e1kLoadDesc(pDevIns, &desc, ((uint64_t)TDBAH << 32) + TDBAL + TDH * sizeof(desc));
5678 rc = e1kXmitDesc(pDevIns, pThis, pThisCC, &desc, e1kDescAddr(TDBAH, TDBAL, TDH), fOnWorkerThread);
5679 /* If we failed to transmit descriptor we will try it again later */
5680 if (RT_FAILURE(rc))
5681 break;
5682 if (++TDH * sizeof(desc) >= TDLEN)
5683 TDH = 0;
5684
5685 if (e1kGetTxLen(pThis) <= GET_BITS(TXDCTL, LWTHRESH)*8)
5686 {
5687 E1kLog2(("%s Low on transmit descriptors, raise ICR.TXD_LOW, len=%x thresh=%x\n",
5688 pThis->szPrf, e1kGetTxLen(pThis), GET_BITS(TXDCTL, LWTHRESH)*8));
5689 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_TXD_LOW);
5690 }
5691
5692 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
5693 }
5694
5695 /// @todo uncomment: pThis->uStatIntTXQE++;
5696 /// @todo uncomment: e1kRaiseInterrupt(pDevIns, pThis, ICR_TXQE);
5697 /*
5698 * Release the lock.
5699 */
5700 if (pDrv)
5701 pDrv->pfnEndXmit(pDrv);
5702 e1kCsTxLeave(pThis);
5703 }
5704
5705 return rc;
5706}
5707
5708#else /* E1K_WITH_TXD_CACHE */
5709
5710static void e1kDumpTxDCache(PPDMDEVINS pDevIns, PE1KSTATE pThis, PE1KTXDC pTxdc)
5711{
5712 unsigned i, cDescs = pTxdc->tdlen / sizeof(E1KTXDESC);
5713 uint32_t tdh = pTxdc->tdh;
5714 LogRel(("E1000: -- Transmit Descriptors (%d total) --\n", cDescs));
5715 for (i = 0; i < cDescs; ++i)
5716 {
5717 E1KTXDESC desc;
5718 PDMDevHlpPCIPhysRead(pDevIns , e1kDescAddr(TDBAH, TDBAL, i), &desc, sizeof(desc));
5719 if (i == tdh)
5720 LogRel(("E1000: >>> "));
5721 LogRel(("E1000: %RGp: %R[e1ktxd]\n", e1kDescAddr(TDBAH, TDBAL, i), &desc));
5722 }
5723 LogRel(("E1000: -- Transmit Descriptors in Cache (at %d (TDH %d)/ fetched %d / max %d) --\n",
5724 pThis->iTxDCurrent, pTxdc->tdh, pThis->nTxDFetched, E1K_TXD_CACHE_SIZE));
5725 if (tdh > pThis->iTxDCurrent)
5726 tdh -= pThis->iTxDCurrent;
5727 else
5728 tdh = cDescs + tdh - pThis->iTxDCurrent;
5729 for (i = 0; i < pThis->nTxDFetched; ++i)
5730 {
5731 if (i == pThis->iTxDCurrent)
5732 LogRel(("E1000: >>> "));
5733 if (cDescs)
5734 LogRel(("E1000: %RGp: %R[e1ktxd]\n", e1kDescAddr(TDBAH, TDBAL, tdh++ % cDescs), &pThis->aTxDescriptors[i]));
5735 else
5736 LogRel(("E1000: <lost>: %R[e1ktxd]\n", &pThis->aTxDescriptors[i]));
5737 }
5738}
5739
5740/**
5741 * Transmit pending descriptors.
5742 *
5743 * @returns VBox status code. VERR_TRY_AGAIN is returned if we're busy.
5744 *
5745 * @param pDevIns The device instance.
5746 * @param pThis The E1000 state.
5747 * @param fOnWorkerThread Whether we're on a worker thread or on an EMT.
5748 */
5749static int e1kXmitPending(PPDMDEVINS pDevIns, PE1KSTATE pThis, bool fOnWorkerThread)
5750{
5751 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
5752 int rc = VINF_SUCCESS;
5753
5754 /* Check if transmitter is enabled. */
5755 if (!(TCTL & TCTL_EN))
5756 return VINF_SUCCESS;
5757 /*
5758 * Grab the xmit lock of the driver as well as the E1K device state.
5759 */
5760 PPDMINETWORKUP pDrv = pThisCC->CTX_SUFF(pDrv);
5761 if (pDrv)
5762 {
5763 rc = pDrv->pfnBeginXmit(pDrv, fOnWorkerThread);
5764 if (RT_FAILURE(rc))
5765 return rc;
5766 }
5767
5768 /*
5769 * Process all pending descriptors.
5770 * Note! Do not process descriptors in locked state
5771 */
5772 rc = e1kCsTxEnter(pThis, VERR_SEM_BUSY);
5773 if (RT_LIKELY(rc == VINF_SUCCESS && (TCTL & TCTL_EN)))
5774 {
5775 E1KTXDC txdc;
5776 bool fTxContextValid = e1kUpdateTxDContext(pDevIns, pThis, &txdc);
5777 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
5778 /*
5779 * fIncomplete is set whenever we try to fetch additional descriptors
5780 * for an incomplete packet. If we fail to locate a complete packet on
5781 * the next iteration, we need to reset the cache or we risk getting
5782 * stuck in this loop forever.
5783 */
5784 bool fIncomplete = false;
5785 while (fTxContextValid && !pThis->fLocked && e1kTxDLazyLoad(pDevIns, pThis, &txdc))
5786 {
5787 while (e1kLocateTxPacket(pThis, &txdc))
5788 {
5789 Log4(("%s e1kXmitPending: Located packet at %d. Next packet at %d\n",
5790 pThis->szPrf, pThis->iTxDCurrent, txdc.nextPacket));
5791 fIncomplete = false;
5792 /* Found a complete packet, allocate it. */
5793 rc = e1kXmitAllocBuf(pThis, pThisCC, pThis->fGSO);
5794 /* If we're out of bandwidth we'll come back later. */
5795 if (RT_FAILURE(rc))
5796 goto out;
5797 /* Copy the packet to allocated buffer and send it. */
5798 rc = e1kXmitPacket(pDevIns, pThis, fOnWorkerThread, &txdc);
5799 /* If we're out of bandwidth we'll come back later. */
5800 if (RT_FAILURE(rc))
5801 goto out;
5802 }
5803 uint8_t u8Remain = pThis->nTxDFetched - pThis->iTxDCurrent;
5804 if (RT_UNLIKELY(fIncomplete))
5805 {
5806 static bool fTxDCacheDumped = false;
5807 /*
5808 * The descriptor cache is full, but we were unable to find
5809 * a complete packet in it. Drop the cache and hope that
5810 * the guest driver can recover from the network card error.
5811 */
5812 LogRel(("%s: No complete packets in%s TxD cache! "
5813 "Fetched=%d, current=%d, TX len=%d.\n",
5814 pThis->szPrf,
5815 u8Remain == E1K_TXD_CACHE_SIZE ? " full" : "",
5816 pThis->nTxDFetched, pThis->iTxDCurrent,
5817 e1kGetTxLen(&txdc)));
5818 if (!fTxDCacheDumped)
5819 {
5820 fTxDCacheDumped = true;
5821 e1kDumpTxDCache(pDevIns, pThis, &txdc);
5822 }
5823 pThis->iTxDCurrent = pThis->nTxDFetched = 0;
5824 /*
5825 * Returning an error at this point means Guru in R0
5826 * (see @bugref{6428}).
5827 */
5828# ifdef IN_RING3
5829 rc = VERR_NET_INCOMPLETE_TX_PACKET;
5830# else /* !IN_RING3 */
5831 rc = VINF_IOM_R3_MMIO_WRITE;
5832# endif /* !IN_RING3 */
5833 goto out;
5834 }
5835 if (u8Remain > 0)
5836 {
5837 Log4(("%s Incomplete packet at %d. Already fetched %d, "
5838 "%d more are available\n",
5839 pThis->szPrf, pThis->iTxDCurrent, u8Remain,
5840 e1kGetTxLen(&txdc) - u8Remain));
5841
5842 /*
5843 * A packet was partially fetched. Move incomplete packet to
5844 * the beginning of cache buffer, then load more descriptors.
5845 */
5846 memmove(pThis->aTxDescriptors,
5847 &pThis->aTxDescriptors[pThis->iTxDCurrent],
5848 u8Remain * sizeof(E1KTXDESC));
5849 pThis->iTxDCurrent = 0;
5850 pThis->nTxDFetched = u8Remain;
5851 e1kTxDLoadMore(pDevIns, pThis, &txdc);
5852 fIncomplete = true;
5853 }
5854 else
5855 pThis->nTxDFetched = 0;
5856 pThis->iTxDCurrent = 0;
5857 }
5858 if (!pThis->fLocked && GET_BITS(TXDCTL, LWTHRESH) == 0)
5859 {
5860 E1kLog2(("%s Out of transmit descriptors, raise ICR.TXD_LOW\n",
5861 pThis->szPrf));
5862 e1kRaiseInterrupt(pDevIns, pThis, VERR_SEM_BUSY, ICR_TXD_LOW);
5863 }
5864out:
5865 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
5866
5867 /// @todo uncomment: pThis->uStatIntTXQE++;
5868 /// @todo uncomment: e1kRaiseInterrupt(pDevIns, pThis, ICR_TXQE);
5869
5870 e1kCsTxLeave(pThis);
5871 }
5872
5873
5874 /*
5875 * Release the lock.
5876 */
5877 if (pDrv)
5878 pDrv->pfnEndXmit(pDrv);
5879 return rc;
5880}
5881
5882#endif /* E1K_WITH_TXD_CACHE */
5883#ifdef IN_RING3
5884
5885/**
5886 * @interface_method_impl{PDMINETWORKDOWN,pfnXmitPending}
5887 */
5888static DECLCALLBACK(void) e1kR3NetworkDown_XmitPending(PPDMINETWORKDOWN pInterface)
5889{
5890 PE1KSTATECC pThisCC = RT_FROM_MEMBER(pInterface, E1KSTATECC, INetworkDown);
5891 PE1KSTATE pThis = pThisCC->pShared;
5892 /* Resume suspended transmission */
5893 STATUS &= ~STATUS_TXOFF;
5894 e1kXmitPending(pThisCC->pDevInsR3, pThis, true /*fOnWorkerThread*/);
5895}
5896
5897/**
5898 * @callback_method_impl{FNPDMTASKDEV,
5899 * Executes e1kXmitPending at the behest of ring-0/raw-mode.}
5900 * @note Not executed on EMT.
5901 */
5902static DECLCALLBACK(void) e1kR3TxTaskCallback(PPDMDEVINS pDevIns, void *pvUser)
5903{
5904 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
5905 E1kLog2(("%s e1kR3TxTaskCallback:\n", pThis->szPrf));
5906
5907 int rc = e1kXmitPending(pDevIns, pThis, false /*fOnWorkerThread*/);
5908 AssertMsg(RT_SUCCESS(rc) || rc == VERR_TRY_AGAIN || rc == VERR_NET_DOWN, ("%Rrc\n", rc));
5909
5910 RT_NOREF(rc, pvUser);
5911}
5912
5913#endif /* IN_RING3 */
5914
5915/**
5916 * Write handler for Transmit Descriptor Tail register.
5917 *
5918 * @param pThis The device state structure.
5919 * @param offset Register offset in memory-mapped frame.
5920 * @param index Register index in register array.
5921 * @param value The value to store.
5923 * @thread EMT
5924 */
5925static int e1kRegWriteTDT(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5926{
5927 int rc = e1kRegWriteDefault(pDevIns, pThis, offset, index, value);
5928
5929 /* All descriptors starting with head and not including tail belong to us. */
5930 /* Process them. */
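/* E.g. with TDH=3 and a TDT write of 7, descriptors 3..6 (head inclusive, tail exclusive) are owned by the device and will be processed. */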
5931 E1kLog2(("%s e1kRegWriteTDT: TDBAL=%08x, TDBAH=%08x, TDLEN=%08x, TDH=%08x, TDT=%08x\n",
5932 pThis->szPrf, TDBAL, TDBAH, TDLEN, TDH, TDT));
5933
5934 /* Compose a temporary TX context, breaking TX CS rule, for debugging purposes. */
5935 /* If we decide to transmit, the TX critical section will be entered later in e1kXmitPending(). */
5936 E1KTXDC txdc;
5937 txdc.tdlen = TDLEN;
5938 txdc.tdh = TDH;
5939 txdc.tdt = TDT;
5940 /* Ignore TDT writes when the link is down. */
5941 if (txdc.tdh != txdc.tdt && (STATUS & STATUS_LU))
5942 {
5943 Log5(("E1000: TDT write: TDH=%08x, TDT=%08x, %d descriptors to process\n", txdc.tdh, txdc.tdt, e1kGetTxLen(&txdc)));
5944 E1kLog(("%s e1kRegWriteTDT: %d descriptors to process\n",
5945 pThis->szPrf, e1kGetTxLen(&txdc)));
5946
5947 /* Transmit pending packets if possible, defer it if we cannot do it
5948 in the current context. */
5949#ifdef E1K_TX_DELAY
5950 rc = e1kCsTxEnter(pThis, VERR_SEM_BUSY);
5951 if (RT_LIKELY(rc == VINF_SUCCESS))
5952 {
5953 if (!PDMDevInsTimerIsActive(pDevIns, pThis->hTXDTimer))
5954 {
5955# ifdef E1K_INT_STATS
5956 pThis->u64ArmedAt = RTTimeNanoTS();
5957# endif
5958 e1kArmTimer(pDevIns, pThis, pThis->hTXDTimer, E1K_TX_DELAY);
5959 }
5960 E1K_INC_ISTAT_CNT(pThis->uStatTxDelayed);
5961 e1kCsTxLeave(pThis);
5962 return rc;
5963 }
5964 /* We failed to enter the TX critical section -- transmit as usual. */
5965#endif /* E1K_TX_DELAY */
5966#ifndef IN_RING3
5967 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
5968 if (!pThisCC->CTX_SUFF(pDrv))
5969 {
5970 PDMDevHlpTaskTrigger(pDevIns, pThis->hTxTask);
5971 rc = VINF_SUCCESS;
5972 }
5973 else
5974#endif
5975 {
5976 rc = e1kXmitPending(pDevIns, pThis, false /*fOnWorkerThread*/);
5977 if ( rc == VERR_TRY_AGAIN
5978 || rc == VERR_NET_DOWN)
5979 rc = VINF_SUCCESS;
5980#ifndef IN_RING3
5981 else if (rc == VERR_SEM_BUSY)
5982 rc = VINF_IOM_R3_MMIO_WRITE;
5983#endif
5984 AssertRC(rc);
5985 }
5986 }
5987
5988 return rc;
5989}
5990
5991/**
5992 * Write handler for Multicast Table Array registers.
5993 *
5994 * @param pThis The device state structure.
5995 * @param offset Register offset in memory-mapped frame.
5996 * @param index Register index in register array.
5997 * @param value The value to store.
5998 * @thread EMT
5999 */
6000static int e1kRegWriteMTA(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
6001{
6002 RT_NOREF_PV(pDevIns);
6003 AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->auMTA), VERR_DEV_IO_ERROR);
6004 pThis->auMTA[(offset - g_aE1kRegMap[index].offset) / sizeof(pThis->auMTA[0])] = value;
6005
6006 return VINF_SUCCESS;
6007}
6008
6009/**
6010 * Read handler for Multicast Table Array registers.
6011 *
6012 * @returns VBox status code.
6013 *
6014 * @param pThis The device state structure.
6015 * @param offset Register offset in memory-mapped frame.
6016 * @param index Register index in register array.
6017 * @thread EMT
6018 */
6019static int e1kRegReadMTA(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
6020{
6021 RT_NOREF_PV(pDevIns);
6022 AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->auMTA), VERR_DEV_IO_ERROR);
6023 *pu32Value = pThis->auMTA[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->auMTA[0])];
6024
6025 return VINF_SUCCESS;
6026}
6027
6028/**
6029 * Write handler for Receive Address registers.
6030 *
6031 * @param pThis The device state structure.
6032 * @param offset Register offset in memory-mapped frame.
6033 * @param index Register index in register array.
6034 * @param value The value to store.
6035 * @thread EMT
6036 */
6037static int e1kRegWriteRA(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
6038{
6039 RT_NOREF_PV(pDevIns);
6040 AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->aRecAddr.au32), VERR_DEV_IO_ERROR);
6041 pThis->aRecAddr.au32[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->aRecAddr.au32[0])] = value;
6042
6043 return VINF_SUCCESS;
6044}
6045
6046/**
6047 * Read handler for Receive Address registers.
6048 *
6049 * @returns VBox status code.
6050 *
6051 * @param pThis The device state structure.
6052 * @param offset Register offset in memory-mapped frame.
6053 * @param index Register index in register array.
6054 * @thread EMT
6055 */
6056static int e1kRegReadRA(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
6057{
6058 RT_NOREF_PV(pDevIns);
6059 AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->aRecAddr.au32), VERR_DEV_IO_ERROR);
6060 *pu32Value = pThis->aRecAddr.au32[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->aRecAddr.au32[0])];
6061
6062 return VINF_SUCCESS;
6063}
6064
6065/**
6066 * Write handler for VLAN Filter Table Array registers.
6067 *
6068 * @param pThis The device state structure.
6069 * @param offset Register offset in memory-mapped frame.
6070 * @param index Register index in register array.
6071 * @param value The value to store.
6072 * @thread EMT
6073 */
6074static int e1kRegWriteVFTA(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
6075{
6076 RT_NOREF_PV(pDevIns);
6077 AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->auVFTA), VINF_SUCCESS);
6078 pThis->auVFTA[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->auVFTA[0])] = value;
6079
6080 return VINF_SUCCESS;
6081}
6082
6083/**
6084 * Read handler for VLAN Filter Table Array registers.
6085 *
6086 * @returns VBox status code.
6087 *
6088 * @param pThis The device state structure.
6089 * @param offset Register offset in memory-mapped frame.
6090 * @param index Register index in register array.
6091 * @thread EMT
6092 */
6093static int e1kRegReadVFTA(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
6094{
6095 RT_NOREF_PV(pDevIns);
6096    AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->auVFTA), VERR_DEV_IO_ERROR);
6097 *pu32Value = pThis->auVFTA[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->auVFTA[0])];
6098
6099 return VINF_SUCCESS;
6100}
6101
6102/**
6103 * Read handler for unimplemented registers.
6104 *
6105 * Merely reports reads from unimplemented registers.
6106 *
6107 * @returns VBox status code.
6108 *
6109 * @param pThis The device state structure.
6110 * @param offset Register offset in memory-mapped frame.
6111 * @param index Register index in register array.
6112 * @thread EMT
6113 */
6114static int e1kRegReadUnimplemented(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
6115{
6116 RT_NOREF(pDevIns, pThis, offset, index);
6117 E1kLog(("%s At %08X read (00000000) attempt from unimplemented register %s (%s)\n",
6118 pThis->szPrf, offset, g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
6119 *pu32Value = 0;
6120
6121 return VINF_SUCCESS;
6122}
6123
6124/**
6125 * Default register read handler with automatic clear operation.
6126 *
6127 * Retrieves the value of the register from the register array in the device state
6128 * structure, then clears it (all bits are reset to zero).
6129 *
6130 * @remarks The 'mask' parameter is simply ignored as masking and shifting is
6131 * done in the caller.
6132 *
6133 * @returns VBox status code.
6134 *
6135 * @param pThis The device state structure.
6136 * @param offset Register offset in memory-mapped frame.
6137 * @param index Register index in register array.
6138 * @thread EMT
6139 */
6140static int e1kRegReadAutoClear(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
6141{
6142 AssertReturn(index < E1K_NUM_OF_32BIT_REGS, VERR_DEV_IO_ERROR);
6143 int rc = e1kRegReadDefault(pDevIns, pThis, offset, index, pu32Value);
6144 pThis->auRegs[index] = 0;
6145
6146 return rc;
6147}
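
/*
 * Illustrative note (not part of the original source): registers served by
 * e1kRegReadAutoClear() have read-to-clear semantics.  A sketch of the
 * guest-visible effect, where read32() is a hypothetical helper standing in
 * for an aligned register read:
 *
 *     first  = read32(reg);   // returns the accumulated value
 *     second = read32(reg);   // returns 0, the first read cleared it
 */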
6148
6149/**
6150 * Default register read handler.
6151 *
6152 * Retrieves the value of the register from the register array in the device state
6153 * structure. Bits corresponding to 0s in the 'readable' mask always read as 0s.
6154 *
6155 * @remarks The 'mask' parameter is simply ignored as masking and shifting is
6156 * done in the caller.
6157 *
6158 * @returns VBox status code.
6159 *
6160 * @param pThis The device state structure.
6161 * @param offset Register offset in memory-mapped frame.
6162 * @param index Register index in register array.
6163 * @thread EMT
6164 */
6165static int e1kRegReadDefault(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
6166{
6167 RT_NOREF_PV(pDevIns); RT_NOREF_PV(offset);
6168
6169 AssertReturn(index < E1K_NUM_OF_32BIT_REGS, VERR_DEV_IO_ERROR);
6170 *pu32Value = pThis->auRegs[index] & g_aE1kRegMap[index].readable;
6171
6172 return VINF_SUCCESS;
6173}
6174
6175/**
6176 * Write handler for unimplemented registers.
6177 *
6178 * Merely reports writes to unimplemented registers.
6179 *
6180 * @param pThis The device state structure.
6181 * @param offset Register offset in memory-mapped frame.
6182 * @param index Register index in register array.
6183 * @param value The value to store.
6184 * @thread EMT
6185 */
6186
6187static int e1kRegWriteUnimplemented(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
6188{
6189 RT_NOREF_PV(pDevIns); RT_NOREF_PV(pThis); RT_NOREF_PV(offset); RT_NOREF_PV(index); RT_NOREF_PV(value);
6190
6191 E1kLog(("%s At %08X write attempt (%08X) to unimplemented register %s (%s)\n",
6192 pThis->szPrf, offset, value, g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
6193
6194 return VINF_SUCCESS;
6195}
6196
6197/**
6198 * Default register write handler.
6199 *
6200 * Stores the value to the register array in the device state structure. Only bits
6201 * corresponding to 1s in the 'writable' mask are stored; all other bits keep their previous value.
6202 *
6203 * @returns VBox status code.
6204 *
6205 * @param pThis The device state structure.
6206 * @param offset Register offset in memory-mapped frame.
6207 * @param index Register index in register array.
6208 * @param value The value to store.
6209 * @remarks Unlike the read path there is no mask parameter; the callers only pass full, aligned 32-bit writes.
6210 * @thread EMT
6211 */
6212
6213static int e1kRegWriteDefault(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
6214{
6215 RT_NOREF(pDevIns, offset);
6216
6217 AssertReturn(index < E1K_NUM_OF_32BIT_REGS, VERR_DEV_IO_ERROR);
6218 pThis->auRegs[index] = (value & g_aE1kRegMap[index].writable)
6219 | (pThis->auRegs[index] & ~g_aE1kRegMap[index].writable);
6220
6221 return VINF_SUCCESS;
6222}
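
/*
 * Illustrative note (not part of the original source): the assignment in
 * e1kRegWriteDefault() is a read-modify-write that only lets 'writable' bits
 * through.  A worked example with a hypothetical mask:
 *
 *     writable = 0x00FF00FF;
 *     old      = 0x12345678;
 *     value    = 0xAAAAAAAA;
 *     result   = (value & writable) | (old & ~writable);  // = 0x12AA56AA
 */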
6223
6224/**
6225 * Search register table for matching register.
6226 *
6227 * @returns Index in the register table or -1 if not found.
6228 *
6229 * @param offReg Register offset in memory-mapped region.
6230 * @thread EMT
6231 */
6232static int e1kRegLookup(uint32_t offReg)
6233{
6234
6235#if 0
6236 int index;
6237
6238 for (index = 0; index < E1K_NUM_OF_REGS; index++)
6239 {
6240 if (g_aE1kRegMap[index].offset <= offReg && offReg < g_aE1kRegMap[index].offset + g_aE1kRegMap[index].size)
6241 {
6242 return index;
6243 }
6244 }
6245#else
6246 int iStart = 0;
6247 int iEnd = E1K_NUM_OF_BINARY_SEARCHABLE;
6248 for (;;)
6249 {
6250 int i = (iEnd - iStart) / 2 + iStart;
6251 uint32_t offCur = g_aE1kRegMap[i].offset;
6252 if (offReg < offCur)
6253 {
6254 if (i == iStart)
6255 break;
6256 iEnd = i;
6257 }
6258 else if (offReg >= offCur + g_aE1kRegMap[i].size)
6259 {
6260 i++;
6261 if (i == iEnd)
6262 break;
6263 iStart = i;
6264 }
6265 else
6266 return i;
6267 Assert(iEnd > iStart);
6268 }
6269
6270 for (unsigned i = E1K_NUM_OF_BINARY_SEARCHABLE; i < RT_ELEMENTS(g_aE1kRegMap); i++)
6271 if (offReg - g_aE1kRegMap[i].offset < g_aE1kRegMap[i].size)
6272 return (int)i;
6273
6274# ifdef VBOX_STRICT
6275 for (unsigned i = 0; i < RT_ELEMENTS(g_aE1kRegMap); i++)
6276 Assert(offReg - g_aE1kRegMap[i].offset >= g_aE1kRegMap[i].size);
6277# endif
6278
6279#endif
6280
6281 return -1;
6282}
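
/*
 * Illustrative note (not part of the original source): the lookup above runs
 * in two phases -- a binary search over the first E1K_NUM_OF_BINARY_SEARCHABLE
 * entries, which are assumed to be sorted by offset and non-overlapping,
 * followed by a linear scan over the remaining entries.  Roughly:
 *
 *     idx = bsearchSortedPart(offReg);    // hypothetical helper for the loop above
 *     if (idx < 0)
 *         idx = scanUnsortedTail(offReg); // hypothetical helper for the tail loop
 */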
6283
6284/**
6285 * Handle unaligned register read operation.
6286 *
6287 * Looks up and calls appropriate handler.
6288 *
6289 * @returns VBox status code.
6290 *
6291 * @param pDevIns The device instance.
6292 * @param pThis The device state structure.
6293 * @param offReg Register offset in memory-mapped frame.
6294 * @param pv Where to store the result.
6295 * @param cb Number of bytes to read.
6296 * @thread EMT
6297 * @remarks IOM takes care of unaligned and small reads via MMIO. For I/O port
6298 * accesses we have to take care of that ourselves.
6299 */
6300static int e1kRegReadUnaligned(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offReg, void *pv, uint32_t cb)
6301{
6302 uint32_t u32 = 0;
6303 uint32_t shift;
6304 int rc = VINF_SUCCESS;
6305 int index = e1kRegLookup(offReg);
6306#ifdef LOG_ENABLED
6307 char buf[9];
6308#endif
6309
6310 /*
6311 * From the spec:
6312 * For registers that should be accessed as 32-bit double words, partial writes (less than a 32-bit
6313 * double word) is ignored. Partial reads return all 32 bits of data regardless of the byte enables.
6314 */
6315
6316 /*
6317 * To be able to read bytes and 16-bit words we convert them to properly
6318 * shifted 32-bit words and masks. The idea is to keep register-specific
6319 * handlers simple. Most accesses will be 32-bit anyway.
6320 */
6321 uint32_t mask;
6322 switch (cb)
6323 {
6324 case 4: mask = 0xFFFFFFFF; break;
6325 case 2: mask = 0x0000FFFF; break;
6326 case 1: mask = 0x000000FF; break;
6327 default:
6328 return PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "unsupported op size: offset=%#10x cb=%#10x\n", offReg, cb);
6329 }
6330 if (index >= 0)
6331 {
6332 RT_UNTRUSTED_VALIDATED_FENCE(); /* paranoia because of port I/O. */
6333 if (g_aE1kRegMap[index].readable)
6334 {
6335 /* Make the mask correspond to the bits we are about to read. */
6336 shift = (offReg - g_aE1kRegMap[index].offset) % sizeof(uint32_t) * 8;
6337 mask <<= shift;
6338 if (!mask)
6339 return PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "Zero mask: offset=%#10x cb=%#10x\n", offReg, cb);
6340 /*
6341 * Read it. Pass the mask so the handler knows what has to be read.
6342 * Mask out irrelevant bits.
6343 */
6344 //e1kCsEnterReturn(pThis, VERR_SEM_BUSY);
6345 //pThis->fDelayInts = false;
6346 //pThis->iStatIntLost += pThis->iStatIntLostOne;
6347 //pThis->iStatIntLostOne = 0;
6348 rc = g_aE1kRegMap[index].pfnRead(pDevIns, pThis, offReg & 0xFFFFFFFC, (uint32_t)index, &u32);
6349 u32 &= mask;
6350 //e1kCsLeave(pThis);
6351 E1kLog2(("%s At %08X read %s from %s (%s)\n",
6352 pThis->szPrf, offReg, e1kU32toHex(u32, mask, buf), g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
6353 Log6(("%s At %08X read %s from %s (%s) [UNALIGNED]\n",
6354 pThis->szPrf, offReg, e1kU32toHex(u32, mask, buf), g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
6355 /* Shift back the result. */
6356 u32 >>= shift;
6357 }
6358 else
6359 E1kLog(("%s At %08X read (%s) attempt from write-only register %s (%s)\n",
6360 pThis->szPrf, offReg, e1kU32toHex(u32, mask, buf), g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
6361 if (IOM_SUCCESS(rc))
6362 STAM_COUNTER_INC(&pThis->aStatRegReads[index]);
6363 }
6364 else
6365 E1kLog(("%s At %08X read (%s) attempt from non-existing register\n",
6366 pThis->szPrf, offReg, e1kU32toHex(u32, mask, buf)));
6367
6368 memcpy(pv, &u32, cb);
6369 return rc;
6370}
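
/*
 * Illustrative note (not part of the original source): a worked example of the
 * mask/shift logic above for a hypothetical 2-byte read from the upper half of
 * a register (low two bits of the offset equal to 2):
 *
 *     cb    = 2;                       // 16-bit access
 *     shift = (offReg % 4) * 8;        // = 16
 *     mask  = 0x0000FFFF << shift;     // = 0xFFFF0000
 *     u32   = fullRegisterValue & mask;
 *     u32 >>= shift;                   // the guest sees the upper 16 bits
 */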
6371
6372/**
6373 * Handle 4 byte aligned and sized read operation.
6374 *
6375 * Looks up and calls appropriate handler.
6376 *
6377 * @returns VBox status code.
6378 *
6379 * @param pDevIns The device instance.
6380 * @param pThis The device state structure.
6381 * @param offReg Register offset in memory-mapped frame.
6382 * @param pu32 Where to store the result.
6383 * @thread EMT
6384 */
6385static VBOXSTRICTRC e1kRegReadAlignedU32(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offReg, uint32_t *pu32)
6386{
6387 Assert(!(offReg & 3));
6388
6389 /*
6390 * Lookup the register and check that it's readable.
6391 */
6392 VBOXSTRICTRC rc = VINF_SUCCESS;
6393 int idxReg = e1kRegLookup(offReg);
6394 if (RT_LIKELY(idxReg >= 0))
6395 {
6396 RT_UNTRUSTED_VALIDATED_FENCE(); /* paranoia because of port I/O. */
6397        if (RT_LIKELY(g_aE1kRegMap[idxReg].readable))
6398 {
6399 /*
6400 * Read it. Pass the mask so the handler knows what has to be read.
6401 * Mask out irrelevant bits.
6402 */
6403 //e1kCsEnterReturn(pThis, VERR_SEM_BUSY);
6404 //pThis->fDelayInts = false;
6405 //pThis->iStatIntLost += pThis->iStatIntLostOne;
6406 //pThis->iStatIntLostOne = 0;
6407 rc = g_aE1kRegMap[idxReg].pfnRead(pDevIns, pThis, offReg & 0xFFFFFFFC, (uint32_t)idxReg, pu32);
6408 //e1kCsLeave(pThis);
6409 Log6(("%s At %08X read %08X from %s (%s)\n",
6410 pThis->szPrf, offReg, *pu32, g_aE1kRegMap[idxReg].abbrev, g_aE1kRegMap[idxReg].name));
6411 if (IOM_SUCCESS(rc))
6412 STAM_COUNTER_INC(&pThis->aStatRegReads[idxReg]);
6413 }
6414 else
6415 E1kLog(("%s At %08X read attempt from non-readable register %s (%s)\n",
6416 pThis->szPrf, offReg, g_aE1kRegMap[idxReg].abbrev, g_aE1kRegMap[idxReg].name));
6417 }
6418 else
6419 E1kLog(("%s At %08X read attempt from non-existing register\n", pThis->szPrf, offReg));
6420 return rc;
6421}
6422
6423/**
6424 * Handle 4 byte sized and aligned register write operation.
6425 *
6426 * Looks up and calls appropriate handler.
6427 *
6428 * @returns VBox status code.
6429 *
6430 * @param pDevIns The device instance.
6431 * @param pThis The device state structure.
6432 * @param offReg Register offset in memory-mapped frame.
6433 * @param u32Value The value to write.
6434 * @thread EMT
6435 */
6436static VBOXSTRICTRC e1kRegWriteAlignedU32(PPDMDEVINS pDevIns, PE1KSTATE pThis, uint32_t offReg, uint32_t u32Value)
6437{
6438 VBOXSTRICTRC rc = VINF_SUCCESS;
6439 int index = e1kRegLookup(offReg);
6440 if (RT_LIKELY(index >= 0))
6441 {
6442 RT_UNTRUSTED_VALIDATED_FENCE(); /* paranoia because of port I/O. */
6443 if (RT_LIKELY(g_aE1kRegMap[index].writable))
6444 {
6445 /*
6446 * Write it. Pass the mask so the handler knows what has to be written.
6447 * Mask out irrelevant bits.
6448 */
6449 Log6(("%s At %08X write %08X to %s (%s)\n",
6450 pThis->szPrf, offReg, u32Value, g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
6451 //e1kCsEnterReturn(pThis, VERR_SEM_BUSY);
6452 //pThis->fDelayInts = false;
6453 //pThis->iStatIntLost += pThis->iStatIntLostOne;
6454 //pThis->iStatIntLostOne = 0;
6455 rc = g_aE1kRegMap[index].pfnWrite(pDevIns, pThis, offReg, (uint32_t)index, u32Value);
6456 //e1kCsLeave(pThis);
6457 }
6458 else
6459 E1kLog(("%s At %08X write attempt (%08X) to read-only register %s (%s)\n",
6460 pThis->szPrf, offReg, u32Value, g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
6461 if (IOM_SUCCESS(rc))
6462 STAM_COUNTER_INC(&pThis->aStatRegWrites[index]);
6463 }
6464 else
6465 E1kLog(("%s At %08X write attempt (%08X) to non-existing register\n",
6466 pThis->szPrf, offReg, u32Value));
6467 return rc;
6468}
6469
6470
6471/* -=-=-=-=- MMIO and I/O Port Callbacks -=-=-=-=- */
6472
6473/**
6474 * @callback_method_impl{FNIOMMMIONEWREAD}
6475 */
6476static DECLCALLBACK(VBOXSTRICTRC) e1kMMIORead(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void *pv, uint32_t cb)
6477{
6478 RT_NOREF2(pvUser, cb);
6479 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
6480 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatMMIORead), a);
6481
6482 Assert(off < E1K_MM_SIZE);
6483 Assert(cb == 4);
6484 Assert(!(off & 3));
6485
6486 VBOXSTRICTRC rcStrict = e1kRegReadAlignedU32(pDevIns, pThis, (uint32_t)off, (uint32_t *)pv);
6487
6488 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatMMIORead), a);
6489 return rcStrict;
6490}
6491
6492/**
6493 * @callback_method_impl{FNIOMMMIONEWWRITE}
6494 */
6495static DECLCALLBACK(VBOXSTRICTRC) e1kMMIOWrite(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void const *pv, uint32_t cb)
6496{
6497 RT_NOREF2(pvUser, cb);
6498 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
6499 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatMMIOWrite), a);
6500
6501 Assert(off < E1K_MM_SIZE);
6502 Assert(cb == 4);
6503 Assert(!(off & 3));
6504
6505 VBOXSTRICTRC rcStrict = e1kRegWriteAlignedU32(pDevIns, pThis, (uint32_t)off, *(uint32_t const *)pv);
6506
6507 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatMMIOWrite), a);
6508 return rcStrict;
6509}
6510
6511/**
6512 * @callback_method_impl{FNIOMIOPORTNEWIN}
6513 */
6514static DECLCALLBACK(VBOXSTRICTRC) e1kIOPortIn(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT offPort, uint32_t *pu32, unsigned cb)
6515{
6516 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
6517 VBOXSTRICTRC rc;
6518 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatIORead), a);
6519 RT_NOREF_PV(pvUser);
6520
6521 if (RT_LIKELY(cb == 4))
6522 switch (offPort)
6523 {
6524 case 0x00: /* IOADDR */
6525 *pu32 = pThis->uSelectedReg;
6526 Log9(("%s e1kIOPortIn: IOADDR(0), selecting register %#010x, val=%#010x\n", pThis->szPrf, pThis->uSelectedReg, *pu32));
6527 rc = VINF_SUCCESS;
6528 break;
6529
6530 case 0x04: /* IODATA */
6531 if (!(pThis->uSelectedReg & 3))
6532 rc = e1kRegReadAlignedU32(pDevIns, pThis, pThis->uSelectedReg, pu32);
6533 else /** @todo r=bird: I wouldn't be surprised if this unaligned branch wasn't necessary. */
6534 rc = e1kRegReadUnaligned(pDevIns, pThis, pThis->uSelectedReg, pu32, cb);
6535 if (rc == VINF_IOM_R3_MMIO_READ)
6536 rc = VINF_IOM_R3_IOPORT_READ;
6537 Log9(("%s e1kIOPortIn: IODATA(4), reading from selected register %#010x, val=%#010x\n", pThis->szPrf, pThis->uSelectedReg, *pu32));
6538 break;
6539
6540 default:
6541 E1kLog(("%s e1kIOPortIn: invalid port %#010x\n", pThis->szPrf, offPort));
6542 /** @todo r=bird: Check what real hardware returns here. */
6543 //rc = VERR_IOM_IOPORT_UNUSED; /* Why not? */
6544 rc = VINF_IOM_MMIO_UNUSED_00; /* used to return VINF_SUCCESS and not touch *pu32, which amounted to this. */
6545 break;
6546 }
6547 else
6548 {
6549        E1kLog(("%s e1kIOPortIn: invalid op size: offPort=%RTiop cb=%08x\n", pThis->szPrf, offPort, cb));
6550 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "%s e1kIOPortIn: invalid op size: offPort=%RTiop cb=%08x\n", pThis->szPrf, offPort, cb);
6551 *pu32 = 0; /** @todo r=bird: Check what real hardware returns here. (Didn't used to set a value here, picked zero as that's what we'd end up in most cases.) */
6552 }
6553 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatIORead), a);
6554 return rc;
6555}
6556
6557
6558/**
6559 * @callback_method_impl{FNIOMIOPORTNEWOUT}
6560 */
6561static DECLCALLBACK(VBOXSTRICTRC) e1kIOPortOut(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT offPort, uint32_t u32, unsigned cb)
6562{
6563 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
6564 VBOXSTRICTRC rc;
6565 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatIOWrite), a);
6566 RT_NOREF_PV(pvUser);
6567
6568 Log9(("%s e1kIOPortOut: offPort=%RTiop value=%08x\n", pThis->szPrf, offPort, u32));
6569 if (RT_LIKELY(cb == 4))
6570 {
6571 switch (offPort)
6572 {
6573 case 0x00: /* IOADDR */
6574 pThis->uSelectedReg = u32;
6575 Log9(("%s e1kIOPortOut: IOADDR(0), selected register %08x\n", pThis->szPrf, pThis->uSelectedReg));
6576 rc = VINF_SUCCESS;
6577 break;
6578
6579 case 0x04: /* IODATA */
6580 Log9(("%s e1kIOPortOut: IODATA(4), writing to selected register %#010x, value=%#010x\n", pThis->szPrf, pThis->uSelectedReg, u32));
6581 if (RT_LIKELY(!(pThis->uSelectedReg & 3)))
6582 {
6583 rc = e1kRegWriteAlignedU32(pDevIns, pThis, pThis->uSelectedReg, u32);
6584 if (rc == VINF_IOM_R3_MMIO_WRITE)
6585 rc = VINF_IOM_R3_IOPORT_WRITE;
6586 }
6587 else
6588 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS,
6589 "Spec violation: misaligned offset: %#10x, ignored.\n", pThis->uSelectedReg);
6590 break;
6591
6592 default:
6593 E1kLog(("%s e1kIOPortOut: invalid port %#010x\n", pThis->szPrf, offPort));
6594 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "invalid port %#010x\n", offPort);
6595 }
6596 }
6597 else
6598 {
6599 E1kLog(("%s e1kIOPortOut: invalid op size: offPort=%RTiop cb=%08x\n", pThis->szPrf, offPort, cb));
6600 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "%s: invalid op size: offPort=%RTiop cb=%#x\n", pThis->szPrf, offPort, cb);
6601 }
6602
6603 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatIOWrite), a);
6604 return rc;
6605}
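
/*
 * Illustrative note (not part of the original source): together with
 * e1kIOPortIn() above, this implements the usual IOADDR/IODATA window.  The
 * guest first writes a register offset to BAR offset 0 and then accesses the
 * register contents through BAR offset 4.  A hypothetical guest-side sketch
 * (outl/inl, REG_OFFSET and SOME_BIT are placeholders, not names from this
 * file):
 *
 *     outl(iobase + 0x00, REG_OFFSET);       // select the register (IOADDR)
 *     val = inl(iobase + 0x04);              // read it             (IODATA)
 *     outl(iobase + 0x04, val | SOME_BIT);   // write it back       (IODATA)
 */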
6606
6607#ifdef IN_RING3
6608
6609/**
6610 * Dump complete device state to log.
6611 *
6612 * @param pThis Pointer to device state.
6613 */
6614static void e1kDumpState(PE1KSTATE pThis)
6615{
6616 RT_NOREF(pThis);
6617 for (int i = 0; i < E1K_NUM_OF_32BIT_REGS; ++i)
6618 E1kLog2(("%s: %8.8s = %08x\n", pThis->szPrf, g_aE1kRegMap[i].abbrev, pThis->auRegs[i]));
6619# ifdef E1K_INT_STATS
6620 LogRel(("%s: Interrupt attempts: %d\n", pThis->szPrf, pThis->uStatIntTry));
6621 LogRel(("%s: Interrupts raised : %d\n", pThis->szPrf, pThis->uStatInt));
6622 LogRel(("%s: Interrupts lowered: %d\n", pThis->szPrf, pThis->uStatIntLower));
6623 LogRel(("%s: ICR outside ISR : %d\n", pThis->szPrf, pThis->uStatNoIntICR));
6624 LogRel(("%s: IMS raised ints : %d\n", pThis->szPrf, pThis->uStatIntIMS));
6625 LogRel(("%s: Interrupts skipped: %d\n", pThis->szPrf, pThis->uStatIntSkip));
6626 LogRel(("%s: Masked interrupts : %d\n", pThis->szPrf, pThis->uStatIntMasked));
6627 LogRel(("%s: Early interrupts : %d\n", pThis->szPrf, pThis->uStatIntEarly));
6628 LogRel(("%s: Late interrupts : %d\n", pThis->szPrf, pThis->uStatIntLate));
6629 LogRel(("%s: Lost interrupts : %d\n", pThis->szPrf, pThis->iStatIntLost));
6630 LogRel(("%s: Interrupts by RX : %d\n", pThis->szPrf, pThis->uStatIntRx));
6631 LogRel(("%s: Interrupts by TX : %d\n", pThis->szPrf, pThis->uStatIntTx));
6632 LogRel(("%s: Interrupts by ICS : %d\n", pThis->szPrf, pThis->uStatIntICS));
6633 LogRel(("%s: Interrupts by RDTR: %d\n", pThis->szPrf, pThis->uStatIntRDTR));
6634 LogRel(("%s: Interrupts by RDMT: %d\n", pThis->szPrf, pThis->uStatIntRXDMT0));
6635 LogRel(("%s: Interrupts by TXQE: %d\n", pThis->szPrf, pThis->uStatIntTXQE));
6636 LogRel(("%s: TX int delay asked: %d\n", pThis->szPrf, pThis->uStatTxIDE));
6637 LogRel(("%s: TX delayed: %d\n", pThis->szPrf, pThis->uStatTxDelayed));
6638 LogRel(("%s: TX delay expired: %d\n", pThis->szPrf, pThis->uStatTxDelayExp));
6639 LogRel(("%s: TX no report asked: %d\n", pThis->szPrf, pThis->uStatTxNoRS));
6640 LogRel(("%s: TX abs timer expd : %d\n", pThis->szPrf, pThis->uStatTAD));
6641 LogRel(("%s: TX int timer expd : %d\n", pThis->szPrf, pThis->uStatTID));
6642 LogRel(("%s: RX abs timer expd : %d\n", pThis->szPrf, pThis->uStatRAD));
6643 LogRel(("%s: RX int timer expd : %d\n", pThis->szPrf, pThis->uStatRID));
6644 LogRel(("%s: TX CTX descriptors: %d\n", pThis->szPrf, pThis->uStatDescCtx));
6645 LogRel(("%s: TX DAT descriptors: %d\n", pThis->szPrf, pThis->uStatDescDat));
6646 LogRel(("%s: TX LEG descriptors: %d\n", pThis->szPrf, pThis->uStatDescLeg));
6647 LogRel(("%s: Received frames : %d\n", pThis->szPrf, pThis->uStatRxFrm));
6648 LogRel(("%s: Transmitted frames: %d\n", pThis->szPrf, pThis->uStatTxFrm));
6649 LogRel(("%s: TX frames up to 1514: %d\n", pThis->szPrf, pThis->uStatTx1514));
6650 LogRel(("%s: TX frames up to 2962: %d\n", pThis->szPrf, pThis->uStatTx2962));
6651 LogRel(("%s: TX frames up to 4410: %d\n", pThis->szPrf, pThis->uStatTx4410));
6652 LogRel(("%s: TX frames up to 5858: %d\n", pThis->szPrf, pThis->uStatTx5858));
6653 LogRel(("%s: TX frames up to 7306: %d\n", pThis->szPrf, pThis->uStatTx7306));
6654 LogRel(("%s: TX frames up to 8754: %d\n", pThis->szPrf, pThis->uStatTx8754));
6655 LogRel(("%s: TX frames up to 16384: %d\n", pThis->szPrf, pThis->uStatTx16384));
6656 LogRel(("%s: TX frames up to 32768: %d\n", pThis->szPrf, pThis->uStatTx32768));
6657 LogRel(("%s: Larger TX frames : %d\n", pThis->szPrf, pThis->uStatTxLarge));
6658 LogRel(("%s: Max TX Delay : %lld\n", pThis->szPrf, pThis->uStatMaxTxDelay));
6659# endif /* E1K_INT_STATS */
6660}
6661
6662
6663/* -=-=-=-=- PDMINETWORKDOWN -=-=-=-=- */
6664
6665/**
6666 * Check if the device can receive data now.
6667 * This must be called before the pfnReceive() method is called.
6668 *
6669 * @returns VBox status code.
6670 * @retval VERR_NET_NO_BUFFER_SPACE if we cannot receive.
6671 * @param pDevIns The device instance.
6672 * @param pThis The instance data.
6673 * @thread EMT
6674 */
6675static int e1kR3CanReceive(PPDMDEVINS pDevIns, PE1KSTATE pThis)
6676{
6677# ifndef E1K_WITH_RXD_CACHE
6678 size_t cb;
6679
6680 e1kCsRxEnterReturn(pThis);
6681
6682 if (RT_UNLIKELY(RDLEN == sizeof(E1KRXDESC)))
6683 {
6684 E1KRXDESC desc;
6685 PDMDevHlpPCIPhysRead(pDevIns, e1kDescAddr(RDBAH, RDBAL, RDH), &desc, sizeof(desc));
6686 if (desc.status.fDD)
6687 cb = 0;
6688 else
6689 cb = pThis->u16RxBSize;
6690 }
6691 else if (RDH < RDT)
6692 cb = (RDT - RDH) * pThis->u16RxBSize;
6693 else if (RDH > RDT)
6694 cb = (RDLEN / sizeof(E1KRXDESC) - RDH + RDT) * pThis->u16RxBSize;
6695 else
6696 {
6697 cb = 0;
6698 E1kLogRel(("E1000: OUT of RX descriptors!\n"));
6699 }
6700 E1kLog2(("%s e1kR3CanReceive: at exit RDH=%d RDT=%d RDLEN=%d u16RxBSize=%d cb=%lu\n",
6701 pThis->szPrf, RDH, RDT, RDLEN, pThis->u16RxBSize, cb));
6702
6703 e1kCsRxLeave(pThis);
6704 return cb > 0 ? VINF_SUCCESS : VERR_NET_NO_BUFFER_SPACE;
6705# else /* E1K_WITH_RXD_CACHE */
6706
6707 e1kCsRxEnterReturn(pThis);
6708
6709 E1KRXDC rxdc;
6710 if (RT_UNLIKELY(!e1kUpdateRxDContext(pDevIns, pThis, &rxdc, "e1kR3CanReceive")))
6711 {
6712 e1kCsRxLeave(pThis);
6713 E1kLog(("%s e1kR3CanReceive: failed to update Rx context, returning VERR_NET_NO_BUFFER_SPACE\n", pThis->szPrf));
6714 return VERR_NET_NO_BUFFER_SPACE;
6715 }
6716
6717 int rc = VINF_SUCCESS;
6718 if (RT_UNLIKELY(rxdc.rdlen == sizeof(E1KRXDESC)))
6719 {
6720 E1KRXDESC desc;
6721 PDMDevHlpPCIPhysRead(pDevIns, e1kDescAddr(RDBAH, RDBAL, rxdc.rdh), &desc, sizeof(desc));
6722 if (desc.status.fDD)
6723 rc = VERR_NET_NO_BUFFER_SPACE;
6724 }
6725 else if (e1kRxDIsCacheEmpty(pThis) && rxdc.rdh == rxdc.rdt)
6726 {
6727 /* Cache is empty, so is the RX ring. */
6728 rc = VERR_NET_NO_BUFFER_SPACE;
6729 }
6730 E1kLog2(("%s e1kR3CanReceive: at exit in_cache=%d RDH=%d RDT=%d RDLEN=%d u16RxBSize=%d rc=%Rrc\n", pThis->szPrf,
6731 e1kRxDInCache(pThis), rxdc.rdh, rxdc.rdt, rxdc.rdlen, pThis->u16RxBSize, rc));
6732
6733 e1kCsRxLeave(pThis);
6734 return rc;
6735# endif /* E1K_WITH_RXD_CACHE */
6736}
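
/*
 * Illustrative note (not part of the original source): a worked example of the
 * ring-occupancy formula used in the non-cache branch above, assuming
 * sizeof(E1KRXDESC) is 16 bytes:
 *
 *     RDLEN = 256          ->  256 / 16 = 16 descriptors in the ring
 *     RDH   = 14, RDT = 2  ->  cb = (16 - 14 + 2) * u16RxBSize = 4 * u16RxBSize
 *
 * i.e. four descriptors' worth of buffer space remains before the device
 * catches up with the tail.
 */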
6737
6738/**
6739 * @interface_method_impl{PDMINETWORKDOWN,pfnWaitReceiveAvail}
6740 */
6741static DECLCALLBACK(int) e1kR3NetworkDown_WaitReceiveAvail(PPDMINETWORKDOWN pInterface, RTMSINTERVAL cMillies)
6742{
6743 PE1KSTATECC pThisCC = RT_FROM_MEMBER(pInterface, E1KSTATECC, INetworkDown);
6744 PE1KSTATE pThis = pThisCC->pShared;
6745 PPDMDEVINS pDevIns = pThisCC->pDevInsR3;
6746
6747 int rc = e1kR3CanReceive(pDevIns, pThis);
6748 if (RT_SUCCESS(rc))
6749 return VINF_SUCCESS;
6750
6751 if (RT_UNLIKELY(cMillies == 0))
6752 return VERR_NET_NO_BUFFER_SPACE;
6753
6754 rc = VERR_INTERRUPTED;
6755 ASMAtomicXchgBool(&pThis->fMaybeOutOfSpace, true);
6756 STAM_PROFILE_START(&pThis->StatRxOverflow, a);
6757 VMSTATE enmVMState;
6758 while (RT_LIKELY( (enmVMState = PDMDevHlpVMState(pDevIns)) == VMSTATE_RUNNING
6759 || enmVMState == VMSTATE_RUNNING_LS))
6760 {
6761 int rc2 = e1kR3CanReceive(pDevIns, pThis);
6762 if (RT_SUCCESS(rc2))
6763 {
6764 rc = VINF_SUCCESS;
6765 break;
6766 }
6767 E1kLogRel(("E1000: e1kR3NetworkDown_WaitReceiveAvail: waiting cMillies=%u...\n", cMillies));
6768 E1kLog(("%s: e1kR3NetworkDown_WaitReceiveAvail: waiting cMillies=%u...\n", pThis->szPrf, cMillies));
6769 PDMDevHlpSUPSemEventWaitNoResume(pDevIns, pThis->hEventMoreRxDescAvail, cMillies);
6770 }
6771 STAM_PROFILE_STOP(&pThis->StatRxOverflow, a);
6772 ASMAtomicXchgBool(&pThis->fMaybeOutOfSpace, false);
6773
6774 return rc;
6775}
6776
6777
6778/**
6779 * Matches the packet addresses against Receive Address table. Looks for
6780 * exact matches only.
6781 *
6782 * @returns true if address matches.
6783 * @param pThis Pointer to the state structure.
6784 * @param pvBuf The ethernet packet.
6785 * @param cb Number of bytes available in the packet.
6786 * @thread EMT
6787 */
6788static bool e1kPerfectMatch(PE1KSTATE pThis, const void *pvBuf)
6789{
6790 for (unsigned i = 0; i < RT_ELEMENTS(pThis->aRecAddr.array); i++)
6791 {
6792 E1KRAELEM* ra = pThis->aRecAddr.array + i;
6793
6794 /* Valid address? */
6795 if (ra->ctl & RA_CTL_AV)
6796 {
6797 Assert((ra->ctl & RA_CTL_AS) < 2);
6798 //unsigned char *pAddr = (unsigned char*)pvBuf + sizeof(ra->addr)*(ra->ctl & RA_CTL_AS);
6799 //E1kLog3(("%s Matching %02x:%02x:%02x:%02x:%02x:%02x against %02x:%02x:%02x:%02x:%02x:%02x...\n",
6800 // pThis->szPrf, pAddr[0], pAddr[1], pAddr[2], pAddr[3], pAddr[4], pAddr[5],
6801 // ra->addr[0], ra->addr[1], ra->addr[2], ra->addr[3], ra->addr[4], ra->addr[5]));
6802 /*
6803 * Address Select:
6804 * 00b = Destination address
6805 * 01b = Source address
6806 * 10b = Reserved
6807 * 11b = Reserved
6808 * Since ethernet header is (DA, SA, len) we can use address
6809 * select as index.
6810 */
6811 if (memcmp((char*)pvBuf + sizeof(ra->addr)*(ra->ctl & RA_CTL_AS),
6812 ra->addr, sizeof(ra->addr)) == 0)
6813 return true;
6814 }
6815 }
6816
6817 return false;
6818}
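
/*
 * Illustrative note (not part of the original source): since the Ethernet
 * header is laid out as (destination MAC, source MAC, type/len) and
 * sizeof(ra->addr) is the 6-byte MAC length, the Address Select field doubles
 * as a byte-offset multiplier in the memcmp() above:
 *
 *     AS = 0  ->  compare against pvBuf + 0   (destination address)
 *     AS = 1  ->  compare against pvBuf + 6   (source address)
 */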
6819
6820/**
6821 * Matches the packet addresses against Multicast Table Array.
6822 *
6823 * @remarks This is an imperfect match since it matches not an exact address but
6824 * a subset of addresses.
6825 *
6826 * @returns true if address matches.
6827 * @param pThis Pointer to the state structure.
6828 * @param pvBuf The ethernet packet.
6829 * @param cb Number of bytes available in the packet.
6830 * @thread EMT
6831 */
6832static bool e1kImperfectMatch(PE1KSTATE pThis, const void *pvBuf)
6833{
6834 /* Get bits 32..47 of destination address */
6835 uint16_t u16Bit = ((uint16_t*)pvBuf)[2];
6836
6837 unsigned offset = GET_BITS(RCTL, MO);
6838 /*
6839 * offset means:
6840 * 00b = bits 36..47
6841 * 01b = bits 35..46
6842 * 10b = bits 34..45
6843 * 11b = bits 32..43
6844 */
6845 if (offset < 3)
6846 u16Bit = u16Bit >> (4 - offset);
6847 return ASMBitTest(pThis->auMTA, u16Bit & 0xFFF);
6848}
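
/*
 * Illustrative note (not part of the original source): the value computed
 * above selects one bit out of the 4096-bit Multicast Table Array.  A sketch
 * of how that bit maps onto auMTA[], assuming ASMBitTest() uses the usual
 * 32-bit word/bit addressing:
 *
 *     bit  = u16Bit & 0xFFF;                      // 0..4095
 *     word = bit >> 5;                            // index into auMTA[]
 *     fSet = RT_BOOL(auMTA[word] & RT_BIT_32(bit & 31));
 */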
6849
6850/**
6851 * Determines if the packet is to be delivered to upper layer.
6852 *
6853 * The following filters are supported:
6854 * - Exact Unicast/Multicast
6855 * - Promiscuous Unicast/Multicast
6856 * - Multicast
6857 * - VLAN
6858 *
6859 * @returns true if packet is intended for this node.
6860 * @param pThis Pointer to the state structure.
6861 * @param pvBuf The ethernet packet.
6862 * @param cb Number of bytes available in the packet.
6863 * @param pStatus Bit field to store status bits.
6864 * @thread EMT
6865 */
6866static bool e1kAddressFilter(PE1KSTATE pThis, const void *pvBuf, size_t cb, E1KRXDST *pStatus)
6867{
6868 Assert(cb > 14);
6869 /* Assume that we fail to pass exact filter. */
6870 pStatus->fPIF = false;
6871 pStatus->fVP = false;
6872 /* Discard oversized packets */
6873 if (cb > E1K_MAX_RX_PKT_SIZE)
6874 {
6875 E1kLog(("%s ERROR: Incoming packet is too big, cb=%d > max=%d\n",
6876 pThis->szPrf, cb, E1K_MAX_RX_PKT_SIZE));
6877 E1K_INC_CNT32(ROC);
6878 return false;
6879 }
6880 else if (!(RCTL & RCTL_LPE) && cb > 1522)
6881 {
6882        /* When long packet reception is disabled, packets over 1522 bytes are discarded. */
6883 E1kLog(("%s Discarding incoming packet (LPE=0), cb=%d\n",
6884 pThis->szPrf, cb));
6885 E1K_INC_CNT32(ROC);
6886 return false;
6887 }
6888
6889 uint16_t *u16Ptr = (uint16_t*)pvBuf;
6890 /* Compare TPID with VLAN Ether Type */
6891 if (RT_BE2H_U16(u16Ptr[6]) == VET)
6892 {
6893 pStatus->fVP = true;
6894 /* Is VLAN filtering enabled? */
6895 if (RCTL & RCTL_VFE)
6896 {
6897 /* It is 802.1q packet indeed, let's filter by VID */
6898 if (RCTL & RCTL_CFIEN)
6899 {
6900 E1kLog3(("%s VLAN filter: VLAN=%d CFI=%d RCTL_CFI=%d\n", pThis->szPrf,
6901 E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7])),
6902 E1K_SPEC_CFI(RT_BE2H_U16(u16Ptr[7])),
6903 !!(RCTL & RCTL_CFI)));
6904 if (E1K_SPEC_CFI(RT_BE2H_U16(u16Ptr[7])) != !!(RCTL & RCTL_CFI))
6905 {
6906 E1kLog2(("%s Packet filter: CFIs do not match in packet and RCTL (%d!=%d)\n",
6907 pThis->szPrf, E1K_SPEC_CFI(RT_BE2H_U16(u16Ptr[7])), !!(RCTL & RCTL_CFI)));
6908 return false;
6909 }
6910 }
6911 else
6912 E1kLog3(("%s VLAN filter: VLAN=%d\n", pThis->szPrf,
6913 E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7]))));
6914 if (!ASMBitTest(pThis->auVFTA, E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7]))))
6915 {
6916 E1kLog2(("%s Packet filter: no VLAN match (id=%d)\n",
6917 pThis->szPrf, E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7]))));
6918 return false;
6919 }
6920 }
6921 }
6922 /* Broadcast filtering */
6923 if (e1kIsBroadcast(pvBuf) && (RCTL & RCTL_BAM))
6924 return true;
6925 E1kLog2(("%s Packet filter: not a broadcast\n", pThis->szPrf));
6926 if (e1kIsMulticast(pvBuf))
6927 {
6928 /* Is multicast promiscuous enabled? */
6929 if (RCTL & RCTL_MPE)
6930 return true;
6931 E1kLog2(("%s Packet filter: no promiscuous multicast\n", pThis->szPrf));
6932 /* Try perfect matches first */
6933 if (e1kPerfectMatch(pThis, pvBuf))
6934 {
6935 pStatus->fPIF = true;
6936 return true;
6937 }
6938 E1kLog2(("%s Packet filter: no perfect match\n", pThis->szPrf));
6939 if (e1kImperfectMatch(pThis, pvBuf))
6940 return true;
6941 E1kLog2(("%s Packet filter: no imperfect match\n", pThis->szPrf));
6942 }
6943 else {
6944 /* Is unicast promiscuous enabled? */
6945 if (RCTL & RCTL_UPE)
6946 return true;
6947 E1kLog2(("%s Packet filter: no promiscuous unicast\n", pThis->szPrf));
6948 if (e1kPerfectMatch(pThis, pvBuf))
6949 {
6950 pStatus->fPIF = true;
6951 return true;
6952 }
6953 E1kLog2(("%s Packet filter: no perfect match\n", pThis->szPrf));
6954 }
6955 E1kLog2(("%s Packet filter: packet discarded\n", pThis->szPrf));
6956 return false;
6957}
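
/*
 * Illustrative note (not part of the original source): the VLAN checks above
 * index the frame as an array of 16-bit words, so for an 802.1q tagged frame:
 *
 *     u16Ptr[6]  ->  bytes 12..13, the TPID, byte-swapped and compared to VET
 *     u16Ptr[7]  ->  bytes 14..15, the TCI; E1K_SPEC_VLAN() extracts the low
 *                    12 bits (the VLAN ID) and E1K_SPEC_CFI() the CFI bit
 *
 * VET is typically programmed to 0x8100 by the guest driver.
 */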
6958
6959/**
6960 * @interface_method_impl{PDMINETWORKDOWN,pfnReceive}
6961 */
6962static DECLCALLBACK(int) e1kR3NetworkDown_Receive(PPDMINETWORKDOWN pInterface, const void *pvBuf, size_t cb)
6963{
6964 PE1KSTATECC pThisCC = RT_FROM_MEMBER(pInterface, E1KSTATECC, INetworkDown);
6965 PE1KSTATE pThis = pThisCC->pShared;
6966 PPDMDEVINS pDevIns = pThisCC->pDevInsR3;
6967 int rc = VINF_SUCCESS;
6968
6969 /*
6970 * Drop packets if the VM is not running yet/anymore.
6971 */
6972 VMSTATE enmVMState = PDMDevHlpVMState(pDevIns);
6973 if ( enmVMState != VMSTATE_RUNNING
6974 && enmVMState != VMSTATE_RUNNING_LS)
6975 {
6976 E1kLog(("%s Dropping incoming packet as VM is not running.\n", pThis->szPrf));
6977 return VINF_SUCCESS;
6978 }
6979
6980 /* Discard incoming packets in locked state */
6981 if (!(RCTL & RCTL_EN) || pThis->fLocked || !(STATUS & STATUS_LU))
6982 {
6983 E1kLog(("%s Dropping incoming packet as receive operation is disabled.\n", pThis->szPrf));
6984 return VINF_SUCCESS;
6985 }
6986
6987 STAM_PROFILE_ADV_START(&pThis->StatReceive, a);
6988
6989 //e1kR3CsEnterAsserted(pThis);
6990
6991 e1kPacketDump(pDevIns, pThis, (const uint8_t*)pvBuf, cb, "<-- Incoming");
6992
6993 /* Update stats */
6994 e1kR3CsEnterAsserted(pThis);
6995 E1K_INC_CNT32(TPR);
6996 E1K_ADD_CNT64(TORL, TORH, cb < 64? 64 : cb);
6997 e1kCsLeave(pThis);
6998
6999 STAM_PROFILE_ADV_START(&pThis->StatReceiveFilter, a);
7000 E1KRXDST status;
7001 RT_ZERO(status);
7002 bool fPassed = e1kAddressFilter(pThis, pvBuf, cb, &status);
7003 STAM_PROFILE_ADV_STOP(&pThis->StatReceiveFilter, a);
7004 if (fPassed)
7005 {
7006 rc = e1kHandleRxPacket(pDevIns, pThis, pvBuf, cb, status);
7007 }
7008 //e1kCsLeave(pThis);
7009 STAM_PROFILE_ADV_STOP(&pThis->StatReceive, a);
7010
7011 return rc;
7012}
7013
7014
7015/* -=-=-=-=- PDMILEDPORTS -=-=-=-=- */
7016
7017/**
7018 * @interface_method_impl{PDMILEDPORTS,pfnQueryStatusLed}
7019 */
7020static DECLCALLBACK(int) e1kR3QueryStatusLed(PPDMILEDPORTS pInterface, unsigned iLUN, PPDMLED *ppLed)
7021{
7022 if (iLUN == 0)
7023 {
7024 PE1KSTATECC pThisCC = RT_FROM_MEMBER(pInterface, E1KSTATECC, ILeds);
7025 *ppLed = &pThisCC->pShared->led;
7026 return VINF_SUCCESS;
7027 }
7028 return VERR_PDM_LUN_NOT_FOUND;
7029}
7030
7031
7032/* -=-=-=-=- PDMINETWORKCONFIG -=-=-=-=- */
7033
7034/**
7035 * @interface_method_impl{PDMINETWORKCONFIG,pfnGetMac}
7036 */
7037static DECLCALLBACK(int) e1kR3GetMac(PPDMINETWORKCONFIG pInterface, PRTMAC pMac)
7038{
7039 PE1KSTATECC pThisCC = RT_FROM_MEMBER(pInterface, E1KSTATECC, INetworkConfig);
7040 pThisCC->eeprom.getMac(pMac);
7041 return VINF_SUCCESS;
7042}
7043
7044/**
7045 * @interface_method_impl{PDMINETWORKCONFIG,pfnGetLinkState}
7046 */
7047static DECLCALLBACK(PDMNETWORKLINKSTATE) e1kR3GetLinkState(PPDMINETWORKCONFIG pInterface)
7048{
7049 PE1KSTATECC pThisCC = RT_FROM_MEMBER(pInterface, E1KSTATECC, INetworkConfig);
7050 PE1KSTATE pThis = pThisCC->pShared;
7051 if (STATUS & STATUS_LU)
7052 return PDMNETWORKLINKSTATE_UP;
7053 return PDMNETWORKLINKSTATE_DOWN;
7054}
7055
7056/**
7057 * @interface_method_impl{PDMINETWORKCONFIG,pfnSetLinkState}
7058 */
7059static DECLCALLBACK(int) e1kR3SetLinkState(PPDMINETWORKCONFIG pInterface, PDMNETWORKLINKSTATE enmState)
7060{
7061 PE1KSTATECC pThisCC = RT_FROM_MEMBER(pInterface, E1KSTATECC, INetworkConfig);
7062 PE1KSTATE pThis = pThisCC->pShared;
7063 PPDMDEVINS pDevIns = pThisCC->pDevInsR3;
7064
7065 E1kLog(("%s e1kR3SetLinkState: enmState=%d\n", pThis->szPrf, enmState));
7066 switch (enmState)
7067 {
7068 case PDMNETWORKLINKSTATE_UP:
7069 pThis->fCableConnected = true;
7070 /* If link was down, bring it up after a while. */
7071 if (!(STATUS & STATUS_LU))
7072 e1kBringLinkUpDelayed(pDevIns, pThis);
7073 break;
7074 case PDMNETWORKLINKSTATE_DOWN:
7075 pThis->fCableConnected = false;
7076 /* Always set the phy link state to down, regardless of the STATUS_LU bit.
7077 * We might have to set the link state before the driver initializes us. */
7078 Phy::setLinkStatus(&pThis->phy, false);
7079 /* If link was up, bring it down. */
7080 if (STATUS & STATUS_LU)
7081 e1kR3LinkDown(pDevIns, pThis, pThisCC);
7082 break;
7083 case PDMNETWORKLINKSTATE_DOWN_RESUME:
7084 /*
7085 * There is not much sense in bringing down the link if it has not come up yet.
7086     * If it is up though, we bring it down temporarily, then bring it up again.
7087 */
7088 if (STATUS & STATUS_LU)
7089 e1kR3LinkDownTemp(pDevIns, pThis, pThisCC);
7090 break;
7091 default:
7092 ;
7093 }
7094 return VINF_SUCCESS;
7095}
7096
7097
7098/* -=-=-=-=- PDMIBASE -=-=-=-=- */
7099
7100/**
7101 * @interface_method_impl{PDMIBASE,pfnQueryInterface}
7102 */
7103static DECLCALLBACK(void *) e1kR3QueryInterface(struct PDMIBASE *pInterface, const char *pszIID)
7104{
7105 PE1KSTATECC pThisCC = RT_FROM_MEMBER(pInterface, E1KSTATECC, IBase);
7106 Assert(&pThisCC->IBase == pInterface);
7107
7108 PDMIBASE_RETURN_INTERFACE(pszIID, PDMIBASE, &pThisCC->IBase);
7109 PDMIBASE_RETURN_INTERFACE(pszIID, PDMINETWORKDOWN, &pThisCC->INetworkDown);
7110 PDMIBASE_RETURN_INTERFACE(pszIID, PDMINETWORKCONFIG, &pThisCC->INetworkConfig);
7111 PDMIBASE_RETURN_INTERFACE(pszIID, PDMILEDPORTS, &pThisCC->ILeds);
7112 return NULL;
7113}
7114
7115
7116/* -=-=-=-=- Saved State -=-=-=-=- */
7117
7118/**
7119 * Saves the configuration.
7120 *
7121 * @param pThis The E1K state.
7122 * @param pSSM The handle to the saved state.
7123 */
7124static void e1kR3SaveConfig(PCPDMDEVHLPR3 pHlp, PE1KSTATE pThis, PSSMHANDLE pSSM)
7125{
7126 pHlp->pfnSSMPutMem(pSSM, &pThis->macConfigured, sizeof(pThis->macConfigured));
7127 pHlp->pfnSSMPutU32(pSSM, pThis->eChip);
7128}
7129
7130/**
7131 * @callback_method_impl{FNSSMDEVLIVEEXEC,Save basic configuration.}
7132 */
7133static DECLCALLBACK(int) e1kR3LiveExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM, uint32_t uPass)
7134{
7135 RT_NOREF(uPass);
7136 e1kR3SaveConfig(pDevIns->pHlpR3, PDMDEVINS_2_DATA(pDevIns, PE1KSTATE), pSSM);
7137 return VINF_SSM_DONT_CALL_AGAIN;
7138}
7139
7140/**
7141 * @callback_method_impl{FNSSMDEVSAVEPREP,Synchronize.}
7142 */
7143static DECLCALLBACK(int) e1kR3SavePrep(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
7144{
7145 RT_NOREF(pSSM);
7146 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
7147
7148 e1kCsEnterReturn(pThis, VERR_SEM_BUSY);
7149 e1kCsLeave(pThis);
7150 return VINF_SUCCESS;
7151#if 0
7152 /* 1) Prevent all threads from modifying the state and memory */
7153 //pThis->fLocked = true;
7154 /* 2) Cancel all timers */
7155#ifdef E1K_TX_DELAY
7156 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTXDTimer));
7157#endif /* E1K_TX_DELAY */
7158//#ifdef E1K_USE_TX_TIMERS
7159 if (pThis->fTidEnabled)
7160 {
7161 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTIDTimer));
7162#ifndef E1K_NO_TAD
7163 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTADTimer));
7164#endif /* E1K_NO_TAD */
7165 }
7166//#endif /* E1K_USE_TX_TIMERS */
7167#ifdef E1K_USE_RX_TIMERS
7168 e1kCancelTimer(pThis, pThis->CTX_SUFF(pRIDTimer));
7169 e1kCancelTimer(pThis, pThis->CTX_SUFF(pRADTimer));
7170#endif /* E1K_USE_RX_TIMERS */
7171 e1kCancelTimer(pThis, pThis->CTX_SUFF(pIntTimer));
7172 /* 3) Did I forget anything? */
7173 E1kLog(("%s Locked\n", pThis->szPrf));
7174 return VINF_SUCCESS;
7175#endif
7176}
7177
7178/**
7179 * @callback_method_impl{FNSSMDEVSAVEEXEC}
7180 */
7181static DECLCALLBACK(int) e1kR3SaveExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
7182{
7183 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
7184 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
7185 PCPDMDEVHLPR3 pHlp = pDevIns->pHlpR3;
7186
7187 e1kR3SaveConfig(pHlp, pThis, pSSM);
7188 pThisCC->eeprom.save(pHlp, pSSM);
7189 e1kDumpState(pThis);
7190 pHlp->pfnSSMPutMem(pSSM, pThis->auRegs, sizeof(pThis->auRegs));
7191 pHlp->pfnSSMPutBool(pSSM, pThis->fIntRaised);
7192 Phy::saveState(pHlp, pSSM, &pThis->phy);
7193 pHlp->pfnSSMPutU32(pSSM, pThis->uSelectedReg);
7194 pHlp->pfnSSMPutMem(pSSM, pThis->auMTA, sizeof(pThis->auMTA));
7195 pHlp->pfnSSMPutMem(pSSM, &pThis->aRecAddr, sizeof(pThis->aRecAddr));
7196 pHlp->pfnSSMPutMem(pSSM, pThis->auVFTA, sizeof(pThis->auVFTA));
7197 pHlp->pfnSSMPutU64(pSSM, pThis->u64AckedAt);
7198 pHlp->pfnSSMPutU16(pSSM, pThis->u16RxBSize);
7199 //pHlp->pfnSSMPutBool(pSSM, pThis->fDelayInts);
7200 //pHlp->pfnSSMPutBool(pSSM, pThis->fIntMaskUsed);
7201 pHlp->pfnSSMPutU16(pSSM, pThis->u16TxPktLen);
7202/** @todo State wrt to the TSE buffer is incomplete, so little point in
7203 * saving this actually. */
7204 pHlp->pfnSSMPutMem(pSSM, pThis->aTxPacketFallback, pThis->u16TxPktLen);
7205 pHlp->pfnSSMPutBool(pSSM, pThis->fIPcsum);
7206 pHlp->pfnSSMPutBool(pSSM, pThis->fTCPcsum);
7207 pHlp->pfnSSMPutMem(pSSM, &pThis->contextTSE, sizeof(pThis->contextTSE));
7208 pHlp->pfnSSMPutMem(pSSM, &pThis->contextNormal, sizeof(pThis->contextNormal));
7209 pHlp->pfnSSMPutBool(pSSM, pThis->fVTag);
7210 pHlp->pfnSSMPutU16(pSSM, pThis->u16VTagTCI);
7211#ifdef E1K_WITH_TXD_CACHE
7212# if 0
7213 pHlp->pfnSSMPutU8(pSSM, pThis->nTxDFetched);
7214 pHlp->pfnSSMPutMem(pSSM, pThis->aTxDescriptors,
7215 pThis->nTxDFetched * sizeof(pThis->aTxDescriptors[0]));
7216# else
7217 /*
7218 * There is no point in storing TX descriptor cache entries as we can simply
7219 * fetch them again. Moreover, normally the cache is always empty when we
7220 * save the state. Store zero entries for compatibility.
7221 */
7222 pHlp->pfnSSMPutU8(pSSM, 0);
7223# endif
7224#endif /* E1K_WITH_TXD_CACHE */
7225/** @todo GSO requires some more state here. */
7226 E1kLog(("%s State has been saved\n", pThis->szPrf));
7227 return VINF_SUCCESS;
7228}
7229
7230#if 0
7231/**
7232 * @callback_method_impl{FNSSMDEVSAVEDONE}
7233 */
7234static DECLCALLBACK(int) e1kSaveDone(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
7235{
7236 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
7237
7238 /* If VM is being powered off unlocking will result in assertions in PGM */
7239 if (PDMDevHlpGetVM(pDevIns)->enmVMState == VMSTATE_RUNNING)
7240 pThis->fLocked = false;
7241 else
7242 E1kLog(("%s VM is not running -- remain locked\n", pThis->szPrf));
7243 E1kLog(("%s Unlocked\n", pThis->szPrf));
7244 return VINF_SUCCESS;
7245}
7246#endif
7247
7248/**
7249 * @callback_method_impl{FNSSMDEVLOADPREP,Synchronize.}
7250 */
7251static DECLCALLBACK(int) e1kR3LoadPrep(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
7252{
7253 RT_NOREF(pSSM);
7254 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
7255
7256 e1kCsEnterReturn(pThis, VERR_SEM_BUSY);
7257 e1kCsLeave(pThis);
7258 return VINF_SUCCESS;
7259}
7260
7261/**
7262 * @callback_method_impl{FNSSMDEVLOADEXEC}
7263 */
7264static DECLCALLBACK(int) e1kR3LoadExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
7265{
7266 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
7267 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
7268 PCPDMDEVHLPR3 pHlp = pDevIns->pHlpR3;
7269 int rc;
7270
7271 if ( uVersion != E1K_SAVEDSTATE_VERSION
7272#ifdef E1K_WITH_TXD_CACHE
7273 && uVersion != E1K_SAVEDSTATE_VERSION_VBOX_42_VTAG
7274#endif /* E1K_WITH_TXD_CACHE */
7275 && uVersion != E1K_SAVEDSTATE_VERSION_VBOX_41
7276 && uVersion != E1K_SAVEDSTATE_VERSION_VBOX_30)
7277 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
7278
7279 if ( uVersion > E1K_SAVEDSTATE_VERSION_VBOX_30
7280 || uPass != SSM_PASS_FINAL)
7281 {
7282 /* config checks */
7283 RTMAC macConfigured;
7284 rc = pHlp->pfnSSMGetMem(pSSM, &macConfigured, sizeof(macConfigured));
7285 AssertRCReturn(rc, rc);
7286 if ( memcmp(&macConfigured, &pThis->macConfigured, sizeof(macConfigured))
7287 && (uPass == 0 || !PDMDevHlpVMTeleportedAndNotFullyResumedYet(pDevIns)) )
7288 LogRel(("%s: The mac address differs: config=%RTmac saved=%RTmac\n", pThis->szPrf, &pThis->macConfigured, &macConfigured));
7289
7290 E1KCHIP eChip;
7291 rc = pHlp->pfnSSMGetU32(pSSM, &eChip);
7292 AssertRCReturn(rc, rc);
7293 if (eChip != pThis->eChip)
7294 return pHlp->pfnSSMSetCfgError(pSSM, RT_SRC_POS, N_("The chip type differs: config=%u saved=%u"), pThis->eChip, eChip);
7295 }
7296
7297 if (uPass == SSM_PASS_FINAL)
7298 {
7299 if (uVersion > E1K_SAVEDSTATE_VERSION_VBOX_30)
7300 {
7301 rc = pThisCC->eeprom.load(pHlp, pSSM);
7302 AssertRCReturn(rc, rc);
7303 }
7304 /* the state */
7305 pHlp->pfnSSMGetMem(pSSM, &pThis->auRegs, sizeof(pThis->auRegs));
7306 pHlp->pfnSSMGetBool(pSSM, &pThis->fIntRaised);
7307 /** @todo PHY could be made a separate device with its own versioning */
7308 Phy::loadState(pHlp, pSSM, &pThis->phy);
7309 pHlp->pfnSSMGetU32(pSSM, &pThis->uSelectedReg);
7310 pHlp->pfnSSMGetMem(pSSM, &pThis->auMTA, sizeof(pThis->auMTA));
7311 pHlp->pfnSSMGetMem(pSSM, &pThis->aRecAddr, sizeof(pThis->aRecAddr));
7312 pHlp->pfnSSMGetMem(pSSM, &pThis->auVFTA, sizeof(pThis->auVFTA));
7313 pHlp->pfnSSMGetU64(pSSM, &pThis->u64AckedAt);
7314 pHlp->pfnSSMGetU16(pSSM, &pThis->u16RxBSize);
7315 //pHlp->pfnSSMGetBool(pSSM, pThis->fDelayInts);
7316 //pHlp->pfnSSMGetBool(pSSM, pThis->fIntMaskUsed);
7317 rc = pHlp->pfnSSMGetU16(pSSM, &pThis->u16TxPktLen);
7318 AssertRCReturn(rc, rc);
7319 if (pThis->u16TxPktLen > sizeof(pThis->aTxPacketFallback))
7320 pThis->u16TxPktLen = sizeof(pThis->aTxPacketFallback);
7321 pHlp->pfnSSMGetMem(pSSM, &pThis->aTxPacketFallback[0], pThis->u16TxPktLen);
7322 pHlp->pfnSSMGetBool(pSSM, &pThis->fIPcsum);
7323 pHlp->pfnSSMGetBool(pSSM, &pThis->fTCPcsum);
7324 pHlp->pfnSSMGetMem(pSSM, &pThis->contextTSE, sizeof(pThis->contextTSE));
7325 rc = pHlp->pfnSSMGetMem(pSSM, &pThis->contextNormal, sizeof(pThis->contextNormal));
7326 AssertRCReturn(rc, rc);
7327 if (uVersion > E1K_SAVEDSTATE_VERSION_VBOX_41)
7328 {
7329 pHlp->pfnSSMGetBool(pSSM, &pThis->fVTag);
7330 rc = pHlp->pfnSSMGetU16(pSSM, &pThis->u16VTagTCI);
7331 AssertRCReturn(rc, rc);
7332 }
7333 else
7334 {
7335 pThis->fVTag = false;
7336 pThis->u16VTagTCI = 0;
7337 }
7338#ifdef E1K_WITH_TXD_CACHE
7339 if (uVersion > E1K_SAVEDSTATE_VERSION_VBOX_42_VTAG)
7340 {
7341 rc = pHlp->pfnSSMGetU8(pSSM, &pThis->nTxDFetched);
7342 AssertRCReturn(rc, rc);
7343 if (pThis->nTxDFetched)
7344 pHlp->pfnSSMGetMem(pSSM, pThis->aTxDescriptors,
7345 pThis->nTxDFetched * sizeof(pThis->aTxDescriptors[0]));
7346 }
7347 else
7348 pThis->nTxDFetched = 0;
7349 /**
7350 * @todo Perhaps we should not store TXD cache as the entries can be
7351 * simply fetched again from guest's memory. Or can't they?
7352 */
7353#endif /* E1K_WITH_TXD_CACHE */
7354#ifdef E1K_WITH_RXD_CACHE
7355 /*
7356 * There is no point in storing the RX descriptor cache in the saved
7357 * state, we just need to make sure it is empty.
7358 */
7359 pThis->iRxDCurrent = pThis->nRxDFetched = 0;
7360#endif /* E1K_WITH_RXD_CACHE */
7361 rc = pHlp->pfnSSMHandleGetStatus(pSSM);
7362 AssertRCReturn(rc, rc);
7363
7364 /* derived state */
7365 e1kSetupGsoCtx(&pThis->GsoCtx, &pThis->contextTSE);
7366
7367 E1kLog(("%s State has been restored\n", pThis->szPrf));
7368 e1kDumpState(pThis);
7369 }
7370 return VINF_SUCCESS;
7371}
7372
7373/**
7374 * @callback_method_impl{FNSSMDEVLOADDONE, Link status adjustments after loading.}
7375 */
7376static DECLCALLBACK(int) e1kR3LoadDone(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
7377{
7378 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
7379 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
7380 RT_NOREF(pSSM);
7381
7382 /* Update promiscuous mode */
7383 if (pThisCC->pDrvR3)
7384 pThisCC->pDrvR3->pfnSetPromiscuousMode(pThisCC->pDrvR3, !!(RCTL & (RCTL_UPE | RCTL_MPE)));
7385
7386 /*
7387 * Force the link down here, since PDMNETWORKLINKSTATE_DOWN_RESUME is never
7388     * passed to us. We only go through all this if the link was up and we
7389     * were not teleported.
7390 */
7391 if ( (STATUS & STATUS_LU)
7392 && !PDMDevHlpVMTeleportedAndNotFullyResumedYet(pDevIns)
7393 && pThis->cMsLinkUpDelay)
7394 {
7395 e1kR3LinkDownTemp(pDevIns, pThis, pThisCC);
7396 }
7397 return VINF_SUCCESS;
7398}
7399
7400
7401
7402/* -=-=-=-=- Debug Info + Log Types -=-=-=-=- */
7403
7404/**
7405 * @callback_method_impl{FNRTSTRFORMATTYPE}
7406 */
7407static DECLCALLBACK(size_t) e1kR3FmtRxDesc(PFNRTSTROUTPUT pfnOutput,
7408 void *pvArgOutput,
7409 const char *pszType,
7410 void const *pvValue,
7411 int cchWidth,
7412 int cchPrecision,
7413 unsigned fFlags,
7414 void *pvUser)
7415{
7416 RT_NOREF(cchWidth, cchPrecision, fFlags, pvUser);
7417 AssertReturn(strcmp(pszType, "e1krxd") == 0, 0);
7418 E1KRXDESC* pDesc = (E1KRXDESC*)pvValue;
7419 if (!pDesc)
7420 return RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "NULL_RXD");
7421
7422 size_t cbPrintf = 0;
7423 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Address=%16LX Length=%04X Csum=%04X\n",
7424 pDesc->u64BufAddr, pDesc->u16Length, pDesc->u16Checksum);
7425 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, " STA: %s %s %s %s %s %s %s ERR: %s %s %s %s SPECIAL: %s VLAN=%03x PRI=%x",
7426 pDesc->status.fPIF ? "PIF" : "pif",
7427 pDesc->status.fIPCS ? "IPCS" : "ipcs",
7428 pDesc->status.fTCPCS ? "TCPCS" : "tcpcs",
7429 pDesc->status.fVP ? "VP" : "vp",
7430 pDesc->status.fIXSM ? "IXSM" : "ixsm",
7431 pDesc->status.fEOP ? "EOP" : "eop",
7432 pDesc->status.fDD ? "DD" : "dd",
7433 pDesc->status.fRXE ? "RXE" : "rxe",
7434 pDesc->status.fIPE ? "IPE" : "ipe",
7435 pDesc->status.fTCPE ? "TCPE" : "tcpe",
7436 pDesc->status.fCE ? "CE" : "ce",
7437 E1K_SPEC_CFI(pDesc->status.u16Special) ? "CFI" :"cfi",
7438 E1K_SPEC_VLAN(pDesc->status.u16Special),
7439 E1K_SPEC_PRI(pDesc->status.u16Special));
7440 return cbPrintf;
7441}
7442
7443/**
7444 * @callback_method_impl{FNRTSTRFORMATTYPE}
7445 */
7446static DECLCALLBACK(size_t) e1kR3FmtTxDesc(PFNRTSTROUTPUT pfnOutput,
7447 void *pvArgOutput,
7448 const char *pszType,
7449 void const *pvValue,
7450 int cchWidth,
7451 int cchPrecision,
7452 unsigned fFlags,
7453 void *pvUser)
7454{
7455 RT_NOREF(cchWidth, cchPrecision, fFlags, pvUser);
7456 AssertReturn(strcmp(pszType, "e1ktxd") == 0, 0);
7457 E1KTXDESC *pDesc = (E1KTXDESC*)pvValue;
7458 if (!pDesc)
7459 return RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "NULL_TXD");
7460
7461 size_t cbPrintf = 0;
7462 switch (e1kGetDescType(pDesc))
7463 {
7464 case E1K_DTYP_CONTEXT:
7465 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Type=Context\n"
7466 " IPCSS=%02X IPCSO=%02X IPCSE=%04X TUCSS=%02X TUCSO=%02X TUCSE=%04X\n"
7467 " TUCMD:%s%s%s %s %s PAYLEN=%04x HDRLEN=%04x MSS=%04x STA: %s",
7468 pDesc->context.ip.u8CSS, pDesc->context.ip.u8CSO, pDesc->context.ip.u16CSE,
7469 pDesc->context.tu.u8CSS, pDesc->context.tu.u8CSO, pDesc->context.tu.u16CSE,
7470 pDesc->context.dw2.fIDE ? " IDE":"",
7471 pDesc->context.dw2.fRS ? " RS" :"",
7472 pDesc->context.dw2.fTSE ? " TSE":"",
7473 pDesc->context.dw2.fIP ? "IPv4":"IPv6",
7474 pDesc->context.dw2.fTCP ? "TCP":"UDP",
7475 pDesc->context.dw2.u20PAYLEN,
7476 pDesc->context.dw3.u8HDRLEN,
7477 pDesc->context.dw3.u16MSS,
7478 pDesc->context.dw3.fDD?"DD":"");
7479 break;
7480 case E1K_DTYP_DATA:
7481 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Type=Data Address=%16LX DTALEN=%05X\n"
7482 " DCMD:%s%s%s%s%s%s%s STA:%s%s%s POPTS:%s%s SPECIAL:%s VLAN=%03x PRI=%x",
7483 pDesc->data.u64BufAddr,
7484 pDesc->data.cmd.u20DTALEN,
7485 pDesc->data.cmd.fIDE ? " IDE" :"",
7486 pDesc->data.cmd.fVLE ? " VLE" :"",
7487 pDesc->data.cmd.fRPS ? " RPS" :"",
7488 pDesc->data.cmd.fRS ? " RS" :"",
7489 pDesc->data.cmd.fTSE ? " TSE" :"",
7490 pDesc->data.cmd.fIFCS? " IFCS":"",
7491 pDesc->data.cmd.fEOP ? " EOP" :"",
7492 pDesc->data.dw3.fDD ? " DD" :"",
7493 pDesc->data.dw3.fEC ? " EC" :"",
7494 pDesc->data.dw3.fLC ? " LC" :"",
7495 pDesc->data.dw3.fTXSM? " TXSM":"",
7496 pDesc->data.dw3.fIXSM? " IXSM":"",
7497 E1K_SPEC_CFI(pDesc->data.dw3.u16Special) ? "CFI" :"cfi",
7498 E1K_SPEC_VLAN(pDesc->data.dw3.u16Special),
7499 E1K_SPEC_PRI(pDesc->data.dw3.u16Special));
7500 break;
7501 case E1K_DTYP_LEGACY:
7502 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Type=Legacy Address=%16LX DTALEN=%05X\n"
7503 " CMD:%s%s%s%s%s%s%s STA:%s%s%s CSO=%02x CSS=%02x SPECIAL:%s VLAN=%03x PRI=%x",
7504 pDesc->data.u64BufAddr,
7505 pDesc->legacy.cmd.u16Length,
7506 pDesc->legacy.cmd.fIDE ? " IDE" :"",
7507 pDesc->legacy.cmd.fVLE ? " VLE" :"",
7508 pDesc->legacy.cmd.fRPS ? " RPS" :"",
7509 pDesc->legacy.cmd.fRS ? " RS" :"",
7510 pDesc->legacy.cmd.fIC ? " IC" :"",
7511 pDesc->legacy.cmd.fIFCS? " IFCS":"",
7512 pDesc->legacy.cmd.fEOP ? " EOP" :"",
7513 pDesc->legacy.dw3.fDD ? " DD" :"",
7514 pDesc->legacy.dw3.fEC ? " EC" :"",
7515 pDesc->legacy.dw3.fLC ? " LC" :"",
7516 pDesc->legacy.cmd.u8CSO,
7517 pDesc->legacy.dw3.u8CSS,
7518 E1K_SPEC_CFI(pDesc->legacy.dw3.u16Special) ? "CFI" :"cfi",
7519 E1K_SPEC_VLAN(pDesc->legacy.dw3.u16Special),
7520 E1K_SPEC_PRI(pDesc->legacy.dw3.u16Special));
7521 break;
7522 default:
7523 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Invalid Transmit Descriptor");
7524 break;
7525 }
7526
7527 return cbPrintf;
7528}
7529
7530/** Initializes debug helpers (logging format types). */
7531static int e1kR3InitDebugHelpers(void)
7532{
7533 int rc = VINF_SUCCESS;
7534 static bool s_fHelpersRegistered = false;
7535 if (!s_fHelpersRegistered)
7536 {
7537 s_fHelpersRegistered = true;
7538 rc = RTStrFormatTypeRegister("e1krxd", e1kR3FmtRxDesc, NULL);
7539 AssertRCReturn(rc, rc);
7540 rc = RTStrFormatTypeRegister("e1ktxd", e1kR3FmtTxDesc, NULL);
7541 AssertRCReturn(rc, rc);
7542 }
7543 return rc;
7544}
7545
7546/**
7547 * Status info callback.
7548 *
7549 * @param pDevIns The device instance.
7550 * @param pHlp The output helpers.
7551 * @param pszArgs The arguments.
7552 */
7553static DECLCALLBACK(void) e1kR3Info(PPDMDEVINS pDevIns, PCDBGFINFOHLP pHlp, const char *pszArgs)
7554{
7555 RT_NOREF(pszArgs);
7556 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
7557 unsigned i;
7558 // bool fRcvRing = false;
7559 // bool fXmtRing = false;
7560
7561 /*
7562 * Parse args.
7563 if (pszArgs)
7564 {
7565 fRcvRing = strstr(pszArgs, "verbose") || strstr(pszArgs, "rcv");
7566 fXmtRing = strstr(pszArgs, "verbose") || strstr(pszArgs, "xmt");
7567 }
7568 */
7569
7570 /*
7571 * Show info.
7572 */
7573 pHlp->pfnPrintf(pHlp, "E1000 #%d: port=%04x mmio=%RGp mac-cfg=%RTmac %s%s%s\n",
7574 pDevIns->iInstance,
7575 PDMDevHlpIoPortGetMappingAddress(pDevIns, pThis->hIoPorts),
7576 PDMDevHlpMmioGetMappingAddress(pDevIns, pThis->hMmioRegion),
7577 &pThis->macConfigured, g_aChips[pThis->eChip].pcszName,
7578 pDevIns->fRCEnabled ? " RC" : "", pDevIns->fR0Enabled ? " R0" : "");
7579
7580 e1kR3CsEnterAsserted(pThis); /* Not sure why but PCNet does it */
7581
7582 for (i = 0; i < E1K_NUM_OF_32BIT_REGS; ++i)
7583 pHlp->pfnPrintf(pHlp, "%8.8s = %08x\n", g_aE1kRegMap[i].abbrev, pThis->auRegs[i]);
7584
7585 for (i = 0; i < RT_ELEMENTS(pThis->aRecAddr.array); i++)
7586 {
7587 E1KRAELEM* ra = pThis->aRecAddr.array + i;
7588 if (ra->ctl & RA_CTL_AV)
7589 {
7590 const char *pcszTmp;
7591 switch (ra->ctl & RA_CTL_AS)
7592 {
7593 case 0: pcszTmp = "DST"; break;
7594 case 1: pcszTmp = "SRC"; break;
7595 default: pcszTmp = "reserved";
7596 }
7597 pHlp->pfnPrintf(pHlp, "RA%02d: %s %RTmac\n", i, pcszTmp, ra->addr);
7598 }
7599 }
7600 unsigned cDescs = RDLEN / sizeof(E1KRXDESC);
7601 uint32_t rdh = RDH;
7602 pHlp->pfnPrintf(pHlp, "\n-- Receive Descriptors (%d total) --\n", cDescs);
7603 for (i = 0; i < cDescs; ++i)
7604 {
7605 E1KRXDESC desc;
7606 PDMDevHlpPCIPhysRead(pDevIns, e1kDescAddr(RDBAH, RDBAL, i),
7607 &desc, sizeof(desc));
7608 if (i == rdh)
7609 pHlp->pfnPrintf(pHlp, ">>> ");
7610 pHlp->pfnPrintf(pHlp, "%RGp: %R[e1krxd]\n", e1kDescAddr(RDBAH, RDBAL, i), &desc);
7611 }
7612#ifdef E1K_WITH_RXD_CACHE
7613 pHlp->pfnPrintf(pHlp, "\n-- Receive Descriptors in Cache (at %d (RDH %d)/ fetched %d / max %d) --\n",
7614 pThis->iRxDCurrent, RDH, pThis->nRxDFetched, E1K_RXD_CACHE_SIZE);
7615 if (rdh > pThis->iRxDCurrent)
7616 rdh -= pThis->iRxDCurrent;
7617 else
7618 rdh = cDescs + rdh - pThis->iRxDCurrent;
7619 for (i = 0; i < pThis->nRxDFetched; ++i)
7620 {
7621 if (i == pThis->iRxDCurrent)
7622 pHlp->pfnPrintf(pHlp, ">>> ");
7623 if (cDescs)
7624 pHlp->pfnPrintf(pHlp, "%RGp: %R[e1krxd]\n",
7625 e1kDescAddr(RDBAH, RDBAL, rdh++ % cDescs),
7626 &pThis->aRxDescriptors[i]);
7627 else
7628 pHlp->pfnPrintf(pHlp, "<lost>: %R[e1krxd]\n",
7629 &pThis->aRxDescriptors[i]);
7630 }
7631#endif /* E1K_WITH_RXD_CACHE */
7632
7633 cDescs = TDLEN / sizeof(E1KTXDESC);
7634 uint32_t tdh = TDH;
7635 pHlp->pfnPrintf(pHlp, "\n-- Transmit Descriptors (%d total) --\n", cDescs);
7636 for (i = 0; i < cDescs; ++i)
7637 {
7638 E1KTXDESC desc;
7639 PDMDevHlpPCIPhysRead(pDevIns, e1kDescAddr(TDBAH, TDBAL, i),
7640 &desc, sizeof(desc));
7641 if (i == tdh)
7642 pHlp->pfnPrintf(pHlp, ">>> ");
7643 pHlp->pfnPrintf(pHlp, "%RGp: %R[e1ktxd]\n", e1kDescAddr(TDBAH, TDBAL, i), &desc);
7644 }
7645#ifdef E1K_WITH_TXD_CACHE
7646 pHlp->pfnPrintf(pHlp, "\n-- Transmit Descriptors in Cache (at %d (TDH %d)/ fetched %d / max %d) --\n",
7647 pThis->iTxDCurrent, TDH, pThis->nTxDFetched, E1K_TXD_CACHE_SIZE);
7648 if (tdh > pThis->iTxDCurrent)
7649 tdh -= pThis->iTxDCurrent;
7650 else
7651 tdh = cDescs + tdh - pThis->iTxDCurrent;
7652 for (i = 0; i < pThis->nTxDFetched; ++i)
7653 {
7654 if (i == pThis->iTxDCurrent)
7655 pHlp->pfnPrintf(pHlp, ">>> ");
7656 if (cDescs)
7657 pHlp->pfnPrintf(pHlp, "%RGp: %R[e1ktxd]\n",
7658 e1kDescAddr(TDBAH, TDBAL, tdh++ % cDescs),
7659 &pThis->aTxDescriptors[i]);
7660 else
7661 pHlp->pfnPrintf(pHlp, "<lost>: %R[e1ktxd]\n",
7662 &pThis->aTxDescriptors[i]);
7663 }
7664#endif /* E1K_WITH_TXD_CACHE */
7665
7666
7667#ifdef E1K_INT_STATS
7668 pHlp->pfnPrintf(pHlp, "Interrupt attempts: %d\n", pThis->uStatIntTry);
7669 pHlp->pfnPrintf(pHlp, "Interrupts raised : %d\n", pThis->uStatInt);
7670 pHlp->pfnPrintf(pHlp, "Interrupts lowered: %d\n", pThis->uStatIntLower);
7671 pHlp->pfnPrintf(pHlp, "ICR outside ISR : %d\n", pThis->uStatNoIntICR);
7672 pHlp->pfnPrintf(pHlp, "IMS raised ints : %d\n", pThis->uStatIntIMS);
7673 pHlp->pfnPrintf(pHlp, "Interrupts skipped: %d\n", pThis->uStatIntSkip);
7674 pHlp->pfnPrintf(pHlp, "Masked interrupts : %d\n", pThis->uStatIntMasked);
7675 pHlp->pfnPrintf(pHlp, "Early interrupts : %d\n", pThis->uStatIntEarly);
7676 pHlp->pfnPrintf(pHlp, "Late interrupts : %d\n", pThis->uStatIntLate);
7677 pHlp->pfnPrintf(pHlp, "Lost interrupts : %d\n", pThis->iStatIntLost);
7678 pHlp->pfnPrintf(pHlp, "Interrupts by RX : %d\n", pThis->uStatIntRx);
7679 pHlp->pfnPrintf(pHlp, "Interrupts by TX : %d\n", pThis->uStatIntTx);
7680 pHlp->pfnPrintf(pHlp, "Interrupts by ICS : %d\n", pThis->uStatIntICS);
7681 pHlp->pfnPrintf(pHlp, "Interrupts by RDTR: %d\n", pThis->uStatIntRDTR);
7682 pHlp->pfnPrintf(pHlp, "Interrupts by RDMT: %d\n", pThis->uStatIntRXDMT0);
7683 pHlp->pfnPrintf(pHlp, "Interrupts by TXQE: %d\n", pThis->uStatIntTXQE);
7684 pHlp->pfnPrintf(pHlp, "TX int delay asked: %d\n", pThis->uStatTxIDE);
7685 pHlp->pfnPrintf(pHlp, "TX delayed: %d\n", pThis->uStatTxDelayed);
7686 pHlp->pfnPrintf(pHlp, "TX delayed expired: %d\n", pThis->uStatTxDelayExp);
7687 pHlp->pfnPrintf(pHlp, "TX no report asked: %d\n", pThis->uStatTxNoRS);
7688 pHlp->pfnPrintf(pHlp, "TX abs timer expd : %d\n", pThis->uStatTAD);
7689 pHlp->pfnPrintf(pHlp, "TX int timer expd : %d\n", pThis->uStatTID);
7690 pHlp->pfnPrintf(pHlp, "RX abs timer expd : %d\n", pThis->uStatRAD);
7691 pHlp->pfnPrintf(pHlp, "RX int timer expd : %d\n", pThis->uStatRID);
7692 pHlp->pfnPrintf(pHlp, "TX CTX descriptors: %d\n", pThis->uStatDescCtx);
7693 pHlp->pfnPrintf(pHlp, "TX DAT descriptors: %d\n", pThis->uStatDescDat);
7694 pHlp->pfnPrintf(pHlp, "TX LEG descriptors: %d\n", pThis->uStatDescLeg);
7695 pHlp->pfnPrintf(pHlp, "Received frames : %d\n", pThis->uStatRxFrm);
7696 pHlp->pfnPrintf(pHlp, "Transmitted frames: %d\n", pThis->uStatTxFrm);
7697 pHlp->pfnPrintf(pHlp, "TX frames up to 1514: %d\n", pThis->uStatTx1514);
7698 pHlp->pfnPrintf(pHlp, "TX frames up to 2962: %d\n", pThis->uStatTx2962);
7699 pHlp->pfnPrintf(pHlp, "TX frames up to 4410: %d\n", pThis->uStatTx4410);
7700 pHlp->pfnPrintf(pHlp, "TX frames up to 5858: %d\n", pThis->uStatTx5858);
7701 pHlp->pfnPrintf(pHlp, "TX frames up to 7306: %d\n", pThis->uStatTx7306);
7702 pHlp->pfnPrintf(pHlp, "TX frames up to 8754: %d\n", pThis->uStatTx8754);
7703 pHlp->pfnPrintf(pHlp, "TX frames up to 16384: %d\n", pThis->uStatTx16384);
7704 pHlp->pfnPrintf(pHlp, "TX frames up to 32768: %d\n", pThis->uStatTx32768);
7705 pHlp->pfnPrintf(pHlp, "Larger TX frames : %d\n", pThis->uStatTxLarge);
7706#endif /* E1K_INT_STATS */
7707
7708 e1kCsLeave(pThis);
7709}
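/*
 * Usage sketch (informational only): the dump above is produced by the DBGF
 * info handler registered as "e1k<instance>" in e1kR3Construct() below.
 * Assuming a VM named "myvm" and the first adapter instance, it can typically
 * be requested from the host shell with
 *     VBoxManage debugvm "myvm" info e1k0
 * or from the built-in debugger console with "info e1k0"; "myvm" is a
 * placeholder name.
 */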
7710
7711
7712
7713/* -=-=-=-=- PDMDEVREG -=-=-=-=- */
7714
7715/**
7716 * Detach notification.
7717 *
7718 * One port on the network card has been disconnected from the network.
7719 *
7720 * @param pDevIns The device instance.
7721 * @param iLUN The logical unit which is being detached.
7722 * @param fFlags Flags, combination of the PDMDEVATT_FLAGS_* \#defines.
7723 */
7724static DECLCALLBACK(void) e1kR3Detach(PPDMDEVINS pDevIns, unsigned iLUN, uint32_t fFlags)
7725{
7726 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
7727 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
7728 Log(("%s e1kR3Detach:\n", pThis->szPrf));
7729 RT_NOREF(fFlags);
7730
7731 AssertLogRelReturnVoid(iLUN == 0);
7732
7733 e1kR3CsEnterAsserted(pThis);
7734
7735 /* Mark device as detached. */
7736 pThis->fIsAttached = false;
7737 /*
7738 * Zero some important members.
7739 */
7740 pThisCC->pDrvBase = NULL;
7741 pThisCC->pDrvR3 = NULL;
7742#if 0 /** @todo @bugref{9218} ring-0 driver stuff */
7743 pThisR0->pDrvR0 = NIL_RTR0PTR;
7744 pThisRC->pDrvRC = NIL_RTRCPTR;
7745#endif
7746
7747 PDMDevHlpCritSectLeave(pDevIns, &pThis->cs);
7748}
7749
7750/**
7751 * Attach the network driver.
7752 *
7753 * One port on the network card has been connected to a network.
7754 *
7755 * @returns VBox status code.
7756 * @param pDevIns The device instance.
7757 * @param iLUN The logical unit which is being attached.
7758 * @param fFlags Flags, combination of the PDMDEVATT_FLAGS_* \#defines.
7759 *
7760 * @remarks This code path is not used during construction.
7761 */
7762static DECLCALLBACK(int) e1kR3Attach(PPDMDEVINS pDevIns, unsigned iLUN, uint32_t fFlags)
7763{
7764 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
7765 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
7766 LogFlow(("%s e1kR3Attach:\n", pThis->szPrf));
7767 RT_NOREF(fFlags);
7768
7769 AssertLogRelReturn(iLUN == 0, VERR_PDM_NO_SUCH_LUN);
7770
7771 e1kR3CsEnterAsserted(pThis);
7772
7773 /*
7774 * Attach the driver.
7775 */
7776 int rc = PDMDevHlpDriverAttach(pDevIns, 0, &pThisCC->IBase, &pThisCC->pDrvBase, "Network Port");
7777 if (RT_SUCCESS(rc))
7778 {
7779 pThisCC->pDrvR3 = PDMIBASE_QUERY_INTERFACE(pThisCC->pDrvBase, PDMINETWORKUP);
7780 AssertMsgStmt(pThisCC->pDrvR3, ("Failed to obtain the PDMINETWORKUP interface!\n"),
7781 rc = VERR_PDM_MISSING_INTERFACE_BELOW);
7782 if (RT_SUCCESS(rc))
7783 {
7784#if 0 /** @todo @bugref{9218} ring-0 driver stuff */
7785 pThisR0->pDrvR0 = PDMIBASER0_QUERY_INTERFACE(PDMIBASE_QUERY_INTERFACE(pThisCC->pDrvBase, PDMIBASER0), PDMINETWORKUP);
7786 pThisRC->pDrvRC = PDMIBASERC_QUERY_INTERFACE(PDMIBASE_QUERY_INTERFACE(pThisCC->pDrvBase, PDMIBASERC), PDMINETWORKUP);
7787#endif
7788 /* Mark device as attached. */
7789 pThis->fIsAttached = true;
7790 }
7791 }
7792 else if ( rc == VERR_PDM_NO_ATTACHED_DRIVER
7793 || rc == VERR_PDM_CFG_MISSING_DRIVER_NAME)
7794 {
7795 /* This should never happen because this function is not called
7796 * if there is no driver to attach! */
7797 Log(("%s No attached driver!\n", pThis->szPrf));
7798 }
7799
7800 /*
7801 * Temporarily set the link down if it was up, so that the guest notices
7802 * that the configuration of the network card has changed.
7803 */
7804 if ((STATUS & STATUS_LU) && RT_SUCCESS(rc))
7805 e1kR3LinkDownTemp(pDevIns, pThis, pThisCC);
7806
7807 PDMDevHlpCritSectLeave(pDevIns, &pThis->cs);
7808 return rc;
7809}
7810
7811/**
7812 * @copydoc FNPDMDEVPOWEROFF
7813 */
7814static DECLCALLBACK(void) e1kR3PowerOff(PPDMDEVINS pDevIns)
7815{
7816 /* Poke thread waiting for buffer space. */
7817 e1kWakeupReceive(pDevIns, PDMDEVINS_2_DATA(pDevIns, PE1KSTATE));
7818}
7819
7820/**
7821 * @copydoc FNPDMDEVRESET
7822 */
7823static DECLCALLBACK(void) e1kR3Reset(PPDMDEVINS pDevIns)
7824{
7825 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
7826 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
7827#ifdef E1K_TX_DELAY
7828 e1kCancelTimer(pDevIns, pThis, pThis->hTXDTimer);
7829#endif /* E1K_TX_DELAY */
7830 e1kCancelTimer(pDevIns, pThis, pThis->hIntTimer);
7831 e1kCancelTimer(pDevIns, pThis, pThis->hLUTimer);
7832 e1kXmitFreeBuf(pThis, pThisCC);
7833 pThis->u16TxPktLen = 0;
7834 pThis->fIPcsum = false;
7835 pThis->fTCPcsum = false;
7836 pThis->fIntMaskUsed = false;
7837 pThis->fDelayInts = false;
7838 pThis->fLocked = false;
7839 pThis->u64AckedAt = 0;
7840 e1kR3HardReset(pDevIns, pThis, pThisCC);
7841}
7842
7843/**
7844 * @copydoc FNPDMDEVSUSPEND
7845 */
7846static DECLCALLBACK(void) e1kR3Suspend(PPDMDEVINS pDevIns)
7847{
7848 /* Poke thread waiting for buffer space. */
7849 e1kWakeupReceive(pDevIns, PDMDEVINS_2_DATA(pDevIns, PE1KSTATE));
7850}
7851
7852/**
7853 * Device relocation callback.
7854 *
7855 * When this callback is called, the device instance data (and, if the
7856 * device has a GC component, its GC data) is being relocated and/or the
7857 * selectors have been changed. The device must use this chance to perform
7858 * the necessary pointer relocations and data updates.
7859 *
7860 * Before the GC code is executed for the first time, this function will be
7861 * called with a 0 delta so that GC pointer calculations can be done in one place.
7862 *
7863 * @param pDevIns Pointer to the device instance.
7864 * @param offDelta The relocation delta relative to the old location.
7865 *
7866 * @remark A relocation CANNOT fail.
7867 */
7868static DECLCALLBACK(void) e1kR3Relocate(PPDMDEVINS pDevIns, RTGCINTPTR offDelta)
7869{
7870 PE1KSTATERC pThisRC = PDMINS_2_DATA_RC(pDevIns, PE1KSTATERC);
7871 if (pThisRC)
7872 pThisRC->pDevInsRC = PDMDEVINS_2_RCPTR(pDevIns);
7873 RT_NOREF(offDelta);
7874}
7875
7876/**
7877 * Destruct a device instance.
7878 *
7879 * We need to free non-VM resources only.
7880 *
7881 * @returns VBox status code.
7882 * @param pDevIns The device instance data.
7883 * @thread EMT
7884 */
7885static DECLCALLBACK(int) e1kR3Destruct(PPDMDEVINS pDevIns)
7886{
7887 PDMDEV_CHECK_VERSIONS_RETURN_QUIET(pDevIns);
7888 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
7889
7890 e1kDumpState(pThis);
7891 E1kLog(("%s Destroying instance\n", pThis->szPrf));
7892 if (PDMDevHlpCritSectIsInitialized(pDevIns, &pThis->cs))
7893 {
7894 if (pThis->hEventMoreRxDescAvail != NIL_SUPSEMEVENT)
7895 {
7896 PDMDevHlpSUPSemEventSignal(pDevIns, pThis->hEventMoreRxDescAvail);
7897 RTThreadYield();
7898 PDMDevHlpSUPSemEventClose(pDevIns, pThis->hEventMoreRxDescAvail);
7899 pThis->hEventMoreRxDescAvail = NIL_SUPSEMEVENT;
7900 }
7901#ifdef E1K_WITH_TX_CS
7902 PDMDevHlpCritSectDelete(pDevIns, &pThis->csTx);
7903#endif /* E1K_WITH_TX_CS */
7904 PDMDevHlpCritSectDelete(pDevIns, &pThis->csRx);
7905 PDMDevHlpCritSectDelete(pDevIns, &pThis->cs);
7906 }
7907 return VINF_SUCCESS;
7908}
7909
7910
7911/**
7912 * Set PCI configuration space registers.
7913 *
7914 * @param pPciDev The PCI device structure to configure.
 * @param eChip The adapter type (index into g_aChips).
7915 * @thread EMT
7916 */
7917static void e1kR3ConfigurePciDev(PPDMPCIDEV pPciDev, E1KCHIP eChip)
7918{
7919 Assert(eChip < RT_ELEMENTS(g_aChips));
7920 /* Configure PCI Device, assume 32-bit mode ******************************/
7921 PDMPciDevSetVendorId(pPciDev, g_aChips[eChip].uPCIVendorId);
7922 PDMPciDevSetDeviceId(pPciDev, g_aChips[eChip].uPCIDeviceId);
7923 PDMPciDevSetWord( pPciDev, VBOX_PCI_SUBSYSTEM_VENDOR_ID, g_aChips[eChip].uPCISubsystemVendorId);
7924 PDMPciDevSetWord( pPciDev, VBOX_PCI_SUBSYSTEM_ID, g_aChips[eChip].uPCISubsystemId);
7925
7926 PDMPciDevSetWord( pPciDev, VBOX_PCI_COMMAND, 0x0000);
7927 /* DEVSEL Timing (medium device), 66 MHz Capable, New capabilities */
7928 PDMPciDevSetWord( pPciDev, VBOX_PCI_STATUS,
7929 VBOX_PCI_STATUS_DEVSEL_MEDIUM | VBOX_PCI_STATUS_CAP_LIST | VBOX_PCI_STATUS_66MHZ);
7930 /* Stepping A2 */
7931 PDMPciDevSetByte( pPciDev, VBOX_PCI_REVISION_ID, 0x02);
7932 /* Ethernet adapter */
7933 PDMPciDevSetByte( pPciDev, VBOX_PCI_CLASS_PROG, 0x00);
7934 PDMPciDevSetWord( pPciDev, VBOX_PCI_CLASS_DEVICE, 0x0200);
7935 /* normal single function Ethernet controller */
7936 PDMPciDevSetByte( pPciDev, VBOX_PCI_HEADER_TYPE, 0x00);
7937 /* Memory Register Base Address */
7938 PDMPciDevSetDWord(pPciDev, VBOX_PCI_BASE_ADDRESS_0, 0x00000000);
7939 /* Memory Flash Base Address */
7940 PDMPciDevSetDWord(pPciDev, VBOX_PCI_BASE_ADDRESS_1, 0x00000000);
7941 /* IO Register Base Address */
7942 PDMPciDevSetDWord(pPciDev, VBOX_PCI_BASE_ADDRESS_2, 0x00000001);
7943 /* Expansion ROM Base Address */
7944 PDMPciDevSetDWord(pPciDev, VBOX_PCI_ROM_ADDRESS, 0x00000000);
7945 /* Capabilities Pointer */
7946 PDMPciDevSetByte( pPciDev, VBOX_PCI_CAPABILITY_LIST, 0xDC);
7947 /* Interrupt Pin: INTA# */
7948 PDMPciDevSetByte( pPciDev, VBOX_PCI_INTERRUPT_PIN, 0x01);
7949 /* Max_Lat/Min_Gnt: very high priority and time slice */
7950 PDMPciDevSetByte( pPciDev, VBOX_PCI_MIN_GNT, 0xFF);
7951 PDMPciDevSetByte( pPciDev, VBOX_PCI_MAX_LAT, 0x00);
7952
7953 /* PCI Power Management Registers ****************************************/
7954 /* Capability ID: PCI Power Management Registers */
7955 PDMPciDevSetByte( pPciDev, 0xDC, VBOX_PCI_CAP_ID_PM);
7956 /* Next Item Pointer: PCI-X */
7957 PDMPciDevSetByte( pPciDev, 0xDC + 1, 0xE4);
7958 /* Power Management Capabilities: PM disabled, DSI */
7959 PDMPciDevSetWord( pPciDev, 0xDC + 2,
7960 0x0002 | VBOX_PCI_PM_CAP_DSI);
7961 /* Power Management Control / Status Register: PM disabled */
7962 PDMPciDevSetWord( pPciDev, 0xDC + 4, 0x0000);
7963 /* PMCSR_BSE Bridge Support Extensions: Not supported */
7964 PDMPciDevSetByte( pPciDev, 0xDC + 6, 0x00);
7965 /* Data Register: PM disabled, always 0 */
7966 PDMPciDevSetByte( pPciDev, 0xDC + 7, 0x00);
7967
7968 /* PCI-X Configuration Registers *****************************************/
7969 /* Capability ID: PCI-X Configuration Registers */
7970 PDMPciDevSetByte( pPciDev, 0xE4, VBOX_PCI_CAP_ID_PCIX);
7971#ifdef E1K_WITH_MSI
7972 PDMPciDevSetByte( pPciDev, 0xE4 + 1, 0x80);
7973#else
7974 /* Next Item Pointer: None (Message Signalled Interrupts are disabled) */
7975 PDMPciDevSetByte( pPciDev, 0xE4 + 1, 0x00);
7976#endif
7977 /* PCI-X Command: Enable Relaxed Ordering */
7978 PDMPciDevSetWord( pPciDev, 0xE4 + 2, VBOX_PCI_X_CMD_ERO);
7979 /* PCI-X Status: 32-bit, 66 MHz */
7980 /** @todo is this value really correct? fff8 doesn't look like actual PCI address */
7981 PDMPciDevSetDWord(pPciDev, 0xE4 + 4, 0x0040FFF8);
7982}
7983
7984/**
7985 * @interface_method_impl{PDMDEVREG,pfnConstruct}
7986 */
7987static DECLCALLBACK(int) e1kR3Construct(PPDMDEVINS pDevIns, int iInstance, PCFGMNODE pCfg)
7988{
7989 PDMDEV_CHECK_VERSIONS_RETURN(pDevIns);
7990 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
7991 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
7992 int rc;
7993
7994 /*
7995 * Initialize the instance data (state).
7996 * Note! Caller has initialized it to ZERO already.
7997 */
7998 RTStrPrintf(pThis->szPrf, sizeof(pThis->szPrf), "E1000#%d", iInstance);
7999 E1kLog(("%s Constructing new instance sizeof(E1KRXDESC)=%d\n", pThis->szPrf, sizeof(E1KRXDESC)));
8000 pThis->hEventMoreRxDescAvail = NIL_SUPSEMEVENT;
8001 pThis->u16TxPktLen = 0;
8002 pThis->fIPcsum = false;
8003 pThis->fTCPcsum = false;
8004 pThis->fIntMaskUsed = false;
8005 pThis->fDelayInts = false;
8006 pThis->fLocked = false;
8007 pThis->u64AckedAt = 0;
8008 pThis->led.u32Magic = PDMLED_MAGIC;
8009 pThis->u32PktNo = 1;
8010 pThis->fIsAttached = false;
8011
8012 pThisCC->pDevInsR3 = pDevIns;
8013 pThisCC->pShared = pThis;
8014
8015 /* Interfaces */
8016 pThisCC->IBase.pfnQueryInterface = e1kR3QueryInterface;
8017
8018 pThisCC->INetworkDown.pfnWaitReceiveAvail = e1kR3NetworkDown_WaitReceiveAvail;
8019 pThisCC->INetworkDown.pfnReceive = e1kR3NetworkDown_Receive;
8020 pThisCC->INetworkDown.pfnXmitPending = e1kR3NetworkDown_XmitPending;
8021
8022 pThisCC->ILeds.pfnQueryStatusLed = e1kR3QueryStatusLed;
8023
8024 pThisCC->INetworkConfig.pfnGetMac = e1kR3GetMac;
8025 pThisCC->INetworkConfig.pfnGetLinkState = e1kR3GetLinkState;
8026 pThisCC->INetworkConfig.pfnSetLinkState = e1kR3SetLinkState;
8027
8028 /*
8029 * Internal validations.
8030 */
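    /* The first E1K_NUM_OF_BINARY_SEARCHABLE entries of g_aE1kRegMap are
     * (presumably) looked up by binary search in the MMIO handlers, so make
     * sure the table is sorted by offset with monotonically increasing end
     * offsets. */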
8031 for (uint32_t iReg = 1; iReg < E1K_NUM_OF_BINARY_SEARCHABLE; iReg++)
8032 AssertLogRelMsgReturn( g_aE1kRegMap[iReg].offset > g_aE1kRegMap[iReg - 1].offset
8033 && g_aE1kRegMap[iReg].offset + g_aE1kRegMap[iReg].size
8034 >= g_aE1kRegMap[iReg - 1].offset + g_aE1kRegMap[iReg - 1].size,
8035 ("%s@%#xLB%#x vs %s@%#xLB%#x\n",
8036 g_aE1kRegMap[iReg].abbrev, g_aE1kRegMap[iReg].offset, g_aE1kRegMap[iReg].size,
8037 g_aE1kRegMap[iReg - 1].abbrev, g_aE1kRegMap[iReg - 1].offset, g_aE1kRegMap[iReg - 1].size),
8038 VERR_INTERNAL_ERROR_4);
8039
8040 /*
8041 * Validate configuration.
8042 */
8043 PDMDEV_VALIDATE_CONFIG_RETURN(pDevIns,
8044 "MAC|"
8045 "CableConnected|"
8046 "AdapterType|"
8047 "LineSpeed|"
8048 "ItrEnabled|"
8049 "ItrRxEnabled|"
8050 "EthernetCRC|"
8051 "GSOEnabled|"
8052 "LinkUpDelay|"
8053 "StatNo",
8054 "");
8055
8056 /** @todo LineSpeed unused! */
8057
8058 /*
8059 * Get config params
8060 */
8061 PCPDMDEVHLPR3 pHlp = pDevIns->pHlpR3;
8062 rc = pHlp->pfnCFGMQueryBytes(pCfg, "MAC", pThis->macConfigured.au8, sizeof(pThis->macConfigured.au8));
8063 if (RT_FAILURE(rc))
8064 return PDMDEV_SET_ERROR(pDevIns, rc,
8065 N_("Configuration error: Failed to get MAC address"));
8066 rc = pHlp->pfnCFGMQueryBool(pCfg, "CableConnected", &pThis->fCableConnected);
8067 if (RT_FAILURE(rc))
8068 return PDMDEV_SET_ERROR(pDevIns, rc,
8069 N_("Configuration error: Failed to get the value of 'CableConnected'"));
8070 rc = pHlp->pfnCFGMQueryU32(pCfg, "AdapterType", (uint32_t*)&pThis->eChip);
8071 if (RT_FAILURE(rc))
8072 return PDMDEV_SET_ERROR(pDevIns, rc,
8073 N_("Configuration error: Failed to get the value of 'AdapterType'"));
8074 Assert(pThis->eChip <= E1K_CHIP_82545EM);
8075
8076 rc = pHlp->pfnCFGMQueryBoolDef(pCfg, "EthernetCRC", &pThis->fEthernetCRC, true);
8077 if (RT_FAILURE(rc))
8078 return PDMDEV_SET_ERROR(pDevIns, rc,
8079 N_("Configuration error: Failed to get the value of 'EthernetCRC'"));
8080
8081 rc = pHlp->pfnCFGMQueryBoolDef(pCfg, "GSOEnabled", &pThis->fGSOEnabled, true);
8082 if (RT_FAILURE(rc))
8083 return PDMDEV_SET_ERROR(pDevIns, rc,
8084 N_("Configuration error: Failed to get the value of 'GSOEnabled'"));
8085
8086 rc = pHlp->pfnCFGMQueryBoolDef(pCfg, "ItrEnabled", &pThis->fItrEnabled, false);
8087 if (RT_FAILURE(rc))
8088 return PDMDEV_SET_ERROR(pDevIns, rc,
8089 N_("Configuration error: Failed to get the value of 'ItrEnabled'"));
8090
8091 rc = pHlp->pfnCFGMQueryBoolDef(pCfg, "ItrRxEnabled", &pThis->fItrRxEnabled, true);
8092 if (RT_FAILURE(rc))
8093 return PDMDEV_SET_ERROR(pDevIns, rc,
8094 N_("Configuration error: Failed to get the value of 'ItrRxEnabled'"));
8095
8096 rc = pHlp->pfnCFGMQueryBoolDef(pCfg, "TidEnabled", &pThis->fTidEnabled, false);
8097 if (RT_FAILURE(rc))
8098 return PDMDEV_SET_ERROR(pDevIns, rc,
8099 N_("Configuration error: Failed to get the value of 'TidEnabled'"));
8100
8101 /*
8102 * Increased the link up delay from 3 to 5 seconds to make sure a guest notices the link loss
8103 * and updates its network configuration when the link is restored. See @bugref{10114}.
8104 */
8105 rc = pHlp->pfnCFGMQueryU32Def(pCfg, "LinkUpDelay", (uint32_t*)&pThis->cMsLinkUpDelay, 5000); /* ms */
8106 if (RT_FAILURE(rc))
8107 return PDMDEV_SET_ERROR(pDevIns, rc,
8108 N_("Configuration error: Failed to get the value of 'LinkUpDelay'"));
8109 Assert(pThis->cMsLinkUpDelay <= 300000); /* less than 5 minutes */
8110 if (pThis->cMsLinkUpDelay > 5000)
8111 LogRel(("%s: WARNING! Link up delay is set to %u seconds!\n", pThis->szPrf, pThis->cMsLinkUpDelay / 1000));
8112 else if (pThis->cMsLinkUpDelay == 0)
8113 LogRel(("%s: WARNING! Link up delay is disabled!\n", pThis->szPrf));
8114
8115 uint32_t uStatNo = (uint32_t)iInstance;
8116 rc = pHlp->pfnCFGMQueryU32Def(pCfg, "StatNo", &uStatNo, (uint32_t)iInstance);
8117 if (RT_FAILURE(rc))
8118 return PDMDEV_SET_ERROR(pDevIns, rc, N_("Configuration error: Failed to get the \"StatNo\" value"));
8119
8120 LogRel(("%s: Chip=%s LinkUpDelay=%ums EthernetCRC=%s GSO=%s Itr=%s ItrRx=%s TID=%s R0=%s RC=%s\n", pThis->szPrf,
8121 g_aChips[pThis->eChip].pcszName, pThis->cMsLinkUpDelay,
8122 pThis->fEthernetCRC ? "on" : "off",
8123 pThis->fGSOEnabled ? "enabled" : "disabled",
8124 pThis->fItrEnabled ? "enabled" : "disabled",
8125 pThis->fItrRxEnabled ? "enabled" : "disabled",
8126 pThis->fTidEnabled ? "enabled" : "disabled",
8127 pDevIns->fR0Enabled ? "enabled" : "disabled",
8128 pDevIns->fRCEnabled ? "enabled" : "disabled"));
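    /*
     * Configuration sketch (illustrative only): the keys validated above live
     * under this device instance's CFGM node and are normally set up by Main.
     * For experiments they can usually be overridden via extradata, e.g.
     *     VBoxManage setextradata "myvm" \
     *         "VBoxInternal/Devices/e1000/0/Config/LinkUpDelay" 0
     * where "myvm" and the instance number are placeholders.
     */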
8129
8130 /*
8131 * Initialize sub-components and register everything with the VMM.
8132 */
8133
8134 /* Initialize the EEPROM. */
8135 pThisCC->eeprom.init(pThis->macConfigured);
8136
8137 /* Initialize internal PHY. */
8138 Phy::init(&pThis->phy, iInstance, pThis->eChip == E1K_CHIP_82543GC ? PHY_EPID_M881000 : PHY_EPID_M881011);
8139
8140 /* Initialize critical sections. We do our own locking. */
8141 rc = PDMDevHlpSetDeviceCritSect(pDevIns, PDMDevHlpCritSectGetNop(pDevIns));
8142 AssertRCReturn(rc, rc);
8143
8144 rc = PDMDevHlpCritSectInit(pDevIns, &pThis->cs, RT_SRC_POS, "E1000#%d", iInstance);
8145 AssertRCReturn(rc, rc);
8146 rc = PDMDevHlpCritSectInit(pDevIns, &pThis->csRx, RT_SRC_POS, "E1000#%dRX", iInstance);
8147 AssertRCReturn(rc, rc);
8148#ifdef E1K_WITH_TX_CS
8149 rc = PDMDevHlpCritSectInit(pDevIns, &pThis->csTx, RT_SRC_POS, "E1000#%dTX", iInstance);
8150 AssertRCReturn(rc, rc);
8151#endif
8152
8153 /* Saved state registration. */
8154 rc = PDMDevHlpSSMRegisterEx(pDevIns, E1K_SAVEDSTATE_VERSION, sizeof(E1KSTATE), NULL,
8155 NULL, e1kR3LiveExec, NULL,
8156 e1kR3SavePrep, e1kR3SaveExec, NULL,
8157 e1kR3LoadPrep, e1kR3LoadExec, e1kR3LoadDone);
8158 AssertRCReturn(rc, rc);
8159
8160 /* Set PCI config registers and register ourselves with the PCI bus. */
8161 PDMPCIDEV_ASSERT_VALID(pDevIns, pDevIns->apPciDevs[0]);
8162 e1kR3ConfigurePciDev(pDevIns->apPciDevs[0], pThis->eChip);
8163 rc = PDMDevHlpPCIRegister(pDevIns, pDevIns->apPciDevs[0]);
8164 AssertRCReturn(rc, rc);
8165
8166#ifdef E1K_WITH_MSI
8167 PDMMSIREG MsiReg;
8168 RT_ZERO(MsiReg);
8169 MsiReg.cMsiVectors = 1;
8170 MsiReg.iMsiCapOffset = 0x80;
8171 MsiReg.iMsiNextOffset = 0x0;
8172 MsiReg.fMsi64bit = false;
8173 rc = PDMDevHlpPCIRegisterMsi(pDevIns, &MsiReg);
8174 AssertRCReturn(rc, rc);
8175#endif
8176
8177 /*
8178 * Map our registers to memory space (region 0, see e1kR3ConfigurePciDev)
8179 * From the spec (regarding flags):
8180 * For registers that should be accessed as 32-bit double words,
8181 * partial writes (less than a 32-bit double word) are ignored.
8182 * Partial reads return all 32 bits of data regardless of the
8183 * byte enables.
8184 */
8185 rc = PDMDevHlpMmioCreateEx(pDevIns, E1K_MM_SIZE, IOMMMIO_FLAGS_READ_DWORD | IOMMMIO_FLAGS_WRITE_ONLY_DWORD,
8186 pDevIns->apPciDevs[0], 0 /*iPciRegion*/,
8187 e1kMMIOWrite, e1kMMIORead, NULL /*pfnFill*/, NULL /*pvUser*/, "E1000", &pThis->hMmioRegion);
8188 AssertRCReturn(rc, rc);
8189 rc = PDMDevHlpPCIIORegionRegisterMmio(pDevIns, 0, E1K_MM_SIZE, PCI_ADDRESS_SPACE_MEM, pThis->hMmioRegion, NULL);
8190 AssertRCReturn(rc, rc);
8191
8192 /* Map our registers to IO space (region 2, see e1kR3ConfigurePciDev) */
8193 static IOMIOPORTDESC const s_aExtDescs[] =
8194 {
8195 { "IOADDR", "IOADDR", NULL, NULL }, { "unused", "unused", NULL, NULL }, { "unused", "unused", NULL, NULL }, { "unused", "unused", NULL, NULL },
8196 { "IODATA", "IODATA", NULL, NULL }, { "unused", "unused", NULL, NULL }, { "unused", "unused", NULL, NULL }, { "unused", "unused", NULL, NULL },
8197 { NULL, NULL, NULL, NULL }
8198 };
8199 rc = PDMDevHlpIoPortCreate(pDevIns, E1K_IOPORT_SIZE, pDevIns->apPciDevs[0], 2 /*iPciRegion*/,
8200 e1kIOPortOut, e1kIOPortIn, NULL /*pvUser*/, "E1000", s_aExtDescs, &pThis->hIoPorts);
8201 AssertRCReturn(rc, rc);
8202 rc = PDMDevHlpPCIIORegionRegisterIo(pDevIns, 2, E1K_IOPORT_SIZE, pThis->hIoPorts);
8203 AssertRCReturn(rc, rc);
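    /* Access sketch (assumption, based on the IOADDR/IODATA descriptors above):
     * the I/O BAR gives indirect access to the register set -- a guest writes
     * a register offset to IOADDR (BAR2 + 0) and then reads or writes the
     * register contents through IODATA (BAR2 + 4), roughly:
     *     outl(uIoBase + 0, offReg);     // select register
     *     uValue = inl(uIoBase + 4);     // read the selected register
     * where uIoBase/offReg/uValue are illustrative names only. */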
8204
8205 /* Create transmit queue */
8206 rc = PDMDevHlpTaskCreate(pDevIns, PDMTASK_F_RZ, "E1000-Xmit", e1kR3TxTaskCallback, NULL, &pThis->hTxTask);
8207 AssertRCReturn(rc, rc);
8208
8209#ifdef E1K_TX_DELAY
8210 /* Create Transmit Delay Timer */
8211 rc = PDMDevHlpTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kR3TxDelayTimer, pThis,
8212 TMTIMER_FLAGS_NO_CRIT_SECT | TMTIMER_FLAGS_RING0, "E1000 Xmit Delay", &pThis->hTXDTimer);
8213 AssertRCReturn(rc, rc);
8214 rc = PDMDevHlpTimerSetCritSect(pDevIns, pThis->hTXDTimer, &pThis->csTx);
8215 AssertRCReturn(rc, rc);
8216#endif /* E1K_TX_DELAY */
8217
8218//#ifdef E1K_USE_TX_TIMERS
8219 if (pThis->fTidEnabled)
8220 {
8221 /* Create Transmit Interrupt Delay Timer */
8222 rc = PDMDevHlpTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kR3TxIntDelayTimer, pThis,
8223 TMTIMER_FLAGS_NO_CRIT_SECT | TMTIMER_FLAGS_RING0, "E1000 Xmit IRQ Delay", &pThis->hTIDTimer);
8224 AssertRCReturn(rc, rc);
8225
8226# ifndef E1K_NO_TAD
8227 /* Create Transmit Absolute Delay Timer */
8228 rc = PDMDevHlpTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kR3TxAbsDelayTimer, pThis,
8229 TMTIMER_FLAGS_NO_CRIT_SECT | TMTIMER_FLAGS_RING0, "E1000 Xmit Abs Delay", &pThis->hTADTimer);
8230 AssertRCReturn(rc, rc);
8231# endif /* E1K_NO_TAD */
8232 }
8233//#endif /* E1K_USE_TX_TIMERS */
8234
8235#ifdef E1K_USE_RX_TIMERS
8236 /* Create Receive Interrupt Delay Timer */
8237 rc = PDMDevHlpTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kR3RxIntDelayTimer, pThis,
8238 TMTIMER_FLAGS_NO_CRIT_SECT | TMTIMER_FLAGS_RING0, "E1000 Recv IRQ Delay", &pThis->hRIDTimer);
8239 AssertRCReturn(rc, rc);
8240
8241 /* Create Receive Absolute Delay Timer */
8242 rc = PDMDevHlpTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kR3RxAbsDelayTimer, pThis,
8243 TMTIMER_FLAGS_NO_CRIT_SECT | TMTIMER_FLAGS_RING0, "E1000 Recv Abs Delay", &pThis->hRADTimer);
8244 AssertRCReturn(rc, rc);
8245#endif /* E1K_USE_RX_TIMERS */
8246
8247 /* Create Late Interrupt Timer */
8248 rc = PDMDevHlpTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kR3LateIntTimer, pThis,
8249 TMTIMER_FLAGS_NO_CRIT_SECT | TMTIMER_FLAGS_RING0, "E1000 Late IRQ", &pThis->hIntTimer);
8250 AssertRCReturn(rc, rc);
8251
8252 /* Create Link Up Timer */
8253 rc = PDMDevHlpTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kR3LinkUpTimer, pThis,
8254 TMTIMER_FLAGS_NO_CRIT_SECT | TMTIMER_FLAGS_RING0, "E1000 Link Up", &pThis->hLUTimer);
8255 AssertRCReturn(rc, rc);
8256
8257 /* Register the info item */
8258 char szTmp[20];
8259 RTStrPrintf(szTmp, sizeof(szTmp), "e1k%d", iInstance);
8260 PDMDevHlpDBGFInfoRegister(pDevIns, szTmp, "E1000 info.", e1kR3Info);
8261
8262 /* Status driver */
8263 PPDMIBASE pBase;
8264 rc = PDMDevHlpDriverAttach(pDevIns, PDM_STATUS_LUN, &pThisCC->IBase, &pBase, "Status Port");
8265 if (RT_SUCCESS(rc))
8266 pThisCC->pLedsConnector = PDMIBASE_QUERY_INTERFACE(pBase, PDMILEDCONNECTORS);
8267 else if (rc == VERR_PDM_NO_ATTACHED_DRIVER)
8268 rc = VINF_SUCCESS;
8269 else
8270 return PDMDEV_SET_ERROR(pDevIns, rc, N_("Failed to attach the status LUN"));
8271
8272 /* Network driver */
8273 rc = PDMDevHlpDriverAttach(pDevIns, 0, &pThisCC->IBase, &pThisCC->pDrvBase, "Network Port");
8274 if (RT_SUCCESS(rc))
8275 {
8276 pThisCC->pDrvR3 = PDMIBASE_QUERY_INTERFACE(pThisCC->pDrvBase, PDMINETWORKUP);
8277 AssertMsgReturn(pThisCC->pDrvR3, ("Failed to obtain the PDMINETWORKUP interface!\n"), VERR_PDM_MISSING_INTERFACE_BELOW);
8278
8279#if 0 /** @todo @bugref{9218} ring-0 driver stuff */
8280 pThisR0->pDrvR0 = PDMIBASER0_QUERY_INTERFACE(PDMIBASE_QUERY_INTERFACE(pThisCC->pDrvBase, PDMIBASER0), PDMINETWORKUP);
8281 pThisRC->pDrvRC = PDMIBASERC_QUERY_INTERFACE(PDMIBASE_QUERY_INTERFACE(pThisCC->pDrvBase, PDMIBASERC), PDMINETWORKUP);
8282#endif
8283 /* Mark device as attached. */
8284 pThis->fIsAttached = true;
8285 }
8286 else if ( rc == VERR_PDM_NO_ATTACHED_DRIVER
8287 || rc == VERR_PDM_CFG_MISSING_DRIVER_NAME)
8288 {
8289 /* No error! */
8290 E1kLog(("%s This adapter is not attached to any network!\n", pThis->szPrf));
8291 }
8292 else
8293 return PDMDEV_SET_ERROR(pDevIns, rc, N_("Failed to attach the network LUN"));
8294
8295 rc = PDMDevHlpSUPSemEventCreate(pDevIns, &pThis->hEventMoreRxDescAvail);
8296 AssertRCReturn(rc, rc);
8297
8298 rc = e1kR3InitDebugHelpers();
8299 AssertRCReturn(rc, rc);
8300
8301 e1kR3HardReset(pDevIns, pThis, pThisCC);
8302
8303 /*
8304 * Register statistics.
8305 * The /Public/ bits are official and used by session info in the GUI.
8306 */
8307 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveBytes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES,
8308 "Amount of data received", "/Public/NetAdapter/%u/BytesReceived", uStatNo);
8309 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitBytes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES,
8310 "Amount of data transmitted", "/Public/NetAdapter/%u/BytesTransmitted", uStatNo);
8311 PDMDevHlpSTAMRegisterF(pDevIns, &pDevIns->iInstance, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
8312 "Device instance number", "/Public/NetAdapter/%u/%s", uStatNo, pDevIns->pReg->szName);
8313
8314 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatReceiveBytes, STAMTYPE_COUNTER, "ReceiveBytes", STAMUNIT_BYTES, "Amount of data received");
8315 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatTransmitBytes, STAMTYPE_COUNTER, "TransmitBytes", STAMUNIT_BYTES, "Amount of data transmitted");
8316
8317#if defined(VBOX_WITH_STATISTICS)
8318 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMMIOReadRZ, STAMTYPE_PROFILE, "MMIO/ReadRZ", STAMUNIT_TICKS_PER_CALL, "Profiling MMIO reads in RZ");
8319 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMMIOReadR3, STAMTYPE_PROFILE, "MMIO/ReadR3", STAMUNIT_TICKS_PER_CALL, "Profiling MMIO reads in R3");
8320 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMMIOWriteRZ, STAMTYPE_PROFILE, "MMIO/WriteRZ", STAMUNIT_TICKS_PER_CALL, "Profiling MMIO writes in RZ");
8321 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatMMIOWriteR3, STAMTYPE_PROFILE, "MMIO/WriteR3", STAMUNIT_TICKS_PER_CALL, "Profiling MMIO writes in R3");
8322 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatEEPROMRead, STAMTYPE_PROFILE, "EEPROM/Read", STAMUNIT_TICKS_PER_CALL, "Profiling EEPROM reads");
8323 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatEEPROMWrite, STAMTYPE_PROFILE, "EEPROM/Write", STAMUNIT_TICKS_PER_CALL, "Profiling EEPROM writes");
8324 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatIOReadRZ, STAMTYPE_PROFILE, "IO/ReadRZ", STAMUNIT_TICKS_PER_CALL, "Profiling IO reads in RZ");
8325 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatIOReadR3, STAMTYPE_PROFILE, "IO/ReadR3", STAMUNIT_TICKS_PER_CALL, "Profiling IO reads in R3");
8326 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatIOWriteRZ, STAMTYPE_PROFILE, "IO/WriteRZ", STAMUNIT_TICKS_PER_CALL, "Profiling IO writes in RZ");
8327 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatIOWriteR3, STAMTYPE_PROFILE, "IO/WriteR3", STAMUNIT_TICKS_PER_CALL, "Profiling IO writes in R3");
8328 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatLateIntTimer, STAMTYPE_PROFILE, "LateInt/Timer", STAMUNIT_TICKS_PER_CALL, "Profiling late int timer");
8329 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatLateInts, STAMTYPE_COUNTER, "LateInt/Occured", STAMUNIT_OCCURENCES, "Number of late interrupts");
8330 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatIntsRaised, STAMTYPE_COUNTER, "Interrupts/Raised", STAMUNIT_OCCURENCES, "Number of raised interrupts");
8331 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatIntsPrevented, STAMTYPE_COUNTER, "Interrupts/Prevented", STAMUNIT_OCCURENCES, "Number of prevented interrupts");
8332 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatReceive, STAMTYPE_PROFILE, "Receive/Total", STAMUNIT_TICKS_PER_CALL, "Profiling receive");
8333 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatReceiveCRC, STAMTYPE_PROFILE, "Receive/CRC", STAMUNIT_TICKS_PER_CALL, "Profiling receive checksumming");
8334 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatReceiveFilter, STAMTYPE_PROFILE, "Receive/Filter", STAMUNIT_TICKS_PER_CALL, "Profiling receive filtering");
8335 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatReceiveStore, STAMTYPE_PROFILE, "Receive/Store", STAMUNIT_TICKS_PER_CALL, "Profiling receive storing");
8336 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatRxOverflow, STAMTYPE_PROFILE, "RxOverflow", STAMUNIT_TICKS_PER_OCCURENCE, "Profiling RX overflows");
8337 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatRxOverflowWakeupRZ, STAMTYPE_COUNTER, "RxOverflowWakeupRZ", STAMUNIT_OCCURENCES, "Nr of RX overflow wakeups in RZ");
8338 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatRxOverflowWakeupR3, STAMTYPE_COUNTER, "RxOverflowWakeupR3", STAMUNIT_OCCURENCES, "Nr of RX overflow wakeups in R3");
8339 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatTransmitRZ, STAMTYPE_PROFILE, "Transmit/TotalRZ", STAMUNIT_TICKS_PER_CALL, "Profiling transmits in RZ");
8340 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatTransmitR3, STAMTYPE_PROFILE, "Transmit/TotalR3", STAMUNIT_TICKS_PER_CALL, "Profiling transmits in R3");
8341 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatTransmitSendRZ, STAMTYPE_PROFILE, "Transmit/SendRZ", STAMUNIT_TICKS_PER_CALL, "Profiling send transmit in RZ");
8342 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatTransmitSendR3, STAMTYPE_PROFILE, "Transmit/SendR3", STAMUNIT_TICKS_PER_CALL, "Profiling send transmit in R3");
8343
8344 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatTxDescCtxNormal, STAMTYPE_COUNTER, "TxDesc/ContexNormal", STAMUNIT_OCCURENCES, "Number of normal context descriptors");
8345 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatTxDescCtxTSE, STAMTYPE_COUNTER, "TxDesc/ContextTSE", STAMUNIT_OCCURENCES, "Number of TSE context descriptors");
8346 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatTxDescData, STAMTYPE_COUNTER, "TxDesc/Data", STAMUNIT_OCCURENCES, "Number of TX data descriptors");
8347 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatTxDescLegacy, STAMTYPE_COUNTER, "TxDesc/Legacy", STAMUNIT_OCCURENCES, "Number of TX legacy descriptors");
8348 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatTxDescTSEData, STAMTYPE_COUNTER, "TxDesc/TSEData", STAMUNIT_OCCURENCES, "Number of TX TSE data descriptors");
8349 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatTxPathFallback, STAMTYPE_COUNTER, "TxPath/Fallback", STAMUNIT_OCCURENCES, "Fallback TSE descriptor path");
8350 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatTxPathGSO, STAMTYPE_COUNTER, "TxPath/GSO", STAMUNIT_OCCURENCES, "GSO TSE descriptor path");
8351 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatTxPathRegular, STAMTYPE_COUNTER, "TxPath/Normal", STAMUNIT_OCCURENCES, "Regular descriptor path");
8352 PDMDevHlpSTAMRegister(pDevIns, &pThis->StatPHYAccesses, STAMTYPE_COUNTER, "PHYAccesses", STAMUNIT_OCCURENCES, "Number of PHY accesses");
8353 for (unsigned iReg = 0; iReg < E1K_NUM_OF_REGS; iReg++)
8354 {
8355 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->aStatRegReads[iReg], STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
8356 g_aE1kRegMap[iReg].name, "Regs/%s-Reads", g_aE1kRegMap[iReg].abbrev);
8357 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->aStatRegWrites[iReg], STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
8358 g_aE1kRegMap[iReg].name, "Regs/%s-Writes", g_aE1kRegMap[iReg].abbrev);
8359 }
8360#endif /* VBOX_WITH_STATISTICS */
8361
8362#ifdef E1K_INT_STATS
8363 PDMDevHlpSTAMRegister(pDevIns, &pThis->u64ArmedAt, STAMTYPE_U64, "u64ArmedAt", STAMUNIT_NS, NULL);
8364 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatMaxTxDelay, STAMTYPE_U64, "uStatMaxTxDelay", STAMUNIT_NS, NULL);
8365 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatInt, STAMTYPE_U32, "uStatInt", STAMUNIT_NS, NULL);
8366 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatIntTry, STAMTYPE_U32, "uStatIntTry", STAMUNIT_NS, NULL);
8367 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatIntLower, STAMTYPE_U32, "uStatIntLower", STAMUNIT_NS, NULL);
8368 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatNoIntICR, STAMTYPE_U32, "uStatNoIntICR", STAMUNIT_NS, NULL);
8369 PDMDevHlpSTAMRegister(pDevIns, &pThis->iStatIntLost, STAMTYPE_U32, "iStatIntLost", STAMUNIT_NS, NULL);
8370 PDMDevHlpSTAMRegister(pDevIns, &pThis->iStatIntLostOne, STAMTYPE_U32, "iStatIntLostOne", STAMUNIT_NS, NULL);
8371 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatIntIMS, STAMTYPE_U32, "uStatIntIMS", STAMUNIT_NS, NULL);
8372 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatIntSkip, STAMTYPE_U32, "uStatIntSkip", STAMUNIT_NS, NULL);
8373 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatIntLate, STAMTYPE_U32, "uStatIntLate", STAMUNIT_NS, NULL);
8374 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatIntMasked, STAMTYPE_U32, "uStatIntMasked", STAMUNIT_NS, NULL);
8375 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatIntEarly, STAMTYPE_U32, "uStatIntEarly", STAMUNIT_NS, NULL);
8376 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatIntRx, STAMTYPE_U32, "uStatIntRx", STAMUNIT_NS, NULL);
8377 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatIntTx, STAMTYPE_U32, "uStatIntTx", STAMUNIT_NS, NULL);
8378 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatIntICS, STAMTYPE_U32, "uStatIntICS", STAMUNIT_NS, NULL);
8379 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatIntRDTR, STAMTYPE_U32, "uStatIntRDTR", STAMUNIT_NS, NULL);
8380 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatIntRXDMT0, STAMTYPE_U32, "uStatIntRXDMT0", STAMUNIT_NS, NULL);
8381 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatIntTXQE, STAMTYPE_U32, "uStatIntTXQE", STAMUNIT_NS, NULL);
8382 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatTxNoRS, STAMTYPE_U32, "uStatTxNoRS", STAMUNIT_NS, NULL);
8383 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatTxIDE, STAMTYPE_U32, "uStatTxIDE", STAMUNIT_NS, NULL);
8384 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatTxDelayed, STAMTYPE_U32, "uStatTxDelayed", STAMUNIT_NS, NULL);
8385 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatTxDelayExp, STAMTYPE_U32, "uStatTxDelayExp", STAMUNIT_NS, NULL);
8386 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatTAD, STAMTYPE_U32, "uStatTAD", STAMUNIT_NS, NULL);
8387 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatTID, STAMTYPE_U32, "uStatTID", STAMUNIT_NS, NULL);
8388 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatRAD, STAMTYPE_U32, "uStatRAD", STAMUNIT_NS, NULL);
8389 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatRID, STAMTYPE_U32, "uStatRID", STAMUNIT_NS, NULL);
8390 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatRxFrm, STAMTYPE_U32, "uStatRxFrm", STAMUNIT_NS, NULL);
8391 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatTxFrm, STAMTYPE_U32, "uStatTxFrm", STAMUNIT_NS, NULL);
8392 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatDescCtx, STAMTYPE_U32, "uStatDescCtx", STAMUNIT_NS, NULL);
8393 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatDescDat, STAMTYPE_U32, "uStatDescDat", STAMUNIT_NS, NULL);
8394 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatDescLeg, STAMTYPE_U32, "uStatDescLeg", STAMUNIT_NS, NULL);
8395 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatTx1514, STAMTYPE_U32, "uStatTx1514", STAMUNIT_NS, NULL);
8396 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatTx2962, STAMTYPE_U32, "uStatTx2962", STAMUNIT_NS, NULL);
8397 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatTx4410, STAMTYPE_U32, "uStatTx4410", STAMUNIT_NS, NULL);
8398 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatTx5858, STAMTYPE_U32, "uStatTx5858", STAMUNIT_NS, NULL);
8399 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatTx7306, STAMTYPE_U32, "uStatTx7306", STAMUNIT_NS, NULL);
8400 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatTx8754, STAMTYPE_U32, "uStatTx8754", STAMUNIT_NS, NULL);
8401 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatTx16384, STAMTYPE_U32, "uStatTx16384", STAMUNIT_NS, NULL);
8402 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatTx32768, STAMTYPE_U32, "uStatTx32768", STAMUNIT_NS, NULL);
8403 PDMDevHlpSTAMRegister(pDevIns, &pThis->uStatTxLarge, STAMTYPE_U32, "uStatTxLarge", STAMUNIT_NS, NULL);
8404#endif /* E1K_INT_STATS */
8405
8406 return VINF_SUCCESS;
8407}
8408
8409#else /* !IN_RING3 */
8410
8411/**
8412 * @callback_method_impl{PDMDEVREGR0,pfnConstruct}
8413 */
8414static DECLCALLBACK(int) e1kRZConstruct(PPDMDEVINS pDevIns)
8415{
8416 PDMDEV_CHECK_VERSIONS_RETURN(pDevIns);
8417 PE1KSTATE pThis = PDMDEVINS_2_DATA(pDevIns, PE1KSTATE);
8418 PE1KSTATECC pThisCC = PDMDEVINS_2_DATA_CC(pDevIns, PE1KSTATECC);
8419
8420 /* Initialize context specific state data: */
8421 pThisCC->CTX_SUFF(pDevIns) = pDevIns;
8422 /** @todo @bugref{9218} ring-0 driver stuff */
8423 pThisCC->CTX_SUFF(pDrv) = NULL;
8424 pThisCC->CTX_SUFF(pTxSg) = NULL;
8425
8426 /* Configure critical sections the same way: */
8427 int rc = PDMDevHlpSetDeviceCritSect(pDevIns, PDMDevHlpCritSectGetNop(pDevIns));
8428 AssertRCReturn(rc, rc);
8429
8430 /* Set up MMIO and I/O port callbacks for this context: */
8431 rc = PDMDevHlpMmioSetUpContext(pDevIns, pThis->hMmioRegion, e1kMMIOWrite, e1kMMIORead, NULL /*pvUser*/);
8432 AssertRCReturn(rc, rc);
8433
8434 rc = PDMDevHlpIoPortSetUpContext(pDevIns, pThis->hIoPorts, e1kIOPortOut, e1kIOPortIn, NULL /*pvUser*/);
8435 AssertRCReturn(rc, rc);
8436
8437 return VINF_SUCCESS;
8438}
8439
8440#endif /* !IN_RING3 */
8441
8442/**
8443 * The device registration structure.
8444 */
8445const PDMDEVREG g_DeviceE1000 =
8446{
8447 /* .u32Version = */ PDM_DEVREG_VERSION,
8448 /* .uReserved0 = */ 0,
8449 /* .szName = */ "e1000",
8450 /* .fFlags = */ PDM_DEVREG_FLAGS_DEFAULT_BITS | PDM_DEVREG_FLAGS_RZ | PDM_DEVREG_FLAGS_NEW_STYLE,
8451 /* .fClass = */ PDM_DEVREG_CLASS_NETWORK,
8452 /* .cMaxInstances = */ ~0U,
8453 /* .uSharedVersion = */ 42,
8454 /* .cbInstanceShared = */ sizeof(E1KSTATE),
8455 /* .cbInstanceCC = */ sizeof(E1KSTATECC),
8456 /* .cbInstanceRC = */ sizeof(E1KSTATERC),
8457 /* .cMaxPciDevices = */ 1,
8458 /* .cMaxMsixVectors = */ 0,
8459 /* .pszDescription = */ "Intel PRO/1000 MT Desktop Ethernet.",
8460#if defined(IN_RING3)
8461 /* .pszRCMod = */ "VBoxDDRC.rc",
8462 /* .pszR0Mod = */ "VBoxDDR0.r0",
8463 /* .pfnConstruct = */ e1kR3Construct,
8464 /* .pfnDestruct = */ e1kR3Destruct,
8465 /* .pfnRelocate = */ e1kR3Relocate,
8466 /* .pfnMemSetup = */ NULL,
8467 /* .pfnPowerOn = */ NULL,
8468 /* .pfnReset = */ e1kR3Reset,
8469 /* .pfnSuspend = */ e1kR3Suspend,
8470 /* .pfnResume = */ NULL,
8471 /* .pfnAttach = */ e1kR3Attach,
8472 /* .pfnDetach = */ e1kR3Detach,
8473 /* .pfnQueryInterface = */ NULL,
8474 /* .pfnInitComplete = */ NULL,
8475 /* .pfnPowerOff = */ e1kR3PowerOff,
8476 /* .pfnSoftReset = */ NULL,
8477 /* .pfnReserved0 = */ NULL,
8478 /* .pfnReserved1 = */ NULL,
8479 /* .pfnReserved2 = */ NULL,
8480 /* .pfnReserved3 = */ NULL,
8481 /* .pfnReserved4 = */ NULL,
8482 /* .pfnReserved5 = */ NULL,
8483 /* .pfnReserved6 = */ NULL,
8484 /* .pfnReserved7 = */ NULL,
8485#elif defined(IN_RING0)
8486 /* .pfnEarlyConstruct = */ NULL,
8487 /* .pfnConstruct = */ e1kRZConstruct,
8488 /* .pfnDestruct = */ NULL,
8489 /* .pfnFinalDestruct = */ NULL,
8490 /* .pfnRequest = */ NULL,
8491 /* .pfnReserved0 = */ NULL,
8492 /* .pfnReserved1 = */ NULL,
8493 /* .pfnReserved2 = */ NULL,
8494 /* .pfnReserved3 = */ NULL,
8495 /* .pfnReserved4 = */ NULL,
8496 /* .pfnReserved5 = */ NULL,
8497 /* .pfnReserved6 = */ NULL,
8498 /* .pfnReserved7 = */ NULL,
8499#elif defined(IN_RC)
8500 /* .pfnConstruct = */ e1kRZConstruct,
8501 /* .pfnReserved0 = */ NULL,
8502 /* .pfnReserved1 = */ NULL,
8503 /* .pfnReserved2 = */ NULL,
8504 /* .pfnReserved3 = */ NULL,
8505 /* .pfnReserved4 = */ NULL,
8506 /* .pfnReserved5 = */ NULL,
8507 /* .pfnReserved6 = */ NULL,
8508 /* .pfnReserved7 = */ NULL,
8509#else
8510# error "Not in IN_RING3, IN_RING0 or IN_RC!"
8511#endif
8512 /* .u32VersionEnd = */ PDM_DEVREG_VERSION
8513};
8514
8515#endif /* !VBOX_DEVICE_STRUCT_TESTCASE */