VirtualBox

source: vbox/trunk/src/VBox/Devices/Network/DevE1000.cpp@ 63015

Last change on this file since 63015 was 62962, checked in by vboxsync, 9 years ago

Devices: warnings

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 318.3 KB
 
1/* $Id: DevE1000.cpp 62962 2016-08-04 09:00:52Z vboxsync $ */
2/** @file
3 * DevE1000 - Intel 82540EM Ethernet Controller Emulation.
4 *
5 * Implemented in accordance with the specification:
6 *
7 * PCI/PCI-X Family of Gigabit Ethernet Controllers Software Developer's Manual
8 * 82540EP/EM, 82541xx, 82544GC/EI, 82545GM/EM, 82546GB/EB, and 82547xx
9 *
10 * 317453-002 Revision 3.5
11 *
12 * @todo IPv6 checksum offloading support
13 * @todo Flexible Filter / Wakeup (optional?)
14 */
15
16/*
17 * Copyright (C) 2007-2016 Oracle Corporation
18 *
19 * This file is part of VirtualBox Open Source Edition (OSE), as
20 * available from http://www.virtualbox.org. This file is free software;
21 * you can redistribute it and/or modify it under the terms of the GNU
22 * General Public License (GPL) as published by the Free Software
23 * Foundation, in version 2 as it comes in the "COPYING" file of the
24 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
25 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_DEV_E1000
33#include <iprt/crc.h>
34#include <iprt/ctype.h>
35#include <iprt/net.h>
36#include <iprt/semaphore.h>
37#include <iprt/string.h>
38#include <iprt/time.h>
39#include <iprt/uuid.h>
40#include <VBox/vmm/pdmdev.h>
41#include <VBox/vmm/pdmnetifs.h>
42#include <VBox/vmm/pdmnetinline.h>
43#include <VBox/param.h>
44#include "VBoxDD.h"
45
46#include "DevEEPROM.h"
47#include "DevE1000Phy.h"
48
49
50/* Options *******************************************************************/
51/** @def E1K_INIT_RA0
52 * E1K_INIT_RA0 forces E1000 to set the first entry in the Receive Address filter
53 * table to the MAC address obtained from CFGM. Most guests read the MAC address from
54 * EEPROM and write it to RA[0] explicitly, but Mac OS X seems to depend on it
55 * being already set (see @bugref{4657}).
56 */
57#define E1K_INIT_RA0
58/** @def E1K_LSC_ON_SLU
59 * E1K_LSC_ON_SLU causes E1000 to generate a Link Status Change interrupt when
60 * the guest driver brings up the link via the STATUS.LU bit. Again, the only guest
61 * that requires it is Mac OS X (see @bugref{4657}).
62 */
63#define E1K_LSC_ON_SLU
64/** @def E1K_TX_DELAY
65 * E1K_TX_DELAY aims to improve the guest-to-host transfer rate for TCP streams
66 * by preventing packets from being sent immediately. It allows several packets
67 * to be sent in one batch, reducing the number of acknowledgments. Note that it
68 * effectively disables the R0 TX path, forcing transmission in R3.
69 */
70//#define E1K_TX_DELAY 150
71/** @def E1K_USE_TX_TIMERS
72 * E1K_USE_TX_TIMERS aims to reduce the number of generated TX interrupts if a
73 * guest driver sets the delays via the Transmit Interrupt Delay Value (TIDV)
74 * register. Enabling it showed no positive effects on existing guests so it
75 * stays disabled. See sections 3.2.7.1 and 3.4.3.1 in "8254x Family of Gigabit
76 * Ethernet Controllers Software Developer’s Manual" for more detailed
77 * explanation.
78 */
79//#define E1K_USE_TX_TIMERS
80/** @def E1K_NO_TAD
81 * E1K_NO_TAD disables one of the two timers enabled by E1K_USE_TX_TIMERS, the
82 * Transmit Absolute Delay timer. This timer sets the maximum time interval
83 * during which TX interrupts can be postponed (delayed). It has no effect
84 * if E1K_USE_TX_TIMERS is not defined.
85 */
86//#define E1K_NO_TAD
87/** @def E1K_REL_DEBUG
88 * E1K_REL_DEBUG enables debug logging of l1, l2, l3 in release build.
89 */
90//#define E1K_REL_DEBUG
91/** @def E1K_INT_STATS
92 * E1K_INT_STATS enables collection of internal statistics used for
93 * debugging of delayed interrupts, etc.
94 */
95//#define E1K_INT_STATS
96/** @def E1K_WITH_MSI
97 * E1K_WITH_MSI enables rudimentary MSI support. Not implemented.
98 */
99//#define E1K_WITH_MSI
100/** @def E1K_WITH_TX_CS
101 * E1K_WITH_TX_CS protects e1kXmitPending with a critical section.
102 */
103#define E1K_WITH_TX_CS
104/** @def E1K_WITH_TXD_CACHE
105 * E1K_WITH_TXD_CACHE causes E1000 to fetch multiple TX descriptors in a
106 * single physical memory read (or two if it wraps around the end of TX
107 * descriptor ring). It is required for proper functioning of bandwidth
108 * resource control as it allows computing the exact sizes of packets prior
109 * to allocating their buffers (see @bugref{5582}).
110 */
111#define E1K_WITH_TXD_CACHE
112/** @def E1K_WITH_RXD_CACHE
113 * E1K_WITH_RXD_CACHE causes E1000 to fetch multiple RX descriptors in a
114 * single physical memory read (or two if it wraps around the end of RX
115 * descriptor ring). Intel's packet driver for DOS needs this option in
116 * order to work properly (see @bugref{6217}).
117 */
118#define E1K_WITH_RXD_CACHE
119/* End of Options ************************************************************/
120
121#ifdef E1K_WITH_TXD_CACHE
122/**
123 * E1K_TXD_CACHE_SIZE specifies the maximum number of TX descriptors stored
124 * in the state structure. It limits the number of descriptors loaded in one
125 * batch read. For example, a Linux guest may use up to 20 descriptors per
126 * TSE packet. The largest TSE packet seen (Windows guest) was 45 descriptors.
127 */
128# define E1K_TXD_CACHE_SIZE 64u
129#endif /* E1K_WITH_TXD_CACHE */
130
131#ifdef E1K_WITH_RXD_CACHE
132/**
133 * E1K_RXD_CACHE_SIZE specifies the maximum number of RX descriptors stored
134 * in the state structure. It limits the number of descriptors loaded in one
135 * batch read. For example, an XP guest adds 15 RX descriptors at a time.
136 */
137# define E1K_RXD_CACHE_SIZE 16u
138#endif /* E1K_WITH_RXD_CACHE */
139
140
141/* Little helpers ************************************************************/
142#undef htons
143#undef ntohs
144#undef htonl
145#undef ntohl
146#define htons(x) ((((x) & 0xff00) >> 8) | (((x) & 0x00ff) << 8))
147#define ntohs(x) htons(x)
148#define htonl(x) ASMByteSwapU32(x)
149#define ntohl(x) htonl(x)
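/* Illustrative note, not part of the original source: on the little-endian
 * hosts targeted here these helpers simply swap bytes between host and
 * network order, e.g.
 *     htons(0x1234) == 0x3412
 *     ntohl(htonl(UINT32_C(0x11223344))) == UINT32_C(0x11223344)
 * ASMByteSwapU32() is assumed to be the IPRT 32-bit byte-swap routine. */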
150
151#ifndef DEBUG
152# ifdef E1K_REL_DEBUG
153# define DEBUG
154# define E1kLog(a) LogRel(a)
155# define E1kLog2(a) LogRel(a)
156# define E1kLog3(a) LogRel(a)
157# define E1kLogX(x, a) LogRel(a)
158//# define E1kLog3(a) do {} while (0)
159# else
160# define E1kLog(a) do {} while (0)
161# define E1kLog2(a) do {} while (0)
162# define E1kLog3(a) do {} while (0)
163# define E1kLogX(x, a) do {} while (0)
164# endif
165#else
166# define E1kLog(a) Log(a)
167# define E1kLog2(a) Log2(a)
168# define E1kLog3(a) Log3(a)
169# define E1kLogX(x, a) LogIt(x, LOG_GROUP, a)
170//# define E1kLog(a) do {} while (0)
171//# define E1kLog2(a) do {} while (0)
172//# define E1kLog3(a) do {} while (0)
173#endif
174
175#if 0
176# define LOG_ENABLED
177# define E1kLogRel(a) LogRel(a)
178# undef Log6
179# define Log6(a) LogRel(a)
180#else
181# define E1kLogRel(a) do { } while (0)
182#endif
183
184//#undef DEBUG
185
186#define STATE_TO_DEVINS(pThis) (((PE1KSTATE )pThis)->CTX_SUFF(pDevIns))
187#define E1K_RELOCATE(p, o) *(RTHCUINTPTR *)&p += o
188
189#define E1K_INC_CNT32(cnt) \
190do { \
191 if (cnt < UINT32_MAX) \
192 cnt++; \
193} while (0)
194
195#define E1K_ADD_CNT64(cntLo, cntHi, val) \
196do { \
197 uint64_t u64Cnt = RT_MAKE_U64(cntLo, cntHi); \
198 uint64_t tmp = u64Cnt; \
199 u64Cnt += val; \
200 if (tmp > u64Cnt ) \
201 u64Cnt = UINT64_MAX; \
202 cntLo = (uint32_t)u64Cnt; \
203 cntHi = (uint32_t)(u64Cnt >> 32); \
204} while (0)
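/* Usage sketch, illustration only (cbFrame is a hypothetical name): the 64-bit
 * statistics counters are kept as low/high 32-bit register pairs, so adding a
 * received frame's size to the "good octets received" counter would look like
 *     E1K_ADD_CNT64(GORCL, GORCH, cbFrame);
 * The temporary copy lets the macro detect unsigned wrap-around and clamp the
 * combined 64-bit value at UINT64_MAX instead of rolling over. */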
205
206#ifdef E1K_INT_STATS
207# define E1K_INC_ISTAT_CNT(cnt) do { ++cnt; } while (0)
208#else /* E1K_INT_STATS */
209# define E1K_INC_ISTAT_CNT(cnt) do { } while (0)
210#endif /* E1K_INT_STATS */
211
212
213/*****************************************************************************/
214
215typedef uint32_t E1KCHIP;
216#define E1K_CHIP_82540EM 0
217#define E1K_CHIP_82543GC 1
218#define E1K_CHIP_82545EM 2
219
220/** Different E1000 chips. */
221static const struct E1kChips
222{
223 uint16_t uPCIVendorId;
224 uint16_t uPCIDeviceId;
225 uint16_t uPCISubsystemVendorId;
226 uint16_t uPCISubsystemId;
227 const char *pcszName;
228} g_Chips[] =
229{
230 /* Vendor Device SSVendor SubSys Name */
231 { 0x8086,
232 /* Temporary code, as MSI-aware drivers dislike 0x100E. How to do that right? */
233#ifdef E1K_WITH_MSI
234 0x105E,
235#else
236 0x100E,
237#endif
238 0x8086, 0x001E, "82540EM" }, /* Intel 82540EM-A in Intel PRO/1000 MT Desktop */
239 { 0x8086, 0x1004, 0x8086, 0x1004, "82543GC" }, /* Intel 82543GC in Intel PRO/1000 T Server */
240 { 0x8086, 0x100F, 0x15AD, 0x0750, "82545EM" } /* Intel 82545EM-A in VMWare Network Adapter */
241};
242
243
244/* The size of register area mapped to I/O space */
245#define E1K_IOPORT_SIZE 0x8
246/* The size of memory-mapped register area */
247#define E1K_MM_SIZE 0x20000
248
249#define E1K_MAX_TX_PKT_SIZE 16288
250#define E1K_MAX_RX_PKT_SIZE 16384
251
252/*****************************************************************************/
253
254/** Gets the specified bits from the register. */
255#define GET_BITS(reg, bits) ((reg & reg##_##bits##_MASK) >> reg##_##bits##_SHIFT)
256#define GET_BITS_V(val, reg, bits) ((val & reg##_##bits##_MASK) >> reg##_##bits##_SHIFT)
257#define BITS(reg, bits, bitval) (bitval << reg##_##bits##_SHIFT)
258#define SET_BITS(reg, bits, bitval) do { reg = (reg & ~reg##_##bits##_MASK) | (bitval << reg##_##bits##_SHIFT); } while (0)
259#define SET_BITS_V(val, reg, bits, bitval) do { val = (val & ~reg##_##bits##_MASK) | (bitval << reg##_##bits##_SHIFT); } while (0)
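/* Worked expansion, illustration only: using the EERD fields defined below,
 *     GET_BITS(EERD, ADDR)
 * expands to
 *     ((EERD & EERD_ADDR_MASK) >> EERD_ADDR_SHIFT)
 * i.e. it extracts the EEPROM word address from bits 15:8 of EERD, while
 * SET_BITS(EERD, DATA, u16Val) stores the (hypothetical) u16Val into the
 * data field in bits 31:16. */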
260
261#define CTRL_SLU UINT32_C(0x00000040)
262#define CTRL_MDIO UINT32_C(0x00100000)
263#define CTRL_MDC UINT32_C(0x00200000)
264#define CTRL_MDIO_DIR UINT32_C(0x01000000)
265#define CTRL_MDC_DIR UINT32_C(0x02000000)
266#define CTRL_RESET UINT32_C(0x04000000)
267#define CTRL_VME UINT32_C(0x40000000)
268
269#define STATUS_LU UINT32_C(0x00000002)
270#define STATUS_TXOFF UINT32_C(0x00000010)
271
272#define EECD_EE_WIRES UINT32_C(0x0F)
273#define EECD_EE_REQ UINT32_C(0x40)
274#define EECD_EE_GNT UINT32_C(0x80)
275
276#define EERD_START UINT32_C(0x00000001)
277#define EERD_DONE UINT32_C(0x00000010)
278#define EERD_DATA_MASK UINT32_C(0xFFFF0000)
279#define EERD_DATA_SHIFT 16
280#define EERD_ADDR_MASK UINT32_C(0x0000FF00)
281#define EERD_ADDR_SHIFT 8
282
283#define MDIC_DATA_MASK UINT32_C(0x0000FFFF)
284#define MDIC_DATA_SHIFT 0
285#define MDIC_REG_MASK UINT32_C(0x001F0000)
286#define MDIC_REG_SHIFT 16
287#define MDIC_PHY_MASK UINT32_C(0x03E00000)
288#define MDIC_PHY_SHIFT 21
289#define MDIC_OP_WRITE UINT32_C(0x04000000)
290#define MDIC_OP_READ UINT32_C(0x08000000)
291#define MDIC_READY UINT32_C(0x10000000)
292#define MDIC_INT_EN UINT32_C(0x20000000)
293#define MDIC_ERROR UINT32_C(0x40000000)
294
295#define TCTL_EN UINT32_C(0x00000002)
296#define TCTL_PSP UINT32_C(0x00000008)
297
298#define RCTL_EN UINT32_C(0x00000002)
299#define RCTL_UPE UINT32_C(0x00000008)
300#define RCTL_MPE UINT32_C(0x00000010)
301#define RCTL_LPE UINT32_C(0x00000020)
302#define RCTL_LBM_MASK UINT32_C(0x000000C0)
303#define RCTL_LBM_SHIFT 6
304#define RCTL_RDMTS_MASK UINT32_C(0x00000300)
305#define RCTL_RDMTS_SHIFT 8
306#define RCTL_LBM_TCVR UINT32_C(3) /**< PHY or external SerDes loopback. */
307#define RCTL_MO_MASK UINT32_C(0x00003000)
308#define RCTL_MO_SHIFT 12
309#define RCTL_BAM UINT32_C(0x00008000)
310#define RCTL_BSIZE_MASK UINT32_C(0x00030000)
311#define RCTL_BSIZE_SHIFT 16
312#define RCTL_VFE UINT32_C(0x00040000)
313#define RCTL_CFIEN UINT32_C(0x00080000)
314#define RCTL_CFI UINT32_C(0x00100000)
315#define RCTL_BSEX UINT32_C(0x02000000)
316#define RCTL_SECRC UINT32_C(0x04000000)
317
318#define ICR_TXDW UINT32_C(0x00000001)
319#define ICR_TXQE UINT32_C(0x00000002)
320#define ICR_LSC UINT32_C(0x00000004)
321#define ICR_RXDMT0 UINT32_C(0x00000010)
322#define ICR_RXT0 UINT32_C(0x00000080)
323#define ICR_TXD_LOW UINT32_C(0x00008000)
324#define RDTR_FPD UINT32_C(0x80000000)
325
326#define PBA_st ((PBAST*)(pThis->auRegs + PBA_IDX))
327typedef struct
328{
329 unsigned rxa : 7;
330 unsigned rxa_r : 9;
331 unsigned txa : 16;
332} PBAST;
333AssertCompileSize(PBAST, 4);
334
335#define TXDCTL_WTHRESH_MASK 0x003F0000
336#define TXDCTL_WTHRESH_SHIFT 16
337#define TXDCTL_LWTHRESH_MASK 0xFE000000
338#define TXDCTL_LWTHRESH_SHIFT 25
339
340#define RXCSUM_PCSS_MASK UINT32_C(0x000000FF)
341#define RXCSUM_PCSS_SHIFT 0
342
343/** @name Register access macros
344 * @remarks These ASSUME a local variable @a pThis of type PE1KSTATE.
345 * @{ */
346#define CTRL pThis->auRegs[CTRL_IDX]
347#define STATUS pThis->auRegs[STATUS_IDX]
348#define EECD pThis->auRegs[EECD_IDX]
349#define EERD pThis->auRegs[EERD_IDX]
350#define CTRL_EXT pThis->auRegs[CTRL_EXT_IDX]
351#define FLA pThis->auRegs[FLA_IDX]
352#define MDIC pThis->auRegs[MDIC_IDX]
353#define FCAL pThis->auRegs[FCAL_IDX]
354#define FCAH pThis->auRegs[FCAH_IDX]
355#define FCT pThis->auRegs[FCT_IDX]
356#define VET pThis->auRegs[VET_IDX]
357#define ICR pThis->auRegs[ICR_IDX]
358#define ITR pThis->auRegs[ITR_IDX]
359#define ICS pThis->auRegs[ICS_IDX]
360#define IMS pThis->auRegs[IMS_IDX]
361#define IMC pThis->auRegs[IMC_IDX]
362#define RCTL pThis->auRegs[RCTL_IDX]
363#define FCTTV pThis->auRegs[FCTTV_IDX]
364#define TXCW pThis->auRegs[TXCW_IDX]
365#define RXCW pThis->auRegs[RXCW_IDX]
366#define TCTL pThis->auRegs[TCTL_IDX]
367#define TIPG pThis->auRegs[TIPG_IDX]
368#define AIFS pThis->auRegs[AIFS_IDX]
369#define LEDCTL pThis->auRegs[LEDCTL_IDX]
370#define PBA pThis->auRegs[PBA_IDX]
371#define FCRTL pThis->auRegs[FCRTL_IDX]
372#define FCRTH pThis->auRegs[FCRTH_IDX]
373#define RDFH pThis->auRegs[RDFH_IDX]
374#define RDFT pThis->auRegs[RDFT_IDX]
375#define RDFHS pThis->auRegs[RDFHS_IDX]
376#define RDFTS pThis->auRegs[RDFTS_IDX]
377#define RDFPC pThis->auRegs[RDFPC_IDX]
378#define RDBAL pThis->auRegs[RDBAL_IDX]
379#define RDBAH pThis->auRegs[RDBAH_IDX]
380#define RDLEN pThis->auRegs[RDLEN_IDX]
381#define RDH pThis->auRegs[RDH_IDX]
382#define RDT pThis->auRegs[RDT_IDX]
383#define RDTR pThis->auRegs[RDTR_IDX]
384#define RXDCTL pThis->auRegs[RXDCTL_IDX]
385#define RADV pThis->auRegs[RADV_IDX]
386#define RSRPD pThis->auRegs[RSRPD_IDX]
387#define TXDMAC pThis->auRegs[TXDMAC_IDX]
388#define TDFH pThis->auRegs[TDFH_IDX]
389#define TDFT pThis->auRegs[TDFT_IDX]
390#define TDFHS pThis->auRegs[TDFHS_IDX]
391#define TDFTS pThis->auRegs[TDFTS_IDX]
392#define TDFPC pThis->auRegs[TDFPC_IDX]
393#define TDBAL pThis->auRegs[TDBAL_IDX]
394#define TDBAH pThis->auRegs[TDBAH_IDX]
395#define TDLEN pThis->auRegs[TDLEN_IDX]
396#define TDH pThis->auRegs[TDH_IDX]
397#define TDT pThis->auRegs[TDT_IDX]
398#define TIDV pThis->auRegs[TIDV_IDX]
399#define TXDCTL pThis->auRegs[TXDCTL_IDX]
400#define TADV pThis->auRegs[TADV_IDX]
401#define TSPMT pThis->auRegs[TSPMT_IDX]
402#define CRCERRS pThis->auRegs[CRCERRS_IDX]
403#define ALGNERRC pThis->auRegs[ALGNERRC_IDX]
404#define SYMERRS pThis->auRegs[SYMERRS_IDX]
405#define RXERRC pThis->auRegs[RXERRC_IDX]
406#define MPC pThis->auRegs[MPC_IDX]
407#define SCC pThis->auRegs[SCC_IDX]
408#define ECOL pThis->auRegs[ECOL_IDX]
409#define MCC pThis->auRegs[MCC_IDX]
410#define LATECOL pThis->auRegs[LATECOL_IDX]
411#define COLC pThis->auRegs[COLC_IDX]
412#define DC pThis->auRegs[DC_IDX]
413#define TNCRS pThis->auRegs[TNCRS_IDX]
414/* #define SEC pThis->auRegs[SEC_IDX] Conflict with sys/time.h */
415#define CEXTERR pThis->auRegs[CEXTERR_IDX]
416#define RLEC pThis->auRegs[RLEC_IDX]
417#define XONRXC pThis->auRegs[XONRXC_IDX]
418#define XONTXC pThis->auRegs[XONTXC_IDX]
419#define XOFFRXC pThis->auRegs[XOFFRXC_IDX]
420#define XOFFTXC pThis->auRegs[XOFFTXC_IDX]
421#define FCRUC pThis->auRegs[FCRUC_IDX]
422#define PRC64 pThis->auRegs[PRC64_IDX]
423#define PRC127 pThis->auRegs[PRC127_IDX]
424#define PRC255 pThis->auRegs[PRC255_IDX]
425#define PRC511 pThis->auRegs[PRC511_IDX]
426#define PRC1023 pThis->auRegs[PRC1023_IDX]
427#define PRC1522 pThis->auRegs[PRC1522_IDX]
428#define GPRC pThis->auRegs[GPRC_IDX]
429#define BPRC pThis->auRegs[BPRC_IDX]
430#define MPRC pThis->auRegs[MPRC_IDX]
431#define GPTC pThis->auRegs[GPTC_IDX]
432#define GORCL pThis->auRegs[GORCL_IDX]
433#define GORCH pThis->auRegs[GORCH_IDX]
434#define GOTCL pThis->auRegs[GOTCL_IDX]
435#define GOTCH pThis->auRegs[GOTCH_IDX]
436#define RNBC pThis->auRegs[RNBC_IDX]
437#define RUC pThis->auRegs[RUC_IDX]
438#define RFC pThis->auRegs[RFC_IDX]
439#define ROC pThis->auRegs[ROC_IDX]
440#define RJC pThis->auRegs[RJC_IDX]
441#define MGTPRC pThis->auRegs[MGTPRC_IDX]
442#define MGTPDC pThis->auRegs[MGTPDC_IDX]
443#define MGTPTC pThis->auRegs[MGTPTC_IDX]
444#define TORL pThis->auRegs[TORL_IDX]
445#define TORH pThis->auRegs[TORH_IDX]
446#define TOTL pThis->auRegs[TOTL_IDX]
447#define TOTH pThis->auRegs[TOTH_IDX]
448#define TPR pThis->auRegs[TPR_IDX]
449#define TPT pThis->auRegs[TPT_IDX]
450#define PTC64 pThis->auRegs[PTC64_IDX]
451#define PTC127 pThis->auRegs[PTC127_IDX]
452#define PTC255 pThis->auRegs[PTC255_IDX]
453#define PTC511 pThis->auRegs[PTC511_IDX]
454#define PTC1023 pThis->auRegs[PTC1023_IDX]
455#define PTC1522 pThis->auRegs[PTC1522_IDX]
456#define MPTC pThis->auRegs[MPTC_IDX]
457#define BPTC pThis->auRegs[BPTC_IDX]
458#define TSCTC pThis->auRegs[TSCTC_IDX]
459#define TSCTFC pThis->auRegs[TSCTFC_IDX]
460#define RXCSUM pThis->auRegs[RXCSUM_IDX]
461#define WUC pThis->auRegs[WUC_IDX]
462#define WUFC pThis->auRegs[WUFC_IDX]
463#define WUS pThis->auRegs[WUS_IDX]
464#define MANC pThis->auRegs[MANC_IDX]
465#define IPAV pThis->auRegs[IPAV_IDX]
466#define WUPL pThis->auRegs[WUPL_IDX]
467/** @} */
468
469/**
470 * Indices of memory-mapped registers in register table.
471 */
472typedef enum
473{
474 CTRL_IDX,
475 STATUS_IDX,
476 EECD_IDX,
477 EERD_IDX,
478 CTRL_EXT_IDX,
479 FLA_IDX,
480 MDIC_IDX,
481 FCAL_IDX,
482 FCAH_IDX,
483 FCT_IDX,
484 VET_IDX,
485 ICR_IDX,
486 ITR_IDX,
487 ICS_IDX,
488 IMS_IDX,
489 IMC_IDX,
490 RCTL_IDX,
491 FCTTV_IDX,
492 TXCW_IDX,
493 RXCW_IDX,
494 TCTL_IDX,
495 TIPG_IDX,
496 AIFS_IDX,
497 LEDCTL_IDX,
498 PBA_IDX,
499 FCRTL_IDX,
500 FCRTH_IDX,
501 RDFH_IDX,
502 RDFT_IDX,
503 RDFHS_IDX,
504 RDFTS_IDX,
505 RDFPC_IDX,
506 RDBAL_IDX,
507 RDBAH_IDX,
508 RDLEN_IDX,
509 RDH_IDX,
510 RDT_IDX,
511 RDTR_IDX,
512 RXDCTL_IDX,
513 RADV_IDX,
514 RSRPD_IDX,
515 TXDMAC_IDX,
516 TDFH_IDX,
517 TDFT_IDX,
518 TDFHS_IDX,
519 TDFTS_IDX,
520 TDFPC_IDX,
521 TDBAL_IDX,
522 TDBAH_IDX,
523 TDLEN_IDX,
524 TDH_IDX,
525 TDT_IDX,
526 TIDV_IDX,
527 TXDCTL_IDX,
528 TADV_IDX,
529 TSPMT_IDX,
530 CRCERRS_IDX,
531 ALGNERRC_IDX,
532 SYMERRS_IDX,
533 RXERRC_IDX,
534 MPC_IDX,
535 SCC_IDX,
536 ECOL_IDX,
537 MCC_IDX,
538 LATECOL_IDX,
539 COLC_IDX,
540 DC_IDX,
541 TNCRS_IDX,
542 SEC_IDX,
543 CEXTERR_IDX,
544 RLEC_IDX,
545 XONRXC_IDX,
546 XONTXC_IDX,
547 XOFFRXC_IDX,
548 XOFFTXC_IDX,
549 FCRUC_IDX,
550 PRC64_IDX,
551 PRC127_IDX,
552 PRC255_IDX,
553 PRC511_IDX,
554 PRC1023_IDX,
555 PRC1522_IDX,
556 GPRC_IDX,
557 BPRC_IDX,
558 MPRC_IDX,
559 GPTC_IDX,
560 GORCL_IDX,
561 GORCH_IDX,
562 GOTCL_IDX,
563 GOTCH_IDX,
564 RNBC_IDX,
565 RUC_IDX,
566 RFC_IDX,
567 ROC_IDX,
568 RJC_IDX,
569 MGTPRC_IDX,
570 MGTPDC_IDX,
571 MGTPTC_IDX,
572 TORL_IDX,
573 TORH_IDX,
574 TOTL_IDX,
575 TOTH_IDX,
576 TPR_IDX,
577 TPT_IDX,
578 PTC64_IDX,
579 PTC127_IDX,
580 PTC255_IDX,
581 PTC511_IDX,
582 PTC1023_IDX,
583 PTC1522_IDX,
584 MPTC_IDX,
585 BPTC_IDX,
586 TSCTC_IDX,
587 TSCTFC_IDX,
588 RXCSUM_IDX,
589 WUC_IDX,
590 WUFC_IDX,
591 WUS_IDX,
592 MANC_IDX,
593 IPAV_IDX,
594 WUPL_IDX,
595 MTA_IDX,
596 RA_IDX,
597 VFTA_IDX,
598 IP4AT_IDX,
599 IP6AT_IDX,
600 WUPM_IDX,
601 FFLT_IDX,
602 FFMT_IDX,
603 FFVT_IDX,
604 PBM_IDX,
605 RA_82542_IDX,
606 MTA_82542_IDX,
607 VFTA_82542_IDX,
608 E1K_NUM_OF_REGS
609} E1kRegIndex;
610
611#define E1K_NUM_OF_32BIT_REGS MTA_IDX
612/** The number of registers with strictly increasing offset. */
613#define E1K_NUM_OF_BINARY_SEARCHABLE (WUPL_IDX + 1)
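/* Lookup sketch, an assumption rather than the actual routine: because the
 * first E1K_NUM_OF_BINARY_SEARCHABLE entries of g_aE1kRegMap (defined further
 * down) have strictly increasing, non-overlapping offsets, a register offset
 * can be resolved with a plain binary search:
 *
 *     int iLo = 0, iHi = E1K_NUM_OF_BINARY_SEARCHABLE;
 *     while (iLo < iHi)
 *     {
 *         int i = (iLo + iHi) / 2;
 *         if (offReg < g_aE1kRegMap[i].offset)
 *             iHi = i;
 *         else if (offReg >= g_aE1kRegMap[i].offset + g_aE1kRegMap[i].size)
 *             iLo = i + 1;
 *         else
 *             return i;   // offReg falls within this register (or table)
 *     }
 *     return -1;          // no register mapped at offReg
 */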
614
615
616/**
617 * Define E1000-specific EEPROM layout.
618 */
619struct E1kEEPROM
620{
621 public:
622 EEPROM93C46 eeprom;
623
624#ifdef IN_RING3
625 /**
626 * Initialize EEPROM content.
627 *
628 * @param macAddr MAC address of E1000.
629 */
630 void init(RTMAC &macAddr)
631 {
632 eeprom.init();
633 memcpy(eeprom.m_au16Data, macAddr.au16, sizeof(macAddr.au16));
634 eeprom.m_au16Data[0x04] = 0xFFFF;
635 /*
636 * bit 3 - full support for power management
637 * bit 10 - full duplex
638 */
639 eeprom.m_au16Data[0x0A] = 0x4408;
640 eeprom.m_au16Data[0x0B] = 0x001E;
641 eeprom.m_au16Data[0x0C] = 0x8086;
642 eeprom.m_au16Data[0x0D] = 0x100E;
643 eeprom.m_au16Data[0x0E] = 0x8086;
644 eeprom.m_au16Data[0x0F] = 0x3040;
645 eeprom.m_au16Data[0x21] = 0x7061;
646 eeprom.m_au16Data[0x22] = 0x280C;
647 eeprom.m_au16Data[0x23] = 0x00C8;
648 eeprom.m_au16Data[0x24] = 0x00C8;
649 eeprom.m_au16Data[0x2F] = 0x0602;
650 updateChecksum();
651 };
652
653 /**
654 * Compute the checksum as required by E1000 and store it
655 * in the last word.
656 */
657 void updateChecksum()
658 {
659 uint16_t u16Checksum = 0;
660
661 for (int i = 0; i < eeprom.SIZE-1; i++)
662 u16Checksum += eeprom.m_au16Data[i];
663 eeprom.m_au16Data[eeprom.SIZE-1] = 0xBABA - u16Checksum;
664 };
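    /* Note, illustration only: writing the last word as 0xBABA minus the sum
     * of the preceding words makes the 16-bit sum of all eeprom.SIZE words
     * come out as 0xBABA, which is the value drivers typically verify when
     * validating an E1000 EEPROM image. */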
665
666 /**
667 * The first 6 bytes of the EEPROM contain the MAC address.
668 *
669 * @param pMac Where to store the MAC address of E1000.
670 */
671 void getMac(PRTMAC pMac)
672 {
673 memcpy(pMac->au16, eeprom.m_au16Data, sizeof(pMac->au16));
674 };
675
676 uint32_t read()
677 {
678 return eeprom.read();
679 }
680
681 void write(uint32_t u32Wires)
682 {
683 eeprom.write(u32Wires);
684 }
685
686 bool readWord(uint32_t u32Addr, uint16_t *pu16Value)
687 {
688 return eeprom.readWord(u32Addr, pu16Value);
689 }
690
691 int load(PSSMHANDLE pSSM)
692 {
693 return eeprom.load(pSSM);
694 }
695
696 void save(PSSMHANDLE pSSM)
697 {
698 eeprom.save(pSSM);
699 }
700#endif /* IN_RING3 */
701};
702
703
704#define E1K_SPEC_VLAN(s) (s & 0xFFF)
705#define E1K_SPEC_CFI(s) (!!((s>>12) & 0x1))
706#define E1K_SPEC_PRI(s) ((s>>13) & 0x7)
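/* Worked example, illustration only: the 16-bit "special" field follows the
 * 802.1Q TCI layout, so for s = 0x6005:
 *     E1K_SPEC_PRI(s)  == 3    (priority, bits 15:13)
 *     E1K_SPEC_CFI(s)  == 0    (canonical format indicator, bit 12)
 *     E1K_SPEC_VLAN(s) == 5    (VLAN id, bits 11:0) */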
707
708struct E1kRxDStatus
709{
710 /** @name Descriptor Status field (3.2.3.1)
711 * @{ */
712 unsigned fDD : 1; /**< Descriptor Done. */
713 unsigned fEOP : 1; /**< End of packet. */
714 unsigned fIXSM : 1; /**< Ignore checksum indication. */
715 unsigned fVP : 1; /**< VLAN, matches VET. */
716 unsigned : 1;
717 unsigned fTCPCS : 1; /**< TCP Checksum calculated on the packet. */
718 unsigned fIPCS : 1; /**< IP Checksum calculated on the packet. */
719 unsigned fPIF : 1; /**< Passed in-exact filter */
720 /** @} */
721 /** @name Descriptor Errors field (3.2.3.2)
722 * (Only valid when fEOP and fDD are set.)
723 * @{ */
724 unsigned fCE : 1; /**< CRC or alignment error. */
725 unsigned : 4; /**< Reserved, varies with different models... */
726 unsigned fTCPE : 1; /**< TCP/UDP checksum error. */
727 unsigned fIPE : 1; /**< IP Checksum error. */
728 unsigned fRXE : 1; /**< RX Data error. */
729 /** @} */
730 /** @name Descriptor Special field (3.2.3.3)
731 * @{ */
732 unsigned u16Special : 16; /**< VLAN: Id, Canonical form, Priority. */
733 /** @} */
734};
735typedef struct E1kRxDStatus E1KRXDST;
736
737struct E1kRxDesc_st
738{
739 uint64_t u64BufAddr; /**< Address of data buffer */
740 uint16_t u16Length; /**< Length of data in buffer */
741 uint16_t u16Checksum; /**< Packet checksum */
742 E1KRXDST status;
743};
744typedef struct E1kRxDesc_st E1KRXDESC;
745AssertCompileSize(E1KRXDESC, 16);
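/* Size check worked out, illustration only: 8 (u64BufAddr) + 2 (u16Length)
 * + 2 (u16Checksum) + 4 (the 32 bits of E1KRXDST bitfields) = 16 bytes,
 * which is exactly what the AssertCompileSize above verifies. */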
746
747#define E1K_DTYP_LEGACY -1
748#define E1K_DTYP_CONTEXT 0
749#define E1K_DTYP_DATA 1
750
751struct E1kTDLegacy
752{
753 uint64_t u64BufAddr; /**< Address of data buffer */
754 struct TDLCmd_st
755 {
756 unsigned u16Length : 16;
757 unsigned u8CSO : 8;
758 /* CMD field : 8 */
759 unsigned fEOP : 1;
760 unsigned fIFCS : 1;
761 unsigned fIC : 1;
762 unsigned fRS : 1;
763 unsigned fRPS : 1;
764 unsigned fDEXT : 1;
765 unsigned fVLE : 1;
766 unsigned fIDE : 1;
767 } cmd;
768 struct TDLDw3_st
769 {
770 /* STA field */
771 unsigned fDD : 1;
772 unsigned fEC : 1;
773 unsigned fLC : 1;
774 unsigned fTURSV : 1;
775 /* RSV field */
776 unsigned u4RSV : 4;
777 /* CSS field */
778 unsigned u8CSS : 8;
779 /* Special field*/
780 unsigned u16Special: 16;
781 } dw3;
782};
783
784/**
785 * TCP/IP Context Transmit Descriptor, section 3.3.6.
786 */
787struct E1kTDContext
788{
789 struct CheckSum_st
790 {
791 /** TSE: Header start. !TSE: Checksum start. */
792 unsigned u8CSS : 8;
793 /** Checksum offset - where to store it. */
794 unsigned u8CSO : 8;
795 /** Checksum ending (inclusive) offset, 0 = end of packet. */
796 unsigned u16CSE : 16;
797 } ip;
798 struct CheckSum_st tu;
799 struct TDCDw2_st
800 {
801 /** TSE: The total number of payload bytes for this context. Sans header. */
802 unsigned u20PAYLEN : 20;
803 /** The descriptor type - E1K_DTYP_CONTEXT (0). */
804 unsigned u4DTYP : 4;
805 /** TUCMD field, 8 bits
806 * @{ */
807 /** TSE: TCP (set) or UDP (clear). */
808 unsigned fTCP : 1;
809 /** TSE: IPv4 (set) or IPv6 (clear) - for finding the payload length field in
810 * the IP header. Does not affect the checksumming.
811 * @remarks 82544GC/EI interprets a cleared field differently. */
812 unsigned fIP : 1;
813 /** TSE: TCP segmentation enable. When clear the context describes checksum offloading for ordinary (non-TSE) packets. */
814 unsigned fTSE : 1;
815 /** Report status (only applies to dw3.fDD for here). */
816 unsigned fRS : 1;
817 /** Reserved, MBZ. */
818 unsigned fRSV1 : 1;
819 /** Descriptor extension, must be set for this descriptor type. */
820 unsigned fDEXT : 1;
821 /** Reserved, MBZ. */
822 unsigned fRSV2 : 1;
823 /** Interrupt delay enable. */
824 unsigned fIDE : 1;
825 /** @} */
826 } dw2;
827 struct TDCDw3_st
828 {
829 /** Descriptor Done. */
830 unsigned fDD : 1;
831 /** Reserved, MBZ. */
832 unsigned u7RSV : 7;
833 /** TSO: The header (prototype) length (Ethernet[, VLAN tag], IP, TCP/UDP). */
834 unsigned u8HDRLEN : 8;
835 /** TSO: Maximum segment size. */
836 unsigned u16MSS : 16;
837 } dw3;
838};
839typedef struct E1kTDContext E1KTXCTX;
840
841/**
842 * TCP/IP Data Transmit Descriptor, section 3.3.7.
843 */
844struct E1kTDData
845{
846 uint64_t u64BufAddr; /**< Address of data buffer */
847 struct TDDCmd_st
848 {
849 /** The total length of data pointed to by this descriptor. */
850 unsigned u20DTALEN : 20;
851 /** The descriptor type - E1K_DTYP_DATA (1). */
852 unsigned u4DTYP : 4;
853 /** @name DCMD field, 8 bits (3.3.7.1).
854 * @{ */
855 /** End of packet. Note TSCTFC update. */
856 unsigned fEOP : 1;
857 /** Insert Ethernet FCS/CRC (requires fEOP to be set). */
858 unsigned fIFCS : 1;
859 /** Use the TSE context when set and the normal when clear. */
860 unsigned fTSE : 1;
861 /** Report status (dw3.STA). */
862 unsigned fRS : 1;
863 /** Reserved. 82544GC/EI defines this as Report Packet Sent (RPS). */
864 unsigned fRPS : 1;
865 /** Descriptor extension, must be set for this descriptor type. */
866 unsigned fDEXT : 1;
867 /** VLAN enable, requires CTRL.VME, auto enables FCS/CRC.
868 * Insert dw3.SPECIAL after ethernet header. */
869 unsigned fVLE : 1;
870 /** Interrupt delay enable. */
871 unsigned fIDE : 1;
872 /** @} */
873 } cmd;
874 struct TDDDw3_st
875 {
876 /** @name STA field (3.3.7.2)
877 * @{ */
878 unsigned fDD : 1; /**< Descriptor done. */
879 unsigned fEC : 1; /**< Excess collision. */
880 unsigned fLC : 1; /**< Late collision. */
881 /** Reserved, except for the usual oddball (82544GC/EI) where it's called TU. */
882 unsigned fTURSV : 1;
883 /** @} */
884 unsigned u4RSV : 4; /**< Reserved field, MBZ. */
885 /** @name POPTS (Packet Option) field (3.3.7.3)
886 * @{ */
887 unsigned fIXSM : 1; /**< Insert IP checksum. */
888 unsigned fTXSM : 1; /**< Insert TCP/UDP checksum. */
889 unsigned u6RSV : 6; /**< Reserved, MBZ. */
890 /** @} */
891 /** @name SPECIAL field - VLAN tag to be inserted after ethernet header.
892 * Requires fEOP, fVLE and CTRL.VME to be set.
893 * @{ */
894 unsigned u16Special: 16; /**< VLAN: Id, Canonical form, Priority. */
895 /** @} */
896 } dw3;
897};
898typedef struct E1kTDData E1KTXDAT;
899
900union E1kTxDesc
901{
902 struct E1kTDLegacy legacy;
903 struct E1kTDContext context;
904 struct E1kTDData data;
905};
906typedef union E1kTxDesc E1KTXDESC;
907AssertCompileSize(E1KTXDESC, 16);
908
909#define RA_CTL_AS 0x0003
910#define RA_CTL_AV 0x8000
911
912union E1kRecAddr
913{
914 uint32_t au32[32];
915 struct RAArray
916 {
917 uint8_t addr[6];
918 uint16_t ctl;
919 } array[16];
920};
921typedef struct E1kRecAddr::RAArray E1KRAELEM;
922typedef union E1kRecAddr E1KRA;
923AssertCompileSize(E1KRA, 8*16);
924
925#define E1K_IP_RF UINT16_C(0x8000) /**< reserved fragment flag */
926#define E1K_IP_DF UINT16_C(0x4000) /**< don't fragment flag */
927#define E1K_IP_MF UINT16_C(0x2000) /**< more fragments flag */
928#define E1K_IP_OFFMASK UINT16_C(0x1fff) /**< mask for fragmenting bits */
929
930/** @todo use+extend RTNETIPV4 */
931struct E1kIpHeader
932{
933 /* type of service / version / header length */
934 uint16_t tos_ver_hl;
935 /* total length */
936 uint16_t total_len;
937 /* identification */
938 uint16_t ident;
939 /* fragment offset field */
940 uint16_t offset;
941 /* time to live / protocol*/
942 uint16_t ttl_proto;
943 /* checksum */
944 uint16_t chksum;
945 /* source IP address */
946 uint32_t src;
947 /* destination IP address */
948 uint32_t dest;
949};
950AssertCompileSize(struct E1kIpHeader, 20);
951
952#define E1K_TCP_FIN UINT16_C(0x01)
953#define E1K_TCP_SYN UINT16_C(0x02)
954#define E1K_TCP_RST UINT16_C(0x04)
955#define E1K_TCP_PSH UINT16_C(0x08)
956#define E1K_TCP_ACK UINT16_C(0x10)
957#define E1K_TCP_URG UINT16_C(0x20)
958#define E1K_TCP_ECE UINT16_C(0x40)
959#define E1K_TCP_CWR UINT16_C(0x80)
960#define E1K_TCP_FLAGS UINT16_C(0x3f)
961
962/** @todo use+extend RTNETTCP */
963struct E1kTcpHeader
964{
965 uint16_t src;
966 uint16_t dest;
967 uint32_t seqno;
968 uint32_t ackno;
969 uint16_t hdrlen_flags;
970 uint16_t wnd;
971 uint16_t chksum;
972 uint16_t urgp;
973};
974AssertCompileSize(struct E1kTcpHeader, 20);
975
976
977#ifdef E1K_WITH_TXD_CACHE
978/** The current Saved state version. */
979# define E1K_SAVEDSTATE_VERSION 4
980/** Saved state version for VirtualBox 4.2 with VLAN tag fields. */
981# define E1K_SAVEDSTATE_VERSION_VBOX_42_VTAG 3
982#else /* !E1K_WITH_TXD_CACHE */
983/** The current Saved state version. */
984# define E1K_SAVEDSTATE_VERSION 3
985#endif /* !E1K_WITH_TXD_CACHE */
986/** Saved state version for VirtualBox 4.1 and earlier.
987 * These did not include VLAN tag fields. */
988#define E1K_SAVEDSTATE_VERSION_VBOX_41 2
989/** Saved state version for VirtualBox 3.0 and earlier.
990 * This did not include the configuration part nor the E1kEEPROM. */
991#define E1K_SAVEDSTATE_VERSION_VBOX_30 1
992
993/**
994 * Device state structure.
995 *
996 * Holds the current state of the device.
997 *
998 * @implements PDMINETWORKDOWN
999 * @implements PDMINETWORKCONFIG
1000 * @implements PDMILEDPORTS
1001 */
1002struct E1kState_st
1003{
1004 char szPrf[8]; /**< Log prefix, e.g. E1000#1. */
1005 PDMIBASE IBase;
1006 PDMINETWORKDOWN INetworkDown;
1007 PDMINETWORKCONFIG INetworkConfig;
1008 PDMILEDPORTS ILeds; /**< LED interface */
1009 R3PTRTYPE(PPDMIBASE) pDrvBase; /**< Attached network driver. */
1010 R3PTRTYPE(PPDMILEDCONNECTORS) pLedsConnector;
1011
1012 PPDMDEVINSR3 pDevInsR3; /**< Device instance - R3. */
1013 R3PTRTYPE(PPDMQUEUE) pTxQueueR3; /**< Transmit queue - R3. */
1014 R3PTRTYPE(PPDMQUEUE) pCanRxQueueR3; /**< Rx wakeup signaller - R3. */
1015 PPDMINETWORKUPR3 pDrvR3; /**< Attached network driver - R3. */
1016 PTMTIMERR3 pRIDTimerR3; /**< Receive Interrupt Delay Timer - R3. */
1017 PTMTIMERR3 pRADTimerR3; /**< Receive Absolute Delay Timer - R3. */
1018 PTMTIMERR3 pTIDTimerR3; /**< Transmit Interrupt Delay Timer - R3. */
1019 PTMTIMERR3 pTADTimerR3; /**< Transmit Absolute Delay Timer - R3. */
1020 PTMTIMERR3 pTXDTimerR3; /**< Transmit Delay Timer - R3. */
1021 PTMTIMERR3 pIntTimerR3; /**< Late Interrupt Timer - R3. */
1022 PTMTIMERR3 pLUTimerR3; /**< Link Up(/Restore) Timer. */
1023 /** The scatter / gather buffer used for the current outgoing packet - R3. */
1024 R3PTRTYPE(PPDMSCATTERGATHER) pTxSgR3;
1025
1026 PPDMDEVINSR0 pDevInsR0; /**< Device instance - R0. */
1027 R0PTRTYPE(PPDMQUEUE) pTxQueueR0; /**< Transmit queue - R0. */
1028 R0PTRTYPE(PPDMQUEUE) pCanRxQueueR0; /**< Rx wakeup signaller - R0. */
1029 PPDMINETWORKUPR0 pDrvR0; /**< Attached network driver - R0. */
1030 PTMTIMERR0 pRIDTimerR0; /**< Receive Interrupt Delay Timer - R0. */
1031 PTMTIMERR0 pRADTimerR0; /**< Receive Absolute Delay Timer - R0. */
1032 PTMTIMERR0 pTIDTimerR0; /**< Transmit Interrupt Delay Timer - R0. */
1033 PTMTIMERR0 pTADTimerR0; /**< Transmit Absolute Delay Timer - R0. */
1034 PTMTIMERR0 pTXDTimerR0; /**< Transmit Delay Timer - R0. */
1035 PTMTIMERR0 pIntTimerR0; /**< Late Interrupt Timer - R0. */
1036 PTMTIMERR0 pLUTimerR0; /**< Link Up(/Restore) Timer - R0. */
1037 /** The scatter / gather buffer used for the current outgoing packet - R0. */
1038 R0PTRTYPE(PPDMSCATTERGATHER) pTxSgR0;
1039
1040 PPDMDEVINSRC pDevInsRC; /**< Device instance - RC. */
1041 RCPTRTYPE(PPDMQUEUE) pTxQueueRC; /**< Transmit queue - RC. */
1042 RCPTRTYPE(PPDMQUEUE) pCanRxQueueRC; /**< Rx wakeup signaller - RC. */
1043 PPDMINETWORKUPRC pDrvRC; /**< Attached network driver - RC. */
1044 PTMTIMERRC pRIDTimerRC; /**< Receive Interrupt Delay Timer - RC. */
1045 PTMTIMERRC pRADTimerRC; /**< Receive Absolute Delay Timer - RC. */
1046 PTMTIMERRC pTIDTimerRC; /**< Transmit Interrupt Delay Timer - RC. */
1047 PTMTIMERRC pTADTimerRC; /**< Transmit Absolute Delay Timer - RC. */
1048 PTMTIMERRC pTXDTimerRC; /**< Transmit Delay Timer - RC. */
1049 PTMTIMERRC pIntTimerRC; /**< Late Interrupt Timer - RC. */
1050 PTMTIMERRC pLUTimerRC; /**< Link Up(/Restore) Timer - RC. */
1051 /** The scatter / gather buffer used for the current outgoing packet - RC. */
1052 RCPTRTYPE(PPDMSCATTERGATHER) pTxSgRC;
1053 RTRCPTR RCPtrAlignment;
1054
1055#if HC_ARCH_BITS != 32
1056 uint32_t Alignment1;
1057#endif
1058 PDMCRITSECT cs; /**< Critical section - what is it protecting? */
1059 PDMCRITSECT csRx; /**< RX Critical section. */
1060#ifdef E1K_WITH_TX_CS
1061 PDMCRITSECT csTx; /**< TX Critical section. */
1062#endif /* E1K_WITH_TX_CS */
1063 /** Base address of memory-mapped registers. */
1064 RTGCPHYS addrMMReg;
1065 /** MAC address obtained from the configuration. */
1066 RTMAC macConfigured;
1067 /** Base port of I/O space region. */
1068 RTIOPORT IOPortBase;
1069 /** EMT: */
1070 PCIDEVICE pciDevice;
1071 /** EMT: Last time the interrupt was acknowledged. */
1072 uint64_t u64AckedAt;
1073 /** All: Used for eliminating spurious interrupts. */
1074 bool fIntRaised;
1075 /** EMT: false if the cable is disconnected by the GUI. */
1076 bool fCableConnected;
1077 /** EMT: */
1078 bool fR0Enabled;
1079 /** EMT: */
1080 bool fRCEnabled;
1081 /** EMT: Compute Ethernet CRC for RX packets. */
1082 bool fEthernetCRC;
1083 /** All: throttle interrupts. */
1084 bool fItrEnabled;
1085 /** All: throttle RX interrupts. */
1086 bool fItrRxEnabled;
1087
1088 bool Alignment2;
1089 /** Link up delay (in milliseconds). */
1090 uint32_t cMsLinkUpDelay;
1091
1092 /** All: Device register storage. */
1093 uint32_t auRegs[E1K_NUM_OF_32BIT_REGS];
1094 /** TX/RX: Status LED. */
1095 PDMLED led;
1096 /** TX/RX: Number of the packet being sent/received, shown in the debug log. */
1097 uint32_t u32PktNo;
1098
1099 /** EMT: Offset of the register to be read via IO. */
1100 uint32_t uSelectedReg;
1101 /** EMT: Multicast Table Array. */
1102 uint32_t auMTA[128];
1103 /** EMT: Receive Address registers. */
1104 E1KRA aRecAddr;
1105 /** EMT: VLAN filter table array. */
1106 uint32_t auVFTA[128];
1107 /** EMT: Receive buffer size. */
1108 uint16_t u16RxBSize;
1109 /** EMT: Locked state -- no state alteration possible. */
1110 bool fLocked;
1111 /** EMT: */
1112 bool fDelayInts;
1113 /** All: */
1114 bool fIntMaskUsed;
1115
1116 /** N/A: */
1117 bool volatile fMaybeOutOfSpace;
1118 /** EMT: Gets signalled when more RX descriptors become available. */
1119 RTSEMEVENT hEventMoreRxDescAvail;
1120#ifdef E1K_WITH_RXD_CACHE
1121 /** RX: Fetched RX descriptors. */
1122 E1KRXDESC aRxDescriptors[E1K_RXD_CACHE_SIZE];
1123 //uint64_t aRxDescAddr[E1K_RXD_CACHE_SIZE];
1124 /** RX: Actual number of fetched RX descriptors. */
1125 uint32_t nRxDFetched;
1126 /** RX: Index in cache of RX descriptor being processed. */
1127 uint32_t iRxDCurrent;
1128#endif /* E1K_WITH_RXD_CACHE */
1129
1130 /** TX: Context used for TCP segmentation packets. */
1131 E1KTXCTX contextTSE;
1132 /** TX: Context used for ordinary packets. */
1133 E1KTXCTX contextNormal;
1134#ifdef E1K_WITH_TXD_CACHE
1135 /** TX: Fetched TX descriptors. */
1136 E1KTXDESC aTxDescriptors[E1K_TXD_CACHE_SIZE];
1137 /** TX: Actual number of fetched TX descriptors. */
1138 uint8_t nTxDFetched;
1139 /** TX: Index in cache of TX descriptor being processed. */
1140 uint8_t iTxDCurrent;
1141 /** TX: Will this frame be sent as GSO. */
1142 bool fGSO;
1143 /** Alignment padding. */
1144 bool fReserved;
1145 /** TX: Number of bytes in next packet. */
1146 uint32_t cbTxAlloc;
1147
1148#endif /* E1K_WITH_TXD_CACHE */
1149 /** GSO context. u8Type is set to PDMNETWORKGSOTYPE_INVALID when not
1150 * applicable to the current TSE mode. */
1151 PDMNETWORKGSO GsoCtx;
1152 /** Scratch space for holding the loopback / fallback scatter / gather
1153 * descriptor. */
1154 union
1155 {
1156 PDMSCATTERGATHER Sg;
1157 uint8_t padding[8 * sizeof(RTUINTPTR)];
1158 } uTxFallback;
1160 /** TX: Transmit packet buffer used for TSE fallback and loopback. */
1160 uint8_t aTxPacketFallback[E1K_MAX_TX_PKT_SIZE];
1161 /** TX: Number of bytes assembled in TX packet buffer. */
1162 uint16_t u16TxPktLen;
1163 /** TX: If false, e1000 segments packets itself instead of sending frames as GSO. */
1164 bool fGSOEnabled;
1165 /** TX: IP checksum has to be inserted if true. */
1166 bool fIPcsum;
1167 /** TX: TCP/UDP checksum has to be inserted if true. */
1168 bool fTCPcsum;
1169 /** TX: VLAN tag has to be inserted if true. */
1170 bool fVTag;
1171 /** TX: TCI part of VLAN tag to be inserted. */
1172 uint16_t u16VTagTCI;
1173 /** TX TSE fallback: Number of payload bytes remaining in TSE context. */
1174 uint32_t u32PayRemain;
1175 /** TX TSE fallback: Number of header bytes remaining in TSE context. */
1176 uint16_t u16HdrRemain;
1177 /** TX TSE fallback: Flags from template header. */
1178 uint16_t u16SavedFlags;
1179 /** TX TSE fallback: Partial checksum from template header. */
1180 uint32_t u32SavedCsum;
1181 /** ?: Emulated controller type. */
1182 E1KCHIP eChip;
1183
1184 /** EMT: EEPROM emulation */
1185 E1kEEPROM eeprom;
1186 /** EMT: Physical interface emulation. */
1187 PHY phy;
1188
1189#if 0
1190 /** Alignment padding. */
1191 uint8_t Alignment[HC_ARCH_BITS == 64 ? 8 : 4];
1192#endif
1193
1194 STAMCOUNTER StatReceiveBytes;
1195 STAMCOUNTER StatTransmitBytes;
1196#if defined(VBOX_WITH_STATISTICS)
1197 STAMPROFILEADV StatMMIOReadRZ;
1198 STAMPROFILEADV StatMMIOReadR3;
1199 STAMPROFILEADV StatMMIOWriteRZ;
1200 STAMPROFILEADV StatMMIOWriteR3;
1201 STAMPROFILEADV StatEEPROMRead;
1202 STAMPROFILEADV StatEEPROMWrite;
1203 STAMPROFILEADV StatIOReadRZ;
1204 STAMPROFILEADV StatIOReadR3;
1205 STAMPROFILEADV StatIOWriteRZ;
1206 STAMPROFILEADV StatIOWriteR3;
1207 STAMPROFILEADV StatLateIntTimer;
1208 STAMCOUNTER StatLateInts;
1209 STAMCOUNTER StatIntsRaised;
1210 STAMCOUNTER StatIntsPrevented;
1211 STAMPROFILEADV StatReceive;
1212 STAMPROFILEADV StatReceiveCRC;
1213 STAMPROFILEADV StatReceiveFilter;
1214 STAMPROFILEADV StatReceiveStore;
1215 STAMPROFILEADV StatTransmitRZ;
1216 STAMPROFILEADV StatTransmitR3;
1217 STAMPROFILE StatTransmitSendRZ;
1218 STAMPROFILE StatTransmitSendR3;
1219 STAMPROFILE StatRxOverflow;
1220 STAMCOUNTER StatRxOverflowWakeup;
1221 STAMCOUNTER StatTxDescCtxNormal;
1222 STAMCOUNTER StatTxDescCtxTSE;
1223 STAMCOUNTER StatTxDescLegacy;
1224 STAMCOUNTER StatTxDescData;
1225 STAMCOUNTER StatTxDescTSEData;
1226 STAMCOUNTER StatTxPathFallback;
1227 STAMCOUNTER StatTxPathGSO;
1228 STAMCOUNTER StatTxPathRegular;
1229 STAMCOUNTER StatPHYAccesses;
1230 STAMCOUNTER aStatRegWrites[E1K_NUM_OF_REGS];
1231 STAMCOUNTER aStatRegReads[E1K_NUM_OF_REGS];
1232#endif /* VBOX_WITH_STATISTICS */
1233
1234#ifdef E1K_INT_STATS
1235 /* Internal stats */
1236 uint64_t u64ArmedAt;
1237 uint64_t uStatMaxTxDelay;
1238 uint32_t uStatInt;
1239 uint32_t uStatIntTry;
1240 uint32_t uStatIntLower;
1241 uint32_t uStatIntDly;
1242 int32_t iStatIntLost;
1243 int32_t iStatIntLostOne;
1244 uint32_t uStatDisDly;
1245 uint32_t uStatIntSkip;
1246 uint32_t uStatIntLate;
1247 uint32_t uStatIntMasked;
1248 uint32_t uStatIntEarly;
1249 uint32_t uStatIntRx;
1250 uint32_t uStatIntTx;
1251 uint32_t uStatIntICS;
1252 uint32_t uStatIntRDTR;
1253 uint32_t uStatIntRXDMT0;
1254 uint32_t uStatIntTXQE;
1255 uint32_t uStatTxNoRS;
1256 uint32_t uStatTxIDE;
1257 uint32_t uStatTxDelayed;
1258 uint32_t uStatTxDelayExp;
1259 uint32_t uStatTAD;
1260 uint32_t uStatTID;
1261 uint32_t uStatRAD;
1262 uint32_t uStatRID;
1263 uint32_t uStatRxFrm;
1264 uint32_t uStatTxFrm;
1265 uint32_t uStatDescCtx;
1266 uint32_t uStatDescDat;
1267 uint32_t uStatDescLeg;
1268 uint32_t uStatTx1514;
1269 uint32_t uStatTx2962;
1270 uint32_t uStatTx4410;
1271 uint32_t uStatTx5858;
1272 uint32_t uStatTx7306;
1273 uint32_t uStatTx8754;
1274 uint32_t uStatTx16384;
1275 uint32_t uStatTx32768;
1276 uint32_t uStatTxLarge;
1277 uint32_t uStatAlign;
1278#endif /* E1K_INT_STATS */
1279};
1280typedef struct E1kState_st E1KSTATE;
1281/** Pointer to the E1000 device state. */
1282typedef E1KSTATE *PE1KSTATE;
1283
1284#ifndef VBOX_DEVICE_STRUCT_TESTCASE
1285
1286/* Forward declarations ******************************************************/
1287static int e1kXmitPending(PE1KSTATE pThis, bool fOnWorkerThread);
1288
1289static int e1kRegReadUnimplemented (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1290static int e1kRegWriteUnimplemented(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1291static int e1kRegReadAutoClear (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1292static int e1kRegReadDefault (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1293static int e1kRegWriteDefault (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1294#if 0 /* unused */
1295static int e1kRegReadCTRL (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1296#endif
1297static int e1kRegWriteCTRL (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1298static int e1kRegReadEECD (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1299static int e1kRegWriteEECD (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1300static int e1kRegWriteEERD (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1301static int e1kRegWriteMDIC (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1302static int e1kRegReadICR (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1303static int e1kRegWriteICR (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1304static int e1kRegWriteICS (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1305static int e1kRegWriteIMS (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1306static int e1kRegWriteIMC (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1307static int e1kRegWriteRCTL (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1308static int e1kRegWritePBA (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1309static int e1kRegWriteRDT (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1310static int e1kRegWriteRDTR (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1311static int e1kRegWriteTDT (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1312static int e1kRegReadMTA (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1313static int e1kRegWriteMTA (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1314static int e1kRegReadRA (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1315static int e1kRegWriteRA (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1316static int e1kRegReadVFTA (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1317static int e1kRegWriteVFTA (PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1318
1319/**
1320 * Register map table.
1321 *
1322 * Override pfnRead and pfnWrite to get register-specific behavior.
1323 */
1324static const struct E1kRegMap_st
1325{
1326 /** Register offset in the register space. */
1327 uint32_t offset;
1328 /** Size in bytes. Registers of size > 4 are in fact tables. */
1329 uint32_t size;
1330 /** Readable bits. */
1331 uint32_t readable;
1332 /** Writable bits. */
1333 uint32_t writable;
1334 /** Read callback. */
1335 int (*pfnRead)(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1336 /** Write callback. */
1337 int (*pfnWrite)(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t u32Value);
1338 /** Abbreviated name. */
1339 const char *abbrev;
1340 /** Full name. */
1341 const char *name;
1342} g_aE1kRegMap[E1K_NUM_OF_REGS] =
1343{
1344 /* offset size read mask write mask read callback write callback abbrev full name */
1345 /*------- ------- ---------- ---------- ----------------------- ------------------------ ---------- ------------------------------*/
1346 { 0x00000, 0x00004, 0xDBF31BE9, 0xDBF31BE9, e1kRegReadDefault , e1kRegWriteCTRL , "CTRL" , "Device Control" },
1347 { 0x00008, 0x00004, 0x0000FDFF, 0x00000000, e1kRegReadDefault , e1kRegWriteUnimplemented, "STATUS" , "Device Status" },
1348 { 0x00010, 0x00004, 0x000027F0, 0x00000070, e1kRegReadEECD , e1kRegWriteEECD , "EECD" , "EEPROM/Flash Control/Data" },
1349 { 0x00014, 0x00004, 0xFFFFFF10, 0xFFFFFF00, e1kRegReadDefault , e1kRegWriteEERD , "EERD" , "EEPROM Read" },
1350 { 0x00018, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "CTRL_EXT", "Extended Device Control" },
1351 { 0x0001c, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FLA" , "Flash Access (N/A)" },
1352 { 0x00020, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteMDIC , "MDIC" , "MDI Control" },
1353 { 0x00028, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCAL" , "Flow Control Address Low" },
1354 { 0x0002c, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCAH" , "Flow Control Address High" },
1355 { 0x00030, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCT" , "Flow Control Type" },
1356 { 0x00038, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "VET" , "VLAN EtherType" },
1357 { 0x000c0, 0x00004, 0x0001F6DF, 0x0001F6DF, e1kRegReadICR , e1kRegWriteICR , "ICR" , "Interrupt Cause Read" },
1358 { 0x000c4, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "ITR" , "Interrupt Throttling" },
1359 { 0x000c8, 0x00004, 0x00000000, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteICS , "ICS" , "Interrupt Cause Set" },
1360 { 0x000d0, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteIMS , "IMS" , "Interrupt Mask Set/Read" },
1361 { 0x000d8, 0x00004, 0x00000000, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteIMC , "IMC" , "Interrupt Mask Clear" },
1362 { 0x00100, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteRCTL , "RCTL" , "Receive Control" },
1363 { 0x00170, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCTTV" , "Flow Control Transmit Timer Value" },
1364 { 0x00178, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TXCW" , "Transmit Configuration Word (N/A)" },
1365 { 0x00180, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RXCW" , "Receive Configuration Word (N/A)" },
1366 { 0x00400, 0x00004, 0x017FFFFA, 0x017FFFFA, e1kRegReadDefault , e1kRegWriteDefault , "TCTL" , "Transmit Control" },
1367 { 0x00410, 0x00004, 0x3FFFFFFF, 0x3FFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TIPG" , "Transmit IPG" },
1368 { 0x00458, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "AIFS" , "Adaptive IFS Throttle - AIT" },
1369 { 0x00e00, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "LEDCTL" , "LED Control" },
1370 { 0x01000, 0x00004, 0xFFFF007F, 0x0000007F, e1kRegReadDefault , e1kRegWritePBA , "PBA" , "Packet Buffer Allocation" },
1371 { 0x02160, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCRTL" , "Flow Control Receive Threshold Low" },
1372 { 0x02168, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCRTH" , "Flow Control Receive Threshold High" },
1373 { 0x02410, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFH" , "Receive Data FIFO Head" },
1374 { 0x02418, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFT" , "Receive Data FIFO Tail" },
1375 { 0x02420, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFHS" , "Receive Data FIFO Head Saved Register" },
1376 { 0x02428, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFTS" , "Receive Data FIFO Tail Saved Register" },
1377 { 0x02430, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFPC" , "Receive Data FIFO Packet Count" },
1378 { 0x02800, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDBAL" , "Receive Descriptor Base Low" },
1379 { 0x02804, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDBAH" , "Receive Descriptor Base High" },
1380 { 0x02808, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDLEN" , "Receive Descriptor Length" },
1381 { 0x02810, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDH" , "Receive Descriptor Head" },
1382 { 0x02818, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteRDT , "RDT" , "Receive Descriptor Tail" },
1383 { 0x02820, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteRDTR , "RDTR" , "Receive Delay Timer" },
1384 { 0x02828, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RXDCTL" , "Receive Descriptor Control" },
1385 { 0x0282c, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "RADV" , "Receive Interrupt Absolute Delay Timer" },
1386 { 0x02c00, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RSRPD" , "Receive Small Packet Detect Interrupt" },
1387 { 0x03000, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TXDMAC" , "TX DMA Control (N/A)" },
1388 { 0x03410, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFH" , "Transmit Data FIFO Head" },
1389 { 0x03418, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFT" , "Transmit Data FIFO Tail" },
1390 { 0x03420, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFHS" , "Transmit Data FIFO Head Saved Register" },
1391 { 0x03428, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFTS" , "Transmit Data FIFO Tail Saved Register" },
1392 { 0x03430, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFPC" , "Transmit Data FIFO Packet Count" },
1393 { 0x03800, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDBAL" , "Transmit Descriptor Base Low" },
1394 { 0x03804, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDBAH" , "Transmit Descriptor Base High" },
1395 { 0x03808, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDLEN" , "Transmit Descriptor Length" },
1396 { 0x03810, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDH" , "Transmit Descriptor Head" },
1397 { 0x03818, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteTDT , "TDT" , "Transmit Descriptor Tail" },
1398 { 0x03820, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "TIDV" , "Transmit Interrupt Delay Value" },
1399 { 0x03828, 0x00004, 0xFF3F3F3F, 0xFF3F3F3F, e1kRegReadDefault , e1kRegWriteDefault , "TXDCTL" , "Transmit Descriptor Control" },
1400 { 0x0382c, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "TADV" , "Transmit Absolute Interrupt Delay Timer" },
1401 { 0x03830, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TSPMT" , "TCP Segmentation Pad and Threshold" },
1402 { 0x04000, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "CRCERRS" , "CRC Error Count" },
1403 { 0x04004, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "ALGNERRC", "Alignment Error Count" },
1404 { 0x04008, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "SYMERRS" , "Symbol Error Count" },
1405 { 0x0400c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RXERRC" , "RX Error Count" },
1406 { 0x04010, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MPC" , "Missed Packets Count" },
1407 { 0x04014, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "SCC" , "Single Collision Count" },
1408 { 0x04018, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "ECOL" , "Excessive Collisions Count" },
1409 { 0x0401c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MCC" , "Multiple Collision Count" },
1410 { 0x04020, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "LATECOL" , "Late Collisions Count" },
1411 { 0x04028, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "COLC" , "Collision Count" },
1412 { 0x04030, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "DC" , "Defer Count" },
1413 { 0x04034, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TNCRS" , "Transmit - No CRS" },
1414 { 0x04038, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "SEC" , "Sequence Error Count" },
1415 { 0x0403c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "CEXTERR" , "Carrier Extension Error Count" },
1416 { 0x04040, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RLEC" , "Receive Length Error Count" },
1417 { 0x04048, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XONRXC" , "XON Received Count" },
1418 { 0x0404c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XONTXC" , "XON Transmitted Count" },
1419 { 0x04050, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XOFFRXC" , "XOFF Received Count" },
1420 { 0x04054, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XOFFTXC" , "XOFF Transmitted Count" },
1421 { 0x04058, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCRUC" , "FC Received Unsupported Count" },
1422 { 0x0405c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC64" , "Packets Received (64 Bytes) Count" },
1423 { 0x04060, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC127" , "Packets Received (65-127 Bytes) Count" },
1424 { 0x04064, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC255" , "Packets Received (128-255 Bytes) Count" },
1425 { 0x04068, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC511" , "Packets Received (256-511 Bytes) Count" },
1426 { 0x0406c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC1023" , "Packets Received (512-1023 Bytes) Count" },
1427 { 0x04070, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC1522" , "Packets Received (1024-Max Bytes)" },
1428 { 0x04074, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GPRC" , "Good Packets Received Count" },
1429 { 0x04078, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "BPRC" , "Broadcast Packets Received Count" },
1430 { 0x0407c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "MPRC" , "Multicast Packets Received Count" },
1431 { 0x04080, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GPTC" , "Good Packets Transmitted Count" },
1432 { 0x04088, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GORCL" , "Good Octets Received Count (Low)" },
1433 { 0x0408c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GORCH" , "Good Octets Received Count (Hi)" },
1434 { 0x04090, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GOTCL" , "Good Octets Transmitted Count (Low)" },
1435 { 0x04094, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GOTCH" , "Good Octets Transmitted Count (Hi)" },
1436 { 0x040a0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RNBC" , "Receive No Buffers Count" },
1437 { 0x040a4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RUC" , "Receive Undersize Count" },
1438 { 0x040a8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RFC" , "Receive Fragment Count" },
1439 { 0x040ac, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "ROC" , "Receive Oversize Count" },
1440 { 0x040b0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RJC" , "Receive Jabber Count" },
1441 { 0x040b4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MGTPRC" , "Management Packets Received Count" },
1442 { 0x040b8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MGTPDC" , "Management Packets Dropped Count" },
1443 { 0x040bc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MGTPTC" , "Management Pkts Transmitted Count" },
1444 { 0x040c0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TORL" , "Total Octets Received (Lo)" },
1445 { 0x040c4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TORH" , "Total Octets Received (Hi)" },
1446 { 0x040c8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TOTL" , "Total Octets Transmitted (Lo)" },
1447 { 0x040cc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TOTH" , "Total Octets Transmitted (Hi)" },
1448 { 0x040d0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TPR" , "Total Packets Received" },
1449 { 0x040d4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TPT" , "Total Packets Transmitted" },
1450 { 0x040d8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC64" , "Packets Transmitted (64 Bytes) Count" },
1451 { 0x040dc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC127" , "Packets Transmitted (65-127 Bytes) Count" },
1452 { 0x040e0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC255" , "Packets Transmitted (128-255 Bytes) Count" },
1453 { 0x040e4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC511" , "Packets Transmitted (256-511 Bytes) Count" },
1454 { 0x040e8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC1023" , "Packets Transmitted (512-1023 Bytes) Count" },
1455 { 0x040ec, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC1522" , "Packets Transmitted (1024 Bytes or Greater) Count" },
1456 { 0x040f0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "MPTC" , "Multicast Packets Transmitted Count" },
1457 { 0x040f4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "BPTC" , "Broadcast Packets Transmitted Count" },
1458 { 0x040f8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TSCTC" , "TCP Segmentation Context Transmitted Count" },
1459 { 0x040fc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TSCTFC" , "TCP Segmentation Context Tx Fail Count" },
1460 { 0x05000, 0x00004, 0x000007FF, 0x000007FF, e1kRegReadDefault , e1kRegWriteDefault , "RXCSUM" , "Receive Checksum Control" },
1461 { 0x05800, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUC" , "Wakeup Control" },
1462 { 0x05808, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUFC" , "Wakeup Filter Control" },
1463 { 0x05810, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUS" , "Wakeup Status" },
1464 { 0x05820, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "MANC" , "Management Control" },
1465 { 0x05838, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "IPAV" , "IP Address Valid" },
1466 { 0x05900, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUPL" , "Wakeup Packet Length" },
1467 { 0x05200, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadMTA , e1kRegWriteMTA , "MTA" , "Multicast Table Array (n)" },
1468 { 0x05400, 0x00080, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadRA , e1kRegWriteRA , "RA" , "Receive Address (64-bit) (n)" },
1469 { 0x05600, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadVFTA , e1kRegWriteVFTA , "VFTA" , "VLAN Filter Table Array (n)" },
1470 { 0x05840, 0x0001c, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "IP4AT" , "IPv4 Address Table" },
1471 { 0x05880, 0x00010, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "IP6AT" , "IPv6 Address Table" },
1472 { 0x05a00, 0x00080, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUPM" , "Wakeup Packet Memory" },
1473 { 0x05f00, 0x0001c, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FFLT" , "Flexible Filter Length Table" },
1474 { 0x09000, 0x003fc, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FFMT" , "Flexible Filter Mask Table" },
1475 { 0x09800, 0x003fc, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FFVT" , "Flexible Filter Value Table" },
1476 { 0x10000, 0x10000, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "PBM" , "Packet Buffer Memory (n)" },
1477 { 0x00040, 0x00080, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadRA , e1kRegWriteRA , "RA82542" , "Receive Address (64-bit) (n) (82542)" },
1478 { 0x00200, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadMTA , e1kRegWriteMTA , "MTA82542", "Multicast Table Array (n) (82542)" },
1479 { 0x00600, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadVFTA , e1kRegWriteVFTA , "VFTA82542", "VLAN Filter Table Array (n) (82542)" }
1480};
1481
1482#ifdef LOG_ENABLED
1483
1484/**
1485 * Convert a U32 value to a hex string. Nibbles not selected by the mask are replaced with dots.
1486 *
1487 * @remarks The mask has nibble (not bit) granularity (e.g. 000000FF selects the low byte).
1488 *
1489 * @returns The buffer.
1490 *
1491 * @param u32 The word to convert into a string.
1492 * @param mask Selects which nibbles to convert.
1493 * @param buf Where to put the result.
1494 */
1495static char *e1kU32toHex(uint32_t u32, uint32_t mask, char *buf)
1496{
1497 for (char *ptr = buf + 7; ptr >= buf; --ptr, u32 >>=4, mask >>=4)
1498 {
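        /* Process one nibble per iteration, least significant first; for values above 9 adding '7' yields 'A'..'F'. */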
1499 if (mask & 0xF)
1500 *ptr = (u32 & 0xF) + ((u32 & 0xF) > 9 ? '7' : '0');
1501 else
1502 *ptr = '.';
1503 }
1504 buf[8] = 0;
1505 return buf;
1506}
1507
1508/**
1509 * Returns timer name for debug purposes.
1510 *
1511 * @returns The timer name.
1512 *
1513 * @param pThis The device state structure.
1514 * @param pTimer The timer to get the name for.
1515 */
1516DECLINLINE(const char *) e1kGetTimerName(PE1KSTATE pThis, PTMTIMER pTimer)
1517{
1518 if (pTimer == pThis->CTX_SUFF(pTIDTimer))
1519 return "TID";
1520 if (pTimer == pThis->CTX_SUFF(pTADTimer))
1521 return "TAD";
1522 if (pTimer == pThis->CTX_SUFF(pRIDTimer))
1523 return "RID";
1524 if (pTimer == pThis->CTX_SUFF(pRADTimer))
1525 return "RAD";
1526 if (pTimer == pThis->CTX_SUFF(pIntTimer))
1527 return "Int";
1528 if (pTimer == pThis->CTX_SUFF(pTXDTimer))
1529 return "TXD";
1530 if (pTimer == pThis->CTX_SUFF(pLUTimer))
1531 return "LinkUp";
1532 return "unknown";
1533}
1534
1535#endif /* LOG_ENABLED */
1536
1537/**
1538 * Arm a timer.
1539 *
1540 * @param pThis Pointer to the device state structure.
1541 * @param pTimer Pointer to the timer.
1542 * @param uExpireIn Expiration interval in microseconds.
1543 */
1544DECLINLINE(void) e1kArmTimer(PE1KSTATE pThis, PTMTIMER pTimer, uint32_t uExpireIn)
1545{
1546 if (pThis->fLocked)
1547 return;
1548
1549 E1kLog2(("%s Arming %s timer to fire in %d usec...\n",
1550 pThis->szPrf, e1kGetTimerName(pThis, pTimer), uExpireIn));
1551 TMTimerSetMicro(pTimer, uExpireIn);
1552}
1553
1554/**
1555 * Cancel a timer.
1556 *
1557 * @param pThis Pointer to the device state structure.
1558 * @param pTimer Pointer to the timer.
1559 */
1560DECLINLINE(void) e1kCancelTimer(PE1KSTATE pThis, PTMTIMER pTimer)
1561{
1562 E1kLog2(("%s Stopping %s timer...\n",
1563 pThis->szPrf, e1kGetTimerName(pThis, pTimer)));
1564 int rc = TMTimerStop(pTimer);
1565 if (RT_FAILURE(rc))
1566 E1kLog2(("%s e1kCancelTimer: TMTimerStop() failed with %Rrc\n",
1567 pThis->szPrf, rc));
1568 RT_NOREF1(pThis);
1569}
1570
1571#define e1kCsEnter(ps, rc) PDMCritSectEnter(&ps->cs, rc)
1572#define e1kCsLeave(ps) PDMCritSectLeave(&ps->cs)
1573
1574#define e1kCsRxEnter(ps, rc) PDMCritSectEnter(&ps->csRx, rc)
1575#define e1kCsRxLeave(ps) PDMCritSectLeave(&ps->csRx)
1576#define e1kCsRxIsOwner(ps) PDMCritSectIsOwner(&ps->csRx)
1577
1578#ifndef E1K_WITH_TX_CS
1579# define e1kCsTxEnter(ps, rc) VINF_SUCCESS
1580# define e1kCsTxLeave(ps) do { } while (0)
1581#else /* E1K_WITH_TX_CS */
1582# define e1kCsTxEnter(ps, rc) PDMCritSectEnter(&ps->csTx, rc)
1583# define e1kCsTxLeave(ps) PDMCritSectLeave(&ps->csTx)
1584#endif /* E1K_WITH_TX_CS */
1585
1586#ifdef IN_RING3
1587
1588/**
1589 * Wakeup the RX thread.
1590 */
1591static void e1kWakeupReceive(PPDMDEVINS pDevIns)
1592{
1593 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
1594 if ( pThis->fMaybeOutOfSpace
1595 && pThis->hEventMoreRxDescAvail != NIL_RTSEMEVENT)
1596 {
1597 STAM_COUNTER_INC(&pThis->StatRxOverflowWakeup);
1598 E1kLog(("%s Waking up Out-of-RX-space semaphore\n", pThis->szPrf));
1599 RTSemEventSignal(pThis->hEventMoreRxDescAvail);
1600 }
1601}
1602
1603/**
1604 * Hardware reset. Revert all registers to initial values.
1605 *
1606 * @param pThis The device state structure.
1607 */
1608static void e1kHardReset(PE1KSTATE pThis)
1609{
1610 E1kLog(("%s Hard reset triggered\n", pThis->szPrf));
1611 memset(pThis->auRegs, 0, sizeof(pThis->auRegs));
1612 memset(pThis->aRecAddr.au32, 0, sizeof(pThis->aRecAddr.au32));
1613#ifdef E1K_INIT_RA0
1614 memcpy(pThis->aRecAddr.au32, pThis->macConfigured.au8,
1615 sizeof(pThis->macConfigured.au8));
1616 pThis->aRecAddr.array[0].ctl |= RA_CTL_AV;
1617#endif /* E1K_INIT_RA0 */
1618 STATUS = 0x0081; /* SPEED=10b (1000 Mb/s), FD=1b (Full Duplex) */
1619 EECD = 0x0100; /* EE_PRES=1b (EEPROM present) */
1620 CTRL = 0x0a09; /* FRCSPD=1b SPEED=10b LRST=1b FD=1b */
1621 TSPMT = 0x01000400;/* TSMT=0400h TSPBP=0100h */
1622 Assert(GET_BITS(RCTL, BSIZE) == 0);
1623 pThis->u16RxBSize = 2048;
1624
1625 /* Reset promiscuous mode */
1626 if (pThis->pDrvR3)
1627 pThis->pDrvR3->pfnSetPromiscuousMode(pThis->pDrvR3, false);
1628
1629#ifdef E1K_WITH_TXD_CACHE
1630 int rc = e1kCsTxEnter(pThis, VERR_SEM_BUSY);
1631 if (RT_LIKELY(rc == VINF_SUCCESS))
1632 {
1633 pThis->nTxDFetched = 0;
1634 pThis->iTxDCurrent = 0;
1635 pThis->fGSO = false;
1636 pThis->cbTxAlloc = 0;
1637 e1kCsTxLeave(pThis);
1638 }
1639#endif /* E1K_WITH_TXD_CACHE */
1640#ifdef E1K_WITH_RXD_CACHE
1641 if (RT_LIKELY(e1kCsRxEnter(pThis, VERR_SEM_BUSY) == VINF_SUCCESS))
1642 {
1643 pThis->iRxDCurrent = pThis->nRxDFetched = 0;
1644 e1kCsRxLeave(pThis);
1645 }
1646#endif /* E1K_WITH_RXD_CACHE */
1647}
1648
1649#endif /* IN_RING3 */
1650
1651/**
1652 * Compute Internet checksum.
1653 *
1654 * @remarks Refer to http://www.netfor2.com/checksum.html for short intro.
1655 *
1656 * @param pvBuf The buffer holding the data to checksum.
1657 * @param cb The size of the buffer in bytes.
1660 *
1661 * @return The 1's complement of the 1's complement sum.
1662 *
1663 * @thread E1000_TX
1664 */
1665static uint16_t e1kCSum16(const void *pvBuf, size_t cb)
1666{
1667 uint32_t csum = 0;
1668 uint16_t *pu16 = (uint16_t *)pvBuf;
1669
1670 while (cb > 1)
1671 {
1672 csum += *pu16++;
1673 cb -= 2;
1674 }
1675 if (cb)
1676 csum += *(uint8_t*)pu16;
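    /* Fold carries out of the high 16 bits back into the sum (RFC 1071 Internet checksum folding). */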
1677 while (csum >> 16)
1678 csum = (csum >> 16) + (csum & 0xFFFF);
1679 return ~csum;
1680}
1681
1682/**
1683 * Dump a packet to debug log.
1684 *
1685 * @param pThis The device state structure.
1686 * @param cpPacket The packet.
1687 * @param cb The size of the packet.
1688 * @param pszText A string denoting direction of packet transfer.
1689 * @thread E1000_TX
1690 */
1691DECLINLINE(void) e1kPacketDump(PE1KSTATE pThis, const uint8_t *cpPacket, size_t cb, const char *pszText)
1692{
1693#ifdef DEBUG
1694 if (RT_LIKELY(e1kCsEnter(pThis, VERR_SEM_BUSY) == VINF_SUCCESS))
1695 {
1696 Log4(("%s --- %s packet #%d: %RTmac => %RTmac (%d bytes) ---\n",
1697 pThis->szPrf, pszText, ++pThis->u32PktNo, cpPacket+6, cpPacket, cb));
1698 if (ntohs(*(uint16_t*)(cpPacket+12)) == 0x86DD)
1699 {
1700 Log4(("%s --- IPv6: %RTnaipv6 => %RTnaipv6\n",
1701 pThis->szPrf, cpPacket+14+8, cpPacket+14+24));
1702 if (*(cpPacket+14+6) == 0x6)
1703 Log4(("%s --- TCP: seq=%x ack=%x\n", pThis->szPrf,
1704 ntohl(*(uint32_t*)(cpPacket+14+40+4)), ntohl(*(uint32_t*)(cpPacket+14+40+8))));
1705 }
1706 else if (ntohs(*(uint16_t*)(cpPacket+12)) == 0x800)
1707 {
1708 Log4(("%s --- IPv4: %RTnaipv4 => %RTnaipv4\n",
1709 pThis->szPrf, *(uint32_t*)(cpPacket+14+12), *(uint32_t*)(cpPacket+14+16)));
1710 if (*(cpPacket+14+6) == 0x6)
1711 Log4(("%s --- TCP: seq=%x ack=%x\n", pThis->szPrf,
1712 ntohl(*(uint32_t*)(cpPacket+14+20+4)), ntohl(*(uint32_t*)(cpPacket+14+20+8))));
1713 }
1714 E1kLog3(("%.*Rhxd\n", cb, cpPacket));
1715 e1kCsLeave(pThis);
1716 }
1717#else
1718 if (RT_LIKELY(e1kCsEnter(pThis, VERR_SEM_BUSY) == VINF_SUCCESS))
1719 {
1720 if (ntohs(*(uint16_t*)(cpPacket+12)) == 0x86DD)
1721 E1kLogRel(("E1000: %s packet #%d, %RTmac => %RTmac, %RTnaipv6 => %RTnaipv6, seq=%x ack=%x\n",
1722 pszText, ++pThis->u32PktNo, cpPacket+6, cpPacket, cpPacket+14+8, cpPacket+14+24,
1723 ntohl(*(uint32_t*)(cpPacket+14+40+4)), ntohl(*(uint32_t*)(cpPacket+14+40+8))));
1724 else
1725 E1kLogRel(("E1000: %s packet #%d, %RTmac => %RTmac, %RTnaipv4 => %RTnaipv4, seq=%x ack=%x\n",
1726 pszText, ++pThis->u32PktNo, cpPacket+6, cpPacket,
1727 *(uint32_t*)(cpPacket+14+12), *(uint32_t*)(cpPacket+14+16),
1728 ntohl(*(uint32_t*)(cpPacket+14+20+4)), ntohl(*(uint32_t*)(cpPacket+14+20+8))));
1729 e1kCsLeave(pThis);
1730 }
1731 RT_NOREF2(cb, pszText);
1732#endif
1733}
1734
1735/**
1736 * Determine the type of transmit descriptor.
1737 *
1738 * @returns Descriptor type. See E1K_DTYP_XXX defines.
1739 *
1740 * @param pDesc Pointer to descriptor union.
1741 * @thread E1000_TX
1742 */
1743DECLINLINE(int) e1kGetDescType(E1KTXDESC *pDesc)
1744{
1745 if (pDesc->legacy.cmd.fDEXT)
1746 return pDesc->context.dw2.u4DTYP;
1747 return E1K_DTYP_LEGACY;
1748}
1749
1750/**
1751 * Dump receive descriptor to debug log.
1752 *
1753 * @param pThis The device state structure.
1754 * @param pDesc Pointer to the descriptor.
1755 * @thread E1000_RX
1756 */
1757static void e1kPrintRDesc(PE1KSTATE pThis, E1KRXDESC *pDesc)
1758{
1759 RT_NOREF2(pThis, pDesc);
1760 E1kLog2(("%s <-- Receive Descriptor (%d bytes):\n", pThis->szPrf, pDesc->u16Length));
1761 E1kLog2((" Address=%16LX Length=%04X Csum=%04X\n",
1762 pDesc->u64BufAddr, pDesc->u16Length, pDesc->u16Checksum));
1763 E1kLog2((" STA: %s %s %s %s %s %s %s ERR: %s %s %s %s SPECIAL: %s VLAN=%03x PRI=%x\n",
1764 pDesc->status.fPIF ? "PIF" : "pif",
1765 pDesc->status.fIPCS ? "IPCS" : "ipcs",
1766 pDesc->status.fTCPCS ? "TCPCS" : "tcpcs",
1767 pDesc->status.fVP ? "VP" : "vp",
1768 pDesc->status.fIXSM ? "IXSM" : "ixsm",
1769 pDesc->status.fEOP ? "EOP" : "eop",
1770 pDesc->status.fDD ? "DD" : "dd",
1771 pDesc->status.fRXE ? "RXE" : "rxe",
1772 pDesc->status.fIPE ? "IPE" : "ipe",
1773 pDesc->status.fTCPE ? "TCPE" : "tcpe",
1774 pDesc->status.fCE ? "CE" : "ce",
1775 E1K_SPEC_CFI(pDesc->status.u16Special) ? "CFI" :"cfi",
1776 E1K_SPEC_VLAN(pDesc->status.u16Special),
1777 E1K_SPEC_PRI(pDesc->status.u16Special)));
1778}
1779
1780/**
1781 * Dump transmit descriptor to debug log.
1782 *
1783 * @param pThis The device state structure.
1784 * @param pDesc Pointer to descriptor union.
1785 * @param pszDir A string denoting direction of descriptor transfer
1786 * @thread E1000_TX
1787 */
1788static void e1kPrintTDesc(PE1KSTATE pThis, E1KTXDESC *pDesc, const char *pszDir,
1789 unsigned uLevel = RTLOGGRPFLAGS_LEVEL_2)
1790{
1791 RT_NOREF4(pThis, pDesc, pszDir, uLevel);
1792
1793 /*
1794 * Unfortunately we cannot use our format handler here, we want R0 logging
1795 * as well.
1796 */
1797 switch (e1kGetDescType(pDesc))
1798 {
1799 case E1K_DTYP_CONTEXT:
1800 E1kLogX(uLevel, ("%s %s Context Transmit Descriptor %s\n",
1801 pThis->szPrf, pszDir, pszDir));
1802 E1kLogX(uLevel, (" IPCSS=%02X IPCSO=%02X IPCSE=%04X TUCSS=%02X TUCSO=%02X TUCSE=%04X\n",
1803 pDesc->context.ip.u8CSS, pDesc->context.ip.u8CSO, pDesc->context.ip.u16CSE,
1804 pDesc->context.tu.u8CSS, pDesc->context.tu.u8CSO, pDesc->context.tu.u16CSE));
1805 E1kLogX(uLevel, (" TUCMD:%s%s%s %s %s PAYLEN=%04x HDRLEN=%04x MSS=%04x STA: %s\n",
1806 pDesc->context.dw2.fIDE ? " IDE":"",
1807 pDesc->context.dw2.fRS ? " RS" :"",
1808 pDesc->context.dw2.fTSE ? " TSE":"",
1809 pDesc->context.dw2.fIP ? "IPv4":"IPv6",
1810 pDesc->context.dw2.fTCP ? "TCP":"UDP",
1811 pDesc->context.dw2.u20PAYLEN,
1812 pDesc->context.dw3.u8HDRLEN,
1813 pDesc->context.dw3.u16MSS,
1814 pDesc->context.dw3.fDD?"DD":""));
1815 break;
1816 case E1K_DTYP_DATA:
1817 E1kLogX(uLevel, ("%s %s Data Transmit Descriptor (%d bytes) %s\n",
1818 pThis->szPrf, pszDir, pDesc->data.cmd.u20DTALEN, pszDir));
1819 E1kLogX(uLevel, (" Address=%16LX DTALEN=%05X\n",
1820 pDesc->data.u64BufAddr,
1821 pDesc->data.cmd.u20DTALEN));
1822 E1kLogX(uLevel, (" DCMD:%s%s%s%s%s%s%s STA:%s%s%s POPTS:%s%s SPECIAL:%s VLAN=%03x PRI=%x\n",
1823 pDesc->data.cmd.fIDE ? " IDE" :"",
1824 pDesc->data.cmd.fVLE ? " VLE" :"",
1825 pDesc->data.cmd.fRPS ? " RPS" :"",
1826 pDesc->data.cmd.fRS ? " RS" :"",
1827 pDesc->data.cmd.fTSE ? " TSE" :"",
1828 pDesc->data.cmd.fIFCS? " IFCS":"",
1829 pDesc->data.cmd.fEOP ? " EOP" :"",
1830 pDesc->data.dw3.fDD ? " DD" :"",
1831 pDesc->data.dw3.fEC ? " EC" :"",
1832 pDesc->data.dw3.fLC ? " LC" :"",
1833 pDesc->data.dw3.fTXSM? " TXSM":"",
1834 pDesc->data.dw3.fIXSM? " IXSM":"",
1835 E1K_SPEC_CFI(pDesc->data.dw3.u16Special) ? "CFI" :"cfi",
1836 E1K_SPEC_VLAN(pDesc->data.dw3.u16Special),
1837 E1K_SPEC_PRI(pDesc->data.dw3.u16Special)));
1838 break;
1839 case E1K_DTYP_LEGACY:
1840 E1kLogX(uLevel, ("%s %s Legacy Transmit Descriptor (%d bytes) %s\n",
1841 pThis->szPrf, pszDir, pDesc->legacy.cmd.u16Length, pszDir));
1842 E1kLogX(uLevel, (" Address=%16LX DTALEN=%05X\n",
1843 pDesc->data.u64BufAddr,
1844 pDesc->legacy.cmd.u16Length));
1845 E1kLogX(uLevel, (" CMD:%s%s%s%s%s%s%s STA:%s%s%s CSO=%02x CSS=%02x SPECIAL:%s VLAN=%03x PRI=%x\n",
1846 pDesc->legacy.cmd.fIDE ? " IDE" :"",
1847 pDesc->legacy.cmd.fVLE ? " VLE" :"",
1848 pDesc->legacy.cmd.fRPS ? " RPS" :"",
1849 pDesc->legacy.cmd.fRS ? " RS" :"",
1850 pDesc->legacy.cmd.fIC ? " IC" :"",
1851 pDesc->legacy.cmd.fIFCS? " IFCS":"",
1852 pDesc->legacy.cmd.fEOP ? " EOP" :"",
1853 pDesc->legacy.dw3.fDD ? " DD" :"",
1854 pDesc->legacy.dw3.fEC ? " EC" :"",
1855 pDesc->legacy.dw3.fLC ? " LC" :"",
1856 pDesc->legacy.cmd.u8CSO,
1857 pDesc->legacy.dw3.u8CSS,
1858 E1K_SPEC_CFI(pDesc->legacy.dw3.u16Special) ? "CFI" :"cfi",
1859 E1K_SPEC_VLAN(pDesc->legacy.dw3.u16Special),
1860 E1K_SPEC_PRI(pDesc->legacy.dw3.u16Special)));
1861 break;
1862 default:
1863 E1kLog(("%s %s Invalid Transmit Descriptor %s\n",
1864 pThis->szPrf, pszDir, pszDir));
1865 break;
1866 }
1867}
1868
1869/**
1870 * Raise an interrupt later.
1871 *
1872 * @param pThis The device state structure.
1873 */
1874inline void e1kPostponeInterrupt(PE1KSTATE pThis, uint64_t uNanoseconds)
1875{
1876 if (!TMTimerIsActive(pThis->CTX_SUFF(pIntTimer)))
1877 TMTimerSetNano(pThis->CTX_SUFF(pIntTimer), uNanoseconds);
1878}
1879
1880/**
1881 * Raise interrupt if not masked.
1882 *
1883 * @param pThis The device state structure.
1884 */
1885static int e1kRaiseInterrupt(PE1KSTATE pThis, int rcBusy, uint32_t u32IntCause = 0)
1886{
1887 int rc = e1kCsEnter(pThis, rcBusy);
1888 if (RT_UNLIKELY(rc != VINF_SUCCESS))
1889 return rc;
1890
1891 E1K_INC_ISTAT_CNT(pThis->uStatIntTry);
1892 ICR |= u32IntCause;
1893 if (ICR & IMS)
1894 {
1895 if (pThis->fIntRaised)
1896 {
1897 E1K_INC_ISTAT_CNT(pThis->uStatIntSkip);
1898 E1kLog2(("%s e1kRaiseInterrupt: Already raised, skipped. ICR&IMS=%08x\n",
1899 pThis->szPrf, ICR & IMS));
1900 }
1901 else
1902 {
1903 uint64_t tsNow = TMTimerGet(pThis->CTX_SUFF(pIntTimer));
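                /* The ITR interval is specified in 256 ns units, hence the multiplication below. */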
1904 if (!!ITR && tsNow - pThis->u64AckedAt < ITR * 256
1905 && pThis->fItrEnabled && (pThis->fItrRxEnabled || !(ICR & ICR_RXT0)))
1906 {
1907 E1K_INC_ISTAT_CNT(pThis->uStatIntEarly);
1908 E1kLog2(("%s e1kRaiseInterrupt: Too early to raise again: %d ns < %d ns.\n",
1909 pThis->szPrf, (uint32_t)(tsNow - pThis->u64AckedAt), ITR * 256));
1910 e1kPostponeInterrupt(pThis, ITR * 256);
1911 }
1912 else
1913 {
1914
1915 /* Since we are delivering the interrupt now
1916 * there is no need to do it later -- stop the timer.
1917 */
1918 TMTimerStop(pThis->CTX_SUFF(pIntTimer));
1919 E1K_INC_ISTAT_CNT(pThis->uStatInt);
1920 STAM_COUNTER_INC(&pThis->StatIntsRaised);
1921 /* Got at least one unmasked interrupt cause */
1922 pThis->fIntRaised = true;
1923 /* Raise(1) INTA(0) */
1924 E1kLogRel(("E1000: irq RAISED icr&mask=0x%x, icr=0x%x\n", ICR & IMS, ICR));
1925 PDMDevHlpPCISetIrq(pThis->CTX_SUFF(pDevIns), 0, 1);
1926 E1kLog(("%s e1kRaiseInterrupt: Raised. ICR&IMS=%08x\n",
1927 pThis->szPrf, ICR & IMS));
1928 }
1929 }
1930 }
1931 else
1932 {
1933 E1K_INC_ISTAT_CNT(pThis->uStatIntMasked);
1934 E1kLog2(("%s e1kRaiseInterrupt: Not raising, ICR=%08x, IMS=%08x\n",
1935 pThis->szPrf, ICR, IMS));
1936 }
1937 e1kCsLeave(pThis);
1938 return VINF_SUCCESS;
1939}
1940
1941/**
1942 * Compute the physical address of the descriptor.
1943 *
1944 * @returns the physical address of the descriptor.
1945 *
1946 * @param baseHigh High-order 32 bits of descriptor table address.
1947 * @param baseLow Low-order 32 bits of descriptor table address.
1948 * @param idxDesc The descriptor index in the table.
1949 */
1950DECLINLINE(RTGCPHYS) e1kDescAddr(uint32_t baseHigh, uint32_t baseLow, uint32_t idxDesc)
1951{
1952 AssertCompile(sizeof(E1KRXDESC) == sizeof(E1KTXDESC));
1953 return ((uint64_t)baseHigh << 32) + baseLow + idxDesc * sizeof(E1KRXDESC);
1954}
1955
1956/**
1957 * Advance the head pointer of the receive descriptor queue.
1958 *
1959 * @remarks RDH always points to the next available RX descriptor.
1960 *
1961 * @param pThis The device state structure.
1962 */
1963DECLINLINE(void) e1kAdvanceRDH(PE1KSTATE pThis)
1964{
1965 Assert(e1kCsRxIsOwner(pThis));
1966 //e1kCsEnter(pThis, RT_SRC_POS);
1967 if (++RDH * sizeof(E1KRXDESC) >= RDLEN)
1968 RDH = 0;
1969 /*
1970 * Compute current receive queue length and fire RXDMT0 interrupt
1971 * if we are low on receive buffers
1972 */
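    /* Descriptors between RDH and RDT belong to the hardware; account for ring wrap-around. */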
1973 uint32_t uRQueueLen = RDH>RDT ? RDLEN/sizeof(E1KRXDESC)-RDH+RDT : RDT-RDH;
1974 /*
1975 * The minimum threshold is controlled by RDMTS bits of RCTL:
1976 * 00 = 1/2 of RDLEN
1977 * 01 = 1/4 of RDLEN
1978 * 10 = 1/8 of RDLEN
1979 * 11 = reserved
1980 */
1981 uint32_t uMinRQThreshold = RDLEN / sizeof(E1KRXDESC) / (2 << GET_BITS(RCTL, RDMTS));
1982 if (uRQueueLen <= uMinRQThreshold)
1983 {
1984 E1kLogRel(("E1000: low on RX descriptors, RDH=%x RDT=%x len=%x threshold=%x\n", RDH, RDT, uRQueueLen, uMinRQThreshold));
1985 E1kLog2(("%s Low on RX descriptors, RDH=%x RDT=%x len=%x threshold=%x, raise an interrupt\n",
1986 pThis->szPrf, RDH, RDT, uRQueueLen, uMinRQThreshold));
1987 E1K_INC_ISTAT_CNT(pThis->uStatIntRXDMT0);
1988 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_RXDMT0);
1989 }
1990 E1kLog2(("%s e1kAdvanceRDH: at exit RDH=%x RDT=%x len=%x\n",
1991 pThis->szPrf, RDH, RDT, uRQueueLen));
1992 //e1kCsLeave(pThis);
1993}
1994
1995#ifdef E1K_WITH_RXD_CACHE
1996/**
1997 * Return the number of RX descriptors that belong to the hardware.
1998 *
1999 * @returns the number of available descriptors in RX ring.
2000 * @param pThis The device state structure.
2001 * @thread ???
2002 */
2003DECLINLINE(uint32_t) e1kGetRxLen(PE1KSTATE pThis)
2004{
2005 /*
2006 * Make sure RDT won't change during computation. EMT may modify RDT at
2007 * any moment.
2008 */
2009 uint32_t rdt = RDT;
2010 return (RDH > rdt ? RDLEN/sizeof(E1KRXDESC) : 0) + rdt - RDH;
2011}
2012
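/**
 * Return the number of RX descriptors already fetched into the local cache but
 * not yet consumed by the receive path.
 *
 * @param pThis The device state structure.
 */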
2013DECLINLINE(unsigned) e1kRxDInCache(PE1KSTATE pThis)
2014{
2015 return pThis->nRxDFetched > pThis->iRxDCurrent ?
2016 pThis->nRxDFetched - pThis->iRxDCurrent : 0;
2017}
2018
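/**
 * Check whether the local RX descriptor cache holds no unconsumed descriptors.
 *
 * @param pThis The device state structure.
 */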
2019DECLINLINE(unsigned) e1kRxDIsCacheEmpty(PE1KSTATE pThis)
2020{
2021 return pThis->iRxDCurrent >= pThis->nRxDFetched;
2022}
2023
2024/**
2025 * Load receive descriptors from guest memory. The caller needs to be in Rx
2026 * critical section.
2027 *
2028 * We need two physical reads in case the tail wrapped around the end of RX
2029 * descriptor ring.
2030 *
2031 * @returns the actual number of descriptors fetched.
2032 * @param pThis The device state structure.
2035 * @thread EMT, RX
2036 */
2037DECLINLINE(unsigned) e1kRxDPrefetch(PE1KSTATE pThis)
2038{
2039 /* We've already loaded pThis->nRxDFetched descriptors past RDH. */
2040 unsigned nDescsAvailable = e1kGetRxLen(pThis) - e1kRxDInCache(pThis);
2041 unsigned nDescsToFetch = RT_MIN(nDescsAvailable, E1K_RXD_CACHE_SIZE - pThis->nRxDFetched);
2042 unsigned nDescsTotal = RDLEN / sizeof(E1KRXDESC);
2043 Assert(nDescsTotal != 0);
2044 if (nDescsTotal == 0)
2045 return 0;
2046 unsigned nFirstNotLoaded = (RDH + e1kRxDInCache(pThis)) % nDescsTotal;
2047 unsigned nDescsInSingleRead = RT_MIN(nDescsToFetch, nDescsTotal - nFirstNotLoaded);
2048 E1kLog3(("%s e1kRxDPrefetch: nDescsAvailable=%u nDescsToFetch=%u "
2049 "nDescsTotal=%u nFirstNotLoaded=0x%x nDescsInSingleRead=%u\n",
2050 pThis->szPrf, nDescsAvailable, nDescsToFetch, nDescsTotal,
2051 nFirstNotLoaded, nDescsInSingleRead));
2052 if (nDescsToFetch == 0)
2053 return 0;
2054 E1KRXDESC* pFirstEmptyDesc = &pThis->aRxDescriptors[pThis->nRxDFetched];
2055 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns),
2056 ((uint64_t)RDBAH << 32) + RDBAL + nFirstNotLoaded * sizeof(E1KRXDESC),
2057 pFirstEmptyDesc, nDescsInSingleRead * sizeof(E1KRXDESC));
2058 // uint64_t addrBase = ((uint64_t)RDBAH << 32) + RDBAL;
2059 // unsigned i, j;
2060 // for (i = pThis->nRxDFetched; i < pThis->nRxDFetched + nDescsInSingleRead; ++i)
2061 // {
2062 // pThis->aRxDescAddr[i] = addrBase + (nFirstNotLoaded + i - pThis->nRxDFetched) * sizeof(E1KRXDESC);
2063 // E1kLog3(("%s aRxDescAddr[%d] = %p\n", pThis->szPrf, i, pThis->aRxDescAddr[i]));
2064 // }
2065 E1kLog3(("%s Fetched %u RX descriptors at %08x%08x(0x%x), RDLEN=%08x, RDH=%08x, RDT=%08x\n",
2066 pThis->szPrf, nDescsInSingleRead,
2067 RDBAH, RDBAL + RDH * sizeof(E1KRXDESC),
2068 nFirstNotLoaded, RDLEN, RDH, RDT));
2069 if (nDescsToFetch > nDescsInSingleRead)
2070 {
2071 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns),
2072 ((uint64_t)RDBAH << 32) + RDBAL,
2073 pFirstEmptyDesc + nDescsInSingleRead,
2074 (nDescsToFetch - nDescsInSingleRead) * sizeof(E1KRXDESC));
2075 // Assert(i == pThis->nRxDFetched + nDescsInSingleRead);
2076 // for (j = 0; i < pThis->nRxDFetched + nDescsToFetch; ++i, ++j)
2077 // {
2078 // pThis->aRxDescAddr[i] = addrBase + j * sizeof(E1KRXDESC);
2079 // E1kLog3(("%s aRxDescAddr[%d] = %p\n", pThis->szPrf, i, pThis->aRxDescAddr[i]));
2080 // }
2081 E1kLog3(("%s Fetched %u RX descriptors at %08x%08x\n",
2082 pThis->szPrf, nDescsToFetch - nDescsInSingleRead,
2083 RDBAH, RDBAL));
2084 }
2085 pThis->nRxDFetched += nDescsToFetch;
2086 return nDescsToFetch;
2087}
2088
2089/**
2090 * Obtain the next RX descriptor from RXD cache, fetching descriptors from the
2091 * RX ring if the cache is empty.
2092 *
2093 * Note that we cannot advance the cache pointer (iRxDCurrent) yet, as it would
2094 * go out of sync with RDH, which would cause trouble when EMT checks whether the
2095 * cache is empty before pre-fetching (see @bugref{6217}).
2096 *
2097 * @param pThis The device state structure.
2098 * @thread RX
2099 */
2100DECLINLINE(E1KRXDESC*) e1kRxDGet(PE1KSTATE pThis)
2101{
2102 Assert(e1kCsRxIsOwner(pThis));
2103 /* Check the cache first. */
2104 if (pThis->iRxDCurrent < pThis->nRxDFetched)
2105 return &pThis->aRxDescriptors[pThis->iRxDCurrent];
2106 /* Cache is empty, reset it and check if we can fetch more. */
2107 pThis->iRxDCurrent = pThis->nRxDFetched = 0;
2108 if (e1kRxDPrefetch(pThis))
2109 return &pThis->aRxDescriptors[pThis->iRxDCurrent];
2110 /* Out of Rx descriptors. */
2111 return NULL;
2112}
2113
2114/**
2115 * Return the RX descriptor obtained with e1kRxDGet() and advance the cache
2116 * pointer. The descriptor gets written back to the RXD ring.
2117 *
2118 * @param pThis The device state structure.
2119 * @param pDesc The descriptor being "returned" to the RX ring.
2120 * @thread RX
2121 */
2122DECLINLINE(void) e1kRxDPut(PE1KSTATE pThis, E1KRXDESC* pDesc)
2123{
2124 Assert(e1kCsRxIsOwner(pThis));
2125 pThis->iRxDCurrent++;
2126 // Assert(pDesc >= pThis->aRxDescriptors);
2127 // Assert(pDesc < pThis->aRxDescriptors + E1K_RXD_CACHE_SIZE);
2128 // uint64_t addr = e1kDescAddr(RDBAH, RDBAL, RDH);
2129 // uint32_t rdh = RDH;
2130 // Assert(pThis->aRxDescAddr[pDesc - pThis->aRxDescriptors] == addr);
2131 PDMDevHlpPCIPhysWrite(pThis->CTX_SUFF(pDevIns),
2132 e1kDescAddr(RDBAH, RDBAL, RDH),
2133 pDesc, sizeof(E1KRXDESC));
2134 e1kAdvanceRDH(pThis);
2135 e1kPrintRDesc(pThis, pDesc);
2136}
2137
2138/**
2139 * Store a fragment of a received packet at the specified address.
2140 *
2141 * @param pThis The device state structure.
2142 * @param pDesc The next available RX descriptor.
2143 * @param pvBuf The fragment.
2144 * @param cb The size of the fragment.
2145 */
2146static DECLCALLBACK(void) e1kStoreRxFragment(PE1KSTATE pThis, E1KRXDESC *pDesc, const void *pvBuf, size_t cb)
2147{
2148 STAM_PROFILE_ADV_START(&pThis->StatReceiveStore, a);
2149 E1kLog2(("%s e1kStoreRxFragment: store fragment of %04X at %016LX, EOP=%d\n",
2150 pThis->szPrf, cb, pDesc->u64BufAddr, pDesc->status.fEOP));
2151 PDMDevHlpPCIPhysWrite(pThis->CTX_SUFF(pDevIns), pDesc->u64BufAddr, pvBuf, cb);
2152 pDesc->u16Length = (uint16_t)cb; Assert(pDesc->u16Length == cb);
2153 STAM_PROFILE_ADV_STOP(&pThis->StatReceiveStore, a);
2154}
2155
2156#else /* !E1K_WITH_RXD_CACHE */
2157
2158/**
2159 * Store a fragment of a received packet that fits into the next available RX
2160 * buffer.
2161 *
2162 * @remarks Trigger the RXT0 interrupt if it is the last fragment of the packet.
2163 *
2164 * @param pThis The device state structure.
2165 * @param pDesc The next available RX descriptor.
2166 * @param pvBuf The fragment.
2167 * @param cb The size of the fragment.
2168 */
2169static DECLCALLBACK(void) e1kStoreRxFragment(PE1KSTATE pThis, E1KRXDESC *pDesc, const void *pvBuf, size_t cb)
2170{
2171 STAM_PROFILE_ADV_START(&pThis->StatReceiveStore, a);
2172 E1kLog2(("%s e1kStoreRxFragment: store fragment of %04X at %016LX, EOP=%d\n", pThis->szPrf, cb, pDesc->u64BufAddr, pDesc->status.fEOP));
2173 PDMDevHlpPCIPhysWrite(pThis->CTX_SUFF(pDevIns), pDesc->u64BufAddr, pvBuf, cb);
2174 pDesc->u16Length = (uint16_t)cb; Assert(pDesc->u16Length == cb);
2175 /* Write back the descriptor */
2176 PDMDevHlpPCIPhysWrite(pThis->CTX_SUFF(pDevIns), e1kDescAddr(RDBAH, RDBAL, RDH), pDesc, sizeof(E1KRXDESC));
2177 e1kPrintRDesc(pThis, pDesc);
2178 E1kLogRel(("E1000: Wrote back RX desc, RDH=%x\n", RDH));
2179 /* Advance head */
2180 e1kAdvanceRDH(pThis);
2181 //E1kLog2(("%s e1kStoreRxFragment: EOP=%d RDTR=%08X RADV=%08X\n", pThis->szPrf, pDesc->fEOP, RDTR, RADV));
2182 if (pDesc->status.fEOP)
2183 {
2184 /* Complete packet has been stored -- it is time to let the guest know. */
2185#ifdef E1K_USE_RX_TIMERS
2186 if (RDTR)
2187 {
2188 /* Arm the timer to fire in RDTR usec (discard .024) */
2189 e1kArmTimer(pThis, pThis->CTX_SUFF(pRIDTimer), RDTR);
2190 /* If absolute timer delay is enabled and the timer is not running yet, arm it. */
2191 if (RADV != 0 && !TMTimerIsActive(pThis->CTX_SUFF(pRADTimer)))
2192 e1kArmTimer(pThis, pThis->CTX_SUFF(pRADTimer), RADV);
2193 }
2194 else
2195 {
2196#endif
2197 /* 0 delay means immediate interrupt */
2198 E1K_INC_ISTAT_CNT(pThis->uStatIntRx);
2199 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_RXT0);
2200#ifdef E1K_USE_RX_TIMERS
2201 }
2202#endif
2203 }
2204 STAM_PROFILE_ADV_STOP(&pThis->StatReceiveStore, a);
2205}
2206#endif /* !E1K_WITH_RXD_CACHE */
2207
2208/**
2209 * Returns true if it is a broadcast packet.
2210 *
2211 * @returns true if destination address indicates broadcast.
2212 * @param pvBuf The ethernet packet.
2213 */
2214DECLINLINE(bool) e1kIsBroadcast(const void *pvBuf)
2215{
2216 static const uint8_t s_abBcastAddr[] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
2217 return memcmp(pvBuf, s_abBcastAddr, sizeof(s_abBcastAddr)) == 0;
2218}
2219
2220/**
2221 * Returns true if it is a multicast packet.
2222 *
2223 * @remarks returns true for broadcast packets as well.
2224 * @returns true if destination address indicates multicast.
2225 * @param pvBuf The ethernet packet.
2226 */
2227DECLINLINE(bool) e1kIsMulticast(const void *pvBuf)
2228{
2229 return (*(char*)pvBuf) & 1;
2230}
2231
2232/**
2233 * Set IXSM, IPCS and TCPCS flags according to the packet type.
2234 *
2235 * @remarks We emulate checksum offloading for major packets types only.
2236 *
2237 * @returns VBox status code.
2238 * @param pThis The device state structure.
2239 * @param pFrame The available data.
2240 * @param cb Number of bytes available in the buffer.
2241 * @param pStatus Pointer to the descriptor status bit fields to update.
2242 */
2243static int e1kRxChecksumOffload(PE1KSTATE pThis, const uint8_t *pFrame, size_t cb, E1KRXDST *pStatus)
2244{
2245 /** @todo
2246 * It is not safe to bypass checksum verification for packets coming
2247 * from the real wire. We are currently unable to tell where packets
2248 * come from, so we tell the driver to ignore our checksum flags
2249 * and do the verification in software.
2250 */
2251#if 0
2252 uint16_t uEtherType = ntohs(*(uint16_t*)(pFrame + 12));
2253
2254 E1kLog2(("%s e1kRxChecksumOffload: EtherType=%x\n", pThis->szPrf, uEtherType));
2255
2256 switch (uEtherType)
2257 {
2258 case 0x800: /* IPv4 */
2259 {
2260 pStatus->fIXSM = false;
2261 pStatus->fIPCS = true;
2262 PRTNETIPV4 pIpHdr4 = (PRTNETIPV4)(pFrame + 14);
2263 /* TCP/UDP checksum offloading works with TCP and UDP only */
2264 pStatus->fTCPCS = pIpHdr4->ip_p == 6 || pIpHdr4->ip_p == 17;
2265 break;
2266 }
2267 case 0x86DD: /* IPv6 */
2268 pStatus->fIXSM = false;
2269 pStatus->fIPCS = false;
2270 pStatus->fTCPCS = true;
2271 break;
2272 default: /* ARP, VLAN, etc. */
2273 pStatus->fIXSM = true;
2274 break;
2275 }
2276#else
2277 pStatus->fIXSM = true;
2278 RT_NOREF_PV(pThis); RT_NOREF_PV(pFrame); RT_NOREF_PV(cb);
2279#endif
2280 return VINF_SUCCESS;
2281}
2282
2283/**
2284 * Pad and store received packet.
2285 *
2286 * @remarks Make sure that the packet appears to the upper layer as one coming
2287 * from real Ethernet: pad it and insert FCS.
2288 *
2289 * @returns VBox status code.
2290 * @param pThis The device state structure.
2291 * @param pvBuf The available data.
2292 * @param cb Number of bytes available in the buffer.
2293 * @param status Bit fields containing status info.
2294 */
2295static int e1kHandleRxPacket(PE1KSTATE pThis, const void *pvBuf, size_t cb, E1KRXDST status)
2296{
2297#if defined(IN_RING3) /** @todo Remove this extra copying, it's gonna make us run out of kernel / hypervisor stack! */
2298 uint8_t rxPacket[E1K_MAX_RX_PKT_SIZE];
2299 uint8_t *ptr = rxPacket;
2300
2301 int rc = e1kCsRxEnter(pThis, VERR_SEM_BUSY);
2302 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2303 return rc;
2304
2305 if (cb > 70) /* unqualified guess */
2306 pThis->led.Asserted.s.fReading = pThis->led.Actual.s.fReading = 1;
2307
2308 Assert(cb <= E1K_MAX_RX_PKT_SIZE);
2309 Assert(cb > 16);
2310 size_t cbMax = ((RCTL & RCTL_LPE) ? E1K_MAX_RX_PKT_SIZE - 4 : 1518) - (status.fVP ? 0 : 4);
2311 E1kLog3(("%s Max RX packet size is %u\n", pThis->szPrf, cbMax));
2312 if (status.fVP)
2313 {
2314 /* VLAN packet -- strip VLAN tag in VLAN mode */
2315 if ((CTRL & CTRL_VME) && cb > 16)
2316 {
2317 uint16_t *u16Ptr = (uint16_t*)pvBuf;
2318 memcpy(rxPacket, pvBuf, 12); /* Copy src and dst addresses */
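            /* The 802.1Q header follows the MAC addresses: TPID at byte offset 12, TCI (priority/CFI/VLAN ID) at offset 14, i.e. u16Ptr[7]. */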
2319 status.u16Special = RT_BE2H_U16(u16Ptr[7]); /* Extract VLAN tag */
2320 memcpy(rxPacket + 12, (uint8_t*)pvBuf + 16, cb - 16); /* Copy the rest of the packet */
2321 cb -= 4;
2322 E1kLog3(("%s Stripped tag for VLAN %u (cb=%u)\n",
2323 pThis->szPrf, status.u16Special, cb));
2324 }
2325 else
2326 status.fVP = false; /* Set VP only if we stripped the tag */
2327 }
2328 else
2329 memcpy(rxPacket, pvBuf, cb);
2330 /* Pad short packets */
2331 if (cb < 60)
2332 {
2333 memset(rxPacket + cb, 0, 60 - cb);
2334 cb = 60;
2335 }
2336 if (!(RCTL & RCTL_SECRC) && cb <= cbMax)
2337 {
2338 STAM_PROFILE_ADV_START(&pThis->StatReceiveCRC, a);
2339 /*
2340 * Add FCS if CRC stripping is not enabled. Since the value of CRC
2341 * is ignored by most drivers, we may as well save ourselves the trouble
2342 * of calculating it (see the EthernetCRC CFGM parameter).
2343 */
2344 if (pThis->fEthernetCRC)
2345 *(uint32_t*)(rxPacket + cb) = RTCrc32(rxPacket, cb);
2346 cb += sizeof(uint32_t);
2347 STAM_PROFILE_ADV_STOP(&pThis->StatReceiveCRC, a);
2348 E1kLog3(("%s Added FCS (cb=%u)\n", pThis->szPrf, cb));
2349 }
2350 /* Compute checksum of complete packet */
2351 uint16_t checksum = e1kCSum16(rxPacket + GET_BITS(RXCSUM, PCSS), cb);
2352 e1kRxChecksumOffload(pThis, rxPacket, cb, &status);
2353
2354 /* Update stats */
2355 E1K_INC_CNT32(GPRC);
2356 if (e1kIsBroadcast(pvBuf))
2357 E1K_INC_CNT32(BPRC);
2358 else if (e1kIsMulticast(pvBuf))
2359 E1K_INC_CNT32(MPRC);
2360 /* Update octet receive counter */
2361 E1K_ADD_CNT64(GORCL, GORCH, cb);
2362 STAM_REL_COUNTER_ADD(&pThis->StatReceiveBytes, cb);
2363 if (cb == 64)
2364 E1K_INC_CNT32(PRC64);
2365 else if (cb < 128)
2366 E1K_INC_CNT32(PRC127);
2367 else if (cb < 256)
2368 E1K_INC_CNT32(PRC255);
2369 else if (cb < 512)
2370 E1K_INC_CNT32(PRC511);
2371 else if (cb < 1024)
2372 E1K_INC_CNT32(PRC1023);
2373 else
2374 E1K_INC_CNT32(PRC1522);
2375
2376 E1K_INC_ISTAT_CNT(pThis->uStatRxFrm);
2377
2378# ifdef E1K_WITH_RXD_CACHE
2379 while (cb > 0)
2380 {
2381 E1KRXDESC *pDesc = e1kRxDGet(pThis);
2382
2383 if (pDesc == NULL)
2384 {
2385 E1kLog(("%s Out of receive buffers, dropping the packet "
2386 "(cb=%u, in_cache=%u, RDH=%x RDT=%x)\n",
2387 pThis->szPrf, cb, e1kRxDInCache(pThis), RDH, RDT));
2388 break;
2389 }
2390# else /* !E1K_WITH_RXD_CACHE */
2391 if (RDH == RDT)
2392 {
2393 E1kLog(("%s Out of receive buffers, dropping the packet\n",
2394 pThis->szPrf));
2395 }
2396 /* Store the packet to receive buffers */
2397 while (RDH != RDT)
2398 {
2399 /* Load the descriptor pointed by head */
2400 E1KRXDESC desc, *pDesc = &desc;
2401 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), e1kDescAddr(RDBAH, RDBAL, RDH),
2402 &desc, sizeof(desc));
2403# endif /* !E1K_WITH_RXD_CACHE */
2404 if (pDesc->u64BufAddr)
2405 {
2406 /* Update descriptor */
2407 pDesc->status = status;
2408 pDesc->u16Checksum = checksum;
2409 pDesc->status.fDD = true;
2410
2411 /*
2412 * We need to leave Rx critical section here or we risk deadlocking
2413 * with EMT in e1kRegWriteRDT when the write is to an unallocated
2414 * page or has an access handler associated with it.
2415 * Note that it is safe to leave the critical section here since
2416 * e1kRegWriteRDT() never modifies RDH. It never touches already
2417 * fetched RxD cache entries either.
2418 */
2419 if (cb > pThis->u16RxBSize)
2420 {
2421 pDesc->status.fEOP = false;
2422 e1kCsRxLeave(pThis);
2423 e1kStoreRxFragment(pThis, pDesc, ptr, pThis->u16RxBSize);
2424 rc = e1kCsRxEnter(pThis, VERR_SEM_BUSY);
2425 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2426 return rc;
2427 ptr += pThis->u16RxBSize;
2428 cb -= pThis->u16RxBSize;
2429 }
2430 else
2431 {
2432 pDesc->status.fEOP = true;
2433 e1kCsRxLeave(pThis);
2434 e1kStoreRxFragment(pThis, pDesc, ptr, cb);
2435# ifdef E1K_WITH_RXD_CACHE
2436 rc = e1kCsRxEnter(pThis, VERR_SEM_BUSY);
2437 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2438 return rc;
2439 cb = 0;
2440# else /* !E1K_WITH_RXD_CACHE */
2441 pThis->led.Actual.s.fReading = 0;
2442 return VINF_SUCCESS;
2443# endif /* !E1K_WITH_RXD_CACHE */
2444 }
2445 /*
2446 * Note: RDH is advanced by e1kStoreRxFragment if E1K_WITH_RXD_CACHE
2447 * is not defined.
2448 */
2449 }
2450# ifdef E1K_WITH_RXD_CACHE
2451 /* Write back the descriptor. */
2452 pDesc->status.fDD = true;
2453 e1kRxDPut(pThis, pDesc);
2454# else /* !E1K_WITH_RXD_CACHE */
2455 else
2456 {
2457 /* Write back the descriptor. */
2458 pDesc->status.fDD = true;
2459 PDMDevHlpPCIPhysWrite(pThis->CTX_SUFF(pDevIns),
2460 e1kDescAddr(RDBAH, RDBAL, RDH),
2461 pDesc, sizeof(E1KRXDESC));
2462 e1kAdvanceRDH(pThis);
2463 }
2464# endif /* !E1K_WITH_RXD_CACHE */
2465 }
2466
2467 if (cb > 0)
2468 E1kLog(("%s Out of receive buffers, dropping %u bytes\n", pThis->szPrf, cb));
2469
2470 pThis->led.Actual.s.fReading = 0;
2471
2472 e1kCsRxLeave(pThis);
2473# ifdef E1K_WITH_RXD_CACHE
2474 /* Complete packet has been stored -- it is time to let the guest know. */
2475# ifdef E1K_USE_RX_TIMERS
2476 if (RDTR)
2477 {
2478 /* Arm the timer to fire in RDTR usec (discard .024) */
2479 e1kArmTimer(pThis, pThis->CTX_SUFF(pRIDTimer), RDTR);
2480 /* If absolute timer delay is enabled and the timer is not running yet, arm it. */
2481 if (RADV != 0 && !TMTimerIsActive(pThis->CTX_SUFF(pRADTimer)))
2482 e1kArmTimer(pThis, pThis->CTX_SUFF(pRADTimer), RADV);
2483 }
2484 else
2485 {
2486# endif /* E1K_USE_RX_TIMERS */
2487 /* 0 delay means immediate interrupt */
2488 E1K_INC_ISTAT_CNT(pThis->uStatIntRx);
2489 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_RXT0);
2490# ifdef E1K_USE_RX_TIMERS
2491 }
2492# endif /* E1K_USE_RX_TIMERS */
2493# endif /* E1K_WITH_RXD_CACHE */
2494
2495 return VINF_SUCCESS;
2496#else /* !IN_RING3 */
2497 RT_NOREF_PV(pThis); RT_NOREF_PV(pvBuf); RT_NOREF_PV(cb); RT_NOREF_PV(status);
2498 return VERR_INTERNAL_ERROR_2;
2499#endif /* !IN_RING3 */
2500}
2501
2502
2503/**
2504 * Bring the link up after the configured delay, 5 seconds by default.
2505 *
2506 * @param pThis The device state structure.
2507 * @thread any
2508 */
2509DECLINLINE(void) e1kBringLinkUpDelayed(PE1KSTATE pThis)
2510{
2511 E1kLog(("%s Will bring up the link in %d seconds...\n",
2512 pThis->szPrf, pThis->cMsLinkUpDelay / 1000));
2513 e1kArmTimer(pThis, pThis->CTX_SUFF(pLUTimer), pThis->cMsLinkUpDelay * 1000);
2514}
2515
2516#ifdef IN_RING3
2517/**
2518 * Bring up the link immediately.
2519 *
2520 * @param pThis The device state structure.
2521 */
2522DECLINLINE(void) e1kR3LinkUp(PE1KSTATE pThis)
2523{
2524 E1kLog(("%s Link is up\n", pThis->szPrf));
2525 STATUS |= STATUS_LU;
2526 Phy::setLinkStatus(&pThis->phy, true);
2527 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_LSC);
2528 if (pThis->pDrvR3)
2529 pThis->pDrvR3->pfnNotifyLinkChanged(pThis->pDrvR3, PDMNETWORKLINKSTATE_UP);
2530}
2531
2532/**
2533 * Bring down the link immediately.
2534 *
2535 * @param pThis The device state structure.
2536 */
2537DECLINLINE(void) e1kR3LinkDown(PE1KSTATE pThis)
2538{
2539 E1kLog(("%s Link is down\n", pThis->szPrf));
2540 STATUS &= ~STATUS_LU;
2541 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_LSC);
2542 if (pThis->pDrvR3)
2543 pThis->pDrvR3->pfnNotifyLinkChanged(pThis->pDrvR3, PDMNETWORKLINKSTATE_DOWN);
2544}
2545
2546/**
2547 * Bring down the link temporarily.
2548 *
2549 * @param pThis The device state structure.
2550 */
2551DECLINLINE(void) e1kR3LinkDownTemp(PE1KSTATE pThis)
2552{
2553 E1kLog(("%s Link is down temporarily\n", pThis->szPrf));
2554 STATUS &= ~STATUS_LU;
2555 Phy::setLinkStatus(&pThis->phy, false);
2556 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_LSC);
2557 /*
2558 * Notifying the associated driver that the link went down (even temporarily)
2559 * seems to be the right thing, but it was not done before. This may cause
2560 * a regression if the driver does not expect the link to go down as a result
2561 * of sending PDMNETWORKLINKSTATE_DOWN_RESUME to this device. Earlier versions
2562 * of code notified the driver that the link was up! See @bugref{7057}.
2563 */
2564 if (pThis->pDrvR3)
2565 pThis->pDrvR3->pfnNotifyLinkChanged(pThis->pDrvR3, PDMNETWORKLINKSTATE_DOWN);
2566 e1kBringLinkUpDelayed(pThis);
2567}
2568#endif /* IN_RING3 */
2569
2570#if 0 /* unused */
2571/**
2572 * Read handler for Device Status register.
2573 *
2574 * Get the link status from PHY.
2575 *
2576 * @returns VBox status code.
2577 *
2578 * @param pThis The device state structure.
2579 * @param offset Register offset in memory-mapped frame.
2580 * @param index Register index in register array.
2581 * @param mask Used to implement partial reads (8 and 16-bit).
2582 */
2583static int e1kRegReadCTRL(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
2584{
2585 E1kLog(("%s e1kRegReadCTRL: mdio dir=%s mdc dir=%s mdc=%d\n",
2586 pThis->szPrf, (CTRL & CTRL_MDIO_DIR)?"OUT":"IN ",
2587 (CTRL & CTRL_MDC_DIR)?"OUT":"IN ", !!(CTRL & CTRL_MDC)));
2588 if ((CTRL & CTRL_MDIO_DIR) == 0 && (CTRL & CTRL_MDC))
2589 {
2590 /* MDC is high and MDIO pin is used for input, read MDIO pin from PHY */
2591 if (Phy::readMDIO(&pThis->phy))
2592 *pu32Value = CTRL | CTRL_MDIO;
2593 else
2594 *pu32Value = CTRL & ~CTRL_MDIO;
2595 E1kLog(("%s e1kRegReadCTRL: Phy::readMDIO(%d)\n",
2596 pThis->szPrf, !!(*pu32Value & CTRL_MDIO)));
2597 }
2598 else
2599 {
2600 /* MDIO pin is used for output, ignore it */
2601 *pu32Value = CTRL;
2602 }
2603 return VINF_SUCCESS;
2604}
2605#endif /* unused */
2606
2607/**
2608 * Write handler for Device Control register.
2609 *
2610 * Handles reset.
2611 *
2612 * @param pThis The device state structure.
2613 * @param offset Register offset in memory-mapped frame.
2614 * @param index Register index in register array.
2615 * @param value The value to store.
2616 * @param mask Used to implement partial writes (8 and 16-bit).
2617 * @thread EMT
2618 */
2619static int e1kRegWriteCTRL(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2620{
2621 int rc = VINF_SUCCESS;
2622
2623 if (value & CTRL_RESET)
2624 { /* RST */
2625#ifndef IN_RING3
2626 return VINF_IOM_R3_MMIO_WRITE;
2627#else
2628 e1kHardReset(pThis);
2629#endif
2630 }
2631 else
2632 {
2633 if ( (value & CTRL_SLU)
2634 && pThis->fCableConnected
2635 && !(STATUS & STATUS_LU))
2636 {
2637 /* The driver indicates that we should bring up the link */
2638 /* Do so in 5 seconds (by default). */
2639 e1kBringLinkUpDelayed(pThis);
2640 /*
2641 * Change the status (but not PHY status) anyway as Windows expects
2642 * it for 82543GC.
2643 */
2644 STATUS |= STATUS_LU;
2645 }
2646 if (value & CTRL_VME)
2647 {
2648 E1kLog(("%s VLAN Mode Enabled\n", pThis->szPrf));
2649 }
2650 E1kLog(("%s e1kRegWriteCTRL: mdio dir=%s mdc dir=%s mdc=%s mdio=%d\n",
2651 pThis->szPrf, (value & CTRL_MDIO_DIR)?"OUT":"IN ",
2652 (value & CTRL_MDC_DIR)?"OUT":"IN ", (value & CTRL_MDC)?"HIGH":"LOW ", !!(value & CTRL_MDIO)));
2653 if (value & CTRL_MDC)
2654 {
2655 if (value & CTRL_MDIO_DIR)
2656 {
2657 E1kLog(("%s e1kRegWriteCTRL: Phy::writeMDIO(%d)\n", pThis->szPrf, !!(value & CTRL_MDIO)));
2658 /* MDIO direction pin is set to output and MDC is high, write MDIO pin value to PHY */
2659 Phy::writeMDIO(&pThis->phy, !!(value & CTRL_MDIO));
2660 }
2661 else
2662 {
2663 if (Phy::readMDIO(&pThis->phy))
2664 value |= CTRL_MDIO;
2665 else
2666 value &= ~CTRL_MDIO;
2667 E1kLog(("%s e1kRegWriteCTRL: Phy::readMDIO(%d)\n",
2668 pThis->szPrf, !!(value & CTRL_MDIO)));
2669 }
2670 }
2671 rc = e1kRegWriteDefault(pThis, offset, index, value);
2672 }
2673
2674 return rc;
2675}
2676
2677/**
2678 * Write handler for EEPROM/Flash Control/Data register.
2679 *
2680 * Handles EEPROM access requests; forwards writes to EEPROM device if access has been granted.
2681 *
2682 * @param pThis The device state structure.
2683 * @param offset Register offset in memory-mapped frame.
2684 * @param index Register index in register array.
2685 * @param value The value to store.
2686 * @param mask Used to implement partial writes (8 and 16-bit).
2687 * @thread EMT
2688 */
2689static int e1kRegWriteEECD(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2690{
2691 RT_NOREF(offset, index);
2692#ifdef IN_RING3
2693 /* So far we are only concerned with the lower byte */
2694 if ((EECD & EECD_EE_GNT) || pThis->eChip == E1K_CHIP_82543GC)
2695 {
2696 /* Access to EEPROM granted -- forward 4-wire bits to EEPROM device */
2697 /* Note: 82543GC does not need to request EEPROM access */
2698 STAM_PROFILE_ADV_START(&pThis->StatEEPROMWrite, a);
2699 pThis->eeprom.write(value & EECD_EE_WIRES);
2700 STAM_PROFILE_ADV_STOP(&pThis->StatEEPROMWrite, a);
2701 }
2702 if (value & EECD_EE_REQ)
2703 EECD |= EECD_EE_REQ|EECD_EE_GNT;
2704 else
2705 EECD &= ~EECD_EE_GNT;
2706 //e1kRegWriteDefault(pThis, offset, index, value );
2707
2708 return VINF_SUCCESS;
2709#else /* !IN_RING3 */
2710 RT_NOREF(pThis, value);
2711 return VINF_IOM_R3_MMIO_WRITE;
2712#endif /* !IN_RING3 */
2713}
2714
2715/**
2716 * Read handler for EEPROM/Flash Control/Data register.
2717 *
2718 * Lower 4 bits come from EEPROM device if EEPROM access has been granted.
2719 *
2720 * @returns VBox status code.
2721 *
2722 * @param pThis The device state structure.
2723 * @param offset Register offset in memory-mapped frame.
2724 * @param index Register index in register array.
2725 * @param mask Used to implement partial reads (8 and 16-bit).
2726 * @thread EMT
2727 */
2728static int e1kRegReadEECD(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
2729{
2730#ifdef IN_RING3
2731 uint32_t value;
2732 int rc = e1kRegReadDefault(pThis, offset, index, &value);
2733 if (RT_SUCCESS(rc))
2734 {
2735 if ((value & EECD_EE_GNT) || pThis->eChip == E1K_CHIP_82543GC)
2736 {
2737 /* Note: 82543GC does not need to request EEPROM access */
2738 /* Access to EEPROM granted -- get 4-wire bits from the EEPROM device */
2739 STAM_PROFILE_ADV_START(&pThis->StatEEPROMRead, a);
2740 value |= pThis->eeprom.read();
2741 STAM_PROFILE_ADV_STOP(&pThis->StatEEPROMRead, a);
2742 }
2743 *pu32Value = value;
2744 }
2745
2746 return rc;
2747#else /* !IN_RING3 */
2748 RT_NOREF_PV(pThis); RT_NOREF_PV(offset); RT_NOREF_PV(index); RT_NOREF_PV(pu32Value);
2749 return VINF_IOM_R3_MMIO_READ;
2750#endif /* !IN_RING3 */
2751}
2752
2753/**
2754 * Write handler for EEPROM Read register.
2755 *
2756 * Handles EEPROM word access requests, reads EEPROM and stores the result
2757 * into DATA field.
2758 *
2759 * @param pThis The device state structure.
2760 * @param offset Register offset in memory-mapped frame.
2761 * @param index Register index in register array.
2762 * @param value The value to store.
2763 * @param mask Used to implement partial writes (8 and 16-bit).
2764 * @thread EMT
2765 */
2766static int e1kRegWriteEERD(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2767{
2768#ifdef IN_RING3
2769 /* Make use of 'writable' and 'readable' masks. */
2770 e1kRegWriteDefault(pThis, offset, index, value);
2771 /* DONE and DATA are set only if read was triggered by START. */
2772 if (value & EERD_START)
2773 {
2774 uint16_t tmp;
2775 STAM_PROFILE_ADV_START(&pThis->StatEEPROMRead, a);
2776 if (pThis->eeprom.readWord(GET_BITS_V(value, EERD, ADDR), &tmp))
2777 SET_BITS(EERD, DATA, tmp);
2778 EERD |= EERD_DONE;
2779 STAM_PROFILE_ADV_STOP(&pThis->StatEEPROMRead, a);
2780 }
2781
2782 return VINF_SUCCESS;
2783#else /* !IN_RING3 */
2784 RT_NOREF_PV(pThis); RT_NOREF_PV(offset); RT_NOREF_PV(index); RT_NOREF_PV(value);
2785 return VINF_IOM_R3_MMIO_WRITE;
2786#endif /* !IN_RING3 */
2787}
2788
2789
2790/**
2791 * Write handler for MDI Control register.
2792 *
2793 * Handles PHY read/write requests; forwards requests to internal PHY device.
2794 *
2795 * @param pThis The device state structure.
2796 * @param offset Register offset in memory-mapped frame.
2797 * @param index Register index in register array.
2798 * @param value The value to store.
2799 * @param mask Used to implement partial writes (8 and 16-bit).
2800 * @thread EMT
2801 */
2802static int e1kRegWriteMDIC(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2803{
2804 if (value & MDIC_INT_EN)
2805 {
2806 E1kLog(("%s ERROR! Interrupt at the end of an MDI cycle is not supported yet.\n",
2807 pThis->szPrf));
2808 }
2809 else if (value & MDIC_READY)
2810 {
2811 E1kLog(("%s ERROR! Ready bit is not reset by software during write operation.\n",
2812 pThis->szPrf));
2813 }
2814 else if (GET_BITS_V(value, MDIC, PHY) != 1)
2815 {
2816 E1kLog(("%s WARNING! Access to invalid PHY detected, phy=%d.\n",
2817 pThis->szPrf, GET_BITS_V(value, MDIC, PHY)));
2818 /*
2819 * Some drivers scan the MDIO bus for a PHY. We can work with these
2820 * drivers if we set MDIC_READY and MDIC_ERROR when there isn't a PHY
2821 * at the requested address, see @bugref{7346}.
2822 */
2823 MDIC = MDIC_READY | MDIC_ERROR;
2824 }
2825 else
2826 {
2827 /* Store the value */
2828 e1kRegWriteDefault(pThis, offset, index, value);
2829 STAM_COUNTER_INC(&pThis->StatPHYAccesses);
2830 /* Forward op to PHY */
2831 if (value & MDIC_OP_READ)
2832 SET_BITS(MDIC, DATA, Phy::readRegister(&pThis->phy, GET_BITS_V(value, MDIC, REG)));
2833 else
2834 Phy::writeRegister(&pThis->phy, GET_BITS_V(value, MDIC, REG), value & MDIC_DATA_MASK);
2835 /* Let software know that we are done */
2836 MDIC |= MDIC_READY;
2837 }
2838
2839 return VINF_SUCCESS;
2840}
2841
2842/**
2843 * Write handler for Interrupt Cause Read register.
2844 *
2845 * Bits corresponding to 1s in 'value' will be cleared in ICR register.
2846 *
2847 * @param pThis The device state structure.
2848 * @param offset Register offset in memory-mapped frame.
2849 * @param index Register index in register array.
2850 * @param value The value to store.
2851 * @param mask Used to implement partial writes (8 and 16-bit).
2852 * @thread EMT
2853 */
2854static int e1kRegWriteICR(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2855{
2856 ICR &= ~value;
2857
2858 RT_NOREF_PV(pThis); RT_NOREF_PV(offset); RT_NOREF_PV(index);
2859 return VINF_SUCCESS;
2860}
2861
2862/**
2863 * Read handler for Interrupt Cause Read register.
2864 *
2865 * Reading this register acknowledges all interrupts.
2866 *
2867 * @returns VBox status code.
2868 *
2869 * @param pThis The device state structure.
2870 * @param offset Register offset in memory-mapped frame.
2871 * @param index Register index in register array.
2872 * @param mask Not used.
2873 * @thread EMT
2874 */
2875static int e1kRegReadICR(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
2876{
2877 int rc = e1kCsEnter(pThis, VINF_IOM_R3_MMIO_READ);
2878 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2879 return rc;
2880
2881 uint32_t value = 0;
2882 rc = e1kRegReadDefault(pThis, offset, index, &value);
2883 if (RT_SUCCESS(rc))
2884 {
2885 /* Do not return masked bits. */
2886 value &= IMS;
2887 if (value)
2888 {
2889 /*
2890 * Not clearing ICR causes QNX to hang as it reads ICR in a loop
2891 * with disabled interrupts.
2892 */
2893 //if (IMS)
2894 if (1)
2895 {
2896 /*
2897 * Interrupts were enabled -- we are supposedly at the very
2898 * beginning of interrupt handler
2899 */
2900 E1kLogRel(("E1000: irq lowered, icr=0x%x\n", ICR));
2901 E1kLog(("%s e1kRegReadICR: Lowered IRQ (%08x)\n", pThis->szPrf, ICR));
2902 /* Clear all pending interrupts */
2903 ICR = 0;
2904 pThis->fIntRaised = false;
2905 /* Lower(0) INTA(0) */
2906 PDMDevHlpPCISetIrq(pThis->CTX_SUFF(pDevIns), 0, 0);
2907
2908 pThis->u64AckedAt = TMTimerGet(pThis->CTX_SUFF(pIntTimer));
2909 if (pThis->fIntMaskUsed)
2910 pThis->fDelayInts = true;
2911 }
2912 else
2913 {
2914 /*
2915 * Interrupts are disabled -- in Windows guests the ICR read is done
2916 * just before re-enabling interrupts.
2917 */
2918 E1kLog(("%s e1kRegReadICR: Suppressing auto-clear due to disabled interrupts (%08x)\n", pThis->szPrf, ICR));
2919 }
2920 }
2921 *pu32Value = value;
2922 }
2923 e1kCsLeave(pThis);
2924
2925 return rc;
2926}
2927
2928/**
2929 * Write handler for Interrupt Cause Set register.
2930 *
2931 * Bits corresponding to 1s in 'value' will be set in ICR register.
2932 *
2933 * @param pThis The device state structure.
2934 * @param offset Register offset in memory-mapped frame.
2935 * @param index Register index in register array.
2936 * @param value The value to store.
2937 * @param mask Used to implement partial writes (8 and 16-bit).
2938 * @thread EMT
2939 */
2940static int e1kRegWriteICS(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2941{
2942 RT_NOREF_PV(offset); RT_NOREF_PV(index);
2943 E1K_INC_ISTAT_CNT(pThis->uStatIntICS);
2944 return e1kRaiseInterrupt(pThis, VINF_IOM_R3_MMIO_WRITE, value & g_aE1kRegMap[ICS_IDX].writable);
2945}
2946
2947/**
2948 * Write handler for Interrupt Mask Set register.
2949 *
2950 * Will trigger pending interrupts.
2951 *
2952 * @param pThis The device state structure.
2953 * @param offset Register offset in memory-mapped frame.
2954 * @param index Register index in register array.
2955 * @param value The value to store.
2956 * @param mask Used to implement partial writes (8 and 16-bit).
2957 * @thread EMT
2958 */
2959static int e1kRegWriteIMS(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2960{
2961 RT_NOREF_PV(offset); RT_NOREF_PV(index);
2962
2963 IMS |= value;
2964 E1kLogRel(("E1000: irq enabled, RDH=%x RDT=%x TDH=%x TDT=%x\n", RDH, RDT, TDH, TDT));
2965 E1kLog(("%s e1kRegWriteIMS: IRQ enabled\n", pThis->szPrf));
2966 e1kRaiseInterrupt(pThis, VINF_IOM_R3_MMIO_WRITE, 0);
2967
2968 return VINF_SUCCESS;
2969}
2970
2971/**
2972 * Write handler for Interrupt Mask Clear register.
2973 *
2974 * Bits corresponding to 1s in 'value' will be cleared in IMS register.
2975 *
2976 * @param pThis The device state structure.
2977 * @param offset Register offset in memory-mapped frame.
2978 * @param index Register index in register array.
2979 * @param value The value to store.
2980 * @param mask Used to implement partial writes (8 and 16-bit).
2981 * @thread EMT
2982 */
2983static int e1kRegWriteIMC(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
2984{
2985 RT_NOREF_PV(offset); RT_NOREF_PV(index);
2986
2987 int rc = e1kCsEnter(pThis, VINF_IOM_R3_MMIO_WRITE);
2988 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2989 return rc;
2990 if (pThis->fIntRaised)
2991 {
2992 /*
2993 * Technically we should reset fIntRaised in ICR read handler, but it will cause
2994 * Windows to freeze since it may receive an interrupt while still in the very beginning
2995 * of interrupt handler.
2996 */
2997 E1K_INC_ISTAT_CNT(pThis->uStatIntLower);
2998 STAM_COUNTER_INC(&pThis->StatIntsPrevented);
2999 E1kLogRel(("E1000: irq lowered (IMC), icr=0x%x\n", ICR));
3000 /* Lower(0) INTA(0) */
3001 PDMDevHlpPCISetIrq(pThis->CTX_SUFF(pDevIns), 0, 0);
3002 pThis->fIntRaised = false;
3003 E1kLog(("%s e1kRegWriteIMC: Lowered IRQ: ICR=%08x\n", pThis->szPrf, ICR));
3004 }
3005 IMS &= ~value;
3006 E1kLog(("%s e1kRegWriteIMC: IRQ disabled\n", pThis->szPrf));
3007 e1kCsLeave(pThis);
3008
3009 return VINF_SUCCESS;
3010}
3011
3012/**
3013 * Write handler for Receive Control register.
3014 *
3015 * @param pThis The device state structure.
3016 * @param offset Register offset in memory-mapped frame.
3017 * @param index Register index in register array.
3018 * @param value The value to store.
3019 * @param mask Used to implement partial writes (8 and 16-bit).
3020 * @thread EMT
3021 */
3022static int e1kRegWriteRCTL(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3023{
3024 /* Update promiscuous mode */
3025 bool fBecomePromiscuous = !!(value & (RCTL_UPE | RCTL_MPE));
3026 if (fBecomePromiscuous != !!(RCTL & (RCTL_UPE | RCTL_MPE)))
3027 {
3028 /* Promiscuity has changed, pass the knowledge on. */
3029#ifndef IN_RING3
3030 return VINF_IOM_R3_MMIO_WRITE;
3031#else
3032 if (pThis->pDrvR3)
3033 pThis->pDrvR3->pfnSetPromiscuousMode(pThis->pDrvR3, fBecomePromiscuous);
3034#endif
3035 }
3036
3037 /* Adjust receive buffer size */
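 /*
  * The receive buffer size follows from the arithmetic below: BSIZE 0..3
  * selects 2048/1024/512/256 bytes, and setting BSEX multiplies that by 16
  * (e.g. BSIZE=1 with BSEX set yields 16384 bytes).
  */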
3038 unsigned cbRxBuf = 2048 >> GET_BITS_V(value, RCTL, BSIZE);
3039 if (value & RCTL_BSEX)
3040 cbRxBuf *= 16;
3041 if (cbRxBuf != pThis->u16RxBSize)
3042 E1kLog2(("%s e1kRegWriteRCTL: Setting receive buffer size to %d (old %d)\n",
3043 pThis->szPrf, cbRxBuf, pThis->u16RxBSize));
3044 pThis->u16RxBSize = cbRxBuf;
3045
3046 /* Update the register */
3047 e1kRegWriteDefault(pThis, offset, index, value);
3048
3049 return VINF_SUCCESS;
3050}
3051
3052/**
3053 * Write handler for Packet Buffer Allocation register.
3054 *
3055 * TXA = 64 - RXA.
3056 *
3057 * @param pThis The device state structure.
3058 * @param offset Register offset in memory-mapped frame.
3059 * @param index Register index in register array.
3060 * @param value The value to store.
3061 * @param mask Used to implement partial writes (8 and 16-bit).
3062 * @thread EMT
3063 */
3064static int e1kRegWritePBA(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3065{
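 /*
  * The on-chip packet buffer is shared between receive and transmit: the
  * 64 (KB) total is split so that whatever is not allocated to RX goes to
  * TX, hence the TXA adjustment below.
  */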
3066 e1kRegWriteDefault(pThis, offset, index, value);
3067 PBA_st->txa = 64 - PBA_st->rxa;
3068
3069 return VINF_SUCCESS;
3070}
3071
3072/**
3073 * Write handler for Receive Descriptor Tail register.
3074 *
3075 * @remarks Write into RDT forces switch to HC and signal to
3076 * e1kR3NetworkDown_WaitReceiveAvail().
3077 *
3078 * @returns VBox status code.
3079 *
3080 * @param pThis The device state structure.
3081 * @param offset Register offset in memory-mapped frame.
3082 * @param index Register index in register array.
3083 * @param value The value to store.
3084 * @param mask Used to implement partial writes (8 and 16-bit).
3085 * @thread EMT
3086 */
3087static int e1kRegWriteRDT(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3088{
3089#ifndef IN_RING3
3090 /* XXX */
3091// return VINF_IOM_R3_MMIO_WRITE;
3092#endif
3093 int rc = e1kCsRxEnter(pThis, VINF_IOM_R3_MMIO_WRITE);
3094 if (RT_LIKELY(rc == VINF_SUCCESS))
3095 {
3096 E1kLog(("%s e1kRegWriteRDT\n", pThis->szPrf));
3097 /*
3098 * Some drivers advance RDT too far, so that it equals RDH. This
3099 * somehow manages to work with real hardware but not with this
3100 * emulated device. We can work with these drivers if we just
3101 * write 1 less when we see a driver writing RDT equal to RDH,
3102 * see @bugref{7346}.
3103 */
3104 if (value == RDH)
3105 {
3106 if (RDH == 0)
3107 value = (RDLEN / sizeof(E1KRXDESC)) - 1;
3108 else
3109 value = RDH - 1;
3110 }
3111 rc = e1kRegWriteDefault(pThis, offset, index, value);
3112#ifdef E1K_WITH_RXD_CACHE
3113 /*
3114 * We need to fetch descriptors now as RDT may go whole circle
3115 * before we attempt to store a received packet. For example,
3116 * Intel's DOS drivers use 2 (!) RX descriptors with the total ring
3117 * size being only 8 descriptors! Note that we fetch descriptors
3118 * only when the cache is empty to reduce the number of memory reads
3119 * in case of frequent RDT writes. Don't fetch anything when the
3120 * receiver is disabled either as RDH, RDT, RDLEN can be in some
3121 * messed up state.
3122 * Note that even though the cache may seem empty, meaning that there are
3123 * no more available descriptors in it, it may still be in use by the RX
3124 * thread which has not yet written the last descriptor back but has
3125 * temporarily released the RX lock in order to write the packet body
3126 * to the descriptor's buffer. At this point we are still going to do the
3127 * prefetch, but it won't actually fetch anything if there are no unused slots in
3128 * our "empty" cache (nRxDFetched==E1K_RXD_CACHE_SIZE). We must not
3129 * reset the cache here even if it appears empty. It will be reset at
3130 * a later point in e1kRxDGet().
3131 */
3132 if (e1kRxDIsCacheEmpty(pThis) && (RCTL & RCTL_EN))
3133 e1kRxDPrefetch(pThis);
3134#endif /* E1K_WITH_RXD_CACHE */
3135 e1kCsRxLeave(pThis);
3136 if (RT_SUCCESS(rc))
3137 {
3138/** @todo bird: Use SUPSem* for this so we can signal it in ring-0 as well
3139 * without requiring any context switches. We should also check the
3140 * wait condition before bothering to queue the item as we're currently
3141 * queuing thousands of items per second here in a normal transmit
3142 * scenario. Expect performance changes when fixing this! */
3143#ifdef IN_RING3
3144 /* Signal that we have more receive descriptors available. */
3145 e1kWakeupReceive(pThis->CTX_SUFF(pDevIns));
3146#else
3147 PPDMQUEUEITEMCORE pItem = PDMQueueAlloc(pThis->CTX_SUFF(pCanRxQueue));
3148 if (pItem)
3149 PDMQueueInsert(pThis->CTX_SUFF(pCanRxQueue), pItem);
3150#endif
3151 }
3152 }
3153 return rc;
3154}
3155
3156/**
3157 * Write handler for Receive Delay Timer register.
3158 *
3159 * @param pThis The device state structure.
3160 * @param offset Register offset in memory-mapped frame.
3161 * @param index Register index in register array.
3162 * @param value The value to store.
3163 * @param mask Used to implement partial writes (8 and 16-bit).
3164 * @thread EMT
3165 */
3166static int e1kRegWriteRDTR(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
3167{
3168 e1kRegWriteDefault(pThis, offset, index, value);
3169 if (value & RDTR_FPD)
3170 {
3171 /* Flush requested, cancel both timers and raise interrupt */
3172#ifdef E1K_USE_RX_TIMERS
3173 e1kCancelTimer(pThis, pThis->CTX_SUFF(pRIDTimer));
3174 e1kCancelTimer(pThis, pThis->CTX_SUFF(pRADTimer));
3175#endif
3176 E1K_INC_ISTAT_CNT(pThis->uStatIntRDTR);
3177 return e1kRaiseInterrupt(pThis, VINF_IOM_R3_MMIO_WRITE, ICR_RXT0);
3178 }
3179
3180 return VINF_SUCCESS;
3181}
3182
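/**
 * Computes the number of TX descriptors the guest has made available but the
 * device has not yet processed, i.e. the distance from TDH to TDT with ring
 * wrap-around taken into account.
 *
 * @returns Number of pending TX descriptors.
 * @param pThis The device state structure.
 */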
3183DECLINLINE(uint32_t) e1kGetTxLen(PE1KSTATE pThis)
3184{
3185 /*
3186 * Make sure TDT won't change during computation. EMT may modify TDT at
3187 * any moment.
3188 */
3189 uint32_t tdt = TDT;
3190 return (TDH>tdt ? TDLEN/sizeof(E1KTXDESC) : 0) + tdt - TDH;
3191}
3192
3193#ifdef IN_RING3
3194
3195# ifdef E1K_TX_DELAY
3196/**
3197 * Transmit Delay Timer handler.
3198 *
3199 * @remarks We only get here when the timer expires.
3200 *
3201 * @param pDevIns Pointer to device instance structure.
3202 * @param pTimer Pointer to the timer.
3203 * @param pvUser NULL.
3204 * @thread EMT
3205 */
3206static DECLCALLBACK(void) e1kTxDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3207{
3208 PE1KSTATE pThis = (PE1KSTATE )pvUser;
3209 Assert(PDMCritSectIsOwner(&pThis->csTx));
3210
3211 E1K_INC_ISTAT_CNT(pThis->uStatTxDelayExp);
3212# ifdef E1K_INT_STATS
3213 uint64_t u64Elapsed = RTTimeNanoTS() - pThis->u64ArmedAt;
3214 if (u64Elapsed > pThis->uStatMaxTxDelay)
3215 pThis->uStatMaxTxDelay = u64Elapsed;
3216# endif
3217 int rc = e1kXmitPending(pThis, false /*fOnWorkerThread*/);
3218 AssertMsg(RT_SUCCESS(rc) || rc == VERR_TRY_AGAIN, ("%Rrc\n", rc));
3219}
3220# endif /* E1K_TX_DELAY */
3221
3222# ifdef E1K_USE_TX_TIMERS
3223
3224/**
3225 * Transmit Interrupt Delay Timer handler.
3226 *
3227 * @remarks We only get here when the timer expires.
3228 *
3229 * @param pDevIns Pointer to device instance structure.
3230 * @param pTimer Pointer to the timer.
3231 * @param pvUser NULL.
3232 * @thread EMT
3233 */
3234static DECLCALLBACK(void) e1kTxIntDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3235{
3236 PE1KSTATE pThis = (PE1KSTATE )pvUser;
3237
3238 E1K_INC_ISTAT_CNT(pThis->uStatTID);
3239 /* Cancel absolute delay timer as we have already got attention */
3240# ifndef E1K_NO_TAD
3241 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTADTimer));
3242# endif /* E1K_NO_TAD */
3243 e1kRaiseInterrupt(pThis, ICR_TXDW);
3244}
3245
3246/**
3247 * Transmit Absolute Delay Timer handler.
3248 *
3249 * @remarks We only get here when the timer expires.
3250 *
3251 * @param pDevIns Pointer to device instance structure.
3252 * @param pTimer Pointer to the timer.
3253 * @param pvUser NULL.
3254 * @thread EMT
3255 */
3256static DECLCALLBACK(void) e1kTxAbsDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3257{
3258 PE1KSTATE pThis = (PE1KSTATE )pvUser;
3259
3260 E1K_INC_ISTAT_CNT(pThis->uStatTAD);
3261 /* Cancel interrupt delay timer as we have already got attention */
3262 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTIDTimer));
3263 e1kRaiseInterrupt(pThis, ICR_TXDW);
3264}
3265
3266# endif /* E1K_USE_TX_TIMERS */
3267# ifdef E1K_USE_RX_TIMERS
3268
3269/**
3270 * Receive Interrupt Delay Timer handler.
3271 *
3272 * @remarks We only get here when the timer expires.
3273 *
3274 * @param pDevIns Pointer to device instance structure.
3275 * @param pTimer Pointer to the timer.
3276 * @param pvUser NULL.
3277 * @thread EMT
3278 */
3279static DECLCALLBACK(void) e1kRxIntDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3280{
3281 PE1KSTATE pThis = (PE1KSTATE )pvUser;
3282
3283 E1K_INC_ISTAT_CNT(pThis->uStatRID);
3284 /* Cancel absolute delay timer as we have already got attention */
3285 e1kCancelTimer(pThis, pThis->CTX_SUFF(pRADTimer));
3286 e1kRaiseInterrupt(pThis, ICR_RXT0);
3287}
3288
3289/**
3290 * Receive Absolute Delay Timer handler.
3291 *
3292 * @remarks We only get here when the timer expires.
3293 *
3294 * @param pDevIns Pointer to device instance structure.
3295 * @param pTimer Pointer to the timer.
3296 * @param pvUser NULL.
3297 * @thread EMT
3298 */
3299static DECLCALLBACK(void) e1kRxAbsDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3300{
3301 PE1KSTATE pThis = (PE1KSTATE )pvUser;
3302
3303 E1K_INC_ISTAT_CNT(pThis->uStatRAD);
3304 /* Cancel interrupt delay timer as we have already got attention */
3305 e1kCancelTimer(pThis, pThis->CTX_SUFF(pRIDTimer));
3306 e1kRaiseInterrupt(pThis, ICR_RXT0);
3307}
3308
3309# endif /* E1K_USE_RX_TIMERS */
3310
3311/**
3312 * Late Interrupt Timer handler.
3313 *
3314 * @param pDevIns Pointer to device instance structure.
3315 * @param pTimer Pointer to the timer.
3316 * @param pvUser NULL.
3317 * @thread EMT
3318 */
3319static DECLCALLBACK(void) e1kLateIntTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3320{
3321 RT_NOREF(pDevIns, pTimer);
3322 PE1KSTATE pThis = (PE1KSTATE )pvUser;
3323
3324 STAM_PROFILE_ADV_START(&pThis->StatLateIntTimer, a);
3325 STAM_COUNTER_INC(&pThis->StatLateInts);
3326 E1K_INC_ISTAT_CNT(pThis->uStatIntLate);
3327# if 0
3328 if (pThis->iStatIntLost > -100)
3329 pThis->iStatIntLost--;
3330# endif
3331 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, 0);
3332 STAM_PROFILE_ADV_STOP(&pThis->StatLateIntTimer, a);
3333}
3334
3335/**
3336 * Link Up Timer handler.
3337 *
3338 * @param pDevIns Pointer to device instance structure.
3339 * @param pTimer Pointer to the timer.
3340 * @param pvUser NULL.
3341 * @thread EMT
3342 */
3343static DECLCALLBACK(void) e1kLinkUpTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3344{
3345 RT_NOREF(pDevIns, pTimer);
3346 PE1KSTATE pThis = (PE1KSTATE )pvUser;
3347
3348 /*
3349 * This can happen if we set the link status to down when the link-up timer was
3350 * already armed (shortly after e1kLoadDone()), or if the cable was disconnected
3351 * and re-connected very quickly.
3352 */
3353 if (!pThis->fCableConnected)
3354 return;
3355
3356 e1kR3LinkUp(pThis);
3357}
3358
3359#endif /* IN_RING3 */
3360
3361/**
3362 * Sets up the GSO context according to the TSE new context descriptor.
3363 *
3364 * @param pGso The GSO context to setup.
3365 * @param pCtx The context descriptor.
3366 */
3367DECLINLINE(void) e1kSetupGsoCtx(PPDMNETWORKGSO pGso, E1KTXCTX const *pCtx)
3368{
3369 pGso->u8Type = PDMNETWORKGSOTYPE_INVALID;
3370
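 /*
  * Field naming reminder (from the context descriptor): CSS = checksum start,
  * CSO = checksum offset (where the checksum is written), CSE = checksum end,
  * HDRLEN = total header length, MSS = maximum segment size, PAYLEN = payload
  * length.
  */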
3371 /*
3372 * See if the context descriptor describes something that could be TCP or
3373 * UDP over IPv[46].
3374 */
3375 /* Check the header ordering and spacing: 1. Ethernet, 2. IP, 3. TCP/UDP. */
3376 if (RT_UNLIKELY( pCtx->ip.u8CSS < sizeof(RTNETETHERHDR) ))
3377 {
3378 E1kLog(("e1kSetupGsoCtx: IPCSS=%#x\n", pCtx->ip.u8CSS));
3379 return;
3380 }
3381 if (RT_UNLIKELY( pCtx->tu.u8CSS < (size_t)pCtx->ip.u8CSS + (pCtx->dw2.fIP ? RTNETIPV4_MIN_LEN : RTNETIPV6_MIN_LEN) ))
3382 {
3383 E1kLog(("e1kSetupGsoCtx: TUCSS=%#x\n", pCtx->tu.u8CSS));
3384 return;
3385 }
3386 if (RT_UNLIKELY( pCtx->dw2.fTCP
3387 ? pCtx->dw3.u8HDRLEN < (size_t)pCtx->tu.u8CSS + RTNETTCP_MIN_LEN
3388 : pCtx->dw3.u8HDRLEN != (size_t)pCtx->tu.u8CSS + RTNETUDP_MIN_LEN ))
3389 {
3390 E1kLog(("e1kSetupGsoCtx: HDRLEN=%#x TCP=%d\n", pCtx->dw3.u8HDRLEN, pCtx->dw2.fTCP));
3391 return;
3392 }
3393
3394 /* The end of the TCP/UDP checksum should stop at the end of the packet or at least after the headers. */
3395 if (RT_UNLIKELY( pCtx->tu.u16CSE > 0 && pCtx->tu.u16CSE <= pCtx->dw3.u8HDRLEN ))
3396 {
3397 E1kLog(("e1kSetupGsoCtx: TUCSE=%#x HDRLEN=%#x\n", pCtx->tu.u16CSE, pCtx->dw3.u8HDRLEN));
3398 return;
3399 }
3400
3401 /* IPv4 checksum offset. */
3402 if (RT_UNLIKELY( pCtx->dw2.fIP && (size_t)pCtx->ip.u8CSO - pCtx->ip.u8CSS != RT_UOFFSETOF(RTNETIPV4, ip_sum) ))
3403 {
3404 E1kLog(("e1kSetupGsoCtx: IPCSO=%#x IPCSS=%#x\n", pCtx->ip.u8CSO, pCtx->ip.u8CSS));
3405 return;
3406 }
3407
3408 /* TCP/UDP checksum offsets. */
3409 if (RT_UNLIKELY( (size_t)pCtx->tu.u8CSO - pCtx->tu.u8CSS
3410 != ( pCtx->dw2.fTCP
3411 ? RT_UOFFSETOF(RTNETTCP, th_sum)
3412 : RT_UOFFSETOF(RTNETUDP, uh_sum) ) ))
3413 {
3414 E1kLog(("e1kSetupGsoCtx: TUCSO=%#x TUCSS=%#x TCP=%d\n", pCtx->ip.u8CSO, pCtx->ip.u8CSS, pCtx->dw2.fTCP));
3415 return;
3416 }
3417
3418 /*
3419 * Because of internal networking using a 16-bit size field for GSO context
3420 * plus frame, we have to make sure we don't exceed this.
3421 */
3422 if (RT_UNLIKELY( pCtx->dw3.u8HDRLEN + pCtx->dw2.u20PAYLEN > VBOX_MAX_GSO_SIZE ))
3423 {
3424 E1kLog(("e1kSetupGsoCtx: HDRLEN(=%#x) + PAYLEN(=%#x) = %#x, max is %#x\n",
3425 pCtx->dw3.u8HDRLEN, pCtx->dw2.u20PAYLEN, pCtx->dw3.u8HDRLEN + pCtx->dw2.u20PAYLEN, VBOX_MAX_GSO_SIZE));
3426 return;
3427 }
3428
3429 /*
3430 * We're good for now - we'll do more checks when seeing the data.
3431 * So, figure the type of offloading and setup the context.
3432 */
3433 if (pCtx->dw2.fIP)
3434 {
3435 if (pCtx->dw2.fTCP)
3436 {
3437 pGso->u8Type = PDMNETWORKGSOTYPE_IPV4_TCP;
3438 pGso->cbHdrsSeg = pCtx->dw3.u8HDRLEN;
3439 }
3440 else
3441 {
3442 pGso->u8Type = PDMNETWORKGSOTYPE_IPV4_UDP;
3443 pGso->cbHdrsSeg = pCtx->tu.u8CSS; /* IP header only */
3444 }
3445 /** @todo Detect IPv4-IPv6 tunneling (need test setup since linux doesn't do
3446 * this yet it seems)... */
3447 }
3448 else
3449 {
3450 pGso->cbHdrsSeg = pCtx->dw3.u8HDRLEN; /* @todo IPv6 UFO */
3451 if (pCtx->dw2.fTCP)
3452 pGso->u8Type = PDMNETWORKGSOTYPE_IPV6_TCP;
3453 else
3454 pGso->u8Type = PDMNETWORKGSOTYPE_IPV6_UDP;
3455 }
3456 pGso->offHdr1 = pCtx->ip.u8CSS;
3457 pGso->offHdr2 = pCtx->tu.u8CSS;
3458 pGso->cbHdrsTotal = pCtx->dw3.u8HDRLEN;
3459 pGso->cbMaxSeg = pCtx->dw3.u16MSS;
3460 Assert(PDMNetGsoIsValid(pGso, sizeof(*pGso), pGso->cbMaxSeg * 5));
3461 E1kLog2(("e1kSetupGsoCtx: mss=%#x hdr=%#x hdrseg=%#x hdr1=%#x hdr2=%#x %s\n",
3462 pGso->cbMaxSeg, pGso->cbHdrsTotal, pGso->cbHdrsSeg, pGso->offHdr1, pGso->offHdr2, PDMNetGsoTypeName((PDMNETWORKGSOTYPE)pGso->u8Type) ));
3463}
3464
3465/**
3466 * Checks if we can use GSO processing for the current TSE frame.
3467 *
3468 * @param pThis The device state structure.
3469 * @param pGso The GSO context.
3470 * @param pData The first data descriptor of the frame.
3471 * @param pCtx The TSO context descriptor.
3472 */
3473DECLINLINE(bool) e1kCanDoGso(PE1KSTATE pThis, PCPDMNETWORKGSO pGso, E1KTXDAT const *pData, E1KTXCTX const *pCtx)
3474{
3475 if (!pData->cmd.fTSE)
3476 {
3477 E1kLog2(("e1kCanDoGso: !TSE\n"));
3478 return false;
3479 }
3480 if (pData->cmd.fVLE) /** @todo VLAN tagging. */
3481 {
3482 E1kLog(("e1kCanDoGso: VLE\n"));
3483 return false;
3484 }
3485 if (RT_UNLIKELY(!pThis->fGSOEnabled))
3486 {
3487 E1kLog3(("e1kCanDoGso: GSO disabled via CFGM\n"));
3488 return false;
3489 }
3490
3491 switch ((PDMNETWORKGSOTYPE)pGso->u8Type)
3492 {
3493 case PDMNETWORKGSOTYPE_IPV4_TCP:
3494 case PDMNETWORKGSOTYPE_IPV4_UDP:
3495 if (!pData->dw3.fIXSM)
3496 {
3497 E1kLog(("e1kCanDoGso: !IXSM (IPv4)\n"));
3498 return false;
3499 }
3500 if (!pData->dw3.fTXSM)
3501 {
3502 E1kLog(("e1kCanDoGso: !TXSM (IPv4)\n"));
3503 return false;
3504 }
3505 /** @todo what more check should we perform here? Ethernet frame type? */
3506 E1kLog2(("e1kCanDoGso: OK, IPv4\n"));
3507 return true;
3508
3509 case PDMNETWORKGSOTYPE_IPV6_TCP:
3510 case PDMNETWORKGSOTYPE_IPV6_UDP:
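 /* IPv6 has no header checksum, so IP checksum offload (IXSM) must not be requested here. */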
3511 if (pData->dw3.fIXSM && pCtx->ip.u8CSO)
3512 {
3513 E1kLog(("e1kCanDoGso: IXSM (IPv6)\n"));
3514 return false;
3515 }
3516 if (!pData->dw3.fTXSM)
3517 {
3518 E1kLog(("e1kCanDoGso: TXSM (IPv6)\n"));
3519 return false;
3520 }
3521 /** @todo what more check should we perform here? Ethernet frame type? */
3522 E1kLog2(("e1kCanDoGso: OK, IPv4\n"));
3523 return true;
3524
3525 default:
3526 Assert(pGso->u8Type == PDMNETWORKGSOTYPE_INVALID);
3527 E1kLog2(("e1kCanDoGso: e1kSetupGsoCtx failed\n"));
3528 return false;
3529 }
3530}
3531
3532/**
3533 * Frees the current xmit buffer.
3534 *
3535 * @param pThis The device state structure.
3536 */
3537static void e1kXmitFreeBuf(PE1KSTATE pThis)
3538{
3539 PPDMSCATTERGATHER pSg = pThis->CTX_SUFF(pTxSg);
3540 if (pSg)
3541 {
3542 pThis->CTX_SUFF(pTxSg) = NULL;
3543
3544 if (pSg->pvAllocator != pThis)
3545 {
3546 PPDMINETWORKUP pDrv = pThis->CTX_SUFF(pDrv);
3547 if (pDrv)
3548 pDrv->pfnFreeBuf(pDrv, pSg);
3549 }
3550 else
3551 {
3552 /* loopback */
3553 AssertCompileMemberSize(E1KSTATE, uTxFallback.Sg, 8 * sizeof(size_t));
3554 Assert(pSg->fFlags == (PDMSCATTERGATHER_FLAGS_MAGIC | PDMSCATTERGATHER_FLAGS_OWNER_3));
3555 pSg->fFlags = 0;
3556 pSg->pvAllocator = NULL;
3557 }
3558 }
3559}
3560
3561#ifndef E1K_WITH_TXD_CACHE
3562/**
3563 * Allocates an xmit buffer.
3564 *
3565 * @returns See PDMINETWORKUP::pfnAllocBuf.
3566 * @param pThis The device state structure.
3567 * @param cbMin The minimum frame size.
3568 * @param fExactSize Whether cbMin is exact or if we have to max it
3569 * out to the max MTU size.
3570 * @param fGso Whether this is a GSO frame or not.
3571 */
3572DECLINLINE(int) e1kXmitAllocBuf(PE1KSTATE pThis, size_t cbMin, bool fExactSize, bool fGso)
3573{
3574 /* Adjust cbMin if necessary. */
3575 if (!fExactSize)
3576 cbMin = RT_MAX(cbMin, E1K_MAX_TX_PKT_SIZE);
3577
3578 /* Deal with existing buffer (descriptor screw up, reset, etc). */
3579 if (RT_UNLIKELY(pThis->CTX_SUFF(pTxSg)))
3580 e1kXmitFreeBuf(pThis);
3581 Assert(pThis->CTX_SUFF(pTxSg) == NULL);
3582
3583 /*
3584 * Allocate the buffer.
3585 */
3586 PPDMSCATTERGATHER pSg;
3587 if (RT_LIKELY(GET_BITS(RCTL, LBM) != RCTL_LBM_TCVR))
3588 {
3589 PPDMINETWORKUP pDrv = pThis->CTX_SUFF(pDrv);
3590 if (RT_UNLIKELY(!pDrv))
3591 return VERR_NET_DOWN;
3592 int rc = pDrv->pfnAllocBuf(pDrv, cbMin, fGso ? &pThis->GsoCtx : NULL, &pSg);
3593 if (RT_FAILURE(rc))
3594 {
3595 /* Suspend TX as we are out of buffers atm */
3596 STATUS |= STATUS_TXOFF;
3597 return rc;
3598 }
3599 }
3600 else
3601 {
3602 /* Create a loopback using the fallback buffer and preallocated SG. */
3603 AssertCompileMemberSize(E1KSTATE, uTxFallback.Sg, 8 * sizeof(size_t));
3604 pSg = &pThis->uTxFallback.Sg;
3605 pSg->fFlags = PDMSCATTERGATHER_FLAGS_MAGIC | PDMSCATTERGATHER_FLAGS_OWNER_3;
3606 pSg->cbUsed = 0;
3607 pSg->cbAvailable = 0;
3608 pSg->pvAllocator = pThis;
3609 pSg->pvUser = NULL; /* No GSO here. */
3610 pSg->cSegs = 1;
3611 pSg->aSegs[0].pvSeg = pThis->aTxPacketFallback;
3612 pSg->aSegs[0].cbSeg = sizeof(pThis->aTxPacketFallback);
3613 }
3614
3615 pThis->CTX_SUFF(pTxSg) = pSg;
3616 return VINF_SUCCESS;
3617}
3618#else /* E1K_WITH_TXD_CACHE */
3619/**
3620 * Allocates an xmit buffer.
3621 *
3622 * @returns See PDMINETWORKUP::pfnAllocBuf.
3623 * @param pThis The device state structure.
3624 * @param cbMin The minimum frame size.
3625 * @param fExactSize Whether cbMin is exact or if we have to max it
3626 * out to the max MTU size.
3627 * @param fGso Whether this is a GSO frame or not.
3628 */
3629DECLINLINE(int) e1kXmitAllocBuf(PE1KSTATE pThis, bool fGso)
3630{
3631 /* Deal with existing buffer (descriptor screw up, reset, etc). */
3632 if (RT_UNLIKELY(pThis->CTX_SUFF(pTxSg)))
3633 e1kXmitFreeBuf(pThis);
3634 Assert(pThis->CTX_SUFF(pTxSg) == NULL);
3635
3636 /*
3637 * Allocate the buffer.
3638 */
3639 PPDMSCATTERGATHER pSg;
3640 if (RT_LIKELY(GET_BITS(RCTL, LBM) != RCTL_LBM_TCVR))
3641 {
3642 if (pThis->cbTxAlloc == 0)
3643 {
3644 /* Zero packet, no need for the buffer */
3645 return VINF_SUCCESS;
3646 }
3647
3648 PPDMINETWORKUP pDrv = pThis->CTX_SUFF(pDrv);
3649 if (RT_UNLIKELY(!pDrv))
3650 return VERR_NET_DOWN;
3651 int rc = pDrv->pfnAllocBuf(pDrv, pThis->cbTxAlloc, fGso ? &pThis->GsoCtx : NULL, &pSg);
3652 if (RT_FAILURE(rc))
3653 {
3654 /* Suspend TX as we are out of buffers atm */
3655 STATUS |= STATUS_TXOFF;
3656 return rc;
3657 }
3658 E1kLog3(("%s Allocated buffer for TX packet: cb=%u %s%s\n",
3659 pThis->szPrf, pThis->cbTxAlloc,
3660 pThis->fVTag ? "VLAN " : "",
3661 pThis->fGSO ? "GSO " : ""));
3662 pThis->cbTxAlloc = 0;
3663 }
3664 else
3665 {
3666 /* Create a loopback using the fallback buffer and preallocated SG. */
3667 AssertCompileMemberSize(E1KSTATE, uTxFallback.Sg, 8 * sizeof(size_t));
3668 pSg = &pThis->uTxFallback.Sg;
3669 pSg->fFlags = PDMSCATTERGATHER_FLAGS_MAGIC | PDMSCATTERGATHER_FLAGS_OWNER_3;
3670 pSg->cbUsed = 0;
3671 pSg->cbAvailable = 0;
3672 pSg->pvAllocator = pThis;
3673 pSg->pvUser = NULL; /* No GSO here. */
3674 pSg->cSegs = 1;
3675 pSg->aSegs[0].pvSeg = pThis->aTxPacketFallback;
3676 pSg->aSegs[0].cbSeg = sizeof(pThis->aTxPacketFallback);
3677 }
3678
3679 pThis->CTX_SUFF(pTxSg) = pSg;
3680 return VINF_SUCCESS;
3681}
3682#endif /* E1K_WITH_TXD_CACHE */
3683
3684/**
3685 * Checks if it's a GSO buffer or not.
3686 *
3687 * @returns true / false.
3688 * @param pTxSg The scatter / gather buffer.
3689 */
3690DECLINLINE(bool) e1kXmitIsGsoBuf(PDMSCATTERGATHER const *pTxSg)
3691{
3692#if 0
3693 if (!pTxSg)
3694 E1kLog(("e1kXmitIsGsoBuf: pTxSG is NULL\n"));
3695 if (pTxSg && !pTxSg->pvUser)
3696 E1kLog(("e1kXmitIsGsoBuf: pvUser is NULL\n"));
3697#endif
3698 return pTxSg && pTxSg->pvUser /* GSO indicator */;
3699}
3700
3701#ifndef E1K_WITH_TXD_CACHE
3702/**
3703 * Load transmit descriptor from guest memory.
3704 *
3705 * @param pThis The device state structure.
3706 * @param pDesc Pointer to descriptor union.
3707 * @param addr Physical address in guest context.
3708 * @thread E1000_TX
3709 */
3710DECLINLINE(void) e1kLoadDesc(PE1KSTATE pThis, E1KTXDESC *pDesc, RTGCPHYS addr)
3711{
3712 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), addr, pDesc, sizeof(E1KTXDESC));
3713}
3714#else /* E1K_WITH_TXD_CACHE */
3715/**
3716 * Load transmit descriptors from guest memory.
3717 *
3718 * We need two physical reads in case the tail wrapped around the end of TX
3719 * descriptor ring.
3720 *
3721 * @returns The actual number of descriptors fetched.
3722 * @param pThis The device state structure.
3725 * @thread E1000_TX
3726 */
3727DECLINLINE(unsigned) e1kTxDLoadMore(PE1KSTATE pThis)
3728{
3729 Assert(pThis->iTxDCurrent == 0);
3730 /* We've already loaded pThis->nTxDFetched descriptors past TDH. */
3731 unsigned nDescsAvailable = e1kGetTxLen(pThis) - pThis->nTxDFetched;
3732 unsigned nDescsToFetch = RT_MIN(nDescsAvailable, E1K_TXD_CACHE_SIZE - pThis->nTxDFetched);
3733 unsigned nDescsTotal = TDLEN / sizeof(E1KTXDESC);
3734 unsigned nFirstNotLoaded = (TDH + pThis->nTxDFetched) % nDescsTotal;
3735 unsigned nDescsInSingleRead = RT_MIN(nDescsToFetch, nDescsTotal - nFirstNotLoaded);
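 /*
  * Example: with an 8-descriptor ring (TDLEN/sizeof(E1KTXDESC) == 8), TDH=6,
  * TDT=2 and an empty cache, 4 descriptors are pending; the first read below
  * fetches 2 of them (indices 6-7) and the second read fetches the remaining
  * 2 (indices 0-1), assuming the cache has room for all 4.
  */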
3736 E1kLog3(("%s e1kTxDLoadMore: nDescsAvailable=%u nDescsToFetch=%u "
3737 "nDescsTotal=%u nFirstNotLoaded=0x%x nDescsInSingleRead=%u\n",
3738 pThis->szPrf, nDescsAvailable, nDescsToFetch, nDescsTotal,
3739 nFirstNotLoaded, nDescsInSingleRead));
3740 if (nDescsToFetch == 0)
3741 return 0;
3742 E1KTXDESC* pFirstEmptyDesc = &pThis->aTxDescriptors[pThis->nTxDFetched];
3743 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns),
3744 ((uint64_t)TDBAH << 32) + TDBAL + nFirstNotLoaded * sizeof(E1KTXDESC),
3745 pFirstEmptyDesc, nDescsInSingleRead * sizeof(E1KTXDESC));
3746 E1kLog3(("%s Fetched %u TX descriptors at %08x%08x(0x%x), TDLEN=%08x, TDH=%08x, TDT=%08x\n",
3747 pThis->szPrf, nDescsInSingleRead,
3748 TDBAH, TDBAL + TDH * sizeof(E1KTXDESC),
3749 nFirstNotLoaded, TDLEN, TDH, TDT));
3750 if (nDescsToFetch > nDescsInSingleRead)
3751 {
3752 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns),
3753 ((uint64_t)TDBAH << 32) + TDBAL,
3754 pFirstEmptyDesc + nDescsInSingleRead,
3755 (nDescsToFetch - nDescsInSingleRead) * sizeof(E1KTXDESC));
3756 E1kLog3(("%s Fetched %u TX descriptors at %08x%08x\n",
3757 pThis->szPrf, nDescsToFetch - nDescsInSingleRead,
3758 TDBAH, TDBAL));
3759 }
3760 pThis->nTxDFetched += nDescsToFetch;
3761 return nDescsToFetch;
3762}
3763
3764/**
3765 * Load transmit descriptors from guest memory only if there are no loaded
3766 * descriptors.
3767 *
3768 * @returns true if there are descriptors in cache.
3769 * @param pThis The device state structure.
3772 * @thread E1000_TX
3773 */
3774DECLINLINE(bool) e1kTxDLazyLoad(PE1KSTATE pThis)
3775{
3776 if (pThis->nTxDFetched == 0)
3777 return e1kTxDLoadMore(pThis) != 0;
3778 return true;
3779}
3780#endif /* E1K_WITH_TXD_CACHE */
3781
3782/**
3783 * Write back transmit descriptor to guest memory.
3784 *
3785 * @param pThis The device state structure.
3786 * @param pDesc Pointer to descriptor union.
3787 * @param addr Physical address in guest context.
3788 * @thread E1000_TX
3789 */
3790DECLINLINE(void) e1kWriteBackDesc(PE1KSTATE pThis, E1KTXDESC *pDesc, RTGCPHYS addr)
3791{
3792 /* Strictly speaking, only the last half of the descriptor needs to be written back, but we write back the whole descriptor for simplicity. */
3793 e1kPrintTDesc(pThis, pDesc, "^^^");
3794 PDMDevHlpPCIPhysWrite(pThis->CTX_SUFF(pDevIns), addr, pDesc, sizeof(E1KTXDESC));
3795}
3796
3797/**
3798 * Transmit complete frame.
3799 *
3800 * @remarks We skip the FCS since we're not responsible for sending anything to
3801 * a real ethernet wire.
3802 *
3803 * @param pThis The device state structure.
3804 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
3805 * @thread E1000_TX
3806 */
3807static void e1kTransmitFrame(PE1KSTATE pThis, bool fOnWorkerThread)
3808{
3809 PPDMSCATTERGATHER pSg = pThis->CTX_SUFF(pTxSg);
3810 uint32_t cbFrame = pSg ? (uint32_t)pSg->cbUsed : 0;
3811 Assert(!pSg || pSg->cSegs == 1);
3812
3813 if (cbFrame > 70) /* unqualified guess */
3814 pThis->led.Asserted.s.fWriting = pThis->led.Actual.s.fWriting = 1;
3815
3816#ifdef E1K_INT_STATS
3817 if (cbFrame <= 1514)
3818 E1K_INC_ISTAT_CNT(pThis->uStatTx1514);
3819 else if (cbFrame <= 2962)
3820 E1K_INC_ISTAT_CNT(pThis->uStatTx2962);
3821 else if (cbFrame <= 4410)
3822 E1K_INC_ISTAT_CNT(pThis->uStatTx4410);
3823 else if (cbFrame <= 5858)
3824 E1K_INC_ISTAT_CNT(pThis->uStatTx5858);
3825 else if (cbFrame <= 7306)
3826 E1K_INC_ISTAT_CNT(pThis->uStatTx7306);
3827 else if (cbFrame <= 8754)
3828 E1K_INC_ISTAT_CNT(pThis->uStatTx8754);
3829 else if (cbFrame <= 16384)
3830 E1K_INC_ISTAT_CNT(pThis->uStatTx16384);
3831 else if (cbFrame <= 32768)
3832 E1K_INC_ISTAT_CNT(pThis->uStatTx32768);
3833 else
3834 E1K_INC_ISTAT_CNT(pThis->uStatTxLarge);
3835#endif /* E1K_INT_STATS */
3836
3837 /* Add VLAN tag */
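 /*
  * The 802.1Q tag goes right after the 12 bytes of destination and source MAC
  * addresses: everything from offset 12 onwards is shifted up by 4 bytes and
  * the 4-byte TPID (VET) + TCI word is written into the gap.
  */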
3838 if (cbFrame > 12 && pThis->fVTag)
3839 {
3840 E1kLog3(("%s Inserting VLAN tag %08x\n",
3841 pThis->szPrf, RT_BE2H_U16(VET) | (RT_BE2H_U16(pThis->u16VTagTCI) << 16)));
3842 memmove((uint8_t*)pSg->aSegs[0].pvSeg + 16, (uint8_t*)pSg->aSegs[0].pvSeg + 12, cbFrame - 12);
3843 *((uint32_t*)pSg->aSegs[0].pvSeg + 3) = RT_BE2H_U16(VET) | (RT_BE2H_U16(pThis->u16VTagTCI) << 16);
3844 pSg->cbUsed += 4;
3845 cbFrame += 4;
3846 Assert(pSg->cbUsed == cbFrame);
3847 Assert(pSg->cbUsed <= pSg->cbAvailable);
3848 }
3849/* E1kLog2(("%s < < < Outgoing packet. Dump follows: > > >\n"
3850 "%.*Rhxd\n"
3851 "%s < < < < < < < < < < < < < End of dump > > > > > > > > > > > >\n",
3852 pThis->szPrf, cbFrame, pSg->aSegs[0].pvSeg, pThis->szPrf));*/
3853
3854 /* Update the stats */
3855 E1K_INC_CNT32(TPT);
3856 E1K_ADD_CNT64(TOTL, TOTH, cbFrame);
3857 E1K_INC_CNT32(GPTC);
3858 if (pSg && e1kIsBroadcast(pSg->aSegs[0].pvSeg))
3859 E1K_INC_CNT32(BPTC);
3860 else if (pSg && e1kIsMulticast(pSg->aSegs[0].pvSeg))
3861 E1K_INC_CNT32(MPTC);
3862 /* Update octet transmit counter */
3863 E1K_ADD_CNT64(GOTCL, GOTCH, cbFrame);
3864 if (pThis->CTX_SUFF(pDrv))
3865 STAM_REL_COUNTER_ADD(&pThis->StatTransmitBytes, cbFrame);
3866 if (cbFrame == 64)
3867 E1K_INC_CNT32(PTC64);
3868 else if (cbFrame < 128)
3869 E1K_INC_CNT32(PTC127);
3870 else if (cbFrame < 256)
3871 E1K_INC_CNT32(PTC255);
3872 else if (cbFrame < 512)
3873 E1K_INC_CNT32(PTC511);
3874 else if (cbFrame < 1024)
3875 E1K_INC_CNT32(PTC1023);
3876 else
3877 E1K_INC_CNT32(PTC1522);
3878
3879 E1K_INC_ISTAT_CNT(pThis->uStatTxFrm);
3880
3881 /*
3882 * Dump and send the packet.
3883 */
3884 int rc = VERR_NET_DOWN;
3885 if (pSg && pSg->pvAllocator != pThis)
3886 {
3887 e1kPacketDump(pThis, (uint8_t const *)pSg->aSegs[0].pvSeg, cbFrame, "--> Outgoing");
3888
3889 pThis->CTX_SUFF(pTxSg) = NULL;
3890 PPDMINETWORKUP pDrv = pThis->CTX_SUFF(pDrv);
3891 if (pDrv)
3892 {
3893 /* Release critical section to avoid deadlock in CanReceive */
3894 //e1kCsLeave(pThis);
3895 STAM_PROFILE_START(&pThis->CTX_SUFF_Z(StatTransmitSend), a);
3896 rc = pDrv->pfnSendBuf(pDrv, pSg, fOnWorkerThread);
3897 STAM_PROFILE_STOP(&pThis->CTX_SUFF_Z(StatTransmitSend), a);
3898 //e1kCsEnter(pThis, RT_SRC_POS);
3899 }
3900 }
3901 else if (pSg)
3902 {
3903 Assert(pSg->aSegs[0].pvSeg == pThis->aTxPacketFallback);
3904 e1kPacketDump(pThis, (uint8_t const *)pSg->aSegs[0].pvSeg, cbFrame, "--> Loopback");
3905
3906 /** @todo do we actually need to check that we're in loopback mode here? */
3907 if (GET_BITS(RCTL, LBM) == RCTL_LBM_TCVR)
3908 {
3909 E1KRXDST status;
3910 RT_ZERO(status);
3911 status.fPIF = true;
3912 e1kHandleRxPacket(pThis, pSg->aSegs[0].pvSeg, cbFrame, status);
3913 rc = VINF_SUCCESS;
3914 }
3915 e1kXmitFreeBuf(pThis);
3916 }
3917 else
3918 rc = VERR_NET_DOWN;
3919 if (RT_FAILURE(rc))
3920 {
3921 E1kLogRel(("E1000: ERROR! pfnSend returned %Rrc\n", rc));
3922 /** @todo handle VERR_NET_DOWN and VERR_NET_NO_BUFFER_SPACE. Signal error ? */
3923 }
3924
3925 pThis->led.Actual.s.fWriting = 0;
3926}
3927
3928/**
3929 * Compute and write internet checksum (e1kCSum16) at the specified offset.
3930 *
3931 * @param pThis The device state structure.
3932 * @param pPkt Pointer to the packet.
3933 * @param u16PktLen Total length of the packet.
3934 * @param cso Offset in packet to write checksum at.
3935 * @param css Offset in packet to start computing
3936 * checksum from.
3937 * @param cse Offset in packet to stop computing
3938 * checksum at.
3939 * @thread E1000_TX
3940 */
3941static void e1kInsertChecksum(PE1KSTATE pThis, uint8_t *pPkt, uint16_t u16PktLen, uint8_t cso, uint8_t css, uint16_t cse)
3942{
3943 RT_NOREF1(pThis);
3944
3945 if (css >= u16PktLen)
3946 {
3947 E1kLog2(("%s css(%X) is greater than packet length-1(%X), checksum is not inserted\n",
3948 pThis->szPrf, cso, u16PktLen));
3949 return;
3950 }
3951
3952 if (cso >= u16PktLen - 1)
3953 {
3954 E1kLog2(("%s cso(%X) is greater than packet length-2(%X), checksum is not inserted\n",
3955 pThis->szPrf, cso, u16PktLen));
3956 return;
3957 }
3958
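 /* A CSE value of zero means "compute the checksum through the last byte of the packet", hence the substitution below. */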
3959 if (cse == 0)
3960 cse = u16PktLen - 1;
3961 uint16_t u16ChkSum = e1kCSum16(pPkt + css, cse - css + 1);
3962 E1kLog2(("%s Inserting csum: %04X at %02X, old value: %04X\n", pThis->szPrf,
3963 u16ChkSum, cso, *(uint16_t*)(pPkt + cso)));
3964 *(uint16_t*)(pPkt + cso) = u16ChkSum;
3965}
3966
3967/**
3968 * Add a part of descriptor's buffer to transmit frame.
3969 *
3970 * @remarks data.u64BufAddr is used unconditionally for both data
3971 * and legacy descriptors since it is identical to
3972 * legacy.u64BufAddr.
3973 *
3974 * @param pThis The device state structure.
3975 * @param pDesc Pointer to the descriptor to transmit.
3976 * @param u16Len Length of buffer to the end of segment.
3977 * @param fSend Force packet sending.
3978 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
3979 * @thread E1000_TX
3980 */
3981#ifndef E1K_WITH_TXD_CACHE
3982static void e1kFallbackAddSegment(PE1KSTATE pThis, RTGCPHYS PhysAddr, uint16_t u16Len, bool fSend, bool fOnWorkerThread)
3983{
3984 /* TCP header being transmitted */
3985 struct E1kTcpHeader *pTcpHdr = (struct E1kTcpHeader *)
3986 (pThis->aTxPacketFallback + pThis->contextTSE.tu.u8CSS);
3987 /* IP header being transmitted */
3988 struct E1kIpHeader *pIpHdr = (struct E1kIpHeader *)
3989 (pThis->aTxPacketFallback + pThis->contextTSE.ip.u8CSS);
3990
3991 E1kLog3(("%s e1kFallbackAddSegment: Length=%x, remaining payload=%x, header=%x, send=%RTbool\n",
3992 pThis->szPrf, u16Len, pThis->u32PayRemain, pThis->u16HdrRemain, fSend));
3993 Assert(pThis->u32PayRemain + pThis->u16HdrRemain > 0);
3994
3995 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), PhysAddr,
3996 pThis->aTxPacketFallback + pThis->u16TxPktLen, u16Len);
3997 E1kLog3(("%s Dump of the segment:\n"
3998 "%.*Rhxd\n"
3999 "%s --- End of dump ---\n",
4000 pThis->szPrf, u16Len, pThis->aTxPacketFallback + pThis->u16TxPktLen, pThis->szPrf));
4001 pThis->u16TxPktLen += u16Len;
4002 E1kLog3(("%s e1kFallbackAddSegment: pThis->u16TxPktLen=%x\n",
4003 pThis->szPrf, pThis->u16TxPktLen));
4004 if (pThis->u16HdrRemain > 0)
4005 {
4006 /* The header was not complete, check if it is now */
4007 if (u16Len >= pThis->u16HdrRemain)
4008 {
4009 /* The rest is payload */
4010 u16Len -= pThis->u16HdrRemain;
4011 pThis->u16HdrRemain = 0;
4012 /* Save partial checksum and flags */
4013 pThis->u32SavedCsum = pTcpHdr->chksum;
4014 pThis->u16SavedFlags = pTcpHdr->hdrlen_flags;
4015 /* Clear FIN and PSH flags now and set them only in the last segment */
4016 pTcpHdr->hdrlen_flags &= ~htons(E1K_TCP_FIN | E1K_TCP_PSH);
4017 }
4018 else
4019 {
4020 /* Still not */
4021 pThis->u16HdrRemain -= u16Len;
4022 E1kLog3(("%s e1kFallbackAddSegment: Header is still incomplete, 0x%x bytes remain.\n",
4023 pThis->szPrf, pThis->u16HdrRemain));
4024 return;
4025 }
4026 }
4027
4028 pThis->u32PayRemain -= u16Len;
4029
4030 if (fSend)
4031 {
4032 /* Leave ethernet header intact */
4033 /* IP Total Length = payload + headers - ethernet header */
4034 pIpHdr->total_len = htons(pThis->u16TxPktLen - pThis->contextTSE.ip.u8CSS);
4035 E1kLog3(("%s e1kFallbackAddSegment: End of packet, pIpHdr->total_len=%x\n",
4036 pThis->szPrf, ntohs(pIpHdr->total_len)));
4037 /* Update IP Checksum */
4038 pIpHdr->chksum = 0;
4039 e1kInsertChecksum(pThis, pThis->aTxPacketFallback, pThis->u16TxPktLen,
4040 pThis->contextTSE.ip.u8CSO,
4041 pThis->contextTSE.ip.u8CSS,
4042 pThis->contextTSE.ip.u16CSE);
4043
4044 /* Update TCP flags */
4045 /* Restore original FIN and PSH flags for the last segment */
4046 if (pThis->u32PayRemain == 0)
4047 {
4048 pTcpHdr->hdrlen_flags = pThis->u16SavedFlags;
4049 E1K_INC_CNT32(TSCTC);
4050 }
4051 /* Add TCP length to partial pseudo header sum */
4052 uint32_t csum = pThis->u32SavedCsum
4053 + htons(pThis->u16TxPktLen - pThis->contextTSE.tu.u8CSS);
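 /* Fold any carry bits back into the low 16 bits (one's-complement addition). */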
4054 while (csum >> 16)
4055 csum = (csum >> 16) + (csum & 0xFFFF);
4056 pTcpHdr->chksum = csum;
4057 /* Compute final checksum */
4058 e1kInsertChecksum(pThis, pThis->aTxPacketFallback, pThis->u16TxPktLen,
4059 pThis->contextTSE.tu.u8CSO,
4060 pThis->contextTSE.tu.u8CSS,
4061 pThis->contextTSE.tu.u16CSE);
4062
4063 /*
4064 * Transmit it. If we've used the SG already, allocate a new one before
4065 * we copy the data into it.
4066 */
4067 if (!pThis->CTX_SUFF(pTxSg))
4068 e1kXmitAllocBuf(pThis, pThis->u16TxPktLen + (pThis->fVTag ? 4 : 0), true /*fExactSize*/, false /*fGso*/);
4069 if (pThis->CTX_SUFF(pTxSg))
4070 {
4071 Assert(pThis->u16TxPktLen <= pThis->CTX_SUFF(pTxSg)->cbAvailable);
4072 Assert(pThis->CTX_SUFF(pTxSg)->cSegs == 1);
4073 if (pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg != pThis->aTxPacketFallback)
4074 memcpy(pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->aTxPacketFallback, pThis->u16TxPktLen);
4075 pThis->CTX_SUFF(pTxSg)->cbUsed = pThis->u16TxPktLen;
4076 pThis->CTX_SUFF(pTxSg)->aSegs[0].cbSeg = pThis->u16TxPktLen;
4077 }
4078 e1kTransmitFrame(pThis, fOnWorkerThread);
4079
4080 /* Update Sequence Number */
4081 pTcpHdr->seqno = htonl(ntohl(pTcpHdr->seqno) + pThis->u16TxPktLen
4082 - pThis->contextTSE.dw3.u8HDRLEN);
4083 /* Increment IP identification */
4084 pIpHdr->ident = htons(ntohs(pIpHdr->ident) + 1);
4085 }
4086}
4087#else /* E1K_WITH_TXD_CACHE */
4088static int e1kFallbackAddSegment(PE1KSTATE pThis, RTGCPHYS PhysAddr, uint16_t u16Len, bool fSend, bool fOnWorkerThread)
4089{
4090 int rc = VINF_SUCCESS;
4091 /* TCP header being transmitted */
4092 struct E1kTcpHeader *pTcpHdr = (struct E1kTcpHeader *)
4093 (pThis->aTxPacketFallback + pThis->contextTSE.tu.u8CSS);
4094 /* IP header being transmitted */
4095 struct E1kIpHeader *pIpHdr = (struct E1kIpHeader *)
4096 (pThis->aTxPacketFallback + pThis->contextTSE.ip.u8CSS);
4097
4098 E1kLog3(("%s e1kFallbackAddSegment: Length=%x, remaining payload=%x, header=%x, send=%RTbool\n",
4099 pThis->szPrf, u16Len, pThis->u32PayRemain, pThis->u16HdrRemain, fSend));
4100 Assert(pThis->u32PayRemain + pThis->u16HdrRemain > 0);
4101
4102 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), PhysAddr,
4103 pThis->aTxPacketFallback + pThis->u16TxPktLen, u16Len);
4104 E1kLog3(("%s Dump of the segment:\n"
4105 "%.*Rhxd\n"
4106 "%s --- End of dump ---\n",
4107 pThis->szPrf, u16Len, pThis->aTxPacketFallback + pThis->u16TxPktLen, pThis->szPrf));
4108 pThis->u16TxPktLen += u16Len;
4109 E1kLog3(("%s e1kFallbackAddSegment: pThis->u16TxPktLen=%x\n",
4110 pThis->szPrf, pThis->u16TxPktLen));
4111 if (pThis->u16HdrRemain > 0)
4112 {
4113 /* The header was not complete, check if it is now */
4114 if (u16Len >= pThis->u16HdrRemain)
4115 {
4116 /* The rest is payload */
4117 u16Len -= pThis->u16HdrRemain;
4118 pThis->u16HdrRemain = 0;
4119 /* Save partial checksum and flags */
4120 pThis->u32SavedCsum = pTcpHdr->chksum;
4121 pThis->u16SavedFlags = pTcpHdr->hdrlen_flags;
4122 /* Clear FIN and PSH flags now and set them only in the last segment */
4123 pTcpHdr->hdrlen_flags &= ~htons(E1K_TCP_FIN | E1K_TCP_PSH);
4124 }
4125 else
4126 {
4127 /* Still not */
4128 pThis->u16HdrRemain -= u16Len;
4129 E1kLog3(("%s e1kFallbackAddSegment: Header is still incomplete, 0x%x bytes remain.\n",
4130 pThis->szPrf, pThis->u16HdrRemain));
4131 return rc;
4132 }
4133 }
4134
4135 pThis->u32PayRemain -= u16Len;
4136
4137 if (fSend)
4138 {
4139 /* Leave ethernet header intact */
4140 /* IP Total Length = payload + headers - ethernet header */
4141 pIpHdr->total_len = htons(pThis->u16TxPktLen - pThis->contextTSE.ip.u8CSS);
4142 E1kLog3(("%s e1kFallbackAddSegment: End of packet, pIpHdr->total_len=%x\n",
4143 pThis->szPrf, ntohs(pIpHdr->total_len)));
4144 /* Update IP Checksum */
4145 pIpHdr->chksum = 0;
4146 e1kInsertChecksum(pThis, pThis->aTxPacketFallback, pThis->u16TxPktLen,
4147 pThis->contextTSE.ip.u8CSO,
4148 pThis->contextTSE.ip.u8CSS,
4149 pThis->contextTSE.ip.u16CSE);
4150
4151 /* Update TCP flags */
4152 /* Restore original FIN and PSH flags for the last segment */
4153 if (pThis->u32PayRemain == 0)
4154 {
4155 pTcpHdr->hdrlen_flags = pThis->u16SavedFlags;
4156 E1K_INC_CNT32(TSCTC);
4157 }
4158 /* Add TCP length to partial pseudo header sum */
4159 uint32_t csum = pThis->u32SavedCsum
4160 + htons(pThis->u16TxPktLen - pThis->contextTSE.tu.u8CSS);
4161 while (csum >> 16)
4162 csum = (csum >> 16) + (csum & 0xFFFF);
4163 pTcpHdr->chksum = csum;
4164 /* Compute final checksum */
4165 e1kInsertChecksum(pThis, pThis->aTxPacketFallback, pThis->u16TxPktLen,
4166 pThis->contextTSE.tu.u8CSO,
4167 pThis->contextTSE.tu.u8CSS,
4168 pThis->contextTSE.tu.u16CSE);
4169
4170 /*
4171 * Transmit it.
4172 */
4173 if (pThis->CTX_SUFF(pTxSg))
4174 {
4175 Assert(pThis->u16TxPktLen <= pThis->CTX_SUFF(pTxSg)->cbAvailable);
4176 Assert(pThis->CTX_SUFF(pTxSg)->cSegs == 1);
4177 if (pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg != pThis->aTxPacketFallback)
4178 memcpy(pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->aTxPacketFallback, pThis->u16TxPktLen);
4179 pThis->CTX_SUFF(pTxSg)->cbUsed = pThis->u16TxPktLen;
4180 pThis->CTX_SUFF(pTxSg)->aSegs[0].cbSeg = pThis->u16TxPktLen;
4181 }
4182 e1kTransmitFrame(pThis, fOnWorkerThread);
4183
4184 /* Update Sequence Number */
4185 pTcpHdr->seqno = htonl(ntohl(pTcpHdr->seqno) + pThis->u16TxPktLen
4186 - pThis->contextTSE.dw3.u8HDRLEN);
4187 /* Increment IP identification */
4188 pIpHdr->ident = htons(ntohs(pIpHdr->ident) + 1);
4189
4190 /* Allocate new buffer for the next segment. */
4191 if (pThis->u32PayRemain)
4192 {
4193 pThis->cbTxAlloc = RT_MIN(pThis->u32PayRemain,
4194 pThis->contextTSE.dw3.u16MSS)
4195 + pThis->contextTSE.dw3.u8HDRLEN
4196 + (pThis->fVTag ? 4 : 0);
4197 rc = e1kXmitAllocBuf(pThis, false /* fGSO */);
4198 }
4199 }
4200
4201 return rc;
4202}
4203#endif /* E1K_WITH_TXD_CACHE */
4204
4205#ifndef E1K_WITH_TXD_CACHE
4206/**
4207 * TCP segmentation offloading fallback: Add descriptor's buffer to transmit
4208 * frame.
4209 *
4210 * We construct the frame in the fallback buffer first and then copy it to the SG
4211 * buffer before passing it down to the network driver code.
4212 *
4213 * @returns true if the frame should be transmitted, false if not.
4214 *
4215 * @param pThis The device state structure.
4216 * @param pDesc Pointer to the descriptor to transmit.
4217 * @param cbFragment Length of descriptor's buffer.
4218 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4219 * @thread E1000_TX
4220 */
4221static bool e1kFallbackAddToFrame(PE1KSTATE pThis, E1KTXDESC *pDesc, uint32_t cbFragment, bool fOnWorkerThread)
4222{
4223 PPDMSCATTERGATHER pTxSg = pThis->CTX_SUFF(pTxSg);
4224 Assert(e1kGetDescType(pDesc) == E1K_DTYP_DATA);
4225 Assert(pDesc->data.cmd.fTSE);
4226 Assert(!e1kXmitIsGsoBuf(pTxSg));
4227
4228 uint16_t u16MaxPktLen = pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw3.u16MSS;
4229 Assert(u16MaxPktLen != 0);
4230 Assert(u16MaxPktLen < E1K_MAX_TX_PKT_SIZE);
4231
4232 /*
4233 * Carve out segments.
4234 */
4235 do
4236 {
4237 /* Calculate how many bytes we have left in this TCP segment */
4238 uint32_t cb = u16MaxPktLen - pThis->u16TxPktLen;
4239 if (cb > cbFragment)
4240 {
4241 /* This descriptor fits completely into current segment */
4242 cb = cbFragment;
4243 e1kFallbackAddSegment(pThis, pDesc->data.u64BufAddr, cb, pDesc->data.cmd.fEOP /*fSend*/, fOnWorkerThread);
4244 }
4245 else
4246 {
4247 e1kFallbackAddSegment(pThis, pDesc->data.u64BufAddr, cb, true /*fSend*/, fOnWorkerThread);
4248 /*
4249 * Rewind the packet tail pointer to the beginning of payload,
4250 * so we continue writing right beyond the header.
4251 */
4252 pThis->u16TxPktLen = pThis->contextTSE.dw3.u8HDRLEN;
4253 }
4254
4255 pDesc->data.u64BufAddr += cb;
4256 cbFragment -= cb;
4257 } while (cbFragment > 0);
4258
4259 if (pDesc->data.cmd.fEOP)
4260 {
4261 /* End of packet, next segment will contain header. */
4262 if (pThis->u32PayRemain != 0)
4263 E1K_INC_CNT32(TSCTFC);
4264 pThis->u16TxPktLen = 0;
4265 e1kXmitFreeBuf(pThis);
4266 }
4267
4268 return false;
4269}
4270#else /* E1K_WITH_TXD_CACHE */
4271/**
4272 * TCP segmentation offloading fallback: Add descriptor's buffer to transmit
4273 * frame.
4274 *
4275 * We construct the frame in the fallback buffer first and then copy it to the SG
4276 * buffer before passing it down to the network driver code.
4277 *
4278 * @returns VBox status code.
4279 *
4280 * @param pThis The device state structure.
4281 * @param pDesc Pointer to the descriptor to transmit.
4283 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4284 * @thread E1000_TX
4285 */
4286static int e1kFallbackAddToFrame(PE1KSTATE pThis, E1KTXDESC *pDesc, bool fOnWorkerThread)
4287{
4288#ifdef VBOX_STRICT
4289 PPDMSCATTERGATHER pTxSg = pThis->CTX_SUFF(pTxSg);
4290 Assert(e1kGetDescType(pDesc) == E1K_DTYP_DATA);
4291 Assert(pDesc->data.cmd.fTSE);
4292 Assert(!e1kXmitIsGsoBuf(pTxSg));
4293#endif
4294
4295 uint16_t u16MaxPktLen = pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw3.u16MSS;
4296 Assert(u16MaxPktLen != 0);
4297 Assert(u16MaxPktLen < E1K_MAX_TX_PKT_SIZE);
4298
4299 /*
4300 * Carve out segments.
4301 */
4302 int rc;
4303 do
4304 {
4305 /* Calculate how many bytes we have left in this TCP segment */
4306 uint32_t cb = u16MaxPktLen - pThis->u16TxPktLen;
4307 if (cb > pDesc->data.cmd.u20DTALEN)
4308 {
4309 /* This descriptor fits completely into current segment */
4310 cb = pDesc->data.cmd.u20DTALEN;
4311 rc = e1kFallbackAddSegment(pThis, pDesc->data.u64BufAddr, cb, pDesc->data.cmd.fEOP /*fSend*/, fOnWorkerThread);
4312 }
4313 else
4314 {
4315 rc = e1kFallbackAddSegment(pThis, pDesc->data.u64BufAddr, cb, true /*fSend*/, fOnWorkerThread);
4316 /*
4317 * Rewind the packet tail pointer to the beginning of payload,
4318 * so we continue writing right beyond the header.
4319 */
4320 pThis->u16TxPktLen = pThis->contextTSE.dw3.u8HDRLEN;
4321 }
4322
4323 pDesc->data.u64BufAddr += cb;
4324 pDesc->data.cmd.u20DTALEN -= cb;
4325 } while (pDesc->data.cmd.u20DTALEN > 0 && RT_SUCCESS(rc));
4326
4327 if (pDesc->data.cmd.fEOP)
4328 {
4329 /* End of packet, next segment will contain header. */
4330 if (pThis->u32PayRemain != 0)
4331 E1K_INC_CNT32(TSCTFC);
4332 pThis->u16TxPktLen = 0;
4333 e1kXmitFreeBuf(pThis);
4334 }
4335
4336 return rc;
4337}
4338#endif /* E1K_WITH_TXD_CACHE */
4339
4340
4341/**
4342 * Add descriptor's buffer to transmit frame.
4343 *
4344 * This deals with GSO and normal frames, e1kFallbackAddToFrame deals with the
4345 * TSE frames we cannot handle as GSO.
4346 *
4347 * @returns true on success, false on failure.
4348 *
4349 * @param pThis The device state structure.
4350 * @param PhysAddr The physical address of the descriptor buffer.
4351 * @param cbFragment Length of descriptor's buffer.
4352 * @thread E1000_TX
4353 */
4354static bool e1kAddToFrame(PE1KSTATE pThis, RTGCPHYS PhysAddr, uint32_t cbFragment)
4355{
4356 PPDMSCATTERGATHER pTxSg = pThis->CTX_SUFF(pTxSg);
4357 bool const fGso = e1kXmitIsGsoBuf(pTxSg);
4358 uint32_t const cbNewPkt = cbFragment + pThis->u16TxPktLen;
4359
4360 if (RT_UNLIKELY( !fGso && cbNewPkt > E1K_MAX_TX_PKT_SIZE ))
4361 {
4362 E1kLog(("%s Transmit packet is too large: %u > %u(max)\n", pThis->szPrf, cbNewPkt, E1K_MAX_TX_PKT_SIZE));
4363 return false;
4364 }
4365 if (RT_UNLIKELY( fGso && cbNewPkt > pTxSg->cbAvailable ))
4366 {
4367 E1kLog(("%s Transmit packet is too large: %u > %u(max)/GSO\n", pThis->szPrf, cbNewPkt, pTxSg->cbAvailable));
4368 return false;
4369 }
4370
4371 if (RT_LIKELY(pTxSg))
4372 {
4373 Assert(pTxSg->cSegs == 1);
4374 Assert(pTxSg->cbUsed == pThis->u16TxPktLen);
4375
4376 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), PhysAddr,
4377 (uint8_t *)pTxSg->aSegs[0].pvSeg + pThis->u16TxPktLen, cbFragment);
4378
4379 pTxSg->cbUsed = cbNewPkt;
4380 }
4381 pThis->u16TxPktLen = cbNewPkt;
4382
4383 return true;
4384}
4385
4386
4387/**
4388 * Write the descriptor back to guest memory and notify the guest.
4389 *
4390 * @param pThis The device state structure.
4391 * @param pDesc Pointer to the descriptor that has been transmitted.
4392 * @param addr Physical address of the descriptor in guest memory.
4393 * @thread E1000_TX
4394 */
4395static void e1kDescReport(PE1KSTATE pThis, E1KTXDESC *pDesc, RTGCPHYS addr)
4396{
4397 /*
4398 * We fake descriptor write-back bursting. Descriptors are written back as they are
4399 * processed.
4400 */
4401 /* Let's pretend we process descriptors. Write back with DD set. */
4402 /*
4403 * Prior to r71586 we tried to accommodate the case when write-back bursts
4404 * are enabled without actually implementing bursting by writing back all
4405 * descriptors, even the ones that do not have RS set. This caused kernel
4406 * panics with Linux SMP kernels, as the e1000 driver tried to free up the skb
4407 * associated with a written-back descriptor even if it happened to be a context
4408 * descriptor, and context descriptors do not have an skb associated with them.
4409 * Starting from r71586 we write back only the descriptors with RS set,
4410 * which is a little bit different from what the real hardware does in
4411 * case there is a chain of data descriptors where some of them have RS set
4412 * and others do not. It is a very uncommon scenario imho.
4413 * We need to check RPS as well since some legacy drivers use it instead of
4414 * RS even with newer cards.
4415 */
4416 if (pDesc->legacy.cmd.fRS || pDesc->legacy.cmd.fRPS)
4417 {
4418 pDesc->legacy.dw3.fDD = 1; /* Descriptor Done */
4419 e1kWriteBackDesc(pThis, pDesc, addr);
4420 if (pDesc->legacy.cmd.fEOP)
4421 {
4422#ifdef E1K_USE_TX_TIMERS
4423 if (pDesc->legacy.cmd.fIDE)
4424 {
4425 E1K_INC_ISTAT_CNT(pThis->uStatTxIDE);
4426 //if (pThis->fIntRaised)
4427 //{
4428 // /* Interrupt is already pending, no need for timers */
4429 // ICR |= ICR_TXDW;
4430 //}
4431 //else {
4432 /* Arm the timer to fire in TIVD usec (discard .024) */
4433 e1kArmTimer(pThis, pThis->CTX_SUFF(pTIDTimer), TIDV);
4434# ifndef E1K_NO_TAD
4435 /* If absolute timer delay is enabled and the timer is not running yet, arm it. */
4436 E1kLog2(("%s Checking if TAD timer is running\n",
4437 pThis->szPrf));
4438 if (TADV != 0 && !TMTimerIsActive(pThis->CTX_SUFF(pTADTimer)))
4439 e1kArmTimer(pThis, pThis->CTX_SUFF(pTADTimer), TADV);
4440# endif /* E1K_NO_TAD */
4441 }
4442 else
4443 {
4444 E1kLog2(("%s No IDE set, cancel TAD timer and raise interrupt\n",
4445 pThis->szPrf));
4446# ifndef E1K_NO_TAD
4447 /* Cancel both timers if armed and fire immediately. */
4448 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTADTimer));
4449# endif /* E1K_NO_TAD */
4450#endif /* E1K_USE_TX_TIMERS */
4451 E1K_INC_ISTAT_CNT(pThis->uStatIntTx);
4452 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_TXDW);
4453#ifdef E1K_USE_TX_TIMERS
4454 }
4455#endif /* E1K_USE_TX_TIMERS */
4456 }
4457 }
4458 else
4459 {
4460 E1K_INC_ISTAT_CNT(pThis->uStatTxNoRS);
4461 }
4462}
4463
4464#ifndef E1K_WITH_TXD_CACHE
4465
4466/**
4467 * Process Transmit Descriptor.
4468 *
4469 * E1000 supports three types of transmit descriptors:
4470 * - legacy data descriptors of older format (context-less).
4471 * - data the same as legacy but providing new offloading capabilities.
4472 * - context sets up the context for following data descriptors.
4473 *
4474 * @param pThis The device state structure.
4475 * @param pDesc Pointer to descriptor union.
4476 * @param addr Physical address of descriptor in guest memory.
4477 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4478 * @thread E1000_TX
4479 */
4480static int e1kXmitDesc(PE1KSTATE pThis, E1KTXDESC *pDesc, RTGCPHYS addr, bool fOnWorkerThread)
4481{
4482 int rc = VINF_SUCCESS;
4483 uint32_t cbVTag = 0;
4484
4485 e1kPrintTDesc(pThis, pDesc, "vvv");
4486
4487#ifdef E1K_USE_TX_TIMERS
4488 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTIDTimer));
4489#endif /* E1K_USE_TX_TIMERS */
4490
4491 switch (e1kGetDescType(pDesc))
4492 {
4493 case E1K_DTYP_CONTEXT:
4494 if (pDesc->context.dw2.fTSE)
4495 {
4496 pThis->contextTSE = pDesc->context;
4497 pThis->u32PayRemain = pDesc->context.dw2.u20PAYLEN;
4498 pThis->u16HdrRemain = pDesc->context.dw3.u8HDRLEN;
4499 e1kSetupGsoCtx(&pThis->GsoCtx, &pDesc->context);
4500 STAM_COUNTER_INC(&pThis->StatTxDescCtxTSE);
4501 }
4502 else
4503 {
4504 pThis->contextNormal = pDesc->context;
4505 STAM_COUNTER_INC(&pThis->StatTxDescCtxNormal);
4506 }
4507 E1kLog2(("%s %s context updated: IP CSS=%02X, IP CSO=%02X, IP CSE=%04X"
4508 ", TU CSS=%02X, TU CSO=%02X, TU CSE=%04X\n", pThis->szPrf,
4509 pDesc->context.dw2.fTSE ? "TSE" : "Normal",
4510 pDesc->context.ip.u8CSS,
4511 pDesc->context.ip.u8CSO,
4512 pDesc->context.ip.u16CSE,
4513 pDesc->context.tu.u8CSS,
4514 pDesc->context.tu.u8CSO,
4515 pDesc->context.tu.u16CSE));
4516 E1K_INC_ISTAT_CNT(pThis->uStatDescCtx);
4517 e1kDescReport(pThis, pDesc, addr);
4518 break;
4519
4520 case E1K_DTYP_DATA:
4521 {
4522 if (pDesc->data.cmd.u20DTALEN == 0 || pDesc->data.u64BufAddr == 0)
4523 {
4524 E1kLog2(("% Empty data descriptor, skipped.\n", pThis->szPrf));
4525 /** @todo Same as legacy when !TSE. See below. */
4526 break;
4527 }
4528 STAM_COUNTER_INC(pDesc->data.cmd.fTSE?
4529 &pThis->StatTxDescTSEData:
4530 &pThis->StatTxDescData);
4531 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
4532 E1K_INC_ISTAT_CNT(pThis->uStatDescDat);
4533
4534 /*
4535             * The last descriptor of a non-TSE packet must contain the VLE flag.
4536             * TSE packets have the VLE flag in the first descriptor. The latter
4537             * case is taken care of a bit later when cbVTag gets assigned.
4538 *
4539 * 1) pDesc->data.cmd.fEOP && !pDesc->data.cmd.fTSE
4540 */
4541 if (pDesc->data.cmd.fEOP && !pDesc->data.cmd.fTSE)
4542 {
4543 pThis->fVTag = pDesc->data.cmd.fVLE;
4544 pThis->u16VTagTCI = pDesc->data.dw3.u16Special;
4545 }
4546 /*
4547 * First fragment: Allocate new buffer and save the IXSM and TXSM
4548 * packet options as these are only valid in the first fragment.
4549 */
4550 if (pThis->u16TxPktLen == 0)
4551 {
4552 pThis->fIPcsum = pDesc->data.dw3.fIXSM;
4553 pThis->fTCPcsum = pDesc->data.dw3.fTXSM;
4554 E1kLog2(("%s Saving checksum flags:%s%s; \n", pThis->szPrf,
4555 pThis->fIPcsum ? " IP" : "",
4556 pThis->fTCPcsum ? " TCP/UDP" : ""));
4557 if (pDesc->data.cmd.fTSE)
4558 {
4559 /* 2) pDesc->data.cmd.fTSE && pThis->u16TxPktLen == 0 */
4560 pThis->fVTag = pDesc->data.cmd.fVLE;
4561 pThis->u16VTagTCI = pDesc->data.dw3.u16Special;
4562 cbVTag = pThis->fVTag ? 4 : 0;
4563 }
4564 else if (pDesc->data.cmd.fEOP)
4565 cbVTag = pDesc->data.cmd.fVLE ? 4 : 0;
4566 else
4567 cbVTag = 4;
4568 E1kLog3(("%s About to allocate TX buffer: cbVTag=%u\n", pThis->szPrf, cbVTag));
4569 if (e1kCanDoGso(pThis, &pThis->GsoCtx, &pDesc->data, &pThis->contextTSE))
4570 rc = e1kXmitAllocBuf(pThis, pThis->contextTSE.dw2.u20PAYLEN + pThis->contextTSE.dw3.u8HDRLEN + cbVTag,
4571 true /*fExactSize*/, true /*fGso*/);
4572 else if (pDesc->data.cmd.fTSE)
4573 rc = e1kXmitAllocBuf(pThis, pThis->contextTSE.dw3.u16MSS + pThis->contextTSE.dw3.u8HDRLEN + cbVTag,
4574 pDesc->data.cmd.fTSE /*fExactSize*/, false /*fGso*/);
4575 else
4576 rc = e1kXmitAllocBuf(pThis, pDesc->data.cmd.u20DTALEN + cbVTag,
4577 pDesc->data.cmd.fEOP /*fExactSize*/, false /*fGso*/);
4578
4579 /**
4580 * @todo: Perhaps it is not that simple for GSO packets! We may
4581 * need to unwind some changes.
4582 */
4583 if (RT_FAILURE(rc))
4584 {
4585 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
4586 break;
4587 }
4588                /** @todo Is there any way to indicate errors other than collisions? Like
4589 * VERR_NET_DOWN. */
4590 }
4591
4592 /*
4593 * Add the descriptor data to the frame. If the frame is complete,
4594 * transmit it and reset the u16TxPktLen field.
4595 */
4596 if (e1kXmitIsGsoBuf(pThis->CTX_SUFF(pTxSg)))
4597 {
4598 STAM_COUNTER_INC(&pThis->StatTxPathGSO);
4599 bool fRc = e1kAddToFrame(pThis, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
4600 if (pDesc->data.cmd.fEOP)
4601 {
4602 if ( fRc
4603 && pThis->CTX_SUFF(pTxSg)
4604 && pThis->CTX_SUFF(pTxSg)->cbUsed == (size_t)pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw2.u20PAYLEN)
4605 {
4606 e1kTransmitFrame(pThis, fOnWorkerThread);
4607 E1K_INC_CNT32(TSCTC);
4608 }
4609 else
4610 {
4611 if (fRc)
4612 E1kLog(("%s bad GSO/TSE %p or %u < %u\n" , pThis->szPrf,
4613 pThis->CTX_SUFF(pTxSg), pThis->CTX_SUFF(pTxSg) ? pThis->CTX_SUFF(pTxSg)->cbUsed : 0,
4614 pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw2.u20PAYLEN));
4615 e1kXmitFreeBuf(pThis);
4616 E1K_INC_CNT32(TSCTFC);
4617 }
4618 pThis->u16TxPktLen = 0;
4619 }
4620 }
4621 else if (!pDesc->data.cmd.fTSE)
4622 {
4623 STAM_COUNTER_INC(&pThis->StatTxPathRegular);
4624 bool fRc = e1kAddToFrame(pThis, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
4625 if (pDesc->data.cmd.fEOP)
4626 {
4627 if (fRc && pThis->CTX_SUFF(pTxSg))
4628 {
4629 Assert(pThis->CTX_SUFF(pTxSg)->cSegs == 1);
4630 if (pThis->fIPcsum)
4631 e1kInsertChecksum(pThis, (uint8_t *)pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->u16TxPktLen,
4632 pThis->contextNormal.ip.u8CSO,
4633 pThis->contextNormal.ip.u8CSS,
4634 pThis->contextNormal.ip.u16CSE);
4635 if (pThis->fTCPcsum)
4636 e1kInsertChecksum(pThis, (uint8_t *)pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->u16TxPktLen,
4637 pThis->contextNormal.tu.u8CSO,
4638 pThis->contextNormal.tu.u8CSS,
4639 pThis->contextNormal.tu.u16CSE);
4640 e1kTransmitFrame(pThis, fOnWorkerThread);
4641 }
4642 else
4643 e1kXmitFreeBuf(pThis);
4644 pThis->u16TxPktLen = 0;
4645 }
4646 }
4647 else
4648 {
4649 STAM_COUNTER_INC(&pThis->StatTxPathFallback);
4650 e1kFallbackAddToFrame(pThis, pDesc, pDesc->data.cmd.u20DTALEN, fOnWorkerThread);
4651 }
4652
4653 e1kDescReport(pThis, pDesc, addr);
4654 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
4655 break;
4656 }
4657
4658 case E1K_DTYP_LEGACY:
4659 if (pDesc->legacy.cmd.u16Length == 0 || pDesc->legacy.u64BufAddr == 0)
4660 {
4661 E1kLog(("%s Empty legacy descriptor, skipped.\n", pThis->szPrf));
4662 /** @todo 3.3.3, Length/Buffer Address: RS set -> write DD when processing. */
4663 break;
4664 }
4665 STAM_COUNTER_INC(&pThis->StatTxDescLegacy);
4666 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
4667
4668 /* First fragment: allocate new buffer. */
4669 if (pThis->u16TxPktLen == 0)
4670 {
4671 if (pDesc->legacy.cmd.fEOP)
4672 cbVTag = pDesc->legacy.cmd.fVLE ? 4 : 0;
4673 else
4674 cbVTag = 4;
4675 E1kLog3(("%s About to allocate TX buffer: cbVTag=%u\n", pThis->szPrf, cbVTag));
4676 /** @todo reset status bits? */
4677 rc = e1kXmitAllocBuf(pThis, pDesc->legacy.cmd.u16Length + cbVTag, pDesc->legacy.cmd.fEOP, false /*fGso*/);
4678 if (RT_FAILURE(rc))
4679 {
4680 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
4681 break;
4682 }
4683
4684            /** @todo Is there any way to indicate errors other than collisions? Like
4685 * VERR_NET_DOWN. */
4686 }
4687
4688 /* Add fragment to frame. */
4689 if (e1kAddToFrame(pThis, pDesc->data.u64BufAddr, pDesc->legacy.cmd.u16Length))
4690 {
4691 E1K_INC_ISTAT_CNT(pThis->uStatDescLeg);
4692
4693 /* Last fragment: Transmit and reset the packet storage counter. */
4694 if (pDesc->legacy.cmd.fEOP)
4695 {
4696 pThis->fVTag = pDesc->legacy.cmd.fVLE;
4697 pThis->u16VTagTCI = pDesc->legacy.dw3.u16Special;
4698 /** @todo Offload processing goes here. */
4699 e1kTransmitFrame(pThis, fOnWorkerThread);
4700 pThis->u16TxPktLen = 0;
4701 }
4702 }
4703 /* Last fragment + failure: free the buffer and reset the storage counter. */
4704 else if (pDesc->legacy.cmd.fEOP)
4705 {
4706 e1kXmitFreeBuf(pThis);
4707 pThis->u16TxPktLen = 0;
4708 }
4709
4710 e1kDescReport(pThis, pDesc, addr);
4711 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
4712 break;
4713
4714 default:
4715 E1kLog(("%s ERROR Unsupported transmit descriptor type: 0x%04x\n",
4716 pThis->szPrf, e1kGetDescType(pDesc)));
4717 break;
4718 }
4719
4720 return rc;
4721}
4722
4723#else /* E1K_WITH_TXD_CACHE */
4724
4725/**
4726 * Process Transmit Descriptor.
4727 *
4728 * E1000 supports three types of transmit descriptors:
4729 * - legacy: data descriptors in the old, context-less format.
4730 * - data: same as legacy, but providing new offloading capabilities.
4731 * - context: sets up the context for the following data descriptors.
4732 *
4733 * @param pThis The device state structure.
4734 * @param pDesc Pointer to descriptor union.
4735 * @param addr Physical address of descriptor in guest memory.
4736 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4738 * @thread E1000_TX
4739 */
4740static int e1kXmitDesc(PE1KSTATE pThis, E1KTXDESC *pDesc, RTGCPHYS addr,
4741 bool fOnWorkerThread)
4742{
4743 int rc = VINF_SUCCESS;
4744
4745 e1kPrintTDesc(pThis, pDesc, "vvv");
4746
4747#ifdef E1K_USE_TX_TIMERS
4748 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTIDTimer));
4749#endif /* E1K_USE_TX_TIMERS */
4750
4751 switch (e1kGetDescType(pDesc))
4752 {
4753 case E1K_DTYP_CONTEXT:
4754            /* The caller has already updated the context. */
4755 E1K_INC_ISTAT_CNT(pThis->uStatDescCtx);
4756 e1kDescReport(pThis, pDesc, addr);
4757 break;
4758
4759 case E1K_DTYP_DATA:
4760 {
4761 STAM_COUNTER_INC(pDesc->data.cmd.fTSE?
4762 &pThis->StatTxDescTSEData:
4763 &pThis->StatTxDescData);
4764 E1K_INC_ISTAT_CNT(pThis->uStatDescDat);
4765 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
4766 if (pDesc->data.cmd.u20DTALEN == 0 || pDesc->data.u64BufAddr == 0)
4767 {
4768                E1kLog2(("%s Empty data descriptor, skipped.\n", pThis->szPrf));
4769 }
4770 else
4771 {
4772 /*
4773 * Add the descriptor data to the frame. If the frame is complete,
4774 * transmit it and reset the u16TxPktLen field.
4775 */
4776 if (e1kXmitIsGsoBuf(pThis->CTX_SUFF(pTxSg)))
4777 {
4778 STAM_COUNTER_INC(&pThis->StatTxPathGSO);
4779 bool fRc = e1kAddToFrame(pThis, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
4780 if (pDesc->data.cmd.fEOP)
4781 {
4782 if ( fRc
4783 && pThis->CTX_SUFF(pTxSg)
4784 && pThis->CTX_SUFF(pTxSg)->cbUsed == (size_t)pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw2.u20PAYLEN)
4785 {
4786 e1kTransmitFrame(pThis, fOnWorkerThread);
4787 E1K_INC_CNT32(TSCTC);
4788 }
4789 else
4790 {
4791 if (fRc)
4792 E1kLog(("%s bad GSO/TSE %p or %u < %u\n" , pThis->szPrf,
4793 pThis->CTX_SUFF(pTxSg), pThis->CTX_SUFF(pTxSg) ? pThis->CTX_SUFF(pTxSg)->cbUsed : 0,
4794 pThis->contextTSE.dw3.u8HDRLEN + pThis->contextTSE.dw2.u20PAYLEN));
4795 e1kXmitFreeBuf(pThis);
4796 E1K_INC_CNT32(TSCTFC);
4797 }
4798 pThis->u16TxPktLen = 0;
4799 }
4800 }
4801 else if (!pDesc->data.cmd.fTSE)
4802 {
4803 STAM_COUNTER_INC(&pThis->StatTxPathRegular);
4804 bool fRc = e1kAddToFrame(pThis, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
4805 if (pDesc->data.cmd.fEOP)
4806 {
4807 if (fRc && pThis->CTX_SUFF(pTxSg))
4808 {
4809 Assert(pThis->CTX_SUFF(pTxSg)->cSegs == 1);
4810 if (pThis->fIPcsum)
4811 e1kInsertChecksum(pThis, (uint8_t *)pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->u16TxPktLen,
4812 pThis->contextNormal.ip.u8CSO,
4813 pThis->contextNormal.ip.u8CSS,
4814 pThis->contextNormal.ip.u16CSE);
4815 if (pThis->fTCPcsum)
4816 e1kInsertChecksum(pThis, (uint8_t *)pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pThis->u16TxPktLen,
4817 pThis->contextNormal.tu.u8CSO,
4818 pThis->contextNormal.tu.u8CSS,
4819 pThis->contextNormal.tu.u16CSE);
4820 e1kTransmitFrame(pThis, fOnWorkerThread);
4821 }
4822 else
4823 e1kXmitFreeBuf(pThis);
4824 pThis->u16TxPktLen = 0;
4825 }
4826 }
4827 else
4828 {
4829 STAM_COUNTER_INC(&pThis->StatTxPathFallback);
4830 rc = e1kFallbackAddToFrame(pThis, pDesc, fOnWorkerThread);
4831 }
4832 }
4833 e1kDescReport(pThis, pDesc, addr);
4834 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
4835 break;
4836 }
4837
4838 case E1K_DTYP_LEGACY:
4839 STAM_COUNTER_INC(&pThis->StatTxDescLegacy);
4840 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
4841 if (pDesc->legacy.cmd.u16Length == 0 || pDesc->legacy.u64BufAddr == 0)
4842 {
4843 E1kLog(("%s Empty legacy descriptor, skipped.\n", pThis->szPrf));
4844 }
4845 else
4846 {
4847 /* Add fragment to frame. */
4848 if (e1kAddToFrame(pThis, pDesc->data.u64BufAddr, pDesc->legacy.cmd.u16Length))
4849 {
4850 E1K_INC_ISTAT_CNT(pThis->uStatDescLeg);
4851
4852 /* Last fragment: Transmit and reset the packet storage counter. */
4853 if (pDesc->legacy.cmd.fEOP)
4854 {
4855 if (pDesc->legacy.cmd.fIC)
4856 {
4857 e1kInsertChecksum(pThis,
4858 (uint8_t *)pThis->CTX_SUFF(pTxSg)->aSegs[0].pvSeg,
4859 pThis->u16TxPktLen,
4860 pDesc->legacy.cmd.u8CSO,
4861 pDesc->legacy.dw3.u8CSS,
4862 0);
4863 }
4864 e1kTransmitFrame(pThis, fOnWorkerThread);
4865 pThis->u16TxPktLen = 0;
4866 }
4867 }
4868 /* Last fragment + failure: free the buffer and reset the storage counter. */
4869 else if (pDesc->legacy.cmd.fEOP)
4870 {
4871 e1kXmitFreeBuf(pThis);
4872 pThis->u16TxPktLen = 0;
4873 }
4874 }
4875 e1kDescReport(pThis, pDesc, addr);
4876 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
4877 break;
4878
4879 default:
4880 E1kLog(("%s ERROR Unsupported transmit descriptor type: 0x%04x\n",
4881 pThis->szPrf, e1kGetDescType(pDesc)));
4882 break;
4883 }
4884
4885 return rc;
4886}
4887
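/**
 * Update the transmission context from a context descriptor.
 *
 * Copies the descriptor into the TSE or normal context. For TSE contexts this
 * also resets the remaining payload/header byte counters and prepares the GSO
 * context used by the offloaded transmit path.
 *
 * @param   pThis       The device state structure.
 * @param   pDesc       Pointer to the context descriptor.
 * @thread  E1000_TX
 */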
4888DECLINLINE(void) e1kUpdateTxContext(PE1KSTATE pThis, E1KTXDESC *pDesc)
4889{
4890 if (pDesc->context.dw2.fTSE)
4891 {
4892 pThis->contextTSE = pDesc->context;
4893 pThis->u32PayRemain = pDesc->context.dw2.u20PAYLEN;
4894 pThis->u16HdrRemain = pDesc->context.dw3.u8HDRLEN;
4895 e1kSetupGsoCtx(&pThis->GsoCtx, &pDesc->context);
4896 STAM_COUNTER_INC(&pThis->StatTxDescCtxTSE);
4897 }
4898 else
4899 {
4900 pThis->contextNormal = pDesc->context;
4901 STAM_COUNTER_INC(&pThis->StatTxDescCtxNormal);
4902 }
4903 E1kLog2(("%s %s context updated: IP CSS=%02X, IP CSO=%02X, IP CSE=%04X"
4904 ", TU CSS=%02X, TU CSO=%02X, TU CSE=%04X\n", pThis->szPrf,
4905 pDesc->context.dw2.fTSE ? "TSE" : "Normal",
4906 pDesc->context.ip.u8CSS,
4907 pDesc->context.ip.u8CSO,
4908 pDesc->context.ip.u16CSE,
4909 pDesc->context.tu.u8CSS,
4910 pDesc->context.tu.u8CSO,
4911 pDesc->context.tu.u16CSE));
4912}
4913
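/**
 * Locate a complete packet in the transmit descriptor cache.
 *
 * Walks the cached descriptors starting at iTxDCurrent, applying context
 * descriptors on the fly and accumulating the packet length until a
 * descriptor with EOP is found. On success the VLAN tag and checksum options
 * are recorded and cbTxAlloc, the size of the buffer to allocate, is computed.
 *
 * @returns true if a complete (possibly empty) packet was located, false if
 *          more descriptors need to be fetched.
 * @param   pThis       The device state structure.
 * @thread  E1000_TX
 */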
4914static bool e1kLocateTxPacket(PE1KSTATE pThis)
4915{
4916 LogFlow(("%s e1kLocateTxPacket: ENTER cbTxAlloc=%d\n",
4917 pThis->szPrf, pThis->cbTxAlloc));
4918 /* Check if we have located the packet already. */
4919 if (pThis->cbTxAlloc)
4920 {
4921 LogFlow(("%s e1kLocateTxPacket: RET true cbTxAlloc=%d\n",
4922 pThis->szPrf, pThis->cbTxAlloc));
4923 return true;
4924 }
4925
4926 bool fTSE = false;
4927 uint32_t cbPacket = 0;
4928
4929 for (int i = pThis->iTxDCurrent; i < pThis->nTxDFetched; ++i)
4930 {
4931 E1KTXDESC *pDesc = &pThis->aTxDescriptors[i];
4932 switch (e1kGetDescType(pDesc))
4933 {
4934 case E1K_DTYP_CONTEXT:
4935 e1kUpdateTxContext(pThis, pDesc);
4936 continue;
4937 case E1K_DTYP_LEGACY:
4938 /* Skip empty descriptors. */
4939 if (!pDesc->legacy.u64BufAddr || !pDesc->legacy.cmd.u16Length)
4940 break;
4941 cbPacket += pDesc->legacy.cmd.u16Length;
4942 pThis->fGSO = false;
4943 break;
4944 case E1K_DTYP_DATA:
4945 /* Skip empty descriptors. */
4946 if (!pDesc->data.u64BufAddr || !pDesc->data.cmd.u20DTALEN)
4947 break;
4948 if (cbPacket == 0)
4949 {
4950 /*
4951 * The first fragment: save IXSM and TXSM options
4952 * as these are only valid in the first fragment.
4953 */
4954 pThis->fIPcsum = pDesc->data.dw3.fIXSM;
4955 pThis->fTCPcsum = pDesc->data.dw3.fTXSM;
4956 fTSE = pDesc->data.cmd.fTSE;
4957 /*
4958 * TSE descriptors have VLE bit properly set in
4959 * the first fragment.
4960 */
4961 if (fTSE)
4962 {
4963 pThis->fVTag = pDesc->data.cmd.fVLE;
4964 pThis->u16VTagTCI = pDesc->data.dw3.u16Special;
4965 }
4966 pThis->fGSO = e1kCanDoGso(pThis, &pThis->GsoCtx, &pDesc->data, &pThis->contextTSE);
4967 }
4968 cbPacket += pDesc->data.cmd.u20DTALEN;
4969 break;
4970 default:
4971 AssertMsgFailed(("Impossible descriptor type!"));
4972 }
4973 if (pDesc->legacy.cmd.fEOP)
4974 {
4975 /*
4976 * Non-TSE descriptors have VLE bit properly set in
4977 * the last fragment.
4978 */
4979 if (!fTSE)
4980 {
4981 pThis->fVTag = pDesc->data.cmd.fVLE;
4982 pThis->u16VTagTCI = pDesc->data.dw3.u16Special;
4983 }
4984 /*
4985 * Compute the required buffer size. If we cannot do GSO but still
4986 * have to do segmentation we allocate the first segment only.
4987 */
4988 pThis->cbTxAlloc = (!fTSE || pThis->fGSO) ?
4989 cbPacket :
4990 RT_MIN(cbPacket, pThis->contextTSE.dw3.u16MSS + pThis->contextTSE.dw3.u8HDRLEN);
4991 if (pThis->fVTag)
4992 pThis->cbTxAlloc += 4;
4993 LogFlow(("%s e1kLocateTxPacket: RET true cbTxAlloc=%d\n",
4994 pThis->szPrf, pThis->cbTxAlloc));
4995 return true;
4996 }
4997 }
4998
4999 if (cbPacket == 0 && pThis->nTxDFetched - pThis->iTxDCurrent > 0)
5000 {
5001 /* All descriptors were empty, we need to process them as a dummy packet */
5002 LogFlow(("%s e1kLocateTxPacket: RET true cbTxAlloc=%d, zero packet!\n",
5003 pThis->szPrf, pThis->cbTxAlloc));
5004 return true;
5005 }
5006 LogFlow(("%s e1kLocateTxPacket: RET false cbTxAlloc=%d\n",
5007 pThis->szPrf, pThis->cbTxAlloc));
5008 return false;
5009}
5010
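/**
 * Transmit the packet currently located in the descriptor cache.
 *
 * Processes the cached descriptors one by one, advancing TDH and raising
 * ICR.TXD_LOW when the number of unprocessed descriptors reaches the low
 * threshold (if one is set), until the end of the packet (EOP) is reached or
 * an error occurs.
 *
 * @returns VBox status code.
 * @param   pThis            The device state structure.
 * @param   fOnWorkerThread  Whether we're on a worker thread or an EMT.
 * @thread  E1000_TX
 */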
5011static int e1kXmitPacket(PE1KSTATE pThis, bool fOnWorkerThread)
5012{
5013 int rc = VINF_SUCCESS;
5014
5015 LogFlow(("%s e1kXmitPacket: ENTER current=%d fetched=%d\n",
5016 pThis->szPrf, pThis->iTxDCurrent, pThis->nTxDFetched));
5017
5018 while (pThis->iTxDCurrent < pThis->nTxDFetched)
5019 {
5020 E1KTXDESC *pDesc = &pThis->aTxDescriptors[pThis->iTxDCurrent];
5021 E1kLog3(("%s About to process new TX descriptor at %08x%08x, TDLEN=%08x, TDH=%08x, TDT=%08x\n",
5022 pThis->szPrf, TDBAH, TDBAL + TDH * sizeof(E1KTXDESC), TDLEN, TDH, TDT));
5023 rc = e1kXmitDesc(pThis, pDesc, e1kDescAddr(TDBAH, TDBAL, TDH), fOnWorkerThread);
5024 if (RT_FAILURE(rc))
5025 break;
5026 if (++TDH * sizeof(E1KTXDESC) >= TDLEN)
5027 TDH = 0;
5028 uint32_t uLowThreshold = GET_BITS(TXDCTL, LWTHRESH)*8;
5029 if (uLowThreshold != 0 && e1kGetTxLen(pThis) <= uLowThreshold)
5030 {
5031 E1kLog2(("%s Low on transmit descriptors, raise ICR.TXD_LOW, len=%x thresh=%x\n",
5032 pThis->szPrf, e1kGetTxLen(pThis), GET_BITS(TXDCTL, LWTHRESH)*8));
5033 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_TXD_LOW);
5034 }
5035 ++pThis->iTxDCurrent;
5036 if (e1kGetDescType(pDesc) != E1K_DTYP_CONTEXT && pDesc->legacy.cmd.fEOP)
5037 break;
5038 }
5039
5040 LogFlow(("%s e1kXmitPacket: RET %Rrc current=%d fetched=%d\n",
5041 pThis->szPrf, rc, pThis->iTxDCurrent, pThis->nTxDFetched));
5042 return rc;
5043}
5044
5045#endif /* E1K_WITH_TXD_CACHE */
5046#ifndef E1K_WITH_TXD_CACHE
5047
5048/**
5049 * Transmit pending descriptors.
5050 *
5051 * @returns VBox status code. VERR_TRY_AGAIN is returned if we're busy.
5052 *
5053 * @param pThis The E1000 state.
5054 * @param fOnWorkerThread Whether we're on a worker thread or on an EMT.
5055 */
5056static int e1kXmitPending(PE1KSTATE pThis, bool fOnWorkerThread)
5057{
5058 int rc = VINF_SUCCESS;
5059
5060 /* Check if transmitter is enabled. */
5061 if (!(TCTL & TCTL_EN))
5062 return VINF_SUCCESS;
5063 /*
5064 * Grab the xmit lock of the driver as well as the E1K device state.
5065 */
5066 rc = e1kCsTxEnter(pThis, VERR_SEM_BUSY);
5067 if (RT_LIKELY(rc == VINF_SUCCESS))
5068 {
5069 PPDMINETWORKUP pDrv = pThis->CTX_SUFF(pDrv);
5070 if (pDrv)
5071 {
5072 rc = pDrv->pfnBeginXmit(pDrv, fOnWorkerThread);
5073 if (RT_FAILURE(rc))
5074 {
5075 e1kCsTxLeave(pThis);
5076 return rc;
5077 }
5078 }
5079 /*
5080 * Process all pending descriptors.
5081 * Note! Do not process descriptors in locked state
5082 */
5083 while (TDH != TDT && !pThis->fLocked)
5084 {
5085 E1KTXDESC desc;
5086 E1kLog3(("%s About to process new TX descriptor at %08x%08x, TDLEN=%08x, TDH=%08x, TDT=%08x\n",
5087 pThis->szPrf, TDBAH, TDBAL + TDH * sizeof(desc), TDLEN, TDH, TDT));
5088
5089 e1kLoadDesc(pThis, &desc, ((uint64_t)TDBAH << 32) + TDBAL + TDH * sizeof(desc));
5090 rc = e1kXmitDesc(pThis, &desc, e1kDescAddr(TDBAH, TDBAL, TDH), fOnWorkerThread);
5091            /* If we failed to transmit the descriptor we will try it again later */
5092 if (RT_FAILURE(rc))
5093 break;
5094 if (++TDH * sizeof(desc) >= TDLEN)
5095 TDH = 0;
5096
5097 if (e1kGetTxLen(pThis) <= GET_BITS(TXDCTL, LWTHRESH)*8)
5098 {
5099 E1kLog2(("%s Low on transmit descriptors, raise ICR.TXD_LOW, len=%x thresh=%x\n",
5100 pThis->szPrf, e1kGetTxLen(pThis), GET_BITS(TXDCTL, LWTHRESH)*8));
5101 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_TXD_LOW);
5102 }
5103
5104 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
5105 }
5106
5107 /// @todo: uncomment: pThis->uStatIntTXQE++;
5108 /// @todo: uncomment: e1kRaiseInterrupt(pThis, ICR_TXQE);
5109 /*
5110 * Release the lock.
5111 */
5112 if (pDrv)
5113 pDrv->pfnEndXmit(pDrv);
5114 e1kCsTxLeave(pThis);
5115 }
5116
5117 return rc;
5118}
5119
5120#else /* E1K_WITH_TXD_CACHE */
5121
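/**
 * Dump the transmit descriptor ring and the descriptor cache to the release
 * log, used for diagnostics when no complete packet can be located in the
 * cache.
 *
 * @param   pThis       The device state structure.
 */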
5122static void e1kDumpTxDCache(PE1KSTATE pThis)
5123{
5124 unsigned i, cDescs = TDLEN / sizeof(E1KTXDESC);
5125 uint32_t tdh = TDH;
5126 LogRel(("-- Transmit Descriptors (%d total) --\n", cDescs));
5127 for (i = 0; i < cDescs; ++i)
5128 {
5129 E1KTXDESC desc;
5130 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), e1kDescAddr(TDBAH, TDBAL, i),
5131 &desc, sizeof(desc));
5132 if (i == tdh)
5133 LogRel((">>> "));
5134 LogRel(("%RGp: %R[e1ktxd]\n", e1kDescAddr(TDBAH, TDBAL, i), &desc));
5135 }
5136 LogRel(("-- Transmit Descriptors in Cache (at %d (TDH %d)/ fetched %d / max %d) --\n",
5137 pThis->iTxDCurrent, TDH, pThis->nTxDFetched, E1K_TXD_CACHE_SIZE));
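    /* Rewind TDH by the number of cache entries already consumed to obtain
       the ring index of the first cached descriptor (wrapping at cDescs). */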
5138 if (tdh > pThis->iTxDCurrent)
5139 tdh -= pThis->iTxDCurrent;
5140 else
5141 tdh = cDescs + tdh - pThis->iTxDCurrent;
5142 for (i = 0; i < pThis->nTxDFetched; ++i)
5143 {
5144 if (i == pThis->iTxDCurrent)
5145 LogRel((">>> "));
5146 LogRel(("%RGp: %R[e1ktxd]\n", e1kDescAddr(TDBAH, TDBAL, tdh++ % cDescs), &pThis->aTxDescriptors[i]));
5147 }
5148}
5149
5150/**
5151 * Transmit pending descriptors.
5152 *
5153 * @returns VBox status code. VERR_TRY_AGAIN is returned if we're busy.
5154 *
5155 * @param pThis The E1000 state.
5156 * @param fOnWorkerThread Whether we're on a worker thread or on an EMT.
5157 */
5158static int e1kXmitPending(PE1KSTATE pThis, bool fOnWorkerThread)
5159{
5160 int rc = VINF_SUCCESS;
5161
5162 /* Check if transmitter is enabled. */
5163 if (!(TCTL & TCTL_EN))
5164 return VINF_SUCCESS;
5165 /*
5166 * Grab the xmit lock of the driver as well as the E1K device state.
5167 */
5168 PPDMINETWORKUP pDrv = pThis->CTX_SUFF(pDrv);
5169 if (pDrv)
5170 {
5171 rc = pDrv->pfnBeginXmit(pDrv, fOnWorkerThread);
5172 if (RT_FAILURE(rc))
5173 return rc;
5174 }
5175
5176 /*
5177 * Process all pending descriptors.
5178 * Note! Do not process descriptors in locked state
5179 */
5180 rc = e1kCsTxEnter(pThis, VERR_SEM_BUSY);
5181 if (RT_LIKELY(rc == VINF_SUCCESS))
5182 {
5183 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatTransmit), a);
5184 /*
5185 * fIncomplete is set whenever we try to fetch additional descriptors
5186         * for an incomplete packet. If we fail to locate a complete packet on
5187         * the next iteration we need to reset the cache or we risk getting
5188         * stuck in this loop forever.
5189 */
5190 bool fIncomplete = false;
5191 while (!pThis->fLocked && e1kTxDLazyLoad(pThis))
5192 {
5193 while (e1kLocateTxPacket(pThis))
5194 {
5195 fIncomplete = false;
5196 /* Found a complete packet, allocate it. */
5197 rc = e1kXmitAllocBuf(pThis, pThis->fGSO);
5198 /* If we're out of bandwidth we'll come back later. */
5199 if (RT_FAILURE(rc))
5200 goto out;
5201 /* Copy the packet to allocated buffer and send it. */
5202 rc = e1kXmitPacket(pThis, fOnWorkerThread);
5203 /* If we're out of bandwidth we'll come back later. */
5204 if (RT_FAILURE(rc))
5205 goto out;
5206 }
5207 uint8_t u8Remain = pThis->nTxDFetched - pThis->iTxDCurrent;
5208 if (RT_UNLIKELY(fIncomplete))
5209 {
5210 static bool fTxDCacheDumped = false;
5211 /*
5212 * The descriptor cache is full, but we were unable to find
5213 * a complete packet in it. Drop the cache and hope that
5214                 * the guest driver can recover from the network card error.
5215 */
5216 LogRel(("%s No complete packets in%s TxD cache! "
5217 "Fetched=%d, current=%d, TX len=%d.\n",
5218 pThis->szPrf,
5219 u8Remain == E1K_TXD_CACHE_SIZE ? " full" : "",
5220 pThis->nTxDFetched, pThis->iTxDCurrent,
5221 e1kGetTxLen(pThis)));
5222 if (!fTxDCacheDumped)
5223 {
5224 fTxDCacheDumped = true;
5225 e1kDumpTxDCache(pThis);
5226 }
5227 pThis->iTxDCurrent = pThis->nTxDFetched = 0;
5228 /*
5229 * Returning an error at this point means Guru in R0
5230 * (see @bugref{6428}).
5231 */
5232# ifdef IN_RING3
5233 rc = VERR_NET_INCOMPLETE_TX_PACKET;
5234# else /* !IN_RING3 */
5235 rc = VINF_IOM_R3_MMIO_WRITE;
5236# endif /* !IN_RING3 */
5237 goto out;
5238 }
5239 if (u8Remain > 0)
5240 {
5241 Log4(("%s Incomplete packet at %d. Already fetched %d, "
5242 "%d more are available\n",
5243 pThis->szPrf, pThis->iTxDCurrent, u8Remain,
5244 e1kGetTxLen(pThis) - u8Remain));
5245
5246 /*
5247                 * A packet was partially fetched. Move the incomplete packet to
5248                 * the beginning of the cache buffer, then load more descriptors.
5249 */
5250 memmove(pThis->aTxDescriptors,
5251 &pThis->aTxDescriptors[pThis->iTxDCurrent],
5252 u8Remain * sizeof(E1KTXDESC));
5253 pThis->iTxDCurrent = 0;
5254 pThis->nTxDFetched = u8Remain;
5255 e1kTxDLoadMore(pThis);
5256 fIncomplete = true;
5257 }
5258 else
5259 pThis->nTxDFetched = 0;
5260 pThis->iTxDCurrent = 0;
5261 }
5262 if (!pThis->fLocked && GET_BITS(TXDCTL, LWTHRESH) == 0)
5263 {
5264 E1kLog2(("%s Out of transmit descriptors, raise ICR.TXD_LOW\n",
5265 pThis->szPrf));
5266 e1kRaiseInterrupt(pThis, VERR_SEM_BUSY, ICR_TXD_LOW);
5267 }
5268out:
5269 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatTransmit), a);
5270
5271 /// @todo: uncomment: pThis->uStatIntTXQE++;
5272 /// @todo: uncomment: e1kRaiseInterrupt(pThis, ICR_TXQE);
5273
5274 e1kCsTxLeave(pThis);
5275 }
5276
5277
5278 /*
5279 * Release the lock.
5280 */
5281 if (pDrv)
5282 pDrv->pfnEndXmit(pDrv);
5283 return rc;
5284}
5285
5286#endif /* E1K_WITH_TXD_CACHE */
5287#ifdef IN_RING3
5288
5289/**
5290 * @interface_method_impl{PDMINETWORKDOWN,pfnXmitPending}
5291 */
5292static DECLCALLBACK(void) e1kR3NetworkDown_XmitPending(PPDMINETWORKDOWN pInterface)
5293{
5294 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkDown);
5295 /* Resume suspended transmission */
5296 STATUS &= ~STATUS_TXOFF;
5297 e1kXmitPending(pThis, true /*fOnWorkerThread*/);
5298}
5299
5300/**
5301 * Callback for consuming from transmit queue. It gets called in R3 whenever
5302 * we enqueue something in R0/GC.
5303 *
5304 * @returns true
5305 * @param pDevIns Pointer to device instance structure.
5306 * @param pItem Pointer to the element being dequeued (not used).
5307 * @thread ???
5308 */
5309static DECLCALLBACK(bool) e1kTxQueueConsumer(PPDMDEVINS pDevIns, PPDMQUEUEITEMCORE pItem)
5310{
5311 NOREF(pItem);
5312 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
5313 E1kLog2(("%s e1kTxQueueConsumer:\n", pThis->szPrf));
5314
5315 int rc = e1kXmitPending(pThis, false /*fOnWorkerThread*/); NOREF(rc);
5316#ifndef DEBUG_andy /** @todo r=andy Happens for me a lot, mute this for me. */
5317 AssertMsg(RT_SUCCESS(rc) || rc == VERR_TRY_AGAIN, ("%Rrc\n", rc));
5318#endif
5319 return true;
5320}
5321
5322/**
5323 * Handler for the wakeup signaller queue.
5324 */
5325static DECLCALLBACK(bool) e1kCanRxQueueConsumer(PPDMDEVINS pDevIns, PPDMQUEUEITEMCORE pItem)
5326{
5327 RT_NOREF(pItem);
5328 e1kWakeupReceive(pDevIns);
5329 return true;
5330}
5331
5332#endif /* IN_RING3 */
5333
5334/**
5335 * Write handler for Transmit Descriptor Tail register.
5336 *
5337 * @param pThis The device state structure.
5338 * @param offset Register offset in memory-mapped frame.
5339 * @param index Register index in register array.
5340 * @param value The value to store.
5341 * @param mask Used to implement partial writes (8 and 16-bit).
5342 * @thread EMT
5343 */
5344static int e1kRegWriteTDT(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5345{
5346 int rc = e1kRegWriteDefault(pThis, offset, index, value);
5347
5348 /* All descriptors starting with head and not including tail belong to us. */
5349 /* Process them. */
5350 E1kLog2(("%s e1kRegWriteTDT: TDBAL=%08x, TDBAH=%08x, TDLEN=%08x, TDH=%08x, TDT=%08x\n",
5351 pThis->szPrf, TDBAL, TDBAH, TDLEN, TDH, TDT));
5352
5353 /* Ignore TDT writes when the link is down. */
5354 if (TDH != TDT && (STATUS & STATUS_LU))
5355 {
5356 Log5(("E1000: TDT write: TDH=%08x, TDT=%08x, %d descriptors to process\n", TDH, TDT, e1kGetTxLen(pThis)));
5357 E1kLog(("%s e1kRegWriteTDT: %d descriptors to process\n",
5358 pThis->szPrf, e1kGetTxLen(pThis)));
5359
5360 /* Transmit pending packets if possible, defer it if we cannot do it
5361 in the current context. */
5362#ifdef E1K_TX_DELAY
5363 rc = e1kCsTxEnter(pThis, VERR_SEM_BUSY);
5364 if (RT_LIKELY(rc == VINF_SUCCESS))
5365 {
5366 if (!TMTimerIsActive(pThis->CTX_SUFF(pTXDTimer)))
5367 {
5368#ifdef E1K_INT_STATS
5369 pThis->u64ArmedAt = RTTimeNanoTS();
5370#endif
5371 e1kArmTimer(pThis, pThis->CTX_SUFF(pTXDTimer), E1K_TX_DELAY);
5372 }
5373 E1K_INC_ISTAT_CNT(pThis->uStatTxDelayed);
5374 e1kCsTxLeave(pThis);
5375 return rc;
5376 }
5377 /* We failed to enter the TX critical section -- transmit as usual. */
5378#endif /* E1K_TX_DELAY */
5379#ifndef IN_RING3
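        /* The driver may not be reachable from R0/RC; defer the transmit to R3
           by posting an item to the TX queue (consumed by e1kTxQueueConsumer). */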
5380 if (!pThis->CTX_SUFF(pDrv))
5381 {
5382 PPDMQUEUEITEMCORE pItem = PDMQueueAlloc(pThis->CTX_SUFF(pTxQueue));
5383 if (RT_UNLIKELY(pItem))
5384 PDMQueueInsert(pThis->CTX_SUFF(pTxQueue), pItem);
5385 }
5386 else
5387#endif
5388 {
5389 rc = e1kXmitPending(pThis, false /*fOnWorkerThread*/);
5390 if (rc == VERR_TRY_AGAIN)
5391 rc = VINF_SUCCESS;
5392 else if (rc == VERR_SEM_BUSY)
5393 rc = VINF_IOM_R3_MMIO_WRITE;
5394 AssertRC(rc);
5395 }
5396 }
5397
5398 return rc;
5399}
5400
5401/**
5402 * Write handler for Multicast Table Array registers.
5403 *
5404 * @param pThis The device state structure.
5405 * @param offset Register offset in memory-mapped frame.
5406 * @param index Register index in register array.
5407 * @param value The value to store.
5408 * @thread EMT
5409 */
5410static int e1kRegWriteMTA(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5411{
5412 AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->auMTA), VERR_DEV_IO_ERROR);
5413 pThis->auMTA[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->auMTA[0])] = value;
5414
5415 return VINF_SUCCESS;
5416}
5417
5418/**
5419 * Read handler for Multicast Table Array registers.
5420 *
5421 * @returns VBox status code.
5422 *
5423 * @param pThis The device state structure.
5424 * @param offset Register offset in memory-mapped frame.
5425 * @param index Register index in register array.
5426 * @thread EMT
5427 */
5428static int e1kRegReadMTA(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5429{
5430    AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->auMTA), VERR_DEV_IO_ERROR);
5431 *pu32Value = pThis->auMTA[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->auMTA[0])];
5432
5433 return VINF_SUCCESS;
5434}
5435
5436/**
5437 * Write handler for Receive Address registers.
5438 *
5439 * @param pThis The device state structure.
5440 * @param offset Register offset in memory-mapped frame.
5441 * @param index Register index in register array.
5442 * @param value The value to store.
5443 * @thread EMT
5444 */
5445static int e1kRegWriteRA(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5446{
5447 AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->aRecAddr.au32), VERR_DEV_IO_ERROR);
5448 pThis->aRecAddr.au32[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->aRecAddr.au32[0])] = value;
5449
5450 return VINF_SUCCESS;
5451}
5452
5453/**
5454 * Read handler for Receive Address registers.
5455 *
5456 * @returns VBox status code.
5457 *
5458 * @param pThis The device state structure.
5459 * @param offset Register offset in memory-mapped frame.
5460 * @param index Register index in register array.
5461 * @thread EMT
5462 */
5463static int e1kRegReadRA(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5464{
5465    AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->aRecAddr.au32), VERR_DEV_IO_ERROR);
5466 *pu32Value = pThis->aRecAddr.au32[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->aRecAddr.au32[0])];
5467
5468 return VINF_SUCCESS;
5469}
5470
5471/**
5472 * Write handler for VLAN Filter Table Array registers.
5473 *
5474 * @param pThis The device state structure.
5475 * @param offset Register offset in memory-mapped frame.
5476 * @param index Register index in register array.
5477 * @param value The value to store.
5478 * @thread EMT
5479 */
5480static int e1kRegWriteVFTA(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5481{
5482 AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->auVFTA), VINF_SUCCESS);
5483 pThis->auVFTA[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->auVFTA[0])] = value;
5484
5485 return VINF_SUCCESS;
5486}
5487
5488/**
5489 * Read handler for VLAN Filter Table Array registers.
5490 *
5491 * @returns VBox status code.
5492 *
5493 * @param pThis The device state structure.
5494 * @param offset Register offset in memory-mapped frame.
5495 * @param index Register index in register array.
5496 * @thread EMT
5497 */
5498static int e1kRegReadVFTA(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5499{
5500    AssertReturn(offset - g_aE1kRegMap[index].offset < sizeof(pThis->auVFTA), VERR_DEV_IO_ERROR);
5501 *pu32Value = pThis->auVFTA[(offset - g_aE1kRegMap[index].offset)/sizeof(pThis->auVFTA[0])];
5502
5503 return VINF_SUCCESS;
5504}
5505
5506/**
5507 * Read handler for unimplemented registers.
5508 *
5509 * Merely reports reads from unimplemented registers.
5510 *
5511 * @returns VBox status code.
5512 *
5513 * @param pThis The device state structure.
5514 * @param offset Register offset in memory-mapped frame.
5515 * @param index Register index in register array.
5516 * @thread EMT
5517 */
5518static int e1kRegReadUnimplemented(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5519{
5520 RT_NOREF3(pThis, offset, index);
5521 E1kLog(("%s At %08X read (00000000) attempt from unimplemented register %s (%s)\n",
5522 pThis->szPrf, offset, g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
5523 *pu32Value = 0;
5524
5525 return VINF_SUCCESS;
5526}
5527
5528/**
5529 * Default register read handler with automatic clear operation.
5530 *
5531 * Retrieves the value of the register from the register array in the device
5532 * state structure, then clears all of its bits (read-to-clear).
5533 *
5534 * @remarks The 'mask' parameter is simply ignored as masking and shifting is
5535 * done in the caller.
5536 *
5537 * @returns VBox status code.
5538 *
5539 * @param pThis The device state structure.
5540 * @param offset Register offset in memory-mapped frame.
5541 * @param index Register index in register array.
5542 * @thread EMT
5543 */
5544static int e1kRegReadAutoClear(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5545{
5546 AssertReturn(index < E1K_NUM_OF_32BIT_REGS, VERR_DEV_IO_ERROR);
5547 int rc = e1kRegReadDefault(pThis, offset, index, pu32Value);
5548 pThis->auRegs[index] = 0;
5549
5550 return rc;
5551}
5552
5553/**
5554 * Default register read handler.
5555 *
5556 * Retrieves the value of the register from the register array in the device
5557 * state structure. Bits corresponding to 0s in the 'readable' mask always read as 0s.
5558 *
5559 * @remarks The 'mask' parameter is simply ignored as masking and shifting is
5560 * done in the caller.
5561 *
5562 * @returns VBox status code.
5563 *
5564 * @param pThis The device state structure.
5565 * @param offset Register offset in memory-mapped frame.
5566 * @param index Register index in register array.
5567 * @thread EMT
5568 */
5569static int e1kRegReadDefault(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5570{
5571 RT_NOREF_PV(offset);
5572
5573 AssertReturn(index < E1K_NUM_OF_32BIT_REGS, VERR_DEV_IO_ERROR);
5574 *pu32Value = pThis->auRegs[index] & g_aE1kRegMap[index].readable;
5575
5576 return VINF_SUCCESS;
5577}
5578
5579/**
5580 * Write handler for unimplemented registers.
5581 *
5582 * Merely reports writes to unimplemented registers.
5583 *
5584 * @param pThis The device state structure.
5585 * @param offset Register offset in memory-mapped frame.
5586 * @param index Register index in register array.
5587 * @param value The value to store.
5588 * @thread EMT
5589 */
5590
5591static int e1kRegWriteUnimplemented(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5592{
5593 RT_NOREF_PV(pThis); RT_NOREF_PV(offset); RT_NOREF_PV(index); RT_NOREF_PV(value);
5594
5595 E1kLog(("%s At %08X write attempt (%08X) to unimplemented register %s (%s)\n",
5596 pThis->szPrf, offset, value, g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
5597
5598 return VINF_SUCCESS;
5599}
5600
5601/**
5602 * Default register write handler.
5603 *
5604 * Stores the value in the register array in the device state structure. Only bits
5605 * corresponding to 1s both in 'writable' and 'mask' will be stored.
5606 *
5607 * @returns VBox status code.
5608 *
5609 * @param pThis The device state structure.
5610 * @param offset Register offset in memory-mapped frame.
5611 * @param index Register index in register array.
5612 * @param value The value to store.
5613 * @param mask Used to implement partial writes (8 and 16-bit).
5614 * @thread EMT
5615 */
5616
5617static int e1kRegWriteDefault(PE1KSTATE pThis, uint32_t offset, uint32_t index, uint32_t value)
5618{
5619 RT_NOREF_PV(offset);
5620
5621 AssertReturn(index < E1K_NUM_OF_32BIT_REGS, VERR_DEV_IO_ERROR);
5622 pThis->auRegs[index] = (value & g_aE1kRegMap[index].writable)
5623 | (pThis->auRegs[index] & ~g_aE1kRegMap[index].writable);
5624
5625 return VINF_SUCCESS;
5626}
5627
5628/**
5629 * Search register table for matching register.
5630 *
5631 * @returns Index in the register table or -1 if not found.
5632 *
5633 * @param offReg Register offset in memory-mapped region.
5634 * @thread EMT
5635 */
5636static int e1kRegLookup(uint32_t offReg)
5637{
5638
5639#if 0
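    /* Reference implementation: a plain linear search over the whole map. */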
5640 int index;
5641
5642 for (index = 0; index < E1K_NUM_OF_REGS; index++)
5643 {
5644 if (g_aE1kRegMap[index].offset <= offReg && offReg < g_aE1kRegMap[index].offset + g_aE1kRegMap[index].size)
5645 {
5646 return index;
5647 }
5648 }
5649#else
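    /* Binary search among the registers sorted by offset (the first
       E1K_NUM_OF_BINARY_SEARCHABLE entries of g_aE1kRegMap). */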
5650 int iStart = 0;
5651 int iEnd = E1K_NUM_OF_BINARY_SEARCHABLE;
5652 for (;;)
5653 {
5654 int i = (iEnd - iStart) / 2 + iStart;
5655 uint32_t offCur = g_aE1kRegMap[i].offset;
5656 if (offReg < offCur)
5657 {
5658 if (i == iStart)
5659 break;
5660 iEnd = i;
5661 }
5662 else if (offReg >= offCur + g_aE1kRegMap[i].size)
5663 {
5664 i++;
5665 if (i == iEnd)
5666 break;
5667 iStart = i;
5668 }
5669 else
5670 return i;
5671 Assert(iEnd > iStart);
5672 }
5673
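    /* The remaining registers are not sorted by offset; scan them linearly. */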
5674 for (unsigned i = E1K_NUM_OF_BINARY_SEARCHABLE; i < RT_ELEMENTS(g_aE1kRegMap); i++)
5675 if (offReg - g_aE1kRegMap[i].offset < g_aE1kRegMap[i].size)
5676 return i;
5677
5678# ifdef VBOX_STRICT
5679 for (unsigned i = 0; i < RT_ELEMENTS(g_aE1kRegMap); i++)
5680 Assert(offReg - g_aE1kRegMap[i].offset >= g_aE1kRegMap[i].size);
5681# endif
5682
5683#endif
5684
5685 return -1;
5686}
5687
5688/**
5689 * Handle unaligned register read operation.
5690 *
5691 * Looks up and calls appropriate handler.
5692 *
5693 * @returns VBox status code.
5694 *
5695 * @param pThis The device state structure.
5696 * @param offReg Register offset in memory-mapped frame.
5697 * @param pv Where to store the result.
5698 * @param cb Number of bytes to read.
5699 * @thread EMT
5700 * @remarks IOM takes care of unaligned and small reads via MMIO. For I/O port
5701 * accesses we have to take care of that ourselves.
5702 */
5703static int e1kRegReadUnaligned(PE1KSTATE pThis, uint32_t offReg, void *pv, uint32_t cb)
5704{
5705 uint32_t u32 = 0;
5706 uint32_t shift;
5707 int rc = VINF_SUCCESS;
5708 int index = e1kRegLookup(offReg);
5709#ifdef LOG_ENABLED
5710 char buf[9];
5711#endif
5712
5713 /*
5714 * From the spec:
5715 * For registers that should be accessed as 32-bit double words, partial writes (less than a 32-bit
5716 * double word) is ignored. Partial reads return all 32 bits of data regardless of the byte enables.
5717 */
5718
5719 /*
5720 * To be able to read bytes and short word we convert them to properly
5721 * shifted 32-bit words and masks. The idea is to keep register-specific
5722 * handlers simple. Most accesses will be 32-bit anyway.
5723 */
5724 uint32_t mask;
5725 switch (cb)
5726 {
5727 case 4: mask = 0xFFFFFFFF; break;
5728 case 2: mask = 0x0000FFFF; break;
5729 case 1: mask = 0x000000FF; break;
5730 default:
5731 return PDMDevHlpDBGFStop(pThis->CTX_SUFF(pDevIns), RT_SRC_POS,
5732 "unsupported op size: offset=%#10x cb=%#10x\n", offReg, cb);
5733 }
5734 if (index != -1)
5735 {
5736 if (g_aE1kRegMap[index].readable)
5737 {
5738 /* Make the mask correspond to the bits we are about to read. */
5739 shift = (offReg - g_aE1kRegMap[index].offset) % sizeof(uint32_t) * 8;
5740 mask <<= shift;
5741 if (!mask)
5742 return PDMDevHlpDBGFStop(pThis->CTX_SUFF(pDevIns), RT_SRC_POS, "Zero mask: offset=%#10x cb=%#10x\n", offReg, cb);
5743 /*
5744 * Read it. Pass the mask so the handler knows what has to be read.
5745 * Mask out irrelevant bits.
5746 */
5747 //rc = e1kCsEnter(pThis, VERR_SEM_BUSY, RT_SRC_POS);
5748 if (RT_UNLIKELY(rc != VINF_SUCCESS))
5749 return rc;
5750 //pThis->fDelayInts = false;
5751 //pThis->iStatIntLost += pThis->iStatIntLostOne;
5752 //pThis->iStatIntLostOne = 0;
5753 rc = g_aE1kRegMap[index].pfnRead(pThis, offReg & 0xFFFFFFFC, index, &u32);
5754 u32 &= mask;
5755 //e1kCsLeave(pThis);
5756 E1kLog2(("%s At %08X read %s from %s (%s)\n",
5757 pThis->szPrf, offReg, e1kU32toHex(u32, mask, buf), g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
5758 Log6(("%s At %08X read %s from %s (%s) [UNALIGNED]\n",
5759 pThis->szPrf, offReg, e1kU32toHex(u32, mask, buf), g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
5760 /* Shift back the result. */
5761 u32 >>= shift;
5762 }
5763 else
5764 E1kLog(("%s At %08X read (%s) attempt from write-only register %s (%s)\n",
5765 pThis->szPrf, offReg, e1kU32toHex(u32, mask, buf), g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
5766 if (IOM_SUCCESS(rc))
5767 STAM_COUNTER_INC(&pThis->aStatRegReads[index]);
5768 }
5769 else
5770 E1kLog(("%s At %08X read (%s) attempt from non-existing register\n",
5771 pThis->szPrf, offReg, e1kU32toHex(u32, mask, buf)));
5772
5773 memcpy(pv, &u32, cb);
5774 return rc;
5775}
5776
5777/**
5778 * Handle 4 byte aligned and sized read operation.
5779 *
5780 * Looks up and calls appropriate handler.
5781 *
5782 * @returns VBox status code.
5783 *
5784 * @param pThis The device state structure.
5785 * @param offReg Register offset in memory-mapped frame.
5786 * @param pu32 Where to store the result.
5787 * @thread EMT
5788 */
5789static int e1kRegReadAlignedU32(PE1KSTATE pThis, uint32_t offReg, uint32_t *pu32)
5790{
5791 Assert(!(offReg & 3));
5792
5793 /*
5794 * Lookup the register and check that it's readable.
5795 */
5796 int rc = VINF_SUCCESS;
5797 int idxReg = e1kRegLookup(offReg);
5798 if (RT_LIKELY(idxReg != -1))
5799 {
5800        if (RT_LIKELY(g_aE1kRegMap[idxReg].readable))
5801 {
5802 /*
5803 * Read it. Pass the mask so the handler knows what has to be read.
5804 * Mask out irrelevant bits.
5805 */
5806 //rc = e1kCsEnter(pThis, VERR_SEM_BUSY, RT_SRC_POS);
5807 //if (RT_UNLIKELY(rc != VINF_SUCCESS))
5808 // return rc;
5809 //pThis->fDelayInts = false;
5810 //pThis->iStatIntLost += pThis->iStatIntLostOne;
5811 //pThis->iStatIntLostOne = 0;
5812 rc = g_aE1kRegMap[idxReg].pfnRead(pThis, offReg & 0xFFFFFFFC, idxReg, pu32);
5813 //e1kCsLeave(pThis);
5814 Log6(("%s At %08X read %08X from %s (%s)\n",
5815 pThis->szPrf, offReg, *pu32, g_aE1kRegMap[idxReg].abbrev, g_aE1kRegMap[idxReg].name));
5816 if (IOM_SUCCESS(rc))
5817 STAM_COUNTER_INC(&pThis->aStatRegReads[idxReg]);
5818 }
5819 else
5820 E1kLog(("%s At %08X read attempt from non-readable register %s (%s)\n",
5821 pThis->szPrf, offReg, g_aE1kRegMap[idxReg].abbrev, g_aE1kRegMap[idxReg].name));
5822 }
5823 else
5824 E1kLog(("%s At %08X read attempt from non-existing register\n", pThis->szPrf, offReg));
5825 return rc;
5826}
5827
5828/**
5829 * Handle 4 byte sized and aligned register write operation.
5830 *
5831 * Looks up and calls appropriate handler.
5832 *
5833 * @returns VBox status code.
5834 *
5835 * @param pThis The device state structure.
5836 * @param offReg Register offset in memory-mapped frame.
5837 * @param u32Value The value to write.
5838 * @thread EMT
5839 */
5840static int e1kRegWriteAlignedU32(PE1KSTATE pThis, uint32_t offReg, uint32_t u32Value)
5841{
5842 int rc = VINF_SUCCESS;
5843 int index = e1kRegLookup(offReg);
5844 if (RT_LIKELY(index != -1))
5845 {
5846 if (RT_LIKELY(g_aE1kRegMap[index].writable))
5847 {
5848 /*
5849 * Write it. Pass the mask so the handler knows what has to be written.
5850 * Mask out irrelevant bits.
5851 */
5852 Log6(("%s At %08X write %08X to %s (%s)\n",
5853 pThis->szPrf, offReg, u32Value, g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
5854 //rc = e1kCsEnter(pThis, VERR_SEM_BUSY, RT_SRC_POS);
5855 //if (RT_UNLIKELY(rc != VINF_SUCCESS))
5856 // return rc;
5857 //pThis->fDelayInts = false;
5858 //pThis->iStatIntLost += pThis->iStatIntLostOne;
5859 //pThis->iStatIntLostOne = 0;
5860 rc = g_aE1kRegMap[index].pfnWrite(pThis, offReg, index, u32Value);
5861 //e1kCsLeave(pThis);
5862 }
5863 else
5864 E1kLog(("%s At %08X write attempt (%08X) to read-only register %s (%s)\n",
5865 pThis->szPrf, offReg, u32Value, g_aE1kRegMap[index].abbrev, g_aE1kRegMap[index].name));
5866 if (IOM_SUCCESS(rc))
5867 STAM_COUNTER_INC(&pThis->aStatRegWrites[index]);
5868 }
5869 else
5870 E1kLog(("%s At %08X write attempt (%08X) to non-existing register\n",
5871 pThis->szPrf, offReg, u32Value));
5872 return rc;
5873}
5874
5875
5876/* -=-=-=-=- MMIO and I/O Port Callbacks -=-=-=-=- */
5877
5878/**
5879 * @callback_method_impl{FNIOMMMIOREAD}
5880 */
5881PDMBOTHCBDECL(int) e1kMMIORead(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS GCPhysAddr, void *pv, unsigned cb)
5882{
5883 RT_NOREF2(pvUser, cb);
5884 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
5885 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatMMIORead), a);
5886
5887 uint32_t offReg = GCPhysAddr - pThis->addrMMReg;
5888 Assert(offReg < E1K_MM_SIZE);
5889 Assert(cb == 4);
5890 Assert(!(GCPhysAddr & 3));
5891
5892 int rc = e1kRegReadAlignedU32(pThis, offReg, (uint32_t *)pv);
5893
5894 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatMMIORead), a);
5895 return rc;
5896}
5897
5898/**
5899 * @callback_method_impl{FNIOMMMIOWRITE}
5900 */
5901PDMBOTHCBDECL(int) e1kMMIOWrite(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS GCPhysAddr, void const *pv, unsigned cb)
5902{
5903 RT_NOREF2(pvUser, cb);
5904 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
5905 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatMMIOWrite), a);
5906
5907 uint32_t offReg = GCPhysAddr - pThis->addrMMReg;
5908 Assert(offReg < E1K_MM_SIZE);
5909 Assert(cb == 4);
5910 Assert(!(GCPhysAddr & 3));
5911
5912 int rc = e1kRegWriteAlignedU32(pThis, offReg, *(uint32_t const *)pv);
5913
5914 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatMMIOWrite), a);
5915 return rc;
5916}
5917
5918/**
5919 * @callback_method_impl{FNIOMIOPORTIN}
5920 */
5921PDMBOTHCBDECL(int) e1kIOPortIn(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT uPort, uint32_t *pu32, unsigned cb)
5922{
5923 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
5924 int rc;
5925 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatIORead), a);
5926 RT_NOREF_PV(pvUser);
5927
5928 uPort -= pThis->IOPortBase;
5929 if (RT_LIKELY(cb == 4))
5930 switch (uPort)
5931 {
5932 case 0x00: /* IOADDR */
5933 *pu32 = pThis->uSelectedReg;
5934 E1kLog2(("%s e1kIOPortIn: IOADDR(0), selecting register %#010x, val=%#010x\n", pThis->szPrf, pThis->uSelectedReg, *pu32));
5935 rc = VINF_SUCCESS;
5936 break;
5937
5938 case 0x04: /* IODATA */
5939 if (!(pThis->uSelectedReg & 3))
5940 rc = e1kRegReadAlignedU32(pThis, pThis->uSelectedReg, pu32);
5941 else /** @todo r=bird: I wouldn't be surprised if this unaligned branch wasn't necessary. */
5942 rc = e1kRegReadUnaligned(pThis, pThis->uSelectedReg, pu32, cb);
5943 if (rc == VINF_IOM_R3_MMIO_READ)
5944 rc = VINF_IOM_R3_IOPORT_READ;
5945 E1kLog2(("%s e1kIOPortIn: IODATA(4), reading from selected register %#010x, val=%#010x\n", pThis->szPrf, pThis->uSelectedReg, *pu32));
5946 break;
5947
5948 default:
5949 E1kLog(("%s e1kIOPortIn: invalid port %#010x\n", pThis->szPrf, uPort));
5950 //rc = VERR_IOM_IOPORT_UNUSED; /* Why not? */
5951 rc = VINF_SUCCESS;
5952 }
5953 else
5954 {
5955        E1kLog(("%s e1kIOPortIn: invalid op size: uPort=%RTiop cb=%08x\n", pThis->szPrf, uPort, cb));
5956 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "%s e1kIOPortIn: invalid op size: uPort=%RTiop cb=%08x\n", pThis->szPrf, uPort, cb);
5957 }
5958 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatIORead), a);
5959 return rc;
5960}
5961
5962
5963/**
5964 * @callback_method_impl{FNIOMIOPORTOUT}
5965 */
5966PDMBOTHCBDECL(int) e1kIOPortOut(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT uPort, uint32_t u32, unsigned cb)
5967{
5968 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, PE1KSTATE);
5969 int rc;
5970 STAM_PROFILE_ADV_START(&pThis->CTX_SUFF_Z(StatIOWrite), a);
5971 RT_NOREF_PV(pvUser);
5972
5973 E1kLog2(("%s e1kIOPortOut: uPort=%RTiop value=%08x\n", pThis->szPrf, uPort, u32));
5974 if (RT_LIKELY(cb == 4))
5975 {
5976 uPort -= pThis->IOPortBase;
5977 switch (uPort)
5978 {
5979 case 0x00: /* IOADDR */
5980 pThis->uSelectedReg = u32;
5981 E1kLog2(("%s e1kIOPortOut: IOADDR(0), selected register %08x\n", pThis->szPrf, pThis->uSelectedReg));
5982 rc = VINF_SUCCESS;
5983 break;
5984
5985 case 0x04: /* IODATA */
5986 E1kLog2(("%s e1kIOPortOut: IODATA(4), writing to selected register %#010x, value=%#010x\n", pThis->szPrf, pThis->uSelectedReg, u32));
5987 if (RT_LIKELY(!(pThis->uSelectedReg & 3)))
5988 {
5989 rc = e1kRegWriteAlignedU32(pThis, pThis->uSelectedReg, u32);
5990 if (rc == VINF_IOM_R3_MMIO_WRITE)
5991 rc = VINF_IOM_R3_IOPORT_WRITE;
5992 }
5993 else
5994 rc = PDMDevHlpDBGFStop(pThis->CTX_SUFF(pDevIns), RT_SRC_POS,
5995 "Spec violation: misaligned offset: %#10x, ignored.\n", pThis->uSelectedReg);
5996 break;
5997
5998 default:
5999 E1kLog(("%s e1kIOPortOut: invalid port %#010x\n", pThis->szPrf, uPort));
6000 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "invalid port %#010x\n", uPort);
6001 }
6002 }
6003 else
6004 {
6005 E1kLog(("%s e1kIOPortOut: invalid op size: uPort=%RTiop cb=%08x\n", pThis->szPrf, uPort, cb));
6006 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "%s: invalid op size: uPort=%RTiop cb=%#x\n", pThis->szPrf, uPort, cb);
6007 }
6008
6009 STAM_PROFILE_ADV_STOP(&pThis->CTX_SUFF_Z(StatIOWrite), a);
6010 return rc;
6011}
6012
6013#ifdef IN_RING3
6014
6015/**
6016 * Dump complete device state to log.
6017 *
6018 * @param pThis Pointer to device state.
6019 */
6020static void e1kDumpState(PE1KSTATE pThis)
6021{
6022 RT_NOREF(pThis);
6023 for (int i = 0; i < E1K_NUM_OF_32BIT_REGS; ++i)
6024 E1kLog2(("%s %8.8s = %08x\n", pThis->szPrf, g_aE1kRegMap[i].abbrev, pThis->auRegs[i]));
6025# ifdef E1K_INT_STATS
6026 LogRel(("%s Interrupt attempts: %d\n", pThis->szPrf, pThis->uStatIntTry));
6027 LogRel(("%s Interrupts raised : %d\n", pThis->szPrf, pThis->uStatInt));
6028 LogRel(("%s Interrupts lowered: %d\n", pThis->szPrf, pThis->uStatIntLower));
6029 LogRel(("%s Interrupts delayed: %d\n", pThis->szPrf, pThis->uStatIntDly));
6030 LogRel(("%s Disabled delayed: %d\n", pThis->szPrf, pThis->uStatDisDly));
6031 LogRel(("%s Interrupts skipped: %d\n", pThis->szPrf, pThis->uStatIntSkip));
6032 LogRel(("%s Masked interrupts : %d\n", pThis->szPrf, pThis->uStatIntMasked));
6033 LogRel(("%s Early interrupts : %d\n", pThis->szPrf, pThis->uStatIntEarly));
6034 LogRel(("%s Late interrupts : %d\n", pThis->szPrf, pThis->uStatIntLate));
6035 LogRel(("%s Lost interrupts : %d\n", pThis->szPrf, pThis->iStatIntLost));
6036 LogRel(("%s Interrupts by RX : %d\n", pThis->szPrf, pThis->uStatIntRx));
6037 LogRel(("%s Interrupts by TX : %d\n", pThis->szPrf, pThis->uStatIntTx));
6038 LogRel(("%s Interrupts by ICS : %d\n", pThis->szPrf, pThis->uStatIntICS));
6039 LogRel(("%s Interrupts by RDTR: %d\n", pThis->szPrf, pThis->uStatIntRDTR));
6040 LogRel(("%s Interrupts by RDMT: %d\n", pThis->szPrf, pThis->uStatIntRXDMT0));
6041 LogRel(("%s Interrupts by TXQE: %d\n", pThis->szPrf, pThis->uStatIntTXQE));
6042 LogRel(("%s TX int delay asked: %d\n", pThis->szPrf, pThis->uStatTxIDE));
6043 LogRel(("%s TX delayed: %d\n", pThis->szPrf, pThis->uStatTxDelayed));
6044 LogRel(("%s TX delay expired: %d\n", pThis->szPrf, pThis->uStatTxDelayExp));
6045 LogRel(("%s TX no report asked: %d\n", pThis->szPrf, pThis->uStatTxNoRS));
6046 LogRel(("%s TX abs timer expd : %d\n", pThis->szPrf, pThis->uStatTAD));
6047 LogRel(("%s TX int timer expd : %d\n", pThis->szPrf, pThis->uStatTID));
6048 LogRel(("%s RX abs timer expd : %d\n", pThis->szPrf, pThis->uStatRAD));
6049 LogRel(("%s RX int timer expd : %d\n", pThis->szPrf, pThis->uStatRID));
6050 LogRel(("%s TX CTX descriptors: %d\n", pThis->szPrf, pThis->uStatDescCtx));
6051 LogRel(("%s TX DAT descriptors: %d\n", pThis->szPrf, pThis->uStatDescDat));
6052 LogRel(("%s TX LEG descriptors: %d\n", pThis->szPrf, pThis->uStatDescLeg));
6053 LogRel(("%s Received frames : %d\n", pThis->szPrf, pThis->uStatRxFrm));
6054 LogRel(("%s Transmitted frames: %d\n", pThis->szPrf, pThis->uStatTxFrm));
6055 LogRel(("%s TX frames up to 1514: %d\n", pThis->szPrf, pThis->uStatTx1514));
6056 LogRel(("%s TX frames up to 2962: %d\n", pThis->szPrf, pThis->uStatTx2962));
6057 LogRel(("%s TX frames up to 4410: %d\n", pThis->szPrf, pThis->uStatTx4410));
6058 LogRel(("%s TX frames up to 5858: %d\n", pThis->szPrf, pThis->uStatTx5858));
6059 LogRel(("%s TX frames up to 7306: %d\n", pThis->szPrf, pThis->uStatTx7306));
6060 LogRel(("%s TX frames up to 8754: %d\n", pThis->szPrf, pThis->uStatTx8754));
6061 LogRel(("%s TX frames up to 16384: %d\n", pThis->szPrf, pThis->uStatTx16384));
6062 LogRel(("%s TX frames up to 32768: %d\n", pThis->szPrf, pThis->uStatTx32768));
6063 LogRel(("%s Larger TX frames : %d\n", pThis->szPrf, pThis->uStatTxLarge));
6064 LogRel(("%s Max TX Delay : %lld\n", pThis->szPrf, pThis->uStatMaxTxDelay));
6065# endif /* E1K_INT_STATS */
6066}
6067
6068/**
6069 * @callback_method_impl{FNPCIIOREGIONMAP}
6070 */
6071static DECLCALLBACK(int) e1kMap(PPCIDEVICE pPciDev, int iRegion, RTGCPHYS GCPhysAddress, uint32_t cb, PCIADDRESSSPACE enmType)
6072{
6073 RT_NOREF(iRegion);
6074 PE1KSTATE pThis = PDMINS_2_DATA(pPciDev->pDevIns, E1KSTATE*);
6075 int rc;
6076
6077 switch (enmType)
6078 {
6079 case PCI_ADDRESS_SPACE_IO:
6080 pThis->IOPortBase = (RTIOPORT)GCPhysAddress;
6081 rc = PDMDevHlpIOPortRegister(pPciDev->pDevIns, pThis->IOPortBase, cb, NULL /*pvUser*/,
6082 e1kIOPortOut, e1kIOPortIn, NULL, NULL, "E1000");
6083 if (pThis->fR0Enabled && RT_SUCCESS(rc))
6084 rc = PDMDevHlpIOPortRegisterR0(pPciDev->pDevIns, pThis->IOPortBase, cb, NIL_RTR0PTR /*pvUser*/,
6085 "e1kIOPortOut", "e1kIOPortIn", NULL, NULL, "E1000");
6086 if (pThis->fRCEnabled && RT_SUCCESS(rc))
6087 rc = PDMDevHlpIOPortRegisterRC(pPciDev->pDevIns, pThis->IOPortBase, cb, NIL_RTRCPTR /*pvUser*/,
6088 "e1kIOPortOut", "e1kIOPortIn", NULL, NULL, "E1000");
6089 break;
6090
6091 case PCI_ADDRESS_SPACE_MEM:
6092 /*
6093 * From the spec:
6094 * For registers that should be accessed as 32-bit double words,
6095 * partial writes (less than a 32-bit double word) is ignored.
6096 * Partial reads return all 32 bits of data regardless of the
6097 * byte enables.
6098 */
6099 pThis->addrMMReg = GCPhysAddress; Assert(!(GCPhysAddress & 7));
6100 rc = PDMDevHlpMMIORegister(pPciDev->pDevIns, GCPhysAddress, cb, NULL /*pvUser*/,
6101 IOMMMIO_FLAGS_READ_DWORD | IOMMMIO_FLAGS_WRITE_ONLY_DWORD,
6102 e1kMMIOWrite, e1kMMIORead, "E1000");
6103 if (pThis->fR0Enabled && RT_SUCCESS(rc))
6104 rc = PDMDevHlpMMIORegisterR0(pPciDev->pDevIns, GCPhysAddress, cb, NIL_RTR0PTR /*pvUser*/,
6105 "e1kMMIOWrite", "e1kMMIORead");
6106 if (pThis->fRCEnabled && RT_SUCCESS(rc))
6107 rc = PDMDevHlpMMIORegisterRC(pPciDev->pDevIns, GCPhysAddress, cb, NIL_RTRCPTR /*pvUser*/,
6108 "e1kMMIOWrite", "e1kMMIORead");
6109 break;
6110
6111 default:
6112 /* We should never get here */
6113 AssertMsgFailed(("Invalid PCI address space param in map callback"));
6114 rc = VERR_INTERNAL_ERROR;
6115 break;
6116 }
6117 return rc;
6118}
6119
6120
6121/* -=-=-=-=- PDMINETWORKDOWN -=-=-=-=- */
6122
6123/**
6124 * Check if the device can receive data now.
6125 * This must be called before the pfnReceive() method is called.
6126 *
6127 * @returns VINF_SUCCESS if the device can receive, VERR_NET_NO_BUFFER_SPACE otherwise.
6128 * @param   pThis       The device state structure.
6129 * @thread EMT
6130 */
6131static int e1kCanReceive(PE1KSTATE pThis)
6132{
6133#ifndef E1K_WITH_RXD_CACHE
6134 size_t cb;
6135
6136 if (RT_UNLIKELY(e1kCsRxEnter(pThis, VERR_SEM_BUSY) != VINF_SUCCESS))
6137 return VERR_NET_NO_BUFFER_SPACE;
6138
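    /* Special case: the ring consists of a single descriptor, so RDH and RDT
       cannot be used to tell a full ring from an empty one; check the
       descriptor's DD bit instead. If it is set, the descriptor has already
       been written back and there is no room for another frame. */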
6139 if (RT_UNLIKELY(RDLEN == sizeof(E1KRXDESC)))
6140 {
6141 E1KRXDESC desc;
6142 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), e1kDescAddr(RDBAH, RDBAL, RDH),
6143 &desc, sizeof(desc));
6144 if (desc.status.fDD)
6145 cb = 0;
6146 else
6147 cb = pThis->u16RxBSize;
6148 }
6149 else if (RDH < RDT)
6150 cb = (RDT - RDH) * pThis->u16RxBSize;
6151 else if (RDH > RDT)
6152 cb = (RDLEN/sizeof(E1KRXDESC) - RDH + RDT) * pThis->u16RxBSize;
6153 else
6154 {
6155 cb = 0;
6156 E1kLogRel(("E1000: OUT of RX descriptors!\n"));
6157 }
6158 E1kLog2(("%s e1kCanReceive: at exit RDH=%d RDT=%d RDLEN=%d u16RxBSize=%d cb=%lu\n",
6159 pThis->szPrf, RDH, RDT, RDLEN, pThis->u16RxBSize, cb));
6160
6161 e1kCsRxLeave(pThis);
6162 return cb > 0 ? VINF_SUCCESS : VERR_NET_NO_BUFFER_SPACE;
6163#else /* E1K_WITH_RXD_CACHE */
6164 int rc = VINF_SUCCESS;
6165
6166 if (RT_UNLIKELY(e1kCsRxEnter(pThis, VERR_SEM_BUSY) != VINF_SUCCESS))
6167 return VERR_NET_NO_BUFFER_SPACE;
6168
6169 if (RT_UNLIKELY(RDLEN == sizeof(E1KRXDESC)))
6170 {
6171 E1KRXDESC desc;
6172 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), e1kDescAddr(RDBAH, RDBAL, RDH),
6173 &desc, sizeof(desc));
6174 if (desc.status.fDD)
6175 rc = VERR_NET_NO_BUFFER_SPACE;
6176 }
6177 else if (e1kRxDIsCacheEmpty(pThis) && RDH == RDT)
6178 {
6179 /* Cache is empty, so is the RX ring. */
6180 rc = VERR_NET_NO_BUFFER_SPACE;
6181 }
6182 E1kLog2(("%s e1kCanReceive: at exit in_cache=%d RDH=%d RDT=%d RDLEN=%d"
6183 " u16RxBSize=%d rc=%Rrc\n", pThis->szPrf,
6184 e1kRxDInCache(pThis), RDH, RDT, RDLEN, pThis->u16RxBSize, rc));
6185
6186 e1kCsRxLeave(pThis);
6187 return rc;
6188#endif /* E1K_WITH_RXD_CACHE */
6189}
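
/*
 * Illustrative sketch (not part of the device code, all names hypothetical): the
 * free-space computation used by the non-cached path above.  rdh and rdt are the
 * head and tail indices of the circular RX descriptor ring, cDescs its size in
 * descriptors, and cbRxBuf the per-descriptor buffer size (u16RxBSize).
 */
static size_t exampleRxRingFreeBytes(uint32_t rdh, uint32_t rdt, uint32_t cDescs, uint16_t cbRxBuf)
{
    if (rdh < rdt)                                /* free descriptors lie between head and tail */
        return (size_t)(rdt - rdh) * cbRxBuf;
    if (rdh > rdt)                                /* free region wraps around the end of the ring */
        return (size_t)(cDescs - rdh + rdt) * cbRxBuf;
    return 0;                                     /* head == tail: no free buffers left */
}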
6190
6191/**
6192 * @interface_method_impl{PDMINETWORKDOWN,pfnWaitReceiveAvail}
6193 */
6194static DECLCALLBACK(int) e1kR3NetworkDown_WaitReceiveAvail(PPDMINETWORKDOWN pInterface, RTMSINTERVAL cMillies)
6195{
6196 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkDown);
6197 int rc = e1kCanReceive(pThis);
6198
6199 if (RT_SUCCESS(rc))
6200 return VINF_SUCCESS;
6201 if (RT_UNLIKELY(cMillies == 0))
6202 return VERR_NET_NO_BUFFER_SPACE;
6203
6204 rc = VERR_INTERRUPTED;
6205 ASMAtomicXchgBool(&pThis->fMaybeOutOfSpace, true);
6206 STAM_PROFILE_START(&pThis->StatRxOverflow, a);
6207 VMSTATE enmVMState;
6208 while (RT_LIKELY( (enmVMState = PDMDevHlpVMState(pThis->CTX_SUFF(pDevIns))) == VMSTATE_RUNNING
6209 || enmVMState == VMSTATE_RUNNING_LS))
6210 {
6211 int rc2 = e1kCanReceive(pThis);
6212 if (RT_SUCCESS(rc2))
6213 {
6214 rc = VINF_SUCCESS;
6215 break;
6216 }
6217 E1kLogRel(("E1000 e1kR3NetworkDown_WaitReceiveAvail: waiting cMillies=%u...\n", cMillies));
6218 E1kLog(("%s e1kR3NetworkDown_WaitReceiveAvail: waiting cMillies=%u...\n", pThis->szPrf, cMillies));
6219 RTSemEventWait(pThis->hEventMoreRxDescAvail, cMillies);
6220 }
6221 STAM_PROFILE_STOP(&pThis->StatRxOverflow, a);
6222 ASMAtomicXchgBool(&pThis->fMaybeOutOfSpace, false);
6223
6224 return rc;
6225}
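
/*
 * Illustrative sketch (an assumption about the caller, not code from this file):
 * the driver attached above this device is expected to call pfnWaitReceiveAvail()
 * and push the frame via pfnReceive() only on success.  The timeout is hypothetical.
 */
static int exampleDeliverFrameToDevice(PPDMINETWORKDOWN pPort, const void *pvFrame, size_t cbFrame)
{
    int rc = pPort->pfnWaitReceiveAvail(pPort, 1000 /*ms*/);
    if (RT_SUCCESS(rc))
        rc = pPort->pfnReceive(pPort, pvFrame, cbFrame);
    return rc;
}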
6226
6227
6228/**
6229 * Matches the packet addresses against the Receive Address table. Looks for
6230 * exact matches only.
6231 *
6232 * @returns true if address matches.
6233 * @param pThis Pointer to the state structure.
6234 * @param pvBuf The ethernet packet.
6236 * @thread EMT
6237 */
6238static bool e1kPerfectMatch(PE1KSTATE pThis, const void *pvBuf)
6239{
6240 for (unsigned i = 0; i < RT_ELEMENTS(pThis->aRecAddr.array); i++)
6241 {
6242 E1KRAELEM* ra = pThis->aRecAddr.array + i;
6243
6244 /* Valid address? */
6245 if (ra->ctl & RA_CTL_AV)
6246 {
6247 Assert((ra->ctl & RA_CTL_AS) < 2);
6248 //unsigned char *pAddr = (unsigned char*)pvBuf + sizeof(ra->addr)*(ra->ctl & RA_CTL_AS);
6249 //E1kLog3(("%s Matching %02x:%02x:%02x:%02x:%02x:%02x against %02x:%02x:%02x:%02x:%02x:%02x...\n",
6250 // pThis->szPrf, pAddr[0], pAddr[1], pAddr[2], pAddr[3], pAddr[4], pAddr[5],
6251 // ra->addr[0], ra->addr[1], ra->addr[2], ra->addr[3], ra->addr[4], ra->addr[5]));
6252 /*
6253 * Address Select:
6254 * 00b = Destination address
6255 * 01b = Source address
6256 * 10b = Reserved
6257 * 11b = Reserved
6258             * Since the Ethernet header starts with (DA, SA, len) we can use
6259             * the address select value as an index.
6260 */
6261 if (memcmp((char*)pvBuf + sizeof(ra->addr)*(ra->ctl & RA_CTL_AS),
6262 ra->addr, sizeof(ra->addr)) == 0)
6263 return true;
6264 }
6265 }
6266
6267 return false;
6268}
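
/*
 * Illustrative sketch (example only, hypothetical helper): why the Address Select
 * bits can be used as an index above.  An Ethernet header starts with the 6-byte
 * destination address followed by the 6-byte source address, so AS=0 selects
 * offset 0 and AS=1 selects offset 6.
 */
static const uint8_t *exampleSelectAddress(const uint8_t *pbFrame, unsigned uAddrSel)
{
    return pbFrame + 6 * uAddrSel; /* 0 -> destination MAC, 1 -> source MAC */
}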
6269
6270/**
6271 * Matches the packet addresses against the Multicast Table Array.
6272 *
6273 * @remarks This is an imperfect match since it matches a whole subset of
6274 * addresses rather than one exact address.
6275 *
6276 * @returns true if address matches.
6277 * @param pThis Pointer to the state structure.
6278 * @param pvBuf The ethernet packet.
6280 * @thread EMT
6281 */
6282static bool e1kImperfectMatch(PE1KSTATE pThis, const void *pvBuf)
6283{
6284 /* Get bits 32..47 of destination address */
6285 uint16_t u16Bit = ((uint16_t*)pvBuf)[2];
6286
6287 unsigned offset = GET_BITS(RCTL, MO);
6288 /*
6289 * offset means:
6290 * 00b = bits 36..47
6291 * 01b = bits 35..46
6292 * 10b = bits 34..45
6293 * 11b = bits 32..43
6294 */
6295 if (offset < 3)
6296 u16Bit = u16Bit >> (4 - offset);
6297 return ASMBitTest(pThis->auMTA, u16Bit & 0xFFF);
6298}
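
/*
 * Illustrative sketch (example only, hypothetical helper): deriving the 12-bit
 * index into the 4096-bit Multicast Table Array from the destination MAC.  Bytes 4
 * and 5 of the address carry bits 32..47; RCTL.MO selects which 12 of them are used.
 */
static unsigned exampleMtaIndex(const uint8_t *pbDstMac, unsigned uMo)
{
    uint16_t u16Bits = (uint16_t)(pbDstMac[4] | (pbDstMac[5] << 8)); /* bits 32..47 */
    if (uMo < 3)                   /* MO=0 -> bits 36..47, 1 -> 35..46, 2 -> 34..45 */
        u16Bits >>= 4 - uMo;
    return u16Bits & 0xFFF;        /* MO=3 -> bits 32..43 (no shift); index into auMTA */
}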
6299
6300/**
6301 * Determines if the packet is to be delivered to the upper layer.
6302 *
6303 * The following filters are supported:
6304 * - Exact Unicast/Multicast
6305 * - Promiscuous Unicast/Multicast
6306 * - Multicast
6307 * - VLAN
6308 *
6309 * @returns true if packet is intended for this node.
6310 * @param pThis Pointer to the state structure.
6311 * @param pvBuf The ethernet packet.
6312 * @param cb Number of bytes available in the packet.
6313 * @param pStatus Bit field to store status bits.
6314 * @thread EMT
6315 */
6316static bool e1kAddressFilter(PE1KSTATE pThis, const void *pvBuf, size_t cb, E1KRXDST *pStatus)
6317{
6318 Assert(cb > 14);
6319 /* Assume that we fail to pass exact filter. */
6320 pStatus->fPIF = false;
6321 pStatus->fVP = false;
6322 /* Discard oversized packets */
6323 if (cb > E1K_MAX_RX_PKT_SIZE)
6324 {
6325 E1kLog(("%s ERROR: Incoming packet is too big, cb=%d > max=%d\n",
6326 pThis->szPrf, cb, E1K_MAX_RX_PKT_SIZE));
6327 E1K_INC_CNT32(ROC);
6328 return false;
6329 }
6330 else if (!(RCTL & RCTL_LPE) && cb > 1522)
6331 {
6332        /* When long packet reception is disabled, packets over 1522 bytes are discarded. */
6333 E1kLog(("%s Discarding incoming packet (LPE=0), cb=%d\n",
6334 pThis->szPrf, cb));
6335 E1K_INC_CNT32(ROC);
6336 return false;
6337 }
6338
6339 uint16_t *u16Ptr = (uint16_t*)pvBuf;
6340 /* Compare TPID with VLAN Ether Type */
6341 if (RT_BE2H_U16(u16Ptr[6]) == VET)
6342 {
6343 pStatus->fVP = true;
6344 /* Is VLAN filtering enabled? */
6345 if (RCTL & RCTL_VFE)
6346 {
6347 /* It is 802.1q packet indeed, let's filter by VID */
6348 if (RCTL & RCTL_CFIEN)
6349 {
6350 E1kLog3(("%s VLAN filter: VLAN=%d CFI=%d RCTL_CFI=%d\n", pThis->szPrf,
6351 E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7])),
6352 E1K_SPEC_CFI(RT_BE2H_U16(u16Ptr[7])),
6353 !!(RCTL & RCTL_CFI)));
6354 if (E1K_SPEC_CFI(RT_BE2H_U16(u16Ptr[7])) != !!(RCTL & RCTL_CFI))
6355 {
6356 E1kLog2(("%s Packet filter: CFIs do not match in packet and RCTL (%d!=%d)\n",
6357 pThis->szPrf, E1K_SPEC_CFI(RT_BE2H_U16(u16Ptr[7])), !!(RCTL & RCTL_CFI)));
6358 return false;
6359 }
6360 }
6361 else
6362 E1kLog3(("%s VLAN filter: VLAN=%d\n", pThis->szPrf,
6363 E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7]))));
6364 if (!ASMBitTest(pThis->auVFTA, E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7]))))
6365 {
6366 E1kLog2(("%s Packet filter: no VLAN match (id=%d)\n",
6367 pThis->szPrf, E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7]))));
6368 return false;
6369 }
6370 }
6371 }
6372 /* Broadcast filtering */
6373 if (e1kIsBroadcast(pvBuf) && (RCTL & RCTL_BAM))
6374 return true;
6375 E1kLog2(("%s Packet filter: not a broadcast\n", pThis->szPrf));
6376 if (e1kIsMulticast(pvBuf))
6377 {
6378 /* Is multicast promiscuous enabled? */
6379 if (RCTL & RCTL_MPE)
6380 return true;
6381 E1kLog2(("%s Packet filter: no promiscuous multicast\n", pThis->szPrf));
6382 /* Try perfect matches first */
6383 if (e1kPerfectMatch(pThis, pvBuf))
6384 {
6385 pStatus->fPIF = true;
6386 return true;
6387 }
6388 E1kLog2(("%s Packet filter: no perfect match\n", pThis->szPrf));
6389 if (e1kImperfectMatch(pThis, pvBuf))
6390 return true;
6391 E1kLog2(("%s Packet filter: no imperfect match\n", pThis->szPrf));
6392 }
6393 else {
6394 /* Is unicast promiscuous enabled? */
6395 if (RCTL & RCTL_UPE)
6396 return true;
6397 E1kLog2(("%s Packet filter: no promiscuous unicast\n", pThis->szPrf));
6398 if (e1kPerfectMatch(pThis, pvBuf))
6399 {
6400 pStatus->fPIF = true;
6401 return true;
6402 }
6403 E1kLog2(("%s Packet filter: no perfect match\n", pThis->szPrf));
6404 }
6405 E1kLog2(("%s Packet filter: packet discarded\n", pThis->szPrf));
6406 return false;
6407}
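
/*
 * Illustrative sketch (example only, hypothetical helper): layout of the 802.1Q tag
 * tested above.  The TPID occupies frame bytes 12..13 (compared against VET) and the
 * TCI bytes 14..15, both big endian; the TCI packs PRI (3 bits), CFI (1 bit) and the
 * 12-bit VLAN ID.
 */
static void exampleParseVlanTci(const uint8_t *pbFrame, uint16_t *puVid, unsigned *pfCfi, unsigned *puPri)
{
    uint16_t u16Tci = (uint16_t)((pbFrame[14] << 8) | pbFrame[15]); /* big endian -> host */
    *puVid = u16Tci & 0xFFF;      /* what E1K_SPEC_VLAN() extracts */
    *pfCfi = (u16Tci >> 12) & 1;  /* what E1K_SPEC_CFI() extracts */
    *puPri = u16Tci >> 13;        /* what E1K_SPEC_PRI() extracts */
}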
6408
6409/**
6410 * @interface_method_impl{PDMINETWORKDOWN,pfnReceive}
6411 */
6412static DECLCALLBACK(int) e1kR3NetworkDown_Receive(PPDMINETWORKDOWN pInterface, const void *pvBuf, size_t cb)
6413{
6414 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkDown);
6415 int rc = VINF_SUCCESS;
6416
6417 /*
6418 * Drop packets if the VM is not running yet/anymore.
6419 */
6420 VMSTATE enmVMState = PDMDevHlpVMState(STATE_TO_DEVINS(pThis));
6421 if ( enmVMState != VMSTATE_RUNNING
6422 && enmVMState != VMSTATE_RUNNING_LS)
6423 {
6424 E1kLog(("%s Dropping incoming packet as VM is not running.\n", pThis->szPrf));
6425 return VINF_SUCCESS;
6426 }
6427
6428    /* Discard incoming packets while the receiver is disabled, the device is locked, or the link is down. */
6429 if (!(RCTL & RCTL_EN) || pThis->fLocked || !(STATUS & STATUS_LU))
6430 {
6431 E1kLog(("%s Dropping incoming packet as receive operation is disabled.\n", pThis->szPrf));
6432 return VINF_SUCCESS;
6433 }
6434
6435 STAM_PROFILE_ADV_START(&pThis->StatReceive, a);
6436
6437 //if (!e1kCsEnter(pThis, RT_SRC_POS))
6438 // return VERR_PERMISSION_DENIED;
6439
6440 e1kPacketDump(pThis, (const uint8_t*)pvBuf, cb, "<-- Incoming");
6441
6442 /* Update stats */
6443 if (RT_LIKELY(e1kCsEnter(pThis, VERR_SEM_BUSY) == VINF_SUCCESS))
6444 {
6445 E1K_INC_CNT32(TPR);
6446 E1K_ADD_CNT64(TORL, TORH, cb < 64? 64 : cb);
6447 e1kCsLeave(pThis);
6448 }
6449 STAM_PROFILE_ADV_START(&pThis->StatReceiveFilter, a);
6450 E1KRXDST status;
6451 RT_ZERO(status);
6452 bool fPassed = e1kAddressFilter(pThis, pvBuf, cb, &status);
6453 STAM_PROFILE_ADV_STOP(&pThis->StatReceiveFilter, a);
6454 if (fPassed)
6455 {
6456 rc = e1kHandleRxPacket(pThis, pvBuf, cb, status);
6457 }
6458 //e1kCsLeave(pThis);
6459 STAM_PROFILE_ADV_STOP(&pThis->StatReceive, a);
6460
6461 return rc;
6462}
6463
6464
6465/* -=-=-=-=- PDMILEDPORTS -=-=-=-=- */
6466
6467/**
6468 * @interface_method_impl{PDMILEDPORTS,pfnQueryStatusLed}
6469 */
6470static DECLCALLBACK(int) e1kR3QueryStatusLed(PPDMILEDPORTS pInterface, unsigned iLUN, PPDMLED *ppLed)
6471{
6472 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, ILeds);
6473 int rc = VERR_PDM_LUN_NOT_FOUND;
6474
6475 if (iLUN == 0)
6476 {
6477 *ppLed = &pThis->led;
6478 rc = VINF_SUCCESS;
6479 }
6480 return rc;
6481}
6482
6483
6484/* -=-=-=-=- PDMINETWORKCONFIG -=-=-=-=- */
6485
6486/**
6487 * @interface_method_impl{PDMINETWORKCONFIG,pfnGetMac}
6488 */
6489static DECLCALLBACK(int) e1kR3GetMac(PPDMINETWORKCONFIG pInterface, PRTMAC pMac)
6490{
6491 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkConfig);
6492 pThis->eeprom.getMac(pMac);
6493 return VINF_SUCCESS;
6494}
6495
6496/**
6497 * @interface_method_impl{PDMINETWORKCONFIG,pfnGetLinkState}
6498 */
6499static DECLCALLBACK(PDMNETWORKLINKSTATE) e1kR3GetLinkState(PPDMINETWORKCONFIG pInterface)
6500{
6501 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkConfig);
6502 if (STATUS & STATUS_LU)
6503 return PDMNETWORKLINKSTATE_UP;
6504 return PDMNETWORKLINKSTATE_DOWN;
6505}
6506
6507/**
6508 * @interface_method_impl{PDMINETWORKCONFIG,pfnSetLinkState}
6509 */
6510static DECLCALLBACK(int) e1kR3SetLinkState(PPDMINETWORKCONFIG pInterface, PDMNETWORKLINKSTATE enmState)
6511{
6512 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkConfig);
6513
6514 E1kLog(("%s e1kR3SetLinkState: enmState=%d\n", pThis->szPrf, enmState));
6515 switch (enmState)
6516 {
6517 case PDMNETWORKLINKSTATE_UP:
6518 pThis->fCableConnected = true;
6519 /* If link was down, bring it up after a while. */
6520 if (!(STATUS & STATUS_LU))
6521 e1kBringLinkUpDelayed(pThis);
6522 break;
6523 case PDMNETWORKLINKSTATE_DOWN:
6524 pThis->fCableConnected = false;
6525 /* Always set the phy link state to down, regardless of the STATUS_LU bit.
6526 * We might have to set the link state before the driver initializes us. */
6527 Phy::setLinkStatus(&pThis->phy, false);
6528 /* If link was up, bring it down. */
6529 if (STATUS & STATUS_LU)
6530 e1kR3LinkDown(pThis);
6531 break;
6532 case PDMNETWORKLINKSTATE_DOWN_RESUME:
6533 /*
6534 * There is not much sense in bringing down the link if it has not come up yet.
6535             * If it is up though, we bring it down temporarily, then bring it up again.
6536 */
6537 if (STATUS & STATUS_LU)
6538 e1kR3LinkDownTemp(pThis);
6539 break;
6540 default:
6541 ;
6542 }
6543 return VINF_SUCCESS;
6544}
6545
6546
6547/* -=-=-=-=- PDMIBASE -=-=-=-=- */
6548
6549/**
6550 * @interface_method_impl{PDMIBASE,pfnQueryInterface}
6551 */
6552static DECLCALLBACK(void *) e1kR3QueryInterface(struct PDMIBASE *pInterface, const char *pszIID)
6553{
6554 PE1KSTATE pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, IBase);
6555 Assert(&pThis->IBase == pInterface);
6556
6557 PDMIBASE_RETURN_INTERFACE(pszIID, PDMIBASE, &pThis->IBase);
6558 PDMIBASE_RETURN_INTERFACE(pszIID, PDMINETWORKDOWN, &pThis->INetworkDown);
6559 PDMIBASE_RETURN_INTERFACE(pszIID, PDMINETWORKCONFIG, &pThis->INetworkConfig);
6560 PDMIBASE_RETURN_INTERFACE(pszIID, PDMILEDPORTS, &pThis->ILeds);
6561 return NULL;
6562}
6563
6564
6565/* -=-=-=-=- Saved State -=-=-=-=- */
6566
6567/**
6568 * Saves the configuration.
6569 *
6570 * @param pThis The E1K state.
6571 * @param pSSM The handle to the saved state.
6572 */
6573static void e1kSaveConfig(PE1KSTATE pThis, PSSMHANDLE pSSM)
6574{
6575 SSMR3PutMem(pSSM, &pThis->macConfigured, sizeof(pThis->macConfigured));
6576 SSMR3PutU32(pSSM, pThis->eChip);
6577}
6578
6579/**
6580 * @callback_method_impl{FNSSMDEVLIVEEXEC,Save basic configuration.}
6581 */
6582static DECLCALLBACK(int) e1kLiveExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM, uint32_t uPass)
6583{
6584 RT_NOREF(uPass);
6585 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6586 e1kSaveConfig(pThis, pSSM);
6587 return VINF_SSM_DONT_CALL_AGAIN;
6588}
6589
6590/**
6591 * @callback_method_impl{FNSSMDEVSAVEPREP,Synchronize.}
6592 */
6593static DECLCALLBACK(int) e1kSavePrep(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6594{
6595 RT_NOREF(pSSM);
6596 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6597
6598 int rc = e1kCsEnter(pThis, VERR_SEM_BUSY);
6599 if (RT_UNLIKELY(rc != VINF_SUCCESS))
6600 return rc;
6601 e1kCsLeave(pThis);
6602 return VINF_SUCCESS;
6603#if 0
6604 /* 1) Prevent all threads from modifying the state and memory */
6605 //pThis->fLocked = true;
6606 /* 2) Cancel all timers */
6607#ifdef E1K_TX_DELAY
6608 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTXDTimer));
6609#endif /* E1K_TX_DELAY */
6610#ifdef E1K_USE_TX_TIMERS
6611 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTIDTimer));
6612#ifndef E1K_NO_TAD
6613 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTADTimer));
6614#endif /* E1K_NO_TAD */
6615#endif /* E1K_USE_TX_TIMERS */
6616#ifdef E1K_USE_RX_TIMERS
6617 e1kCancelTimer(pThis, pThis->CTX_SUFF(pRIDTimer));
6618 e1kCancelTimer(pThis, pThis->CTX_SUFF(pRADTimer));
6619#endif /* E1K_USE_RX_TIMERS */
6620 e1kCancelTimer(pThis, pThis->CTX_SUFF(pIntTimer));
6621 /* 3) Did I forget anything? */
6622 E1kLog(("%s Locked\n", pThis->szPrf));
6623 return VINF_SUCCESS;
6624#endif
6625}
6626
6627/**
6628 * @callback_method_impl{FNSSMDEVSAVEEXEC}
6629 */
6630static DECLCALLBACK(int) e1kSaveExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6631{
6632 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6633
6634 e1kSaveConfig(pThis, pSSM);
6635 pThis->eeprom.save(pSSM);
6636 e1kDumpState(pThis);
6637 SSMR3PutMem(pSSM, pThis->auRegs, sizeof(pThis->auRegs));
6638 SSMR3PutBool(pSSM, pThis->fIntRaised);
6639 Phy::saveState(pSSM, &pThis->phy);
6640 SSMR3PutU32(pSSM, pThis->uSelectedReg);
6641 SSMR3PutMem(pSSM, pThis->auMTA, sizeof(pThis->auMTA));
6642 SSMR3PutMem(pSSM, &pThis->aRecAddr, sizeof(pThis->aRecAddr));
6643 SSMR3PutMem(pSSM, pThis->auVFTA, sizeof(pThis->auVFTA));
6644 SSMR3PutU64(pSSM, pThis->u64AckedAt);
6645 SSMR3PutU16(pSSM, pThis->u16RxBSize);
6646 //SSMR3PutBool(pSSM, pThis->fDelayInts);
6647 //SSMR3PutBool(pSSM, pThis->fIntMaskUsed);
6648 SSMR3PutU16(pSSM, pThis->u16TxPktLen);
6649/** @todo State wrt the TSE buffer is incomplete, so there is little point in
6650 * saving this actually. */
6651 SSMR3PutMem(pSSM, pThis->aTxPacketFallback, pThis->u16TxPktLen);
6652 SSMR3PutBool(pSSM, pThis->fIPcsum);
6653 SSMR3PutBool(pSSM, pThis->fTCPcsum);
6654 SSMR3PutMem(pSSM, &pThis->contextTSE, sizeof(pThis->contextTSE));
6655 SSMR3PutMem(pSSM, &pThis->contextNormal, sizeof(pThis->contextNormal));
6656 SSMR3PutBool(pSSM, pThis->fVTag);
6657 SSMR3PutU16(pSSM, pThis->u16VTagTCI);
6658#ifdef E1K_WITH_TXD_CACHE
6659#if 0
6660 SSMR3PutU8(pSSM, pThis->nTxDFetched);
6661 SSMR3PutMem(pSSM, pThis->aTxDescriptors,
6662 pThis->nTxDFetched * sizeof(pThis->aTxDescriptors[0]));
6663#else
6664 /*
6665 * There is no point in storing TX descriptor cache entries as we can simply
6666 * fetch them again. Moreover, normally the cache is always empty when we
6667 * save the state. Store zero entries for compatibility.
6668 */
6669 SSMR3PutU8(pSSM, 0);
6670#endif
6671#endif /* E1K_WITH_TXD_CACHE */
6672/**@todo GSO requires some more state here. */
6673 E1kLog(("%s State has been saved\n", pThis->szPrf));
6674 return VINF_SUCCESS;
6675}
6676
6677#if 0
6678/**
6679 * @callback_method_impl{FNSSMDEVSAVEDONE}
6680 */
6681static DECLCALLBACK(int) e1kSaveDone(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6682{
6683 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6684
6685 /* If VM is being powered off unlocking will result in assertions in PGM */
6686 if (PDMDevHlpGetVM(pDevIns)->enmVMState == VMSTATE_RUNNING)
6687 pThis->fLocked = false;
6688 else
6689 E1kLog(("%s VM is not running -- remain locked\n", pThis->szPrf));
6690 E1kLog(("%s Unlocked\n", pThis->szPrf));
6691 return VINF_SUCCESS;
6692}
6693#endif
6694
6695/**
6696 * @callback_method_impl{FNSSMDEVLOADPREP,Synchronize.}
6697 */
6698static DECLCALLBACK(int) e1kLoadPrep(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6699{
6700 RT_NOREF(pSSM);
6701 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6702
6703 int rc = e1kCsEnter(pThis, VERR_SEM_BUSY);
6704 if (RT_UNLIKELY(rc != VINF_SUCCESS))
6705 return rc;
6706 e1kCsLeave(pThis);
6707 return VINF_SUCCESS;
6708}
6709
6710/**
6711 * @callback_method_impl{FNSSMDEVLOADEXEC}
6712 */
6713static DECLCALLBACK(int) e1kLoadExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
6714{
6715 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6716 int rc;
6717
6718 if ( uVersion != E1K_SAVEDSTATE_VERSION
6719#ifdef E1K_WITH_TXD_CACHE
6720 && uVersion != E1K_SAVEDSTATE_VERSION_VBOX_42_VTAG
6721#endif /* E1K_WITH_TXD_CACHE */
6722 && uVersion != E1K_SAVEDSTATE_VERSION_VBOX_41
6723 && uVersion != E1K_SAVEDSTATE_VERSION_VBOX_30)
6724 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
6725
6726 if ( uVersion > E1K_SAVEDSTATE_VERSION_VBOX_30
6727 || uPass != SSM_PASS_FINAL)
6728 {
6729 /* config checks */
6730 RTMAC macConfigured;
6731 rc = SSMR3GetMem(pSSM, &macConfigured, sizeof(macConfigured));
6732 AssertRCReturn(rc, rc);
6733 if ( memcmp(&macConfigured, &pThis->macConfigured, sizeof(macConfigured))
6734 && (uPass == 0 || !PDMDevHlpVMTeleportedAndNotFullyResumedYet(pDevIns)) )
6735 LogRel(("%s: The mac address differs: config=%RTmac saved=%RTmac\n", pThis->szPrf, &pThis->macConfigured, &macConfigured));
6736
6737 E1KCHIP eChip;
6738 rc = SSMR3GetU32(pSSM, &eChip);
6739 AssertRCReturn(rc, rc);
6740 if (eChip != pThis->eChip)
6741 return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("The chip type differs: config=%u saved=%u"), pThis->eChip, eChip);
6742 }
6743
6744 if (uPass == SSM_PASS_FINAL)
6745 {
6746 if (uVersion > E1K_SAVEDSTATE_VERSION_VBOX_30)
6747 {
6748 rc = pThis->eeprom.load(pSSM);
6749 AssertRCReturn(rc, rc);
6750 }
6751 /* the state */
6752 SSMR3GetMem(pSSM, &pThis->auRegs, sizeof(pThis->auRegs));
6753 SSMR3GetBool(pSSM, &pThis->fIntRaised);
6754 /** @todo: PHY could be made a separate device with its own versioning */
6755 Phy::loadState(pSSM, &pThis->phy);
6756 SSMR3GetU32(pSSM, &pThis->uSelectedReg);
6757 SSMR3GetMem(pSSM, &pThis->auMTA, sizeof(pThis->auMTA));
6758 SSMR3GetMem(pSSM, &pThis->aRecAddr, sizeof(pThis->aRecAddr));
6759 SSMR3GetMem(pSSM, &pThis->auVFTA, sizeof(pThis->auVFTA));
6760 SSMR3GetU64(pSSM, &pThis->u64AckedAt);
6761 SSMR3GetU16(pSSM, &pThis->u16RxBSize);
6762 //SSMR3GetBool(pSSM, pThis->fDelayInts);
6763 //SSMR3GetBool(pSSM, pThis->fIntMaskUsed);
6764 SSMR3GetU16(pSSM, &pThis->u16TxPktLen);
6765 SSMR3GetMem(pSSM, &pThis->aTxPacketFallback[0], pThis->u16TxPktLen);
6766 SSMR3GetBool(pSSM, &pThis->fIPcsum);
6767 SSMR3GetBool(pSSM, &pThis->fTCPcsum);
6768 SSMR3GetMem(pSSM, &pThis->contextTSE, sizeof(pThis->contextTSE));
6769 rc = SSMR3GetMem(pSSM, &pThis->contextNormal, sizeof(pThis->contextNormal));
6770 AssertRCReturn(rc, rc);
6771 if (uVersion > E1K_SAVEDSTATE_VERSION_VBOX_41)
6772 {
6773 SSMR3GetBool(pSSM, &pThis->fVTag);
6774 rc = SSMR3GetU16(pSSM, &pThis->u16VTagTCI);
6775 AssertRCReturn(rc, rc);
6776 }
6777 else
6778 {
6779 pThis->fVTag = false;
6780 pThis->u16VTagTCI = 0;
6781 }
6782#ifdef E1K_WITH_TXD_CACHE
6783 if (uVersion > E1K_SAVEDSTATE_VERSION_VBOX_42_VTAG)
6784 {
6785 rc = SSMR3GetU8(pSSM, &pThis->nTxDFetched);
6786 AssertRCReturn(rc, rc);
6787 if (pThis->nTxDFetched)
6788 SSMR3GetMem(pSSM, pThis->aTxDescriptors,
6789 pThis->nTxDFetched * sizeof(pThis->aTxDescriptors[0]));
6790 }
6791 else
6792 pThis->nTxDFetched = 0;
6793 /*
6794     * @todo: Perhaps we should not store the TXD cache as the entries can
6795     * simply be fetched again from guest memory. Or can't they?
6796 */
6797#endif /* E1K_WITH_TXD_CACHE */
6798#ifdef E1K_WITH_RXD_CACHE
6799 /*
6800 * There is no point in storing the RX descriptor cache in the saved
6801 * state, we just need to make sure it is empty.
6802 */
6803 pThis->iRxDCurrent = pThis->nRxDFetched = 0;
6804#endif /* E1K_WITH_RXD_CACHE */
6805 /* derived state */
6806 e1kSetupGsoCtx(&pThis->GsoCtx, &pThis->contextTSE);
6807
6808 E1kLog(("%s State has been restored\n", pThis->szPrf));
6809 e1kDumpState(pThis);
6810 }
6811 return VINF_SUCCESS;
6812}
6813
6814/**
6815 * @callback_method_impl{FNSSMDEVLOADDONE, Link status adjustments after loading.}
6816 */
6817static DECLCALLBACK(int) e1kLoadDone(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6818{
6819 RT_NOREF(pSSM);
6820 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6821
6822 /* Update promiscuous mode */
6823 if (pThis->pDrvR3)
6824 pThis->pDrvR3->pfnSetPromiscuousMode(pThis->pDrvR3,
6825 !!(RCTL & (RCTL_UPE | RCTL_MPE)));
6826
6827 /*
6828 * Force the link down here, since PDMNETWORKLINKSTATE_DOWN_RESUME is never
6829 * passed to us. We go through all this stuff if the link was up and we
6830     * weren't teleported.
6831 */
6832 if ( (STATUS & STATUS_LU)
6833 && !PDMDevHlpVMTeleportedAndNotFullyResumedYet(pDevIns)
6834 && pThis->cMsLinkUpDelay)
6835 {
6836 e1kR3LinkDownTemp(pThis);
6837 }
6838 return VINF_SUCCESS;
6839}
6840
6841
6842
6843/* -=-=-=-=- Debug Info + Log Types -=-=-=-=- */
6844
6845/**
6846 * @callback_method_impl{FNRTSTRFORMATTYPE}
6847 */
6848static DECLCALLBACK(size_t) e1kFmtRxDesc(PFNRTSTROUTPUT pfnOutput,
6849 void *pvArgOutput,
6850 const char *pszType,
6851 void const *pvValue,
6852 int cchWidth,
6853 int cchPrecision,
6854 unsigned fFlags,
6855 void *pvUser)
6856{
6857 RT_NOREF(cchWidth, cchPrecision, fFlags, pvUser);
6858 AssertReturn(strcmp(pszType, "e1krxd") == 0, 0);
6859 E1KRXDESC* pDesc = (E1KRXDESC*)pvValue;
6860 if (!pDesc)
6861 return RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "NULL_RXD");
6862
6863 size_t cbPrintf = 0;
6864 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Address=%16LX Length=%04X Csum=%04X\n",
6865 pDesc->u64BufAddr, pDesc->u16Length, pDesc->u16Checksum);
6866 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, " STA: %s %s %s %s %s %s %s ERR: %s %s %s %s SPECIAL: %s VLAN=%03x PRI=%x",
6867 pDesc->status.fPIF ? "PIF" : "pif",
6868 pDesc->status.fIPCS ? "IPCS" : "ipcs",
6869 pDesc->status.fTCPCS ? "TCPCS" : "tcpcs",
6870 pDesc->status.fVP ? "VP" : "vp",
6871 pDesc->status.fIXSM ? "IXSM" : "ixsm",
6872 pDesc->status.fEOP ? "EOP" : "eop",
6873 pDesc->status.fDD ? "DD" : "dd",
6874 pDesc->status.fRXE ? "RXE" : "rxe",
6875 pDesc->status.fIPE ? "IPE" : "ipe",
6876 pDesc->status.fTCPE ? "TCPE" : "tcpe",
6877 pDesc->status.fCE ? "CE" : "ce",
6878 E1K_SPEC_CFI(pDesc->status.u16Special) ? "CFI" :"cfi",
6879 E1K_SPEC_VLAN(pDesc->status.u16Special),
6880 E1K_SPEC_PRI(pDesc->status.u16Special));
6881 return cbPrintf;
6882}
6883
6884/**
6885 * @callback_method_impl{FNRTSTRFORMATTYPE}
6886 */
6887static DECLCALLBACK(size_t) e1kFmtTxDesc(PFNRTSTROUTPUT pfnOutput,
6888 void *pvArgOutput,
6889 const char *pszType,
6890 void const *pvValue,
6891 int cchWidth,
6892 int cchPrecision,
6893 unsigned fFlags,
6894 void *pvUser)
6895{
6896 RT_NOREF(cchWidth, cchPrecision, fFlags, pvUser);
6897 AssertReturn(strcmp(pszType, "e1ktxd") == 0, 0);
6898 E1KTXDESC *pDesc = (E1KTXDESC*)pvValue;
6899 if (!pDesc)
6900 return RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "NULL_TXD");
6901
6902 size_t cbPrintf = 0;
6903 switch (e1kGetDescType(pDesc))
6904 {
6905 case E1K_DTYP_CONTEXT:
6906 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Type=Context\n"
6907 " IPCSS=%02X IPCSO=%02X IPCSE=%04X TUCSS=%02X TUCSO=%02X TUCSE=%04X\n"
6908 " TUCMD:%s%s%s %s %s PAYLEN=%04x HDRLEN=%04x MSS=%04x STA: %s",
6909 pDesc->context.ip.u8CSS, pDesc->context.ip.u8CSO, pDesc->context.ip.u16CSE,
6910 pDesc->context.tu.u8CSS, pDesc->context.tu.u8CSO, pDesc->context.tu.u16CSE,
6911 pDesc->context.dw2.fIDE ? " IDE":"",
6912 pDesc->context.dw2.fRS ? " RS" :"",
6913 pDesc->context.dw2.fTSE ? " TSE":"",
6914 pDesc->context.dw2.fIP ? "IPv4":"IPv6",
6915 pDesc->context.dw2.fTCP ? "TCP":"UDP",
6916 pDesc->context.dw2.u20PAYLEN,
6917 pDesc->context.dw3.u8HDRLEN,
6918 pDesc->context.dw3.u16MSS,
6919 pDesc->context.dw3.fDD?"DD":"");
6920 break;
6921 case E1K_DTYP_DATA:
6922 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Type=Data Address=%16LX DTALEN=%05X\n"
6923 " DCMD:%s%s%s%s%s%s%s STA:%s%s%s POPTS:%s%s SPECIAL:%s VLAN=%03x PRI=%x",
6924 pDesc->data.u64BufAddr,
6925 pDesc->data.cmd.u20DTALEN,
6926 pDesc->data.cmd.fIDE ? " IDE" :"",
6927 pDesc->data.cmd.fVLE ? " VLE" :"",
6928 pDesc->data.cmd.fRPS ? " RPS" :"",
6929 pDesc->data.cmd.fRS ? " RS" :"",
6930 pDesc->data.cmd.fTSE ? " TSE" :"",
6931 pDesc->data.cmd.fIFCS? " IFCS":"",
6932 pDesc->data.cmd.fEOP ? " EOP" :"",
6933 pDesc->data.dw3.fDD ? " DD" :"",
6934 pDesc->data.dw3.fEC ? " EC" :"",
6935 pDesc->data.dw3.fLC ? " LC" :"",
6936 pDesc->data.dw3.fTXSM? " TXSM":"",
6937 pDesc->data.dw3.fIXSM? " IXSM":"",
6938 E1K_SPEC_CFI(pDesc->data.dw3.u16Special) ? "CFI" :"cfi",
6939 E1K_SPEC_VLAN(pDesc->data.dw3.u16Special),
6940 E1K_SPEC_PRI(pDesc->data.dw3.u16Special));
6941 break;
6942 case E1K_DTYP_LEGACY:
6943 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Type=Legacy Address=%16LX DTALEN=%05X\n"
6944 " CMD:%s%s%s%s%s%s%s STA:%s%s%s CSO=%02x CSS=%02x SPECIAL:%s VLAN=%03x PRI=%x",
6945 pDesc->data.u64BufAddr,
6946 pDesc->legacy.cmd.u16Length,
6947 pDesc->legacy.cmd.fIDE ? " IDE" :"",
6948 pDesc->legacy.cmd.fVLE ? " VLE" :"",
6949 pDesc->legacy.cmd.fRPS ? " RPS" :"",
6950 pDesc->legacy.cmd.fRS ? " RS" :"",
6951 pDesc->legacy.cmd.fIC ? " IC" :"",
6952 pDesc->legacy.cmd.fIFCS? " IFCS":"",
6953 pDesc->legacy.cmd.fEOP ? " EOP" :"",
6954 pDesc->legacy.dw3.fDD ? " DD" :"",
6955 pDesc->legacy.dw3.fEC ? " EC" :"",
6956 pDesc->legacy.dw3.fLC ? " LC" :"",
6957 pDesc->legacy.cmd.u8CSO,
6958 pDesc->legacy.dw3.u8CSS,
6959 E1K_SPEC_CFI(pDesc->legacy.dw3.u16Special) ? "CFI" :"cfi",
6960 E1K_SPEC_VLAN(pDesc->legacy.dw3.u16Special),
6961 E1K_SPEC_PRI(pDesc->legacy.dw3.u16Special));
6962 break;
6963 default:
6964 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Invalid Transmit Descriptor");
6965 break;
6966 }
6967
6968 return cbPrintf;
6969}
6970
6971/** Initializes debug helpers (logging format types). */
6972static int e1kInitDebugHelpers(void)
6973{
6974 int rc = VINF_SUCCESS;
6975 static bool s_fHelpersRegistered = false;
6976 if (!s_fHelpersRegistered)
6977 {
6978 s_fHelpersRegistered = true;
6979 rc = RTStrFormatTypeRegister("e1krxd", e1kFmtRxDesc, NULL);
6980 AssertRCReturn(rc, rc);
6981 rc = RTStrFormatTypeRegister("e1ktxd", e1kFmtTxDesc, NULL);
6982 AssertRCReturn(rc, rc);
6983 }
6984 return rc;
6985}
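
/*
 * Illustrative sketch (example only, hypothetical helper): once registered, the
 * custom format types are available to every IPRT format call through the %R[...]
 * specifier, which is how e1kInfo() below prints the descriptor rings.
 */
static void exampleLogRxDescriptor(PE1KSTATE pThis, E1KRXDESC const *pDesc)
{
    LogRel(("%s RXD: %R[e1krxd]\n", pThis->szPrf, pDesc));
}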
6986
6987/**
6988 * Status info callback.
6989 *
6990 * @param pDevIns The device instance.
6991 * @param pHlp The output helpers.
6992 * @param pszArgs The arguments.
6993 */
6994static DECLCALLBACK(void) e1kInfo(PPDMDEVINS pDevIns, PCDBGFINFOHLP pHlp, const char *pszArgs)
6995{
6996 RT_NOREF(pszArgs);
6997 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6998 unsigned i;
6999 // bool fRcvRing = false;
7000 // bool fXmtRing = false;
7001
7002 /*
7003 * Parse args.
7004 if (pszArgs)
7005 {
7006 fRcvRing = strstr(pszArgs, "verbose") || strstr(pszArgs, "rcv");
7007 fXmtRing = strstr(pszArgs, "verbose") || strstr(pszArgs, "xmt");
7008 }
7009 */
7010
7011 /*
7012 * Show info.
7013 */
7014 pHlp->pfnPrintf(pHlp, "E1000 #%d: port=%RTiop mmio=%RGp mac-cfg=%RTmac %s%s%s\n",
7015 pDevIns->iInstance, pThis->IOPortBase, pThis->addrMMReg,
7016 &pThis->macConfigured, g_Chips[pThis->eChip].pcszName,
7017 pThis->fRCEnabled ? " GC" : "", pThis->fR0Enabled ? " R0" : "");
7018
7019 e1kCsEnter(pThis, VERR_INTERNAL_ERROR); /* Not sure why but PCNet does it */
7020
7021 for (i = 0; i < E1K_NUM_OF_32BIT_REGS; ++i)
7022 pHlp->pfnPrintf(pHlp, "%8.8s = %08x\n", g_aE1kRegMap[i].abbrev, pThis->auRegs[i]);
7023
7024 for (i = 0; i < RT_ELEMENTS(pThis->aRecAddr.array); i++)
7025 {
7026 E1KRAELEM* ra = pThis->aRecAddr.array + i;
7027 if (ra->ctl & RA_CTL_AV)
7028 {
7029 const char *pcszTmp;
7030 switch (ra->ctl & RA_CTL_AS)
7031 {
7032 case 0: pcszTmp = "DST"; break;
7033 case 1: pcszTmp = "SRC"; break;
7034 default: pcszTmp = "reserved";
7035 }
7036 pHlp->pfnPrintf(pHlp, "RA%02d: %s %RTmac\n", i, pcszTmp, ra->addr);
7037 }
7038 }
7039 unsigned cDescs = RDLEN / sizeof(E1KRXDESC);
7040 uint32_t rdh = RDH;
7041 pHlp->pfnPrintf(pHlp, "\n-- Receive Descriptors (%d total) --\n", cDescs);
7042 for (i = 0; i < cDescs; ++i)
7043 {
7044 E1KRXDESC desc;
7045 PDMDevHlpPhysRead(pDevIns, e1kDescAddr(RDBAH, RDBAL, i),
7046 &desc, sizeof(desc));
7047 if (i == rdh)
7048 pHlp->pfnPrintf(pHlp, ">>> ");
7049 pHlp->pfnPrintf(pHlp, "%RGp: %R[e1krxd]\n", e1kDescAddr(RDBAH, RDBAL, i), &desc);
7050 }
7051#ifdef E1K_WITH_RXD_CACHE
7052 pHlp->pfnPrintf(pHlp, "\n-- Receive Descriptors in Cache (at %d (RDH %d)/ fetched %d / max %d) --\n",
7053 pThis->iRxDCurrent, RDH, pThis->nRxDFetched, E1K_RXD_CACHE_SIZE);
7054 if (rdh > pThis->iRxDCurrent)
7055 rdh -= pThis->iRxDCurrent;
7056 else
7057 rdh = cDescs + rdh - pThis->iRxDCurrent;
7058 for (i = 0; i < pThis->nRxDFetched; ++i)
7059 {
7060 if (i == pThis->iRxDCurrent)
7061 pHlp->pfnPrintf(pHlp, ">>> ");
7062 pHlp->pfnPrintf(pHlp, "%RGp: %R[e1krxd]\n",
7063 e1kDescAddr(RDBAH, RDBAL, rdh++ % cDescs),
7064 &pThis->aRxDescriptors[i]);
7065 }
7066#endif /* E1K_WITH_RXD_CACHE */
7067
7068 cDescs = TDLEN / sizeof(E1KTXDESC);
7069 uint32_t tdh = TDH;
7070 pHlp->pfnPrintf(pHlp, "\n-- Transmit Descriptors (%d total) --\n", cDescs);
7071 for (i = 0; i < cDescs; ++i)
7072 {
7073 E1KTXDESC desc;
7074 PDMDevHlpPhysRead(pDevIns, e1kDescAddr(TDBAH, TDBAL, i),
7075 &desc, sizeof(desc));
7076 if (i == tdh)
7077 pHlp->pfnPrintf(pHlp, ">>> ");
7078 pHlp->pfnPrintf(pHlp, "%RGp: %R[e1ktxd]\n", e1kDescAddr(TDBAH, TDBAL, i), &desc);
7079 }
7080#ifdef E1K_WITH_TXD_CACHE
7081 pHlp->pfnPrintf(pHlp, "\n-- Transmit Descriptors in Cache (at %d (TDH %d)/ fetched %d / max %d) --\n",
7082 pThis->iTxDCurrent, TDH, pThis->nTxDFetched, E1K_TXD_CACHE_SIZE);
7083 if (tdh > pThis->iTxDCurrent)
7084 tdh -= pThis->iTxDCurrent;
7085 else
7086 tdh = cDescs + tdh - pThis->iTxDCurrent;
7087 for (i = 0; i < pThis->nTxDFetched; ++i)
7088 {
7089 if (i == pThis->iTxDCurrent)
7090 pHlp->pfnPrintf(pHlp, ">>> ");
7091 pHlp->pfnPrintf(pHlp, "%RGp: %R[e1ktxd]\n",
7092 e1kDescAddr(TDBAH, TDBAL, tdh++ % cDescs),
7093 &pThis->aTxDescriptors[i]);
7094 }
7095#endif /* E1K_WITH_TXD_CACHE */
7096
7097
7098#ifdef E1K_INT_STATS
7099 pHlp->pfnPrintf(pHlp, "Interrupt attempts: %d\n", pThis->uStatIntTry);
7100 pHlp->pfnPrintf(pHlp, "Interrupts raised : %d\n", pThis->uStatInt);
7101 pHlp->pfnPrintf(pHlp, "Interrupts lowered: %d\n", pThis->uStatIntLower);
7102 pHlp->pfnPrintf(pHlp, "Interrupts delayed: %d\n", pThis->uStatIntDly);
7103 pHlp->pfnPrintf(pHlp, "Disabled delayed: %d\n", pThis->uStatDisDly);
7104 pHlp->pfnPrintf(pHlp, "Interrupts skipped: %d\n", pThis->uStatIntSkip);
7105 pHlp->pfnPrintf(pHlp, "Masked interrupts : %d\n", pThis->uStatIntMasked);
7106 pHlp->pfnPrintf(pHlp, "Early interrupts : %d\n", pThis->uStatIntEarly);
7107 pHlp->pfnPrintf(pHlp, "Late interrupts : %d\n", pThis->uStatIntLate);
7108 pHlp->pfnPrintf(pHlp, "Lost interrupts : %d\n", pThis->iStatIntLost);
7109 pHlp->pfnPrintf(pHlp, "Interrupts by RX : %d\n", pThis->uStatIntRx);
7110 pHlp->pfnPrintf(pHlp, "Interrupts by TX : %d\n", pThis->uStatIntTx);
7111 pHlp->pfnPrintf(pHlp, "Interrupts by ICS : %d\n", pThis->uStatIntICS);
7112 pHlp->pfnPrintf(pHlp, "Interrupts by RDTR: %d\n", pThis->uStatIntRDTR);
7113 pHlp->pfnPrintf(pHlp, "Interrupts by RDMT: %d\n", pThis->uStatIntRXDMT0);
7114 pHlp->pfnPrintf(pHlp, "Interrupts by TXQE: %d\n", pThis->uStatIntTXQE);
7115 pHlp->pfnPrintf(pHlp, "TX int delay asked: %d\n", pThis->uStatTxIDE);
7116 pHlp->pfnPrintf(pHlp, "TX delayed: %d\n", pThis->uStatTxDelayed);
7117 pHlp->pfnPrintf(pHlp, "TX delayed expired: %d\n", pThis->uStatTxDelayExp);
7118 pHlp->pfnPrintf(pHlp, "TX no report asked: %d\n", pThis->uStatTxNoRS);
7119 pHlp->pfnPrintf(pHlp, "TX abs timer expd : %d\n", pThis->uStatTAD);
7120 pHlp->pfnPrintf(pHlp, "TX int timer expd : %d\n", pThis->uStatTID);
7121 pHlp->pfnPrintf(pHlp, "RX abs timer expd : %d\n", pThis->uStatRAD);
7122 pHlp->pfnPrintf(pHlp, "RX int timer expd : %d\n", pThis->uStatRID);
7123 pHlp->pfnPrintf(pHlp, "TX CTX descriptors: %d\n", pThis->uStatDescCtx);
7124 pHlp->pfnPrintf(pHlp, "TX DAT descriptors: %d\n", pThis->uStatDescDat);
7125 pHlp->pfnPrintf(pHlp, "TX LEG descriptors: %d\n", pThis->uStatDescLeg);
7126 pHlp->pfnPrintf(pHlp, "Received frames : %d\n", pThis->uStatRxFrm);
7127 pHlp->pfnPrintf(pHlp, "Transmitted frames: %d\n", pThis->uStatTxFrm);
7128 pHlp->pfnPrintf(pHlp, "TX frames up to 1514: %d\n", pThis->uStatTx1514);
7129 pHlp->pfnPrintf(pHlp, "TX frames up to 2962: %d\n", pThis->uStatTx2962);
7130 pHlp->pfnPrintf(pHlp, "TX frames up to 4410: %d\n", pThis->uStatTx4410);
7131 pHlp->pfnPrintf(pHlp, "TX frames up to 5858: %d\n", pThis->uStatTx5858);
7132 pHlp->pfnPrintf(pHlp, "TX frames up to 7306: %d\n", pThis->uStatTx7306);
7133 pHlp->pfnPrintf(pHlp, "TX frames up to 8754: %d\n", pThis->uStatTx8754);
7134 pHlp->pfnPrintf(pHlp, "TX frames up to 16384: %d\n", pThis->uStatTx16384);
7135 pHlp->pfnPrintf(pHlp, "TX frames up to 32768: %d\n", pThis->uStatTx32768);
7136 pHlp->pfnPrintf(pHlp, "Larger TX frames : %d\n", pThis->uStatTxLarge);
7137#endif /* E1K_INT_STATS */
7138
7139 e1kCsLeave(pThis);
7140}
7141
7142
7143
7144/* -=-=-=-=- PDMDEVREG -=-=-=-=- */
7145
7146/**
7147 * Detach notification.
7148 *
7149 * One port on the network card has been disconnected from the network.
7150 *
7151 * @param pDevIns The device instance.
7152 * @param iLUN The logical unit which is being detached.
7153 * @param fFlags Flags, combination of the PDMDEVATT_FLAGS_* \#defines.
7154 */
7155static DECLCALLBACK(void) e1kR3Detach(PPDMDEVINS pDevIns, unsigned iLUN, uint32_t fFlags)
7156{
7157 RT_NOREF(fFlags);
7158 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
7159 Log(("%s e1kR3Detach:\n", pThis->szPrf));
7160
7161 AssertLogRelReturnVoid(iLUN == 0);
7162
7163 PDMCritSectEnter(&pThis->cs, VERR_SEM_BUSY);
7164
7165    /** @todo: r=pritesh still need to check if I missed
7166     * cleaning something up in this function
7167 */
7168
7169 /*
7170 * Zero some important members.
7171 */
7172 pThis->pDrvBase = NULL;
7173 pThis->pDrvR3 = NULL;
7174 pThis->pDrvR0 = NIL_RTR0PTR;
7175 pThis->pDrvRC = NIL_RTRCPTR;
7176
7177 PDMCritSectLeave(&pThis->cs);
7178}
7179
7180/**
7181 * Attach the Network attachment.
7182 *
7183 * One port on the network card has been connected to a network.
7184 *
7185 * @returns VBox status code.
7186 * @param pDevIns The device instance.
7187 * @param iLUN The logical unit which is being attached.
7188 * @param fFlags Flags, combination of the PDMDEVATT_FLAGS_* \#defines.
7189 *
7190 * @remarks This code path is not used during construction.
7191 */
7192static DECLCALLBACK(int) e1kR3Attach(PPDMDEVINS pDevIns, unsigned iLUN, uint32_t fFlags)
7193{
7194 RT_NOREF(fFlags);
7195 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
7196 LogFlow(("%s e1kR3Attach:\n", pThis->szPrf));
7197
7198 AssertLogRelReturn(iLUN == 0, VERR_PDM_NO_SUCH_LUN);
7199
7200 PDMCritSectEnter(&pThis->cs, VERR_SEM_BUSY);
7201
7202 /*
7203 * Attach the driver.
7204 */
7205 int rc = PDMDevHlpDriverAttach(pDevIns, 0, &pThis->IBase, &pThis->pDrvBase, "Network Port");
7206 if (RT_SUCCESS(rc))
7207 {
7208 if (rc == VINF_NAT_DNS)
7209 {
7210#ifdef RT_OS_LINUX
7211 PDMDevHlpVMSetRuntimeError(pDevIns, 0 /*fFlags*/, "NoDNSforNAT",
7212 N_("A Domain Name Server (DNS) for NAT networking could not be determined. Please check your /etc/resolv.conf for <tt>nameserver</tt> entries. Either add one manually (<i>man resolv.conf</i>) or ensure that your host is correctly connected to an ISP. If you ignore this warning the guest will not be able to perform nameserver lookups and it will probably observe delays if trying so"));
7213#else
7214 PDMDevHlpVMSetRuntimeError(pDevIns, 0 /*fFlags*/, "NoDNSforNAT",
7215 N_("A Domain Name Server (DNS) for NAT networking could not be determined. Ensure that your host is correctly connected to an ISP. If you ignore this warning the guest will not be able to perform nameserver lookups and it will probably observe delays if trying so"));
7216#endif
7217 }
7218 pThis->pDrvR3 = PDMIBASE_QUERY_INTERFACE(pThis->pDrvBase, PDMINETWORKUP);
7219 AssertMsgStmt(pThis->pDrvR3, ("Failed to obtain the PDMINETWORKUP interface!\n"),
7220 rc = VERR_PDM_MISSING_INTERFACE_BELOW);
7221 if (RT_SUCCESS(rc))
7222 {
7223 PPDMIBASER0 pBaseR0 = PDMIBASE_QUERY_INTERFACE(pThis->pDrvBase, PDMIBASER0);
7224 pThis->pDrvR0 = pBaseR0 ? pBaseR0->pfnQueryInterface(pBaseR0, PDMINETWORKUP_IID) : NIL_RTR0PTR;
7225
7226 PPDMIBASERC pBaseRC = PDMIBASE_QUERY_INTERFACE(pThis->pDrvBase, PDMIBASERC);
7227            pThis->pDrvRC = pBaseRC ? pBaseRC->pfnQueryInterface(pBaseRC, PDMINETWORKUP_IID) : NIL_RTRCPTR;
7228 }
7229 }
7230 else if ( rc == VERR_PDM_NO_ATTACHED_DRIVER
7231 || rc == VERR_PDM_CFG_MISSING_DRIVER_NAME)
7232 {
7233 /* This should never happen because this function is not called
7234 * if there is no driver to attach! */
7235 Log(("%s No attached driver!\n", pThis->szPrf));
7236 }
7237
7238 /*
7239 * Temporary set the link down if it was up so that the guest
7240     * Temporarily set the link down if it was up so that the guest
7241     * will know that we have changed the configuration of the
7242     * network card.
7243 if ((STATUS & STATUS_LU) && RT_SUCCESS(rc))
7244 e1kR3LinkDownTemp(pThis);
7245
7246 PDMCritSectLeave(&pThis->cs);
7247 return rc;
7248
7249}
7250
7251/**
7252 * @copydoc FNPDMDEVPOWEROFF
7253 */
7254static DECLCALLBACK(void) e1kR3PowerOff(PPDMDEVINS pDevIns)
7255{
7256 /* Poke thread waiting for buffer space. */
7257 e1kWakeupReceive(pDevIns);
7258}
7259
7260/**
7261 * @copydoc FNPDMDEVRESET
7262 */
7263static DECLCALLBACK(void) e1kR3Reset(PPDMDEVINS pDevIns)
7264{
7265 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
7266#ifdef E1K_TX_DELAY
7267 e1kCancelTimer(pThis, pThis->CTX_SUFF(pTXDTimer));
7268#endif /* E1K_TX_DELAY */
7269 e1kCancelTimer(pThis, pThis->CTX_SUFF(pIntTimer));
7270 e1kCancelTimer(pThis, pThis->CTX_SUFF(pLUTimer));
7271 e1kXmitFreeBuf(pThis);
7272 pThis->u16TxPktLen = 0;
7273 pThis->fIPcsum = false;
7274 pThis->fTCPcsum = false;
7275 pThis->fIntMaskUsed = false;
7276 pThis->fDelayInts = false;
7277 pThis->fLocked = false;
7278 pThis->u64AckedAt = 0;
7279 e1kHardReset(pThis);
7280}
7281
7282/**
7283 * @copydoc FNPDMDEVSUSPEND
7284 */
7285static DECLCALLBACK(void) e1kR3Suspend(PPDMDEVINS pDevIns)
7286{
7287 /* Poke thread waiting for buffer space. */
7288 e1kWakeupReceive(pDevIns);
7289}
7290
7291/**
7292 * Device relocation callback.
7293 *
7294 * When this callback is called, the device instance data (and the GC
7295 * component, if the device has one) is being relocated, and/or the selectors
7296 * have been changed. The device must use this chance to perform the
7297 * necessary pointer relocations and data updates.
7298 *
7299 * Before the GC code is executed the first time, this function will be
7300 * called with a 0 delta so GC pointer calculations can be done in one place.
7301 *
7302 * @param pDevIns Pointer to the device instance.
7303 * @param offDelta The relocation delta relative to the old location.
7304 *
7305 * @remark A relocation CANNOT fail.
7306 */
7307static DECLCALLBACK(void) e1kR3Relocate(PPDMDEVINS pDevIns, RTGCINTPTR offDelta)
7308{
7309 RT_NOREF(offDelta);
7310 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
7311 pThis->pDevInsRC = PDMDEVINS_2_RCPTR(pDevIns);
7312 pThis->pTxQueueRC = PDMQueueRCPtr(pThis->pTxQueueR3);
7313 pThis->pCanRxQueueRC = PDMQueueRCPtr(pThis->pCanRxQueueR3);
7314#ifdef E1K_USE_RX_TIMERS
7315 pThis->pRIDTimerRC = TMTimerRCPtr(pThis->pRIDTimerR3);
7316 pThis->pRADTimerRC = TMTimerRCPtr(pThis->pRADTimerR3);
7317#endif /* E1K_USE_RX_TIMERS */
7318#ifdef E1K_USE_TX_TIMERS
7319 pThis->pTIDTimerRC = TMTimerRCPtr(pThis->pTIDTimerR3);
7320# ifndef E1K_NO_TAD
7321 pThis->pTADTimerRC = TMTimerRCPtr(pThis->pTADTimerR3);
7322# endif /* E1K_NO_TAD */
7323#endif /* E1K_USE_TX_TIMERS */
7324#ifdef E1K_TX_DELAY
7325 pThis->pTXDTimerRC = TMTimerRCPtr(pThis->pTXDTimerR3);
7326#endif /* E1K_TX_DELAY */
7327 pThis->pIntTimerRC = TMTimerRCPtr(pThis->pIntTimerR3);
7328 pThis->pLUTimerRC = TMTimerRCPtr(pThis->pLUTimerR3);
7329}
7330
7331/**
7332 * Destruct a device instance.
7333 *
7334 * We need to free non-VM resources only.
7335 *
7336 * @returns VBox status code.
7337 * @param pDevIns The device instance data.
7338 * @thread EMT
7339 */
7340static DECLCALLBACK(int) e1kR3Destruct(PPDMDEVINS pDevIns)
7341{
7342 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
7343 PDMDEV_CHECK_VERSIONS_RETURN_QUIET(pDevIns);
7344
7345 e1kDumpState(pThis);
7346 E1kLog(("%s Destroying instance\n", pThis->szPrf));
7347 if (PDMCritSectIsInitialized(&pThis->cs))
7348 {
7349 if (pThis->hEventMoreRxDescAvail != NIL_RTSEMEVENT)
7350 {
7351 RTSemEventSignal(pThis->hEventMoreRxDescAvail);
7352 RTSemEventDestroy(pThis->hEventMoreRxDescAvail);
7353 pThis->hEventMoreRxDescAvail = NIL_RTSEMEVENT;
7354 }
7355#ifdef E1K_WITH_TX_CS
7356 PDMR3CritSectDelete(&pThis->csTx);
7357#endif /* E1K_WITH_TX_CS */
7358 PDMR3CritSectDelete(&pThis->csRx);
7359 PDMR3CritSectDelete(&pThis->cs);
7360 }
7361 return VINF_SUCCESS;
7362}
7363
7364
7365/**
7366 * Set PCI configuration space registers.
7367 *
7368 * @param pPciDev Pointer to the PCI device structure.
7369 * @param eChip The emulated chip type (selects the PCI IDs from g_Chips).
7369 * @thread EMT
7370 */
7371static DECLCALLBACK(void) e1kConfigurePciDev(PPCIDEVICE pPciDev, E1KCHIP eChip)
7372{
7373 Assert(eChip < RT_ELEMENTS(g_Chips));
7374 /* Configure PCI Device, assume 32-bit mode ******************************/
7375 PCIDevSetVendorId(pPciDev, g_Chips[eChip].uPCIVendorId);
7376 PCIDevSetDeviceId(pPciDev, g_Chips[eChip].uPCIDeviceId);
7377 PCIDevSetWord( pPciDev, VBOX_PCI_SUBSYSTEM_VENDOR_ID, g_Chips[eChip].uPCISubsystemVendorId);
7378 PCIDevSetWord( pPciDev, VBOX_PCI_SUBSYSTEM_ID, g_Chips[eChip].uPCISubsystemId);
7379
7380 PCIDevSetWord( pPciDev, VBOX_PCI_COMMAND, 0x0000);
7381 /* DEVSEL Timing (medium device), 66 MHz Capable, New capabilities */
7382 PCIDevSetWord( pPciDev, VBOX_PCI_STATUS,
7383 VBOX_PCI_STATUS_DEVSEL_MEDIUM | VBOX_PCI_STATUS_CAP_LIST | VBOX_PCI_STATUS_66MHZ);
7384 /* Stepping A2 */
7385 PCIDevSetByte( pPciDev, VBOX_PCI_REVISION_ID, 0x02);
7386 /* Ethernet adapter */
7387 PCIDevSetByte( pPciDev, VBOX_PCI_CLASS_PROG, 0x00);
7388 PCIDevSetWord( pPciDev, VBOX_PCI_CLASS_DEVICE, 0x0200);
7389 /* normal single function Ethernet controller */
7390 PCIDevSetByte( pPciDev, VBOX_PCI_HEADER_TYPE, 0x00);
7391 /* Memory Register Base Address */
7392 PCIDevSetDWord(pPciDev, VBOX_PCI_BASE_ADDRESS_0, 0x00000000);
7393 /* Memory Flash Base Address */
7394 PCIDevSetDWord(pPciDev, VBOX_PCI_BASE_ADDRESS_1, 0x00000000);
7395 /* IO Register Base Address */
7396 PCIDevSetDWord(pPciDev, VBOX_PCI_BASE_ADDRESS_2, 0x00000001);
7397 /* Expansion ROM Base Address */
7398 PCIDevSetDWord(pPciDev, VBOX_PCI_ROM_ADDRESS, 0x00000000);
7399 /* Capabilities Pointer */
7400 PCIDevSetByte( pPciDev, VBOX_PCI_CAPABILITY_LIST, 0xDC);
7401 /* Interrupt Pin: INTA# */
7402 PCIDevSetByte( pPciDev, VBOX_PCI_INTERRUPT_PIN, 0x01);
7403 /* Max_Lat/Min_Gnt: very high priority and time slice */
7404 PCIDevSetByte( pPciDev, VBOX_PCI_MIN_GNT, 0xFF);
7405 PCIDevSetByte( pPciDev, VBOX_PCI_MAX_LAT, 0x00);
7406
7407 /* PCI Power Management Registers ****************************************/
7408 /* Capability ID: PCI Power Management Registers */
7409 PCIDevSetByte( pPciDev, 0xDC, VBOX_PCI_CAP_ID_PM);
7410 /* Next Item Pointer: PCI-X */
7411 PCIDevSetByte( pPciDev, 0xDC + 1, 0xE4);
7412 /* Power Management Capabilities: PM disabled, DSI */
7413 PCIDevSetWord( pPciDev, 0xDC + 2,
7414 0x0002 | VBOX_PCI_PM_CAP_DSI);
7415 /* Power Management Control / Status Register: PM disabled */
7416 PCIDevSetWord( pPciDev, 0xDC + 4, 0x0000);
7417 /* PMCSR_BSE Bridge Support Extensions: Not supported */
7418 PCIDevSetByte( pPciDev, 0xDC + 6, 0x00);
7419 /* Data Register: PM disabled, always 0 */
7420 PCIDevSetByte( pPciDev, 0xDC + 7, 0x00);
7421
7422 /* PCI-X Configuration Registers *****************************************/
7423 /* Capability ID: PCI-X Configuration Registers */
7424 PCIDevSetByte( pPciDev, 0xE4, VBOX_PCI_CAP_ID_PCIX);
7425#ifdef E1K_WITH_MSI
7426 PCIDevSetByte( pPciDev, 0xE4 + 1, 0x80);
7427#else
7428 /* Next Item Pointer: None (Message Signalled Interrupts are disabled) */
7429 PCIDevSetByte( pPciDev, 0xE4 + 1, 0x00);
7430#endif
7431 /* PCI-X Command: Enable Relaxed Ordering */
7432 PCIDevSetWord( pPciDev, 0xE4 + 2, VBOX_PCI_X_CMD_ERO);
7433    /* PCI-X Status: 32-bit, 66 MHz */
7434 /** @todo is this value really correct? fff8 doesn't look like actual PCI address */
7435 PCIDevSetDWord(pPciDev, 0xE4 + 4, 0x0040FFF8);
7436}
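
/*
 * Illustrative sketch (an assumption, not part of the device): how a guest would
 * walk the capability chain laid out above.  The capability pointer at config
 * offset 0x34, the ID byte at +0 and the next pointer at +1 are standard PCI;
 * pfnCfgRead8 is a hypothetical config-space read helper.
 */
static void exampleWalkCapabilityList(uint8_t (*pfnCfgRead8)(uint8_t offCfg))
{
    uint8_t offCap = pfnCfgRead8(0x34);               /* 0xDC for this device */
    while (offCap != 0)
    {
        uint8_t uCapId  = pfnCfgRead8(offCap);        /* 0x01 = PM, 0x07 = PCI-X, 0x05 = MSI */
        uint8_t offNext = pfnCfgRead8(offCap + 1);    /* 0xDC -> 0xE4 -> 0x00 (0x80 with MSI) */
        RT_NOREF(uCapId);                             /* ... inspect the capability body here ... */
        offCap = offNext;
    }
}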
7437
7438/**
7439 * @interface_method_impl{PDMDEVREG,pfnConstruct}
7440 */
7441static DECLCALLBACK(int) e1kR3Construct(PPDMDEVINS pDevIns, int iInstance, PCFGMNODE pCfg)
7442{
7443 PE1KSTATE pThis = PDMINS_2_DATA(pDevIns, E1KSTATE*);
7444 int rc;
7445 PDMDEV_CHECK_VERSIONS_RETURN(pDevIns);
7446
7447 /*
7448 * Initialize the instance data (state).
7449 * Note! Caller has initialized it to ZERO already.
7450 */
7451 RTStrPrintf(pThis->szPrf, sizeof(pThis->szPrf), "E1000#%d", iInstance);
7452 E1kLog(("%s Constructing new instance sizeof(E1KRXDESC)=%d\n", pThis->szPrf, sizeof(E1KRXDESC)));
7453 pThis->hEventMoreRxDescAvail = NIL_RTSEMEVENT;
7454 pThis->pDevInsR3 = pDevIns;
7455 pThis->pDevInsR0 = PDMDEVINS_2_R0PTR(pDevIns);
7456 pThis->pDevInsRC = PDMDEVINS_2_RCPTR(pDevIns);
7457 pThis->u16TxPktLen = 0;
7458 pThis->fIPcsum = false;
7459 pThis->fTCPcsum = false;
7460 pThis->fIntMaskUsed = false;
7461 pThis->fDelayInts = false;
7462 pThis->fLocked = false;
7463 pThis->u64AckedAt = 0;
7464 pThis->led.u32Magic = PDMLED_MAGIC;
7465 pThis->u32PktNo = 1;
7466
7467 /* Interfaces */
7468 pThis->IBase.pfnQueryInterface = e1kR3QueryInterface;
7469
7470 pThis->INetworkDown.pfnWaitReceiveAvail = e1kR3NetworkDown_WaitReceiveAvail;
7471 pThis->INetworkDown.pfnReceive = e1kR3NetworkDown_Receive;
7472 pThis->INetworkDown.pfnXmitPending = e1kR3NetworkDown_XmitPending;
7473
7474 pThis->ILeds.pfnQueryStatusLed = e1kR3QueryStatusLed;
7475
7476 pThis->INetworkConfig.pfnGetMac = e1kR3GetMac;
7477 pThis->INetworkConfig.pfnGetLinkState = e1kR3GetLinkState;
7478 pThis->INetworkConfig.pfnSetLinkState = e1kR3SetLinkState;
7479
7480 /*
7481 * Internal validations.
7482 */
7483 for (uint32_t iReg = 1; iReg < E1K_NUM_OF_BINARY_SEARCHABLE; iReg++)
7484 AssertLogRelMsgReturn( g_aE1kRegMap[iReg].offset > g_aE1kRegMap[iReg - 1].offset
7485 && g_aE1kRegMap[iReg].offset + g_aE1kRegMap[iReg].size
7486 >= g_aE1kRegMap[iReg - 1].offset + g_aE1kRegMap[iReg - 1].size,
7487 ("%s@%#xLB%#x vs %s@%#xLB%#x\n",
7488 g_aE1kRegMap[iReg].abbrev, g_aE1kRegMap[iReg].offset, g_aE1kRegMap[iReg].size,
7489 g_aE1kRegMap[iReg - 1].abbrev, g_aE1kRegMap[iReg - 1].offset, g_aE1kRegMap[iReg - 1].size),
7490 VERR_INTERNAL_ERROR_4);
7491
7492 /*
7493 * Validate configuration.
7494 */
7495 if (!CFGMR3AreValuesValid(pCfg, "MAC\0" "CableConnected\0" "AdapterType\0"
7496 "LineSpeed\0" "GCEnabled\0" "R0Enabled\0"
7497 "ItrEnabled\0" "ItrRxEnabled\0"
7498 "EthernetCRC\0" "GSOEnabled\0" "LinkUpDelay\0"))
7499 return PDMDEV_SET_ERROR(pDevIns, VERR_PDM_DEVINS_UNKNOWN_CFG_VALUES,
7500 N_("Invalid configuration for E1000 device"));
7501
7502 /** @todo: LineSpeed unused! */
7503
7504 pThis->fR0Enabled = true;
7505 pThis->fRCEnabled = true;
7506 pThis->fEthernetCRC = true;
7507 pThis->fGSOEnabled = true;
7508 pThis->fItrEnabled = true;
7509 pThis->fItrRxEnabled = true;
7510
7511 /* Get config params */
7512 rc = CFGMR3QueryBytes(pCfg, "MAC", pThis->macConfigured.au8, sizeof(pThis->macConfigured.au8));
7513 if (RT_FAILURE(rc))
7514 return PDMDEV_SET_ERROR(pDevIns, rc,
7515 N_("Configuration error: Failed to get MAC address"));
7516 rc = CFGMR3QueryBool(pCfg, "CableConnected", &pThis->fCableConnected);
7517 if (RT_FAILURE(rc))
7518 return PDMDEV_SET_ERROR(pDevIns, rc,
7519 N_("Configuration error: Failed to get the value of 'CableConnected'"));
7520 rc = CFGMR3QueryU32(pCfg, "AdapterType", (uint32_t*)&pThis->eChip);
7521 if (RT_FAILURE(rc))
7522 return PDMDEV_SET_ERROR(pDevIns, rc,
7523 N_("Configuration error: Failed to get the value of 'AdapterType'"));
7524 Assert(pThis->eChip <= E1K_CHIP_82545EM);
7525 rc = CFGMR3QueryBoolDef(pCfg, "GCEnabled", &pThis->fRCEnabled, true);
7526 if (RT_FAILURE(rc))
7527 return PDMDEV_SET_ERROR(pDevIns, rc,
7528 N_("Configuration error: Failed to get the value of 'GCEnabled'"));
7529
7530 rc = CFGMR3QueryBoolDef(pCfg, "R0Enabled", &pThis->fR0Enabled, true);
7531 if (RT_FAILURE(rc))
7532 return PDMDEV_SET_ERROR(pDevIns, rc,
7533 N_("Configuration error: Failed to get the value of 'R0Enabled'"));
7534
7535 rc = CFGMR3QueryBoolDef(pCfg, "EthernetCRC", &pThis->fEthernetCRC, true);
7536 if (RT_FAILURE(rc))
7537 return PDMDEV_SET_ERROR(pDevIns, rc,
7538 N_("Configuration error: Failed to get the value of 'EthernetCRC'"));
7539
7540 rc = CFGMR3QueryBoolDef(pCfg, "GSOEnabled", &pThis->fGSOEnabled, true);
7541 if (RT_FAILURE(rc))
7542 return PDMDEV_SET_ERROR(pDevIns, rc,
7543 N_("Configuration error: Failed to get the value of 'GSOEnabled'"));
7544
7545 rc = CFGMR3QueryBoolDef(pCfg, "ItrEnabled", &pThis->fItrEnabled, true);
7546 if (RT_FAILURE(rc))
7547 return PDMDEV_SET_ERROR(pDevIns, rc,
7548 N_("Configuration error: Failed to get the value of 'ItrEnabled'"));
7549
7550 rc = CFGMR3QueryBoolDef(pCfg, "ItrRxEnabled", &pThis->fItrRxEnabled, true);
7551 if (RT_FAILURE(rc))
7552 return PDMDEV_SET_ERROR(pDevIns, rc,
7553 N_("Configuration error: Failed to get the value of 'ItrRxEnabled'"));
7554
7555 rc = CFGMR3QueryU32Def(pCfg, "LinkUpDelay", (uint32_t*)&pThis->cMsLinkUpDelay, 5000); /* ms */
7556 if (RT_FAILURE(rc))
7557 return PDMDEV_SET_ERROR(pDevIns, rc,
7558 N_("Configuration error: Failed to get the value of 'LinkUpDelay'"));
7559 Assert(pThis->cMsLinkUpDelay <= 300000); /* less than 5 minutes */
7560 if (pThis->cMsLinkUpDelay > 5000)
7561 LogRel(("%s WARNING! Link up delay is set to %u seconds!\n", pThis->szPrf, pThis->cMsLinkUpDelay / 1000));
7562 else if (pThis->cMsLinkUpDelay == 0)
7563 LogRel(("%s WARNING! Link up delay is disabled!\n", pThis->szPrf));
7564
7565 LogRel(("%s Chip=%s LinkUpDelay=%ums EthernetCRC=%s GSO=%s Itr=%s ItrRx=%s R0=%s GC=%s\n", pThis->szPrf,
7566 g_Chips[pThis->eChip].pcszName, pThis->cMsLinkUpDelay,
7567 pThis->fEthernetCRC ? "on" : "off",
7568 pThis->fGSOEnabled ? "enabled" : "disabled",
7569 pThis->fItrEnabled ? "enabled" : "disabled",
7570 pThis->fItrRxEnabled ? "enabled" : "disabled",
7571 pThis->fR0Enabled ? "enabled" : "disabled",
7572 pThis->fRCEnabled ? "enabled" : "disabled"));
7573
7574 /* Initialize the EEPROM. */
7575 pThis->eeprom.init(pThis->macConfigured);
7576
7577 /* Initialize internal PHY. */
7578 Phy::init(&pThis->phy, iInstance, pThis->eChip == E1K_CHIP_82543GC ? PHY_EPID_M881000 : PHY_EPID_M881011);
7579 Phy::setLinkStatus(&pThis->phy, pThis->fCableConnected);
7580
7581 /* Initialize critical sections. We do our own locking. */
7582 rc = PDMDevHlpSetDeviceCritSect(pDevIns, PDMDevHlpCritSectGetNop(pDevIns));
7583 AssertRCReturn(rc, rc);
7584
7585 rc = PDMDevHlpCritSectInit(pDevIns, &pThis->cs, RT_SRC_POS, "E1000#%d", iInstance);
7586 if (RT_FAILURE(rc))
7587 return rc;
7588 rc = PDMDevHlpCritSectInit(pDevIns, &pThis->csRx, RT_SRC_POS, "E1000#%dRX", iInstance);
7589 if (RT_FAILURE(rc))
7590 return rc;
7591#ifdef E1K_WITH_TX_CS
7592 rc = PDMDevHlpCritSectInit(pDevIns, &pThis->csTx, RT_SRC_POS, "E1000#%dTX", iInstance);
7593 if (RT_FAILURE(rc))
7594 return rc;
7595#endif /* E1K_WITH_TX_CS */
7596
7597 /* Saved state registration. */
7598 rc = PDMDevHlpSSMRegisterEx(pDevIns, E1K_SAVEDSTATE_VERSION, sizeof(E1KSTATE), NULL,
7599 NULL, e1kLiveExec, NULL,
7600 e1kSavePrep, e1kSaveExec, NULL,
7601 e1kLoadPrep, e1kLoadExec, e1kLoadDone);
7602 if (RT_FAILURE(rc))
7603 return rc;
7604
7605 /* Set PCI config registers and register ourselves with the PCI bus. */
7606 e1kConfigurePciDev(&pThis->pciDevice, pThis->eChip);
7607 rc = PDMDevHlpPCIRegister(pDevIns, &pThis->pciDevice);
7608 if (RT_FAILURE(rc))
7609 return rc;
7610
7611#ifdef E1K_WITH_MSI
7612 PDMMSIREG MsiReg;
7613 RT_ZERO(MsiReg);
7614 MsiReg.cMsiVectors = 1;
7615 MsiReg.iMsiCapOffset = 0x80;
7616 MsiReg.iMsiNextOffset = 0x0;
7617 MsiReg.fMsi64bit = false;
7618 rc = PDMDevHlpPCIRegisterMsi(pDevIns, &MsiReg);
7619 AssertRCReturn(rc, rc);
7620#endif
7621
7622
7623 /* Map our registers to memory space (region 0, see e1kConfigurePCI)*/
7624 rc = PDMDevHlpPCIIORegionRegister(pDevIns, 0, E1K_MM_SIZE, PCI_ADDRESS_SPACE_MEM, e1kMap);
7625 if (RT_FAILURE(rc))
7626 return rc;
7627 /* Map our registers to IO space (region 2, see e1kConfigurePCI) */
7628 rc = PDMDevHlpPCIIORegionRegister(pDevIns, 2, E1K_IOPORT_SIZE, PCI_ADDRESS_SPACE_IO, e1kMap);
7629 if (RT_FAILURE(rc))
7630 return rc;
7631
7632 /* Create transmit queue */
7633 rc = PDMDevHlpQueueCreate(pDevIns, sizeof(PDMQUEUEITEMCORE), 1, 0,
7634 e1kTxQueueConsumer, true, "E1000-Xmit", &pThis->pTxQueueR3);
7635 if (RT_FAILURE(rc))
7636 return rc;
7637 pThis->pTxQueueR0 = PDMQueueR0Ptr(pThis->pTxQueueR3);
7638 pThis->pTxQueueRC = PDMQueueRCPtr(pThis->pTxQueueR3);
7639
7640 /* Create the RX notifier signaller. */
7641 rc = PDMDevHlpQueueCreate(pDevIns, sizeof(PDMQUEUEITEMCORE), 1, 0,
7642 e1kCanRxQueueConsumer, true, "E1000-Rcv", &pThis->pCanRxQueueR3);
7643 if (RT_FAILURE(rc))
7644 return rc;
7645 pThis->pCanRxQueueR0 = PDMQueueR0Ptr(pThis->pCanRxQueueR3);
7646 pThis->pCanRxQueueRC = PDMQueueRCPtr(pThis->pCanRxQueueR3);
7647
7648#ifdef E1K_TX_DELAY
7649 /* Create Transmit Delay Timer */
7650 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kTxDelayTimer, pThis,
7651 TMTIMER_FLAGS_NO_CRIT_SECT,
7652 "E1000 Transmit Delay Timer", &pThis->pTXDTimerR3);
7653 if (RT_FAILURE(rc))
7654 return rc;
7655 pThis->pTXDTimerR0 = TMTimerR0Ptr(pThis->pTXDTimerR3);
7656 pThis->pTXDTimerRC = TMTimerRCPtr(pThis->pTXDTimerR3);
7657 TMR3TimerSetCritSect(pThis->pTXDTimerR3, &pThis->csTx);
7658#endif /* E1K_TX_DELAY */
7659
7660#ifdef E1K_USE_TX_TIMERS
7661 /* Create Transmit Interrupt Delay Timer */
7662 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kTxIntDelayTimer, pThis,
7663 TMTIMER_FLAGS_NO_CRIT_SECT,
7664 "E1000 Transmit Interrupt Delay Timer", &pThis->pTIDTimerR3);
7665 if (RT_FAILURE(rc))
7666 return rc;
7667 pThis->pTIDTimerR0 = TMTimerR0Ptr(pThis->pTIDTimerR3);
7668 pThis->pTIDTimerRC = TMTimerRCPtr(pThis->pTIDTimerR3);
7669
7670# ifndef E1K_NO_TAD
7671 /* Create Transmit Absolute Delay Timer */
7672 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kTxAbsDelayTimer, pThis,
7673 TMTIMER_FLAGS_NO_CRIT_SECT,
7674 "E1000 Transmit Absolute Delay Timer", &pThis->pTADTimerR3);
7675 if (RT_FAILURE(rc))
7676 return rc;
7677 pThis->pTADTimerR0 = TMTimerR0Ptr(pThis->pTADTimerR3);
7678 pThis->pTADTimerRC = TMTimerRCPtr(pThis->pTADTimerR3);
7679# endif /* E1K_NO_TAD */
7680#endif /* E1K_USE_TX_TIMERS */
7681
7682#ifdef E1K_USE_RX_TIMERS
7683 /* Create Receive Interrupt Delay Timer */
7684 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kRxIntDelayTimer, pThis,
7685 TMTIMER_FLAGS_NO_CRIT_SECT,
7686 "E1000 Receive Interrupt Delay Timer", &pThis->pRIDTimerR3);
7687 if (RT_FAILURE(rc))
7688 return rc;
7689 pThis->pRIDTimerR0 = TMTimerR0Ptr(pThis->pRIDTimerR3);
7690 pThis->pRIDTimerRC = TMTimerRCPtr(pThis->pRIDTimerR3);
7691
7692 /* Create Receive Absolute Delay Timer */
7693 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kRxAbsDelayTimer, pThis,
7694 TMTIMER_FLAGS_NO_CRIT_SECT,
7695 "E1000 Receive Absolute Delay Timer", &pThis->pRADTimerR3);
7696 if (RT_FAILURE(rc))
7697 return rc;
7698 pThis->pRADTimerR0 = TMTimerR0Ptr(pThis->pRADTimerR3);
7699 pThis->pRADTimerRC = TMTimerRCPtr(pThis->pRADTimerR3);
7700#endif /* E1K_USE_RX_TIMERS */
7701
7702 /* Create Late Interrupt Timer */
7703 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kLateIntTimer, pThis,
7704 TMTIMER_FLAGS_NO_CRIT_SECT,
7705 "E1000 Late Interrupt Timer", &pThis->pIntTimerR3);
7706 if (RT_FAILURE(rc))
7707 return rc;
7708 pThis->pIntTimerR0 = TMTimerR0Ptr(pThis->pIntTimerR3);
7709 pThis->pIntTimerRC = TMTimerRCPtr(pThis->pIntTimerR3);
7710
7711 /* Create Link Up Timer */
7712 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kLinkUpTimer, pThis,
7713 TMTIMER_FLAGS_NO_CRIT_SECT,
7714 "E1000 Link Up Timer", &pThis->pLUTimerR3);
7715 if (RT_FAILURE(rc))
7716 return rc;
7717 pThis->pLUTimerR0 = TMTimerR0Ptr(pThis->pLUTimerR3);
7718 pThis->pLUTimerRC = TMTimerRCPtr(pThis->pLUTimerR3);
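    /*
     * Hedged illustration only: timers created above are typically (re)armed
     * against the virtual clock with the TM helpers, e.g. to fire a given
     * number of microseconds from now or to cancel a pending expiry:
     *
     *     TMTimerSetMicro(pThis->CTX_SUFF(pIntTimer), cMicros);
     *     TMTimerStop(pThis->CTX_SUFF(pIntTimer));
     *
     * (cMicros is a placeholder; the actual delays come from guest-programmed
     * registers such as TIDV/TADV/RDTR/RADV.)
     */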
7719
7720 /* Register the info item */
7721 char szTmp[20];
7722 RTStrPrintf(szTmp, sizeof(szTmp), "e1k%d", iInstance);
7723 PDMDevHlpDBGFInfoRegister(pDevIns, szTmp, "E1000 info.", e1kInfo);
7724
7725 /* Status driver */
7726 PPDMIBASE pBase;
7727 rc = PDMDevHlpDriverAttach(pDevIns, PDM_STATUS_LUN, &pThis->IBase, &pBase, "Status Port");
7728 if (RT_FAILURE(rc))
7729 return PDMDEV_SET_ERROR(pDevIns, rc, N_("Failed to attach the status LUN"));
7730 pThis->pLedsConnector = PDMIBASE_QUERY_INTERFACE(pBase, PDMILEDCONNECTORS);
7731
7732 /* Network driver */
7733 rc = PDMDevHlpDriverAttach(pDevIns, 0, &pThis->IBase, &pThis->pDrvBase, "Network Port");
7734 if (RT_SUCCESS(rc))
7735 {
7736 if (rc == VINF_NAT_DNS)
7737 PDMDevHlpVMSetRuntimeError(pDevIns, 0 /*fFlags*/, "NoDNSforNAT",
7738 N_("A Domain Name Server (DNS) for NAT networking could not be determined. Ensure that your host is correctly connected to an ISP. If you ignore this warning the guest will not be able to perform nameserver lookups and it will probably observe delays if trying so"));
7739 pThis->pDrvR3 = PDMIBASE_QUERY_INTERFACE(pThis->pDrvBase, PDMINETWORKUP);
7740 AssertMsgReturn(pThis->pDrvR3, ("Failed to obtain the PDMINETWORKUP interface!\n"), VERR_PDM_MISSING_INTERFACE_BELOW);
7741
7742 pThis->pDrvR0 = PDMIBASER0_QUERY_INTERFACE(PDMIBASE_QUERY_INTERFACE(pThis->pDrvBase, PDMIBASER0), PDMINETWORKUP);
7743 pThis->pDrvRC = PDMIBASERC_QUERY_INTERFACE(PDMIBASE_QUERY_INTERFACE(pThis->pDrvBase, PDMIBASERC), PDMINETWORKUP);
7744 }
7745 else if ( rc == VERR_PDM_NO_ATTACHED_DRIVER
7746 || rc == VERR_PDM_CFG_MISSING_DRIVER_NAME)
7747 {
7748 /* No error! */
7749 E1kLog(("%s This adapter is not attached to any network!\n", pThis->szPrf));
7750 }
7751 else
7752 return PDMDEV_SET_ERROR(pDevIns, rc, N_("Failed to attach the network LUN"));
7753
7754 rc = RTSemEventCreate(&pThis->hEventMoreRxDescAvail);
7755 if (RT_FAILURE(rc))
7756 return rc;
7757
7758 rc = e1kInitDebugHelpers();
7759 if (RT_FAILURE(rc))
7760 return rc;
7761
7762 e1kHardReset(pThis);
7763
7764 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveBytes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Amount of data received", "/Public/Net/E1k%u/BytesReceived", iInstance);
7765 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitBytes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Amount of data transmitted", "/Public/Net/E1k%u/BytesTransmitted", iInstance);
7766
7767 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveBytes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Amount of data received", "/Devices/E1k%d/ReceiveBytes", iInstance);
7768 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitBytes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Amount of data transmitted", "/Devices/E1k%d/TransmitBytes", iInstance);
7769
7770#if defined(VBOX_WITH_STATISTICS)
7771 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatMMIOReadRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling MMIO reads in RZ", "/Devices/E1k%d/MMIO/ReadRZ", iInstance);
7772 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatMMIOReadR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling MMIO reads in R3", "/Devices/E1k%d/MMIO/ReadR3", iInstance);
7773 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatMMIOWriteRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling MMIO writes in RZ", "/Devices/E1k%d/MMIO/WriteRZ", iInstance);
7774 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatMMIOWriteR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling MMIO writes in R3", "/Devices/E1k%d/MMIO/WriteR3", iInstance);
7775 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatEEPROMRead, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling EEPROM reads", "/Devices/E1k%d/EEPROM/Read", iInstance);
7776 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatEEPROMWrite, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling EEPROM writes", "/Devices/E1k%d/EEPROM/Write", iInstance);
7777 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIOReadRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling IO reads in RZ", "/Devices/E1k%d/IO/ReadRZ", iInstance);
7778 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIOReadR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling IO reads in R3", "/Devices/E1k%d/IO/ReadR3", iInstance);
7779 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIOWriteRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling IO writes in RZ", "/Devices/E1k%d/IO/WriteRZ", iInstance);
7780 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIOWriteR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling IO writes in R3", "/Devices/E1k%d/IO/WriteR3", iInstance);
7781 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatLateIntTimer, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling late int timer", "/Devices/E1k%d/LateInt/Timer", iInstance);
7782 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatLateInts, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of late interrupts", "/Devices/E1k%d/LateInt/Occured", iInstance);
7783 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIntsRaised, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of raised interrupts", "/Devices/E1k%d/Interrupts/Raised", iInstance);
7784 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatIntsPrevented, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of prevented interrupts", "/Devices/E1k%d/Interrupts/Prevented", iInstance);
7785 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceive, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling receive", "/Devices/E1k%d/Receive/Total", iInstance);
7786 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveCRC, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling receive checksumming", "/Devices/E1k%d/Receive/CRC", iInstance);
7787 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveFilter, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling receive filtering", "/Devices/E1k%d/Receive/Filter", iInstance);
7788 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatReceiveStore, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling receive storing", "/Devices/E1k%d/Receive/Store", iInstance);
7789 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatRxOverflow, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_OCCURENCE, "Profiling RX overflows", "/Devices/E1k%d/RxOverflow", iInstance);
7790 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatRxOverflowWakeup, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Nr of RX overflow wakeups", "/Devices/E1k%d/RxOverflowWakeup", iInstance);
7791 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling transmits in RZ", "/Devices/E1k%d/Transmit/TotalRZ", iInstance);
7792 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling transmits in R3", "/Devices/E1k%d/Transmit/TotalR3", iInstance);
7793 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitSendRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling send transmit in RZ", "/Devices/E1k%d/Transmit/SendRZ", iInstance);
7794 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTransmitSendR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling send transmit in R3", "/Devices/E1k%d/Transmit/SendR3", iInstance);
7795
7796 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxDescCtxNormal, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of normal context descriptors","/Devices/E1k%d/TxDesc/ContexNormal", iInstance);
7797 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxDescCtxTSE, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TSE context descriptors", "/Devices/E1k%d/TxDesc/ContextTSE", iInstance);
7798 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxDescData, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TX data descriptors", "/Devices/E1k%d/TxDesc/Data", iInstance);
7799 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxDescLegacy, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TX legacy descriptors", "/Devices/E1k%d/TxDesc/Legacy", iInstance);
7800 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxDescTSEData, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TX TSE data descriptors", "/Devices/E1k%d/TxDesc/TSEData", iInstance);
7801 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxPathFallback, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Fallback TSE descriptor path", "/Devices/E1k%d/TxPath/Fallback", iInstance);
7802 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxPathGSO, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "GSO TSE descriptor path", "/Devices/E1k%d/TxPath/GSO", iInstance);
7803 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatTxPathRegular, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Regular descriptor path", "/Devices/E1k%d/TxPath/Normal", iInstance);
7804 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->StatPHYAccesses, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of PHY accesses", "/Devices/E1k%d/PHYAccesses", iInstance);
7805 for (unsigned iReg = 0; iReg < E1K_NUM_OF_REGS; iReg++)
7806 {
7807 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->aStatRegReads[iReg], STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
7808 g_aE1kRegMap[iReg].name, "/Devices/E1k%d/Regs/%s-Reads", iInstance, g_aE1kRegMap[iReg].abbrev);
7809 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->aStatRegWrites[iReg], STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
7810 g_aE1kRegMap[iReg].name, "/Devices/E1k%d/Regs/%s-Writes", iInstance, g_aE1kRegMap[iReg].abbrev);
7811 }
7812#endif /* VBOX_WITH_STATISTICS */
7813
7814#ifdef E1K_INT_STATS
7815 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->u64ArmedAt, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "u64ArmedAt", "/Devices/E1k%d/u64ArmedAt", iInstance);
7816 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatMaxTxDelay, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatMaxTxDelay", "/Devices/E1k%d/uStatMaxTxDelay", iInstance);
7817 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatInt, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatInt", "/Devices/E1k%d/uStatInt", iInstance);
7818 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntTry, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntTry", "/Devices/E1k%d/uStatIntTry", iInstance);
7819 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntLower, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntLower", "/Devices/E1k%d/uStatIntLower", iInstance);
7820 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntDly, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntDly", "/Devices/E1k%d/uStatIntDly", iInstance);
7821 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->iStatIntLost, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "iStatIntLost", "/Devices/E1k%d/iStatIntLost", iInstance);
7822 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->iStatIntLostOne, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "iStatIntLostOne", "/Devices/E1k%d/iStatIntLostOne", iInstance);
7823 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatDisDly, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatDisDly", "/Devices/E1k%d/uStatDisDly", iInstance);
7824 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntSkip, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntSkip", "/Devices/E1k%d/uStatIntSkip", iInstance);
7825 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntLate, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntLate", "/Devices/E1k%d/uStatIntLate", iInstance);
7826 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntMasked, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntMasked", "/Devices/E1k%d/uStatIntMasked", iInstance);
7827 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntEarly, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntEarly", "/Devices/E1k%d/uStatIntEarly", iInstance);
7828 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntRx, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntRx", "/Devices/E1k%d/uStatIntRx", iInstance);
7829 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntTx, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntTx", "/Devices/E1k%d/uStatIntTx", iInstance);
7830 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntICS, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntICS", "/Devices/E1k%d/uStatIntICS", iInstance);
7831 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntRDTR, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntRDTR", "/Devices/E1k%d/uStatIntRDTR", iInstance);
7832 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntRXDMT0, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntRXDMT0", "/Devices/E1k%d/uStatIntRXDMT0", iInstance);
7833 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatIntTXQE, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatIntTXQE", "/Devices/E1k%d/uStatIntTXQE", iInstance);
7834 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxNoRS, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxNoRS", "/Devices/E1k%d/uStatTxNoRS", iInstance);
7835 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxIDE, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxIDE", "/Devices/E1k%d/uStatTxIDE", iInstance);
7836 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxDelayed, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxDelayed", "/Devices/E1k%d/uStatTxDelayed", iInstance);
7837 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxDelayExp, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxDelayExp", "/Devices/E1k%d/uStatTxDelayExp", iInstance);
7838 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTAD, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTAD", "/Devices/E1k%d/uStatTAD", iInstance);
7839 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTID, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTID", "/Devices/E1k%d/uStatTID", iInstance);
7840 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatRAD, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatRAD", "/Devices/E1k%d/uStatRAD", iInstance);
7841 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatRID, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatRID", "/Devices/E1k%d/uStatRID", iInstance);
7842 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatRxFrm, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatRxFrm", "/Devices/E1k%d/uStatRxFrm", iInstance);
7843 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxFrm, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxFrm", "/Devices/E1k%d/uStatTxFrm", iInstance);
7844 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatDescCtx, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatDescCtx", "/Devices/E1k%d/uStatDescCtx", iInstance);
7845 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatDescDat, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatDescDat", "/Devices/E1k%d/uStatDescDat", iInstance);
7846 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatDescLeg, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatDescLeg", "/Devices/E1k%d/uStatDescLeg", iInstance);
7847 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx1514, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx1514", "/Devices/E1k%d/uStatTx1514", iInstance);
7848 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx2962, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx2962", "/Devices/E1k%d/uStatTx2962", iInstance);
7849 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx4410, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx4410", "/Devices/E1k%d/uStatTx4410", iInstance);
7850 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx5858, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx5858", "/Devices/E1k%d/uStatTx5858", iInstance);
7851 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx7306, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx7306", "/Devices/E1k%d/uStatTx7306", iInstance);
7852 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx8754, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx8754", "/Devices/E1k%d/uStatTx8754", iInstance);
7853 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx16384, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx16384", "/Devices/E1k%d/uStatTx16384", iInstance);
7854 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTx32768, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTx32768", "/Devices/E1k%d/uStatTx32768", iInstance);
7855 PDMDevHlpSTAMRegisterF(pDevIns, &pThis->uStatTxLarge, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "uStatTxLarge", "/Devices/E1k%d/uStatTxLarge", iInstance);
7856#endif /* E1K_INT_STATS */
7857
7858 return VINF_SUCCESS;
7859}
7860
7861/**
7862 * The device registration structure.
7863 */
7864const PDMDEVREG g_DeviceE1000 =
7865{
7866 /* Structure version. PDM_DEVREG_VERSION defines the current version. */
7867 PDM_DEVREG_VERSION,
7868 /* Device name. */
7869 "e1000",
7870 /* Name of guest context module (no path).
7871 * Only evaluated if PDM_DEVREG_FLAGS_RC is set. */
7872 "VBoxDDRC.rc",
7873 /* Name of ring-0 module (no path).
7874 * Only evaluated if PDM_DEVREG_FLAGS_R0 is set. */
7875 "VBoxDDR0.r0",
7876 /* The description of the device. The UTF-8 string pointed to shall, like this structure,
7877 * remain unchanged from registration till VM destruction. */
7878 "Intel PRO/1000 MT Desktop Ethernet.\n",
7879
7880 /* Flags, combination of the PDM_DEVREG_FLAGS_* \#defines. */
7881 PDM_DEVREG_FLAGS_DEFAULT_BITS | PDM_DEVREG_FLAGS_RC | PDM_DEVREG_FLAGS_R0,
7882 /* Device class(es), combination of the PDM_DEVREG_CLASS_* \#defines. */
7883 PDM_DEVREG_CLASS_NETWORK,
7884 /* Maximum number of instances (per VM). */
7885 ~0U,
7886 /* Size of the instance data. */
7887 sizeof(E1KSTATE),
7888
7889 /* pfnConstruct */
7890 e1kR3Construct,
7891 /* pfnDestruct */
7892 e1kR3Destruct,
7893 /* pfnRelocate */
7894 e1kR3Relocate,
7895 /* pfnMemSetup */
7896 NULL,
7897 /* pfnPowerOn */
7898 NULL,
7899 /* pfnReset */
7900 e1kR3Reset,
7901 /* pfnSuspend */
7902 e1kR3Suspend,
7903 /* pfnResume */
7904 NULL,
7905 /* pfnAttach */
7906 e1kR3Attach,
7907 /* pfnDetach */
7908 e1kR3Detach,
7909 /* pfnQueryInterface */
7910 NULL,
7911 /* pfnInitComplete */
7912 NULL,
7913 /* pfnPowerOff */
7914 e1kR3PowerOff,
7915 /* pfnSoftReset */
7916 NULL,
7917
7918 /* u32VersionEnd */
7919 PDM_DEVREG_VERSION
7920};
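/*
 * Illustrative sketch of how a PDMDEVREG like the one above is typically
 * consumed (the call site lives in the VBoxDD module, not in this file):
 * the module's device registration callback hands it to PDM, roughly
 *
 *     rc = pCallbacks->pfnRegister(pCallbacks, &g_DeviceE1000);
 *     AssertRCReturn(rc, rc);
 *
 * after which PDM invokes e1kR3Construct for every configured instance.
 */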
7921
7922#endif /* IN_RING3 */
7923#endif /* !VBOX_DEVICE_STRUCT_TESTCASE */