• R/O
  • HTTP
  • SSH
  • HTTPS

linux-2.4.36: Commit

2.4.36-stable kernel tree


Commit MetaInfo

Revision99405c24cd80afa8f002574f65700296c838260d (tree)
Zeit2007-02-04 06:56:26
AutorWilly Tarreau <w@1wt....>
CommitterWilly Tarreau

Log Message

[PATCH] merge 2.6 backport of skge/sky2 network drivers

Quite a bunch of x86 motherboards equipped with a Gigabit LAN
port make use of a Marvell chip which requires the vendor's driver
from www.marvell.com. This driver is known to have some troubles,
among which losing frames under UDP-only traffic such as NFS and
DNS.

Stephen Hemminger has rewritten two separate drivers from scratch
for those chips to solve many problems in the historical driver.
A backport of his work is provided here as an alternate driver
for people experiencing problems with the vendor's one.

Current versions (skge-1.6 and sky2-1.5) do not support NAPI and
as such, consume slightly more CPU than the vendor's driver, but
at least they do seem to work correctly on UP. No test has been
performed on SMP yet.

Signed-off-by: Willy Tarreau <w@1wt.eu>

Änderungszusammenfassung

Diff

--- a/Documentation/Configure.help
+++ b/Documentation/Configure.help
@@ -12140,6 +12140,37 @@ CONFIG_SK98LIN
1214012140 say M here and read Documentation/modules.txt. This is recommended.
1214112141 The module will be called sk98lin.o.
1214212142
12143+Marvell Yukon Gigabit Ethernet Adapter family support
12144+CONFIG_SKGE
12145+ This is an alternate driver for the Marvell Yukon Gigabit Ethernet
12146+ family. It is a backport of version 1.6 for kernel 2.6. The vendor's
12147+ one is normally recommended (sk98lin), but under some circumstances, it
12148+ is known to have trouble (eg: sending UDP only on old chips). This driver
12149+ is not very fast and may lead to higher CPU loads than the original one
12150+ since it does not support NAPI yet, but at least it is reported to work
12151+ and is maintained in 2.6. Linking it with the kernel is not recommended
12152+ since it may conflict with sk98lin.
12153+
12154+ If you want to compile this driver as a module ( = code which can be
12155+ inserted in and removed from the running kernel whenever you want),
12156+ say M here and read Documentation/modules.txt. This is recommended.
12157+ The module will be called skge.o.
12158+
12159+Marvell Yukon 2 Gigabit Ethernet Adapter family support
12160+CONFIG_SKY2
12161+ This is an alternate driver for the Marvell Yukon 2 Gigabit Ethernet
12162+ family. It is a backport of version 1.5 for kernel 2.6. The vendor's
12163+ one is normally recommended (sk98lin), but under some circumstances, it
12164+ is known to have trouble (eg: sending UDP only on old chips). This driver
12165+ is not very fast and may lead to higher CPU loads than the original one
12166+ since it does not support NAPI yet, but at least it is reported to work
12167+ and is maintained in 2.6. Linking it with the kernel is not recommended
12168+ since it may conflict with sk98lin.
12169+
12170+ If you want to compile this driver as a module ( = code which can be
12171+ inserted in and removed from the running kernel whenever you want),
12172+ say M here and read Documentation/modules.txt. This is recommended.
12173+ The module will be called sky2.o.
1214312174
1214412175 Sun GEM support
1214512176 CONFIG_SUNGEM
--- a/drivers/net/Config.in
+++ b/drivers/net/Config.in
@@ -272,6 +272,10 @@ dep_tristate 'Realtek 8169 Gigabit Ethernet support' CONFIG_R8169 $CONFIG_PCI
272272 if [ "$CONFIG_SIBYTE_SB1xxx_SOC" = "y" ]; then
273273 tristate 'SB1250 Ethernet support' CONFIG_NET_SB1250_MAC
274274 fi
275+
276+dep_tristate 'Alternate Marvell Yukon Chipset Support (not Yukon2)' CONFIG_SKGE $CONFIG_PCI
277+dep_tristate 'Alternate Marvell Yukon 2 Chipset Support' CONFIG_SKY2 $CONFIG_PCI
278+
275279 dep_tristate 'Marvell Yukon Chipset / SysKonnect SK-98xx Support' CONFIG_SK98LIN $CONFIG_PCI
276280 dep_tristate 'Broadcom Tigon3 support' CONFIG_TIGON3 $CONFIG_PCI
277281
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -104,6 +104,9 @@ ifeq ($(CONFIG_SK98LIN),y)
104104 obj-y += sk98lin/sk98lin.o
105105 endif
106106
107+obj-$(CONFIG_SKGE) += skge.o
108+obj-$(CONFIG_SKY2) += sky2.o
109+
107110 ifeq ($(CONFIG_SKFP),y)
108111 obj-y += skfp/skfp.o
109112 endif
--- /dev/null
+++ b/drivers/net/skge.c
@@ -0,0 +1,3434 @@
1+/*
2+ * New driver for Marvell Yukon chipset and SysKonnect Gigabit
3+ * Ethernet adapters. Based on earlier sk98lin, e100 and
4+ * FreeBSD if_sk drivers.
5+ *
6+ * This driver intentionally does not support all the features
7+ * of the original driver such as link fail-over and link management because
8+ * those should be done at higher levels.
9+ *
10+ * Copyright (C) 2004, 2005 Stephen Hemminger <shemminger@osdl.org>
11+ *
12+ * This program is free software; you can redistribute it and/or modify
13+ * it under the terms of the GNU General Public License as published by
14+ * the Free Software Foundation; either version 2 of the License
15+ *
16+ * This program is distributed in the hope that it will be useful,
17+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
18+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19+ * GNU General Public License for more details.
20+ *
21+ * You should have received a copy of the GNU General Public License
22+ * along with this program; if not, write to the Free Software
23+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24+ */
25+
26+#include <linux/module.h>
27+#include <linux/pci.h>
28+#include <linux/if_vlan.h>
29+#include <linux/types.h>
30+#include <linux/kernel.h>
31+#include <asm/bitops.h>
32+#include <asm/byteorder.h>
33+#include <asm/io.h>
34+#include <asm/irq.h>
35+#include <linux/ip.h>
36+#include <linux/socket.h>
37+#include <linux/in.h>
38+#include <linux/init.h>
39+#include <linux/netdevice.h>
40+#include <linux/ethtool.h>
41+#include <linux/etherdevice.h>
42+#include <linux/crc32.h>
43+#include <linux/mii.h>
44+#include <linux/delay.h>
45+
46+#include "skge_backport.h"
47+#include "skge.h"
48+
49+#define DRV_NAME "skge"
50+#define DRV_VERSION "1.6 classic"
51+#define PFX DRV_NAME " "
52+
53+#define DEFAULT_TX_RING_SIZE 128
54+#define DEFAULT_RX_RING_SIZE 512
55+#define MAX_TX_RING_SIZE 1024
56+#define TX_LOW_WATER (MAX_SKB_FRAGS + 1)
57+#define MAX_RX_RING_SIZE 4096
58+#define RX_COPY_THRESHOLD 128
59+#define RX_BUF_SIZE 1536
60+#define PHY_RETRIES 1000
61+#define ETH_JUMBO_MTU 9000
62+#define TX_WATCHDOG (5 * HZ)
63+#define NAPI_WEIGHT 64
64+#define BLINK_MS 250
65+
66+MODULE_DESCRIPTION("SysKonnect Gigabit Ethernet driver");
67+MODULE_AUTHOR("Stephen Hemminger <shemminger@osdl.org>");
68+MODULE_LICENSE("GPL");
69+
70+static const u32 default_msg
71+ = NETIF_MSG_DRV| NETIF_MSG_PROBE| NETIF_MSG_LINK
72+ | NETIF_MSG_IFUP| NETIF_MSG_IFDOWN;
73+
74+static int debug = -1; /* defaults above */
75+module_param(debug, int, 0);
76+
77+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
78+
79+static const struct pci_device_id skge_id_table[] = {
80+ { PCI_DEVICE(0x10b7, 0x1700) }, /* 3COM 3C940 */
81+ { PCI_DEVICE(0x10b7, 0x80eb) }, /* 3COM 3C940B */
82+ { PCI_DEVICE(0x1148, 0x4300) }, /* SysKonnect Genesis */
83+ { PCI_DEVICE(0x1148, 0x4320) }, /* SysKonnect Yukon */
84+ { PCI_DEVICE(0x1186, 0x4c00) }, /* Dlink DGE 510T */
85+ { PCI_DEVICE(0x1186, 0x4b01) }, /* DGE-530T */
86+ { PCI_DEVICE(0x11ab, 0x4320) }, /* Marvell */
87+ { PCI_DEVICE(0x11ab, 0x5005) }, /* Belkin */
88+ { PCI_DEVICE(0x1371, 0x434e) }, /* Cnet gigacard */
89+ { PCI_DEVICE(0x1737, 0x1064) }, /* Linksys EG1064 */
90+ { 0x1737, 0x1032, PCI_ANY_ID, 0x0015, }, /* Linksys EG1032 (some versions) */
91+ { 0 }
92+};
93+MODULE_DEVICE_TABLE(pci, skge_id_table);
94+
95+static int skge_up(struct net_device *dev);
96+static int skge_down(struct net_device *dev);
97+static void skge_phy_reset(struct skge_port *skge);
98+static void skge_tx_clean(struct skge_port *skge);
99+static int xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val);
100+static int gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val);
101+static void genesis_get_stats(struct skge_port *skge, u64 *data);
102+static void yukon_get_stats(struct skge_port *skge, u64 *data);
103+static void yukon_init(struct skge_hw *hw, int port);
104+static void genesis_mac_init(struct skge_hw *hw, int port);
105+static void genesis_link_up(struct skge_port *skge);
106+
107+/* Avoid conditionals by using array */
108+static const int txqaddr[] = { Q_XA1, Q_XA2 };
109+static const int rxqaddr[] = { Q_R1, Q_R2 };
110+static const u32 rxirqmask[] = { IS_R1_F, IS_R2_F };
111+static const u32 txirqmask[] = { IS_XA1_F, IS_XA2_F };
112+
113+static int skge_get_regs_len(struct net_device *dev)
114+{
115+ return 0x4000;
116+}
117+
118+/*
119+ * Returns copy of whole control register region
120+ * Note: skip RAM address register because accessing it will
121+ * cause bus hangs!
122+ */
123+static void skge_get_regs(struct net_device *dev, struct ethtool_regs *regs,
124+ void *p)
125+{
126+ const struct skge_port *skge = netdev_priv(dev);
127+ const void __iomem *io = skge->hw->regs;
128+
129+ regs->version = 1;
130+ memset(p, 0, regs->len);
131+ memcpy_fromio(p, io, B3_RAM_ADDR);
132+
133+ memcpy_fromio(p + B3_RI_WTO_R1, io + B3_RI_WTO_R1,
134+ regs->len - B3_RI_WTO_R1);
135+}
136+
137+
138+/* Determine supported/advertised modes based on hardware.
139+ * Note: ethtool ADVERTISED_xxx == SUPPORTED_xxx
140+ */
141+static u32 skge_supported_modes(const struct skge_hw *hw)
142+{
143+ u32 supported;
144+
145+ if (hw->copper) {
146+ supported = SUPPORTED_10baseT_Half
147+ | SUPPORTED_10baseT_Full
148+ | SUPPORTED_100baseT_Half
149+ | SUPPORTED_100baseT_Full
150+ | SUPPORTED_1000baseT_Half
151+ | SUPPORTED_1000baseT_Full
152+ | SUPPORTED_Autoneg| SUPPORTED_TP;
153+
154+ if (hw->chip_id == CHIP_ID_GENESIS)
155+ supported &= ~(SUPPORTED_10baseT_Half
156+ | SUPPORTED_10baseT_Full
157+ | SUPPORTED_100baseT_Half
158+ | SUPPORTED_100baseT_Full);
159+
160+ else if (hw->chip_id == CHIP_ID_YUKON)
161+ supported &= ~SUPPORTED_1000baseT_Half;
162+ } else
163+ supported = SUPPORTED_1000baseT_Full | SUPPORTED_FIBRE
164+ | SUPPORTED_Autoneg;
165+
166+ return supported;
167+}
168+
169+static int skge_get_settings(struct net_device *dev,
170+ struct ethtool_cmd *ecmd)
171+{
172+ struct skge_port *skge = netdev_priv(dev);
173+ struct skge_hw *hw = skge->hw;
174+
175+ ecmd->transceiver = XCVR_INTERNAL;
176+ ecmd->supported = skge_supported_modes(hw);
177+
178+ if (hw->copper) {
179+ ecmd->port = PORT_TP;
180+ ecmd->phy_address = hw->phy_addr;
181+ } else
182+ ecmd->port = PORT_FIBRE;
183+
184+ ecmd->advertising = skge->advertising;
185+ ecmd->autoneg = skge->autoneg;
186+ ecmd->speed = skge->speed;
187+ ecmd->duplex = skge->duplex;
188+ return 0;
189+}
190+
191+static int skge_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
192+{
193+ struct skge_port *skge = netdev_priv(dev);
194+ const struct skge_hw *hw = skge->hw;
195+ u32 supported = skge_supported_modes(hw);
196+
197+ if (ecmd->autoneg == AUTONEG_ENABLE) {
198+ ecmd->advertising = supported;
199+ skge->duplex = -1;
200+ skge->speed = -1;
201+ } else {
202+ u32 setting;
203+
204+ switch (ecmd->speed) {
205+ case SPEED_1000:
206+ if (ecmd->duplex == DUPLEX_FULL)
207+ setting = SUPPORTED_1000baseT_Full;
208+ else if (ecmd->duplex == DUPLEX_HALF)
209+ setting = SUPPORTED_1000baseT_Half;
210+ else
211+ return -EINVAL;
212+ break;
213+ case SPEED_100:
214+ if (ecmd->duplex == DUPLEX_FULL)
215+ setting = SUPPORTED_100baseT_Full;
216+ else if (ecmd->duplex == DUPLEX_HALF)
217+ setting = SUPPORTED_100baseT_Half;
218+ else
219+ return -EINVAL;
220+ break;
221+
222+ case SPEED_10:
223+ if (ecmd->duplex == DUPLEX_FULL)
224+ setting = SUPPORTED_10baseT_Full;
225+ else if (ecmd->duplex == DUPLEX_HALF)
226+ setting = SUPPORTED_10baseT_Half;
227+ else
228+ return -EINVAL;
229+ break;
230+ default:
231+ return -EINVAL;
232+ }
233+
234+ if ((setting & supported) == 0)
235+ return -EINVAL;
236+
237+ skge->speed = ecmd->speed;
238+ skge->duplex = ecmd->duplex;
239+ }
240+
241+ skge->autoneg = ecmd->autoneg;
242+ skge->advertising = ecmd->advertising;
243+
244+ if (netif_running(dev))
245+ skge_phy_reset(skge);
246+
247+ return (0);
248+}
249+
250+static void skge_get_drvinfo(struct net_device *dev,
251+ struct ethtool_drvinfo *info)
252+{
253+ struct skge_port *skge = netdev_priv(dev);
254+
255+ strcpy(info->driver, DRV_NAME);
256+ strcpy(info->version, DRV_VERSION);
257+ strcpy(info->fw_version, "N/A");
258+ strcpy(info->bus_info, pci_name(skge->hw->pdev));
259+}
260+
261+static const struct skge_stat {
262+ char name[ETH_GSTRING_LEN];
263+ u16 xmac_offset;
264+ u16 gma_offset;
265+} skge_stats[] = {
266+ { "tx_bytes", XM_TXO_OK_HI, GM_TXO_OK_HI },
267+ { "rx_bytes", XM_RXO_OK_HI, GM_RXO_OK_HI },
268+
269+ { "tx_broadcast", XM_TXF_BC_OK, GM_TXF_BC_OK },
270+ { "rx_broadcast", XM_RXF_BC_OK, GM_RXF_BC_OK },
271+ { "tx_multicast", XM_TXF_MC_OK, GM_TXF_MC_OK },
272+ { "rx_multicast", XM_RXF_MC_OK, GM_RXF_MC_OK },
273+ { "tx_unicast", XM_TXF_UC_OK, GM_TXF_UC_OK },
274+ { "rx_unicast", XM_RXF_UC_OK, GM_RXF_UC_OK },
275+ { "tx_mac_pause", XM_TXF_MPAUSE, GM_TXF_MPAUSE },
276+ { "rx_mac_pause", XM_RXF_MPAUSE, GM_RXF_MPAUSE },
277+
278+ { "collisions", XM_TXF_SNG_COL, GM_TXF_SNG_COL },
279+ { "multi_collisions", XM_TXF_MUL_COL, GM_TXF_MUL_COL },
280+ { "aborted", XM_TXF_ABO_COL, GM_TXF_ABO_COL },
281+ { "late_collision", XM_TXF_LAT_COL, GM_TXF_LAT_COL },
282+ { "fifo_underrun", XM_TXE_FIFO_UR, GM_TXE_FIFO_UR },
283+ { "fifo_overflow", XM_RXE_FIFO_OV, GM_RXE_FIFO_OV },
284+
285+ { "rx_toolong", XM_RXF_LNG_ERR, GM_RXF_LNG_ERR },
286+ { "rx_jabber", XM_RXF_JAB_PKT, GM_RXF_JAB_PKT },
287+ { "rx_runt", XM_RXE_RUNT, GM_RXE_FRAG },
288+ { "rx_too_long", XM_RXF_LNG_ERR, GM_RXF_LNG_ERR },
289+ { "rx_fcs_error", XM_RXF_FCS_ERR, GM_RXF_FCS_ERR },
290+};
291+
292+static int skge_get_stats_count(struct net_device *dev)
293+{
294+ return ARRAY_SIZE(skge_stats);
295+}
296+
297+static void skge_get_ethtool_stats(struct net_device *dev,
298+ struct ethtool_stats *stats, u64 *data)
299+{
300+ struct skge_port *skge = netdev_priv(dev);
301+
302+ if (skge->hw->chip_id == CHIP_ID_GENESIS)
303+ genesis_get_stats(skge, data);
304+ else
305+ yukon_get_stats(skge, data);
306+}
307+
308+/* Use hardware MIB variables for critical path statistics and
309+ * transmit feedback not reported at interrupt.
310+ * Other errors are accounted for in interrupt handler.
311+ */
312+static struct net_device_stats *skge_get_stats(struct net_device *dev)
313+{
314+ struct skge_port *skge = netdev_priv(dev);
315+ u64 data[ARRAY_SIZE(skge_stats)];
316+
317+ if (skge->hw->chip_id == CHIP_ID_GENESIS)
318+ genesis_get_stats(skge, data);
319+ else
320+ yukon_get_stats(skge, data);
321+
322+ skge->net_stats.tx_bytes = data[0];
323+ skge->net_stats.rx_bytes = data[1];
324+ skge->net_stats.tx_packets = data[2] + data[4] + data[6];
325+ skge->net_stats.rx_packets = data[3] + data[5] + data[7];
326+ skge->net_stats.multicast = data[3] + data[5];
327+ skge->net_stats.collisions = data[10];
328+ skge->net_stats.tx_aborted_errors = data[12];
329+
330+ return &skge->net_stats;
331+}
332+
333+static void skge_get_strings(struct net_device *dev, u32 stringset, u8 *data)
334+{
335+ int i;
336+
337+ switch (stringset) {
338+ case ETH_SS_STATS:
339+ for (i = 0; i < ARRAY_SIZE(skge_stats); i++)
340+ memcpy(data + i * ETH_GSTRING_LEN,
341+ skge_stats[i].name, ETH_GSTRING_LEN);
342+ break;
343+ }
344+}
345+
346+static void skge_get_ring_param(struct net_device *dev,
347+ struct ethtool_ringparam *p)
348+{
349+ struct skge_port *skge = netdev_priv(dev);
350+
351+ p->rx_max_pending = MAX_RX_RING_SIZE;
352+ p->tx_max_pending = MAX_TX_RING_SIZE;
353+ p->rx_mini_max_pending = 0;
354+ p->rx_jumbo_max_pending = 0;
355+
356+ p->rx_pending = skge->rx_ring.count;
357+ p->tx_pending = skge->tx_ring.count;
358+ p->rx_mini_pending = 0;
359+ p->rx_jumbo_pending = 0;
360+}
361+
362+static int skge_set_ring_param(struct net_device *dev,
363+ struct ethtool_ringparam *p)
364+{
365+ struct skge_port *skge = netdev_priv(dev);
366+ int err;
367+
368+ if (p->rx_pending == 0 || p->rx_pending > MAX_RX_RING_SIZE ||
369+ p->tx_pending < TX_LOW_WATER || p->tx_pending > MAX_TX_RING_SIZE)
370+ return -EINVAL;
371+
372+ skge->rx_ring.count = p->rx_pending;
373+ skge->tx_ring.count = p->tx_pending;
374+
375+ if (netif_running(dev)) {
376+ skge_down(dev);
377+ err = skge_up(dev);
378+ if (err)
379+ dev_close(dev);
380+ }
381+
382+ return 0;
383+}
384+
385+static u32 skge_get_msglevel(struct net_device *netdev)
386+{
387+ struct skge_port *skge = netdev_priv(netdev);
388+ return skge->msg_enable;
389+}
390+
391+static void skge_set_msglevel(struct net_device *netdev, u32 value)
392+{
393+ struct skge_port *skge = netdev_priv(netdev);
394+ skge->msg_enable = value;
395+}
396+
397+static int skge_nway_reset(struct net_device *dev)
398+{
399+ struct skge_port *skge = netdev_priv(dev);
400+
401+ if (skge->autoneg != AUTONEG_ENABLE || !netif_running(dev))
402+ return -EINVAL;
403+
404+ skge_phy_reset(skge);
405+ return 0;
406+}
407+
408+static int skge_set_sg(struct net_device *dev, u32 data)
409+{
410+ struct skge_port *skge = netdev_priv(dev);
411+ struct skge_hw *hw = skge->hw;
412+
413+ if (hw->chip_id == CHIP_ID_GENESIS && data)
414+ return -EOPNOTSUPP;
415+ return ethtool_op_set_sg(dev, data);
416+}
417+
418+static u32 skge_get_tx_csum(struct net_device *dev)
419+{
420+ return (dev->features & NETIF_F_HW_CSUM) != 0;
421+}
422+
423+static int skge_set_tx_csum(struct net_device *dev, u32 data)
424+{
425+ struct skge_port *skge = netdev_priv(dev);
426+ struct skge_hw *hw = skge->hw;
427+
428+ if (hw->chip_id == CHIP_ID_GENESIS && data)
429+ return -EOPNOTSUPP;
430+
431+ if (data)
432+ dev->features |= NETIF_F_HW_CSUM;
433+ else
434+ dev->features &= ~NETIF_F_HW_CSUM;
435+ return 0;
436+}
437+
438+
439+static u32 skge_get_rx_csum(struct net_device *dev)
440+{
441+ struct skge_port *skge = netdev_priv(dev);
442+
443+ return skge->rx_csum;
444+}
445+
446+/* Only Yukon supports checksum offload. */
447+static int skge_set_rx_csum(struct net_device *dev, u32 data)
448+{
449+ struct skge_port *skge = netdev_priv(dev);
450+
451+ if (skge->hw->chip_id == CHIP_ID_GENESIS && data)
452+ return -EOPNOTSUPP;
453+
454+ skge->rx_csum = data;
455+ return 0;
456+}
457+
458+static void skge_get_pauseparam(struct net_device *dev,
459+ struct ethtool_pauseparam *ecmd)
460+{
461+ struct skge_port *skge = netdev_priv(dev);
462+
463+ ecmd->tx_pause = (skge->flow_control == FLOW_MODE_LOC_SEND)
464+ || (skge->flow_control == FLOW_MODE_SYMMETRIC);
465+ ecmd->rx_pause = (skge->flow_control == FLOW_MODE_REM_SEND)
466+ || (skge->flow_control == FLOW_MODE_SYMMETRIC);
467+
468+ ecmd->autoneg = skge->autoneg;
469+}
470+
471+static int skge_set_pauseparam(struct net_device *dev,
472+ struct ethtool_pauseparam *ecmd)
473+{
474+ struct skge_port *skge = netdev_priv(dev);
475+
476+ skge->autoneg = ecmd->autoneg;
477+ if (ecmd->rx_pause && ecmd->tx_pause)
478+ skge->flow_control = FLOW_MODE_SYMMETRIC;
479+ else if (ecmd->rx_pause && !ecmd->tx_pause)
480+ skge->flow_control = FLOW_MODE_REM_SEND;
481+ else if (!ecmd->rx_pause && ecmd->tx_pause)
482+ skge->flow_control = FLOW_MODE_LOC_SEND;
483+ else
484+ skge->flow_control = FLOW_MODE_NONE;
485+
486+ if (netif_running(dev))
487+ skge_phy_reset(skge);
488+ return 0;
489+}
490+
491+/* Chip internal frequency for clock calculations */
492+static inline u32 hwkhz(const struct skge_hw *hw)
493+{
494+ return (hw->chip_id == CHIP_ID_GENESIS) ? 53125 : 78125;
495+}
496+
497+/* Chip HZ to microseconds */
498+static inline u32 skge_clk2usec(const struct skge_hw *hw, u32 ticks)
499+{
500+ return (ticks * 1000) / hwkhz(hw);
501+}
502+
503+/* Microseconds to chip HZ */
504+static inline u32 skge_usecs2clk(const struct skge_hw *hw, u32 usec)
505+{
506+ return hwkhz(hw) * usec / 1000;
507+}
508+
509+static int skge_get_coalesce(struct net_device *dev,
510+ struct ethtool_coalesce *ecmd)
511+{
512+ struct skge_port *skge = netdev_priv(dev);
513+ struct skge_hw *hw = skge->hw;
514+ int port = skge->port;
515+
516+ ecmd->rx_coalesce_usecs = 0;
517+ ecmd->tx_coalesce_usecs = 0;
518+
519+ if (skge_read32(hw, B2_IRQM_CTRL) & TIM_START) {
520+ u32 delay = skge_clk2usec(hw, skge_read32(hw, B2_IRQM_INI));
521+ u32 msk = skge_read32(hw, B2_IRQM_MSK);
522+
523+ if (msk & rxirqmask[port])
524+ ecmd->rx_coalesce_usecs = delay;
525+ if (msk & txirqmask[port])
526+ ecmd->tx_coalesce_usecs = delay;
527+ }
528+
529+ return 0;
530+}
531+
532+/* Note: interrupt timer is per board, but can turn on/off per port */
533+static int skge_set_coalesce(struct net_device *dev,
534+ struct ethtool_coalesce *ecmd)
535+{
536+ struct skge_port *skge = netdev_priv(dev);
537+ struct skge_hw *hw = skge->hw;
538+ int port = skge->port;
539+ u32 msk = skge_read32(hw, B2_IRQM_MSK);
540+ u32 delay = 25;
541+
542+ if (ecmd->rx_coalesce_usecs == 0)
543+ msk &= ~rxirqmask[port];
544+ else if (ecmd->rx_coalesce_usecs < 25 ||
545+ ecmd->rx_coalesce_usecs > 33333)
546+ return -EINVAL;
547+ else {
548+ msk |= rxirqmask[port];
549+ delay = ecmd->rx_coalesce_usecs;
550+ }
551+
552+ if (ecmd->tx_coalesce_usecs == 0)
553+ msk &= ~txirqmask[port];
554+ else if (ecmd->tx_coalesce_usecs < 25 ||
555+ ecmd->tx_coalesce_usecs > 33333)
556+ return -EINVAL;
557+ else {
558+ msk |= txirqmask[port];
559+ delay = min(delay, ecmd->rx_coalesce_usecs);
560+ }
561+
562+ skge_write32(hw, B2_IRQM_MSK, msk);
563+ if (msk == 0)
564+ skge_write32(hw, B2_IRQM_CTRL, TIM_STOP);
565+ else {
566+ skge_write32(hw, B2_IRQM_INI, skge_usecs2clk(hw, delay));
567+ skge_write32(hw, B2_IRQM_CTRL, TIM_START);
568+ }
569+ return 0;
570+}
571+
572+enum led_mode { LED_MODE_OFF, LED_MODE_ON, LED_MODE_TST };
573+static void skge_led(struct skge_port *skge, enum led_mode mode)
574+{
575+ struct skge_hw *hw = skge->hw;
576+ int port = skge->port;
577+
578+ spin_lock_bh(&hw->phy_lock);
579+ if (hw->chip_id == CHIP_ID_GENESIS) {
580+ switch (mode) {
581+ case LED_MODE_OFF:
582+ xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, PHY_B_PEC_LED_OFF);
583+ skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_OFF);
584+ skge_write32(hw, SK_REG(port, RX_LED_VAL), 0);
585+ skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_T_OFF);
586+ break;
587+
588+ case LED_MODE_ON:
589+ skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_ON);
590+ skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_LINKSYNC_ON);
591+
592+ skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_START);
593+ skge_write8(hw, SK_REG(port, TX_LED_CTRL), LED_START);
594+
595+ break;
596+
597+ case LED_MODE_TST:
598+ skge_write8(hw, SK_REG(port, RX_LED_TST), LED_T_ON);
599+ skge_write32(hw, SK_REG(port, RX_LED_VAL), 100);
600+ skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_START);
601+
602+ xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, PHY_B_PEC_LED_ON);
603+ break;
604+ }
605+ } else {
606+ switch (mode) {
607+ case LED_MODE_OFF:
608+ gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0);
609+ gm_phy_write(hw, port, PHY_MARV_LED_OVER,
610+ PHY_M_LED_MO_DUP(MO_LED_OFF) |
611+ PHY_M_LED_MO_10(MO_LED_OFF) |
612+ PHY_M_LED_MO_100(MO_LED_OFF) |
613+ PHY_M_LED_MO_1000(MO_LED_OFF) |
614+ PHY_M_LED_MO_RX(MO_LED_OFF));
615+ break;
616+ case LED_MODE_ON:
617+ gm_phy_write(hw, port, PHY_MARV_LED_CTRL,
618+ PHY_M_LED_PULS_DUR(PULS_170MS) |
619+ PHY_M_LED_BLINK_RT(BLINK_84MS) |
620+ PHY_M_LEDC_TX_CTRL |
621+ PHY_M_LEDC_DP_CTRL);
622+
623+ gm_phy_write(hw, port, PHY_MARV_LED_OVER,
624+ PHY_M_LED_MO_RX(MO_LED_OFF) |
625+ (skge->speed == SPEED_100 ?
626+ PHY_M_LED_MO_100(MO_LED_ON) : 0));
627+ break;
628+ case LED_MODE_TST:
629+ gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0);
630+ gm_phy_write(hw, port, PHY_MARV_LED_OVER,
631+ PHY_M_LED_MO_DUP(MO_LED_ON) |
632+ PHY_M_LED_MO_10(MO_LED_ON) |
633+ PHY_M_LED_MO_100(MO_LED_ON) |
634+ PHY_M_LED_MO_1000(MO_LED_ON) |
635+ PHY_M_LED_MO_RX(MO_LED_ON));
636+ }
637+ }
638+ spin_unlock_bh(&hw->phy_lock);
639+}
640+
641+/* blink LED's for finding board */
642+static int skge_phys_id(struct net_device *dev, u32 data)
643+{
644+ struct skge_port *skge = netdev_priv(dev);
645+ unsigned long ms;
646+ enum led_mode mode = LED_MODE_TST;
647+
648+ if (!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ))
649+ ms = jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT / HZ) * 1000;
650+ else
651+ ms = data * 1000;
652+
653+ while (ms > 0) {
654+ skge_led(skge, mode);
655+ mode ^= LED_MODE_TST;
656+
657+ if (msleep_interruptible(BLINK_MS))
658+ break;
659+ ms -= BLINK_MS;
660+ }
661+
662+ /* back to regular LED state */
663+ skge_led(skge, netif_running(dev) ? LED_MODE_ON : LED_MODE_OFF);
664+
665+ return 0;
666+}
667+
668+static struct ethtool_ops skge_ethtool_ops = {
669+ .get_settings = skge_get_settings,
670+ .set_settings = skge_set_settings,
671+ .get_drvinfo = skge_get_drvinfo,
672+ .get_regs_len = skge_get_regs_len,
673+ .get_regs = skge_get_regs,
674+ .get_msglevel = skge_get_msglevel,
675+ .set_msglevel = skge_set_msglevel,
676+ .nway_reset = skge_nway_reset,
677+ .get_link = ethtool_op_get_link,
678+ .get_ringparam = skge_get_ring_param,
679+ .set_ringparam = skge_set_ring_param,
680+ .get_pauseparam = skge_get_pauseparam,
681+ .set_pauseparam = skge_set_pauseparam,
682+ .get_coalesce = skge_get_coalesce,
683+ .set_coalesce = skge_set_coalesce,
684+ .get_sg = ethtool_op_get_sg,
685+ .set_sg = skge_set_sg,
686+ .get_tx_csum = skge_get_tx_csum,
687+ .set_tx_csum = skge_set_tx_csum,
688+ .get_rx_csum = skge_get_rx_csum,
689+ .set_rx_csum = skge_set_rx_csum,
690+ .get_strings = skge_get_strings,
691+ .phys_id = skge_phys_id,
692+ .get_stats_count = skge_get_stats_count,
693+ .get_ethtool_stats = skge_get_ethtool_stats,
694+};
695+
696+/*
697+ * Allocate ring elements and chain them together
698+ * One-to-one association of board descriptors with ring elements
699+ */
700+static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u32 base)
701+{
702+ struct skge_tx_desc *d;
703+ struct skge_element *e;
704+ int i;
705+
706+ ring->start = kmalloc(sizeof(*e)*ring->count, GFP_KERNEL);
707+ if (!ring->start)
708+ return -ENOMEM;
709+
710+ for (i = 0, e = ring->start, d = vaddr; i < ring->count; i++, e++, d++) {
711+ e->desc = d;
712+ e->skb = NULL;
713+ if (i == ring->count - 1) {
714+ e->next = ring->start;
715+ d->next_offset = base;
716+ } else {
717+ e->next = e + 1;
718+ d->next_offset = base + (i+1) * sizeof(*d);
719+ }
720+ }
721+ ring->to_use = ring->to_clean = ring->start;
722+
723+ return 0;
724+}
725+
726+/* Allocate and setup a new buffer for receiving */
727+static void skge_rx_setup(struct skge_port *skge, struct skge_element *e,
728+ struct sk_buff *skb, unsigned int bufsize)
729+{
730+ struct skge_rx_desc *rd = e->desc;
731+ u64 map;
732+
733+ map = pci_map_single(skge->hw->pdev, skb->data, bufsize,
734+ PCI_DMA_FROMDEVICE);
735+
736+ rd->dma_lo = map;
737+ rd->dma_hi = map >> 32;
738+ e->skb = skb;
739+ rd->csum1_start = ETH_HLEN;
740+ rd->csum2_start = ETH_HLEN;
741+ rd->csum1 = 0;
742+ rd->csum2 = 0;
743+
744+ wmb();
745+
746+ rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | bufsize;
747+ pci_unmap_addr_set(e, mapaddr, map);
748+ pci_unmap_len_set(e, maplen, bufsize);
749+}
750+
751+/* Resume receiving using existing skb,
752+ * Note: DMA address is not changed by chip.
753+ * MTU not changed while receiver active.
754+ */
755+static inline void skge_rx_reuse(struct skge_element *e, unsigned int size)
756+{
757+ struct skge_rx_desc *rd = e->desc;
758+
759+ rd->csum2 = 0;
760+ rd->csum2_start = ETH_HLEN;
761+
762+ wmb();
763+
764+ rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | size;
765+}
766+
767+
768+/* Free all buffers in receive ring, assumes receiver stopped */
769+static void skge_rx_clean(struct skge_port *skge)
770+{
771+ struct skge_hw *hw = skge->hw;
772+ struct skge_ring *ring = &skge->rx_ring;
773+ struct skge_element *e;
774+
775+ e = ring->start;
776+ do {
777+ struct skge_rx_desc *rd = e->desc;
778+ rd->control = 0;
779+ if (e->skb) {
780+ pci_unmap_single(hw->pdev,
781+ pci_unmap_addr(e, mapaddr),
782+ pci_unmap_len(e, maplen),
783+ PCI_DMA_FROMDEVICE);
784+ dev_kfree_skb(e->skb);
785+ e->skb = NULL;
786+ }
787+ } while ((e = e->next) != ring->start);
788+}
789+
790+
791+/* Allocate buffers for receive ring
792+ * For receive: to_clean is next received frame.
793+ */
794+static int skge_rx_fill(struct skge_port *skge)
795+{
796+ struct skge_ring *ring = &skge->rx_ring;
797+ struct skge_element *e;
798+
799+ e = ring->start;
800+ do {
801+ struct sk_buff *skb;
802+
803+ skb = alloc_skb(skge->rx_buf_size + NET_IP_ALIGN, GFP_KERNEL);
804+ if (!skb)
805+ return -ENOMEM;
806+
807+ skb_reserve(skb, NET_IP_ALIGN);
808+ skge_rx_setup(skge, e, skb, skge->rx_buf_size);
809+ } while ( (e = e->next) != ring->start);
810+
811+ ring->to_clean = ring->start;
812+ return 0;
813+}
814+
815+static void skge_link_up(struct skge_port *skge)
816+{
817+ skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG),
818+ LED_BLK_OFF|LED_SYNC_OFF|LED_ON);
819+
820+ netif_carrier_on(skge->netdev);
821+ netif_wake_queue(skge->netdev);
822+
823+ if (netif_msg_link(skge))
824+ printk(KERN_INFO PFX
825+ "%s: Link is up at %d Mbps, %s duplex, flow control %s\n",
826+ skge->netdev->name, skge->speed,
827+ skge->duplex == DUPLEX_FULL ? "full" : "half",
828+ (skge->flow_control == FLOW_MODE_NONE) ? "none" :
829+ (skge->flow_control == FLOW_MODE_LOC_SEND) ? "tx only" :
830+ (skge->flow_control == FLOW_MODE_REM_SEND) ? "rx only" :
831+ (skge->flow_control == FLOW_MODE_SYMMETRIC) ? "tx and rx" :
832+ "unknown");
833+}
834+
835+static void skge_link_down(struct skge_port *skge)
836+{
837+ skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), LED_OFF);
838+ netif_carrier_off(skge->netdev);
839+ netif_stop_queue(skge->netdev);
840+
841+ if (netif_msg_link(skge))
842+ printk(KERN_INFO PFX "%s: Link is down.\n", skge->netdev->name);
843+}
844+
845+static int __xm_phy_read(struct skge_hw *hw, int port, u16 reg, u16 *val)
846+{
847+ int i;
848+
849+ xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr);
850+ *val = xm_read16(hw, port, XM_PHY_DATA);
851+
852+ for (i = 0; i < PHY_RETRIES; i++) {
853+ if (xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_RDY)
854+ goto ready;
855+ udelay(1);
856+ }
857+
858+ return -ETIMEDOUT;
859+ ready:
860+ *val = xm_read16(hw, port, XM_PHY_DATA);
861+
862+ return 0;
863+}
864+
865+static u16 xm_phy_read(struct skge_hw *hw, int port, u16 reg)
866+{
867+ u16 v = 0;
868+ if (__xm_phy_read(hw, port, reg, &v))
869+ printk(KERN_WARNING PFX "%s: phy read timed out\n",
870+ hw->dev[port]->name);
871+ return v;
872+}
873+
874+static int xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val)
875+{
876+ int i;
877+
878+ xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr);
879+ for (i = 0; i < PHY_RETRIES; i++) {
880+ if (!(xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_BUSY))
881+ goto ready;
882+ udelay(1);
883+ }
884+ return -EIO;
885+
886+ ready:
887+ xm_write16(hw, port, XM_PHY_DATA, val);
888+ for (i = 0; i < PHY_RETRIES; i++) {
889+ if (!(xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_BUSY))
890+ return 0;
891+ udelay(1);
892+ }
893+ return -ETIMEDOUT;
894+}
895+
/*
 * Chip-wide initialization for Genesis (XMAC-based) boards: start the
 * LED blink source counter and bring the MAC and packet arbiters out
 * of reset with their timeout values programmed.  Per-port setup is
 * done separately in genesis_mac_init().
 */
static void genesis_init(struct skge_hw *hw)
{
	/* set blink source counter */
	skge_write32(hw, B2_BSC_INI, (SK_BLK_DUR * SK_FACT_53) / 100);
	skge_write8(hw, B2_BSC_CTRL, BSC_START);

	/* configure mac arbiter */
	skge_write16(hw, B3_MA_TO_CTRL, MA_RST_CLR);

	/* configure mac arbiter timeout values */
	skge_write8(hw, B3_MA_TOINI_RX1, SK_MAC_TO_53);
	skge_write8(hw, B3_MA_TOINI_RX2, SK_MAC_TO_53);
	skge_write8(hw, B3_MA_TOINI_TX1, SK_MAC_TO_53);
	skge_write8(hw, B3_MA_TOINI_TX2, SK_MAC_TO_53);

	/* clear the MAC arbiter recovery initial values (RCINI) */
	skge_write8(hw, B3_MA_RCINI_RX1, 0);
	skge_write8(hw, B3_MA_RCINI_RX2, 0);
	skge_write8(hw, B3_MA_RCINI_TX1, 0);
	skge_write8(hw, B3_MA_RCINI_TX2, 0);

	/* configure packet arbiter timeout */
	skge_write16(hw, B3_PA_CTRL, PA_RST_CLR);
	skge_write16(hw, B3_PA_TOINI_RX1, SK_PKT_TO_MAX);
	skge_write16(hw, B3_PA_TOINI_TX1, SK_PKT_TO_MAX);
	skge_write16(hw, B3_PA_TOINI_RX2, SK_PKT_TO_MAX);
	skge_write16(hw, B3_PA_TOINI_TX2, SK_PKT_TO_MAX);
}
923+
/*
 * Quiesce one Genesis port: mask its GMAC IRQs, reset the XMAC
 * statistics unit, clear the mode/command registers, silence the
 * Broadcom PHY interrupts and wipe the receive hash filter.
 */
static void genesis_reset(struct skge_hw *hw, int port)
{
	const u8 zero[8] = { 0 };

	skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0);

	/* reset the statistics module */
	xm_write32(hw, port, XM_GP_PORT, XM_GP_RES_STAT);
	xm_write16(hw, port, XM_IMSK, 0xffff);	/* disable XMAC IRQs */
	xm_write32(hw, port, XM_MODE, 0);		/* clear Mode Reg */
	xm_write16(hw, port, XM_TX_CMD, 0);	/* reset TX CMD Reg */
	xm_write16(hw, port, XM_RX_CMD, 0);	/* reset RX CMD Reg */

	/* disable Broadcom PHY IRQ */
	/* NOTE(review): written with xm_write16 rather than xm_phy_write,
	 * i.e. through the XMAC register space -- confirm this is intended. */
	xm_write16(hw, port, PHY_BCOM_INT_MASK, 0xffff);

	/* clear the multicast/receive hash filter */
	xm_outhash(hw, port, XM_HSM, zero);
}
942+
943+
/* Convert mode to MII values */
/* Maps a FLOW_MODE_* flow-control setting to the pause capability
 * bits (PHY_AN_PAUSE_CAP / PHY_AN_PAUSE_ASYM) placed in the
 * autonegotiation advertisement register. */
static const u16 phy_pause_map[] = {
	[FLOW_MODE_NONE] = 0,
	[FLOW_MODE_LOC_SEND] = PHY_AN_PAUSE_ASYM,
	[FLOW_MODE_SYMMETRIC] = PHY_AN_PAUSE_CAP,
	[FLOW_MODE_REM_SEND] = PHY_AN_PAUSE_CAP | PHY_AN_PAUSE_ASYM,
};
951+
952+
/* Check status of Broadcom phy link.
 *
 * On link loss: disables MAC rx/tx and reports the carrier down.
 * On link up with autonegotiation complete: resolves duplex and
 * flow control from the PHY result registers, then brings the
 * Genesis link up if the carrier was down.
 */
static void bcom_check_link(struct skge_hw *hw, int port)
{
	struct net_device *dev = hw->dev[port];
	struct skge_port *skge = netdev_priv(dev);
	u16 status;

	/* read twice because of latch */
	(void) xm_phy_read(hw, port, PHY_BCOM_STAT);
	status = xm_phy_read(hw, port, PHY_BCOM_STAT);

	if ((status & PHY_ST_LSYNC) == 0) {
		/* link is down: stop MAC receive and transmit */
		u16 cmd = xm_read16(hw, port, XM_MMU_CMD);
		cmd &= ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX);
		xm_write16(hw, port, XM_MMU_CMD, cmd);
		/* dummy read to ensure writing */
		(void) xm_read16(hw, port, XM_MMU_CMD);

		if (netif_carrier_ok(dev))
			skge_link_down(skge);
	} else {
		if (skge->autoneg == AUTONEG_ENABLE &&
		    (status & PHY_ST_AN_OVER)) {
			u16 lpa = xm_phy_read(hw, port, PHY_BCOM_AUNE_LP);
			u16 aux = xm_phy_read(hw, port, PHY_BCOM_AUX_STAT);

			if (lpa & PHY_B_AN_RF) {
				printk(KERN_NOTICE PFX "%s: remote fault\n",
				       dev->name);
				return;
			}

			/* Check Duplex mismatch */
			switch (aux & PHY_B_AS_AN_RES_MSK) {
			case PHY_B_RES_1000FD:
				skge->duplex = DUPLEX_FULL;
				break;
			case PHY_B_RES_1000HD:
				skge->duplex = DUPLEX_HALF;
				break;
			default:
				/* anything else: link negotiated badly, bail */
				printk(KERN_NOTICE PFX "%s: duplex mismatch\n",
				       dev->name);
				return;
			}


			/* We are using IEEE 802.3z/D5.0 Table 37-4 */
			switch (aux & PHY_B_AS_PAUSE_MSK) {
			case PHY_B_AS_PAUSE_MSK:
				skge->flow_control = FLOW_MODE_SYMMETRIC;
				break;
			case PHY_B_AS_PRR:
				skge->flow_control = FLOW_MODE_REM_SEND;
				break;
			case PHY_B_AS_PRT:
				skge->flow_control = FLOW_MODE_LOC_SEND;
				break;
			default:
				skge->flow_control = FLOW_MODE_NONE;
			}

			skge->speed = SPEED_1000;
		}

		if (!netif_carrier_ok(dev))
			genesis_link_up(skge);
	}
}
1022+
/* Broadcom 5400 only supports giagabit! SysKonnect did not put an additional
 * Phy on for 100 or 10Mbit operation
 *
 * Initializes the external Broadcom PHY: applies the chip-revision
 * errata workarounds, programs autoneg/forced mode, pause
 * advertisement and jumbo-frame handling, enables the link-change
 * interrupt and finally samples the current link state.
 */
static void bcom_phy_init(struct skge_port *skge, int jumbo)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	int i;
	u16 id1, r, ext, ctl;

	/* magic workaround patterns for Broadcom */
	static const struct {
		u16 reg;
		u16 val;
	} A1hack[] = {
		{ 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 },
		{ 0x17, 0x0013 }, { 0x15, 0x0404 }, { 0x17, 0x8006 },
		{ 0x15, 0x0132 }, { 0x17, 0x8006 }, { 0x15, 0x0232 },
		{ 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 },
	}, C0hack[] = {
		{ 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1204 },
		{ 0x17, 0x0013 }, { 0x15, 0x0A04 }, { 0x18, 0x0420 },
	};

	/* read Id from external PHY (all have the same address) */
	id1 = xm_phy_read(hw, port, PHY_XMAC_ID1);

	/* Optimize MDIO transfer by suppressing preamble. */
	r = xm_read16(hw, port, XM_MMU_CMD);
	r |= XM_MMU_NO_PRE;
	xm_write16(hw, port, XM_MMU_CMD,r);

	switch (id1) {
	case PHY_BCOM_ID1_C0:
		/*
		 * Workaround BCOM Errata for the C0 type.
		 * Write magic patterns to reserved registers.
		 */
		for (i = 0; i < ARRAY_SIZE(C0hack); i++)
			xm_phy_write(hw, port,
				     C0hack[i].reg, C0hack[i].val);

		break;
	case PHY_BCOM_ID1_A1:
		/*
		 * Workaround BCOM Errata for the A1 type.
		 * Write magic patterns to reserved registers.
		 */
		for (i = 0; i < ARRAY_SIZE(A1hack); i++)
			xm_phy_write(hw, port,
				     A1hack[i].reg, A1hack[i].val);
		break;
	}

	/*
	 * Workaround BCOM Errata (#10523) for all BCom PHYs.
	 * Disable Power Management after reset.
	 */
	r = xm_phy_read(hw, port, PHY_BCOM_AUX_CTRL);
	r |= PHY_B_AC_DIS_PM;
	xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL, r);

	/* Dummy read */
	xm_read16(hw, port, XM_ISRC);

	ext = PHY_B_PEC_EN_LTR; /* enable tx led */
	ctl = PHY_CT_SP1000;	/* always 1000mbit */

	if (skge->autoneg == AUTONEG_ENABLE) {
		/*
		 * Workaround BCOM Errata #1 for the C5 type.
		 * 1000Base-T Link Acquisition Failure in Slave Mode
		 * Set Repeater/DTE bit 10 of the 1000Base-T Control Register
		 */
		u16 adv = PHY_B_1000C_RD;
		if (skge->advertising & ADVERTISED_1000baseT_Half)
			adv |= PHY_B_1000C_AHD;
		if (skge->advertising & ADVERTISED_1000baseT_Full)
			adv |= PHY_B_1000C_AFD;
		xm_phy_write(hw, port, PHY_BCOM_1000T_CTRL, adv);

		ctl |= PHY_CT_ANE | PHY_CT_RE_CFG;
	} else {
		if (skge->duplex == DUPLEX_FULL)
			ctl |= PHY_CT_DUP_MD;
		/* Force to slave */
		xm_phy_write(hw, port, PHY_BCOM_1000T_CTRL, PHY_B_1000C_MSE);
	}

	/* Set autonegotiation pause parameters */
	xm_phy_write(hw, port, PHY_BCOM_AUNE_ADV,
		     phy_pause_map[skge->flow_control] | PHY_AN_CSMA);

	/* Handle Jumbo frames */
	if (jumbo) {
		xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL,
			     PHY_B_AC_TX_TST | PHY_B_AC_LONG_PACK);

		ext |= PHY_B_PEC_HIGH_LA;

	}

	xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, ext);
	xm_phy_write(hw, port, PHY_BCOM_CTRL, ctl);

	/* Use link status change interrupt */
	xm_phy_write(hw, port, PHY_BCOM_INT_MASK, PHY_B_DEF_MSK);

	bcom_check_link(hw, port);
}
1133+
/*
 * Per-port bring-up of the Genesis XMAC: reset the MAC, release the
 * external PHY from reset, initialize the Broadcom PHY, program
 * station address, RX/TX command registers, event masks, MAC arbiter
 * timeouts and the RX/TX MAC FIFOs.  Jumbo-frame support is enabled
 * when the device MTU exceeds the standard Ethernet payload size.
 */
static void genesis_mac_init(struct skge_hw *hw, int port)
{
	struct net_device *dev = hw->dev[port];
	struct skge_port *skge = netdev_priv(dev);
	int jumbo = hw->dev[port]->mtu > ETH_DATA_LEN;
	int i;
	u32 r;
	const u8 zero[6]  = { 0 };

	/* poll until the MAC reset bit is observed set (max ~10us) */
	for (i = 0; i < 10; i++) {
		skge_write16(hw, SK_REG(port, TX_MFF_CTRL1),
			     MFF_SET_MAC_RST);
		if (skge_read16(hw, SK_REG(port, TX_MFF_CTRL1)) & MFF_SET_MAC_RST)
			goto reset_ok;
		udelay(1);
	}

	/* reset never latched; continue anyway but warn */
	printk(KERN_WARNING PFX "%s: genesis reset failed\n", dev->name);

 reset_ok:
	/* Unreset the XMAC. */
	skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_CLR_MAC_RST);

	/*
	 * Perform additional initialization for external PHYs,
	 * namely for the 1000baseTX cards that use the XMAC's
	 * GMII mode.
	 */
	/* Take external Phy out of reset */
	r = skge_read32(hw, B2_GP_IO);
	if (port == 0)
		r |= GP_DIR_0|GP_IO_0;
	else
		r |= GP_DIR_2|GP_IO_2;

	skge_write32(hw, B2_GP_IO, r);


	/* Enable GMII interface */
	xm_write16(hw, port, XM_HW_CFG, XM_HW_GMII_MD);

	bcom_phy_init(skge, jumbo);

	/* Set Station Address */
	xm_outaddr(hw, port, XM_SA, dev->dev_addr);

	/* We don't use match addresses so clear */
	for (i = 1; i < 16; i++)
		xm_outaddr(hw, port, XM_EXM(i), zero);

	/* Clear MIB counters */
	xm_write16(hw, port, XM_STAT_CMD,
		   XM_SC_CLR_RXC | XM_SC_CLR_TXC);
	/* Clear two times according to Errata #3 */
	xm_write16(hw, port, XM_STAT_CMD,
		   XM_SC_CLR_RXC | XM_SC_CLR_TXC);

	/* configure Rx High Water Mark (XM_RX_HI_WM) */
	xm_write16(hw, port, XM_RX_HI_WM, 1450);

	/* We don't need the FCS appended to the packet. */
	r = XM_RX_LENERR_OK | XM_RX_STRIP_FCS;
	if (jumbo)
		r |= XM_RX_BIG_PK_OK;

	if (skge->duplex == DUPLEX_HALF) {
		/*
		 * If in manual half duplex mode the other side might be in
		 * full duplex mode, so ignore if a carrier extension is not seen
		 * on frames received
		 */
		r |= XM_RX_DIS_CEXT;
	}
	xm_write16(hw, port, XM_RX_CMD, r);


	/* We want short frames padded to 60 bytes. */
	xm_write16(hw, port, XM_TX_CMD, XM_TX_AUTO_PAD);

	/*
	 * Bump up the transmit threshold. This helps hold off transmit
	 * underruns when we're blasting traffic from both ports at once.
	 */
	xm_write16(hw, port, XM_TX_THR, 512);

	/*
	 * Enable the reception of all error frames. This is is
	 * a necessary evil due to the design of the XMAC. The
	 * XMAC's receive FIFO is only 8K in size, however jumbo
	 * frames can be up to 9000 bytes in length. When bad
	 * frame filtering is enabled, the XMAC's RX FIFO operates
	 * in 'store and forward' mode. For this to work, the
	 * entire frame has to fit into the FIFO, but that means
	 * that jumbo frames larger than 8192 bytes will be
	 * truncated. Disabling all bad frame filtering causes
	 * the RX FIFO to operate in streaming mode, in which
	 * case the XMAC will start transferring frames out of the
	 * RX FIFO as soon as the FIFO threshold is reached.
	 */
	xm_write32(hw, port, XM_MODE, XM_DEF_MODE);


	/*
	 * Initialize the Receive Counter Event Mask (XM_RX_EV_MSK)
	 *	- Enable all bits excepting 'Octets Rx OK Low CntOv'
	 *	  and 'Octets Rx OK Hi Cnt Ov'.
	 */
	xm_write32(hw, port, XM_RX_EV_MSK, XMR_DEF_MSK);

	/*
	 * Initialize the Transmit Counter Event Mask (XM_TX_EV_MSK)
	 *	- Enable all bits excepting 'Octets Tx OK Low CntOv'
	 *	  and 'Octets Tx OK Hi Cnt Ov'.
	 */
	xm_write32(hw, port, XM_TX_EV_MSK, XMT_DEF_MSK);

	/* Configure MAC arbiter */
	skge_write16(hw, B3_MA_TO_CTRL, MA_RST_CLR);

	/* configure timeout values */
	skge_write8(hw, B3_MA_TOINI_RX1, 72);
	skge_write8(hw, B3_MA_TOINI_RX2, 72);
	skge_write8(hw, B3_MA_TOINI_TX1, 72);
	skge_write8(hw, B3_MA_TOINI_TX2, 72);

	skge_write8(hw, B3_MA_RCINI_RX1, 0);
	skge_write8(hw, B3_MA_RCINI_RX2, 0);
	skge_write8(hw, B3_MA_RCINI_TX1, 0);
	skge_write8(hw, B3_MA_RCINI_TX2, 0);

	/* Configure Rx MAC FIFO */
	skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_RST_CLR);
	skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_ENA_TIM_PAT);
	skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_ENA_OP_MD);

	/* Configure Tx MAC FIFO */
	skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_RST_CLR);
	skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_TX_CTRL_DEF);
	skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_ENA_OP_MD);

	if (jumbo) {
		/* Enable frame flushing if jumbo frames used */
		skge_write16(hw, SK_REG(port,RX_MFF_CTRL1), MFF_ENA_FLUSH);
	} else {
		/* enable timeout timers if normal frames */
		skge_write16(hw, B3_PA_CTRL,
			     (port == 0) ? PA_ENA_TO_TX1 : PA_ENA_TO_TX2);
	}
}
1283+
/*
 * Shut down a Genesis port: reset statistics/IRQs, flush the XMAC
 * transmit FIFO, put the MAC back in reset, drive the external PHY
 * reset line low via GPIO, and finally disable MAC rx/tx.
 */
static void genesis_stop(struct skge_port *skge)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	u32 reg;

	genesis_reset(hw, port);

	/* Clear Tx packet arbiter timeout IRQ */
	skge_write16(hw, B3_PA_CTRL,
		     port == 0 ? PA_CLR_TO_TX1 : PA_CLR_TO_TX2);

	/*
	 * If the transfer sticks at the MAC the STOP command will not
	 * terminate if we don't flush the XMAC's transmit FIFO !
	 */
	xm_write32(hw, port, XM_MODE,
		   xm_read32(hw, port, XM_MODE)|XM_MD_FTF);


	/* Reset the MAC */
	skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_SET_MAC_RST);

	/* For external PHYs there must be special handling */
	reg = skge_read32(hw, B2_GP_IO);
	if (port == 0) {
		reg |= GP_DIR_0;
		reg &= ~GP_IO_0;
	} else {
		reg |= GP_DIR_2;
		reg &= ~GP_IO_2;
	}
	skge_write32(hw, B2_GP_IO, reg);
	/* read back to flush the GPIO write */
	skge_read32(hw, B2_GP_IO);

	/* disable MAC receive and transmit */
	xm_write16(hw, port, XM_MMU_CMD,
		   xm_read16(hw, port, XM_MMU_CMD)
		   & ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX));

	/* dummy read to ensure the write completed */
	xm_read16(hw, port, XM_MMU_CMD);
}
1325+
1326+
/*
 * Snapshot the XMAC hardware statistics into data[], in the order of
 * the skge_stats table.  data[0]/data[1] are the 64-bit TX/RX octet
 * counters assembled from their hi/lo register halves; the remaining
 * entries are 32-bit counters.  Waits up to one second (polling every
 * 10us) for the snapshot command to complete before reading.
 */
static void genesis_get_stats(struct skge_port *skge, u64 *data)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	int i;
	unsigned long timeout = jiffies + HZ;

	/* trigger a TX+RX counter snapshot */
	xm_write16(hw, port,
		   XM_STAT_CMD, XM_SC_SNP_TXC | XM_SC_SNP_RXC);

	/* wait for update to complete */
	while (xm_read16(hw, port, XM_STAT_CMD)
	       & (XM_SC_SNP_TXC | XM_SC_SNP_RXC)) {
		if (time_after(jiffies, timeout))
			break;
		udelay(10);
	}

	/* special case for 64 bit octet counter */
	data[0] = (u64) xm_read32(hw, port, XM_TXO_OK_HI) << 32
		| xm_read32(hw, port, XM_TXO_OK_LO);
	data[1] = (u64) xm_read32(hw, port, XM_RXO_OK_HI) << 32
		| xm_read32(hw, port, XM_RXO_OK_LO);

	for (i = 2; i < ARRAY_SIZE(skge_stats); i++)
		data[i] = xm_read32(hw, port, skge_stats[i].xmac_offset);
}
1354+
/*
 * Handle an XMAC interrupt for one Genesis port: on TX underrun or
 * RX overrun, flush the corresponding FIFO and bump the matching
 * error counter.
 */
static void genesis_mac_intr(struct skge_hw *hw, int port)
{
	struct skge_port *skge = netdev_priv(hw->dev[port]);
	u16 status = xm_read16(hw, port, XM_ISRC);

	if (netif_msg_intr(skge))
		printk(KERN_DEBUG PFX "%s: mac interrupt status 0x%x\n",
		       skge->netdev->name, status);

	if (status & XM_IS_TXF_UR) {
		/* transmit FIFO underrun: flush the TX FIFO */
		xm_write32(hw, port, XM_MODE, XM_MD_FTF);
		++skge->net_stats.tx_fifo_errors;
	}
	if (status & XM_IS_RXF_OV) {
		/* receive FIFO overrun: flush the RX FIFO */
		xm_write32(hw, port, XM_MODE, XM_MD_FRF);
		++skge->net_stats.rx_fifo_errors;
	}
}
1373+
/*
 * Bring a Genesis link up: program pause-frame reception/generation
 * according to the negotiated flow-control mode, set the interrupt
 * mask, re-enable PHY power management (BCom errata #10523), enable
 * MAC rx/tx and report the carrier up.
 */
static void genesis_link_up(struct skge_port *skge)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	u16 cmd;
	u32 mode, msk;

	cmd = xm_read16(hw, port, XM_MMU_CMD);

	/*
	 * enabling pause frame reception is required for 1000BT
	 * because the XMAC is not reset if the link is going down
	 */
	if (skge->flow_control == FLOW_MODE_NONE ||
	    skge->flow_control == FLOW_MODE_LOC_SEND)
		/* Disable Pause Frame Reception */
		cmd |= XM_MMU_IGN_PF;
	else
		/* Enable Pause Frame Reception */
		cmd &= ~XM_MMU_IGN_PF;

	xm_write16(hw, port, XM_MMU_CMD, cmd);

	mode = xm_read32(hw, port, XM_MODE);
	if (skge->flow_control == FLOW_MODE_SYMMETRIC ||
	    skge->flow_control == FLOW_MODE_LOC_SEND) {
		/*
		 * Configure Pause Frame Generation
		 * Use internal and external Pause Frame Generation.
		 * Sending pause frames is edge triggered.
		 * Send a Pause frame with the maximum pause time if
		 * internal oder external FIFO full condition occurs.
		 * Send a zero pause time frame to re-start transmission.
		 */
		/* XM_PAUSE_DA = '010000C28001' (default) */
		/* XM_MAC_PTIME = 0xffff (maximum) */
		/* remember this value is defined in big endian (!) */
		xm_write16(hw, port, XM_MAC_PTIME, 0xffff);

		mode |= XM_PAUSE_MODE;
		skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_ENA_PAUSE);
	} else {
		/*
		 * disable pause frame generation is required for 1000BT
		 * because the XMAC is not reset if the link is going down
		 */
		/* Disable Pause Mode in Mode Register */
		mode &= ~XM_PAUSE_MODE;

		skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_DIS_PAUSE);
	}

	xm_write32(hw, port, XM_MODE, mode);

	msk = XM_DEF_MSK;
	/* disable GP0 interrupt bit for external Phy */
	msk |= XM_IS_INP_ASS;

	xm_write16(hw, port, XM_IMSK, msk);
	/* read to clear any pending interrupt source bits */
	xm_read16(hw, port, XM_ISRC);

	/* get MMU Command Reg. */
	cmd = xm_read16(hw, port, XM_MMU_CMD);
	if (skge->duplex == DUPLEX_FULL)
		cmd |= XM_MMU_GMII_FD;

	/*
	 * Workaround BCOM Errata (#10523) for all BCom Phys
	 * Enable Power Management after link up
	 */
	xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL,
		     xm_phy_read(hw, port, PHY_BCOM_AUX_CTRL)
		     & ~PHY_B_AC_DIS_PM);
	xm_phy_write(hw, port, PHY_BCOM_INT_MASK, PHY_B_DEF_MSK);

	/* enable Rx/Tx */
	xm_write16(hw, port, XM_MMU_CMD,
		   cmd | XM_MMU_ENA_RX | XM_MMU_ENA_TX);
	skge_link_up(skge);
}
1454+
1455+
/*
 * Service a Broadcom PHY interrupt: report pair-swap errors, apply
 * the loopback-toggle errata workaround on "NO HCD", and re-check
 * the link on autoneg-complete or link-status-change events.
 */
static inline void bcom_phy_intr(struct skge_port *skge)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	u16 isrc;

	isrc = xm_phy_read(hw, port, PHY_BCOM_INT_STAT);
	if (netif_msg_intr(skge))
		printk(KERN_DEBUG PFX "%s: phy interrupt status 0x%x\n",
		       skge->netdev->name, isrc);

	if (isrc & PHY_B_IS_PSE)
		printk(KERN_ERR PFX "%s: uncorrectable pair swap error\n",
		       hw->dev[port]->name);

	/* Workaround BCom Errata:
	 *	enable and disable loopback mode if "NO HCD" occurs.
	 */
	if (isrc & PHY_B_IS_NO_HDCL) {
		u16 ctrl = xm_phy_read(hw, port, PHY_BCOM_CTRL);
		xm_phy_write(hw, port, PHY_BCOM_CTRL,
			     ctrl | PHY_CT_LOOP);
		xm_phy_write(hw, port, PHY_BCOM_CTRL,
			     ctrl & ~PHY_CT_LOOP);
	}

	if (isrc & (PHY_B_IS_AN_PR | PHY_B_IS_LST_CHANGE))
		bcom_check_link(hw, port);

}
1486+
1487+static int gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val)
1488+{
1489+ int i;
1490+
1491+ gma_write16(hw, port, GM_SMI_DATA, val);
1492+ gma_write16(hw, port, GM_SMI_CTRL,
1493+ GM_SMI_CT_PHY_AD(hw->phy_addr) | GM_SMI_CT_REG_AD(reg));
1494+ for (i = 0; i < PHY_RETRIES; i++) {
1495+ udelay(1);
1496+
1497+ if (!(gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_BUSY))
1498+ return 0;
1499+ }
1500+
1501+ printk(KERN_WARNING PFX "%s: phy write timeout\n",
1502+ hw->dev[port]->name);
1503+ return -EIO;
1504+}
1505+
1506+static int __gm_phy_read(struct skge_hw *hw, int port, u16 reg, u16 *val)
1507+{
1508+ int i;
1509+
1510+ gma_write16(hw, port, GM_SMI_CTRL,
1511+ GM_SMI_CT_PHY_AD(hw->phy_addr)
1512+ | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);
1513+
1514+ for (i = 0; i < PHY_RETRIES; i++) {
1515+ udelay(1);
1516+ if (gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_RD_VAL)
1517+ goto ready;
1518+ }
1519+
1520+ return -ETIMEDOUT;
1521+ ready:
1522+ *val = gma_read16(hw, port, GM_SMI_DATA);
1523+ return 0;
1524+}
1525+
1526+static u16 gm_phy_read(struct skge_hw *hw, int port, u16 reg)
1527+{
1528+ u16 v = 0;
1529+ if (__gm_phy_read(hw, port, reg, &v))
1530+ printk(KERN_WARNING PFX "%s: phy read timeout\n",
1531+ hw->dev[port]->name);
1532+ return v;
1533+}
1534+
/* Marvell Phy Initialization.
 *
 * Programs the Marvell PHY for either autonegotiation (advertising
 * the speeds/duplexes and pause capabilities configured in the port)
 * or forced speed/duplex, resets the PHY, and selects which PHY
 * interrupt events to enable.
 */
static void yukon_init(struct skge_hw *hw, int port)
{
	struct skge_port *skge = netdev_priv(hw->dev[port]);
	u16 ctrl, ct1000, adv;

	if (skge->autoneg == AUTONEG_ENABLE) {
		/* configure downshift / MAC interface clock in ext control */
		u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL);

		ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK |
			  PHY_M_EC_MAC_S_MSK);
		ectrl |= PHY_M_EC_MAC_S(MAC_TX_CLK_25_MHZ);

		ectrl |= PHY_M_EC_M_DSC(0) | PHY_M_EC_S_DSC(1);

		gm_phy_write(hw, port, PHY_MARV_EXT_CTRL, ectrl);
	}

	ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL);
	if (skge->autoneg == AUTONEG_DISABLE)
		ctrl &= ~PHY_CT_ANE;

	/* reset the PHY before reprogramming it */
	ctrl |= PHY_CT_RESET;
	gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);

	ctrl = 0;
	ct1000 = 0;
	adv = PHY_AN_CSMA;

	if (skge->autoneg == AUTONEG_ENABLE) {
		if (hw->copper) {
			if (skge->advertising & ADVERTISED_1000baseT_Full)
				ct1000 |= PHY_M_1000C_AFD;
			if (skge->advertising & ADVERTISED_1000baseT_Half)
				ct1000 |= PHY_M_1000C_AHD;
			if (skge->advertising & ADVERTISED_100baseT_Full)
				adv |= PHY_M_AN_100_FD;
			if (skge->advertising & ADVERTISED_100baseT_Half)
				adv |= PHY_M_AN_100_HD;
			if (skge->advertising & ADVERTISED_10baseT_Full)
				adv |= PHY_M_AN_10_FD;
			if (skge->advertising & ADVERTISED_10baseT_Half)
				adv |= PHY_M_AN_10_HD;
		} else	/* special defines for FIBER (88E1011S only) */
			adv |= PHY_M_AN_1000X_AHD | PHY_M_AN_1000X_AFD;

		/* Set Flow-control capabilities */
		adv |= phy_pause_map[skge->flow_control];

		/* Restart Auto-negotiation */
		ctrl |= PHY_CT_ANE | PHY_CT_RE_CFG;
	} else {
		/* forced speed/duplex settings */
		ct1000 = PHY_M_1000C_MSE;

		if (skge->duplex == DUPLEX_FULL)
			ctrl |= PHY_CT_DUP_MD;

		switch (skge->speed) {
		case SPEED_1000:
			ctrl |= PHY_CT_SP1000;
			break;
		case SPEED_100:
			ctrl |= PHY_CT_SP100;
			break;
		}

		ctrl |= PHY_CT_RESET;
	}

	gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, ct1000);

	gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, adv);
	gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);

	/* Enable phy interrupt on autonegotiation complete (or link up) */
	if (skge->autoneg == AUTONEG_ENABLE)
		gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_AN_MSK);
	else
		gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_DEF_MSK);
}
1616+
/*
 * Reset a Yukon port's receive state: disable PHY interrupts, clear
 * the multicast hash filter and re-enable unicast/multicast filtering.
 */
static void yukon_reset(struct skge_hw *hw, int port)
{
	gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);/* disable PHY IRQs */
	gma_write16(hw, port, GM_MC_ADDR_H1, 0);	/* clear MC hash */
	gma_write16(hw, port, GM_MC_ADDR_H2, 0);
	gma_write16(hw, port, GM_MC_ADDR_H3, 0);
	gma_write16(hw, port, GM_MC_ADDR_H4, 0);

	gma_write16(hw, port, GM_RX_CTRL,
		    gma_read16(hw, port, GM_RX_CTRL)
		    | GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
}
1629+
/* Apparently, early versions of Yukon-Lite had wrong chip_id? */
/* Probe for that case: write 0xff to the top byte of the Flash Address
 * register; on a real Yukon-Lite A0 the byte reads back as 0.  The
 * original register value is restored before returning.  Returns
 * non-zero when the chip is a Yukon-Lite rev A0. */
static int is_yukon_lite_a0(struct skge_hw *hw)
{
	u32 reg;
	int ret;

	if (hw->chip_id != CHIP_ID_YUKON)
		return 0;

	reg = skge_read32(hw, B2_FAR);
	skge_write8(hw, B2_FAR + 3, 0xff);
	ret = (skge_read8(hw, B2_FAR + 3) != 0);
	skge_write32(hw, B2_FAR, reg);
	return ret;
}
1645+
/*
 * Per-port bring-up of the Yukon GMAC: hard-reset the PHY and MAC
 * (with the COMA-mode GPIO workaround on Yukon-Lite >= rev A3),
 * program speed/duplex/flow-control, initialize the PHY via
 * yukon_init(), clear the MIB counters, set addresses and
 * serial-mode options, and configure the RX/TX MAC FIFOs.
 */
static void yukon_mac_init(struct skge_hw *hw, int port)
{
	struct skge_port *skge = netdev_priv(hw->dev[port]);
	int i;
	u32 reg;
	const u8 *addr = hw->dev[port]->dev_addr;

	/* WA code for COMA mode -- set PHY reset */
	if (hw->chip_id == CHIP_ID_YUKON_LITE &&
	    hw->chip_rev >= CHIP_REV_YU_LITE_A3) {
		reg = skge_read32(hw, B2_GP_IO);
		reg |= GP_DIR_9 | GP_IO_9;
		skge_write32(hw, B2_GP_IO, reg);
	}

	/* hard reset */
	skge_write32(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
	skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET);

	/* WA code for COMA mode -- clear PHY reset */
	if (hw->chip_id == CHIP_ID_YUKON_LITE &&
	    hw->chip_rev >= CHIP_REV_YU_LITE_A3) {
		reg = skge_read32(hw, B2_GP_IO);
		reg |= GP_DIR_9;
		reg &= ~GP_IO_9;
		skge_write32(hw, B2_GP_IO, reg);
	}

	/* Set hardware config mode */
	reg = GPC_INT_POL_HI | GPC_DIS_FC | GPC_DIS_SLEEP |
		GPC_ENA_XC | GPC_ANEG_ADV_ALL_M | GPC_ENA_PAUSE;
	reg |= hw->copper ? GPC_HWCFG_GMII_COP : GPC_HWCFG_GMII_FIB;

	/* Clear GMC reset */
	skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_SET);
	skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_CLR);
	skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON | GMC_RST_CLR);

	if (skge->autoneg == AUTONEG_DISABLE) {
		/* disable all autoneg functions, then force speed/duplex */
		reg = GM_GPCR_AU_ALL_DIS;
		gma_write16(hw, port, GM_GP_CTRL,
			    gma_read16(hw, port, GM_GP_CTRL) | reg);

		switch (skge->speed) {
		case SPEED_1000:
			reg &= ~GM_GPCR_SPEED_100;
			reg |= GM_GPCR_SPEED_1000;
			break;
		case SPEED_100:
			reg &= ~GM_GPCR_SPEED_1000;
			reg |= GM_GPCR_SPEED_100;
			break;
		case SPEED_10:
			reg &= ~(GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100);
			break;
		}

		if (skge->duplex == DUPLEX_FULL)
			reg |= GM_GPCR_DUP_FULL;
	} else
		reg = GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100 | GM_GPCR_DUP_FULL;

	switch (skge->flow_control) {
	case FLOW_MODE_NONE:
		skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
		reg |= GM_GPCR_FC_TX_DIS | GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS;
		break;
	case FLOW_MODE_LOC_SEND:
		/* disable Rx flow-control */
		reg |= GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS;
	}

	gma_write16(hw, port, GM_GP_CTRL, reg);
	/* dummy read to flush */
	skge_read16(hw, SK_REG(port, GMAC_IRQ_SRC));

	yukon_init(hw, port);

	/* MIB clear */
	reg = gma_read16(hw, port, GM_PHY_ADDR);
	gma_write16(hw, port, GM_PHY_ADDR, reg | GM_PAR_MIB_CLR);

	/* read all MIB counters to clear them, then restore the register */
	for (i = 0; i < GM_MIB_CNT_SIZE; i++)
		gma_read16(hw, port, GM_MIB_CNT_BASE + 8*i);
	gma_write16(hw, port, GM_PHY_ADDR, reg);

	/* transmit control */
	gma_write16(hw, port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF));

	/* receive control reg: unicast + multicast + no FCS  */
	gma_write16(hw, port, GM_RX_CTRL,
			 GM_RXCR_UCF_ENA | GM_RXCR_CRC_DIS | GM_RXCR_MCF_ENA);

	/* transmit flow control */
	gma_write16(hw, port, GM_TX_FLOW_CTRL, 0xffff);

	/* transmit parameter */
	gma_write16(hw, port, GM_TX_PARAM,
			 TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) |
			 TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
			 TX_IPG_JAM_DATA(TX_IPG_JAM_DEF));

	/* serial mode register */
	reg = GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF);
	if (hw->dev[port]->mtu > 1500)
		reg |= GM_SMOD_JUMBO_ENA;

	gma_write16(hw, port, GM_SERIAL_MODE, reg);

	/* physical address: used for pause frames */
	gma_set_addr(hw, port, GM_SRC_ADDR_1L, addr);
	/* virtual address for data */
	gma_set_addr(hw, port, GM_SRC_ADDR_2L, addr);

	/* counter overflow IRQ masks -- all written as 0
	 * NOTE(review): original comment said "enable", but a zero mask
	 * value appears to leave these IRQ sources off -- confirm. */
	gma_write16(hw, port, GM_TX_IRQ_MSK, 0);
	gma_write16(hw, port, GM_RX_IRQ_MSK, 0);
	gma_write16(hw, port, GM_TR_IRQ_MSK, 0);

	/* Initialize Mac Fifo */

	/* Configure Rx MAC FIFO */
	skge_write16(hw, SK_REG(port, RX_GMF_FL_MSK), RX_FF_FL_DEF_MSK);
	reg = GMF_OPER_ON | GMF_RX_F_FL_ON;

	/* disable Rx GMAC FIFO Flush for YUKON-Lite Rev. A0 only */
	if (is_yukon_lite_a0(hw))
		reg &= ~GMF_RX_F_FL_ON;

	skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR);
	skge_write16(hw, SK_REG(port, RX_GMF_CTRL_T), reg);
	/*
	 * because Pause Packet Truncation in GMAC is not working
	 * we have to increase the Flush Threshold to 64 bytes
	 * in order to flush pause packets in Rx FIFO on Yukon-1
	 */
	skge_write16(hw, SK_REG(port, RX_GMF_FL_THR), RX_GMF_FL_THR_DEF+1);

	/* Configure Tx MAC FIFO */
	skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR);
	skge_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON);
}
1787+
/* Go into power down mode */
/* Disables polarity reversal, resets the PHY, then switches on
 * IEEE-compatible power-down mode via the PHY control register. */
static void yukon_suspend(struct skge_hw *hw, int port)
{
	u16 ctrl;

	/* disable automatic polarity reversal */
	ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
	ctrl |= PHY_M_PC_POL_R_DIS;
	gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);

	ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL);
	ctrl |= PHY_CT_RESET;
	gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);

	/* switch IEEE compatible power down mode on */
	ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL);
	ctrl |= PHY_CT_PDOWN;
	gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);
}
1806+
/*
 * Shut down a Yukon port: mask GMAC IRQs, reset the receive path,
 * disable MAC rx/tx, power the PHY down and put the GPHY/GMAC back
 * into reset.
 */
static void yukon_stop(struct skge_port *skge)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;

	skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0);
	yukon_reset(hw, port);

	/* disable MAC receive and transmit */
	gma_write16(hw, port, GM_GP_CTRL,
			 gma_read16(hw, port, GM_GP_CTRL)
			 & ~(GM_GPCR_TX_ENA|GM_GPCR_RX_ENA));
	/* dummy read to flush the write */
	gma_read16(hw, port, GM_GP_CTRL);

	yukon_suspend(hw, port);

	/* set GPHY Control reset */
	skge_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
	skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET);
}
1826+
/*
 * Snapshot the GMAC hardware statistics into data[], in the order of
 * the skge_stats table.  data[0]/data[1] are the 64-bit TX/RX octet
 * counters assembled from their hi/lo register halves; the remaining
 * entries are 32-bit counters.
 */
static void yukon_get_stats(struct skge_port *skge, u64 *data)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	int i;

	data[0] = (u64) gma_read32(hw, port, GM_TXO_OK_HI) << 32
		| gma_read32(hw, port, GM_TXO_OK_LO);
	data[1] = (u64) gma_read32(hw, port, GM_RXO_OK_HI) << 32
		| gma_read32(hw, port, GM_RXO_OK_LO);

	for (i = 2; i < ARRAY_SIZE(skge_stats); i++)
		data[i] = gma_read32(hw, port,
				     skge_stats[i].gma_offset);
}
1842+
/*
 * Handle a GMAC interrupt for one Yukon port: on RX FIFO overrun or
 * TX FIFO underrun, bump the matching error counter and clear the
 * condition in the FIFO control register.
 */
static void yukon_mac_intr(struct skge_hw *hw, int port)
{
	struct net_device *dev = hw->dev[port];
	struct skge_port *skge = netdev_priv(dev);
	u8 status = skge_read8(hw, SK_REG(port, GMAC_IRQ_SRC));

	if (netif_msg_intr(skge))
		printk(KERN_DEBUG PFX "%s: mac interrupt status 0x%x\n",
		       dev->name, status);

	if (status & GM_IS_RX_FF_OR) {
		++skge->net_stats.rx_fifo_errors;
		skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_CLI_RX_FO);
	}

	if (status & GM_IS_TX_FF_UR) {
		++skge->net_stats.tx_fifo_errors;
		skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_FU);
	}

}
1864+
1865+static u16 yukon_speed(const struct skge_hw *hw, u16 aux)
1866+{
1867+ switch (aux & PHY_M_PS_SPEED_MSK) {
1868+ case PHY_M_PS_SPEED_1000:
1869+ return SPEED_1000;
1870+ case PHY_M_PS_SPEED_100:
1871+ return SPEED_100;
1872+ default:
1873+ return SPEED_10;
1874+ }
1875+}
1876+
/*
 * Bring a Yukon link up: unmask GMAC IRQs (for TX FIFO underrun),
 * set full duplex where appropriate, enable MAC rx/tx, restore the
 * default PHY interrupt mask and report the carrier up.
 */
static void yukon_link_up(struct skge_port *skge)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	u16 reg;

	/* Enable Transmit FIFO Underrun */
	skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), GMAC_DEF_MSK);

	reg = gma_read16(hw, port, GM_GP_CTRL);
	if (skge->duplex == DUPLEX_FULL || skge->autoneg == AUTONEG_ENABLE)
		reg |= GM_GPCR_DUP_FULL;

	/* enable Rx/Tx */
	reg |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
	gma_write16(hw, port, GM_GP_CTRL, reg);

	gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_DEF_MSK);
	skge_link_up(skge);
}
1897+
/*
 * Handle a Yukon link-down event: mask PHY IRQs, disable MAC rx/tx,
 * restore the asymmetric pause advertisement if it was in use, reset
 * the receive path, report the carrier down and re-arm the PHY for
 * the next link attempt.
 */
static void yukon_link_down(struct skge_port *skge)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	u16 ctrl;

	gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);

	ctrl = gma_read16(hw, port, GM_GP_CTRL);
	ctrl &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
	gma_write16(hw, port, GM_GP_CTRL, ctrl);

	if (skge->flow_control == FLOW_MODE_REM_SEND) {
		/* restore Asymmetric Pause bit */
		gm_phy_write(hw, port, PHY_MARV_AUNE_ADV,
				  gm_phy_read(hw, port,
						   PHY_MARV_AUNE_ADV)
				  | PHY_M_AN_ASP);

	}

	yukon_reset(hw, port);
	skge_link_down(skge);

	yukon_init(hw, port);
}
1924+
/* Service a Marvell PHY interrupt for one port.  Reads (and thereby
 * acknowledges) the PHY interrupt status, then handles autonegotiation
 * completion and speed/duplex/link-state change events.  Invoked from
 * the PHY tasklet (skge_phytask). */
static void yukon_phy_intr(struct skge_port *skge)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	const char *reason = NULL;
	u16 istatus, phystat;

	istatus = gm_phy_read(hw, port, PHY_MARV_INT_STAT);
	phystat = gm_phy_read(hw, port, PHY_MARV_PHY_STAT);

	if (netif_msg_intr(skge))
		printk(KERN_DEBUG PFX "%s: phy interrupt status 0x%x 0x%x\n",
		       skge->netdev->name, istatus, phystat);

	if (istatus & PHY_M_IS_AN_COMPL) {
		/* Autoneg finished: reject the result on remote fault,
		 * master/slave fault, or unresolved speed/duplex. */
		if (gm_phy_read(hw, port, PHY_MARV_AUNE_LP)
		    & PHY_M_AN_RF) {
			reason = "remote fault";
			goto failed;
		}

		if (gm_phy_read(hw, port, PHY_MARV_1000T_STAT) & PHY_B_1000S_MSF) {
			reason = "master/slave fault";
			goto failed;
		}

		if (!(phystat & PHY_M_PS_SPDUP_RES)) {
			reason = "speed/duplex";
			goto failed;
		}

		/* record negotiated duplex and speed */
		skge->duplex = (phystat & PHY_M_PS_FULL_DUP)
			? DUPLEX_FULL : DUPLEX_HALF;
		skge->speed = yukon_speed(hw, phystat);

		/* We are using IEEE 802.3z/D5.0 Table 37-4 */
		switch (phystat & PHY_M_PS_PAUSE_MSK) {
		case PHY_M_PS_PAUSE_MSK:
			skge->flow_control = FLOW_MODE_SYMMETRIC;
			break;
		case PHY_M_PS_RX_P_EN:
			skge->flow_control = FLOW_MODE_REM_SEND;
			break;
		case PHY_M_PS_TX_P_EN:
			skge->flow_control = FLOW_MODE_LOC_SEND;
			break;
		default:
			skge->flow_control = FLOW_MODE_NONE;
		}

		/* Pause frames only make sense at gigabit full duplex
		 * (or when some flow control mode was negotiated). */
		if (skge->flow_control == FLOW_MODE_NONE ||
		    (skge->speed < SPEED_1000 && skge->duplex == DUPLEX_HALF))
			skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
		else
			skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON);
		yukon_link_up(skge);
		return;
	}

	/* No autoneg completion: track incremental changes. */
	if (istatus & PHY_M_IS_LSP_CHANGE)
		skge->speed = yukon_speed(hw, phystat);

	if (istatus & PHY_M_IS_DUP_CHANGE)
		skge->duplex = (phystat & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF;
	if (istatus & PHY_M_IS_LST_CHANGE) {
		if (phystat & PHY_M_PS_LINK_UP)
			yukon_link_up(skge);
		else
			yukon_link_down(skge);
	}
	return;
 failed:
	printk(KERN_ERR PFX "%s: autonegotiation failed (%s)\n",
	       skge->netdev->name, reason);

	/* XXX restart autonegotiation? */
}
2002+
/* Stop traffic on the device and reset + reinitialize the PHY/MAC for
 * its chip family.  Used when link parameters change and the PHY must
 * renegotiate.  Takes phy_lock to serialize PHY register access. */
static void skge_phy_reset(struct skge_port *skge)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;

	netif_stop_queue(skge->netdev);
	netif_carrier_off(skge->netdev);

	spin_lock_bh(&hw->phy_lock);
	if (hw->chip_id == CHIP_ID_GENESIS) {
		genesis_reset(hw, port);
		genesis_mac_init(hw, port);
	} else {
		yukon_reset(hw, port);
		yukon_init(hw, port);
	}
	spin_unlock_bh(&hw->phy_lock);
}
2021+
/* Basic MII support */
/* ioctl handler implementing the standard MII ioctls (SIOCGMIIPHY,
 * SIOCGMIIREG, SIOCSMIIREG) on top of the chip-specific PHY accessors.
 * Returns -ENODEV while the interface is down (PHY still in reset),
 * -EPERM for an unprivileged register write, -EOPNOTSUPP otherwise. */
static int skge_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data;
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;
	int err = -EOPNOTSUPP;

	if (!netif_running(dev))
		return -ENODEV;	/* Phy still in reset */

	data = (struct mii_ioctl_data *) &ifr->ifr_ifru;
	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = hw->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u16 val = 0;
		/* phy_lock serializes the spin-wait PHY accesses */
		spin_lock_bh(&hw->phy_lock);
		if (hw->chip_id == CHIP_ID_GENESIS)
			err = __xm_phy_read(hw, skge->port, data->reg_num & 0x1f, &val);
		else
			err = __gm_phy_read(hw, skge->port, data->reg_num & 0x1f, &val);
		spin_unlock_bh(&hw->phy_lock);
		data->val_out = val;
		break;
	}

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		spin_lock_bh(&hw->phy_lock);
		if (hw->chip_id == CHIP_ID_GENESIS)
			err = xm_phy_write(hw, skge->port, data->reg_num & 0x1f,
				   data->val_in);
		else
			err = gm_phy_write(hw, skge->port, data->reg_num & 0x1f,
				   data->val_in);
		spin_unlock_bh(&hw->phy_lock);
		break;
	}
	return err;
}
2067+
/* Configure one on-chip RAM buffer queue: program its start/end and
 * read/write pointers, set receive-queue pause thresholds or enable
 * store-and-forward for transmit queues, then switch the buffer to
 * operational mode.  @start and @len are byte values; the hardware
 * registers are programmed in 8-byte (qword) units, hence the /8. */
static void skge_ramset(struct skge_hw *hw, u16 q, u32 start, size_t len)
{
	u32 end;

	start /= 8;
	len /= 8;
	end = start + len - 1;

	skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_RST_CLR);
	skge_write32(hw, RB_ADDR(q, RB_START), start);
	skge_write32(hw, RB_ADDR(q, RB_WP), start);
	skge_write32(hw, RB_ADDR(q, RB_RP), start);
	skge_write32(hw, RB_ADDR(q, RB_END), end);

	if (q == Q_R1 || q == Q_R2) {
		/* Set thresholds on receive queue's */
		/* upper threshold at 2/3 full, lower at 1/3 */
		skge_write32(hw, RB_ADDR(q, RB_RX_UTPP),
			     start + (2*len)/3);
		skge_write32(hw, RB_ADDR(q, RB_RX_LTPP),
			     start + (len/3));
	} else {
		/* Enable store & forward on Tx queue's because
		 * Tx FIFO is only 4K on Genesis and 1K on Yukon
		 */
		skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_STFWD);
	}

	skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_OP_MD);
}
2097+
/* Setup Bus Memory Interface */
/* Point a BMU queue at the DMA address of its first descriptor and set
 * the FIFO watermark.  @e is the ring element whose descriptor address
 * (offset into skge->mem, rebased onto skge->dma) becomes the queue's
 * start address. */
static void skge_qset(struct skge_port *skge, u16 q,
		      const struct skge_element *e)
{
	struct skge_hw *hw = skge->hw;
	u32 watermark = 0x600;
	u64 base = skge->dma + (e->desc - skge->mem);

	/* optimization to reduce window on 32bit/33mhz */
	if ((skge_read16(hw, B0_CTST) & (CS_BUS_CLOCK | CS_BUS_SLOT_SZ)) == 0)
		watermark /= 2;

	skge_write32(hw, Q_ADDR(q, Q_CSR), CSR_CLR_RESET);
	skge_write32(hw, Q_ADDR(q, Q_F), watermark);
	/* 64-bit descriptor base split across two 32-bit registers */
	skge_write32(hw, Q_ADDR(q, Q_DA_H), (u32)(base >> 32));
	skge_write32(hw, Q_ADDR(q, Q_DA_L), (u32)base);
}
2115+
/* Bring the interface up (dev->open): allocate one coherent DMA region
 * holding both rings, populate the Rx ring with buffers, initialize the
 * MAC, carve the on-chip RAM between ports, program the BMU queues and
 * start the receiver.  Returns 0 or a negative errno; on failure all
 * partially-acquired resources are released via the goto chain. */
static int skge_up(struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	u32 chunk, ram_addr;
	size_t rx_size, tx_size;
	int err;

	if (netif_msg_ifup(skge))
		printk(KERN_INFO PFX "%s: enabling interface\n", dev->name);

	/* Rx buffers must hold a full frame for the configured MTU */
	if (dev->mtu > RX_BUF_SIZE)
		skge->rx_buf_size = dev->mtu + ETH_HLEN;
	else
		skge->rx_buf_size = RX_BUF_SIZE;


	rx_size = skge->rx_ring.count * sizeof(struct skge_rx_desc);
	tx_size = skge->tx_ring.count * sizeof(struct skge_tx_desc);
	skge->mem_size = tx_size + rx_size;
	/* single allocation: Rx descriptors first, then Tx descriptors */
	skge->mem = pci_alloc_consistent(hw->pdev, skge->mem_size, &skge->dma);
	if (!skge->mem)
		return -ENOMEM;

	pr_debug(PFX "pci region (%lu) %p = %x\n", skge->mem_size,
		 skge->mem, skge->dma);

	/* descriptors must be 8-byte aligned */
	BUG_ON(skge->dma & 7);

	/* hardware splits the base address into hi/lo 32-bit halves, so
	 * the whole region must stay within one 4G window */
	if ((u64)skge->dma >> 32 != ((u64) skge->dma + skge->mem_size) >> 32) {
		printk(KERN_ERR PFX "pci_alloc_consistent region crosses 4G boundary\n");
		err = -EINVAL;
		goto free_pci_mem;
	}

	memset(skge->mem, 0, skge->mem_size);

	err = skge_ring_alloc(&skge->rx_ring, skge->mem, skge->dma);
	if (err)
		goto free_pci_mem;

	err = skge_rx_fill(skge);
	if (err)
		goto free_rx_ring;

	err = skge_ring_alloc(&skge->tx_ring, skge->mem + rx_size,
			      skge->dma + rx_size);
	if (err)
		goto free_rx_ring;

	/* Initialize MAC */
	spin_lock_bh(&hw->phy_lock);
	if (hw->chip_id == CHIP_ID_GENESIS)
		genesis_mac_init(hw, port);
	else
		yukon_mac_init(hw, port);
	spin_unlock_bh(&hw->phy_lock);

	/* Configure RAMbuffers */
	/* each port gets 2 chunks (Rx + Tx) out of the shared RAM */
	chunk = hw->ram_size / ((hw->ports + 1)*2);
	ram_addr = hw->ram_offset + 2 * chunk * port;

	skge_ramset(hw, rxqaddr[port], ram_addr, chunk);
	skge_qset(skge, rxqaddr[port], skge->rx_ring.to_clean);

	BUG_ON(skge->tx_ring.to_use != skge->tx_ring.to_clean);
	skge_ramset(hw, txqaddr[port], ram_addr+chunk, chunk);
	skge_qset(skge, txqaddr[port], skge->tx_ring.to_use);

	/* Start receiver BMU */
	wmb();
	skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_START | CSR_IRQ_CL_F);
	skge_led(skge, LED_MODE_ON);

	return 0;

 free_rx_ring:
	skge_rx_clean(skge);
	kfree(skge->rx_ring.start);
 free_pci_mem:
	pci_free_consistent(hw->pdev, skge->mem_size, skge->mem, skge->dma);
	skge->mem = NULL;

	return err;
}
2202+
/* Bring the interface down (dev->stop): stop the MAC, transmitter and
 * receiver, reset the BMU queues, RAM buffers and FIFOs, then free all
 * ring buffers and the shared DMA region.  A NULL skge->mem means the
 * device was never brought up, so there is nothing to tear down. */
static int skge_down(struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;
	int port = skge->port;

	if (skge->mem == NULL)
		return 0;

	if (netif_msg_ifdown(skge))
		printk(KERN_INFO PFX "%s: disabling interface\n", dev->name);

	netif_stop_queue(dev);

	skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), LED_OFF);
	if (hw->chip_id == CHIP_ID_GENESIS)
		genesis_stop(skge);
	else
		yukon_stop(skge);

	/* Stop transmitter */
	skge_write8(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_STOP);
	skge_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL),
		     RB_RST_SET|RB_DIS_OP_MD);


	/* Disable Force Sync bit and Enable Alloc bit */
	skge_write8(hw, SK_REG(port, TXA_CTRL),
		    TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);

	/* Stop Interval Timer and Limit Counter of Tx Arbiter */
	skge_write32(hw, SK_REG(port, TXA_ITI_INI), 0L);
	skge_write32(hw, SK_REG(port, TXA_LIM_INI), 0L);

	/* Reset PCI FIFO */
	skge_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_SET_RESET);
	skge_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), RB_RST_SET);

	/* Reset the RAM Buffer async Tx queue */
	skge_write8(hw, RB_ADDR(port == 0 ? Q_XA1 : Q_XA2, RB_CTRL), RB_RST_SET);
	/* stop receiver */
	skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_STOP);
	skge_write32(hw, RB_ADDR(port ? Q_R2 : Q_R1, RB_CTRL),
		     RB_RST_SET|RB_DIS_OP_MD);
	skge_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_SET_RESET);

	/* reset the MAC FIFOs for this chip family */
	if (hw->chip_id == CHIP_ID_GENESIS) {
		skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_RST_SET);
		skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_RST_SET);
	} else {
		skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
		skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET);
	}

	skge_led(skge, LED_MODE_OFF);

	/* release ring buffers and descriptor memory */
	skge_tx_clean(skge);
	skge_rx_clean(skge);

	kfree(skge->rx_ring.start);
	kfree(skge->tx_ring.start);
	pci_free_consistent(hw->pdev, skge->mem_size, skge->mem, skge->dma);
	skge->mem = NULL;
	return 0;
}
2268+
2269+static inline int skge_avail(const struct skge_ring *ring)
2270+{
2271+ return ((ring->to_clean > ring->to_use) ? 0 : ring->count)
2272+ + (ring->to_clean - ring->to_use) - 1;
2273+}
2274+
/* hard_start_xmit: map the skb (header plus any page fragments) for DMA,
 * build the descriptor chain, and kick the transmit BMU.  The ownership
 * bit of the FIRST descriptor is written last (after a wmb()) so the
 * hardware never sees a partially built chain.  Returns NETDEV_TX_OK,
 * or NETDEV_TX_BUSY when the ring lacks room for all fragments. */
static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;
	struct skge_element *e;
	struct skge_tx_desc *td;
	int i;
	u32 control, len;
	u64 map;

	/* pad runt frames up to the minimum Ethernet length */
	if (skb->len < ETH_ZLEN) {
		skb = skb_padto(skb, ETH_ZLEN);
		if (skb == NULL)
			return NETDEV_TX_OK;
	}

	spin_lock_irq(&skge->tx_lock);

	if (unlikely(skge_avail(&skge->tx_ring) < skb_shinfo(skb)->nr_frags + 1)) {
		spin_unlock_irq(&skge->tx_lock);
		return NETDEV_TX_BUSY;
	}

	e = skge->tx_ring.to_use;
	td = e->desc;
	BUG_ON(td->control & BMU_OWN);
	e->skb = skb;
	len = skb_headlen(skb);
	map = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE);
	pci_unmap_addr_set(e, mapaddr, map);
	pci_unmap_len_set(e, maplen, len);

	td->dma_lo = map;
	td->dma_hi = map >> 32;

	if (skb->ip_summed == CHECKSUM_HW) {
		/* hardware checksum: tell the chip where to start
		 * summing and where to write the result */
		int offset = skb->h.raw - skb->data;

		/* This seems backwards, but it is what the sk98lin
		 * does.  Looks like hardware is wrong?
		 */
		if (skb->h.ipiph->protocol == IPPROTO_UDP
	            && hw->chip_rev == 0 && hw->chip_id == CHIP_ID_YUKON)
			control = BMU_TCP_CHECK;
		else
			control = BMU_UDP_CHECK;

		td->csum_offs = 0;
		td->csum_start = offset;
		td->csum_write = offset + skb->csum;
	} else
		control = BMU_CHECK;

	if (!skb_shinfo(skb)->nr_frags) /* single buffer i.e. no fragments */
		control |= BMU_EOF| BMU_IRQ_EOF;
	else {
		struct skge_tx_desc *tf = td;

		control |= BMU_STFWD;
		/* one additional descriptor per page fragment; each is
		 * handed to the hardware (BMU_OWN) as it is filled */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			map = pci_map_page(hw->pdev, frag->page, frag->page_offset,
					   frag->size, PCI_DMA_TODEVICE);

			e = e->next;
			e->skb = skb;
			tf = e->desc;
			BUG_ON(tf->control & BMU_OWN);

			tf->dma_lo = map;
			tf->dma_hi = (u64) map >> 32;
			pci_unmap_addr_set(e, mapaddr, map);
			pci_unmap_len_set(e, maplen, frag->size);

			tf->control = BMU_OWN | BMU_SW | control | frag->size;
		}
		tf->control |= BMU_EOF | BMU_IRQ_EOF;
	}
	/* Make sure all the descriptors written */
	wmb();
	/* hand the first (BMU_STF) descriptor over last */
	td->control = BMU_OWN | BMU_SW | BMU_STF | control | len;
	wmb();

	skge_write8(hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_START);

	if (unlikely(netif_msg_tx_queued(skge)))
		printk(KERN_DEBUG "%s: tx queued, slot %d, len %d\n",
		       dev->name, e - skge->tx_ring.start, skb->len);

	skge->tx_ring.to_use = e->next;
	/* stop the queue early so the next call cannot overflow */
	if (skge_avail(&skge->tx_ring) <= TX_LOW_WATER) {
		pr_debug("%s: transmit queue full\n", dev->name);
		netif_stop_queue(dev);
	}

	spin_unlock_irq(&skge->tx_lock);

	dev->trans_start = jiffies;

	return NETDEV_TX_OK;
}
2377+
2378+
/* Free resources associated with this ring element */
/* Unmap one transmit descriptor's DMA mapping; on the last descriptor
 * of a frame (BMU_EOF) also free the skb.  @control is the descriptor's
 * control word: BMU_STF marks the first (header) descriptor, which was
 * mapped with pci_map_single; fragments were mapped with pci_map_page. */
static void skge_tx_free(struct skge_port *skge, struct skge_element *e,
			 u32 control)
{
	struct pci_dev *pdev = skge->hw->pdev;

	BUG_ON(!e->skb);

	/* skb header vs. fragment */
	if (control & BMU_STF)
		pci_unmap_single(pdev, pci_unmap_addr(e, mapaddr),
				 pci_unmap_len(e, maplen),
				 PCI_DMA_TODEVICE);
	else
		pci_unmap_page(pdev, pci_unmap_addr(e, mapaddr),
			       pci_unmap_len(e, maplen),
			       PCI_DMA_TODEVICE);

	if (control & BMU_EOF) {
		if (unlikely(netif_msg_tx_done(skge)))
			printk(KERN_DEBUG PFX "%s: tx done slot %d\n",
			       skge->netdev->name, e - skge->tx_ring.start);

		/* all descriptors of the frame share this skb; free it
		 * only once, at the end-of-frame descriptor */
		dev_kfree_skb_any(e->skb);
	}
	e->skb = NULL;
}
2406+
/* Free all buffers in transmit ring */
/* Walk the pending span of the Tx ring (to_clean .. to_use), release
 * every element regardless of hardware ownership, and wake the queue.
 * Used on shutdown and timeout, so irqsave locking is required. */
static void skge_tx_clean(struct skge_port *skge)
{
	struct skge_element *e;
	unsigned long flags;

	spin_lock_irqsave(&skge->tx_lock, flags);
	for (e = skge->tx_ring.to_clean; e != skge->tx_ring.to_use; e = e->next) {
		struct skge_tx_desc *td = e->desc;
		skge_tx_free(skge, e, td->control);
		/* clear ownership so the descriptor can be reused */
		td->control = 0;
	}

	skge->tx_ring.to_clean = e;
	netif_wake_queue(skge->netdev);
	spin_unlock_irqrestore(&skge->tx_lock, flags);
}
2424+
/* Watchdog callback (dev->tx_timeout): stop the transmit BMU and drop
 * everything still queued so transmission can be restarted cleanly. */
static void skge_tx_timeout(struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);

	if (netif_msg_timer(skge))
		printk(KERN_DEBUG PFX "%s: tx timeout\n", dev->name);

	skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_STOP);
	skge_tx_clean(skge);
}
2435+
2436+static int skge_change_mtu(struct net_device *dev, int new_mtu)
2437+{
2438+ int err;
2439+
2440+ if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
2441+ return -EINVAL;
2442+
2443+ if (!netif_running(dev)) {
2444+ dev->mtu = new_mtu;
2445+ return 0;
2446+ }
2447+
2448+ skge_down(dev);
2449+
2450+ dev->mtu = new_mtu;
2451+
2452+ err = skge_up(dev);
2453+ if (err)
2454+ dev_close(dev);
2455+
2456+ return err;
2457+}
2458+
/* set_multicast_list for Genesis (XMAC): program promiscuous mode and
 * the 64-bit multicast hash filter from the device's multicast list. */
static void genesis_set_multicast(struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	int i, count = dev->mc_count;
	struct dev_mc_list *list = dev->mc_list;
	u32 mode;
	u8 filter[8];

	mode = xm_read32(hw, port, XM_MODE);
	mode |= XM_MD_ENA_HASH;
	if (dev->flags & IFF_PROMISC)
		mode |= XM_MD_ENA_PROM;
	else
		mode &= ~XM_MD_ENA_PROM;

	if (dev->flags & IFF_ALLMULTI)
		/* all-ones hash accepts every multicast address */
		memset(filter, 0xff, sizeof(filter));
	else {
		memset(filter, 0, sizeof(filter));
		for (i = 0; list && i < count; i++, list = list->next) {
			u32 crc, bit;
			/* hash = low 6 bits of the inverted CRC32 */
			crc = ether_crc_le(ETH_ALEN, list->dmi_addr);
			bit = ~crc & 0x3f;
			filter[bit/8] |= 1 << (bit%8);
		}
	}

	xm_write32(hw, port, XM_MODE, mode);
	xm_outhash(hw, port, XM_HSM, filter);
}
2491+
/* set_multicast_list for Yukon (GMAC): configure the unicast/multicast
 * filter enables in GM_RX_CTRL and load the 64-bit multicast hash. */
static void yukon_set_multicast(struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	struct dev_mc_list *list = dev->mc_list;
	u16 reg;
	u8 filter[8];

	memset(filter, 0, sizeof(filter));

	reg = gma_read16(hw, port, GM_RX_CTRL);
	reg |= GM_RXCR_UCF_ENA;

	if (dev->flags & IFF_PROMISC) 	/* promiscuous */
		/* disable both filters: accept everything */
		reg &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
	else if (dev->flags & IFF_ALLMULTI)	/* all multicast */
		memset(filter, 0xff, sizeof(filter));
	else if (dev->mc_count == 0)		/* no multicast */
		reg &= ~GM_RXCR_MCF_ENA;
	else {
		int i;
		reg |= GM_RXCR_MCF_ENA;

		/* hash = low 6 bits of the CRC32 of each address */
		for (i = 0; list && i < dev->mc_count; i++, list = list->next) {
			u32 bit = ether_crc(ETH_ALEN, list->dmi_addr) & 0x3f;
			filter[bit/8] |= 1 << (bit%8);
		}
	}


	/* load the 64-bit hash, 16 bits per register */
	gma_write16(hw, port, GM_MC_ADDR_H1,
			 (u16)filter[0] | ((u16)filter[1] << 8));
	gma_write16(hw, port, GM_MC_ADDR_H2,
			 (u16)filter[2] | ((u16)filter[3] << 8));
	gma_write16(hw, port, GM_MC_ADDR_H3,
			 (u16)filter[4] | ((u16)filter[5] << 8));
	gma_write16(hw, port, GM_MC_ADDR_H4,
			 (u16)filter[6] | ((u16)filter[7] << 8));

	gma_write16(hw, port, GM_RX_CTRL, reg);
}
2534+
2535+static inline u16 phy_length(const struct skge_hw *hw, u32 status)
2536+{
2537+ if (hw->chip_id == CHIP_ID_GENESIS)
2538+ return status >> XMR_FS_LEN_SHIFT;
2539+ else
2540+ return status >> GMR_FS_LEN_SHIFT;
2541+}
2542+
2543+static inline int bad_phy_status(const struct skge_hw *hw, u32 status)
2544+{
2545+ if (hw->chip_id == CHIP_ID_GENESIS)
2546+ return (status & (XMR_FS_ERR | XMR_FS_2L_VLAN)) != 0;
2547+ else
2548+ return (status & GMR_FS_ANY_ERR) ||
2549+ (status & GMR_FS_RX_OK) == 0;
2550+}
2551+
2552+
/* Get receive buffer from descriptor.
 * Handles copy of small buffers and reallocation failures
 */
/* Validate one completed Rx descriptor and return an skb ready for the
 * stack, or NULL.  Small frames are copied into a fresh skb so the big
 * Rx buffer can be reused; large frames are handed up directly and the
 * slot is refilled with a new buffer.  On any error or allocation
 * failure the existing buffer is resubmitted to the hardware. */
static inline struct sk_buff *skge_rx_get(struct skge_port *skge,
					  struct skge_element *e,
					  u32 control, u32 status, u16 csum)
{
	struct sk_buff *skb;
	u16 len = control & BMU_BBC;

	if (unlikely(netif_msg_rx_status(skge)))
		printk(KERN_DEBUG PFX "%s: rx slot %d status 0x%x len %d\n",
		       skge->netdev->name, e - skge->rx_ring.start,
		       status, len);

	if (len > skge->rx_buf_size)
		goto error;

	/* frame must occupy exactly one descriptor (start AND end) */
	if ((control & (BMU_EOF|BMU_STF)) != (BMU_STF|BMU_EOF))
		goto error;

	if (bad_phy_status(skge->hw, status))
		goto error;

	/* BMU length and MAC-reported length must agree */
	if (phy_length(skge->hw, status) != len)
		goto error;

	if (len < RX_COPY_THRESHOLD) {
		/* small frame: copy out and keep the original buffer */
		skb = alloc_skb(len + 2, GFP_ATOMIC);
		if (!skb)
			goto resubmit;

		skb_reserve(skb, 2);
		pci_dma_sync_single_for_cpu(skge->hw->pdev,
					    pci_unmap_addr(e, mapaddr),
					    len, PCI_DMA_FROMDEVICE);
		memcpy(skb->data, e->skb->data, len);
		pci_dma_sync_single_for_device(skge->hw->pdev,
					       pci_unmap_addr(e, mapaddr),
					       len, PCI_DMA_FROMDEVICE);
		skge_rx_reuse(e, skge->rx_buf_size);
	} else {
		/* large frame: allocate a replacement buffer first, so
		 * the ring slot is never left empty */
		struct sk_buff *nskb;
		nskb = alloc_skb(skge->rx_buf_size + NET_IP_ALIGN, GFP_ATOMIC);
		if (!nskb)
			goto resubmit;

		skb_reserve(nskb, NET_IP_ALIGN);
		pci_unmap_single(skge->hw->pdev,
				 pci_unmap_addr(e, mapaddr),
				 pci_unmap_len(e, maplen),
				 PCI_DMA_FROMDEVICE);
		skb = e->skb;
		prefetch(skb->data);
		skge_rx_setup(skge, e, nskb, skge->rx_buf_size);
	}

	skb_put(skb, len);
	skb->dev = skge->netdev;
	if (skge->rx_csum) {
		/* pass the hardware-computed checksum up the stack */
		skb->csum = csum;
		skb->ip_summed = CHECKSUM_HW;
	}

	skb->protocol = eth_type_trans(skb, skge->netdev);

	return skb;
error:

	if (netif_msg_rx_err(skge))
		printk(KERN_DEBUG PFX "%s: rx err, slot %d control 0x%x status 0x%x\n",
		       skge->netdev->name, e - skge->rx_ring.start,
		       control, status);

	/* classify the error for the per-device statistics, then fall
	 * through to resubmit the buffer */
	if (skge->hw->chip_id == CHIP_ID_GENESIS) {
		if (status & (XMR_FS_RUNT|XMR_FS_LNG_ERR))
			skge->net_stats.rx_length_errors++;
		if (status & XMR_FS_FRA_ERR)
			skge->net_stats.rx_frame_errors++;
		if (status & XMR_FS_FCS_ERR)
			skge->net_stats.rx_crc_errors++;
	} else {
		if (status & (GMR_FS_LONG_ERR|GMR_FS_UN_SIZE))
			skge->net_stats.rx_length_errors++;
		if (status & GMR_FS_FRAGMENT)
			skge->net_stats.rx_frame_errors++;
		if (status & GMR_FS_CRC_ERR)
			skge->net_stats.rx_crc_errors++;
	}

resubmit:
	skge_rx_reuse(e, skge->rx_buf_size);
	return NULL;
}
2647+
/* Free all buffers in Tx ring which are no longer owned by device */
/* Transmit-complete handling, called from the interrupt handler: walk
 * the ring from to_clean, stop at the first descriptor the hardware
 * still owns, free the completed ones, and wake the queue if enough
 * room has been reclaimed. */
static void skge_txirq(struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_ring *ring = &skge->tx_ring;
	struct skge_element *e;

	/* make sure descriptor reads below see the hardware's writes */
	rmb();

	spin_lock(&skge->tx_lock);
	for (e = ring->to_clean; e != ring->to_use; e = e->next) {
		struct skge_tx_desc *td = e->desc;

		if (td->control & BMU_OWN)
			break;

		skge_tx_free(skge, e, td->control);
	}
	skge->tx_ring.to_clean = e;

	if (netif_queue_stopped(skge->netdev)
	    && skge_avail(&skge->tx_ring) > TX_LOW_WATER)
		netif_wake_queue(skge->netdev);

	spin_unlock(&skge->tx_lock);
}
2674+
/* NAPI poll routine: consume up to min(quota, *budget) completed Rx
 * descriptors, hand the frames to the stack, restart the receive BMU,
 * and — once the ring is drained — complete polling and re-enable this
 * port's Rx interrupt.  Returns 1 while more work remains, 0 when done. */
static int skge_poll(struct net_device *dev, int *budget)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;
	struct skge_ring *ring = &skge->rx_ring;
	struct skge_element *e;
	int to_do = min(dev->quota, *budget);
	int work_done = 0;

	pr_debug("skge_poll ring %lu\n", (unsigned long) ring->to_clean);

	for (e = ring->to_clean; prefetch(e->next), work_done < to_do; e = e->next) {
		struct skge_rx_desc *rd = e->desc;
		struct sk_buff *skb;
		u32 control;

		/* read the control word after the hardware's writes */
		rmb();
		control = rd->control;
		if (control & BMU_OWN)
			break;

		skb = skge_rx_get(skge, e, control, rd->status, rd->csum2);
		if (likely(skb != NULL)) {
			dev->last_rx = jiffies;
			netif_receive_skb(skb);

			++work_done;
		}
	}
	ring->to_clean = e;

	/* restart receiver */
	wmb();
	skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_START);

	*budget -= work_done;
	dev->quota -= work_done;

	if (work_done >=  to_do)
		return 1; /* not done */

	netif_rx_complete(dev);

	/* re-enable this port's Rx interrupt source */
	spin_lock_irq(&hw->hw_lock);
	hw->intr_mask |= rxirqmask[skge->port];
	skge_write32(hw, B0_IMSK, hw->intr_mask);
	skge_read32(hw, B0_IMSK);

	spin_unlock_irq(&hw->hw_lock);

	return 0;
}
2727+
/* Parity errors seem to happen when Genesis is connected to a switch
 * with no other ports present. Heartbeat error??
 */
/* Count a MAC parity error against the port and clear the condition in
 * the chip-family-specific FIFO control register. */
static void skge_mac_parity(struct skge_hw *hw, int port)
{
	struct net_device *dev = hw->dev[port];

	if (dev) {
		struct skge_port *skge = netdev_priv(dev);
		++skge->net_stats.tx_heartbeat_errors;
	}

	if (hw->chip_id == CHIP_ID_GENESIS)
		skge_write16(hw, SK_REG(port, TX_MFF_CTRL1),
			     MFF_CLR_PERR);
	else
		/* HW-Bug #8: cleared by GMF_CLI_TX_FC instead of GMF_CLI_TX_PE */
		skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T),
			    (hw->chip_id == CHIP_ID_YUKON && hw->chip_rev == 0)
			    ? GMF_CLI_TX_FC : GMF_CLI_TX_PE);
}
2749+
2750+static void skge_mac_intr(struct skge_hw *hw, int port)
2751+{
2752+ if (hw->chip_id == CHIP_ID_GENESIS)
2753+ genesis_mac_intr(hw, port);
2754+ else
2755+ yukon_mac_intr(hw, port);
2756+}
2757+
/* Handle device specific framing and timeout interrupts */
/* Decode B0_HWE_ISRC and acknowledge each hardware error condition:
 * MAC status/timestamp overflows, RAM and queue parity errors, and PCI
 * bus errors.  A PCI error that cannot be cleared causes the whole
 * IS_HW_ERR class to be masked off so it cannot storm. */
static void skge_error_irq(struct skge_hw *hw)
{
	u32 hwstatus = skge_read32(hw, B0_HWE_ISRC);

	if (hw->chip_id == CHIP_ID_GENESIS) {
		/* clear xmac errors */
		if (hwstatus & (IS_NO_STAT_M1|IS_NO_TIST_M1))
			skge_write16(hw, RX_MFF_CTRL1, MFF_CLR_INSTAT);
		if (hwstatus & (IS_NO_STAT_M2|IS_NO_TIST_M2))
			skge_write16(hw, RX_MFF_CTRL2, MFF_CLR_INSTAT);
	} else {
		/* Timestamp (unused) overflow */
		if (hwstatus & IS_IRQ_TIST_OV)
			skge_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
	}

	if (hwstatus & IS_RAM_RD_PAR) {
		printk(KERN_ERR PFX "Ram read data parity error\n");
		skge_write16(hw, B3_RI_CTRL, RI_CLR_RD_PERR);
	}

	if (hwstatus & IS_RAM_WR_PAR) {
		printk(KERN_ERR PFX "Ram write data parity error\n");
		skge_write16(hw, B3_RI_CTRL, RI_CLR_WR_PERR);
	}

	if (hwstatus & IS_M1_PAR_ERR)
		skge_mac_parity(hw, 0);

	if (hwstatus & IS_M2_PAR_ERR)
		skge_mac_parity(hw, 1);

	if (hwstatus & IS_R1_PAR_ERR) {
		printk(KERN_ERR PFX "%s: receive queue parity error\n",
		       hw->dev[0]->name);
		skge_write32(hw, B0_R1_CSR, CSR_IRQ_CL_P);
	}

	if (hwstatus & IS_R2_PAR_ERR) {
		printk(KERN_ERR PFX "%s: receive queue parity error\n",
		       hw->dev[1]->name);
		skge_write32(hw, B0_R2_CSR, CSR_IRQ_CL_P);
	}

	if (hwstatus & (IS_IRQ_MST_ERR|IS_IRQ_STAT)) {
		u16 pci_status, pci_cmd;

		pci_read_config_word(hw->pdev, PCI_COMMAND, &pci_cmd);
		pci_read_config_word(hw->pdev, PCI_STATUS, &pci_status);

		printk(KERN_ERR PFX "%s: PCI error cmd=%#x status=%#x\n",
		       pci_name(hw->pdev), pci_cmd, pci_status);

		/* Write the error bits back to clear them. */
		/* config-space writes require the TST_CFG_WRITE window */
		pci_status &= PCI_STATUS_ERROR_BITS;
		skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
		pci_write_config_word(hw->pdev, PCI_COMMAND,
				      pci_cmd | PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
		pci_write_config_word(hw->pdev, PCI_STATUS, pci_status);
		skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);

		/* if error still set then just ignore it */
		hwstatus = skge_read32(hw, B0_HWE_ISRC);
		if (hwstatus & IS_IRQ_STAT) {
			printk(KERN_INFO PFX "unable to clear error (so ignoring them)\n");
			hw->intr_mask &= ~IS_HW_ERR;
		}
	}
}
2828+
2829+/*
2830+ * Interrupt from PHY are handled in tasklet
2831+ * because accessing phy registers requires spin wait which might
2832+ * cause excess interrupt latency.
2833+ */
2834+static void skge_phytask(unsigned long arg)
2835+{
2836+ struct skge_hw *hw = (void *) arg;
2837+ int port;
2838+
2839+ for (port = 0; port < hw->ports; port++) {
2840+ struct net_device *dev = hw->dev[port];
2841+ struct skge_port *skge = netdev_priv(dev);
2842+
2843+ if (netif_running(dev)) {
2844+ if (hw->chip_id != CHIP_ID_GENESIS)
2845+ yukon_phy_intr(skge);
2846+ else
2847+ bcom_phy_intr(skge);
2848+ }
2849+ }
2850+ spin_unlock(&hw->phy_lock);
2851+
2852+ spin_lock_irq(&hw->hw_lock);
2853+ hw->intr_mask |= IS_EXT_REG;
2854+ skge_write32(hw, B0_IMSK, hw->intr_mask);
2855+ spin_unlock_irq(&hw->hw_lock);
2856+}
2857+
/* Shared interrupt handler.  Reads B0_SP_ISRC (which also masks IRQs),
 * returns IRQ_NONE when the interrupt is not ours, otherwise dispatches
 * per-source: PHY events go to the tasklet, Tx completions to
 * skge_txirq, Rx to NAPI scheduling (with the Rx source masked until
 * skge_poll finishes), plus timeout, MAC and hardware-error handling.
 * Writing B0_IMSK at the end re-enables the remaining sources. */
static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	struct skge_hw *hw = dev_id;
	u32 status;

	/* Reading this register masks IRQ */
	status = skge_read32(hw, B0_SP_ISRC);
	if (status == 0 || status == ~0)
		return IRQ_NONE;

	pr_debug("skge_inter status %x\n", status);
	spin_lock(&hw->hw_lock);
	status &= hw->intr_mask;
	if (status & IS_EXT_REG) {
		/* mask PHY source until the tasklet has serviced it */
		hw->intr_mask &= ~IS_EXT_REG;
		tasklet_schedule(&hw->phy_task);
	}

	if (status & IS_XA1_F) {
		skge_write8(hw, Q_ADDR(Q_XA1, Q_CSR), CSR_IRQ_CL_F);
		skge_txirq(hw->dev[0]);
	}

	if (status & IS_R1_F) {
		skge_write8(hw, Q_ADDR(Q_R1, Q_CSR), CSR_IRQ_CL_F);
		hw->intr_mask &= ~IS_R1_F;
		netif_rx_schedule(hw->dev[0]);
	}

	if (status & IS_PA_TO_TX1)
		skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_TX1);

	if (status & IS_PA_TO_RX1) {
		/* packet arbiter Rx timeout: count as overrun */
		struct skge_port *skge = netdev_priv(hw->dev[0]);

		++skge->net_stats.rx_over_errors;
		skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX1);
	}


	if (status & IS_MAC1)
		skge_mac_intr(hw, 0);

	/* second port, only on dual-port boards */
	if (hw->dev[1]) {
		if (status & IS_XA2_F) {
			skge_write8(hw, Q_ADDR(Q_XA2, Q_CSR), CSR_IRQ_CL_F);
			skge_txirq(hw->dev[1]);
		}

		if (status & IS_R2_F) {
			skge_write8(hw, Q_ADDR(Q_R2, Q_CSR), CSR_IRQ_CL_F);
			hw->intr_mask &= ~IS_R2_F;
			netif_rx_schedule(hw->dev[1]);
		}

		if (status & IS_PA_TO_RX2) {
			struct skge_port *skge = netdev_priv(hw->dev[1]);
			++skge->net_stats.rx_over_errors;
			skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX2);
		}

		if (status & IS_PA_TO_TX2)
			skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_TX2);

		if (status & IS_MAC2)
			skge_mac_intr(hw, 1);
	}

	if (status & IS_HW_ERR)
		skge_error_irq(hw);

	skge_write32(hw, B0_IMSK, hw->intr_mask);
	spin_unlock(&hw->hw_lock);

	return IRQ_HANDLED;
}
2934+
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: run the interrupt handler synchronously with the IRQ
 * line disabled, so netconsole/kgdb-over-ethernet can make progress
 * when normal interrupt delivery is unavailable. */
static void skge_netpoll(struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);

	disable_irq(dev->irq);
	skge_intr(dev->irq, skge->hw, NULL);
	enable_irq(dev->irq);
}
#endif
2945+
2946+static int skge_set_mac_address(struct net_device *dev, void *p)
2947+{
2948+ struct skge_port *skge = netdev_priv(dev);
2949+ struct skge_hw *hw = skge->hw;
2950+ unsigned port = skge->port;
2951+ const struct sockaddr *addr = p;
2952+
2953+ if (addr->sa_data[1] & 0x1) /* multicast */
2954+ return -EADDRNOTAVAIL;
2955+
2956+ spin_lock_bh(&hw->phy_lock);
2957+ memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
2958+ memcpy_toio(hw->regs + B2_MAC_1 + port*8,
2959+ dev->dev_addr, ETH_ALEN);
2960+ memcpy_toio(hw->regs + B2_MAC_2 + port*8,
2961+ dev->dev_addr, ETH_ALEN);
2962+
2963+ if (hw->chip_id == CHIP_ID_GENESIS)
2964+ xm_outaddr(hw, port, XM_SA, dev->dev_addr);
2965+ else {
2966+ gma_set_addr(hw, port, GM_SRC_ADDR_1L, dev->dev_addr);
2967+ gma_set_addr(hw, port, GM_SRC_ADDR_2L, dev->dev_addr);
2968+ }
2969+ spin_unlock_bh(&hw->phy_lock);
2970+
2971+ return 0;
2972+}
2973+
/* Mapping from B2_CHIP_ID values to human-readable board names, used
 * only by skge_board_name(). */
static const struct {
	u8 id;
	const char *name;
} skge_chips[] = {
	{ CHIP_ID_GENESIS,	"Genesis" },
	{ CHIP_ID_YUKON,	 "Yukon" },
	{ CHIP_ID_YUKON_LITE,	"Yukon-Lite"},
	{ CHIP_ID_YUKON_LP,	"Yukon-LP"},
};
2983+
/* Return a printable name for the chip, falling back to "chipid 0x%x"
 * for unknown IDs.  NOTE: the fallback uses a static buffer shared by
 * all callers, so this is not reentrant — acceptable here since it is
 * only used during probe-time printouts. */
static const char *skge_board_name(const struct skge_hw *hw)
{
	int i;
	static char buf[16];

	for (i = 0; i < ARRAY_SIZE(skge_chips); i++)
		if (skge_chips[i].id == hw->chip_id)
			return skge_chips[i].name;

	snprintf(buf, sizeof buf, "chipid 0x%x", hw->chip_id);
	return buf;
}
2996+
2997+
2998+/*
2999+ * Setup the board data structure, but don't bring up
3000+ * the port(s)
3001+ */
3002+static int skge_reset(struct skge_hw *hw)
3003+{
3004+ u32 reg;
3005+ u16 ctst, pci_status;
3006+ u8 t8, mac_cfg, pmd_type, phy_type;
3007+ int i;
3008+
3009+ ctst = skge_read16(hw, B0_CTST);
3010+
3011+ /* do a SW reset */
3012+ skge_write8(hw, B0_CTST, CS_RST_SET);
3013+ skge_write8(hw, B0_CTST, CS_RST_CLR);
3014+
3015+ /* clear PCI errors, if any */
3016+ skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
3017+ skge_write8(hw, B2_TST_CTRL2, 0);
3018+
3019+ pci_read_config_word(hw->pdev, PCI_STATUS, &pci_status);
3020+ pci_write_config_word(hw->pdev, PCI_STATUS,
3021+ pci_status | PCI_STATUS_ERROR_BITS);
3022+ skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
3023+ skge_write8(hw, B0_CTST, CS_MRST_CLR);
3024+
3025+ /* restore CLK_RUN bits (for Yukon-Lite) */
3026+ skge_write16(hw, B0_CTST,
3027+ ctst & (CS_CLK_RUN_HOT|CS_CLK_RUN_RST|CS_CLK_RUN_ENA));
3028+
3029+ hw->chip_id = skge_read8(hw, B2_CHIP_ID);
3030+ phy_type = skge_read8(hw, B2_E_1) & 0xf;
3031+ pmd_type = skge_read8(hw, B2_PMD_TYP);
3032+ hw->copper = (pmd_type == 'T' || pmd_type == '1');
3033+
3034+ switch (hw->chip_id) {
3035+ case CHIP_ID_GENESIS:
3036+ switch (phy_type) {
3037+ case SK_PHY_BCOM:
3038+ hw->phy_addr = PHY_ADDR_BCOM;
3039+ break;
3040+ default:
3041+ printk(KERN_ERR PFX "%s: unsupported phy type 0x%x\n",
3042+ pci_name(hw->pdev), phy_type);
3043+ return -EOPNOTSUPP;
3044+ }
3045+ break;
3046+
3047+ case CHIP_ID_YUKON:
3048+ case CHIP_ID_YUKON_LITE:
3049+ case CHIP_ID_YUKON_LP:
3050+ if (phy_type < SK_PHY_MARV_COPPER && pmd_type != 'S')
3051+ hw->copper = 1;
3052+
3053+ hw->phy_addr = PHY_ADDR_MARV;
3054+ break;
3055+
3056+ default:
3057+ printk(KERN_ERR PFX "%s: unsupported chip type 0x%x\n",
3058+ pci_name(hw->pdev), hw->chip_id);
3059+ return -EOPNOTSUPP;
3060+ }
3061+
3062+ mac_cfg = skge_read8(hw, B2_MAC_CFG);
3063+ hw->ports = (mac_cfg & CFG_SNG_MAC) ? 1 : 2;
3064+ hw->chip_rev = (mac_cfg & CFG_CHIP_R_MSK) >> 4;
3065+
3066+ /* read the adapters RAM size */
3067+ t8 = skge_read8(hw, B2_E_0);
3068+ if (hw->chip_id == CHIP_ID_GENESIS) {
3069+ if (t8 == 3) {
3070+ /* special case: 4 x 64k x 36, offset = 0x80000 */
3071+ hw->ram_size = 0x100000;
3072+ hw->ram_offset = 0x80000;
3073+ } else
3074+ hw->ram_size = t8 * 512;
3075+ }
3076+ else if (t8 == 0)
3077+ hw->ram_size = 0x20000;
3078+ else
3079+ hw->ram_size = t8 * 4096;
3080+
3081+ spin_lock_init(&hw->hw_lock);
3082+ hw->intr_mask = IS_HW_ERR | IS_EXT_REG | IS_PORT_1;
3083+ if (hw->ports > 1)
3084+ hw->intr_mask |= IS_PORT_2;
3085+
3086+ if (hw->chip_id == CHIP_ID_GENESIS)
3087+ genesis_init(hw);
3088+ else {
3089+ /* switch power to VCC (WA for VAUX problem) */
3090+ skge_write8(hw, B0_POWER_CTRL,
3091+ PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
3092+
3093+ /* avoid boards with stuck Hardware error bits */
3094+ if ((skge_read32(hw, B0_ISRC) & IS_HW_ERR) &&
3095+ (skge_read32(hw, B0_HWE_ISRC) & IS_IRQ_SENSOR)) {
3096+ printk(KERN_WARNING PFX "stuck hardware sensor bit\n");
3097+ hw->intr_mask &= ~IS_HW_ERR;
3098+ }
3099+
3100+ /* Clear PHY COMA */
3101+ skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
3102+ pci_read_config_dword(hw->pdev, PCI_DEV_REG1, &reg);
3103+ reg &= ~PCI_PHY_COMA;
3104+ pci_write_config_dword(hw->pdev, PCI_DEV_REG1, reg);
3105+ skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
3106+
3107+
3108+ for (i = 0; i < hw->ports; i++) {
3109+ skge_write16(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET);
3110+ skge_write16(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_CLR);
3111+ }
3112+ }
3113+
3114+ /* turn off hardware timer (unused) */
3115+ skge_write8(hw, B2_TI_CTRL, TIM_STOP);
3116+ skge_write8(hw, B2_TI_CTRL, TIM_CLR_IRQ);
3117+ skge_write8(hw, B0_LED, LED_STAT_ON);
3118+
3119+ /* enable the Tx Arbiters */
3120+ for (i = 0; i < hw->ports; i++)
3121+ skge_write8(hw, SK_REG(i, TXA_CTRL), TXA_ENA_ARB);
3122+
3123+ /* Initialize ram interface */
3124+ skge_write16(hw, B3_RI_CTRL, RI_RST_CLR);
3125+
3126+ skge_write8(hw, B3_RI_WTO_R1, SK_RI_TO_53);
3127+ skge_write8(hw, B3_RI_WTO_XA1, SK_RI_TO_53);
3128+ skge_write8(hw, B3_RI_WTO_XS1, SK_RI_TO_53);
3129+ skge_write8(hw, B3_RI_RTO_R1, SK_RI_TO_53);
3130+ skge_write8(hw, B3_RI_RTO_XA1, SK_RI_TO_53);
3131+ skge_write8(hw, B3_RI_RTO_XS1, SK_RI_TO_53);
3132+ skge_write8(hw, B3_RI_WTO_R2, SK_RI_TO_53);
3133+ skge_write8(hw, B3_RI_WTO_XA2, SK_RI_TO_53);
3134+ skge_write8(hw, B3_RI_WTO_XS2, SK_RI_TO_53);
3135+ skge_write8(hw, B3_RI_RTO_R2, SK_RI_TO_53);
3136+ skge_write8(hw, B3_RI_RTO_XA2, SK_RI_TO_53);
3137+ skge_write8(hw, B3_RI_RTO_XS2, SK_RI_TO_53);
3138+
3139+ skge_write32(hw, B0_HWE_IMSK, IS_ERR_MSK);
3140+
3141+ /* Set interrupt moderation for Transmit only
3142+ * Receive interrupts avoided by NAPI
3143+ */
3144+ skge_write32(hw, B2_IRQM_MSK, IS_XA1_F|IS_XA2_F);
3145+ skge_write32(hw, B2_IRQM_INI, skge_usecs2clk(hw, 100));
3146+ skge_write32(hw, B2_IRQM_CTRL, TIM_START);
3147+
3148+ skge_write32(hw, B0_IMSK, hw->intr_mask);
3149+
3150+ spin_lock_bh(&hw->phy_lock);
3151+ for (i = 0; i < hw->ports; i++) {
3152+ if (hw->chip_id == CHIP_ID_GENESIS)
3153+ genesis_reset(hw, i);
3154+ else
3155+ yukon_reset(hw, i);
3156+ }
3157+ spin_unlock_bh(&hw->phy_lock);
3158+
3159+ return 0;
3160+}
3161+
3162+/* Initialize network device */
3163+static struct net_device *skge_devinit(struct skge_hw *hw, int port,
3164+ int highmem)
3165+{
3166+ struct skge_port *skge;
3167+ struct net_device *dev = alloc_etherdev(sizeof(*skge));
3168+
3169+ if (!dev) {
3170+ printk(KERN_ERR "skge etherdev alloc failed");
3171+ return NULL;
3172+ }
3173+
3174+ SET_MODULE_OWNER(dev);
3175+ SET_NETDEV_DEV(dev, &hw->pdev->dev);
3176+ dev->open = skge_up;
3177+ dev->stop = skge_down;
3178+ dev->do_ioctl = skge_ioctl;
3179+ dev->hard_start_xmit = skge_xmit_frame;
3180+ dev->get_stats = skge_get_stats;
3181+ if (hw->chip_id == CHIP_ID_GENESIS)
3182+ dev->set_multicast_list = genesis_set_multicast;
3183+ else
3184+ dev->set_multicast_list = yukon_set_multicast;
3185+
3186+ dev->set_mac_address = skge_set_mac_address;
3187+ dev->change_mtu = skge_change_mtu;
3188+ SET_ETHTOOL_OPS(dev, &skge_ethtool_ops);
3189+ dev->tx_timeout = skge_tx_timeout;
3190+ dev->watchdog_timeo = TX_WATCHDOG;
3191+ dev->poll = skge_poll;
3192+ dev->weight = NAPI_WEIGHT;
3193+#ifdef CONFIG_NET_POLL_CONTROLLER
3194+ dev->poll_controller = skge_netpoll;
3195+#endif
3196+ dev->irq = hw->pdev->irq;
3197+ if (highmem)
3198+ dev->features |= NETIF_F_HIGHDMA;
3199+
3200+ skge = netdev_priv(dev);
3201+ skge->netdev = dev;
3202+ skge->hw = hw;
3203+ skge->msg_enable = netif_msg_init(debug, default_msg);
3204+ skge->tx_ring.count = DEFAULT_TX_RING_SIZE;
3205+ skge->rx_ring.count = DEFAULT_RX_RING_SIZE;
3206+
3207+ /* Auto speed and flow control */
3208+ skge->autoneg = AUTONEG_ENABLE;
3209+ skge->flow_control = FLOW_MODE_SYMMETRIC;
3210+ skge->duplex = -1;
3211+ skge->speed = -1;
3212+ skge->advertising = skge_supported_modes(hw);
3213+
3214+ hw->dev[port] = dev;
3215+
3216+ skge->port = port;
3217+
3218+ spin_lock_init(&skge->tx_lock);
3219+
3220+ if (hw->chip_id != CHIP_ID_GENESIS)
3221+ dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
3222+
3223+ /* read the mac address */
3224+ memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN);
3225+
3226+ /* device is off until link detection */
3227+ netif_carrier_off(dev);
3228+ netif_stop_queue(dev);
3229+
3230+ return dev;
3231+}
3232+
3233+static void __devinit skge_show_addr(struct net_device *dev)
3234+{
3235+ const struct skge_port *skge = netdev_priv(dev);
3236+
3237+ if (netif_msg_probe(skge))
3238+ printk(KERN_INFO PFX "%s: addr %02x:%02x:%02x:%02x:%02x:%02x\n",
3239+ dev->name,
3240+ dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
3241+ dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
3242+}
3243+
3244+static int __devinit skge_probe(struct pci_dev *pdev,
3245+ const struct pci_device_id *ent)
3246+{
3247+ struct net_device *dev, *dev1;
3248+ struct skge_hw *hw;
3249+ int err, using_dac = 0;
3250+
3251+ err = pci_enable_device(pdev);
3252+ if (err) {
3253+ printk(KERN_ERR PFX "%s cannot enable PCI device\n",
3254+ pci_name(pdev));
3255+ goto err_out;
3256+ }
3257+
3258+ err = pci_request_regions(pdev, DRV_NAME);
3259+ if (err) {
3260+ printk(KERN_ERR PFX "%s cannot obtain PCI resources\n",
3261+ pci_name(pdev));
3262+ goto err_out_disable_pdev;
3263+ }
3264+
3265+ pci_set_master(pdev);
3266+
3267+ if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
3268+ using_dac = 1;
3269+ err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
3270+ } else if (!(err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
3271+ using_dac = 0;
3272+ err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
3273+ }
3274+
3275+ if (err) {
3276+ printk(KERN_ERR PFX "%s no usable DMA configuration\n",
3277+ pci_name(pdev));
3278+ goto err_out_free_regions;
3279+ }
3280+
3281+#ifdef __BIG_ENDIAN
3282+ /* byte swap descriptors in hardware */
3283+ {
3284+ u32 reg;
3285+
3286+ pci_read_config_dword(pdev, PCI_DEV_REG2, &reg);
3287+ reg |= PCI_REV_DESC;
3288+ pci_write_config_dword(pdev, PCI_DEV_REG2, reg);
3289+ }
3290+#endif
3291+
3292+ err = -ENOMEM;
3293+ hw = kmalloc(sizeof(*hw), GFP_KERNEL);
3294+ if (!hw) {
3295+ printk(KERN_ERR PFX "%s: cannot allocate hardware struct\n",
3296+ pci_name(pdev));
3297+ goto err_out_free_regions;
3298+ }
3299+ memset(hw, 0, sizeof(*hw));
3300+
3301+ hw->pdev = pdev;
3302+ spin_lock_init(&hw->phy_lock);
3303+ tasklet_init(&hw->phy_task, skge_phytask, (unsigned long) hw);
3304+
3305+ hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
3306+ if (!hw->regs) {
3307+ printk(KERN_ERR PFX "%s: cannot map device registers\n",
3308+ pci_name(pdev));
3309+ goto err_out_free_hw;
3310+ }
3311+
3312+ err = request_irq(pdev->irq, skge_intr, SA_SHIRQ | SA_SAMPLE_RANDOM,
3313+ DRV_NAME, hw);
3314+ if (err) {
3315+ printk(KERN_ERR PFX "%s: cannot assign irq %d\n",
3316+ pci_name(pdev), pdev->irq);
3317+ goto err_out_iounmap;
3318+ }
3319+ pci_set_drvdata(pdev, hw);
3320+
3321+ err = skge_reset(hw);
3322+ if (err)
3323+ goto err_out_free_irq;
3324+
3325+ printk(KERN_INFO PFX "(%s) addr 0x%llx irq %d chip %s rev %d\n",
3326+ DRV_VERSION, (unsigned long long)pci_resource_start(pdev, 0),
3327+ pdev->irq, skge_board_name(hw), hw->chip_rev);
3328+
3329+ if ((dev = skge_devinit(hw, 0, using_dac)) == NULL)
3330+ goto err_out_led_off;
3331+
3332+ if (!is_valid_ether_addr(dev->dev_addr)) {
3333+ printk(KERN_ERR PFX "%s: bad (zero?) ethernet address in rom\n",
3334+ pci_name(pdev));
3335+ err = -EIO;
3336+ goto err_out_free_netdev;
3337+ }
3338+
3339+
3340+ err = register_netdev(dev);
3341+ if (err) {
3342+ printk(KERN_ERR PFX "%s: cannot register net device\n",
3343+ pci_name(pdev));
3344+ goto err_out_free_netdev;
3345+ }
3346+
3347+ skge_show_addr(dev);
3348+
3349+ if (hw->ports > 1 && (dev1 = skge_devinit(hw, 1, using_dac))) {
3350+ if (register_netdev(dev1) == 0)
3351+ skge_show_addr(dev1);
3352+ else {
3353+ /* Failure to register second port need not be fatal */
3354+ printk(KERN_WARNING PFX "register of second port failed\n");
3355+ hw->dev[1] = NULL;
3356+ free_netdev(dev1);
3357+ }
3358+ }
3359+
3360+ return 0;
3361+
3362+err_out_free_netdev:
3363+ free_netdev(dev);
3364+err_out_led_off:
3365+ skge_write16(hw, B0_LED, LED_STAT_OFF);
3366+err_out_free_irq:
3367+ free_irq(pdev->irq, hw);
3368+err_out_iounmap:
3369+ iounmap(hw->regs);
3370+err_out_free_hw:
3371+ kfree(hw);
3372+err_out_free_regions:
3373+ pci_release_regions(pdev);
3374+err_out_disable_pdev:
3375+ pci_disable_device(pdev);
3376+ pci_set_drvdata(pdev, NULL);
3377+err_out:
3378+ return err;
3379+}
3380+
3381+static void __devexit skge_remove(struct pci_dev *pdev)
3382+{
3383+ struct skge_hw *hw = pci_get_drvdata(pdev);
3384+ struct net_device *dev0, *dev1;
3385+
3386+ if (!hw)
3387+ return;
3388+
3389+ if ((dev1 = hw->dev[1]))
3390+ unregister_netdev(dev1);
3391+ dev0 = hw->dev[0];
3392+ unregister_netdev(dev0);
3393+
3394+ spin_lock_irq(&hw->hw_lock);
3395+ hw->intr_mask = 0;
3396+ skge_write32(hw, B0_IMSK, 0);
3397+ spin_unlock_irq(&hw->hw_lock);
3398+
3399+ skge_write16(hw, B0_LED, LED_STAT_OFF);
3400+ skge_write8(hw, B0_CTST, CS_RST_SET);
3401+
3402+ tasklet_disable(&hw->phy_task);
3403+
3404+ free_irq(pdev->irq, hw);
3405+ pci_release_regions(pdev);
3406+ pci_disable_device(pdev);
3407+ if (dev1)
3408+ free_netdev(dev1);
3409+ free_netdev(dev0);
3410+
3411+ iounmap(hw->regs);
3412+ kfree(hw);
3413+ pci_set_drvdata(pdev, NULL);
3414+}
3415+
3416+static struct pci_driver skge_driver = {
3417+ .name = DRV_NAME,
3418+ .id_table = skge_id_table,
3419+ .probe = skge_probe,
3420+ .remove = __devexit_p(skge_remove),
3421+};
3422+
3423+static int __init skge_init_module(void)
3424+{
3425+ return pci_module_init(&skge_driver);
3426+}
3427+
3428+static void __exit skge_cleanup_module(void)
3429+{
3430+ pci_unregister_driver(&skge_driver);
3431+}
3432+
3433+module_init(skge_init_module);
3434+module_exit(skge_cleanup_module);
--- /dev/null
+++ b/drivers/net/skge.h
@@ -0,0 +1,2546 @@
1+/*
2+ * Definitions for the new Marvell Yukon / SysKonnect driver.
3+ */
4+#ifndef _SKGE_H
5+#define _SKGE_H
6+
7+/* PCI config registers */
8+#define PCI_DEV_REG1 0x40
9+#define PCI_PHY_COMA 0x8000000
10+#define PCI_VIO 0x2000000
11+#define PCI_DEV_REG2 0x44
12+#define PCI_REV_DESC 0x4
13+
14+#define PCI_STATUS_ERROR_BITS (PCI_STATUS_DETECTED_PARITY | \
15+ PCI_STATUS_SIG_SYSTEM_ERROR | \
16+ PCI_STATUS_REC_MASTER_ABORT | \
17+ PCI_STATUS_REC_TARGET_ABORT | \
18+ PCI_STATUS_PARITY)
19+
20+enum csr_regs {
21+ B0_RAP = 0x0000,
22+ B0_CTST = 0x0004,
23+ B0_LED = 0x0006,
24+ B0_POWER_CTRL = 0x0007,
25+ B0_ISRC = 0x0008,
26+ B0_IMSK = 0x000c,
27+ B0_HWE_ISRC = 0x0010,
28+ B0_HWE_IMSK = 0x0014,
29+ B0_SP_ISRC = 0x0018,
30+ B0_XM1_IMSK = 0x0020,
31+ B0_XM1_ISRC = 0x0028,
32+ B0_XM1_PHY_ADDR = 0x0030,
33+ B0_XM1_PHY_DATA = 0x0034,
34+ B0_XM2_IMSK = 0x0040,
35+ B0_XM2_ISRC = 0x0048,
36+ B0_XM2_PHY_ADDR = 0x0050,
37+ B0_XM2_PHY_DATA = 0x0054,
38+ B0_R1_CSR = 0x0060,
39+ B0_R2_CSR = 0x0064,
40+ B0_XS1_CSR = 0x0068,
41+ B0_XA1_CSR = 0x006c,
42+ B0_XS2_CSR = 0x0070,
43+ B0_XA2_CSR = 0x0074,
44+
45+ B2_MAC_1 = 0x0100,
46+ B2_MAC_2 = 0x0108,
47+ B2_MAC_3 = 0x0110,
48+ B2_CONN_TYP = 0x0118,
49+ B2_PMD_TYP = 0x0119,
50+ B2_MAC_CFG = 0x011a,
51+ B2_CHIP_ID = 0x011b,
52+ B2_E_0 = 0x011c,
53+ B2_E_1 = 0x011d,
54+ B2_E_2 = 0x011e,
55+ B2_E_3 = 0x011f,
56+ B2_FAR = 0x0120,
57+ B2_FDP = 0x0124,
58+ B2_LD_CTRL = 0x0128,
59+ B2_LD_TEST = 0x0129,
60+ B2_TI_INI = 0x0130,
61+ B2_TI_VAL = 0x0134,
62+ B2_TI_CTRL = 0x0138,
63+ B2_TI_TEST = 0x0139,
64+ B2_IRQM_INI = 0x0140,
65+ B2_IRQM_VAL = 0x0144,
66+ B2_IRQM_CTRL = 0x0148,
67+ B2_IRQM_TEST = 0x0149,
68+ B2_IRQM_MSK = 0x014c,
69+ B2_IRQM_HWE_MSK = 0x0150,
70+ B2_TST_CTRL1 = 0x0158,
71+ B2_TST_CTRL2 = 0x0159,
72+ B2_GP_IO = 0x015c,
73+ B2_I2C_CTRL = 0x0160,
74+ B2_I2C_DATA = 0x0164,
75+ B2_I2C_IRQ = 0x0168,
76+ B2_I2C_SW = 0x016c,
77+ B2_BSC_INI = 0x0170,
78+ B2_BSC_VAL = 0x0174,
79+ B2_BSC_CTRL = 0x0178,
80+ B2_BSC_STAT = 0x0179,
81+ B2_BSC_TST = 0x017a,
82+
83+ B3_RAM_ADDR = 0x0180,
84+ B3_RAM_DATA_LO = 0x0184,
85+ B3_RAM_DATA_HI = 0x0188,
86+ B3_RI_WTO_R1 = 0x0190,
87+ B3_RI_WTO_XA1 = 0x0191,
88+ B3_RI_WTO_XS1 = 0x0192,
89+ B3_RI_RTO_R1 = 0x0193,
90+ B3_RI_RTO_XA1 = 0x0194,
91+ B3_RI_RTO_XS1 = 0x0195,
92+ B3_RI_WTO_R2 = 0x0196,
93+ B3_RI_WTO_XA2 = 0x0197,
94+ B3_RI_WTO_XS2 = 0x0198,
95+ B3_RI_RTO_R2 = 0x0199,
96+ B3_RI_RTO_XA2 = 0x019a,
97+ B3_RI_RTO_XS2 = 0x019b,
98+ B3_RI_TO_VAL = 0x019c,
99+ B3_RI_CTRL = 0x01a0,
100+ B3_RI_TEST = 0x01a2,
101+ B3_MA_TOINI_RX1 = 0x01b0,
102+ B3_MA_TOINI_RX2 = 0x01b1,
103+ B3_MA_TOINI_TX1 = 0x01b2,
104+ B3_MA_TOINI_TX2 = 0x01b3,
105+ B3_MA_TOVAL_RX1 = 0x01b4,
106+ B3_MA_TOVAL_RX2 = 0x01b5,
107+ B3_MA_TOVAL_TX1 = 0x01b6,
108+ B3_MA_TOVAL_TX2 = 0x01b7,
109+ B3_MA_TO_CTRL = 0x01b8,
110+ B3_MA_TO_TEST = 0x01ba,
111+ B3_MA_RCINI_RX1 = 0x01c0,
112+ B3_MA_RCINI_RX2 = 0x01c1,
113+ B3_MA_RCINI_TX1 = 0x01c2,
114+ B3_MA_RCINI_TX2 = 0x01c3,
115+ B3_MA_RCVAL_RX1 = 0x01c4,
116+ B3_MA_RCVAL_RX2 = 0x01c5,
117+ B3_MA_RCVAL_TX1 = 0x01c6,
118+ B3_MA_RCVAL_TX2 = 0x01c7,
119+ B3_MA_RC_CTRL = 0x01c8,
120+ B3_MA_RC_TEST = 0x01ca,
121+ B3_PA_TOINI_RX1 = 0x01d0,
122+ B3_PA_TOINI_RX2 = 0x01d4,
123+ B3_PA_TOINI_TX1 = 0x01d8,
124+ B3_PA_TOINI_TX2 = 0x01dc,
125+ B3_PA_TOVAL_RX1 = 0x01e0,
126+ B3_PA_TOVAL_RX2 = 0x01e4,
127+ B3_PA_TOVAL_TX1 = 0x01e8,
128+ B3_PA_TOVAL_TX2 = 0x01ec,
129+ B3_PA_CTRL = 0x01f0,
130+ B3_PA_TEST = 0x01f2,
131+};
132+
133+/* B0_CTST 16 bit Control/Status register */
134+enum {
135+ CS_CLK_RUN_HOT = 1<<13,/* CLK_RUN hot m. (YUKON-Lite only) */
136+ CS_CLK_RUN_RST = 1<<12,/* CLK_RUN reset (YUKON-Lite only) */
137+ CS_CLK_RUN_ENA = 1<<11,/* CLK_RUN enable (YUKON-Lite only) */
138+ CS_VAUX_AVAIL = 1<<10,/* VAUX available (YUKON only) */
139+ CS_BUS_CLOCK = 1<<9, /* Bus Clock 0/1 = 33/66 MHz */
140+ CS_BUS_SLOT_SZ = 1<<8, /* Slot Size 0/1 = 32/64 bit slot */
141+ CS_ST_SW_IRQ = 1<<7, /* Set IRQ SW Request */
142+ CS_CL_SW_IRQ = 1<<6, /* Clear IRQ SW Request */
143+ CS_STOP_DONE = 1<<5, /* Stop Master is finished */
144+ CS_STOP_MAST = 1<<4, /* Command Bit to stop the master */
145+ CS_MRST_CLR = 1<<3, /* Clear Master reset */
146+ CS_MRST_SET = 1<<2, /* Set Master reset */
147+ CS_RST_CLR = 1<<1, /* Clear Software reset */
148+ CS_RST_SET = 1, /* Set Software reset */
149+
150+/* B0_LED 8 Bit LED register */
151+/* Bit 7.. 2: reserved */
152+ LED_STAT_ON = 1<<1, /* Status LED on */
153+ LED_STAT_OFF = 1, /* Status LED off */
154+
155+/* B0_POWER_CTRL 8 Bit Power Control reg (YUKON only) */
156+ PC_VAUX_ENA = 1<<7, /* Switch VAUX Enable */
157+ PC_VAUX_DIS = 1<<6, /* Switch VAUX Disable */
158+ PC_VCC_ENA = 1<<5, /* Switch VCC Enable */
159+ PC_VCC_DIS = 1<<4, /* Switch VCC Disable */
160+ PC_VAUX_ON = 1<<3, /* Switch VAUX On */
161+ PC_VAUX_OFF = 1<<2, /* Switch VAUX Off */
162+ PC_VCC_ON = 1<<1, /* Switch VCC On */
163+ PC_VCC_OFF = 1<<0, /* Switch VCC Off */
164+};
165+
166+/* B2_IRQM_MSK 32 bit IRQ Moderation Mask */
167+enum {
168+ IS_ALL_MSK = 0xbffffffful, /* All Interrupt bits */
169+ IS_HW_ERR = 1<<31, /* Interrupt HW Error */
170+ /* Bit 30: reserved */
171+ IS_PA_TO_RX1 = 1<<29, /* Packet Arb Timeout Rx1 */
172+ IS_PA_TO_RX2 = 1<<28, /* Packet Arb Timeout Rx2 */
173+ IS_PA_TO_TX1 = 1<<27, /* Packet Arb Timeout Tx1 */
174+ IS_PA_TO_TX2 = 1<<26, /* Packet Arb Timeout Tx2 */
175+ IS_I2C_READY = 1<<25, /* IRQ on end of I2C Tx */
176+ IS_IRQ_SW = 1<<24, /* SW forced IRQ */
177+ IS_EXT_REG = 1<<23, /* IRQ from LM80 or PHY (GENESIS only) */
178+ /* IRQ from PHY (YUKON only) */
179+ IS_TIMINT = 1<<22, /* IRQ from Timer */
180+ IS_MAC1 = 1<<21, /* IRQ from MAC 1 */
181+ IS_LNK_SYNC_M1 = 1<<20, /* Link Sync Cnt wrap MAC 1 */
182+ IS_MAC2 = 1<<19, /* IRQ from MAC 2 */
183+ IS_LNK_SYNC_M2 = 1<<18, /* Link Sync Cnt wrap MAC 2 */
184+/* Receive Queue 1 */
185+ IS_R1_B = 1<<17, /* Q_R1 End of Buffer */
186+ IS_R1_F = 1<<16, /* Q_R1 End of Frame */
187+ IS_R1_C = 1<<15, /* Q_R1 Encoding Error */
188+/* Receive Queue 2 */
189+ IS_R2_B = 1<<14, /* Q_R2 End of Buffer */
190+ IS_R2_F = 1<<13, /* Q_R2 End of Frame */
191+ IS_R2_C = 1<<12, /* Q_R2 Encoding Error */
192+/* Synchronous Transmit Queue 1 */
193+ IS_XS1_B = 1<<11, /* Q_XS1 End of Buffer */
194+ IS_XS1_F = 1<<10, /* Q_XS1 End of Frame */
195+ IS_XS1_C = 1<<9, /* Q_XS1 Encoding Error */
196+/* Asynchronous Transmit Queue 1 */
197+ IS_XA1_B = 1<<8, /* Q_XA1 End of Buffer */
198+ IS_XA1_F = 1<<7, /* Q_XA1 End of Frame */
199+ IS_XA1_C = 1<<6, /* Q_XA1 Encoding Error */
200+/* Synchronous Transmit Queue 2 */
201+ IS_XS2_B = 1<<5, /* Q_XS2 End of Buffer */
202+ IS_XS2_F = 1<<4, /* Q_XS2 End of Frame */
203+ IS_XS2_C = 1<<3, /* Q_XS2 Encoding Error */
204+/* Asynchronous Transmit Queue 2 */
205+ IS_XA2_B = 1<<2, /* Q_XA2 End of Buffer */
206+ IS_XA2_F = 1<<1, /* Q_XA2 End of Frame */
207+ IS_XA2_C = 1<<0, /* Q_XA2 Encoding Error */
208+
209+ IS_TO_PORT1 = IS_PA_TO_RX1 | IS_PA_TO_TX1,
210+ IS_TO_PORT2 = IS_PA_TO_RX2 | IS_PA_TO_TX2,
211+
212+ IS_PORT_1 = IS_XA1_F| IS_R1_F | IS_TO_PORT1 | IS_MAC1,
213+ IS_PORT_2 = IS_XA2_F| IS_R2_F | IS_TO_PORT2 | IS_MAC2,
214+};
215+
216+
217+/* B2_IRQM_HWE_MSK 32 bit IRQ Moderation HW Error Mask */
218+enum {
219+ IS_IRQ_TIST_OV = 1<<13, /* Time Stamp Timer Overflow (YUKON only) */
220+ IS_IRQ_SENSOR = 1<<12, /* IRQ from Sensor (YUKON only) */
221+ IS_IRQ_MST_ERR = 1<<11, /* IRQ master error detected */
222+ IS_IRQ_STAT = 1<<10, /* IRQ status exception */
223+ IS_NO_STAT_M1 = 1<<9, /* No Rx Status from MAC 1 */
224+ IS_NO_STAT_M2 = 1<<8, /* No Rx Status from MAC 2 */
225+ IS_NO_TIST_M1 = 1<<7, /* No Time Stamp from MAC 1 */
226+ IS_NO_TIST_M2 = 1<<6, /* No Time Stamp from MAC 2 */
227+ IS_RAM_RD_PAR = 1<<5, /* RAM Read Parity Error */
228+ IS_RAM_WR_PAR = 1<<4, /* RAM Write Parity Error */
229+ IS_M1_PAR_ERR = 1<<3, /* MAC 1 Parity Error */
230+ IS_M2_PAR_ERR = 1<<2, /* MAC 2 Parity Error */
231+ IS_R1_PAR_ERR = 1<<1, /* Queue R1 Parity Error */
232+ IS_R2_PAR_ERR = 1<<0, /* Queue R2 Parity Error */
233+
234+ IS_ERR_MSK = IS_IRQ_MST_ERR | IS_IRQ_STAT
235+ | IS_NO_STAT_M1 | IS_NO_STAT_M2
236+ | IS_RAM_RD_PAR | IS_RAM_WR_PAR
237+ | IS_M1_PAR_ERR | IS_M2_PAR_ERR
238+ | IS_R1_PAR_ERR | IS_R2_PAR_ERR,
239+};
240+
241+/* B2_TST_CTRL1 8 bit Test Control Register 1 */
242+enum {
243+ TST_FRC_DPERR_MR = 1<<7, /* force DATAPERR on MST RD */
244+ TST_FRC_DPERR_MW = 1<<6, /* force DATAPERR on MST WR */
245+ TST_FRC_DPERR_TR = 1<<5, /* force DATAPERR on TRG RD */
246+ TST_FRC_DPERR_TW = 1<<4, /* force DATAPERR on TRG WR */
247+ TST_FRC_APERR_M = 1<<3, /* force ADDRPERR on MST */
248+ TST_FRC_APERR_T = 1<<2, /* force ADDRPERR on TRG */
249+ TST_CFG_WRITE_ON = 1<<1, /* Enable Config Reg WR */
250+ TST_CFG_WRITE_OFF= 1<<0, /* Disable Config Reg WR */
251+};
252+
253+/* B2_MAC_CFG 8 bit MAC Configuration / Chip Revision */
254+enum {
255+ CFG_CHIP_R_MSK = 0xf<<4, /* Bit 7.. 4: Chip Revision */
256+ /* Bit 3.. 2: reserved */
257+ CFG_DIS_M2_CLK = 1<<1, /* Disable Clock for 2nd MAC */
258+ CFG_SNG_MAC = 1<<0, /* MAC Config: 0=2 MACs / 1=1 MAC*/
259+};
260+
261+/* B2_CHIP_ID 8 bit Chip Identification Number */
262+enum {
263+ CHIP_ID_GENESIS = 0x0a, /* Chip ID for GENESIS */
264+ CHIP_ID_YUKON = 0xb0, /* Chip ID for YUKON */
265+ CHIP_ID_YUKON_LITE = 0xb1, /* Chip ID for YUKON-Lite (Rev. A1-A3) */
266+ CHIP_ID_YUKON_LP = 0xb2, /* Chip ID for YUKON-LP */
267+ CHIP_ID_YUKON_XL = 0xb3, /* Chip ID for YUKON-2 XL */
268+ CHIP_ID_YUKON_EC = 0xb6, /* Chip ID for YUKON-2 EC */
269+ CHIP_ID_YUKON_FE = 0xb7, /* Chip ID for YUKON-2 FE */
270+
271+ CHIP_REV_YU_LITE_A1 = 3, /* Chip Rev. for YUKON-Lite A1,A2 */
272+ CHIP_REV_YU_LITE_A3 = 7, /* Chip Rev. for YUKON-Lite A3 */
273+};
274+
275+/* B2_TI_CTRL 8 bit Timer control */
276+/* B2_IRQM_CTRL 8 bit IRQ Moderation Timer Control */
277+enum {
278+ TIM_START = 1<<2, /* Start Timer */
279+ TIM_STOP = 1<<1, /* Stop Timer */
280+ TIM_CLR_IRQ = 1<<0, /* Clear Timer IRQ (!IRQM) */
281+};
282+
283+/* B2_TI_TEST 8 Bit Timer Test */
284+/* B2_IRQM_TEST 8 bit IRQ Moderation Timer Test */
285+/* B28_DPT_TST 8 bit Descriptor Poll Timer Test Reg */
286+enum {
287+ TIM_T_ON = 1<<2, /* Test mode on */
288+ TIM_T_OFF = 1<<1, /* Test mode off */
289+ TIM_T_STEP = 1<<0, /* Test step */
290+};
291+
292+/* B2_GP_IO 32 bit General Purpose I/O Register */
293+enum {
294+ GP_DIR_9 = 1<<25, /* IO_9 direct, 0=In/1=Out */
295+ GP_DIR_8 = 1<<24, /* IO_8 direct, 0=In/1=Out */
296+ GP_DIR_7 = 1<<23, /* IO_7 direct, 0=In/1=Out */
297+ GP_DIR_6 = 1<<22, /* IO_6 direct, 0=In/1=Out */
298+ GP_DIR_5 = 1<<21, /* IO_5 direct, 0=In/1=Out */
299+ GP_DIR_4 = 1<<20, /* IO_4 direct, 0=In/1=Out */
300+ GP_DIR_3 = 1<<19, /* IO_3 direct, 0=In/1=Out */
301+ GP_DIR_2 = 1<<18, /* IO_2 direct, 0=In/1=Out */
302+ GP_DIR_1 = 1<<17, /* IO_1 direct, 0=In/1=Out */
303+ GP_DIR_0 = 1<<16, /* IO_0 direct, 0=In/1=Out */
304+
305+ GP_IO_9 = 1<<9, /* IO_9 pin */
306+ GP_IO_8 = 1<<8, /* IO_8 pin */
307+ GP_IO_7 = 1<<7, /* IO_7 pin */
308+ GP_IO_6 = 1<<6, /* IO_6 pin */
309+ GP_IO_5 = 1<<5, /* IO_5 pin */
310+ GP_IO_4 = 1<<4, /* IO_4 pin */
311+ GP_IO_3 = 1<<3, /* IO_3 pin */
312+ GP_IO_2 = 1<<2, /* IO_2 pin */
313+ GP_IO_1 = 1<<1, /* IO_1 pin */
314+ GP_IO_0 = 1<<0, /* IO_0 pin */
315+};
316+
317+/* Descriptor Bit Definition */
318+/* TxCtrl Transmit Buffer Control Field */
319+/* RxCtrl Receive Buffer Control Field */
320+enum {
321+ BMU_OWN = 1<<31, /* OWN bit: 0=host/1=BMU */
322+ BMU_STF = 1<<30, /* Start of Frame */
323+ BMU_EOF = 1<<29, /* End of Frame */
324+ BMU_IRQ_EOB = 1<<28, /* Req "End of Buffer" IRQ */
325+ BMU_IRQ_EOF = 1<<27, /* Req "End of Frame" IRQ */
326+ /* TxCtrl specific bits */
327+ BMU_STFWD = 1<<26, /* (Tx) Store & Forward Frame */
328+ BMU_NO_FCS = 1<<25, /* (Tx) Disable MAC FCS (CRC) generation */
329+ BMU_SW = 1<<24, /* (Tx) 1 bit res. for SW use */
330+ /* RxCtrl specific bits */
331+ BMU_DEV_0 = 1<<26, /* (Rx) Transfer data to Dev0 */
332+ BMU_STAT_VAL = 1<<25, /* (Rx) Rx Status Valid */
333+ BMU_TIST_VAL = 1<<24, /* (Rx) Rx TimeStamp Valid */
334+ /* Bit 23..16: BMU Check Opcodes */
335+ BMU_CHECK = 0x55<<16, /* Default BMU check */
336+ BMU_TCP_CHECK = 0x56<<16, /* Descr with TCP ext */
337+ BMU_UDP_CHECK = 0x57<<16, /* Descr with UDP ext (YUKON only) */
338+ BMU_BBC = 0xffffL, /* Bit 15.. 0: Buffer Byte Counter */
339+};
340+
341+/* B2_BSC_CTRL 8 bit Blink Source Counter Control */
342+enum {
343+ BSC_START = 1<<1, /* Start Blink Source Counter */
344+ BSC_STOP = 1<<0, /* Stop Blink Source Counter */
345+};
346+
347+/* B2_BSC_STAT 8 bit Blink Source Counter Status */
348+enum {
349+ BSC_SRC = 1<<0, /* Blink Source, 0=Off / 1=On */
350+};
351+
352+/* B2_BSC_TST 16 bit Blink Source Counter Test Reg */
353+enum {
354+ BSC_T_ON = 1<<2, /* Test mode on */
355+ BSC_T_OFF = 1<<1, /* Test mode off */
356+ BSC_T_STEP = 1<<0, /* Test step */
357+};
358+
359+/* B3_RAM_ADDR 32 bit RAM Address, to read or write */
360+ /* Bit 31..19: reserved */
361+#define RAM_ADR_RAN 0x0007ffffL /* Bit 18.. 0: RAM Address Range */
362+/* RAM Interface Registers */
363+
364+/* B3_RI_CTRL 16 bit RAM Iface Control Register */
365+enum {
366+ RI_CLR_RD_PERR = 1<<9, /* Clear IRQ RAM Read Parity Err */
367+ RI_CLR_WR_PERR = 1<<8, /* Clear IRQ RAM Write Parity Err*/
368+
369+ RI_RST_CLR = 1<<1, /* Clear RAM Interface Reset */
370+ RI_RST_SET = 1<<0, /* Set RAM Interface Reset */
371+};
372+
373+/* MAC Arbiter Registers */
374+/* B3_MA_TO_CTRL 16 bit MAC Arbiter Timeout Ctrl Reg */
375+enum {
376+ MA_FOE_ON = 1<<3, /* XMAC Fast Output Enable ON */
377+ MA_FOE_OFF = 1<<2, /* XMAC Fast Output Enable OFF */
378+ MA_RST_CLR = 1<<1, /* Clear MAC Arbiter Reset */
379+ MA_RST_SET = 1<<0, /* Set MAC Arbiter Reset */
380+
381+};
382+
383+/* Timeout values */
384+#define SK_MAC_TO_53 72 /* MAC arbiter timeout */
385+#define SK_PKT_TO_53 0x2000 /* Packet arbiter timeout */
386+#define SK_PKT_TO_MAX 0xffff /* Maximum value */
387+#define SK_RI_TO_53 36 /* RAM interface timeout */
388+
389+/* Packet Arbiter Registers */
390+/* B3_PA_CTRL 16 bit Packet Arbiter Ctrl Register */
391+enum {
392+ PA_CLR_TO_TX2 = 1<<13, /* Clear IRQ Packet Timeout TX2 */
393+ PA_CLR_TO_TX1 = 1<<12, /* Clear IRQ Packet Timeout TX1 */
394+ PA_CLR_TO_RX2 = 1<<11, /* Clear IRQ Packet Timeout RX2 */
395+ PA_CLR_TO_RX1 = 1<<10, /* Clear IRQ Packet Timeout RX1 */
396+ PA_ENA_TO_TX2 = 1<<9, /* Enable Timeout Timer TX2 */
397+ PA_DIS_TO_TX2 = 1<<8, /* Disable Timeout Timer TX2 */
398+ PA_ENA_TO_TX1 = 1<<7, /* Enable Timeout Timer TX1 */
399+ PA_DIS_TO_TX1 = 1<<6, /* Disable Timeout Timer TX1 */
400+ PA_ENA_TO_RX2 = 1<<5, /* Enable Timeout Timer RX2 */
401+ PA_DIS_TO_RX2 = 1<<4, /* Disable Timeout Timer RX2 */
402+ PA_ENA_TO_RX1 = 1<<3, /* Enable Timeout Timer RX1 */
403+ PA_DIS_TO_RX1 = 1<<2, /* Disable Timeout Timer RX1 */
404+ PA_RST_CLR = 1<<1, /* Clear MAC Arbiter Reset */
405+ PA_RST_SET = 1<<0, /* Set MAC Arbiter Reset */
406+};
407+
408+#define PA_ENA_TO_ALL (PA_ENA_TO_RX1 | PA_ENA_TO_RX2 |\
409+ PA_ENA_TO_TX1 | PA_ENA_TO_TX2)
410+
411+
412+/* Transmit Arbiter Registers MAC 1 and 2, use SK_REG() to access */
413+/* TXA_ITI_INI 32 bit Tx Arb Interval Timer Init Val */
414+/* TXA_ITI_VAL 32 bit Tx Arb Interval Timer Value */
415+/* TXA_LIM_INI 32 bit Tx Arb Limit Counter Init Val */
416+/* TXA_LIM_VAL 32 bit Tx Arb Limit Counter Value */
417+
418+#define TXA_MAX_VAL 0x00ffffffUL /* Bit 23.. 0: Max TXA Timer/Cnt Val */
419+
420+/* TXA_CTRL 8 bit Tx Arbiter Control Register */
421+enum {
422+ TXA_ENA_FSYNC = 1<<7, /* Enable force of sync Tx queue */
423+ TXA_DIS_FSYNC = 1<<6, /* Disable force of sync Tx queue */
424+ TXA_ENA_ALLOC = 1<<5, /* Enable alloc of free bandwidth */
425+ TXA_DIS_ALLOC = 1<<4, /* Disable alloc of free bandwidth */
426+ TXA_START_RC = 1<<3, /* Start sync Rate Control */
427+ TXA_STOP_RC = 1<<2, /* Stop sync Rate Control */
428+ TXA_ENA_ARB = 1<<1, /* Enable Tx Arbiter */
429+ TXA_DIS_ARB = 1<<0, /* Disable Tx Arbiter */
430+};
431+
432+/*
433+ * Bank 4 - 5
434+ */
435+/* Transmit Arbiter Registers MAC 1 and 2, use SK_REG() to access */
436+enum {
437+ TXA_ITI_INI = 0x0200,/* 32 bit Tx Arb Interval Timer Init Val*/
438+ TXA_ITI_VAL = 0x0204,/* 32 bit Tx Arb Interval Timer Value */
439+ TXA_LIM_INI = 0x0208,/* 32 bit Tx Arb Limit Counter Init Val */
440+ TXA_LIM_VAL = 0x020c,/* 32 bit Tx Arb Limit Counter Value */
441+ TXA_CTRL = 0x0210,/* 8 bit Tx Arbiter Control Register */
442+ TXA_TEST = 0x0211,/* 8 bit Tx Arbiter Test Register */
443+ TXA_STAT = 0x0212,/* 8 bit Tx Arbiter Status Register */
444+};
445+
446+
447+enum {
448+ B6_EXT_REG = 0x0300,/* External registers (GENESIS only) */
449+ B7_CFG_SPC = 0x0380,/* copy of the Configuration register */
450+ B8_RQ1_REGS = 0x0400,/* Receive Queue 1 */
451+ B8_RQ2_REGS = 0x0480,/* Receive Queue 2 */
452+ B8_TS1_REGS = 0x0600,/* Transmit sync queue 1 */
453+ B8_TA1_REGS = 0x0680,/* Transmit async queue 1 */
454+ B8_TS2_REGS = 0x0700,/* Transmit sync queue 2 */
455+ B8_TA2_REGS = 0x0780,/* Transmit async queue 2 */
456+ B16_RAM_REGS = 0x0800,/* RAM Buffer Registers */
457+};
458+
459+/* Queue Register Offsets, use Q_ADDR() to access */
460+enum {
461+ B8_Q_REGS = 0x0400, /* base of Queue registers */
462+ Q_D = 0x00, /* 8*32 bit Current Descriptor */
463+ Q_DA_L = 0x20, /* 32 bit Current Descriptor Address Low dWord */
464+ Q_DA_H = 0x24, /* 32 bit Current Descriptor Address High dWord */
465+ Q_AC_L = 0x28, /* 32 bit Current Address Counter Low dWord */
466+ Q_AC_H = 0x2c, /* 32 bit Current Address Counter High dWord */
467+ Q_BC = 0x30, /* 32 bit Current Byte Counter */
468+ Q_CSR = 0x34, /* 32 bit BMU Control/Status Register */
469+ Q_F = 0x38, /* 32 bit Flag Register */
470+ Q_T1 = 0x3c, /* 32 bit Test Register 1 */
471+ Q_T1_TR = 0x3c, /* 8 bit Test Register 1 Transfer SM */
472+ Q_T1_WR = 0x3d, /* 8 bit Test Register 1 Write Descriptor SM */
473+ Q_T1_RD = 0x3e, /* 8 bit Test Register 1 Read Descriptor SM */
474+ Q_T1_SV = 0x3f, /* 8 bit Test Register 1 Supervisor SM */
475+ Q_T2 = 0x40, /* 32 bit Test Register 2 */
476+ Q_T3 = 0x44, /* 32 bit Test Register 3 */
477+
478+};
479+#define Q_ADDR(reg, offs) (B8_Q_REGS + (reg) + (offs))
480+
481+/* RAM Buffer Register Offsets */
482+enum {
483+
484+ RB_START = 0x00,/* 32 bit RAM Buffer Start Address */
485+ RB_END = 0x04,/* 32 bit RAM Buffer End Address */
486+ RB_WP = 0x08,/* 32 bit RAM Buffer Write Pointer */
487+ RB_RP = 0x0c,/* 32 bit RAM Buffer Read Pointer */
488+ RB_RX_UTPP = 0x10,/* 32 bit Rx Upper Threshold, Pause Packet */
489+ RB_RX_LTPP = 0x14,/* 32 bit Rx Lower Threshold, Pause Packet */
490+ RB_RX_UTHP = 0x18,/* 32 bit Rx Upper Threshold, High Prio */
491+ RB_RX_LTHP = 0x1c,/* 32 bit Rx Lower Threshold, High Prio */
492+ /* 0x10 - 0x1f: reserved at Tx RAM Buffer Registers */
493+ RB_PC = 0x20,/* 32 bit RAM Buffer Packet Counter */
494+ RB_LEV = 0x24,/* 32 bit RAM Buffer Level Register */
495+ RB_CTRL = 0x28,/* 32 bit RAM Buffer Control Register */
496+ RB_TST1 = 0x29,/* 8 bit RAM Buffer Test Register 1 */
497+ RB_TST2 = 0x2a,/* 8 bit RAM Buffer Test Register 2 */
498+};
499+
500+/* Receive and Transmit Queues */
501+enum {
502+ Q_R1 = 0x0000, /* Receive Queue 1 */
503+ Q_R2 = 0x0080, /* Receive Queue 2 */
504+ Q_XS1 = 0x0200, /* Synchronous Transmit Queue 1 */
505+ Q_XA1 = 0x0280, /* Asynchronous Transmit Queue 1 */
506+ Q_XS2 = 0x0300, /* Synchronous Transmit Queue 2 */
507+ Q_XA2 = 0x0380, /* Asynchronous Transmit Queue 2 */
508+};
509+
510+/* Different MAC Types */
511+enum {
512+ SK_MAC_XMAC = 0, /* Xaqti XMAC II */
513+ SK_MAC_GMAC = 1, /* Marvell GMAC */
514+};
515+
516+/* Different PHY Types */
517+enum {
518+ SK_PHY_XMAC = 0,/* integrated in XMAC II */
519+ SK_PHY_BCOM = 1,/* Broadcom BCM5400 */
520+ SK_PHY_LONE = 2,/* Level One LXT1000 [not supported]*/
521+ SK_PHY_NAT = 3,/* National DP83891 [not supported] */
522+ SK_PHY_MARV_COPPER= 4,/* Marvell 88E1011S */
523+ SK_PHY_MARV_FIBER = 5,/* Marvell 88E1011S working on fiber */
524+};
525+
526+/* PHY addresses (bits 12..8 of PHY address reg) */
527+enum {
528+ PHY_ADDR_XMAC = 0<<8,
529+ PHY_ADDR_BCOM = 1<<8,
530+
531+/* GPHY address (bits 15..11 of SMI control reg) */
532+ PHY_ADDR_MARV = 0,
533+};
534+
535+#define RB_ADDR(offs, queue) (B16_RAM_REGS + (queue) + (offs))
536+
537+/* Receive MAC FIFO, Receive LED, and Link_Sync regs (GENESIS only) */
538+enum {
539+ RX_MFF_EA = 0x0c00,/* 32 bit Receive MAC FIFO End Address */
540+ RX_MFF_WP = 0x0c04,/* 32 bit Receive MAC FIFO Write Pointer */
541+
542+ RX_MFF_RP = 0x0c0c,/* 32 bit Receive MAC FIFO Read Pointer */
543+ RX_MFF_PC = 0x0c10,/* 32 bit Receive MAC FIFO Packet Cnt */
544+ RX_MFF_LEV = 0x0c14,/* 32 bit Receive MAC FIFO Level */
545+ RX_MFF_CTRL1 = 0x0c18,/* 16 bit Receive MAC FIFO Control Reg 1*/
546+ RX_MFF_STAT_TO = 0x0c1a,/* 8 bit Receive MAC Status Timeout */
547+ RX_MFF_TIST_TO = 0x0c1b,/* 8 bit Receive MAC Time Stamp Timeout */
548+ RX_MFF_CTRL2 = 0x0c1c,/* 8 bit Receive MAC FIFO Control Reg 2*/
549+ RX_MFF_TST1 = 0x0c1d,/* 8 bit Receive MAC FIFO Test Reg 1 */
550+ RX_MFF_TST2 = 0x0c1e,/* 8 bit Receive MAC FIFO Test Reg 2 */
551+
552+ RX_LED_INI = 0x0c20,/* 32 bit Receive LED Cnt Init Value */
553+ RX_LED_VAL = 0x0c24,/* 32 bit Receive LED Cnt Current Value */
554+ RX_LED_CTRL = 0x0c28,/* 8 bit Receive LED Cnt Control Reg */
555+ RX_LED_TST = 0x0c29,/* 8 bit Receive LED Cnt Test Register */
556+
557+ LNK_SYNC_INI = 0x0c30,/* 32 bit Link Sync Cnt Init Value */
558+ LNK_SYNC_VAL = 0x0c34,/* 32 bit Link Sync Cnt Current Value */
559+ LNK_SYNC_CTRL = 0x0c38,/* 8 bit Link Sync Cnt Control Register */
560+ LNK_SYNC_TST = 0x0c39,/* 8 bit Link Sync Cnt Test Register */
561+ LNK_LED_REG = 0x0c3c,/* 8 bit Link LED Register */
562+};
563+
564+/* Receive and Transmit MAC FIFO Registers (GENESIS only) */
565+/* RX_MFF_CTRL1 16 bit Receive MAC FIFO Control Reg 1 */
566+enum {
567+ MFF_ENA_RDY_PAT = 1<<13, /* Enable Ready Patch */
568+ MFF_DIS_RDY_PAT = 1<<12, /* Disable Ready Patch */
569+ MFF_ENA_TIM_PAT = 1<<11, /* Enable Timing Patch */
570+ MFF_DIS_TIM_PAT = 1<<10, /* Disable Timing Patch */
571+ MFF_ENA_ALM_FUL = 1<<9, /* Enable AlmostFull Sign */
572+ MFF_DIS_ALM_FUL = 1<<8, /* Disable AlmostFull Sign */
573+ MFF_ENA_PAUSE = 1<<7, /* Enable Pause Signaling */
574+ MFF_DIS_PAUSE = 1<<6, /* Disable Pause Signaling */
575+ MFF_ENA_FLUSH = 1<<5, /* Enable Frame Flushing */
576+ MFF_DIS_FLUSH = 1<<4, /* Disable Frame Flushing */
577+ MFF_ENA_TIST = 1<<3, /* Enable Time Stamp Gener */
578+ MFF_DIS_TIST = 1<<2, /* Disable Time Stamp Gener */
579+ MFF_CLR_INTIST = 1<<1, /* Clear IRQ No Time Stamp */
580+ MFF_CLR_INSTAT = 1<<0, /* Clear IRQ No Status */
581+#define MFF_RX_CTRL_DEF MFF_ENA_TIM_PAT
582+};
583+
584+/* TX_MFF_CTRL1 16 bit Transmit MAC FIFO Control Reg 1 */
585+enum {
586+ MFF_CLR_PERR = 1<<15, /* Clear Parity Error IRQ */
587+ /* Bit 14: reserved */
588+ MFF_ENA_PKT_REC = 1<<13, /* Enable Packet Recovery */
589+ MFF_DIS_PKT_REC = 1<<12, /* Disable Packet Recovery */
590+
591+ MFF_ENA_W4E = 1<<7, /* Enable Wait for Empty */
592+ MFF_DIS_W4E = 1<<6, /* Disable Wait for Empty */
593+
594+ MFF_ENA_LOOPB = 1<<3, /* Enable Loopback */
595+ MFF_DIS_LOOPB = 1<<2, /* Disable Loopback */
596+ MFF_CLR_MAC_RST = 1<<1, /* Clear XMAC Reset */
597+ MFF_SET_MAC_RST = 1<<0, /* Set XMAC Reset */
598+};
599+
600+#define MFF_TX_CTRL_DEF (MFF_ENA_PKT_REC | MFF_ENA_TIM_PAT | MFF_ENA_FLUSH)
601+
602+/* RX_MFF_TST2 8 bit Receive MAC FIFO Test Register 2 */
603+/* TX_MFF_TST2 8 bit Transmit MAC FIFO Test Register 2 */
604+enum {
605+ MFF_WSP_T_ON = 1<<6, /* Tx: Write Shadow Ptr TestOn */
606+ MFF_WSP_T_OFF = 1<<5, /* Tx: Write Shadow Ptr TstOff */
607+ MFF_WSP_INC = 1<<4, /* Tx: Write Shadow Ptr Increment */
608+ MFF_PC_DEC = 1<<3, /* Packet Counter Decrement */
609+ MFF_PC_T_ON = 1<<2, /* Packet Counter Test On */
610+ MFF_PC_T_OFF = 1<<1, /* Packet Counter Test Off */
611+ MFF_PC_INC = 1<<0, /* Packet Counter Increment */
612+};
613+
614+/* RX_MFF_TST1 8 bit Receive MAC FIFO Test Register 1 */
615+/* TX_MFF_TST1 8 bit Transmit MAC FIFO Test Register 1 */
616+enum {
617+ MFF_WP_T_ON = 1<<6, /* Write Pointer Test On */
618+ MFF_WP_T_OFF = 1<<5, /* Write Pointer Test Off */
619+ MFF_WP_INC = 1<<4, /* Write Pointer Increm */
620+
621+ MFF_RP_T_ON = 1<<2, /* Read Pointer Test On */
622+ MFF_RP_T_OFF = 1<<1, /* Read Pointer Test Off */
623+ MFF_RP_DEC = 1<<0, /* Read Pointer Decrement */
624+};
625+
626+/* RX_MFF_CTRL2 8 bit Receive MAC FIFO Control Reg 2 */
627+/* TX_MFF_CTRL2 8 bit Transmit MAC FIFO Control Reg 2 */
628+enum {
629+ MFF_ENA_OP_MD = 1<<3, /* Enable Operation Mode */
630+ MFF_DIS_OP_MD = 1<<2, /* Disable Operation Mode */
631+ MFF_RST_CLR = 1<<1, /* Clear MAC FIFO Reset */
632+ MFF_RST_SET = 1<<0, /* Set MAC FIFO Reset */
633+};
634+
635+
636+/* Link LED Counter Registers (GENESIS only) */
637+
638+/* RX_LED_CTRL 8 bit Receive LED Cnt Control Reg */
639+/* TX_LED_CTRL 8 bit Transmit LED Cnt Control Reg */
640+/* LNK_SYNC_CTRL 8 bit Link Sync Cnt Control Register */
641+enum {
642+ LED_START = 1<<2, /* Start Timer */
643+ LED_STOP = 1<<1, /* Stop Timer */
644+ LED_STATE = 1<<0, /* Rx/Tx: LED State, 1=LED on */
645+};
646+
647+/* RX_LED_TST 8 bit Receive LED Cnt Test Register */
648+/* TX_LED_TST 8 bit Transmit LED Cnt Test Register */
649+/* LNK_SYNC_TST 8 bit Link Sync Cnt Test Register */
650+enum {
651+ LED_T_ON = 1<<2, /* LED Counter Test mode On */
652+ LED_T_OFF = 1<<1, /* LED Counter Test mode Off */
653+ LED_T_STEP = 1<<0, /* LED Counter Step */
654+};
655+
656+/* LNK_LED_REG 8 bit Link LED Register */
657+enum {
658+ LED_BLK_ON = 1<<5, /* Link LED Blinking On */
659+ LED_BLK_OFF = 1<<4, /* Link LED Blinking Off */
660+ LED_SYNC_ON = 1<<3, /* Use Sync Wire to switch LED */
661+ LED_SYNC_OFF = 1<<2, /* Disable Sync Wire Input */
662+ LED_ON = 1<<1, /* switch LED on */
663+ LED_OFF = 1<<0, /* switch LED off */
664+};
665+
666+/* Receive GMAC FIFO (YUKON) */
667+enum {
668+ RX_GMF_EA = 0x0c40,/* 32 bit Rx GMAC FIFO End Address */
669+ RX_GMF_AF_THR = 0x0c44,/* 32 bit Rx GMAC FIFO Almost Full Thresh. */
670+ RX_GMF_CTRL_T = 0x0c48,/* 32 bit Rx GMAC FIFO Control/Test */
671+ RX_GMF_FL_MSK = 0x0c4c,/* 32 bit Rx GMAC FIFO Flush Mask */
672+ RX_GMF_FL_THR = 0x0c50,/* 32 bit Rx GMAC FIFO Flush Threshold */
673+ RX_GMF_WP = 0x0c60,/* 32 bit Rx GMAC FIFO Write Pointer */
674+ RX_GMF_WLEV = 0x0c68,/* 32 bit Rx GMAC FIFO Write Level */
675+ RX_GMF_RP = 0x0c70,/* 32 bit Rx GMAC FIFO Read Pointer */
676+ RX_GMF_RLEV = 0x0c78,/* 32 bit Rx GMAC FIFO Read Level */
677+};
678+
679+
680+/* TXA_TEST 8 bit Tx Arbiter Test Register */
681+enum {
682+ TXA_INT_T_ON = 1<<5, /* Tx Arb Interval Timer Test On */
683+ TXA_INT_T_OFF = 1<<4, /* Tx Arb Interval Timer Test Off */
684+ TXA_INT_T_STEP = 1<<3, /* Tx Arb Interval Timer Step */
685+ TXA_LIM_T_ON = 1<<2, /* Tx Arb Limit Timer Test On */
686+ TXA_LIM_T_OFF = 1<<1, /* Tx Arb Limit Timer Test Off */
687+ TXA_LIM_T_STEP = 1<<0, /* Tx Arb Limit Timer Step */
688+};
689+
690+/* TXA_STAT 8 bit Tx Arbiter Status Register */
691+enum {
692+ TXA_PRIO_XS = 1<<0, /* sync queue has prio to send */
693+};
694+
695+
696+/* Q_BC 32 bit Current Byte Counter */
697+
698+/* BMU Control Status Registers */
699+/* B0_R1_CSR 32 bit BMU Ctrl/Stat Rx Queue 1 */
700+/* B0_R2_CSR 32 bit BMU Ctrl/Stat Rx Queue 2 */
701+/* B0_XA1_CSR 32 bit BMU Ctrl/Stat Sync Tx Queue 1 */
702+/* B0_XS1_CSR 32 bit BMU Ctrl/Stat Async Tx Queue 1 */
703+/* B0_XA2_CSR 32 bit BMU Ctrl/Stat Sync Tx Queue 2 */
704+/* B0_XS2_CSR 32 bit BMU Ctrl/Stat Async Tx Queue 2 */
705+/* Q_CSR 32 bit BMU Control/Status Register */
706+
707+enum {
708+ CSR_SV_IDLE = 1<<24, /* BMU SM Idle */
709+
710+ CSR_DESC_CLR = 1<<21, /* Clear Reset for Descr */
711+ CSR_DESC_SET = 1<<20, /* Set Reset for Descr */
712+ CSR_FIFO_CLR = 1<<19, /* Clear Reset for FIFO */
713+ CSR_FIFO_SET = 1<<18, /* Set Reset for FIFO */
714+ CSR_HPI_RUN = 1<<17, /* Release HPI SM */
715+ CSR_HPI_RST = 1<<16, /* Reset HPI SM to Idle */
716+ CSR_SV_RUN = 1<<15, /* Release Supervisor SM */
717+ CSR_SV_RST = 1<<14, /* Reset Supervisor SM */
718+ CSR_DREAD_RUN = 1<<13, /* Release Descr Read SM */
719+ CSR_DREAD_RST = 1<<12, /* Reset Descr Read SM */
720+ CSR_DWRITE_RUN = 1<<11, /* Release Descr Write SM */
721+ CSR_DWRITE_RST = 1<<10, /* Reset Descr Write SM */
722+ CSR_TRANS_RUN = 1<<9, /* Release Transfer SM */
723+ CSR_TRANS_RST = 1<<8, /* Reset Transfer SM */
724+ CSR_ENA_POL = 1<<7, /* Enable Descr Polling */
725+ CSR_DIS_POL = 1<<6, /* Disable Descr Polling */
726+ CSR_STOP = 1<<5, /* Stop Rx/Tx Queue */
727+ CSR_START = 1<<4, /* Start Rx/Tx Queue */
728+ CSR_IRQ_CL_P = 1<<3, /* (Rx) Clear Parity IRQ */
729+ CSR_IRQ_CL_B = 1<<2, /* Clear EOB IRQ */
730+ CSR_IRQ_CL_F = 1<<1, /* Clear EOF IRQ */
731+ CSR_IRQ_CL_C = 1<<0, /* Clear ERR IRQ */
732+};
733+
734+#define CSR_SET_RESET (CSR_DESC_SET | CSR_FIFO_SET | CSR_HPI_RST |\
735+ CSR_SV_RST | CSR_DREAD_RST | CSR_DWRITE_RST |\
736+ CSR_TRANS_RST)
737+#define CSR_CLR_RESET (CSR_DESC_CLR | CSR_FIFO_CLR | CSR_HPI_RUN |\
738+ CSR_SV_RUN | CSR_DREAD_RUN | CSR_DWRITE_RUN |\
739+ CSR_TRANS_RUN)
740+
741+/* Q_F 32 bit Flag Register */
742+enum {
743+ F_ALM_FULL = 1<<27, /* Rx FIFO: almost full */
744+ F_EMPTY = 1<<27, /* Tx FIFO: empty flag */
745+ F_FIFO_EOF = 1<<26, /* Tag (EOF Flag) bit in FIFO */
746+ F_WM_REACHED = 1<<25, /* Watermark reached */
747+
748+ F_FIFO_LEVEL = 0x1fL<<16, /* Bit 23..16: # of Qwords in FIFO */
749+ F_WATER_MARK = 0x0007ffL, /* Bit 10.. 0: Watermark */
750+};
751+
752+/* RAM Buffer Register Offsets, use RB_ADDR(Offs, Queue) to access */
753+/* RB_START 32 bit RAM Buffer Start Address */
754+/* RB_END 32 bit RAM Buffer End Address */
755+/* RB_WP 32 bit RAM Buffer Write Pointer */
756+/* RB_RP 32 bit RAM Buffer Read Pointer */
757+/* RB_RX_UTPP 32 bit Rx Upper Threshold, Pause Pack */
758+/* RB_RX_LTPP 32 bit Rx Lower Threshold, Pause Pack */
759+/* RB_RX_UTHP 32 bit Rx Upper Threshold, High Prio */
760+/* RB_RX_LTHP 32 bit Rx Lower Threshold, High Prio */
761+/* RB_PC 32 bit RAM Buffer Packet Counter */
762+/* RB_LEV 32 bit RAM Buffer Level Register */
763+
764+#define RB_MSK 0x0007ffff /* Bit 18.. 0: RAM Buffer Pointer Bits */
765+/* RB_TST2 8 bit RAM Buffer Test Register 2 */
766+/* RB_TST1 8 bit RAM Buffer Test Register 1 */
767+
768+/* RB_CTRL 8 bit RAM Buffer Control Register */
769+enum {
770+ RB_ENA_STFWD = 1<<5, /* Enable Store & Forward */
771+ RB_DIS_STFWD = 1<<4, /* Disable Store & Forward */
772+ RB_ENA_OP_MD = 1<<3, /* Enable Operation Mode */
773+ RB_DIS_OP_MD = 1<<2, /* Disable Operation Mode */
774+ RB_RST_CLR = 1<<1, /* Clear RAM Buf STM Reset */
775+ RB_RST_SET = 1<<0, /* Set RAM Buf STM Reset */
776+};
777+
778+/* Transmit MAC FIFO and Transmit LED Registers (GENESIS only), */
779+enum {
780+ TX_MFF_EA = 0x0d00,/* 32 bit Transmit MAC FIFO End Address */
781+ TX_MFF_WP = 0x0d04,/* 32 bit Transmit MAC FIFO WR Pointer */
782+ TX_MFF_WSP = 0x0d08,/* 32 bit Transmit MAC FIFO WR Shadow Ptr */
783+ TX_MFF_RP = 0x0d0c,/* 32 bit Transmit MAC FIFO RD Pointer */
784+ TX_MFF_PC = 0x0d10,/* 32 bit Transmit MAC FIFO Packet Cnt */
785+ TX_MFF_LEV = 0x0d14,/* 32 bit Transmit MAC FIFO Level */
786+ TX_MFF_CTRL1 = 0x0d18,/* 16 bit Transmit MAC FIFO Ctrl Reg 1 */
787+ TX_MFF_WAF = 0x0d1a,/* 8 bit Transmit MAC Wait after flush */
788+
789+ TX_MFF_CTRL2 = 0x0d1c,/* 8 bit Transmit MAC FIFO Ctrl Reg 2 */
790+ TX_MFF_TST1 = 0x0d1d,/* 8 bit Transmit MAC FIFO Test Reg 1 */
791+ TX_MFF_TST2 = 0x0d1e,/* 8 bit Transmit MAC FIFO Test Reg 2 */
792+
793+ TX_LED_INI = 0x0d20,/* 32 bit Transmit LED Cnt Init Value */
794+ TX_LED_VAL = 0x0d24,/* 32 bit Transmit LED Cnt Current Val */
795+ TX_LED_CTRL = 0x0d28,/* 8 bit Transmit LED Cnt Control Reg */
796+ TX_LED_TST = 0x0d29,/* 8 bit Transmit LED Cnt Test Reg */
797+};
798+
799+/* Counter and Timer constants, for a host clock of 62.5 MHz */
800+#define SK_XMIT_DUR 0x002faf08UL /* 50 ms */
801+#define SK_BLK_DUR 0x01dcd650UL /* 500 ms */
802+
803+#define SK_DPOLL_DEF 0x00ee6b28UL /* 250 ms at 62.5 MHz */
804+
805+#define SK_DPOLL_MAX 0x00ffffffUL /* 268 ms at 62.5 MHz */
806+ /* 215 ms at 78.12 MHz */
807+
808+#define SK_FACT_62 100 /* is given in percent */
809+#define SK_FACT_53 85 /* on GENESIS: 53.12 MHz */
810+#define SK_FACT_78 125 /* on YUKON: 78.12 MHz */
811+
812+
813+/* Transmit GMAC FIFO (YUKON only) */
814+enum {
815+ TX_GMF_EA = 0x0d40,/* 32 bit Tx GMAC FIFO End Address */
816+ TX_GMF_AE_THR = 0x0d44,/* 32 bit Tx GMAC FIFO Almost Empty Thresh.*/
817+ TX_GMF_CTRL_T = 0x0d48,/* 32 bit Tx GMAC FIFO Control/Test */
818+
819+ TX_GMF_WP = 0x0d60,/* 32 bit Tx GMAC FIFO Write Pointer */
820+ TX_GMF_WSP = 0x0d64,/* 32 bit Tx GMAC FIFO Write Shadow Ptr. */
821+ TX_GMF_WLEV = 0x0d68,/* 32 bit Tx GMAC FIFO Write Level */
822+
823+ TX_GMF_RP = 0x0d70,/* 32 bit Tx GMAC FIFO Read Pointer */
824+ TX_GMF_RSTP = 0x0d74,/* 32 bit Tx GMAC FIFO Restart Pointer */
825+ TX_GMF_RLEV = 0x0d78,/* 32 bit Tx GMAC FIFO Read Level */
826+
827+ /* Descriptor Poll Timer Registers */
828+ B28_DPT_INI = 0x0e00,/* 24 bit Descriptor Poll Timer Init Val */
829+ B28_DPT_VAL = 0x0e04,/* 24 bit Descriptor Poll Timer Curr Val */
830+ B28_DPT_CTRL = 0x0e08,/* 8 bit Descriptor Poll Timer Ctrl Reg */
831+
832+ B28_DPT_TST = 0x0e0a,/* 8 bit Descriptor Poll Timer Test Reg */
833+
834+ /* Time Stamp Timer Registers (YUKON only) */
835+ GMAC_TI_ST_VAL = 0x0e14,/* 32 bit Time Stamp Timer Curr Val */
836+ GMAC_TI_ST_CTRL = 0x0e18,/* 8 bit Time Stamp Timer Ctrl Reg */
837+ GMAC_TI_ST_TST = 0x0e1a,/* 8 bit Time Stamp Timer Test Reg */
838+};
839+
840+
841+enum {
842+ LINKLED_OFF = 0x01,
843+ LINKLED_ON = 0x02,
844+ LINKLED_LINKSYNC_OFF = 0x04,
845+ LINKLED_LINKSYNC_ON = 0x08,
846+ LINKLED_BLINK_OFF = 0x10,
847+ LINKLED_BLINK_ON = 0x20,
848+};
849+
850+/* GMAC and GPHY Control Registers (YUKON only) */
851+enum {
852+ GMAC_CTRL = 0x0f00,/* 32 bit GMAC Control Reg */
853+ GPHY_CTRL = 0x0f04,/* 32 bit GPHY Control Reg */
854+ GMAC_IRQ_SRC = 0x0f08,/* 8 bit GMAC Interrupt Source Reg */
855+ GMAC_IRQ_MSK = 0x0f0c,/* 8 bit GMAC Interrupt Mask Reg */
856+ GMAC_LINK_CTRL = 0x0f10,/* 16 bit Link Control Reg */
857+
858+/* Wake-up Frame Pattern Match Control Registers (YUKON only) */
859+
860+ WOL_REG_OFFS = 0x20,/* HW-Bug: Address is + 0x20 against spec. */
861+
862+ WOL_CTRL_STAT = 0x0f20,/* 16 bit WOL Control/Status Reg */
863+ WOL_MATCH_CTL = 0x0f22,/* 8 bit WOL Match Control Reg */
864+ WOL_MATCH_RES = 0x0f23,/* 8 bit WOL Match Result Reg */
865+ WOL_MAC_ADDR = 0x0f24,/* 32 bit WOL MAC Address */
866+ WOL_PATT_RPTR = 0x0f2c,/* 8 bit WOL Pattern Read Pointer */
867+
868+/* WOL Pattern Length Registers (YUKON only) */
869+
870+ WOL_PATT_LEN_LO = 0x0f30,/* 32 bit WOL Pattern Length 3..0 */
871+ WOL_PATT_LEN_HI = 0x0f34,/* 24 bit WOL Pattern Length 6..4 */
872+
873+/* WOL Pattern Counter Registers (YUKON only) */
874+
875+ WOL_PATT_CNT_0 = 0x0f38,/* 32 bit WOL Pattern Counter 3..0 */
876+ WOL_PATT_CNT_4 = 0x0f3c,/* 24 bit WOL Pattern Counter 6..4 */
877+};
878+
879+enum {
880+ WOL_PATT_RAM_1 = 0x1000,/* WOL Pattern RAM Link 1 */
881+ WOL_PATT_RAM_2 = 0x1400,/* WOL Pattern RAM Link 2 */
882+};
883+
884+enum {
885+ BASE_XMAC_1 = 0x2000,/* XMAC 1 registers */
886+ BASE_GMAC_1 = 0x2800,/* GMAC 1 registers */
887+ BASE_XMAC_2 = 0x3000,/* XMAC 2 registers */
888+ BASE_GMAC_2 = 0x3800,/* GMAC 2 registers */
889+};
890+
891+/*
892+ * Receive Frame Status Encoding
893+ */
894+enum {
895+ XMR_FS_LEN = 0x3fff<<18, /* Bit 31..18: Rx Frame Length */
896+ XMR_FS_LEN_SHIFT = 18,
897+	XMR_FS_2L_VLAN	= 1<<17, /* Bit 17:	tagged with 2-Lev VLAN ID*/
898+	XMR_FS_1_VLAN	= 1<<16, /* Bit 16:	tagged with 1-Lev VLAN ID*/
899+ XMR_FS_BC = 1<<15, /* Bit 15: Broadcast Frame */
900+ XMR_FS_MC = 1<<14, /* Bit 14: Multicast Frame */
901+ XMR_FS_UC = 1<<13, /* Bit 13: Unicast Frame */
902+
903+ XMR_FS_BURST = 1<<11, /* Bit 11: Burst Mode */
904+ XMR_FS_CEX_ERR = 1<<10, /* Bit 10: Carrier Ext. Error */
905+ XMR_FS_802_3 = 1<<9, /* Bit 9: 802.3 Frame */
906+ XMR_FS_COL_ERR = 1<<8, /* Bit 8: Collision Error */
907+ XMR_FS_CAR_ERR = 1<<7, /* Bit 7: Carrier Event Error */
908+ XMR_FS_LEN_ERR = 1<<6, /* Bit 6: In-Range Length Error */
909+ XMR_FS_FRA_ERR = 1<<5, /* Bit 5: Framing Error */
910+ XMR_FS_RUNT = 1<<4, /* Bit 4: Runt Frame */
911+ XMR_FS_LNG_ERR = 1<<3, /* Bit 3: Giant (Jumbo) Frame */
912+ XMR_FS_FCS_ERR = 1<<2, /* Bit 2: Frame Check Sequ Err */
913+ XMR_FS_ERR = 1<<1, /* Bit 1: Frame Error */
914+ XMR_FS_MCTRL = 1<<0, /* Bit 0: MAC Control Packet */
915+
916+/*
917+ * XMR_FS_ERR will be set if
918+ * XMR_FS_FCS_ERR, XMR_FS_LNG_ERR, XMR_FS_RUNT,
919+ * XMR_FS_FRA_ERR, XMR_FS_LEN_ERR, or XMR_FS_CEX_ERR
920+ * is set. XMR_FS_LNG_ERR and XMR_FS_LEN_ERR will issue
921+ * XMR_FS_ERR unless the corresponding bit in the Receive Command
922+ * Register is set.
923+ */
924+};
925+
926+/*
927+ * XMAC-PHY Registers, indirect addressed over the XMAC
928+ */
929+enum {
930+ PHY_XMAC_CTRL = 0x00,/* 16 bit r/w PHY Control Register */
931+ PHY_XMAC_STAT = 0x01,/* 16 bit r/w PHY Status Register */
932+ PHY_XMAC_ID0 = 0x02,/* 16 bit r/o PHY ID0 Register */
933+ PHY_XMAC_ID1 = 0x03,/* 16 bit r/o PHY ID1 Register */
934+ PHY_XMAC_AUNE_ADV = 0x04,/* 16 bit r/w Auto-Neg. Advertisement */
935+ PHY_XMAC_AUNE_LP = 0x05,/* 16 bit r/o Link Partner Abi Reg */
936+ PHY_XMAC_AUNE_EXP = 0x06,/* 16 bit r/o Auto-Neg. Expansion Reg */
937+ PHY_XMAC_NEPG = 0x07,/* 16 bit r/w Next Page Register */
938+ PHY_XMAC_NEPG_LP = 0x08,/* 16 bit r/o Next Page Link Partner */
939+
940+ PHY_XMAC_EXT_STAT = 0x0f,/* 16 bit r/o Ext Status Register */
941+ PHY_XMAC_RES_ABI = 0x10,/* 16 bit r/o PHY Resolved Ability */
942+};
943+/*
944+ * Broadcom-PHY Registers, indirect addressed over XMAC
945+ */
946+enum {
947+ PHY_BCOM_CTRL = 0x00,/* 16 bit r/w PHY Control Register */
948+ PHY_BCOM_STAT = 0x01,/* 16 bit r/o PHY Status Register */
949+ PHY_BCOM_ID0 = 0x02,/* 16 bit r/o PHY ID0 Register */
950+ PHY_BCOM_ID1 = 0x03,/* 16 bit r/o PHY ID1 Register */
951+ PHY_BCOM_AUNE_ADV = 0x04,/* 16 bit r/w Auto-Neg. Advertisement */
952+ PHY_BCOM_AUNE_LP = 0x05,/* 16 bit r/o Link Part Ability Reg */
953+ PHY_BCOM_AUNE_EXP = 0x06,/* 16 bit r/o Auto-Neg. Expansion Reg */
954+ PHY_BCOM_NEPG = 0x07,/* 16 bit r/w Next Page Register */
955+ PHY_BCOM_NEPG_LP = 0x08,/* 16 bit r/o Next Page Link Partner */
956+ /* Broadcom-specific registers */
957+ PHY_BCOM_1000T_CTRL = 0x09,/* 16 bit r/w 1000Base-T Control Reg */
958+ PHY_BCOM_1000T_STAT = 0x0a,/* 16 bit r/o 1000Base-T Status Reg */
959+ PHY_BCOM_EXT_STAT = 0x0f,/* 16 bit r/o Extended Status Reg */
960+ PHY_BCOM_P_EXT_CTRL = 0x10,/* 16 bit r/w PHY Extended Ctrl Reg */
961+ PHY_BCOM_P_EXT_STAT = 0x11,/* 16 bit r/o PHY Extended Stat Reg */
962+ PHY_BCOM_RE_CTR = 0x12,/* 16 bit r/w Receive Error Counter */
963+ PHY_BCOM_FC_CTR = 0x13,/* 16 bit r/w False Carrier Sense Cnt */
964+ PHY_BCOM_RNO_CTR = 0x14,/* 16 bit r/w Receiver NOT_OK Cnt */
965+
966+ PHY_BCOM_AUX_CTRL = 0x18,/* 16 bit r/w Auxiliary Control Reg */
967+ PHY_BCOM_AUX_STAT = 0x19,/* 16 bit r/o Auxiliary Stat Summary */
968+ PHY_BCOM_INT_STAT = 0x1a,/* 16 bit r/o Interrupt Status Reg */
969+ PHY_BCOM_INT_MASK = 0x1b,/* 16 bit r/w Interrupt Mask Reg */
970+};
971+
972+/*
973+ * Marvell-PHY Registers, indirect addressed over GMAC
974+ */
975+enum {
976+ PHY_MARV_CTRL = 0x00,/* 16 bit r/w PHY Control Register */
977+ PHY_MARV_STAT = 0x01,/* 16 bit r/o PHY Status Register */
978+ PHY_MARV_ID0 = 0x02,/* 16 bit r/o PHY ID0 Register */
979+ PHY_MARV_ID1 = 0x03,/* 16 bit r/o PHY ID1 Register */
980+ PHY_MARV_AUNE_ADV = 0x04,/* 16 bit r/w Auto-Neg. Advertisement */
981+ PHY_MARV_AUNE_LP = 0x05,/* 16 bit r/o Link Part Ability Reg */
982+ PHY_MARV_AUNE_EXP = 0x06,/* 16 bit r/o Auto-Neg. Expansion Reg */
983+ PHY_MARV_NEPG = 0x07,/* 16 bit r/w Next Page Register */
984+ PHY_MARV_NEPG_LP = 0x08,/* 16 bit r/o Next Page Link Partner */
985+ /* Marvel-specific registers */
986+ PHY_MARV_1000T_CTRL = 0x09,/* 16 bit r/w 1000Base-T Control Reg */
987+ PHY_MARV_1000T_STAT = 0x0a,/* 16 bit r/o 1000Base-T Status Reg */
988+ PHY_MARV_EXT_STAT = 0x0f,/* 16 bit r/o Extended Status Reg */
989+ PHY_MARV_PHY_CTRL = 0x10,/* 16 bit r/w PHY Specific Ctrl Reg */
990+ PHY_MARV_PHY_STAT = 0x11,/* 16 bit r/o PHY Specific Stat Reg */
991+ PHY_MARV_INT_MASK = 0x12,/* 16 bit r/w Interrupt Mask Reg */
992+ PHY_MARV_INT_STAT = 0x13,/* 16 bit r/o Interrupt Status Reg */
993+ PHY_MARV_EXT_CTRL = 0x14,/* 16 bit r/w Ext. PHY Specific Ctrl */
994+ PHY_MARV_RXE_CNT = 0x15,/* 16 bit r/w Receive Error Counter */
995+ PHY_MARV_EXT_ADR = 0x16,/* 16 bit r/w Ext. Ad. for Cable Diag. */
996+ PHY_MARV_PORT_IRQ = 0x17,/* 16 bit r/o Port 0 IRQ (88E1111 only) */
997+ PHY_MARV_LED_CTRL = 0x18,/* 16 bit r/w LED Control Reg */
998+ PHY_MARV_LED_OVER = 0x19,/* 16 bit r/w Manual LED Override Reg */
999+ PHY_MARV_EXT_CTRL_2 = 0x1a,/* 16 bit r/w Ext. PHY Specific Ctrl 2 */
1000+ PHY_MARV_EXT_P_STAT = 0x1b,/* 16 bit r/w Ext. PHY Spec. Stat Reg */
1001+ PHY_MARV_CABLE_DIAG = 0x1c,/* 16 bit r/o Cable Diagnostic Reg */
1002+ PHY_MARV_PAGE_ADDR = 0x1d,/* 16 bit r/w Extended Page Address Reg */
1003+ PHY_MARV_PAGE_DATA = 0x1e,/* 16 bit r/w Extended Page Data Reg */
1004+
1005+/* for 10/100 Fast Ethernet PHY (88E3082 only) */
1006+ PHY_MARV_FE_LED_PAR = 0x16,/* 16 bit r/w LED Parallel Select Reg. */
1007+ PHY_MARV_FE_LED_SER = 0x17,/* 16 bit r/w LED Stream Select S. LED */
1008+ PHY_MARV_FE_VCT_TX = 0x1a,/* 16 bit r/w VCT Reg. for TXP/N Pins */
1009+ PHY_MARV_FE_VCT_RX = 0x1b,/* 16 bit r/o VCT Reg. for RXP/N Pins */
1010+ PHY_MARV_FE_SPEC_2 = 0x1c,/* 16 bit r/w Specific Control Reg. 2 */
1011+};
1012+
1013+enum {
1014+ PHY_CT_RESET = 1<<15, /* Bit 15: (sc) clear all PHY related regs */
1015+ PHY_CT_LOOP = 1<<14, /* Bit 14: enable Loopback over PHY */
1016+ PHY_CT_SPS_LSB = 1<<13, /* Bit 13: Speed select, lower bit */
1017+ PHY_CT_ANE = 1<<12, /* Bit 12: Auto-Negotiation Enabled */
1018+ PHY_CT_PDOWN = 1<<11, /* Bit 11: Power Down Mode */
1019+ PHY_CT_ISOL = 1<<10, /* Bit 10: Isolate Mode */
1020+ PHY_CT_RE_CFG = 1<<9, /* Bit 9: (sc) Restart Auto-Negotiation */
1021+ PHY_CT_DUP_MD = 1<<8, /* Bit 8: Duplex Mode */
1022+ PHY_CT_COL_TST = 1<<7, /* Bit 7: Collision Test enabled */
1023+ PHY_CT_SPS_MSB = 1<<6, /* Bit 6: Speed select, upper bit */
1024+};
1025+
1026+enum {
1027+ PHY_CT_SP1000 = PHY_CT_SPS_MSB, /* enable speed of 1000 Mbps */
1028+ PHY_CT_SP100 = PHY_CT_SPS_LSB, /* enable speed of 100 Mbps */
1029+ PHY_CT_SP10 = 0, /* enable speed of 10 Mbps */
1030+};
1031+
1032+enum {
1033+ PHY_ST_EXT_ST = 1<<8, /* Bit 8: Extended Status Present */
1034+
1035+ PHY_ST_PRE_SUP = 1<<6, /* Bit 6: Preamble Suppression */
1036+ PHY_ST_AN_OVER = 1<<5, /* Bit 5: Auto-Negotiation Over */
1037+	PHY_ST_REM_FLT	= 1<<4, /* Bit  4:	Remote Fault Condition Occurred */
1038+ PHY_ST_AN_CAP = 1<<3, /* Bit 3: Auto-Negotiation Capability */
1039+ PHY_ST_LSYNC = 1<<2, /* Bit 2: Link Synchronized */
1040+ PHY_ST_JAB_DET = 1<<1, /* Bit 1: Jabber Detected */
1041+ PHY_ST_EXT_REG = 1<<0, /* Bit 0: Extended Register available */
1042+};
1043+
1044+enum {
1045+ PHY_I1_OUI_MSK = 0x3f<<10, /* Bit 15..10: Organization Unique ID */
1046+ PHY_I1_MOD_NUM = 0x3f<<4, /* Bit 9.. 4: Model Number */
1047+ PHY_I1_REV_MSK = 0xf, /* Bit 3.. 0: Revision Number */
1048+};
1049+
1050+/* different Broadcom PHY Ids */
1051+enum {
1052+ PHY_BCOM_ID1_A1 = 0x6041,
1053+ PHY_BCOM_ID1_B2 = 0x6043,
1054+ PHY_BCOM_ID1_C0 = 0x6044,
1055+ PHY_BCOM_ID1_C5 = 0x6047,
1056+};
1057+
1058+/* different Marvell PHY Ids */
1059+enum {
1060+ PHY_MARV_ID0_VAL= 0x0141, /* Marvell Unique Identifier */
1061+ PHY_MARV_ID1_B0 = 0x0C23, /* Yukon (PHY 88E1011) */
1062+ PHY_MARV_ID1_B2 = 0x0C25, /* Yukon-Plus (PHY 88E1011) */
1063+ PHY_MARV_ID1_C2 = 0x0CC2, /* Yukon-EC (PHY 88E1111) */
1064+ PHY_MARV_ID1_Y2 = 0x0C91, /* Yukon-2 (PHY 88E1112) */
1065+};
1066+
1067+/* Advertisement register bits */
1068+enum {
1069+ PHY_AN_NXT_PG = 1<<15, /* Bit 15: Request Next Page */
1070+ PHY_AN_ACK = 1<<14, /* Bit 14: (ro) Acknowledge Received */
1071+ PHY_AN_RF = 1<<13, /* Bit 13: Remote Fault Bits */
1072+
1073+ PHY_AN_PAUSE_ASYM = 1<<11,/* Bit 11: Try for asymmetric */
1074+ PHY_AN_PAUSE_CAP = 1<<10, /* Bit 10: Try for pause */
1075+ PHY_AN_100BASE4 = 1<<9, /* Bit 9: Try for 100mbps 4k packets */
1076+ PHY_AN_100FULL = 1<<8, /* Bit 8: Try for 100mbps full-duplex */
1077+ PHY_AN_100HALF = 1<<7, /* Bit 7: Try for 100mbps half-duplex */
1078+ PHY_AN_10FULL = 1<<6, /* Bit 6: Try for 10mbps full-duplex */
1079+ PHY_AN_10HALF = 1<<5, /* Bit 5: Try for 10mbps half-duplex */
1080+ PHY_AN_CSMA = 1<<0, /* Bit 0: Only selector supported */
1081+ PHY_AN_SEL = 0x1f, /* Bit 4..0: Selector Field, 00001=Ethernet*/
1082+ PHY_AN_FULL = PHY_AN_100FULL | PHY_AN_10FULL | PHY_AN_CSMA,
1083+ PHY_AN_ALL = PHY_AN_10HALF | PHY_AN_10FULL |
1084+ PHY_AN_100HALF | PHY_AN_100FULL,
1085+};
1086+
1087+/* Xmac Specific */
1088+enum {
1089+ PHY_X_AN_NXT_PG = 1<<15, /* Bit 15: Request Next Page */
1090+ PHY_X_AN_ACK = 1<<14, /* Bit 14: (ro) Acknowledge Received */
1091+ PHY_X_AN_RFB = 3<<12,/* Bit 13..12: Remote Fault Bits */
1092+
1093+ PHY_X_AN_PAUSE = 3<<7,/* Bit 8.. 7: Pause Bits */
1094+ PHY_X_AN_HD = 1<<6, /* Bit 6: Half Duplex */
1095+ PHY_X_AN_FD = 1<<5, /* Bit 5: Full Duplex */
1096+};
1097+
1098+/* Pause Bits (PHY_X_AN_PAUSE and PHY_X_RS_PAUSE) encoding */
1099+enum {
1100+ PHY_X_P_NO_PAUSE = 0<<7,/* Bit 8..7: no Pause Mode */
1101+ PHY_X_P_SYM_MD = 1<<7, /* Bit 8..7: symmetric Pause Mode */
1102+ PHY_X_P_ASYM_MD = 2<<7,/* Bit 8..7: asymmetric Pause Mode */
1103+ PHY_X_P_BOTH_MD = 3<<7,/* Bit 8..7: both Pause Mode */
1104+};
1105+
1106+
1107+/* Broadcom-Specific */
1108+/***** PHY_BCOM_1000T_CTRL 16 bit r/w 1000Base-T Control Reg *****/
1109+enum {
1110+ PHY_B_1000C_TEST = 7<<13,/* Bit 15..13: Test Modes */
1111+ PHY_B_1000C_MSE = 1<<12, /* Bit 12: Master/Slave Enable */
1112+ PHY_B_1000C_MSC = 1<<11, /* Bit 11: M/S Configuration */
1113+ PHY_B_1000C_RD = 1<<10, /* Bit 10: Repeater/DTE */
1114+ PHY_B_1000C_AFD = 1<<9, /* Bit 9: Advertise Full Duplex */
1115+ PHY_B_1000C_AHD = 1<<8, /* Bit 8: Advertise Half Duplex */
1116+};
1117+
1118+/***** PHY_BCOM_1000T_STAT 16 bit r/o 1000Base-T Status Reg *****/
1119+/***** PHY_MARV_1000T_STAT 16 bit r/o 1000Base-T Status Reg *****/
1120+enum {
1121+ PHY_B_1000S_MSF = 1<<15, /* Bit 15: Master/Slave Fault */
1122+ PHY_B_1000S_MSR = 1<<14, /* Bit 14: Master/Slave Result */
1123+ PHY_B_1000S_LRS = 1<<13, /* Bit 13: Local Receiver Status */
1124+ PHY_B_1000S_RRS = 1<<12, /* Bit 12: Remote Receiver Status */
1125+ PHY_B_1000S_LP_FD = 1<<11, /* Bit 11: Link Partner can FD */
1126+ PHY_B_1000S_LP_HD = 1<<10, /* Bit 10: Link Partner can HD */
1127+ /* Bit 9..8: reserved */
1128+ PHY_B_1000S_IEC = 0xff, /* Bit 7..0: Idle Error Count */
1129+};
1130+
1131+/***** PHY_BCOM_EXT_STAT 16 bit r/o Extended Status Register *****/
1132+enum {
1133+ PHY_B_ES_X_FD_CAP = 1<<15, /* Bit 15: 1000Base-X FD capable */
1134+ PHY_B_ES_X_HD_CAP = 1<<14, /* Bit 14: 1000Base-X HD capable */
1135+ PHY_B_ES_T_FD_CAP = 1<<13, /* Bit 13: 1000Base-T FD capable */
1136+ PHY_B_ES_T_HD_CAP = 1<<12, /* Bit 12: 1000Base-T HD capable */
1137+};
1138+
1139+/***** PHY_BCOM_P_EXT_CTRL 16 bit r/w PHY Extended Control Reg *****/
1140+enum {
1141+ PHY_B_PEC_MAC_PHY = 1<<15, /* Bit 15: 10BIT/GMI-Interface */
1142+ PHY_B_PEC_DIS_CROSS = 1<<14, /* Bit 14: Disable MDI Crossover */
1143+ PHY_B_PEC_TX_DIS = 1<<13, /* Bit 13: Tx output Disabled */
1144+ PHY_B_PEC_INT_DIS = 1<<12, /* Bit 12: Interrupts Disabled */
1145+ PHY_B_PEC_F_INT = 1<<11, /* Bit 11: Force Interrupt */
1146+ PHY_B_PEC_BY_45 = 1<<10, /* Bit 10: Bypass 4B5B-Decoder */
1147+ PHY_B_PEC_BY_SCR = 1<<9, /* Bit 9: Bypass Scrambler */
1148+ PHY_B_PEC_BY_MLT3 = 1<<8, /* Bit 8: Bypass MLT3 Encoder */
1149+ PHY_B_PEC_BY_RXA = 1<<7, /* Bit 7: Bypass Rx Alignm. */
1150+ PHY_B_PEC_RES_SCR = 1<<6, /* Bit 6: Reset Scrambler */
1151+ PHY_B_PEC_EN_LTR = 1<<5, /* Bit 5: Ena LED Traffic Mode */
1152+ PHY_B_PEC_LED_ON = 1<<4, /* Bit 4: Force LED's on */
1153+ PHY_B_PEC_LED_OFF = 1<<3, /* Bit 3: Force LED's off */
1154+ PHY_B_PEC_EX_IPG = 1<<2, /* Bit 2: Extend Tx IPG Mode */
1155+ PHY_B_PEC_3_LED = 1<<1, /* Bit 1: Three Link LED mode */
1156+ PHY_B_PEC_HIGH_LA = 1<<0, /* Bit 0: GMII FIFO Elasticy */
1157+};
1158+
1159+/***** PHY_BCOM_P_EXT_STAT 16 bit r/o PHY Extended Status Reg *****/
1160+enum {
1161+ PHY_B_PES_CROSS_STAT = 1<<13, /* Bit 13: MDI Crossover Status */
1162+ PHY_B_PES_INT_STAT = 1<<12, /* Bit 12: Interrupt Status */
1163+ PHY_B_PES_RRS = 1<<11, /* Bit 11: Remote Receiver Stat. */
1164+ PHY_B_PES_LRS = 1<<10, /* Bit 10: Local Receiver Stat. */
1165+ PHY_B_PES_LOCKED = 1<<9, /* Bit 9: Locked */
1166+ PHY_B_PES_LS = 1<<8, /* Bit 8: Link Status */
1167+ PHY_B_PES_RF = 1<<7, /* Bit 7: Remote Fault */
1168+ PHY_B_PES_CE_ER = 1<<6, /* Bit 6: Carrier Ext Error */
1169+ PHY_B_PES_BAD_SSD = 1<<5, /* Bit 5: Bad SSD */
1170+ PHY_B_PES_BAD_ESD = 1<<4, /* Bit 4: Bad ESD */
1171+ PHY_B_PES_RX_ER = 1<<3, /* Bit 3: Receive Error */
1172+ PHY_B_PES_TX_ER = 1<<2, /* Bit 2: Transmit Error */
1173+ PHY_B_PES_LOCK_ER = 1<<1, /* Bit 1: Lock Error */
1174+ PHY_B_PES_MLT3_ER = 1<<0, /* Bit 0: MLT3 code Error */
1175+};
1176+
1177+/* PHY_BCOM_AUNE_ADV 16 bit r/w Auto-Negotiation Advertisement *****/
1178+/* PHY_BCOM_AUNE_LP 16 bit r/o Link Partner Ability Reg *****/
1179+enum {
1180+ PHY_B_AN_RF = 1<<13, /* Bit 13: Remote Fault */
1181+
1182+ PHY_B_AN_ASP = 1<<11, /* Bit 11: Asymmetric Pause */
1183+ PHY_B_AN_PC = 1<<10, /* Bit 10: Pause Capable */
1184+};
1185+
1186+
1187+/***** PHY_BCOM_FC_CTR 16 bit r/w False Carrier Counter *****/
1188+enum {
1189+ PHY_B_FC_CTR = 0xff, /* Bit 7..0: False Carrier Counter */
1190+
1191+/***** PHY_BCOM_RNO_CTR 16 bit r/w Receive NOT_OK Counter *****/
1192+ PHY_B_RC_LOC_MSK = 0xff00, /* Bit 15..8: Local Rx NOT_OK cnt */
1193+ PHY_B_RC_REM_MSK = 0x00ff, /* Bit 7..0: Remote Rx NOT_OK cnt */
1194+
1195+/***** PHY_BCOM_AUX_CTRL 16 bit r/w Auxiliary Control Reg *****/
1196+ PHY_B_AC_L_SQE = 1<<15, /* Bit 15: Low Squelch */
1197+ PHY_B_AC_LONG_PACK = 1<<14, /* Bit 14: Rx Long Packets */
1198+ PHY_B_AC_ER_CTRL = 3<<12,/* Bit 13..12: Edgerate Control */
1199+ /* Bit 11: reserved */
1200+ PHY_B_AC_TX_TST = 1<<10, /* Bit 10: Tx test bit, always 1 */
1201+ /* Bit 9.. 8: reserved */
1202+ PHY_B_AC_DIS_PRF = 1<<7, /* Bit 7: dis part resp filter */
1203+ /* Bit 6: reserved */
1204+ PHY_B_AC_DIS_PM = 1<<5, /* Bit 5: dis power management */
1205+ /* Bit 4: reserved */
1206+ PHY_B_AC_DIAG = 1<<3, /* Bit 3: Diagnostic Mode */
1207+};
1208+
1209+/***** PHY_BCOM_AUX_STAT 16 bit r/o Auxiliary Status Reg *****/
1210+enum {
1211+ PHY_B_AS_AN_C = 1<<15, /* Bit 15: AutoNeg complete */
1212+ PHY_B_AS_AN_CA = 1<<14, /* Bit 14: AN Complete Ack */
1213+ PHY_B_AS_ANACK_D = 1<<13, /* Bit 13: AN Ack Detect */
1214+ PHY_B_AS_ANAB_D = 1<<12, /* Bit 12: AN Ability Detect */
1215+ PHY_B_AS_NPW = 1<<11, /* Bit 11: AN Next Page Wait */
1216+ PHY_B_AS_AN_RES_MSK = 7<<8,/* Bit 10..8: AN HDC */
1217+ PHY_B_AS_PDF = 1<<7, /* Bit 7: Parallel Detect. Fault */
1218+ PHY_B_AS_RF = 1<<6, /* Bit 6: Remote Fault */
1219+ PHY_B_AS_ANP_R = 1<<5, /* Bit 5: AN Page Received */
1220+ PHY_B_AS_LP_ANAB = 1<<4, /* Bit 4: LP AN Ability */
1221+ PHY_B_AS_LP_NPAB = 1<<3, /* Bit 3: LP Next Page Ability */
1222+ PHY_B_AS_LS = 1<<2, /* Bit 2: Link Status */
1223+ PHY_B_AS_PRR = 1<<1, /* Bit 1: Pause Resolution-Rx */
1224+ PHY_B_AS_PRT = 1<<0, /* Bit 0: Pause Resolution-Tx */
1225+};
1226+#define PHY_B_AS_PAUSE_MSK (PHY_B_AS_PRR | PHY_B_AS_PRT)
1227+
1228+/***** PHY_BCOM_INT_STAT 16 bit r/o Interrupt Status Reg *****/
1229+/***** PHY_BCOM_INT_MASK 16 bit r/w Interrupt Mask Reg *****/
1230+enum {
1231+ PHY_B_IS_PSE = 1<<14, /* Bit 14: Pair Swap Error */
1232+ PHY_B_IS_MDXI_SC = 1<<13, /* Bit 13: MDIX Status Change */
1233+ PHY_B_IS_HCT = 1<<12, /* Bit 12: counter above 32k */
1234+ PHY_B_IS_LCT = 1<<11, /* Bit 11: counter above 128 */
1235+ PHY_B_IS_AN_PR = 1<<10, /* Bit 10: Page Received */
1236+ PHY_B_IS_NO_HDCL = 1<<9, /* Bit 9: No HCD Link */
1237+ PHY_B_IS_NO_HDC = 1<<8, /* Bit 8: No HCD */
1238+ PHY_B_IS_NEG_USHDC = 1<<7, /* Bit 7: Negotiated Unsup. HCD */
1239+ PHY_B_IS_SCR_S_ER = 1<<6, /* Bit 6: Scrambler Sync Error */
1240+ PHY_B_IS_RRS_CHANGE = 1<<5, /* Bit 5: Remote Rx Stat Change */
1241+ PHY_B_IS_LRS_CHANGE = 1<<4, /* Bit 4: Local Rx Stat Change */
1242+ PHY_B_IS_DUP_CHANGE = 1<<3, /* Bit 3: Duplex Mode Change */
1243+ PHY_B_IS_LSP_CHANGE = 1<<2, /* Bit 2: Link Speed Change */
1244+ PHY_B_IS_LST_CHANGE = 1<<1, /* Bit 1: Link Status Changed */
1245+ PHY_B_IS_CRC_ER = 1<<0, /* Bit 0: CRC Error */
1246+};
1247+#define PHY_B_DEF_MSK \
1248+ (~(PHY_B_IS_PSE | PHY_B_IS_AN_PR | PHY_B_IS_DUP_CHANGE | \
1249+ PHY_B_IS_LSP_CHANGE | PHY_B_IS_LST_CHANGE))
1250+
1251+/* Pause Bits (PHY_B_AN_ASP and PHY_B_AN_PC) encoding */
1252+enum {
1253+ PHY_B_P_NO_PAUSE = 0<<10,/* Bit 11..10: no Pause Mode */
1254+ PHY_B_P_SYM_MD = 1<<10, /* Bit 11..10: symmetric Pause Mode */
1255+ PHY_B_P_ASYM_MD = 2<<10,/* Bit 11..10: asymmetric Pause Mode */
1256+ PHY_B_P_BOTH_MD = 3<<10,/* Bit 11..10: both Pause Mode */
1257+};
1258+/*
1259+ * Resolved Duplex mode and Capabilities (Aux Status Summary Reg)
1260+ */
1261+enum {
1262+ PHY_B_RES_1000FD = 7<<8,/* Bit 10..8: 1000Base-T Full Dup. */
1263+ PHY_B_RES_1000HD = 6<<8,/* Bit 10..8: 1000Base-T Half Dup. */
1264+};
1265+
1266+/** Marvell-Specific */
1267+enum {
1268+ PHY_M_AN_NXT_PG = 1<<15, /* Request Next Page */
1269+ PHY_M_AN_ACK = 1<<14, /* (ro) Acknowledge Received */
1270+ PHY_M_AN_RF = 1<<13, /* Remote Fault */
1271+
1272+ PHY_M_AN_ASP = 1<<11, /* Asymmetric Pause */
1273+ PHY_M_AN_PC = 1<<10, /* MAC Pause implemented */
1274+ PHY_M_AN_100_T4 = 1<<9, /* Not cap. 100Base-T4 (always 0) */
1275+ PHY_M_AN_100_FD = 1<<8, /* Advertise 100Base-TX Full Duplex */
1276+ PHY_M_AN_100_HD = 1<<7, /* Advertise 100Base-TX Half Duplex */
1277+ PHY_M_AN_10_FD = 1<<6, /* Advertise 10Base-TX Full Duplex */
1278+ PHY_M_AN_10_HD = 1<<5, /* Advertise 10Base-TX Half Duplex */
1279+ PHY_M_AN_SEL_MSK =0x1f<<4, /* Bit 4.. 0: Selector Field Mask */
1280+};
1281+
1282+/* special defines for FIBER (88E1011S only) */
1283+enum {
1284+ PHY_M_AN_ASP_X = 1<<8, /* Asymmetric Pause */
1285+ PHY_M_AN_PC_X = 1<<7, /* MAC Pause implemented */
1286+ PHY_M_AN_1000X_AHD = 1<<6, /* Advertise 1000Base-X Half Duplex */
1287+ PHY_M_AN_1000X_AFD = 1<<5, /* Advertise 1000Base-X Full Duplex */
1288+};
1289+
1290+/* Pause Bits (PHY_M_AN_ASP_X and PHY_M_AN_PC_X) encoding */
1291+enum {
1292+ PHY_M_P_NO_PAUSE_X = 0<<7,/* Bit 8.. 7: no Pause Mode */
1293+ PHY_M_P_SYM_MD_X = 1<<7, /* Bit 8.. 7: symmetric Pause Mode */
1294+ PHY_M_P_ASYM_MD_X = 2<<7,/* Bit 8.. 7: asymmetric Pause Mode */
1295+ PHY_M_P_BOTH_MD_X = 3<<7,/* Bit 8.. 7: both Pause Mode */
1296+};
1297+
1298+/***** PHY_MARV_1000T_CTRL 16 bit r/w 1000Base-T Control Reg *****/
1299+enum {
1300+ PHY_M_1000C_TEST = 7<<13,/* Bit 15..13: Test Modes */
1301+ PHY_M_1000C_MSE = 1<<12, /* Manual Master/Slave Enable */
1302+ PHY_M_1000C_MSC = 1<<11, /* M/S Configuration (1=Master) */
1303+ PHY_M_1000C_MPD = 1<<10, /* Multi-Port Device */
1304+ PHY_M_1000C_AFD = 1<<9, /* Advertise Full Duplex */
1305+ PHY_M_1000C_AHD = 1<<8, /* Advertise Half Duplex */
1306+};
1307+
1308+/***** PHY_MARV_PHY_CTRL 16 bit r/w PHY Specific Ctrl Reg *****/
1309+enum {
1310+ PHY_M_PC_TX_FFD_MSK = 3<<14,/* Bit 15..14: Tx FIFO Depth Mask */
1311+ PHY_M_PC_RX_FFD_MSK = 3<<12,/* Bit 13..12: Rx FIFO Depth Mask */
1312+ PHY_M_PC_ASS_CRS_TX = 1<<11, /* Assert CRS on Transmit */
1313+ PHY_M_PC_FL_GOOD = 1<<10, /* Force Link Good */
1314+ PHY_M_PC_EN_DET_MSK = 3<<8,/* Bit 9.. 8: Energy Detect Mask */
1315+ PHY_M_PC_ENA_EXT_D = 1<<7, /* Enable Ext. Distance (10BT) */
1316+ PHY_M_PC_MDIX_MSK = 3<<5,/* Bit 6.. 5: MDI/MDIX Config. Mask */
1317+ PHY_M_PC_DIS_125CLK = 1<<4, /* Disable 125 CLK */
1318+ PHY_M_PC_MAC_POW_UP = 1<<3, /* MAC Power up */
1319+ PHY_M_PC_SQE_T_ENA = 1<<2, /* SQE Test Enabled */
1320+ PHY_M_PC_POL_R_DIS = 1<<1, /* Polarity Reversal Disabled */
1321+ PHY_M_PC_DIS_JABBER = 1<<0, /* Disable Jabber */
1322+};
1323+
1324+enum {
1325+ PHY_M_PC_EN_DET = 2<<8, /* Energy Detect (Mode 1) */
1326+ PHY_M_PC_EN_DET_PLUS = 3<<8, /* Energy Detect Plus (Mode 2) */
1327+};
1328+
1329+#define PHY_M_PC_MDI_XMODE(x) (((x)<<5) & PHY_M_PC_MDIX_MSK)
1330+
1331+enum {
1332+ PHY_M_PC_MAN_MDI = 0, /* 00 = Manual MDI configuration */
1333+ PHY_M_PC_MAN_MDIX = 1, /* 01 = Manual MDIX configuration */
1334+ PHY_M_PC_ENA_AUTO = 3, /* 11 = Enable Automatic Crossover */
1335+};
1336+
1337+/* for 10/100 Fast Ethernet PHY (88E3082 only) */
1338+enum {
1339+ PHY_M_PC_ENA_DTE_DT = 1<<15, /* Enable Data Terminal Equ. (DTE) Detect */
1340+ PHY_M_PC_ENA_ENE_DT = 1<<14, /* Enable Energy Detect (sense & pulse) */
1341+ PHY_M_PC_DIS_NLP_CK = 1<<13, /* Disable Normal Link Pulse (NLP) Check */
1342+ PHY_M_PC_ENA_LIP_NP = 1<<12, /* Enable Link Partner Next Page Reg. */
1343+ PHY_M_PC_DIS_NLP_GN = 1<<11, /* Disable Normal Link Pulse Generation */
1344+
1345+ PHY_M_PC_DIS_SCRAMB = 1<<9, /* Disable Scrambler */
1346+ PHY_M_PC_DIS_FEFI = 1<<8, /* Disable Far End Fault Indic. (FEFI) */
1347+
1348+ PHY_M_PC_SH_TP_SEL = 1<<6, /* Shielded Twisted Pair Select */
1349+ PHY_M_PC_RX_FD_MSK = 3<<2,/* Bit 3.. 2: Rx FIFO Depth Mask */
1350+};
1351+
1352+/***** PHY_MARV_PHY_STAT 16 bit r/o PHY Specific Status Reg *****/
1353+enum {
1354+ PHY_M_PS_SPEED_MSK = 3<<14, /* Bit 15..14: Speed Mask */
1355+ PHY_M_PS_SPEED_1000 = 1<<15, /* 10 = 1000 Mbps */
1356+ PHY_M_PS_SPEED_100 = 1<<14, /* 01 = 100 Mbps */
1357+ PHY_M_PS_SPEED_10 = 0, /* 00 = 10 Mbps */
1358+ PHY_M_PS_FULL_DUP = 1<<13, /* Full Duplex */
1359+ PHY_M_PS_PAGE_REC = 1<<12, /* Page Received */
1360+ PHY_M_PS_SPDUP_RES = 1<<11, /* Speed & Duplex Resolved */
1361+ PHY_M_PS_LINK_UP = 1<<10, /* Link Up */
1362+ PHY_M_PS_CABLE_MSK = 7<<7, /* Bit 9.. 7: Cable Length Mask */
1363+ PHY_M_PS_MDI_X_STAT = 1<<6, /* MDI Crossover Stat (1=MDIX) */
1364+ PHY_M_PS_DOWNS_STAT = 1<<5, /* Downshift Status (1=downsh.) */
1365+ PHY_M_PS_ENDET_STAT = 1<<4, /* Energy Detect Status (1=act) */
1366+ PHY_M_PS_TX_P_EN = 1<<3, /* Tx Pause Enabled */
1367+ PHY_M_PS_RX_P_EN = 1<<2, /* Rx Pause Enabled */
1368+ PHY_M_PS_POL_REV = 1<<1, /* Polarity Reversed */
1369+ PHY_M_PS_JABBER = 1<<0, /* Jabber */
1370+};
1371+
1372+#define PHY_M_PS_PAUSE_MSK (PHY_M_PS_TX_P_EN | PHY_M_PS_RX_P_EN)
1373+
1374+/* for 10/100 Fast Ethernet PHY (88E3082 only) */
1375+enum {
1376+ PHY_M_PS_DTE_DETECT = 1<<15, /* Data Terminal Equipment (DTE) Detected */
1377+ PHY_M_PS_RES_SPEED = 1<<14, /* Resolved Speed (1=100 Mbps, 0=10 Mbps) */
1378+};
1379+
1380+enum {
1381+ PHY_M_IS_AN_ERROR = 1<<15, /* Auto-Negotiation Error */
1382+ PHY_M_IS_LSP_CHANGE = 1<<14, /* Link Speed Changed */
1383+ PHY_M_IS_DUP_CHANGE = 1<<13, /* Duplex Mode Changed */
1384+ PHY_M_IS_AN_PR = 1<<12, /* Page Received */
1385+ PHY_M_IS_AN_COMPL = 1<<11, /* Auto-Negotiation Completed */
1386+ PHY_M_IS_LST_CHANGE = 1<<10, /* Link Status Changed */
1387+ PHY_M_IS_SYMB_ERROR = 1<<9, /* Symbol Error */
1388+ PHY_M_IS_FALSE_CARR = 1<<8, /* False Carrier */
1389+ PHY_M_IS_FIFO_ERROR = 1<<7, /* FIFO Overflow/Underrun Error */
1390+ PHY_M_IS_MDI_CHANGE = 1<<6, /* MDI Crossover Changed */
1391+ PHY_M_IS_DOWNSH_DET = 1<<5, /* Downshift Detected */
1392+ PHY_M_IS_END_CHANGE = 1<<4, /* Energy Detect Changed */
1393+
1394+ PHY_M_IS_DTE_CHANGE = 1<<2, /* DTE Power Det. Status Changed */
1395+ PHY_M_IS_POL_CHANGE = 1<<1, /* Polarity Changed */
1396+ PHY_M_IS_JABBER = 1<<0, /* Jabber */
1397+
1398+ PHY_M_IS_DEF_MSK = PHY_M_IS_AN_ERROR | PHY_M_IS_LSP_CHANGE |
1399+ PHY_M_IS_LST_CHANGE | PHY_M_IS_FIFO_ERROR,
1400+
1401+ PHY_M_IS_AN_MSK = PHY_M_IS_AN_ERROR | PHY_M_IS_AN_COMPL,
1402+};
1403+
1404+/***** PHY_MARV_EXT_CTRL 16 bit r/w Ext. PHY Specific Ctrl *****/
1405+enum {
1406+ PHY_M_EC_ENA_BC_EXT = 1<<15, /* Enable Block Carr. Ext. (88E1111 only) */
1407+ PHY_M_EC_ENA_LIN_LB = 1<<14, /* Enable Line Loopback (88E1111 only) */
1408+
1409+ PHY_M_EC_DIS_LINK_P = 1<<12, /* Disable Link Pulses (88E1111 only) */
1410+ PHY_M_EC_M_DSC_MSK = 3<<10, /* Bit 11..10: Master Downshift Counter */
1411+ /* (88E1011 only) */
1412+ PHY_M_EC_S_DSC_MSK = 3<<8,/* Bit 9.. 8: Slave Downshift Counter */
1413+ /* (88E1011 only) */
1414+ PHY_M_EC_M_DSC_MSK2 = 7<<9,/* Bit 11.. 9: Master Downshift Counter */
1415+ /* (88E1111 only) */
1416+ PHY_M_EC_DOWN_S_ENA = 1<<8, /* Downshift Enable (88E1111 only) */
1417+ /* !!! Errata in spec. (1 = disable) */
1418+ PHY_M_EC_RX_TIM_CT = 1<<7, /* RGMII Rx Timing Control*/
1419+ PHY_M_EC_MAC_S_MSK = 7<<4,/* Bit 6.. 4: Def. MAC interface speed */
1420+ PHY_M_EC_FIB_AN_ENA = 1<<3, /* Fiber Auto-Neg. Enable (88E1011S only) */
1421+ PHY_M_EC_DTE_D_ENA = 1<<2, /* DTE Detect Enable (88E1111 only) */
1422+ PHY_M_EC_TX_TIM_CT = 1<<1, /* RGMII Tx Timing Control */
1423+ PHY_M_EC_TRANS_DIS = 1<<0, /* Transmitter Disable (88E1111 only) */};
1424+
1425+#define PHY_M_EC_M_DSC(x) ((x)<<10) /* 00=1x; 01=2x; 10=3x; 11=4x */
1426+#define PHY_M_EC_S_DSC(x) ((x)<<8) /* 00=dis; 01=1x; 10=2x; 11=3x */
1427+#define PHY_M_EC_MAC_S(x) ((x)<<4) /* 01X=0; 110=2.5; 111=25 (MHz) */
1428+
1429+#define PHY_M_EC_M_DSC_2(x) ((x)<<9) /* 000=1x; 001=2x; 010=3x; 011=4x */
1430+ /* 100=5x; 101=6x; 110=7x; 111=8x */
1431+enum {
1432+ MAC_TX_CLK_0_MHZ = 2,
1433+ MAC_TX_CLK_2_5_MHZ = 6,
1434+ MAC_TX_CLK_25_MHZ = 7,
1435+};
1436+
1437+/***** PHY_MARV_LED_CTRL 16 bit r/w LED Control Reg *****/
1438+enum {
1439+ PHY_M_LEDC_DIS_LED = 1<<15, /* Disable LED */
1440+ PHY_M_LEDC_PULS_MSK = 7<<12,/* Bit 14..12: Pulse Stretch Mask */
1441+ PHY_M_LEDC_F_INT = 1<<11, /* Force Interrupt */
1442+ PHY_M_LEDC_BL_R_MSK = 7<<8,/* Bit 10.. 8: Blink Rate Mask */
1443+ PHY_M_LEDC_DP_C_LSB = 1<<7, /* Duplex Control (LSB, 88E1111 only) */
1444+ PHY_M_LEDC_TX_C_LSB = 1<<6, /* Tx Control (LSB, 88E1111 only) */
1445+ PHY_M_LEDC_LK_C_MSK = 7<<3,/* Bit 5.. 3: Link Control Mask */
1446+ /* (88E1111 only) */
1447+};
1448+
1449+enum {
1450+ PHY_M_LEDC_LINK_MSK = 3<<3,/* Bit 4.. 3: Link Control Mask */
1451+ /* (88E1011 only) */
1452+ PHY_M_LEDC_DP_CTRL = 1<<2, /* Duplex Control */
1453+ PHY_M_LEDC_DP_C_MSB = 1<<2, /* Duplex Control (MSB, 88E1111 only) */
1454+ PHY_M_LEDC_RX_CTRL = 1<<1, /* Rx Activity / Link */
1455+ PHY_M_LEDC_TX_CTRL = 1<<0, /* Tx Activity / Link */
1456+ PHY_M_LEDC_TX_C_MSB = 1<<0, /* Tx Control (MSB, 88E1111 only) */
1457+};
1458+
1459+#define PHY_M_LED_PULS_DUR(x) (((x)<<12) & PHY_M_LEDC_PULS_MSK)
1460+
1461+enum {
1462+ PULS_NO_STR = 0,/* no pulse stretching */
1463+ PULS_21MS = 1,/* 21 ms to 42 ms */
1464+ PULS_42MS = 2,/* 42 ms to 84 ms */
1465+ PULS_84MS = 3,/* 84 ms to 170 ms */
1466+ PULS_170MS = 4,/* 170 ms to 340 ms */
1467+ PULS_340MS = 5,/* 340 ms to 670 ms */
1468+ PULS_670MS = 6,/* 670 ms to 1.3 s */
1469+ PULS_1300MS = 7,/* 1.3 s to 2.7 s */
1470+};
1471+
1472+#define PHY_M_LED_BLINK_RT(x) (((x)<<8) & PHY_M_LEDC_BL_R_MSK)
1473+
1474+enum {
1475+ BLINK_42MS = 0,/* 42 ms */
1476+ BLINK_84MS = 1,/* 84 ms */
1477+ BLINK_170MS = 2,/* 170 ms */
1478+ BLINK_340MS = 3,/* 340 ms */
1479+ BLINK_670MS = 4,/* 670 ms */
1480+};
1481+
1482+/***** PHY_MARV_LED_OVER 16 bit r/w Manual LED Override Reg *****/
1483+#define PHY_M_LED_MO_SGMII(x) ((x)<<14) /* Bit 15..14: SGMII AN Timer */
1484+ /* Bit 13..12: reserved */
1485+#define PHY_M_LED_MO_DUP(x) ((x)<<10) /* Bit 11..10: Duplex */
1486+#define PHY_M_LED_MO_10(x) ((x)<<8) /* Bit 9.. 8: Link 10 */
1487+#define PHY_M_LED_MO_100(x) ((x)<<6) /* Bit 7.. 6: Link 100 */
1488+#define PHY_M_LED_MO_1000(x) ((x)<<4) /* Bit 5.. 4: Link 1000 */
1489+#define PHY_M_LED_MO_RX(x) ((x)<<2) /* Bit 3.. 2: Rx */
1490+#define PHY_M_LED_MO_TX(x) ((x)<<0) /* Bit 1.. 0: Tx */
1491+
1492+enum {
1493+ MO_LED_NORM = 0,
1494+ MO_LED_BLINK = 1,
1495+ MO_LED_OFF = 2,
1496+ MO_LED_ON = 3,
1497+};
1498+
1499+/***** PHY_MARV_EXT_CTRL_2 16 bit r/w Ext. PHY Specific Ctrl 2 *****/
1500+enum {
1501+ PHY_M_EC2_FI_IMPED = 1<<6, /* Fiber Input Impedance */
1502+ PHY_M_EC2_FO_IMPED = 1<<5, /* Fiber Output Impedance */
1503+ PHY_M_EC2_FO_M_CLK = 1<<4, /* Fiber Mode Clock Enable */
1504+ PHY_M_EC2_FO_BOOST = 1<<3, /* Fiber Output Boost */
1505+ PHY_M_EC2_FO_AM_MSK = 7,/* Bit 2.. 0: Fiber Output Amplitude */
1506+};
1507+
1508+/***** PHY_MARV_EXT_P_STAT 16 bit r/w Ext. PHY Specific Status *****/
1509+enum {
1510+ PHY_M_FC_AUTO_SEL = 1<<15, /* Fiber/Copper Auto Sel. Dis. */
1511+ PHY_M_FC_AN_REG_ACC = 1<<14, /* Fiber/Copper AN Reg. Access */
1512+ PHY_M_FC_RESOLUTION = 1<<13, /* Fiber/Copper Resolution */
1513+ PHY_M_SER_IF_AN_BP = 1<<12, /* Ser. IF AN Bypass Enable */
1514+ PHY_M_SER_IF_BP_ST = 1<<11, /* Ser. IF AN Bypass Status */
1515+ PHY_M_IRQ_POLARITY = 1<<10, /* IRQ polarity */
1516+ PHY_M_DIS_AUT_MED = 1<<9, /* Disable Aut. Medium Reg. Selection */
1517+ /* (88E1111 only) */
1518+ /* Bit 9.. 4: reserved (88E1011 only) */
1519+ PHY_M_UNDOC1 = 1<<7, /* undocumented bit !! */
1520+ PHY_M_DTE_POW_STAT = 1<<4, /* DTE Power Status (88E1111 only) */
1521+ PHY_M_MODE_MASK = 0xf, /* Bit 3.. 0: copy of HWCFG MODE[3:0] */
1522+};
1523+
1524+/***** PHY_MARV_CABLE_DIAG 16 bit r/o Cable Diagnostic Reg *****/
1525+enum {
1526+ PHY_M_CABD_ENA_TEST = 1<<15, /* Enable Test (Page 0) */
1527+ PHY_M_CABD_DIS_WAIT = 1<<15, /* Disable Waiting Period (Page 1) */
1528+ /* (88E1111 only) */
1529+ PHY_M_CABD_STAT_MSK = 3<<13, /* Bit 14..13: Status Mask */
1530+ PHY_M_CABD_AMPL_MSK = 0x1f<<8,/* Bit 12.. 8: Amplitude Mask */
1531+ /* (88E1111 only) */
1532+ PHY_M_CABD_DIST_MSK = 0xff, /* Bit 7.. 0: Distance Mask */
1533+};
1534+
1535+/* values for Cable Diagnostic Status (11=fail; 00=OK; 10=open; 01=short) */
1536+enum {
1537+ CABD_STAT_NORMAL= 0,
1538+ CABD_STAT_SHORT = 1,
1539+ CABD_STAT_OPEN = 2,
1540+ CABD_STAT_FAIL = 3,
1541+};
1542+
1543+/* for 10/100 Fast Ethernet PHY (88E3082 only) */
1544+/***** PHY_MARV_FE_LED_PAR 16 bit r/w LED Parallel Select Reg. *****/
1545+ /* Bit 15..12: reserved (used internally) */
1546+enum {
1547+ PHY_M_FELP_LED2_MSK = 0xf<<8, /* Bit 11.. 8: LED2 Mask (LINK) */
1548+ PHY_M_FELP_LED1_MSK = 0xf<<4, /* Bit 7.. 4: LED1 Mask (ACT) */
1549+ PHY_M_FELP_LED0_MSK = 0xf, /* Bit 3.. 0: LED0 Mask (SPEED) */
1550+};
1551+
1552+#define PHY_M_FELP_LED2_CTRL(x) (((x)<<8) & PHY_M_FELP_LED2_MSK)
1553+#define PHY_M_FELP_LED1_CTRL(x) (((x)<<4) & PHY_M_FELP_LED1_MSK)
1554+#define PHY_M_FELP_LED0_CTRL(x) (((x)<<0) & PHY_M_FELP_LED0_MSK)
1555+
1556+enum {
1557+ LED_PAR_CTRL_COLX = 0x00,
1558+ LED_PAR_CTRL_ERROR = 0x01,
1559+ LED_PAR_CTRL_DUPLEX = 0x02,
1560+ LED_PAR_CTRL_DP_COL = 0x03,
1561+ LED_PAR_CTRL_SPEED = 0x04,
1562+ LED_PAR_CTRL_LINK = 0x05,
1563+ LED_PAR_CTRL_TX = 0x06,
1564+ LED_PAR_CTRL_RX = 0x07,
1565+ LED_PAR_CTRL_ACT = 0x08,
1566+ LED_PAR_CTRL_LNK_RX = 0x09,
1567+ LED_PAR_CTRL_LNK_AC = 0x0a,
1568+ LED_PAR_CTRL_ACT_BL = 0x0b,
1569+ LED_PAR_CTRL_TX_BL = 0x0c,
1570+ LED_PAR_CTRL_RX_BL = 0x0d,
1571+ LED_PAR_CTRL_COL_BL = 0x0e,
1572+ LED_PAR_CTRL_INACT = 0x0f
1573+};
1574+
1575+/***** PHY_MARV_FE_SPEC_2 16 bit r/w Specific Control Reg. 2 *****/
1576+enum {
1577+ PHY_M_FESC_DIS_WAIT = 1<<2, /* Disable TDR Waiting Period */
1578+ PHY_M_FESC_ENA_MCLK = 1<<1, /* Enable MAC Rx Clock in sleep mode */
1579+ PHY_M_FESC_SEL_CL_A = 1<<0, /* Select Class A driver (100B-TX) */
1580+};
1581+
1582+
1583+/***** PHY_MARV_PHY_CTRL (page 3) 16 bit r/w LED Control Reg. *****/
1584+enum {
1585+ PHY_M_LEDC_LOS_MSK = 0xf<<12,/* Bit 15..12: LOS LED Ctrl. Mask */
1586+ PHY_M_LEDC_INIT_MSK = 0xf<<8, /* Bit 11.. 8: INIT LED Ctrl. Mask */
1587+ PHY_M_LEDC_STA1_MSK = 0xf<<4,/* Bit 7.. 4: STAT1 LED Ctrl. Mask */
1588+ PHY_M_LEDC_STA0_MSK = 0xf, /* Bit 3.. 0: STAT0 LED Ctrl. Mask */
1589+};
1590+
1591+#define PHY_M_LEDC_LOS_CTRL(x) (((x)<<12) & PHY_M_LEDC_LOS_MSK)
1592+#define PHY_M_LEDC_INIT_CTRL(x) (((x)<<8) & PHY_M_LEDC_INIT_MSK)
1593+#define PHY_M_LEDC_STA1_CTRL(x) (((x)<<4) & PHY_M_LEDC_STA1_MSK)
1594+#define PHY_M_LEDC_STA0_CTRL(x) (((x)<<0) & PHY_M_LEDC_STA0_MSK)
1595+
1596+/* GMAC registers */
1597+/* Port Registers */
1598+enum {
1599+ GM_GP_STAT = 0x0000, /* 16 bit r/o General Purpose Status */
1600+ GM_GP_CTRL = 0x0004, /* 16 bit r/w General Purpose Control */
1601+ GM_TX_CTRL = 0x0008, /* 16 bit r/w Transmit Control Reg. */
1602+ GM_RX_CTRL = 0x000c, /* 16 bit r/w Receive Control Reg. */
1603+ GM_TX_FLOW_CTRL = 0x0010, /* 16 bit r/w Transmit Flow-Control */
1604+ GM_TX_PARAM = 0x0014, /* 16 bit r/w Transmit Parameter Reg. */
1605+ GM_SERIAL_MODE = 0x0018, /* 16 bit r/w Serial Mode Register */
1606+/* Source Address Registers */
1607+ GM_SRC_ADDR_1L = 0x001c, /* 16 bit r/w Source Address 1 (low) */
1608+ GM_SRC_ADDR_1M = 0x0020, /* 16 bit r/w Source Address 1 (middle) */
1609+ GM_SRC_ADDR_1H = 0x0024, /* 16 bit r/w Source Address 1 (high) */
1610+ GM_SRC_ADDR_2L = 0x0028, /* 16 bit r/w Source Address 2 (low) */
1611+ GM_SRC_ADDR_2M = 0x002c, /* 16 bit r/w Source Address 2 (middle) */
1612+ GM_SRC_ADDR_2H = 0x0030, /* 16 bit r/w Source Address 2 (high) */
1613+
1614+/* Multicast Address Hash Registers */
1615+ GM_MC_ADDR_H1 = 0x0034, /* 16 bit r/w Multicast Address Hash 1 */
1616+ GM_MC_ADDR_H2 = 0x0038, /* 16 bit r/w Multicast Address Hash 2 */
1617+ GM_MC_ADDR_H3 = 0x003c, /* 16 bit r/w Multicast Address Hash 3 */
1618+ GM_MC_ADDR_H4 = 0x0040, /* 16 bit r/w Multicast Address Hash 4 */
1619+
1620+/* Interrupt Source Registers */
1621+ GM_TX_IRQ_SRC = 0x0044, /* 16 bit r/o Tx Overflow IRQ Source */
1622+ GM_RX_IRQ_SRC = 0x0048, /* 16 bit r/o Rx Overflow IRQ Source */
1623+ GM_TR_IRQ_SRC = 0x004c, /* 16 bit r/o Tx/Rx Over. IRQ Source */
1624+
1625+/* Interrupt Mask Registers */
1626+ GM_TX_IRQ_MSK = 0x0050, /* 16 bit r/w Tx Overflow IRQ Mask */
1627+ GM_RX_IRQ_MSK = 0x0054, /* 16 bit r/w Rx Overflow IRQ Mask */
1628+ GM_TR_IRQ_MSK = 0x0058, /* 16 bit r/w Tx/Rx Over. IRQ Mask */
1629+
1630+/* Serial Management Interface (SMI) Registers */
1631+ GM_SMI_CTRL = 0x0080, /* 16 bit r/w SMI Control Register */
1632+ GM_SMI_DATA = 0x0084, /* 16 bit r/w SMI Data Register */
1633+ GM_PHY_ADDR = 0x0088, /* 16 bit r/w GPHY Address Register */
1634+};
1635+
1636+/* MIB Counters */
1637+#define GM_MIB_CNT_BASE 0x0100 /* Base Address of MIB Counters */
1638+#define GM_MIB_CNT_SIZE 44 /* Number of MIB Counters */
1639+
1640+/*
1641+ * MIB Counters base address definitions (low word) -
1642+ * use offset 4 for access to high word (32 bit r/o)
1643+ */
1644+enum {
1645+ GM_RXF_UC_OK = GM_MIB_CNT_BASE + 0, /* Unicast Frames Received OK */
1646+ GM_RXF_BC_OK = GM_MIB_CNT_BASE + 8, /* Broadcast Frames Received OK */
1647+ GM_RXF_MPAUSE = GM_MIB_CNT_BASE + 16, /* Pause MAC Ctrl Frames Received */
1648+ GM_RXF_MC_OK = GM_MIB_CNT_BASE + 24, /* Multicast Frames Received OK */
1649+ GM_RXF_FCS_ERR = GM_MIB_CNT_BASE + 32, /* Rx Frame Check Seq. Error */
1650+ /* GM_MIB_CNT_BASE + 40: reserved */
1651+ GM_RXO_OK_LO = GM_MIB_CNT_BASE + 48, /* Octets Received OK Low */
1652+ GM_RXO_OK_HI = GM_MIB_CNT_BASE + 56, /* Octets Received OK High */
1653+ GM_RXO_ERR_LO = GM_MIB_CNT_BASE + 64, /* Octets Received Invalid Low */
1654+ GM_RXO_ERR_HI = GM_MIB_CNT_BASE + 72, /* Octets Received Invalid High */
1655+ GM_RXF_SHT = GM_MIB_CNT_BASE + 80, /* Frames <64 Byte Received OK */
1656+ GM_RXE_FRAG = GM_MIB_CNT_BASE + 88, /* Frames <64 Byte Received with FCS Err */
1657+ GM_RXF_64B = GM_MIB_CNT_BASE + 96, /* 64 Byte Rx Frame */
1658+ GM_RXF_127B = GM_MIB_CNT_BASE + 104, /* 65-127 Byte Rx Frame */
1659+ GM_RXF_255B = GM_MIB_CNT_BASE + 112, /* 128-255 Byte Rx Frame */
1660+ GM_RXF_511B = GM_MIB_CNT_BASE + 120, /* 256-511 Byte Rx Frame */
1661+ GM_RXF_1023B = GM_MIB_CNT_BASE + 128, /* 512-1023 Byte Rx Frame */
1662+ GM_RXF_1518B = GM_MIB_CNT_BASE + 136, /* 1024-1518 Byte Rx Frame */
1663+ GM_RXF_MAX_SZ = GM_MIB_CNT_BASE + 144, /* 1519-MaxSize Byte Rx Frame */
1664+ GM_RXF_LNG_ERR = GM_MIB_CNT_BASE + 152, /* Rx Frame too Long Error */
1665+ GM_RXF_JAB_PKT = GM_MIB_CNT_BASE + 160, /* Rx Jabber Packet Frame */
1666+ /* GM_MIB_CNT_BASE + 168: reserved */
1667+ GM_RXE_FIFO_OV = GM_MIB_CNT_BASE + 176, /* Rx FIFO overflow Event */
1668+ /* GM_MIB_CNT_BASE + 184: reserved */
1669+ GM_TXF_UC_OK = GM_MIB_CNT_BASE + 192, /* Unicast Frames Xmitted OK */
1670+ GM_TXF_BC_OK = GM_MIB_CNT_BASE + 200, /* Broadcast Frames Xmitted OK */
1671+ GM_TXF_MPAUSE = GM_MIB_CNT_BASE + 208, /* Pause MAC Ctrl Frames Xmitted */
1672+ GM_TXF_MC_OK = GM_MIB_CNT_BASE + 216, /* Multicast Frames Xmitted OK */
1673+ GM_TXO_OK_LO = GM_MIB_CNT_BASE + 224, /* Octets Transmitted OK Low */
1674+ GM_TXO_OK_HI = GM_MIB_CNT_BASE + 232, /* Octets Transmitted OK High */
1675+ GM_TXF_64B = GM_MIB_CNT_BASE + 240, /* 64 Byte Tx Frame */
1676+ GM_TXF_127B = GM_MIB_CNT_BASE + 248, /* 65-127 Byte Tx Frame */
1677+ GM_TXF_255B = GM_MIB_CNT_BASE + 256, /* 128-255 Byte Tx Frame */
1678+ GM_TXF_511B = GM_MIB_CNT_BASE + 264, /* 256-511 Byte Tx Frame */
1679+ GM_TXF_1023B = GM_MIB_CNT_BASE + 272, /* 512-1023 Byte Tx Frame */
1680+ GM_TXF_1518B = GM_MIB_CNT_BASE + 280, /* 1024-1518 Byte Tx Frame */
1681+ GM_TXF_MAX_SZ = GM_MIB_CNT_BASE + 288, /* 1519-MaxSize Byte Tx Frame */
1682+
1683+ GM_TXF_COL = GM_MIB_CNT_BASE + 304, /* Tx Collision */
1684+ GM_TXF_LAT_COL = GM_MIB_CNT_BASE + 312, /* Tx Late Collision */
1685+ GM_TXF_ABO_COL = GM_MIB_CNT_BASE + 320, /* Tx aborted due to Exces. Col. */
1686+ GM_TXF_MUL_COL = GM_MIB_CNT_BASE + 328, /* Tx Multiple Collision */
1687+ GM_TXF_SNG_COL = GM_MIB_CNT_BASE + 336, /* Tx Single Collision */
1688+ GM_TXE_FIFO_UR = GM_MIB_CNT_BASE + 344, /* Tx FIFO Underrun Event */
1689+};
1690+
1691+/* GMAC Bit Definitions */
1692+/* GM_GP_STAT 16 bit r/o General Purpose Status Register */
1693+enum {
1694+ GM_GPSR_SPEED = 1<<15, /* Bit 15: Port Speed (1 = 100 Mbps) */
1695+ GM_GPSR_DUPLEX = 1<<14, /* Bit 14: Duplex Mode (1 = Full) */
1696+ GM_GPSR_FC_TX_DIS = 1<<13, /* Bit 13: Tx Flow-Control Mode Disabled */
1697+ GM_GPSR_LINK_UP = 1<<12, /* Bit 12: Link Up Status */
1698+ GM_GPSR_PAUSE = 1<<11, /* Bit 11: Pause State */
1699+ GM_GPSR_TX_ACTIVE = 1<<10, /* Bit 10: Tx in Progress */
1700+ GM_GPSR_EXC_COL = 1<<9, /* Bit 9: Excessive Collisions Occurred */
1701+ GM_GPSR_LAT_COL = 1<<8, /* Bit 8: Late Collisions Occurred */
1702+
1703+ GM_GPSR_PHY_ST_CH = 1<<5, /* Bit 5: PHY Status Change */
1704+ GM_GPSR_GIG_SPEED = 1<<4, /* Bit 4: Gigabit Speed (1 = 1000 Mbps) */
1705+ GM_GPSR_PART_MODE = 1<<3, /* Bit 3: Partition mode */
1706+ GM_GPSR_FC_RX_DIS = 1<<2, /* Bit 2: Rx Flow-Control Mode Disabled */
1707+ GM_GPSR_PROM_EN = 1<<1, /* Bit 1: Promiscuous Mode Enabled */
1708+};
1709+
1710+/* GM_GP_CTRL 16 bit r/w General Purpose Control Register */
1711+enum {
1712+ GM_GPCR_PROM_ENA = 1<<14, /* Bit 14: Enable Promiscuous Mode */
1713+ GM_GPCR_FC_TX_DIS = 1<<13, /* Bit 13: Disable Tx Flow-Control Mode */
1714+ GM_GPCR_TX_ENA = 1<<12, /* Bit 12: Enable Transmit */
1715+ GM_GPCR_RX_ENA = 1<<11, /* Bit 11: Enable Receive */
1716+ GM_GPCR_BURST_ENA = 1<<10, /* Bit 10: Enable Burst Mode */
1717+ GM_GPCR_LOOP_ENA = 1<<9, /* Bit 9: Enable MAC Loopback Mode */
1718+ GM_GPCR_PART_ENA = 1<<8, /* Bit 8: Enable Partition Mode */
1719+ GM_GPCR_GIGS_ENA = 1<<7, /* Bit 7: Gigabit Speed (1000 Mbps) */
1720+ GM_GPCR_FL_PASS = 1<<6, /* Bit 6: Force Link Pass */
1721+ GM_GPCR_DUP_FULL = 1<<5, /* Bit 5: Full Duplex Mode */
1722+ GM_GPCR_FC_RX_DIS = 1<<4, /* Bit 4: Disable Rx Flow-Control Mode */
1723+ GM_GPCR_SPEED_100 = 1<<3, /* Bit 3: Port Speed 100 Mbps */
1724+ GM_GPCR_AU_DUP_DIS = 1<<2, /* Bit 2: Disable Auto-Update Duplex */
1725+ GM_GPCR_AU_FCT_DIS = 1<<1, /* Bit 1: Disable Auto-Update Flow-C. */
1726+ GM_GPCR_AU_SPD_DIS = 1<<0, /* Bit 0: Disable Auto-Update Speed */
1727+};
1728+
1729+#define GM_GPCR_SPEED_1000 (GM_GPCR_GIGS_ENA | GM_GPCR_SPEED_100)
1730+#define GM_GPCR_AU_ALL_DIS (GM_GPCR_AU_DUP_DIS | GM_GPCR_AU_FCT_DIS|GM_GPCR_AU_SPD_DIS)
1731+
1732+/* GM_TX_CTRL 16 bit r/w Transmit Control Register */
1733+enum {
1734+ GM_TXCR_FORCE_JAM = 1<<15, /* Bit 15: Force Jam / Flow-Control */
1735+ GM_TXCR_CRC_DIS = 1<<14, /* Bit 14: Disable insertion of CRC */
1736+ GM_TXCR_PAD_DIS = 1<<13, /* Bit 13: Disable padding of packets */
1737+ GM_TXCR_COL_THR_MSK = 7<<10, /* Bit 12..10: Collision Threshold */
1738+};
1739+
1740+#define TX_COL_THR(x) (((x)<<10) & GM_TXCR_COL_THR_MSK)
1741+#define TX_COL_DEF 0x04 /* late collision after 64 byte */
1742+
1743+/* GM_RX_CTRL 16 bit r/w Receive Control Register */
1744+enum {
1745+ GM_RXCR_UCF_ENA = 1<<15, /* Bit 15: Enable Unicast filtering */
1746+ GM_RXCR_MCF_ENA = 1<<14, /* Bit 14: Enable Multicast filtering */
1747+ GM_RXCR_CRC_DIS = 1<<13, /* Bit 13: Remove 4-byte CRC */
1748+ GM_RXCR_PASS_FC = 1<<12, /* Bit 12: Pass FC packets to FIFO */
1749+};
1750+
1751+/* GM_TX_PARAM 16 bit r/w Transmit Parameter Register */
1752+enum {
1753+ GM_TXPA_JAMLEN_MSK = 0x03<<14, /* Bit 15..14: Jam Length */
1754+ GM_TXPA_JAMIPG_MSK = 0x1f<<9, /* Bit 13..9: Jam IPG */
1755+ GM_TXPA_JAMDAT_MSK = 0x1f<<4, /* Bit 8..4: IPG Jam to Data */
1756+
1757+ TX_JAM_LEN_DEF = 0x03,
1758+ TX_JAM_IPG_DEF = 0x0b,
1759+ TX_IPG_JAM_DEF = 0x1c,
1760+};
1761+
1762+#define TX_JAM_LEN_VAL(x) (((x)<<14) & GM_TXPA_JAMLEN_MSK)
1763+#define TX_JAM_IPG_VAL(x) (((x)<<9) & GM_TXPA_JAMIPG_MSK)
1764+#define TX_IPG_JAM_DATA(x) (((x)<<4) & GM_TXPA_JAMDAT_MSK)
1765+
1766+
1767+/* GM_SERIAL_MODE 16 bit r/w Serial Mode Register */
1768+enum {
1769+ GM_SMOD_DATABL_MSK = 0x1f<<11, /* Bit 15..11: Data Blinder (r/o) */
1770+ GM_SMOD_LIMIT_4 = 1<<10, /* Bit 10: 4 consecutive Tx trials */
1771+ GM_SMOD_VLAN_ENA = 1<<9, /* Bit 9: Enable VLAN (Max. Frame Len) */
1772+ GM_SMOD_JUMBO_ENA = 1<<8, /* Bit 8: Enable Jumbo (Max. Frame Len) */
1773+ GM_SMOD_IPG_MSK = 0x1f /* Bit 4..0: Inter-Packet Gap (IPG) */
1774+};
1775+
1776+#define DATA_BLIND_VAL(x) (((x)<<11) & GM_SMOD_DATABL_MSK)
1777+#define DATA_BLIND_DEF 0x04
1778+
1779+#define IPG_DATA_VAL(x) (x & GM_SMOD_IPG_MSK)
1780+#define IPG_DATA_DEF 0x1e
1781+
1782+/* GM_SMI_CTRL 16 bit r/w SMI Control Register */
1783+enum {
1784+ GM_SMI_CT_PHY_A_MSK = 0x1f<<11,/* Bit 15..11: PHY Device Address */
1785+ GM_SMI_CT_REG_A_MSK = 0x1f<<6,/* Bit 10.. 6: PHY Register Address */
1786+ GM_SMI_CT_OP_RD = 1<<5, /* Bit 5: OpCode Read (0=Write)*/
1787+ GM_SMI_CT_RD_VAL = 1<<4, /* Bit 4: Read Valid (Read completed) */
1788+ GM_SMI_CT_BUSY = 1<<3, /* Bit 3: Busy (Operation in progress) */
1789+};
1790+
1791+#define GM_SMI_CT_PHY_AD(x) (((x)<<11) & GM_SMI_CT_PHY_A_MSK)
1792+#define GM_SMI_CT_REG_AD(x) (((x)<<6) & GM_SMI_CT_REG_A_MSK)
1793+
1794+/* GM_PHY_ADDR 16 bit r/w GPHY Address Register */
1795+enum {
1796+ GM_PAR_MIB_CLR = 1<<5, /* Bit 5: Set MIB Clear Counter Mode */
1797+ GM_PAR_MIB_TST = 1<<4, /* Bit 4: MIB Load Counter (Test Mode) */
1798+};
1799+
1800+/* Receive Frame Status Encoding */
1801+enum {
1802+ GMR_FS_LEN = 0xffff<<16, /* Bit 31..16: Rx Frame Length */
1803+ GMR_FS_LEN_SHIFT = 16,
1804+ GMR_FS_VLAN = 1<<13, /* Bit 13: VLAN Packet */
1805+ GMR_FS_JABBER = 1<<12, /* Bit 12: Jabber Packet */
1806+ GMR_FS_UN_SIZE = 1<<11, /* Bit 11: Undersize Packet */
1807+ GMR_FS_MC = 1<<10, /* Bit 10: Multicast Packet */
1808+ GMR_FS_BC = 1<<9, /* Bit 9: Broadcast Packet */
1809+ GMR_FS_RX_OK = 1<<8, /* Bit 8: Receive OK (Good Packet) */
1810+ GMR_FS_GOOD_FC = 1<<7, /* Bit 7: Good Flow-Control Packet */
1811+ GMR_FS_BAD_FC = 1<<6, /* Bit 6: Bad Flow-Control Packet */
1812+ GMR_FS_MII_ERR = 1<<5, /* Bit 5: MII Error */
1813+ GMR_FS_LONG_ERR = 1<<4, /* Bit 4: Too Long Packet */
1814+ GMR_FS_FRAGMENT = 1<<3, /* Bit 3: Fragment */
1815+
1816+ GMR_FS_CRC_ERR = 1<<1, /* Bit 1: CRC Error */
1817+ GMR_FS_RX_FF_OV = 1<<0, /* Bit 0: Rx FIFO Overflow */
1818+
1819+/*
1820+ * GMR_FS_ANY_ERR (analogous to XMR_FS_ANY_ERR)
1821+ */
1822+ GMR_FS_ANY_ERR = GMR_FS_CRC_ERR | GMR_FS_LONG_ERR |
1823+ GMR_FS_MII_ERR | GMR_FS_BAD_FC | GMR_FS_GOOD_FC |
1824+ GMR_FS_JABBER,
1825+/* Rx GMAC FIFO Flush Mask (default) */
1826+ RX_FF_FL_DEF_MSK = GMR_FS_CRC_ERR | GMR_FS_RX_FF_OV |GMR_FS_MII_ERR |
1827+ GMR_FS_BAD_FC | GMR_FS_GOOD_FC | GMR_FS_UN_SIZE |
1828+ GMR_FS_JABBER,
1829+};
1830+
1831+/* RX_GMF_CTRL_T 32 bit Rx GMAC FIFO Control/Test */
1832+enum {
1833+ GMF_WP_TST_ON = 1<<14, /* Write Pointer Test On */
1834+ GMF_WP_TST_OFF = 1<<13, /* Write Pointer Test Off */
1835+ GMF_WP_STEP = 1<<12, /* Write Pointer Step/Increment */
1836+
1837+ GMF_RP_TST_ON = 1<<10, /* Read Pointer Test On */
1838+ GMF_RP_TST_OFF = 1<<9, /* Read Pointer Test Off */
1839+ GMF_RP_STEP = 1<<8, /* Read Pointer Step/Increment */
1840+ GMF_RX_F_FL_ON = 1<<7, /* Rx FIFO Flush Mode On */
1841+ GMF_RX_F_FL_OFF = 1<<6, /* Rx FIFO Flush Mode Off */
1842+ GMF_CLI_RX_FO = 1<<5, /* Clear IRQ Rx FIFO Overrun */
1843+ GMF_CLI_RX_FC = 1<<4, /* Clear IRQ Rx Frame Complete */
1844+ GMF_OPER_ON = 1<<3, /* Operational Mode On */
1845+ GMF_OPER_OFF = 1<<2, /* Operational Mode Off */
1846+ GMF_RST_CLR = 1<<1, /* Clear GMAC FIFO Reset */
1847+ GMF_RST_SET = 1<<0, /* Set GMAC FIFO Reset */
1848+
1849+ RX_GMF_FL_THR_DEF = 0xa, /* flush threshold (default) */
1850+};
1851+
1852+
1853+/* TX_GMF_CTRL_T 32 bit Tx GMAC FIFO Control/Test */
1854+enum {
1855+ GMF_WSP_TST_ON = 1<<18,/* Write Shadow Pointer Test On */
1856+ GMF_WSP_TST_OFF = 1<<17,/* Write Shadow Pointer Test Off */
1857+ GMF_WSP_STEP = 1<<16,/* Write Shadow Pointer Step/Increment */
1858+
1859+ GMF_CLI_TX_FU = 1<<6, /* Clear IRQ Tx FIFO Underrun */
1860+ GMF_CLI_TX_FC = 1<<5, /* Clear IRQ Tx Frame Complete */
1861+ GMF_CLI_TX_PE = 1<<4, /* Clear IRQ Tx Parity Error */
1862+};
1863+
1864+/* GMAC_TI_ST_CTRL 8 bit Time Stamp Timer Ctrl Reg (YUKON only) */
1865+enum {
1866+ GMT_ST_START = 1<<2, /* Start Time Stamp Timer */
1867+ GMT_ST_STOP = 1<<1, /* Stop Time Stamp Timer */
1868+ GMT_ST_CLR_IRQ = 1<<0, /* Clear Time Stamp Timer IRQ */
1869+};
1870+
1871+/* GMAC_CTRL 32 bit GMAC Control Reg (YUKON only) */
1872+enum {
1873+ GMC_H_BURST_ON = 1<<7, /* Half Duplex Burst Mode On */
1874+ GMC_H_BURST_OFF = 1<<6, /* Half Duplex Burst Mode Off */
1875+ GMC_F_LOOPB_ON = 1<<5, /* FIFO Loopback On */
1876+ GMC_F_LOOPB_OFF = 1<<4, /* FIFO Loopback Off */
1877+ GMC_PAUSE_ON = 1<<3, /* Pause On */
1878+ GMC_PAUSE_OFF = 1<<2, /* Pause Off */
1879+ GMC_RST_CLR = 1<<1, /* Clear GMAC Reset */
1880+ GMC_RST_SET = 1<<0, /* Set GMAC Reset */
1881+};
1882+
1883+/* GPHY_CTRL 32 bit GPHY Control Reg (YUKON only) */
1884+enum {
1885+ GPC_SEL_BDT = 1<<28, /* Select Bi-Dir. Transfer for MDC/MDIO */
1886+ GPC_INT_POL_HI = 1<<27, /* IRQ Polarity is Active HIGH */
1887+ GPC_75_OHM = 1<<26, /* Use 75 Ohm Termination instead of 50 */
1888+ GPC_DIS_FC = 1<<25, /* Disable Automatic Fiber/Copper Detection */
1889+ GPC_DIS_SLEEP = 1<<24, /* Disable Energy Detect */
1890+ GPC_HWCFG_M_3 = 1<<23, /* HWCFG_MODE[3] */
1891+ GPC_HWCFG_M_2 = 1<<22, /* HWCFG_MODE[2] */
1892+ GPC_HWCFG_M_1 = 1<<21, /* HWCFG_MODE[1] */
1893+ GPC_HWCFG_M_0 = 1<<20, /* HWCFG_MODE[0] */
1894+ GPC_ANEG_0 = 1<<19, /* ANEG[0] */
1895+ GPC_ENA_XC = 1<<18, /* Enable MDI crossover */
1896+ GPC_DIS_125 = 1<<17, /* Disable 125 MHz clock */
1897+ GPC_ANEG_3 = 1<<16, /* ANEG[3] */
1898+ GPC_ANEG_2 = 1<<15, /* ANEG[2] */
1899+ GPC_ANEG_1 = 1<<14, /* ANEG[1] */
1900+ GPC_ENA_PAUSE = 1<<13, /* Enable Pause (SYM_OR_REM) */
1901+ GPC_PHYADDR_4 = 1<<12, /* Bit 4 of Phy Addr */
1902+ GPC_PHYADDR_3 = 1<<11, /* Bit 3 of Phy Addr */
1903+ GPC_PHYADDR_2 = 1<<10, /* Bit 2 of Phy Addr */
1904+ GPC_PHYADDR_1 = 1<<9, /* Bit 1 of Phy Addr */
1905+ GPC_PHYADDR_0 = 1<<8, /* Bit 0 of Phy Addr */
1906+ /* Bits 7..2: reserved */
1907+ GPC_RST_CLR = 1<<1, /* Clear GPHY Reset */
1908+ GPC_RST_SET = 1<<0, /* Set GPHY Reset */
1909+};
1910+
1911+#define GPC_HWCFG_GMII_COP (GPC_HWCFG_M_3|GPC_HWCFG_M_2 | GPC_HWCFG_M_1 | GPC_HWCFG_M_0)
1912+#define GPC_HWCFG_GMII_FIB (GPC_HWCFG_M_2 | GPC_HWCFG_M_1 | GPC_HWCFG_M_0)
1913+#define GPC_ANEG_ADV_ALL_M (GPC_ANEG_3 | GPC_ANEG_2 | GPC_ANEG_1 | GPC_ANEG_0)
1914+
1915+/* forced speed and duplex mode (don't mix with other ANEG bits) */
1916+#define GPC_FRC10MBIT_HALF 0
1917+#define GPC_FRC10MBIT_FULL GPC_ANEG_0
1918+#define GPC_FRC100MBIT_HALF GPC_ANEG_1
1919+#define GPC_FRC100MBIT_FULL (GPC_ANEG_0 | GPC_ANEG_1)
1920+
1921+/* auto-negotiation with limited advertised speeds */
1922+/* mix only with master/slave settings (for copper) */
1923+#define GPC_ADV_1000_HALF GPC_ANEG_2
1924+#define GPC_ADV_1000_FULL GPC_ANEG_3
1925+#define GPC_ADV_ALL (GPC_ANEG_2 | GPC_ANEG_3)
1926+
1927+/* master/slave settings */
1928+/* only for copper with 1000 Mbps */
1929+#define GPC_FORCE_MASTER 0
1930+#define GPC_FORCE_SLAVE GPC_ANEG_0
1931+#define GPC_PREF_MASTER GPC_ANEG_1
1932+#define GPC_PREF_SLAVE (GPC_ANEG_1 | GPC_ANEG_0)
1933+
1934+/* GMAC_IRQ_SRC 8 bit GMAC Interrupt Source Reg (YUKON only) */
1935+/* GMAC_IRQ_MSK 8 bit GMAC Interrupt Mask Reg (YUKON only) */
1936+enum {
1937+ GM_IS_TX_CO_OV = 1<<5, /* Transmit Counter Overflow IRQ */
1938+ GM_IS_RX_CO_OV = 1<<4, /* Receive Counter Overflow IRQ */
1939+ GM_IS_TX_FF_UR = 1<<3, /* Transmit FIFO Underrun */
1940+ GM_IS_TX_COMPL = 1<<2, /* Frame Transmission Complete */
1941+ GM_IS_RX_FF_OR = 1<<1, /* Receive FIFO Overrun */
1942+ GM_IS_RX_COMPL = 1<<0, /* Frame Reception Complete */
1943+
1944+#define GMAC_DEF_MSK (GM_IS_RX_FF_OR | GM_IS_TX_FF_UR)
1945+
1946+/* GMAC_LINK_CTRL 16 bit GMAC Link Control Reg (YUKON only) */
1947+ /* Bits 15.. 2: reserved */
1948+ GMLC_RST_CLR = 1<<1, /* Clear GMAC Link Reset */
1949+ GMLC_RST_SET = 1<<0, /* Set GMAC Link Reset */
1950+
1951+
1952+/* WOL_CTRL_STAT 16 bit WOL Control/Status Reg */
1953+ WOL_CTL_LINK_CHG_OCC = 1<<15,
1954+ WOL_CTL_MAGIC_PKT_OCC = 1<<14,
1955+ WOL_CTL_PATTERN_OCC = 1<<13,
1956+ WOL_CTL_CLEAR_RESULT = 1<<12,
1957+ WOL_CTL_ENA_PME_ON_LINK_CHG = 1<<11,
1958+ WOL_CTL_DIS_PME_ON_LINK_CHG = 1<<10,
1959+ WOL_CTL_ENA_PME_ON_MAGIC_PKT = 1<<9,
1960+ WOL_CTL_DIS_PME_ON_MAGIC_PKT = 1<<8,
1961+ WOL_CTL_ENA_PME_ON_PATTERN = 1<<7,
1962+ WOL_CTL_DIS_PME_ON_PATTERN = 1<<6,
1963+ WOL_CTL_ENA_LINK_CHG_UNIT = 1<<5,
1964+ WOL_CTL_DIS_LINK_CHG_UNIT = 1<<4,
1965+ WOL_CTL_ENA_MAGIC_PKT_UNIT = 1<<3,
1966+ WOL_CTL_DIS_MAGIC_PKT_UNIT = 1<<2,
1967+ WOL_CTL_ENA_PATTERN_UNIT = 1<<1,
1968+ WOL_CTL_DIS_PATTERN_UNIT = 1<<0,
1969+};
1970+
1971+#define WOL_CTL_DEFAULT \
1972+ (WOL_CTL_DIS_PME_ON_LINK_CHG | \
1973+ WOL_CTL_DIS_PME_ON_PATTERN | \
1974+ WOL_CTL_DIS_PME_ON_MAGIC_PKT | \
1975+ WOL_CTL_DIS_LINK_CHG_UNIT | \
1976+ WOL_CTL_DIS_PATTERN_UNIT | \
1977+ WOL_CTL_DIS_MAGIC_PKT_UNIT)
1978+
1979+/* WOL_MATCH_CTL 8 bit WOL Match Control Reg */
1980+#define WOL_CTL_PATT_ENA(x) (1 << (x))
1981+
1982+
1983+/* XMAC II registers */
1984+enum {
1985+ XM_MMU_CMD = 0x0000, /* 16 bit r/w MMU Command Register */
1986+ XM_POFF = 0x0008, /* 32 bit r/w Packet Offset Register */
1987+ XM_BURST = 0x000c, /* 32 bit r/w Burst Register for half duplex*/
1988+ XM_1L_VLAN_TAG = 0x0010, /* 16 bit r/w One Level VLAN Tag ID */
1989+ XM_2L_VLAN_TAG = 0x0014, /* 16 bit r/w Two Level VLAN Tag ID */
1990+ XM_TX_CMD = 0x0020, /* 16 bit r/w Transmit Command Register */
1991+ XM_TX_RT_LIM = 0x0024, /* 16 bit r/w Transmit Retry Limit Register */
1992+ XM_TX_STIME = 0x0028, /* 16 bit r/w Transmit Slottime Register */
1993+ XM_TX_IPG = 0x002c, /* 16 bit r/w Transmit Inter Packet Gap */
1994+ XM_RX_CMD = 0x0030, /* 16 bit r/w Receive Command Register */
1995+ XM_PHY_ADDR = 0x0034, /* 16 bit r/w PHY Address Register */
1996+ XM_PHY_DATA = 0x0038, /* 16 bit r/w PHY Data Register */
1997+ XM_GP_PORT = 0x0040, /* 32 bit r/w General Purpose Port Register */
1998+ XM_IMSK = 0x0044, /* 16 bit r/w Interrupt Mask Register */
1999+ XM_ISRC = 0x0048, /* 16 bit r/o Interrupt Status Register */
2000+ XM_HW_CFG = 0x004c, /* 16 bit r/w Hardware Config Register */
2001+ XM_TX_LO_WM = 0x0060, /* 16 bit r/w Tx FIFO Low Water Mark */
2002+ XM_TX_HI_WM = 0x0062, /* 16 bit r/w Tx FIFO High Water Mark */
2003+ XM_TX_THR = 0x0064, /* 16 bit r/w Tx Request Threshold */
2004+ XM_HT_THR = 0x0066, /* 16 bit r/w Host Request Threshold */
2005+ XM_PAUSE_DA = 0x0068, /* NA reg r/w Pause Destination Address */
2006+ XM_CTL_PARA = 0x0070, /* 32 bit r/w Control Parameter Register */
2007+ XM_MAC_OPCODE = 0x0074, /* 16 bit r/w Opcode for MAC control frames */
2008+ XM_MAC_PTIME = 0x0076, /* 16 bit r/w Pause time for MAC ctrl frames*/
2009+ XM_TX_STAT = 0x0078, /* 32 bit r/o Tx Status LIFO Register */
2010+
2011+ XM_EXM_START = 0x0080, /* r/w Start Address of the EXM Regs */
2012+#define XM_EXM(reg) (XM_EXM_START + ((reg) << 3))
2013+};
2014+
2015+enum {
2016+ XM_SRC_CHK = 0x0100, /* NA reg r/w Source Check Address Register */
2017+ XM_SA = 0x0108, /* NA reg r/w Station Address Register */
2018+ XM_HSM = 0x0110, /* 64 bit r/w Hash Match Address Registers */
2019+ XM_RX_LO_WM = 0x0118, /* 16 bit r/w Receive Low Water Mark */
2020+ XM_RX_HI_WM = 0x011a, /* 16 bit r/w Receive High Water Mark */
2021+ XM_RX_THR = 0x011c, /* 32 bit r/w Receive Request Threshold */
2022+ XM_DEV_ID = 0x0120, /* 32 bit r/o Device ID Register */
2023+ XM_MODE = 0x0124, /* 32 bit r/w Mode Register */
2024+ XM_LSA = 0x0128, /* NA reg r/o Last Source Register */
2025+ XM_TS_READ = 0x0130, /* 32 bit r/o Time Stamp Read Register */
2026+ XM_TS_LOAD = 0x0134, /* 32 bit r/o Time Stamp Load Value */
2027+ XM_STAT_CMD = 0x0200, /* 16 bit r/w Statistics Command Register */
2028+ XM_RX_CNT_EV = 0x0204, /* 32 bit r/o Rx Counter Event Register */
2029+ XM_TX_CNT_EV = 0x0208, /* 32 bit r/o Tx Counter Event Register */
2030+ XM_RX_EV_MSK = 0x020c, /* 32 bit r/w Rx Counter Event Mask */
2031+ XM_TX_EV_MSK = 0x0210, /* 32 bit r/w Tx Counter Event Mask */
2032+ XM_TXF_OK = 0x0280, /* 32 bit r/o Frames Transmitted OK Counter */
2033+ XM_TXO_OK_HI = 0x0284, /* 32 bit r/o Octets Transmitted OK High Cnt*/
2034+ XM_TXO_OK_LO = 0x0288, /* 32 bit r/o Octets Transmitted OK Low Cnt */
2035+ XM_TXF_BC_OK = 0x028c, /* 32 bit r/o Broadcast Frames Xmitted OK */
2036+ XM_TXF_MC_OK = 0x0290, /* 32 bit r/o Multicast Frames Xmitted OK */
2037+ XM_TXF_UC_OK = 0x0294, /* 32 bit r/o Unicast Frames Xmitted OK */
2038+ XM_TXF_LONG = 0x0298, /* 32 bit r/o Tx Long Frame Counter */
2039+ XM_TXE_BURST = 0x029c, /* 32 bit r/o Tx Burst Event Counter */
2040+ XM_TXF_MPAUSE = 0x02a0, /* 32 bit r/o Tx Pause MAC Ctrl Frame Cnt */
2041+ XM_TXF_MCTRL = 0x02a4, /* 32 bit r/o Tx MAC Ctrl Frame Counter */
2042+ XM_TXF_SNG_COL = 0x02a8, /* 32 bit r/o Tx Single Collision Counter */
2043+ XM_TXF_MUL_COL = 0x02ac, /* 32 bit r/o Tx Multiple Collision Counter */
2044+ XM_TXF_ABO_COL = 0x02b0, /* 32 bit r/o Tx aborted due to Exces. Col. */
2045+ XM_TXF_LAT_COL = 0x02b4, /* 32 bit r/o Tx Late Collision Counter */
2046+ XM_TXF_DEF = 0x02b8, /* 32 bit r/o Tx Deferred Frame Counter */
2047+ XM_TXF_EX_DEF = 0x02bc, /* 32 bit r/o Tx Excessive Deferral Counter */
2048+ XM_TXE_FIFO_UR = 0x02c0, /* 32 bit r/o Tx FIFO Underrun Event Cnt */
2049+ XM_TXE_CS_ERR = 0x02c4, /* 32 bit r/o Tx Carrier Sense Error Cnt */
2050+ XM_TXP_UTIL = 0x02c8, /* 32 bit r/o Tx Utilization in % */
2051+ XM_TXF_64B = 0x02d0, /* 32 bit r/o 64 Byte Tx Frame Counter */
2052+ XM_TXF_127B = 0x02d4, /* 32 bit r/o 65-127 Byte Tx Frame Counter */
2053+ XM_TXF_255B = 0x02d8, /* 32 bit r/o 128-255 Byte Tx Frame Counter */
2054+ XM_TXF_511B = 0x02dc, /* 32 bit r/o 256-511 Byte Tx Frame Counter */
2055+ XM_TXF_1023B = 0x02e0, /* 32 bit r/o 512-1023 Byte Tx Frame Counter*/
2056+ XM_TXF_MAX_SZ = 0x02e4, /* 32 bit r/o 1024-MaxSize Byte Tx Frame Cnt*/
2057+ XM_RXF_OK = 0x0300, /* 32 bit r/o Frames Received OK */
2058+ XM_RXO_OK_HI = 0x0304, /* 32 bit r/o Octets Received OK High Cnt */
2059+ XM_RXO_OK_LO = 0x0308, /* 32 bit r/o Octets Received OK Low Counter*/
2060+ XM_RXF_BC_OK = 0x030c, /* 32 bit r/o Broadcast Frames Received OK */
2061+ XM_RXF_MC_OK = 0x0310, /* 32 bit r/o Multicast Frames Received OK */
2062+ XM_RXF_UC_OK = 0x0314, /* 32 bit r/o Unicast Frames Received OK */
2063+ XM_RXF_MPAUSE = 0x0318, /* 32 bit r/o Rx Pause MAC Ctrl Frame Cnt */
2064+ XM_RXF_MCTRL = 0x031c, /* 32 bit r/o Rx MAC Ctrl Frame Counter */
2065+ XM_RXF_INV_MP = 0x0320, /* 32 bit r/o Rx invalid Pause Frame Cnt */
2066+ XM_RXF_INV_MOC = 0x0324, /* 32 bit r/o Rx Frames with inv. MAC Opcode*/
2067+ XM_RXE_BURST = 0x0328, /* 32 bit r/o Rx Burst Event Counter */
2068+ XM_RXE_FMISS = 0x032c, /* 32 bit r/o Rx Missed Frames Event Cnt */
2069+ XM_RXF_FRA_ERR = 0x0330, /* 32 bit r/o Rx Framing Error Counter */
2070+ XM_RXE_FIFO_OV = 0x0334, /* 32 bit r/o Rx FIFO overflow Event Cnt */
2071+ XM_RXF_JAB_PKT = 0x0338, /* 32 bit r/o Rx Jabber Packet Frame Cnt */
2072+ XM_RXE_CAR_ERR = 0x033c, /* 32 bit r/o Rx Carrier Event Error Cnt */
2073+ XM_RXF_LEN_ERR = 0x0340, /* 32 bit r/o Rx in Range Length Error */
2074+ XM_RXE_SYM_ERR = 0x0344, /* 32 bit r/o Rx Symbol Error Counter */
2075+ XM_RXE_SHT_ERR = 0x0348, /* 32 bit r/o Rx Short Event Error Cnt */
2076+ XM_RXE_RUNT = 0x034c, /* 32 bit r/o Rx Runt Event Counter */
2077+ XM_RXF_LNG_ERR = 0x0350, /* 32 bit r/o Rx Frame too Long Error Cnt */
2078+ XM_RXF_FCS_ERR = 0x0354, /* 32 bit r/o Rx Frame Check Seq. Error Cnt */
2079+ XM_RXF_CEX_ERR = 0x035c, /* 32 bit r/o Rx Carrier Ext Error Frame Cnt*/
2080+ XM_RXP_UTIL = 0x0360, /* 32 bit r/o Rx Utilization in % */
2081+ XM_RXF_64B = 0x0368, /* 32 bit r/o 64 Byte Rx Frame Counter */
2082+ XM_RXF_127B = 0x036c, /* 32 bit r/o 65-127 Byte Rx Frame Counter */
2083+ XM_RXF_255B = 0x0370, /* 32 bit r/o 128-255 Byte Rx Frame Counter */
2084+ XM_RXF_511B = 0x0374, /* 32 bit r/o 256-511 Byte Rx Frame Counter */
2085+ XM_RXF_1023B = 0x0378, /* 32 bit r/o 512-1023 Byte Rx Frame Counter*/
2086+ XM_RXF_MAX_SZ = 0x037c, /* 32 bit r/o 1024-MaxSize Byte Rx Frame Cnt*/
2087+};
2088+
2089+/* XM_MMU_CMD 16 bit r/w MMU Command Register */
2090+enum {
2091+ XM_MMU_PHY_RDY = 1<<12,/* Bit 12: PHY Read Ready */
2092+ XM_MMU_PHY_BUSY = 1<<11,/* Bit 11: PHY Busy */
2093+ XM_MMU_IGN_PF = 1<<10,/* Bit 10: Ignore Pause Frame */
2094+ XM_MMU_MAC_LB = 1<<9, /* Bit 9: Enable MAC Loopback */
2095+ XM_MMU_FRC_COL = 1<<7, /* Bit 7: Force Collision */
2096+ XM_MMU_SIM_COL = 1<<6, /* Bit 6: Simulate Collision */
2097+ XM_MMU_NO_PRE = 1<<5, /* Bit 5: No MDIO Preamble */
2098+ XM_MMU_GMII_FD = 1<<4, /* Bit 4: GMII uses Full Duplex */
2099+ XM_MMU_RAT_CTRL = 1<<3, /* Bit 3: Enable Rate Control */
2100+ XM_MMU_GMII_LOOP= 1<<2, /* Bit 2: PHY is in Loopback Mode */
2101+ XM_MMU_ENA_RX = 1<<1, /* Bit 1: Enable Receiver */
2102+ XM_MMU_ENA_TX = 1<<0, /* Bit 0: Enable Transmitter */
2103+};
2104+
2105+
2106+/* XM_TX_CMD 16 bit r/w Transmit Command Register */
2107+enum {
2108+ XM_TX_BK2BK = 1<<6, /* Bit 6: Ignore Carrier Sense (Tx Bk2Bk)*/
2109+ XM_TX_ENC_BYP = 1<<5, /* Bit 5: Set Encoder in Bypass Mode */
2110+ XM_TX_SAM_LINE = 1<<4, /* Bit 4: (sc) Start utilization calculation */
2111+ XM_TX_NO_GIG_MD = 1<<3, /* Bit 3: Disable Carrier Extension */
2112+ XM_TX_NO_PRE = 1<<2, /* Bit 2: Disable Preamble Generation */
2113+ XM_TX_NO_CRC = 1<<1, /* Bit 1: Disable CRC Generation */
2114+ XM_TX_AUTO_PAD = 1<<0, /* Bit 0: Enable Automatic Padding */
2115+};
2116+
2117+/* XM_TX_RT_LIM 16 bit r/w Transmit Retry Limit Register */
2118+#define XM_RT_LIM_MSK 0x1f /* Bit 4..0: Tx Retry Limit */
2119+
2120+
2121+/* XM_TX_STIME 16 bit r/w Transmit Slottime Register */
2122+#define XM_STIME_MSK 0x7f /* Bit 6..0: Tx Slottime bits */
2123+
2124+
2125+/* XM_TX_IPG 16 bit r/w Transmit Inter Packet Gap */
2126+#define XM_IPG_MSK 0xff /* Bit 7..0: IPG value bits */
2127+
2128+
2129+/* XM_RX_CMD 16 bit r/w Receive Command Register */
2130+enum {
2131+ XM_RX_LENERR_OK = 1<<8, /* Bit 8 don't set Rx Err bit for */
2132+ /* in-range error packets */
2133+ XM_RX_BIG_PK_OK = 1<<7, /* Bit 7 don't set Rx Err bit for */
2134+ /* jumbo packets */
2135+ XM_RX_IPG_CAP = 1<<6, /* Bit 6 replace type field with IPG */
2136+ XM_RX_TP_MD = 1<<5, /* Bit 5: Enable transparent Mode */
2137+ XM_RX_STRIP_FCS = 1<<4, /* Bit 4: Enable FCS Stripping */
2138+ XM_RX_SELF_RX = 1<<3, /* Bit 3: Enable Rx of own packets */
2139+ XM_RX_SAM_LINE = 1<<2, /* Bit 2: (sc) Start utilization calculation */
2140+ XM_RX_STRIP_PAD = 1<<1, /* Bit 1: Strip pad bytes of Rx frames */
2141+ XM_RX_DIS_CEXT = 1<<0, /* Bit 0: Disable carrier ext. check */
2142+};
2143+
2144+
2145+/* XM_GP_PORT 32 bit r/w General Purpose Port Register */
2146+enum {
2147+ XM_GP_ANIP = 1<<6, /* Bit 6: (ro) Auto-Neg. in progress */
2148+ XM_GP_FRC_INT = 1<<5, /* Bit 5: (sc) Force Interrupt */
2149+ XM_GP_RES_MAC = 1<<3, /* Bit 3: (sc) Reset MAC and FIFOs */
2150+ XM_GP_RES_STAT = 1<<2, /* Bit 2: (sc) Reset the statistics module */
2151+ XM_GP_INP_ASS = 1<<0, /* Bit 0: (ro) GP Input Pin asserted */
2152+};
2153+
2154+
2155+/* XM_IMSK 16 bit r/w Interrupt Mask Register */
2156+/* XM_ISRC 16 bit r/o Interrupt Status Register */
2157+enum {
2158+ XM_IS_LNK_AE = 1<<14, /* Bit 14: Link Asynchronous Event */
2159+ XM_IS_TX_ABORT = 1<<13, /* Bit 13: Transmit Abort, late Col. etc */
2160+ XM_IS_FRC_INT = 1<<12, /* Bit 12: Force INT bit set in GP */
2161+ XM_IS_INP_ASS = 1<<11, /* Bit 11: Input Asserted, GP bit 0 set */
2162+ XM_IS_LIPA_RC = 1<<10, /* Bit 10: Link Partner requests config */
2163+ XM_IS_RX_PAGE = 1<<9, /* Bit 9: Page Received */
2164+ XM_IS_TX_PAGE = 1<<8, /* Bit 8: Next Page Loaded for Transmit */
2165+ XM_IS_AND = 1<<7, /* Bit 7: Auto-Negotiation Done */
2166+ XM_IS_TSC_OV = 1<<6, /* Bit 6: Time Stamp Counter Overflow */
2167+ XM_IS_RXC_OV = 1<<5, /* Bit 5: Rx Counter Event Overflow */
2168+ XM_IS_TXC_OV = 1<<4, /* Bit 4: Tx Counter Event Overflow */
2169+ XM_IS_RXF_OV = 1<<3, /* Bit 3: Receive FIFO Overflow */
2170+ XM_IS_TXF_UR = 1<<2, /* Bit 2: Transmit FIFO Underrun */
2171+ XM_IS_TX_COMP = 1<<1, /* Bit 1: Frame Tx Complete */
2172+ XM_IS_RX_COMP = 1<<0, /* Bit 0: Frame Rx Complete */
2173+};
2174+
2175+#define XM_DEF_MSK (~(XM_IS_INP_ASS | XM_IS_LIPA_RC | XM_IS_RX_PAGE | \
2176+ XM_IS_AND | XM_IS_RXC_OV | XM_IS_TXC_OV | \
2177+ XM_IS_RXF_OV | XM_IS_TXF_UR))
2178+
2179+
2180+/* XM_HW_CFG 16 bit r/w Hardware Config Register */
2181+enum {
2182+ XM_HW_GEN_EOP = 1<<3, /* Bit 3: generate End of Packet pulse */
2183+ XM_HW_COM4SIG = 1<<2, /* Bit 2: use Comma Detect for Sig. Det.*/
2184+ XM_HW_GMII_MD = 1<<0, /* Bit 0: GMII Interface selected */
2185+};
2186+
2187+
2188+/* XM_TX_LO_WM 16 bit r/w Tx FIFO Low Water Mark */
2189+/* XM_TX_HI_WM 16 bit r/w Tx FIFO High Water Mark */
2190+#define XM_TX_WM_MSK 0x01ff /* Bit 8.. 0 Tx FIFO Watermark bits */
2191+
2192+/* XM_TX_THR 16 bit r/w Tx Request Threshold */
2193+/* XM_HT_THR 16 bit r/w Host Request Threshold */
2194+/* XM_RX_THR 16 bit r/w Rx Request Threshold */
2195+#define XM_THR_MSK 0x03ff /* Bit 9.. 0 Rx/Tx Request Threshold bits */
2196+
2197+
2198+/* XM_TX_STAT 32 bit r/o Tx Status LIFO Register */
2199+enum {
2200+ XM_ST_VALID = (1UL<<31), /* Bit 31: Status Valid */
2201+ XM_ST_BYTE_CNT = (0x3fffL<<17), /* Bit 30..17: Tx frame Length */
2202+ XM_ST_RETRY_CNT = (0x1fL<<12), /* Bit 16..12: Retry Count */
2203+ XM_ST_EX_COL = 1<<11, /* Bit 11: Excessive Collisions */
2204+ XM_ST_EX_DEF = 1<<10, /* Bit 10: Excessive Deferral */
2205+ XM_ST_BURST = 1<<9, /* Bit 9: packet xmitted in burst mode*/
2206+ XM_ST_DEFER = 1<<8, /* Bit 8: packet was deferred */
2207+ XM_ST_BC = 1<<7, /* Bit 7: Broadcast packet */
2208+ XM_ST_MC = 1<<6, /* Bit 6: Multicast packet */
2209+ XM_ST_UC = 1<<5, /* Bit 5: Unicast packet */
2210+ XM_ST_TX_UR = 1<<4, /* Bit 4: FIFO Underrun occurred */
2211+ XM_ST_CS_ERR = 1<<3, /* Bit 3: Carrier Sense Error */
2212+ XM_ST_LAT_COL = 1<<2, /* Bit 2: Late Collision Error */
2213+ XM_ST_MUL_COL = 1<<1, /* Bit 1: Multiple Collisions */
2214+ XM_ST_SGN_COL = 1<<0, /* Bit 0: Single Collision */
2215+};
2216+
2217+/* XM_RX_LO_WM 16 bit r/w Receive Low Water Mark */
2218+/* XM_RX_HI_WM 16 bit r/w Receive High Water Mark */
2219+#define XM_RX_WM_MSK 0x03ff /* Bit 9.. 0: Rx FIFO Watermark bits */
2220+
2221+
2222+/* XM_DEV_ID 32 bit r/o Device ID Register */
2223+#define XM_DEV_OUI (0x00ffffffUL<<8) /* Bit 31..8: Device OUI */
2224+#define XM_DEV_REV (0x07L << 5) /* Bit 7..5: Chip Rev Num */
2225+
2226+
2227+/* XM_MODE 32 bit r/w Mode Register */
2228+enum {
2229+ XM_MD_ENA_REJ = 1<<26, /* Bit 26: Enable Frame Reject */
2230+ XM_MD_SPOE_E = 1<<25, /* Bit 25: Send Pause on Edge */
2231+ /* extern generated */
2232+ XM_MD_TX_REP = 1<<24, /* Bit 24: Transmit Repeater Mode */
2233+ XM_MD_SPOFF_I = 1<<23, /* Bit 23: Send Pause on FIFO full */
2234+ /* intern generated */
2235+ XM_MD_LE_STW = 1<<22, /* Bit 22: Rx Stat Word in Little Endian */
2236+ XM_MD_TX_CONT = 1<<21, /* Bit 21: Send Continuous */
2237+ XM_MD_TX_PAUSE = 1<<20, /* Bit 20: (sc) Send Pause Frame */
2238+ XM_MD_ATS = 1<<19, /* Bit 19: Append Time Stamp */
2239+ XM_MD_SPOL_I = 1<<18, /* Bit 18: Send Pause on Low */
2240+ /* intern generated */
2241+ XM_MD_SPOH_I = 1<<17, /* Bit 17: Send Pause on High */
2242+ /* intern generated */
2243+ XM_MD_CAP = 1<<16, /* Bit 16: Check Address Pair */
2244+ XM_MD_ENA_HASH = 1<<15, /* Bit 15: Enable Hashing */
2245+ XM_MD_CSA = 1<<14, /* Bit 14: Check Station Address */
2246+ XM_MD_CAA = 1<<13, /* Bit 13: Check Address Array */
2247+ XM_MD_RX_MCTRL = 1<<12, /* Bit 12: Rx MAC Control Frame */
2248+ XM_MD_RX_RUNT = 1<<11, /* Bit 11: Rx Runt Frames */
2249+ XM_MD_RX_IRLE = 1<<10, /* Bit 10: Rx in Range Len Err Frame */
2250+ XM_MD_RX_LONG = 1<<9, /* Bit 9: Rx Long Frame */
2251+ XM_MD_RX_CRCE = 1<<8, /* Bit 8: Rx CRC Error Frame */
2252+ XM_MD_RX_ERR = 1<<7, /* Bit 7: Rx Error Frame */
2253+ XM_MD_DIS_UC = 1<<6, /* Bit 6: Disable Rx Unicast */
2254+ XM_MD_DIS_MC = 1<<5, /* Bit 5: Disable Rx Multicast */
2255+ XM_MD_DIS_BC = 1<<4, /* Bit 4: Disable Rx Broadcast */
2256+ XM_MD_ENA_PROM = 1<<3, /* Bit 3: Enable Promiscuous */
2257+ XM_MD_ENA_BE = 1<<2, /* Bit 2: Enable Big Endian */
2258+ XM_MD_FTF = 1<<1, /* Bit 1: (sc) Flush Tx FIFO */
2259+ XM_MD_FRF = 1<<0, /* Bit 0: (sc) Flush Rx FIFO */
2260+};
2261+
2262+#define XM_PAUSE_MODE (XM_MD_SPOE_E | XM_MD_SPOL_I | XM_MD_SPOH_I)
2263+#define XM_DEF_MODE (XM_MD_RX_RUNT | XM_MD_RX_IRLE | XM_MD_RX_LONG |\
2264+ XM_MD_RX_CRCE | XM_MD_RX_ERR | XM_MD_CSA)
2265+
2266+/* XM_STAT_CMD 16 bit r/w Statistics Command Register */
2267+enum {
2268+ XM_SC_SNP_RXC = 1<<5, /* Bit 5: (sc) Snap Rx Counters */
2269+ XM_SC_SNP_TXC = 1<<4, /* Bit 4: (sc) Snap Tx Counters */
2270+ XM_SC_CP_RXC = 1<<3, /* Bit 3: Copy Rx Counters Continuously */
2271+ XM_SC_CP_TXC = 1<<2, /* Bit 2: Copy Tx Counters Continuously */
2272+ XM_SC_CLR_RXC = 1<<1, /* Bit 1: (sc) Clear Rx Counters */
2273+ XM_SC_CLR_TXC = 1<<0, /* Bit 0: (sc) Clear Tx Counters */
2274+};
2275+
2276+
2277+/* XM_RX_CNT_EV 32 bit r/o Rx Counter Event Register */
2278+/* XM_RX_EV_MSK 32 bit r/w Rx Counter Event Mask */
2279+enum {
2280+ XMR_MAX_SZ_OV = 1<<31, /* Bit 31: 1024-MaxSize Rx Cnt Ov*/
2281+ XMR_1023B_OV = 1<<30, /* Bit 30: 512-1023Byte Rx Cnt Ov*/
2282+ XMR_511B_OV = 1<<29, /* Bit 29: 256-511 Byte Rx Cnt Ov*/
2283+ XMR_255B_OV = 1<<28, /* Bit 28: 128-255 Byte Rx Cnt Ov*/
2284+ XMR_127B_OV = 1<<27, /* Bit 27: 65-127 Byte Rx Cnt Ov */
2285+ XMR_64B_OV = 1<<26, /* Bit 26: 64 Byte Rx Cnt Ov */
2286+ XMR_UTIL_OV = 1<<25, /* Bit 25: Rx Util Cnt Overflow */
2287+ XMR_UTIL_UR = 1<<24, /* Bit 24: Rx Util Cnt Underrun */
2288+ XMR_CEX_ERR_OV = 1<<23, /* Bit 23: CEXT Err Cnt Ov */
2289+ XMR_FCS_ERR_OV = 1<<21, /* Bit 21: Rx FCS Error Cnt Ov */
2290+ XMR_LNG_ERR_OV = 1<<20, /* Bit 20: Rx too Long Err Cnt Ov*/
2291+ XMR_RUNT_OV = 1<<19, /* Bit 19: Runt Event Cnt Ov */
2292+ XMR_SHT_ERR_OV = 1<<18, /* Bit 18: Rx Short Ev Err Cnt Ov*/
2293+ XMR_SYM_ERR_OV = 1<<17, /* Bit 17: Rx Sym Err Cnt Ov */
2294+ XMR_CAR_ERR_OV = 1<<15, /* Bit 15: Rx Carr Ev Err Cnt Ov */
2295+ XMR_JAB_PKT_OV = 1<<14, /* Bit 14: Rx Jabb Packet Cnt Ov */
2296+ XMR_FIFO_OV = 1<<13, /* Bit 13: Rx FIFO Ov Ev Cnt Ov */
2297+ XMR_FRA_ERR_OV = 1<<12, /* Bit 12: Rx Framing Err Cnt Ov */
2298+ XMR_FMISS_OV = 1<<11, /* Bit 11: Rx Missed Ev Cnt Ov */
2299+ XMR_BURST = 1<<10, /* Bit 10: Rx Burst Event Cnt Ov */
2300+ XMR_INV_MOC = 1<<9, /* Bit 9: Rx with inv. MAC OC Ov*/
2301+ XMR_INV_MP = 1<<8, /* Bit 8: Rx inv Pause Frame Ov */
2302+ XMR_MCTRL_OV = 1<<7, /* Bit 7: Rx MAC Ctrl-F Cnt Ov */
2303+ XMR_MPAUSE_OV = 1<<6, /* Bit 6: Rx Pause MAC Ctrl-F Ov*/
2304+ XMR_UC_OK_OV = 1<<5, /* Bit 5: Rx Unicast Frame CntOv*/
2305+ XMR_MC_OK_OV = 1<<4, /* Bit 4: Rx Multicast Cnt Ov */
2306+ XMR_BC_OK_OV = 1<<3, /* Bit 3: Rx Broadcast Cnt Ov */
2307+ XMR_OK_LO_OV = 1<<2, /* Bit 2: Octets Rx OK Low CntOv*/
2308+ XMR_OK_HI_OV = 1<<1, /* Bit 1: Octets Rx OK Hi Cnt Ov*/
2309+ XMR_OK_OV = 1<<0, /* Bit 0: Frames Received Ok Ov */
2310+};
2311+
2312+#define XMR_DEF_MSK (XMR_OK_LO_OV | XMR_OK_HI_OV)
2313+
2314+/* XM_TX_CNT_EV 32 bit r/o Tx Counter Event Register */
2315+/* XM_TX_EV_MSK 32 bit r/w Tx Counter Event Mask */
2316+enum {
2317+ XMT_MAX_SZ_OV = 1<<25, /* Bit 25: 1024-MaxSize Tx Cnt Ov*/
2318+ XMT_1023B_OV = 1<<24, /* Bit 24: 512-1023Byte Tx Cnt Ov*/
2319+ XMT_511B_OV = 1<<23, /* Bit 23: 256-511 Byte Tx Cnt Ov*/
2320+ XMT_255B_OV = 1<<22, /* Bit 22: 128-255 Byte Tx Cnt Ov*/
2321+ XMT_127B_OV = 1<<21, /* Bit 21: 65-127 Byte Tx Cnt Ov */
2322+ XMT_64B_OV = 1<<20, /* Bit 20: 64 Byte Tx Cnt Ov */
2323+ XMT_UTIL_OV = 1<<19, /* Bit 19: Tx Util Cnt Overflow */
2324+ XMT_UTIL_UR = 1<<18, /* Bit 18: Tx Util Cnt Underrun */
2325+ XMT_CS_ERR_OV = 1<<17, /* Bit 17: Tx Carr Sen Err Cnt Ov*/
2326+ XMT_FIFO_UR_OV = 1<<16, /* Bit 16: Tx FIFO Ur Ev Cnt Ov */
2327+ XMT_EX_DEF_OV = 1<<15, /* Bit 15: Tx Ex Deferral Cnt Ov */
2328+ XMT_DEF = 1<<14, /* Bit 14: Tx Deferred Cnt Ov */
2329+ XMT_LAT_COL_OV = 1<<13, /* Bit 13: Tx Late Col Cnt Ov */
2330+ XMT_ABO_COL_OV = 1<<12, /* Bit 12: Tx aborted due to Ex Col Ov*/
2331+ XMT_MUL_COL_OV = 1<<11, /* Bit 11: Tx Mult Col Cnt Ov */
2332+ XMT_SNG_COL = 1<<10, /* Bit 10: Tx Single Col Cnt Ov */
2333+ XMT_MCTRL_OV = 1<<9, /* Bit 9: Tx MAC Ctrl Counter Ov*/
2334+ XMT_MPAUSE = 1<<8, /* Bit 8: Tx Pause MAC Ctrl-F Ov*/
2335+ XMT_BURST = 1<<7, /* Bit 7: Tx Burst Event Cnt Ov */
2336+ XMT_LONG = 1<<6, /* Bit 6: Tx Long Frame Cnt Ov */
2337+ XMT_UC_OK_OV = 1<<5, /* Bit 5: Tx Unicast Cnt Ov */
2338+ XMT_MC_OK_OV = 1<<4, /* Bit 4: Tx Multicast Cnt Ov */
2339+ XMT_BC_OK_OV = 1<<3, /* Bit 3: Tx Broadcast Cnt Ov */
2340+ XMT_OK_LO_OV = 1<<2, /* Bit 2: Octets Tx OK Low CntOv*/
2341+ XMT_OK_HI_OV = 1<<1, /* Bit 1: Octets Tx OK Hi Cnt Ov*/
2342+ XMT_OK_OV = 1<<0, /* Bit 0: Frames Tx Ok Ov */
2343+};
2344+
2345+#define XMT_DEF_MSK (XMT_OK_LO_OV | XMT_OK_HI_OV)
2346+
2347+struct skge_rx_desc {
2348+ u32 control;
2349+ u32 next_offset;
2350+ u32 dma_lo;
2351+ u32 dma_hi;
2352+ u32 status;
2353+ u32 timestamp;
2354+ u16 csum2;
2355+ u16 csum1;
2356+ u16 csum2_start;
2357+ u16 csum1_start;
2358+};
2359+
2360+struct skge_tx_desc {
2361+ u32 control;
2362+ u32 next_offset;
2363+ u32 dma_lo;
2364+ u32 dma_hi;
2365+ u32 status;
2366+ u32 csum_offs;
2367+ u16 csum_write;
2368+ u16 csum_start;
2369+ u32 rsvd;
2370+};
2371+
2372+struct skge_element {
2373+ struct skge_element *next;
2374+ void *desc;
2375+ struct sk_buff *skb;
2376+ DECLARE_PCI_UNMAP_ADDR(mapaddr);
2377+ DECLARE_PCI_UNMAP_LEN(maplen);
2378+};
2379+
2380+struct skge_ring {
2381+ struct skge_element *to_clean;
2382+ struct skge_element *to_use;
2383+ struct skge_element *start;
2384+ unsigned long count;
2385+};
2386+
2387+
2388+struct skge_hw {
2389+ void __iomem *regs;
2390+ struct pci_dev *pdev;
2391+ spinlock_t hw_lock;
2392+ u32 intr_mask;
2393+ struct net_device *dev[2];
2394+
2395+ u8 chip_id;
2396+ u8 chip_rev;
2397+ u8 copper;
2398+ u8 ports;
2399+
2400+ u32 ram_size;
2401+ u32 ram_offset;
2402+ u16 phy_addr;
2403+ struct tasklet_struct phy_task;
2404+ spinlock_t phy_lock;
2405+};
2406+
2407+enum {
2408+ FLOW_MODE_NONE = 0, /* No Flow-Control */
2409+ FLOW_MODE_LOC_SEND = 1, /* Local station sends PAUSE */
2410+ FLOW_MODE_REM_SEND = 2, /* Symmetric or just remote */
2411+ FLOW_MODE_SYMMETRIC = 3, /* Both stations may send PAUSE */
2412+};
2413+
2414+struct skge_port {
2415+ u32 msg_enable;
2416+ struct skge_hw *hw;
2417+ struct net_device *netdev;
2418+ int port;
2419+
2420+ spinlock_t tx_lock;
2421+ struct skge_ring tx_ring;
2422+ struct skge_ring rx_ring;
2423+
2424+ struct net_device_stats net_stats;
2425+
2426+ u8 rx_csum;
2427+ u8 blink_on;
2428+ u8 flow_control;
2429+ u8 wol;
2430+ u8 autoneg; /* AUTONEG_ENABLE, AUTONEG_DISABLE */
2431+ u8 duplex; /* DUPLEX_HALF, DUPLEX_FULL */
2432+ u16 speed; /* SPEED_1000, SPEED_100, ... */
2433+ u32 advertising;
2434+
2435+ void *mem; /* PCI memory for rings */
2436+ dma_addr_t dma;
2437+ unsigned long mem_size;
2438+ unsigned int rx_buf_size;
2439+};
2440+
2441+
2442+/* Register accessor for memory mapped device */
2443+static inline u32 skge_read32(const struct skge_hw *hw, int reg)
2444+{
2445+ return readl(hw->regs + reg);
2446+}
2447+
2448+static inline u16 skge_read16(const struct skge_hw *hw, int reg)
2449+{
2450+ return readw(hw->regs + reg);
2451+}
2452+
2453+static inline u8 skge_read8(const struct skge_hw *hw, int reg)
2454+{
2455+ return readb(hw->regs + reg);
2456+}
2457+
2458+static inline void skge_write32(const struct skge_hw *hw, int reg, u32 val)
2459+{
2460+ writel(val, hw->regs + reg);
2461+}
2462+
2463+static inline void skge_write16(const struct skge_hw *hw, int reg, u16 val)
2464+{
2465+ writew(val, hw->regs + reg);
2466+}
2467+
2468+static inline void skge_write8(const struct skge_hw *hw, int reg, u8 val)
2469+{
2470+ writeb(val, hw->regs + reg);
2471+}
2472+
2473+/* MAC Related Registers inside the device. */
2474+#define SK_REG(port,reg) (((port)<<7)+(reg))
2475+#define SK_XMAC_REG(port, reg) \
2476+ ((BASE_XMAC_1 + (port) * (BASE_XMAC_2 - BASE_XMAC_1)) | (reg) << 1)
2477+
2478+static inline u32 xm_read32(const struct skge_hw *hw, int port, int reg)
2479+{
2480+ u32 v;
2481+ v = skge_read16(hw, SK_XMAC_REG(port, reg));
2482+ v |= (u32)skge_read16(hw, SK_XMAC_REG(port, reg+2)) << 16;
2483+ return v;
2484+}
2485+
2486+static inline u16 xm_read16(const struct skge_hw *hw, int port, int reg)
2487+{
2488+ return skge_read16(hw, SK_XMAC_REG(port,reg));
2489+}
2490+
2491+static inline void xm_write32(const struct skge_hw *hw, int port, int r, u32 v)
2492+{
2493+ skge_write16(hw, SK_XMAC_REG(port,r), v & 0xffff);
2494+ skge_write16(hw, SK_XMAC_REG(port,r+2), v >> 16);
2495+}
2496+
2497+static inline void xm_write16(const struct skge_hw *hw, int port, int r, u16 v)
2498+{
2499+ skge_write16(hw, SK_XMAC_REG(port,r), v);
2500+}
2501+
2502+static inline void xm_outhash(const struct skge_hw *hw, int port, int reg,
2503+ const u8 *hash)
2504+{
2505+ xm_write16(hw, port, reg, (u16)hash[0] | ((u16)hash[1] << 8));
2506+ xm_write16(hw, port, reg+2, (u16)hash[2] | ((u16)hash[3] << 8));
2507+ xm_write16(hw, port, reg+4, (u16)hash[4] | ((u16)hash[5] << 8));
2508+ xm_write16(hw, port, reg+6, (u16)hash[6] | ((u16)hash[7] << 8));
2509+}
2510+
2511+static inline void xm_outaddr(const struct skge_hw *hw, int port, int reg,
2512+ const u8 *addr)
2513+{
2514+ xm_write16(hw, port, reg, (u16)addr[0] | ((u16)addr[1] << 8));
2515+ xm_write16(hw, port, reg+2, (u16)addr[2] | ((u16)addr[3] << 8));
2516+ xm_write16(hw, port, reg+4, (u16)addr[4] | ((u16)addr[5] << 8));
2517+}
2518+
2519+#define SK_GMAC_REG(port,reg) \
2520+ (BASE_GMAC_1 + (port) * (BASE_GMAC_2-BASE_GMAC_1) + (reg))
2521+
2522+static inline u16 gma_read16(const struct skge_hw *hw, int port, int reg)
2523+{
2524+ return skge_read16(hw, SK_GMAC_REG(port,reg));
2525+}
2526+
2527+static inline u32 gma_read32(const struct skge_hw *hw, int port, int reg)
2528+{
2529+ return (u32) skge_read16(hw, SK_GMAC_REG(port,reg))
2530+ | ((u32)skge_read16(hw, SK_GMAC_REG(port,reg+4)) << 16);
2531+}
2532+
2533+static inline void gma_write16(const struct skge_hw *hw, int port, int r, u16 v)
2534+{
2535+ skge_write16(hw, SK_GMAC_REG(port,r), v);
2536+}
2537+
2538+static inline void gma_set_addr(struct skge_hw *hw, int port, int reg,
2539+ const u8 *addr)
2540+{
2541+ gma_write16(hw, port, reg, (u16) addr[0] | ((u16) addr[1] << 8));
2542+ gma_write16(hw, port, reg+4,(u16) addr[2] | ((u16) addr[3] << 8));
2543+ gma_write16(hw, port, reg+8,(u16) addr[4] | ((u16) addr[5] << 8));
2544+}
2545+
2546+#endif
--- /dev/null
+++ b/drivers/net/skge_backport.h
@@ -0,0 +1,80 @@
1+/*
2+ * Backport hacks. Redefine world to look as close to current 2.6
3+ * as possible.
4+ */
5+
6+#include <linux/version.h>
7+
8+#ifndef __iomem
9+#define __iomem
10+#endif
11+
12+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
13+#define pci_dma_sync_single_for_device(pdev, addr, len, dir)
14+#define pci_dma_sync_single_for_cpu(pdev, addr, len, dir) \
15+ pci_dma_sync_single(pdev, addr, len, dir)
16+
17+#else
18+#include <linux/dma-mapping.h>
19+#endif
20+
21+#ifndef DMA_32BIT_MASK
22+#define DMA_64BIT_MASK 0xffffffffffffffffULL
23+#define DMA_32BIT_MASK 0x00000000ffffffffULL
24+#endif
25+
26+#ifndef module_param
27+#define module_param(var, type, perm) \
28+ MODULE_PARM(var, "i");
29+#endif
30+
31+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9)
32+static unsigned long msleep_interruptible(unsigned int msecs) /* backport of the 2.6 helper: sleep ~msecs ms, waking early if a signal arrives; returns ms remaining (0 = full delay elapsed) */
33+{
34+ unsigned long timeout = msecs_to_jiffies(msecs) + 1; /* +1 jiffy rounds up so we sleep at least msecs */
35+
36+ __set_current_state(TASK_INTERRUPTIBLE);
37+ while (timeout && !signal_pending(current))
38+ timeout = schedule_timeout(timeout);
39+ return jiffies_to_msecs(timeout); /* leftover jiffies converted back to ms */
40+}
41+#endif
42+
43+#ifndef PCI_DEVICE
44+#define PCI_DEVICE(vend,dev) \
45+ .vendor = (vend), .device = (dev), \
46+ .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
47+#endif
48+
49+#ifndef HAVE_NETDEV_PRIV
50+#define netdev_priv(dev) ((dev)->priv)
51+#endif
52+
53+#ifndef ALIGN
54+#define ALIGN(x,a) (((x)+(a)-1)&~((a)-1))
55+#endif
56+
57+#ifndef NET_IP_ALIGN
58+#define NET_IP_ALIGN 2
59+#endif
60+
61+#ifndef NETDEV_TX_OK
62+#define NETDEV_TX_OK 0 /* driver took care of packet */
63+#define NETDEV_TX_BUSY 1 /* driver tx path was busy*/
64+#endif
65+
66+#ifndef IRQ_NONE
67+#define irqreturn_t void
68+#define IRQ_NONE
69+#define IRQ_HANDLED
70+#endif
71+
72+#ifndef PCI_D0
73+#define PCI_D0 0
74+#define PCI_D1 1
75+#define PCI_D2 2
76+#define PCI_D3hot 3
77+#define PCI_D3cold 4
78+typedef int pci_power_t;
79+#endif
80+
--- /dev/null
+++ b/drivers/net/sky2.c
@@ -0,0 +1,3250 @@
1+/*
2+ * New driver for Marvell Yukon 2 chipset.
3+ * Based on earlier sk98lin, and skge driver.
4+ *
5+ * This driver intentionally does not support all the features
6+ * of the original driver such as link fail-over and link management because
7+ * those should be done at higher levels.
8+ *
9+ * Copyright (C) 2005 Stephen Hemminger <shemminger@osdl.org>
10+ *
11+ * This program is free software; you can redistribute it and/or modify
12+ * it under the terms of the GNU General Public License as published by
13+ * the Free Software Foundation; either version 2 of the License
14+ *
15+ * This program is distributed in the hope that it will be useful,
16+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
17+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18+ * GNU General Public License for more details.
19+ *
20+ * You should have received a copy of the GNU General Public License
21+ * along with this program; if not, write to the Free Software
22+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23+ */
24+
25+#include <linux/crc32.h>
26+#include <linux/kernel.h>
27+#include <linux/version.h>
28+#include <linux/module.h>
29+#include <linux/netdevice.h>
30+#include <linux/etherdevice.h>
31+#include <linux/ethtool.h>
32+#include <linux/pci.h>
33+#include <linux/ip.h>
34+#include <linux/tcp.h>
35+#include <linux/in.h>
36+#include <linux/init.h>
37+#include <linux/delay.h>
38+#include <linux/if_vlan.h>
39+#include <linux/prefetch.h>
40+#include <linux/mii.h>
41+
42+#include <asm/bitops.h>
43+#include <asm/byteorder.h>
44+#include <asm/io.h>
45+#include <asm/irq.h>
46+
47+#include "skge_backport.h"
48+#include "sky2.h"
49+
50+#define DRV_NAME "sky2"
51+#define DRV_VERSION "1.5 classic"
52+#define PFX DRV_NAME " "
53+
54+/*
55+ * The Yukon II chipset takes 64 bit command blocks (called list elements)
56+ * that are organized into three (receive, transmit, status) different rings
57+ * similar to Tigon3. A transmit can require several elements;
58+ * a receive requires one (or two if using 64 bit dma).
59+ */
60+
61+#define RX_LE_SIZE 512
62+#define RX_LE_BYTES (RX_LE_SIZE*sizeof(struct sky2_rx_le))
63+#define RX_MAX_PENDING (RX_LE_SIZE/2 - 2)
64+#define RX_DEF_PENDING RX_MAX_PENDING
65+#define RX_SKB_ALIGN 8
66+#define RX_BUF_WRITE 16
67+
68+#define TX_RING_SIZE 512
69+#define TX_DEF_PENDING (TX_RING_SIZE - 1)
70+#define TX_MIN_PENDING 64
71+#define MAX_SKB_TX_LE (4 + (sizeof(dma_addr_t)/sizeof(u32))*MAX_SKB_FRAGS)
72+
73+#define STATUS_RING_SIZE 2048 /* 2 ports * (TX + 2*RX) */
74+#define STATUS_LE_BYTES (STATUS_RING_SIZE*sizeof(struct sky2_status_le))
75+#define ETH_JUMBO_MTU 9000
76+#define TX_WATCHDOG (5 * HZ)
77+
78+#define PHY_RETRIES 1000
79+
80+#define RING_NEXT(x,s) (((x)+1) & ((s)-1))
81+
82+static const u32 default_msg =
83+ NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
84+ | NETIF_MSG_TIMER | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR
85+ | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;
86+
87+static int debug = -1; /* defaults above */
88+module_param(debug, int, 0);
89+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
90+
91+static int copybreak = 256;
92+module_param(copybreak, int, 0);
93+MODULE_PARM_DESC(copybreak, "Receive copy threshold");
94+
95+static const struct pci_device_id sky2_id_table[] = {
96+ { PCI_DEVICE(0x1148, 0x9000) },
97+ { PCI_DEVICE(0x1148, 0x9E00) },
98+ { PCI_DEVICE(0x1186, 0x4b00) }, /* DGE-560T */
99+ { PCI_DEVICE(0x11ab, 0x4340) },
100+ { PCI_DEVICE(0x11ab, 0x4341) },
101+ { PCI_DEVICE(0x11ab, 0x4342) },
102+ { PCI_DEVICE(0x11ab, 0x4343) },
103+ { PCI_DEVICE(0x11ab, 0x4344) },
104+ { PCI_DEVICE(0x11ab, 0x4345) },
105+ { PCI_DEVICE(0x11ab, 0x4346) },
106+ { PCI_DEVICE(0x11ab, 0x4347) },
107+ { PCI_DEVICE(0x11ab, 0x4350) },
108+ { PCI_DEVICE(0x11ab, 0x4351) },
109+ { PCI_DEVICE(0x11ab, 0x4352) },
110+ { PCI_DEVICE(0x11ab, 0x4360) },
111+ { PCI_DEVICE(0x11ab, 0x4361) },
112+ { PCI_DEVICE(0x11ab, 0x4362) },
113+ { PCI_DEVICE(0x11ab, 0x4363) },
114+ { PCI_DEVICE(0x11ab, 0x4364) },
115+ { PCI_DEVICE(0x11ab, 0x4365) },
116+ { PCI_DEVICE(0x11ab, 0x4366) },
117+ { PCI_DEVICE(0x11ab, 0x4367) },
118+ { PCI_DEVICE(0x11ab, 0x4368) },
119+ { 0 }
120+};
121+
122+MODULE_DEVICE_TABLE(pci, sky2_id_table);
123+
124+/* Avoid conditionals by using array */
125+static const unsigned txqaddr[] = { Q_XA1, Q_XA2 };
126+static const unsigned rxqaddr[] = { Q_R1, Q_R2 };
127+static const u32 portirq_msk[] = { Y2_IS_PORT_1, Y2_IS_PORT_2 };
128+
129+/* This driver supports yukon2 chipset only */
130+static const char *yukon2_name[] = {
131+ "XL", /* 0xb3 */
132+ "EC Ultra", /* 0xb4 */
133+ "UNKNOWN", /* 0xb5 */
134+ "EC", /* 0xb6 */
135+ "FE", /* 0xb7 */
136+};
137+
138+/* Access to external PHY */
139+static int gm_phy_write(struct sky2_hw *hw, unsigned port, u16 reg, u16 val)
140+{
141+ int i;
142+
143+ gma_write16(hw, port, GM_SMI_DATA, val);
144+ gma_write16(hw, port, GM_SMI_CTRL,
145+ GM_SMI_CT_PHY_AD(PHY_ADDR_MARV) | GM_SMI_CT_REG_AD(reg));
146+
147+ for (i = 0; i < PHY_RETRIES; i++) {
148+ if (!(gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_BUSY))
149+ return 0;
150+ udelay(1);
151+ }
152+
153+ printk(KERN_WARNING PFX "%s: phy write timeout\n", hw->dev[port]->name);
154+ return -ETIMEDOUT;
155+}
156+
157+static int __gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg, u16 *val)
158+{
159+ int i;
160+
161+ gma_write16(hw, port, GM_SMI_CTRL, GM_SMI_CT_PHY_AD(PHY_ADDR_MARV)
162+ | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);
163+
164+ for (i = 0; i < PHY_RETRIES; i++) {
165+ if (gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_RD_VAL) {
166+ *val = gma_read16(hw, port, GM_SMI_DATA);
167+ return 0;
168+ }
169+
170+ udelay(1);
171+ }
172+
173+ return -ETIMEDOUT;
174+}
175+
176+static u16 gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg)
177+{
178+ u16 v;
179+
180+ if (__gm_phy_read(hw, port, reg, &v) != 0)
181+ printk(KERN_WARNING PFX "%s: phy read timeout\n", hw->dev[port]->name);
182+ return v;
183+}
184+
185+static void sky2_set_power_state(struct sky2_hw *hw, pci_power_t state)
186+{
187+ u16 power_control;
188+ u32 reg1;
189+ int vaux;
190+
191+ pr_debug("sky2_set_power_state %d\n", state);
192+ sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
193+
194+ power_control = sky2_pci_read16(hw, hw->pm_cap + PCI_PM_PMC);
195+ vaux = (sky2_read16(hw, B0_CTST) & Y2_VAUX_AVAIL) &&
196+ (power_control & PCI_PM_CAP_PME_D3cold);
197+
198+ power_control = sky2_pci_read16(hw, hw->pm_cap + PCI_PM_CTRL);
199+
200+ power_control |= PCI_PM_CTRL_PME_STATUS;
201+ power_control &= ~(PCI_PM_CTRL_STATE_MASK);
202+
203+ switch (state) {
204+ case PCI_D0:
205+ /* switch power to VCC (WA for VAUX problem) */
206+ sky2_write8(hw, B0_POWER_CTRL,
207+ PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
208+
209+ /* disable Core Clock Division, */
210+ sky2_write32(hw, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS);
211+
212+ if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
213+ /* enable bits are inverted */
214+ sky2_write8(hw, B2_Y2_CLK_GATE,
215+ Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
216+ Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
217+ Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS);
218+ else
219+ sky2_write8(hw, B2_Y2_CLK_GATE, 0);
220+
221+ /* Turn off phy power saving */
222+ reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
223+ reg1 &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
224+
225+ /* looks like this XL is back asswards .. */
226+ if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1) {
227+ reg1 |= PCI_Y2_PHY1_COMA;
228+ if (hw->ports > 1)
229+ reg1 |= PCI_Y2_PHY2_COMA;
230+ }
231+
232+ if (hw->chip_id == CHIP_ID_YUKON_EC_U) {
233+ sky2_pci_write32(hw, PCI_DEV_REG3, 0);
234+ reg1 = sky2_pci_read32(hw, PCI_DEV_REG4);
235+ reg1 &= P_ASPM_CONTROL_MSK;
236+ sky2_pci_write32(hw, PCI_DEV_REG4, reg1);
237+ sky2_pci_write32(hw, PCI_DEV_REG5, 0);
238+ }
239+
240+ sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
241+ udelay(100);
242+
243+ break;
244+
245+ case PCI_D3hot:
246+ case PCI_D3cold:
247+ /* Turn on phy power saving */
248+ reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
249+ if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
250+ reg1 &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
251+ else
252+ reg1 |= (PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
253+ sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
254+ udelay(100);
255+
256+ if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
257+ sky2_write8(hw, B2_Y2_CLK_GATE, 0);
258+ else
259+ /* enable bits are inverted */
260+ sky2_write8(hw, B2_Y2_CLK_GATE,
261+ Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
262+ Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
263+ Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS);
264+
265+ /* switch power to VAUX */
266+ if (vaux && state != PCI_D3cold)
267+ sky2_write8(hw, B0_POWER_CTRL,
268+ (PC_VAUX_ENA | PC_VCC_ENA |
269+ PC_VAUX_ON | PC_VCC_OFF));
270+ break;
271+ default:
272+ printk(KERN_ERR PFX "Unknown power state %d\n", state);
273+ }
274+
275+ sky2_pci_write16(hw, hw->pm_cap + PCI_PM_CTRL, power_control);
276+ sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
277+}
278+
279+static void sky2_phy_reset(struct sky2_hw *hw, unsigned port)
280+{
281+ u16 reg;
282+
283+ /* disable all GMAC IRQ's */
284+ sky2_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0);
285+ /* disable PHY IRQs */
286+ gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);
287+
288+ gma_write16(hw, port, GM_MC_ADDR_H1, 0); /* clear MC hash */
289+ gma_write16(hw, port, GM_MC_ADDR_H2, 0);
290+ gma_write16(hw, port, GM_MC_ADDR_H3, 0);
291+ gma_write16(hw, port, GM_MC_ADDR_H4, 0);
292+
293+ reg = gma_read16(hw, port, GM_RX_CTRL);
294+ reg |= GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA;
295+ gma_write16(hw, port, GM_RX_CTRL, reg);
296+}
297+
298+static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
299+{
300+ struct sky2_port *sky2 = netdev_priv(hw->dev[port]);
301+ u16 ctrl, ct1000, adv, pg, ledctrl, ledover;
302+
303+ if (sky2->autoneg == AUTONEG_ENABLE &&
304+ !(hw->chip_id == CHIP_ID_YUKON_XL || hw->chip_id == CHIP_ID_YUKON_EC_U)) {
305+ u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL);
306+
307+ ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK |
308+ PHY_M_EC_MAC_S_MSK);
309+ ectrl |= PHY_M_EC_MAC_S(MAC_TX_CLK_25_MHZ);
310+
311+ if (hw->chip_id == CHIP_ID_YUKON_EC)
312+ ectrl |= PHY_M_EC_DSC_2(2) | PHY_M_EC_DOWN_S_ENA;
313+ else
314+ ectrl |= PHY_M_EC_M_DSC(2) | PHY_M_EC_S_DSC(3);
315+
316+ gm_phy_write(hw, port, PHY_MARV_EXT_CTRL, ectrl);
317+ }
318+
319+ ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
320+ if (hw->copper) {
321+ if (hw->chip_id == CHIP_ID_YUKON_FE) {
322+ /* enable automatic crossover */
323+ ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO) >> 1;
324+ } else {
325+ /* disable energy detect */
326+ ctrl &= ~PHY_M_PC_EN_DET_MSK;
327+
328+ /* enable automatic crossover */
329+ ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO);
330+
331+ if (sky2->autoneg == AUTONEG_ENABLE &&
332+ (hw->chip_id == CHIP_ID_YUKON_XL || hw->chip_id == CHIP_ID_YUKON_EC_U)) {
333+ ctrl &= ~PHY_M_PC_DSC_MSK;
334+ ctrl |= PHY_M_PC_DSC(2) | PHY_M_PC_DOWN_S_ENA;
335+ }
336+ }
337+ gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
338+ } else {
339+ /* workaround for deviation #4.88 (CRC errors) */
340+ /* disable Automatic Crossover */
341+
342+ ctrl &= ~PHY_M_PC_MDIX_MSK;
343+ gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
344+
345+ if (hw->chip_id == CHIP_ID_YUKON_XL) {
346+ /* Fiber: select 1000BASE-X only mode MAC Specific Ctrl Reg. */
347+ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 2);
348+ ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
349+ ctrl &= ~PHY_M_MAC_MD_MSK;
350+ ctrl |= PHY_M_MAC_MODE_SEL(PHY_M_MAC_MD_1000BX);
351+ gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
352+
353+ /* select page 1 to access Fiber registers */
354+ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 1);
355+ }
356+ }
357+
358+ ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL);
359+ if (sky2->autoneg == AUTONEG_DISABLE)
360+ ctrl &= ~PHY_CT_ANE;
361+ else
362+ ctrl |= PHY_CT_ANE;
363+
364+ ctrl |= PHY_CT_RESET;
365+ gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);
366+
367+ ctrl = 0;
368+ ct1000 = 0;
369+ adv = PHY_AN_CSMA;
370+
371+ if (sky2->autoneg == AUTONEG_ENABLE) {
372+ if (hw->copper) {
373+ if (sky2->advertising & ADVERTISED_1000baseT_Full)
374+ ct1000 |= PHY_M_1000C_AFD;
375+ if (sky2->advertising & ADVERTISED_1000baseT_Half)
376+ ct1000 |= PHY_M_1000C_AHD;
377+ if (sky2->advertising & ADVERTISED_100baseT_Full)
378+ adv |= PHY_M_AN_100_FD;
379+ if (sky2->advertising & ADVERTISED_100baseT_Half)
380+ adv |= PHY_M_AN_100_HD;
381+ if (sky2->advertising & ADVERTISED_10baseT_Full)
382+ adv |= PHY_M_AN_10_FD;
383+ if (sky2->advertising & ADVERTISED_10baseT_Half)
384+ adv |= PHY_M_AN_10_HD;
385+ } else /* special defines for FIBER (88E1011S only) */
386+ adv |= PHY_M_AN_1000X_AHD | PHY_M_AN_1000X_AFD;
387+
388+ /* Set Flow-control capabilities */
389+ if (sky2->tx_pause && sky2->rx_pause)
390+ adv |= PHY_AN_PAUSE_CAP; /* symmetric */
391+ else if (sky2->rx_pause && !sky2->tx_pause)
392+ adv |= PHY_AN_PAUSE_ASYM | PHY_AN_PAUSE_CAP;
393+ else if (!sky2->rx_pause && sky2->tx_pause)
394+ adv |= PHY_AN_PAUSE_ASYM; /* local */
395+
396+ /* Restart Auto-negotiation */
397+ ctrl |= PHY_CT_ANE | PHY_CT_RE_CFG;
398+ } else {
399+ /* forced speed/duplex settings */
400+ ct1000 = PHY_M_1000C_MSE;
401+
402+ if (sky2->duplex == DUPLEX_FULL)
403+ ctrl |= PHY_CT_DUP_MD;
404+
405+ switch (sky2->speed) {
406+ case SPEED_1000:
407+ ctrl |= PHY_CT_SP1000;
408+ break;
409+ case SPEED_100:
410+ ctrl |= PHY_CT_SP100;
411+ break;
412+ }
413+
414+ ctrl |= PHY_CT_RESET;
415+ }
416+
417+ if (hw->chip_id != CHIP_ID_YUKON_FE)
418+ gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, ct1000);
419+
420+ gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, adv);
421+ gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);
422+
423+ /* Setup Phy LED's */
424+ ledctrl = PHY_M_LED_PULS_DUR(PULS_170MS);
425+ ledover = 0;
426+
427+ switch (hw->chip_id) {
428+ case CHIP_ID_YUKON_FE:
429+ /* on 88E3082 these bits are at 11..9 (shifted left) */
430+ ledctrl |= PHY_M_LED_BLINK_RT(BLINK_84MS) << 1;
431+
432+ ctrl = gm_phy_read(hw, port, PHY_MARV_FE_LED_PAR);
433+
434+ /* delete ACT LED control bits */
435+ ctrl &= ~PHY_M_FELP_LED1_MSK;
436+ /* change ACT LED control to blink mode */
437+ ctrl |= PHY_M_FELP_LED1_CTRL(LED_PAR_CTRL_ACT_BL);
438+ gm_phy_write(hw, port, PHY_MARV_FE_LED_PAR, ctrl);
439+ break;
440+
441+ case CHIP_ID_YUKON_XL:
442+ pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
443+
444+ /* select page 3 to access LED control register */
445+ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
446+
447+ /* set LED Function Control register */
448+ gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
449+ (PHY_M_LEDC_LOS_CTRL(1) | /* LINK/ACT */
450+ PHY_M_LEDC_INIT_CTRL(7) | /* 10 Mbps */
451+ PHY_M_LEDC_STA1_CTRL(7) | /* 100 Mbps */
452+ PHY_M_LEDC_STA0_CTRL(7))); /* 1000 Mbps */
453+
454+ /* set Polarity Control register */
455+ gm_phy_write(hw, port, PHY_MARV_PHY_STAT,
456+ (PHY_M_POLC_LS1_P_MIX(4) |
457+ PHY_M_POLC_IS0_P_MIX(4) |
458+ PHY_M_POLC_LOS_CTRL(2) |
459+ PHY_M_POLC_INIT_CTRL(2) |
460+ PHY_M_POLC_STA1_CTRL(2) |
461+ PHY_M_POLC_STA0_CTRL(2)));
462+
463+ /* restore page register */
464+ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
465+ break;
466+ case CHIP_ID_YUKON_EC_U:
467+ pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
468+
469+ /* select page 3 to access LED control register */
470+ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
471+
472+ /* set LED Function Control register */
473+ gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
474+ (PHY_M_LEDC_LOS_CTRL(1) | /* LINK/ACT */
475+ PHY_M_LEDC_INIT_CTRL(8) | /* 10 Mbps */
476+ PHY_M_LEDC_STA1_CTRL(7) | /* 100 Mbps */
477+ PHY_M_LEDC_STA0_CTRL(7)));/* 1000 Mbps */
478+
479+ /* set Blink Rate in LED Timer Control Register */
480+ gm_phy_write(hw, port, PHY_MARV_INT_MASK,
481+ ledctrl | PHY_M_LED_BLINK_RT(BLINK_84MS));
482+ /* restore page register */
483+ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
484+ break;
485+
486+ default:
487+ /* set Tx LED (LED_TX) to blink mode on Rx OR Tx activity */
488+ ledctrl |= PHY_M_LED_BLINK_RT(BLINK_84MS) | PHY_M_LEDC_TX_CTRL;
489+ /* turn off the Rx LED (LED_RX) */
490+ ledover |= PHY_M_LED_MO_RX(MO_LED_OFF);
491+ }
492+
493+ if (hw->chip_id == CHIP_ID_YUKON_EC_U && hw->chip_rev == CHIP_REV_YU_EC_A1) {
494+ /* apply fixes in PHY AFE */
495+ pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
496+ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 255);
497+
498+ /* increase differential signal amplitude in 10BASE-T */
499+ gm_phy_write(hw, port, 0x18, 0xaa99);
500+ gm_phy_write(hw, port, 0x17, 0x2011);
501+
502+ /* fix for IEEE A/B Symmetry failure in 1000BASE-T */
503+ gm_phy_write(hw, port, 0x18, 0xa204);
504+ gm_phy_write(hw, port, 0x17, 0x2002);
505+
506+ /* set page register to 0 */
507+ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
508+ } else {
509+ gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl);
510+
511+ if (sky2->autoneg == AUTONEG_DISABLE || sky2->speed == SPEED_100) {
512+ /* turn on 100 Mbps LED (LED_LINK100) */
513+ ledover |= PHY_M_LED_MO_100(MO_LED_ON);
514+ }
515+
516+ if (ledover)
517+ gm_phy_write(hw, port, PHY_MARV_LED_OVER, ledover);
518+
519+ }
520+ /* Enable phy interrupt on auto-negotiation complete (or link up) */
521+ if (sky2->autoneg == AUTONEG_ENABLE)
522+ gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_AN_COMPL);
523+ else
524+ gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK);
525+}
526+
527+/* Force a renegotiation */
528+static void sky2_phy_reinit(struct sky2_port *sky2)
529+{
530+ spin_lock_bh(&sky2->phy_lock);
531+ sky2_phy_init(sky2->hw, sky2->port);
532+ spin_unlock_bh(&sky2->phy_lock);
533+}
534+
535+static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
536+{
537+ struct sky2_port *sky2 = netdev_priv(hw->dev[port]);
538+ u16 reg;
539+ int i;
540+ const u8 *addr = hw->dev[port]->dev_addr;
541+
542+ sky2_write32(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
543+ sky2_write32(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR|GPC_ENA_PAUSE);
544+
545+ sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR);
546+
547+ if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0 && port == 1) {
548+ /* WA DEV_472 -- looks like crossed wires on port 2 */
549+ /* clear GMAC 1 Control reset */
550+ sky2_write8(hw, SK_REG(0, GMAC_CTRL), GMC_RST_CLR);
551+ do {
552+ sky2_write8(hw, SK_REG(1, GMAC_CTRL), GMC_RST_SET);
553+ sky2_write8(hw, SK_REG(1, GMAC_CTRL), GMC_RST_CLR);
554+ } while (gm_phy_read(hw, 1, PHY_MARV_ID0) != PHY_MARV_ID0_VAL ||
555+ gm_phy_read(hw, 1, PHY_MARV_ID1) != PHY_MARV_ID1_Y2 ||
556+ gm_phy_read(hw, 1, PHY_MARV_INT_MASK) != 0);
557+ }
558+
559+ if (sky2->autoneg == AUTONEG_DISABLE) {
560+ reg = gma_read16(hw, port, GM_GP_CTRL);
561+ reg |= GM_GPCR_AU_ALL_DIS;
562+ gma_write16(hw, port, GM_GP_CTRL, reg);
563+ gma_read16(hw, port, GM_GP_CTRL);
564+
565+ switch (sky2->speed) {
566+ case SPEED_1000:
567+ reg &= ~GM_GPCR_SPEED_100;
568+ reg |= GM_GPCR_SPEED_1000;
569+ break;
570+ case SPEED_100:
571+ reg &= ~GM_GPCR_SPEED_1000;
572+ reg |= GM_GPCR_SPEED_100;
573+ break;
574+ case SPEED_10:
575+ reg &= ~(GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100);
576+ break;
577+ }
578+
579+ if (sky2->duplex == DUPLEX_FULL)
580+ reg |= GM_GPCR_DUP_FULL;
581+
582+ /* turn off pause in 10/100mbps half duplex */
583+ else if (sky2->speed != SPEED_1000 &&
584+ hw->chip_id != CHIP_ID_YUKON_EC_U)
585+ sky2->tx_pause = sky2->rx_pause = 0;
586+ } else
587+ reg = GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100 | GM_GPCR_DUP_FULL;
588+
589+ if (!sky2->tx_pause && !sky2->rx_pause) {
590+ sky2_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
591+ reg |=
592+ GM_GPCR_FC_TX_DIS | GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS;
593+ } else if (sky2->tx_pause && !sky2->rx_pause) {
594+ /* disable Rx flow-control */
595+ reg |= GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS;
596+ }
597+
598+ gma_write16(hw, port, GM_GP_CTRL, reg);
599+
600+ sky2_read16(hw, SK_REG(port, GMAC_IRQ_SRC));
601+
602+ spin_lock_bh(&sky2->phy_lock);
603+ sky2_phy_init(hw, port);
604+ spin_unlock_bh(&sky2->phy_lock);
605+
606+ /* MIB clear */
607+ reg = gma_read16(hw, port, GM_PHY_ADDR);
608+ gma_write16(hw, port, GM_PHY_ADDR, reg | GM_PAR_MIB_CLR);
609+
610+ for (i = GM_MIB_CNT_BASE; i <= GM_MIB_CNT_END; i += 4)
611+ gma_read16(hw, port, i);
612+ gma_write16(hw, port, GM_PHY_ADDR, reg);
613+
614+ /* transmit control */
615+ gma_write16(hw, port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF));
616+
617+ /* receive control reg: unicast + multicast + no FCS */
618+ gma_write16(hw, port, GM_RX_CTRL,
619+ GM_RXCR_UCF_ENA | GM_RXCR_CRC_DIS | GM_RXCR_MCF_ENA);
620+
621+ /* transmit flow control */
622+ gma_write16(hw, port, GM_TX_FLOW_CTRL, 0xffff);
623+
624+ /* transmit parameter */
625+ gma_write16(hw, port, GM_TX_PARAM,
626+ TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) |
627+ TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
628+ TX_IPG_JAM_DATA(TX_IPG_JAM_DEF) |
629+ TX_BACK_OFF_LIM(TX_BOF_LIM_DEF));
630+
631+ /* serial mode register */
632+ reg = DATA_BLIND_VAL(DATA_BLIND_DEF) |
633+ GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF);
634+
635+ if (hw->dev[port]->mtu > ETH_DATA_LEN)
636+ reg |= GM_SMOD_JUMBO_ENA;
637+
638+ gma_write16(hw, port, GM_SERIAL_MODE, reg);
639+
640+ /* virtual address for data */
641+ gma_set_addr(hw, port, GM_SRC_ADDR_2L, addr);
642+
643+ /* physical address: used for pause frames */
644+ gma_set_addr(hw, port, GM_SRC_ADDR_1L, addr);
645+
646+ /* ignore counter overflows */
647+ gma_write16(hw, port, GM_TX_IRQ_MSK, 0);
648+ gma_write16(hw, port, GM_RX_IRQ_MSK, 0);
649+ gma_write16(hw, port, GM_TR_IRQ_MSK, 0);
650+
651+ /* Configure Rx MAC FIFO */
652+ sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR);
653+ sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T),
654+ GMF_OPER_ON | GMF_RX_F_FL_ON);
655+
656+ /* Flush Rx MAC FIFO on any flow control or error */
657+ sky2_write16(hw, SK_REG(port, RX_GMF_FL_MSK), GMR_FS_ANY_ERR);
658+
659+ /* Set threshold to 0xa (64 bytes)
660+ * ASF disabled so no need to do WA dev #4.30
661+ */
662+ sky2_write16(hw, SK_REG(port, RX_GMF_FL_THR), RX_GMF_FL_THR_DEF);
663+
664+ /* Configure Tx MAC FIFO */
665+ sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR);
666+ sky2_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON);
667+
668+ if (hw->chip_id == CHIP_ID_YUKON_EC_U) {
669+ sky2_write8(hw, SK_REG(port, RX_GMF_LP_THR), 768/8);
670+ sky2_write8(hw, SK_REG(port, RX_GMF_UP_THR), 1024/8);
671+ if (hw->dev[port]->mtu > ETH_DATA_LEN) {
672+ /* set Tx GMAC FIFO Almost Empty Threshold */
673+ sky2_write32(hw, SK_REG(port, TX_GMF_AE_THR), 0x180);
674+ /* Disable Store & Forward mode for TX */
675+ sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_STFW_DIS);
676+ }
677+ }
678+
679+}
680+
681+/* Assign Ram Buffer allocation.
682+ * start and end are in units of 4k bytes
683+ * ram registers are in units of 64bit words
684+ */
685+static void sky2_ramset(struct sky2_hw *hw, u16 q, u8 startk, u8 endk)
686+{
687+ u32 start, end;
688+
689+ start = startk * 4096/8;
690+ end = (endk * 4096/8) - 1;
691+
692+ sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_RST_CLR);
693+ sky2_write32(hw, RB_ADDR(q, RB_START), start);
694+ sky2_write32(hw, RB_ADDR(q, RB_END), end);
695+ sky2_write32(hw, RB_ADDR(q, RB_WP), start);
696+ sky2_write32(hw, RB_ADDR(q, RB_RP), start);
697+
698+ if (q == Q_R1 || q == Q_R2) {
699+ u32 space = (endk - startk) * 4096/8;
700+ u32 tp = space - space/4;
701+
702+ * On receive queues, set the thresholds
703+ * give receiver priority when > 3/4 full
704+ * send pause when down to 2K
705+ */
706+ sky2_write32(hw, RB_ADDR(q, RB_RX_UTHP), tp);
707+ sky2_write32(hw, RB_ADDR(q, RB_RX_LTHP), space/2);
708+
709+ tp = space - 2048/8;
710+ sky2_write32(hw, RB_ADDR(q, RB_RX_UTPP), tp);
711+ sky2_write32(hw, RB_ADDR(q, RB_RX_LTPP), space/4);
712+ } else {
713+ /* Enable store & forward on Tx queues because
714+ * Tx FIFO is only 1K on Yukon
715+ */
716+ sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_STFWD);
717+ }
718+
719+ sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_OP_MD);
720+ sky2_read8(hw, RB_ADDR(q, RB_CTRL));
721+}
722+
723+/* Setup Bus Memory Interface */
724+static void sky2_qset(struct sky2_hw *hw, u16 q)
725+{
726+ sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_CLR_RESET);
727+ sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_OPER_INIT);
728+ sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_FIFO_OP_ON);
729+ sky2_write32(hw, Q_ADDR(q, Q_WM), BMU_WM_DEFAULT);
730+}
731+
732+/* Setup prefetch unit registers. This is the interface between
733+ * hardware and driver list elements
734+ */
735+static void sky2_prefetch_init(struct sky2_hw *hw, u32 qaddr,
736+ u64 addr, u32 last)
737+{
738+ sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_RST_SET);
739+ sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_RST_CLR);
740+ sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_ADDR_HI), addr >> 32);
741+ sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_ADDR_LO), (u32) addr);
742+ sky2_write16(hw, Y2_QADDR(qaddr, PREF_UNIT_LAST_IDX), last);
743+ sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_OP_ON);
744+
745+ sky2_read32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL));
746+}
747+
748+static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2)
749+{
750+ struct sky2_tx_le *le = sky2->tx_le + sky2->tx_prod;
751+
752+ sky2->tx_prod = RING_NEXT(sky2->tx_prod, TX_RING_SIZE);
753+ return le;
754+}
755+
756+/* Update chip's next pointer */
757+static inline void sky2_put_idx(struct sky2_hw *hw, unsigned q, u16 idx)
758+{
759+ wmb();
760+ sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), idx);
761+ sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX));
762+}
763+
764+
765+static inline struct sky2_rx_le *sky2_next_rx(struct sky2_port *sky2)
766+{
767+ struct sky2_rx_le *le = sky2->rx_le + sky2->rx_put;
768+ sky2->rx_put = RING_NEXT(sky2->rx_put, RX_LE_SIZE);
769+ return le;
770+}
771+
772+/* Return high part of DMA address (could be 32 or 64 bit) */
773+static inline u32 high32(dma_addr_t a)
774+{
775+ return sizeof(a) > sizeof(u32) ? (a >> 16) >> 16 : 0;
776+}
777+
778+/* Build description to hardware about buffer */
779+static void sky2_rx_add(struct sky2_port *sky2, dma_addr_t map)
780+{
781+ struct sky2_rx_le *le;
782+ u32 hi = high32(map);
783+ u16 len = sky2->rx_bufsize;
784+
785+ if (sky2->rx_addr64 != hi) {
786+ le = sky2_next_rx(sky2);
787+ le->addr = cpu_to_le32(hi);
788+ le->ctrl = 0;
789+ le->opcode = OP_ADDR64 | HW_OWNER;
790+ sky2->rx_addr64 = high32(map + len);
791+ }
792+
793+ le = sky2_next_rx(sky2);
794+ le->addr = cpu_to_le32((u32) map);
795+ le->length = cpu_to_le16(len);
796+ le->ctrl = 0;
797+ le->opcode = OP_PACKET | HW_OWNER;
798+}
799+
800+
801+/* Tell chip where to start receive checksum.
802+ * Actually has two checksums, but set both same to avoid possible byte
803+ * order problems.
804+ */
805+static void rx_set_checksum(struct sky2_port *sky2)
806+{
807+ struct sky2_rx_le *le;
808+
809+ le = sky2_next_rx(sky2);
810+ le->addr = (ETH_HLEN << 16) | ETH_HLEN;
811+ le->ctrl = 0;
812+ le->opcode = OP_TCPSTART | HW_OWNER;
813+
814+ sky2_write32(sky2->hw,
815+ Q_ADDR(rxqaddr[sky2->port], Q_CSR),
816+ sky2->rx_csum ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
817+
818+}
819+
820+/*
821+ * The RX Stop command will not work for Yukon-2 if the BMU does not
822+ * reach the end of packet and since we can't make sure that we have
823+ * incoming data, we must reset the BMU while it is not doing a DMA
824+ * transfer. Since it is possible that the RX path is still active,
825+ * the RX RAM buffer will be stopped first, so any possible incoming
826+ * data will not trigger a DMA. After the RAM buffer is stopped, the
827+ * BMU is polled until any DMA in progress is ended and only then it
828+ * will be reset.
829+ */
830+static void sky2_rx_stop(struct sky2_port *sky2)
831+{
832+ struct sky2_hw *hw = sky2->hw;
833+ unsigned rxq = rxqaddr[sky2->port];
834+ int i;
835+
836+ /* disable the RAM Buffer receive queue */
837+ sky2_write8(hw, RB_ADDR(rxq, RB_CTRL), RB_DIS_OP_MD);
838+
839+ for (i = 0; i < 0xffff; i++)
840+ if (sky2_read8(hw, RB_ADDR(rxq, Q_RSL))
841+ == sky2_read8(hw, RB_ADDR(rxq, Q_RL)))
842+ goto stopped;
843+
844+ printk(KERN_WARNING PFX "%s: receiver stop failed\n",
845+ sky2->netdev->name);
846+stopped:
847+ sky2_write32(hw, Q_ADDR(rxq, Q_CSR), BMU_RST_SET | BMU_FIFO_RST);
848+
849+ /* reset the Rx prefetch unit */
850+ sky2_write32(hw, Y2_QADDR(rxq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET);
851+}
852+
853+/* Clean out receive buffer area, assumes receiver hardware stopped */
854+static void sky2_rx_clean(struct sky2_port *sky2)
855+{
856+ unsigned i;
857+
858+ memset(sky2->rx_le, 0, RX_LE_BYTES);
859+ for (i = 0; i < sky2->rx_pending; i++) {
860+ struct ring_info *re = sky2->rx_ring + i;
861+
862+ if (re->skb) {
863+ pci_unmap_single(sky2->hw->pdev,
864+ re->mapaddr, sky2->rx_bufsize,
865+ PCI_DMA_FROMDEVICE);
866+ kfree_skb(re->skb);
867+ re->skb = NULL;
868+ }
869+ }
870+}
871+
872+/* Basic MII support */
873+static int sky2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
874+{
875+ struct mii_ioctl_data *data;
876+ struct sky2_port *sky2 = netdev_priv(dev);
877+ struct sky2_hw *hw = sky2->hw;
878+ int err = -EOPNOTSUPP;
879+
880+ if (!netif_running(dev))
881+ return -ENODEV; /* Phy still in reset */
882+
883+ data = (struct mii_ioctl_data *) &ifr->ifr_ifru;
884+ switch (cmd) {
885+ case SIOCGMIIPHY:
886+ data->phy_id = PHY_ADDR_MARV;
887+
888+ /* fallthru */
889+ case SIOCGMIIREG: {
890+ u16 val = 0;
891+
892+ spin_lock_bh(&sky2->phy_lock);
893+ err = __gm_phy_read(hw, sky2->port, data->reg_num & 0x1f, &val);
894+ spin_unlock_bh(&sky2->phy_lock);
895+
896+ data->val_out = val;
897+ break;
898+ }
899+
900+ case SIOCSMIIREG:
901+ if (!capable(CAP_NET_ADMIN))
902+ return -EPERM;
903+
904+ spin_lock_bh(&sky2->phy_lock);
905+ err = gm_phy_write(hw, sky2->port, data->reg_num & 0x1f,
906+ data->val_in);
907+ spin_unlock_bh(&sky2->phy_lock);
908+ break;
909+ }
910+ return err;
911+}
912+
913+
914+/*
915+ * It appears the hardware has a bug in the FIFO logic that
916+ * causes it to hang if the FIFO gets overrun and the receive buffer
917+ * is not aligned. Also alloc_skb() won't align properly if slab
918+ * debugging is enabled.
919+ */
920+static inline struct sk_buff *sky2_alloc_skb(unsigned int size)
921+{
922+ struct sk_buff *skb;
923+
924+ skb = alloc_skb(size + RX_SKB_ALIGN, GFP_ATOMIC);
925+ if (likely(skb != NULL)) {
926+ unsigned long p = (unsigned long) skb->data;
927+ skb_reserve(skb, ALIGN(p, RX_SKB_ALIGN) - p);
928+ }
929+
930+ return skb;
931+}
932+
/*
 * Allocate and setup receiver buffer pool.
 * In case of 64 bit dma, there are 2X as many list elements
 * available as ring entries
 * and need to reserve one list element so we don't wrap around.
 *
 * Returns 0 on success or -ENOMEM if any rx buffer allocation fails
 * (in which case everything allocated so far is released).
 */
static int sky2_rx_start(struct sky2_port *sky2)
{
	struct sky2_hw *hw = sky2->hw;
	unsigned rxq = rxqaddr[sky2->port];
	int i;
	unsigned thresh;

	sky2->rx_put = sky2->rx_next = 0;
	sky2_qset(hw, rxq);

	if (hw->chip_id == CHIP_ID_YUKON_EC_U && hw->chip_rev >= 2) {
		/* MAC Rx RAM Read is controlled by hardware */
		sky2_write32(hw, Q_ADDR(rxq, Q_F), F_M_RX_RAM_DIS);
	}

	sky2_prefetch_init(hw, rxq, sky2->rx_le_map, RX_LE_SIZE - 1);

	rx_set_checksum(sky2);
	/* Fill the rx ring: one skb + DMA mapping per pending entry. */
	for (i = 0; i < sky2->rx_pending; i++) {
		struct ring_info *re = sky2->rx_ring + i;

		re->skb = sky2_alloc_skb(sky2->rx_bufsize);
		if (!re->skb)
			goto nomem;

		re->mapaddr = pci_map_single(hw->pdev, re->skb->data,
					     sky2->rx_bufsize, PCI_DMA_FROMDEVICE);
		sky2_rx_add(sky2, re->mapaddr);
	}


	/*
	 * The receiver hangs if it receives frames larger than the
	 * packet buffer. As a workaround, truncate oversize frames, but
	 * the register is limited to 9 bits, so if you do frames > 2052
	 * you better get the MTU right!
	 */
	thresh = (sky2->rx_bufsize - 8) / sizeof(u32);
	if (thresh > 0x1ff)
		sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_TRUNC_OFF);
	else {
		sky2_write16(hw, SK_REG(sky2->port, RX_GMF_TR_THR), thresh);
		sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_TRUNC_ON);
	}


	/* Tell chip about available buffers */
	sky2_write16(hw, Y2_QADDR(rxq, PREF_UNIT_PUT_IDX), sky2->rx_put);
	return 0;
nomem:
	/* partial allocation: undo everything done so far */
	sky2_rx_clean(sky2);
	return -ENOMEM;
}
992+
/* Bring up network interface (dev->open).
 * Allocates tx/rx rings, programs the RAM buffer split and queues,
 * and enables the port interrupt. Returns 0 or a negative errno;
 * on failure all allocations made here are released.
 */
static int sky2_up(struct net_device *dev)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;
	unsigned port = sky2->port;
	u32 ramsize, rxspace, imask;
	int cap, err = -ENOMEM;
	struct net_device *otherdev = hw->dev[sky2->port^1];

	/*
	 * On dual port PCI-X card, there is a problem where status
	 * can be received out of order due to split transactions
	 */
	if (otherdev && netif_running(otherdev) &&
	    (cap = pci_find_capability(hw->pdev, PCI_CAP_ID_PCIX))) {
		struct sky2_port *osky2 = netdev_priv(otherdev);
		u16 cmd;

		/* disable PCI-X split transactions as a workaround */
		cmd = sky2_pci_read16(hw, cap + PCI_X_CMD);
		cmd &= ~PCI_X_CMD_MAX_SPLIT;
		sky2_pci_write16(hw, cap + PCI_X_CMD, cmd);

		/* rx checksum offload also disabled on both ports */
		sky2->rx_csum = 0;
		osky2->rx_csum = 0;
	}

	if (netif_msg_ifup(sky2))
		printk(KERN_INFO PFX "%s: enabling interface\n", dev->name);

	/* must be power of 2 */
	sky2->tx_le = pci_alloc_consistent(hw->pdev,
					   TX_RING_SIZE *
					   sizeof(struct sky2_tx_le),
					   &sky2->tx_le_map);
	if (!sky2->tx_le)
		goto err_out;

	sky2->tx_ring = kmalloc(TX_RING_SIZE * sizeof(struct tx_ring_info),
				GFP_KERNEL);
	if (!sky2->tx_ring)
		goto err_out;
	memset(sky2->tx_ring, 0, TX_RING_SIZE * sizeof(struct tx_ring_info));
	sky2->tx_prod = sky2->tx_cons = 0;

	sky2->rx_le = pci_alloc_consistent(hw->pdev, RX_LE_BYTES,
					   &sky2->rx_le_map);
	if (!sky2->rx_le)
		goto err_out;
	memset(sky2->rx_le, 0, RX_LE_BYTES);

	sky2->rx_ring = kmalloc(sky2->rx_pending * sizeof(struct ring_info),
				GFP_KERNEL);
	if (!sky2->rx_ring)
		goto err_out;

	memset(sky2->rx_ring, 0, sky2->rx_pending * sizeof(struct ring_info));
	sky2_mac_init(hw, port);

	/* Determine available ram buffer space (in 4K blocks).
	 * Note: not sure about the FE setting below yet
	 */
	if (hw->chip_id == CHIP_ID_YUKON_FE)
		ramsize = 4;
	else
		ramsize = sky2_read8(hw, B2_E_0);

	/* Give transmitter one third (rounded up) */
	rxspace = ramsize - (ramsize + 2) / 3;

	sky2_ramset(hw, rxqaddr[port], 0, rxspace);
	sky2_ramset(hw, txqaddr[port], rxspace, ramsize);

	/* Make sure SyncQ is disabled */
	sky2_write8(hw, RB_ADDR(port == 0 ? Q_XS1 : Q_XS2, RB_CTRL),
		    RB_RST_SET);

	sky2_qset(hw, txqaddr[port]);

	/* Set almost empty threshold */
	if (hw->chip_id == CHIP_ID_YUKON_EC_U && hw->chip_rev == 1)
		sky2_write16(hw, Q_ADDR(txqaddr[port], Q_AL), 0x1a0);

	sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map,
			   TX_RING_SIZE - 1);

	err = sky2_rx_start(sky2);
	if (err)
		goto err_out;

	/* Enable interrupts from phy/mac for port */
	imask = sky2_read32(hw, B0_IMSK);
	imask |= portirq_msk[port];
	sky2_write32(hw, B0_IMSK, imask);

	return 0;

err_out:
	/* release everything allocated above; kfree(NULL) is a no-op */
	if (sky2->rx_le) {
		pci_free_consistent(hw->pdev, RX_LE_BYTES,
				    sky2->rx_le, sky2->rx_le_map);
		sky2->rx_le = NULL;
	}
	if (sky2->tx_le) {
		pci_free_consistent(hw->pdev,
				    TX_RING_SIZE * sizeof(struct sky2_tx_le),
				    sky2->tx_le, sky2->tx_le_map);
		sky2->tx_le = NULL;
	}
	kfree(sky2->tx_ring);
	kfree(sky2->rx_ring);

	sky2->tx_ring = NULL;
	sky2->rx_ring = NULL;
	return err;
}
1109+
1110+/* Modular subtraction in ring */
1111+static inline int tx_dist(unsigned tail, unsigned head)
1112+{
1113+ return (head - tail) & (TX_RING_SIZE - 1);
1114+}
1115+
1116+/* Number of list elements available for next tx */
1117+static inline int tx_avail(const struct sky2_port *sky2)
1118+{
1119+ return sky2->tx_pending - tx_dist(sky2->tx_cons, sky2->tx_prod);
1120+}
1121+
1122+/* Estimate of number of transmit list elements required */
1123+static inline unsigned tx_le_req(const struct sk_buff *skb)
1124+{
1125+ unsigned count;
1126+
1127+ count = sizeof(dma_addr_t) / sizeof(u32);
1128+ count += skb_shinfo(skb)->nr_frags * count;
1129+
1130+#ifdef NETIF_F_TSO
1131+ if (skb_shinfo(skb)->tso_size)
1132+ ++count;
1133+#endif
1134+
1135+ if (skb->ip_summed == CHECKSUM_HW)
1136+ ++count;
1137+
1138+ return count;
1139+}
1140+
/*
 * Put one packet in ring for transmit.
 * A single packet can generate multiple list elements, and
 * the number of ring elements will probably be less than the number
 * of list elements used.
 *
 * No BH disabling for tx_lock here (like tg3)
 *
 * Returns NETDEV_TX_OK, or NETDEV_TX_BUSY when the ring is too full.
 */
static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;
	struct sky2_tx_le *le = NULL;
	struct tx_ring_info *re;
	unsigned long flags;
	unsigned i, len;
	int avail;
	dma_addr_t mapping;
	u32 addr64;
	u16 mss;
	u8 ctrl;

	spin_lock_irqsave(&sky2->tx_lock, flags);

	/* not enough list elements: ask the stack to requeue */
	if (unlikely(tx_avail(sky2) < tx_le_req(skb))) {
		spin_unlock_irqrestore(&sky2->tx_lock, flags);
		return NETDEV_TX_BUSY;
	}

	if (unlikely(netif_msg_tx_queued(sky2)))
		printk(KERN_DEBUG "%s: tx queued, slot %u, len %d\n",
		       dev->name, sky2->tx_prod, skb->len);

	len = skb_headlen(skb);
	mapping = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE);
	addr64 = high32(mapping);

	re = sky2->tx_ring + sky2->tx_prod;

	/* Send high bits if changed or crosses boundary */
	if (addr64 != sky2->tx_addr64 || high32(mapping + len) != sky2->tx_addr64) {
		le = get_tx_le(sky2);
		le->tx.addr = cpu_to_le32(addr64);
		le->ctrl = 0;
		le->opcode = OP_ADDR64 | HW_OWNER;
		sky2->tx_addr64 = high32(mapping + len);
	}

#ifdef NETIF_F_TSO
	/* Check for TCP Segmentation Offload */
	mss = skb_shinfo(skb)->tso_size;
	if (mss != 0) {
#ifdef SKB_DATAREF_MASK
		/* just drop the packet if non-linear expansion fails */
		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			goto out_unlock;
		}
#endif
		/* chip wants total header + MSS, not just the MSS */
		mss += ((skb->h.th->doff - 5) * 4);	/* TCP options */
		mss += (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
		mss += ETH_HLEN;
	}

	/* only emit an MSS list element when it actually changed */
	if (mss != sky2->tx_last_mss) {
		le = get_tx_le(sky2);
		le->tx.tso.size = cpu_to_le16(mss);
		le->tx.tso.rsvd = 0;
		le->opcode = OP_LRGLEN | HW_OWNER;
		le->ctrl = 0;
		sky2->tx_last_mss = mss;
	}
#endif
	/* NOTE(review): if NETIF_F_TSO is not defined, `mss` is read
	 * uninitialized below — confirm all supported configs define it. */

	ctrl = 0;

	/* Handle TCP checksum offload */
	if (skb->ip_summed == CHECKSUM_HW) {
		u16 hdr = skb->h.raw - skb->data;
		u16 offset = hdr + skb->csum;

		ctrl = CALSUM | WR_SUM | INIT_SUM | LOCK_SUM;
		if (skb->nh.iph->protocol == IPPROTO_UDP)
			ctrl |= UDPTCP;

		le = get_tx_le(sky2);
		le->tx.csum.start = cpu_to_le16(hdr);
		le->tx.csum.offset = cpu_to_le16(offset);
		le->length = 0;	/* initial checksum value */
		le->ctrl = 1;	/* one packet */
		le->opcode = OP_TCPLISW | HW_OWNER;
	}

	/* data element for the linear part of the skb */
	le = get_tx_le(sky2);
	le->tx.addr = cpu_to_le32((u32) mapping);
	le->length = cpu_to_le16(len);
	le->ctrl = ctrl;
	le->opcode = mss ? (OP_LARGESEND | HW_OWNER) : (OP_PACKET | HW_OWNER);

	/* Record the transmit mapping info */
	re->skb = skb;
	pci_unmap_addr_set(re, mapaddr, mapping);

	/* one (or two with a high-address change) element per fragment */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		struct tx_ring_info *fre;

		mapping = pci_map_page(hw->pdev, frag->page, frag->page_offset,
				       frag->size, PCI_DMA_TODEVICE);
		addr64 = high32(mapping);
		if (addr64 != sky2->tx_addr64) {
			le = get_tx_le(sky2);
			le->tx.addr = cpu_to_le32(addr64);
			le->ctrl = 0;
			le->opcode = OP_ADDR64 | HW_OWNER;
			sky2->tx_addr64 = addr64;
		}

		le = get_tx_le(sky2);
		le->tx.addr = cpu_to_le32((u32) mapping);
		le->length = cpu_to_le16(frag->size);
		le->ctrl = ctrl;
		le->opcode = OP_BUFFER | HW_OWNER;

		/* fragment unmap info lives in the slot after the head's */
		fre = sky2->tx_ring
		    + RING_NEXT((re - sky2->tx_ring) + i, TX_RING_SIZE);
		pci_unmap_addr_set(fre, mapaddr, mapping);
	}

	/* remember where this packet ends, mark last element */
	re->idx = sky2->tx_prod;
	le->ctrl |= EOP;

	avail = tx_avail(sky2);
	if (mss != 0 || avail < TX_MIN_PENDING) {
		le->ctrl |= FRC_STAT;
		if (avail <= MAX_SKB_TX_LE)
			netif_stop_queue(dev);
	}

	sky2_put_idx(hw, txqaddr[sky2->port], sky2->tx_prod);

out_unlock:
	spin_unlock_irqrestore(&sky2->tx_lock, flags);

	dev->trans_start = jiffies;
	return NETDEV_TX_OK;
}
1289+
1290+/*
1291+ * Free ring elements from starting at tx_cons until "done"
1292+ *
1293+ * NB: the hardware will tell us about partial completion of multi-part
1294+ * buffers; these are deferred until completion.
1295+ */
1296+static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
1297+{
1298+ struct net_device *dev = sky2->netdev;
1299+ struct pci_dev *pdev = sky2->hw->pdev;
1300+ u16 nxt, put;
1301+ unsigned i;
1302+
1303+ BUG_ON(done >= TX_RING_SIZE);
1304+
1305+ if (unlikely(netif_msg_tx_done(sky2)))
1306+ printk(KERN_DEBUG "%s: tx done, up to %u\n",
1307+ dev->name, done);
1308+
1309+ for (put = sky2->tx_cons; put != done; put = nxt) {
1310+ struct tx_ring_info *re = sky2->tx_ring + put;
1311+ struct sk_buff *skb = re->skb;
1312+
1313+ nxt = re->idx;
1314+ BUG_ON(nxt >= TX_RING_SIZE);
1315+ prefetch(sky2->tx_ring + nxt);
1316+
1317+ /* Check for partial status */
1318+ if (tx_dist(put, done) < tx_dist(put, nxt))
1319+ break;
1320+
1321+ skb = re->skb;
1322+ pci_unmap_single(pdev, pci_unmap_addr(re, mapaddr),
1323+ skb_headlen(skb), PCI_DMA_TODEVICE);
1324+
1325+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1326+ struct tx_ring_info *fre;
1327+ fre = sky2->tx_ring + RING_NEXT(put + i, TX_RING_SIZE);
1328+ pci_unmap_page(pdev, pci_unmap_addr(fre, mapaddr),
1329+ skb_shinfo(skb)->frags[i].size,
1330+ PCI_DMA_TODEVICE);
1331+ }
1332+
1333+ dev_kfree_skb_any(skb);
1334+ }
1335+
1336+ sky2->tx_cons = put;
1337+ if (tx_avail(sky2) > MAX_SKB_TX_LE + 4)
1338+ netif_wake_queue(dev);
1339+}
1340+
/* Cleanup all untransmitted buffers, assume transmitter not running.
 * Completing at tx_prod releases every outstanding element; BH-safe
 * lock because sky2_tx_complete is also called from IRQ context.
 */
static void sky2_tx_clean(struct sky2_port *sky2)
{
	spin_lock_bh(&sky2->tx_lock);
	sky2_tx_complete(sky2, sky2->tx_prod);
	spin_unlock_bh(&sky2->tx_lock);
}
1348+
/* Network shutdown (dev->stop).
 * Stops MAC/BMU/arbiter in the order the hardware requires, disables
 * the port interrupt, then frees all rings. Always returns 0.
 */
static int sky2_down(struct net_device *dev)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;
	unsigned port = sky2->port;
	u16 ctrl;
	u32 imask;

	/* Never really got started! */
	if (!sky2->tx_le)
		return 0;

	if (netif_msg_ifdown(sky2))
		printk(KERN_INFO PFX "%s: disabling interface\n", dev->name);

	/* Stop more packets from being queued */
	netif_stop_queue(dev);

	sky2_phy_reset(hw, port);

	/* Stop transmitter */
	sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), BMU_STOP);
	sky2_read32(hw, Q_ADDR(txqaddr[port], Q_CSR));	/* read back to flush */

	sky2_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL),
		     RB_RST_SET | RB_DIS_OP_MD);

	/* disable rx/tx in the GMAC */
	ctrl = gma_read16(hw, port, GM_GP_CTRL);
	ctrl &= ~(GM_GPCR_TX_ENA | GM_GPCR_RX_ENA);
	gma_write16(hw, port, GM_GP_CTRL, ctrl);

	sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);

	/* Workaround shared GMAC reset */
	if (!(hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0
	      && port == 0 && hw->dev[1] && netif_running(hw->dev[1])))
		sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET);

	/* Disable Force Sync bit and Enable Alloc bit */
	sky2_write8(hw, SK_REG(port, TXA_CTRL),
		    TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);

	/* Stop Interval Timer and Limit Counter of Tx Arbiter */
	sky2_write32(hw, SK_REG(port, TXA_ITI_INI), 0L);
	sky2_write32(hw, SK_REG(port, TXA_LIM_INI), 0L);

	/* Reset the PCI FIFO of the async Tx queue */
	sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR),
		     BMU_RST_SET | BMU_FIFO_RST);

	/* Reset the Tx prefetch units */
	sky2_write32(hw, Y2_QADDR(txqaddr[port], PREF_UNIT_CTRL),
		     PREF_UNIT_RST_SET);

	sky2_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), RB_RST_SET);

	sky2_rx_stop(sky2);

	sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
	sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET);

	/* Disable port IRQ */
	imask = sky2_read32(hw, B0_IMSK);
	imask &= ~portirq_msk[port];
	sky2_write32(hw, B0_IMSK, imask);

	/* turn off LED's */
	sky2_write16(hw, B0_Y2LED, LED_STAT_OFF);

	/* release all queued/pending buffers, then the rings themselves */
	sky2_tx_clean(sky2);
	sky2_rx_clean(sky2);

	pci_free_consistent(hw->pdev, RX_LE_BYTES,
			    sky2->rx_le, sky2->rx_le_map);
	kfree(sky2->rx_ring);

	pci_free_consistent(hw->pdev,
			    TX_RING_SIZE * sizeof(struct sky2_tx_le),
			    sky2->tx_le, sky2->tx_le_map);
	kfree(sky2->tx_ring);

	/* tx_le == NULL is the "not started" marker checked above */
	sky2->tx_le = NULL;
	sky2->rx_le = NULL;

	sky2->rx_ring = NULL;
	sky2->tx_ring = NULL;

	return 0;
}
1439+
1440+static u16 sky2_phy_speed(const struct sky2_hw *hw, u16 aux)
1441+{
1442+ if (!hw->copper)
1443+ return SPEED_1000;
1444+
1445+ if (hw->chip_id == CHIP_ID_YUKON_FE)
1446+ return (aux & PHY_M_PS_SPEED_100) ? SPEED_100 : SPEED_10;
1447+
1448+ switch (aux & PHY_M_PS_SPEED_MSK) {
1449+ case PHY_M_PS_SPEED_1000:
1450+ return SPEED_1000;
1451+ case PHY_M_PS_SPEED_100:
1452+ return SPEED_100;
1453+ default:
1454+ return SPEED_10;
1455+ }
1456+}
1457+
/* Bring the link up: program MAC speed/duplex, enable rx/tx,
 * unmask PHY interrupts, wake the tx queue and light the link LED.
 * Called with sky2->phy_lock held (from PHY interrupt path).
 */
static void sky2_link_up(struct sky2_port *sky2)
{
	struct sky2_hw *hw = sky2->hw;
	unsigned port = sky2->port;
	u16 reg;

	/* Enable Transmit FIFO Underrun */
	sky2_write8(hw, SK_REG(port, GMAC_IRQ_MSK), GMAC_DEF_MSK);

	reg = gma_read16(hw, port, GM_GP_CTRL);
	if (sky2->autoneg == AUTONEG_DISABLE) {
		/* forced mode: program speed/duplex by hand */
		reg |= GM_GPCR_AU_ALL_DIS;

		/* Is write/read necessary? Copied from sky2_mac_init */
		gma_write16(hw, port, GM_GP_CTRL, reg);
		gma_read16(hw, port, GM_GP_CTRL);

		switch (sky2->speed) {
		case SPEED_1000:
			reg &= ~GM_GPCR_SPEED_100;
			reg |= GM_GPCR_SPEED_1000;
			break;
		case SPEED_100:
			reg &= ~GM_GPCR_SPEED_1000;
			reg |= GM_GPCR_SPEED_100;
			break;
		case SPEED_10:
			reg &= ~(GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100);
			break;
		}
	} else
		reg &= ~GM_GPCR_AU_ALL_DIS;

	if (sky2->duplex == DUPLEX_FULL || sky2->autoneg == AUTONEG_ENABLE)
		reg |= GM_GPCR_DUP_FULL;

	/* enable Rx/Tx */
	reg |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
	gma_write16(hw, port, GM_GP_CTRL, reg);
	gma_read16(hw, port, GM_GP_CTRL);	/* PCI post */

	gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK);

	netif_carrier_on(sky2->netdev);
	netif_wake_queue(sky2->netdev);

	/* Turn on link LED */
	sky2_write8(hw, SK_REG(port, LNK_LED_REG),
		    LINKLED_ON | LINKLED_BLINK_OFF | LINKLED_LINKSYNC_OFF);

	/* these chips need the LED mode set per speed via PHY page 3 */
	if (hw->chip_id == CHIP_ID_YUKON_XL || hw->chip_id == CHIP_ID_YUKON_EC_U) {
		u16 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
		u16 led = PHY_M_LEDC_LOS_CTRL(1);	/* link active */

		switch(sky2->speed) {
		case SPEED_10:
			led |= PHY_M_LEDC_INIT_CTRL(7);
			break;

		case SPEED_100:
			led |= PHY_M_LEDC_STA1_CTRL(7);
			break;

		case SPEED_1000:
			led |= PHY_M_LEDC_STA0_CTRL(7);
			break;
		}

		/* switch to page 3, write LED control, restore page */
		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
		gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, led);
		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
	}

	if (netif_msg_link(sky2))
		printk(KERN_INFO PFX
		       "%s: Link is up at %d Mbps, %s duplex, flow control %s\n",
		       sky2->netdev->name, sky2->speed,
		       sky2->duplex == DUPLEX_FULL ? "full" : "half",
		       (sky2->tx_pause && sky2->rx_pause) ? "both" :
		       sky2->tx_pause ? "tx" : sky2->rx_pause ? "rx" : "none");
}
1539+
/* Take the link down: mask PHY interrupts, disable MAC rx/tx,
 * stop the queue and restart PHY negotiation.
 * Called with sky2->phy_lock held.
 */
static void sky2_link_down(struct sky2_port *sky2)
{
	struct sky2_hw *hw = sky2->hw;
	unsigned port = sky2->port;
	u16 reg;

	gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);

	reg = gma_read16(hw, port, GM_GP_CTRL);
	reg &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
	gma_write16(hw, port, GM_GP_CTRL, reg);
	gma_read16(hw, port, GM_GP_CTRL);	/* PCI post */

	if (sky2->rx_pause && !sky2->tx_pause) {
		/* restore Asymmetric Pause bit */
		gm_phy_write(hw, port, PHY_MARV_AUNE_ADV,
			     gm_phy_read(hw, port, PHY_MARV_AUNE_ADV)
			     | PHY_M_AN_ASP);
	}

	netif_carrier_off(sky2->netdev);
	netif_stop_queue(sky2->netdev);

	/* Turn off link LED (original comment wrongly said "on") */
	sky2_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_OFF);

	if (netif_msg_link(sky2))
		printk(KERN_INFO PFX "%s: Link is down.\n", sky2->netdev->name);
	sky2_phy_init(hw, port);
}
1570+
1571+static int sky2_autoneg_done(struct sky2_port *sky2, u16 aux)
1572+{
1573+ struct sky2_hw *hw = sky2->hw;
1574+ unsigned port = sky2->port;
1575+ u16 lpa;
1576+
1577+ lpa = gm_phy_read(hw, port, PHY_MARV_AUNE_LP);
1578+
1579+ if (lpa & PHY_M_AN_RF) {
1580+ printk(KERN_ERR PFX "%s: remote fault", sky2->netdev->name);
1581+ return -1;
1582+ }
1583+
1584+ if (hw->chip_id != CHIP_ID_YUKON_FE &&
1585+ gm_phy_read(hw, port, PHY_MARV_1000T_STAT) & PHY_B_1000S_MSF) {
1586+ printk(KERN_ERR PFX "%s: master/slave fault",
1587+ sky2->netdev->name);
1588+ return -1;
1589+ }
1590+
1591+ if (!(aux & PHY_M_PS_SPDUP_RES)) {
1592+ printk(KERN_ERR PFX "%s: speed/duplex mismatch",
1593+ sky2->netdev->name);
1594+ return -1;
1595+ }
1596+
1597+ sky2->duplex = (aux & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF;
1598+
1599+ sky2->speed = sky2_phy_speed(hw, aux);
1600+
1601+ /* Pause bits are offset (9..8) */
1602+ if (hw->chip_id == CHIP_ID_YUKON_XL || hw->chip_id == CHIP_ID_YUKON_EC_U)
1603+ aux >>= 6;
1604+
1605+ sky2->rx_pause = (aux & PHY_M_PS_RX_P_EN) != 0;
1606+ sky2->tx_pause = (aux & PHY_M_PS_TX_P_EN) != 0;
1607+
1608+ if ((sky2->tx_pause || sky2->rx_pause)
1609+ && !(sky2->speed < SPEED_1000 && sky2->duplex == DUPLEX_HALF))
1610+ sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON);
1611+ else
1612+ sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
1613+
1614+ return 0;
1615+}
1616+
/* Interrupt from PHY: handle autoneg completion and link/speed/duplex
 * change events under phy_lock.
 */
static void sky2_phy_intr(struct sky2_hw *hw, unsigned port)
{
	struct net_device *dev = hw->dev[port];
	struct sky2_port *sky2 = netdev_priv(dev);
	u16 istatus, phystat;

	spin_lock(&sky2->phy_lock);
	/* reading INT_STAT also acknowledges the PHY interrupt */
	istatus = gm_phy_read(hw, port, PHY_MARV_INT_STAT);
	phystat = gm_phy_read(hw, port, PHY_MARV_PHY_STAT);

	if (!netif_running(dev))
		goto out;

	if (netif_msg_intr(sky2))
		printk(KERN_INFO PFX "%s: phy interrupt status 0x%x 0x%x\n",
		       sky2->netdev->name, istatus, phystat);

	if (istatus & PHY_M_IS_AN_COMPL) {
		if (sky2_autoneg_done(sky2, phystat) == 0)
			sky2_link_up(sky2);
		goto out;
	}

	if (istatus & PHY_M_IS_LSP_CHANGE)
		sky2->speed = sky2_phy_speed(hw, phystat);

	if (istatus & PHY_M_IS_DUP_CHANGE)
		sky2->duplex =
		    (phystat & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF;

	if (istatus & PHY_M_IS_LST_CHANGE) {
		if (phystat & PHY_M_PS_LINK_UP)
			sky2_link_up(sky2);
		else
			sky2_link_down(sky2);
	}
out:
	spin_unlock(&sky2->phy_lock);
}
1657+

/* Transmit timeout is only called if we are running, carrier is up
 * and tx queue is full (stopped).
 * Three recovery strategies, in increasing severity: kick the status
 * timer, replay a lost status report, or reset the whole tx path.
 */
static void sky2_tx_timeout(struct net_device *dev)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;
	unsigned txq = txqaddr[sky2->port];
	u16 report, done;

	if (netif_msg_timer(sky2))
		printk(KERN_ERR PFX "%s: tx timeout\n", dev->name);

	report = sky2_read16(hw, sky2->port == 0 ? STAT_TXA1_RIDX : STAT_TXA2_RIDX);
	done = sky2_read16(hw, Q_ADDR(txq, Q_DONE));

	printk(KERN_DEBUG PFX "%s: transmit ring %u .. %u report=%u done=%u\n",
	       dev->name,
	       sky2->tx_cons, sky2->tx_prod, report, done);

	if (report != done) {
		/* status updates stuck in the chip: restart the timer */
		printk(KERN_INFO PFX "status burst pending (irq moderation?)\n");

		sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_STOP);
		sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START);
	} else if (report != sky2->tx_cons) {
		/* chip reported completion but we never processed it */
		printk(KERN_INFO PFX "status report lost?\n");

		spin_lock_bh(&sky2->tx_lock);
		sky2_tx_complete(sky2, report);
		spin_unlock_bh(&sky2->tx_lock);
	} else {
		/* full reset of the transmit path */
		printk(KERN_INFO PFX "hardware hung? flushing\n");

		sky2_write32(hw, Q_ADDR(txq, Q_CSR), BMU_STOP);
		sky2_write32(hw, Y2_QADDR(txq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET);

		sky2_tx_clean(sky2);

		sky2_qset(hw, txq);
		sky2_prefetch_init(hw, txq, sky2->tx_le_map, TX_RING_SIZE - 1);
	}
}
1702+
1703+
1704+/* Want receive buffer size to be multiple of 64 bits
1705+ * and incl room for vlan and truncation
1706+ */
1707+static inline unsigned sky2_buf_size(int mtu)
1708+{
1709+ return ALIGN(mtu + ETH_HLEN + VLAN_HLEN, 8) + 8;
1710+}
1711+
/* Change device MTU. If running, the receiver is stopped, buffers are
 * reallocated at the new size and the receiver restarted; the interface
 * is closed if the restart fails. Returns 0 or a negative errno.
 */
static int sky2_change_mtu(struct net_device *dev, int new_mtu)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;
	int err;
	u16 ctl, mode;
	u32 imask;

	if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
		return -EINVAL;

	/* EC_U chips cannot do jumbo frames */
	if (hw->chip_id == CHIP_ID_YUKON_EC_U && new_mtu > ETH_DATA_LEN)
		return -EINVAL;

	if (!netif_running(dev)) {
		dev->mtu = new_mtu;
		return 0;
	}

	/* mask all interrupts while the rx path is reworked */
	imask = sky2_read32(hw, B0_IMSK);
	sky2_write32(hw, B0_IMSK, 0);

	dev->trans_start = jiffies;	/* prevent tx timeout */
	netif_stop_queue(dev);

	/* disable receiver, drain and free current rx buffers */
	ctl = gma_read16(hw, sky2->port, GM_GP_CTRL);
	gma_write16(hw, sky2->port, GM_GP_CTRL, ctl & ~GM_GPCR_RX_ENA);
	sky2_rx_stop(sky2);
	sky2_rx_clean(sky2);

	dev->mtu = new_mtu;
	sky2->rx_bufsize = sky2_buf_size(new_mtu);
	mode = DATA_BLIND_VAL(DATA_BLIND_DEF) |
	    GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF);

	if (dev->mtu > ETH_DATA_LEN)
		mode |= GM_SMOD_JUMBO_ENA;

	gma_write16(hw, sky2->port, GM_SERIAL_MODE, mode);

	sky2_write8(hw, RB_ADDR(rxqaddr[sky2->port], RB_CTRL), RB_ENA_OP_MD);

	err = sky2_rx_start(sky2);
	sky2_write32(hw, B0_IMSK, imask);

	if (err)
		dev_close(dev);	/* cannot run without rx buffers */
	else {
		gma_write16(hw, sky2->port, GM_GP_CTRL, ctl);

		netif_wake_queue(dev);
	}

	return err;
}
1767+
/*
 * Receive one packet.
 * For small packets or errors, just reuse existing skb.
 * For larger packets, get new buffer.
 *
 * Returns the skb to pass up the stack, or NULL if the frame was
 * dropped (error, oversize, or allocation failure). The ring slot is
 * always resubmitted to the hardware.
 */
static struct sk_buff *sky2_receive(struct sky2_port *sky2,
				    u16 length, u32 status)
{
	struct ring_info *re = sky2->rx_ring + sky2->rx_next;
	struct sk_buff *skb = NULL;

	if (unlikely(netif_msg_rx_status(sky2)))
		printk(KERN_DEBUG PFX "%s: rx slot %u status 0x%x len %d\n",
		       sky2->netdev->name, sky2->rx_next, status, length);

	sky2->rx_next = (sky2->rx_next + 1) % sky2->rx_pending;
	prefetch(sky2->rx_ring + sky2->rx_next);

	if (status & GMR_FS_ANY_ERR)
		goto error;

	if (!(status & GMR_FS_RX_OK))
		goto resubmit;

	if (length > sky2->netdev->mtu + ETH_HLEN)
		goto oversize;

	if (length < copybreak) {
		/* small frame: copy into a fresh skb, keep the rx buffer */
		skb = alloc_skb(length + 2, GFP_ATOMIC);
		if (!skb)
			goto resubmit;

		skb_reserve(skb, 2);	/* align IP header */
		pci_dma_sync_single_for_cpu(sky2->hw->pdev, re->mapaddr,
					    length, PCI_DMA_FROMDEVICE);
		memcpy(skb->data, re->skb->data, length);
		skb->ip_summed = re->skb->ip_summed;
		skb->csum = re->skb->csum;
		pci_dma_sync_single_for_device(sky2->hw->pdev, re->mapaddr,
					       length, PCI_DMA_FROMDEVICE);
	} else {
		/* large frame: hand the buffer up, replace it in the ring */
		struct sk_buff *nskb;

		nskb = sky2_alloc_skb(sky2->rx_bufsize);
		if (!nskb)
			goto resubmit;

		skb = re->skb;
		re->skb = nskb;
		pci_unmap_single(sky2->hw->pdev, re->mapaddr,
				 sky2->rx_bufsize, PCI_DMA_FROMDEVICE);
		prefetch(skb->data);

		re->mapaddr = pci_map_single(sky2->hw->pdev, nskb->data,
					     sky2->rx_bufsize, PCI_DMA_FROMDEVICE);
	}

	skb_put(skb, length);
resubmit:
	/* clear stale checksum state before the slot is reused */
	re->skb->ip_summed = CHECKSUM_NONE;
	sky2_rx_add(sky2, re->mapaddr);

	return skb;

oversize:
	++sky2->net_stats.rx_over_errors;
	goto resubmit;

error:
	++sky2->net_stats.rx_errors;

	if (netif_msg_rx_err(sky2) && net_ratelimit())
		printk(KERN_INFO PFX "%s: rx error, status 0x%x length %d\n",
		       sky2->netdev->name, status, length);

	/* classify the error for statistics */
	if (status & (GMR_FS_LONG_ERR | GMR_FS_UN_SIZE))
		sky2->net_stats.rx_length_errors++;
	if (status & GMR_FS_FRAGMENT)
		sky2->net_stats.rx_frame_errors++;
	if (status & GMR_FS_CRC_ERR)
		sky2->net_stats.rx_crc_errors++;
	if (status & GMR_FS_RX_FF_OV)
		sky2->net_stats.rx_fifo_errors++;

	goto resubmit;
}
1854+
/* Transmit complete: release descriptors up to "last" under tx_lock.
 * The netif_running() check guards against completions racing with
 * interface shutdown.
 */
static inline void sky2_tx_done(struct net_device *dev, u16 last)
{
	struct sky2_port *sky2 = netdev_priv(dev);

	if (netif_running(dev)) {
		spin_lock(&sky2->tx_lock);
		sky2_tx_complete(sky2, last);
		spin_unlock(&sky2->tx_lock);
	}
}
1866+
1867+/* Is status ring empty or is there more to do? */
1868+static inline int sky2_more_work(const struct sky2_hw *hw)
1869+{
1870+ return (hw->st_idx != sky2_read16(hw, STAT_PUT_IDX));
1871+}
1872+

/* Process status response ring: consume entries the chip has posted
 * (rx frames, rx checksums, tx completions) until we catch up with the
 * hardware put index, then acknowledge the IRQ and flush any deferred
 * rx-ring put-index updates.
 */
static void sky2_status_intr(struct sky2_hw *hw)
{
	struct sky2_port *sky2;
	unsigned buf_write[2] = { 0, 0 };
	u16 hwidx = sky2_read16(hw, STAT_PUT_IDX);

	/* make sure status entries are visible before we read them */
	rmb();

	while (hw->st_idx != hwidx) {
		struct sky2_status_le *le = hw->st_le + hw->st_idx;
		struct net_device *dev;
		struct sk_buff *skb;
		u32 status;
		u16 length;

		hw->st_idx = RING_NEXT(hw->st_idx, STATUS_RING_SIZE);

		BUG_ON(le->link >= 2);
		dev = hw->dev[le->link];

		sky2 = netdev_priv(dev);
		length = le->length;
		status = le->status;

		switch (le->opcode & ~HW_OWNER) {
		case OP_RXSTAT:
			skb = sky2_receive(sky2, length, status);
			if (!skb)
				break;

			skb->dev = dev;
			skb->protocol = eth_type_trans(skb, dev);
			dev->last_rx = jiffies;

			netif_rx(skb);

			/* Update receiver after 16 frames */
			if (++buf_write[le->link] == RX_BUF_WRITE) {
				sky2_put_idx(hw, rxqaddr[le->link],
					     sky2->rx_put);
				buf_write[le->link] = 0;
			}
			break;

		case OP_RXCHKS:
			/* checksum entry precedes the OP_RXSTAT it refers to */
			skb = sky2->rx_ring[sky2->rx_next].skb;
			skb->ip_summed = CHECKSUM_HW;
			skb->csum = le16_to_cpu(status);
			break;

		case OP_TXINDEXLE:
			/* TX index reports status for both ports */
			sky2_tx_done(hw->dev[0], status & 0xfff);
			if (hw->dev[1])
				sky2_tx_done(hw->dev[1],
					     ((status >> 24) & 0xff)
					     | (u16)(length & 0xf) << 8);
			break;

		default:
			if (net_ratelimit())
				printk(KERN_WARNING PFX
				       "unknown status opcode 0x%x\n", le->opcode);
			goto exit_loop;
		}
	}

	sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ);

exit_loop:
	/* flush any rx put-index updates deferred by the 16-frame batching */
	if (buf_write[0]) {
		sky2 = netdev_priv(hw->dev[0]);
		sky2_put_idx(hw, Q_R1, sky2->rx_put);
	}

	if (buf_write[1]) {
		sky2 = netdev_priv(hw->dev[1]);
		sky2_put_idx(hw, Q_R2, sky2->rx_put);
	}

}
1956+
/* Handle per-port hardware error bits (RAM/MAC/RX parity, TSO errors):
 * log each condition and clear its interrupt source. "status" has
 * already been shifted so port-1 bits line up with the *1 masks.
 */
static void sky2_hw_error(struct sky2_hw *hw, unsigned port, u32 status)
{
	struct net_device *dev = hw->dev[port];

	if (net_ratelimit())
		printk(KERN_INFO PFX "%s: hw error interrupt status 0x%x\n",
		       dev->name, status);

	if (status & Y2_IS_PAR_RD1) {
		if (net_ratelimit())
			printk(KERN_ERR PFX "%s: ram data read parity error\n",
			       dev->name);
		/* Clear IRQ */
		sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_RD_PERR);
	}

	if (status & Y2_IS_PAR_WR1) {
		if (net_ratelimit())
			printk(KERN_ERR PFX "%s: ram data write parity error\n",
			       dev->name);

		sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_WR_PERR);
	}

	if (status & Y2_IS_PAR_MAC1) {
		if (net_ratelimit())
			printk(KERN_ERR PFX "%s: MAC parity error\n", dev->name);
		sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_PE);
	}

	if (status & Y2_IS_PAR_RX1) {
		if (net_ratelimit())
			printk(KERN_ERR PFX "%s: RX parity error\n", dev->name);
		sky2_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR), BMU_CLR_IRQ_PAR);
	}

	if (status & Y2_IS_TCP_TXA1) {
		if (net_ratelimit())
			printk(KERN_ERR PFX "%s: TCP segmentation error\n",
			       dev->name);
		sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), BMU_CLR_IRQ_TCP);
	}
}
2000+
/* Dispatch chip-level hardware error interrupts: timestamp overflow,
 * PCI/PCI-Express errors, and per-port error bits (port 1's bits are
 * the same mask shifted up 8).
 */
static void sky2_hw_intr(struct sky2_hw *hw)
{
	u32 status = sky2_read32(hw, B0_HWE_ISRC);

	if (status & Y2_IS_TIST_OV)
		sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);

	if (status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) {
		u16 pci_err;

		pci_err = sky2_pci_read16(hw, PCI_STATUS);
		if (net_ratelimit())
			printk(KERN_ERR PFX "%s: pci hw error (0x%x)\n",
			       pci_name(hw->pdev), pci_err);

		/* config space writes must be unlocked first */
		sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
		sky2_pci_write16(hw, PCI_STATUS,
				 pci_err | PCI_STATUS_ERROR_BITS);
		sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
	}

	if (status & Y2_IS_PCI_EXP) {
		/* PCI-Express uncorrectable Error occurred */
		u32 pex_err;

		pex_err = sky2_pci_read32(hw, PEX_UNC_ERR_STAT);

		if (net_ratelimit())
			printk(KERN_ERR PFX "%s: pci express error (0x%x)\n",
			       pci_name(hw->pdev), pex_err);

		/* clear the interrupt */
		sky2_write32(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
		sky2_pci_write32(hw, PEX_UNC_ERR_STAT,
				 0xffffffffUL);
		sky2_write32(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);

		if (pex_err & PEX_FATAL_ERRORS) {
			/* fatal and unclearable: mask it off permanently */
			u32 hwmsk = sky2_read32(hw, B0_HWE_IMSK);
			hwmsk &= ~Y2_IS_PCI_EXP;
			sky2_write32(hw, B0_HWE_IMSK, hwmsk);
		}
	}

	if (status & Y2_HWE_L1_MASK)
		sky2_hw_error(hw, 0, status);
	status >>= 8;	/* port 1 uses the same bits shifted up 8 */
	if (status & Y2_HWE_L1_MASK)
		sky2_hw_error(hw, 1, status);
}
2051+
/* GMAC interrupt: count and clear rx FIFO overrun / tx FIFO underrun. */
static void sky2_mac_intr(struct sky2_hw *hw, unsigned port)
{
	struct net_device *dev = hw->dev[port];
	struct sky2_port *sky2 = netdev_priv(dev);
	u8 status = sky2_read8(hw, SK_REG(port, GMAC_IRQ_SRC));

	if (netif_msg_intr(sky2))
		printk(KERN_INFO PFX "%s: mac interrupt status 0x%x\n",
		       dev->name, status);

	if (status & GM_IS_RX_FF_OR) {
		++sky2->net_stats.rx_fifo_errors;
		sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_CLI_RX_FO);
	}

	if (status & GM_IS_TX_FF_UR) {
		++sky2->net_stats.tx_fifo_errors;
		sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_FU);
	}
}
2072+
2073+/* This should never happen it is a fatal situation */
2074+static void sky2_descriptor_error(struct sky2_hw *hw, unsigned port,
2075+ const char *rxtx, u32 mask)
2076+{
2077+ struct net_device *dev = hw->dev[port];
2078+ struct sky2_port *sky2 = netdev_priv(dev);
2079+ u32 imask;
2080+
2081+ printk(KERN_ERR PFX "%s: %s descriptor error (hardware problem)\n",
2082+ dev ? dev->name : "<not registered>", rxtx);
2083+
2084+ imask = sky2_read32(hw, B0_IMSK);
2085+ imask &= ~mask;
2086+ sky2_write32(hw, B0_IMSK, imask);
2087+
2088+ if (dev) {
2089+ spin_lock(&sky2->phy_lock);
2090+ sky2_link_down(sky2);
2091+ spin_unlock(&sky2->phy_lock);
2092+ }
2093+}
2094+
/*
 * Top-level interrupt handler.  Reads the special interrupt source
 * register and dispatches each pending cause to its sub-handler.
 * Returns IRQ_NONE when the interrupt is not ours (status 0) or the
 * hardware appears gone (all ones).
 */
static irqreturn_t sky2_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	struct sky2_hw *hw = dev_id;
	u32 status;

	status = sky2_read32(hw, B0_Y2_SP_ISRC2);
	if (status == 0 || status == ~0)
		return IRQ_NONE;

	pr_debug("sky2_intr %#x\n", status);

	if (status & Y2_IS_HW_ERR)
		sky2_hw_intr(hw);

	if (status & Y2_IS_IRQ_PHY1)
		sky2_phy_intr(hw, 0);

	if (status & Y2_IS_IRQ_PHY2)
		sky2_phy_intr(hw, 1);

	if (status & Y2_IS_IRQ_MAC1)
		sky2_mac_intr(hw, 0);

	if (status & Y2_IS_IRQ_MAC2)
		sky2_mac_intr(hw, 1);

	if (status & Y2_IS_CHK_RX1)
		sky2_descriptor_error(hw, 0, "receive", Y2_IS_CHK_RX1);

	if (status & Y2_IS_CHK_RX2)
		sky2_descriptor_error(hw, 1, "receive", Y2_IS_CHK_RX2);

	if (status & Y2_IS_CHK_TXA1)
		sky2_descriptor_error(hw, 0, "transmit", Y2_IS_CHK_TXA1);

	if (status & Y2_IS_CHK_TXA2)
		sky2_descriptor_error(hw, 1, "transmit", Y2_IS_CHK_TXA2);

	if (status & Y2_IS_STAT_BMU)
		sky2_status_intr(hw);

	/* NOTE(review): this read of the Last Interrupt Source Register
	 * appears to end interrupt service / re-arm delivery — confirm
	 * against the Yukon-2 manual before removing. */
	sky2_read32(hw, B0_Y2_SP_LISR);

	return IRQ_HANDLED;
}
2140+
2141+/* Chip internal frequency for clock calculations */
2142+static inline u32 sky2_mhz(const struct sky2_hw *hw)
2143+{
2144+ switch (hw->chip_id) {
2145+ case CHIP_ID_YUKON_EC:
2146+ case CHIP_ID_YUKON_EC_U:
2147+ return 125; /* 125 Mhz */
2148+ case CHIP_ID_YUKON_FE:
2149+ return 100; /* 100 Mhz */
2150+ default: /* YUKON_XL */
2151+ return 156; /* 156 Mhz */
2152+ }
2153+}
2154+
2155+static inline u32 sky2_us2clk(const struct sky2_hw *hw, u32 us)
2156+{
2157+ return sky2_mhz(hw) * us;
2158+}
2159+
2160+static inline u32 sky2_clk2us(const struct sky2_hw *hw, u32 clk)
2161+{
2162+ return clk / sky2_mhz(hw);
2163+}
2164+
2165+
/*
 * Detect the chip, bring it out of reset and program the common
 * (non per-port) blocks: PCI error state, RAM interface timeouts,
 * Tx arbiters and the status-ring/status-FIFO unit.
 *
 * Returns 0 on success, -EOPNOTSUPP for chips or revisions this
 * driver does not handle.
 */
static int sky2_reset(struct sky2_hw *hw)
{
	u16 status;
	u8 t8, pmd_type;
	int i;

	sky2_write8(hw, B0_CTST, CS_RST_CLR);

	hw->chip_id = sky2_read8(hw, B2_CHIP_ID);
	if (hw->chip_id < CHIP_ID_YUKON_XL || hw->chip_id > CHIP_ID_YUKON_FE) {
		printk(KERN_ERR PFX "%s: unsupported chip type 0x%x\n",
		       pci_name(hw->pdev), hw->chip_id);
		return -EOPNOTSUPP;
	}

	hw->chip_rev = (sky2_read8(hw, B2_MAC_CFG) & CFG_CHIP_R_MSK) >> 4;

	/* This rev is really old, and requires untested workarounds */
	if (hw->chip_id == CHIP_ID_YUKON_EC && hw->chip_rev == CHIP_REV_YU_EC_A1) {
		printk(KERN_ERR PFX "%s: unsupported revision Yukon-%s (0x%x) rev %d\n",
		       pci_name(hw->pdev), yukon2_name[hw->chip_id - CHIP_ID_YUKON_XL],
		       hw->chip_id, hw->chip_rev);
		return -EOPNOTSUPP;
	}

	/* disable ASF (firmware management) so it cannot fight the driver */
	if (hw->chip_id <= CHIP_ID_YUKON_EC) {
		sky2_write8(hw, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
		sky2_write16(hw, B0_CTST, Y2_ASF_DISABLE);
	}

	/* do a SW reset */
	sky2_write8(hw, B0_CTST, CS_RST_SET);
	sky2_write8(hw, B0_CTST, CS_RST_CLR);

	/* clear PCI errors, if any */
	status = sky2_pci_read16(hw, PCI_STATUS);

	/* config writes stay unlocked until TST_CFG_WRITE_OFF below */
	sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
	sky2_pci_write16(hw, PCI_STATUS, status | PCI_STATUS_ERROR_BITS);


	sky2_write8(hw, B0_CTST, CS_MRST_CLR);

	/* clear any PEX errors */
	if (pci_find_capability(hw->pdev, PCI_CAP_ID_EXP))
		sky2_pci_write32(hw, PEX_UNC_ERR_STAT, 0xffffffffUL);


	/* NOTE(review): PMD types 'L' and 'S' appear to denote fiber
	 * modules, everything else copper — confirm against datasheet. */
	pmd_type = sky2_read8(hw, B2_PMD_TYP);
	hw->copper = !(pmd_type == 'L' || pmd_type == 'S');

	hw->ports = 1;
	t8 = sky2_read8(hw, B2_Y2_HW_RES);
	if ((t8 & CFG_DUAL_MAC_MSK) == CFG_DUAL_MAC_MSK) {
		/* dual-MAC chip: count the second port if its link
		 * is not flagged inactive */
		if (!(sky2_read8(hw, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC))
			++hw->ports;
	}

	sky2_set_power_state(hw, PCI_D0);

	/* pulse reset on each GMAC link controller */
	for (i = 0; i < hw->ports; i++) {
		sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET);
		sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_CLR);
	}

	sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);

	/* Clear I2C IRQ noise */
	sky2_write32(hw, B2_I2C_IRQ, 1);

	/* turn off hardware timer (unused) */
	sky2_write8(hw, B2_TI_CTRL, TIM_STOP);
	sky2_write8(hw, B2_TI_CTRL, TIM_CLR_IRQ);

	sky2_write8(hw, B0_Y2LED, LED_STAT_ON);

	/* Turn off descriptor polling */
	sky2_write32(hw, B28_DPT_CTRL, DPT_STOP);

	/* Turn off receive timestamp */
	sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_STOP);
	sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);

	/* enable the Tx Arbiters */
	for (i = 0; i < hw->ports; i++)
		sky2_write8(hw, SK_REG(i, TXA_CTRL), TXA_ENA_ARB);

	/* Initialize ram interface: same timeout for every queue */
	for (i = 0; i < hw->ports; i++) {
		sky2_write8(hw, RAM_BUFFER(i, B3_RI_CTRL), RI_RST_CLR);

		sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_R1), SK_RI_TO_53);
		sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XA1), SK_RI_TO_53);
		sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XS1), SK_RI_TO_53);
		sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_R1), SK_RI_TO_53);
		sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XA1), SK_RI_TO_53);
		sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XS1), SK_RI_TO_53);
		sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_R2), SK_RI_TO_53);
		sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XA2), SK_RI_TO_53);
		sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XS2), SK_RI_TO_53);
		sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_R2), SK_RI_TO_53);
		sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XA2), SK_RI_TO_53);
		sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XS2), SK_RI_TO_53);
	}

	sky2_write32(hw, B0_HWE_IMSK, Y2_HWE_ALL_MASK);

	for (i = 0; i < hw->ports; i++)
		sky2_phy_reset(hw, i);

	/* reset and program the status ring (shared by both ports) */
	memset(hw->st_le, 0, STATUS_LE_BYTES);
	hw->st_idx = 0;

	sky2_write32(hw, STAT_CTRL, SC_STAT_RST_SET);
	sky2_write32(hw, STAT_CTRL, SC_STAT_RST_CLR);

	sky2_write32(hw, STAT_LIST_ADDR_LO, hw->st_dma);
	sky2_write32(hw, STAT_LIST_ADDR_HI, (u64) hw->st_dma >> 32);

	/* Set the list last index */
	sky2_write16(hw, STAT_LAST_IDX, STATUS_RING_SIZE - 1);

	sky2_write16(hw, STAT_TX_IDX_TH, 10);
	sky2_write8(hw, STAT_FIFO_WM, 16);

	/* set Status-FIFO ISR watermark */
	if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0)
		sky2_write8(hw, STAT_FIFO_ISR_WM, 4);
	else
		sky2_write8(hw, STAT_FIFO_ISR_WM, 16);

	/* status interrupt moderation timers (values in chip clocks) */
	sky2_write32(hw, STAT_TX_TIMER_INI, sky2_us2clk(hw, 1000));
	sky2_write32(hw, STAT_ISR_TIMER_INI, sky2_us2clk(hw, 20));
	sky2_write32(hw, STAT_LEV_TIMER_INI, sky2_us2clk(hw, 100));

	/* enable status unit */
	sky2_write32(hw, STAT_CTRL, SC_STAT_OP_ON);

	sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START);
	sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_START);
	sky2_write8(hw, STAT_ISR_TIMER_CTRL, TIM_START);

	return 0;
}
2311+
2312+static u32 sky2_supported_modes(const struct sky2_hw *hw)
2313+{
2314+ u32 modes;
2315+ if (hw->copper) {
2316+ modes = SUPPORTED_10baseT_Half
2317+ | SUPPORTED_10baseT_Full
2318+ | SUPPORTED_100baseT_Half
2319+ | SUPPORTED_100baseT_Full
2320+ | SUPPORTED_Autoneg | SUPPORTED_TP;
2321+
2322+ if (hw->chip_id != CHIP_ID_YUKON_FE)
2323+ modes |= SUPPORTED_1000baseT_Half
2324+ | SUPPORTED_1000baseT_Full;
2325+ } else
2326+ modes = SUPPORTED_1000baseT_Full | SUPPORTED_FIBRE
2327+ | SUPPORTED_Autoneg;
2328+ return modes;
2329+}
2330+
2331+static int sky2_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
2332+{
2333+ struct sky2_port *sky2 = netdev_priv(dev);
2334+ struct sky2_hw *hw = sky2->hw;
2335+
2336+ ecmd->transceiver = XCVR_INTERNAL;
2337+ ecmd->supported = sky2_supported_modes(hw);
2338+ ecmd->phy_address = PHY_ADDR_MARV;
2339+ if (hw->copper) {
2340+ ecmd->supported = SUPPORTED_10baseT_Half
2341+ | SUPPORTED_10baseT_Full
2342+ | SUPPORTED_100baseT_Half
2343+ | SUPPORTED_100baseT_Full
2344+ | SUPPORTED_1000baseT_Half
2345+ | SUPPORTED_1000baseT_Full
2346+ | SUPPORTED_Autoneg | SUPPORTED_TP;
2347+ ecmd->port = PORT_TP;
2348+ } else
2349+ ecmd->port = PORT_FIBRE;
2350+
2351+ ecmd->advertising = sky2->advertising;
2352+ ecmd->autoneg = sky2->autoneg;
2353+ ecmd->speed = sky2->speed;
2354+ ecmd->duplex = sky2->duplex;
2355+ return 0;
2356+}
2357+
2358+static int sky2_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
2359+{
2360+ struct sky2_port *sky2 = netdev_priv(dev);
2361+ const struct sky2_hw *hw = sky2->hw;
2362+ u32 supported = sky2_supported_modes(hw);
2363+
2364+ if (ecmd->autoneg == AUTONEG_ENABLE) {
2365+ ecmd->advertising = supported;
2366+ sky2->duplex = -1;
2367+ sky2->speed = -1;
2368+ } else {
2369+ u32 setting;
2370+
2371+ switch (ecmd->speed) {
2372+ case SPEED_1000:
2373+ if (ecmd->duplex == DUPLEX_FULL)
2374+ setting = SUPPORTED_1000baseT_Full;
2375+ else if (ecmd->duplex == DUPLEX_HALF)
2376+ setting = SUPPORTED_1000baseT_Half;
2377+ else
2378+ return -EINVAL;
2379+ break;
2380+ case SPEED_100:
2381+ if (ecmd->duplex == DUPLEX_FULL)
2382+ setting = SUPPORTED_100baseT_Full;
2383+ else if (ecmd->duplex == DUPLEX_HALF)
2384+ setting = SUPPORTED_100baseT_Half;
2385+ else
2386+ return -EINVAL;
2387+ break;
2388+
2389+ case SPEED_10:
2390+ if (ecmd->duplex == DUPLEX_FULL)
2391+ setting = SUPPORTED_10baseT_Full;
2392+ else if (ecmd->duplex == DUPLEX_HALF)
2393+ setting = SUPPORTED_10baseT_Half;
2394+ else
2395+ return -EINVAL;
2396+ break;
2397+ default:
2398+ return -EINVAL;
2399+ }
2400+
2401+ if ((setting & supported) == 0)
2402+ return -EINVAL;
2403+
2404+ sky2->speed = ecmd->speed;
2405+ sky2->duplex = ecmd->duplex;
2406+ }
2407+
2408+ sky2->autoneg = ecmd->autoneg;
2409+ sky2->advertising = ecmd->advertising;
2410+
2411+ if (netif_running(dev))
2412+ sky2_phy_reinit(sky2);
2413+
2414+ return 0;
2415+}
2416+
/*
 * ethtool get_drvinfo: fill in driver name/version and the PCI bus
 * location.  DRV_NAME/DRV_VERSION are compile-time constants, so the
 * fixed-size ethtool_drvinfo fields cannot overflow here.
 */
static void sky2_get_drvinfo(struct net_device *dev,
			     struct ethtool_drvinfo *info)
{
	struct sky2_port *sky2 = netdev_priv(dev);

	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->fw_version, "N/A");
	strcpy(info->bus_info, pci_name(sky2->hw->pdev));
}
2427+
/*
 * Table mapping each ethtool statistic name to the GMAC MIB counter
 * offset it is read from.  The order of entries is part of the
 * ethtool ABI (strings and values are reported positionally), so do
 * not reorder.
 */
static const struct sky2_stat {
	char name[ETH_GSTRING_LEN];	/* name reported to userspace */
	u16 offset;			/* GMAC MIB register offset */
} sky2_stats[] = {
	/* 64-bit byte counters (read via the _HI offset) */
	{ "tx_bytes", GM_TXO_OK_HI },
	{ "rx_bytes", GM_RXO_OK_HI },
	{ "tx_broadcast", GM_TXF_BC_OK },
	{ "rx_broadcast", GM_RXF_BC_OK },
	{ "tx_multicast", GM_TXF_MC_OK },
	{ "rx_multicast", GM_RXF_MC_OK },
	{ "tx_unicast", GM_TXF_UC_OK },
	{ "rx_unicast", GM_RXF_UC_OK },
	{ "tx_mac_pause", GM_TXF_MPAUSE },
	{ "rx_mac_pause", GM_RXF_MPAUSE },
	{ "collisions", GM_TXF_COL },
	{ "late_collision",GM_TXF_LAT_COL },
	{ "aborted", GM_TXF_ABO_COL },
	{ "single_collisions", GM_TXF_SNG_COL },
	{ "multi_collisions", GM_TXF_MUL_COL },

	/* receive size histogram and error counters */
	{ "rx_short", GM_RXF_SHT },
	{ "rx_runt", GM_RXE_FRAG },
	{ "rx_64_byte_packets", GM_RXF_64B },
	{ "rx_65_to_127_byte_packets", GM_RXF_127B },
	{ "rx_128_to_255_byte_packets", GM_RXF_255B },
	{ "rx_256_to_511_byte_packets", GM_RXF_511B },
	{ "rx_512_to_1023_byte_packets", GM_RXF_1023B },
	{ "rx_1024_to_1518_byte_packets", GM_RXF_1518B },
	{ "rx_1518_to_max_byte_packets", GM_RXF_MAX_SZ },
	{ "rx_too_long", GM_RXF_LNG_ERR },
	{ "rx_fifo_overflow", GM_RXE_FIFO_OV },
	{ "rx_jabber", GM_RXF_JAB_PKT },
	{ "rx_fcs_error", GM_RXF_FCS_ERR },

	/* transmit size histogram and error counters */
	{ "tx_64_byte_packets", GM_TXF_64B },
	{ "tx_65_to_127_byte_packets", GM_TXF_127B },
	{ "tx_128_to_255_byte_packets", GM_TXF_255B },
	{ "tx_256_to_511_byte_packets", GM_TXF_511B },
	{ "tx_512_to_1023_byte_packets", GM_TXF_1023B },
	{ "tx_1024_to_1518_byte_packets", GM_TXF_1518B },
	{ "tx_1519_to_max_byte_packets", GM_TXF_MAX_SZ },
	{ "tx_fifo_underrun", GM_TXE_FIFO_UR },
};
2471+
2472+static u32 sky2_get_rx_csum(struct net_device *dev)
2473+{
2474+ struct sky2_port *sky2 = netdev_priv(dev);
2475+
2476+ return sky2->rx_csum;
2477+}
2478+
2479+static int sky2_set_rx_csum(struct net_device *dev, u32 data)
2480+{
2481+ struct sky2_port *sky2 = netdev_priv(dev);
2482+
2483+ sky2->rx_csum = data;
2484+
2485+ sky2_write32(sky2->hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR),
2486+ data ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
2487+
2488+ return 0;
2489+}
2490+
2491+static u32 sky2_get_msglevel(struct net_device *netdev)
2492+{
2493+ struct sky2_port *sky2 = netdev_priv(netdev);
2494+ return sky2->msg_enable;
2495+}
2496+
2497+static int sky2_nway_reset(struct net_device *dev)
2498+{
2499+ struct sky2_port *sky2 = netdev_priv(dev);
2500+
2501+ if (sky2->autoneg != AUTONEG_ENABLE)
2502+ return -EINVAL;
2503+
2504+ sky2_phy_reinit(sky2);
2505+
2506+ return 0;
2507+}
2508+
2509+static void sky2_phy_stats(struct sky2_port *sky2, u64 * data, unsigned count)
2510+{
2511+ struct sky2_hw *hw = sky2->hw;
2512+ unsigned port = sky2->port;
2513+ int i;
2514+
2515+ data[0] = (u64) gma_read32(hw, port, GM_TXO_OK_HI) << 32
2516+ | (u64) gma_read32(hw, port, GM_TXO_OK_LO);
2517