LPCOpen Platform
LPCOpen Platform for NXP LPC Microcontrollers
 All Data Structures Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
lpc18xx_43xx_emac.c
Go to the documentation of this file.
1 /*
2  * @brief LPC18xx/43xx LWIP EMAC driver
3  *
4  * @note
5  * Copyright(C) NXP Semiconductors, 2012
6  * All rights reserved.
7  *
8  * @par
9  * Software that is described herein is for illustrative purposes only
10  * which provides customers with programming information regarding the
11  * LPC products. This software is supplied "AS IS" without any warranties of
12  * any kind, and NXP Semiconductors and its licensor disclaim any and
13  * all warranties, express or implied, including all implied warranties of
14  * merchantability, fitness for a particular purpose and non-infringement of
15  * intellectual property rights. NXP Semiconductors assumes no responsibility
16  * or liability for the use of the software, conveys no license or rights under any
17  * patent, copyright, mask work right, or any other intellectual property rights in
18  * or to any products. NXP Semiconductors reserves the right to make changes
19  * in the software without notification. NXP Semiconductors also makes no
20  * representation or warranty that such application will be suitable for the
21  * specified use without further testing or modification.
22  *
23  * @par
24  * Permission to use, copy, modify, and distribute this software and its
25  * documentation is hereby granted, under NXP Semiconductors' and its
26  * licensor's relevant copyrights in the software, without fee, provided that it
27  * is used in conjunction with NXP Semiconductors microcontrollers. This
28  * copyright, permission, and disclaimer notice must appear in all copies of
29  * this code.
30  */
31 
32 #include "lwip/opt.h"
33 #include "lwip/sys.h"
34 #include "lwip/def.h"
35 #include "lwip/mem.h"
36 #include "lwip/pbuf.h"
37 #include "lwip/stats.h"
38 #include "lwip/snmp.h"
39 #include "netif/etharp.h"
40 #include "netif/ppp_oe.h"
41 
42 #include "lpc_18xx43xx_emac_config.h"
43 #include "lpc18xx_43xx_emac.h"
44 
45 #include "chip.h"
46 #include "board.h"
47 #include "lpc_phy.h"
48 
49 #include <string.h>
50 
51 extern void msDelay(uint32_t ms);
52 
53 #if LPC_NUM_BUFF_TXDESCS < 2
54 #error LPC_NUM_BUFF_TXDESCS must be at least 2
55 #endif
56 
57 #if LPC_NUM_BUFF_RXDESCS < 3
58 #error LPC_NUM_BUFF_RXDESCS must be at least 3
59 #endif
60 
61 #ifndef LPC_CHECK_SLOWMEM
62 #error LPC_CHECK_SLOWMEM must be 0 or 1
63 #endif
64 
69 /*****************************************************************************
70  * Private types/enumerations/variables
71  ****************************************************************************/
72 
73 #if NO_SYS == 0
74 
80 #define tskTXCLEAN_PRIORITY (TCPIP_THREAD_PRIO - 1)
81 #define tskRECPKT_PRIORITY (TCPIP_THREAD_PRIO - 1)
82 #endif
83 
91 // #define LOCK_RX_THREAD
92 
93 /* LPC EMAC driver data structure */
94 struct lpc_enetdata {
95  struct netif *netif;
99  struct pbuf *txpbufs[LPC_NUM_BUFF_TXDESCS];
101  volatile u32_t tx_free_descs;
106  volatile u32_t rx_free_descs;
107  volatile u32_t rx_get_idx;
109 #if NO_SYS == 0
113  xSemaphoreHandle xTXDCountSem;
114 #endif
115 };
116 
/* LPC EMAC driver work data */
/* NOTE(review): the file-scope instance definition — e.g.
   `static struct lpc_enetdata lpc_enetdata;` referenced by ETH_IRQHandler
   and lpc_enetif_init — appears to have been dropped from this extract;
   restore it from the original source. */

#if LPC_CHECK_SLOWMEM == 1
/* One inclusive address range of "slow" memory; pbufs whose payload lies
   in such a range are copied to a fresh PBUF_RAM buffer before transmit
   (see lpc_low_level_output) */
struct lpc_slowmem_array_t {
	u32_t start;	/* First address of the region */
	u32_t end;		/* Last address of the region (inclusive) */
};

/* Slow memory region table; contents come from the board/config-provided
   LPC_SLOWMEM_ARRAY macro */
const static struct lpc_slowmem_array_t slmem[] = LPC_SLOWMEM_ARRAY;
#endif
130 
131 /*****************************************************************************
132  * Public types/enumerations/variables
133  ****************************************************************************/
134 
135 /*****************************************************************************
136  * Private functions
137  ****************************************************************************/
138 
139 /* Queues a pbuf into a free RX descriptor */
140 static void lpc_rxqueue_pbuf(struct lpc_enetdata *lpc_netifdata,
141  struct pbuf *p)
142 {
143  u32_t idx = lpc_netifdata->rx_next_idx;
144 
145  /* Save location of pbuf so we know what to pass to LWIP later */
146  lpc_netifdata->rxpbufs[idx] = p;
147 
148  /* Buffer size and address for pbuf */
149  lpc_netifdata->prdesc[idx].CTRL = (u32_t) RDES_ENH_BS1(p->len) |
150  RDES_ENH_RCH;
151  if (idx == (LPC_NUM_BUFF_RXDESCS - 1)) {
152  lpc_netifdata->prdesc[idx].CTRL |= RDES_ENH_RER;
153  }
154  lpc_netifdata->prdesc[idx].B1ADD = (u32_t) p->payload;
155 
156  /* Give descriptor to MAC/DMA */
157  lpc_netifdata->prdesc[idx].STATUS = RDES_OWN;
158 
159  /* Update free count */
160  lpc_netifdata->rx_free_descs--;
161 
162  LWIP_DEBUGF(UDP_LPC_EMAC | LWIP_DBG_TRACE,
163  ("lpc_rxqueue_pbuf: Queueing packet %p at index %d, free %d\n",
164  p, idx, lpc_netifdata->rx_free_descs));
165 
166  /* Update index for next pbuf */
167  idx++;
168  if (idx >= LPC_NUM_BUFF_RXDESCS) {
169  idx = 0;
170  }
171  lpc_netifdata->rx_next_idx = idx;
172 }
173 
174 /* This function sets up the descriptor list used for receive packets */
175 static err_t lpc_rx_setup(struct lpc_enetdata *lpc_netifdata)
176 {
177  s32_t idx;
178 
179  /* Set to start of list */
180  lpc_netifdata->rx_get_idx = 0;
181  lpc_netifdata->rx_next_idx = 0;
182  lpc_netifdata->rx_free_descs = LPC_NUM_BUFF_RXDESCS;
183 
184  /* Clear initial RX descriptor list */
185  memset(lpc_netifdata->prdesc, 0, sizeof(lpc_netifdata->prdesc));
186 
187  /* Setup buffer chaining before allocating pbufs for descriptors
188  just in case memory runs out. */
189  for (idx = 0; idx < LPC_NUM_BUFF_RXDESCS; idx++) {
190  lpc_netifdata->prdesc[idx].CTRL = RDES_ENH_RCH;
191  lpc_netifdata->prdesc[idx].B2ADD = (u32_t)
192  &lpc_netifdata->prdesc[idx + 1];
193  }
194  lpc_netifdata->prdesc[LPC_NUM_BUFF_RXDESCS - 1].CTRL =
196  lpc_netifdata->prdesc[LPC_NUM_BUFF_RXDESCS - 1].B2ADD =
197  (u32_t) &lpc_netifdata->prdesc[0];
198  LPC_ETHERNET->DMA_REC_DES_ADDR = (u32_t) lpc_netifdata->prdesc;
199 
200  /* Setup up RX pbuf queue, but post a warning if not enough were
201  queued for all descriptors. */
202  if (lpc_rx_queue(lpc_netifdata->netif) != LPC_NUM_BUFF_RXDESCS) {
203  LWIP_DEBUGF(UDP_LPC_EMAC | LWIP_DBG_TRACE,
204  ("lpc_rx_setup: Warning, not enough memory for RX pbufs\n"));
205  }
206 
207  return ERR_OK;
208 }
209 
/* Gets data from queue and forwards to LWIP */
/* Pulls one received frame (if any) from the RX descriptor ring and
   returns it as a pbuf, or NULL if nothing is ready. Error frames are
   re-queued to the ring and NULL is returned. Always restarts DMA
   receive polling before returning a frame. */
static struct pbuf *lpc_low_level_input(struct netif *netif) {
	struct lpc_enetdata *lpc_netifdata = netif->state;
	u32_t status, ridx;
	int rxerr = 0;		/* Set when the frame must be dropped/re-queued */
	struct pbuf *p;

#ifdef LOCK_RX_THREAD
#if NO_SYS == 0
	/* Get exclusive access */
	sys_mutex_lock(&lpc_netifdata->TXLockMutex);
#endif
#endif

	/* If there are no used descriptors, then this call was
	   not for a received packet, try to setup some descriptors now */
	if (lpc_netifdata->rx_free_descs == LPC_NUM_BUFF_RXDESCS) {
		lpc_rx_queue(netif);
#ifdef LOCK_RX_THREAD
#if NO_SYS == 0
		sys_mutex_unlock(&lpc_netifdata->TXLockMutex);
#endif
#endif
		return NULL;
	}

	/* Get index for next descriptor with data */
	ridx = lpc_netifdata->rx_get_idx;

	/* Return if descriptor is still owned by DMA */
	if (lpc_netifdata->prdesc[ridx].STATUS & RDES_OWN) {
#ifdef LOCK_RX_THREAD
#if NO_SYS == 0
		sys_mutex_unlock(&lpc_netifdata->TXLockMutex);
#endif
#endif
		return NULL;
	}

	/* Get address of pbuf for this descriptor */
	p = lpc_netifdata->rxpbufs[ridx];

	/* Get receive packet status */
	status = lpc_netifdata->prdesc[ridx].STATUS;

	/* Check packet for errors */
	if (status & RDES_ES) {
		LINK_STATS_INC(link.drop);

		/* Error conditions that cause a packet drop */
		/* NOTE(review): intMask is not defined anywhere in this extract —
		   presumably a file-scope error-bit mask lost in extraction;
		   confirm against the original source */
		if (status & intMask) {
			LINK_STATS_INC(link.err);
			rxerr = 1;
		}
		else
		/* Length error check needs qualification */
		if ((status & (RDES_LE | RDES_FT)) == RDES_LE) {
			LINK_STATS_INC(link.lenerr);
			rxerr = 1;
		}
		else
		/* CRC error check needs qualification */
		if ((status & (RDES_CE | RDES_LS)) == (RDES_CE | RDES_LS)) {
			LINK_STATS_INC(link.chkerr);
			rxerr = 1;
		}

		/* Descriptor error check needs qualification */
		if ((status & (RDES_DE | RDES_LS)) == (RDES_DE | RDES_LS)) {
			LINK_STATS_INC(link.err);
			rxerr = 1;
		}
		else
		/* Dribble bit error only applies in half duplex mode */
		if ((status & RDES_DE) &&
			(!(LPC_ETHERNET->MAC_CONFIG & MAC_CFG_DM))) {
			LINK_STATS_INC(link.err);
			rxerr = 1;
		}
	}

	/* Increment free descriptor count and next get index */
	lpc_netifdata->rx_free_descs++;
	ridx++;
	if (ridx >= LPC_NUM_BUFF_RXDESCS) {
		ridx = 0;
	}
	lpc_netifdata->rx_get_idx = ridx;

	/* If an error occurred, just re-queue the pbuf */
	if (rxerr) {
		lpc_rxqueue_pbuf(lpc_netifdata, p);
		p = NULL;

		LWIP_DEBUGF(UDP_LPC_EMAC | LWIP_DBG_TRACE,
					("lpc_low_level_input: RX error condition status 0x%08x\n",
					 status));
	}
	else {
		/* Attempt to queue a new pbuf for the descriptor */
		lpc_rx_queue(netif);

		/* Get length of received packet */
		p->len = p->tot_len = (u16_t) RDES_FLMSK(status);

		LINK_STATS_INC(link.recv);

		LWIP_DEBUGF(UDP_LPC_EMAC | LWIP_DBG_TRACE,
					("lpc_low_level_input: Packet received, %d bytes, "
					 "status 0x%08x\n", p->len, status));
	}

	/* (Re)start receive polling */
	LPC_ETHERNET->DMA_REC_POLL_DEMAND = 1;

#ifdef LOCK_RX_THREAD
#if NO_SYS == 0
	/* Release exclusive access */
	sys_mutex_unlock(&lpc_netifdata->TXLockMutex);
#endif
#endif

	return p;
}
334 
335 /* This function sets up the descriptor list used for transmit packets */
336 static err_t lpc_tx_setup(struct lpc_enetdata *lpc_netifdata)
337 {
338  s32_t idx;
339 
340  /* Clear TX descriptors, will be queued with pbufs as needed */
341  memset((void *) &lpc_netifdata->ptdesc[0], 0, sizeof(lpc_netifdata->ptdesc));
342  lpc_netifdata->tx_free_descs = LPC_NUM_BUFF_TXDESCS;
343  lpc_netifdata->tx_fill_idx = 0;
344  lpc_netifdata->tx_reclaim_idx = 0;
345 
346  /* Link/wrap descriptors */
347  for (idx = 0; idx < LPC_NUM_BUFF_TXDESCS; idx++) {
348  lpc_netifdata->ptdesc[idx].CTRLSTAT = TDES_ENH_TCH | TDES_ENH_CIC(3);
349  lpc_netifdata->ptdesc[idx].B2ADD =
350  (u32_t) &lpc_netifdata->ptdesc[idx + 1];
351  }
352  lpc_netifdata->ptdesc[LPC_NUM_BUFF_TXDESCS - 1].CTRLSTAT =
354  lpc_netifdata->ptdesc[LPC_NUM_BUFF_TXDESCS - 1].B2ADD =
355  (u32_t) &lpc_netifdata->ptdesc[0];
356 
357  /* Setup pointer to TX descriptor table */
358  LPC_ETHERNET->DMA_TRANS_DES_ADDR = (u32_t) lpc_netifdata->ptdesc;
359 
360  return ERR_OK;
361 }
362 
363 /* Low level output of a packet. Never call this from an interrupt context,
364  as it may block until TX descriptors become available */
365 static err_t lpc_low_level_output(struct netif *netif, struct pbuf *sendp)
366 {
367  struct lpc_enetdata *lpc_netifdata = netif->state;
368  u32_t idx, fidx, dn;
369  struct pbuf *p = sendp;
370 
371 #if LPC_CHECK_SLOWMEM == 1
372  struct pbuf *q, *wp;
373 
374  u8_t *dst;
375  int pcopy = 0;
376 
377  /* Check packet address to determine if it's in slow memory and
378  relocate if necessary */
379  for (q = p; ((q != NULL) && (pcopy == 0)); q = q->next) {
380  fidx = 0;
381  for (idx = 0; idx < sizeof(slmem);
382  idx += sizeof(struct lpc_slowmem_array_t)) {
383  if ((q->payload >= (void *) slmem[fidx].start) &&
384  (q->payload <= (void *) slmem[fidx].end)) {
385  /* Needs copy */
386  pcopy = 1;
387  }
388  }
389  }
390 
391  if (pcopy) {
392  /* Create a new pbuf with the total pbuf size */
393  wp = pbuf_alloc(PBUF_RAW, (u16_t) EMAC_ETH_MAX_FLEN, PBUF_RAM);
394  if (!wp) {
395  /* Exit with error */
396  return ERR_MEM;
397  }
398 
399  /* Copy pbuf */
400  dst = (u8_t *) wp->payload;
401  wp->tot_len = 0;
402  for (q = p; q != NULL; q = q->next) {
403  MEMCPY(dst, (u8_t *) q->payload, q->len);
404  dst += q->len;
405  wp->tot_len += q->len;
406  }
407  wp->len = wp->tot_len;
408 
409  /* LWIP will free original pbuf on exit of function */
410 
411  p = sendp = wp;
412  }
413 #endif
414 
415  /* Zero-copy TX buffers may be fragmented across mutliple payload
416  chains. Determine the number of descriptors needed for the
417  transfer. The pbuf chaining can be a mess! */
418  dn = (u32_t) pbuf_clen(p);
419 
420  /* Wait until enough descriptors are available for the transfer. */
421  /* THIS WILL BLOCK UNTIL THERE ARE ENOUGH DESCRIPTORS AVAILABLE */
422  while (dn > lpc_tx_ready(netif))
423 #if NO_SYS == 0
424  {xSemaphoreTake(lpc_netifdata->xTXDCountSem, 0); }
425 #else
426  {msDelay(1); }
427 #endif
428 
429  /* Get the next free descriptor index */
430  fidx = idx = lpc_netifdata->tx_fill_idx;
431 
432 #if NO_SYS == 0
433  /* Get exclusive access */
434  sys_mutex_lock(&lpc_netifdata->TXLockMutex);
435 #endif
436 
437  /* Fill in the next free descriptor(s) */
438  while (dn > 0) {
439  dn--;
440 
441  /* Setup packet address and length */
442  lpc_netifdata->ptdesc[idx].B1ADD = (u32_t) p->payload;
443  lpc_netifdata->ptdesc[idx].BSIZE = (u32_t) TDES_ENH_BS1(p->len);
444 
445  /* Save pointer to pbuf so we can reclain the memory for
446  the pbuf after the buffer has been sent. Only the first
447  pbuf in a chain is saved since the full chain doesn't
448  need to be freed. */
449  /* For first packet only, first flag */
450  lpc_netifdata->tx_free_descs--;
451  if (idx == fidx) {
452  lpc_netifdata->ptdesc[idx].CTRLSTAT |= TDES_ENH_FS;
453 #if LPC_CHECK_SLOWMEM == 1
454  /* If this is a copied pbuf, then avoid getting the extra reference
455  or the TX reclaim will be off by 1 */
456  if (!pcopy) {
457  pbuf_ref(p);
458  }
459 #else
460  /* Increment reference count on this packet so LWIP doesn't
461  attempt to free it on return from this call */
462  pbuf_ref(p);
463 #endif
464  }
465  else {
466  lpc_netifdata->ptdesc[idx].CTRLSTAT |= TDES_OWN;
467  }
468 
469  /* Save address of pbuf, but make sure it's associated with the
470  first chained pbuf so it gets freed once all pbuf chains are
471  transferred. */
472  if (!dn) {
473  lpc_netifdata->txpbufs[idx] = sendp;
474  }
475  else {
476  lpc_netifdata->txpbufs[idx] = NULL;
477  }
478 
479  /* For last packet only, interrupt and last flag */
480  if (dn == 0) {
481  lpc_netifdata->ptdesc[idx].CTRLSTAT |= TDES_ENH_LS |
482  TDES_ENH_IC;
483  }
484 
485  /* IP checksumming requires full buffering in IP */
486  lpc_netifdata->ptdesc[idx].CTRLSTAT |= TDES_ENH_CIC(3);
487 
488  LWIP_DEBUGF(UDP_LPC_EMAC | LWIP_DBG_TRACE,
489  ("lpc_low_level_output: pbuf packet %p sent, chain %d,"
490  " size %d, index %d, free %d\n", p, dn, p->len, idx,
491  lpc_netifdata->tx_free_descs));
492 
493  /* Update next available descriptor */
494  idx++;
495  if (idx >= LPC_NUM_BUFF_TXDESCS) {
496  idx = 0;
497  }
498 
499  /* Next packet fragment */
500  p = p->next;
501  }
502 
503  lpc_netifdata->tx_fill_idx = idx;
504 
505  LINK_STATS_INC(link.xmit);
506 
507  /* Give first descriptor to DMA to start transfer */
508  lpc_netifdata->ptdesc[fidx].CTRLSTAT |= TDES_OWN;
509 
510  /* Tell DMA to poll descriptors to start transfer */
511  LPC_ETHERNET->DMA_TRANS_POLL_DEMAND = 1;
512 
513 #if NO_SYS == 0
514  /* Restore access */
515  sys_mutex_unlock(&lpc_netifdata->TXLockMutex);
516 #endif
517 
518  return ERR_OK;
519 }
520 
521 /* This function is the ethernet packet send function. It calls
522  etharp_output after checking link status */
523 static err_t lpc_etharp_output(struct netif *netif, struct pbuf *q,
524  ip_addr_t *ipaddr)
525 {
526  /* Only send packet is link is up */
527  if (netif->flags & NETIF_FLAG_LINK_UP) {
528  return etharp_output(netif, q, ipaddr);
529  }
530 
531  return ERR_CONN;
532 }
533 
534 #if NO_SYS == 0
535 /* Packet reception task
536  This task is called when a packet is received. It will
537  pass the packet to the LWIP core */
538 static portTASK_FUNCTION(vPacketReceiveTask, pvParameters) {
539  struct lpc_enetdata *lpc_netifdata = pvParameters;
540 
541  while (1) {
542  /* Wait for receive task to wakeup */
543  sys_arch_sem_wait(&lpc_netifdata->RxSem, 0);
544 
545  /* Process receive packets */
546  while (!(lpc_netifdata->prdesc[lpc_netifdata->rx_get_idx].STATUS
547  & RDES_OWN)) {
548  lpc_enetif_input(lpc_netifdata->netif);
549  }
550  }
551 }
552 
553 /* Transmit cleanup task
554  This task is called when a transmit interrupt occurs and
555  reclaims the pbuf and descriptor used for the packet once
556  the packet has been transferred */
557 static portTASK_FUNCTION(vTransmitCleanupTask, pvParameters) {
558  struct lpc_enetdata *lpc_netifdata = pvParameters;
559 
560  while (1) {
561  /* Wait for transmit cleanup task to wakeup */
562  sys_arch_sem_wait(&lpc_netifdata->TxCleanSem, 0);
563 
564  /* Free TX pbufs and descriptors that are done */
565  lpc_tx_reclaim(lpc_netifdata->netif);
566  }
567 }
568 #endif
569 
/* Low level init of the MAC and PHY */
/* Brings up the EMAC peripheral, programs the MAC address and frame
   filter, initializes the PHY, builds the TX/RX descriptor rings and
   enables DMA + packet reception. Returns ERR_OK on success. */
static err_t low_level_init(struct netif *netif)
{
	struct lpc_enetdata *lpc_netifdata = netif->state;

	/* Initialize via Chip ENET function */
	Chip_ENET_Init();

	/* Save MAC address */
	Chip_ENET_SetADDR(netif->hwaddr);

	/* Initial MAC configuration for checksum offload, full duplex,
	   100Mbps, disable receive own in half duplex, inter-frame gap
	   of 64-bits */
	/* NOTE(review): the continuation of this assignment (the remaining
	   MAC_CFG_* flags after the trailing '|') is missing from this extract;
	   restore it from the original source */
	LPC_ETHERNET->MAC_CONFIG = MAC_CFG_BL(0) | MAC_CFG_IPC | MAC_CFG_DM |

	/* Setup filter */
#if IP_SOF_BROADCAST_RECV
	LPC_ETHERNET->MAC_FRAME_FILTER = MAC_FF_PR | MAC_FF_RA;
#else
	LPC_ETHERNET->MAC_FRAME_FILTER = 0;	/* Only matching MAC address */
#endif

	/* Initialize the PHY */
#if defined(USE_RMII)
	if (lpc_phy_init(true, msDelay) != SUCCESS) {
		return ERROR;
	}

	/* NOTE(review): the start of the statement these flags belong to is
	   missing from this extract (orphaned expression below); restore it
	   from the original source */
	RDES_SAF | RDES_AFM;
#else
	if (lpc_phy_init(false, msDelay) != SUCCESS) {
		return ERROR;
	}

	/* NOTE(review): same truncation as in the RMII branch above */
	RDES_AFM;
#endif

	/* Setup transmit and receive descriptors */
	if (lpc_tx_setup(lpc_netifdata) != ERR_OK) {
		return ERR_BUF;
	}
	if (lpc_rx_setup(lpc_netifdata) != ERR_OK) {
		return ERR_BUF;
	}

	/* Flush transmit FIFO */
	LPC_ETHERNET->DMA_OP_MODE = DMA_OM_FTF;

	/* Setup DMA to flush receive FIFOs at 32 bytes, service TX FIFOs at
	   64 bytes */
	LPC_ETHERNET->DMA_OP_MODE |= DMA_OM_RTC(1) | DMA_OM_TTC(0);

	/* Clear all MAC interrupts */
	LPC_ETHERNET->DMA_STAT = DMA_ST_ALL;

	/* Enable MAC interrupts */
	/* NOTE(review): the RTOS-build interrupt-enable mask (the #else arm's
	   value) is missing from this extract; restore it from the original
	   source */
	LPC_ETHERNET->DMA_INT_EN =
#if NO_SYS == 1
		0;
#else
#endif

	/* Enable receive and transmit DMA processes */
	LPC_ETHERNET->DMA_OP_MODE |= DMA_OM_ST | DMA_OM_SR;

	/* Enable packet reception */
	LPC_ETHERNET->MAC_CONFIG |= MAC_CFG_RE | MAC_CFG_TE;

	/* Start receive polling */
	LPC_ETHERNET->DMA_REC_POLL_DEMAND = 1;

	return ERR_OK;
}
649 
650 /*****************************************************************************
651  * Public functions
652  ****************************************************************************/
653 
654 /* Write a value via the MII link (non-blocking) */
655 void lpc_mii_write_noblock(u32_t PhyReg, u32_t Value)
656 {
657  /* Write value at PHY address and register */
658  LPC_ETHERNET->MAC_MII_ADDR = MAC_MIIA_PA(LPC_PHYDEF_PHYADDR) |
659  MAC_MIIA_GR(PhyReg) | MAC_MIIA_CR(4) | MAC_MIIA_W;
660  LPC_ETHERNET->MAC_MII_DATA = Value;
661  LPC_ETHERNET->MAC_MII_ADDR |= MAC_MIIA_GB;
662 }
663 
664 /* Write a value via the MII link (blocking) */
665 err_t lpc_mii_write(u32_t PhyReg, u32_t Value)
666 {
667  u32_t mst = 250;
668  err_t sts = ERR_OK;
669 
670  /* Write value at PHY address and register */
671  lpc_mii_write_noblock(PhyReg, Value);
672 
673  /* Wait for unbusy status */
674  while (mst > 0) {
675  sts = LPC_ETHERNET->MAC_MII_ADDR & MAC_MIIA_GB;
676  if (sts == 0) {
677  mst = 0;
678  }
679  else {
680  mst--;
681  msDelay(1);
682  }
683  }
684 
685  if (sts != 0) {
686  sts = ERR_TIMEOUT;
687  }
688 
689  return sts;
690 }
691 
/* Reads current MII link busy status */
/* NOTE(review): this function's signature line is missing from the extract
   (the body returns the MAC_MIIA_GB busy bit — presumably a u32_t-returning,
   no-argument function); restore the signature from lpc18xx_43xx_emac.h */
{
	/* Non-zero while an MII transaction is still in progress */
	return LPC_ETHERNET->MAC_MII_ADDR & MAC_MIIA_GB;
}
697 
/* Read current value in MII data register */
/* NOTE(review): this function's signature line is missing from the extract
   (the body returns the MII data register — presumably a u32_t-returning,
   no-argument function); restore the signature from lpc18xx_43xx_emac.h */
{
	return LPC_ETHERNET->MAC_MII_DATA;
}
703 
704 /* Starts a read operation via the MII link (non-blocking) */
706 {
707  /* Read value at PHY address and register */
708  LPC_ETHERNET->MAC_MII_ADDR = MAC_MIIA_PA(LPC_PHYDEF_PHYADDR) |
709  MAC_MIIA_GR(PhyReg) | MAC_MIIA_CR(4);
710  LPC_ETHERNET->MAC_MII_ADDR |= MAC_MIIA_GB;
711 }
712 
713 /* Read a value via the MII link (blocking) */
714 err_t lpc_mii_read(u32_t PhyReg, u32_t *data)
715 {
716  u32_t mst = 250;
717  err_t sts = ERR_OK;
718 
719  /* Read value at PHY address and register */
720  lpc_mii_read_noblock(PhyReg);
721 
722  /* Wait for unbusy status */
723  while (mst > 0) {
724  sts = LPC_ETHERNET->MAC_MII_ADDR & MAC_MIIA_GB;
725  if (sts == 0) {
726  mst = 0;
727  *data = LPC_ETHERNET->MAC_MII_DATA;
728  }
729  else {
730  mst--;
731  msDelay(1);
732  }
733  }
734 
735  if (sts != 0) {
736  sts = ERR_TIMEOUT;
737  }
738 
739  return sts;
740 }
741 
742 /* Attempt to allocate and requeue a new pbuf for RX */
744 {
745  struct lpc_enetdata *lpc_netifdata = netif->state;
746  struct pbuf *p;
747 
748  s32_t queued = 0;
749 
750  /* Attempt to requeue as many packets as possible */
751  while (lpc_netifdata->rx_free_descs > 0) {
752  /* Allocate a pbuf from the pool. We need to allocate at the
753  maximum size as we don't know the size of the yet to be
754  received packet. */
755  p = pbuf_alloc(PBUF_RAW, (u16_t) EMAC_ETH_MAX_FLEN, PBUF_RAM);
756  if (p == NULL) {
757  LWIP_DEBUGF(UDP_LPC_EMAC | LWIP_DBG_TRACE,
758  ("lpc_rx_queue: could not allocate RX pbuf index %d, "
759  "free %d)\n", lpc_netifdata->rx_next_idx,
760  lpc_netifdata->rx_free_descs));
761  return queued;
762  }
763 
764  /* pbufs allocated from the RAM pool should be non-chained (although
765  the hardware will allow chaining) */
766  LWIP_ASSERT("lpc_rx_queue: pbuf is not contiguous (chained)",
767  pbuf_clen(p) <= 1);
768 
769  /* Queue packet */
770  lpc_rxqueue_pbuf(lpc_netifdata, p);
771 
772  /* Update queued count */
773  queued++;
774  }
775 
776  return queued;
777 }
778 
779 /* Attempt to read a packet from the EMAC interface */
780 void lpc_enetif_input(struct netif *netif)
781 {
782  struct eth_hdr *ethhdr;
783 
784  struct pbuf *p;
785 
786  /* move received packet into a new pbuf */
787  p = lpc_low_level_input(netif);
788  if (p == NULL) {
789  return;
790  }
791 
792  /* points to packet payload, which starts with an Ethernet header */
793  ethhdr = p->payload;
794 
795  switch (htons(ethhdr->type)) {
796  case ETHTYPE_IP:
797  case ETHTYPE_ARP:
798 #if PPPOE_SUPPORT
799  case ETHTYPE_PPPOEDISC:
800  case ETHTYPE_PPPOE:
801 #endif /* PPPOE_SUPPORT */
802  /* full packet send to tcpip_thread to process */
803  if (netif->input(p, netif) != ERR_OK) {
804  LWIP_DEBUGF(NETIF_DEBUG,
805  ("lpc_enetif_input: IP input error\n"));
806  /* Free buffer */
807  pbuf_free(p);
808  }
809  break;
810 
811  default:
812  /* Return buffer */
813  pbuf_free(p);
814  break;
815  }
816 }
817 
/* Call for freeing TX buffers that are complete */
/* Walks the TX ring from tx_reclaim_idx, and for every descriptor the DMA
   has released: records error statistics, resets the descriptor control
   word, frees the associated pbuf chain (saved only on the last fragment's
   descriptor) and returns the descriptor to the free pool. */
void lpc_tx_reclaim(struct netif *netif)
{
	struct lpc_enetdata *lpc_netifdata = netif->state;
	s32_t ridx;
	u32_t status;

#if NO_SYS == 0
	/* Get exclusive access */
	sys_mutex_lock(&lpc_netifdata->TXLockMutex);
#endif

	/* If a descriptor is available and is no longer owned by the
	   hardware, it can be reclaimed */
	ridx = lpc_netifdata->tx_reclaim_idx;
	while ((lpc_netifdata->tx_free_descs < LPC_NUM_BUFF_TXDESCS) &&
		   (!(lpc_netifdata->ptdesc[ridx].CTRLSTAT & TDES_OWN))) {
		/* Peek at the status of the descriptor to determine if the
		   packet is good and any status information. */
		status = lpc_netifdata->ptdesc[ridx].CTRLSTAT;

		LWIP_DEBUGF(UDP_LPC_EMAC | LWIP_DBG_TRACE,
					("lpc_tx_reclaim: Reclaiming sent packet %p, index %d\n",
					 lpc_netifdata->txpbufs[ridx], ridx));

		/* Check TX error conditions */
		if (status & TDES_ES) {
			LWIP_DEBUGF(UDP_LPC_EMAC | LWIP_DBG_TRACE,
						("lpc_tx_reclaim: TX error condition status 0x%x\n", status));
			LINK_STATS_INC(link.err);

#if LINK_STATS == 1
			/* Error conditions that cause a packet drop */
			if (status & (TDES_UF | TDES_ED | TDES_EC | TDES_LC)) {
				LINK_STATS_INC(link.drop);
			}
#endif
		}

		/* Reset control for this descriptor; the last ring entry keeps
		   its end-of-ring flag */
		if (ridx == (LPC_NUM_BUFF_TXDESCS - 1)) {
			lpc_netifdata->ptdesc[ridx].CTRLSTAT = TDES_ENH_TCH |
												   TDES_ENH_TER;
		}
		else {
			lpc_netifdata->ptdesc[ridx].CTRLSTAT = TDES_ENH_TCH;
		}

		/* Free the pbuf associated with this descriptor (only set on the
		   descriptor holding a chain's last fragment) */
		if (lpc_netifdata->txpbufs[ridx]) {
			pbuf_free(lpc_netifdata->txpbufs[ridx]);
		}

		/* Reclaim this descriptor */
		lpc_netifdata->tx_free_descs++;
#if NO_SYS == 0
		/* Wake any sender blocked waiting for a free descriptor */
		xSemaphoreGive(lpc_netifdata->xTXDCountSem);
#endif
		ridx++;
		if (ridx >= LPC_NUM_BUFF_TXDESCS) {
			ridx = 0;
		}
	}

	lpc_netifdata->tx_reclaim_idx = ridx;

#if NO_SYS == 0
	/* Restore access */
	sys_mutex_unlock(&lpc_netifdata->TXLockMutex);
#endif
}
889 
890 /* Polls if an available TX descriptor is ready */
892 {
893  return ((struct lpc_enetdata *) netif->state)->tx_free_descs;
894 }
895 
/* EMAC interrupt handler.
   Without an RTOS it simply disables the interrupt (polled operation);
   with FreeRTOS it converts RX/TX DMA status bits into semaphore gives
   that wake the receive and transmit-cleanup tasks. */
void ETH_IRQHandler(void)
{
#if NO_SYS == 1
	/* Interrupts are not used without an RTOS */
	NVIC_DisableIRQ((IRQn_Type) ETHERNET_IRQn);
#else
	signed portBASE_TYPE xRecTaskWoken = pdFALSE, XTXTaskWoken = pdFALSE;
	uint32_t ints;

	/* Get pending interrupts */
	ints = LPC_ETHERNET->DMA_STAT;

	/* RX group interrupt(s): receive complete, overflow, RX buffer unavailable */
	if (ints & (DMA_ST_RI | DMA_ST_OVF | DMA_ST_RU)) {
		/* Give semaphore to wakeup RX receive task. Note the FreeRTOS
		   method is used instead of the LWIP arch method. */
		xSemaphoreGiveFromISR(lpc_enetdata.RxSem, &xRecTaskWoken);
	}

	/* TX group interrupt(s): transmit complete, underflow, TX buffer unavailable */
	if (ints & (DMA_ST_TI | DMA_ST_UNF | DMA_ST_TU)) {
		/* Give semaphore to wakeup TX cleanup task. Note the FreeRTOS
		   method is used instead of the LWIP arch method. */
		xSemaphoreGiveFromISR(lpc_enetdata.TxCleanSem, &XTXTaskWoken);
	}

	/* Clear pending interrupts (write-1-to-clear) */
	LPC_ETHERNET->DMA_STAT = ints;

	/* Context switch needed? */
	portEND_SWITCHING_ISR(xRecTaskWoken || XTXTaskWoken);
#endif
}
935 
936 /* Set up the MAC interface duplex */
937 void lpc_emac_set_duplex(int full_duplex)
938 {
939  if (full_duplex) {
940  LPC_ETHERNET->MAC_CONFIG |= MAC_CFG_DM;
941  }
942  else {
943  LPC_ETHERNET->MAC_CONFIG &= ~MAC_CFG_DM;
944  }
945 }
946 
947 /* Set up the MAC interface speed */
948 void lpc_emac_set_speed(int mbs_100)
949 {
950  if (mbs_100) {
951  LPC_ETHERNET->MAC_CONFIG |= MAC_CFG_FES;
952  }
953  else {
954  LPC_ETHERNET->MAC_CONFIG &= ~MAC_CFG_FES;
955  }
956 }
957 
958 /* LWIP 18xx/43xx EMAC initialization function */
960 {
961  err_t err;
962  extern void Board_ENET_GetMacADDR(u8_t *mcaddr);
963 
964  LWIP_ASSERT("netif != NULL", (netif != NULL));
965 
967 
968  /* set MAC hardware address */
969  Board_ENET_GetMacADDR(netif->hwaddr);
970  netif->hwaddr_len = ETHARP_HWADDR_LEN;
971 
972  /* maximum transfer unit */
973  netif->mtu = 1500;
974 
975  /* device capabilities */
976  netif->flags = NETIF_FLAG_BROADCAST | NETIF_FLAG_ETHARP | NETIF_FLAG_UP |
977  NETIF_FLAG_ETHERNET;
978 
979  /* Initialize the hardware */
980  netif->state = &lpc_enetdata;
981  err = low_level_init(netif);
982  if (err != ERR_OK) {
983  return err;
984  }
985 
986 #if LWIP_NETIF_HOSTNAME
987  /* Initialize interface hostname */
988  netif->hostname = "lwiplpc";
989 #endif /* LWIP_NETIF_HOSTNAME */
990 
991  netif->name[0] = 'e';
992  netif->name[1] = 'n';
993 
994  netif->output = lpc_etharp_output;
995  netif->linkoutput = lpc_low_level_output;
996 
997  /* For FreeRTOS, start tasks */
998 #if NO_SYS == 0
999  lpc_enetdata.xTXDCountSem = xSemaphoreCreateCounting(LPC_NUM_BUFF_TXDESCS,
1001  LWIP_ASSERT("xTXDCountSem creation error",
1003 
1005  LWIP_ASSERT("TXLockMutex creation error", (err == ERR_OK));
1006 
1007  /* Packet receive task */
1008  err = sys_sem_new(&lpc_enetdata.RxSem, 0);
1009  LWIP_ASSERT("RxSem creation error", (err == ERR_OK));
1010  sys_thread_new("receive_thread", vPacketReceiveTask, netif->state,
1012 
1013  /* Transmit cleanup task */
1014  err = sys_sem_new(&lpc_enetdata.TxCleanSem, 0);
1015  LWIP_ASSERT("TxCleanSem creation error", (err == ERR_OK));
1016  sys_thread_new("txclean_thread", vTransmitCleanupTask, netif->state,
1018 #endif
1019 
1020  return ERR_OK;
1021 }
1022