/* bnx2x_cmn.c: QLogic Everest network driver.
 *
 * Copyright (c) 2007-2013 Broadcom Corporation
 * Copyright (c) 2014 QLogic Corporation
 * All rights reserved
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/crash_dump.h>
#include <net/tcp.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include <net/busy_poll.h>
#include <linux/prefetch.h>
#include "bnx2x_cmn.h"
#include "bnx2x_init.h"
#include "bnx2x_sp.h"

#ifdef CONFIG_XEN_SME
#include <linux/irqdesc.h>
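/*
 * Interception hooks installed at runtime by an external (Xen SME) module.
 * When lnk_intercept_rx_path is non-NULL, the RX fastpath hands its ring
 * state to the module and walks the shadow (swi_*) indices instead of the
 * driver's own rx_* view (see __bnx2x_rx_int()).
 */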
extern int (*lnk_intercept_sk_buff) (struct sk_buff *skb, char *extra_dbg_string);
extern void (*lnk_intercept_rx_path) (void *dev, int fp_idx,
    uint16_t rx_bd_prod, uint16_t rx_bd_cons, uint16_t comp_prod,
    uint16_t comp_cons, int irq, char *extra_dbg_string);
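/* Same value as the kernel's default NAPI_POLL_WEIGHT (64); kept as a
 * separate constant, presumably so the SME build can tune it independently.
 */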
#define SME_NAPI_POLL_WEIGHT 64
#endif

static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
static int bnx2x_alloc_fp_mem(struct bnx2x *bp);
static int bnx2x_poll(struct napi_struct *napi, int budget);

static void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
{
	int i;

	/* Add NAPI objects */
	for_each_rx_queue_cnic(bp, i) {
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, NAPI_POLL_WEIGHT);
	}
}

static void bnx2x_add_all_napi(struct bnx2x *bp)
{
	int i;

	/* Add NAPI objects */
	for_each_eth_queue(bp, i) {
#ifdef CONFIG_XEN_SME
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
				bnx2x_poll, SME_NAPI_POLL_WEIGHT);
#else
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, NAPI_POLL_WEIGHT);
#endif
	}
}

static int bnx2x_calc_num_queues(struct bnx2x *bp)
{
	int nq = bnx2x_num_queues ? : netif_get_num_default_rss_queues();

	/* Reduce memory usage in kdump environment by using only one queue */
	if (is_kdump_kernel())
		nq = 1;

	nq = clamp(nq, 1, BNX2X_MAX_QUEUES(bp));
	return nq;
}

/**
 * bnx2x_move_fp - move content of the fastpath structure.
 *
 * @bp:		driver handle
 * @from:	source FP index
 * @to:		destination FP index
 *
 * Makes sure the contents of the bp->fp[to].napi is kept
 * intact. This is done by first copying the napi struct from
 * the target to the source, and then mem copying the entire
 * source onto the target. Update txdata pointers and related
 * content.
 */
static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
{
	struct bnx2x_fastpath *from_fp = &bp->fp[from];
	struct bnx2x_fastpath *to_fp = &bp->fp[to];
	struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
	struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
	struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
	struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
	int old_max_eth_txqs, new_max_eth_txqs;
	int old_txdata_index = 0, new_txdata_index = 0;
	struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info;

	/* Copy the NAPI object as it has been already initialized */
	from_fp->napi = to_fp->napi;

	/* Move bnx2x_fastpath contents */
	memcpy(to_fp, from_fp, sizeof(*to_fp));
	to_fp->index = to;

	/* Retain the tpa_info of the original `to' version as we don't want
	 * 2 FPs to contain the same tpa_info pointer.
	 */
	to_fp->tpa_info = old_tpa_info;

	/* move sp_objs contents as well, as their indices match fp ones */
	memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));

	/* move fp_stats contents as well, as their indices match fp ones */
	memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));

	/* Update txdata pointers in fp and move txdata content accordingly:
	 * Each fp consumes 'max_cos' txdata structures, so the index should be
	 * decremented by max_cos x delta.
	 */

	old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
	new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
				(bp)->max_cos;
	if (from == FCOE_IDX(bp)) {
		old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
		new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
	}

	memcpy(&bp->bnx2x_txq[new_txdata_index],
	       &bp->bnx2x_txq[old_txdata_index],
	       sizeof(struct bnx2x_fp_txdata));
	to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
}

/**
 * bnx2x_fill_fw_str - Fill buffer with FW version string.
 *
 * @bp:        driver handle
 * @buf:       character buffer to fill with the fw name
 * @buf_len:   length of the above buffer
 *
 */
void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
{
	if (IS_PF(bp)) {
		u8 phy_fw_ver[PHY_FW_VER_LEN];

		phy_fw_ver[0] = '\0';
		bnx2x_get_ext_phy_fw_version(&bp->link_params,
					     phy_fw_ver, PHY_FW_VER_LEN);
		strlcpy(buf, bp->fw_ver, buf_len);
		snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
			 "bc %d.%d.%d%s%s",
			 (bp->common.bc_ver & 0xff0000) >> 16,
			 (bp->common.bc_ver & 0xff00) >> 8,
			 (bp->common.bc_ver & 0xff),
			 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
	} else {
		bnx2x_vf_fill_fw_str(bp, buf, buf_len);
	}
}

/**
 * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
 *
 * @bp:	driver handle
 * @delta:	number of eth queues which were not allocated
 */
static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
{
	int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);

	/* Queue pointer cannot be re-set on an fp-basis, as moving pointer
	 * backward along the array could cause memory to be overridden
	 */
	for (cos = 1; cos < bp->max_cos; cos++) {
		for (i = 0; i < old_eth_num - delta; i++) {
			struct bnx2x_fastpath *fp = &bp->fp[i];
			int new_idx = cos * (old_eth_num - delta) + i;

			memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
			       sizeof(struct bnx2x_fp_txdata));
			fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
		}
	}
}

int bnx2x_load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
			     u16 idx, unsigned int *pkts_compl,
			     unsigned int *bytes_compl)
{
	struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;
	u16 split_bd_len = 0;

#ifdef CONFIG_XEN_SME
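  /* keep the original nbd/first-bd values for the NULL-skb diagnostic below */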
  int orig_nbd;
  u16 orig_first_bd_idx;
#endif

	/* prefetch skb end pointer to speedup dev_kfree_skb() */
	prefetch(&skb->end);

	DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d  buff @(%p)->skb %p\n",
	   txdata->txq_index, idx, tx_buf, skb);

	tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef CONFIG_XEN_SME
  orig_nbd = nbd;
  orig_first_bd_idx = bd_idx;
#endif
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_buf->flags & BNX2X_HAS_SECOND_PBD) {
		/* Skip second parse bd... */
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
		split_bd_len = BD_UNMAP_LEN(tx_data_bd);
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* unmap first bd */
	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd) + split_bd_len,
			 DMA_TO_DEVICE);

	/* now free frags */
	while (nbd > 0) {

		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
#ifdef CONFIG_XEN_SME
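  /* diagnostic for the SME build: dump the TX ring bookkeeping if this
   * BD slot no longer has an skb attached
   */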
  if (!skb) {
    BNX2X_ERR("NULL SKB TXQ[%d] idx %d nbd %d => %d bd_idx %d => %d\n"
        "bd_prod %u pkt_prod %u bd_cons %u pkt_cons %u hw_cons %u"
        , txdata->txq_index, idx, orig_nbd, nbd, orig_first_bd_idx, bd_idx
        , txdata->tx_bd_prod, txdata->tx_pkt_prod, txdata->tx_bd_cons
        , txdata->tx_pkt_cons, le16_to_cpu(*txdata->tx_cons_sb)
        );
  }
#endif
	WARN_ON(!skb);
	if (likely(skb)) {
		(*pkts_compl)++;
		(*bytes_compl) += skb->len;
		dev_kfree_skb_any(skb);
	}

	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
{
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
	unsigned int pkts_compl = 0, bytes_compl = 0;

#ifdef CONFIG_XEN_SME
	u16 orig_bd_cons, orig_pkt_cons;
	/* if the SME module fully handles this TX completion, nothing more
	 * to do here
	 */
	int ret = xsl_intercept_tx_int(bp->dev, txdata);

	if (ret == 0)
		return ret;
#endif

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -1;
#endif

#ifdef CONFIG_XEN_SME
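  /* snapshot the consumer indices for the queue-stopped diagnostic below */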
  orig_bd_cons = txdata->tx_bd_cons;
  orig_pkt_cons = txdata->tx_pkt_cons;
#endif

	txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
	hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
	sw_cons = txdata->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		DP(NETIF_MSG_TX_DONE,
		   "queue[%d]: hw_cons %u  sw_cons %u  pkt_cons %u\n",
		   txdata->txq_index, hw_cons, sw_cons, pkt_cons);

		bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
					    &pkts_compl, &bytes_compl);

		sw_cons++;
	}

#if !defined(CONFIG_XEN_SME)
	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
#endif

	txdata->tx_pkt_cons = sw_cons;
	txdata->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 * On the other hand we need an rmb() here to ensure the proper
	 * ordering of bit testing in the following
	 * netif_tx_queue_stopped(txq) call.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq))) {
		/* Taking tx_lock() is needed to prevent re-enabling the queue
		 * while it's empty. This could have happen if rx_action() gets
		 * suspended in bnx2x_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */

#ifdef CONFIG_XEN_SME
    DP(NETIF_MSG_IFDOWN, "TXQ[%d] doorbell %d hw_cons %u\n"
      "bd prod %u (%u) pkt prod %u (%u) bd cons %u (%u) => %u "
      "pkt cons %u (%u) => %u"
      , txdata->txq_index, txdata->tx_db.data.prod, hw_cons
      , txdata->tx_bd_prod, (unsigned int) TX_BD(txdata->tx_bd_prod)
      , txdata->tx_pkt_prod, (unsigned int) TX_BD(txdata->tx_pkt_prod)
      , orig_bd_cons, (unsigned int) TX_BD(orig_bd_cons), txdata->tx_bd_cons
      , orig_pkt_cons, (unsigned int) TX_BD(orig_pkt_cons), txdata->tx_pkt_cons);
#endif

		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
	return 0;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
					 u16 sge_len,
					 struct eth_end_agg_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		BIT_VEC64_CLEAR_BIT(fp->sge_mask,
			RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp,
		le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
		delta += BIT_VEC64_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

/* Get Toeplitz hash value in the skb using the value from the
 * CQE (calculated by HW).
 */
static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
			    const struct eth_fast_path_rx_cqe *cqe,
			    enum pkt_hash_types *rxhash_type)
{
	/* Get Toeplitz hash from CQE */
	if ((bp->dev->features & NETIF_F_RXHASH) &&
	    (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
		enum eth_rss_hash_type htype;

		htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
		*rxhash_type = ((htype == TCP_IPV4_HASH_TYPE) ||
				(htype == TCP_IPV6_HASH_TYPE)) ?
			       PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;

		return le32_to_cpu(cqe->rss_hash_result);
	}
	*rxhash_type = PKT_HASH_TYPE_NONE;
	return 0;
}

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    u16 cons, u16 prod,
			    struct eth_fast_path_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;
	struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
	struct sw_rx_bd *first_buf = &tpa_info->first_buf;

	/* print error if current state != stop */
	if (tpa_info->tpa_state != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	/* Try to map an empty data buffer from the aggregation info  */
	mapping = dma_map_single(&bp->pdev->dev,
				 first_buf->data + NET_SKB_PAD,
				 fp->rx_buf_size, DMA_FROM_DEVICE);
	/*
	 *  ...if it fails - move the skb from the consumer to the producer
	 *  and set the current aggregation state as ERROR to drop it
	 *  when TPA_STOP arrives.
	 */

	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		/* Move the BD from the consumer to the producer */
		bnx2x_reuse_rx_data(fp, cons, prod);
		tpa_info->tpa_state = BNX2X_TPA_ERROR;
		return;
	}

	/* move empty data from pool to prod */
	prod_rx_buf->data = first_buf->data;
	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
	/* point prod_bd to new data */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	/* move partial skb from cons to pool (don't unmap yet) */
	*first_buf = *cons_rx_buf;

	/* mark bin state as START */
	tpa_info->parsing_flags =
		le16_to_cpu(cqe->pars_flags.flags);
	tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
	tpa_info->tpa_state = BNX2X_TPA_START;
	tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
	tpa_info->placement_offset = cqe->placement_offset;
	tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->rxhash_type);
	if (fp->mode == TPA_MODE_GRO) {
		u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
		tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
		tpa_info->gro_size = gro_size;
	}

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
	   fp->tpa_queue_used);
#endif
}

/* Timestamp option length allowed for TPA aggregation:
 *
 *		nop nop kind length echo val
 */
#define TPA_TSTAMP_OPT_LEN	12
/**
 * bnx2x_set_gro_params - compute GRO values
 *
 * @skb:		packet skb
 * @parsing_flags:	parsing flags from the START CQE
 * @len_on_bd:		total length of the first packet for the
 *			aggregation.
 * @pkt_len:		length of all segments
 *
 * Approximate value of the MSS for this aggregation calculated using
 * the first packet of it.
 * Compute number of aggregated segments, and gso_type.
 */
static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
				 u16 len_on_bd, unsigned int pkt_len,
				 u16 num_of_coalesced_segs)
{
	/* TPA aggregation won't have either IP options or TCP options
	 * other than timestamp or IPv6 extension headers.
	 */
	u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);

	if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
	    PRS_FLAG_OVERETH_IPV6) {
		hdrs_len += sizeof(struct ipv6hdr);
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
	} else {
		hdrs_len += sizeof(struct iphdr);
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
	}

	/* Check if there was a TCP timestamp; if there is, it will
	 * always be 12 bytes long: nop nop kind length echo val.
	 *
	 * Otherwise FW would close the aggregation.
	 */
	if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
		hdrs_len += TPA_TSTAMP_OPT_LEN;

	skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;

	/* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
	 * to skb_shinfo(skb)->gso_segs
	 */
	NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
}

static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			      u16 index, gfp_t gfp_mask)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	struct bnx2x_alloc_pool *pool = &fp->page_pool;
	dma_addr_t mapping;

	if (!pool->page || (PAGE_SIZE - pool->offset) < SGE_PAGE_SIZE) {

		/* put page reference used by the memory pool, since we
		 * won't be using this page as the mempool anymore.
		 */
		if (pool->page)
			put_page(pool->page);

		pool->page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
		if (unlikely(!pool->page))
			return -ENOMEM;

		pool->offset = 0;
	}

	mapping = dma_map_page(&bp->pdev->dev, pool->page,
			       pool->offset, SGE_PAGE_SIZE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		BNX2X_ERR("Can't map sge\n");
		return -ENOMEM;
	}

	get_page(pool->page);
	sw_buf->page = pool->page;
	sw_buf->offset = pool->offset;

	dma_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	pool->offset += SGE_PAGE_SIZE;

	return 0;
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct bnx2x_agg_info *tpa_info,
			       u16 pages,
			       struct sk_buff *skb,
			       struct eth_end_agg_rx_cqe *cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u32 i, frag_len, frag_size;
	int err, j, frag_id = 0;
	u16 len_on_bd = tpa_info->len_on_bd;
	u16 full_page = 0, gro_size = 0;

	frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;

	if (fp->mode == TPA_MODE_GRO) {
		gro_size = tpa_info->gro_size;
		full_page = tpa_info->full_page;
	}

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
				     le16_to_cpu(cqe->pkt_len),
				     le16_to_cpu(cqe->num_of_coalesced_segs));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		if (fp->mode == TPA_MODE_GRO)
			frag_len = min_t(u32, frag_size, (u32)full_page);
		else /* LRO */
			frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);

		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, GFP_ATOMIC);
		if (unlikely(err)) {
			bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
			return err;
		}

		dma_unmap_page(&bp->pdev->dev,
			       dma_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE, DMA_FROM_DEVICE);
		/* Add one frag and update the appropriate fields in the skb */
		if (fp->mode == TPA_MODE_LRO)
			skb_fill_page_desc(skb, j, old_rx_pg.page,
					   old_rx_pg.offset, frag_len);
		else { /* GRO */
			int rem;
			int offset = 0;
			for (rem = frag_len; rem > 0; rem -= gro_size) {
				int len = rem > gro_size ? gro_size : rem;
				skb_fill_page_desc(skb, frag_id++,
						   old_rx_pg.page,
						   old_rx_pg.offset + offset,
						   len);
				if (offset)
					get_page(old_rx_pg.page);
				offset += len;
			}
		}

		skb->data_len += frag_len;
		skb->truesize += SGE_PAGES;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
{
	if (fp->rx_frag_size)
		skb_free_frag(data);
	else
		kfree(data);
}

static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask)
{
	if (fp->rx_frag_size) {
		/* GFP_KERNEL allocations are used only during initialization */
		if (unlikely(gfpflags_allow_blocking(gfp_mask)))
			return (void *)__get_free_page(gfp_mask);

		return netdev_alloc_frag(fp->rx_frag_size);
	}

	return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask);
}

#ifdef CONFIG_INET
static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th;

	skb_set_transport_header(skb, sizeof(struct iphdr));
	th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
				  iph->saddr, iph->daddr, 0);
}

static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
{
	struct ipv6hdr *iph = ipv6_hdr(skb);
	struct tcphdr *th;

	skb_set_transport_header(skb, sizeof(struct ipv6hdr));
	th = tcp_hdr(skb);

	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
				  &iph->saddr, &iph->daddr, 0);
}

static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
			    void (*gro_func)(struct bnx2x*, struct sk_buff*))
{
	skb_set_network_header(skb, 0);
	gro_func(bp, skb);
	tcp_gro_complete(skb);
}
#endif

static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb)
{
#ifdef CONFIG_INET
	if (skb_shinfo(skb)->gso_size) {
		switch (be16_to_cpu(skb->protocol)) {
		case ETH_P_IP:
			bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
			break;
		case ETH_P_IPV6:
			bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
			break;
		default:
			WARN_ONCE(1, "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
				  be16_to_cpu(skb->protocol));
		}
	}
#endif
	skb_record_rx_queue(skb, fp->rx_queue);
	napi_gro_receive(&fp->napi, skb);
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   struct bnx2x_agg_info *tpa_info,
			   u16 pages,
			   struct eth_end_agg_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
	u8 pad = tpa_info->placement_offset;
	u16 len = tpa_info->len_on_bd;
	struct sk_buff *skb = NULL;
	u8 *new_data, *data = rx_buf->data;
	u8 old_tpa_state = tpa_info->tpa_state;
#ifdef CONFIG_XEN_SME
	char dbg_buff[DBG_BUF_SIZE];
	memset(dbg_buff, 0, DBG_BUF_SIZE);
#endif

	tpa_info->tpa_state = BNX2X_TPA_STOP;

	/* If there was an error during the handling of the TPA_START -
	 * drop this aggregation.
	 */
	if (old_tpa_state == BNX2X_TPA_ERROR)
		goto drop;

	/* Try to allocate the new data */
	new_data = bnx2x_frag_alloc(fp, GFP_ATOMIC);
	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
			 fp->rx_buf_size, DMA_FROM_DEVICE);
	if (likely(new_data))
		skb = build_skb(data, fp->rx_frag_size);

	if (likely(skb)) {
#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > fp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail...  pad %d  len %d  rx_buf_size %d\n",
				  pad, len, fp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad + NET_SKB_PAD);
		skb_put(skb, len);
		skb_set_hash(skb, tpa_info->rxhash, tpa_info->rxhash_type);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

#ifdef CONFIG_XEN_SME
		memset(dbg_buff, 0, DBG_BUF_SIZE);
		sprintf(dbg_buff, "%s:%d caller: %pS", __func__, __LINE__, __builtin_return_address(0));
		xsl_print_sk_buff(skb, dbg_buff);
#endif
		if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
					 skb, cqe, cqe_idx)) {
			if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag);
			bnx2x_gro_receive(bp, fp, skb);
		} else {
			DP(NETIF_MSG_RX_STATUS,
			   "Failed to allocate new pages - dropping packet!\n");
			dev_kfree_skb_any(skb);
		}

		/* put new data in bin */
		rx_buf->data = new_data;

		return;
	}
	if (new_data)
		bnx2x_frag_free(fp, new_data);
drop:
	/* drop the packet and keep the buffer in the bin */
	DP(NETIF_MSG_RX_STATUS,
	   "Failed to allocate or map a new skb - dropping packet!\n");
	bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
}

static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       u16 index, gfp_t gfp_mask)
{
	u8 *data;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	data = bnx2x_frag_alloc(fp, gfp_mask);
	if (unlikely(data == NULL))
		return -ENOMEM;

	mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
				 fp->rx_buf_size,
				 DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		bnx2x_frag_free(fp, data);
		BNX2X_ERR("Can't map rx data\n");
		return -ENOMEM;
	}

	rx_buf->data = data;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static
void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
				 struct bnx2x_fastpath *fp,
				 struct bnx2x_eth_q_stats *qstats)
{
	/* Do nothing if no L4 csum validation was done.
	 * We do not check whether IP csum was validated. For IPv4 we assume
	 * that if the card got as far as validating the L4 csum, it also
	 * validated the IP csum. IPv6 has no IP csum.
	 */
	if (cqe->fast_path_cqe.status_flags &
	    ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
		return;

	/* If L4 validation was done, check if an error was found. */

	if (cqe->fast_path_cqe.type_error_flags &
	    (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
	     ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
		qstats->hw_csum_err++;
	else
		skb->ip_summed = CHECKSUM_UNNECESSARY;
}

static int __bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;
	union eth_rx_cqe *cqe;
	struct eth_fast_path_rx_cqe *cqe_fp;

#ifdef CONFIG_XEN_SME
	u16 bd_cons_fw;
#endif

	struct sw_rx_bd *rx_buf = NULL;
	struct sk_buff *skb = NULL;
	u8 cqe_fp_flags;
	enum eth_rx_cqe_type cqe_fp_type;
	u16 len, pad, queue;
	u8 *data;
	u32 rxhash;
	enum pkt_hash_types rxhash_type;

#ifdef CONFIG_XEN_SME
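	/* When the interception module is active, walk the rings using the
	 * shadow (swi_*) indices it maintains rather than the driver's rx_*
	 * view.
	 */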
	if (lnk_intercept_rx_path) {
		bd_cons = fp->swi_bd_cons;
		bd_prod = fp->swi_bd_prod;
		sw_comp_cons = fp->swi_comp_cons;
		sw_comp_prod = fp->swi_comp_prod;
	} else {
#endif
		bd_cons = fp->rx_bd_cons;
		bd_prod = fp->rx_bd_prod;
		sw_comp_cons = fp->rx_comp_cons;
		sw_comp_prod = fp->rx_comp_prod;
#ifdef CONFIG_XEN_SME
	}
	bd_cons_fw = bd_cons;
#endif
	bd_prod_fw = bd_prod;

	comp_ring_cons = RCQ_BD(sw_comp_cons);
	cqe = &fp->rx_comp_ring[comp_ring_cons];
	cqe_fp = &cqe->fast_path_cqe;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -5;
#endif

	bd_prod = RX_BD(bd_prod);
	bd_cons = RX_BD(bd_cons);

	/* A rmb() is required to ensure that the CQE is not read
	 * before it is written by the adapter DMA.  PCI ordering
	 * rules will make sure the other fields are written before
	 * the marker at the end of struct eth_fast_path_rx_cqe
	 * but without rmb() a weakly ordered processor can process
	 * stale data.  Without the barrier TPA state-machine might
	 * enter inconsistent state and kernel stack might be
	 * provided with incorrect packet description - these lead
	 * to various kernel crashes.
	 */
	rmb();

	cqe_fp_flags = cqe_fp->type_error_flags;
	cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;

	DP(NETIF_MSG_RX_STATUS,
		 "CQE type %x  err %x  status %x  queue %x  vlan %x  len %u\n",
		 CQE_TYPE(cqe_fp_flags),
		 cqe_fp_flags, cqe_fp->status_flags,
		 le32_to_cpu(cqe_fp->rss_hash_result),
		 le16_to_cpu(cqe_fp->vlan_tag),
		 le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));

	/* is this a slowpath msg? */
	if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
		bnx2x_sp_event(fp, cqe);
		goto next_cqe;
	}

	rx_buf = &fp->rx_buf_ring[bd_cons];
	data = rx_buf->data;

	if (!CQE_TYPE_FAST(cqe_fp_type)) {
		struct bnx2x_agg_info *tpa_info;
		u16 frag_size, pages;
//#ifdef BNX2X_STOP_ON_ERROR
		/* sanity check */
		if (fp->mode == TPA_MODE_DISABLED &&
				(CQE_TYPE_START(cqe_fp_type) ||
				 CQE_TYPE_STOP(cqe_fp_type)))
			BNX2X_ERR("START/STOP packet while TPA disabled, type %x\n",
					CQE_TYPE(cqe_fp_type));
//#endif

		if (CQE_TYPE_START(cqe_fp_type)) {
			u16 queue = cqe_fp->queue_index;
#ifdef CONFIG_XEN_SME
      DP(NETIF_MSG_RX_ERR, "tpa_start fpsb [%u %u %u] mode %d queue %d hc idx %d"
          " cqe flags %x [%d %d %d %d]\n"
          "bd_prod %u (%u) %u bd_cons %u (%u) %u "
          "comp_prod %u (%u) %u comp_cons %u (%u) %u"
          //"bd_prod %u (%u) %u (%u) %u bd_cons %u (%u) %u (%u) %u"
          //" comp_prod %u (%u) %u (%u) %u comp_cons %u (%u) %u (%u) %u"
          , fp->index, fp->fw_sb_id, fp->igu_sb_id, fp->mode, queue, fp->fp_hc_idx
          , cqe_fp_flags
          , (int) (CQE_TYPE_SLOW(cqe_fp_type)), (int) (CQE_TYPE_FAST(cqe_fp_type))
          , (int) (CQE_TYPE_START(cqe_fp_type)), (int) (CQE_TYPE_STOP(cqe_fp_type))
          //, fp->hwi_bd_prod, (int) RX_BD(fp->hwi_bd_prod)
          , fp->swi_bd_prod, (int) RX_BD(fp->swi_bd_prod), fp->rx_bd_prod
          //, fp->hwi_bd_cons, (int) RX_BD(fp->hwi_bd_cons)
          , fp->swi_bd_cons, (int) RX_BD(fp->swi_bd_cons), fp->rx_bd_cons
          //, fp->hwi_comp_prod, (int) RCQ_BD(fp->hwi_comp_prod)
          , fp->swi_comp_prod, (int) RCQ_BD(fp->swi_comp_prod), fp->rx_comp_prod
          //, fp->hwi_comp_cons, (int) RCQ_BD(fp->hwi_comp_cons)
          , fp->swi_comp_cons, (int) RCQ_BD(fp->swi_comp_cons), fp->rx_comp_cons);
#else
			DP(NETIF_MSG_RX_STATUS,
				 "calling tpa_start on queue %d\n",
				 queue);
#endif

			bnx2x_tpa_start(fp, queue,
					bd_cons, bd_prod,
					cqe_fp);

			goto next_rx;
		}
		queue = cqe->end_agg_cqe.queue_index;
		tpa_info = &fp->tpa_info[queue];
#ifdef CONFIG_XEN_SME
      DP(NETIF_MSG_RX_ERR, "tpa_stop fpsb [%u %u %u] mode %d queue %d hc idx %d"
          " cqe flags %x [%d %d %d %d]\n"
          "bd_prod %u (%u) %u bd_cons %u (%u) %u "
          "comp_prod %u (%u) %u comp_cons %u (%u) %u"
          //"bd_prod %u (%u) %u (%u) %u bd_cons %u (%u) %u (%u) %u"
          //" comp_prod %u (%u) %u (%u) %u comp_cons %u (%u) %u (%u) %u"
          , fp->index, fp->fw_sb_id, fp->igu_sb_id, fp->mode, queue, fp->fp_hc_idx
          , cqe_fp_flags
          , (int) (CQE_TYPE_SLOW(cqe_fp_type)), (int) (CQE_TYPE_FAST(cqe_fp_type))
          , (int) (CQE_TYPE_START(cqe_fp_type)), (int) (CQE_TYPE_STOP(cqe_fp_type))
          //, fp->hwi_bd_prod, (int) RX_BD(fp->hwi_bd_prod)
          , fp->swi_bd_prod, (int) RX_BD(fp->swi_bd_prod), fp->rx_bd_prod
          //, fp->hwi_bd_cons, (int) RX_BD(fp->hwi_bd_cons)
          , fp->swi_bd_cons, (int) RX_BD(fp->swi_bd_cons), fp->rx_bd_cons
          //, fp->hwi_comp_prod, (int) RCQ_BD(fp->hwi_comp_prod)
          , fp->swi_comp_prod, (int) RCQ_BD(fp->swi_comp_prod), fp->rx_comp_prod
          //, fp->hwi_comp_cons, (int) RCQ_BD(fp->hwi_comp_cons)
          , fp->swi_comp_cons, (int) RCQ_BD(fp->swi_comp_cons), fp->rx_comp_cons);
#else
		DP(NETIF_MSG_RX_STATUS,
			 "calling tpa_stop on queue %d\n",
			 queue);
#endif

		frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
					tpa_info->len_on_bd;

		if (fp->mode == TPA_MODE_GRO)
			pages = (frag_size + tpa_info->full_page - 1) /
				 tpa_info->full_page;
		else
			pages = SGE_PAGE_ALIGN(frag_size) >>
				SGE_PAGE_SHIFT;

		bnx2x_tpa_stop(bp, fp, tpa_info, pages,
						 &cqe->end_agg_cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
		if (bp->panic)
			return -5;
#endif

		bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
		goto next_cqe;
	}
	/* non TPA */
	len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
	pad = cqe_fp->placement_offset;
	dma_sync_single_for_cpu(&bp->pdev->dev,
				dma_unmap_addr(rx_buf, mapping),
				pad + RX_COPY_THRESH,
				DMA_FROM_DEVICE);
	pad += NET_SKB_PAD;
	prefetch(data + pad); /* speedup eth_type_trans() */
	/* is this an error packet? */
	if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
		DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
			 "ERROR  flags %x  rx packet %u\n",
			 cqe_fp_flags, sw_comp_cons);
		bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
		goto reuse_rx;
	}

#ifdef CONFIG_XEN_SME
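	/* hand the raw frame and the current ring indices to the SME layer
	 * before an skb is built for it
	 */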
	xsl_parse_rx_data(bp->dev, fp->index, bd_prod_fw, bd_cons_fw, sw_comp_prod,
			sw_comp_cons, data, len, NULL);
#endif
	/* Since we don't have a jumbo ring
	 * copy small packets if mtu > 1500
	 */
	if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			(len <= RX_COPY_THRESH)) {
		skb = napi_alloc_skb(&fp->napi, len);
		if (skb == NULL) {
			DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
				 "ERROR  packet dropped because of alloc failure\n");
			bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
			goto reuse_rx;
		}
		memcpy(skb->data, data + pad, len);
		bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
	} else {
		if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod,
								 GFP_ATOMIC) == 0)) {
			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(rx_buf, mapping),
					 fp->rx_buf_size,
					 DMA_FROM_DEVICE);
			skb = build_skb(data, fp->rx_frag_size);
			if (unlikely(!skb)) {
				bnx2x_frag_free(fp, data);
				bnx2x_fp_qstats(bp, fp)->
						rx_skb_alloc_failed++;
				goto next_rx;
			}
			skb_reserve(skb, pad);
		} else {
			DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
				 "ERROR  packet dropped because of alloc failure\n");
			bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
reuse_rx:
			bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
			goto next_rx;
		}
	}

	skb_put(skb, len);
	skb->protocol = eth_type_trans(skb, bp->dev);

	/* Set Toeplitz hash for a non-LRO skb */
	rxhash = bnx2x_get_rxhash(bp, cqe_fp, &rxhash_type);
	skb_set_hash(skb, rxhash, rxhash_type);

	skb_checksum_none_assert(skb);

	if (bp->dev->features & NETIF_F_RXCSUM)
		bnx2x_csum_validate(skb, cqe, fp,
						bnx2x_fp_qstats(bp, fp));

	skb_record_rx_queue(skb, fp->rx_queue);

	/* Check if this packet was timestamped */
	if (unlikely(cqe->fast_path_cqe.type_error_flags &
				 (1 << ETH_FAST_PATH_RX_CQE_PTP_PKT_SHIFT)))
		bnx2x_set_rx_ts(bp, skb);

	if (le16_to_cpu(cqe_fp->pars_flags.flags) &
			PARSING_FLAGS_VLAN)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
							 le16_to_cpu(cqe_fp->vlan_tag));

	napi_gro_receive(&fp->napi, skb);
next_rx:
	rx_buf->data = NULL;

	bd_cons = NEXT_RX_IDX(bd_cons);
	bd_prod = NEXT_RX_IDX(bd_prod);
	bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
#ifdef CONFIG_XEN_SME
	bd_cons_fw = NEXT_RX_IDX(bd_cons_fw);
#endif
	rx_pkt++;
next_cqe:
	sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
	sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

	/* mark CQE as free */
	BNX2X_SEED_CQE(cqe_fp);

#ifdef CONFIG_XEN_SME
	fp->swi_bd_prod = bd_prod_fw;
	fp->swi_comp_cons = sw_comp_cons;
	fp->swi_comp_prod = sw_comp_prod;

	/* if we use rx* in place of hwi*, then rx* should
	 * be updated only when our module is not loaded
	 */
	if (!lnk_intercept_rx_path) {
		fp->rx_bd_cons = bd_cons_fw;
		fp->rx_bd_prod = bd_prod_fw;
		fp->rx_comp_cons = sw_comp_cons;
		fp->rx_comp_prod = sw_comp_prod;
	}
#else
	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;
#endif

	return rx_pkt;
}

static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;
	union eth_rx_cqe *cqe;
	struct eth_fast_path_rx_cqe *cqe_fp;

	int rx_pkt_int = 0;
#ifdef CONFIG_XEN_SME
	u16 tmp_swi_bd_prod, tmp_swi_bd_cons, tmp_swi_comp_prod, tmp_swi_comp_cons;
	u16 tmp_hwi_bd_prod, tmp_hwi_bd_cons, tmp_hwi_comp_prod, tmp_hwi_comp_cons;
#endif

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif
	if (budget <= 0)
		return rx_pkt;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	comp_ring_cons = RCQ_BD(sw_comp_cons);
	cqe = &fp->rx_comp_ring[comp_ring_cons];
	cqe_fp = &cqe->fast_path_cqe;

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons);

#ifdef CONFIG_XEN_SME
	if (lnk_intercept_rx_path) {
		/* Snapshot the interception module's (swi_*) indices and the
		 * driver's view of the hardware side; these bound the loop
		 * below.
		 */
		tmp_swi_bd_prod = fp->swi_bd_prod;
		tmp_swi_bd_cons = fp->swi_bd_cons;
		tmp_swi_comp_prod = fp->swi_comp_prod;
		tmp_swi_comp_cons = fp->swi_comp_cons;
#if 0		/* hwi_* snapshot, superseded by the rx_* view below */
		tmp_hwi_bd_prod = fp->hwi_bd_prod;
		tmp_hwi_bd_cons = fp->hwi_bd_cons;
		tmp_hwi_comp_prod = fp->hwi_comp_prod;
		tmp_hwi_comp_cons = fp->hwi_comp_cons;
#endif
		tmp_hwi_bd_prod = fp->rx_bd_prod;
		tmp_hwi_bd_cons = fp->rx_bd_cons;
		tmp_hwi_comp_prod = fp->rx_comp_prod;
		tmp_hwi_comp_cons = fp->rx_comp_cons;

		while (/* RX_BD(tmp_swi_bd_prod) != RX_BD(tmp_hwi_bd_prod)
					&& RX_BD(tmp_swi_bd_cons) != RX_BD(tmp_hwi_bd_cons)
					&& */ RCQ_BD(tmp_swi_comp_prod) != RCQ_BD(tmp_hwi_comp_prod)
					&& RCQ_BD(tmp_swi_comp_cons) != RCQ_BD(tmp_hwi_comp_cons)) {
			rx_pkt_int = __bnx2x_rx_int(fp, budget);
			if (rx_pkt_int == -5) {
				/* inner pass bailed out on a panic condition;
				 * resync the shadow indices from the driver's
				 * own rx_* view
				 */
				//fp->hwi_bd_prod = RX_BD(fp->rx_bd_prod);
				fp->hwi_bd_prod = fp->rx_bd_prod;
				fp->hwi_bd_cons = fp->rx_bd_cons;
				fp->hwi_comp_prod = fp->rx_comp_prod;
				fp->hwi_comp_cons = fp->rx_comp_cons;

				//fp->swi_bd_prod = RX_BD(fp->rx_bd_prod);
				fp->swi_bd_prod = fp->rx_bd_prod;
				fp->swi_bd_cons = fp->rx_bd_cons;
				fp->swi_comp_prod = fp->rx_comp_prod;
				fp->swi_comp_cons = fp->rx_comp_cons;
			} else {
				rx_pkt += rx_pkt_int;
			}

			/* refresh the snapshots for the next bound check */
			tmp_swi_bd_prod = fp->swi_bd_prod;
			tmp_swi_bd_cons = fp->swi_bd_cons;
			tmp_swi_comp_prod = fp->swi_comp_prod;
			tmp_swi_comp_cons = fp->swi_comp_cons;
#if 0
			tmp_hwi_bd_prod = fp->hwi_bd_prod;
			tmp_hwi_bd_cons = fp->hwi_bd_cons;
			tmp_hwi_comp_prod = fp->hwi_comp_prod;
			tmp_hwi_comp_cons = fp->hwi_comp_cons;
#endif
			tmp_hwi_bd_prod = fp->rx_bd_prod;
			tmp_hwi_bd_cons = fp->rx_bd_cons;
			tmp_hwi_comp_prod = fp->rx_comp_prod;
			tmp_hwi_comp_cons = fp->rx_comp_cons;

			sw_comp_cons = fp->rx_comp_cons;
			if (rx_pkt == budget)
				break;

			comp_ring_cons = RCQ_BD(sw_comp_cons);
			cqe = &fp->rx_comp_ring[comp_ring_cons];
			cqe_fp = &cqe->fast_path_cqe;
		}
	}
#endif

#if 0
	/* baseline unmodified loop */
	while (BNX2X_IS_CQE_COMPLETED(cqe_fp)) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb = NULL;
		u8 cqe_fp_flags;
		enum eth_rx_cqe_type cqe_fp_type;
		u16 len, pad, queue;
		u8 *data;
		u32 rxhash;
		enum pkt_hash_types rxhash_type;

#ifdef BNX2X_STOP_ON_ERROR
		if (unlikely(bp->panic))
			return 0;
#endif

		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		/* A rmb() is required to ensure that the CQE is not read
		 * before it is written by the adapter DMA.  PCI ordering
		 * rules will make sure the other fields are written before
		 * the marker at the end of struct eth_fast_path_rx_cqe
		 * but without rmb() a weakly ordered processor can process
		 * stale data.  Without the barrier TPA state-machine might
		 * enter inconsistent state and kernel stack might be
		 * provided with incorrect packet description - these lead
		 * to various kernel crashed.
		 */
		rmb();

		cqe_fp_flags = cqe_fp->type_error_flags;
		cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;

		DP(NETIF_MSG_RX_STATUS,
		   "CQE type %x  err %x  status %x  queue %x  vlan %x  len %u\n",
		   CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe_fp->status_flags,
		   le32_to_cpu(cqe_fp->rss_hash_result),
		   le16_to_cpu(cqe_fp->vlan_tag),
		   le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;
		}

		rx_buf = &fp->rx_buf_ring[bd_cons];
		data = rx_buf->data;

		if (!CQE_TYPE_FAST(cqe_fp_type)) {
			struct bnx2x_agg_info *tpa_info;
			u16 frag_size, pages;
#ifdef BNX2X_STOP_ON_ERROR
			/* sanity check */
			if (fp->mode == TPA_MODE_DISABLED &&
			    (CQE_TYPE_START(cqe_fp_type) ||
			     CQE_TYPE_STOP(cqe_fp_type)))
				BNX2X_ERR("START/STOP packet while TPA disabled, type %x\n",
					  CQE_TYPE(cqe_fp_type));
#endif

			if (CQE_TYPE_START(cqe_fp_type)) {
				u16 queue = cqe_fp->queue_index;
				DP(NETIF_MSG_RX_STATUS,
				   "calling tpa_start on queue %d\n",
				   queue);

				bnx2x_tpa_start(fp, queue,
						bd_cons, bd_prod,
						cqe_fp);

				goto next_rx;
			}
			queue = cqe->end_agg_cqe.queue_index;
			tpa_info = &fp->tpa_info[queue];
			DP(NETIF_MSG_RX_STATUS,
			   "calling tpa_stop on queue %d\n",
			   queue);

			frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
				    tpa_info->len_on_bd;

			if (fp->mode == TPA_MODE_GRO)
				pages = (frag_size + tpa_info->full_page - 1) /
					 tpa_info->full_page;
			else
				pages = SGE_PAGE_ALIGN(frag_size) >>
					SGE_PAGE_SHIFT;

			bnx2x_tpa_stop(bp, fp, tpa_info, pages,
				       &cqe->end_agg_cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
			if (bp->panic)
				return 0;
#endif

			bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
			goto next_cqe;
		}
		/* non TPA */
		len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
		pad = cqe_fp->placement_offset;
		dma_sync_single_for_cpu(&bp->pdev->dev,
					dma_unmap_addr(rx_buf, mapping),
					pad + RX_COPY_THRESH,
					DMA_FROM_DEVICE);
		pad += NET_SKB_PAD;
		prefetch(data + pad); /* speedup eth_type_trans() */
		/* is this an error packet? */
		if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
			DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
			   "ERROR  flags %x  rx packet %u\n",
			   cqe_fp_flags, sw_comp_cons);
			bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring
		 * copy small packets if mtu > 1500
		 */
		if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
		    (len <= RX_COPY_THRESH)) {
			skb = napi_alloc_skb(&fp->napi, len);
			if (skb == NULL) {
				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
				   "ERROR  packet dropped because of alloc failure\n");
				bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
				goto reuse_rx;
			}
			memcpy(skb->data, data + pad, len);
			bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
		} else {
			if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod,
						       GFP_ATOMIC) == 0)) {
				dma_unmap_single(&bp->pdev->dev,
						 dma_unmap_addr(rx_buf, mapping),
						 fp->rx_buf_size,
						 DMA_FROM_DEVICE);
				skb = build_skb(data, fp->rx_frag_size);
				if (unlikely(!skb)) {
					bnx2x_frag_free(fp, data);
					bnx2x_fp_qstats(bp, fp)->
							rx_skb_alloc_failed++;
					goto next_rx;
				}
				skb_reserve(skb, pad);
			} else {
				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
				   "ERROR  packet dropped because of alloc failure\n");
				bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
				goto next_rx;
			}
		}

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Set Toeplitz hash for a none-LRO skb */
		rxhash = bnx2x_get_rxhash(bp, cqe_fp, &rxhash_type);
		skb_set_hash(skb, rxhash, rxhash_type);

		skb_checksum_none_assert(skb);

		if (bp->dev->features & NETIF_F_RXCSUM)
			bnx2x_csum_validate(skb, cqe, fp,
					    bnx2x_fp_qstats(bp, fp));

		skb_record_rx_queue(skb, fp->rx_queue);

		/* Check if this packet was timestamped */
		if (unlikely(cqe->fast_path_cqe.type_error_flags &
			     (1 << ETH_FAST_PATH_RX_CQE_PTP_PKT_SHIFT)))
			bnx2x_set_rx_ts(bp, skb);

		if (le16_to_cpu(cqe_fp->pars_flags.flags) &
		    PARSING_FLAGS_VLAN)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       le16_to_cpu(cqe_fp->vlan_tag));

		napi_gro_receive(&fp->napi, skb);
next_rx:
		rx_buf->data = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		/* mark CQE as free */
		BNX2X_SEED_CQE(cqe_fp);

		if (rx_pkt == budget)
			break;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp = &cqe->fast_path_cqe;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;
#endif /* baseline loop */
#ifdef CONFIG_XEN_SME
	if (lnk_intercept_rx_path) {
		bd_prod_fw = fp->swi_bd_prod;
		sw_comp_prod = fp->swi_comp_prod;
	} else {
#endif
		/* part of baseline logic as well, under modified loop */
		bd_prod_fw = fp->rx_bd_prod;
		sw_comp_prod = fp->rx_comp_prod;
#ifdef CONFIG_XEN_SME
	}
#endif
	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	return rx_pkt;
}

#ifdef CONFIG_XEN_SME
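/* Single-CQE RX pass used when the SME module drives the ring via the swi_*
 * shadow indices; returns the number of packets handed to the stack, or -5
 * if the driver hit a panic condition.
 */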
static int __bnx2x_rx_int_sme(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;
	union eth_rx_cqe *cqe;
	struct eth_fast_path_rx_cqe *cqe_fp;

	u16 bd_cons_fw;
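	/* bd_cons_fw/bd_prod_fw: absolute (unmasked) ring positions that are
	 * written back to fp->swi_* at the end of this pass
	 */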

	struct sw_rx_bd *rx_buf = NULL;
	struct sk_buff *skb = NULL;
	u8 cqe_fp_flags;
	enum eth_rx_cqe_type cqe_fp_type;
	u16 len, pad, queue;
	u8 *data;
	u32 rxhash;
	enum pkt_hash_types rxhash_type;

  bd_cons = fp->swi_bd_cons;
  bd_prod = fp->swi_bd_prod;
  sw_comp_cons = fp->swi_comp_cons;
  sw_comp_prod = fp->swi_comp_prod;
	bd_cons_fw = bd_cons;
	bd_prod_fw = bd_prod;

	comp_ring_cons = RCQ_BD(sw_comp_cons);
	cqe = &fp->rx_comp_ring[comp_ring_cons];
	cqe_fp = &cqe->fast_path_cqe;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -5;
#endif

	bd_prod = RX_BD(bd_prod);
	bd_cons = RX_BD(bd_cons);

	/* A rmb() is required to ensure that the CQE is not read
	 * before it is written by the adapter DMA.  PCI ordering
	 * rules will make sure the other fields are written before
	 * the marker at the end of struct eth_fast_path_rx_cqe
	 * but without rmb() a weakly ordered processor can process
	 * stale data.  Without the barrier TPA state-machine might
	 * enter inconsistent state and kernel stack might be
	 * provided with incorrect packet description - these lead
	 * to various kernel crashes.
	 */
	rmb();

	cqe_fp_flags = cqe_fp->type_error_flags;
	cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;

	DP(NETIF_MSG_RX_STATUS,
		 "CQE type %x  err %x  status %x  queue %x  vlan %x  len %u\n",
		 CQE_TYPE(cqe_fp_flags),
		 cqe_fp_flags, cqe_fp->status_flags,
		 le32_to_cpu(cqe_fp->rss_hash_result),
		 le16_to_cpu(cqe_fp->vlan_tag),
		 le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));

	/* is this a slowpath msg? */
	if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
		bnx2x_sp_event(fp, cqe);
		goto next_cqe;
	}

	rx_buf = &fp->rx_buf_ring[bd_cons];
	data = rx_buf->data;

	if (!CQE_TYPE_FAST(cqe_fp_type)) {
		struct bnx2x_agg_info *tpa_info;
		u16 frag_size, pages;
//#ifdef BNX2X_STOP_ON_ERROR
		/* sanity check */
		if (fp->mode == TPA_MODE_DISABLED &&
				(CQE_TYPE_START(cqe_fp_type) ||
				 CQE_TYPE_STOP(cqe_fp_type)))
			BNX2X_ERR("START/STOP packet while TPA disabled, type %x\n",
					CQE_TYPE(cqe_fp_type));
//#endif

		if (CQE_TYPE_START(cqe_fp_type)) {
			u16 queue = cqe_fp->queue_index;
      DP(NETIF_MSG_RX_ERR, "tpa_start fpsb [%u %u %u] mode %d queue %d hc idx %d"
        " cqe flags %x [%d %d %d %d]\n"
        "bd [%u %u] bd_fw [%u %u] comp [%u %u]\n"
        "fp %u (%u) %u (%u), %u (%u) %u (%u), %u (%u) %u (%u), %u (%u) %u (%u)"
        , fp->index, fp->fw_sb_id, fp->igu_sb_id, fp->mode
        , queue, fp->fp_hc_idx, cqe_fp_flags
        , (int) (CQE_TYPE_SLOW(cqe_fp_type)), (int) (CQE_TYPE_FAST(cqe_fp_type))
        , (int) (CQE_TYPE_START(cqe_fp_type)), (int) (CQE_TYPE_STOP(cqe_fp_type))
        , bd_prod, bd_cons, bd_prod_fw, bd_cons_fw, sw_comp_prod, sw_comp_cons
        , fp->rx_bd_prod, (int) RX_BD(fp->rx_bd_prod)
        , fp->swi_bd_prod, (int) RX_BD(fp->swi_bd_prod)
        , fp->rx_bd_cons, (int) RX_BD(fp->rx_bd_cons)
        , fp->swi_bd_cons, (int) RX_BD(fp->swi_bd_cons)
        , fp->rx_comp_prod, (int) RCQ_BD(fp->rx_comp_prod)
        , fp->swi_comp_prod, (int) RCQ_BD(fp->swi_comp_prod)
        , fp->rx_comp_cons, (int) RCQ_BD(fp->rx_comp_cons)
        , fp->swi_comp_cons, (int) RCQ_BD(fp->swi_comp_cons)
        );

			bnx2x_tpa_start(fp, queue,
					bd_cons, bd_prod,
					cqe_fp);

			goto next_rx;
		}
		queue = cqe->end_agg_cqe.queue_index;
		tpa_info = &fp->tpa_info[queue];
    DP(NETIF_MSG_RX_ERR, "tpa_stop fpsb [%u %u %u] mode %d queue %d hc idx %d"
        " cqe flags %x [%d %d %d %d]\n"
        "bd [%u %u] bd_fw [%u %u] comp [%u %u]\n"
        "fp %u (%u) %u (%u), %u (%u) %u (%u), %u (%u) %u (%u), %u (%u) %u (%u)"
        , fp->index, fp->fw_sb_id, fp->igu_sb_id, fp->mode
        , queue, fp->fp_hc_idx, cqe_fp_flags
        , (int) (CQE_TYPE_SLOW(cqe_fp_type)), (int) (CQE_TYPE_FAST(cqe_fp_type))
        , (int) (CQE_TYPE_START(cqe_fp_type)), (int) (CQE_TYPE_STOP(cqe_fp_type))
        , bd_prod, bd_cons, bd_prod_fw, bd_cons_fw, sw_comp_prod, sw_comp_cons
        , fp->rx_bd_prod, (int) RX_BD(fp->rx_bd_prod)
        , fp->swi_bd_prod, (int) RX_BD(fp->swi_bd_prod)
        , fp->rx_bd_cons, (int) RX_BD(fp->rx_bd_cons)
        , fp->swi_bd_cons, (int) RX_BD(fp->swi_bd_cons)
        , fp->rx_comp_prod, (int) RCQ_BD(fp->rx_comp_prod)
        , fp->swi_comp_prod, (int) RCQ_BD(fp->swi_comp_prod)
        , fp->rx_comp_cons, (int) RCQ_BD(fp->rx_comp_cons)
        , fp->swi_comp_cons, (int) RCQ_BD(fp->swi_comp_cons)
        );

		frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
					tpa_info->len_on_bd;

		if (fp->mode == TPA_MODE_GRO)
			pages = (frag_size + tpa_info->full_page - 1) /
				 tpa_info->full_page;
		else
			pages = SGE_PAGE_ALIGN(frag_size) >>
				SGE_PAGE_SHIFT;

		bnx2x_tpa_stop(bp, fp, tpa_info, pages,
						 &cqe->end_agg_cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
		if (bp->panic)
			return -5;
#endif

		bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
		goto next_cqe;
	}
	/* non TPA */
	len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
	pad = cqe_fp->placement_offset;
	dma_sync_single_for_cpu(&bp->pdev->dev,
				dma_unmap_addr(rx_buf, mapping),
				pad + RX_COPY_THRESH,
				DMA_FROM_DEVICE);
	pad += NET_SKB_PAD;
	prefetch(data + pad); /* speedup eth_type_trans() */
	/* is this an error packet? */
	if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
		DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
			 "ERROR  flags %x  rx packet %u\n",
			 cqe_fp_flags, sw_comp_cons);
		bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
		goto reuse_rx;
	}

	/* CONFIG_XEN_SME */
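	/* Hand the raw RX payload and the current ring indices to the Xen SME
	 * layer before an skb is built for it.  (xsl_parse_rx_data() is defined
	 * elsewhere; it is assumed to be an inspection-only hook, based on its
	 * name and on the fact that the buffer is still consumed normally below.)
	 */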
	xsl_parse_rx_data(bp->dev, fp->index, bd_prod_fw, bd_cons_fw, sw_comp_prod,
			sw_comp_cons, data, len, NULL);

	/* Since we don't have a jumbo ring
	 * copy small packets if mtu > 1500
	 */
	if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			(len <= RX_COPY_THRESH)) {
		skb = napi_alloc_skb(&fp->napi, len);
		if (skb == NULL) {
			DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
				 "ERROR  packet dropped because of alloc failure\n");
			bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
			goto reuse_rx;
		}
		memcpy(skb->data, data + pad, len);
		bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
	} else {
		if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod,
								 GFP_ATOMIC) == 0)) {
			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(rx_buf, mapping),
					 fp->rx_buf_size,
					 DMA_FROM_DEVICE);
			skb = build_skb(data, fp->rx_frag_size);
			if (unlikely(!skb)) {
				bnx2x_frag_free(fp, data);
				bnx2x_fp_qstats(bp, fp)->
						rx_skb_alloc_failed++;
				goto next_rx;
			}
			skb_reserve(skb, pad);
		} else {
			DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
				 "ERROR  packet dropped because of alloc failure\n");
			bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
reuse_rx:
			bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
			goto next_rx;
		}
	}

	skb_put(skb, len);
	skb->protocol = eth_type_trans(skb, bp->dev);

	/* Set Toeplitz hash for a non-LRO skb */
	rxhash = bnx2x_get_rxhash(bp, cqe_fp, &rxhash_type);
	skb_set_hash(skb, rxhash, rxhash_type);

	skb_checksum_none_assert(skb);

	if (bp->dev->features & NETIF_F_RXCSUM)
		bnx2x_csum_validate(skb, cqe, fp,
						bnx2x_fp_qstats(bp, fp));

	skb_record_rx_queue(skb, fp->rx_queue);

	/* Check if this packet was timestamped */
	if (unlikely(cqe->fast_path_cqe.type_error_flags &
				 (1 << ETH_FAST_PATH_RX_CQE_PTP_PKT_SHIFT)))
		bnx2x_set_rx_ts(bp, skb);

	if (le16_to_cpu(cqe_fp->pars_flags.flags) &
			PARSING_FLAGS_VLAN)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
							 le16_to_cpu(cqe_fp->vlan_tag));

	napi_gro_receive(&fp->napi, skb);
next_rx:
	rx_buf->data = NULL;

	bd_cons = NEXT_RX_IDX(bd_cons);
	bd_prod = NEXT_RX_IDX(bd_prod);
	bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
	bd_cons_fw = NEXT_RX_IDX(bd_cons_fw);

	rx_pkt++;
next_cqe:
	sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
	sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

	/* mark CQE as free */
	BNX2X_SEED_CQE(cqe_fp);

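	/* Record this CQE's progress in the software shadow indices so the
	 * caller (bnx2x_rx_int_sme) can tell whether more completions are
	 * pending and which producer values to publish to the chip.
	 */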
	fp->swi_bd_cons = bd_cons_fw;
	fp->swi_bd_prod = bd_prod_fw;
	fp->swi_comp_cons = sw_comp_cons;
	fp->swi_comp_prod = sw_comp_prod;

	return rx_pkt;
}

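/*
 * CONFIG_XEN_SME RX path: process completions in per-CQE steps.  Each call to
 * __bnx2x_rx_int_sme() consumes a single CQE and records its progress in the
 * fp->swi_* shadow indices; this wrapper keeps calling it until those shadow
 * completion indices catch up with the fp->rx_comp_* indices or the NAPI
 * budget is exhausted, and only then publishes the new producers to the
 * hardware.  (The swi_*/rx_* split appears to exist so the SME interception
 * hook can observe ring progress one packet at a time.)
 */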
static int bnx2x_rx_int_sme(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;
	union eth_rx_cqe *cqe;
	struct eth_fast_path_rx_cqe *cqe_fp;

	int rx_pkt_int = 0;
	u16 tmp_swi_bd_prod, tmp_swi_bd_cons, tmp_swi_comp_prod, tmp_swi_comp_cons;
	u16 tmp_hwi_bd_prod, tmp_hwi_bd_cons, tmp_hwi_comp_prod, tmp_hwi_comp_cons;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif
	if (budget <= 0)
		return rx_pkt;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	comp_ring_cons = RCQ_BD(sw_comp_cons);
	cqe = &fp->rx_comp_ring[comp_ring_cons];
	cqe_fp = &cqe->fast_path_cqe;

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons);

	tmp_swi_bd_prod = fp->swi_bd_prod;
	tmp_swi_bd_cons = fp->swi_bd_cons;
	tmp_swi_comp_prod = fp->swi_comp_prod;
	tmp_swi_comp_cons = fp->swi_comp_cons;
	tmp_hwi_bd_prod = fp->rx_bd_prod;
	tmp_hwi_bd_cons = fp->rx_bd_cons;
	tmp_hwi_comp_prod = fp->rx_comp_prod;
	tmp_hwi_comp_cons = fp->rx_comp_cons;

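	/* Drain completions one CQE at a time: keep calling the per-CQE helper
	 * while the software (swi_*) completion indices still trail the indices
	 * maintained by the regular RX path, re-sampling both after every step.
	 * A return value of -5 from __bnx2x_rx_int_sme() signals bp->panic
	 * (BNX2X_STOP_ON_ERROR), in which case we bail out without updating the
	 * producers.  The BD-index comparisons in the loop condition are left
	 * commented out; only the completion-queue indices gate the loop.
	 */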
	while (/* RX_BD(tmp_swi_bd_prod) != RX_BD(tmp_hwi_bd_prod) &&
		  RX_BD(tmp_swi_bd_cons) != RX_BD(tmp_hwi_bd_cons) && */
	       RCQ_BD(tmp_swi_comp_prod) != RCQ_BD(tmp_hwi_comp_prod) &&
	       RCQ_BD(tmp_swi_comp_cons) != RCQ_BD(tmp_hwi_comp_cons)) {
		rx_pkt_int = __bnx2x_rx_int_sme(fp, budget);
		if (rx_pkt_int == -5)
			return 0;

		rx_pkt += rx_pkt_int;

		if (rx_pkt == budget)
			break;

		tmp_swi_bd_prod = fp->swi_bd_prod;
		tmp_swi_bd_cons = fp->swi_bd_cons;
		tmp_swi_comp_prod = fp->swi_comp_prod;
		tmp_swi_comp_cons = fp->swi_comp_cons;
		tmp_hwi_bd_prod = fp->rx_bd_prod;
		tmp_hwi_bd_cons = fp->rx_bd_cons;
		tmp_hwi_comp_prod = fp->rx_comp_prod;
		tmp_hwi_comp_cons = fp->rx_comp_cons;
	}

	bd_prod_fw = fp->swi_bd_prod;
	sw_comp_prod = fp->swi_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	return rx_pkt;
}

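/*
 * "nomod" variant of the per-CQE RX handler: identical to
 * __bnx2x_rx_int_sme() except that the xsl_parse_rx_data() interception hook
 * is left commented out below, so packets reach the stack without first being
 * handed to the Xen SME layer.  (Presumably kept as a debugging aid for
 * comparing behaviour with and without the interception hook.)
 */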
static int __bnx2x_rx_int_sme_nomod(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;
	union eth_rx_cqe *cqe;
	struct eth_fast_path_rx_cqe *cqe_fp;

	u16 bd_cons_fw;

	struct sw_rx_bd *rx_buf = NULL;
	struct sk_buff *skb = NULL;
	u8 cqe_fp_flags;
	enum eth_rx_cqe_type cqe_fp_type;
	u16 len, pad, queue;
	u8 *data;
	u32 rxhash;
	enum pkt_hash_types rxhash_type;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;
	bd_cons_fw = bd_cons;

	bd_prod_fw = bd_prod;

	comp_ring_cons = RCQ_BD(sw_comp_cons);
	cqe = &fp->rx_comp_ring[comp_ring_cons];
	cqe_fp = &cqe->fast_path_cqe;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -5;
#endif

	bd_prod = RX_BD(bd_prod);
	bd_cons = RX_BD(bd_cons);

	/* A rmb() is required to ensure that the CQE is not read
	 * before it is written by the adapter DMA.  PCI ordering
	 * rules will make sure the other fields are written before
	 * the marker at the end of struct eth_fast_path_rx_cqe
	 * but without rmb() a weakly ordered processor can process
	 * stale data.  Without the barrier TPA state-machine might
	 * enter inconsistent state and kernel stack might be
	 * provided with incorrect packet description - these lead
	 * to various kernel crashes.
	 */
	rmb();

	cqe_fp_flags = cqe_fp->type_error_flags;
	cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;

	DP(NETIF_MSG_RX_STATUS,
		 "CQE type %x  err %x  status %x  queue %x  vlan %x  len %u\n",
		 CQE_TYPE(cqe_fp_flags),
		 cqe_fp_flags, cqe_fp->status_flags,
		 le32_to_cpu(cqe_fp->rss_hash_result),
		 le16_to_cpu(cqe_fp->vlan_tag),
		 le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));

	/* is this a slowpath msg? */
	if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
		bnx2x_sp_event(fp, cqe);
		goto next_cqe;
	}

	rx_buf = &fp->rx_buf_ring[bd_cons];
	data = rx_buf->data;

	if (!CQE_TYPE_FAST(cqe_fp_type)) {
		struct bnx2x_agg_info *tpa_info;
		u16 frag_size, pages;
//#ifdef BNX2X_STOP_ON_ERROR
		/* sanity check */
		if (fp->mode == TPA_MODE_DISABLED &&
				(CQE_TYPE_START(cqe_fp_type) ||
				 CQE_TYPE_STOP(cqe_fp_type)))
			BNX2X_ERR("START/STOP packet while TPA disabled, type %x\n",
					CQE_TYPE(cqe_fp_type));
//#endif

		if (CQE_TYPE_START(cqe_fp_type)) {
			u16 queue = cqe_fp->queue_index;
			DP(NETIF_MSG_RX_ERR,
			   "tpa_start fpsb [%u %u %u] mode %d queue %d hc idx %d cqe flags %x [%d %d %d %d]\n"
			   "bd [%u %u] bd_fw [%u %u] comp [%u %u]\n"
			   "fp %u (%u) %u (%u), %u (%u) %u (%u), %u (%u) %u (%u), %u (%u) %u (%u)\n",
			   fp->index, fp->fw_sb_id, fp->igu_sb_id, fp->mode,
			   queue, fp->fp_hc_idx, cqe_fp_flags,
			   (int)(CQE_TYPE_SLOW(cqe_fp_type)), (int)(CQE_TYPE_FAST(cqe_fp_type)),
			   (int)(CQE_TYPE_START(cqe_fp_type)), (int)(CQE_TYPE_STOP(cqe_fp_type)),
			   bd_prod, bd_cons, bd_prod_fw, bd_cons_fw,
			   sw_comp_prod, sw_comp_cons,
			   fp->rx_bd_prod, (int)RX_BD(fp->rx_bd_prod),
			   fp->swi_bd_prod, (int)RX_BD(fp->swi_bd_prod),
			   fp->rx_bd_cons, (int)RX_BD(fp->rx_bd_cons),
			   fp->swi_bd_cons, (int)RX_BD(fp->swi_bd_cons),
			   fp->rx_comp_prod, (int)RCQ_BD(fp->rx_comp_prod),
			   fp->swi_comp_prod, (int)RCQ_BD(fp->swi_comp_prod),
			   fp->rx_comp_cons, (int)RCQ_BD(fp->rx_comp_cons),
			   fp->swi_comp_cons, (int)RCQ_BD(fp->swi_comp_cons));

			bnx2x_tpa_start(fp, queue,
					bd_cons, bd_prod,
					cqe_fp);

			goto next_rx;
		}
		queue = cqe->end_agg_cqe.queue_index;
		tpa_info = &fp->tpa_info[queue];
		DP(NETIF_MSG_RX_ERR,
		   "tpa_stop fpsb [%u %u %u] mode %d queue %d hc idx %d cqe flags %x [%d %d %d %d]\n"
		   "bd [%u %u] bd_fw [%u %u] comp [%u %u]\n"
		   "fp %u (%u) %u (%u), %u (%u) %u (%u), %u (%u) %u (%u), %u (%u) %u (%u)\n",
		   fp->index, fp->fw_sb_id, fp->igu_sb_id, fp->mode,
		   queue, fp->fp_hc_idx, cqe_fp_flags,
		   (int)(CQE_TYPE_SLOW(cqe_fp_type)), (int)(CQE_TYPE_FAST(cqe_fp_type)),
		   (int)(CQE_TYPE_START(cqe_fp_type)), (int)(CQE_TYPE_STOP(cqe_fp_type)),
		   bd_prod, bd_cons, bd_prod_fw, bd_cons_fw,
		   sw_comp_prod, sw_comp_cons,
		   fp->rx_bd_prod, (int)RX_BD(fp->rx_bd_prod),
		   fp->swi_bd_prod, (int)RX_BD(fp->swi_bd_prod),
		   fp->rx_bd_cons, (int)RX_BD(fp->rx_bd_cons),
		   fp->swi_bd_cons, (int)RX_BD(fp->swi_bd_cons),
		   fp->rx_comp_prod, (int)RCQ_BD(fp->rx_comp_prod),
		   fp->swi_comp_prod, (int)RCQ_BD(fp->swi_comp_prod),
		   fp->rx_comp_cons, (int)RCQ_BD(fp->rx_comp_cons),
		   fp->swi_comp_cons, (int)RCQ_BD(fp->swi_comp_cons));

		frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
					tpa_info->len_on_bd;

		if (fp->mode == TPA_MODE_GRO)
			pages = (frag_size + tpa_info->full_page - 1) /
				 tpa_info->full_page;
		else
			pages = SGE_PAGE_ALIGN(frag_size) >>
				SGE_PAGE_SHIFT;

		bnx2x_tpa_stop(bp, fp, tpa_info, pages,
						 &cqe->end_agg_cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
		if (bp->panic)
			return -5;
#endif

		bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
		goto next_cqe;
	}
	/* non TPA */
	len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
	pad = cqe_fp->placement_offset;
	dma_sync_single_for_cpu(&bp->pdev->dev,
				dma_unmap_addr(rx_buf, mapping),
				pad + RX_COPY_THRESH,
				DMA_FROM_DEVICE);
	pad += NET_SKB_PAD;
	prefetch(data + pad); /* speedup eth_type_trans() */
	/* is this an error packet? */
	if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
		DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
			 "ERROR  flags %x  rx packet %u\n",
			 cqe_fp_flags, sw_comp_cons);
		bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
		goto reuse_rx;
	}

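	/* The Xen SME interception hook is left disabled in this "nomod"
	 * variant; see __bnx2x_rx_int_sme() for the version that calls
	 * xsl_parse_rx_data() here.
	 */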
//#ifdef CONFIG_XEN_SME
//	xsl_parse_rx_data(bp->dev, fp->index, bd_prod_fw, bd_cons_fw, sw_comp_prod,
//			sw_comp_cons, data, len, NULL);
//#endif

	/* Since we don't have a jumbo ring
	 * copy small packets if mtu > 1500
	 */
	if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			(len <= RX_COPY_THRESH)) {
		skb = napi_alloc_skb(&fp->napi, len);
		if (skb == NULL) {
			DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
				 "ERROR  packet dropped because of alloc failure\n");
			bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
			goto reuse_rx;
		}
		memcpy(skb->data, data + pad, len);
		bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
	} else {
		if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod,
								 GFP_ATOMIC) == 0)) {
			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(rx_buf, mapping),
					 fp->rx_buf_size,
					 DMA_FROM_DEVICE);
			skb = build_skb(data, fp->rx_frag_size);
			if (unlikely(!skb)) {
				bnx2x_frag_free(fp, data);
				bnx2x_fp_qstats(bp, fp)->
						rx_skb_alloc_failed++;
				goto next_rx;
			}
			skb_reserve(skb, pad);
		} else {
			DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
				 "ERROR  packet dropped because of alloc failure\n");
			bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
reuse_rx:
			bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
			goto next_rx;
		}
	}

	skb_put(skb, len);
	skb->protocol = eth_type_trans(skb, bp->dev);

	/* Set Toeplitz hash for a non-LRO skb */
	rxhash = bnx2x_get_rxhash(bp, cqe_fp, &rxhash_type);
	skb_set_hash(skb, rxhash, rxhash_type);

	skb_checksum_none_assert(skb);

	if (bp->dev->features & NETIF_F_RXCSUM)
		bnx2x_csum_validate(skb, cqe, fp,
						bnx2x_fp_qstats(bp, fp));

	skb_record_rx_queue(skb, fp->rx_queue);

	/* Check if this packet was timestamped */
	if (unlikely(cqe->fast_path_cqe.type_error_flags &
				 (1 << ETH_FAST_PATH_RX_CQE_PTP_PKT_SHIFT)))
		bnx2x_set_rx_ts(bp, skb);

	if (le16_to_cpu(cqe_fp->pars_flags.flags) &
			PARSING_FLAGS_VLAN)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
							 le16_to_cpu(cqe_fp->vlan_tag));

	napi_gro_receive(&fp->napi, skb);
next_rx:
	rx_buf->data = NULL;

	bd_cons = NEXT_RX_IDX(bd_cons);
	bd_prod = NEXT_RX_IDX(bd_prod);
	bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
	bd_cons_fw = NEXT_RX_IDX(bd_cons_fw);
