Diffstat (limited to 'openvz-sources/022.072-r1/5111_linux-2.6.8.1-emulex-8.0.16.17.patch')
-rw-r--r--  openvz-sources/022.072-r1/5111_linux-2.6.8.1-emulex-8.0.16.17.patch  23500
1 file changed, 0 insertions, 23500 deletions
diff --git a/openvz-sources/022.072-r1/5111_linux-2.6.8.1-emulex-8.0.16.17.patch b/openvz-sources/022.072-r1/5111_linux-2.6.8.1-emulex-8.0.16.17.patch
deleted file mode 100644
index 24f2705..0000000
--- a/openvz-sources/022.072-r1/5111_linux-2.6.8.1-emulex-8.0.16.17.patch
+++ /dev/null
@@ -1,23500 +0,0 @@
---- linux-2.6.8.1-t044-driver-update//drivers/scsi/lpfc/lpfc_fcp.c 1970-01-01 03:00:00.000000000 +0300
-+++ rhel4u2//drivers/scsi/lpfc/lpfc_fcp.c 2005-10-19 11:47:17.000000000 +0400
-@@ -0,0 +1,2470 @@
-+/*******************************************************************
-+ * This file is part of the Emulex Linux Device Driver for *
-+ * Fibre Channel Host Bus Adapters. *
-+ * Copyright (C) 2003-2005 Emulex. All rights reserved. *
-+ * EMULEX and SLI are trademarks of Emulex. *
-+ * www.emulex.com *
-+ * *
-+ * This program is free software; you can redistribute it and/or *
-+ * modify it under the terms of version 2 of the GNU General *
-+ * Public License as published by the Free Software Foundation. *
-+ * This program is distributed in the hope that it will be useful. *
-+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
-+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
-+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
-+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
-+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
-+ * more details, a copy of which can be found in the file COPYING *
-+ * included with this package. *
-+ *******************************************************************/
-+
-+/*
-+ * $Id: lpfc_fcp.c 1.466.1.3 2005/06/21 15:48:55EDT sf_support Exp $
-+ */
-+
-+#include <linux/version.h>
-+#include <linux/config.h>
-+#include <linux/init.h>
-+#include <linux/blkdev.h>
-+#include <linux/ctype.h>
-+#include <linux/kernel.h>
-+#include <linux/module.h>
-+#include <linux/moduleparam.h>
-+#include <linux/pci.h>
-+#include <linux/smp_lock.h>
-+#include <linux/spinlock.h>
-+#include <linux/timer.h>
-+#include <linux/utsname.h>
-+
-+#include <asm/byteorder.h>
-+
-+#include <scsi/scsi_device.h>
-+#include <scsi/scsi_host.h>
-+#include <scsi/scsi_cmnd.h>
-+#include <scsi/scsi_tcq.h>
-+#include <scsi/scsi_transport_fc.h>
-+
-+#include "lpfc_sli.h"
-+#include "lpfc_disc.h"
-+#include "lpfc_scsi.h"
-+#include "lpfc.h"
-+#include "lpfc_fcp.h"
-+#include "lpfc_hw.h"
-+#include "lpfc_logmsg.h"
-+#include "lpfc_mem.h"
-+#include "lpfc_version.h"
-+#include "lpfc_crtn.h"
-+#include "lpfc_compat.h"
-+
-+static char *lpfc_drvr_name = LPFC_DRIVER_NAME;
-+
-+static struct scsi_transport_template *lpfc_transport_template = NULL;
-+
-+struct list_head lpfc_hba_list = LIST_HEAD_INIT(lpfc_hba_list);
-+EXPORT_SYMBOL(lpfc_hba_list);
-+
-+static const char *
-+lpfc_info(struct Scsi_Host *host)
-+{
-+ struct lpfc_hba *phba = (struct lpfc_hba *) host->hostdata[0];
-+ int len;
-+ static char lpfcinfobuf[384];
-+
-+ memset(lpfcinfobuf,0,384);
-+ if (phba && phba->pcidev){
-+ strncpy(lpfcinfobuf, phba->ModelDesc, 256);
-+ len = strlen(lpfcinfobuf);
-+ snprintf(lpfcinfobuf + len,
-+ 384-len,
-+ " on PCI bus %02x device %02x irq %d",
-+ phba->pcidev->bus->number,
-+ phba->pcidev->devfn,
-+ phba->pcidev->irq);
-+ len = strlen(lpfcinfobuf);
-+ if (phba->Port[0]) {
-+ snprintf(lpfcinfobuf + len,
-+ 384-len,
-+ " port %s",
-+ phba->Port);
-+ }
-+ }
-+ return lpfcinfobuf;
-+}
-+
-+static void
-+lpfc_jedec_to_ascii(int incr, char hdw[])
-+{
-+ int i, j;
-+ for (i = 0; i < 8; i++) {
-+ j = (incr & 0xf);
-+ if (j <= 9)
-+ hdw[7 - i] = 0x30 + j;
-+ else
-+ hdw[7 - i] = 0x61 + j - 10;
-+ incr = (incr >> 4);
-+ }
-+ hdw[8] = 0;
-+ return;
-+}
-+
-+static ssize_t
-+lpfc_drvr_version_show(struct class_device *cdev, char *buf)
-+{
-+ return snprintf(buf, PAGE_SIZE, LPFC_MODULE_DESC "\n");
-+}
-+
-+static ssize_t
-+management_version_show(struct class_device *cdev, char *buf)
-+{
-+ return snprintf(buf, PAGE_SIZE, DFC_API_VERSION "\n");
-+}
-+
-+static ssize_t
-+lpfc_info_show(struct class_device *cdev, char *buf)
-+{
-+ struct Scsi_Host *host = class_to_shost(cdev);
-+ return snprintf(buf, PAGE_SIZE, "%s\n",lpfc_info(host));
-+}
-+
-+static ssize_t
-+lpfc_serialnum_show(struct class_device *cdev, char *buf)
-+{
-+ struct Scsi_Host *host = class_to_shost(cdev);
-+ struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
-+ return snprintf(buf, PAGE_SIZE, "%s\n",phba->SerialNumber);
-+}
-+
-+static ssize_t
-+lpfc_modeldesc_show(struct class_device *cdev, char *buf)
-+{
-+ struct Scsi_Host *host = class_to_shost(cdev);
-+ struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
-+ return snprintf(buf, PAGE_SIZE, "%s\n",phba->ModelDesc);
-+}
-+
-+static ssize_t
-+lpfc_modelname_show(struct class_device *cdev, char *buf)
-+{
-+ struct Scsi_Host *host = class_to_shost(cdev);
-+ struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
-+ return snprintf(buf, PAGE_SIZE, "%s\n",phba->ModelName);
-+}
-+
-+static ssize_t
-+lpfc_programtype_show(struct class_device *cdev, char *buf)
-+{
-+ struct Scsi_Host *host = class_to_shost(cdev);
-+ struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
-+ return snprintf(buf, PAGE_SIZE, "%s\n",phba->ProgramType);
-+}
-+
-+static ssize_t
-+lpfc_portnum_show(struct class_device *cdev, char *buf)
-+{
-+ struct Scsi_Host *host = class_to_shost(cdev);
-+ struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
-+ return snprintf(buf, PAGE_SIZE, "%s\n",phba->Port);
-+}
-+
-+static ssize_t
-+lpfc_fwrev_show(struct class_device *cdev, char *buf)
-+{
-+ struct Scsi_Host *host = class_to_shost(cdev);
-+ struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
-+ char fwrev[32];
-+ lpfc_decode_firmware_rev(phba, fwrev, 1);
-+ return snprintf(buf, PAGE_SIZE, "%s\n",fwrev);
-+}
-+
-+static ssize_t
-+lpfc_hdw_show(struct class_device *cdev, char *buf)
-+{
-+ char hdw[9];
-+ struct Scsi_Host *host = class_to_shost(cdev);
-+ struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
-+ lpfc_vpd_t *vp = &phba->vpd;
-+ lpfc_jedec_to_ascii(vp->rev.biuRev, hdw);
-+ return snprintf(buf, PAGE_SIZE, "%s\n", hdw);
-+}
-+static ssize_t
-+lpfc_option_rom_version_show(struct class_device *cdev, char *buf)
-+{
-+ struct Scsi_Host *host = class_to_shost(cdev);
-+ struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
-+ return snprintf(buf, PAGE_SIZE, "%s\n", phba->OptionROMVersion);
-+}
-+static ssize_t
-+lpfc_state_show(struct class_device *cdev, char *buf)
-+{
-+ struct Scsi_Host *host = class_to_shost(cdev);
-+ struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
-+ int len = 0;
-+ switch (phba->hba_state) {
-+ case LPFC_INIT_START:
-+ case LPFC_INIT_MBX_CMDS:
-+ case LPFC_LINK_DOWN:
-+ len += snprintf(buf + len, PAGE_SIZE-len, "Link Down\n");
-+ break;
-+ case LPFC_LINK_UP:
-+ case LPFC_LOCAL_CFG_LINK:
-+ len += snprintf(buf + len, PAGE_SIZE-len, "Link Up\n");
-+ break;
-+ case LPFC_FLOGI:
-+ case LPFC_FABRIC_CFG_LINK:
-+ case LPFC_NS_REG:
-+ case LPFC_NS_QRY:
-+ case LPFC_BUILD_DISC_LIST:
-+ case LPFC_DISC_AUTH:
-+ case LPFC_CLEAR_LA:
-+ len += snprintf(buf + len, PAGE_SIZE-len,
-+ "Link Up - Discovery\n");
-+ break;
-+ case LPFC_HBA_READY:
-+ len += snprintf(buf + len, PAGE_SIZE-len,
-+ "Link Up - Ready:\n");
-+ if (phba->fc_topology == TOPOLOGY_LOOP) {
-+ if (phba->fc_flag & FC_PUBLIC_LOOP)
-+ len += snprintf(buf + len, PAGE_SIZE-len,
-+ " Public Loop\n");
-+ else
-+ len += snprintf(buf + len, PAGE_SIZE-len,
-+ " Private Loop\n");
-+ } else {
-+ if (phba->fc_flag & FC_FABRIC)
-+ len += snprintf(buf + len, PAGE_SIZE-len,
-+ " Fabric\n");
-+ else
-+ len += snprintf(buf + len, PAGE_SIZE-len,
-+ " Point-2-Point\n");
-+ }
-+ }
-+ return len;
-+}
-+
-+static ssize_t
-+lpfc_num_discovered_ports_show(struct class_device *cdev, char *buf)
-+{
-+ struct Scsi_Host *host = class_to_shost(cdev);
-+ struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
-+ return snprintf(buf, PAGE_SIZE, "%d\n", phba->fc_map_cnt +
-+ phba->fc_unmap_cnt);
-+}
-+
-+/*
-+ * These are replaced by Generic FC transport attributes
-+ */
-+static ssize_t
-+lpfc_speed_show(struct class_device *cdev, char *buf)
-+{
-+ struct Scsi_Host *host = class_to_shost(cdev);
-+ struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
-+ int len = 0;
-+ if (phba->fc_linkspeed == LA_4GHZ_LINK)
-+ len += snprintf(buf + len, PAGE_SIZE-len, "4 Gigabit\n");
-+ else
-+ if (phba->fc_linkspeed == LA_2GHZ_LINK)
-+ len += snprintf(buf + len, PAGE_SIZE-len, "2 Gigabit\n");
-+ else
-+ len += snprintf(buf + len, PAGE_SIZE-len, "1 Gigabit\n");
-+ return len;
-+}
-+
-+static ssize_t
-+lpfc_node_name_show(struct class_device *cdev, char *buf)
-+{
-+ struct Scsi_Host *host = class_to_shost(cdev);
-+ struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
-+ uint64_t node_name = 0;
-+ memcpy (&node_name, &phba->fc_nodename, sizeof (struct lpfc_name));
-+ return snprintf(buf, PAGE_SIZE, "0x%llx\n",
-+ (unsigned long long) be64_to_cpu(node_name));
-+}
-+static ssize_t
-+lpfc_port_name_show(struct class_device *cdev, char *buf)
-+{
-+ struct Scsi_Host *host = class_to_shost(cdev);
-+ struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
-+ uint64_t port_name = 0;
-+ memcpy (&port_name, &phba->fc_portname, sizeof (struct lpfc_name));
-+ return snprintf(buf, PAGE_SIZE, "0x%llx\n",
-+ (unsigned long long) be64_to_cpu(port_name));
-+}
-+static ssize_t
-+lpfc_did_show(struct class_device *cdev, char *buf)
-+{
-+ struct Scsi_Host *host = class_to_shost(cdev);
-+ struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
-+ return snprintf(buf, PAGE_SIZE, "0x%x\n", phba->fc_myDID);
-+}
-+
-+static ssize_t
-+lpfc_port_type_show(struct class_device *cdev, char *buf)
-+{
-+ struct Scsi_Host *host = class_to_shost(cdev);
-+ struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
-+
-+ size_t retval = -EPERM;
-+
-+ if (phba->fc_topology == TOPOLOGY_LOOP) {
-+ if (phba->fc_flag & FC_PUBLIC_LOOP)
-+ retval = snprintf(buf, PAGE_SIZE, "NL_Port\n");
-+ else
-+ retval = snprintf(buf, PAGE_SIZE, "L_Port\n");
-+ } else {
-+ if (phba->fc_flag & FC_FABRIC)
-+ retval = snprintf(buf, PAGE_SIZE, "N_Port\n");
-+ else
-+ retval = snprintf(buf, PAGE_SIZE,
-+ "Point-to-Point N_Port\n");
-+ }
-+
-+ return retval;
-+}
-+
-+static ssize_t
-+lpfc_fabric_name_show(struct class_device *cdev, char *buf)
-+{
-+ struct Scsi_Host *host = class_to_shost(cdev);
-+ struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
-+ uint64_t node_name = 0;
-+ memcpy (&node_name, &phba->fc_nodename, sizeof (struct lpfc_name));
-+
-+ if ((phba->fc_flag & FC_FABRIC) ||
-+ ((phba->fc_topology == TOPOLOGY_LOOP) &&
-+ (phba->fc_flag & FC_PUBLIC_LOOP))) {
-+ memcpy(&node_name,
-+ & phba->fc_fabparam.nodeName,
-+ sizeof (struct lpfc_name));
-+ }
-+
-+ return snprintf(buf, PAGE_SIZE, "0x%08llx\n",
-+ (unsigned long long) be64_to_cpu(node_name));
-+}
-+
-+static ssize_t
-+lpfc_events_show(struct class_device *cdev, char *buf)
-+{
-+ struct Scsi_Host *host = class_to_shost(cdev);
-+ struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
-+ int i = 0, len = 0, get = phba->hba_event_put;
-+ struct lpfc_hba_event *rec;
-+
-+ if (get == phba->hba_event_get)
-+ return snprintf(buf, PAGE_SIZE, "None\n");
-+
-+ for (i = 0; i < MAX_HBAEVT; i++) {
-+ if (get == 0)
-+ get = MAX_HBAEVT;
-+ get--;
-+ rec = &phba->hbaevt[get];
-+ switch (rec->fc_eventcode) {
-+ case 0:
-+ len += snprintf(buf+len, PAGE_SIZE-len,
-+ "---------");
-+ break;
-+ case HBA_EVENT_RSCN:
-+ len += snprintf(buf+len, PAGE_SIZE-len,
-+ "RSCN ");
-+ break;
-+ case HBA_EVENT_LINK_UP:
-+ len += snprintf(buf+len, PAGE_SIZE-len,
-+ "LINK UP ");
-+ break;
-+ case HBA_EVENT_LINK_DOWN:
-+ len += snprintf(buf+len, PAGE_SIZE-len,
-+ "LINK DOWN");
-+ break;
-+ default:
-+ len += snprintf(buf+len, PAGE_SIZE-len,
-+ "?????????");
-+ break;
-+
-+ }
-+ len += snprintf(buf+len, PAGE_SIZE-len, " %d,%d,%d,%d\n",
-+ rec->fc_evdata1, rec->fc_evdata2,
-+ rec->fc_evdata3, rec->fc_evdata4);
-+ }
-+ return len;
-+}
-+
-+static ssize_t
-+lpfc_issue_lip (struct class_device *cdev, const char *buf, size_t count)
-+{
-+ struct Scsi_Host *host = class_to_shost(cdev);
-+ struct lpfc_hba *phba = (struct lpfc_hba *) host->hostdata[0];
-+ int val = 0;
-+ LPFC_MBOXQ_t *pmboxq;
-+ int mbxstatus = MBXERR_ERROR;
-+
-+ if ((sscanf(buf, "%d", &val) != 1) ||
-+ (val != 1))
-+ return -EINVAL;
-+
-+ if ((phba->fc_flag & FC_OFFLINE_MODE) ||
-+ (phba->hba_state != LPFC_HBA_READY))
-+ return -EPERM;
-+
-+ pmboxq = mempool_alloc(phba->mbox_mem_pool,GFP_KERNEL);
-+
-+ if (!pmboxq)
-+ return -ENOMEM;
-+
-+ memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t));
-+ lpfc_init_link(phba, pmboxq, phba->cfg_topology, phba->cfg_link_speed);
-+ mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
-+
-+ if (mbxstatus == MBX_TIMEOUT)
-+ pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
-+ else
-+ mempool_free( pmboxq, phba->mbox_mem_pool);
-+
-+ if (mbxstatus == MBXERR_ERROR)
-+ return -EIO;
-+
-+ return strlen(buf);
-+}
-+
-+static ssize_t
-+lpfc_nport_evt_cnt_show(struct class_device *cdev, char *buf)
-+{
-+ struct Scsi_Host *host = class_to_shost(cdev);
-+ struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
-+ return snprintf(buf, PAGE_SIZE, "%d\n", phba->nport_event_cnt);
-+}
-+
-+static ssize_t
-+lpfc_board_online_show(struct class_device *cdev, char *buf)
-+{
-+ struct Scsi_Host *host = class_to_shost(cdev);
-+ struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
-+
-+ if (!phba) return -EPERM;
-+
-+ if (phba->fc_flag & FC_OFFLINE_MODE)
-+ return snprintf(buf, PAGE_SIZE, "0\n");
-+ else
-+ return snprintf(buf, PAGE_SIZE, "1\n");
-+}
-+
-+static ssize_t
-+lpfc_board_online_store(struct class_device *cdev, const char *buf,
-+ size_t count)
-+{
-+ struct Scsi_Host *host = class_to_shost(cdev);
-+ struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
-+ int val=0;
-+
-+ if (!phba) return -EPERM;
-+
-+ if (sscanf(buf, "%d", &val) != 1)
-+ return -EINVAL;
-+
-+ if (val && (phba->fc_flag & FC_OFFLINE_MODE)) {
-+ lpfc_online(phba);
-+ }
-+ else if (!val && !(phba->fc_flag & FC_OFFLINE_MODE)) {
-+ lpfc_offline(phba);
-+ }
-+
-+ return strlen(buf);
-+}
-+
-+static int
-+lpfc_disc_ndlp_show(struct lpfc_hba * phba, struct lpfc_nodelist *ndlp,
-+ char *buf, int offset)
-+{
-+ int len = 0, pgsz = PAGE_SIZE;
-+ uint8_t name[sizeof (struct lpfc_name)];
-+
-+ buf += offset;
-+ pgsz -= offset;
-+ len += snprintf(buf + len, pgsz -len,
-+ "DID %06x WWPN ", ndlp->nlp_DID);
-+
-+ /* A Fibre Channel node or port name is 8 octets
-+ * long and delimited by colons.
-+ */
-+ memcpy (&name[0], &ndlp->nlp_portname,
-+ sizeof (struct lpfc_name));
-+ len += snprintf(buf + len, pgsz-len,
-+ "%02x:%02x:%02x:%02x:%02x:%02x:"
-+ "%02x:%02x",
-+ name[0], name[1], name[2],
-+ name[3], name[4], name[5],
-+ name[6], name[7]);
-+
-+ len += snprintf(buf + len, pgsz-len,
-+ " WWNN ");
-+ memcpy (&name[0], &ndlp->nlp_nodename,
-+ sizeof (struct lpfc_name));
-+ len += snprintf(buf + len, pgsz-len,
-+ "%02x:%02x:%02x:%02x:%02x:%02x:"
-+ "%02x:%02x\n",
-+ name[0], name[1], name[2],
-+ name[3], name[4], name[5],
-+ name[6], name[7]);
-+ len += snprintf(buf + len, pgsz-len,
-+ " INFO %02x:%08x:%02x:%02x:%02x:%02x:"
-+ "%02x:%02x:%02x\n",
-+ ndlp->nlp_state, ndlp->nlp_flag, ndlp->nlp_type,
-+ ndlp->nlp_rpi, ndlp->nlp_sid, ndlp->nlp_failMask,
-+ ndlp->nlp_retry, ndlp->nlp_disc_refcnt,
-+ ndlp->nlp_fcp_info);
-+ return len;
-+}
-+
-+#define LPFC_MAX_SYS_DISC_ENTRIES 35
-+
-+static ssize_t
-+lpfc_disc_npr_show(struct class_device *cdev, char *buf)
-+{
-+ struct Scsi_Host *host = class_to_shost(cdev);
-+ struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
-+ struct lpfc_nodelist *ndlp, *next_ndlp;
-+ struct list_head *listp;
-+ unsigned long iflag;
-+ int i = 0, len = 0;
-+
-+ if (!phba) return -EPERM;
-+
-+ spin_lock_irqsave(phba->host->host_lock, iflag);
-+ listp = &phba->fc_npr_list;
-+ if (list_empty(listp)) {
-+ spin_unlock_irqrestore(phba->host->host_lock, iflag);
-+ return snprintf(buf, PAGE_SIZE, "NPR list: Empty\n");
-+ }
-+
-+ len += snprintf(buf+len, PAGE_SIZE-len, "NPR list: %d Entries\n",
-+ phba->fc_npr_cnt);
-+ list_for_each_entry_safe(ndlp, next_ndlp, listp, nlp_listp) {
-+ i++;
-+ if(i > LPFC_MAX_SYS_DISC_ENTRIES) {
-+ len += snprintf(buf+len, PAGE_SIZE-len,
-+ "Missed %d entries - sysfs %ld limit exceeded\n",
-+ (phba->fc_npr_cnt - i + 1), PAGE_SIZE);
-+ break;
-+ }
-+ if(len > (PAGE_SIZE-1)) /* double check */
-+ break;
-+ len += lpfc_disc_ndlp_show(phba, ndlp, buf, len);
-+ }
-+ spin_unlock_irqrestore(phba->host->host_lock, iflag);
-+ return len;
-+}
-+
-+static ssize_t
-+lpfc_disc_map_show(struct class_device *cdev, char *buf)
-+{
-+ struct Scsi_Host *host = class_to_shost(cdev);
-+ struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
-+ struct lpfc_nodelist *ndlp, *next_ndlp;
-+ struct list_head *listp;
-+ unsigned long iflag;
-+ int i = 0, len = 0;
-+
-+ if (!phba) return -EPERM;
-+
-+ spin_lock_irqsave(phba->host->host_lock, iflag);
-+ listp = &phba->fc_nlpmap_list;
-+ if (list_empty(listp)) {
-+ spin_unlock_irqrestore(phba->host->host_lock, iflag);
-+ return snprintf(buf, PAGE_SIZE, "Map list: Empty\n");
-+ }
-+
-+ len += snprintf(buf+len, PAGE_SIZE-len, "Map list: %d Entries\n",
-+ phba->fc_map_cnt);
-+ list_for_each_entry_safe(ndlp, next_ndlp, listp, nlp_listp) {
-+ i++;
-+ if(i > LPFC_MAX_SYS_DISC_ENTRIES) {
-+ len += snprintf(buf+len, PAGE_SIZE-len,
-+ "Missed %d entries - sysfs %ld limit exceeded\n",
-+ (phba->fc_map_cnt - i + 1), PAGE_SIZE);
-+ break;
-+ }
-+ if(len > (PAGE_SIZE-1)) /* double check */
-+ break;
-+ len += lpfc_disc_ndlp_show(phba, ndlp, buf, len);
-+ }
-+ spin_unlock_irqrestore(phba->host->host_lock, iflag);
-+ return len;
-+}
-+
-+static ssize_t
-+lpfc_disc_unmap_show(struct class_device *cdev, char *buf)
-+{
-+ struct Scsi_Host *host = class_to_shost(cdev);
-+ struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
-+ struct lpfc_nodelist *ndlp, *next_ndlp;
-+ struct list_head *listp;
-+ unsigned long iflag;
-+ int i = 0, len = 0;
-+
-+ if (!phba) return -EPERM;
-+
-+ spin_lock_irqsave(phba->host->host_lock, iflag);
-+ listp = &phba->fc_nlpunmap_list;
-+ if (list_empty(listp)) {
-+ spin_unlock_irqrestore(phba->host->host_lock, iflag);
-+ return snprintf(buf, PAGE_SIZE, "Unmap list: Empty\n");
-+ }
-+
-+ len += snprintf(buf+len, PAGE_SIZE-len, "Unmap list: %d Entries\n",
-+ phba->fc_unmap_cnt);
-+ list_for_each_entry_safe(ndlp, next_ndlp, listp, nlp_listp) {
-+ i++;
-+ if(i > LPFC_MAX_SYS_DISC_ENTRIES) {
-+ len += snprintf(buf+len, PAGE_SIZE-len,
-+ "Missed %d entries - sysfs %ld limit exceeded\n",
-+ (phba->fc_unmap_cnt - i + 1), PAGE_SIZE);
-+ break;
-+ }
-+ if(len > (PAGE_SIZE-1)) /* double check */
-+ break;
-+ len += lpfc_disc_ndlp_show(phba, ndlp, buf, len);
-+ }
-+ spin_unlock_irqrestore(phba->host->host_lock, iflag);
-+ return len;
-+}
-+
-+static ssize_t
-+lpfc_disc_prli_show(struct class_device *cdev, char *buf)
-+{
-+ struct Scsi_Host *host = class_to_shost(cdev);
-+ struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
-+ struct lpfc_nodelist *ndlp, *next_ndlp;
-+ struct list_head *listp;
-+ unsigned long iflag;
-+ int i = 0, len = 0;
-+
-+ if (!phba) return -EPERM;
-+
-+ spin_lock_irqsave(phba->host->host_lock, iflag);
-+ listp = &phba->fc_prli_list;
-+ if (list_empty(listp)) {
-+ spin_unlock_irqrestore(phba->host->host_lock, iflag);
-+ return snprintf(buf, PAGE_SIZE, "PRLI list: Empty\n");
-+ }
-+
-+ len += snprintf(buf+len, PAGE_SIZE-len, "PRLI list: %d Entries\n",
-+ phba->fc_prli_cnt);
-+ list_for_each_entry_safe(ndlp, next_ndlp, listp, nlp_listp) {
-+ i++;
-+ if(i > LPFC_MAX_SYS_DISC_ENTRIES) {
-+ len += snprintf(buf+len, PAGE_SIZE-len,
-+ "Missed %d entries - sysfs %ld limit exceeded\n",
-+ (phba->fc_prli_cnt - i + 1), PAGE_SIZE);
-+ break;
-+ }
-+ if(len > (PAGE_SIZE-1)) /* double check */
-+ break;
-+ len += lpfc_disc_ndlp_show(phba, ndlp, buf, len);
-+ }
-+ spin_unlock_irqrestore(phba->host->host_lock, iflag);
-+ return len;
-+}
-+
-+static ssize_t
-+lpfc_disc_reglgn_show(struct class_device *cdev, char *buf)
-+{
-+ struct Scsi_Host *host = class_to_shost(cdev);
-+ struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
-+ struct lpfc_nodelist *ndlp, *next_ndlp;
-+ struct list_head *listp;
-+ unsigned long iflag;
-+ int i = 0, len = 0;
-+
-+ if (!phba) return -EPERM;
-+
-+ spin_lock_irqsave(phba->host->host_lock, iflag);
-+ listp = &phba->fc_reglogin_list;
-+ if (list_empty(listp)) {
-+ spin_unlock_irqrestore(phba->host->host_lock, iflag);
-+ return snprintf(buf, PAGE_SIZE, "RegLgn list: Empty\n");
-+ }
-+
-+ len += snprintf(buf+len, PAGE_SIZE-len, "RegLgn list: %d Entries\n",
-+ phba->fc_reglogin_cnt);
-+ list_for_each_entry_safe(ndlp, next_ndlp, listp, nlp_listp) {
-+ i++;
-+ if(i > LPFC_MAX_SYS_DISC_ENTRIES) {
-+ len += snprintf(buf+len, PAGE_SIZE-len,
-+ "Missed %d entries - sysfs %ld limit exceeded\n",
-+ (phba->fc_reglogin_cnt - i + 1), PAGE_SIZE);
-+ break;
-+ }
-+ if(len > (PAGE_SIZE-1)) /* double check */
-+ break;
-+ len += lpfc_disc_ndlp_show(phba, ndlp, buf, len);
-+ }
-+ spin_unlock_irqrestore(phba->host->host_lock, iflag);
-+ return len;
-+}
-+
-+static ssize_t
-+lpfc_disc_adisc_show(struct class_device *cdev, char *buf)
-+{
-+ struct Scsi_Host *host = class_to_shost(cdev);
-+ struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
-+ struct lpfc_nodelist *ndlp, *next_ndlp;
-+ struct list_head *listp;
-+ unsigned long iflag;
-+ int i = 0, len = 0;
-+
-+ if (!phba) return -EPERM;
-+
-+ spin_lock_irqsave(phba->host->host_lock, iflag);
-+ listp = &phba->fc_adisc_list;
-+ if (list_empty(listp)) {
-+ spin_unlock_irqrestore(phba->host->host_lock, iflag);
-+ return snprintf(buf, PAGE_SIZE, "ADISC list: Empty\n");
-+ }
-+
-+ len += snprintf(buf+len, PAGE_SIZE-len, "ADISC list: %d Entries\n",
-+ phba->fc_adisc_cnt);
-+ list_for_each_entry_safe(ndlp, next_ndlp, listp, nlp_listp) {
-+ i++;
-+ if(i > LPFC_MAX_SYS_DISC_ENTRIES) {
-+ len += snprintf(buf+len, PAGE_SIZE-len,
-+ "Missed %d entries - sysfs %ld limit exceeded\n",
-+ (phba->fc_adisc_cnt - i + 1), PAGE_SIZE);
-+ break;
-+ }
-+ if(len > (PAGE_SIZE-1)) /* double check */
-+ break;
-+ len += lpfc_disc_ndlp_show(phba, ndlp, buf, len);
-+ }
-+ spin_unlock_irqrestore(phba->host->host_lock, iflag);
-+ return len;
-+}
-+
-+static ssize_t
-+lpfc_disc_plogi_show(struct class_device *cdev, char *buf)
-+{
-+ struct Scsi_Host *host = class_to_shost(cdev);
-+ struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
-+ struct lpfc_nodelist *ndlp, *next_ndlp;
-+ struct list_head *listp;
-+ unsigned long iflag;
-+ int i = 0, len = 0;
-+
-+ if (!phba) return -EPERM;
-+
-+ spin_lock_irqsave(phba->host->host_lock, iflag);
-+ listp = &phba->fc_plogi_list;
-+ if (list_empty(listp)) {
-+ spin_unlock_irqrestore(phba->host->host_lock, iflag);
-+ return snprintf(buf, PAGE_SIZE, "PLOGI list: Empty\n");
-+ }
-+
-+ len += snprintf(buf+len, PAGE_SIZE-len, "PLOGI list: %d Entries\n",
-+ phba->fc_plogi_cnt);
-+ list_for_each_entry_safe(ndlp, next_ndlp, listp, nlp_listp) {
-+ i++;
-+ if(i > LPFC_MAX_SYS_DISC_ENTRIES) {
-+ len += snprintf(buf+len, PAGE_SIZE-len,
-+ "Missed %d entries - sysfs %ld limit exceeded\n",
-+ (phba->fc_plogi_cnt - i + 1), PAGE_SIZE);
-+ break;
-+ }
-+ if(len > (PAGE_SIZE-1)) /* double check */
-+ break;
-+ len += lpfc_disc_ndlp_show(phba, ndlp, buf, len);
-+ }
-+ spin_unlock_irqrestore(phba->host->host_lock, iflag);
-+ return len;
-+}
-+
-+static ssize_t
-+lpfc_disc_unused_show(struct class_device *cdev, char *buf)
-+{
-+ struct Scsi_Host *host = class_to_shost(cdev);
-+ struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
-+ struct lpfc_nodelist *ndlp, *next_ndlp;
-+ struct list_head *listp;
-+ unsigned long iflag;
-+ int i = 0, len = 0;
-+
-+ if (!phba) return -EPERM;
-+
-+ spin_lock_irqsave(phba->host->host_lock, iflag);
-+ listp = &phba->fc_unused_list;
-+ if (list_empty(listp)) {
-+ spin_unlock_irqrestore(phba->host->host_lock, iflag);
-+ return snprintf(buf, PAGE_SIZE, "Unused list: Empty\n");
-+ }
-+
-+ len += snprintf(buf+len, PAGE_SIZE-len, "Unused list: %d Entries\n",
-+ phba->fc_unused_cnt);
-+ list_for_each_entry_safe(ndlp, next_ndlp, listp, nlp_listp) {
-+ i++;
-+ if(i > LPFC_MAX_SYS_DISC_ENTRIES) {
-+ len += snprintf(buf+len, PAGE_SIZE-len,
-+ "Missed %d entries - sysfs %ld limit exceeded\n",
-+ (phba->fc_unused_cnt - i + 1), PAGE_SIZE);
-+ break;
-+ }
-+ if(len > (PAGE_SIZE-1)) /* double check */
-+ break;
-+ len += lpfc_disc_ndlp_show(phba, ndlp, buf, len);
-+ }
-+ spin_unlock_irqrestore(phba->host->host_lock, iflag);
-+ return len;
-+}
-+
-+#define LPFC_MAX_SYS_OUTFCPIO_ENTRIES 50
-+
-+static ssize_t
-+lpfc_outfcpio_show(struct class_device *cdev, char *buf)
-+{
-+ struct Scsi_Host *host = class_to_shost(cdev);
-+ struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
-+ struct lpfc_sli *psli;
-+ struct lpfc_sli_ring *pring;
-+ struct lpfc_target *targetp;
-+ struct lpfc_nodelist *ndlp;
-+ struct lpfc_scsi_buf *lpfc_cmd;
-+ struct list_head *curr, *next;
-+ struct lpfc_iocbq *iocb;
-+ struct lpfc_iocbq *next_iocb;
-+ IOCB_t *cmd;
-+ unsigned long iflag;
-+ int i = 0, len = 0;
-+ int cnt = 0, unused = 0, total = 0;
-+ int tx_count, txcmpl_count;
-+
-+ if (!phba) return -EPERM;
-+ psli = &phba->sli;
-+ pring = &psli->ring[psli->fcp_ring];
-+
-+
-+ spin_lock_irqsave(phba->host->host_lock, iflag);
-+
-+ for(i=0;i<LPFC_MAX_TARGET;i++) {
-+ targetp = phba->device_queue_hash[i];
-+ if(targetp) {
-+ if(cnt >= LPFC_MAX_SYS_OUTFCPIO_ENTRIES) {
-+ unused++;
-+ continue;
-+ }
-+ cnt++;
-+ len += snprintf(buf+len, PAGE_SIZE-len,
-+ "ID %03d:qcmd %08x done %08x err %08x "
-+ "slv %03x ", targetp->scsi_id, targetp->qcmdcnt,
-+ targetp->iodonecnt, targetp->errorcnt,
-+ targetp->slavecnt);
-+ total += (targetp->qcmdcnt - targetp->iodonecnt);
-+
-+ tx_count = 0;
-+ txcmpl_count = 0;
-+
-+ /* Count I/Os on txq and txcmplq. */
-+ list_for_each_safe(curr, next, &pring->txq) {
-+ next_iocb = list_entry(curr, struct lpfc_iocbq,
-+ list);
-+ iocb = next_iocb;
-+ cmd = &iocb->iocb;
-+
-+ /* Must be a FCP command */
-+ if ((cmd->ulpCommand != CMD_FCP_ICMND64_CR) &&
-+ (cmd->ulpCommand != CMD_FCP_IWRITE64_CR) &&
-+ (cmd->ulpCommand != CMD_FCP_IREAD64_CR)) {
-+ continue;
-+ }
-+
-+ /* context1 MUST be a struct lpfc_scsi_buf */
-+ lpfc_cmd =
-+ (struct lpfc_scsi_buf *) (iocb->context1);
-+ if ((lpfc_cmd == 0)
-+ || (lpfc_cmd->target->scsi_id !=
-+ targetp->scsi_id)) {
-+ continue;
-+ }
-+ tx_count++;
-+ }
-+
-+ /* Next check the txcmplq */
-+ list_for_each_safe(curr, next, &pring->txcmplq) {
-+ next_iocb = list_entry(curr, struct lpfc_iocbq,
-+ list);
-+ iocb = next_iocb;
-+ cmd = &iocb->iocb;
-+
-+ /* Must be a FCP command */
-+ if ((cmd->ulpCommand != CMD_FCP_ICMND64_CR) &&
-+ (cmd->ulpCommand != CMD_FCP_IWRITE64_CR) &&
-+ (cmd->ulpCommand != CMD_FCP_IREAD64_CR)) {
-+ continue;
-+ }
-+
-+ /* context1 MUST be a struct lpfc_scsi_buf */
-+ lpfc_cmd =
-+ (struct lpfc_scsi_buf *) (iocb->context1);
-+ if ((lpfc_cmd == 0)
-+ || (lpfc_cmd->target->scsi_id !=
-+ targetp->scsi_id)) {
-+ continue;
-+ }
-+
-+ txcmpl_count++;
-+ }
-+ len += snprintf(buf+len, PAGE_SIZE-len,
-+ "tx %04x txc %04x ",
-+ tx_count, txcmpl_count);
-+
-+ ndlp = targetp->pnode;
-+ if(ndlp == NULL) {
-+ len += snprintf(buf+len, PAGE_SIZE-len,
-+ "DISAPPERED\n");
-+ }
-+ else {
-+ if(ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
-+ len += snprintf(buf+len, PAGE_SIZE-len,
-+ "MAPPED\n");
-+ }
-+ else {
-+ len += snprintf(buf+len, PAGE_SIZE-len,
-+ "RECOVERY (%d)\n",
-+ ndlp->nlp_state);
-+ }
-+ }
-+ }
-+ if(len > (PAGE_SIZE-1)) /* double check */
-+ break;
-+ }
-+ if(unused) {
-+ len += snprintf(buf+len, PAGE_SIZE-len,
-+ "Missed x%x entries - sysfs %ld limit exceeded\n",
-+ unused, PAGE_SIZE);
-+ }
-+ len += snprintf(buf+len, PAGE_SIZE-len,
-+ "x%x total I/Os outstanding\n", total);
-+
-+ spin_unlock_irqrestore(phba->host->host_lock, iflag);
-+ return len;
-+}
-+
-+#define lpfc_param_show(attr) \
-+static ssize_t \
-+lpfc_##attr##_show(struct class_device *cdev, char *buf) \
-+{ \
-+ struct Scsi_Host *host = class_to_shost(cdev);\
-+ struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];\
-+ int val = 0;\
-+ if (phba){\
-+ val = phba->cfg_##attr;\
-+ return snprintf(buf, PAGE_SIZE, "%d\n",\
-+ phba->cfg_##attr);\
-+ }\
-+ return -EPERM;\
-+}
-+
-+#define lpfc_param_set(attr, default, minval, maxval) \
-+static int \
-+lpfc_##attr##_set(struct lpfc_hba *phba, int val) \
-+{ \
-+ if (val >= minval && val <= maxval) {\
-+ phba->cfg_##attr = val;\
-+ return 0;\
-+ }\
-+ phba->cfg_##attr = default;\
-+ return -EINVAL;\
-+}
-+
-+#define lpfc_param_store(attr) \
-+static ssize_t \
-+lpfc_##attr##_store(struct class_device *cdev, const char *buf, size_t count) \
-+{ \
-+ struct Scsi_Host *host = class_to_shost(cdev);\
-+ struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];\
-+ int val=0;\
-+ if (sscanf(buf, "%d", &val) != 1)\
-+ return -EPERM;\
-+ if (phba){\
-+ if (lpfc_##attr##_set(phba, val) == 0) \
-+ return strlen(buf);\
-+ }\
-+ return -EINVAL;\
-+}
-+
-+#define LPFC_ATTR(name, defval, minval, maxval, desc) \
-+static int lpfc_##name = defval;\
-+module_param(lpfc_##name, int, 0);\
-+MODULE_PARM_DESC(lpfc_##name, desc);\
-+lpfc_param_set(name, defval, minval, maxval)\
-+
-+
-+#define LPFC_ATTR_R(name, defval, minval, maxval, desc) \
-+static int lpfc_##name = defval;\
-+module_param(lpfc_##name, int, 0);\
-+MODULE_PARM_DESC(lpfc_##name, desc);\
-+lpfc_param_show(name)\
-+lpfc_param_set(name, defval, minval, maxval)\
-+static CLASS_DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)
-+
-+#define LPFC_ATTR_RW(name, defval, minval, maxval, desc) \
-+static int lpfc_##name = defval;\
-+module_param(lpfc_##name, int, 0);\
-+MODULE_PARM_DESC(lpfc_##name, desc);\
-+lpfc_param_show(name)\
-+lpfc_param_set(name, defval, minval, maxval)\
-+lpfc_param_store(name)\
-+static CLASS_DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\
-+ lpfc_##name##_show, lpfc_##name##_store)
-+
-+static CLASS_DEVICE_ATTR(info, S_IRUGO, lpfc_info_show, NULL);
-+static CLASS_DEVICE_ATTR(serialnum, S_IRUGO, lpfc_serialnum_show, NULL);
-+static CLASS_DEVICE_ATTR(modeldesc, S_IRUGO, lpfc_modeldesc_show, NULL);
-+static CLASS_DEVICE_ATTR(modelname, S_IRUGO, lpfc_modelname_show, NULL);
-+static CLASS_DEVICE_ATTR(programtype, S_IRUGO, lpfc_programtype_show, NULL);
-+static CLASS_DEVICE_ATTR(portnum, S_IRUGO, lpfc_portnum_show, NULL);
-+static CLASS_DEVICE_ATTR(fwrev, S_IRUGO, lpfc_fwrev_show, NULL);
-+static CLASS_DEVICE_ATTR(hdw, S_IRUGO, lpfc_hdw_show, NULL);
-+static CLASS_DEVICE_ATTR(state, S_IRUGO, lpfc_state_show, NULL);
-+static CLASS_DEVICE_ATTR(option_rom_version, S_IRUGO,
-+ lpfc_option_rom_version_show, NULL);
-+static CLASS_DEVICE_ATTR(num_discovered_ports, S_IRUGO,
-+ lpfc_num_discovered_ports_show, NULL);
-+static CLASS_DEVICE_ATTR(speed, S_IRUGO, lpfc_speed_show, NULL);
-+static CLASS_DEVICE_ATTR(node_name, S_IRUGO, lpfc_node_name_show, NULL);
-+static CLASS_DEVICE_ATTR(port_name, S_IRUGO, lpfc_port_name_show, NULL);
-+static CLASS_DEVICE_ATTR(portfcid, S_IRUGO, lpfc_did_show, NULL);
-+static CLASS_DEVICE_ATTR(port_type, S_IRUGO, lpfc_port_type_show, NULL);
-+static CLASS_DEVICE_ATTR(fabric_name, S_IRUGO, lpfc_fabric_name_show, NULL);
-+static CLASS_DEVICE_ATTR(events, S_IRUGO, lpfc_events_show, NULL);
-+static CLASS_DEVICE_ATTR(nport_evt_cnt, S_IRUGO, lpfc_nport_evt_cnt_show, NULL);
-+static CLASS_DEVICE_ATTR(lpfc_drvr_version, S_IRUGO, lpfc_drvr_version_show,
-+ NULL);
-+static CLASS_DEVICE_ATTR(management_version, S_IRUGO, management_version_show,
-+ NULL);
-+static CLASS_DEVICE_ATTR(issue_lip, S_IWUSR, NULL, lpfc_issue_lip);
-+static CLASS_DEVICE_ATTR(board_online, S_IRUGO | S_IWUSR,
-+ lpfc_board_online_show, lpfc_board_online_store);
-+
-+static CLASS_DEVICE_ATTR(disc_npr, S_IRUGO, lpfc_disc_npr_show, NULL);
-+static CLASS_DEVICE_ATTR(disc_map, S_IRUGO, lpfc_disc_map_show, NULL);
-+static CLASS_DEVICE_ATTR(disc_unmap, S_IRUGO, lpfc_disc_unmap_show, NULL);
-+static CLASS_DEVICE_ATTR(disc_prli, S_IRUGO, lpfc_disc_prli_show, NULL);
-+static CLASS_DEVICE_ATTR(disc_reglgn, S_IRUGO, lpfc_disc_reglgn_show, NULL);
-+static CLASS_DEVICE_ATTR(disc_adisc, S_IRUGO, lpfc_disc_adisc_show, NULL);
-+static CLASS_DEVICE_ATTR(disc_plogi, S_IRUGO, lpfc_disc_plogi_show, NULL);
-+static CLASS_DEVICE_ATTR(disc_unused, S_IRUGO, lpfc_disc_unused_show, NULL);
-+static CLASS_DEVICE_ATTR(outfcpio, S_IRUGO, lpfc_outfcpio_show, NULL);
-+
-+/*
-+# lpfc_log_verbose: Only turn this flag on if you are willing to risk being
-+# deluged with LOTS of information.
-+# You can set a bit mask to record specific types of verbose messages:
-+#
-+# LOG_ELS 0x1 ELS events
-+# LOG_DISCOVERY 0x2 Link discovery events
-+# LOG_MBOX 0x4 Mailbox events
-+# LOG_INIT 0x8 Initialization events
-+# LOG_LINK_EVENT 0x10 Link events
-+# LOG_IP 0x20 IP traffic history
-+# LOG_FCP 0x40 FCP traffic history
-+# LOG_NODE 0x80 Node table events
-+# LOG_MISC 0x400 Miscellaneous events
-+# LOG_SLI 0x800 SLI events
-+# LOG_CHK_COND 0x1000 FCP Check condition flag
-+# LOG_LIBDFC 0x2000 LIBDFC events
-+# LOG_ALL_MSG 0xffff LOG all messages
-+*/
-+LPFC_ATTR_RW(log_verbose, 0x0, 0x0, 0xffff, "Verbose logging bit-mask");
-+
-+/*
-+# lun_queue_depth: This parameter is used to limit the number of outstanding
-+# commands per FCP LUN. Value range is [1,128]. Default value is 30.
-+*/
-+LPFC_ATTR_R(lun_queue_depth, 30, 1, 128,
-+ "Max number of FCP commands we can queue to a specific LUN");
-+
-+/*
-+# Some disk devices have a "select ID" or "select Target" capability.
-+# From a protocol standpoint "select ID" usually means select the
-+# Fibre channel "ALPA". In the FC-AL Profile there is an "informative
-+# annex" which contains a table that maps a "select ID" (a number
-+# between 0 and 7F) to an ALPA. By default, for compatibility with
-+# older drivers, the lpfc driver scans this table from low ALPA to high
-+# ALPA.
-+#
-+# Turning on the scan-down variable (on = 1, off = 0) will
-+# cause the lpfc driver to use an inverted table, effectively
-+# scanning ALPAs from high to low. Value range is [0,1]. Default value is 1.
-+#
-+# (Note: This "select ID" functionality is a LOOP ONLY characteristic
-+# and will not work across a fabric. Also this parameter will take
-+# effect only in the case when ALPA map is not available.)
-+*/
-+LPFC_ATTR_R(scan_down, 1, 0, 1,
-+ "Start scanning for devices from highest ALPA to lowest");
-+
-+/*
-+# lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear
-+# until the timer expires. Value range is [0,255]. Default value is 20.
-+# NOTE: this MUST be less then the SCSI Layer command timeout - 1.
-+*/
-+LPFC_ATTR_RW(nodev_tmo, 30, 0, 255,
-+ "Seconds driver will hold I/O waiting for a device to come back");
-+
-+/*
-+# lpfc_topology: link topology for init link
-+# 0x0 = attempt loop mode then point-to-point
-+# 0x02 = attempt point-to-point mode only
-+# 0x04 = attempt loop mode only
-+# 0x06 = attempt point-to-point mode then loop
-+# Set point-to-point mode if you want to run as an N_Port.
-+# Set loop mode if you want to run as an NL_Port. Value range is [0,0x6].
-+# Default value is 0.
-+*/
-+LPFC_ATTR_R(topology, 0, 0, 6, "Select Fibre Channel topology");
-+
-+/*
-+# lpfc_link_speed: Link speed selection for initializing the Fibre Channel
-+# connection.
-+# 0 = auto select (default)
-+# 1 = 1 Gigabaud
-+# 2 = 2 Gigabaud
-+# 4 = 4 Gigabaud
-+# Value range is [0,4]. Default value is 0.
-+*/
-+LPFC_ATTR_R(link_speed, 0, 0, 4, "Select link speed");
-+
-+/*
-+# lpfc_fcp_class: Determines FC class to use for the FCP protocol.
-+# Value range is [2,3]. Default value is 3.
-+*/
-+LPFC_ATTR_R(fcp_class, 3, 2, 3,
-+ "Select Fibre Channel class of service for FCP sequences");
-+
-+/*
-+# lpfc_use_adisc: Use ADISC for FCP rediscovery instead of PLOGI. Value range
-+# is [0,1]. Default value is 0.
-+*/
-+LPFC_ATTR_RW(use_adisc, 0, 0, 1,
-+ "Use ADISC on rediscovery to authenticate FCP devices");
-+
-+/*
-+# lpfc_ack0: Use ACK0, instead of ACK1 for class 2 acknowledgement. Value
-+# range is [0,1]. Default value is 0.
-+*/
-+LPFC_ATTR_R(ack0, 0, 0, 1, "Enable ACK0 support");
-+
-+/*
-+# lpfc_fcp_bind_method: It specifies the method of binding to be used for each
-+# port. This binding method is used for consistent binding and mapped
-+# binding. A value of 1 will force WWNN binding, value of 2 will force WWPN
-+# binding, value of 3 will force DID binding and value of 4 will force the
-+# driver to derive binding from ALPA. Any consistent binding whose type does
-+# not match with the bind method of the port will be ignored. Value range
-+# is [1,4]. Default value is 2.
-+*/
-+LPFC_ATTR_R(fcp_bind_method, 2, 0, 4,
-+ "Select the bind method to be used");
-+
-+/*
-+# lpfc_cr_delay & lpfc_cr_count: Default values for I/O colaesing
-+# cr_delay (msec) or cr_count outstanding commands. cr_delay can take
-+# value [0,63]. cr_count can take value [0,255]. Default value of cr_delay
-+# is 0. Default value of cr_count is 1. The cr_count feature is disabled if
-+# cr_delay is set to 0.
-+*/
-+LPFC_ATTR(cr_delay, 0, 0, 63, "A count of milliseconds after which an"
-+ "interrupt response is generated");
-+
-+LPFC_ATTR(cr_count, 1, 1, 255, "A count of I/O completions after which an"
-+ "interrupt response is generated");
-+
-+/*
-+# lpfc_fdmi_on: controls FDMI support.
-+# 0 = no FDMI support
-+# 1 = support FDMI without attribute of hostname
-+# 2 = support FDMI with attribute of hostname
-+# Value range [0,2]. Default value is 0.
-+*/
-+LPFC_ATTR_RW(fdmi_on, 0, 0, 2, "Enable FDMI support");
-+
-+/*
-+# Specifies the maximum number of ELS cmds we can have outstanding (for
-+# discovery). Value range is [1,64]. Default value = 32.
-+*/
-+LPFC_ATTR(discovery_threads, 32, 1, 64, "Maximum number of ELS commands"
-+ "during discovery");
-+
-+/*
-+# lpfc_max_luns: maximum number of LUNs per target driver will support
-+# Value range is [1,32768]. Default value is 256.
-+# NOTE: The SCSI layer will scan each target for this many luns
-+*/
-+LPFC_ATTR_R(max_luns, 256, 1, 32768,
-+ "Maximum number of LUNs per target driver will support");
-+
-+
-+static ssize_t
-+sysfs_ctlreg_write(struct kobject *kobj, char *buf, loff_t off, size_t count)
-+{
-+ unsigned long iflag;
-+ size_t buf_off;
-+ struct Scsi_Host *host = class_to_shost(container_of(kobj,
-+ struct class_device, kobj));
-+ struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
-+
-+ if ((off + count) > FF_REG_AREA_SIZE)
-+ return -ERANGE;
-+
-+ if (count == 0) return 0;
-+
-+ if (off % 4 || count % 4 || (unsigned long)buf % 4)
-+ return -EINVAL;
-+
-+ spin_lock_irqsave(phba->host->host_lock, iflag);
-+
-+ if (!(phba->fc_flag & FC_OFFLINE_MODE)) {
-+ spin_unlock_irqrestore(phba->host->host_lock, iflag);
-+ return -EPERM;
-+ }
-+
-+ for (buf_off = 0; buf_off < count; buf_off += sizeof(uint32_t))
-+ writel(*((uint32_t *)(buf + buf_off)),
-+ (uint8_t *)phba->ctrl_regs_memmap_p + off + buf_off);
-+
-+ spin_unlock_irqrestore(phba->host->host_lock, iflag);
-+
-+ return count;
-+}
-+
-+static ssize_t
-+sysfs_ctlreg_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
-+{
-+ unsigned long iflag;
-+ size_t buf_off;
-+ uint32_t * tmp_ptr;
-+ struct Scsi_Host *host = class_to_shost(container_of(kobj,
-+ struct class_device, kobj));
-+ struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
-+
-+ if (off > FF_REG_AREA_SIZE)
-+ return -ERANGE;
-+
-+ if ((off + count) > FF_REG_AREA_SIZE)
-+ count = FF_REG_AREA_SIZE - off;
-+
-+ if (count == 0) return 0;
-+
-+ if (off % 4 || count % 4 || (unsigned long)buf % 4)
-+ return -EINVAL;
-+
-+ spin_lock_irqsave(phba->host->host_lock, iflag);
-+
-+ for (buf_off = 0; buf_off < count; buf_off += sizeof(uint32_t)) {
-+ tmp_ptr = (uint32_t *)(buf + buf_off);
-+ *tmp_ptr = readl((uint8_t *)(phba->ctrl_regs_memmap_p
-+ + off + buf_off));
-+ }
-+
-+ spin_unlock_irqrestore(phba->host->host_lock, iflag);
-+
-+ return count;
-+}
-+
-+static struct bin_attribute sysfs_ctlreg_attr = {
-+ .attr = {
-+ .name = "ctlreg",
-+ .mode = S_IRUSR | S_IWUSR,
-+ .owner = THIS_MODULE,
-+ },
-+ .size = 256,
-+ .read = sysfs_ctlreg_read,
-+ .write = sysfs_ctlreg_write,
-+};
-+
-+
-+#define MBOX_BUFF_SIZE (MAILBOX_CMD_WSIZE*sizeof(uint32_t))
-+
-+static void
-+sysfs_mbox_idle (struct lpfc_hba * phba)
-+{
-+ phba->sysfs_mbox.state = SMBOX_IDLE;
-+ phba->sysfs_mbox.offset = 0;
-+
-+ if (phba->sysfs_mbox.mbox) {
-+ mempool_free(phba->sysfs_mbox.mbox,
-+ phba->mbox_mem_pool);
-+ phba->sysfs_mbox.mbox = NULL;
-+ }
-+}
-+
-+static ssize_t
-+sysfs_mbox_write(struct kobject *kobj, char *buf, loff_t off, size_t count)
-+{
-+ unsigned long iflag;
-+ struct Scsi_Host * host =
-+ class_to_shost(container_of(kobj, struct class_device, kobj));
-+ struct lpfc_hba * phba = (struct lpfc_hba*)host->hostdata[0];
-+ struct lpfcMboxq * mbox = NULL;
-+
-+ if ((count + off) > MBOX_BUFF_SIZE)
-+ return -ERANGE;
-+
-+ if (off % 4 || count % 4 || (unsigned long)buf % 4)
-+ return -EINVAL;
-+
-+ if (count == 0)
-+ return 0;
-+
-+ if (off == 0) {
-+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
-+ if (!mbox)
-+ return -ENOMEM;
-+
-+ }
-+
-+ spin_lock_irqsave(host->host_lock, iflag);
-+
-+ if (off == 0) {
-+ if (phba->sysfs_mbox.mbox)
-+ mempool_free(mbox, phba->mbox_mem_pool);
-+ else
-+ phba->sysfs_mbox.mbox = mbox;
-+ phba->sysfs_mbox.state = SMBOX_WRITING;
-+ }
-+ else {
-+ if (phba->sysfs_mbox.state != SMBOX_WRITING ||
-+ phba->sysfs_mbox.offset != off ||
-+ phba->sysfs_mbox.mbox == NULL ) {
-+ sysfs_mbox_idle(phba);
-+ spin_unlock_irqrestore(host->host_lock, iflag);
-+ return -EINVAL;
-+ }
-+ }
-+
-+ memcpy((uint8_t *) & phba->sysfs_mbox.mbox->mb + off,
-+ buf, count);
-+
-+ phba->sysfs_mbox.offset = off + count;
-+
-+ spin_unlock_irqrestore(host->host_lock, iflag);
-+
-+ return count;
-+}
-+
-+static ssize_t
-+sysfs_mbox_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
-+{
-+ unsigned long iflag;
-+ struct Scsi_Host *host =
-+ class_to_shost(container_of(kobj, struct class_device,
-+ kobj));
-+ struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata[0];
-+ int rc;
-+
-+ if (off > sizeof(MAILBOX_t))
-+ return -ERANGE;
-+
-+ if ((count + off) > sizeof(MAILBOX_t))
-+ count = sizeof(MAILBOX_t) - off;
-+
-+ if (off % 4 || count % 4 || (unsigned long)buf % 4)
-+ return -EINVAL;
-+
-+ if (off && count == 0)
-+ return 0;
-+
-+ spin_lock_irqsave(phba->host->host_lock, iflag);
-+
-+ if (off == 0 &&
-+ phba->sysfs_mbox.state == SMBOX_WRITING &&
-+ phba->sysfs_mbox.offset >= 2 * sizeof(uint32_t)) {
-+
-+ switch (phba->sysfs_mbox.mbox->mb.mbxCommand) {
-+ /* Offline only */
-+ case MBX_WRITE_NV:
-+ case MBX_INIT_LINK:
-+ case MBX_DOWN_LINK:
-+ case MBX_CONFIG_LINK:
-+ case MBX_CONFIG_RING:
-+ case MBX_RESET_RING:
-+ case MBX_UNREG_LOGIN:
-+ case MBX_CLEAR_LA:
-+ case MBX_DUMP_CONTEXT:
-+ case MBX_RUN_DIAGS:
-+ case MBX_RESTART:
-+ case MBX_FLASH_WR_ULA:
-+ case MBX_SET_MASK:
-+ case MBX_SET_SLIM:
-+ case MBX_SET_DEBUG:
-+ if (!(phba->fc_flag & FC_OFFLINE_MODE)) {
-+ printk(KERN_WARNING "mbox_read:Command 0x%x "
-+ "is illegal in on-line state\n",
-+ phba->sysfs_mbox.mbox->mb.mbxCommand);
-+ sysfs_mbox_idle(phba);
-+ spin_unlock_irqrestore(phba->host->host_lock,
-+ iflag);
-+ return -EPERM;
-+ }
-+ case MBX_LOAD_SM:
-+ case MBX_READ_NV:
-+ case MBX_READ_CONFIG:
-+ case MBX_READ_RCONFIG:
-+ case MBX_READ_STATUS:
-+ case MBX_READ_XRI:
-+ case MBX_READ_REV:
-+ case MBX_READ_LNK_STAT:
-+ case MBX_DUMP_MEMORY:
-+ case MBX_DOWN_LOAD:
-+ case MBX_UPDATE_CFG:
-+ case MBX_LOAD_AREA:
-+ case MBX_LOAD_EXP_ROM:
-+ break;
-+ case MBX_READ_SPARM64:
-+ case MBX_READ_LA:
-+ case MBX_READ_LA64:
-+ case MBX_REG_LOGIN:
-+ case MBX_REG_LOGIN64:
-+ case MBX_CONFIG_PORT:
-+ case MBX_RUN_BIU_DIAG:
-+ printk(KERN_WARNING "mbox_read: Illegal Command 0x%x\n",
-+ phba->sysfs_mbox.mbox->mb.mbxCommand);
-+ sysfs_mbox_idle(phba);
-+ spin_unlock_irqrestore(phba->host->host_lock,
-+ iflag);
-+ return -EPERM;
-+ default:
-+ printk(KERN_WARNING "mbox_read: Unknown Command 0x%x\n",
-+ phba->sysfs_mbox.mbox->mb.mbxCommand);
-+ sysfs_mbox_idle(phba);
-+ spin_unlock_irqrestore(phba->host->host_lock,
-+ iflag);
-+ return -EPERM;
-+ }
-+
-+ if ((phba->fc_flag & FC_OFFLINE_MODE) ||
-+ (!(phba->sli.sliinit.sli_flag & LPFC_SLI2_ACTIVE))){
-+ spin_unlock_irqrestore(phba->host->host_lock, iflag);
-+ rc = lpfc_sli_issue_mbox (phba,
-+ phba->sysfs_mbox.mbox,
-+ MBX_POLL);
-+ spin_lock_irqsave(phba->host->host_lock, iflag);
-+ } else {
-+ spin_unlock_irqrestore(phba->host->host_lock, iflag);
-+ rc = lpfc_sli_issue_mbox_wait (phba,
-+ phba->sysfs_mbox.mbox,
-+ phba->fc_ratov * 2);
-+ spin_lock_irqsave(phba->host->host_lock, iflag);
-+ }
-+
-+ if (rc != MBX_SUCCESS) {
-+ sysfs_mbox_idle(phba);
-+ spin_unlock_irqrestore(host->host_lock, iflag);
-+ return -ENODEV;
-+ }
-+ phba->sysfs_mbox.state = SMBOX_READING;
-+ }
-+ else if (phba->sysfs_mbox.offset != off ||
-+ phba->sysfs_mbox.state != SMBOX_READING) {
-+ printk(KERN_WARNING "mbox_read: Bad State\n");
-+ sysfs_mbox_idle(phba);
-+ spin_unlock_irqrestore(host->host_lock, iflag);
-+ return -EINVAL;
-+ }
-+
-+ memcpy(buf, (uint8_t *) & phba->sysfs_mbox.mbox->mb + off, count);
-+
-+ phba->sysfs_mbox.offset = off + count;
-+
-+ if (phba->sysfs_mbox.offset == sizeof(MAILBOX_t))
-+ sysfs_mbox_idle(phba);
-+
-+ spin_unlock_irqrestore(phba->host->host_lock, iflag);
-+
-+ return count;
-+}
-+
-+static struct bin_attribute sysfs_mbox_attr = {
-+ .attr = {
-+ .name = "mbox",
-+ .mode = S_IRUSR | S_IWUSR,
-+ .owner = THIS_MODULE,
-+ },
-+ .size = sizeof(MAILBOX_t),
-+ .read = sysfs_mbox_read,
-+ .write = sysfs_mbox_write,
-+};
-+
-+
-+#ifdef RHEL_FC
-+/*
-+ * The LPFC driver treats linkdown handling as target loss events so there
-+ * are no sysfs handlers for link_down_tmo.
-+ */
-+static void
-+lpfc_get_starget_port_id(struct scsi_target *starget)
-+{
-+ struct lpfc_nodelist *ndlp = NULL;
-+ struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
-+ struct lpfc_hba *phba = (struct lpfc_hba *) shost->hostdata[0];
-+ uint16_t did = 0;
-+
-+ spin_lock_irq(shost->host_lock);
-+ /* Search the mapped list for this target ID */
-+ list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) {
-+ if (starget->id == ndlp->nlp_sid) {
-+ did = ndlp->nlp_DID;
-+ break;
-+ }
-+ }
-+ spin_unlock_irq(shost->host_lock);
-+
-+ fc_starget_port_id(starget) = did;
-+}
-+
-+static void
-+lpfc_get_starget_node_name(struct scsi_target *starget)
-+{
-+ struct lpfc_nodelist *ndlp = NULL;
-+ struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
-+ struct lpfc_hba *phba = (struct lpfc_hba *) shost->hostdata[0];
-+ uint64_t node_name = 0;
-+
-+ spin_lock_irq(shost->host_lock);
-+ /* Search the mapped list for this target ID */
-+ list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) {
-+ if (starget->id == ndlp->nlp_sid) {
-+ memcpy(&node_name, &ndlp->nlp_nodename,
-+ sizeof(struct lpfc_name));
-+ break;
-+ }
-+ }
-+ spin_unlock_irq(shost->host_lock);
-+
-+ fc_starget_node_name(starget) = be64_to_cpu(node_name);
-+}
-+
-+static void
-+lpfc_get_starget_port_name(struct scsi_target *starget)
-+{
-+ struct lpfc_nodelist *ndlp = NULL;
-+ struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
-+ struct lpfc_hba *phba = (struct lpfc_hba *) shost->hostdata[0];
-+ uint64_t port_name = 0;
-+
-+ spin_lock_irq(shost->host_lock);
-+ /* Search the mapped list for this target ID */
-+ list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) {
-+ if (starget->id == ndlp->nlp_sid) {
-+ memcpy(&port_name, &ndlp->nlp_portname,
-+ sizeof(struct lpfc_name));
-+ break;
-+ }
-+ }
-+ spin_unlock_irq(shost->host_lock);
-+
-+ fc_starget_port_name(starget) = be64_to_cpu(port_name);
-+}
-+
-+static void
-+lpfc_get_starget_loss_tmo(struct scsi_target *starget)
-+{
-+ /*
-+ * Return the driver's global value for device loss timeout plus
-+ * five seconds to allow the driver's nodev timer to run.
-+ */
-+ fc_starget_dev_loss_tmo(starget) = lpfc_nodev_tmo + 5;
-+}
-+
-+static void
-+lpfc_set_starget_loss_tmo(struct scsi_target *starget, uint32_t timeout)
-+{
-+ /*
-+ * The driver doesn't have a per-target timeout setting. Set
-+ * this value globally. Keep lpfc_nodev_tmo >= 1.
-+ */
-+ if (timeout)
-+ lpfc_nodev_tmo = timeout;
-+ else
-+ lpfc_nodev_tmo = 1;
-+}
-+
-+#else /* not RHEL_FC */
-+
-+static void
-+lpfc_get_port_id(struct scsi_device *sdev)
-+{
-+ struct lpfc_target *target = sdev->hostdata;
-+ if (sdev->host->transportt && target->pnode)
-+ fc_port_id(sdev) = target->pnode->nlp_DID;
-+}
-+
-+static void
-+lpfc_get_node_name(struct scsi_device *sdev)
-+{
-+ struct lpfc_target *target = sdev->hostdata;
-+ uint64_t node_name = 0;
-+ if (sdev->host->transportt && target->pnode)
-+ memcpy(&node_name, &target->pnode->nlp_nodename,
-+ sizeof(struct lpfc_name));
-+ fc_node_name(sdev) = be64_to_cpu(node_name);
-+}
-+
-+static void
-+lpfc_get_port_name(struct scsi_device *sdev)
-+{
-+ struct lpfc_target *target = sdev->hostdata;
-+ uint64_t port_name = 0;
-+ if (sdev->host->transportt && target->pnode)
-+ memcpy(&port_name, &target->pnode->nlp_portname,
-+ sizeof(struct lpfc_name));
-+ fc_port_name(sdev) = be64_to_cpu(port_name);
-+}
-+#endif /* not RHEL_FC */
-+
-+static struct fc_function_template lpfc_transport_functions = {
-+#ifdef RHEL_FC
-+ .get_starget_port_id = lpfc_get_starget_port_id,
-+ .show_starget_port_id = 1,
-+
-+ .get_starget_node_name = lpfc_get_starget_node_name,
-+ .show_starget_node_name = 1,
-+
-+ .get_starget_port_name = lpfc_get_starget_port_name,
-+ .show_starget_port_name = 1,
-+
-+ .get_starget_dev_loss_tmo = lpfc_get_starget_loss_tmo,
-+ .set_starget_dev_loss_tmo = lpfc_set_starget_loss_tmo,
-+ .show_starget_dev_loss_tmo = 1,
-+
-+#else /* not RHEL_FC */
-+ .get_port_id = lpfc_get_port_id,
-+ .show_port_id = 1,
-+
-+ .get_node_name = lpfc_get_node_name,
-+ .show_node_name = 1,
-+
-+ .get_port_name = lpfc_get_port_name,
-+ .show_port_name = 1,
-+#endif /* not RHEL_FC */
-+};
-+
-+static int
-+lpfc_proc_info(struct Scsi_Host *host,
-+ char *buf, char **start, off_t offset, int count, int rw)
-+{
-+ struct lpfc_hba *phba = (struct lpfc_hba *)host->hostdata[0];
-+ struct lpfc_nodelist *ndlp;
-+ int len = 0;
-+
-+ /* Sufficient bytes to hold a port or node name. */
-+ uint8_t name[sizeof (struct lpfc_name)];
-+
-+ /* If rw = 0, then read info
-+ * If rw = 1, then write info (NYI)
-+ */
-+ if (rw)
-+ return -EINVAL;
-+
-+ list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) {
-+ if (ndlp->nlp_state == NLP_STE_MAPPED_NODE){
-+ len += snprintf(buf + len, PAGE_SIZE -len,
-+ "lpfc%dt%02x DID %06x WWPN ",
-+ phba->brd_no,
-+ ndlp->nlp_sid, ndlp->nlp_DID);
-+
-+ memcpy (&name[0], &ndlp->nlp_portname,
-+ sizeof (struct lpfc_name));
-+ len += snprintf(buf + len, PAGE_SIZE-len,
-+ "%02x:%02x:%02x:%02x:%02x:%02x:"
-+ "%02x:%02x",
-+ name[0], name[1], name[2],
-+ name[3], name[4], name[5],
-+ name[6], name[7]);
-+ len += snprintf(buf + len, PAGE_SIZE-len, " WWNN ");
-+ memcpy (&name[0], &ndlp->nlp_nodename,
-+ sizeof (struct lpfc_name));
-+ len += snprintf(buf + len, PAGE_SIZE-len,
-+ "%02x:%02x:%02x:%02x:%02x:%02x:"
-+ "%02x:%02x\n",
-+ name[0], name[1], name[2],
-+ name[3], name[4], name[5],
-+ name[6], name[7]);
-+ }
-+ if (PAGE_SIZE - len < 90)
-+ break;
-+ }
-+ if (&ndlp->nlp_listp != &phba->fc_nlpmap_list)
-+ len += snprintf(buf+len, PAGE_SIZE-len, "...\n");
-+
-+ return (len);
-+}
-+
-+static int
-+lpfc_slave_alloc(struct scsi_device *scsi_devs)
-+{
-+ struct lpfc_hba *phba;
-+ struct lpfc_target *target;
-+
-+ /*
-+ * Store the lun pointer in the scsi_device hostdata pointer provided
-+ * the driver has already discovered the target id.
-+ */
-+ phba = (struct lpfc_hba *) scsi_devs->host->hostdata[0];
-+ target = lpfc_find_target(phba, scsi_devs->id, NULL);
-+ if (target) {
-+ scsi_devs->hostdata = target;
-+ target->slavecnt++;
-+ return 0;
-+ }
-+
-+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,9)
-+ return -ENXIO;
-+#else
-+
-+ /*
-+ * The driver does not have a target id matching that in the scsi
-+ * device. Allocate a dummy target initialized to zero so that
-+ * the driver's queuecommand entry correctly fails the call
-+ * forcing the midlayer to call lpfc_slave_destroy. This code
-+ * will be removed in a subsequent kernel patch.
-+ */
-+
-+ target = kmalloc(sizeof (struct lpfc_target), GFP_KERNEL);
-+ if (!target)
-+ return 1;
-+
-+ memset(target, 0, sizeof (struct lpfc_target));
-+#ifdef SLES_FC
-+ init_timer(&target->dev_loss_timer);
-+#endif
-+ scsi_devs->hostdata = target;
-+ target->slavecnt++;
-+ return 0;
-+#endif
-+}
-+
-+static int
-+lpfc_slave_configure(struct scsi_device *sdev)
-+{
-+ struct lpfc_hba *phba = (struct lpfc_hba *) sdev->host->hostdata[0];
-+
-+#if defined(RHEL_FC)
-+ struct lpfc_target *target = (struct lpfc_target *) sdev->hostdata;
-+#endif
-+
-+ if (sdev->tagged_supported)
-+ scsi_activate_tcq(sdev, phba->cfg_lun_queue_depth);
-+ else
-+ scsi_deactivate_tcq(sdev, phba->cfg_lun_queue_depth);
-+
-+#ifdef RHEL_FC
-+ if ((target) && (sdev->sdev_target)) {
-+ /*
-+ * Initialize the fc transport attributes for the target
-+ * containing this scsi device. Also note that the driver's
-+ * target pointer is stored in the starget_data for the
-+ * driver's sysfs entry point functions.
-+ */
-+ target->starget = sdev->sdev_target;
-+ fc_starget_dev_loss_tmo(target->starget) = lpfc_nodev_tmo + 5;
-+ }
-+#endif /* RHEL_FC */
-+
-+ return 0;
-+}
-+
-+static void
-+lpfc_slave_destroy(struct scsi_device *sdev)
-+{
-+ struct lpfc_hba *phba;
-+ struct lpfc_target *target;
-+ int i;
-+
-+ phba = (struct lpfc_hba *) sdev->host->hostdata[0];
-+ target = sdev->hostdata;
-+ if (target) {
-+ target->slavecnt--;
-+
-+ /* Double check for valid lpfc_target */
-+ for (i = 0; i < MAX_FCP_TARGET; i++) {
-+ if(target == phba->device_queue_hash[i]) {
-+ if ((!target->slavecnt) && !(target->pnode)) {
-+ kfree(target);
-+ phba->device_queue_hash[i] = NULL;
-+ }
-+ sdev->hostdata = NULL;
-+ return;
-+ }
-+ }
-+ /* If we get here, this was a dummy lpfc_target allocated
-+ * in lpfc_slave_alloc.
-+ */
-+ if (!target->slavecnt)
-+ kfree(target);
-+ }
-+
-+ /*
-+ * Set this scsi device's hostdata to NULL since it is going
-+ * away. Also, (future) don't set the starget_dev_loss_tmo
-+ * this value is global to all targets managed by this
-+ * host.
-+ */
-+ sdev->hostdata = NULL;
-+ return;
-+}
-+
-+static struct class_device_attribute *lpfc_host_attrs[] = {
-+ &class_device_attr_info,
-+ &class_device_attr_serialnum,
-+ &class_device_attr_modeldesc,
-+ &class_device_attr_modelname,
-+ &class_device_attr_programtype,
-+ &class_device_attr_portnum,
-+ &class_device_attr_fwrev,
-+ &class_device_attr_hdw,
-+ &class_device_attr_option_rom_version,
-+ &class_device_attr_state,
-+ &class_device_attr_num_discovered_ports,
-+ &class_device_attr_speed,
-+ &class_device_attr_node_name,
-+ &class_device_attr_port_name,
-+ &class_device_attr_portfcid,
-+ &class_device_attr_port_type,
-+ &class_device_attr_fabric_name,
-+ &class_device_attr_events,
-+ &class_device_attr_lpfc_drvr_version,
-+ &class_device_attr_lpfc_log_verbose,
-+ &class_device_attr_lpfc_lun_queue_depth,
-+ &class_device_attr_lpfc_nodev_tmo,
-+ &class_device_attr_lpfc_fcp_class,
-+ &class_device_attr_lpfc_use_adisc,
-+ &class_device_attr_lpfc_ack0,
-+ &class_device_attr_lpfc_topology,
-+ &class_device_attr_lpfc_scan_down,
-+ &class_device_attr_lpfc_link_speed,
-+ &class_device_attr_lpfc_fdmi_on,
-+ &class_device_attr_lpfc_fcp_bind_method,
-+ &class_device_attr_lpfc_max_luns,
-+ &class_device_attr_nport_evt_cnt,
-+ &class_device_attr_management_version,
-+ &class_device_attr_issue_lip,
-+ &class_device_attr_board_online,
-+ &class_device_attr_disc_npr,
-+ &class_device_attr_disc_map,
-+ &class_device_attr_disc_unmap,
-+ &class_device_attr_disc_prli,
-+ &class_device_attr_disc_reglgn,
-+ &class_device_attr_disc_adisc,
-+ &class_device_attr_disc_plogi,
-+ &class_device_attr_disc_unused,
-+ &class_device_attr_outfcpio,
-+ NULL,
-+};
-+
-+static struct scsi_host_template driver_template = {
-+ .module = THIS_MODULE,
-+ .name = LPFC_DRIVER_NAME,
-+ .info = lpfc_info,
-+ .queuecommand = lpfc_queuecommand,
-+ .eh_abort_handler = lpfc_abort_handler,
-+ .eh_device_reset_handler= lpfc_reset_lun_handler,
-+ .eh_bus_reset_handler = lpfc_reset_bus_handler,
-+ .slave_alloc = lpfc_slave_alloc,
-+ .slave_configure = lpfc_slave_configure,
-+ .slave_destroy = lpfc_slave_destroy,
-+ .proc_info = lpfc_proc_info,
-+ .proc_name = LPFC_DRIVER_NAME,
-+ .this_id = -1,
-+ .sg_tablesize = SG_ALL,
-+ .cmd_per_lun = 30,
-+ .max_sectors = 0xFFFF,
-+ .shost_attrs = lpfc_host_attrs,
-+ .use_clustering = ENABLE_CLUSTERING,
-+};
-+
-+static int
-+lpfc_sli_setup(struct lpfc_hba * phba)
-+{
-+ int i, totiocb = 0;
-+ struct lpfc_sli *psli = &phba->sli;
-+ LPFC_RING_INIT_t *pring;
-+
-+ psli->sliinit.num_rings = MAX_CONFIGURED_RINGS;
-+ psli->fcp_ring = LPFC_FCP_RING;
-+ psli->next_ring = LPFC_FCP_NEXT_RING;
-+ psli->ip_ring = LPFC_IP_RING;
-+
-+ for (i = 0; i < psli->sliinit.num_rings; i++) {
-+ pring = &psli->sliinit.ringinit[i];
-+ switch (i) {
-+ case LPFC_FCP_RING: /* ring 0 - FCP */
-+ /* numCiocb and numRiocb are used in config_port */
-+ pring->numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
-+ pring->numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
-+ pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
-+ pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
-+ pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
-+ pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
-+ pring->iotag_ctr = 0;
-+ pring->iotag_max =
-+ (phba->cfg_hba_queue_depth * 2);
-+ pring->fast_iotag = pring->iotag_max;
-+ pring->num_mask = 0;
-+ break;
-+ case LPFC_IP_RING: /* ring 1 - IP */
-+ /* numCiocb and numRiocb are used in config_port */
-+ pring->numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
-+ pring->numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
-+ pring->num_mask = 0;
-+ break;
-+ case LPFC_ELS_RING: /* ring 2 - ELS / CT */
-+ /* numCiocb and numRiocb are used in config_port */
-+ pring->numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
-+ pring->numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
-+ pring->fast_iotag = 0;
-+ pring->iotag_ctr = 0;
-+ pring->iotag_max = 4096;
-+ pring->num_mask = 4;
-+ pring->prt[0].profile = 0; /* Mask 0 */
-+ pring->prt[0].rctl = FC_ELS_REQ;
-+ pring->prt[0].type = FC_ELS_DATA;
-+ pring->prt[0].lpfc_sli_rcv_unsol_event =
-+ lpfc_els_unsol_event;
-+ pring->prt[1].profile = 0; /* Mask 1 */
-+ pring->prt[1].rctl = FC_ELS_RSP;
-+ pring->prt[1].type = FC_ELS_DATA;
-+ pring->prt[1].lpfc_sli_rcv_unsol_event =
-+ lpfc_els_unsol_event;
-+ pring->prt[2].profile = 0; /* Mask 2 */
-+ /* NameServer Inquiry */
-+ pring->prt[2].rctl = FC_UNSOL_CTL;
-+ /* NameServer */
-+ pring->prt[2].type = FC_COMMON_TRANSPORT_ULP;
-+ pring->prt[2].lpfc_sli_rcv_unsol_event =
-+ lpfc_ct_unsol_event;
-+ pring->prt[3].profile = 0; /* Mask 3 */
-+ /* NameServer response */
-+ pring->prt[3].rctl = FC_SOL_CTL;
-+ /* NameServer */
-+ pring->prt[3].type = FC_COMMON_TRANSPORT_ULP;
-+ pring->prt[3].lpfc_sli_rcv_unsol_event =
-+ lpfc_ct_unsol_event;
-+ break;
-+ }
-+ totiocb += (pring->numCiocb + pring->numRiocb);
-+ }
-+ if (totiocb > MAX_SLI2_IOCB) {
-+ /* Too many cmd / rsp ring entries in SLI2 SLIM */
-+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-+ "%d:0462 Too many cmd / rsp ring entries in "
-+ "SLI2 SLIM Data: x%x x%x\n",
-+ phba->brd_no, totiocb, MAX_SLI2_IOCB);
-+ }
-+
-+#ifdef USE_HGP_HOST_SLIM
-+ psli->sliinit.sli_flag = LPFC_HGP_HOSTSLIM;
-+#else
-+ psli->sliinit.sli_flag = 0;
-+#endif
-+
-+ return (0);
-+}
-+
-+static int
-+lpfc_set_bind_type(struct lpfc_hba * phba)
-+{
-+ int bind_type = phba->cfg_fcp_bind_method;
-+ int ret = LPFC_BIND_WW_NN_PN;
-+
-+ switch (bind_type) {
-+ case 1:
-+ phba->fcp_mapping = FCP_SEED_WWNN;
-+ break;
-+
-+ case 2:
-+ phba->fcp_mapping = FCP_SEED_WWPN;
-+ break;
-+
-+ case 3:
-+ phba->fcp_mapping = FCP_SEED_DID;
-+ ret = LPFC_BIND_DID;
-+ break;
-+
-+ case 4:
-+ phba->fcp_mapping = FCP_SEED_DID;
-+ ret = LPFC_BIND_DID;
-+ break;
-+ }
-+
-+ return (ret);
-+}
-+
-+static void
-+lpfc_get_cfgparam(struct lpfc_hba *phba)
-+{
-+ lpfc_log_verbose_set(phba, lpfc_log_verbose);
-+ lpfc_fcp_bind_method_set(phba, lpfc_fcp_bind_method);
-+ lpfc_cr_delay_set(phba, lpfc_cr_delay);
-+ lpfc_cr_count_set(phba, lpfc_cr_count);
-+ lpfc_lun_queue_depth_set(phba, lpfc_lun_queue_depth);
-+ lpfc_fcp_class_set(phba, lpfc_fcp_class);
-+ lpfc_use_adisc_set(phba, lpfc_use_adisc);
-+ lpfc_ack0_set(phba, lpfc_ack0);
-+ lpfc_topology_set(phba, lpfc_topology);
-+ lpfc_scan_down_set(phba, lpfc_scan_down);
-+ lpfc_nodev_tmo_set(phba, lpfc_nodev_tmo);
-+ lpfc_link_speed_set(phba, lpfc_link_speed);
-+ lpfc_fdmi_on_set(phba, lpfc_fdmi_on);
-+ lpfc_discovery_threads_set(phba, lpfc_discovery_threads);
-+ lpfc_max_luns_set(phba, lpfc_max_luns);
-+ phba->cfg_scsi_hotplug = 0;
-+
-+ switch (phba->pcidev->device) {
-+ case PCI_DEVICE_ID_LP101:
-+ case PCI_DEVICE_ID_BSMB:
-+ case PCI_DEVICE_ID_ZSMB:
-+ phba->cfg_hba_queue_depth = LPFC_LP101_HBA_Q_DEPTH;
-+ break;
-+ case PCI_DEVICE_ID_RFLY:
-+ case PCI_DEVICE_ID_PFLY:
-+ case PCI_DEVICE_ID_BMID:
-+ case PCI_DEVICE_ID_ZMID:
-+ case PCI_DEVICE_ID_TFLY:
-+ phba->cfg_hba_queue_depth = LPFC_LC_HBA_Q_DEPTH;
-+ break;
-+ default:
-+ phba->cfg_hba_queue_depth = LPFC_DFT_HBA_Q_DEPTH;
-+ }
-+ return;
-+}
-+
-+static void
-+lpfc_consistent_bind_setup(struct lpfc_hba * phba)
-+{
-+ INIT_LIST_HEAD(&phba->fc_nlpbind_list);
-+ phba->fc_bind_cnt = 0;
-+}
-+
-+static uint8_t
-+lpfc_get_brd_no(struct lpfc_hba * phba)
-+{
-+ uint8_t brd, found = 1;
-+
-+ brd = 0;
-+ while(found) {
-+ phba = NULL;
-+ found = 0;
-+ list_for_each_entry(phba, &lpfc_hba_list, hba_list) {
-+ if (phba->brd_no == brd) {
-+ found = 1;
-+ brd++;
-+ break;
-+ }
-+ }
-+ }
-+ return (brd);
-+}
-+
-+
-+static int __devinit
-+lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
-+{
-+ struct Scsi_Host *host;
-+ struct lpfc_hba *phba;
-+ struct lpfc_sli *psli;
-+ unsigned long iflag;
-+ unsigned long bar0map_len, bar2map_len;
-+ int error = -ENODEV, retval;
-+
-+ if (pci_enable_device(pdev))
-+ goto out;
-+ if (pci_request_regions(pdev, LPFC_DRIVER_NAME))
-+ goto out_disable_device;
-+
-+ /*
-+ * Allocate space for adapter info structure
-+ */
-+ phba = kmalloc(sizeof(*phba), GFP_KERNEL);
-+ if (!phba)
-+ goto out_release_regions;
-+ memset(phba, 0, sizeof (struct lpfc_hba));
-+
-+ host = scsi_host_alloc(&driver_template, sizeof (unsigned long));
-+ if (!host) {
-+ printk (KERN_WARNING "%s: scsi_host_alloc failed.\n",
-+ lpfc_drvr_name);
-+ error = -ENOMEM;
-+ goto out_kfree_phba;
-+ }
-+
-+ phba->fc_flag |= FC_LOADING;
-+ phba->pcidev = pdev;
-+ phba->host = host;
-+
-+ INIT_LIST_HEAD(&phba->ctrspbuflist);
-+ INIT_LIST_HEAD(&phba->rnidrspbuflist);
-+ INIT_LIST_HEAD(&phba->freebufList);
-+
-+ /* Initialize timers used by driver */
-+ init_timer(&phba->fc_estabtmo);
-+ phba->fc_estabtmo.function = lpfc_establish_link_tmo;
-+ phba->fc_estabtmo.data = (unsigned long)phba;
-+ init_timer(&phba->fc_disctmo);
-+ phba->fc_disctmo.function = lpfc_disc_timeout;
-+ phba->fc_disctmo.data = (unsigned long)phba;
-+ init_timer(&phba->fc_scantmo);
-+ phba->fc_scantmo.function = lpfc_scan_timeout;
-+ phba->fc_scantmo.data = (unsigned long)phba;
-+
-+ init_timer(&phba->fc_fdmitmo);
-+ phba->fc_fdmitmo.function = lpfc_fdmi_tmo;
-+ phba->fc_fdmitmo.data = (unsigned long)phba;
-+ init_timer(&phba->els_tmofunc);
-+ phba->els_tmofunc.function = lpfc_els_timeout;
-+ phba->els_tmofunc.data = (unsigned long)phba;
-+ psli = &phba->sli;
-+ init_timer(&psli->mbox_tmo);
-+ psli->mbox_tmo.function = lpfc_mbox_timeout;
-+ psli->mbox_tmo.data = (unsigned long)phba;
-+
-+ /* Assign an unused board number */
-+ phba->brd_no = lpfc_get_brd_no(phba);
-+ host->unique_id = phba->brd_no;
-+
-+ /*
-+ * Get all the module params for configuring this host and then
-+ * establish the host parameters.
-+ */
-+ lpfc_get_cfgparam(phba);
-+
-+ host->max_id = LPFC_MAX_TARGET;
-+ host->max_lun = phba->cfg_max_luns;
-+ host->this_id = -1;
-+
-+ if(phba->cfg_scsi_hotplug) {
-+ lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
-+ "%d:0264 HotPlug Support Enabled\n",
-+ phba->brd_no);
-+ }
-+
-+ /* Add adapter structure to list */
-+ list_add_tail(&phba->hba_list, &lpfc_hba_list);
-+
-+ /* Initialize all internally managed lists. */
-+ INIT_LIST_HEAD(&phba->fc_nlpmap_list);
-+ INIT_LIST_HEAD(&phba->fc_nlpunmap_list);
-+ INIT_LIST_HEAD(&phba->fc_unused_list);
-+ INIT_LIST_HEAD(&phba->fc_plogi_list);
-+ INIT_LIST_HEAD(&phba->fc_adisc_list);
-+ INIT_LIST_HEAD(&phba->fc_reglogin_list);
-+ INIT_LIST_HEAD(&phba->fc_prli_list);
-+ INIT_LIST_HEAD(&phba->fc_npr_list);
-+ lpfc_consistent_bind_setup(phba);
-+
-+ init_waitqueue_head(&phba->linkevtwq);
-+ init_waitqueue_head(&phba->rscnevtwq);
-+ init_waitqueue_head(&phba->ctevtwq);
-+
-+ pci_set_master(pdev);
-+ retval = pci_set_mwi(pdev);
-+ if (retval)
-+ dev_printk(KERN_WARNING, &pdev->dev,
-+ "Warning: pci_set_mwi returned %d\n", retval);
-+
-+ /* Configure DMA attributes. */
-+ if (dma_set_mask(&phba->pcidev->dev, 0xffffffffffffffffULL) &&
-+ dma_set_mask(&phba->pcidev->dev, 0xffffffffULL))
-+ goto out_list_del;
-+
-+ /*
-+ * Get the physical address of Bar0 and Bar2 and the number of bytes
-+ * required by each mapping.
-+ */
-+ phba->pci_bar0_map = pci_resource_start(phba->pcidev, 0);
-+ bar0map_len = pci_resource_len(phba->pcidev, 0);
-+
-+ phba->pci_bar2_map = pci_resource_start(phba->pcidev, 2);
-+ bar2map_len = pci_resource_len(phba->pcidev, 2);
-+
-+ /* Map HBA SLIM and Control Registers to a kernel virtual address. */
-+ phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
-+ phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
-+
-+ /*
-+ * Allocate memory for SLI-2 structures
-+ */
-+ phba->slim2p = dma_alloc_coherent(&phba->pcidev->dev, SLI2_SLIM_SIZE,
-+ &phba->slim2p_mapping, GFP_KERNEL);
-+ if (!phba->slim2p)
-+ goto out_iounmap;
-+
-+
-+ lpfc_sli_setup(phba); /* Setup SLI Layer to run over lpfc HBAs */
-+ lpfc_sli_queue_setup(phba); /* Initialize the SLI Layer */
-+
-+ error = lpfc_mem_alloc(phba);
-+ if (error)
-+ goto out_dec_nhbas;
-+
-+ lpfc_set_bind_type(phba);
-+
-+ /* Initialize HBA structure */
-+ phba->fc_edtov = FF_DEF_EDTOV;
-+ phba->fc_ratov = FF_DEF_RATOV;
-+ phba->fc_altov = FF_DEF_ALTOV;
-+ phba->fc_arbtov = FF_DEF_ARBTOV;
-+
-+ INIT_LIST_HEAD(&phba->dpc_disc);
-+ init_completion(&phba->dpc_startup);
-+ init_completion(&phba->dpc_exiting);
-+
-+ /*
-+	 * Start up the kernel thread for this host adapter
-+ */
-+ phba->dpc_kill = 0;
-+ phba->dpc_pid = kernel_thread(lpfc_do_dpc, phba, 0);
-+ if (phba->dpc_pid < 0) {
-+ error = phba->dpc_pid;
-+ goto out_free_mem;
-+ }
-+ wait_for_completion(&phba->dpc_startup);
-+
-+ /* Call SLI to initialize the HBA. */
-+ error = lpfc_sli_hba_setup(phba);
-+ if (error)
-+ goto out_hba_down;
-+
-+ /* We can rely on a queue depth attribute only after SLI HBA setup */
-+ host->can_queue = phba->cfg_hba_queue_depth - 10;
-+
-+ /*
-+	 * Starting with the 2.4.0 kernel, Linux can support commands longer
-+ * than 12 bytes. However, scsi_register() always sets it to 12.
-+ * For it to be useful to the midlayer, we have to set it here.
-+ */
-+ host->max_cmd_len = 16;
-+
-+ /*
-+ * Queue depths per lun
-+ */
-+ host->transportt = lpfc_transport_template;
-+ host->hostdata[0] = (unsigned long)phba;
-+ pci_set_drvdata(pdev, host);
-+ error = scsi_add_host(host, &pdev->dev);
-+ if (error)
-+ goto out_hba_down;
-+
-+ sysfs_create_bin_file(&host->shost_classdev.kobj, &sysfs_ctlreg_attr);
-+ sysfs_create_bin_file(&host->shost_classdev.kobj, &sysfs_mbox_attr);
-+ scsi_scan_host(host);
-+ phba->fc_flag &= ~FC_LOADING;
-+ return 0;
-+
-+out_hba_down:
-+ /* Stop any timers that were started during this attach. */
-+ spin_lock_irqsave(phba->host->host_lock, iflag);
-+ lpfc_sli_hba_down(phba);
-+ lpfc_stop_timer(phba);
-+ phba->work_hba_events = 0;
-+ spin_unlock_irqrestore(phba->host->host_lock, iflag);
-+
-+ /* Kill the kernel thread for this host */
-+ if (phba->dpc_pid >= 0) {
-+ phba->dpc_kill = 1;
-+ wmb();
-+ kill_proc(phba->dpc_pid, SIGHUP, 1);
-+ wait_for_completion(&phba->dpc_exiting);
-+ }
-+
-+out_free_mem:
-+ lpfc_mem_free(phba);
-+out_dec_nhbas:
-+ dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
-+ phba->slim2p, phba->slim2p_mapping);
-+out_iounmap:
-+ iounmap(phba->ctrl_regs_memmap_p);
-+ iounmap(phba->slim_memmap_p);
-+out_list_del:
-+ list_del_init(&phba->hba_list);
-+ scsi_host_put(host);
-+out_kfree_phba:
-+ kfree(phba);
-+out_release_regions:
-+ pci_release_regions(pdev);
-+out_disable_device:
-+ pci_disable_device(pdev);
-+out:
-+ return error;
-+}
-+
-+static void __devexit
-+lpfc_pci_remove_one(struct pci_dev *pdev)
-+{
-+ struct Scsi_Host *host = pci_get_drvdata(pdev);
-+ struct lpfc_hba *phba = (struct lpfc_hba *)host->hostdata[0];
-+ struct lpfc_target *targetp;
-+ int i;
-+ unsigned long iflag;
-+
-+ sysfs_remove_bin_file(&host->shost_classdev.kobj, &sysfs_mbox_attr);
-+ sysfs_remove_bin_file(&host->shost_classdev.kobj, &sysfs_ctlreg_attr);
-+
-+ spin_lock_irqsave(phba->host->host_lock, iflag);
-+
-+ /* Since we are going to scsi_remove_host(), disassociate scsi_dev
-+	 * from lpfc_target, and make sure it is unblocked.
-+ */
-+ for (i = 0; i < MAX_FCP_TARGET; i++) {
-+ targetp = phba->device_queue_hash[i];
-+ if (!targetp)
-+ continue;
-+#if defined(RHEL_FC) || defined(SLES_FC)
-+ if(targetp->pnode) {
-+ if(targetp->blocked) {
-+ /* If we are blocked, force a nodev_tmo */
-+ del_timer_sync(&targetp->pnode->nlp_tmofunc);
-+ if (!list_empty(&targetp->pnode->
-+ nodev_timeout_evt.evt_listp))
-+ list_del_init(&targetp->pnode->
-+ nodev_timeout_evt.
-+ evt_listp);
-+ lpfc_process_nodev_timeout(phba,
-+ targetp->pnode);
-+ }
-+ else {
-+ /* If we are unblocked, just remove
-+ * the scsi device.
-+ */
-+ lpfc_target_remove(phba, targetp);
-+ }
-+ }
-+#endif /* RHEL_FC or SLES_FC */
-+#if defined(RHEL_FC)
-+ targetp->starget = NULL;
-+#endif /* RHEL_FC */
-+ }
-+ spin_unlock_irqrestore(phba->host->host_lock, iflag);
-+
-+ list_del(&phba->hba_list);
-+ scsi_remove_host(phba->host);
-+
-+ /* detach the board */
-+
-+ /* Kill the kernel thread for this host */
-+ if (phba->dpc_pid >= 0) {
-+ phba->dpc_kill = 1;
-+ wmb();
-+ kill_proc(phba->dpc_pid, SIGHUP, 1);
-+ wait_for_completion(&phba->dpc_exiting);
-+ }
-+
-+ /*
-+	 * Bring down the SLI Layer. This step disables all interrupts,
-+ * clears the rings, discards all mailbox commands, and resets
-+ * the HBA.
-+ */
-+ spin_lock_irqsave(phba->host->host_lock, iflag);
-+ lpfc_sli_hba_down(phba);
-+ spin_unlock_irqrestore(phba->host->host_lock, iflag);
-+
-+ /* Release the irq reservation */
-+ free_irq(phba->pcidev->irq, phba);
-+
-+ spin_lock_irqsave(phba->host->host_lock, iflag);
-+ lpfc_cleanup(phba, 0);
-+ lpfc_stop_timer(phba);
-+ phba->work_hba_events = 0;
-+ spin_unlock_irqrestore(phba->host->host_lock, iflag);
-+ lpfc_scsi_free(phba);
-+
-+ lpfc_mem_free(phba);
-+
-+ /* Free resources associated with SLI2 interface */
-+ dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
-+ phba->slim2p, phba->slim2p_mapping);
-+
-+ /* unmap adapter SLIM and Control Registers */
-+ iounmap(phba->ctrl_regs_memmap_p);
-+ iounmap(phba->slim_memmap_p);
-+
-+ pci_release_regions(phba->pcidev);
-+ pci_disable_device(phba->pcidev);
-+
-+ scsi_host_put(phba->host);
-+ kfree(phba);
-+
-+ pci_set_drvdata(pdev, NULL);
-+}
-+
-+static struct pci_device_id lpfc_id_table[] = {
-+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER,
-+ PCI_ANY_ID, PCI_ANY_ID, },
-+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR,
-+ PCI_ANY_ID, PCI_ANY_ID, },
-+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS,
-+ PCI_ANY_ID, PCI_ANY_ID, },
-+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR,
-+ PCI_ANY_ID, PCI_ANY_ID, },
-+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY,
-+ PCI_ANY_ID, PCI_ANY_ID, },
-+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY,
-+ PCI_ANY_ID, PCI_ANY_ID, },
-+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY,
-+ PCI_ANY_ID, PCI_ANY_ID, },
-+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY,
-+ PCI_ANY_ID, PCI_ANY_ID, },
-+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS,
-+ PCI_ANY_ID, PCI_ANY_ID, },
-+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID,
-+ PCI_ANY_ID, PCI_ANY_ID, },
-+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB,
-+ PCI_ANY_ID, PCI_ANY_ID, },
-+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR,
-+ PCI_ANY_ID, PCI_ANY_ID, },
-+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID,
-+ PCI_ANY_ID, PCI_ANY_ID, },
-+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB,
-+ PCI_ANY_ID, PCI_ANY_ID, },
-+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY,
-+ PCI_ANY_ID, PCI_ANY_ID, },
-+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101,
-+ PCI_ANY_ID, PCI_ANY_ID, },
-+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S,
-+ PCI_ANY_ID, PCI_ANY_ID, },
-+ { 0 }
-+};
-+MODULE_DEVICE_TABLE(pci, lpfc_id_table);
-+
-+
-+static struct pci_driver lpfc_driver = {
-+ .name = LPFC_DRIVER_NAME,
-+ .id_table = lpfc_id_table,
-+ .probe = lpfc_pci_probe_one,
-+ .remove = __devexit_p(lpfc_pci_remove_one),
-+};
-+
-+static int __init
-+lpfc_init(void)
-+{
-+ int rc;
-+
-+ printk(LPFC_MODULE_DESC "\n");
-+ printk(LPFC_COPYRIGHT "\n");
-+
-+ lpfc_transport_template =
-+ fc_attach_transport(&lpfc_transport_functions);
-+ if (!lpfc_transport_template)
-+ return -ENODEV;
-+ rc = pci_module_init(&lpfc_driver);
-+ return rc;
-+
-+}
-+
-+static void __exit
-+lpfc_exit(void)
-+{
-+ pci_unregister_driver(&lpfc_driver);
-+ fc_release_transport(lpfc_transport_template);
-+}
-+module_init(lpfc_init);
-+module_exit(lpfc_exit);
-+MODULE_LICENSE("GPL");
-+MODULE_DESCRIPTION(LPFC_MODULE_DESC);
-+MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com");
-+MODULE_VERSION("0:" LPFC_DRIVER_VERSION);
---- linux-2.6.8.1-t044-driver-update//drivers/scsi/lpfc/lpfc_version.h 1970-01-01 03:00:00.000000000 +0300
-+++ rhel4u2//drivers/scsi/lpfc/lpfc_version.h 2005-10-19 11:47:17.000000000 +0400
-@@ -0,0 +1,38 @@
-+/*******************************************************************
-+ * This file is part of the Emulex Linux Device Driver for *
-+ * Fibre Channel Host Bus Adapters. *
-+ * Copyright (C) 2003-2005 Emulex. All rights reserved. *
-+ * EMULEX and SLI are trademarks of Emulex. *
-+ * www.emulex.com *
-+ * *
-+ * This program is free software; you can redistribute it and/or *
-+ * modify it under the terms of version 2 of the GNU General *
-+ * Public License as published by the Free Software Foundation. *
-+ * This program is distributed in the hope that it will be useful. *
-+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
-+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
-+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
-+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
-+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
-+ * more details, a copy of which can be found in the file COPYING *
-+ * included with this package. *
-+ *******************************************************************/
-+
-+/*
-+ * $Id: lpfc_version.h 1.58.1.8 2005/07/27 18:29:31EDT sf_support Exp $
-+ */
-+
-+#ifndef _H_LPFC_VERSION
-+#define _H_LPFC_VERSION
-+
-+#define LPFC_DRIVER_VERSION "8.0.16.17"
-+
-+#define LPFC_DRIVER_NAME "lpfc"
-+
-+#define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \
-+ LPFC_DRIVER_VERSION
-+#define LPFC_COPYRIGHT "Copyright(c) 2003-2005 Emulex. All rights reserved."
-+
-+#define DFC_API_VERSION "0.0.0"
-+
-+#endif
---- linux-2.6.8.1-t044-driver-update//drivers/scsi/lpfc/lpfc_mem.h 1970-01-01 03:00:00.000000000 +0300
-+++ rhel4u2//drivers/scsi/lpfc/lpfc_mem.h 2005-10-19 11:47:17.000000000 +0400
-@@ -0,0 +1,56 @@
-+/*******************************************************************
-+ * This file is part of the Emulex Linux Device Driver for *
-+ * Fibre Channel Host Bus Adapters. *
-+ * Copyright (C) 2003-2005 Emulex. All rights reserved. *
-+ * EMULEX and SLI are trademarks of Emulex. *
-+ * www.emulex.com *
-+ * *
-+ * This program is free software; you can redistribute it and/or *
-+ * modify it under the terms of version 2 of the GNU General *
-+ * Public License as published by the Free Software Foundation. *
-+ * This program is distributed in the hope that it will be useful. *
-+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
-+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
-+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
-+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
-+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
-+ * more details, a copy of which can be found in the file COPYING *
-+ * included with this package. *
-+ *******************************************************************/
-+
-+/*
-+ * $Id: lpfc_mem.h 1.23.1.2 2005/06/13 17:16:36EDT sf_support Exp $
-+ */
-+
-+#ifndef _H_LPFC_MEM
-+#define _H_LPFC_MEM
-+
-+
-+struct lpfc_dmabuf {
-+ struct list_head list;
-+ void *virt; /* virtual address ptr */
-+ dma_addr_t phys; /* mapped address */
-+};
-+struct lpfc_dmabufext {
-+ struct lpfc_dmabuf dma;
-+ uint32_t size;
-+ uint32_t flag;
-+ struct list_head list;
-+ uint32_t uniqueid;
-+ uint32_t data;
-+};
-+typedef struct lpfc_dmabufext DMABUFEXT_t;
-+
-+struct lpfc_dma_pool {
-+ struct lpfc_dmabuf *elements;
-+ uint32_t max_count;
-+ uint32_t current_count;
-+};
-+
-+
-+#define MEM_PRI 0x100 /* Priority bit: set to exceed low
-+ water */
-+#define LPFC_MBUF_POOL_SIZE 64 /* max elements in MBUF safety pool */
-+#define LPFC_MEM_POOL_SIZE 64 /* max elements in non DMA safety
-+ pool */
-+#endif /* _H_LPFC_MEM */
---- linux-2.6.8.1-t044-driver-update//drivers/scsi/lpfc/lpfc_init.c 1970-01-01 03:00:00.000000000 +0300
-+++ rhel4u2//drivers/scsi/lpfc/lpfc_init.c 2005-10-19 11:47:17.000000000 +0400
-@@ -0,0 +1,1536 @@
-+/*******************************************************************
-+ * This file is part of the Emulex Linux Device Driver for *
-+ * Fibre Channel Host Bus Adapters. *
-+ * Copyright (C) 2003-2005 Emulex. All rights reserved. *
-+ * EMULEX and SLI are trademarks of Emulex. *
-+ * www.emulex.com *
-+ * *
-+ * This program is free software; you can redistribute it and/or *
-+ * modify it under the terms of version 2 of the GNU General *
-+ * Public License as published by the Free Software Foundation. *
-+ * This program is distributed in the hope that it will be useful. *
-+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
-+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
-+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
-+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
-+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
-+ * more details, a copy of which can be found in the file COPYING *
-+ * included with this package. *
-+ *******************************************************************/
-+
-+/*
-+ * $Id: lpfc_init.c 1.183.1.2 2005/06/13 17:16:27EDT sf_support Exp $
-+ */
-+
-+#include <linux/version.h>
-+#include <linux/blkdev.h>
-+#include <linux/ctype.h>
-+#include <linux/dma-mapping.h>
-+#include <linux/pci.h>
-+#include <linux/spinlock.h>
-+
-+#include <scsi/scsi_device.h>
-+#include <scsi/scsi_host.h>
-+
-+#include "lpfc_sli.h"
-+#include "lpfc_disc.h"
-+#include "lpfc_scsi.h"
-+#include "lpfc.h"
-+#include "lpfc_crtn.h"
-+#include "lpfc_hw.h"
-+#include "lpfc_logmsg.h"
-+#include "lpfc_mem.h"
-+#include "lpfc_version.h"
-+#include "lpfc_compat.h"
-+
-+static int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *);
-+static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
-+static int lpfc_post_rcv_buf(struct lpfc_hba *);
-+static int lpfc_rdrev_wd30 = 0;
-+
-+/************************************************************************/
-+/* */
-+/* lpfc_config_port_prep */
-+/* This routine will do LPFC initialization prior to the */
-+/* CONFIG_PORT mailbox command. This will be initialized */
-+/* as a SLI layer callback routine. */
-+/* This routine returns 0 on success or -ERESTART if it wants */
-+/* the SLI layer to reset the HBA and try again. Any */
-+/* other return value indicates an error. */
-+/* */
-+/************************************************************************/
-+int
-+lpfc_config_port_prep(struct lpfc_hba * phba)
-+{
-+ lpfc_vpd_t *vp = &phba->vpd;
-+ int i = 0;
-+ LPFC_MBOXQ_t *pmb;
-+ MAILBOX_t *mb;
-+ uint32_t *lpfc_vpd_data = 0;
-+ uint16_t offset = 0;
-+
-+ /* Get a Mailbox buffer to setup mailbox commands for HBA
-+ initialization */
-+ pmb = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
-+ if (!pmb) {
-+ phba->hba_state = LPFC_HBA_ERROR;
-+ return -ENOMEM;
-+ }
-+
-+ mb = &pmb->mb;
-+ phba->hba_state = LPFC_INIT_MBX_CMDS;
-+
-+ /* special handling for LC HBAs */
-+ if (lpfc_is_LC_HBA(phba->pcidev->device)) {
-+ char licensed[56] =
-+ "key unlock for use with gnu public licensed code only\0";
-+ uint32_t *ptext = (uint32_t *) licensed;
-+
-+ for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
-+ *ptext = cpu_to_be32(*ptext);
-+
-+ /* Setup and issue mailbox READ NVPARAMS command */
-+ lpfc_read_nv(phba, pmb);
-+ memset((char*)mb->un.varRDnvp.rsvd3, 0,
-+ sizeof (mb->un.varRDnvp.rsvd3));
-+ memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
-+ sizeof (licensed));
-+
-+ if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
-+ /* Adapter initialization error, mbxCmd <cmd>
-+ READ_NVPARM, mbxStatus <status> */
-+ lpfc_printf_log(phba,
-+ KERN_ERR,
-+ LOG_MBOX,
-+ "%d:0324 Config Port initialization "
-+ "error, mbxCmd x%x READ_NVPARM, "
-+ "mbxStatus x%x\n",
-+ phba->brd_no,
-+ mb->mbxCommand, mb->mbxStatus);
-+ mempool_free( pmb, phba->mbox_mem_pool);
-+ return -ERESTART;
-+ }
-+ memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
-+ sizeof (mb->un.varRDnvp.nodename));
-+ }
-+
-+ /* Setup and issue mailbox READ REV command */
-+ lpfc_read_rev(phba, pmb);
-+ if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
-+ /* Adapter failed to init, mbxCmd <mbxCmd> READ_REV, mbxStatus
-+ <status> */
-+ lpfc_printf_log(phba,
-+ KERN_ERR,
-+ LOG_INIT,
-+ "%d:0439 Adapter failed to init, mbxCmd x%x "
-+ "READ_REV, mbxStatus x%x\n",
-+ phba->brd_no,
-+ mb->mbxCommand, mb->mbxStatus);
-+ mempool_free( pmb, phba->mbox_mem_pool);
-+ return -ERESTART;
-+ }
-+
-+ /* The HBA's current state is provided by the ProgType and rr fields.
-+ * Read and check the value of these fields before continuing to config
-+ * this port.
-+ */
-+ if (mb->un.varRdRev.rr == 0 || mb->un.varRdRev.un.b.ProgType != 2) {
-+ /* Old firmware */
-+ vp->rev.rBit = 0;
-+ /* Adapter failed to init, mbxCmd <cmd> READ_REV detected
-+ outdated firmware */
-+ lpfc_printf_log(phba,
-+ KERN_ERR,
-+ LOG_INIT,
-+ "%d:0440 Adapter failed to init, mbxCmd x%x "
-+				"READ_REV detected outdated firmware "
-+ "Data: x%x\n",
-+ phba->brd_no,
-+ mb->mbxCommand, 0);
-+ mempool_free(pmb, phba->mbox_mem_pool);
-+ return -ERESTART;
-+ } else {
-+ vp->rev.rBit = 1;
-+ vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
-+ memcpy(vp->rev.sli1FwName,
-+ (char*)mb->un.varRdRev.sli1FwName, 16);
-+ vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
-+ memcpy(vp->rev.sli2FwName,
-+ (char *)mb->un.varRdRev.sli2FwName, 16);
-+ }
-+
-+ /* Save information as VPD data */
-+ vp->rev.biuRev = mb->un.varRdRev.biuRev;
-+ vp->rev.smRev = mb->un.varRdRev.smRev;
-+ vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
-+ vp->rev.endecRev = mb->un.varRdRev.endecRev;
-+ vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
-+ vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
-+ vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
-+ vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
-+ vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
-+ vp->rev.opFwRev = mb->un.varRdRev.opFwRev;
-+ lpfc_rdrev_wd30 = mb->un.varWords[30];
-+
-+ if (lpfc_is_LC_HBA(phba->pcidev->device))
-+ memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
-+ sizeof (phba->RandomData));
-+
-+ /* Get the default values for Model Name and Description */
-+ lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
-+
-+ /* Get adapter VPD information */
-+ pmb->context2 = kmalloc(DMP_RSP_SIZE, GFP_ATOMIC);
-+ lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_ATOMIC);
-+
-+ do {
-+ lpfc_dump_mem(phba, pmb, offset);
-+ if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
-+			/* Let it go through even if it failed. */
-+ /* Adapter failed to init, mbxCmd <cmd> DUMP VPD,
-+ mbxStatus <status> */
-+ lpfc_printf_log(phba,
-+ KERN_INFO,
-+ LOG_INIT,
-+ "%d:0441 VPD not present on adapter, mbxCmd "
-+ "x%x DUMP VPD, mbxStatus x%x\n",
-+ phba->brd_no,
-+ mb->mbxCommand, mb->mbxStatus);
-+ kfree(lpfc_vpd_data);
-+ lpfc_vpd_data = 0;
-+ break;
-+ }
-+
-+ lpfc_sli_pcimem_bcopy((uint32_t *)pmb->context2,
-+ (uint32_t*)((uint8_t*)lpfc_vpd_data + offset),
-+ mb->un.varDmp.word_cnt);
-+
-+ offset += mb->un.varDmp.word_cnt;
-+ } while (mb->un.varDmp.word_cnt);
-+
-+ lpfc_parse_vpd(phba, (uint8_t *)lpfc_vpd_data);
-+
-+ if(pmb->context2)
-+ kfree(pmb->context2);
-+ if (lpfc_vpd_data)
-+ kfree(lpfc_vpd_data);
-+
-+ pmb->context2 = 0;
-+ mempool_free(pmb, phba->mbox_mem_pool);
-+ return 0;
-+}
-+
-+/************************************************************************/
-+/* */
-+/* lpfc_config_port_post */
-+/* This routine will do LPFC initialization after the */
-+/* CONFIG_PORT mailbox command. This will be initialized */
-+/* as a SLI layer callback routine. */
-+/* This routine returns 0 on success. Any other return value */
-+/* indicates an error. */
-+/* */
-+/************************************************************************/
-+int
-+lpfc_config_port_post(struct lpfc_hba * phba)
-+{
-+ LPFC_MBOXQ_t *pmb;
-+ MAILBOX_t *mb;
-+ struct lpfc_dmabuf *mp;
-+ struct lpfc_sli *psli = &phba->sli;
-+ uint32_t status, timeout;
-+ int i, j, flogi_sent;
-+ unsigned long isr_cnt, clk_cnt;
-+
-+
-+ /* Get a Mailbox buffer to setup mailbox commands for HBA
-+ initialization */
-+ pmb = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
-+ if (!pmb) {
-+ phba->hba_state = LPFC_HBA_ERROR;
-+ return -ENOMEM;
-+ }
-+ mb = &pmb->mb;
-+
-+ /* Setup link timers */
-+ lpfc_config_link(phba, pmb);
-+ if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
-+ lpfc_printf_log(phba,
-+ KERN_ERR,
-+ LOG_INIT,
-+ "%d:0447 Adapter failed init, mbxCmd x%x "
-+ "CONFIG_LINK mbxStatus x%x\n",
-+ phba->brd_no,
-+ mb->mbxCommand, mb->mbxStatus);
-+ phba->hba_state = LPFC_HBA_ERROR;
-+ mempool_free( pmb, phba->mbox_mem_pool);
-+ return -EIO;
-+ }
-+
-+ /* Get login parameters for NID. */
-+ lpfc_read_sparam(phba, pmb);
-+ if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
-+ lpfc_printf_log(phba,
-+ KERN_ERR,
-+ LOG_INIT,
-+ "%d:0448 Adapter failed init, mbxCmd x%x "
-+ "READ_SPARM mbxStatus x%x\n",
-+ phba->brd_no,
-+ mb->mbxCommand, mb->mbxStatus);
-+ phba->hba_state = LPFC_HBA_ERROR;
-+ mp = (struct lpfc_dmabuf *) pmb->context1;
-+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
-+ kfree(mp);
-+ mempool_free( pmb, phba->mbox_mem_pool);
-+ return -EIO;
-+ }
-+
-+ mp = (struct lpfc_dmabuf *) pmb->context1;
-+
-+ memcpy(&phba->fc_sparam, mp->virt, sizeof (struct serv_parm));
-+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
-+ kfree(mp);
-+ pmb->context1 = NULL;
-+
-+ memcpy(&phba->fc_nodename, &phba->fc_sparam.nodeName,
-+ sizeof (struct lpfc_name));
-+ memcpy(&phba->fc_portname, &phba->fc_sparam.portName,
-+ sizeof (struct lpfc_name));
-+ /* If no serial number in VPD data, use low 6 bytes of WWNN */
-+ /* This should be consolidated into parse_vpd ? - mr */
-+ if (phba->SerialNumber[0] == 0) {
-+ uint8_t *outptr;
-+
-+ outptr = (uint8_t *) & phba->fc_nodename.IEEE[0];
-+ for (i = 0; i < 12; i++) {
-+ status = *outptr++;
-+ j = ((status & 0xf0) >> 4);
-+ if (j <= 9)
-+ phba->SerialNumber[i] =
-+ (char)((uint8_t) 0x30 + (uint8_t) j);
-+ else
-+ phba->SerialNumber[i] =
-+ (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
-+ i++;
-+ j = (status & 0xf);
-+ if (j <= 9)
-+ phba->SerialNumber[i] =
-+ (char)((uint8_t) 0x30 + (uint8_t) j);
-+ else
-+ phba->SerialNumber[i] =
-+ (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
-+ }
-+ }
-+
-+ /* This should turn on DELAYED ABTS for ELS timeouts */
-+ lpfc_set_slim(phba, pmb, 0x052198, 0x1);
-+ if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
-+ phba->hba_state = LPFC_HBA_ERROR;
-+ mempool_free( pmb, phba->mbox_mem_pool);
-+ return -EIO;
-+ }
-+
-+
-+ lpfc_read_config(phba, pmb);
-+ if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
-+ lpfc_printf_log(phba,
-+ KERN_ERR,
-+ LOG_INIT,
-+ "%d:0453 Adapter failed to init, mbxCmd x%x "
-+ "READ_CONFIG, mbxStatus x%x\n",
-+ phba->brd_no,
-+ mb->mbxCommand, mb->mbxStatus);
-+ phba->hba_state = LPFC_HBA_ERROR;
-+ mempool_free( pmb, phba->mbox_mem_pool);
-+ return -EIO;
-+ }
-+
-+ /* Reset the DFT_HBA_Q_DEPTH to the max xri */
-+ if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1))
-+ phba->cfg_hba_queue_depth =
-+ mb->un.varRdConfig.max_xri + 1;
-+
-+ phba->lmt = mb->un.varRdConfig.lmt;
-+	/* If the HBA is not 4Gb capable, or not 2Gb capable,
-+	   don't let the configured link speed ask for it */
-+ if ((((phba->lmt & LMT_4250_10bit) != LMT_4250_10bit) &&
-+ (phba->cfg_link_speed > LINK_SPEED_2G)) ||
-+ (((phba->lmt & LMT_2125_10bit) != LMT_2125_10bit) &&
-+ (phba->cfg_link_speed > LINK_SPEED_1G))) {
-+ /* Reset link speed to auto. 1G/2GB HBA cfg'd for 4G */
-+ lpfc_printf_log(phba,
-+ KERN_WARNING,
-+ LOG_LINK_EVENT,
-+ "%d:1302 Invalid speed for this board: "
-+ "Reset link speed to auto: x%x\n",
-+ phba->brd_no,
-+ phba->cfg_link_speed);
-+ phba->cfg_link_speed = LINK_SPEED_AUTO;
-+ }
-+
-+ if (!phba->intr_inited) {
-+ /* Add our interrupt routine to kernel's interrupt chain &
-+ enable it */
-+
-+ if (request_irq(phba->pcidev->irq,
-+ lpfc_intr_handler,
-+ SA_SHIRQ,
-+ LPFC_DRIVER_NAME,
-+ phba) != 0) {
-+ /* Enable interrupt handler failed */
-+ lpfc_printf_log(phba,
-+ KERN_ERR,
-+ LOG_INIT,
-+ "%d:0451 Enable interrupt handler "
-+ "failed\n",
-+ phba->brd_no);
-+ phba->hba_state = LPFC_HBA_ERROR;
-+ mempool_free(pmb, phba->mbox_mem_pool);
-+ return -EIO;
-+ }
-+ phba->intr_inited =
-+ (HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA);
-+ }
-+
-+ phba->hba_state = LPFC_LINK_DOWN;
-+
-+ /* Only process IOCBs on ring 0 till hba_state is READY */
-+ if (psli->ring[psli->ip_ring].cmdringaddr)
-+ psli->ring[psli->ip_ring].flag |= LPFC_STOP_IOCB_EVENT;
-+ if (psli->ring[psli->fcp_ring].cmdringaddr)
-+ psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT;
-+ if (psli->ring[psli->next_ring].cmdringaddr)
-+ psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT;
-+
-+ /* Post receive buffers for desired rings */
-+ lpfc_post_rcv_buf(phba);
-+
-+ /* Enable appropriate host interrupts */
-+ status = readl(phba->HCregaddr);
-+ status |= phba->intr_inited;
-+ if (psli->sliinit.num_rings > 0)
-+ status |= HC_R0INT_ENA;
-+ if (psli->sliinit.num_rings > 1)
-+ status |= HC_R1INT_ENA;
-+ if (psli->sliinit.num_rings > 2)
-+ status |= HC_R2INT_ENA;
-+ if (psli->sliinit.num_rings > 3)
-+ status |= HC_R3INT_ENA;
-+
-+ writel(status, phba->HCregaddr);
-+ readl(phba->HCregaddr); /* flush */
-+
-+ /* Setup and issue mailbox INITIALIZE LINK command */
-+ lpfc_init_link(phba, pmb, phba->cfg_topology,
-+ phba->cfg_link_speed);
-+
-+ isr_cnt = psli->slistat.sliIntr;
-+ clk_cnt = jiffies;
-+
-+ pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
-+ if (lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT) != MBX_SUCCESS) {
-+ lpfc_printf_log(phba,
-+ KERN_ERR,
-+ LOG_INIT,
-+ "%d:0454 Adapter failed to init, mbxCmd x%x "
-+ "INIT_LINK, mbxStatus x%x\n",
-+ phba->brd_no,
-+ mb->mbxCommand, mb->mbxStatus);
-+
-+ /* Clear all interrupt enable conditions */
-+ writel(0, phba->HCregaddr);
-+ readl(phba->HCregaddr); /* flush */
-+ /* Clear all pending interrupts */
-+ writel(0xffffffff, phba->HAregaddr);
-+ readl(phba->HAregaddr); /* flush */
-+
-+ free_irq(phba->pcidev->irq, phba);
-+ phba->hba_state = LPFC_HBA_ERROR;
-+ mempool_free(pmb, phba->mbox_mem_pool);
-+ return -EIO;
-+ }
-+ /* MBOX buffer will be freed in mbox compl */
-+
-+ /*
-+ * Setup the ring 0 (els) timeout handler
-+ */
-+ timeout = phba->fc_ratov << 1;
-+
-+ phba->els_tmofunc.expires = jiffies + HZ * timeout;
-+ add_timer(&phba->els_tmofunc);
-+
-+ phba->fc_prevDID = Mask_DID;
-+ flogi_sent = 0;
-+ i = 0;
-+ while ((phba->hba_state != LPFC_HBA_READY) ||
-+ (phba->num_disc_nodes) || (phba->fc_prli_sent) ||
-+ ((phba->fc_map_cnt == 0) && (i<2)) ||
-+ (psli->sliinit.sli_flag & LPFC_SLI_MBOX_ACTIVE)) {
-+ /* Check every second for 30 retries. */
-+ i++;
-+ if (i > 30) {
-+ break;
-+ }
-+ if ((i >= 15) && (phba->hba_state <= LPFC_LINK_DOWN)) {
-+ /* The link is down. Set linkdown timeout */
-+ break;
-+ }
-+
-+ /* Delay for 1 second to give discovery time to complete. */
-+ for (j = 0; j < 20; j++) {
-+ /* On some systems, the driver's attach/detect routines
-+ * are uninterruptible. Since the driver cannot predict
-+ * when this is true, just manually call the ISR every
-+ * 50 ms to service any interrupts.
-+ */
-+ msleep(50);
-+ if (isr_cnt == psli->slistat.sliIntr) {
-+ lpfc_sli_intr(phba);
-+ isr_cnt = psli->slistat.sliIntr;
-+ }
-+ }
-+ isr_cnt = psli->slistat.sliIntr;
-+
-+ if (clk_cnt == jiffies) {
-+ /* REMOVE: IF THIS HAPPENS, SYSTEM CLOCK IS NOT RUNNING.
-+ * WE HAVE TO MANUALLY CALL OUR TIMEOUT ROUTINES.
-+ */
-+ clk_cnt = jiffies;
-+ }
-+ }
-+
-+ /* Since num_disc_nodes keys off of PLOGI, delay a bit to let
-+	 * any potential PRLIs flush through the SLI sub-system.
-+ */
-+ msleep(50);
-+ if (isr_cnt == psli->slistat.sliIntr) {
-+ lpfc_sli_intr(phba);
-+ }
-+
-+ return (0);
-+}
-+
-+/************************************************************************/
-+/* */
-+/* lpfc_hba_down_prep */
-+/* This routine will do LPFC uninitialization before the */
-+/* HBA is reset when bringing down the SLI Layer. This will be */
-+/* initialized as a SLI layer callback routine. */
-+/* This routine returns 0 on success. Any other return value */
-+/* indicates an error. */
-+/* */
-+/************************************************************************/
-+int
-+lpfc_hba_down_prep(struct lpfc_hba * phba)
-+{
-+ /* Disable interrupts */
-+ writel(0, phba->HCregaddr);
-+ readl(phba->HCregaddr); /* flush */
-+
-+ /* Cleanup potential discovery resources */
-+ lpfc_els_flush_rscn(phba);
-+ lpfc_els_flush_cmd(phba);
-+ lpfc_disc_flush_list(phba);
-+
-+ return (0);
-+}
-+
-+/************************************************************************/
-+/* */
-+/* lpfc_handle_eratt */
-+/* This routine will handle processing a Host Attention */
-+/* Error Status event. This will be initialized */
-+/* as a SLI layer callback routine. */
-+/* */
-+/************************************************************************/
-+void
-+lpfc_handle_eratt(struct lpfc_hba * phba, uint32_t status)
-+{
-+ struct lpfc_sli *psli;
-+ struct lpfc_sli_ring *pring;
-+ struct lpfc_iocbq *iocb, *next_iocb;
-+ IOCB_t *icmd = NULL, *cmd = NULL;
-+ struct lpfc_scsi_buf *lpfc_cmd;
-+ volatile uint32_t status1, status2;
-+ void *from_slim;
-+ unsigned long iflag;
-+
-+ psli = &phba->sli;
-+ from_slim = ((uint8_t *)phba->MBslimaddr + 0xa8);
-+ status1 = readl( from_slim);
-+ from_slim = ((uint8_t *)phba->MBslimaddr + 0xac);
-+ status2 = readl( from_slim);
-+
-+ if (status & HS_FFER6) {
-+ /* Re-establishing Link */
-+ spin_lock_irqsave(phba->host->host_lock, iflag);
-+ lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
-+ "%d:1301 Re-establishing Link "
-+ "Data: x%x x%x x%x\n",
-+ phba->brd_no, status, status1, status2);
-+ phba->fc_flag |= FC_ESTABLISH_LINK;
-+
-+ /*
-+		 * The firmware stops when it triggers erratt with HS_FFER6,
-+		 * which could cause I/Os to be dropped by the firmware.
-+		 * Error out the IOCBs (I/Os) on the txcmplq and let the
-+		 * SCSI layer retry them after re-establishing the link.
-+ */
-+ pring = &psli->ring[psli->fcp_ring];
-+
-+ list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq,
-+ list) {
-+ cmd = &iocb->iocb;
-+
-+ /* Must be a FCP command */
-+ if ((cmd->ulpCommand != CMD_FCP_ICMND64_CR) &&
-+ (cmd->ulpCommand != CMD_FCP_IWRITE64_CR) &&
-+ (cmd->ulpCommand != CMD_FCP_IREAD64_CR)) {
-+ continue;
-+ }
-+
-+ /* context1 MUST be a struct lpfc_scsi_buf */
-+ lpfc_cmd = (struct lpfc_scsi_buf *)(iocb->context1);
-+ if (lpfc_cmd == 0) {
-+ continue;
-+ }
-+
-+ /* Clear fast_lookup entry */
-+ if (cmd->ulpIoTag &&
-+ (cmd->ulpIoTag <
-+ psli->sliinit.ringinit[pring->ringno].fast_iotag))
-+ *(pring->fast_lookup + cmd->ulpIoTag) = NULL;
-+
-+ list_del(&iocb->list);
-+ pring->txcmplq_cnt--;
-+
-+ if (iocb->iocb_cmpl) {
-+ icmd = &iocb->iocb;
-+ icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
-+ icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
-+ (iocb->iocb_cmpl)(phba, iocb, iocb);
-+ } else {
-+ mempool_free( iocb, phba->iocb_mem_pool);
-+ }
-+ }
-+
-+ /*
-+ * There was a firmware error. Take the hba offline and then
-+ * attempt to restart it.
-+ */
-+ spin_unlock_irqrestore(phba->host->host_lock, iflag);
-+ lpfc_offline(phba);
-+ if (lpfc_online(phba) == 0) { /* Initialize the HBA */
-+ mod_timer(&phba->fc_estabtmo, jiffies + HZ * 60);
-+ return;
-+ }
-+ } else {
-+ /* The if clause above forces this code path when the status
-+		 * failure is a value other than FFER6. Do not call lpfc_offline
-+ * twice. This is the adapter hardware error path.
-+ */
-+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-+ "%d:0457 Adapter Hardware Error "
-+ "Data: x%x x%x x%x\n",
-+ phba->brd_no, status, status1, status2);
-+
-+ lpfc_offline(phba);
-+
-+ /*
-+ * Restart all traffic to this host. Since the fc_transport
-+ * block functions (future) were not called in lpfc_offline,
-+ * don't call them here.
-+ */
-+ scsi_unblock_requests(phba->host);
-+ }
-+ return;
-+}
-+
-+/************************************************************************/
-+/* */
-+/* lpfc_handle_latt */
-+/* This routine will handle processing a Host Attention */
-+/* Link Status event. This will be initialized */
-+/* as a SLI layer callback routine. */
-+/* */
-+/************************************************************************/
-+void
-+lpfc_handle_latt(struct lpfc_hba * phba)
-+{
-+ struct lpfc_sli *psli;
-+ LPFC_MBOXQ_t *pmb;
-+ volatile uint32_t control;
-+ unsigned long iflag;
-+
-+
-+ spin_lock_irqsave(phba->host->host_lock, iflag);
-+
-+ /* called from host_interrupt, to process LATT */
-+ psli = &phba->sli;
-+ psli->slistat.linkEvent++;
-+
-+ /* Get a buffer which will be used for mailbox commands */
-+ if ((pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
-+ GFP_ATOMIC))) {
-+ if (lpfc_read_la(phba, pmb) == 0) {
-+ pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la;
-+ if (lpfc_sli_issue_mbox
-+ (phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB))
-+ != MBX_NOT_FINISHED) {
-+ /* Turn off Link Attention interrupts until
-+ CLEAR_LA done */
-+ psli->sliinit.sli_flag &= ~LPFC_PROCESS_LA;
-+ control = readl(phba->HCregaddr);
-+ control &= ~HC_LAINT_ENA;
-+ writel(control, phba->HCregaddr);
-+ readl(phba->HCregaddr); /* flush */
-+
-+ /* Clear Link Attention in HA REG */
-+ writel(HA_LATT, phba->HAregaddr);
-+ readl(phba->HAregaddr); /* flush */
-+ spin_unlock_irqrestore(phba->host->host_lock,
-+ iflag);
-+ return;
-+ } else {
-+ mempool_free(pmb, phba->mbox_mem_pool);
-+ }
-+ } else {
-+ mempool_free(pmb, phba->mbox_mem_pool);
-+ }
-+ }
-+
-+ /* Clear Link Attention in HA REG */
-+ writel(HA_LATT, phba->HAregaddr);
-+ readl(phba->HAregaddr); /* flush */
-+ lpfc_linkdown(phba);
-+ phba->hba_state = LPFC_HBA_ERROR;
-+ spin_unlock_irqrestore(phba->host->host_lock, iflag);
-+ return;
-+}
-+
-+/************************************************************************/
-+/* */
-+/* lpfc_parse_vpd */
-+/* This routine will parse the VPD data */
-+/* */
-+/************************************************************************/
-+static int
-+lpfc_parse_vpd(struct lpfc_hba * phba, uint8_t * vpd)
-+{
-+ uint8_t lenlo, lenhi;
-+ uint32_t Length;
-+ int i, j;
-+ int finished = 0;
-+ int index = 0;
-+
-+ if(!vpd)
-+ return 0;
-+
-+ /* Vital Product */
-+ lpfc_printf_log(phba,
-+ KERN_INFO,
-+ LOG_INIT,
-+ "%d:0455 Vital Product Data: x%x x%x x%x x%x\n",
-+ phba->brd_no,
-+ (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
-+ (uint32_t) vpd[3]);
-+ do {
-+ switch (vpd[index]) {
-+ case 0x82:
-+ index += 1;
-+ lenlo = vpd[index];
-+ index += 1;
-+ lenhi = vpd[index];
-+ index += 1;
-+ i = ((((unsigned short)lenhi) << 8) + lenlo);
-+ index += i;
-+ break;
-+ case 0x90:
-+ index += 1;
-+ lenlo = vpd[index];
-+ index += 1;
-+ lenhi = vpd[index];
-+ index += 1;
-+ Length = ((((unsigned short)lenhi) << 8) + lenlo);
-+
-+ while (Length > 0) {
-+ /* Look for Serial Number */
-+ if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
-+ index += 2;
-+ i = vpd[index];
-+ index += 1;
-+ j = 0;
-+ Length -= (3+i);
-+ while(i--) {
-+ phba->SerialNumber[j++] = vpd[index++];
-+ if(j == 31)
-+ break;
-+ }
-+ phba->SerialNumber[j] = 0;
-+ continue;
-+ }
-+ else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
-+ phba->vpd_flag |= VPD_MODEL_DESC;
-+ index += 2;
-+ i = vpd[index];
-+ index += 1;
-+ j = 0;
-+ Length -= (3+i);
-+ while(i--) {
-+ phba->ModelDesc[j++] = vpd[index++];
-+ if(j == 255)
-+ break;
-+ }
-+ phba->ModelDesc[j] = 0;
-+ continue;
-+ }
-+ else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
-+ phba->vpd_flag |= VPD_MODEL_NAME;
-+ index += 2;
-+ i = vpd[index];
-+ index += 1;
-+ j = 0;
-+ Length -= (3+i);
-+ while(i--) {
-+ phba->ModelName[j++] = vpd[index++];
-+ if(j == 79)
-+ break;
-+ }
-+ phba->ModelName[j] = 0;
-+ continue;
-+ }
-+ else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
-+ phba->vpd_flag |= VPD_PROGRAM_TYPE;
-+ index += 2;
-+ i = vpd[index];
-+ index += 1;
-+ j = 0;
-+ Length -= (3+i);
-+ while(i--) {
-+ phba->ProgramType[j++] = vpd[index++];
-+ if(j == 255)
-+ break;
-+ }
-+ phba->ProgramType[j] = 0;
-+ continue;
-+ }
-+ else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
-+ phba->vpd_flag |= VPD_PORT;
-+ index += 2;
-+ i = vpd[index];
-+ index += 1;
-+ j = 0;
-+ Length -= (3+i);
-+ while(i--) {
-+ phba->Port[j++] = vpd[index++];
-+ if(j == 19)
-+ break;
-+ }
-+ phba->Port[j] = 0;
-+ continue;
-+ }
-+ else {
-+ index += 2;
-+ i = vpd[index];
-+ index += 1;
-+ index += i;
-+ Length -= (3 + i);
-+ }
-+ }
-+ finished = 0;
-+ break;
-+ case 0x78:
-+ finished = 1;
-+ break;
-+ default:
-+ index ++;
-+ break;
-+ }
-+ } while (!finished && (index < 108));
-+
-+ return(1);
-+}
-+
-+static void
-+lpfc_get_hba_model_desc(struct lpfc_hba * phba, uint8_t * mdp, uint8_t * descp)
-+{
-+ lpfc_vpd_t *vp;
-+ uint32_t id;
-+ uint8_t hdrtype;
-+ char str[16];
-+
-+ vp = &phba->vpd;
-+ pci_read_config_dword(phba->pcidev, PCI_VENDOR_ID, &id);
-+ pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
-+
-+ switch ((id >> 16) & 0xffff) {
-+ case PCI_DEVICE_ID_SUPERFLY:
-+ if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
-+ strcpy(str, "LP7000 1");
-+ else
-+ strcpy(str, "LP7000E 1");
-+ break;
-+ case PCI_DEVICE_ID_DRAGONFLY:
-+ strcpy(str, "LP8000 1");
-+ break;
-+ case PCI_DEVICE_ID_CENTAUR:
-+ if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
-+ strcpy(str, "LP9002 2");
-+ else
-+ strcpy(str, "LP9000 1");
-+ break;
-+ case PCI_DEVICE_ID_RFLY:
-+ strcpy(str, "LP952 2");
-+ break;
-+ case PCI_DEVICE_ID_PEGASUS:
-+ strcpy(str, "LP9802 2");
-+ break;
-+ case PCI_DEVICE_ID_THOR:
-+ if (hdrtype == 0x80)
-+ strcpy(str, "LP10000DC 2");
-+ else
-+ strcpy(str, "LP10000 2");
-+ break;
-+ case PCI_DEVICE_ID_VIPER:
-+ strcpy(str, "LPX1000 10");
-+ break;
-+ case PCI_DEVICE_ID_PFLY:
-+ strcpy(str, "LP982 2");
-+ break;
-+ case PCI_DEVICE_ID_TFLY:
-+ if (hdrtype == 0x80)
-+ strcpy(str, "LP1050DC 2");
-+ else
-+ strcpy(str, "LP1050 2");
-+ break;
-+ case PCI_DEVICE_ID_HELIOS:
-+ if (hdrtype == 0x80)
-+ strcpy(str, "LP11002 4");
-+ else
-+ strcpy(str, "LP11000 4");
-+ break;
-+ case PCI_DEVICE_ID_BMID:
-+ strcpy(str, "LP1150 4");
-+ break;
-+ case PCI_DEVICE_ID_BSMB:
-+ strcpy(str, "LP111 4");
-+ break;
-+ case PCI_DEVICE_ID_ZEPHYR:
-+ if (hdrtype == 0x80)
-+ strcpy(str, "LPe11002 4");
-+ else
-+ strcpy(str, "LPe11000 4");
-+ break;
-+ case PCI_DEVICE_ID_ZMID:
-+ strcpy(str, "LPe1150 4");
-+ break;
-+ case PCI_DEVICE_ID_ZSMB:
-+ strcpy(str, "LPe111 4");
-+ break;
-+ case PCI_DEVICE_ID_LP101:
-+ strcpy(str, "LP101 2");
-+ break;
-+ case PCI_DEVICE_ID_LP10000S:
-+ strcpy(str, "LP10000-S 2");
-+ break;
-+ }
-+ if (mdp)
-+ sscanf(str, "%s", mdp);
-+ if (descp)
-+ sprintf(descp, "Emulex LightPulse %s Gigabit PCI Fibre "
-+ "Channel Adapter", str);
-+}
-+
-+/**************************************************/
-+/* lpfc_post_buffer */
-+/* */
-+/* This routine will post count buffers to the */
-+/* ring with the QUE_RING_BUF_CN command. This */
-+/* allows 3 buffers / command to be posted. */
-+/* Returns the number of buffers NOT posted. */
-+/**************************************************/
-+int
-+lpfc_post_buffer(struct lpfc_hba * phba, struct lpfc_sli_ring * pring, int cnt,
-+ int type)
-+{
-+ IOCB_t *icmd;
-+ struct lpfc_iocbq *iocb;
-+ struct lpfc_dmabuf *mp1, *mp2;
-+
-+ cnt += pring->missbufcnt;
-+
-+ /* While there are buffers to post */
-+ while (cnt > 0) {
-+ /* Allocate buffer for command iocb */
-+ if ((iocb = mempool_alloc(phba->iocb_mem_pool, GFP_ATOMIC))
-+ == 0) {
-+ pring->missbufcnt = cnt;
-+ return (cnt);
-+ }
-+ memset(iocb, 0, sizeof (struct lpfc_iocbq));
-+ icmd = &iocb->iocb;
-+
-+ /* 2 buffers can be posted per command */
-+ /* Allocate buffer to post */
-+ mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_ATOMIC);
-+ if (mp1)
-+ mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
-+ &mp1->phys);
-+ if (mp1 == 0 || mp1->virt == 0) {
-+ if (mp1)
-+ kfree(mp1);
-+
-+ mempool_free( iocb, phba->iocb_mem_pool);
-+ pring->missbufcnt = cnt;
-+ return (cnt);
-+ }
-+
-+ INIT_LIST_HEAD(&mp1->list);
-+ /* Allocate buffer to post */
-+ if (cnt > 1) {
-+ mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_ATOMIC);
-+ if (mp2)
-+ mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
-+ &mp2->phys);
-+ if (mp2 == 0 || mp2->virt == 0) {
-+ if (mp2)
-+ kfree(mp2);
-+ lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
-+ kfree(mp1);
-+ mempool_free( iocb, phba->iocb_mem_pool);
-+ pring->missbufcnt = cnt;
-+ return (cnt);
-+ }
-+
-+ INIT_LIST_HEAD(&mp2->list);
-+ } else {
-+ mp2 = NULL;
-+ }
-+
-+ icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
-+ icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
-+ icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
-+ icmd->ulpBdeCount = 1;
-+ cnt--;
-+ if (mp2) {
-+ icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
-+ icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
-+ icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
-+ cnt--;
-+ icmd->ulpBdeCount = 2;
-+ }
-+
-+ icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
-+ icmd->ulpIoTag = lpfc_sli_next_iotag(phba, pring);
-+ icmd->ulpLe = 1;
-+
-+ if (lpfc_sli_issue_iocb(phba, pring, iocb, 0) == IOCB_ERROR) {
-+ lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
-+ kfree(mp1);
-+ cnt++;
-+ if (mp2) {
-+ lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
-+ kfree(mp2);
-+ cnt++;
-+ }
-+ mempool_free( iocb, phba->iocb_mem_pool);
-+ pring->missbufcnt = cnt;
-+ return (cnt);
-+ }
-+ lpfc_sli_ringpostbuf_put(phba, pring, mp1);
-+ if (mp2) {
-+ lpfc_sli_ringpostbuf_put(phba, pring, mp2);
-+ }
-+ }
-+ pring->missbufcnt = 0;
-+ return (0);
-+}
-+
-+/************************************************************************/
-+/* */
-+/* lpfc_post_rcv_buf */
-+/* This routine posts initial rcv buffers to the configured rings */
-+/* */
-+/************************************************************************/
-+static int
-+lpfc_post_rcv_buf(struct lpfc_hba * phba)
-+{
-+ struct lpfc_sli *psli = &phba->sli;
-+
-+ /* Ring 0, ELS / CT buffers */
-+ lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0, 1);
-+ /* Ring 2 - FCP no buffers needed */
-+
-+ return 0;
-+}
-+
-+#define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
-+
-+/************************************************************************/
-+/* */
-+/* lpfc_sha_init */
-+/* */
-+/************************************************************************/
-+static void
-+lpfc_sha_init(uint32_t * HashResultPointer)
-+{
-+ HashResultPointer[0] = 0x67452301;
-+ HashResultPointer[1] = 0xEFCDAB89;
-+ HashResultPointer[2] = 0x98BADCFE;
-+ HashResultPointer[3] = 0x10325476;
-+ HashResultPointer[4] = 0xC3D2E1F0;
-+}
-+
-+/************************************************************************/
-+/* */
-+/* lpfc_sha_iterate */
-+/* */
-+/************************************************************************/
-+static void
-+lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
-+{
-+ int t;
-+ uint32_t TEMP;
-+ uint32_t A, B, C, D, E;
-+ t = 16;
-+ do {
-+ HashWorkingPointer[t] =
-+ S(1,
-+ HashWorkingPointer[t - 3] ^ HashWorkingPointer[t -
-+ 8] ^
-+ HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
-+ } while (++t <= 79);
-+ t = 0;
-+ A = HashResultPointer[0];
-+ B = HashResultPointer[1];
-+ C = HashResultPointer[2];
-+ D = HashResultPointer[3];
-+ E = HashResultPointer[4];
-+
-+ do {
-+ if (t < 20) {
-+ TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
-+ } else if (t < 40) {
-+ TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
-+ } else if (t < 60) {
-+ TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
-+ } else {
-+ TEMP = (B ^ C ^ D) + 0xCA62C1D6;
-+ }
-+ TEMP += S(5, A) + E + HashWorkingPointer[t];
-+ E = D;
-+ D = C;
-+ C = S(30, B);
-+ B = A;
-+ A = TEMP;
-+ } while (++t <= 79);
-+
-+ HashResultPointer[0] += A;
-+ HashResultPointer[1] += B;
-+ HashResultPointer[2] += C;
-+ HashResultPointer[3] += D;
-+ HashResultPointer[4] += E;
-+
-+}
-+
-+/************************************************************************/
-+/* */
-+/* lpfc_challenge_key */
-+/* */
-+/************************************************************************/
-+static void
-+lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
-+{
-+ *HashWorking = (*RandomChallenge ^ *HashWorking);
-+}
-+
-+/************************************************************************/
-+/* */
-+/* lpfc_hba_init */
-+/* */
-+/************************************************************************/
-+void
-+lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
-+{
-+ int t;
-+ uint32_t *HashWorking;
-+ uint32_t *pwwnn = phba->wwnn;
-+
-+ HashWorking = kmalloc(80 * sizeof(uint32_t), GFP_ATOMIC);
-+ if (!HashWorking)
-+ return;
-+
-+ memset(HashWorking, 0, (80 * sizeof(uint32_t)));
-+ HashWorking[0] = HashWorking[78] = *pwwnn++;
-+ HashWorking[1] = HashWorking[79] = *pwwnn;
-+
-+ for (t = 0; t < 7; t++)
-+ lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
-+
-+ lpfc_sha_init(hbainit);
-+ lpfc_sha_iterate(hbainit, HashWorking);
-+ kfree(HashWorking);
-+}
-+
-+static void
-+lpfc_consistent_bind_cleanup(struct lpfc_hba * phba)
-+{
-+ struct lpfc_bindlist *bdlp, *next_bdlp;
-+
-+ list_for_each_entry_safe(bdlp, next_bdlp,
-+ &phba->fc_nlpbind_list, nlp_listp) {
-+ list_del(&bdlp->nlp_listp);
-+ mempool_free( bdlp, phba->bind_mem_pool);
-+ }
-+ phba->fc_bind_cnt = 0;
-+}
-+
-+void
-+lpfc_cleanup(struct lpfc_hba * phba, uint32_t save_bind)
-+{
-+ struct lpfc_nodelist *ndlp, *next_ndlp;
-+
-+ /* clean up phba - lpfc specific */
-+ lpfc_can_disctmo(phba);
-+ list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nlpunmap_list,
-+ nlp_listp) {
-+ lpfc_nlp_remove(phba, ndlp);
-+ }
-+
-+ list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nlpmap_list,
-+ nlp_listp) {
-+ lpfc_nlp_remove(phba, ndlp);
-+ }
-+
-+ list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_unused_list,
-+ nlp_listp) {
-+ lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
-+ }
-+
-+ list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_plogi_list,
-+ nlp_listp) {
-+ lpfc_nlp_remove(phba, ndlp);
-+ }
-+
-+ list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_adisc_list,
-+ nlp_listp) {
-+ lpfc_nlp_remove(phba, ndlp);
-+ }
-+
-+ list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_reglogin_list,
-+ nlp_listp) {
-+ lpfc_nlp_remove(phba, ndlp);
-+ }
-+
-+ list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_prli_list,
-+ nlp_listp) {
-+ lpfc_nlp_remove(phba, ndlp);
-+ }
-+
-+ list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list,
-+ nlp_listp) {
-+ lpfc_nlp_remove(phba, ndlp);
-+ }
-+
-+ if (save_bind == 0) {
-+ lpfc_consistent_bind_cleanup(phba);
-+ }
-+
-+ INIT_LIST_HEAD(&phba->fc_nlpmap_list);
-+ INIT_LIST_HEAD(&phba->fc_nlpunmap_list);
-+ INIT_LIST_HEAD(&phba->fc_unused_list);
-+ INIT_LIST_HEAD(&phba->fc_plogi_list);
-+ INIT_LIST_HEAD(&phba->fc_adisc_list);
-+ INIT_LIST_HEAD(&phba->fc_reglogin_list);
-+ INIT_LIST_HEAD(&phba->fc_prli_list);
-+ INIT_LIST_HEAD(&phba->fc_npr_list);
-+
-+ phba->fc_map_cnt = 0;
-+ phba->fc_unmap_cnt = 0;
-+ phba->fc_plogi_cnt = 0;
-+ phba->fc_adisc_cnt = 0;
-+ phba->fc_reglogin_cnt = 0;
-+ phba->fc_prli_cnt = 0;
-+ phba->fc_npr_cnt = 0;
-+ phba->fc_unused_cnt= 0;
-+ return;
-+}
-+
-+void
-+lpfc_establish_link_tmo(unsigned long ptr)
-+{
-+ struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
-+ unsigned long iflag;
-+
-+ spin_lock_irqsave(phba->host->host_lock, iflag);
-+
-+ /* Re-establishing Link, timer expired */
-+ lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
-+ "%d:1300 Re-establishing Link, timer expired "
-+ "Data: x%x x%x\n",
-+ phba->brd_no, phba->fc_flag, phba->hba_state);
-+ phba->fc_flag &= ~FC_ESTABLISH_LINK;
-+ spin_unlock_irqrestore(phba->host->host_lock, iflag);
-+}
-+
-+int
-+lpfc_online(struct lpfc_hba * phba)
-+{
-+ if (!phba)
-+ return 0;
-+
-+ if (!(phba->fc_flag & FC_OFFLINE_MODE))
-+ return 0;
-+
-+ lpfc_printf_log(phba,
-+ KERN_WARNING,
-+ LOG_INIT,
-+ "%d:0458 Bring Adapter online\n",
-+ phba->brd_no);
-+
-+ if (!lpfc_sli_queue_setup(phba))
-+ return 1;
-+
-+ if (lpfc_sli_hba_setup(phba)) /* Initialize the HBA */
-+ return 1;
-+
-+ phba->fc_flag &= ~FC_OFFLINE_MODE;
-+
-+ /*
-+ * Restart all traffic to this host. Since the fc_transport block
-+ * functions (future) were not called in lpfc_offline, don't call them
-+ * here.
-+ */
-+ scsi_unblock_requests(phba->host);
-+ return 0;
-+}
-+
-+int
-+lpfc_offline(struct lpfc_hba * phba)
-+{
-+ struct lpfc_sli_ring *pring;
-+ struct lpfc_sli *psli;
-+ unsigned long iflag;
-+ int i = 0;
-+
-+ if (!phba)
-+ return 0;
-+
-+ if (phba->fc_flag & FC_OFFLINE_MODE)
-+ return 0;
-+
-+ /*
-+ * Don't call the fc_transport block api (future). The device is
-+ * going offline and causing a timer to fire in the midlayer is
-+ * unproductive. Just block all new requests until the driver
-+ * comes back online.
-+ */
-+ scsi_block_requests(phba->host);
-+ psli = &phba->sli;
-+ pring = &psli->ring[psli->fcp_ring];
-+
-+ spin_lock_irqsave(phba->host->host_lock, iflag);
-+ lpfc_linkdown(phba);
-+ spin_unlock_irqrestore(phba->host->host_lock, iflag);
-+
-+ /* The linkdown event takes 30 seconds to timeout. */
-+ while (pring->txcmplq_cnt) {
-+ mdelay(10);
-+ if (i++ > 3000)
-+ break;
-+ }
-+
-+ /* stop all timers associated with this hba */
-+ spin_lock_irqsave(phba->host->host_lock, iflag);
-+ lpfc_stop_timer(phba);
-+ phba->work_hba_events = 0;
-+ spin_unlock_irqrestore(phba->host->host_lock, iflag);
-+
-+ lpfc_printf_log(phba,
-+ KERN_WARNING,
-+ LOG_INIT,
-+ "%d:0460 Bring Adapter offline\n",
-+ phba->brd_no);
-+
-+ /* Bring down the SLI Layer and cleanup. The HBA is offline
-+ now. */
-+ spin_lock_irqsave(phba->host->host_lock, iflag);
-+ lpfc_sli_hba_down(phba);
-+ lpfc_cleanup(phba, 1);
-+ phba->fc_flag |= FC_OFFLINE_MODE;
-+ spin_unlock_irqrestore(phba->host->host_lock, iflag);
-+ return 0;
-+}
-+
-+/******************************************************************************
-+* Function name : lpfc_scsi_free
-+*
-+* Description : Called from fc_detach to free scsi tgt / lun resources
-+*
-+******************************************************************************/
-+int
-+lpfc_scsi_free(struct lpfc_hba * phba)
-+{
-+ struct lpfc_target *targetp;
-+ int i;
-+
-+ for (i = 0; i < MAX_FCP_TARGET; i++) {
-+ targetp = phba->device_queue_hash[i];
-+ if (targetp) {
-+ kfree(targetp);
-+ phba->device_queue_hash[i] = NULL;
-+ }
-+ }
-+ return 0;
-+}
-+
-+static void
-+lpfc_wakeup_event(struct lpfc_hba * phba, fcEVTHDR_t * ep)
-+{
-+ ep->e_mode &= ~E_SLEEPING_MODE;
-+ switch (ep->e_mask) {
-+ case FC_REG_LINK_EVENT:
-+ wake_up_interruptible(&phba->linkevtwq);
-+ break;
-+ case FC_REG_RSCN_EVENT:
-+ wake_up_interruptible(&phba->rscnevtwq);
-+ break;
-+ case FC_REG_CT_EVENT:
-+ wake_up_interruptible(&phba->ctevtwq);
-+ break;
-+ }
-+ return;
-+}
-+
-+int
-+lpfc_put_event(struct lpfc_hba * phba, uint32_t evcode, uint32_t evdata0,
-+ void * evdata1, uint32_t evdata2, uint32_t evdata3)
-+{
-+ fcEVT_t *ep;
-+ fcEVTHDR_t *ehp = phba->fc_evt_head;
-+ int found = 0;
-+ void *fstype = NULL;
-+ struct lpfc_dmabuf *mp;
-+ struct lpfc_sli_ct_request *ctp;
-+ struct lpfc_hba_event *rec;
-+ uint32_t evtype;
-+
-+ switch (evcode) {
-+ case HBA_EVENT_RSCN:
-+ evtype = FC_REG_RSCN_EVENT;
-+ break;
-+ case HBA_EVENT_LINK_DOWN:
-+ case HBA_EVENT_LINK_UP:
-+ evtype = FC_REG_LINK_EVENT;
-+ break;
-+ default:
-+ evtype = FC_REG_CT_EVENT;
-+ }
-+
-+ if (evtype == FC_REG_RSCN_EVENT || evtype == FC_REG_LINK_EVENT) {
-+ rec = &phba->hbaevt[phba->hba_event_put];
-+ rec->fc_eventcode = evcode;
-+ rec->fc_evdata1 = evdata0;
-+ rec->fc_evdata2 = (uint32_t)(unsigned long)evdata1;
-+ rec->fc_evdata3 = evdata2;
-+ rec->fc_evdata4 = evdata3;
-+
-+ phba->hba_event_put++;
-+ if (phba->hba_event_put >= MAX_HBAEVT)
-+ phba->hba_event_put = 0;
-+
-+ if (phba->hba_event_put == phba->hba_event_get) {
-+ phba->hba_event_missed++;
-+ phba->hba_event_get++;
-+ if (phba->hba_event_get >= MAX_HBAEVT)
-+ phba->hba_event_get = 0;
-+ }
-+ }
-+
-+ if (evtype == FC_REG_CT_EVENT) {
-+ mp = (struct lpfc_dmabuf *) evdata1;
-+ ctp = (struct lpfc_sli_ct_request *) mp->virt;
-+ fstype = (void *)(ulong) (ctp->FsType);
-+ }
-+
-+ while (ehp && ((ehp->e_mask != evtype) || (ehp->e_type != fstype)))
-+ ehp = (fcEVTHDR_t *) ehp->e_next_header;
-+
-+ if (!ehp)
-+ return (0);
-+
-+ ep = ehp->e_head;
-+
-+ while (ep && !(found)) {
-+ if (ep->evt_sleep) {
-+ switch (evtype) {
-+ case FC_REG_CT_EVENT:
-+ if ((ep->evt_type ==
-+ (void *)(ulong) FC_FSTYPE_ALL)
-+ || (ep->evt_type == fstype)) {
-+ found++;
-+ ep->evt_data0 = evdata0; /* tag */
-+ ep->evt_data1 = evdata1; /* buffer
-+ ptr */
-+ ep->evt_data2 = evdata2; /* count */
-+ ep->evt_sleep = 0;
-+ if (ehp->e_mode & E_SLEEPING_MODE) {
-+ ehp->e_flag |=
-+ E_GET_EVENT_ACTIVE;
-+ lpfc_wakeup_event(phba, ehp);
-+ }
-+ /* For FC_REG_CT_EVENT just give it to
-+ first one found */
-+ }
-+ break;
-+ default:
-+ found++;
-+ ep->evt_data0 = evdata0;
-+ ep->evt_data1 = evdata1;
-+ ep->evt_data2 = evdata2;
-+ ep->evt_sleep = 0;
-+ if ((ehp->e_mode & E_SLEEPING_MODE)
-+ && !(ehp->e_flag & E_GET_EVENT_ACTIVE)) {
-+ ehp->e_flag |= E_GET_EVENT_ACTIVE;
-+ lpfc_wakeup_event(phba, ehp);
-+ }
-+ /* For all other events, give it to every one
-+ waiting */
-+ break;
-+ }
-+ }
-+ ep = ep->evt_next;
-+ }
-+ if (evtype == FC_REG_LINK_EVENT)
-+ phba->nport_event_cnt++;
-+
-+ return (found);
-+}
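/*
 * Editor's sketch (not part of the original patch): lpfc_put_event() above
 * keeps HBA link/RSCN records in a fixed-size circular log -- the producer
 * advances hba_event_put, wraps at MAX_HBAEVT, and if it catches the reader
 * it drops the oldest record and bumps hba_event_missed.  The standalone
 * model below mirrors only that wrap/overwrite bookkeeping; MAX_HBAEVT and
 * the record contents are assumptions for illustration.
 */
#include <stdio.h>

#define MAX_HBAEVT 32

struct evt_log {
	unsigned put;     /* next slot to write         */
	unsigned get;     /* oldest unread slot         */
	unsigned missed;  /* records overwritten unread */
	int rec[MAX_HBAEVT];
};

static void evt_log_put(struct evt_log *log, int code)
{
	log->rec[log->put] = code;

	if (++log->put >= MAX_HBAEVT)
		log->put = 0;

	/* Catching up to the reader means the oldest record is lost. */
	if (log->put == log->get) {
		log->missed++;
		if (++log->get >= MAX_HBAEVT)
			log->get = 0;
	}
}

int main(void)
{
	struct evt_log log = { 0 };
	int i;

	for (i = 0; i < 40; i++)	/* more events than slots */
		evt_log_put(&log, i);

	printf("put=%u get=%u missed=%u\n", log.put, log.get, log.missed);
	return 0;
}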
-+
-+int
-+lpfc_stop_timer(struct lpfc_hba * phba)
-+{
-+ struct lpfc_sli *psli = &phba->sli;
-+
-+ /* Instead of a timer, this has been converted to a
-+	 * deferred processing list.
-+ */
-+ while (!list_empty(&phba->freebufList)) {
-+ struct lpfc_dmabuf *mp;
-+
-+ mp = (struct lpfc_dmabuf *)(phba->freebufList.next);
-+ if (mp) {
-+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
-+ list_del(&mp->list);
-+ kfree(mp);
-+ }
-+ }
-+
-+ del_timer_sync(&phba->fc_estabtmo);
-+ del_timer_sync(&phba->fc_disctmo);
-+ del_timer_sync(&phba->fc_scantmo);
-+ del_timer_sync(&phba->fc_fdmitmo);
-+ del_timer_sync(&phba->els_tmofunc);
-+ psli = &phba->sli;
-+ del_timer_sync(&psli->mbox_tmo);
-+ return(1);
-+}
---- linux-2.6.8.1-t044-driver-update//drivers/scsi/lpfc/lpfc_scsiport.c 1970-01-01 03:00:00.000000000 +0300
-+++ rhel4u2//drivers/scsi/lpfc/lpfc_scsiport.c 2005-10-19 11:47:17.000000000 +0400
-@@ -0,0 +1,1374 @@
-+/*******************************************************************
-+ * This file is part of the Emulex Linux Device Driver for *
-+ * Fibre Channel Host Bus Adapters. *
-+ * Copyright (C) 2003-2005 Emulex. All rights reserved. *
-+ * EMULEX and SLI are trademarks of Emulex. *
-+ * www.emulex.com *
-+ * *
-+ * This program is free software; you can redistribute it and/or *
-+ * modify it under the terms of version 2 of the GNU General *
-+ * Public License as published by the Free Software Foundation. *
-+ * This program is distributed in the hope that it will be useful. *
-+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
-+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
-+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
-+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
-+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
-+ * more details, a copy of which can be found in the file COPYING *
-+ * included with this package. *
-+ *******************************************************************/
-+
-+/*
-+ * $Id: lpfc_scsiport.c 1.231.2.8 2005/07/25 12:56:08EDT sf_support Exp $
-+ */
-+#include <linux/version.h>
-+#include <linux/spinlock.h>
-+#include <linux/pci.h>
-+#include <linux/blkdev.h>
-+#include <scsi/scsi.h>
-+#include <scsi/scsi_device.h>
-+#include <scsi/scsi_host.h>
-+#include <scsi/scsi_cmnd.h>
-+#include <scsi/scsi_transport_fc.h>
-+
-+#include "lpfc_hw.h"
-+#include "lpfc_sli.h"
-+#include "lpfc_mem.h"
-+#include "lpfc_disc.h"
-+#include "lpfc_scsi.h"
-+#include "lpfc.h"
-+#include "lpfc_logmsg.h"
-+#include "lpfc_fcp.h"
-+#include "lpfc_crtn.h"
-+
-+/* This routine allocates a scsi buffer, which contains all the necessary
-+ * information needed to initiate a SCSI I/O. The non-DMAable region of
-+ * the buffer contains the area to build the IOCB. The DMAable region contains
-+ * the memory for the FCP CMND, FCP RSP, and the initial BPL.
-+ * In addition to allocating memory, the FCP CMND and FCP RSP BDEs are set up
-+ * in the BPL and the BPL BDE is set up in the IOCB.
-+ */
-+struct lpfc_scsi_buf *
-+lpfc_get_scsi_buf(struct lpfc_hba * phba, int gfp_flags)
-+{
-+ struct lpfc_scsi_buf *psb;
-+ struct ulp_bde64 *bpl;
-+ IOCB_t *cmd;
-+ uint8_t *ptr;
-+ dma_addr_t pdma_phys;
-+
-+ psb = mempool_alloc(phba->scsibuf_mem_pool, gfp_flags);
-+ if (!psb)
-+ return NULL;
-+
-+ memset(psb, 0, sizeof (struct lpfc_scsi_buf));
-+
-+	/* Get a SCSI DMA extension for an I/O */
-+ /*
-+ * The DMA buffer for struct fcp_cmnd, struct fcp_rsp and BPL use
-+ * lpfc_scsi_dma_ext_pool with size LPFC_SCSI_DMA_EXT_SIZE
-+ *
-+ *
-+ * The size of struct fcp_cmnd = 32 bytes.
-+ * The size of struct fcp_rsp = 160 bytes.
-+	 * The size of struct ulp_bde64 = 12 bytes and the driver can only
-+ * support LPFC_SCSI_INITIAL_BPL_SIZE (3) S/G segments for scsi data.
-+ * One struct ulp_bde64 is used for each of the struct fcp_cmnd and
-+ * struct fcp_rsp
-+ *
-+	 * Total usage for each I/O is 32 + 160 + (2 * 12) +
-+ * (4 * 12) = 264 bytes.
-+ */
-+
-+ INIT_LIST_HEAD(&psb->dma_ext.list);
-+
-+ psb->dma_ext.virt = pci_pool_alloc(phba->lpfc_scsi_dma_ext_pool,
-+ GFP_ATOMIC, &psb->dma_ext.phys);
-+ if (!psb->dma_ext.virt) {
-+ mempool_free(psb, phba->scsibuf_mem_pool);
-+ return NULL;
-+ }
-+
-+ /* Save virtual ptrs to FCP Command, Response, and BPL */
-+ ptr = (uint8_t *) psb->dma_ext.virt;
-+
-+ memset(ptr, 0, LPFC_SCSI_DMA_EXT_SIZE);
-+ psb->fcp_cmnd = (struct fcp_cmnd *) ptr;
-+ ptr += sizeof (struct fcp_cmnd);
-+ psb->fcp_rsp = (struct fcp_rsp *) ptr;
-+ ptr += (sizeof (struct fcp_rsp));
-+ psb->fcp_bpl = (struct ulp_bde64 *) ptr;
-+ psb->scsi_hba = phba;
-+
-+ /* Since this is for a FCP cmd, the first 2 BDEs in the BPL are always
-+	 * the FCP CMND and FCP RSP, so let's just set it up right here.
-+ */
-+ bpl = psb->fcp_bpl;
-+ /* ptr points to physical address of FCP CMD */
-+ pdma_phys = psb->dma_ext.phys;
-+ bpl->addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys));
-+ bpl->addrLow = le32_to_cpu(putPaddrLow(pdma_phys));
-+ bpl->tus.f.bdeSize = sizeof (struct fcp_cmnd);
-+ bpl->tus.f.bdeFlags = BUFF_USE_CMND;
-+ bpl->tus.w = le32_to_cpu(bpl->tus.w);
-+ bpl++;
-+
-+ /* Setup FCP RSP */
-+ pdma_phys += sizeof (struct fcp_cmnd);
-+ bpl->addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys));
-+ bpl->addrLow = le32_to_cpu(putPaddrLow(pdma_phys));
-+ bpl->tus.f.bdeSize = sizeof (struct fcp_rsp);
-+ bpl->tus.f.bdeFlags = (BUFF_USE_CMND | BUFF_USE_RCV);
-+ bpl->tus.w = le32_to_cpu(bpl->tus.w);
-+ bpl++;
-+
-+ /* Since the IOCB for the FCP I/O is built into the struct
-+	 * lpfc_scsi_buf, let's set up what we can right here.
-+ */
-+ pdma_phys += (sizeof (struct fcp_rsp));
-+ cmd = &psb->cur_iocbq.iocb;
-+ cmd->un.fcpi64.bdl.ulpIoTag32 = 0;
-+ cmd->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys);
-+ cmd->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys);
-+ cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64));
-+ cmd->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDL;
-+ cmd->ulpBdeCount = 1;
-+ cmd->ulpClass = CLASS3;
-+
-+ return (psb);
-+}
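/*
 * Editor's sketch (not from the original patch): the sizing comment in
 * lpfc_get_scsi_buf() budgets one DMA extension per I/O as FCP CMND +
 * FCP RSP + six 12-byte BPL entries.  The constants below repeat the sizes
 * quoted in that comment purely for illustration; they are assumptions,
 * not the driver's real structure definitions.
 */
#include <assert.h>
#include <stdio.h>

#define FCP_CMND_SIZE        32u  /* sizeof(struct fcp_cmnd) per the comment  */
#define FCP_RSP_SIZE        160u  /* sizeof(struct fcp_rsp) per the comment   */
#define BDE64_SIZE           12u  /* sizeof(struct ulp_bde64) per the comment */
#define INITIAL_DATA_BDES     4u  /* data BDEs budgeted in the initial BPL    */

int main(void)
{
	/* CMND BDE + RSP BDE + data BDEs, all carried in the same buffer. */
	unsigned ext = FCP_CMND_SIZE + FCP_RSP_SIZE +
		       (2u + INITIAL_DATA_BDES) * BDE64_SIZE;

	assert(ext == 264u);	/* matches the 264-byte figure quoted above */
	printf("per-I/O DMA extension: %u bytes\n", ext);
	return 0;
}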
-+
-+void
-+lpfc_free_scsi_buf(struct lpfc_scsi_buf * psb)
-+{
-+ struct lpfc_hba *phba = psb->scsi_hba;
-+ struct lpfc_dmabuf *pbpl, *next_bpl;
-+
-+ /*
-+ * There are only two special cases to consider. (1) the scsi command
-+ * requested scatter-gather usage or (2) the scsi command allocated
-+ * a request buffer, but did not request use_sg. There is a third
-+ * case, but it does not require resource deallocation.
-+ */
-+
-+ if ((psb->seg_cnt > 0) && (psb->pCmd->use_sg)) {
-+ /*
-+ * Since the segment count is nonzero, the scsi command
-+ * requested scatter-gather usage and the driver allocated
-+		 * additional memory buffers to chain BPLs. Traverse this list
-+		 * and release those resources before freeing the parent
-+ * structure.
-+ */
-+ dma_unmap_sg(&phba->pcidev->dev, psb->pCmd->request_buffer,
-+ psb->seg_cnt, psb->pCmd->sc_data_direction);
-+
-+ list_for_each_entry_safe(pbpl, next_bpl,
-+ &psb->dma_ext.list, list) {
-+ lpfc_mbuf_free(phba, pbpl->virt, pbpl->phys);
-+ list_del(&pbpl->list);
-+ kfree(pbpl);
-+ }
-+ } else {
-+ if ((psb->nonsg_phys) && (psb->pCmd->request_bufflen)) {
-+ /*
-+ * Since either the segment count or the use_sg
-+ * value is zero, the scsi command did not request
-+ * scatter-gather usage and no additional buffers were
-+ * required. Just unmap the dma single resource.
-+ */
-+ dma_unmap_single(&phba->pcidev->dev, psb->nonsg_phys,
-+ psb->pCmd->request_bufflen,
-+ psb->pCmd->sc_data_direction);
-+ }
-+ }
-+
-+ /*
-+ * Release the pci pool resource and clean up the scsi buffer. Neither
-+ * are required now that the IO has completed.
-+ */
-+ pci_pool_free(phba->lpfc_scsi_dma_ext_pool, psb->dma_ext.virt,
-+ psb->dma_ext.phys);
-+ mempool_free(psb, phba->scsibuf_mem_pool);
-+}
-+
-+static int
-+lpfc_os_prep_io(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd)
-+{
-+ struct fcp_cmnd *fcp_cmnd;
-+ struct ulp_bde64 *topbpl = NULL;
-+ struct ulp_bde64 *bpl;
-+ struct lpfc_dmabuf *bmp;
-+ struct lpfc_dmabuf *head_bmp;
-+ IOCB_t *cmd;
-+ struct scsi_cmnd *cmnd;
-+ struct scatterlist *sgel = NULL;
-+ struct scatterlist *sgel_begin = NULL;
-+ dma_addr_t physaddr;
-+ uint32_t i;
-+ uint32_t num_bmps = 1, num_bde = 0, max_bde;
-+ uint16_t use_sg;
-+ int datadir;
-+ int dma_error;
-+
-+ bpl = lpfc_cmd->fcp_bpl;
-+ fcp_cmnd = lpfc_cmd->fcp_cmnd;
-+
-+ bpl += 2; /* Bump past FCP CMND and FCP RSP */
-+ max_bde = LPFC_SCSI_INITIAL_BPL_SIZE - 1;
-+
-+ cmnd = lpfc_cmd->pCmd;
-+ cmd = &lpfc_cmd->cur_iocbq.iocb;
-+
-+ /* These are needed if we chain BPLs */
-+ head_bmp = &(lpfc_cmd->dma_ext);
-+ use_sg = cmnd->use_sg;
-+
-+ /*
-+ * Fill in the FCP CMND
-+ */
-+ memcpy(&fcp_cmnd->fcpCdb[0], cmnd->cmnd, 16);
-+
-+ if (cmnd->device->tagged_supported) {
-+ switch (cmnd->tag) {
-+ case HEAD_OF_QUEUE_TAG:
-+ fcp_cmnd->fcpCntl1 = HEAD_OF_Q;
-+ break;
-+ case ORDERED_QUEUE_TAG:
-+ fcp_cmnd->fcpCntl1 = ORDERED_Q;
-+ break;
-+ default:
-+ fcp_cmnd->fcpCntl1 = SIMPLE_Q;
-+ break;
-+ }
-+ } else {
-+ fcp_cmnd->fcpCntl1 = 0;
-+ }
-+
-+ datadir = cmnd->sc_data_direction;
-+
-+ if (use_sg) {
-+ /*
-+ * Get a local pointer to the scatter-gather list. The
-+ * scatter-gather list head must be preserved since
-+ * sgel is incremented in the loop. The driver must store
-+		 * the segment count returned from dma_map_sg for calls to
-+		 * dma_unmap_sg later on because the use_sg field in the
-+		 * scsi_cmnd is a count of physical memory pages, whereas the
-+		 * seg_cnt is a count of dma-mappings used by the IOMMU to
-+		 * map the use_sg pages. They are not the same in most
-+		 * cases for those architectures that implement an IOMMU.
-+ */
-+ sgel = (struct scatterlist *)cmnd->request_buffer;
-+ sgel_begin = sgel;
-+ lpfc_cmd->seg_cnt = dma_map_sg(&phba->pcidev->dev, sgel,
-+ use_sg, datadir);
-+
-+ /* return error if we cannot map sg list */
-+ if (lpfc_cmd->seg_cnt == 0)
-+ return 1;
-+
-+ /* scatter-gather list case */
-+ for (i = 0; i < lpfc_cmd->seg_cnt; i++) {
-+ /* Check to see if current BPL is full of BDEs */
-+ /* If this is last BDE and there is one left in */
-+ /* current BPL, use it. */
-+ if (num_bde == max_bde) {
-+ bmp = kmalloc(sizeof (struct lpfc_dmabuf),
-+ GFP_ATOMIC);
-+ if (bmp == 0) {
-+ goto error_out;
-+ }
-+ memset(bmp, 0, sizeof (struct lpfc_dmabuf));
-+ bmp->virt =
-+ lpfc_mbuf_alloc(phba, 0, &bmp->phys);
-+ if (!bmp->virt) {
-+ kfree(bmp);
-+ goto error_out;
-+ }
-+ max_bde = ((1024 / sizeof(struct ulp_bde64))-3);
-+ /* Fill in continuation entry to next bpl */
-+ bpl->addrHigh =
-+ le32_to_cpu(putPaddrHigh(bmp->phys));
-+ bpl->addrLow =
-+ le32_to_cpu(putPaddrLow(bmp->phys));
-+ bpl->tus.f.bdeFlags = BPL64_SIZE_WORD;
-+ num_bde++;
-+ if (num_bmps == 1) {
-+ cmd->un.fcpi64.bdl.bdeSize += (num_bde *
-+ sizeof (struct ulp_bde64));
-+ } else {
-+ topbpl->tus.f.bdeSize = (num_bde *
-+ sizeof (struct ulp_bde64));
-+ topbpl->tus.w =
-+ le32_to_cpu(topbpl->tus.w);
-+ }
-+ topbpl = bpl;
-+ bpl = (struct ulp_bde64 *) bmp->virt;
-+ list_add(&bmp->list, &head_bmp->list);
-+ num_bde = 0;
-+ num_bmps++;
-+ }
-+
-+ physaddr = sg_dma_address(sgel);
-+
-+ bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
-+ bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
-+ bpl->tus.f.bdeSize = sg_dma_len(sgel);
-+ if (datadir == DMA_TO_DEVICE)
-+ bpl->tus.f.bdeFlags = 0;
-+ else
-+ bpl->tus.f.bdeFlags = BUFF_USE_RCV;
-+ bpl->tus.w = le32_to_cpu(bpl->tus.w);
-+ bpl++;
-+ sgel++;
-+ num_bde++;
-+ } /* end for loop */
-+
-+ if (datadir == DMA_TO_DEVICE) {
-+ cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
-+ fcp_cmnd->fcpCntl3 = WRITE_DATA;
-+
-+ phba->fc4OutputRequests++;
-+ } else {
-+ cmd->ulpCommand = CMD_FCP_IREAD64_CR;
-+ cmd->ulpPU = PARM_READ_CHECK;
-+ cmd->un.fcpi.fcpi_parm = cmnd->request_bufflen;
-+ fcp_cmnd->fcpCntl3 = READ_DATA;
-+
-+ phba->fc4InputRequests++;
-+ }
-+ } else if (cmnd->request_buffer && cmnd->request_bufflen) {
-+ physaddr = dma_map_single(&phba->pcidev->dev,
-+ cmnd->request_buffer,
-+ cmnd->request_bufflen,
-+ datadir);
-+ dma_error = dma_mapping_error(physaddr);
-+ if (dma_error){
-+ lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
-+ "%d:0718 Unable to dma_map_single "
-+ "request_buffer: x%x\n",
-+ phba->brd_no, dma_error);
-+ return 1;
-+ }
-+
-+ /* no scatter-gather list case */
-+ lpfc_cmd->nonsg_phys = physaddr;
-+ bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
-+ bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
-+ bpl->tus.f.bdeSize = cmnd->request_bufflen;
-+ if (datadir == DMA_TO_DEVICE) {
-+ cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
-+ fcp_cmnd->fcpCntl3 = WRITE_DATA;
-+ bpl->tus.f.bdeFlags = 0;
-+
-+ phba->fc4OutputRequests++;
-+ } else {
-+ cmd->ulpCommand = CMD_FCP_IREAD64_CR;
-+ cmd->ulpPU = PARM_READ_CHECK;
-+ cmd->un.fcpi.fcpi_parm = cmnd->request_bufflen;
-+ fcp_cmnd->fcpCntl3 = READ_DATA;
-+ bpl->tus.f.bdeFlags = BUFF_USE_RCV;
-+
-+ phba->fc4InputRequests++;
-+ }
-+ bpl->tus.w = le32_to_cpu(bpl->tus.w);
-+ num_bde = 1;
-+ bpl++;
-+ } else {
-+ cmd->ulpCommand = CMD_FCP_ICMND64_CR;
-+ cmd->un.fcpi.fcpi_parm = 0;
-+ fcp_cmnd->fcpCntl3 = 0;
-+
-+ phba->fc4ControlRequests++;
-+ }
-+
-+ bpl->addrHigh = 0;
-+ bpl->addrLow = 0;
-+ bpl->tus.w = 0;
-+ if (num_bmps == 1) {
-+ cmd->un.fcpi64.bdl.bdeSize +=
-+ (num_bde * sizeof (struct ulp_bde64));
-+ } else {
-+ topbpl->tus.f.bdeSize = (num_bde * sizeof (struct ulp_bde64));
-+ topbpl->tus.w = le32_to_cpu(topbpl->tus.w);
-+ }
-+ cmd->ulpBdeCount = 1;
-+ cmd->ulpLe = 1; /* Set the LE bit in the iocb */
-+
-+ /* set the Data Length field in the FCP CMND accordingly */
-+ fcp_cmnd->fcpDl = be32_to_cpu(cmnd->request_bufflen);
-+
-+ return 0;
-+
-+error_out:
-+ /*
-+ * Allocation of a chained BPL failed, unmap the sg list and return
-+ * error. This will ultimately cause lpfc_free_scsi_buf to be called
-+ * which will handle the rest of the cleanup. Set seg_cnt back to zero
-+ * to avoid double unmaps of the sg resources.
-+ */
-+ dma_unmap_sg(&phba->pcidev->dev, sgel_begin, lpfc_cmd->seg_cnt,
-+ datadir);
-+ lpfc_cmd->seg_cnt = 0;
-+ return 1;
-+}
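/*
 * Editor's sketch (not part of the original patch): the scatter-gather loop
 * in lpfc_os_prep_io() starts with room for LPFC_SCSI_INITIAL_BPL_SIZE - 1
 * data BDEs in the buffer from lpfc_get_scsi_buf(), and whenever it runs out
 * it chains a fresh 1024-byte mbuf, spending one entry on the continuation
 * BDE and gaining (1024 / 12) - 3 data slots.  The standalone model below
 * mirrors only that bookkeeping to predict how many chained BPL buffers a
 * segment count needs; the constants are assumptions read off the code above.
 */
#include <stdio.h>

#define BDE64_SIZE            12u
#define INITIAL_DATA_BDES      2u                       /* LPFC_SCSI_INITIAL_BPL_SIZE - 1 */
#define CHAINED_DATA_BDES    ((1024u / BDE64_SIZE) - 3u) /* 82 per chained mbuf */

/* Return how many extra BPL buffers (num_bmps - 1 in the driver's terms)
 * a scatter list of nseg entries would require. */
static unsigned chained_bpls_needed(unsigned nseg)
{
	unsigned num_bde = 0, max_bde = INITIAL_DATA_BDES, chained = 0, i;

	for (i = 0; i < nseg; i++) {
		if (num_bde == max_bde) {	/* current BPL is full */
			chained++;
			max_bde = CHAINED_DATA_BDES;
			num_bde = 0;		/* continuation written, new BPL starts empty */
		}
		num_bde++;			/* one data BDE per segment */
	}
	return chained;
}

int main(void)
{
	unsigned nsegs[] = { 1, 2, 3, 84, 85, 256 };
	unsigned i;

	for (i = 0; i < sizeof(nsegs) / sizeof(nsegs[0]); i++)
		printf("%3u segments -> %u chained BPL buffer(s)\n",
		       nsegs[i], chained_bpls_needed(nsegs[i]));
	return 0;
}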
-+
-+static void
-+lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd)
-+{
-+ struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
-+ struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
-+ struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
-+ struct lpfc_hba *phba = lpfc_cmd->scsi_hba;
-+ uint32_t fcpi_parm = lpfc_cmd->cur_iocbq.iocb.un.fcpi.fcpi_parm;
-+ uint32_t resp_info = fcprsp->rspStatus2;
-+ uint32_t scsi_status = fcprsp->rspStatus3;
-+ uint32_t host_status = DID_OK;
-+ uint32_t rsplen = 0;
-+
-+ /*
-+ * If this is a task management command, there is no
-+ * scsi packet associated with this lpfc_cmd. The driver
-+ * consumes it.
-+ */
-+ if (fcpcmd->fcpCntl2) {
-+ scsi_status = 0;
-+ goto out;
-+ }
-+
-+ lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
-+ "%d:0730 FCP command failed: RSP "
-+ "Data: x%x x%x x%x x%x x%x x%x\n",
-+ phba->brd_no, resp_info, scsi_status,
-+ be32_to_cpu(fcprsp->rspResId),
-+ be32_to_cpu(fcprsp->rspSnsLen),
-+ be32_to_cpu(fcprsp->rspRspLen),
-+ fcprsp->rspInfo3);
-+
-+ if (resp_info & RSP_LEN_VALID) {
-+ rsplen = be32_to_cpu(fcprsp->rspRspLen);
-+ if ((rsplen != 0 && rsplen != 4 && rsplen != 8) ||
-+ (fcprsp->rspInfo3 != RSP_NO_FAILURE)) {
-+ host_status = DID_ERROR;
-+ goto out;
-+ }
-+ }
-+
-+ if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
-+ uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
-+ if (snslen > SCSI_SENSE_BUFFERSIZE)
-+ snslen = SCSI_SENSE_BUFFERSIZE;
-+
-+ memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
-+ }
-+
-+ cmnd->resid = 0;
-+ if (resp_info & RESID_UNDER) {
-+ cmnd->resid = be32_to_cpu(fcprsp->rspResId);
-+
-+ lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
-+ "%d:0716 FCP Read Underrun, expected %d, "
-+ "residual %d Data: x%x x%x x%x\n", phba->brd_no,
-+ be32_to_cpu(fcpcmd->fcpDl), cmnd->resid,
-+ fcpi_parm, cmnd->cmnd[0], cmnd->underflow);
-+
-+ /*
-+ * The cmnd->underflow is the minimum number of bytes that must
-+		 * be transferred for this command. Provided a sense condition is
-+ * not present, make sure the actual amount transferred is at
-+ * least the underflow value or fail.
-+ */
-+ if (!(resp_info & SNS_LEN_VALID) &&
-+ (scsi_status == SAM_STAT_GOOD) &&
-+ (cmnd->request_bufflen - cmnd->resid) < cmnd->underflow) {
-+ lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
-+ "%d:0717 FCP command x%x residual "
-+ "underrun converted to error "
-+ "Data: x%x x%x x%x\n", phba->brd_no,
-+ cmnd->cmnd[0], cmnd->request_bufflen,
-+ cmnd->resid, cmnd->underflow);
-+
-+ host_status = DID_ERROR;
-+ }
-+ } else if (resp_info & RESID_OVER) {
-+ lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
-+ "%d:0720 FCP command x%x residual "
-+ "overrun error. Data: x%x x%x \n",
-+ phba->brd_no, cmnd->cmnd[0],
-+ cmnd->request_bufflen, cmnd->resid);
-+ host_status = DID_ERROR;
-+
-+ /*
-+ * Check SLI validation that all the transfer was actually done
-+ * (fcpi_parm should be zero). Apply check only to reads.
-+ */
-+ } else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm &&
-+ (cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
-+ lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
-+ "%d:0734 FCP Read Check Error Data: "
-+ "x%x x%x x%x x%x\n", phba->brd_no,
-+ be32_to_cpu(fcpcmd->fcpDl),
-+ be32_to_cpu(fcprsp->rspResId),
-+ fcpi_parm, cmnd->cmnd[0]);
-+ host_status = DID_ERROR;
-+ cmnd->resid = cmnd->request_bufflen;
-+ }
-+
-+ out:
-+ cmnd->result = ScsiResult(host_status, scsi_status);
-+}
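/*
 * Editor's sketch (not part of the original patch): lpfc_handle_fcp_err()
 * escalates a RESID_UNDER completion to DID_ERROR only when no sense data
 * came back, the SCSI status is GOOD, and the bytes actually transferred
 * fall short of cmnd->underflow.  The helper below restates just that
 * predicate with plain numbers; in the driver the inputs come from the
 * FCP_RSP payload and the midlayer command.
 */
#include <stdbool.h>
#include <stdio.h>

static bool underrun_is_error(bool sense_valid, unsigned scsi_status,
			      unsigned request_len, unsigned resid,
			      unsigned underflow)
{
	unsigned transferred = request_len - resid;

	return !sense_valid &&
	       scsi_status == 0 /* SAM_STAT_GOOD */ &&
	       transferred < underflow;
}

int main(void)
{
	/* 4 KB read, 1 KB short, caller insists on at least 4 KB: error. */
	printf("%d\n", underrun_is_error(false, 0, 4096, 1024, 4096));
	/* Same shortfall but sense data present: let the midlayer decide. */
	printf("%d\n", underrun_is_error(true, 0, 4096, 1024, 4096));
	return 0;
}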
-+
-+void
-+lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
-+ struct lpfc_iocbq *pIocbOut)
-+{
-+ int depth, pend_cnt;
-+ struct lpfc_scsi_buf *lpfc_cmd =
-+ (struct lpfc_scsi_buf *) pIocbIn->context1;
-+ struct lpfc_target *target = lpfc_cmd->target;
-+ struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
-+ struct scsi_device *sdev;
-+ int result;
-+
-+ lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
-+ lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
-+
-+ target->iodonecnt++;
-+
-+ if (lpfc_cmd->status) {
-+ target->errorcnt++;
-+
-+ if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
-+ (lpfc_cmd->result & IOERR_DRVR_MASK))
-+ lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
-+ else if (lpfc_cmd->status >= IOSTAT_CNT)
-+ lpfc_cmd->status = IOSTAT_DEFAULT;
-+
-+ lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
-+ "%d:0729 FCP cmd x%x failed <%d/%d> status: "
-+ "x%x result: x%x Data: x%x x%x\n",
-+ phba->brd_no, cmd->cmnd[0], cmd->device->id,
-+ cmd->device->lun, lpfc_cmd->status,
-+ lpfc_cmd->result, pIocbOut->iocb.ulpContext,
-+ lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
-+
-+ switch (lpfc_cmd->status) {
-+ case IOSTAT_FCP_RSP_ERROR:
-+ /* Call FCP RSP handler to determine result */
-+ lpfc_handle_fcp_err(lpfc_cmd);
-+ break;
-+ case IOSTAT_NPORT_BSY:
-+ case IOSTAT_FABRIC_BSY:
-+ cmd->result = ScsiResult(DID_BUS_BUSY, 0);
-+ break;
-+ case IOSTAT_LOCAL_REJECT:
-+ if (lpfc_cmd->result == IOERR_LOOP_OPEN_FAILURE)
-+ lpfc_discq_post_event(phba, target->pnode,
-+ NULL,
-+ LPFC_EVT_OPEN_LOOP);
-+ cmd->result = ScsiResult(DID_ERROR, 0);
-+ break;
-+ default:
-+ cmd->result = ScsiResult(DID_ERROR, 0);
-+ break;
-+ }
-+
-+ if (target->pnode) {
-+ if(target->pnode->nlp_state != NLP_STE_MAPPED_NODE)
-+ cmd->result = ScsiResult(DID_BUS_BUSY,
-+ SAM_STAT_BUSY);
-+ }
-+ else {
-+ cmd->result = ScsiResult(DID_NO_CONNECT, 0);
-+ }
-+ } else {
-+ cmd->result = ScsiResult(DID_OK, 0);
-+ }
-+
-+ if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
-+ uint32_t *lp = (uint32_t *)cmd->sense_buffer;
-+
-+ lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
-+ "%d:0710 Iodone <%d/%d> cmd %p, error x%x "
-+ "SNS x%x x%x Data: x%x x%x\n",
-+ phba->brd_no, cmd->device->id,
-+ cmd->device->lun, cmd, cmd->result,
-+ *lp, *(lp + 3), cmd->retries, cmd->resid);
-+ }
-+
-+ result = cmd->result;
-+ sdev = cmd->device;
-+
-+ lpfc_free_scsi_buf(lpfc_cmd);
-+ cmd->host_scribble = NULL;
-+ cmd->scsi_done(cmd);
-+
-+ /*
-+ * Check for queue full. If the lun is reporting queue full, then
-+ * back off the lun queue depth to prevent target overloads.
-+ */
-+ if (result == SAM_STAT_TASK_SET_FULL) {
-+ pend_cnt = lpfc_sli_sum_iocb_lun(phba,
-+ &phba->sli.ring[phba->sli.fcp_ring],
-+ sdev->id, sdev->lun);
-+
-+ spin_unlock_irq(phba->host->host_lock);
-+ depth = scsi_track_queue_full(sdev, pend_cnt);
-+ spin_lock_irq(phba->host->host_lock);
-+
-+ if (depth) {
-+ lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
-+ "%d:0711 detected queue full - lun queue depth "
-+			"adjusted to %d.\n", phba->brd_no, depth);
-+ }
-+ }
-+}
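/*
 * Editor's sketch (not part of the original patch): when a LUN returns
 * TASK SET FULL, the completion path above counts the I/Os still pending on
 * that LUN and hands the count to scsi_track_queue_full() so the midlayer
 * can shrink the LUN queue depth.  The toy stand-in below only illustrates
 * the back-off idea -- clamp the depth to what was in flight, never below
 * one -- and is an assumption, not the midlayer's actual algorithm.
 */
#include <stdio.h>

static unsigned backoff_depth(unsigned current_depth, unsigned pending)
{
	if (pending == 0)
		pending = 1;
	return pending < current_depth ? pending : current_depth;
}

int main(void)
{
	unsigned depth = 30;

	depth = backoff_depth(depth, 18);   /* target choked with 18 in flight */
	printf("new LUN queue depth: %u\n", depth);
	return 0;
}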
-+
-+static int
-+lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba,
-+ struct lpfc_scsi_buf *lpfc_cmd,
-+ uint8_t task_mgmt_cmd)
-+{
-+
-+ struct lpfc_sli *psli;
-+ struct lpfc_iocbq *piocbq;
-+ IOCB_t *piocb;
-+ struct fcp_cmnd *fcp_cmnd;
-+ struct lpfc_nodelist *ndlp = lpfc_cmd->target->pnode;
-+
-+ if ((ndlp == 0) || (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
-+ return 0;
-+ }
-+
-+ /* allocate an iocb command */
-+ psli = &phba->sli;
-+ piocbq = &(lpfc_cmd->cur_iocbq);
-+ piocb = &piocbq->iocb;
-+
-+
-+ fcp_cmnd = lpfc_cmd->fcp_cmnd;
-+ putLunHigh(fcp_cmnd->fcpLunMsl, lpfc_cmd->lun);
-+	putLunLow(fcp_cmnd->fcpLunLsl, lpfc_cmd->lun);
-+ fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
-+ fcp_cmnd->fcpCntl3 = 0;
-+
-+ piocb->ulpCommand = CMD_FCP_ICMND64_CR;
-+
-+ piocb->ulpContext = ndlp->nlp_rpi;
-+ if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
-+ piocb->ulpFCP2Rcvy = 1;
-+ }
-+ piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);
-+
-+ /* ulpTimeout is only one byte */
-+ if (lpfc_cmd->timeout > 0xff) {
-+ /*
-+ * Do not timeout the command at the firmware level.
-+ * The driver will provide the timeout mechanism.
-+ */
-+ piocb->ulpTimeout = 0;
-+ } else {
-+ piocb->ulpTimeout = lpfc_cmd->timeout;
-+ }
-+
-+ switch (task_mgmt_cmd) {
-+ case FCP_LUN_RESET:
-+ /* Issue LUN Reset to TGT <num> LUN <num> */
-+ lpfc_printf_log(phba,
-+ KERN_INFO,
-+ LOG_FCP,
-+ "%d:0703 Issue LUN Reset to TGT %d LUN %d "
-+ "Data: x%x x%x\n",
-+ phba->brd_no,
-+ lpfc_cmd->target->scsi_id, lpfc_cmd->lun,
-+ ndlp->nlp_rpi, ndlp->nlp_flag);
-+
-+ break;
-+ case FCP_ABORT_TASK_SET:
-+ /* Issue Abort Task Set to TGT <num> LUN <num> */
-+ lpfc_printf_log(phba,
-+ KERN_INFO,
-+ LOG_FCP,
-+ "%d:0701 Issue Abort Task Set to TGT %d LUN %d "
-+ "Data: x%x x%x\n",
-+ phba->brd_no,
-+ lpfc_cmd->target->scsi_id, lpfc_cmd->lun,
-+ ndlp->nlp_rpi, ndlp->nlp_flag);
-+
-+ break;
-+ case FCP_TARGET_RESET:
-+ /* Issue Target Reset to TGT <num> */
-+ lpfc_printf_log(phba,
-+ KERN_INFO,
-+ LOG_FCP,
-+ "%d:0702 Issue Target Reset to TGT %d "
-+ "Data: x%x x%x\n",
-+ phba->brd_no,
-+ lpfc_cmd->target->scsi_id, ndlp->nlp_rpi,
-+ ndlp->nlp_flag);
-+ break;
-+ }
-+
-+ return (1);
-+}
-+
-+static int
-+lpfc_scsi_tgt_reset(struct lpfc_target * target, int id, struct lpfc_hba * phba)
-+{
-+ struct lpfc_iocbq *piocbq, *piocbqrsp;
-+ struct lpfc_scsi_buf * lpfc_cmd;
-+ struct lpfc_sli *psli = &phba->sli;
-+ int ret, retval = FAILED;
-+
-+ lpfc_cmd = lpfc_get_scsi_buf(phba, GFP_ATOMIC);
-+ if (!lpfc_cmd)
-+ goto out;
-+
-+ /*
-+ * The driver cannot count on any meaningful timeout value in the scsi
-+ * command. The timeout is chosen to be twice the ratov plus a window.
-+ */
-+ lpfc_cmd->timeout = (2 * phba->fc_ratov) + 3;
-+ lpfc_cmd->target = target;
-+ lpfc_cmd->lun = 0;
-+
-+ ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, FCP_TARGET_RESET);
-+ if (!ret)
-+ goto out_free_scsi_buf;
-+
-+ piocbq = &lpfc_cmd->cur_iocbq;
-+ piocbq->context1 = lpfc_cmd;
-+
-+ piocbqrsp = mempool_alloc(phba->iocb_mem_pool, GFP_ATOMIC);
-+ if (!piocbqrsp)
-+ goto out_free_scsi_buf;
-+
-+ /* First flush all outstanding commands on the txq for the target */
-+ lpfc_sli_abort_iocb_tgt(phba, &phba->sli.ring[phba->sli.fcp_ring],
-+ lpfc_cmd->target->scsi_id, LPFC_ABORT_TXQ);
-+
-+ memset(piocbqrsp, 0, sizeof (struct lpfc_iocbq));
-+
-+ piocbq->iocb_flag |= LPFC_IO_POLL;
-+
-+ ret = lpfc_sli_issue_iocb_wait_high_priority(phba,
-+ &phba->sli.ring[psli->fcp_ring],
-+ piocbq, SLI_IOCB_HIGH_PRIORITY,
-+ piocbqrsp);
-+ if (ret != IOCB_SUCCESS) {
-+ lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
-+ retval = FAILED;
-+ } else {
-+ lpfc_cmd->result = piocbqrsp->iocb.un.ulpWord[4];
-+ lpfc_cmd->status = piocbqrsp->iocb.ulpStatus;
-+ if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
-+ (lpfc_cmd->result & IOERR_DRVR_MASK))
-+ lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
-+ retval = SUCCESS;
-+ }
-+
-+	/* At this point the target reset has completed, and all outstanding
-+	 * txcmplq I/Os should have been aborted by the target.
-+	 * Unfortunately, not all targets abide by this, so we need
-+	 * to help them out a bit.
-+ */
-+ lpfc_sli_abort_iocb_tgt(phba, &phba->sli.ring[phba->sli.fcp_ring],
-+ lpfc_cmd->target->scsi_id, LPFC_ABORT_ALLQ);
-+
-+ /*
-+ * If the IOCB failed then free the memory resources. Otherwise,
-+ * the resources will be freed up by the completion handler.
-+ */
-+ if (ret == IOCB_TIMEDOUT)
-+ goto out;
-+
-+ mempool_free(piocbqrsp, phba->iocb_mem_pool);
-+
-+out_free_scsi_buf:
-+ lpfc_free_scsi_buf(lpfc_cmd);
-+out:
-+ return retval;
-+}
-+
-+
-+#define LPFC_RESET_WAIT 2
-+int
-+lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
-+{
-+ struct Scsi_Host *shost = cmnd->device->host;
-+ struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata[0];
-+ int ret = FAILED, i, err_count = 0;
-+ struct lpfc_target *target;
-+ int cnt, loopcnt;
-+
-+ /*
-+ * Since the driver manages a single bus device, reset all
-+ * targets known to the driver. Should any target reset
-+ * fail, this routine returns failure to the midlayer.
-+ */
-+ for (i = 0; i < MAX_FCP_TARGET; i++) {
-+ target = phba->device_queue_hash[i];
-+ if (!target)
-+ continue;
-+
-+ ret = lpfc_scsi_tgt_reset(target, i, phba);
-+ if (ret != SUCCESS) {
-+ lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
-+ "%d:0712 Bus Reset on target %d failed\n",
-+ phba->brd_no, i);
-+ err_count++;
-+ }
-+ }
-+
-+ loopcnt = 0;
-+ while((cnt = lpfc_sli_sum_iocb_host(phba,
-+ &phba->sli.ring[phba->sli.fcp_ring]))) {
-+ spin_unlock_irq(phba->host->host_lock);
-+ set_current_state(TASK_UNINTERRUPTIBLE);
-+ schedule_timeout(LPFC_RESET_WAIT*HZ);
-+ spin_lock_irq(phba->host->host_lock);
-+
-+ if (++loopcnt
-+ > (2 * phba->cfg_nodev_tmo)/LPFC_RESET_WAIT)
-+ break;
-+ }
-+
-+ if (cnt) {
-+ /* flush all outstanding commands on the host */
-+ i = lpfc_sli_abort_iocb_host(phba,
-+ &phba->sli.ring[phba->sli.fcp_ring],
-+ LPFC_ABORT_ALLQ);
-+
-+ lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
-+ "%d:0715 Bus Reset I/O flush failure: cnt x%x left x%x\n",
-+ phba->brd_no, cnt, i);
-+ }
-+
-+ if (!err_count)
-+ ret = SUCCESS;
-+ else
-+ ret = FAILED;
-+
-+ lpfc_printf_log(phba,
-+ KERN_ERR,
-+ LOG_FCP,
-+ "%d:0714 SCSI layer issued Bus Reset Data: x%x\n",
-+ phba->brd_no, ret);
-+
-+ return ret;
-+}
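/*
 * Editor's sketch (not part of the original patch): the bus and LUN reset
 * handlers share one pattern -- poll an "outstanding I/O" count, sleep
 * LPFC_RESET_WAIT seconds per pass with the host lock dropped, and give up
 * after roughly 2 * nodev_tmo.  The userspace sketch below captures that
 * bounded poll; the predicate and the sleep granularity are stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define RESET_WAIT_SECS 2	/* mirrors LPFC_RESET_WAIT above */

/* Poll pending() until it reports zero or the time budget is spent. */
static bool wait_for_drain(unsigned (*pending)(void *), void *arg,
			   unsigned nodev_tmo_secs)
{
	unsigned loops = 0;

	while (pending(arg)) {
		sleep(RESET_WAIT_SECS);
		if (++loops > (2 * nodev_tmo_secs) / RESET_WAIT_SECS)
			return false;	/* timed out, work remains */
	}
	return true;
}

static unsigned fake_pending(void *arg)
{
	unsigned *count = arg;

	return *count ? (*count)-- : 0;	/* drains one I/O per poll */
}

int main(void)
{
	unsigned outstanding = 3;

	printf("drained: %s\n",
	       wait_for_drain(fake_pending, &outstanding, 30) ? "yes" : "no");
	return 0;
}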
-+
-+
-+int
-+lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
-+{
-+ struct lpfc_hba *phba =
-+ (struct lpfc_hba *) cmnd->device->host->hostdata[0];
-+ struct lpfc_sli *psli = &phba->sli;
-+ struct lpfc_target *targetp = cmnd->device->hostdata;
-+ struct lpfc_nodelist *ndlp;
-+ struct lpfc_iocbq *piocbq;
-+ struct lpfc_scsi_buf *lpfc_cmd;
-+ IOCB_t *piocb;
-+ int err = 0;
-+ uint16_t nlp_state;
-+
-+ targetp->qcmdcnt++;
-+
-+ /*
-+ * The target pointer is guaranteed not to be NULL because the driver
-+ * only clears the device->hostdata field in lpfc_slave_destroy. This
-+ * approach guarantees no further IO calls on this target.
-+ */
-+ ndlp = targetp->pnode;
-+ if (!ndlp) {
-+ cmnd->result = ScsiResult(DID_NO_CONNECT, 0);
-+ goto out_fail_command;
-+ }
-+
-+ nlp_state = ndlp->nlp_state;
-+
-+ /*
-+ * A Fibre Channel is present and functioning only when the node state
-+ * is MAPPED. Any other state is a failure.
-+ */
-+ if (nlp_state != NLP_STE_MAPPED_NODE) {
-+ if ((nlp_state == NLP_STE_UNMAPPED_NODE) ||
-+ (nlp_state == NLP_STE_UNUSED_NODE)) {
-+ cmnd->result = ScsiResult(DID_NO_CONNECT, 0);
-+ goto out_fail_command;
-+ }
-+ /*
-+ * The device is most likely recovered and the driver
-+ * needs a bit more time to finish. Ask the midlayer
-+ * to retry.
-+ */
-+ goto out_host_busy;
-+ }
-+
-+ lpfc_cmd = lpfc_get_scsi_buf(phba, GFP_ATOMIC);
-+ if (!lpfc_cmd)
-+ goto out_host_busy;
-+
-+ /*
-+ * Store the midlayer's command structure for the completion phase
-+ * and complete the command initialization.
-+ */
-+ cmnd->scsi_done = done;
-+ cmnd->host_scribble = (unsigned char *)lpfc_cmd;
-+
-+ lpfc_cmd->target = targetp;
-+ lpfc_cmd->lun = cmnd->device->lun;
-+ lpfc_cmd->timeout = 0;
-+ lpfc_cmd->pCmd = cmnd;
-+ putLunHigh(lpfc_cmd->fcp_cmnd->fcpLunMsl, lpfc_cmd->lun);
-+ putLunLow(lpfc_cmd->fcp_cmnd->fcpLunLsl, lpfc_cmd->lun);
-+
-+ err = lpfc_os_prep_io(phba, lpfc_cmd);
-+ if (err)
-+ goto out_host_busy_free_buf;
-+
-+ piocbq = &(lpfc_cmd->cur_iocbq);
-+ piocb = &piocbq->iocb;
-+ piocb->ulpTimeout = lpfc_cmd->timeout;
-+ piocbq->context1 = lpfc_cmd;
-+ piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
-+
-+ piocbq->iocb.ulpContext = ndlp->nlp_rpi;
-+ if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
-+ piocbq->iocb.ulpFCP2Rcvy = 1;
-+ }
-+
-+ piocbq->iocb.ulpClass = (ndlp->nlp_fcp_info & 0x0f);
-+
-+ err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring], piocbq,
-+ SLI_IOCB_RET_IOCB);
-+ if (err)
-+ goto out_host_busy_free_buf;
-+ return 0;
-+
-+ out_host_busy_free_buf:
-+ lpfc_free_scsi_buf(lpfc_cmd);
-+ cmnd->host_scribble = NULL;
-+ out_host_busy:
-+ targetp->iodonecnt++;
-+ targetp->errorcnt++;
-+ return SCSI_MLQUEUE_HOST_BUSY;
-+
-+ out_fail_command:
-+ targetp->iodonecnt++;
-+ targetp->errorcnt++;
-+ done(cmnd);
-+ return 0;
-+}
-+
-+int
-+lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
-+{
-+ struct Scsi_Host *shost = cmnd->device->host;
-+ struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata[0];
-+ struct lpfc_sli *psli = &phba->sli;
-+ struct lpfc_scsi_buf *lpfc_cmd;
-+ struct lpfc_iocbq *piocbq, *piocbqrsp = NULL;
-+ struct lpfc_target *target = cmnd->device->hostdata;
-+ int ret, retval = FAILED;
-+ int cnt, loopcnt;
-+
-+ /*
-+	 * If the target is not in a MAPPED state, delay the reset until
-+	 * the target is rediscovered or the nodev timeout fires.
-+ */
-+ while ( 1 ) {
-+ if (!target->pnode)
-+ break;
-+
-+ if (target->pnode->nlp_state != NLP_STE_MAPPED_NODE) {
-+ spin_unlock_irq(phba->host->host_lock);
-+ set_current_state(TASK_UNINTERRUPTIBLE);
-+ schedule_timeout( HZ/2);
-+ spin_lock_irq(phba->host->host_lock);
-+ }
-+ if ((target->pnode) &&
-+ (target->pnode->nlp_state == NLP_STE_MAPPED_NODE))
-+ break;
-+ }
-+
-+ lpfc_cmd = lpfc_get_scsi_buf(phba, GFP_ATOMIC);
-+ if (!lpfc_cmd)
-+ goto out;
-+
-+ lpfc_cmd->timeout = 60; /* set command timeout to 60 seconds */
-+ lpfc_cmd->scsi_hba = phba;
-+ lpfc_cmd->target = target;
-+ lpfc_cmd->lun = cmnd->device->lun;
-+
-+ ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, FCP_LUN_RESET);
-+ if (!ret)
-+ goto out_free_scsi_buf;
-+
-+ piocbq = &lpfc_cmd->cur_iocbq;
-+ piocbq->context1 = lpfc_cmd;
-+
-+ /* get a buffer for this IOCB command response */
-+ piocbqrsp = mempool_alloc(phba->iocb_mem_pool, GFP_ATOMIC);
-+ if(!piocbqrsp)
-+ goto out_free_scsi_buf;
-+
-+ /* First flush all outstanding commands on the txq for the lun */
-+ lpfc_sli_abort_iocb_lun(phba,
-+ &phba->sli.ring[phba->sli.fcp_ring],
-+ cmnd->device->id,
-+ cmnd->device->lun, LPFC_ABORT_TXQ);
-+
-+ memset(piocbqrsp, 0, sizeof (struct lpfc_iocbq));
-+
-+ piocbq->iocb_flag |= LPFC_IO_POLL;
-+
-+ ret = lpfc_sli_issue_iocb_wait_high_priority(phba,
-+ &phba->sli.ring[psli->fcp_ring],
-+ piocbq, 0,
-+ piocbqrsp);
-+ if (ret == IOCB_SUCCESS)
-+ retval = SUCCESS;
-+
-+ lpfc_cmd->result = piocbqrsp->iocb.un.ulpWord[4];
-+ lpfc_cmd->status = piocbqrsp->iocb.ulpStatus;
-+ if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT)
-+ if (lpfc_cmd->result & IOERR_DRVR_MASK)
-+ lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
-+
-+	/* At this point the LUN reset has completed, and all outstanding
-+	 * txcmplq I/Os should have been aborted by the target.
-+	 * Unfortunately, not all targets abide by this, so we need
-+	 * to help them out a bit.
-+ */
-+ lpfc_sli_abort_iocb_lun(phba,
-+ &phba->sli.ring[phba->sli.fcp_ring],
-+ cmnd->device->id,
-+ cmnd->device->lun, LPFC_ABORT_ALLQ);
-+
-+ loopcnt = 0;
-+ while((cnt = lpfc_sli_sum_iocb_lun(phba,
-+ &phba->sli.ring[phba->sli.fcp_ring],
-+ cmnd->device->id,
-+ cmnd->device->lun))) {
-+ spin_unlock_irq(phba->host->host_lock);
-+ set_current_state(TASK_UNINTERRUPTIBLE);
-+ schedule_timeout(LPFC_RESET_WAIT*HZ);
-+ spin_lock_irq(phba->host->host_lock);
-+
-+ if (++loopcnt
-+ > (2 * phba->cfg_nodev_tmo)/LPFC_RESET_WAIT)
-+ break;
-+ }
-+
-+ if(cnt) {
-+ lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
-+ "%d:0719 LUN Reset I/O flush failure: cnt x%x\n",
-+ phba->brd_no, cnt);
-+ }
-+
-+ lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
-+ "%d:0713 SCSI layer issued LUN reset (%d, %d) "
-+ "Data: x%x x%x x%x\n",
-+ phba->brd_no, lpfc_cmd->target->scsi_id,
-+ lpfc_cmd->lun, ret, lpfc_cmd->status,
-+ lpfc_cmd->result);
-+
-+ if (ret == IOCB_TIMEDOUT)
-+ goto out;
-+
-+ mempool_free(piocbqrsp, phba->iocb_mem_pool);
-+
-+out_free_scsi_buf:
-+ lpfc_free_scsi_buf(lpfc_cmd);
-+out:
-+ return retval;
-+}
-+
-+static void
-+lpfc_scsi_cmd_iocb_cleanup (struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
-+ struct lpfc_iocbq *pIocbOut)
-+{
-+ struct lpfc_scsi_buf *lpfc_cmd =
-+ (struct lpfc_scsi_buf *) pIocbIn->context1;
-+ struct scsi_cmnd *ml_cmd =
-+ ((struct lpfc_scsi_buf *) pIocbIn->context1)->pCmd;
-+ struct lpfc_target *targetp = ml_cmd->device->hostdata;
-+
-+ if (targetp) {
-+ targetp->iodonecnt++;
-+ targetp->errorcnt++;
-+ }
-+ lpfc_free_scsi_buf(lpfc_cmd);
-+}
-+
-+static void
-+lpfc_scsi_cmd_iocb_cmpl_aborted (struct lpfc_hba *phba,
-+ struct lpfc_iocbq *pIocbIn,
-+ struct lpfc_iocbq *pIocbOut)
-+{
-+ struct scsi_cmnd *ml_cmd =
-+ ((struct lpfc_scsi_buf *) pIocbIn->context1)->pCmd;
-+
-+ lpfc_scsi_cmd_iocb_cleanup (phba, pIocbIn, pIocbOut);
-+ ml_cmd->host_scribble = NULL;
-+}
-+
-+#define LPFC_ABORT_WAIT 2
-+int
-+lpfc_abort_handler(struct scsi_cmnd *cmnd)
-+{
-+ struct lpfc_hba *phba =
-+ (struct lpfc_hba *)cmnd->device->host->hostdata[0];
-+ struct lpfc_sli_ring *pring = &phba->sli.ring[phba->sli.fcp_ring];
-+ struct lpfc_iocbq *iocb, *next_iocb, *abtsiocbp;
-+ struct lpfc_scsi_buf *lpfc_cmd;
-+ IOCB_t *cmd, *icmd;
-+ unsigned long snum;
-+ unsigned int id, lun;
-+ unsigned int loop_count = 0;
-+ int ret = IOCB_SUCCESS;
-+
-+ /*
-+ * If the host_scribble data area is NULL, then the driver has already
-+ * completed this command, but the midlayer did not see the completion
-+ * before the eh fired. Just return SUCCESS.
-+ */
-+ lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
-+ if (!lpfc_cmd)
-+ return SUCCESS;
-+
-+ /* save these now since lpfc_cmd can be freed */
-+ id = lpfc_cmd->target->scsi_id;
-+ lun = lpfc_cmd->lun;
-+ snum = cmnd->serial_number;
-+
-+ /* Search the txq first. */
-+ list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
-+ cmd = &iocb->iocb;
-+ if (iocb->context1 != lpfc_cmd)
-+ continue;
-+
-+ list_del_init(&iocb->list);
-+ pring->txq_cnt--;
-+ if (!iocb->iocb_cmpl) {
-+ mempool_free(iocb, phba->iocb_mem_pool);
-+ }
-+ else {
-+ cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
-+ cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
-+ lpfc_scsi_cmd_iocb_cmpl_aborted(phba, iocb, iocb);
-+ }
-+ goto out;
-+ }
-+
-+ abtsiocbp = mempool_alloc(phba->iocb_mem_pool, GFP_ATOMIC);
-+ if (!abtsiocbp)
-+ goto out;
-+ memset(abtsiocbp, 0, sizeof (struct lpfc_iocbq));
-+
-+ /*
-+ * The scsi command was not in the txq. Check the txcmplq and if it is
-+ * found, send an abort to the FW.
-+ */
-+ list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
-+ if (iocb->context1 != lpfc_cmd)
-+ continue;
-+
-+ iocb->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl_aborted;
-+ cmd = &iocb->iocb;
-+ icmd = &abtsiocbp->iocb;
-+ icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
-+ icmd->un.acxri.abortContextTag = cmd->ulpContext;
-+ icmd->un.acxri.abortIoTag = cmd->ulpIoTag;
-+
-+ icmd->ulpLe = 1;
-+ icmd->ulpClass = cmd->ulpClass;
-+ abtsiocbp->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
-+ if (phba->hba_state >= LPFC_LINK_UP)
-+ icmd->ulpCommand = CMD_ABORT_XRI_CN;
-+ else
-+ icmd->ulpCommand = CMD_CLOSE_XRI_CN;
-+
-+ if (lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0) ==
-+ IOCB_ERROR) {
-+ mempool_free(abtsiocbp, phba->iocb_mem_pool);
-+ ret = IOCB_ERROR;
-+ break;
-+ }
-+
-+ /* Wait for abort to complete */
-+ while (cmnd->host_scribble)
-+ {
-+ spin_unlock_irq(phba->host->host_lock);
-+ set_current_state(TASK_UNINTERRUPTIBLE);
-+ schedule_timeout(LPFC_ABORT_WAIT*HZ);
-+ spin_lock_irq(phba->host->host_lock);
-+ if (++loop_count
-+ > (2 * phba->cfg_nodev_tmo)/LPFC_ABORT_WAIT)
-+ break;
-+ }
-+
-+ if (cmnd->host_scribble) {
-+ lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
-+ "%d:0748 abort handler timed "
-+ "out waiting for abort to "
-+ "complete. Data: "
-+ "x%x x%x x%x x%lx\n",
-+ phba->brd_no, ret, id, lun, snum);
-+ cmnd->host_scribble = NULL;
-+ iocb->iocb_cmpl = lpfc_scsi_cmd_iocb_cleanup;
-+ ret = IOCB_ERROR;
-+ }
-+
-+ break;
-+ }
-+
-+ out:
-+ lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
-+ "%d:0749 SCSI layer issued abort device "
-+ "Data: x%x x%x x%x x%lx\n",
-+ phba->brd_no, ret, id, lun, snum);
-+
-+ return (ret == IOCB_SUCCESS ? SUCCESS : FAILED);
-+}
-+
-+#if defined(RHEL_FC) || defined(SLES_FC)
-+void
-+lpfc_target_unblock(struct lpfc_hba *phba, struct lpfc_target *targetp)
-+{
-+#if defined(RHEL_FC)
-+ /*
-+	 * This code is to be removed once block/unblock and the new
-+	 * discovery state machine are fully debugged.
-+ */
-+ if (!targetp || !targetp->starget) {
-+#else /* not RHEL_FC -> is SLES_FC */
-+ if (!targetp) {
-+#endif
-+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY | LOG_FCP,
-+ "%d:0262 Cannot unblock scsi target\n", phba->brd_no);
-+
-+ return;
-+ }
-+
-+ /* Unblock IO to target scsi id <sid> to NPort <nlp_DID> */
-+ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY | LOG_FCP,
-+ "%d:0258 Unblocking IO to Target scsi id x%x "
-+ "NPort pointer x%p\n",
-+ phba->brd_no, targetp->scsi_id, targetp->pnode);
-+
-+ spin_unlock_irq(phba->host->host_lock);
-+
-+#if defined(RHEL_FC)
-+ fc_target_unblock(targetp->starget);
-+#else /* not RHEL_FC -> is SLES_FC */
-+ fc_target_unblock(phba->host, targetp->scsi_id,
-+ &targetp->dev_loss_timer);
-+#endif
-+ spin_lock_irq(phba->host->host_lock);
-+ targetp->blocked--;
-+}
-+
-+void
-+lpfc_target_block(struct lpfc_hba *phba, struct lpfc_target *targetp)
-+{
-+#if defined(RHEL_FC)
-+ /*
-+	 * This code is to be removed once block/unblock and the new
-+	 * discovery state machine are fully debugged.
-+ */
-+ if (!targetp || !targetp->starget) {
-+#else /* not RHEL_FC -> is SLES_FC */
-+ if (!targetp) {
-+#endif
-+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY | LOG_FCP,
-+ "%d:0263 Cannot block scsi target."
-+ " target ptr x%p\n",
-+ phba->brd_no, targetp);
-+ return;
-+ }
-+
-+ /* Block all IO to target scsi id <sid> to NPort <nlp_DID> */
-+ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY | LOG_FCP,
-+ "%d:0259 Blocking IO to Target scsi id x%x"
-+ " NPort pointer x%p\n",
-+ phba->brd_no, targetp->scsi_id, targetp->pnode);
-+
-+ spin_unlock_irq(phba->host->host_lock);
-+#if defined(RHEL_FC)
-+ fc_target_block(targetp->starget);
-+#else /* not RHEL_FC -> is SLES_FC */
-+ fc_target_block(phba->host, targetp->scsi_id, &targetp->dev_loss_timer,
-+ phba->cfg_nodev_tmo);
-+
-+ /*
-+ * Kill the midlayer unblock timer, but leave the target blocked.
-+ * The driver will unblock with the nodev_tmo callback function.
-+ */
-+ del_timer_sync(&targetp->dev_loss_timer);
-+#endif
-+ spin_lock_irq(phba->host->host_lock);
-+ targetp->blocked++;
-+}
-+
-+int
-+lpfc_target_remove(struct lpfc_hba *phba, struct lpfc_target *targetp)
-+{
-+ struct scsi_device *sdev;
-+ struct Scsi_Host *shost = phba->host;
-+
-+ /* This is only called if scsi target (targetp->starget) is valid */
-+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY | LOG_FCP,
-+ "%d:0260 Remove Target scsi id x%x\n",
-+ phba->brd_no, targetp->scsi_id);
-+
-+ /* If this target is blocked, we must unblock it first */
-+ if (targetp->blocked)
-+ lpfc_target_unblock(phba, targetp);
-+
-+ /* Remove all associated devices for this target */
-+ if (phba->cfg_scsi_hotplug) {
-+top:
-+ list_for_each_entry(sdev, &shost->__devices, siblings) {
-+ if (sdev->channel == 0
-+ && sdev->id == targetp->scsi_id) {
-+ spin_unlock_irq(shost->host_lock);
-+ scsi_device_get(sdev);
-+ scsi_remove_device(sdev);
-+ scsi_device_put(sdev);
-+ spin_lock_irq(shost->host_lock);
-+ goto top;
-+ }
-+ }
-+ }
-+
-+ return 0;
-+}
-+
-+int
-+lpfc_target_add(struct lpfc_hba *phba, struct lpfc_target *targetp)
-+{
-+ /* If the driver is not supporting scsi hotplug, just exit. */
-+ if(!phba->cfg_scsi_hotplug)
-+ return 1;
-+
-+ /* This is only called if scsi target (targetp->starget) is valid */
-+
-+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY | LOG_FCP,
-+ "%d:0261 Adding Target scsi id x%x\n",
-+ phba->brd_no, targetp->scsi_id);
-+
-+ /*
-+ * The driver discovered a new target. Call the midlayer and get this
-+ * target's luns added into the device list.
-+ * Since we are going to scan the entire host, kick off a timer to
-+ * do this so we can possibly consolidate multiple target scans into
-+ * one scsi host scan.
-+ */
-+ mod_timer(&phba->fc_scantmo, jiffies + HZ);
-+ phba->fc_flag |= FC_SCSI_SCAN_TMO;
-+ return 0;
-+}
-+#endif /* RHEL_FC or SLES_FC */
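/*
 * Editor's sketch (not part of the original patch): lpfc_target_add() does
 * not scan immediately -- it arms fc_scantmo one second out and sets
 * FC_SCSI_SCAN_TMO, so several targets discovered back-to-back collapse
 * into a single scsi_scan_host() pass.  Below is a small model of that
 * coalescing timer with the clock simulated by a counter.
 */
#include <stdbool.h>
#include <stdio.h>

struct scan_timer {
	bool armed;
	unsigned long deadline;
};

static void target_added(struct scan_timer *t, unsigned long now)
{
	t->armed = true;
	t->deadline = now + 1;		/* "jiffies + HZ" stand-in */
}

static bool scan_due(struct scan_timer *t, unsigned long now)
{
	if (t->armed && now >= t->deadline) {
		t->armed = false;	/* one scan covers the whole burst */
		return true;
	}
	return false;
}

int main(void)
{
	struct scan_timer t = { false, 0 };
	unsigned long now;
	unsigned scans = 0;

	for (now = 0; now < 10; now++) {
		if (now < 3)		/* three targets appear back-to-back */
			target_added(&t, now);
		if (scan_due(&t, now))
			scans++;
	}
	printf("targets added: 3, host scans run: %u\n", scans);
	return 0;
}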
---- linux-2.6.8.1-t044-driver-update//drivers/scsi/lpfc/lpfc_compat.h 1970-01-01 03:00:00.000000000 +0300
-+++ rhel4u2//drivers/scsi/lpfc/lpfc_compat.h 2005-10-19 11:47:17.000000000 +0400
-@@ -0,0 +1,109 @@
-+/*******************************************************************
-+ * This file is part of the Emulex Linux Device Driver for *
-+ * Fibre Channel Host Bus Adapters. *
-+ * Copyright (C) 2003-2005 Emulex. All rights reserved. *
-+ * EMULEX and SLI are trademarks of Emulex. *
-+ * www.emulex.com *
-+ * *
-+ * This program is free software; you can redistribute it and/or *
-+ * modify it under the terms of version 2 of the GNU General *
-+ * Public License as published by the Free Software Foundation. *
-+ * This program is distributed in the hope that it will be useful. *
-+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
-+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
-+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
-+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
-+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
-+ * more details, a copy of which can be found in the file COPYING *
-+ * included with this package. *
-+ *******************************************************************/
-+
-+/*
-+ * $Id: lpfc_compat.h 1.31.1.2 2005/06/13 17:16:04EDT sf_support Exp $
-+ *
-+ * This file provides macros to aid compilation in the Linux 2.4 kernel
-+ * over various platform architectures.
-+ */
-+
-+#ifndef _H_LPFC_COMPAT
-+#define _H_LPFC_COMPAT
-+
-+
-+/*******************************************************************
-+Note: HBA's SLI memory contains little-endian LW.
-+Thus to access it from a little-endian host,
-+memcpy_toio() and memcpy_fromio() can be used.
-+However on a big-endian host, copy 4 bytes at a time,
-+using writel() and readl().
-+ *******************************************************************/
-+
-+#if __BIG_ENDIAN
-+
-+static inline void
-+lpfc_memcpy_to_slim( void *dest, void *src, unsigned int bytes)
-+{
-+ uint32_t *dest32;
-+ uint32_t *src32;
-+ unsigned int four_bytes;
-+
-+
-+ dest32 = (uint32_t *) dest;
-+ src32 = (uint32_t *) src;
-+
-+ /* write input bytes, 4 bytes at a time */
-+ for (four_bytes = bytes /4; four_bytes > 0; four_bytes--) {
-+ writel( *src32, dest32);
-+ readl(dest32); /* flush */
-+ dest32++;
-+ src32++;
-+ }
-+
-+ return;
-+}
-+
-+static inline void
-+lpfc_memcpy_from_slim( void *dest, void *src, unsigned int bytes)
-+{
-+ uint32_t *dest32;
-+ uint32_t *src32;
-+ unsigned int four_bytes;
-+
-+
-+ dest32 = (uint32_t *) dest;
-+ src32 = (uint32_t *) src;
-+
-+ /* read input bytes, 4 bytes at a time */
-+ for (four_bytes = bytes /4; four_bytes > 0; four_bytes--) {
-+ *dest32 = readl( src32);
-+ dest32++;
-+ src32++;
-+ }
-+
-+ return;
-+}
-+
-+#else
-+
-+static inline void
-+lpfc_memcpy_to_slim( void *dest, void *src, unsigned int bytes)
-+{
-+ /* actually returns 1 byte past dest */
-+ memcpy_toio( dest, src, bytes);
-+}
-+
-+static inline void
-+lpfc_memcpy_from_slim( void *dest, void *src, unsigned int bytes)
-+{
-+ /* actually returns 1 byte past dest */
-+ memcpy_fromio( dest, src, bytes);
-+}
-+
-+#endif /* __BIG_ENDIAN */
-+
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,6)
-+#define msleep(x) do { \
-+ set_current_state(TASK_UNINTERRUPTIBLE); \
-+ schedule_timeout((x)); \
-+	} while (0)
-+#endif
-+#endif /* _H_LPFC_COMPAT */
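/*
 * Editor's sketch (not part of the original patch): on big-endian hosts the
 * compat helpers above avoid memcpy_toio()/memcpy_fromio() and move SLIM
 * data one 32-bit word at a time through writel()/readl(), so each
 * little-endian longword is handled as a unit.  The userspace analogue below
 * makes the per-word byte swap explicit, since writel()/readl() are not
 * available outside the kernel; it only illustrates the word-wise copy.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t swap32(uint32_t v)
{
	return (v >> 24) | ((v >> 8) & 0x0000ff00u) |
	       ((v << 8) & 0x00ff0000u) | (v << 24);
}

/* Copy 'bytes' (assumed to be a multiple of 4, as in the SLIM helpers)
 * one longword at a time instead of byte by byte. */
static void copy_to_slim_model(uint32_t *dest, const void *src,
			       unsigned int bytes)
{
	uint32_t word;
	unsigned int i;

	for (i = 0; i < bytes / 4; i++) {
		memcpy(&word, (const uint8_t *)src + 4 * i, 4);
		dest[i] = swap32(word);	/* per-word swap, not a byte copy */
	}
}

int main(void)
{
	uint32_t slim[2] = { 0 };
	const uint8_t mbox[8] = { 0x11, 0x22, 0x33, 0x44,
				  0x55, 0x66, 0x77, 0x88 };

	copy_to_slim_model(slim, mbox, sizeof(mbox));
	printf("%08x %08x\n", (unsigned)slim[0], (unsigned)slim[1]);
	return 0;
}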
---- linux-2.6.8.1-t044-driver-update//drivers/scsi/lpfc/lpfc_hbadisc.c 1970-01-01 03:00:00.000000000 +0300
-+++ rhel4u2//drivers/scsi/lpfc/lpfc_hbadisc.c 2005-10-19 11:47:17.000000000 +0400
-@@ -0,0 +1,2906 @@
-+/*******************************************************************
-+ * This file is part of the Emulex Linux Device Driver for *
-+ * Fibre Channel Host Bus Adapters. *
-+ * Copyright (C) 2003-2005 Emulex. All rights reserved. *
-+ * EMULEX and SLI are trademarks of Emulex. *
-+ * www.emulex.com *
-+ * *
-+ * This program is free software; you can redistribute it and/or *
-+ * modify it under the terms of version 2 of the GNU General *
-+ * Public License as published by the Free Software Foundation. *
-+ * This program is distributed in the hope that it will be useful. *
-+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
-+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
-+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
-+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
-+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
-+ * more details, a copy of which can be found in the file COPYING *
-+ * included with this package. *
-+ *******************************************************************/
-+
-+/*
-+ * $Id: lpfc_hbadisc.c 1.225.1.3 2005/07/08 19:33:24EDT sf_support Exp $
-+ */
-+
-+#include <linux/version.h>
-+#include <linux/blkdev.h>
-+#include <linux/dma-mapping.h>
-+#include <linux/pci.h>
-+#include <linux/spinlock.h>
-+#include <linux/kernel.h>
-+#include <linux/smp_lock.h>
-+
-+#include <scsi/scsi_cmnd.h>
-+#include <scsi/scsi_device.h>
-+#include <scsi/scsi_host.h>
-+
-+#include <scsi/scsi_transport_fc.h>
-+
-+#include "lpfc_sli.h"
-+#include "lpfc_disc.h"
-+#include "lpfc_scsi.h"
-+#include "lpfc.h"
-+#include "lpfc_crtn.h"
-+#include "lpfc_fcp.h"
-+#include "lpfc_hw.h"
-+#include "lpfc_logmsg.h"
-+#include "lpfc_mem.h"
-+
-+/* AlpaArray for assignment of scsid for scan-down and bind_method */
-+uint8_t lpfcAlpaArray[] = {
-+ 0xEF, 0xE8, 0xE4, 0xE2, 0xE1, 0xE0, 0xDC, 0xDA, 0xD9, 0xD6,
-+ 0xD5, 0xD4, 0xD3, 0xD2, 0xD1, 0xCE, 0xCD, 0xCC, 0xCB, 0xCA,
-+ 0xC9, 0xC7, 0xC6, 0xC5, 0xC3, 0xBC, 0xBA, 0xB9, 0xB6, 0xB5,
-+ 0xB4, 0xB3, 0xB2, 0xB1, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9,
-+ 0xA7, 0xA6, 0xA5, 0xA3, 0x9F, 0x9E, 0x9D, 0x9B, 0x98, 0x97,
-+ 0x90, 0x8F, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7C, 0x7A, 0x79,
-+ 0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6E, 0x6D, 0x6C, 0x6B,
-+ 0x6A, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5C, 0x5A, 0x59, 0x56,
-+ 0x55, 0x54, 0x53, 0x52, 0x51, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A,
-+ 0x49, 0x47, 0x46, 0x45, 0x43, 0x3C, 0x3A, 0x39, 0x36, 0x35,
-+ 0x34, 0x33, 0x32, 0x31, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29,
-+ 0x27, 0x26, 0x25, 0x23, 0x1F, 0x1E, 0x1D, 0x1B, 0x18, 0x17,
-+ 0x10, 0x0F, 0x08, 0x04, 0x02, 0x01
-+};
-+
-+static void lpfc_disc_timeout_handler(struct lpfc_hba *);
-+
-+void
-+lpfc_evt_iocb_free(struct lpfc_hba * phba, struct lpfc_iocbq * saveq)
-+{
-+ struct lpfc_iocbq *rspiocbp, *tmpiocbp;
-+
-+ /* Free up iocb buffer chain for cmd just processed */
-+ list_for_each_entry_safe(rspiocbp, tmpiocbp,
-+ &saveq->list, list) {
-+ list_del(&rspiocbp->list);
-+ mempool_free( rspiocbp, phba->iocb_mem_pool);
-+ }
-+ mempool_free( saveq, phba->iocb_mem_pool);
-+}
-+
-+void
-+lpfc_process_nodev_timeout(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
-+{
-+ struct lpfc_target *targetp;
-+ int scsid, warn_user = 0;
-+
-+ /* If the nodev_timeout is cancelled do nothing */
-+ if (!(ndlp->nlp_flag & NLP_NODEV_TMO))
-+ return;
-+
-+ ndlp->nlp_flag &= ~NLP_NODEV_TMO;
-+
-+ for(scsid=0;scsid<MAX_FCP_TARGET;scsid++) {
-+ targetp = phba->device_queue_hash[scsid];
-+ /* First see if the SCSI ID has an allocated struct
-+ lpfc_target */
-+ if (targetp) {
-+ if (targetp->pnode == ndlp) {
-+ /* flush the target */
-+ lpfc_sli_abort_iocb_tgt(phba,
-+ &phba->sli.ring[phba->sli.fcp_ring],
-+ scsid, LPFC_ABORT_ALLQ);
-+ warn_user = 1;
-+ break;
-+ }
-+ }
-+ }
-+
-+ if (warn_user) {
-+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
-+ "%d:0203 Nodev timeout on NPort x%x "
-+ "Data: x%x x%x x%x\n",
-+ phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag,
-+ ndlp->nlp_state, ndlp->nlp_rpi);
-+ } else {
-+ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
-+ "%d:0206 Nodev timeout on NPort x%x "
-+ "Data: x%x x%x x%x\n",
-+ phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag,
-+ ndlp->nlp_state, ndlp->nlp_rpi);
-+ }
-+
-+ lpfc_disc_state_machine(phba, ndlp, NULL, NLP_EVT_DEVICE_RM);
-+ return;
-+}
-+
-+static void
-+lpfc_disc_done(struct lpfc_hba * phba)
-+{
-+ struct lpfc_sli *psli = &phba->sli;
-+ LPFC_DISC_EVT_t *evtp;
-+ LPFC_MBOXQ_t *pmb;
-+ struct lpfc_iocbq *cmdiocbp, *saveq;
-+ struct lpfc_nodelist *ndlp;
-+ LPFC_RING_MASK_t *func;
-+ struct Scsi_Host *shost;
-+ struct lpfc_dmabuf *mp;
-+ uint32_t work_hba_events;
-+ int free_evt;
-+
-+ work_hba_events=phba->work_hba_events;
-+ spin_unlock_irq(phba->host->host_lock);
-+
-+ if (work_hba_events & WORKER_DISC_TMO)
-+ lpfc_disc_timeout_handler(phba);
-+
-+ if (work_hba_events & WORKER_ELS_TMO)
-+ lpfc_els_timeout_handler(phba);
-+
-+ if (work_hba_events & WORKER_MBOX_TMO)
-+ lpfc_mbox_timeout_handler(phba);
-+
-+ if (work_hba_events & WORKER_FDMI_TMO)
-+ lpfc_fdmi_tmo_handler(phba);
-+
-+ spin_lock_irq(phba->host->host_lock);
-+ phba->work_hba_events &= ~work_hba_events;
-+
-+ /* check discovery event list */
-+ while(!list_empty(&phba->dpc_disc)) {
-+ evtp = list_entry(phba->dpc_disc.next,
-+ typeof(*evtp), evt_listp);
-+ list_del_init(&evtp->evt_listp);
-+ free_evt =1;
-+ switch(evtp->evt) {
-+ case LPFC_EVT_MBOX:
-+ pmb = (LPFC_MBOXQ_t *)(evtp->evt_arg1);
-+ if ( pmb->mbox_cmpl )
-+ (pmb->mbox_cmpl) (phba, pmb);
-+ else {
-+ mp = (struct lpfc_dmabuf *) (pmb->context1);
-+ if (mp) {
-+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
-+ kfree(mp);
-+ }
-+ mempool_free( pmb, phba->mbox_mem_pool);
-+ }
-+ break;
-+ case LPFC_EVT_SOL_IOCB:
-+ cmdiocbp = (struct lpfc_iocbq *)(evtp->evt_arg1);
-+ saveq = (struct lpfc_iocbq *)(evtp->evt_arg2);
-+ (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
-+ lpfc_evt_iocb_free(phba, saveq);
-+ break;
-+ case LPFC_EVT_UNSOL_IOCB:
-+ func = (LPFC_RING_MASK_t *)(evtp->evt_arg1);
-+ saveq = (struct lpfc_iocbq *)(evtp->evt_arg2);
-+ (func->lpfc_sli_rcv_unsol_event) (phba,
-+ &psli->ring[LPFC_ELS_RING], saveq);
-+ lpfc_evt_iocb_free(phba, saveq);
-+ break;
-+ case LPFC_EVT_NODEV_TMO:
-+ free_evt = 0;
-+ ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
-+ lpfc_process_nodev_timeout(phba, ndlp);
-+ break;
-+ case LPFC_EVT_ELS_RETRY:
-+ ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
-+ spin_unlock_irq(phba->host->host_lock);
-+ lpfc_els_retry_delay_handler(ndlp);
-+ spin_lock_irq(phba->host->host_lock);
-+ free_evt = 0;
-+ break;
-+ case LPFC_EVT_SCAN:
-+ shost = phba->host;
-+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY | LOG_FCP,
-+ "%d:0252 Rescanning scsi host\n", phba->brd_no);
-+ spin_unlock_irq(shost->host_lock);
-+ scsi_scan_host(shost);
-+ spin_lock_irq(shost->host_lock);
-+ break;
-+ case LPFC_EVT_ERR_ATTN:
-+ spin_unlock_irq(phba->host->host_lock);
-+ lpfc_handle_eratt(phba, (unsigned long) evtp->evt_arg1);
-+ spin_lock_irq(phba->host->host_lock);
-+ break;
-+ case LPFC_EVT_OPEN_LOOP:
-+ ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
-+ lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
-+ ndlp->nlp_flag &= ~NLP_NPR_ADISC;
-+ break;
-+ }
-+ if (free_evt)
-+ kfree(evtp);
-+ }
-+}
-+
-+int
-+lpfc_do_dpc(void *p)
-+{
-+ unsigned long flags;
-+ DECLARE_MUTEX_LOCKED(sem);
-+ struct lpfc_hba *phba = (struct lpfc_hba *)p;
-+
-+ lock_kernel();
-+
-+ daemonize("lpfc_dpc_%d", phba->brd_no);
-+ allow_signal(SIGHUP);
-+
-+ phba->dpc_wait = &sem;
-+ set_user_nice(current, -20);
-+
-+ unlock_kernel();
-+
-+ complete(&phba->dpc_startup);
-+
-+ while (1) {
-+ if (down_interruptible(&sem))
-+ break;
-+
-+ if (signal_pending(current))
-+ break;
-+
-+ if (phba->dpc_kill)
-+ break;
-+
-+ spin_lock_irqsave(phba->host->host_lock, flags);
-+ lpfc_disc_done(phba);
-+ spin_unlock_irqrestore(phba->host->host_lock, flags);
-+ }
-+
-+	/* Clear the semaphore pointer we were waiting on. */
-+ phba->dpc_wait = NULL;
-+ complete_and_exit(&phba->dpc_exiting, 0);
-+ return(0);
-+}
-+
-+/*
-+ * This is only called to handle FC discovery events. Since this is a rare
-+ * occurrence, we allocate an LPFC_DISC_EVT_t structure here instead of
-+ * embedding it in the IOCB.
-+ */
-+int
-+lpfc_discq_post_event(struct lpfc_hba * phba, void *arg1, void *arg2,
-+ uint32_t evt)
-+{
-+ LPFC_DISC_EVT_t *evtp;
-+
-+ /* All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events
-+ * will be queued to DPC for processing
-+ */
-+ evtp = (LPFC_DISC_EVT_t *) kmalloc(sizeof(LPFC_DISC_EVT_t), GFP_ATOMIC);
-+ if (!evtp)
-+ return 0;
-+
-+ evtp->evt_arg1 = arg1;
-+ evtp->evt_arg2 = arg2;
-+ evtp->evt = evt;
-+ evtp->evt_listp.next = NULL;
-+ evtp->evt_listp.prev = NULL;
-+
-+ /* Queue the event to the DPC to be processed later */
-+ list_add_tail(&evtp->evt_listp, &phba->dpc_disc);
-+ if (phba->dpc_wait)
-+ up(phba->dpc_wait);
-+
-+ return 1;
-+}
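
The lpfc_do_dpc()/lpfc_discq_post_event() pair above is a classic producer/consumer hand-off: timer or interrupt-time code queues an event and ups a semaphore, and the DPC thread wakes, takes the lock, and drains the queue. The following is a minimal userspace sketch of that pattern using POSIX threads; all names (worker_ctx, post_work, dpc_thread) are invented for illustration and are not part of the lpfc driver.

/* Minimal userspace analogue of the lpfc DPC loop: a worker thread
 * sleeps on a semaphore and drains a queue each time it is woken.
 * All names here are illustrative only. */
#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>
#include <stdlib.h>

struct work_item {
	int id;
	struct work_item *next;
};

struct worker_ctx {
	sem_t wait;                 /* counterpart of phba->dpc_wait    */
	pthread_mutex_t lock;       /* counterpart of the host lock     */
	struct work_item *head;     /* counterpart of the dpc_disc list */
	int stop;                   /* counterpart of phba->dpc_kill    */
};

static void *dpc_thread(void *arg)
{
	struct worker_ctx *ctx = arg;

	for (;;) {
		sem_wait(&ctx->wait);          /* block until work is posted */
		pthread_mutex_lock(&ctx->lock);
		if (ctx->stop) {
			pthread_mutex_unlock(&ctx->lock);
			break;
		}
		while (ctx->head) {            /* drain everything queued */
			struct work_item *w = ctx->head;
			ctx->head = w->next;
			pthread_mutex_unlock(&ctx->lock);
			printf("processing work item %d\n", w->id);
			free(w);
			pthread_mutex_lock(&ctx->lock);
		}
		pthread_mutex_unlock(&ctx->lock);
	}
	return NULL;
}

static void post_work(struct worker_ctx *ctx, int id)
{
	struct work_item *w = malloc(sizeof(*w));

	if (!w)
		return;
	w->id = id;
	pthread_mutex_lock(&ctx->lock);
	w->next = ctx->head;
	ctx->head = w;
	pthread_mutex_unlock(&ctx->lock);
	sem_post(&ctx->wait);              /* wake the worker */
}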
-+
-+int
-+lpfc_linkdown(struct lpfc_hba * phba)
-+{
-+ struct lpfc_sli *psli;
-+ struct lpfc_nodelist *ndlp, *next_ndlp;
-+ struct list_head *listp;
-+ struct list_head *node_list[7];
-+ LPFC_MBOXQ_t *mb;
-+ int rc, i;
-+
-+ psli = &phba->sli;
-+ phba->hba_state = LPFC_LINK_DOWN;
-+
-+#if !defined(RHEL_FC) && !defined(SLES_FC)
-+ /* Stop all requests to the driver from the midlayer. */
-+ scsi_block_requests(phba->host);
-+#endif
-+
-+ lpfc_put_event(phba, HBA_EVENT_LINK_DOWN, phba->fc_myDID, NULL, 0, 0);
-+
-+ /* Clean up any firmware default rpi's */
-+ if ((mb = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC))) {
-+ lpfc_unreg_did(phba, 0xffffffff, mb);
-+ mb->mbox_cmpl=lpfc_sli_def_mbox_cmpl;
-+ if (lpfc_sli_issue_mbox(phba, mb, (MBX_NOWAIT | MBX_STOP_IOCB))
-+ == MBX_NOT_FINISHED) {
-+ mempool_free( mb, phba->mbox_mem_pool);
-+ }
-+ }
-+
-+ /* Cleanup any outstanding RSCN activity */
-+ lpfc_els_flush_rscn(phba);
-+
-+ /* Cleanup any outstanding ELS commands */
-+ lpfc_els_flush_cmd(phba);
-+
-+ /*
-+ * If this function was called by the lpfc_do_dpc, don't recurse into
-+ * the routine again. If not, just process any outstanding
-+ * discovery events.
-+ */
-+ if ((!list_empty(&phba->dpc_disc)) ||
-+ (phba->work_hba_events)){
-+ lpfc_disc_done(phba);
-+ }
-+
-+ /* Issue a LINK DOWN event to all nodes */
-+ node_list[0] = &phba->fc_npr_list; /* MUST do this list first */
-+ node_list[1] = &phba->fc_nlpmap_list;
-+ node_list[2] = &phba->fc_nlpunmap_list;
-+ node_list[3] = &phba->fc_prli_list;
-+ node_list[4] = &phba->fc_reglogin_list;
-+ node_list[5] = &phba->fc_adisc_list;
-+ node_list[6] = &phba->fc_plogi_list;
-+ for (i = 0; i < 7; i++) {
-+ listp = node_list[i];
-+ if (list_empty(listp))
-+ continue;
-+
-+ list_for_each_entry_safe(ndlp, next_ndlp, listp, nlp_listp) {
-+			/* Fabric nodes are not handled through the state
-+			   machine for link down */
-+ if (ndlp->nlp_type & NLP_FABRIC) {
-+ /* Remove ALL Fabric nodes except Fabric_DID */
-+ if (ndlp->nlp_DID != Fabric_DID) {
-+ /* Take it off current list and free */
-+ lpfc_nlp_list(phba, ndlp,
-+ NLP_NO_LIST);
-+ }
-+ }
-+ else {
-+ lpfc_set_failmask(phba, ndlp,
-+ LPFC_DEV_LINK_DOWN,
-+ LPFC_SET_BITMASK);
-+
-+ rc = lpfc_disc_state_machine(phba, ndlp, NULL,
-+ NLP_EVT_DEVICE_RECOVERY);
-+
-+ /* Check config parameter use-adisc or FCP-2 */
-+ if ((rc != NLP_STE_FREED_NODE) &&
-+ (phba->cfg_use_adisc == 0) &&
-+ !(ndlp->nlp_fcp_info &
-+ NLP_FCP_2_DEVICE)) {
-+ /* We know we will have to relogin, so
-+ * unreglogin the rpi right now to fail
-+ * any outstanding I/Os quickly.
-+ */
-+ lpfc_unreg_rpi(phba, ndlp);
-+ }
-+ }
-+ }
-+ }
-+
-+ /* free any ndlp's on unused list */
-+ list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_unused_list,
-+ nlp_listp) {
-+ lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
-+ }
-+
-+ /* Setup myDID for link up if we are in pt2pt mode */
-+ if (phba->fc_flag & FC_PT2PT) {
-+ phba->fc_myDID = 0;
-+ if ((mb = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC))) {
-+ lpfc_config_link(phba, mb);
-+ mb->mbox_cmpl=lpfc_sli_def_mbox_cmpl;
-+ if (lpfc_sli_issue_mbox
-+ (phba, mb, (MBX_NOWAIT | MBX_STOP_IOCB))
-+ == MBX_NOT_FINISHED) {
-+ mempool_free( mb, phba->mbox_mem_pool);
-+ }
-+ }
-+ phba->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
-+ }
-+ phba->fc_flag &= ~FC_LBIT;
-+
-+	/* Turn off the discovery timer if it's running */
-+ lpfc_can_disctmo(phba);
-+
-+ /* Must process IOCBs on all rings to handle ABORTed I/Os */
-+ return (0);
-+}
-+
-+static int
-+lpfc_linkup(struct lpfc_hba * phba)
-+{
-+ struct lpfc_nodelist *ndlp, *next_ndlp;
-+ struct list_head *listp;
-+ struct list_head *node_list[7];
-+ int i;
-+
-+ phba->hba_state = LPFC_LINK_UP;
-+ phba->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
-+ FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
-+ phba->fc_flag |= FC_NDISC_ACTIVE;
-+ phba->fc_ns_retry = 0;
-+
-+
-+ lpfc_put_event(phba, HBA_EVENT_LINK_UP, phba->fc_myDID,
-+ (void *)(unsigned long)(phba->fc_topology),
-+ 0, phba->fc_linkspeed);
-+
-+ /*
-+ * Clean up old Fabric NLP_FABRIC logins.
-+ */
-+ list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nlpunmap_list,
-+ nlp_listp) {
-+ if (ndlp->nlp_DID == Fabric_DID) {
-+ /* Take it off current list and free */
-+ lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
-+ }
-+ }
-+
-+ /* free any ndlp's on unused list */
-+ list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_unused_list,
-+ nlp_listp) {
-+ lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
-+ }
-+
-+ /* Mark all nodes for LINK UP */
-+ node_list[0] = &phba->fc_plogi_list;
-+ node_list[1] = &phba->fc_adisc_list;
-+ node_list[2] = &phba->fc_reglogin_list;
-+ node_list[3] = &phba->fc_prli_list;
-+ node_list[4] = &phba->fc_nlpunmap_list;
-+ node_list[5] = &phba->fc_nlpmap_list;
-+ node_list[6] = &phba->fc_npr_list;
-+ for (i = 0; i < 7; i++) {
-+ listp = node_list[i];
-+ if (list_empty(listp))
-+ continue;
-+
-+ list_for_each_entry(ndlp, listp, nlp_listp) {
-+ lpfc_set_failmask(phba, ndlp, LPFC_DEV_DISCOVERY_INP,
-+ LPFC_SET_BITMASK);
-+ lpfc_set_failmask(phba, ndlp, LPFC_DEV_LINK_DOWN,
-+ LPFC_CLR_BITMASK);
-+ }
-+ }
-+
-+#if !defined(RHEL_FC) && !defined(SLES_FC)
-+ spin_unlock_irq(phba->host->host_lock);
-+ scsi_unblock_requests(phba->host);
-+ spin_lock_irq(phba->host->host_lock);
-+#endif
-+ return 0;
-+}
-+
-+/*
-+ * This routine handles processing a CLEAR_LA mailbox
-+ * command upon completion. It is setup in the LPFC_MBOXQ
-+ * as the completion routine when the command is
-+ * handed off to the SLI layer.
-+ */
-+void
-+lpfc_mbx_cmpl_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
-+{
-+ struct lpfc_sli *psli;
-+ MAILBOX_t *mb;
-+ uint32_t control;
-+
-+ psli = &phba->sli;
-+ mb = &pmb->mb;
-+ /* Since we don't do discovery right now, turn these off here */
-+ psli->ring[psli->ip_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
-+ psli->ring[psli->fcp_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
-+ psli->ring[psli->next_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
-+ /* Check for error */
-+ if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) {
-+ /* CLEAR_LA mbox error <mbxStatus> state <hba_state> */
-+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
-+ "%d:0320 CLEAR_LA mbxStatus error x%x hba "
-+ "state x%x\n",
-+ phba->brd_no, mb->mbxStatus, phba->hba_state);
-+
-+ phba->hba_state = LPFC_HBA_ERROR;
-+ goto out;
-+ }
-+
-+ if(phba->fc_flag & FC_ABORT_DISCOVERY)
-+ goto out;
-+
-+ phba->num_disc_nodes = 0;
-+ /* go thru NPR list and issue ELS PLOGIs */
-+ if (phba->fc_npr_cnt) {
-+ lpfc_els_disc_plogi(phba);
-+ }
-+
-+ if(!phba->num_disc_nodes) {
-+ phba->fc_flag &= ~FC_NDISC_ACTIVE;
-+ }
-+
-+ phba->hba_state = LPFC_HBA_READY;
-+
-+out:
-+ phba->fc_flag &= ~FC_ABORT_DISCOVERY;
-+ /* Device Discovery completes */
-+ lpfc_printf_log(phba,
-+ KERN_INFO,
-+ LOG_DISCOVERY,
-+ "%d:0225 Device Discovery completes\n",
-+ phba->brd_no);
-+
-+ mempool_free( pmb, phba->mbox_mem_pool);
-+ if (phba->fc_flag & FC_ESTABLISH_LINK) {
-+ phba->fc_flag &= ~FC_ESTABLISH_LINK;
-+ }
-+ spin_unlock_irq(phba->host->host_lock);
-+ del_timer_sync(&phba->fc_estabtmo);
-+ spin_lock_irq(phba->host->host_lock);
-+ lpfc_can_disctmo(phba);
-+
-+ /* turn on Link Attention interrupts */
-+ psli->sliinit.sli_flag |= LPFC_PROCESS_LA;
-+ control = readl(phba->HCregaddr);
-+ control |= HC_LAINT_ENA;
-+ writel(control, phba->HCregaddr);
-+ readl(phba->HCregaddr); /* flush */
-+
-+ return;
-+}
-+
-+static void
-+lpfc_mbx_cmpl_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
-+{
-+ struct lpfc_sli *psli;
-+ MAILBOX_t *mb;
-+
-+ psli = &phba->sli;
-+ mb = &pmb->mb;
-+ /* Check for error */
-+ if (mb->mbxStatus) {
-+ /* CONFIG_LINK mbox error <mbxStatus> state <hba_state> */
-+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
-+ "%d:0306 CONFIG_LINK mbxStatus error x%x "
-+ "HBA state x%x\n",
-+ phba->brd_no, mb->mbxStatus, phba->hba_state);
-+
-+ lpfc_linkdown(phba);
-+ phba->hba_state = LPFC_HBA_ERROR;
-+ goto out;
-+ }
-+
-+ if (phba->hba_state == LPFC_LOCAL_CFG_LINK) {
-+		/* Start discovery by sending a FLOGI. hba_state is identically
-+		 * LPFC_FLOGI while waiting for FLOGI cmpl (same on FAN)
-+ */
-+ phba->hba_state = LPFC_FLOGI;
-+ lpfc_set_disctmo(phba);
-+ lpfc_initial_flogi(phba);
-+ mempool_free( pmb, phba->mbox_mem_pool);
-+ return;
-+ }
-+ if (phba->hba_state == LPFC_FABRIC_CFG_LINK) {
-+ mempool_free( pmb, phba->mbox_mem_pool);
-+ return;
-+ }
-+
-+out:
-+ /* CONFIG_LINK bad hba state <hba_state> */
-+ lpfc_printf_log(phba,
-+ KERN_ERR,
-+ LOG_DISCOVERY,
-+ "%d:0200 CONFIG_LINK bad hba state x%x\n",
-+ phba->brd_no, phba->hba_state);
-+
-+ if (phba->hba_state != LPFC_CLEAR_LA) {
-+ lpfc_clear_la(phba, pmb);
-+ pmb->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
-+ if (lpfc_sli_issue_mbox(phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB))
-+ == MBX_NOT_FINISHED) {
-+ mempool_free( pmb, phba->mbox_mem_pool);
-+ lpfc_disc_flush_list(phba);
-+ psli->ring[(psli->ip_ring)].flag &=
-+ ~LPFC_STOP_IOCB_EVENT;
-+ psli->ring[(psli->fcp_ring)].flag &=
-+ ~LPFC_STOP_IOCB_EVENT;
-+ psli->ring[(psli->next_ring)].flag &=
-+ ~LPFC_STOP_IOCB_EVENT;
-+ phba->hba_state = LPFC_HBA_READY;
-+ }
-+ } else {
-+ mempool_free( pmb, phba->mbox_mem_pool);
-+ }
-+ return;
-+}
-+
-+static void
-+lpfc_mbx_cmpl_read_sparam(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
-+{
-+ struct lpfc_sli *psli = &phba->sli;
-+ MAILBOX_t *mb = &pmb->mb;
-+ struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1;
-+
-+
-+ /* Check for error */
-+ if (mb->mbxStatus) {
-+ /* READ_SPARAM mbox error <mbxStatus> state <hba_state> */
-+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
-+ "%d:0319 READ_SPARAM mbxStatus error x%x "
-+ "hba state x%x>\n",
-+ phba->brd_no, mb->mbxStatus, phba->hba_state);
-+
-+ lpfc_linkdown(phba);
-+ phba->hba_state = LPFC_HBA_ERROR;
-+ goto out;
-+ }
-+
-+ memcpy((uint8_t *) & phba->fc_sparam, (uint8_t *) mp->virt,
-+ sizeof (struct serv_parm));
-+ memcpy((uint8_t *) & phba->fc_nodename,
-+ (uint8_t *) & phba->fc_sparam.nodeName,
-+ sizeof (struct lpfc_name));
-+ memcpy((uint8_t *) & phba->fc_portname,
-+ (uint8_t *) & phba->fc_sparam.portName,
-+ sizeof (struct lpfc_name));
-+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
-+ kfree(mp);
-+ mempool_free( pmb, phba->mbox_mem_pool);
-+ return;
-+
-+out:
-+ pmb->context1 = NULL;
-+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
-+ kfree(mp);
-+ if (phba->hba_state != LPFC_CLEAR_LA) {
-+ lpfc_clear_la(phba, pmb);
-+ pmb->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
-+ if (lpfc_sli_issue_mbox(phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB))
-+ == MBX_NOT_FINISHED) {
-+ mempool_free( pmb, phba->mbox_mem_pool);
-+ lpfc_disc_flush_list(phba);
-+ psli->ring[(psli->ip_ring)].flag &=
-+ ~LPFC_STOP_IOCB_EVENT;
-+ psli->ring[(psli->fcp_ring)].flag &=
-+ ~LPFC_STOP_IOCB_EVENT;
-+ psli->ring[(psli->next_ring)].flag &=
-+ ~LPFC_STOP_IOCB_EVENT;
-+ phba->hba_state = LPFC_HBA_READY;
-+ }
-+ } else {
-+ mempool_free( pmb, phba->mbox_mem_pool);
-+ }
-+ return;
-+}
-+
-+/*
-+ * This routine handles processing a READ_LA mailbox
-+ * command upon completion. It is setup in the LPFC_MBOXQ
-+ * as the completion routine when the command is
-+ * handed off to the SLI layer.
-+ */
-+void
-+lpfc_mbx_cmpl_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
-+{
-+ struct lpfc_sli *psli = &phba->sli;
-+ READ_LA_VAR *la;
-+ LPFC_MBOXQ_t *mbox;
-+ MAILBOX_t *mb = &pmb->mb;
-+ struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
-+ uint32_t control;
-+ int i;
-+
-+ /* Check for error */
-+ if (mb->mbxStatus) {
-+ /* READ_LA mbox error <mbxStatus> state <hba_state> */
-+ lpfc_printf_log(phba,
-+ KERN_INFO,
-+ LOG_LINK_EVENT,
-+ "%d:1307 READ_LA mbox error x%x state x%x\n",
-+ phba->brd_no,
-+ mb->mbxStatus, phba->hba_state);
-+ pmb->context1 = NULL;
-+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
-+ kfree(mp);
-+ mempool_free( pmb, phba->mbox_mem_pool);
-+
-+ lpfc_linkdown(phba);
-+ phba->hba_state = LPFC_HBA_ERROR;
-+
-+ /* turn on Link Attention interrupts */
-+ psli->sliinit.sli_flag |= LPFC_PROCESS_LA;
-+ control = readl(phba->HCregaddr);
-+ control |= HC_LAINT_ENA;
-+ writel(control, phba->HCregaddr);
-+ readl(phba->HCregaddr); /* flush */
-+ return;
-+ }
-+ la = (READ_LA_VAR *) & pmb->mb.un.varReadLA;
-+
-+ /* Get Loop Map information */
-+ if (mp) {
-+ memcpy(&phba->alpa_map[0], mp->virt, 128);
-+ } else {
-+ memset(&phba->alpa_map[0], 0, 128);
-+ }
-+
-+ if (((phba->fc_eventTag + 1) < la->eventTag) ||
-+ (phba->fc_eventTag == la->eventTag)) {
-+ phba->fc_stat.LinkMultiEvent++;
-+ if (la->attType == AT_LINK_UP) {
-+ if (phba->fc_eventTag != 0) {
-+
-+ lpfc_linkdown(phba);
-+ }
-+ }
-+ }
-+
-+ phba->fc_eventTag = la->eventTag;
-+
-+ if (la->attType == AT_LINK_UP) {
-+ phba->fc_stat.LinkUp++;
-+ /* Link Up Event <eventTag> received */
-+ lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
-+ "%d:1303 Link Up Event x%x received "
-+ "Data: x%x x%x x%x x%x\n",
-+ phba->brd_no, la->eventTag, phba->fc_eventTag,
-+ la->granted_AL_PA, la->UlnkSpeed,
-+ phba->alpa_map[0]);
-+
-+ switch(la->UlnkSpeed) {
-+ case LA_1GHZ_LINK:
-+ phba->fc_linkspeed = LA_1GHZ_LINK;
-+ break;
-+ case LA_2GHZ_LINK:
-+ phba->fc_linkspeed = LA_2GHZ_LINK;
-+ break;
-+ case LA_4GHZ_LINK:
-+ phba->fc_linkspeed = LA_4GHZ_LINK;
-+ break;
-+ default:
-+ phba->fc_linkspeed = LA_UNKNW_LINK;
-+ break;
-+ }
-+
-+ if ((phba->fc_topology = la->topology) == TOPOLOGY_LOOP) {
-+
-+ if (la->il) {
-+ phba->fc_flag |= FC_LBIT;
-+ }
-+
-+ phba->fc_myDID = la->granted_AL_PA;
-+
-+ i = la->un.lilpBde64.tus.f.bdeSize;
-+ if (i == 0) {
-+ phba->alpa_map[0] = 0;
-+ } else {
-+ if (phba->cfg_log_verbose
-+ & LOG_LINK_EVENT) {
-+ int numalpa, j, k;
-+ union {
-+ uint8_t pamap[16];
-+ struct {
-+ uint32_t wd1;
-+ uint32_t wd2;
-+ uint32_t wd3;
-+ uint32_t wd4;
-+ } pa;
-+ } un;
-+
-+ numalpa = phba->alpa_map[0];
-+ j = 0;
-+ while (j < numalpa) {
-+ memset(un.pamap, 0, 16);
-+ for (k = 1; j < numalpa; k++) {
-+ un.pamap[k - 1] =
-+ phba->alpa_map[j +
-+ 1];
-+ j++;
-+ if (k == 16)
-+ break;
-+ }
-+ /* Link Up Event ALPA map */
-+ lpfc_printf_log(phba,
-+ KERN_WARNING,
-+ LOG_LINK_EVENT,
-+ "%d:1304 Link Up Event "
-+ "ALPA map Data: x%x "
-+ "x%x x%x x%x\n",
-+ phba->brd_no,
-+ un.pa.wd1, un.pa.wd2,
-+ un.pa.wd3, un.pa.wd4);
-+ }
-+ }
-+ }
-+ } else {
-+ phba->fc_myDID = phba->fc_pref_DID;
-+ phba->fc_flag |= FC_LBIT;
-+ }
-+
-+ lpfc_linkup(phba);
-+ if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC))) {
-+ lpfc_read_sparam(phba, mbox);
-+ mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
-+ lpfc_sli_issue_mbox
-+ (phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB));
-+ }
-+
-+ if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC))) {
-+ phba->hba_state = LPFC_LOCAL_CFG_LINK;
-+ lpfc_config_link(phba, mbox);
-+ mbox->mbox_cmpl = lpfc_mbx_cmpl_config_link;
-+ lpfc_sli_issue_mbox
-+ (phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB));
-+ }
-+ } else {
-+ phba->fc_stat.LinkDown++;
-+ /* Link Down Event <eventTag> received */
-+ lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
-+ "%d:1305 Link Down Event x%x received "
-+ "Data: x%x x%x x%x\n",
-+ phba->brd_no, la->eventTag, phba->fc_eventTag,
-+ phba->hba_state, phba->fc_flag);
-+
-+ lpfc_linkdown(phba);
-+
-+ /* turn on Link Attention interrupts - no CLEAR_LA needed */
-+ psli->sliinit.sli_flag |= LPFC_PROCESS_LA;
-+ control = readl(phba->HCregaddr);
-+ control |= HC_LAINT_ENA;
-+ writel(control, phba->HCregaddr);
-+ readl(phba->HCregaddr); /* flush */
-+ }
-+
-+ pmb->context1 = NULL;
-+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
-+ kfree(mp);
-+ mempool_free( pmb, phba->mbox_mem_pool);
-+ return;
-+}
-+
-+/*
-+ * This routine handles processing a REG_LOGIN mailbox
-+ * command upon completion. It is setup in the LPFC_MBOXQ
-+ * as the completion routine when the command is
-+ * handed off to the SLI layer.
-+ */
-+void
-+lpfc_mbx_cmpl_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
-+{
-+ struct lpfc_sli *psli;
-+ MAILBOX_t *mb;
-+ struct lpfc_dmabuf *mp;
-+ struct lpfc_nodelist *ndlp;
-+
-+ psli = &phba->sli;
-+ mb = &pmb->mb;
-+
-+ ndlp = (struct lpfc_nodelist *) pmb->context2;
-+ mp = (struct lpfc_dmabuf *) (pmb->context1);
-+
-+ pmb->context1 = NULL;
-+
-+ /* Good status, call state machine */
-+ lpfc_disc_state_machine(phba, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN);
-+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
-+ kfree(mp);
-+ mempool_free( pmb, phba->mbox_mem_pool);
-+
-+ return;
-+}
-+
-+/*
-+ * This routine handles processing a Fabric REG_LOGIN mailbox
-+ * command upon completion. It is setup in the LPFC_MBOXQ
-+ * as the completion routine when the command is
-+ * handed off to the SLI layer.
-+ */
-+void
-+lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
-+{
-+ struct lpfc_sli *psli;
-+ MAILBOX_t *mb;
-+ struct lpfc_dmabuf *mp;
-+ struct lpfc_nodelist *ndlp;
-+ struct lpfc_nodelist *ndlp_fdmi;
-+
-+
-+ psli = &phba->sli;
-+ mb = &pmb->mb;
-+
-+ ndlp = (struct lpfc_nodelist *) pmb->context2;
-+ mp = (struct lpfc_dmabuf *) (pmb->context1);
-+
-+ if (mb->mbxStatus) {
-+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
-+ kfree(mp);
-+ mempool_free( pmb, phba->mbox_mem_pool);
-+ mempool_free( ndlp, phba->nlp_mem_pool);
-+
-+ /* FLOGI failed, so just use loop map to make discovery list */
-+ lpfc_disc_list_loopmap(phba);
-+
-+ /* Start discovery */
-+ lpfc_disc_start(phba);
-+ return;
-+ }
-+
-+ pmb->context1 = NULL;
-+
-+ if (ndlp->nlp_rpi != 0)
-+ lpfc_findnode_remove_rpi(phba, ndlp->nlp_rpi);
-+ ndlp->nlp_rpi = mb->un.varWords[0];
-+ lpfc_addnode_rpi(phba, ndlp, ndlp->nlp_rpi);
-+ ndlp->nlp_type |= NLP_FABRIC;
-+ ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
-+ lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);
-+
-+ if (phba->hba_state == LPFC_FABRIC_CFG_LINK) {
-+ /* This NPort has been assigned an NPort_ID by the fabric as a
-+ * result of the completed fabric login. Issue a State Change
-+ * Registration (SCR) ELS request to the fabric controller
-+ * (SCR_DID) so that this NPort gets RSCN events from the
-+ * fabric.
-+ */
-+ lpfc_issue_els_scr(phba, SCR_DID, 0);
-+
-+ /* Allocate a new node instance. If the pool is empty, just
-+ * start the discovery process and skip the Nameserver login
-+ * process. This is attempted again later on. Otherwise, issue
-+ * a Port Login (PLOGI) to the NameServer
-+ */
-+ if ((ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC))
-+ == 0) {
-+ lpfc_disc_start(phba);
-+ } else {
-+ lpfc_nlp_init(phba, ndlp, NameServer_DID);
-+ ndlp->nlp_type |= NLP_FABRIC;
-+ ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
-+ lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
-+ lpfc_issue_els_plogi(phba, ndlp, 0);
-+ if (phba->cfg_fdmi_on) {
-+ if ((ndlp_fdmi = mempool_alloc(
-+ phba->nlp_mem_pool,
-+ GFP_ATOMIC))) {
-+ lpfc_nlp_init(phba, ndlp_fdmi,
-+ FDMI_DID);
-+ ndlp_fdmi->nlp_type |= NLP_FABRIC;
-+ ndlp_fdmi->nlp_state =
-+ NLP_STE_PLOGI_ISSUE;
-+ lpfc_issue_els_plogi(phba, ndlp_fdmi,
-+ 0);
-+ }
-+ }
-+ }
-+ }
-+
-+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
-+ kfree(mp);
-+ mempool_free( pmb, phba->mbox_mem_pool);
-+
-+ return;
-+}
-+
-+/*
-+ * This routine handles processing a NameServer REG_LOGIN mailbox
-+ * command upon completion. It is setup in the LPFC_MBOXQ
-+ * as the completion routine when the command is
-+ * handed off to the SLI layer.
-+ */
-+void
-+lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
-+{
-+ struct lpfc_sli *psli;
-+ MAILBOX_t *mb;
-+ struct lpfc_dmabuf *mp;
-+ struct lpfc_nodelist *ndlp;
-+
-+ psli = &phba->sli;
-+ mb = &pmb->mb;
-+
-+ ndlp = (struct lpfc_nodelist *) pmb->context2;
-+ mp = (struct lpfc_dmabuf *) (pmb->context1);
-+
-+ if (mb->mbxStatus) {
-+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
-+ kfree(mp);
-+ mempool_free( pmb, phba->mbox_mem_pool);
-+ lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
-+
-+ /* RegLogin failed, so just use loop map to make discovery
-+ list */
-+ lpfc_disc_list_loopmap(phba);
-+
-+ /* Start discovery */
-+ lpfc_disc_start(phba);
-+ return;
-+ }
-+
-+ pmb->context1 = NULL;
-+
-+ if (ndlp->nlp_rpi != 0)
-+ lpfc_findnode_remove_rpi(phba, ndlp->nlp_rpi);
-+ ndlp->nlp_rpi = mb->un.varWords[0];
-+ lpfc_addnode_rpi(phba, ndlp, ndlp->nlp_rpi);
-+ ndlp->nlp_type |= NLP_FABRIC;
-+ ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
-+ lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);
-+
-+ if (phba->hba_state < LPFC_HBA_READY) {
-+		/* Link up discovery requires Fabric registration. */
-+ lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RNN_ID);
-+ lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RSNN_NN);
-+ lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RFT_ID);
-+ }
-+
-+ phba->fc_ns_retry = 0;
-+ /* Good status, issue CT Request to NameServer */
-+ if (lpfc_ns_cmd(phba, ndlp, SLI_CTNS_GID_FT)) {
-+ /* Cannot issue NameServer Query, so finish up discovery */
-+ lpfc_disc_start(phba);
-+ }
-+
-+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
-+ kfree(mp);
-+ mempool_free( pmb, phba->mbox_mem_pool);
-+
-+ return;
-+}
-+
-+/* Put blp on the bind list */
-+int
-+lpfc_consistent_bind_save(struct lpfc_hba * phba, struct lpfc_bindlist * blp)
-+{
-+ /* Put it at the end of the bind list */
-+ list_add_tail(&blp->nlp_listp, &phba->fc_nlpbind_list);
-+ phba->fc_bind_cnt++;
-+
-+ /* Add scsiid <sid> to BIND list */
-+ lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
-+ "%d:0903 Add scsiid %d to BIND list "
-+ "Data: x%x x%x x%x x%p\n",
-+ phba->brd_no, blp->nlp_sid, phba->fc_bind_cnt,
-+ blp->nlp_DID, blp->nlp_bind_type, blp);
-+
-+ return (0);
-+}
-+
-+int
-+lpfc_nlp_list(struct lpfc_hba * phba, struct lpfc_nodelist * nlp, int list)
-+{
-+ struct lpfc_bindlist *blp;
-+ struct lpfc_target *targetp;
-+ struct lpfc_sli *psli;
-+ psli = &phba->sli;
-+
-+ /* Sanity check to ensure we are not moving to / from the same list */
-+ if((nlp->nlp_flag & NLP_LIST_MASK) == list) {
-+ if(list != NLP_NO_LIST)
-+ return(0);
-+ }
-+
-+ blp = nlp->nlp_listp_bind;
-+
-+ switch(nlp->nlp_flag & NLP_LIST_MASK) {
-+ case NLP_NO_LIST: /* Not on any list */
-+ break;
-+ case NLP_UNUSED_LIST:
-+ phba->fc_unused_cnt--;
-+ list_del(&nlp->nlp_listp);
-+ nlp->nlp_flag &= ~NLP_LIST_MASK;
-+ break;
-+ case NLP_PLOGI_LIST:
-+ phba->fc_plogi_cnt--;
-+ list_del(&nlp->nlp_listp);
-+ nlp->nlp_flag &= ~NLP_LIST_MASK;
-+ break;
-+ case NLP_ADISC_LIST:
-+ phba->fc_adisc_cnt--;
-+ list_del(&nlp->nlp_listp);
-+ nlp->nlp_flag &= ~NLP_LIST_MASK;
-+ break;
-+ case NLP_REGLOGIN_LIST:
-+ phba->fc_reglogin_cnt--;
-+ list_del(&nlp->nlp_listp);
-+ nlp->nlp_flag &= ~NLP_LIST_MASK;
-+ break;
-+ case NLP_PRLI_LIST:
-+ phba->fc_prli_cnt--;
-+ list_del(&nlp->nlp_listp);
-+ nlp->nlp_flag &= ~NLP_LIST_MASK;
-+ break;
-+ case NLP_UNMAPPED_LIST:
-+ phba->fc_unmap_cnt--;
-+ list_del(&nlp->nlp_listp);
-+ nlp->nlp_flag &= ~NLP_LIST_MASK;
-+ nlp->nlp_flag &= ~NLP_TGT_NO_SCSIID;
-+ nlp->nlp_type &= ~NLP_FC_NODE;
-+ phba->nport_event_cnt++;
-+ break;
-+ case NLP_MAPPED_LIST:
-+ phba->fc_map_cnt--;
-+ list_del(&nlp->nlp_listp);
-+ nlp->nlp_flag &= ~NLP_LIST_MASK;
-+ phba->nport_event_cnt++;
-+ lpfc_set_failmask(phba, nlp, LPFC_DEV_DISAPPEARED,
-+ LPFC_SET_BITMASK);
-+ nlp->nlp_type &= ~NLP_FCP_TARGET;
-+ targetp = nlp->nlp_Target;
-+ if (targetp && (list != NLP_MAPPED_LIST)) {
-+ nlp->nlp_Target = NULL;
-+#if defined(RHEL_FC) || defined(SLES_FC)
-+ /*
-+ * Do not block the target if the driver has just reset
-+ * its interface to the hardware.
-+ */
-+ if (phba->hba_state != LPFC_INIT_START)
-+ lpfc_target_block(phba, targetp);
-+#endif
-+ }
-+
-+ break;
-+ case NLP_NPR_LIST:
-+ phba->fc_npr_cnt--;
-+ list_del(&nlp->nlp_listp);
-+ nlp->nlp_flag &= ~NLP_LIST_MASK;
-+ /* Stop delay tmo if taking node off NPR list */
-+ if ((nlp->nlp_flag & NLP_DELAY_TMO) &&
-+ (list != NLP_NPR_LIST)) {
-+ nlp->nlp_flag &= ~NLP_DELAY_TMO;
-+ spin_unlock_irq(phba->host->host_lock);
-+ del_timer_sync(&nlp->nlp_delayfunc);
-+ spin_lock_irq(phba->host->host_lock);
-+ if (!list_empty(&nlp->els_retry_evt.evt_listp))
-+ list_del_init(&nlp->els_retry_evt.
-+ evt_listp);
-+ if (nlp->nlp_flag & NLP_NPR_2B_DISC) {
-+ nlp->nlp_flag &= ~NLP_NPR_2B_DISC;
-+ if (phba->num_disc_nodes) {
-+ /* Check to see if there are more
-+ * PLOGIs to be sent
-+ */
-+ lpfc_more_plogi(phba);
-+ }
-+
-+
-+ if (phba->num_disc_nodes == 0) {
-+ phba->fc_flag &= ~FC_NDISC_ACTIVE;
-+ lpfc_can_disctmo(phba);
-+
-+ if (phba->fc_flag & FC_RSCN_MODE) {
-+ /* Check to see if more RSCNs
-+ * came in while we were
-+ * processing this one.
-+ */
-+ if((phba->fc_rscn_id_cnt==0) &&
-+ (!(phba->fc_flag &
-+ FC_RSCN_DISCOVERY))) {
-+ phba->fc_flag &=
-+ ~FC_RSCN_MODE;
-+ }
-+ else {
-+ lpfc_els_handle_rscn(
-+ phba);
-+ }
-+ }
-+ }
-+ }
-+ }
-+ break;
-+ }
-+
-+ /* Add NPort <did> to <num> list */
-+ lpfc_printf_log(phba,
-+ KERN_INFO,
-+ LOG_NODE,
-+ "%d:0904 Add NPort x%x to %d list Data: x%x x%p\n",
-+ phba->brd_no,
-+ nlp->nlp_DID, list, nlp->nlp_flag, blp);
-+
-+ nlp->nlp_listp_bind = NULL;
-+
-+ switch(list) {
-+ case NLP_NO_LIST: /* No list, just remove it */
-+#if defined(SLES_FC)
-+ targetp = NULL;
-+ if (((nlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) &&
-+ (nlp->nlp_sid != NLP_NO_SID)) {
-+ targetp = phba->device_queue_hash[nlp->nlp_sid];
-+ }
-+#endif
-+ lpfc_nlp_remove(phba, nlp);
-+
-+#if defined(SLES_FC)
-+ if (targetp && targetp->blocked) {
-+ lpfc_target_unblock(phba, targetp);
-+ }
-+#endif
-+
-+ break;
-+ case NLP_UNUSED_LIST:
-+ nlp->nlp_flag |= list;
-+ /* Put it at the end of the unused list */
-+ list_add_tail(&nlp->nlp_listp, &phba->fc_unused_list);
-+ phba->fc_unused_cnt++;
-+ break;
-+ case NLP_PLOGI_LIST:
-+ nlp->nlp_flag |= list;
-+ /* Put it at the end of the plogi list */
-+ list_add_tail(&nlp->nlp_listp, &phba->fc_plogi_list);
-+ phba->fc_plogi_cnt++;
-+ break;
-+ case NLP_ADISC_LIST:
-+ nlp->nlp_flag |= list;
-+ /* Put it at the end of the adisc list */
-+ list_add_tail(&nlp->nlp_listp, &phba->fc_adisc_list);
-+ phba->fc_adisc_cnt++;
-+ break;
-+ case NLP_REGLOGIN_LIST:
-+ nlp->nlp_flag |= list;
-+ /* Put it at the end of the reglogin list */
-+ list_add_tail(&nlp->nlp_listp, &phba->fc_reglogin_list);
-+ phba->fc_reglogin_cnt++;
-+ break;
-+ case NLP_PRLI_LIST:
-+ nlp->nlp_flag |= list;
-+ /* Put it at the end of the prli list */
-+ list_add_tail(&nlp->nlp_listp, &phba->fc_prli_list);
-+ phba->fc_prli_cnt++;
-+ break;
-+ case NLP_UNMAPPED_LIST:
-+ nlp->nlp_flag |= list;
-+ /* Put it at the end of the unmap list */
-+ list_add_tail(&nlp->nlp_listp, &phba->fc_nlpunmap_list);
-+ phba->fc_unmap_cnt++;
-+ phba->nport_event_cnt++;
-+ /* stop nodev tmo if running */
-+ if (nlp->nlp_flag & NLP_NODEV_TMO) {
-+ nlp->nlp_flag &= ~NLP_NODEV_TMO;
-+ spin_unlock_irq(phba->host->host_lock);
-+ del_timer_sync(&nlp->nlp_tmofunc);
-+ spin_lock_irq(phba->host->host_lock);
-+ if (!list_empty(&nlp->nodev_timeout_evt.
-+ evt_listp))
-+ list_del_init(&nlp->nodev_timeout_evt.
-+ evt_listp);
-+ }
-+ nlp->nlp_type |= NLP_FC_NODE;
-+ lpfc_set_failmask(phba, nlp, LPFC_DEV_DISCOVERY_INP,
-+ LPFC_CLR_BITMASK);
-+ break;
-+ case NLP_MAPPED_LIST:
-+ nlp->nlp_flag |= list;
-+ /* Put it at the end of the map list */
-+ list_add_tail(&nlp->nlp_listp, &phba->fc_nlpmap_list);
-+ phba->fc_map_cnt++;
-+ phba->nport_event_cnt++;
-+ /* stop nodev tmo if running */
-+ if (nlp->nlp_flag & NLP_NODEV_TMO) {
-+ nlp->nlp_flag &= ~NLP_NODEV_TMO;
-+ spin_unlock_irq(phba->host->host_lock);
-+ del_timer_sync(&nlp->nlp_tmofunc);
-+ spin_lock_irq(phba->host->host_lock);
-+ if (!list_empty(&nlp->nodev_timeout_evt.
-+ evt_listp))
-+ list_del_init(&nlp->nodev_timeout_evt.
-+ evt_listp);
-+ }
-+ nlp->nlp_type |= NLP_FCP_TARGET;
-+ lpfc_set_failmask(phba, nlp, LPFC_DEV_DISAPPEARED,
-+ LPFC_CLR_BITMASK);
-+ lpfc_set_failmask(phba, nlp, LPFC_DEV_DISCOVERY_INP,
-+ LPFC_CLR_BITMASK);
-+
-+ targetp = NULL;
-+ if (nlp->nlp_sid != NLP_NO_SID)
-+ targetp = phba->device_queue_hash[nlp->nlp_sid];
-+
-+ if (targetp && targetp->pnode) {
-+ nlp->nlp_Target = targetp;
-+#if defined(RHEL_FC) || defined(SLES_FC)
-+ /* Unblock I/Os on target */
-+ if(targetp->blocked)
-+ lpfc_target_unblock(phba, targetp);
-+#endif
-+ }
-+ break;
-+ case NLP_NPR_LIST:
-+ nlp->nlp_flag |= list;
-+ /* Put it at the end of the npr list */
-+ list_add_tail(&nlp->nlp_listp, &phba->fc_npr_list);
-+ phba->fc_npr_cnt++;
-+
-+ /*
-+ * Sanity check for Fabric entity.
-+ * Set nodev_tmo for NPR state, for Fabric use 1 sec.
-+		 * Set nodev_tmo for the NPR state; for Fabric nodes, use 1 sec.
-+ if (nlp->nlp_type & NLP_FABRIC) {
-+ mod_timer(&nlp->nlp_tmofunc, jiffies + HZ);
-+ }
-+ else {
-+ mod_timer(&nlp->nlp_tmofunc,
-+ jiffies + HZ * phba->cfg_nodev_tmo);
-+ }
-+ nlp->nlp_flag |= NLP_NODEV_TMO;
-+ nlp->nlp_flag &= ~NLP_RCV_PLOGI;
-+ break;
-+ case NLP_JUST_DQ:
-+ break;
-+ }
-+
-+ if (blp) {
-+ nlp->nlp_flag &= ~NLP_SEED_MASK;
-+ nlp->nlp_Target = NULL;
-+ lpfc_consistent_bind_save(phba, blp);
-+ }
-+ return (0);
-+}
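
lpfc_nlp_list() above is essentially a state-transition helper: it removes a node from whichever per-state list it currently sits on, decrements that list's counter, then appends it to the destination list, increments the new counter, and runs the side effects of the transition. Below is a minimal sketch of just the counted-list part of that pattern; the structures and helpers (node, state_list, move_node) are invented for illustration and do not mirror the driver's types.

/* Sketch of moving an entry between counted, circular doubly-linked
 * lists, as lpfc_nlp_list() does for the per-state node lists. */
struct node {
	struct node *prev, *next;
	int state;                      /* which list the node is on */
};

struct state_list {
	struct node head;               /* circular list with a dummy head */
	unsigned int count;             /* counterpart of fc_*_cnt         */
};

static void state_list_init(struct state_list *l)
{
	l->head.prev = l->head.next = &l->head;
	l->count = 0;
}

static void list_del_node(struct node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

static void list_add_tail_node(struct node *n, struct state_list *l)
{
	n->prev = l->head.prev;
	n->next = &l->head;
	l->head.prev->next = n;
	l->head.prev = n;
}

/* Remove the node from its current list (if any), then append it to
 * the destination list, keeping both counters in step. */
static void move_node(struct node *n, struct state_list *from,
		      struct state_list *to, int new_state)
{
	if (from) {
		list_del_node(n);
		from->count--;
	}
	list_add_tail_node(n, to);
	to->count++;
	n->state = new_state;
}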
-+
-+/*
-+ * Start / ReStart rescue timer for Discovery / RSCN handling
-+ */
-+void
-+lpfc_set_disctmo(struct lpfc_hba * phba)
-+{
-+ uint32_t tmo;
-+
-+ tmo = ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT + 3);
-+
-+ mod_timer(&phba->fc_disctmo, jiffies + HZ * tmo);
-+ phba->fc_flag |= FC_DISC_TMO;
-+
-+ /* Start Discovery Timer state <hba_state> */
-+ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
-+ "%d:0247 Start Discovery Timer state x%x "
-+ "Data: x%x x%lx x%x x%x\n",
-+ phba->brd_no,
-+ phba->hba_state, tmo, (unsigned long)&phba->fc_disctmo,
-+ phba->fc_plogi_cnt, phba->fc_adisc_cnt);
-+
-+ return;
-+}
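
For a concrete feel for the timer arithmetic in lpfc_set_disctmo(), the snippet below plugs in assumed example values (an R_A_TOV of 10 seconds and a driver timeout of 16 seconds; these are illustrative, not the driver's configured defaults) and evaluates the same expression.

/* Illustration only: size the discovery timer the way
 * lpfc_set_disctmo() does, using assumed example values. */
#include <stdio.h>

int main(void)
{
	unsigned int fc_ratov = 10;   /* assumed R_A_TOV in seconds         */
	unsigned int drvr_tmo = 16;   /* assumed driver timeout in seconds  */
	unsigned int tmo = (fc_ratov * 2) + drvr_tmo + 3;

	/* The driver then arms the timer at jiffies + HZ * tmo. */
	printf("discovery timeout = %u seconds\n", tmo);   /* prints 39 */
	return 0;
}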
-+
-+/*
-+ * Cancel rescue timer for Discovery / RSCN handling
-+ */
-+int
-+lpfc_can_disctmo(struct lpfc_hba * phba)
-+{
-+	/* Turn off the discovery timer if it's running */
-+ if(phba->fc_flag & FC_DISC_TMO) {
-+ phba->fc_flag &= ~FC_DISC_TMO;
-+ spin_unlock_irq(phba->host->host_lock);
-+ del_timer_sync(&phba->fc_disctmo);
-+ spin_lock_irq(phba->host->host_lock);
-+ phba->work_hba_events &= ~WORKER_DISC_TMO;
-+ }
-+
-+ /* Cancel Discovery Timer state <hba_state> */
-+ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
-+ "%d:0248 Cancel Discovery Timer state x%x "
-+ "Data: x%x x%x x%x\n",
-+ phba->brd_no, phba->hba_state, phba->fc_flag,
-+ phba->fc_plogi_cnt, phba->fc_adisc_cnt);
-+
-+ return (0);
-+}
-+
-+/*
-+ * Check specified ring for outstanding IOCB on the SLI queue
-+ * Return true if iocb matches the specified nport
-+ */
-+int
-+lpfc_check_sli_ndlp(struct lpfc_hba * phba,
-+ struct lpfc_sli_ring * pring,
-+ struct lpfc_iocbq * iocb, struct lpfc_nodelist * ndlp)
-+{
-+ struct lpfc_sli *psli;
-+ IOCB_t *icmd;
-+
-+ psli = &phba->sli;
-+ icmd = &iocb->iocb;
-+ if (pring->ringno == LPFC_ELS_RING) {
-+ switch (icmd->ulpCommand) {
-+ case CMD_GEN_REQUEST64_CR:
-+ if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi)
-+ return (1);
-+ case CMD_ELS_REQUEST64_CR:
-+ case CMD_XMIT_ELS_RSP64_CX:
-+ if (iocb->context1 == (uint8_t *) ndlp)
-+ return (1);
-+ }
-+ } else if (pring->ringno == psli->ip_ring) {
-+
-+ } else if (pring->ringno == psli->fcp_ring) {
-+ /* Skip match check if waiting to relogin to FCP target */
-+ if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
-+ (ndlp->nlp_flag & NLP_DELAY_TMO)) {
-+ return (0);
-+ }
-+ if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) {
-+ return (1);
-+ }
-+ } else if (pring->ringno == psli->next_ring) {
-+
-+ }
-+ return (0);
-+}
-+
-+/*
-+ * Free resources / clean up outstanding I/Os
-+ * associated with nlp_rpi in the LPFC_NODELIST entry.
-+ */
-+static int
-+lpfc_no_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
-+{
-+ struct lpfc_sli *psli;
-+ struct lpfc_sli_ring *pring;
-+ struct lpfc_iocbq *iocb, *next_iocb;
-+ IOCB_t *icmd;
-+ uint32_t rpi, i;
-+
-+ psli = &phba->sli;
-+ rpi = ndlp->nlp_rpi;
-+ if (rpi) {
-+ /* Now process each ring */
-+ for (i = 0; i < psli->sliinit.num_rings; i++) {
-+ pring = &psli->ring[i];
-+
-+ list_for_each_entry_safe(iocb, next_iocb, &pring->txq,
-+ list) {
-+ /*
-+ * Check to see if iocb matches the nport we are
-+ * looking for
-+ */
-+ if ((lpfc_check_sli_ndlp
-+ (phba, pring, iocb, ndlp))) {
-+					/* It matches, so dequeue it and call
-+					   the completion with an error */
-+ list_del(&iocb->list);
-+ pring->txq_cnt--;
-+ if (iocb->iocb_cmpl) {
-+ icmd = &iocb->iocb;
-+ icmd->ulpStatus =
-+ IOSTAT_LOCAL_REJECT;
-+ icmd->un.ulpWord[4] =
-+ IOERR_SLI_ABORTED;
-+ (iocb->iocb_cmpl) (phba,
-+ iocb, iocb);
-+ } else {
-+ mempool_free(iocb,
-+ phba->iocb_mem_pool);
-+ }
-+ }
-+ }
-+ /* Everything that matches on txcmplq will be returned
-+ * by firmware with a no rpi error.
-+ */
-+ }
-+ }
-+ return (0);
-+}
-+
-+/*
-+ * Free rpi associated with LPFC_NODELIST entry.
-+ * This routine is called from lpfc_freenode(), when we are removing
-+ * a LPFC_NODELIST entry. It is also called if the driver initiates a
-+ * LOGO that completes successfully, and we are waiting to PLOGI back
-+ * to the remote NPort. In addition, it is called after we receive
-+ * an unsolicited ELS cmd, send back a rsp, the rsp completes, and
-+ * we are waiting to PLOGI back to the remote NPort.
-+ */
-+int
-+lpfc_unreg_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
-+{
-+ LPFC_MBOXQ_t *mbox;
-+
-+ if (ndlp->nlp_rpi) {
-+ if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC))) {
-+ lpfc_unreg_login(phba, ndlp->nlp_rpi, mbox);
-+ mbox->mbox_cmpl=lpfc_sli_def_mbox_cmpl;
-+ if (lpfc_sli_issue_mbox
-+ (phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB))
-+ == MBX_NOT_FINISHED) {
-+ mempool_free( mbox, phba->mbox_mem_pool);
-+ }
-+ }
-+ lpfc_findnode_remove_rpi(phba, ndlp->nlp_rpi);
-+ lpfc_no_rpi(phba, ndlp);
-+ ndlp->nlp_rpi = 0;
-+ lpfc_set_failmask(phba, ndlp, LPFC_DEV_DISCONNECTED,
-+ LPFC_SET_BITMASK);
-+ return 1;
-+ }
-+ return 0;
-+}
-+
-+/*
-+ * Free resources associated with LPFC_NODELIST entry
-+ * so it can be freed.
-+ */
-+static int
-+lpfc_freenode(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
-+{
-+ struct lpfc_target *targetp;
-+ LPFC_MBOXQ_t *mb, *nextmb;
-+ LPFC_DISC_EVT_t *evtp, *next_evtp;
-+ struct lpfc_dmabuf *mp;
-+ struct lpfc_sli *psli;
-+ int scsid;
-+
-+	/* The psli variable gets rid of the long pointer dereference. */
-+ psli = &phba->sli;
-+
-+ /* Cleanup node for NPort <nlp_DID> */
-+ lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
-+ "%d:0900 Cleanup node for NPort x%x "
-+ "Data: x%x x%x x%x\n",
-+ phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag,
-+ ndlp->nlp_state, ndlp->nlp_rpi);
-+
-+ lpfc_nlp_list(phba, ndlp, NLP_JUST_DQ);
-+
-+ /* cleanup any ndlp on mbox q waiting for reglogin cmpl */
-+ if ((mb = psli->mbox_active)) {
-+ if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
-+ (ndlp == (struct lpfc_nodelist *) mb->context2)) {
-+ mb->context2 = NULL;
-+ mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
-+ }
-+ }
-+ list_for_each_entry_safe(mb, nextmb, &psli->mboxq, list) {
-+ if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
-+ (ndlp == (struct lpfc_nodelist *) mb->context2)) {
-+ mp = (struct lpfc_dmabuf *) (mb->context1);
-+ if (mp) {
-+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
-+ kfree(mp);
-+ }
-+ list_del(&mb->list);
-+ mempool_free(mb, phba->mbox_mem_pool);
-+ }
-+ }
-+ /* cleanup any ndlp on disc event q waiting for reglogin cmpl */
-+ list_for_each_entry_safe(evtp, next_evtp, &phba->dpc_disc, evt_listp) {
-+ mb = (LPFC_MBOXQ_t *)(evtp->evt_arg1);
-+ if ((evtp->evt == LPFC_EVT_MBOX) &&
-+ (mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
-+ (ndlp == (struct lpfc_nodelist *) mb->context2)) {
-+ mp = (struct lpfc_dmabuf *) (mb->context1);
-+ if (mp) {
-+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
-+ kfree(mp);
-+ }
-+ mempool_free(mb, phba->mbox_mem_pool);
-+ list_del_init(&evtp->evt_listp);
-+ kfree(evtp);
-+ }
-+ }
-+
-+ lpfc_els_abort(phba,ndlp,0);
-+ if(ndlp->nlp_flag & NLP_NODEV_TMO) {
-+ ndlp->nlp_flag &= ~NLP_NODEV_TMO;
-+ spin_unlock_irq(phba->host->host_lock);
-+ del_timer_sync(&ndlp->nlp_tmofunc);
-+ spin_lock_irq(phba->host->host_lock);
-+ if (!list_empty(&ndlp->nodev_timeout_evt.
-+ evt_listp))
-+ list_del_init(&ndlp->nodev_timeout_evt.
-+ evt_listp);
-+ }
-+
-+ if(ndlp->nlp_flag & NLP_DELAY_TMO) {
-+ ndlp->nlp_flag &= ~NLP_DELAY_TMO;
-+ spin_unlock_irq(phba->host->host_lock);
-+ del_timer_sync(&ndlp->nlp_delayfunc);
-+ spin_lock_irq(phba->host->host_lock);
-+ if (!list_empty(&ndlp->els_retry_evt.
-+ evt_listp))
-+ list_del_init(&ndlp->els_retry_evt.
-+ evt_listp);
-+ }
-+
-+ lpfc_unreg_rpi(phba, ndlp);
-+
-+ for(scsid=0;scsid<MAX_FCP_TARGET;scsid++) {
-+ targetp = phba->device_queue_hash[scsid];
-+ /* First see if the SCSI ID has an allocated struct
-+ lpfc_target */
-+ if (targetp) {
-+ if (targetp->pnode == ndlp) {
-+ targetp->pnode = NULL;
-+ ndlp->nlp_Target = NULL;
-+#ifdef RHEL_FC
-+ /*
-+ * This code does not apply to SLES9 since there
-+ * is no starget defined in the midlayer.
-+ * Additionally, dynamic target discovery to the
-+ * midlayer is not supported yet.
-+ */
-+ if (targetp->starget) {
-+ /* Remove SCSI target / SCSI Hotplug */
-+ lpfc_target_remove(phba, targetp);
-+ }
-+#endif /* RHEL_FC */
-+ break;
-+ }
-+ }
-+ }
-+ return (0);
-+}
-+
-+/*
-+ * Check to see if we can free the nlp back to the freelist.
-+ * If we are in the middle of using the nlp in the discovery state
-+ * machine, defer the free till we reach the end of the state machine.
-+ */
-+int
-+lpfc_nlp_remove(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
-+{
-+
-+ if(ndlp->nlp_flag & NLP_NODEV_TMO) {
-+ ndlp->nlp_flag &= ~NLP_NODEV_TMO;
-+ spin_unlock_irq(phba->host->host_lock);
-+ del_timer_sync(&ndlp->nlp_tmofunc);
-+ spin_lock_irq(phba->host->host_lock);
-+ if (!list_empty(&ndlp->nodev_timeout_evt.
-+ evt_listp))
-+ list_del_init(&ndlp->nodev_timeout_evt.
-+ evt_listp);
-+ }
-+
-+ if(ndlp->nlp_flag & NLP_DELAY_TMO) {
-+ ndlp->nlp_flag &= ~NLP_DELAY_TMO;
-+ spin_unlock_irq(phba->host->host_lock);
-+ del_timer_sync(&ndlp->nlp_delayfunc);
-+ spin_lock_irq(phba->host->host_lock);
-+ if (!list_empty(&ndlp->els_retry_evt.
-+ evt_listp))
-+ list_del_init(&ndlp->els_retry_evt.
-+ evt_listp);
-+ }
-+
-+ if (ndlp->nlp_disc_refcnt) {
-+ ndlp->nlp_flag |= NLP_DELAY_REMOVE;
-+ }
-+ else {
-+ lpfc_freenode(phba, ndlp);
-+ mempool_free( ndlp, phba->nlp_mem_pool);
-+ }
-+ return(0);
-+}
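
lpfc_nlp_remove() defers the actual free while the discovery state machine still holds a reference (nlp_disc_refcnt is nonzero), only flagging the node with NLP_DELAY_REMOVE so the last user releases it. A stripped-down sketch of that deferred-free idea, with invented names (obj, obj_remove, obj_put), might look like this:

/* Sketch of the deferred-free pattern lpfc_nlp_remove() relies on. */
#include <stdlib.h>

#define DEFER_REMOVE  0x1

struct obj {
	unsigned int refcnt;     /* counterpart of nlp_disc_refcnt */
	unsigned int flags;      /* counterpart of nlp_flag        */
};

/* Called when the owner wants the object gone. */
static void obj_remove(struct obj *o)
{
	if (o->refcnt) {
		/* Still in use by the state machine: flag it and let the
		 * last user perform the actual free. */
		o->flags |= DEFER_REMOVE;
		return;
	}
	free(o);
}

/* Called when a user of the object drops its reference. */
static void obj_put(struct obj *o)
{
	if (--o->refcnt == 0 && (o->flags & DEFER_REMOVE))
		free(o);
}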
-+
-+static int
-+lpfc_matchdid(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, uint32_t did)
-+{
-+ D_ID mydid;
-+ D_ID ndlpdid;
-+ D_ID matchdid;
-+
-+ if (did == Bcast_DID)
-+ return (0);
-+
-+ if (ndlp->nlp_DID == 0) {
-+ return (0);
-+ }
-+
-+ /* First check for Direct match */
-+ if (ndlp->nlp_DID == did)
-+ return (1);
-+
-+ /* Next check for area/domain identically equals 0 match */
-+ mydid.un.word = phba->fc_myDID;
-+ if ((mydid.un.b.domain == 0) && (mydid.un.b.area == 0)) {
-+ return (0);
-+ }
-+
-+ matchdid.un.word = did;
-+ ndlpdid.un.word = ndlp->nlp_DID;
-+ if (matchdid.un.b.id == ndlpdid.un.b.id) {
-+ if ((mydid.un.b.domain == matchdid.un.b.domain) &&
-+ (mydid.un.b.area == matchdid.un.b.area)) {
-+ if ((ndlpdid.un.b.domain == 0) &&
-+ (ndlpdid.un.b.area == 0)) {
-+ if (ndlpdid.un.b.id)
-+ return (1);
-+ }
-+ return (0);
-+ }
-+
-+ matchdid.un.word = ndlp->nlp_DID;
-+ if ((mydid.un.b.domain == ndlpdid.un.b.domain) &&
-+ (mydid.un.b.area == ndlpdid.un.b.area)) {
-+ if ((matchdid.un.b.domain == 0) &&
-+ (matchdid.un.b.area == 0)) {
-+ if (matchdid.un.b.id)
-+ return (1);
-+ }
-+ }
-+ }
-+ return (0);
-+}
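
lpfc_matchdid() reasons about the three bytes of a Fibre Channel D_ID (domain, area, port/AL_PA) and treats a zero domain and area as a private-loop style address. The small standalone example below, with made-up helper names, just unpacks a D_ID into those fields to make the comparison logic above easier to follow.

/* Simplified illustration of the addressing lpfc_matchdid() works with:
 * a 24-bit FC D_ID split into domain, area and port (AL_PA) bytes. */
#include <stdint.h>
#include <stdio.h>

struct fc_did {
	uint8_t domain;   /* bits 23..16 */
	uint8_t area;     /* bits 15..8  */
	uint8_t port;     /* bits  7..0  */
};

static struct fc_did did_unpack(uint32_t did)
{
	struct fc_did d;

	d.domain = (did >> 16) & 0xff;
	d.area   = (did >> 8) & 0xff;
	d.port   = did & 0xff;
	return d;
}

int main(void)
{
	/* A fabric-assigned address: domain 0x01, area 0x02, port 0xe8. */
	struct fc_did d = did_unpack(0x0102e8);

	printf("domain=%02x area=%02x port=%02x\n", d.domain, d.area, d.port);

	/* A private-loop ALPA has domain == 0 and area == 0, which is why
	 * lpfc_matchdid() treats a zero domain/area specially when
	 * comparing a loop address against a fabric-style D_ID. */
	return 0;
}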
-+
-+/* Search for a nodelist entry on a specific list */
-+struct lpfc_nodelist *
-+lpfc_findnode_wwpn(struct lpfc_hba * phba, uint32_t order,
-+ struct lpfc_name * wwpn)
-+{
-+ struct lpfc_nodelist *ndlp, *next_ndlp;
-+ uint32_t data1;
-+
-+ if (order & NLP_SEARCH_UNMAPPED) {
-+ list_for_each_entry_safe(ndlp, next_ndlp,
-+ &phba->fc_nlpunmap_list, nlp_listp) {
-+ if (memcmp(&ndlp->nlp_portname, wwpn,
-+ sizeof(struct lpfc_name)) == 0) {
-+
-+ data1 = (((uint32_t) ndlp->nlp_state << 24) |
-+ ((uint32_t) ndlp->nlp_xri << 16) |
-+ ((uint32_t) ndlp->nlp_type << 8) |
-+ ((uint32_t) ndlp->nlp_rpi & 0xff));
-+ /* FIND node DID unmapped */
-+ lpfc_printf_log(phba,
-+ KERN_INFO,
-+ LOG_NODE,
-+ "%d:0911 FIND node DID unmapped"
-+ " Data: x%p x%x x%x x%x\n",
-+ phba->brd_no,
-+ ndlp, ndlp->nlp_DID,
-+ ndlp->nlp_flag, data1);
-+ return (ndlp);
-+ }
-+ }
-+ }
-+
-+ if (order & NLP_SEARCH_MAPPED) {
-+ list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nlpmap_list,
-+ nlp_listp) {
-+ if (memcmp(&ndlp->nlp_portname, wwpn,
-+ sizeof(struct lpfc_name)) == 0) {
-+
-+ data1 = (((uint32_t) ndlp->nlp_state << 24) |
-+ ((uint32_t) ndlp->nlp_xri << 16) |
-+ ((uint32_t) ndlp->nlp_type << 8) |
-+ ((uint32_t) ndlp->nlp_rpi & 0xff));
-+ /* FIND node DID mapped */
-+ lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
-+ "%d:0901 FIND node DID mapped "
-+ "Data: x%p x%x x%x x%x\n",
-+ phba->brd_no,
-+ ndlp, ndlp->nlp_DID,
-+ ndlp->nlp_flag, data1);
-+ return (ndlp);
-+ }
-+ }
-+ }
-+
-+ /* no match found */
-+ return ((struct lpfc_nodelist *) 0);
-+}
-+/* Search for a nodelist entry on a specific list */
-+struct lpfc_nodelist *
-+lpfc_findnode_wwnn(struct lpfc_hba * phba, uint32_t order,
-+ struct lpfc_name * wwnn)
-+{
-+ struct lpfc_nodelist *ndlp, *next_ndlp;
-+ uint32_t data1;
-+
-+ if (order & NLP_SEARCH_UNMAPPED) {
-+ list_for_each_entry_safe(ndlp, next_ndlp,
-+ &phba->fc_nlpunmap_list, nlp_listp) {
-+ if (memcmp(&ndlp->nlp_nodename, wwnn,
-+ sizeof(struct lpfc_name)) == 0) {
-+
-+ data1 = (((uint32_t) ndlp->nlp_state << 24) |
-+ ((uint32_t) ndlp->nlp_xri << 16) |
-+ ((uint32_t) ndlp->nlp_type << 8) |
-+ ((uint32_t) ndlp->nlp_rpi & 0xff));
-+ /* FIND node DID unmapped */
-+ lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
-+ "%d:0910 FIND node DID unmapped"
-+					" Data: x%p x%x x%x x%x\n",
-+ phba->brd_no,
-+ ndlp, ndlp->nlp_DID,
-+ ndlp->nlp_flag, data1);
-+ return (ndlp);
-+ }
-+ }
-+ }
-+
-+ if (order & NLP_SEARCH_MAPPED) {
-+ list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nlpmap_list,
-+ nlp_listp) {
-+ if (memcmp(&ndlp->nlp_nodename, wwnn,
-+ sizeof(struct lpfc_name)) == 0) {
-+
-+ data1 = (((uint32_t) ndlp->nlp_state << 24) |
-+ ((uint32_t) ndlp->nlp_xri << 16) |
-+ ((uint32_t) ndlp->nlp_type << 8) |
-+ ((uint32_t) ndlp->nlp_rpi & 0xff));
-+ /* FIND node did mapped */
-+ lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
-+ "%d:0902 FIND node DID mapped "
-+ "Data: x%p x%x x%x x%x\n",
-+ phba->brd_no,
-+ ndlp, ndlp->nlp_DID,
-+ ndlp->nlp_flag, data1);
-+ return (ndlp);
-+ }
-+ }
-+ }
-+
-+ /* no match found */
-+ return ((struct lpfc_nodelist *) 0);
-+}
-+/* Search for a nodelist entry on a specific list */
-+struct lpfc_nodelist *
-+lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order, uint32_t did)
-+{
-+ struct lpfc_nodelist *ndlp, *next_ndlp;
-+ uint32_t data1;
-+
-+ if (order & NLP_SEARCH_UNMAPPED) {
-+ list_for_each_entry_safe(ndlp, next_ndlp,
-+ &phba->fc_nlpunmap_list, nlp_listp) {
-+ if (lpfc_matchdid(phba, ndlp, did)) {
-+ data1 = (((uint32_t) ndlp->nlp_state << 24) |
-+ ((uint32_t) ndlp->nlp_xri << 16) |
-+ ((uint32_t) ndlp->nlp_type << 8) |
-+ ((uint32_t) ndlp->nlp_rpi & 0xff));
-+ /* FIND node DID unmapped */
-+ lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
-+ "%d:0929 FIND node DID unmapped"
-+ " Data: x%p x%x x%x x%x\n",
-+ phba->brd_no,
-+ ndlp, ndlp->nlp_DID,
-+ ndlp->nlp_flag, data1);
-+ return (ndlp);
-+ }
-+ }
-+ }
-+
-+ if (order & NLP_SEARCH_MAPPED) {
-+ list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nlpmap_list,
-+ nlp_listp) {
-+ if (lpfc_matchdid(phba, ndlp, did)) {
-+
-+ data1 = (((uint32_t) ndlp->nlp_state << 24) |
-+ ((uint32_t) ndlp->nlp_xri << 16) |
-+ ((uint32_t) ndlp->nlp_type << 8) |
-+ ((uint32_t) ndlp->nlp_rpi & 0xff));
-+ /* FIND node DID mapped */
-+ lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
-+ "%d:0930 FIND node DID mapped "
-+ "Data: x%p x%x x%x x%x\n",
-+ phba->brd_no,
-+ ndlp, ndlp->nlp_DID,
-+ ndlp->nlp_flag, data1);
-+ return (ndlp);
-+ }
-+ }
-+ }
-+
-+ if (order & NLP_SEARCH_PLOGI) {
-+ list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_plogi_list,
-+ nlp_listp) {
-+ if (lpfc_matchdid(phba, ndlp, did)) {
-+
-+ data1 = (((uint32_t) ndlp->nlp_state << 24) |
-+ ((uint32_t) ndlp->nlp_xri << 16) |
-+ ((uint32_t) ndlp->nlp_type << 8) |
-+ ((uint32_t) ndlp->nlp_rpi & 0xff));
-+ /* LOG change to PLOGI */
-+ /* FIND node DID plogi */
-+ lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
-+ "%d:0908 FIND node DID plogi "
-+ "Data: x%p x%x x%x x%x\n",
-+ phba->brd_no,
-+ ndlp, ndlp->nlp_DID,
-+ ndlp->nlp_flag, data1);
-+ return (ndlp);
-+ }
-+ }
-+ }
-+
-+ if (order & NLP_SEARCH_ADISC) {
-+ list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_adisc_list,
-+ nlp_listp) {
-+ if (lpfc_matchdid(phba, ndlp, did)) {
-+
-+ data1 = (((uint32_t) ndlp->nlp_state << 24) |
-+ ((uint32_t) ndlp->nlp_xri << 16) |
-+ ((uint32_t) ndlp->nlp_type << 8) |
-+ ((uint32_t) ndlp->nlp_rpi & 0xff));
-+ /* LOG change to ADISC */
-+ /* FIND node DID adisc */
-+ lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
-+ "%d:0931 FIND node DID adisc "
-+ "Data: x%p x%x x%x x%x\n",
-+ phba->brd_no,
-+ ndlp, ndlp->nlp_DID,
-+ ndlp->nlp_flag, data1);
-+ return (ndlp);
-+ }
-+ }
-+ }
-+
-+ if (order & NLP_SEARCH_REGLOGIN) {
-+ list_for_each_entry_safe(ndlp, next_ndlp,
-+ &phba->fc_reglogin_list, nlp_listp) {
-+ if (lpfc_matchdid(phba, ndlp, did)) {
-+
-+ data1 = (((uint32_t) ndlp->nlp_state << 24) |
-+ ((uint32_t) ndlp->nlp_xri << 16) |
-+ ((uint32_t) ndlp->nlp_type << 8) |
-+ ((uint32_t) ndlp->nlp_rpi & 0xff));
-+ /* LOG change to REGLOGIN */
-+ /* FIND node DID reglogin */
-+ lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
-+ "%d:0933 FIND node DID reglogin"
-+ " Data: x%p x%x x%x x%x\n",
-+ phba->brd_no,
-+ ndlp, ndlp->nlp_DID,
-+ ndlp->nlp_flag, data1);
-+ return (ndlp);
-+ }
-+ }
-+ }
-+
-+ if (order & NLP_SEARCH_PRLI) {
-+ list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_prli_list,
-+ nlp_listp) {
-+ if (lpfc_matchdid(phba, ndlp, did)) {
-+
-+ data1 = (((uint32_t) ndlp->nlp_state << 24) |
-+ ((uint32_t) ndlp->nlp_xri << 16) |
-+ ((uint32_t) ndlp->nlp_type << 8) |
-+ ((uint32_t) ndlp->nlp_rpi & 0xff));
-+ /* LOG change to PRLI */
-+ /* FIND node DID prli */
-+ lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
-+ "%d:0934 FIND node DID prli "
-+ "Data: x%p x%x x%x x%x\n",
-+ phba->brd_no,
-+ ndlp, ndlp->nlp_DID,
-+ ndlp->nlp_flag, data1);
-+ return (ndlp);
-+ }
-+ }
-+ }
-+
-+ if (order & NLP_SEARCH_NPR) {
-+ list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list,
-+ nlp_listp) {
-+ if (lpfc_matchdid(phba, ndlp, did)) {
-+
-+ data1 = (((uint32_t) ndlp->nlp_state << 24) |
-+ ((uint32_t) ndlp->nlp_xri << 16) |
-+ ((uint32_t) ndlp->nlp_type << 8) |
-+ ((uint32_t) ndlp->nlp_rpi & 0xff));
-+ /* LOG change to NPR */
-+ /* FIND node DID npr */
-+ lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
-+ "%d:0935 FIND node DID npr "
-+ "Data: x%p x%x x%x x%x\n",
-+ phba->brd_no,
-+ ndlp, ndlp->nlp_DID,
-+ ndlp->nlp_flag, data1);
-+ return (ndlp);
-+ }
-+ }
-+ }
-+
-+ if (order & NLP_SEARCH_UNUSED) {
-+		list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_unused_list,
-+ nlp_listp) {
-+ if (lpfc_matchdid(phba, ndlp, did)) {
-+
-+ data1 = (((uint32_t) ndlp->nlp_state << 24) |
-+ ((uint32_t) ndlp->nlp_xri << 16) |
-+ ((uint32_t) ndlp->nlp_type << 8) |
-+ ((uint32_t) ndlp->nlp_rpi & 0xff));
-+ /* LOG change to UNUSED */
-+ /* FIND node DID unused */
-+ lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
-+ "%d:0936 FIND node DID unused "
-+ "Data: x%p x%x x%x x%x\n",
-+ phba->brd_no,
-+ ndlp, ndlp->nlp_DID,
-+ ndlp->nlp_flag, data1);
-+ return (ndlp);
-+ }
-+ }
-+ }
-+
-+ /* FIND node did <did> NOT FOUND */
-+ lpfc_printf_log(phba,
-+ KERN_INFO,
-+ LOG_NODE,
-+ "%d:0932 FIND node did x%x NOT FOUND Data: x%x\n",
-+ phba->brd_no, did, order);
-+
-+ /* no match found */
-+ return ((struct lpfc_nodelist *) 0);
-+}
-+
-+struct lpfc_nodelist *
-+lpfc_setup_disc_node(struct lpfc_hba * phba, uint32_t did)
-+{
-+ struct lpfc_nodelist *ndlp;
-+ uint32_t flg;
-+
-+ if((ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, did)) == 0) {
-+ if ((phba->hba_state == LPFC_HBA_READY) &&
-+ ((lpfc_rscn_payload_check(phba, did) == 0)))
-+ return NULL;
-+ ndlp = (struct lpfc_nodelist *)
-+ mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC);
-+ if (!ndlp)
-+ return NULL;
-+ lpfc_nlp_init(phba, ndlp, did);
-+ ndlp->nlp_state = NLP_STE_NPR_NODE;
-+ lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
-+ ndlp->nlp_flag |= NLP_NPR_2B_DISC;
-+ return ndlp;
-+ }
-+ if ((phba->hba_state == LPFC_HBA_READY) &&
-+ (phba->fc_flag & FC_RSCN_MODE)) {
-+ if(lpfc_rscn_payload_check(phba, did)) {
-+ ndlp->nlp_flag |= NLP_NPR_2B_DISC;
-+ }
-+ else {
-+ ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
-+ ndlp = NULL;
-+ }
-+ }
-+ else {
-+ flg = ndlp->nlp_flag & NLP_LIST_MASK;
-+ if ((flg == NLP_ADISC_LIST) ||
-+ (flg == NLP_PLOGI_LIST)) {
-+ return NULL;
-+ }
-+ ndlp->nlp_state = NLP_STE_NPR_NODE;
-+ lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
-+ ndlp->nlp_flag |= NLP_NPR_2B_DISC;
-+ }
-+ return ndlp;
-+}
-+
-+/* Build a list of nodes to discover based on the loopmap */
-+void
-+lpfc_disc_list_loopmap(struct lpfc_hba * phba)
-+{
-+ int j;
-+ uint32_t alpa, index;
-+
-+ if (phba->hba_state <= LPFC_LINK_DOWN) {
-+ return;
-+ }
-+ if (phba->fc_topology != TOPOLOGY_LOOP) {
-+ return;
-+ }
-+
-+ /* Check for loop map present or not */
-+ if (phba->alpa_map[0]) {
-+ for (j = 1; j <= phba->alpa_map[0]; j++) {
-+ alpa = phba->alpa_map[j];
-+
-+ if (((phba->fc_myDID & 0xff) == alpa) || (alpa == 0)) {
-+ continue;
-+ }
-+ lpfc_setup_disc_node(phba, alpa);
-+ }
-+ } else {
-+ /* No alpamap, so try all alpa's */
-+ for (j = 0; j < FC_MAXLOOP; j++) {
-+ /* If cfg_scan_down is set, start from highest
-+ * ALPA (0xef) to lowest (0x1).
-+ */
-+ if (phba->cfg_scan_down)
-+ index = j;
-+ else
-+ index = FC_MAXLOOP - j - 1;
-+ alpa = lpfcAlpaArray[index];
-+ if ((phba->fc_myDID & 0xff) == alpa) {
-+ continue;
-+ }
-+
-+ lpfc_setup_disc_node(phba, alpa);
-+ }
-+ }
-+ return;
-+}
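
The loop map that lpfc_disc_list_loopmap() walks stores the number of valid ALPAs in element 0 and the ALPAs themselves in elements 1..N; the driver skips its own address and zero entries. A tiny standalone illustration of that layout, using made-up map contents, is shown here:

/* Walk a loop map laid out the way lpfc_disc_list_loopmap() expects. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t alpa_map[128] = { 3, 0x01, 0xe8, 0xef };  /* 3 valid entries */
	uint8_t my_alpa = 0xe8;                           /* our own address */
	int j;

	for (j = 1; j <= alpa_map[0]; j++) {
		uint8_t alpa = alpa_map[j];

		if (alpa == my_alpa || alpa == 0)
			continue;        /* skip ourselves and empty slots */
		printf("would set up discovery node for ALPA 0x%02x\n", alpa);
	}
	return 0;
}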
-+
-+/* Start Link up / RSCN discovery on NPR list */
-+void
-+lpfc_disc_start(struct lpfc_hba * phba)
-+{
-+ struct lpfc_sli *psli;
-+ LPFC_MBOXQ_t *mbox;
-+ struct lpfc_nodelist *ndlp, *next_ndlp;
-+ uint32_t did_changed, num_sent;
-+ uint32_t clear_la_pending;
-+
-+ psli = &phba->sli;
-+
-+ if (phba->hba_state <= LPFC_LINK_DOWN) {
-+ return;
-+ }
-+ if (phba->hba_state == LPFC_CLEAR_LA)
-+ clear_la_pending = 1;
-+ else
-+ clear_la_pending = 0;
-+
-+ if (phba->hba_state < LPFC_HBA_READY) {
-+ phba->hba_state = LPFC_DISC_AUTH;
-+ }
-+ lpfc_set_disctmo(phba);
-+
-+ if (phba->fc_prevDID == phba->fc_myDID) {
-+ did_changed = 0;
-+ } else {
-+ did_changed = 1;
-+ }
-+ phba->fc_prevDID = phba->fc_myDID;
-+ phba->num_disc_nodes = 0;
-+
-+ /* Start Discovery state <hba_state> */
-+ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
-+ "%d:0202 Start Discovery hba state x%x "
-+ "Data: x%x x%x x%x\n",
-+ phba->brd_no, phba->hba_state, phba->fc_flag,
-+ phba->fc_plogi_cnt, phba->fc_adisc_cnt);
-+
-+ /* If our did changed, we MUST do PLOGI */
-+ list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list,
-+ nlp_listp) {
-+ if(ndlp->nlp_flag & NLP_NPR_2B_DISC) {
-+ if(did_changed)
-+ ndlp->nlp_flag &= ~NLP_NPR_ADISC;
-+ }
-+ }
-+
-+ /* First do ADISCs - if any */
-+ num_sent = lpfc_els_disc_adisc(phba);
-+
-+ if(num_sent)
-+ return;
-+
-+ if ((phba->hba_state < LPFC_HBA_READY) && (!clear_la_pending)) {
-+ /* If we get here, there is nothing to ADISC */
-+ if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC))) {
-+ phba->hba_state = LPFC_CLEAR_LA;
-+ lpfc_clear_la(phba, mbox);
-+ mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
-+ if (lpfc_sli_issue_mbox
-+ (phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB))
-+ == MBX_NOT_FINISHED) {
-+ mempool_free( mbox, phba->mbox_mem_pool);
-+ lpfc_disc_flush_list(phba);
-+ psli->ring[(psli->ip_ring)].flag &=
-+ ~LPFC_STOP_IOCB_EVENT;
-+ psli->ring[(psli->fcp_ring)].flag &=
-+ ~LPFC_STOP_IOCB_EVENT;
-+ psli->ring[(psli->next_ring)].flag &=
-+ ~LPFC_STOP_IOCB_EVENT;
-+ phba->hba_state = LPFC_HBA_READY;
-+ }
-+ }
-+ } else {
-+ /* Next do PLOGIs - if any */
-+ num_sent = lpfc_els_disc_plogi(phba);
-+
-+ if(num_sent)
-+ return;
-+
-+ if (phba->fc_flag & FC_RSCN_MODE) {
-+ /* Check to see if more RSCNs came in while we
-+ * were processing this one.
-+ */
-+ if ((phba->fc_rscn_id_cnt == 0) &&
-+ (!(phba->fc_flag & FC_RSCN_DISCOVERY))) {
-+ phba->fc_flag &= ~FC_RSCN_MODE;
-+ } else {
-+ lpfc_els_handle_rscn(phba);
-+ }
-+ }
-+ }
-+ return;
-+}
-+
-+/*
-+ * Ignore completions for all IOCBs on the tx and txcmpl queues for the
-+ * ELS ring that match the specified nodelist.
-+ */
-+static void
-+lpfc_free_tx(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
-+{
-+ struct lpfc_sli *psli;
-+ IOCB_t *icmd;
-+ struct lpfc_iocbq *iocb, *next_iocb;
-+ struct lpfc_sli_ring *pring;
-+ struct lpfc_dmabuf *mp;
-+
-+ psli = &phba->sli;
-+ pring = &psli->ring[LPFC_ELS_RING];
-+
-+	/* Clean up any iocbs on the txq or txcmplq that match this ndlp.
-+	 * First check the txq.
-+ */
-+ list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
-+ if (iocb->context1 != ndlp) {
-+ continue;
-+ }
-+ icmd = &iocb->iocb;
-+ if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
-+ (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {
-+
-+ list_del(&iocb->list);
-+ pring->txq_cnt--;
-+ lpfc_els_free_iocb(phba, iocb);
-+ }
-+ }
-+
-+ /* Next check the txcmplq */
-+ list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
-+ if (iocb->context1 != ndlp) {
-+ continue;
-+ }
-+ icmd = &iocb->iocb;
-+ if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
-+ (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {
-+
-+ iocb->iocb_cmpl = NULL;
-+ /* context2 = cmd, context2->next = rsp, context3 =
-+ bpl */
-+ if (iocb->context2) {
-+ /* Free the response IOCB before handling the
-+ command. */
-+
-+ mp = (struct lpfc_dmabuf *)
-+ (((struct lpfc_dmabuf *) (iocb->context2))
-+ ->list.next);
-+ if (mp) {
-+ /* Delay before releasing rsp buffer to
-+ * give UNREG mbox a chance to take
-+ * effect.
-+ */
-+ list_add(&mp->list,
-+ &phba->freebufList);
-+ }
-+ lpfc_mbuf_free(phba,
-+ ((struct lpfc_dmabuf *)
-+ iocb->context2)->virt,
-+ ((struct lpfc_dmabuf *)
-+ iocb->context2)->phys);
-+ kfree(iocb->context2);
-+ }
-+
-+ if (iocb->context3) {
-+ lpfc_mbuf_free(phba,
-+ ((struct lpfc_dmabuf *)
-+ iocb->context3)->virt,
-+ ((struct lpfc_dmabuf *)
-+ iocb->context3)->phys);
-+ kfree(iocb->context3);
-+ }
-+ }
-+ }
-+
-+ return;
-+}
-+
-+void
-+lpfc_disc_flush_list(struct lpfc_hba * phba)
-+{
-+ struct lpfc_nodelist *ndlp, *next_ndlp;
-+
-+ if (phba->fc_plogi_cnt) {
-+ list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_plogi_list,
-+ nlp_listp) {
-+ lpfc_set_failmask(phba, ndlp, LPFC_DEV_DISCONNECTED,
-+ LPFC_SET_BITMASK);
-+ lpfc_free_tx(phba, ndlp);
-+ lpfc_nlp_remove(phba, ndlp);
-+ }
-+ }
-+ if (phba->fc_adisc_cnt) {
-+ list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_adisc_list,
-+ nlp_listp) {
-+ lpfc_set_failmask(phba, ndlp, LPFC_DEV_DISCONNECTED,
-+ LPFC_SET_BITMASK);
-+ lpfc_free_tx(phba, ndlp);
-+ lpfc_nlp_remove(phba, ndlp);
-+ }
-+ }
-+ return;
-+}
-+
-+/*****************************************************************************/
-+/*
-+ * NAME: lpfc_disc_timeout
-+ *
-+ * FUNCTION: Fibre Channel driver discovery timeout routine.
-+ *
-+ * EXECUTION ENVIRONMENT: interrupt only
-+ *
-+ * CALLED FROM:
-+ * Timer function
-+ *
-+ * RETURNS:
-+ * none
-+ */
-+/*****************************************************************************/
-+void
-+lpfc_disc_timeout(unsigned long ptr)
-+{
-+ struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
-+ unsigned long flags = 0;
-+
-+ if (unlikely(!phba))
-+ return;
-+
-+ spin_lock_irqsave(phba->host->host_lock, flags);
-+ if (!(phba->work_hba_events & WORKER_DISC_TMO)) {
-+ phba->work_hba_events |= WORKER_DISC_TMO;
-+ if (phba->dpc_wait)
-+ up(phba->dpc_wait);
-+ }
-+ spin_unlock_irqrestore(phba->host->host_lock, flags);
-+ return;
-+}
-+
-+static void
-+lpfc_disc_timeout_handler(struct lpfc_hba *phba)
-+{
-+ struct lpfc_sli *psli;
-+ struct lpfc_nodelist *ndlp;
-+ LPFC_MBOXQ_t *mbox;
-+
-+ if (!phba) {
-+ return;
-+ }
-+ if (!(phba->fc_flag & FC_DISC_TMO))
-+ return;
-+
-+ psli = &phba->sli;
-+ spin_lock_irq(phba->host->host_lock);
-+
-+ phba->fc_flag &= ~FC_DISC_TMO;
-+
-+ /* hba_state is identically LPFC_LOCAL_CFG_LINK while waiting for FAN */
-+ if (phba->hba_state == LPFC_LOCAL_CFG_LINK) {
-+ /* FAN timeout */
-+ lpfc_printf_log(phba,
-+ KERN_WARNING,
-+ LOG_DISCOVERY,
-+ "%d:0221 FAN timeout\n",
-+ phba->brd_no);
-+
-+ /* Forget about FAN, Start discovery by sending a FLOGI
-+ * hba_state is identically LPFC_FLOGI while waiting for FLOGI
-+ * cmpl
-+ */
-+ phba->hba_state = LPFC_FLOGI;
-+ lpfc_set_disctmo(phba);
-+ lpfc_initial_flogi(phba);
-+ goto out;
-+ }
-+
-+ /* hba_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */
-+ if (phba->hba_state == LPFC_FLOGI) {
-+ /* Initial FLOGI timeout */
-+ lpfc_printf_log(phba,
-+ KERN_ERR,
-+ LOG_DISCOVERY,
-+ "%d:0222 Initial FLOGI timeout\n",
-+ phba->brd_no);
-+
-+ /* Assume no Fabric and go on with discovery.
-+ * Check for outstanding ELS FLOGI to abort.
-+ */
-+
-+ /* FLOGI failed, so just use loop map to make discovery list */
-+ lpfc_disc_list_loopmap(phba);
-+
-+ /* Start discovery */
-+ lpfc_disc_start(phba);
-+ goto out;
-+ }
-+
-+ /* hba_state is identically LPFC_FABRIC_CFG_LINK while waiting for
-+ NameServer login */
-+ if (phba->hba_state == LPFC_FABRIC_CFG_LINK) {
-+ /* Timeout while waiting for NameServer login */
-+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
-+ "%d:0223 Timeout while waiting for NameServer "
-+ "login\n", phba->brd_no);
-+
-+ /* Next look for NameServer ndlp */
-+ if ((ndlp = lpfc_findnode_did(phba,
-+ NLP_SEARCH_ALL, NameServer_DID))) {
-+ lpfc_nlp_remove(phba, ndlp);
-+ }
-+ /* Start discovery */
-+ lpfc_disc_start(phba);
-+ goto out;
-+ }
-+
-+ /* Check for wait for NameServer Rsp timeout */
-+ if (phba->hba_state == LPFC_NS_QRY) {
-+ /* NameServer Query timeout */
-+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
-+ "%d:0224 NameServer Query timeout "
-+ "Data: x%x x%x\n",
-+ phba->brd_no,
-+ phba->fc_ns_retry, LPFC_MAX_NS_RETRY);
-+
-+ if ((ndlp =
-+ lpfc_findnode_did(phba, NLP_SEARCH_UNMAPPED,
-+ NameServer_DID))) {
-+ if (phba->fc_ns_retry < LPFC_MAX_NS_RETRY) {
-+ /* Try it one more time */
-+ if (lpfc_ns_cmd(phba, ndlp, SLI_CTNS_GID_FT) ==
-+ 0) {
-+ goto out;
-+ }
-+ }
-+ phba->fc_ns_retry = 0;
-+ }
-+
-+ /* Nothing to authenticate, so CLEAR_LA right now */
-+ if (phba->hba_state != LPFC_CLEAR_LA) {
-+ if ((mbox = mempool_alloc(phba->mbox_mem_pool,
-+ GFP_ATOMIC))) {
-+ phba->hba_state = LPFC_CLEAR_LA;
-+ lpfc_clear_la(phba, mbox);
-+ mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
-+ if (lpfc_sli_issue_mbox
-+ (phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB))
-+ == MBX_NOT_FINISHED) {
-+ mempool_free(mbox, phba->mbox_mem_pool);
-+ goto clrlaerr;
-+ }
-+ } else {
-+ /* Device Discovery completion error */
-+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
-+ "%d:0226 Device Discovery "
-+ "completion error\n",
-+ phba->brd_no);
-+ phba->hba_state = LPFC_HBA_ERROR;
-+ }
-+ }
-+ if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC))) {
-+ /* Setup and issue mailbox INITIALIZE LINK command */
-+ lpfc_linkdown(phba);
-+ lpfc_init_link(phba, mbox,
-+ phba->cfg_topology,
-+ phba->cfg_link_speed);
-+ mbox->mb.un.varInitLnk.lipsr_AL_PA = 0;
-+ mbox->mbox_cmpl=lpfc_sli_def_mbox_cmpl;
-+ if (lpfc_sli_issue_mbox
-+ (phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB))
-+ == MBX_NOT_FINISHED) {
-+ mempool_free( mbox, phba->mbox_mem_pool);
-+ }
-+ }
-+ goto out;
-+ }
-+
-+ if (phba->hba_state == LPFC_DISC_AUTH) {
-+ /* Node Authentication timeout */
-+ lpfc_printf_log(phba,
-+ KERN_ERR,
-+ LOG_DISCOVERY,
-+ "%d:0227 Node Authentication timeout\n",
-+ phba->brd_no);
-+ lpfc_disc_flush_list(phba);
-+ if (phba->hba_state != LPFC_CLEAR_LA) {
-+ if ((mbox = mempool_alloc(phba->mbox_mem_pool,
-+ GFP_ATOMIC))) {
-+ phba->hba_state = LPFC_CLEAR_LA;
-+ lpfc_clear_la(phba, mbox);
-+ mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
-+ if (lpfc_sli_issue_mbox
-+ (phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB))
-+ == MBX_NOT_FINISHED) {
-+ mempool_free(mbox, phba->mbox_mem_pool);
-+ goto clrlaerr;
-+ }
-+ }
-+ }
-+ goto out;
-+ }
-+
-+ if (phba->hba_state == LPFC_CLEAR_LA) {
-+ /* CLEAR LA timeout */
-+ lpfc_printf_log(phba,
-+ KERN_ERR,
-+ LOG_DISCOVERY,
-+ "%d:0228 CLEAR LA timeout\n",
-+ phba->brd_no);
-+clrlaerr:
-+ lpfc_disc_flush_list(phba);
-+ psli->ring[(psli->ip_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
-+ psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
-+ psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
-+ phba->hba_state = LPFC_HBA_READY;
-+ goto out;
-+ }
-+
-+ if ((phba->hba_state == LPFC_HBA_READY) &&
-+ (phba->fc_flag & FC_RSCN_MODE)) {
-+ /* RSCN timeout */
-+ lpfc_printf_log(phba,
-+ KERN_ERR,
-+ LOG_DISCOVERY,
-+ "%d:0231 RSCN timeout Data: x%x x%x\n",
-+ phba->brd_no,
-+ phba->fc_ns_retry, LPFC_MAX_NS_RETRY);
-+
-+ /* Cleanup any outstanding ELS commands */
-+ lpfc_els_flush_cmd(phba);
-+
-+ lpfc_els_flush_rscn(phba);
-+ lpfc_disc_flush_list(phba);
-+ goto out;
-+ }
-+
-+out:
-+ spin_unlock_irq(phba->host->host_lock);
-+ return;
-+}
-+
-+/*****************************************************************************/
-+/*
-+ * NAME: lpfc_scan_timeout
-+ *
-+ * FUNCTION: Fibre Channel driver scsi_scan_host timeout routine.
-+ *
-+ * EXECUTION ENVIRONMENT: interrupt only
-+ *
-+ * CALLED FROM:
-+ * Timer function
-+ *
-+ * RETURNS:
-+ * none
-+ */
-+/*****************************************************************************/
-+void
-+lpfc_scan_timeout(unsigned long ptr)
-+{
-+ struct lpfc_hba *phba;
-+ unsigned long iflag;
-+
-+ phba = (struct lpfc_hba *)ptr;
-+ if (!phba) {
-+ return;
-+ }
-+ spin_lock_irqsave(phba->host->host_lock, iflag);
-+ phba->fc_flag &= ~FC_SCSI_SCAN_TMO;
-+ lpfc_discq_post_event(phba, NULL, NULL, LPFC_EVT_SCAN);
-+ spin_unlock_irqrestore(phba->host->host_lock, iflag);
-+ return;
-+}
-+
-+static void
-+lpfc_nodev_timeout(unsigned long ptr)
-+{
-+ struct lpfc_hba *phba;
-+ struct lpfc_nodelist *ndlp;
-+ unsigned long iflag;
-+ LPFC_DISC_EVT_t *evtp;
-+
-+ ndlp = (struct lpfc_nodelist *)ptr;
-+ phba = ndlp->nlp_phba;
-+ evtp = &ndlp->nodev_timeout_evt;
-+ spin_lock_irqsave(phba->host->host_lock, iflag);
-+
-+ if (!list_empty(&evtp->evt_listp)) {
-+ spin_unlock_irqrestore(phba->host->host_lock, iflag);
-+ return;
-+ }
-+ evtp->evt_arg1 = ndlp;
-+ evtp->evt = LPFC_EVT_NODEV_TMO;
-+ list_add_tail(&evtp->evt_listp, &phba->dpc_disc);
-+ if (phba->dpc_wait)
-+ up(phba->dpc_wait);
-+
-+ spin_unlock_irqrestore(phba->host->host_lock, iflag);
-+ return;
-+}
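
The two timer routines above never do real work in timer (interrupt) context: they only queue a discovery event (or set a work flag) under the host lock and then wake the DPC thread with up(). Below is a minimal, self-contained userspace sketch of that defer-to-worker pattern; the structure, queue, and function names are invented for illustration and are not the driver's.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct evt {                      /* invented stand-in for LPFC_DISC_EVT_t */
	int type;
	struct evt *next;
};

static struct evt *evt_head;      /* stand-in for the phba->dpc_disc list */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wake = PTHREAD_COND_INITIALIZER;  /* stand-in for dpc_wait */

static void post_event(int type)  /* what a timer handler would do */
{
	struct evt *e = malloc(sizeof(*e));
	e->type = type;
	pthread_mutex_lock(&lock);
	e->next = evt_head;
	evt_head = e;
	pthread_cond_signal(&wake);   /* analogue of up(phba->dpc_wait) */
	pthread_mutex_unlock(&lock);
}

static void *worker(void *unused) /* what the DPC thread would do */
{
	(void)unused;
	pthread_mutex_lock(&lock);
	while (!evt_head)
		pthread_cond_wait(&wake, &lock);
	struct evt *e = evt_head;
	evt_head = e->next;
	pthread_mutex_unlock(&lock);
	printf("handled event %d in process context\n", e->type);
	free(e);
	return NULL;
}

int main(void)
{
	pthread_t t;
	pthread_create(&t, NULL, worker, NULL);
	post_event(42);               /* simulate the timer firing */
	pthread_join(t, NULL);
	return 0;
}
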
-+
-+
-+/*****************************************************************************/
-+/*
-+ * NAME: lpfc_find_target
-+ *
-+ * FUNCTION: Fibre Channel bus/target/LUN to struct lpfc_target lookup
-+ *
-+ * EXECUTION ENVIRONMENT:
-+ *
-+ * RETURNS:
-+ * ptr to desired struct lpfc_target
-+ */
-+/*****************************************************************************/
-+struct lpfc_target *
-+lpfc_find_target(struct lpfc_hba * phba, uint32_t tgt,
-+ struct lpfc_nodelist *nlp)
-+{
-+ struct lpfc_target *targetp = NULL;
-+ int found = 0, i;
-+ struct list_head *listp;
-+ struct list_head *node_list[6];
-+
-+ if (tgt == NLP_NO_SID)
-+ return NULL;
-+
-+ if(!nlp) {
-+ /* Search over all lists other than fc_nlpunmap_list */
-+ node_list[0] = &phba->fc_npr_list;
-+ node_list[1] = &phba->fc_nlpmap_list; /* Skip fc_nlpunmap */
-+ node_list[2] = &phba->fc_prli_list;
-+ node_list[3] = &phba->fc_reglogin_list;
-+ node_list[4] = &phba->fc_adisc_list;
-+ node_list[5] = &phba->fc_plogi_list;
-+
-+ for (i=0; i < 6 && !found; i++) {
-+ listp = node_list[i];
-+ if (list_empty(listp))
-+ continue;
-+ list_for_each_entry(nlp, listp, nlp_listp) {
-+ if (tgt == nlp->nlp_sid) {
-+ found = 1;
-+ break;
-+ }
-+ }
-+ }
-+
-+ if (!found)
-+ return NULL;
-+ }
-+
-+ targetp = phba->device_queue_hash[tgt];
-+
-+ /* First see if the SCSI ID has an allocated struct lpfc_target */
-+ if (!targetp) {
-+ targetp = kmalloc(sizeof (struct lpfc_target), GFP_ATOMIC);
-+ if (!targetp)
-+ return NULL;
-+
-+ memset(targetp, 0, sizeof (struct lpfc_target));
-+#ifdef SLES_FC
-+ init_timer(&targetp->dev_loss_timer);
-+#endif
-+ phba->device_queue_hash[tgt] = targetp;
-+ targetp->scsi_id = tgt;
-+
-+ /* Create SCSI Target <tgt> */
-+ lpfc_printf_log(phba,
-+ KERN_INFO,
-+ LOG_DISCOVERY | LOG_FCP,
-+ "%d:0204 Create SCSI Target %d\n",
-+ phba->brd_no, tgt);
-+ }
-+
-+ if (targetp->pnode == NULL) {
-+ targetp->pnode = nlp;
-+ nlp->nlp_Target = targetp;
-+#ifdef RHEL_FC
-+ /*
-+ * This code does not apply to SLES9 since there is no
-+ * starget defined in the midlayer. Additionally,
-+ * dynamic target discovery to the midlayer is not
-+ * supported yet.
-+ */
-+ if(!(phba->fc_flag & FC_LOADING)) {
-+ /* Add SCSI target / SCSI Hotplug if called
-+ * after initial driver load.
-+ */
-+ lpfc_target_add(phba, targetp);
-+ }
-+#endif /* RHEL_FC */
-+ }
-+ else {
-+ if(targetp->pnode != nlp) {
-+ /*
-+ * The scsi-id exists but the nodepointer is different.
-+ * We are reassigning the scsi-id. Attach the nodelist
-+ * pointer to the correct target. This is common
-+ * with a target side cable swap.
-+ */
-+ if (targetp->pnode->nlp_Target != targetp)
-+ targetp->pnode = nlp;
-+ }
-+ }
-+ nlp->nlp_Target = targetp;
-+ return (targetp);
-+}
-+
-+/*
-+ * lpfc_set_failmask
-+ * Set, or clear, failMask bits in struct lpfc_nodelist
-+ */
-+void
-+lpfc_set_failmask(struct lpfc_hba * phba,
-+ struct lpfc_nodelist * ndlp, uint32_t bitmask, uint32_t flag)
-+{
-+ uint32_t oldmask;
-+ uint32_t changed;
-+
-+ /* Failmask change on NPort <nlp_DID> */
-+ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
-+ "%d:0208 Failmask change on NPort x%x "
-+ "Data: x%x x%x x%x\n",
-+ phba->brd_no,
-+ ndlp->nlp_DID, ndlp->nlp_failMask, bitmask, flag);
-+
-+ if (flag == LPFC_SET_BITMASK) {
-+ oldmask = ndlp->nlp_failMask;
-+ /* Set failMask event */
-+ ndlp->nlp_failMask |= bitmask;
-+ if (oldmask != ndlp->nlp_failMask) {
-+ changed = 1;
-+ } else {
-+ changed = 0;
-+ }
-+
-+ } else {
-+ /* Clear failMask event */
-+ ndlp->nlp_failMask &= ~bitmask;
-+ changed = 1;
-+ }
-+ return;
-+}
-+
-+/*
-+ * This routine handles processing the FDMI port REG_LOGIN mailbox
-+ * command upon completion. It is set up in the LPFC_MBOXQ
-+ * as the completion routine when the command is
-+ * handed off to the SLI layer.
-+ */
-+void
-+lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
-+{
-+ struct lpfc_sli *psli;
-+ MAILBOX_t *mb;
-+ struct lpfc_dmabuf *mp;
-+ struct lpfc_nodelist *ndlp;
-+
-+ psli = &phba->sli;
-+ mb = &pmb->mb;
-+
-+ ndlp = (struct lpfc_nodelist *) pmb->context2;
-+ mp = (struct lpfc_dmabuf *) (pmb->context1);
-+
-+ pmb->context1 = NULL;
-+
-+ if (ndlp->nlp_rpi != 0)
-+ lpfc_findnode_remove_rpi(phba, ndlp->nlp_rpi);
-+ ndlp->nlp_rpi = mb->un.varWords[0];
-+ lpfc_addnode_rpi(phba, ndlp, ndlp->nlp_rpi);
-+ ndlp->nlp_type |= NLP_FABRIC;
-+ ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
-+ lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);
-+
-+ /* Start issuing Fabric-Device Management Interface (FDMI)
-+ * command to 0xfffffa (FDMI well known port)
-+ */
-+ if (phba->cfg_fdmi_on == 1) {
-+ lpfc_fdmi_cmd(phba, ndlp, SLI_MGMT_DHBA);
-+ } else {
-+ /*
-+ * Delay issuing FDMI command if fdmi-on=2
-+		 * (supporting RPA/hostname)
-+ */
-+ mod_timer(&phba->fc_fdmitmo, jiffies + HZ * 60);
-+ }
-+
-+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
-+ kfree(mp);
-+ mempool_free( pmb, phba->mbox_mem_pool);
-+
-+ return;
-+}
-+
-+/*
-+ * This routine looks up the ndlp hash table for the given RPI.
-+ * If the RPI is found, it returns the node list pointer;
-+ * otherwise it returns NULL.
-+ */
-+struct lpfc_nodelist *
-+lpfc_findnode_rpi(struct lpfc_hba * phba, uint16_t rpi)
-+{
-+ struct lpfc_nodelist *ret;
-+
-+ ret = phba->fc_nlplookup[LPFC_RPI_HASH_FUNC(rpi)];
-+ while ((ret != 0) && (ret->nlp_rpi != rpi)) {
-+ ret = ret->nlp_rpi_hash_next;
-+ }
-+ return ret;
-+}
-+
-+/*
-+ * This routine looks up the ndlp hash table for the given RPI,
-+ * removes the matching entry from the hash table, and returns the
-+ * node list pointer; if the RPI is not found it returns NULL.
-+ */
-+struct lpfc_nodelist *
-+lpfc_findnode_remove_rpi(struct lpfc_hba * phba, uint16_t rpi)
-+{
-+	struct lpfc_nodelist *ret, *temp;
-+
-+ ret = phba->fc_nlplookup[LPFC_RPI_HASH_FUNC(rpi)];
-+ if (ret == 0)
-+ return NULL;
-+
-+ if (ret->nlp_rpi == rpi) {
-+ phba->fc_nlplookup[LPFC_RPI_HASH_FUNC(rpi)] =
-+ ret->nlp_rpi_hash_next;
-+ ret->nlp_rpi_hash_next = NULL;
-+ return ret;
-+ }
-+
-+ while ((ret->nlp_rpi_hash_next != 0) &&
-+ (ret->nlp_rpi_hash_next->nlp_rpi != rpi)) {
-+ ret = ret->nlp_rpi_hash_next;
-+ }
-+
-+ if (ret->nlp_rpi_hash_next != 0) {
-+ temp = ret->nlp_rpi_hash_next;
-+ ret->nlp_rpi_hash_next = temp->nlp_rpi_hash_next;
-+ temp->nlp_rpi_hash_next = NULL;
-+ return temp;
-+ } else {
-+ return NULL;
-+ }
-+}
-+
-+/*
-+ * This routine adds the node list entry to the
-+ * ndlp hash table.
-+ */
-+void
-+lpfc_addnode_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
-+ uint16_t rpi)
-+{
-+
-+ uint32_t index;
-+
-+ index = LPFC_RPI_HASH_FUNC(rpi);
-+ ndlp->nlp_rpi_hash_next = phba->fc_nlplookup[index];
-+ phba->fc_nlplookup[index] = ndlp;
-+ return;
-+}
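
The three RPI routines above implement a simple chained hash table keyed by RPI: lpfc_addnode_rpi pushes at the head of a bucket, lpfc_findnode_rpi walks the chain, and lpfc_findnode_remove_rpi unlinks a match. A standalone sketch of the same structure follows; the bucket count, hash function, and type names are invented for the example. The removal here uses a pointer-to-pointer walk, which folds the driver's head-of-bucket and mid-chain cases into one.

#include <stdio.h>
#include <stdint.h>

#define NBUCKETS 32
#define HASH(rpi) ((rpi) & (NBUCKETS - 1))   /* stand-in for LPFC_RPI_HASH_FUNC */

struct node {                                 /* stand-in for struct lpfc_nodelist */
	uint16_t rpi;
	struct node *next;                    /* stand-in for nlp_rpi_hash_next */
};

static struct node *table[NBUCKETS];          /* stand-in for phba->fc_nlplookup */

static void add_rpi(struct node *n)           /* like lpfc_addnode_rpi: push at head */
{
	uint32_t i = HASH(n->rpi);
	n->next = table[i];
	table[i] = n;
}

static struct node *find_rpi(uint16_t rpi)    /* like lpfc_findnode_rpi */
{
	struct node *p = table[HASH(rpi)];
	while (p && p->rpi != rpi)
		p = p->next;
	return p;
}

static struct node *remove_rpi(uint16_t rpi)  /* like lpfc_findnode_remove_rpi */
{
	struct node **pp = &table[HASH(rpi)];
	while (*pp && (*pp)->rpi != rpi)
		pp = &(*pp)->next;
	if (!*pp)
		return NULL;
	struct node *found = *pp;
	*pp = found->next;                    /* unlink from the chain */
	found->next = NULL;
	return found;
}

int main(void)
{
	struct node a = { .rpi = 3 }, b = { .rpi = 3 + NBUCKETS };  /* same bucket */
	add_rpi(&a);
	add_rpi(&b);
	printf("find 3 -> %p, remove 35 -> %p, find 35 -> %p\n",
	       (void *)find_rpi(3), (void *)remove_rpi(3 + NBUCKETS),
	       (void *)find_rpi(3 + NBUCKETS));
	return 0;
}
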
-+
-+void
-+lpfc_nlp_init(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
-+ uint32_t did)
-+{
-+ memset(ndlp, 0, sizeof (struct lpfc_nodelist));
-+ INIT_LIST_HEAD(&ndlp->nodev_timeout_evt.evt_listp);
-+ INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
-+ init_timer(&ndlp->nlp_tmofunc);
-+ ndlp->nlp_tmofunc.function = lpfc_nodev_timeout;
-+ ndlp->nlp_tmofunc.data = (unsigned long)ndlp;
-+ init_timer(&ndlp->nlp_delayfunc);
-+ ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
-+ ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
-+ ndlp->nlp_DID = did;
-+ ndlp->nlp_phba = phba;
-+ ndlp->nlp_sid = NLP_NO_SID;
-+ return;
-+}
-+
---- linux-2.6.8.1-t044-driver-update//drivers/scsi/lpfc/lpfc_nportdisc.c 1970-01-01 03:00:00.000000000 +0300
-+++ rhel4u2//drivers/scsi/lpfc/lpfc_nportdisc.c 2005-10-19 11:47:17.000000000 +0400
-@@ -0,0 +1,2038 @@
-+/*******************************************************************
-+ * This file is part of the Emulex Linux Device Driver for *
-+ * Fibre Channel Host Bus Adapters. *
-+ * Copyright (C) 2003-2005 Emulex. All rights reserved. *
-+ * EMULEX and SLI are trademarks of Emulex. *
-+ * www.emulex.com *
-+ * *
-+ * This program is free software; you can redistribute it and/or *
-+ * modify it under the terms of version 2 of the GNU General *
-+ * Public License as published by the Free Software Foundation. *
-+ * This program is distributed in the hope that it will be useful. *
-+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
-+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
-+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
-+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
-+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
-+ * more details, a copy of which can be found in the file COPYING *
-+ * included with this package. *
-+ *******************************************************************/
-+
-+/*
-+ * $Id: lpfc_nportdisc.c 1.160.1.2 2005/06/13 17:16:39EDT sf_support Exp $
-+ */
-+
-+#include <linux/version.h>
-+#include <linux/blkdev.h>
-+#include <linux/dma-mapping.h>
-+#include <linux/pci.h>
-+#include <linux/spinlock.h>
-+#include <scsi/scsi_device.h>
-+#include <scsi/scsi_host.h>
-+#include "lpfc_sli.h"
-+#include "lpfc_disc.h"
-+#include "lpfc_scsi.h"
-+#include "lpfc.h"
-+#include "lpfc_crtn.h"
-+#include "lpfc_hw.h"
-+#include "lpfc_logmsg.h"
-+#include "lpfc_mem.h"
-+
-+extern uint8_t lpfcAlpaArray[];
-+
-+
-+/* Called to verify a rcv'ed ADISC was intended for us. */
-+static int
-+lpfc_check_adisc(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
-+ struct lpfc_name * nn, struct lpfc_name * pn)
-+{
-+	/* Check that the ADISC rsp WWNN / WWPN matches our internal node
-+	 * table entry for that node.
-+ */
-+ if (memcmp(nn, &ndlp->nlp_nodename, sizeof (struct lpfc_name)) != 0)
-+ return (0);
-+
-+ if (memcmp(pn, &ndlp->nlp_portname, sizeof (struct lpfc_name)) != 0)
-+ return (0);
-+
-+ /* we match, return success */
-+ return (1);
-+}
-+
-+
-+int
-+lpfc_check_sparm(struct lpfc_hba * phba,
-+ struct lpfc_nodelist * ndlp, struct serv_parm * sp,
-+ uint32_t class)
-+{
-+ volatile struct serv_parm *hsp = &phba->fc_sparam;
-+ /* First check for supported version */
-+
-+ /* Next check for class validity */
-+ if (sp->cls1.classValid) {
-+
-+ if (sp->cls1.rcvDataSizeMsb > hsp->cls1.rcvDataSizeMsb)
-+ sp->cls1.rcvDataSizeMsb = hsp->cls1.rcvDataSizeMsb;
-+ if (sp->cls1.rcvDataSizeLsb > hsp->cls1.rcvDataSizeLsb)
-+ sp->cls1.rcvDataSizeLsb = hsp->cls1.rcvDataSizeLsb;
-+ } else if (class == CLASS1) {
-+ return (0);
-+ }
-+
-+ if (sp->cls2.classValid) {
-+
-+ if (sp->cls2.rcvDataSizeMsb > hsp->cls2.rcvDataSizeMsb)
-+ sp->cls2.rcvDataSizeMsb = hsp->cls2.rcvDataSizeMsb;
-+ if (sp->cls2.rcvDataSizeLsb > hsp->cls2.rcvDataSizeLsb)
-+ sp->cls2.rcvDataSizeLsb = hsp->cls2.rcvDataSizeLsb;
-+ } else if (class == CLASS2) {
-+ return (0);
-+ }
-+
-+ if (sp->cls3.classValid) {
-+
-+ if (sp->cls3.rcvDataSizeMsb > hsp->cls3.rcvDataSizeMsb)
-+ sp->cls3.rcvDataSizeMsb = hsp->cls3.rcvDataSizeMsb;
-+ if (sp->cls3.rcvDataSizeLsb > hsp->cls3.rcvDataSizeLsb)
-+ sp->cls3.rcvDataSizeLsb = hsp->cls3.rcvDataSizeLsb;
-+ } else if (class == CLASS3) {
-+ return (0);
-+ }
-+
-+ if (sp->cmn.bbRcvSizeMsb > hsp->cmn.bbRcvSizeMsb)
-+ sp->cmn.bbRcvSizeMsb = hsp->cmn.bbRcvSizeMsb;
-+ if (sp->cmn.bbRcvSizeLsb > hsp->cmn.bbRcvSizeLsb)
-+ sp->cmn.bbRcvSizeLsb = hsp->cmn.bbRcvSizeLsb;
-+
-+ /* If check is good, copy wwpn wwnn into ndlp */
-+ memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof (struct lpfc_name));
-+ memcpy(&ndlp->nlp_portname, &sp->portName, sizeof (struct lpfc_name));
-+ return (1);
-+}
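
lpfc_check_sparm() above negotiates the receive data field sizes by clamping each advertised MSB/LSB byte of the remote's service parameters down to our own. The short standalone example below illustrates that clamp with invented values (remote offers 2112 bytes, we advertise 2048) and prints the resulting combined 16-bit size; it is only an illustration, not driver code.

#include <stdio.h>
#include <stdint.h>

/* Combine the split MSB/LSB fields into the 16-bit receive data field size. */
static unsigned combine(uint8_t msb, uint8_t lsb)
{
	return ((unsigned)msb << 8) | lsb;
}

int main(void)
{
	/* Our (host) advertised class-3 receive size: 0x0800 = 2048 bytes. */
	uint8_t h_msb = 0x08, h_lsb = 0x00;
	/* Remote port advertises 0x0840 = 2112 bytes. */
	uint8_t r_msb = 0x08, r_lsb = 0x40;

	/* Same per-field clamp lpfc_check_sparm() applies above. */
	if (r_msb > h_msb)
		r_msb = h_msb;
	if (r_lsb > h_lsb)
		r_lsb = h_lsb;

	printf("negotiated rcv data size: %u bytes (host %u)\n",
	       combine(r_msb, r_lsb), combine(h_msb, h_lsb));
	return 0;
}
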
-+
-+static void *
-+lpfc_check_elscmpl_iocb(struct lpfc_hba * phba,
-+ struct lpfc_iocbq *cmdiocb,
-+ struct lpfc_iocbq *rspiocb)
-+{
-+ struct lpfc_dmabuf *pcmd, *prsp;
-+ uint32_t *lp;
-+ void *ptr;
-+ IOCB_t *irsp;
-+
-+ irsp = &rspiocb->iocb;
-+ pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
-+
-+ /* For lpfc_els_abort, context2 could be zero'ed to delay
-+ * freeing associated memory till after ABTS completes.
-+ */
-+ if (pcmd) {
-+ prsp = (struct lpfc_dmabuf *) pcmd->list.next;
-+ lp = (uint32_t *) prsp->virt;
-+
-+ ptr = (void *)((uint8_t *)lp + sizeof(uint32_t));
-+ }
-+ else {
-+ /* Force ulpStatus error since we are returning NULL ptr */
-+ if (!(irsp->ulpStatus)) {
-+ irsp->ulpStatus = IOSTAT_LOCAL_REJECT;
-+ irsp->un.ulpWord[4] = IOERR_SLI_ABORTED;
-+ }
-+ ptr = NULL;
-+ }
-+ return (ptr);
-+}
-+
-+
-+/*
-+ * Free resources / clean up outstanding I/Os
-+ * associated with a LPFC_NODELIST entry. This
-+ * routine effectively results in a "software abort".
-+ */
-+int
-+lpfc_els_abort(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
-+ int send_abts)
-+{
-+ struct lpfc_sli *psli;
-+ struct lpfc_sli_ring *pring;
-+ struct lpfc_iocbq *iocb, *next_iocb, *saveq;
-+ IOCB_t *icmd;
-+ int found = 0;
-+ LPFC_DISC_EVT_t *evtp, *next_evtp;
-+
-+ /* Abort outstanding I/O on NPort <nlp_DID> */
-+ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
-+ "%d:0205 Abort outstanding I/O on NPort x%x "
-+ "Data: x%x x%x x%x\n",
-+ phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag,
-+ ndlp->nlp_state, ndlp->nlp_rpi);
-+
-+ psli = &phba->sli;
-+ pring = &psli->ring[LPFC_ELS_RING];
-+
-+ /* Abort all the ELS iocbs in the dpc thread. */
-+ list_for_each_entry_safe(evtp, next_evtp, &phba->dpc_disc,evt_listp) {
-+ if (evtp->evt != LPFC_EVT_SOL_IOCB)
-+ continue;
-+
-+ iocb = (struct lpfc_iocbq *)(evtp->evt_arg1);
-+ saveq = (struct lpfc_iocbq *)(evtp->evt_arg2);
-+
-+ if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp) == 0)
-+ continue;
-+
-+ list_del_init(&evtp->evt_listp);
-+ icmd = &iocb->iocb;
-+ icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
-+ icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
-+ (iocb->iocb_cmpl) (phba, iocb, saveq);
-+ lpfc_evt_iocb_free(phba, saveq);
-+ kfree(evtp);
-+ }
-+
-+ /* First check the txq */
-+ do {
-+ found = 0;
-+ list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
-+ /* Check to see if iocb matches the nport we are looking for */
-+ if ((lpfc_check_sli_ndlp(phba, pring, iocb, ndlp))) {
-+ found = 1;
-+				/* It matches, so dequeue it and call compl with an error */
-+ list_del(&iocb->list);
-+ pring->txq_cnt--;
-+ if (iocb->iocb_cmpl) {
-+ icmd = &iocb->iocb;
-+ icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
-+ icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
-+ (iocb->iocb_cmpl) (phba, iocb, iocb);
-+ } else {
-+ mempool_free(iocb, phba->iocb_mem_pool);
-+ }
-+ break;
-+ }
-+ }
-+
-+ } while (found);
-+
-+ /* Everything on txcmplq will be returned by firmware
-+ * with a no rpi / linkdown / abort error. For ring 0,
-+ * ELS discovery, we want to get rid of it right here.
-+ */
-+ /* Next check the txcmplq */
-+ do {
-+ found = 0;
-+ list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
-+ /* Check to see if iocb matches the nport we are looking for */
-+ if ((lpfc_check_sli_ndlp (phba, pring, iocb, ndlp))) {
-+ found = 1;
-+				/* It matches, so dequeue it and call compl with an error */
-+ list_del(&iocb->list);
-+ pring->txcmplq_cnt--;
-+
-+ icmd = &iocb->iocb;
-+ /* If the driver is completing an ELS
-+ * command early, flush it out of the firmware.
-+ */
-+ if (send_abts &&
-+ (icmd->ulpCommand == CMD_ELS_REQUEST64_CR) &&
-+ (icmd->un.elsreq64.bdl.ulpIoTag32)) {
-+ lpfc_sli_issue_abort_iotag32(phba, pring, iocb);
-+ }
-+ if (iocb->iocb_cmpl) {
-+ icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
-+ icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
-+ (iocb->iocb_cmpl) (phba, iocb, iocb);
-+ } else {
-+ mempool_free(iocb, phba->iocb_mem_pool);
-+ }
-+ break;
-+ }
-+ }
-+ } while (found);
-+
-+
-+ /* If we are delaying issuing an ELS command, cancel it */
-+ if(ndlp->nlp_flag & NLP_DELAY_TMO) {
-+ ndlp->nlp_flag &= ~NLP_DELAY_TMO;
-+ spin_unlock_irq(phba->host->host_lock);
-+ del_timer_sync(&ndlp->nlp_delayfunc);
-+ spin_lock_irq(phba->host->host_lock);
-+ if (!list_empty(&ndlp->els_retry_evt.
-+ evt_listp))
-+ list_del_init(&ndlp->els_retry_evt.
-+ evt_listp);
-+ }
-+ return (0);
-+}
-+
-+static int
-+lpfc_rcv_plogi(struct lpfc_hba * phba,
-+ struct lpfc_nodelist * ndlp,
-+ struct lpfc_iocbq *cmdiocb)
-+{
-+ struct lpfc_dmabuf *pcmd;
-+ uint32_t *lp;
-+ IOCB_t *icmd;
-+ struct serv_parm *sp;
-+ LPFC_MBOXQ_t *mbox;
-+ struct ls_rjt stat;
-+
-+ memset(&stat, 0, sizeof (struct ls_rjt));
-+ if (phba->hba_state <= LPFC_FLOGI) {
-+ /* Before responding to PLOGI, check for pt2pt mode.
-+ * If we are pt2pt, with an outstanding FLOGI, abort
-+ * the FLOGI and resend it first.
-+ */
-+ if (phba->fc_flag & FC_PT2PT) {
-+ lpfc_els_abort_flogi(phba);
-+ if(!(phba->fc_flag & FC_PT2PT_PLOGI)) {
-+ /* If the other side is supposed to initiate
-+ * the PLOGI anyway, just ACC it now and
-+ * move on with discovery.
-+ */
-+ phba->fc_edtov = FF_DEF_EDTOV;
-+ phba->fc_ratov = FF_DEF_RATOV;
-+ /* Start discovery - this should just do
-+ CLEAR_LA */
-+ lpfc_disc_start(phba);
-+ }
-+ else {
-+ lpfc_initial_flogi(phba);
-+ }
-+ }
-+ else {
-+ stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY;
-+ stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
-+ goto out;
-+ }
-+ }
-+ pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
-+ lp = (uint32_t *) pcmd->virt;
-+ sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
-+ if ((lpfc_check_sparm(phba, ndlp, sp, CLASS3) == 0)) {
-+		/* Reject this request because of invalid parameters */
-+ stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
-+ stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
-+ goto out;
-+ }
-+ icmd = &cmdiocb->iocb;
-+
-+ /* PLOGI chkparm OK */
-+ lpfc_printf_log(phba,
-+ KERN_INFO,
-+ LOG_ELS,
-+ "%d:0114 PLOGI chkparm OK Data: x%x x%x x%x x%x\n",
-+ phba->brd_no,
-+ ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag,
-+ ndlp->nlp_rpi);
-+
-+ if ((phba->cfg_fcp_class == 2) &&
-+ (sp->cls2.classValid)) {
-+ ndlp->nlp_fcp_info |= CLASS2;
-+ } else {
-+ ndlp->nlp_fcp_info |= CLASS3;
-+ }
-+
-+ /* no need to reg_login if we are already in one of these states */
-+ switch(ndlp->nlp_state) {
-+ case NLP_STE_NPR_NODE:
-+ if (!(ndlp->nlp_flag & NLP_NPR_ADISC))
-+ break;
-+ case NLP_STE_REG_LOGIN_ISSUE:
-+ case NLP_STE_PRLI_ISSUE:
-+ case NLP_STE_UNMAPPED_NODE:
-+ case NLP_STE_MAPPED_NODE:
-+ lpfc_els_rsp_acc(phba, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL, 0);
-+ return (1);
-+ }
-+
-+ if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC)) == 0) {
-+ stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
-+ stat.un.b.lsRjtRsnCodeExp = LSEXP_OUT_OF_RESOURCE;
-+ goto out;
-+ }
-+
-+ if ((phba->fc_flag & FC_PT2PT)
-+ && !(phba->fc_flag & FC_PT2PT_PLOGI)) {
-+ /* rcv'ed PLOGI decides what our NPortId will be */
-+ phba->fc_myDID = icmd->un.rcvels.parmRo;
-+ lpfc_config_link(phba, mbox);
-+ mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
-+ if (lpfc_sli_issue_mbox
-+ (phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB))
-+ == MBX_NOT_FINISHED) {
-+ mempool_free( mbox, phba->mbox_mem_pool);
-+ stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
-+ stat.un.b.lsRjtRsnCodeExp = LSEXP_OUT_OF_RESOURCE;
-+ goto out;
-+ }
-+ if ((mbox = mempool_alloc(phba->mbox_mem_pool,
-+ GFP_ATOMIC)) == 0) {
-+ stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
-+ stat.un.b.lsRjtRsnCodeExp = LSEXP_OUT_OF_RESOURCE;
-+ goto out;
-+ }
-+ lpfc_can_disctmo(phba);
-+ }
-+
-+ if(lpfc_reg_login(phba, icmd->un.rcvels.remoteID,
-+ (uint8_t *) sp, mbox, 0)) {
-+ mempool_free( mbox, phba->mbox_mem_pool);
-+ stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
-+ stat.un.b.lsRjtRsnCodeExp = LSEXP_OUT_OF_RESOURCE;
-+out:
-+ lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
-+ return (0);
-+ }
-+
-+ /* ACC PLOGI rsp command needs to execute first,
-+ * queue this mbox command to be processed later.
-+ */
-+ mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
-+ mbox->context2 = ndlp;
-+ ndlp->nlp_flag |= NLP_ACC_REGLOGIN;
-+
-+ /* If there is an outstanding PLOGI issued, abort it before
-+	 * sending the ACC rsp to the received PLOGI.
-+ */
-+ if(ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) {
-+ /* software abort outstanding PLOGI */
-+ lpfc_els_abort(phba, ndlp, 1);
-+ }
-+ ndlp->nlp_flag |= NLP_RCV_PLOGI;
-+ lpfc_els_rsp_acc(phba, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox, 0);
-+ return (1);
-+}
-+
-+static int
-+lpfc_rcv_padisc(struct lpfc_hba * phba,
-+ struct lpfc_nodelist * ndlp,
-+ struct lpfc_iocbq *cmdiocb)
-+{
-+ struct lpfc_dmabuf *pcmd;
-+ struct serv_parm *sp;
-+ struct lpfc_name *pnn, *ppn;
-+ struct ls_rjt stat;
-+ ADISC *ap;
-+ IOCB_t *icmd;
-+ uint32_t *lp;
-+ uint32_t cmd;
-+
-+ pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
-+ lp = (uint32_t *) pcmd->virt;
-+
-+ cmd = *lp++;
-+ if (cmd == ELS_CMD_ADISC) {
-+ ap = (ADISC *) lp;
-+ pnn = (struct lpfc_name *) & ap->nodeName;
-+ ppn = (struct lpfc_name *) & ap->portName;
-+ } else {
-+ sp = (struct serv_parm *) lp;
-+ pnn = (struct lpfc_name *) & sp->nodeName;
-+ ppn = (struct lpfc_name *) & sp->portName;
-+ }
-+
-+ icmd = &cmdiocb->iocb;
-+ if ((icmd->ulpStatus == 0) &&
-+ (lpfc_check_adisc(phba, ndlp, pnn, ppn))) {
-+ if (cmd == ELS_CMD_ADISC) {
-+ lpfc_els_rsp_adisc_acc(phba, cmdiocb, ndlp);
-+ }
-+ else {
-+ lpfc_els_rsp_acc(phba, ELS_CMD_PLOGI, cmdiocb, ndlp,
-+ NULL, 0);
-+ }
-+ return (1);
-+ }
-+	/* Reject this request because of invalid parameters */
-+ stat.un.b.lsRjtRsvd0 = 0;
-+ stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
-+ stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
-+ stat.un.b.vendorUnique = 0;
-+ lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
-+
-+ ndlp->nlp_last_elscmd = (unsigned long)ELS_CMD_PLOGI;
-+ /* 1 sec timeout */
-+ mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
-+
-+ ndlp->nlp_flag |= NLP_DELAY_TMO;
-+ ndlp->nlp_state = NLP_STE_NPR_NODE;
-+ lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
-+ return (0);
-+}
-+
-+static int
-+lpfc_rcv_logo(struct lpfc_hba * phba,
-+ struct lpfc_nodelist * ndlp,
-+ struct lpfc_iocbq *cmdiocb)
-+{
-+ /* Put ndlp on NPR list with 1 sec timeout for plogi, ACC logo */
-+	/* Only call LOGO ACC for the first LOGO; this avoids sending unnecessary
-+	 * PLOGIs during LOGO storms from a device.
-+ */
-+ ndlp->nlp_flag |= NLP_LOGO_ACC;
-+ lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
-+
-+ if (!(ndlp->nlp_type & NLP_FABRIC)) {
-+ /* Only try to re-login if this is NOT a Fabric Node */
-+ ndlp->nlp_last_elscmd = (unsigned long)ELS_CMD_PLOGI;
-+ mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
-+ ndlp->nlp_flag |= NLP_DELAY_TMO;
-+ }
-+
-+ ndlp->nlp_state = NLP_STE_NPR_NODE;
-+ lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
-+
-+ ndlp->nlp_flag &= ~NLP_NPR_ADISC;
-+ /* The driver has to wait until the ACC completes before it continues
-+ * processing the LOGO. The action will resume in
-+ * lpfc_cmpl_els_logo_acc routine. Since part of processing includes an
-+ * unreg_login, the driver waits so the ACC does not get aborted.
-+ */
-+ return (0);
-+}
-+
-+static int
-+lpfc_binding_found(struct lpfc_bindlist * blp, struct lpfc_nodelist * ndlp)
-+{
-+ uint16_t bindtype = blp->nlp_bind_type;
-+
-+ if ((bindtype & FCP_SEED_DID) &&
-+ (ndlp->nlp_DID == be32_to_cpu(blp->nlp_DID))) {
-+ return (1);
-+ } else if ((bindtype & FCP_SEED_WWPN) &&
-+ (memcmp(&ndlp->nlp_portname, &blp->nlp_portname,
-+ sizeof (struct lpfc_name)) == 0)) {
-+ return (1);
-+ } else if ((bindtype & FCP_SEED_WWNN) &&
-+ (memcmp(&ndlp->nlp_nodename, &blp->nlp_nodename,
-+ sizeof (struct lpfc_name)) == 0)) {
-+ return (1);
-+ }
-+ return (0);
-+}
-+
-+static int
-+lpfc_binding_useid(struct lpfc_hba * phba, uint32_t sid)
-+{
-+ struct lpfc_bindlist *blp;
-+
-+ list_for_each_entry(blp, &phba->fc_nlpbind_list, nlp_listp) {
-+ if (blp->nlp_sid == sid) {
-+ return (1);
-+ }
-+ }
-+ return (0);
-+}
-+
-+static int
-+lpfc_mapping_useid(struct lpfc_hba * phba, uint32_t sid)
-+{
-+ struct lpfc_nodelist *mapnode;
-+ struct lpfc_bindlist *blp;
-+
-+ list_for_each_entry(mapnode, &phba->fc_nlpmap_list, nlp_listp) {
-+ blp = mapnode->nlp_listp_bind;
-+ if (blp->nlp_sid == sid) {
-+ return (1);
-+ }
-+ }
-+ return (0);
-+}
-+
-+static struct lpfc_bindlist *
-+lpfc_create_binding(struct lpfc_hba * phba,
-+ struct lpfc_nodelist * ndlp, uint16_t index,
-+ uint16_t bindtype)
-+{
-+ struct lpfc_bindlist *blp;
-+
-+ if ((blp = mempool_alloc(phba->bind_mem_pool, GFP_ATOMIC))) {
-+ memset(blp, 0, sizeof (struct lpfc_bindlist));
-+ switch (bindtype) {
-+ case FCP_SEED_WWPN:
-+ blp->nlp_bind_type = FCP_SEED_WWPN;
-+ break;
-+ case FCP_SEED_WWNN:
-+ blp->nlp_bind_type = FCP_SEED_WWNN;
-+ break;
-+ case FCP_SEED_DID:
-+ blp->nlp_bind_type = FCP_SEED_DID;
-+ break;
-+ }
-+ blp->nlp_sid = index;
-+ blp->nlp_DID = ndlp->nlp_DID;
-+ memcpy(&blp->nlp_nodename, &ndlp->nlp_nodename,
-+ sizeof (struct lpfc_name));
-+ memcpy(&blp->nlp_portname, &ndlp->nlp_portname,
-+ sizeof (struct lpfc_name));
-+
-+ return (blp);
-+ }
-+ return NULL;
-+}
-+
-+
-+static struct lpfc_bindlist *
-+lpfc_consistent_bind_get(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
-+{
-+ struct lpfc_bindlist *blp, *next_blp;
-+
-+ /* check binding list */
-+ list_for_each_entry_safe(blp, next_blp, &phba->fc_nlpbind_list,
-+ nlp_listp) {
-+ if (lpfc_binding_found(blp, ndlp)) {
-+
-+ /* take it off the binding list */
-+ phba->fc_bind_cnt--;
-+ list_del_init(&blp->nlp_listp);
-+
-+ /* Reassign scsi id <sid> to NPort <nlp_DID> */
-+ lpfc_printf_log(phba,
-+ KERN_INFO,
-+ LOG_DISCOVERY | LOG_FCP,
-+ "%d:0213 Reassign scsi id x%x to "
-+ "NPort x%x Data: x%x x%x x%x x%x\n",
-+ phba->brd_no,
-+ blp->nlp_sid, ndlp->nlp_DID,
-+ blp->nlp_bind_type, ndlp->nlp_flag,
-+ ndlp->nlp_state, ndlp->nlp_rpi);
-+
-+ return (blp);
-+ }
-+ }
-+ return NULL;
-+}
-+
-+
-+static struct lpfc_bindlist *
-+lpfc_consistent_bind_create(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
-+{
-+
-+ struct lpfc_bindlist *blp;
-+ uint16_t index;
-+
-+
-+ /* NOTE: if scan-down = 2 and we have private loop, then we use
-+ * AlpaArray to determine sid.
-+ */
-+ if ((phba->cfg_fcp_bind_method == 4) &&
-+ ((phba->fc_flag & (FC_PUBLIC_LOOP | FC_FABRIC)) ||
-+ (phba->fc_topology != TOPOLOGY_LOOP))) {
-+ /* Log message: ALPA based binding used on a non loop
-+ topology */
-+ lpfc_printf_log(phba,
-+ KERN_WARNING,
-+ LOG_DISCOVERY,
-+ "%d:0245 ALPA based bind method used on an HBA "
-+ "which is in a nonloop topology Data: x%x\n",
-+ phba->brd_no,
-+ phba->fc_topology);
-+ }
-+
-+ if ((phba->cfg_fcp_bind_method == 4) &&
-+ !(phba->fc_flag & (FC_PUBLIC_LOOP | FC_FABRIC)) &&
-+ (phba->fc_topology == TOPOLOGY_LOOP)) {
-+ for (index = 0; index < FC_MAXLOOP; index++) {
-+ if (ndlp->nlp_DID == (uint32_t) lpfcAlpaArray[index]) {
-+ if ((blp =
-+ lpfc_create_binding(phba, ndlp, index,
-+ FCP_SEED_DID))) {
-+ return (blp);
-+ }
-+ goto errid;
-+ }
-+ }
-+ }
-+
-+ while (1) {
-+ if ((lpfc_binding_useid(phba, phba->sid_cnt))
-+ || (lpfc_mapping_useid (phba, phba->sid_cnt))) {
-+
-+ phba->sid_cnt++;
-+ } else {
-+ if ((blp =
-+ lpfc_create_binding(phba, ndlp,
-+ phba->sid_cnt,
-+ phba->fcp_mapping))) {
-+ blp->nlp_bind_type |= FCP_SEED_AUTO;
-+
-+ phba->sid_cnt++;
-+ return (blp);
-+ }
-+ goto errid;
-+ }
-+ }
-+errid:
-+ /* Cannot assign scsi id on NPort <nlp_DID> */
-+ lpfc_printf_log(phba,
-+ KERN_INFO,
-+ LOG_DISCOVERY | LOG_FCP,
-+ "%d:0230 Cannot assign scsi ID on NPort x%x "
-+ "Data: x%x x%x x%x\n",
-+ phba->brd_no,
-+ ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
-+ ndlp->nlp_rpi);
-+
-+ return NULL;
-+}
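
When no persistent binding matches, lpfc_consistent_bind_create() above hands out the next automatic SCSI id by advancing sid_cnt past ids already consumed by either the bind list or a mapped node. A minimal sketch of that scan follows; the bitmaps and the limit are invented stand-ins for lpfc_binding_useid()/lpfc_mapping_useid().

#include <stdio.h>
#include <stdbool.h>

#define MAX_SID 16

/* Invented stand-ins for lpfc_binding_useid() / lpfc_mapping_useid(). */
static bool bind_used[MAX_SID]    = { [0] = true, [1] = true };
static bool mapping_used[MAX_SID] = { [2] = true };

/* Mirror of the sid_cnt loop above: advance until an id is free on both
 * the bind list and the mapped-node list, then hand it out. */
static int next_free_sid(int sid_cnt)
{
	while (sid_cnt < MAX_SID &&
	       (bind_used[sid_cnt] || mapping_used[sid_cnt]))
		sid_cnt++;
	return sid_cnt < MAX_SID ? sid_cnt : -1;
}

int main(void)
{
	printf("first free scsi id: %d\n", next_free_sid(0));  /* prints 3 */
	return 0;
}
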
-+
-+static uint32_t
-+lpfc_assign_binding(struct lpfc_hba * phba,
-+ struct lpfc_nodelist * ndlp, struct lpfc_bindlist *blp)
-+{
-+ struct lpfc_target *targetp;
-+
-+ targetp = lpfc_find_target(phba, blp->nlp_sid, ndlp);
-+ if(!targetp) {
-+ /* Cannot assign scsi id <sid> to NPort <nlp_DID> */
-+ lpfc_printf_log(phba,
-+ KERN_INFO,
-+ LOG_DISCOVERY | LOG_FCP,
-+ "%d:0229 Cannot assign scsi id x%x to NPort x%x "
-+ "Data: x%x x%x x%x\n",
-+ phba->brd_no, blp->nlp_sid,
-+ ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
-+ ndlp->nlp_rpi);
-+ return(0);
-+ }
-+ ndlp->nlp_sid = blp->nlp_sid;
-+ ndlp->nlp_flag &= ~NLP_SEED_MASK;
-+ switch ((blp->nlp_bind_type & FCP_SEED_MASK)) {
-+ case FCP_SEED_WWPN:
-+ ndlp->nlp_flag |= NLP_SEED_WWPN;
-+ break;
-+ case FCP_SEED_WWNN:
-+ ndlp->nlp_flag |= NLP_SEED_WWNN;
-+ break;
-+ case FCP_SEED_DID:
-+ ndlp->nlp_flag |= NLP_SEED_DID;
-+ break;
-+ }
-+ if (blp->nlp_bind_type & FCP_SEED_AUTO) {
-+ ndlp->nlp_flag |= NLP_AUTOMAP;
-+ }
-+ /* Assign scsi id <sid> to NPort <nlp_DID> */
-+ lpfc_printf_log(phba,
-+ KERN_INFO,
-+ LOG_DISCOVERY | LOG_FCP,
-+ "%d:0216 Assign scsi "
-+ "id x%x to NPort x%x "
-+ "Data: x%x x%x x%x x%x\n",
-+ phba->brd_no,
-+ ndlp->nlp_sid, ndlp->nlp_DID,
-+ blp->nlp_bind_type,
-+ ndlp->nlp_flag, ndlp->nlp_state,
-+ ndlp->nlp_rpi);
-+ return(1);
-+}
-+
-+static uint32_t
-+lpfc_disc_set_adisc(struct lpfc_hba * phba,
-+ struct lpfc_nodelist * ndlp)
-+{
-+ /* Check config parameter use-adisc or FCP-2 */
-+ if ((phba->cfg_use_adisc == 0) &&
-+ !(phba->fc_flag & FC_RSCN_MODE)) {
-+ if (!(ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE))
-+ return (0);
-+ }
-+ ndlp->nlp_flag |= NLP_NPR_ADISC;
-+ return (1);
-+}
-+
-+static uint32_t
-+lpfc_disc_noop(struct lpfc_hba * phba,
-+ struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
-+{
-+ /* This routine does nothing, just return the current state */
-+ return (ndlp->nlp_state);
-+}
-+
-+static uint32_t
-+lpfc_disc_illegal(struct lpfc_hba * phba,
-+ struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
-+{
-+ lpfc_printf_log(phba,
-+ KERN_ERR,
-+ LOG_DISCOVERY,
-+ "%d:0253 Illegal State Transition: node x%x event x%x, "
-+ "state x%x Data: x%x x%x\n",
-+ phba->brd_no,
-+ ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi,
-+ ndlp->nlp_flag);
-+ return (ndlp->nlp_state);
-+}
-+
-+/* Start of Discovery State Machine routines */
-+
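
The routines that follow are the per-state, per-event handlers of the node discovery state machine: each one takes the node, an argument (IOCB, mailbox, etc.) and the event code, and returns the next node state. Below is a generic, self-contained sketch of that kind of dispatch; the table shape and the state/event names are illustrative only and are not claimed to match the driver's actual dispatch table.

#include <stdio.h>
#include <stdint.h>

enum state { STE_UNUSED, STE_PLOGI_ISSUE, STE_MAX };
enum event { EVT_RCV_PLOGI, EVT_RCV_LOGO, EVT_MAX };

/* Each handler gets the current state and returns the next state, just as
 * the routines that follow return ndlp->nlp_state (or a new state). */
typedef uint32_t (*action_t)(uint32_t cur);

static uint32_t keep_state(uint32_t cur) { return cur; }
static uint32_t go_plogi(uint32_t cur)   { (void)cur; return STE_PLOGI_ISSUE; }

/* One handler per (state, event) pair. */
static const action_t dispatch[STE_MAX][EVT_MAX] = {
	[STE_UNUSED]      = { [EVT_RCV_PLOGI] = go_plogi,   [EVT_RCV_LOGO] = keep_state },
	[STE_PLOGI_ISSUE] = { [EVT_RCV_PLOGI] = keep_state, [EVT_RCV_LOGO] = keep_state },
};

static uint32_t run_event(uint32_t cur, enum event evt)
{
	return dispatch[cur][evt](cur);
}

int main(void)
{
	uint32_t s = STE_UNUSED;
	s = run_event(s, EVT_RCV_PLOGI);
	printf("state after RCV_PLOGI: %u\n", s);  /* 1 == STE_PLOGI_ISSUE */
	return 0;
}
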
-+static uint32_t
-+lpfc_rcv_plogi_unused_node(struct lpfc_hba * phba,
-+ struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
-+{
-+ struct lpfc_iocbq *cmdiocb;
-+
-+ cmdiocb = (struct lpfc_iocbq *) arg;
-+
-+ if(lpfc_rcv_plogi(phba, ndlp, cmdiocb)) {
-+ ndlp->nlp_state = NLP_STE_UNUSED_NODE;
-+ lpfc_nlp_list(phba, ndlp, NLP_UNUSED_LIST);
-+ return (ndlp->nlp_state);
-+ }
-+ lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
-+ return (NLP_STE_FREED_NODE);
-+}
-+
-+static uint32_t
-+lpfc_rcv_els_unused_node(struct lpfc_hba * phba,
-+ struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
-+{
-+ lpfc_issue_els_logo(phba, ndlp, 0);
-+ lpfc_nlp_list(phba, ndlp, NLP_UNUSED_LIST);
-+ return (ndlp->nlp_state);
-+}
-+
-+static uint32_t
-+lpfc_rcv_logo_unused_node(struct lpfc_hba * phba,
-+ struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
-+{
-+ struct lpfc_iocbq *cmdiocb;
-+
-+ cmdiocb = (struct lpfc_iocbq *) arg;
-+
-+ ndlp->nlp_flag |= NLP_LOGO_ACC;
-+ lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
-+ lpfc_nlp_list(phba, ndlp, NLP_UNUSED_LIST);
-+
-+ return (ndlp->nlp_state);
-+}
-+
-+static uint32_t
-+lpfc_cmpl_logo_unused_node(struct lpfc_hba * phba,
-+ struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
-+{
-+ lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
-+ return (NLP_STE_FREED_NODE);
-+}
-+
-+static uint32_t
-+lpfc_device_rm_unused_node(struct lpfc_hba * phba,
-+ struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
-+{
-+ lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
-+ return (NLP_STE_FREED_NODE);
-+}
-+
-+static uint32_t
-+lpfc_rcv_plogi_plogi_issue(struct lpfc_hba * phba,
-+ struct lpfc_nodelist * ndlp,
-+ struct lpfc_iocbq *cmdiocb, uint32_t evt)
-+{
-+ struct lpfc_dmabuf *pcmd;
-+ struct serv_parm *sp;
-+ uint32_t *lp;
-+ struct ls_rjt stat;
-+ int port_cmp;
-+
-+ pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
-+ lp = (uint32_t *) pcmd->virt;
-+ sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
-+
-+ memset(&stat, 0, sizeof (struct ls_rjt));
-+
-+ /* For a PLOGI, we only accept if our portname is less
-+ * than the remote portname.
-+ */
-+ phba->fc_stat.elsLogiCol++;
-+ port_cmp = memcmp(&phba->fc_portname, &sp->portName,
-+ sizeof (struct lpfc_name));
-+
-+ if (port_cmp >= 0) {
-+ /* Reject this request because the remote node will accept
-+ ours */
-+ stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
-+ stat.un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS;
-+ lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
-+ }
-+ else {
-+ lpfc_rcv_plogi(phba, ndlp, cmdiocb);
-+ } /* if our portname was less */
-+
-+ return (ndlp->nlp_state);
-+}
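
lpfc_rcv_plogi_plogi_issue() above resolves a PLOGI collision with the usual tie-break: the incoming PLOGI is accepted only when our port WWPN compares lower than the remote's; otherwise it is rejected with "command in progress" and the remote is expected to accept ours. A tiny standalone illustration of that comparison follows; the WWPN values are invented.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

int main(void)
{
	/* Illustrative WWPNs in big-endian byte order, as carried on the wire. */
	uint8_t ours[8]   = { 0x10, 0x00, 0x00, 0x00, 0xc9, 0x11, 0x22, 0x33 };
	uint8_t theirs[8] = { 0x10, 0x00, 0x00, 0x00, 0xc9, 0xaa, 0xbb, 0xcc };

	/* Same tie-break memcmp as above: only the side with the lower WWPN
	 * accepts the incoming PLOGI; the other side rejects it and keeps
	 * its own PLOGI outstanding. */
	if (memcmp(ours, theirs, sizeof(ours)) < 0)
		printf("our WWPN is lower: accept the incoming PLOGI\n");
	else
		printf("our WWPN is higher or equal: reject it; the remote should accept ours\n");
	return 0;
}
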
-+
-+static uint32_t
-+lpfc_rcv_els_plogi_issue(struct lpfc_hba * phba,
-+ struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
-+{
-+ struct lpfc_iocbq *cmdiocb;
-+
-+ cmdiocb = (struct lpfc_iocbq *) arg;
-+
-+ /* software abort outstanding PLOGI */
-+ lpfc_els_abort(phba, ndlp, 1);
-+ mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
-+ ndlp->nlp_flag |= NLP_DELAY_TMO;
-+
-+ if(evt == NLP_EVT_RCV_LOGO) {
-+ lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
-+ }
-+ else {
-+ lpfc_issue_els_logo(phba, ndlp, 0);
-+ }
-+
-+	/* Put ndlp on the NPR list and set the plogi timer for 1 sec */
-+ ndlp->nlp_last_elscmd = (unsigned long)ELS_CMD_PLOGI;
-+ ndlp->nlp_state = NLP_STE_NPR_NODE;
-+ lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
-+
-+ return (ndlp->nlp_state);
-+}
-+
-+static uint32_t
-+lpfc_cmpl_plogi_plogi_issue(struct lpfc_hba * phba,
-+ struct lpfc_nodelist * ndlp, void *arg,
-+ uint32_t evt)
-+{
-+ struct lpfc_iocbq *cmdiocb, *rspiocb;
-+ struct lpfc_dmabuf *pcmd, *prsp;
-+ uint32_t *lp;
-+ IOCB_t *irsp;
-+ struct serv_parm *sp;
-+ LPFC_MBOXQ_t *mbox;
-+
-+ cmdiocb = (struct lpfc_iocbq *) arg;
-+ rspiocb = cmdiocb->context_un.rsp_iocb;
-+
-+ if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
-+ return (ndlp->nlp_state);
-+ }
-+
-+ irsp = &rspiocb->iocb;
-+
-+ if (irsp->ulpStatus == 0) {
-+ pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
-+
-+ prsp = (struct lpfc_dmabuf *) pcmd->list.next;
-+ lp = (uint32_t *) prsp->virt;
-+
-+ sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
-+ if ((lpfc_check_sparm(phba, ndlp, sp, CLASS3))) {
-+ /* PLOGI chkparm OK */
-+ lpfc_printf_log(phba,
-+ KERN_INFO,
-+ LOG_ELS,
-+ "%d:0121 PLOGI chkparm OK "
-+ "Data: x%x x%x x%x x%x\n",
-+ phba->brd_no,
-+ ndlp->nlp_DID, ndlp->nlp_state,
-+ ndlp->nlp_flag, ndlp->nlp_rpi);
-+
-+ if ((phba->cfg_fcp_class == 2) &&
-+ (sp->cls2.classValid)) {
-+ ndlp->nlp_fcp_info |= CLASS2;
-+ } else {
-+ ndlp->nlp_fcp_info |= CLASS3;
-+ }
-+
-+ if ((mbox = mempool_alloc(phba->mbox_mem_pool,
-+ GFP_ATOMIC))) {
-+ lpfc_unreg_rpi(phba, ndlp);
-+ if (lpfc_reg_login
-+ (phba, irsp->un.elsreq64.remoteID,
-+ (uint8_t *) sp, mbox, 0) == 0) {
-+ /* set_slim mailbox command needs to
-+ * execute first, queue this command to
-+ * be processed later.
-+ */
-+ switch(ndlp->nlp_DID) {
-+ case NameServer_DID:
-+ mbox->mbox_cmpl =
-+ lpfc_mbx_cmpl_ns_reg_login;
-+ break;
-+ case FDMI_DID:
-+ mbox->mbox_cmpl =
-+ lpfc_mbx_cmpl_fdmi_reg_login;
-+ break;
-+ default:
-+ mbox->mbox_cmpl =
-+ lpfc_mbx_cmpl_reg_login;
-+ }
-+ mbox->context2 = ndlp;
-+ if (lpfc_sli_issue_mbox(phba, mbox,
-+ (MBX_NOWAIT | MBX_STOP_IOCB))
-+ != MBX_NOT_FINISHED) {
-+ ndlp->nlp_state =
-+ NLP_STE_REG_LOGIN_ISSUE;
-+ lpfc_nlp_list(phba, ndlp,
-+ NLP_REGLOGIN_LIST);
-+ return (ndlp->nlp_state);
-+ }
-+ mempool_free(mbox, phba->mbox_mem_pool);
-+ } else {
-+ mempool_free(mbox, phba->mbox_mem_pool);
-+ }
-+ }
-+ }
-+ }
-+
-+ /* Free this node since the driver cannot login or has the wrong
-+ sparm */
-+ lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
-+ return (NLP_STE_FREED_NODE);
-+}
-+
-+static uint32_t
-+lpfc_device_rm_plogi_issue(struct lpfc_hba * phba,
-+ struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
-+{
-+ /* software abort outstanding PLOGI */
-+ lpfc_els_abort(phba, ndlp, 1);
-+
-+ lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
-+ return (NLP_STE_FREED_NODE);
-+}
-+
-+static uint32_t
-+lpfc_device_recov_plogi_issue(struct lpfc_hba * phba,
-+ struct lpfc_nodelist * ndlp, void *arg,
-+ uint32_t evt)
-+{
-+ /* software abort outstanding PLOGI */
-+ lpfc_els_abort(phba, ndlp, 1);
-+
-+ ndlp->nlp_state = NLP_STE_NPR_NODE;
-+ lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
-+ ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
-+
-+ return (ndlp->nlp_state);
-+}
-+
-+static uint32_t
-+lpfc_rcv_plogi_adisc_issue(struct lpfc_hba * phba,
-+ struct lpfc_nodelist * ndlp, void *arg,
-+ uint32_t evt)
-+{
-+ struct lpfc_iocbq *cmdiocb;
-+
-+ /* software abort outstanding ADISC */
-+ lpfc_els_abort(phba, ndlp, 1);
-+
-+ cmdiocb = (struct lpfc_iocbq *) arg;
-+
-+ if(lpfc_rcv_plogi(phba, ndlp, cmdiocb)) {
-+ return (ndlp->nlp_state);
-+ }
-+ ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
-+ lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
-+ lpfc_issue_els_plogi(phba, ndlp, 0);
-+
-+ return (ndlp->nlp_state);
-+}
-+
-+static uint32_t
-+lpfc_rcv_prli_adisc_issue(struct lpfc_hba * phba,
-+ struct lpfc_nodelist * ndlp, void *arg,
-+ uint32_t evt)
-+{
-+ struct lpfc_iocbq *cmdiocb;
-+
-+ cmdiocb = (struct lpfc_iocbq *) arg;
-+
-+ lpfc_els_rsp_prli_acc(phba, cmdiocb, ndlp);
-+ return (ndlp->nlp_state);
-+}
-+
-+static uint32_t
-+lpfc_rcv_logo_adisc_issue(struct lpfc_hba * phba,
-+ struct lpfc_nodelist * ndlp, void *arg,
-+ uint32_t evt)
-+{
-+ struct lpfc_iocbq *cmdiocb;
-+
-+ cmdiocb = (struct lpfc_iocbq *) arg;
-+
-+ /* software abort outstanding ADISC */
-+ lpfc_els_abort(phba, ndlp, 0);
-+
-+ lpfc_rcv_logo(phba, ndlp, cmdiocb);
-+ return (ndlp->nlp_state);
-+}
-+
-+static uint32_t
-+lpfc_rcv_padisc_adisc_issue(struct lpfc_hba * phba,
-+ struct lpfc_nodelist * ndlp, void *arg,
-+ uint32_t evt)
-+{
-+ struct lpfc_iocbq *cmdiocb;
-+
-+ cmdiocb = (struct lpfc_iocbq *) arg;
-+
-+ lpfc_rcv_padisc(phba, ndlp, cmdiocb);
-+ return (ndlp->nlp_state);
-+}
-+
-+static uint32_t
-+lpfc_rcv_prlo_adisc_issue(struct lpfc_hba * phba,
-+ struct lpfc_nodelist * ndlp, void *arg,
-+ uint32_t evt)
-+{
-+ struct lpfc_iocbq *cmdiocb;
-+
-+ cmdiocb = (struct lpfc_iocbq *) arg;
-+
-+ /* Treat like rcv logo */
-+ lpfc_rcv_logo(phba, ndlp, cmdiocb);
-+ return (ndlp->nlp_state);
-+}
-+
-+static uint32_t
-+lpfc_cmpl_adisc_adisc_issue(struct lpfc_hba * phba,
-+ struct lpfc_nodelist * ndlp, void *arg,
-+ uint32_t evt)
-+{
-+ struct lpfc_iocbq *cmdiocb, *rspiocb;
-+ struct lpfc_bindlist *blp;
-+ IOCB_t *irsp;
-+ ADISC *ap;
-+
-+ cmdiocb = (struct lpfc_iocbq *) arg;
-+ rspiocb = cmdiocb->context_un.rsp_iocb;
-+
-+ ap = (ADISC *)lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb);
-+ irsp = &rspiocb->iocb;
-+
-+ if ((irsp->ulpStatus) ||
-+ (!lpfc_check_adisc(phba, ndlp, &ap->nodeName, &ap->portName))) {
-+ ndlp->nlp_last_elscmd = (unsigned long)ELS_CMD_PLOGI;
-+ /* 1 sec timeout */
-+ mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
-+ ndlp->nlp_flag |= NLP_DELAY_TMO;
-+
-+ memset(&ndlp->nlp_nodename, 0, sizeof (struct lpfc_name));
-+ memset(&ndlp->nlp_portname, 0, sizeof (struct lpfc_name));
-+
-+ ndlp->nlp_state = NLP_STE_NPR_NODE;
-+ lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
-+ lpfc_unreg_rpi(phba, ndlp);
-+ return (ndlp->nlp_state);
-+ }
-+ /* move to mapped / unmapped list accordingly */
-+ /* Can we assign a SCSI Id to this NPort */
-+ if ((blp = lpfc_consistent_bind_get(phba, ndlp))) {
-+ /* Next 4 lines MUST be in this order */
-+ if(lpfc_assign_binding(phba, ndlp, blp)) {
-+ ndlp->nlp_state = NLP_STE_MAPPED_NODE;
-+ lpfc_nlp_list(phba, ndlp, NLP_MAPPED_LIST);
-+ ndlp->nlp_listp_bind = blp;
-+
-+ lpfc_set_failmask(phba, ndlp,
-+ (LPFC_DEV_DISCOVERY_INP|LPFC_DEV_DISCONNECTED),
-+ LPFC_CLR_BITMASK);
-+
-+ return (ndlp->nlp_state);
-+ }
-+ }
-+ ndlp->nlp_flag |= NLP_TGT_NO_SCSIID;
-+ ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
-+ lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);
-+
-+ lpfc_set_failmask(phba, ndlp,
-+ (LPFC_DEV_DISCOVERY_INP | LPFC_DEV_DISCONNECTED),
-+ LPFC_CLR_BITMASK);
-+
-+ return (ndlp->nlp_state);
-+}
-+
-+static uint32_t
-+lpfc_device_rm_adisc_issue(struct lpfc_hba * phba,
-+ struct lpfc_nodelist * ndlp, void *arg,
-+ uint32_t evt)
-+{
-+ /* software abort outstanding ADISC */
-+ lpfc_els_abort(phba, ndlp, 1);
-+
-+ lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
-+ return (NLP_STE_FREED_NODE);
-+}
-+
-+static uint32_t
-+lpfc_device_recov_adisc_issue(struct lpfc_hba * phba,
-+ struct lpfc_nodelist * ndlp, void *arg,
-+ uint32_t evt)
-+{
-+ /* software abort outstanding ADISC */
-+ lpfc_els_abort(phba, ndlp, 1);
-+
-+ ndlp->nlp_state = NLP_STE_NPR_NODE;
-+ lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
-+ ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
-+
-+ lpfc_disc_set_adisc(phba, ndlp);
-+ return (ndlp->nlp_state);
-+}
-+
-+static uint32_t
-+lpfc_rcv_plogi_reglogin_issue(struct lpfc_hba * phba,
-+ struct lpfc_nodelist * ndlp, void *arg,
-+ uint32_t evt)
-+{
-+ struct lpfc_iocbq *cmdiocb;
-+
-+ cmdiocb = (struct lpfc_iocbq *) arg;
-+
-+ lpfc_rcv_plogi(phba, ndlp, cmdiocb);
-+ return (ndlp->nlp_state);
-+}
-+
-+static uint32_t
-+lpfc_rcv_prli_reglogin_issue(struct lpfc_hba * phba,
-+ struct lpfc_nodelist * ndlp, void *arg,
-+ uint32_t evt)
-+{
-+ struct lpfc_iocbq *cmdiocb;
-+
-+ cmdiocb = (struct lpfc_iocbq *) arg;
-+
-+ lpfc_els_rsp_prli_acc(phba, cmdiocb, ndlp);
-+ return (ndlp->nlp_state);
-+}
-+
-+static uint32_t
-+lpfc_rcv_logo_reglogin_issue(struct lpfc_hba * phba,
-+ struct lpfc_nodelist * ndlp, void *arg,
-+ uint32_t evt)
-+{
-+ struct lpfc_iocbq *cmdiocb;
-+
-+ cmdiocb = (struct lpfc_iocbq *) arg;
-+
-+ lpfc_rcv_logo(phba, ndlp, cmdiocb);
-+ return (ndlp->nlp_state);
-+}
-+
-+static uint32_t
-+lpfc_rcv_padisc_reglogin_issue(struct lpfc_hba * phba,
-+ struct lpfc_nodelist * ndlp, void *arg,
-+ uint32_t evt)
-+{
-+ struct lpfc_iocbq *cmdiocb;
-+
-+ cmdiocb = (struct lpfc_iocbq *) arg;
-+
-+ lpfc_rcv_padisc(phba, ndlp, cmdiocb);
-+ return (ndlp->nlp_state);
-+}
-+
-+static uint32_t
-+lpfc_rcv_prlo_reglogin_issue(struct lpfc_hba * phba,
-+ struct lpfc_nodelist * ndlp, void *arg,
-+ uint32_t evt)
-+{
-+ struct lpfc_iocbq *cmdiocb;
-+
-+ cmdiocb = (struct lpfc_iocbq *) arg;
-+ lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
-+ return (ndlp->nlp_state);
-+}
-+
-+static uint32_t
-+lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_hba * phba,
-+ struct lpfc_nodelist * ndlp,
-+ void *arg, uint32_t evt)
-+{
-+ LPFC_MBOXQ_t *pmb;
-+ MAILBOX_t *mb;
-+ uint32_t did;
-+
-+ pmb = (LPFC_MBOXQ_t *) arg;
-+ mb = &pmb->mb;
-+ did = mb->un.varWords[1];
-+ if (mb->mbxStatus) {
-+ /* RegLogin failed */
-+ lpfc_printf_log(phba,
-+ KERN_ERR,
-+ LOG_DISCOVERY,
-+ "%d:0246 RegLogin failed Data: x%x x%x x%x\n",
-+ phba->brd_no,
-+ did, mb->mbxStatus, phba->hba_state);
-+
-+ mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
-+ ndlp->nlp_flag |= NLP_DELAY_TMO;
-+
-+ lpfc_issue_els_logo(phba, ndlp, 0);
-+		/* Put ndlp on the NPR list and set the plogi timer for 1 sec */
-+ ndlp->nlp_last_elscmd = (unsigned long)ELS_CMD_PLOGI;
-+ ndlp->nlp_state = NLP_STE_NPR_NODE;
-+ lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
-+ return (ndlp->nlp_state);
-+ }
-+
-+ if (ndlp->nlp_rpi != 0)
-+ lpfc_findnode_remove_rpi(phba, ndlp->nlp_rpi);
-+
-+ ndlp->nlp_rpi = mb->un.varWords[0];
-+ lpfc_addnode_rpi(phba, ndlp, ndlp->nlp_rpi);
-+
-+ /* Only if we are not a fabric nport do we issue PRLI */
-+ if (!(ndlp->nlp_type & NLP_FABRIC)) {
-+ ndlp->nlp_state = NLP_STE_PRLI_ISSUE;
-+ lpfc_nlp_list(phba, ndlp, NLP_PRLI_LIST);
-+ lpfc_issue_els_prli(phba, ndlp, 0);
-+ } else {
-+ ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
-+ lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);
-+ }
-+ return (ndlp->nlp_state);
-+}
-+
-+static uint32_t
-+lpfc_device_rm_reglogin_issue(struct lpfc_hba * phba,
-+ struct lpfc_nodelist * ndlp, void *arg,
-+ uint32_t evt)
-+{
-+ lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
-+ return (NLP_STE_FREED_NODE);
-+}
-+
-+static uint32_t
-+lpfc_device_recov_reglogin_issue(struct lpfc_hba * phba,
-+ struct lpfc_nodelist * ndlp, void *arg,
-+ uint32_t evt)
-+{
-+ ndlp->nlp_state = NLP_STE_NPR_NODE;
-+ lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
-+ ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
-+ return (ndlp->nlp_state);
-+}
-+
-+static uint32_t
-+lpfc_rcv_plogi_prli_issue(struct lpfc_hba * phba,
-+ struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
-+{
-+ struct lpfc_iocbq *cmdiocb;
-+
-+ cmdiocb = (struct lpfc_iocbq *) arg;
-+
-+ lpfc_rcv_plogi(phba, ndlp, cmdiocb);
-+ return (ndlp->nlp_state);
-+}
-+
-+static uint32_t
-+lpfc_rcv_prli_prli_issue(struct lpfc_hba * phba,
-+ struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
-+{
-+ struct lpfc_iocbq *cmdiocb;
-+
-+ cmdiocb = (struct lpfc_iocbq *) arg;
-+
-+ lpfc_els_rsp_prli_acc(phba, cmdiocb, ndlp);
-+ return (ndlp->nlp_state);
-+}
-+
-+static uint32_t
-+lpfc_rcv_logo_prli_issue(struct lpfc_hba * phba,
-+ struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
-+{
-+ struct lpfc_iocbq *cmdiocb;
-+
-+ cmdiocb = (struct lpfc_iocbq *) arg;
-+
-+ /* Software abort outstanding PRLI before sending acc */
-+ lpfc_els_abort(phba, ndlp, 1);
-+
-+ lpfc_rcv_logo(phba, ndlp, cmdiocb);
-+ return (ndlp->nlp_state);
-+}
-+
-+static uint32_t
-+lpfc_rcv_padisc_prli_issue(struct lpfc_hba * phba,
-+ struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
-+{
-+ struct lpfc_iocbq *cmdiocb;
-+
-+ cmdiocb = (struct lpfc_iocbq *) arg;
-+
-+ lpfc_rcv_padisc(phba, ndlp, cmdiocb);
-+ return (ndlp->nlp_state);
-+}
-+
-+/* This routine is invoked when we receive a PRLO request from an nport
-+ * we are logged into. We should send back a PRLO rsp setting the
-+ * appropriate bits.
-+ * NEXT STATE = PRLI_ISSUE
-+ */
-+static uint32_t
-+lpfc_rcv_prlo_prli_issue(struct lpfc_hba * phba,
-+ struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
-+{
-+ struct lpfc_iocbq *cmdiocb;
-+
-+ cmdiocb = (struct lpfc_iocbq *) arg;
-+ lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
-+ return (ndlp->nlp_state);
-+}
-+
-+static uint32_t
-+lpfc_cmpl_prli_prli_issue(struct lpfc_hba * phba,
-+ struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
-+{
-+ struct lpfc_iocbq *cmdiocb, *rspiocb;
-+ IOCB_t *irsp;
-+ PRLI *npr;
-+ struct lpfc_bindlist *blp;
-+
-+ cmdiocb = (struct lpfc_iocbq *) arg;
-+ rspiocb = cmdiocb->context_un.rsp_iocb;
-+ npr = (PRLI *)lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb);
-+
-+ irsp = &rspiocb->iocb;
-+ if (irsp->ulpStatus) {
-+ ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
-+ lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);
-+ lpfc_set_failmask(phba, ndlp, LPFC_DEV_DISCOVERY_INP,
-+ LPFC_CLR_BITMASK);
-+ return (ndlp->nlp_state);
-+ }
-+
-+ /* Check out PRLI rsp */
-+ if ((npr->acceptRspCode != PRLI_REQ_EXECUTED) ||
-+ (npr->prliType != PRLI_FCP_TYPE) || (npr->targetFunc != 1)) {
-+ ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
-+ lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);
-+ lpfc_set_failmask(phba, ndlp,
-+ (LPFC_DEV_DISCOVERY_INP | LPFC_DEV_DISCONNECTED),
-+ LPFC_CLR_BITMASK);
-+ return (ndlp->nlp_state);
-+ }
-+ if (npr->Retry == 1) {
-+ ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
-+ }
-+
-+ /* Can we assign a SCSI Id to this NPort */
-+ blp = lpfc_consistent_bind_get(phba, ndlp);
-+ if (!blp)
-+ blp = lpfc_consistent_bind_create(phba, ndlp);
-+ if (blp) {
-+ /* Next 4 lines MUST be in this order */
-+ if(lpfc_assign_binding(phba, ndlp, blp)) {
-+ ndlp->nlp_state = NLP_STE_MAPPED_NODE;
-+ lpfc_nlp_list(phba, ndlp, NLP_MAPPED_LIST);
-+ ndlp->nlp_listp_bind = blp;
-+
-+ lpfc_set_failmask(phba, ndlp,
-+ (LPFC_DEV_DISCOVERY_INP|LPFC_DEV_DISCONNECTED),
-+ LPFC_CLR_BITMASK);
-+ return (ndlp->nlp_state);
-+ }
-+ }
-+ ndlp->nlp_flag |= NLP_TGT_NO_SCSIID;
-+ ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
-+ lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);
-+
-+ lpfc_set_failmask(phba, ndlp,
-+ (LPFC_DEV_DISCOVERY_INP | LPFC_DEV_DISCONNECTED),
-+ LPFC_CLR_BITMASK);
-+ return (ndlp->nlp_state);
-+}
-+
-+/*! lpfc_device_rm_prli_issue
-+ *
-+ * \pre
-+ * \post
-+ * \param phba
-+ * \param ndlp
-+ * \param arg
-+ * \param evt
-+ * \return uint32_t
-+ *
-+ * \b Description:
-+ * This routine is invoked when we receive a request to remove an nport we
-+ * are in the process of PRLIing. We should software abort the outstanding
-+ * PRLI, unreg the login, and send a LOGO. We will change the node state to
-+ * UNUSED_NODE and put it on the plogi list so it can be freed when the LOGO
-+ * completes.
-+ *
-+ */
-+static uint32_t
-+lpfc_device_rm_prli_issue(struct lpfc_hba * phba,
-+ struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
-+{
-+ /* software abort outstanding PRLI */
-+ lpfc_els_abort(phba, ndlp, 1);
-+
-+ lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
-+ return (NLP_STE_FREED_NODE);
-+}
-+
-+
-+/*! lpfc_device_recov_prli_issue
-+ *
-+ * \pre
-+ * \post
-+ * \param phba
-+ * \param ndlp
-+ * \param arg
-+ * \param evt
-+ * \return uint32_t
-+ *
-+ * \b Description:
-+ * This routine is invoked when the state of a device is unknown, such as
-+ * during a link down. We should remove the nodelist entry from the
-+ * unmapped list, issue an UNREG_LOGIN, do a software abort of the
-+ * outstanding PRLI command, then free the node entry.
-+ */
-+static uint32_t
-+lpfc_device_recov_prli_issue(struct lpfc_hba * phba,
-+ struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
-+{
-+ /* software abort outstanding PRLI */
-+ lpfc_els_abort(phba, ndlp, 1);
-+
-+ ndlp->nlp_state = NLP_STE_NPR_NODE;
-+ lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
-+ ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
-+ return (ndlp->nlp_state);
-+}
-+
-+static uint32_t
-+lpfc_rcv_plogi_unmap_node(struct lpfc_hba * phba,
-+ struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
-+{
-+ struct lpfc_iocbq *cmdiocb;
-+
-+ cmdiocb = (struct lpfc_iocbq *) arg;
-+
-+ lpfc_rcv_plogi(phba, ndlp, cmdiocb);
-+ return (ndlp->nlp_state);
-+}
-+
-+static uint32_t
-+lpfc_rcv_prli_unmap_node(struct lpfc_hba * phba,
-+ struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
-+{
-+ struct lpfc_iocbq *cmdiocb;
-+
-+ cmdiocb = (struct lpfc_iocbq *) arg;
-+
-+ lpfc_els_rsp_prli_acc(phba, cmdiocb, ndlp);
-+ return (ndlp->nlp_state);
-+}
-+
-+static uint32_t
-+lpfc_rcv_logo_unmap_node(struct lpfc_hba * phba,
-+ struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
-+{
-+ struct lpfc_iocbq *cmdiocb;
-+
-+ cmdiocb = (struct lpfc_iocbq *) arg;
-+
-+ lpfc_rcv_logo(phba, ndlp, cmdiocb);
-+ return (ndlp->nlp_state);
-+}
-+
-+static uint32_t
-+lpfc_rcv_padisc_unmap_node(struct lpfc_hba * phba,
-+ struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
-+{
-+ struct lpfc_iocbq *cmdiocb;
-+
-+ cmdiocb = (struct lpfc_iocbq *) arg;
-+
-+ lpfc_rcv_padisc(phba, ndlp, cmdiocb);
-+ return (ndlp->nlp_state);
-+}
-+
-+static uint32_t
-+lpfc_rcv_prlo_unmap_node(struct lpfc_hba * phba,
-+ struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
-+{
-+ struct lpfc_iocbq *cmdiocb;
-+
-+ cmdiocb = (struct lpfc_iocbq *) arg;
-+
-+ /* Treat like rcv logo */
-+ lpfc_rcv_logo(phba, ndlp, cmdiocb);
-+ return (ndlp->nlp_state);
-+}
-+
-+static uint32_t
-+lpfc_device_recov_unmap_node(struct lpfc_hba * phba,
-+ struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
-+{
-+ ndlp->nlp_state = NLP_STE_NPR_NODE;
-+ lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
-+ ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
-+ lpfc_disc_set_adisc(phba, ndlp);
-+
-+ return (ndlp->nlp_state);
-+}
-+
-+static uint32_t
-+lpfc_rcv_plogi_mapped_node(struct lpfc_hba * phba,
-+ struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
-+{
-+ struct lpfc_iocbq *cmdiocb;
-+
-+ cmdiocb = (struct lpfc_iocbq *) arg;
-+
-+ lpfc_rcv_plogi(phba, ndlp, cmdiocb);
-+ return (ndlp->nlp_state);
-+}
-+
-+static uint32_t
-+lpfc_rcv_prli_mapped_node(struct lpfc_hba * phba,
-+ struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
-+{
-+ struct lpfc_iocbq *cmdiocb;
-+
-+ cmdiocb = (struct lpfc_iocbq *) arg;
-+ lpfc_els_rsp_prli_acc(phba, cmdiocb, ndlp);
-+ return (ndlp->nlp_state);
-+}
-+
-+static uint32_t
-+lpfc_rcv_logo_mapped_node(struct lpfc_hba * phba,
-+ struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
-+{
-+ struct lpfc_iocbq *cmdiocb;
-+
-+ cmdiocb = (struct lpfc_iocbq *) arg;
-+
-+ lpfc_rcv_logo(phba, ndlp, cmdiocb);
-+ return (ndlp->nlp_state);
-+}
-+
-+static uint32_t
-+lpfc_rcv_padisc_mapped_node(struct lpfc_hba * phba,
-+ struct lpfc_nodelist * ndlp, void *arg,
-+ uint32_t evt)
-+{
-+ struct lpfc_iocbq *cmdiocb;
-+
-+ cmdiocb = (struct lpfc_iocbq *) arg;
-+
-+ lpfc_rcv_padisc(phba, ndlp, cmdiocb);
-+ return (ndlp->nlp_state);
-+}
-+
-+static uint32_t
-+lpfc_rcv_prlo_mapped_node(struct lpfc_hba * phba,
-+ struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
-+{
-+ struct lpfc_iocbq *cmdiocb;
-+
-+ cmdiocb = (struct lpfc_iocbq *) arg;
-+
-+ /* flush the target */
-+ lpfc_sli_abort_iocb_tgt(phba,
-+ &phba->sli.ring[phba->sli.fcp_ring],
-+ ndlp->nlp_sid, LPFC_ABORT_ALLQ);
-+
-+ /* Treat like rcv logo */
-+ lpfc_rcv_logo(phba, ndlp, cmdiocb);
-+ return (ndlp->nlp_state);
-+}
-+
-+static uint32_t
-+lpfc_device_recov_mapped_node(struct lpfc_hba * phba,
-+ struct lpfc_nodelist * ndlp, void *arg,
-+ uint32_t evt)
-+{
-+ ndlp->nlp_state = NLP_STE_NPR_NODE;
-+ lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
-+ ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
-+ lpfc_disc_set_adisc(phba, ndlp);
-+ return (ndlp->nlp_state);
-+}
-+
-+static uint32_t
-+lpfc_rcv_plogi_npr_node(struct lpfc_hba * phba,
-+ struct lpfc_nodelist * ndlp, void *arg,
-+ uint32_t evt)
-+{
-+ struct lpfc_iocbq *cmdiocb;
-+
-+ cmdiocb = (struct lpfc_iocbq *) arg;
-+
-+ /* Ignore PLOGI if we have an outstanding LOGO */
-+ if (ndlp->nlp_flag & NLP_LOGO_SND) {
-+ return (ndlp->nlp_state);
-+ }
-+
-+ if(lpfc_rcv_plogi(phba, ndlp, cmdiocb)) {
-+ ndlp->nlp_flag &= ~(NLP_NPR_ADISC | NLP_NPR_2B_DISC);
-+ return (ndlp->nlp_state);
-+ }
-+
-+ /* send PLOGI immediately, move to PLOGI issue state */
-+ if(!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
-+ ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
-+ lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
-+ lpfc_issue_els_plogi(phba, ndlp, 0);
-+ }
-+ return (ndlp->nlp_state);
-+}
-+
-+static uint32_t
-+lpfc_rcv_prli_npr_node(struct lpfc_hba * phba,
-+ struct lpfc_nodelist * ndlp, void *arg,
-+ uint32_t evt)
-+{
-+ struct lpfc_iocbq *cmdiocb;
-+
-+ cmdiocb = (struct lpfc_iocbq *) arg;
-+
-+ lpfc_els_rsp_prli_acc(phba, cmdiocb, ndlp);
-+
-+ if(!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
-+ if (ndlp->nlp_flag & NLP_NPR_ADISC) {
-+ ndlp->nlp_state = NLP_STE_ADISC_ISSUE;
-+ lpfc_nlp_list(phba, ndlp, NLP_ADISC_LIST);
-+ lpfc_issue_els_adisc(phba, ndlp, 0);
-+ } else {
-+ ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
-+ lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
-+ lpfc_issue_els_plogi(phba, ndlp, 0);
-+ }
-+ }
-+ return (ndlp->nlp_state);
-+}
-+
-+static uint32_t
-+lpfc_rcv_logo_npr_node(struct lpfc_hba * phba,
-+ struct lpfc_nodelist * ndlp, void *arg,
-+ uint32_t evt)
-+{
-+ struct lpfc_iocbq *cmdiocb;
-+
-+ cmdiocb = (struct lpfc_iocbq *) arg;
-+
-+ lpfc_rcv_logo(phba, ndlp, cmdiocb);
-+ return (ndlp->nlp_state);
-+}
-+
-+static uint32_t
-+lpfc_rcv_padisc_npr_node(struct lpfc_hba * phba,
-+ struct lpfc_nodelist * ndlp, void *arg,
-+ uint32_t evt)
-+{
-+ struct lpfc_iocbq *cmdiocb;
-+
-+ cmdiocb = (struct lpfc_iocbq *) arg;
-+
-+ lpfc_rcv_padisc(phba, ndlp, cmdiocb);
-+
-+ if(!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
-+ if (ndlp->nlp_flag & NLP_NPR_ADISC) {
-+ ndlp->nlp_state = NLP_STE_ADISC_ISSUE;
-+ lpfc_nlp_list(phba, ndlp, NLP_ADISC_LIST);
-+ lpfc_issue_els_adisc(phba, ndlp, 0);
-+ } else {
-+ ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
-+ lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
-+ lpfc_issue_els_plogi(phba, ndlp, 0);
-+ }
-+ }
-+ return (ndlp->nlp_state);
-+}
-+
-+static uint32_t
-+lpfc_rcv_prlo_npr_node(struct lpfc_hba * phba,
-+ struct lpfc_nodelist * ndlp, void *arg,
-+ uint32_t evt)
-+{
-+ struct lpfc_iocbq *cmdiocb;
-+
-+ cmdiocb = (struct lpfc_iocbq *) arg;
-+
-+ lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
-+
-+ if(ndlp->nlp_flag & NLP_DELAY_TMO) {
-+ if (ndlp->nlp_last_elscmd == (unsigned long)ELS_CMD_PLOGI) {
-+ return (ndlp->nlp_state);
-+ } else {
-+ ndlp->nlp_flag &= ~NLP_DELAY_TMO;
-+ spin_unlock_irq(phba->host->host_lock);
-+ del_timer_sync(&ndlp->nlp_delayfunc);
-+ spin_lock_irq(phba->host->host_lock);
-+ if (!list_empty(&ndlp->els_retry_evt.
-+ evt_listp))
-+ list_del_init(&ndlp->els_retry_evt.
-+ evt_listp);
-+ }
-+ }
-+
-+ ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
-+ lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
-+ lpfc_issue_els_plogi(phba, ndlp, 0);
-+ return (ndlp->nlp_state);
-+}
-+
-+static uint32_t
-+lpfc_cmpl_logo_npr_node(struct lpfc_hba * phba,
-+ struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
-+{
-+ lpfc_unreg_rpi(phba, ndlp);
-+ /* Unregister the RPI; no state change, just return the current state */
-+ return (ndlp->nlp_state);
-+}
-+
-+static uint32_t
-+lpfc_cmpl_reglogin_npr_node(struct lpfc_hba * phba,
-+ struct lpfc_nodelist * ndlp, void *arg,
-+ uint32_t evt)
-+{
-+ LPFC_MBOXQ_t *pmb;
-+ MAILBOX_t *mb;
-+
-+ pmb = (LPFC_MBOXQ_t *) arg;
-+ mb = &pmb->mb;
-+
-+ /* save rpi */
-+ if (ndlp->nlp_rpi != 0)
-+ lpfc_findnode_remove_rpi(phba, ndlp->nlp_rpi);
-+
-+ ndlp->nlp_rpi = mb->un.varWords[0];
-+ lpfc_addnode_rpi(phba, ndlp, ndlp->nlp_rpi);
-+
-+ return (ndlp->nlp_state);
-+}
-+
-+static uint32_t
-+lpfc_device_rm_npr_node(struct lpfc_hba * phba,
-+ struct lpfc_nodelist * ndlp, void *arg,
-+ uint32_t evt)
-+{
-+ lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
-+ return (NLP_STE_FREED_NODE);
-+}
-+
-+static uint32_t
-+lpfc_device_recov_npr_node(struct lpfc_hba * phba,
-+ struct lpfc_nodelist * ndlp, void *arg,
-+ uint32_t evt)
-+{
-+ ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
-+ return (ndlp->nlp_state);
-+}
-+
-+
-+/* This next section defines the NPort Discovery State Machine */
-+
-+/* There are 4 different doubly linked lists that nodelist entries can reside
-+ * on. The plogi list and adisc list are used when Link Up discovery or RSCN
-+ * processing is needed. Each list holds the nodes that we will send PLOGI
-+ * or ADISC on. These lists keep track of which nodes will be affected
-+ * by an RSCN or a Link Up (typically, all nodes are affected on Link Up).
-+ * The unmapped_list will contain all nodes that we have successfully logged
-+ * into at the Fibre Channel level. The mapped_list will contain all nodes
-+ * that are mapped FCP targets.
-+ */
-+/*
-+ * The bind list is a list of undiscovered (potentially non-existent) nodes
-+ * that we have saved binding information on. This information is used when
-+ * nodes transition from the unmapped to the mapped list.
-+ */
-+/* For UNUSED_NODE state, the node has just been allocated.
-+ * For PLOGI_ISSUE and REG_LOGIN_ISSUE, the node is on
-+ * the PLOGI list. For REG_LOGIN_COMPL, the node is taken off the PLOGI list
-+ * and put on the unmapped list. For ADISC processing, the node is taken off
-+ * the ADISC list and placed on either the mapped or unmapped list (depending
-+ * on its previous state). Once on the unmapped list, a PRLI is issued and the
-+ * state changed to PRLI_ISSUE. When the PRLI completion occurs, the state is
-+ * changed to UNMAPPED_NODE. If the completion indicates a mapped
-+ * node, the node is taken off the unmapped list. The binding list is checked
-+ * for a valid binding, or a binding is automatically assigned. If binding
-+ * assignment is unsuccessful, the node is left on the unmapped list. If
-+ * binding assignment is successful, the associated binding list entry (if
-+ * any) is removed, and the node is placed on the mapped list.
-+ */
-+/*
-+ * For a Link Down, all nodes on the ADISC, PLOGI, unmapped or mapped
-+ * lists will receive a DEVICE_RECOVERY event. If the linkdown or nodev timers
-+ * expire, all affected nodes will receive a DEVICE_RM event.
-+ */
-+/*
-+ * For a Link Up or RSCN, all nodes will move from the mapped / unmapped lists
-+ * to either the ADISC or PLOGI list. After a Nameserver query or ALPA loopmap
-+ * check, additional nodes may be added or removed (via DEVICE_RM) to / from
-+ * the PLOGI or ADISC lists. Once the PLOGI and ADISC lists are populated,
-+ * we will first process the ADISC list. 32 entries are processed initially and
-+ * ADISC is initiated for each one. Completions / Events for each node are
-+ * funneled through the state machine. As each node finishes ADISC processing, it
-+ * starts ADISC for any nodes waiting for ADISC processing. If no nodes are
-+ * waiting, and the ADISC list count is identically 0, then we are done. For
-+ * Link Up discovery, since all nodes on the PLOGI list are UNREG_LOGIN'ed, we
-+ * can issue a CLEAR_LA and re-enable Link Events. Next we will process the PLOGI
-+ * list. 32 entries are processed initially and PLOGI is initiated for each one.
-+ * Completions / Events for each node are funneled through the state machine. As
-+ * each node finishes PLOGI processing, it starts PLOGI for any nodes waiting
-+ * for PLOGI processing. If no nodes are waiting, and the PLOGI list count is
-+ * identically 0, then we are done. We have now completed discovery / RSCN
-+ * handling. Upon completion, ALL nodes should be on either the mapped or
-+ * unmapped lists.
-+ */
-+
-+static void *lpfc_disc_action[NLP_STE_MAX_STATE * NLP_EVT_MAX_EVENT] = {
-+ /* Action routine Event Current State */
-+ (void *)lpfc_rcv_plogi_unused_node, /* RCV_PLOGI UNUSED_NODE */
-+ (void *)lpfc_rcv_els_unused_node, /* RCV_PRLI */
-+ (void *)lpfc_rcv_logo_unused_node, /* RCV_LOGO */
-+ (void *)lpfc_rcv_els_unused_node, /* RCV_ADISC */
-+ (void *)lpfc_rcv_els_unused_node, /* RCV_PDISC */
-+ (void *)lpfc_rcv_els_unused_node, /* RCV_PRLO */
-+ (void *)lpfc_disc_illegal, /* CMPL_PLOGI */
-+ (void *)lpfc_disc_illegal, /* CMPL_PRLI */
-+ (void *)lpfc_cmpl_logo_unused_node, /* CMPL_LOGO */
-+ (void *)lpfc_disc_illegal, /* CMPL_ADISC */
-+ (void *)lpfc_disc_illegal, /* CMPL_REG_LOGIN */
-+ (void *)lpfc_device_rm_unused_node, /* DEVICE_RM */
-+ (void *)lpfc_disc_illegal, /* DEVICE_RECOVERY */
-+
-+ (void *)lpfc_rcv_plogi_plogi_issue, /* RCV_PLOGI PLOGI_ISSUE */
-+ (void *)lpfc_rcv_els_plogi_issue, /* RCV_PRLI */
-+ (void *)lpfc_rcv_els_plogi_issue, /* RCV_LOGO */
-+ (void *)lpfc_rcv_els_plogi_issue, /* RCV_ADISC */
-+ (void *)lpfc_rcv_els_plogi_issue, /* RCV_PDISC */
-+ (void *)lpfc_rcv_els_plogi_issue, /* RCV_PRLO */
-+ (void *)lpfc_cmpl_plogi_plogi_issue, /* CMPL_PLOGI */
-+ (void *)lpfc_disc_illegal, /* CMPL_PRLI */
-+ (void *)lpfc_disc_illegal, /* CMPL_LOGO */
-+ (void *)lpfc_disc_illegal, /* CMPL_ADISC */
-+ (void *)lpfc_disc_illegal, /* CMPL_REG_LOGIN */
-+ (void *)lpfc_device_rm_plogi_issue, /* DEVICE_RM */
-+ (void *)lpfc_device_recov_plogi_issue, /* DEVICE_RECOVERY */
-+
-+ (void *)lpfc_rcv_plogi_adisc_issue, /* RCV_PLOGI ADISC_ISSUE */
-+ (void *)lpfc_rcv_prli_adisc_issue, /* RCV_PRLI */
-+ (void *)lpfc_rcv_logo_adisc_issue, /* RCV_LOGO */
-+ (void *)lpfc_rcv_padisc_adisc_issue, /* RCV_ADISC */
-+ (void *)lpfc_rcv_padisc_adisc_issue, /* RCV_PDISC */
-+ (void *)lpfc_rcv_prlo_adisc_issue, /* RCV_PRLO */
-+ (void *)lpfc_disc_illegal, /* CMPL_PLOGI */
-+ (void *)lpfc_disc_illegal, /* CMPL_PRLI */
-+ (void *)lpfc_disc_illegal, /* CMPL_LOGO */
-+ (void *)lpfc_cmpl_adisc_adisc_issue, /* CMPL_ADISC */
-+ (void *)lpfc_disc_illegal, /* CMPL_REG_LOGIN */
-+ (void *)lpfc_device_rm_adisc_issue, /* DEVICE_RM */
-+ (void *)lpfc_device_recov_adisc_issue, /* DEVICE_RECOVERY */
-+
-+ (void *)lpfc_rcv_plogi_reglogin_issue, /* RCV_PLOGI REG_LOGIN_ISSUE */
-+ (void *)lpfc_rcv_prli_reglogin_issue, /* RCV_PRLI */
-+ (void *)lpfc_rcv_logo_reglogin_issue, /* RCV_LOGO */
-+ (void *)lpfc_rcv_padisc_reglogin_issue, /* RCV_ADISC */
-+ (void *)lpfc_rcv_padisc_reglogin_issue, /* RCV_PDISC */
-+ (void *)lpfc_rcv_prlo_reglogin_issue, /* RCV_PRLO */
-+ (void *)lpfc_disc_illegal, /* CMPL_PLOGI */
-+ (void *)lpfc_disc_illegal, /* CMPL_PRLI */
-+ (void *)lpfc_disc_illegal, /* CMPL_LOGO */
-+ (void *)lpfc_disc_illegal, /* CMPL_ADISC */
-+ (void *)lpfc_cmpl_reglogin_reglogin_issue,/* CMPL_REG_LOGIN */
-+ (void *)lpfc_device_rm_reglogin_issue, /* DEVICE_RM */
-+ (void *)lpfc_device_recov_reglogin_issue,/* DEVICE_RECOVERY */
-+
-+ (void *)lpfc_rcv_plogi_prli_issue, /* RCV_PLOGI PRLI_ISSUE */
-+ (void *)lpfc_rcv_prli_prli_issue, /* RCV_PRLI */
-+ (void *)lpfc_rcv_logo_prli_issue, /* RCV_LOGO */
-+ (void *)lpfc_rcv_padisc_prli_issue, /* RCV_ADISC */
-+ (void *)lpfc_rcv_padisc_prli_issue, /* RCV_PDISC */
-+ (void *)lpfc_rcv_prlo_prli_issue, /* RCV_PRLO */
-+ (void *)lpfc_disc_illegal, /* CMPL_PLOGI */
-+ (void *)lpfc_cmpl_prli_prli_issue, /* CMPL_PRLI */
-+ (void *)lpfc_disc_illegal, /* CMPL_LOGO */
-+ (void *)lpfc_disc_illegal, /* CMPL_ADISC */
-+ (void *)lpfc_disc_illegal, /* CMPL_REG_LOGIN */
-+ (void *)lpfc_device_rm_prli_issue, /* DEVICE_RM */
-+ (void *)lpfc_device_recov_prli_issue, /* DEVICE_RECOVERY */
-+
-+ (void *)lpfc_rcv_plogi_unmap_node, /* RCV_PLOGI UNMAPPED_NODE */
-+ (void *)lpfc_rcv_prli_unmap_node, /* RCV_PRLI */
-+ (void *)lpfc_rcv_logo_unmap_node, /* RCV_LOGO */
-+ (void *)lpfc_rcv_padisc_unmap_node, /* RCV_ADISC */
-+ (void *)lpfc_rcv_padisc_unmap_node, /* RCV_PDISC */
-+ (void *)lpfc_rcv_prlo_unmap_node, /* RCV_PRLO */
-+ (void *)lpfc_disc_illegal, /* CMPL_PLOGI */
-+ (void *)lpfc_disc_illegal, /* CMPL_PRLI */
-+ (void *)lpfc_disc_illegal, /* CMPL_LOGO */
-+ (void *)lpfc_disc_illegal, /* CMPL_ADISC */
-+ (void *)lpfc_disc_illegal, /* CMPL_REG_LOGIN */
-+ (void *)lpfc_disc_illegal, /* DEVICE_RM */
-+ (void *)lpfc_device_recov_unmap_node, /* DEVICE_RECOVERY */
-+
-+ (void *)lpfc_rcv_plogi_mapped_node, /* RCV_PLOGI MAPPED_NODE */
-+ (void *)lpfc_rcv_prli_mapped_node, /* RCV_PRLI */
-+ (void *)lpfc_rcv_logo_mapped_node, /* RCV_LOGO */
-+ (void *)lpfc_rcv_padisc_mapped_node, /* RCV_ADISC */
-+ (void *)lpfc_rcv_padisc_mapped_node, /* RCV_PDISC */
-+ (void *)lpfc_rcv_prlo_mapped_node, /* RCV_PRLO */
-+ (void *)lpfc_disc_illegal, /* CMPL_PLOGI */
-+ (void *)lpfc_disc_illegal, /* CMPL_PRLI */
-+ (void *)lpfc_disc_illegal, /* CMPL_LOGO */
-+ (void *)lpfc_disc_illegal, /* CMPL_ADISC */
-+ (void *)lpfc_disc_illegal, /* CMPL_REG_LOGIN */
-+ (void *)lpfc_disc_illegal, /* DEVICE_RM */
-+ (void *)lpfc_device_recov_mapped_node, /* DEVICE_RECOVERY */
-+
-+ (void *)lpfc_rcv_plogi_npr_node, /* RCV_PLOGI NPR_NODE */
-+ (void *)lpfc_rcv_prli_npr_node, /* RCV_PRLI */
-+ (void *)lpfc_rcv_logo_npr_node, /* RCV_LOGO */
-+ (void *)lpfc_rcv_padisc_npr_node, /* RCV_ADISC */
-+ (void *)lpfc_rcv_padisc_npr_node, /* RCV_PDISC */
-+ (void *)lpfc_rcv_prlo_npr_node, /* RCV_PRLO */
-+ (void *)lpfc_disc_noop, /* CMPL_PLOGI */
-+ (void *)lpfc_disc_noop, /* CMPL_PRLI */
-+ (void *)lpfc_cmpl_logo_npr_node, /* CMPL_LOGO */
-+ (void *)lpfc_disc_noop, /* CMPL_ADISC */
-+ (void *)lpfc_cmpl_reglogin_npr_node, /* CMPL_REG_LOGIN */
-+ (void *)lpfc_device_rm_npr_node, /* DEVICE_RM */
-+ (void *)lpfc_device_recov_npr_node, /* DEVICE_RECOVERY */
-+};
-+
-+int
-+lpfc_disc_state_machine(struct lpfc_hba * phba,
-+ struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
-+{
-+ uint32_t cur_state, rc;
-+ uint32_t(*func) (struct lpfc_hba *, struct lpfc_nodelist *, void *,
-+ uint32_t);
-+
-+ ndlp->nlp_disc_refcnt++;
-+ cur_state = ndlp->nlp_state;
-+
-+ /* DSM in event <evt> on NPort <nlp_DID> in state <cur_state> */
-+ lpfc_printf_log(phba,
-+ KERN_INFO,
-+ LOG_DISCOVERY,
-+ "%d:0211 DSM in event x%x on NPort x%x in state %d "
-+ "Data: x%x\n",
-+ phba->brd_no,
-+ evt, ndlp->nlp_DID, cur_state, ndlp->nlp_flag);
-+
-+ func = (uint32_t(*)(struct lpfc_hba *, struct lpfc_nodelist *, void *,
-+ uint32_t))
-+ lpfc_disc_action[(cur_state * NLP_EVT_MAX_EVENT) + evt];
-+ rc = (func) (phba, ndlp, arg, evt);
-+
-+ /* DSM out state <rc> on NPort <nlp_DID> */
-+ lpfc_printf_log(phba,
-+ KERN_INFO,
-+ LOG_DISCOVERY,
-+ "%d:0212 DSM out state %d on NPort x%x Data: x%x\n",
-+ phba->brd_no,
-+ rc, ndlp->nlp_DID, ndlp->nlp_flag);
-+
-+ ndlp->nlp_disc_refcnt--;
-+
-+ /* Check to see if ndlp removal is deferred */
-+ if ((ndlp->nlp_disc_refcnt == 0)
-+ && (ndlp->nlp_flag & NLP_DELAY_REMOVE)) {
-+
-+ ndlp->nlp_flag &= ~NLP_DELAY_REMOVE;
-+ lpfc_nlp_remove(phba, ndlp);
-+ return (NLP_STE_FREED_NODE);
-+ }
-+ if (rc == NLP_STE_FREED_NODE)
-+ return (NLP_STE_FREED_NODE);
-+ ndlp->nlp_state = rc;
-+ return (rc);
-+}
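/* Hedged illustration (not part of the original patch): how a caller might
 * feed a received ELS command into the discovery state machine above.  The
 * DSM indexes lpfc_disc_action[] with (current state * NLP_EVT_MAX_EVENT)
 * + event, so a PLOGI received while the node is in NLP_STE_NPR_NODE is
 * handled by lpfc_rcv_plogi_npr_node().  The event name NLP_EVT_RCV_PLOGI
 * is assumed to come from lpfc_disc.h; the real call sites live elsewhere
 * in the driver.
 */
static void
example_route_rcv_plogi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
			struct lpfc_iocbq *cmdiocb)
{
	int rc;

	rc = lpfc_disc_state_machine(phba, ndlp, cmdiocb, NLP_EVT_RCV_PLOGI);
	if (rc == NLP_STE_FREED_NODE)
		return;		/* the node was freed; do not touch it again */

	/* otherwise ndlp->nlp_state now holds the state returned above */
}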
---- linux-2.6.8.1-t044-driver-update//drivers/scsi/lpfc/Makefile 1970-01-01 03:00:00.000000000 +0300
-+++ rhel4u2//drivers/scsi/lpfc/Makefile 2005-10-19 11:47:17.000000000 +0400
-@@ -0,0 +1,7 @@
-+# Driver for Emulex LightPulse fibre channel host bus adapters.
-+EXTRA_CFLAGS += -DRHEL_FC
-+obj-$(CONFIG_SCSI_LPFC) := lpfc.o
-+
-+lpfc-objs := lpfc_mem.o lpfc_sli.o lpfc_ct.o lpfc_els.o \
-+lpfc_hbadisc.o lpfc_init.o lpfc_mbox.o lpfc_nportdisc.o lpfc_scsiport.o \
-+lpfc_fcp.o
---- linux-2.6.8.1-t044-driver-update//drivers/scsi/lpfc/lpfc.h 1970-01-01 03:00:00.000000000 +0300
-+++ rhel4u2//drivers/scsi/lpfc/lpfc.h 2005-10-19 11:47:17.000000000 +0400
-@@ -0,0 +1,464 @@
-+/*******************************************************************
-+ * This file is part of the Emulex Linux Device Driver for *
-+ * Fibre Channel Host Bus Adapters. *
-+ * Copyright (C) 2003-2005 Emulex. All rights reserved. *
-+ * EMULEX and SLI are trademarks of Emulex. *
-+ * www.emulex.com *
-+ * *
-+ * This program is free software; you can redistribute it and/or *
-+ * modify it under the terms of version 2 of the GNU General *
-+ * Public License as published by the Free Software Foundation. *
-+ * This program is distributed in the hope that it will be useful. *
-+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
-+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
-+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
-+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
-+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
-+ * more details, a copy of which can be found in the file COPYING *
-+ * included with this package. *
-+ *******************************************************************/
-+
-+/*
-+ * $Id: lpfc.h 1.143.2.2 2005/06/13 17:16:00EDT sf_support Exp $
-+ */
-+
-+#ifndef _H_LPFC
-+#define _H_LPFC
-+
-+struct lpfc_sli2_slim;
-+
-+#define LPFC_MAX_TARGET 256 /* max number of targets
-+ supported */
-+#define LPFC_MAX_DISC_THREADS 64 /* max outstanding discovery els
-+ requests */
-+#define LPFC_MAX_NS_RETRY 3 /* Try to get to the NameServer
-+ 3 times and then give up. */
-+#define LPFC_DFT_HBA_Q_DEPTH 2048 /* max cmds per hba */
-+#define LPFC_LC_HBA_Q_DEPTH 1024 /* max cmds per low cost hba */
-+#define LPFC_LP101_HBA_Q_DEPTH 128 /* max cmds per low cost hba */
-+
-+/* Define the SLIM2 page size. */
-+#define LPFC_SLIM2_PAGE_AREA 8192
-+
-+/* Define macros for 64 bit support */
-+#define putPaddrLow(addr) ((uint32_t) (0xffffffff & (u64)(addr)))
-+#define putPaddrHigh(addr) ((uint32_t) (0xffffffff & (((u64)(addr))>>32)))
-+#define getPaddr(high, low) ((dma_addr_t)( \
-+ (( (u64)(high)<<16 ) << 16)|( (u64)(low))))
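/* Hedged illustration (not part of the original patch): the 64-bit helpers
 * above split a dma_addr_t into the two 32-bit words that BDEs and mailbox
 * commands carry (see lpfc_read_la() and lpfc_read_sparam() later in this
 * patch), and getPaddr() recombines them.
 */
static dma_addr_t
example_split_and_join(dma_addr_t phys)
{
	uint32_t hi = putPaddrHigh(phys);	/* upper 32 bits of the address */
	uint32_t lo = putPaddrLow(phys);	/* lower 32 bits of the address */

	return getPaddr(hi, lo);		/* recombines the two halves */
}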
-+/* Provide maximum configuration definitions. */
-+#define LPFC_DRVR_TIMEOUT 16 /* driver iocb timeout value in sec */
-+#define MAX_FCP_TARGET 256 /* max num of FCP targets supported */
-+#define FC_MAX_ADPTMSG 64
-+
-+#define MAX_HBAEVT 32
-+
-+#if __LITTLE_ENDIAN
-+
-+#define putLunLow(lunlow, lun) \
-+ { \
-+ lunlow = 0; \
-+ }
-+
-+#define putLunHigh(lunhigh, lun) \
-+ { \
-+ lunhigh = swab16(lun); \
-+ }
-+
-+#else /* BIG_ENDIAN_HOST */
-+
-+#define putLunLow(lunlow, lun) \
-+ { \
-+ lunlow = 0; \
-+ }
-+
-+#define putLunHigh(lunhigh, lun) \
-+ { \
-+ lunhigh = (uint32_t)(lun << 16); \
-+ }
-+#endif
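/* Hedged illustration (not part of the original patch): packing a
 * single-level SCSI LUN into two 32-bit words with the endian-aware macros
 * above.  The destination names here are hypothetical; in the driver the
 * words end up in the FCP command's LUN field.
 */
static void
example_pack_fcp_lun(uint32_t lun, uint32_t *lun_msl, uint32_t *lun_lsl)
{
	uint32_t msl, lsl;

	putLunHigh(msl, lun);	/* upper word carries the LUN in wire order */
	putLunLow(lsl, lun);	/* lower word is zero for single-level LUNs */

	*lun_msl = msl;
	*lun_lsl = lsl;
}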
-+
-+/****************************************************************************/
-+/* Device VPD save area */
-+/****************************************************************************/
-+typedef struct lpfc_vpd {
-+ uint32_t status; /* vpd status value */
-+ uint32_t length; /* number of bytes actually returned */
-+ struct {
-+ uint32_t rsvd1; /* Revision numbers */
-+ uint32_t biuRev;
-+ uint32_t smRev;
-+ uint32_t smFwRev;
-+ uint32_t endecRev;
-+ uint16_t rBit;
-+ uint8_t fcphHigh;
-+ uint8_t fcphLow;
-+ uint8_t feaLevelHigh;
-+ uint8_t feaLevelLow;
-+ uint32_t postKernRev;
-+ uint32_t opFwRev;
-+ uint8_t opFwName[16];
-+ uint32_t sli1FwRev;
-+ uint8_t sli1FwName[16];
-+ uint32_t sli2FwRev;
-+ uint8_t sli2FwName[16];
-+ } rev;
-+} lpfc_vpd_t;
-+
-+struct lpfc_scsi_buf;
-+
-+struct lpfc_hba_event {
-+ uint32_t fc_eventcode;
-+ uint32_t fc_evdata1;
-+ uint32_t fc_evdata2;
-+ uint32_t fc_evdata3;
-+ uint32_t fc_evdata4;
-+};
-+
-+/*
-+ * lpfc stat counters
-+ */
-+struct lpfc_stats {
-+ /* Statistics for ELS commands */
-+ uint32_t elsLogiCol;
-+ uint32_t elsRetryExceeded;
-+ uint32_t elsXmitRetry;
-+ uint32_t elsDelayRetry;
-+ uint32_t elsRcvDrop;
-+ uint32_t elsRcvFrame;
-+ uint32_t elsRcvRSCN;
-+ uint32_t elsRcvRNID;
-+ uint32_t elsRcvFARP;
-+ uint32_t elsRcvFARPR;
-+ uint32_t elsRcvFLOGI;
-+ uint32_t elsRcvPLOGI;
-+ uint32_t elsRcvADISC;
-+ uint32_t elsRcvPDISC;
-+ uint32_t elsRcvFAN;
-+ uint32_t elsRcvLOGO;
-+ uint32_t elsRcvPRLO;
-+ uint32_t elsRcvPRLI;
-+ uint32_t elsRcvRRQ;
-+ uint32_t elsXmitFLOGI;
-+ uint32_t elsXmitPLOGI;
-+ uint32_t elsXmitPRLI;
-+ uint32_t elsXmitADISC;
-+ uint32_t elsXmitLOGO;
-+ uint32_t elsXmitSCR;
-+ uint32_t elsXmitRNID;
-+ uint32_t elsXmitFARP;
-+ uint32_t elsXmitFARPR;
-+ uint32_t elsXmitACC;
-+ uint32_t elsXmitLSRJT;
-+
-+ uint32_t frameRcvBcast;
-+ uint32_t frameRcvMulti;
-+ uint32_t strayXmitCmpl;
-+ uint32_t frameXmitDelay;
-+ uint32_t xriCmdCmpl;
-+ uint32_t xriStatErr;
-+ uint32_t LinkUp;
-+ uint32_t LinkDown;
-+ uint32_t LinkMultiEvent;
-+ uint32_t NoRcvBuf;
-+ uint32_t fcpCmd;
-+ uint32_t fcpCmpl;
-+ uint32_t fcpRspErr;
-+ uint32_t fcpRemoteStop;
-+ uint32_t fcpPortRjt;
-+ uint32_t fcpPortBusy;
-+ uint32_t fcpError;
-+ uint32_t fcpLocalErr;
-+};
-+
-+enum sysfs_mbox_state {
-+ SMBOX_IDLE,
-+ SMBOX_WRITING,
-+ SMBOX_READING
-+};
-+
-+struct lpfc_sysfs_mbox {
-+ enum sysfs_mbox_state state;
-+ size_t offset;
-+ struct lpfcMboxq * mbox;
-+};
-+
-+struct lpfc_hba {
-+ uint32_t intr_inited; /* flag for interrupt registration */
-+ struct list_head hba_list; /* List of hbas/ports */
-+ struct lpfc_sli sli;
-+ struct lpfc_sli2_slim *slim2p;
-+ dma_addr_t slim2p_mapping;
-+
-+ uint32_t hba_state;
-+
-+#define LPFC_INIT_START 1 /* Initial state after board reset */
-+#define LPFC_INIT_MBX_CMDS 2 /* Initialize HBA with mbox commands */
-+#define LPFC_LINK_DOWN 3 /* HBA initialized, link is down */
-+#define LPFC_LINK_UP 4 /* Link is up - issue READ_LA */
-+#define LPFC_LOCAL_CFG_LINK 5 /* local NPORT Id configured */
-+#define LPFC_FLOGI 6 /* FLOGI sent to Fabric */
-+#define LPFC_FABRIC_CFG_LINK 7 /* Fabric assigned NPORT Id
-+ configured */
-+#define LPFC_NS_REG 8 /* Register with NameServer */
-+#define LPFC_NS_QRY 9 /* Query NameServer for NPort ID list */
-+#define LPFC_BUILD_DISC_LIST 10 /* Build ADISC and PLOGI lists for
-+ * device authentication / discovery */
-+#define LPFC_DISC_AUTH 11 /* Processing ADISC list */
-+#define LPFC_CLEAR_LA 12 /* authentication cmplt - issue
-+ CLEAR_LA */
-+#define LPFC_HBA_READY 32
-+#define LPFC_HBA_ERROR 0xff
-+
-+ uint8_t fc_linkspeed; /* Link speed after last READ_LA */
-+
-+ uint32_t fc_eventTag; /* event tag for link attention */
-+ uint32_t fc_prli_sent; /* cntr for outstanding PRLIs */
-+
-+ uint32_t num_disc_nodes; /*in addition to hba_state */
-+
-+ uint8_t fcp_mapping; /* Map FCP devices based on WWNN WWPN or DID */
-+#define FCP_SEED_WWNN 0x1
-+#define FCP_SEED_WWPN 0x2
-+#define FCP_SEED_DID 0x4
-+#define FCP_SEED_MASK 0x7
-+#define FCP_SEED_AUTO 0x8 /* binding was created by auto mapping */
-+
-+ struct timer_list fc_estabtmo; /* link establishment timer */
-+ struct timer_list fc_disctmo; /* Discovery rescue timer */
-+ struct timer_list fc_fdmitmo; /* fdmi timer */
-+ struct timer_list fc_scantmo; /* scsi scan host timer */
-+
-+
-+ void *fc_evt_head; /* waiting for event queue */
-+ void *fc_evt_tail; /* waiting for event queue */
-+
-+ uint16_t hba_event_put; /* hbaevent event put word anchor */
-+ uint16_t hba_event_get; /* hbaevent event get word anchor */
-+ uint32_t hba_event_missed; /* hbaevent missed event word anchor */
-+ uint32_t sid_cnt; /* SCSI ID counter */
-+
-+ struct lpfc_hba_event hbaevt[MAX_HBAEVT];
-+
-+ /* These fields used to be binfo */
-+ struct lpfc_name fc_nodename; /* fc nodename */
-+ struct lpfc_name fc_portname; /* fc portname */
-+ uint32_t fc_pref_DID; /* preferred D_ID */
-+ uint8_t fc_pref_ALPA; /* preferred AL_PA */
-+ uint32_t fc_edtov; /* E_D_TOV timer value */
-+ uint32_t fc_arbtov; /* ARB_TOV timer value */
-+ uint32_t fc_ratov; /* R_A_TOV timer value */
-+ uint32_t fc_rttov; /* R_T_TOV timer value */
-+ uint32_t fc_altov; /* AL_TOV timer value */
-+ uint32_t fc_crtov; /* C_R_TOV timer value */
-+ uint32_t fc_citov; /* C_I_TOV timer value */
-+ uint32_t fc_myDID; /* fibre channel S_ID */
-+ uint32_t fc_prevDID; /* previous fibre channel S_ID */
-+
-+ struct serv_parm fc_sparam; /* buffer for our service parameters */
-+ struct serv_parm fc_fabparam; /* fabric service parameters buffer */
-+ uint8_t alpa_map[128]; /* AL_PA map from READ_LA */
-+
-+ uint8_t fc_ns_retry; /* retries for fabric nameserver */
-+ uint32_t fc_nlp_cnt; /* outstanding NODELIST requests */
-+ uint32_t fc_rscn_id_cnt; /* count of RSCNs payloads in list */
-+ struct lpfc_dmabuf *fc_rscn_id_list[FC_MAX_HOLD_RSCN];
-+ uint32_t lmt;
-+ uint32_t fc_flag; /* FC flags */
-+#define FC_PT2PT 0x1 /* pt2pt with no fabric */
-+#define FC_PT2PT_PLOGI 0x2 /* pt2pt initiate PLOGI */
-+#define FC_DISC_TMO 0x4 /* Discovery timer running */
-+#define FC_PUBLIC_LOOP 0x8 /* Public loop */
-+#define FC_LBIT 0x10 /* LOGIN bit in loopinit set */
-+#define FC_RSCN_MODE 0x20 /* RSCN cmd rcv'ed */
-+#define FC_NLP_MORE 0x40 /* More node to process in node tbl */
-+#define FC_OFFLINE_MODE 0x80 /* Interface is offline for diag */
-+#define FC_FABRIC 0x100 /* We are fabric attached */
-+#define FC_ESTABLISH_LINK 0x200 /* Reestablish Link */
-+#define FC_RSCN_DISCOVERY 0x400 /* Authenticate all devices after RSCN*/
-+#define FC_LOADING 0x1000 /* HBA in process of loading drvr */
-+#define FC_SCSI_SCAN_TMO 0x4000 /* scsi scan timer running */
-+#define FC_ABORT_DISCOVERY 0x8000 /* we want to abort discovery */
-+#define FC_NDISC_ACTIVE 0x10000 /* NPort discovery active */
-+
-+ uint32_t fc_topology; /* link topology, from LINK INIT */
-+
-+ struct lpfc_stats fc_stat;
-+
-+ /* These are the head/tail pointers for the bind, plogi, adisc, reglogin,
-+ * prli, unmapped, mapped, npr, and unused lists. Their counters
-+ * immediately follow.
-+ */
-+ struct list_head fc_nlpbind_list;
-+ struct list_head fc_plogi_list;
-+ struct list_head fc_adisc_list;
-+ struct list_head fc_reglogin_list;
-+ struct list_head fc_prli_list;
-+ struct list_head fc_nlpunmap_list;
-+ struct list_head fc_nlpmap_list;
-+ struct list_head fc_npr_list;
-+ struct list_head fc_unused_list;
-+
-+ /* Keep counters for the number of entries in each list. */
-+ uint16_t fc_bind_cnt;
-+ uint16_t fc_plogi_cnt;
-+ uint16_t fc_adisc_cnt;
-+ uint16_t fc_reglogin_cnt;
-+ uint16_t fc_prli_cnt;
-+ uint16_t fc_unmap_cnt;
-+ uint16_t fc_map_cnt;
-+ uint16_t fc_npr_cnt;
-+ uint16_t fc_unused_cnt;
-+ struct lpfc_nodelist fc_fcpnodev; /* nodelist entry for no device */
-+ uint32_t nport_event_cnt; /* timestamp for nlplist entry */
-+
-+ struct lpfc_target *device_queue_hash[MAX_FCP_TARGET];
-+#define LPFC_RPI_HASH_SIZE 64
-+#define LPFC_RPI_HASH_FUNC(x) ((x) & (0x3f))
-+ /* ptr to active D_ID / RPIs */
-+ struct lpfc_nodelist *fc_nlplookup[LPFC_RPI_HASH_SIZE];
-+ uint32_t wwnn[2];
-+ uint32_t RandomData[7];
-+
-+ uint32_t cfg_log_verbose;
-+ uint32_t cfg_lun_queue_depth;
-+ uint32_t cfg_nodev_tmo;
-+ uint32_t cfg_hba_queue_depth;
-+ uint32_t cfg_fcp_class;
-+ uint32_t cfg_use_adisc;
-+ uint32_t cfg_ack0;
-+ uint32_t cfg_topology;
-+ uint32_t cfg_scan_down;
-+ uint32_t cfg_link_speed;
-+ uint32_t cfg_cr_delay;
-+ uint32_t cfg_cr_count;
-+ uint32_t cfg_fdmi_on;
-+ uint32_t cfg_fcp_bind_method;
-+ uint32_t cfg_discovery_threads;
-+ uint32_t cfg_max_luns;
-+ uint32_t cfg_scsi_hotplug;
-+
-+ lpfc_vpd_t vpd; /* vital product data */
-+
-+#if defined(SLES_FC)
-+ /*
-+ * Provide a per-HBA timer for 2.6.5 kernels patched with the
-+ * block/unblock FC transport patch.
-+ */
-+ struct timer_list dev_loss_timer;
-+#endif
-+
-+ struct Scsi_Host *host;
-+ struct pci_dev *pcidev;
-+ struct list_head dpc_disc;
-+
-+ pid_t dpc_pid;
-+ int dpc_kill;
-+ struct completion dpc_startup;
-+ struct completion dpc_exiting;
-+ struct semaphore *dpc_wait;
-+ uint32_t work_hba_events; /* Timeout to be handled */
-+#define WORKER_DISC_TMO 0x1 /* Discovery timeout */
-+#define WORKER_ELS_TMO 0x2 /* ELS timeout */
-+#define WORKER_MBOX_TMO 0x4 /* MBOX timeout */
-+#define WORKER_FDMI_TMO 0x8 /* FDMI timeout */
-+
-+ unsigned long pci_bar0_map; /* Physical address for PCI BAR0 */
-+ unsigned long pci_bar2_map; /* Physical address for PCI BAR2 */
-+ void *slim_memmap_p; /* Kernel memory mapped address for PCI
-+ BAR0 */
-+ void *ctrl_regs_memmap_p; /* Kernel memory mapped address for PCI
-+ BAR2 */
-+
-+ void *MBslimaddr; /* virtual address for mbox cmds */
-+ void *HAregaddr; /* virtual address for host attn reg */
-+ void *CAregaddr; /* virtual address for chip attn reg */
-+ void *HSregaddr; /* virtual address for host status reg */
-+ void *HCregaddr; /* virtual address for host ctl reg */
-+ wait_queue_head_t linkevtwq;
-+ wait_queue_head_t rscnevtwq;
-+ wait_queue_head_t ctevtwq;
-+
-+ uint8_t brd_no; /* FC board number */
-+
-+ char SerialNumber[32]; /* adapter Serial Number */
-+ char OptionROMVersion[32]; /* adapter BIOS / Fcode version */
-+ char ModelDesc[256]; /* Model Description */
-+ char ModelName[80]; /* Model Name */
-+ char ProgramType[256]; /* Program Type */
-+ char Port[20]; /* Port No */
-+ uint8_t vpd_flag; /* VPD data flag */
-+
-+#define VPD_MODEL_DESC 0x1 /* valid vpd model description */
-+#define VPD_MODEL_NAME 0x2 /* valid vpd model name */
-+#define VPD_PROGRAM_TYPE 0x4 /* valid vpd program type */
-+#define VPD_PORT 0x8 /* valid vpd port data */
-+#define VPD_MASK 0xf /* mask for any vpd data */
-+
-+ struct timer_list els_tmofunc;
-+
-+ void *link_stats;
-+
-+ /*
-+ * stat counters
-+ */
-+ uint64_t fc4InputRequests;
-+ uint64_t fc4OutputRequests;
-+ uint64_t fc4ControlRequests;
-+
-+ struct lpfc_sysfs_mbox sysfs_mbox;
-+ /* pci_mem_pools */
-+ struct pci_pool *lpfc_scsi_dma_ext_pool;
-+ struct pci_pool *lpfc_mbuf_pool;
-+ struct lpfc_dma_pool lpfc_mbuf_safety_pool;
-+ mempool_t *scsibuf_mem_pool;
-+
-+ mempool_t *iocb_mem_pool;
-+ mempool_t *mbox_mem_pool;
-+ mempool_t *nlp_mem_pool;
-+ mempool_t *bind_mem_pool;
-+ struct list_head freebufList;
-+ struct list_head ctrspbuflist;
-+ struct list_head rnidrspbuflist;
-+};
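/* Hedged sketch (not part of the original patch): how the RPI hash declared
 * in struct lpfc_hba above is indexed.  lpfc_addnode_rpi() and
 * lpfc_findnode_remove_rpi(), referenced earlier in this patch, maintain
 * the per-bucket chains; only the bucket lookup is shown here.
 */
static struct lpfc_nodelist *
example_rpi_bucket_head(struct lpfc_hba *phba, uint16_t rpi)
{
	/* LPFC_RPI_HASH_FUNC() masks the RPI down to one of the
	 * LPFC_RPI_HASH_SIZE (64) buckets in fc_nlplookup[].
	 */
	return phba->fc_nlplookup[LPFC_RPI_HASH_FUNC(rpi)];
}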
-+
-+/* event mask definitions */
-+#define FC_REG_LINK_EVENT 0x1 /* Register for link up / down events */
-+#define FC_REG_RSCN_EVENT 0x2 /* Register for RSCN events */
-+#define FC_REG_CT_EVENT 0x4 /* Register for CT request events */
-+
-+#define FC_FSTYPE_ALL 0xffff /* match on all fsTypes */
-+
-+typedef struct fcEVT { /* Kernel level Event structure */
-+ uint32_t evt_handle;
-+ uint32_t evt_mask;
-+ uint32_t evt_data0;
-+ uint16_t evt_sleep;
-+ uint16_t evt_flags;
-+ void *evt_type;
-+ void *evt_next;
-+ void *evt_data1;
-+ uint32_t evt_data2;
-+} fcEVT_t;
-+
-+typedef struct fcEVTHDR { /* Kernel level Event Header */
-+ uint32_t e_handle;
-+ uint32_t e_mask;
-+ uint16_t e_mode;
-+#define E_SLEEPING_MODE 0x0001
-+ uint16_t e_refcnt;
-+ uint16_t e_flag;
-+#define E_GET_EVENT_ACTIVE 0x0001
-+ fcEVT_t *e_head;
-+ fcEVT_t *e_tail;
-+ void *e_next_header;
-+ void *e_type;
-+} fcEVTHDR_t;
-+
-+struct rnidrsp {
-+ void *buf;
-+ uint32_t uniqueid;
-+ struct list_head list;
-+ uint32_t data;
-+};
-+
-+#endif /* _H_LPFC */
---- linux-2.6.8.1-t044-driver-update//drivers/scsi/lpfc/lpfc_mbox.c 1970-01-01 03:00:00.000000000 +0300
-+++ rhel4u2//drivers/scsi/lpfc/lpfc_mbox.c 2005-10-19 11:47:17.000000000 +0400
-@@ -0,0 +1,665 @@
-+/*******************************************************************
-+ * This file is part of the Emulex Linux Device Driver for *
-+ * Fibre Channel Host Bus Adapters. *
-+ * Copyright (C) 2003-2005 Emulex. All rights reserved. *
-+ * EMULEX and SLI are trademarks of Emulex. *
-+ * www.emulex.com *
-+ * *
-+ * This program is free software; you can redistribute it and/or *
-+ * modify it under the terms of version 2 of the GNU General *
-+ * Public License as published by the Free Software Foundation. *
-+ * This program is distributed in the hope that it will be useful. *
-+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
-+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
-+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
-+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
-+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
-+ * more details, a copy of which can be found in the file COPYING *
-+ * included with this package. *
-+ *******************************************************************/
-+
-+/*
-+ * $Id: lpfc_mbox.c 1.77.2.2 2005/06/13 17:16:32EDT sf_support Exp $
-+ */
-+#include <linux/version.h>
-+#include <linux/blkdev.h>
-+#include <linux/dma-mapping.h>
-+#include <linux/pci.h>
-+#include <linux/spinlock.h>
-+#include <scsi/scsi_device.h>
-+#include "lpfc_sli.h"
-+#include "lpfc_disc.h"
-+#include "lpfc_scsi.h"
-+#include "lpfc.h"
-+#include "lpfc_crtn.h"
-+#include "lpfc_hw.h"
-+#include "lpfc_logmsg.h"
-+#include "lpfc_mem.h"
-+#include "lpfc_compat.h"
-+
-+/**********************************************/
-+/* lpfc_dump_mem Issue a DUMP MEMORY */
-+/* mailbox command */
-+/**********************************************/
-+void
-+lpfc_dump_mem(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, uint16_t offset)
-+{
-+ MAILBOX_t *mb;
-+ void *ctx;
-+
-+ mb = &pmb->mb;
-+ ctx = pmb->context2;
-+
-+ /* Setup to dump VPD region */
-+ memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
-+ mb->mbxCommand = MBX_DUMP_MEMORY;
-+ mb->un.varDmp.cv = 1;
-+ mb->un.varDmp.type = DMP_NV_PARAMS;
-+ mb->un.varDmp.entry_index = offset;
-+ mb->un.varDmp.region_id = DMP_REGION_VPD;
-+ mb->un.varDmp.word_cnt = (DMP_RSP_SIZE / sizeof (uint32_t));
-+ mb->un.varDmp.co = 0;
-+ mb->un.varDmp.resp_offset = 0;
-+ pmb->context2 = ctx;
-+ mb->mbxOwner = OWN_HOST;
-+ return;
-+}
-+
-+/**********************************************/
-+/* lpfc_read_nv Issue a READ NVPARAM */
-+/* mailbox command */
-+/**********************************************/
-+void
-+lpfc_read_nv(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
-+{
-+ MAILBOX_t *mb;
-+
-+ mb = &pmb->mb;
-+ memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
-+ mb->mbxCommand = MBX_READ_NV;
-+ mb->mbxOwner = OWN_HOST;
-+ return;
-+}
-+
-+/**********************************************/
-+/* lpfc_read_la Issue a READ LA */
-+/* mailbox command */
-+/**********************************************/
-+int
-+lpfc_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
-+{
-+ MAILBOX_t *mb;
-+ struct lpfc_dmabuf *mp;
-+ struct lpfc_sli *psli;
-+
-+ psli = &phba->sli;
-+ mb = &pmb->mb;
-+ memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
-+
-+ /* Get a buffer to hold the loop map */
-+ if (((mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_ATOMIC)) == 0) ||
-+ ((mp->virt = lpfc_mbuf_alloc(phba, 0, &(mp->phys))) == 0)) {
-+ if (mp)
-+ kfree(mp);
-+ mb->mbxCommand = MBX_READ_LA64;
-+ /* READ_LA: no buffers */
-+ lpfc_printf_log(phba,
-+ KERN_WARNING,
-+ LOG_MBOX,
-+ "%d:0300 READ_LA: no buffers\n",
-+ phba->brd_no);
-+ return (1);
-+ }
-+ INIT_LIST_HEAD(&mp->list);
-+ mb->mbxCommand = MBX_READ_LA64;
-+ mb->un.varReadLA.un.lilpBde64.tus.f.bdeSize = 128;
-+ mb->un.varReadLA.un.lilpBde64.addrHigh = putPaddrHigh(mp->phys);
-+ mb->un.varReadLA.un.lilpBde64.addrLow = putPaddrLow(mp->phys);
-+
-+ /* Save address for later completion and set the owner to host so that
-+ * the FW knows this mailbox is available for processing.
-+ */
-+ pmb->context1 = (uint8_t *) mp;
-+ mb->mbxOwner = OWN_HOST;
-+ return (0);
-+}
-+
-+/**********************************************/
-+/* lpfc_clear_la Issue a CLEAR LA */
-+/* mailbox command */
-+/**********************************************/
-+void
-+lpfc_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
-+{
-+ MAILBOX_t *mb;
-+
-+ mb = &pmb->mb;
-+ memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
-+
-+ mb->un.varClearLA.eventTag = phba->fc_eventTag;
-+ mb->mbxCommand = MBX_CLEAR_LA;
-+ mb->mbxOwner = OWN_HOST;
-+ return;
-+}
-+
-+/**************************************************/
-+/* lpfc_config_link Issue a CONFIG LINK */
-+/* mailbox command */
-+/**************************************************/
-+void
-+lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
-+{
-+ MAILBOX_t *mb = &pmb->mb;
-+ memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
-+
-+ /* NEW_FEATURE
-+ * SLI-2, Coalescing Response Feature.
-+ */
-+ if (phba->cfg_cr_delay) {
-+ mb->un.varCfgLnk.cr = 1;
-+ mb->un.varCfgLnk.ci = 1;
-+ mb->un.varCfgLnk.cr_delay = phba->cfg_cr_delay;
-+ mb->un.varCfgLnk.cr_count = phba->cfg_cr_count;
-+ }
-+
-+ mb->un.varCfgLnk.myId = phba->fc_myDID;
-+ mb->un.varCfgLnk.edtov = phba->fc_edtov;
-+ mb->un.varCfgLnk.arbtov = phba->fc_arbtov;
-+ mb->un.varCfgLnk.ratov = phba->fc_ratov;
-+ mb->un.varCfgLnk.rttov = phba->fc_rttov;
-+ mb->un.varCfgLnk.altov = phba->fc_altov;
-+ mb->un.varCfgLnk.crtov = phba->fc_crtov;
-+ mb->un.varCfgLnk.citov = phba->fc_citov;
-+
-+ if (phba->cfg_ack0)
-+ mb->un.varCfgLnk.ack0_enable = 1;
-+
-+ mb->mbxCommand = MBX_CONFIG_LINK;
-+ mb->mbxOwner = OWN_HOST;
-+ return;
-+}
-+
-+/**********************************************/
-+/* lpfc_init_link Issue an INIT LINK */
-+/* mailbox command */
-+/**********************************************/
-+void
-+lpfc_init_link(struct lpfc_hba * phba,
-+ LPFC_MBOXQ_t * pmb, uint32_t topology, uint32_t linkspeed)
-+{
-+ lpfc_vpd_t *vpd;
-+ struct lpfc_sli *psli;
-+ MAILBOX_t *mb;
-+
-+ mb = &pmb->mb;
-+ memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
-+
-+ psli = &phba->sli;
-+ switch (topology) {
-+ case FLAGS_TOPOLOGY_MODE_LOOP_PT:
-+ mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_LOOP;
-+ mb->un.varInitLnk.link_flags |= FLAGS_TOPOLOGY_FAILOVER;
-+ break;
-+ case FLAGS_TOPOLOGY_MODE_PT_PT:
-+ mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT;
-+ break;
-+ case FLAGS_TOPOLOGY_MODE_LOOP:
-+ mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_LOOP;
-+ break;
-+ case FLAGS_TOPOLOGY_MODE_PT_LOOP:
-+ mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT;
-+ mb->un.varInitLnk.link_flags |= FLAGS_TOPOLOGY_FAILOVER;
-+ break;
-+ }
-+
-+ /* NEW_FEATURE
-+ * Setting up the link speed
-+ */
-+ vpd = &phba->vpd;
-+ if (vpd->rev.feaLevelHigh >= 0x02){
-+ switch(linkspeed){
-+ case LINK_SPEED_1G:
-+ case LINK_SPEED_2G:
-+ case LINK_SPEED_4G:
-+ mb->un.varInitLnk.link_flags |=
-+ FLAGS_LINK_SPEED;
-+ mb->un.varInitLnk.link_speed = linkspeed;
-+ break;
-+ case LINK_SPEED_AUTO:
-+ default:
-+ mb->un.varInitLnk.link_speed =
-+ LINK_SPEED_AUTO;
-+ break;
-+ }
-+
-+ }
-+ else
-+ mb->un.varInitLnk.link_speed = LINK_SPEED_AUTO;
-+
-+ mb->mbxCommand = (volatile uint8_t)MBX_INIT_LINK;
-+ mb->mbxOwner = OWN_HOST;
-+ mb->un.varInitLnk.fabric_AL_PA = phba->fc_pref_ALPA;
-+ return;
-+}
-+
-+/**********************************************/
-+/* lpfc_read_sparam Issue a READ SPARAM */
-+/* mailbox command */
-+/**********************************************/
-+int
-+lpfc_read_sparam(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
-+{
-+ struct lpfc_dmabuf *mp;
-+ MAILBOX_t *mb;
-+ struct lpfc_sli *psli;
-+
-+ psli = &phba->sli;
-+ mb = &pmb->mb;
-+ memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
-+
-+ mb->mbxOwner = OWN_HOST;
-+
-+ /* Get a buffer to hold the HBAs Service Parameters */
-+
-+ if (((mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_ATOMIC)) == 0) ||
-+ ((mp->virt = lpfc_mbuf_alloc(phba, 0, &(mp->phys))) == 0)) {
-+ if (mp)
-+ kfree(mp);
-+ mb->mbxCommand = MBX_READ_SPARM64;
-+ /* READ_SPARAM: no buffers */
-+ lpfc_printf_log(phba,
-+ KERN_WARNING,
-+ LOG_MBOX,
-+ "%d:0301 READ_SPARAM: no buffers\n",
-+ phba->brd_no);
-+ return (1);
-+ }
-+ INIT_LIST_HEAD(&mp->list);
-+ mb->mbxCommand = MBX_READ_SPARM64;
-+ mb->un.varRdSparm.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm);
-+ mb->un.varRdSparm.un.sp64.addrHigh = putPaddrHigh(mp->phys);
-+ mb->un.varRdSparm.un.sp64.addrLow = putPaddrLow(mp->phys);
-+
-+ /* save address for completion */
-+ pmb->context1 = mp;
-+
-+ return (0);
-+}
-+
-+/********************************************/
-+/* lpfc_unreg_did Issue a UNREG_DID */
-+/* mailbox command */
-+/********************************************/
-+void
-+lpfc_unreg_did(struct lpfc_hba * phba, uint32_t did, LPFC_MBOXQ_t * pmb)
-+{
-+ MAILBOX_t *mb;
-+
-+ mb = &pmb->mb;
-+ memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
-+
-+ mb->un.varUnregDID.did = did;
-+
-+ mb->mbxCommand = MBX_UNREG_D_ID;
-+ mb->mbxOwner = OWN_HOST;
-+ return;
-+}
-+
-+/***********************************************/
-+/* lpfc_set_slim Issue a special debug mbox */
-+/* command to write slim */
-+/***********************************************/
-+void
-+lpfc_set_slim(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, uint32_t addr,
-+ uint32_t value)
-+{
-+ MAILBOX_t *mb;
-+
-+ mb = &pmb->mb;
-+ memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
-+
-+ /* addr = 0x090597 is AUTO ABTS disable for ELS commands */
-+ /* addr = 0x052198 is DELAYED ABTS enable for ELS commands */
-+
-+ /*
-+ * Always turn on DELAYED ABTS for ELS timeouts
-+ */
-+ if ((addr == 0x052198) && (value == 0))
-+ value = 1;
-+
-+ mb->un.varWords[0] = addr;
-+ mb->un.varWords[1] = value;
-+
-+ mb->mbxCommand = MBX_SET_SLIM;
-+ mb->mbxOwner = OWN_HOST;
-+ return;
-+}
-+
-+/**********************************************/
-+/* lpfc_read_config Issue a READ CONFIG */
-+/* mailbox command */
-+/**********************************************/
-+void
-+lpfc_read_config(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
-+{
-+ MAILBOX_t *mb;
-+
-+ mb = &pmb->mb;
-+ memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
-+
-+ mb->mbxCommand = MBX_READ_CONFIG;
-+ mb->mbxOwner = OWN_HOST;
-+ return;
-+}
-+
-+/********************************************/
-+/* lpfc_reg_login Issue a REG_LOGIN */
-+/* mailbox command */
-+/********************************************/
-+int
-+lpfc_reg_login(struct lpfc_hba * phba,
-+ uint32_t did, uint8_t * param, LPFC_MBOXQ_t * pmb, uint32_t flag)
-+{
-+ uint8_t *sparam;
-+ struct lpfc_dmabuf *mp;
-+ MAILBOX_t *mb;
-+ struct lpfc_sli *psli;
-+
-+ psli = &phba->sli;
-+ mb = &pmb->mb;
-+ memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
-+
-+ mb->un.varRegLogin.rpi = 0;
-+ mb->un.varRegLogin.did = did;
-+ mb->un.varWords[30] = flag; /* Set flag to issue action on cmpl */
-+
-+ mb->mbxOwner = OWN_HOST;
-+
-+ /* Get a buffer to hold NPorts Service Parameters */
-+ if (((mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_ATOMIC)) == 0) ||
-+ ((mp->virt = lpfc_mbuf_alloc(phba, 0, &(mp->phys))) == 0)) {
-+ if (mp)
-+ kfree(mp);
-+
-+ mb->mbxCommand = MBX_REG_LOGIN64;
-+ /* REG_LOGIN: no buffers */
-+ lpfc_printf_log(phba,
-+ KERN_WARNING,
-+ LOG_MBOX,
-+ "%d:0302 REG_LOGIN: no buffers Data x%x x%x\n",
-+ phba->brd_no,
-+ (uint32_t) did, (uint32_t) flag);
-+ return (1);
-+ }
-+ INIT_LIST_HEAD(&mp->list);
-+ sparam = mp->virt;
-+
-+ /* Copy params into a new buffer */
-+ memcpy(sparam, param, sizeof (struct serv_parm));
-+
-+ /* save address for completion */
-+ pmb->context1 = (uint8_t *) mp;
-+
-+ mb->mbxCommand = MBX_REG_LOGIN64;
-+ mb->un.varRegLogin.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm);
-+ mb->un.varRegLogin.un.sp64.addrHigh = putPaddrHigh(mp->phys);
-+ mb->un.varRegLogin.un.sp64.addrLow = putPaddrLow(mp->phys);
-+
-+ return (0);
-+}
-+
-+/**********************************************/
-+/* lpfc_unreg_login Issue a UNREG_LOGIN */
-+/* mailbox command */
-+/**********************************************/
-+void
-+lpfc_unreg_login(struct lpfc_hba * phba, uint32_t rpi, LPFC_MBOXQ_t * pmb)
-+{
-+ MAILBOX_t *mb;
-+
-+ mb = &pmb->mb;
-+ memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
-+
-+ mb->un.varUnregLogin.rpi = (uint16_t) rpi;
-+ mb->un.varUnregLogin.rsvd1 = 0;
-+
-+ mb->mbxCommand = MBX_UNREG_LOGIN;
-+ mb->mbxOwner = OWN_HOST;
-+ return;
-+}
-+
-+static void
-+lpfc_config_pcb_setup(struct lpfc_hba * phba)
-+{
-+ struct lpfc_sli *psli = &phba->sli;
-+ struct lpfc_sli_ring *pring;
-+ PCB_t *pcbp = &phba->slim2p->pcb;
-+ LPFC_RING_INIT_t *pringinit;
-+ dma_addr_t pdma_addr;
-+ uint32_t offset;
-+ uint32_t iocbCnt;
-+ int i;
-+
-+ psli->MBhostaddr = (uint32_t *)&phba->slim2p->mbx;
-+ pcbp->maxRing = (psli->sliinit.num_rings - 1);
-+
-+ iocbCnt = 0;
-+ for (i = 0; i < psli->sliinit.num_rings; i++) {
-+ pringinit = &psli->sliinit.ringinit[i];
-+ pring = &psli->ring[i];
-+ /* A ring MUST have both cmd and rsp entries defined to be
-+ valid */
-+ if ((pringinit->numCiocb == 0) || (pringinit->numRiocb == 0)) {
-+ pcbp->rdsc[i].cmdEntries = 0;
-+ pcbp->rdsc[i].rspEntries = 0;
-+ pcbp->rdsc[i].cmdAddrHigh = 0;
-+ pcbp->rdsc[i].rspAddrHigh = 0;
-+ pcbp->rdsc[i].cmdAddrLow = 0;
-+ pcbp->rdsc[i].rspAddrLow = 0;
-+ pring->cmdringaddr = NULL;
-+ pring->rspringaddr = NULL;
-+ continue;
-+ }
-+ /* Command ring setup for ring */
-+ pring->cmdringaddr =
-+ (void *)&phba->slim2p->IOCBs[iocbCnt];
-+ pcbp->rdsc[i].cmdEntries = pringinit->numCiocb;
-+
-+ offset = (uint8_t *)&phba->slim2p->IOCBs[iocbCnt] -
-+ (uint8_t *)phba->slim2p;
-+ pdma_addr = phba->slim2p_mapping + offset;
-+ pcbp->rdsc[i].cmdAddrHigh = putPaddrHigh(pdma_addr);
-+ pcbp->rdsc[i].cmdAddrLow = putPaddrLow(pdma_addr);
-+ iocbCnt += pringinit->numCiocb;
-+
-+ /* Response ring setup for ring */
-+ pring->rspringaddr =
-+ (void *)&phba->slim2p->IOCBs[iocbCnt];
-+
-+ pcbp->rdsc[i].rspEntries = pringinit->numRiocb;
-+ offset = (uint8_t *)&phba->slim2p->IOCBs[iocbCnt] -
-+ (uint8_t *)phba->slim2p;
-+ pdma_addr = phba->slim2p_mapping + offset;
-+ pcbp->rdsc[i].rspAddrHigh = putPaddrHigh(pdma_addr);
-+ pcbp->rdsc[i].rspAddrLow = putPaddrLow(pdma_addr);
-+ iocbCnt += pringinit->numRiocb;
-+ }
-+}
-+
-+void
-+lpfc_read_rev(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
-+{
-+ MAILBOX_t *mb;
-+
-+ mb = &pmb->mb;
-+ memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
-+ mb->un.varRdRev.cv = 1;
-+ mb->mbxCommand = MBX_READ_REV;
-+ mb->mbxOwner = OWN_HOST;
-+ return;
-+}
-+
-+void
-+lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb)
-+{
-+ int i;
-+ MAILBOX_t *mb = &pmb->mb;
-+ struct lpfc_sli *psli;
-+ LPFC_RING_INIT_t *pring;
-+
-+ memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
-+
-+ mb->un.varCfgRing.ring = ring;
-+ mb->un.varCfgRing.maxOrigXchg = 0;
-+ mb->un.varCfgRing.maxRespXchg = 0;
-+ mb->un.varCfgRing.recvNotify = 1;
-+
-+ psli = &phba->sli;
-+ pring = &psli->sliinit.ringinit[ring];
-+ mb->un.varCfgRing.numMask = pring->num_mask;
-+ mb->mbxCommand = MBX_CONFIG_RING;
-+ mb->mbxOwner = OWN_HOST;
-+
-+ /* Is this ring configured for a specific profile */
-+ if (pring->prt[0].profile) {
-+ mb->un.varCfgRing.profile = pring->prt[0].profile;
-+ return;
-+ }
-+
-+ /* Otherwise we set up specific rctl / type masks for this ring */
-+ for (i = 0; i < pring->num_mask; i++) {
-+ mb->un.varCfgRing.rrRegs[i].rval = pring->prt[i].rctl;
-+ if (mb->un.varCfgRing.rrRegs[i].rval != FC_ELS_REQ)
-+ mb->un.varCfgRing.rrRegs[i].rmask = 0xff;
-+ else
-+ mb->un.varCfgRing.rrRegs[i].rmask = 0xfe;
-+ mb->un.varCfgRing.rrRegs[i].tval = pring->prt[i].type;
-+ mb->un.varCfgRing.rrRegs[i].tmask = 0xff;
-+ }
-+
-+ return;
-+}
-+
-+void
-+lpfc_config_port(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
-+{
-+ MAILBOX_t *mb = &pmb->mb;
-+ dma_addr_t pdma_addr;
-+ uint32_t bar_low, bar_high;
-+ size_t offset;
-+ HGP hgp;
-+ void *to_slim;
-+
-+ memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
-+ mb->mbxCommand = MBX_CONFIG_PORT;
-+ mb->mbxOwner = OWN_HOST;
-+
-+ mb->un.varCfgPort.pcbLen = sizeof(PCB_t);
-+ offset = (uint8_t *)&phba->slim2p->pcb - (uint8_t *)phba->slim2p;
-+ pdma_addr = phba->slim2p_mapping + offset;
-+ mb->un.varCfgPort.pcbLow = putPaddrLow(pdma_addr);
-+ mb->un.varCfgPort.pcbHigh = putPaddrHigh(pdma_addr);
-+
-+ /* Now setup pcb */
-+ phba->slim2p->pcb.type = TYPE_NATIVE_SLI2;
-+ phba->slim2p->pcb.feature = FEATURE_INITIAL_SLI2;
-+
-+ /* Setup Mailbox pointers */
-+ phba->slim2p->pcb.mailBoxSize = sizeof(MAILBOX_t);
-+ offset = (uint8_t *)&phba->slim2p->mbx - (uint8_t *)phba->slim2p;
-+ pdma_addr = phba->slim2p_mapping + offset;
-+ phba->slim2p->pcb.mbAddrHigh = putPaddrHigh(pdma_addr);
-+ phba->slim2p->pcb.mbAddrLow = putPaddrLow(pdma_addr);
-+
-+ /*
-+ * Setup Host Group ring pointer.
-+ *
-+ * For efficiency reasons, the ring get/put pointers can be
-+ * placed in adapter memory (SLIM) rather than in host memory.
-+ * This allows firmware to avoid PCI reads/writes when updating
-+ * and checking pointers.
-+ *
-+ * The firmware recognizes the use of SLIM memory by comparing
-+ * the address of the get/put pointers structure with that of
-+ * the SLIM BAR (BAR0).
-+ *
-+ * Caution: be sure to use the PCI config space value of BAR0/BAR1
-+ * (the hardware's view of the base address), not the OS's
-+ * value of pci_resource_start() as the OS value may be a cookie
-+ * for ioremap/iomap.
-+ */
-+
-+
-+ pci_read_config_dword(phba->pcidev, PCI_BASE_ADDRESS_0, &bar_low);
-+ pci_read_config_dword(phba->pcidev, PCI_BASE_ADDRESS_1, &bar_high);
-+
-+
-+ /* mask off BAR0's flag bits 0 - 3 */
-+ phba->slim2p->pcb.hgpAddrLow = (bar_low & PCI_BASE_ADDRESS_MEM_MASK) +
-+ (SLIMOFF*sizeof(uint32_t));
-+ if (bar_low & PCI_BASE_ADDRESS_MEM_TYPE_64)
-+ phba->slim2p->pcb.hgpAddrHigh = bar_high;
-+ else
-+ phba->slim2p->pcb.hgpAddrHigh = 0;
-+ /* write HGP data to SLIM at the required longword offset */
-+ memset(&hgp, 0, sizeof(HGP));
-+ to_slim = (uint8_t *)phba->MBslimaddr + (SLIMOFF*sizeof (uint32_t));
-+ lpfc_memcpy_to_slim(to_slim, &hgp, sizeof (HGP));
-+
-+ /* Setup Port Group ring pointer */
-+ offset = (uint8_t *)&phba->slim2p->mbx.us.s2.port -
-+ (uint8_t *)phba->slim2p;
-+ pdma_addr = phba->slim2p_mapping + offset;
-+ phba->slim2p->pcb.pgpAddrHigh = putPaddrHigh(pdma_addr);
-+ phba->slim2p->pcb.pgpAddrLow = putPaddrLow(pdma_addr);
-+
-+ /* Use callback routine to set up rings in the pcb */
-+ lpfc_config_pcb_setup(phba);
-+
-+ /* special handling for LC HBAs */
-+ if (lpfc_is_LC_HBA(phba->pcidev->device)) {
-+ uint32_t hbainit[5];
-+
-+ lpfc_hba_init(phba, hbainit);
-+
-+ memcpy(&mb->un.varCfgPort.hbainit, hbainit, 20);
-+ }
-+
-+ /* Swap PCB if needed */
-+ lpfc_sli_pcimem_bcopy((uint32_t *)&phba->slim2p->pcb,
-+ (uint32_t *)&phba->slim2p->pcb,
-+ sizeof (PCB_t));
-+
-+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-+ "%d:0405 Service Level Interface (SLI) 2 selected\n",
-+ phba->brd_no);
-+}
-+
-+void
-+lpfc_mbox_put(struct lpfc_hba * phba, LPFC_MBOXQ_t * mbq)
-+{
-+ struct lpfc_sli *psli;
-+
-+ psli = &phba->sli;
-+
-+ list_add_tail(&mbq->list, &psli->mboxq);
-+
-+ psli->mboxq_cnt++;
-+
-+ return;
-+}
-+
-+LPFC_MBOXQ_t *
-+lpfc_mbox_get(struct lpfc_hba * phba)
-+{
-+ LPFC_MBOXQ_t *mbq = NULL;
-+ struct lpfc_sli *psli = &phba->sli;
-+
-+ if (!list_empty(&psli->mboxq)) {
-+ mbq = list_entry(psli->mboxq.next, LPFC_MBOXQ_t, list);
-+ list_del_init(&mbq->list);
-+ psli->mboxq_cnt--;
-+ }
-+
-+ return mbq;
-+}
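/* Hedged sketch (not part of the original patch): lpfc_mbox_put() and
 * lpfc_mbox_get() above implement a simple FIFO on phba->sli.mboxq.  In
 * the driver they are called with the SLI layer holding the appropriate
 * lock; that locking is assumed here and not shown.
 */
static void
example_mbox_fifo(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	LPFC_MBOXQ_t *next;

	lpfc_mbox_put(phba, pmb);	/* append; bumps psli->mboxq_cnt */
	next = lpfc_mbox_get(phba);	/* FIFO order: oldest entry first */
	if (next != NULL) {
		/* hand "next" to the mailbox issue path */
	}
}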
---- linux-2.6.8.1-t044-driver-update//drivers/scsi/lpfc/lpfc_sli.c 1970-01-01 03:00:00.000000000 +0300
-+++ rhel4u2//drivers/scsi/lpfc/lpfc_sli.c 2005-10-19 11:47:17.000000000 +0400
-@@ -0,0 +1,3447 @@
-+/*******************************************************************
-+ * This file is part of the Emulex Linux Device Driver for *
-+ * Fibre Channel Host Bus Adapters. *
-+ * Copyright (C) 2003-2005 Emulex. All rights reserved. *
-+ * EMULEX and SLI are trademarks of Emulex. *
-+ * www.emulex.com *
-+ * *
-+ * This program is free software; you can redistribute it and/or *
-+ * modify it under the terms of version 2 of the GNU General *
-+ * Public License as published by the Free Software Foundation. *
-+ * This program is distributed in the hope that it will be useful. *
-+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
-+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
-+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
-+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
-+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
-+ * more details, a copy of which can be found in the file COPYING *
-+ * included with this package. *
-+ *******************************************************************/
-+
-+/*
-+ * $Id: lpfc_sli.c 1.200.1.8 2005/07/27 17:00:59EDT sf_support Exp $
-+ */
-+
-+#include <linux/version.h>
-+#include <linux/blkdev.h>
-+#include <linux/dma-mapping.h>
-+#include <linux/pci.h>
-+#include <linux/spinlock.h>
-+
-+#include <scsi/scsi_cmnd.h>
-+#include <scsi/scsi_device.h>
-+#include <scsi/scsi_host.h>
-+
-+#include "lpfc_sli.h"
-+#include "lpfc_disc.h"
-+#include "lpfc_scsi.h"
-+#include "lpfc.h"
-+#include "lpfc_crtn.h"
-+#include "lpfc_hw.h"
-+#include "lpfc_logmsg.h"
-+#include "lpfc_mem.h"
-+#include "lpfc_compat.h"
-+#include "lpfc_fcp.h"
-+
-+static int lpfc_sli_reset_on_init = 1;
-+extern void
-+lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *, struct lpfc_iocbq *, struct lpfc_iocbq *);
-+/*
-+ * Define macro to log: Mailbox command x%x cannot issue Data
-+ * This allows multiple uses of lpfc_msgBlk0311
-+ * without perturbing the log msg utility.
-+*/
-+#define LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag) \
-+ lpfc_printf_log(phba, \
-+ KERN_INFO, \
-+ LOG_MBOX | LOG_SLI, \
-+ "%d:0311 Mailbox command x%x cannot issue " \
-+ "Data: x%x x%x x%x\n", \
-+ phba->brd_no, \
-+ mb->mbxCommand, \
-+ phba->hba_state, \
-+ psli->sliinit.sli_flag, \
-+ flag);
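/* Hedged illustration (not part of the original patch): the macro above is
 * meant to be invoked from the mailbox issue path when a command cannot be
 * sent; the wrapper function here is hypothetical.
 */
static void
example_log_cannot_issue(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
			 uint32_t flag)
{
	struct lpfc_sli *psli = &phba->sli;
	MAILBOX_t *mb = &pmbox->mb;

	/* expands to a single lpfc_printf_log() call for message 0311 */
	LOG_MBOX_CANNOT_ISSUE_DATA(phba, mb, psli, flag);
}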
-+
-+
-+/* This will save a huge switch to determine if the IOCB cmd
-+ * is unsolicited or solicited.
-+ */
-+#define LPFC_UNKNOWN_IOCB 0
-+#define LPFC_UNSOL_IOCB 1
-+#define LPFC_SOL_IOCB 2
-+#define LPFC_ABORT_IOCB 3
-+static uint8_t lpfc_sli_iocb_cmd_type[CMD_MAX_IOCB_CMD] = {
-+ LPFC_UNKNOWN_IOCB, /* 0x00 */
-+ LPFC_UNSOL_IOCB, /* CMD_RCV_SEQUENCE_CX 0x01 */
-+ LPFC_SOL_IOCB, /* CMD_XMIT_SEQUENCE_CR 0x02 */
-+ LPFC_SOL_IOCB, /* CMD_XMIT_SEQUENCE_CX 0x03 */
-+ LPFC_SOL_IOCB, /* CMD_XMIT_BCAST_CN 0x04 */
-+ LPFC_SOL_IOCB, /* CMD_XMIT_BCAST_CX 0x05 */
-+ LPFC_UNKNOWN_IOCB, /* CMD_QUE_RING_BUF_CN 0x06 */
-+ LPFC_UNKNOWN_IOCB, /* CMD_QUE_XRI_BUF_CX 0x07 */
-+ LPFC_UNKNOWN_IOCB, /* CMD_IOCB_CONTINUE_CN 0x08 */
-+ LPFC_UNKNOWN_IOCB, /* CMD_RET_XRI_BUF_CX 0x09 */
-+ LPFC_SOL_IOCB, /* CMD_ELS_REQUEST_CR 0x0A */
-+ LPFC_SOL_IOCB, /* CMD_ELS_REQUEST_CX 0x0B */
-+ LPFC_UNKNOWN_IOCB, /* 0x0C */
-+ LPFC_UNSOL_IOCB, /* CMD_RCV_ELS_REQ_CX 0x0D */
-+ LPFC_ABORT_IOCB, /* CMD_ABORT_XRI_CN 0x0E */
-+ LPFC_ABORT_IOCB, /* CMD_ABORT_XRI_CX 0x0F */
-+ LPFC_ABORT_IOCB, /* CMD_CLOSE_XRI_CR 0x10 */
-+ LPFC_ABORT_IOCB, /* CMD_CLOSE_XRI_CX 0x11 */
-+ LPFC_SOL_IOCB, /* CMD_CREATE_XRI_CR 0x12 */
-+ LPFC_SOL_IOCB, /* CMD_CREATE_XRI_CX 0x13 */
-+ LPFC_SOL_IOCB, /* CMD_GET_RPI_CN 0x14 */
-+ LPFC_SOL_IOCB, /* CMD_XMIT_ELS_RSP_CX 0x15 */
-+ LPFC_SOL_IOCB, /* CMD_GET_RPI_CR 0x16 */
-+ LPFC_ABORT_IOCB, /* CMD_XRI_ABORTED_CX 0x17 */
-+ LPFC_SOL_IOCB, /* CMD_FCP_IWRITE_CR 0x18 */
-+ LPFC_SOL_IOCB, /* CMD_FCP_IWRITE_CX 0x19 */
-+ LPFC_SOL_IOCB, /* CMD_FCP_IREAD_CR 0x1A */
-+ LPFC_SOL_IOCB, /* CMD_FCP_IREAD_CX 0x1B */
-+ LPFC_SOL_IOCB, /* CMD_FCP_ICMND_CR 0x1C */
-+ LPFC_SOL_IOCB, /* CMD_FCP_ICMND_CX 0x1D */
-+ LPFC_UNKNOWN_IOCB, /* 0x1E */
-+ LPFC_SOL_IOCB, /* CMD_FCP_TSEND_CX 0x1F */
-+ LPFC_SOL_IOCB, /* CMD_ADAPTER_MSG 0x20 */
-+ LPFC_SOL_IOCB, /* CMD_FCP_TRECEIVE_CX 0x21 */
-+ LPFC_SOL_IOCB, /* CMD_ADAPTER_DUMP 0x22 */
-+ LPFC_SOL_IOCB, /* CMD_FCP_TRSP_CX 0x23 */
-+ /* 0x24 - 0x80 */
-+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
-+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
-+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
-+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
-+ /* 0x30 */
-+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
-+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
-+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
-+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
-+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
-+ LPFC_UNKNOWN_IOCB,
-+ /* 0x40 */
-+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
-+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
-+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
-+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
-+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
-+ LPFC_UNKNOWN_IOCB,
-+ /* 0x50 */
-+ LPFC_SOL_IOCB,
-+ LPFC_SOL_IOCB,
-+ LPFC_UNKNOWN_IOCB,
-+ LPFC_SOL_IOCB,
-+ LPFC_SOL_IOCB,
-+ LPFC_UNSOL_IOCB,
-+ LPFC_UNSOL_IOCB,
-+ LPFC_SOL_IOCB,
-+ LPFC_SOL_IOCB,
-+
-+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
-+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
-+ LPFC_UNKNOWN_IOCB,
-+ /* 0x60 */
-+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
-+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
-+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
-+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
-+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
-+ LPFC_UNKNOWN_IOCB,
-+ /* 0x70 */
-+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
-+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
-+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
-+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
-+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
-+ LPFC_UNKNOWN_IOCB,
-+ /* 0x80 */
-+ LPFC_UNKNOWN_IOCB,
-+ LPFC_UNSOL_IOCB, /* CMD_RCV_SEQUENCE64_CX 0x81 */
-+ LPFC_SOL_IOCB, /* CMD_XMIT_SEQUENCE64_CR 0x82 */
-+ LPFC_SOL_IOCB, /* CMD_XMIT_SEQUENCE64_CX 0x83 */
-+ LPFC_SOL_IOCB, /* CMD_XMIT_BCAST64_CN 0x84 */
-+ LPFC_SOL_IOCB, /* CMD_XMIT_BCAST64_CX 0x85 */
-+ LPFC_UNKNOWN_IOCB, /* CMD_QUE_RING_BUF64_CN 0x86 */
-+ LPFC_UNKNOWN_IOCB, /* CMD_QUE_XRI_BUF64_CX 0x87 */
-+ LPFC_UNKNOWN_IOCB, /* CMD_IOCB_CONTINUE64_CN 0x88 */
-+ LPFC_UNKNOWN_IOCB, /* CMD_RET_XRI_BUF64_CX 0x89 */
-+ LPFC_SOL_IOCB, /* CMD_ELS_REQUEST64_CR 0x8A */
-+ LPFC_SOL_IOCB, /* CMD_ELS_REQUEST64_CX 0x8B */
-+ LPFC_ABORT_IOCB, /* CMD_ABORT_MXRI64_CN 0x8C */
-+ LPFC_UNSOL_IOCB, /* CMD_RCV_ELS_REQ64_CX 0x8D */
-+ /* 0x8E - 0x94 */
-+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
-+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
-+ LPFC_UNKNOWN_IOCB,
-+ LPFC_SOL_IOCB, /* CMD_XMIT_ELS_RSP64_CX 0x95 */
-+ LPFC_UNKNOWN_IOCB, /* 0x96 */
-+ LPFC_UNKNOWN_IOCB, /* 0x97 */
-+ LPFC_SOL_IOCB, /* CMD_FCP_IWRITE64_CR 0x98 */
-+ LPFC_SOL_IOCB, /* CMD_FCP_IWRITE64_CX 0x99 */
-+ LPFC_SOL_IOCB, /* CMD_FCP_IREAD64_CR 0x9A */
-+ LPFC_SOL_IOCB, /* CMD_FCP_IREAD64_CX 0x9B */
-+ LPFC_SOL_IOCB, /* CMD_FCP_ICMND64_CR 0x9C */
-+ LPFC_SOL_IOCB, /* CMD_FCP_ICMND64_CX 0x9D */
-+ LPFC_UNKNOWN_IOCB, /* 0x9E */
-+ LPFC_SOL_IOCB, /* CMD_FCP_TSEND64_CX 0x9F */
-+ LPFC_UNKNOWN_IOCB, /* 0xA0 */
-+ LPFC_SOL_IOCB, /* CMD_FCP_TRECEIVE64_CX 0xA1 */
-+ LPFC_UNKNOWN_IOCB, /* 0xA2 */
-+ LPFC_SOL_IOCB, /* CMD_FCP_TRSP64_CX 0xA3 */
-+ /* 0xA4 - 0xC1 */
-+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
-+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
-+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
-+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
-+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
-+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
-+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
-+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
-+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
-+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
-+ LPFC_SOL_IOCB, /* CMD_GEN_REQUEST64_CR 0xC2 */
-+ LPFC_SOL_IOCB, /* CMD_GEN_REQUEST64_CX 0xC3 */
-+ /* 0xC4 - 0xCF */
-+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
-+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
-+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
-+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
-+
-+ LPFC_SOL_IOCB,
-+ LPFC_SOL_IOCB, /* CMD_SENDTEXT_CR 0xD1 */
-+ LPFC_SOL_IOCB, /* CMD_SENDTEXT_CX 0xD2 */
-+ LPFC_SOL_IOCB, /* CMD_RCV_LOGIN 0xD3 */
-+ LPFC_SOL_IOCB, /* CMD_ACCEPT_LOGIN 0xD4 */
-+ LPFC_SOL_IOCB, /* CMD_REJECT_LOGIN 0xD5 */
-+ LPFC_UNSOL_IOCB,
-+ /* 0xD7 - 0xDF */
-+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
-+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
-+ LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
-+ /* 0xE0 */
-+ LPFC_UNSOL_IOCB,
-+ LPFC_SOL_IOCB,
-+ LPFC_SOL_IOCB,
-+ LPFC_SOL_IOCB,
-+ LPFC_SOL_IOCB,
-+ LPFC_UNSOL_IOCB
-+};
-+
-+static void
-+lpfc_sli_wake_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
-+{
-+ wait_queue_head_t *pdone_q;
-+
-+ /*
-+	 * If pdone_q is NULL, the waiting thread gave up and
-+	 * continued running.
-+ */
-+ pdone_q = (wait_queue_head_t *) pmboxq->context1;
-+ if (pdone_q)
-+ wake_up_interruptible(pdone_q);
-+ return;
-+}
-+
-+
-+
-+static int
-+lpfc_sli_ring_map(struct lpfc_hba * phba)
-+{
-+ struct lpfc_sli *psli;
-+ LPFC_MBOXQ_t *pmb;
-+ MAILBOX_t *pmbox;
-+ int i;
-+
-+ psli = &phba->sli;
-+
-+ /* Get a Mailbox buffer to setup mailbox commands for HBA
-+ initialization */
-+ if ((pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
-+ GFP_ATOMIC)) == 0) {
-+ phba->hba_state = LPFC_HBA_ERROR;
-+ return -ENOMEM;
-+ }
-+ pmbox = &pmb->mb;
-+
-+ /* Initialize the struct lpfc_sli_ring structure for each ring */
-+ for (i = 0; i < psli->sliinit.num_rings; i++) {
-+ /* Issue a CONFIG_RING mailbox command for each ring */
-+ phba->hba_state = LPFC_INIT_MBX_CMDS;
-+ lpfc_config_ring(phba, i, pmb);
-+ if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
-+ /* Adapter failed to init, mbxCmd <cmd> CFG_RING,
-+ mbxStatus <status>, ring <num> */
-+ lpfc_printf_log(phba,
-+ KERN_ERR,
-+ LOG_INIT,
-+ "%d:0446 Adapter failed to init, "
-+ "mbxCmd x%x CFG_RING, mbxStatus x%x, "
-+ "ring %d\n",
-+ phba->brd_no,
-+ pmbox->mbxCommand,
-+ pmbox->mbxStatus,
-+ i);
-+ phba->hba_state = LPFC_HBA_ERROR;
-+ mempool_free( pmb, phba->mbox_mem_pool);
-+ return -ENXIO;
-+ }
-+ }
-+ mempool_free( pmb, phba->mbox_mem_pool);
-+ return 0;
-+}
-+
-+static int
-+lpfc_sli_ringtxcmpl_put(struct lpfc_hba * phba,
-+ struct lpfc_sli_ring * pring, struct lpfc_iocbq * piocb)
-+{
-+ uint16_t iotag;
-+
-+ list_add_tail(&piocb->list, &pring->txcmplq);
-+ pring->txcmplq_cnt++;
-+ if (unlikely(pring->ringno == LPFC_ELS_RING))
-+ mod_timer(&phba->els_tmofunc,
-+ jiffies + HZ * (phba->fc_ratov << 1));
-+
-+ if (pring->fast_lookup) {
-+ /* Setup fast lookup based on iotag for completion */
-+ iotag = piocb->iocb.ulpIoTag;
-+ if (iotag && (iotag
-+ < phba->sli.sliinit.ringinit[pring->ringno].fast_iotag))
-+ *(pring->fast_lookup + iotag) = piocb;
-+ else {
-+
-+			/* Cmd ring <ringno> put: iotag <iotag> greater than
-+			   configured max <fast_iotag> wd0 <icmd> */
-+ lpfc_printf_log(phba,
-+ KERN_ERR,
-+ LOG_SLI,
-+ "%d:0316 Cmd ring %d put: iotag x%x "
-+					"greater than configured max x%x "
-+ "wd0 x%x\n",
-+ phba->brd_no,
-+ pring->ringno, iotag, phba->sli.sliinit
-+ .ringinit[pring->ringno].fast_iotag,
-+ *(((uint32_t *)(&piocb->iocb)) + 7));
-+ }
-+ }
-+ return (0);
-+}
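The put/get pair here keeps a fast_lookup array indexed by ulpIoTag so that completions can find the originating command without walking txcmplq. A minimal standalone sketch of that bounds-checked table idea follows; the names and types are illustrative, not the driver's actual structures.

#include <stdlib.h>

struct io_req { unsigned short iotag; /* ... command payload ... */ };

struct fast_table {
	struct io_req **slot;	/* indexed by iotag, NULL when free */
	unsigned short max;	/* configured maximum iotag */
};

static int fast_init(struct fast_table *t, unsigned short max)
{
	t->slot = calloc(max, sizeof(*t->slot));
	t->max = t->slot ? max : 0;
	return t->slot ? 0 : -1;
}

/* Record an outstanding request; out-of-range tags are rejected so the
 * caller falls back to the slow list search. */
static int fast_put(struct fast_table *t, struct io_req *req)
{
	if (req->iotag == 0 || req->iotag >= t->max)
		return -1;
	t->slot[req->iotag] = req;
	return 0;
}

/* Claim and clear the entry on completion; NULL means "search the list". */
static struct io_req *fast_get(struct fast_table *t, unsigned short iotag)
{
	struct io_req *req;

	if (iotag >= t->max)
		return NULL;
	req = t->slot[iotag];
	t->slot[iotag] = NULL;
	return req;
}

The trade-off is memory proportional to the maximum iotag in exchange for O(1) completion lookup, with the list walk kept as the fallback path.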
-+
-+static int
-+lpfc_sli_ringtx_put(struct lpfc_hba * phba, struct lpfc_sli_ring * pring,
-+ struct lpfc_iocbq * piocb)
-+{
-+ /* Insert the caller's iocb in the txq tail for later processing. */
-+ list_add_tail(&piocb->list, &pring->txq);
-+ pring->txq_cnt++;
-+ return (0);
-+}
-+
-+static struct lpfc_iocbq *
-+lpfc_sli_ringtx_get(struct lpfc_hba * phba, struct lpfc_sli_ring * pring)
-+{
-+ struct list_head *dlp;
-+ struct lpfc_iocbq *cmd_iocb;
-+ struct lpfc_iocbq *next_iocb;
-+
-+ dlp = &pring->txq;
-+ cmd_iocb = NULL;
-+ next_iocb = (struct lpfc_iocbq *) pring->txq.next;
-+ if (next_iocb != (struct lpfc_iocbq *) & pring->txq) {
-+ /* If the first ptr is not equal to the list header,
-+		 * dequeue the IOCBQ_t and return it.
-+ */
-+ cmd_iocb = next_iocb;
-+ list_del(&cmd_iocb->list);
-+ pring->txq_cnt--;
-+ }
-+ return (cmd_iocb);
-+}
-+
-+static IOCB_t *
-+lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
-+{
-+ MAILBOX_t *mbox = (MAILBOX_t *)phba->sli.MBhostaddr;
-+ PGP *pgp = (PGP *)&mbox->us.s2.port[pring->ringno];
-+ uint32_t max_cmd_idx =
-+ phba->sli.sliinit.ringinit[pring->ringno].numCiocb;
-+ IOCB_t *iocb = NULL;
-+
-+ if((pring->next_cmdidx == pring->cmdidx) &&
-+ (++pring->next_cmdidx >= max_cmd_idx))
-+ pring->next_cmdidx = 0;
-+
-+ if (unlikely(pring->local_getidx == pring->next_cmdidx)) {
-+
-+ pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
-+
-+ if (unlikely(pring->local_getidx >= max_cmd_idx)) {
-+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-+ "%d:0315 Ring %d issue: portCmdGet %d "
-+					"is bigger than cmd ring %d\n",
-+ phba->brd_no, pring->ringno,
-+ pring->local_getidx, max_cmd_idx);
-+
-+ phba->hba_state = LPFC_HBA_ERROR;
-+ /*
-+ All error attention handlers are posted to
-+ discovery tasklet
-+ */
-+ lpfc_discq_post_event(phba, (void *)HS_FFER3, NULL,
-+ LPFC_EVT_ERR_ATTN);
-+
-+ return NULL;
-+ }
-+
-+ if (pring->local_getidx == pring->next_cmdidx)
-+ return NULL;
-+ }
-+
-+ iocb = IOCB_ENTRY(pring->cmdringaddr, pring->cmdidx);
-+
-+ return iocb;
-+}
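lpfc_sli_next_iocb_slot treats the command ring as a circular buffer: the driver advances its put index, the HBA advances a get index in host memory, and the ring is full when the next put position would collide with the cached get index. A simplified sketch of that wrap-and-collide check, with generic names in place of the driver's fields:

/* Return the slot index to fill next, or -1 if the ring is full.
 * putidx is the producer's put index, getidx the device's get index. */
static int next_ring_slot(unsigned int putidx, unsigned int getidx,
			  unsigned int ring_size)
{
	unsigned int next = (putidx + 1) % ring_size;

	if (next == getidx)
		return -1;	/* producer would overrun the consumer */
	return (int)putidx;	/* fill this slot, then advance to 'next' */
}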
-+
-+static int
-+lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
-+ IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
-+{
-+ struct lpfc_sli *psli = &phba->sli;
-+ int ringno = pring->ringno;
-+
-+ /*
-+	 * Allocate and set up an iotag
-+ */
-+ if ((nextiocb->iocb.ulpIoTag =
-+ lpfc_sli_next_iotag(phba, &psli->ring[psli->fcp_ring])) == 0)
-+ return (1);
-+
-+ /*
-+ * Issue iocb command to adapter
-+ */
-+ lpfc_sli_pcimem_bcopy((uint32_t *)&nextiocb->iocb,
-+ (uint32_t *)(iocb), sizeof (IOCB_t));
-+ wmb();
-+ psli->slistat.iocbCmd[ringno]++;
-+
-+ /*
-+ * If there is no completion routine to call, we can release the
-+ * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
-+ * that have no rsp ring completion, iocb_cmpl MUST be NULL.
-+ */
-+ if (nextiocb->iocb_cmpl)
-+ lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
-+ else
-+ mempool_free(nextiocb, phba->iocb_mem_pool);
-+
-+ /*
-+ * Let the HBA know what IOCB slot will be the next one the
-+ * driver will put a command into.
-+ */
-+ pring->cmdidx = pring->next_cmdidx;
-+ writeb(pring->cmdidx,
-+ (u8 *)phba->MBslimaddr + (SLIMOFF + (ringno * 2)) * 4);
-+
-+ return (0);
-+}
-+
-+static void
-+lpfc_sli_update_full_ring(struct lpfc_hba * phba,
-+ struct lpfc_sli_ring *pring)
-+{
-+ int ringno = pring->ringno;
-+
-+ pring->flag |= LPFC_CALL_RING_AVAILABLE;
-+
-+ wmb();
-+
-+ /*
-+ * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
-+ * The HBA will tell us when an IOCB entry is available.
-+ */
-+ writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
-+ readl(phba->CAregaddr); /* flush */
-+
-+ phba->sli.slistat.iocbCmdFull[ringno]++;
-+}
-+
-+static void
-+lpfc_sli_update_ring(struct lpfc_hba * phba,
-+ struct lpfc_sli_ring *pring)
-+{
-+ int ringno = pring->ringno;
-+
-+ /*
-+ * Tell the HBA that there is work to do in this ring.
-+ */
-+ wmb();
-+ writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
-+ readl(phba->CAregaddr); /* flush */
-+}
-+
-+static void
-+lpfc_sli_resume_iocb(struct lpfc_hba * phba, struct lpfc_sli_ring * pring)
-+{
-+ struct lpfc_sli *psli = &phba->sli;
-+ IOCB_t *iocb;
-+ struct lpfc_iocbq *nextiocb;
-+
-+ /*
-+ * Check to see if:
-+ * (a) there is anything on the txq to send
-+ * (b) link is up
-+ * (c) link attention events can be processed (fcp ring only)
-+ * (d) IOCB processing is not blocked by the outstanding mbox command.
-+ */
-+ if (pring->txq_cnt &&
-+ (phba->hba_state > LPFC_LINK_DOWN) &&
-+ (pring->ringno != psli->fcp_ring ||
-+ psli->sliinit.sli_flag & LPFC_PROCESS_LA) &&
-+ !(pring->flag & LPFC_STOP_IOCB_MBX)) {
-+
-+ while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
-+ (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
-+ if (lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb)) {
-+ lpfc_sli_ringtx_put(phba, pring, nextiocb);
-+ break;
-+ }
-+
-+ if (iocb)
-+ lpfc_sli_update_ring(phba, pring);
-+ else
-+ lpfc_sli_update_full_ring(phba, pring);
-+ }
-+
-+ return;
-+}
-+
-+/* lpfc_sli_turn_on_ring is only called by lpfc_sli_handle_mb_event below */
-+static void
-+lpfc_sli_turn_on_ring(struct lpfc_hba * phba, int ringno)
-+{
-+ struct lpfc_sli *psli;
-+ struct lpfc_sli_ring *pring;
-+ PGP *pgp;
-+
-+ psli = &phba->sli;
-+ pring = &psli->ring[ringno];
-+ pgp = (PGP *) & (((MAILBOX_t *)psli->MBhostaddr)->us.s2.port[ringno]);
-+
-+ /* If the ring is active, flag it */
-+ if (psli->ring[ringno].cmdringaddr) {
-+ if (psli->ring[ringno].flag & LPFC_STOP_IOCB_MBX) {
-+ psli->ring[ringno].flag &= ~LPFC_STOP_IOCB_MBX;
-+ /*
-+ * Force update of the local copy of cmdGetInx
-+ */
-+ pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
-+ lpfc_sli_resume_iocb(phba, pring);
-+ }
-+ }
-+}
-+
-+static int
-+lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
-+{
-+ uint8_t ret;
-+
-+ switch (mbxCommand) {
-+ case MBX_LOAD_SM:
-+ case MBX_READ_NV:
-+ case MBX_WRITE_NV:
-+ case MBX_RUN_BIU_DIAG:
-+ case MBX_INIT_LINK:
-+ case MBX_DOWN_LINK:
-+ case MBX_CONFIG_LINK:
-+ case MBX_CONFIG_RING:
-+ case MBX_RESET_RING:
-+ case MBX_READ_CONFIG:
-+ case MBX_READ_RCONFIG:
-+ case MBX_READ_SPARM:
-+ case MBX_READ_STATUS:
-+ case MBX_READ_RPI:
-+ case MBX_READ_XRI:
-+ case MBX_READ_REV:
-+ case MBX_READ_LNK_STAT:
-+ case MBX_REG_LOGIN:
-+ case MBX_UNREG_LOGIN:
-+ case MBX_READ_LA:
-+ case MBX_CLEAR_LA:
-+ case MBX_DUMP_MEMORY:
-+ case MBX_DUMP_CONTEXT:
-+ case MBX_RUN_DIAGS:
-+ case MBX_RESTART:
-+ case MBX_UPDATE_CFG:
-+ case MBX_DOWN_LOAD:
-+ case MBX_DEL_LD_ENTRY:
-+ case MBX_RUN_PROGRAM:
-+ case MBX_SET_MASK:
-+ case MBX_SET_SLIM:
-+ case MBX_UNREG_D_ID:
-+ case MBX_CONFIG_FARP:
-+ case MBX_LOAD_AREA:
-+ case MBX_RUN_BIU_DIAG64:
-+ case MBX_CONFIG_PORT:
-+ case MBX_READ_SPARM64:
-+ case MBX_READ_RPI64:
-+ case MBX_REG_LOGIN64:
-+ case MBX_READ_LA64:
-+ case MBX_FLASH_WR_ULA:
-+ case MBX_SET_DEBUG:
-+ case MBX_LOAD_EXP_ROM:
-+ ret = mbxCommand;
-+ break;
-+ default:
-+ ret = MBX_SHUTDOWN;
-+ break;
-+ }
-+ return (ret);
-+}
-+
-+void
-+lpfc_sli_def_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
-+{
-+ struct lpfc_dmabuf *mp;
-+ mp = (struct lpfc_dmabuf *) (pmb->context1);
-+ if (mp) {
-+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
-+ kfree(mp);
-+ }
-+ mempool_free( pmb, phba->mbox_mem_pool);
-+ return;
-+}
-+
-+static int
-+lpfc_sli_handle_mb_event(struct lpfc_hba * phba)
-+{
-+ MAILBOX_t *mbox;
-+ MAILBOX_t *pmbox;
-+ LPFC_MBOXQ_t *pmb;
-+ struct lpfc_sli *psli;
-+ int i;
-+ unsigned long iflag;
-+ uint32_t process_next;
-+
-+
-+ psli = &phba->sli;
-+ /* We should only get here if we are in SLI2 mode */
-+ if (!(psli->sliinit.sli_flag & LPFC_SLI2_ACTIVE)) {
-+ return (1);
-+ }
-+
-+ spin_lock_irqsave(phba->host->host_lock, iflag);
-+
-+ psli->slistat.mboxEvent++;
-+
-+ /* Get a Mailbox buffer to setup mailbox commands for callback */
-+ if ((pmb = psli->mbox_active)) {
-+ pmbox = &pmb->mb;
-+ mbox = (MAILBOX_t *) psli->MBhostaddr;
-+
-+ /* First check out the status word */
-+ lpfc_sli_pcimem_bcopy((uint32_t *) mbox, (uint32_t *) pmbox,
-+ sizeof (uint32_t));
-+
-+ /* Sanity check to ensure the host owns the mailbox */
-+ if (pmbox->mbxOwner != OWN_HOST) {
-+			/* Let's try for a while */
-+ for (i = 0; i < 10240; i++) {
-+ /* First copy command data */
-+ lpfc_sli_pcimem_bcopy((uint32_t *) mbox,
-+ (uint32_t *) pmbox,
-+ sizeof (uint32_t));
-+ if (pmbox->mbxOwner == OWN_HOST)
-+ goto mbout;
-+ }
-+ /* Stray Mailbox Interrupt, mbxCommand <cmd> mbxStatus
-+ <status> */
-+ lpfc_printf_log(phba,
-+ KERN_ERR,
-+ LOG_MBOX | LOG_SLI,
-+ "%d:0304 Stray Mailbox Interrupt "
-+ "mbxCommand x%x mbxStatus x%x\n",
-+ phba->brd_no,
-+ pmbox->mbxCommand,
-+ pmbox->mbxStatus);
-+
-+ psli->sliinit.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
-+ spin_unlock_irqrestore(phba->host->host_lock, iflag);
-+ return (1);
-+ }
-+
-+ mbout:
-+ del_timer_sync(&psli->mbox_tmo);
-+ phba->work_hba_events &= ~WORKER_MBOX_TMO;
-+
-+ /*
-+		 * It is a fatal error if an unknown mbox command completes.
-+ */
-+ if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
-+ MBX_SHUTDOWN) {
-+
-+			/* Unknown mailbox command compl */
-+ lpfc_printf_log(phba,
-+ KERN_ERR,
-+ LOG_MBOX | LOG_SLI,
-+ "%d:0323 Unknown Mailbox command %x Cmpl\n",
-+ phba->brd_no,
-+ pmbox->mbxCommand);
-+ phba->hba_state = LPFC_HBA_ERROR;
-+
-+ /*
-+ All error attention handlers are posted to
-+ discovery tasklet
-+ */
-+ lpfc_discq_post_event(phba, (void *)HS_FFER3, NULL,
-+ LPFC_EVT_ERR_ATTN);
-+ spin_unlock_irqrestore(phba->host->host_lock, iflag);
-+ return (0);
-+ }
-+
-+ psli->mbox_active = NULL;
-+ if (pmbox->mbxStatus) {
-+ psli->slistat.mboxStatErr++;
-+ if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
-+ /* Mbox cmd cmpl error - RETRYing */
-+ lpfc_printf_log(phba,
-+ KERN_INFO,
-+ LOG_MBOX | LOG_SLI,
-+ "%d:0305 Mbox cmd cmpl error - "
-+ "RETRYing Data: x%x x%x x%x x%x\n",
-+ phba->brd_no,
-+ pmbox->mbxCommand,
-+ pmbox->mbxStatus,
-+ pmbox->un.varWords[0],
-+ phba->hba_state);
-+ pmbox->mbxStatus = 0;
-+ pmbox->mbxOwner = OWN_HOST;
-+ psli->sliinit.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
-+ if (lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT)
-+ == MBX_SUCCESS) {
-+ spin_unlock_irqrestore(
-+ phba->host->host_lock,
-+ iflag);
-+ return (0);
-+ }
-+ }
-+ }
-+
-+ /* Mailbox cmd <cmd> Cmpl <cmpl> */
-+ lpfc_printf_log(phba,
-+ KERN_INFO,
-+ LOG_MBOX | LOG_SLI,
-+ "%d:0307 Mailbox cmd x%x Cmpl x%p "
-+ "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n",
-+ phba->brd_no,
-+ pmbox->mbxCommand,
-+ pmb->mbox_cmpl,
-+ *((uint32_t *) pmbox),
-+ mbox->un.varWords[0],
-+ mbox->un.varWords[1],
-+ mbox->un.varWords[2],
-+ mbox->un.varWords[3],
-+ mbox->un.varWords[4],
-+ mbox->un.varWords[5],
-+ mbox->un.varWords[6],
-+ mbox->un.varWords[7]);
-+
-+ if (pmb->mbox_cmpl) {
-+ /* Copy entire mbox completion over buffer */
-+ lpfc_sli_pcimem_bcopy((uint32_t *) mbox,
-+ (uint32_t *) pmbox,
-+ (sizeof (uint32_t) *
-+ (MAILBOX_CMD_WSIZE)));
-+ /* All mbox cmpls are posted to discovery tasklet */
-+ lpfc_discq_post_event(phba, pmb, NULL,
-+ LPFC_EVT_MBOX);
-+
-+ }
-+ }
-+
-+
-+ do {
-+ process_next = 0; /* by default don't loop */
-+ psli->sliinit.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
-+
-+ /* Process next mailbox command if there is one */
-+ if ((pmb = lpfc_mbox_get(phba))) {
-+ if (lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT) ==
-+ MBX_NOT_FINISHED) {
-+ pmb->mb.mbxStatus = MBX_NOT_FINISHED;
-+ /* All mbox cmpls are posted to discovery tasklet */
-+ lpfc_discq_post_event(phba, pmb, NULL,
-+ LPFC_EVT_MBOX);
-+ process_next = 1;
-+ continue; /* loop back */
-+ }
-+ } else {
-+ /* Turn on IOCB processing */
-+ for (i = 0; i < psli->sliinit.num_rings; i++) {
-+ lpfc_sli_turn_on_ring(phba, i);
-+ }
-+
-+ /* Free any lpfc_dmabuf's waiting for mbox cmd cmpls */
-+ while (!list_empty(&phba->freebufList)) {
-+ struct lpfc_dmabuf *mp;
-+
-+ mp = (struct lpfc_dmabuf *)
-+ (phba->freebufList.next);
-+ if (mp) {
-+ lpfc_mbuf_free(phba, mp->virt,
-+ mp->phys);
-+ list_del(&mp->list);
-+ kfree(mp);
-+ }
-+ }
-+ }
-+
-+ } while (process_next);
-+
-+ spin_unlock_irqrestore(phba->host->host_lock, iflag);
-+ return (0);
-+}
-+static int
-+lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
-+ struct lpfc_iocbq *saveq)
-+{
-+ struct lpfc_sli * psli;
-+ IOCB_t * irsp;
-+ LPFC_RING_INIT_t * pringinit;
-+ WORD5 * w5p;
-+ uint32_t Rctl, Type;
-+ uint32_t match, ringno, i;
-+ unsigned long iflag;
-+
-+ psli = &phba->sli;
-+ match = 0;
-+ ringno = pring->ringno;
-+ irsp = &(saveq->iocb);
-+ if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX)
-+ || (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX)) {
-+ Rctl = FC_ELS_REQ;
-+ Type = FC_ELS_DATA;
-+ } else {
-+		w5p = (WORD5 *) & (saveq->iocb.un.ulpWord[5]);
-+ Rctl = w5p->hcsw.Rctl;
-+ Type = w5p->hcsw.Type;
-+
-+ /* Firmware Workaround */
-+ if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
-+ (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX)) {
-+ Rctl = FC_ELS_REQ;
-+ Type = FC_ELS_DATA;
-+ w5p->hcsw.Rctl = Rctl;
-+ w5p->hcsw.Type = Type;
-+ }
-+ }
-+	/* Unsolicited Responses */
-+ pringinit = &psli->sliinit.ringinit[ringno];
-+ if (pringinit->prt[0].profile) {
-+ /* If this ring has a profile set, just
-+ send it to prt[0] */
-+ /* All unsol iocbs for LPFC_ELS_RING
-+ * are posted to discovery tasklet.
-+ */
-+ if (ringno == LPFC_ELS_RING) {
-+ spin_lock_irqsave(phba->host->host_lock, iflag);
-+ lpfc_discq_post_event(phba, (void *)&pringinit->prt[0],
-+ (void *)saveq, LPFC_EVT_UNSOL_IOCB);
-+ spin_unlock_irqrestore(phba->host->host_lock, iflag);
-+ }
-+ else {
-+ (pringinit->prt[0].
-+ lpfc_sli_rcv_unsol_event) (phba, pring, saveq);
-+ }
-+ match = 1;
-+ } else {
-+ /* We must search, based on rctl / type
-+ for the right routine */
-+ for (i = 0; i < pringinit->num_mask;
-+ i++) {
-+ if ((pringinit->prt[i].rctl ==
-+ Rctl)
-+ && (pringinit->prt[i].
-+ type == Type)) {
-+ /* All unsol iocbs for LPFC_ELS_RING
-+ * are posted to discovery tasklet.
-+ */
-+ if (ringno == LPFC_ELS_RING) {
-+ spin_lock_irqsave(phba->host->host_lock, iflag);
-+ lpfc_discq_post_event(phba,
-+ (void *)&pringinit->prt[i],
-+ (void *)saveq, LPFC_EVT_UNSOL_IOCB);
-+ spin_unlock_irqrestore(phba->host->host_lock, iflag);
-+ }
-+ else {
-+ (pringinit->prt[i].
-+ lpfc_sli_rcv_unsol_event)
-+ (phba, pring, saveq);
-+ }
-+ match = 1;
-+ break;
-+ }
-+ }
-+ }
-+ if (match == 0) {
-+ /* Unexpected Rctl / Type received */
-+ /* Ring <ringno> handler: unexpected
-+ Rctl <Rctl> Type <Type> received */
-+ lpfc_printf_log(phba,
-+ KERN_WARNING,
-+ LOG_SLI,
-+ "%d:0313 Ring %d handler: unexpected Rctl x%x "
-+ "Type x%x received \n",
-+ phba->brd_no,
-+ ringno,
-+ Rctl,
-+ Type);
-+ }
-+ return(1);
-+}
-+static struct lpfc_iocbq *
-+lpfc_search_txcmpl(struct lpfc_sli_ring * pring, struct lpfc_iocbq * prspiocb)
-+{
-+ IOCB_t *icmd = NULL;
-+ IOCB_t *irsp = NULL;
-+ struct lpfc_iocbq *cmd_iocb;
-+ struct lpfc_iocbq *iocb, *next_iocb;
-+ uint16_t iotag;
-+
-+ irsp = &prspiocb->iocb;
-+ iotag = irsp->ulpIoTag;
-+ cmd_iocb = NULL;
-+
-+	/* Search through txcmpl from the beginning */
-+ list_for_each_entry_safe(iocb, next_iocb, &(pring->txcmplq), list) {
-+ icmd = &iocb->iocb;
-+ if (iotag == icmd->ulpIoTag) {
-+ /* Found a match. */
-+ cmd_iocb = iocb;
-+ list_del(&iocb->list);
-+ pring->txcmplq_cnt--;
-+ break;
-+ }
-+ }
-+
-+ return (cmd_iocb);
-+}
-+static struct lpfc_iocbq *
-+lpfc_sli_ringtxcmpl_get(struct lpfc_hba * phba,
-+ struct lpfc_sli_ring * pring,
-+ struct lpfc_iocbq * prspiocb, uint32_t srch)
-+{
-+ struct list_head *dlp;
-+ IOCB_t *irsp = NULL;
-+ struct lpfc_iocbq *cmd_iocb;
-+ struct lpfc_sli *psli;
-+ uint16_t iotag;
-+
-+
-+ dlp = &pring->txcmplq;
-+
-+ if (pring->fast_lookup && (srch == 0)) {
-+ /*
-+ * Use fast lookup based on iotag for completion
-+ */
-+ psli = &phba->sli;
-+ irsp = &prspiocb->iocb;
-+ iotag = irsp->ulpIoTag;
-+ if (iotag < psli->sliinit.ringinit[pring->ringno].fast_iotag) {
-+ cmd_iocb = *(pring->fast_lookup + iotag);
-+ *(pring->fast_lookup + iotag) = NULL;
-+ if (cmd_iocb) {
-+ list_del(&cmd_iocb->list);
-+ pring->txcmplq_cnt--;
-+ return cmd_iocb;
-+ }
-+ } else {
-+ /*
-+			 * Rsp ring <ringno> get: iotag <iotag> greater than
-+ * configured max <fast_iotag> wd0 <irsp>
-+ */
-+ lpfc_printf_log(phba,
-+ KERN_ERR,
-+ LOG_SLI,
-+ "%d:0317 Rsp ring %d get: iotag x%x "
-+					"greater than configured max x%x "
-+ "wd0 x%x\n",
-+ phba->brd_no,
-+ pring->ringno, iotag,
-+ psli->sliinit.ringinit[pring->ringno]
-+ .fast_iotag,
-+ *(((uint32_t *) irsp) + 7));
-+ }
-+ }
-+
-+ cmd_iocb = lpfc_search_txcmpl(pring, prspiocb);
-+
-+ return cmd_iocb;
-+}
-+
-+static int
-+lpfc_sli_process_sol_iocb(struct lpfc_hba * phba, struct lpfc_sli_ring * pring,
-+ struct lpfc_iocbq *saveq)
-+{
-+ struct lpfc_iocbq * cmdiocbp;
-+ int ringno, rc;
-+ unsigned long iflag;
-+
-+ rc = 1;
-+ ringno = pring->ringno;
-+ /* Solicited Responses */
-+ /* Based on the iotag field, get the cmd IOCB
-+ from the txcmplq */
-+ spin_lock_irqsave(phba->host->host_lock, iflag);
-+ if ((cmdiocbp =
-+ lpfc_sli_ringtxcmpl_get(phba, pring, saveq,
-+ 0))) {
-+ /* Call the specified completion
-+ routine */
-+ if (cmdiocbp->iocb_cmpl) {
-+ /* All iocb cmpls for LPFC_ELS_RING
-+ * are posted to discovery tasklet.
-+ */
-+ if (ringno == LPFC_ELS_RING) {
-+ lpfc_discq_post_event(phba, (void *)cmdiocbp,
-+ (void *)saveq, LPFC_EVT_SOL_IOCB);
-+ }
-+ else {
-+ if (cmdiocbp->iocb_flag & LPFC_IO_POLL) {
-+ rc = 0;
-+ }
-+
-+ if (cmdiocbp->iocb_cmpl == lpfc_scsi_cmd_iocb_cmpl)
-+ (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
-+ else {
-+ spin_unlock_irqrestore(phba->host->host_lock,
-+ iflag);
-+ (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
-+ spin_lock_irqsave(phba->host->host_lock, iflag);
-+ }
-+ }
-+ } else {
-+ mempool_free( cmdiocbp, phba->iocb_mem_pool);
-+ }
-+ } else {
-+ /* Could not find the initiating command
-+		 * based on the response iotag.
-+ * This is expected on ELS ring because of lpfc_els_abort().
-+ */
-+ if (ringno != LPFC_ELS_RING) {
-+ /* Ring <ringno> handler: unexpected
-+ completion IoTag <IoTag> */
-+ lpfc_printf_log(phba,
-+ KERN_WARNING,
-+ LOG_SLI,
-+ "%d:0322 Ring %d handler: unexpected "
-+ "completion IoTag x%x Data: x%x x%x x%x x%x\n",
-+ phba->brd_no,
-+ ringno,
-+ saveq->iocb.ulpIoTag,
-+ saveq->iocb.ulpStatus,
-+ saveq->iocb.un.ulpWord[4],
-+ saveq->iocb.ulpCommand,
-+ saveq->iocb.ulpContext);
-+ }
-+ }
-+ spin_unlock_irqrestore(phba->host->host_lock, iflag);
-+ return(rc);
-+}
-+static int
-+lpfc_sli_handle_ring_event(struct lpfc_hba * phba,
-+ struct lpfc_sli_ring * pring, uint32_t mask)
-+{
-+ struct lpfc_sli * psli;
-+ IOCB_t * entry;
-+ IOCB_t * irsp;
-+ struct lpfc_iocbq * rspiocbp, *next_iocb;
-+ struct lpfc_iocbq * cmdiocbp;
-+ struct lpfc_iocbq * saveq;
-+ HGP * hgp;
-+ PGP * pgp;
-+ MAILBOX_t * mbox;
-+ uint32_t status, free_saveq;
-+ uint32_t portRspPut, portRspMax;
-+ int ringno, loopcnt, rc;
-+ uint8_t type;
-+ unsigned long iflag;
-+ void *to_slim;
-+
-+ psli = &phba->sli;
-+ ringno = pring->ringno;
-+ irsp = NULL;
-+ rc = 1;
-+
-+ spin_lock_irqsave(phba->host->host_lock, iflag);
-+ psli->slistat.iocbEvent[ringno]++;
-+
-+ /* At this point we assume SLI-2 */
-+ mbox = (MAILBOX_t *) psli->MBhostaddr;
-+ pgp = (PGP *) & mbox->us.s2.port[ringno];
-+ hgp = (HGP *) & mbox->us.s2.host[ringno];
-+
-+ /* portRspMax is the number of rsp ring entries for this specific
-+ ring. */
-+ portRspMax = psli->sliinit.ringinit[ringno].numRiocb;
-+
-+ rspiocbp = NULL;
-+ loopcnt = 0;
-+
-+ /* Gather iocb entries off response ring.
-+ * rspidx is the IOCB index of the next IOCB that the driver
-+ * is going to process.
-+ */
-+ entry = IOCB_ENTRY(pring->rspringaddr, pring->rspidx);
-+ portRspPut = le32_to_cpu(pgp->rspPutInx);
-+
-+ if (portRspPut >= portRspMax) {
-+
-+		/* Ring <ringno> handler: portRspPut <portRspPut> is bigger than
-+ rsp ring <portRspMax> */
-+ lpfc_printf_log(phba,
-+ KERN_ERR,
-+ LOG_SLI,
-+ "%d:0312 Ring %d handler: portRspPut %d "
-+				"is bigger than rsp ring %d\n",
-+ phba->brd_no,
-+ ringno, portRspPut, portRspMax);
-+ /*
-+ * Treat it as adapter hardware error.
-+ */
-+ phba->hba_state = LPFC_HBA_ERROR;
-+ /*
-+ All error attention handlers are posted to
-+ discovery tasklet
-+ */
-+ lpfc_discq_post_event(phba, (void *)HS_FFER3, NULL,
-+ LPFC_EVT_ERR_ATTN);
-+ spin_unlock_irqrestore(phba->host->host_lock, iflag);
-+ return (1);
-+ }
-+
-+ rmb();
-+
-+ /* Get the next available response iocb.
-+ * rspidx is the IOCB index of the next IOCB that the driver
-+ * is going to process.
-+ */
-+ while (pring->rspidx != portRspPut) {
-+ /* get an iocb buffer to copy entry into */
-+ if ((rspiocbp = mempool_alloc(phba->iocb_mem_pool,
-+ GFP_ATOMIC)) == 0) {
-+ break;
-+ }
-+
-+ lpfc_sli_pcimem_bcopy((uint32_t *) entry,
-+ (uint32_t *) & rspiocbp->iocb,
-+ sizeof (IOCB_t));
-+ irsp = &rspiocbp->iocb;
-+
-+ /* bump iocb available response index */
-+ if (++pring->rspidx >= portRspMax) {
-+ pring->rspidx = 0;
-+ }
-+
-+ /* Let the HBA know what IOCB slot will be the next one the
-+ * driver will read a response from.
-+ */
-+ to_slim = (uint8_t *) phba->MBslimaddr +
-+ (SLIMOFF + (ringno * 2) + 1) * 4;
-+ writeb( pring->rspidx, to_slim);
-+
-+ /* chain all iocb entries until LE is set */
-+ if (list_empty(&(pring->iocb_continueq))) {
-+ list_add(&rspiocbp->list, &(pring->iocb_continueq));
-+ } else {
-+ list_add_tail(&rspiocbp->list,
-+ &(pring->iocb_continueq));
-+ }
-+ pring->iocb_continueq_cnt++;
-+
-+ /*
-+ * When the ulpLe field is set, the entire Command has been
-+ * received. Start by getting a pointer to the first iocb entry
-+ * in the chain.
-+ */
-+ if (irsp->ulpLe) {
-+ /*
-+ * By default, the driver expects to free all resources
-+ * associated with this iocb completion.
-+ */
-+ free_saveq = 1;
-+ saveq = list_entry(pring->iocb_continueq.next,
-+ struct lpfc_iocbq, list);
-+ irsp = &(saveq->iocb);
-+ list_del_init(&pring->iocb_continueq);
-+ pring->iocb_continueq_cnt = 0;
-+
-+ psli->slistat.iocbRsp[ringno]++;
-+
-+ if(irsp->ulpStatus) {
-+ /* Rsp ring <ringno> error: IOCB */
-+ lpfc_printf_log(phba,
-+ KERN_WARNING,
-+ LOG_SLI,
-+ "%d:0326 Rsp Ring %d error: IOCB Data: "
-+ "x%x x%x x%x x%x x%x x%x x%x x%x\n",
-+ phba->brd_no,
-+ ringno,
-+ irsp->un.ulpWord[0],
-+ irsp->un.ulpWord[1],
-+ irsp->un.ulpWord[2],
-+ irsp->un.ulpWord[3],
-+ irsp->un.ulpWord[4],
-+ irsp->un.ulpWord[5],
-+ *(((uint32_t *) irsp) + 6),
-+ *(((uint32_t *) irsp) + 7));
-+ }
-+
-+ /* Determine if IOCB command is a solicited or
-+ unsolicited event */
-+			type = lpfc_sli_iocb_cmd_type[(irsp->ulpCommand &
-+						       CMD_IOCB_MASK)];
-+ if (type == LPFC_SOL_IOCB) {
-+ spin_unlock_irqrestore(phba->host->host_lock,
-+ iflag);
-+ rc = lpfc_sli_process_sol_iocb(phba, pring,
-+ saveq);
-+ spin_lock_irqsave(phba->host->host_lock, iflag);
-+ /*
-+				 * If this solicited completion is an ELS
-+				 * command, don't free the resources now; the
-+				 * discovery tasklet does that later.
-+ */
-+ if (pring->ringno == LPFC_ELS_RING)
-+ free_saveq = 0;
-+ else
-+ free_saveq = 1;
-+
-+ } else if (type == LPFC_UNSOL_IOCB) {
-+ spin_unlock_irqrestore(phba->host->host_lock,
-+ iflag);
-+ rc = lpfc_sli_process_unsol_iocb(phba, pring,
-+ saveq);
-+ spin_lock_irqsave(phba->host->host_lock, iflag);
-+
-+ /*
-+				 * If this unsolicited completion is an ELS
-+				 * command, don't free the resources now; the
-+				 * discovery tasklet does that later.
-+ */
-+ if (pring->ringno == LPFC_ELS_RING)
-+ free_saveq = 0;
-+ else
-+ free_saveq = 1;
-+
-+ } else if (type == LPFC_ABORT_IOCB) {
-+ /* Solicited ABORT Responses */
-+ /* Based on the iotag field, get the cmd IOCB
-+ from the txcmplq */
-+ if ((irsp->ulpCommand != CMD_XRI_ABORTED_CX) &&
-+ ((cmdiocbp =
-+ lpfc_sli_ringtxcmpl_get(phba, pring,
-+ saveq, 0)))) {
-+ /* Call the specified completion
-+ routine */
-+ if (cmdiocbp->iocb_cmpl) {
-+ spin_unlock_irqrestore(
-+ phba->host->host_lock,
-+ iflag);
-+ (cmdiocbp->iocb_cmpl) (phba,
-+ cmdiocbp, saveq);
-+ spin_lock_irqsave(
-+ phba->host->host_lock,
-+ iflag);
-+ } else {
-+ mempool_free(cmdiocbp,
-+ phba->iocb_mem_pool);
-+ }
-+ }
-+ } else if (type == LPFC_UNKNOWN_IOCB) {
-+ if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
-+
-+ char adaptermsg[LPFC_MAX_ADPTMSG];
-+
-+ memset(adaptermsg, 0,
-+ LPFC_MAX_ADPTMSG);
-+ memcpy(&adaptermsg[0], (uint8_t *) irsp,
-+ MAX_MSG_DATA);
-+ dev_warn(&((phba->pcidev)->dev),
-+ "lpfc%d: %s",
-+ phba->brd_no, adaptermsg);
-+ } else {
-+ /* Unknown IOCB command */
-+ lpfc_printf_log(phba,
-+ KERN_ERR,
-+ LOG_SLI,
-+ "%d:0321 Unknown IOCB command "
-+ "Data: x%x x%x x%x x%x\n",
-+ phba->brd_no,
-+ irsp->ulpCommand,
-+ irsp->ulpStatus,
-+ irsp->ulpIoTag,
-+ irsp->ulpContext);
-+ }
-+ }
-+
-+ if (free_saveq) {
-+ /*
-+ * Free up iocb buffer chain for command just
-+ * processed
-+ */
-+ if (!list_empty(&pring->iocb_continueq)) {
-+ list_for_each_entry_safe(rspiocbp,
-+ next_iocb,
-+ &pring->iocb_continueq, list) {
-+ list_del_init(&rspiocbp->list);
-+ mempool_free(rspiocbp,
-+ phba->iocb_mem_pool);
-+ }
-+ }
-+ mempool_free( saveq, phba->iocb_mem_pool);
-+ }
-+ }
-+
-+ /* Entire Command has been received */
-+ entry = IOCB_ENTRY(pring->rspringaddr, pring->rspidx);
-+
-+ /* If the port response put pointer has not been updated, sync
-+		 * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
-+ * response put pointer.
-+ */
-+ if (pring->rspidx == portRspPut) {
-+ portRspPut = le32_to_cpu(pgp->rspPutInx);
-+ }
-+ } /* while (pring->rspidx != portRspPut) */
-+
-+ if ((rspiocbp != 0) && (mask & HA_R0RE_REQ)) {
-+ /* At least one response entry has been freed */
-+ psli->slistat.iocbRspFull[ringno]++;
-+ /* SET RxRE_RSP in Chip Att register */
-+ status = ((CA_R0ATT | CA_R0RE_RSP) << (ringno * 4));
-+ writel(status, phba->CAregaddr);
-+ readl(phba->CAregaddr); /* flush */
-+ }
-+ if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
-+ pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
-+ psli->slistat.iocbCmdEmpty[ringno]++;
-+ /*
-+ * Force update of the local copy of cmdGetInx
-+ */
-+ pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
-+ lpfc_sli_resume_iocb(phba, pring);
-+
-+ if ((psli->sliinit.ringinit[ringno].lpfc_sli_cmd_available))
-+ (psli->sliinit.ringinit[ringno].
-+ lpfc_sli_cmd_available) (phba, pring);
-+
-+ }
-+
-+ spin_unlock_irqrestore(phba->host->host_lock, iflag);
-+ return (rc);
-+}
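The ring handler accumulates response entries on iocb_continueq until one arrives with ulpLe set, and only then hands the whole chain to the solicited/unsolicited/abort paths. A compact sketch of that accumulate-until-last-entry pattern; the struct and names are illustrative stand-ins:

#include <stddef.h>

struct rsp_entry {
	int last;		/* set on the final entry of a command (like ulpLe) */
	struct rsp_entry *next;
};

/* Append e to the pending chain.  Return the complete chain when e is the
 * last entry of a command, otherwise return NULL and keep accumulating. */
static struct rsp_entry *collect(struct rsp_entry **chain, struct rsp_entry *e)
{
	struct rsp_entry **tail = chain;
	struct rsp_entry *done;

	while (*tail)
		tail = &(*tail)->next;
	e->next = NULL;
	*tail = e;

	if (!e->last)
		return NULL;
	done = *chain;
	*chain = NULL;		/* reset for the next command */
	return done;
}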
-+
-+static uint32_t
-+lpfc_intr_prep(struct lpfc_hba * phba)
-+{
-+ uint32_t ha_copy;
-+
-+ /* Ignore all interrupts during initialization. */
-+ if (phba->hba_state < LPFC_LINK_DOWN)
-+ return (0);
-+
-+ /* Read host attention register to determine interrupt source */
-+ ha_copy = readl(phba->HAregaddr);
-+
-+ /* Clear Attention Sources, except ERATT (to preserve status) & LATT
-+ * (ha_copy & ~(HA_ERATT | HA_LATT));
-+ */
-+ writel((ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
-+ readl(phba->HAregaddr); /* flush */
-+ return (ha_copy);
-+} /* lpfc_intr_prep */
-+
-+int
-+lpfc_sli_intr(struct lpfc_hba * phba)
-+{
-+ struct lpfc_sli *psli;
-+ struct lpfc_sli_ring *pring;
-+ uint32_t ha_copy;
-+ unsigned long status;
-+ int i;
-+ unsigned long iflag;
-+
-+ psli = &phba->sli;
-+ psli->slistat.sliIntr++;
-+
-+ /*
-+ * Call the HBA to see if it is interrupting. If not, don't claim
-+ * the interrupt
-+ */
-+ spin_lock_irqsave(phba->host->host_lock, iflag);
-+ ha_copy = lpfc_intr_prep(phba);
-+ if (!ha_copy) {
-+ spin_unlock_irqrestore(phba->host->host_lock, iflag);
-+ return (1);
-+ }
-+
-+ if (ha_copy & HA_ERATT) {
-+ /*
-+ * There was a link/board error. Read the status register to
-+ * retrieve the error event and process it.
-+ */
-+ psli->slistat.errAttnEvent++;
-+ status = readl(phba->HSregaddr);
-+ /* Clear Chip error bit */
-+ writel(HA_ERATT, phba->HAregaddr);
-+ readl(phba->HAregaddr); /* flush */
-+ /*
-+ All error attention handlers are posted to
-+ discovery tasklet
-+ */
-+
-+ lpfc_discq_post_event(phba, (void *)status, NULL,
-+ LPFC_EVT_ERR_ATTN);
-+ spin_unlock_irqrestore(phba->host->host_lock, iflag);
-+ return (0);
-+ }
-+ spin_unlock_irqrestore(phba->host->host_lock, iflag);
-+
-+ if (ha_copy & HA_MBATT) {
-+ /* There was a Mailbox event. */
-+ lpfc_sli_handle_mb_event(phba);
-+ }
-+
-+ if (ha_copy & HA_LATT) {
-+ /*
-+ * There was a link attention event. Provided the driver is in
-+ * a state to handle link events, handle this event.
-+ */
-+ if (psli->sliinit.sli_flag & LPFC_PROCESS_LA) {
-+ lpfc_handle_latt(phba);
-+ }
-+ }
-+
-+ /* Process all events on each ring */
-+ for (i = 0; i < psli->sliinit.num_rings; i++) {
-+ pring = &psli->ring[i];
-+ if ((ha_copy & HA_RXATT)
-+ || (pring->flag & LPFC_DEFERRED_RING_EVENT)) {
-+ if (pring->flag & LPFC_STOP_IOCB_MASK) {
-+ pring->flag |= LPFC_DEFERRED_RING_EVENT;
-+ } else {
-+ lpfc_sli_handle_ring_event(phba, pring,
-+ (ha_copy &
-+ HA_RXMASK));
-+ pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
-+ }
-+ }
-+ ha_copy = (ha_copy >> 4);
-+ }
-+
-+ return (0);
-+}
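The per-ring loop at the end of lpfc_sli_intr shifts ha_copy right by four bits each iteration, so every ring examines the same low-order attention bits. A small sketch of that nibble-per-ring decoding; the mask value and output are placeholders, not the hardware definition:

#include <stdio.h>

#define RING_ATT_MASK 0xf	/* illustrative: one attention nibble per ring */

static void dispatch_rings(unsigned int ha_copy, int num_rings)
{
	int ring;

	for (ring = 0; ring < num_rings; ring++) {
		if (ha_copy & RING_ATT_MASK)
			printf("ring %d has attention work\n", ring);
		ha_copy >>= 4;	/* next ring's bits move into the low nibble */
	}
}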
-+
-+static int
-+lpfc_sli_abort_iocb_ring(struct lpfc_hba * phba, struct lpfc_sli_ring * pring,
-+ uint32_t flag)
-+{
-+ struct lpfc_sli *psli;
-+ struct lpfc_iocbq *iocb, *next_iocb;
-+ struct lpfc_iocbq *abtsiocbp;
-+ IOCB_t *icmd = NULL, *cmd = NULL;
-+ int errcnt;
-+ uint16_t iotag;
-+
-+ psli = &phba->sli;
-+ errcnt = 0;
-+
-+ /* Error everything on txq and txcmplq
-+ * First do the txq.
-+ */
-+ list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
-+ list_del_init(&iocb->list);
-+ if (iocb->iocb_cmpl) {
-+ icmd = &iocb->iocb;
-+ icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
-+ icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
-+ (iocb->iocb_cmpl) (phba, iocb, iocb);
-+ } else {
-+ mempool_free( iocb, phba->iocb_mem_pool);
-+ }
-+ }
-+
-+ pring->txq_cnt = 0;
-+ INIT_LIST_HEAD(&(pring->txq));
-+
-+ /* Next issue ABTS for everything on the txcmplq */
-+ list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
-+ cmd = &iocb->iocb;
-+
-+ if (flag == LPFC_SLI_ABORT_IMED) {
-+ /*
-+			 * Immediate abort of IOCB: clear the fast_lookup entry,
-+			 * if any, dequeue it, and call the completion routine.
-+ */
-+ iotag = cmd->ulpIoTag;
-+ if (pring->fast_lookup &&
-+ iotag &&
-+ (iotag <
-+ psli->sliinit.ringinit[pring->ringno].fast_iotag))
-+ *(pring->fast_lookup + iotag) = NULL;
-+
-+ list_del_init(&iocb->list);
-+ pring->txcmplq_cnt--;
-+
-+ if (iocb->iocb_cmpl) {
-+ cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
-+ cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
-+ (iocb->iocb_cmpl) (phba, iocb, iocb);
-+ } else {
-+ mempool_free( iocb, phba->iocb_mem_pool);
-+ }
-+ continue;
-+ }
-+
-+ /* issue ABTS for this IOCB based on iotag */
-+
-+ if ((abtsiocbp = mempool_alloc(phba->iocb_mem_pool,
-+ GFP_ATOMIC)) == 0) {
-+ errcnt++;
-+ continue;
-+ }
-+ memset(abtsiocbp, 0, sizeof (struct lpfc_iocbq));
-+ icmd = &abtsiocbp->iocb;
-+
-+ icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
-+ icmd->un.acxri.abortContextTag = cmd->ulpContext;
-+ icmd->un.acxri.abortIoTag = cmd->ulpIoTag;
-+
-+ icmd->ulpLe = 1;
-+ icmd->ulpClass = cmd->ulpClass;
-+ if (phba->hba_state >= LPFC_LINK_UP) {
-+ icmd->ulpCommand = CMD_ABORT_XRI_CN;
-+ } else {
-+ icmd->ulpCommand = CMD_CLOSE_XRI_CN;
-+
-+ }
-+
-+ if (lpfc_sli_issue_iocb
-+ (phba, pring, abtsiocbp, 0) == IOCB_ERROR) {
-+ mempool_free(abtsiocbp, phba->iocb_mem_pool);
-+ errcnt++;
-+ continue;
-+ }
-+ /* The rsp ring completion will remove IOCB from txcmplq when
-+ * abort is read by HBA.
-+ */
-+ }
-+
-+ if (flag == LPFC_SLI_ABORT_IMED) {
-+ INIT_LIST_HEAD(&(pring->txcmplq));
-+ pring->txcmplq_cnt = 0;
-+ }
-+
-+ return (errcnt);
-+}
-+
-+int
-+lpfc_sli_brdreset(struct lpfc_hba * phba)
-+{
-+ MAILBOX_t *swpmb;
-+ struct lpfc_sli *psli;
-+ struct lpfc_sli_ring *pring;
-+ uint16_t cfg_value, skip_post;
-+ volatile uint32_t word0;
-+ int i;
-+ void *to_slim;
-+ struct lpfc_dmabuf *mp, *next_mp;
-+
-+ psli = &phba->sli;
-+
-+ /* A board reset must use REAL SLIM. */
-+ psli->sliinit.sli_flag &= ~LPFC_SLI2_ACTIVE;
-+
-+ word0 = 0;
-+ swpmb = (MAILBOX_t *) & word0;
-+ swpmb->mbxCommand = MBX_RESTART;
-+ swpmb->mbxHc = 1;
-+
-+ to_slim = phba->MBslimaddr;
-+ writel(*(uint32_t *) swpmb, to_slim);
-+ readl(to_slim); /* flush */
-+
-+ /* Only skip post after fc_ffinit is completed */
-+ if (phba->hba_state) {
-+ skip_post = 1;
-+ word0 = 1; /* This is really setting up word1 */
-+ } else {
-+ skip_post = 0;
-+ word0 = 0; /* This is really setting up word1 */
-+ }
-+ to_slim = (uint8_t *) phba->MBslimaddr + sizeof (uint32_t);
-+ writel(*(uint32_t *) swpmb, to_slim);
-+ readl(to_slim); /* flush */
-+
-+ /* Reset HBA */
-+ lpfc_printf_log(phba,
-+ KERN_INFO,
-+ LOG_SLI,
-+ "%d:0325 Reset HBA Data: x%x x%x\n",
-+ phba->brd_no,
-+ phba->hba_state,
-+ psli->sliinit.sli_flag);
-+
-+ /* Turn off SERR, PERR in PCI cmd register */
-+ phba->hba_state = LPFC_INIT_START;
-+
-+ /* perform board reset */
-+ phba->fc_eventTag = 0;
-+ phba->fc_myDID = 0;
-+ phba->fc_prevDID = 0;
-+
-+ /* Turn off parity checking and serr during the physical reset */
-+ pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
-+ pci_write_config_word(phba->pcidev, PCI_COMMAND,
-+ (cfg_value &
-+ ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
-+
-+ /* Now toggle INITFF bit in the Host Control Register */
-+ writel(HC_INITFF, phba->HCregaddr);
-+ mdelay(1);
-+ readl(phba->HCregaddr); /* flush */
-+ writel(0, phba->HCregaddr);
-+ readl(phba->HCregaddr); /* flush */
-+
-+ /* Restore PCI cmd register */
-+
-+ pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
-+ phba->hba_state = LPFC_INIT_START;
-+
-+ /* Initialize relevant SLI info */
-+ for (i = 0; i < psli->sliinit.num_rings; i++) {
-+ pring = &psli->ring[i];
-+ pring->flag = 0;
-+ pring->rspidx = 0;
-+ pring->next_cmdidx = 0;
-+ pring->local_getidx = 0;
-+ pring->cmdidx = 0;
-+ pring->missbufcnt = 0;
-+ }
-+
-+ if (skip_post) {
-+ mdelay(100);
-+ } else {
-+ mdelay(2000);
-+ }
-+
-+ /* Cleanup preposted buffers on the ELS ring */
-+ pring = &psli->ring[LPFC_ELS_RING];
-+ list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
-+ list_del(&mp->list);
-+ pring->postbufq_cnt--;
-+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
-+ kfree(mp);
-+ }
-+
-+ for (i = 0; i < psli->sliinit.num_rings; i++) {
-+ pring = &psli->ring[i];
-+ lpfc_sli_abort_iocb_ring(phba, pring, LPFC_SLI_ABORT_IMED);
-+ }
-+
-+ return (0);
-+}
-+
-+static void
-+lpfc_setup_slim_access(struct lpfc_hba *phba)
-+{
-+ phba->MBslimaddr = phba->slim_memmap_p;
-+ phba->HAregaddr = (uint32_t *) (phba->ctrl_regs_memmap_p) +
-+ HA_REG_OFFSET;
-+ phba->HCregaddr = (uint32_t *) (phba->ctrl_regs_memmap_p) +
-+ HC_REG_OFFSET;
-+ phba->CAregaddr = (uint32_t *) (phba->ctrl_regs_memmap_p) +
-+ CA_REG_OFFSET;
-+ phba->HSregaddr = (uint32_t *) (phba->ctrl_regs_memmap_p) +
-+ HS_REG_OFFSET;
-+ return;
-+}
-+
-+int
-+lpfc_sli_hba_setup(struct lpfc_hba * phba)
-+{
-+ struct lpfc_sli *psli;
-+ LPFC_MBOXQ_t *pmb;
-+ int read_rev_reset, i, rc;
-+ uint32_t status;
-+
-+ psli = &phba->sli;
-+
-+	/* Set up SLI interface for HBA register and HBA SLIM access */
-+ lpfc_setup_slim_access(phba);
-+
-+ /* Set board state to initialization started */
-+ phba->hba_state = LPFC_INIT_START;
-+ read_rev_reset = 0;
-+
-+ /* On some platforms/OS's, the driver can't rely on the state the
-+ * adapter may be in. For this reason, the driver is allowed to reset
-+ * the HBA before initialization.
-+ */
-+ if (lpfc_sli_reset_on_init) {
-+ phba->hba_state = 0; /* Don't skip post */
-+ lpfc_sli_brdreset(phba);
-+ phba->hba_state = LPFC_INIT_START;
-+
-+ /* Sleep for 2.5 sec */
-+ msleep(2500);
-+ }
-+
-+top:
-+ /* Read the HBA Host Status Register */
-+ status = readl(phba->HSregaddr);
-+
-+ /* Check status register to see what current state is */
-+ i = 0;
-+ while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
-+
-+		/* Check every 10ms for 5 retries, then every 500ms for 5, then
-+		 * every 2.5 sec for 5, then reset the board and check every
-+		 * 2.5 sec for the remaining 4.
-+ */
-+ if (i++ >= 20) {
-+ /* Adapter failed to init, timeout, status reg
-+ <status> */
-+ lpfc_printf_log(phba,
-+ KERN_ERR,
-+ LOG_INIT,
-+ "%d:0436 Adapter failed to init, "
-+ "timeout, status reg x%x\n",
-+ phba->brd_no,
-+ status);
-+ phba->hba_state = LPFC_HBA_ERROR;
-+ return -ETIMEDOUT;
-+ }
-+
-+ /* Check to see if any errors occurred during init */
-+ if (status & HS_FFERM) {
-+ /* ERROR: During chipset initialization */
-+ /* Adapter failed to init, chipset, status reg
-+ <status> */
-+ lpfc_printf_log(phba,
-+ KERN_ERR,
-+ LOG_INIT,
-+ "%d:0437 Adapter failed to init, "
-+ "chipset, status reg x%x\n",
-+ phba->brd_no,
-+ status);
-+ phba->hba_state = LPFC_HBA_ERROR;
-+ return -EIO;
-+ }
-+
-+ if (i <= 5) {
-+ msleep(10);
-+ } else if (i <= 10) {
-+ msleep(500);
-+ } else {
-+ msleep(2500);
-+ }
-+
-+ if (i == 15) {
-+ phba->hba_state = 0; /* Don't skip post */
-+ lpfc_sli_brdreset(phba);
-+ phba->hba_state = LPFC_INIT_START;
-+ }
-+ /* Read the HBA Host Status Register */
-+ status = readl(phba->HSregaddr);
-+ }
-+
-+ /* Check to see if any errors occurred during init */
-+ if (status & HS_FFERM) {
-+ /* ERROR: During chipset initialization */
-+ /* Adapter failed to init, chipset, status reg <status> */
-+ lpfc_printf_log(phba,
-+ KERN_ERR,
-+ LOG_INIT,
-+ "%d:0438 Adapter failed to init, chipset, "
-+ "status reg x%x\n",
-+ phba->brd_no,
-+ status);
-+ phba->hba_state = LPFC_HBA_ERROR;
-+ return -EIO;
-+ }
-+
-+ /* Clear all interrupt enable conditions */
-+ writel(0, phba->HCregaddr);
-+ readl(phba->HCregaddr); /* flush */
-+
-+ /* setup host attn register */
-+ writel(0xffffffff, phba->HAregaddr);
-+ readl(phba->HAregaddr); /* flush */
-+
-+ /* Get a Mailbox buffer to setup mailbox commands for HBA
-+ initialization */
-+ if ((pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
-+ GFP_ATOMIC)) == 0) {
-+ phba->hba_state = LPFC_HBA_ERROR;
-+ return -ENOMEM;
-+ }
-+
-+ /* Call pre CONFIG_PORT mailbox command initialization. A value of 0
-+	 * means the call was successful. Any nonzero value is a failure,
-+ * but if ERESTART is returned, the driver may reset the HBA and try
-+ * again.
-+ */
-+ if ((rc = lpfc_config_port_prep(phba))) {
-+ if ((rc == -ERESTART) && (read_rev_reset == 0)) {
-+ mempool_free( pmb, phba->mbox_mem_pool);
-+ phba->hba_state = 0; /* Don't skip post */
-+ lpfc_sli_brdreset(phba);
-+ phba->hba_state = LPFC_INIT_START;
-+ msleep(500);
-+ read_rev_reset = 1;
-+ goto top;
-+ }
-+ phba->hba_state = LPFC_HBA_ERROR;
-+ mempool_free( pmb, phba->mbox_mem_pool);
-+ return -ENXIO;
-+ }
-+
-+ /* Setup and issue mailbox CONFIG_PORT command */
-+ phba->hba_state = LPFC_INIT_MBX_CMDS;
-+ lpfc_config_port(phba, pmb);
-+ if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
-+ /* Adapter failed to init, mbxCmd <cmd> CONFIG_PORT,
-+ mbxStatus <status> */
-+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-+ "%d:0442 Adapter failed to init, mbxCmd x%x "
-+ "CONFIG_PORT, mbxStatus x%x Data: x%x\n",
-+ phba->brd_no, pmb->mb.mbxCommand,
-+ pmb->mb.mbxStatus, 0);
-+
-+		/* This clause gives the config_port call multiple
-+		   chances to succeed. */
-+ if (read_rev_reset == 0) {
-+ mempool_free( pmb, phba->mbox_mem_pool);
-+ phba->hba_state = 0; /* Don't skip post */
-+ lpfc_sli_brdreset(phba);
-+ phba->hba_state = LPFC_INIT_START;
-+ msleep(2500);
-+ read_rev_reset = 1;
-+ goto top;
-+ }
-+
-+ psli->sliinit.sli_flag &= ~LPFC_SLI2_ACTIVE;
-+ phba->hba_state = LPFC_HBA_ERROR;
-+ mempool_free( pmb, phba->mbox_mem_pool);
-+ return -ENXIO;
-+ }
-+
-+ if ((rc = lpfc_sli_ring_map(phba))) {
-+ phba->hba_state = LPFC_HBA_ERROR;
-+ mempool_free( pmb, phba->mbox_mem_pool);
-+ return -ENXIO;
-+ }
-+ psli->sliinit.sli_flag |= LPFC_PROCESS_LA;
-+
-+ /* Call post CONFIG_PORT mailbox command initialization. */
-+ if ((rc = lpfc_config_port_post(phba))) {
-+ phba->hba_state = LPFC_HBA_ERROR;
-+ mempool_free( pmb, phba->mbox_mem_pool);
-+ return -ENXIO;
-+ }
-+ mempool_free( pmb, phba->mbox_mem_pool);
-+ return 0;
-+}
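The init loop in lpfc_sli_hba_setup polls the host status register on an escalating schedule, with short delays first, longer ones later, and a board reset partway through, before declaring a timeout. A hedged sketch of just the delay schedule, matching the msleep values used in the loop above:

/* Delay, in milliseconds, before retry number 'attempt' (1-based). */
static unsigned int init_poll_delay_ms(int attempt)
{
	if (attempt <= 5)
		return 10;	/* quick retries while the chip settles */
	if (attempt <= 10)
		return 500;
	return 2500;		/* long waits for the final attempts */
}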
-+
-+
-+static void
-+lpfc_mbox_abort(struct lpfc_hba * phba)
-+{
-+ struct lpfc_sli *psli;
-+ LPFC_MBOXQ_t *pmbox;
-+ MAILBOX_t *mb;
-+
-+ psli = &phba->sli;
-+
-+ if (psli->mbox_active) {
-+ del_timer_sync(&psli->mbox_tmo);
-+ phba->work_hba_events &= ~WORKER_MBOX_TMO;
-+ pmbox = psli->mbox_active;
-+ mb = &pmbox->mb;
-+ psli->mbox_active = NULL;
-+ if (pmbox->mbox_cmpl) {
-+ mb->mbxStatus = MBX_NOT_FINISHED;
-+ (pmbox->mbox_cmpl) (phba, pmbox);
-+ }
-+ psli->sliinit.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
-+ }
-+
-+ /* Abort all the non active mailbox commands. */
-+ pmbox = lpfc_mbox_get(phba);
-+ while (pmbox) {
-+ mb = &pmbox->mb;
-+ if (pmbox->mbox_cmpl) {
-+ mb->mbxStatus = MBX_NOT_FINISHED;
-+ (pmbox->mbox_cmpl) (phba, pmbox);
-+ }
-+ pmbox = lpfc_mbox_get(phba);
-+ }
-+ return;
-+}
-+/*! lpfc_mbox_timeout
-+ *
-+ * \pre
-+ * \post
-+ * \param ptr Pointer to the per-adapter struct lpfc_hba, cast to unsigned long.
-+ * \return
-+ * void
-+ *
-+ * \b Description:
-+ *
-+ * This routine handles mailbox timeout events at timer interrupt context.
-+ */
-+void
-+lpfc_mbox_timeout(unsigned long ptr)
-+{
-+ struct lpfc_hba *phba;
-+ unsigned long iflag;
-+
-+ phba = (struct lpfc_hba *)ptr;
-+ spin_lock_irqsave(phba->host->host_lock, iflag);
-+ if (!(phba->work_hba_events & WORKER_MBOX_TMO)) {
-+ phba->work_hba_events |= WORKER_MBOX_TMO;
-+ if (phba->dpc_wait)
-+ up(phba->dpc_wait);
-+ }
-+ spin_unlock_irqrestore(phba->host->host_lock, iflag);
-+}
-+
-+void
-+lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
-+{
-+ struct lpfc_sli *psli;
-+ LPFC_MBOXQ_t *pmbox;
-+ MAILBOX_t *mb;
-+
-+ psli = &phba->sli;
-+ spin_lock_irq(phba->host->host_lock);
-+ if (!(phba->work_hba_events & WORKER_MBOX_TMO)) {
-+ spin_unlock_irq(phba->host->host_lock);
-+ return;
-+ }
-+
-+ phba->work_hba_events &= ~WORKER_MBOX_TMO;
-+
-+ pmbox = psli->mbox_active;
-+ mb = &pmbox->mb;
-+
-+ /* Mbox cmd <mbxCommand> timeout */
-+ lpfc_printf_log(phba,
-+ KERN_ERR,
-+ LOG_MBOX | LOG_SLI,
-+ "%d:0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
-+ phba->brd_no,
-+ mb->mbxCommand,
-+ phba->hba_state,
-+ psli->sliinit.sli_flag,
-+ psli->mbox_active);
-+
-+ if (psli->mbox_active == pmbox) {
-+ psli->mbox_active = NULL;
-+ if (pmbox->mbox_cmpl) {
-+ mb->mbxStatus = MBX_NOT_FINISHED;
-+ (pmbox->mbox_cmpl) (phba, pmbox);
-+ }
-+ psli->sliinit.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
-+ }
-+
-+ lpfc_mbox_abort(phba);
-+ spin_unlock_irq(phba->host->host_lock);
-+ return;
-+}
-+
-+
-+int
-+lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag)
-+{
-+ MAILBOX_t *mbox;
-+ MAILBOX_t *mb;
-+ struct lpfc_sli *psli;
-+ uint32_t status, evtctr;
-+ uint32_t ha_copy;
-+ int i;
-+ unsigned long drvr_flag = 0;
-+ volatile uint32_t word0, ldata;
-+ void *to_slim;
-+
-+ psli = &phba->sli;
-+ if (flag & MBX_POLL) {
-+ spin_lock_irqsave(phba->host->host_lock, drvr_flag);
-+ }
-+
-+ mb = &pmbox->mb;
-+ status = MBX_SUCCESS;
-+
-+ if (psli->sliinit.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
-+ /* Polling for a mbox command when another one is already active
-+ * is not allowed in SLI. Also, the driver must have established
-+ * SLI2 mode to queue and process multiple mbox commands.
-+ */
-+
-+ if (flag & MBX_POLL) {
-+ spin_unlock_irqrestore(phba->host->host_lock,
-+ drvr_flag);
-+
-+ /* Mbox command <mbxCommand> cannot issue */
-+ LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag)
-+ return (MBX_NOT_FINISHED);
-+ }
-+
-+ if (!(psli->sliinit.sli_flag & LPFC_SLI2_ACTIVE)) {
-+
-+ /* Mbox command <mbxCommand> cannot issue */
-+ LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag)
-+ return (MBX_NOT_FINISHED);
-+ }
-+
-+ /* Handle STOP IOCB processing flag. This is only meaningful
-+ * if we are not polling for mbox completion.
-+ */
-+ if (flag & MBX_STOP_IOCB) {
-+ flag &= ~MBX_STOP_IOCB;
-+ /* Now flag each ring */
-+ for (i = 0; i < psli->sliinit.num_rings; i++) {
-+ /* If the ring is active, flag it */
-+ if (psli->ring[i].cmdringaddr) {
-+ psli->ring[i].flag |=
-+ LPFC_STOP_IOCB_MBX;
-+ }
-+ }
-+ }
-+
-+ /* Another mailbox command is still being processed, queue this
-+ * command to be processed later.
-+ */
-+ lpfc_mbox_put(phba, pmbox);
-+
-+ /* Mbox cmd issue - BUSY */
-+ lpfc_printf_log(phba,
-+ KERN_INFO,
-+ LOG_MBOX | LOG_SLI,
-+ "%d:0308 Mbox cmd issue - BUSY Data: x%x x%x x%x x%x\n",
-+ phba->brd_no,
-+ mb->mbxCommand,
-+ phba->hba_state,
-+ psli->sliinit.sli_flag,
-+ flag);
-+
-+ psli->slistat.mboxBusy++;
-+ if (flag == MBX_POLL) {
-+ spin_unlock_irqrestore(phba->host->host_lock,
-+ drvr_flag);
-+ }
-+ return (MBX_BUSY);
-+ }
-+
-+ /* Handle STOP IOCB processing flag. This is only meaningful
-+ * if we are not polling for mbox completion.
-+ */
-+ if (flag & MBX_STOP_IOCB) {
-+ flag &= ~MBX_STOP_IOCB;
-+ if (flag == MBX_NOWAIT) {
-+ /* Now flag each ring */
-+ for (i = 0; i < psli->sliinit.num_rings; i++) {
-+ /* If the ring is active, flag it */
-+ if (psli->ring[i].cmdringaddr) {
-+ psli->ring[i].flag |=
-+ LPFC_STOP_IOCB_MBX;
-+ }
-+ }
-+ }
-+ }
-+
-+ psli->sliinit.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
-+
-+ /* If we are not polling, we MUST be in SLI2 mode */
-+ if (flag != MBX_POLL) {
-+ if (!(psli->sliinit.sli_flag & LPFC_SLI2_ACTIVE)) {
-+ psli->sliinit.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
-+
-+ /* Mbox command <mbxCommand> cannot issue */
-+ LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag);
-+ return (MBX_NOT_FINISHED);
-+ }
-+ /* timeout active mbox command */
-+ mod_timer(&psli->mbox_tmo, jiffies + HZ * LPFC_MBOX_TMO);
-+ }
-+
-+ /* Mailbox cmd <cmd> issue */
-+ lpfc_printf_log(phba,
-+ KERN_INFO,
-+ LOG_MBOX | LOG_SLI,
-+ "%d:0309 Mailbox cmd x%x issue Data: x%x x%x x%x\n",
-+ phba->brd_no,
-+ mb->mbxCommand,
-+ phba->hba_state,
-+ psli->sliinit.sli_flag,
-+ flag);
-+
-+ psli->slistat.mboxCmd++;
-+ evtctr = psli->slistat.mboxEvent;
-+
-+ /* next set own bit for the adapter and copy over command word */
-+ mb->mbxOwner = OWN_CHIP;
-+
-+ if (psli->sliinit.sli_flag & LPFC_SLI2_ACTIVE) {
-+
-+ /* First copy command data to host SLIM area */
-+ mbox = (MAILBOX_t *) psli->MBhostaddr;
-+ lpfc_sli_pcimem_bcopy((uint32_t *) mb, (uint32_t *) mbox,
-+ (sizeof (uint32_t) *
-+ (MAILBOX_CMD_WSIZE)));
-+
-+ } else {
-+ if (mb->mbxCommand == MBX_CONFIG_PORT) {
-+ /* copy command data into host mbox for cmpl */
-+ mbox = (MAILBOX_t *) psli->MBhostaddr;
-+ lpfc_sli_pcimem_bcopy((uint32_t *) mb,
-+ (uint32_t *) mbox,
-+ (sizeof (uint32_t) *
-+ (MAILBOX_CMD_WSIZE)));
-+ }
-+
-+ /* First copy mbox command data to HBA SLIM, skip past first
-+ word */
-+ to_slim = (uint8_t *) phba->MBslimaddr + sizeof (uint32_t);
-+ lpfc_memcpy_to_slim(to_slim, (void *)&mb->un.varWords[0],
-+ (MAILBOX_CMD_WSIZE - 1) * sizeof (uint32_t));
-+
-+ /* Next copy over first word, with mbxOwner set */
-+ ldata = *((volatile uint32_t *)mb);
-+ to_slim = phba->MBslimaddr;
-+ writel(ldata, to_slim);
-+ readl(to_slim); /* flush */
-+
-+ if (mb->mbxCommand == MBX_CONFIG_PORT) {
-+ /* switch over to host mailbox */
-+ psli->sliinit.sli_flag |= LPFC_SLI2_ACTIVE;
-+ }
-+ }
-+
-+ wmb();
-+	/* Interrupt the board to do it right away */
-+ writel(CA_MBATT, phba->CAregaddr);
-+ readl(phba->CAregaddr); /* flush */
-+
-+ switch (flag) {
-+ case MBX_NOWAIT:
-+ /* Don't wait for it to finish, just return */
-+ psli->mbox_active = pmbox;
-+ break;
-+
-+ case MBX_POLL:
-+ i = 0;
-+ psli->mbox_active = NULL;
-+ if (psli->sliinit.sli_flag & LPFC_SLI2_ACTIVE) {
-+ /* First read mbox status word */
-+ mbox = (MAILBOX_t *) psli->MBhostaddr;
-+ word0 = *((volatile uint32_t *)mbox);
-+ word0 = le32_to_cpu(word0);
-+ } else {
-+ /* First read mbox status word */
-+ word0 = readl(phba->MBslimaddr);
-+ }
-+
-+ /* Read the HBA Host Attention Register */
-+ ha_copy = readl(phba->HAregaddr);
-+
-+ /* Wait for command to complete */
-+ while (((word0 & OWN_CHIP) == OWN_CHIP)
-+ || !(ha_copy & HA_MBATT)) {
-+ if (i++ >= 5000) {
-+ psli->sliinit.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
-+ spin_unlock_irqrestore(phba->host->host_lock,
-+ drvr_flag);
-+ return (MBX_NOT_FINISHED);
-+ }
-+
-+ /* Check if we took a mbox interrupt while we were
-+ polling */
-+ if (((word0 & OWN_CHIP) != OWN_CHIP)
-+ && (evtctr != psli->slistat.mboxEvent))
-+ break;
-+
-+ /* Can be in interrupt context, do not sleep */
-+ /* (or might be called with interrupts disabled) */
-+ udelay(1000);
-+
-+
-+ if (psli->sliinit.sli_flag & LPFC_SLI2_ACTIVE) {
-+ /* First copy command data */
-+ mbox = (MAILBOX_t *) psli->MBhostaddr;
-+ word0 = *((volatile uint32_t *)mbox);
-+ word0 = le32_to_cpu(word0);
-+ if (mb->mbxCommand == MBX_CONFIG_PORT) {
-+ MAILBOX_t *slimmb;
-+ volatile uint32_t slimword0;
-+ /* Check real SLIM for any errors */
-+ slimword0 = readl(phba->MBslimaddr);
-+ slimmb = (MAILBOX_t *) & slimword0;
-+ if (((slimword0 & OWN_CHIP) != OWN_CHIP)
-+ && slimmb->mbxStatus) {
-+ psli->sliinit.sli_flag &=
-+ ~LPFC_SLI2_ACTIVE;
-+ word0 = slimword0;
-+ }
-+ }
-+ } else {
-+ /* First copy command data */
-+ word0 = readl(phba->MBslimaddr);
-+ }
-+ /* Read the HBA Host Attention Register */
-+ ha_copy = readl(phba->HAregaddr);
-+ }
-+
-+ if (psli->sliinit.sli_flag & LPFC_SLI2_ACTIVE) {
-+ /* First copy command data */
-+ mbox = (MAILBOX_t *) psli->MBhostaddr;
-+ /* copy results back to user */
-+ lpfc_sli_pcimem_bcopy((uint32_t *) mbox,
-+ (uint32_t *) mb,
-+ (sizeof (uint32_t) *
-+ MAILBOX_CMD_WSIZE));
-+ } else {
-+ /* First copy command data */
-+ lpfc_memcpy_from_slim((void *)mb,
-+ phba->MBslimaddr,
-+ sizeof (uint32_t) * (MAILBOX_CMD_WSIZE));
-+ if ((mb->mbxCommand == MBX_DUMP_MEMORY) &&
-+ pmbox->context2) {
-+ lpfc_memcpy_from_slim((void *)pmbox->context2,
-+ phba->MBslimaddr + DMP_RSP_OFFSET,
-+ mb->un.varDmp.word_cnt);
-+ }
-+ }
-+
-+ writel(HA_MBATT, phba->HAregaddr);
-+ readl(phba->HAregaddr); /* flush */
-+
-+ psli->sliinit.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
-+ status = mb->mbxStatus;
-+ }
-+
-+ if (flag == MBX_POLL) {
-+ spin_unlock_irqrestore(phba->host->host_lock, drvr_flag);
-+ }
-+ return (status);
-+}
-+
-+static struct lpfc_iocbq *
-+lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
-+ struct lpfc_iocbq ** piocb)
-+{
-+ struct lpfc_iocbq * nextiocb;
-+
-+ nextiocb = lpfc_sli_ringtx_get(phba, pring);
-+ if (!nextiocb) {
-+ nextiocb = *piocb;
-+ *piocb = NULL;
-+ }
-+
-+ return nextiocb;
-+}
-+
-+int
-+lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
-+ struct lpfc_iocbq *piocb, uint32_t flag)
-+{
-+ struct lpfc_sli *psli = &phba->sli;
-+ int ringno = pring->ringno;
-+ struct lpfc_iocbq *nextiocb;
-+ IOCB_t *iocb;
-+
-+ /*
-+ * We should never get an IOCB if we are in a < LINK_DOWN state
-+ */
-+ if (unlikely(phba->hba_state < LPFC_LINK_DOWN))
-+ return IOCB_ERROR;
-+
-+ /*
-+ * Check to see if we are blocking IOCB processing because of an
-+ * outstanding mbox command.
-+ */
-+ if (unlikely(pring->flag & LPFC_STOP_IOCB_MBX))
-+ goto iocb_busy;
-+
-+ if (unlikely(phba->hba_state == LPFC_LINK_DOWN)) {
-+ /*
-+ * Only CREATE_XRI, CLOSE_XRI, ABORT_XRI, and QUE_RING_BUF
-+ * can be issued if the link is not up.
-+ */
-+ switch (piocb->iocb.ulpCommand) {
-+ case CMD_QUE_RING_BUF_CN:
-+ case CMD_QUE_RING_BUF64_CN:
-+ /*
-+ * For IOCBs, like QUE_RING_BUF, that have no rsp ring
-+ * completion, iocb_cmpl MUST be 0.
-+ */
-+ if (piocb->iocb_cmpl)
-+ piocb->iocb_cmpl = NULL;
-+ /*FALLTHROUGH*/
-+ case CMD_CREATE_XRI_CR:
-+ break;
-+ default:
-+ goto iocb_busy;
-+ }
-+
-+ /*
-+ * For FCP commands, we must be in a state where we can process link
-+ * attention events.
-+ */
-+ } else if (unlikely(pring->ringno == psli->fcp_ring &&
-+ !(psli->sliinit.sli_flag & LPFC_PROCESS_LA)))
-+ goto iocb_busy;
-+
-+ /*
-+ * Check to see if this is a high priority command.
-+ * If so bypass tx queue processing.
-+ */
-+ if (unlikely((flag & SLI_IOCB_HIGH_PRIORITY) &&
-+ (iocb = lpfc_sli_next_iocb_slot(phba, pring)))) {
-+ if (lpfc_sli_submit_iocb(phba, pring, iocb, piocb))
-+ goto iocb_busy;
-+ piocb = NULL;
-+ }
-+
-+ while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
-+ (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
-+ if (lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb))
-+ break;
-+
-+ if (iocb)
-+ lpfc_sli_update_ring(phba, pring);
-+ else
-+ lpfc_sli_update_full_ring(phba, pring);
-+
-+ if (!piocb)
-+ return IOCB_SUCCESS;
-+
-+ goto out_busy;
-+
-+ iocb_busy:
-+ psli->slistat.iocbCmdDelay[ringno]++;
-+
-+ out_busy:
-+
-+ if (!(flag & SLI_IOCB_RET_IOCB)) {
-+ lpfc_sli_ringtx_put(phba, pring, piocb);
-+ return IOCB_SUCCESS;
-+ }
-+
-+ return IOCB_BUSY;
-+}
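-+
-+/*
-+ * Caller sketch (illustrative, hypothetical caller): with SLI_IOCB_RET_IOCB
-+ * set, a busy ring hands the iocb back as IOCB_BUSY instead of parking it
-+ * on the txq; IOCB_ERROR means the link or HBA state does not allow the
-+ * command at all.  One possible recovery is to give the iocb back to the
-+ * driver's pool:
-+ *
-+ *	rc = lpfc_sli_issue_iocb(phba, pring, piocb, SLI_IOCB_RET_IOCB);
-+ *	if (rc != IOCB_SUCCESS)
-+ *		mempool_free(piocb, phba->iocb_mem_pool);
-+ */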
-+
-+int
-+lpfc_sli_queue_setup(struct lpfc_hba * phba)
-+{
-+ struct lpfc_sli *psli;
-+ struct lpfc_sli_ring *pring;
-+ int i, cnt;
-+
-+ psli = &phba->sli;
-+ INIT_LIST_HEAD(&psli->mboxq);
-+ /* Initialize list headers for txq and txcmplq as double linked lists */
-+ for (i = 0; i < psli->sliinit.num_rings; i++) {
-+ pring = &psli->ring[i];
-+ pring->ringno = i;
-+ pring->next_cmdidx = 0;
-+ pring->local_getidx = 0;
-+ pring->cmdidx = 0;
-+ INIT_LIST_HEAD(&pring->txq);
-+ INIT_LIST_HEAD(&pring->txcmplq);
-+ INIT_LIST_HEAD(&pring->iocb_continueq);
-+ INIT_LIST_HEAD(&pring->postbufq);
-+ cnt = psli->sliinit.ringinit[i].fast_iotag;
-+ if (cnt) {
-+ pring->fast_lookup =
-+ kmalloc(cnt * sizeof (struct lpfc_iocbq *),
-+ GFP_KERNEL);
-+ if (pring->fast_lookup == 0) {
-+ return (0);
-+ }
-+ memset((char *)pring->fast_lookup, 0,
-+ cnt * sizeof (struct lpfc_iocbq *));
-+ }
-+ }
-+ return (1);
-+}
-+
-+int
-+lpfc_sli_hba_down(struct lpfc_hba * phba)
-+{
-+ struct lpfc_sli *psli;
-+ struct lpfc_sli_ring *pring;
-+ LPFC_MBOXQ_t *pmb;
-+ struct lpfc_iocbq *iocb, *next_iocb;
-+ IOCB_t *icmd = NULL;
-+ int i;
-+
-+ psli = &phba->sli;
-+ lpfc_hba_down_prep(phba);
-+
-+ for (i = 0; i < psli->sliinit.num_rings; i++) {
-+ pring = &psli->ring[i];
-+ pring->flag |= LPFC_DEFERRED_RING_EVENT;
-+
-+ /*
-+ * Error everything on the txq since these iocbs have not been
-+ * given to the FW yet.
-+ */
-+ pring->txq_cnt = 0;
-+
-+ list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
-+ list_del_init(&iocb->list);
-+ if (iocb->iocb_cmpl) {
-+ icmd = &iocb->iocb;
-+ icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
-+ icmd->un.ulpWord[4] = IOERR_SLI_DOWN;
-+ (iocb->iocb_cmpl) (phba, iocb, iocb);
-+ } else {
-+ mempool_free( iocb, phba->iocb_mem_pool);
-+ }
-+ }
-+
-+ INIT_LIST_HEAD(&(pring->txq));
-+
-+ if (pring->fast_lookup) {
-+ kfree(pring->fast_lookup);
-+ pring->fast_lookup = NULL;
-+ }
-+
-+ }
-+
-+ /* Return any active mbox cmds */
-+ del_timer_sync(&psli->mbox_tmo);
-+ phba->work_hba_events &= ~WORKER_MBOX_TMO;
-+ if ((psli->mbox_active)) {
-+ pmb = psli->mbox_active;
-+ pmb->mb.mbxStatus = MBX_NOT_FINISHED;
-+ if (pmb->mbox_cmpl)
-+ pmb->mbox_cmpl(phba,pmb);
-+ }
-+ psli->sliinit.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
-+ psli->mbox_active = NULL;
-+
-+ /* Return any pending mbox cmds */
-+ while ((pmb = lpfc_mbox_get(phba)) != NULL) {
-+ pmb->mb.mbxStatus = MBX_NOT_FINISHED;
-+ if (pmb->mbox_cmpl)
-+ pmb->mbox_cmpl(phba,pmb);
-+ }
-+
-+ INIT_LIST_HEAD(&psli->mboxq);
-+
-+ /*
-+ * Provided the hba is not in an error state, reset it. It is not
-+ * capable of IO anymore.
-+ */
-+ if (phba->hba_state != LPFC_HBA_ERROR) {
-+ phba->hba_state = LPFC_INIT_START;
-+ lpfc_sli_brdreset(phba);
-+ }
-+
-+ return 1;
-+}
-+
-+void
-+lpfc_sli_pcimem_bcopy(uint32_t * src, uint32_t * dest, uint32_t cnt)
-+{
-+ uint32_t ldata;
-+ int i;
-+
-+ for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
-+ ldata = *src++;
-+ ldata = le32_to_cpu(ldata);
-+ *dest++ = ldata;
-+ }
-+}
-+
-+int
-+lpfc_sli_ringpostbuf_put(struct lpfc_hba * phba, struct lpfc_sli_ring * pring,
-+ struct lpfc_dmabuf * mp)
-+{
-+ /* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up
-+ later */
-+ list_add_tail(&mp->list, &pring->postbufq);
-+
-+ pring->postbufq_cnt++;
-+ return 0;
-+}
-+
-+
-+struct lpfc_dmabuf *
-+lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
-+ dma_addr_t phys)
-+{
-+ struct lpfc_dmabuf *mp, *next_mp;
-+ struct list_head *slp = &pring->postbufq;
-+
-+ /* Search postbufq, from the beginning, looking for a match on phys */
-+ list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
-+ if (mp->phys == phys) {
-+ list_del_init(&mp->list);
-+ pring->postbufq_cnt--;
-+ return mp;
-+ }
-+ }
-+
-+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-+ "%d:0410 Cannot find virtual addr for mapped buf on "
-+ "ring %d Data x%llx x%p x%p x%x\n",
-+ phba->brd_no, pring->ringno, (unsigned long long)phys,
-+ slp->next, slp->prev, pring->postbufq_cnt);
-+ return NULL;
-+}
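-+
-+/*
-+ * Usage sketch (hypothetical caller): a ring owner posts a mapped DMA
-+ * buffer before handing its physical address to the HBA, then reclaims
-+ * the matching lpfc_dmabuf when that address comes back in an unsolicited
-+ * completion.  "mp" and "returned_phys" are assumed to be a buffer the
-+ * caller allocated/mapped and the address reported by the HBA.
-+ *
-+ *	lpfc_sli_ringpostbuf_put(phba, pring, mp);
-+ *	...
-+ *	mp = lpfc_sli_ringpostbuf_get(phba, pring, returned_phys);
-+ *
-+ * A NULL return means the address was never posted on this ring (the
-+ * routine above logs error 0410 in that case).
-+ */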
-+
-+uint32_t
-+lpfc_sli_next_iotag(struct lpfc_hba * phba, struct lpfc_sli_ring * pring)
-+{
-+ LPFC_RING_INIT_t *pringinit;
-+ struct lpfc_sli *psli;
-+ uint32_t search_start;
-+
-+ psli = &phba->sli;
-+ pringinit = &psli->sliinit.ringinit[pring->ringno];
-+
-+ if (pring->fast_lookup == NULL) {
-+ pringinit->iotag_ctr++;
-+ if (pringinit->iotag_ctr >= pringinit->iotag_max)
-+ pringinit->iotag_ctr = 1;
-+ return pringinit->iotag_ctr;
-+ }
-+
-+ search_start = pringinit->iotag_ctr;
-+
-+ do {
-+ pringinit->iotag_ctr++;
-+ if (pringinit->iotag_ctr >= pringinit->fast_iotag)
-+ pringinit->iotag_ctr = 1;
-+
-+ if(*(pring->fast_lookup + pringinit->iotag_ctr) == NULL)
-+ return pringinit->iotag_ctr;
-+
-+ } while (pringinit->iotag_ctr != search_start);
-+
-+ /*
-+ * Outstanding I/O count for ring <ringno> is at max <fast_iotag>
-+ */
-+ lpfc_printf_log(phba,
-+ KERN_ERR,
-+ LOG_SLI,
-+ "%d:0318 Outstanding I/O count for ring %d is at max x%x\n",
-+ phba->brd_no,
-+ pring->ringno,
-+ psli->sliinit.ringinit[pring->ringno].fast_iotag);
-+ return (0);
-+}
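-+
-+/*
-+ * Caller sketch (illustrative): an iotag of 0 means every fast_lookup
-+ * slot on this ring is already in use, so the command should be held
-+ * back rather than issued.
-+ *
-+ *	iotag = lpfc_sli_next_iotag(phba, pring);
-+ *	if (iotag == 0)
-+ *		return IOCB_BUSY;
-+ *	piocb->iocb.ulpIoTag = iotag;
-+ */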
-+
-+static void
-+lpfc_sli_abort_elsreq_cmpl(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
-+ struct lpfc_iocbq * rspiocb)
-+{
-+ struct lpfc_dmabuf *buf_ptr, *buf_ptr1;
-+ /* Free the resources associated with the ELS_REQUEST64 IOCB the driver
-+ * just aborted.
-+ * In this case, context2 = cmd, context2->next = rsp, context3 = bpl
-+ */
-+ if (cmdiocb->context2) {
-+ buf_ptr1 = (struct lpfc_dmabuf *) cmdiocb->context2;
-+
-+ /* Free the response IOCB before completing the abort
-+ command. */
-+ if (!list_empty(&buf_ptr1->list)) {
-+
-+ buf_ptr = list_entry(buf_ptr1->list.next,
-+ struct lpfc_dmabuf, list);
-+
-+ list_del(&buf_ptr->list);
-+ lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
-+ kfree(buf_ptr);
-+ }
-+ lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys);
-+ kfree(buf_ptr1);
-+ }
-+
-+ if (cmdiocb->context3) {
-+ buf_ptr = (struct lpfc_dmabuf *) cmdiocb->context3;
-+ lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
-+ kfree(buf_ptr);
-+ }
-+ mempool_free( cmdiocb, phba->iocb_mem_pool);
-+ return;
-+}
-+
-+int
-+lpfc_sli_issue_abort_iotag32(struct lpfc_hba * phba,
-+ struct lpfc_sli_ring * pring,
-+ struct lpfc_iocbq * cmdiocb)
-+{
-+ struct lpfc_sli *psli;
-+ struct lpfc_iocbq *abtsiocbp;
-+ IOCB_t *icmd = NULL;
-+ IOCB_t *iabt = NULL;
-+ uint32_t iotag32;
-+
-+ psli = &phba->sli;
-+
-+ /* issue ABTS for this IOCB based on iotag */
-+ if ((abtsiocbp = mempool_alloc(phba->iocb_mem_pool, GFP_ATOMIC)) == 0) {
-+ return (0);
-+ }
-+ memset(abtsiocbp, 0, sizeof (struct lpfc_iocbq));
-+ iabt = &abtsiocbp->iocb;
-+
-+ icmd = &cmdiocb->iocb;
-+ switch (icmd->ulpCommand) {
-+ case CMD_ELS_REQUEST64_CR:
-+ iotag32 = icmd->un.elsreq64.bdl.ulpIoTag32;
-+ /* Even though we abort the ELS command, the firmware may access
-+ * the BPL or other resources before it processes our
-+ * ABORT_MXRI64. Thus we must delay reusing the cmdiocb
-+ * resources till the actual abort request completes.
-+ */
-+ abtsiocbp->context1 = (void *)((unsigned long)icmd->ulpCommand);
-+ abtsiocbp->context2 = cmdiocb->context2;
-+ abtsiocbp->context3 = cmdiocb->context3;
-+ cmdiocb->context2 = NULL;
-+ cmdiocb->context3 = NULL;
-+ abtsiocbp->iocb_cmpl = lpfc_sli_abort_elsreq_cmpl;
-+ break;
-+ default:
-+ mempool_free( abtsiocbp, phba->iocb_mem_pool);
-+ return (0);
-+ }
-+
-+ iabt->un.amxri.abortType = ABORT_TYPE_ABTS;
-+ iabt->un.amxri.iotag32 = iotag32;
-+
-+ iabt->ulpLe = 1;
-+ iabt->ulpClass = CLASS3;
-+ iabt->ulpCommand = CMD_ABORT_MXRI64_CN;
-+
-+ if (lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0) == IOCB_ERROR) {
-+ mempool_free( abtsiocbp, phba->iocb_mem_pool);
-+ return (0);
-+ }
-+
-+ return (1);
-+}
-+
-+void
-+lpfc_sli_abort_fcp_cmpl(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
-+ struct lpfc_iocbq * rspiocb)
-+{
-+ /*
-+ * Just free the iocbq resources back to the memory pool. This was an
-+ * abort command and has no other outstanding resources associated with
-+ * it.
-+ */
-+ mempool_free(cmdiocb, phba->iocb_mem_pool);
-+}
-+
-+
-+int
-+lpfc_sli_abort_iocb_ctx(struct lpfc_hba * phba, struct lpfc_sli_ring * pring,
-+ uint32_t ctx)
-+{
-+ struct lpfc_sli *psli;
-+ struct lpfc_iocbq *iocb, *next_iocb;
-+ struct lpfc_iocbq *abtsiocbp;
-+ IOCB_t *icmd = NULL, *cmd = NULL;
-+ int errcnt;
-+
-+ psli = &phba->sli;
-+ errcnt = 0;
-+
-+ /* Error matching iocb on txq or txcmplq
-+ * First check the txq.
-+ */
-+ list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
-+ cmd = &iocb->iocb;
-+ if (cmd->ulpContext != ctx) {
-+ continue;
-+ }
-+
-+ list_del_init(&iocb->list);
-+ pring->txq_cnt--;
-+ if (iocb->iocb_cmpl) {
-+ icmd = &iocb->iocb;
-+ icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
-+ icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
-+ (iocb->iocb_cmpl) (phba, iocb, iocb);
-+ } else {
-+ mempool_free( iocb, phba->iocb_mem_pool);
-+ }
-+ }
-+
-+ /* Next check the txcmplq */
-+ list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
-+ cmd = &iocb->iocb;
-+ if (cmd->ulpContext != ctx) {
-+ continue;
-+ }
-+
-+ /* issue ABTS for this IOCB based on iotag */
-+ if ((abtsiocbp = mempool_alloc(phba->iocb_mem_pool,
-+ GFP_ATOMIC)) == 0) {
-+ errcnt++;
-+ continue;
-+ }
-+ memset(abtsiocbp, 0, sizeof (struct lpfc_iocbq));
-+ icmd = &abtsiocbp->iocb;
-+
-+ icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
-+ icmd->un.acxri.abortContextTag = cmd->ulpContext;
-+ icmd->un.acxri.abortIoTag = cmd->ulpIoTag;
-+
-+ icmd->ulpLe = 1;
-+ icmd->ulpClass = cmd->ulpClass;
-+ abtsiocbp->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
-+ if (phba->hba_state >= LPFC_LINK_UP) {
-+ icmd->ulpCommand = CMD_ABORT_XRI_CN;
-+ } else {
-+ icmd->ulpCommand = CMD_CLOSE_XRI_CN;
-+ }
-+
-+ if (lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0) ==
-+ IOCB_ERROR) {
-+ mempool_free( abtsiocbp, phba->iocb_mem_pool);
-+ errcnt++;
-+ continue;
-+ }
-+ /* The rsp ring completion will remove IOCB from txcmplq when
-+ * abort is read by HBA.
-+ */
-+ }
-+ return (errcnt);
-+}
-+
-+int
-+lpfc_sli_sum_iocb_host(struct lpfc_hba * phba,
-+ struct lpfc_sli_ring * pring)
-+{
-+ struct lpfc_sli *psli;
-+ struct lpfc_iocbq *iocb, *next_iocb;
-+ IOCB_t *cmd = NULL;
-+ struct lpfc_scsi_buf *lpfc_cmd;
-+ int sum;
-+
-+ psli = &phba->sli;
-+ sum = 0;
-+
-+ /* Error matching iocb on txq or txcmplq
-+ * First check the txq.
-+ */
-+ list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
-+ cmd = &iocb->iocb;
-+
-+ /* Must be a FCP command */
-+ if ((cmd->ulpCommand != CMD_FCP_ICMND64_CR) &&
-+ (cmd->ulpCommand != CMD_FCP_IWRITE64_CR) &&
-+ (cmd->ulpCommand != CMD_FCP_IREAD64_CR)) {
-+ continue;
-+ }
-+
-+ /* context1 MUST be a struct lpfc_scsi_buf */
-+ lpfc_cmd = (struct lpfc_scsi_buf *) (iocb->context1);
-+ if (lpfc_cmd == 0) {
-+ continue;
-+ }
-+ sum++;
-+ }
-+
-+ /* Next check the txcmplq */
-+ list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
-+ cmd = &iocb->iocb;
-+
-+ /* Must be a FCP command */
-+ if ((cmd->ulpCommand != CMD_FCP_ICMND64_CR) &&
-+ (cmd->ulpCommand != CMD_FCP_IWRITE64_CR) &&
-+ (cmd->ulpCommand != CMD_FCP_IREAD64_CR)) {
-+ continue;
-+ }
-+
-+ /* context1 MUST be a struct lpfc_scsi_buf */
-+ lpfc_cmd = (struct lpfc_scsi_buf *) (iocb->context1);
-+ if (lpfc_cmd == 0) {
-+ continue;
-+ }
-+ sum++;
-+ }
-+ return (sum);
-+}
-+
-+int
-+lpfc_sli_abort_iocb_host(struct lpfc_hba * phba,
-+ struct lpfc_sli_ring * pring, int flag)
-+{
-+ struct lpfc_sli *psli;
-+ struct lpfc_iocbq *iocb, *next_iocb;
-+ struct lpfc_iocbq *abtsiocbp;
-+ IOCB_t *icmd = NULL, *cmd = NULL;
-+ struct lpfc_scsi_buf *lpfc_cmd;
-+ int errcnt;
-+
-+ psli = &phba->sli;
-+ errcnt = 0;
-+
-+ /* Error matching iocb on txq or txcmplq
-+ * First check the txq.
-+ */
-+ if(flag & LPFC_ABORT_TXQ) {
-+ list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
-+ cmd = &iocb->iocb;
-+
-+ /* Must be a FCP command */
-+ if ((cmd->ulpCommand != CMD_FCP_ICMND64_CR) &&
-+ (cmd->ulpCommand != CMD_FCP_IWRITE64_CR) &&
-+ (cmd->ulpCommand != CMD_FCP_IREAD64_CR)) {
-+ continue;
-+ }
-+
-+ /* context1 MUST be a struct lpfc_scsi_buf */
-+ lpfc_cmd = (struct lpfc_scsi_buf *) (iocb->context1);
-+ if (lpfc_cmd == 0) {
-+ continue;
-+ }
-+
-+ list_del_init(&iocb->list);
-+ pring->txq_cnt--;
-+ if (iocb->iocb_cmpl) {
-+ icmd = &iocb->iocb;
-+ icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
-+ icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
-+ (iocb->iocb_cmpl) (phba, iocb, iocb);
-+ } else {
-+ mempool_free( iocb, phba->iocb_mem_pool);
-+ }
-+ }
-+ }
-+
-+ if(flag & LPFC_ABORT_TXCMPLQ) {
-+ /* Next check the txcmplq */
-+ list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq,
-+ list) {
-+ cmd = &iocb->iocb;
-+
-+ /* Must be a FCP command */
-+ if ((cmd->ulpCommand != CMD_FCP_ICMND64_CR) &&
-+ (cmd->ulpCommand != CMD_FCP_IWRITE64_CR) &&
-+ (cmd->ulpCommand != CMD_FCP_IREAD64_CR)) {
-+ continue;
-+ }
-+
-+ /* context1 MUST be a struct lpfc_scsi_buf */
-+ lpfc_cmd = (struct lpfc_scsi_buf *) (iocb->context1);
-+ if (lpfc_cmd == 0) {
-+ continue;
-+ }
-+
-+ /* issue ABTS for this IOCB based on iotag */
-+ if ((abtsiocbp = mempool_alloc(phba->iocb_mem_pool,
-+ GFP_ATOMIC)) == 0) {
-+ errcnt++;
-+ continue;
-+ }
-+ memset(abtsiocbp, 0, sizeof (struct lpfc_iocbq));
-+ icmd = &abtsiocbp->iocb;
-+
-+ icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
-+ icmd->un.acxri.abortContextTag = cmd->ulpContext;
-+ icmd->un.acxri.abortIoTag = cmd->ulpIoTag;
-+
-+ icmd->ulpLe = 1;
-+ icmd->ulpClass = cmd->ulpClass;
-+ abtsiocbp->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
-+ if (phba->hba_state >= LPFC_LINK_UP) {
-+ icmd->ulpCommand = CMD_ABORT_XRI_CN;
-+ } else {
-+ icmd->ulpCommand = CMD_CLOSE_XRI_CN;
-+ }
-+
-+ if (lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0) ==
-+ IOCB_ERROR) {
-+ mempool_free( abtsiocbp, phba->iocb_mem_pool);
-+ errcnt++;
-+ continue;
-+ }
-+ /* The rsp ring completion will remove IOCB from
-+ * txcmplq when abort is read by HBA.
-+ */
-+ }
-+ }
-+ return (errcnt);
-+}
-+
-+int
-+lpfc_sli_sum_iocb_lun(struct lpfc_hba * phba,
-+ struct lpfc_sli_ring * pring,
-+ uint16_t scsi_target, uint64_t scsi_lun)
-+{
-+ struct lpfc_sli *psli;
-+ struct lpfc_iocbq *iocb, *next_iocb;
-+ IOCB_t *cmd = NULL;
-+ struct lpfc_scsi_buf *lpfc_cmd;
-+ int sum;
-+
-+ psli = &phba->sli;
-+ sum = 0;
-+
-+ /* Error matching iocb on txq or txcmplq
-+ * First check the txq.
-+ */
-+ list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
-+ cmd = &iocb->iocb;
-+
-+ /* Must be a FCP command */
-+ if ((cmd->ulpCommand != CMD_FCP_ICMND64_CR) &&
-+ (cmd->ulpCommand != CMD_FCP_IWRITE64_CR) &&
-+ (cmd->ulpCommand != CMD_FCP_IREAD64_CR)) {
-+ continue;
-+ }
-+
-+ /* context1 MUST be a struct lpfc_scsi_buf */
-+ lpfc_cmd = (struct lpfc_scsi_buf *) (iocb->context1);
-+ if ((lpfc_cmd == 0) ||
-+ (lpfc_cmd->target == 0) ||
-+ (lpfc_cmd->target->scsi_id != scsi_target) ||
-+ (lpfc_cmd->lun != scsi_lun)) {
-+ continue;
-+ }
-+ sum++;
-+ }
-+
-+ /* Next check the txcmplq */
-+ list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
-+ cmd = &iocb->iocb;
-+
-+ /* Must be a FCP command */
-+ if ((cmd->ulpCommand != CMD_FCP_ICMND64_CR) &&
-+ (cmd->ulpCommand != CMD_FCP_IWRITE64_CR) &&
-+ (cmd->ulpCommand != CMD_FCP_IREAD64_CR)) {
-+ continue;
-+ }
-+
-+ /* context1 MUST be a struct lpfc_scsi_buf */
-+ lpfc_cmd = (struct lpfc_scsi_buf *) (iocb->context1);
-+ if ((lpfc_cmd == 0) ||
-+ (lpfc_cmd->target == 0) ||
-+ (lpfc_cmd->target->scsi_id != scsi_target) ||
-+ (lpfc_cmd->lun != scsi_lun)) {
-+ continue;
-+ }
-+
-+ sum++;
-+ }
-+ return (sum);
-+}
-+
-+int
-+lpfc_sli_abort_iocb_lun(struct lpfc_hba * phba,
-+ struct lpfc_sli_ring * pring,
-+ uint16_t scsi_target, uint64_t scsi_lun, int flag)
-+{
-+ struct lpfc_sli *psli;
-+ struct lpfc_iocbq *iocb, *next_iocb;
-+ struct lpfc_iocbq *abtsiocbp;
-+ IOCB_t *icmd = NULL, *cmd = NULL;
-+ struct lpfc_scsi_buf *lpfc_cmd;
-+ int errcnt;
-+
-+ psli = &phba->sli;
-+ errcnt = 0;
-+
-+ /* Error matching iocb on txq or txcmplq
-+ * First check the txq.
-+ */
-+ if(flag & LPFC_ABORT_TXQ) {
-+ list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
-+ cmd = &iocb->iocb;
-+
-+ /* Must be a FCP command */
-+ if ((cmd->ulpCommand != CMD_FCP_ICMND64_CR) &&
-+ (cmd->ulpCommand != CMD_FCP_IWRITE64_CR) &&
-+ (cmd->ulpCommand != CMD_FCP_IREAD64_CR)) {
-+ continue;
-+ }
-+
-+ /* context1 MUST be a struct lpfc_scsi_buf */
-+ lpfc_cmd = (struct lpfc_scsi_buf *) (iocb->context1);
-+ if ((lpfc_cmd == 0) ||
-+ (lpfc_cmd->target == 0) ||
-+ (lpfc_cmd->target->scsi_id != scsi_target) ||
-+ (lpfc_cmd->lun != scsi_lun)) {
-+ continue;
-+ }
-+
-+ list_del_init(&iocb->list);
-+ pring->txq_cnt--;
-+ if (iocb->iocb_cmpl) {
-+ icmd = &iocb->iocb;
-+ icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
-+ icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
-+ (iocb->iocb_cmpl) (phba, iocb, iocb);
-+ } else {
-+ mempool_free( iocb, phba->iocb_mem_pool);
-+ }
-+ }
-+ }
-+
-+ if(flag & LPFC_ABORT_TXCMPLQ) {
-+ /* Next check the txcmplq */
-+ list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq,
-+ list) {
-+ cmd = &iocb->iocb;
-+
-+ /* Must be a FCP command */
-+ if ((cmd->ulpCommand != CMD_FCP_ICMND64_CR) &&
-+ (cmd->ulpCommand != CMD_FCP_IWRITE64_CR) &&
-+ (cmd->ulpCommand != CMD_FCP_IREAD64_CR)) {
-+ continue;
-+ }
-+
-+ /* context1 MUST be a struct lpfc_scsi_buf */
-+ lpfc_cmd = (struct lpfc_scsi_buf *) (iocb->context1);
-+ if ((lpfc_cmd == 0) ||
-+ (lpfc_cmd->target == 0) ||
-+ (lpfc_cmd->target->scsi_id != scsi_target) ||
-+ (lpfc_cmd->lun != scsi_lun)) {
-+ continue;
-+ }
-+
-+ /* issue ABTS for this IOCB based on iotag */
-+ if ((abtsiocbp = mempool_alloc(phba->iocb_mem_pool,
-+ GFP_ATOMIC)) == 0) {
-+ errcnt++;
-+ continue;
-+ }
-+ memset(abtsiocbp, 0, sizeof (struct lpfc_iocbq));
-+ icmd = &abtsiocbp->iocb;
-+
-+ icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
-+ icmd->un.acxri.abortContextTag = cmd->ulpContext;
-+ icmd->un.acxri.abortIoTag = cmd->ulpIoTag;
-+
-+ icmd->ulpLe = 1;
-+ icmd->ulpClass = cmd->ulpClass;
-+ abtsiocbp->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
-+ if (phba->hba_state >= LPFC_LINK_UP) {
-+ icmd->ulpCommand = CMD_ABORT_XRI_CN;
-+ } else {
-+ icmd->ulpCommand = CMD_CLOSE_XRI_CN;
-+ }
-+
-+ if (lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0) ==
-+ IOCB_ERROR) {
-+ mempool_free( abtsiocbp, phba->iocb_mem_pool);
-+ errcnt++;
-+ continue;
-+ }
-+ /* The rsp ring completion will remove IOCB from
-+ * txcmplq when abort is read by HBA.
-+ */
-+ }
-+ }
-+ return (errcnt);
-+}
-+
-+int
-+lpfc_sli_abort_iocb_tgt(struct lpfc_hba * phba,
-+ struct lpfc_sli_ring * pring,
-+ uint16_t scsi_target, int flag)
-+{
-+ struct lpfc_sli *psli;
-+ struct lpfc_iocbq *iocb, *next_iocb;
-+ struct lpfc_iocbq *abtsiocbp;
-+ IOCB_t *icmd = NULL, *cmd = NULL;
-+ struct lpfc_scsi_buf *lpfc_cmd;
-+ int errcnt;
-+
-+ psli = &phba->sli;
-+ errcnt = 0;
-+
-+ /* Error matching iocb on txq or txcmplq
-+ * First check the txq.
-+ */
-+ if(flag & LPFC_ABORT_TXQ) {
-+ list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
-+ cmd = &iocb->iocb;
-+
-+ /* Must be a FCP command */
-+ if ((cmd->ulpCommand != CMD_FCP_ICMND64_CR) &&
-+ (cmd->ulpCommand != CMD_FCP_IWRITE64_CR) &&
-+ (cmd->ulpCommand != CMD_FCP_IREAD64_CR)) {
-+ continue;
-+ }
-+
-+ /* context1 MUST be a struct lpfc_scsi_buf */
-+ lpfc_cmd = (struct lpfc_scsi_buf *) (iocb->context1);
-+ if ((lpfc_cmd == 0) ||
-+ (lpfc_cmd->target == 0) ||
-+ (lpfc_cmd->target->scsi_id != scsi_target)) {
-+ continue;
-+ }
-+
-+ list_del_init(&iocb->list);
-+ pring->txq_cnt--;
-+ if (iocb->iocb_cmpl) {
-+ icmd = &iocb->iocb;
-+ icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
-+ icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
-+ (iocb->iocb_cmpl) (phba, iocb, iocb);
-+ } else {
-+ mempool_free( iocb, phba->iocb_mem_pool);
-+ }
-+ }
-+ }
-+
-+ if(flag & LPFC_ABORT_TXCMPLQ) {
-+ /* Next check the txcmplq */
-+ list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq,
-+ list) {
-+ cmd = &iocb->iocb;
-+
-+ /* Must be a FCP command */
-+ if ((cmd->ulpCommand != CMD_FCP_ICMND64_CR) &&
-+ (cmd->ulpCommand != CMD_FCP_IWRITE64_CR) &&
-+ (cmd->ulpCommand != CMD_FCP_IREAD64_CR)) {
-+ continue;
-+ }
-+
-+ /* context1 MUST be a struct lpfc_scsi_buf */
-+ lpfc_cmd = (struct lpfc_scsi_buf *) (iocb->context1);
-+ if ((lpfc_cmd == 0) ||
-+ (lpfc_cmd->target == 0) ||
-+ (lpfc_cmd->target->scsi_id != scsi_target)) {
-+ continue;
-+ }
-+
-+ /* issue ABTS for this IOCB based on iotag */
-+ if ((abtsiocbp = mempool_alloc(phba->iocb_mem_pool,
-+ GFP_ATOMIC)) == 0) {
-+ errcnt++;
-+ continue;
-+ }
-+ memset(abtsiocbp, 0, sizeof (struct lpfc_iocbq));
-+ icmd = &abtsiocbp->iocb;
-+
-+ icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
-+ icmd->un.acxri.abortContextTag = cmd->ulpContext;
-+ icmd->un.acxri.abortIoTag = cmd->ulpIoTag;
-+
-+ icmd->ulpLe = 1;
-+ icmd->ulpClass = cmd->ulpClass;
-+ abtsiocbp->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
-+ if (phba->hba_state >= LPFC_LINK_UP) {
-+ icmd->ulpCommand = CMD_ABORT_XRI_CN;
-+ } else {
-+ icmd->ulpCommand = CMD_CLOSE_XRI_CN;
-+ }
-+
-+ if (lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0) ==
-+ IOCB_ERROR) {
-+ mempool_free( abtsiocbp, phba->iocb_mem_pool);
-+ errcnt++;
-+ continue;
-+ }
-+ /* The rsp ring completion will remove IOCB from
-+ * txcmplq when abort is read by HBA.
-+ */
-+ }
-+ }
-+ return (errcnt);
-+}
-+
-+
-+
-+void
-+lpfc_sli_wake_iocb_high_priority(struct lpfc_hba * phba,
-+ struct lpfc_iocbq * queue1,
-+ struct lpfc_iocbq * queue2)
-+{
-+ if (queue1->context2 && queue2)
-+ memcpy(queue1->context2, queue2, sizeof (struct lpfc_iocbq));
-+
-+ /* The waiter is looking for LPFC_IO_HIPRI bit to be set
-+ as a signal to wake up */
-+ queue1->iocb_flag |= LPFC_IO_HIPRI;
-+ return;
-+}
-+
-+static void
-+lpfc_sli_wake_iocb_high_priority_cleanup(struct lpfc_hba * phba,
-+ struct lpfc_iocbq * queue1,
-+ struct lpfc_iocbq * queue2)
-+{
-+ struct lpfc_scsi_buf *lpfc_cmd = queue1->context1;
-+
-+ /*
-+ * Just free the iocbq back to the mempool. The driver
-+ * has stopped polling and this routine will execute as
-+ * a result of the subsequent abort.
-+ */
-+ mempool_free(queue1->context2, phba->iocb_mem_pool);
-+ lpfc_free_scsi_buf(lpfc_cmd);
-+ return;
-+}
-+
-+int
-+lpfc_sli_issue_iocb_wait_high_priority(struct lpfc_hba * phba,
-+ struct lpfc_sli_ring * pring,
-+ struct lpfc_iocbq * piocb,
-+ uint32_t flag,
-+ struct lpfc_iocbq * prspiocbq)
-+{
-+ int wait_time = 0, retval = IOCB_ERROR;
-+
-+ /* The caller must leave context1 empty. */
-+ if (piocb->context_un.hipri_wait_queue != 0) {
-+ return IOCB_ERROR;
-+ }
-+
-+ /*
-+ * If the caller has provided a response iocbq buffer, context2 must
-+ * be NULL or it is an error.
-+ */
-+ if (prspiocbq && piocb->context2) {
-+ return IOCB_ERROR;
-+ }
-+
-+ piocb->context2 = prspiocbq;
-+
-+ /* Setup callback routine and issue the command. */
-+ piocb->iocb_cmpl = lpfc_sli_wake_iocb_high_priority;
-+ retval = lpfc_sli_issue_iocb(phba, pring, piocb,
-+ flag | SLI_IOCB_HIGH_PRIORITY);
-+ if (retval != IOCB_SUCCESS) {
-+ piocb->context2 = NULL;
-+ return IOCB_ERROR;
-+ }
-+
-+ /*
-+ * This high-priority iocb was sent out-of-band. Poll for its
-+ * completion rather than wait for a signal. Note that the host_lock
-+ * is held by the midlayer and must be released here to allow the
-+ * interrupt handlers to complete the IO and signal this routine via
-+ * the iocb_flag.
-+ * The driver waits a maximum of 600 seconds to give the FW ample time
-+ * to complete the target reset ABTS. The race to avoid is not waiting long
-+ * enough and then having the FW complete the request before the driver
-+ * can issue the second abort. Since a solicited completion is required
-+ * by the FW, this wait period should be enough time for the FW to
-+ * complete the abts successfully or give up.
-+ */
-+
-+ retval = IOCB_TIMEDOUT;
-+ spin_unlock_irq(phba->host->host_lock);
-+ while (wait_time <= 600000) {
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,6)
-+ mdelay(100);
-+#else
-+ msleep(100);
-+#endif
-+ if (piocb->iocb_flag & LPFC_IO_HIPRI) {
-+ piocb->iocb_flag &= ~LPFC_IO_HIPRI;
-+ retval = IOCB_SUCCESS;
-+ break;
-+ }
-+ wait_time += 100;
-+ }
-+
-+ spin_lock_irq(phba->host->host_lock);
-+
-+ /*
-+ * If the polling attempt failed to get a completion from the HBA,
-+ * then substitute the initial completion function with one that
-+ * releases the piocb back to the mempool. Failure to do this
-+ * results in a memory leak. Also note the small timing race that
-+ * exists between the driver giving up and a completion coming in.
-+ */
-+ if ((retval == IOCB_TIMEDOUT) && !(piocb->iocb_flag & LPFC_IO_HIPRI)) {
-+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-+ "%d:0327 waited %d mSecs for high priority "
-+ "IOCB %p - giving up\n",
-+ phba->brd_no, wait_time, piocb);
-+ piocb->iocb_cmpl = lpfc_sli_wake_iocb_high_priority_cleanup;
-+ }
-+
-+ piocb->context2 = NULL;
-+
-+ return retval;
-+}
-+
-+int
-+lpfc_sli_issue_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq,
-+ uint32_t timeout)
-+{
-+ DECLARE_WAIT_QUEUE_HEAD(done_q);
-+ DECLARE_WAITQUEUE(wq_entry, current);
-+ uint32_t timeleft = 0;
-+ int retval;
-+
-+ /* The caller must leave context1 empty. */
-+ if (pmboxq->context1 != 0) {
-+ return (MBX_NOT_FINISHED);
-+ }
-+
-+ /* setup wake call as IOCB callback */
-+ pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
-+ /* setup context field to pass wait_queue pointer to wake function */
-+ pmboxq->context1 = &done_q;
-+
-+ /* start to sleep before we wait, to avoid races */
-+ set_current_state(TASK_INTERRUPTIBLE);
-+ add_wait_queue(&done_q, &wq_entry);
-+
-+ /* now issue the command */
-+ spin_lock_irq(phba->host->host_lock);
-+ retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
-+ spin_unlock_irq(phba->host->host_lock);
-+
-+ if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
-+ timeleft = schedule_timeout(timeout * HZ);
-+ pmboxq->context1 = NULL;
-+ /* if schedule_timeout returns 0, we timed out and were not
-+ woken up */
-+ if (timeleft == 0) {
-+ retval = MBX_TIMEOUT;
-+ } else {
-+ retval = MBX_SUCCESS;
-+ }
-+ }
-+
-+
-+ set_current_state(TASK_RUNNING);
-+ remove_wait_queue(&done_q, &wq_entry);
-+ return retval;
-+}
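-+
-+/*
-+ * Caller sketch (hypothetical): issue a mailbox command synchronously.
-+ * The timeout is in seconds, and context1 must be NULL on entry because
-+ * it is borrowed for the wait queue.
-+ *
-+ *	rc = lpfc_sli_issue_mbox_wait(phba, pmb, 30);
-+ *	if (rc != MBX_SUCCESS)
-+ *		goto out_err;
-+ *
-+ * On MBX_TIMEOUT the command may still complete later, so the mailbox
-+ * buffer should not be freed or reused immediately.
-+ */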
-+
-+static void
-+lpfc_sli_wake_iocb_wait(struct lpfc_hba * phba,
-+ struct lpfc_iocbq * queue1, struct lpfc_iocbq * queue2)
-+{
-+ wait_queue_head_t *pdone_q;
-+
-+ queue1->iocb_flag |= LPFC_IO_WAIT;
-+ if (queue1->context2 && queue2)
-+ memcpy(queue1->context2, queue2, sizeof (struct lpfc_iocbq));
-+
-+ /*
-+ * If pdone_q is NULL, the waiter gave up and returned and this
-+ * call has nothing to do.
-+ */
-+ pdone_q = queue1->context_un.hipri_wait_queue;
-+ if (pdone_q) {
-+ wake_up(pdone_q);
-+ }
-+
-+ return;
-+}
-+
-+int
-+lpfc_sli_issue_iocb_wait(struct lpfc_hba * phba,
-+ struct lpfc_sli_ring * pring,
-+ struct lpfc_iocbq * piocb,
-+ struct lpfc_iocbq * prspiocbq, uint32_t timeout)
-+{
-+ DECLARE_WAIT_QUEUE_HEAD(done_q);
-+ DECLARE_WAITQUEUE(wq_entry, current);
-+ uint32_t timeleft = 0;
-+ int retval;
-+
-+ /* The caller must leave context1 empty for the driver. */
-+ if (piocb->context_un.hipri_wait_queue != 0)
-+ return (IOCB_ERROR);
-+
-+ /* If the caller has provided a response iocbq buffer, then context2
-+ * must be NULL or it is an error.
-+ */
-+ if (prspiocbq) {
-+ if (piocb->context2)
-+ return (IOCB_ERROR);
-+ piocb->context2 = prspiocbq;
-+ }
-+
-+ /* setup wake call as IOCB callback */
-+ piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
-+ /* setup context field to pass wait_queue pointer to wake function */
-+ piocb->context_un.hipri_wait_queue = &done_q;
-+
-+ /* start to sleep before we wait, to avoid races */
-+ set_current_state(TASK_UNINTERRUPTIBLE);
-+ add_wait_queue(&done_q, &wq_entry);
-+
-+ /* now issue the command */
-+ retval = lpfc_sli_issue_iocb(phba, pring, piocb, 0);
-+ if (retval == IOCB_SUCCESS) {
-+ /* Give up thread time and wait for the iocb to complete or for
-+ * the allotted time to expire.
-+ */
-+ spin_unlock_irq(phba->host->host_lock);
-+ timeleft = schedule_timeout(timeout * HZ);
-+ spin_lock_irq(phba->host->host_lock);
-+
-+ piocb->context_un.hipri_wait_queue = NULL;
-+ piocb->iocb_cmpl = NULL;
-+ if (piocb->context2 == prspiocbq)
-+ piocb->context2 = NULL;
-+
-+ /*
-+ * Catch the error cases. A timeleft of zero is an error since
-+ * the iocb should have completed. The iocb_flag not have value
-+ * LPFC_IO_WAIT is also an error since the wakeup callback sets
-+ * this flag when it runs. Handle each.
-+ */
-+ if (!(piocb->iocb_flag & LPFC_IO_WAIT)) {
-+ printk(KERN_ERR "%s: Timeleft is %d, iocb_flags is 0x%x ring_no %d ulpCommand 0x%x\n",
-+ __FUNCTION__, timeleft, piocb->iocb_flag,
-+ pring->ringno, piocb->iocb.ulpCommand);
-+ retval = IOCB_TIMEDOUT;
-+ }
-+ }
-+
-+ remove_wait_queue(&done_q, &wq_entry);
-+ set_current_state(TASK_RUNNING);
-+ piocb->context2 = NULL;
-+ return retval;
-+}
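-+
-+/*
-+ * Caller sketch (illustrative): send an iocb and wait up to "timeout"
-+ * seconds for its completion, collecting the response in a caller-owned
-+ * iocbq.  The caller holds host_lock; it is dropped only across the sleep
-+ * above.  context2 of the command must be NULL on entry.
-+ *
-+ *	rc = lpfc_sli_issue_iocb_wait(phba, pring, piocb, &rspiocbq, 60);
-+ *	if (rc == IOCB_SUCCESS)
-+ *		status = rspiocbq.iocb.ulpStatus;
-+ *
-+ * IOCB_TIMEDOUT means no completion arrived in time; IOCB_ERROR means the
-+ * command was never accepted by the ring.
-+ */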
-+
-+irqreturn_t
-+lpfc_intr_handler(int irq, void *dev_id, struct pt_regs * regs)
-+{
-+ struct lpfc_hba *phba;
-+ int intr_status;
-+
-+ /*
-+ * Get the driver's phba structure from the dev_id and
-+ * assume the HBA is not interrupting.
-+ */
-+ phba = (struct lpfc_hba *) dev_id;
-+
-+ if (phba) {
-+ /* Call SLI to handle the interrupt event. */
-+ intr_status = lpfc_sli_intr(phba);
-+ if (intr_status == 0)
-+ return IRQ_HANDLED;
-+ }
-+
-+ return IRQ_NONE;
-+
-+} /* lpfc_intr_handler */
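-+
-+/*
-+ * Registration sketch (hypothetical probe-time code, 2.6.8-era API): the
-+ * handler above returns IRQ_NONE when the HBA is not interrupting, so it
-+ * is safe to register on a shared line.  "pdev" stands for the adapter's
-+ * struct pci_dev and the "lpfc" name string is illustrative only.
-+ *
-+ *	rc = request_irq(pdev->irq, lpfc_intr_handler, SA_SHIRQ,
-+ *			 "lpfc", phba);
-+ *
-+ * A non-zero return means the line could not be claimed and the attach
-+ * must fail; the matching free_irq(pdev->irq, phba) is required on
-+ * teardown.
-+ */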
---- linux-2.6.8.1-t044-driver-update//drivers/scsi/lpfc/lpfc_disc.h 1970-01-01 03:00:00.000000000 +0300
-+++ rhel4u2//drivers/scsi/lpfc/lpfc_disc.h 2005-10-19 11:47:17.000000000 +0400
-@@ -0,0 +1,278 @@
-+/*******************************************************************
-+ * This file is part of the Emulex Linux Device Driver for *
-+ * Fibre Channel Host Bus Adapters. *
-+ * Copyright (C) 2003-2005 Emulex. All rights reserved. *
-+ * EMULEX and SLI are trademarks of Emulex. *
-+ * www.emulex.com *
-+ * *
-+ * This program is free software; you can redistribute it and/or *
-+ * modify it under the terms of version 2 of the GNU General *
-+ * Public License as published by the Free Software Foundation. *
-+ * This program is distributed in the hope that it will be useful. *
-+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
-+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
-+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
-+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
-+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
-+ * more details, a copy of which can be found in the file COPYING *
-+ * included with this package. *
-+ *******************************************************************/
-+
-+/*
-+ * $Id: lpfc_disc.h 1.51.1.2 2005/06/13 17:16:12EDT sf_support Exp $
-+ */
-+
-+#ifndef _H_LPFC_DISC
-+#define _H_LPFC_DISC
-+
-+#include "lpfc_hw.h"
-+
-+struct lpfc_target;
-+
-+#define FC_MAX_HOLD_RSCN 32 /* max number of deferred RSCNs */
-+#define FC_MAX_NS_RSP 65536 /* max size NameServer rsp */
-+#define FC_MAXLOOP 126 /* max devices supported on a fc loop */
-+#define LPFC_DISC_FLOGI_TMO 10 /* Discovery FLOGI ratov */
-+
-+/* Defines for failMask bitmask
-+ * These are reasons that the device is not currently available
-+ * for I/O to be sent.
-+ */
-+#define LPFC_DEV_LINK_DOWN 0x1 /* Link is down */
-+#define LPFC_DEV_DISAPPEARED 0x2 /* Device disappeared from mapped
-+ list */
-+#define LPFC_DEV_DISCOVERY_INP 0x4 /* Device to go through discovery */
-+#define LPFC_DEV_DISCONNECTED 0x8 /* no active connection to remote dev */
-+
-+/* These defines are used for set failMask routines */
-+#define LPFC_SET_BITMASK 1
-+#define LPFC_CLR_BITMASK 2
-+
-+/* Provide an enumeration for the Types of addresses a FARP can resolve. */
-+typedef enum lpfc_farp_addr_type {
-+ LPFC_FARP_BY_IEEE,
-+ LPFC_FARP_BY_WWPN,
-+ LPFC_FARP_BY_WWNN,
-+} LPFC_FARP_ADDR_TYPE;
-+
-+/* This is the protocol dependent definition for a Node List Entry.
-+ * This is used by the Fibre Channel protocol to support FCP.
-+ */
-+
-+struct lpfc_bindlist {
-+ struct list_head nlp_listp;
-+ struct lpfc_target *nlp_Target; /* ptr to the tgt structure */
-+ struct lpfc_name nlp_portname; /* port name */
-+ struct lpfc_name nlp_nodename; /* node name */
-+ uint16_t nlp_bind_type;
-+ uint16_t nlp_sid; /* scsi id */
-+ uint32_t nlp_DID; /* FibreChannel D_ID of entry */
-+};
-+
-+/* structure used to queue event to the discovery tasklet */
-+struct lpfc_disc_evt {
-+ struct list_head evt_listp;
-+ void * evt_arg1;
-+ void * evt_arg2;
-+ uint32_t evt;
-+};
-+typedef struct lpfc_disc_evt LPFC_DISC_EVT_t;
-+
-+#define LPFC_EVT_MBOX 0x1
-+#define LPFC_EVT_SOL_IOCB 0x2
-+#define LPFC_EVT_UNSOL_IOCB 0x3
-+#define LPFC_EVT_NODEV_TMO 0x4
-+#define LPFC_EVT_SCAN 0x5
-+#define LPFC_EVT_ERR_ATTN 0x6
-+#define LPFC_EVT_ELS_RETRY 0x7
-+#define LPFC_EVT_OPEN_LOOP 0x8
-+
-+struct lpfc_nodelist {
-+ struct list_head nlp_listp;
-+ struct lpfc_name nlp_portname; /* port name */
-+ struct lpfc_name nlp_nodename; /* node name */
-+ uint32_t nlp_failMask; /* failure mask for device */
-+ uint32_t nlp_flag; /* entry flags */
-+ uint32_t nlp_DID; /* FC D_ID of entry */
-+ uint32_t nlp_last_elscmd; /* Last ELS cmd sent */
-+ uint16_t nlp_type;
-+#define NLP_FC_NODE 0x1 /* entry is an FC node */
-+#define NLP_FABRIC 0x4 /* entry rep a Fabric entity */
-+#define NLP_FCP_TARGET 0x8 /* entry is an FCP target */
-+
-+ uint16_t nlp_rpi;
-+ uint16_t nlp_state; /* state transition indicator */
-+ uint16_t nlp_xri; /* output exchange id for RPI */
-+ uint16_t nlp_sid; /* scsi id */
-+#define NLP_NO_SID 0xffff
-+
-+ uint8_t nlp_retry; /* used for ELS retries */
-+ uint8_t nlp_disc_refcnt; /* used for DSM */
-+ uint8_t nlp_fcp_info; /* class info, bits 0-3 */
-+#define NLP_FCP_2_DEVICE 0x10 /* FCP-2 device */
-+
-+ struct timer_list nlp_delayfunc; /* Used for delayed ELS cmds */
-+ struct timer_list nlp_tmofunc; /* Used for nodev tmo */
-+ struct lpfc_target *nlp_Target; /* Pointer to the target
-+ structure */
-+
-+ struct lpfc_bindlist *nlp_listp_bind; /* Linked list of bound remote
-+ ports */
-+ struct lpfc_nodelist *nlp_rpi_hash_next;
-+ struct lpfc_hba *nlp_phba;
-+ LPFC_DISC_EVT_t nodev_timeout_evt;
-+ LPFC_DISC_EVT_t els_retry_evt;
-+};
-+
-+/*++
-+ * lpfc_node_farp_pend:
-+ * This data structure defines the attributes associated with
-+ * an outstanding FARP REQ to a remote node.
-+ *
-+ * listentry - head of this list of pending farp requests.
-+ * rnode_addr - The address of the remote node. Either the IEEE, WWPN, or
-+ * WWNN. Used in the FARP request.
-+ *
-+ --*/
-+struct lpfc_node_farp_pend {
-+ struct list_head listentry;
-+ struct lpfc_name rnode_addr;
-+};
-+
-+/* Defines for nlp_flag (uint32) */
-+#define NLP_NO_LIST 0x0 /* Indicates immediately free node */
-+#define NLP_UNUSED_LIST 0x1 /* Flg to indicate node will be freed */
-+#define NLP_PLOGI_LIST 0x2 /* Flg to indicate sent PLOGI */
-+#define NLP_ADISC_LIST 0x3 /* Flg to indicate sent ADISC */
-+#define NLP_REGLOGIN_LIST 0x4 /* Flg to indicate sent REG_LOGIN */
-+#define NLP_PRLI_LIST 0x5 /* Flg to indicate sent PRLI */
-+#define NLP_UNMAPPED_LIST 0x6 /* Node is now unmapped */
-+#define NLP_MAPPED_LIST 0x7 /* Node is now mapped */
-+#define NLP_NPR_LIST 0x8 /* Node is in NPort Recovery state */
-+#define NLP_JUST_DQ 0x9 /* just dequeue ndlp in lpfc_nlp_list */
-+#define NLP_LIST_MASK 0xf /* mask to see what list node is on */
-+#define NLP_PLOGI_SND 0x20 /* sent PLOGI request for this entry */
-+#define NLP_PRLI_SND 0x40 /* sent PRLI request for this entry */
-+#define NLP_ADISC_SND 0x80 /* sent ADISC request for this entry */
-+#define NLP_LOGO_SND 0x100 /* sent LOGO request for this entry */
-+#define NLP_RNID_SND 0x400 /* sent RNID request for this entry */
-+#define NLP_ELS_SND_MASK 0x7e0 /* sent ELS request for this entry */
-+#define NLP_AUTOMAP 0x800 /* Entry was automap'ed */
-+#define NLP_SEED_WWPN 0x1000 /* Entry scsi id is seeded for WWPN */
-+#define NLP_SEED_WWNN 0x2000 /* Entry scsi id is seeded for WWNN */
-+#define NLP_SEED_DID 0x4000 /* Entry scsi id is seeded for DID */
-+#define NLP_SEED_MASK 0x807000 /* mask for seeded flags */
-+#define NLP_NS_NODE 0x8000 /* Authenticated entry by NameServer */
-+#define NLP_NODEV_TMO 0x10000 /* nodev timeout is running for node */
-+#define NLP_DELAY_TMO 0x20000 /* delay timeout is running for node */
-+#define NLP_NPR_2B_DISC 0x40000 /* node is included in num_disc_nodes */
-+#define NLP_RCV_PLOGI 0x80000 /* Rcv'ed PLOGI from remote system */
-+#define NLP_LOGO_ACC 0x100000 /* Process LOGO after ACC completes */
-+#define NLP_TGT_NO_SCSIID 0x200000 /* good PRLI but no binding for scsid */
-+#define NLP_SEED_ALPA 0x800000 /* SCSI id is derived from alpa array */
-+#define NLP_ACC_REGLOGIN 0x1000000 /* Issue Reg Login after successful
-+ ACC */
-+#define NLP_NPR_ADISC 0x2000000 /* Issue ADISC when dq'ed from
-+ NPR list */
-+#define NLP_DELAY_REMOVE 0x4000000 /* Defer removal till end of DSM */
-+
-+/* Defines for list searches */
-+#define NLP_SEARCH_MAPPED 0x1 /* search mapped */
-+#define NLP_SEARCH_UNMAPPED 0x2 /* search unmapped */
-+#define NLP_SEARCH_PLOGI 0x4 /* search plogi */
-+#define NLP_SEARCH_ADISC 0x8 /* search adisc */
-+#define NLP_SEARCH_REGLOGIN 0x10 /* search reglogin */
-+#define NLP_SEARCH_PRLI 0x20 /* search prli */
-+#define NLP_SEARCH_NPR 0x40 /* search npr */
-+#define NLP_SEARCH_UNUSED 0x80 /* search unused */
-+#define NLP_SEARCH_ALL 0xff /* search all lists */
-+
-+/* There are 4 different double linked lists nodelist entries can reside on.
-+ * The Port Login (PLOGI) list and Address Discovery (ADISC) list are used
-+ * when Link Up discovery or Registered State Change Notification (RSCN)
-+ * processing is needed. Each list holds the nodes that require a PLOGI or
-+ * ADISC Extended Link Service (ELS) request. These lists keep track of the
-+ * nodes affected by an RSCN, or a Link Up (typically, all nodes are affected
-+ * by Link Up) event. The unmapped_list contains all nodes that have
-+ * successfully logged in at the Fibre Channel level. The
-+ * mapped_list will contain all nodes that are mapped FCP targets.
-+ *
-+ * The bind list is a list of undiscovered (potentially non-existent) nodes
-+ * that we have saved binding information on. This information is used when
-+ * nodes transition from the unmapped to the mapped list.
-+ */
-+
-+/* Defines for nlp_state */
-+#define NLP_STE_UNUSED_NODE 0x0 /* node is just allocated */
-+#define NLP_STE_PLOGI_ISSUE 0x1 /* PLOGI was sent to NL_PORT */
-+#define NLP_STE_ADISC_ISSUE 0x2 /* ADISC was sent to NL_PORT */
-+#define NLP_STE_REG_LOGIN_ISSUE 0x3 /* REG_LOGIN was issued for NL_PORT */
-+#define NLP_STE_PRLI_ISSUE 0x4 /* PRLI was sent to NL_PORT */
-+#define NLP_STE_UNMAPPED_NODE 0x5 /* PRLI completed from NL_PORT */
-+#define NLP_STE_MAPPED_NODE 0x6 /* Identified as a FCP Target */
-+#define NLP_STE_NPR_NODE 0x7 /* NPort disappeared */
-+#define NLP_STE_MAX_STATE 0x8
-+#define NLP_STE_FREED_NODE 0xff /* node entry was freed to MEM_NLP */
-+
-+/* For UNUSED_NODE state, the node has just been allocated.
-+ * For PLOGI_ISSUE and REG_LOGIN_ISSUE, the node is on
-+ * the PLOGI list. For REG_LOGIN_COMPL, the node is taken off the PLOGI list
-+ * and put on the unmapped list. For ADISC processing, the node is taken off
-+ * the ADISC list and placed on either the mapped or unmapped list (depending
-+ * on its previous state). Once on the unmapped list, a PRLI is issued and the
-+ * state changed to PRLI_ISSUE. When the PRLI completion occurs, the state is
-+ * changed to PRLI_COMPL. If the completion indicates a mapped
-+ * node, the node is taken off the unmapped list. The binding list is checked
-+ * for a valid binding, or a binding is automatically assigned. If binding
-+ * assignment is unsuccessful, the node is left on the unmapped list. If
-+ * binding assignment is successful, the associated binding list entry (if
-+ * any) is removed, and the node is placed on the mapped list.
-+ */
-+/*
-+ * For a Link Down, all nodes on the ADISC, PLOGI, unmapped or mapped
-+ * lists will receive a DEVICE_RECOVERY event. If the linkdown or nodev timers
-+ * expire, all affected nodes will receive a DEVICE_RM event.
-+ */
-+/*
-+ * For a Link Up or RSCN, all nodes will move from the mapped / unmapped lists
-+ * to either the ADISC or PLOGI list. After a Nameserver query or ALPA loopmap
-+ * check, additional nodes may be added (DEVICE_ADD) or removed (DEVICE_RM) to /
-+ * from the PLOGI or ADISC lists. Once the PLOGI and ADISC lists are populated,
-+ * we will first process the ADISC list. 32 entries are processed initially and
-+ * ADISC is initiated for each one. Completions / Events for each node are
-+ * funnelled through the state machine. As each node finishes ADISC processing, it
-+ * starts ADISC for any nodes waiting for ADISC processing. If no nodes are
-+ * waiting, and the ADISC list count is identically 0, then we are done. For
-+ * Link Up discovery, since all nodes on the PLOGI list are UNREG_LOGIN'ed, we
-+ * can issue a CLEAR_LA and reenable Link Events. Next we will process the PLOGI
-+ * list. 32 entries are processed initially and PLOGI is initiated for each one.
-+ * Completions / Events for each node are funnelled through the state machine. As
-+ * each node finishes PLOGI processing, it starts PLOGI for any nodes waiting
-+ * for PLOGI processing. If no nodes are waiting, and the PLOGI list count is
-+ * identically 0, then we are done. We have now completed discovery / RSCN
-+ * handling. Upon completion, ALL nodes should be on either the mapped or
-+ * unmapped lists.
-+ */
-+
-+/* Defines for Node List Entry Events that could happen */
-+#define NLP_EVT_RCV_PLOGI 0x0 /* Rcv'd an ELS PLOGI command */
-+#define NLP_EVT_RCV_PRLI 0x1 /* Rcv'd an ELS PRLI command */
-+#define NLP_EVT_RCV_LOGO 0x2 /* Rcv'd an ELS LOGO command */
-+#define NLP_EVT_RCV_ADISC 0x3 /* Rcv'd an ELS ADISC command */
-+#define NLP_EVT_RCV_PDISC 0x4 /* Rcv'd an ELS PDISC command */
-+#define NLP_EVT_RCV_PRLO 0x5 /* Rcv'd an ELS PRLO command */
-+#define NLP_EVT_CMPL_PLOGI 0x6 /* Sent an ELS PLOGI command */
-+#define NLP_EVT_CMPL_PRLI 0x7 /* Sent an ELS PRLI command */
-+#define NLP_EVT_CMPL_LOGO 0x8 /* Sent an ELS LOGO command */
-+#define NLP_EVT_CMPL_ADISC 0x9 /* Sent an ELS ADISC command */
-+#define NLP_EVT_CMPL_REG_LOGIN 0xa /* REG_LOGIN mbox cmd completed */
-+#define NLP_EVT_DEVICE_RM 0xb /* Device not found in NS / ALPAmap */
-+#define NLP_EVT_DEVICE_RECOVERY 0xc /* Device existence unknown */
-+#define NLP_EVT_MAX_EVENT 0xd
-+
-+
-+/* Definitions for Binding Entry Type for lpfc_parse_binding_entry() */
-+#define LPFC_BIND_WW_NN_PN 0
-+#define LPFC_BIND_DID 1
-+
-+#endif /* _H_LPFC_DISC */
---- linux-2.6.8.1-t044-driver-update//drivers/scsi/lpfc/lpfc_scsi.h 1970-01-01 03:00:00.000000000 +0300
-+++ rhel4u2//drivers/scsi/lpfc/lpfc_scsi.h 2005-10-19 11:47:17.000000000 +0400
-@@ -0,0 +1,93 @@
-+/*******************************************************************
-+ * This file is part of the Emulex Linux Device Driver for *
-+ * Fibre Channel Host Bus Adapters. *
-+ * Copyright (C) 2003-2005 Emulex. All rights reserved. *
-+ * EMULEX and SLI are trademarks of Emulex. *
-+ * www.emulex.com *
-+ * *
-+ * This program is free software; you can redistribute it and/or *
-+ * modify it under the terms of version 2 of the GNU General *
-+ * Public License as published by the Free Software Foundation. *
-+ * This program is distributed in the hope that it will be useful. *
-+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
-+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
-+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
-+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
-+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
-+ * more details, a copy of which can be found in the file COPYING *
-+ * included with this package. *
-+ *******************************************************************/
-+
-+/*
-+ * $Id: lpfc_scsi.h 1.71.1.3 2005/06/21 15:48:51EDT sf_support Exp $
-+ */
-+
-+#ifndef _H_LPFC_SCSI
-+#define _H_LPFC_SCSI
-+
-+#include "lpfc_disc.h"
-+#include "lpfc_mem.h"
-+#include "lpfc_sli.h"
-+
-+struct lpfc_hba;
-+
-+
-+struct lpfc_target {
-+ struct lpfc_nodelist *pnode; /* Pointer to the node structure. */
-+ uint16_t scsi_id;
-+ uint32_t qcmdcnt;
-+ uint32_t iodonecnt;
-+ uint32_t errorcnt;
-+ uint32_t slavecnt;
-+#if defined(RHEL_FC) || defined(SLES_FC)
-+ uint16_t blocked;
-+#endif
-+#ifdef RHEL_FC
-+ struct scsi_target *starget; /* Pointer to midlayer target
-+ structure. */
-+#endif
-+#ifdef SLES_FC
-+ struct timer_list dev_loss_timer;
-+#endif
-+};
-+
-+struct lpfc_scsi_buf {
-+ struct scsi_cmnd *pCmd;
-+ struct lpfc_hba *scsi_hba;
-+ struct lpfc_target *target;
-+ uint32_t lun;
-+
-+ uint32_t timeout;
-+
-+ uint16_t status; /* From IOCB Word 7- ulpStatus */
-+ uint32_t result; /* From IOCB Word 4. */
-+
-+ uint32_t seg_cnt; /* Number of scatter-gather segments returned by
-+ * dma_map_sg. The driver needs this for calls
-+ * to dma_unmap_sg. */
-+ dma_addr_t nonsg_phys; /* Non scatter-gather physical address. */
-+
-+ /* dma_ext holds both the virtual and physical addresses of a DMA-able
-+ * buffer which contains fcp_cmd, fcp_rsp and a scatter gather list for up to
-+ * 68 (LPFC_SCSI_BPL_SIZE) BDE entries,
-+ * xfer length, cdb, data direction....
-+ */
-+ struct lpfc_dmabuf dma_ext;
-+ struct fcp_cmnd *fcp_cmnd;
-+ struct fcp_rsp *fcp_rsp;
-+ struct ulp_bde64 *fcp_bpl;
-+
-+ /* cur_iocbq has phys of the dma-able buffer.
-+ * Iotag is in here
-+ */
-+ struct lpfc_iocbq cur_iocbq;
-+};
-+
-+#define LPFC_SCSI_INITIAL_BPL_SIZE 4 /* Number of scsi buf BDEs in fcp_bpl */
-+
-+#define LPFC_SCSI_DMA_EXT_SIZE 264
-+#define LPFC_BPL_SIZE 1024
-+
-+#define MDAC_DIRECT_CMD 0x22
-+
-+#endif /* _H_LPFC_SCSI */
---- linux-2.6.8.1-t044-driver-update//drivers/scsi/lpfc/lpfc_fcp.h 1970-01-01 03:00:00.000000000 +0300
-+++ rhel4u2//drivers/scsi/lpfc/lpfc_fcp.h 2005-10-19 11:47:17.000000000 +0400
-@@ -0,0 +1,108 @@
-+/*******************************************************************
-+ * This file is part of the Emulex Linux Device Driver for *
-+ * Fibre Channel Host Bus Adapters. *
-+ * Copyright (C) 2003-2005 Emulex. All rights reserved. *
-+ * EMULEX and SLI are trademarks of Emulex. *
-+ * www.emulex.com *
-+ * *
-+ * This program is free software; you can redistribute it and/or *
-+ * modify it under the terms of version 2 of the GNU General *
-+ * Public License as published by the Free Software Foundation. *
-+ * This program is distributed in the hope that it will be useful. *
-+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
-+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
-+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
-+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
-+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
-+ * more details, a copy of which can be found in the file COPYING *
-+ * included with this package. *
-+ *******************************************************************/
-+
-+/*
-+ * $Id: lpfc_fcp.h 1.10.1.2 2005/06/13 17:16:19EDT sf_support Exp $
-+ */
-+
-+#ifndef H_LPFC_DFC
-+#define H_LPFC_DFC
-+
-+#define MAX_LPFC_SNS 128
-+
-+struct fcp_rsp {
-+ uint32_t rspRsvd1; /* FC Word 0, byte 0:3 */
-+ uint32_t rspRsvd2; /* FC Word 1, byte 0:3 */
-+
-+ uint8_t rspStatus0; /* FCP_STATUS byte 0 (reserved) */
-+ uint8_t rspStatus1; /* FCP_STATUS byte 1 (reserved) */
-+ uint8_t rspStatus2; /* FCP_STATUS byte 2 field validity */
-+#define RSP_LEN_VALID 0x01 /* bit 0 */
-+#define SNS_LEN_VALID 0x02 /* bit 1 */
-+#define RESID_OVER 0x04 /* bit 2 */
-+#define RESID_UNDER 0x08 /* bit 3 */
-+ uint8_t rspStatus3; /* FCP_STATUS byte 3 SCSI status byte */
-+
-+ uint32_t rspResId; /* Residual xfer if residual count field set in
-+ fcpStatus2 */
-+ /* Received in Big Endian format */
-+ uint32_t rspSnsLen; /* Length of sense data in fcpSnsInfo */
-+ /* Received in Big Endian format */
-+ uint32_t rspRspLen; /* Length of FCP response data in fcpRspInfo */
-+ /* Received in Big Endian format */
-+
-+ uint8_t rspInfo0; /* FCP_RSP_INFO byte 0 (reserved) */
-+ uint8_t rspInfo1; /* FCP_RSP_INFO byte 1 (reserved) */
-+ uint8_t rspInfo2; /* FCP_RSP_INFO byte 2 (reserved) */
-+ uint8_t rspInfo3; /* FCP_RSP_INFO RSP_CODE byte 3 */
-+
-+#define RSP_NO_FAILURE 0x00
-+#define RSP_DATA_BURST_ERR 0x01
-+#define RSP_CMD_FIELD_ERR 0x02
-+#define RSP_RO_MISMATCH_ERR 0x03
-+#define RSP_TM_NOT_SUPPORTED 0x04 /* Task mgmt function not supported */
-+#define RSP_TM_NOT_COMPLETED 0x05 /* Task mgmt function not performed */
-+
-+ uint32_t rspInfoRsvd; /* FCP_RSP_INFO bytes 4-7 (reserved) */
-+
-+ uint8_t rspSnsInfo[MAX_LPFC_SNS];
-+#define SNS_ILLEGAL_REQ 0x05 /* sense key is byte 3 ([2]) */
-+#define SNSCOD_BADCMD 0x20 /* sense code is byte 13 ([12]) */
-+};
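-+
-+/*
-+ * Interpretation sketch (illustrative): rspStatus2 says which of the
-+ * big-endian length/residual fields of struct fcp_rsp above are
-+ * meaningful, so a consumer checks the validity bits before trusting them.
-+ *
-+ *	if (fcprsp->rspStatus2 & RESID_UNDER)
-+ *		resid = be32_to_cpu(fcprsp->rspResId);
-+ *	if (fcprsp->rspStatus2 & SNS_LEN_VALID)
-+ *		snslen = be32_to_cpu(fcprsp->rspSnsLen);
-+ */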
-+
-+struct fcp_cmnd {
-+ uint32_t fcpLunMsl; /* most significant lun word (32 bits) */
-+ uint32_t fcpLunLsl; /* least significant lun word (32 bits) */
-+ /* # of bits to shift lun id to end up in right
-+ * payload word, little endian = 8, big = 16.
-+ */
-+#if __BIG_ENDIAN
-+#define FC_LUN_SHIFT 16
-+#define FC_ADDR_MODE_SHIFT 24
-+#else /* __LITTLE_ENDIAN */
-+#define FC_LUN_SHIFT 8
-+#define FC_ADDR_MODE_SHIFT 0
-+#endif
-+
-+ uint8_t fcpCntl0; /* FCP_CNTL byte 0 (reserved) */
-+ uint8_t fcpCntl1; /* FCP_CNTL byte 1 task codes */
-+#define SIMPLE_Q 0x00
-+#define HEAD_OF_Q 0x01
-+#define ORDERED_Q 0x02
-+#define ACA_Q 0x04
-+#define UNTAGGED 0x05
-+ uint8_t fcpCntl2; /* FCP_CTL byte 2 task management codes */
-+#define FCP_ABORT_TASK_SET 0x02 /* Bit 1 */
-+#define FCP_CLEAR_TASK_SET 0x04 /* bit 2 */
-+#define FCP_BUS_RESET 0x08 /* bit 3 */
-+#define FCP_LUN_RESET 0x10 /* bit 4 */
-+#define FCP_TARGET_RESET 0x20 /* bit 5 */
-+#define FCP_CLEAR_ACA 0x40 /* bit 6 */
-+#define FCP_TERMINATE_TASK 0x80 /* bit 7 */
-+ uint8_t fcpCntl3;
-+#define WRITE_DATA 0x01 /* Bit 0 */
-+#define READ_DATA 0x02 /* Bit 1 */
-+
-+ uint8_t fcpCdb[16]; /* SRB cdb field is copied here */
-+ uint32_t fcpDl; /* Total transfer length */
-+
-+};
-+
-+#endif
---- linux-2.6.8.1-t044-driver-update//drivers/scsi/lpfc/lpfc_crtn.h 1970-01-01 03:00:00.000000000 +0300
-+++ rhel4u2//drivers/scsi/lpfc/lpfc_crtn.h 2005-10-19 11:47:17.000000000 +0400
-@@ -0,0 +1,273 @@
-+/*******************************************************************
-+ * This file is part of the Emulex Linux Device Driver for *
-+ * Fibre Channel Host Bus Adapters. *
-+ * Copyright (C) 2003-2005 Emulex. All rights reserved. *
-+ * EMULEX and SLI are trademarks of Emulex. *
-+ * www.emulex.com *
-+ * *
-+ * This program is free software; you can redistribute it and/or *
-+ * modify it under the terms of version 2 of the GNU General *
-+ * Public License as published by the Free Software Foundation. *
-+ * This program is distributed in the hope that it will be useful. *
-+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
-+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
-+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
-+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
-+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
-+ * more details, a copy of which can be found in the file COPYING *
-+ * included with this package. *
-+ *******************************************************************/
-+
-+/*
-+ * $Id: lpfc_crtn.h 1.149.1.4 2005/07/13 17:04:12EDT sf_support Exp $
-+ */
-+
-+#ifndef _H_LPFC_CRTN
-+#define _H_LPFC_CRTN
-+
-+#include <linux/delay.h>
-+#include <linux/interrupt.h>
-+#include <asm/uaccess.h>
-+
-+#include "lpfc_disc.h"
-+#include "lpfc_logmsg.h"
-+#include "lpfc_scsi.h"
-+#include "lpfc_sli.h"
-+
-+
-+void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t);
-+void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *);
-+int lpfc_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
-+void lpfc_clear_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
-+void lpfc_config_link(struct lpfc_hba *, LPFC_MBOXQ_t *);
-+int lpfc_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *);
-+void lpfc_read_config(struct lpfc_hba *, LPFC_MBOXQ_t *);
-+void lpfc_set_slim(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);
-+int lpfc_reg_login(struct lpfc_hba *, uint32_t, uint8_t *, LPFC_MBOXQ_t *,
-+ uint32_t);
-+void lpfc_unreg_login(struct lpfc_hba *, uint32_t, LPFC_MBOXQ_t *);
-+void lpfc_unreg_did(struct lpfc_hba *, uint32_t, LPFC_MBOXQ_t *);
-+void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);
-+
-+
-+int lpfc_linkdown(struct lpfc_hba *);
-+void lpfc_mbx_cmpl_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
-+
-+void lpfc_mbx_cmpl_clear_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
-+void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
-+void lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
-+void lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
-+void lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
-+int lpfc_consistent_bind_save(struct lpfc_hba *, struct lpfc_bindlist *);
-+int lpfc_nlp_plogi(struct lpfc_hba *, struct lpfc_nodelist *);
-+int lpfc_nlp_adisc(struct lpfc_hba *, struct lpfc_nodelist *);
-+int lpfc_nlp_unmapped(struct lpfc_hba *, struct lpfc_nodelist *);
-+int lpfc_nlp_mapped(struct lpfc_hba *, struct lpfc_nodelist *,
-+ struct lpfc_bindlist *);
-+int lpfc_nlp_list(struct lpfc_hba *, struct lpfc_nodelist *, int);
-+void lpfc_set_disctmo(struct lpfc_hba *);
-+int lpfc_can_disctmo(struct lpfc_hba *);
-+int lpfc_unreg_rpi(struct lpfc_hba *, struct lpfc_nodelist *);
-+int lpfc_check_sli_ndlp(struct lpfc_hba *, struct lpfc_sli_ring *,
-+ struct lpfc_iocbq *, struct lpfc_nodelist *);
-+int lpfc_nlp_remove(struct lpfc_hba *, struct lpfc_nodelist *);
-+void lpfc_nlp_init(struct lpfc_hba *, struct lpfc_nodelist *, uint32_t);
-+struct lpfc_nodelist *lpfc_setup_disc_node(struct lpfc_hba *, uint32_t);
-+struct lpfc_nodelist *lpfc_setup_rscn_node(struct lpfc_hba *, uint32_t);
-+void lpfc_disc_list_loopmap(struct lpfc_hba *);
-+void lpfc_disc_start(struct lpfc_hba *);
-+void lpfc_disc_flush_list(struct lpfc_hba *);
-+void lpfc_establish_link_tmo(unsigned long);
-+void lpfc_disc_timeout(unsigned long);
-+void lpfc_scan_timeout(unsigned long);
-+struct lpfc_target *lpfc_find_target(struct lpfc_hba *, uint32_t,
-+ struct lpfc_nodelist *);
-+void lpfc_set_failmask(struct lpfc_hba *, struct lpfc_nodelist *, uint32_t,
-+ uint32_t);
-+void lpfc_process_nodev_timeout(struct lpfc_hba *, struct lpfc_nodelist *);
-+
-+struct lpfc_nodelist *lpfc_findnode_rpi(struct lpfc_hba * phba, uint16_t rpi);
-+struct lpfc_nodelist *lpfc_findnode_remove_rpi(struct lpfc_hba * phba,
-+ uint16_t rpi);
-+void lpfc_addnode_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
-+ uint16_t rpi);
-+
-+int lpfc_discq_post_event(struct lpfc_hba *, void *, void *, uint32_t);
-+int lpfc_do_dpc(void *);
-+void lpfc_evt_iocb_free(struct lpfc_hba *, struct lpfc_iocbq *);
-+int lpfc_disc_state_machine(struct lpfc_hba *, struct lpfc_nodelist *, void *,
-+ uint32_t);
-+
-+uint32_t lpfc_cmpl_prli_reglogin_issue(struct lpfc_hba *,
-+ struct lpfc_nodelist *, void *,
-+ uint32_t);
-+uint32_t lpfc_cmpl_plogi_prli_issue(struct lpfc_hba *, struct lpfc_nodelist *,
-+ void *, uint32_t);
-+
-+int lpfc_check_sparm(struct lpfc_hba *, struct lpfc_nodelist *,
-+ struct serv_parm *, uint32_t);
-+int lpfc_els_abort(struct lpfc_hba *, struct lpfc_nodelist * ndlp,
-+ int);
-+int lpfc_els_abort_flogi(struct lpfc_hba *);
-+int lpfc_initial_flogi(struct lpfc_hba *);
-+void lpfc_more_plogi(struct lpfc_hba *);
-+int lpfc_issue_els_plogi(struct lpfc_hba *, struct lpfc_nodelist *, uint8_t);
-+int lpfc_issue_els_prli(struct lpfc_hba *, struct lpfc_nodelist *, uint8_t);
-+int lpfc_issue_els_adisc(struct lpfc_hba *, struct lpfc_nodelist *, uint8_t);
-+int lpfc_issue_els_logo(struct lpfc_hba *, struct lpfc_nodelist *, uint8_t);
-+int lpfc_issue_els_scr(struct lpfc_hba *, uint32_t, uint8_t);
-+int lpfc_els_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *);
-+int lpfc_els_rsp_acc(struct lpfc_hba *, uint32_t, struct lpfc_iocbq *,
-+ struct lpfc_nodelist *, LPFC_MBOXQ_t *, uint8_t);
-+int lpfc_els_rsp_reject(struct lpfc_hba *, uint32_t, struct lpfc_iocbq *,
-+ struct lpfc_nodelist *);
-+int lpfc_els_rsp_adisc_acc(struct lpfc_hba *, struct lpfc_iocbq *,
-+ struct lpfc_nodelist *);
-+int lpfc_els_rsp_prli_acc(struct lpfc_hba *, struct lpfc_iocbq *,
-+ struct lpfc_nodelist *);
-+void lpfc_els_retry_delay(unsigned long);
-+void lpfc_els_retry_delay_handler(struct lpfc_nodelist *);
-+void lpfc_els_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
-+ struct lpfc_iocbq *);
-+int lpfc_els_handle_rscn(struct lpfc_hba *);
-+int lpfc_els_flush_rscn(struct lpfc_hba *);
-+int lpfc_rscn_payload_check(struct lpfc_hba *, uint32_t);
-+void lpfc_els_flush_cmd(struct lpfc_hba *);
-+int lpfc_els_disc_adisc(struct lpfc_hba *);
-+int lpfc_els_disc_plogi(struct lpfc_hba *);
-+void lpfc_els_timeout(unsigned long);
-+void lpfc_els_timeout_handler(struct lpfc_hba *);
-+
-+void lpfc_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
-+ struct lpfc_iocbq *);
-+int lpfc_ns_cmd(struct lpfc_hba *, struct lpfc_nodelist *, int);
-+int lpfc_fdmi_cmd(struct lpfc_hba *, struct lpfc_nodelist *, int);
-+void lpfc_fdmi_tmo(unsigned long);
-+void lpfc_fdmi_tmo_handler(struct lpfc_hba *);
-+
-+int lpfc_config_port_prep(struct lpfc_hba *);
-+int lpfc_config_port_post(struct lpfc_hba *);
-+int lpfc_hba_down_prep(struct lpfc_hba *);
-+void lpfc_handle_eratt(struct lpfc_hba *, uint32_t);
-+void lpfc_handle_latt(struct lpfc_hba *);
-+void lpfc_hba_init(struct lpfc_hba *, uint32_t *);
-+int lpfc_post_buffer(struct lpfc_hba *, struct lpfc_sli_ring *, int, int);
-+void lpfc_cleanup(struct lpfc_hba *, uint32_t);
-+int lpfc_scsi_free(struct lpfc_hba *);
-+void lpfc_decode_firmware_rev(struct lpfc_hba *, char *, int);
-+uint8_t *lpfc_get_lpfchba_info(struct lpfc_hba *, uint8_t *);
-+int lpfc_fcp_abort(struct lpfc_hba *, int, int, int);
-+int lpfc_put_event(struct lpfc_hba *, uint32_t, uint32_t, void *,
-+ uint32_t, uint32_t);
-+int lpfc_online(struct lpfc_hba *);
-+int lpfc_offline(struct lpfc_hba *);
-+
-+
-+
-+int lpfc_sli_queue_setup(struct lpfc_hba *);
-+void lpfc_slim_access(struct lpfc_hba *);
-+
-+void lpfc_handle_eratt(struct lpfc_hba *, uint32_t);
-+void lpfc_handle_latt(struct lpfc_hba *);
-+irqreturn_t lpfc_intr_handler(int, void *, struct pt_regs *);
-+
-+void lpfc_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *);
-+void lpfc_config_ring(struct lpfc_hba *, int, LPFC_MBOXQ_t *);
-+void lpfc_config_port(struct lpfc_hba *, LPFC_MBOXQ_t *);
-+void lpfc_mbox_put(struct lpfc_hba *, LPFC_MBOXQ_t *);
-+LPFC_MBOXQ_t *lpfc_mbox_get(struct lpfc_hba *);
-+
-+int lpfc_mem_alloc(struct lpfc_hba *);
-+void lpfc_mem_free(struct lpfc_hba *);
-+
-+struct lpfc_iocbq *
-+lpfc_prep_els_iocb(struct lpfc_hba * phba,
-+ uint8_t expectRsp,
-+ uint16_t cmdSize,
-+ uint8_t retry, struct lpfc_nodelist * ndlp, uint32_t elscmd);
-+
-+int lpfc_sli_hba_setup(struct lpfc_hba *);
-+int lpfc_sli_hba_down(struct lpfc_hba *);
-+int lpfc_sli_intr(struct lpfc_hba *);
-+int lpfc_sli_issue_mbox(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
-+void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
-+int lpfc_sli_issue_iocb(struct lpfc_hba *, struct lpfc_sli_ring *,
-+ struct lpfc_iocbq *, uint32_t);
-+void lpfc_sli_pcimem_bcopy(uint32_t *, uint32_t *, uint32_t);
-+int lpfc_sli_ringpostbuf_put(struct lpfc_hba *, struct lpfc_sli_ring *,
-+ struct lpfc_dmabuf *);
-+struct lpfc_dmabuf *lpfc_sli_ringpostbuf_get(struct lpfc_hba *,
-+ struct lpfc_sli_ring *,
-+ dma_addr_t);
-+uint32_t lpfc_sli_next_iotag(struct lpfc_hba *, struct lpfc_sli_ring *);
-+int lpfc_sli_issue_abort_iotag32(struct lpfc_hba *, struct lpfc_sli_ring *,
-+ struct lpfc_iocbq *);
-+int lpfc_sli_abort_iocb_ctx(struct lpfc_hba *, struct lpfc_sli_ring *,
-+ uint32_t);
-+int lpfc_sli_sum_iocb_host(struct lpfc_hba *, struct lpfc_sli_ring *);
-+int lpfc_sli_abort_iocb_host(struct lpfc_hba *, struct lpfc_sli_ring *, int);
-+int lpfc_sli_sum_iocb_lun(struct lpfc_hba *, struct lpfc_sli_ring *, uint16_t,
-+ uint64_t);
-+int lpfc_sli_abort_iocb_lun(struct lpfc_hba *, struct lpfc_sli_ring *, uint16_t,
-+ uint64_t, int);
-+int lpfc_sli_abort_iocb_tgt(struct lpfc_hba *, struct lpfc_sli_ring *,
-+ uint16_t, int);
-+void lpfc_mbox_timeout(unsigned long);
-+void lpfc_mbox_timeout_handler(struct lpfc_hba *);
-+void lpfc_map_fcp_cmnd_to_bpl(struct lpfc_hba *, struct lpfc_scsi_buf *);
-+void lpfc_free_scsi_cmd(struct lpfc_scsi_buf *);
-+uint32_t lpfc_os_timeout_transform(struct lpfc_hba *, uint32_t);
-+
-+struct lpfc_nodelist *
-+lpfc_findnode_wwpn(struct lpfc_hba * phba, uint32_t order,
-+ struct lpfc_name * wwpn);
-+struct lpfc_nodelist *
-+lpfc_findnode_wwnn(struct lpfc_hba * phba, uint32_t order,
-+ struct lpfc_name * wwnn);
-+struct lpfc_nodelist *lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order,
-+ uint32_t did);
-+
-+int lpfc_sli_issue_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq,
-+ uint32_t timeout);
-+
-+int
-+lpfc_sli_issue_iocb_wait(struct lpfc_hba * phba,
-+ struct lpfc_sli_ring * pring,
-+ struct lpfc_iocbq * piocb,
-+ struct lpfc_iocbq * prspiocbq, uint32_t timeout);
-+int lpfc_sli_issue_iocb_wait_high_priority(struct lpfc_hba * phba,
-+ struct lpfc_sli_ring * pring,
-+ struct lpfc_iocbq * piocb,
-+ uint32_t flag,
-+ struct lpfc_iocbq * prspiocbq);
-+void lpfc_sli_wake_iocb_high_priority(struct lpfc_hba * phba,
-+ struct lpfc_iocbq * queue1,
-+ struct lpfc_iocbq * queue2);
-+void lpfc_sli_abort_fcp_cmpl(struct lpfc_hba * phba,
-+ struct lpfc_iocbq * cmdiocb,
-+ struct lpfc_iocbq * rspiocb);
-+void *lpfc_mbuf_alloc(struct lpfc_hba *, int, dma_addr_t *);
-+void lpfc_mbuf_free(struct lpfc_hba *, void *, dma_addr_t);
-+
-+int lpfc_stop_timer(struct lpfc_hba *);
-+
-+
-+/* Function prototypes. */
-+int lpfc_queuecommand(struct scsi_cmnd *, void (*done) (struct scsi_cmnd *));
-+int lpfc_abort_handler(struct scsi_cmnd *);
-+int lpfc_reset_bus_handler(struct scsi_cmnd *);
-+int lpfc_reset_lun_handler(struct scsi_cmnd *);
-+void lpfc_free_scsi_buf(struct lpfc_scsi_buf *);
-+
-+#if defined(RHEL_FC) || defined(SLES_FC)
-+void lpfc_target_unblock(struct lpfc_hba *, struct lpfc_target *);
-+void lpfc_target_block(struct lpfc_hba *, struct lpfc_target *);
-+int lpfc_target_remove(struct lpfc_hba *, struct lpfc_target *);
-+int lpfc_target_add(struct lpfc_hba *, struct lpfc_target *);
-+#endif
-+
-+#define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code)
-+#define HBA_EVENT_RSCN 5
-+#define HBA_EVENT_LINK_UP 2
-+#define HBA_EVENT_LINK_DOWN 3
-+#endif /* _H_LPFC_CRTN */
---- linux-2.6.8.1-t044-driver-update//drivers/scsi/lpfc/lpfc_els.c 1970-01-01 03:00:00.000000000 +0300
-+++ rhel4u2//drivers/scsi/lpfc/lpfc_els.c 2005-10-19 11:47:17.000000000 +0400
-@@ -0,0 +1,3152 @@
-+/*******************************************************************
-+ * This file is part of the Emulex Linux Device Driver for *
-+ * Fibre Channel Host Bus Adapters. *
-+ * Copyright (C) 2003-2005 Emulex. All rights reserved. *
-+ * EMULEX and SLI are trademarks of Emulex. *
-+ * www.emulex.com *
-+ * *
-+ * This program is free software; you can redistribute it and/or *
-+ * modify it under the terms of version 2 of the GNU General *
-+ * Public License as published by the Free Software Foundation. *
-+ * This program is distributed in the hope that it will be useful. *
-+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
-+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
-+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
-+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
-+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
-+ * more details, a copy of which can be found in the file COPYING *
-+ * included with this package. *
-+ *******************************************************************/
-+
-+/*
-+ * $Id: lpfc_els.c 1.165.2.3 2005/07/08 19:33:28EDT sf_support Exp $
-+ */
-+#include <linux/version.h>
-+#include <linux/blkdev.h>
-+#include <linux/dma-mapping.h>
-+#include <linux/pci.h>
-+#include <linux/spinlock.h>
-+#include <scsi/scsi_device.h>
-+#include <scsi/scsi_host.h>
-+#include "lpfc_sli.h"
-+#include "lpfc_disc.h"
-+#include "lpfc_scsi.h"
-+#include "lpfc.h"
-+#include "lpfc_crtn.h"
-+#include "lpfc_hw.h"
-+#include "lpfc_logmsg.h"
-+#include "lpfc_mem.h"
-+
-+
-+static int lpfc_els_retry(struct lpfc_hba *, struct lpfc_iocbq *,
-+ struct lpfc_iocbq *);
-+static int lpfc_max_els_tries = 3;
-+
-+static int
-+lpfc_els_chk_latt(struct lpfc_hba * phba)
-+{
-+ struct lpfc_sli *psli;
-+ LPFC_MBOXQ_t *mbox;
-+ uint32_t ha_copy;
-+
-+ psli = &phba->sli;
-+
-+ if ((phba->hba_state < LPFC_HBA_READY) &&
-+ (phba->hba_state != LPFC_LINK_DOWN)) {
-+
-+ /* Read the HBA Host Attention Register */
-+ ha_copy = readl(phba->HAregaddr);
-+
-+ if (ha_copy & HA_LATT) { /* Link Attention interrupt */
-+
-+ /* Pending Link Event during Discovery */
-+ lpfc_printf_log(phba, KERN_WARNING, LOG_DISCOVERY,
-+ "%d:0237 Pending Link Event during "
-+ "Discovery: State x%x\n",
-+ phba->brd_no, phba->hba_state);
-+
-+ /* CLEAR_LA should re-enable link attention events and
-+			 * we should then immediately take a LATT event. The
-+			 * LATT processing should call lpfc_linkdown() which
-+			 * will clean up any leftover in-progress discovery
-+ * events.
-+ */
-+ phba->fc_flag |= FC_ABORT_DISCOVERY;
-+
-+ if (phba->hba_state != LPFC_CLEAR_LA) {
-+ if ((mbox = mempool_alloc(phba->mbox_mem_pool,
-+ GFP_ATOMIC))) {
-+ phba->hba_state = LPFC_CLEAR_LA;
-+ lpfc_clear_la(phba, mbox);
-+ mbox->mbox_cmpl =
-+ lpfc_mbx_cmpl_clear_la;
-+ if (lpfc_sli_issue_mbox
-+ (phba, mbox,
-+ (MBX_NOWAIT | MBX_STOP_IOCB))
-+ == MBX_NOT_FINISHED) {
-+ mempool_free(mbox,
-+ phba->mbox_mem_pool);
-+ phba->hba_state =
-+ LPFC_HBA_ERROR;
-+ }
-+ }
-+ }
-+ return (1);
-+ }
-+ }
-+
-+ return (0);
-+}
-+
-+struct lpfc_iocbq *
-+lpfc_prep_els_iocb(struct lpfc_hba * phba,
-+ uint8_t expectRsp,
-+ uint16_t cmdSize,
-+ uint8_t retry, struct lpfc_nodelist * ndlp, uint32_t elscmd)
-+{
-+ struct lpfc_sli *psli;
-+ struct lpfc_sli_ring *pring;
-+ struct lpfc_iocbq *elsiocb;
-+ struct lpfc_dmabuf *pcmd, *prsp, *pbuflist;
-+ struct ulp_bde64 *bpl;
-+ IOCB_t *icmd;
-+ uint32_t tag;
-+
-+ psli = &phba->sli;
-+ pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
-+
-+ if (phba->hba_state < LPFC_LINK_UP)
-+ return NULL;
-+
-+
-+ /* Allocate buffer for command iocb */
-+ elsiocb = mempool_alloc(phba->iocb_mem_pool, GFP_ATOMIC);
-+ if (!elsiocb)
-+ return NULL;
-+
-+ memset(elsiocb, 0, sizeof (struct lpfc_iocbq));
-+ icmd = &elsiocb->iocb;
-+
-+ /* fill in BDEs for command */
-+ /* Allocate buffer for command payload */
-+ if (((pcmd = kmalloc(sizeof (struct lpfc_dmabuf), GFP_ATOMIC)) == 0) ||
-+ ((pcmd->virt = lpfc_mbuf_alloc(phba,
-+ MEM_PRI, &(pcmd->phys))) == 0)) {
-+ if (pcmd)
-+ kfree(pcmd);
-+ mempool_free( elsiocb, phba->iocb_mem_pool);
-+ return NULL;
-+ }
-+
-+ INIT_LIST_HEAD(&pcmd->list);
-+
-+ /* Allocate buffer for response payload */
-+ if (expectRsp) {
-+ prsp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_ATOMIC);
-+ if (prsp)
-+ prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
-+ &prsp->phys);
-+ if (prsp == 0 || prsp->virt == 0) {
-+ if (prsp)
-+ kfree(prsp);
-+ lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
-+ kfree(pcmd);
-+ mempool_free( elsiocb, phba->iocb_mem_pool);
-+ return NULL;
-+ }
-+ INIT_LIST_HEAD(&prsp->list);
-+ } else {
-+ prsp = NULL;
-+ }
-+
-+ /* Allocate buffer for Buffer ptr list */
-+ pbuflist = kmalloc(sizeof (struct lpfc_dmabuf), GFP_ATOMIC);
-+ if (pbuflist)
-+ pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
-+ &pbuflist->phys);
-+	if (pbuflist == 0 || pbuflist->virt == 0) {
-+		mempool_free( elsiocb, phba->iocb_mem_pool);
-+		lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
-+		kfree(pcmd);
-+		if (prsp) {
-+			lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
-+			kfree(prsp);
-+		}
-+		if (pbuflist)
-+			kfree(pbuflist);
-+		return NULL;
-+	}
-+
-+ INIT_LIST_HEAD(&pbuflist->list);
-+
-+ icmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
-+ icmd->un.elsreq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
-+ icmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BDL;
-+ if (expectRsp) {
-+ icmd->un.elsreq64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64));
-+ icmd->un.elsreq64.remoteID = ndlp->nlp_DID; /* DID */
-+ icmd->ulpCommand = CMD_ELS_REQUEST64_CR;
-+ } else {
-+ icmd->un.elsreq64.bdl.bdeSize = sizeof (struct ulp_bde64);
-+ icmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX;
-+ }
-+
-+ /* NOTE: we don't use ulpIoTag0 because it is a t2 structure */
-+ tag = lpfc_sli_next_iotag(phba, pring);
-+ icmd->ulpIoTag = (uint16_t)(tag & 0xffff);
-+ icmd->un.elsreq64.bdl.ulpIoTag32 = tag;
-+ icmd->ulpBdeCount = 1;
-+ icmd->ulpLe = 1;
-+ icmd->ulpClass = CLASS3;
-+
-+ bpl = (struct ulp_bde64 *) pbuflist->virt;
-+ bpl->addrLow = le32_to_cpu(putPaddrLow(pcmd->phys));
-+ bpl->addrHigh = le32_to_cpu(putPaddrHigh(pcmd->phys));
-+ bpl->tus.f.bdeSize = cmdSize;
-+ bpl->tus.f.bdeFlags = 0;
-+ bpl->tus.w = le32_to_cpu(bpl->tus.w);
-+
-+ if (expectRsp) {
-+ bpl++;
-+ bpl->addrLow = le32_to_cpu(putPaddrLow(prsp->phys));
-+ bpl->addrHigh = le32_to_cpu(putPaddrHigh(prsp->phys));
-+ bpl->tus.f.bdeSize = FCELSSIZE;
-+ bpl->tus.f.bdeFlags = BUFF_USE_RCV;
-+ bpl->tus.w = le32_to_cpu(bpl->tus.w);
-+ }
-+
-+ /* Save for completion so we can release these resources */
-+ elsiocb->context1 = (uint8_t *) ndlp;
-+ elsiocb->context2 = (uint8_t *) pcmd;
-+ elsiocb->context3 = (uint8_t *) pbuflist;
-+ elsiocb->retry = retry;
-+ elsiocb->drvrTimeout = (phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT;
-+
-+ if (prsp) {
-+ list_add(&prsp->list, &pcmd->list);
-+ }
-+
-+ if (expectRsp) {
-+ /* Xmit ELS command <elsCmd> to remote NPORT <did> */
-+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
-+ "%d:0116 Xmit ELS command x%x to remote "
-+ "NPORT x%x Data: x%x x%x\n",
-+ phba->brd_no, elscmd,
-+ ndlp->nlp_DID, icmd->ulpIoTag, phba->hba_state);
-+ } else {
-+ /* Xmit ELS response <elsCmd> to remote NPORT <did> */
-+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
-+ "%d:0117 Xmit ELS response x%x to remote "
-+ "NPORT x%x Data: x%x x%x\n",
-+ phba->brd_no, elscmd,
-+ ndlp->nlp_DID, icmd->ulpIoTag, cmdSize);
-+ }
-+
-+ return (elsiocb);
-+}
-+
-+static void
-+lpfc_cmpl_els_flogi(struct lpfc_hba * phba,
-+ struct lpfc_iocbq * cmdiocb, struct lpfc_iocbq * rspiocb)
-+{
-+ IOCB_t *irsp;
-+ struct lpfc_dmabuf *pcmd, *prsp;
-+ struct serv_parm *sp;
-+ uint32_t *lp;
-+ LPFC_MBOXQ_t *mbox;
-+ struct lpfc_sli *psli;
-+ struct lpfc_nodelist *ndlp;
-+ int rc;
-+
-+ psli = &phba->sli;
-+ irsp = &(rspiocb->iocb);
-+ ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
-+ pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
-+
-+ /* Check to see if link went down during discovery */
-+ if (lpfc_els_chk_latt(phba)) {
-+ lpfc_nlp_remove(phba, ndlp);
-+ goto out;
-+ }
-+
-+ if (irsp->ulpStatus) {
-+ /* FLOGI failure */
-+ lpfc_printf_log(phba,
-+ KERN_INFO,
-+ LOG_ELS,
-+ "%d:0100 FLOGI failure Data: x%x x%x\n",
-+ phba->brd_no,
-+ irsp->ulpStatus, irsp->un.ulpWord[4]);
-+
-+ /* Check for retry */
-+ if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
-+ /* ELS command is being retried */
-+ goto out;
-+ }
-+ /* FLOGI failed, so there is no fabric */
-+ phba->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
-+
-+		/* If private loop, then allow max outstanding els to be
-+ * LPFC_MAX_DISC_THREADS (32). Scanning in the case of no
-+ * alpa map would take too long otherwise.
-+ */
-+ if (phba->alpa_map[0] == 0) {
-+ phba->cfg_discovery_threads =
-+ LPFC_MAX_DISC_THREADS;
-+ }
-+
-+ } else {
-+		/* The FLOGI succeeded. Sync the data for the CPU before
-+ * accessing it.
-+ */
-+ prsp = (struct lpfc_dmabuf *) pcmd->list.next;
-+ lp = (uint32_t *) prsp->virt;
-+
-+ sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
-+
-+ /* FLOGI completes successfully */
-+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
-+				"%d:0101 FLOGI completes successfully "
-+ "Data: x%x x%x x%x x%x\n",
-+ phba->brd_no,
-+ irsp->un.ulpWord[4], sp->cmn.e_d_tov,
-+ sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution);
-+
-+ if (phba->hba_state == LPFC_FLOGI) {
-+			/* If Common Service Parameters indicate Nport,
-+			 * we are point-to-point; if Fport, we are Fabric.
-+ */
-+ if (sp->cmn.fPort) {
-+ phba->fc_flag |= FC_FABRIC;
-+ if (sp->cmn.edtovResolution) {
-+ /* E_D_TOV ticks are in nanoseconds */
-+ phba->fc_edtov =
-+ (be32_to_cpu(sp->cmn.e_d_tov) +
-+ 999999) / 1000000;
-+ } else {
-+ /* E_D_TOV ticks are in milliseconds */
-+ phba->fc_edtov =
-+ be32_to_cpu(sp->cmn.e_d_tov);
-+ }
-+ phba->fc_ratov =
-+ (be32_to_cpu(sp->cmn.w2.r_a_tov) +
-+ 999) / 1000;
-+
-+ if (phba->fc_topology == TOPOLOGY_LOOP) {
-+ phba->fc_flag |= FC_PUBLIC_LOOP;
-+ } else {
-+					/* If we are an N_Port connected to a
-+					 * Fabric, fix up the sparams so logins to
-+ * devices on remote loops work.
-+ */
-+ phba->fc_sparam.cmn.altBbCredit = 1;
-+ }
-+
-+ phba->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
-+
-+ memcpy(&ndlp->nlp_portname, &sp->portName,
-+ sizeof (struct lpfc_name));
-+ memcpy(&ndlp->nlp_nodename, &sp->nodeName,
-+ sizeof (struct lpfc_name));
-+ memcpy(&phba->fc_fabparam, sp,
-+ sizeof (struct serv_parm));
-+ if ((mbox = mempool_alloc(phba->mbox_mem_pool,
-+ GFP_ATOMIC)) == 0) {
-+ goto flogifail;
-+ }
-+ phba->hba_state = LPFC_FABRIC_CFG_LINK;
-+ lpfc_config_link(phba, mbox);
-+ mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
-+ if (lpfc_sli_issue_mbox
-+ (phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB))
-+ == MBX_NOT_FINISHED) {
-+ mempool_free(mbox, phba->mbox_mem_pool);
-+ goto flogifail;
-+ }
-+
-+ if ((mbox = mempool_alloc(phba->mbox_mem_pool,
-+ GFP_ATOMIC)) == 0) {
-+ goto flogifail;
-+ }
-+ if (lpfc_reg_login(phba, Fabric_DID,
-+ (uint8_t *) sp, mbox,
-+ 0) == 0) {
-+ /* set_slim mailbox command needs to
-+ * execute first, queue this command to
-+ * be processed later.
-+ */
-+ mbox->mbox_cmpl =
-+ lpfc_mbx_cmpl_fabric_reg_login;
-+ mbox->context2 = ndlp;
-+ if (lpfc_sli_issue_mbox
-+ (phba, mbox,
-+ (MBX_NOWAIT | MBX_STOP_IOCB))
-+ == MBX_NOT_FINISHED) {
-+ mempool_free(mbox,
-+ phba->mbox_mem_pool);
-+ goto flogifail;
-+ }
-+ } else {
-+ mempool_free(mbox, phba->mbox_mem_pool);
-+ goto flogifail;
-+ }
-+ } else {
-+ /* We FLOGIed into an NPort, initiate pt2pt
-+ protocol */
-+ phba->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
-+ phba->fc_edtov = FF_DEF_EDTOV;
-+ phba->fc_ratov = FF_DEF_RATOV;
-+ rc = memcmp(&phba->fc_portname, &sp->portName,
-+ sizeof(struct lpfc_name));
-+ if (rc >= 0) {
-+ /* This side will initiate the PLOGI */
-+ phba->fc_flag |= FC_PT2PT_PLOGI;
-+
-+					/* N_Port ID cannot be 0; set ours to
-+					 * LocalID, the other side will be
-+ * RemoteID.
-+ */
-+
-+ /* not equal */
-+ if (rc)
-+ phba->fc_myDID = PT2PT_LocalID;
-+
-+ if ((mbox =
-+ mempool_alloc(phba->mbox_mem_pool,
-+ GFP_ATOMIC))
-+ == 0) {
-+ goto flogifail;
-+ }
-+ lpfc_config_link(phba, mbox);
-+ mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
-+ if (lpfc_sli_issue_mbox
-+ (phba, mbox,
-+ (MBX_NOWAIT | MBX_STOP_IOCB))
-+ == MBX_NOT_FINISHED) {
-+ mempool_free(mbox,
-+ phba->mbox_mem_pool);
-+ goto flogifail;
-+ }
-+ mempool_free( ndlp, phba->nlp_mem_pool);
-+
-+ if ((ndlp =
-+ lpfc_findnode_did(phba,
-+ NLP_SEARCH_ALL,
-+ PT2PT_RemoteID))
-+ == 0) {
-+ /* Cannot find existing Fabric
-+ ndlp, so allocate a new
-+ one */
-+ if ((ndlp =
-+ mempool_alloc(
-+ phba->nlp_mem_pool,
-+ GFP_ATOMIC)) == 0) {
-+ goto flogifail;
-+ }
-+ lpfc_nlp_init(phba, ndlp,
-+ PT2PT_RemoteID);
-+ }
-+ memcpy(&ndlp->nlp_portname,
-+ &sp->portName,
-+ sizeof (struct lpfc_name));
-+ memcpy(&ndlp->nlp_nodename,
-+ &sp->nodeName,
-+ sizeof (struct lpfc_name));
-+ ndlp->nlp_state = NLP_STE_NPR_NODE;
-+ lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
-+ ndlp->nlp_flag |= NLP_NPR_2B_DISC;
-+ }
-+ else {
-+ /* This side will wait for the PLOGI */
-+ mempool_free( ndlp, phba->nlp_mem_pool);
-+ }
-+
-+ phba->fc_flag |= FC_PT2PT;
-+
-+ /* Start discovery - this should just do
-+ CLEAR_LA */
-+ lpfc_disc_start(phba);
-+ }
-+ goto out;
-+ }
-+ }
-+
-+flogifail:
-+ lpfc_nlp_remove(phba, ndlp);
-+
-+ if((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
-+ ((irsp->un.ulpWord[4] != IOERR_SLI_ABORTED) &&
-+ (irsp->un.ulpWord[4] != IOERR_SLI_DOWN))) {
-+
-+ /* FLOGI failed, so just use loop map to make discovery list */
-+ lpfc_disc_list_loopmap(phba);
-+
-+ /* Start discovery */
-+ lpfc_disc_start(phba);
-+ }
-+
-+out:
-+ lpfc_els_free_iocb(phba, cmdiocb);
-+ return;
-+}
-+
-+static int
-+lpfc_issue_els_flogi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
-+ uint8_t retry)
-+{
-+ struct serv_parm *sp;
-+ IOCB_t *icmd;
-+ struct lpfc_iocbq *elsiocb;
-+ struct lpfc_sli_ring *pring;
-+ struct lpfc_sli *psli;
-+ uint8_t *pcmd;
-+ uint16_t cmdsize;
-+ uint32_t tmo;
-+
-+ psli = &phba->sli;
-+ pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
-+
-+ cmdsize = (sizeof (uint32_t) + sizeof (struct serv_parm));
-+ if ((elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry,
-+ ndlp, ELS_CMD_FLOGI)) == 0) {
-+ return (1);
-+ }
-+
-+ icmd = &elsiocb->iocb;
-+ pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
-+
-+ /* For FLOGI request, remainder of payload is service parameters */
-+ *((uint32_t *) (pcmd)) = ELS_CMD_FLOGI;
-+ pcmd += sizeof (uint32_t);
-+ memcpy(pcmd, &phba->fc_sparam, sizeof (struct serv_parm));
-+ sp = (struct serv_parm *) pcmd;
-+
-+ /* Setup CSPs accordingly for Fabric */
-+ sp->cmn.e_d_tov = 0;
-+ sp->cmn.w2.r_a_tov = 0;
-+ sp->cls1.classValid = 0;
-+ sp->cls2.seqDelivery = 1;
-+ sp->cls3.seqDelivery = 1;
-+ if (sp->cmn.fcphLow < FC_PH3)
-+ sp->cmn.fcphLow = FC_PH3;
-+ if (sp->cmn.fcphHigh < FC_PH3)
-+ sp->cmn.fcphHigh = FC_PH3;
-+
-+ tmo = phba->fc_ratov;
-+ phba->fc_ratov = LPFC_DISC_FLOGI_TMO;
-+ lpfc_set_disctmo(phba);
-+ phba->fc_ratov = tmo;
-+
-+ phba->fc_stat.elsXmitFLOGI++;
-+ elsiocb->iocb_cmpl = lpfc_cmpl_els_flogi;
-+ if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
-+ lpfc_els_free_iocb(phba, elsiocb);
-+ return (1);
-+ }
-+ return (0);
-+}
-+
-+int
-+lpfc_els_abort_flogi(struct lpfc_hba * phba)
-+{
-+ struct lpfc_sli *psli;
-+ struct lpfc_sli_ring *pring;
-+ struct lpfc_iocbq *iocb, *next_iocb;
-+ struct lpfc_nodelist *ndlp;
-+ IOCB_t *icmd;
-+ struct list_head *curr, *next;
-+
-+ /* Abort outstanding I/O on NPort <nlp_DID> */
-+ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
-+ "%d:0201 Abort outstanding I/O on NPort x%x\n",
-+ phba->brd_no, Fabric_DID);
-+
-+ psli = &phba->sli;
-+ pring = &psli->ring[LPFC_ELS_RING];
-+
-+ /* check the txcmplq */
-+ list_for_each_safe(curr, next, &pring->txcmplq) {
-+ next_iocb = list_entry(curr, struct lpfc_iocbq, list);
-+ iocb = next_iocb;
-+ /* Check to see if iocb matches the nport we are
-+ looking for */
-+ icmd = &iocb->iocb;
-+ if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR) {
-+ ndlp = (struct lpfc_nodelist *)(iocb->context1);
-+ if(ndlp && (ndlp->nlp_DID == Fabric_DID)) {
-+				/* It matches, so dequeue and call the
-+				   completion handler with an error */
-+ list_del(&iocb->list);
-+ pring->txcmplq_cnt--;
-+
-+ if ((icmd->un.elsreq64.bdl.ulpIoTag32)) {
-+ lpfc_sli_issue_abort_iotag32
-+ (phba, pring, iocb);
-+ }
-+ if (iocb->iocb_cmpl) {
-+ icmd->ulpStatus =
-+ IOSTAT_LOCAL_REJECT;
-+ icmd->un.ulpWord[4] =
-+ IOERR_SLI_ABORTED;
-+ (iocb->iocb_cmpl) (phba, iocb, iocb);
-+ } else {
-+ mempool_free(iocb, phba->iocb_mem_pool);
-+ }
-+ }
-+ }
-+ }
-+ return (0);
-+}
-+
-+int
-+lpfc_initial_flogi(struct lpfc_hba * phba)
-+{
-+ struct lpfc_nodelist *ndlp;
-+
-+ /* First look for Fabric ndlp on the unmapped list */
-+
-+ if ((ndlp =
-+ lpfc_findnode_did(phba, NLP_SEARCH_UNMAPPED,
-+ Fabric_DID)) == 0) {
-+ /* Cannot find existing Fabric ndlp, so allocate a new one */
-+ if ((ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC))
-+ == 0) {
-+ return (0);
-+ }
-+ lpfc_nlp_init(phba, ndlp, Fabric_DID);
-+ }
-+ else {
-+ phba->fc_unmap_cnt--;
-+ list_del(&ndlp->nlp_listp);
-+ ndlp->nlp_flag &= ~NLP_LIST_MASK;
-+ }
-+ if (lpfc_issue_els_flogi(phba, ndlp, 0)) {
-+ mempool_free( ndlp, phba->nlp_mem_pool);
-+ }
-+ return (1);
-+}
-+
-+void
-+lpfc_more_plogi(struct lpfc_hba * phba)
-+{
-+ int sentplogi;
-+
-+ if (phba->num_disc_nodes)
-+ phba->num_disc_nodes--;
-+
-+ /* Continue discovery with <num_disc_nodes> PLOGIs to go */
-+ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
-+ "%d:0232 Continue discovery with %d PLOGIs to go "
-+ "Data: x%x x%x x%x\n",
-+ phba->brd_no, phba->num_disc_nodes, phba->fc_plogi_cnt,
-+ phba->fc_flag, phba->hba_state);
-+
-+ /* Check to see if there are more PLOGIs to be sent */
-+ if (phba->fc_flag & FC_NLP_MORE) {
-+ /* go thru NPR list and issue any remaining ELS PLOGIs */
-+ sentplogi = lpfc_els_disc_plogi(phba);
-+ }
-+ return;
-+}
-+
-+static void
-+lpfc_cmpl_els_plogi(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
-+ struct lpfc_iocbq * rspiocb)
-+{
-+ IOCB_t *irsp;
-+ struct lpfc_sli *psli;
-+ struct lpfc_nodelist *ndlp;
-+ int disc, rc, did, type;
-+ struct lpfc_nodelist *curr_ndlp, *next_ndlp;
-+ int valid_ndlp = 0;
-+
-+ psli = &phba->sli;
-+
-+ /* we pass cmdiocb to state machine which needs rspiocb as well */
-+ cmdiocb->context_un.rsp_iocb = rspiocb;
-+
-+ irsp = &rspiocb->iocb;
-+ ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
-+
-+ list_for_each_entry_safe(curr_ndlp, next_ndlp, &phba->fc_plogi_list,
-+ nlp_listp) {
-+ if (curr_ndlp == ndlp ) {
-+ valid_ndlp =1;
-+ break;
-+ }
-+ }
-+ if (!valid_ndlp)
-+ goto out;
-+
-+ ndlp->nlp_flag &= ~NLP_PLOGI_SND;
-+
-+ /* Since ndlp can be freed in the disc state machine, note if this node
-+ * is being used during discovery.
-+ */
-+ disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
-+ rc = 0;
-+
-+ /* PLOGI completes to NPort <nlp_DID> */
-+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
-+ "%d:0102 PLOGI completes to NPort x%x "
-+ "Data: x%x x%x x%x x%x\n",
-+ phba->brd_no, ndlp->nlp_DID, irsp->ulpStatus,
-+ irsp->un.ulpWord[4], disc, phba->num_disc_nodes);
-+
-+ /* Check to see if link went down during discovery */
-+ if (lpfc_els_chk_latt(phba)) {
-+ ndlp->nlp_flag |= NLP_NPR_2B_DISC;
-+ goto out;
-+ }
-+
-+ /* ndlp could be freed in DSM, save these values now */
-+ type = ndlp->nlp_type;
-+ did = ndlp->nlp_DID;
-+
-+ if (irsp->ulpStatus) {
-+ /* Check for retry */
-+ if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
-+ /* ELS command is being retried */
-+ if (disc) {
-+ ndlp->nlp_flag |= NLP_NPR_2B_DISC;
-+ }
-+ goto out;
-+ }
-+
-+ /* PLOGI failed */
-+ /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
-+ if((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
-+ ((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) ||
-+ (irsp->un.ulpWord[4] == IOERR_SLI_DOWN))) {
-+ disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
-+ }
-+ else {
-+ rc = lpfc_disc_state_machine(phba, ndlp, cmdiocb,
-+ NLP_EVT_CMPL_PLOGI);
-+ }
-+ } else {
-+ /* Good status, call state machine */
-+ rc = lpfc_disc_state_machine(phba, ndlp, cmdiocb,
-+ NLP_EVT_CMPL_PLOGI);
-+ }
-+
-+ if(type & NLP_FABRIC) {
-+ /* If we cannot login to Nameserver, kick off discovery now */
-+ if ((did == NameServer_DID) && (rc == NLP_STE_FREED_NODE)) {
-+ lpfc_disc_start(phba);
-+ }
-+ goto out;
-+ }
-+
-+ if (disc && phba->num_disc_nodes) {
-+ /* Check to see if there are more PLOGIs to be sent */
-+ lpfc_more_plogi(phba);
-+ }
-+
-+ if (rc != NLP_STE_FREED_NODE)
-+ ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
-+
-+ if (phba->num_disc_nodes == 0) {
-+ if(disc) {
-+ phba->fc_flag &= ~FC_NDISC_ACTIVE;
-+ }
-+ lpfc_can_disctmo(phba);
-+ if (phba->fc_flag & FC_RSCN_MODE) {
-+ /* Check to see if more RSCNs came in while we were
-+ * processing this one.
-+ */
-+ if ((phba->fc_rscn_id_cnt == 0) &&
-+ (!(phba->fc_flag & FC_RSCN_DISCOVERY))) {
-+ phba->fc_flag &= ~FC_RSCN_MODE;
-+ } else {
-+ lpfc_els_handle_rscn(phba);
-+ }
-+ }
-+ }
-+
-+out:
-+ lpfc_els_free_iocb(phba, cmdiocb);
-+ return;
-+}
-+
-+int
-+lpfc_issue_els_plogi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
-+ uint8_t retry)
-+{
-+ struct serv_parm *sp;
-+ IOCB_t *icmd;
-+ struct lpfc_iocbq *elsiocb;
-+ struct lpfc_sli_ring *pring;
-+ struct lpfc_sli *psli;
-+ uint8_t *pcmd;
-+ uint16_t cmdsize;
-+
-+ psli = &phba->sli;
-+ pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
-+
-+ cmdsize = (sizeof (uint32_t) + sizeof (struct serv_parm));
-+ if ((elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry,
-+ ndlp, ELS_CMD_PLOGI)) == 0) {
-+ return (1);
-+ }
-+
-+ icmd = &elsiocb->iocb;
-+ pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
-+
-+ /* For PLOGI request, remainder of payload is service parameters */
-+ *((uint32_t *) (pcmd)) = ELS_CMD_PLOGI;
-+ pcmd += sizeof (uint32_t);
-+ memcpy(pcmd, &phba->fc_sparam, sizeof (struct serv_parm));
-+ sp = (struct serv_parm *) pcmd;
-+
-+ if (sp->cmn.fcphLow < FC_PH_4_3)
-+ sp->cmn.fcphLow = FC_PH_4_3;
-+
-+ if (sp->cmn.fcphHigh < FC_PH3)
-+ sp->cmn.fcphHigh = FC_PH3;
-+
-+ phba->fc_stat.elsXmitPLOGI++;
-+ elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi;
-+ ndlp->nlp_flag |= NLP_PLOGI_SND;
-+ if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
-+ ndlp->nlp_flag &= ~NLP_PLOGI_SND;
-+ lpfc_els_free_iocb(phba, elsiocb);
-+ return (1);
-+ }
-+ return (0);
-+}
-+
-+static void
-+lpfc_cmpl_els_prli(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
-+ struct lpfc_iocbq * rspiocb)
-+{
-+ IOCB_t *irsp;
-+ struct lpfc_sli *psli;
-+ struct lpfc_nodelist *ndlp, *curr_ndlp, *next_ndlp;
-+ int valid_ndlp = 0;
-+
-+ psli = &phba->sli;
-+ /* we pass cmdiocb to state machine which needs rspiocb as well */
-+ cmdiocb->context_un.rsp_iocb = rspiocb;
-+
-+ irsp = &(rspiocb->iocb);
-+ ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
-+ phba->fc_prli_sent--;
-+ list_for_each_entry_safe(curr_ndlp, next_ndlp, &phba->fc_prli_list,
-+ nlp_listp) {
-+ if (curr_ndlp == ndlp ) {
-+ valid_ndlp =1;
-+ break;
-+ }
-+ }
-+
-+ if (!valid_ndlp)
-+ goto out;
-+
-+ ndlp->nlp_flag &= ~NLP_PRLI_SND;
-+
-+ /* PRLI completes to NPort <nlp_DID> */
-+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
-+ "%d:0103 PRLI completes to NPort x%x "
-+ "Data: x%x x%x x%x\n",
-+ phba->brd_no, ndlp->nlp_DID, irsp->ulpStatus,
-+ irsp->un.ulpWord[4], phba->num_disc_nodes);
-+
-+ /* Check to see if link went down during discovery */
-+ if (lpfc_els_chk_latt(phba))
-+ goto out;
-+
-+ if (irsp->ulpStatus) {
-+ /* Check for retry */
-+ if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
-+ /* ELS command is being retried */
-+ goto out;
-+ }
-+ /* PRLI failed */
-+ /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
-+ if((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
-+ ((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) ||
-+ (irsp->un.ulpWord[4] == IOERR_SLI_DOWN))) {
-+ goto out;
-+ }
-+ else {
-+ lpfc_disc_state_machine(phba, ndlp, cmdiocb,
-+ NLP_EVT_CMPL_PRLI);
-+ }
-+ } else {
-+ /* Good status, call state machine */
-+ lpfc_disc_state_machine(phba, ndlp, cmdiocb, NLP_EVT_CMPL_PRLI);
-+ }
-+
-+out:
-+ lpfc_els_free_iocb(phba, cmdiocb);
-+ return;
-+}
-+
-+int
-+lpfc_issue_els_prli(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
-+ uint8_t retry)
-+{
-+ PRLI *npr;
-+ IOCB_t *icmd;
-+ struct lpfc_iocbq *elsiocb;
-+ struct lpfc_sli_ring *pring;
-+ struct lpfc_sli *psli;
-+ uint8_t *pcmd;
-+ uint16_t cmdsize;
-+
-+ psli = &phba->sli;
-+ pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
-+
-+ cmdsize = (sizeof (uint32_t) + sizeof (PRLI));
-+ if ((elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry,
-+ ndlp, ELS_CMD_PRLI)) == 0) {
-+ return (1);
-+ }
-+
-+ icmd = &elsiocb->iocb;
-+ pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
-+
-+ /* For PRLI request, remainder of payload is service parameters */
-+ memset(pcmd, 0, (sizeof (PRLI) + sizeof (uint32_t)));
-+ *((uint32_t *) (pcmd)) = ELS_CMD_PRLI;
-+ pcmd += sizeof (uint32_t);
-+
-+ /* For PRLI, remainder of payload is PRLI parameter page */
-+ npr = (PRLI *) pcmd;
-+ /*
-+ * If our firmware version is 3.20 or later,
-+ * set the following bits for FC-TAPE support.
-+ */
-+ if (phba->vpd.rev.feaLevelHigh >= 0x02) {
-+ npr->ConfmComplAllowed = 1;
-+ npr->Retry = 1;
-+ npr->TaskRetryIdReq = 1;
-+ }
-+ npr->estabImagePair = 1;
-+ npr->readXferRdyDis = 1;
-+
-+ /* For FCP support */
-+ npr->prliType = PRLI_FCP_TYPE;
-+ npr->initiatorFunc = 1;
-+
-+ phba->fc_stat.elsXmitPRLI++;
-+ elsiocb->iocb_cmpl = lpfc_cmpl_els_prli;
-+ ndlp->nlp_flag |= NLP_PRLI_SND;
-+ if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
-+ ndlp->nlp_flag &= ~NLP_PRLI_SND;
-+ lpfc_els_free_iocb(phba, elsiocb);
-+ return (1);
-+ }
-+ phba->fc_prli_sent++;
-+ return (0);
-+}
-+
-+static void
-+lpfc_more_adisc(struct lpfc_hba * phba)
-+{
-+ int sentadisc;
-+
-+ if (phba->num_disc_nodes)
-+ phba->num_disc_nodes--;
-+
-+ /* Continue discovery with <num_disc_nodes> ADISCs to go */
-+ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
-+ "%d:0210 Continue discovery with %d ADISCs to go "
-+ "Data: x%x x%x x%x\n",
-+ phba->brd_no, phba->num_disc_nodes, phba->fc_adisc_cnt,
-+ phba->fc_flag, phba->hba_state);
-+
-+ /* Check to see if there are more ADISCs to be sent */
-+ if (phba->fc_flag & FC_NLP_MORE) {
-+ lpfc_set_disctmo(phba);
-+
-+ /* go thru NPR list and issue any remaining ELS ADISCs */
-+ sentadisc = lpfc_els_disc_adisc(phba);
-+ }
-+ return;
-+}
-+
-+static void
-+lpfc_rscn_disc(struct lpfc_hba * phba)
-+{
-+ /* RSCN discovery */
-+ /* go thru NPR list and issue ELS PLOGIs */
-+ if (phba->fc_npr_cnt) {
-+ if (lpfc_els_disc_plogi(phba))
-+ return;
-+ }
-+ if (phba->fc_flag & FC_RSCN_MODE) {
-+ /* Check to see if more RSCNs came in while we were
-+ * processing this one.
-+ */
-+ if ((phba->fc_rscn_id_cnt == 0) &&
-+ (!(phba->fc_flag & FC_RSCN_DISCOVERY))) {
-+ phba->fc_flag &= ~FC_RSCN_MODE;
-+ } else {
-+ lpfc_els_handle_rscn(phba);
-+ }
-+ }
-+}
-+
-+static void
-+lpfc_cmpl_els_adisc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
-+ struct lpfc_iocbq * rspiocb)
-+{
-+ IOCB_t *irsp;
-+ struct lpfc_sli *psli;
-+ struct lpfc_nodelist *ndlp;
-+ LPFC_MBOXQ_t *mbox;
-+ int disc;
-+
-+ psli = &phba->sli;
-+
-+ /* we pass cmdiocb to state machine which needs rspiocb as well */
-+ cmdiocb->context_un.rsp_iocb = rspiocb;
-+
-+ irsp = &(rspiocb->iocb);
-+ ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
-+ ndlp->nlp_flag &= ~NLP_ADISC_SND;
-+
-+ /* Since ndlp can be freed in the disc state machine, note if this node
-+ * is being used during discovery.
-+ */
-+ disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
-+
-+ /* ADISC completes to NPort <nlp_DID> */
-+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
-+ "%d:0104 ADISC completes to NPort x%x "
-+ "Data: x%x x%x x%x x%x\n",
-+ phba->brd_no, ndlp->nlp_DID, irsp->ulpStatus,
-+ irsp->un.ulpWord[4], disc, phba->num_disc_nodes);
-+
-+ /* Check to see if link went down during discovery */
-+ if (lpfc_els_chk_latt(phba)) {
-+ ndlp->nlp_flag |= NLP_NPR_2B_DISC;
-+ goto out;
-+ }
-+
-+ if (irsp->ulpStatus) {
-+ /* Check for retry */
-+ if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
-+ /* ELS command is being retried */
-+ if (disc) {
-+ ndlp->nlp_flag |= NLP_NPR_2B_DISC;
-+ }
-+ goto out;
-+ }
-+ /* ADISC failed */
-+ /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
-+ if((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
-+ ((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) ||
-+ (irsp->un.ulpWord[4] == IOERR_SLI_DOWN))) {
-+ disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
-+ }
-+ else {
-+ lpfc_disc_state_machine(phba, ndlp, cmdiocb,
-+ NLP_EVT_CMPL_ADISC);
-+ }
-+ } else {
-+ /* Good status, call state machine */
-+ lpfc_disc_state_machine(phba, ndlp, cmdiocb,
-+ NLP_EVT_CMPL_ADISC);
-+ }
-+
-+ if (disc && phba->num_disc_nodes) {
-+ /* Check to see if there are more ADISCs to be sent */
-+ lpfc_more_adisc(phba);
-+
-+ /* Check to see if we are done with ADISC authentication */
-+ if (phba->num_disc_nodes == 0) {
-+ lpfc_can_disctmo(phba);
-+ /* If we get here, there is nothing left to wait for */
-+ if ((phba->hba_state < LPFC_HBA_READY) &&
-+ (phba->hba_state != LPFC_CLEAR_LA)) {
-+ /* Link up discovery */
-+ if ((mbox = mempool_alloc(phba->mbox_mem_pool,
-+ GFP_ATOMIC))) {
-+ phba->hba_state = LPFC_CLEAR_LA;
-+ lpfc_clear_la(phba, mbox);
-+ mbox->mbox_cmpl =
-+ lpfc_mbx_cmpl_clear_la;
-+ if (lpfc_sli_issue_mbox
-+ (phba, mbox,
-+ (MBX_NOWAIT | MBX_STOP_IOCB))
-+ == MBX_NOT_FINISHED) {
-+ mempool_free(mbox,
-+ phba->mbox_mem_pool);
-+ lpfc_disc_flush_list(phba);
-+ psli->ring[(psli->ip_ring)].
-+ flag &=
-+ ~LPFC_STOP_IOCB_EVENT;
-+ psli->ring[(psli->fcp_ring)].
-+ flag &=
-+ ~LPFC_STOP_IOCB_EVENT;
-+ psli->ring[(psli->next_ring)].
-+ flag &=
-+ ~LPFC_STOP_IOCB_EVENT;
-+ phba->hba_state =
-+ LPFC_HBA_READY;
-+ }
-+ }
-+ } else {
-+ lpfc_rscn_disc(phba);
-+ }
-+ }
-+ }
-+ ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
-+out:
-+ lpfc_els_free_iocb(phba, cmdiocb);
-+ return;
-+}
-+
-+int
-+lpfc_issue_els_adisc(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
-+ uint8_t retry)
-+{
-+ ADISC *ap;
-+ IOCB_t *icmd;
-+ struct lpfc_iocbq *elsiocb;
-+ struct lpfc_sli_ring *pring;
-+ struct lpfc_sli *psli;
-+ uint8_t *pcmd;
-+ uint16_t cmdsize;
-+
-+ psli = &phba->sli;
-+ pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
-+
-+ cmdsize = (sizeof (uint32_t) + sizeof (ADISC));
-+ if ((elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry,
-+ ndlp, ELS_CMD_ADISC)) == 0) {
-+ return (1);
-+ }
-+
-+ icmd = &elsiocb->iocb;
-+ pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
-+
-+ /* For ADISC request, remainder of payload is service parameters */
-+ *((uint32_t *) (pcmd)) = ELS_CMD_ADISC;
-+ pcmd += sizeof (uint32_t);
-+
-+ /* Fill in ADISC payload */
-+ ap = (ADISC *) pcmd;
-+ ap->hardAL_PA = phba->fc_pref_ALPA;
-+ memcpy(&ap->portName, &phba->fc_portname, sizeof (struct lpfc_name));
-+ memcpy(&ap->nodeName, &phba->fc_nodename, sizeof (struct lpfc_name));
-+ ap->DID = be32_to_cpu(phba->fc_myDID);
-+
-+ phba->fc_stat.elsXmitADISC++;
-+ elsiocb->iocb_cmpl = lpfc_cmpl_els_adisc;
-+ ndlp->nlp_flag |= NLP_ADISC_SND;
-+ if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
-+ ndlp->nlp_flag &= ~NLP_ADISC_SND;
-+ lpfc_els_free_iocb(phba, elsiocb);
-+ return (1);
-+ }
-+ return (0);
-+}
-+
-+static void
-+lpfc_cmpl_els_logo(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
-+ struct lpfc_iocbq * rspiocb)
-+{
-+ IOCB_t *irsp;
-+ struct lpfc_sli *psli;
-+ struct lpfc_nodelist *ndlp;
-+
-+ psli = &phba->sli;
-+ /* we pass cmdiocb to state machine which needs rspiocb as well */
-+ cmdiocb->context_un.rsp_iocb = rspiocb;
-+
-+ irsp = &(rspiocb->iocb);
-+ ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
-+ ndlp->nlp_flag &= ~NLP_LOGO_SND;
-+
-+ /* LOGO completes to NPort <nlp_DID> */
-+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
-+ "%d:0105 LOGO completes to NPort x%x "
-+ "Data: x%x x%x x%x\n",
-+ phba->brd_no, ndlp->nlp_DID, irsp->ulpStatus,
-+ irsp->un.ulpWord[4], phba->num_disc_nodes);
-+
-+ /* Check to see if link went down during discovery */
-+ if (lpfc_els_chk_latt(phba))
-+ goto out;
-+
-+ if (irsp->ulpStatus) {
-+ /* Check for retry */
-+ if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
-+ /* ELS command is being retried */
-+ goto out;
-+ }
-+ /* LOGO failed */
-+ /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
-+ if((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
-+ ((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) ||
-+ (irsp->un.ulpWord[4] == IOERR_SLI_DOWN))) {
-+ goto out;
-+ }
-+ else {
-+ lpfc_disc_state_machine(phba, ndlp, cmdiocb,
-+ NLP_EVT_CMPL_LOGO);
-+ }
-+ } else {
-+ /* Good status, call state machine */
-+ lpfc_disc_state_machine(phba, ndlp, cmdiocb, NLP_EVT_CMPL_LOGO);
-+
-+ if(ndlp->nlp_flag & NLP_DELAY_TMO) {
-+ lpfc_unreg_rpi(phba, ndlp);
-+ }
-+ }
-+
-+out:
-+ lpfc_els_free_iocb(phba, cmdiocb);
-+ return;
-+}
-+
-+int
-+lpfc_issue_els_logo(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
-+ uint8_t retry)
-+{
-+ IOCB_t *icmd;
-+ struct lpfc_iocbq *elsiocb;
-+ struct lpfc_sli_ring *pring;
-+ struct lpfc_sli *psli;
-+ uint8_t *pcmd;
-+ uint16_t cmdsize;
-+
-+ psli = &phba->sli;
-+ pring = &psli->ring[LPFC_ELS_RING];
-+
-+ cmdsize = 2 * (sizeof (uint32_t) + sizeof (struct lpfc_name));
-+ if ((elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry,
-+ ndlp, ELS_CMD_LOGO)) == 0) {
-+ return (1);
-+ }
-+
-+ icmd = &elsiocb->iocb;
-+ pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
-+ *((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
-+ pcmd += sizeof (uint32_t);
-+
-+ /* Fill in LOGO payload */
-+ *((uint32_t *) (pcmd)) = be32_to_cpu(phba->fc_myDID);
-+ pcmd += sizeof (uint32_t);
-+ memcpy(pcmd, &phba->fc_portname, sizeof (struct lpfc_name));
-+
-+ phba->fc_stat.elsXmitLOGO++;
-+ elsiocb->iocb_cmpl = lpfc_cmpl_els_logo;
-+ ndlp->nlp_flag |= NLP_LOGO_SND;
-+ if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
-+ ndlp->nlp_flag &= ~NLP_LOGO_SND;
-+ lpfc_els_free_iocb(phba, elsiocb);
-+ return (1);
-+ }
-+ return (0);
-+}
-+
-+static void
-+lpfc_cmpl_els_cmd(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
-+ struct lpfc_iocbq * rspiocb)
-+{
-+ IOCB_t *irsp;
-+
-+ irsp = &rspiocb->iocb;
-+
-+ /* ELS cmd tag <ulpIoTag> completes */
-+ lpfc_printf_log(phba,
-+ KERN_INFO,
-+ LOG_ELS,
-+ "%d:0106 ELS cmd tag x%x completes Data: x%x x%x\n",
-+ phba->brd_no,
-+ irsp->ulpIoTag, irsp->ulpStatus, irsp->un.ulpWord[4]);
-+
-+ /* Check to see if link went down during discovery */
-+ lpfc_els_chk_latt(phba);
-+ lpfc_els_free_iocb(phba, cmdiocb);
-+ return;
-+}
-+
-+int
-+lpfc_issue_els_scr(struct lpfc_hba * phba, uint32_t nportid, uint8_t retry)
-+{
-+ IOCB_t *icmd;
-+ struct lpfc_iocbq *elsiocb;
-+ struct lpfc_sli_ring *pring;
-+ struct lpfc_sli *psli;
-+ uint8_t *pcmd;
-+ uint16_t cmdsize;
-+ struct lpfc_nodelist *ndlp;
-+
-+ psli = &phba->sli;
-+ pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
-+ cmdsize = (sizeof (uint32_t) + sizeof (SCR));
-+ if ((ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC)) == 0) {
-+ return (1);
-+ }
-+
-+ lpfc_nlp_init(phba, ndlp, nportid);
-+
-+ if ((elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry,
-+ ndlp, ELS_CMD_SCR)) == 0) {
-+ mempool_free( ndlp, phba->nlp_mem_pool);
-+ return (1);
-+ }
-+
-+ icmd = &elsiocb->iocb;
-+ pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
-+
-+ *((uint32_t *) (pcmd)) = ELS_CMD_SCR;
-+ pcmd += sizeof (uint32_t);
-+
-+ /* For SCR, remainder of payload is SCR parameter page */
-+ memset(pcmd, 0, sizeof (SCR));
-+ ((SCR *) pcmd)->Function = SCR_FUNC_FULL;
-+
-+ phba->fc_stat.elsXmitSCR++;
-+ elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
-+ if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
-+ mempool_free( ndlp, phba->nlp_mem_pool);
-+ lpfc_els_free_iocb(phba, elsiocb);
-+ return (1);
-+ }
-+ mempool_free( ndlp, phba->nlp_mem_pool);
-+ return (0);
-+}
-+
-+static int
-+lpfc_issue_els_farpr(struct lpfc_hba * phba, uint32_t nportid, uint8_t retry)
-+{
-+ IOCB_t *icmd;
-+ struct lpfc_iocbq *elsiocb;
-+ struct lpfc_sli_ring *pring;
-+ struct lpfc_sli *psli;
-+ FARP *fp;
-+ uint8_t *pcmd;
-+ uint32_t *lp;
-+ uint16_t cmdsize;
-+ struct lpfc_nodelist *ondlp;
-+ struct lpfc_nodelist *ndlp;
-+
-+ psli = &phba->sli;
-+ pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
-+ cmdsize = (sizeof (uint32_t) + sizeof (FARP));
-+ if ((ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC)) == 0) {
-+ return (1);
-+ }
-+ lpfc_nlp_init(phba, ndlp, nportid);
-+
-+ if ((elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry,
-+ ndlp, ELS_CMD_RNID)) == 0) {
-+ mempool_free( ndlp, phba->nlp_mem_pool);
-+ return (1);
-+ }
-+
-+ icmd = &elsiocb->iocb;
-+ pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
-+
-+ *((uint32_t *) (pcmd)) = ELS_CMD_FARPR;
-+ pcmd += sizeof (uint32_t);
-+
-+ /* Fill in FARPR payload */
-+ fp = (FARP *) (pcmd);
-+ memset(fp, 0, sizeof (FARP));
-+ lp = (uint32_t *) pcmd;
-+ *lp++ = be32_to_cpu(nportid);
-+ *lp++ = be32_to_cpu(phba->fc_myDID);
-+ fp->Rflags = 0;
-+ fp->Mflags = (FARP_MATCH_PORT | FARP_MATCH_NODE);
-+
-+ memcpy(&fp->RportName, &phba->fc_portname, sizeof (struct lpfc_name));
-+ memcpy(&fp->RnodeName, &phba->fc_nodename, sizeof (struct lpfc_name));
-+ if ((ondlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, nportid))) {
-+ memcpy(&fp->OportName, &ondlp->nlp_portname,
-+ sizeof (struct lpfc_name));
-+ memcpy(&fp->OnodeName, &ondlp->nlp_nodename,
-+ sizeof (struct lpfc_name));
-+ }
-+
-+ phba->fc_stat.elsXmitFARPR++;
-+ elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
-+ if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
-+ mempool_free( ndlp, phba->nlp_mem_pool);
-+ lpfc_els_free_iocb(phba, elsiocb);
-+ return (1);
-+ }
-+ mempool_free( ndlp, phba->nlp_mem_pool);
-+ return (0);
-+}
-+
-+void
-+lpfc_els_retry_delay(unsigned long ptr)
-+{
-+ struct lpfc_nodelist *ndlp;
-+ struct lpfc_hba *phba;
-+ unsigned long iflag;
-+ LPFC_DISC_EVT_t *evtp;
-+
-+ ndlp = (struct lpfc_nodelist *)ptr;
-+ phba = ndlp->nlp_phba;
-+ evtp = &ndlp->els_retry_evt;
-+
-+ spin_lock_irqsave(phba->host->host_lock, iflag);
-+ if (!list_empty(&evtp->evt_listp)) {
-+ spin_unlock_irqrestore(phba->host->host_lock, iflag);
-+ return;
-+ }
-+
-+ evtp->evt_arg1 = ndlp;
-+ evtp->evt = LPFC_EVT_ELS_RETRY;
-+ list_add_tail(&evtp->evt_listp, &phba->dpc_disc);
-+ if (phba->dpc_wait)
-+ up(phba->dpc_wait);
-+
-+ spin_unlock_irqrestore(phba->host->host_lock, iflag);
-+ return;
-+}
-+
-+void
-+lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp)
-+{
-+ struct lpfc_hba *phba;
-+ uint32_t cmd;
-+ uint32_t did;
-+ uint8_t retry;
-+
-+ phba = ndlp->nlp_phba;
-+ spin_lock_irq(phba->host->host_lock);
-+ did = (uint32_t) (ndlp->nlp_DID);
-+ cmd = (uint32_t) (ndlp->nlp_last_elscmd);
-+
-+ if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
-+ spin_unlock_irq(phba->host->host_lock);
-+ return;
-+ }
-+
-+ ndlp->nlp_flag &= ~NLP_DELAY_TMO;
-+ retry = ndlp->nlp_retry;
-+
-+ switch (cmd) {
-+ case ELS_CMD_FLOGI:
-+ lpfc_issue_els_flogi(phba, ndlp, retry);
-+ break;
-+ case ELS_CMD_PLOGI:
-+ ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
-+ lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
-+ lpfc_issue_els_plogi(phba, ndlp, retry);
-+ break;
-+ case ELS_CMD_ADISC:
-+ ndlp->nlp_state = NLP_STE_ADISC_ISSUE;
-+ lpfc_nlp_list(phba, ndlp, NLP_ADISC_LIST);
-+ lpfc_issue_els_adisc(phba, ndlp, retry);
-+ break;
-+ case ELS_CMD_PRLI:
-+ ndlp->nlp_state = NLP_STE_PRLI_ISSUE;
-+ lpfc_nlp_list(phba, ndlp, NLP_PRLI_LIST);
-+ lpfc_issue_els_prli(phba, ndlp, retry);
-+ break;
-+ case ELS_CMD_LOGO:
-+ ndlp->nlp_state = NLP_STE_NPR_NODE;
-+ lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
-+ lpfc_issue_els_logo(phba, ndlp, retry);
-+ break;
-+ }
-+ spin_unlock_irq(phba->host->host_lock);
-+ return;
-+}
-+
-+static int
-+lpfc_els_retry(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
-+ struct lpfc_iocbq * rspiocb)
-+{
-+ IOCB_t *irsp;
-+ struct lpfc_dmabuf *pcmd;
-+ struct lpfc_nodelist *ndlp;
-+ uint32_t *elscmd;
-+ struct ls_rjt stat;
-+ int retry, maxretry;
-+ int delay;
-+ uint32_t cmd;
-+
-+ retry = 0;
-+ delay = 0;
-+ maxretry = lpfc_max_els_tries;
-+ irsp = &rspiocb->iocb;
-+ ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
-+ pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
-+ cmd = 0;
-+ /* Note: context2 may be 0 for internal driver abort
-+	 * of a delayed ELS command.
-+ */
-+
-+ if (pcmd && pcmd->virt) {
-+ elscmd = (uint32_t *) (pcmd->virt);
-+ cmd = *elscmd++;
-+ }
-+
-+ switch (irsp->ulpStatus) {
-+ case IOSTAT_FCP_RSP_ERROR:
-+ case IOSTAT_REMOTE_STOP:
-+ break;
-+
-+ case IOSTAT_LOCAL_REJECT:
-+ switch ((irsp->un.ulpWord[4] & 0xff)) {
-+ case IOERR_LOOP_OPEN_FAILURE:
-+ if (cmd == ELS_CMD_PLOGI) {
-+ if (cmdiocb->retry == 0) {
-+ delay = 1;
-+ }
-+ }
-+ retry = 1;
-+ break;
-+
-+ case IOERR_SEQUENCE_TIMEOUT:
-+ retry = 1;
-+ if ((cmd == ELS_CMD_FLOGI)
-+ && (phba->fc_topology != TOPOLOGY_LOOP)) {
-+ maxretry = 48;
-+ }
-+ break;
-+
-+ case IOERR_NO_RESOURCES:
-+ if (cmd == ELS_CMD_PLOGI) {
-+ delay = 1;
-+ }
-+ retry = 1;
-+ break;
-+
-+ case IOERR_INVALID_RPI:
-+ retry = 1;
-+ break;
-+ }
-+ break;
-+
-+ case IOSTAT_NPORT_RJT:
-+ case IOSTAT_FABRIC_RJT:
-+ if (irsp->un.ulpWord[4] & RJT_UNAVAIL_TEMP) {
-+ retry = 1;
-+ break;
-+ }
-+ break;
-+
-+ case IOSTAT_NPORT_BSY:
-+ case IOSTAT_FABRIC_BSY:
-+ retry = 1;
-+ break;
-+
-+ case IOSTAT_LS_RJT:
-+ stat.un.lsRjtError = be32_to_cpu(irsp->un.ulpWord[4]);
-+		/* Added for Vendor specific support
-+ * Just keep retrying for these Rsn / Exp codes
-+ */
-+ switch (stat.un.b.lsRjtRsnCode) {
-+ case LSRJT_UNABLE_TPC:
-+ if (stat.un.b.lsRjtRsnCodeExp ==
-+ LSEXP_CMD_IN_PROGRESS) {
-+ if (cmd == ELS_CMD_PLOGI) {
-+ delay = 1;
-+ maxretry = 48;
-+ }
-+ retry = 1;
-+ break;
-+ }
-+ if (cmd == ELS_CMD_PLOGI) {
-+ delay = 1;
-+ retry = 1;
-+ break;
-+ }
-+ break;
-+
-+ case LSRJT_LOGICAL_BSY:
-+ if (cmd == ELS_CMD_PLOGI) {
-+ delay = 1;
-+ maxretry = 48;
-+ }
-+ retry = 1;
-+ break;
-+ }
-+ break;
-+
-+ case IOSTAT_INTERMED_RSP:
-+ case IOSTAT_BA_RJT:
-+ break;
-+
-+ default:
-+ break;
-+ }
-+
-+ if (ndlp->nlp_DID == FDMI_DID) {
-+ retry = 1;
-+ }
-+
-+ if ((++cmdiocb->retry) >= maxretry) {
-+ phba->fc_stat.elsRetryExceeded++;
-+ retry = 0;
-+ }
-+
-+ if (retry) {
-+
-+ /* Retry ELS command <elsCmd> to remote NPORT <did> */
-+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
-+ "%d:0107 Retry ELS command x%x to remote "
-+ "NPORT x%x Data: x%x x%x\n",
-+ phba->brd_no,
-+ cmd, ndlp->nlp_DID, cmdiocb->retry, delay);
-+
-+ if ((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) {
-+ /* If discovery / RSCN timer is running, reset it */
-+ if (timer_pending(&phba->fc_disctmo) ||
-+ (phba->fc_flag & FC_RSCN_MODE)) {
-+ lpfc_set_disctmo(phba);
-+ }
-+ }
-+
-+ phba->fc_stat.elsXmitRetry++;
-+ if (delay) {
-+ phba->fc_stat.elsDelayRetry++;
-+ ndlp->nlp_retry = cmdiocb->retry;
-+
-+ mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
-+ ndlp->nlp_flag |= NLP_DELAY_TMO;
-+
-+ ndlp->nlp_state = NLP_STE_NPR_NODE;
-+ lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
-+ ndlp->nlp_last_elscmd = cmd;
-+
-+ return (1);
-+ }
-+ switch (cmd) {
-+ case ELS_CMD_FLOGI:
-+ lpfc_issue_els_flogi(phba, ndlp, cmdiocb->retry);
-+ return (1);
-+ case ELS_CMD_PLOGI:
-+ ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
-+ lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
-+ lpfc_issue_els_plogi(phba, ndlp, cmdiocb->retry);
-+ return (1);
-+ case ELS_CMD_ADISC:
-+ ndlp->nlp_state = NLP_STE_ADISC_ISSUE;
-+ lpfc_nlp_list(phba, ndlp, NLP_ADISC_LIST);
-+ lpfc_issue_els_adisc(phba, ndlp, cmdiocb->retry);
-+ return (1);
-+ case ELS_CMD_PRLI:
-+ ndlp->nlp_state = NLP_STE_PRLI_ISSUE;
-+ lpfc_nlp_list(phba, ndlp, NLP_PRLI_LIST);
-+ lpfc_issue_els_prli(phba, ndlp, cmdiocb->retry);
-+ return (1);
-+ case ELS_CMD_LOGO:
-+ ndlp->nlp_state = NLP_STE_NPR_NODE;
-+ lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
-+ lpfc_issue_els_logo(phba, ndlp, cmdiocb->retry);
-+ return (1);
-+ }
-+ }
-+
-+ /* No retry ELS command <elsCmd> to remote NPORT <did> */
-+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
-+ "%d:0108 No retry ELS command x%x to remote NPORT x%x "
-+ "Data: x%x x%x\n",
-+ phba->brd_no,
-+ cmd, ndlp->nlp_DID, cmdiocb->retry, ndlp->nlp_flag);
-+
-+ return (0);
-+}
-+
-+int
-+lpfc_els_free_iocb(struct lpfc_hba * phba, struct lpfc_iocbq * elsiocb)
-+{
-+ struct lpfc_dmabuf *buf_ptr, *buf_ptr1;
-+
-+ /* context2 = cmd, context2->next = rsp, context3 = bpl */
-+ if (elsiocb->context2) {
-+ buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2;
-+ /* Free the response before processing the command. */
-+ if (!list_empty(&buf_ptr1->list)) {
-+ buf_ptr = list_entry(buf_ptr1->list.next,
-+ struct lpfc_dmabuf, list);
-+ lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
-+ kfree(buf_ptr);
-+ }
-+ lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys);
-+ kfree(buf_ptr1);
-+ }
-+
-+ if (elsiocb->context3) {
-+ buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3;
-+ lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
-+ kfree(buf_ptr);
-+ }
-+
-+ mempool_free( elsiocb, phba->iocb_mem_pool);
-+ return 0;
-+}
-+
-+static void
-+lpfc_cmpl_els_logo_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
-+ struct lpfc_iocbq * rspiocb)
-+{
-+ struct lpfc_nodelist *ndlp;
-+
-+ ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
-+
-+ /* ACC to LOGO completes to NPort <nlp_DID> */
-+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
-+ "%d:0109 ACC to LOGO completes to NPort x%x "
-+ "Data: x%x x%x x%x\n",
-+ phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag,
-+ ndlp->nlp_state, ndlp->nlp_rpi);
-+
-+ ndlp->nlp_flag &= ~NLP_LOGO_ACC;
-+
-+ switch (ndlp->nlp_state) {
-+ case NLP_STE_UNUSED_NODE: /* node is just allocated */
-+ lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
-+ break;
-+ case NLP_STE_NPR_NODE: /* NPort Recovery mode */
-+ lpfc_unreg_rpi(phba, ndlp);
-+ break;
-+ default:
-+ break;
-+ }
-+ lpfc_els_free_iocb(phba, cmdiocb);
-+ return;
-+}
-+
-+static void
-+lpfc_cmpl_els_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
-+ struct lpfc_iocbq * rspiocb)
-+{
-+ struct lpfc_nodelist *ndlp;
-+ LPFC_MBOXQ_t *mbox = NULL;
-+
-+ ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
-+ if (cmdiocb->context_un.mbox)
-+ mbox = cmdiocb->context_un.mbox;
-+
-+
-+ /* Check to see if link went down during discovery */
-+ if ((lpfc_els_chk_latt(phba)) || !ndlp) {
-+ if (mbox) {
-+ mempool_free( mbox, phba->mbox_mem_pool);
-+ }
-+ goto out;
-+ }
-+
-+ /* ELS response tag <ulpIoTag> completes */
-+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
-+ "%d:0110 ELS response tag x%x completes "
-+ "Data: x%x x%x x%x x%x x%x x%x\n",
-+ phba->brd_no,
-+ cmdiocb->iocb.ulpIoTag, rspiocb->iocb.ulpStatus,
-+ rspiocb->iocb.un.ulpWord[4], ndlp->nlp_DID,
-+ ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
-+
-+ if (mbox) {
-+ if ((rspiocb->iocb.ulpStatus == 0)
-+ && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) {
-+ /* set_slim mailbox command needs to execute first,
-+ * queue this command to be processed later.
-+ */
-+ lpfc_unreg_rpi(phba, ndlp);
-+ mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
-+ mbox->context2 = ndlp;
-+ ndlp->nlp_state = NLP_STE_REG_LOGIN_ISSUE;
-+ lpfc_nlp_list(phba, ndlp, NLP_REGLOGIN_LIST);
-+ if (lpfc_sli_issue_mbox(phba, mbox,
-+ (MBX_NOWAIT | MBX_STOP_IOCB))
-+ != MBX_NOT_FINISHED) {
-+ goto out;
-+ }
-+ /* NOTE: we should have messages for unsuccessful
-+ reglogin */
-+ mempool_free( mbox, phba->mbox_mem_pool);
-+ } else {
-+ mempool_free( mbox, phba->mbox_mem_pool);
-+ if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
-+ lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
-+ }
-+ }
-+ }
-+out:
-+ if(ndlp)
-+ ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN;
-+ lpfc_els_free_iocb(phba, cmdiocb);
-+ return;
-+}
-+
-+int
-+lpfc_els_rsp_acc(struct lpfc_hba * phba, uint32_t flag,
-+ struct lpfc_iocbq * oldiocb, struct lpfc_nodelist * ndlp,
-+ LPFC_MBOXQ_t * mbox, uint8_t newnode)
-+{
-+ IOCB_t *icmd;
-+ IOCB_t *oldcmd;
-+ struct lpfc_iocbq *elsiocb;
-+ struct lpfc_sli_ring *pring;
-+ struct lpfc_sli *psli;
-+ uint8_t *pcmd;
-+ uint16_t cmdsize;
-+
-+ psli = &phba->sli;
-+ pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
-+ oldcmd = &oldiocb->iocb;
-+
-+ switch (flag) {
-+ case ELS_CMD_ACC:
-+ cmdsize = sizeof (uint32_t);
-+ if ((elsiocb =
-+ lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry,
-+ ndlp, ELS_CMD_ACC)) == 0) {
-+ return (1);
-+ }
-+ icmd = &elsiocb->iocb;
-+ icmd->ulpContext = oldcmd->ulpContext; /* Xri */
-+ pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
-+ *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
-+ pcmd += sizeof (uint32_t);
-+ break;
-+ case ELS_CMD_PLOGI:
-+ cmdsize = (sizeof (struct serv_parm) + sizeof (uint32_t));
-+ if ((elsiocb =
-+ lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry,
-+ ndlp, ELS_CMD_ACC)) == 0) {
-+ return (1);
-+ }
-+ icmd = &elsiocb->iocb;
-+ icmd->ulpContext = oldcmd->ulpContext; /* Xri */
-+ pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
-+
-+ if (mbox)
-+ elsiocb->context_un.mbox = mbox;
-+
-+ *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
-+ pcmd += sizeof (uint32_t);
-+ memcpy(pcmd, &phba->fc_sparam, sizeof (struct serv_parm));
-+ break;
-+ default:
-+ return (1);
-+ }
-+
-+ if (newnode)
-+ elsiocb->context1 = NULL;
-+
-+ /* Xmit ELS ACC response tag <ulpIoTag> */
-+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
-+ "%d:0128 Xmit ELS ACC response tag x%x "
-+ "Data: x%x x%x x%x x%x x%x\n",
-+ phba->brd_no,
-+ elsiocb->iocb.ulpIoTag,
-+ elsiocb->iocb.ulpContext, ndlp->nlp_DID,
-+ ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
-+
-+ if (ndlp->nlp_flag & NLP_LOGO_ACC) {
-+ elsiocb->iocb_cmpl = lpfc_cmpl_els_logo_acc;
-+ } else {
-+ elsiocb->iocb_cmpl = lpfc_cmpl_els_acc;
-+ }
-+
-+ phba->fc_stat.elsXmitACC++;
-+ if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
-+ lpfc_els_free_iocb(phba, elsiocb);
-+ return (1);
-+ }
-+ return (0);
-+}
-+
-+int
-+lpfc_els_rsp_reject(struct lpfc_hba * phba, uint32_t rejectError,
-+ struct lpfc_iocbq * oldiocb, struct lpfc_nodelist * ndlp)
-+{
-+ IOCB_t *icmd;
-+ IOCB_t *oldcmd;
-+ struct lpfc_iocbq *elsiocb;
-+ struct lpfc_sli_ring *pring;
-+ struct lpfc_sli *psli;
-+ uint8_t *pcmd;
-+ uint16_t cmdsize;
-+
-+ psli = &phba->sli;
-+ pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
-+
-+ cmdsize = 2 * sizeof (uint32_t);
-+ if ((elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry,
-+ ndlp, ELS_CMD_LS_RJT)) == 0) {
-+ return (1);
-+ }
-+
-+ icmd = &elsiocb->iocb;
-+ oldcmd = &oldiocb->iocb;
-+ icmd->ulpContext = oldcmd->ulpContext; /* Xri */
-+ pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
-+
-+ *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT;
-+ pcmd += sizeof (uint32_t);
-+ *((uint32_t *) (pcmd)) = rejectError;
-+
-+ /* Xmit ELS RJT <err> response tag <ulpIoTag> */
-+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
-+ "%d:0129 Xmit ELS RJT x%x response tag x%x "
-+ "Data: x%x x%x x%x x%x x%x\n",
-+ phba->brd_no,
-+ rejectError, elsiocb->iocb.ulpIoTag,
-+ elsiocb->iocb.ulpContext, ndlp->nlp_DID,
-+ ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
-+
-+ phba->fc_stat.elsXmitLSRJT++;
-+ elsiocb->iocb_cmpl = lpfc_cmpl_els_acc;
-+
-+ if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
-+ lpfc_els_free_iocb(phba, elsiocb);
-+ return (1);
-+ }
-+ return (0);
-+}
-+
-+int
-+lpfc_els_rsp_adisc_acc(struct lpfc_hba * phba,
-+ struct lpfc_iocbq * oldiocb, struct lpfc_nodelist * ndlp)
-+{
-+ ADISC *ap;
-+ IOCB_t *icmd;
-+ IOCB_t *oldcmd;
-+ struct lpfc_iocbq *elsiocb;
-+ struct lpfc_sli_ring *pring;
-+ struct lpfc_sli *psli;
-+ uint8_t *pcmd;
-+ uint16_t cmdsize;
-+
-+ psli = &phba->sli;
-+ pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
-+
-+ cmdsize = sizeof (uint32_t) + sizeof (ADISC);
-+ if ((elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry,
-+ ndlp, ELS_CMD_ACC)) == 0) {
-+ return (1);
-+ }
-+
-+ /* Xmit ADISC ACC response tag <ulpIoTag> */
-+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
-+ "%d:0130 Xmit ADISC ACC response tag x%x "
-+ "Data: x%x x%x x%x x%x x%x\n",
-+ phba->brd_no,
-+ elsiocb->iocb.ulpIoTag,
-+ elsiocb->iocb.ulpContext, ndlp->nlp_DID,
-+ ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
-+
-+ icmd = &elsiocb->iocb;
-+ oldcmd = &oldiocb->iocb;
-+ icmd->ulpContext = oldcmd->ulpContext; /* Xri */
-+ pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
-+
-+ *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
-+ pcmd += sizeof (uint32_t);
-+
-+ ap = (ADISC *) (pcmd);
-+ ap->hardAL_PA = phba->fc_pref_ALPA;
-+ memcpy(&ap->portName, &phba->fc_portname, sizeof (struct lpfc_name));
-+ memcpy(&ap->nodeName, &phba->fc_nodename, sizeof (struct lpfc_name));
-+ ap->DID = be32_to_cpu(phba->fc_myDID);
-+
-+ phba->fc_stat.elsXmitACC++;
-+ elsiocb->iocb_cmpl = lpfc_cmpl_els_acc;
-+
-+ if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
-+ lpfc_els_free_iocb(phba, elsiocb);
-+ return (1);
-+ }
-+ return (0);
-+}
-+
-+int
-+lpfc_els_rsp_prli_acc(struct lpfc_hba * phba,
-+ struct lpfc_iocbq * oldiocb, struct lpfc_nodelist * ndlp)
-+{
-+ PRLI *npr;
-+ lpfc_vpd_t *vpd;
-+ IOCB_t *icmd;
-+ IOCB_t *oldcmd;
-+ struct lpfc_iocbq *elsiocb;
-+ struct lpfc_sli_ring *pring;
-+ struct lpfc_sli *psli;
-+ uint8_t *pcmd;
-+ uint16_t cmdsize;
-+
-+ psli = &phba->sli;
-+ pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
-+
-+ cmdsize = sizeof (uint32_t) + sizeof (PRLI);
-+ if ((elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry,
-+ ndlp,
-+ (ELS_CMD_ACC |
-+ (ELS_CMD_PRLI & ~ELS_RSP_MASK)))) ==
-+ 0) {
-+ return (1);
-+ }
-+
-+ /* Xmit PRLI ACC response tag <ulpIoTag> */
-+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
-+ "%d:0131 Xmit PRLI ACC response tag x%x "
-+ "Data: x%x x%x x%x x%x x%x\n",
-+ phba->brd_no,
-+ elsiocb->iocb.ulpIoTag,
-+ elsiocb->iocb.ulpContext, ndlp->nlp_DID,
-+ ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
-+
-+ icmd = &elsiocb->iocb;
-+ oldcmd = &oldiocb->iocb;
-+ icmd->ulpContext = oldcmd->ulpContext; /* Xri */
-+ pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
-+
-+ *((uint32_t *) (pcmd)) = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK));
-+ pcmd += sizeof (uint32_t);
-+
-+ /* For PRLI, remainder of payload is PRLI parameter page */
-+ memset(pcmd, 0, sizeof (PRLI));
-+
-+ npr = (PRLI *) pcmd;
-+ vpd = &phba->vpd;
-+ /*
-+ * If our firmware version is 3.20 or later,
-+ * set the following bits for FC-TAPE support.
-+ */
-+ if (vpd->rev.feaLevelHigh >= 0x02) {
-+ npr->ConfmComplAllowed = 1;
-+ npr->Retry = 1;
-+ npr->TaskRetryIdReq = 1;
-+ }
-+
-+ npr->acceptRspCode = PRLI_REQ_EXECUTED;
-+ npr->estabImagePair = 1;
-+ npr->readXferRdyDis = 1;
-+ npr->ConfmComplAllowed = 1;
-+
-+ npr->prliType = PRLI_FCP_TYPE;
-+ npr->initiatorFunc = 1;
-+
-+ phba->fc_stat.elsXmitACC++;
-+ elsiocb->iocb_cmpl = lpfc_cmpl_els_acc;
-+
-+ if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
-+ lpfc_els_free_iocb(phba, elsiocb);
-+ return (1);
-+ }
-+ return (0);
-+}
-+
-+static int
-+lpfc_els_rsp_rnid_acc(struct lpfc_hba * phba,
-+ uint8_t format,
-+ struct lpfc_iocbq * oldiocb, struct lpfc_nodelist * ndlp)
-+{
-+ RNID *rn;
-+ IOCB_t *icmd;
-+ IOCB_t *oldcmd;
-+ struct lpfc_iocbq *elsiocb;
-+ struct lpfc_sli_ring *pring;
-+ struct lpfc_sli *psli;
-+ uint8_t *pcmd;
-+ uint16_t cmdsize;
-+
-+ psli = &phba->sli;
-+ pring = &psli->ring[LPFC_ELS_RING];
-+
-+ cmdsize = sizeof (uint32_t) + sizeof (uint32_t)
-+ + (2 * sizeof (struct lpfc_name));
-+ if (format)
-+ cmdsize += sizeof (RNID_TOP_DISC);
-+
-+ if ((elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry,
-+ ndlp, ELS_CMD_ACC)) == 0) {
-+ return (1);
-+ }
-+
-+ /* Xmit RNID ACC response tag <ulpIoTag> */
-+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
-+ "%d:0132 Xmit RNID ACC response tag x%x "
-+ "Data: x%x\n",
-+ phba->brd_no,
-+ elsiocb->iocb.ulpIoTag,
-+ elsiocb->iocb.ulpContext);
-+
-+ icmd = &elsiocb->iocb;
-+ oldcmd = &oldiocb->iocb;
-+ icmd->ulpContext = oldcmd->ulpContext; /* Xri */
-+ pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
-+
-+ *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
-+ pcmd += sizeof (uint32_t);
-+
-+ memset(pcmd, 0, sizeof (RNID));
-+ rn = (RNID *) (pcmd);
-+ rn->Format = format;
-+ rn->CommonLen = (2 * sizeof (struct lpfc_name));
-+ memcpy(&rn->portName, &phba->fc_portname, sizeof (struct lpfc_name));
-+ memcpy(&rn->nodeName, &phba->fc_nodename, sizeof (struct lpfc_name));
-+ switch (format) {
-+ case 0:
-+ rn->SpecificLen = 0;
-+ break;
-+ case RNID_TOPOLOGY_DISC:
-+ rn->SpecificLen = sizeof (RNID_TOP_DISC);
-+ memcpy(&rn->un.topologyDisc.portName,
-+ &phba->fc_portname, sizeof (struct lpfc_name));
-+ rn->un.topologyDisc.unitType = RNID_HBA;
-+ rn->un.topologyDisc.physPort = 0;
-+ rn->un.topologyDisc.attachedNodes = 0;
-+ break;
-+ default:
-+ rn->CommonLen = 0;
-+ rn->SpecificLen = 0;
-+ break;
-+ }
-+
-+ phba->fc_stat.elsXmitACC++;
-+ elsiocb->iocb_cmpl = lpfc_cmpl_els_acc;
-+ elsiocb->context1 = NULL; /* Don't need ndlp for cmpl,
-+ * it could be freed */
-+
-+ if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
-+ lpfc_els_free_iocb(phba, elsiocb);
-+ return (1);
-+ }
-+ return (0);
-+}
-+
-+int
-+lpfc_els_disc_adisc(struct lpfc_hba * phba)
-+{
-+ int sentadisc;
-+ struct lpfc_nodelist *ndlp, *next_ndlp;
-+
-+ sentadisc = 0;
-+ /* go thru NPR list and issue any remaining ELS ADISCs */
-+ list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list,
-+ nlp_listp) {
-+ if(ndlp->nlp_flag & NLP_NPR_2B_DISC) {
-+ if(ndlp->nlp_flag & NLP_NPR_ADISC) {
-+ ndlp->nlp_flag &= ~NLP_NPR_ADISC;
-+ ndlp->nlp_state = NLP_STE_ADISC_ISSUE;
-+ lpfc_nlp_list(phba, ndlp,
-+ NLP_ADISC_LIST);
-+ lpfc_issue_els_adisc(phba, ndlp, 0);
-+ sentadisc++;
-+ phba->num_disc_nodes++;
-+ if (phba->num_disc_nodes >=
-+ phba->cfg_discovery_threads) {
-+ phba->fc_flag |= FC_NLP_MORE;
-+ break;
-+ }
-+ }
-+ }
-+ }
-+ if (sentadisc == 0) {
-+ phba->fc_flag &= ~FC_NLP_MORE;
-+ }
-+ return(sentadisc);
-+}
-+
-+int
-+lpfc_els_disc_plogi(struct lpfc_hba * phba)
-+{
-+ int sentplogi;
-+ struct lpfc_nodelist *ndlp, *next_ndlp;
-+
-+ sentplogi = 0;
-+ /* go thru NPR list and issue any remaining ELS PLOGIs */
-+ list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list,
-+ nlp_listp) {
-+ if((ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
-+ (!(ndlp->nlp_flag & NLP_DELAY_TMO))) {
-+ if(!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
-+ ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
-+ lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
-+ lpfc_issue_els_plogi(phba, ndlp, 0);
-+ sentplogi++;
-+ phba->num_disc_nodes++;
-+ if (phba->num_disc_nodes >=
-+ phba->cfg_discovery_threads) {
-+ phba->fc_flag |= FC_NLP_MORE;
-+ break;
-+ }
-+ }
-+ }
-+ }
-+ if (sentplogi == 0) {
-+ phba->fc_flag &= ~FC_NLP_MORE;
-+ }
-+ return(sentplogi);
-+}
-+
-+int
-+lpfc_els_flush_rscn(struct lpfc_hba * phba)
-+{
-+ struct lpfc_dmabuf *mp;
-+ int i;
-+
-+ for (i = 0; i < phba->fc_rscn_id_cnt; i++) {
-+ mp = phba->fc_rscn_id_list[i];
-+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
-+ kfree(mp);
-+ phba->fc_rscn_id_list[i] = NULL;
-+ }
-+ phba->fc_rscn_id_cnt = 0;
-+ phba->fc_flag &= ~(FC_RSCN_MODE | FC_RSCN_DISCOVERY);
-+ lpfc_can_disctmo(phba);
-+ return (0);
-+}
-+
-+int
-+lpfc_rscn_payload_check(struct lpfc_hba * phba, uint32_t did)
-+{
-+ D_ID ns_did;
-+ D_ID rscn_did;
-+ struct lpfc_dmabuf *mp;
-+ uint32_t *lp;
-+ uint32_t payload_len, cmd, i, match;
-+
-+ ns_did.un.word = did;
-+ match = 0;
-+
-+ /* Never match fabric nodes for RSCNs */
-+ if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
-+ return(0);
-+
-+ /* If we are doing a FULL RSCN rediscovery, match everything */
-+ if (phba->fc_flag & FC_RSCN_DISCOVERY) {
-+ return (did);
-+ }
-+
-+ for (i = 0; i < phba->fc_rscn_id_cnt; i++) {
-+ mp = phba->fc_rscn_id_list[i];
-+ lp = (uint32_t *) mp->virt;
-+ cmd = *lp++;
-+ payload_len = be32_to_cpu(cmd) & 0xffff; /* payload length */
-+ payload_len -= sizeof (uint32_t); /* take off word 0 */
-+ while (payload_len) {
-+ rscn_did.un.word = *lp++;
-+ rscn_did.un.word = be32_to_cpu(rscn_did.un.word);
-+ payload_len -= sizeof (uint32_t);
-+ switch (rscn_did.un.b.resv) {
-+			case 0:	/* Single N_Port ID affected */
-+ if (ns_did.un.word == rscn_did.un.word) {
-+ match = did;
-+ }
-+ break;
-+			case 1:	/* Whole N_Port Area affected */
-+ if ((ns_did.un.b.domain == rscn_did.un.b.domain)
-+ && (ns_did.un.b.area == rscn_did.un.b.area))
-+ {
-+ match = did;
-+ }
-+ break;
-+			case 2:	/* Whole N_Port Domain affected */
-+ if (ns_did.un.b.domain == rscn_did.un.b.domain)
-+ {
-+ match = did;
-+ }
-+ break;
-+			case 3:	/* Whole Fabric affected */
-+ match = did;
-+ break;
-+ default:
-+ /* Unknown Identifier in RSCN list */
-+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
-+ "%d:0217 Unknown Identifier in "
-+ "RSCN payload Data: x%x\n",
-+ phba->brd_no, rscn_did.un.word);
-+ break;
-+ }
-+ if (match) {
-+ break;
-+ }
-+ }
-+ }
-+ return (match);
-+}
-+
-+static int
-+lpfc_rscn_recovery_check(struct lpfc_hba * phba)
-+{
-+ struct lpfc_nodelist *ndlp = NULL, *next_ndlp;
-+ struct list_head *listp;
-+ struct list_head *node_list[7];
-+ int i;
-+
-+	/* Look at all nodes affected by pending RSCNs and move
-+ * them to NPR list.
-+ */
-+ node_list[0] = &phba->fc_npr_list; /* MUST do this list first */
-+ node_list[1] = &phba->fc_nlpmap_list;
-+ node_list[2] = &phba->fc_nlpunmap_list;
-+ node_list[3] = &phba->fc_prli_list;
-+ node_list[4] = &phba->fc_reglogin_list;
-+ node_list[5] = &phba->fc_adisc_list;
-+ node_list[6] = &phba->fc_plogi_list;
-+ for (i = 0; i < 7; i++) {
-+ listp = node_list[i];
-+ if (list_empty(listp))
-+ continue;
-+
-+ list_for_each_entry_safe(ndlp, next_ndlp, listp, nlp_listp) {
-+ if((lpfc_rscn_payload_check(phba, ndlp->nlp_DID))) {
-+ /* part of RSCN, process this entry */
-+ lpfc_set_failmask(phba, ndlp,
-+ LPFC_DEV_DISCOVERY_INP,
-+ LPFC_SET_BITMASK);
-+
-+ lpfc_disc_state_machine(phba, ndlp, NULL,
-+ NLP_EVT_DEVICE_RECOVERY);
-+ if(ndlp->nlp_flag & NLP_DELAY_TMO) {
-+ ndlp->nlp_flag &= ~NLP_DELAY_TMO;
-+ del_timer_sync(&ndlp->nlp_delayfunc);
-+
-+ if (!list_empty(&ndlp->
-+ els_retry_evt.evt_listp))
-+ list_del_init(&ndlp->
-+ els_retry_evt.
-+ evt_listp);
-+ }
-+ }
-+ }
-+ }
-+ return (0);
-+}
-+
-+static int
-+lpfc_els_rcv_rscn(struct lpfc_hba * phba,
-+ struct lpfc_iocbq * cmdiocb,
-+ struct lpfc_nodelist * ndlp, uint8_t newnode)
-+{
-+ struct lpfc_dmabuf *pcmd;
-+ uint32_t *lp;
-+ IOCB_t *icmd;
-+ uint32_t payload_len, cmd;
-+
-+ icmd = &cmdiocb->iocb;
-+ pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
-+ lp = (uint32_t *) pcmd->virt;
-+
-+ cmd = *lp++;
-+ payload_len = be32_to_cpu(cmd) & 0xffff; /* payload length */
-+ payload_len -= sizeof (uint32_t); /* take off word 0 */
-+ cmd &= ELS_CMD_MASK;
-+
-+ /* RSCN received */
-+ lpfc_printf_log(phba,
-+ KERN_INFO,
-+ LOG_DISCOVERY,
-+ "%d:0214 RSCN received Data: x%x x%x x%x x%x\n",
-+ phba->brd_no,
-+ phba->fc_flag, payload_len, *lp, phba->fc_rscn_id_cnt);
-+
-+ /* If we are about to begin discovery, just ACC the RSCN.
-+ * Discovery processing will satisfy it.
-+ */
-+ if (phba->hba_state < LPFC_NS_QRY) {
-+ lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL,
-+ newnode);
-+ return (0);
-+ }
-+
-+ /* If we are already processing an RSCN, save the received
-+ * RSCN payload buffer, cmdiocb->context2 to process later.
-+ */
-+ if (phba->fc_flag & (FC_RSCN_MODE | FC_NDISC_ACTIVE)) {
-+ if ((phba->fc_rscn_id_cnt < FC_MAX_HOLD_RSCN) &&
-+ !(phba->fc_flag & FC_RSCN_DISCOVERY)) {
-+ phba->fc_flag |= FC_RSCN_MODE;
-+ phba->fc_rscn_id_list[phba->fc_rscn_id_cnt++] = pcmd;
-+
-+			/* If we zero cmdiocb->context2, the calling
-+ * routine will not try to free it.
-+ */
-+ cmdiocb->context2 = NULL;
-+
-+ /* Deferred RSCN */
-+ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
-+ "%d:0235 Deferred RSCN "
-+ "Data: x%x x%x x%x\n",
-+ phba->brd_no, phba->fc_rscn_id_cnt,
-+ phba->fc_flag, phba->hba_state);
-+ } else {
-+ phba->fc_flag |= FC_RSCN_DISCOVERY;
-+ /* ReDiscovery RSCN */
-+ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
-+ "%d:0234 ReDiscovery RSCN "
-+ "Data: x%x x%x x%x\n",
-+ phba->brd_no, phba->fc_rscn_id_cnt,
-+ phba->fc_flag, phba->hba_state);
-+ }
-+ /* Send back ACC */
-+ lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL,
-+ newnode);
-+
-+ /* send RECOVERY event for ALL nodes that match RSCN payload */
-+ lpfc_rscn_recovery_check(phba);
-+ return (0);
-+ }
-+
-+ phba->fc_flag |= FC_RSCN_MODE;
-+ phba->fc_rscn_id_list[phba->fc_rscn_id_cnt++] = pcmd;
-+ /*
-+	 * If we zero cmdiocb->context2, the calling routine will
-+ * not try to free it.
-+ */
-+ cmdiocb->context2 = NULL;
-+
-+ lpfc_set_disctmo(phba);
-+
-+ /* Send back ACC */
-+ lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, newnode);
-+
-+ /* send RECOVERY event for ALL nodes that match RSCN payload */
-+ lpfc_rscn_recovery_check(phba);
-+
-+ return (lpfc_els_handle_rscn(phba));
-+}
-+
-+int
-+lpfc_els_handle_rscn(struct lpfc_hba * phba)
-+{
-+ struct lpfc_nodelist *ndlp;
-+
-+ lpfc_put_event(phba, HBA_EVENT_RSCN, phba->fc_myDID,
-+ (void *)(unsigned long)(phba->fc_myDID), 0, 0);
-+
-+ /* Start timer for RSCN processing */
-+ lpfc_set_disctmo(phba);
-+
-+ /* RSCN processed */
-+ lpfc_printf_log(phba,
-+ KERN_INFO,
-+ LOG_DISCOVERY,
-+ "%d:0215 RSCN processed Data: x%x x%x x%x x%x\n",
-+ phba->brd_no,
-+ phba->fc_flag, 0, phba->fc_rscn_id_cnt,
-+ phba->hba_state);
-+
-+ /* To process RSCN, first compare RSCN data with NameServer */
-+ phba->fc_ns_retry = 0;
-+ if ((ndlp = lpfc_findnode_did(phba, NLP_SEARCH_UNMAPPED,
-+ NameServer_DID))) {
-+ /* Good ndlp, issue CT Request to NameServer */
-+ if (lpfc_ns_cmd(phba, ndlp, SLI_CTNS_GID_FT) == 0) {
-+ /* Wait for NameServer query cmpl before we can
-+ continue */
-+ return (1);
-+ }
-+ } else {
-+ /* If login to NameServer does not exist, issue one */
-+ /* Good status, issue PLOGI to NameServer */
-+ if ((ndlp =
-+ lpfc_findnode_did(phba, NLP_SEARCH_ALL, NameServer_DID))) {
-+ /* Wait for NameServer login cmpl before we can
-+ continue */
-+ return (1);
-+ }
-+ if ((ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC))
-+ == 0) {
-+ lpfc_els_flush_rscn(phba);
-+ return (0);
-+ } else {
-+ lpfc_nlp_init(phba, ndlp, NameServer_DID);
-+ ndlp->nlp_type |= NLP_FABRIC;
-+ ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
-+ lpfc_issue_els_plogi(phba, ndlp, 0);
-+ /* Wait for NameServer login cmpl before we can
-+ continue */
-+ return (1);
-+ }
-+ }
-+
-+ lpfc_els_flush_rscn(phba);
-+ return (0);
-+}
-+
-+static int
-+lpfc_els_rcv_flogi(struct lpfc_hba * phba,
-+ struct lpfc_iocbq * cmdiocb,
-+ struct lpfc_nodelist * ndlp, uint8_t newnode)
-+{
-+ struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
-+ uint32_t *lp = (uint32_t *) pcmd->virt;
-+ IOCB_t *icmd = &cmdiocb->iocb;
-+ struct serv_parm *sp;
-+ LPFC_MBOXQ_t *mbox;
-+ struct ls_rjt stat;
-+ uint32_t cmd, did;
-+
-+
-+ cmd = *lp++;
-+ sp = (struct serv_parm *) lp;
-+
-+ /* FLOGI received */
-+
-+ lpfc_set_disctmo(phba);
-+
-+ if (phba->fc_topology == TOPOLOGY_LOOP) {
-+ /* We should never receive a FLOGI in loop mode, ignore it */
-+ did = icmd->un.elsreq64.remoteID;
-+
-+ /* An FLOGI ELS command <elsCmd> was received from DID <did> in
-+ Loop Mode */
-+ lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
-+ "%d:0113 An FLOGI ELS command x%x was received "
-+ "from DID x%x in Loop Mode\n",
-+ phba->brd_no, cmd, did);
-+ return (1);
-+ }
-+
-+ did = Fabric_DID;
-+
-+ if ((lpfc_check_sparm(phba, ndlp, sp, CLASS3))) {
-+ /* For a FLOGI we accept, then if our portname is greater
-+		 * than the remote portname we initiate Nport login.
-+ */
-+ int rc;
-+
-+ rc = memcmp(&phba->fc_portname, &sp->portName,
-+ sizeof (struct lpfc_name));
-+
-+ if (!rc) {
-+ if ((mbox = mempool_alloc(phba->mbox_mem_pool,
-+ GFP_ATOMIC)) == 0) {
-+ return (1);
-+ }
-+ lpfc_linkdown(phba);
-+ lpfc_init_link(phba, mbox,
-+ phba->cfg_topology,
-+ phba->cfg_link_speed);
-+ mbox->mb.un.varInitLnk.lipsr_AL_PA = 0;
-+ mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
-+ if (lpfc_sli_issue_mbox
-+ (phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB))
-+ == MBX_NOT_FINISHED) {
-+ mempool_free( mbox, phba->mbox_mem_pool);
-+ }
-+ return (1);
-+ }
-+
-+ else if (rc > 0) { /* greater than */
-+ phba->fc_flag |= FC_PT2PT_PLOGI;
-+ }
-+ phba->fc_flag |= FC_PT2PT;
-+ phba->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
-+ } else {
-+ /* Reject this request because invalid parameters */
-+ stat.un.b.lsRjtRsvd0 = 0;
-+ stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
-+ stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
-+ stat.un.b.vendorUnique = 0;
-+ lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
-+ return (1);
-+ }
-+
-+ /* Send back ACC */
-+ lpfc_els_rsp_acc(phba, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL, newnode);
-+
-+ return (0);
-+}
-+
-+static int
-+lpfc_els_rcv_rnid(struct lpfc_hba * phba,
-+ struct lpfc_iocbq * cmdiocb, struct lpfc_nodelist * ndlp)
-+{
-+ struct lpfc_dmabuf *pcmd;
-+ uint32_t *lp;
-+ IOCB_t *icmd;
-+ RNID *rn;
-+ struct ls_rjt stat;
-+ uint32_t cmd, did;
-+
-+ icmd = &cmdiocb->iocb;
-+ did = icmd->un.elsreq64.remoteID;
-+ pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
-+ lp = (uint32_t *) pcmd->virt;
-+
-+ cmd = *lp++;
-+ rn = (RNID *) lp;
-+
-+ /* RNID received */
-+
-+ switch (rn->Format) {
-+ case 0:
-+ case RNID_TOPOLOGY_DISC:
-+ /* Send back ACC */
-+ lpfc_els_rsp_rnid_acc(phba, rn->Format, cmdiocb, ndlp);
-+ break;
-+ default:
-+ /* Reject this request because format not supported */
-+ stat.un.b.lsRjtRsvd0 = 0;
-+ stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
-+ stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
-+ stat.un.b.vendorUnique = 0;
-+ lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
-+ }
-+ return (0);
-+}
-+
-+static int
-+lpfc_els_rcv_rrq(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
-+ struct lpfc_nodelist * ndlp)
-+{
-+ struct lpfc_dmabuf *pcmd;
-+ uint32_t *lp;
-+ IOCB_t *icmd;
-+ struct lpfc_sli_ring *pring;
-+ struct lpfc_sli *psli;
-+ RRQ *rrq;
-+ uint32_t cmd, did;
-+
-+ psli = &phba->sli;
-+ pring = &psli->ring[LPFC_FCP_RING];
-+ icmd = &cmdiocb->iocb;
-+ did = icmd->un.elsreq64.remoteID;
-+ pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
-+ lp = (uint32_t *) pcmd->virt;
-+
-+ cmd = *lp++;
-+ rrq = (RRQ *) lp;
-+
-+ /* RRQ received */
-+ /* Get oxid / rxid from payload and abort it */
-+ if ((rrq->SID == be32_to_cpu(phba->fc_myDID))) {
-+ lpfc_sli_abort_iocb_ctx(phba, pring, rrq->Oxid);
-+ } else {
-+ lpfc_sli_abort_iocb_ctx(phba, pring, rrq->Rxid);
-+ }
-+ /* ACCEPT the rrq request */
-+ lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
-+
-+ return 0;
-+}
-+
-+static int
-+lpfc_els_rcv_farp(struct lpfc_hba * phba,
-+ struct lpfc_iocbq * cmdiocb, struct lpfc_nodelist * ndlp)
-+{
-+ struct lpfc_dmabuf *pcmd;
-+ uint32_t *lp;
-+ IOCB_t *icmd;
-+ FARP *fp;
-+ uint32_t cmd, cnt, did;
-+
-+ icmd = &cmdiocb->iocb;
-+ did = icmd->un.elsreq64.remoteID;
-+ pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
-+ lp = (uint32_t *) pcmd->virt;
-+
-+ cmd = *lp++;
-+ fp = (FARP *) lp;
-+
-+ /* FARP-REQ received from DID <did> */
-+ lpfc_printf_log(phba,
-+ KERN_INFO,
-+ LOG_ELS,
-+ "%d:0134 FARP-REQ received from DID x%x\n",
-+ phba->brd_no, did);
-+
-+ /* We will only support match on WWPN or WWNN */
-+ if (fp->Mflags & ~(FARP_MATCH_NODE | FARP_MATCH_PORT)) {
-+ return (0);
-+ }
-+
-+ cnt = 0;
-+ /* If this FARP command is searching for my portname */
-+ if (fp->Mflags & FARP_MATCH_PORT) {
-+ if (memcmp(&fp->RportName, &phba->fc_portname,
-+ sizeof (struct lpfc_name)) == 0)
-+ cnt = 1;
-+ }
-+
-+ /* If this FARP command is searching for my nodename */
-+ if (fp->Mflags & FARP_MATCH_NODE) {
-+ if (memcmp(&fp->RnodeName, &phba->fc_nodename,
-+ sizeof (struct lpfc_name)) == 0)
-+ cnt = 1;
-+ }
-+
-+ if (cnt) {
-+ if((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) ||
-+ (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
-+ /* Log back into the node before sending the FARP. */
-+ if (fp->Rflags & FARP_REQUEST_PLOGI) {
-+ ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
-+ lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
-+ lpfc_issue_els_plogi(phba, ndlp, 0);
-+ }
-+
-+ /* Send a FARP response to that node */
-+ if (fp->Rflags & FARP_REQUEST_FARPR) {
-+ lpfc_issue_els_farpr(phba, did, 0);
-+ }
-+ }
-+ }
-+ return (0);
-+}
-+
-+static int
-+lpfc_els_rcv_farpr(struct lpfc_hba * phba,
-+ struct lpfc_iocbq * cmdiocb, struct lpfc_nodelist * ndlp)
-+{
-+ struct lpfc_dmabuf *pcmd;
-+ uint32_t *lp;
-+ IOCB_t *icmd;
-+ uint32_t cmd, did;
-+
-+ icmd = &cmdiocb->iocb;
-+ did = icmd->un.elsreq64.remoteID;
-+ pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
-+ lp = (uint32_t *) pcmd->virt;
-+
-+ cmd = *lp++;
-+ /* FARP-RSP received from DID <did> */
-+ lpfc_printf_log(phba,
-+ KERN_INFO,
-+ LOG_ELS,
-+ "%d:0133 FARP-RSP received from DID x%x\n",
-+ phba->brd_no, did);
-+
-+ /* ACCEPT the Farp resp request */
-+ lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
-+
-+ return 0;
-+}
-+
-+static int
-+lpfc_els_rcv_fan(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
-+ struct lpfc_nodelist * ndlp)
-+{
-+ /* FAN received */
-+ lpfc_printf_log(phba,
-+ KERN_INFO,
-+ LOG_ELS,
-+ "%d:265 FAN received\n",
-+ phba->brd_no);
-+
-+ return (0);
-+}
-+
-+void
-+lpfc_els_timeout(unsigned long ptr)
-+{
-+ struct lpfc_hba *phba;
-+ unsigned long iflag;
-+
-+ phba = (struct lpfc_hba *)ptr;
-+ if (phba == 0)
-+ return;
-+ spin_lock_irqsave(phba->host->host_lock, iflag);
-+ if (!(phba->work_hba_events & WORKER_ELS_TMO)) {
-+ phba->work_hba_events |= WORKER_ELS_TMO;
-+ if (phba->dpc_wait)
-+ up(phba->dpc_wait);
-+ }
-+ spin_unlock_irqrestore(phba->host->host_lock, iflag);
-+ return;
-+}
-+
-+void
-+lpfc_els_timeout_handler(struct lpfc_hba *phba)
-+{
-+ struct lpfc_sli *psli;
-+ struct lpfc_sli_ring *pring;
-+ struct lpfc_iocbq *tmp_iocb, *piocb;
-+ IOCB_t *cmd = NULL;
-+ struct lpfc_dmabuf *pcmd;
-+ struct list_head *dlp;
-+ uint32_t *elscmd;
-+ uint32_t els_command;
-+ uint32_t timeout;
-+ uint32_t remote_ID;
-+
-+ if(phba == 0)
-+ return;
-+ spin_lock_irq(phba->host->host_lock);
-+ /* If the timer is already canceled do nothing */
-+ if (!(phba->work_hba_events & WORKER_ELS_TMO)) {
-+ spin_unlock_irq(phba->host->host_lock);
-+ return;
-+ }
-+
-+ timeout = (uint32_t)(phba->fc_ratov << 1);
-+
-+ psli = &phba->sli;
-+ pring = &psli->ring[LPFC_ELS_RING];
-+ dlp = &pring->txcmplq;
-+
-+ list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
-+ cmd = &piocb->iocb;
-+
-+ if (piocb->iocb_flag & LPFC_IO_LIBDFC) {
-+ continue;
-+ }
-+ pcmd = (struct lpfc_dmabuf *) piocb->context2;
-+ elscmd = (uint32_t *) (pcmd->virt);
-+ els_command = *elscmd;
-+
-+ if ((els_command == ELS_CMD_FARP)
-+ || (els_command == ELS_CMD_FARPR)) {
-+ continue;
-+ }
-+
-+ if (piocb->drvrTimeout > 0) {
-+ if (piocb->drvrTimeout >= timeout) {
-+ piocb->drvrTimeout -= timeout;
-+ } else {
-+ piocb->drvrTimeout = 0;
-+ }
-+ continue;
-+ }
-+
-+ list_del(&piocb->list);
-+ pring->txcmplq_cnt--;
-+
-+ if (cmd->ulpCommand == CMD_GEN_REQUEST64_CR) {
-+ struct lpfc_nodelist *ndlp;
-+
-+ ndlp = lpfc_findnode_rpi(phba, cmd->ulpContext);
-+ remote_ID = ndlp->nlp_DID;
-+ if (cmd->un.elsreq64.bdl.ulpIoTag32) {
-+ lpfc_sli_issue_abort_iotag32(phba,
-+ pring, piocb);
-+ }
-+ } else {
-+ remote_ID = cmd->un.elsreq64.remoteID;
-+ }
-+
-+ lpfc_printf_log(phba,
-+ KERN_ERR,
-+ LOG_ELS,
-+ "%d:0127 ELS timeout Data: x%x x%x x%x x%x\n",
-+ phba->brd_no, els_command,
-+ remote_ID, cmd->ulpCommand, cmd->ulpIoTag);
-+
-+ /*
-+ * The iocb has timed out; abort it.
-+ */
-+ if (piocb->iocb_cmpl) {
-+ cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
-+ cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
-+ (piocb->iocb_cmpl) (phba, piocb, piocb);
-+ } else {
-+ mempool_free(piocb, phba->iocb_mem_pool);
-+ }
-+ }
-+
-+ if (phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt) {
-+ phba->els_tmofunc.expires = jiffies + HZ * timeout;
-+ add_timer(&phba->els_tmofunc);
-+ }
-+ spin_unlock_irq(phba->host->host_lock);
-+}
-+
-+void
-+lpfc_els_flush_cmd(struct lpfc_hba * phba)
-+{
-+ struct lpfc_sli *psli;
-+ struct lpfc_sli_ring *pring;
-+ struct lpfc_iocbq *tmp_iocb, *piocb;
-+ IOCB_t *cmd = NULL;
-+ struct lpfc_dmabuf *pcmd;
-+ uint32_t *elscmd;
-+ uint32_t els_command;
-+ uint32_t remote_ID;
-+
-+ psli = &phba->sli;
-+ pring = &psli->ring[LPFC_ELS_RING];
-+
-+ list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) {
-+ cmd = &piocb->iocb;
-+
-+ if (piocb->iocb_flag & LPFC_IO_LIBDFC) {
-+ continue;
-+ }
-+
-+ /* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */
-+ if ((cmd->ulpCommand == CMD_QUE_RING_BUF_CN) ||
-+ (cmd->ulpCommand == CMD_QUE_RING_BUF64_CN) ||
-+ (cmd->ulpCommand == CMD_CLOSE_XRI_CN) ||
-+ (cmd->ulpCommand == CMD_ABORT_XRI_CN)) {
-+ continue;
-+ }
-+
-+ pcmd = (struct lpfc_dmabuf *) piocb->context2;
-+ elscmd = (uint32_t *) (pcmd->virt);
-+ els_command = *elscmd;
-+
-+ if (cmd->ulpCommand == CMD_GEN_REQUEST64_CR) {
-+ struct lpfc_nodelist *ndlp;
-+
-+ ndlp = lpfc_findnode_rpi(phba, cmd->ulpContext);
-+ remote_ID = ndlp->nlp_DID;
-+ if (phba->hba_state == LPFC_HBA_READY) {
-+ continue;
-+ }
-+ } else {
-+ remote_ID = cmd->un.elsreq64.remoteID;
-+ }
-+
-+ list_del(&piocb->list);
-+ pring->txcmplq_cnt--;
-+
-+ cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
-+ cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
-+
-+ if (piocb->iocb_cmpl) {
-+ (piocb->iocb_cmpl) (phba, piocb, piocb);
-+ } else {
-+ mempool_free( piocb, phba->iocb_mem_pool);
-+ }
-+ }
-+
-+ list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
-+ cmd = &piocb->iocb;
-+
-+ if (piocb->iocb_flag & LPFC_IO_LIBDFC) {
-+ continue;
-+ }
-+ pcmd = (struct lpfc_dmabuf *) piocb->context2;
-+ elscmd = (uint32_t *) (pcmd->virt);
-+ els_command = *elscmd;
-+
-+ if (cmd->ulpCommand == CMD_GEN_REQUEST64_CR) {
-+ struct lpfc_nodelist *ndlp;
-+
-+ ndlp = lpfc_findnode_rpi(phba, cmd->ulpContext);
-+ remote_ID = ndlp->nlp_DID;
-+ if (phba->hba_state == LPFC_HBA_READY) {
-+ continue;
-+ }
-+ } else {
-+ remote_ID = cmd->un.elsreq64.remoteID;
-+ }
-+
-+ list_del(&piocb->list);
-+ pring->txcmplq_cnt--;
-+
-+ cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
-+ cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
-+
-+ if (piocb->iocb_cmpl) {
-+ (piocb->iocb_cmpl) (phba, piocb, piocb);
-+ } else {
-+ mempool_free( piocb, phba->iocb_mem_pool);
-+ }
-+ }
-+ return;
-+}
-+
-+void
-+lpfc_els_unsol_event(struct lpfc_hba * phba,
-+ struct lpfc_sli_ring * pring, struct lpfc_iocbq * elsiocb)
-+{
-+ struct lpfc_sli *psli;
-+ struct lpfc_nodelist *ndlp;
-+ struct lpfc_dmabuf *mp;
-+ uint32_t *lp;
-+ IOCB_t *icmd;
-+ struct ls_rjt stat;
-+ uint32_t cmd;
-+ uint32_t did;
-+ uint32_t newnode;
-+ uint32_t drop_cmd = 0; /* by default do NOT drop received cmd */
-+ uint32_t rjt_err = 0;
-+
-+ psli = &phba->sli;
-+ icmd = &elsiocb->iocb;
-+
-+ if ((icmd->ulpStatus == IOSTAT_LOCAL_REJECT) &&
-+ ((icmd->un.ulpWord[4] & 0xff) == IOERR_RCV_BUFFER_WAITING)) {
-+ /* Not enough posted buffers; Try posting more buffers */
-+ phba->fc_stat.NoRcvBuf++;
-+ lpfc_post_buffer(phba, pring, 0, 1);
-+ return;
-+ }
-+
-+ /* If there are no BDEs associated with this IOCB,
-+ * there is nothing to do.
-+ */
-+ if (icmd->ulpBdeCount == 0)
-+ return;
-+
-+ /* type of ELS cmd is first 32bit word in packet */
-+ mp = lpfc_sli_ringpostbuf_get(phba, pring, getPaddr(icmd->un.
-+ cont64[0].
-+ addrHigh,
-+ icmd->un.
-+ cont64[0].addrLow));
-+ if (mp == 0) {
-+ drop_cmd = 1;
-+ goto dropit;
-+ }
-+
-+ newnode = 0;
-+ lp = (uint32_t *) mp->virt;
-+ cmd = *lp++;
-+ lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], 1, 1);
-+
-+ if (icmd->ulpStatus) {
-+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
-+ kfree(mp);
-+ drop_cmd = 1;
-+ goto dropit;
-+ }
-+
-+ /* Check to see if link went down during discovery */
-+ if (lpfc_els_chk_latt(phba)) {
-+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
-+ kfree(mp);
-+ drop_cmd = 1;
-+ goto dropit;
-+ }
-+
-+ did = icmd->un.rcvels.remoteID;
-+ if ((ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, did)) == 0) {
-+ /* Cannot find existing Fabric ndlp, so allocate a new one */
-+ if ((ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC))
-+ == 0) {
-+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
-+ kfree(mp);
-+ drop_cmd = 1;
-+ goto dropit;
-+ }
-+
-+ lpfc_nlp_init(phba, ndlp, did);
-+ newnode = 1;
-+ if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) {
-+ ndlp->nlp_type |= NLP_FABRIC;
-+ }
-+ }
-+
-+ phba->fc_stat.elsRcvFrame++;
-+ elsiocb->context1 = ndlp;
-+ elsiocb->context2 = mp;
-+
-+ if ((cmd & ELS_CMD_MASK) == ELS_CMD_RSCN) {
-+ cmd &= ELS_CMD_MASK;
-+ }
-+ /* ELS command <elsCmd> received from NPORT <did> */
-+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
-+ "%d:0112 ELS command x%x received from NPORT x%x "
-+ "Data: x%x\n", phba->brd_no, cmd, did, phba->hba_state);
-+
-+ switch (cmd) {
-+ case ELS_CMD_PLOGI:
-+ phba->fc_stat.elsRcvPLOGI++;
-+ if(phba->hba_state < LPFC_DISC_AUTH) {
-+ rjt_err = LSEXP_NOTHING_MORE;
-+ break;
-+ }
-+ lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_PLOGI);
-+ break;
-+ case ELS_CMD_FLOGI:
-+ phba->fc_stat.elsRcvFLOGI++;
-+ lpfc_els_rcv_flogi(phba, elsiocb, ndlp, newnode);
-+ if (newnode) {
-+ mempool_free( ndlp, phba->nlp_mem_pool);
-+ }
-+ break;
-+ case ELS_CMD_LOGO:
-+ phba->fc_stat.elsRcvLOGO++;
-+ if(phba->hba_state < LPFC_DISC_AUTH) {
-+ rjt_err = LSEXP_NOTHING_MORE;
-+ break;
-+ }
-+ lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_LOGO);
-+ break;
-+ case ELS_CMD_PRLO:
-+ phba->fc_stat.elsRcvPRLO++;
-+ if(phba->hba_state < LPFC_DISC_AUTH) {
-+ rjt_err = LSEXP_NOTHING_MORE;
-+ break;
-+ }
-+ lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_PRLO);
-+ break;
-+ case ELS_CMD_RSCN:
-+ phba->fc_stat.elsRcvRSCN++;
-+ lpfc_els_rcv_rscn(phba, elsiocb, ndlp, newnode);
-+ if (newnode) {
-+ mempool_free( ndlp, phba->nlp_mem_pool);
-+ }
-+ break;
-+ case ELS_CMD_ADISC:
-+ phba->fc_stat.elsRcvADISC++;
-+ if(phba->hba_state < LPFC_DISC_AUTH) {
-+ rjt_err = LSEXP_NOTHING_MORE;
-+ break;
-+ }
-+ lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_ADISC);
-+ break;
-+ case ELS_CMD_PDISC:
-+ phba->fc_stat.elsRcvPDISC++;
-+ if(phba->hba_state < LPFC_DISC_AUTH) {
-+ rjt_err = LSEXP_NOTHING_MORE;
-+ break;
-+ }
-+ lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_PDISC);
-+ break;
-+ case ELS_CMD_FARPR:
-+ phba->fc_stat.elsRcvFARPR++;
-+ lpfc_els_rcv_farpr(phba, elsiocb, ndlp);
-+ break;
-+ case ELS_CMD_FARP:
-+ phba->fc_stat.elsRcvFARP++;
-+ lpfc_els_rcv_farp(phba, elsiocb, ndlp);
-+ break;
-+ case ELS_CMD_FAN:
-+ phba->fc_stat.elsRcvFAN++;
-+ lpfc_els_rcv_fan(phba, elsiocb, ndlp);
-+ break;
-+ case ELS_CMD_RRQ:
-+ phba->fc_stat.elsRcvRRQ++;
-+ lpfc_els_rcv_rrq(phba, elsiocb, ndlp);
-+ break;
-+ case ELS_CMD_PRLI:
-+ phba->fc_stat.elsRcvPRLI++;
-+ if(phba->hba_state < LPFC_DISC_AUTH) {
-+ rjt_err = LSEXP_NOTHING_MORE;
-+ break;
-+ }
-+ lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_PRLI);
-+ break;
-+ case ELS_CMD_RNID:
-+ phba->fc_stat.elsRcvRNID++;
-+ lpfc_els_rcv_rnid(phba, elsiocb, ndlp);
-+ break;
-+ default:
-+ /* Unsupported ELS command, reject */
-+ rjt_err = LSEXP_NOTHING_MORE;
-+
-+ /* Unknown ELS command <elsCmd> received from NPORT <did> */
-+ lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
-+ "%d:0115 Unknown ELS command x%x received from "
-+ "NPORT x%x\n", phba->brd_no, cmd, did);
-+ if (newnode) {
-+ mempool_free( ndlp, phba->nlp_mem_pool);
-+ }
-+ break;
-+ }
-+
-+ /* check if need to LS_RJT received ELS cmd */
-+ if (rjt_err) {
-+ stat.un.b.lsRjtRsvd0 = 0;
-+ stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
-+ stat.un.b.lsRjtRsnCodeExp = rjt_err;
-+ stat.un.b.vendorUnique = 0;
-+ lpfc_els_rsp_reject(phba, stat.un.lsRjtError, elsiocb, ndlp);
-+ }
-+
-+ if (elsiocb->context2) {
-+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
-+ kfree(mp);
-+ }
-+dropit:
-+ /* check if need to drop received ELS cmd */
-+ if (drop_cmd == 1) {
-+ lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
-+ "%d:0111 Dropping received ELS cmd "
-+ "Data: x%x x%x\n", phba->brd_no,
-+ icmd->ulpStatus, icmd->un.ulpWord[4]);
-+ phba->fc_stat.elsRcvDrop++;
-+ }
-+ return;
-+}
---- linux-2.6.8.1-t044-driver-update//drivers/scsi/lpfc/lpfc_ct.c 1970-01-01 03:00:00.000000000 +0300
-+++ rhel4u2//drivers/scsi/lpfc/lpfc_ct.c 2005-10-19 11:47:17.000000000 +0400
-@@ -0,0 +1,1235 @@
-+/*******************************************************************
-+ * This file is part of the Emulex Linux Device Driver for *
-+ * Fibre Channel Host Bus Adapters. *
-+ * Copyright (C) 2003-2005 Emulex. All rights reserved. *
-+ * EMULEX and SLI are trademarks of Emulex. *
-+ * www.emulex.com *
-+ * *
-+ * This program is free software; you can redistribute it and/or *
-+ * modify it under the terms of version 2 of the GNU General *
-+ * Public License as published by the Free Software Foundation. *
-+ * This program is distributed in the hope that it will be useful. *
-+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
-+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
-+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
-+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
-+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
-+ * more details, a copy of which can be found in the file COPYING *
-+ * included with this package. *
-+ *******************************************************************/
-+
-+/*
-+ * $Id: lpfc_ct.c 1.150.2.2 2005/06/13 17:16:09EDT sf_support Exp $
-+ *
-+ * Fibre Channel SCSI LAN Device Driver CT support
-+ */
-+
-+#include <linux/version.h>
-+#include <linux/blkdev.h>
-+#include <linux/dma-mapping.h>
-+#include <linux/pci.h>
-+#include <linux/spinlock.h>
-+#include <linux/utsname.h>
-+#include <scsi/scsi_device.h>
-+#include <scsi/scsi_host.h>
-+#include "lpfc_sli.h"
-+#include "lpfc_disc.h"
-+#include "lpfc_scsi.h"
-+#include "lpfc.h"
-+#include "lpfc_crtn.h"
-+#include "lpfc_hw.h"
-+#include "lpfc_logmsg.h"
-+#include "lpfc_mem.h"
-+#include "lpfc_version.h"
-+
-+
-+#define HBA_PORTSPEED_UNKNOWN 0 /* Unknown - transceiver
-+ * incapable of reporting */
-+#define HBA_PORTSPEED_1GBIT 1 /* 1 GBit/sec */
-+#define HBA_PORTSPEED_2GBIT 2 /* 2 GBit/sec */
-+#define HBA_PORTSPEED_4GBIT 8 /* 4 GBit/sec */
-+#define HBA_PORTSPEED_8GBIT 16 /* 8 GBit/sec */
-+#define HBA_PORTSPEED_10GBIT 4 /* 10 GBit/sec */
-+#define HBA_PORTSPEED_NOT_NEGOTIATED 5 /* Speed not established */
-+
-+#define FOURBYTES 4
-+
-+
-+static char *lpfc_release_version = LPFC_DRIVER_VERSION;
-+
-+/*
-+ * lpfc_ct_unsol_event
-+ */
-+void
-+lpfc_ct_unsol_event(struct lpfc_hba * phba,
-+ struct lpfc_sli_ring * pring, struct lpfc_iocbq * piocbq)
-+{
-+
-+ struct lpfc_iocbq *next_piocbq;
-+ struct lpfc_dmabuf *pmbuf = NULL;
-+ struct lpfc_dmabuf *matp, *next_matp;
-+ uint32_t ctx = 0, size = 0, cnt = 0;
-+ IOCB_t *icmd = &piocbq->iocb;
-+ IOCB_t *save_icmd = icmd;
-+ int i, status, go_exit = 0;
-+ struct list_head head;
-+
-+ if ((icmd->ulpStatus == IOSTAT_LOCAL_REJECT) &&
-+ ((icmd->un.ulpWord[4] & 0xff) == IOERR_RCV_BUFFER_WAITING)) {
-+ /* Not enough posted buffers; Try posting more buffers */
-+ phba->fc_stat.NoRcvBuf++;
-+ lpfc_post_buffer(phba, pring, 0, 1);
-+ return;
-+ }
-+
-+ /* If there are no BDEs associated with this IOCB,
-+ * there is nothing to do.
-+ */
-+ if (icmd->ulpBdeCount == 0)
-+ return;
-+
-+ INIT_LIST_HEAD(&head);
-+ list_add_tail(&head, &piocbq->list);
-+ list_for_each_entry_safe(piocbq, next_piocbq, &head, list) {
-+ icmd = &piocbq->iocb;
-+ if (ctx == 0)
-+ ctx = (uint32_t) (icmd->ulpContext);
-+ if (icmd->ulpBdeCount == 0)
-+ continue;
-+
-+ for (i = 0; i < icmd->ulpBdeCount; i++) {
-+ matp = lpfc_sli_ringpostbuf_get(phba, pring,
-+ getPaddr(icmd->un.
-+ cont64[i].
-+ addrHigh,
-+ icmd->un.
-+ cont64[i].
-+ addrLow));
-+ if (!matp) {
-+ /* Insert lpfc log message here */
-+ lpfc_post_buffer(phba, pring, cnt, 1);
-+ go_exit = 1;
-+ goto ct_unsol_event_exit_piocbq;
-+ }
-+
-+ /* Typically for Unsolicited CT requests */
-+ if (!pmbuf) {
-+ pmbuf = matp;
-+ INIT_LIST_HEAD(&pmbuf->list);
-+ } else
-+ list_add_tail(&matp->list, &pmbuf->list);
-+
-+ size += icmd->un.cont64[i].tus.f.bdeSize;
-+ cnt++;
-+ }
-+
-+ icmd->ulpBdeCount = 0;
-+ }
-+
-+ lpfc_post_buffer(phba, pring, cnt, 1);
-+ if (save_icmd->ulpStatus) {
-+ go_exit = 1;
-+ }
-+ct_unsol_event_exit_piocbq:
-+ list_del(&head);
-+ /*
-+ * if not early-exiting and there is pmbuf,
-+ * then do FC_REG_CT_EVENT for libdfc
-+ */
-+ if (!go_exit && pmbuf) {
-+ status = lpfc_put_event(phba, FC_REG_CT_EVENT, ctx,
-+ (void *)pmbuf, size, 0);
-+ if (status)
-+ return;
-+ }
-+ if (pmbuf) {
-+ list_for_each_entry_safe(matp, next_matp, &pmbuf->list, list) {
-+ lpfc_mbuf_free(phba, matp->virt, matp->phys);
-+ list_del(&matp->list);
-+ kfree(matp);
-+ }
-+ lpfc_mbuf_free(phba, pmbuf->virt, pmbuf->phys);
-+ kfree(pmbuf);
-+ }
-+ return;
-+}
-+
-+static void
-+lpfc_free_ct_rsp(struct lpfc_hba * phba, struct lpfc_dmabuf * mlist)
-+{
-+ struct lpfc_dmabuf *mlast, *next_mlast;
-+
-+ list_for_each_entry_safe(mlast, next_mlast, &mlist->list, list) {
-+ lpfc_mbuf_free(phba, mlast->virt, mlast->phys);
-+ list_del(&mlast->list);
-+ kfree(mlast);
-+ }
-+ lpfc_mbuf_free(phba, mlist->virt, mlist->phys);
-+ kfree(mlist);
-+ return;
-+}
-+
-+static struct lpfc_dmabuf *
-+lpfc_alloc_ct_rsp(struct lpfc_hba * phba, int cmdcode, struct ulp_bde64 * bpl,
-+ uint32_t size, int *entries)
-+{
-+ struct lpfc_dmabuf *mlist = NULL;
-+ struct lpfc_dmabuf *mp;
-+ int cnt, i = 0;
-+
-+	/* We get chunks of FCELSSIZE */
-+ cnt = size > FCELSSIZE ? FCELSSIZE: size;
-+
-+ while (size) {
-+ /* Allocate buffer for rsp payload */
-+ mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_ATOMIC);
-+ if (!mp) {
-+ if (mlist)
-+ lpfc_free_ct_rsp(phba, mlist);
-+ return NULL;
-+ }
-+
-+ INIT_LIST_HEAD(&mp->list);
-+
-+ if (cmdcode == be16_to_cpu(SLI_CTNS_GID_FT))
-+ mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(mp->phys));
-+ else
-+ mp->virt = lpfc_mbuf_alloc(phba, 0, &(mp->phys));
-+
-+ if (!mp->virt) {
-+ kfree(mp);
-+ lpfc_free_ct_rsp(phba, mlist);
-+ return NULL;
-+ }
-+
-+ /* Queue it to a linked list */
-+ if (!mlist)
-+ mlist = mp;
-+ else
-+ list_add_tail(&mp->list, &mlist->list);
-+
-+ bpl->tus.f.bdeFlags = BUFF_USE_RCV;
-+ /* build buffer ptr list for IOCB */
-+ bpl->addrLow = le32_to_cpu( putPaddrLow(mp->phys) );
-+ bpl->addrHigh = le32_to_cpu( putPaddrHigh(mp->phys) );
-+ bpl->tus.f.bdeSize = (uint16_t) cnt;
-+ bpl->tus.w = le32_to_cpu(bpl->tus.w);
-+ bpl++;
-+
-+ i++;
-+ size -= cnt;
-+ }
-+
-+ *entries = i;
-+ return mlist;
-+}
-+
-+static int
-+lpfc_gen_req(struct lpfc_hba *phba, struct lpfc_dmabuf *bmp,
-+ struct lpfc_dmabuf *inp, struct lpfc_dmabuf *outp,
-+ void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
-+ struct lpfc_iocbq *),
-+ struct lpfc_nodelist *ndlp, uint32_t usr_flg, uint32_t num_entry,
-+ uint32_t tmo)
-+{
-+
-+ struct lpfc_sli *psli = &phba->sli;
-+ struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
-+ IOCB_t *icmd;
-+ struct lpfc_iocbq *geniocb;
-+
-+ /* Allocate buffer for command iocb */
-+ geniocb = mempool_alloc(phba->iocb_mem_pool, GFP_ATOMIC);
-+ if (!geniocb) {
-+ return 1;
-+ }
-+ memset(geniocb, 0, sizeof (struct lpfc_iocbq));
-+ icmd = &geniocb->iocb;
-+
-+ icmd->un.genreq64.bdl.ulpIoTag32 = 0;
-+ icmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
-+ icmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
-+ icmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BDL;
-+ icmd->un.genreq64.bdl.bdeSize = (num_entry * sizeof (struct ulp_bde64));
-+
-+ if (usr_flg)
-+ geniocb->context3 = NULL;
-+ else
-+ geniocb->context3 = (uint8_t *) bmp;
-+
-+ /* Save for completion so we can release these resources */
-+ geniocb->context1 = (uint8_t *) inp;
-+ geniocb->context2 = (uint8_t *) outp;
-+
-+ /* Fill in payload, bp points to frame payload */
-+ icmd->ulpCommand = CMD_GEN_REQUEST64_CR;
-+
-+ icmd->ulpIoTag = lpfc_sli_next_iotag(phba, pring);
-+
-+ /* Fill in rest of iocb */
-+ icmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
-+ icmd->un.genreq64.w5.hcsw.Dfctl = 0;
-+ icmd->un.genreq64.w5.hcsw.Rctl = FC_UNSOL_CTL;
-+ icmd->un.genreq64.w5.hcsw.Type = FC_COMMON_TRANSPORT_ULP;
-+
-+ if (!tmo)
-+ tmo = (2 * phba->fc_ratov) + 1;
-+ icmd->ulpTimeout = tmo;
-+ icmd->ulpBdeCount = 1;
-+ icmd->ulpLe = 1;
-+ icmd->ulpClass = CLASS3;
-+ icmd->ulpContext = ndlp->nlp_rpi;
-+
-+ /* Issue GEN REQ IOCB for NPORT <did> */
-+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
-+ "%d:0119 Issue GEN REQ IOCB for NPORT x%x "
-+ "Data: x%x x%x\n", phba->brd_no, icmd->un.ulpWord[5],
-+ icmd->ulpIoTag, phba->hba_state);
-+ geniocb->iocb_cmpl = cmpl;
-+ geniocb->drvrTimeout = icmd->ulpTimeout + LPFC_DRVR_TIMEOUT;
-+ if (lpfc_sli_issue_iocb(phba, pring, geniocb, 0) == IOCB_ERROR) {
-+ mempool_free( geniocb, phba->iocb_mem_pool);
-+ return 1;
-+ }
-+
-+ return 0;
-+}
-+
-+static int
-+lpfc_ct_cmd(struct lpfc_hba *phba, struct lpfc_dmabuf *inmp,
-+ struct lpfc_dmabuf *bmp, struct lpfc_nodelist *ndlp,
-+ void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
-+ struct lpfc_iocbq *),
-+ uint32_t rsp_size)
-+{
-+ struct ulp_bde64 *bpl = (struct ulp_bde64 *) bmp->virt;
-+ struct lpfc_dmabuf *outmp;
-+ int cnt = 0, status;
-+ int cmdcode = ((struct lpfc_sli_ct_request *) inmp->virt)->
-+ CommandResponse.bits.CmdRsp;
-+
-+ bpl++; /* Skip past ct request */
-+
-+ /* Put buffer(s) for ct rsp in bpl */
-+ outmp = lpfc_alloc_ct_rsp(phba, cmdcode, bpl, rsp_size, &cnt);
-+ if (!outmp)
-+ return -ENOMEM;
-+
-+ status = lpfc_gen_req(phba, bmp, inmp, outmp, cmpl, ndlp, 0,
-+ cnt+1, 0);
-+ if (status) {
-+ lpfc_free_ct_rsp(phba, outmp);
-+ return -ENOMEM;
-+ }
-+ return 0;
-+}
-+
-+static int
-+lpfc_ns_rsp(struct lpfc_hba * phba, struct lpfc_dmabuf * mp, uint32_t Size)
-+{
-+ struct lpfc_sli_ct_request *Response =
-+ (struct lpfc_sli_ct_request *) mp->virt;
-+ struct lpfc_nodelist *ndlp = NULL;
-+ struct lpfc_dmabuf *mlast, *next_mp;
-+ uint32_t *ctptr = (uint32_t *) & Response->un.gid.PortType;
-+ uint32_t Did;
-+ uint32_t CTentry;
-+ int Cnt;
-+ struct list_head head;
-+
-+ lpfc_set_disctmo(phba);
-+
-+ Cnt = Size > FCELSSIZE ? FCELSSIZE : Size;
-+
-+ list_add_tail(&head, &mp->list);
-+ list_for_each_entry_safe(mp, next_mp, &head, list) {
-+ mlast = mp;
-+ Size -= Cnt;
-+
-+ if (!ctptr)
-+ ctptr = (uint32_t *) mlast->virt;
-+ else
-+ Cnt -= 16; /* subtract length of CT header */
-+
-+ /* Loop through entire NameServer list of DIDs */
-+ while (Cnt) {
-+
-+ /* Get next DID from NameServer List */
-+ CTentry = *ctptr++;
-+ Did = ((be32_to_cpu(CTentry)) & Mask_DID);
-+
-+ ndlp = NULL;
-+ if (Did != phba->fc_myDID) {
-+ /* Check for rscn processing or not */
-+ ndlp = lpfc_setup_disc_node(phba, Did);
-+ }
-+ /* Mark all node table entries that are in the
-+ Nameserver */
-+ if (ndlp) {
-+ /* NameServer Rsp */
-+ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
-+ "%d:0238 Process x%x NameServer"
-+ " Rsp Data: x%x x%x x%x\n",
-+ phba->brd_no,
-+ Did, ndlp->nlp_flag,
-+ phba->fc_flag,
-+ phba->fc_rscn_id_cnt);
-+ } else {
-+ /* NameServer Rsp */
-+ lpfc_printf_log(phba,
-+ KERN_INFO,
-+ LOG_DISCOVERY,
-+ "%d:0239 Skip x%x NameServer "
-+ "Rsp Data: x%x x%x x%x\n",
-+ phba->brd_no,
-+ Did, Size, phba->fc_flag,
-+ phba->fc_rscn_id_cnt);
-+ }
-+
-+ if (CTentry & (be32_to_cpu(SLI_CT_LAST_ENTRY)))
-+ goto nsout1;
-+ Cnt -= sizeof (uint32_t);
-+ }
-+ ctptr = NULL;
-+
-+ }
-+
-+nsout1:
-+ list_del(&head);
-+
-+ /* Here we are finished in the case RSCN */
-+ if (phba->hba_state == LPFC_HBA_READY) {
-+ lpfc_els_flush_rscn(phba);
-+ phba->fc_flag |= FC_RSCN_MODE; /* we are still in RSCN mode */
-+ }
-+ return 0;
-+}
-+
-+
-+
-+
-+static void
-+lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
-+ struct lpfc_iocbq * rspiocb)
-+{
-+ IOCB_t *irsp;
-+ struct lpfc_sli *psli;
-+ struct lpfc_dmabuf *bmp;
-+ struct lpfc_dmabuf *inp;
-+ struct lpfc_dmabuf *outp;
-+ struct lpfc_nodelist *ndlp;
-+ struct lpfc_sli_ct_request *CTrsp;
-+
-+ psli = &phba->sli;
-+ /* we pass cmdiocb to state machine which needs rspiocb as well */
-+ cmdiocb->context_un.rsp_iocb = rspiocb;
-+
-+ inp = (struct lpfc_dmabuf *) cmdiocb->context1;
-+ outp = (struct lpfc_dmabuf *) cmdiocb->context2;
-+ bmp = (struct lpfc_dmabuf *) cmdiocb->context3;
-+
-+ irsp = &rspiocb->iocb;
-+ if (irsp->ulpStatus) {
-+ if((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
-+ ((irsp->un.ulpWord[4] == IOERR_SLI_DOWN) ||
-+ (irsp->un.ulpWord[4] == IOERR_SLI_ABORTED))) {
-+ goto out;
-+ }
-+
-+ /* Check for retry */
-+ if (phba->fc_ns_retry < LPFC_MAX_NS_RETRY) {
-+ phba->fc_ns_retry++;
-+ /* CT command is being retried */
-+ ndlp =
-+ lpfc_findnode_did(phba, NLP_SEARCH_UNMAPPED,
-+ NameServer_DID);
-+ if (ndlp) {
-+ if (lpfc_ns_cmd(phba, ndlp, SLI_CTNS_GID_FT) ==
-+ 0) {
-+ goto out;
-+ }
-+ }
-+ }
-+ } else {
-+ /* Good status, continue checking */
-+ CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
-+ if (CTrsp->CommandResponse.bits.CmdRsp ==
-+ be16_to_cpu(SLI_CT_RESPONSE_FS_ACC)) {
-+ lpfc_ns_rsp(phba, outp,
-+ (uint32_t) (irsp->un.genreq64.bdl.bdeSize));
-+ } else if (CTrsp->CommandResponse.bits.CmdRsp ==
-+ be16_to_cpu(SLI_CT_RESPONSE_FS_RJT)) {
-+ /* NameServer Rsp Error */
-+ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
-+ "%d:0240 NameServer Rsp Error "
-+ "Data: x%x x%x x%x x%x\n",
-+ phba->brd_no,
-+ CTrsp->CommandResponse.bits.CmdRsp,
-+ (uint32_t) CTrsp->ReasonCode,
-+ (uint32_t) CTrsp->Explanation,
-+ phba->fc_flag);
-+ } else {
-+ /* NameServer Rsp Error */
-+ lpfc_printf_log(phba,
-+ KERN_INFO,
-+ LOG_DISCOVERY,
-+ "%d:0241 NameServer Rsp Error "
-+ "Data: x%x x%x x%x x%x\n",
-+ phba->brd_no,
-+ CTrsp->CommandResponse.bits.CmdRsp,
-+ (uint32_t) CTrsp->ReasonCode,
-+ (uint32_t) CTrsp->Explanation,
-+ phba->fc_flag);
-+ }
-+ }
-+ /* Link up / RSCN discovery */
-+ lpfc_disc_start(phba);
-+out:
-+ lpfc_free_ct_rsp(phba, outp);
-+ lpfc_mbuf_free(phba, inp->virt, inp->phys);
-+ lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
-+ kfree(inp);
-+ kfree(bmp);
-+ mempool_free( cmdiocb, phba->iocb_mem_pool);
-+ return;
-+}
-+
-+static void
-+lpfc_cmpl_ct_cmd_rft_id(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
-+ struct lpfc_iocbq * rspiocb)
-+{
-+ struct lpfc_sli *psli;
-+ struct lpfc_dmabuf *bmp;
-+ struct lpfc_dmabuf *inp;
-+ struct lpfc_dmabuf *outp;
-+ IOCB_t *irsp;
-+ struct lpfc_sli_ct_request *CTrsp;
-+
-+ psli = &phba->sli;
-+ /* we pass cmdiocb to state machine which needs rspiocb as well */
-+ cmdiocb->context_un.rsp_iocb = rspiocb;
-+
-+ inp = (struct lpfc_dmabuf *) cmdiocb->context1;
-+ outp = (struct lpfc_dmabuf *) cmdiocb->context2;
-+ bmp = (struct lpfc_dmabuf *) cmdiocb->context3;
-+ irsp = &rspiocb->iocb;
-+
-+ CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
-+
-+ /* RFT request completes status <ulpStatus> CmdRsp <CmdRsp> */
-+ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
-+ "%d:0209 RFT request completes ulpStatus x%x "
-+ "CmdRsp x%x\n", phba->brd_no, irsp->ulpStatus,
-+ CTrsp->CommandResponse.bits.CmdRsp);
-+
-+ lpfc_free_ct_rsp(phba, outp);
-+ lpfc_mbuf_free(phba, inp->virt, inp->phys);
-+ lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
-+ kfree(inp);
-+ kfree(bmp);
-+ mempool_free( cmdiocb, phba->iocb_mem_pool);
-+ return;
-+}
-+
-+static void
-+lpfc_cmpl_ct_cmd_rnn_id(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
-+ struct lpfc_iocbq * rspiocb)
-+{
-+ lpfc_cmpl_ct_cmd_rft_id(phba, cmdiocb, rspiocb);
-+ return;
-+}
-+
-+static void
-+lpfc_cmpl_ct_cmd_rsnn_nn(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
-+ struct lpfc_iocbq * rspiocb)
-+{
-+ lpfc_cmpl_ct_cmd_rft_id(phba, cmdiocb, rspiocb);
-+ return;
-+}
-+
-+static void
-+lpfc_get_hba_sym_node_name(struct lpfc_hba * phba, uint8_t * symbp)
-+{
-+ char fwrev[16];
-+
-+ lpfc_decode_firmware_rev(phba, fwrev, 0);
-+
-+ if (phba->Port[0]) {
-+ sprintf(symbp, "Emulex %s Port %s FV%s DV%s", phba->ModelName,
-+ phba->Port, fwrev, lpfc_release_version);
-+ } else {
-+ sprintf(symbp, "Emulex %s FV%s DV%s", phba->ModelName,
-+ fwrev, lpfc_release_version);
-+ }
-+}
-+
-+/*
-+ * lpfc_ns_cmd
-+ * Description:
-+ * Issue Cmd to NameServer
-+ * SLI_CTNS_GID_FT
-+ *       SLI_CTNS_RFT_ID
-+ */
-+int
-+lpfc_ns_cmd(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, int cmdcode)
-+{
-+ struct lpfc_dmabuf *mp, *bmp;
-+ struct lpfc_sli_ct_request *CtReq;
-+ struct ulp_bde64 *bpl;
-+ void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
-+ struct lpfc_iocbq *) = NULL;
-+ uint32_t rsp_size = 1024;
-+
-+ /* fill in BDEs for command */
-+ /* Allocate buffer for command payload */
-+ mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_ATOMIC);
-+ if (!mp)
-+ goto ns_cmd_exit;
-+
-+ INIT_LIST_HEAD(&mp->list);
-+ mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(mp->phys));
-+ if (!mp->virt)
-+ goto ns_cmd_free_mp;
-+
-+ /* Allocate buffer for Buffer ptr list */
-+ bmp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_ATOMIC);
-+ if (!bmp)
-+ goto ns_cmd_free_mpvirt;
-+
-+ INIT_LIST_HEAD(&bmp->list);
-+ bmp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(bmp->phys));
-+ if (!bmp->virt)
-+ goto ns_cmd_free_bmp;
-+
-+ /* NameServer Req */
-+ lpfc_printf_log(phba,
-+ KERN_INFO,
-+ LOG_DISCOVERY,
-+ "%d:0236 NameServer Req Data: x%x x%x x%x\n",
-+ phba->brd_no, cmdcode, phba->fc_flag,
-+ phba->fc_rscn_id_cnt);
-+
-+ bpl = (struct ulp_bde64 *) bmp->virt;
-+ memset(bpl, 0, sizeof(struct ulp_bde64));
-+ bpl->addrHigh = le32_to_cpu( putPaddrHigh(mp->phys) );
-+ bpl->addrLow = le32_to_cpu( putPaddrLow(mp->phys) );
-+ bpl->tus.f.bdeFlags = 0;
-+ if (cmdcode == SLI_CTNS_GID_FT)
-+ bpl->tus.f.bdeSize = GID_REQUEST_SZ;
-+ else if (cmdcode == SLI_CTNS_RFT_ID)
-+ bpl->tus.f.bdeSize = RFT_REQUEST_SZ;
-+ else if (cmdcode == SLI_CTNS_RNN_ID)
-+ bpl->tus.f.bdeSize = RNN_REQUEST_SZ;
-+ else if (cmdcode == SLI_CTNS_RSNN_NN)
-+ bpl->tus.f.bdeSize = RSNN_REQUEST_SZ;
-+ else
-+ bpl->tus.f.bdeSize = 0;
-+ bpl->tus.w = le32_to_cpu(bpl->tus.w);
-+
-+ CtReq = (struct lpfc_sli_ct_request *) mp->virt;
-+ memset(CtReq, 0, sizeof (struct lpfc_sli_ct_request));
-+ CtReq->RevisionId.bits.Revision = SLI_CT_REVISION;
-+ CtReq->RevisionId.bits.InId = 0;
-+ CtReq->FsType = SLI_CT_DIRECTORY_SERVICE;
-+ CtReq->FsSubType = SLI_CT_DIRECTORY_NAME_SERVER;
-+ CtReq->CommandResponse.bits.Size = 0;
-+ switch (cmdcode) {
-+ case SLI_CTNS_GID_FT:
-+ CtReq->CommandResponse.bits.CmdRsp =
-+ be16_to_cpu(SLI_CTNS_GID_FT);
-+ CtReq->un.gid.Fc4Type = SLI_CTPT_FCP;
-+ if (phba->hba_state < LPFC_HBA_READY)
-+ phba->hba_state = LPFC_NS_QRY;
-+ lpfc_set_disctmo(phba);
-+ cmpl = lpfc_cmpl_ct_cmd_gid_ft;
-+ rsp_size = FC_MAX_NS_RSP;
-+ break;
-+
-+ case SLI_CTNS_RFT_ID:
-+ CtReq->CommandResponse.bits.CmdRsp =
-+ be16_to_cpu(SLI_CTNS_RFT_ID);
-+ CtReq->un.rft.PortId = be32_to_cpu(phba->fc_myDID);
-+ CtReq->un.rft.fcpReg = 1;
-+ cmpl = lpfc_cmpl_ct_cmd_rft_id;
-+ break;
-+
-+ case SLI_CTNS_RNN_ID:
-+ CtReq->CommandResponse.bits.CmdRsp =
-+ be16_to_cpu(SLI_CTNS_RNN_ID);
-+ CtReq->un.rnn.PortId = be32_to_cpu(phba->fc_myDID);
-+ memcpy(CtReq->un.rnn.wwnn, &phba->fc_nodename,
-+ sizeof (struct lpfc_name));
-+ cmpl = lpfc_cmpl_ct_cmd_rnn_id;
-+ break;
-+
-+ case SLI_CTNS_RSNN_NN:
-+ CtReq->CommandResponse.bits.CmdRsp =
-+ be16_to_cpu(SLI_CTNS_RSNN_NN);
-+ memcpy(CtReq->un.rsnn.wwnn, &phba->fc_nodename,
-+ sizeof (struct lpfc_name));
-+ lpfc_get_hba_sym_node_name(phba, CtReq->un.rsnn.symbname);
-+ CtReq->un.rsnn.len = strlen(CtReq->un.rsnn.symbname);
-+ cmpl = lpfc_cmpl_ct_cmd_rsnn_nn;
-+ break;
-+ }
-+
-+ if (!lpfc_ct_cmd(phba, mp, bmp, ndlp, cmpl, rsp_size))
-+ /* On success, The cmpl function will free the buffers */
-+ return 0;
-+
-+ lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
-+ns_cmd_free_bmp:
-+ kfree(bmp);
-+ns_cmd_free_mpvirt:
-+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
-+ns_cmd_free_mp:
-+ kfree(mp);
-+ns_cmd_exit:
-+ return 1;
-+}
-+
-+static void
-+lpfc_cmpl_ct_cmd_fdmi(struct lpfc_hba * phba,
-+ struct lpfc_iocbq * cmdiocb, struct lpfc_iocbq * rspiocb)
-+{
-+ struct lpfc_dmabuf *bmp = cmdiocb->context3;
-+ struct lpfc_dmabuf *inp = cmdiocb->context1;
-+ struct lpfc_dmabuf *outp = cmdiocb->context2;
-+ struct lpfc_sli_ct_request *CTrsp = outp->virt;
-+ struct lpfc_sli_ct_request *CTcmd = inp->virt;
-+ struct lpfc_nodelist *ndlp;
-+ uint16_t fdmi_cmd = CTcmd->CommandResponse.bits.CmdRsp;
-+ uint16_t fdmi_rsp = CTrsp->CommandResponse.bits.CmdRsp;
-+
-+ ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, FDMI_DID);
-+ if (fdmi_rsp == be16_to_cpu(SLI_CT_RESPONSE_FS_RJT)) {
-+ /* FDMI rsp failed */
-+ lpfc_printf_log(phba,
-+ KERN_INFO,
-+ LOG_DISCOVERY,
-+ "%d:0220 FDMI rsp failed Data: x%x\n",
-+ phba->brd_no,
-+ be16_to_cpu(fdmi_cmd));
-+ }
-+
-+ switch (be16_to_cpu(fdmi_cmd)) {
-+ case SLI_MGMT_RHBA:
-+ lpfc_fdmi_cmd(phba, ndlp, SLI_MGMT_RPA);
-+ break;
-+
-+ case SLI_MGMT_RPA:
-+ break;
-+
-+ case SLI_MGMT_DHBA:
-+ lpfc_fdmi_cmd(phba, ndlp, SLI_MGMT_DPRT);
-+ break;
-+
-+ case SLI_MGMT_DPRT:
-+ lpfc_fdmi_cmd(phba, ndlp, SLI_MGMT_RHBA);
-+ break;
-+ }
-+
-+ lpfc_free_ct_rsp(phba, outp);
-+ lpfc_mbuf_free(phba, inp->virt, inp->phys);
-+ lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
-+ kfree(inp);
-+ kfree(bmp);
-+ mempool_free(cmdiocb, phba->iocb_mem_pool);
-+ return;
-+}
-+int
-+lpfc_fdmi_cmd(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, int cmdcode)
-+{
-+ struct lpfc_dmabuf *mp, *bmp;
-+ struct lpfc_sli_ct_request *CtReq;
-+ struct ulp_bde64 *bpl;
-+ uint32_t size;
-+ REG_HBA *rh;
-+ PORT_ENTRY *pe;
-+ REG_PORT_ATTRIBUTE *pab;
-+ ATTRIBUTE_BLOCK *ab;
-+ ATTRIBUTE_ENTRY *ae;
-+ void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
-+ struct lpfc_iocbq *);
-+
-+
-+ /* fill in BDEs for command */
-+ /* Allocate buffer for command payload */
-+ mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_ATOMIC);
-+ if (!mp)
-+ goto fdmi_cmd_exit;
-+
-+ mp->virt = lpfc_mbuf_alloc(phba, 0, &(mp->phys));
-+ if (!mp->virt)
-+ goto fdmi_cmd_free_mp;
-+
-+ /* Allocate buffer for Buffer ptr list */
-+ bmp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_ATOMIC);
-+ if (!bmp)
-+ goto fdmi_cmd_free_mpvirt;
-+
-+ bmp->virt = lpfc_mbuf_alloc(phba, 0, &(bmp->phys));
-+ if (!bmp->virt)
-+ goto fdmi_cmd_free_bmp;
-+
-+ INIT_LIST_HEAD(&mp->list);
-+ INIT_LIST_HEAD(&bmp->list);
-+
-+ /* FDMI request */
-+ lpfc_printf_log(phba,
-+ KERN_INFO,
-+ LOG_DISCOVERY,
-+ "%d:0218 FDMI Request Data: x%x x%x x%x\n",
-+ phba->brd_no,
-+ phba->fc_flag, phba->hba_state, cmdcode);
-+
-+ CtReq = (struct lpfc_sli_ct_request *) mp->virt;
-+
-+ memset(CtReq, 0, sizeof(struct lpfc_sli_ct_request));
-+ CtReq->RevisionId.bits.Revision = SLI_CT_REVISION;
-+ CtReq->RevisionId.bits.InId = 0;
-+
-+ CtReq->FsType = SLI_CT_MANAGEMENT_SERVICE;
-+ CtReq->FsSubType = SLI_CT_FDMI_Subtypes;
-+ size = 0;
-+
-+ switch (cmdcode) {
-+ case SLI_MGMT_RHBA:
-+ {
-+ lpfc_vpd_t *vp = &phba->vpd;
-+ uint32_t i, j, incr;
-+ int len;
-+
-+ CtReq->CommandResponse.bits.CmdRsp =
-+ be16_to_cpu(SLI_MGMT_RHBA);
-+ CtReq->CommandResponse.bits.Size = 0;
-+ rh = (REG_HBA *) & CtReq->un.PortID;
-+ memcpy(&rh->hi.PortName, &phba->fc_sparam.portName,
-+ sizeof (struct lpfc_name));
-+ /* One entry (port) per adapter */
-+ rh->rpl.EntryCnt = be32_to_cpu(1);
-+ memcpy(&rh->rpl.pe, &phba->fc_sparam.portName,
-+ sizeof (struct lpfc_name));
-+
-+ /* point to the HBA attribute block */
-+ size = 2 * sizeof (struct lpfc_name) + FOURBYTES;
-+ ab = (ATTRIBUTE_BLOCK *) ((uint8_t *) rh + size);
-+ ab->EntryCnt = 0;
-+
-+ /* Point to the beginning of the first HBA attribute
-+ entry */
-+ /* #1 HBA attribute entry */
-+ size += FOURBYTES;
-+ ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
-+ ae->ad.bits.AttrType = be16_to_cpu(NODE_NAME);
-+ ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES
-+ + sizeof (struct lpfc_name));
-+ memcpy(&ae->un.NodeName, &phba->fc_sparam.nodeName,
-+ sizeof (struct lpfc_name));
-+ ab->EntryCnt++;
-+ size += FOURBYTES + sizeof (struct lpfc_name);
-+
-+ /* #2 HBA attribute entry */
-+ ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
-+ ae->ad.bits.AttrType = be16_to_cpu(MANUFACTURER);
-+ strcpy(ae->un.Manufacturer, "Emulex Corporation");
-+ len = strlen(ae->un.Manufacturer);
-+ len += (len & 3) ? (4 - (len & 3)) : 4;
-+ ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
-+ ab->EntryCnt++;
-+ size += FOURBYTES + len;
-+
-+ /* #3 HBA attribute entry */
-+ ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
-+ ae->ad.bits.AttrType = be16_to_cpu(SERIAL_NUMBER);
-+ strcpy(ae->un.SerialNumber, phba->SerialNumber);
-+ len = strlen(ae->un.SerialNumber);
-+ len += (len & 3) ? (4 - (len & 3)) : 4;
-+ ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
-+ ab->EntryCnt++;
-+ size += FOURBYTES + len;
-+
-+ /* #4 HBA attribute entry */
-+ ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
-+ ae->ad.bits.AttrType = be16_to_cpu(MODEL);
-+ strcpy(ae->un.Model, phba->ModelName);
-+ len = strlen(ae->un.Model);
-+ len += (len & 3) ? (4 - (len & 3)) : 4;
-+ ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
-+ ab->EntryCnt++;
-+ size += FOURBYTES + len;
-+
-+ /* #5 HBA attribute entry */
-+ ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
-+ ae->ad.bits.AttrType = be16_to_cpu(MODEL_DESCRIPTION);
-+ strcpy(ae->un.ModelDescription, phba->ModelDesc);
-+ len = strlen(ae->un.ModelDescription);
-+ len += (len & 3) ? (4 - (len & 3)) : 4;
-+ ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
-+ ab->EntryCnt++;
-+ size += FOURBYTES + len;
-+
-+ /* #6 HBA attribute entry */
-+ ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
-+ ae->ad.bits.AttrType = be16_to_cpu(HARDWARE_VERSION);
-+ ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + 8);
-+ /* Convert JEDEC ID to ascii for hardware version */
-+ incr = vp->rev.biuRev;
-+ for (i = 0; i < 8; i++) {
-+ j = (incr & 0xf);
-+ if (j <= 9)
-+ ae->un.HardwareVersion[7 - i] =
-+ (char)((uint8_t) 0x30 +
-+ (uint8_t) j);
-+ else
-+ ae->un.HardwareVersion[7 - i] =
-+ (char)((uint8_t) 0x61 +
-+ (uint8_t) (j - 10));
-+ incr = (incr >> 4);
-+ }
-+ ab->EntryCnt++;
-+ size += FOURBYTES + 8;
-+
-+ /* #7 HBA attribute entry */
-+ ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
-+ ae->ad.bits.AttrType = be16_to_cpu(DRIVER_VERSION);
-+ strcpy(ae->un.DriverVersion, lpfc_release_version);
-+ len = strlen(ae->un.DriverVersion);
-+ len += (len & 3) ? (4 - (len & 3)) : 4;
-+ ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
-+ ab->EntryCnt++;
-+ size += FOURBYTES + len;
-+
-+ /* #8 HBA attribute entry */
-+ ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
-+ ae->ad.bits.AttrType = be16_to_cpu(OPTION_ROM_VERSION);
-+ strcpy(ae->un.OptionROMVersion, phba->OptionROMVersion);
-+ len = strlen(ae->un.OptionROMVersion);
-+ len += (len & 3) ? (4 - (len & 3)) : 4;
-+ ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
-+ ab->EntryCnt++;
-+ size += FOURBYTES + len;
-+
-+ /* #9 HBA attribute entry */
-+ ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
-+ ae->ad.bits.AttrType = be16_to_cpu(FIRMWARE_VERSION);
-+ lpfc_decode_firmware_rev(phba, ae->un.FirmwareVersion,
-+ 1);
-+ len = strlen(ae->un.FirmwareVersion);
-+ len += (len & 3) ? (4 - (len & 3)) : 4;
-+ ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
-+ ab->EntryCnt++;
-+ size += FOURBYTES + len;
-+
-+ /* #10 HBA attribute entry */
-+ ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
-+ ae->ad.bits.AttrType = be16_to_cpu(OS_NAME_VERSION);
-+ sprintf(ae->un.OsNameVersion, "%s %s %s",
-+ system_utsname.sysname, system_utsname.release,
-+ system_utsname.version);
-+ len = strlen(ae->un.OsNameVersion);
-+ len += (len & 3) ? (4 - (len & 3)) : 4;
-+ ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
-+ ab->EntryCnt++;
-+ size += FOURBYTES + len;
-+
-+ /* #11 HBA attribute entry */
-+ ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
-+ ae->ad.bits.AttrType = be16_to_cpu(MAX_CT_PAYLOAD_LEN);
-+ ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + 4);
-+ ae->un.MaxCTPayloadLen = (65 * 4096);
-+ ab->EntryCnt++;
-+ size += FOURBYTES + 4;
-+
-+ ab->EntryCnt = be32_to_cpu(ab->EntryCnt);
-+ /* Total size */
-+ size = GID_REQUEST_SZ - 4 + size;
-+ }
-+ break;
-+
-+ case SLI_MGMT_RPA:
-+ {
-+ lpfc_vpd_t *vp;
-+ struct serv_parm *hsp;
-+ int len;
-+
-+ vp = &phba->vpd;
-+
-+ CtReq->CommandResponse.bits.CmdRsp =
-+ be16_to_cpu(SLI_MGMT_RPA);
-+ CtReq->CommandResponse.bits.Size = 0;
-+ pab = (REG_PORT_ATTRIBUTE *) & CtReq->un.PortID;
-+ size = sizeof (struct lpfc_name) + FOURBYTES;
-+ memcpy((uint8_t *) & pab->PortName,
-+ (uint8_t *) & phba->fc_sparam.portName,
-+ sizeof (struct lpfc_name));
-+ pab->ab.EntryCnt = 0;
-+
-+ /* #1 Port attribute entry */
-+ ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) pab + size);
-+ ae->ad.bits.AttrType = be16_to_cpu(SUPPORTED_FC4_TYPES);
-+ ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + 32);
-+ ae->un.SupportFC4Types[2] = 1;
-+ ae->un.SupportFC4Types[7] = 1;
-+ pab->ab.EntryCnt++;
-+ size += FOURBYTES + 32;
-+
-+ /* #2 Port attribute entry */
-+ ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) pab + size);
-+ ae->ad.bits.AttrType = be16_to_cpu(SUPPORTED_SPEED);
-+ ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + 4);
-+ if (FC_JEDEC_ID(vp->rev.biuRev) == VIPER_JEDEC_ID)
-+ ae->un.SupportSpeed = HBA_PORTSPEED_10GBIT;
-+ else if (FC_JEDEC_ID(vp->rev.biuRev) == HELIOS_JEDEC_ID)
-+ ae->un.SupportSpeed = HBA_PORTSPEED_4GBIT;
-+ else if ((FC_JEDEC_ID(vp->rev.biuRev) ==
-+ CENTAUR_2G_JEDEC_ID)
-+ || (FC_JEDEC_ID(vp->rev.biuRev) ==
-+ PEGASUS_JEDEC_ID)
-+ || (FC_JEDEC_ID(vp->rev.biuRev) ==
-+ THOR_JEDEC_ID))
-+ ae->un.SupportSpeed = HBA_PORTSPEED_2GBIT;
-+ else
-+ ae->un.SupportSpeed = HBA_PORTSPEED_1GBIT;
-+ pab->ab.EntryCnt++;
-+ size += FOURBYTES + 4;
-+
-+ /* #3 Port attribute entry */
-+ ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) pab + size);
-+ ae->ad.bits.AttrType = be16_to_cpu(PORT_SPEED);
-+ ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + 4);
-+ switch(phba->fc_linkspeed) {
-+ case LA_1GHZ_LINK:
-+ ae->un.PortSpeed = HBA_PORTSPEED_1GBIT;
-+ break;
-+ case LA_2GHZ_LINK:
-+ ae->un.PortSpeed = HBA_PORTSPEED_2GBIT;
-+ break;
-+ case LA_4GHZ_LINK:
-+ ae->un.PortSpeed = HBA_PORTSPEED_4GBIT;
-+ break;
-+ default:
-+ ae->un.PortSpeed =
-+ HBA_PORTSPEED_UNKNOWN;
-+ break;
-+ }
-+ pab->ab.EntryCnt++;
-+ size += FOURBYTES + 4;
-+
-+ /* #4 Port attribute entry */
-+ ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) pab + size);
-+ ae->ad.bits.AttrType = be16_to_cpu(MAX_FRAME_SIZE);
-+ ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + 4);
-+ hsp = (struct serv_parm *) & phba->fc_sparam;
-+ ae->un.MaxFrameSize =
-+ (((uint32_t) hsp->cmn.
-+ bbRcvSizeMsb) << 8) | (uint32_t) hsp->cmn.
-+ bbRcvSizeLsb;
-+ pab->ab.EntryCnt++;
-+ size += FOURBYTES + 4;
-+
-+ /* #5 Port attribute entry */
-+ ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) pab + size);
-+ ae->ad.bits.AttrType = be16_to_cpu(OS_DEVICE_NAME);
-+ strcpy((char *)ae->un.OsDeviceName, LPFC_DRIVER_NAME);
-+ len = strlen((char *)ae->un.OsDeviceName);
-+ len += (len & 3) ? (4 - (len & 3)) : 4;
-+ ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
-+ pab->ab.EntryCnt++;
-+ size += FOURBYTES + len;
-+
-+ if (phba->cfg_fdmi_on == 2) {
-+ /* #6 Port attribute entry */
-+ ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) pab +
-+ size);
-+ ae->ad.bits.AttrType = be16_to_cpu(HOST_NAME);
-+ sprintf(ae->un.HostName, "%s",
-+ system_utsname.nodename);
-+ len = strlen(ae->un.HostName);
-+ len += (len & 3) ? (4 - (len & 3)) : 4;
-+ ae->ad.bits.AttrLen =
-+ be16_to_cpu(FOURBYTES + len);
-+ pab->ab.EntryCnt++;
-+ size += FOURBYTES + len;
-+ }
-+
-+ pab->ab.EntryCnt = be32_to_cpu(pab->ab.EntryCnt);
-+ /* Total size */
-+ size = GID_REQUEST_SZ - 4 + size;
-+ }
-+ break;
-+
-+ case SLI_MGMT_DHBA:
-+ CtReq->CommandResponse.bits.CmdRsp = be16_to_cpu(SLI_MGMT_DHBA);
-+ CtReq->CommandResponse.bits.Size = 0;
-+ pe = (PORT_ENTRY *) & CtReq->un.PortID;
-+ memcpy((uint8_t *) & pe->PortName,
-+ (uint8_t *) & phba->fc_sparam.portName,
-+ sizeof (struct lpfc_name));
-+ size = GID_REQUEST_SZ - 4 + sizeof (struct lpfc_name);
-+ break;
-+
-+ case SLI_MGMT_DPRT:
-+ CtReq->CommandResponse.bits.CmdRsp = be16_to_cpu(SLI_MGMT_DPRT);
-+ CtReq->CommandResponse.bits.Size = 0;
-+ pe = (PORT_ENTRY *) & CtReq->un.PortID;
-+ memcpy((uint8_t *) & pe->PortName,
-+ (uint8_t *) & phba->fc_sparam.portName,
-+ sizeof (struct lpfc_name));
-+ size = GID_REQUEST_SZ - 4 + sizeof (struct lpfc_name);
-+ break;
-+ }
-+
-+ bpl = (struct ulp_bde64 *) bmp->virt;
-+ bpl->addrHigh = le32_to_cpu( putPaddrHigh(mp->phys) );
-+ bpl->addrLow = le32_to_cpu( putPaddrLow(mp->phys) );
-+ bpl->tus.f.bdeFlags = 0;
-+ bpl->tus.f.bdeSize = size;
-+ bpl->tus.w = le32_to_cpu(bpl->tus.w);
-+
-+ cmpl = lpfc_cmpl_ct_cmd_fdmi;
-+
-+ if (!lpfc_ct_cmd(phba, mp, bmp, ndlp, cmpl, FC_MAX_NS_RSP))
-+ return 0;
-+
-+ lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
-+fdmi_cmd_free_bmp:
-+ kfree(bmp);
-+fdmi_cmd_free_mpvirt:
-+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
-+fdmi_cmd_free_mp:
-+ kfree(mp);
-+fdmi_cmd_exit:
-+ /* Issue FDMI request failed */
-+ lpfc_printf_log(phba,
-+ KERN_INFO,
-+ LOG_DISCOVERY,
-+ "%d:0244 Issue FDMI request failed Data: x%x\n",
-+ phba->brd_no,
-+ cmdcode);
-+ return 1;
-+}
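
Every string attribute above is rounded with the same in-line expression, which pads to the next 4-byte boundary strictly above the string length, so an already-aligned string still grows by four bytes (presumably to guarantee room for the NUL). A minimal stand-alone check of that rule; the helper name is made up for illustration and is not taken from the driver:

#include <assert.h>
#include <stdio.h>
#include <string.h>

/* Same rounding the FDMI attribute builder applies in-line above. */
static int fdmi_attr_pad(int len)
{
	return len + ((len & 3) ? (4 - (len & 3)) : 4);
}

int main(void)
{
	assert(fdmi_attr_pad((int)strlen("Emulex Corporation")) == 20); /* 18 -> 20 */
	assert(fdmi_attr_pad(8) == 12); /* already aligned: still grows by 4 */
	assert(fdmi_attr_pad(5) == 8);  /* rounds up to the next multiple of 4 */
	printf("padding checks passed\n");
	return 0;
}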
-+
-+void
-+lpfc_fdmi_tmo(unsigned long ptr)
-+{
-+ struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
-+ unsigned long iflag;
-+
-+ spin_lock_irqsave(phba->host->host_lock, iflag);
-+ if (!(phba->work_hba_events & WORKER_FDMI_TMO)) {
-+ phba->work_hba_events |= WORKER_FDMI_TMO;
-+ if (phba->dpc_wait)
-+ up(phba->dpc_wait);
-+ }
-+ spin_unlock_irqrestore(phba->host->host_lock,iflag);
-+}
-+
-+void
-+lpfc_fdmi_tmo_handler(struct lpfc_hba *phba)
-+{
-+ struct lpfc_nodelist *ndlp;
-+
-+ spin_lock_irq(phba->host->host_lock);
-+ if (!(phba->work_hba_events & WORKER_FDMI_TMO)) {
-+ spin_unlock_irq(phba->host->host_lock);
-+ return;
-+ }
-+ ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, FDMI_DID);
-+ if (ndlp) {
-+ if (system_utsname.nodename[0] != '\0') {
-+ lpfc_fdmi_cmd(phba, ndlp, SLI_MGMT_DHBA);
-+ } else {
-+ mod_timer(&phba->fc_fdmitmo, jiffies + HZ * 60);
-+ }
-+ }
-+ spin_unlock_irq(phba->host->host_lock);
-+ return;
-+}
-+
-+
-+void
-+lpfc_decode_firmware_rev(struct lpfc_hba * phba, char *fwrevision, int flag)
-+{
-+ struct lpfc_sli *psli = &phba->sli;
-+ lpfc_vpd_t *vp = &phba->vpd;
-+ uint32_t b1, b2, b3, b4, i, rev;
-+ char c;
-+ uint32_t *ptr, str[4];
-+ uint8_t *fwname;
-+
-+ if (vp->rev.rBit) {
-+ if (psli->sliinit.sli_flag & LPFC_SLI2_ACTIVE)
-+ rev = vp->rev.sli2FwRev;
-+ else
-+ rev = vp->rev.sli1FwRev;
-+
-+ b1 = (rev & 0x0000f000) >> 12;
-+ b2 = (rev & 0x00000f00) >> 8;
-+ b3 = (rev & 0x000000c0) >> 6;
-+ b4 = (rev & 0x00000030) >> 4;
-+
-+ switch (b4) {
-+ case 0:
-+ c = 'N';
-+ break;
-+ case 1:
-+ c = 'A';
-+ break;
-+ case 2:
-+ c = 'B';
-+ break;
-+ default:
-+ c = 0;
-+ break;
-+ }
-+ b4 = (rev & 0x0000000f);
-+
-+ if (psli->sliinit.sli_flag & LPFC_SLI2_ACTIVE)
-+ fwname = vp->rev.sli2FwName;
-+ else
-+ fwname = vp->rev.sli1FwName;
-+
-+ for (i = 0; i < 16; i++)
-+ if(fwname[i] == 0x20)
-+ fwname[i] = 0;
-+
-+ ptr = (uint32_t*)fwname;
-+
-+ for (i = 0; i < 3; i++)
-+ str[i] = be32_to_cpu(*ptr++);
-+
-+ if (c == 0) {
-+ if (flag)
-+ sprintf(fwrevision, "%d.%d%d (%s)",
-+ b1, b2, b3, (char *)str);
-+ else
-+ sprintf(fwrevision, "%d.%d%d", b1,
-+ b2, b3);
-+ } else {
-+ if (flag)
-+ sprintf(fwrevision, "%d.%d%d%c%d (%s)",
-+ b1, b2, b3, c,
-+ b4, (char *)str);
-+ else
-+ sprintf(fwrevision, "%d.%d%d%c%d",
-+ b1, b2, b3, c, b4);
-+ }
-+ } else {
-+ rev = vp->rev.smFwRev;
-+
-+ b1 = (rev & 0xff000000) >> 24;
-+ b2 = (rev & 0x00f00000) >> 20;
-+ b3 = (rev & 0x000f0000) >> 16;
-+ c = (rev & 0x0000ff00) >> 8;
-+ b4 = (rev & 0x000000ff);
-+
-+ if (flag)
-+ sprintf(fwrevision, "%d.%d%d%c%d ", b1,
-+ b2, b3, c, b4);
-+ else
-+ sprintf(fwrevision, "%d.%d%d%c%d ", b1,
-+ b2, b3, c, b4);
-+ }
-+ return;
-+}
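
The rBit branch above packs the SLI firmware revision into nibbles and two 2-bit fields of a single word. A stand-alone sketch of the same decode, fed a made-up sample value rather than anything read from real hardware:

#include <stdio.h>

/* Mirrors the rBit branch of lpfc_decode_firmware_rev() above: split the
 * low 16 bits of the revision word into major/minor digits, an N/A/B
 * qualifier and a sub-revision.
 */
static void decode_rev(unsigned rev, char *out, size_t outsz)
{
	unsigned b1  = (rev & 0x0000f000) >> 12;
	unsigned b2  = (rev & 0x00000f00) >> 8;
	unsigned b3  = (rev & 0x000000c0) >> 6;
	unsigned sel = (rev & 0x00000030) >> 4;
	unsigned b4  = rev & 0x0000000f;
	char c = (sel == 0) ? 'N' : (sel == 1) ? 'A' : (sel == 2) ? 'B' : 0;

	if (c)
		snprintf(out, outsz, "%u.%u%u%c%u", b1, b2, b3, c, b4);
	else
		snprintf(out, outsz, "%u.%u%u", b1, b2, b3);
}

int main(void)
{
	char buf[32];

	decode_rev(0x1912, buf, sizeof(buf)); /* hypothetical sample value */
	printf("%s\n", buf);                  /* prints "1.90A2" */
	return 0;
}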
---- linux-2.6.8.1-t044-driver-update//drivers/scsi/lpfc/lpfc_sli.h 1970-01-01 03:00:00.000000000 +0300
-+++ rhel4u2//drivers/scsi/lpfc/lpfc_sli.h 2005-10-19 11:47:17.000000000 +0400
-@@ -0,0 +1,218 @@
-+/*******************************************************************
-+ * This file is part of the Emulex Linux Device Driver for *
-+ * Fibre Channel Host Bus Adapters. *
-+ * Copyright (C) 2003-2005 Emulex. All rights reserved. *
-+ * EMULEX and SLI are trademarks of Emulex. *
-+ * www.emulex.com *
-+ * *
-+ * This program is free software; you can redistribute it and/or *
-+ * modify it under the terms of version 2 of the GNU General *
-+ * Public License as published by the Free Software Foundation. *
-+ * This program is distributed in the hope that it will be useful. *
-+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
-+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
-+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
-+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
-+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
-+ * more details, a copy of which can be found in the file COPYING *
-+ * included with this package. *
-+ *******************************************************************/
-+
-+/*
-+ * $Id: lpfc_sli.h 1.38.2.2 2005/06/13 17:16:49EDT sf_support Exp $
-+ */
-+
-+#ifndef _H_LPFC_SLI
-+#define _H_LPFC_SLI
-+
-+#include "lpfc_hw.h"
-+
-+/* forward declaration for LPFC_IOCB_t's use */
-+struct lpfc_hba;
-+
-+/* This structure is used to handle IOCB requests / responses */
-+struct lpfc_iocbq {
-+ /* lpfc_iocbqs are used in double linked lists */
-+ struct list_head list;
-+ IOCB_t iocb; /* IOCB cmd */
-+ uint8_t retry; /* retry counter for IOCB cmd - if needed */
-+ uint8_t iocb_flag;
-+#define LPFC_IO_POLL 1 /* Polling mode iocb */
-+#define LPFC_IO_LIBDFC 2 /* libdfc iocb */
-+#define LPFC_IO_WAIT 4
-+#define LPFC_IO_HIPRI 8 /* High Priority Queue signal flag */
-+
-+ uint8_t abort_count;
-+ uint8_t rsvd2;
-+ uint32_t drvrTimeout; /* driver timeout in seconds */
-+ void *context1; /* caller context information */
-+ void *context2; /* caller context information */
-+ void *context3; /* caller context information */
-+ union {
-+ wait_queue_head_t *hipri_wait_queue; /* High Priority Queue wait
-+ queue */
-+ struct lpfc_iocbq *rsp_iocb;
-+ struct lpfcMboxq *mbox;
-+ } context_un;
-+
-+ void (*iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
-+ struct lpfc_iocbq *);
-+
-+};
-+
-+#define SLI_IOCB_RET_IOCB 1 /* Return IOCB if cmd ring full */
-+#define SLI_IOCB_HIGH_PRIORITY 2 /* High priority command */
-+
-+#define IOCB_SUCCESS 0
-+#define IOCB_BUSY 1
-+#define IOCB_ERROR 2
-+#define IOCB_TIMEDOUT 3
-+
-+typedef struct lpfcMboxq {
-+ /* MBOXQs are used in single linked lists */
-+ struct list_head list; /* ptr to next mailbox command */
-+ MAILBOX_t mb; /* Mailbox cmd */
-+ void *context1; /* caller context information */
-+ void *context2; /* caller context information */
-+
-+ void (*mbox_cmpl) (struct lpfc_hba *, struct lpfcMboxq *);
-+
-+} LPFC_MBOXQ_t;
-+
-+#define MBX_POLL 1 /* poll mailbox till command done, then
-+ return */
-+#define MBX_NOWAIT 2 /* issue command then return immediately */
-+#define MBX_STOP_IOCB 4 /* Stop iocb processing till mbox cmds
-+ complete */
-+
-+#define LPFC_MAX_RING_MASK 4 /* max num of rctl/type masks allowed per
-+ ring */
-+#define LPFC_MAX_RING 4 /* max num of SLI rings used by driver */
-+
-+/* Structure used to hold SLI ring information */
-+struct lpfc_sli_ring {
-+ uint16_t flag; /* ring flags */
-+#define LPFC_DEFERRED_RING_EVENT 0x001 /* Deferred processing a ring event */
-+#define LPFC_CALL_RING_AVAILABLE 0x002 /* indicates cmd was full */
-+#define LPFC_STOP_IOCB_MBX 0x010 /* Stop processing IOCB cmds mbox */
-+#define LPFC_STOP_IOCB_EVENT 0x020 /* Stop processing IOCB cmds event */
-+#define LPFC_STOP_IOCB_MASK 0x030 /* Stop processing IOCB cmds mask */
-+ uint16_t abtsiotag; /* tracks next iotag to use for ABTS */
-+
-+ uint32_t local_getidx; /* last available cmd index (from cmdGetInx) */
-+ uint32_t next_cmdidx; /* next_cmd index */
-+ uint8_t rsvd;
-+ uint8_t ringno; /* ring number */
-+ uint8_t rspidx; /* current index in response ring */
-+ uint8_t cmdidx; /* current index in command ring */
-+ struct lpfc_iocbq ** fast_lookup; /* array of IOCB ptrs indexed by
-+ iotag */
-+ struct list_head txq;
-+ uint16_t txq_cnt; /* current length of queue */
-+ uint16_t txq_max; /* max length */
-+ struct list_head txcmplq;
-+ uint16_t txcmplq_cnt; /* current length of queue */
-+ uint16_t txcmplq_max; /* max length */
-+ volatile uint32_t *cmdringaddr; /* virtual address for cmd rings */
-+ volatile uint32_t *rspringaddr; /* virtual address for rsp rings */
-+ uint32_t missbufcnt; /* keep track of buffers to post */
-+ struct list_head postbufq;
-+ uint16_t postbufq_cnt; /* current length of queue */
-+ uint16_t postbufq_max; /* max length */
-+ struct list_head iocb_continueq;
-+ uint16_t iocb_continueq_cnt; /* current length of queue */
-+ uint16_t iocb_continueq_max; /* max length */
-+};
-+
-+typedef struct {
-+ uint8_t profile; /* profile associated with ring */
-+ uint8_t rctl; /* rctl / type pair configured for ring */
-+ uint8_t type; /* rctl / type pair configured for ring */
-+ uint8_t rsvd;
-+ /* rcv'd unsol event */
-+ void (*lpfc_sli_rcv_unsol_event) (struct lpfc_hba *,
-+ struct lpfc_sli_ring *,
-+ struct lpfc_iocbq *);
-+} LPFC_RING_MASK_t;
-+
-+/* Structure used for configuring rings to a specific profile or rctl / type */
-+typedef struct {
-+ LPFC_RING_MASK_t prt[LPFC_MAX_RING_MASK];
-+ uint32_t num_mask; /* number of mask entries in prt array */
-+ uint32_t iotag_ctr; /* keeps track of the next iotag to use */
-+ uint32_t iotag_max; /* max iotag value to use */
-+ uint32_t fast_iotag; /* max fastlookup based iotag */
-+ uint16_t numCiocb; /* number of command iocb's per ring */
-+ uint16_t numRiocb; /* number of rsp iocb's per ring */
-+ /* cmd ring available */
-+ void (*lpfc_sli_cmd_available) (struct lpfc_hba *,
-+ struct lpfc_sli_ring *);
-+} LPFC_RING_INIT_t;
-+
-+typedef struct {
-+ LPFC_RING_INIT_t ringinit[LPFC_MAX_RING]; /* ring initialization info */
-+ uint32_t num_rings;
-+ uint32_t sli_flag;
-+} LPFC_SLI_INIT_t;
-+
-+/* Structure used to hold SLI statistical counters and info */
-+typedef struct {
-+ uint64_t iocbEvent[LPFC_MAX_RING]; /* IOCB event counters */
-+ uint64_t iocbCmd[LPFC_MAX_RING]; /* IOCB cmd issued */
-+ uint64_t iocbRsp[LPFC_MAX_RING]; /* IOCB rsp received */
-+ uint64_t iocbCmdDelay[LPFC_MAX_RING]; /* IOCB cmd ring delay */
-+ uint64_t iocbCmdFull[LPFC_MAX_RING]; /* IOCB cmd ring full */
-+ uint64_t iocbCmdEmpty[LPFC_MAX_RING]; /* IOCB cmd ring is now empty */
-+ uint64_t iocbRspFull[LPFC_MAX_RING]; /* IOCB rsp ring full */
-+ uint64_t mboxStatErr; /* Mbox cmds completed status error */
-+ uint64_t mboxCmd; /* Mailbox commands issued */
-+ uint64_t sliIntr; /* Count of Host Attention interrupts */
-+ uint32_t errAttnEvent; /* Error Attn event counters */
-+ uint32_t linkEvent; /* Link event counters */
-+ uint32_t mboxEvent; /* Mailbox event counters */
-+ uint32_t mboxBusy; /* Mailbox cmd busy */
-+} LPFC_SLI_STAT_t;
-+
-+/* Structure used to hold SLI information */
-+struct lpfc_sli {
-+ LPFC_SLI_INIT_t sliinit; /* initialization info */
-+ /* Additional sli_flags */
-+#define LPFC_SLI_MBOX_ACTIVE 0x100 /* HBA mailbox is currently active */
-+#define LPFC_SLI2_ACTIVE 0x200 /* SLI2 overlay in firmware is active */
-+#define LPFC_PROCESS_LA 0x400 /* Able to process link attention */
-+
-+ struct lpfc_sli_ring ring[LPFC_MAX_RING];
-+ int fcp_ring; /* ring used for FCP initiator commands */
-+ int next_ring;
-+
-+ int ip_ring; /* ring used for IP network drv cmds */
-+
-+ LPFC_SLI_STAT_t slistat; /* SLI statistical info */
-+ struct list_head mboxq;
-+ uint16_t mboxq_cnt; /* current length of queue */
-+ uint16_t mboxq_max; /* max length */
-+ LPFC_MBOXQ_t *mbox_active; /* active mboxq information */
-+
-+ struct timer_list mbox_tmo; /* Hold clk to timeout active mbox
-+ cmd */
-+
-+ volatile uint32_t *MBhostaddr; /* virtual address for mbox cmds */
-+};
-+
-+/* Given a pointer to the start of the ring, and the slot number of
-+ * the desired iocb entry, calc a pointer to that entry.
-+ * (assume iocb entry size is 32 bytes, or 8 words)
-+ */
-+#define IOCB_ENTRY(ring,slot) ((IOCB_t *)(((char *)(ring)) + ((slot) * 32)))
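
A tiny stand-alone illustration of the pointer arithmetic behind IOCB_ENTRY(), using a stand-in 32-byte entry type since the real IOCB_t lives in lpfc_hw.h:

#include <assert.h>
#include <stdint.h>

/* stand-in for IOCB_t: the macro only assumes each entry is 32 bytes */
typedef struct { uint32_t words[8]; } FAKE_IOCB_t;

#define FAKE_IOCB_ENTRY(ring, slot) \
	((FAKE_IOCB_t *)(((char *)(ring)) + ((slot) * 32)))

int main(void)
{
	FAKE_IOCB_t ring[4] = { 0 };

	/* slot n sits n * 32 bytes past the start of the ring */
	assert(FAKE_IOCB_ENTRY(ring, 0) == &ring[0]);
	assert(FAKE_IOCB_ENTRY(ring, 3) == &ring[3]);
	assert((char *)FAKE_IOCB_ENTRY(ring, 2) - (char *)ring == 64);
	return 0;
}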
-+
-+#define LPFC_SLI_ABORT_IMED 0 /* Immediate abort of IOCB, deque and
-+ call compl routine immediately. */
-+#define LPFC_MBOX_TMO 30 /* Sec tmo for outstanding mbox
-+ command */
-+
-+/* Flags for aborting I/Os on tx and txcmpl queues */
-+#define LPFC_ABORT_TXQ 1 /* Abort I/Os on txq */
-+#define LPFC_ABORT_TXCMPLQ 2 /* Abort I/Os on txcmplq */
-+#define LPFC_ABORT_ALLQ 3 /* Abort I/Os both txq and txcmplq */
-+
-+#endif /* _H_LPFC_SLI */
---- linux-2.6.8.1-t044-driver-update//drivers/scsi/lpfc/lpfc_mem.c 1970-01-01 03:00:00.000000000 +0300
-+++ rhel4u2//drivers/scsi/lpfc/lpfc_mem.c 2005-10-19 11:47:17.000000000 +0400
-@@ -0,0 +1,204 @@
-+/*******************************************************************
-+ * This file is part of the Emulex Linux Device Driver for *
-+ * Fibre Channel Host Bus Adapters. *
-+ * Copyright (C) 2003-2005 Emulex. All rights reserved. *
-+ * EMULEX and SLI are trademarks of Emulex. *
-+ * www.emulex.com *
-+ * *
-+ * This program is free software; you can redistribute it and/or *
-+ * modify it under the terms of version 2 of the GNU General *
-+ * Public License as published by the Free Software Foundation. *
-+ * This program is distributed in the hope that it will be useful. *
-+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
-+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
-+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
-+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
-+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
-+ * more details, a copy of which can be found in the file COPYING *
-+ * included with this package. *
-+ *******************************************************************/
-+
-+/*
-+ * $Id: lpfc_mem.c 1.72.1.2 2005/06/13 17:16:34EDT sf_support Exp $
-+ */
-+
-+#include <linux/mempool.h>
-+#include <linux/pci.h>
-+#include <linux/slab.h>
-+#include <scsi/scsi_device.h>
-+
-+#include "lpfc_sli.h"
-+#include "lpfc_disc.h"
-+#include "lpfc_scsi.h"
-+#include "lpfc.h"
-+#include "lpfc_crtn.h"
-+#include "lpfc_mem.h"
-+
-+static void *
-+lpfc_pool_kmalloc(int gfp_flags, void *data)
-+{
-+ return kmalloc((unsigned long)data, gfp_flags);
-+}
-+
-+static void
-+lpfc_pool_kfree(void *obj, void *data)
-+{
-+ kfree(obj);
-+}
-+
-+int
-+lpfc_mem_alloc(struct lpfc_hba * phba)
-+{
-+ struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
-+ int i;
-+
-+ phba->lpfc_scsi_dma_ext_pool = pci_pool_create("lpfc_scsi_dma_ext_pool",
-+ phba->pcidev, LPFC_SCSI_DMA_EXT_SIZE, 8, 0);
-+ if (!phba->lpfc_scsi_dma_ext_pool)
-+ goto fail;
-+
-+ phba->lpfc_mbuf_pool = pci_pool_create("lpfc_mbuf_pool", phba->pcidev,
-+ LPFC_BPL_SIZE, 8,0);
-+ if (!phba->lpfc_mbuf_pool)
-+ goto fail_free_dma_ext_pool;
-+
-+ pool->elements = kmalloc(sizeof(struct lpfc_dmabuf) *
-+ LPFC_MBUF_POOL_SIZE, GFP_KERNEL);
-+ pool->max_count = 0;
-+ pool->current_count = 0;
-+ for ( i = 0; i < LPFC_MBUF_POOL_SIZE; i++) {
-+ pool->elements[i].virt = pci_pool_alloc(phba->lpfc_mbuf_pool,
-+ GFP_KERNEL, &pool->elements[i].phys);
-+ if (!pool->elements[i].virt)
-+ goto fail_free_mbuf_pool;
-+ pool->max_count++;
-+ pool->current_count++;
-+ }
-+
-+ phba->iocb_mem_pool = mempool_create(LPFC_MEM_POOL_SIZE,
-+ lpfc_pool_kmalloc, lpfc_pool_kfree,
-+ (void *)(unsigned long)sizeof(struct lpfc_iocbq));
-+ if (!phba->iocb_mem_pool)
-+ goto fail_free_mbuf_pool;
-+
-+ phba->scsibuf_mem_pool = mempool_create(LPFC_MEM_POOL_SIZE,
-+ lpfc_pool_kmalloc, lpfc_pool_kfree,
-+ (void *)(unsigned long)sizeof(struct lpfc_scsi_buf));
-+ if (!phba->scsibuf_mem_pool)
-+ goto fail_free_iocb_pool;
-+
-+ phba->mbox_mem_pool = mempool_create(LPFC_MEM_POOL_SIZE,
-+ lpfc_pool_kmalloc, lpfc_pool_kfree,
-+ (void *)(unsigned long)sizeof(LPFC_MBOXQ_t));
-+ if (!phba->mbox_mem_pool)
-+ goto fail_free_scsibuf_pool;
-+
-+ phba->nlp_mem_pool = mempool_create(LPFC_MEM_POOL_SIZE,
-+ lpfc_pool_kmalloc, lpfc_pool_kfree,
-+ (void *)(unsigned long)sizeof(struct lpfc_nodelist));
-+ if (!phba->nlp_mem_pool)
-+ goto fail_free_mbox_pool;
-+
-+ phba->bind_mem_pool = mempool_create(LPFC_MEM_POOL_SIZE,
-+ lpfc_pool_kmalloc, lpfc_pool_kfree,
-+ (void *)(unsigned long)sizeof(struct lpfc_bindlist));
-+ if (!phba->bind_mem_pool)
-+ goto fail_free_nlp_pool;
-+
-+ return 0;
-+
-+ fail_free_nlp_pool:
-+ mempool_destroy(phba->nlp_mem_pool);
-+ fail_free_mbox_pool:
-+ mempool_destroy(phba->mbox_mem_pool);
-+ fail_free_scsibuf_pool:
-+ mempool_destroy(phba->scsibuf_mem_pool);
-+ fail_free_iocb_pool:
-+ mempool_destroy(phba->iocb_mem_pool);
-+ fail_free_mbuf_pool:
-+ while (--i)
-+ pci_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
-+ pool->elements[i].phys);
-+ kfree(pool->elements);
-+ pci_pool_destroy(phba->lpfc_mbuf_pool);
-+ fail_free_dma_ext_pool:
-+ pci_pool_destroy(phba->lpfc_scsi_dma_ext_pool);
-+ fail:
-+ return -ENOMEM;
-+}
-+
-+void
-+lpfc_mem_free(struct lpfc_hba * phba)
-+{
-+ struct lpfc_sli *psli = &phba->sli;
-+ struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
-+ LPFC_MBOXQ_t *mbox, *next_mbox;
-+ struct lpfc_dmabuf *mp;
-+ int i;
-+
-+ list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq, list) {
-+ mp = (struct lpfc_dmabuf *) (mbox->context1);
-+ if (mp) {
-+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
-+ kfree(mp);
-+ }
-+ list_del(&mbox->list);
-+ mempool_free(mbox, phba->mbox_mem_pool);
-+ }
-+
-+ psli->sliinit.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
-+ if (psli->mbox_active) {
-+ mbox = psli->mbox_active;
-+ mp = (struct lpfc_dmabuf *) (mbox->context1);
-+ if (mp) {
-+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
-+ kfree(mp);
-+ }
-+ mempool_free(mbox, phba->mbox_mem_pool);
-+ psli->mbox_active = NULL;
-+ }
-+
-+ for (i = 0; i < pool->current_count; i++)
-+ pci_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
-+ pool->elements[i].phys);
-+ kfree(pool->elements);
-+ mempool_destroy(phba->bind_mem_pool);
-+ mempool_destroy(phba->nlp_mem_pool);
-+ mempool_destroy(phba->mbox_mem_pool);
-+ mempool_destroy(phba->scsibuf_mem_pool);
-+ mempool_destroy(phba->iocb_mem_pool);
-+
-+ pci_pool_destroy(phba->lpfc_scsi_dma_ext_pool);
-+ pci_pool_destroy(phba->lpfc_mbuf_pool);
-+}
-+
-+void *
-+lpfc_mbuf_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle)
-+{
-+ struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
-+ void *ret;
-+
-+ ret = pci_pool_alloc(phba->lpfc_mbuf_pool, GFP_ATOMIC, handle);
-+
-+ if (!ret && ( mem_flags & MEM_PRI) && pool->current_count) {
-+ pool->current_count--;
-+ ret = pool->elements[pool->current_count].virt;
-+ *handle = pool->elements[pool->current_count].phys;
-+ }
-+ return ret;
-+}
-+
-+void
-+lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma)
-+{
-+ struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
-+
-+ if (pool->current_count < pool->max_count) {
-+ pool->elements[pool->current_count].virt = virt;
-+ pool->elements[pool->current_count].phys = dma;
-+ pool->current_count++;
-+ } else {
-+ pci_pool_free(phba->lpfc_mbuf_pool, virt, dma);
-+ }
-+ return;
-+}
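
lpfc_mbuf_alloc() and lpfc_mbuf_free() above implement a small safety pool: an atomic allocation that fails is served from a pre-allocated reserve (only when the caller passed MEM_PRI), and frees top the reserve back up before memory is returned to the PCI pool. A generic userspace sketch of that pattern, with illustrative names and sizes that are not taken from the driver:

#include <stdio.h>
#include <stdlib.h>

#define RESERVE_SLOTS 4	/* stand-in for LPFC_MBUF_POOL_SIZE */

struct reserve_pool {
	void  *slot[RESERVE_SLOTS];
	int    count;		/* reserve buffers currently available */
	size_t bufsz;
};

/* Try the normal allocator first; fall back to the reserve on failure. */
static void *pool_alloc(struct reserve_pool *p)
{
	void *buf = malloc(p->bufsz);

	if (!buf && p->count)
		buf = p->slot[--p->count];
	return buf;
}

/* Top the reserve back up before returning memory to the system. */
static void pool_free(struct reserve_pool *p, void *buf)
{
	if (p->count < RESERVE_SLOTS)
		p->slot[p->count++] = buf;
	else
		free(buf);
}

int main(void)
{
	struct reserve_pool p = { .count = 0, .bufsz = 1024 };
	void *buf;
	int i;

	for (i = 0; i < RESERVE_SLOTS; i++)	/* pre-fill the reserve */
		pool_free(&p, malloc(p.bufsz));

	buf = pool_alloc(&p);
	printf("reserve buffers left: %d\n", p.count);
	pool_free(&p, buf);
	return 0;
}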
---- linux-2.6.8.1-t044-driver-update//drivers/scsi/lpfc/lpfc_hw.h 1970-01-01 03:00:00.000000000 +0300
-+++ rhel4u2//drivers/scsi/lpfc/lpfc_hw.h 2005-10-19 11:47:17.000000000 +0400
-@@ -0,0 +1,2691 @@
-+/*******************************************************************
-+ * This file is part of the Emulex Linux Device Driver for *
-+ * Fibre Channel Host Bus Adapters. *
-+ * Copyright (C) 2003-2005 Emulex. All rights reserved. *
-+ * EMULEX and SLI are trademarks of Emulex. *
-+ * www.emulex.com *
-+ * *
-+ * This program is free software; you can redistribute it and/or *
-+ * modify it under the terms of version 2 of the GNU General *
-+ * Public License as published by the Free Software Foundation. *
-+ * This program is distributed in the hope that it will be useful. *
-+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
-+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
-+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
-+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
-+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
-+ * more details, a copy of which can be found in the file COPYING *
-+ * included with this package. *
-+ *******************************************************************/
-+
-+/*
-+ * $Id: lpfc_hw.h 1.34.2.2 2005/06/13 17:16:25EDT sf_support Exp $
-+ */
-+
-+#ifndef _H_LPFC_HW
-+#define _H_LPFC_HW
-+
-+#define FDMI_DID ((uint32_t)0xfffffa)
-+#define NameServer_DID ((uint32_t)0xfffffc)
-+#define SCR_DID ((uint32_t)0xfffffd)
-+#define Fabric_DID ((uint32_t)0xfffffe)
-+#define Bcast_DID ((uint32_t)0xffffff)
-+#define Mask_DID ((uint32_t)0xffffff)
-+#define CT_DID_MASK ((uint32_t)0xffff00)
-+#define Fabric_DID_MASK ((uint32_t)0xfff000)
-+#define WELL_KNOWN_DID_MASK ((uint32_t)0xfffff0)
-+
-+#define PT2PT_LocalID ((uint32_t)1)
-+#define PT2PT_RemoteID ((uint32_t)2)
-+
-+#define FF_DEF_EDTOV 2000 /* Default E_D_TOV (2000ms) */
-+#define FF_DEF_ALTOV 15 /* Default AL_TIME (15ms) */
-+#define FF_DEF_RATOV 2 /* Default RA_TOV (2s) */
-+#define FF_DEF_ARBTOV 1900 /* Default ARB_TOV (1900ms) */
-+
-+#define LPFC_BUF_RING0 64 /* Number of buffers to post to RING
-+ 0 */
-+
-+#define FCELSSIZE 1024 /* maximum ELS transfer size */
-+
-+#define LPFC_FCP_RING 0 /* ring 2 for FCP initiator commands */
-+#define LPFC_IP_RING 1 /* ring 1 for IP commands */
-+#define LPFC_ELS_RING 2 /* ring 0 for ELS commands */
-+#define LPFC_FCP_NEXT_RING 3
-+
-+#define SLI2_IOCB_CMD_R0_ENTRIES 172 /* SLI-2 FCP command ring entries */
-+#define SLI2_IOCB_RSP_R0_ENTRIES 134 /* SLI-2 FCP response ring entries */
-+#define SLI2_IOCB_CMD_R1_ENTRIES 4 /* SLI-2 IP command ring entries */
-+#define SLI2_IOCB_RSP_R1_ENTRIES 4 /* SLI-2 IP response ring entries */
-+#define SLI2_IOCB_CMD_R1XTRA_ENTRIES 36 /* SLI-2 extra FCP cmd ring entries */
-+#define SLI2_IOCB_RSP_R1XTRA_ENTRIES 52 /* SLI-2 extra FCP rsp ring entries */
-+#define SLI2_IOCB_CMD_R2_ENTRIES 20 /* SLI-2 ELS command ring entries */
-+#define SLI2_IOCB_RSP_R2_ENTRIES 20 /* SLI-2 ELS response ring entries */
-+#define SLI2_IOCB_CMD_R3_ENTRIES 0
-+#define SLI2_IOCB_RSP_R3_ENTRIES 0
-+#define SLI2_IOCB_CMD_R3XTRA_ENTRIES 24
-+#define SLI2_IOCB_RSP_R3XTRA_ENTRIES 32
-+
-+/* Common Transport structures and definitions */
-+
-+union CtRevisionId {
-+ /* Structure is in Big Endian format */
-+ struct {
-+ uint32_t Revision:8;
-+ uint32_t InId:24;
-+ } bits;
-+ uint32_t word;
-+};
-+
-+union CtCommandResponse {
-+ /* Structure is in Big Endian format */
-+ struct {
-+ uint32_t CmdRsp:16;
-+ uint32_t Size:16;
-+ } bits;
-+ uint32_t word;
-+};
-+
-+struct lpfc_sli_ct_request {
-+ /* Structure is in Big Endian format */
-+ union CtRevisionId RevisionId;
-+ uint8_t FsType;
-+ uint8_t FsSubType;
-+ uint8_t Options;
-+ uint8_t Rsrvd1;
-+ union CtCommandResponse CommandResponse;
-+ uint8_t Rsrvd2;
-+ uint8_t ReasonCode;
-+ uint8_t Explanation;
-+ uint8_t VendorUnique;
-+
-+ union {
-+ uint32_t PortID;
-+ struct gid {
-+ uint8_t PortType; /* for GID_PT requests */
-+ uint8_t DomainScope;
-+ uint8_t AreaScope;
-+ uint8_t Fc4Type; /* for GID_FT requests */
-+ } gid;
-+ struct rft {
-+ uint32_t PortId; /* For RFT_ID requests */
-+
-+#ifdef __BIG_ENDIAN_BITFIELD
-+ uint32_t rsvd0:16;
-+ uint32_t rsvd1:7;
-+ uint32_t fcpReg:1; /* Type 8 */
-+ uint32_t rsvd2:2;
-+ uint32_t ipReg:1; /* Type 5 */
-+ uint32_t rsvd3:5;
-+#else /* __LITTLE_ENDIAN_BITFIELD */
-+ uint32_t rsvd0:16;
-+ uint32_t fcpReg:1; /* Type 8 */
-+ uint32_t rsvd1:7;
-+ uint32_t rsvd3:5;
-+ uint32_t ipReg:1; /* Type 5 */
-+ uint32_t rsvd2:2;
-+#endif
-+
-+ uint32_t rsvd[7];
-+ } rft;
-+ struct rnn {
-+ uint32_t PortId; /* For RNN_ID requests */
-+ uint8_t wwnn[8];
-+ } rnn;
-+ struct rsnn { /* For RSNN_ID requests */
-+ uint8_t wwnn[8];
-+ uint8_t len;
-+ uint8_t symbname[255];
-+ } rsnn;
-+ } un;
-+};
-+
-+#define SLI_CT_REVISION 1
-+#define GID_REQUEST_SZ (sizeof(struct lpfc_sli_ct_request) - 260)
-+#define RFT_REQUEST_SZ (sizeof(struct lpfc_sli_ct_request) - 228)
-+#define RNN_REQUEST_SZ (sizeof(struct lpfc_sli_ct_request) - 252)
-+#define RSNN_REQUEST_SZ (sizeof(struct lpfc_sli_ct_request))
-+
-+/*
-+ * FsType Definitions
-+ */
-+
-+#define SLI_CT_MANAGEMENT_SERVICE 0xFA
-+#define SLI_CT_TIME_SERVICE 0xFB
-+#define SLI_CT_DIRECTORY_SERVICE 0xFC
-+#define SLI_CT_FABRIC_CONTROLLER_SERVICE 0xFD
-+
-+/*
-+ * Directory Service Subtypes
-+ */
-+
-+#define SLI_CT_DIRECTORY_NAME_SERVER 0x02
-+
-+/*
-+ * Response Codes
-+ */
-+
-+#define SLI_CT_RESPONSE_FS_RJT 0x8001
-+#define SLI_CT_RESPONSE_FS_ACC 0x8002
-+
-+/*
-+ * Reason Codes
-+ */
-+
-+#define SLI_CT_NO_ADDITIONAL_EXPL 0x0
-+#define SLI_CT_INVALID_COMMAND 0x01
-+#define SLI_CT_INVALID_VERSION 0x02
-+#define SLI_CT_LOGICAL_ERROR 0x03
-+#define SLI_CT_INVALID_IU_SIZE 0x04
-+#define SLI_CT_LOGICAL_BUSY 0x05
-+#define SLI_CT_PROTOCOL_ERROR 0x07
-+#define SLI_CT_UNABLE_TO_PERFORM_REQ 0x09
-+#define SLI_CT_REQ_NOT_SUPPORTED 0x0b
-+#define SLI_CT_HBA_INFO_NOT_REGISTERED 0x10
-+#define SLI_CT_MULTIPLE_HBA_ATTR_OF_SAME_TYPE 0x11
-+#define SLI_CT_INVALID_HBA_ATTR_BLOCK_LEN 0x12
-+#define SLI_CT_HBA_ATTR_NOT_PRESENT 0x13
-+#define SLI_CT_PORT_INFO_NOT_REGISTERED 0x20
-+#define SLI_CT_MULTIPLE_PORT_ATTR_OF_SAME_TYPE 0x21
-+#define SLI_CT_INVALID_PORT_ATTR_BLOCK_LEN 0x22
-+#define SLI_CT_VENDOR_UNIQUE 0xff
-+
-+/*
-+ * Name Server SLI_CT_UNABLE_TO_PERFORM_REQ Explanations
-+ */
-+
-+#define SLI_CT_NO_PORT_ID 0x01
-+#define SLI_CT_NO_PORT_NAME 0x02
-+#define SLI_CT_NO_NODE_NAME 0x03
-+#define SLI_CT_NO_CLASS_OF_SERVICE 0x04
-+#define SLI_CT_NO_IP_ADDRESS 0x05
-+#define SLI_CT_NO_IPA 0x06
-+#define SLI_CT_NO_FC4_TYPES 0x07
-+#define SLI_CT_NO_SYMBOLIC_PORT_NAME 0x08
-+#define SLI_CT_NO_SYMBOLIC_NODE_NAME 0x09
-+#define SLI_CT_NO_PORT_TYPE 0x0A
-+#define SLI_CT_ACCESS_DENIED 0x10
-+#define SLI_CT_INVALID_PORT_ID 0x11
-+#define SLI_CT_DATABASE_EMPTY 0x12
-+
-+/*
-+ * Name Server Command Codes
-+ */
-+
-+#define SLI_CTNS_GA_NXT 0x0100
-+#define SLI_CTNS_GPN_ID 0x0112
-+#define SLI_CTNS_GNN_ID 0x0113
-+#define SLI_CTNS_GCS_ID 0x0114
-+#define SLI_CTNS_GFT_ID 0x0117
-+#define SLI_CTNS_GSPN_ID 0x0118
-+#define SLI_CTNS_GPT_ID 0x011A
-+#define SLI_CTNS_GID_PN 0x0121
-+#define SLI_CTNS_GID_NN 0x0131
-+#define SLI_CTNS_GIP_NN 0x0135
-+#define SLI_CTNS_GIPA_NN 0x0136
-+#define SLI_CTNS_GSNN_NN 0x0139
-+#define SLI_CTNS_GNN_IP 0x0153
-+#define SLI_CTNS_GIPA_IP 0x0156
-+#define SLI_CTNS_GID_FT 0x0171
-+#define SLI_CTNS_GID_PT 0x01A1
-+#define SLI_CTNS_RPN_ID 0x0212
-+#define SLI_CTNS_RNN_ID 0x0213
-+#define SLI_CTNS_RCS_ID 0x0214
-+#define SLI_CTNS_RFT_ID 0x0217
-+#define SLI_CTNS_RSPN_ID 0x0218
-+#define SLI_CTNS_RPT_ID 0x021A
-+#define SLI_CTNS_RIP_NN 0x0235
-+#define SLI_CTNS_RIPA_NN 0x0236
-+#define SLI_CTNS_RSNN_NN 0x0239
-+#define SLI_CTNS_DA_ID 0x0300
-+
-+/*
-+ * Port Types
-+ */
-+
-+#define SLI_CTPT_N_PORT 0x01
-+#define SLI_CTPT_NL_PORT 0x02
-+#define SLI_CTPT_FNL_PORT 0x03
-+#define SLI_CTPT_IP 0x04
-+#define SLI_CTPT_FCP 0x08
-+#define SLI_CTPT_NX_PORT 0x7F
-+#define SLI_CTPT_F_PORT 0x81
-+#define SLI_CTPT_FL_PORT 0x82
-+#define SLI_CTPT_E_PORT 0x84
-+
-+#define SLI_CT_LAST_ENTRY 0x80000000
-+
-+/* Fibre Channel Service Parameter definitions */
-+
-+#define FC_PH_4_0 6 /* FC-PH version 4.0 */
-+#define FC_PH_4_1 7 /* FC-PH version 4.1 */
-+#define FC_PH_4_2 8 /* FC-PH version 4.2 */
-+#define FC_PH_4_3 9 /* FC-PH version 4.3 */
-+
-+#define FC_PH_LOW 8 /* Lowest supported FC-PH version */
-+#define FC_PH_HIGH 9 /* Highest supported FC-PH version */
-+#define FC_PH3 0x20 /* FC-PH-3 version */
-+
-+#define FF_FRAME_SIZE 2048
-+
-+struct lpfc_name {
-+#ifdef __BIG_ENDIAN_BITFIELD
-+ uint8_t nameType:4; /* FC Word 0, bit 28:31 */
-+ uint8_t IEEEextMsn:4; /* FC Word 0, bit 24:27, bit 8:11 of IEEE ext */
-+#else /* __LITTLE_ENDIAN_BITFIELD */
-+ uint8_t IEEEextMsn:4; /* FC Word 0, bit 24:27, bit 8:11 of IEEE ext */
-+ uint8_t nameType:4; /* FC Word 0, bit 28:31 */
-+#endif
-+
-+#define NAME_IEEE 0x1 /* IEEE name - nameType */
-+#define NAME_IEEE_EXT 0x2 /* IEEE extended name */
-+#define NAME_FC_TYPE 0x3 /* FC native name type */
-+#define NAME_IP_TYPE 0x4 /* IP address */
-+#define NAME_CCITT_TYPE 0xC
-+#define NAME_CCITT_GR_TYPE 0xE
-+ uint8_t IEEEextLsb; /* FC Word 0, bit 16:23, IEEE extended Lsb */
-+ uint8_t IEEE[6]; /* FC IEEE address */
-+};
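
struct lpfc_name mirrors an 8-byte worldwide name exactly as it appears on the wire: the high nibble of byte 0 is the name type (NAME_IEEE for the familiar 10:00:... names), the next 12 bits are the IEEE extension, and the remaining six bytes carry the IEEE address. A short sketch that extracts those fields from a raw buffer without relying on bitfield layout; the sample WWPN is invented for illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* invented WWPN 10:00:00:00:c9:12:34:56 (00:00:c9 is an Emulex OUI) */
	const uint8_t wwn[8] = { 0x10, 0x00, 0x00, 0x00,
				 0xc9, 0x12, 0x34, 0x56 };
	unsigned name_type = wwn[0] >> 4;			/* NAME_IEEE == 0x1 */
	unsigned ieee_ext  = ((wwn[0] & 0x0f) << 8) | wwn[1];

	printf("nameType=%u IEEEext=0x%03x IEEE=%02x:%02x:%02x:%02x:%02x:%02x\n",
	       name_type, ieee_ext,
	       wwn[2], wwn[3], wwn[4], wwn[5], wwn[6], wwn[7]);
	return 0;
}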
-+
-+struct csp {
-+ uint8_t fcphHigh; /* FC Word 0, byte 0 */
-+ uint8_t fcphLow;
-+ uint8_t bbCreditMsb;
-+ uint8_t bbCreditlsb; /* FC Word 0, byte 3 */
-+
-+#ifdef __BIG_ENDIAN_BITFIELD
-+ uint16_t increasingOffset:1; /* FC Word 1, bit 31 */
-+ uint16_t randomOffset:1; /* FC Word 1, bit 30 */
-+ uint16_t word1Reserved2:1; /* FC Word 1, bit 29 */
-+ uint16_t fPort:1; /* FC Word 1, bit 28 */
-+ uint16_t altBbCredit:1; /* FC Word 1, bit 27 */
-+ uint16_t edtovResolution:1; /* FC Word 1, bit 26 */
-+ uint16_t multicast:1; /* FC Word 1, bit 25 */
-+ uint16_t broadcast:1; /* FC Word 1, bit 24 */
-+
-+ uint16_t huntgroup:1; /* FC Word 1, bit 23 */
-+ uint16_t simplex:1; /* FC Word 1, bit 22 */
-+ uint16_t word1Reserved1:3; /* FC Word 1, bit 21:19 */
-+ uint16_t dhd:1; /* FC Word 1, bit 18 */
-+ uint16_t contIncSeqCnt:1; /* FC Word 1, bit 17 */
-+ uint16_t payloadlength:1; /* FC Word 1, bit 16 */
-+#else /* __LITTLE_ENDIAN_BITFIELD */
-+ uint16_t broadcast:1; /* FC Word 1, bit 24 */
-+ uint16_t multicast:1; /* FC Word 1, bit 25 */
-+ uint16_t edtovResolution:1; /* FC Word 1, bit 26 */
-+ uint16_t altBbCredit:1; /* FC Word 1, bit 27 */
-+ uint16_t fPort:1; /* FC Word 1, bit 28 */
-+ uint16_t word1Reserved2:1; /* FC Word 1, bit 29 */
-+ uint16_t randomOffset:1; /* FC Word 1, bit 30 */
-+ uint16_t increasingOffset:1; /* FC Word 1, bit 31 */
-+
-+ uint16_t payloadlength:1; /* FC Word 1, bit 16 */
-+ uint16_t contIncSeqCnt:1; /* FC Word 1, bit 17 */
-+ uint16_t dhd:1; /* FC Word 1, bit 18 */
-+ uint16_t word1Reserved1:3; /* FC Word 1, bit 21:19 */
-+ uint16_t simplex:1; /* FC Word 1, bit 22 */
-+ uint16_t huntgroup:1; /* FC Word 1, bit 23 */
-+#endif
-+
-+ uint8_t bbRcvSizeMsb; /* Upper nibble is reserved */
-+ uint8_t bbRcvSizeLsb; /* FC Word 1, byte 3 */
-+ union {
-+ struct {
-+ uint8_t word2Reserved1; /* FC Word 2 byte 0 */
-+
-+ uint8_t totalConcurrSeq; /* FC Word 2 byte 1 */
-+ uint8_t roByCategoryMsb; /* FC Word 2 byte 2 */
-+
-+ uint8_t roByCategoryLsb; /* FC Word 2 byte 3 */
-+ } nPort;
-+ uint32_t r_a_tov; /* R_A_TOV must be in B.E. format */
-+ } w2;
-+
-+ uint32_t e_d_tov; /* E_D_TOV must be in B.E. format */
-+};
-+
-+struct class_parms {
-+#ifdef __BIG_ENDIAN_BITFIELD
-+ uint8_t classValid:1; /* FC Word 0, bit 31 */
-+ uint8_t intermix:1; /* FC Word 0, bit 30 */
-+ uint8_t stackedXparent:1; /* FC Word 0, bit 29 */
-+ uint8_t stackedLockDown:1; /* FC Word 0, bit 28 */
-+ uint8_t seqDelivery:1; /* FC Word 0, bit 27 */
-+ uint8_t word0Reserved1:3; /* FC Word 0, bit 24:26 */
-+#else /* __LITTLE_ENDIAN_BITFIELD */
-+ uint8_t word0Reserved1:3; /* FC Word 0, bit 24:26 */
-+ uint8_t seqDelivery:1; /* FC Word 0, bit 27 */
-+ uint8_t stackedLockDown:1; /* FC Word 0, bit 28 */
-+ uint8_t stackedXparent:1; /* FC Word 0, bit 29 */
-+ uint8_t intermix:1; /* FC Word 0, bit 30 */
-+ uint8_t classValid:1; /* FC Word 0, bit 31 */
-+
-+#endif
-+
-+ uint8_t word0Reserved2; /* FC Word 0, bit 16:23 */
-+
-+#ifdef __BIG_ENDIAN_BITFIELD
-+ uint8_t iCtlXidReAssgn:2; /* FC Word 0, Bit 14:15 */
-+ uint8_t iCtlInitialPa:2; /* FC Word 0, bit 12:13 */
-+ uint8_t iCtlAck0capable:1; /* FC Word 0, bit 11 */
-+ uint8_t iCtlAckNcapable:1; /* FC Word 0, bit 10 */
-+ uint8_t word0Reserved3:2; /* FC Word 0, bit 8: 9 */
-+#else /* __LITTLE_ENDIAN_BITFIELD */
-+ uint8_t word0Reserved3:2; /* FC Word 0, bit 8: 9 */
-+ uint8_t iCtlAckNcapable:1; /* FC Word 0, bit 10 */
-+ uint8_t iCtlAck0capable:1; /* FC Word 0, bit 11 */
-+ uint8_t iCtlInitialPa:2; /* FC Word 0, bit 12:13 */
-+ uint8_t iCtlXidReAssgn:2; /* FC Word 0, Bit 14:15 */
-+#endif
-+
-+ uint8_t word0Reserved4; /* FC Word 0, bit 0: 7 */
-+
-+#ifdef __BIG_ENDIAN_BITFIELD
-+ uint8_t rCtlAck0capable:1; /* FC Word 1, bit 31 */
-+ uint8_t rCtlAckNcapable:1; /* FC Word 1, bit 30 */
-+ uint8_t rCtlXidInterlck:1; /* FC Word 1, bit 29 */
-+ uint8_t rCtlErrorPolicy:2; /* FC Word 1, bit 27:28 */
-+ uint8_t word1Reserved1:1; /* FC Word 1, bit 26 */
-+ uint8_t rCtlCatPerSeq:2; /* FC Word 1, bit 24:25 */
-+#else /* __LITTLE_ENDIAN_BITFIELD */
-+ uint8_t rCtlCatPerSeq:2; /* FC Word 1, bit 24:25 */
-+ uint8_t word1Reserved1:1; /* FC Word 1, bit 26 */
-+ uint8_t rCtlErrorPolicy:2; /* FC Word 1, bit 27:28 */
-+ uint8_t rCtlXidInterlck:1; /* FC Word 1, bit 29 */
-+ uint8_t rCtlAckNcapable:1; /* FC Word 1, bit 30 */
-+ uint8_t rCtlAck0capable:1; /* FC Word 1, bit 31 */
-+#endif
-+
-+ uint8_t word1Reserved2; /* FC Word 1, bit 16:23 */
-+ uint8_t rcvDataSizeMsb; /* FC Word 1, bit 8:15 */
-+ uint8_t rcvDataSizeLsb; /* FC Word 1, bit 0: 7 */
-+
-+ uint8_t concurrentSeqMsb; /* FC Word 2, bit 24:31 */
-+ uint8_t concurrentSeqLsb; /* FC Word 2, bit 16:23 */
-+ uint8_t EeCreditSeqMsb; /* FC Word 2, bit 8:15 */
-+ uint8_t EeCreditSeqLsb; /* FC Word 2, bit 0: 7 */
-+
-+ uint8_t openSeqPerXchgMsb; /* FC Word 3, bit 24:31 */
-+ uint8_t openSeqPerXchgLsb; /* FC Word 3, bit 16:23 */
-+ uint8_t word3Reserved1; /* Fc Word 3, bit 8:15 */
-+ uint8_t word3Reserved2; /* Fc Word 3, bit 0: 7 */
-+};
-+
-+struct serv_parm { /* Structure is in Big Endian format */
-+ struct csp cmn;
-+ struct lpfc_name portName;
-+ struct lpfc_name nodeName;
-+ struct class_parms cls1;
-+ struct class_parms cls2;
-+ struct class_parms cls3;
-+ struct class_parms cls4;
-+ uint8_t vendorVersion[16];
-+};
-+
-+/*
-+ * Extended Link Service LS_COMMAND codes (Payload Word 0)
-+ */
-+#ifdef __BIG_ENDIAN_BITFIELD
-+#define ELS_CMD_MASK 0xffff0000
-+#define ELS_RSP_MASK 0xff000000
-+#define ELS_CMD_LS_RJT 0x01000000
-+#define ELS_CMD_ACC 0x02000000
-+#define ELS_CMD_PLOGI 0x03000000
-+#define ELS_CMD_FLOGI 0x04000000
-+#define ELS_CMD_LOGO 0x05000000
-+#define ELS_CMD_ABTX 0x06000000
-+#define ELS_CMD_RCS 0x07000000
-+#define ELS_CMD_RES 0x08000000
-+#define ELS_CMD_RSS 0x09000000
-+#define ELS_CMD_RSI 0x0A000000
-+#define ELS_CMD_ESTS 0x0B000000
-+#define ELS_CMD_ESTC 0x0C000000
-+#define ELS_CMD_ADVC 0x0D000000
-+#define ELS_CMD_RTV 0x0E000000
-+#define ELS_CMD_RLS 0x0F000000
-+#define ELS_CMD_ECHO 0x10000000
-+#define ELS_CMD_TEST 0x11000000
-+#define ELS_CMD_RRQ 0x12000000
-+#define ELS_CMD_PRLI 0x20100014
-+#define ELS_CMD_PRLO 0x21100014
-+#define ELS_CMD_PDISC 0x50000000
-+#define ELS_CMD_FDISC 0x51000000
-+#define ELS_CMD_ADISC 0x52000000
-+#define ELS_CMD_FARP 0x54000000
-+#define ELS_CMD_FARPR 0x55000000
-+#define ELS_CMD_FAN 0x60000000
-+#define ELS_CMD_RSCN 0x61040000
-+#define ELS_CMD_SCR 0x62000000
-+#define ELS_CMD_RNID 0x78000000
-+#else /* __LITTLE_ENDIAN_BITFIELD */
-+#define ELS_CMD_MASK 0xffff
-+#define ELS_RSP_MASK 0xff
-+#define ELS_CMD_LS_RJT 0x01
-+#define ELS_CMD_ACC 0x02
-+#define ELS_CMD_PLOGI 0x03
-+#define ELS_CMD_FLOGI 0x04
-+#define ELS_CMD_LOGO 0x05
-+#define ELS_CMD_ABTX 0x06
-+#define ELS_CMD_RCS 0x07
-+#define ELS_CMD_RES 0x08
-+#define ELS_CMD_RSS 0x09
-+#define ELS_CMD_RSI 0x0A
-+#define ELS_CMD_ESTS 0x0B
-+#define ELS_CMD_ESTC 0x0C
-+#define ELS_CMD_ADVC 0x0D
-+#define ELS_CMD_RTV 0x0E
-+#define ELS_CMD_RLS 0x0F
-+#define ELS_CMD_ECHO 0x10
-+#define ELS_CMD_TEST 0x11
-+#define ELS_CMD_RRQ 0x12
-+#define ELS_CMD_PRLI 0x14001020
-+#define ELS_CMD_PRLO 0x14001021
-+#define ELS_CMD_PDISC 0x50
-+#define ELS_CMD_FDISC 0x51
-+#define ELS_CMD_ADISC 0x52
-+#define ELS_CMD_FARP 0x54
-+#define ELS_CMD_FARPR 0x55
-+#define ELS_CMD_FAN 0x60
-+#define ELS_CMD_RSCN 0x0461
-+#define ELS_CMD_SCR 0x62
-+#define ELS_CMD_RNID 0x78
-+#endif
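
Two parallel tables are needed because the driver reads ELS payload word 0 as a raw uint32_t in host byte order, exactly as it lands in memory: the command code occupies the first byte on the wire, so the same bytes read back as 0x03000000 on a big-endian host and 0x00000003 on a little-endian one (PRLI/PRLO also fold the 0x10 page length and 0x0014 payload length into the word, hence the byte-swapped 0x20100014 / 0x14001020 pair). A stand-alone check:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* first four payload bytes of a PLOGI as they appear on the wire:
	 * the 0x03 command code in byte 0 followed by three zero bytes
	 */
	const uint8_t wire[4] = { 0x03, 0x00, 0x00, 0x00 };
	uint32_t word;

	memcpy(&word, wire, sizeof(word));
	/* prints 0x03000000 on a big-endian host, 0x00000003 on little-endian */
	printf("PLOGI word 0 as this host sees it: 0x%08" PRIx32 "\n", word);
	return 0;
}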
-+
-+/*
-+ * LS_RJT Payload Definition
-+ */
-+
-+struct ls_rjt { /* Structure is in Big Endian format */
-+ union {
-+ uint32_t lsRjtError;
-+ struct {
-+ uint8_t lsRjtRsvd0; /* FC Word 0, bit 24:31 */
-+
-+ uint8_t lsRjtRsnCode; /* FC Word 0, bit 16:23 */
-+ /* LS_RJT reason codes */
-+#define LSRJT_INVALID_CMD 0x01
-+#define LSRJT_LOGICAL_ERR 0x03
-+#define LSRJT_LOGICAL_BSY 0x05
-+#define LSRJT_PROTOCOL_ERR 0x07
-+#define LSRJT_UNABLE_TPC 0x09 /* Unable to perform command */
-+#define LSRJT_CMD_UNSUPPORTED 0x0B
-+#define LSRJT_VENDOR_UNIQUE 0xFF /* See Byte 3 */
-+
-+ uint8_t lsRjtRsnCodeExp; /* FC Word 0, bit 8:15 */
-+ /* LS_RJT reason explanation */
-+#define LSEXP_NOTHING_MORE 0x00
-+#define LSEXP_SPARM_OPTIONS 0x01
-+#define LSEXP_SPARM_ICTL 0x03
-+#define LSEXP_SPARM_RCTL 0x05
-+#define LSEXP_SPARM_RCV_SIZE 0x07
-+#define LSEXP_SPARM_CONCUR_SEQ 0x09
-+#define LSEXP_SPARM_CREDIT 0x0B
-+#define LSEXP_INVALID_PNAME 0x0D
-+#define LSEXP_INVALID_NNAME 0x0E
-+#define LSEXP_INVALID_CSP 0x0F
-+#define LSEXP_INVALID_ASSOC_HDR 0x11
-+#define LSEXP_ASSOC_HDR_REQ 0x13
-+#define LSEXP_INVALID_O_SID 0x15
-+#define LSEXP_INVALID_OX_RX 0x17
-+#define LSEXP_CMD_IN_PROGRESS 0x19
-+#define LSEXP_INVALID_NPORT_ID 0x1F
-+#define LSEXP_INVALID_SEQ_ID 0x21
-+#define LSEXP_INVALID_XCHG 0x23
-+#define LSEXP_INACTIVE_XCHG 0x25
-+#define LSEXP_RQ_REQUIRED 0x27
-+#define LSEXP_OUT_OF_RESOURCE 0x29
-+#define LSEXP_CANT_GIVE_DATA 0x2A
-+#define LSEXP_REQ_UNSUPPORTED 0x2C
-+ uint8_t vendorUnique; /* FC Word 0, bit 0: 7 */
-+ } b;
-+ } un;
-+};
-+
-+/*
-+ * N_Port Logout (FLOGO/PLOGO Request) Payload Definition
-+ */
-+
-+typedef struct _LOGO { /* Structure is in Big Endian format */
-+ union {
-+ uint32_t nPortId32; /* Access nPortId as a word */
-+ struct {
-+ uint8_t word1Reserved1; /* FC Word 1, bit 31:24 */
-+ uint8_t nPortIdByte0; /* N_port ID bit 16:23 */
-+ uint8_t nPortIdByte1; /* N_port ID bit 8:15 */
-+ uint8_t nPortIdByte2; /* N_port ID bit 0: 7 */
-+ } b;
-+ } un;
-+ struct lpfc_name portName; /* N_port name field */
-+} LOGO;
-+
-+/*
-+ * FCP Login (PRLI Request / ACC) Payload Definition
-+ */
-+
-+#define PRLX_PAGE_LEN 0x10
-+#define TPRLO_PAGE_LEN 0x14
-+
-+typedef struct _PRLI { /* Structure is in Big Endian format */
-+ uint8_t prliType; /* FC Parm Word 0, bit 24:31 */
-+
-+#define PRLI_FCP_TYPE 0x08
-+ uint8_t word0Reserved1; /* FC Parm Word 0, bit 16:23 */
-+
-+#ifdef __BIG_ENDIAN_BITFIELD
-+ uint8_t origProcAssocV:1; /* FC Parm Word 0, bit 15 */
-+ uint8_t respProcAssocV:1; /* FC Parm Word 0, bit 14 */
-+ uint8_t estabImagePair:1; /* FC Parm Word 0, bit 13 */
-+
-+ /* ACC = imagePairEstablished */
-+ uint8_t word0Reserved2:1; /* FC Parm Word 0, bit 12 */
-+ uint8_t acceptRspCode:4; /* FC Parm Word 0, bit 8:11, ACC ONLY */
-+#else /* __LITTLE_ENDIAN_BITFIELD */
-+ uint8_t acceptRspCode:4; /* FC Parm Word 0, bit 8:11, ACC ONLY */
-+ uint8_t word0Reserved2:1; /* FC Parm Word 0, bit 12 */
-+ uint8_t estabImagePair:1; /* FC Parm Word 0, bit 13 */
-+ uint8_t respProcAssocV:1; /* FC Parm Word 0, bit 14 */
-+ uint8_t origProcAssocV:1; /* FC Parm Word 0, bit 15 */
-+ /* ACC = imagePairEstablished */
-+#endif
-+
-+#define PRLI_REQ_EXECUTED 0x1 /* acceptRspCode */
-+#define PRLI_NO_RESOURCES 0x2
-+#define PRLI_INIT_INCOMPLETE 0x3
-+#define PRLI_NO_SUCH_PA 0x4
-+#define PRLI_PREDEF_CONFIG 0x5
-+#define PRLI_PARTIAL_SUCCESS 0x6
-+#define PRLI_INVALID_PAGE_CNT 0x7
-+ uint8_t word0Reserved3; /* FC Parm Word 0, bit 0:7 */
-+
-+ uint32_t origProcAssoc; /* FC Parm Word 1, bit 0:31 */
-+
-+ uint32_t respProcAssoc; /* FC Parm Word 2, bit 0:31 */
-+
-+ uint8_t word3Reserved1; /* FC Parm Word 3, bit 24:31 */
-+ uint8_t word3Reserved2; /* FC Parm Word 3, bit 16:23 */
-+
-+#ifdef __BIG_ENDIAN_BITFIELD
-+ uint16_t Word3bit15Resved:1; /* FC Parm Word 3, bit 15 */
-+ uint16_t Word3bit14Resved:1; /* FC Parm Word 3, bit 14 */
-+ uint16_t Word3bit13Resved:1; /* FC Parm Word 3, bit 13 */
-+ uint16_t Word3bit12Resved:1; /* FC Parm Word 3, bit 12 */
-+ uint16_t Word3bit11Resved:1; /* FC Parm Word 3, bit 11 */
-+ uint16_t Word3bit10Resved:1; /* FC Parm Word 3, bit 10 */
-+ uint16_t TaskRetryIdReq:1; /* FC Parm Word 3, bit 9 */
-+ uint16_t Retry:1; /* FC Parm Word 3, bit 8 */
-+ uint16_t ConfmComplAllowed:1; /* FC Parm Word 3, bit 7 */
-+ uint16_t dataOverLay:1; /* FC Parm Word 3, bit 6 */
-+ uint16_t initiatorFunc:1; /* FC Parm Word 3, bit 5 */
-+ uint16_t targetFunc:1; /* FC Parm Word 3, bit 4 */
-+ uint16_t cmdDataMixEna:1; /* FC Parm Word 3, bit 3 */
-+ uint16_t dataRspMixEna:1; /* FC Parm Word 3, bit 2 */
-+ uint16_t readXferRdyDis:1; /* FC Parm Word 3, bit 1 */
-+ uint16_t writeXferRdyDis:1; /* FC Parm Word 3, bit 0 */
-+#else /* __LITTLE_ENDIAN_BITFIELD */
-+ uint16_t Retry:1; /* FC Parm Word 3, bit 8 */
-+ uint16_t TaskRetryIdReq:1; /* FC Parm Word 3, bit 9 */
-+ uint16_t Word3bit10Resved:1; /* FC Parm Word 3, bit 10 */
-+ uint16_t Word3bit11Resved:1; /* FC Parm Word 3, bit 11 */
-+ uint16_t Word3bit12Resved:1; /* FC Parm Word 3, bit 12 */
-+ uint16_t Word3bit13Resved:1; /* FC Parm Word 3, bit 13 */
-+ uint16_t Word3bit14Resved:1; /* FC Parm Word 3, bit 14 */
-+ uint16_t Word3bit15Resved:1; /* FC Parm Word 3, bit 15 */
-+ uint16_t writeXferRdyDis:1; /* FC Parm Word 3, bit 0 */
-+ uint16_t readXferRdyDis:1; /* FC Parm Word 3, bit 1 */
-+ uint16_t dataRspMixEna:1; /* FC Parm Word 3, bit 2 */
-+ uint16_t cmdDataMixEna:1; /* FC Parm Word 3, bit 3 */
-+ uint16_t targetFunc:1; /* FC Parm Word 3, bit 4 */
-+ uint16_t initiatorFunc:1; /* FC Parm Word 3, bit 5 */
-+ uint16_t dataOverLay:1; /* FC Parm Word 3, bit 6 */
-+ uint16_t ConfmComplAllowed:1; /* FC Parm Word 3, bit 7 */
-+#endif
-+} PRLI;
-+
-+/*
-+ * FCP Logout (PRLO Request / ACC) Payload Definition
-+ */
-+
-+typedef struct _PRLO { /* Structure is in Big Endian format */
-+ uint8_t prloType; /* FC Parm Word 0, bit 24:31 */
-+
-+#define PRLO_FCP_TYPE 0x08
-+ uint8_t word0Reserved1; /* FC Parm Word 0, bit 16:23 */
-+
-+#ifdef __BIG_ENDIAN_BITFIELD
-+ uint8_t origProcAssocV:1; /* FC Parm Word 0, bit 15 */
-+ uint8_t respProcAssocV:1; /* FC Parm Word 0, bit 14 */
-+ uint8_t word0Reserved2:2; /* FC Parm Word 0, bit 12:13 */
-+ uint8_t acceptRspCode:4; /* FC Parm Word 0, bit 8:11, ACC ONLY */
-+#else /* __LITTLE_ENDIAN_BITFIELD */
-+ uint8_t acceptRspCode:4; /* FC Parm Word 0, bit 8:11, ACC ONLY */
-+ uint8_t word0Reserved2:2; /* FC Parm Word 0, bit 12:13 */
-+ uint8_t respProcAssocV:1; /* FC Parm Word 0, bit 14 */
-+ uint8_t origProcAssocV:1; /* FC Parm Word 0, bit 15 */
-+#endif
-+
-+#define PRLO_REQ_EXECUTED 0x1 /* acceptRspCode */
-+#define PRLO_NO_SUCH_IMAGE 0x4
-+#define PRLO_INVALID_PAGE_CNT 0x7
-+
-+ uint8_t word0Reserved3; /* FC Parm Word 0, bit 0:7 */
-+
-+ uint32_t origProcAssoc; /* FC Parm Word 1, bit 0:31 */
-+
-+ uint32_t respProcAssoc; /* FC Parm Word 2, bit 0:31 */
-+
-+ uint32_t word3Reserved1; /* FC Parm Word 3, bit 0:31 */
-+} PRLO;
-+
-+typedef struct _ADISC { /* Structure is in Big Endian format */
-+ uint32_t hardAL_PA;
-+ struct lpfc_name portName;
-+ struct lpfc_name nodeName;
-+ uint32_t DID;
-+} ADISC;
-+
-+typedef struct _FARP { /* Structure is in Big Endian format */
-+ uint32_t Mflags:8;
-+ uint32_t Odid:24;
-+#define FARP_NO_ACTION 0 /* FARP information enclosed, no
-+ action */
-+#define FARP_MATCH_PORT 0x1 /* Match on Responder Port Name */
-+#define FARP_MATCH_NODE 0x2 /* Match on Responder Node Name */
-+#define FARP_MATCH_IP 0x4 /* Match on IP address, not supported */
-+#define FARP_MATCH_IPV4 0x5 /* Match on IPV4 address, not
-+ supported */
-+#define FARP_MATCH_IPV6 0x6 /* Match on IPV6 address, not
-+ supported */
-+ uint32_t Rflags:8;
-+ uint32_t Rdid:24;
-+#define FARP_REQUEST_PLOGI 0x1 /* Request for PLOGI */
-+#define FARP_REQUEST_FARPR 0x2 /* Request for FARP Response */
-+ struct lpfc_name OportName;
-+ struct lpfc_name OnodeName;
-+ struct lpfc_name RportName;
-+ struct lpfc_name RnodeName;
-+ uint8_t Oipaddr[16];
-+ uint8_t Ripaddr[16];
-+} FARP;
-+
-+typedef struct _FAN { /* Structure is in Big Endian format */
-+ uint32_t Fdid;
-+ struct lpfc_name FportName;
-+ struct lpfc_name FnodeName;
-+} FAN;
-+
-+typedef struct _SCR { /* Structure is in Big Endian format */
-+ uint8_t resvd1;
-+ uint8_t resvd2;
-+ uint8_t resvd3;
-+ uint8_t Function;
-+#define SCR_FUNC_FABRIC 0x01
-+#define SCR_FUNC_NPORT 0x02
-+#define SCR_FUNC_FULL 0x03
-+#define SCR_CLEAR 0xff
-+} SCR;
-+
-+typedef struct _RNID_TOP_DISC {
-+ struct lpfc_name portName;
-+ uint8_t resvd[8];
-+ uint32_t unitType;
-+#define RNID_HBA 0x7
-+#define RNID_HOST 0xa
-+#define RNID_DRIVER 0xd
-+ uint32_t physPort;
-+ uint32_t attachedNodes;
-+ uint16_t ipVersion;
-+#define RNID_IPV4 0x1
-+#define RNID_IPV6 0x2
-+ uint16_t UDPport;
-+ uint8_t ipAddr[16];
-+ uint16_t resvd1;
-+ uint16_t flags;
-+#define RNID_TD_SUPPORT 0x1
-+#define RNID_LP_VALID 0x2
-+} RNID_TOP_DISC;
-+
-+typedef struct _RNID { /* Structure is in Big Endian format */
-+ uint8_t Format;
-+#define RNID_TOPOLOGY_DISC 0xdf
-+ uint8_t CommonLen;
-+ uint8_t resvd1;
-+ uint8_t SpecificLen;
-+ struct lpfc_name portName;
-+ struct lpfc_name nodeName;
-+ union {
-+ RNID_TOP_DISC topologyDisc; /* topology disc (0xdf) */
-+ } un;
-+} RNID;
-+
-+typedef struct _RRQ { /* Structure is in Big Endian format */
-+ uint32_t SID;
-+ uint16_t Oxid;
-+ uint16_t Rxid;
-+ uint8_t resv[32]; /* optional association hdr */
-+} RRQ;
-+
-+/* This is used for RSCN command */
-+typedef struct _D_ID { /* Structure is in Big Endian format */
-+ union {
-+ uint32_t word;
-+ struct {
-+#ifdef __BIG_ENDIAN_BITFIELD
-+ uint8_t resv;
-+ uint8_t domain;
-+ uint8_t area;
-+ uint8_t id;
-+#else /* __LITTLE_ENDIAN_BITFIELD */
-+ uint8_t id;
-+ uint8_t area;
-+ uint8_t domain;
-+ uint8_t resv;
-+#endif
-+ } b;
-+ } un;
-+} D_ID;
-+
-+/*
-+ * Structure to define all ELS Payload types
-+ */
-+
-+typedef struct _ELS_PKT { /* Structure is in Big Endian format */
-+ uint8_t elsCode; /* FC Word 0, bit 24:31 */
-+ uint8_t elsByte1;
-+ uint8_t elsByte2;
-+ uint8_t elsByte3;
-+ union {
-+ struct ls_rjt lsRjt; /* Payload for LS_RJT ELS response */
-+ struct serv_parm logi; /* Payload for PLOGI/FLOGI/PDISC/ACC */
-+ LOGO logo; /* Payload for PLOGO/FLOGO/ACC */
-+ PRLI prli; /* Payload for PRLI/ACC */
-+ PRLO prlo; /* Payload for PRLO/ACC */
-+ ADISC adisc; /* Payload for ADISC/ACC */
-+ FARP farp; /* Payload for FARP/ACC */
-+ FAN fan; /* Payload for FAN */
-+ SCR scr; /* Payload for SCR/ACC */
-+ RRQ rrq; /* Payload for RRQ */
-+ RNID rnid; /* Payload for RNID */
-+ uint8_t pad[128 - 4]; /* Pad out to payload of 128 bytes */
-+ } un;
-+} ELS_PKT;
-+
-+/*
-+ * FDMI
-+ * HBA Management Operations Command Codes
-+ */
-+#define SLI_MGMT_GRHL 0x100 /* Get registered HBA list */
-+#define SLI_MGMT_GHAT 0x101 /* Get HBA attributes */
-+#define SLI_MGMT_GRPL 0x102 /* Get registered Port list */
-+#define SLI_MGMT_GPAT 0x110 /* Get Port attributes */
-+#define SLI_MGMT_RHBA 0x200 /* Register HBA */
-+#define SLI_MGMT_RHAT 0x201 /* Register HBA attributes */
-+#define SLI_MGMT_RPRT 0x210 /* Register Port */
-+#define SLI_MGMT_RPA 0x211 /* Register Port attributes */
-+#define SLI_MGMT_DHBA 0x300 /* De-register HBA */
-+#define SLI_MGMT_DPRT 0x310 /* De-register Port */
-+
-+/*
-+ * Management Service Subtypes
-+ */
-+#define SLI_CT_FDMI_Subtypes 0x10
-+
-+/*
-+ * HBA Management Service Reject Code
-+ */
-+#define REJECT_CODE 0x9 /* Unable to perform command request */
-+
-+/*
-+ * HBA Management Service Reject Reason Code
-+ * Please refer to the Reason Codes above
-+ */
-+
-+/*
-+ * HBA Attribute Types
-+ */
-+#define NODE_NAME 0x1
-+#define MANUFACTURER 0x2
-+#define SERIAL_NUMBER 0x3
-+#define MODEL 0x4
-+#define MODEL_DESCRIPTION 0x5
-+#define HARDWARE_VERSION 0x6
-+#define DRIVER_VERSION 0x7
-+#define OPTION_ROM_VERSION 0x8
-+#define FIRMWARE_VERSION 0x9
-+#define OS_NAME_VERSION 0xa
-+#define MAX_CT_PAYLOAD_LEN 0xb
-+
-+/*
-+ * Port Attribute Types
-+ */
-+#define SUPPORTED_FC4_TYPES 0x1
-+#define SUPPORTED_SPEED 0x2
-+#define PORT_SPEED 0x3
-+#define MAX_FRAME_SIZE 0x4
-+#define OS_DEVICE_NAME 0x5
-+#define HOST_NAME 0x6
-+
-+union AttributesDef {
-+ /* Structure is in Big Endian format */
-+ struct {
-+ uint32_t AttrType:16;
-+ uint32_t AttrLen:16;
-+ } bits;
-+ uint32_t word;
-+};
-+
-+
-+/*
-+ * HBA Attribute Entry (8 - 260 bytes)
-+ */
-+typedef struct {
-+ union AttributesDef ad;
-+ union {
-+ uint32_t VendorSpecific;
-+ uint8_t Manufacturer[64];
-+ uint8_t SerialNumber[64];
-+ uint8_t Model[256];
-+ uint8_t ModelDescription[256];
-+ uint8_t HardwareVersion[256];
-+ uint8_t DriverVersion[256];
-+ uint8_t OptionROMVersion[256];
-+ uint8_t FirmwareVersion[256];
-+ struct lpfc_name NodeName;
-+ uint8_t SupportFC4Types[32];
-+ uint32_t SupportSpeed;
-+ uint32_t PortSpeed;
-+ uint32_t MaxFrameSize;
-+ uint8_t OsDeviceName[256];
-+ uint8_t OsNameVersion[256];
-+ uint32_t MaxCTPayloadLen;
-+ uint8_t HostName[256];
-+ } un;
-+} ATTRIBUTE_ENTRY;
-+
-+/*
-+ * HBA Attribute Block
-+ */
-+typedef struct {
-+ uint32_t EntryCnt; /* Number of HBA attribute entries */
-+ ATTRIBUTE_ENTRY Entry; /* Variable-length array */
-+} ATTRIBUTE_BLOCK;
-+
-+/*
-+ * Port Entry
-+ */
-+typedef struct {
-+ struct lpfc_name PortName;
-+} PORT_ENTRY;
-+
-+/*
-+ * HBA Identifier
-+ */
-+typedef struct {
-+ struct lpfc_name PortName;
-+} HBA_IDENTIFIER;
-+
-+/*
-+ * Registered Port List Format
-+ */
-+typedef struct {
-+ uint32_t EntryCnt;
-+ PORT_ENTRY pe; /* Variable-length array */
-+} REG_PORT_LIST;
-+
-+/*
-+ * Register HBA(RHBA)
-+ */
-+typedef struct {
-+ HBA_IDENTIFIER hi;
-+ REG_PORT_LIST rpl; /* variable-length array */
-+/* ATTRIBUTE_BLOCK ab; */
-+} REG_HBA;
-+
-+/*
-+ * Register HBA Attributes (RHAT)
-+ */
-+typedef struct {
-+ struct lpfc_name HBA_PortName;
-+ ATTRIBUTE_BLOCK ab;
-+} REG_HBA_ATTRIBUTE;
-+
-+/*
-+ * Register Port Attributes (RPA)
-+ */
-+typedef struct {
-+ struct lpfc_name PortName;
-+ ATTRIBUTE_BLOCK ab;
-+} REG_PORT_ATTRIBUTE;
-+
-+/*
-+ * Get Registered HBA List (GRHL) Accept Payload Format
-+ */
-+typedef struct {
-+ uint32_t HBA__Entry_Cnt; /* Number of Registered HBA Identifiers */
-+ struct lpfc_name HBA_PortName; /* Variable-length array */
-+} GRHL_ACC_PAYLOAD;
-+
-+/*
-+ * Get Registered Port List (GRPL) Accept Payload Format
-+ */
-+typedef struct {
-+ uint32_t RPL_Entry_Cnt; /* Number of Registered Port Entries */
-+ PORT_ENTRY Reg_Port_Entry[1]; /* Variable-length array */
-+} GRPL_ACC_PAYLOAD;
-+
-+/*
-+ * Get Port Attributes (GPAT) Accept Payload Format
-+ */
-+
-+typedef struct {
-+ ATTRIBUTE_BLOCK pab;
-+} GPAT_ACC_PAYLOAD;
-+
-+
-+/*
-+ * Begin HBA configuration parameters.
-+ * The PCI configuration register BAR assignments are:
-+ * BAR0, offset 0x10 - SLIM base memory address
-+ * BAR1, offset 0x14 - SLIM base memory high address
-+ * BAR2, offset 0x18 - REGISTER base memory address
-+ * BAR3, offset 0x1c - REGISTER base memory high address
-+ * BAR4, offset 0x20 - BIU I/O registers
-+ * BAR5, offset 0x24 - REGISTER base io high address
-+ */
-+
-+/* Number of rings currently used and available. */
-+#define MAX_CONFIGURED_RINGS 3
-+#define MAX_RINGS 4
-+
-+/* IOCB / Mailbox is owned by FireFly */
-+#define OWN_CHIP 1
-+
-+/* IOCB / Mailbox is owned by Host */
-+#define OWN_HOST 0
-+
-+/* Number of 4-byte words in an IOCB. */
-+#define IOCB_WORD_SZ 8
-+
-+/* defines for type field in fc header */
-+#define FC_ELS_DATA 0x1
-+#define FC_LLC_SNAP 0x5
-+#define FC_FCP_DATA 0x8
-+#define FC_COMMON_TRANSPORT_ULP 0x20
-+
-+/* defines for rctl field in fc header */
-+#define FC_DEV_DATA 0x0
-+#define FC_UNSOL_CTL 0x2
-+#define FC_SOL_CTL 0x3
-+#define FC_UNSOL_DATA 0x4
-+#define FC_FCP_CMND 0x6
-+#define FC_ELS_REQ 0x22
-+#define FC_ELS_RSP 0x23
-+
-+/* network headers for Dfctl field */
-+#define FC_NET_HDR 0x20
-+
-+/* Start FireFly Register definitions */
-+#define PCI_VENDOR_ID_EMULEX 0x10df
-+#define PCI_DEVICE_ID_FIREFLY 0x1ae5
-+#define PCI_DEVICE_ID_SUPERFLY 0xf700
-+#define PCI_DEVICE_ID_DRAGONFLY 0xf800
-+#define PCI_DEVICE_ID_RFLY 0xf095
-+#define PCI_DEVICE_ID_PFLY 0xf098
-+#define PCI_DEVICE_ID_TFLY 0xf0a5
-+#define PCI_DEVICE_ID_CENTAUR 0xf900
-+#define PCI_DEVICE_ID_PEGASUS 0xf980
-+#define PCI_DEVICE_ID_THOR 0xfa00
-+#define PCI_DEVICE_ID_VIPER 0xfb00
-+#define PCI_DEVICE_ID_HELIOS 0xfd00
-+#define PCI_DEVICE_ID_BMID 0xf0d5
-+#define PCI_DEVICE_ID_BSMB 0xf0d1
-+#define PCI_DEVICE_ID_ZEPHYR 0xfe00
-+#define PCI_DEVICE_ID_ZMID 0xf0e5
-+#define PCI_DEVICE_ID_ZSMB 0xf0e1
-+#define PCI_DEVICE_ID_LP101 0xf0a1
-+#define PCI_DEVICE_ID_LP10000S 0xfc00
-+
-+#define JEDEC_ID_ADDRESS 0x0080001c
-+#define FIREFLY_JEDEC_ID 0x1ACC
-+#define SUPERFLY_JEDEC_ID 0x0020
-+#define DRAGONFLY_JEDEC_ID 0x0021
-+#define DRAGONFLY_V2_JEDEC_ID 0x0025
-+#define CENTAUR_2G_JEDEC_ID 0x0026
-+#define CENTAUR_1G_JEDEC_ID 0x0028
-+#define PEGASUS_ORION_JEDEC_ID 0x0036
-+#define PEGASUS_JEDEC_ID 0x0038
-+#define THOR_JEDEC_ID 0x0012
-+#define HELIOS_JEDEC_ID 0x0364
-+#define ZEPHYR_JEDEC_ID 0x0577
-+#define VIPER_JEDEC_ID 0x4838
-+
-+#define JEDEC_ID_MASK 0x0FFFF000
-+#define JEDEC_ID_SHIFT 12
-+#define FC_JEDEC_ID(id) ((id & JEDEC_ID_MASK) >> JEDEC_ID_SHIFT)
-+
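[Editorial note: as a quick worked example of the mask/shift defined above, the standalone sketch below (not part of the patch; the register value is illustrative) shows how the JEDEC revision field in bits 27:12 is extracted.]

    #include <assert.h>
    #include <stdint.h>

    /* Standalone sketch of the FC_JEDEC_ID() extraction defined above. */
    static uint32_t fc_jedec_id(uint32_t slim_word)
    {
            return (slim_word & 0x0FFFF000) >> 12;  /* JEDEC_ID_MASK, JEDEC_ID_SHIFT */
    }

    int main(void)
    {
            /* hypothetical word read at JEDEC_ID_ADDRESS with the Helios ID in bits 27:12 */
            assert(fc_jedec_id(0x00364ABC) == 0x0364);  /* HELIOS_JEDEC_ID */
            return 0;
    }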
-+typedef struct { /* FireFly BIU registers */
-+ uint32_t hostAtt; /* See definitions for Host Attention
-+ register */
-+ uint32_t chipAtt; /* See definitions for Chip Attention
-+ register */
-+ uint32_t hostStatus; /* See definitions for Host Status register */
-+ uint32_t hostControl; /* See definitions for Host Control register */
-+ uint32_t buiConfig; /* See definitions for BIU configuration
-+ register */
-+} FF_REGS;
-+
-+/* IO Register size in bytes */
-+#define FF_REG_AREA_SIZE 256
-+
-+/* Host Attention Register */
-+
-+#define HA_REG_OFFSET 0 /* Word offset from register base address */
-+
-+#define HA_R0RE_REQ 0x00000001 /* Bit 0 */
-+#define HA_R0CE_RSP 0x00000002 /* Bit 1 */
-+#define HA_R0ATT 0x00000008 /* Bit 3 */
-+#define HA_R1RE_REQ 0x00000010 /* Bit 4 */
-+#define HA_R1CE_RSP 0x00000020 /* Bit 5 */
-+#define HA_R1ATT 0x00000080 /* Bit 7 */
-+#define HA_R2RE_REQ 0x00000100 /* Bit 8 */
-+#define HA_R2CE_RSP 0x00000200 /* Bit 9 */
-+#define HA_R2ATT 0x00000800 /* Bit 11 */
-+#define HA_R3RE_REQ 0x00001000 /* Bit 12 */
-+#define HA_R3CE_RSP 0x00002000 /* Bit 13 */
-+#define HA_R3ATT 0x00008000 /* Bit 15 */
-+#define HA_LATT 0x20000000 /* Bit 29 */
-+#define HA_MBATT 0x40000000 /* Bit 30 */
-+#define HA_ERATT 0x80000000 /* Bit 31 */
-+
-+#define HA_RXRE_REQ 0x00000001 /* Bit 0 */
-+#define HA_RXCE_RSP 0x00000002 /* Bit 1 */
-+#define HA_RXATT 0x00000008 /* Bit 3 */
-+#define HA_RXMASK 0x0000000f
-+
-+/* Chip Attention Register */
-+
-+#define CA_REG_OFFSET 1 /* Word offset from register base address */
-+
-+#define CA_R0CE_REQ 0x00000001 /* Bit 0 */
-+#define CA_R0RE_RSP 0x00000002 /* Bit 1 */
-+#define CA_R0ATT 0x00000008 /* Bit 3 */
-+#define CA_R1CE_REQ 0x00000010 /* Bit 4 */
-+#define CA_R1RE_RSP 0x00000020 /* Bit 5 */
-+#define CA_R1ATT 0x00000080 /* Bit 7 */
-+#define CA_R2CE_REQ 0x00000100 /* Bit 8 */
-+#define CA_R2RE_RSP 0x00000200 /* Bit 9 */
-+#define CA_R2ATT 0x00000800 /* Bit 11 */
-+#define CA_R3CE_REQ 0x00001000 /* Bit 12 */
-+#define CA_R3RE_RSP 0x00002000 /* Bit 13 */
-+#define CA_R3ATT 0x00008000 /* Bit 15 */
-+#define CA_MBATT 0x40000000 /* Bit 30 */
-+
-+/* Host Status Register */
-+
-+#define HS_REG_OFFSET 2 /* Word offset from register base address */
-+
-+#define HS_MBRDY 0x00400000 /* Bit 22 */
-+#define HS_FFRDY 0x00800000 /* Bit 23 */
-+#define HS_FFER8 0x01000000 /* Bit 24 */
-+#define HS_FFER7 0x02000000 /* Bit 25 */
-+#define HS_FFER6 0x04000000 /* Bit 26 */
-+#define HS_FFER5 0x08000000 /* Bit 27 */
-+#define HS_FFER4 0x10000000 /* Bit 28 */
-+#define HS_FFER3 0x20000000 /* Bit 29 */
-+#define HS_FFER2 0x40000000 /* Bit 30 */
-+#define HS_FFER1 0x80000000 /* Bit 31 */
-+#define HS_FFERM 0xFF000000 /* Mask for error bits 31:24 */
-+
-+/* Host Control Register */
-+
-+#define HC_REG_OFFSET 3 /* Word offset from register base address */
-+
-+#define HC_MBINT_ENA 0x00000001 /* Bit 0 */
-+#define HC_R0INT_ENA 0x00000002 /* Bit 1 */
-+#define HC_R1INT_ENA 0x00000004 /* Bit 2 */
-+#define HC_R2INT_ENA 0x00000008 /* Bit 3 */
-+#define HC_R3INT_ENA 0x00000010 /* Bit 4 */
-+#define HC_INITHBI 0x02000000 /* Bit 25 */
-+#define HC_INITMB 0x04000000 /* Bit 26 */
-+#define HC_INITFF 0x08000000 /* Bit 27 */
-+#define HC_LAINT_ENA 0x20000000 /* Bit 29 */
-+#define HC_ERINT_ENA 0x80000000 /* Bit 31 */
-+
-+/* Mailbox Commands */
-+#define MBX_SHUTDOWN 0x00 /* terminate testing */
-+#define MBX_LOAD_SM 0x01
-+#define MBX_READ_NV 0x02
-+#define MBX_WRITE_NV 0x03
-+#define MBX_RUN_BIU_DIAG 0x04
-+#define MBX_INIT_LINK 0x05
-+#define MBX_DOWN_LINK 0x06
-+#define MBX_CONFIG_LINK 0x07
-+#define MBX_CONFIG_RING 0x09
-+#define MBX_RESET_RING 0x0A
-+#define MBX_READ_CONFIG 0x0B
-+#define MBX_READ_RCONFIG 0x0C
-+#define MBX_READ_SPARM 0x0D
-+#define MBX_READ_STATUS 0x0E
-+#define MBX_READ_RPI 0x0F
-+#define MBX_READ_XRI 0x10
-+#define MBX_READ_REV 0x11
-+#define MBX_READ_LNK_STAT 0x12
-+#define MBX_REG_LOGIN 0x13
-+#define MBX_UNREG_LOGIN 0x14
-+#define MBX_READ_LA 0x15
-+#define MBX_CLEAR_LA 0x16
-+#define MBX_DUMP_MEMORY 0x17
-+#define MBX_DUMP_CONTEXT 0x18
-+#define MBX_RUN_DIAGS 0x19
-+#define MBX_RESTART 0x1A
-+#define MBX_UPDATE_CFG 0x1B
-+#define MBX_DOWN_LOAD 0x1C
-+#define MBX_DEL_LD_ENTRY 0x1D
-+#define MBX_RUN_PROGRAM 0x1E
-+#define MBX_SET_MASK 0x20
-+#define MBX_SET_SLIM 0x21
-+#define MBX_UNREG_D_ID 0x23
-+#define MBX_CONFIG_FARP 0x25
-+
-+#define MBX_LOAD_AREA 0x81
-+#define MBX_RUN_BIU_DIAG64 0x84
-+#define MBX_CONFIG_PORT 0x88
-+#define MBX_READ_SPARM64 0x8D
-+#define MBX_READ_RPI64 0x8F
-+#define MBX_REG_LOGIN64 0x93
-+#define MBX_READ_LA64 0x95
-+
-+#define MBX_FLASH_WR_ULA 0x98
-+#define MBX_SET_DEBUG 0x99
-+#define MBX_LOAD_EXP_ROM 0x9C
-+
-+#define MBX_MAX_CMDS 0x9D
-+#define MBX_SLI2_CMD_MASK 0x80
-+
-+/* IOCB Commands */
-+
-+#define CMD_RCV_SEQUENCE_CX 0x01
-+#define CMD_XMIT_SEQUENCE_CR 0x02
-+#define CMD_XMIT_SEQUENCE_CX 0x03
-+#define CMD_XMIT_BCAST_CN 0x04
-+#define CMD_XMIT_BCAST_CX 0x05
-+#define CMD_QUE_RING_BUF_CN 0x06
-+#define CMD_QUE_XRI_BUF_CX 0x07
-+#define CMD_IOCB_CONTINUE_CN 0x08
-+#define CMD_RET_XRI_BUF_CX 0x09
-+#define CMD_ELS_REQUEST_CR 0x0A
-+#define CMD_ELS_REQUEST_CX 0x0B
-+#define CMD_RCV_ELS_REQ_CX 0x0D
-+#define CMD_ABORT_XRI_CN 0x0E
-+#define CMD_ABORT_XRI_CX 0x0F
-+#define CMD_CLOSE_XRI_CN 0x10
-+#define CMD_CLOSE_XRI_CX 0x11
-+#define CMD_CREATE_XRI_CR 0x12
-+#define CMD_CREATE_XRI_CX 0x13
-+#define CMD_GET_RPI_CN 0x14
-+#define CMD_XMIT_ELS_RSP_CX 0x15
-+#define CMD_GET_RPI_CR 0x16
-+#define CMD_XRI_ABORTED_CX 0x17
-+#define CMD_FCP_IWRITE_CR 0x18
-+#define CMD_FCP_IWRITE_CX 0x19
-+#define CMD_FCP_IREAD_CR 0x1A
-+#define CMD_FCP_IREAD_CX 0x1B
-+#define CMD_FCP_ICMND_CR 0x1C
-+#define CMD_FCP_ICMND_CX 0x1D
-+
-+#define CMD_ADAPTER_MSG 0x20
-+#define CMD_ADAPTER_DUMP 0x22
-+
-+/* SLI_2 IOCB Command Set */
-+
-+#define CMD_RCV_SEQUENCE64_CX 0x81
-+#define CMD_XMIT_SEQUENCE64_CR 0x82
-+#define CMD_XMIT_SEQUENCE64_CX 0x83
-+#define CMD_XMIT_BCAST64_CN 0x84
-+#define CMD_XMIT_BCAST64_CX 0x85
-+#define CMD_QUE_RING_BUF64_CN 0x86
-+#define CMD_QUE_XRI_BUF64_CX 0x87
-+#define CMD_IOCB_CONTINUE64_CN 0x88
-+#define CMD_RET_XRI_BUF64_CX 0x89
-+#define CMD_ELS_REQUEST64_CR 0x8A
-+#define CMD_ELS_REQUEST64_CX 0x8B
-+#define CMD_ABORT_MXRI64_CN 0x8C
-+#define CMD_RCV_ELS_REQ64_CX 0x8D
-+#define CMD_XMIT_ELS_RSP64_CX 0x95
-+#define CMD_FCP_IWRITE64_CR 0x98
-+#define CMD_FCP_IWRITE64_CX 0x99
-+#define CMD_FCP_IREAD64_CR 0x9A
-+#define CMD_FCP_IREAD64_CX 0x9B
-+#define CMD_FCP_ICMND64_CR 0x9C
-+#define CMD_FCP_ICMND64_CX 0x9D
-+
-+#define CMD_GEN_REQUEST64_CR 0xC2
-+#define CMD_GEN_REQUEST64_CX 0xC3
-+
-+#define CMD_MAX_IOCB_CMD 0xE6
-+#define CMD_IOCB_MASK 0xff
-+
-+#define MAX_MSG_DATA 28 /* max msg data in CMD_ADAPTER_MSG
-+ iocb */
-+#define LPFC_MAX_ADPTMSG 32 /* max msg data */
-+/*
-+ * Define Status
-+ */
-+#define MBX_SUCCESS 0
-+#define MBXERR_NUM_RINGS 1
-+#define MBXERR_NUM_IOCBS 2
-+#define MBXERR_IOCBS_EXCEEDED 3
-+#define MBXERR_BAD_RING_NUMBER 4
-+#define MBXERR_MASK_ENTRIES_RANGE 5
-+#define MBXERR_MASKS_EXCEEDED 6
-+#define MBXERR_BAD_PROFILE 7
-+#define MBXERR_BAD_DEF_CLASS 8
-+#define MBXERR_BAD_MAX_RESPONDER 9
-+#define MBXERR_BAD_MAX_ORIGINATOR 10
-+#define MBXERR_RPI_REGISTERED 11
-+#define MBXERR_RPI_FULL 12
-+#define MBXERR_NO_RESOURCES 13
-+#define MBXERR_BAD_RCV_LENGTH 14
-+#define MBXERR_DMA_ERROR 15
-+#define MBXERR_ERROR 16
-+#define MBX_NOT_FINISHED 255
-+
-+#define MBX_BUSY 0xffffff /* Attempted cmd to busy Mailbox */
-+#define MBX_TIMEOUT 0xfffffe /* time-out expired waiting for mailbox command to complete */
-+
-+/*
-+ * Begin Structure Definitions for Mailbox Commands
-+ */
-+
-+typedef struct {
-+#ifdef __BIG_ENDIAN_BITFIELD
-+ uint8_t tval;
-+ uint8_t tmask;
-+ uint8_t rval;
-+ uint8_t rmask;
-+#else /* __LITTLE_ENDIAN_BITFIELD */
-+ uint8_t rmask;
-+ uint8_t rval;
-+ uint8_t tmask;
-+ uint8_t tval;
-+#endif
-+} RR_REG;
-+
-+struct ulp_bde {
-+ uint32_t bdeAddress;
-+#ifdef __BIG_ENDIAN_BITFIELD
-+ uint32_t bdeReserved:4;
-+ uint32_t bdeAddrHigh:4;
-+ uint32_t bdeSize:24;
-+#else /* __LITTLE_ENDIAN_BITFIELD */
-+ uint32_t bdeSize:24;
-+ uint32_t bdeAddrHigh:4;
-+ uint32_t bdeReserved:4;
-+#endif
-+};
-+
-+struct ulp_bde64 { /* SLI-2 */
-+ union ULP_BDE_TUS {
-+ uint32_t w;
-+ struct {
-+#ifdef __BIG_ENDIAN_BITFIELD
-+ uint32_t bdeFlags:8; /* BDE Flags 0 IS A SUPPORTED
-+ VALUE !! */
-+ uint32_t bdeSize:24; /* Size of buffer (in bytes) */
-+#else /* __LITTLE_ENDIAN_BITFIELD */
-+ uint32_t bdeSize:24; /* Size of buffer (in bytes) */
-+ uint32_t bdeFlags:8; /* BDE Flags 0 IS A SUPPORTED
-+ VALUE !! */
-+#endif
-+
-+#define BUFF_USE_RSVD 0x01 /* bdeFlags */
-+#define BUFF_USE_INTRPT 0x02 /* Not Implemented with LP6000 */
-+#define BUFF_USE_CMND 0x04 /* Optional, 1=cmd/rsp 0=data buffer */
-+#define BUFF_USE_RCV 0x08 /* "" "", 1=rcv buffer, 0=xmit
-+ buffer */
-+#define BUFF_TYPE_32BIT 0x10 /* "" "", 1=32 bit addr 0=64 bit
-+ addr */
-+#define BUFF_TYPE_SPECIAL 0x20 /* Not Implemented with LP6000 */
-+#define BUFF_TYPE_BDL 0x40 /* Optional, may be set in BDL */
-+#define BUFF_TYPE_INVALID 0x80 /* "" "" */
-+ } f;
-+ } tus;
-+ uint32_t addrLow;
-+ uint32_t addrHigh;
-+};
-+#define BDE64_SIZE_WORD 0
-+#define BPL64_SIZE_WORD 0x40
-+
-+typedef struct ULP_BDL { /* SLI-2 */
-+#ifdef __BIG_ENDIAN_BITFIELD
-+ uint32_t bdeFlags:8; /* BDL Flags */
-+ uint32_t bdeSize:24; /* Size of BDL array in host memory (bytes) */
-+#else /* __LITTLE_ENDIAN_BITFIELD */
-+ uint32_t bdeSize:24; /* Size of BDL array in host memory (bytes) */
-+ uint32_t bdeFlags:8; /* BDL Flags */
-+#endif
-+
-+ uint32_t addrLow; /* Address 0:31 */
-+ uint32_t addrHigh; /* Address 32:63 */
-+ uint32_t ulpIoTag32; /* Can be used for 32 bit I/O Tag */
-+} ULP_BDL;
-+
-+/* Structure for MB Command LOAD_SM and DOWN_LOAD */
-+
-+typedef struct {
-+#ifdef __BIG_ENDIAN_BITFIELD
-+ uint32_t rsvd2:25;
-+ uint32_t acknowledgment:1;
-+ uint32_t version:1;
-+ uint32_t erase_or_prog:1;
-+ uint32_t update_flash:1;
-+ uint32_t update_ram:1;
-+ uint32_t method:1;
-+ uint32_t load_cmplt:1;
-+#else /* __LITTLE_ENDIAN_BITFIELD */
-+ uint32_t load_cmplt:1;
-+ uint32_t method:1;
-+ uint32_t update_ram:1;
-+ uint32_t update_flash:1;
-+ uint32_t erase_or_prog:1;
-+ uint32_t version:1;
-+ uint32_t acknowledgment:1;
-+ uint32_t rsvd2:25;
-+#endif
-+
-+ uint32_t dl_to_adr_low;
-+ uint32_t dl_to_adr_high;
-+ uint32_t dl_len;
-+ union {
-+ uint32_t dl_from_mbx_offset;
-+ struct ulp_bde dl_from_bde;
-+ struct ulp_bde64 dl_from_bde64;
-+ } un;
-+
-+} LOAD_SM_VAR;
-+
-+/* Structure for MB Command READ_NVPARM (02) */
-+
-+typedef struct {
-+ uint32_t rsvd1[3]; /* Read as all ones */
-+ uint32_t rsvd2; /* Read as all zeros */
-+ uint32_t portname[2]; /* N_PORT name */
-+ uint32_t nodename[2]; /* NODE name */
-+
-+#ifdef __BIG_ENDIAN_BITFIELD
-+ uint32_t pref_DID:24;
-+ uint32_t hardAL_PA:8;
-+#else /* __LITTLE_ENDIAN_BITFIELD */
-+ uint32_t hardAL_PA:8;
-+ uint32_t pref_DID:24;
-+#endif
-+
-+ uint32_t rsvd3[21]; /* Read as all ones */
-+} READ_NV_VAR;
-+
-+/* Structure for MB Command WRITE_NVPARMS (03) */
-+
-+typedef struct {
-+ uint32_t rsvd1[3]; /* Must be all ones */
-+ uint32_t rsvd2; /* Must be all zeros */
-+ uint32_t portname[2]; /* N_PORT name */
-+ uint32_t nodename[2]; /* NODE name */
-+
-+#ifdef __BIG_ENDIAN_BITFIELD
-+ uint32_t pref_DID:24;
-+ uint32_t hardAL_PA:8;
-+#else /* __LITTLE_ENDIAN_BITFIELD */
-+ uint32_t hardAL_PA:8;
-+ uint32_t pref_DID:24;
-+#endif
-+
-+ uint32_t rsvd3[21]; /* Must be all ones */
-+} WRITE_NV_VAR;
-+
-+/* Structure for MB Command RUN_BIU_DIAG (04) */
-+/* Structure for MB Command RUN_BIU_DIAG64 (0x84) */
-+
-+typedef struct {
-+ uint32_t rsvd1;
-+ union {
-+ struct {
-+ struct ulp_bde xmit_bde;
-+ struct ulp_bde rcv_bde;
-+ } s1;
-+ struct {
-+ struct ulp_bde64 xmit_bde64;
-+ struct ulp_bde64 rcv_bde64;
-+ } s2;
-+ } un;
-+} BIU_DIAG_VAR;
-+
-+/* Structure for MB Command INIT_LINK (05) */
-+
-+typedef struct {
-+#ifdef __BIG_ENDIAN_BITFIELD
-+ uint32_t rsvd1:24;
-+ uint32_t lipsr_AL_PA:8; /* AL_PA to issue Lip Selective Reset to */
-+#else /* __LITTLE_ENDIAN_BITFIELD */
-+ uint32_t lipsr_AL_PA:8; /* AL_PA to issue Lip Selective Reset to */
-+ uint32_t rsvd1:24;
-+#endif
-+
-+#ifdef __BIG_ENDIAN_BITFIELD
-+ uint8_t fabric_AL_PA; /* If using a Fabric Assigned AL_PA */
-+ uint8_t rsvd2;
-+ uint16_t link_flags;
-+#else /* __LITTLE_ENDIAN_BITFIELD */
-+ uint16_t link_flags;
-+ uint8_t rsvd2;
-+ uint8_t fabric_AL_PA; /* If using a Fabric Assigned AL_PA */
-+#endif
-+
-+#define FLAGS_LOCAL_LB 0x01 /* link_flags (=1) ENDEC loopback */
-+#define FLAGS_TOPOLOGY_MODE_LOOP_PT 0x00 /* Attempt loop then pt-pt */
-+#define FLAGS_TOPOLOGY_MODE_PT_PT 0x02 /* Attempt pt-pt only */
-+#define FLAGS_TOPOLOGY_MODE_LOOP 0x04 /* Attempt loop only */
-+#define FLAGS_TOPOLOGY_MODE_PT_LOOP 0x06 /* Attempt pt-pt then loop */
-+#define FLAGS_LIRP_LILP 0x80 /* LIRP / LILP is disabled */
-+
-+#define FLAGS_TOPOLOGY_FAILOVER 0x0400 /* Bit 10 */
-+#define FLAGS_LINK_SPEED 0x0800 /* Bit 11 */
-+
-+ uint32_t link_speed;
-+#define LINK_SPEED_AUTO 0 /* Auto selection */
-+#define LINK_SPEED_1G 1 /* 1 Gigabaud */
-+#define LINK_SPEED_2G 2 /* 2 Gigabaud */
-+#define LINK_SPEED_4G 4 /* 4 Gigabaud */
-+#define LINK_SPEED_8G 8 /* 8 Gigabaud */
-+#define LINK_SPEED_10G 16 /* 10 Gigabaud */
-+
-+} INIT_LINK_VAR;
-+
-+/* Structure for MB Command DOWN_LINK (06) */
-+
-+typedef struct {
-+ uint32_t rsvd1;
-+} DOWN_LINK_VAR;
-+
-+/* Structure for MB Command CONFIG_LINK (07) */
-+
-+typedef struct {
-+#ifdef __BIG_ENDIAN_BITFIELD
-+ uint32_t cr:1;
-+ uint32_t ci:1;
-+ uint32_t cr_delay:6;
-+ uint32_t cr_count:8;
-+ uint32_t rsvd1:8;
-+ uint32_t MaxBBC:8;
-+#else /* __LITTLE_ENDIAN_BITFIELD */
-+ uint32_t MaxBBC:8;
-+ uint32_t rsvd1:8;
-+ uint32_t cr_count:8;
-+ uint32_t cr_delay:6;
-+ uint32_t ci:1;
-+ uint32_t cr:1;
-+#endif
-+
-+ uint32_t myId;
-+ uint32_t rsvd2;
-+ uint32_t edtov;
-+ uint32_t arbtov;
-+ uint32_t ratov;
-+ uint32_t rttov;
-+ uint32_t altov;
-+ uint32_t crtov;
-+ uint32_t citov;
-+#ifdef __BIG_ENDIAN_BITFIELD
-+ uint32_t rrq_enable:1;
-+ uint32_t rrq_immed:1;
-+ uint32_t rsvd4:29;
-+ uint32_t ack0_enable:1;
-+#else /* __LITTLE_ENDIAN_BITFIELD */
-+ uint32_t ack0_enable:1;
-+ uint32_t rsvd4:29;
-+ uint32_t rrq_immed:1;
-+ uint32_t rrq_enable:1;
-+#endif
-+} CONFIG_LINK;
-+
-+/* Structure for MB Command PART_SLIM (08)
-+ * will be removed since SLI1 is no longer supported!
-+ */
-+typedef struct {
-+#ifdef __BIG_ENDIAN_BITFIELD
-+ uint16_t offCiocb;
-+ uint16_t numCiocb;
-+ uint16_t offRiocb;
-+ uint16_t numRiocb;
-+#else /* __LITTLE_ENDIAN_BITFIELD */
-+ uint16_t numCiocb;
-+ uint16_t offCiocb;
-+ uint16_t numRiocb;
-+ uint16_t offRiocb;
-+#endif
-+} RING_DEF;
-+
-+typedef struct {
-+#ifdef __BIG_ENDIAN_BITFIELD
-+ uint32_t unused1:24;
-+ uint32_t numRing:8;
-+#else /* __LITTLE_ENDIAN_BITFIELD */
-+ uint32_t numRing:8;
-+ uint32_t unused1:24;
-+#endif
-+
-+ RING_DEF ringdef[4];
-+ uint32_t hbainit;
-+} PART_SLIM_VAR;
-+
-+/* Structure for MB Command CONFIG_RING (09) */
-+
-+typedef struct {
-+#ifdef __BIG_ENDIAN_BITFIELD
-+ uint32_t unused2:6;
-+ uint32_t recvSeq:1;
-+ uint32_t recvNotify:1;
-+ uint32_t numMask:8;
-+ uint32_t profile:8;
-+ uint32_t unused1:4;
-+ uint32_t ring:4;
-+#else /* __LITTLE_ENDIAN_BITFIELD */
-+ uint32_t ring:4;
-+ uint32_t unused1:4;
-+ uint32_t profile:8;
-+ uint32_t numMask:8;
-+ uint32_t recvNotify:1;
-+ uint32_t recvSeq:1;
-+ uint32_t unused2:6;
-+#endif
-+
-+#ifdef __BIG_ENDIAN_BITFIELD
-+ uint16_t maxRespXchg;
-+ uint16_t maxOrigXchg;
-+#else /* __LITTLE_ENDIAN_BITFIELD */
-+ uint16_t maxOrigXchg;
-+ uint16_t maxRespXchg;
-+#endif
-+
-+ RR_REG rrRegs[6];
-+} CONFIG_RING_VAR;
-+
-+/* Structure for MB Command RESET_RING (10) */
-+
-+typedef struct {
-+ uint32_t ring_no;
-+} RESET_RING_VAR;
-+
-+/* Structure for MB Command READ_CONFIG (11) */
-+
-+typedef struct {
-+#ifdef __BIG_ENDIAN_BITFIELD
-+ uint32_t cr:1;
-+ uint32_t ci:1;
-+ uint32_t cr_delay:6;
-+ uint32_t cr_count:8;
-+ uint32_t InitBBC:8;
-+ uint32_t MaxBBC:8;
-+#else /* __LITTLE_ENDIAN_BITFIELD */
-+ uint32_t MaxBBC:8;
-+ uint32_t InitBBC:8;
-+ uint32_t cr_count:8;
-+ uint32_t cr_delay:6;
-+ uint32_t ci:1;
-+ uint32_t cr:1;
-+#endif
-+
-+#ifdef __BIG_ENDIAN_BITFIELD
-+ uint32_t topology:8;
-+ uint32_t myDid:24;
-+#else /* __LITTLE_ENDIAN_BITFIELD */
-+ uint32_t myDid:24;
-+ uint32_t topology:8;
-+#endif
-+
-+ /* Defines for topology (defined previously) */
-+#ifdef __BIG_ENDIAN_BITFIELD
-+ uint32_t AR:1;
-+ uint32_t IR:1;
-+ uint32_t rsvd1:29;
-+ uint32_t ack0:1;
-+#else /* __LITTLE_ENDIAN_BITFIELD */
-+ uint32_t ack0:1;
-+ uint32_t rsvd1:29;
-+ uint32_t IR:1;
-+ uint32_t AR:1;
-+#endif
-+
-+ uint32_t edtov;
-+ uint32_t arbtov;
-+ uint32_t ratov;
-+ uint32_t rttov;
-+ uint32_t altov;
-+ uint32_t lmt;
-+#define LMT_RESERVED 0x0 /* Not used */
-+#define LMT_266_10bit 0x1 /* 265.625 Mbaud 10 bit iface */
-+#define LMT_532_10bit 0x2 /* 531.25 Mbaud 10 bit iface */
-+#define LMT_1063_20bit 0x3 /* 1062.5 Mbaud 20 bit iface */
-+#define LMT_1063_10bit 0x4 /* 1062.5 Mbaud 10 bit iface */
-+#define LMT_2125_10bit 0x8 /* 2125 Mbaud 10 bit iface */
-+#define LMT_4250_10bit 0x40 /* 4250 Mbaud 10 bit iface */
-+
-+ uint32_t rsvd2;
-+ uint32_t rsvd3;
-+ uint32_t max_xri;
-+ uint32_t max_iocb;
-+ uint32_t max_rpi;
-+ uint32_t avail_xri;
-+ uint32_t avail_iocb;
-+ uint32_t avail_rpi;
-+ uint32_t default_rpi;
-+} READ_CONFIG_VAR;
-+
-+/* Structure for MB Command READ_RCONFIG (12) */
-+
-+typedef struct {
-+#ifdef __BIG_ENDIAN_BITFIELD
-+ uint32_t rsvd2:7;
-+ uint32_t recvNotify:1;
-+ uint32_t numMask:8;
-+ uint32_t profile:8;
-+ uint32_t rsvd1:4;
-+ uint32_t ring:4;
-+#else /* __LITTLE_ENDIAN_BITFIELD */
-+ uint32_t ring:4;
-+ uint32_t rsvd1:4;
-+ uint32_t profile:8;
-+ uint32_t numMask:8;
-+ uint32_t recvNotify:1;
-+ uint32_t rsvd2:7;
-+#endif
-+
-+#ifdef __BIG_ENDIAN_BITFIELD
-+ uint16_t maxResp;
-+ uint16_t maxOrig;
-+#else /* __LITTLE_ENDIAN_BITFIELD */
-+ uint16_t maxOrig;
-+ uint16_t maxResp;
-+#endif
-+
-+ RR_REG rrRegs[6];
-+
-+#ifdef __BIG_ENDIAN_BITFIELD
-+ uint16_t cmdRingOffset;
-+ uint16_t cmdEntryCnt;
-+ uint16_t rspRingOffset;
-+ uint16_t rspEntryCnt;
-+ uint16_t nextCmdOffset;
-+ uint16_t rsvd3;
-+ uint16_t nextRspOffset;
-+ uint16_t rsvd4;
-+#else /* __LITTLE_ENDIAN_BITFIELD */
-+ uint16_t cmdEntryCnt;
-+ uint16_t cmdRingOffset;
-+ uint16_t rspEntryCnt;
-+ uint16_t rspRingOffset;
-+ uint16_t rsvd3;
-+ uint16_t nextCmdOffset;
-+ uint16_t rsvd4;
-+ uint16_t nextRspOffset;
-+#endif
-+} READ_RCONF_VAR;
-+
-+/* Structure for MB Command READ_SPARM (13) */
-+/* Structure for MB Command READ_SPARM64 (0x8D) */
-+
-+typedef struct {
-+ uint32_t rsvd1;
-+ uint32_t rsvd2;
-+ union {
-+ struct ulp_bde sp; /* This BDE points to struct serv_parm
-+ structure */
-+ struct ulp_bde64 sp64;
-+ } un;
-+} READ_SPARM_VAR;
-+
-+/* Structure for MB Command READ_STATUS (14) */
-+
-+typedef struct {
-+#ifdef __BIG_ENDIAN_BITFIELD
-+ uint32_t rsvd1:31;
-+ uint32_t clrCounters:1;
-+ uint16_t activeXriCnt;
-+ uint16_t activeRpiCnt;
-+#else /* __LITTLE_ENDIAN_BITFIELD */
-+ uint32_t clrCounters:1;
-+ uint32_t rsvd1:31;
-+ uint16_t activeRpiCnt;
-+ uint16_t activeXriCnt;
-+#endif
-+
-+ uint32_t xmitByteCnt;
-+ uint32_t rcvByteCnt;
-+ uint32_t xmitFrameCnt;
-+ uint32_t rcvFrameCnt;
-+ uint32_t xmitSeqCnt;
-+ uint32_t rcvSeqCnt;
-+ uint32_t totalOrigExchanges;
-+ uint32_t totalRespExchanges;
-+ uint32_t rcvPbsyCnt;
-+ uint32_t rcvFbsyCnt;
-+} READ_STATUS_VAR;
-+
-+/* Structure for MB Command READ_RPI (15) */
-+/* Structure for MB Command READ_RPI64 (0x8F) */
-+
-+typedef struct {
-+#ifdef __BIG_ENDIAN_BITFIELD
-+ uint16_t nextRpi;
-+ uint16_t reqRpi;
-+ uint32_t rsvd2:8;
-+ uint32_t DID:24;
-+#else /* __LITTLE_ENDIAN_BITFIELD */
-+ uint16_t reqRpi;
-+ uint16_t nextRpi;
-+ uint32_t DID:24;
-+ uint32_t rsvd2:8;
-+#endif
-+
-+ union {
-+ struct ulp_bde sp;
-+ struct ulp_bde64 sp64;
-+ } un;
-+
-+} READ_RPI_VAR;
-+
-+/* Structure for MB Command READ_XRI (16) */
-+
-+typedef struct {
-+#ifdef __BIG_ENDIAN_BITFIELD
-+ uint16_t nextXri;
-+ uint16_t reqXri;
-+ uint16_t rsvd1;
-+ uint16_t rpi;
-+ uint32_t rsvd2:8;
-+ uint32_t DID:24;
-+ uint32_t rsvd3:8;
-+ uint32_t SID:24;
-+ uint32_t rsvd4;
-+ uint8_t seqId;
-+ uint8_t rsvd5;
-+ uint16_t seqCount;
-+ uint16_t oxId;
-+ uint16_t rxId;
-+ uint32_t rsvd6:30;
-+ uint32_t si:1;
-+ uint32_t exchOrig:1;
-+#else /* __LITTLE_ENDIAN_BITFIELD */
-+ uint16_t reqXri;
-+ uint16_t nextXri;
-+ uint16_t rpi;
-+ uint16_t rsvd1;
-+ uint32_t DID:24;
-+ uint32_t rsvd2:8;
-+ uint32_t SID:24;
-+ uint32_t rsvd3:8;
-+ uint32_t rsvd4;
-+ uint16_t seqCount;
-+ uint8_t rsvd5;
-+ uint8_t seqId;
-+ uint16_t rxId;
-+ uint16_t oxId;
-+ uint32_t exchOrig:1;
-+ uint32_t si:1;
-+ uint32_t rsvd6:30;
-+#endif
-+} READ_XRI_VAR;
-+
-+/* Structure for MB Command READ_REV (17) */
-+
-+typedef struct {
-+#ifdef __BIG_ENDIAN_BITFIELD
-+ uint32_t cv:1;
-+ uint32_t rr:1;
-+ uint32_t rsvd1:29;
-+ uint32_t rv:1;
-+#else /* __LITTLE_ENDIAN_BITFIELD */
-+ uint32_t rv:1;
-+ uint32_t rsvd1:29;
-+ uint32_t rr:1;
-+ uint32_t cv:1;
-+#endif
-+
-+ uint32_t biuRev;
-+ uint32_t smRev;
-+ union {
-+ uint32_t smFwRev;
-+ struct {
-+#ifdef __BIG_ENDIAN_BITFIELD
-+ uint8_t ProgType;
-+ uint8_t ProgId;
-+ uint16_t ProgVer:4;
-+ uint16_t ProgRev:4;
-+ uint16_t ProgFixLvl:2;
-+ uint16_t ProgDistType:2;
-+ uint16_t DistCnt:4;
-+#else /* __LITTLE_ENDIAN_BITFIELD */
-+ uint16_t DistCnt:4;
-+ uint16_t ProgDistType:2;
-+ uint16_t ProgFixLvl:2;
-+ uint16_t ProgRev:4;
-+ uint16_t ProgVer:4;
-+ uint8_t ProgId;
-+ uint8_t ProgType;
-+#endif
-+
-+ } b;
-+ } un;
-+ uint32_t endecRev;
-+#ifdef __BIG_ENDIAN_BITFIELD
-+ uint8_t feaLevelHigh;
-+ uint8_t feaLevelLow;
-+ uint8_t fcphHigh;
-+ uint8_t fcphLow;
-+#else /* __LITTLE_ENDIAN_BITFIELD */
-+ uint8_t fcphLow;
-+ uint8_t fcphHigh;
-+ uint8_t feaLevelLow;
-+ uint8_t feaLevelHigh;
-+#endif
-+
-+ uint32_t postKernRev;
-+ uint32_t opFwRev;
-+ uint8_t opFwName[16];
-+ uint32_t sli1FwRev;
-+ uint8_t sli1FwName[16];
-+ uint32_t sli2FwRev;
-+ uint8_t sli2FwName[16];
-+ uint32_t rsvd2;
-+ uint32_t RandomData[7];
-+} READ_REV_VAR;
-+
-+/* Structure for MB Command READ_LINK_STAT (18) */
-+
-+typedef struct {
-+ uint32_t rsvd1;
-+ uint32_t linkFailureCnt;
-+ uint32_t lossSyncCnt;
-+
-+ uint32_t lossSignalCnt;
-+ uint32_t primSeqErrCnt;
-+ uint32_t invalidXmitWord;
-+ uint32_t crcCnt;
-+ uint32_t primSeqTimeout;
-+ uint32_t elasticOverrun;
-+ uint32_t arbTimeout;
-+} READ_LNK_VAR;
-+
-+/* Structure for MB Command REG_LOGIN (19) */
-+/* Structure for MB Command REG_LOGIN64 (0x93) */
-+
-+typedef struct {
-+#ifdef __BIG_ENDIAN_BITFIELD
-+ uint16_t rsvd1;
-+ uint16_t rpi;
-+ uint32_t rsvd2:8;
-+ uint32_t did:24;
-+#else /* __LITTLE_ENDIAN_BITFIELD */
-+ uint16_t rpi;
-+ uint16_t rsvd1;
-+ uint32_t did:24;
-+ uint32_t rsvd2:8;
-+#endif
-+
-+ union {
-+ struct ulp_bde sp;
-+ struct ulp_bde64 sp64;
-+ } un;
-+
-+} REG_LOGIN_VAR;
-+
-+/* Word 30 contents for REG_LOGIN */
-+typedef union {
-+ struct {
-+#ifdef __BIG_ENDIAN_BITFIELD
-+ uint16_t rsvd1:12;
-+ uint16_t wd30_class:4;
-+ uint16_t xri;
-+#else /* __LITTLE_ENDIAN_BITFIELD */
-+ uint16_t xri;
-+ uint16_t wd30_class:4;
-+ uint16_t rsvd1:12;
-+#endif
-+ } f;
-+ uint32_t word;
-+} REG_WD30;
-+
-+/* Structure for MB Command UNREG_LOGIN (20) */
-+
-+typedef struct {
-+#ifdef __BIG_ENDIAN_BITFIELD
-+ uint16_t rsvd1;
-+ uint16_t rpi;
-+#else /* __LITTLE_ENDIAN_BITFIELD */
-+ uint16_t rpi;
-+ uint16_t rsvd1;
-+#endif
-+} UNREG_LOGIN_VAR;
-+
-+/* Structure for MB Command UNREG_D_ID (0x23) */
-+
-+typedef struct {
-+ uint32_t did;
-+} UNREG_D_ID_VAR;
-+
-+/* Structure for MB Command READ_LA (21) */
-+/* Structure for MB Command READ_LA64 (0x95) */
-+
-+typedef struct {
-+ uint32_t eventTag; /* Event tag */
-+#ifdef __BIG_ENDIAN_BITFIELD
-+ uint32_t rsvd1:22;
-+ uint32_t pb:1;
-+ uint32_t il:1;
-+ uint32_t attType:8;
-+#else /* __LITTLE_ENDIAN_BITFIELD */
-+ uint32_t attType:8;
-+ uint32_t il:1;
-+ uint32_t pb:1;
-+ uint32_t rsvd1:22;
-+#endif
-+
-+#define AT_RESERVED 0x00 /* Reserved - attType */
-+#define AT_LINK_UP 0x01 /* Link is up */
-+#define AT_LINK_DOWN 0x02 /* Link is down */
-+
-+#ifdef __BIG_ENDIAN_BITFIELD
-+ uint8_t granted_AL_PA;
-+ uint8_t lipAlPs;
-+ uint8_t lipType;
-+ uint8_t topology;
-+#else /* __LITTLE_ENDIAN_BITFIELD */
-+ uint8_t topology;
-+ uint8_t lipType;
-+ uint8_t lipAlPs;
-+ uint8_t granted_AL_PA;
-+#endif
-+
-+#define TOPOLOGY_PT_PT 0x01 /* Topology is pt-pt / pt-fabric */
-+#define TOPOLOGY_LOOP 0x02 /* Topology is FC-AL */
-+
-+ union {
-+ struct ulp_bde lilpBde; /* This BDE points to a 128 byte buffer
-+ to */
-+ /* store the LILP AL_PA position map into */
-+ struct ulp_bde64 lilpBde64;
-+ } un;
-+
-+#ifdef __BIG_ENDIAN_BITFIELD
-+ uint32_t Dlu:1;
-+ uint32_t Dtf:1;
-+ uint32_t Drsvd2:14;
-+ uint32_t DlnkSpeed:8;
-+ uint32_t DnlPort:4;
-+ uint32_t Dtx:2;
-+ uint32_t Drx:2;
-+#else /* __LITTLE_ENDIAN_BITFIELD */
-+ uint32_t Drx:2;
-+ uint32_t Dtx:2;
-+ uint32_t DnlPort:4;
-+ uint32_t DlnkSpeed:8;
-+ uint32_t Drsvd2:14;
-+ uint32_t Dtf:1;
-+ uint32_t Dlu:1;
-+#endif
-+
-+#ifdef __BIG_ENDIAN_BITFIELD
-+ uint32_t Ulu:1;
-+ uint32_t Utf:1;
-+ uint32_t Ursvd2:14;
-+ uint32_t UlnkSpeed:8;
-+ uint32_t UnlPort:4;
-+ uint32_t Utx:2;
-+ uint32_t Urx:2;
-+#else /* __LITTLE_ENDIAN_BITFIELD */
-+ uint32_t Urx:2;
-+ uint32_t Utx:2;
-+ uint32_t UnlPort:4;
-+ uint32_t UlnkSpeed:8;
-+ uint32_t Ursvd2:14;
-+ uint32_t Utf:1;
-+ uint32_t Ulu:1;
-+#endif
-+
-+#define LA_UNKNW_LINK 0x0 /* lnkSpeed */
-+#define LA_1GHZ_LINK 0x04 /* lnkSpeed */
-+#define LA_2GHZ_LINK 0x08 /* lnkSpeed */
-+#define LA_4GHZ_LINK 0x10 /* lnkSpeed */
-+#define LA_8GHZ_LINK 0x20 /* lnkSpeed */
-+#define LA_10GHZ_LINK 0x40 /* lnkSpeed */
-+
-+} READ_LA_VAR;
-+
-+/* Structure for MB Command CLEAR_LA (22) */
-+
-+typedef struct {
-+ uint32_t eventTag; /* Event tag */
-+ uint32_t rsvd1;
-+} CLEAR_LA_VAR;
-+
-+/* Structure for MB Command DUMP */
-+
-+typedef struct {
-+#ifdef __BIG_ENDIAN_BITFIELD
-+ uint32_t rsvd:25;
-+ uint32_t ra:1;
-+ uint32_t co:1;
-+ uint32_t cv:1;
-+ uint32_t type:4;
-+ uint32_t entry_index:16;
-+ uint32_t region_id:16;
-+#else /* __LITTLE_ENDIAN_BITFIELD */
-+ uint32_t type:4;
-+ uint32_t cv:1;
-+ uint32_t co:1;
-+ uint32_t ra:1;
-+ uint32_t rsvd:25;
-+ uint32_t region_id:16;
-+ uint32_t entry_index:16;
-+#endif
-+
-+ uint32_t rsvd1;
-+ uint32_t word_cnt;
-+ uint32_t resp_offset;
-+} DUMP_VAR;
-+
-+#define DMP_MEM_REG 0x1
-+#define DMP_NV_PARAMS 0x2
-+
-+#define DMP_REGION_VPD 0xe
-+#define DMP_VPD_SIZE 0x400 /* maximum amount of VPD */
-+#define DMP_RSP_OFFSET 0x14 /* word 5 contains first word of rsp */
-+#define DMP_RSP_SIZE 0x6C /* maximum of 27 words of rsp data */
-+
-+/* Structure for MB Command CONFIG_PORT (0x88) */
-+
-+typedef struct {
-+ uint32_t pcbLen;
-+ uint32_t pcbLow; /* bit 31:0 of memory based port config block */
-+ uint32_t pcbHigh; /* bit 63:32 of memory based port config block */
-+ uint32_t hbainit[5];
-+} CONFIG_PORT_VAR;
-+
-+/* SLI-2 Port Control Block */
-+
-+/* SLIM POINTER */
-+#define SLIMOFF 0x30 /* WORD */
-+
-+typedef struct _SLI2_RDSC {
-+ uint32_t cmdEntries;
-+ uint32_t cmdAddrLow;
-+ uint32_t cmdAddrHigh;
-+
-+ uint32_t rspEntries;
-+ uint32_t rspAddrLow;
-+ uint32_t rspAddrHigh;
-+} SLI2_RDSC;
-+
-+typedef struct _PCB {
-+#ifdef __BIG_ENDIAN_BITFIELD
-+ uint32_t type:8;
-+#define TYPE_NATIVE_SLI2 0x01
-+ uint32_t feature:8;
-+#define FEATURE_INITIAL_SLI2 0x01
-+ uint32_t rsvd:12;
-+ uint32_t maxRing:4;
-+#else /* __LITTLE_ENDIAN_BITFIELD */
-+ uint32_t maxRing:4;
-+ uint32_t rsvd:12;
-+ uint32_t feature:8;
-+#define FEATURE_INITIAL_SLI2 0x01
-+ uint32_t type:8;
-+#define TYPE_NATIVE_SLI2 0x01
-+#endif
-+
-+ uint32_t mailBoxSize;
-+ uint32_t mbAddrLow;
-+ uint32_t mbAddrHigh;
-+
-+ uint32_t hgpAddrLow;
-+ uint32_t hgpAddrHigh;
-+
-+ uint32_t pgpAddrLow;
-+ uint32_t pgpAddrHigh;
-+ SLI2_RDSC rdsc[MAX_RINGS];
-+} PCB_t;
-+
-+/* NEW_FEATURE */
-+typedef struct {
-+#ifdef __BIG_ENDIAN_BITFIELD
-+ uint32_t rsvd0:27;
-+ uint32_t discardFarp:1;
-+ uint32_t IPEnable:1;
-+ uint32_t nodeName:1;
-+ uint32_t portName:1;
-+ uint32_t filterEnable:1;
-+#else /* __LITTLE_ENDIAN_BITFIELD */
-+ uint32_t filterEnable:1;
-+ uint32_t portName:1;
-+ uint32_t nodeName:1;
-+ uint32_t IPEnable:1;
-+ uint32_t discardFarp:1;
-+ uint32_t rsvd:27;
-+#endif
-+
-+ uint8_t portname[8]; /* Used to be struct lpfc_name */
-+ uint8_t nodename[8];
-+ uint32_t rsvd1;
-+ uint32_t rsvd2;
-+ uint32_t rsvd3;
-+ uint32_t IPAddress;
-+} CONFIG_FARP_VAR;
-+
-+/* Union of all Mailbox Command types */
-+#define MAILBOX_CMD_WSIZE 32
-+
-+typedef union {
-+ uint32_t varWords[MAILBOX_CMD_WSIZE - 1];
-+ LOAD_SM_VAR varLdSM; /* cmd = 1 (LOAD_SM) */
-+ READ_NV_VAR varRDnvp; /* cmd = 2 (READ_NVPARMS) */
-+ WRITE_NV_VAR varWTnvp; /* cmd = 3 (WRITE_NVPARMS) */
-+ BIU_DIAG_VAR varBIUdiag; /* cmd = 4 (RUN_BIU_DIAG) */
-+ INIT_LINK_VAR varInitLnk; /* cmd = 5 (INIT_LINK) */
-+ DOWN_LINK_VAR varDwnLnk; /* cmd = 6 (DOWN_LINK) */
-+ CONFIG_LINK varCfgLnk; /* cmd = 7 (CONFIG_LINK) */
-+ PART_SLIM_VAR varSlim; /* cmd = 8 (PART_SLIM) */
-+ CONFIG_RING_VAR varCfgRing; /* cmd = 9 (CONFIG_RING) */
-+ RESET_RING_VAR varRstRing; /* cmd = 10 (RESET_RING) */
-+ READ_CONFIG_VAR varRdConfig; /* cmd = 11 (READ_CONFIG) */
-+ READ_RCONF_VAR varRdRConfig; /* cmd = 12 (READ_RCONFIG) */
-+ READ_SPARM_VAR varRdSparm; /* cmd = 13 (READ_SPARM(64)) */
-+ READ_STATUS_VAR varRdStatus; /* cmd = 14 (READ_STATUS) */
-+ READ_RPI_VAR varRdRPI; /* cmd = 15 (READ_RPI(64)) */
-+ READ_XRI_VAR varRdXRI; /* cmd = 16 (READ_XRI) */
-+ READ_REV_VAR varRdRev; /* cmd = 17 (READ_REV) */
-+ READ_LNK_VAR varRdLnk; /* cmd = 18 (READ_LNK_STAT) */
-+ REG_LOGIN_VAR varRegLogin; /* cmd = 19 (REG_LOGIN(64)) */
-+ UNREG_LOGIN_VAR varUnregLogin; /* cmd = 20 (UNREG_LOGIN) */
-+ READ_LA_VAR varReadLA; /* cmd = 21 (READ_LA(64)) */
-+ CLEAR_LA_VAR varClearLA; /* cmd = 22 (CLEAR_LA) */
-+ DUMP_VAR varDmp; /* Warm Start DUMP mbx cmd */
-+ UNREG_D_ID_VAR varUnregDID; /* cmd = 0x23 (UNREG_D_ID) */
-+ CONFIG_FARP_VAR varCfgFarp; /* cmd = 0x25 (CONFIG_FARP) NEW_FEATURE */
-+ CONFIG_PORT_VAR varCfgPort; /* cmd = 0x88 (CONFIG_PORT) */
-+} MAILVARIANTS;
-+
-+/*
-+ * SLI-2 specific structures
-+ */
-+
-+typedef struct {
-+ uint32_t cmdPutInx;
-+ uint32_t rspGetInx;
-+} HGP;
-+
-+typedef struct {
-+ uint32_t cmdGetInx;
-+ uint32_t rspPutInx;
-+} PGP;
-+
-+typedef struct _SLI2_DESC {
-+ HGP host[MAX_RINGS];
-+ uint32_t unused1[16];
-+ PGP port[MAX_RINGS];
-+} SLI2_DESC;
-+
-+typedef union {
-+ SLI2_DESC s2;
-+} SLI_VAR;
-+
-+typedef volatile struct {
-+#ifdef __BIG_ENDIAN_BITFIELD
-+ uint16_t mbxStatus;
-+ uint8_t mbxCommand;
-+ uint8_t mbxReserved:6;
-+ uint8_t mbxHc:1;
-+ uint8_t mbxOwner:1; /* Low order bit first word */
-+#else /* __LITTLE_ENDIAN_BITFIELD */
-+ uint8_t mbxOwner:1; /* Low order bit first word */
-+ uint8_t mbxHc:1;
-+ uint8_t mbxReserved:6;
-+ uint8_t mbxCommand;
-+ uint16_t mbxStatus;
-+#endif
-+
-+ MAILVARIANTS un;
-+ SLI_VAR us;
-+} MAILBOX_t;
-+
-+/*
-+ * Begin Structure Definitions for IOCB Commands
-+ */
-+
-+typedef struct {
-+#ifdef __BIG_ENDIAN_BITFIELD
-+ uint8_t statAction;
-+ uint8_t statRsn;
-+ uint8_t statBaExp;
-+ uint8_t statLocalError;
-+#else /* __LITTLE_ENDIAN_BITFIELD */
-+ uint8_t statLocalError;
-+ uint8_t statBaExp;
-+ uint8_t statRsn;
-+ uint8_t statAction;
-+#endif
-+ /* statRsn P/F_RJT reason codes */
-+#define RJT_BAD_D_ID 0x01 /* Invalid D_ID field */
-+#define RJT_BAD_S_ID 0x02 /* Invalid S_ID field */
-+#define RJT_UNAVAIL_TEMP 0x03 /* N_Port unavailable temp. */
-+#define RJT_UNAVAIL_PERM 0x04 /* N_Port unavailable perm. */
-+#define RJT_UNSUP_CLASS 0x05 /* Class not supported */
-+#define RJT_DELIM_ERR 0x06 /* Delimiter usage error */
-+#define RJT_UNSUP_TYPE 0x07 /* Type not supported */
-+#define RJT_BAD_CONTROL 0x08 /* Invalid link control */
-+#define RJT_BAD_RCTL 0x09 /* R_CTL invalid */
-+#define RJT_BAD_FCTL 0x0A /* F_CTL invalid */
-+#define RJT_BAD_OXID 0x0B /* OX_ID invalid */
-+#define RJT_BAD_RXID 0x0C /* RX_ID invalid */
-+#define RJT_BAD_SEQID 0x0D /* SEQ_ID invalid */
-+#define RJT_BAD_DFCTL 0x0E /* DF_CTL invalid */
-+#define RJT_BAD_SEQCNT 0x0F /* SEQ_CNT invalid */
-+#define RJT_BAD_PARM 0x10 /* Param. field invalid */
-+#define RJT_XCHG_ERR 0x11 /* Exchange error */
-+#define RJT_PROT_ERR 0x12 /* Protocol error */
-+#define RJT_BAD_LENGTH 0x13 /* Invalid Length */
-+#define RJT_UNEXPECTED_ACK 0x14 /* Unexpected ACK */
-+#define RJT_LOGIN_REQUIRED 0x16 /* Login required */
-+#define RJT_TOO_MANY_SEQ 0x17 /* Excessive sequences */
-+#define RJT_XCHG_NOT_STRT 0x18 /* Exchange not started */
-+#define RJT_UNSUP_SEC_HDR 0x19 /* Security hdr not supported */
-+#define RJT_UNAVAIL_PATH 0x1A /* Fabric Path not available */
-+#define RJT_VENDOR_UNIQUE 0xFF /* Vendor unique error */
-+
-+#define IOERR_SUCCESS 0x00 /* statLocalError */
-+#define IOERR_MISSING_CONTINUE 0x01
-+#define IOERR_SEQUENCE_TIMEOUT 0x02
-+#define IOERR_INTERNAL_ERROR 0x03
-+#define IOERR_INVALID_RPI 0x04
-+#define IOERR_NO_XRI 0x05
-+#define IOERR_ILLEGAL_COMMAND 0x06
-+#define IOERR_XCHG_DROPPED 0x07
-+#define IOERR_ILLEGAL_FIELD 0x08
-+#define IOERR_BAD_CONTINUE 0x09
-+#define IOERR_TOO_MANY_BUFFERS 0x0A
-+#define IOERR_RCV_BUFFER_WAITING 0x0B
-+#define IOERR_NO_CONNECTION 0x0C
-+#define IOERR_TX_DMA_FAILED 0x0D
-+#define IOERR_RX_DMA_FAILED 0x0E
-+#define IOERR_ILLEGAL_FRAME 0x0F
-+#define IOERR_EXTRA_DATA 0x10
-+#define IOERR_NO_RESOURCES 0x11
-+#define IOERR_RESERVED 0x12
-+#define IOERR_ILLEGAL_LENGTH 0x13
-+#define IOERR_UNSUPPORTED_FEATURE 0x14
-+#define IOERR_ABORT_IN_PROGRESS 0x15
-+#define IOERR_ABORT_REQUESTED 0x16
-+#define IOERR_RECEIVE_BUFFER_TIMEOUT 0x17
-+#define IOERR_LOOP_OPEN_FAILURE 0x18
-+#define IOERR_RING_RESET 0x19
-+#define IOERR_LINK_DOWN 0x1A
-+#define IOERR_CORRUPTED_DATA 0x1B
-+#define IOERR_CORRUPTED_RPI 0x1C
-+#define IOERR_OUT_OF_ORDER_DATA 0x1D
-+#define IOERR_OUT_OF_ORDER_ACK 0x1E
-+#define IOERR_DUP_FRAME 0x1F
-+#define IOERR_LINK_CONTROL_FRAME 0x20 /* ACK_N received */
-+#define IOERR_BAD_HOST_ADDRESS 0x21
-+#define IOERR_RCV_HDRBUF_WAITING 0x22
-+#define IOERR_MISSING_HDR_BUFFER 0x23
-+#define IOERR_MSEQ_CHAIN_CORRUPTED 0x24
-+#define IOERR_ABORTMULT_REQUESTED 0x25
-+#define IOERR_BUFFER_SHORTAGE 0x28
-+#define IOERR_DEFAULT 0x29
-+#define IOERR_CNT 0x2A
-+
-+#define IOERR_DRVR_MASK 0x100
-+#define IOERR_SLI_DOWN 0x101 /* ulpStatus - Driver defined */
-+#define IOERR_SLI_BRESET 0x102
-+#define IOERR_SLI_ABORTED 0x103
-+} PARM_ERR;
-+
-+typedef union {
-+ struct {
-+#ifdef __BIG_ENDIAN_BITFIELD
-+ uint8_t Rctl; /* R_CTL field */
-+ uint8_t Type; /* TYPE field */
-+ uint8_t Dfctl; /* DF_CTL field */
-+ uint8_t Fctl; /* Bits 0-7 of IOCB word 5 */
-+#else /* __LITTLE_ENDIAN_BITFIELD */
-+ uint8_t Fctl; /* Bits 0-7 of IOCB word 5 */
-+ uint8_t Dfctl; /* DF_CTL field */
-+ uint8_t Type; /* TYPE field */
-+ uint8_t Rctl; /* R_CTL field */
-+#endif
-+
-+#define BC 0x02 /* Broadcast Received - Fctl */
-+#define SI 0x04 /* Sequence Initiative */
-+#define LA 0x08 /* Ignore Link Attention state */
-+#define LS 0x80 /* Last Sequence */
-+ } hcsw;
-+ uint32_t reserved;
-+} WORD5;
-+
-+/* IOCB Command template for a generic response */
-+typedef struct {
-+ uint32_t reserved[4];
-+ PARM_ERR perr;
-+} GENERIC_RSP;
-+
-+/* IOCB Command template for XMIT / XMIT_BCAST / RCV_SEQUENCE / XMIT_ELS */
-+typedef struct {
-+ struct ulp_bde xrsqbde[2];
-+ uint32_t xrsqRo; /* Starting Relative Offset */
-+ WORD5 w5; /* Header control/status word */
-+} XR_SEQ_FIELDS;
-+
-+/* IOCB Command template for ELS_REQUEST */
-+typedef struct {
-+ struct ulp_bde elsReq;
-+ struct ulp_bde elsRsp;
-+
-+#ifdef __BIG_ENDIAN_BITFIELD
-+ uint32_t word4Rsvd:7;
-+ uint32_t fl:1;
-+ uint32_t myID:24;
-+ uint32_t word5Rsvd:8;
-+ uint32_t remoteID:24;
-+#else /* __LITTLE_ENDIAN_BITFIELD */
-+ uint32_t myID:24;
-+ uint32_t fl:1;
-+ uint32_t word4Rsvd:7;
-+ uint32_t remoteID:24;
-+ uint32_t word5Rsvd:8;
-+#endif
-+} ELS_REQUEST;
-+
-+/* IOCB Command template for RCV_ELS_REQ */
-+typedef struct {
-+ struct ulp_bde elsReq[2];
-+ uint32_t parmRo;
-+
-+#ifdef __BIG_ENDIAN_BITFIELD
-+ uint32_t word5Rsvd:8;
-+ uint32_t remoteID:24;
-+#else /* __LITTLE_ENDIAN_BITFIELD */
-+ uint32_t remoteID:24;
-+ uint32_t word5Rsvd:8;
-+#endif
-+} RCV_ELS_REQ;
-+
-+/* IOCB Command template for ABORT / CLOSE_XRI */
-+typedef struct {
-+ uint32_t rsvd[3];
-+ uint32_t abortType;
-+#define ABORT_TYPE_ABTX 0x00000000
-+#define ABORT_TYPE_ABTS 0x00000001
-+ uint32_t parm;
-+#ifdef __BIG_ENDIAN_BITFIELD
-+ uint16_t abortContextTag; /* ulpContext from command to abort/close */
-+ uint16_t abortIoTag; /* ulpIoTag from command to abort/close */
-+#else /* __LITTLE_ENDIAN_BITFIELD */
-+ uint16_t abortIoTag; /* ulpIoTag from command to abort/close */
-+ uint16_t abortContextTag; /* ulpContext from command to abort/close */
-+#endif
-+} AC_XRI;
-+
-+/* IOCB Command template for ABORT_MXRI64 */
-+typedef struct {
-+ uint32_t rsvd[3];
-+ uint32_t abortType;
-+ uint32_t parm;
-+ uint32_t iotag32;
-+} A_MXRI64;
-+
-+/* IOCB Command template for GET_RPI */
-+typedef struct {
-+ uint32_t rsvd[4];
-+ uint32_t parmRo;
-+#ifdef __BIG_ENDIAN_BITFIELD
-+ uint32_t word5Rsvd:8;
-+ uint32_t remoteID:24;
-+#else /* __LITTLE_ENDIAN_BITFIELD */
-+ uint32_t remoteID:24;
-+ uint32_t word5Rsvd:8;
-+#endif
-+} GET_RPI;
-+
-+/* IOCB Command template for all FCP Initiator commands */
-+typedef struct {
-+ struct ulp_bde fcpi_cmnd; /* FCP_CMND payload descriptor */
-+ struct ulp_bde fcpi_rsp; /* Rcv buffer */
-+ uint32_t fcpi_parm;
-+ uint32_t fcpi_XRdy; /* transfer ready for IWRITE */
-+} FCPI_FIELDS;
-+
-+/* IOCB Command template for all FCP Target commands */
-+typedef struct {
-+ struct ulp_bde fcpt_Buffer[2]; /* FCP_CMND payload descriptor */
-+ uint32_t fcpt_Offset;
-+ uint32_t fcpt_Length; /* transfer ready for IWRITE */
-+} FCPT_FIELDS;
-+
-+/* SLI-2 IOCB structure definitions */
-+
-+/* IOCB Command template for 64 bit XMIT / XMIT_BCAST / XMIT_ELS */
-+typedef struct {
-+ ULP_BDL bdl;
-+ uint32_t xrsqRo; /* Starting Relative Offset */
-+ WORD5 w5; /* Header control/status word */
-+} XMT_SEQ_FIELDS64;
-+
-+/* IOCB Command template for 64 bit RCV_SEQUENCE64 */
-+typedef struct {
-+ struct ulp_bde64 rcvBde;
-+ uint32_t rsvd1;
-+ uint32_t xrsqRo; /* Starting Relative Offset */
-+ WORD5 w5; /* Header control/status word */
-+} RCV_SEQ_FIELDS64;
-+
-+/* IOCB Command template for ELS_REQUEST64 */
-+typedef struct {
-+ ULP_BDL bdl;
-+#ifdef __BIG_ENDIAN_BITFIELD
-+ uint32_t word4Rsvd:7;
-+ uint32_t fl:1;
-+ uint32_t myID:24;
-+ uint32_t word5Rsvd:8;
-+ uint32_t remoteID:24;
-+#else /* __LITTLE_ENDIAN_BITFIELD */
-+ uint32_t myID:24;
-+ uint32_t fl:1;
-+ uint32_t word4Rsvd:7;
-+ uint32_t remoteID:24;
-+ uint32_t word5Rsvd:8;
-+#endif
-+} ELS_REQUEST64;
-+
-+/* IOCB Command template for GEN_REQUEST64 */
-+typedef struct {
-+ ULP_BDL bdl;
-+ uint32_t xrsqRo; /* Starting Relative Offset */
-+ WORD5 w5; /* Header control/status word */
-+} GEN_REQUEST64;
-+
-+/* IOCB Command template for RCV_ELS_REQ64 */
-+typedef struct {
-+ struct ulp_bde64 elsReq;
-+ uint32_t rcvd1;
-+ uint32_t parmRo;
-+
-+#ifdef __BIG_ENDIAN_BITFIELD
-+ uint32_t word5Rsvd:8;
-+ uint32_t remoteID:24;
-+#else /* __LITTLE_ENDIAN_BITFIELD */
-+ uint32_t remoteID:24;
-+ uint32_t word5Rsvd:8;
-+#endif
-+} RCV_ELS_REQ64;
-+
-+/* IOCB Command template for all 64 bit FCP Initiator commands */
-+typedef struct {
-+ ULP_BDL bdl;
-+ uint32_t fcpi_parm;
-+ uint32_t fcpi_XRdy; /* transfer ready for IWRITE */
-+} FCPI_FIELDS64;
-+
-+/* IOCB Command template for all 64 bit FCP Target commands */
-+typedef struct {
-+ ULP_BDL bdl;
-+ uint32_t fcpt_Offset;
-+ uint32_t fcpt_Length; /* transfer ready for IWRITE */
-+} FCPT_FIELDS64;
-+
-+typedef volatile struct _IOCB { /* IOCB structure */
-+ union {
-+ GENERIC_RSP grsp; /* Generic response */
-+ XR_SEQ_FIELDS xrseq; /* XMIT / BCAST / RCV_SEQUENCE cmd */
-+ struct ulp_bde cont[3]; /* up to 3 continuation bdes */
-+ RCV_ELS_REQ rcvels; /* RCV_ELS_REQ template */
-+ AC_XRI acxri; /* ABORT / CLOSE_XRI template */
-+ A_MXRI64 amxri; /* abort multiple xri command overlay */
-+ GET_RPI getrpi; /* GET_RPI template */
-+ FCPI_FIELDS fcpi; /* FCP Initiator template */
-+ FCPT_FIELDS fcpt; /* FCP target template */
-+
-+ /* SLI-2 structures */
-+
-+ struct ulp_bde64 cont64[2]; /* up to 2 64 bit continuation
-+ bde_64s */
-+ ELS_REQUEST64 elsreq64; /* ELS_REQUEST template */
-+ GEN_REQUEST64 genreq64; /* GEN_REQUEST template */
-+ RCV_ELS_REQ64 rcvels64; /* RCV_ELS_REQ template */
-+ XMT_SEQ_FIELDS64 xseq64; /* XMIT / BCAST cmd */
-+ FCPI_FIELDS64 fcpi64; /* FCP 64 bit Initiator template */
-+ FCPT_FIELDS64 fcpt64; /* FCP 64 bit target template */
-+
-+ uint32_t ulpWord[IOCB_WORD_SZ - 2]; /* generic 6 'words' */
-+ } un;
-+ union {
-+ struct {
-+#ifdef __BIG_ENDIAN_BITFIELD
-+ uint16_t ulpContext; /* High order bits word 6 */
-+ uint16_t ulpIoTag; /* Low order bits word 6 */
-+#else /* __LITTLE_ENDIAN_BITFIELD */
-+ uint16_t ulpIoTag; /* Low order bits word 6 */
-+ uint16_t ulpContext; /* High order bits word 6 */
-+#endif
-+ } t1;
-+ struct {
-+#ifdef __BIG_ENDIAN_BITFIELD
-+ uint16_t ulpContext; /* High order bits word 6 */
-+ uint16_t ulpIoTag1:2; /* Low order bits word 6 */
-+ uint16_t ulpIoTag0:14; /* Low order bits word 6 */
-+#else /* __LITTLE_ENDIAN_BITFIELD */
-+ uint16_t ulpIoTag0:14; /* Low order bits word 6 */
-+ uint16_t ulpIoTag1:2; /* Low order bits word 6 */
-+ uint16_t ulpContext; /* High order bits word 6 */
-+#endif
-+ } t2;
-+ } un1;
-+#define ulpContext un1.t1.ulpContext
-+#define ulpIoTag un1.t1.ulpIoTag
-+#define ulpIoTag0 un1.t2.ulpIoTag0
-+
-+#ifdef __BIG_ENDIAN_BITFIELD
-+ uint32_t ulpTimeout:8;
-+ uint32_t ulpXS:1;
-+ uint32_t ulpFCP2Rcvy:1;
-+ uint32_t ulpPU:2;
-+ uint32_t ulpIr:1;
-+ uint32_t ulpClass:3;
-+ uint32_t ulpCommand:8;
-+ uint32_t ulpStatus:4;
-+ uint32_t ulpBdeCount:2;
-+ uint32_t ulpLe:1;
-+ uint32_t ulpOwner:1; /* Low order bit word 7 */
-+#else /* __LITTLE_ENDIAN_BITFIELD */
-+ uint32_t ulpOwner:1; /* Low order bit word 7 */
-+ uint32_t ulpLe:1;
-+ uint32_t ulpBdeCount:2;
-+ uint32_t ulpStatus:4;
-+ uint32_t ulpCommand:8;
-+ uint32_t ulpClass:3;
-+ uint32_t ulpIr:1;
-+ uint32_t ulpPU:2;
-+ uint32_t ulpFCP2Rcvy:1;
-+ uint32_t ulpXS:1;
-+ uint32_t ulpTimeout:8;
-+#endif
-+
-+#define PARM_UNUSED 0 /* PU field (Word 4) not used */
-+#define PARM_REL_OFF 1 /* PU field (Word 4) = R. O. */
-+#define PARM_READ_CHECK 2 /* PU field (Word 4) = Data Transfer Length */
-+#define CLASS1 0 /* Class 1 */
-+#define CLASS2 1 /* Class 2 */
-+#define CLASS3 2 /* Class 3 */
-+#define CLASS_FCP_INTERMIX 7 /* FCP Data->Cls 1, all else->Cls 2 */
-+
-+#define IOSTAT_SUCCESS 0x0 /* ulpStatus - HBA defined */
-+#define IOSTAT_FCP_RSP_ERROR 0x1
-+#define IOSTAT_REMOTE_STOP 0x2
-+#define IOSTAT_LOCAL_REJECT 0x3
-+#define IOSTAT_NPORT_RJT 0x4
-+#define IOSTAT_FABRIC_RJT 0x5
-+#define IOSTAT_NPORT_BSY 0x6
-+#define IOSTAT_FABRIC_BSY 0x7
-+#define IOSTAT_INTERMED_RSP 0x8
-+#define IOSTAT_LS_RJT 0x9
-+#define IOSTAT_BA_RJT 0xA
-+#define IOSTAT_RSVD1 0xB
-+#define IOSTAT_RSVD2 0xC
-+#define IOSTAT_RSVD3 0xD
-+#define IOSTAT_RSVD4 0xE
-+#define IOSTAT_RSVD5 0xF
-+#define IOSTAT_DRIVER_REJECT 0x10 /* ulpStatus - Driver defined */
-+#define IOSTAT_DEFAULT 0xF /* Same as rsvd5 for now */
-+#define IOSTAT_CNT 0x11
-+
-+} IOCB_t;
-+
-+
-+#define SLI1_SLIM_SIZE (4 * 1024)
-+
-+/* Up to 498 IOCBs will fit into 16k
-+ * 256 (MAILBOX_t) + 140 (PCB_t) + ( 32 (IOCB_t) * 498 ) = < 16384
-+ */
-+#define SLI2_SLIM_SIZE (16 * 1024)
-+
-+/* Maximum IOCBs that will fit in SLI2 slim */
-+#define MAX_SLI2_IOCB 498
-+
-+struct lpfc_sli2_slim {
-+ MAILBOX_t mbx;
-+ PCB_t pcb;
-+ IOCB_t IOCBs[MAX_SLI2_IOCB];
-+};
-+
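[Editorial note: the SLIM sizing comment above can be checked with simple arithmetic. The standalone sketch below uses the byte sizes quoted in that comment (256 for MAILBOX_t, 140 for PCB_t) rather than sizes computed from the structures, and confirms that 498 IOCBs still fit in the 16 KB SLI-2 SLIM.]

    #include <assert.h>

    enum {
            MBX_BYTES  = 256,                 /* size quoted for MAILBOX_t     */
            PCB_BYTES  = 140,                 /* size quoted for PCB_t         */
            IOCB_BYTES = 8 * 4,               /* IOCB_WORD_SZ (8) 32-bit words */
            SLIM_USED  = MBX_BYTES + PCB_BYTES + IOCB_BYTES * 498  /* = 16332 */
    };

    int main(void)
    {
            assert(SLIM_USED <= 16 * 1024);   /* 16332 <= 16384, as the comment states */
            return 0;
    }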
-+/*******************************************************************
-+This routine checks the PCI device ID to allow special handling for LC HBAs.
-+
-+Parameters:
-+device : struct pci_dev's device field
-+
-+return 1 => TRUE
-+ 0 => FALSE
-+ *******************************************************************/
-+static inline int
-+lpfc_is_LC_HBA(unsigned short device)
-+{
-+ if ((device == PCI_DEVICE_ID_TFLY) ||
-+ (device == PCI_DEVICE_ID_PFLY) ||
-+ (device == PCI_DEVICE_ID_LP101) ||
-+ (device == PCI_DEVICE_ID_BMID) ||
-+ (device == PCI_DEVICE_ID_BSMB) ||
-+ (device == PCI_DEVICE_ID_ZMID) ||
-+ (device == PCI_DEVICE_ID_ZSMB) ||
-+ (device == PCI_DEVICE_ID_RFLY))
-+ return 1;
-+ else
-+ return 0;
-+}
-+
-+#endif /* _H_LPFC_HW */
---- linux-2.6.8.1-t044-driver-update//drivers/scsi/lpfc/lpfc_logmsg.h 1970-01-01 03:00:00.000000000 +0300
-+++ rhel4u2//drivers/scsi/lpfc/lpfc_logmsg.h 2005-10-19 11:47:17.000000000 +0400
-@@ -0,0 +1,46 @@
-+/*******************************************************************
-+ * This file is part of the Emulex Linux Device Driver for *
-+ * Fibre Channel Host Bus Adapters. *
-+ * Copyright (C) 2003-2005 Emulex. All rights reserved. *
-+ * EMULEX and SLI are trademarks of Emulex. *
-+ * www.emulex.com *
-+ * *
-+ * This program is free software; you can redistribute it and/or *
-+ * modify it under the terms of version 2 of the GNU General *
-+ * Public License as published by the Free Software Foundation. *
-+ * This program is distributed in the hope that it will be useful. *
-+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
-+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
-+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
-+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
-+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
-+ * more details, a copy of which can be found in the file COPYING *
-+ * included with this package. *
-+ *******************************************************************/
-+
-+/*
-+ * $Id: lpfc_logmsg.h 1.33.1.2 2005/06/13 17:16:30EDT sf_support Exp $
-+ */
-+
-+#ifndef _H_LPFC_LOGMSG
-+#define _H_LPFC_LOGMSG
-+
-+#define LOG_ELS 0x1 /* ELS events */
-+#define LOG_DISCOVERY 0x2 /* Link discovery events */
-+#define LOG_MBOX 0x4 /* Mailbox events */
-+#define LOG_INIT 0x8 /* Initialization events */
-+#define LOG_LINK_EVENT 0x10 /* Link events */
-+#define LOG_IP 0x20 /* IP traffic history */
-+#define LOG_FCP 0x40 /* FCP traffic history */
-+#define LOG_NODE 0x80 /* Node table events */
-+#define LOG_MISC 0x400 /* Miscellaneous events */
-+#define LOG_SLI 0x800 /* SLI events */
-+#define LOG_CHK_COND 0x1000 /* FCP Check condition flag */
-+#define LOG_LIBDFC 0x2000 /* Libdfc events */
-+#define LOG_ALL_MSG 0xffff /* LOG all messages */
-+
-+#define lpfc_printf_log(phba, level, mask, fmt, arg...) \
-+ { if (((mask) &(phba)->cfg_log_verbose) || (level[1] <= '3')) \
-+ dev_printk(level, &((phba)->pcidev)->dev, fmt, ##arg); }
-+#endif
-+
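[Editorial note: for context on the lpfc_printf_log macro above, a message is emitted either when its LOG_* mask bit is set in the adapter's cfg_log_verbose, or unconditionally when the printk level is KERN_ERR or more severe (the level[1] <= '3' test inspects the digit in the "<n>" level string). The sketch below is a hedged usage example with illustrative message text, assuming a struct lpfc_hba *phba in scope; it is not taken from the driver.]

    /* Printed only when LOG_INIT is enabled in cfg_log_verbose. */
    lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
                    "0000 illustrative informational message\n");

    /* Printed regardless of cfg_log_verbose, because KERN_ERR is "<3>". */
    lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                    "0000 illustrative error message\n");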
---- linux-2.6.8.1-t044-driver-update//drivers/scsi/Makefile 2005-10-25 14:49:22.457611512 +0400
-+++ rhel4u2//drivers/scsi/Makefile 2005-10-19 11:47:17.000000000 +0400
-@@ -129,6 +132,7 @@ obj-$(CONFIG_SCSI_SATA_VITESSE) += libat
- obj-$(CONFIG_SCSI_SATA_SIS) += libata.o sata_sis.o
- obj-$(CONFIG_SCSI_SATA_SX4) += libata.o sata_sx4.o
- obj-$(CONFIG_SCSI_SATA_NV) += libata.o sata_nv.o
-+obj-$(CONFIG_SCSI_LPFC) += lpfc/
-
- obj-$(CONFIG_ARM) += arm/
-
---- linux-2.6.8.1-t044-driver-update//drivers/scsi/Kconfig 2005-10-25 13:18:59.017099792 +0400
-+++ rhel4u2//drivers/scsi/Kconfig 2005-10-19 11:47:17.000000000 +0400
-@@ -599,6 +621,13 @@ config SCSI_EATA_PIO
- To compile this driver as a module, choose M here: the
- module will be called eata_pio.
-
-+config SCSI_LPFC
-+ tristate "Emulex LightPulse Fibre Channel Support"
-+ depends on PCI && SCSI
-+ help
-+ This lpfc driver supports the Emulex LightPulse
-+ family of Fibre Channel PCI host adapters.
-+
- config SCSI_FUTURE_DOMAIN
- tristate "Future Domain 16xx SCSI/AHA-2920A support"
- depends on (ISA || PCI) && SCSI