79 unsigned long i = 0, j = 0, hi = 0;
107 unsigned long i = 0, j = 0, n = 0, num = 0;
112 while (sha->sas_port[i]) {
113 if (sha->sas_port[i] == dev->port) {
118 while (sha->sas_phy[j]) {
119 if (sha->sas_phy[j] == phy)
123 phyno[n] = (j >= mvi->chip->n_phy) ?
124 (j - mvi->chip->n_phy) : j;
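Lines 123-124 fold a host-wide phy index back into a per-controller index. A minimal standalone sketch of the same arithmetic, using a hypothetical helper name and passing n_phy explicitly (not part of the driver):

/* Sketch only: map a host-wide phy index j onto the local controller,
 * assuming each controller exposes n_phy phys. */
static unsigned long per_controller_phy(unsigned long j, unsigned long n_phy)
{
	return (j >= n_phy) ? (j - n_phy) : j;
}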
143 if (mvi->devices[dev_no].taskfileset == reg_set)
149 static inline void mvs_free_reg_set(struct mvs_info *mvi,
161 static inline u8 mvs_assign_reg_set(struct mvs_info *mvi,
188 if (sha->sas_phy[i] == sas_phy)
235 static void mvs_bytes_dmaed(struct mvs_info *mvi, int i)
252 struct sas_phy *sphy = sas_phy->phy;
262 struct sas_identify_frame *id;
264 id = (struct sas_identify_frame *)phy->frame_rcvd;
265 id->dev_type = phy->identify.device_type;
267 id->target_bits = phy->identify.target_port_protocols;
281 mvi->sas->notify_port_event(sas_phy,
288 unsigned short core_nr;
295 for (j = 0; j < core_nr; j++) {
297 for (i = 0; i < mvi->chip->n_phy; ++i)
298 mvs_bytes_dmaed(mvi, i);
315 static int mvs_task_prep_smp(struct mvs_info *mvi,
350 if ((req_len & 0x3) || (resp_len & 0x3)) {
406 *(u16 *)(buf_oaf + 2) = 0xFFFF;
438 static int mvs_task_prep_ata(struct mvs_info *mvi,
448 u32 tag = tei->tag, hdr_tag;
451 u8 *buf_cmd, *buf_oaf;
457 mv_dprintk("Have not enough regiset for dev %d.\n",
483 if (task->ata_task.use_ncq && mvs_get_ncq_tag(task, &hdr_tag))
484 task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
497 buf_cmd = buf_tmp = slot->buf;
536 resp_len = min(resp_len, max_resp_len);
564 static int mvs_task_prep_ssp(struct mvs_info *mvi,
576 struct ssp_frame_hdr *ssp_hdr;
578 u8 *buf_cmd, *buf_oaf, fburst = 0;
596 if (task->ssp_task.enable_first_burst) {
614 buf_cmd = buf_tmp = slot->buf;
648 resp_len = min(resp_len, max_resp_len);
650 req_len = sizeof(struct ssp_frame_hdr) + 28;
663 ssp_hdr = (struct ssp_frame_hdr *)buf_cmd;
672 memcpy(ssp_hdr->hashed_src_addr,
677 buf_cmd += sizeof(*ssp_hdr);
680 if (ssp_hdr->frame_type != SSP_TASK) {
681 buf_cmd[9] = fburst | task->ssp_task.task_attr |
685 buf_cmd[10] = tmf->tmf;
703 #define DEV_IS_GONE(mvi_dev) ((!mvi_dev || (mvi_dev->dev_type == NO_DEVICE)))
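The macro on line 703 treats a NULL device, or one whose dev_type has been reset to NO_DEVICE, as gone. A hedged usage sketch (hypothetical caller, not taken from the driver):

/* Sketch: bail out early when the target device has already vanished. */
if (DEV_IS_GONE(mvi_dev)) {
	mv_dprintk("device gone, dropping request\n"); /* hypothetical message */
	return -ENODEV;
}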
704 static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf,
740 if (tei.port && !tei.port->port_attached && !tmf) {
744 "device.\n", dev->port->id);
753 "device.\n", dev->port->id);
797 rc = mvs_task_prep_smp(mvi, &tei);
800 rc = mvs_task_prep_ssp(mvi, &tei, is_tmf, tmf);
805 rc = mvs_task_prep_ata(mvi, &tei);
809 "unknown sas_task proto: 0x%x\n",
817 goto err_out_slot_buf;
839 dev_printk(KERN_ERR, mvi->dev, "mvsas prep failed[%d]!\n", rc);
852 for (; *num > 0; --*num) {
858 INIT_LIST_HEAD(&mvs_list->list);
869 static inline void mvs_task_free_list(struct mvs_task_list *mvs_list)
884 static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags,
891 unsigned long flags = 0;
896 rc = mvs_task_prep(task, mvi, is_tmf, tmf, &pass);
898 dev_printk(KERN_ERR, mvi->dev, "mvsas exec failed[%d]!\n", rc);
903 spin_unlock_irqrestore(&mvi->lock, flags);
908 static int mvs_collector_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags,
909 struct completion *completion, int is_tmf,
921 unsigned long flags = 0;
923 mvs_list = mvs_task_alloc_list(&n, gfp_flags);
943 rc = mvs_task_prep(t, mvi, is_tmf, tmf, &pass[mvi->id]);
945 dev_printk(KERN_ERR, mvi->dev, "mvsas exec failed[%d]!\n", rc);
946 spin_unlock_irqrestore(&mvi->lock, flags);
961 mvs_task_free_list(mvs_list);
973 return mvs_task_exec(task, num, gfp_flags, NULL, 0, NULL);
975 return mvs_collector_task_exec(task, num, gfp_flags, NULL, 0, NULL);
1014 list_del_init(&slot->entry);
1019 mvs_slot_free(mvi, slot_idx);
1022 static void mvs_update_wideport(struct mvs_info *mvi, int phy_no)
1043 static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i)
1061 mvs_update_wideport(mvi, i);
1071 static void *mvs_get_d2h_reg(struct mvs_info *mvi, int i, void *buf)
1090 if (((s[1] & 0x00FFFFFF) == 0x00EB1401) && (*(u8 *)&s[3] == 0x01))
1091 s[1] = 0x00EB1401 | (*((u8 *)&s[1] + 3) & 0x10);
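The byte-pointer cast on line 1091 reads byte 3 of the 32-bit word s[1], which corresponds to bits 31:24 only on a little-endian host. Under that assumption, an equivalent shift form (illustrative sketch, not driver code):

/* Assumes little-endian: *((u8 *)&s[1] + 3) is byte 3, i.e. bits 31:24 of s[1]. */
u8 top_byte = (s[1] >> 24) & 0xFF;
/* Line 1091 then masks that byte with 0x10 and ORs the result into the
 * rebuilt 0x00EB1401 signature value. */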
1096 static u32 mvs_is_sig_fis_received(u32 irq_status)
1101 static void mvs_sig_remove_timer(struct mvs_phy *phy)
1103 if (phy->timer.function)
1111 struct sas_identify_frame *id;
1113 id = (struct sas_identify_frame *)phy->frame_rcvd;
1129 if (mvs_is_sig_fis_received(phy->irq_status)) {
1130 mvs_sig_remove_timer(phy);
1133 i + mvi->id * mvi->chip->n_phy;
1138 mvs_get_d2h_reg(mvi, i, id);
1142 "Phy%d : No sig fis\n", i);
1157 phy->identify.target_port_protocols =
1160 phy->identify.target_port_protocols =
1165 sizeof(struct sas_identify_frame);
1175 mv_dprintk("phy %d attach sas addr is %llx\n",
1182 static void mvs_port_notify_formed(struct asd_sas_phy *sas_phy, int lock)
1189 unsigned long flags = 0;
1194 if (sas_ha->sas_phy[i] == sas_phy)
1200 if (i >= mvi->chip->n_phy)
1201 port = &mvi->port[i - mvi->chip->n_phy];
1203 port = &mvi->port[i];
1212 mvs_update_wideport(mvi, sas_phy->id);
1221 spin_unlock_irqrestore(&mvi->lock, flags);
1224 static void mvs_port_notify_deformed(struct asd_sas_phy *sas_phy, int lock)
1232 while (phy != &mvi->phy[phy_no]) {
1245 mvs_port_notify_formed(sas_phy, 1);
1250 mvs_port_notify_deformed(sas_phy, 1);
1263 if (dev == MVS_MAX_DEVICES)
1264 mv_printk("max support %d devices, ignore ..\n",
1273 memset(mvi_dev, 0, sizeof(*mvi_dev));
1282 unsigned long flags = 0;
1305 u8 phy_num = parent_dev->ex_dev.num_phys;
1307 for (phy_id = 0; phy_id < phy_num; phy_id++) {
1316 if (phy_id == phy_num) {
1317 mv_printk("Error: no attached dev:%016llx"
1327 spin_unlock_irqrestore(&mvi->lock, flags);
1338 unsigned long flags = 0;
1348 mvs_free_reg_set(mvi, mvi_dev);
1356 spin_unlock_irqrestore(&mvi->lock, flags);
1365 static void mvs_task_done(struct sas_task *task)
1372 static void mvs_tmf_timedout(unsigned long data)
1380 #define MVS_TASK_TIMEOUT 20
1381 static int mvs_exec_internal_tmf_task(struct domain_device *dev,
1387 for (retry = 0; retry < 3; retry++) {
1399 task->slow_task->timer.function = mvs_tmf_timedout;
1407 mv_printk("executing internel task failed:%d\n", res);
1441 mv_dprintk(" task to dev %016llx response: 0x%x "
1457 static int mvs_debug_issue_ssp_tmf(struct domain_device *dev,
1466 return mvs_exec_internal_tmf_task(dev, &ssp_task,
1473 static int mvs_debug_I_T_nexus_reset(struct domain_device *dev)
1480 sas_put_local_phy(phy);
1488 unsigned long flags;
1496 rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
1500 spin_unlock_irqrestore(&mvi->lock, flags);
1503 mv_printk("%s for device[%x]:rc= %d\n", __func__,
1510 unsigned long flags;
1519 rc = mvs_debug_I_T_nexus_reset(dev);
1525 spin_unlock_irqrestore(&mvi->lock, flags);
1544 rc = mvs_find_tag(mvi, task, &tag);
1553 rc = mvs_debug_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
1576 unsigned long flags;
1598 rc = mvs_find_tag(mvi, task, &tag);
1600 mv_printk("No such tag in %s\n", __func__);
1608 rc = mvs_debug_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
1620 spin_unlock_irqrestore(&mvi->lock, flags);
1629 mv_dprintk("mvs_abort_task() mvi=%p task=%p "
1630 "slot=%p slot_idx=x%x\n",
1631 mvi, task, slot, slot_idx);
1633 mvs_slot_task_free(mvi, task, slot, slot_idx);
1651 rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
1662 rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
1673 rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
1710 mv_printk("Length %d of sense buffer too small to "
1711 "fit sense %x:%x:%x", len, key, asc, ascq);
1723 mv_printk("Length %d of sense buffer too small to "
1724 "fit sense %x:%x:%x", len, key, asc, ascq);
1745 iu->response_data_len = 0;
1746 iu->sense_data_len = 17;
1772 if ((err_dw0 & NO_DEST) || err_dw1 & bit(31)) {
1773 struct ssp_response_iu *iu = slot->response +
1779 if (err_dw1 & bit(31))
1780 mv_printk("reuse same slot, retry command.\n");
1793 mvs_sata_done(mvi, task, slot_idx, err_dw0);
1831 memset(tstat, 0, sizeof(*tstat));
1839 mvs_free_reg_set(mvi, mvi_dev);
1841 mvs_slot_task_free(mvi, task, slot, slot_idx);
1855 mv_dprintk("port %d slot %d rx_desc %X has error info"
1856 "%016llX.\n", slot->port->sas_port.id, slot_idx,
1858 tstat->stat = mvs_slot_err(mvi, task, slot_idx);
1872 struct ssp_response_iu *iu = slot->response +
1893 tstat->stat = mvs_sata_done(mvi, task, slot_idx, 0);
1901 if (!slot->port->port_attached) {
1911 mvs_free_reg_set(mvi, mvi_dev);
1913 mvs_slot_task_free(mvi, task, slot, slot_idx);
1916 spin_unlock(&mvi->lock);
1920 spin_lock(&mvi->lock);
1933 phy = &mvi->phy[phy_no];
1947 if (dev && task->dev != dev)
1950 mv_printk("Release slot [%x] tag[%x], task [%p]:\n",
1963 for (i = 0; i < num; i++)
1967 static void mvs_phy_disconnected(struct mvs_phy *phy)
1979 unsigned long flags;
1990 struct sas_identify_frame *id;
1991 id = (struct sas_identify_frame *)phy->frame_rcvd;
1994 if (!(tmp & PHY_READY_MASK)) {
1995 sas_phy_disconnected(sas_phy);
1996 mvs_phy_disconnected(phy);
1999 mv_dprintk("phy%d Removed Device\n", phy_no);
2003 mvs_bytes_dmaed(mvi, phy_no);
2004 mvs_port_notify_formed(sas_phy, 0);
2005 mv_dprintk("phy%d Attached Device\n", phy_no);
2012 mv_dprintk("phy%d Got Broadcast Change\n", phy_no);
2015 spin_unlock_irqrestore(&mvi->lock, flags);
2019 static int mvs_handle_event(struct mvs_info *mvi, void *data, int handler)
2038 static void mvs_sig_time_out(unsigned long tphy)
2044 for (phy_no = 0; phy_no < mvi->chip->n_phy; phy_no++) {
2045 if (&mvi->phy[phy_no] == phy) {
2046 mv_dprintk("Get signature time out, reset phy %d\n",
2047 phy_no+mvi->id*mvi->chip->n_phy);
2072 phy_no + mvi->id*mvi->chip->n_phy);
2083 mvs_handle_event(mvi,
2084 (void *)(unsigned long)phy_no,
2086 ready = mvs_is_phy_ready(mvi, phy_no);
2087 if (ready || dev_sata) {
2105 phy->timer.function = mvs_sig_time_out;
2111 phy->phy_status = mvs_is_phy_ready(mvi, phy_no);
2112 mv_dprintk("notify plug in on phy[%d]\n", phy_no);
2129 mvs_bytes_dmaed(mvi, phy_no);
2132 mvs_port_notify_formed(&phy->sas_phy, 0);
2136 mv_dprintk("plugin interrupt but phy%d is gone\n",
2137 phy_no + mvi->id*mvi->chip->n_phy);
2141 phy_no + mvi->id*mvi->chip->n_phy);
2142 mvs_handle_event(mvi, (void *)(unsigned long)phy_no,
2169 if (mvi->rx_cons == rx_prod_idx)
2172 while (mvi->rx_cons != rx_prod_idx) {
2181 } else if (rx_desc & RXQ_ERR) {
2182 if (!(rx_desc & RXQ_DONE))
2185 mvs_slot_free(mvi, rx_desc);
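The rx_cons/rx_prod_idx lines above (2169-2185) follow a standard single-consumer ring pattern. A simplified sketch with hypothetical names (ring, cons, prod, ring_size, handle_descriptor), not the driver's actual structures:

/* Sketch only: consume completed descriptors until the consumer index
 * catches up with the producer index published by the hardware. */
while (cons != prod) {
	u32 desc = ring[cons];
	cons = (cons + 1) % ring_size;   /* advance consumer, wrapping at ring_size */
	handle_descriptor(desc);         /* hypothetical completion handler */
}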
2189 if (attn && self_clear)