SOURCES: kernel-desktop-sata_nv-ncq.patch (NEW) - init PLD - NCQ f...
czarny
czarny at pld-linux.org
Fri Aug 3 01:09:55 CEST 2007
Author: czarny Date: Thu Aug 2 23:09:55 2007 GMT
Module: SOURCES Tag: HEAD
---- Log message:
- init PLD
- NCQ functionality for newer nvidia chipsets (mcp{50,51,61}); http://pred.dcaf-security.org/sata_nv-ncq-support-mcp51-mcp55-mcp61.patch
---- Files affected:
SOURCES:
kernel-desktop-sata_nv-ncq.patch (NONE -> 1.1) (NEW)
---- Diffs:
================================================================
Index: SOURCES/kernel-desktop-sata_nv-ncq.patch
diff -u /dev/null SOURCES/kernel-desktop-sata_nv-ncq.patch:1.1
--- /dev/null Fri Aug 3 01:09:55 2007
+++ SOURCES/kernel-desktop-sata_nv-ncq.patch Fri Aug 3 01:09:50 2007
@@ -0,0 +1,924 @@
+Index: linux-2.6.22-dev/drivers/ata/sata_nv.c
+===================================================================
+--- linux-2.6.22-dev.orig/drivers/ata/sata_nv.c
++++ linux-2.6.22-dev/drivers/ata/sata_nv.c
+@@ -169,6 +169,35 @@ enum {
+ NV_ADMA_PORT_REGISTER_MODE = (1 << 0),
+ NV_ADMA_ATAPI_SETUP_COMPLETE = (1 << 1),
+
++ /* MCP55 reg offset */
++ NV_CTL_MCP55 = 0x400,
++ NV_INT_STATUS_MCP55 = 0x440,
++ NV_INT_ENABLE_MCP55 = 0x444,
++ NV_NCQ_REG_MCP55 = 0x448,
++
++ /* MCP55 */
++ NV_INT_ALL_MCP55 = 0xffff,
++ NV_INT_PORT_SHIFT_MCP55 = 16, /* each port occupies 16 bits */
++ NV_INT_MASK_MCP55 = NV_INT_ALL_MCP55 & 0xfffd,
++
++ /* SWNCQ ENABLE BITS*/
++ NV_CTL_PRI_SWNCQ = 0x02,
++ NV_CTL_SEC_SWNCQ = 0x04,
++
++ /* SW NCQ status bits*/
++ NV_SWNCQ_IRQ_DEV = (1 << 0),
++ NV_SWNCQ_IRQ_PM = (1 << 1),
++ NV_SWNCQ_IRQ_ADDED = (1 << 2),
++ NV_SWNCQ_IRQ_REMOVED = (1 << 3),
++
++ NV_SWNCQ_IRQ_BACKOUT = (1 << 4),
++ NV_SWNCQ_IRQ_SDBFIS = (1 << 5),
++ NV_SWNCQ_IRQ_DHREGFIS = (1 << 6),
++ NV_SWNCQ_IRQ_DMASETUP = (1 << 7),
++
++ NV_SWNCQ_IRQ_HOTPLUG = NV_SWNCQ_IRQ_ADDED |
++ NV_SWNCQ_IRQ_REMOVED,
++
+ };
+
+ /* ADMA Physical Region Descriptor - one SG segment */
+@@ -226,6 +255,35 @@ struct nv_host_priv {
+ unsigned long type;
+ };
+
++struct defer_queue {
++ u32 defer_bits;
++ u8 front;
++ u8 rear;
++ unsigned int tag[ATA_MAX_QUEUE + 1];
++};
++
++struct nv_swncq_port_priv {
++ struct ata_prd *prd; /* our SG list */
++ dma_addr_t prd_dma; /* and its DMA mapping */
++ void __iomem *sactive_block;
++ u32 qc_active;
++ unsigned int last_issue_tag;
++ spinlock_t lock;
++ /* fifo loop queue to store deferral command */
++ struct defer_queue defer_queue;
++
++ /* for NCQ interrupt analysis */
++ u32 dhfis_bits;
++ u32 dmafis_bits;
++ u32 sdbfis_bits;
++
++ unsigned int ncq_saw_d2h:1;
++ unsigned int ncq_saw_dmas:1;
++ unsigned int ncq_saw_sdb:1;
++ unsigned int ncq_saw_backout:1;
++};
++
++
+ #define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & ( 1 << (19 + (12 * (PORT)))))
+
+ static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
+@@ -263,13 +321,28 @@ static void nv_adma_host_stop(struct ata
+ static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
+ static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
+
++static void nv_mcp55_thaw(struct ata_port *ap);
++static void nv_mcp55_freeze(struct ata_port *ap);
++static void nv_swncq_error_handler(struct ata_port *ap);
++static int nv_swncq_port_start(struct ata_port *ap);
++static void nv_swncq_qc_prep(struct ata_queued_cmd *qc);
++static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
++static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc);
++static void nv_swncq_irq_clear(struct ata_port *ap, u32 val);
++static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance);
++#ifdef CONFIG_PM
++static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg);
++static int nv_swncq_port_resume(struct ata_port *ap);
++#endif
++
+ enum nv_host_type
+ {
+ GENERIC,
+ NFORCE2,
+ NFORCE3 = NFORCE2, /* NF2 == NF3 as far as sata_nv is concerned */
+ CK804,
+- ADMA
++ ADMA,
++ SWNCQ
+ };
+
+ static const struct pci_device_id nv_pci_tbl[] = {
+@@ -280,13 +353,13 @@ static const struct pci_device_id nv_pci
+ { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
+ { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
+ { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
+- { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), GENERIC },
+- { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), GENERIC },
+- { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), GENERIC },
+- { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), GENERIC },
+- { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
+- { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
+- { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },
++ { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), SWNCQ },
++ { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), SWNCQ },
++ { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), SWNCQ },
++ { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), SWNCQ },
++ { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), SWNCQ },
++ { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), SWNCQ },
++ { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), SWNCQ },
+
+ { } /* terminate list */
+ };
+@@ -339,6 +412,24 @@ static struct scsi_host_template nv_adma
+ .bios_param = ata_std_bios_param,
+ };
+
++static struct scsi_host_template nv_swncq_sht = {
++ .module = THIS_MODULE,
++ .name = DRV_NAME,
++ .ioctl = ata_scsi_ioctl,
++ .queuecommand = ata_scsi_queuecmd,
++ .can_queue = ATA_MAX_QUEUE,
++ .this_id = ATA_SHT_THIS_ID,
++ .sg_tablesize = LIBATA_MAX_PRD,
++ .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
++ .emulated = ATA_SHT_EMULATED,
++ .use_clustering = ATA_SHT_USE_CLUSTERING,
++ .proc_name = DRV_NAME,
++ .dma_boundary = ATA_DMA_BOUNDARY,
++ .slave_configure = ata_scsi_slave_config,
++ .slave_destroy = ata_scsi_slave_destroy,
++ .bios_param = ata_std_bios_param,
++};
++
+ static const struct ata_port_operations nv_generic_ops = {
+ .port_disable = ata_port_disable,
+ .tf_load = ata_tf_load,
+@@ -451,6 +542,36 @@ static const struct ata_port_operations
+ .host_stop = nv_adma_host_stop,
+ };
+
++static const struct ata_port_operations nv_swncq_ops = {
++ .port_disable = ata_port_disable,
++ .tf_load = ata_tf_load,
++ .tf_read = ata_tf_read,
++ .exec_command = ata_exec_command,
++ .check_status = ata_check_status,
++ .dev_select = ata_std_dev_select,
++ .bmdma_setup = ata_bmdma_setup,
++ .bmdma_start = ata_bmdma_start,
++ .bmdma_stop = ata_bmdma_stop,
++ .bmdma_status = ata_bmdma_status,
++ .qc_prep = nv_swncq_qc_prep,
++ .qc_issue = nv_swncq_qc_issue,
++ .freeze = nv_mcp55_freeze,
++ .thaw = nv_mcp55_thaw,
++ .error_handler = nv_swncq_error_handler,
++ .post_internal_cmd = ata_bmdma_post_internal_cmd,
++ .data_xfer = ata_data_xfer,
++ .irq_clear = ata_bmdma_irq_clear,
++ .irq_on = ata_irq_on,
++ .irq_ack = ata_irq_ack,
++ .scr_read = nv_scr_read,
++ .scr_write = nv_scr_write,
++#ifdef CONFIG_PM
++ .port_suspend = nv_swncq_port_suspend,
++ .port_resume = nv_swncq_port_resume,
++#endif
++ .port_start = nv_swncq_port_start,
++};
++
+ static const struct ata_port_info nv_port_info[] = {
+ /* generic */
+ {
+@@ -497,6 +618,17 @@ static const struct ata_port_info nv_por
+ .port_ops = &nv_adma_ops,
+ .irq_handler = nv_adma_interrupt,
+ },
++ /* SWNCQ*/
++ {
++ .sht = &nv_swncq_sht,
++ .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
++ ATA_FLAG_HRST_TO_RESUME,
++ .pio_mask = NV_PIO_MASK,
++ .mwdma_mask = NV_MWDMA_MASK,
++ .udma_mask = NV_UDMA_MASK,
++ .port_ops = &nv_swncq_ops,
++ .irq_handler = nv_swncq_interrupt,
++ },
+ };
+
+ MODULE_AUTHOR("NVIDIA");
+@@ -506,6 +638,7 @@ MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
+ MODULE_VERSION(DRV_VERSION);
+
+ static int adma_enabled = 1;
++static int swncq_enabled = 1;
+
+ static void nv_adma_register_mode(struct ata_port *ap)
+ {
+@@ -1455,6 +1588,48 @@ static void nv_ck804_thaw(struct ata_por
+ writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
+ }
+
++static void nv_mcp55_freeze(struct ata_port *ap)
++{
++ void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
++ int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
++ u32 mask;
++ u32 val;
++
++ if (ap->flags & ATA_FLAG_NCQ) {
++ val = readl(mmio_base + NV_CTL_MCP55);
++ val &= ~(NV_CTL_PRI_SWNCQ << ap->port_no);
++ writel(val, mmio_base + NV_CTL_MCP55);/* disable ncq */
++ }
++
++ writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
++
++ mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
++ mask &= ~(NV_INT_ALL_MCP55 << shift);
++ writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
++ ata_bmdma_freeze(ap);
++}
++
++static void nv_mcp55_thaw(struct ata_port *ap)
++{
++ void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
++ int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
++ u32 mask;
++ u32 val;
++
++ if (ap->flags & ATA_FLAG_NCQ) {
++ val = readl(mmio_base + NV_CTL_MCP55);
++ val |= (NV_CTL_PRI_SWNCQ << ap->port_no);
++ writel(val, mmio_base + NV_CTL_MCP55);/* enable ncq */
++ }
++
++ writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
++
++ mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
++ mask |= (NV_INT_MASK_MCP55 << shift);
++ writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
++ ata_bmdma_thaw(ap);
++}
++
+ static int nv_hardreset(struct ata_port *ap, unsigned int *class,
+ unsigned long deadline)
+ {
+@@ -1528,6 +1703,633 @@ static void nv_adma_error_handler(struct
+ nv_hardreset, ata_std_postreset);
+ }
+
++static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc)
++{
++ struct nv_swncq_port_priv *pp = ap->private_data;
++ struct defer_queue *dq = &pp->defer_queue;
++
++ /* queue is full */
++ WARN_ON((dq->rear + 1) % (ATA_MAX_QUEUE + 1) == dq->front);
++
++ dq->defer_bits |= (1 << qc->tag);
++
++ dq->tag[dq->rear] = qc->tag;
++ dq->rear = (dq->rear + 1) % (ATA_MAX_QUEUE + 1);
++
++}
++
++static struct ata_queued_cmd *nv_swncq_qc_from_dq(struct ata_port *ap)
++{
++ struct nv_swncq_port_priv *pp = ap->private_data;
++ struct defer_queue *dq = &pp->defer_queue;
++ unsigned int tag;
++
++ if (dq->front == dq->rear) /* null queue */
++ return NULL;
++
++ tag = dq->tag[dq->front];
++ dq->tag[dq->front] = ATA_TAG_POISON;
++ dq->front = (dq->front + 1) % (ATA_MAX_QUEUE + 1);
++
++ WARN_ON(!(dq->defer_bits & (1 << tag)));
++ dq->defer_bits &= ~(1 << tag);
++
++ return ata_qc_from_tag(ap, tag);
++}
++
++static void nv_swncq_bmdma_stop(struct ata_port *ap)
++{
++ /* clear start/stop bit */
++ iowrite8(ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD) & ~ATA_DMA_START,
++ ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
++ ata_altstatus(ap);
++}
++
++static void nv_swncq_fis_reinit(struct ata_port *ap)
++{
++ struct nv_swncq_port_priv *pp = ap->private_data;
++
++ pp->dhfis_bits = 0;
++ pp->dmafis_bits = 0;
++ pp->sdbfis_bits = 0;
++ pp->ncq_saw_d2h = 0;
++ pp->ncq_saw_dmas = 0;
++ pp->ncq_saw_sdb = 0;
++ pp->ncq_saw_backout = 0;
++}
++
++static void nv_swncq_pp_reinit(struct ata_port *ap)
++{
++ struct nv_swncq_port_priv *pp = ap->private_data;
++ struct defer_queue *dq = &pp->defer_queue;
++
++ dq->front = dq->rear = 0;
++ dq->defer_bits = 0;
++ pp->qc_active = 0;
++ pp->last_issue_tag = ATA_TAG_POISON;
++ nv_swncq_fis_reinit(ap);
++}
++
++static void nv_swncq_irq_clear(struct ata_port *ap, u32 val)
++{
++ void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
++ u32 flags = (val << (ap->port_no * NV_INT_PORT_SHIFT_MCP55));
++
++ writel(flags, mmio + NV_INT_STATUS_MCP55);
++}
++
++static void nv_swncq_ncq_stop(struct ata_port *ap)
++{
++ struct nv_swncq_port_priv *pp = ap->private_data;
++ unsigned int i;
++ u32 sactive;
++ u32 done_mask;
++
++ ata_port_printk(ap, KERN_ERR,
++ "EH in SWNCQ mode,QC:qc_active 0x%X sactive 0x%X\n",
++ ap->qc_active, ap->sactive);
++ ata_port_printk(ap, KERN_ERR,
++ "SWNCQ:qc_active 0x%X defer_bits 0x%X last_issue_tag 0x%x\n "
++ "dhfis 0x%X dmafis 0x%X sdbfis 0x%X\n",
++ pp->qc_active, pp->defer_queue.defer_bits, pp->last_issue_tag,
++ pp->dhfis_bits, pp->dmafis_bits,
++ pp->sdbfis_bits);
++
++ ata_port_printk(ap, KERN_ERR, "ATA_REG 0x%X ERR_REG 0x%X\n",
++ ap->ops->check_status(ap), ioread8(ap->ioaddr.error_addr));
++
++ sactive = readl(pp->sactive_block);
++ done_mask = pp->qc_active ^ sactive;
++
++ ata_port_printk(ap, KERN_ERR, "tag : dhfis dmafis sdbfis sacitve\n");
++ for (i = 0; i < ATA_MAX_QUEUE; i++) {
++ u8 err = 0;
++ if (pp->qc_active & (1 << i))
++ err = 0;
++ else if (done_mask & (1 << i))
++ err = 1;
++ else
++ continue;
++
++ ata_port_printk(ap, KERN_ERR,
++ "tag 0x%x: %01x %01x %01x %01x %s\n", i,
++ (pp->dhfis_bits >> i) & 0x1,
++ (pp->dmafis_bits >> i) & 0x1 , (pp->sdbfis_bits >> i) & 0x1,
++ (sactive >> i) & 0x1,
++ (err ? "error!tag doesn't exit, but sactive bit is set" : " "));
++ }
++
++ nv_swncq_pp_reinit(ap);
++ ap->ops->irq_clear(ap);
++ nv_swncq_bmdma_stop(ap);
++ nv_swncq_irq_clear(ap, 0xffff);
++}
++
++static void nv_swncq_error_handler(struct ata_port *ap)
++{
++ struct ata_eh_context *ehc = &ap->eh_context;
++
++ if (ap->sactive) {
++ nv_swncq_ncq_stop(ap);
++ ehc->i.action |= ATA_EH_HARDRESET;
++ }
++
++ ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
++ nv_hardreset, ata_std_postreset);
++}
++
++#ifdef CONFIG_PM
++static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg)
++{
++ void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
++ u32 tmp;
++
++ /* clear irq */
++ writel(~0, mmio + NV_INT_STATUS_MCP55);
++
++ if (!(ap->flags & ATA_FLAG_NCQ))
++ return 0;
++
++ /* disable irq */
++ writel(0, mmio + NV_INT_ENABLE_MCP55);
++
++ /* disable swncq */
++ tmp = readl(mmio + NV_CTL_MCP55);
++ tmp &= ~(NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ);
++ writel(tmp, mmio + NV_CTL_MCP55);
++
++ return 0;
++}
++
++static int nv_swncq_port_resume(struct ata_port *ap)
++{
++ void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
++ u32 tmp;
++
++ /* clear irq */
++ writel(~0, mmio + NV_INT_STATUS_MCP55);
++
++ if (!(ap->flags & ATA_FLAG_NCQ))
++ return 0;
++
++ /* enable irq */
++ writel(0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
++
++ /* enable swncq */
++ tmp = readl(mmio + NV_CTL_MCP55);
++ writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
++
++ return 0;
++}
++#endif
++
++static void nv_swncq_host_init(struct ata_host *host)
++{
++ u32 tmp;
++ void __iomem *mmio = host->iomap[NV_MMIO_BAR];
++ struct pci_dev *pdev = to_pci_dev(host->dev);
++ u8 regval;
++ unsigned int i;
++
++ /* disable ECO 398 */
+	pci_read_config_byte(pdev, 0x7f, &regval);
++ regval &= ~(1 << 7);
++ pci_write_config_byte(pdev, 0x7f, regval);
++
++ /* enable swncq */
++ tmp = readl(mmio + NV_CTL_MCP55);
++ VPRINTK("HOST_CTL:0x%X\n", tmp);
++ writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
++
++ for (i = 0; i < host->n_ports; i++)
++ host->ports[i]->flags |= ATA_FLAG_NCQ;
++
++ /* enable irq intr */
++ tmp = readl(mmio + NV_INT_ENABLE_MCP55);
++ VPRINTK("HOST_ENABLE:0x%X\n", tmp);
++ writel(tmp | 0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
++
++ /* clear port irq */
++ writel(~0x0, mmio + NV_INT_STATUS_MCP55);
++}
++
++static int nv_swncq_port_start(struct ata_port *ap)
++{
++ struct device *dev = ap->host->dev;
++ void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
++ struct nv_swncq_port_priv *pp;
++ int rc;
++
++ rc = ata_port_start(ap);
++ if (rc)
++ return rc;
++
++ pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
++ if (!pp)
++ return -ENOMEM;
++
++ pp->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE,
++ &pp->prd_dma, GFP_KERNEL);
++ if (!pp->prd)
++ return -ENOMEM;
++ memset(pp->prd, 0, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE);
++
++ ap->private_data = pp;
++ pp->sactive_block = mmio + 4 * SCR_ACTIVE +
++ ap->port_no * NV_PORT1_SCR_REG_OFFSET;
++ spin_lock_init(&pp->lock);
++
++ return 0;
++}
++
++static void nv_swncq_qc_prep(struct ata_queued_cmd *qc)
++{
++ if (qc->tf.protocol != ATA_PROT_NCQ)
++ return ata_qc_prep(qc);
++
++ if (!(qc->flags & ATA_QCFLAG_DMAMAP))
++ return;
++
++ nv_swncq_fill_sg(qc);
++}
++
++static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
++{
++ struct ata_port *ap = qc->ap;
++ struct scatterlist *sg;
++ unsigned int idx;
++ struct nv_swncq_port_priv *pp = ap->private_data;
++ struct ata_prd *prd;
++
++ WARN_ON(qc->__sg == NULL);
++ WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
++
++ prd = (void*)pp->prd + (ATA_PRD_TBL_SZ * qc->tag);
++
++ idx = 0;
++ ata_for_each_sg(sg, qc) {
++ u32 addr, offset;
++ u32 sg_len, len;
++
++ addr = (u32)sg_dma_address(sg);
++ sg_len = sg_dma_len(sg);
++
++ while (sg_len) {
++ offset = addr & 0xffff;
++ len = sg_len;
++ if ((offset + sg_len) > 0x10000)
++ len = 0x10000 - offset;
++
++ prd[idx].addr = cpu_to_le32(addr);
++ prd[idx].flags_len = cpu_to_le32(len & 0xffff);
++
++ idx++;
++ sg_len -= len;
++ addr += len;
++ }
++ }
++
++ if (idx)
++ prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
++}
++
++static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap,
++ struct ata_queued_cmd *qc)
++{
++ struct nv_swncq_port_priv *pp = ap->private_data;
++
++ if (qc == NULL)
++ return 0;
++
++ DPRINTK("Enter\n");
++
++ writel((1 << qc->tag), pp->sactive_block);
++ pp->last_issue_tag = qc->tag;
++ pp->dhfis_bits &= ~(1 << qc->tag);
++ pp->dmafis_bits &= ~(1 << qc->tag);
++ pp->qc_active |= (0x1 << qc->tag);
++
++ ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
++ ap->ops->exec_command(ap, &qc->tf);
++
++ DPRINTK("Issued tag %u\n", qc->tag);
++
++ return 0;
++}
++
++static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc)
++{
++ struct ata_port *ap = qc->ap;
++ struct nv_swncq_port_priv *pp = ap->private_data;
++ unsigned long flags;
++
++ if (qc->tf.protocol != ATA_PROT_NCQ)
++ return ata_qc_issue_prot(qc);
++
<<Diff was trimmed, longer than 597 lines>>
More information about the pld-cvs-commit
mailing list