sd_dif: Verify write requests

Add a sd_dif_check() helper that verifies the protection information
attached to WRITE requests before the command is sent to the device:
the guard tag is recomputed for each 512-byte sector of data and the
reference tag is compared against the target sector number. A request
that fails verification is terminated with BLKPREP_KILL.

---
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -580,6 +580,9 @@ static int sd_prep_fn(struct request_que
 	SCpnt->underflow = this_count << 9;
 	SCpnt->allowed = SD_MAX_RETRIES;
 	SCpnt->timeout_per_command = timeout;
+
+	if (rq_data_dir(rq) == WRITE && scsi_prot_sg_count(SCpnt) && sd_dif_check(SCpnt, block))
+		ret = BLKPREP_KILL;
 
 	/*
 	 * This indicates that the command is ready from our end to be
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -103,6 +103,7 @@ extern void sd_dif_config_host(struct sc
 extern void sd_dif_config_host(struct scsi_disk *);
 extern int sd_dif_prepare(struct request *rq, sector_t, unsigned int);
 extern void sd_dif_complete(struct scsi_cmnd *, unsigned int);
+extern int sd_dif_check(struct scsi_cmnd *, sector_t);
 
 #else /* CONFIG_BLK_DEV_INTEGRITY */
 
diff --git a/drivers/scsi/sd_dif.c b/drivers/scsi/sd_dif.c
--- a/drivers/scsi/sd_dif.c
+++ b/drivers/scsi/sd_dif.c
@@ -535,3 +535,116 @@ void sd_dif_complete(struct scsi_cmnd *s
 	}
 }
 
+static void dump_sector(unsigned char *buf, int len)
+{
+	int i, j;
+
+	printk(KERN_ERR ">>> Sector Dump <<<\n");
+
+	for (i=0 ; i<len ; i+=16) {
+		printk(KERN_ERR "%04x: ", i);
+
+		for (j=0 ; j<16 ; j++) {
+			unsigned char c = buf[i+j];
+
+			if (c >= 0x20 && c < 0x7e)
+				printk(" %c ", buf[i+j]);
+			else
+				printk("%02x ", buf[i+j]);
+		}
+
+		printk("\n");
+	}
+}
+
+int sd_dif_check(struct scsi_cmnd *SCpnt, sector_t start_sec)
+{
+	int i, j, ret;
+	struct sd_dif_tuple *sdt;
+	struct scatterlist *dsgl = scsi_sglist(SCpnt);
+	struct scatterlist *psgl = scsi_prot_sglist(SCpnt);
+	void *daddr, *paddr;
+	sector_t sector = start_sec;
+	int ppage_offset;
+	unsigned short csum;
+
+	/* Protection information is only attached when WRPROTECT == 1 */
+	if (((SCpnt->cmnd[1] >> 5) & 7) != 1) {
+		unsigned char prot_op = scsi_get_prot_op(SCpnt);
+
+		if (prot_op != SCSI_PROT_WRITE_STRIP
+		    && prot_op != SCSI_PROT_READ_INSERT)
+			printk(KERN_WARNING "sd_dif_check: WRPROTECT != 1\n");
+
+		return 0;
+	}
+
+	BUG_ON(scsi_sg_count(SCpnt) == 0);
+	BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
+
+	paddr = kmap_atomic(sg_page(psgl), KM_IRQ1) + psgl->offset;
+	ppage_offset = 0;
+
+	/* For each data page */
+	scsi_for_each_sg(SCpnt, dsgl, scsi_sg_count(SCpnt), i) {
+		daddr = kmap_atomic(sg_page(dsgl), KM_IRQ0) + dsgl->offset;
+
+		/* For each sector-sized chunk in data page */
+		for (j=0 ; j<dsgl->length ; j+=512) {
+
+			/* If we're at the end of the current
+			 * protection page advance to the next one
+			 */
+			if (ppage_offset >= psgl->length) {
+				kunmap_atomic(paddr, KM_IRQ1);
+				psgl = sg_next(psgl);
+				BUG_ON(psgl == NULL);
+				paddr = kmap_atomic(sg_page(psgl), KM_IRQ1)
+					+ psgl->offset;
+				ppage_offset = 0;
+			}
+
+			sdt = paddr + ppage_offset;
+
+			if (scsi_host_get_guard(SCpnt->device->host) == SHOST_DIX_GUARD_IP)
+				csum = ip_compute_csum(daddr, 512);
+			else if (scsi_host_get_guard(SCpnt->device->host) == SHOST_DIX_GUARD_CRC)
+				csum = sd_dif_crc_fn(daddr, 512);
+			else
+				BUG();
+
+			if (sdt->guard_tag != csum) {
+				printk(KERN_ERR "%s: GUARD check failed on sector %lu "
+				       "rcvd 0x%04x, calculated 0x%04x\n",
+				       __func__, sector,
+				       be16_to_cpu(sdt->guard_tag), be16_to_cpu(csum));
+				ret = 0x01;
+				dump_sector(daddr, 512);
+				goto out;
+			}
+
+			if (be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
+				printk(KERN_ERR
+				       "%s: REF check failed on sector %lu\n",
+				       __func__, sector);
+				ret = 0x03;
+				dump_sector(daddr, 512);
+				goto out;
+			}
+
+			sector++;
+			daddr += 512;
+			ppage_offset += sizeof(struct sd_dif_tuple);
+		}
+
+		/* daddr has been advanced past the end of the mapping;
+		 * rewind to an address inside the page before unmapping
+		 */
+		kunmap_atomic(daddr - dsgl->length, KM_IRQ0);
+	}
+
+	kunmap_atomic(paddr, KM_IRQ1);
+
+	return 0;
+
+out:
+	kunmap_atomic(daddr, KM_IRQ0);
+	kunmap_atomic(paddr, KM_IRQ1);
+	return ret;
+}
+EXPORT_SYMBOL(sd_dif_check);
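
For reference, each 512-byte sector of a protected request is accompanied
(in the separate protection scatterlist) by an 8-byte tuple of guard,
application and reference tags, all stored big-endian. Below is a minimal
user-space model of the same per-sector check, assuming the IP-checksum
guard type (SHOST_DIX_GUARD_IP); verify_sectors() and ip_csum() are
hypothetical stand-ins, not kernel interfaces, and the one's-complement
sum here only approximates the kernel's ip_compute_csum():

/* User-space model of the per-sector check done by sd_dif_check().
 * Assumes an IP-checksum guard and a contiguous protection buffer.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>	/* htons/ntohs/htonl/ntohl */

struct dif_tuple {		/* mirrors struct sd_dif_tuple */
	uint16_t guard_tag;	/* big-endian checksum of the sector */
	uint16_t app_tag;	/* opaque storage */
	uint32_t ref_tag;	/* big-endian low 32 bits of the LBA */
};

/* 16-bit one's-complement sum over the sector */
static uint16_t ip_csum(const void *data, size_t len)
{
	const uint8_t *p = data;
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		sum += (uint32_t)(p[i] << 8 | p[i + 1]);
	if (len & 1)
		sum += (uint32_t)(p[len - 1] << 8);
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

/* Returns 0 when all tags verify, else 0x01 (guard) or 0x03 (ref),
 * mirroring the return values of sd_dif_check()
 */
static int verify_sectors(const uint8_t *data, const struct dif_tuple *prot,
			  unsigned nsect, uint64_t start_lba)
{
	unsigned i;

	for (i = 0; i < nsect; i++) {
		uint16_t csum = ip_csum(data + 512 * (size_t)i, 512);

		if (ntohs(prot[i].guard_tag) != csum)
			return 0x01;	/* GUARD check failed */
		if (ntohl(prot[i].ref_tag) !=
		    (uint32_t)((start_lba + i) & 0xffffffff))
			return 0x03;	/* REF check failed */
	}
	return 0;
}

int main(void)
{
	uint8_t data[2 * 512];
	struct dif_tuple prot[2];
	unsigned i;

	memset(data, 0xab, sizeof(data));
	for (i = 0; i < 2; i++) {
		prot[i].guard_tag = htons(ip_csum(data + 512 * i, 512));
		prot[i].app_tag = 0;
		prot[i].ref_tag = htonl(100 + i);	/* LBAs 100, 101 */
	}

	printf("intact: %d\n", verify_sectors(data, prot, 2, 100));
	data[5] ^= 0xff;	/* corrupt one byte of sector 0 */
	printf("corrupted: %d\n", verify_sectors(data, prot, 2, 100));
	return 0;
}

Running the model prints 0 for the intact buffer and 1 (the GUARD failure
code) once a single data byte is flipped, the same way sd_dif_check()
fails a write whose payload no longer matches its protection tuples.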