--- test021.left.txt
+++ test021.right.txt
@@ -1,4 +1,4 @@
-/*	$OpenBSD: softraid_crypto.c,v 1.91 2013/03/31 15:44:52 jsing Exp $	*/
+/*	$OpenBSD: softraid_crypto.c,v 1.139 2020/07/13 00:06:22 kn Exp $	*/
 /*
  * Copyright (c) 2007 Marco Peereboom
  * Copyright (c) 2008 Hans-Joerg Hoexer
@@ -25,7 +25,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
@@ -34,6 +33,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -42,7 +42,6 @@
 #include
 #include
-#include
 #include
 #include
 #include
@@ -54,7 +53,6 @@
 #include
 #include
-#include
 
 /*
  * The per-I/O data that we need to preallocate. We cannot afford to allow I/O
@@ -62,18 +60,15 @@
  * because we assert that only one ccb per WU will ever be active.
  */
 struct sr_crypto_wu {
-	TAILQ_ENTRY(sr_crypto_wu)	cr_link;
+	struct sr_workunit		cr_wu;		/* Must be first. */
 	struct uio			cr_uio;
 	struct iovec			cr_iov;
 	struct cryptop			*cr_crp;
-	struct cryptodesc		*cr_descs;
-	struct sr_workunit		*cr_wu;
 	void				*cr_dmabuf;
 };
 
-struct sr_crypto_wu *sr_crypto_wu_get(struct sr_workunit *, int);
-void		sr_crypto_wu_put(struct sr_crypto_wu *);
+struct sr_crypto_wu *sr_crypto_prepare(struct sr_workunit *, int);
 int		sr_crypto_create_keys(struct sr_discipline *);
 int		sr_crypto_get_kdf(struct bioc_createraid *,
 		    struct sr_discipline *);
@@ -92,12 +87,11 @@
 		    struct bioc_discipline *);
 int		sr_crypto_meta_opt_handler(struct sr_discipline *,
 		    struct sr_meta_opt_hdr *);
-int		sr_crypto_write(struct cryptop *);
+void		sr_crypto_write(struct cryptop *);
 int		sr_crypto_rw(struct sr_workunit *);
-int		sr_crypto_rw2(struct sr_workunit *, struct sr_crypto_wu *);
+int		sr_crypto_dev_rw(struct sr_workunit *, struct sr_crypto_wu *);
 void		sr_crypto_done(struct sr_workunit *);
-int		sr_crypto_read(struct cryptop *);
-void		sr_crypto_finish_io(struct sr_workunit *);
+void		sr_crypto_read(struct cryptop *);
 void		sr_crypto_calculate_check_hmac_sha1(u_int8_t *, int,
 		    u_int8_t *, int, u_char *);
 void		sr_crypto_hotplug(struct sr_discipline *, struct disk *, int);
@@ -113,6 +107,7 @@
 	int			i;
 
 	/* Fill out discipline members. */
+	sd->sd_wu_size = sizeof(struct sr_crypto_wu);
 	sd->sd_type = SR_MD_CRYPTO;
 	strlcpy(sd->sd_name, "CRYPTO", sizeof(sd->sd_name));
 	sd->sd_capabilities = SR_CAP_SYSTEM_DISK | SR_CAP_AUTO_ASSEMBLE;
@@ -143,8 +138,14 @@
 		sr_error(sd->sd_sc, "%s requires exactly one chunk",
 		    sd->sd_name);
 		goto done;
-	}
+	}
+
+	if (coerced_size > SR_CRYPTO_MAXSIZE) {
+		sr_error(sd->sd_sc, "%s exceeds maximum size (%lli > %llu)",
+		    sd->sd_name, coerced_size, SR_CRYPTO_MAXSIZE);
+		goto done;
+	}
 
 	/* Create crypto optional metadata. */
 	omi = malloc(sizeof(struct sr_meta_opt_item), M_DEVBUF,
 	    M_WAITOK | M_ZERO);
@@ -208,7 +209,7 @@
 
 	if (data != NULL) {
 		/* Kernel already has mask key. */
-		bcopy(data, sd->mds.mdd_crypto.scr_maskkey,
+		memcpy(sd->mds.mdd_crypto.scr_maskkey, data,
 		    sizeof(sd->mds.mdd_crypto.scr_maskkey));
 	} else if (bc->bc_key_disk != NODEV) {
 		/* Read the mask key from the key disk. */
@@ -248,117 +249,69 @@
 }
 
 struct sr_crypto_wu *
-sr_crypto_wu_get(struct sr_workunit *wu, int encrypt)
+sr_crypto_prepare(struct sr_workunit *wu, int encrypt)
 {
 	struct scsi_xfer	*xs = wu->swu_xs;
 	struct sr_discipline	*sd = wu->swu_dis;
 	struct sr_crypto_wu	*crwu;
 	struct cryptodesc	*crd;
 	int			flags, i, n;
-	daddr64_t		blk = 0;
+	daddr_t			blkno;
 	u_int			keyndx;
 
-	DNPRINTF(SR_D_DIS, "%s: sr_crypto_wu_get wu: %p encrypt: %d\n",
+	DNPRINTF(SR_D_DIS, "%s: sr_crypto_prepare wu %p encrypt %d\n",
 	    DEVNAME(sd->sd_sc), wu, encrypt);
 
-	mtx_enter(&sd->mds.mdd_crypto.scr_mutex);
-	if ((crwu = TAILQ_FIRST(&sd->mds.mdd_crypto.scr_wus)) != NULL)
-		TAILQ_REMOVE(&sd->mds.mdd_crypto.scr_wus, crwu, cr_link);
-	mtx_leave(&sd->mds.mdd_crypto.scr_mutex);
-	if (crwu == NULL)
-		panic("sr_crypto_wu_get: out of wus");
-
+	crwu = (struct sr_crypto_wu *)wu;
 	crwu->cr_uio.uio_iovcnt = 1;
 	crwu->cr_uio.uio_iov->iov_len = xs->datalen;
 	if (xs->flags & SCSI_DATA_OUT) {
 		crwu->cr_uio.uio_iov->iov_base = crwu->cr_dmabuf;
-		bcopy(xs->data, crwu->cr_uio.uio_iov->iov_base, xs->datalen);
+		memcpy(crwu->cr_uio.uio_iov->iov_base, xs->data, xs->datalen);
 	} else
 		crwu->cr_uio.uio_iov->iov_base = xs->data;
 
-	if (xs->cmdlen == 10)
-		blk = _4btol(((struct scsi_rw_big *)xs->cmd)->addr);
-	else if (xs->cmdlen == 16)
-		blk = _8btol(((struct scsi_rw_16 *)xs->cmd)->addr);
-	else if (xs->cmdlen == 6)
-		blk = _3btol(((struct scsi_rw *)xs->cmd)->addr);
-
+	blkno = wu->swu_blk_start;
 	n = xs->datalen >> DEV_BSHIFT;
 
 	/*
 	 * We preallocated enough crypto descs for up to MAXPHYS of I/O.
-	 * Since there may be less than that we need to tweak the linked list
+	 * Since there may be less than that we need to tweak the amount
 	 * of crypto desc structures to be just long enough for our needs.
 	 */
-	crd = crwu->cr_descs;
-	for (i = 0; i < ((MAXPHYS >> DEV_BSHIFT) - n); i++) {
-		crd = crd->crd_next;
-		KASSERT(crd);
-	}
-	crwu->cr_crp->crp_desc = crd;
+	KASSERT(crwu->cr_crp->crp_ndescalloc >= n);
+	crwu->cr_crp->crp_ndesc = n;
 	flags = (encrypt ? CRD_F_ENCRYPT : 0) |
 	    CRD_F_IV_PRESENT | CRD_F_IV_EXPLICIT;
 
-	/* Select crypto session based on block number */
-	keyndx = blk >> SR_CRYPTO_KEY_BLKSHIFT;
-	if (keyndx >= SR_CRYPTO_MAXKEYS)
-		goto unwind;
+	/*
+	 * Select crypto session based on block number.
+	 *
	 * XXX - this does not handle the case where the read/write spans
+	 * across a different key blocks (e.g. 0.5TB boundary). Currently
+	 * this is already broken by the use of scr_key[0] below.
+	 */
+	keyndx = blkno >> SR_CRYPTO_KEY_BLKSHIFT;
 	crwu->cr_crp->crp_sid = sd->mds.mdd_crypto.scr_sid[keyndx];
-	if (crwu->cr_crp->crp_sid == (u_int64_t)-1)
-		goto unwind;
+	crwu->cr_crp->crp_opaque = crwu;
 	crwu->cr_crp->crp_ilen = xs->datalen;
 	crwu->cr_crp->crp_alloctype = M_DEVBUF;
+	crwu->cr_crp->crp_flags = CRYPTO_F_IOV | CRYPTO_F_NOQUEUE;
 	crwu->cr_crp->crp_buf = &crwu->cr_uio;
-	for (i = 0, crd = crwu->cr_crp->crp_desc; crd;
-	    i++, blk++, crd = crd->crd_next) {
+	for (i = 0; i < crwu->cr_crp->crp_ndesc; i++, blkno++) {
+		crd = &crwu->cr_crp->crp_desc[i];
 		crd->crd_skip = i << DEV_BSHIFT;
 		crd->crd_len = DEV_BSIZE;
 		crd->crd_inject = 0;
 		crd->crd_flags = flags;
-		crd->crd_alg = CRYPTO_AES_XTS;
-
-		switch (sd->mds.mdd_crypto.scr_meta->scm_alg) {
-		case SR_CRYPTOA_AES_XTS_128:
-			crd->crd_klen = 256;
-			break;
-		case SR_CRYPTOA_AES_XTS_256:
-			crd->crd_klen = 512;
-			break;
-		default:
-			goto unwind;
-		}
+		crd->crd_alg = sd->mds.mdd_crypto.scr_alg;
+		crd->crd_klen = sd->mds.mdd_crypto.scr_klen;
 		crd->crd_key = sd->mds.mdd_crypto.scr_key[0];
-		bcopy(&blk, crd->crd_iv, sizeof(blk));
+		memcpy(crd->crd_iv, &blkno, sizeof(blkno));
 	}
-	crwu->cr_wu = wu;
-	crwu->cr_crp->crp_opaque = crwu;
 
 	return (crwu);
-
-unwind:
-	/* steal the descriptors back from the cryptop */
-	crwu->cr_crp->crp_desc = NULL;
-
-	return (NULL);
-}
-
-void
-sr_crypto_wu_put(struct sr_crypto_wu *crwu)
-{
-	struct cryptop		*crp = crwu->cr_crp;
-	struct sr_workunit	*wu = crwu->cr_wu;
-	struct sr_discipline	*sd = wu->swu_dis;
-
-	DNPRINTF(SR_D_DIS, "%s: sr_crypto_wu_put crwu: %p\n",
-	    DEVNAME(wu->swu_dis->sd_sc), crwu);
-
-	/* steal the descriptors back from the cryptop */
-	crp->crp_desc = NULL;
-
-	mtx_enter(&sd->mds.mdd_crypto.scr_mutex);
-	TAILQ_INSERT_TAIL(&sd->mds.mdd_crypto.scr_wus, crwu, cr_link);
-	mtx_leave(&sd->mds.mdd_crypto.scr_mutex);
 }
 
 int
@@ -386,9 +339,8 @@
 		if (sizeof(sd->mds.mdd_crypto.scr_meta->scm_kdfhint) <
 		    kdfinfo->genkdf.len)
 			goto out;
-		bcopy(&kdfinfo->genkdf,
-		    sd->mds.mdd_crypto.scr_meta->scm_kdfhint,
-		    kdfinfo->genkdf.len);
+		memcpy(sd->mds.mdd_crypto.scr_meta->scm_kdfhint,
+		    &kdfinfo->genkdf, kdfinfo->genkdf.len);
 	}
 
 	/* copy mask key to run-time meta data */
@@ -396,7 +348,7 @@
 		if (sizeof(sd->mds.mdd_crypto.scr_maskkey) <
 		    sizeof(kdfinfo->maskkey))
 			goto out;
-		bcopy(&kdfinfo->maskkey, sd->mds.mdd_crypto.scr_maskkey,
+		memcpy(sd->mds.mdd_crypto.scr_maskkey, &kdfinfo->maskkey,
 		    sizeof(kdfinfo->maskkey));
 	}
 
@@ -404,7 +356,7 @@
 	rv = 0;
 out:
 	explicit_bzero(kdfinfo, bc->bc_opaque_size);
-	free(kdfinfo, M_DEVBUF);
+	free(kdfinfo, M_DEVBUF, bc->bc_opaque_size);
 
 	return (rv);
 }
@@ -424,7 +376,7 @@
 		rv = 0;
 		break;
 	default:
-		DNPRINTF(SR_D_DIS, "%s: unsupported encryption algorithm %u\n",
+		DNPRINTF(SR_D_DIS, "%s: unsupported encryption algorithm %d\n",
 		    "softraid", alg);
 		rv = -1;
 		goto out;
@@ -450,7 +402,7 @@
 		rv = 0;
 		break;
 	default:
-		DNPRINTF(SR_D_DIS, "%s: unsupported encryption algorithm %u\n",
+		DNPRINTF(SR_D_DIS, "%s: unsupported encryption algorithm %d\n",
 		    "softraid", alg);
 		rv = -1;
 		goto out;
@@ -615,6 +567,17 @@
 		sr_error(sd->sd_sc, "incorrect key or passphrase");
 		rv = EPERM;
 		goto out;
+	}
+
+	/* Copy new KDF hint to metadata, if supplied. */
+	if (kdfinfo2->flags & SR_CRYPTOKDF_HINT) {
+		if (kdfinfo2->genkdf.len >
+		    sizeof(sd->mds.mdd_crypto.scr_meta->scm_kdfhint))
+			goto out;
+		explicit_bzero(sd->mds.mdd_crypto.scr_meta->scm_kdfhint,
+		    sizeof(sd->mds.mdd_crypto.scr_meta->scm_kdfhint));
+		memcpy(sd->mds.mdd_crypto.scr_meta->scm_kdfhint,
+		    &kdfinfo2->genkdf, kdfinfo2->genkdf.len);
 	}
 
 	/* Mask the disk keys. */
@@ -630,7 +593,7 @@
 	    sizeof(sd->mds.mdd_crypto.scr_key), check_digest);
 
 	/* Copy new encrypted key and HMAC to metadata. */
-	bcopy(check_digest, sd->mds.mdd_crypto.scr_meta->chk_hmac_sha1.sch_mac,
+	memcpy(sd->mds.mdd_crypto.scr_meta->chk_hmac_sha1.sch_mac, check_digest,
 	    sizeof(sd->mds.mdd_crypto.scr_meta->chk_hmac_sha1.sch_mac));
 
 	rv = 0; /* Success */
@@ -638,7 +601,7 @@
 out:
 	if (p) {
 		explicit_bzero(p, ksz);
-		free(p, M_DEVBUF);
+		free(p, M_DEVBUF, ksz);
 	}
 
 	explicit_bzero(check_digest, sizeof(check_digest));
@@ -686,7 +649,7 @@
 		DNPRINTF(SR_D_META,"%s: sr_crypto_create_key_disk cannot "
 		    "open %s\n", DEVNAME(sc), devname);
 		vput(vn);
-		goto fail;
+		goto done;
 	}
 	open = 1; /* close dev on error */
 
@@ -696,19 +659,12 @@
 	    FREAD, NOCRED, curproc)) {
 		DNPRINTF(SR_D_META, "%s: sr_crypto_create_key_disk ioctl "
 		    "failed\n", DEVNAME(sc));
-		VOP_CLOSE(vn, FREAD | FWRITE, NOCRED, curproc);
-		vput(vn);
-		goto fail;
+		goto done;
 	}
-	if (label.d_secsize != DEV_BSIZE) {
-		sr_error(sc, "%s has unsupported sector size (%d)",
-		    devname, label.d_secsize);
-		goto fail;
-	}
 	if (label.d_partitions[part].p_fstype != FS_RAID) {
-		sr_error(sc, "%s partition not of type RAID (%d)\n",
+		sr_error(sc, "%s partition not of type RAID (%d)",
 		    devname, label.d_partitions[part].p_fstype);
-		goto fail;
+		goto done;
 	}
 
 	/*
@@ -728,7 +684,7 @@
 	km->scmi.scm_size = 0;
 	km->scmi.scm_coerced_size = 0;
 	strlcpy(km->scmi.scm_devname, devname, sizeof(km->scmi.scm_devname));
-	bcopy(&sd->sd_meta->ssdi.ssd_uuid, &km->scmi.scm_uuid,
+	memcpy(&km->scmi.scm_uuid, &sd->sd_meta->ssdi.ssd_uuid,
 	    sizeof(struct sr_uuid));
 
 	sr_checksum(sc, km, &km->scm_checksum,
@@ -745,7 +701,7 @@
 	sm->ssdi.ssd_version = SR_META_VERSION;
 	sm->ssd_ondisk = 0;
 	sm->ssdi.ssd_vol_flags = 0;
-	bcopy(&sd->sd_meta->ssdi.ssd_uuid, &sm->ssdi.ssd_uuid,
+	memcpy(&sm->ssdi.ssd_uuid, &sd->sd_meta->ssdi.ssd_uuid,
 	    sizeof(struct sr_uuid));
 	sm->ssdi.ssd_chunk_no = 1;
 	sm->ssdi.ssd_volid = SR_KEYDISK_VOLID;
@@ -785,7 +741,7 @@
 	omi->omi_som->som_type = SR_OPT_KEYDISK;
 	omi->omi_som->som_length = sizeof(struct sr_meta_keydisk);
 	skm = (struct sr_meta_keydisk *)omi->omi_som;
-	bcopy(sd->mds.mdd_crypto.scr_maskkey, &skm->skm_maskkey,
+	memcpy(&skm->skm_maskkey, sd->mds.mdd_crypto.scr_maskkey,
 	    sizeof(skm->skm_maskkey));
 	SLIST_INSERT_HEAD(&fakesd->sd_meta_opt, omi, omi_link);
 	fakesd->sd_meta->ssdi.ssd_opt_no++;
@@ -799,19 +755,16 @@
 	goto done;
 
 fail:
-	if (key_disk)
-		free(key_disk, M_DEVBUF);
+	free(key_disk, M_DEVBUF, sizeof(struct sr_chunk));
 	key_disk = NULL;
 
 done:
-	if (omi)
-		free(omi, M_DEVBUF);
+	free(omi, M_DEVBUF, sizeof(struct sr_meta_opt_item));
 	if (fakesd && fakesd->sd_vol.sv_chunks)
-		free(fakesd->sd_vol.sv_chunks, M_DEVBUF);
-	if (fakesd)
-		free(fakesd, M_DEVBUF);
-	if (sm)
-		free(sm, M_DEVBUF);
+		free(fakesd->sd_vol.sv_chunks, M_DEVBUF,
+		    sizeof(struct sr_chunk *));
+	free(fakesd, M_DEVBUF, sizeof(struct sr_discipline));
+	free(sm, M_DEVBUF, sizeof(struct sr_metadata));
 	if (open) {
 		VOP_CLOSE(vn, FREAD | FWRITE, NOCRED, curproc);
 		vput(vn);
@@ -855,7 +808,7 @@
 		sr_error(sc, "cannot open key disk %s", devname);
 		goto done;
 	}
-	if (VOP_OPEN(vn, FREAD | FWRITE, NOCRED, curproc)) {
+	if (VOP_OPEN(vn, FREAD, NOCRED, curproc)) {
 		DNPRINTF(SR_D_META,"%s: sr_crypto_read_key_disk cannot "
 		    "open %s\n", DEVNAME(sc), devname);
 		vput(vn);
@@ -869,17 +822,10 @@
 	    NOCRED, curproc)) {
 		DNPRINTF(SR_D_META, "%s: sr_crypto_read_key_disk ioctl "
 		    "failed\n", DEVNAME(sc));
-		VOP_CLOSE(vn, FREAD | FWRITE, NOCRED, curproc);
-		vput(vn);
 		goto done;
 	}
-	if (label.d_secsize != DEV_BSIZE) {
-		sr_error(sc, "%s has unsupported sector size (%d)",
-		    devname, label.d_secsize);
-		goto done;
-	}
 	if (label.d_partitions[part].p_fstype != FS_RAID) {
-		sr_error(sc, "%s partition not of type RAID (%d)\n",
+		sr_error(sc, "%s partition not of type RAID (%d)",
 		    devname, label.d_partitions[part].p_fstype);
 		goto done;
 	}
 
@@ -887,7 +833,7 @@
 	/*
 	 * Read and validate key disk metadata.
 	 */
-	sm = malloc(SR_META_SIZE * 512, M_DEVBUF, M_WAITOK | M_ZERO);
+	sm = malloc(SR_META_SIZE * DEV_BSIZE, M_DEVBUF, M_WAITOK | M_ZERO);
 	if (sr_meta_native_read(sd, dev, sm, NULL)) {
 		sr_error(sc, "native bootprobe could not read native metadata");
 		goto done;
@@ -911,7 +857,7 @@
 	key_disk->src_vn = vn;
 	key_disk->src_size = 0;
 
-	bcopy((struct sr_meta_chunk *)(sm + 1), &key_disk->src_meta,
+	memcpy(&key_disk->src_meta, (struct sr_meta_chunk *)(sm + 1),
 	    sizeof(key_disk->src_meta));
 
 	/* Read mask key from optional metadata. */
@@ -920,13 +866,12 @@
 		omh = omi->omi_som;
 		if (omh->som_type == SR_OPT_KEYDISK) {
 			skm = (struct sr_meta_keydisk *)omh;
-			bcopy(&skm->skm_maskkey,
-			    sd->mds.mdd_crypto.scr_maskkey,
+			memcpy(sd->mds.mdd_crypto.scr_maskkey, &skm->skm_maskkey,
 			    sizeof(sd->mds.mdd_crypto.scr_maskkey));
 		} else if (omh->som_type == SR_OPT_CRYPTO) {
 			/* Original keydisk format with key in crypto area. */
-			bcopy(omh + sizeof(struct sr_meta_opt_hdr),
-			    sd->mds.mdd_crypto.scr_maskkey,
+			memcpy(sd->mds.mdd_crypto.scr_maskkey,
+			    omh + sizeof(struct sr_meta_opt_hdr),
 			    sizeof(sd->mds.mdd_crypto.scr_maskkey));
 		}
 	}
@@ -934,15 +879,13 @@
 	open = 0;
 
 done:
-	for (omi = SLIST_FIRST(&som); omi != SLIST_END(&som); omi = omi_next) {
+	for (omi = SLIST_FIRST(&som); omi != NULL; omi = omi_next) {
 		omi_next = SLIST_NEXT(omi, omi_link);
-		if (omi->omi_som)
-			free(omi->omi_som, M_DEVBUF);
-		free(omi, M_DEVBUF);
+		free(omi->omi_som, M_DEVBUF, 0);
+		free(omi, M_DEVBUF, sizeof(struct sr_meta_opt_item));
 	}
 
-	if (sm)
-		free(sm, M_DEVBUF);
+	free(sm, M_DEVBUF, SR_META_SIZE * DEV_BSIZE);
 
 	if (vn && open) {
 		VOP_CLOSE(vn, FREAD, NOCRED, curproc);
@@ -950,18 +893,45 @@
 	}
 
 	return key_disk;
+}
+
+static void
+sr_crypto_free_sessions(struct sr_discipline *sd)
+{
+	u_int			i;
+
+	for (i = 0; i < SR_CRYPTO_MAXKEYS; i++) {
+		if (sd->mds.mdd_crypto.scr_sid[i] != (u_int64_t)-1) {
+			crypto_freesession(sd->mds.mdd_crypto.scr_sid[i]);
+			sd->mds.mdd_crypto.scr_sid[i] = (u_int64_t)-1;
+		}
+	}
 }
 
 int
 sr_crypto_alloc_resources(struct sr_discipline *sd)
 {
-	struct cryptoini	cri;
+	struct sr_workunit	*wu;
 	struct sr_crypto_wu	*crwu;
+	struct cryptoini	cri;
 	u_int			num_keys, i;
 
 	DNPRINTF(SR_D_DIS, "%s: sr_crypto_alloc_resources\n",
 	    DEVNAME(sd->sd_sc));
 
+	sd->mds.mdd_crypto.scr_alg = CRYPTO_AES_XTS;
+	switch (sd->mds.mdd_crypto.scr_meta->scm_alg) {
+	case SR_CRYPTOA_AES_XTS_128:
+		sd->mds.mdd_crypto.scr_klen = 256;
+		break;
+	case SR_CRYPTOA_AES_XTS_256:
+		sd->mds.mdd_crypto.scr_klen = 512;
+		break;
+	default:
+		sr_error(sd->sd_sc, "unknown crypto algorithm");
+		return (EINVAL);
+	}
+
 	for (i = 0; i < SR_CRYPTO_MAXKEYS; i++)
 		sd->mds.mdd_crypto.scr_sid[i] = (u_int64_t)-1;
@@ -979,61 +949,34 @@
 	}
 
 	/*
-	 * For each wu allocate the uio, iovec and crypto structures.
-	 * these have to be allocated now because during runtime we can't
-	 * fail an allocation without failing the io (which can cause real
+	 * For each work unit allocate the uio, iovec and crypto structures.
+	 * These have to be allocated now because during runtime we cannot
+	 * fail an allocation without failing the I/O (which can cause real
 	 * problems).
 	 */
-	mtx_init(&sd->mds.mdd_crypto.scr_mutex, IPL_BIO);
-	TAILQ_INIT(&sd->mds.mdd_crypto.scr_wus);
-	for (i = 0; i < sd->sd_max_wu; i++) {
-		crwu = malloc(sizeof(*crwu), M_DEVBUF,
-		    M_WAITOK | M_ZERO | M_CANFAIL);
-		if (crwu == NULL)
-			return (ENOMEM);
-		/* put it on the list now so if we fail it'll be freed */
-		mtx_enter(&sd->mds.mdd_crypto.scr_mutex);
-		TAILQ_INSERT_TAIL(&sd->mds.mdd_crypto.scr_wus, crwu, cr_link);
-		mtx_leave(&sd->mds.mdd_crypto.scr_mutex);
-
+	TAILQ_FOREACH(wu, &sd->sd_wu, swu_next) {
+		crwu = (struct sr_crypto_wu *)wu;
 		crwu->cr_uio.uio_iov = &crwu->cr_iov;
 		crwu->cr_dmabuf = dma_alloc(MAXPHYS, PR_WAITOK);
 		crwu->cr_crp = crypto_getreq(MAXPHYS >> DEV_BSHIFT);
 		if (crwu->cr_crp == NULL)
 			return (ENOMEM);
-		/* steal the list of cryptodescs */
-		crwu->cr_descs = crwu->cr_crp->crp_desc;
-		crwu->cr_crp->crp_desc = NULL;
 	}
 
-	bzero(&cri, sizeof(cri));
-	cri.cri_alg = CRYPTO_AES_XTS;
-	switch (sd->mds.mdd_crypto.scr_meta->scm_alg) {
-	case SR_CRYPTOA_AES_XTS_128:
-		cri.cri_klen = 256;
-		break;
-	case SR_CRYPTOA_AES_XTS_256:
-		cri.cri_klen = 512;
-		break;
-	default:
-		return (EINVAL);
-	}
+	memset(&cri, 0, sizeof(cri));
+	cri.cri_alg = sd->mds.mdd_crypto.scr_alg;
+	cri.cri_klen = sd->mds.mdd_crypto.scr_klen;
 
-	/* Allocate a session for every 2^SR_CRYPTO_KEY_BLKSHIFT blocks */
-	num_keys = sd->sd_meta->ssdi.ssd_size >> SR_CRYPTO_KEY_BLKSHIFT;
-	if (num_keys >= SR_CRYPTO_MAXKEYS)
+	/* Allocate a session for every 2^SR_CRYPTO_KEY_BLKSHIFT blocks. */
+	num_keys = ((sd->sd_meta->ssdi.ssd_size - 1) >>
+	    SR_CRYPTO_KEY_BLKSHIFT) + 1;
+	if (num_keys > SR_CRYPTO_MAXKEYS)
 		return (EFBIG);
-	for (i = 0; i <= num_keys; i++) {
+	for (i = 0; i < num_keys; i++) {
 		cri.cri_key = sd->mds.mdd_crypto.scr_key[i];
 		if (crypto_newsession(&sd->mds.mdd_crypto.scr_sid[i],
 		    &cri, 0) != 0) {
-			for (i = 0;
-			    sd->mds.mdd_crypto.scr_sid[i] != (u_int64_t)-1;
-			    i++) {
-				crypto_freesession(
-				    sd->mds.mdd_crypto.scr_sid[i]);
-				sd->mds.mdd_crypto.scr_sid[i] = (u_int64_t)-1;
-			}
+			sr_crypto_free_sessions(sd);
 			return (EINVAL);
 		}
 	}
@@ -1046,39 +989,30 @@
 void
 sr_crypto_free_resources(struct sr_discipline *sd)
 {
+	struct sr_workunit	*wu;
 	struct sr_crypto_wu	*crwu;
-	u_int			i;
 
 	DNPRINTF(SR_D_DIS, "%s: sr_crypto_free_resources\n",
 	    DEVNAME(sd->sd_sc));
 
 	if (sd->mds.mdd_crypto.key_disk != NULL) {
-		explicit_bzero(sd->mds.mdd_crypto.key_disk, sizeof
-		    sd->mds.mdd_crypto.key_disk);
-		free(sd->mds.mdd_crypto.key_disk, M_DEVBUF);
+		explicit_bzero(sd->mds.mdd_crypto.key_disk,
+		    sizeof(*sd->mds.mdd_crypto.key_disk));
+		free(sd->mds.mdd_crypto.key_disk, M_DEVBUF,
+		    sizeof(*sd->mds.mdd_crypto.key_disk));
 	}
 
 	sr_hotplug_unregister(sd, sr_crypto_hotplug);
 
-	for (i = 0; sd->mds.mdd_crypto.scr_sid[i] != (u_int64_t)-1; i++) {
-		crypto_freesession(sd->mds.mdd_crypto.scr_sid[i]);
-		sd->mds.mdd_crypto.scr_sid[i] = (u_int64_t)-1;
-	}
+	sr_crypto_free_sessions(sd);
 
-	mtx_enter(&sd->mds.mdd_crypto.scr_mutex);
-	while ((crwu = TAILQ_FIRST(&sd->mds.mdd_crypto.scr_wus)) != NULL) {
-		TAILQ_REMOVE(&sd->mds.mdd_crypto.scr_wus, crwu, cr_link);
-
-		if (crwu->cr_dmabuf != NULL)
+	TAILQ_FOREACH(wu, &sd->sd_wu, swu_next) {
+		crwu = (struct sr_crypto_wu *)wu;
+		if (crwu->cr_dmabuf)
 			dma_free(crwu->cr_dmabuf, MAXPHYS);
-		if (crwu->cr_crp) {
-			/* twiddle cryptoreq back */
-			crwu->cr_crp->crp_desc = crwu->cr_descs;
+		if (crwu->cr_crp)
 			crypto_freereq(crwu->cr_crp);
-		}
-		free(crwu, M_DEVBUF);
 	}
-	mtx_leave(&sd->mds.mdd_crypto.scr_mutex);
 
 	sr_wu_free(sd);
 	sr_ccb_free(sd);
@@ -1165,65 +1099,60 @@
 sr_crypto_rw(struct sr_workunit *wu)
 {
 	struct sr_crypto_wu	*crwu;
-	int			s, rv = 0;
+	daddr_t			blkno;
+	int			rv = 0;
 
-	DNPRINTF(SR_D_DIS, "%s: sr_crypto_rw wu: %p\n",
+	DNPRINTF(SR_D_DIS, "%s: sr_crypto_rw wu %p\n",
 	    DEVNAME(wu->swu_dis->sd_sc), wu);
 
-	if (wu->swu_xs->flags & SCSI_DATA_OUT) {
-		crwu = sr_crypto_wu_get(wu, 1);
-		if (crwu == NULL)
-			return (1);
+	if (sr_validate_io(wu, &blkno, "sr_crypto_rw"))
+		return (1);
+
+	if (wu->swu_xs->flags & SCSI_DATA_OUT) {
+		crwu = sr_crypto_prepare(wu, 1);
 		crwu->cr_crp->crp_callback = sr_crypto_write;
-		s = splvm();
-		if (crypto_invoke(crwu->cr_crp))
-			rv = 1;
-		else
+		rv = crypto_dispatch(crwu->cr_crp);
+		if (rv == 0)
 			rv = crwu->cr_crp->crp_etype;
-		splx(s);
 	} else
-		rv = sr_crypto_rw2(wu, NULL);
+		rv = sr_crypto_dev_rw(wu, NULL);
 
 	return (rv);
 }
 
-int
+void
 sr_crypto_write(struct cryptop *crp)
 {
 	struct sr_crypto_wu	*crwu = crp->crp_opaque;
-	struct sr_workunit	*wu = crwu->cr_wu;
+	struct sr_workunit	*wu = &crwu->cr_wu;
 	int			s;
 
-	DNPRINTF(SR_D_INTR, "%s: sr_crypto_write: wu %x xs: %x\n",
+	DNPRINTF(SR_D_INTR, "%s: sr_crypto_write: wu %p xs: %p\n",
 	    DEVNAME(wu->swu_dis->sd_sc), wu, wu->swu_xs);
 
 	if (crp->crp_etype) {
 		/* fail io */
 		wu->swu_xs->error = XS_DRIVER_STUFFUP;
 		s = splbio();
-		sr_crypto_finish_io(wu);
+		sr_scsi_done(wu->swu_dis, wu->swu_xs);
 		splx(s);
 	}
 
-	return (sr_crypto_rw2(wu, crwu));
+	sr_crypto_dev_rw(wu, crwu);
 }
 
 int
-sr_crypto_rw2(struct sr_workunit *wu, struct sr_crypto_wu *crwu)
+sr_crypto_dev_rw(struct sr_workunit *wu, struct sr_crypto_wu *crwu)
 {
 	struct sr_discipline	*sd = wu->swu_dis;
 	struct scsi_xfer	*xs = wu->swu_xs;
 	struct sr_ccb		*ccb;
 	struct uio		*uio;
-	int			s;
-	daddr64_t		blk;
+	daddr_t			blkno;
 
-	if (sr_validate_io(wu, &blk, "sr_crypto_rw2"))
-		goto bad;
+	blkno = wu->swu_blk_start;
 
-	blk += sd->sd_meta->ssd_data_offset;
-
-	ccb = sr_ccb_rw(sd, 0, blk, xs->datalen, xs->data, xs->flags, 0);
+	ccb = sr_ccb_rw(sd, 0, blkno, xs->datalen, xs->data, xs->flags, 0);
 	if (!ccb) {
 		/* should never happen but handle more gracefully */
 		printf("%s: %s: too many ccbs queued\n",
@@ -1236,17 +1165,10 @@
 		ccb->ccb_opaque = crwu;
 	}
 	sr_wu_enqueue_ccb(wu, ccb);
+	sr_schedule_wu(wu);
 
-	s = splbio();
-
-	if (sr_check_io_collision(wu))
-		goto queued;
-
-	sr_raid_startwu(wu);
-
-queued:
-	splx(s);
 	return (0);
+
 bad:
 	/* wu is unwound by sr_wu_put */
 	if (crwu)
@@ -1259,77 +1181,39 @@
 {
 	struct scsi_xfer	*xs = wu->swu_xs;
 	struct sr_crypto_wu	*crwu;
-	struct sr_ccb		*ccb;
 	int			s;
 
	/* If this was a successful read, initiate decryption of the data. */
 	if (ISSET(xs->flags, SCSI_DATA_IN) && xs->error == XS_NOERROR) {
-		/* only fails on implementation error */
-		crwu = sr_crypto_wu_get(wu, 0);
-		if (crwu == NULL)
-			panic("sr_crypto_intr: no wu");
+		crwu = sr_crypto_prepare(wu, 0);
 		crwu->cr_crp->crp_callback = sr_crypto_read;
-		ccb = TAILQ_FIRST(&wu->swu_ccb);
-		if (ccb == NULL)
-			panic("sr_crypto_done: no ccbs on workunit");
-		ccb->ccb_opaque = crwu;
-		DNPRINTF(SR_D_INTR, "%s: sr_crypto_intr: crypto_invoke %p\n",
+		DNPRINTF(SR_D_INTR, "%s: sr_crypto_done: crypto_dispatch %p\n",
 		    DEVNAME(wu->swu_dis->sd_sc), crwu->cr_crp);
-		s = splvm();
-		crypto_invoke(crwu->cr_crp);
-		splx(s);
+		crypto_dispatch(crwu->cr_crp);
 		return;
 	}
 
 	s = splbio();
-	sr_crypto_finish_io(wu);
+	sr_scsi_done(wu->swu_dis, wu->swu_xs);
 	splx(s);
 }
 
 void
-sr_crypto_finish_io(struct sr_workunit *wu)
-{
-	struct sr_discipline	*sd = wu->swu_dis;
-	struct scsi_xfer	*xs = wu->swu_xs;
-	struct sr_ccb		*ccb;
-#ifdef SR_DEBUG
-	struct sr_softc		*sc = sd->sd_sc;
-#endif /* SR_DEBUG */
-
-	splassert(IPL_BIO);
-
-	DNPRINTF(SR_D_INTR, "%s: sr_crypto_finish_io: wu %x xs: %x\n",
-	    DEVNAME(sc), wu, xs);
-
-	if (wu->swu_cb_active == 1)
-		panic("%s: sr_crypto_finish_io", DEVNAME(sd->sd_sc));
-	TAILQ_FOREACH(ccb, &wu->swu_ccb, ccb_link) {
-		if (ccb->ccb_opaque == NULL)
-			continue;
-		sr_crypto_wu_put(ccb->ccb_opaque);
-	}
-
-	sr_scsi_done(sd, xs);
-}
-
-int
 sr_crypto_read(struct cryptop *crp)
 {
 	struct sr_crypto_wu	*crwu = crp->crp_opaque;
-	struct sr_workunit	*wu = crwu->cr_wu;
+	struct sr_workunit	*wu = &crwu->cr_wu;
 	int			s;
 
-	DNPRINTF(SR_D_INTR, "%s: sr_crypto_read: wu %x xs: %x\n",
+	DNPRINTF(SR_D_INTR, "%s: sr_crypto_read: wu %p xs: %p\n",
 	    DEVNAME(wu->swu_dis->sd_sc), wu, wu->swu_xs);
 
 	if (crp->crp_etype)
 		wu->swu_xs->error = XS_DRIVER_STUFFUP;
 
 	s = splbio();
-	sr_crypto_finish_io(wu);
+	sr_scsi_done(wu->swu_dis, wu->swu_xs);
 	splx(s);
-
-	return (0);
 }
 
 void
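
The heart of this diff is the reworked sr_crypto_prepare(): work units are now embedded in struct sr_crypto_wu (cr_wu must be first so the pointer casts are valid), and each I/O gets one AES-XTS crypto descriptor per DEV_BSIZE sector, with the disk block number serving both as the XTS IV/tweak (the memcpy into crd_iv) and, shifted right by SR_CRYPTO_KEY_BLKSHIFT, as the key-zone index used to select the crypto session. The standalone userland sketch below models just that mapping. It is illustrative only: the struct and function names here are invented, and SR_CRYPTO_KEY_BLKSHIFT = 30 is an assumption inferred from the "0.5TB boundary" remark in the new comment (2^30 blocks of 512 bytes).

#include <stdint.h>
#include <stdio.h>

#define DEV_BSHIFT		9	/* 512-byte sectors, as in sys/param.h */
#define SR_CRYPTO_KEY_BLKSHIFT	30	/* assumed: a new key every 2^30 blocks */

/* Simplified stand-in for struct cryptodesc. */
struct desc_sketch {
	int		skip;	/* byte offset of this sector in the buffer */
	int		len;	/* always one sector */
	int64_t		iv;	/* block number, used directly as the XTS tweak */
	unsigned int	keyndx;	/* key zone this sector falls in */
};

/* One descriptor per sector, mirroring the loop in sr_crypto_prepare(). */
static int
prepare_descs(int64_t blkno, int datalen, struct desc_sketch *d)
{
	int i, n = datalen >> DEV_BSHIFT;

	for (i = 0; i < n; i++, blkno++) {
		d[i].skip = i << DEV_BSHIFT;
		d[i].len = 1 << DEV_BSHIFT;
		d[i].iv = blkno;	/* the driver does memcpy(crd->crd_iv, &blkno, ...) */
		d[i].keyndx = blkno >> SR_CRYPTO_KEY_BLKSHIFT;
	}
	return n;
}

int
main(void)
{
	struct desc_sketch d[4];
	/* Start two sectors below a key-zone boundary to show the XXX case. */
	int64_t blkno = ((int64_t)1 << SR_CRYPTO_KEY_BLKSHIFT) - 2;
	int i, n = prepare_descs(blkno, 4 * (1 << DEV_BSHIFT), d);

	for (i = 0; i < n; i++)
		printf("desc %d: skip %4d iv %lld keyndx %u\n",
		    i, d[i].skip, (long long)d[i].iv, d[i].keyndx);
	return 0;
}

Run as-is, the first two descriptors report keyndx 0 and the last two keyndx 1: exactly the zone-spanning request that the new XXX comment calls out, and that the driver currently sidesteps by keying every descriptor with scr_key[0] and a single crp_sid.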