Commit 09646c9

tzanussi authored and herbertx committed
crypto: iaa - Add irq support for the crypto async interface
The existing iaa crypto async support provides an implementation that satisfies the interface but does so in a synchronous manner - it fills and submits the IDXD descriptor and then waits for it to complete before returning. This isn't a problem at the moment, since all existing callers (e.g. zswap) wrap any asynchronous callees in a synchronous wrapper anyway.

This change makes the iaa crypto async implementation truly asynchronous: it fills and submits the IDXD descriptor, then returns immediately with -EINPROGRESS. It also sets the descriptor's 'request completion irq' bit and sets up a callback with the IDXD driver which is called when the operation completes and the irq fires. The existing callers such as zswap use synchronous wrappers to deal with -EINPROGRESS and so work as expected without any changes.

This mode can be enabled by writing 'async_irq' to the sync_mode iaa_crypto driver attribute:

  echo async_irq > /sys/bus/dsa/drivers/crypto/sync_mode

Async mode without interrupts (caller must poll) can be enabled by writing 'async' to it:

  echo async > /sys/bus/dsa/drivers/crypto/sync_mode

The default sync mode can be enabled by writing 'sync' to it:

  echo sync > /sys/bus/dsa/drivers/crypto/sync_mode

The sync_mode setting in effect at the time the IAA algorithms are registered is captured in each algorithm's crypto_ctx and used for all compresses and decompresses when using a given algorithm.

Signed-off-by: Tom Zanussi <[email protected]>
Signed-off-by: Herbert Xu <[email protected]>
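As context for the 'synchronous wrapper' the message refers to, here is a minimal sketch (not part of this commit) of how a caller can drive the async acomp interface synchronously using the kernel's standard crypto wait helpers; zswap follows essentially this pattern around crypto_acomp_compress():

	DECLARE_CRYPTO_WAIT(wait);

	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);

	/* crypto_wait_req() turns an -EINPROGRESS return into a blocking wait */
	ret = crypto_wait_req(crypto_acomp_compress(req), &wait);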
1 parent: 2ec6761

2 files changed: +266, -2 lines

drivers/crypto/intel/iaa/iaa_crypto.h

Lines changed: 2 additions & 0 deletions
@@ -153,6 +153,8 @@ enum iaa_mode {
 struct iaa_compression_ctx {
 	enum iaa_mode mode;
 	bool verify_compress;
+	bool async_mode;
+	bool use_irq;
 };

 #endif

drivers/crypto/intel/iaa/iaa_crypto_main.c

Lines changed: 264 additions & 2 deletions
@@ -122,6 +122,102 @@ static ssize_t verify_compress_store(struct device_driver *driver,
 }
 static DRIVER_ATTR_RW(verify_compress);

+/*
+ * The iaa crypto driver supports three 'sync' methods determining how
+ * compressions and decompressions are performed:
+ *
+ * - sync:      the compression or decompression completes before
+ *              returning.  This is the mode used by the async crypto
+ *              interface when the sync mode is set to 'sync' and by
+ *              the sync crypto interface regardless of setting.
+ *
+ * - async:     the compression or decompression is submitted and returns
+ *              immediately.  Completion interrupts are not used so
+ *              the caller is responsible for polling the descriptor
+ *              for completion.  This mode is applicable to only the
+ *              async crypto interface and is ignored for anything
+ *              else.
+ *
+ * - async_irq: the compression or decompression is submitted and
+ *              returns immediately.  Completion interrupts are
+ *              enabled so the caller can wait for the completion and
+ *              yield to other threads.  When the compression or
+ *              decompression completes, the completion is signaled
+ *              and the caller awakened.  This mode is applicable to
+ *              only the async crypto interface and is ignored for
+ *              anything else.
+ *
+ * These modes can be set using the iaa_crypto sync_mode driver
+ * attribute.
+ */

+/* Use async mode */
+static bool async_mode;
+/* Use interrupts */
+static bool use_irq;

+/**
+ * set_iaa_sync_mode - Set IAA sync mode
+ * @name: The name of the sync mode
+ *
+ * Make the IAA sync mode named @name the current sync mode used by
+ * compression/decompression.
+ */
+static int set_iaa_sync_mode(const char *name)
+{
+	int ret = 0;
+
+	if (sysfs_streq(name, "sync")) {
+		async_mode = false;
+		use_irq = false;
+	} else if (sysfs_streq(name, "async")) {
+		async_mode = true;
+		use_irq = false;
+	} else if (sysfs_streq(name, "async_irq")) {
+		async_mode = true;
+		use_irq = true;
+	} else {
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static ssize_t sync_mode_show(struct device_driver *driver, char *buf)
+{
+	int ret = 0;
+
+	if (!async_mode && !use_irq)
+		ret = sprintf(buf, "%s\n", "sync");
+	else if (async_mode && !use_irq)
+		ret = sprintf(buf, "%s\n", "async");
+	else if (async_mode && use_irq)
+		ret = sprintf(buf, "%s\n", "async_irq");
+
+	return ret;
+}
+
+static ssize_t sync_mode_store(struct device_driver *driver,
+			       const char *buf, size_t count)
+{
+	int ret = -EBUSY;
+
+	mutex_lock(&iaa_devices_lock);
+
+	if (iaa_crypto_enabled)
+		goto out;
+
+	ret = set_iaa_sync_mode(buf);
+	if (ret == 0)
+		ret = count;
+out:
+	mutex_unlock(&iaa_devices_lock);
+
+	return ret;
+}
+static DRIVER_ATTR_RW(sync_mode);
+
 static struct iaa_compression_mode *iaa_compression_modes[IAA_COMP_MODES_MAX];

 static int find_empty_iaa_compression_mode(void)
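For reference, the current mode can be read back through the same attribute; sync_mode_show() above prints whichever of the three names is active. A typical session (assuming the driver is loaded but not yet enabled, since sync_mode_store() returns -EBUSY once iaa_crypto_enabled is set):

  # echo async_irq > /sys/bus/dsa/drivers/crypto/sync_mode
  # cat /sys/bus/dsa/drivers/crypto/sync_mode
  async_irq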
@@ -1001,6 +1097,111 @@ static int deflate_generic_decompress(struct acomp_req *req)
 	return ret;
 }

+static int iaa_remap_for_verify(struct device *dev, struct iaa_wq *iaa_wq,
+				struct acomp_req *req,
+				dma_addr_t *src_addr, dma_addr_t *dst_addr);
+
+static int iaa_compress_verify(struct crypto_tfm *tfm, struct acomp_req *req,
+			       struct idxd_wq *wq,
+			       dma_addr_t src_addr, unsigned int slen,
+			       dma_addr_t dst_addr, unsigned int *dlen,
+			       u32 compression_crc);
+
+static void iaa_desc_complete(struct idxd_desc *idxd_desc,
+			      enum idxd_complete_type comp_type,
+			      bool free_desc, void *__ctx,
+			      u32 *status)
+{
+	struct iaa_device_compression_mode *active_compression_mode;
+	struct iaa_compression_ctx *compression_ctx;
+	struct crypto_ctx *ctx = __ctx;
+	struct iaa_device *iaa_device;
+	struct idxd_device *idxd;
+	struct iaa_wq *iaa_wq;
+	struct pci_dev *pdev;
+	struct device *dev;
+	int ret, err = 0;
+
+	compression_ctx = crypto_tfm_ctx(ctx->tfm);
+
+	iaa_wq = idxd_wq_get_private(idxd_desc->wq);
+	iaa_device = iaa_wq->iaa_device;
+	idxd = iaa_device->idxd;
+	pdev = idxd->pdev;
+	dev = &pdev->dev;
+
+	active_compression_mode = get_iaa_device_compression_mode(iaa_device,
+								  compression_ctx->mode);
+	dev_dbg(dev, "%s: compression mode %s,"
+		" ctx->src_addr %llx, ctx->dst_addr %llx\n", __func__,
+		active_compression_mode->name,
+		ctx->src_addr, ctx->dst_addr);
+
+	ret = check_completion(dev, idxd_desc->iax_completion,
+			       ctx->compress, false);
+	if (ret) {
+		dev_dbg(dev, "%s: check_completion failed ret=%d\n", __func__, ret);
+		if (!ctx->compress &&
+		    idxd_desc->iax_completion->status == IAA_ANALYTICS_ERROR) {
+			pr_warn("%s: falling back to deflate-generic decompress, "
+				"analytics error code %x\n", __func__,
+				idxd_desc->iax_completion->error_code);
+			ret = deflate_generic_decompress(ctx->req);
+			if (ret) {
+				dev_dbg(dev, "%s: deflate-generic failed ret=%d\n",
+					__func__, ret);
+				err = -EIO;
+				goto err;
+			}
+		} else {
+			err = -EIO;
+			goto err;
+		}
+	} else {
+		ctx->req->dlen = idxd_desc->iax_completion->output_size;
+	}
+
+	if (ctx->compress && compression_ctx->verify_compress) {
+		dma_addr_t src_addr, dst_addr;
+		u32 compression_crc;
+
+		compression_crc = idxd_desc->iax_completion->crc;
+
+		ret = iaa_remap_for_verify(dev, iaa_wq, ctx->req, &src_addr, &dst_addr);
+		if (ret) {
+			dev_dbg(dev, "%s: compress verify remap failed ret=%d\n", __func__, ret);
+			err = -EIO;
+			goto out;
+		}
+
+		ret = iaa_compress_verify(ctx->tfm, ctx->req, iaa_wq->wq, src_addr,
+					  ctx->req->slen, dst_addr, &ctx->req->dlen,
+					  compression_crc);
+		if (ret) {
+			dev_dbg(dev, "%s: compress verify failed ret=%d\n", __func__, ret);
+			err = -EIO;
+		}
+
+		dma_unmap_sg(dev, ctx->req->dst, sg_nents(ctx->req->dst), DMA_TO_DEVICE);
+		dma_unmap_sg(dev, ctx->req->src, sg_nents(ctx->req->src), DMA_FROM_DEVICE);
+
+		goto out;
+	}
+err:
+	dma_unmap_sg(dev, ctx->req->dst, sg_nents(ctx->req->dst), DMA_FROM_DEVICE);
+	dma_unmap_sg(dev, ctx->req->src, sg_nents(ctx->req->src), DMA_TO_DEVICE);
+out:
+	if (ret != 0)
+		dev_dbg(dev, "asynchronous compress failed ret=%d\n", ret);
+
+	if (ctx->req->base.complete)
+		acomp_request_complete(ctx->req, err);
+
+	if (free_desc)
+		idxd_free_desc(idxd_desc->wq, idxd_desc);
+	iaa_wq_put(idxd_desc->wq);
+}
+
 static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req,
 			struct idxd_wq *wq,
 			dma_addr_t src_addr, unsigned int slen,
@@ -1049,6 +1250,22 @@ static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req,
 	desc->src2_size = sizeof(struct aecs_comp_table_record);
 	desc->completion_addr = idxd_desc->compl_dma;

+	if (ctx->use_irq && !disable_async) {
+		desc->flags |= IDXD_OP_FLAG_RCI;
+
+		idxd_desc->crypto.req = req;
+		idxd_desc->crypto.tfm = tfm;
+		idxd_desc->crypto.src_addr = src_addr;
+		idxd_desc->crypto.dst_addr = dst_addr;
+		idxd_desc->crypto.compress = true;
+
+		dev_dbg(dev, "%s use_async_irq: compression mode %s,"
+			" src_addr %llx, dst_addr %llx\n", __func__,
+			active_compression_mode->name,
+			src_addr, dst_addr);
+	} else if (ctx->async_mode && !disable_async)
+		req->base.data = idxd_desc;
+
 	dev_dbg(dev, "%s: compression mode %s,"
 		" desc->src1_addr %llx, desc->src1_size %d,"
 		" desc->dst_addr %llx, desc->max_dst_size %d,"
@@ -1063,6 +1280,12 @@ static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req,
 		goto err;
 	}

+	if (ctx->async_mode && !disable_async) {
+		ret = -EINPROGRESS;
+		dev_dbg(dev, "%s: returning -EINPROGRESS\n", __func__);
+		goto out;
+	}
+
 	ret = check_completion(dev, idxd_desc->iax_completion, true, false);
 	if (ret) {
 		dev_dbg(dev, "check_completion failed ret=%d\n", ret);
@@ -1073,7 +1296,8 @@ static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req,

 	*compression_crc = idxd_desc->iax_completion->crc;

-	idxd_free_desc(wq, idxd_desc);
+	if (!ctx->async_mode)
+		idxd_free_desc(wq, idxd_desc);
 out:
 	return ret;
 err:
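Note that in plain 'async' mode the descriptor is deliberately not freed and has been stashed in req->base.data above, since the caller is responsible for polling it. A hypothetical sketch of such a caller-side poll, reusing the completion-record status field this file checks elsewhere (no polling helper is added by this commit):

	/* Hypothetical poll for plain 'async' mode; not part of this commit */
	struct idxd_desc *idxd_desc = req->base.data;

	while (!idxd_desc->iax_completion->status)
		cpu_relax();	/* busy-wait; a real caller might sleep or yield */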
@@ -1256,6 +1480,22 @@ static int iaa_decompress(struct crypto_tfm *tfm, struct acomp_req *req,
 	desc->src1_size = slen;
 	desc->completion_addr = idxd_desc->compl_dma;

+	if (ctx->use_irq && !disable_async) {
+		desc->flags |= IDXD_OP_FLAG_RCI;
+
+		idxd_desc->crypto.req = req;
+		idxd_desc->crypto.tfm = tfm;
+		idxd_desc->crypto.src_addr = src_addr;
+		idxd_desc->crypto.dst_addr = dst_addr;
+		idxd_desc->crypto.compress = false;
+
+		dev_dbg(dev, "%s: use_async_irq compression mode %s,"
+			" src_addr %llx, dst_addr %llx\n", __func__,
+			active_compression_mode->name,
+			src_addr, dst_addr);
+	} else if (ctx->async_mode && !disable_async)
+		req->base.data = idxd_desc;
+
 	dev_dbg(dev, "%s: decompression mode %s,"
 		" desc->src1_addr %llx, desc->src1_size %d,"
 		" desc->dst_addr %llx, desc->max_dst_size %d,"
@@ -1270,6 +1510,12 @@ static int iaa_decompress(struct crypto_tfm *tfm, struct acomp_req *req,
 		goto err;
 	}

+	if (ctx->async_mode && !disable_async) {
+		ret = -EINPROGRESS;
+		dev_dbg(dev, "%s: returning -EINPROGRESS\n", __func__);
+		goto out;
+	}
+
 	ret = check_completion(dev, idxd_desc->iax_completion, false, false);
 	if (ret) {
 		dev_dbg(dev, "%s: check_completion failed ret=%d\n", __func__, ret);
@@ -1292,7 +1538,8 @@ static int iaa_decompress(struct crypto_tfm *tfm, struct acomp_req *req,

 	*dlen = req->dlen;

-	idxd_free_desc(wq, idxd_desc);
+	if (!ctx->async_mode)
+		idxd_free_desc(wq, idxd_desc);
 out:
 	return ret;
 err:
@@ -1601,6 +1848,8 @@ static int iaa_comp_adecompress(struct acomp_req *req)
 static void compression_ctx_init(struct iaa_compression_ctx *ctx)
 {
 	ctx->verify_compress = iaa_verify_compress;
+	ctx->async_mode = async_mode;
+	ctx->use_irq = use_irq;
 }

 static int iaa_comp_init_fixed(struct crypto_acomp *acomp_tfm)
@@ -1809,6 +2058,7 @@ static struct idxd_device_driver iaa_crypto_driver = {
 	.remove = iaa_crypto_remove,
 	.name = IDXD_SUBDRIVER_NAME,
 	.type = dev_types,
+	.desc_complete = iaa_desc_complete,
 };

 static int __init iaa_crypto_init_module(void)
@@ -1847,10 +2097,20 @@ static int __init iaa_crypto_init_module(void)
 		goto err_verify_attr_create;
 	}

+	ret = driver_create_file(&iaa_crypto_driver.drv,
+				 &driver_attr_sync_mode);
+	if (ret) {
+		pr_debug("IAA sync mode attr creation failed\n");
+		goto err_sync_attr_create;
+	}
+
 	pr_debug("initialized\n");
 out:
 	return ret;

+err_sync_attr_create:
+	driver_remove_file(&iaa_crypto_driver.drv,
+			   &driver_attr_verify_compress);
 err_verify_attr_create:
 	idxd_driver_unregister(&iaa_crypto_driver);
 err_driver_reg:
@@ -1866,6 +2126,8 @@ static void __exit iaa_crypto_cleanup_module(void)
 	if (iaa_unregister_compression_device())
 		pr_debug("IAA compression device unregister failed\n");

+	driver_remove_file(&iaa_crypto_driver.drv,
+			   &driver_attr_sync_mode);
 	driver_remove_file(&iaa_crypto_driver.drv,
 			   &driver_attr_verify_compress);
 	idxd_driver_unregister(&iaa_crypto_driver);
