@@ -122,6 +122,102 @@ static ssize_t verify_compress_store(struct device_driver *driver,
}
static DRIVER_ATTR_RW(verify_compress);

+/*
+ * The iaa crypto driver supports three 'sync' methods determining how
+ * compressions and decompressions are performed:
+ *
+ * - sync:      the compression or decompression completes before
+ *              returning.  This is the mode used by the async crypto
+ *              interface when the sync mode is set to 'sync' and by
+ *              the sync crypto interface regardless of setting.
+ *
+ * - async:     the compression or decompression is submitted and returns
+ *              immediately.  Completion interrupts are not used so
+ *              the caller is responsible for polling the descriptor
+ *              for completion.  This mode is applicable to only the
+ *              async crypto interface and is ignored for anything
+ *              else.
+ *
+ * - async_irq: the compression or decompression is submitted and
+ *              returns immediately.  Completion interrupts are
+ *              enabled so the caller can wait for the completion and
+ *              yield to other threads.  When the compression or
+ *              decompression completes, the completion is signaled
+ *              and the caller awakened.  This mode is applicable to
+ *              only the async crypto interface and is ignored for
+ *              anything else.
+ *
+ * These modes can be set using the iaa_crypto sync_mode driver
+ * attribute.
+ */
+
+/* Use async mode */
+static bool async_mode;
+/* Use interrupts */
+static bool use_irq;
+
+/**
+ * set_iaa_sync_mode - Set IAA sync mode
+ * @name: The name of the sync mode
+ *
+ * Make the IAA sync mode named @name the current sync mode used by
+ * compression/decompression.
+ */
+
+static int set_iaa_sync_mode(const char *name)
+{
+	int ret = 0;
+
+	if (sysfs_streq(name, "sync")) {
+		async_mode = false;
+		use_irq = false;
+	} else if (sysfs_streq(name, "async")) {
+		async_mode = true;
+		use_irq = false;
+	} else if (sysfs_streq(name, "async_irq")) {
+		async_mode = true;
+		use_irq = true;
+	} else {
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static ssize_t sync_mode_show(struct device_driver *driver, char *buf)
+{
+	int ret = 0;
+
+	if (!async_mode && !use_irq)
+		ret = sprintf(buf, "%s\n", "sync");
+	else if (async_mode && !use_irq)
+		ret = sprintf(buf, "%s\n", "async");
+	else if (async_mode && use_irq)
+		ret = sprintf(buf, "%s\n", "async_irq");
+
+	return ret;
+}
+
+static ssize_t sync_mode_store(struct device_driver *driver,
+			       const char *buf, size_t count)
+{
+	int ret = -EBUSY;
+
+	mutex_lock(&iaa_devices_lock);
+
+	if (iaa_crypto_enabled)
+		goto out;
+
+	ret = set_iaa_sync_mode(buf);
+	if (ret == 0)
+		ret = count;
+out:
+	mutex_unlock(&iaa_devices_lock);
+
+	return ret;
+}
+static DRIVER_ATTR_RW(sync_mode);
+
static struct iaa_compression_mode *iaa_compression_modes[IAA_COMP_MODES_MAX];

static int find_empty_iaa_compression_mode(void)
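
From userspace, the new attribute is written like any other driver attribute. A minimal sketch in C, not part of this patch, assuming the attribute is exposed at /sys/bus/dsa/drivers/crypto/sync_mode (the idxd "crypto" subdriver on the dsa bus):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Hypothetical helper: select an IAA sync mode from userspace.
 * The sysfs path is an assumption based on the subdriver name. */
static int iaa_set_sync_mode(const char *mode)
{
	const char *path = "/sys/bus/dsa/drivers/crypto/sync_mode";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return -1;
	}
	/* The store handler rejects the write with EBUSY once the
	 * driver is enabled, so set the mode before enabling. */
	if (write(fd, mode, strlen(mode)) < 0) {
		perror("write");
		close(fd);
		return -1;
	}
	return close(fd);
}

int main(void)
{
	return iaa_set_sync_mode("async_irq") ? 1 : 0;
}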
@@ -1001,6 +1097,111 @@ static int deflate_generic_decompress(struct acomp_req *req)
	return ret;
}

+static int iaa_remap_for_verify(struct device *dev, struct iaa_wq *iaa_wq,
+				struct acomp_req *req,
+				dma_addr_t *src_addr, dma_addr_t *dst_addr);
+
+static int iaa_compress_verify(struct crypto_tfm *tfm, struct acomp_req *req,
+			       struct idxd_wq *wq,
+			       dma_addr_t src_addr, unsigned int slen,
+			       dma_addr_t dst_addr, unsigned int *dlen,
+			       u32 compression_crc);
+
+static void iaa_desc_complete(struct idxd_desc *idxd_desc,
+			      enum idxd_complete_type comp_type,
+			      bool free_desc, void *__ctx,
+			      u32 *status)
+{
+	struct iaa_device_compression_mode *active_compression_mode;
+	struct iaa_compression_ctx *compression_ctx;
+	struct crypto_ctx *ctx = __ctx;
+	struct iaa_device *iaa_device;
+	struct idxd_device *idxd;
+	struct iaa_wq *iaa_wq;
+	struct pci_dev *pdev;
+	struct device *dev;
+	int ret, err = 0;
+
+	compression_ctx = crypto_tfm_ctx(ctx->tfm);
+
+	iaa_wq = idxd_wq_get_private(idxd_desc->wq);
+	iaa_device = iaa_wq->iaa_device;
+	idxd = iaa_device->idxd;
+	pdev = idxd->pdev;
+	dev = &pdev->dev;
+
+	active_compression_mode = get_iaa_device_compression_mode(iaa_device,
+								  compression_ctx->mode);
+	dev_dbg(dev, "%s: compression mode %s,"
+		" ctx->src_addr %llx, ctx->dst_addr %llx\n", __func__,
+		active_compression_mode->name,
+		ctx->src_addr, ctx->dst_addr);
+
+	ret = check_completion(dev, idxd_desc->iax_completion,
+			       ctx->compress, false);
+	if (ret) {
+		dev_dbg(dev, "%s: check_completion failed ret=%d\n", __func__, ret);
+		if (!ctx->compress &&
+		    idxd_desc->iax_completion->status == IAA_ANALYTICS_ERROR) {
+			pr_warn("%s: falling back to deflate-generic decompress, "
+				"analytics error code %x\n", __func__,
+				idxd_desc->iax_completion->error_code);
+			ret = deflate_generic_decompress(ctx->req);
+			if (ret) {
+				dev_dbg(dev, "%s: deflate-generic failed ret=%d\n",
+					__func__, ret);
+				err = -EIO;
+				goto err;
+			}
+		} else {
+			err = -EIO;
+			goto err;
+		}
+	} else {
+		ctx->req->dlen = idxd_desc->iax_completion->output_size;
+	}
+
+	if (ctx->compress && compression_ctx->verify_compress) {
+		dma_addr_t src_addr, dst_addr;
+		u32 compression_crc;
+
+		compression_crc = idxd_desc->iax_completion->crc;
+
+		ret = iaa_remap_for_verify(dev, iaa_wq, ctx->req, &src_addr, &dst_addr);
+		if (ret) {
+			dev_dbg(dev, "%s: compress verify remap failed ret=%d\n", __func__, ret);
+			err = -EIO;
+			goto out;
+		}
+
+		ret = iaa_compress_verify(ctx->tfm, ctx->req, iaa_wq->wq, src_addr,
+					  ctx->req->slen, dst_addr, &ctx->req->dlen,
+					  compression_crc);
+		if (ret) {
+			dev_dbg(dev, "%s: compress verify failed ret=%d\n", __func__, ret);
+			err = -EIO;
+		}
+
+		dma_unmap_sg(dev, ctx->req->dst, sg_nents(ctx->req->dst), DMA_TO_DEVICE);
+		dma_unmap_sg(dev, ctx->req->src, sg_nents(ctx->req->src), DMA_FROM_DEVICE);
+
+		goto out;
+	}
+err:
+	dma_unmap_sg(dev, ctx->req->dst, sg_nents(ctx->req->dst), DMA_FROM_DEVICE);
+	dma_unmap_sg(dev, ctx->req->src, sg_nents(ctx->req->src), DMA_TO_DEVICE);
+out:
+	if (ret != 0)
+		dev_dbg(dev, "asynchronous compress failed ret=%d\n", ret);
+
+	if (ctx->req->base.complete)
+		acomp_request_complete(ctx->req, err);
+
+	if (free_desc)
+		idxd_free_desc(idxd_desc->wq, idxd_desc);
+	iaa_wq_put(idxd_desc->wq);
+}
+
static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req,
			struct idxd_wq *wq,
			dma_addr_t src_addr, unsigned int slen,
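
iaa_desc_complete() above pulls its per-request state out of idxd_desc->crypto. The fields it relies on look roughly like the sketch below; this is an illustrative reconstruction based only on the accesses in the hunk, not the definition from this patch, which lives in the idxd driver headers:

/* Sketch of the per-descriptor crypto context consumed by
 * iaa_desc_complete(); field names match the accesses above. */
struct crypto_ctx {
	struct acomp_req *req;	/* the crypto request being completed */
	struct crypto_tfm *tfm;	/* transform; yields the iaa compression ctx */
	dma_addr_t src_addr;	/* DMA-mapped source buffer */
	dma_addr_t dst_addr;	/* DMA-mapped destination buffer */
	bool compress;		/* true for compress, false for decompress */
};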
@@ -1049,6 +1250,22 @@ static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req,
	desc->src2_size = sizeof(struct aecs_comp_table_record);
	desc->completion_addr = idxd_desc->compl_dma;

+	if (ctx->use_irq && !disable_async) {
+		desc->flags |= IDXD_OP_FLAG_RCI;
+
+		idxd_desc->crypto.req = req;
+		idxd_desc->crypto.tfm = tfm;
+		idxd_desc->crypto.src_addr = src_addr;
+		idxd_desc->crypto.dst_addr = dst_addr;
+		idxd_desc->crypto.compress = true;
+
+		dev_dbg(dev, "%s use_async_irq: compression mode %s,"
+			" src_addr %llx, dst_addr %llx\n", __func__,
+			active_compression_mode->name,
+			src_addr, dst_addr);
+	} else if (ctx->async_mode && !disable_async)
+		req->base.data = idxd_desc;
+
	dev_dbg(dev, "%s: compression mode %s,"
		" desc->src1_addr %llx, desc->src1_size %d,"
		" desc->dst_addr %llx, desc->max_dst_size %d,"
@@ -1063,6 +1280,12 @@ static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req,
		goto err;
	}

+	if (ctx->async_mode && !disable_async) {
+		ret = -EINPROGRESS;
+		dev_dbg(dev, "%s: returning -EINPROGRESS\n", __func__);
+		goto out;
+	}
+
	ret = check_completion(dev, idxd_desc->iax_completion, true, false);
	if (ret) {
		dev_dbg(dev, "check_completion failed ret=%d\n", ret);
@@ -1073,7 +1296,8 @@ static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req,

	*compression_crc = idxd_desc->iax_completion->crc;

-	idxd_free_desc(wq, idxd_desc);
+	if (!ctx->async_mode)
+		idxd_free_desc(wq, idxd_desc);
out:
	return ret;
err:
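
The -EINPROGRESS return added above is the standard async crypto convention, so a caller can wait on the completion with the generic crypto wait helpers. A minimal kernel-side sketch, not part of this patch, assuming sync_mode=async_irq so the completion interrupt fires:

#include <linux/crypto.h>
#include <crypto/acompress.h>

/* Illustrative caller for the async path: the driver returns
 * -EINPROGRESS and the completion IRQ wakes the waiter via
 * crypto_req_done(); crypto_wait_req() folds -EINPROGRESS into
 * the final status. */
static int example_acomp_compress(struct crypto_acomp *tfm,
				  struct scatterlist *src, unsigned int slen,
				  struct scatterlist *dst, unsigned int dlen)
{
	DECLARE_CRYPTO_WAIT(wait);
	struct acomp_req *req;
	int ret;

	req = acomp_request_alloc(tfm);
	if (!req)
		return -ENOMEM;

	acomp_request_set_params(req, src, dst, slen, dlen);
	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);

	ret = crypto_wait_req(crypto_acomp_compress(req), &wait);

	acomp_request_free(req);
	return ret;
}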
@@ -1256,6 +1480,22 @@ static int iaa_decompress(struct crypto_tfm *tfm, struct acomp_req *req,
	desc->src1_size = slen;
	desc->completion_addr = idxd_desc->compl_dma;

+	if (ctx->use_irq && !disable_async) {
+		desc->flags |= IDXD_OP_FLAG_RCI;
+
+		idxd_desc->crypto.req = req;
+		idxd_desc->crypto.tfm = tfm;
+		idxd_desc->crypto.src_addr = src_addr;
+		idxd_desc->crypto.dst_addr = dst_addr;
+		idxd_desc->crypto.compress = false;
+
+		dev_dbg(dev, "%s: use_async_irq compression mode %s,"
+			" src_addr %llx, dst_addr %llx\n", __func__,
+			active_compression_mode->name,
+			src_addr, dst_addr);
+	} else if (ctx->async_mode && !disable_async)
+		req->base.data = idxd_desc;
+
	dev_dbg(dev, "%s: decompression mode %s,"
		" desc->src1_addr %llx, desc->src1_size %d,"
		" desc->dst_addr %llx, desc->max_dst_size %d,"
@@ -1270,6 +1510,12 @@ static int iaa_decompress(struct crypto_tfm *tfm, struct acomp_req *req,
		goto err;
	}

+	if (ctx->async_mode && !disable_async) {
+		ret = -EINPROGRESS;
+		dev_dbg(dev, "%s: returning -EINPROGRESS\n", __func__);
+		goto out;
+	}
+
	ret = check_completion(dev, idxd_desc->iax_completion, false, false);
	if (ret) {
		dev_dbg(dev, "%s: check_completion failed ret=%d\n", __func__, ret);
@@ -1292,7 +1538,8 @@ static int iaa_decompress(struct crypto_tfm *tfm, struct acomp_req *req,

	*dlen = req->dlen;

-	idxd_free_desc(wq, idxd_desc);
+	if (!ctx->async_mode)
+		idxd_free_desc(wq, idxd_desc);
out:
	return ret;
err:
@@ -1601,6 +1848,8 @@ static int iaa_comp_adecompress(struct acomp_req *req)
static void compression_ctx_init(struct iaa_compression_ctx *ctx)
{
	ctx->verify_compress = iaa_verify_compress;
+	ctx->async_mode = async_mode;
+	ctx->use_irq = use_irq;
}

static int iaa_comp_init_fixed(struct crypto_acomp *acomp_tfm)
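
Note that compression_ctx_init() snapshots the module-wide async_mode and use_irq values into the per-tfm context, so a sync_mode change only affects transforms allocated afterward; sync_mode_store() additionally rejects writes with -EBUSY once the driver is enabled.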
@@ -1809,6 +2058,7 @@ static struct idxd_device_driver iaa_crypto_driver = {
	.remove = iaa_crypto_remove,
	.name = IDXD_SUBDRIVER_NAME,
	.type = dev_types,
+	.desc_complete = iaa_desc_complete,
};

static int __init iaa_crypto_init_module(void)
@@ -1847,10 +2097,20 @@ static int __init iaa_crypto_init_module(void)
		goto err_verify_attr_create;
	}

+	ret = driver_create_file(&iaa_crypto_driver.drv,
+				 &driver_attr_sync_mode);
+	if (ret) {
+		pr_debug("IAA sync mode attr creation failed\n");
+		goto err_sync_attr_create;
+	}
+
	pr_debug("initialized\n");
out:
	return ret;

+err_sync_attr_create:
+	driver_remove_file(&iaa_crypto_driver.drv,
+			   &driver_attr_verify_compress);
err_verify_attr_create:
	idxd_driver_unregister(&iaa_crypto_driver);
err_driver_reg:
@@ -1866,6 +2126,8 @@ static void __exit iaa_crypto_cleanup_module(void)
	if (iaa_unregister_compression_device())
		pr_debug("IAA compression device unregister failed\n");

+	driver_remove_file(&iaa_crypto_driver.drv,
+			   &driver_attr_sync_mode);
	driver_remove_file(&iaa_crypto_driver.drv,
			   &driver_attr_verify_compress);
	idxd_driver_unregister(&iaa_crypto_driver);