@@ -130,8 +130,7 @@ struct rdma_id_private {
 
 	struct completion	comp;
 	atomic_t		refcount;
-	wait_queue_head_t	wait_remove;
-	atomic_t		dev_remove;
+	struct mutex		handler_mutex;
 
 	int			backlog;
 	int			timeout_ms;
@@ -355,26 +354,15 @@ static void cma_deref_id(struct rdma_id_private *id_priv)
 		complete(&id_priv->comp);
 }
 
-static int cma_disable_remove(struct rdma_id_private *id_priv,
+static int cma_disable_callback(struct rdma_id_private *id_priv,
 			      enum cma_state state)
 {
-	unsigned long flags;
-	int ret;
-
-	spin_lock_irqsave(&id_priv->lock, flags);
-	if (id_priv->state == state) {
-		atomic_inc(&id_priv->dev_remove);
-		ret = 0;
-	} else
-		ret = -EINVAL;
-	spin_unlock_irqrestore(&id_priv->lock, flags);
-	return ret;
-}
-
-static void cma_enable_remove(struct rdma_id_private *id_priv)
-{
-	if (atomic_dec_and_test(&id_priv->dev_remove))
-		wake_up(&id_priv->wait_remove);
+	mutex_lock(&id_priv->handler_mutex);
+	if (id_priv->state != state) {
+		mutex_unlock(&id_priv->handler_mutex);
+		return -EINVAL;
+	}
+	return 0;
 }
 
 static int cma_has_cm_dev(struct rdma_id_private *id_priv)
@@ -399,8 +387,7 @@ struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
 	mutex_init(&id_priv->qp_mutex);
 	init_completion(&id_priv->comp);
 	atomic_set(&id_priv->refcount, 1);
-	init_waitqueue_head(&id_priv->wait_remove);
-	atomic_set(&id_priv->dev_remove, 0);
+	mutex_init(&id_priv->handler_mutex);
 	INIT_LIST_HEAD(&id_priv->listen_list);
 	INIT_LIST_HEAD(&id_priv->mc_list);
 	get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);
@@ -927,7 +914,7 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 	struct rdma_cm_event event;
 	int ret = 0;
 
-	if (cma_disable_remove(id_priv, CMA_CONNECT))
+	if (cma_disable_callback(id_priv, CMA_CONNECT))
 		return 0;
 
 	memset(&event, 0, sizeof event);
@@ -984,12 +971,12 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 		/* Destroy the CM ID by returning a non-zero value. */
 		id_priv->cm_id.ib = NULL;
 		cma_exch(id_priv, CMA_DESTROYING);
-		cma_enable_remove(id_priv);
+		mutex_unlock(&id_priv->handler_mutex);
 		rdma_destroy_id(&id_priv->id);
 		return ret;
 	}
 out:
-	cma_enable_remove(id_priv);
+	mutex_unlock(&id_priv->handler_mutex);
 	return ret;
 }
 
@@ -1101,7 +1088,7 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 	int offset, ret;
 
 	listen_id = cm_id->context;
-	if (cma_disable_remove(listen_id, CMA_LISTEN))
+	if (cma_disable_callback(listen_id, CMA_LISTEN))
 		return -ECONNABORTED;
 
 	memset(&event, 0, sizeof event);
@@ -1122,7 +1109,7 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 		goto out;
 	}
 
-	atomic_inc(&conn_id->dev_remove);
+	mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
 	mutex_lock(&lock);
 	ret = cma_acquire_dev(conn_id);
 	mutex_unlock(&lock);
@@ -1144,7 +1131,7 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 		    !cma_is_ud_ps(conn_id->id.ps))
 			ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
 		mutex_unlock(&lock);
-		cma_enable_remove(conn_id);
+		mutex_unlock(&conn_id->handler_mutex);
 		goto out;
 	}
 
@@ -1153,11 +1140,11 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 
 release_conn_id:
 	cma_exch(conn_id, CMA_DESTROYING);
-	cma_enable_remove(conn_id);
+	mutex_unlock(&conn_id->handler_mutex);
 	rdma_destroy_id(&conn_id->id);
 
 out:
-	cma_enable_remove(listen_id);
+	mutex_unlock(&listen_id->handler_mutex);
 	return ret;
 }
 
@@ -1223,7 +1210,7 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
 	struct sockaddr_in *sin;
 	int ret = 0;
 
-	if (cma_disable_remove(id_priv, CMA_CONNECT))
+	if (cma_disable_callback(id_priv, CMA_CONNECT))
 		return 0;
 
 	memset(&event, 0, sizeof event);
@@ -1267,12 +1254,12 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
 		/* Destroy the CM ID by returning a non-zero value. */
 		id_priv->cm_id.iw = NULL;
 		cma_exch(id_priv, CMA_DESTROYING);
-		cma_enable_remove(id_priv);
+		mutex_unlock(&id_priv->handler_mutex);
 		rdma_destroy_id(&id_priv->id);
 		return ret;
 	}
 
-	cma_enable_remove(id_priv);
+	mutex_unlock(&id_priv->handler_mutex);
 	return ret;
 }
 
@@ -1288,7 +1275,7 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
 	struct ib_device_attr attr;
 
 	listen_id = cm_id->context;
-	if (cma_disable_remove(listen_id, CMA_LISTEN))
+	if (cma_disable_callback(listen_id, CMA_LISTEN))
 		return -ECONNABORTED;
 
 	/* Create a new RDMA id for the new IW CM ID */
@@ -1300,19 +1287,19 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
 		goto out;
 	}
 	conn_id = container_of(new_cm_id, struct rdma_id_private, id);
-	atomic_inc(&conn_id->dev_remove);
+	mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
 	conn_id->state = CMA_CONNECT;
 
 	dev = ip_dev_find(&init_net, iw_event->local_addr.sin_addr.s_addr);
 	if (!dev) {
 		ret = -EADDRNOTAVAIL;
-		cma_enable_remove(conn_id);
+		mutex_unlock(&conn_id->handler_mutex);
 		rdma_destroy_id(new_cm_id);
 		goto out;
 	}
 	ret = rdma_copy_addr(&conn_id->id.route.addr.dev_addr, dev, NULL);
 	if (ret) {
-		cma_enable_remove(conn_id);
+		mutex_unlock(&conn_id->handler_mutex);
 		rdma_destroy_id(new_cm_id);
 		goto out;
 	}
@@ -1321,7 +1308,7 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
 	ret = cma_acquire_dev(conn_id);
 	mutex_unlock(&lock);
 	if (ret) {
-		cma_enable_remove(conn_id);
+		mutex_unlock(&conn_id->handler_mutex);
 		rdma_destroy_id(new_cm_id);
 		goto out;
 	}
@@ -1337,7 +1324,7 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
 
 	ret = ib_query_device(conn_id->id.device, &attr);
 	if (ret) {
-		cma_enable_remove(conn_id);
+		mutex_unlock(&conn_id->handler_mutex);
 		rdma_destroy_id(new_cm_id);
 		goto out;
 	}
@@ -1353,14 +1340,17 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
 		/* User wants to destroy the CM ID */
 		conn_id->cm_id.iw = NULL;
 		cma_exch(conn_id, CMA_DESTROYING);
-		cma_enable_remove(conn_id);
+		mutex_unlock(&conn_id->handler_mutex);
 		rdma_destroy_id(&conn_id->id);
+		goto out;
 	}
 
+	mutex_unlock(&conn_id->handler_mutex);
+
 out:
 	if (dev)
 		dev_put(dev);
-	cma_enable_remove(listen_id);
+	mutex_unlock(&listen_id->handler_mutex);
 	return ret;
 }
 
@@ -1592,7 +1582,7 @@ static void cma_work_handler(struct work_struct *_work)
 	struct rdma_id_private *id_priv = work->id;
 	int destroy = 0;
 
-	atomic_inc(&id_priv->dev_remove);
+	mutex_lock(&id_priv->handler_mutex);
 	if (!cma_comp_exch(id_priv, work->old_state, work->new_state))
 		goto out;
 
@@ -1601,7 +1591,7 @@ static void cma_work_handler(struct work_struct *_work)
 		destroy = 1;
 	}
 out:
-	cma_enable_remove(id_priv);
+	mutex_unlock(&id_priv->handler_mutex);
 	cma_deref_id(id_priv);
 	if (destroy)
 		rdma_destroy_id(&id_priv->id);
@@ -1764,7 +1754,7 @@ static void addr_handler(int status, struct sockaddr *src_addr,
 	struct rdma_cm_event event;
 
 	memset(&event, 0, sizeof event);
-	atomic_inc(&id_priv->dev_remove);
+	mutex_lock(&id_priv->handler_mutex);
 
 	/*
 	 * Grab mutex to block rdma_destroy_id() from removing the device while
@@ -1793,13 +1783,13 @@ static void addr_handler(int status, struct sockaddr *src_addr,
 
 	if (id_priv->id.event_handler(&id_priv->id, &event)) {
 		cma_exch(id_priv, CMA_DESTROYING);
-		cma_enable_remove(id_priv);
+		mutex_unlock(&id_priv->handler_mutex);
 		cma_deref_id(id_priv);
 		rdma_destroy_id(&id_priv->id);
 		return;
 	}
 out:
-	cma_enable_remove(id_priv);
+	mutex_unlock(&id_priv->handler_mutex);
 	cma_deref_id(id_priv);
 }
 
@@ -2126,7 +2116,7 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
 	struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd;
 	int ret = 0;
 
-	if (cma_disable_remove(id_priv, CMA_CONNECT))
+	if (cma_disable_callback(id_priv, CMA_CONNECT))
 		return 0;
 
 	memset(&event, 0, sizeof event);
@@ -2167,12 +2157,12 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
 		/* Destroy the CM ID by returning a non-zero value. */
 		id_priv->cm_id.ib = NULL;
 		cma_exch(id_priv, CMA_DESTROYING);
-		cma_enable_remove(id_priv);
+		mutex_unlock(&id_priv->handler_mutex);
 		rdma_destroy_id(&id_priv->id);
 		return ret;
 	}
 out:
-	cma_enable_remove(id_priv);
+	mutex_unlock(&id_priv->handler_mutex);
 	return ret;
 }
 
@@ -2570,8 +2560,8 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
 	int ret;
 
 	id_priv = mc->id_priv;
-	if (cma_disable_remove(id_priv, CMA_ADDR_BOUND) &&
-	    cma_disable_remove(id_priv, CMA_ADDR_RESOLVED))
+	if (cma_disable_callback(id_priv, CMA_ADDR_BOUND) &&
+	    cma_disable_callback(id_priv, CMA_ADDR_RESOLVED))
 		return 0;
 
 	mutex_lock(&id_priv->qp_mutex);
@@ -2596,12 +2586,12 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
 	ret = id_priv->id.event_handler(&id_priv->id, &event);
 	if (ret) {
 		cma_exch(id_priv, CMA_DESTROYING);
-		cma_enable_remove(id_priv);
+		mutex_unlock(&id_priv->handler_mutex);
 		rdma_destroy_id(&id_priv->id);
 		return 0;
 	}
 
-	cma_enable_remove(id_priv);
+	mutex_unlock(&id_priv->handler_mutex);
 	return 0;
 }
 
@@ -2760,22 +2750,26 @@ static int cma_remove_id_dev(struct rdma_id_private *id_priv)
 {
 	struct rdma_cm_event event;
 	enum cma_state state;
+	int ret = 0;
 
 	/* Record that we want to remove the device */
 	state = cma_exch(id_priv, CMA_DEVICE_REMOVAL);
 	if (state == CMA_DESTROYING)
 		return 0;
 
 	cma_cancel_operation(id_priv, state);
-	wait_event(id_priv->wait_remove, !atomic_read(&id_priv->dev_remove));
+	mutex_lock(&id_priv->handler_mutex);
 
 	/* Check for destruction from another callback. */
 	if (!cma_comp(id_priv, CMA_DEVICE_REMOVAL))
-		return 0;
+		goto out;
 
 	memset(&event, 0, sizeof event);
 	event.event = RDMA_CM_EVENT_DEVICE_REMOVAL;
-	return id_priv->id.event_handler(&id_priv->id, &event);
+	ret = id_priv->id.event_handler(&id_priv->id, &event);
+out:
+	mutex_unlock(&id_priv->handler_mutex);
+	return ret;
 }
 
 static void cma_process_remove(struct cma_device *cma_dev)
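
Note: the diff above replaces the dev_remove counter and wait_remove waitqueue with a single handler_mutex that serializes event callbacks against device removal: cma_disable_callback() takes the mutex and re-checks state, every handler path releases it, and cma_remove_id_dev() now just takes the same mutex instead of waiting for a counter to drain. The following user-space sketch is only an analogy of that pattern using pthreads, not kernel code; the names ctx, handle_event(), and remove_device() are illustrative, and the state handling is simplified relative to the patch.

/* Illustrative analogy of the handler_mutex pattern; names are made up. */
#include <pthread.h>
#include <stdio.h>

enum state { ST_CONNECT, ST_DEVICE_REMOVAL, ST_DESTROYING };

struct ctx {
	pthread_mutex_t handler_mutex;	/* serializes callbacks vs. removal */
	enum state state;
};

/* Analogue of cma_disable_callback(): take the mutex and verify state.
 * On success the caller holds handler_mutex and must unlock it. */
static int disable_callback(struct ctx *c, enum state expected)
{
	pthread_mutex_lock(&c->handler_mutex);
	if (c->state != expected) {
		pthread_mutex_unlock(&c->handler_mutex);
		return -1;
	}
	return 0;
}

/* Analogue of an event handler such as cma_ib_handler(). */
static void handle_event(struct ctx *c)
{
	if (disable_callback(c, ST_CONNECT))
		return;			/* state changed; callback skipped */

	printf("event handled under handler_mutex\n");
	pthread_mutex_unlock(&c->handler_mutex);
}

/* Analogue of cma_remove_id_dev(): removal takes the same mutex,
 * so it cannot run concurrently with a callback. */
static void remove_device(struct ctx *c)
{
	pthread_mutex_lock(&c->handler_mutex);
	c->state = ST_DEVICE_REMOVAL;
	pthread_mutex_unlock(&c->handler_mutex);
}

int main(void)
{
	struct ctx c = { PTHREAD_MUTEX_INITIALIZER, ST_CONNECT };

	handle_event(&c);
	remove_device(&c);
	handle_event(&c);		/* skipped: state is no longer ST_CONNECT */
	return 0;
}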