@@ -511,18 +511,9 @@ static int beiscsi_enable_pci(struct pci_dev *pcidev)
 	}
 
 	pci_set_master(pcidev);
-	ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(64));
+	ret = dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(64));
 	if (ret) {
-		ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32));
-		if (ret) {
-			dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
-			goto pci_region_release;
-		} else {
-			ret = pci_set_consistent_dma_mask(pcidev,
-							  DMA_BIT_MASK(32));
-		}
-	} else {
-		ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64));
+		ret = dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(32));
 		if (ret) {
 			dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
 			goto pci_region_release;
@@ -550,9 +541,8 @@ static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev)
 	if (status)
 		return status;
 	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
-	mbox_mem_alloc->va = pci_alloc_consistent(pdev,
-						  mbox_mem_alloc->size,
-						  &mbox_mem_alloc->dma);
+	mbox_mem_alloc->va = dma_alloc_coherent(&pdev->dev,
+		mbox_mem_alloc->size, &mbox_mem_alloc->dma, GFP_KERNEL);
 	if (!mbox_mem_alloc->va) {
 		beiscsi_unmap_pci_function(phba);
 		return -ENOMEM;
@@ -2302,11 +2292,11 @@ static int hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
 
 	/* Map addr only if there is data_count */
 	if (dsp_value) {
-		io_task->mtask_addr = pci_map_single(phba->pcidev,
+		io_task->mtask_addr = dma_map_single(&phba->pcidev->dev,
 						     task->data,
 						     task->data_count,
-						     PCI_DMA_TODEVICE);
-		if (pci_dma_mapping_error(phba->pcidev,
+						     DMA_TO_DEVICE);
+		if (dma_mapping_error(&phba->pcidev->dev,
 					  io_task->mtask_addr))
 			return -ENOMEM;
 		io_task->mtask_data_count = task->data_count;
@@ -2517,10 +2507,9 @@ static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
 				    BEISCSI_MAX_FRAGS_INIT);
 	curr_alloc_size = min(be_max_phys_size * 1024, alloc_size);
 	do {
-		mem_arr->virtual_address = pci_alloc_consistent(
-							phba->pcidev,
-							curr_alloc_size,
-							&bus_add);
+		mem_arr->virtual_address =
+			dma_alloc_coherent(&phba->pcidev->dev,
+				curr_alloc_size, &bus_add, GFP_KERNEL);
 		if (!mem_arr->virtual_address) {
 			if (curr_alloc_size <= BE_MIN_MEM_SIZE)
 				goto free_mem;
@@ -2558,7 +2547,7 @@ static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
 	mem_descr->num_elements = j;
 	while ((i) || (j)) {
 		for (j = mem_descr->num_elements; j > 0; j--) {
-			pci_free_consistent(phba->pcidev,
+			dma_free_coherent(&phba->pcidev->dev,
 					    mem_descr->mem_array[j - 1].size,
 					    mem_descr->mem_array[j - 1].
 					    virtual_address,
@@ -3029,9 +3018,9 @@ static int beiscsi_create_eqs(struct beiscsi_hba *phba,
 		eq = &phwi_context->be_eq[i].q;
 		mem = &eq->dma_mem;
 		phwi_context->be_eq[i].phba = phba;
-		eq_vaddress = pci_alloc_consistent(phba->pcidev,
+		eq_vaddress = dma_alloc_coherent(&phba->pcidev->dev,
 						   num_eq_pages * PAGE_SIZE,
-						   &paddr);
+						   &paddr, GFP_KERNEL);
 		if (!eq_vaddress) {
 			ret = -ENOMEM;
 			goto create_eq_error;
@@ -3067,7 +3056,7 @@ static int beiscsi_create_eqs(struct beiscsi_hba *phba,
 		eq = &phwi_context->be_eq[i].q;
 		mem = &eq->dma_mem;
 		if (mem->va)
-			pci_free_consistent(phba->pcidev, num_eq_pages
+			dma_free_coherent(&phba->pcidev->dev, num_eq_pages
 					    * PAGE_SIZE,
 					    mem->va, mem->dma);
 	}
@@ -3095,9 +3084,9 @@ static int beiscsi_create_cqs(struct beiscsi_hba *phba,
 		pbe_eq->cq = cq;
 		pbe_eq->phba = phba;
 		mem = &cq->dma_mem;
-		cq_vaddress = pci_alloc_consistent(phba->pcidev,
+		cq_vaddress = dma_alloc_coherent(&phba->pcidev->dev,
 						   num_cq_pages * PAGE_SIZE,
-						   &paddr);
+						   &paddr, GFP_KERNEL);
 		if (!cq_vaddress) {
 			ret = -ENOMEM;
 			goto create_cq_error;
@@ -3132,7 +3121,7 @@ static int beiscsi_create_cqs(struct beiscsi_hba *phba,
 		cq = &phwi_context->be_cq[i];
 		mem = &cq->dma_mem;
 		if (mem->va)
-			pci_free_consistent(phba->pcidev, num_cq_pages
+			dma_free_coherent(&phba->pcidev->dev, num_cq_pages
 					    * PAGE_SIZE,
 					    mem->va, mem->dma);
 	}
@@ -3324,7 +3313,7 @@ static void be_queue_free(struct beiscsi_hba *phba, struct be_queue_info *q)
 {
 	struct be_dma_mem *mem = &q->dma_mem;
 	if (mem->va) {
-		pci_free_consistent(phba->pcidev, mem->size,
+		dma_free_coherent(&phba->pcidev->dev, mem->size,
 				    mem->va, mem->dma);
 		mem->va = NULL;
 	}
@@ -3339,7 +3328,8 @@ static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q,
 	q->len = len;
 	q->entry_size = entry_size;
 	mem->size = len * entry_size;
-	mem->va = pci_zalloc_consistent(phba->pcidev, mem->size, &mem->dma);
+	mem->va = dma_zalloc_coherent(&phba->pcidev->dev, mem->size, &mem->dma,
+				      GFP_KERNEL);
 	if (!mem->va)
 		return -ENOMEM;
 	return 0;
@@ -3477,7 +3467,7 @@ static void be_mcc_queues_destroy(struct beiscsi_hba *phba)
 				       &ctrl->ptag_state[tag].tag_state)) {
 			ptag_mem = &ctrl->ptag_state[tag].tag_mem_state;
 			if (ptag_mem->size) {
-				pci_free_consistent(ctrl->pdev,
+				dma_free_coherent(&ctrl->pdev->dev,
 						    ptag_mem->size,
 						    ptag_mem->va,
 						    ptag_mem->dma);
@@ -3878,7 +3868,7 @@ static void beiscsi_free_mem(struct beiscsi_hba *phba)
 	j = 0;
 	for (i = 0; i < SE_MEM_MAX; i++) {
 		for (j = mem_descr->num_elements; j > 0; j--) {
-			pci_free_consistent(phba->pcidev,
+			dma_free_coherent(&phba->pcidev->dev,
 			  mem_descr->mem_array[j - 1].size,
 			  mem_descr->mem_array[j - 1].virtual_address,
 			  (unsigned long)mem_descr->mem_array[j - 1].
@@ -4253,10 +4243,10 @@ beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn,
 	}
 
 	if (io_task->mtask_addr) {
-		pci_unmap_single(phba->pcidev,
+		dma_unmap_single(&phba->pcidev->dev,
 				 io_task->mtask_addr,
 				 io_task->mtask_data_count,
-				 PCI_DMA_TODEVICE);
+				 DMA_TO_DEVICE);
 		io_task->mtask_addr = 0;
 	}
 }
@@ -4850,9 +4840,9 @@ static int beiscsi_bsg_request(struct bsg_job *job)
 
 	switch (bsg_req->msgcode) {
 	case ISCSI_BSG_HST_VENDOR:
-		nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
+		nonemb_cmd.va = dma_alloc_coherent(&phba->ctrl.pdev->dev,
 					job->request_payload.payload_len,
-					&nonemb_cmd.dma);
+					&nonemb_cmd.dma, GFP_KERNEL);
 		if (nonemb_cmd.va == NULL) {
 			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
 				    "BM_%d : Failed to allocate memory for "
@@ -4865,7 +4855,7 @@ static int beiscsi_bsg_request(struct bsg_job *job)
 			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
 				    "BM_%d : MBX Tag Allocation Failed\n");
 
-			pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+			dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size,
 					    nonemb_cmd.va, nonemb_cmd.dma);
 			return -EAGAIN;
 		}
@@ -4879,7 +4869,7 @@ static int beiscsi_bsg_request(struct bsg_job *job)
 		if (!test_bit(BEISCSI_HBA_ONLINE, &phba->state)) {
 			clear_bit(MCC_TAG_STATE_RUNNING,
 				  &phba->ctrl.ptag_state[tag].tag_state);
-			pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+			dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size,
 					    nonemb_cmd.va, nonemb_cmd.dma);
 			return -EIO;
 		}
@@ -4896,7 +4886,7 @@ static int beiscsi_bsg_request(struct bsg_job *job)
 		bsg_reply->result = status;
 		bsg_job_done(job, bsg_reply->result,
 			     bsg_reply->reply_payload_rcv_len);
-		pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+		dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size,
 				    nonemb_cmd.va, nonemb_cmd.dma);
 		if (status || extd_status) {
 			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
@@ -5753,7 +5743,7 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
 	beiscsi_cleanup_port(phba);
 	beiscsi_free_mem(phba);
free_port:
-	pci_free_consistent(phba->pcidev,
+	dma_free_coherent(&phba->pcidev->dev,
 			    phba->ctrl.mbox_mem_alloced.size,
 			    phba->ctrl.mbox_mem_alloced.va,
 			    phba->ctrl.mbox_mem_alloced.dma);
@@ -5797,7 +5787,7 @@ static void beiscsi_remove(struct pci_dev *pcidev)
 
 	/* ctrl uninit */
 	beiscsi_unmap_pci_function(phba);
-	pci_free_consistent(phba->pcidev,
+	dma_free_coherent(&phba->pcidev->dev,
 			    phba->ctrl.mbox_mem_alloced.size,
 			    phba->ctrl.mbox_mem_alloced.va,
 			    phba->ctrl.mbox_mem_alloced.dma);
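
For context on the conversion pattern above: the legacy pci_* DMA helpers are thin wrappers around the generic DMA API, so each call site switches to the dma_* equivalent operating on &pdev->dev, passes an explicit GFP_KERNEL where the old wrapper hard-coded GFP_ATOMIC, and uses DMA_* direction flags in place of PCI_DMA_*. The sketch below is illustrative only and is not part of this patch; the standalone helpers and their names (example_setup_dma, example_map_payload, example_teardown_dma) are hypothetical.

/*
 * Illustrative sketch, not from be_main.c: the generic DMA API calls
 * that replace the legacy pci_* wrappers removed in this diff.
 */
#include <linux/pci.h>
#include <linux/dma-mapping.h>

static int example_setup_dma(struct pci_dev *pdev, size_t size,
			     void **va, dma_addr_t *dma)
{
	int ret;

	/*
	 * One call sets both the streaming and coherent masks; the 32-bit
	 * fallback replaces the old nested pci_set_dma_mask() /
	 * pci_set_consistent_dma_mask() branches.
	 */
	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret)
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	/*
	 * dma_alloc_coherent() takes the struct device and an explicit
	 * gfp_t; pci_alloc_consistent() used GFP_ATOMIC internally.
	 */
	*va = dma_alloc_coherent(&pdev->dev, size, dma, GFP_KERNEL);
	if (!*va)
		return -ENOMEM;
	return 0;
}

static int example_map_payload(struct pci_dev *pdev, void *buf, size_t len,
			       dma_addr_t *addr)
{
	/* Streaming mapping: PCI_DMA_TODEVICE becomes DMA_TO_DEVICE. */
	*addr = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, *addr))
		return -ENOMEM;
	return 0;
}

static void example_teardown_dma(struct pci_dev *pdev, size_t size,
				 void *va, dma_addr_t dma)
{
	dma_free_coherent(&pdev->dev, size, va, dma);
}

Folding the 64/32-bit mask selection into dma_set_mask_and_coherent() removes the separate consistent-mask branches entirely, which is why the first hunk deletes more lines than it adds.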