 #include <uapi/scsi/scsi_bsg_mpi3mr.h>
 
 /**
- * mpi3mr_alloc_trace_buffer: Allocate trace buffer
+ * mpi3mr_alloc_trace_buffer: Allocate segmented trace buffer
  * @mrioc: Adapter instance reference
  * @trace_size: Trace buffer size
  *
- * Allocate trace buffer
+ * Allocate either segmented memory pools or a contiguous buffer,
+ * based on the controller capability, for the host trace
+ * buffer.
+ *
  * Return: 0 on success, non-zero on failure.
  */
 static int mpi3mr_alloc_trace_buffer(struct mpi3mr_ioc *mrioc, u32 trace_size)
 {
 	struct diag_buffer_desc *diag_buffer = &mrioc->diag_buffers[0];
+	int i, sz;
+	u64 *diag_buffer_list = NULL;
+	dma_addr_t diag_buffer_list_dma;
+	u32 seg_count;
+
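+	/*
+	 * Segmented mode: carve the trace buffer into 4K segments and
+	 * post a list of segment DMA addresses to the firmware instead
+	 * of one large physically contiguous region.
+	 */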
+	if (mrioc->seg_tb_support) {
+		seg_count = trace_size / MPI3MR_PAGE_SIZE_4K;
+		trace_size = seg_count * MPI3MR_PAGE_SIZE_4K;
+
+		diag_buffer_list = dma_alloc_coherent(&mrioc->pdev->dev,
+		    sizeof(u64) * seg_count,
+		    &diag_buffer_list_dma, GFP_KERNEL);
+		if (!diag_buffer_list)
+			return -1;
+
+		mrioc->num_tb_segs = seg_count;
+
+		sz = sizeof(struct segments) * seg_count;
+		mrioc->trace_buf = kzalloc(sz, GFP_KERNEL);
+		if (!mrioc->trace_buf)
+			goto trace_buf_failed;
+
+		mrioc->trace_buf_pool = dma_pool_create("trace_buf pool",
+		    &mrioc->pdev->dev, MPI3MR_PAGE_SIZE_4K, MPI3MR_PAGE_SIZE_4K,
+		    0);
+		if (!mrioc->trace_buf_pool) {
+			ioc_err(mrioc, "trace buf pool: dma_pool_create failed\n");
+			goto trace_buf_pool_failed;
+		}
 
-	diag_buffer->addr = dma_alloc_coherent(&mrioc->pdev->dev,
-	    trace_size, &diag_buffer->dma_addr, GFP_KERNEL);
-	if (diag_buffer->addr) {
-		dprint_init(mrioc, "trace diag buffer is allocated successfully\n");
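+		/*
+		 * Allocate each 4K segment from the DMA pool and record
+		 * its bus address in the list handed to the firmware.
+		 */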
+		for (i = 0; i < seg_count; i++) {
+			mrioc->trace_buf[i].segment =
+			    dma_pool_zalloc(mrioc->trace_buf_pool, GFP_KERNEL,
+			    &mrioc->trace_buf[i].segment_dma);
+			diag_buffer_list[i] =
+			    (u64)mrioc->trace_buf[i].segment_dma;
+			if (!diag_buffer_list[i])
+				goto tb_seg_alloc_failed;
+		}
+
+		diag_buffer->addr = diag_buffer_list;
+		diag_buffer->dma_addr = diag_buffer_list_dma;
+		diag_buffer->is_segmented = true;
+
+		dprint_init(mrioc, "segmented trace diag buffer\n"
+		    "is allocated successfully seg_count:%d\n", seg_count);
 		return 0;
+	} else {
+		diag_buffer->addr = dma_alloc_coherent(&mrioc->pdev->dev,
+		    trace_size, &diag_buffer->dma_addr, GFP_KERNEL);
+		if (diag_buffer->addr) {
+			dprint_init(mrioc, "trace diag buffer is allocated successfully\n");
+			return 0;
+		}
+		return -1;
 	}
+
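+	/* Unwind in reverse order of allocation. */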
+tb_seg_alloc_failed:
+	if (mrioc->trace_buf_pool) {
+		for (i = 0; i < mrioc->num_tb_segs; i++) {
+			if (mrioc->trace_buf[i].segment) {
+				dma_pool_free(mrioc->trace_buf_pool,
+				    mrioc->trace_buf[i].segment,
+				    mrioc->trace_buf[i].segment_dma);
+				mrioc->trace_buf[i].segment = NULL;
+			}
+			mrioc->trace_buf[i].segment = NULL;
+		}
+		dma_pool_destroy(mrioc->trace_buf_pool);
+		mrioc->trace_buf_pool = NULL;
+	}
+trace_buf_pool_failed:
+	kfree(mrioc->trace_buf);
+	mrioc->trace_buf = NULL;
+trace_buf_failed:
+	if (diag_buffer_list)
+		dma_free_coherent(&mrioc->pdev->dev,
+		    sizeof(u64) * mrioc->num_tb_segs,
+		    diag_buffer_list, diag_buffer_list_dma);
 	return -1;
 }
 
@@ -100,8 +185,9 @@ void mpi3mr_alloc_diag_bufs(struct mpi3mr_ioc *mrioc)
 	dprint_init(mrioc,
 	    "trying to allocate trace diag buffer of size = %dKB\n",
 	    trace_size / 1024);
-	if (get_order(trace_size) > MAX_PAGE_ORDER ||
+	if ((!mrioc->seg_tb_support && (get_order(trace_size) > MAX_PAGE_ORDER)) ||
 	    mpi3mr_alloc_trace_buffer(mrioc, trace_size)) {
+
 		retry = true;
 		trace_size -= trace_dec_size;
 		dprint_init(mrioc, "trace diag buffer allocation failed\n"
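
For context, get_order() returns the buddy-allocator order needed to cover a contiguous region, and dma_alloc_coherent() typically cannot satisfy requests above MAX_PAGE_ORDER, so the old code skipped oversized attempts outright; segmented mode sidesteps the limit because each pool segment is only 4K. A rough sketch of the relationship (illustration only, assuming 4 KiB pages and the common MAX_PAGE_ORDER of 10):

#include <linux/mm.h>	/* get_order(), MAX_PAGE_ORDER */

/*
 * Illustration only: with 4 KiB pages and MAX_PAGE_ORDER == 10, the
 * largest contiguous allocation is 2^10 pages == 4 MiB. An 8 MiB trace
 * buffer therefore fails this check, but is fine in segmented mode
 * since it is carved into 4 KiB pool segments.
 */
static inline bool fits_contiguous(u32 size)
{
	return get_order(size) <= MAX_PAGE_ORDER;
}
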
@@ -161,6 +247,12 @@ int mpi3mr_issue_diag_buf_post(struct mpi3mr_ioc *mrioc,
 	u8 prev_status;
 	int retval = 0;
 
+	if (diag_buffer->disabled_after_reset) {
+		dprint_bsg_err(mrioc, "%s: skipping diag buffer posting\n"
+		    "as it is disabled after reset\n", __func__);
+		return -1;
+	}
+
 	memset(&diag_buf_post_req, 0, sizeof(diag_buf_post_req));
 	mutex_lock(&mrioc->init_cmds.mutex);
 	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
@@ -177,8 +269,12 @@ int mpi3mr_issue_diag_buf_post(struct mpi3mr_ioc *mrioc,
 	diag_buf_post_req.address = le64_to_cpu(diag_buffer->dma_addr);
 	diag_buf_post_req.length = le32_to_cpu(diag_buffer->size);
 
-	dprint_bsg_info(mrioc, "%s: posting diag buffer type %d\n", __func__,
-	    diag_buffer->type);
+	if (diag_buffer->is_segmented)
+		diag_buf_post_req.msg_flags |= MPI3_DIAG_BUFFER_POST_MSGFLAGS_SEGMENTED;
+
+	dprint_bsg_info(mrioc, "%s: posting diag buffer type %d segmented:%d\n", __func__,
+	    diag_buffer->type, diag_buffer->is_segmented);
+
 	prev_status = diag_buffer->status;
 	diag_buffer->status = MPI3MR_HDB_BUFSTATUS_POSTED_UNPAUSED;
 	init_completion(&mrioc->init_cmds.done);
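
The patch itself adds only the allocation and posting paths; the matching release path is implied by the tb_seg_alloc_failed unwind above. As a minimal sketch of what teardown would look like (the helper name is hypothetical and not part of this patch; the fields trace_buf, trace_buf_pool, num_tb_segs and diag_buffers[0] come from the hunks above):

/*
 * Hypothetical teardown helper: frees the segmented trace buffer in the
 * reverse order of mpi3mr_alloc_trace_buffer().
 */
static void mpi3mr_free_trace_buffer(struct mpi3mr_ioc *mrioc)
{
	struct diag_buffer_desc *diag_buffer = &mrioc->diag_buffers[0];
	int i;

	if (!diag_buffer->is_segmented)
		return;

	/* Return each 4K segment to the pool, then destroy the pool. */
	for (i = 0; i < mrioc->num_tb_segs; i++) {
		if (mrioc->trace_buf[i].segment)
			dma_pool_free(mrioc->trace_buf_pool,
			    mrioc->trace_buf[i].segment,
			    mrioc->trace_buf[i].segment_dma);
	}
	dma_pool_destroy(mrioc->trace_buf_pool);
	mrioc->trace_buf_pool = NULL;

	kfree(mrioc->trace_buf);
	mrioc->trace_buf = NULL;

	/* The segment-address list itself came from dma_alloc_coherent(). */
	dma_free_coherent(&mrioc->pdev->dev,
	    sizeof(u64) * mrioc->num_tb_segs,
	    diag_buffer->addr, diag_buffer->dma_addr);
	diag_buffer->addr = NULL;
}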