
Commit 81d87cb

Dave Jiang authored and Linus Torvalds committed
drivers/edac: mod MC to use workq instead of kthread
Move the memory controller object from a kernel-thread-based implementation to a work-queue-based one.

Signed-off-by: Dave Jiang <[email protected]>
Signed-off-by: Douglas Thompson <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
1 parent 535c6a5 commit 81d87cb
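For context on what "workq instead of kthread" means here: the memory-controller poller stops being a dedicated kernel thread looping check-then-sleep and becomes a delayed_work item that re-queues itself on the EDAC workqueue. The sketch below is illustrative only, not the EDAC code; my_poll_work, my_poll_msec, my_check() and my_poll_fn() are invented names.

#include <linux/workqueue.h>
#include <linux/jiffies.h>

static struct delayed_work my_poll_work;	/* hypothetical work item */
static unsigned int my_poll_msec = 1000;	/* poll period in msec */

static void my_check(void)
{
	/* the periodic check a kthread poller would have run in its loop */
}

static void my_poll_fn(struct work_struct *work)
{
	my_check();

	/* re-arm: the work item reschedules itself, replacing a kthread
	 * sleeping in a "while (!kthread_should_stop())" loop */
	schedule_delayed_work(&my_poll_work, msecs_to_jiffies(my_poll_msec));
}

static void my_poll_start(void)
{
	INIT_DELAYED_WORK(&my_poll_work, my_poll_fn);
	schedule_delayed_work(&my_poll_work, msecs_to_jiffies(my_poll_msec));
}

Stopping such a self-rearming item is the subtle half of the conversion; see the note after the edac_mc.c hunks below.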

File tree: 6 files changed, +177 -97 lines changed


drivers/edac/edac_core.h

Lines changed: 14 additions & 0 deletions
@@ -382,6 +382,15 @@ struct mem_ctl_info {
 	/* edac sysfs device control */
 	struct kobject edac_mci_kobj;
 	struct completion kobj_complete;
+
+	/* work struct for this MC */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20))
+	struct delayed_work work;
+#else
+	struct work_struct work;
+#endif
+	/* the internal state of this controller instance */
+	int op_state;
 };
 
 /*
@@ -573,6 +582,9 @@ struct edac_device_ctl_info {
 };
 
 /* To get from the instance's wq to the beginning of the ctl structure */
+#define to_edac_mem_ctl_work(w) \
+		container_of(w, struct mem_ctl_info, work)
+
 #define to_edac_device_ctl_work(w) \
 		container_of(w,struct edac_device_ctl_info,work)
 
@@ -584,6 +596,8 @@ static inline void edac_device_calc_delay(
 	edac_dev->delay = edac_dev->poll_msec * HZ / 1000;
 }
 
+#define edac_calc_delay(dev) dev->delay = dev->poll_msec * HZ / 1000;
+
 /*
  * The alloc() and free() functions for the 'edac_device' control info
  * structure. A MC driver will allocate one of these for each edac_device
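The new to_edac_mem_ctl_work() macro is the standard container_of() pattern: the (delayed) work struct is embedded inside struct mem_ctl_info, so the workqueue callback can recover the owning controller from nothing but the work pointer it is handed. A stripped-down illustration, with an invented struct my_mci standing in for mem_ctl_info:

#include <linux/kernel.h>	/* container_of() */
#include <linux/workqueue.h>

/* toy stand-in for struct mem_ctl_info; only the embedded work item matters */
struct my_mci {
	int op_state;
	struct delayed_work work;
};

#define to_my_mci(w)	container_of(w, struct my_mci, work)

static void my_mc_workq_function(struct work_struct *work_req)
{
	/* step from the work_struct back to its enclosing delayed_work ... */
	struct delayed_work *d_work =
		container_of(work_req, struct delayed_work, work);
	/* ... and from the delayed_work back to the enclosing my_mci */
	struct my_mci *mci = to_my_mci(d_work);

	mci->op_state = 0;	/* poll/check the controller here */
}

The pre-2.6.20 #else branches in the diff avoid this walk entirely: the old three-argument INIT_WORK() passed a data pointer, so the callback received the mem_ctl_info directly as void *ptr.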

drivers/edac/edac_device.c

Lines changed: 20 additions & 16 deletions
@@ -332,17 +332,17 @@ EXPORT_SYMBOL(edac_device_find);
 
 
 /*
- * edac_workq_function
+ * edac_device_workq_function
  *	performs the operation scheduled by a workq request
  */
 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20))
-static void edac_workq_function(struct work_struct *work_req)
+static void edac_device_workq_function(struct work_struct *work_req)
 {
 	struct delayed_work *d_work = (struct delayed_work*) work_req;
 	struct edac_device_ctl_info *edac_dev =
 			to_edac_device_ctl_work(d_work);
 #else
-static void edac_workq_function(void *ptr)
+static void edac_device_workq_function(void *ptr)
 {
 	struct edac_device_ctl_info *edac_dev =
 			(struct edac_device_ctl_info *) ptr;
@@ -364,30 +364,31 @@ static void edac_workq_function(void *ptr)
 }
 
 /*
- * edac_workq_setup
+ * edac_device_workq_setup
  *	initialize a workq item for this edac_device instance
  *	passing in the new delay period in msec
  */
-void edac_workq_setup(struct edac_device_ctl_info *edac_dev, unsigned msec)
+void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev,
+				unsigned msec)
 {
 	debugf0("%s()\n", __func__);
 
 	edac_dev->poll_msec = msec;
-	edac_device_calc_delay(edac_dev);	/* Calc delay jiffies */
+	edac_calc_delay(edac_dev);		/* Calc delay jiffies */
 
 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20))
-	INIT_DELAYED_WORK(&edac_dev->work,edac_workq_function);
+	INIT_DELAYED_WORK(&edac_dev->work, edac_device_workq_function);
 #else
-	INIT_WORK(&edac_dev->work,edac_workq_function,edac_dev);
+	INIT_WORK(&edac_dev->work, edac_device_workq_function, edac_dev);
 #endif
-	queue_delayed_work(edac_workqueue,&edac_dev->work, edac_dev->delay);
+	queue_delayed_work(edac_workqueue, &edac_dev->work, edac_dev->delay);
 }
 
 /*
- * edac_workq_teardown
+ * edac_device_workq_teardown
  *	stop the workq processing on this edac_dev
  */
-void edac_workq_teardown(struct edac_device_ctl_info *edac_dev)
+void edac_device_workq_teardown(struct edac_device_ctl_info *edac_dev)
 {
 	int status;
 
@@ -409,10 +410,10 @@ void edac_device_reset_delay_period(
 	lock_device_list();
 
 	/* cancel the current workq request */
-	edac_workq_teardown(edac_dev);
+	edac_device_workq_teardown(edac_dev);
 
 	/* restart the workq request, with new delay value */
-	edac_workq_setup(edac_dev, value);
+	edac_device_workq_setup(edac_dev, value);
 
 	unlock_device_list();
 }
@@ -479,8 +480,11 @@ int edac_device_add_device(struct edac_device_ctl_info *edac_dev, int edac_idx)
 		/* This instance is NOW RUNNING */
 		edac_dev->op_state = OP_RUNNING_POLL;
 
-		/* enable workq processing on this instance, default = 1000 msec */
-		edac_workq_setup(edac_dev, 1000);
+		/*
+		 * enable workq processing on this instance,
+		 * default = 1000 msec
+		 */
+		edac_device_workq_setup(edac_dev, 1000);
 	} else {
 		edac_dev->op_state = OP_RUNNING_INTERRUPT;
 	}
@@ -538,7 +542,7 @@ struct edac_device_ctl_info * edac_device_del_device(struct device *dev)
 	edac_dev->op_state = OP_OFFLINE;
 
 	/* clear workq processing on this instance */
-	edac_workq_teardown(edac_dev);
+	edac_device_workq_teardown(edac_dev);
 
 	/* Tear down the sysfs entries for this instance */
 	edac_device_remove_sysfs(edac_dev);
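A small wrinkle in this hunk: edac_device_workq_setup() now converts the polling period by hand via the new edac_calc_delay() macro (msec * HZ / 1000), while the memory-controller setup in edac_mc.c below uses msecs_to_jiffies(). Both turn milliseconds into jiffies; the open-coded form truncates where msecs_to_jiffies() rounds up, a difference that only shows for periods that are not a multiple of the tick length. A hypothetical helper, just to put the two side by side:

#include <linux/jiffies.h>

static unsigned long my_delay_jiffies(unsigned int msec)
{
	unsigned long by_hand = msec * HZ / 1000;	/* edac_calc_delay() style: truncates */

	/*
	 * msecs_to_jiffies() is the generic helper and rounds up instead;
	 * for msec = 1000 both yield exactly HZ jiffies, i.e. one second.
	 */
	if (by_hand != msecs_to_jiffies(msec))
		by_hand = msecs_to_jiffies(msec);

	return by_hand;
}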

drivers/edac/edac_mc.c

Lines changed: 119 additions & 0 deletions
@@ -184,6 +184,8 @@ struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows,
 		}
 	}
 
+	mci->op_state = OP_ALLOC;
+
 	return mci;
 }
 EXPORT_SYMBOL_GPL(edac_mc_alloc);
@@ -215,6 +217,107 @@ static struct mem_ctl_info *find_mci_by_dev(struct device *dev)
 	return NULL;
 }
 
+/*
+ * handler for EDAC to check if NMI type handler has asserted interrupt
+ */
+static int edac_mc_assert_error_check_and_clear(void)
+{
+	int vreg;
+
+	if(edac_op_state == EDAC_OPSTATE_POLL)
+		return 1;
+
+	vreg = atomic_read(&edac_err_assert);
+	if(vreg) {
+		atomic_set(&edac_err_assert, 0);
+		return 1;
+	}
+
+	return 0;
+}
+
+/*
+ * edac_mc_workq_function
+ *	performs the operation scheduled by a workq request
+ */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20))
+static void edac_mc_workq_function(struct work_struct *work_req)
+{
+	struct delayed_work *d_work = (struct delayed_work*) work_req;
+	struct mem_ctl_info *mci = to_edac_mem_ctl_work(d_work);
+#else
+static void edac_mc_workq_function(void *ptr)
+{
+	struct mem_ctl_info *mci = (struct mem_ctl_info *) ptr;
+#endif
+
+	mutex_lock(&mem_ctls_mutex);
+
+	/* Only poll controllers that are running polled and have a check */
+	if (edac_mc_assert_error_check_and_clear() && (mci->edac_check != NULL))
+		mci->edac_check(mci);
+
+	/*
+	 * FIXME: temp place holder for PCI checks,
+	 * goes away when we break out PCI
+	 */
+	edac_pci_do_parity_check();
+
+	mutex_unlock(&mem_ctls_mutex);
+
+	/* Reschedule */
+	queue_delayed_work(edac_workqueue, &mci->work, edac_mc_get_poll_msec());
+}
+
+/*
+ * edac_mc_workq_setup
+ *	initialize a workq item for this mci
+ *	passing in the new delay period in msec
+ */
+void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec)
+{
+	debugf0("%s()\n", __func__);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20))
+	INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function);
+#else
+	INIT_WORK(&mci->work, edac_mc_workq_function, mci);
+#endif
+	queue_delayed_work(edac_workqueue, &mci->work, msecs_to_jiffies(msec));
+}
+
+/*
+ * edac_mc_workq_teardown
+ *	stop the workq processing on this mci
+ */
+void edac_mc_workq_teardown(struct mem_ctl_info *mci)
+{
+	int status;
+
+	status = cancel_delayed_work(&mci->work);
+	if (status == 0) {
+		/* workq instance might be running, wait for it */
+		flush_workqueue(edac_workqueue);
+	}
+}
+
+/*
+ * edac_reset_delay_period
+ */
+
+void edac_reset_delay_period(struct mem_ctl_info *mci, unsigned long value)
+{
+	mutex_lock(&mem_ctls_mutex);
+
+	/* cancel the current workq request */
+	edac_mc_workq_teardown(mci);
+
+	/* restart the workq request, with new delay value */
+	edac_mc_workq_setup(mci, value);
+
+	mutex_unlock(&mem_ctls_mutex);
+}
+
 /* Return 0 on success, 1 on failure.
  * Before calling this function, caller must
  * assign a unique value to mci->mc_idx.
@@ -351,6 +454,16 @@ int edac_mc_add_mc(struct mem_ctl_info *mci, int mc_idx)
 		goto fail1;
 	}
 
+	/* If there IS a check routine, then we are running POLLED */
+	if (mci->edac_check != NULL) {
+		/* This instance is NOW RUNNING */
+		mci->op_state = OP_RUNNING_POLL;
+
+		edac_mc_workq_setup(mci, edac_mc_get_poll_msec());
+	} else {
+		mci->op_state = OP_RUNNING_INTERRUPT;
+	}
+
 	/* Report action taken */
 	edac_mc_printk(mci, KERN_INFO, "Giving out device to %s %s: DEV %s\n",
 		mci->mod_name, mci->ctl_name, dev_name(mci));
@@ -386,6 +499,12 @@ struct mem_ctl_info * edac_mc_del_mc(struct device *dev)
 		return NULL;
 	}
 
+	/* marking MCI offline */
+	mci->op_state = OP_OFFLINE;
+
+	/* flush workq processes */
+	edac_mc_workq_teardown(mci);
+
 	edac_remove_sysfs_mci_device(mci);
 	del_mc_from_global_list(mci);
 	mutex_unlock(&mem_ctls_mutex);
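edac_mc_workq_teardown() has to cope with the window where the poll function is already executing and about to requeue itself: cancel_delayed_work() only removes a still-pending item and returns 0 if the timer has already fired, so the code falls back to flush_workqueue() to wait out any in-flight run. A hedged sketch of the same stop pattern against a private queue; my_wq and my_work are invented names, not EDAC symbols:

#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;	/* analogous to edac_workqueue */
static struct delayed_work my_work;

static void my_workq_teardown(void)
{
	/*
	 * cancel_delayed_work() returns nonzero only when it dequeued a
	 * pending item; 0 means the delay already expired and the handler
	 * may be running on my_wq at this very moment.
	 */
	if (!cancel_delayed_work(&my_work))
		flush_workqueue(my_wq);	/* wait for the in-flight execution */
}

Modern kernels also offer cancel_delayed_work_sync() for the cancel-and-wait case; the cancel-then-flush pairing above mirrors what the commit does.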

drivers/edac/edac_mc_sysfs.c

Lines changed: 10 additions & 4 deletions
@@ -22,22 +22,28 @@ static int panic_on_ue;
 static int poll_msec = 1000;
 
 /* Getter functions for above */
-int edac_get_log_ue()
+int edac_get_log_ue(void)
 {
 	return log_ue;
 }
 
-int edac_get_log_ce()
+int edac_get_log_ce(void)
 {
 	return log_ce;
 }
 
-int edac_get_panic_on_ue()
+int edac_get_panic_on_ue(void)
 {
 	return panic_on_ue;
 }
 
-int edac_get_poll_msec()
+/* this is temporary */
+int edac_mc_get_poll_msec(void)
+{
+	return edac_get_poll_msec();
+}
+
+int edac_get_poll_msec(void)
 {
 	return poll_msec;
 }
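Most of this hunk simply adds void to empty parameter lists. In pre-C23 C, int f() declares a function with unspecified parameters, whereas int f(void) is a real prototype that takes none, which is what these getters intend (and what -Wstrict-prototypes flags). A tiny userspace illustration of the difference:

int get_old();		/* old style: parameters unspecified, not "none" */
int get_new(void);	/* prototype: explicitly takes no arguments */

int get_old() { return 1; }
int get_new(void) { return 2; }

int main(void)
{
	get_old(42);		/* accepted: there is no prototype to check against */
	/* get_new(42); */	/* would be rejected at compile time */
	return get_old() + get_new();
}

The added edac_mc_get_poll_msec() wrapper (marked "this is temporary") just forwards to edac_get_poll_msec(); it gives the mc code an edac_mc_-namespaced accessor for the poll period used when the work item reschedules itself.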

0 commit comments