@@ -30,6 +30,7 @@ struct tsc_adjust {
 };
 
 static DEFINE_PER_CPU(struct tsc_adjust, tsc_adjust);
+static struct timer_list tsc_sync_check_timer;
 
 /*
  * TSC's on different sockets may be reset asynchronously.
@@ -77,6 +78,46 @@ void tsc_verify_tsc_adjust(bool resume)
 	}
 }
 
+/*
+ * Normally the tsc_sync will be checked every time the system enters
+ * idle state, but there is still the caveat that a system won't enter
+ * idle, either because it is too busy or purposely configured not to
+ * enter idle.
+ *
+ * So set up a periodic timer (every 10 minutes) to make sure the check
+ * is always on.
+ */
+
+#define SYNC_CHECK_INTERVAL		(HZ * 600)
+
+static void tsc_sync_check_timer_fn(struct timer_list *unused)
+{
+	int next_cpu;
+
+	tsc_verify_tsc_adjust(false);
+
+	/* Run the check for all onlined CPUs in turn */
+	next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);
+	if (next_cpu >= nr_cpu_ids)
+		next_cpu = cpumask_first(cpu_online_mask);
+
+	tsc_sync_check_timer.expires += SYNC_CHECK_INTERVAL;
+	add_timer_on(&tsc_sync_check_timer, next_cpu);
+}
+
+static int __init start_sync_check_timer(void)
+{
+	if (!cpu_feature_enabled(X86_FEATURE_TSC_ADJUST) || tsc_clocksource_reliable)
+		return 0;
+
+	timer_setup(&tsc_sync_check_timer, tsc_sync_check_timer_fn, 0);
+	tsc_sync_check_timer.expires = jiffies + SYNC_CHECK_INTERVAL;
+	add_timer(&tsc_sync_check_timer);
+
+	return 0;
+}
+late_initcall(start_sync_check_timer);
+
 static void tsc_sanitize_first_cpu(struct tsc_adjust *cur, s64 bootval,
 				   unsigned int cpu, bool bootcpu)
 {
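Below is a minimal, self-contained sketch of the same self-rearming, CPU-rotating timer pattern, packaged as a hypothetical module so that the re-arm and teardown paths are visible in one place. It is an illustration, not part of the patch: the module and function names, the do_periodic_check() stub, the stop flag, and the interval are assumptions, and the real code additionally gates itself on X86_FEATURE_TSC_ADJUST and tsc_clocksource_reliable and never needs an exit path.

/*
 * Illustrative only -- not from the patch. A self-rearming timer that
 * rotates across online CPUs, with a stop flag so module unload can
 * tear it down safely. do_periodic_check() is a placeholder.
 */
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/jiffies.h>

#define CHECK_INTERVAL	(HZ * 600)	/* 10 minutes, like the patch */

static struct timer_list check_timer;
static bool check_timer_stopping;

static void do_periodic_check(void)
{
	/* Placeholder for the real per-CPU consistency check */
	pr_info("periodic check on CPU %d\n", raw_smp_processor_id());
}

static void check_timer_fn(struct timer_list *unused)
{
	int next_cpu;

	do_periodic_check();

	/* Don't re-arm while the module is unloading */
	if (READ_ONCE(check_timer_stopping))
		return;

	/* Rotate to the next online CPU, wrapping at the end of the mask */
	next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);
	if (next_cpu >= nr_cpu_ids)
		next_cpu = cpumask_first(cpu_online_mask);

	/* Re-arm one interval later, pinned to that CPU */
	check_timer.expires += CHECK_INTERVAL;
	add_timer_on(&check_timer, next_cpu);
}

static int __init check_timer_init(void)
{
	timer_setup(&check_timer, check_timer_fn, 0);
	check_timer.expires = jiffies + CHECK_INTERVAL;
	add_timer(&check_timer);
	return 0;
}

static void __exit check_timer_exit(void)
{
	WRITE_ONCE(check_timer_stopping, true);
	del_timer_sync(&check_timer);
}

module_init(check_timer_init);
module_exit(check_timer_exit);
MODULE_LICENSE("GPL");

The add_timer_on() call pins each re-arm to the chosen CPU, so successive firings walk the online mask instead of always running on the CPU that armed the timer first.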