@@ -957,6 +957,8 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
 #define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
 #define raw_rq()		raw_cpu_ptr(&runqueues)
 
+extern void update_rq_clock(struct rq *rq);
+
 static inline u64 __rq_clock_broken(struct rq *rq)
 {
 	return READ_ONCE(rq->clock);
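
For orientation (not part of the patch): hoisting the update_rq_clock() declaration presumably lets code earlier in this header call it before its old declaration point. A minimal sketch of what that enables, with a hypothetical helper name:

/* Hypothetical helper, for illustration only; not in the patch. */
static inline u64 example_fresh_clock(struct rq *rq)
{
	update_rq_clock(rq);		/* caller must hold rq->lock */
	return READ_ONCE(rq->clock);	/* same read as __rq_clock_broken() */
}
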
@@ -1075,6 +1077,86 @@ static inline void rq_repin_lock(struct rq *rq, struct rq_flags *rf)
 #endif
 }
 
+struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
+	__acquires(rq->lock);
+
+struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
+	__acquires(p->pi_lock)
+	__acquires(rq->lock);
+
+static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf)
+	__releases(rq->lock)
+{
+	rq_unpin_lock(rq, rf);
+	raw_spin_unlock(&rq->lock);
+}
+
+static inline void
+task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
+	__releases(rq->lock)
+	__releases(p->pi_lock)
+{
+	rq_unpin_lock(rq, rf);
+	raw_spin_unlock(&rq->lock);
+	raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
+}
+
+static inline void
+rq_lock_irqsave(struct rq *rq, struct rq_flags *rf)
+	__acquires(rq->lock)
+{
+	raw_spin_lock_irqsave(&rq->lock, rf->flags);
+	rq_pin_lock(rq, rf);
+}
+
+static inline void
+rq_lock_irq(struct rq *rq, struct rq_flags *rf)
+	__acquires(rq->lock)
+{
+	raw_spin_lock_irq(&rq->lock);
+	rq_pin_lock(rq, rf);
+}
+
+static inline void
+rq_lock(struct rq *rq, struct rq_flags *rf)
+	__acquires(rq->lock)
+{
+	raw_spin_lock(&rq->lock);
+	rq_pin_lock(rq, rf);
+}
+
+static inline void
+rq_relock(struct rq *rq, struct rq_flags *rf)
+	__acquires(rq->lock)
+{
+	raw_spin_lock(&rq->lock);
+	rq_repin_lock(rq, rf);
+}
+
+static inline void
+rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf)
+	__releases(rq->lock)
+{
+	rq_unpin_lock(rq, rf);
+	raw_spin_unlock_irqrestore(&rq->lock, rf->flags);
+}
+
+static inline void
+rq_unlock_irq(struct rq *rq, struct rq_flags *rf)
+	__releases(rq->lock)
+{
+	rq_unpin_lock(rq, rf);
+	raw_spin_unlock_irq(&rq->lock);
+}
+
+static inline void
+rq_unlock(struct rq *rq, struct rq_flags *rf)
+	__releases(rq->lock)
+{
+	rq_unpin_lock(rq, rf);
+	raw_spin_unlock(&rq->lock);
+}
+
 #ifdef CONFIG_NUMA
 enum numa_topology_type {
 	NUMA_DIRECT,
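
For context, these wrappers pair the raw rq->lock operations with the lockdep pin/unpin bookkeeping, so callers never call rq_pin_lock()/rq_unpin_lock() directly. A minimal usage sketch, where the caller example_touch_rq() is hypothetical and not part of the patch:

static void example_touch_rq(struct rq *rq)
{
	struct rq_flags rf;

	rq_lock_irqsave(rq, &rf);	/* raw_spin_lock_irqsave() + rq_pin_lock() */
	update_rq_clock(rq);		/* legal here: rq->lock is held */
	/* ... operate on the runqueue ... */
	rq_unlock_irqrestore(rq, &rf);	/* rq_unpin_lock() + raw_spin_unlock_irqrestore() */
}

Interrupt state travels in rf->flags, which is why the rq_flags cookie is passed to both halves of each pair.
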
@@ -1717,8 +1799,6 @@ static inline void sub_nr_running(struct rq *rq, unsigned count)
 	sched_update_tick_dependency(rq);
 }
 
-extern void update_rq_clock(struct rq *rq);
-
 extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
 extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);
 
@@ -1783,86 +1863,6 @@ unsigned long arch_scale_cpu_capacity(void __always_unused *sd, int cpu)
 #endif
 #endif
 
-struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
-	__acquires(rq->lock);
-
-struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
-	__acquires(p->pi_lock)
-	__acquires(rq->lock);
-
-static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf)
-	__releases(rq->lock)
-{
-	rq_unpin_lock(rq, rf);
-	raw_spin_unlock(&rq->lock);
-}
-
-static inline void
-task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
-	__releases(rq->lock)
-	__releases(p->pi_lock)
-{
-	rq_unpin_lock(rq, rf);
-	raw_spin_unlock(&rq->lock);
-	raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
-}
-
-static inline void
-rq_lock_irqsave(struct rq *rq, struct rq_flags *rf)
-	__acquires(rq->lock)
-{
-	raw_spin_lock_irqsave(&rq->lock, rf->flags);
-	rq_pin_lock(rq, rf);
-}
-
-static inline void
-rq_lock_irq(struct rq *rq, struct rq_flags *rf)
-	__acquires(rq->lock)
-{
-	raw_spin_lock_irq(&rq->lock);
-	rq_pin_lock(rq, rf);
-}
-
-static inline void
-rq_lock(struct rq *rq, struct rq_flags *rf)
-	__acquires(rq->lock)
-{
-	raw_spin_lock(&rq->lock);
-	rq_pin_lock(rq, rf);
-}
-
-static inline void
-rq_relock(struct rq *rq, struct rq_flags *rf)
-	__acquires(rq->lock)
-{
-	raw_spin_lock(&rq->lock);
-	rq_repin_lock(rq, rf);
-}
-
-static inline void
-rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf)
-	__releases(rq->lock)
-{
-	rq_unpin_lock(rq, rf);
-	raw_spin_unlock_irqrestore(&rq->lock, rf->flags);
-}
-
-static inline void
-rq_unlock_irq(struct rq *rq, struct rq_flags *rf)
-	__releases(rq->lock)
-{
-	rq_unpin_lock(rq, rf);
-	raw_spin_unlock_irq(&rq->lock);
-}
-
-static inline void
-rq_unlock(struct rq *rq, struct rq_flags *rf)
-	__releases(rq->lock)
-{
-	rq_unpin_lock(rq, rf);
-	raw_spin_unlock(&rq->lock);
-}
-
 #ifdef CONFIG_SMP
 #ifdef CONFIG_PREEMPT
 
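
The task_rq_lock() variant also takes p->pi_lock because a task can migrate between runqueues; holding pi_lock stabilizes task_rq(p) while the runqueue lock is acquired. A sketch of the intended calling pattern, where example_adjust_task() is a hypothetical caller and not part of the patch:

static void example_adjust_task(struct task_struct *p)
{
	struct rq_flags rf;
	struct rq *rq;

	rq = task_rq_lock(p, &rf);	/* acquires p->pi_lock, then rq->lock */
	update_rq_clock(rq);
	/* ... change p's scheduling state while its rq cannot change ... */
	task_rq_unlock(rq, p, &rf);	/* releases in the reverse order */
}
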