@@ -81,6 +81,7 @@ guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count);
  */
 #define SCHED_STATE_NO_LOCK_ENABLED		BIT(0)
 #define SCHED_STATE_NO_LOCK_PENDING_ENABLE	BIT(1)
+#define SCHED_STATE_NO_LOCK_REGISTERED		BIT(2)
 static inline bool context_enabled(struct intel_context *ce)
 {
 	return (atomic_read(&ce->guc_sched_state_no_lock) &
@@ -116,6 +117,24 @@ static inline void clr_context_pending_enable(struct intel_context *ce)
 		   &ce->guc_sched_state_no_lock);
 }
 
+static inline bool context_registered(struct intel_context *ce)
+{
+	return (atomic_read(&ce->guc_sched_state_no_lock) &
+		SCHED_STATE_NO_LOCK_REGISTERED);
+}
+
+static inline void set_context_registered(struct intel_context *ce)
+{
+	atomic_or(SCHED_STATE_NO_LOCK_REGISTERED,
+		  &ce->guc_sched_state_no_lock);
+}
+
+static inline void clr_context_registered(struct intel_context *ce)
+{
+	atomic_and((u32)~SCHED_STATE_NO_LOCK_REGISTERED,
+		   &ce->guc_sched_state_no_lock);
+}
+
 /*
  * Below is a set of functions which control the GuC scheduling state which
  * require a lock, aside from the special case where the functions are called
@@ -1092,6 +1111,7 @@ static int steal_guc_id(struct intel_guc *guc)
 
 		list_del_init(&ce->guc_id_link);
 		guc_id = ce->guc_id;
+		clr_context_registered(ce);
 		set_context_guc_id_invalid(ce);
 		return guc_id;
 	} else {
@@ -1201,10 +1221,15 @@ static int register_context(struct intel_context *ce, bool loop)
 	struct intel_guc *guc = ce_to_guc(ce);
 	u32 offset = intel_guc_ggtt_offset(guc, guc->lrc_desc_pool) +
 		ce->guc_id * sizeof(struct guc_lrc_desc);
+	int ret;
 
 	trace_intel_context_register(ce);
 
-	return __guc_action_register_context(guc, ce->guc_id, offset, loop);
+	ret = __guc_action_register_context(guc, ce->guc_id, offset, loop);
+	if (likely(!ret))
+		set_context_registered(ce);
+
+	return ret;
 }
 
 static int __guc_action_deregister_context(struct intel_guc *guc,
@@ -1260,13 +1285,17 @@ static void guc_context_policy_init(struct intel_engine_cs *engine,
 	desc->preemption_timeout = engine->props.preempt_timeout_ms * 1000;
 }
 
+static inline u8 map_i915_prio_to_guc_prio(int prio);
+
 static int guc_lrc_desc_pin(struct intel_context *ce, bool loop)
 {
 	struct intel_engine_cs *engine = ce->engine;
 	struct intel_runtime_pm *runtime_pm = engine->uncore->rpm;
 	struct intel_guc *guc = &engine->gt->uc.guc;
 	u32 desc_idx = ce->guc_id;
 	struct guc_lrc_desc *desc;
+	const struct i915_gem_context *ctx;
+	int prio = I915_CONTEXT_DEFAULT_PRIORITY;
 	bool context_registered;
 	intel_wakeref_t wakeref;
 	int ret = 0;
@@ -1282,6 +1311,12 @@ static int guc_lrc_desc_pin(struct intel_context *ce, bool loop)
 
 	context_registered = lrc_desc_registered(guc, desc_idx);
 
+	rcu_read_lock();
+	ctx = rcu_dereference(ce->gem_context);
+	if (ctx)
+		prio = ctx->sched.priority;
+	rcu_read_unlock();
+
 	reset_lrc_desc(guc, desc_idx);
 	set_lrc_desc_registered(guc, desc_idx, ce);
 
@@ -1290,7 +1325,8 @@ static int guc_lrc_desc_pin(struct intel_context *ce, bool loop)
 	desc->engine_submit_mask = adjust_engine_mask(engine->class,
 						      engine->mask);
 	desc->hw_context_desc = ce->lrc.lrca;
-	desc->priority = GUC_CLIENT_PRIORITY_KMD_NORMAL;
+	ce->guc_prio = map_i915_prio_to_guc_prio(prio);
+	desc->priority = ce->guc_prio;
 	desc->context_flags = CONTEXT_REGISTRATION_FLAG_KMD;
 	guc_context_policy_init(engine, desc);
 	init_sched_state(ce);
@@ -1693,11 +1729,17 @@ static inline void guc_lrc_desc_unpin(struct intel_context *ce)
 	GEM_BUG_ON(ce != __get_context(guc, ce->guc_id));
 	GEM_BUG_ON(context_enabled(ce));
 
+	clr_context_registered(ce);
 	deregister_context(ce, ce->guc_id, true);
 }
 
 static void __guc_context_destroy(struct intel_context *ce)
 {
+	GEM_BUG_ON(ce->guc_prio_count[GUC_CLIENT_PRIORITY_KMD_HIGH] ||
+		   ce->guc_prio_count[GUC_CLIENT_PRIORITY_HIGH] ||
+		   ce->guc_prio_count[GUC_CLIENT_PRIORITY_KMD_NORMAL] ||
+		   ce->guc_prio_count[GUC_CLIENT_PRIORITY_NORMAL]);
+
 	lrc_fini(ce);
 	intel_context_fini(ce);
 
@@ -1791,15 +1833,124 @@ static int guc_context_alloc(struct intel_context *ce)
 	return lrc_alloc(ce, ce->engine);
 }
 
+static void guc_context_set_prio(struct intel_guc *guc,
+				 struct intel_context *ce,
+				 u8 prio)
+{
+	u32 action[] = {
+		INTEL_GUC_ACTION_SET_CONTEXT_PRIORITY,
+		ce->guc_id,
+		prio,
+	};
+
+	GEM_BUG_ON(prio < GUC_CLIENT_PRIORITY_KMD_HIGH ||
+		   prio > GUC_CLIENT_PRIORITY_NORMAL);
+
+	if (ce->guc_prio == prio || submission_disabled(guc) ||
+	    !context_registered(ce))
+		return;
+
+	guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action), 0, true);
+
+	ce->guc_prio = prio;
+	trace_intel_context_set_prio(ce);
+}
+
+static inline u8 map_i915_prio_to_guc_prio(int prio)
+{
+	if (prio == I915_PRIORITY_NORMAL)
+		return GUC_CLIENT_PRIORITY_KMD_NORMAL;
+	else if (prio < I915_PRIORITY_NORMAL)
+		return GUC_CLIENT_PRIORITY_NORMAL;
+	else if (prio < I915_PRIORITY_DISPLAY)
+		return GUC_CLIENT_PRIORITY_HIGH;
+	else
+		return GUC_CLIENT_PRIORITY_KMD_HIGH;
+}
+
+static inline void add_context_inflight_prio(struct intel_context *ce,
+					     u8 guc_prio)
+{
+	lockdep_assert_held(&ce->guc_active.lock);
+	GEM_BUG_ON(guc_prio >= ARRAY_SIZE(ce->guc_prio_count));
+
+	++ce->guc_prio_count[guc_prio];
+
+	/* Overflow protection */
+	GEM_WARN_ON(!ce->guc_prio_count[guc_prio]);
+}
+
+static inline void sub_context_inflight_prio(struct intel_context *ce,
+					     u8 guc_prio)
+{
+	lockdep_assert_held(&ce->guc_active.lock);
+	GEM_BUG_ON(guc_prio >= ARRAY_SIZE(ce->guc_prio_count));
+
+	/* Underflow protection */
+	GEM_WARN_ON(!ce->guc_prio_count[guc_prio]);
+
+	--ce->guc_prio_count[guc_prio];
+}
+
+static inline void update_context_prio(struct intel_context *ce)
+{
+	struct intel_guc *guc = &ce->engine->gt->uc.guc;
+	int i;
+
+	BUILD_BUG_ON(GUC_CLIENT_PRIORITY_KMD_HIGH != 0);
+	BUILD_BUG_ON(GUC_CLIENT_PRIORITY_KMD_HIGH > GUC_CLIENT_PRIORITY_NORMAL);
+
+	lockdep_assert_held(&ce->guc_active.lock);
+
+	for (i = 0; i < ARRAY_SIZE(ce->guc_prio_count); ++i) {
+		if (ce->guc_prio_count[i]) {
+			guc_context_set_prio(guc, ce, i);
+			break;
+		}
+	}
+}
+
+static inline bool new_guc_prio_higher(u8 old_guc_prio, u8 new_guc_prio)
+{
+	/* Lower value is higher priority */
+	return new_guc_prio < old_guc_prio;
+}
+
 static void add_to_context(struct i915_request *rq)
 {
 	struct intel_context *ce = rq->context;
+	u8 new_guc_prio = map_i915_prio_to_guc_prio(rq_prio(rq));
+
+	GEM_BUG_ON(rq->guc_prio == GUC_PRIO_FINI);
 
 	spin_lock(&ce->guc_active.lock);
 	list_move_tail(&rq->sched.link, &ce->guc_active.requests);
+
+	if (rq->guc_prio == GUC_PRIO_INIT) {
+		rq->guc_prio = new_guc_prio;
+		add_context_inflight_prio(ce, rq->guc_prio);
+	} else if (new_guc_prio_higher(rq->guc_prio, new_guc_prio)) {
+		sub_context_inflight_prio(ce, rq->guc_prio);
+		rq->guc_prio = new_guc_prio;
+		add_context_inflight_prio(ce, rq->guc_prio);
+	}
+	update_context_prio(ce);
+
 	spin_unlock(&ce->guc_active.lock);
 }
 
+static void guc_prio_fini(struct i915_request *rq, struct intel_context *ce)
+{
+	lockdep_assert_held(&ce->guc_active.lock);
+
+	if (rq->guc_prio != GUC_PRIO_INIT &&
+	    rq->guc_prio != GUC_PRIO_FINI) {
+		sub_context_inflight_prio(ce, rq->guc_prio);
+		update_context_prio(ce);
+	}
+	rq->guc_prio = GUC_PRIO_FINI;
+}
+
 static void remove_from_context(struct i915_request *rq)
 {
 	struct intel_context *ce = rq->context;
@@ -1812,6 +1963,8 @@ static void remove_from_context(struct i915_request *rq)
 	/* Prevent further __await_execution() registering a cb, then flush */
 	set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
 
+	guc_prio_fini(rq, ce);
+
 	spin_unlock_irq(&ce->guc_active.lock);
 
 	atomic_dec(&ce->guc_id_ref);
@@ -2093,6 +2246,39 @@ static void guc_init_breadcrumbs(struct intel_engine_cs *engine)
 	}
 }
 
+static void guc_bump_inflight_request_prio(struct i915_request *rq,
+					   int prio)
+{
+	struct intel_context *ce = rq->context;
+	u8 new_guc_prio = map_i915_prio_to_guc_prio(prio);
+
+	/* Short circuit function */
+	if (prio < I915_PRIORITY_NORMAL ||
+	    rq->guc_prio == GUC_PRIO_FINI ||
+	    (rq->guc_prio != GUC_PRIO_INIT &&
+	     !new_guc_prio_higher(rq->guc_prio, new_guc_prio)))
+		return;
+
+	spin_lock(&ce->guc_active.lock);
+	if (rq->guc_prio != GUC_PRIO_FINI) {
+		if (rq->guc_prio != GUC_PRIO_INIT)
+			sub_context_inflight_prio(ce, rq->guc_prio);
+		rq->guc_prio = new_guc_prio;
+		add_context_inflight_prio(ce, rq->guc_prio);
+		update_context_prio(ce);
+	}
+	spin_unlock(&ce->guc_active.lock);
+}
+
+static void guc_retire_inflight_request_prio(struct i915_request *rq)
+{
+	struct intel_context *ce = rq->context;
+
+	spin_lock(&ce->guc_active.lock);
+	guc_prio_fini(rq, ce);
+	spin_unlock(&ce->guc_active.lock);
+}
+
 static void sanitize_hwsp(struct intel_engine_cs *engine)
 {
 	struct intel_timeline *tl;
@@ -2317,6 +2503,10 @@ int intel_guc_submission_setup(struct intel_engine_cs *engine)
 		guc->sched_engine->disabled = guc_sched_engine_disabled;
 		guc->sched_engine->private_data = guc;
 		guc->sched_engine->destroy = guc_sched_engine_destroy;
+		guc->sched_engine->bump_inflight_request_prio =
+			guc_bump_inflight_request_prio;
+		guc->sched_engine->retire_inflight_request_prio =
+			guc_retire_inflight_request_prio;
 		tasklet_setup(&guc->sched_engine->tasklet,
 			      guc_submission_tasklet);
 	}
@@ -2694,6 +2884,22 @@ void intel_guc_submission_print_info(struct intel_guc *guc,
 	drm_printf(p, "\n");
 }
 
+static inline void guc_log_context_priority(struct drm_printer *p,
+					    struct intel_context *ce)
+{
+	int i;
+
+	drm_printf(p, "\t\tPriority: %d\n",
+		   ce->guc_prio);
+	drm_printf(p, "\t\tNumber Requests (lower index == higher priority)\n");
+	for (i = GUC_CLIENT_PRIORITY_KMD_HIGH;
+	     i < GUC_CLIENT_PRIORITY_NUM; ++i) {
+		drm_printf(p, "\t\tNumber requests in priority band[%d]: %d\n",
+			   i, ce->guc_prio_count[i]);
+	}
+	drm_printf(p, "\n");
+}
+
 void intel_guc_submission_print_context_info(struct intel_guc *guc,
 					     struct drm_printer *p)
 {
@@ -2716,6 +2922,8 @@ void intel_guc_submission_print_context_info(struct intel_guc *guc,
 		drm_printf(p, "\t\tSchedule State: 0x%x, 0x%x\n\n",
 			   ce->guc_state.sched_state,
 			   atomic_read(&ce->guc_sched_state_no_lock));
+
+		guc_log_context_priority(p, ce);
 	}
 }
 
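
Note on the priority mapping: map_i915_prio_to_guc_prio() collapses the i915 request priority range into the four GuC priority bands, with lower band values meaning higher priority. The standalone sketch below mirrors that branching so it can be compiled and run outside the kernel; the constant values are assumptions chosen only to preserve the ordering used by the patch (they are not copied from the i915 headers), and I915_PRIORITY_DISPLAY in particular is a placeholder.

/*
 * Minimal sketch of the i915 -> GuC priority band mapping.
 * Constant values below are illustrative assumptions, not kernel values.
 */
#include <stdio.h>

#define I915_PRIORITY_NORMAL		0	/* assumed */
#define I915_PRIORITY_DISPLAY		1024	/* assumed placeholder */

#define GUC_CLIENT_PRIORITY_KMD_HIGH	0
#define GUC_CLIENT_PRIORITY_HIGH	1
#define GUC_CLIENT_PRIORITY_KMD_NORMAL	2
#define GUC_CLIENT_PRIORITY_NORMAL	3

static unsigned char map_i915_prio_to_guc_prio(int prio)
{
	if (prio == I915_PRIORITY_NORMAL)
		return GUC_CLIENT_PRIORITY_KMD_NORMAL;
	else if (prio < I915_PRIORITY_NORMAL)
		return GUC_CLIENT_PRIORITY_NORMAL;
	else if (prio < I915_PRIORITY_DISPLAY)
		return GUC_CLIENT_PRIORITY_HIGH;
	else
		return GUC_CLIENT_PRIORITY_KMD_HIGH;
}

int main(void)
{
	int prios[] = { -512, 0, 512, 1024 };
	unsigned int i;

	/* Print the band each sample priority falls into (lower == higher). */
	for (i = 0; i < sizeof(prios) / sizeof(prios[0]); i++)
		printf("i915 prio %5d -> GuC band %u\n",
		       prios[i], map_i915_prio_to_guc_prio(prios[i]));
	return 0;
}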
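Note on the in-flight counting: add_context_inflight_prio()/sub_context_inflight_prio() keep a per-context count of outstanding requests in each GuC band, and update_context_prio() reports the lowest non-empty band to the GuC. The sketch below shows only that reduction under the assumption of four bands (mirroring GUC_CLIENT_PRIORITY_NUM); unlike the patch, it returns a fallback band when no requests are in flight instead of skipping the update, purely so the demo always has a value to print.

/* Minimal, user-space sketch of the per-context inflight band counting. */
#include <stdio.h>

#define NUM_BANDS 4	/* assumed, mirrors GUC_CLIENT_PRIORITY_NUM */

struct ctx_prio {
	unsigned int count[NUM_BANDS];	/* in-flight requests per band */
};

/* Analogue of add_context_inflight_prio(): a request enters a band. */
static void add_inflight(struct ctx_prio *c, unsigned int band)
{
	c->count[band]++;
}

/* Analogue of sub_context_inflight_prio(): a request leaves a band. */
static void sub_inflight(struct ctx_prio *c, unsigned int band)
{
	c->count[band]--;
}

/* Analogue of update_context_prio(): first non-empty band wins. */
static unsigned int effective_band(const struct ctx_prio *c)
{
	unsigned int band;

	for (band = 0; band < NUM_BANDS; band++)
		if (c->count[band])
			return band;
	return NUM_BANDS - 1;	/* idle: sketch-only fallback */
}

int main(void)
{
	struct ctx_prio c = { { 0 } };

	add_inflight(&c, 2);	/* one normal request in flight */
	add_inflight(&c, 1);	/* one high-priority request in flight */
	printf("effective band: %u\n", effective_band(&c));	/* prints 1 */

	sub_inflight(&c, 1);	/* the high-priority request retires */
	printf("effective band: %u\n", effective_band(&c));	/* prints 2 */
	return 0;
}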