@@ -42,6 +42,10 @@ vc4_atomic_complete_commit(struct vc4_commit *c)
 	struct drm_device *dev = state->dev;
 	struct vc4_dev *vc4 = to_vc4_dev(dev);
 
+	drm_atomic_helper_wait_for_fences(dev, state, false);
+
+	drm_atomic_helper_wait_for_dependencies(state);
+
 	drm_atomic_helper_commit_modeset_disables(dev, state);
 
 	drm_atomic_helper_commit_planes(dev, state, 0);
@@ -57,10 +61,14 @@ vc4_atomic_complete_commit(struct vc4_commit *c)
 	 */
 	state->legacy_cursor_update = false;
 
+	drm_atomic_helper_commit_hw_done(state);
+
 	drm_atomic_helper_wait_for_vblanks(dev, state);
 
 	drm_atomic_helper_cleanup_planes(dev, state);
 
+	drm_atomic_helper_commit_cleanup_done(state);
+
 	drm_atomic_state_put(state);
 
 	up(&vc4->async_modeset);
@@ -117,32 +125,10 @@ static int vc4_atomic_commit(struct drm_device *dev,
 	if (!c)
 		return -ENOMEM;
 
-	/* Make sure that any outstanding modesets have finished. */
-	if (nonblock) {
-		struct drm_crtc *crtc;
-		struct drm_crtc_state *crtc_state;
-		unsigned long flags;
-		bool busy = false;
-
-		/*
-		 * If there's an undispatched event to send then we're
-		 * obviously still busy.  If there isn't, then we can
-		 * unconditionally wait for the semaphore because it
-		 * shouldn't be contended (for long).
-		 *
-		 * This is to prevent a race where queuing a new flip
-		 * from userspace immediately on receipt of an event
-		 * beats our clean-up and returns EBUSY.
-		 */
-		spin_lock_irqsave(&dev->event_lock, flags);
-		for_each_crtc_in_state(state, crtc, crtc_state, i)
-			busy |= vc4_event_pending(crtc);
-		spin_unlock_irqrestore(&dev->event_lock, flags);
-		if (busy) {
-			kfree(c);
-			return -EBUSY;
-		}
-	}
+	ret = drm_atomic_helper_setup_commit(state, nonblock);
+	if (ret)
+		return ret;
+
 	ret = down_interruptible(&vc4->async_modeset);
 	if (ret) {
 		kfree(c);
@@ -202,11 +188,50 @@ static int vc4_atomic_commit(struct drm_device *dev,
 	return 0;
 }
 
+static struct drm_framebuffer *vc4_fb_create(struct drm_device *dev,
+					     struct drm_file *file_priv,
+					     const struct drm_mode_fb_cmd2 *mode_cmd)
+{
+	struct drm_mode_fb_cmd2 mode_cmd_local;
+
+	/* If the user didn't specify a modifier, use the
+	 * vc4_set_tiling_ioctl() state for the BO.
+	 */
+	if (!(mode_cmd->flags & DRM_MODE_FB_MODIFIERS)) {
+		struct drm_gem_object *gem_obj;
+		struct vc4_bo *bo;
+
+		gem_obj = drm_gem_object_lookup(file_priv,
+						mode_cmd->handles[0]);
+		if (!gem_obj) {
+			DRM_ERROR("Failed to look up GEM BO %d\n",
+				  mode_cmd->handles[0]);
+			return ERR_PTR(-ENOENT);
+		}
+		bo = to_vc4_bo(gem_obj);
+
+		mode_cmd_local = *mode_cmd;
+
+		if (bo->t_format) {
+			mode_cmd_local.modifier[0] =
+				DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED;
+		} else {
+			mode_cmd_local.modifier[0] = DRM_FORMAT_MOD_NONE;
+		}
+
+		drm_gem_object_unreference_unlocked(gem_obj);
+
+		mode_cmd = &mode_cmd_local;
+	}
+
+	return drm_fb_cma_create(dev, file_priv, mode_cmd);
+}
+
 static const struct drm_mode_config_funcs vc4_mode_funcs = {
 	.output_poll_changed = vc4_output_poll_changed,
 	.atomic_check = drm_atomic_helper_check,
 	.atomic_commit = vc4_atomic_commit,
-	.fb_create = drm_fb_cma_create,
+	.fb_create = vc4_fb_create,
 };
 
 int vc4_kms_load(struct drm_device *dev)