@@ -104,19 +104,12 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
 	 * is initialized itself.
 	 */
 	entity->sched_list = num_sched_list > 1 ? sched_list : NULL;
+	if (num_sched_list) {
+		entity->sched_list = num_sched_list > 1 ? sched_list : NULL;
+		entity->rq = &sched_list[0]->rq;
+	}
 	RCU_INIT_POINTER(entity->last_scheduled, NULL);
 	RB_CLEAR_NODE(&entity->rb_tree_node);
-
-	if (num_sched_list && !sched_list[0]->rq) {
-		/* Since every entry covered by num_sched_list
-		 * should be non-NULL and therefore we warn drivers
-		 * not to do this and to fix their DRM calling order.
-		 */
-		pr_warn("%s: called with uninitialized scheduler\n", __func__);
-	} else if (num_sched_list) {
-		entity->rq = sched_list[0]->rq;
-	}
-
 	init_completion(&entity->entity_idle);
 
 	/* We start in an idle state. */
@@ -302,7 +295,7 @@ long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
 	if (!entity->rq)
 		return 0;
 
-	sched = entity->rq->sched;
+	sched = container_of(entity->rq, typeof(*sched), rq);
 	/**
 	 * The client will not queue more IBs during this fini, consume existing
 	 * queued IBs or discard them on SIGKILL
@@ -394,9 +387,11 @@ static void drm_sched_entity_wakeup(struct dma_fence *f,
 {
 	struct drm_sched_entity *entity =
 		container_of(cb, struct drm_sched_entity, cb);
+	struct drm_gpu_scheduler *sched =
+		container_of(entity->rq, typeof(*sched), rq);
 
 	drm_sched_entity_clear_dep(f, cb);
-	drm_sched_wakeup(entity->rq->sched);
+	drm_sched_wakeup(sched);
 }
 
 /**
@@ -422,7 +417,8 @@ EXPORT_SYMBOL(drm_sched_entity_set_priority);
  */
 static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
 {
-	struct drm_gpu_scheduler *sched = entity->rq->sched;
+	struct drm_gpu_scheduler *sched =
+		container_of(entity->rq, typeof(*sched), rq);
 	struct dma_fence *fence = entity->dependency;
 	struct drm_sched_fence *s_fence;
 
@@ -561,7 +557,7 @@ void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
 
 	spin_lock(&entity->lock);
 	sched = drm_sched_pick_best(entity->sched_list, entity->num_sched_list);
-	rq = sched ? sched->rq : NULL;
+	rq = sched ? &sched->rq : NULL;
 	if (rq != entity->rq) {
 		drm_sched_rq_remove_entity(entity->rq, entity);
 		entity->rq = rq;
@@ -584,10 +580,12 @@ void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
 void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
 {
 	struct drm_sched_entity *entity = sched_job->entity;
+	struct drm_gpu_scheduler *sched =
+		container_of(entity->rq, typeof(*sched), rq);
 	bool first;
 
 	trace_drm_sched_job(sched_job, entity);
-	atomic_inc(entity->rq->sched->score);
+	atomic_inc(sched->score);
 	WRITE_ONCE(entity->last_user, current->group_leader);
 
 	/*
@@ -598,8 +596,6 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
 
 	/* first job wakes up scheduler */
 	if (first) {
-		struct drm_gpu_scheduler *sched;
-
 		sched = drm_sched_rq_add_entity(entity);
 		if (sched)
 			drm_sched_wakeup(sched);
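A note on the recurring conversion above: struct drm_gpu_scheduler now embeds its single run queue as the rq member instead of pointing at a separately allocated one, so any code that only holds entity->rq can recover the owning scheduler with container_of() rather than chasing the old rq->sched back-pointer. Below is a minimal standalone sketch of that pattern; the scheduler/rq types and the simplified container_of() macro are illustrative stand-ins, not the real DRM definitions.

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-in for the kernel's container_of() macro. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct rq {
	int head;			/* placeholder field */
};

struct scheduler {
	int score;
	struct rq rq;			/* embedded singleton, not a pointer */
};

int main(void)
{
	struct scheduler sched = { .score = 42 };
	struct rq *rq = &sched.rq;	/* all an entity needs to store */

	/* Recover the enclosing scheduler from the embedded member. */
	struct scheduler *s = container_of(rq, struct scheduler, rq);

	printf("score = %d\n", s->score);	/* prints: score = 42 */
	return 0;
}

Embedding the run queue this way presumably lets the rq->sched back-pointer go away entirely: the containing scheduler is always recoverable at zero cost from the member's address, which is exactly what the container_of(entity->rq, typeof(*sched), rq) hunks above exploit.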