drm/sched: Reverse run-queue priority enumeration
Reverse run-queue priority enumeration such that the highest priority is now 0, and the priority diminishes with each consecutive integer.

Run-queues correspond to priorities. To an external observer, a scheduler created with a single run-queue and another created with DRM_SCHED_PRIORITY_COUNT run-queues should always schedule sched->sched_rq[0] with the same "priority", as that index run-queue exists in both schedulers, i.e. in a scheduler with one run-queue or with many. This patch makes it so.

In other words, the "priority" of sched->sched_rq[n], n >= 0, is the same for any scheduler created with any allowable number of run-queues (priorities), from 1 to DRM_SCHED_PRIORITY_COUNT.

Cc: Rob Clark <robdclark@gmail.com>
Cc: Abhinav Kumar <quic_abhinavk@quicinc.com>
Cc: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
Cc: Danilo Krummrich <dakr@redhat.com>
Cc: Alex Deucher <alexander.deucher@amd.com>
Cc: Christian König <christian.koenig@amd.com>
Cc: linux-arm-msm@vger.kernel.org
Cc: freedreno@lists.freedesktop.org
Cc: dri-devel@lists.freedesktop.org
Signed-off-by: Luben Tuikov <ltuikov89@gmail.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20231124052752.6915-6-ltuikov89@gmail.com
commit 38f922a563
parent fe375c7480
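To make the invariant from the commit message concrete: with the reversed enumeration, index 0 is the highest-priority run-queue in every scheduler, no matter how many run-queues it was created with. A minimal sketch, not part of the patch; the helper name is hypothetical:

	/* Sketch only. sched_rq[0] means "most important" both in a
	 * scheduler created with num_rqs == 1 and in one created with
	 * num_rqs == DRM_SCHED_PRIORITY_COUNT.
	 */
	static struct drm_sched_rq *
	sched_highest_rq(struct drm_gpu_scheduler *sched)	/* hypothetical */
	{
		return sched->sched_rq[0];	/* 0 == DRM_SCHED_PRIORITY_KERNEL */
	}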
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -325,7 +325,7 @@ void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
 	int i;
 
 	/* Signal all jobs not yet scheduled */
-	for (i = sched->num_rqs - 1; i >= DRM_SCHED_PRIORITY_LOW; i--) {
+	for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
 		struct drm_sched_rq *rq = sched->sched_rq[i];
 		spin_lock(&rq->lock);
 		list_for_each_entry(s_entity, &rq->entities, list) {
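The mechanical change is the same in every loop in this patch: under the old enum the highest priority had the largest index, so highest-first traversal counted down from num_rqs - 1; under the reversed enum DRM_SCHED_PRIORITY_KERNEL is 0, so the same traversal counts up. A sketch of the equivalence, assuming the reversed enum; visit() is a placeholder, not a real function:

	/* Equivalence sketch (reversed enum, DRM_SCHED_PRIORITY_KERNEL == 0).
	 * Old: highest priority at the largest index, so walk downward:
	 *	for (i = sched->num_rqs - 1; i >= DRM_SCHED_PRIORITY_LOW; i--)
	 * New: highest priority at index 0, so walk upward:
	 */
	for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++)
		visit(sched->sched_rq[i]);	/* placeholder for per-rq work */

Either way, the run-queues are visited from most to least important.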
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
@@ -347,7 +347,7 @@ struct msm_gpu_perfcntr {
  * DRM_SCHED_PRIORITY_KERNEL priority level is treated specially in some
  * cases, so we don't use it (no need for kernel generated jobs).
  */
-#define NR_SCHED_PRIORITIES (1 + DRM_SCHED_PRIORITY_HIGH - DRM_SCHED_PRIORITY_LOW)
+#define NR_SCHED_PRIORITIES (1 + DRM_SCHED_PRIORITY_LOW - DRM_SCHED_PRIORITY_HIGH)
 
 /**
  * struct msm_file_private - per-drm_file context
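Swapping the operands keeps the count unchanged: with the reversed enum (KERNEL = 0, HIGH = 1, NORMAL = 2, LOW = 3), 1 + LOW - HIGH = 1 + 3 - 1 = 3, exactly what 1 + HIGH - LOW = 1 + 2 - 0 = 3 gave under the old numbering. As a compile-time sanity check, a sketch assuming those values:

	/* Sketch only: msm exposes three non-kernel priority levels
	 * under either numbering.
	 */
	static_assert(1 + DRM_SCHED_PRIORITY_LOW - DRM_SCHED_PRIORITY_HIGH == 3,
		      "LOW..HIGH must span three levels");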
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -82,13 +82,14 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
 		pr_warn("%s: called with uninitialized scheduler\n", __func__);
 	} else if (num_sched_list) {
 		/* The "priority" of an entity cannot exceed the number of run-queues of a
-		 * scheduler. Protect against num_rqs being 0, by converting to signed.
+		 * scheduler. Protect against num_rqs being 0, by converting to signed. Choose
+		 * the lowest priority available.
 		 */
 		if (entity->priority >= sched_list[0]->num_rqs) {
 			drm_err(sched_list[0], "entity with out-of-bounds priority:%u num_rqs:%u\n",
 				entity->priority, sched_list[0]->num_rqs);
 			entity->priority = max_t(s32, (s32) sched_list[0]->num_rqs - 1,
-						 (s32) DRM_SCHED_PRIORITY_LOW);
+						 (s32) DRM_SCHED_PRIORITY_KERNEL);
 		}
 		entity->rq = sched_list[0]->sched_rq[entity->priority];
 	}
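Note what the clamp computes now: under the reversed enum, num_rqs - 1 is the lowest priority the scheduler actually has, and the max_t() against DRM_SCHED_PRIORITY_KERNEL (0), together with the signed casts, only guards the degenerate num_rqs == 0 case. Worked through as a sketch:

	/* Clamp examples (reversed enum: KERNEL == 0, LOW == 3):
	 *	num_rqs == 4: max_t(s32, 3, 0) == 3 == DRM_SCHED_PRIORITY_LOW
	 *	num_rqs == 1: max_t(s32, 0, 0) == 0 == DRM_SCHED_PRIORITY_KERNEL
	 *	num_rqs == 0: max_t(s32, -1, 0) == 0, rather than the
	 *		      subtraction wrapping to a huge unsigned index
	 */
	entity->priority = max_t(s32, (s32) sched_list[0]->num_rqs - 1,
				 (s32) DRM_SCHED_PRIORITY_KERNEL);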
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
@@ -1051,8 +1051,9 @@ drm_sched_select_entity(struct drm_gpu_scheduler *sched)
 	struct drm_sched_entity *entity;
 	int i;
 
-	/* Kernel run queue has higher priority than normal run queue*/
-	for (i = sched->num_rqs - 1; i >= DRM_SCHED_PRIORITY_LOW; i--) {
+	/* Start with the highest priority.
+	 */
+	for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
 		entity = drm_sched_policy == DRM_SCHED_POLICY_FIFO ?
 			drm_sched_rq_select_entity_fifo(sched, sched->sched_rq[i]) :
 			drm_sched_rq_select_entity_rr(sched, sched->sched_rq[i]);
@@ -1291,7 +1292,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
 	if (!sched->sched_rq)
 		goto Out_free;
 	sched->num_rqs = num_rqs;
-	for (i = DRM_SCHED_PRIORITY_LOW; i < sched->num_rqs; i++) {
+	for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
 		sched->sched_rq[i] = kzalloc(sizeof(*sched->sched_rq[i]), GFP_KERNEL);
 		if (!sched->sched_rq[i])
 			goto Out_unroll;
@@ -1312,7 +1313,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
 	sched->ready = true;
 	return 0;
 Out_unroll:
-	for (--i ; i >= DRM_SCHED_PRIORITY_LOW; i--)
+	for (--i ; i >= DRM_SCHED_PRIORITY_KERNEL; i--)
 		kfree(sched->sched_rq[i]);
 Out_free:
 	kfree(sched->sched_rq);
@@ -1338,7 +1339,7 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched)
 
 	drm_sched_wqueue_stop(sched);
 
-	for (i = sched->num_rqs - 1; i >= DRM_SCHED_PRIORITY_LOW; i--) {
+	for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
 		struct drm_sched_rq *rq = sched->sched_rq[i];
 
 		spin_lock(&rq->lock);
@@ -1390,9 +1391,7 @@ void drm_sched_increase_karma(struct drm_sched_job *bad)
 	if (bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
 		atomic_inc(&bad->karma);
 
-		for (i = DRM_SCHED_PRIORITY_LOW;
-		     i < min_t(typeof(sched->num_rqs), sched->num_rqs, DRM_SCHED_PRIORITY_KERNEL);
-		     i++) {
+		for (i = DRM_SCHED_PRIORITY_HIGH; i < sched->num_rqs; i++) {
 			struct drm_sched_rq *rq = sched->sched_rq[i];
 
 			spin_lock(&rq->lock);
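Both forms of this loop skip the kernel run-queue; only the index arithmetic changes. Under the old numbering the non-kernel queues sat at indices DRM_SCHED_PRIORITY_LOW .. DRM_SCHED_PRIORITY_KERNEL - 1, hence the min_t() upper bound; under the reversed numbering the kernel queue is index 0, so the loop simply starts one past it, at DRM_SCHED_PRIORITY_HIGH (1). A sketch assuming the reversed enum; mark_guilty_in_rq() is a placeholder:

	/* Karma is never charged against the kernel run-queue (index 0),
	 * so start one index past it and walk the rest in priority order.
	 */
	for (i = DRM_SCHED_PRIORITY_HIGH; i < sched->num_rqs; i++)
		mark_guilty_in_rq(sched->sched_rq[i]);	/* placeholder */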
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
@@ -63,10 +63,10 @@ struct drm_file;
  * to an array, and as such should start at 0.
  */
 enum drm_sched_priority {
-	DRM_SCHED_PRIORITY_LOW,
-	DRM_SCHED_PRIORITY_NORMAL,
-	DRM_SCHED_PRIORITY_HIGH,
 	DRM_SCHED_PRIORITY_KERNEL,
+	DRM_SCHED_PRIORITY_HIGH,
+	DRM_SCHED_PRIORITY_NORMAL,
+	DRM_SCHED_PRIORITY_LOW,
 
 	DRM_SCHED_PRIORITY_COUNT
 };
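For reference, the numeric values before and after, written out as a sketch (the asserts assume the reversed enum above):

	/* Old: LOW = 0, NORMAL = 1, HIGH = 2, KERNEL = 3 (highest).
	 * New: KERNEL = 0 (highest), HIGH = 1, NORMAL = 2, LOW = 3.
	 * DRM_SCHED_PRIORITY_COUNT stays 4; only the direction reverses.
	 */
	static_assert(DRM_SCHED_PRIORITY_KERNEL == 0,
		      "highest priority must be index 0");
	static_assert(DRM_SCHED_PRIORITY_LOW == DRM_SCHED_PRIORITY_COUNT - 1,
		      "lowest priority must be the last run-queue index");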