Skip to content

Commit 552b80d

Browse files
Nirmoy Das authored and alexdeucher committed
drm/amdgpu: remove unused functions
AMDGPU statically sets the priority for compute queues at initialization, so remove all the functions responsible for changing compute queue priority dynamically.

Signed-off-by: Nirmoy Das <[email protected]>
Reviewed-by: Christian König <[email protected]>
Signed-off-by: Alex Deucher <[email protected]>
1 parent 2316a86 commit 552b80d

File tree

4 files changed

+0
-276
lines changed

4 files changed

+0
-276
lines changed

drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c

Lines changed: 0 additions & 70 deletions
Original file line numberDiff line numberDiff line change
@@ -150,76 +150,6 @@ void amdgpu_ring_undo(struct amdgpu_ring *ring)
150150
ring->funcs->end_use(ring);
151151
}
152152

153-
/**
154-
* amdgpu_ring_priority_put - restore a ring's priority
155-
*
156-
* @ring: amdgpu_ring structure holding the information
157-
* @priority: target priority
158-
*
159-
* Release a request for executing at @priority
160-
*/
161-
void amdgpu_ring_priority_put(struct amdgpu_ring *ring,
162-
enum drm_sched_priority priority)
163-
{
164-
int i;
165-
166-
if (!ring->funcs->set_priority)
167-
return;
168-
169-
if (atomic_dec_return(&ring->num_jobs[priority]) > 0)
170-
return;
171-
172-
/* no need to restore if the job is already at the lowest priority */
173-
if (priority == DRM_SCHED_PRIORITY_NORMAL)
174-
return;
175-
176-
mutex_lock(&ring->priority_mutex);
177-
/* something higher prio is executing, no need to decay */
178-
if (ring->priority > priority)
179-
goto out_unlock;
180-
181-
/* decay priority to the next level with a job available */
182-
for (i = priority; i >= DRM_SCHED_PRIORITY_MIN; i--) {
183-
if (i == DRM_SCHED_PRIORITY_NORMAL
184-
|| atomic_read(&ring->num_jobs[i])) {
185-
ring->priority = i;
186-
ring->funcs->set_priority(ring, i);
187-
break;
188-
}
189-
}
190-
191-
out_unlock:
192-
mutex_unlock(&ring->priority_mutex);
193-
}
194-
195-
/**
196-
* amdgpu_ring_priority_get - change the ring's priority
197-
*
198-
* @ring: amdgpu_ring structure holding the information
199-
* @priority: target priority
200-
*
201-
* Request a ring's priority to be raised to @priority (refcounted).
202-
*/
203-
void amdgpu_ring_priority_get(struct amdgpu_ring *ring,
204-
enum drm_sched_priority priority)
205-
{
206-
if (!ring->funcs->set_priority)
207-
return;
208-
209-
if (atomic_inc_return(&ring->num_jobs[priority]) <= 0)
210-
return;
211-
212-
mutex_lock(&ring->priority_mutex);
213-
if (priority <= ring->priority)
214-
goto out_unlock;
215-
216-
ring->priority = priority;
217-
ring->funcs->set_priority(ring, priority);
218-
219-
out_unlock:
220-
mutex_unlock(&ring->priority_mutex);
221-
}
222-
223153
/**
224154
* amdgpu_ring_init - init driver ring struct.
225155
*

drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h

Lines changed: 0 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -167,9 +167,6 @@ struct amdgpu_ring_funcs {
167167
uint32_t reg0, uint32_t reg1,
168168
uint32_t ref, uint32_t mask);
169169
void (*emit_tmz)(struct amdgpu_ring *ring, bool start);
170-
/* priority functions */
171-
void (*set_priority) (struct amdgpu_ring *ring,
172-
enum drm_sched_priority priority);
173170
/* Try to soft recover the ring to make the fence signal */
174171
void (*soft_recovery)(struct amdgpu_ring *ring, unsigned vmid);
175172
int (*preempt_ib)(struct amdgpu_ring *ring);
@@ -259,10 +256,6 @@ void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
259256
void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
260257
void amdgpu_ring_commit(struct amdgpu_ring *ring);
261258
void amdgpu_ring_undo(struct amdgpu_ring *ring);
262-
void amdgpu_ring_priority_get(struct amdgpu_ring *ring,
263-
enum drm_sched_priority priority);
264-
void amdgpu_ring_priority_put(struct amdgpu_ring *ring,
265-
enum drm_sched_priority priority);
266259
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
267260
unsigned ring_size, struct amdgpu_irq_src *irq_src,
268261
unsigned irq_type);

drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c

Lines changed: 0 additions & 99 deletions
Original file line numberDiff line numberDiff line change
@@ -6275,104 +6275,6 @@ static void gfx_v8_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
62756275
WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
62766276
}
62776277

6278-
static void gfx_v8_0_ring_set_pipe_percent(struct amdgpu_ring *ring,
6279-
bool acquire)
6280-
{
6281-
struct amdgpu_device *adev = ring->adev;
6282-
int pipe_num, tmp, reg;
6283-
int pipe_percent = acquire ? SPI_WCL_PIPE_PERCENT_GFX__VALUE_MASK : 0x1;
6284-
6285-
pipe_num = ring->me * adev->gfx.mec.num_pipe_per_mec + ring->pipe;
6286-
6287-
/* first me only has 2 entries, GFX and HP3D */
6288-
if (ring->me > 0)
6289-
pipe_num -= 2;
6290-
6291-
reg = mmSPI_WCL_PIPE_PERCENT_GFX + pipe_num;
6292-
tmp = RREG32(reg);
6293-
tmp = REG_SET_FIELD(tmp, SPI_WCL_PIPE_PERCENT_GFX, VALUE, pipe_percent);
6294-
WREG32(reg, tmp);
6295-
}
6296-
6297-
static void gfx_v8_0_pipe_reserve_resources(struct amdgpu_device *adev,
6298-
struct amdgpu_ring *ring,
6299-
bool acquire)
6300-
{
6301-
int i, pipe;
6302-
bool reserve;
6303-
struct amdgpu_ring *iring;
6304-
6305-
mutex_lock(&adev->gfx.pipe_reserve_mutex);
6306-
pipe = amdgpu_gfx_mec_queue_to_bit(adev, ring->me, ring->pipe, 0);
6307-
if (acquire)
6308-
set_bit(pipe, adev->gfx.pipe_reserve_bitmap);
6309-
else
6310-
clear_bit(pipe, adev->gfx.pipe_reserve_bitmap);
6311-
6312-
if (!bitmap_weight(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES)) {
6313-
/* Clear all reservations - everyone reacquires all resources */
6314-
for (i = 0; i < adev->gfx.num_gfx_rings; ++i)
6315-
gfx_v8_0_ring_set_pipe_percent(&adev->gfx.gfx_ring[i],
6316-
true);
6317-
6318-
for (i = 0; i < adev->gfx.num_compute_rings; ++i)
6319-
gfx_v8_0_ring_set_pipe_percent(&adev->gfx.compute_ring[i],
6320-
true);
6321-
} else {
6322-
/* Lower all pipes without a current reservation */
6323-
for (i = 0; i < adev->gfx.num_gfx_rings; ++i) {
6324-
iring = &adev->gfx.gfx_ring[i];
6325-
pipe = amdgpu_gfx_mec_queue_to_bit(adev,
6326-
iring->me,
6327-
iring->pipe,
6328-
0);
6329-
reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap);
6330-
gfx_v8_0_ring_set_pipe_percent(iring, reserve);
6331-
}
6332-
6333-
for (i = 0; i < adev->gfx.num_compute_rings; ++i) {
6334-
iring = &adev->gfx.compute_ring[i];
6335-
pipe = amdgpu_gfx_mec_queue_to_bit(adev,
6336-
iring->me,
6337-
iring->pipe,
6338-
0);
6339-
reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap);
6340-
gfx_v8_0_ring_set_pipe_percent(iring, reserve);
6341-
}
6342-
}
6343-
6344-
mutex_unlock(&adev->gfx.pipe_reserve_mutex);
6345-
}
6346-
6347-
static void gfx_v8_0_hqd_set_priority(struct amdgpu_device *adev,
6348-
struct amdgpu_ring *ring,
6349-
bool acquire)
6350-
{
6351-
uint32_t pipe_priority = acquire ? 0x2 : 0x0;
6352-
uint32_t queue_priority = acquire ? 0xf : 0x0;
6353-
6354-
mutex_lock(&adev->srbm_mutex);
6355-
vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
6356-
6357-
WREG32(mmCP_HQD_PIPE_PRIORITY, pipe_priority);
6358-
WREG32(mmCP_HQD_QUEUE_PRIORITY, queue_priority);
6359-
6360-
vi_srbm_select(adev, 0, 0, 0, 0);
6361-
mutex_unlock(&adev->srbm_mutex);
6362-
}
6363-
static void gfx_v8_0_ring_set_priority_compute(struct amdgpu_ring *ring,
6364-
enum drm_sched_priority priority)
6365-
{
6366-
struct amdgpu_device *adev = ring->adev;
6367-
bool acquire = priority == DRM_SCHED_PRIORITY_HIGH_HW;
6368-
6369-
if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE)
6370-
return;
6371-
6372-
gfx_v8_0_hqd_set_priority(adev, ring, acquire);
6373-
gfx_v8_0_pipe_reserve_resources(adev, ring, acquire);
6374-
}
6375-
63766278
static void gfx_v8_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
63776279
u64 addr, u64 seq,
63786280
unsigned flags)
@@ -7005,7 +6907,6 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
70056907
.test_ib = gfx_v8_0_ring_test_ib,
70066908
.insert_nop = amdgpu_ring_insert_nop,
70076909
.pad_ib = amdgpu_ring_generic_pad_ib,
7008-
.set_priority = gfx_v8_0_ring_set_priority_compute,
70096910
.emit_wreg = gfx_v8_0_ring_emit_wreg,
70106911
};
70116912

drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c

Lines changed: 0 additions & 100 deletions
Original file line numberDiff line numberDiff line change
@@ -5123,105 +5123,6 @@ static u64 gfx_v9_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
51235123
return wptr;
51245124
}
51255125

5126-
static void gfx_v9_0_ring_set_pipe_percent(struct amdgpu_ring *ring,
5127-
bool acquire)
5128-
{
5129-
struct amdgpu_device *adev = ring->adev;
5130-
int pipe_num, tmp, reg;
5131-
int pipe_percent = acquire ? SPI_WCL_PIPE_PERCENT_GFX__VALUE_MASK : 0x1;
5132-
5133-
pipe_num = ring->me * adev->gfx.mec.num_pipe_per_mec + ring->pipe;
5134-
5135-
/* first me only has 2 entries, GFX and HP3D */
5136-
if (ring->me > 0)
5137-
pipe_num -= 2;
5138-
5139-
reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_GFX) + pipe_num;
5140-
tmp = RREG32(reg);
5141-
tmp = REG_SET_FIELD(tmp, SPI_WCL_PIPE_PERCENT_GFX, VALUE, pipe_percent);
5142-
WREG32(reg, tmp);
5143-
}
5144-
5145-
static void gfx_v9_0_pipe_reserve_resources(struct amdgpu_device *adev,
5146-
struct amdgpu_ring *ring,
5147-
bool acquire)
5148-
{
5149-
int i, pipe;
5150-
bool reserve;
5151-
struct amdgpu_ring *iring;
5152-
5153-
mutex_lock(&adev->gfx.pipe_reserve_mutex);
5154-
pipe = amdgpu_gfx_mec_queue_to_bit(adev, ring->me, ring->pipe, 0);
5155-
if (acquire)
5156-
set_bit(pipe, adev->gfx.pipe_reserve_bitmap);
5157-
else
5158-
clear_bit(pipe, adev->gfx.pipe_reserve_bitmap);
5159-
5160-
if (!bitmap_weight(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES)) {
5161-
/* Clear all reservations - everyone reacquires all resources */
5162-
for (i = 0; i < adev->gfx.num_gfx_rings; ++i)
5163-
gfx_v9_0_ring_set_pipe_percent(&adev->gfx.gfx_ring[i],
5164-
true);
5165-
5166-
for (i = 0; i < adev->gfx.num_compute_rings; ++i)
5167-
gfx_v9_0_ring_set_pipe_percent(&adev->gfx.compute_ring[i],
5168-
true);
5169-
} else {
5170-
/* Lower all pipes without a current reservation */
5171-
for (i = 0; i < adev->gfx.num_gfx_rings; ++i) {
5172-
iring = &adev->gfx.gfx_ring[i];
5173-
pipe = amdgpu_gfx_mec_queue_to_bit(adev,
5174-
iring->me,
5175-
iring->pipe,
5176-
0);
5177-
reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap);
5178-
gfx_v9_0_ring_set_pipe_percent(iring, reserve);
5179-
}
5180-
5181-
for (i = 0; i < adev->gfx.num_compute_rings; ++i) {
5182-
iring = &adev->gfx.compute_ring[i];
5183-
pipe = amdgpu_gfx_mec_queue_to_bit(adev,
5184-
iring->me,
5185-
iring->pipe,
5186-
0);
5187-
reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap);
5188-
gfx_v9_0_ring_set_pipe_percent(iring, reserve);
5189-
}
5190-
}
5191-
5192-
mutex_unlock(&adev->gfx.pipe_reserve_mutex);
5193-
}
5194-
5195-
static void gfx_v9_0_hqd_set_priority(struct amdgpu_device *adev,
5196-
struct amdgpu_ring *ring,
5197-
bool acquire)
5198-
{
5199-
uint32_t pipe_priority = acquire ? 0x2 : 0x0;
5200-
uint32_t queue_priority = acquire ? 0xf : 0x0;
5201-
5202-
mutex_lock(&adev->srbm_mutex);
5203-
soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
5204-
5205-
WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PIPE_PRIORITY, pipe_priority);
5206-
WREG32_SOC15_RLC(GC, 0, mmCP_HQD_QUEUE_PRIORITY, queue_priority);
5207-
5208-
soc15_grbm_select(adev, 0, 0, 0, 0);
5209-
mutex_unlock(&adev->srbm_mutex);
5210-
}
5211-
5212-
static void gfx_v9_0_ring_set_priority_compute(struct amdgpu_ring *ring,
5213-
enum drm_sched_priority priority)
5214-
{
5215-
struct amdgpu_device *adev = ring->adev;
5216-
bool acquire = priority == DRM_SCHED_PRIORITY_HIGH_HW;
5217-
5218-
if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE)
5219-
return;
5220-
5221-
gfx_v9_0_hqd_set_priority(adev, ring, acquire);
5222-
gfx_v9_0_pipe_reserve_resources(adev, ring, acquire);
5223-
}
5224-
52255126
static void gfx_v9_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
52265127
{
52275128
struct amdgpu_device *adev = ring->adev;
@@ -6592,7 +6493,6 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
65926493
.test_ib = gfx_v9_0_ring_test_ib,
65936494
.insert_nop = amdgpu_ring_insert_nop,
65946495
.pad_ib = amdgpu_ring_generic_pad_ib,
6595-
.set_priority = gfx_v9_0_ring_set_priority_compute,
65966496
.emit_wreg = gfx_v9_0_ring_emit_wreg,
65976497
.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
65986498
.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,

0 commit comments

Comments (0)