@@ -147,6 +147,10 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
 		}
 	}
 
+	/* from vcn4 and above, only unified queue is used */
+	adev->vcn.using_unified_queue =
+		amdgpu_ip_version(adev, UVD_HWIP, 0) >= IP_VERSION(4, 0, 0);
+
 	hdr = (const struct common_firmware_header *)adev->vcn.fw[0]->data;
 	adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);
 
@@ -275,18 +279,6 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
 	return 0;
 }
 
-/* from vcn4 and above, only unified queue is used */
-static bool amdgpu_vcn_using_unified_queue(struct amdgpu_ring *ring)
-{
-	struct amdgpu_device *adev = ring->adev;
-	bool ret = false;
-
-	if (amdgpu_ip_version(adev, UVD_HWIP, 0) >= IP_VERSION(4, 0, 0))
-		ret = true;
-
-	return ret;
-}
-
 bool amdgpu_vcn_is_disabled_vcn(struct amdgpu_device *adev, enum vcn_ring_type type, uint32_t vcn_instance)
 {
 	bool ret = false;
@@ -724,12 +716,11 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
 	struct amdgpu_job *job;
 	struct amdgpu_ib *ib;
 	uint64_t addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
-	bool sq = amdgpu_vcn_using_unified_queue(ring);
 	uint32_t *ib_checksum;
 	uint32_t ib_pack_in_dw;
 	int i, r;
 
-	if (sq)
+	if (adev->vcn.using_unified_queue)
 		ib_size_dw += 8;
 
 	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
@@ -742,7 +733,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
 	ib->length_dw = 0;
 
 	/* single queue headers */
-	if (sq) {
+	if (adev->vcn.using_unified_queue) {
 		ib_pack_in_dw = sizeof(struct amdgpu_vcn_decode_buffer) / sizeof(uint32_t)
 				+ 4 + 2; /* engine info + decoding ib in dw */
 		ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, ib_pack_in_dw, false);
@@ -761,7 +752,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
 	for (i = ib->length_dw; i < ib_size_dw; ++i)
 		ib->ptr[i] = 0x0;
 
-	if (sq)
+	if (adev->vcn.using_unified_queue)
 		amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, ib_pack_in_dw);
 
 	r = amdgpu_job_submit_direct(job, ring, &f);
@@ -851,15 +842,15 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
 					 struct dma_fence **fence)
 {
 	unsigned int ib_size_dw = 16;
+	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_job *job;
 	struct amdgpu_ib *ib;
 	struct dma_fence *f = NULL;
 	uint32_t *ib_checksum = NULL;
 	uint64_t addr;
-	bool sq = amdgpu_vcn_using_unified_queue(ring);
 	int i, r;
 
-	if (sq)
+	if (adev->vcn.using_unified_queue)
 		ib_size_dw += 8;
 
 	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
@@ -873,7 +864,7 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
 
 	ib->length_dw = 0;
 
-	if (sq)
+	if (adev->vcn.using_unified_queue)
 		ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true);
 
 	ib->ptr[ib->length_dw++] = 0x00000018;
@@ -895,7 +886,7 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
 	for (i = ib->length_dw; i < ib_size_dw; ++i)
 		ib->ptr[i] = 0x0;
 
-	if (sq)
+	if (adev->vcn.using_unified_queue)
 		amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11);
 
 	r = amdgpu_job_submit_direct(job, ring, &f);
@@ -918,15 +909,15 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
 					  struct dma_fence **fence)
 {
 	unsigned int ib_size_dw = 16;
+	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_job *job;
 	struct amdgpu_ib *ib;
 	struct dma_fence *f = NULL;
 	uint32_t *ib_checksum = NULL;
 	uint64_t addr;
-	bool sq = amdgpu_vcn_using_unified_queue(ring);
 	int i, r;
 
-	if (sq)
+	if (adev->vcn.using_unified_queue)
 		ib_size_dw += 8;
 
 	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
@@ -940,7 +931,7 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
 
 	ib->length_dw = 0;
 
-	if (sq)
+	if (adev->vcn.using_unified_queue)
 		ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true);
 
 	ib->ptr[ib->length_dw++] = 0x00000018;
@@ -962,7 +953,7 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
 	for (i = ib->length_dw; i < ib_size_dw; ++i)
 		ib->ptr[i] = 0x0;
 
-	if (sq)
+	if (adev->vcn.using_unified_queue)
 		amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11);
 
 	r = amdgpu_job_submit_direct(job, ring, &f);