@@ -837,6 +837,174 @@ static int convert_tiling_flags_to_modifier(struct amdgpu_framebuffer *afb)
	return 0;
}

+static void get_block_dimensions(unsigned int block_log2, unsigned int cpp,
+				 unsigned int *width, unsigned int *height)
+{
+	unsigned int cpp_log2 = ilog2(cpp);
+	unsigned int pixel_log2 = block_log2 - cpp_log2;
+	unsigned int width_log2 = (pixel_log2 + 1) / 2;
+	unsigned int height_log2 = pixel_log2 - width_log2;
+
+	*width = 1 << width_log2;
+	*height = 1 << height_log2;
+}
+
+static unsigned int get_dcc_block_size(uint64_t modifier, bool rb_aligned,
+				       bool pipe_aligned)
+{
+	unsigned int ver = AMD_FMT_MOD_GET(TILE_VERSION, modifier);
+
+	switch (ver) {
+	case AMD_FMT_MOD_TILE_VER_GFX9: {
+		/*
+		 * TODO: for pipe aligned we may need to check the alignment of the
+		 * total size of the surface, which may need to be bigger than the
+		 * natural alignment due to some HW workarounds
+		 */
+		return max(10 + (rb_aligned ? (int)AMD_FMT_MOD_GET(RB, modifier) : 0), 12);
+	}
+	case AMD_FMT_MOD_TILE_VER_GFX10:
+	case AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS: {
+		int pipes_log2 = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
+
+		if (ver == AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS && pipes_log2 > 1 &&
+		    AMD_FMT_MOD_GET(PACKERS, modifier) == pipes_log2)
+			++pipes_log2;
+
+		return max(8 + (pipe_aligned ? pipes_log2 : 0), 12);
+	}
+	default:
+		return 0;
+	}
+}
+
+static int amdgpu_display_verify_plane(struct amdgpu_framebuffer *rfb, int plane,
+				       const struct drm_format_info *format,
+				       unsigned int block_width, unsigned int block_height,
+				       unsigned int block_size_log2)
+{
+	unsigned int width = rfb->base.width /
+		((plane && plane < format->num_planes) ? format->hsub : 1);
+	unsigned int height = rfb->base.height /
+		((plane && plane < format->num_planes) ? format->vsub : 1);
+	unsigned int cpp = plane < format->num_planes ? format->cpp[plane] : 1;
+	unsigned int block_pitch = block_width * cpp;
+	unsigned int min_pitch = ALIGN(width * cpp, block_pitch);
+	unsigned int block_size = 1 << block_size_log2;
+	uint64_t size;
+
+	if (rfb->base.pitches[plane] % block_pitch) {
+		drm_dbg_kms(rfb->base.dev,
+			    "pitch %d for plane %d is not a multiple of block pitch %d\n",
+			    rfb->base.pitches[plane], plane, block_pitch);
+		return -EINVAL;
+	}
+	if (rfb->base.pitches[plane] < min_pitch) {
+		drm_dbg_kms(rfb->base.dev,
+			    "pitch %d for plane %d is less than minimum pitch %d\n",
+			    rfb->base.pitches[plane], plane, min_pitch);
+		return -EINVAL;
+	}
+
+	/* Force at least natural alignment. */
+	if (rfb->base.offsets[plane] % block_size) {
+		drm_dbg_kms(rfb->base.dev,
+			    "offset 0x%x for plane %d is not a multiple of block pitch 0x%x\n",
+			    rfb->base.offsets[plane], plane, block_size);
+		return -EINVAL;
+	}
+
+	size = rfb->base.offsets[plane] +
+		(uint64_t)rfb->base.pitches[plane] / block_pitch *
+		block_size * DIV_ROUND_UP(height, block_height);
+
+	if (rfb->base.obj[0]->size < size) {
+		drm_dbg_kms(rfb->base.dev,
+			    "BO size 0x%zx is less than 0x%llx required for plane %d\n",
+			    rfb->base.obj[0]->size, size, plane);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+
+static int amdgpu_display_verify_sizes(struct amdgpu_framebuffer *rfb)
+{
+	const struct drm_format_info *format_info = drm_format_info(rfb->base.format->format);
+	uint64_t modifier = rfb->base.modifier;
+	int ret;
+	unsigned int i, block_width, block_height, block_size_log2;
+
+	if (!rfb->base.dev->mode_config.allow_fb_modifiers)
+		return 0;
+
+	for (i = 0; i < format_info->num_planes; ++i) {
+		if (modifier == DRM_FORMAT_MOD_LINEAR) {
+			block_width = 256 / format_info->cpp[i];
+			block_height = 1;
+			block_size_log2 = 8;
+		} else {
+			int swizzle = AMD_FMT_MOD_GET(TILE, modifier);
+
+			switch ((swizzle & ~3) + 1) {
+			case DC_SW_256B_S:
+				block_size_log2 = 8;
+				break;
+			case DC_SW_4KB_S:
+			case DC_SW_4KB_S_X:
+				block_size_log2 = 12;
+				break;
+			case DC_SW_64KB_S:
+			case DC_SW_64KB_S_T:
+			case DC_SW_64KB_S_X:
+				block_size_log2 = 16;
+				break;
+			default:
+				drm_dbg_kms(rfb->base.dev,
+					    "Swizzle mode with unknown block size: %d\n", swizzle);
+				return -EINVAL;
+			}
+
+			get_block_dimensions(block_size_log2, format_info->cpp[i],
+					     &block_width, &block_height);
+		}
+
+		ret = amdgpu_display_verify_plane(rfb, i, format_info,
+						  block_width, block_height, block_size_log2);
+		if (ret)
+			return ret;
+	}
+
+	if (AMD_FMT_MOD_GET(DCC, modifier)) {
+		if (AMD_FMT_MOD_GET(DCC_RETILE, modifier)) {
+			block_size_log2 = get_dcc_block_size(modifier, false, false);
+			get_block_dimensions(block_size_log2 + 8, format_info->cpp[0],
+					     &block_width, &block_height);
+			ret = amdgpu_display_verify_plane(rfb, i, format_info,
+							  block_width, block_height,
+							  block_size_log2);
+			if (ret)
+				return ret;
+
+			++i;
+			block_size_log2 = get_dcc_block_size(modifier, true, true);
+		} else {
+			bool pipe_aligned = AMD_FMT_MOD_GET(DCC_PIPE_ALIGN, modifier);
+
+			block_size_log2 = get_dcc_block_size(modifier, true, pipe_aligned);
+		}
+		get_block_dimensions(block_size_log2 + 8, format_info->cpp[0],
+				     &block_width, &block_height);
+		ret = amdgpu_display_verify_plane(rfb, i, format_info,
+						  block_width, block_height, block_size_log2);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
static int amdgpu_display_get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
				      uint64_t *tiling_flags, bool *tmz_surface)
{
@@ -902,10 +1070,8 @@ int amdgpu_display_gem_fb_verify_and_init(
	int ret;

	rfb->base.obj[0] = obj;
-
-	/* Verify that bo size can fit the fb size. */
-	ret = drm_gem_fb_init_with_funcs(dev, &rfb->base, file_priv, mode_cmd,
-					 &amdgpu_fb_funcs);
+	drm_helper_mode_fill_fb_struct(dev, &rfb->base, mode_cmd);
+	ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
	if (ret)
		goto err;
	/* Verify that the modifier is supported. */
@@ -967,9 +1133,12 @@ int amdgpu_display_framebuffer_init(struct drm_device *dev,
		}
	}

-	for (i = 1; i < rfb->base.format->num_planes; ++i) {
+	ret = amdgpu_display_verify_sizes(rfb);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < rfb->base.format->num_planes; ++i) {
		drm_gem_object_get(rfb->base.obj[0]);
-		drm_gem_object_put(rfb->base.obj[i]);
		rfb->base.obj[i] = rfb->base.obj[0];
	}

@@ -999,6 +1168,7 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev,
	domains = amdgpu_display_supported_domains(drm_to_adev(dev), bo->flags);
	if (obj->import_attach && !(domains & AMDGPU_GEM_DOMAIN_GTT)) {
		drm_dbg_kms(dev, "Cannot create framebuffer from imported dma_buf\n");
+		drm_gem_object_put(obj);
		return ERR_PTR(-EINVAL);
	}

@@ -1412,7 +1582,7 @@ int amdgpu_display_suspend_helper(struct amdgpu_device *adev)
			}
		}
	}
-	return r;
+	return 0;
}

int amdgpu_display_resume_helper(struct amdgpu_device *adev)
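
Illustrative note (not part of the patch): a minimal standalone sketch of the arithmetic in get_block_dimensions() above, which splits a swizzle block's byte size into a pixel width and height for a given bytes-per-pixel (cpp). The helper ilog2_u() is a hypothetical userspace stand-in for the kernel's ilog2().

/* Standalone sketch, mirrors get_block_dimensions() from the patch. */
#include <stdio.h>

static unsigned int ilog2_u(unsigned int v)
{
	/* hypothetical stand-in for the kernel's ilog2() */
	unsigned int r = 0;

	while (v >>= 1)
		++r;
	return r;
}

static void block_dims(unsigned int block_log2, unsigned int cpp,
		       unsigned int *width, unsigned int *height)
{
	unsigned int cpp_log2 = ilog2_u(cpp);
	unsigned int pixel_log2 = block_log2 - cpp_log2;	/* pixels per block */
	unsigned int width_log2 = (pixel_log2 + 1) / 2;		/* width gets the extra bit */
	unsigned int height_log2 = pixel_log2 - width_log2;

	*width = 1 << width_log2;
	*height = 1 << height_log2;
}

int main(void)
{
	unsigned int w, h;

	block_dims(16, 4, &w, &h);	/* 64KB block, 32bpp -> 128x128 pixels */
	printf("64KB block, cpp=4: %ux%u\n", w, h);
	block_dims(8, 4, &w, &h);	/* 256B block, 32bpp -> 8x8 pixels */
	printf("256B block, cpp=4: %ux%u\n", w, h);
	return 0;
}

For example, a 64KB swizzle block at 32bpp works out to 128x128 pixels, and a 256B block at 32bpp to 8x8, which is the block geometry amdgpu_display_verify_plane() then checks the pitch, offset, and BO size against.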