@@ -1622,8 +1622,7 @@ check_load_store_for_partial_vectors (loop_vec_info loop_vinfo, tree vectype,
       return;
     }
 
-  if (memory_access_type != VMAT_CONTIGUOUS
-      && memory_access_type != VMAT_CONTIGUOUS_PERMUTE)
+  if (memory_access_type != VMAT_CONTIGUOUS)
     {
       /* Element X of the data must come from iteration i * VF + X of the
 	 scalar loop.  We need more work to support other mappings.  */
@@ -9050,7 +9049,6 @@ vectorizable_store (vec_info *vinfo,
 
   gcc_assert (memory_access_type == VMAT_CONTIGUOUS
 	      || memory_access_type == VMAT_CONTIGUOUS_DOWN
-	      || memory_access_type == VMAT_CONTIGUOUS_PERMUTE
 	      || memory_access_type == VMAT_CONTIGUOUS_REVERSE);
 
   unsigned inside_cost = 0, prologue_cost = 0;
@@ -9095,25 +9093,7 @@ vectorizable_store (vec_info *vinfo,
 						   simd_lane_access_p, bump);
 
       new_stmt = NULL;
-      if (grouped_store)
-	{
-	  /* Permute.  */
-	  gcc_assert (memory_access_type == VMAT_CONTIGUOUS_PERMUTE);
-	  if (costing_p)
-	    {
-	      int group_size = DR_GROUP_SIZE (first_stmt_info);
-	      int nstmts = ceil_log2 (group_size) * group_size;
-	      inside_cost += record_stmt_cost (cost_vec, nstmts, vec_perm,
-					       slp_node, 0, vect_body);
-	      if (dump_enabled_p ())
-		dump_printf_loc (MSG_NOTE, vect_location, "vect_model_store_cost: "
-				 "strided group_size = %d .\n", group_size);
-	    }
-	  else
-	    vect_permute_store_chain (vinfo, dr_chain, group_size, stmt_info,
-				      gsi, &result_chain);
-	}
-
+      gcc_assert (!grouped_store);
       for (i = 0; i < vec_num; i++)
 	{
 	  if (!costing_p)
@@ -11457,18 +11437,12 @@ vectorizable_load (vec_info *vinfo,
 		 alignment support schemes.  */
 	      if (costing_p)
 		{
-		  /* For VMAT_CONTIGUOUS_PERMUTE if it's grouped load, we
-		     only need to take care of the first stmt, whose
-		     stmt_info is first_stmt_info, vec_num iterating on it
-		     will cover the cost for the remaining, it's consistent
-		     with transforming.  For the prologue cost for realign,
+		  /* For the prologue cost for realign,
 		     we only need to count it once for the whole group.  */
 		  bool first_stmt_info_p = first_stmt_info == stmt_info;
 		  bool add_realign_cost = first_stmt_info_p && i == 0;
 		  if (memory_access_type == VMAT_CONTIGUOUS
-		      || memory_access_type == VMAT_CONTIGUOUS_REVERSE
-		      || (memory_access_type == VMAT_CONTIGUOUS_PERMUTE
-			  && (!grouped_load || first_stmt_info_p)))
+		      || memory_access_type == VMAT_CONTIGUOUS_REVERSE)
 		    {
 		      /* Leave realign cases alone to keep them simple.  */
 		      if (alignment_support_scheme == dr_explicit_realign_optimized
@@ -11625,8 +11599,7 @@ vectorizable_load (vec_info *vinfo,
       if (costing_p)
 	{
 	  gcc_assert (memory_access_type == VMAT_CONTIGUOUS
-		      || memory_access_type == VMAT_CONTIGUOUS_REVERSE
-		      || memory_access_type == VMAT_CONTIGUOUS_PERMUTE);
+		      || memory_access_type == VMAT_CONTIGUOUS_REVERSE);
 	  if (n_adjacent_loads > 0)
 	    vect_get_load_cost (vinfo, stmt_info, slp_node, n_adjacent_loads,
 				alignment_support_scheme, misalignment, false,