@@ -65,6 +65,7 @@ module_param(wp_on, int, 0444);
 #define CMD_PARAMETER_READ		0x0e
 #define CMD_PARAMETER_CHANGE_COL	0x0f
 #define CMD_LOW_LEVEL_OP		0x10
+#define CMD_NOT_SUPPORTED		0xff

 struct brcm_nand_dma_desc {
 	u32 next_desc;
@@ -199,6 +200,30 @@ static const u16 flash_dma_regs_v4[] = {
 	[FLASH_DMA_CURRENT_DESC_EXT]	= 0x34,
 };

+/* Native command conversion for legacy controllers (< v5.0) */
+static const u8 native_cmd_conv[] = {
+	[NAND_CMD_READ0]	= CMD_NOT_SUPPORTED,
+	[NAND_CMD_READ1]	= CMD_NOT_SUPPORTED,
+	[NAND_CMD_RNDOUT]	= CMD_PARAMETER_CHANGE_COL,
+	[NAND_CMD_PAGEPROG]	= CMD_NOT_SUPPORTED,
+	[NAND_CMD_READOOB]	= CMD_NOT_SUPPORTED,
+	[NAND_CMD_ERASE1]	= CMD_BLOCK_ERASE,
+	[NAND_CMD_STATUS]	= CMD_NOT_SUPPORTED,
+	[NAND_CMD_SEQIN]	= CMD_NOT_SUPPORTED,
+	[NAND_CMD_RNDIN]	= CMD_NOT_SUPPORTED,
+	[NAND_CMD_READID]	= CMD_DEVICE_ID_READ,
+	[NAND_CMD_ERASE2]	= CMD_NULL,
+	[NAND_CMD_PARAM]	= CMD_PARAMETER_READ,
+	[NAND_CMD_GET_FEATURES]	= CMD_NOT_SUPPORTED,
+	[NAND_CMD_SET_FEATURES]	= CMD_NOT_SUPPORTED,
+	[NAND_CMD_RESET]	= CMD_NOT_SUPPORTED,
+	[NAND_CMD_READSTART]	= CMD_NOT_SUPPORTED,
+	[NAND_CMD_READCACHESEQ]	= CMD_NOT_SUPPORTED,
+	[NAND_CMD_READCACHEEND]	= CMD_NOT_SUPPORTED,
+	[NAND_CMD_RNDOUTSTART]	= CMD_NULL,
+	[NAND_CMD_CACHEDPROG]	= CMD_NOT_SUPPORTED,
+};
+
 /* Controller feature flags */
 enum {
 	BRCMNAND_HAS_1K_SECTORS		= BIT(0),
@@ -237,6 +262,12 @@ struct brcmnand_controller {
 	/* List of NAND hosts (one for each chip-select) */
 	struct list_head host_list;

+	/* Functions to be called from exec_op */
+	int (*check_instr)(struct nand_chip *chip,
+			   const struct nand_operation *op);
+	int (*exec_instr)(struct nand_chip *chip,
+			  const struct nand_operation *op);
+
 	/* EDU info, per-transaction */
 	const u16 *edu_offsets;
 	void __iomem *edu_base;
@@ -2478,18 +2509,190 @@ static int brcmnand_op_is_reset(const struct nand_operation *op)
 	return 0;
 }

+static int brcmnand_check_instructions(struct nand_chip *chip,
+				       const struct nand_operation *op)
+{
+	return 0;
+}
+
+static int brcmnand_exec_instructions(struct nand_chip *chip,
+				      const struct nand_operation *op)
+{
+	struct brcmnand_host *host = nand_get_controller_data(chip);
+	unsigned int i;
+	int ret = 0;
+
+	for (i = 0; i < op->ninstrs; i++) {
+		ret = brcmnand_exec_instr(host, i, op);
+		if (ret)
+			break;
+	}
+
+	return ret;
+}
+
+static int brcmnand_check_instructions_legacy(struct nand_chip *chip,
+					      const struct nand_operation *op)
+{
+	const struct nand_op_instr *instr;
+	unsigned int i;
+	u8 cmd;
+
+	for (i = 0; i < op->ninstrs; i++) {
+		instr = &op->instrs[i];
+
+		switch (instr->type) {
+		case NAND_OP_CMD_INSTR:
+			cmd = native_cmd_conv[instr->ctx.cmd.opcode];
+			if (cmd == CMD_NOT_SUPPORTED)
+				return -EOPNOTSUPP;
+			break;
+		case NAND_OP_ADDR_INSTR:
+		case NAND_OP_DATA_IN_INSTR:
+		case NAND_OP_WAITRDY_INSTR:
+			break;
+		default:
+			return -EOPNOTSUPP;
+		}
+	}
+
+	return 0;
+}
+
+static int brcmnand_exec_instructions_legacy(struct nand_chip *chip,
+					     const struct nand_operation *op)
+{
+	struct mtd_info *mtd = nand_to_mtd(chip);
+	struct brcmnand_host *host = nand_get_controller_data(chip);
+	struct brcmnand_controller *ctrl = host->ctrl;
+	const struct nand_op_instr *instr;
+	unsigned int i, j;
+	u8 cmd = CMD_NULL, last_cmd = CMD_NULL;
+	int ret = 0;
+	u64 last_addr;
+
+	for (i = 0; i < op->ninstrs; i++) {
+		instr = &op->instrs[i];
+
+		if (instr->type == NAND_OP_CMD_INSTR) {
+			cmd = native_cmd_conv[instr->ctx.cmd.opcode];
+			if (cmd == CMD_NOT_SUPPORTED) {
+				dev_err(ctrl->dev, "unsupported cmd=%d\n",
+					instr->ctx.cmd.opcode);
+				ret = -EOPNOTSUPP;
+				break;
+			}
+		} else if (instr->type == NAND_OP_ADDR_INSTR) {
+			u64 addr = 0;
+
+			if (cmd == CMD_NULL)
+				continue;
+
+			if (instr->ctx.addr.naddrs > 8) {
+				dev_err(ctrl->dev, "unsupported naddrs=%u\n",
+					instr->ctx.addr.naddrs);
+				ret = -EOPNOTSUPP;
+				break;
+			}
+
+			for (j = 0; j < instr->ctx.addr.naddrs; j++)
+				addr |= (instr->ctx.addr.addrs[j]) << (j << 3);
+
+			if (cmd == CMD_BLOCK_ERASE)
+				addr <<= chip->page_shift;
+			else if (cmd == CMD_PARAMETER_CHANGE_COL)
+				addr &= ~((u64)(FC_BYTES - 1));
+
+			brcmnand_set_cmd_addr(mtd, addr);
+			brcmnand_send_cmd(host, cmd);
+			last_addr = addr;
+			last_cmd = cmd;
+			cmd = CMD_NULL;
+			brcmnand_waitfunc(chip);
+
+			if (last_cmd == CMD_PARAMETER_READ ||
+			    last_cmd == CMD_PARAMETER_CHANGE_COL) {
+				/* Copy flash cache word-wise */
+				u32 *flash_cache = (u32 *)ctrl->flash_cache;
+
+				brcmnand_soc_data_bus_prepare(ctrl->soc, true);
+
+				/*
+				 * Must cache the FLASH_CACHE now, since changes in
+				 * SECTOR_SIZE_1K may invalidate it
+				 */
+				for (j = 0; j < FC_WORDS; j++)
+					/*
+					 * Flash cache is big endian for parameter pages, at
+					 * least on STB SoCs
+					 */
+					flash_cache[j] = be32_to_cpu(brcmnand_read_fc(ctrl, j));
+
+				brcmnand_soc_data_bus_unprepare(ctrl->soc, true);
+			}
+		} else if (instr->type == NAND_OP_DATA_IN_INSTR) {
+			u8 *in = instr->ctx.data.buf.in;
+
+			if (last_cmd == CMD_DEVICE_ID_READ) {
+				u32 val;
+
+				if (instr->ctx.data.len > 8) {
+					dev_err(ctrl->dev, "unsupported len=%u\n",
+						instr->ctx.data.len);
+					ret = -EOPNOTSUPP;
+					break;
+				}
+
+				for (j = 0; j < instr->ctx.data.len; j++) {
+					if (j == 0)
+						val = brcmnand_read_reg(ctrl, BRCMNAND_ID);
+					else if (j == 4)
+						val = brcmnand_read_reg(ctrl, BRCMNAND_ID_EXT);
+
+					in[j] = (val >> (24 - ((j % 4) << 3))) & 0xff;
+				}
+			} else if (last_cmd == CMD_PARAMETER_READ ||
+				   last_cmd == CMD_PARAMETER_CHANGE_COL) {
+				u64 addr;
+				u32 offs;
+
+				for (j = 0; j < instr->ctx.data.len; j++) {
+					addr = last_addr + j;
+					offs = addr & (FC_BYTES - 1);
+
+					if (j > 0 && offs == 0)
+						nand_change_read_column_op(chip, addr, NULL, 0,
+									   false);
+
+					in[j] = ctrl->flash_cache[offs];
+				}
+			}
+		} else if (instr->type == NAND_OP_WAITRDY_INSTR) {
+			ret = bcmnand_ctrl_poll_status(host, NAND_CTRL_RDY, NAND_CTRL_RDY, 0);
+			if (ret)
+				break;
+		} else {
+			dev_err(ctrl->dev, "unsupported instruction type: %d\n", instr->type);
+			ret = -EOPNOTSUPP;
+			break;
+		}
+	}
+
+	return ret;
+}
+
 static int brcmnand_exec_op(struct nand_chip *chip,
 			    const struct nand_operation *op,
 			    bool check_only)
 {
 	struct brcmnand_host *host = nand_get_controller_data(chip);
+	struct brcmnand_controller *ctrl = host->ctrl;
 	struct mtd_info *mtd = nand_to_mtd(chip);
 	u8 *status;
-	unsigned int i;
 	int ret = 0;

 	if (check_only)
-		return 0;
+		return ctrl->check_instr(chip, op);

 	if (brcmnand_op_is_status(op)) {
 		status = op->instrs[1].ctx.data.buf.in;
@@ -2513,11 +2716,7 @@ static int brcmnand_exec_op(struct nand_chip *chip,
 	if (op->deassert_wp)
 		brcmnand_wp(mtd, 0);

-	for (i = 0; i < op->ninstrs; i++) {
-		ret = brcmnand_exec_instr(host, i, op);
-		if (ret)
-			break;
-	}
+	ret = ctrl->exec_instr(chip, op);

 	if (op->deassert_wp)
 		brcmnand_wp(mtd, 1);
@@ -3130,6 +3329,15 @@ int brcmnand_probe(struct platform_device *pdev, struct brcmnand_soc *soc)
 	if (ret)
 		goto err;

+	/* Only v5.0+ controllers have low level ops support */
+	if (ctrl->nand_version >= 0x0500) {
+		ctrl->check_instr = brcmnand_check_instructions;
+		ctrl->exec_instr = brcmnand_exec_instructions;
+	} else {
+		ctrl->check_instr = brcmnand_check_instructions_legacy;
+		ctrl->exec_instr = brcmnand_exec_instructions_legacy;
+	}
+
 	/*
 	 * Most chips have this cache at a fixed offset within 'nand' block.
 	 * Some must specify this region separately.