@@ -127,13 +127,112 @@ static int nvidia_smmu_reset(struct arm_smmu_device *smmu)
 	return 0;
 }
 
+static irqreturn_t nvidia_smmu_global_fault_inst(int irq,
+						 struct arm_smmu_device *smmu,
+						 int inst)
+{
+	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
+	void __iomem *gr0_base = nvidia_smmu_page(smmu, inst, 0);
+
+	gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
+	if (!gfsr)
+		return IRQ_NONE;
+
+	gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
+	gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
+	gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);
+
+	dev_err_ratelimited(smmu->dev,
+			    "Unexpected global fault, this could be serious\n");
+	dev_err_ratelimited(smmu->dev,
+			    "\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
+			    gfsr, gfsynr0, gfsynr1, gfsynr2);
+
+	writel_relaxed(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t nvidia_smmu_global_fault(int irq, void *dev)
+{
+	unsigned int inst;
+	irqreturn_t ret = IRQ_NONE;
+	struct arm_smmu_device *smmu = dev;
+
+	for (inst = 0; inst < NUM_SMMU_INSTANCES; inst++) {
+		irqreturn_t irq_ret;
+
+		irq_ret = nvidia_smmu_global_fault_inst(irq, smmu, inst);
+		if (irq_ret == IRQ_HANDLED)
+			ret = IRQ_HANDLED;
+	}
+
+	return ret;
+}
+
+static irqreturn_t nvidia_smmu_context_fault_bank(int irq,
+						  struct arm_smmu_device *smmu,
+						  int idx, int inst)
+{
+	u32 fsr, fsynr, cbfrsynra;
+	unsigned long iova;
+	void __iomem *gr1_base = nvidia_smmu_page(smmu, inst, 1);
+	void __iomem *cb_base = nvidia_smmu_page(smmu, inst, smmu->numpage + idx);
+
+	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
+	if (!(fsr & ARM_SMMU_FSR_FAULT))
+		return IRQ_NONE;
+
+	fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
+	iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);
+	cbfrsynra = readl_relaxed(gr1_base + ARM_SMMU_GR1_CBFRSYNRA(idx));
+
+	dev_err_ratelimited(smmu->dev,
+			    "Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cbfrsynra=0x%x, cb=%d\n",
+			    fsr, iova, fsynr, cbfrsynra, idx);
+
+	writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t nvidia_smmu_context_fault(int irq, void *dev)
+{
+	int idx;
+	unsigned int inst;
+	irqreturn_t ret = IRQ_NONE;
+	struct arm_smmu_device *smmu;
+	struct iommu_domain *domain = dev;
+	struct arm_smmu_domain *smmu_domain;
+
+	smmu_domain = container_of(domain, struct arm_smmu_domain, domain);
+	smmu = smmu_domain->smmu;
+
+	for (inst = 0; inst < NUM_SMMU_INSTANCES; inst++) {
+		irqreturn_t irq_ret;
+
+		/*
+		 * Interrupt line is shared between all contexts.
+		 * Check for faults across all contexts.
+		 */
+		for (idx = 0; idx < smmu->num_context_banks; idx++) {
+			irq_ret = nvidia_smmu_context_fault_bank(irq, smmu,
+								 idx, inst);
+			if (irq_ret == IRQ_HANDLED)
+				ret = IRQ_HANDLED;
+		}
+	}
+
+	return ret;
+}
+
 static const struct arm_smmu_impl nvidia_smmu_impl = {
 	.read_reg = nvidia_smmu_read_reg,
 	.write_reg = nvidia_smmu_write_reg,
 	.read_reg64 = nvidia_smmu_read_reg64,
 	.write_reg64 = nvidia_smmu_write_reg64,
 	.reset = nvidia_smmu_reset,
 	.tlb_sync = nvidia_smmu_tlb_sync,
+	.global_fault = nvidia_smmu_global_fault,
+	.context_fault = nvidia_smmu_context_fault,
 };
 
 struct arm_smmu_device *nvidia_smmu_impl_init(struct arm_smmu_device *smmu)
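
Note: the new .global_fault/.context_fault hooks only take effect if the core arm-smmu driver prefers an implementation-supplied handler when it requests the fault IRQs. A minimal sketch of that selection logic follows, assuming the matching arm-smmu.c change in this series (not shown in this hunk); the smmu->irqs[]/num_global_irqs fields and the "arm-smmu global fault" name reflect the core driver at the time and are assumptions here, not part of this patch.

	/*
	 * Sketch only, not part of this hunk: how the core arm-smmu driver is
	 * expected to pick the handler when requesting the global fault IRQs
	 * (the context-fault hook is chosen the same way per context bank).
	 */
	irq_handler_t global_fault;
	int i, err;

	if (smmu->impl && smmu->impl->global_fault)
		global_fault = smmu->impl->global_fault;	/* nvidia_smmu_global_fault here */
	else
		global_fault = arm_smmu_global_fault;

	for (i = 0; i < smmu->num_global_irqs; ++i) {
		err = devm_request_irq(smmu->dev, smmu->irqs[i], global_fault,
				       IRQF_SHARED, "arm-smmu global fault", smmu);
		if (err)
			dev_err(smmu->dev, "failed to request global IRQ %d (%u)\n",
				i, smmu->irqs[i]);
	}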