Skip to content

Commit 29ee68e

Browse files
Pandeyajit authored and Lucas Tanure committed
mfd: clsic: introduce RAS notifications as simulated IRQs
The RAS service in the CLSIC device can expose hardware interrupts as notification messages. This patch exposes them as simulated IRQs so that client MFD drivers can interact with them as conventional interrupt sources.

Change-Id: Ib1dc78ca53330d5dbe8be459481fc2d25c0b8b11
Signed-off-by: apandey <[email protected]>
Signed-off-by: Simon Trimmer <[email protected]>
1 parent 0d42ea1 commit 29ee68e

File tree

8 files changed

+426
-14
lines changed

8 files changed

+426
-14
lines changed

drivers/mfd/clsic/Kconfig

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,7 @@ config MFD_CLSIC
66
select REGMAP
77
select REGMAP_IRQ
88
select MFD_CORE
9+
select IRQ_SIM
910
tristate
1011

1112
config MFD_CLSIC_SPI

drivers/mfd/clsic/Makefile

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,7 @@
55
clsic-objs := clsic-core.o clsic-msg.o clsic-irq.o
66

77
clsic-objs += clsic-bootsrv.o clsic-syssrv.o
8-
clsic-objs += clsic-rassrv.o clsic-tables.o
8+
clsic-objs += clsic-rassrv.o clsic-rassrv-irq.o clsic-tables.o
99
clsic-objs += clsic-tacna.o
1010

1111
obj-$(CONFIG_MFD_CLSIC) += clsic.o
Lines changed: 289 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,289 @@
1+
/*
2+
* clsic-rassrv-irq.c -- CLSIC Register Access Service IRQ support
3+
*
4+
* Copyright (C) 2015-2019 Cirrus Logic, Inc. and
5+
* Cirrus Logic International Semiconductor Ltd.
6+
*
7+
* This program is free software; you can redistribute it and/or modify
8+
* it under the terms of the GNU General Public License version 2 as
9+
* published by the Free Software Foundation.
10+
*/
11+
12+
#include <linux/mfd/core.h>
13+
14+
#include <linux/mfd/clsic/core.h>
15+
#include "clsic-trace.h"
16+
#include <linux/mfd/clsic/message.h>
17+
#include <linux/mfd/clsic/irq.h>
18+
#include <linux/mfd/clsic/rassrv.h>
19+
20+
/*
21+
* - The RAS service in the device can receive interrupts from DSP2.
22+
* - These interrupts are exposed to the host as optional notification
23+
* messages.
24+
* - The notifications are not enabled when the device is cold booted.
25+
* - The CLSIC_RAS_MSG_CR_SET_IRQ_NTY_MODE command is used to control their
26+
* state in the device.
27+
* - There are three modes;
28+
* - REQ: request an interrupt be enabled (may cause immediate notification)
29+
* - FLUSH_AND_REQ: clear any pending interrupt and then enable it
30+
* - CANCEL: disable sending interrupt notifications
31+
*
32+
* - When an interrupt is signalled to the RAS service in the device a
33+
* notification message is sent to the host and the interrupt is disabled.
34+
* The host must enable the interrupt to receive another notification.
35+
*
36+
* - The RAS service handler exposes the interrupts using the simulated IRQ
37+
* infrastructure, this is to maintain compatibility with the conventional
38+
* Cirrus DSP driver interfaces.
39+
* - CLSIC client drivers need to obtain the IRQ using
40+
* clsic_tacna_request_irq() and release it with clsic_tacna_free_irq()
41+
* - The IRQ is either unbound and disabled, or bound and enabled
42+
* - The IRQ simulator tracks whether an IRQ is unmasked
43+
* - When an notification is received, once the IRQ is delivered to the client
44+
* the worker thread automatically rearms the device
45+
*/
46+
47+
/*
48+
* This method allows client drivers to register their IRQ handler with a RAS
49+
* notification.
50+
*/
51+
int clsic_ras_request_irq(struct clsic_ras_struct *ras, unsigned int irq_id,
52+
const char *devname, irq_handler_t thread_fn,
53+
void *dev_id)
54+
{
55+
int ret;
56+
struct clsic_ras_irq *irq;
57+
58+
if (irq_id >= CLSIC_RAS_IRQ_COUNT) {
59+
clsic_err(ras->clsic,
60+
"error: Invalid NTY_IRQ index %d context %p",
61+
irq_id, ras);
62+
return -EINVAL;
63+
}
64+
65+
mutex_lock(&ras->irq_mutex);
66+
irq = &ras->irqs[irq_id];
67+
if (irq->state != CLSIC_RAS_IRQ_STATE_IDLE) {
68+
clsic_err(ras->clsic, "IRQ already bound (%d %p %d %pF)",
69+
irq_id, ras, irq->state, thread_fn);
70+
mutex_unlock(&ras->irq_mutex);
71+
return -EBUSY;
72+
}
73+
74+
irq->simirq_id = irq_sim_irqnum(&ras->irqsim, irq_id);
75+
ret = request_threaded_irq(irq->simirq_id, NULL, thread_fn,
76+
IRQF_ONESHOT, devname, dev_id);
77+
if (ret == 0) {
78+
irq->state = CLSIC_RAS_IRQ_STATE_ENABLING;
79+
queue_work(system_unbound_wq, &irq->work);
80+
}
81+
82+
mutex_unlock(&ras->irq_mutex);
83+
84+
return ret;
85+
}
86+
87+
/* Free RAS notification IRQ and its handler data */
88+
void clsic_ras_free_irq(struct clsic_ras_struct *ras,
89+
unsigned int irq_id, void *data)
90+
{
91+
struct clsic_ras_irq *irq;
92+
93+
if (irq_id >= CLSIC_RAS_IRQ_COUNT) {
94+
clsic_err(ras->clsic,
95+
"error: Invalid NTY_IRQ index %d context %p",
96+
irq_id, ras);
97+
return;
98+
}
99+
100+
mutex_lock(&ras->irq_mutex);
101+
irq = &ras->irqs[irq_id];
102+
irq->state = CLSIC_RAS_IRQ_STATE_DISABLING;
103+
queue_work(system_unbound_wq, &irq->work);
104+
mutex_unlock(&ras->irq_mutex);
105+
106+
flush_work(&irq->work);
107+
108+
mutex_lock(&ras->irq_mutex);
109+
free_irq(irq->simirq_id, data);
110+
if (irq->state == CLSIC_RAS_IRQ_STATE_DISABLED)
111+
irq->state = CLSIC_RAS_IRQ_STATE_IDLE;
112+
113+
mutex_unlock(&ras->irq_mutex);
114+
}
115+
116+
/*
117+
* Send a message to update the state of the RAS IRQ
118+
*/
119+
static void clsic_ras_irq_worker(struct work_struct *data)
120+
{
121+
struct clsic_ras_irq *irq = container_of(data, struct clsic_ras_irq,
122+
work);
123+
struct clsic_ras_struct *ras = irq->ras;
124+
struct clsic *clsic = ras->clsic;
125+
union clsic_ras_msg msg_cmd;
126+
union clsic_ras_msg msg_rsp;
127+
int ret;
128+
129+
memset(&msg_rsp, 0, CLSIC_FIXED_MSG_SZ);
130+
131+
mutex_lock(&ras->irq_mutex);
132+
133+
clsic_init_message((union t_clsic_generic_message *)&msg_cmd,
134+
ras->service->service_instance,
135+
CLSIC_RAS_MSG_CR_SET_IRQ_NTY_MODE);
136+
137+
msg_cmd.cmd_set_irq_nty_mode.irq_id = irq->id;
138+
139+
/* based on the current state determine the required mode to set */
140+
switch (irq->state) {
141+
case CLSIC_RAS_IRQ_STATE_ENABLING:
142+
case CLSIC_RAS_IRQ_STATE_ENABLED:
143+
case CLSIC_RAS_IRQ_STATE_PENDING:
144+
msg_cmd.cmd_set_irq_nty_mode.mode = CLSIC_RAS_NTY_REQ;
145+
break;
146+
case CLSIC_RAS_IRQ_STATE_DISABLING:
147+
case CLSIC_RAS_IRQ_STATE_IDLE:
148+
case CLSIC_RAS_IRQ_STATE_DISABLED:
149+
default:
150+
msg_cmd.cmd_set_irq_nty_mode.mode = CLSIC_RAS_NTY_CANCEL;
151+
break;
152+
}
153+
154+
ret = clsic_send_msg_sync_pm(clsic,
155+
(union t_clsic_generic_message *) &msg_cmd,
156+
(union t_clsic_generic_message *) &msg_rsp,
157+
CLSIC_NO_TXBUF, CLSIC_NO_TXBUF_LEN,
158+
CLSIC_NO_RXBUF, CLSIC_NO_RXBUF_LEN);
159+
160+
/* When a command succeeds potentially change the state */
161+
if ((ret == 0) && (msg_rsp.rsp_set_irq_nty_mode.hdr.err == 0))
162+
switch (irq->state) {
163+
case CLSIC_RAS_IRQ_STATE_PENDING:
164+
case CLSIC_RAS_IRQ_STATE_ENABLING:
165+
irq->state = CLSIC_RAS_IRQ_STATE_ENABLED;
166+
break;
167+
case CLSIC_RAS_IRQ_STATE_DISABLING:
168+
irq->state = CLSIC_RAS_IRQ_STATE_DISABLED;
169+
break;
170+
default:
171+
break;
172+
}
173+
174+
trace_clsic_ras_irq_change(msg_cmd.cmd_set_irq_nty_mode.irq_id,
175+
msg_cmd.cmd_set_irq_nty_mode.mode,
176+
irq->state,
177+
ret,
178+
msg_rsp.rsp_set_irq_nty_mode.hdr.err);
179+
180+
mutex_unlock(&ras->irq_mutex);
181+
};
182+
183+
/*
184+
* This method initialises the RAS IRQs and creates a mapping with the virtual
185+
* IRQ domain.
186+
*/
187+
int clsic_ras_irq_init(struct clsic_ras_struct *ras)
188+
{
189+
struct clsic_ras_irq *irq;
190+
unsigned int i;
191+
int ret;
192+
193+
ret = irq_sim_init(&ras->irqsim, CLSIC_RAS_IRQ_COUNT);
194+
if (ret != 0) {
195+
clsic_err(ras->clsic, "irq_sim_init() failed %d\n", ret);
196+
return ret;
197+
}
198+
199+
mutex_init(&ras->irq_mutex);
200+
for (i = 0; i < CLSIC_RAS_IRQ_COUNT; i++) {
201+
irq = &ras->irqs[i];
202+
irq->id = CLSIC_RAS_IRQ_DSP2_0 + 0;
203+
irq->ras = ras;
204+
irq->state = CLSIC_RAS_IRQ_STATE_IDLE;
205+
INIT_WORK(&irq->work, clsic_ras_irq_worker);
206+
}
207+
208+
return 0;
209+
}
210+
211+
/*
 * Extract the ID of the notification and trigger the associated IRQ handler.
 *
 * Called when a RAS notification message arrives.  If the IRQ is in an
 * active state it is marked PENDING, the simulated IRQ is fired (which
 * runs the client's threaded handler) and the worker is queued to re-arm
 * the notification in the device.  Notifications for unbound or disabling
 * IRQs are ignored.
 */
void clsic_ras_irq_handler(struct clsic *clsic,
			   struct clsic_ras_struct *ras,
			   union clsic_ras_msg *nty_msg)
{
	unsigned int irq_id;
	struct clsic_ras_irq *irq;
	bool trigger_worker = false;

	irq_id = nty_msg->nty_irq.irq_id;
	if (irq_id >= CLSIC_RAS_IRQ_COUNT) {
		clsic_err(clsic, "Invalid RAS IRQ id: %d\n", irq_id);
		return;
	}
	irq = &ras->irqs[irq_id];

	trace_clsic_ras_irq_event(irq_id);

	/* Decide under the lock whether this notification is of interest */
	mutex_lock(&ras->irq_mutex);
	switch (irq->state) {
	case CLSIC_RAS_IRQ_STATE_ENABLING:
	case CLSIC_RAS_IRQ_STATE_ENABLED:
	case CLSIC_RAS_IRQ_STATE_PENDING:
		irq->state = CLSIC_RAS_IRQ_STATE_PENDING;
		trigger_worker = true;
		break;
	case CLSIC_RAS_IRQ_STATE_IDLE:
	case CLSIC_RAS_IRQ_STATE_DISABLING:
	case CLSIC_RAS_IRQ_STATE_DISABLED:
		/* Do nothing / not interested */
		break;
	}
	mutex_unlock(&ras->irq_mutex);

	if (!trigger_worker)
		return;

	/* Make sure worker thread has no pending work */
	flush_work(&irq->work);

	/* trigger the simulated IRQ (invokes callback function) */
	irq_sim_fire(&ras->irqsim, irq_id);

	/*
	 * The RAS service automatically masks the IRQ as the notification is
	 * sent, trigger the worker to re-enable the interrupt.
	 */
	queue_work(system_unbound_wq, &irq->work);
}
263+
264+
/*
265+
* Called when RAS is suspended
266+
*
267+
* Notification IRQs are automatically masked on suspend, flush pending
268+
* worker threads
269+
*/
270+
void clsic_ras_irq_suspend(struct clsic_ras_struct *ras)
271+
{
272+
int i;
273+
274+
for (i = 0; i < CLSIC_RAS_IRQ_COUNT; i++)
275+
flush_work(&ras->irqs[i].work);
276+
}
277+
278+
/*
279+
* Called when the RAS service is resumed to re-enable unmasked RAS IRQs
280+
*/
281+
void clsic_ras_irq_resume(struct clsic_ras_struct *ras)
282+
{
283+
int i;
284+
285+
for (i = 0; i < CLSIC_RAS_IRQ_COUNT; i++)
286+
if (ras->irqs[i].state != CLSIC_RAS_IRQ_STATE_IDLE)
287+
queue_work(system_unbound_wq,
288+
&ras->irqs[i].work);
289+
}

0 commit comments

Comments
 (0)