Skip to content

Commit 7276531

Browse files
Tom Zanussi authored and rostedt committed
tracing: Consolidate trace() functions
Move the checking, buffer reserve and buffer commit code in synth_event_trace_start/end() into inline functions __synth_event_trace_start/end() so they can also be used by synth_event_trace() and synth_event_trace_array(), and then have all those functions use them. Also, change synth_event_trace_state.enabled to disabled so it only needs to be set if the event is disabled, which is not normally the case. Link: http://lkml.kernel.org/r/b1f3108d0f450e58192955a300e31d0405ab4149.1581374549.git.zanussi@kernel.org Signed-off-by: Tom Zanussi <[email protected]> Signed-off-by: Steven Rostedt (VMware) <[email protected]>
1 parent 0c62f6c commit 7276531

File tree

2 files changed

+87
-135
lines changed

2 files changed

+87
-135
lines changed

include/linux/trace_events.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -424,7 +424,7 @@ struct synth_event_trace_state {
424424
struct synth_event *event;
425425
unsigned int cur_field;
426426
unsigned int n_u64;
427-
bool enabled;
427+
bool disabled;
428428
bool add_next;
429429
bool add_name;
430430
};

kernel/trace/trace_events_hist.c

Lines changed: 86 additions & 134 deletions
Original file line numberDiff line numberDiff line change
@@ -1791,6 +1791,60 @@ void synth_event_cmd_init(struct dynevent_cmd *cmd, char *buf, int maxlen)
17911791
}
17921792
EXPORT_SYMBOL_GPL(synth_event_cmd_init);
17931793

1794+
static inline int
1795+
__synth_event_trace_start(struct trace_event_file *file,
1796+
struct synth_event_trace_state *trace_state)
1797+
{
1798+
int entry_size, fields_size = 0;
1799+
int ret = 0;
1800+
1801+
/*
1802+
* Normal event tracing doesn't get called at all unless the
1803+
* ENABLED bit is set (which attaches the probe thus allowing
1804+
* this code to be called, etc). Because this is called
1805+
* directly by the user, we don't have that but we still need
1806+
* to honor not logging when disabled. For the the iterated
1807+
* trace case, we save the enabed state upon start and just
1808+
* ignore the following data calls.
1809+
*/
1810+
if (!(file->flags & EVENT_FILE_FL_ENABLED) ||
1811+
trace_trigger_soft_disabled(file)) {
1812+
trace_state->disabled = true;
1813+
ret = -ENOENT;
1814+
goto out;
1815+
}
1816+
1817+
trace_state->event = file->event_call->data;
1818+
1819+
fields_size = trace_state->event->n_u64 * sizeof(u64);
1820+
1821+
/*
1822+
* Avoid ring buffer recursion detection, as this event
1823+
* is being performed within another event.
1824+
*/
1825+
trace_state->buffer = file->tr->array_buffer.buffer;
1826+
ring_buffer_nest_start(trace_state->buffer);
1827+
1828+
entry_size = sizeof(*trace_state->entry) + fields_size;
1829+
trace_state->entry = trace_event_buffer_reserve(&trace_state->fbuffer,
1830+
file,
1831+
entry_size);
1832+
if (!trace_state->entry) {
1833+
ring_buffer_nest_end(trace_state->buffer);
1834+
ret = -EINVAL;
1835+
}
1836+
out:
1837+
return ret;
1838+
}
1839+
1840+
/*
 * Commit the entry reserved by __synth_event_trace_start() and leave
 * the nested ring buffer context it entered. Must only be called
 * after a successful __synth_event_trace_start().
 */
static inline void
__synth_event_trace_end(struct synth_event_trace_state *trace_state)
{
	trace_event_buffer_commit(&trace_state->fbuffer);

	ring_buffer_nest_end(trace_state->buffer);
}
1847+
17941848
/**
17951849
* synth_event_trace - Trace a synthetic event
17961850
* @file: The trace_event_file representing the synthetic event
@@ -1812,69 +1866,38 @@ EXPORT_SYMBOL_GPL(synth_event_cmd_init);
18121866
*/
18131867
int synth_event_trace(struct trace_event_file *file, unsigned int n_vals, ...)
18141868
{
1815-
struct trace_event_buffer fbuffer;
1816-
struct synth_trace_event *entry;
1817-
struct trace_buffer *buffer;
1818-
struct synth_event *event;
1869+
struct synth_event_trace_state state;
18191870
unsigned int i, n_u64;
1820-
int fields_size = 0;
18211871
va_list args;
1822-
int ret = 0;
1823-
1824-
/*
1825-
* Normal event generation doesn't get called at all unless
1826-
* the ENABLED bit is set (which attaches the probe thus
1827-
* allowing this code to be called, etc). Because this is
1828-
* called directly by the user, we don't have that but we
1829-
* still need to honor not logging when disabled.
1830-
*/
1831-
if (!(file->flags & EVENT_FILE_FL_ENABLED) ||
1832-
trace_trigger_soft_disabled(file))
1833-
return 0;
1834-
1835-
event = file->event_call->data;
1836-
1837-
if (n_vals != event->n_fields)
1838-
return -EINVAL;
1839-
1840-
fields_size = event->n_u64 * sizeof(u64);
1841-
1842-
/*
1843-
* Avoid ring buffer recursion detection, as this event
1844-
* is being performed within another event.
1845-
*/
1846-
buffer = file->tr->array_buffer.buffer;
1847-
ring_buffer_nest_start(buffer);
1872+
int ret;
18481873

1849-
entry = trace_event_buffer_reserve(&fbuffer, file,
1850-
sizeof(*entry) + fields_size);
1851-
if (!entry) {
1852-
ret = -EINVAL;
1853-
goto out;
1874+
ret = __synth_event_trace_start(file, &state);
1875+
if (ret) {
1876+
if (ret == -ENOENT)
1877+
ret = 0; /* just disabled, not really an error */
1878+
return ret;
18541879
}
18551880

18561881
va_start(args, n_vals);
1857-
for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
1882+
for (i = 0, n_u64 = 0; i < state.event->n_fields; i++) {
18581883
u64 val;
18591884

18601885
val = va_arg(args, u64);
18611886

1862-
if (event->fields[i]->is_string) {
1887+
if (state.event->fields[i]->is_string) {
18631888
char *str_val = (char *)(long)val;
1864-
char *str_field = (char *)&entry->fields[n_u64];
1889+
char *str_field = (char *)&state.entry->fields[n_u64];
18651890

18661891
strscpy(str_field, str_val, STR_VAR_LEN_MAX);
18671892
n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
18681893
} else {
1869-
entry->fields[n_u64] = val;
1894+
state.entry->fields[n_u64] = val;
18701895
n_u64++;
18711896
}
18721897
}
18731898
va_end(args);
18741899

1875-
trace_event_buffer_commit(&fbuffer);
1876-
out:
1877-
ring_buffer_nest_end(buffer);
1900+
__synth_event_trace_end(&state);
18781901

18791902
return ret;
18801903
}
@@ -1901,62 +1924,31 @@ EXPORT_SYMBOL_GPL(synth_event_trace);
19011924
int synth_event_trace_array(struct trace_event_file *file, u64 *vals,
			    unsigned int n_vals)
{
	struct synth_event_trace_state state;
	unsigned int i, n_u64;
	int ret;

	ret = __synth_event_trace_start(file, &state);
	if (ret) {
		if (ret == -ENOENT)
			ret = 0; /* just disabled, not really an error */
		return ret;
	}

	/*
	 * Restore the argument-count check the consolidation dropped:
	 * without it, a mismatched vals[] array would silently commit
	 * a short or garbage event (or read past the end of vals[]).
	 */
	if (n_vals != state.event->n_fields) {
		ret = -EINVAL;
		goto out;
	}

	for (i = 0, n_u64 = 0; i < state.event->n_fields; i++) {
		if (state.event->fields[i]->is_string) {
			char *str_val = (char *)(long)vals[i];
			char *str_field = (char *)&state.entry->fields[n_u64];

			strscpy(str_field, str_val, STR_VAR_LEN_MAX);
			/* strings occupy a fixed number of u64 slots */
			n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
		} else {
			state.entry->fields[n_u64] = vals[i];
			n_u64++;
		}
	}

out:
	__synth_event_trace_end(&state);

	return ret;
}
@@ -1993,55 +1985,17 @@ EXPORT_SYMBOL_GPL(synth_event_trace_array);
19931985
int synth_event_trace_start(struct trace_event_file *file,
			    struct synth_event_trace_state *trace_state)
{
	int ret;

	/* Caller must supply state carried across the _add_next/_end calls. */
	if (!trace_state)
		return -EINVAL;

	/*
	 * Zero the state so disabled/add_next/add_name start false and
	 * the subsequent piecewise-add calls see a clean slate.
	 */
	memset(trace_state, '\0', sizeof(*trace_state));

	ret = __synth_event_trace_start(file, trace_state);
	if (ret == -ENOENT)
		ret = 0; /* just disabled, not really an error */

	return ret;
}
20472001
EXPORT_SYMBOL_GPL(synth_event_trace_start);
@@ -2074,7 +2028,7 @@ static int __synth_event_add_val(const char *field_name, u64 val,
20742028
trace_state->add_next = true;
20752029
}
20762030

2077-
if (!trace_state->enabled)
2031+
if (trace_state->disabled)
20782032
goto out;
20792033

20802034
event = trace_state->event;
@@ -2209,9 +2163,7 @@ int synth_event_trace_end(struct synth_event_trace_state *trace_state)
22092163
if (!trace_state)
22102164
return -EINVAL;
22112165

2212-
trace_event_buffer_commit(&trace_state->fbuffer);
2213-
2214-
ring_buffer_nest_end(trace_state->buffer);
2166+
__synth_event_trace_end(trace_state);
22152167

22162168
return 0;
22172169
}

0 commit comments

Comments
 (0)