@@ -7322,85 +7322,19 @@ tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
 }
 
 /*
- * rtollert: tracing_ni_ett_raw_write exists as part of LabVIEW RT's support of
- * the Execution Trace Toolkit. LabVIEW RT logs its own events through this
- * interface, so that they are stored in ftrace's ring buffers. Basically
- * tracing_ni_ett_raw_write is the same as tracing_mark_write, except all the
- * text processing code is ripped out for improved performance.
+ * tracing_ni_ett_raw_write was added as part of LabVIEW RT's support of the
+ * Execution Trace Toolkit (RTETT). But RTETT is being deprecated in favor of
+ * using open source tooling, including ftrace and kernelshark. Keep the
+ * trace_ni_ett_marker file and just enough implementation to not break existing
+ * VIs.
  *
- * These events will show up as BPRINT ftrace events, with ip and fmt set to
- * the fourcc 'lvrt'. The event data is generally a binary blob that is
- * processed later by LabVIEW RT (and ultimately the ETT). That data is not
- * meant to be parsed by third parties and is not documented (sorry).
- *
- * I'm a little embarrassed of this implementation, so this code goes out of
- * its way to scream "HACK!": The hardcoded settings for ip and fmt; the
- * name of the marker file (trace_ni_ett_marker), etc.
- *
- * Eventually I'd like to see a solution which would allow multiple programs
- * to each write to their own marker files, with dynamically allocated IDs,
- * without overloading BPRINT events, etc. However a lot of that is contingent
- * on if it's even a good idea to allow binary blobs to be logged to ftrace.
- * (a worthwhile discussion!)
+ * This can be completely removed when support is dropped for old LabVIEW
+ * versions shipping RTETT VIs.
  */
 static ssize_t
 tracing_ni_ett_raw_write(struct file *filp, const char __user *ubuf,
			 size_t cnt, loff_t *fpos)
 {
-	struct trace_event_call *call = &event_bprint;
-	struct ring_buffer_event *event;
-	struct trace_array *tr = &global_trace;
-	struct trace_buffer *buffer = tr->array_buffer.buffer;
-	struct trace_array_cpu *data;
-	int cpu, size;
-	unsigned int trace_ctx;
-	struct bprint_entry *entry;
-	unsigned long irq_flags;
-	int disable;
-
-	const unsigned int ip = 0x6c767274; /* "lvrt" */
-	const char *fmt = "lvrt"; /* to avoid dereferencing NULL */
-
-	if (tracing_disabled || tracing_selftest_running)
-		return -EINVAL;
-
-	preempt_disable_notrace();
-	cpu = raw_smp_processor_id();
-	data = per_cpu_ptr(tr->array_buffer.data, cpu);
-	disable = atomic_inc_return(&data->disabled);
-	if (unlikely(disable != 1))
-		goto out;
-	pause_graph_tracing();
-	raw_local_irq_save(irq_flags);
-
-	trace_ctx = tracing_gen_ctx_flags(irq_flags);
-	size = sizeof(*entry) + cnt;
-	event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
-					    trace_ctx);
-	if (!event)
-		goto out_unlock;
-	entry = ring_buffer_event_data(event);
-	entry->ip = ip;
-	entry->fmt = fmt;
-
-	if (cnt) {
-		if (copy_from_user(&(entry->buf[0]), ubuf, cnt)) {
-			cnt = -EFAULT;
-			goto error_and_trace;
-		}
-	}
-	if (call_filter_check_discard(call, entry, buffer, event))
-		goto out_unlock;
- error_and_trace:
-	__buffer_unlock_commit(buffer, event);
-	ftrace_trace_stack(&global_trace, buffer, trace_ctx, 6, NULL);
- out_unlock:
-	raw_local_irq_restore(irq_flags);
-	unpause_graph_tracing();
- out:
-	atomic_dec_return(&data->disabled);
-	preempt_enable_notrace();
-
 	return cnt;
 }
 