Skip to content

Commit e2310b8

Browse files
authored
Update WIT syntax with semicolons (#57)
* Update WIT syntax with semicolons, in preparation for when it's eventually required
* Add another semicolon
* Regenerate ml.md
1 parent 747d8df commit e2310b8

File tree

3 files changed

+37
-39
lines changed

3 files changed

+37
-39
lines changed

.github/workflows/main.yml

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,4 @@ jobs:
1111
runs-on: ubuntu-latest
1212
steps:
1313
- uses: actions/checkout@v2
14-
- uses: WebAssembly/wit-abi-up-to-date@v13
15-
with:
16-
wit-abi-tag: wit-abi-0.11.0
14+
- uses: WebAssembly/wit-abi-up-to-date@v16

ml.md

Lines changed: 15 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,11 @@ Then, the user passes <em>tensor</em> inputs to the <em>graph</em>, computes the
2020
<p>All inputs and outputs to an ML inference are represented as <a href="#tensor"><code>tensor</code></a>s.</p>
2121
<hr />
2222
<h3>Types</h3>
23+
<h4><a name="tensor_dimensions"><code>type tensor-dimensions</code></a></h4>
24+
<p><a href="#tensor_dimensions"><a href="#tensor_dimensions"><code>tensor-dimensions</code></a></a></p>
25+
<p>The dimensions of a tensor.
26+
<p>The array length matches the tensor rank and each element in the array describes the size of
27+
each dimension</p>
2328
<h4><a name="tensor_type"><code>enum tensor-type</code></a></h4>
2429
<p>The type of the elements in a tensor.</p>
2530
<h5>Enum Cases</h5>
@@ -32,11 +37,6 @@ Then, the user passes <em>tensor</em> inputs to the <em>graph</em>, computes the
3237
<li><a name="tensor_type.i32"><code>I32</code></a></li>
3338
<li><a name="tensor_type.i64"><code>I64</code></a></li>
3439
</ul>
35-
<h4><a name="tensor_dimensions"><code>type tensor-dimensions</code></a></h4>
36-
<p><a href="#tensor_dimensions"><a href="#tensor_dimensions"><code>tensor-dimensions</code></a></a></p>
37-
<p>The dimensions of a tensor.
38-
<p>The array length matches the tensor rank and each element in the array describes the size of
39-
each dimension</p>
4040
<h4><a name="tensor_data"><code>type tensor-data</code></a></h4>
4141
<p><a href="#tensor_data"><a href="#tensor_data"><code>tensor-data</code></a></a></p>
4242
<p>The tensor data.
@@ -78,7 +78,11 @@ framework (e.g., TensorFlow):</p>
7878
#### <a name="tensor">`type tensor`</a>
7979
[`tensor`](#tensor)
8080
<p>
81-
#### <a name="graph_encoding">`enum graph-encoding`</a>
81+
#### <a name="graph">`type graph`</a>
82+
`u32`
83+
<p>An execution graph for performing inference (i.e., a model).
84+
<p>TODO: replace with <code>resource</code> (https://github.com/WebAssembly/wasi-nn/issues/47).</p>
85+
<h4><a name="graph_encoding"><code>enum graph-encoding</code></a></h4>
8286
<p>Describes the encoding of the graph. This allows the API to be implemented by various
8387
backends that encode (i.e., serialize) their graph IR with different formats.</p>
8488
<h5>Enum Cases</h5>
@@ -90,15 +94,6 @@ backends that encode (i.e., serialize) their graph IR with different formats.</p
9094
<li><a name="graph_encoding.tensorflowlite"><code>tensorflowlite</code></a></li>
9195
<li><a name="graph_encoding.autodetect"><code>autodetect</code></a></li>
9296
</ul>
93-
<h4><a name="graph_builder"><code>type graph-builder</code></a></h4>
94-
<p><a href="#graph_builder"><a href="#graph_builder"><code>graph-builder</code></a></a></p>
95-
<p>The graph initialization data.
96-
<p>This gets bundled up into an array of buffers because implementing backends may encode their
97-
graph IR in parts (e.g., OpenVINO stores its IR and weights separately).</p>
98-
<h4><a name="graph"><code>type graph</code></a></h4>
99-
<p><code>u32</code></p>
100-
<p>An execution graph for performing inference (i.e., a model).
101-
<p>TODO: replace with <code>resource</code> (https://github.com/WebAssembly/wasi-nn/issues/47).</p>
10297
<h4><a name="execution_target"><code>enum execution-target</code></a></h4>
10398
<p>Define where the graph should be executed.</p>
10499
<h5>Enum Cases</h5>
@@ -107,6 +102,11 @@ graph IR in parts (e.g., OpenVINO stores its IR and weights separately).</p>
107102
<li><a name="execution_target.gpu"><code>gpu</code></a></li>
108103
<li><a name="execution_target.tpu"><code>tpu</code></a></li>
109104
</ul>
105+
<h4><a name="graph_builder"><code>type graph-builder</code></a></h4>
106+
<p><a href="#graph_builder"><a href="#graph_builder"><code>graph-builder</code></a></a></p>
107+
<p>The graph initialization data.
108+
<p>This gets bundled up into an array of buffers because implementing backends may encode their
109+
graph IR in parts (e.g., OpenVINO stores its IR and weights separately).</p>
110110
<hr />
111111
<h3>Functions</h3>
112112
<h4><a name="load"><code>load: func</code></a></h4>

wit/wasi-nn.wit

Lines changed: 21 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
package wasi:nn
1+
package wasi:nn;
22

33
/// `wasi-nn` is a WASI API for performing machine learning (ML) inference. The API is not (yet)
44
/// capable of performing ML training. WebAssembly programs that want to use a host's ML
@@ -9,10 +9,10 @@ package wasi:nn
99
///
1010
/// This example world shows how to use these primitives together.
1111
world ml {
12-
import tensor
13-
import graph
14-
import inference
15-
import errors
12+
import tensor;
13+
import graph;
14+
import inference;
15+
import errors;
1616
}
1717

1818
/// All inputs and outputs to an ML inference are represented as `tensor`s.
@@ -21,7 +21,7 @@ interface tensor {
2121
///
2222
/// The array length matches the tensor rank and each element in the array describes the size of
2323
/// each dimension
24-
type tensor-dimensions = list<u32>
24+
type tensor-dimensions = list<u32>;
2525

2626
/// The type of the elements in a tensor.
2727
enum tensor-type {
@@ -41,7 +41,7 @@ interface tensor {
4141
/// in the type (e.g., a 2x2 tensor with 4-byte f32 elements would have a data array of length
4242
/// 16). Naturally, this representation requires some knowledge of how to lay out data in
4343
/// memory--e.g., using row-major ordering--and could perhaps be improved.
44-
type tensor-data = list<u8>
44+
type tensor-data = list<u8>;
4545

4646
record tensor {
4747
// Describe the size of the tensor (e.g., 2x2x2x2 -> [2, 2, 2, 2]). To represent a tensor
@@ -59,13 +59,13 @@ interface tensor {
5959
/// A `graph` is a loaded instance of a specific ML model (e.g., MobileNet) for a specific ML
6060
/// framework (e.g., TensorFlow):
6161
interface graph {
62-
use errors.{error}
63-
use tensor.{tensor}
62+
use errors.{error};
63+
use tensor.{tensor};
6464

6565
/// An execution graph for performing inference (i.e., a model).
6666
///
6767
/// TODO: replace with `resource` (https://github.com/WebAssembly/wasi-nn/issues/47).
68-
type graph = u32
68+
type graph = u32;
6969

7070
/// Describes the encoding of the graph. This allows the API to be implemented by various
7171
/// backends that encode (i.e., serialize) their graph IR with different formats.
@@ -89,45 +89,45 @@ interface graph {
8989
///
9090
/// This gets bundled up into an array of buffers because implementing backends may encode their
9191
/// graph IR in parts (e.g., OpenVINO stores its IR and weights separately).
92-
type graph-builder = list<u8>
92+
type graph-builder = list<u8>;
9393

9494
/// Load a `graph` from an opaque sequence of bytes to use for inference.
95-
load: func(builder: list<graph-builder>, encoding: graph-encoding, target: execution-target) -> result<graph, error>
95+
load: func(builder: list<graph-builder>, encoding: graph-encoding, target: execution-target) -> result<graph, error>;
9696

9797
/// Load a `graph` by name.
9898
///
9999
/// How the host expects the names to be passed and how it stores the graphs for retrieval via
100100
/// this function is **implementation-specific**. This allows hosts to choose name schemes that
101101
/// range from simple to complex (e.g., URLs?) and caching mechanisms of various kinds.
102-
load-by-name: func(name: string) -> result<graph, error>
102+
load-by-name: func(name: string) -> result<graph, error>;
103103
}
104104

105105
/// An inference "session" is encapsulated by a `graph-execution-context`. This structure binds a
106106
/// `graph` to input tensors before `compute`-ing an inference:
107107
interface inference {
108-
use errors.{error}
109-
use tensor.{tensor, tensor-data}
110-
use graph.{graph}
108+
use errors.{error};
109+
use tensor.{tensor, tensor-data};
110+
use graph.{graph};
111111

112112
/// Bind a `graph` to the input and output tensors for an inference.
113113
///
114114
/// TODO: this is no longer necessary in WIT (https://github.com/WebAssembly/wasi-nn/issues/43)
115-
type graph-execution-context = u32
115+
type graph-execution-context = u32;
116116

117117
/// Create an execution instance of a loaded graph.
118-
init-execution-context: func(graph: graph) -> result<graph-execution-context, error>
118+
init-execution-context: func(graph: graph) -> result<graph-execution-context, error>;
119119

120120
/// Define the inputs to use for inference.
121-
set-input: func(ctx: graph-execution-context, index: u32, tensor: tensor) -> result<_, error>
121+
set-input: func(ctx: graph-execution-context, index: u32, tensor: tensor) -> result<_, error>;
122122

123123
/// Compute the inference on the given inputs.
124124
///
125125
/// Note the expected sequence of calls: `set-input`, `compute`, `get-output`. TODO: this
126126
/// expectation could be removed as a part of https://github.com/WebAssembly/wasi-nn/issues/43.
127-
compute: func(ctx: graph-execution-context) -> result<_, error>
127+
compute: func(ctx: graph-execution-context) -> result<_, error>;
128128

129129
/// Extract the outputs after inference.
130-
get-output: func(ctx: graph-execution-context, index: u32) -> result<tensor-data, error>
130+
get-output: func(ctx: graph-execution-context, index: u32) -> result<tensor-data, error>;
131131
}
132132

133133
/// TODO: create function-specific errors (https://github.com/WebAssembly/wasi-nn/issues/42)

0 commit comments

Comments (0)