
Commit 3189f38 (1 parent: c100721)

add JuliaFormatter to Github Workflows (#253)

* add JuliaFormatter to Github Workflows
* fixes
* run JuliaFormatter
* limit to one test
* update readme
* add fix
* use julia v1.8.2
* run JuliaFormatter


56 files changed: +2042 -2059 lines

.JuliaFormatter.toml

Lines changed: 1 addition & 0 deletions

@@ -0,0 +1 @@
+style = "sciml"
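With this file at the repository root, JuliaFormatter picks up the SciML style automatically whenever the project is formatted. A minimal local sketch (assuming JuliaFormatter is installed in the active environment):

    # Format the whole project in place; options are read from .JuliaFormatter.toml,
    # so with this file present the call below behaves like format(".", SciMLStyle()).
    using JuliaFormatter
    format(".")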

.github/workflows/FormatCheck.yml

Lines changed: 43 additions & 0 deletions

@@ -0,0 +1,43 @@
+name: format-check
+
+on:
+  pull_request:
+    branches:
+      - master
+  push:
+    branches:
+      - master
+
+jobs:
+  build:
+    runs-on: ${{ matrix.os }}
+    strategy:
+      fail-fast: false
+      matrix:
+        version:
+          - '1.6' # Replace this with the minimum Julia version that your package supports. E.g. if your package requires Julia 1.5 or higher, change this to '1.5'.
+        os:
+          - ubuntu-latest
+        arch:
+          - x64
+    steps:
+      - uses: actions/checkout@v2
+      - uses: julia-actions/setup-julia@v1
+        with:
+          version: ${{ matrix.version }}
+          arch: ${{ matrix.arch }}
+      - name: Install JuliaFormatter and Format
+        run: |
+          julia -e 'using Pkg; Pkg.add(PackageSpec(name="JuliaFormatter"))'
+          julia -e 'using JuliaFormatter; format(".", verbose=true)'
+      - name: Format check
+        run: |
+          julia -e '
+          out = Cmd(`git diff`) |> read |> String
+          if out == ""
+              exit(0)
+          else
+              @error "Some files have not been formatted !!!"
+              write(stdout, out)
+              exit(1)
+          end'
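The same check can be reproduced locally before pushing. A sketch mirroring the two workflow steps above, assuming a git checkout and network access for Pkg.add:

    # Install the formatter, format the repository in place, then fail if
    # `git diff` reports any changes, as the "Format check" step does.
    using Pkg; Pkg.add(PackageSpec(name = "JuliaFormatter"))
    using JuliaFormatter
    format("."; verbose = true)
    out = read(`git diff`, String)
    isempty(out) || error("Some files have not been formatted:\n" * out)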

README.md

Lines changed: 1 addition & 0 deletions

@@ -7,6 +7,7 @@
 [![](https://img.shields.io/badge/docs-dev-blue.svg)](https://CarloLucibello.github.io/GraphNeuralNetworks.jl/dev)
 ![](https://github.com/CarloLucibello/GraphNeuralNetworks.jl/actions/workflows/ci.yml/badge.svg)
 [![codecov](https://codecov.io/gh/CarloLucibello/GraphNeuralNetworks.jl/branch/master/graph/badge.svg)](https://codecov.io/gh/CarloLucibello/GraphNeuralNetworks.jl)
+[![Code Style](https://img.shields.io/static/v1?label=code%20style&message=SciML&color=9558b2&labelColor=389826)](https://github.com/SciML/SciMLStyle)
docs/make.jl

Lines changed: 22 additions & 24 deletions

@@ -6,40 +6,38 @@ tutorials, tutorials_cb, tutorial_assets = makedemos("tutorials")
 assets = []
 isnothing(tutorial_assets) || push!(assets, tutorial_assets)
 
-DocMeta.setdocmeta!(GraphNeuralNetworks, :DocTestSetup,
-                    :(using GraphNeuralNetworks, Graphs, SparseArrays, NNlib, Flux);
-                    recursive=true)
+DocMeta.setdocmeta!(GraphNeuralNetworks, :DocTestSetup,
+                    :(using GraphNeuralNetworks, Graphs, SparseArrays, NNlib, Flux);
+                    recursive = true)
 
 prettyurls = get(ENV, "CI", nothing) == "true"
 mathengine = MathJax3()
-
+
 makedocs(;
-         modules = [GraphNeuralNetworks, NNlib, Flux, Graphs, SparseArrays],
-         doctest = false,
-         clean = true,
-         format= Documenter.HTML(; mathengine, prettyurls, assets=assets),
-         sitename = "GraphNeuralNetworks.jl",
-         pages = ["Home" => "index.md",
+         modules = [GraphNeuralNetworks, NNlib, Flux, Graphs, SparseArrays],
+         doctest = false,
+         clean = true,
+         format = Documenter.HTML(; mathengine, prettyurls, assets = assets),
+         sitename = "GraphNeuralNetworks.jl",
+         pages = ["Home" => "index.md",
              "Graphs" => "gnngraph.md",
             "Message Passing" => "messagepassing.md",
             "Model Building" => "models.md",
             "Datasets" => "datasets.md",
             "HeteroGraphs" => "gnnheterograph.md",
             "Tutorials" => tutorials,
-            "API Reference" =>
-                [
-                    "GNNGraph" => "api/gnngraph.md",
-                    "Basic Layers" => "api/basic.md",
-                    "Convolutional Layers" => "api/conv.md",
-                    "Pooling Layers" => "api/pool.md",
-                    "Message Passing" => "api/messagepassing.md",
-                    "Utils" => "api/utils.md",
-                ],
-            "Developer Notes" => "dev.md",
-            "Summer Of Code" => "gsoc.md",
-            ],
-         )
+            "API Reference" => [
+                "GNNGraph" => "api/gnngraph.md",
+                "Basic Layers" => "api/basic.md",
+                "Convolutional Layers" => "api/conv.md",
+                "Pooling Layers" => "api/pool.md",
+                "Message Passing" => "api/messagepassing.md",
+                "Utils" => "api/utils.md",
+            ],
+            "Developer Notes" => "dev.md",
+            "Summer Of Code" => "gsoc.md",
+        ])
 
 tutorials_cb()
 
-deploydocs(repo="github.com/CarloLucibello/GraphNeuralNetworks.jl.git")
+deploydocs(repo = "github.com/CarloLucibello/GraphNeuralNetworks.jl.git")
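The changes above are exactly what the SciML style enforces: spaces around "=" in keyword arguments and flatter indentation for nested lists. A hypothetical per-file check after editing docs/make.jl, assuming JuliaFormatter is available:

    # format_file formats a single file in place and returns true when the file
    # already matched the configured style, so it doubles as a quick check.
    using JuliaFormatter
    format_file("docs/make.jl")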

docs/pluto_output/gnn_intro_pluto.md

Lines changed: 28 additions & 27 deletions
(Large diff not rendered.)

docs/pluto_output/graph_classification_pluto.md

Lines changed: 27 additions & 27 deletions

@@ -25,7 +25,7 @@
 <!--
 # This information is used for caching.
 [PlutoStaticHTML.State]
-input_sha = "1bb7ba1abfd868b62a25393944efff368141d2e85e62bef7228c91f540463b54"
+input_sha = "465a4590b716e99f2d70eaaa9ca2e5de5f8549d401b4fa432fab439ab30aa172"
 julia_version = "1.8.2"
 -->
 <pre class='language-julia'><code class='language-julia'>begin
@@ -36,7 +36,7 @@
     using MLDatasets
     using MLUtils
     using LinearAlgebra, Random, Statistics
-
+
     ENV["DATADEPS_ALWAYS_ACCEPT"] = "true" # don't ask for dataset download confirmation
     Random.seed!(17) # for reproducibility
 end;</code></pre>
@@ -60,11 +60,11 @@ end;</code></pre>
 1
 -1</pre>
 
-<pre class='language-julia'><code class='language-julia'>g1, y1 = dataset[1] #get the first graph and target</code></pre>
+<pre class='language-julia'><code class='language-julia'>g1, y1 = dataset[1] #get the first graph and target</code></pre>
 <pre class="code-output documenter-example-output" id="var-y1">(graphs = Graph(17, 38), targets = 1)</pre>
 
-<pre class='language-julia'><code class='language-julia'>reduce(vcat, g.node_data.targets for (g,_) in dataset) |&gt; union</code></pre>
-<pre class="code-output documenter-example-output" id="var-hash163982">7-element Vector{Int64}:
+<pre class='language-julia'><code class='language-julia'>reduce(vcat, g.node_data.targets for (g, _) in dataset) |&gt; union</code></pre>
+<pre class="code-output documenter-example-output" id="var-hash256211">7-element Vector{Int64}:
 0
 1
 2
@@ -73,8 +73,8 @@ end;</code></pre>
 5
 6</pre>
 
-<pre class='language-julia'><code class='language-julia'>reduce(vcat, g.edge_data.targets for (g,_) in dataset)|&gt; union</code></pre>
-<pre class="code-output documenter-example-output" id="var-hash766940">4-element Vector{Int64}:
+<pre class='language-julia'><code class='language-julia'>reduce(vcat, g.edge_data.targets for (g, _) in dataset) |&gt; union</code></pre>
+<pre class="code-output documenter-example-output" id="var-hash262486">4-element Vector{Int64}:
 0
 1
 2
@@ -88,9 +88,9 @@ end;</code></pre>
 
 <pre class='language-julia'><code class='language-julia'>begin
     graphs = mldataset2gnngraph(dataset)
-    graphs = [GNNGraph(g,
-                       ndata=Float32.(onehotbatch(g.ndata.targets, 0:6)),
-                       edata=nothing)
+    graphs = [GNNGraph(g,
+                       ndata = Float32.(onehotbatch(g.ndata.targets, 0:6)),
+                       edata = nothing)
               for g in graphs]
     y = onehotbatch(dataset.graph_data.targets, [-1, 1])
 end</code></pre>
@@ -101,12 +101,12 @@ end</code></pre>
 
 <div class="markdown"><p>We have some useful utilities for working with graph datasets, <em>e.g.</em>, we can shuffle the dataset and use the first 150 graphs as training graphs, while using the remaining ones for testing:</p></div>
 
-<pre class='language-julia'><code class='language-julia'>train_data, test_data = splitobs((graphs, y), at=150, shuffle=true) |&gt; getobs</code></pre>
-<pre class="code-output documenter-example-output" id="var-train_data">((GraphNeuralNetworks.GNNGraphs.GNNGraph{Tuple{Vector{Int64}, Vector{Int64}, Nothing}}[GNNGraph(16, 34), GNNGraph(19, 44), GNNGraph(13, 28), GNNGraph(11, 22), GNNGraph(15, 32), GNNGraph(12, 26), GNNGraph(11, 22), GNNGraph(16, 34), GNNGraph(23, 52), GNNGraph(16, 34) … GNNGraph(19, 44), GNNGraph(13, 28), GNNGraph(15, 34), GNNGraph(17, 38), GNNGraph(19, 44), GNNGraph(13, 26), GNNGraph(11, 22), GNNGraph(21, 44), GNNGraph(22, 50), GNNGraph(23, 54)], Bool[1 0 … 0 0; 0 1 … 1 1]), (GraphNeuralNetworks.GNNGraphs.GNNGraph{Tuple{Vector{Int64}, Vector{Int64}, Nothing}}[GNNGraph(16, 34), GNNGraph(16, 34), GNNGraph(21, 44), GNNGraph(16, 34), GNNGraph(20, 44), GNNGraph(13, 26), GNNGraph(21, 44), GNNGraph(15, 34), GNNGraph(20, 44), GNNGraph(26, 60) … GNNGraph(17, 38), GNNGraph(22, 50), GNNGraph(22, 50), GNNGraph(17, 38), GNNGraph(18, 40), GNNGraph(17, 38), GNNGraph(23, 54), GNNGraph(14, 28), GNNGraph(17, 38), GNNGraph(11, 22)], Bool[1 1 … 0 1; 0 0 … 1 0]))</pre>
+<pre class='language-julia'><code class='language-julia'>train_data, test_data = splitobs((graphs, y), at = 150, shuffle = true) |&gt; getobs</code></pre>
+<pre class="code-output documenter-example-output" id="var-train_data">((GraphNeuralNetworks.GNNGraphs.GNNGraph{Tuple{Vector{Int64}, Vector{Int64}, Nothing}}[GNNGraph(16, 34), GNNGraph(15, 32), GNNGraph(19, 44), GNNGraph(20, 44), GNNGraph(20, 46), GNNGraph(15, 34), GNNGraph(18, 40), GNNGraph(16, 36), GNNGraph(13, 28), GNNGraph(16, 34) … GNNGraph(23, 48), GNNGraph(20, 44), GNNGraph(28, 66), GNNGraph(25, 56), GNNGraph(13, 28), GNNGraph(16, 36), GNNGraph(12, 24), GNNGraph(22, 50), GNNGraph(25, 58), GNNGraph(19, 42)], Bool[1 1 … 0 0; 0 0 … 1 1]), (GraphNeuralNetworks.GNNGraphs.GNNGraph{Tuple{Vector{Int64}, Vector{Int64}, Nothing}}[GNNGraph(12, 24), GNNGraph(11, 22), GNNGraph(15, 34), GNNGraph(19, 44), GNNGraph(22, 50), GNNGraph(17, 38), GNNGraph(17, 38), GNNGraph(17, 38), GNNGraph(19, 42), GNNGraph(13, 28) … GNNGraph(19, 40), GNNGraph(13, 28), GNNGraph(22, 50), GNNGraph(14, 28), GNNGraph(23, 54), GNNGraph(20, 46), GNNGraph(13, 28), GNNGraph(26, 60), GNNGraph(17, 38), GNNGraph(12, 26)], Bool[1 1 … 0 0; 0 0 … 1 1]))</pre>
 
 <pre class='language-julia'><code class='language-julia'>begin
-    train_loader = DataLoader(train_data, batchsize=64, shuffle=true)
-    test_loader = DataLoader(test_data, batchsize=64, shuffle=false)
+    train_loader = DataLoader(train_data, batchsize = 64, shuffle = true)
+    test_loader = DataLoader(test_data, batchsize = 64, shuffle = false)
 end</code></pre>
 <pre class="code-output documenter-example-output" id="var-test_loader">1-element DataLoader(::Tuple{Vector{GNNGraph{Tuple{Vector{Int64}, Vector{Int64}, Nothing}}}, OneHotArrays.OneHotMatrix{UInt32, Vector{UInt32}}}, batchsize=64)
 with first element:
@@ -123,15 +123,15 @@ end</code></pre>
 <p>Since graphs in graph classification datasets are usually small, a good idea is to <strong>batch the graphs</strong> before inputting them into a Graph Neural Network to guarantee full GPU utilization. In the image or language domain, this procedure is typically achieved by <strong>rescaling</strong> or <strong>padding</strong> each example into a set of equally-sized shapes, and examples are then grouped in an additional dimension. The length of this dimension is then equal to the number of examples grouped in a mini-batch and is typically referred to as the <code>batchsize</code>.</p><p>However, for GNNs the two approaches described above are either not feasible or may result in a lot of unnecessary memory consumption. Therefore, GraphNeuralNetworks.jl opts for another approach to achieve parallelization across a number of examples. Here, adjacency matrices are stacked in a diagonal fashion (creating a giant graph that holds multiple isolated subgraphs), and node and target features are simply concatenated in the node dimension (the last dimension).</p><p>This procedure has some crucial advantages over other batching procedures:</p><ol><li><p>GNN operators that rely on a message passing scheme do not need to be modified since messages are not exchanged between two nodes that belong to different graphs.</p></li><li><p>There is no computational or memory overhead since adjacency matrices are saved in a sparse fashion holding only non-zero entries, <em>i.e.</em>, the edges.</p></li></ol><p>GraphNeuralNetworks.jl can <strong>batch multiple graphs into a single giant graph</strong>:</p></div>
 
 <pre class='language-julia'><code class='language-julia'>vec_gs, _ = first(train_loader)</code></pre>
-<pre class="code-output documenter-example-output" id="var-vec_gs">(GraphNeuralNetworks.GNNGraphs.GNNGraph{Tuple{Vector{Int64}, Vector{Int64}, Nothing}}[GNNGraph(15, 34), GNNGraph(14, 30), GNNGraph(23, 54), GNNGraph(22, 50), GNNGraph(14, 28), GNNGraph(23, 52), GNNGraph(20, 44), GNNGraph(23, 50), GNNGraph(12, 26), GNNGraph(21, 48) … GNNGraph(11, 22), GNNGraph(12, 26), GNNGraph(18, 38), GNNGraph(20, 46), GNNGraph(17, 36), GNNGraph(21, 44), GNNGraph(24, 50), GNNGraph(21, 44), GNNGraph(16, 34), GNNGraph(17, 38)], Bool[0 1 … 1 0; 1 0 … 0 1])</pre>
+<pre class="code-output documenter-example-output" id="var-vec_gs">(GraphNeuralNetworks.GNNGraphs.GNNGraph{Tuple{Vector{Int64}, Vector{Int64}, Nothing}}[GNNGraph(11, 22), GNNGraph(16, 36), GNNGraph(16, 34), GNNGraph(22, 50), GNNGraph(18, 40), GNNGraph(19, 40), GNNGraph(24, 50), GNNGraph(23, 54), GNNGraph(24, 50), GNNGraph(12, 26) … GNNGraph(20, 44), GNNGraph(11, 22), GNNGraph(22, 50), GNNGraph(13, 26), GNNGraph(16, 34), GNNGraph(10, 20), GNNGraph(28, 66), GNNGraph(19, 44), GNNGraph(14, 30), GNNGraph(18, 38)], Bool[1 0 … 1 0; 0 1 … 0 1])</pre>
 
 <pre class='language-julia'><code class='language-julia'>MLUtils.batch(vec_gs)</code></pre>
 <pre class="code-output documenter-example-output" id="var-hash102363">GNNGraph:
-    num_nodes = 1136
-    num_edges = 2496
+    num_nodes = 1191
+    num_edges = 2618
     num_graphs = 64
     ndata:
-        x =&gt; 7×1136 Matrix{Float32}</pre>
+        x =&gt; 7×1191 Matrix{Float32}</pre>
 
 
 <div class="markdown"><p>Each batched graph object is equipped with a <strong><code>graph_indicator</code> vector</strong>, which maps each node to its respective graph in the batch:</p><p class="tex">$$\textrm{graph-indicator} = [1, \ldots, 1, 2, \ldots, 2, 3, \ldots ]$$</p></div>
@@ -157,35 +157,35 @@ end</code></pre>
 <div class="markdown"><p>Here, we again make use of the <code>GCNConv</code> with <span class="tex">$\mathrm{ReLU}(x) = \max(x, 0)$</span> activation for obtaining localized node embeddings, before we apply our final classifier on top of a graph readout layer.</p><p>Let's train our network for a few epochs to see how well it performs on the training as well as test set:</p></div>
 
 <pre class='language-julia'><code class='language-julia'>function eval_loss_accuracy(model, data_loader, device)
-    loss = 0.
-    acc = 0.
+    loss = 0.0
+    acc = 0.0
     ntot = 0
     for (g, y) in data_loader
         g, y = MLUtils.batch(g) |&gt; device, y |&gt; device
         n = length(y)
         ŷ = model(g, g.ndata.x)
-        loss += logitcrossentropy(ŷ, y) * n
+        loss += logitcrossentropy(ŷ, y) * n
         acc += mean((ŷ .&gt; 0) .== y) * n
         ntot += n
-    end
-    return (loss = round(loss/ntot, digits=4), acc = round(acc*100/ntot, digits=2))
+    end
+    return (loss = round(loss / ntot, digits = 4),
+            acc = round(acc * 100 / ntot, digits = 2))
 end</code></pre>
 <pre class="code-output documenter-example-output" id="var-eval_loss_accuracy">eval_loss_accuracy (generic function with 1 method)</pre>
 
-<pre class='language-julia'><code class='language-julia'>function train!(model; epochs=200, η=1e-2, infotime=10)
+<pre class='language-julia'><code class='language-julia'>function train!(model; epochs = 200, η = 1e-2, infotime = 10)
     # device = Flux.gpu # uncomment this for GPU training
     device = Flux.cpu
     model = model |&gt; device
    ps = Flux.params(model)
    opt = Adam(1e-3)
-
 
    function report(epoch)
        train = eval_loss_accuracy(model, train_loader, device)
        test = eval_loss_accuracy(model, test_loader, device)
        @info (; epoch, train, test)
    end
-
+
    report(0)
    for epoch in 1:epochs
        for (g, y) in train_loader
@@ -202,7 +202,7 @@ end</code></pre>
 <pre class="code-output documenter-example-output" id="var-train!">train! (generic function with 1 method)</pre>
 
 <pre class='language-julia'><code class='language-julia'>begin
-    nin = 7
+    nin = 7
    nh = 64
    nout = 2
    model = create_model(nin, nh, nout)
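The diagonal batching described in this tutorial can be tried in isolation. A small sketch, assuming GraphNeuralNetworks.jl and MLUtils are installed; the graphs built with rand_graph here are illustrative only:

    using GraphNeuralNetworks, MLUtils

    g1 = rand_graph(4, 8)          # random graph: 4 nodes, 8 edges
    g2 = rand_graph(6, 12)
    g = MLUtils.batch([g1, g2])    # one giant graph holding two isolated subgraphs
    g.num_nodes                    # 10
    g.num_graphs                   # 2
    g.graph_indicator              # maps nodes to graphs: [1, 1, 1, 1, 2, 2, 2, 2, 2, 2]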
