refactor: use runic for formatting (#1255)
* refactor: use runic for formatting

* chore: add git hash to ignore revs

* ci: add runic CI

* refactor: format lib

* chore: housekeeping

* refactor: format all

* ci: run runic correctly
avik-pal authored Mar 7, 2025
1 parent e3a72b5 commit 0c8f1e2
Showing 236 changed files with 7,222 additions and 5,031 deletions.
8 changes: 0 additions & 8 deletions .JuliaFormatter.toml

This file was deleted.

4 changes: 2 additions & 2 deletions .buildkite/scripts/downstream.jl
@@ -14,10 +14,10 @@ withenv("JULIA_PKG_PRECOMPILE_AUTO" => 0, "GROUP" => group, "BACKEND_GROUP" => g
     try
         Pkg.develop(repo)
         println("+++ :julia: Running tests")
-        Pkg.test("$(repo)"; coverage="user")
+        Pkg.test("$(repo)"; coverage = "user")
     catch err
         err isa Pkg.Resolve.ResolverError || rethrow()
-        @info "Not compatible with this release. No problem." exception=err
+        @info "Not compatible with this release. No problem." exception = err
         exit(0)
     end
 end
2 changes: 2 additions & 0 deletions .git-blame-ignore-revs
@@ -0,0 +1,2 @@
+aaf856ed53e8eb526aa066aaebe59a25d7f8ddf4 # Switched formatting to Runic
+1c70cc899988e4d5958b1a99e2dc79fbfcd35018 # Runic formatting for `lib/`
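
For local `git blame` to actually skip these two formatting commits, each clone opts in once with `git config blame.ignoreRevsFile .git-blame-ignore-revs` (standard git behavior, not configured by this commit); GitHub's blame view picks the file up automatically.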
14 changes: 6 additions & 8 deletions .github/workflows/FormatPR.yml
@@ -7,23 +7,21 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v4
-      - name: Install JuliaFormatter and format
+      - name: Install Runic and format
         run: |
-          julia -e 'using Pkg; Pkg.add(PackageSpec(name="JuliaFormatter"))'
-          julia -e 'using JuliaFormatter; format(".")'
-      # https://github.com/marketplace/actions/create-pull-request
-      # https://github.com/peter-evans/create-pull-request#reference-example
+          julia -e 'using Pkg; Pkg.add(PackageSpec(name="Runic"))'
+          julia -e 'using Runic; exit(Runic.main(ARGS))' -- --inplace .
       - name: Create Pull Request
         id: cpr
         uses: peter-evans/create-pull-request@v7
         with:
           token: ${{ secrets.GITHUB_TOKEN }}
           commit-message: Format .jl files
-          title: 'Automatic JuliaFormatter.jl run'
-          branch: auto-juliaformatter-pr
+          title: 'Automatic Runic.jl run'
+          branch: auto-runic-pr
           delete-branch: true
           labels: formatting, automated pr, no changelog
       - name: Check outputs
         run: |
           echo "Pull Request Number - ${{ steps.cpr.outputs.pull-request-number }}"
-          echo "Pull Request URL - ${{ steps.cpr.outputs.pull-request-url }}"
+          echo "Pull Request URL - ${{ steps.cpr.outputs.pull-request-url }}"
9 changes: 6 additions & 3 deletions .github/workflows/QualityCheck.yml
@@ -3,11 +3,14 @@ name: Code Quality Check
 on: [pull_request]

 jobs:
-  code-style:
-    name: Format Suggestions
+  runic:
+    name: Julia Format Check
     runs-on: ubuntu-latest
     steps:
-      - uses: julia-actions/julia-format@v3
+      - uses: actions/checkout@v4
+      - uses: fredrikekre/runic-action@v1
+        with:
+          version: '1'

   typos-check:
     name: Spell Check with Typos
5 changes: 5 additions & 0 deletions .pre-commit-config.yaml
@@ -0,0 +1,5 @@
+repos:
+  - repo: https://github.com/fredrikekre/runic-pre-commit
+    rev: v1.0.0
+    hooks:
+      - id: runic
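
With this hook configuration in place, contributors install pre-commit separately (for example via `pip install pre-commit`), enable the hook once with `pre-commit install` in the repository root, and can format the whole tree on demand with `pre-commit run --all-files`.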
6 changes: 6 additions & 0 deletions README.md
@@ -243,6 +243,12 @@ Also consider starring [our github repo](https://github.com/LuxDL/Lux.jl/).
 This section is somewhat incomplete. You can contribute by contributing to finishing this
 section 😜.

+### 💎 Formatting (Runic)
+
+For instructions on how to use Runic, see
+[Runic's documentation](https://github.com/fredrikekre/Runic.jl). We recommend using
+pre-commit by invoking `pre-commit install` in the root of the repository.
+
 ### 🧪 Testing

 The full test of `Lux.jl` takes a long time, here's how to test a portion of the code.
6 changes: 4 additions & 2 deletions benchmarks/aggregate.jl
@@ -5,7 +5,8 @@ const NUM_CPU_THREADS = [1, 2, 4, 8]

 #Start with CPU benchmarks for 1 thread and add other results
 const CPU_results_1thread_filepath = joinpath(
-    dirname(@__FILE__), "results", "CPUbenchmarks1threads.json")
+    dirname(@__FILE__), "results", "CPUbenchmarks1threads.json"
+)
 @assert(ispath(CPU_results_1thread_filepath))
 const RESULTS = BenchmarkTools.load(CPU_results_1thread_filepath)[1]
 @assert RESULTS isa BenchmarkTools.BenchmarkGroup
@@ -54,4 +55,5 @@ for backend in GPU_BACKENDS
 end

 BenchmarkTools.save(
-    joinpath(dirname(@__FILE__), "results", "combinedbenchmarks.json"), RESULTS)
+    joinpath(dirname(@__FILE__), "results", "combinedbenchmarks.json"), RESULTS
+)
2 changes: 1 addition & 1 deletion benchmarks/asv.jl
@@ -9,7 +9,7 @@ SUITE["basics"] = BenchmarkGroup()

 SUITE["basics"]["overhead"] = @benchmarkable begin
     dense(x, ps, st)
-end setup=begin
+end setup = begin
     dense = Dense(2, 3)
     x = ones(Float32, 2, 3)
     ps, st = Lux.setup(Xoshiro(), dense)
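
Runic's change here is purely cosmetic: spaces around `=` in the macro's trailing `setup` keyword. For readers unfamiliar with the pattern, a minimal self-contained sketch of BenchmarkTools' `setup` block, independent of this repository:

using BenchmarkTools

# The setup block runs before each sample and is excluded from the timing.
b = @benchmarkable sum(abs2, x) setup = begin
    x = rand(Float32, 1000)
end
tune!(b)  # choose evaluation counts
run(b)    # collect and return the timing distribution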
16 changes: 8 additions & 8 deletions benchmarks/runbenchmarks.jl
@@ -44,35 +44,35 @@ end

 if BENCHMARK_GROUP == "AMDGPU"
     using AMDGPU # ] add AMDGPU to benchmarks/Project.toml
-    @info "Running AMDGPU benchmarks" maxlog=1
+    @info "Running AMDGPU benchmarks" maxlog = 1
     AMDGPU.versioninfo()
 elseif BENCHMARK_GROUP == "CUDA"
     using LuxCUDA # ] add LuxCUDA to benchmarks/Project.toml
-    @info "Running CUDA benchmarks" maxlog=1
+    @info "Running CUDA benchmarks" maxlog = 1
     CUDA.versioninfo()
 elseif BENCHMARK_GROUP == "Metal"
     using Metal # ] add Metal to benchmarks/Project.toml
-    @info "Running Metal benchmarks" maxlog=1
+    @info "Running Metal benchmarks" maxlog = 1
     Metal.versioninfo()
 elseif BENCHMARK_GROUP == "oneAPI"
     using oneAPI # ] add oneAPI to benchmarks/Project.toml
-    @info "Running oneAPI benchmarks" maxlog=1
+    @info "Running oneAPI benchmarks" maxlog = 1
     oneAPI.versioninfo()
 else
-    @info "Running CPU benchmarks with $(BENCHMARK_CPU_THREADS) thread(s)" maxlog=1
+    @info "Running CPU benchmarks with $(BENCHMARK_CPU_THREADS) thread(s)" maxlog = 1
 end

 # Main benchmark files
 include("setup.jl")
 setup_benchmarks!(SUITE, BENCHMARK_GROUP, BENCHMARK_CPU_THREADS)

-results = BenchmarkTools.run(SUITE; verbose=true)
+results = BenchmarkTools.run(SUITE; verbose = true)

 filepath = joinpath(dirname(@__FILE__), "results")
 mkpath(filepath)
 filename = BENCHMARK_GROUP == "CPU" ?
-           string("CPUbenchmarks", BENCHMARK_CPU_THREADS, "threads.json") :
-           string(BENCHMARK_GROUP, "benchmarks.json")
+    string("CPUbenchmarks", BENCHMARK_CPU_THREADS, "threads.json") :
+    string(BENCHMARK_GROUP, "benchmarks.json")
 BenchmarkTools.save(joinpath(filepath, filename), median(results))

 @info "Saved results to $(joinpath(filepath, filename))"
54 changes: 34 additions & 20 deletions benchmarks/setup.jl
@@ -80,55 +80,69 @@ function setup_benchmarks!(suite::BenchmarkGroup, backend::String, num_cpu_threa

     setup_groupnorm_benchmarks!(suite, cpu_or_gpu, final_backend, dev)

-    setup_batched_matmul_benchmarks!(suite, cpu_or_gpu, final_backend, dev)
+    return setup_batched_matmul_benchmarks!(suite, cpu_or_gpu, final_backend, dev)
 end

-function setup_forward_pass_benchmark!(suite::BenchmarkGroup, benchmark_name::String,
-        cpu_or_gpu::String, backend::String, model, x_dims, dev::AbstractDevice)
-    suite[benchmark_name]["forward"][cpu_or_gpu][backend] = @benchmarkable begin
+function setup_forward_pass_benchmark!(
+        suite::BenchmarkGroup, benchmark_name::String,
+        cpu_or_gpu::String, backend::String, model, x_dims, dev::AbstractDevice
+    )
+    return suite[benchmark_name]["forward"][cpu_or_gpu][backend] = @benchmarkable begin
         Lux.apply($model, x, ps, st_test)
         synchronize($dev)
-    end setup=begin
+    end setup = begin
         reclaim($dev)
         x, ps, st = general_setup($model, $x_dims) |> $dev
         st_test = Lux.testmode(st)
     end
 end

-function setup_reverse_pass_benchmark!(suite::BenchmarkGroup, benchmark_name::String,
-        cpu_or_gpu::String, backend::String, ad_backends, model, x_dims, dev::AbstractDevice)
+function setup_reverse_pass_benchmark!(
+        suite::BenchmarkGroup, benchmark_name::String,
+        cpu_or_gpu::String, backend::String, ad_backends, model, x_dims, dev::AbstractDevice
+    )
     for ad_backend in ad_backends
         setup_reverse_pass_benchmark!(
-            suite, benchmark_name, cpu_or_gpu, backend, ad_backend, model, x_dims, dev)
+            suite, benchmark_name, cpu_or_gpu, backend, ad_backend, model, x_dims, dev
+        )
     end
+    return
 end

-function setup_reverse_pass_benchmark!(suite::BenchmarkGroup, benchmark_name::String,
-        cpu_or_gpu::String, backend::String, ::AutoZygote, model, x_dims, dev::AbstractDevice)
-    suite[benchmark_name]["zygote"][cpu_or_gpu][backend] = @benchmarkable begin
+function setup_reverse_pass_benchmark!(
+        suite::BenchmarkGroup, benchmark_name::String,
+        cpu_or_gpu::String, backend::String, ::AutoZygote, model, x_dims, dev::AbstractDevice
+    )
+    return suite[benchmark_name]["zygote"][cpu_or_gpu][backend] = @benchmarkable begin
         Zygote.gradient(sumabs2, $model, x, ps, st)
         synchronize($dev)
-    end setup=begin
+    end setup = begin
         reclaim($dev)
         x, ps, st = general_setup($model, $x_dims) |> $dev
         Zygote.gradient(sumabs2, $model, x, ps, st) # Warm up
     end
 end

-function setup_reverse_pass_benchmark!(suite::BenchmarkGroup, benchmark_name::String,
-        cpu_or_gpu::String, backend::String, ::AutoEnzyme, model, x_dims, dev::AbstractDevice)
+function setup_reverse_pass_benchmark!(
+        suite::BenchmarkGroup, benchmark_name::String,
+        cpu_or_gpu::String, backend::String, ::AutoEnzyme, model, x_dims, dev::AbstractDevice
+    )
     cpu_or_gpu != "CPU" && return # TODO: Remove once Enzyme.jl supports GPUs

-    suite[benchmark_name]["enzyme"][cpu_or_gpu][backend] = @benchmarkable begin
-        Enzyme.autodiff(Enzyme.Reverse, sumabs2, Enzyme.Active, Enzyme.Const($model),
-            Enzyme.Duplicated(x, dx), Enzyme.Duplicated(ps, dps), Enzyme.Const(st))
+    return suite[benchmark_name]["enzyme"][cpu_or_gpu][backend] = @benchmarkable begin
+        Enzyme.autodiff(
+            Enzyme.Reverse, sumabs2, Enzyme.Active, Enzyme.Const($model),
+            Enzyme.Duplicated(x, dx), Enzyme.Duplicated(ps, dps), Enzyme.Const(st)
+        )
         synchronize($dev)
-    end setup=begin
+    end setup = begin
         reclaim($dev)
         x, ps, st = general_setup($model, $x_dims) |> $dev
         dps = Enzyme.make_zero(ps)
         dx = Enzyme.make_zero(x)
-        Enzyme.autodiff(Enzyme.Reverse, sumabs2, Enzyme.Active, Enzyme.Const($model),
-            Enzyme.Duplicated(x, dx), Enzyme.Duplicated(ps, dps), Enzyme.Const(st)) # Warm up
+        Enzyme.autodiff(
+            Enzyme.Reverse, sumabs2, Enzyme.Active, Enzyme.Const($model),
+            Enzyme.Duplicated(x, dx), Enzyme.Duplicated(ps, dps), Enzyme.Const(st)
+        ) # Warm up
     end
 end
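
Beyond re-wrapping long signatures, most of the churn in this file comes from Runic's explicit-return rule: the final expression of a function body gets a spelled-out `return`, and functions called only for their side effects end in a bare `return` (so they return `nothing`). An illustrative before/after, not code from this repository:

# Before: the result of the last expression is returned implicitly.
function describe(x)
    string("value = ", x)
end

# After Runic: the return is spelled out.
function describe(x)
    return string("value = ", x)
end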
33 changes: 23 additions & 10 deletions benchmarks/setups/layers.jl
@@ -1,28 +1,41 @@
-function setup_dense_benchmarks!(suite::BenchmarkGroup, cpu_or_gpu::String,
-        final_backend::String, dev::AbstractDevice)
+function setup_dense_benchmarks!(
+        suite::BenchmarkGroup, cpu_or_gpu::String,
+        final_backend::String, dev::AbstractDevice
+    )
     for n in (16, 128, 512), act in (identity, relu, gelu)
         layer = Dense(n => n, act)

-        setup_forward_pass_benchmark!(suite, "Dense($n => $n, $act)($n x 128)",
-            cpu_or_gpu, final_backend, layer, (n, 128), dev)
+        setup_forward_pass_benchmark!(
+            suite, "Dense($n => $n, $act)($n x 128)",
+            cpu_or_gpu, final_backend, layer, (n, 128), dev
+        )

-        setup_reverse_pass_benchmark!(suite, "Dense($n => $n, $act)($n x 128)",
-            cpu_or_gpu, final_backend, [AutoZygote(), AutoEnzyme()], layer, (n, 128), dev)
+        setup_reverse_pass_benchmark!(
+            suite, "Dense($n => $n, $act)($n x 128)",
+            cpu_or_gpu, final_backend, [AutoZygote(), AutoEnzyme()], layer, (n, 128), dev
+        )
     end
+    return
 end

-function setup_conv_benchmarks!(suite::BenchmarkGroup, cpu_or_gpu::String,
-        final_backend::String, dev::AbstractDevice)
+function setup_conv_benchmarks!(
+        suite::BenchmarkGroup, cpu_or_gpu::String,
+        final_backend::String, dev::AbstractDevice
+    )
     for ch in (2, 4, 32, 64), act in (identity, relu, gelu)
         layer = Conv((3, 3), ch => ch, act)

         setup_forward_pass_benchmark!(
             suite, "Conv((3, 3), $ch => $ch, $act)(64 x 64 x $ch x 128)",
-            cpu_or_gpu, final_backend, layer, (64, 64, ch, 128), dev)
+            cpu_or_gpu, final_backend, layer, (64, 64, ch, 128), dev
+        )

         setup_reverse_pass_benchmark!(
             suite, "Conv((3, 3), $ch => $ch, $act)(64 x 64 x $ch x 128)",
             cpu_or_gpu, final_backend, [AutoZygote(), AutoEnzyme()], layer, (
-                64, 64, ch, 128), dev)
+                64, 64, ch, 128,
+            ), dev
+        )
     end
+    return
 end
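
For context on what these benchmarks exercise: a Lux layer is a stateless description whose parameters and state are created separately. A minimal forward pass for the smallest Dense case above (standard Lux API; the RNG seed is arbitrary):

using Lux, Random

model = Dense(16 => 16, relu)
ps, st = Lux.setup(Xoshiro(0), model)    # parameters and state live outside the layer
x = ones(Float32, 16, 128)               # 16 features × a batch of 128
y, st_new = Lux.apply(model, x, ps, st)  # returns the output and the updated state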
