Format code with JuliaFormatter #256

Closed · wants to merge 9 commits · showing changes from 1 commit
41 changes: 41 additions & 0 deletions .github/workflows/format-check.yml
@@ -0,0 +1,41 @@
name: format-check

on:
push:
branches:
- 'master'
tags: '*'
pull_request:

jobs:
build:
runs-on: ${{ matrix.os }}
strategy:
matrix:
version:
- '1.9'
os:
- ubuntu-latest
arch:
- x64
steps:
- uses: actions/checkout@v4
- uses: julia-actions/setup-julia@v1
with:
version: ${{ matrix.version }}
arch: ${{ matrix.arch }}
- name: Install JuliaFormatter.jl
run: julia -e 'using Pkg; Pkg.add(PackageSpec(name="JuliaFormatter"))'
- name: Format code
run: julia -e 'using JuliaFormatter; format(".", verbose=true)'
- name: Format check
run: |
julia -e '
out = Cmd(`git diff --name-only`) |> read |> String
if out == ""
exit(0)
else
@error "Some files have not been formatted !!!"
write(stdout, out)
exit(1)
end'
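
For reviewers who want to reproduce this check locally, the job above amounts to roughly the following Julia session (a sketch, not part of the PR; it assumes JuliaFormatter can be added to the active environment and that the repository root is the working directory):

```julia
# Local approximation of the format-check job above.
import Pkg
Pkg.add("JuliaFormatter")            # mirrors the "Install JuliaFormatter.jl" step

using JuliaFormatter
format("."; verbose = true)          # rewrites any unformatted files in place

# Mirror the "Format check" step: fail if formatting changed anything.
changed = read(`git diff --name-only`, String)
if isempty(changed)
    println("All files already formatted.")
else
    @error "Some files have not been formatted" files = changed
    exit(1)
end
```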
51 changes: 51 additions & 0 deletions .github/workflows/format-pr.yml
@@ -0,0 +1,51 @@
name: format-pr
on:
schedule:
- cron: '0 0 * * *'
workflow_dispatch:

jobs:
build:
runs-on: ubuntu-latest
strategy:
matrix:
branch:
- master

steps:
- uses: julia-actions/setup-julia@v1
with:
version: '1.9'

- uses: actions/checkout@v4
with:
ref: ${{ matrix.branch }}

- name: Install JuliaFormatter.jl
shell: julia --color=yes {0}
run: |
import Pkg
Pkg.add("JuliaFormatter")

- name: Format code
shell: julia --color=yes {0}
run: |
using JuliaFormatter
format(".")

- name: Create Pull Request
id: cpr
uses: peter-evans/create-pull-request@v3
with:
token: ${{ secrets.GITHUB_TOKEN }}
commit-message: Format code
title: 'Format code of branch "${{ matrix.branch }}"'
branch: format-${{ matrix.branch }}
delete-branch: true
labels: format
reviewers: mofeing

- name: Check outputs
run: |
echo "Pull Request Number - ${{ steps.cpr.outputs.pull-request-number }}"
echo "Pull Request URL - ${{ steps.cpr.outputs.pull-request-url }}"
27 changes: 17 additions & 10 deletions benchmark/benchmark.jl
@@ -51,13 +51,21 @@ function grad_benchmark_driver!(out, f, x)
gc()
end
else
println("skipping compiled GradientTape benchmark because the tape is too long ($(length(tp.tape)) elements)")
println(
"skipping compiled GradientTape benchmark because the tape is too long ($(length(tp.tape)) elements)",
)
end
end

####################################################################

rosenbrock(x) = sum(map(ReverseDiff.@forward((i, j) -> (1 - j)^2 + 100*(i - j^2)^2), x[2:end], x[1:end-1]))
rosenbrock(x) = sum(
map(
ReverseDiff.@forward((i, j) -> (1 - j)^2 + 100 * (i - j^2)^2),
x[2:end],
x[1:end-1],
),
)

# function rosenbrock(x)
# i = x[2:end]
@@ -82,16 +90,15 @@ grad_benchmark_driver!(out, rosenbrock, x)
####################################################################

function ackley(x::AbstractVector)
a, b, c = 20.0, -0.2, 2.0*π
a, b, c = 20.0, -0.2, 2.0 * π
len_recip = inv(length(x))
sum_sqrs = zero(eltype(x))
sum_cos = sum_sqrs
for i in x
sum_cos += cos(c*i)
sum_cos += cos(c * i)
sum_sqrs += i^2
end
return (-a * exp(b * sqrt(len_recip*sum_sqrs)) -
exp(len_recip*sum_cos) + a + e)
return (-a * exp(b * sqrt(len_recip * sum_sqrs)) - exp(len_recip * sum_cos) + a + e)
end

x = rand(100000)
@@ -104,30 +111,30 @@ function generate_matrix_test(n)
return x -> begin
@assert length(x) == 2n^2 + n
a = reshape(x[1:n^2], n, n)
b = reshape(x[n^2 + 1:2n^2], n, n)
b = reshape(x[n^2+1:2n^2], n, n)
return trace(log.((a * b) + a - b))
end
end

n = 100
matrix_test = generate_matrix_test(n)
x = collect(1.0:(2n^2 + n))
x = collect(1.0:(2n^2+n))
out = zeros(x)
grad_benchmark_driver!(out, matrix_test, x)

####################################################################

relu(x) = log.(1.0 .+ exp.(x))

ReverseDiff.@forward sigmoid(n) = 1. / (1. + exp(-n))
ReverseDiff.@forward sigmoid(n) = 1.0 / (1.0 + exp(-n))

function neural_net(w1, w2, w3, x1)
x2 = relu(w1 * x1)
x3 = relu(w2 * x2)
return sigmoid(dot(w3, x3))
end

xs = (randn(10,10), randn(10,10), randn(10), rand(10))
xs = (randn(10, 10), randn(10, 10), randn(10), rand(10))
outs = map(similar, xs)
grad_benchmark_driver!(outs, neural_net, xs)
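
Aside from the whitespace changes, this benchmark relies on `ReverseDiff.@forward` (used in the reformatted `rosenbrock` and `sigmoid` definitions above) to differentiate small scalar kernels with forward mode instead of recording them elementwise on the tape. A minimal, self-contained sketch of that usage (illustrative only, not taken from this PR):

```julia
using ReverseDiff

# Differentiate this scalar kernel with forward mode rather than taping it.
ReverseDiff.@forward shifted_square(x) = (x + 1)^2

f(v) = sum(map(shifted_square, v))

v = rand(3)
ReverseDiff.gradient(f, v)   # ≈ 2 .* (v .+ 1)
```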

18 changes: 8 additions & 10 deletions docs/make.jl
@@ -1,20 +1,18 @@
using Documenter, ReverseDiff

makedocs(;
modules=[ReverseDiff],
sitename="ReverseDiff.jl",
format=Documenter.HTML(;
prettyurls=get(ENV, "CI", nothing) == "true",
canonical="http://www.juliadiff.org/ReverseDiff.jl",
modules = [ReverseDiff],
sitename = "ReverseDiff.jl",
format = Documenter.HTML(;
prettyurls = get(ENV, "CI", nothing) == "true",
canonical = "http://www.juliadiff.org/ReverseDiff.jl",
),
pages=[
pages = [
"Home" => "index.md",
"Limitation of ReverseDiff" => "limits.md",
"API" => "api.md",
],
checkdocs=:exports,
checkdocs = :exports,
)

deploydocs(;
repo="github.com/JuliaDiff/ReverseDiff.jl.git", push_preview=true,
)
deploydocs(; repo = "github.com/JuliaDiff/ReverseDiff.jl.git", push_preview = true)
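
Unrelated to the keyword-spacing changes themselves, a local docs build for a Documenter setup like this usually looks like the following. This is an assumption about the repository layout (a `docs/Project.toml` environment is standard but not shown in this diff):

```julia
# Hypothetical local docs build, run from the repository root.
using Pkg
Pkg.activate("docs")        # assumes docs/Project.toml exists
Pkg.develop(path = ".")     # make the local ReverseDiff checkout visible to the docs env
Pkg.instantiate()
include(joinpath("docs", "make.jl"))
```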
22 changes: 18 additions & 4 deletions src/ReverseDiff.jl
@@ -23,11 +23,25 @@ using ChainRulesCore

# Not all operations will be valid over all of these types, but that's okay; such cases
# will simply error when they hit the original operation in the overloaded definition.
const ARRAY_TYPES = (:AbstractArray, :AbstractVector, :AbstractMatrix, :Array, :Vector, :Matrix)
const REAL_TYPES = (:Bool, :Integer, :(Irrational{:ℯ}), :(Irrational{:π}), :Rational, :BigFloat, :BigInt, :AbstractFloat, :Real, :Dual)
const ARRAY_TYPES =
(:AbstractArray, :AbstractVector, :AbstractMatrix, :Array, :Vector, :Matrix)
const REAL_TYPES = (
:Bool,
:Integer,
:(Irrational{:ℯ}),
:(Irrational{:π}),
:Rational,
:BigFloat,
:BigInt,
:AbstractFloat,
:Real,
:Dual,
)

const SKIPPED_UNARY_SCALAR_FUNCS = Symbol[:isinf, :isnan, :isfinite, :iseven, :isodd, :isreal, :isinteger]
const SKIPPED_BINARY_SCALAR_FUNCS = Symbol[:isequal, :isless, :<, :>, :(==), :(!=), :(<=), :(>=)]
const SKIPPED_UNARY_SCALAR_FUNCS =
Symbol[:isinf, :isnan, :isfinite, :iseven, :isodd, :isreal, :isinteger]
const SKIPPED_BINARY_SCALAR_FUNCS =
Symbol[:isequal, :isless, :<, :>, :(==), :(!=), :(<=), :(>=)]

# Some functions with derivatives in DiffRules are not supported
# For instance, ReverseDiff does not support functions with complex results and derivatives
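
For context (the pattern itself is not shown in this diff): symbol tuples like `ARRAY_TYPES` and `REAL_TYPES` are typically interpolated into `@eval` loops to stamp out one method per type name. A generic sketch of that metaprogramming pattern, not ReverseDiff's actual source:

```julia
# Hypothetical illustration of the symbol-tuple + @eval pattern.
const DEMO_REAL_TYPES = (:Bool, :Integer, :AbstractFloat, :Real)

for R in DEMO_REAL_TYPES
    # Generates demo_handles(::Bool), demo_handles(::Integer), etc.
    @eval demo_handles(::$R) = true
end

demo_handles(1.0)   # true (dispatches on the AbstractFloat method)
```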
56 changes: 45 additions & 11 deletions src/api/Config.jl
@@ -32,21 +32,31 @@ the target function's output.

See `ReverseDiff.gradient` for a description of acceptable types for `input`.
"""
GradientConfig(input::AbstractArray{T}, tp::InstructionTape = InstructionTape()) where {T} = GradientConfig(input, T, tp)
GradientConfig(input::AbstractArray{T}, tp::InstructionTape = InstructionTape()) where {T} =
GradientConfig(input, T, tp)

GradientConfig(input::Tuple, tp::InstructionTape = InstructionTape()) = GradientConfig(input, eltype(first(input)), tp)
GradientConfig(input::Tuple, tp::InstructionTape = InstructionTape()) =
GradientConfig(input, eltype(first(input)), tp)

"""
ReverseDiff.GradientConfig(input, ::Type{D}, tp::InstructionTape = InstructionTape())

Like `GradientConfig(input, tp)`, except the provided type `D` is assumed to be the element
type of the target function's output.
"""
function GradientConfig(input::Tuple, ::Type{D}, tp::InstructionTape = InstructionTape()) where D
function GradientConfig(
input::Tuple,
::Type{D},
tp::InstructionTape = InstructionTape(),
) where {D}
return _GradientConfig(map(x -> track(similar(x), D, tp), input), tp)
end

function GradientConfig(input::AbstractArray, ::Type{D}, tp::InstructionTape = InstructionTape()) where D
function GradientConfig(
input::AbstractArray,
::Type{D},
tp::InstructionTape = InstructionTape(),
) where {D}
return _GradientConfig(track(similar(input), D, tp), tp)
end

@@ -63,7 +73,8 @@ struct JacobianConfig{I,O} <: AbstractConfig
end

# "private" convienence constructor
_JacobianConfig(input::I, output::O, tape::InstructionTape) where {I,O} = JacobianConfig{I,O}(input, output, tape)
_JacobianConfig(input::I, output::O, tape::InstructionTape) where {I,O} =
JacobianConfig{I,O}(input, output, tape)

"""
ReverseDiff.JacobianConfig(input, tp::InstructionTape = InstructionTape())
@@ -99,14 +110,22 @@ stored or modified in any way.

See `ReverseDiff.jacobian` for a description of acceptable types for `input`.
"""
function JacobianConfig(output::AbstractArray{D}, input::Tuple, tp::InstructionTape = InstructionTape()) where D
function JacobianConfig(
output::AbstractArray{D},
input::Tuple,
tp::InstructionTape = InstructionTape(),
) where {D}
cfg_input = map(x -> track(similar(x), D, tp), input)
cfg_output = track!(similar(output, TrackedReal{D,D,Nothing}), output, tp)
return _JacobianConfig(cfg_input, cfg_output, tp)
end

# we dispatch on V<:Real here because InstructionTape is actually also an AbstractArray
function JacobianConfig(output::AbstractArray{D}, input::AbstractArray{V}, tp::InstructionTape = InstructionTape()) where {D,V<:Real}
function JacobianConfig(
output::AbstractArray{D},
input::AbstractArray{V},
tp::InstructionTape = InstructionTape(),
) where {D,V<:Real}
cfg_input = track(similar(input), D, tp)
cfg_output = track!(similar(output, TrackedReal{D,D,Nothing}), output, tp)
return _JacobianConfig(cfg_input, cfg_output, tp)
@@ -117,7 +136,8 @@ end

A convenience method for `JacobianConfig(DiffResults.value(result), input, tp)`.
"""
JacobianConfig(result::DiffResult, input, tp::InstructionTape) = JacobianConfig(DiffResults.value(result), input, tp)
JacobianConfig(result::DiffResult, input, tp::InstructionTape) =
JacobianConfig(DiffResults.value(result), input, tp)

#################
# HessianConfig #
@@ -139,7 +159,11 @@ Note that `input` is only used for type and shape information; it is not stored
in any way. It is assumed that the element type of `input` is same as the element type of
the target function's output.
"""
function HessianConfig(input::AbstractArray, gtp::InstructionTape = InstructionTape(), jtp::InstructionTape = InstructionTape())
function HessianConfig(
input::AbstractArray,
gtp::InstructionTape = InstructionTape(),
jtp::InstructionTape = InstructionTape(),
)
return HessianConfig(input, eltype(input), gtp, jtp)
end

@@ -149,7 +173,12 @@
Like `HessianConfig(input, tp)`, except the provided type `D` is assumed to be the element
type of the target function's output.
"""
function HessianConfig(input::AbstractArray, ::Type{D}, gtp::InstructionTape = InstructionTape(), jtp::InstructionTape = InstructionTape()) where D
function HessianConfig(
input::AbstractArray,
::Type{D},
gtp::InstructionTape = InstructionTape(),
jtp::InstructionTape = InstructionTape(),
) where {D}
jcfg = JacobianConfig(input, D, jtp)
gcfg = GradientConfig(jcfg.input, gtp)
return HessianConfig(gcfg, jcfg)
@@ -164,7 +193,12 @@ buffers.
Note that `result` and `input` are only used for type and shape information; they are not
stored or modified in any way.
"""
function HessianConfig(result::DiffResult, input::AbstractArray, gtp::InstructionTape = InstructionTape(), jtp::InstructionTape = InstructionTape())
function HessianConfig(
result::DiffResult,
input::AbstractArray,
gtp::InstructionTape = InstructionTape(),
jtp::InstructionTape = InstructionTape(),
)
jcfg = JacobianConfig(DiffResults.gradient(result), input, jtp)
gcfg = GradientConfig(jcfg.input, gtp)
return HessianConfig(gcfg, jcfg)
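
To put these constructors in context, here is a typical, illustrative use of the config types touched in this file; the target function and inputs are made up for the example, and the calls follow the documented `ReverseDiff.gradient`/`ReverseDiff.hessian` API:

```julia
using ReverseDiff

f(x) = sum(abs2, x)                     # hypothetical target function
x = rand(4)

gcfg = ReverseDiff.GradientConfig(x)    # preallocates tape/buffers matching `x`
g = ReverseDiff.gradient(f, x, gcfg)    # ≈ 2 .* x

hcfg = ReverseDiff.HessianConfig(x)
H = ReverseDiff.hessian(f, x, hcfg)     # ≈ 2 * I for this f
```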