Commit 1496868

Merge branch 'main' into breaking

2 parents 061acbe + e4fa7f2

33 files changed: +724 −469 lines

.github/workflows/Benchmarking.yml (new file, +76)

````yaml
name: Benchmarking

on:
  pull_request:

jobs:
  benchmarks:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout Repository
        uses: actions/checkout@v4
        with:
          ref: ${{ github.event.pull_request.head.sha }}

      - name: Set up Julia
        uses: julia-actions/setup-julia@v2
        with:
          version: '1'

      - name: Install Dependencies
        run: julia --project=benchmarks/ -e 'using Pkg; Pkg.instantiate()'

      - name: Run Benchmarks
        id: run_benchmarks
        run: |
          # Capture version info into a variable, print it, and set it as an env var for later steps
          version_info=$(julia -e 'using InteractiveUtils; versioninfo()')
          echo "$version_info"
          echo "VERSION_INFO<<EOF" >> $GITHUB_ENV
          echo "$version_info" >> $GITHUB_ENV
          echo "EOF" >> $GITHUB_ENV

          # Capture benchmark output into a variable
          echo "Running Benchmarks..."
          benchmark_output=$(julia --project=benchmarks benchmarks/benchmarks.jl)

          # Print benchmark results directly to the workflow log
          echo "Benchmark Results:"
          echo "$benchmark_output"

          # Set the benchmark output as an env var for later steps
          echo "BENCHMARK_OUTPUT<<EOF" >> $GITHUB_ENV
          echo "$benchmark_output" >> $GITHUB_ENV
          echo "EOF" >> $GITHUB_ENV

          # Get the current commit SHA of DynamicPPL
          DPPL_COMMIT_SHA=$(git rev-parse HEAD)
          echo "DPPL_COMMIT_SHA=$DPPL_COMMIT_SHA" >> $GITHUB_ENV

          COMMIT_URL="https://github.com/${{ github.repository }}/commit/$DPPL_COMMIT_SHA"
          echo "DPPL_COMMIT_URL=$COMMIT_URL" >> $GITHUB_ENV

      - name: Find Existing Comment
        uses: peter-evans/find-comment@v3
        id: find_comment
        with:
          issue-number: ${{ github.event.pull_request.number }}
          comment-author: github-actions[bot]

      - name: Post Benchmark Results as PR Comment
        uses: peter-evans/create-or-update-comment@v4
        with:
          issue-number: ${{ github.event.pull_request.number }}
          body: |
            ## Benchmark Report for Commit [`${{ env.DPPL_COMMIT_SHA }}`](${{ env.DPPL_COMMIT_URL }})
            ### Computer Information
            ```
            ${{ env.VERSION_INFO }}
            ```
            ### Benchmark Results
            ```
            ${{ env.BENCHMARK_OUTPUT }}
            ```
          comment-id: ${{ steps.find_comment.outputs.comment-id }}
          edit-mode: replace
````
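A note on the `run` step above: the `VERSION_INFO<<EOF` / `EOF` pattern is GitHub Actions' delimiter syntax for writing multiline values to `$GITHUB_ENV`. Without it, only a single line of the captured `versioninfo()` and benchmark output would survive into the comment-posting step.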

.github/workflows/CI.yml (+2)

```diff
@@ -70,6 +70,8 @@ jobs:
         env:
           GROUP: ${{ matrix.test_group }}
           JULIA_NUM_THREADS: ${{ matrix.runner.num_threads }}
+          # Only run Aqua tests on latest version
+          AQUA: ${{ matrix.runner.version == '1' && 'true' || 'false' }}
 
       - uses: julia-actions/julia-processcoverage@v1
```
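Presumably the test suite reads this `AQUA` variable to gate the Aqua.jl checks. A minimal sketch of such a gate follows; the actual `runtests.jl` wiring is not part of this diff, so the structure here is illustrative only:

```julia
# Illustrative sketch; not the actual test-suite code from this commit.
using Test

# Run the Aqua.jl quality-assurance checks only when CI sets AQUA=true,
# i.e. on the latest Julia version per the matrix expression above.
if get(ENV, "AQUA", "false") == "true"
    using Aqua, DynamicPPL
    @testset "Aqua" begin
        Aqua.test_all(DynamicPPL)
    end
end
```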

.github/workflows/Format.yml (+2, −15)

```diff
@@ -17,20 +17,7 @@ concurrency:
 jobs:
   format:
     runs-on: ubuntu-latest
+
     steps:
-      - uses: actions/checkout@v4
-      - uses: julia-actions/setup-julia@v2
-        with:
-          version: 1
       - name: Format code
-        run: |
-          using Pkg
-          Pkg.add(; name="JuliaFormatter", uuid="98e50ef6-434e-11e9-1051-2b60c6c9e899")
-          using JuliaFormatter
-          format("."; verbose=true)
-        shell: julia --color=yes {0}
-      - uses: reviewdog/action-suggester@v1
-        if: github.event_name == 'pull_request'
-        with:
-          tool_name: JuliaFormatter
-          fail_on_error: true
+        uses: TuringLang/actions/Format@main
```
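The formatting itself can still be reproduced locally with JuliaFormatter; the sketch below is just the call the deleted inline step used to make (how you install the package is up to you):

```julia
using Pkg
Pkg.add("JuliaFormatter")  # one-off install; a shared environment also works

using JuliaFormatter

# Format the whole repository in place, printing each file as it is processed.
# This is the same call the removed workflow step ran.
format("."; verbose=true)
```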

HISTORY.md (+23)

```diff
@@ -1,5 +1,28 @@
 # DynamicPPL Changelog
 
+## 0.35.4
+
+Fixed a type instability in an implementation of `with_logabsdet_jacobian`, which resulted in the log-Jacobian returned being an `Int` in some cases and a float in others.
+This resolves an Enzyme.jl error on a number of models.
+More generally, this version also changes the types of various log probabilities to be more consistent with one another.
+Although we aren't fully there yet, our eventual aim is that log probabilities will generally default to Float64 on 64-bit systems, and Float32 on 32-bit systems.
+If you run into any issues with these types, please get in touch.
+
+## 0.35.3
+
+`model | (@varname(x) => 1.0, @varname(y) => 2.0)` now works.
+Previously, this would throw a `MethodError` if the tuple had more than one element.
+
+## 0.35.2
+
+`unflatten(::VarInfo, params)` now works with params that have non-float types (such as `Int` or `Bool`).
+
+## 0.35.1
+
+`subset(::AbstractVarInfo, ::AbstractVector{<:VarName})` now preserves the ordering of the varnames in the original varinfo argument.
+Previously, this would select the varnames according to their order in the second argument.
+This fixes an upstream Turing.jl issue with Gibbs sampling when a component sampler was assigned multiple variables.
+
 ## 0.35.0
 
 **Breaking changes**
```
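The 0.35.3 fix is easiest to see with a concrete model; a minimal sketch (the model is illustrative, the conditioning call is the one from the changelog entry):

```julia
using DynamicPPL, Distributions

@model function demo()
    x ~ Normal()
    y ~ Normal(x)
    return (x, y)
end

# Condition on several variables at once with a tuple of VarName => value pairs.
# Before 0.35.3, tuples with more than one element threw a MethodError.
conditioned = demo() | (@varname(x) => 1.0, @varname(y) => 2.0)
```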

Project.toml (+2, −1)

```diff
@@ -16,7 +16,6 @@ DifferentiationInterface = "a0c0ee7d-e4b9-4e03-894e-1c5f64a51d63"
 Distributions = "31c24e10-a181-5473-b8eb-7969acd0382f"
 DocStringExtensions = "ffbed154-4ef7-542d-bbb7-c09d3a79fcae"
 InteractiveUtils = "b77e0a4c-d291-57a0-90e8-8db25a27a240"
-KernelAbstractions = "63c18a36-062a-441e-b654-da1e3ab1ce7c"
 LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
 LogDensityProblems = "6fdf6af0-433a-55f7-b3ed-c6c6e0b8df7c"
 MacroTools = "1914dd2f-81c6-5fcd-8719-6d5c9610ff09"
@@ -30,6 +29,7 @@ ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4"
 EnzymeCore = "f151be2c-9106-41f4-ab19-57ee4f262869"
 ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210"
 JET = "c3a54625-cd67-489e-a8e7-0a5a0ff4e31b"
+KernelAbstractions = "63c18a36-062a-441e-b654-da1e3ab1ce7c"
 MCMCChains = "c7f686f2-ff18-58e9-bc7b-31028e88f75d"
 Mooncake = "da2b9cff-9c12-43a0-ae48-6db2b0edb7d6"
@@ -56,6 +56,7 @@ Distributions = "0.25"
 DocStringExtensions = "0.9"
 EnzymeCore = "0.6 - 0.8"
 ForwardDiff = "0.10.12"
+InteractiveUtils = "1"
 JET = "0.9"
 KernelAbstractions = "0.9.33"
 LinearAlgebra = "1.6"
```

benchmarks/Project.toml (+17, −5)

```diff
@@ -3,11 +3,23 @@ uuid = "d94a1522-c11e-44a7-981a-42bf5dc1a001"
 version = "0.1.0"
 
 [deps]
+ADTypes = "47edcb42-4c32-4615-8424-f2b9edc5f35b"
 BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf"
-DiffUtils = "8294860b-85a6-42f8-8c35-d911f667b5f6"
 Distributions = "31c24e10-a181-5473-b8eb-7969acd0382f"
 DynamicPPL = "366bfd00-2699-11ea-058f-f148b4cae6d8"
-LibGit2 = "76f85450-5226-5b5a-8eaa-529ad045b433"
-Markdown = "d6f4376e-aef5-505a-96c1-9c027394607a"
-Pkg = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f"
-Weave = "44d3d7a6-8a23-5bf8-98c5-b353f8df5ec9"
+ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210"
+LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
+LogDensityProblems = "6fdf6af0-433a-55f7-b3ed-c6c6e0b8df7c"
+Mooncake = "da2b9cff-9c12-43a0-ae48-6db2b0edb7d6"
+PrettyTables = "08abe8d2-0d0c-5749-adfa-8a2ac140af0d"
+ReverseDiff = "37e2e3b7-166d-5795-8a7a-e32c996b4267"
+StableRNGs = "860ef19b-820b-49d6-a774-d7a799459cd3"
+
+[compat]
+ADTypes = "1.14.0"
+BenchmarkTools = "1.6.0"
+Distributions = "0.25.117"
+ForwardDiff = "0.10.38"
+LogDensityProblems = "2.1.2"
+PrettyTables = "2.4.0"
+ReverseDiff = "1.15.3"
```

benchmarks/README.md (+2, −24)

````diff
@@ -1,27 +1,5 @@
-To run the benchmarks, simply do:
+To run the benchmarks, run this from the root directory of the repository:
 
 ```sh
-julia --project -e 'using DynamicPPLBenchmarks; weave_benchmarks();'
-```
-
-```julia
-julia> @doc weave_benchmarks
-  weave_benchmarks(input="benchmarks.jmd"; kwargs...)
-
-  Weave benchmarks present in benchmarks.jmd into a single file.
-
-  Keyword arguments
-  ≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡≡
-
-    • benchmarkbody: JMD-file to be rendered for each model.
-
-    • include_commit_id=false: specify whether to include commit-id in the default name.
-
-    • name: the name of directory in results/ to use as output directory.
-
-    • name_old=nothing: if specified, comparisons of current run vs. the run pointed to by name_old will be included in the generated document.
-
-    • include_typed_code=false: if true, output of code_typed for the evaluator of the model will be included in the weaved document.
-
-    • Rest of the passed kwargs will be passed on to Weave.weave.
+julia --project=benchmarks benchmarks/benchmarks.jl
 ```
````

benchmarks/benchmark_body.jmd (−49)

This file was deleted.

benchmarks/benchmarks.jl (new file, +103)

```julia
using Pkg
# To ensure we benchmark the local version of DynamicPPL, dev the folder above.
Pkg.develop(; path=joinpath(@__DIR__, ".."))

using DynamicPPLBenchmarks: Models, make_suite, model_dimension
using BenchmarkTools: @benchmark, median, run
using PrettyTables: PrettyTables, ft_printf
using StableRNGs: StableRNG

rng = StableRNG(23)

# Create DynamicPPL.Model instances to run benchmarks on.
smorgasbord_instance = Models.smorgasbord(randn(rng, 100), randn(rng, 100))
loop_univariate1k, multivariate1k = begin
    data_1k = randn(rng, 1_000)
    loop = Models.loop_univariate(length(data_1k)) | (; o=data_1k)
    multi = Models.multivariate(length(data_1k)) | (; o=data_1k)
    loop, multi
end
loop_univariate10k, multivariate10k = begin
    data_10k = randn(rng, 10_000)
    loop = Models.loop_univariate(length(data_10k)) | (; o=data_10k)
    multi = Models.multivariate(length(data_10k)) | (; o=data_10k)
    loop, multi
end
lda_instance = begin
    w = [1, 2, 3, 2, 1, 1]
    d = [1, 1, 1, 2, 2, 2]
    Models.lda(2, d, w)
end

# Specify the combinations to test:
# (Model Name, model instance, VarInfo choice, AD backend, linked)
chosen_combinations = [
    (
        "Simple assume observe",
        Models.simple_assume_observe(randn(rng)),
        :typed,
        :forwarddiff,
        false,
    ),
    ("Smorgasbord", smorgasbord_instance, :typed, :forwarddiff, false),
    ("Smorgasbord", smorgasbord_instance, :simple_namedtuple, :forwarddiff, true),
    ("Smorgasbord", smorgasbord_instance, :untyped, :forwarddiff, true),
    ("Smorgasbord", smorgasbord_instance, :simple_dict, :forwarddiff, true),
    ("Smorgasbord", smorgasbord_instance, :typed, :reversediff, true),
    ("Smorgasbord", smorgasbord_instance, :typed, :mooncake, true),
    ("Loop univariate 1k", loop_univariate1k, :typed, :mooncake, true),
    ("Multivariate 1k", multivariate1k, :typed, :mooncake, true),
    ("Loop univariate 10k", loop_univariate10k, :typed, :mooncake, true),
    ("Multivariate 10k", multivariate10k, :typed, :mooncake, true),
    ("Dynamic", Models.dynamic(), :typed, :mooncake, true),
    ("Submodel", Models.parent(randn(rng)), :typed, :mooncake, true),
    ("LDA", lda_instance, :typed, :reversediff, true),
]

# Time running a model-like function that does not use DynamicPPL, as a reference point.
# Eval timings will be relative to this.
reference_time = begin
    obs = randn(rng)
    median(@benchmark Models.simple_assume_observe_non_model(obs)).time
end

results_table = Tuple{String,Int,String,String,Bool,Float64,Float64}[]

for (model_name, model, varinfo_choice, adbackend, islinked) in chosen_combinations
    @info "Running benchmark for $model_name"
    suite = make_suite(model, varinfo_choice, adbackend, islinked)
    results = run(suite)
    eval_time = median(results["evaluation"]).time
    relative_eval_time = eval_time / reference_time
    ad_eval_time = median(results["gradient"]).time
    relative_ad_eval_time = ad_eval_time / eval_time
    push!(
        results_table,
        (
            model_name,
            model_dimension(model, islinked),
            string(adbackend),
            string(varinfo_choice),
            islinked,
            relative_eval_time,
            relative_ad_eval_time,
        ),
    )
end

table_matrix = hcat(Iterators.map(collect, zip(results_table...))...)
header = [
    "Model",
    "Dimension",
    "AD Backend",
    "VarInfo Type",
    "Linked",
    "Eval Time / Ref Time",
    "AD Time / Eval Time",
]
PrettyTables.pretty_table(
    table_matrix;
    header=header,
    tf=PrettyTables.tf_markdown,
    formatters=ft_printf("%.1f", [6, 7]),
)
```
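For quick local iteration it can be handy to time a single configuration instead of the whole grid. A minimal sketch reusing the same entry points as above (`make_suite` and the `"evaluation"`/`"gradient"` suite keys), assuming the `benchmarks/` environment is active:

```julia
using DynamicPPLBenchmarks: Models, make_suite
using BenchmarkTools: median, run

# One combination from the grid above: typed VarInfo, ForwardDiff, unlinked.
model = Models.smorgasbord(randn(100), randn(100))
suite = make_suite(model, :typed, :forwarddiff, false)
results = run(suite)

@show median(results["evaluation"]).time  # nanoseconds, per BenchmarkTools convention
@show median(results["gradient"]).time
```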
