licenses (sequence, 1-3 items) | version (677 distinct values) | tree_hash (40 chars) | path (1 distinct value) | type (2 distinct values) | size (2-8 chars) | text (25 - 67.1M chars) | package_name (2-41 chars) | repo (33-86 chars)
---|---|---|---|---|---|---|---|---|
["MIT"] | 0.4.0 | 80c3e8639e3353e5d2912fb3a1916b8455e2494b | code | 1034 |
# Use
#
# DOCUMENTER_DEBUG=true julia --color=yes make.jl local [nonstrict] [fixdoctests]
#
# for local builds.
using Documenter
using DensityInterface
# Doctest setup
DocMeta.setdocmeta!(
DensityInterface,
:DocTestSetup,
quote
using DensityInterface
object = logfuncdensity(x -> -x^2)
log_f = logdensityof(object)
f = densityof(object)
x = 4
end;
recursive=true,
)
makedocs(
sitename = "DensityInterface",
modules = [DensityInterface],
format = Documenter.HTML(
prettyurls = !("local" in ARGS),
canonical = "https://JuliaMath.github.io/DensityInterface.jl/stable/"
),
pages = [
"Home" => "index.md",
"API" => "api.md",
"LICENSE" => "LICENSE.md",
],
doctest = ("fixdoctests" in ARGS) ? :fix : true,
linkcheck = !("nonstrict" in ARGS),
strict = !("nonstrict" in ARGS),
)
deploydocs(
repo = "github.com/JuliaMath/DensityInterface.jl.git",
forcepush = true,
push_preview = true,
)
| DensityInterface | https://github.com/JuliaMath/DensityInterface.jl.git |
["MIT"] | 0.4.0 | 80c3e8639e3353e5d2912fb3a1916b8455e2494b | code | 342 |
# This file is a part of DensityInterface.jl, licensed under the MIT License (MIT).
"""
DensityInterface
Trait-based interface for mathematical/statistical densities and objects
associated with a density.
"""
module DensityInterface
using InverseFunctions
using Test
include("interface.jl")
include("interface_test.jl")
end # module
| DensityInterface | https://github.com/JuliaMath/DensityInterface.jl.git |
["MIT"] | 0.4.0 | 80c3e8639e3353e5d2912fb3a1916b8455e2494b | code | 10053 |
# This file is a part of DensityInterface.jl, licensed under the MIT License (MIT).
"""
abstract type DensityKind end
DensityKind(object)
Subtypes of `DensityKind` indicate whether an `object` *is* a density or *has*
a density, in the sense of the `DensityInterface` API, or whether it is *not*
associated with a density (not compatible with `DensityInterface`).
`DensityKind(object)` returns either `IsDensity()`, `HasDensity()` or
`NoDensity()`.
In addition to the subtypes [`IsDensity`](@ref), [`HasDensity`](@ref) or
[`NoDensity`](@ref), a union `IsOrHasDensity = Union{IsDensity, HasDensity}`
is defined for convenience.
`DensityKind(object) isa IsOrHasDensity` implies that `object` is either a
density itself or can be said to have an associated density. It also implies
that the value of that density at given points can be calculated via
[`logdensityof`](@ref) and [`densityof`](@ref).
`DensityKind(object)` defaults to `NoDensity()` (object is not and does not
have a density). For a type that *is* (directly represents) a density, like a
probability density, define
```julia
@inline DensityKind(::MyDensityType) = IsDensity()
```
For a type that *has* (is associated with) a density in some way, like
a probability distribution has a probability density, define
```julia
@inline DensityKind(::MyDistributionType) = HasDensity()
```
"""
abstract type DensityKind end
export DensityKind
@inline DensityKind(object) = NoDensity()
"""
struct IsDensity <: DensityKind end
As a return value of [`DensityKind(object)`](@ref), indicates that
`object` *is* (represents) a density, like a probability density
object.
See [`DensityKind`](@ref) for details.
"""
struct IsDensity <: DensityKind end
export IsDensity
"""
struct HasDensity <: DensityKind end
As a return value of [`DensityKind(object)`](@ref), indicates that
`object` *has* a density, like a probability distribution has
a probability density.
See [`DensityKind`](@ref) for details.
"""
struct HasDensity <: DensityKind end
export HasDensity
"""
struct NoDensity <: DensityKind end
As a return value of [`DensityKind(object)`](@ref), indicates that
`object` *is not* and *does not have* a density, as understood by
`DensityInterface`.
See [`DensityKind`](@ref) for details.
"""
struct NoDensity <: DensityKind end
export NoDensity
"""
IsOrHasDensity = Union{IsDensity, HasDensity}
As a return value of [`DensityKind(object)`](@ref), indicates that `object`
either *is* or *has* a density, as understood by `DensityInterface`.
See [`DensityKind`](@ref) for details.
"""
const IsOrHasDensity = Union{IsDensity, HasDensity}
export IsOrHasDensity
function _check_is_or_has_density(object)
DensityKind(object) isa IsOrHasDensity || throw(ArgumentError("Object of type $(typeof(object)) neither is nor has a density"))
end
"""
logdensityof(object, x)::Real
Compute the logarithmic value of the density `object` (resp. its associated density)
at a given point `x`.
```jldoctest a
julia> DensityKind(object)
IsDensity()
julia> logy = logdensityof(object, x); logy isa Real
true
```
See also [`DensityKind`](@ref) and [`densityof`](@ref).
"""
function logdensityof end
export logdensityof
"""
logdensityof(object)
Return a function that computes the logarithmic value of the density `object`
(resp. its associated density) at a given point.
```jldoctest a
julia> log_f = logdensityof(object); log_f isa Function
true
julia> log_f(x) == logdensityof(object, x)
true
```
`logdensityof(object)` defaults to `Base.Fix1(logdensityof, object)`, but may be
specialized. If so, [`logfuncdensity`](@ref) will typically have to be
specialized for the return type of `logdensityof` as well.
[`logfuncdensity`](@ref) is the inverse of `logdensityof`, so
`logfuncdensity(log_f)` must be equivalent to `object`.
"""
function logdensityof(object)
_check_is_or_has_density(object)
Base.Fix1(logdensityof, object)
end
"""
densityof(object, x)::Real
Compute the value of the density `object` (resp. its associated density)
at a given point `x`.
```jldoctest a
julia> DensityKind(object)
IsDensity()
julia> densityof(object, x) == exp(logdensityof(object, x))
true
```
`densityof(object, x)` defaults to `exp(logdensityof(object, x))`, but
may be specialized.
See also [`DensityKind`](@ref) and [`logdensityof`](@ref).
"""
densityof(object, x) = exp(logdensityof(object, x))
export densityof
"""
densityof(object)
Return a function that computes the value of the density `object`
(resp. its associated density) at a given point.
```jldoctest a
julia> f = densityof(object);
julia> f(x) == densityof(object, x)
true
```
`densityof(object)` defaults to `Base.Fix1(densityof, object)`, but may be specialized.
"""
function densityof(object)
_check_is_or_has_density(object)
Base.Fix1(densityof, object)
end
"""
logfuncdensity(log_f)
Return a `DensityInterface`-compatible density that is defined by a given
log-density function `log_f`:
```jldoctest
julia> object = logfuncdensity(log_f);
julia> DensityKind(object)
IsDensity()
julia> logdensityof(object, x) == log_f(x)
true
```
`logfuncdensity(log_f)` returns an instance of [`DensityInterface.LogFuncDensity`](@ref)
by default, but may be specialized to return something else, depending on the
type of `log_f`. If so, [`logdensityof`](@ref) will typically have to be
specialized for the return type of `logfuncdensity` as well.
`logfuncdensity` is the inverse of `logdensityof`, so the following must
hold true:
* `d = logfuncdensity(logdensityof(object))` is equivalent to `object` with
respect to `logdensityof` and `densityof`. However, `d` may not be equal to
`object`, especially if `DensityKind(object) == HasDensity()`: `logfuncdensity` always
creates something that *is* a density, never something that just *has*
a density in some way (like a distribution or a measure in general).
* `logdensityof(logfuncdensity(log_f))` is equivalent (typically equal or even
identical) to `log_f`.
See also [`DensityKind`](@ref).
"""
function logfuncdensity end
export logfuncdensity
@inline logfuncdensity(log_f) = LogFuncDensity(log_f)
# For functions stemming from objects that *have* a density, create a new density:
@inline _logfuncdensity_impl(::HasDensity, log_f::Base.Fix1{typeof(logdensityof)}) = LogFuncDensity(log_f)
# For functions stemming from objects that *are* a density, recover original object:
@inline _logfuncdensity_impl(::IsDensity, log_f::Base.Fix1{typeof(logdensityof)}) = log_f.x
@inline logfuncdensity(log_f::Base.Fix1{typeof(logdensityof)}) = _logfuncdensity_impl(DensityKind(log_f.x), log_f)
InverseFunctions.inverse(::typeof(logfuncdensity)) = logdensityof
InverseFunctions.inverse(::typeof(logdensityof)) = logfuncdensity
"""
struct DensityInterface.LogFuncDensity{F}
Wraps a log-density function `log_f` to make it compatible with the
`DensityInterface` API. Typically, `LogFuncDensity(log_f)` should not be called
directly, [`logfuncdensity`](@ref) should be used instead.
"""
struct LogFuncDensity{F}
_log_f::F
end
LogFuncDensity
@inline DensityKind(::LogFuncDensity) = IsDensity()
@inline logdensityof(object::LogFuncDensity, x) = object._log_f(x)
@inline logdensityof(object::LogFuncDensity) = object._log_f
@inline densityof(object::LogFuncDensity, x) = exp(object._log_f(x))
@inline densityof(object::LogFuncDensity) = exp ∘ object._log_f
function Base.show(io::IO, object::LogFuncDensity)
print(io, nameof(typeof(object)), "(")
show(io, object._log_f)
print(io, ")")
end
"""
funcdensity(f)
Return a `DensityInterface`-compatible density that is defined by a given
non-log density function `f`:
```jldoctest
julia> object = funcdensity(f);
julia> DensityKind(object)
IsDensity()
julia> densityof(object, x) == f(x)
true
```
`funcdensity(f)` returns an instance of [`DensityInterface.FuncDensity`](@ref)
by default, but may be specialized to return something else, depending on the
type of `f`. If so, [`densityof`](@ref) will typically have to be
specialized for the return type of `funcdensity` as well.
`funcdensity` is the inverse of `densityof`, so the following must
hold true:
* `d = funcdensity(densityof(object))` is equivalent to `object` with
respect to `logdensityof` and `densityof`. However, `d` may not be equal to
`object`, especially if `DensityKind(object) == HasDensity()`: `funcdensity` always
creates something that *is* a density, never something that just *has*
a density in some way (like a distribution or a measure in general).
* `densityof(funcdensity(f))` is equivalent (typically equal or even
identical) to `f`.
See also [`DensityKind`](@ref).
"""
function funcdensity end
export funcdensity
@inline funcdensity(f) = FuncDensity(f)
# For functions stemming from objects that *have* a density, create a new density:
@inline _funcdensity_impl(::HasDensity, f::Base.Fix1{typeof(densityof)}) = FuncDensity(f)
# For functions stemming from objects that *are* a density, recover original object:
@inline _funcdensity_impl(::IsDensity, f::Base.Fix1{typeof(densityof)}) = f.x
@inline funcdensity(f::Base.Fix1{typeof(densityof)}) = _funcdensity_impl(DensityKind(f.x), f)
InverseFunctions.inverse(::typeof(funcdensity)) = densityof
InverseFunctions.inverse(::typeof(densityof)) = funcdensity
"""
struct DensityInterface.FuncDensity{F}
Wraps a non-log density function `f` to make it compatible with the
`DensityInterface` API. Typically, `FuncDensity(f)` should not be
called directly, [`funcdensity`](@ref) should be used instead.
"""
struct FuncDensity{F}
_f::F
end
FuncDensity
@inline DensityKind(::FuncDensity) = IsDensity()
@inline logdensityof(object::FuncDensity, x) = log(object._f(x))
@inline logdensityof(object::FuncDensity) = log ∘ object._f
@inline densityof(object::FuncDensity, x) = object._f(x)
@inline densityof(object::FuncDensity) = object._f
function Base.show(io::IO, object::FuncDensity)
print(io, nameof(typeof(object)), "(")
show(io, object._f)
print(io, ")")
end
| DensityInterface | https://github.com/JuliaMath/DensityInterface.jl.git |
["MIT"] | 0.4.0 | 80c3e8639e3353e5d2912fb3a1916b8455e2494b | code | 1901 |
# This file is a part of DensityInterface.jl, licensed under the MIT License (MIT).
"""
DensityInterface.test_density_interface(object, x, ref_logd_at_x; kwargs...)
Test that `object` is compatible with `DensityInterface`.
Tests that `DensityKind(object) isa IsOrHasDensity`.
Also tests that [`logdensityof(object, x)`](@ref) equals `ref_logd_at_x` and
that the behavior of [`logdensityof(object)`](@ref),
[`densityof(object, x)`](@ref) and [`densityof(object)`](@ref) is consistent.
The results of `logdensityof(object, x)` and `densityof(object, x)` are compared to
`ref_logd_at_x` and `exp(ref_logd_at_x)` using `isapprox`. `kwargs...` are
forwarded to `isapprox`.
Also tests that `d = logfuncdensity(logdensityof(object))` returns a density
(`DensityKind(d) == IsDensity()`) that is equivalent to `object` in respect to
`logdensityof` and `densityof`, and that `funcdensity(densityof(object))`
behaves the same way.
"""
function test_density_interface(object, x, ref_logd_at_x; kwargs...)
@testset "test_density_interface: $object with input $x" begin
ref_d_at_x = exp(ref_logd_at_x)
@test DensityKind(object) isa IsOrHasDensity
@test isapprox(logdensityof(object, x), ref_logd_at_x; kwargs...)
log_f = logdensityof(object)
@test isapprox(log_f(x), ref_logd_at_x; kwargs...)
@test isapprox(densityof(object,x), ref_d_at_x; kwargs...)
f = densityof(object)
@test isapprox(f(x), ref_d_at_x; kwargs...)
for d in (logfuncdensity(log_f), funcdensity(f))
@test DensityKind(d) == IsDensity()
@test isapprox(logdensityof(d, x), ref_logd_at_x; kwargs...)
@test isapprox(logdensityof(d)(x), ref_logd_at_x; kwargs...)
@test isapprox(densityof(d,x), ref_d_at_x; kwargs...)
@test isapprox(densityof(d)(x), ref_d_at_x; kwargs...)
end
end
end
| DensityInterface | https://github.com/JuliaMath/DensityInterface.jl.git |
["MIT"] | 0.4.0 | 80c3e8639e3353e5d2912fb3a1916b8455e2494b | code | 613 |
# This file is a part of DensityInterface.jl, licensed under the MIT License (MIT).
import Test
import DensityInterface
import Documenter
Test.@testset "Package DensityInterface" begin
include("test_interface.jl")
# doctests
Documenter.DocMeta.setdocmeta!(
DensityInterface,
:DocTestSetup,
quote
using DensityInterface
object = logfuncdensity(x -> x^2)
log_f = logdensityof(object)
f = densityof(object)
x = 4.2
end;
recursive=true,
)
Documenter.doctest(DensityInterface)
end # testset
| DensityInterface | https://github.com/JuliaMath/DensityInterface.jl.git |
["MIT"] | 0.4.0 | 80c3e8639e3353e5d2912fb3a1916b8455e2494b | code | 1335 |
# This file is a part of DensityInterface.jl, licensed under the MIT License (MIT).
using DensityInterface
using Test
using LinearAlgebra, InverseFunctions
struct MyDensity end
@inline DensityInterface.DensityKind(::MyDensity) = IsDensity()
DensityInterface.logdensityof(::MyDensity, x::Any) = -norm(x)^2
struct MyMeasure end
@inline DensityInterface.DensityKind(::MyMeasure) = HasDensity()
DensityInterface.logdensityof(::MyMeasure, x::Any) = -norm(x)^2
@testset "interface" begin
@test inverse(logdensityof) == logfuncdensity
@test inverse(logfuncdensity) == logdensityof
@test inverse(densityof) == funcdensity
@test inverse(funcdensity) == densityof
@test @inferred(DensityKind("foo")) === NoDensity()
@test_throws ArgumentError logdensityof("foo")
@test_throws ArgumentError densityof("foo")
for object1 in (MyDensity(), MyMeasure())
x = [1, 2, 3]
DensityInterface.test_density_interface(object1, x, -norm(x)^2)
object2 = logfuncdensity(x -> -norm(x)^2)
@test DensityKind(object2) === IsDensity()
DensityInterface.test_density_interface(object2, x, -norm(x)^2)
object3 = funcdensity(x -> exp(-norm(x)^2))
@test DensityKind(object3) === IsDensity()
DensityInterface.test_density_interface(object3, x, -norm(x)^2)
end
end
| DensityInterface | https://github.com/JuliaMath/DensityInterface.jl.git |
["MIT"] | 0.4.0 | 80c3e8639e3353e5d2912fb3a1916b8455e2494b | docs | 1102 |
# DensityInterface.jl
[![Documentation for stable version](https://img.shields.io/badge/docs-stable-blue.svg)](https://JuliaMath.github.io/DensityInterface.jl/stable)
[![Documentation for development version](https://img.shields.io/badge/docs-dev-blue.svg)](https://JuliaMath.github.io/DensityInterface.jl/dev)
[![License](http://img.shields.io/badge/license-MIT-brightgreen.svg?style=flat)](LICENSE.md)
[![Build Status](https://github.com/JuliaMath/DensityInterface.jl/workflows/CI/badge.svg?branch=master)](https://github.com/JuliaMath/DensityInterface.jl/actions?query=workflow%3ACI)
[![Codecov](https://codecov.io/gh/JuliaMath/DensityInterface.jl/branch/master/graph/badge.svg)](https://codecov.io/gh/JuliaMath/DensityInterface.jl)
This package defines an interface for mathematical/statistical densities and objects associated with a density in Julia. See the documentation for details.
## Documentation
* [Documentation for stable version](https://JuliaMath.github.io/DensityInterface.jl/stable)
* [Documentation for development version](https://JuliaMath.github.io/DensityInterface.jl/dev)
| DensityInterface | https://github.com/JuliaMath/DensityInterface.jl.git |
["MIT"] | 0.4.0 | 80c3e8639e3353e5d2912fb3a1916b8455e2494b | docs | 335 |
# API
## Interface
```@docs
logdensityof
logdensityof(::Any)
logfuncdensity
funcdensity
densityof
densityof(::Any)
```
## Types
```@docs
IsDensity
HasDensity
IsOrHasDensity
NoDensity
DensityKind
DensityInterface.LogFuncDensity
DensityInterface.FuncDensity
```
## Test utility
```@docs
DensityInterface.test_density_interface
```
| DensityInterface | https://github.com/JuliaMath/DensityInterface.jl.git |
["MIT"] | 0.4.0 | 80c3e8639e3353e5d2912fb3a1916b8455e2494b | docs | 2311 |
# DensityInterface.jl
```@meta
DocTestSetup = quote
struct SomeDensity end
log_of_d_at(x) = x^2
x = 4
end
```
```@docs
DensityInterface
```
This package defines an interface for mathematical/statistical densities and objects associated with a density in Julia. The interface comprises the type [`DensityKind`](@ref) and the functions [`logdensityof`](@ref)/[`densityof`](@ref)[^1] and [`logfuncdensity`](@ref)/[`funcdensity`](@ref).
The following methods must be provided to make a type (e.g. `SomeDensity`) compatible with the interface:
```jldoctest a
import DensityInterface
@inline DensityInterface.DensityKind(::SomeDensity) = IsDensity()
DensityInterface.logdensityof(object::SomeDensity, x) = log_of_d_at(x)
object = SomeDensity()
DensityInterface.logdensityof(object, x) isa Real
# output
true
```
`object` may be/represent a density itself (`DensityKind(object) === IsDensity()`) or it may be something that can be said to have a density (`DensityKind(object) === HasDensity()`)[^2].
In statistical inference applications, for example, `object` might be a likelihood, prior or posterior.
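A distribution-like type opts in with `HasDensity()` instead — a minimal sketch (the type `SomeDistribution` and the function `log_of_d_at` are illustrative placeholders):
```julia
import DensityInterface

struct SomeDistribution end

@inline DensityInterface.DensityKind(::SomeDistribution) = DensityInterface.HasDensity()
DensityInterface.logdensityof(::SomeDistribution, x) = log_of_d_at(x)
```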
DensityInterface automatically provides `logdensityof(object)`, equivalent to `x -> logdensityof(object, x)`. This constitutes a convenient way of passing a (log-)density function to algorithms like optimizers, samplers, etc.:
```jldoctest a
using DensityInterface
object = SomeDensity()
log_f = logdensityof(object)
log_f(x) == logdensityof(object, x)
# output
true
```
```julia
SomeOptimizerPackage.maximize(logdensityof(object), x_init)
```
Conversely, a given log-density function `log_f` can be converted to a DensityInterface-compatible density object using [`logfuncdensity`](@ref):
```jldoctest a
object = logfuncdensity(log_f)
DensityKind(object) === IsDensity() && logdensityof(object, x) == log_f(x)
# output
true
```
[^1]: The function names `logdensityof` and `densityof` were chosen to convey that the target object may either *be* a density or something that can be said to *have* a density. They also have less naming conflict potential than `logdensity` and esp. `density` (the latter already being exported by Plots.jl).
[^2]: The package [`Distributions`](https://github.com/JuliaStats/Distributions.jl) supports `DensityInterface` for `Distributions.Distribution`.
| DensityInterface | https://github.com/JuliaMath/DensityInterface.jl.git |
["MIT"] | 1.0.0 | 0fd73bf40485c791e6c33672c643bf1303045e9a | code | 3037 |
module BatchIterators
using Statistics
export BatchIterator
export choose_batchsize
export centered_batch_iterator
"""
BatchIterator(X; batchsize = nothing, limit=size(X,2))
Wrapper allowing iteration over batches of `batchsize` columns of `X`. `X` can be of any type supporting `size` and 2d indexing. When `limit` is provided, iteration is restricted to the columns of `X[:, 1:limit]`.
"""
struct BatchIterator{T}
X::T
length::Int # Number of batches
bsz::Int # Batch size
limit::Int
function BatchIterator(X; batchsize=nothing, limit=size(X,2))
@assert limit > 0 && limit ≤ size(X,2)
bsz = (batchsize === nothing) ? choose_batchsize(size(X,1), limit) : batchsize
nb = ceil(Int, limit/bsz)
new{typeof(X)}(X, nb, bsz, limit)
end
end
view_compatible(::Any) = false
view_compatible(::Array) = true
view_compatible(bi::BatchIterator) = view_compatible(bi.X)
#######################################################################
# Iteration #
#######################################################################
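# `it[i]` returns the i-th block of `bsz` columns (a view when `X` supports it);
# the last batch may contain fewer than `bsz` columns.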
function Base.getindex(it::BatchIterator, i)
d = i - it.length # > 0 means overflow, == 0 means last batch
cbsz = (d == 0) ? mod(it.limit - 1, it.bsz) + 1 : it.bsz # Size of current batch
if (i<1 || d > 0)
@error "Out of bounds."
else
# TODO using views here might impact type stability.
view_compatible(it) ? (@view it.X[:, (i-1)*it.bsz+1:(i-1)*it.bsz+cbsz]) : it.X[:, (i-1)*it.bsz+1:(i-1)*it.bsz+cbsz]
end
end
Base.length(it::BatchIterator) = it.length
function Base.iterate(it::BatchIterator{T}, st = 0) where T
st = st + 1 # new state
d = st - it.length # > 0 means overflow, == 0 means last batch
(d > 0) ? nothing : (it[st], st)
end
"""
centered_batch_iterator(X; kwargs...)
Similar to `BatchIterator`, but first performs one pass over the data to compute the mean, then yields mean-centered batches.
"""
function centered_batch_iterator(X; kwargs...)
bi = BatchIterator(X; kwargs...)
μ = vec(mean(mean(b, dims=2) for b in bi)) # reuse `bi` so that `batchsize`/`limit` also apply to the mean
(b .- μ for b in bi)
end
#######################################################################
# Utilities #
#######################################################################
"""
choose_batchsize(d, n; maxmemGB = 1.0, maxbatchsize = 2^14, sizeoneB = d*sizeof(Float64))
Computes the size (nb. of columns) of a batch, so that each column of the batch can be converted to a vector of size `sizeoneB` (in bytes) with a total memory constrained by `maxmemGB` (gigabytes).
"""
function choose_batchsize(d, n;
maxmemGB = 1.0,
maxbatchsize = 2^14,
sizeoneB = d*sizeof(Float64),
forcepow2 = true)
fullsizeGB = n * sizeoneB/1024^3 # Size of the sketches of all samples
batchsize = (fullsizeGB > maxmemGB) ? ceil(Int, n/ceil(Int, fullsizeGB/maxmemGB)) : n
batchsize = min(batchsize, maxbatchsize)
(forcepow2 && batchsize != n) ? prevpow(2, batchsize) : batchsize
end
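# Illustrative numbers: for d = 1000 rows and n = 10^6 columns with the defaults,
# one column costs 8000 B, so all columns need ≈ 7.45 GB > 1 GB; the data is split
# into 8 chunks of 125000 columns, capped at 2^14 and rounded down to a power of
# two, giving choose_batchsize(1000, 10^6) == 16384.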
end # module
| BatchIterators | https://github.com/Djoop/BatchIterators.jl.git |
["MIT"] | 1.0.0 | 0fd73bf40485c791e6c33672c643bf1303045e9a | docs | 497 |
# Summary
Licence: MIT.
A very small package providing the constructor `BatchIterator(X; batchsize=…, limit=…)` and the function `centered_batch_iterator(X; kwargs…)`, which allow iteration over blocks of columns of `X`, for any object `X` supporting 2d indexing and for which the function `size` is defined.
The function `choose_batchsize` helps find a good batch size while controlling memory usage.
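For example, a minimal sketch (the matrix `X` and the numbers here are made up):
```julia
using BatchIterators

X = randn(100, 10_000)
bsz = choose_batchsize(size(X, 1), size(X, 2); maxmemGB = 0.5)
for b in BatchIterator(X; batchsize = bsz)
    # each `b` is a block of up to `bsz` columns of `X`
end
```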
The package was originally designed to iterate over samples of an out-of-core dataset.
| BatchIterators | https://github.com/Djoop/BatchIterators.jl.git |
["MIT"] | 0.5.2 | da6fde5ea219bbe414f9d6f878ea9ab5d3476e64 | code | 188 |
module MatrixMarket
using SparseArrays
using LinearAlgebra
using CodecZlib
export mmread, mmwrite, mminfo
include("mminfo.jl")
include("mmread.jl")
include("mmwrite.jl")
end # module
| MatrixMarket | https://github.com/JuliaSparse/MatrixMarket.jl.git |
["MIT"] | 0.5.2 | da6fde5ea219bbe414f9d6f878ea9ab5d3476e64 | code | 1810 |
"""
mminfo(file)
Read header information on the size and structure from file. The actual data matrix is not
parsed.
# Arguments
- `file`: The filename or io stream.
"""
function mminfo(filename::String)
stream = open(filename, "r")
if endswith(filename, ".gz")
stream = GzipDecompressorStream(stream)
end
info = mminfo(stream)
close(stream)
return info
end
function mminfo(stream::IO)
firstline = chomp(readline(stream))
if !startswith(firstline, "%%MatrixMarket")
throw(FileFormatException("Expected start of header `%%MatrixMarket`"))
end
tokens = split(firstline)
if length(tokens) != 5
throw(FileFormatException("Not enough words on first line, got $(length(tokens)) words"))
end
(head1, rep, field, symm) = map(lowercase, tokens[2:5])
if head1 != "matrix"
throw(FileFormatException("Unknown MatrixMarket data type: $head1 (only `matrix` is supported)"))
end
dimline = readline(stream)
# Skip all comments and empty lines
while length(chomp(dimline)) == 0 || (length(dimline) > 0 && dimline[1] == '%')
dimline = readline(stream)
end
rows, cols, entries = parse_dimension(dimline, rep)
return rows, cols, entries, rep, field, symm
end
struct FileFormatException <: Exception
msg::String
end
Base.showerror(io::IO, e::FileFormatException) = print(io, e.msg)
function parse_dimension(line::String, rep::String)
dims = map(x -> parse(Int, x), split(line))
if length(dims) < (rep == "coordinate" ? 3 : 2)
throw(FileFormatException(string("Could not read in matrix dimensions from line: ", line)))
end
if rep == "coordinate"
return dims[1], dims[2], dims[3]
else
return dims[1], dims[2], (dims[1] * dims[2])
end
end
| MatrixMarket | https://github.com/JuliaSparse/MatrixMarket.jl.git |
["MIT"] | 0.5.2 | da6fde5ea219bbe414f9d6f878ea9ab5d3476e64 | code | 3851 |
"""
mmread(filename, infoonly=false, retcoord=false)
Read the contents of the Matrix Market file `filename` into a matrix, which will be either
sparse or dense, depending on the Matrix Market format indicated by `coordinate` (coordinate
sparse storage), or `array` (dense array storage).
# Arguments
- `filename::String`: The file to read.
- `infoonly::Bool=false`: Only information on the size and structure is returned from
reading the header. The actual data for the matrix elements are not parsed.
- `retcoord::Bool`: If it is `true`, the rows, column and value vectors are returned along
with the header information.
"""
function mmread(filename::String, infoonly::Bool=false, retcoord::Bool=false)
stream = open(filename, "r")
if endswith(filename, ".gz")
stream = GzipDecompressorStream(stream)
end
result = infoonly ? mminfo(stream) : mmread(stream, false, retcoord) # `retcoord` is the third positional argument of the stream method
close(stream)
return result
end
function mmread(stream::IO, infoonly::Bool=false, retcoord::Bool=false)
rows, cols, entries, rep, field, symm = mminfo(stream)
infoonly && return rows, cols, entries, rep, field, symm
T = parse_eltype(field)
symfunc = parse_symmetric(symm)
if rep == "coordinate"
rn = Vector{Int}(undef, entries)
cn = Vector{Int}(undef, entries)
vals = Vector{T}(undef, entries)
for i in 1:entries
line = readline(stream)
splits = find_splits(line, num_splits(T))
rn[i] = parse_row(line, splits)
cn[i] = parse_col(line, splits, T)
vals[i] = parse_val(line, splits, T)
end
result = retcoord ? (rn, cn, vals, rows, cols, entries, rep, field, symm) :
symfunc(sparse(rn, cn, vals, rows, cols))
else
vals = [parse(Float64, readline(stream)) for _ in 1:entries]
A = reshape(vals, rows, cols)
result = symfunc(A)
end
return result
end
function parse_eltype(field::String)
if field == "real"
return Float64
elseif field == "complex"
return ComplexF64
elseif field == "integer"
return Int64
elseif field == "pattern"
return Bool
else
throw(FileFormatException("Unsupported field $field."))
end
end
function parse_symmetric(symm::String)
if symm == "general"
return identity
elseif symm == "symmetric" || symm == "hermitian"
return hermitianize!
elseif symm == "skew-symmetric"
return skewsymmetrize!
else
throw(FileFormatException("Unknown matrix symmetry: $symm."))
end
end
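# Symmetric/Hermitian/skew-symmetric files only store the lower triangle;
# the full matrix is reconstructed by mirroring it across the diagonal.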
function hermitianize!(M::AbstractMatrix)
M .+= tril(M, -1)'
return M
end
function skewsymmetrize!(M::AbstractMatrix)
M .-= tril(M, -1)'
return M
end
parse_row(line, splits) = parse(Int, line[1:splits[1]])
parse_col(line, splits, ::Type{Bool}) = parse(Int, line[splits[1]:end])
parse_col(line, splits, eltype) = parse(Int, line[splits[1]:splits[2]])
function parse_val(line, splits, ::Type{ComplexF64})
real = parse(Float64, line[splits[2]:splits[3]])
imag = parse(Float64, line[splits[3]:length(line)])
return ComplexF64(real, imag)
end
parse_val(line, splits, ::Type{Bool}) = true
parse_val(line, splits, ::Type{T}) where {T} = parse(T, line[splits[2]:length(line)])
num_splits(::Type{ComplexF64}) = 3
num_splits(::Type{Bool}) = 1
num_splits(elty) = 2
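# Record the starting index of each of the first `num` whitespace runs in `s`;
# these positions are used to slice out the row, column and value fields.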
function find_splits(s::String, num)
splits = Vector{Int}(undef, num)
cur = 1
in_space = s[1] == '\t' || s[1] == ' '
@inbounds for i in 1:length(s)
if s[i] == '\t' || s[i] == ' '
if !in_space
in_space = true
splits[cur] = i
cur += 1
cur > num && break
end
else
in_space = false
end
end
splits
end
| MatrixMarket | https://github.com/JuliaSparse/MatrixMarket.jl.git |
["MIT"] | 0.5.2 | da6fde5ea219bbe414f9d6f878ea9ab5d3476e64 | code | 1924 |
"""
mmwrite(filename, matrix)
Write a sparse matrix to .mtx file format.
# Arguments
- `filename::String`: The file to write.
- `matrix::SparseMatrixCSC`: The sparse matrix to write.
"""
function mmwrite(filename::String, matrix::SparseMatrixCSC)
stream = open(filename, "w")
if endswith(filename, ".gz")
stream = GzipCompressorStream(stream)
end
mmwrite(stream, matrix)
close(stream)
end
function mmwrite(stream::IO, matrix::SparseMatrixCSC)
nl = "\n"
elem = generate_eltype(eltype(matrix))
sym = generate_symmetric(matrix)
# write header
write(stream, "%%MatrixMarket matrix coordinate $elem $sym$nl")
# only use lower triangular part of symmetric and Hermitian matrices
if issymmetric(matrix) || ishermitian(matrix)
matrix = tril(matrix)
end
# write matrix size and number of nonzeros
write(stream, "$(size(matrix, 1)) $(size(matrix, 2)) $(nnz(matrix))$nl")
rows = rowvals(matrix)
vals = nonzeros(matrix)
for i in 1:size(matrix, 2)
for j in nzrange(matrix, i)
entity = generate_entity(i, j, rows, vals, elem)
write(stream, entity)
end
end
end
generate_eltype(::Type{<:Bool}) = "pattern"
generate_eltype(::Type{<:Integer}) = "integer"
generate_eltype(::Type{<:AbstractFloat}) = "real"
generate_eltype(::Type{<:Complex}) = "complex"
generate_eltype(elty) = error("Invalid matrix type")
function generate_symmetric(m::AbstractMatrix)
if issymmetric(m)
return "symmetric"
elseif ishermitian(m)
return "hermitian"
else
return "general"
end
end
function generate_entity(i, j, rows, vals, kind::String)
nl = "\n"
if kind == "pattern"
return "$(rows[j]) $i$nl"
elseif kind == "complex"
return "$(rows[j]) $i $(real(vals[j])) $(imag(vals[j]))$nl"
else
return "$(rows[j]) $i $(vals[j])$nl"
end
end
| MatrixMarket | https://github.com/JuliaSparse/MatrixMarket.jl.git |
["MIT"] | 0.5.2 | da6fde5ea219bbe414f9d6f878ea9ab5d3476e64 | code | 3167 |
@testset "mtx" begin
mtx_filename = joinpath(TEST_PATH, "data", "test.mtx")
res = sparse(
[5, 4, 1, 2, 6],
[1, 5, 1, 4, 7],
[1, 1, 1, 1, 1],
11, 12
)
testmatrices = download_unzip_nist_files()
@testset "read/write mtx" begin
rows, cols, entries, rep, field, symm = mminfo(mtx_filename)
@test rows == 11
@test cols == 12
@test entries == 5
@test rep == "coordinate"
@test field == "integer"
@test symm == "general"
A = mmread(mtx_filename)
@test A isa SparseMatrixCSC
@test A == res
newfilename = replace(mtx_filename, "test.mtx" => "test_write.mtx")
mmwrite(newfilename, res)
f = open(mtx_filename)
sha_test = bytes2hex(sha256(read(f, String)))
close(f)
f = open(newfilename)
sha_new = bytes2hex(sha256(read(f, String)))
close(f)
@test sha_test == sha_new
rm(newfilename)
end
@testset "read/write mtx.gz" begin
gz_filename = mtx_filename * ".gz"
rows, cols, entries, rep, field, symm = mminfo(gz_filename)
@test rows == 11
@test cols == 12
@test entries == 5
@test rep == "coordinate"
@test field == "integer"
@test symm == "general"
A = mmread(gz_filename)
@test A isa SparseMatrixCSC
@test A == res
newfilename = replace(gz_filename, "test.mtx.gz" => "test_write.mtx.gz")
mmwrite(newfilename, res)
stream = GzipDecompressorStream(open(gz_filename))
sha_test = bytes2hex(sha256(read(stream, String)))
close(stream)
stream = GzipDecompressorStream(open(newfilename))
sha_new = bytes2hex(sha256(read(stream, String)))
close(stream)
@test sha_test == sha_new
rm(newfilename)
end
@testset "read/write NIST mtx files" begin
# verify mmread(mmwrite(A)) == A
for filename in filter(t -> endswith(t, ".mtx"), readdir())
new_filename = replace(filename, ".mtx" => "_.mtx")
A = MatrixMarket.mmread(filename)
MatrixMarket.mmwrite(new_filename, A)
new_A = MatrixMarket.mmread(new_filename)
@test new_A == A
rm(new_filename)
end
end
@testset "read/write NIST mtx.gz files" begin
for gz_filename in filter(t -> endswith(t, ".mtx.gz"), readdir())
mtx_filename = replace(gz_filename, ".mtx.gz" => ".mtx")
# reading from .mtx and .mtx.gz must be identical
A_gz = MatrixMarket.mmread(gz_filename)
A = MatrixMarket.mmread(mtx_filename)
@test A_gz == A
# writing to .mtx and .mtx.gz must be identical
new_filename = replace(gz_filename, ".mtx.gz" => "_.mtx.gz")
mmwrite(new_filename, A)
new_A = MatrixMarket.mmread(new_filename)
@test new_A == A
rm(new_filename)
end
end
# clean up
for filename in filter(t -> endswith(t, ".mtx"), readdir())
rm(filename)
rm(filename * ".gz")
end
end
| MatrixMarket | https://github.com/JuliaSparse/MatrixMarket.jl.git |
["MIT"] | 0.5.2 | da6fde5ea219bbe414f9d6f878ea9ab5d3476e64 | code | 319 |
using MatrixMarket
using CodecZlib
using Downloads
using GZip
using SparseArrays
using SHA
using Test
include("test_utils.jl")
const TEST_PATH = @__DIR__
const NIST_FILELIST = download_nist_filelist()
tests = [
"mtx",
]
@testset "MatrixMarket.jl" begin
for t in tests
include("$(t).jl")
end
end
| MatrixMarket | https://github.com/JuliaSparse/MatrixMarket.jl.git |
["MIT"] | 0.5.2 | da6fde5ea219bbe414f9d6f878ea9ab5d3476e64 | code | 1795 |
function gunzip(fname)
destname, ext = splitext(fname)
if ext != ".gz"
error("gunzip: $fname: unknown suffix -- ignored")
end
open(destname, "w") do f
GZip.open(fname) do g
write(f, read(g, String))
end
end
destname
end
function download_nist_filelist()
isfile("matrices.html") ||
Downloads.download("math.nist.gov/MatrixMarket/matrices.html", "matrices.html")
matrixmarketdata = Any[]
open("matrices.html") do f
for line in readlines(f)
if occursin("""<A HREF="/MatrixMarket/data/""", line)
collectionname, setname, matrixname = split(split(line, '"')[2], '/')[4:6]
matrixname = split(matrixname, '.')[1]
push!(matrixmarketdata, (collectionname, setname, matrixname))
end
end
end
rm("matrices.html")
return matrixmarketdata
end
function download_unzip_nist_files()
# Download one matrix at random plus some specifically chosen ones.
n = rand(1:length(NIST_FILELIST))
testmatrices = [
("NEP", "mhd", "mhd1280b"),
("Harwell-Boeing", "acoust", "young4c"),
("Harwell-Boeing", "platz", "plsk1919"),
NIST_FILELIST[n]
]
for (collectionname, setname, matrixname) in testmatrices
fn = string(collectionname, '_', setname, '_', matrixname)
mtxfname = string(fn, ".mtx")
if !isfile(mtxfname)
url = "https://math.nist.gov/pub/MatrixMarket2/$collectionname/$setname/$matrixname.mtx.gz"
gzfname = string(fn, ".mtx.gz")
try
Downloads.download(url, gzfname)
catch
continue
end
gunzip(gzfname)
end
end
return testmatrices
end
| MatrixMarket | https://github.com/JuliaSparse/MatrixMarket.jl.git |
["MIT"] | 0.5.2 | da6fde5ea219bbe414f9d6f878ea9ab5d3476e64 | docs | 1344 |
# MatrixMarket
[![Build Status](https://travis-ci.org/JuliaSparse/MatrixMarket.jl.svg?branch=master)](https://travis-ci.org/JuliaSparse/MatrixMarket.jl)
Package to read/write matrices from/to files in the [Matrix Market native exchange
format](http://math.nist.gov/MatrixMarket/formats.html#MMformat).
The [Matrix Market](http://math.nist.gov/MatrixMarket/) is a NIST repository of
"test data for use in comparative studies of algorithms for numerical linear
algebra, featuring nearly 500 sparse matrices from a variety of applications,
as well as matrix generation tools and services." Over time, the [Matrix Market's
native exchange format](http://math.nist.gov/MatrixMarket/formats.html#MMformat)
has become one of the _de facto_ standard file formats for exchanging matrix
data.
## Usage
### Read
```julia
using MatrixMarket

M = MatrixMarket.mmread("myfile.mtx")
```
`M` will be a sparse or dense matrix depending on whether the file contains a matrix
in coordinate format or array format. The specific type of `M` may be `Symmetric` or
`Hermitian` depending on the symmetry information contained in the file header.
```julia
MatrixMarket.mmread("myfile.mtx", true)
```
Returns raw data from the file header. Does not read in the actual matrix elements.
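The exported `mminfo` function returns the header fields directly, without reading the matrix body:
```julia
rows, cols, entries, rep, field, symm = mminfo("myfile.mtx")
```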
### Write
```julia
MatrixMarket.mmwrite("myfile.mtx", M)
```
`M` has to be a sparse matrix.
| MatrixMarket | https://github.com/JuliaSparse/MatrixMarket.jl.git |
["MIT"] | 0.2.0 | 4d7669dceb533e34d59bc57f6c13cd6b79f89a76 | code | 5014 |
module WhereIsMyDocstring
using Documenter
export @docmatch
mutable struct DocStr
binding
mod
source
signature
text
function DocStr(D::Base.Docs.DocStr)
d = new()
if length(D.text) > 0
d.text = D.text[1]
else
if isdefined(D, :object)
d.text = string(D.object)
else
d.text = ""
end
end
d.text = lstrip(d.text, '`')
d.text = lstrip(d.text)
d.binding = D.data[:binding]
d.mod = D.data[:module]
d.source = D.data[:path] * ":" * string(D.data[:linenumber])
d.signature = D.data[:typesig]
return d
end
end
# Some type printing gymnastics for the signatures
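# `_name` strips `<:` bounds from the printed form of a type, e.g.
# "T<:Real" becomes "T" and "S<:T<:Integer" becomes "T" (illustrative inputs).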
function _name(x)
S = string(x)
r1 = r"([a-zA-Z1-9]*)<:([a-zA-Z1-9]*)"
r2 = r"([a-zA-Z1-9]*)<:(.+?)<:([a-zA-Z1-9]*)"
while match(r2, S) !== nothing
S = replace(S, r2 => s"\2")
end
while match(r1, S) !== nothing
S = replace(S, r1 => s"\1")
end
return S
end
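# Rewrite a UnionAll tuple signature as a printable method-call pattern, e.g.
# Tuple{Vector{S}, Matrix{T}} where {S, T<:S} is rendered roughly as
# "(::Array{S, 1}, ::Array{T, 2}) where {S, T<:S}" (illustrative).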
function _print_type_hint(x::Type)
@assert x isa UnionAll
vars = []
while x isa UnionAll
push!(vars, x.var)
x = x.body
end
while x isa Union
x = x.b
end
@assert x <: Tuple
res = "(" * join(["::$(_name(T))" for T in x.parameters], ", ") * ")"
while occursin("::<:", res)
res = replace(res, "::<:" => "::")
end
while occursin("<:<:", res)
res = replace(res, "<:<:" => "<:")
end
return res * " where {" * join(vars, ", ") * "}"
end
function _print_type(x::Type)
if x isa Core.TypeofBottom
return [""]
end
if x isa UnionAll
res = _print_type_hint(x)
return ["(::$x)\n try the following:", "$res"]
end
_print_type_real(x)
end
function _print_type_real(x)
if x isa Union
return append!(_print_type_real(x.a), _print_type_real(x.b))
elseif x <: Tuple
return ["(" * join(["::$(T)" for T in x.parameters], ", ") * ")"]
else
return ["(::$x)"]
end
end
function Base.show(io::IO, d::DocStr)
printstyled(io, d.binding, bold = true)
text = join(split(d.text, "\n")[1:1], "\n")
printstyled(io, "\n Content:", color = :light_green)
printstyled(io, "\n ", text, " [...]", italic = true)
printstyled(io, "\n Signature type:", color = :light_green)
printstyled(io, "\n ", d.signature)
printstyled(io, "\n Include in ```@docs``` block one of the following:", color = :light_green)
for s in _print_type(d.signature)
print(io, "\n ")
print(io, "$(d.binding)")
# now print s
if occursin("try the following:", s)
ss = split(s, "try the following:")
print(io, ss[1])
printstyled(io, "try the following:"; color = :light_yellow)
print(io, ss[2])
else
print(io, s)
end
end
printstyled(io, "\n Source:", color = :light_green)
printstyled(io, "\n ", d.source, color = :light_grey)
print(io, "\n", "="^displaysize(stdout)[2])
end
function _list_documenter_docstring(mod, ex)
bind = Documenter.DocSystem.binding(mod, ex)
typesig = Core.eval(mod, Base.Docs.signature(ex))
return list_documenter_docstring(mod, bind; sig = typesig)
end
function list_documenter_docstring(mod, fun; sig = Union{})
bind = Documenter.DocSystem.binding(mod, fun)
return list_documenter_docstring(mod, bind; sig = sig)
end
function list_documenter_docstring(mod, bind::Base.Docs.Binding; sig = Union{})
res = Documenter.DocSystem.getdocs(bind, sig, modules = [mod])
return [DocStr(r) for r in res]
end
function list_documenter_docstring(bind::Base.Docs.Binding; sig = Union{})
res = Documenter.DocSystem.getdocs(bind, sig)
return [DocStr(r) for r in res]
end
"""
@docmatch f
@docmatch f(sig)
@docmatch f module
@docmatch f(sig) module
Retrieves all docstrings that would be included in the block
````
```@docs
f
```
````
or
````
```@docs
f(sig)
```
````
The optional argument `module` controls in which module to look for `f`.
#### Example
```
julia> @docmatch sin
2-element Vector{WhereIsMyDocstring.DocStr}:
Base.sin
Content:
sin(x) [...]
Signature type:
Tuple{Number}
Include in ```@docs``` block:
Base.sin(::Number)
Source:
math.jl:490
================================================================================
Base.sin
Content:
sin(A::AbstractMatrix) [...]
Signature type:
Tuple{AbstractMatrix{<:Real}}
Include in ```@docs``` block:
Base.sin(::AbstractMatrix{<:Real})
Source:
/usr/share/julia/stdlib/v1.10/LinearAlgebra/src/dense.jl:956
```
"""
macro docmatch
end
macro docmatch(ex)
bind = Documenter.DocSystem.binding(Main, ex)
typesig = Core.eval(Main, Base.Docs.signature(ex))
return list_documenter_docstring(bind, sig = typesig)
end
macro docmatch(ex, mod)
# (awkward)
# mod is evaluated directly to get the module (I don't want to eval this)
# but the expression for the function (+ signature)
# needs to be passed to the Documenter function as an expression,
# which is later eval'ed
return quote
_list_documenter_docstring($(esc(mod)), $(QuoteNode(ex)))
end
end
end # module WhereIsMyDocstring
| WhereIsMyDocstring | https://github.com/thofma/WhereIsMyDocstring.jl.git |
["MIT"] | 0.2.0 | 4d7669dceb533e34d59bc57f6c13cd6b79f89a76 | code | 1252 |
using Test, WhereIsMyDocstring
module TestDocstrings
"foo(::Number)"
foo(::Number) = nothing
"foo(::Float64)"
foo(::Float64) = nothing
"baz(::Number)"
baz(::Number)
"baz(::Float64)"
baz(::Float64)
"bla"
function baz(::T, ::S) where {S <: Integer, T <: S}
end
@doc (@doc baz(::Float64))
foobar(::Number) = nothing
"blub"
function fookw(x::Number, z::Number = 1; y::Number = 2)
end
"blub"
function foopa(x::Vector{S}, z::Matrix{T} = 1; y::Number = 2) where {S, T <: S}
end
end
D = @docmatch foo
@test sprint(show, D) isa String
@test length(D) == 0
D = @docmatch foo(::Number)
@test sprint(show, D) isa String
@test length(D) == 0
D = @docmatch foo TestDocstrings
@test sprint(show, D) isa String
@test length(D) == 2
D = @docmatch foo(::Number) TestDocstrings
@test sprint(show, D) isa String
@test length(D) == 1
D = @docmatch baz TestDocstrings
@test sprint(show, D) isa String
@test length(D) == 3
D = @docmatch foobar TestDocstrings
@test sprint(show, D) isa String
D = @docmatch length
@test sprint(show, D) isa String
D = @docmatch fookw TestDocstrings
@test sprint(show, D) isa String
D = @docmatch foopa TestDocstrings
@test sprint(show, D) isa String
| WhereIsMyDocstring | https://github.com/thofma/WhereIsMyDocstring.jl.git |
["MIT"] | 0.2.0 | 4d7669dceb533e34d59bc57f6c13cd6b79f89a76 | docs | 3403 |
# WhereIsMyDocstring.jl
---
*Dude, where is my docstring?*
---
- Have you ever wondered, which docstring is included in a ```` ```@docs``` ```` block when writing the documentation?
- Are you tired of finding the magic syntax to include the *right* docstring of a method?
Enter: WhereIsMyDocstring.jl
## Status
[![Build Status](https://github.com/thofma/WhereIsMyDocstring.jl/actions/workflows/CI.yml/badge.svg?branch=master)](https://github.com/thofma/WhereIsMyDocstring.jl/actions/workflows/CI.yml?query=branch%3Amaster)
[![Coverage](https://codecov.io/gh/thofma/WhereIsMyDocstring.jl/branch/master/graph/badge.svg)](https://codecov.io/gh/thofma/WhereIsMyDocstring.jl)
[![Pkg Eval](https://juliaci.github.io/NanosoldierReports/pkgeval_badges/W/WhereIsMyDocstring.svg)](https://juliaci.github.io/NanosoldierReports/pkgeval_badges/report.html)
## Installation
Since WhereIsMyDocstring.jl is a registered package, it can be simply installed as follows:
```
julia> using Pkg; Pkg.add("WhereIsMyDocstring")
```
## Usage
The package provides the `@docmatch` macro, which allows one to simulate the behaviour of ```` ```@docs``` ```` blocks interactively. This is helpful in case a function has many different methods and docstrings, and one wants to include a specific one. In particular in the presence of type parameters, this can be a frustrating experience due to https://github.com/JuliaLang/julia/issues/29437. Here is a simple example:
```
julia> using WhereIsMyDocstring
julia> @docmatch sin
2-element Vector{WhereIsMyDocstring.DocStr}:
Base.sin
Content:
sin(x) [...]
Signature type:
Tuple{Number}
Include in ```@docs``` block:
Base.sin(::Number)
Source:
math.jl:490
====================================================================================
Base.sin
Content:
sin(A::AbstractMatrix) [...]
Signature type:
Tuple{AbstractMatrix{<:Real}}
Include in ```@docs``` block:
Base.sin(::AbstractMatrix{<:Real})
Source:
/usr/share/julia/stdlib/v1.10/LinearAlgebra/src/dense.jl:956
====================================================================================
```
The macro returns the docstrings (including metadata). In view of ```` ```@docs``` ```` blocks, the most important information is the "Include in ..." field. This provides the right invocation to include the specific docstring. For example, if we want to include the second docstring, in our documentation markdown source we would write:
````
```@docs
Base.sin(::AbstractMatrix{<:Real})
```
````
A more complicated example is:
````julia-repl
julia> "blub"
function foo(x::Vector{S}, z::Matrix{T} = 1; y::Number = 2) where {S, T <: S}
end
julia> @docmatch foo
1-element Vector{WhereIsMyDocstring.DocStr}:
foo
Content:
blub [...]
Signature type:
Union{Tuple{Vector{S}}, Tuple{T}, Tuple{S}, Tuple{Vector{S}, Matrix{T}}} where {S, T<:S}
Include in ```@docs``` block:
foo(::Union{Tuple{Vector{S}}, Tuple{T}, Tuple{S}, Tuple{Vector{S}, Matrix{T}}} where {S, T<:S})
try the following:
foo(::Array{S, 1}, ::Array{T, 2}) where {S, T<:S}
Source:
REPL[2]:1
````
Note that the type of the signature is garbled due to https://github.com/JuliaLang/julia/issues/29437, which also messes up the lookup. Here we are warned about this, and a suggested fix is provided: `foo(::Array{S, 1}, ::Array{T, 2}) where {S, T<:S}`.
| WhereIsMyDocstring | https://github.com/thofma/WhereIsMyDocstring.jl.git |
["Apache-2.0"] | 0.10.7 | a6448d0f450d85be5777659e695d67c19ec6a707 | code | 1709 |
using NiLangCore, BenchmarkTools
bg = BenchmarkGroup()
# pop!/push!
bg["NiLang"] = @benchmarkable begin
@instr PUSH!(x)
@instr POP!(x)
end seconds=1 setup=(x=3.0)
# @invcheckoff pop!/push!
bg["NiLang-@invcheckoff"] = @benchmarkable begin
@instr @invcheckoff PUSH!(x)
@instr @invcheckoff POP!(x)
end seconds=1 setup=(x=3.0)
# @invcheckoff pop!/push!
bg["NiLang-@invcheckoff-@inbounds"] = @benchmarkable begin
@instr @invcheckoff @inbounds PUSH!(x)
@instr @invcheckoff @inbounds POP!(x)
end seconds=1 setup=(x=3.0)
# Julia pop!/push!
bg["Julia"] = @benchmarkable begin
push!(stack, x)
x = pop!(stack)
end seconds=1 setup=(x=3.0; stack=Float64[])
# FastStack-inbounds-Any
bg["FastStack-inbounds-Any"] = @benchmarkable begin
@inbounds push!(stack, x)
@inbounds pop!(stack)
end seconds=1 setup=(x=3.0; stack=FastStack(10))
# Julia pop!/push! with an untyped (Any) stack
bg["Julia-Any"] = @benchmarkable begin
push!(stack, x)
x = pop!(stack)
end seconds=1 setup=(x=3.0; stack=Any[])
# setindex
bg["setindex"] = @benchmarkable begin
stack[2] = x
x = 0.0
x = stack[2]
end seconds=1 setup=(x=3.0; stack=Float64[1.0, 2.0])
# setindex-inbounds
bg["setindex-inbounds"] = @benchmarkable begin
@inbounds stack[2] = x
x = 0.0
@inbounds x = stack[2]
end seconds=1 setup=(x=3.0; stack=Float64[1.0, 2.0])
# FastStack
bg["FastStack"] = @benchmarkable begin
push!(stack, x)
x = 0.0
x = pop!(stack)
end seconds=1 setup=(x=3.0; stack=FastStack{Float64}(10))
# FastStack-inbounds
bg["FastStack-inbounds"] = @benchmarkable begin
@inbounds push!(stack, x)
x = 0.0
@inbounds x = pop!(stack)
end seconds=1 setup=(x=3.0; stack=FastStack{Float64}(10))
tune!(bg)
run(bg)
| NiLangCore | https://github.com/GiggleLiu/NiLangCore.jl.git |
["Apache-2.0"] | 0.10.7 | a6448d0f450d85be5777659e695d67c19ec6a707 | code | 2297 |
using Zygote
f(x, y) = (x+exp(y), y)
invf(x, y) = (x-exp(y), y)
# ∂L/∂x2 = ∂L/∂x1*∂x1/∂x2 + ∂L/∂y1*∂y1/∂y2 = ∂L/∂x1*invf'(x2) + ∂L/∂y1*invf'(y2)
x1, y1 = 1.4, 4.4
x2, y2 = f(x1, y1)
function gf(x, y, gx, gy)
x2, y2 = f(x, y)
invJ1 = gradient((x2, y2)->invf(x2, y2)[1], x2, y2)
invJ2 = gradient((x2, y2)->invf(x2, y2)[2], x2, y2)
return (x2, y2, gx, gy)
end
gradient((x, y)->invf(x, y)[1], x2, y2)
mutable struct A{T}
x::T
end
Base.:*(x1::A, x2::A) = A(x1.x*x2.x)
Base.:+(x1::A, x2::A) = A(x1.x+x2.x)
Base.zero(::A{T}) where T = A(T(0))
struct A2{T}
x::T
end
Base.:*(x1::A2, x2::A2) = A2(x1.x*x2.x)
Base.:+(x1::A2, x2::A2) = A2(x1.x+x2.x)
Base.zero(::A2{T}) where T = A2(T(0))
struct BG{T}
x::T
g::BG{T}
BG(x::T) where T = new{T}(x)
end
mutable struct AG{T}
x::T
g::AG{T}
AG(x::T) where T = new{T}(x)
AG(x::T, g::TG) where {T,TG} = new{T}(x, T(g))
end
Base.:*(x1::AG, x2::AG) = AG(x1.x*x2.x)
Base.:+(x1::AG, x2::AG) = AG(x1.x+x2.x)
Base.zero(::AG{T}) where T = AG(T(0))
init(ag::AG{T}) where T = (ag.g = AG(T(0)))
using BenchmarkTools
ma = fill(A(1.0), 100,100)
ma2 = fill(A2(1.0), 100,100)
function f(ma, mb)
M, N, K = size(ma, 1), size(mb, 2), size(ma, 2)
res = fill(zero(ma[1]), M, N)
for i=1:M
for j=1:N
for k=1:K
@inbounds res[i,j] += ma[i,k]*mb[k,j]
end
end
end
return res
end
@benchmark f(ma, ma)
@benchmark f(ma2, ma2)
ma = fill(AG(1.0), 100,100)
@benchmark ma*ma
a = A(0.4)
ag = AG(0.4)
using NiLangCore
@benchmark isdefined($ag, :g)
@benchmark $ag + $ag
ag.g = AG(0.0)
@benchmark $a + $a
struct SG{T}
x::T
g::Ref{T}
SG(x::T) where T = new{T}(x)
end
Base.:*(x1::SG, x2::SG) = SG(x1.x*x2.x)
Base.:+(x1::SG, x2::SG) = SG(x1.x+x2.x)
Base.zero(::SG{T}) where T = SG(T(0))
ma = fill(SG(1.0), 100,100)
@benchmark ma*ma
using NiLang, NiLang.AD
@i function test(x, one, N::Int)
for i = 1:N
x += one
end
end
invcheckon(true)
@benchmark test'(Loss(0.0), 1.0, 1000000)
| NiLangCore | https://github.com/GiggleLiu/NiLangCore.jl.git |
["Apache-2.0"] | 0.10.7 | a6448d0f450d85be5777659e695d67c19ec6a707 | code | 382 |
using Documenter, NiLangCore
makedocs(;
modules=[NiLangCore],
format=Documenter.HTML(),
pages=[
"Home" => "index.md",
],
repo="https://github.com/GiggleLiu/NiLangCore.jl/blob/{commit}{path}#L{line}",
sitename="NiLangCore.jl",
authors="JinGuo Liu, thautwarm",
assets=String[],
)
deploydocs(;
repo="github.com/GiggleLiu/NiLangCore.jl",
)
| NiLangCore | https://github.com/GiggleLiu/NiLangCore.jl.git |
["Apache-2.0"] | 0.10.7 | a6448d0f450d85be5777659e695d67c19ec6a707 | code | 6174 |
############# function properties #############
export isreversible, isreflexive, isprimitive
export protectf
"""
isreversible(f, ARGT)
Return `true` if a function is reversible.
"""
isreversible(f, ::Type{ARGT}) where ARGT = hasmethod(~f, ARGT)
"""
isreflexive(f)
Return `true` if a function is self-inverse.
"""
isreflexive(f) = (~f) === f
"""
isprimitive(f)
Return `true` if `f` is an `instruction` that can not be decomposed anymore.
"""
isprimitive(f) = false
############# ancillas ################
export InvertibilityError, @invcheck
"""
deanc(a, b)
Deallocate variable `a` with value `b`. It will throw an error if
* `a` and `b` are objects with different types,
* `a` is not equal to `b` (for floating point numbers, an error within `NiLangCore.GLOBAL_ATOL[]` is allowed).
"""
function deanc end
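# Illustrative usage (assuming the default tolerance):
# deanc(3.0, 3.0) # passes silently
# deanc(3.0, 3.1) # throws InvertibilityError
# deanc(3.0, 3) # throws InvertibilityError (type mismatch)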
function deanc(a::T, b::T) where T <: AbstractFloat
if a !== b && abs(b - a) > GLOBAL_ATOL[]
throw(InvertibilityError("deallocate fail (floating point numbers): $a ≂̸ $b"))
end
end
deanc(x::T, val::T) where T<:Tuple = deanc.(x, val)
deanc(x::T, val::T) where T<:AbstractArray = x === val || deanc.(x, val)
deanc(a::T, b::T) where T<:AbstractString = a === b || throw(InvertibilityError("deallocate fail (string): $a ≂̸ $b"))
function deanc(x::T, val::T) where T<:Dict
if x !== val
if length(x) != length(val)
throw(InvertibilityError("deallocate fail (dict): length of dict not the same, got $(length(x)) and $(length(val))!"))
else
for (k, v) in x
if haskey(val, k)
deanc(x[k], val[k])
else
throw(InvertibilityError("deallocate fail (dict): key $k of dict does not exist!"))
end
end
end
end
end
deanc(a, b) = throw(InvertibilityError("deallocate fail (type mismatch): `$(typeof(a))` and `$(typeof(b))`"))
@generated function deanc(a::T, b::T) where T
nf = fieldcount(a)
if isprimitivetype(T)
:(a === b || throw(InvertibilityError("deallocate fail (primitive): $a ≂̸ $b")))
else
Expr(:block, [:($deanc(a.$NAME, b.$NAME)) for NAME in fieldnames(T)]...)
end
end
"""
InvertibilityError <: Exception
InvertibilityError(ex)
The error for irreversible statements.
"""
struct InvertibilityError <: Exception
ex
end
"""
@invcheck x val
The macro version `NiLangCore.deanc`, with more informative error.
"""
macro invcheck(x, val)
esc(_invcheck(x, val))
end
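# Illustrative: `@invcheck x 3.0` expands to a try/catch around `deanc(x, 3.0)`
# that reports which check failed before rethrowing the InvertibilityError.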
# the expression for reversibility checking
function _invcheck(x, val)
Expr(:try, Expr(:block, :($deanc($x, $val))), :e, Expr(:block,
:(println("deallocate fail `$($(QuoteNode(x))) → $($(QuoteNode(val)))`")),
:(throw(e)))
)
end
_invcheck(docheck::Bool, arg, res) = docheck ? _invcheck(arg, res) : nothing
"""
chfield(x, field, val)
Change a `field` of an object `x`.
The `field` can be a `Val` type
```jldoctest; setup=:(using NiLangCore)
julia> chfield(1+2im, Val(:im), 5)
1 + 5im
```
or a function
```jldoctest; setup=:(using NiLangCore)
julia> using NiLangCore
julia> struct GVar{T, GT}
x::T
g::GT
end
julia> @fieldview xx(x::GVar) = x.x
julia> chfield(GVar(1.0, 0.0), xx, 2.0)
GVar{Float64, Float64}(2.0, 0.0)
```
"""
function chfield end
########### Inv ##########
export Inv, invtype
"""
Inv{FT} <: Function
Inv(f)
The inverse of a function.
"""
struct Inv{FT} <: Function
f::FT
end
Inv(f::Inv) = f.f
@static if VERSION >= v"1.6"
Base.:~(f::Base.ComposedFunction) = (~(f.inner)) ∘ (~(f.outer))
end
Base.:~(f::Function) = Inv(f)
Base.:~(::Type{Inv{T}}) where T = T # the inverse of a destructor type is the constructor type
Base.:~(::Type{T}) where T = Inv{T} # for a type, the inverse acts as a destructor
Base.show(io::IO, b::Inv) = print(io, "~$(b.f)")
Base.display(bf::Inv) = print(bf)
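# Illustrative: `~` wraps a function and unwraps a wrapped one,
# ~sin # Inv(sin), shown as "~sin"
# ~(~sin) === sin # true, since Inv(f::Inv) = f.f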
"""
protectf(f)
Protect a function from being inverted, useful when using an callable object.
"""
protectf(x) = x
protectf(x::Inv) = x.f
invtype(::Type{T}) where T = Inv{<:T}
######### Infer
export PlusEq, MinusEq, XorEq, MulEq, DivEq
"""
PlusEq{FT} <: Function
PlusEq(f)
Called when executing `out += f(args...)` instruction. The following two statements are same
```jldoctest; setup=:(using NiLangCore)
julia> x, y, z = 0.0, 2.0, 3.0
(0.0, 2.0, 3.0)
julia> x, y, z = PlusEq(*)(x, y, z)
(6.0, 2.0, 3.0)
julia> x, y, z = 0.0, 2.0, 3.0
(0.0, 2.0, 3.0)
julia> @instr x += y*z
julia> x, y, z
(6.0, 2.0, 3.0)
```
"""
struct PlusEq{FT} <: Function
f::FT
end
"""
MinusEq{FT} <: Function
MinusEq(f)
Called when executing `out -= f(args...)` instruction. See `PlusEq` for detail.
"""
struct MinusEq{FT} <: Function
f::FT
end
"""
MulEq{FT} <: Function
MulEq(f)
Called when executing `out *= f(args...)` instruction. See `PlusEq` for detail.
"""
struct MulEq{FT} <: Function
f::FT
end
"""
DivEq{FT} <: Function
DivEq(f)
Called when executing `out /= f(args...)` instruction. See `PlusEq` for detail.
"""
struct DivEq{FT} <: Function
f::FT
end
"""
XorEq{FT} <: Function
XorEq(f)
Called when executing `out ⊻= f(args...)` instruction. See `PlusEq` for detail.
"""
struct XorEq{FT} <: Function
f::FT
end
const OPMX{FT} = Union{PlusEq{FT}, MinusEq{FT}, XorEq{FT}, MulEq{FT}, DivEq{FT}}
for (TP, OP) in [(:PlusEq, :+), (:MinusEq, :-), (:XorEq, :⊻)]
@eval (inf::$TP)(out!, args...; kwargs...) = $OP(out!, inf.f(args...; kwargs...)), args...
@eval (inf::$TP)(out!::Tuple, args...; kwargs...) = $OP.(out!, inf.f(args...; kwargs...)), args... # e.g. allow `(x, y) += sincos(a)`
end
Base.:~(op::PlusEq) = MinusEq(op.f)
Base.:~(om::MinusEq) = PlusEq(om.f)
Base.:~(op::MulEq) = DivEq(op.f)
Base.:~(om::DivEq) = MulEq(om.f)
Base.:~(om::XorEq) = om
for (T, S) in [(:PlusEq, "+="), (:MinusEq, "-="), (:MulEq, "*="), (:DivEq, "/="), (:XorEq, "⊻=")]
@eval Base.display(o::$T) = print($S, "(", o.f, ")")
@eval Base.display(o::Type{$T}) = print($S)
@eval Base.show_function(io::IO, o::$T, compact::Bool) = print(io, "$($S)($(o.f))")
@eval Base.show_function(io::IO, ::MIME"plain/text", o::$T, compact::Bool) = Base.show(io, o)
end
| NiLangCore | https://github.com/GiggleLiu/NiLangCore.jl.git |
["Apache-2.0"] | 0.10.7 | a6448d0f450d85be5777659e695d67c19ec6a707 | code | 333 |
module NiLangCore
using MLStyle
using TupleTools
include("lens.jl")
include("utils.jl")
include("symboltable.jl")
include("stack.jl")
include("Core.jl")
include("vars.jl")
include("instr.jl")
include("dualcode.jl")
include("preprocess.jl")
include("variable_analysis.jl")
include("compiler.jl")
include("checks.jl")
end # module
| NiLangCore | https://github.com/GiggleLiu/NiLangCore.jl.git |
|
[
"Apache-2.0"
] | 0.10.7 | a6448d0f450d85be5777659e695d67c19ec6a707 | code | 1889 | export check_inv, world_similar, almost_same
@nospecialize
"""
check_inv(f, args; atol::Real=1e-8, verbose::Bool=false, kwargs...)
Return true if `f(args..., kwargs...)` is reversible.
"""
function check_inv(f, args; atol::Real=1e-8, verbose::Bool=false, kwargs...)
args0 = deepcopy(args)
args_ = f(args...; kwargs...)
args = length(args) == 1 ? (args_,) : args_
args_ = (~f)(args...; kwargs...)
args = length(args) == 1 ? (args_,) : args_
world_similar(args0, args, atol=atol, verbose=verbose)
end
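# Example (illustrative sketch): `PlusEq(*)` and its dual `MinusEq(*)`
# (defined in Core.jl) round-trip the arguments, so the check passes:
#
# julia> check_inv(PlusEq(*), (0.0, 2.0, 3.0))
# true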
function world_similar(a, b; atol::Real=1e-8, verbose::Bool=false)
for (xa, xb) in zip(a, b)
if !almost_same(xa, xb; atol=atol)
verbose && println("$xa does not match $xb")
return false
end
end
return true
end
@specialize
"""
almost_same(a, b; atol=GLOBAL_ATOL[], kwargs...) -> Bool
Return true if `a` and `b` are almost same w.r.t. `atol`.
"""
function almost_same(a::T, b::T; atol=GLOBAL_ATOL[], kwargs...) where T <: AbstractFloat
a === b || abs(b - a) < atol
end
function almost_same(a::TA, b::TB; kwargs...) where {TA, TB}
false
end
function almost_same(a::T, b::T; kwargs...) where {T<:Dict}
length(a) != length(b) && return false
for (k, v) in a
haskey(b, k) && almost_same(v, b[k]; kwargs...) || return false
end
return true
end
@generated function almost_same(a::T, b::T; kwargs...) where T
nf = fieldcount(a)
if isprimitivetype(T)
:(a === b)
else
quote
res = true
@nexprs $nf i-> res = res && almost_same(getfield(a, i), getfield(b, i); kwargs...)
res
end
end
end
almost_same(x::T, y::T; kwargs...) where T<:AbstractArray = all(almost_same.(x, y; kwargs...))
almost_same(x::FastStack, y::FastStack; kwargs...) = all(almost_same.(x.data[1:x.top[]], y.data[1:y.top[]]; kwargs...))
| NiLangCore | https://github.com/GiggleLiu/NiLangCore.jl.git |
|
[
"Apache-2.0"
] | 0.10.7 | a6448d0f450d85be5777659e695d67c19ec6a707 | code | 15496 | struct CompileInfo
invcheckon::Ref{Bool}
end
CompileInfo() = CompileInfo(Ref(true))
function compile_body(m::Module, body::AbstractVector, info)
out = []
for ex in body
ex_ = compile_ex(m, ex, info)
ex_ !== nothing && push!(out, ex_)
end
return out
end
deleteindex!(d::AbstractDict, index) = delete!(d, index)
@inline function map_func(x::Symbol)
if x == :+=
PlusEq, false
elseif x == :.+=
PlusEq, true
elseif x == :-=
MinusEq, false
elseif x == :.-=
MinusEq, true
elseif x == :*=
MulEq, false
elseif x == :.*=
MulEq, true
elseif x == :/=
DivEq, false
elseif x == :./=
DivEq, true
elseif x == :⊻=
XorEq, false
elseif x == :.⊻=
XorEq, true
else
error("`$x` can not be mapped to a reversible function.")
end
end
# e.g. map `x += sin(z)` => `PlusEq(sin)(x, z)`.
function to_standard_format(ex::Expr)
head::Symbol = ex.head
F, isbcast = map_func(ex.head)
a, b = ex.args
if !isbcast
@match b begin
:($f($(args...); $(kwargs...))) => :($F($f)($a, $(args...); $(kwargs...)))
:($f($(args...))) => :($F($f)($a, $(args...)))
:($x || $y) => :($F($logical_or)($a, $x, $y))
:($x && $y) => :($F($logical_and)($a, $x, $y))
_ => :($F(identity)($a, $b))
end
else
@match b begin
:($f.($(args...); $(kwargs...))) => :($F($f).($a, $(args...); $(kwargs...)))
:($f.($(args...))) => :($F($f).($a, $(args...)))
:($f($(args...); $(kwargs...))) => :($F($(removedot(f))).($a, $(args...); $(kwargs...)))
:($f($(args...))) => :($F($(removedot(f))).($a, $(args...)))
_ => :($F(identity).($a, $b))
end
end
end
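# Example (illustrative sketch): the broadcasting variants are mapped to
# broadcasted calls of the same wrapper types:
#
# julia> NiLangCore.to_standard_format(:(x .+= sin.(y)))
# :((PlusEq)(sin).(x, y))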
logical_or(a, b) = a || b
logical_and(a, b) = a && b
"""
compile_ex(m::Module, ex, info)
Compile a NiLang statement to a regular Julia statement.
"""
function compile_ex(m::Module, ex, info)
@match ex begin
:($a += $b) || :($a .+= $b) ||
:($a -= $b) || :($a .-= $b) ||
:($a *= $b) || :($a .*= $b) ||
:($a /= $b) || :($a ./= $b) ||
:($a ⊻= $b) || :($a .⊻= $b) => compile_ex(m, to_standard_format(ex), info)
:(($t1=>$t2)($x)) => assign_ex(x, :(convert($t2, $x)), info.invcheckon[])
:(($t1=>$t2).($x)) => assign_ex(x, :(convert.($t2, $x)), info.invcheckon[])
# multi args expanded in preprocessing
# general
:($x ↔ $y) => begin
e1 = isemptyvar(x)
e2 = isemptyvar(y)
if e1 && e2
nothing
elseif e1 && !e2
_push_value(x, _pop_value(y), info.invcheckon[])
elseif !e1 && e2
_push_value(y, _pop_value(x), info.invcheckon[])
else
tmp = gensym("temp")
Expr(:block, :($tmp = $y), assign_ex(y, x, info.invcheckon[]), assign_ex(x, tmp, info.invcheckon[]))
end
end
# stack
:($s[end] → $x) => begin
if info.invcheckon[]
y = gensym("result")
Expr(:block, :($y=$loaddata($x, $pop!($s))), _invcheck(y, x), assign_ex(x, y, info.invcheckon[]))
else
y = gensym("result")
Expr(:block, :($y=$loaddata($x, $pop!($s))), assign_ex(x, y, info.invcheckon[]))
end
end
:($s[end+1] ← $x) => :($push!($s, $_copy($x)))
# dict
:($x[$index] ← $tp) => begin
assign_expr = :($x[$index] = $tp)
if info.invcheckon[]
Expr(:block, _assert_nokey(x, index), assign_expr)
else
assign_expr
end
end
:($x[$index] → $tp) => begin
delete_expr = :($(deleteindex!)($x, $index))
if info.invcheckon[]
Expr(:block, _invcheck(:($x[$index]), tp), delete_expr)
else
delete_expr
end
end
# general
:($x ← $tp) => :($x = $tp)
:($x → $tp) => begin
if info.invcheckon[]
_invcheck(x, tp)
end
end
:($f($(args...))) => begin
assignback_ex(ex, info.invcheckon[])
end
:($f.($(allargs...))) => begin
args, kwargs = seperate_kwargs(allargs)
symres = gensym("results")
ex = :($symres = $unzipped_broadcast($kwargs, $f, $(args...)))
Expr(:block, ex, assign_vars(args, symres, info.invcheckon[]).args...)
end
Expr(:if, _...) => compile_if(m, copy(ex), info)
:(while ($pre, $post); $(body...); end) => begin
whilestatement(pre, post, compile_body(m, body, info), info)
end
:(for $i=$range; $(body...); end) => begin
forstatement(i, range, compile_body(m, body, info), info, nothing)
end
:(@simd $line for $i=$range; $(body...); end) => begin
forstatement(i, range, compile_body(m, body, info), info, Symbol("@simd")=>line)
end
:(@threads $line for $i=$range; $(body...); end) => begin
forstatement(i, range, compile_body(m, body, info), info, Symbol("@threads")=>line)
end
:(@avx $line for $i=$range; $(body...); end) => begin
forstatement(i, range, compile_body(m, body, info), info, Symbol("@avx")=>line)
end
:(begin $(body...) end) => begin
Expr(:block, compile_body(m, body, info)...)
end
:(@safe $line $subex) => subex
:(@inbounds $line $subex) => Expr(:macrocall, Symbol("@inbounds"), line, compile_ex(m, subex, info))
:(@invcheckoff $line $subex) => begin
state = info.invcheckon[]
info.invcheckon[] = false
ex = compile_ex(m, subex, info)
info.invcheckon[] = state
ex
end
:(@cuda $line $(args...)) => begin
fcall = @match args[end] begin
:($f($(args...))) => Expr(:call,
Expr(:->,
:(args...),
Expr(:block,
:($f(args...)),
nothing
)
),
args...
)
_ => error("expect a function after @cuda, got $(args[end])")
end
Expr(:macrocall, Symbol("@cuda"), line, args[1:end-1]..., fcall)
end
:(@launchkernel $line $device $thread $ndrange $f($(args...))) => begin
res = gensym("results")
Expr(:block,
:($res = $f($device, $thread)($(args...); ndrange=$ndrange)),
:(wait($res))
)
end
:(nothing) => ex
::Nothing => ex
::LineNumberNode => ex
_ => error("statement not supported: `$ex`")
end
end
function compile_if(m::Module, ex, info)
pres = []
posts = []
ex = analyse_if(m, ex, info, pres, posts)
Expr(:block, pres..., ex, posts...)
end
function analyse_if(m::Module, ex, info, pres, posts)
var = gensym("branch")
if ex.head == :if
pre, post = ex.args[1].args
ex.args[1] = var
elseif ex.head == :elseif
pre, post = ex.args[1].args[2].args
ex.args[1].args[2] = var
end
push!(pres, :($var = $pre))
if info.invcheckon[]
push!(posts, _invcheck(var, post))
end
ex.args[2] = Expr(:block, compile_body(m, ex.args[2].args, info)...)
if length(ex.args) == 3
if ex.args[3].head == :elseif
ex.args[3] = analyse_if(m, ex.args[3], info, pres, posts)
elseif ex.args[3].head == :block
ex.args[3] = Expr(:block, compile_body(m, ex.args[3].args, info)...)
end
end
ex
end
function whilestatement(precond, postcond, body, info)
ex = Expr(:block,
Expr(:while,
precond,
Expr(:block, body...),
),
)
if info.invcheckon[]
pushfirst!(ex.args, _invcheck(postcond, false))
push!(ex.args[end].args[end].args,
_invcheck(postcond, true)
)
end
ex
end
function forstatement(i, range, body, info, mcr)
assigns, checkers = compile_range(range)
exf = Expr(:for, :($i=$range), Expr(:block, body...))
if !(mcr isa Nothing)
exf = Expr(:macrocall, mcr.first, mcr.second, exf)
end
if info.invcheckon[]
Expr(:block, assigns..., exf, checkers...)
else
exf
end
end
_pop_value(x) = @match x begin
:($s[end]) => :($pop!($s))
:($s[$ind]) => :($pop!($s, $ind)) # dict (note: popping a vector element by index is not allowed.)
:($x::$T) => :($(_pop_value(x))::$T)
:(($(args...)),) => Expr(:tuple, _pop_value.(args)...)
_ => x
end
_push_value(x, val, invcheck) = @match x begin
:($s[end+1]) => :($push!($s, $val))
:($s[$arg]::∅) => begin
ex = :($s[$arg] = $val)
if invcheck
Expr(:block, _assert_nokey(s, arg), ex)
else
ex
end
end
_ => assign_ex(x, val, invcheck)
end
function _assert_nokey(x, index)
str = "dictionary `$x` already has key `$index`"
Expr(:if, :(haskey($x, $index)), :(throw(InvertibilityError($str))))
end
_copy(x) = copy(x)
_copy(x::Tuple) = copy.(x)
export @code_julia
"""
@code_julia ex
Get the compiled (plain Julia) expression of `ex`.
```julia
julia> @code_julia x += exp(3.0)
quote
var"##results#267" = ((PlusEq)(exp))(x, 3.0)
x = var"##results#267"[1]
try
(NiLangCore.deanc)(3.0, var"##results#267"[2])
catch e
@warn "deallocate fail: `3.0 → var\"##results#267\"[2]`"
throw(e)
end
end
julia> @code_julia @invcheckoff x += exp(3.0)
quote
var"##results#257" = ((PlusEq)(exp))(x, 3.0)
x = var"##results#257"[1]
end
```
"""
macro code_julia(ex)
QuoteNode(compile_ex(__module__, ex, CompileInfo()))
end
compile_ex(m::Module, ex) = compile_ex(m, ex, CompileInfo())
export @i
"""
@i function fname(args..., kwargs...) ... end
@i struct sname ... end
Define a reversible function/type.
```jldoctest; setup=:(using NiLangCore)
julia> @i function test(out!, x)
out! += identity(x)
end
julia> test(0.2, 0.8)
(1.0, 0.8)
```
See `test/compiler.jl` for more examples.
"""
macro i(ex)
ex = gen_ifunc(__module__, ex)
ex.args[1] = :(Base.@__doc__ $(ex.args[1]))
esc(ex)
end
# generate the reversed function
function gen_ifunc(m::Module, ex)
mc, fname, args, ts, body = precom(m, ex)
fname = _replace_opmx(fname)
# implementations
ftype = get_ftype(fname)
head = :($fname($(args...)) where {$(ts...)})
dfname = dual_fname(fname)
dftype = get_ftype(dfname)
fdef1 = Expr(:function, head, Expr(:block, compile_body(m, body, CompileInfo())..., functionfoot(args)))
dualhead = :($dfname($(args...)) where {$(ts...)})
fdef2 = Expr(:function, dualhead, Expr(:block, compile_body(m, dual_body(m, body), CompileInfo())..., functionfoot(args)))
if mc !== nothing
fdef1 = Expr(:macrocall, mc[1], mc[2], fdef1)
fdef2 = Expr(:macrocall, mc[1], mc[2], fdef2)
end
#ex = :(Base.@__doc__ $fdef1; if $ftype != $dftype; $fdef2; end)
ex = Expr(:block, fdef1,
Expr(:if, :($ftype != $dftype), fdef2),
)
end
export nilang_ir
"""
nilang_ir(ex; reversed::Bool=false)
Get the NiLang reversible IR from the function expression `ex`,
return the reversed function if `reversed` is `true`.
This IR is not directly executable in Julia; please use
`macroexpand(Main, :(@i function .... end))` to get the Julia expression of a reversible function.
```jldoctest; setup=:(using NiLangCore)
julia> ex = :(@inline function f(x!::T, y) where T
@routine begin
anc ← zero(T)
anc += identity(x!)
end
x! += y * anc
~@routine
end);
julia> NiLangCore.nilang_ir(Main, ex) |> NiLangCore.rmlines
:(@inline function f(x!::T, y) where T
begin
anc ← zero(T)
anc += identity(x!)
end
x! += y * anc
begin
anc -= identity(x!)
anc → zero(T)
end
end)
julia> NiLangCore.nilang_ir(Main, ex; reversed=true) |> NiLangCore.rmlines
:(@inline function (~f)(x!::T, y) where T
begin
anc ← zero(T)
anc += identity(x!)
end
x! -= y * anc
begin
anc -= identity(x!)
anc → zero(T)
end
end)
```
"""
function nilang_ir(m::Module, ex; reversed::Bool=false)
mc, fname, args, ts, body = precom(m, ex)
fname = _replace_opmx(fname)
# implementations
if reversed
dfname = :(~$fname) # use fake head for readability
head = :($dfname($(args...)) where {$(ts...)})
body = dual_body(m, body)
else
head = :($fname($(args...)) where {$(ts...)})
end
fdef = Expr(:function, head, Expr(:block, body...))
if mc !== nothing
fdef = Expr(:macrocall, mc[1], mc[2], fdef)
end
fdef
end
# separate and return `args` and `kwargs`
@inline function seperate_kwargs(args)
if length(args) > 0 && args[1] isa Expr && args[1].head == :parameters
args = args[2:end], args[1]
else
args, Expr(:parameters)
end
end
# add a `return` statement to the end of the function body.
function functionfoot(args)
args = get_argname.(seperate_kwargs(args)[1])
if length(args) == 1
if args[1] isa Expr && args[1].head == :(...)
args[1].args[1]
else
args[1]
end
else
:(($(args...),))
end
end
# provide syntactic sugar for defining `x += f(args...)`-like functions
_replace_opmx(ex) = @match ex begin
:(:+=($f)) => :($(gensym())::PlusEq{typeof($f)})
:(:-=($f)) => :($(gensym())::MinusEq{typeof($f)})
:(:*=($f)) => :($(gensym())::MulEq{typeof($f)})
:(:/=($f)) => :($(gensym())::DivEq{typeof($f)})
:(:⊻=($f)) => :($(gensym())::XorEq{typeof($f)})
_ => ex
end
export @instr
"""
@instr ex
Execute a reversible instruction.
"""
macro instr(ex)
ex = precom_ex(__module__, ex, NiLangCore.PreInfo())
#variable_analysis_ex(ex, SymbolTable())
esc(Expr(:block, NiLangCore.compile_ex(__module__, ex, CompileInfo()), nothing))
end
# the range of for statement
compile_range(range) = @match range begin
:($start:$step:$stop) => begin
start_, step_, stop_ = gensym("start"), gensym("step"), gensym("stop")
Any[:($start_ = $start),
:($step_ = $step),
:($stop_ = $stop)],
Any[_invcheck(start_, start),
_invcheck(step_, step),
_invcheck(stop_, stop)]
end
:($start:$stop) => begin
start_, stop_ = gensym("start"), gensym("stop")
Any[:($start_ = $start),
:($stop_ = $stop)],
Any[_invcheck(start_, start),
_invcheck(stop_, stop)]
end
:($list) => begin
list_ = gensym("iterable")
Any[:($list_ = deepcopy($list))],
Any[_invcheck(list_, list)]
end
end
"""
get_ftype(fname)
Return the function type, e.g.
* `obj::ABC` => `ABC`
* `f` => `typeof(f)`
"""
function get_ftype(fname)
@match fname begin
:($x::$tp) => tp
_ => :($NiLangCore._typeof($fname))
end
end
| NiLangCore | https://github.com/GiggleLiu/NiLangCore.jl.git |
|
[
"Apache-2.0"
] | 0.10.7 | a6448d0f450d85be5777659e695d67c19ec6a707 | code | 5191 | # get the expression of the inverse function
function dual_func(m::Module, fname, args, ts, body)
:(function $(:(~$fname))($(args...)) where {$(ts...)};
$(dual_body(m, body)...);
end)
end
# get the function name of the inverse function
function dual_fname(op)
@match op begin
:($x::$tp) => :($x::$invtype($tp))
:(~$x) => x
_ => :($(gensym("~$op"))::$_typeof(~$op))
end
end
_typeof(x) = typeof(x)
_typeof(x::Type{T}) where T = Type{T}
"""
dual_ex(m::Module, ex)
Get the dual expression of `ex`.
"""
function dual_ex(m::Module, ex)
@match ex begin
:(($t1=>$t2)($x)) => :(($t2=>$t1)($x))
:(($t1=>$t2).($x)) => :(($t2=>$t1).($x))
:($x ↔ $y) => dual_swap(x, y)
:($s[end+1] ← $x) => :($s[end] → $x)
:($s[end] → $x) => :($s[end+1] ← $x)
:($x → $val) => :($x ← $val)
:($x ← $val) => :($x → $val)
:($f($(args...))) => startwithdot(f) ? :($(getdual(removedot(f))).($(args...))) : :($(getdual(f))($(args...)))
:($f.($(args...))) => :($(getdual(f)).($(args...)))
:($a += $b) => :($a -= $b)
:($a .+= $b) => :($a .-= $b)
:($a -= $b) => :($a += $b)
:($a .-= $b) => :($a .+= $b)
:($a *= $b) => :($a /= $b)
:($a .*= $b) => :($a ./= $b)
:($a /= $b) => :($a *= $b)
:($a ./= $b) => :($a .*= $b)
:($a ⊻= $b) => :($a ⊻= $b)
:($a .⊻= $b) => :($a .⊻= $b)
Expr(:if, _...) => dual_if(m, copy(ex))
:(while ($pre, $post); $(body...); end) => begin
Expr(:while, :(($post, $pre)), Expr(:block, dual_body(m, body)...))
end
:(for $i=$start:$step:$stop; $(body...); end) => begin
Expr(:for, :($i=$stop:(-$step):$start), Expr(:block, dual_body(m, body)...))
end
:(for $i=$start:$stop; $(body...); end) => begin
j = gensym("j")
Expr(:for, :($j=$start:$stop), Expr(:block, :($i ← $stop-$j+$start), dual_body(m, body)..., :($i → $stop-$j+$start)))
end
:(for $i=$itr; $(body...); end) => begin
Expr(:for, :($i=Base.Iterators.reverse($itr)), Expr(:block, dual_body(m, body)...))
end
:(@safe $line $subex) => Expr(:macrocall, Symbol("@safe"), line, subex)
:(@cuda $line $(args...)) => Expr(:macrocall, Symbol("@cuda"), line, args[1:end-1]..., dual_ex(m, args[end]))
:(@launchkernel $line $(args...)) => Expr(:macrocall, Symbol("@launchkernel"), line, args[1:end-1]..., dual_ex(m, args[end]))
:(@inbounds $line $subex) => Expr(:macrocall, Symbol("@inbounds"), line, dual_ex(m, subex))
:(@simd $line $subex) => Expr(:macrocall, Symbol("@simd"), line, dual_ex(m, subex))
:(@threads $line $subex) => Expr(:macrocall, Symbol("@threads"), line, dual_ex(m, subex))
:(@avx $line $subex) => Expr(:macrocall, Symbol("@avx"), line, dual_ex(m, subex))
:(@invcheckoff $line $subex) => Expr(:macrocall, Symbol("@invcheckoff"), line, dual_ex(m, subex))
:(begin $(body...) end) => Expr(:block, dual_body(m, body)...)
:(nothing) => ex
::LineNumberNode => ex
::Nothing => ex
:() => ex
_ => error("can not invert target expression $ex")
end
end
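# Example (illustrative sketch): reversing a statement flips the operator,
# and reversing a stepped loop also flips the iteration order:
#
# julia> NiLangCore.dual_ex(Main, :(x += f(i)))
# :(x -= f(i))
#
# julia> NiLangCore.dual_ex(Main, :(for i=1:2:7; x += f(i); end)) |> NiLangCore.rmlines
# :(for i = 7:-2:1
#       x -= f(i)
#   end)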
function dual_if(m::Module, ex)
_dual_cond(cond) = @match cond begin
:(($pre, $post)) => :(($post, $pre))
end
if ex.head == :if
ex.args[1] = _dual_cond(ex.args[1])
elseif ex.head == :elseif
ex.args[1].args[2] = _dual_cond(ex.args[1].args[2])
end
ex.args[2] = Expr(:block, dual_body(m, ex.args[2].args)...)
if length(ex.args) == 3
if ex.args[3].head == :elseif
ex.args[3] = dual_if(m, ex.args[3])
elseif ex.args[3].head == :block
ex.args[3] = Expr(:block, dual_body(m, ex.args[3].args)...)
end
end
ex
end
function dual_swap(x, y)
e1 = isemptyvar(x)
e2 = isemptyvar(y)
if e1 && !e2 || !e1 && e2
:($(_dual_swap_var(x)) ↔ $(_dual_swap_var(y)))
else
:($y ↔ $x)
end
end
_dual_swap_var(x) = @match x begin
:($s[end+1]) => :($s[end])
:($x::∅) => :($x)
:($s[end]) => :($s[end+1])
_ => :($x::∅)
end
export @code_reverse
"""
@code_reverse ex
Get the reversed expression of `ex`.
```jldoctest; setup=:(using NiLangCore)
julia> @code_reverse x += exp(3.0)
:(x -= exp(3.0))
```
"""
macro code_reverse(ex)
QuoteNode(dual_ex(__module__, ex))
end
getdual(f) = @match f begin
:(~$f) => f
_ => :(~$f)
end
function dual_body(m::Module, body)
out = []
# fix function LineNumberNode
if length(body) > 1 && body[1] isa LineNumberNode && body[2] isa LineNumberNode
push!(out, body[1])
start = 2
else
start = 1
end
ptr = length(body)
# reverse the statements
len = 0
while ptr >= start
if ptr-len==0 || body[ptr-len] isa LineNumberNode
ptr-len != 0 && push!(out, body[ptr-len])
for j=ptr:-1:ptr-len+1
push!(out, dual_ex(m, body[j]))
end
ptr -= len+1
len = 0
else
len += 1
end
end
return out
end
| NiLangCore | https://github.com/GiggleLiu/NiLangCore.jl.git |
|
[
"Apache-2.0"
] | 0.10.7 | a6448d0f450d85be5777659e695d67c19ec6a707 | code | 5453 | export @dual, @selfdual, @dualtype
"""
@dual f invf
Define `f` and `invf` as a pair of dual instructions, i.e. inverses of each other.
"""
macro dual(f, invf)
esc(quote
if !$NiLangCore.isprimitive($f)
$NiLangCore.isprimitive(::typeof($f)) = true
end
if !$NiLangCore.isprimitive($invf)
$NiLangCore.isprimitive(::typeof($invf)) = true
end
if Base.:~($f) !== $invf
Base.:~(::typeof($f)) = $invf;
end
if Base.:~($invf) !== $f
Base.:~(::typeof($invf)) = $f;
end
end)
end
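# Example (illustrative sketch; `myinc`/`mydec` are hypothetical user functions,
# and NiLangCore's `isprimitive` trait is assumed to be defined in Core.jl):
#
# julia> myinc(x) = x + 1; mydec(x) = x - 1;
#
# julia> @dual myinc mydec
#
# julia> ~myinc === mydec
# true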
macro dualtype(t, invt)
esc(quote
$invtype($t) === $invt || begin
$NiLangCore.invtype(::Type{$t}) = $invt
$NiLangCore.invtype(::Type{T}) where T<:$t = $invt{T.parameters...}
end
$invtype($invt) === $t || begin
$NiLangCore.invtype(::Type{$invt}) = $t
$NiLangCore.invtype(::Type{T}) where T<:$invt = $t{T.parameters...}
end
end)
end
@dualtype PlusEq MinusEq
@dualtype DivEq MulEq
@dualtype XorEq XorEq
"""
@selfdual f
Define `f` as a self-dual instruction.
"""
macro selfdual(f)
esc(:(@dual $f $f))
end
export @const
@eval macro $(:const)(ex)
esc(ex)
end
export @skip!
macro skip!(ex)
esc(ex)
end
export @assignback
# TODO: include control flows.
"""
@assignback f(args...) [invcheck]
Assign input variables with output values: `args... = f(args...)`; turn off the invertibility error check if the second argument is `false`.
"""
macro assignback(ex, invcheck=true)
ex = precom_ex(__module__, ex, PreInfo())
esc(assignback_ex(ex, invcheck))
end
function assignback_ex(ex::Expr, invcheck::Bool)
@match ex begin
:($f($(args...))) => begin
symres = gensym("results")
ex = :($symres = $f($(args...)))
res = assign_vars(seperate_kwargs(args)[1], symres, invcheck)
pushfirst!(res.args, ex)
return res
end
_ => error("assign back fail, got $ex")
end
end
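# Example (illustrative sketch): the outputs of the reversible call are
# written back onto the input variables:
#
# julia> x, y = 1.0, 2.0;
#
# julia> @assignback PlusEq(identity)(x, y);
#
# julia> x, y
# (3.0, 2.0)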
"""
assign_vars(args, symres, invcheck)
Get the expression of assigning `symres` to `args`.
"""
function assign_vars(args, symres, invcheck)
exprs = []
for (i,arg) in enumerate(args)
exi = @match arg begin
:($ag...) => begin
i!=length(args) && error("`args...`-like arguments should only appear as the last argument!")
ex = :(ntuple(j->$symres[j+$(i-1)], length($ag)))
assign_ex(ag, i==1 ? :(length($ag) == 1 ? ($symres,) : $ex) : ex, invcheck)
end
_ => if length(args) == 1
assign_ex(arg, symres, invcheck)
else
assign_ex(arg, :($symres[$i]), invcheck)
end
end
exi !== nothing && push!(exprs, exi)
end
Expr(:block, exprs...)
end
error_message_fcall(arg) = """
function arguments should not contain function calls on variables, got `$arg`; try to decompose it into elementary statements, e.g. the statement `z += f(g(x))` should be written as
y += g(x)
z += y
If `g` is a dataview (a function mapping an object to one of its fields, or a bijective function), one can also use a pipeline like
z += f(x |> g)
"""
assign_ex(arg, res, invcheck) = @match arg begin
::Number || ::String => _invcheck(invcheck, arg, res)
::Symbol || ::GlobalRef => _isconst(arg) ? _invcheck(invcheck, arg, res) : :($arg = $res)
:(@skip! $line $x) => nothing
:(@fields $line $x) => assign_ex(x, Expr(:call, default_constructor, :(typeof($x)), Expr(:..., res)), invcheck)
:($x::∅) => assign_ex(x, res, invcheck)
:($x::$T) => assign_ex(x, :($loaddata($T, $res)), invcheck)
:($x.$k) => _isconst(x) ? _invcheck(invcheck, arg, res) : assign_ex(x, :(chfield($x, $(Val(k)), $res)), invcheck)
# tuples must be index through (x |> 1)
:($a |> tget($x)) => assign_ex(a, :($(TupleTools.insertat)($a, $x, ($res,))), invcheck)
:($a |> subarray($(ranges...))) => :(($res===view($a, $(ranges...))) || (view($a, $(ranges...)) .= $res))
:($x |> $f) => _isconst(x) ? _invcheck(invcheck, arg,res) : assign_ex(x, :(chfield($x, $f, $res)), invcheck)
:($x .|> $f) => _isconst(x) ? _invcheck(invcheck, arg,res) : assign_ex(x, :(chfield.($x, Ref($f), $res)), invcheck)
:($x') => _isconst(x) ? _invcheck(invcheck, arg, res) : assign_ex(x, :(chfield($x, adjoint, $res)), invcheck)
:(-$x) => _isconst(x) ? _invcheck(invcheck, arg,res) : assign_ex(x, :(chfield($x, -, $res)), invcheck)
:($t{$(p...)}($(args...))) => begin
if length(args) == 1
assign_ex(args[1], :($getfield($res, 1)), invcheck)
else
assign_vars(args, :($type2tuple($res)), invcheck)
end
end
:($f($(args...))) => all(_isconst, args) || error(error_message_fcall(arg))
:($f.($(args...))) => all(_isconst, args) || error(error_message_fcall(arg))
:($a[$(x...)]) => begin
:($a[$(x...)] = $res)
end
:(($(args...),)) => begin
# TODO: avoid possible repeated evaluation (not here, in swap)
Expr(:block, [assign_ex(args[i], :($res[$i]), invcheck) for i=1:length(args)]...)
end
_ => _invcheck(invcheck, arg, res)
end
export @assign
"""
@assign a b [invcheck]
Perform the assignment `a = b` in a reversible program.
Turn off the invertibility check if `invcheck` is false.
"""
macro assign(a, b, invcheck=true)
esc(assign_ex(a, b, invcheck))
end
| NiLangCore | https://github.com/GiggleLiu/NiLangCore.jl.git |
|
[
"Apache-2.0"
] | 0.10.7 | a6448d0f450d85be5777659e695d67c19ec6a707 | code | 3183 | export _zero, @fields
# update a field of a struct.
@inline @generated function field_update(main :: T, ::Val{Field}, value) where {T, Field}
fields = fieldnames(T)
Expr(:new, T, Any[field !== Field ? :(main.$field) : :value for field in fields]...)
end
# the default constructor of a struct
@inline @generated function default_constructor(::Type{T}, fields::Vararg{Any,N}) where {T,N}
Expr(:new, T, Any[:(fields[$i]) for i=1:N]...)
end
"""
_zero(T)
_zero(x::T)
Create a `zero` of type `T` by recursively applying `zero` to its fields.
"""
@inline @generated function _zero(::Type{T}) where {T}
Expr(:new, T, Any[:(_zero($field)) for field in T.types]...)
end
@inline @generated function _zero(x::T) where {T}
Expr(:new, T, Any[:(_zero(x.$field)) for field in fieldnames(T)]...)
end
function lens_compile(ex, cache, value)
@match ex begin
:($a.$b.$c = $d) => begin
updated =
Expr(:let,
Expr(:block, :($cache = $cache.$b), :($value = $d)),
:($field_update($cache, $(Val(c)), $value)))
lens_compile(:($a.$b = $updated), cache, value)
end
:($a.$b = $c) => begin
Expr(:let,
Expr(:block, :($cache = $a), :($value=$c)),
:($field_update($cache, $(Val(b)), $value)))
end
_ => error("Malformed update notation $ex, expect the form like 'a.b = c'.")
end
end
function with(ex)
cache = gensym("cache")
value = gensym("value")
lens_compile(ex, cache, value)
end
"""
e.g. `@with x.y = val` returns a new object similar to `x`, with the `y` field changed to `val`.
"""
macro with(ex)
with(ex) |> esc
end
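# Example (illustrative sketch; `P` is a hypothetical immutable struct):
#
# julia> struct P; x::Float64; y::Float64; end
#
# julia> p = P(1.0, 2.0);
#
# julia> NiLangCore.@with p.x = 5.0
# P(5.0, 2.0)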
@inline @generated function _zero(::Type{T}) where {T<:Tuple}
Expr(:tuple, Any[:(_zero($field)) for field in T.types]...)
end
_zero(::Type{T}) where T<:Real = zero(T)
_zero(::Type{String}) = ""
_zero(::Type{Symbol}) = Symbol("")
_zero(::Type{Char}) = '\0'
_zero(::Type{T}) where {ET,N,T<:AbstractArray{ET,N}} = reshape(ET[], ntuple(x->0, N))
_zero(::Type{T}) where {A,B,T<:Dict{A,B}} = Dict{A,B}()
#_zero(x::T) where T = _zero(T) # not adding this line!
_zero(x::T) where T<:Real = zero(x)
_zero(::String) = ""
_zero(::Symbol) = Symbol("")
_zero(::Char) = '\0'
_zero(x::T) where T<:AbstractArray = zero(x)
function _zero(d::T) where {A,B,T<:Dict{A,B}}
Dict{A,B}([x=>_zero(y) for (x,y) in d])
end
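# Example (illustrative sketch): `_zero` recurses through fields and type
# parameters, so composite types work out of the box:
#
# julia> _zero(ComplexF64)
# 0.0 + 0.0im
#
# julia> _zero(Tuple{Float64, String})
# (0.0, "")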
@static if VERSION > v"1.6.100"
@generated function chfield(x, ::Val{FIELD}, xval) where FIELD
if ismutabletype(x)
Expr(:block, :(x.$FIELD = xval), :x)
else
:(@with x.$FIELD = xval)
end
end
else
@generated function chfield(x, ::Val{FIELD}, xval) where FIELD
if x.mutable
Expr(:block, :(x.$FIELD = xval), :x)
else
:(@with x.$FIELD = xval)
end
end
end
@generated function chfield(x, f::Function, xval)
Expr(:block, _invcheck(:(f(x)), :xval), :x)
end
# convert the fields of an object to a tuple
@generated function type2tuple(x::T) where T
Expr(:tuple, [:(x.$v) for v in fieldnames(T)]...)
end
macro fields(ex)
esc(:($type2tuple($ex)))
end
| NiLangCore | https://github.com/GiggleLiu/NiLangCore.jl.git |
|
[
"Apache-2.0"
] | 0.10.7 | a6448d0f450d85be5777659e695d67c19ec6a707 | code | 7899 | export precom
# precompiling information
struct PreInfo
routines::Vector{Any}
end
PreInfo() = PreInfo([])
"""
precom(module, ex)
Precompile a function; returns a tuple of (macros, function name, arguments, type parameters, function body).
"""
function precom(m::Module, ex)
mc, fname, args, ts, body = match_function(ex)
vars = Symbol[]
newargs = map(args) do arg
@match arg begin
:(::$tp)=>Expr(:(::), gensym(), tp)
_ => arg
end
end
for arg in newargs
pushvar!(vars, arg)
end
info = PreInfo()
body_out = precom_body(m, body, info)
if !isempty(info.routines)
error("`@routine` and `~@routine` must appear in pairs, mising `~@routine`!")
end
st = SymbolTable(vars, Symbol[], Symbol[])
st_after = copy(st)
variable_analysis_ex.(body_out, Ref(st_after))
checksyms(st_after, st)
mc, fname, newargs, ts, body_out
end
function precom_body(m::Module, body::AbstractVector, info)
Any[precom_ex(m, ex, info) for ex in body]
end
# precompile `+=`, `-=`, `*=` and `/=`
function precom_opm(f, out, arg2)
if f in [:(+=), :(-=), :(*=), :(/=)]
@match arg2 begin
:($x |> $view) => Expr(f, out, :(identity($arg2)))
:($subf($(subargs...))) => Expr(f, out, arg2)
_ => Expr(f, out, :(identity($arg2)))
end
elseif f in [:(.+=), :(.-=), :(.*=), :(./=)]
@match arg2 begin
:($x |> $view) || :($x .|> $view) => Expr(f, out, :(identity.($arg2)))
:($subf.($(subargs...))) => Expr(f, out, arg2)
:($subf($(subargs...))) => Expr(f, out, arg2)
_ => Expr(f, out, :(identity.($arg2)))
end
end
end
# precompile `⊻=`
function precom_ox(f, out, arg2)
if f == :(⊻=)
@match arg2 begin
:($x |> $view) => Expr(f, out, :(identity($arg2)))
:($subf($(subargs...))) ||
:($a || $b) || :($a && $b) => Expr(f, out, arg2)
_ => Expr(f, out, :(identity($arg2)))
end
elseif f == :(.⊻=)
@match arg2 begin
:($x |> $view) || :($x .|> $view) => Expr(f, out, :(identity.($arg2)))
:($subf.($(subargs...))) => Expr(f, out, arg2)
:($subf($(subargs...))) => Expr(f, out, arg2)
_ => Expr(f, out, :(identity.($arg2)))
end
end
end
"""
precom_ex(module, ex, info)
Precompile a single statement `ex`, where `info` is a `PreInfo` instance.
"""
function precom_ex(m::Module, ex, info)
@match ex begin
:($x ← $val) || :($x → $val) => ex
:($x ↔ $y) => ex
:($(xs...), $y ← $val) => precom_ex(m, :(($(xs...), $y) ← $val), info)
:($(xs...), $y → $val) => precom_ex(m, :(($(xs...), $y) → $val), info)
:($a += $b) => precom_opm(:+=, a, b)
:($a -= $b) => precom_opm(:-=, a, b)
:($a *= $b) => precom_opm(:*=, a, b)
:($a /= $b) => precom_opm(:/=, a, b)
:($a ⊻= $b) => precom_ox(:⊻=, a, b)
:($a .+= $b) => precom_opm(:.+=, a, b)
:($a .-= $b) => precom_opm(:.-=, a, b)
:($a .*= $b) => precom_opm(:.*=, a, b)
:($a ./= $b) => precom_opm(:./=, a, b)
:($a .⊻= $b) => precom_ox(:.⊻=, a, b)
Expr(:if, _...) => precom_if(m, copy(ex), info)
:(while ($pre, $post); $(body...); end) => begin
post = post == :~ ? pre : post
info = PreInfo()
Expr(:while, :(($pre, $post)), Expr(:block, precom_body(m, body, info)...))
end
:(@from $line $post while $pre; $(body...); end) => precom_ex(m, Expr(:while, :(($pre, !$post)), ex.args[4].args[2]), info)
:(begin $(body...) end) => begin
Expr(:block, precom_body(m, body, info)...)
end
# TODO: allow omitting the step.
:(for $i=$range; $(body...); end) ||
:(for $i in $range; $(body...); end) => begin
info = PreInfo()
Expr(:for, :($i=$(precom_range(range))), Expr(:block, precom_body(m, body, info)...))
end
:(@safe $line $subex) => ex
:(@cuda $line $(args...)) => ex
:(@launchkernel $line $(args...)) => ex
:(@inbounds $line $subex) => Expr(:macrocall, Symbol("@inbounds"), line, precom_ex(m, subex, info))
:(@simd $line $subex) => Expr(:macrocall, Symbol("@simd"), line, precom_ex(m, subex, info))
:(@threads $line $subex) => Expr(:macrocall, Symbol("@threads"), line, precom_ex(m, subex, info))
:(@avx $line $subex) => Expr(:macrocall, Symbol("@avx"), line, precom_ex(m, subex, info))
:(@invcheckoff $line $subex) => Expr(:macrocall, Symbol("@invcheckoff"), line, precom_ex(m, subex, info))
:(@routine $line $expr) => begin
precode = precom_ex(m, expr, info)
push!(info.routines, precode)
precode
end
:(~(@routine $line)) => begin
if isempty(info.routines)
error("`@routine` and `~@routine` must appear in pairs, mising `@routine`!")
end
precom_ex(m, dual_ex(m, pop!(info.routines)), info)
end
# 1. precompile to expand macros
# 2. get dual expression
# 3. precompile to analyze variables
:(~$expr) => precom_ex(m, dual_ex(m, precom_ex(m, expr, PreInfo())), info)
:($f($(args...))) => :($f($(args...)))
:($f.($(args...))) => :($f.($(args...)))
:(nothing) => ex
Expr(:macrocall, _...) => precom_ex(m, macroexpand(m, ex), info)
::LineNumberNode => ex
::Nothing => ex
_ => error("unsupported statement: $ex")
end
end
precom_range(range) = @match range begin
_ => range
end
function precom_if(m, ex, exinfo)
_expand_cond(cond) = @match cond begin
:(($pre, ~)) => :(($pre, $pre))
:(($pre, $post)) => :(($pre, $post))
:($pre) => :(($pre, $pre))
end
if ex.head == :if
ex.args[1] = _expand_cond(ex.args[1])
elseif ex.head == :elseif
ex.args[1].args[2] = _expand_cond(ex.args[1].args[2])
end
info = PreInfo()
ex.args[2] = Expr(:block, precom_body(m, ex.args[2].args, info)...)
if length(ex.args) == 3
if ex.args[3].head == :elseif
ex.args[3] = precom_if(m, ex.args[3], exinfo)
elseif ex.args[3].head == :block
info = PreInfo()
ex.args[3] = Expr(:block, precom_body(m, ex.args[3].args, info)...)
else
error("unknown statement following `if` $ex.")
end
end
ex
end
export @code_preprocess
"""
@code_preprocess ex
Preprocess `ex` and return the symmetric reversible IR.
```jldoctest; setup=:(using NiLangCore)
julia> NiLangCore.rmlines(@code_preprocess if (x < 3, ~) x += exp(3.0) end)
:(if (x < 3, x < 3)
x += exp(3.0)
end)
```
"""
macro code_preprocess(ex)
QuoteNode(precom_ex(__module__, ex, PreInfo()))
end
precom_ex(m::Module, ex) = precom_ex(m, ex, PreInfo())
# push a new variable to variable set `x`, for allocating `target`
function pushvar!(x::Vector{Symbol}, target)
@match target begin
::Symbol => begin
if target in x
throw(InvertibilityError("Symbol `$target` should not be used as the allocation target, it is an existing variable in the current scope."))
else
push!(x, target)
end
end
:(($(tar...),)) => begin
for t in tar
pushvar!(x, t)
end
end
:($tar = _) => pushvar!(x, tar)
:($tar...) => pushvar!(x, tar)
:($tar::$tp) => pushvar!(x, tar)
Expr(:parameters, targets...) => begin
for tar in targets
pushvar!(x, tar)
end
end
Expr(:kw, tar, val) => begin
pushvar!(x, tar)
end
_ => error("unknown variable expression $(target)")
end
nothing
end | NiLangCore | https://github.com/GiggleLiu/NiLangCore.jl.git |
|
[
"Apache-2.0"
] | 0.10.7 | a6448d0f450d85be5777659e695d67c19ec6a707 | code | 1829 | export FastStack, GLOBAL_STACK, FLOAT64_STACK, FLOAT32_STACK, COMPLEXF64_STACK, COMPLEXF32_STACK, BOOL_STACK, INT64_STACK, INT32_STACK
const GLOBAL_STACK = []
struct FastStack{T}
data::Vector{T}
top::Base.RefValue{Int}
end
function FastStack{T}(n::Int) where T
FastStack{T}(Vector{T}(undef, n), Ref(0))
end
function FastStack(n::Int)
FastStack{Any}(Vector{Any}(undef, n), Ref(0))
end
Base.show(io::IO, x::FastStack{T}) where T = print(io, "FastStack{$T}($(x.top[])/$(length(x.data)))")
Base.show(io::IO, ::MIME"text/plain", x::FastStack{T}) where T = show(io, x)
Base.length(stack::FastStack) = stack.top[]
Base.empty!(stack::FastStack) = (stack.top[] = 0; stack)
@inline function Base.push!(stack::FastStack, val)
stack.top[] += 1
@boundscheck stack.top[] <= length(stack.data) || throw(BoundsError(stack, stack.top[]))
stack.data[stack.top[]] = val
return stack
end
@inline function Base.pop!(stack::FastStack)
@boundscheck stack.top[] > 0 || throw(BoundsError(stack, stack.top[]))
val = stack.data[stack.top[]]
stack.top[] -= 1
return val
end
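# Example (illustrative sketch): a fixed-capacity stack with O(1) push/pop;
# exceeding the preallocated capacity throws a BoundsError:
#
# julia> st = FastStack{Float64}(2)
# FastStack{Float64}(0/2)
#
# julia> push!(st, 1.0); push!(st, 2.0); pop!(st)
# 2.0
#
# julia> length(st)
# 1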
# the default stack size is 10^6 (~8MB for Float64)
let
empty_exprs = Expr[:($empty!($GLOBAL_STACK))]
for DT in [:Float64, :Float32, :ComplexF64, :ComplexF32, :Int64, :Int32, :Bool]
STACK = Symbol(uppercase(String(DT)), :_STACK)
@eval const $STACK = FastStack{$DT}(1000000)
# allow the in-stack and out-stack types to differ, to support loading data into a GVar.
push!(empty_exprs, Expr(:call, empty!, STACK))
end
@eval function empty_global_stacks!()
$(empty_exprs...)
end
end
"""
loaddata(t, x)
load data `x`, matching type `t`.
"""
loaddata(::Type{T}, x::T) where T = x
loaddata(::Type{T1}, x::T) where {T1,T} = convert(T1,x)
loaddata(::T1, x::T) where {T1,T} = loaddata(T1, x) | NiLangCore | https://github.com/GiggleLiu/NiLangCore.jl.git |
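# Example (illustrative sketch): values popped from a shared stack are
# converted back to the expected type:
#
# julia> NiLangCore.loaddata(Float64, 1)
# 1.0
#
# julia> NiLangCore.loaddata(1.5, 2) # dispatch on an instance falls back to its type
# 2.0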
|
[
"Apache-2.0"
] | 0.10.7 | a6448d0f450d85be5777659e695d67c19ec6a707 | code | 4127 | # * existing: the ancillas and input arguments in the local scope.
# They should be protected to avoid duplicated allocation.
# * deallocated: the ancillas removed.
# They should be recorded to avoid using after deallocation.
# * unclassified: the variables from global scope.
# They cannot be used as allocation targets.
struct SymbolTable
existing::Vector{Symbol}
deallocated::Vector{Symbol}
unclassified::Vector{Symbol}
end
function SymbolTable()
SymbolTable(Symbol[], Symbol[], Symbol[])
end
Base.copy(st::SymbolTable) = SymbolTable(copy(st.existing), copy(st.deallocated), copy(st.unclassified))
# remove a variable from a list
function removevar!(lst::AbstractVector, var)
index = findfirst(==(var), lst)
deleteat!(lst, index)
end
# replace a variable in a list with target variable
function replacevar!(lst::AbstractVector, var, var2)
index = findfirst(==(var), lst)
lst[index] = var2
end
# allocate a new variable
function allocate!(st::SymbolTable, var::Symbol)
if var ∈ st.existing
throw(InvertibilityError("Repeated allocation of variable `$(var)`"))
elseif var ∈ st.deallocated
removevar!(st.deallocated, var)
push!(st.existing, var)
elseif var ∈ st.unclassified
throw(InvertibilityError("Variable `$(var)` used before allocation."))
else
push!(st.existing, var)
end
nothing
end
# find the list containing var
function findlist(st::SymbolTable, var::Symbol)
if var ∈ st.existing
return st.existing
elseif var ∈ st.unclassified
return st.unclassified
elseif var in st.deallocated
return st.deallocated
else
return nothing
end
end
# using a variable
function operate!(st::SymbolTable, var::Symbol)
if var ∈ st.existing || var ∈ st.unclassified
elseif var ∈ st.deallocated
throw(InvertibilityError("Operating on deallocate variable `$(var)`"))
else
push!(st.unclassified, var::Symbol)
end
nothing
end
# deallocate a variable
function deallocate!(st::SymbolTable, var::Symbol)
if var ∈ st.deallocated
throw(InvertibilityError("Repeated deallocation of variable `$(var)`"))
elseif var ∈ st.existing
removevar!(st.existing, var)
push!(st.deallocated, var)
elseif var ∈ st.unclassified
throw(InvertibilityError("Deallocating an external variable `$(var)`"))
else
throw(InvertibilityError("Deallocating an external variable `$(var)`"))
end
nothing
end
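# Example (illustrative sketch of the internal life cycle):
#
# julia> st = NiLangCore.SymbolTable();
#
# julia> NiLangCore.allocate!(st, :x);   # `x ← 0.0` puts :x into `existing`
#
# julia> NiLangCore.operate!(st, :y);    # using an unknown :y marks it `unclassified`
#
# julia> NiLangCore.deallocate!(st, :x); # `x → 0.0` moves :x into `deallocated`
#
# julia> NiLangCore.deallocate!(st, :y)  # throws InvertibilityError: :y is external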
# Check the symbol table to make sure no symbols introduced in the local scope remain undeallocated.
# `a` is the symbol table after running the local scope, `b` is the symbol table before running the local scope.
function checksyms(a::SymbolTable, b::SymbolTable=SymbolTable())
diff = setdiff(a.existing, b.existing)
if !isempty(diff)
error("Some variables not deallocated correctly: $diff")
end
end
function swapsyms!(st::SymbolTable, var1::Symbol, var2::Symbol)
lst1 = findlist(st, var1)
lst2 = findlist(st, var2)
if lst1 !== nothing && lst2 !== nothing
# exchange variables
i1 = findfirst(==(var1), lst1)
i2 = findfirst(==(var2), lst2)
lst2[i2], lst1[i1] = lst1[i1], lst2[i2]
elseif lst1 !== nothing
replacevar!(lst1, var1, var2)
operate!(st, var1)
elseif lst2 !== nothing
replacevar!(lst2, var2, var1)
operate!(st, var2)
else
operate!(st, var1)
operate!(st, var2)
end
end
function swapsyms_asymetric!(st::SymbolTable, var1s::Vector, var2::Symbol)
length(var1s) == 0 && return
lst1 = findlist(st, var1s[1])
for k=2:length(var1s)
if findlist(st, var1s[k]) !== lst1
error("variable status not aligned: $var1s")
end
end
lst2 = findlist(st, var2)
if lst1 !== nothing
removevar!.(Ref(lst1), var1s)
push!(lst1, var2)
else
operate!(st, var2)
end
if lst2 !== nothing
removevar!(lst2, var2)
push!.(Ref(lst2),var1s)
else
operate!.(Ref(st), var1s)
end
end | NiLangCore | https://github.com/GiggleLiu/NiLangCore.jl.git |
|
[
"Apache-2.0"
] | 0.10.7 | a6448d0f450d85be5777659e695d67c19ec6a707 | code | 4044 | const GLOBAL_ATOL = Ref(1e-8)
########### macro tools #############
startwithdot(sym::Symbol) = string(sym)[1] == '.'
startwithdot(sym::Expr) = false
startwithdot(sym) = false
function removedot(f)
string(f)[1] == '.' || error("$f is not a broadcast call.")
Symbol(string(f)[2:end])
end
"""
get_argname(ex)
Return the argument name of a function argument expression, e.g. `x::Float64 = 4` gives `x`.
"""
function get_argname(fname)
@match fname begin
::Symbol => fname
:($x::$t) => x
:($x::$t=$y) => x
:($x=$y) => x
:($x...) => :($x...)
:($x::$t...) => :($x...)
Expr(:parameters, args...) => fname
_ => error("can not get the function name of expression $fname.")
end
end
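# Example (illustrative sketch):
#
# julia> NiLangCore.get_argname(:(x::Float64 = 4))
# :x
#
# julia> NiLangCore.get_argname(:(xs::Int...))
# :(xs...)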
"""
match_function(ex)
Analyze a function expression; returns a tuple of `(macros, function name, arguments, type parameters (in where {...}), statements in the body)`
"""
function match_function(ex)
@match ex begin
:(function $(fname)($(args...)) $(body...) end) ||
:($fname($(args...)) = $(body...)) => (nothing, fname, args, [], body)
Expr(:function, :($fname($(args...)) where {$(ts...)}), xbody) => (nothing, fname, args, ts, xbody.args)
Expr(:macrocall, mcname, line, fdef) => ([mcname, line], match_function(fdef)[2:end]...)
_ => error("must input a function, got $ex")
end
end
"""
rmlines(ex::Expr)
Remove line number nodes for pretty printing.
"""
rmlines(ex::Expr) = begin
hd = ex.head
if hd == :macrocall
Expr(:macrocall, ex.args[1], nothing, rmlines.(ex.args[3:end])...)
else
tl = Any[rmlines(ex) for ex in ex.args if !(ex isa LineNumberNode)]
Expr(hd, tl...)
end
end
rmlines(@nospecialize(a)) = a
########### ordered dict ###############
struct MyOrderedDict{TK,TV}
keys::Vector{TK}
vals::Vector{TV}
end
function MyOrderedDict{K,V}() where {K,V}
MyOrderedDict(K[], V[])
end
function Base.setindex!(d::MyOrderedDict, val, key)
ind = findfirst(x->x===key, d.keys)
if ind isa Nothing
push!(d.keys, key)
push!(d.vals, val)
else
@inbounds d.vals[ind] = val
end
return d
end
function Base.getindex(d::MyOrderedDict, key)
ind = findfirst(x->x===key, d.keys)
if ind isa Nothing
throw(KeyError(ind))
else
return d.vals[ind]
end
end
function Base.delete!(d::MyOrderedDict, key)
ind = findfirst(x->x==key, d.keys)
if ind isa Nothing
throw(KeyError(ind))
else
deleteat!(d.vals, ind)
deleteat!(d.keys, ind)
end
end
Base.length(d::MyOrderedDict) = length(d.keys)
function Base.pop!(d::MyOrderedDict)
k = pop!(d.keys)
v = pop!(d.vals)
k, v
end
Base.isempty(d::MyOrderedDict) = length(d.keys) == 0
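# Example (illustrative sketch of this internal helper): insertion order is
# preserved, unlike `Base.Dict`:
#
# julia> d = NiLangCore.MyOrderedDict{Symbol, Int}();
#
# julia> d[:a] = 1; d[:b] = 2; d[:a] = 10;
#
# julia> pop!(d)
# (:b, 2)
#
# julia> d[:a]
# 10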
########### broadcasting ###############
"""
unzipped_broadcast(f, args...)
Unzipped broadcast for arrays and tuples, e.g. `SWAP.([1,2,3], [4,5,6])` performs an in-place element-wise swap and returns `[4,5,6], [1,2,3]`.
"""
unzipped_broadcast(f) = error("must provide at least one argument in broadcasting!")
function unzipped_broadcast(f, arg::AbstractArray; kwargs...)
arg .= f.(arg)
end
function unzipped_broadcast(f, arg::Tuple; kwargs...)
f.(arg)
end
@generated function unzipped_broadcast(f, args::Vararg{AbstractArray,N}; kwargs...) where N
argi = [:(args[$k][i]) for k=1:N]
quote
for i = 1:same_length(args)
($(argi...),) = f($(argi...); kwargs...)
end
return args
end
end
@generated function unzipped_broadcast(f, args::Vararg{Tuple,N}; kwargs...) where N
quote
same_length(args)
res = map(f, args...)
($([:($getindex.(res, $i)) for i=1:N]...),)
end
end
function same_length(args)
length(args) == 0 && error("can not broadcast over an empty set of arguments.")
l = length(args[1])
for j=2:length(args)
@assert l == length(args[j]) "length of arguments should be the same `$(length(args[j])) != $l`"
end
return l
end
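# Example (illustrative sketch; `swap` is a hypothetical 2-ary instruction):
#
# julia> swap(a, b) = (b, a);
#
# julia> x, y = [1, 2], [3, 4];
#
# julia> NiLangCore.unzipped_broadcast(swap, x, y);
#
# julia> x, y
# ([3, 4], [1, 2])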
| NiLangCore | https://github.com/GiggleLiu/NiLangCore.jl.git |
|
[
"Apache-2.0"
] | 0.10.7 | a6448d0f450d85be5777659e695d67c19ec6a707 | code | 10585 | function variable_analysis_ex(ex, syms::SymbolTable)
use!(x) = usevar!(syms, x)
allocate!(x) = allocatevar!(syms, x)
deallocate!(x) = deallocatevar!(syms, x)
@match ex begin
:($x[$key] ← $val) || :($x[$key] → $val) => (use!(x); use!(key); use!(val))
:($x ← $val) => allocate!(x)
:($x → $val) => deallocate!(x)
:($x ↔ $y) => swapvars!(syms, x, y)
:($a += $f($(b...))) || :($a -= $f($(b...))) ||
:($a *= $f($(b...))) || :($a /= $f($(b...))) ||
:($a .+= $f($(b...))) || :($a .-= $f($(b...))) ||
:($a .*= $f($(b...))) || :($a ./= $f($(b...))) ||
:($a .+= $f.($(b...))) || :($a .-= $f.($(b...))) ||
:($a .*= $f.($(b...))) || :($a ./= $f.($(b...))) => begin
ex.args[1] = render_arg(a)
b .= render_arg.(b)
use!(a)
use!.(b)
check_args(Any[a, b...])
end
:($a ⊻= $f($(b...))) || :($a .⊻= $f($(b...))) || :($a .⊻= $f.($(b...))) => begin
ex.args[1] = render_arg(a)
b .= render_arg.(b)
use!(a)
use!.(b)
end
:($a ⊻= $b || $c) || :($a ⊻= $b && $c) => begin
ex.args[1] = render_arg(a)
ex.args[2].args .= render_arg.(ex.args[2].args)
use!(a)
use!(b)
use!(c)
end
Expr(:if, _...) => variable_analysis_if(ex, syms)
:(while $condition; $(body...); end) => begin
julia_usevar!(syms, condition)
localsyms = SymbolTable(Symbol[], copy(syms.deallocated), Symbol[])
variable_analysis_ex.(body, Ref(localsyms))
checksyms(localsyms)
end
:(begin $(body...) end) => begin
variable_analysis_ex.(body, Ref(syms))
end
# TODO: allow omitting the step.
:(for $i=$range; $(body...); end) => begin
julia_usevar!(syms, range)
localsyms = SymbolTable(Symbol[], copy(syms.deallocated), Symbol[])
variable_analysis_ex.(body, Ref(localsyms))
checksyms(localsyms)
ex
end
:(@safe $line $subex) => julia_usevar!(syms, subex)
:(@cuda $line $(args...)) => variable_analysis_ex(args[end], syms)
:(@launchkernel $line $(args...)) => variable_analysis_ex(args[end], syms)
:(@inbounds $line $subex) => variable_analysis_ex(subex, syms)
:(@simd $line $subex) => variable_analysis_ex(subex, syms)
:(@threads $line $subex) => variable_analysis_ex(subex, syms)
:(@avx $line $subex) => variable_analysis_ex(subex, syms)
:(@invcheckoff $line $subex) => variable_analysis_ex(subex, syms)
# 1. precompile to expand macros
# 2. get dual expression
# 3. precompile to analyze variables
:($f($(args...))) => begin
args .= render_arg.(args)
check_args(args)
use!.(args)
end
:($f.($(args...))) => begin
args .= render_arg.(args)
check_args(args)
use!.(args)
end
:(nothing) => nothing
::LineNumberNode => nothing
::Nothing => nothing
_ => error("unsupported statement: $ex")
end
end
function variable_analysis_if(ex, exsyms)
syms = copy(exsyms)
julia_usevar!(exsyms, ex.args[1])
variable_analysis_ex.(ex.args[2].args, Ref(exsyms))
checksyms(exsyms, syms)
if length(ex.args) == 3
if ex.args[3].head == :elseif
variable_analysis_if(ex.args[3], exsyms)
elseif ex.args[3].head == :block
syms = copy(exsyms)
variable_analysis_ex.(ex.args[3].args, Ref(exsyms))
checksyms(exsyms, syms)
else
error("unknown statement following `if` $ex.")
end
end
end
usevar!(syms::SymbolTable, arg) = @match arg begin
::Number || ::String => nothing
::Symbol => _isconst(arg) || operate!(syms, arg)
:(@skip! $line $x) => julia_usevar!(syms, x)
:($x.$k) => usevar!(syms, x)
:($a |> subarray($(ranges...))) => (usevar!(syms, a); julia_usevar!.(Ref(syms), ranges))
:($x |> tget($f)) || :($x |> $f) || :($x .|> $f) || :($x::$f) => (usevar!(syms, x); julia_usevar!(syms, f))
:($x') || :(-$x) => usevar!(syms, x)
:($t{$(p...)}($(args...))) => begin
usevar!(syms, t)
usevar!.(Ref(syms), p)
usevar!.(Ref(syms), args)
end
:($a[$(x...)]) => begin
usevar!(syms, a)
usevar!.(Ref(syms), x)
end
:(($(args...),)) => usevar!.(Ref(syms), args)
_ => julia_usevar!(syms, arg)
end
julia_usevar!(syms::SymbolTable, ex) = @match ex begin
::Symbol => _isconst(ex) || operate!(syms, ex)
:($a:$b:$c) => julia_usevar!.(Ref(syms), [a, b, c])
:($a:$c) => julia_usevar!.(Ref(syms), [a, c])
:($a && $b) || :($a || $b) || :($a[$b]) => julia_usevar!.(Ref(syms), [a, b])
:($a.$b) => julia_usevar!(syms, a)
:(($(v...),)) || :(begin $(v...) end) => julia_usevar!.(Ref(syms), v)
:($f($(v...))) || :($f[$(v...)]) => begin
julia_usevar!(syms, f)
julia_usevar!.(Ref(syms), v)
end
:($args...) => julia_usevar!(syms, args)
Expr(:parameters, targets...) => julia_usevar!.(Ref(syms), targets)
Expr(:kw, tar, val) => julia_usevar!(syms, val)
::LineNumberNode => nothing
_ => nothing
end
# push a new variable to variable set `x`, for allocating `target`
allocatevar!(st::SymbolTable, target) = @match target begin
::Symbol => allocate!(st, target)
:(($(tar...),)) => begin
for t in tar
allocatevar!(st, t)
end
end
:($tar = $y) => allocatevar!(st, y)
:($tar...) => allocatevar!(st, tar)
:($tar::$tp) => allocatevar!(st, tar)
Expr(:parameters, targets...) => begin
for tar in targets
allocatevar!(st, tar)
end
end
Expr(:kw, tar, val) => begin
allocatevar!(st, tar)
end
_ => _isconst(target) || error("unknown variable expression $(target)")
end
# pop a variable from variable set `x`, for deallocating `target`
deallocatevar!(st::SymbolTable, target) = @match target begin
::Symbol => deallocate!(st, target)
:(($(tar...),)) => begin
for t in tar
deallocatevar!(st, t)
end
end
_ => error("unknow variable expression $(target)")
end
function swapvars!(st::SymbolTable, x, y)
e1 = isemptyvar(x)
e2 = isemptyvar(y)
# check assertions
for (e, v) in ((e1, x), (e2, y))
e && dosymbol(v) do sv
if sv ∈ st.existing || sv ∈ st.unclassified
throw(InvertibilityError("can not assert variable to empty: $v"))
end
end
end
if e1 && e2
elseif e1 && !e2
dosymbol(sx -> allocate!(st, sx), x)
dosymbol(sy -> deallocate!(st, sy), y)
usevar!(st, x)
elseif !e1 && e2
dosymbol(sx -> deallocate!(st, sx), x)
dosymbol(sy -> allocate!(st, sy), y)
usevar!(st, y)
else # both are nonempty
sx = dosymbol(identity, x)
sy = dosymbol(identity, y)
if sx === nothing || sy === nothing # e.g. x.y ↔ k.c
usevar!(st, x)
usevar!(st, y)
elseif sx isa Symbol && sy isa Symbol # e.g. x ↔ y
swapsyms!(st, sx, sy)
elseif sx isa Vector && sy isa Vector # e.g. (x, y) ↔ (a, b)
@assert length(sx) == length(sy)
swapsyms!.(Ref(st), sx, sy)
elseif sx isa Vector && sy isa Symbol # e.g. (x, y) ↔ args
swapsyms_asymetric!(st, sx, sy)
elseif sx isa Symbol && sy isa Vector # e.g. args ↔ (x, y)
swapsyms_asymetric!(st, sy, sx)
end
end
end
isemptyvar(ex) = @match ex begin
:($x[end+1]) => true
:($x::∅) => true
_ => false
end
dosymbol(f, ex) = @match ex begin
x::Symbol => f(x)
:(@fields $line $sym) => dosymbol(f, sym)
:($x::$T) => dosymbol(f, x)
:(($(args...),)) => dosymbol.(Ref(f), args)
_ => nothing
end
_isconst(x) = @match x begin
::Symbol => x ∈ Symbol[:im, :π, :Float64, :Float32, :Int, :Int64, :Int32, :Bool, :UInt8, :String, :Char, :ComplexF64, :ComplexF32, :(:), :end, :nothing]
::QuoteNode || ::Bool || ::Char || ::Number || ::String => true
:($f($(args...))) => all(_isconst, args)
:(@const $line $ex) => true
_ => false
end
# avoid share read/write
function check_args(args)
args_kernel = []
for i=1:length(args)
out = memkernel(args[i])
if out isa Vector
for o in out
if o !== nothing
push!(args_kernel, o)
end
end
elseif out !== nothing
push!(args_kernel, out)
end
end
# error on shared read or shared write.
for i=1:length(args_kernel)
for j in i+1:length(args_kernel)
if args_kernel[i] == args_kernel[j]
throw(InvertibilityError("$i-th argument and $j-th argument shares the same memory $(args_kernel[i]), shared read and shared write are not allowed!"))
end
end
end
end
# Return the memory `identifier`; it is used to detect shared read/write.
memkernel(ex) = @match ex begin
::Symbol => ex
:(@const $line $x) => memkernel(x)
:($a |> subarray($(inds...))) || :($a[$(inds...)]) => :($(memkernel(a))[$(inds...)])
:($x.$y) => :($(memkernel(x)).$y)
:($a |> tget($x)) => :($(memkernel(a))[$x])
:($x |> $f) || :($x .|> $f) || :($x') || :(-$x) || :($x...) => memkernel(x)
:($t{$(p...)}($(args...))) || :(($(args...),)) => memkernel.(args)
_ => nothing # Julia scope, including `@skip!`, `f(x)`, etc.
end
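# Example (illustrative sketch): `memkernel` strips dataviews down to the
# underlying memory location, which `check_args` compares pairwise:
#
# julia> NiLangCore.memkernel(:(x |> tget(2)))
# :(x[2])
#
# julia> NiLangCore.check_args(Any[:(x[2]), :(x[2])]) # shared memory
# ERROR: InvertibilityError(...)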
# Modify the argument, e.g. `x.[1,3:5]` is rendered as `x |> subarray(1,3:5)`.
render_arg(ex) = @match ex begin
::Symbol => ex
:(@skip! $line $x) => ex
:(@const $line $x) => Expr(:macrocall, Symbol("@const"), line, render_arg(x))
:($a.[$(inds...)]) => :($(render_arg(a)) |> subarray($(inds...)))
:($a |> subarray($(inds...))) => :($(render_arg(a)) |> subarray($(inds...)))
:($a[$(inds...)]) => :($(render_arg(a))[$(inds...)])
:($x.$y) => :($(render_arg(x)).$y)
:($a |> tget($x)) => :($(render_arg(a)) |> tget($x))
:($x |> $f) => :($(render_arg(x)) |> $f)
:($x .|> $f) => :($(render_arg(x)) .|> $f)
:($x') => :($(render_arg(x))')
:(-$x) => :(-$(render_arg(x)))
:($ag...) => :($(render_arg(ag))...)
:($t{$(p...)}($(args...))) => :($t{($p...)}($(render_arg.(args)...)))
:(($(args...),)) => :(($(render_arg.(args)...),))
_ => ex # Julia scope, including `@skip!`, `f(x)` et. al.
end | NiLangCore | https://github.com/GiggleLiu/NiLangCore.jl.git |
|
[
"Apache-2.0"
] | 0.10.7 | a6448d0f450d85be5777659e695d67c19ec6a707 | code | 1440 | using Base.Cartesian
export chfield
############# ancillas ################
export @fieldview
"""
@fieldview fname(x::TYPE) = x.fieldname
@fieldview fname(x::TYPE) = x[i]
...
Create a function fieldview that can be accessed by a reversible program
```jldoctest; setup=:(using NiLangCore)
julia> struct GVar{T, GT}
x::T
g::GT
end
julia> @fieldview xx(x::GVar) = x.x
julia> chfield(GVar(1.0, 0.0), xx, 2.0)
GVar{Float64, Float64}(2.0, 0.0)
```
"""
macro fieldview(ex)
@match ex begin
:($f($obj::$tp) = begin $line; $ex end) => begin
xval = gensym("value")
esc(Expr(:block,
:(Base.@__doc__ $f($obj::$tp) = begin $line; $ex end),
:($NiLangCore.chfield($obj::$tp, ::typeof($f), $xval) = $(Expr(:block, assign_ex(ex, xval, false), obj)))
))
end
_ => error("expect expression `f(obj::type) = obj.prop`, got $ex")
end
end
chfield(a, b, c) = error("chfield($a, $b, $c) not defined!")
chfield(x, ::typeof(identity), xval) = xval
chfield(x::T, ::typeof(-), y::T) where T = -y
chfield(x::T, ::typeof(adjoint), y) where T = adjoint(y)
############ dataview patches ############
export tget, subarray
"""
tget(i::Int)
Get the i-th entry of a tuple.
"""
tget(i::Int) = x::Tuple -> x[i]
"""
subarray(ranges...)
Get a subarray, same as `view` in Base.
"""
subarray(args...) = x -> view(x, args...) | NiLangCore | https://github.com/GiggleLiu/NiLangCore.jl.git |
|
[
"Apache-2.0"
] | 0.10.7 | a6448d0f450d85be5777659e695d67c19ec6a707 | code | 621 | using Test, NiLangCore
@testset "basic" begin
@test ~(~sin) === sin
@test ~(~typeof(sin)) === typeof(sin)
@test isreflexive(XorEq(NiLangCore.logical_or))
println(XorEq(*))
println(PlusEq(+))
println(MinusEq(-))
println(MulEq(*))
println(DivEq(/))
end
@static if VERSION > v"1.5.100"
@testset "composite function" begin
@i function f1(x)
x.:1 += x.:2
end
@i function f2(x)
x.:2 += cos(x.:1)
end
@i function f3(x)
x.:1 ↔ x.:2
end
x = (2.0, 3.0)
y = (f3∘f2∘f1)(x)
z = (~(f3∘f2∘f1))(y)
@show x, z
@test all(x .≈ z)
end
end
| NiLangCore | https://github.com/GiggleLiu/NiLangCore.jl.git |
|
[
"Apache-2.0"
] | 0.10.7 | a6448d0f450d85be5777659e695d67c19ec6a707 | code | 20024 | using NiLangCore
using Test
using Base.Threads
@testset "to_standard_format" begin
for (OP, FUNC) in [(:+=, PlusEq), (:-=, MinusEq), (:*=, MulEq), (:/=, DivEq), (:⊻=, XorEq)]
@test NiLangCore.to_standard_format(Expr(OP, :x, :y)) == :($FUNC(identity)(x, y))
@test NiLangCore.to_standard_format(Expr(OP, :x, :(sin(y; z=3)))) == :($FUNC(sin)(x, y; z=3))
OPD = Symbol(:., OP)
@test NiLangCore.to_standard_format(Expr(OPD, :x, :y)) == :($FUNC(identity).(x, y))
@test NiLangCore.to_standard_format(Expr(OPD, :x, :(sin.(y)))) == :($FUNC(sin).(x, y))
@test NiLangCore.to_standard_format(Expr(OPD, :x, :(y .* z))) == :($FUNC(*).(x, y, z))
end
@test NiLangCore.to_standard_format(Expr(:⊻=, :x, :(y && z))) == :($XorEq($(NiLangCore.logical_and))(x, y, z))
@test NiLangCore.to_standard_format(Expr(:⊻=, :x, :(y || z))) == :($XorEq($(NiLangCore.logical_or))(x, y, z))
end
@testset "i" begin
@i function test1(a::T, b, out) where T<:Number
add(a, b)
out += a * b
end
@i function tt(a, b)
out ← 0.0
test1(a, b, out)
(~test1)(a, b, out)
a += b
out → 0.0
end
# compute (a+b)*b -> out
x = 3.0
y = 4.0
out = 0.0
@test isreversible(test1, Tuple{Number, Any, Any})
@test check_inv(test1, (x, y, out))
@test check_inv(tt, (x, y))
@test check_inv(tt, (x, y))
end
@testset "if statement 1" begin
# compute (a+b)*b -> out
@i function test1(a, b, out)
add(a, b)
if (a > 8, a > 8)
out += a*b
else
end
end
x = 3
y = 4
out = 0
@instr test1(x, y, out)
@test out==0
@test x==7
@instr (~test1)(x, y, out)
@test out==0
@test x==3
end
@testset "if statement error" begin
x = 3
y = 4
out = 0
# compute (a+b)*b -> out
@i function test1(a, b, out)
add(a, b)
if (out < 4, out < 4)
out += a*b
else
end
end
@test_throws InvertibilityError test1(x, y, out)
end
@testset "if statement 3" begin
x = 3
y = 4
out = 0
@i @inline function test1(a, b, out)
add(a, b)
if (a > 2, a > 2)
out += a*b
else
end
end
x = 3
y = 4
out = 0
@instr test1(x, y, out)
@test out==28
@instr (~test1)(x, y, out)
@test out==0
end
@testset "if statement 4" begin
@i function test1(a, b, out)
add(a, b)
if a > 8.0
out += a*b
end
end
@test test1(1.0, 8.0, 0.0)[3] == 72.0
@i function test2(a, b)
add(a, b)
if a > 8.0
a -= b^2
end
end
@test_throws InvertibilityError test2(1.0, 8.0)
@test_throws InvertibilityError macroexpand(Main, :(@i function test3(a, b)
add(a, b)
if a > 8.0
a -= b*b
end
end))
end
@testset "for" begin
@i function looper(x, y, k)
for i=1:1:k
x += y
end
end
x = 0.0
y = 1.0
k = 3
@instr looper(x, y, k)
@test x == 3
@instr (~looper)(x, y, k)
@test x == 0.0
shiba = 18
@i function looper2(x, y, k)
for i=1:1:k
k += shiba
x += y
end
end
@test_throws InvertibilityError looper2(x, y, k)
end
@testset "while" begin
@i function looper(x, y)
while (x<100, x>0)
x += y
end
end
x = 0.0
y = 9
@instr looper(x, y)
@test x == 108
@instr (~looper)(x, y)
@test x == 0.0
@i function looper2(x, y)
while (x<100, x>-10)
x += y
end
end
@test_throws InvertibilityError looper2(x, y)
@test_throws ErrorException macroexpand(@__MODULE__, :(@i function looper3(x, y)
while (x<100, x>0)
z ← 0
x += y
z += 1
end
end))
end
@testset "ancilla" begin
one, ten = 1, 10
@i function looper(x, y)
z ← 0
x += y
z += one
z -= one
z → 0
end
x = 0.0
y = 9
@instr looper(x, y)
@test x[] == 9
@instr (~looper)(x, y)
@test x[] == 0.0
@i function looper2(x, y)
z ← 0
x += y
z += one
z -= ten
z → 0
end
x = 0.0
y = 9
@test_throws InvertibilityError looper2(x, y)
end
@testset "broadcast" begin
# compute (a+b)*b -> out
@i function test1(a, b)
a .+= b
end
x = [3, 1.0]
y = [4, 2.0]
@instr test1(x, y)
@test x == [7, 3.0]
@instr (~test1)(x, y)
@test x == [3, 1.0]
@i function test2(a, b, out)
a .+= identity.(b)
out .+= (a .* b)
end
x = Array([3, 1.0])
y = [4, 2.0]
out = Array([0.0, 1.0])
@instr test2(x, y, out)
@test out==[28, 7]
@test check_inv(test2, (x, y, out))
end
@testset "broadcast arr" begin
@i function f5(x, y, z, a, b)
x += y + z
b += a + x
end
@i function f4(x, y, z, a)
x += y + z
a += y + x
end
@i function f3(x, y, z)
y += x + z
end
@i function f2(x, y)
y += x
end
@i function f1(x)
l ← zero(x)
l += x
x -= 2 * l
l += x
l → zero(x)
end
a = randn(10)
b = randn(10)
c = randn(10)
d = randn(10)
e = randn(10)
aa = copy(a)
@instr f1.(aa)
@test aa ≈ -a
aa = copy(a)
bb = copy(b)
@instr f2.(aa, bb)
@test aa ≈ a
@test bb ≈ b + a
aa = copy(a)
bb = copy(b)
cc = copy(c)
@instr f3.(aa, bb, cc)
@test aa ≈ a
@test bb ≈ b + a + c
@test cc ≈ c
aa = copy(a)
bb = copy(b)
cc = copy(c)
dd = copy(d)
@instr f4.(aa, bb, cc, dd)
@test aa ≈ a + b + c
@test bb ≈ b
@test cc ≈ c
@test dd ≈ a + 2b + c + d
aa = copy(a)
bb = copy(b)
cc = copy(c)
dd = copy(d)
ee = copy(e)
@instr f5.(aa, bb, cc, dd, ee)
@test aa ≈ a + b + c
@test bb ≈ b
@test cc ≈ c
@test dd ≈ d
@test ee ≈ a + b + c + d + e
x = randn(5)
@test_throws AssertionError @instr x .+= c
end
@testset "broadcast tuple" begin
@i function f5(x, y, z, a, b)
x += y + z
b += a + x
end
@i function f4(x, y, z, a)
x += y + z
a += y + x
end
@i function f3(x, y, z)
y += x + z
end
@i function f2(x, y)
y += x
end
@i function f1(x)
l ← zero(x)
l += x
x -= 2 * l
l += x
l → zero(x)
end
a = (1,2)
b = (3,1)
c = (6,7)
d = (1,11)
e = (4,1)
aa = a
@instr f1.(aa)
@test aa == -1 .* a
aa = a
bb = b
@instr f2.(aa, bb)
@test aa == a
@test bb == b .+ a
aa = a
bb = b
cc = c
@instr f3.(aa, bb, cc)
@test aa == a
@test bb == b .+ a .+ c
@test cc == c
aa = a
bb = b
cc = c
dd = d
@instr f4.(aa, bb, cc, dd)
@test aa == a .+ b .+ c
@test bb == b
@test cc == c
@test dd == a .+ 2 .* b .+ c .+ d
aa = a
bb = b
cc = c
dd = d
ee = e
@instr f5.(aa, bb, cc, dd, ee)
@test aa == a .+ b .+ c
@test bb == b
@test cc == c
@test dd == d
@test ee == a .+ b .+ c .+ d .+ e
x = (2,1,5)
@test_throws AssertionError @instr x .+= c
end
@testset "broadcast 2" begin
# compute (a+b)*b -> out
@i function test1(a, b)
a += b
end
x = [3, 1.0]
y = [4, 2.0]
@instr test1.(x, y)
@test x == [7, 3.0]
@instr (~test1).(x, y)
@test x == [3, 1.0]
@i function test2(a, b, out)
add(a, b)
out += (a * b)
end
x = [3, 1.0]
y = [4, 2.0]
out = [0.0, 1.0]
@instr test2.(x, y, out)
@test out==[28, 7]
@instr (~test2).(x, y, out)
@test out==[0, 1.0]
args = (x, y, out)
@instr test2.(args...)
@test args[3]==[28, 7]
end
@testset "neg sign" begin
@i function test(out, x, y)
out += x * (-y)
end
@test check_inv(test, (0.1, 2.0, -2.5); verbose=true)
end
@testset "@ibounds" begin
@i function test(x, y)
for i=1:length(x)
@inbounds x[i] += y[i]
end
end
@test test([1,2], [2,3]) == ([3,5], [2,3])
end
@testset "kwargs" begin
@i function test(out, x; y)
out += x * (-y)
end
@test check_inv(test, (0.1, 2.0); y=0.5, verbose=true)
end
@testset "routines" begin
@i function test(out, x)
@routine begin
out += x
end
~@routine
end
out, x = 0.0, 1.0
@instr test(out, x)
@test out == 0.0
end
@testset "inverse a prog" begin
@i function test(out, x)
~(begin
out += x
out += x
end)
~(for i=1:3
out += x
end)
end
out, x = 0.0, 1.0
@test check_inv(test, (out, x))
@instr test(out, x)
@test out == -5.0
end
@testset "invcheck" begin
@i function test(out, x)
anc ← 0
@invcheckoff for i=1:x[]
x[] -= 1
end
@invcheckoff while (anc<3, anc<3)
anc += 1
end
out += anc
@invcheckoff anc → 0
end
res = test(0, Ref(7))
@test res[1] == 3
@test res[2][] == 0
end
@testset "nilang ir" begin
ex = :(
@inline function f(x!::T, y) where T
anc ← zero(T)
@routine anc += x!
x! += y * anc
~@routine
anc → zero(T)
end
)
ex2 = :(
@inline function f(x!::T, y) where T
anc ← zero(T)
anc += identity(x!)
x! += y * anc
anc -= identity(x!)
anc → zero(T)
end)
ex3 = :(
@inline function (~f)(x!::T, y) where T
anc ← zero(T)
anc += identity(x!)
x! -= y * anc
anc -= identity(x!)
anc → zero(T)
end)
@test nilang_ir(@__MODULE__, ex) |> NiLangCore.rmlines == ex2 |> NiLangCore.rmlines
@test nilang_ir(@__MODULE__, ex; reversed=true) |> NiLangCore.rmlines == ex3 |> NiLangCore.rmlines
end
@testset "protectf" begin
struct C<:Function end
# protected
@i function (a::C)(x)
@safe @show a
if (protectf(a) isa Inv, ~)
add(x, 1.0)
else
sub(x, 1.0)
end
end
a = C()
@test (~a)(a(1.0)) == 1.0
# not protected
@i function (a::C)(x)
@safe @show a
if (a isa Inv, ~)
add(x, 1.0)
else
sub(x, 1.0)
end
end
@test (~a)(a(1.0)) == -1.0
end
@testset "ifelse statement" begin
@i function f(x, y)
if (x > 0, ~)
y += 1
elseif (x < 0, ~)
y += 2
else
y += 3
end
end
@test f(1, 0) == (1, 1)
@test f(-2, 0) == (-2, 2)
@test f(0, 0) == (0, 3)
@i function f2(x, y)
if (x > 0, x < 0)
y += 1
elseif (x < 0, x < 0)
y += 2
else
y += 3
end
end
@test_throws InvertibilityError f2(-1, 0)
end
@testset "skip!" begin
x = 0.4
@instr (@skip! 3) += x
@test x == 0.4
y = 0.3
@instr x += @const y
@test x == 0.7
@test y == 0.3
end
@testset "for x in range" begin
@i function f(x, y)
for item in y
x += item
end
end
@test check_inv(f, (0.0, [1,2,5]))
end
@testset "@simd and @threads" begin
@i function f(x)
@threads for i=1:length(x)
x[i] += 1
end
end
x = [1,2,3]
@test f(x) == [2,3,4]
@i function f2(x)
@simd for i=1:length(x)
x[i] += 1
end
end
x = [1,2,3]
@test f2(x) == [2,3,4]
end
@testset "xor over ||" begin
x = false
@instr x ⊻= true || false
@test x
@instr x ⊻= true && false
@test x
end
macro zeros(T, x, y)
esc(:($x ← zero($T); $y ← zero($T)))
end
@testset "macro" begin
@i function f(x)
@zeros Float64 a b
x += a * b
~@zeros Float64 a b
end
@test f(3.0) == 3.0
end
@testset "allow nothing pass" begin
@i function f(x)
nothing
end
@test f(2) == 2
end
@testset "ancilla check" begin
ex1 = :(@i function f(x)
x ← 0
end)
@test_throws InvertibilityError macroexpand(Main, ex1)
ex2 = :(@i function f(x)
y ← 0
y ← 0
end)
@test_throws InvertibilityError macroexpand(Main, ex2)
ex3 = :(@i function f(x)
y ← 0
y → 0
end)
@test macroexpand(Main, ex3) isa Expr
ex4 = :(@i function f(x; y=5)
y ← 0
end)
@test_throws InvertibilityError macroexpand(Main, ex4)
ex5 = :(@i function f(x)
y → 0
end)
@test_throws InvertibilityError macroexpand(Main, ex5)
ex6 = :(@i function f(x::Int)
y ← 0
y → 0
end)
@test macroexpand(Main, ex6) isa Expr
ex7 = :(@i function f(x::Int)
if x>3
y ← 0
y → 0
elseif x<-3
y ← 0
y → 0
else
y ← 0
y → 0
end
end)
@test macroexpand(Main, ex7) isa Expr
ex8 = :(@i function f(x; y=5)
z ← 0
z → 0
end)
@test macroexpand(Main, ex8) isa Expr
ex9 = :(@i function f(x; y)
z ← 0
z → 0
end)
@test macroexpand(Main, ex9) isa Expr
ex10 = :(@i function f(x; y)
begin
z ← 0
end
~begin
z ← 0
end
end)
@test macroexpand(Main, ex10) isa Expr
end
@testset "dict access" begin
d = Dict(3=>4)
@instr d[3] → 4
@instr d[4] ← 3
@test d == Dict(4=>3)
@test_throws InvertibilityError @instr d[4] → 5
@test (@instr @invcheckoff d[8] → 5; true)
@test_throws InvertibilityError @instr d[4] ← 5
@test (@instr @invcheckoff d[4] ← 5; true)
end
@testset "@routine,~@routine" begin
@test_throws ErrorException macroexpand(Main, :(@i function f(x)
@routine begin
end
end))
@test_throws ErrorException macroexpand(Main, :(@i function f(x)
~@routine
end))
@test macroexpand(Main, :(@i function f(x)
@routine begin end
~@routine
end)) !== nothing
end
@testset "@from post while pre" begin
@i function f()
x ← 5
z ← 0
@from z==0 while x > 0
x -= 1
z += 1
end
z → 5
x → 0
end
@test f() == ()
@test (~f)() == ()
end
@testset "argument with function call" begin
@test_throws ErrorException @macroexpand @i function f(x, y)
x += sin(exp(y))
end
@i function f(x, y)
x += sin(exp(0.4)) + y
end
end
@testset "allocation multiple vars" begin
info = NiLangCore.PreInfo()
@test NiLangCore.precom_ex(NiLangCore, :(x,y ← var), info) == :((x, y) ← var)
@test NiLangCore.precom_ex(NiLangCore, :(x,y → var), info) == :((x, y) → var)
@test NiLangCore.precom_ex(NiLangCore, :((x,y) ↔ (a, b)), info) == :((x,y) ↔ (a,b))
@test (@code_reverse (x,y) ← var) == :((x, y) → var)
@test (@code_reverse (x,y) → var) == :((x, y) ← var)
@test (@code_julia (x,y) ← var) == :((x, y) = var)
@test (@code_julia (x,y) → var) == :(try
$(NiLangCore.deanc)((x, y), var)
catch e
$(:(println("deallocate fail `$($(QuoteNode(:((x, y))))) → $(:var)`")))
throw(e)
end) |> NiLangCore.rmlines
x = randn(2,4)
@i function f(y, x)
m, n ← size(x)
(l, k) ← size(x)
y += m*n
y += l*k
(l, k) → size(x)
m, n → size(x)
end
twosize = f(0, x)[1]
@test twosize == 16
@test (~f)(twosize, x)[1] == 0
@i function g(x)
(m, n) ← size(x)
(m, n) → (7, 5)
end
@test_throws InvertibilityError g(x)
@test_throws InvertibilityError (~g)(x)
end
@testset "argument without argname" begin
@i function f(::Complex)
end
@test f(1+2im) == 1+2im
end
@testset "tuple input" begin
@i function f(x::Tuple{<:Tuple, <:Real})
f(x.:1)
(x.:1).:1 += x.:2
end
@i function f(x::Tuple{<:Real, <:Real})
x.:1 += x.:2
end
@i function g(data)
f(((data.:1, data.:2), data.:3))
end
@test g((1,2,3)) == (6,2,3)
end
@testset "single/zero argument" begin
@i function f(x)
neg(x)
end
@i function g(x::Vector)
neg.(x)
end
@test f(3) == -3
@test g([3, 2]) == [-3, -2]
x = (3,)
@instr f(x...)
@test x == (-3,)
x = ([3, 4],)
@instr f.(x...)
@test x == ([-3, -4],)
@i function f()
end
x = ()
@instr f(x...)
@test x == ()
end
@testset "type constructor" begin
@i function f(x, y, a, b)
add(Complex{}(x, y), Complex{}(a, b))
end
@test f(1,2, 3, 4) == (4, 6, 3, 4)
@test_throws ErrorException macroexpand(NiLangCore, :(@i function f(x, y, a, b)
add(Complex(x, y), Complex{}(a, b))
end))
@i function g(x::Inv, y::Inv)
add(x.f, y.f)
end
@i function g(x, y)
g(Inv{}(x), Inv{}(y))
end
@test g(2, 3) == (5, 3)
end
@testset "variable_analysis" begin
# kwargs should not be assigned
@test_throws InvertibilityError macroexpand(@__MODULE__, :(@i function f1(x; y=4)
y ← 5
y → 5
end))
# deallocated variables should not be used
@test_throws InvertibilityError macroexpand(@__MODULE__, :(@i function f1(x; y=4)
z ← 5
z → 5
x += 2 * z
end))
# deallocated variables should not be used in local scope
@test_throws InvertibilityError macroexpand(@__MODULE__, :(@i function f1(x; y=4)
z ← 5
z → 5
for i=1:10
x += 2 * z
end
end))
end
@testset "boolean" begin
@i function f1(x, y, z)
x ⊻= true
y .⊻= z
end
@test f1(false, [true, false], [true, false]) == (true, [false, false], [true, false])
@i function f2(x, y, z)
z[2] ⊻= true && y[1]
z[1] ⊻= z[2] || x
end
@test f2(false, [true, false], [true, false]) == (false, [true, false], [false, true])
end
@testset "swap ↔" begin
@i function f1(x, y)
j::∅ ↔ k::∅ # dummy swap
a::∅ ↔ x
a ↔ y
a ↔ x::∅ # ↔ is symmetric
end
@test f1(2, 3) == (3, 2)
@test check_inv(f1, (2, 3))
# stack
@i function f2(x, y)
x[end+1] ↔ y
y ← 2
end
@test f2([1,2,3], 4) == ([1,2,3,4], 2)
@test check_inv(f2, ([1,2,3], 3))
@i function f4(x, y)
y ↔ x[end+1]
y ← 2
end
@test f4([1,2,3], 4) == ([1,2,3,4], 2)
@test check_inv(f4, ([1,2,3], 3))
@i function f3(x, y::TY, s) where TY
y → _zero(TY)
x[end] ↔ (y::TY)::∅
@safe @show x[2], s
x[2] ↔ s
end
@test f3(Float32[1,2,3], 0.0, 4f0) == (Float32[1,4], 3.0, 2f0)
@test check_inv(f3, (Float32[1,2,3], 0.0, 4f0))
end
@testset "feed tuple and types" begin
@i function f3(a, d::Complex)
a.:1 += d.re
d.re ↔ d.im
end
@i function f4(a, b, c, d, e)
f3((a, b, c), Complex{}(d, e))
end
@test f4(1,2,3,4,5) == (5,2,3,5,4)
@test check_inv(f4, (1,2,3,4,5))
end
@testset "exchange tuple and fields" begin
@i function f1(x, y, z)
(x, y) ↔ @fields z
end
@test f1(1,2, 3+4im) == (3,4,1+2im)
@i function f2(re, x)
r, i ← @fields x
re += r
r, i → @fields x
end
@test f2(0.0, 3.0+2im) == (3.0, 3.0 + 2.0im)
@i function f3(x, y, z)
(@fields z) ↔ (x, y)
end
@test f3(1,2, 3+4im) == (3,4,1+2im)
@test_throws ErrorException macroexpand(@__MODULE__, :(@i function f3(x, y, z)
(x, y) ↔ (z, j)
end))
@i function f4(x, y, z, j)
(x, y) ↔ (z, j)
end
@test f4(1,2, 3, 4) == (3,4,1,2)
@i function swap_fields(obj::Complex)
(x, y)::∅ ↔ @fields obj
x += y
(x, y) ↔ (@fields obj)::∅
end
@test swap_fields(1+2im) == (3+2im)
end
| NiLangCore | https://github.com/GiggleLiu/NiLangCore.jl.git |
|
[
"Apache-2.0"
] | 0.10.7 | a6448d0f450d85be5777659e695d67c19ec6a707 | code | 4877 | using NiLangCore
using NiLangCore: compile_ex, dual_ex, precom_ex, memkernel, render_arg, check_args
using Test
import Base: +, -
value(x) = x
NiLangCore.chfield(x::T, ::typeof(value), y::T) where T = y
function add(a!::Number, b::Number)
a!+b, b
end
function neg(b::Number)
-b
end
@selfdual neg
@i function add(a!, b)
add(a! |> value, b |> value)
end
function sub(a!::Number, b::Number)
a!-b, b
end
@i function sub(a!, b)
sub(a! |> value, b |> value)
end
@dual add sub
function XOR(a!::Integer, b::Integer)
xor(a!, b), b
end
@selfdual XOR
#@nograd XOR
@testset "boolean" begin
x = false
@instr x ⊻= true
@test x
@instr x ⊻= true || false
@test !x
@instr x ⊻= true && false
@instr x ⊻= !false
@test x
end
@testset "@dual" begin
@test isreversible(add, Tuple{Any,Any})
@test isreversible(sub, Tuple{Any,Any})
@test !isreflexive(add)
@test ~(add) == sub
a=2.0
b=1.0
@instr add(a, b)
@test a == 3.0
args = (1,2)
@instr add(args...)
@test args == (3,2)
@instr sub(a, b)
@test a == 2.0
@test check_inv(add, (a, b))
@test isprimitive(add)
@test isprimitive(sub)
end
@testset "@selfdual" begin
@test !isreversible(XOR, Tuple{Any, Any})
@test !isreversible(~XOR, Tuple{Any, Any})
@test isreversible(~XOR, Tuple{Integer, Integer})
@test isreversible(XOR, Tuple{Integer, Integer})
@test isreflexive(XOR)
@test isprimitive(XOR)
@test ~(XOR) == XOR
a=2
b=1
@instr XOR(a, b)
@test a == 3
@instr XOR(a, b)
@test a == 2
end
@testset "+=, -=" begin
x = 1.0
y = 1.0
@instr PlusEq(exp)(y, x)
@test x ≈ 1
@test y ≈ 1+exp(1.0)
@instr (~PlusEq(exp))(y, x)
@test x ≈ 1
@test y ≈ 1
end
@testset "+= and const" begin
x = 0.5
@instr x += π
@test x == 0.5+π
@instr x += log(π)
@test x == 0.5 + π + log(π)
@instr x += log(π)/2
@test x == 0.5 + π + 3*log(π)/2
@instr x += log(2*π)/2
@test x == 0.5 + π + 3*log(π)/2 + log(2π)/2
end
@testset "+= keyword functions" begin
g(x; y=2) = x^y
z = 0.0
x = 2.0
@instr z += g(x; y=4)
@test z == 16.0
end
@testset "constant value" begin
@test @const 2 == 2
@test NiLangCore._isconst(:(@const grad(x)))
end
@testset "+=, -=, *=, /=" begin
@test compile_ex(@__MODULE__, :(x += y * z), NiLangCore.CompileInfo()).args[1].args[2] == :($PlusEq(*)(x, y, z))
@test compile_ex(@__MODULE__, dual_ex(@__MODULE__, :(x -= y * z)), NiLangCore.CompileInfo()).args[1].args[2] == :($PlusEq(*)(x, y, z))
@test compile_ex(@__MODULE__, :(x /= y * z), NiLangCore.CompileInfo()).args[1].args[2] == :($DivEq(*)(x, y, z))
@test compile_ex(@__MODULE__, dual_ex(@__MODULE__, :(x *= y * z)), NiLangCore.CompileInfo()).args[1].args[2] == :($DivEq(*)(x, y, z))
@test ~MulEq(*) == DivEq(*)
@test ~DivEq(*) == MulEq(*)
function (g::MulEq)(y, a, b)
y * g.f(a, b), a, b
end
function (g::DivEq)(y, a, b)
y / g.f(a, b), a, b
end
a, b, c = 1.0, 2.0, 3.0
@instr a *= b + c
@test a == 5.0
@instr a /= b + c
@test a == 1.0
end
@testset "shared read write check" begin
for (x, y) in [
(:((-x[3].g' |> NEG).k[5]) , :((x[3]).g.k[5]))
(:((-(x |> subarray(3)).g' |> NEG).k[5]) , :((x[3]).g.k[5]))
(:(@skip! x.g) , nothing)
(:(@const x .|> g) , :x)
(:(cos.(x[2])) , nothing)
(:(cos(x[2])) , nothing)
(:((x |> g)...) , :x)
(:((x |> g, y.:1)) , [:x, :(y.:1)])
(:((x |> g, y |> tget(1))) , [:x, :(y[1])])]
@test memkernel(deepcopy(x)) == y
@test render_arg(deepcopy(x)) == x
end
@test render_arg(:(x.y.[2:3])) == :(x.y |> subarray(2:3))
@test memkernel(:(x.y |> subarray(2:3))) == (:(x.y[2:3]))
@test render_arg(:(x.y.[2:3] |> value)) == :(x.y |> subarray(2:3) |> value)
@test memkernel(:(x.y |> subarray(2:3) |> value)) == :(x.y[2:3])
@test_throws InvertibilityError check_args([:a, :(a |> grad)])
@test check_args([:(a.x), :(a.g |> grad)]) isa Nothing
@test_throws InvertibilityError check_args([:(a.x), :(b[3]), :(b[3])])
@test_throws InvertibilityError check_args([:(a.x), :((b, a.x))]) isa Nothing
# TODO: check variable on the same tree, like `a.b` and `a`
end
@testset "dual type" begin
struct AddX{T}
x::T
end
struct SubX{T}
x::T
end
@dualtype AddX SubX
@dualtype AddX SubX
@i function (f::AddX)(x::Real) end
@test hasmethod(AddX(3), Tuple{Real})
@test hasmethod(SubX(3), Tuple{Real})
for (TA, TB) in [(AddX, SubX), (MulEq, DivEq), (XorEq, XorEq), (PlusEq, MinusEq)]
@test invtype(TA) == TB
@test invtype(TA{typeof(*)}) == TB{typeof(*)}
@test invtype(TB) == TA
@test invtype(TB{typeof(*)}) == TA{typeof(*)}
end
end
| NiLangCore | https://github.com/GiggleLiu/NiLangCore.jl.git |
|
[
"Apache-2.0"
] | 0.10.7 | a6448d0f450d85be5777659e695d67c19ec6a707 | code | 1268 | using NiLangCore
using NiLangCore: type2tuple
using Test
struct NiTypeTest{T} <: IWrapper{T}
x::T
g::T
end
NiTypeTest(x) = NiTypeTest(x, zero(x))
@fieldview value(invtype::NiTypeTest) = invtype.x
@fieldview gg(invtype::NiTypeTest) = invtype.g
@testset "inv type" begin
it = NiTypeTest(0.5)
@test eps(typeof(it)) === eps(Float64)
@test value(it) == 0.5
@test it ≈ NiTypeTest(0.5)
@test it > 0.4
@test it < NiTypeTest(0.6)
@test it < 7
@test 0.4 < it
@test 7 > it
@test chfield(it, value, 0.3) == NiTypeTest(0.3)
it = chfield(it, Val(:g), 0.2)
@test almost_same(NiTypeTest(0.5+1e-15), NiTypeTest(0.5))
@test !almost_same(NiTypeTest(1.0), NiTypeTest(1))
it = NiTypeTest(0.5)
@test chfield(it, gg, 0.3) == NiTypeTest(0.5, 0.3)
end
@testset "mutable struct set field" begin
mutable struct MS{T}
x::T
y::T
z::T
end
ms = MS(0.5, 0.6, 0.7)
@i function f(ms)
ms.x += 1
ms.y += 1
ms.z -= ms.x ^ 2
end
ms2 = f(ms)
@test (ms2.x, ms2.y, ms2.z) == (1.5, 1.6, -1.55)
struct IMS{T}
x::T
y::T
z::T
end
ms = IMS(0.5, 0.6, 0.7)
ms2 = f(ms)
@test (ms2.x, ms2.y, ms2.z) == (1.5, 1.6, -1.55)
end
| NiLangCore | https://github.com/GiggleLiu/NiLangCore.jl.git |
|
[
"Apache-2.0"
] | 0.10.7 | a6448d0f450d85be5777659e695d67c19ec6a707 | code | 1130 | using NiLangCore, Test
@testset "update field" begin
@test NiLangCore.field_update(1+2im, Val(:im), 4) == 1+4im
struct TestUpdateField1{A, B}
a::A
end
@test NiLangCore.field_update(TestUpdateField1{Int,Float64}(1), Val(:a), 4) == TestUpdateField1{Int,Float64}(4)
struct TestUpdateField2{A}
a::A
function TestUpdateField2(a::T) where T
new{T}(a)
end
end
@test NiLangCore.field_update(TestUpdateField2(1), Val(:a), 4) == TestUpdateField2(4)
@test NiLangCore.default_constructor(ComplexF64, 1.0, 2.0) == 1+2im
end
@testset "_zero" begin
@test _zero(Tuple{Float64, Float32,String,Matrix{Float64},Char,Dict{Int,Int}}) == (0.0, 0f0, "", zeros(0,0), '\0', Dict{Int,Int}())
@test _zero(ComplexF64) == 0.0 + 0.0im
@test _zero((1,2.0,"adsf",randn(2,2),'d',Dict(2=>5))) == (0, 0.0,"",zeros(2,2),'\0',Dict(2=>0))
@test _zero(1+2.0im) == 0.0 + 0.0im
@test _zero(()) == ()
@test _zero((1,2)) == (0, 0)
@test _zero(Symbol) == Symbol("")
@test _zero(:x) == Symbol("")
end
@testset "fields" begin
@test (@fields 1+3im) == (1,3)
end | NiLangCore | https://github.com/GiggleLiu/NiLangCore.jl.git |
|
[
"Apache-2.0"
] | 0.10.7 | a6448d0f450d85be5777659e695d67c19ec6a707 | code | 480 | using NiLangCore
using Test
@testset "Core.jl" begin
include("Core.jl")
end
@testset "stack.jl" begin
include("stack.jl")
end
@testset "lens.jl" begin
include("lens.jl")
end
@testset "utils.jl" begin
include("utils.jl")
end
@testset "symboltable.jl" begin
include("symboltable.jl")
end
@testset "instr.jl" begin
include("instr.jl")
end
@testset "vars.jl" begin
include("vars.jl")
end
@testset "compiler.jl" begin
include("compiler.jl")
end
| NiLangCore | https://github.com/GiggleLiu/NiLangCore.jl.git |
|
[
"Apache-2.0"
] | 0.10.7 | a6448d0f450d85be5777659e695d67c19ec6a707 | code | 4790 | using NiLangCore, Test
@testset "stack" begin
for (stack, x) in [
(FLOAT64_STACK, 0.3), (FLOAT32_STACK, 0f4),
(INT64_STACK, 3), (INT32_STACK, Int32(3)),
(COMPLEXF64_STACK, 4.0+0.3im), (COMPLEXF32_STACK, 4f0+0f3im),
(BOOL_STACK, true),
]
println(stack)
push!(stack, x)
@test pop!(stack) === x
end
end
@testset "stack operations" begin
z = 1.0
NiLangCore.empty_global_stacks!()
@test_throws ArgumentError (@instr GLOBAL_STACK[end] ↔ y::∅)
y = 4.0
@test_throws ArgumentError (@instr GLOBAL_STACK[end] → y)
@test_throws BoundsError (@instr @invcheckoff GLOBAL_STACK[end] ↔ y)
@test_throws ArgumentError (@instr @invcheckoff GLOBAL_STACK[end] → y)
x = 0.3
NiLangCore.empty_global_stacks!()
@instr GLOBAL_STACK[end+1] ↔ x
@instr GLOBAL_STACK[end] ↔ x::∅
@test x === 0.3
@instr @invcheckoff GLOBAL_STACK[end+1] ↔ x
y = 0.5
@instr GLOBAL_STACK[end+1] ↔ y
@instr @invcheckoff GLOBAL_STACK[end] ↔ x::∅
@test x == 0.5
x = 0.3
st = Float64[]
@instr st[end+1] ↔ x
@test length(st) == 1
@instr st[end] ↔ x::∅
@test length(st) == 0
@test x === 0.3
@instr st[end+1] ↔ x
@test length(st) == 1
y = 0.5
@instr st[end+1] ↔ y
@instr @invcheckoff st[end] ↔ x::∅
@test x == 0.5
@i function test(x)
x2 ← zero(x)
x2 += x^2
GLOBAL_STACK[end+1] ↔ x
x::∅ ↔ x2
end
@test test(3.0) == 9.0
l = length(NiLangCore.GLOBAL_STACK)
@test check_inv(test, (3.0,))
@test length(NiLangCore.GLOBAL_STACK) == l
@i function test2(x)
x2 ← zero(x)
x2 += x^2
@invcheckoff GLOBAL_STACK[end+1] ↔ x
x::∅ ↔ x2
end
@test test2(3.0) == 9.0
l = length(NiLangCore.GLOBAL_STACK)
@test check_inv(test2, (3.0,))
@test length(NiLangCore.GLOBAL_STACK) == l
x = 3.0
@instr GLOBAL_STACK[end+1] ↔ x
NiLangCore.empty_global_stacks!()
l = length(NiLangCore.GLOBAL_STACK)
@test l == 0
end
@testset "copied push/pop stack operations" begin
NiLangCore.empty_global_stacks!()
x = 0.3
@instr GLOBAL_STACK[end+1] ← x
@test x === 0.3
@instr GLOBAL_STACK[end] → x
@test x === 0.3
@instr GLOBAL_STACK[end+1] ← x
x = 0.4
@test_throws InvertibilityError @instr GLOBAL_STACK[end] → x
y = 0.5
@instr GLOBAL_STACK[end+1] ← y
@instr @invcheckoff GLOBAL_STACK[end] → x
@test x == 0.5
st = []
x = [0.3]
@instr st[end+1] ← x
@test st[1] !== [0.3]
@test st[1] ≈ [0.3]
x = 0.3
st = Float64[]
@instr ~(st[end] → x)
@test x === 0.3
@test length(st) == 1
@instr ~(st[end+1] ← x)
@test length(st) == 0
@test x === 0.3
@instr @invcheckoff st[end+1] ← x
@test length(st) == 1
x = 0.4
@test_throws InvertibilityError @instr st[end] → x
@test length(st) == 0
y = 0.5
@instr st[end+1] ← y
@instr @invcheckoff st[end] → x
@test x == 0.5
@i function test(x, x2)
x2 += x^2
GLOBAL_STACK[end+1] ← x
x ↔ x2
end
@test test(3.0, 0.0) == (9.0, 3.0)
l = length(NiLangCore.GLOBAL_STACK)
@test check_inv(test, (3.0, 0.0))
@test length(NiLangCore.GLOBAL_STACK) == l
end
@testset "dictionary & vector" begin
# allocate and deallocate
@i function f1(d, y)
d["y"] ← y
end
d = Dict("x" => 34)
@test f1(d, 3) == (Dict("x"=>34, "y"=>3), 3)
@test_throws InvertibilityError f1(d, 3)
d = Dict("x" => 34)
@test check_inv(f1, (d, 3))
# not available on vectors
@i function f2(d, y)
d[2] ← y
end
@test_throws MethodError f2([1,2,3], 3)
# swap
@i function f3(d, y)
d["y"] ↔ y
end
d = Dict("y" => 34)
@test f3(d, 3) == (Dict("y"=>3), 34)
d = Dict("z" => 34)
@test_throws KeyError f3(d, 3)
d = Dict("y" => 34)
@test check_inv(f3, (d, 3))
# swap on vector
@i function f4(d, y, x)
d[2] ↔ y
d[end] ↔ x
end
d = [11,12,13]
@test f4(d, 1,2) == ([11,1,2],12,13)
d = [11,12,13]
@test check_inv(f4, (d, 1,2))
# swap to empty
@i function f5(d, x::T) where T
d["x"]::∅ ↔ x # swap in
d["y"] ↔ x::∅ # swap out
end
d = Dict("y" => 34)
@test f5(d, 3) == (Dict("x"=>3), 34)
d = Dict("y" => 34)
@test check_inv(f5, (d, 3))
d = Dict("x" => 34)
@test_throws InvertibilityError f5(d, 3)
# not available on vectors
@i function f6(d, y)
d[2]::∅ ↔ y
end
@test_throws MethodError f6([1,2,3], 3)
end
@testset "inverse stack" begin
@i function f(x)
x[end+1] ← 1
end
x = FastStack{Int}(3)
@test check_inv(f, (x,))
end
| NiLangCore | https://github.com/GiggleLiu/NiLangCore.jl.git |
|
[
"Apache-2.0"
] | 0.10.7 | a6448d0f450d85be5777659e695d67c19ec6a707 | code | 2654 | using Test, NiLangCore
using NiLangCore: SymbolTable, allocate!, deallocate!, operate!, swapvars!, variable_analysis_ex
@testset "variable analysis" begin
st = SymbolTable()
# allocate! : not exist
allocate!(st, :x)
allocate!(st, :y)
@test st.existing == [:x, :y]
# allocate! : existing
@test_throws InvertibilityError allocate!(st, :x)
@test st.existing == [:x, :y]
# deallocate! : not exist
@test_throws InvertibilityError deallocate!(st, :z)
# deallocate! : existing
deallocate!(st, :y)
@test st.existing == [:x]
@test st.deallocated == [:y]
# deallocate! : deallocated
@test_throws InvertibilityError deallocate!(st, :y)
# operate! : deallocated
@test_throws InvertibilityError operate!(st, :y)
# allocate! : deallocated
allocate!(st, :y)
@test st.existing == [:x, :y]
@test st.deallocated == []
# operate! : not exist
operate!(st, :j)
@test st.unclassified == [:j]
# operate! : existing
operate!(st, :y)
@test st.unclassified == [:j]
# allocate! unclassified
@test_throws InvertibilityError allocate!(st, :j)
# operate! : unclassified
operate!(st, :j)
@test st.unclassified == [:j]
# deallocate! : unclassified
@test_throws InvertibilityError deallocate!(st, :j)
# swap both existing
swapvars!(st, :j, :x)
@test st.unclassified == [:x]
@test st.existing == [:j, :y]
# swap existing - nonexisting
swapvars!(st, :j, :k)
@test st.unclassified == [:x, :j]
@test st.existing == [:k, :y]
# swap nonexisting - existing
swapvars!(st, :o, :x)
@test st.unclassified == [:o, :j, :x]
@test st.existing == [:k, :y]
# swap both not existing
swapvars!(st, :m, :n)
@test st.unclassified == [:o, :j, :x, :m, :n]
# push and pop variables
end
@testset "variable analysis" begin
st = SymbolTable([:x, :y], [], [])
ex = :((x,y) ↔ (a, b))
variable_analysis_ex(ex, st)
@test st.existing == [:a, :b]
@test st.unclassified == [:x, :y]
st = SymbolTable([:x, :y], [], [])
ex = :((x,y) ↔ b)
variable_analysis_ex(ex, st)
@test st.existing == [:b]
@test st.unclassified == [:x, :y]
ex = :(b ↔ (x,y))
variable_analysis_ex(ex, st)
@test st.existing == [:x, :y]
@test st.unclassified == [:b]
st = SymbolTable([:x, :y], [], [])
ex = :(b ↔ x)
variable_analysis_ex(ex, st)
@test st.existing == [:b, :y]
@test st.unclassified == [:x]
st = SymbolTable([], [], [])
ex = :(b ↔ (x, y))
variable_analysis_ex(ex, st)
@test st.existing == []
@test st.unclassified == [:b, :x, :y]
end | NiLangCore | https://github.com/GiggleLiu/NiLangCore.jl.git |
|
[
"Apache-2.0"
] | 0.10.7 | a6448d0f450d85be5777659e695d67c19ec6a707 | code | 1670 | using Test, NiLangCore
using NiLangCore: get_argname, get_ftype, match_function, MyOrderedDict
@testset "match function" begin
ex = match_function(:(function f(x) x end))
@test ex[1] == nothing
@test ex[2] == :f
@test ex[3] == [:x]
@test ex[4] == []
@test length(filter(x->!(x isa LineNumberNode), ex[5])) == 1
ex = match_function(:(@inline function f(x; y) x end))
@test ex[1][1] == Symbol("@inline")
@test ex[1][2] isa LineNumberNode
@test ex[2] == :f
@test ex[3] == [Expr(:parameters, :y), :x]
@test length(filter(x->!(x isa LineNumberNode), ex[5])) == 1
@test ex[4] == []
ex = match_function(:(function f(x::T) where T x end))
@test ex[2] == :f
@test ex[3] == [:(x::T)]
@test length(filter(x->!(x isa LineNumberNode), ex[5])) == 1
@test ex[4] == [:T]
ex = match_function(:(f(x)=x))
@test ex[2] == :f
@test ex[3] == [:x]
@test length(ex[5]) == 2
@test ex[4] == []
end
@testset "argname and type" begin
@test get_argname(:(y=3)) == :y
@test get_argname(:(y::Int)) == :y
@test get_argname(:(y::Int=3)) == :y
@test get_argname(:(f(; k::Int=4)).args[2]) == :(f(; k::Int=4)).args[2]
end
@testset "my ordered dict" begin
od = MyOrderedDict{Any, Any}()
od[:a] = 2
od[:b] = 4
od[:c] = 7
@test length(od) == 3
@test od[:b] == 4
od[:b] = 1
@test od[:b] == 1
delete!(od, :b)
@test_throws KeyError od[:b]
@test pop!(od) == (:c, 7)
@test length(od) == 1
end
@testset "unzipped broadcast" begin
x = [1, 2, 3.0]
res = NiLangCore.unzipped_broadcast(exp, x)
@test res === x
@test res ≈ exp.([1, 2, 3.0])
end
| NiLangCore | https://github.com/GiggleLiu/NiLangCore.jl.git |
|
[
"Apache-2.0"
] | 0.10.7 | a6448d0f450d85be5777659e695d67c19ec6a707 | code | 3544 | using Test, NiLangCore
using NiLangCore: type2tuple
@testset "dataview" begin
x = 1.0
@test_throws ErrorException chfield(x, "asdf", 3.0)
@test chfield(x, identity, 2.0) === 2.0
@assign -x 0.1
@test x == -0.1
x = 1+2.0im
@assign x' 0.1+1im
@test x == 0.1-1im
x = (3, 4)
@instr (x.:1) += 3
@test x == (6, 4)
x = 3
y = (4,)
@instr x += y.:1
@test x == 7
x = [3, 4]
y = ([4, 4],)
@instr x .+= y.:1
@test x == [7.0, 8.0]
x = true
y = (true,)
@instr x ⊻= y.:1
@test x == false
x = [true, false]
y = ([true, true],)
@instr x .⊻= (y |> tget(1))
@test x == [false, true]
x = ones(4)
y = ones(2)
@instr (x |> subarray(1:2)) += y
@test x == [2,2,1,1]
@instr (x |> subarray(1)) += (y |> subarray(1))
@test x == [3,2,1,1]
end
@testset "anc, deanc" begin
@i function f(y)
x ← y
x → 1.0
end
f(1.0)
@test_throws InvertibilityError f(1.1)
@i function f2(y)
x ← y
x → (1.0, 2.0)
end
f2((1.0, 2.0))
@test_throws InvertibilityError f2((1.1, 2.0))
@i function f3(y)
x ← y
x → [1.0, 2.0]
end
f3([1.0, 2.0])
@test_throws InvertibilityError f3([1.1, 2.0])
struct B
a
b
end
@i function f4(y)
x ← y
x → B(1.0, 2.0)
end
f4(B(1.0, 2.0))
@test_throws InvertibilityError f4(B(1.0, 1.1))
@i function f5(y)
x ← y
x → ""
end
f5("")
@test_throws InvertibilityError f5("a")
end
@testset "inv and tuple output" begin
a, b = false, false
@instr ~(a ⊻= true)
@test a == true
@instr ~((a, b) ⊻= (true, true))
@test a == false
@test b == true
y = 1.0
x = 1.0
@instr ~(~(y += 1.0))
@test y == 2.0
@instr ~(~((x, y) += (1.0, 1.0)))
@test y == 3.0
@test x == 2.0
@instr ~((x, y) += (1.0, 1.0))
@test y == 2.0
@test x == 1.0
@instr ~(y += 1.0)
@test y == 1.0
z = [1.0, 2.0]
@instr ~(~(z .+= [1.0, 2.0]))
@test z ≈ [2.0, 4.0]
end
@testset "chfield" begin
x = [1,2,3]
@test chfield(x, length, 3) == x
@test_throws InvertibilityError chfield(x, length, 2)
end
@testset "invcheck" begin
@test (@invcheck 0.3 0.3) isa Any
@test_throws InvertibilityError (@invcheck 0.3 0.4)
@test_throws InvertibilityError (@invcheck 3 3.0)
end
@testset "dict" begin
@i function f1()
d ← Dict(1=>1, 2=>2)
d → Dict(2=>2)
end
@i function f2()
d ← Dict(1=>1)
d → Dict(2=>1)
end
@i function f3()
d ← Dict(1=>1)
d → Dict(1=>2)
end
@i function f4()
d ← Dict(1=>1)
d → Dict(1=>1)
end
@test_throws InvertibilityError f1()
@test_throws InvertibilityError f2()
@test_throws InvertibilityError f3()
@test f4() == ()
end
@testset "fieldview" begin
@fieldview first_real(x::Vector{ComplexF64}) = x[1].re
x = [1.0im, 2+3im]
@instr (x |> first_real) += 3
@test x == [3+1.0im, 2+3.0im]
end
@testset "mutable struct set field" begin
mutable struct MS{T}
x::T
y::T
z::T
end
ms = MS(0.5, 0.6, 0.7)
@i function f(ms)
ms.x += 1
ms.y += 1
ms.z -= ms.x ^ 2
end
ms2 = f(ms)
@test (ms2.x, ms2.y, ms2.z) == (1.5, 1.6, -1.55)
struct IMS{T}
x::T
y::T
z::T
end
ms = IMS(0.5, 0.6, 0.7)
ms2 = f(ms)
@test (ms2.x, ms2.y, ms2.z) == (1.5, 1.6, -1.55)
end | NiLangCore | https://github.com/GiggleLiu/NiLangCore.jl.git |
|
[
"Apache-2.0"
] | 0.10.7 | a6448d0f450d85be5777659e695d67c19ec6a707 | docs | 2844 | # NiLangCore
The core package for the reversible eDSL NiLang.
![CI](https://github.com/GiggleLiu/NiLangCore.jl/workflows/CI/badge.svg)
[![codecov](https://codecov.io/gh/GiggleLiu/NiLangCore.jl/branch/master/graph/badge.svg?token=ReCkoV9Pgp)](https://codecov.io/gh/GiggleLiu/NiLangCore.jl)
**Warning**
Requires Julia version >= 1.3.
## Examples
1. Define a pair of dual instructions
```julia
julia> using NiLangCore
julia> function ADD(a!::Number, b::Number)
a! + b, b
end
ADD (generic function with 3 methods)
julia> function SUB(a!::Number, b::Number)
a! - b, b
end
SUB (generic function with 3 methods)
julia> @dual ADD SUB
```
2. Define a reversible function
```julia
julia> @i function test(a, b)
SUB(a, b)
end
```
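
Calling `test` applies `SUB`, and `~test` is its inverse (a usage sketch; the return
values follow from `SUB(a!, b) = (a! - b, b)` defined above):

```julia
julia> test(3.0, 1.0)
(2.0, 1.0)

julia> (~test)(2.0, 1.0)
(3.0, 1.0)
```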
## Reversible IR
```julia
julia> using NiLangCore
julia> @code_reverse x += f(y)
:(x -= f(y))
julia> @code_reverse x .+= f.(y)
:(x .-= f.(y))
julia> @code_reverse x ⊻= f(y)
:(x ⊻= f(y))
julia> @code_reverse x ← zero(T)
:(x → zero(T))
julia> @code_reverse begin y += f(x) end
quote
#= /home/leo/.julia/dev/NiLangCore/src/dualcode.jl:82 =#
y -= f(x)
#= REPL[52]:1 =#
end
julia> @code_reverse if (precond, postcond) y += f(x) else y += g(x) end
:(if (postcond, precond)
#= /home/leo/.julia/dev/NiLangCore/src/dualcode.jl:69 =#
y -= f(x)
#= REPL[48]:1 =#
else
#= /home/leo/.julia/dev/NiLangCore/src/dualcode.jl:69 =#
y -= g(x)
#= REPL[48]:1 =#
end)
julia> @code_reverse while (precond, postcond) y += f(x) end
:(@from !postcond while precond
#= /home/leo/.julia/dev/NiLangCore/src/dualcode.jl:72 =#
y -= f(x)
#= REPL[49]:1 =#
end)
julia> @code_reverse for i=start:step:stop y += f(x) end
:(for i = stop:-step:start
#= /home/leo/.julia/dev/NiLangCore/src/dualcode.jl:76 =#
y -= f(x)
#= REPL[50]:1 =#
end)
julia> @code_reverse @safe println(x)
:(#= /home/leo/.julia/dev/NiLangCore/src/dualcode.jl:81 =# @safe println(x))
```
## A note on symbols
The `←` (\leftarrow + TAB) operation copies B to A; its inverse is `→` (\rightarrow + TAB)
* push into a stack, `A[end+1] ← B` => `[A..., B], B`
* add a key-value pair into a dict, `A[i] ← B` => `{A..., i=>B}, B`
* allocate a new ancilla, `(A = ∅) ← B` => `(A = B), B`
The `↔` (\leftrightarrow + TAB) operation swaps A and B; it is self-reversible
* swap two variables, `A ↔ B` => `B, A`
* transfer into a stack, `A[end+1] ↔ B` => `[A..., B], ∅`
* transfer a key-value pair into a dict, `A[i] ↔ B` => `haskey ? {(A\A[i])..., i=>B}, A[i] : {A..., i=>B}, ∅`
* transfer the value of two variables, `(A = ∅) ↔ B` => `(A = B), ∅`
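
A minimal sketch combining allocation (`←`/`→`) and swap (`↔`) inside an `@i` function
(the function name is illustrative):

```julia
julia> @i function demo(x, y)
           z ← zero(x)   # allocate an ancilla with value 0
           z += x
           y += z        # effectively y += x
           z -= x        # restore z to 0
           z → zero(x)   # deallocate; throws InvertibilityError unless z == 0
           x ↔ y         # swap x and y; ↔ is its own inverse
       end

julia> demo(1.0, 2.0)
(3.0, 1.0)

julia> (~demo)(3.0, 1.0)
(1.0, 2.0)
```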
One can use `var::∅` to annotate `var` as a fresh new variable (only new variables can be allocated), use `var[end+1]` to represent stack top for push and `var[end]` for stack top for pop. | NiLangCore | https://github.com/GiggleLiu/NiLangCore.jl.git |
|
[
"Apache-2.0"
] | 0.10.7 | a6448d0f450d85be5777659e695d67c19ec6a707 | docs | 351 | # Benchmark Report
### May 8th, 2021
```
"FastStack-inbounds" => Trial(3.136 ns)
"NiLang-@invcheckoff-@inbounds" => Trial(2.096 ns)
"NiLang-@invcheckoff" => Trial(5.341 ns)
"FastStack" => Trial(6.775 ns)
"NiLang" => Trial(22.935 ns)
"Julia" => Trial(12.062 ns)
"setindex-inbounds" => Trial(2.362 ns)
"setindex" => Trial(2.321 ns)
``` | NiLangCore | https://github.com/GiggleLiu/NiLangCore.jl.git |
|
[
"Apache-2.0"
] | 0.10.7 | a6448d0f450d85be5777659e695d67c19ec6a707 | docs | 72 | # NiLangCore.jl
```@index
```
```@autodocs
Modules = [NiLangCore]
```
| NiLangCore | https://github.com/GiggleLiu/NiLangCore.jl.git |
|
[
"MIT"
] | 5.4.0 | d4ab12197672f0f4a3afb850d574cfded5fd9070 | code | 403 | using AbstractMCMC
using Documenter
using Random
DocMeta.setdocmeta!(AbstractMCMC, :DocTestSetup, :(using AbstractMCMC); recursive=true)
makedocs(;
sitename="AbstractMCMC",
format=Documenter.HTML(),
modules=[AbstractMCMC],
pages=["Home" => "index.md", "api.md", "design.md"],
checkdocs=:exports,
)
deploydocs(; repo="github.com/TuringLang/AbstractMCMC.jl.git", push_preview=true)
| AbstractMCMC | https://github.com/TuringLang/AbstractMCMC.jl.git |
|
[
"MIT"
] | 5.4.0 | d4ab12197672f0f4a3afb850d574cfded5fd9070 | code | 2883 | module AbstractMCMC
using BangBang: BangBang
using ConsoleProgressMonitor: ConsoleProgressMonitor
using LogDensityProblems: LogDensityProblems
using LoggingExtras: LoggingExtras
using ProgressLogging: ProgressLogging
using StatsBase: StatsBase
using TerminalLoggers: TerminalLoggers
using Transducers: Transducers
using FillArrays: FillArrays
using Distributed: Distributed
using Logging: Logging
using Random: Random
# Reexport sample
using StatsBase: sample
export sample
# Parallel sampling types
export MCMCThreads, MCMCDistributed, MCMCSerial
"""
AbstractChains
`AbstractChains` is an abstract type for an object that stores
parameter samples generated through an MCMC process.
"""
abstract type AbstractChains end
"""
AbstractSampler
The `AbstractSampler` type is intended to be inherited from when
implementing a custom sampler. Any persistent state information should be
saved in a subtype of `AbstractSampler`.
When defining a new sampler, you should also overload the function
`transition_type`, which tells the `sample` function what type of parameter
it should expect to receive.
"""
abstract type AbstractSampler end
"""
AbstractModel
An `AbstractModel` represents a generic model type that can be used to perform inference.
"""
abstract type AbstractModel end
"""
AbstractMCMCEnsemble
An `AbstractMCMCEnsemble` represents a specific algorithm for sampling MCMC chains
in parallel.
"""
abstract type AbstractMCMCEnsemble end
"""
MCMCThreads
The `MCMCThreads` algorithm allows users to sample MCMC chains in parallel using multiple
threads.
"""
struct MCMCThreads <: AbstractMCMCEnsemble end
"""
MCMCDistributed
The `MCMCDistributed` algorithm allows users to sample MCMC chains in parallel using multiple
processes.
"""
struct MCMCDistributed <: AbstractMCMCEnsemble end
"""
MCMCSerial
The `MCMCSerial` algorithm allows users to sample serially, with no thread or process parallelism.
"""
struct MCMCSerial <: AbstractMCMCEnsemble end
include("samplingstats.jl")
include("logging.jl")
include("interface.jl")
include("sample.jl")
include("stepper.jl")
include("transducer.jl")
include("logdensityproblems.jl")
if isdefined(Base.Experimental, :register_error_hint)
function __init__()
Base.Experimental.register_error_hint(MethodError) do io, exc, argtypes, _
if Base.parentmodule(exc.f) == LogDensityProblems &&
any(a -> a <: LogDensityModel, argtypes)
print(
io,
"\n`AbstractMCMC.LogDensityModel` is a wrapper and does not itself implement the LogDensityProblems.jl interface. To use LogDensityProblems.jl methods, access the inner type with (e.g.) `logdensity(model.logdensity, params)` instead of `logdensity(model, params)`.",
)
end
end
end
end
end # module AbstractMCMC
| AbstractMCMC | https://github.com/TuringLang/AbstractMCMC.jl.git |
|
[
"MIT"
] | 5.4.0 | d4ab12197672f0f4a3afb850d574cfded5fd9070 | code | 4537 | """
chainscat(c::AbstractChains...)
Concatenate multiple chains.
By default, the chains are concatenated along the third dimension by calling
`cat(c...; dims=3)`.
"""
chainscat(c::AbstractChains...) = cat(c...; dims=3)
"""
chainsstack(c::AbstractVector)
Stack chains in `c`.
By default, the vector of chains is returned unmodified. If `eltype(c) <: AbstractChains`,
then `reduce(chainscat, c)` is called.
"""
chainsstack(c) = c
chainsstack(c::AbstractVector{<:AbstractChains}) = reduce(chainscat, c)
"""
bundle_samples(samples, model, sampler, state, chain_type[; kwargs...])
Bundle all `samples` that were sampled from the `model` with the given `sampler` into a chain.
The final `state` of the `sampler` can be included in the chain. The type of the chain can
be specified with the `chain_type` argument.
By default, this method returns `samples`.
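
# Examples

A sketch of specializing this method for a hypothetical chain type `MyChain`
(not part of AbstractMCMC):

```julia
struct MyChain <: AbstractMCMC.AbstractChains
    samples::Vector
end

function AbstractMCMC.bundle_samples(
    samples::Vector,
    model::AbstractMCMC.AbstractModel,
    sampler::AbstractMCMC.AbstractSampler,
    state,
    ::Type{MyChain};
    kwargs...,
)
    return MyChain(samples)
end
```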
"""
function bundle_samples(
samples, model::AbstractModel, sampler::AbstractSampler, state, ::Type{T}; kwargs...
) where {T}
# dispatch to internal method for default implementations to fix
# method ambiguity issues (see #120)
return _bundle_samples(samples, model, sampler, state, T; kwargs...)
end
function _bundle_samples(
samples,
@nospecialize(::AbstractModel),
@nospecialize(::AbstractSampler),
@nospecialize(::Any),
::Type;
kwargs...,
)
return samples
end
function _bundle_samples(
samples::Vector,
@nospecialize(::AbstractModel),
@nospecialize(::AbstractSampler),
@nospecialize(::Any),
::Type{Vector{T}};
kwargs...,
) where {T}
return map(samples) do sample
convert(T, sample)
end
end
"""
step(rng, model, sampler[, state; kwargs...])
Return a 2-tuple of the next sample and the next state of the MCMC `sampler` for `model`.
Samples describe the results of a single step of the `sampler`. As an example, a sample
might include a vector of parameters sampled from a prior distribution.
When sampling using [`sample`](@ref), every `step` call after the first has access to the
current `state` of the sampler.
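
# Examples

A minimal sketch of implementing `step` for a custom sampler; `MySampler` and the
random-walk proposal are illustrative assumptions, not part of AbstractMCMC:

```julia
using AbstractMCMC, Random

struct MySampler <: AbstractMCMC.AbstractSampler end

# First step: there is no previous state, so draw an initial sample.
function AbstractMCMC.step(
    rng::AbstractRNG, model::AbstractMCMC.AbstractModel, ::MySampler; kwargs...
)
    sample = randn(rng)
    return sample, sample  # (sample, state)
end

# Subsequent steps: propose from the previous state (a simple random walk).
function AbstractMCMC.step(
    rng::AbstractRNG, model::AbstractMCMC.AbstractModel, ::MySampler, state; kwargs...
)
    sample = state + randn(rng)
    return sample, sample
end
```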
"""
function step end
"""
step_warmup(rng, model, sampler[, state; kwargs...])
Return a 2-tuple of the next sample and the next state of the MCMC `sampler` for `model`.
When sampling using [`sample`](@ref), this takes the place of [`AbstractMCMC.step`](@ref) for the first
`num_warmup` iterations, as specified by the `num_warmup` keyword to [`sample`](@ref).
This is useful if the sampler has an initial "warm-up" stage that differs from the
standard iteration.
By default, this simply calls [`AbstractMCMC.step`](@ref).
"""
step_warmup(rng, model, sampler; kwargs...) = step(rng, model, sampler; kwargs...)
function step_warmup(rng, model, sampler, state; kwargs...)
return step(rng, model, sampler, state; kwargs...)
end
"""
samples(sample, model, sampler[, N; kwargs...])
Generate a container for the samples of the MCMC `sampler` for the `model`, whose first
sample is `sample`.
The method can be called with and without a predefined number `N` of samples.
"""
function samples(sample, ::AbstractModel, ::AbstractSampler, N::Integer; kwargs...)
ts = Vector{typeof(sample)}(undef, 0)
sizehint!(ts, N)
return ts
end
function samples(sample, ::AbstractModel, ::AbstractSampler; kwargs...)
return Vector{typeof(sample)}(undef, 0)
end
"""
save!!(samples, sample, iteration, model, sampler[, N; kwargs...])
Save the `sample` of the MCMC `sampler` at the current `iteration` in the container of
`samples`.
The function can be called with and without a predefined number `N` of samples. By default,
AbstractMCMC uses `push!!` from the Julia package
[BangBang](https://github.com/tkf/BangBang.jl) to append to the container, and widen its
type if needed.
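
# Examples

A small illustration of the widening behavior of `push!!` that the default
implementation relies on (not specific to AbstractMCMC):

```julia
using BangBang

samples = [1, 2]                         # Vector{Int}
samples = BangBang.push!!(samples, 3.5)  # returns a widened Vector{Float64}
```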
"""
function save!!(
samples::Vector,
sample,
iteration::Integer,
::AbstractModel,
::AbstractSampler,
N::Integer;
kwargs...,
)
s = BangBang.push!!(samples, sample)
s !== samples && sizehint!(s, N)
return s
end
function save!!(
samples, sample, iteration::Integer, ::AbstractModel, ::AbstractSampler; kwargs...
)
return BangBang.push!!(samples, sample)
end
# Deprecations
Base.@deprecate transitions(
transition, model::AbstractModel, sampler::AbstractSampler, N::Integer; kwargs...
) samples(transition, model, sampler, N; kwargs...) false
Base.@deprecate transitions(
transition, model::AbstractModel, sampler::AbstractSampler; kwargs...
) samples(transition, model, sampler; kwargs...) false
| AbstractMCMC | https://github.com/TuringLang/AbstractMCMC.jl.git |
|
[
"MIT"
] | 5.4.0 | d4ab12197672f0f4a3afb850d574cfded5fd9070 | code | 4003 | """
LogDensityModel <: AbstractMCMC.AbstractModel
Wrapper around something that implements the LogDensityProblem.jl interface.
Note that this does _not_ implement the LogDensityProblems.jl interface itself,
but is simply useful for indicating to `sample` and other `AbstractMCMC` methods
that the wrapped object implements the LogDensityProblems.jl interface.
# Fields
- `logdensity`: The object that implements the LogDensityProblems.jl interface.
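
# Examples

A minimal sketch of wrapping an object that implements the LogDensityProblems.jl
interface (the `Gaussian` type is made up for illustration):

```julia
using AbstractMCMC, LogDensityProblems

struct Gaussian end

LogDensityProblems.logdensity(::Gaussian, x) = -sum(abs2, x) / 2
LogDensityProblems.dimension(::Gaussian) = 1
function LogDensityProblems.capabilities(::Type{Gaussian})
    return LogDensityProblems.LogDensityOrder{0}()
end

model = AbstractMCMC.LogDensityModel(Gaussian())
```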
"""
struct LogDensityModel{L} <: AbstractModel
logdensity::L
function LogDensityModel{L}(logdensity::L) where {L}
if LogDensityProblems.capabilities(logdensity) === nothing
throw(
ArgumentError(
"The log density function does not support the LogDensityProblems.jl interface",
),
)
end
return new{L}(logdensity)
end
end
LogDensityModel(logdensity::L) where {L} = LogDensityModel{L}(logdensity)
# Fallbacks: Wrap log density function in a model
"""
sample(
rng::Random.AbstractRNG=Random.default_rng(),
logdensity,
sampler::AbstractSampler,
N_or_isdone;
kwargs...,
)
Wrap the `logdensity` function in a [`LogDensityModel`](@ref), and call `sample` with the resulting model instead of `logdensity`.
The `logdensity` function has to support the [LogDensityProblems.jl](https://github.com/tpapp/LogDensityProblems.jl) interface.
"""
function StatsBase.sample(
rng::Random.AbstractRNG, logdensity, sampler::AbstractSampler, N_or_isdone; kwargs...
)
return StatsBase.sample(rng, _model(logdensity), sampler, N_or_isdone; kwargs...)
end
"""
sample(
rng::Random.AbstractRNG=Random.default_rng(),
logdensity,
sampler::AbstractSampler,
parallel::AbstractMCMCEnsemble,
N::Integer,
nchains::Integer;
kwargs...,
)
Wrap the `logdensity` function in a [`LogDensityModel`](@ref), and call `sample` with the resulting model instead of `logdensity`.
The `logdensity` function has to support the [LogDensityProblems.jl](https://github.com/tpapp/LogDensityProblems.jl) interface.
"""
function StatsBase.sample(
rng::Random.AbstractRNG,
logdensity,
sampler::AbstractSampler,
parallel::AbstractMCMCEnsemble,
N::Integer,
nchains::Integer;
kwargs...,
)
return StatsBase.sample(
rng, _model(logdensity), sampler, parallel, N, nchains; kwargs...
)
end
"""
steps(
rng::Random.AbstractRNG=Random.default_rng(),
logdensity,
sampler::AbstractSampler;
kwargs...,
)
Wrap the `logdensity` function in a [`LogDensityModel`](@ref), and call `steps` with the resulting model instead of `logdensity`.
The `logdensity` function has to support the [LogDensityProblems.jl](https://github.com/tpapp/LogDensityProblems.jl) interface.
"""
function steps(rng::Random.AbstractRNG, logdensity, sampler::AbstractSampler; kwargs...)
return steps(rng, _model(logdensity), sampler; kwargs...)
end
"""
Sample(
rng::Random.AbstractRNG=Random.default_rng(),
logdensity,
sampler::AbstractSampler;
kwargs...,
)
Wrap the `logdensity` function in a [`LogDensityModel`](@ref), and call `Sample` with the resulting model instead of `logdensity`.
The `logdensity` function has to support the [LogDensityProblems.jl](https://github.com/tpapp/LogDensityProblems.jl) interface.
"""
function Sample(rng::Random.AbstractRNG, logdensity, sampler::AbstractSampler; kwargs...)
return Sample(rng, _model(logdensity), sampler; kwargs...)
end
function _model(logdensity)
if LogDensityProblems.capabilities(logdensity) === nothing
throw(
ArgumentError(
"the log density function does not support the LogDensityProblems.jl interface. Please implement the interface or provide a model of type `AbstractMCMC.AbstractModel`",
),
)
end
return LogDensityModel(logdensity)
end
| AbstractMCMC | https://github.com/TuringLang/AbstractMCMC.jl.git |
|
[
"MIT"
] | 5.4.0 | d4ab12197672f0f4a3afb850d574cfded5fd9070 | code | 1785 | # avoid creating a progress bar with @withprogress if progress logging is disabled
# and add a custom progress logger if the current logger does not seem to be able to handle
# progress logs
macro ifwithprogresslogger(progress, exprs...)
return esc(
quote
if $progress
if $hasprogresslevel($Logging.current_logger())
$ProgressLogging.@withprogress $(exprs...)
else
$with_progresslogger($Base.@__MODULE__, $Logging.current_logger()) do
$ProgressLogging.@withprogress $(exprs...)
end
end
else
$(exprs[end])
end
end,
)
end
# improved checks?
function hasprogresslevel(logger)
return Logging.min_enabled_level(logger) ≤ ProgressLogging.ProgressLevel
end
# filter better, e.g., according to group?
function with_progresslogger(f, _module, logger)
logger1 = LoggingExtras.EarlyFilteredLogger(progresslogger()) do log
log._module === _module && log.level == ProgressLogging.ProgressLevel
end
logger2 = LoggingExtras.EarlyFilteredLogger(logger) do log
log._module !== _module || log.level != ProgressLogging.ProgressLevel
end
return Logging.with_logger(f, LoggingExtras.TeeLogger(logger1, logger2))
end
function progresslogger()
# detect if code is running under IJulia since TerminalLogger does not work with IJulia
# https://github.com/JuliaLang/IJulia.jl#detecting-that-code-is-running-under-ijulia
if (Sys.iswindows() && VERSION < v"1.5.3") ||
(isdefined(Main, :IJulia) && Main.IJulia.inited)
return ConsoleProgressMonitor.ProgressLogger()
else
return TerminalLoggers.TerminalLogger()
end
end
| AbstractMCMC | https://github.com/TuringLang/AbstractMCMC.jl.git |
|
[
"MIT"
] | 5.4.0 | d4ab12197672f0f4a3afb850d574cfded5fd9070 | code | 22375 | # Default implementations of `sample`.
const PROGRESS = Ref(true)
"""
setprogress!(progress::Bool; silent::Bool=false)
Enable progress logging globally if `progress` is `true`, and disable it otherwise.
Optionally disable the informational message if `silent` is `true`.
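
# Examples

```julia
julia> AbstractMCMC.setprogress!(false; silent=true)
false
```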
"""
function setprogress!(progress::Bool; silent::Bool=false)
if !silent
@info "progress logging is $(progress ? "enabled" : "disabled") globally"
end
PROGRESS[] = progress
return progress
end
function StatsBase.sample(
model_or_logdensity, sampler::AbstractSampler, N_or_isdone; kwargs...
)
return StatsBase.sample(
Random.default_rng(), model_or_logdensity, sampler, N_or_isdone; kwargs...
)
end
"""
sample(
    rng::Random.AbstractRNG=Random.default_rng(),
model::AbstractModel,
sampler::AbstractSampler,
N_or_isdone;
kwargs...,
)
Sample from the `model` with the Markov chain Monte Carlo `sampler` and return the samples.
If `N_or_isdone` is an `Integer`, exactly `N_or_isdone` samples are returned.
Otherwise, sampling is performed until a convergence criterion `N_or_isdone` returns `true`.
The convergence criterion has to be a function with the signature
```julia
isdone(rng, model, sampler, samples, state, iteration; kwargs...)
```
where `state` and `iteration` are the current state and iteration of the sampler, respectively.
It should return `true` when sampling should end, and `false` otherwise.
# Keyword arguments
See https://turinglang.org/AbstractMCMC.jl/dev/api/#Common-keyword-arguments for common keyword
arguments.
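
# Examples

A sketch of a convergence criterion with the required signature; here it simply
stops after 1000 iterations, whereas a real criterion would inspect `samples`:

```julia
function mydone(rng, model, sampler, samples, state, iteration; kwargs...)
    return iteration > 1_000
end

# assuming `model` and `sampler` implement the AbstractMCMC interface
chain = sample(model, sampler, mydone)
```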
"""
function StatsBase.sample(
rng::Random.AbstractRNG,
model::AbstractModel,
sampler::AbstractSampler,
N_or_isdone;
kwargs...,
)
return mcmcsample(rng, model, sampler, N_or_isdone; kwargs...)
end
function StatsBase.sample(
model_or_logdensity,
sampler::AbstractSampler,
parallel::AbstractMCMCEnsemble,
N::Integer,
nchains::Integer;
kwargs...,
)
return StatsBase.sample(
Random.default_rng(), model_or_logdensity, sampler, parallel, N, nchains; kwargs...
)
end
"""
sample(
rng::Random.AbstractRNG=Random.default_rng(),
model::AbstractModel,
sampler::AbstractSampler,
parallel::AbstractMCMCEnsemble,
N::Integer,
nchains::Integer;
kwargs...,
)
Sample `nchains` Monte Carlo Markov chains from the `model` with the `sampler` in parallel
using the `parallel` algorithm, and combine them into a single chain.
# Keyword arguments
See https://turinglang.org/AbstractMCMC.jl/dev/api/#Common-keyword-arguments for common keyword
arguments.
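
# Examples

A usage sketch, assuming `model` and `sampler` implement the AbstractMCMC
interface:

```julia
# Sample 4 chains of 1000 draws each using up to 4 threads.
chains = sample(model, sampler, MCMCThreads(), 1000, 4)
```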
"""
function StatsBase.sample(
rng::Random.AbstractRNG,
model::AbstractModel,
sampler::AbstractSampler,
parallel::AbstractMCMCEnsemble,
N::Integer,
nchains::Integer;
kwargs...,
)
return mcmcsample(rng, model, sampler, parallel, N, nchains; kwargs...)
end
# Default implementations of regular and parallel sampling.
function mcmcsample(
rng::Random.AbstractRNG,
model::AbstractModel,
sampler::AbstractSampler,
N::Integer;
progress=PROGRESS[],
progressname="Sampling",
callback=nothing,
num_warmup::Int=0,
discard_initial::Int=num_warmup,
thinning=1,
chain_type::Type=Any,
initial_state=nothing,
kwargs...,
)
# Check the number of requested samples.
N > 0 || error("the number of samples must be ≥ 1")
discard_initial >= 0 ||
throw(ArgumentError("number of discarded samples must be non-negative"))
num_warmup >= 0 ||
throw(ArgumentError("number of warm-up samples must be non-negative"))
Ntotal = thinning * (N - 1) + discard_initial + 1
Ntotal >= num_warmup || throw(
ArgumentError("number of warm-up samples exceeds the total number of samples")
)
# Determine how many samples to drop from `num_warmup` and the
# main sampling process before we start saving samples.
discard_from_warmup = min(num_warmup, discard_initial)
keep_from_warmup = num_warmup - discard_from_warmup
# Start the timer
start = time()
local state
@ifwithprogresslogger progress name = progressname begin
# Determine threshold values for progress logging
# (one update per 0.5% of progress)
if progress
threshold = Ntotal ÷ 200
next_update = threshold
end
# Obtain the initial sample and state.
sample, state = if num_warmup > 0
if initial_state === nothing
step_warmup(rng, model, sampler; kwargs...)
else
step_warmup(rng, model, sampler, initial_state; kwargs...)
end
else
if initial_state === nothing
step(rng, model, sampler; kwargs...)
else
step(rng, model, sampler, initial_state; kwargs...)
end
end
# Update the progress bar.
itotal = 1
if progress && itotal >= next_update
ProgressLogging.@logprogress itotal / Ntotal
next_update = itotal + threshold
end
# Discard initial samples.
for j in 1:discard_initial
# Obtain the next sample and state.
sample, state = if j ≤ num_warmup
step_warmup(rng, model, sampler, state; kwargs...)
else
step(rng, model, sampler, state; kwargs...)
end
# Update the progress bar.
if progress && (itotal += 1) >= next_update
ProgressLogging.@logprogress itotal / Ntotal
next_update = itotal + threshold
end
end
# Run callback.
callback === nothing || callback(rng, model, sampler, sample, state, 1; kwargs...)
# Save the sample.
samples = AbstractMCMC.samples(sample, model, sampler, N; kwargs...)
samples = save!!(samples, sample, 1, model, sampler, N; kwargs...)
# Step through the sampler.
for i in 2:N
# Discard thinned samples.
for _ in 1:(thinning - 1)
# Obtain the next sample and state.
sample, state = if i ≤ keep_from_warmup
step_warmup(rng, model, sampler, state; kwargs...)
else
step(rng, model, sampler, state; kwargs...)
end
# Update progress bar.
if progress && (itotal += 1) >= next_update
ProgressLogging.@logprogress itotal / Ntotal
next_update = itotal + threshold
end
end
# Obtain the next sample and state.
sample, state = if i ≤ keep_from_warmup
step_warmup(rng, model, sampler, state; kwargs...)
else
step(rng, model, sampler, state; kwargs...)
end
# Run callback.
callback === nothing ||
callback(rng, model, sampler, sample, state, i; kwargs...)
# Save the sample.
samples = save!!(samples, sample, i, model, sampler, N; kwargs...)
# Update the progress bar.
if progress && (itotal += 1) >= next_update
ProgressLogging.@logprogress itotal / Ntotal
next_update = itotal + threshold
end
end
end
# Get the sample stop time.
stop = time()
duration = stop - start
stats = SamplingStats(start, stop, duration)
return bundle_samples(
samples,
model,
sampler,
state,
chain_type;
stats=stats,
discard_initial=discard_initial,
thinning=thinning,
kwargs...,
)
end
function mcmcsample(
rng::Random.AbstractRNG,
model::AbstractModel,
sampler::AbstractSampler,
isdone;
chain_type::Type=Any,
progress=PROGRESS[],
progressname="Convergence sampling",
callback=nothing,
num_warmup=0,
discard_initial=num_warmup,
thinning=1,
initial_state=nothing,
kwargs...,
)
# Check the number of requested samples.
discard_initial >= 0 ||
throw(ArgumentError("number of discarded samples must be non-negative"))
num_warmup >= 0 ||
throw(ArgumentError("number of warm-up samples must be non-negative"))
# Determine how many samples to drop from `num_warmup` and the
# main sampling process before we start saving samples.
discard_from_warmup = min(num_warmup, discard_initial)
keep_from_warmup = num_warmup - discard_from_warmup
# Start the timer
start = time()
local state
@ifwithprogresslogger progress name = progressname begin
# Obtain the initial sample and state.
sample, state = if num_warmup > 0
if initial_state === nothing
step_warmup(rng, model, sampler; kwargs...)
else
step_warmup(rng, model, sampler, initial_state; kwargs...)
end
else
if initial_state === nothing
step(rng, model, sampler; kwargs...)
else
step(rng, model, sampler, initial_state; kwargs...)
end
end
# Discard initial samples.
for j in 1:discard_initial
# Obtain the next sample and state.
sample, state = if j ≤ num_warmup
step_warmup(rng, model, sampler, state; kwargs...)
else
step(rng, model, sampler, state; kwargs...)
end
end
# Run callback.
callback === nothing || callback(rng, model, sampler, sample, state, 1; kwargs...)
# Save the sample.
samples = AbstractMCMC.samples(sample, model, sampler; kwargs...)
samples = save!!(samples, sample, 1, model, sampler; kwargs...)
# Step through the sampler until stopping.
i = 2
while !isdone(rng, model, sampler, samples, state, i; progress=progress, kwargs...)
# Discard thinned samples.
for _ in 1:(thinning - 1)
# Obtain the next sample and state.
sample, state = if i ≤ keep_from_warmup
step_warmup(rng, model, sampler, state; kwargs...)
else
step(rng, model, sampler, state; kwargs...)
end
end
# Obtain the next sample and state.
sample, state = if i ≤ keep_from_warmup
step_warmup(rng, model, sampler, state; kwargs...)
else
step(rng, model, sampler, state; kwargs...)
end
# Run callback.
callback === nothing ||
callback(rng, model, sampler, sample, state, i; kwargs...)
# Save the sample.
samples = save!!(samples, sample, i, model, sampler; kwargs...)
# Increment iteration counter.
i += 1
end
end
# Get the sample stop time.
stop = time()
duration = stop - start
stats = SamplingStats(start, stop, duration)
# Wrap the samples up.
return bundle_samples(
samples,
model,
sampler,
state,
chain_type;
stats=stats,
discard_initial=discard_initial,
thinning=thinning,
kwargs...,
)
end
function mcmcsample(
rng::Random.AbstractRNG,
model::AbstractModel,
sampler::AbstractSampler,
::MCMCThreads,
N::Integer,
nchains::Integer;
progress=PROGRESS[],
progressname="Sampling ($(min(nchains, Threads.nthreads())) threads)",
initial_params=nothing,
initial_state=nothing,
kwargs...,
)
# Check if actually multiple threads are used.
if Threads.nthreads() == 1
@warn "Only a single thread available: MCMC chains are not sampled in parallel"
end
# Check if the number of chains is larger than the number of samples
if nchains > N
@warn "Number of chains ($nchains) is greater than number of samples per chain ($N)"
end
# Copy the random number generator, model, and sample for each thread
nchunks = min(nchains, Threads.nthreads())
chunksize = cld(nchains, nchunks)
interval = 1:nchunks
rngs = [deepcopy(rng) for _ in interval]
models = [deepcopy(model) for _ in interval]
samplers = [deepcopy(sampler) for _ in interval]
# Create a seed for each chain using the provided random number generator.
seeds = rand(rng, UInt, nchains)
# Ensure that initial parameters and states are `nothing` or of the correct length
check_initial_params(initial_params, nchains)
check_initial_state(initial_state, nchains)
# Set up a chains vector.
chains = Vector{Any}(undef, nchains)
@ifwithprogresslogger progress name = progressname begin
# Create a channel for progress logging.
if progress
channel = Channel{Bool}(length(interval))
end
Distributed.@sync begin
if progress
# Update the progress bar.
Distributed.@async begin
# Determine threshold values for progress logging
# (one update per 0.5% of progress)
threshold = nchains ÷ 200
nextprogresschains = threshold
progresschains = 0
while take!(channel)
progresschains += 1
if progresschains >= nextprogresschains
ProgressLogging.@logprogress progresschains / nchains
nextprogresschains = progresschains + threshold
end
end
end
end
Distributed.@async begin
try
Distributed.@sync for (i, _rng, _model, _sampler) in
zip(1:nchunks, rngs, models, samplers)
chainidxs = if i == nchunks
((i - 1) * chunksize + 1):nchains
else
((i - 1) * chunksize + 1):(i * chunksize)
end
Threads.@spawn for chainidx in chainidxs
# Seed the chunk-specific random number generator with the pre-made seed.
Random.seed!(_rng, seeds[chainidx])
# Sample a chain and save it to the vector.
chains[chainidx] = StatsBase.sample(
_rng,
_model,
_sampler,
N;
progress=false,
initial_params=if initial_params === nothing
nothing
else
initial_params[chainidx]
end,
initial_state=if initial_state === nothing
nothing
else
initial_state[chainidx]
end,
kwargs...,
)
# Update the progress bar.
progress && put!(channel, true)
end
end
finally
# Stop updating the progress bar.
progress && put!(channel, false)
end
end
end
end
# Concatenate the chains together.
return chainsstack(tighten_eltype(chains))
end
function mcmcsample(
rng::Random.AbstractRNG,
model::AbstractModel,
sampler::AbstractSampler,
::MCMCDistributed,
N::Integer,
nchains::Integer;
progress=PROGRESS[],
progressname="Sampling ($(Distributed.nworkers()) processes)",
initial_params=nothing,
initial_state=nothing,
kwargs...,
)
# Check if actually multiple processes are used.
if Distributed.nworkers() == 1
@warn "Only a single process available: MCMC chains are not sampled in parallel"
end
# Check if the number of chains is larger than the number of samples
if nchains > N
@warn "Number of chains ($nchains) is greater than number of samples per chain ($N)"
end
# Ensure that initial parameters and states are `nothing` or of the correct length
check_initial_params(initial_params, nchains)
check_initial_state(initial_state, nchains)
_initial_params =
initial_params === nothing ? FillArrays.Fill(nothing, nchains) : initial_params
_initial_state =
initial_state === nothing ? FillArrays.Fill(nothing, nchains) : initial_state
# Create a seed for each chain using the provided random number generator.
seeds = rand(rng, UInt, nchains)
# Set up worker pool.
pool = Distributed.CachingPool(Distributed.workers())
local chains
@ifwithprogresslogger progress name = progressname begin
# Create a channel for progress logging.
if progress
channel = Distributed.RemoteChannel(() -> Channel{Bool}(Distributed.nworkers()))
end
Distributed.@sync begin
if progress
# Update the progress bar.
Distributed.@async begin
# Determine threshold values for progress logging
# (one update per 0.5% of progress)
threshold = nchains ÷ 200
nextprogresschains = threshold
progresschains = 0
while take!(channel)
progresschains += 1
if progresschains >= nextprogresschains
ProgressLogging.@logprogress progresschains / nchains
nextprogresschains = progresschains + threshold
end
end
end
end
Distributed.@async begin
try
function sample_chain(seed, initial_params, initial_state)
# Seed a new random number generator with the pre-made seed.
Random.seed!(rng, seed)
# Sample a chain.
chain = StatsBase.sample(
rng,
model,
sampler,
N;
progress=false,
initial_params=initial_params,
initial_state=initial_state,
kwargs...,
)
# Update the progress bar.
progress && put!(channel, true)
# Return the new chain.
return chain
end
chains = Distributed.pmap(
sample_chain, pool, seeds, _initial_params, _initial_state
)
finally
# Stop updating the progress bar.
progress && put!(channel, false)
end
end
end
end
# Concatenate the chains together.
return chainsstack(tighten_eltype(chains))
end
function mcmcsample(
rng::Random.AbstractRNG,
model::AbstractModel,
sampler::AbstractSampler,
::MCMCSerial,
N::Integer,
nchains::Integer;
progressname="Sampling",
initial_params=nothing,
initial_state=nothing,
kwargs...,
)
# Check if the number of chains is larger than the number of samples
if nchains > N
@warn "Number of chains ($nchains) is greater than number of samples per chain ($N)"
end
# Ensure that initial parameters and states are `nothing` or of the correct length
check_initial_params(initial_params, nchains)
check_initial_state(initial_state, nchains)
_initial_params =
initial_params === nothing ? FillArrays.Fill(nothing, nchains) : initial_params
_initial_state =
initial_state === nothing ? FillArrays.Fill(nothing, nchains) : initial_state
# Create a seed for each chain using the provided random number generator.
seeds = rand(rng, UInt, nchains)
# Sample the chains.
function sample_chain(i, seed, initial_params, initial_state)
# Seed a new random number generator with the pre-made seed.
Random.seed!(rng, seed)
# Sample a chain.
return StatsBase.sample(
rng,
model,
sampler,
N;
progressname=string(progressname, " (Chain ", i, " of ", nchains, ")"),
initial_params=initial_params,
initial_state=initial_state,
kwargs...,
)
end
chains = map(sample_chain, 1:nchains, seeds, _initial_params, _initial_state)
# Concatenate the chains together.
return chainsstack(tighten_eltype(chains))
end
tighten_eltype(x) = x
tighten_eltype(x::Vector{Any}) = map(identity, x)
@nospecialize check_initial_params(x, n) = throw(
ArgumentError(
"initial parameters must be specified as a vector of length equal to the number of chains or `nothing`",
),
)
check_initial_params(::Nothing, n) = nothing
function check_initial_params(x::AbstractArray, n)
if length(x) != n
throw(
ArgumentError(
"incorrect number of initial parameters (expected $n, received $(length(x))"
),
)
end
return nothing
end
@nospecialize check_initial_state(x, n) = throw(
ArgumentError(
"initial states must be specified as a vector of length equal to the number of chains or `nothing`",
),
)
check_initial_state(::Nothing, n) = nothing
function check_initial_state(x::AbstractArray, n)
if length(x) != n
throw(
ArgumentError(
"incorrect number of initial states (expected $n, received $(length(x))"
),
)
end
return nothing
end
| AbstractMCMC | https://github.com/TuringLang/AbstractMCMC.jl.git |
|
[
"MIT"
] | 5.4.0 | d4ab12197672f0f4a3afb850d574cfded5fd9070 | code | 404 | """
SamplingStats
A struct that tracks sampling information.
The fields available are:
- `start`: A `Float64` Unix timestamp indicating the start time of sampling.
- `stop`: A `Float64` Unix timestamp indicating the stop time of sampling.
- `duration`: The sampling time duration, defined as `stop - start`.
"""
struct SamplingStats
start::Float64
stop::Float64
duration::Float64
end
| AbstractMCMC | https://github.com/TuringLang/AbstractMCMC.jl.git |
|
[
"MIT"
] | 5.4.0 | d4ab12197672f0f4a3afb850d574cfded5fd9070 | code | 2246 | struct Stepper{A<:Random.AbstractRNG,M<:AbstractModel,S<:AbstractSampler,K}
rng::A
model::M
sampler::S
kwargs::K
end
# Initial sample.
function Base.iterate(stp::Stepper)
# Unpack iterator.
rng = stp.rng
model = stp.model
sampler = stp.sampler
kwargs = stp.kwargs
discard_initial = get(kwargs, :discard_initial, 0)::Int
# Start sampling algorithm and discard initial samples if desired.
sample, state = step(rng, model, sampler; kwargs...)
for _ in 1:discard_initial
sample, state = step(rng, model, sampler, state; kwargs...)
end
return sample, state
end
# Subsequent samples.
function Base.iterate(stp::Stepper, state)
# Unpack iterator.
rng = stp.rng
model = stp.model
sampler = stp.sampler
kwargs = stp.kwargs
thinning = get(kwargs, :thinning, 1)::Int
# Return next sample, possibly after thinning the chain if desired.
for _ in 1:(thinning - 1)
_, state = step(rng, model, sampler, state; kwargs...)
end
return step(rng, model, sampler, state; kwargs...)
end
Base.IteratorSize(::Type{<:Stepper}) = Base.IsInfinite()
Base.IteratorEltype(::Type{<:Stepper}) = Base.EltypeUnknown()
function steps(model_or_logdensity, sampler::AbstractSampler; kwargs...)
return steps(Random.default_rng(), model_or_logdensity, sampler; kwargs...)
end
"""
steps(
rng::Random.AbstractRNG=Random.default_rng(),
model::AbstractModel,
sampler::AbstractSampler;
kwargs...,
)
Create an iterator that returns samples from the `model` with the Markov chain Monte Carlo
`sampler`.
# Examples
```jldoctest; setup=:(using AbstractMCMC: steps)
julia> struct MyModel <: AbstractMCMC.AbstractModel end
julia> struct MySampler <: AbstractMCMC.AbstractSampler end
julia> function AbstractMCMC.step(rng, ::MyModel, ::MySampler, state=nothing; kwargs...)
# all samples are zero
return 0.0, state
end
julia> iterator = steps(MyModel(), MySampler());
julia> collect(Iterators.take(iterator, 10)) == zeros(10)
true
```
"""
function steps(
rng::Random.AbstractRNG, model::AbstractModel, sampler::AbstractSampler; kwargs...
)
return Stepper(rng, model, sampler, kwargs)
end
| AbstractMCMC | https://github.com/TuringLang/AbstractMCMC.jl.git |
|
[
"MIT"
] | 5.4.0 | d4ab12197672f0f4a3afb850d574cfded5fd9070 | code | 2887 | struct Sample{A<:Random.AbstractRNG,M<:AbstractModel,S<:AbstractSampler,K} <:
Transducers.Transducer
rng::A
model::M
sampler::S
kwargs::K
end
function Sample(model_or_logdensity, sampler::AbstractSampler; kwargs...)
return Sample(Random.default_rng(), model_or_logdensity, sampler; kwargs...)
end
"""
Sample(
rng::Random.AbstractRNG=Random.default_rng(),
model::AbstractModel,
sampler::AbstractSampler;
kwargs...,
)
Create a transducer that returns samples from the `model` with the Markov chain Monte Carlo
`sampler`.
# Examples
```jldoctest; setup=:(using AbstractMCMC: Sample)
julia> struct MyModel <: AbstractMCMC.AbstractModel end
julia> struct MySampler <: AbstractMCMC.AbstractSampler end
julia> function AbstractMCMC.step(rng, ::MyModel, ::MySampler, state=nothing; kwargs...)
# all samples are zero
return 0.0, state
end
julia> transducer = Sample(MyModel(), MySampler());
julia> collect(transducer(1:10)) == zeros(10)
true
```
"""
function Sample(
rng::Random.AbstractRNG, model::AbstractModel, sampler::AbstractSampler; kwargs...
)
return Sample(rng, model, sampler, kwargs)
end
# Initial sample.
function Transducers.start(rf::Transducers.R_{<:Sample}, result)
# Unpack transducer.
td = Transducers.xform(rf)
rng = td.rng
model = td.model
sampler = td.sampler
kwargs = td.kwargs
discard_initial = get(kwargs, :discard_initial, 0)::Int
# Start sampling algorithm and discard initial samples if desired.
sample, state = step(rng, model, sampler; kwargs...)
for _ in 1:discard_initial
sample, state = step(rng, model, sampler, state; kwargs...)
end
return Transducers.wrap(
rf, (sample, state), Transducers.start(Transducers.inner(rf), result)
)
end
# Subsequent samples.
function Transducers.next(rf::Transducers.R_{<:Sample}, result, input)
# Unpack transducer.
td = Transducers.xform(rf)
rng = td.rng
model = td.model
sampler = td.sampler
kwargs = td.kwargs
thinning = get(kwargs, :thinning, 1)::Int
let rng = rng,
model = model,
sampler = sampler,
kwargs = kwargs,
thinning = thinning,
inner_rf = Transducers.inner(rf)
Transducers.wrapping(rf, result) do (sample, state), iresult
iresult2 = Transducers.next(inner_rf, iresult, sample)
# Perform thinning if desired.
for _ in 1:(thinning - 1)
_, state = step(rng, model, sampler, state; kwargs...)
end
return step(rng, model, sampler, state; kwargs...), iresult2
end
end
end
function Transducers.complete(rf::Transducers.R_{<:Sample}, result)
_, inner_result = Transducers.unwrap(rf, result)
return Transducers.complete(Transducers.inner(rf), inner_result)
end
| AbstractMCMC | https://github.com/TuringLang/AbstractMCMC.jl.git |
|
[
"MIT"
] | 5.4.0 | d4ab12197672f0f4a3afb850d574cfded5fd9070 | code | 4138 | @testset "logdensityproblems.jl" begin
# Add worker processes.
# Memory requirements on Windows are ~4x larger than on Linux, hence number of processes is reduced
# See, e.g., https://github.com/JuliaLang/julia/issues/40766 and https://github.com/JuliaLang/Pkg.jl/pull/2366
pids = addprocs(Sys.iswindows() ? div(Sys.CPU_THREADS::Int, 2) : Sys.CPU_THREADS::Int)
# Load all required packages (`utils.jl` needs LogDensityProblems, Logging, and Random).
@everywhere begin
using AbstractMCMC
using AbstractMCMC: sample
using LogDensityProblems
using Logging
using Random
include("utils.jl")
end
@testset "LogDensityModel" begin
ℓ = MyLogDensity(10)
model = @inferred AbstractMCMC.LogDensityModel(ℓ)
@test model isa AbstractMCMC.LogDensityModel{MyLogDensity}
@test model.logdensity === ℓ
@test_throws ArgumentError AbstractMCMC.LogDensityModel(mylogdensity)
try
LogDensityProblems.logdensity(model, ones(10))
catch exc
@test exc isa MethodError
if isdefined(Base.Experimental, :register_error_hint)
@test occursin("is a wrapper", sprint(showerror, exc))
end
end
end
@testset "fallback for log densities" begin
# Sample with log density
dim = 10
ℓ = MyLogDensity(dim)
Random.seed!(1234)
N = 1_000
samples = sample(ℓ, MySampler(), N)
# Samples are of the correct dimension and log density values are correct
@test length(samples) == N
@test all(length(x.a) == dim for x in samples)
@test all(x.b ≈ LogDensityProblems.logdensity(ℓ, x.a) for x in samples)
# Same chain as if LogDensityModel is used explicitly
Random.seed!(1234)
samples2 = sample(AbstractMCMC.LogDensityModel(ℓ), MySampler(), N)
@test length(samples2) == N
@test all(x.a == y.a && x.b == y.b for (x, y) in zip(samples, samples2))
# Same chain if sampling is performed with convergence criterion
Random.seed!(1234)
isdone(rng, model, sampler, samples, state, iteration; kwargs...) = iteration > N
samples3 = sample(ℓ, MySampler(), isdone)
@test length(samples3) == N
@test all(x.a == y.a && x.b == y.b for (x, y) in zip(samples, samples3))
# Same chain if sampling is performed with iterator
Random.seed!(1234)
samples4 = collect(Iterators.take(AbstractMCMC.steps(ℓ, MySampler()), N))
@test length(samples4) == N
@test all(x.a == y.a && x.b == y.b for (x, y) in zip(samples, samples4))
# Same chain if sampling is performed with transducer
Random.seed!(1234)
xf = AbstractMCMC.Sample(ℓ, MySampler())
samples5 = collect(xf(1:N))
@test length(samples5) == N
@test all(x.a == y.a && x.b == y.b for (x, y) in zip(samples, samples5))
# Parallel sampling
for alg in (MCMCSerial(), MCMCDistributed(), MCMCThreads())
chains = sample(ℓ, MySampler(), alg, N, 2)
@test length(chains) == 2
samples = vcat(chains[1], chains[2])
@test length(samples) == 2 * N
@test all(length(x.a) == dim for x in samples)
@test all(x.b ≈ LogDensityProblems.logdensity(ℓ, x.a) for x in samples)
end
# Log density has to satisfy the LogDensityProblems interface
@test_throws ArgumentError sample(mylogdensity, MySampler(), N)
@test_throws ArgumentError sample(mylogdensity, MySampler(), isdone)
@test_throws ArgumentError sample(mylogdensity, MySampler(), MCMCSerial(), N, 2)
@test_throws ArgumentError sample(mylogdensity, MySampler(), MCMCThreads(), N, 2)
@test_throws ArgumentError sample(
mylogdensity, MySampler(), MCMCDistributed(), N, 2
)
@test_throws ArgumentError AbstractMCMC.steps(mylogdensity, MySampler())
@test_throws ArgumentError AbstractMCMC.Sample(mylogdensity, MySampler())
end
# Remove workers
rmprocs(pids...)
end
| AbstractMCMC | https://github.com/TuringLang/AbstractMCMC.jl.git |
|
[
"MIT"
] | 5.4.0 | d4ab12197672f0f4a3afb850d574cfded5fd9070 | code | 596 | using AbstractMCMC
using ConsoleProgressMonitor: ProgressLogger
using IJulia
using LogDensityProblems
using LoggingExtras: TeeLogger, EarlyFilteredLogger
using TerminalLoggers: TerminalLogger
using FillArrays: FillArrays
using Transducers
using Distributed
using Logging: Logging
using Random
using Statistics
using Test
using Test: collect_test_logs
const LOGGERS = Set()
const CURRENT_LOGGER = Logging.current_logger()
include("utils.jl")
@testset "AbstractMCMC" begin
include("sample.jl")
include("stepper.jl")
include("transducer.jl")
include("logdensityproblems.jl")
end
| AbstractMCMC | https://github.com/TuringLang/AbstractMCMC.jl.git |
|
[
"MIT"
] | 5.4.0 | d4ab12197672f0f4a3afb850d574cfded5fd9070 | code | 26757 | @testset "sample.jl" begin
@testset "Basic sampling" begin
@testset "REPL" begin
empty!(LOGGERS)
Random.seed!(1234)
N = 1_000
chain = sample(MyModel(), MySampler(), N; loggers=true)
@test length(LOGGERS) == 1
logger = first(LOGGERS)
@test logger isa TeeLogger
@test logger.loggers[1].logger isa
(Sys.iswindows() && VERSION < v"1.5.3" ? ProgressLogger : TerminalLogger)
@test logger.loggers[2].logger === CURRENT_LOGGER
@test Logging.current_logger() === CURRENT_LOGGER
# test output type and size
@test chain isa Vector{<:MySample}
@test length(chain) == N
# test some statistical properties
tail_chain = @view chain[2:end]
@test mean(x.a for x in tail_chain) ≈ 0.5 atol = 6e-2
@test var(x.a for x in tail_chain) ≈ 1 / 12 atol = 5e-3
@test mean(x.b for x in tail_chain) ≈ 0.0 atol = 5e-2
@test var(x.b for x in tail_chain) ≈ 1 atol = 6e-2
# initial parameters
chain = sample(
MyModel(), MySampler(), 3; progress=false, initial_params=(b=3.2, a=-1.8)
)
@test chain[1].a == -1.8
@test chain[1].b == 3.2
end
@testset "IJulia" begin
# emulate running IJulia kernel
@eval IJulia begin
inited = true
end
empty!(LOGGERS)
Random.seed!(1234)
N = 10
sample(MyModel(), MySampler(), N; loggers=true)
@test length(LOGGERS) == 1
logger = first(LOGGERS)
@test logger isa TeeLogger
@test logger.loggers[1].logger isa ProgressLogger
@test logger.loggers[2].logger === CURRENT_LOGGER
@test Logging.current_logger() === CURRENT_LOGGER
@eval IJulia begin
inited = false
end
end
@testset "Custom logger" begin
empty!(LOGGERS)
Random.seed!(1234)
N = 10
logger = Logging.ConsoleLogger(stderr, Logging.LogLevel(-1))
Logging.with_logger(logger) do
sample(MyModel(), MySampler(), N; loggers=true)
end
@test length(LOGGERS) == 1
@test first(LOGGERS) === logger
@test Logging.current_logger() === CURRENT_LOGGER
end
@testset "Suppress output" begin
logs, _ = collect_test_logs(; min_level=Logging.LogLevel(-1)) do
sample(MyModel(), MySampler(), 100; progress=false)
end
@test all(l.level > Logging.LogLevel(-1) for l in logs)
# disable progress logging globally
@test !(@test_logs (:info, "progress logging is disabled globally") AbstractMCMC.setprogress!(
false
))
@test !AbstractMCMC.PROGRESS[]
logs, _ = collect_test_logs(; min_level=Logging.LogLevel(-1)) do
sample(MyModel(), MySampler(), 100)
end
@test all(l.level > Logging.LogLevel(-1) for l in logs)
# enable progress logging globally
@test (@test_logs (:info, "progress logging is enabled globally") AbstractMCMC.setprogress!(
true
))
@test AbstractMCMC.PROGRESS[]
end
end
@testset "Multithreaded sampling" begin
if Threads.nthreads() == 1
warnregex = r"^Only a single thread available"
@test_logs (:warn, warnregex) sample(
MyModel(), MySampler(), MCMCThreads(), 10, 10
)
end
# No dedicated chains type
N = 10_000
chains = sample(MyModel(), MySampler(), MCMCThreads(), N, 1000)
@test chains isa Vector{<:Vector{<:MySample}}
@test length(chains) == 1000
@test all(length(x) == N for x in chains)
Random.seed!(1234)
chains = sample(MyModel(), MySampler(), MCMCThreads(), N, 1000; chain_type=MyChain)
# test output type and size
@test chains isa Vector{<:MyChain}
@test length(chains) == 1000
@test all(x -> length(x.as) == length(x.bs) == N, chains)
@test all(ismissing(x.as[1]) for x in chains)
# test some statistical properties
@test all(x -> isapprox(mean(@view x.as[2:end]), 0.5; atol=5e-2), chains)
@test all(x -> isapprox(var(@view x.as[2:end]), 1 / 12; atol=5e-3), chains)
@test all(x -> isapprox(mean(@view x.bs[2:end]), 0; atol=5e-2), chains)
@test all(x -> isapprox(var(@view x.bs[2:end]), 1; atol=1e-1), chains)
# test reproducibility
Random.seed!(1234)
chains2 = sample(MyModel(), MySampler(), MCMCThreads(), N, 1000; chain_type=MyChain)
@test all(ismissing(x.as[1]) for x in chains2)
@test all(c1.as[i] == c2.as[i] for (c1, c2) in zip(chains, chains2), i in 2:N)
@test all(c1.bs[i] == c2.bs[i] for (c1, c2) in zip(chains, chains2), i in 1:N)
# Unexpected order of arguments.
str = "Number of chains (10) is greater than number of samples per chain (5)"
@test_logs (:warn, str) match_mode = :any sample(
MyModel(), MySampler(), MCMCThreads(), 5, 10; chain_type=MyChain
)
# Suppress output.
logs, _ = collect_test_logs(; min_level=Logging.LogLevel(-1)) do
sample(
MyModel(),
MySampler(),
MCMCThreads(),
10_000,
1000;
progress=false,
chain_type=MyChain,
)
end
@test all(l.level > Logging.LogLevel(-1) for l in logs)
# Smoke test for nchains < nthreads
if Threads.nthreads() == 2
sample(MyModel(), MySampler(), MCMCThreads(), N, 1)
end
# initial parameters
nchains = 100
initial_params = [(b=randn(), a=rand()) for _ in 1:nchains]
chains = sample(
MyModel(),
MySampler(),
MCMCThreads(),
3,
nchains;
progress=false,
initial_params=initial_params,
)
@test length(chains) == nchains
@test all(
chain[1].a == params.a && chain[1].b == params.b for
(chain, params) in zip(chains, initial_params)
)
initial_params = (a=randn(), b=rand())
chains = sample(
MyModel(),
MySampler(),
MCMCThreads(),
3,
nchains;
progress=false,
initial_params=FillArrays.Fill(initial_params, nchains),
)
@test length(chains) == nchains
@test all(
chain[1].a == initial_params.a && chain[1].b == initial_params.b for
chain in chains
)
# Too many `initial_params`
@test_throws ArgumentError sample(
MyModel(),
MySampler(),
MCMCThreads(),
3,
nchains;
progress=false,
initial_params=FillArrays.Fill(initial_params, nchains + 1),
)
# Too few `initial_params`
@test_throws ArgumentError sample(
MyModel(),
MySampler(),
MCMCThreads(),
3,
nchains;
progress=false,
initial_params=FillArrays.Fill(initial_params, nchains - 1),
)
end
@testset "Multicore sampling" begin
if nworkers() == 1
warnregex = r"^Only a single process available"
@test_logs (:warn, warnregex) sample(
MyModel(), MySampler(), MCMCDistributed(), 10, 10; chain_type=MyChain
)
end
# Add worker processes.
# Memory requirements on Windows are ~4x larger than on Linux, hence number of processes is reduced
# See, e.g., https://github.com/JuliaLang/julia/issues/40766 and https://github.com/JuliaLang/Pkg.jl/pull/2366
pids = addprocs(
Sys.iswindows() ? div(Sys.CPU_THREADS::Int, 2) : Sys.CPU_THREADS::Int
)
# Load all required packages (`utils.jl` needs LogDensityProblems, Logging, and Random).
@everywhere begin
using AbstractMCMC
using AbstractMCMC: sample
using LogDensityProblems
using Logging
using Random
include("utils.jl")
end
# No dedicated chains type
N = 10_000
chains = sample(MyModel(), MySampler(), MCMCDistributed(), N, 1000)
@test chains isa Vector{<:Vector{<:MySample}}
@test length(chains) == 1000
@test all(length(x) == N for x in chains)
Random.seed!(1234)
chains = sample(
MyModel(), MySampler(), MCMCDistributed(), N, 1000; chain_type=MyChain
)
# Test output type and size.
@test chains isa Vector{<:MyChain}
@test all(ismissing(c.as[1]) for c in chains)
@test length(chains) == 1000
@test all(x -> length(x.as) == length(x.bs) == N, chains)
# Test some statistical properties.
@test all(x -> isapprox(mean(@view x.as[2:end]), 0.5; atol=5e-2), chains)
@test all(x -> isapprox(var(@view x.as[2:end]), 1 / 12; atol=5e-3), chains)
@test all(x -> isapprox(mean(@view x.bs[2:end]), 0; atol=5e-2), chains)
@test all(x -> isapprox(var(@view x.bs[2:end]), 1; atol=1e-1), chains)
# Test reproducibility.
Random.seed!(1234)
chains2 = sample(
MyModel(), MySampler(), MCMCDistributed(), N, 1000; chain_type=MyChain
)
@test all(ismissing(c.as[1]) for c in chains2)
@test all(c1.as[i] == c2.as[i] for (c1, c2) in zip(chains, chains2), i in 2:N)
@test all(c1.bs[i] == c2.bs[i] for (c1, c2) in zip(chains, chains2), i in 1:N)
# Unexpected order of arguments.
str = "Number of chains (10) is greater than number of samples per chain (5)"
@test_logs (:warn, str) match_mode = :any sample(
MyModel(), MySampler(), MCMCDistributed(), 5, 10; chain_type=MyChain
)
# Suppress output.
logs, _ = collect_test_logs(; min_level=Logging.LogLevel(-1)) do
sample(
MyModel(),
MySampler(),
MCMCDistributed(),
10_000,
100;
progress=false,
chain_type=MyChain,
)
end
@test all(l.level > Logging.LogLevel(-1) for l in logs)
# initial parameters
nchains = 100
initial_params = [(a=randn(), b=rand()) for _ in 1:nchains]
chains = sample(
MyModel(),
MySampler(),
MCMCDistributed(),
3,
nchains;
progress=false,
initial_params=initial_params,
)
@test length(chains) == nchains
@test all(
chain[1].a == params.a && chain[1].b == params.b for
(chain, params) in zip(chains, initial_params)
)
initial_params = (b=randn(), a=rand())
chains = sample(
MyModel(),
MySampler(),
MCMCDistributed(),
3,
nchains;
progress=false,
initial_params=FillArrays.Fill(initial_params, nchains),
)
@test length(chains) == nchains
@test all(
chain[1].a == initial_params.a && chain[1].b == initial_params.b for
chain in chains
)
# Too many `initial_params`
@test_throws ArgumentError sample(
MyModel(),
MySampler(),
MCMCDistributed(),
3,
nchains;
progress=false,
initial_params=FillArrays.Fill(initial_params, nchains + 1),
)
# Too few `initial_params`
@test_throws ArgumentError sample(
MyModel(),
MySampler(),
MCMCDistributed(),
3,
nchains;
progress=false,
initial_params=FillArrays.Fill(initial_params, nchains - 1),
)
# Remove workers
rmprocs(pids...)
end
@testset "Serial sampling" begin
# No dedicated chains type
N = 10_000
chains = sample(MyModel(), MySampler(), MCMCSerial(), N, 1000; progress=false)
@test chains isa Vector{<:Vector{<:MySample}}
@test length(chains) == 1000
@test all(length(x) == N for x in chains)
Random.seed!(1234)
chains = sample(
MyModel(),
MySampler(),
MCMCSerial(),
N,
1000;
chain_type=MyChain,
progress=false,
)
# Test output type and size.
@test chains isa Vector{<:MyChain}
@test all(ismissing(c.as[1]) for c in chains)
@test length(chains) == 1000
@test all(x -> length(x.as) == length(x.bs) == N, chains)
# Test some statistical properties.
@test all(x -> isapprox(mean(@view x.as[2:end]), 0.5; atol=5e-2), chains)
@test all(x -> isapprox(var(@view x.as[2:end]), 1 / 12; atol=5e-3), chains)
@test all(x -> isapprox(mean(@view x.bs[2:end]), 0; atol=5e-2), chains)
@test all(x -> isapprox(var(@view x.bs[2:end]), 1; atol=1e-1), chains)
# Test reproducibility.
Random.seed!(1234)
chains2 = sample(
MyModel(),
MySampler(),
MCMCSerial(),
N,
1000;
chain_type=MyChain,
progress=false,
)
@test all(ismissing(c.as[1]) for c in chains2)
@test all(c1.as[i] == c2.as[i] for (c1, c2) in zip(chains, chains2), i in 2:N)
@test all(c1.bs[i] == c2.bs[i] for (c1, c2) in zip(chains, chains2), i in 1:N)
# Unexpected order of arguments.
str = "Number of chains (10) is greater than number of samples per chain (5)"
@test_logs (:warn, str) match_mode = :any sample(
MyModel(), MySampler(), MCMCSerial(), 5, 10; chain_type=MyChain
)
# Suppress output.
logs, _ = collect_test_logs(; min_level=Logging.LogLevel(-1)) do
sample(
MyModel(),
MySampler(),
MCMCSerial(),
10_000,
100;
progress=false,
chain_type=MyChain,
)
end
@test all(l.level > Logging.LogLevel(-1) for l in logs)
# initial parameters
nchains = 100
initial_params = [(a=rand(), b=randn()) for _ in 1:nchains]
chains = sample(
MyModel(),
MySampler(),
MCMCSerial(),
3,
nchains;
progress=false,
initial_params=initial_params,
)
@test length(chains) == nchains
@test all(
chain[1].a == params.a && chain[1].b == params.b for
(chain, params) in zip(chains, initial_params)
)
initial_params = (b=rand(), a=randn())
chains = sample(
MyModel(),
MySampler(),
MCMCSerial(),
3,
nchains;
progress=false,
initial_params=FillArrays.Fill(initial_params, nchains),
)
@test length(chains) == nchains
@test all(
chain[1].a == initial_params.a && chain[1].b == initial_params.b for
chain in chains
)
# Too many `initial_params`
@test_throws ArgumentError sample(
MyModel(),
MySampler(),
MCMCSerial(),
3,
nchains;
progress=false,
initial_params=FillArrays.Fill(initial_params, nchains + 1),
)
# Too few `initial_params`
@test_throws ArgumentError sample(
MyModel(),
MySampler(),
MCMCSerial(),
3,
nchains;
progress=false,
initial_params=FillArrays.Fill(initial_params, nchains - 1),
)
end
@testset "Ensemble sampling: Reproducibility" begin
N = 1_000
nchains = 10
# Serial sampling
Random.seed!(1234)
chains_serial = sample(
MyModel(),
MySampler(),
MCMCSerial(),
N,
nchains;
progress=false,
chain_type=MyChain,
)
@test all(ismissing(c.as[1]) for c in chains_serial)
# Multi-threaded sampling
Random.seed!(1234)
chains_threads = sample(
MyModel(),
MySampler(),
MCMCThreads(),
N,
nchains;
progress=false,
chain_type=MyChain,
)
@test all(ismissing(c.as[1]) for c in chains_threads)
@test all(
c1.as[i] == c2.as[i] for (c1, c2) in zip(chains_serial, chains_threads),
i in 2:N
)
@test all(
c1.bs[i] == c2.bs[i] for (c1, c2) in zip(chains_serial, chains_threads),
i in 1:N
)
# Multi-core sampling
Random.seed!(1234)
chains_distributed = sample(
MyModel(),
MySampler(),
MCMCDistributed(),
N,
nchains;
progress=false,
chain_type=MyChain,
)
@test all(ismissing(c.as[1]) for c in chains_distributed)
@test all(
c1.as[i] == c2.as[i] for (c1, c2) in zip(chains_serial, chains_distributed),
i in 2:N
)
@test all(
c1.bs[i] == c2.bs[i] for (c1, c2) in zip(chains_serial, chains_distributed),
i in 1:N
)
end
@testset "Chain constructors" begin
chain1 = sample(MyModel(), MySampler(), 100)
chain2 = sample(MyModel(), MySampler(), 100; chain_type=MyChain)
@test chain1 isa Vector{<:MySample}
@test chain2 isa MyChain
end
@testset "Sample stats" begin
chain = sample(MyModel(), MySampler(), 1000; chain_type=MyChain)
@test chain.stats.stop >= chain.stats.start
@test chain.stats.duration == chain.stats.stop - chain.stats.start
end
@testset "Discard initial samples" begin
# Create a chain and discard initial samples.
Random.seed!(1234)
N = 100
discard_initial = 50
chain = sample(MyModel(), MySampler(), N; discard_initial=discard_initial)
@test length(chain) == N
@test !ismissing(chain[1].a)
# Repeat sampling without discarding initial samples.
# On Julia < 1.6 progress logging changes the global RNG and hence is enabled here.
# https://github.com/TuringLang/AbstractMCMC.jl/pull/102#issuecomment-1142253258
Random.seed!(1234)
ref_chain = sample(
MyModel(), MySampler(), N + discard_initial; progress=VERSION < v"1.6"
)
@test all(chain[i].a == ref_chain[i + discard_initial].a for i in 1:N)
@test all(chain[i].b == ref_chain[i + discard_initial].b for i in 1:N)
end
@testset "Warm-up steps" begin
# Create a chain and discard initial samples.
Random.seed!(1234)
N = 100
num_warmup = 50
# Everything should be discarded here.
chain = sample(MyModel(), MySampler(), N; num_warmup=num_warmup)
@test length(chain) == N
@test !ismissing(chain[1].a)
# Repeat sampling without discarding initial samples.
# On Julia < 1.6 progress logging changes the global RNG and hence is enabled here.
# https://github.com/TuringLang/AbstractMCMC.jl/pull/102#issuecomment-1142253258
Random.seed!(1234)
ref_chain = sample(
MyModel(), MySampler(), N + num_warmup; progress=VERSION < v"1.6"
)
@test all(chain[i].a == ref_chain[i + num_warmup].a for i in 1:N)
@test all(chain[i].b == ref_chain[i + num_warmup].b for i in 1:N)
# Some other stuff.
Random.seed!(1234)
discard_initial = 10
chain_warmup = sample(
MyModel(),
MySampler(),
N;
num_warmup=num_warmup,
discard_initial=discard_initial,
)
@test length(chain_warmup) == N
@test all(chain_warmup[i].a == ref_chain[i + discard_initial].a for i in 1:N)
# Check that the first `num_warmup - discard_initial` samples are warmup samples.
@test all(
chain_warmup[i].is_warmup == (i <= num_warmup - discard_initial) for i in 1:N
)
end
@testset "Thin chain by a factor of `thinning`" begin
# Run a thinned chain with `N` samples thinned by factor of `thinning`.
Random.seed!(100)
N = 100
thinning = 3
chain = sample(MyModel(), MySampler(), N; thinning=thinning)
@test length(chain) == N
@test ismissing(chain[1].a)
# Repeat sampling without thinning.
# On Julia < 1.6 progress logging changes the global RNG and hence is enabled here.
# https://github.com/TuringLang/AbstractMCMC.jl/pull/102#issuecomment-1142253258
Random.seed!(100)
ref_chain = sample(MyModel(), MySampler(), N * thinning; progress=VERSION < v"1.6")
@test all(chain[i].a == ref_chain[(i - 1) * thinning + 1].a for i in 2:N)
@test all(chain[i].b == ref_chain[(i - 1) * thinning + 1].b for i in 1:N)
end
@testset "Sample without predetermined N" begin
Random.seed!(1234)
chain = sample(MyModel(), MySampler())
bmean = mean(x.b for x in chain)
@test ismissing(chain[1].a)
@test abs(bmean) <= 0.001 || length(chain) == 10_000
# Discard initial samples.
Random.seed!(1234)
discard_initial = 50
chain = sample(MyModel(), MySampler(); discard_initial=discard_initial)
bmean = mean(x.b for x in chain)
@test !ismissing(chain[1].a)
@test abs(bmean) <= 0.001 || length(chain) == 10_000
# On Julia < 1.6 progress logging changes the global RNG and hence is enabled here.
# https://github.com/TuringLang/AbstractMCMC.jl/pull/102#issuecomment-1142253258
Random.seed!(1234)
N = length(chain)
ref_chain = sample(
MyModel(),
MySampler(),
N;
discard_initial=discard_initial,
progress=VERSION < v"1.6",
)
@test all(chain[i].a == ref_chain[i].a for i in 1:N)
@test all(chain[i].b == ref_chain[i].b for i in 1:N)
# Thin chain by a factor of `thinning`.
Random.seed!(1234)
thinning = 3
chain = sample(MyModel(), MySampler(); thinning=thinning)
bmean = mean(x.b for x in chain)
@test ismissing(chain[1].a)
@test abs(bmean) <= 0.001 || length(chain) == 10_000
# On Julia < 1.6 progress logging changes the global RNG and hence is enabled here.
# https://github.com/TuringLang/AbstractMCMC.jl/pull/102#issuecomment-1142253258
Random.seed!(1234)
N = length(chain)
ref_chain = sample(
MyModel(), MySampler(), N; thinning=thinning, progress=VERSION < v"1.6"
)
@test all(chain[i].a == ref_chain[i].a for i in 2:N)
@test all(chain[i].b == ref_chain[i].b for i in 1:N)
end
@testset "Sample vector of `NamedTuple`s" begin
chain = sample(MyModel(), MySampler(), 1_000; chain_type=Vector{NamedTuple})
# Check output type
@test chain isa Vector{<:NamedTuple}
@test length(chain) == 1_000
@test all(keys(x) == (:a, :b) for x in chain)
# Check some statistical properties
@test ismissing(chain[1].a)
@test mean(x.a for x in view(chain, 2:1_000)) ≈ 0.5 atol = 6e-2
@test var(x.a for x in view(chain, 2:1_000)) ≈ 1 / 12 atol = 1e-2
@test mean(x.b for x in chain) ≈ 0 atol = 0.11
@test var(x.b for x in chain) ≈ 1 atol = 0.15
end
@testset "Testing callbacks" begin
function count_iterations(
rng, model, sampler, sample, state, i; iter_array, kwargs...
)
return push!(iter_array, i)
end
N = 100
it_array = Float64[]
sample(MyModel(), MySampler(), N; callback=count_iterations, iter_array=it_array)
@test it_array == collect(1:N)
# sampling without predetermined N
it_array = Float64[]
chain = sample(
MyModel(), MySampler(); callback=count_iterations, iter_array=it_array
)
@test it_array == collect(1:size(chain, 1))
end
@testset "Providing initial state" begin
function record_state(
rng, model, sampler, sample, state, i; states_channel, kwargs...
)
return put!(states_channel, state)
end
initial_state = 10
@testset "sample" begin
n = 10
states_channel = Channel{Int}(n)
chain = sample(
MyModel(),
MySampler(),
n;
initial_state=initial_state,
callback=record_state,
states_channel=states_channel,
)
# Extract the states.
states = [take!(states_channel) for _ in 1:n]
@test length(states) == n
for i in 1:n
@test states[i] == initial_state + i
end
end
@testset "sample with $mode" for mode in
[MCMCSerial(), MCMCThreads(), MCMCDistributed()]
nchains = 4
initial_state = 10
states_channel = if mode === MCMCDistributed()
# Need to use `RemoteChannel` for this.
RemoteChannel(() -> Channel{Int}(nchains))
else
Channel{Int}(nchains)
end
chain = sample(
MyModel(),
MySampler(),
mode,
1,
nchains;
initial_state=FillArrays.Fill(initial_state, nchains),
callback=record_state,
states_channel=states_channel,
)
# Extract the states.
states = [take!(states_channel) for _ in 1:nchains]
@test length(states) == nchains
for i in 1:nchains
@test states[i] == initial_state + 1
end
end
end
end
| AbstractMCMC | https://github.com/TuringLang/AbstractMCMC.jl.git |
|
[
"MIT"
] | 5.4.0 | d4ab12197672f0f4a3afb850d574cfded5fd9070 | code | 2333 | @testset "stepper.jl" begin
@testset "Iterator sampling" begin
Random.seed!(1234)
as = []
bs = []
iter = AbstractMCMC.steps(MyModel(), MySampler())
iter = AbstractMCMC.steps(MyModel(), MySampler(); a=1.0) # `a` shouldn't do anything
for (count, t) in enumerate(iter)
if count >= 1000
break
end
# don't save missing values
t.a === missing && continue
push!(as, t.a)
push!(bs, t.b)
end
@test length(as) == length(bs) == 998
@test mean(as) ≈ 0.5 atol = 2e-2
@test var(as) ≈ 1 / 12 atol = 5e-3
@test mean(bs) ≈ 0.0 atol = 5e-2
@test var(bs) ≈ 1 atol = 5e-2
@test Base.IteratorSize(iter) == Base.IsInfinite()
@test Base.IteratorEltype(iter) == Base.EltypeUnknown()
end
@testset "Discard initial samples" begin
# Create a chain of `N` samples after discarding some initial samples.
Random.seed!(1234)
N = 50
discard_initial = 10
iter = AbstractMCMC.steps(MyModel(), MySampler(); discard_initial=discard_initial)
as = []
bs = []
for t in Iterators.take(iter, N)
push!(as, t.a)
push!(bs, t.b)
end
# Repeat sampling with `sample`.
Random.seed!(1234)
chain = sample(
MyModel(), MySampler(), N; discard_initial=discard_initial, progress=false
)
@test all(as[i] == chain[i].a for i in 1:N)
@test all(bs[i] == chain[i].b for i in 1:N)
end
@testset "Thin chain by a factor of `thinning`" begin
# Create a thinned chain with a thinning factor of `thinning`.
Random.seed!(1234)
N = 50
thinning = 3
iter = AbstractMCMC.steps(MyModel(), MySampler(); thinning=thinning)
as = []
bs = []
for t in Iterators.take(iter, N)
push!(as, t.a)
push!(bs, t.b)
end
# Repeat sampling with `sample`.
Random.seed!(1234)
chain = sample(MyModel(), MySampler(), N; thinning=thinning, progress=false)
@test as[1] === chain[1].a === missing
@test all(as[i] == chain[i].a for i in 2:N)
@test all(bs[i] == chain[i].b for i in 1:N)
end
end
| AbstractMCMC | https://github.com/TuringLang/AbstractMCMC.jl.git |
|
[
"MIT"
] | 5.4.0 | d4ab12197672f0f4a3afb850d574cfded5fd9070 | code | 3308 | @testset "transducer.jl" begin
Random.seed!(1234)
@testset "Basic sampling" begin
N = 1_000
local chain
Logging.with_logger(TerminalLogger()) do
xf = AbstractMCMC.Sample(MyModel(), MySampler(); sleepy=true, logger=true)
chain = collect(xf(withprogress(1:N; interval=1e-3)))
end
# test output type and size
@test chain isa Vector{<:MySample}
@test length(chain) == N
# test some statistical properties
tail_chain = @view chain[2:end]
@test mean(x.a for x in tail_chain) ≈ 0.5 atol = 6e-2
@test var(x.a for x in tail_chain) ≈ 1 / 12 atol = 5e-3
@test mean(x.b for x in tail_chain) ≈ 0.0 atol = 5e-2
@test var(x.b for x in tail_chain) ≈ 1 atol = 6e-2
end
@testset "drop" begin
xf = AbstractMCMC.Sample(MyModel(), MySampler())
chain = collect(Drop(1)(xf(1:10)))
@test chain isa Vector{MySample{Float64,Float64}}
@test length(chain) == 9
end
# Reproduce iterator example
@testset "iterator example" begin
# filter missing values and split transitions
xf = opcompose(
AbstractMCMC.Sample(MyModel(), MySampler()),
OfType(MySample{Float64,Float64}),
Map(x -> (x.a, x.b)),
)
as, bs = foldl(xf, 1:999; init=(Float64[], Float64[])) do (as, bs), (a, b)
push!(as, a)
push!(bs, b)
as, bs
end
@test length(as) == length(bs) == 998
@test mean(as) ≈ 0.5 atol = 2e-2
@test var(as) ≈ 1 / 12 atol = 5e-3
@test mean(bs) ≈ 0.0 atol = 5e-2
@test var(bs) ≈ 1 atol = 5e-2
end
@testset "Discard initial samples" begin
# Create a chain of `N` samples after discarding some initial samples.
Random.seed!(1234)
N = 50
discard_initial = 10
xf = opcompose(
AbstractMCMC.Sample(MyModel(), MySampler(); discard_initial=discard_initial),
Map(x -> (x.a, x.b)),
)
as, bs = foldl(xf, 1:N; init=([], [])) do (as, bs), (a, b)
push!(as, a)
push!(bs, b)
as, bs
end
# Repeat sampling with `sample`.
Random.seed!(1234)
chain = sample(
MyModel(), MySampler(), N; discard_initial=discard_initial, progress=false
)
@test all(as[i] == chain[i].a for i in 1:N)
@test all(bs[i] == chain[i].b for i in 1:N)
end
@testset "Thin chain by a factor of `thinning`" begin
# Create a thinned chain with a thinning factor of `thinning`.
Random.seed!(1234)
N = 50
thinning = 3
xf = opcompose(
AbstractMCMC.Sample(MyModel(), MySampler(); thinning=thinning),
Map(x -> (x.a, x.b)),
)
as, bs = foldl(xf, 1:N; init=([], [])) do (as, bs), (a, b)
push!(as, a)
push!(bs, b)
as, bs
end
# Repeat sampling with `sample`.
Random.seed!(1234)
chain = sample(MyModel(), MySampler(), N; thinning=thinning, progress=false)
@test as[1] === chain[1].a === missing
@test all(as[i] == chain[i].a for i in 2:N)
@test all(bs[i] == chain[i].b for i in 1:N)
end
end
| AbstractMCMC | https://github.com/TuringLang/AbstractMCMC.jl.git |
|
[
"MIT"
] | 5.4.0 | d4ab12197672f0f4a3afb850d574cfded5fd9070 | code | 3345 | struct MyModel <: AbstractMCMC.AbstractModel end
struct MySample{A,B}
a::A
b::B
is_warmup::Bool
end
MySample(a, b) = MySample(a, b, false)
struct MySampler <: AbstractMCMC.AbstractSampler end
struct AnotherSampler <: AbstractMCMC.AbstractSampler end
struct MyChain{A,B,S} <: AbstractMCMC.AbstractChains
as::Vector{A}
bs::Vector{B}
stats::S
end
MyChain(a, b) = MyChain(a, b, NamedTuple())
function AbstractMCMC.step_warmup(
rng::AbstractRNG,
model::MyModel,
sampler::MySampler,
state::Union{Nothing,Integer}=nothing;
loggers=false,
initial_params=nothing,
kwargs...,
)
transition, state = AbstractMCMC.step(
rng, model, sampler, state; loggers, initial_params, kwargs...
)
return MySample(transition.a, transition.b, true), state
end
function AbstractMCMC.step(
rng::AbstractRNG,
model::MyModel,
sampler::MySampler,
state::Union{Nothing,Integer}=nothing;
loggers=false,
initial_params=nothing,
kwargs...,
)
# sample `a` is missing in the first step if not provided
a, b = if state === nothing && initial_params !== nothing
initial_params.a, initial_params.b
else
(state === nothing ? missing : rand(rng)), randn(rng)
end
loggers && push!(LOGGERS, Logging.current_logger())
_state = state === nothing ? 1 : state + 1
return MySample(a, b), _state
end
function AbstractMCMC.bundle_samples(
samples::Vector{<:MySample},
model::MyModel,
sampler::MySampler,
::Any,
::Type{MyChain};
stats=nothing,
kwargs...,
)
as = [t.a for t in samples]
bs = [t.b for t in samples]
return MyChain(as, bs, stats)
end
function isdone(
rng::AbstractRNG,
model::MyModel,
s::MySampler,
samples,
state,
iteration::Int;
kwargs...,
)
# Calculate the mean of x.b.
bmean = mean(x.b for x in samples)
return abs(bmean) <= 0.001 || iteration > 10_000
end
# Set a default convergence function.
function AbstractMCMC.sample(model, sampler::MySampler; kwargs...)
return sample(Random.default_rng(), model, sampler, isdone; kwargs...)
end
function AbstractMCMC.chainscat(
chain::Union{MyChain,Vector{<:MyChain}}, chains::Union{MyChain,Vector{<:MyChain}}...
)
return vcat(chain, chains...)
end
# Conversion to NamedTuple
Base.convert(::Type{NamedTuple}, x::MySample) = (a=x.a, b=x.b)
# Gaussian log density (without additive constants)
# Without LogDensityProblems.jl interface
mylogdensity(x) = -sum(abs2, x) / 2
# With LogDensityProblems.jl interface
struct MyLogDensity
dim::Int
end
LogDensityProblems.logdensity(::MyLogDensity, x) = mylogdensity(x)
LogDensityProblems.dimension(m::MyLogDensity) = m.dim
function LogDensityProblems.capabilities(::Type{MyLogDensity})
return LogDensityProblems.LogDensityOrder{0}()
end
# Define "sampling"
function AbstractMCMC.step(
rng::AbstractRNG,
model::AbstractMCMC.LogDensityModel{MyLogDensity},
::MySampler,
state::Union{Nothing,Integer}=nothing;
kwargs...,
)
# Sample from multivariate normal distribution
ℓ = model.logdensity
dim = LogDensityProblems.dimension(ℓ)
θ = randn(rng, dim)
logdensity_θ = LogDensityProblems.logdensity(ℓ, θ)
_state = state === nothing ? 1 : state + 1
return MySample(θ, logdensity_θ), _state
end
| AbstractMCMC | https://github.com/TuringLang/AbstractMCMC.jl.git |
|
[
"MIT"
] | 5.4.0 | d4ab12197672f0f4a3afb850d574cfded5fd9070 | docs | 1149 | # AbstractMCMC.jl
Abstract types and interfaces for Markov chain Monte Carlo methods.
[![Stable](https://img.shields.io/badge/docs-stable-blue.svg)](https://turinglang.github.io/AbstractMCMC.jl/stable)
[![Dev](https://img.shields.io/badge/docs-dev-blue.svg)](https://turinglang.github.io/AbstractMCMC.jl/dev)
[![CI](https://github.com/TuringLang/AbstractMCMC.jl/workflows/CI/badge.svg?branch=master)](https://github.com/TuringLang/AbstractMCMC.jl/actions?query=workflow%3ACI+branch%3Amaster)
[![IntegrationTest](https://github.com/TuringLang/AbstractMCMC.jl/workflows/IntegrationTest/badge.svg?branch=master)](https://github.com/TuringLang/AbstractMCMC.jl/actions?query=workflow%3AIntegrationTest+branch%3Amaster)
[![Codecov](https://codecov.io/gh/TuringLang/AbstractMCMC.jl/branch/master/graph/badge.svg)](https://codecov.io/gh/TuringLang/AbstractMCMC.jl)
[![Coveralls](https://coveralls.io/repos/github/TuringLang/AbstractMCMC.jl/badge.svg?branch=master)](https://coveralls.io/github/TuringLang/AbstractMCMC.jl?branch=master)
[![Code Style: Blue](https://img.shields.io/badge/code%20style-blue-4495d1.svg)](https://github.com/invenia/BlueStyle)
| AbstractMCMC | https://github.com/TuringLang/AbstractMCMC.jl.git |
|
[
"MIT"
] | 5.4.0 | d4ab12197672f0f4a3afb850d574cfded5fd9070 | docs | 4419 | # API
AbstractMCMC defines an interface for sampling Markov chains.
## Model
```@docs
AbstractMCMC.AbstractModel
AbstractMCMC.LogDensityModel
```
## Sampler
```@docs
AbstractMCMC.AbstractSampler
```
## Sampling a single chain
```@docs
AbstractMCMC.sample(::AbstractRNG, ::AbstractMCMC.AbstractModel, ::AbstractMCMC.AbstractSampler, ::Any)
AbstractMCMC.sample(::AbstractRNG, ::Any, ::AbstractMCMC.AbstractSampler, ::Any)
```
### Iterator
```@docs
AbstractMCMC.steps(::AbstractRNG, ::AbstractMCMC.AbstractModel, ::AbstractMCMC.AbstractSampler)
AbstractMCMC.steps(::AbstractRNG, ::Any, ::AbstractMCMC.AbstractSampler)
```
### Transducer
```@docs
AbstractMCMC.Sample(::AbstractRNG, ::AbstractMCMC.AbstractModel, ::AbstractMCMC.AbstractSampler)
AbstractMCMC.Sample(::AbstractRNG, ::Any, ::AbstractMCMC.AbstractSampler)
```
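Both support lazily drawing samples without fixing their number in advance. As a minimal sketch (`model` and `sampler` are placeholders for types implementing the interface):
```julia
# Iterator: lazily draw samples from the Markov chain.
iter = AbstractMCMC.steps(model, sampler)
samples = collect(Iterators.take(iter, 100))

# Transducer: compose with the Transducers.jl ecosystem.
xf = AbstractMCMC.Sample(model, sampler)
samples = collect(xf(1:100))
```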
## Sampling multiple chains in parallel
```@docs
AbstractMCMC.sample(
::AbstractRNG,
::AbstractMCMC.AbstractModel,
::AbstractMCMC.AbstractSampler,
::AbstractMCMC.AbstractMCMCEnsemble,
::Integer,
::Integer,
)
AbstractMCMC.sample(
::AbstractRNG,
::Any,
::AbstractMCMC.AbstractSampler,
::AbstractMCMC.AbstractMCMCEnsemble,
::Integer,
::Integer,
)
```
Two algorithms are provided for parallel sampling, one based on multiple threads and one based on multiple processes; a third option samples multiple chains in serial (no parallelization):
```@docs
AbstractMCMC.MCMCThreads
AbstractMCMC.MCMCDistributed
AbstractMCMC.MCMCSerial
```
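As an illustrative sketch (`model` and `sampler` again stand in for concrete implementations of the interface), the three ensemble algorithms are invoked in the same way:
```julia
# Sample 4 chains with 1_000 samples each.
chains = sample(model, sampler, MCMCSerial(), 1_000, 4)
chains = sample(model, sampler, MCMCThreads(), 1_000, 4)
chains = sample(model, sampler, MCMCDistributed(), 1_000, 4)
```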
## Common keyword arguments
Common keyword arguments for regular and parallel sampling are:
- `progress` (default: `AbstractMCMC.PROGRESS[]` which is `true` initially): toggles progress logging
- `chain_type` (default: `Any`): determines the type of the returned chain
- `callback` (default: `nothing`): if `callback !== nothing`, then
  `callback(rng, model, sampler, sample, state, iteration)` is called after every sampling step,
  where `sample` is the most recent sample of the Markov chain, `state` is the sampler state, and `iteration` is the current iteration
- `num_warmup` (default: `0`): number of "warm-up" steps to take before the first "regular" step,
i.e. number of times to call [`AbstractMCMC.step_warmup`](@ref) before the first call to
[`AbstractMCMC.step`](@ref).
- `discard_initial` (default: `num_warmup`): number of initial samples that are discarded. Note that
if `discard_initial < num_warmup`, warm-up samples will also be included in the resulting samples.
- `thinning` (default: `1`): factor by which to thin samples.
- `initial_state` (default: `nothing`): if `initial_state !== nothing`, the first call to [`AbstractMCMC.step`](@ref)
is passed `initial_state` as the `state` argument.
!!! info
The common keyword arguments `progress`, `chain_type`, and `callback` are not supported by the iterator [`AbstractMCMC.steps`](@ref) and the transducer [`AbstractMCMC.Sample`](@ref).
There is no "official" way for providing initial parameter values yet.
However, multiple packages such as [EllipticalSliceSampling.jl](https://github.com/TuringLang/EllipticalSliceSampling.jl) and [AdvancedMH.jl](https://github.com/TuringLang/AdvancedMH.jl) support an `initial_params` keyword argument for setting the initial values when sampling a single chain.
To ensure that sampling multiple chains "just works" when sampling of a single chain is implemented, [we decided to support `initial_params` in the default implementations of the ensemble methods](https://github.com/TuringLang/AbstractMCMC.jl/pull/94):
- `initial_params` (default: `nothing`): if `initial_params isa AbstractArray`, then the `i`th element of `initial_params` is used as initial parameters of the `i`th chain. If one wants to use the same initial parameters `x` for every chain, one can specify e.g. `initial_params = FillArrays.Fill(x, N)`.
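As a sketch (here `model`, `sampler`, and the parameter values `x`, `x1`, ..., `x4` are placeholders, and the sampler is assumed to support `initial_params`):
```julia
using FillArrays

# Chain-specific initial parameters ...
chains = sample(model, sampler, MCMCThreads(), 1_000, 4; initial_params=[x1, x2, x3, x4])

# ... or the same initial parameters `x` for every chain.
chains = sample(model, sampler, MCMCThreads(), 1_000, 4; initial_params=FillArrays.Fill(x, 4))
```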
Progress logging can be enabled and disabled globally with `AbstractMCMC.setprogress!(progress)`.
```@docs
AbstractMCMC.setprogress!
```
## Chains
The `chain_type` keyword argument allows to set the type of the returned chain. A common
choice is to return chains of type `Chains` from [MCMCChains.jl](https://github.com/TuringLang/MCMCChains.jl).
AbstractMCMC defines the abstract type `AbstractChains` for Markov chains.
```@docs
AbstractMCMC.AbstractChains
```
For chains of this type, AbstractMCMC defines the following two methods.
```@docs
AbstractMCMC.chainscat
AbstractMCMC.chainsstack
```
| AbstractMCMC | https://github.com/TuringLang/AbstractMCMC.jl.git |
|
[
"MIT"
] | 5.4.0 | d4ab12197672f0f4a3afb850d574cfded5fd9070 | docs | 4393 | # Design
This page explains the default implementations and design choices of AbstractMCMC.
It is not intended for users but for developers that want to implement the AbstractMCMC
interface for Markov chain Monte Carlo sampling. The user-facing API is explained in
[API](@ref).
## Overview
AbstractMCMC provides a default implementation of the user-facing interface described
in [API](@ref). You can ignore it entirely and define your own implementation of
the interface. However, as described below, in most use cases the default implementation
gives you support for parallel sampling, progress logging, callbacks, iterators,
and transducers for free if you just define the sampling step of your inference algorithm,
drastically reducing the amount of code you have to write. The docstrings
of the functions described below should be helpful if you intend to make use of the default
implementations.
## Basic structure
The simplified structure for regular sampling (the actual implementation contains
some additional error checks and support for progress logging and callbacks) is
```julia
function StatsBase.sample(
    rng::Random.AbstractRNG,
    model::AbstractMCMC.AbstractModel,
    sampler::AbstractMCMC.AbstractSampler,
    N::Integer;
    chain_type::Type=Any,
    kwargs...
)
# Obtain the initial sample and state.
sample, state = AbstractMCMC.step(rng, model, sampler; kwargs...)
# Save the sample.
samples = AbstractMCMC.samples(sample, model, sampler, N; kwargs...)
samples = AbstractMCMC.save!!(samples, sample, 1, model, sampler, N; kwargs...)
# Step through the sampler.
for i in 2:N
# Obtain the next sample and state.
sample, state = AbstractMCMC.step(rng, model, sampler, state; kwargs...)
# Save the sample.
samples = AbstractMCMC.save!!(samples, sample, i, model, sampler, N; kwargs...)
end
return AbstractMCMC.bundle_samples(samples, model, sampler, state, chain_type; kwargs...)
end
```
All other default implementations make use of the same structure and in particular
call the same methods.
## Sampling step
The only method for which no default implementation is provided (and hence which
downstream packages *have* to implement) is [`AbstractMCMC.step`](@ref). It defines
the sampling step of the inference method.
```@docs
AbstractMCMC.step
```
If the warm-up stage of sampling requires special handling, this can be specified by overloading
```@docs
AbstractMCMC.step_warmup
```
which will be used for the first `num_warmup` iterations, as specified as a keyword argument to [`AbstractMCMC.sample`](@ref).
Note that this is optional; by default it simply calls [`AbstractMCMC.step`](@ref) from above.
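As a hedged sketch of the minimal amount of code a downstream package has to write, consider a toy sampler that draws standard normal samples and counts iterations in its state (the names `MyModel` and `MySampler` are illustrative, not part of the API):
```julia
using AbstractMCMC
using Random

struct MyModel <: AbstractMCMC.AbstractModel end
struct MySampler <: AbstractMCMC.AbstractSampler end

# One method covers both the initial step (`state === nothing`) and all
# subsequent steps; each step returns a (sample, state) tuple.
function AbstractMCMC.step(
    rng::Random.AbstractRNG,
    model::MyModel,
    sampler::MySampler,
    state::Union{Nothing,Int}=nothing;
    kwargs...,
)
    x = randn(rng)
    newstate = state === nothing ? 1 : state + 1
    return x, newstate
end

# The default implementations now provide regular sampling, parallel sampling,
# iterators, and transducers for free.
samples = AbstractMCMC.sample(MyModel(), MySampler(), 100; progress=false)
```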
## Collecting samples
!!! note
This section does not apply to the iterator and transducer interface.
After the initial sample is obtained, the default implementations for regular and parallel sampling
(not for the iterator and the transducer since it is not needed there) create a container for all
samples (the initial one and all subsequent samples) using `AbstractMCMC.samples`.
```@docs
AbstractMCMC.samples
```
In each step, the sample is saved in the container by `AbstractMCMC.save!!`. The notation `!!`
follows the convention of the package [BangBang.jl](https://github.com/JuliaFolds/BangBang.jl),
which is used in the default implementation of `AbstractMCMC.save!!`. It indicates that the
sample is pushed to the container but a "widening" fallback is used if the container type
cannot store the sample. Therefore `AbstractMCMC.save!!` *always has* to return the container.
```@docs
AbstractMCMC.save!!
```
For most use cases the default implementations of `AbstractMCMC.samples` and `AbstractMCMC.save!!`
should work out of the box and hence need not be overloaded in downstream code.
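For illustration, a hypothetical sampler that knows its samples are `Float64` could preallocate a typed container; the following overloads (continuing the `MyModel`/`MySampler` sketch above) are an assumption-laden sketch, not the default definitions:
```julia
# Preallocate a Vector{Float64} of the requested length.
function AbstractMCMC.samples(sample::Float64, ::MyModel, ::MySampler, N::Integer; kwargs...)
    return Vector{Float64}(undef, N)
end

# Write each sample into the container and return it (no widening is needed
# here because the sample type is known in advance).
function AbstractMCMC.save!!(
    samples::Vector{Float64}, sample::Float64, i::Integer,
    ::MyModel, ::MySampler, N::Integer; kwargs...,
)
    samples[i] = sample
    return samples
end
```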
## Creating chains
!!! note
This section does not apply to the iterator and transducer interface.
At the end of the sampling procedure for regular and parallel sampling, we transform
the collection of samples into the desired output type by calling `AbstractMCMC.bundle_samples`.
```@docs
AbstractMCMC.bundle_samples
```
The default implementation should be fine in most use cases, but downstream packages
could, e.g., save the final state of the sampler as well if they overload
`AbstractMCMC.bundle_samples`.
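Continuing the sketch above, a downstream package could collect the samples in a custom chain type (the type `MyChain` here is hypothetical) and keep the final sampler state alongside them:
```julia
struct MyChain{T,S}
    samples::Vector{T}
    state::S
end

function AbstractMCMC.bundle_samples(
    samples, model::MyModel, sampler::MySampler, state, ::Type{MyChain}; kwargs...,
)
    return MyChain(samples, state)
end

# Request the custom chain type when sampling.
chain = AbstractMCMC.sample(MyModel(), MySampler(), 100; progress=false, chain_type=MyChain)
```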
| AbstractMCMC | https://github.com/TuringLang/AbstractMCMC.jl.git |
|
[
"MIT"
] | 5.4.0 | d4ab12197672f0f4a3afb850d574cfded5fd9070 | docs | 604 | # AbstractMCMC.jl
*Abstract types and interfaces for Markov chain Monte Carlo methods.*
AbstractMCMC defines an interface for sampling and combining Markov chains.
It comes with a default sampling algorithm that provides support for progress
bars, parallel sampling (multithreaded and multicore), and user-provided callbacks
out of the box. Typically developers only have to define the sampling step
of their inference method in an iterator-like fashion to make use of this
functionality. Additionally, the package defines an iterator and a transducer
for sampling Markov chains based on the interface.
| AbstractMCMC | https://github.com/TuringLang/AbstractMCMC.jl.git |
|
[
"MIT"
] | 0.1.3 | d95b8e6e93a173b71406d24fce591fa44a8cf3f9 | code | 519 | using StableMap
using Documenter
DocMeta.setdocmeta!(StableMap, :DocTestSetup, :(using StableMap); recursive=true)
makedocs(;
modules=[StableMap],
authors="Chris Elrod <elrodc@gmail.com> and contributors",
repo="https://github.com/chriselrod/StableMap.jl/blob/{commit}{path}#{line}",
sitename="StableMap.jl",
format=Documenter.HTML(;
prettyurls=get(ENV, "CI", "false") == "true",
edit_link="main",
assets=String[],
),
pages=[
"Home" => "index.md",
],
)
| StableMap | https://github.com/chriselrod/StableMap.jl.git |
|
[
"MIT"
] | 0.1.3 | d95b8e6e93a173b71406d24fce591fa44a8cf3f9 | code | 3215 | module StableMap
using ArrayInterface
using LinearAlgebra
export stable_map, stable_map!
function stable_map!(f, dst::AbstractArray, arg0)
N = length(dst)
eachindex(arg0) == Base.oneto(N) ||
throw(ArgumentError("All args must have same axes."))
@inbounds for i = 1:N
dst[i] = f(arg0[i])
end
return dst
end
function stable_map!(
f,
dst::AbstractArray{T},
arg0,
args::Vararg{Any,K}
) where {K,T}
N = length(dst)
all(==(Base.oneto(N)), map(eachindex, (arg0, args...))) ||
throw(ArgumentError("All args must have same axes."))
@inbounds for i = 1:N
# fᵢ = f(map(Base.Fix2(Base.unsafe_getindex, i), args)...)
# dst[i] = convert(T, fᵢ)::T
dst[i] = f(arg0[i], map(Base.Fix2(Base.unsafe_getindex, i), args)...)
end
return dst
end
function stable_map!(f, dst::AbstractArray)
N = length(dst)
@inbounds for i = 1:N
# fᵢ = f(map(Base.Fix2(Base.unsafe_getindex, i), args)...)
# dst[i] = convert(T, fᵢ)::T
dst[i] = f()
end
return dst
end
# Fill `dst` from index `start` onward, widening the element type of `dst`
# on the fly whenever `f` returns a value that does not fit in `T`.
function narrowing_map!(
f,
dst::AbstractArray{T},
start::Int,
args::Vararg{Any,K}
) where {K,T}
N = length(dst)
all(==(Base.oneto(N)), map(eachindex, args)) ||
throw(ArgumentError("All args must have same axes."))
@inbounds for i = start:N
xi = f(map(Base.Fix2(Base.unsafe_getindex, i), args)...)
if xi isa T
dst[i] = xi
else
Ti = typeof(xi)
PT = promote_type(Ti, T)
if PT === T
dst[i] = convert(T, xi)
      elseif Base.isconcretetype(PT)
        # Promotion yields a concrete type: widen `dst` and resume at i + 1.
        dst_promote = Array{PT}(undef, size(dst))
copyto!(
view(dst_promote, Base.OneTo(i - 1)),
view(dst, Base.OneTo(i - 1))
)
dst_promote[i] = convert(PT, xi)::PT
return narrowing_map!(f, dst_promote, i + 1, args...)
      else
        # No concrete promotion exists: fall back to a small `Union` eltype.
        dst_union = Array{Union{T,Ti}}(undef, size(dst))
copyto!(
view(dst_union, Base.OneTo(i - 1)),
view(dst, Base.OneTo(i - 1))
)
dst_union[i] = xi
return narrowing_map!(f, dst_union, i + 1, args...)
end
end
end
return dst
end
isconcreteunion(TU) =
if TU isa Union
isconcretetype(TU.a) && isconcreteunion(TU.b)
else
isconcretetype(TU)
end
# Try to infer a concrete return type (or a concrete small union) for `f`
# applied to the eltypes of `args`; return `nothing` if inference cannot.
function promote_return(f::F, args...) where {F}
T = Base.promote_op(f, map(eltype, args)...)
Base.isconcretetype(T) && return T
T isa Union || return nothing
TU = Base.promote_union(T)
Base.isconcretetype(TU) && return TU
isconcreteunion(TU) && return TU
nothing
end
function stable_map(f::F, args::Vararg{AbstractArray,K}) where {K,F}
# assume specialized implementation
all(ArrayInterface.ismutable, args) || return map(f, args...)
T = promote_return(f, args...)
first_arg = first(args)
T === nothing ||
return stable_map!(f, Array{T}(undef, size(first_arg)), args...)
x = f(map(first, args)...)
dst = similar(first_arg, typeof(x))
@inbounds dst[1] = x
narrowing_map!(f, dst, 2, args...)
end
# Specialization for `Diagonal`: densify the matrix, promoting the element
# type with `Float32`, so `f` is applied to the structural zeros as well.
function stable_map(f, A::Diagonal{T}) where {T}
B = Matrix{promote_type(T, Float32)}(undef, size(A))
@inbounds for i in eachindex(A)
B[i] = f(A[i])
end
return B
end
@inline stable_map(f::F, arg1::A, args::Vararg{A,K}) where {F,K,A} =
map(f, arg1, args...)
end
| StableMap | https://github.com/chriselrod/StableMap.jl.git |
|
[
"MIT"
] | 0.1.3 | d95b8e6e93a173b71406d24fce591fa44a8cf3f9 | code | 507 | using StableMap
using Test
using ForwardDiff
@testset "StableMap.jl" begin
x = rand(10);
@test stable_map(exp, x) ≈ map(exp, x)
unstablemax(x,y) = Base.inferencebarrier(x > y ? x : y)
y = rand(-10:10, 10);
res = stable_map(unstablemax, x, y)
@test res isa Vector{Float64}
@test res == map(unstablemax, x, y)
f(x) = Base.inferencebarrier(x > 1 ? x : 1.0)
@test stable_map(f, [ForwardDiff.Dual(0f0,1f0), ForwardDiff.Dual(2f0,1f0)]) isa Vector{ForwardDiff.Dual{Nothing,Float64,1}}
end
| StableMap | https://github.com/chriselrod/StableMap.jl.git |
|
[
"MIT"
] | 0.1.3 | d95b8e6e93a173b71406d24fce591fa44a8cf3f9 | docs | 4344 | # StableMap
[![Build Status](https://github.com/chriselrod/StableMap.jl/actions/workflows/CI.yml/badge.svg?branch=main)](https://github.com/chriselrod/StableMap.jl/actions/workflows/CI.yml?query=branch%3Amain)
[![Coverage](https://codecov.io/gh/chriselrod/StableMap.jl/branch/main/graph/badge.svg)](https://codecov.io/gh/chriselrod/StableMap.jl)
The map that preserves the relative order of inputs mapped to outputs.
So do other maps, of course.
StableMap tries to return vectors that are as concretely typed as possible.
For example:
```julia
julia> using StableMap, ForwardDiff, BenchmarkTools
[ Info: Precompiling StableMap [626594ce-0aac-4e81-a7f6-bc4bb5ff97e9]
julia> f(x) = x > 1 ? x : 1.0
f (generic function with 1 method)
julia> g(x) = Base.inferencebarrier(x > 1 ? x : 1.0)
g (generic function with 1 method)
julia> h(x) = Base.inferencebarrier(x)
h (generic function with 1 method)
julia> x = [ForwardDiff.Dual(0f0,1f0), ForwardDiff.Dual(2f0,1f0)];
julia> y = [ForwardDiff.Dual(2f0,1f0), ForwardDiff.Dual(0f0,1f0)];
julia> @btime map(f, $x)
208.010 ns (4 allocations: 176 bytes)
2-element Vector{Real}:
1.0
Dual{Nothing}(2.0,1.0)
julia> @btime stable_map(f, $x)
93.329 ns (1 allocation: 96 bytes)
2-element Vector{ForwardDiff.Dual{Nothing, Float64, 1}}:
Dual{Nothing}(1.0,0.0)
Dual{Nothing}(2.0,1.0)
julia> @btime map(f, $y)
210.378 ns (4 allocations: 176 bytes)
2-element Vector{Real}:
Dual{Nothing}(2.0,1.0)
1.0
julia> @btime stable_map(f, $y)
94.547 ns (1 allocation: 96 bytes)
2-element Vector{ForwardDiff.Dual{Nothing, Float64, 1}}:
Dual{Nothing}(2.0,1.0)
Dual{Nothing}(1.0,0.0)
julia> @btime map(g, $x)
890.247 ns (10 allocations: 272 bytes)
2-element Vector{Real}:
1.0
Dual{Nothing}(2.0,1.0)
julia> @btime stable_map(g, $x)
3.221 μs (18 allocations: 800 bytes)
2-element Vector{ForwardDiff.Dual{Nothing, Float64, 1}}:
Dual{Nothing}(1.0,0.0)
Dual{Nothing}(2.0,1.0)
julia> @btime map(g, $y)
866.372 ns (10 allocations: 272 bytes)
2-element Vector{Real}:
Dual{Nothing}(2.0,1.0)
1.0
julia> @btime stable_map(g, $y)
3.357 μs (18 allocations: 800 bytes)
2-element Vector{ForwardDiff.Dual{Nothing, Float64, 1}}:
Dual{Nothing}(2.0,1.0)
Dual{Nothing}(1.0,0.0)
julia> @btime map(h, $x)
531.503 ns (5 allocations: 144 bytes)
2-element Vector{ForwardDiff.Dual{Nothing, Float32, 1}}:
Dual{Nothing}(0.0,1.0)
Dual{Nothing}(2.0,1.0)
julia> @btime stable_map(h, $x)
810.656 ns (4 allocations: 128 bytes)
2-element Vector{ForwardDiff.Dual{Nothing, Float32, 1}}:
Dual{Nothing}(0.0,1.0)
Dual{Nothing}(2.0,1.0)
julia> @btime map(h, $y)
535.145 ns (5 allocations: 144 bytes)
2-element Vector{ForwardDiff.Dual{Nothing, Float32, 1}}:
Dual{Nothing}(2.0,1.0)
Dual{Nothing}(0.0,1.0)
julia> @btime stable_map(h, $y)
816.471 ns (4 allocations: 128 bytes)
2-element Vector{ForwardDiff.Dual{Nothing, Float32, 1}}:
Dual{Nothing}(2.0,1.0)
Dual{Nothing}(0.0,1.0)
```
It can be faster at handling small unions than `Base.map`, but it is currently slower for functions that return `Any`. However, in both cases it has the benefit of returning arrays that are as concretely typed as possible.
It will try to promote returned objects to the same type, and if this is not possible, it will return a small union.
```julia
julia> m(x) = x > 1.0 ? x : [x]
m (generic function with 1 method)
julia> @btime map(m, $x)
257.890 ns (4 allocations: 208 bytes)
2-element Vector{Any}:
ForwardDiff.Dual{Nothing, Float32, 1}[Dual{Nothing}(0.0,1.0)]
Dual{Nothing}(2.0,1.0)
julia> @btime stable_map(m, $x)
194.158 ns (3 allocations: 144 bytes)
2-element Vector{Union{ForwardDiff.Dual{Nothing, Float32, 1}, Vector{ForwardDiff.Dual{Nothing, Float32, 1}}}}:
ForwardDiff.Dual{Nothing, Float32, 1}[Dual{Nothing}(0.0,1.0)]
Dual{Nothing}(2.0,1.0)
julia> @btime map(m, $y)
260.979 ns (4 allocations: 224 bytes)
2-element Vector{Any}:
Dual{Nothing}(2.0,1.0)
ForwardDiff.Dual{Nothing, Float32, 1}[Dual{Nothing}(0.0,1.0)]
julia> @btime stable_map(m, $y)
190.128 ns (3 allocations: 144 bytes)
2-element Vector{Union{ForwardDiff.Dual{Nothing, Float32, 1}, Vector{ForwardDiff.Dual{Nothing, Float32, 1}}}}:
Dual{Nothing}(2.0,1.0)
ForwardDiff.Dual{Nothing, Float32, 1}[Dual{Nothing}(0.0,1.0)]
```
| StableMap | https://github.com/chriselrod/StableMap.jl.git |
|
[
"MIT"
] | 0.1.3 | d95b8e6e93a173b71406d24fce591fa44a8cf3f9 | docs | 183 | ```@meta
CurrentModule = StableMap
```
# StableMap
Documentation for [StableMap](https://github.com/chriselrod/StableMap.jl).
```@index
```
```@autodocs
Modules = [StableMap]
```
| StableMap | https://github.com/chriselrod/StableMap.jl.git |
|
[
"MPL-2.0"
] | 0.3.0 | f49af35a8dd097d4dccabf94bd2053afbfdab3a4 | code | 614 | using Documenter, KrylovPreconditioners
makedocs(
modules = [KrylovPreconditioners],
doctest = true,
linkcheck = true,
format = Documenter.HTML(assets = ["assets/style.css"],
ansicolor = true,
prettyurls = get(ENV, "CI", nothing) == "true",
collapselevel = 1),
sitename = "KrylovPreconditioners.jl",
pages = ["Home" => "index.md",
"Reference" => "reference.md"
]
)
deploydocs(
repo = "github.com/JuliaSmoothOptimizers/KrylovPreconditioners.jl.git",
push_preview = true,
devbranch = "main",
)
| KrylovPreconditioners | https://github.com/JuliaSmoothOptimizers/KrylovPreconditioners.jl.git |
|
[
"MPL-2.0"
] | 0.3.0 | f49af35a8dd097d4dccabf94bd2053afbfdab3a4 | code | 528 | module KrylovPreconditionersAMDGPUExt
using LinearAlgebra
using SparseArrays
using AMDGPU
using AMDGPU.rocSPARSE, AMDGPU.rocSOLVER
using LinearAlgebra: checksquare, BlasReal, BlasFloat
import LinearAlgebra: ldiv!, mul!
import Base: size, eltype, unsafe_convert
using KrylovPreconditioners
const KP = KrylovPreconditioners
using KernelAbstractions
const KA = KernelAbstractions
include("AMDGPU/ic0.jl")
include("AMDGPU/ilu0.jl")
include("AMDGPU/blockjacobi.jl")
include("AMDGPU/operators.jl")
include("AMDGPU/scaling.jl")
end
| KrylovPreconditioners | https://github.com/JuliaSmoothOptimizers/KrylovPreconditioners.jl.git |
|
[
"MPL-2.0"
] | 0.3.0 | f49af35a8dd097d4dccabf94bd2053afbfdab3a4 | code | 506 | module KrylovPreconditionersCUDAExt
using LinearAlgebra
using SparseArrays
using CUDA
using CUDA.CUSPARSE, CUDA.CUBLAS
using LinearAlgebra: checksquare, BlasReal, BlasFloat
import LinearAlgebra: ldiv!, mul!
import Base: size, eltype, unsafe_convert
using KrylovPreconditioners
const KP = KrylovPreconditioners
using KernelAbstractions
const KA = KernelAbstractions
include("CUDA/ic0.jl")
include("CUDA/ilu0.jl")
include("CUDA/blockjacobi.jl")
include("CUDA/operators.jl")
include("CUDA/scaling.jl")
end
| KrylovPreconditioners | https://github.com/JuliaSmoothOptimizers/KrylovPreconditioners.jl.git |
|
[
"MPL-2.0"
] | 0.3.0 | f49af35a8dd097d4dccabf94bd2053afbfdab3a4 | code | 483 | module KrylovPreconditionersoneAPIExt
using LinearAlgebra
using SparseArrays
using oneAPI
using oneAPI: global_queue, sycl_queue, context, device
using oneAPI.oneMKL
using LinearAlgebra: checksquare, BlasReal, BlasFloat
import LinearAlgebra: ldiv!, mul!
import Base: size, eltype, unsafe_convert
using KrylovPreconditioners
const KP = KrylovPreconditioners
using KernelAbstractions
const KA = KernelAbstractions
include("oneAPI/blockjacobi.jl")
include("oneAPI/operators.jl")
end
| KrylovPreconditioners | https://github.com/JuliaSmoothOptimizers/KrylovPreconditioners.jl.git |
|
[
"MPL-2.0"
] | 0.3.0 | f49af35a8dd097d4dccabf94bd2053afbfdab3a4 | code | 1672 | KP.BlockJacobiPreconditioner(J::rocSPARSE.ROCSparseMatrixCSR; options...) = BlockJacobiPreconditioner(SparseMatrixCSC(J); options...)
function KP.create_blocklist(cublocks::ROCArray, npart)
blocklist = Array{ROCMatrix{Float64}}(undef, npart)
for b in 1:npart
blocklist[b] = ROCMatrix{Float64}(undef, size(cublocks,1), size(cublocks,2))
end
return blocklist
end
function _update_gpu(p, j_rowptr, j_colval, j_nzval, device::ROCBackend)
nblocks = p.nblocks
blocksize = p.blocksize
fillblock_gpu_kernel! = KP._fillblock_gpu!(device)
    # Fill Block Jacobi
fillblock_gpu_kernel!(
p.cublocks, size(p.id,1),
p.cupartitions, p.cumap,
j_rowptr, j_colval, j_nzval,
p.cupart, p.culpartitions, p.id,
ndrange=(nblocks, blocksize),
)
KA.synchronize(device)
    # Invert blocks
for b in 1:nblocks
p.blocklist[b] .= p.cublocks[:,:,b]
end
AMDGPU.@sync pivot, info = rocSOLVER.getrf_batched!(p.blocklist)
AMDGPU.@sync pivot, info, p.blocklist = rocSOLVER.getri_batched!(p.blocklist, pivot)
for b in 1:nblocks
p.cublocks[:,:,b] .= p.blocklist[b]
end
return
end
"""
function update!(J::ROCSparseMatrixCSR, p)
Update the preconditioner `p` from the sparse Jacobian `J` in CSR format for ROCm
1) The dense blocks `cuJs` are filled from the sparse Jacobian `J`
2) Do a batch inversion of the dense blocks using rocSOLVER
3) Extract the preconditioner matrix `p.P` from the dense blocks `cuJs`
"""
function KP.update!(p::BlockJacobiPreconditioner, J::rocSPARSE.ROCSparseMatrixCSR)
_update_gpu(p, J.rowPtr, J.colVal, J.nzVal, p.device)
end
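# Usage sketch (hypothetical matrix; `ROCBackend` comes from AMDGPU.jl):
#
#   using AMDGPU, AMDGPU.rocSPARSE, SparseArrays, LinearAlgebra, KrylovPreconditioners
#   J = ROCSparseMatrixCSR(sprand(100, 100, 0.05) + 10I)
#   p = BlockJacobiPreconditioner(J; nblocks=4, device=ROCBackend())
#   update!(p, J)   # refresh the dense blocks after J changes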
| KrylovPreconditioners | https://github.com/JuliaSmoothOptimizers/KrylovPreconditioners.jl.git |
|
[
"MPL-2.0"
] | 0.3.0 | f49af35a8dd097d4dccabf94bd2053afbfdab3a4 | code | 4395 | mutable struct AMD_IC0{SM} <: AbstractKrylovPreconditioner
n::Int
desc::rocSPARSE.ROCMatrixDescriptor
buffer::ROCVector{UInt8}
info::rocSPARSE.MatInfo
timer_update::Float64
P::SM
end
for (bname, aname, sname, T) in ((:rocsparse_scsric0_buffer_size, :rocsparse_scsric0_analysis, :rocsparse_scsric0, :Float32),
(:rocsparse_dcsric0_buffer_size, :rocsparse_dcsric0_analysis, :rocsparse_dcsric0, :Float64),
(:rocsparse_ccsric0_buffer_size, :rocsparse_ccsric0_analysis, :rocsparse_ccsric0, :ComplexF32),
(:rocsparse_zcsric0_buffer_size, :rocsparse_zcsric0_analysis, :rocsparse_zcsric0, :ComplexF64))
@eval begin
function KP.kp_ic0(A::ROCSparseMatrixCSR{$T,Cint})
P = copy(A)
n = checksquare(P)
desc = rocSPARSE.ROCMatrixDescriptor('G', 'L', 'N', 'O')
info = rocSPARSE.MatInfo()
buffer_size = Ref{Csize_t}()
rocSPARSE.$bname(rocSPARSE.handle(), n, nnz(P), desc, P.nzVal, P.rowPtr, P.colVal, info, buffer_size)
buffer = ROCVector{UInt8}(undef, buffer_size[])
rocSPARSE.$aname(rocSPARSE.handle(), n, nnz(P), desc, P.nzVal, P.rowPtr, P.colVal, info,
rocSPARSE.rocsparse_analysis_policy_force, rocSPARSE.rocsparse_solve_policy_auto, buffer)
posit = Ref{Cint}(1)
rocSPARSE.rocsparse_csric0_zero_pivot(rocSPARSE.handle(), info, posit)
      (posit[] ≥ 0) && error("Structural/numerical zero in A at ($(posit[]),$(posit[]))")
rocSPARSE.$sname(rocSPARSE.handle(), n, nnz(P), desc, P.nzVal, P.rowPtr, P.colVal, info, rocSPARSE.rocsparse_solve_policy_auto, buffer)
return AMD_IC0(n, desc, buffer, info, 0.0, P)
end
function KP.update!(p::AMD_IC0{ROCSparseMatrixCSR{$T,Cint}}, A::ROCSparseMatrixCSR{$T,Cint})
copyto!(p.P.nzVal, A.nzVal)
rocSPARSE.$sname(rocSPARSE.handle(), p.n, nnz(p.P), p.desc, p.P.nzVal, p.P.rowPtr, p.P.colVal, p.info, rocSPARSE.rocsparse_solve_policy_auto, p.buffer)
return p
end
function KP.kp_ic0(A::ROCSparseMatrixCSC{$T,Cint})
P = copy(A)
n = checksquare(P)
desc = rocSPARSE.ROCMatrixDescriptor('G', 'L', 'N', 'O')
info = rocSPARSE.MatInfo()
buffer_size = Ref{Csize_t}()
rocSPARSE.$bname(rocSPARSE.handle(), n, nnz(P), desc, P.nzVal, P.colPtr, P.rowVal, info, buffer_size)
buffer = ROCVector{UInt8}(undef, buffer_size[])
rocSPARSE.$aname(rocSPARSE.handle(), n, nnz(P), desc, P.nzVal, P.colPtr, P.rowVal, info,
rocSPARSE.rocsparse_analysis_policy_force, rocSPARSE.rocsparse_solve_policy_auto, buffer)
posit = Ref{Cint}(1)
rocSPARSE.rocsparse_csric0_zero_pivot(rocSPARSE.handle(), info, posit)
      (posit[] ≥ 0) && error("Structural/numerical zero in A at ($(posit[]),$(posit[]))")
rocSPARSE.$sname(rocSPARSE.handle(), n, nnz(P), desc, P.nzVal, P.colPtr, P.rowVal, info, rocSPARSE.rocsparse_solve_policy_auto, buffer)
return AMD_IC0(n, desc, buffer, info, 0.0, P)
end
function KP.update!(p::AMD_IC0{ROCSparseMatrixCSC{$T,Cint}}, A::ROCSparseMatrixCSC{$T,Cint})
copyto!(p.P.nzVal, A.nzVal)
rocSPARSE.$sname(rocSPARSE.handle(), p.n, nnz(p.P), p.desc, p.P.nzVal, p.P.colPtr, p.P.rowVal, p.info, rocSPARSE.rocsparse_solve_policy_auto, p.buffer)
return p
end
end
end
for ArrayType in (:(ROCVector{T}), :(ROCMatrix{T}))
@eval begin
function ldiv!(ic::AMD_IC0{ROCSparseMatrixCSR{T,Cint}}, x::$ArrayType) where T <: BlasFloat
ldiv!(LowerTriangular(ic.P), x) # Forward substitution with L
ldiv!(LowerTriangular(ic.P)', x) # Backward substitution with Lᴴ
return x
end
function ldiv!(y::$ArrayType, ic::AMD_IC0{ROCSparseMatrixCSR{T,Cint}}, x::$ArrayType) where T <: BlasFloat
ic.timer_update += @elapsed begin
copyto!(y, x)
ldiv!(ic, y)
end
return y
end
function ldiv!(ic::AMD_IC0{ROCSparseMatrixCSC{T,Cint}}, x::$ArrayType) where T <: BlasReal
ldiv!(UpperTriangular(ic.P)', x) # Forward substitution with L
ldiv!(UpperTriangular(ic.P), x) # Backward substitution with Lᴴ
return x
end
function ldiv!(y::$ArrayType, ic::AMD_IC0{ROCSparseMatrixCSC{T,Cint}}, x::$ArrayType) where T <: BlasReal
ic.timer_update += @elapsed begin
copyto!(y, x)
ldiv!(ic, y)
end
return y
end
end
end
| KrylovPreconditioners | https://github.com/JuliaSmoothOptimizers/KrylovPreconditioners.jl.git |
|
[
"MPL-2.0"
] | 0.3.0 | f49af35a8dd097d4dccabf94bd2053afbfdab3a4 | code | 4396 | mutable struct AMD_ILU0{SM} <: AbstractKrylovPreconditioner
n::Int
desc::rocSPARSE.ROCMatrixDescriptor
buffer::ROCVector{UInt8}
info::rocSPARSE.MatInfo
timer_update::Float64
P::SM
end
for (bname, aname, sname, T) in ((:rocsparse_scsrilu0_buffer_size, :rocsparse_scsrilu0_analysis, :rocsparse_scsrilu0, :Float32),
(:rocsparse_dcsrilu0_buffer_size, :rocsparse_dcsrilu0_analysis, :rocsparse_dcsrilu0, :Float64),
(:rocsparse_ccsrilu0_buffer_size, :rocsparse_ccsrilu0_analysis, :rocsparse_ccsrilu0, :ComplexF32),
(:rocsparse_zcsrilu0_buffer_size, :rocsparse_zcsrilu0_analysis, :rocsparse_zcsrilu0, :ComplexF64))
@eval begin
function KP.kp_ilu0(A::ROCSparseMatrixCSR{$T,Cint})
P = copy(A)
n = checksquare(P)
desc = rocSPARSE.ROCMatrixDescriptor('G', 'L', 'N', 'O')
info = rocSPARSE.MatInfo()
buffer_size = Ref{Csize_t}()
rocSPARSE.$bname(rocSPARSE.handle(), n, nnz(P), desc, P.nzVal, P.rowPtr, P.colVal, info, buffer_size)
buffer = ROCVector{UInt8}(undef, buffer_size[])
rocSPARSE.$aname(rocSPARSE.handle(), n, nnz(P), desc, P.nzVal, P.rowPtr, P.colVal, info, rocSPARSE.rocsparse_analysis_policy_force, rocSPARSE.rocsparse_solve_policy_auto, buffer)
posit = Ref{Cint}(1)
rocSPARSE.rocsparse_csrilu0_zero_pivot(rocSPARSE.handle(), info, posit)
      (posit[] ≥ 0) && error("Structural/numerical zero in A at ($(posit[]),$(posit[]))")
rocSPARSE.$sname(rocSPARSE.handle(), n, nnz(P), desc, P.nzVal, P.rowPtr, P.colVal, info, rocSPARSE.rocsparse_solve_policy_auto, buffer)
return AMD_ILU0(n, desc, buffer, info, 0.0, P)
end
function KP.update!(p::AMD_ILU0{ROCSparseMatrixCSR{$T,Cint}}, A::ROCSparseMatrixCSR{$T,Cint})
copyto!(p.P.nzVal, A.nzVal)
rocSPARSE.$sname(rocSPARSE.handle(), p.n, nnz(p.P), p.desc, p.P.nzVal, p.P.rowPtr, p.P.colVal, p.info, rocSPARSE.rocsparse_solve_policy_auto, p.buffer)
return p
end
function KP.kp_ilu0(A::ROCSparseMatrixCSC{$T,Cint})
P = copy(A)
n = checksquare(P)
desc = rocSPARSE.ROCMatrixDescriptor('G', 'L', 'N', 'O')
info = rocSPARSE.MatInfo()
buffer_size = Ref{Csize_t}()
rocSPARSE.$bname(rocSPARSE.handle(), n, nnz(P), desc, P.nzVal, P.colPtr, P.rowVal, info, buffer_size)
buffer = ROCVector{UInt8}(undef, buffer_size[])
rocSPARSE.$aname(rocSPARSE.handle(), n, nnz(P), desc, P.nzVal, P.colPtr, P.rowVal, info, rocSPARSE.rocsparse_analysis_policy_force, rocSPARSE.rocsparse_solve_policy_auto, buffer)
posit = Ref{Cint}(1)
rocSPARSE.rocsparse_csrilu0_zero_pivot(rocSPARSE.handle(), info, posit)
      (posit[] ≥ 0) && error("Structural/numerical zero in A at ($(posit[]),$(posit[]))")
rocSPARSE.$sname(rocSPARSE.handle(), n, nnz(P), desc, P.nzVal, P.colPtr, P.rowVal, info, rocSPARSE.rocsparse_solve_policy_auto, buffer)
return AMD_ILU0(n, desc, buffer, info, 0.0, P)
end
function KP.update!(p::AMD_ILU0{ROCSparseMatrixCSC{$T,Cint}}, A::ROCSparseMatrixCSC{$T,Cint})
copyto!(p.P.nzVal, A.nzVal)
rocSPARSE.$sname(rocSPARSE.handle(), p.n, nnz(p.P), p.desc, p.P.nzVal, p.P.colPtr, p.P.rowVal, p.info, rocSPARSE.rocsparse_solve_policy_auto, p.buffer)
return p
end
end
end
for ArrayType in (:(ROCVector{T}), :(ROCMatrix{T}))
@eval begin
function ldiv!(ilu::AMD_ILU0{ROCSparseMatrixCSR{T,Cint}}, x::$ArrayType) where T <: BlasFloat
ldiv!(UnitLowerTriangular(ilu.P), x) # Forward substitution with L
ldiv!(UpperTriangular(ilu.P), x) # Backward substitution with U
return x
end
function ldiv!(y::$ArrayType, ilu::AMD_ILU0{ROCSparseMatrixCSR{T,Cint}}, x::$ArrayType) where T <: BlasFloat
copyto!(y, x)
ilu.timer_update += @elapsed begin
ldiv!(ilu, y)
end
return y
end
function ldiv!(ilu::AMD_ILU0{ROCSparseMatrixCSC{T,Cint}}, x::$ArrayType) where T <: BlasReal
ldiv!(LowerTriangular(ilu.P), x) # Forward substitution with L
ldiv!(UnitUpperTriangular(ilu.P), x) # Backward substitution with U
return x
end
function ldiv!(y::$ArrayType, ilu::AMD_ILU0{ROCSparseMatrixCSC{T,Cint}}, x::$ArrayType) where T <: BlasReal
copyto!(y, x)
ilu.timer_update += @elapsed begin
ldiv!(ilu, y)
end
return y
end
end
end
| KrylovPreconditioners | https://github.com/JuliaSmoothOptimizers/KrylovPreconditioners.jl.git |
|
[
"MPL-2.0"
] | 0.3.0 | f49af35a8dd097d4dccabf94bd2053afbfdab3a4 | code | 9830 | using AMDGPU.HIP
mutable struct AMD_KrylovOperator{T} <: AbstractKrylovOperator{T}
type::Type{T}
m::Int
n::Int
nrhs::Int
transa::Char
descA::rocSPARSE.ROCSparseMatrixDescriptor
buffer_size::Ref{Csize_t}
buffer::ROCVector{UInt8}
end
eltype(A::AMD_KrylovOperator{T}) where T = T
size(A::AMD_KrylovOperator) = (A.m, A.n)
for (SparseMatrixType, BlasType) in ((:(ROCSparseMatrixCSR{T}), :BlasFloat),
(:(ROCSparseMatrixCSC{T}), :BlasFloat),
(:(ROCSparseMatrixCOO{T}), :BlasFloat))
@eval begin
function KP.KrylovOperator(A::$SparseMatrixType; nrhs::Int=1, transa::Char='N') where T <: $BlasType
m,n = size(A)
alpha = Ref{T}(one(T))
beta = Ref{T}(zero(T))
descA = rocSPARSE.ROCSparseMatrixDescriptor(A, 'O')
if nrhs == 1
descX = rocSPARSE.ROCDenseVectorDescriptor(T, n)
descY = rocSPARSE.ROCDenseVectorDescriptor(T, m)
      algo = rocSPARSE.rocsparse_spmv_alg_default
buffer_size = Ref{Csize_t}()
if HIP.runtime_version() ≥ v"6-"
rocSPARSE.rocsparse_spmv(rocSPARSE.handle(), transa, alpha, descA, descX,
beta, descY, T, algo, rocSPARSE.rocsparse_spmv_stage_buffer_size,
buffer_size, C_NULL)
else
rocSPARSE.rocsparse_spmv(rocSPARSE.handle(), transa, alpha, descA, descX,
beta, descY, T, algo, buffer_size, C_NULL)
end
buffer = ROCVector{UInt8}(undef, buffer_size[])
if HIP.runtime_version() ≥ v"6-"
rocSPARSE.rocsparse_spmv(rocSPARSE.handle(), transa, alpha, descA, descX,
beta, descY, T, algo, rocSPARSE.rocsparse_spmv_stage_preprocess,
buffer_size, buffer)
end
return AMD_KrylovOperator{T}(T, m, n, nrhs, transa, descA, buffer_size, buffer)
else
descX = rocSPARSE.ROCDenseMatrixDescriptor(T, n, nrhs)
descY = rocSPARSE.ROCDenseMatrixDescriptor(T, m, nrhs)
algo = rocSPARSE.rocsparse_spmm_alg_default
buffer_size = Ref{Csize_t}()
rocSPARSE.rocsparse_spmm(rocSPARSE.handle(), transa, 'N', alpha, descA, descX, beta, descY, T,
algo, rocSPARSE.rocsparse_spmm_stage_buffer_size, buffer_size, C_NULL)
buffer = ROCVector{UInt8}(undef, buffer_size[])
rocSPARSE.rocsparse_spmm(rocSPARSE.handle(), transa, 'N', alpha, descA, descX, beta, descY, T,
algo, rocSPARSE.rocsparse_spmm_stage_preprocess, buffer_size, buffer)
return AMD_KrylovOperator{T}(T, m, n, nrhs, transa, descA, buffer_size, buffer)
end
end
function KP.update!(A::AMD_KrylovOperator{T}, B::$SparseMatrixType) where T <: $BlasFloat
descB = rocSPARSE.ROCSparseMatrixDescriptor(B, 'O')
A.descA = descB
return A
end
end
end
function LinearAlgebra.mul!(y::ROCVector{T}, A::AMD_KrylovOperator{T}, x::ROCVector{T}) where T <: BlasFloat
(length(y) != A.m) && throw(DimensionMismatch("length(y) != A.m"))
(length(x) != A.n) && throw(DimensionMismatch("length(x) != A.n"))
(A.nrhs == 1) || throw(DimensionMismatch("A.nrhs != 1"))
descY = rocSPARSE.ROCDenseVectorDescriptor(y)
descX = rocSPARSE.ROCDenseVectorDescriptor(x)
algo = rocSPARSE.rocsparse_spmv_alg_default
alpha = Ref{T}(one(T))
beta = Ref{T}(zero(T))
if HIP.runtime_version() ≥ v"6-"
rocSPARSE.rocsparse_spmv(rocSPARSE.handle(), A.transa, alpha, A.descA, descX,
beta, descY, T, algo, rocSPARSE.rocsparse_spmv_stage_compute,
A.buffer_size, A.buffer)
else
rocSPARSE.rocsparse_spmv(rocSPARSE.handle(), A.transa, alpha, A.descA, descX,
beta, descY, T, algo, A.buffer_size, A.buffer)
end
end
function LinearAlgebra.mul!(Y::ROCMatrix{T}, A::AMD_KrylovOperator{T}, X::ROCMatrix{T}) where T <: BlasFloat
mY, nY = size(Y)
mX, nX = size(X)
(mY != A.m) && throw(DimensionMismatch("mY != A.m"))
(mX != A.n) && throw(DimensionMismatch("mX != A.n"))
(nY == nX == A.nrhs) || throw(DimensionMismatch("nY != A.nrhs or nX != A.nrhs"))
descY = rocSPARSE.ROCDenseMatrixDescriptor(Y)
descX = rocSPARSE.ROCDenseMatrixDescriptor(X)
algo = rocSPARSE.rocsparse_spmm_alg_default
alpha = Ref{T}(one(T))
beta = Ref{T}(zero(T))
rocSPARSE.rocsparse_spmm(rocSPARSE.handle(), A.transa, 'N', alpha, A.descA, descX,
beta, descY, T, algo, rocSPARSE.rocsparse_spmm_stage_compute, A.buffer_size, A.buffer)
end
mutable struct AMD_TriangularOperator{T} <: AbstractTriangularOperator{T}
type::Type{T}
m::Int
n::Int
nrhs::Int
transa::Char
descA::rocSPARSE.ROCSparseMatrixDescriptor
buffer_size::Ref{Csize_t}
buffer::ROCVector{UInt8}
end
eltype(A::AMD_TriangularOperator{T}) where T = T
size(A::AMD_TriangularOperator) = (A.m, A.n)
for (SparseMatrixType, BlasType) in ((:(ROCSparseMatrixCSR{T}), :BlasFloat),
(:(ROCSparseMatrixCOO{T}), :BlasFloat))
@eval begin
function KP.TriangularOperator(A::$SparseMatrixType, uplo::Char, diag::Char; nrhs::Int=1, transa::Char='N') where T <: $BlasType
m,n = size(A)
alpha = Ref{T}(one(T))
descA = rocSPARSE.ROCSparseMatrixDescriptor(A, 'O')
rocsparse_uplo = Ref{rocSPARSE.rocsparse_fill_mode}(uplo)
rocsparse_diag = Ref{rocSPARSE.rocsparse_diag_type}(diag)
rocSPARSE.rocsparse_spmat_set_attribute(descA, rocSPARSE.rocsparse_spmat_fill_mode, rocsparse_uplo, Csize_t(sizeof(rocsparse_uplo)))
rocSPARSE.rocsparse_spmat_set_attribute(descA, rocSPARSE.rocsparse_spmat_diag_type, rocsparse_diag, Csize_t(sizeof(rocsparse_diag)))
if nrhs == 1
descX = rocSPARSE.ROCDenseVectorDescriptor(T, n)
descY = rocSPARSE.ROCDenseVectorDescriptor(T, m)
algo = rocSPARSE.rocsparse_spsv_alg_default
buffer_size = Ref{Csize_t}()
rocSPARSE.rocsparse_spsv(rocSPARSE.handle(), transa, alpha, descA, descX, descY, T, algo,
rocSPARSE.rocsparse_spsv_stage_buffer_size, buffer_size, C_NULL)
buffer = ROCVector{UInt8}(undef, buffer_size[])
rocSPARSE.rocsparse_spsv(rocSPARSE.handle(), transa, alpha, descA, descX, descY, T, algo,
rocSPARSE.rocsparse_spsv_stage_preprocess, buffer_size, buffer)
return AMD_TriangularOperator{T}(T, m, n, nrhs, transa, descA, buffer_size, buffer)
else
descX = rocSPARSE.ROCDenseMatrixDescriptor(T, n, nrhs)
descY = rocSPARSE.ROCDenseMatrixDescriptor(T, m, nrhs)
algo = rocSPARSE.rocsparse_spsm_alg_default
buffer_size = Ref{Csize_t}()
rocSPARSE.rocsparse_spsm(rocSPARSE.handle(), transa, 'N', alpha, descA, descX, descY, T, algo,
rocSPARSE.rocsparse_spsm_stage_buffer_size, buffer_size, C_NULL)
buffer = ROCVector{UInt8}(undef, buffer_size[])
rocSPARSE.rocsparse_spsm(rocSPARSE.handle(), transa, 'N', alpha, descA, descX, descY, T, algo,
rocSPARSE.rocsparse_spsm_stage_preprocess, buffer_size, buffer)
return AMD_TriangularOperator{T}(T, m, n, nrhs, transa, descA, buffer_size, buffer)
end
end
function KP.update!(A::AMD_TriangularOperator{T}, B::$SparseMatrixType) where T <: $BlasFloat
(B isa ROCSparseMatrixCOO) && rocSPARSE.rocsparse_coo_set_pointers(A.descA, B.rowInd, B.colInd, B.nzVal)
(B isa ROCSparseMatrixCSR) && rocSPARSE.rocsparse_csr_set_pointers(A.descA, B.rowPtr, B.colVal, B.nzVal)
return A
end
end
end
function LinearAlgebra.ldiv!(y::ROCVector{T}, A::AMD_TriangularOperator{T}, x::ROCVector{T}) where T <: BlasFloat
(length(y) != A.m) && throw(DimensionMismatch("length(y) != A.m"))
(length(x) != A.n) && throw(DimensionMismatch("length(x) != A.n"))
(A.nrhs == 1) || throw(DimensionMismatch("A.nrhs != 1"))
descY = rocSPARSE.ROCDenseVectorDescriptor(y)
descX = rocSPARSE.ROCDenseVectorDescriptor(x)
algo = rocSPARSE.rocsparse_spsv_alg_default
alpha = Ref{T}(one(T))
rocSPARSE.rocsparse_spsv(rocSPARSE.handle(), A.transa, alpha, A.descA, descX, descY, T,
algo, rocSPARSE.rocsparse_spsv_stage_compute, A.buffer_size, A.buffer)
end
function LinearAlgebra.ldiv!(Y::ROCMatrix{T}, A::AMD_TriangularOperator{T}, X::ROCMatrix{T}) where T <: BlasFloat
mY, nY = size(Y)
mX, nX = size(X)
(mY != A.m) && throw(DimensionMismatch("mY != A.m"))
(mX != A.n) && throw(DimensionMismatch("mX != A.n"))
(nY == nX == A.nrhs) || throw(DimensionMismatch("nY != A.nrhs or nX != A.nrhs"))
descY = rocSPARSE.ROCDenseMatrixDescriptor(Y)
descX = rocSPARSE.ROCDenseMatrixDescriptor(X)
algo = rocSPARSE.rocsparse_spsm_alg_default
alpha = Ref{T}(one(T))
rocSPARSE.rocsparse_spsm(rocSPARSE.handle(), A.transa, 'N', alpha, A.descA, descX, descY, T,
algo, rocSPARSE.rocsparse_spsm_stage_compute, A.buffer_size, A.buffer)
end
| KrylovPreconditioners | https://github.com/JuliaSmoothOptimizers/KrylovPreconditioners.jl.git |
|
[
"MPL-2.0"
] | 0.3.0 | f49af35a8dd097d4dccabf94bd2053afbfdab3a4 | code | 98 | KP.scaling_csr!(A::rocSPARSE.ROCSparseMatrixCSR, b::ROCVector) = scaling_csr!(A, b, ROCBackend())
| KrylovPreconditioners | https://github.com/JuliaSmoothOptimizers/KrylovPreconditioners.jl.git |
|
[
"MPL-2.0"
] | 0.3.0 | f49af35a8dd097d4dccabf94bd2053afbfdab3a4 | code | 1660 | KP.BlockJacobiPreconditioner(J::CUSPARSE.CuSparseMatrixCSR; options...) = BlockJacobiPreconditioner(SparseMatrixCSC(J); options...)
function KP.create_blocklist(cublocks::CuArray, npart)
blocklist = Array{CuMatrix{Float64}}(undef, npart)
for b in 1:npart
blocklist[b] = CuMatrix{Float64}(undef, size(cublocks,1), size(cublocks,2))
end
return blocklist
end
function _update_gpu(p, j_rowptr, j_colval, j_nzval, device::CUDABackend)
nblocks = p.nblocks
blocksize = p.blocksize
fillblock_gpu_kernel! = KP._fillblock_gpu!(device)
    # Fill Block Jacobi
fillblock_gpu_kernel!(
p.cublocks, size(p.id,1),
p.cupartitions, p.cumap,
j_rowptr, j_colval, j_nzval,
p.cupart, p.culpartitions, p.id,
ndrange=(nblocks, blocksize),
)
KA.synchronize(device)
    # Invert blocks
for b in 1:nblocks
p.blocklist[b] .= p.cublocks[:,:,b]
end
CUDA.@sync pivot, info = CUBLAS.getrf_batched!(p.blocklist, true)
CUDA.@sync pivot, info, p.blocklist = CUBLAS.getri_batched(p.blocklist, pivot)
for b in 1:nblocks
p.cublocks[:,:,b] .= p.blocklist[b]
end
return
end
"""
function update!(J::CuSparseMatrixCSR, p)
Update the preconditioner `p` from the sparse Jacobian `J` in CSR format for CUDA
1) The dense blocks `cuJs` are filled from the sparse Jacobian `J`
2) Do a batch inversion of the dense blocks using CUBLAS
3) Extract the preconditioner matrix `p.P` from the dense blocks `cuJs`
"""
function KP.update!(p::BlockJacobiPreconditioner, J::CUSPARSE.CuSparseMatrixCSR)
_update_gpu(p, J.rowPtr, J.colVal, J.nzVal, p.device)
end
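# Usage sketch (hypothetical matrix; `CUDABackend` comes from CUDA.jl):
#
#   using CUDA, CUDA.CUSPARSE, SparseArrays, LinearAlgebra, KrylovPreconditioners
#   J = CuSparseMatrixCSR(sprand(100, 100, 0.05) + 10I)
#   p = BlockJacobiPreconditioner(J; nblocks=4, device=CUDABackend())
#   update!(p, J)   # refresh the dense blocks after J changes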
| KrylovPreconditioners | https://github.com/JuliaSmoothOptimizers/KrylovPreconditioners.jl.git |
|
[
"MPL-2.0"
] | 0.3.0 | f49af35a8dd097d4dccabf94bd2053afbfdab3a4 | code | 4586 | mutable struct IC0Info
info::CUSPARSE.csric02Info_t
function IC0Info()
info_ref = Ref{CUSPARSE.csric02Info_t}()
CUSPARSE.cusparseCreateCsric02Info(info_ref)
obj = new(info_ref[])
finalizer(CUSPARSE.cusparseDestroyCsric02Info, obj)
obj
end
end
unsafe_convert(::Type{CUSPARSE.csric02Info_t}, info::IC0Info) = info.info
mutable struct NVIDIA_IC0{SM} <: AbstractKrylovPreconditioner
n::Int
desc::CUSPARSE.CuMatrixDescriptor
buffer::CuVector{UInt8}
info::IC0Info
timer_update::Float64
P::SM
end
for (bname, aname, sname, T) in ((:cusparseScsric02_bufferSize, :cusparseScsric02_analysis, :cusparseScsric02, :Float32),
(:cusparseDcsric02_bufferSize, :cusparseDcsric02_analysis, :cusparseDcsric02, :Float64),
(:cusparseCcsric02_bufferSize, :cusparseCcsric02_analysis, :cusparseCcsric02, :ComplexF32),
(:cusparseZcsric02_bufferSize, :cusparseZcsric02_analysis, :cusparseZcsric02, :ComplexF64))
@eval begin
function KP.kp_ic0(A::CuSparseMatrixCSR{$T,Cint})
P = copy(A)
n = checksquare(P)
desc = CUSPARSE.CuMatrixDescriptor('G', 'L', 'N', 'O')
info = IC0Info()
buffer_size = Ref{Cint}()
CUSPARSE.$bname(CUSPARSE.handle(), n, nnz(P), desc, P.nzVal, P.rowPtr, P.colVal, info, buffer_size)
buffer = CuVector{UInt8}(undef, buffer_size[])
CUSPARSE.$aname(CUSPARSE.handle(), n, nnz(P), desc, P.nzVal, P.rowPtr, P.colVal, info, CUSPARSE.CUSPARSE_SOLVE_POLICY_USE_LEVEL, buffer)
posit = Ref{Cint}(1)
CUSPARSE.cusparseXcsric02_zeroPivot(CUSPARSE.handle(), info, posit)
      (posit[] ≥ 0) && error("Structural/numerical zero in A at ($(posit[]),$(posit[]))")
CUSPARSE.$sname(CUSPARSE.handle(), n, nnz(P), desc, P.nzVal, P.rowPtr, P.colVal, info, CUSPARSE.CUSPARSE_SOLVE_POLICY_USE_LEVEL, buffer)
return NVIDIA_IC0(n, desc, buffer, info, 0.0, P)
end
function KP.update!(p::NVIDIA_IC0{CuSparseMatrixCSR{$T,Cint}}, A::CuSparseMatrixCSR{$T,Cint})
copyto!(p.P.nzVal, A.nzVal)
CUSPARSE.$sname(CUSPARSE.handle(), p.n, nnz(p.P), p.desc, p.P.nzVal, p.P.rowPtr, p.P.colVal, p.info, CUSPARSE.CUSPARSE_SOLVE_POLICY_USE_LEVEL, p.buffer)
return p
end
function KP.kp_ic0(A::CuSparseMatrixCSC{$T,Cint})
P = copy(A)
n = checksquare(P)
desc = CUSPARSE.CuMatrixDescriptor('G', 'L', 'N', 'O')
info = IC0Info()
buffer_size = Ref{Cint}()
CUSPARSE.$bname(CUSPARSE.handle(), n, nnz(P), desc, P.nzVal, P.colPtr, P.rowVal, info, buffer_size)
buffer = CuVector{UInt8}(undef, buffer_size[])
CUSPARSE.$aname(CUSPARSE.handle(), n, nnz(P), desc, P.nzVal, P.colPtr, P.rowVal, info, CUSPARSE.CUSPARSE_SOLVE_POLICY_USE_LEVEL, buffer)
posit = Ref{Cint}(1)
CUSPARSE.cusparseXcsric02_zeroPivot(CUSPARSE.handle(), info, posit)
      (posit[] ≥ 0) && error("Structural/numerical zero in A at ($(posit[]),$(posit[]))")
CUSPARSE.$sname(CUSPARSE.handle(), n, nnz(P), desc, P.nzVal, P.colPtr, P.rowVal, info, CUSPARSE.CUSPARSE_SOLVE_POLICY_USE_LEVEL, buffer)
return NVIDIA_IC0(n, desc, buffer, info, 0.0, P)
end
function KP.update!(p::NVIDIA_IC0{CuSparseMatrixCSC{$T,Cint}}, A::CuSparseMatrixCSC{$T,Cint})
copyto!(p.P.nzVal, A.nzVal)
CUSPARSE.$sname(CUSPARSE.handle(), p.n, nnz(p.P), p.desc, p.P.nzVal, p.P.colPtr, p.P.rowVal, p.info, CUSPARSE.CUSPARSE_SOLVE_POLICY_USE_LEVEL, p.buffer)
return p
end
end
end
for ArrayType in (:(CuVector{T}), :(CuMatrix{T}))
@eval begin
function ldiv!(ic::NVIDIA_IC0{CuSparseMatrixCSR{T,Cint}}, x::$ArrayType) where T <: BlasFloat
ldiv!(LowerTriangular(ic.P), x) # Forward substitution with L
ldiv!(LowerTriangular(ic.P)', x) # Backward substitution with Lᴴ
return x
end
function ldiv!(y::$ArrayType, ic::NVIDIA_IC0{CuSparseMatrixCSR{T,Cint}}, x::$ArrayType) where T <: BlasFloat
copyto!(y, x)
ic.timer_update += @elapsed begin
ldiv!(ic, y)
end
return y
end
function ldiv!(ic::NVIDIA_IC0{CuSparseMatrixCSC{T,Cint}}, x::$ArrayType) where T <: BlasFloat
ldiv!(UpperTriangular(ic.P)', x) # Forward substitution with L
ldiv!(UpperTriangular(ic.P), x) # Backward substitution with Lᴴ
return x
end
function ldiv!(y::$ArrayType, ic::NVIDIA_IC0{CuSparseMatrixCSC{T,Cint}}, x::$ArrayType) where T <: BlasReal
copyto!(y, x)
ic.timer_update += @elapsed begin
ldiv!(ic, y)
end
return y
end
end
end
| KrylovPreconditioners | https://github.com/JuliaSmoothOptimizers/KrylovPreconditioners.jl.git |
|
[
"MPL-2.0"
] | 0.3.0 | f49af35a8dd097d4dccabf94bd2053afbfdab3a4 | code | 4643 | mutable struct ILU0Info
info::CUSPARSE.csrilu02Info_t
function ILU0Info()
info_ref = Ref{CUSPARSE.csrilu02Info_t}()
CUSPARSE.cusparseCreateCsrilu02Info(info_ref)
obj = new(info_ref[])
finalizer(CUSPARSE.cusparseDestroyCsrilu02Info, obj)
obj
end
end
unsafe_convert(::Type{CUSPARSE.csrilu02Info_t}, info::ILU0Info) = info.info
mutable struct NVIDIA_ILU0{SM} <: AbstractKrylovPreconditioner
n::Int
desc::CUSPARSE.CuMatrixDescriptor
buffer::CuVector{UInt8}
info::ILU0Info
timer_update::Float64
P::SM
end
for (bname, aname, sname, T) in ((:cusparseScsrilu02_bufferSize, :cusparseScsrilu02_analysis, :cusparseScsrilu02, :Float32),
(:cusparseDcsrilu02_bufferSize, :cusparseDcsrilu02_analysis, :cusparseDcsrilu02, :Float64),
(:cusparseCcsrilu02_bufferSize, :cusparseCcsrilu02_analysis, :cusparseCcsrilu02, :ComplexF32),
(:cusparseZcsrilu02_bufferSize, :cusparseZcsrilu02_analysis, :cusparseZcsrilu02, :ComplexF64))
@eval begin
function KP.kp_ilu0(A::CuSparseMatrixCSR{$T,Cint})
P = copy(A)
n = checksquare(P)
desc = CUSPARSE.CuMatrixDescriptor('G', 'L', 'N', 'O')
info = ILU0Info()
buffer_size = Ref{Cint}()
CUSPARSE.$bname(CUSPARSE.handle(), n, nnz(P), desc, P.nzVal, P.rowPtr, P.colVal, info, buffer_size)
buffer = CuVector{UInt8}(undef, buffer_size[])
CUSPARSE.$aname(CUSPARSE.handle(), n, nnz(P), desc, P.nzVal, P.rowPtr, P.colVal, info, CUSPARSE.CUSPARSE_SOLVE_POLICY_USE_LEVEL, buffer)
posit = Ref{Cint}(1)
CUSPARSE.cusparseXcsrilu02_zeroPivot(CUSPARSE.handle(), info, posit)
      (posit[] ≥ 0) && error("Structural/numerical zero in A at ($(posit[]),$(posit[]))")
CUSPARSE.$sname(CUSPARSE.handle(), n, nnz(P), desc, P.nzVal, P.rowPtr, P.colVal, info, CUSPARSE.CUSPARSE_SOLVE_POLICY_USE_LEVEL, buffer)
return NVIDIA_ILU0(n, desc, buffer, info, 0.0, P)
end
function KP.update!(p::NVIDIA_ILU0{CuSparseMatrixCSR{$T,Cint}}, A::CuSparseMatrixCSR{$T,Cint})
copyto!(p.P.nzVal, A.nzVal)
CUSPARSE.$sname(CUSPARSE.handle(), p.n, nnz(p.P), p.desc, p.P.nzVal, p.P.rowPtr, p.P.colVal, p.info, CUSPARSE.CUSPARSE_SOLVE_POLICY_USE_LEVEL, p.buffer)
return p
end
function KP.kp_ilu0(A::CuSparseMatrixCSC{$T,Cint})
P = copy(A)
n = checksquare(P)
desc = CUSPARSE.CuMatrixDescriptor('G', 'L', 'N', 'O')
info = ILU0Info()
buffer_size = Ref{Cint}()
CUSPARSE.$bname(CUSPARSE.handle(), n, nnz(P), desc, P.nzVal, P.colPtr, P.rowVal, info, buffer_size)
buffer = CuVector{UInt8}(undef, buffer_size[])
CUSPARSE.$aname(CUSPARSE.handle(), n, nnz(P), desc, P.nzVal, P.colPtr, P.rowVal, info, CUSPARSE.CUSPARSE_SOLVE_POLICY_USE_LEVEL, buffer)
posit = Ref{Cint}(1)
CUSPARSE.cusparseXcsrilu02_zeroPivot(CUSPARSE.handle(), info, posit)
      (posit[] ≥ 0) && error("Structural/numerical zero in A at ($(posit[]),$(posit[]))")
CUSPARSE.$sname(CUSPARSE.handle(), n, nnz(P), desc, P.nzVal, P.colPtr, P.rowVal, info, CUSPARSE.CUSPARSE_SOLVE_POLICY_USE_LEVEL, buffer)
return NVIDIA_ILU0(n, desc, buffer, info, 0.0, P)
end
function KP.update!(p::NVIDIA_ILU0{CuSparseMatrixCSC{$T,Cint}}, A::CuSparseMatrixCSC{$T,Cint})
copyto!(p.P.nzVal, A.nzVal)
CUSPARSE.$sname(CUSPARSE.handle(), p.n, nnz(p.P), p.desc, p.P.nzVal, p.P.colPtr, p.P.rowVal, p.info, CUSPARSE.CUSPARSE_SOLVE_POLICY_USE_LEVEL, p.buffer)
return p
end
end
end
for ArrayType in (:(CuVector{T}), :(CuMatrix{T}))
@eval begin
function ldiv!(ilu::NVIDIA_ILU0{CuSparseMatrixCSR{T,Cint}}, x::$ArrayType) where T <: BlasFloat
ldiv!(UnitLowerTriangular(ilu.P), x) # Forward substitution with L
ldiv!(UpperTriangular(ilu.P), x) # Backward substitution with U
return x
end
function ldiv!(y::$ArrayType, ilu::NVIDIA_ILU0{CuSparseMatrixCSR{T,Cint}}, x::$ArrayType) where T <: BlasFloat
copyto!(y, x)
ilu.timer_update += @elapsed begin
ldiv!(ilu, y)
end
return y
end
function ldiv!(ilu::NVIDIA_ILU0{CuSparseMatrixCSC{T,Cint}}, x::$ArrayType) where T <: BlasReal
ldiv!(LowerTriangular(ilu.P), x) # Forward substitution with L
ldiv!(UnitUpperTriangular(ilu.P), x) # Backward substitution with U
return x
end
function ldiv!(y::$ArrayType, ilu::NVIDIA_ILU0{CuSparseMatrixCSC{T,Cint}}, x::$ArrayType) where T <: BlasReal
copyto!(y, x)
ilu.timer_update += @elapsed begin
ldiv!(ilu, y)
end
return y
end
end
end
| KrylovPreconditioners | https://github.com/JuliaSmoothOptimizers/KrylovPreconditioners.jl.git |
|
[
"MPL-2.0"
] | 0.3.0 | f49af35a8dd097d4dccabf94bd2053afbfdab3a4 | code | 8704 | mutable struct NVIDIA_KrylovOperator{T} <: AbstractKrylovOperator{T}
type::Type{T}
m::Int
n::Int
nrhs::Int
transa::Char
descA::CUSPARSE.CuSparseMatrixDescriptor
buffer::CuVector{UInt8}
end
eltype(A::NVIDIA_KrylovOperator{T}) where T = T
size(A::NVIDIA_KrylovOperator) = (A.m, A.n)
for (SparseMatrixType, BlasType) in ((:(CuSparseMatrixCSR{T}), :BlasFloat),
(:(CuSparseMatrixCSC{T}), :BlasFloat),
(:(CuSparseMatrixCOO{T}), :BlasFloat))
@eval begin
function KP.KrylovOperator(A::$SparseMatrixType; nrhs::Int=1, transa::Char='N') where T <: $BlasType
m,n = size(A)
alpha = Ref{T}(one(T))
beta = Ref{T}(zero(T))
descA = CUSPARSE.CuSparseMatrixDescriptor(A, 'O')
if nrhs == 1
descX = CUSPARSE.CuDenseVectorDescriptor(T, n)
descY = CUSPARSE.CuDenseVectorDescriptor(T, m)
algo = CUSPARSE.CUSPARSE_SPMV_ALG_DEFAULT
buffer_size = Ref{Csize_t}()
CUSPARSE.cusparseSpMV_bufferSize(CUSPARSE.handle(), transa, alpha, descA, descX, beta, descY, T, algo, buffer_size)
buffer = CuVector{UInt8}(undef, buffer_size[])
if CUSPARSE.version() ≥ v"12.3"
CUSPARSE.cusparseSpMV_preprocess(CUSPARSE.handle(), transa, alpha, descA, descX, beta, descY, T, algo, buffer)
end
return NVIDIA_KrylovOperator{T}(T, m, n, nrhs, transa, descA, buffer)
else
descX = CUSPARSE.CuDenseMatrixDescriptor(T, n, nrhs)
descY = CUSPARSE.CuDenseMatrixDescriptor(T, m, nrhs)
algo = CUSPARSE.CUSPARSE_SPMM_ALG_DEFAULT
buffer_size = Ref{Csize_t}()
CUSPARSE.cusparseSpMM_bufferSize(CUSPARSE.handle(), transa, 'N', alpha, descA, descX, beta, descY, T, algo, buffer_size)
buffer = CuVector{UInt8}(undef, buffer_size[])
if !(A isa CuSparseMatrixCOO)
CUSPARSE.cusparseSpMM_preprocess(CUSPARSE.handle(), transa, 'N', alpha, descA, descX, beta, descY, T, algo, buffer)
end
return NVIDIA_KrylovOperator{T}(T, m, n, nrhs, transa, descA, buffer)
end
end
function KP.update!(A::NVIDIA_KrylovOperator{T}, B::$SparseMatrixType) where T <: $BlasFloat
descB = CUSPARSE.CuSparseMatrixDescriptor(B, 'O')
A.descA = descB
return A
end
end
end
function LinearAlgebra.mul!(y::CuVector{T}, A::NVIDIA_KrylovOperator{T}, x::CuVector{T}) where T <: BlasFloat
(length(y) != A.m) && throw(DimensionMismatch("length(y) != A.m"))
(length(x) != A.n) && throw(DimensionMismatch("length(x) != A.n"))
(A.nrhs == 1) || throw(DimensionMismatch("A.nrhs != 1"))
descY = CUSPARSE.CuDenseVectorDescriptor(y)
descX = CUSPARSE.CuDenseVectorDescriptor(x)
algo = CUSPARSE.CUSPARSE_SPMV_ALG_DEFAULT
alpha = Ref{T}(one(T))
beta = Ref{T}(zero(T))
CUSPARSE.cusparseSpMV(CUSPARSE.handle(), A.transa, alpha, A.descA, descX, beta, descY, T, algo, A.buffer)
end
function LinearAlgebra.mul!(Y::CuMatrix{T}, A::NVIDIA_KrylovOperator{T}, X::CuMatrix{T}) where T <: BlasFloat
mY, nY = size(Y)
mX, nX = size(X)
(mY != A.m) && throw(DimensionMismatch("mY != A.m"))
(mX != A.n) && throw(DimensionMismatch("mX != A.n"))
(nY == nX == A.nrhs) || throw(DimensionMismatch("nY != A.nrhs or nX != A.nrhs"))
descY = CUSPARSE.CuDenseMatrixDescriptor(Y)
descX = CUSPARSE.CuDenseMatrixDescriptor(X)
algo = CUSPARSE.CUSPARSE_SPMM_ALG_DEFAULT
alpha = Ref{T}(one(T))
beta = Ref{T}(zero(T))
CUSPARSE.cusparseSpMM(CUSPARSE.handle(), A.transa, 'N', alpha, A.descA, descX, beta, descY, T, algo, A.buffer)
end
mutable struct NVIDIA_TriangularOperator{T,S} <: AbstractTriangularOperator{T}
type::Type{T}
m::Int
n::Int
nrhs::Int
transa::Char
descA::CUSPARSE.CuSparseMatrixDescriptor
descT::S
buffer::CuVector{UInt8}
end
eltype(A::NVIDIA_TriangularOperator{T}) where T = T
size(A::NVIDIA_TriangularOperator) = (A.m, A.n)
for (SparseMatrixType, BlasType) in ((:(CuSparseMatrixCSR{T}), :BlasFloat),
(:(CuSparseMatrixCOO{T}), :BlasFloat))
@eval begin
function KP.TriangularOperator(A::$SparseMatrixType, uplo::Char, diag::Char; nrhs::Int=1, transa::Char='N') where T <: $BlasType
m,n = size(A)
alpha = Ref{T}(one(T))
descA = CUSPARSE.CuSparseMatrixDescriptor(A, 'O')
cusparse_uplo = Ref{CUSPARSE.cusparseFillMode_t}(uplo)
cusparse_diag = Ref{CUSPARSE.cusparseDiagType_t}(diag)
CUSPARSE.cusparseSpMatSetAttribute(descA, 'F', cusparse_uplo, Csize_t(sizeof(cusparse_uplo)))
CUSPARSE.cusparseSpMatSetAttribute(descA, 'D', cusparse_diag, Csize_t(sizeof(cusparse_diag)))
if nrhs == 1
descT = CUSPARSE.CuSparseSpSVDescriptor()
descX = CUSPARSE.CuDenseVectorDescriptor(T, n)
descY = CUSPARSE.CuDenseVectorDescriptor(T, m)
algo = CUSPARSE.CUSPARSE_SPSV_ALG_DEFAULT
buffer_size = Ref{Csize_t}()
CUSPARSE.cusparseSpSV_bufferSize(CUSPARSE.handle(), transa, alpha, descA, descX, descY, T, algo, descT, buffer_size)
buffer = CuVector{UInt8}(undef, buffer_size[])
CUSPARSE.cusparseSpSV_analysis(CUSPARSE.handle(), transa, alpha, descA, descX, descY, T, algo, descT, buffer)
return NVIDIA_TriangularOperator{T,CUSPARSE.CuSparseSpSVDescriptor}(T, m, n, nrhs, transa, descA, descT, buffer)
else
descT = CUSPARSE.CuSparseSpSMDescriptor()
descX = CUSPARSE.CuDenseMatrixDescriptor(T, n, nrhs)
descY = CUSPARSE.CuDenseMatrixDescriptor(T, m, nrhs)
algo = CUSPARSE.CUSPARSE_SPSM_ALG_DEFAULT
buffer_size = Ref{Csize_t}()
CUSPARSE.cusparseSpSM_bufferSize(CUSPARSE.handle(), transa, 'N', alpha, descA, descX, descY, T, algo, descT, buffer_size)
buffer = CuVector{UInt8}(undef, buffer_size[])
CUSPARSE.cusparseSpSM_analysis(CUSPARSE.handle(), transa, 'N', alpha, descA, descX, descY, T, algo, descT, buffer)
return NVIDIA_TriangularOperator{T,CUSPARSE.CuSparseSpSMDescriptor}(T, m, n, nrhs, transa, descA, descT, buffer)
end
end
function KP.update!(A::NVIDIA_TriangularOperator{T,CUSPARSE.CuSparseSpSVDescriptor}, B::$SparseMatrixType) where T <: $BlasFloat
CUSPARSE.version() ≥ v"12.2" || error("This operation is only supported by CUDA ≥ v12.3")
descB = CUSPARSE.CuSparseMatrixDescriptor(B, 'O')
A.descA = descB
CUSPARSE.cusparseSpSV_updateMatrix(CUSPARSE.handle(), A.descT, B.nzVal, 'G')
return A
end
function KP.update!(A::NVIDIA_TriangularOperator{T,CUSPARSE.CuSparseSpSMDescriptor}, B::$SparseMatrixType) where T <: $BlasFloat
CUSPARSE.version() ≥ v"12.3" || error("This operation is only supported by CUDA ≥ v12.4")
descB = CUSPARSE.CuSparseMatrixDescriptor(B, 'O')
A.descA = descB
CUSPARSE.cusparseSpSM_updateMatrix(CUSPARSE.handle(), A.descT, B.nzVal, 'G')
return A
end
end
end
function LinearAlgebra.ldiv!(y::CuVector{T}, A::NVIDIA_TriangularOperator{T}, x::CuVector{T}) where T <: BlasFloat
(length(y) != A.m) && throw(DimensionMismatch("length(y) != A.m"))
(length(x) != A.n) && throw(DimensionMismatch("length(x) != A.n"))
(A.nrhs == 1) || throw(DimensionMismatch("A.nrhs != 1"))
descY = CUSPARSE.CuDenseVectorDescriptor(y)
descX = CUSPARSE.CuDenseVectorDescriptor(x)
algo = CUSPARSE.CUSPARSE_SPSV_ALG_DEFAULT
alpha = Ref{T}(one(T))
CUSPARSE.cusparseSpSV_solve(CUSPARSE.handle(), A.transa, alpha, A.descA, descX, descY, T, algo, A.descT)
end
function LinearAlgebra.ldiv!(Y::CuMatrix{T}, A::NVIDIA_TriangularOperator{T}, X::CuMatrix{T}) where T <: BlasFloat
mY, nY = size(Y)
mX, nX = size(X)
(mY != A.m) && throw(DimensionMismatch("mY != A.m"))
(mX != A.n) && throw(DimensionMismatch("mX != A.n"))
(nY == nX == A.nrhs) || throw(DimensionMismatch("nY != A.nrhs or nX != A.nrhs"))
descY = CUSPARSE.CuDenseMatrixDescriptor(Y)
descX = CUSPARSE.CuDenseMatrixDescriptor(X)
algo = CUSPARSE.CUSPARSE_SPSM_ALG_DEFAULT
alpha = Ref{T}(one(T))
CUSPARSE.cusparseSpSM_solve(CUSPARSE.handle(), A.transa, 'N', alpha, A.descA, descX, descY, T, algo, A.descT)
end
| KrylovPreconditioners | https://github.com/JuliaSmoothOptimizers/KrylovPreconditioners.jl.git |
|
[
"MPL-2.0"
] | 0.3.0 | f49af35a8dd097d4dccabf94bd2053afbfdab3a4 | code | 96 | KP.scaling_csr!(A::CUSPARSE.CuSparseMatrixCSR, b::CuVector) = scaling_csr!(A, b, CUDABackend())
| KrylovPreconditioners | https://github.com/JuliaSmoothOptimizers/KrylovPreconditioners.jl.git |
|
[
"MPL-2.0"
] | 0.3.0 | f49af35a8dd097d4dccabf94bd2053afbfdab3a4 | code | 1666 | KP.BlockJacobiPreconditioner(J::oneMKL.oneSparseMatrixCSR; options...) = BlockJacobiPreconditioner(SparseMatrixCSC(J); options...)
function KP.create_blocklist(cublocks::oneArray, npart)
blocklist = Array{oneMatrix{Float64}}(undef, npart)
for b in 1:npart
blocklist[b] = oneMatrix{Float64}(undef, size(cublocks,1), size(cublocks,2))
end
return blocklist
end
function _update_gpu(p, j_rowptr, j_colval, j_nzval, device::oneAPIBackend)
nblocks = p.nblocks
blocksize = p.blocksize
fillblock_gpu_kernel! = KP._fillblock_gpu!(device)
    # Fill Block Jacobi
fillblock_gpu_kernel!(
p.cublocks, size(p.id,1),
p.cupartitions, p.cumap,
j_rowptr, j_colval, j_nzval,
p.cupart, p.culpartitions, p.id,
ndrange=(nblocks, blocksize),
)
KA.synchronize(device)
    # Invert blocks
for b in 1:nblocks
p.blocklist[b] .= p.cublocks[:,:,b]
end
oneAPI.@sync pivot, p.blocklist = oneMKL.getrf_batched!(p.blocklist)
oneAPI.@sync pivot, p.blocklist = oneMKL.getri_batched!(p.blocklist, pivot)
for b in 1:nblocks
p.cublocks[:,:,b] .= p.blocklist[b]
end
return
end
"""
function update!(J::oneSparseMatrixCSR, p)
Update the preconditioner `p` from the sparse Jacobian `J` in CSR format for oneAPI
1) The dense blocks `cuJs` are filled from the sparse Jacobian `J`
2) Do a batch inversion of the dense blocks using oneMKL
3) Extract the preconditioner matrix `p.P` from the dense blocks `cuJs`
"""
function KP.update!(p::BlockJacobiPreconditioner, J::oneMKL.oneSparseMatrixCSR)
_update_gpu(p, J.rowPtr, J.colVal, J.nzVal, p.device)
end
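# Usage sketch (`oneAPIBackend` comes from oneAPI.jl; the matrix is hypothetical):
#
#   J = oneSparseMatrixCSR(...)  # sparse Jacobian already on the Intel GPU
#   p = BlockJacobiPreconditioner(J; nblocks=4, device=oneAPIBackend())
#   update!(p, J)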
| KrylovPreconditioners | https://github.com/JuliaSmoothOptimizers/KrylovPreconditioners.jl.git |
|
[
"MPL-2.0"
] | 0.3.0 | f49af35a8dd097d4dccabf94bd2053afbfdab3a4 | code | 3765 | mutable struct INTEL_KrylovOperator{T} <: AbstractKrylovOperator{T}
type::Type{T}
m::Int
n::Int
nrhs::Int
transa::Char
matrix::oneSparseMatrixCSR{T}
end
eltype(A::INTEL_KrylovOperator{T}) where T = T
size(A::INTEL_KrylovOperator) = (A.m, A.n)
for (SparseMatrixType, BlasType) in ((:(oneSparseMatrixCSR{T}), :BlasFloat),)
@eval begin
function KP.KrylovOperator(A::$SparseMatrixType; nrhs::Int=1, transa::Char='N') where T <: $BlasType
m,n = size(A)
if nrhs == 1
oneMKL.sparse_optimize_gemv!(transa, A)
end
# sparse_optimize_gemm! is only available with oneAPI > v2024.1.0
return INTEL_KrylovOperator{T}(T, m, n, nrhs, transa, A)
end
function KP.update!(A::INTEL_KrylovOperator{T}, B::$SparseMatrixType) where T <: $BlasFloat
error("The update of an INTEL_KrylovOperator is not supported.")
end
end
end
function LinearAlgebra.mul!(y::oneVector{T}, A::INTEL_KrylovOperator{T}, x::oneVector{T}) where T <: BlasFloat
(length(y) != A.m) && throw(DimensionMismatch("length(y) != A.m"))
(length(x) != A.n) && throw(DimensionMismatch("length(x) != A.n"))
(A.nrhs == 1) || throw(DimensionMismatch("A.nrhs != 1"))
alpha = one(T)
beta = zero(T)
oneMKL.sparse_gemv!(A.transa, alpha, A.matrix, x, beta, y)
end
function LinearAlgebra.mul!(Y::oneMatrix{T}, A::INTEL_KrylovOperator{T}, X::oneMatrix{T}) where T <: BlasFloat
mY, nY = size(Y)
mX, nX = size(X)
(mY != A.m) && throw(DimensionMismatch("mY != A.m"))
(mX != A.n) && throw(DimensionMismatch("mX != A.n"))
(nY == nX == A.nrhs) || throw(DimensionMismatch("nY != A.nrhs or nX != A.nrhs"))
alpha = one(T)
beta = zero(T)
oneMKL.sparse_gemm!(A.transa, 'N', alpha, A.matrix, X, beta, Y)
end
mutable struct INTEL_TriangularOperator{T} <: AbstractTriangularOperator{T}
type::Type{T}
m::Int
n::Int
nrhs::Int
uplo::Char
diag::Char
transa::Char
matrix::oneSparseMatrixCSR{T}
end
eltype(A::INTEL_TriangularOperator{T}) where T = T
size(A::INTEL_TriangularOperator) = (A.m, A.n)
for (SparseMatrixType, BlasType) in ((:(oneSparseMatrixCSR{T}), :BlasFloat),)
@eval begin
function KP.TriangularOperator(A::$SparseMatrixType, uplo::Char, diag::Char; nrhs::Int=1, transa::Char='N') where T <: $BlasType
m,n = size(A)
if nrhs == 1
oneMKL.sparse_optimize_trsv!(uplo, transa, diag, A)
else
oneMKL.sparse_optimize_trsm!(uplo, transa, diag, nrhs, A)
end
return INTEL_TriangularOperator{T}(T, m, n, nrhs, uplo, diag, transa, A)
end
function KP.update!(A::INTEL_TriangularOperator{T}, B::$SparseMatrixType) where T <: $BlasFloat
return error("The update of an INTEL_TriangularOperator is not supported.")
end
end
end
function LinearAlgebra.ldiv!(y::oneVector{T}, A::INTEL_TriangularOperator{T}, x::oneVector{T}) where T <: BlasFloat
(length(y) != A.m) && throw(DimensionMismatch("length(y) != A.m"))
(length(x) != A.n) && throw(DimensionMismatch("length(x) != A.n"))
(A.nrhs == 1) || throw(DimensionMismatch("A.nrhs != 1"))
oneMKL.sparse_trsv!(A.uplo, A.transa, A.diag, one(T), A.matrix, x, y)
end
function LinearAlgebra.ldiv!(Y::oneMatrix{T}, A::INTEL_TriangularOperator{T}, X::oneMatrix{T}) where T <: BlasFloat
mY, nY = size(Y)
mX, nX = size(X)
(mY != A.m) && throw(DimensionMismatch("mY != A.m"))
(mX != A.n) && throw(DimensionMismatch("mX != A.n"))
(nY == nX == A.nrhs) || throw(DimensionMismatch("nY != A.nrhs or nX != A.nrhs"))
oneMKL.sparse_trsm!(A.uplo, A.transa, 'N', A.diag, one(T), A.matrix, X, Y)
end
| KrylovPreconditioners | https://github.com/JuliaSmoothOptimizers/KrylovPreconditioners.jl.git |
|
[
"MPL-2.0"
] | 0.3.0 | f49af35a8dd097d4dccabf94bd2053afbfdab3a4 | code | 1444 | module KrylovPreconditioners
using LinearAlgebra, SparseArrays
using Adapt
using KernelAbstractions
const KA = KernelAbstractions
using LinearAlgebra: checksquare, BlasReal, BlasFloat
import LinearAlgebra: ldiv!
abstract type AbstractKrylovPreconditioner end
export AbstractKrylovPreconditioner
abstract type AbstractKrylovOperator{T} end
export AbstractKrylovOperator
abstract type AbstractTriangularOperator{T} end
export AbstractTriangularOperator
update!(p::AbstractKrylovPreconditioner, A::SparseMatrixCSC) = error("update!() for $(typeof(p)) is not implemented.")
update!(p::AbstractKrylovPreconditioner, A) = error("update!() for $(typeof(p)) is not implemented.")
update!(p::AbstractKrylovOperator, A::SparseMatrixCSC) = error("update!() for $(typeof(p)) is not implemented.")
update!(p::AbstractKrylovOperator, A) = error("update!() for $(typeof(p)) is not implemented.")
export update!, get_timer, reset_timer!
function get_timer(p::AbstractKrylovPreconditioner)
return p.timer_update
end
function reset_timer!(p::AbstractKrylovPreconditioner)
p.timer_update = 0.0
end
function KrylovOperator end
export KrylovOperator
function TriangularOperator end
export TriangularOperator
# Preconditioners
include("ic0.jl")
include("ilu0.jl")
include("blockjacobi.jl")
include("ilu/IncompleteLU.jl")
# Scaling
include("scaling.jl")
export scaling_csr!
# Ordering
# include("ordering.jl")
end # module KrylovPreconditioners
| KrylovPreconditioners | https://github.com/JuliaSmoothOptimizers/KrylovPreconditioners.jl.git |
|
[
"MPL-2.0"
] | 0.3.0 | f49af35a8dd097d4dccabf94bd2053afbfdab3a4 | code | 9993 | export BlockJacobiPreconditioner
using LightGraphs, Metis
"""
    overlap(Graph, subset; level=1)
Given subset embedded within Graph, compute subset2 such that
subset2 contains subset and all of its adjacent vertices.
"""
function overlap(Graph, subset; level=1)
@assert level > 0
subset2 = [LightGraphs.neighbors(Graph, v) for v in subset]
subset2 = reduce(vcat, subset2)
subset2 = unique(vcat(subset, subset2))
level -= 1
if level == 0
return subset2
else
return overlap(Graph, subset2, level=level)
end
end
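# For example, on the path graph 1-2-3-4-5, one level of overlap around the
# subset [3] adds its direct neighbors (a sketch; `path_graph` is a LightGraphs generator):
#
#   g = LightGraphs.path_graph(5)
#   overlap(g, [3])   # -> [3, 2, 4] (up to ordering)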
"""
BlockJacobiPreconditioner
Overlapping-Schwarz preconditioner.
### Attributes
* `nblocks::Int64`: Number of partitions or blocks.
* `blocksize::Int64`: Size of each block.
* `partitions::Vector{Vector{Int64}}`: `npart` partitions stored as lists
* `cupartitions`: `partitions` transferred to the GPU
* `lpartitions::Vector{Int64}`: Length of each partition.
* `culpartitions::Vector{Int64}`: Length of each partition, on the GPU.
* `blocks`: Dense blocks of the block-Jacobi
* `cublocks`: `blocks` transferred to the GPU
* `map`: The partitions as a mapping to construct views
* `cumap`: `map` transferred to the GPU
* `part`: Partitioning as output by Metis
* `cupart`: `part` transferred to the GPU
"""
mutable struct BlockJacobiPreconditioner{AT,GAT,VI,GVI,GMT,MI,GMI} <: AbstractKrylovPreconditioner
nblocks::Int64
blocksize::Int64
partitions::MI
cupartitions::GMI
lpartitions::VI
culpartitions::GVI
rest_size::VI
curest_size::GVI
blocks::AT
cublocks::GAT
map::VI
cumap::GVI
part::VI
cupart::GVI
id::GMT
blocklist::Vector{GMT}
timer_update::Float64
device::KA.Backend
end
function create_blocklist(blocks::Array, npart)
blocklist = Array{Array{Float64,2}}(undef, npart)
for b in 1:npart
blocklist[b] = Matrix{Float64}(undef, size(blocks,1), size(blocks,2))
end
return blocklist
end
function BlockJacobiPreconditioner(J, npart::Int64, device=CPU(), olevel=0)
if npart < 2
error("Number of partitions `npart` should be at" *
"least 2 for partitioning in Metis")
end
adj = build_adjmatrix(SparseMatrixCSC(J))
g = LightGraphs.Graph(adj)
part = Metis.partition(g, npart)
partitions = Vector{Vector{Int64}}()
for i in 1:npart
push!(partitions, [])
end
for (i,v) in enumerate(part)
push!(partitions[v], i)
end
# We keep track of the partition size pre-overlap.
# This will allow us to implement the RAS update.
rest_size = length.(partitions)
# overlap
if olevel > 0
for i in 1:npart
partitions[i] = overlap(g, partitions[i], level=olevel)
end
end
lpartitions = length.(partitions)
blocksize = maximum(length.(partitions))
blocks = zeros(Float64, blocksize, blocksize, npart)
# Get partitions into bit typed structure
    bpartitions = zeros(Int64, blocksize, npart)
for i in 1:npart
bpartitions[1:length(partitions[i]),i] .= Vector{Int64}(partitions[i])
end
id = Matrix{Float64}(I, blocksize, blocksize)
for i in 1:npart
blocks[:,:,i] .= id
end
nmap = 0
for b in partitions
nmap += length(b)
end
map = zeros(Int64, nmap)
part = zeros(Int64, nmap)
for b in 1:npart
for (i,el) in enumerate(partitions[b])
map[el] = i
part[el] = b
end
end
id = adapt(device, id)
cubpartitions = adapt(device, bpartitions)
culpartitions = adapt(device, lpartitions)
curest_size = adapt(device, rest_size)
cublocks = adapt(device, blocks)
cumap = adapt(device, map)
cupart = adapt(device, part)
blocklist = create_blocklist(cublocks, npart)
return BlockJacobiPreconditioner(
npart, blocksize, bpartitions,
cubpartitions, lpartitions,
culpartitions, rest_size,
curest_size, blocks,
cublocks, map,
cumap, part,
cupart, id, blocklist, 0.0,
device
)
end
function BlockJacobiPreconditioner(J::SparseMatrixCSC; nblocks=-1, device=CPU(), noverlaps=0)
n = size(J, 1)
npartitions = if nblocks > 0
nblocks
else
div(n, 32)
end
return BlockJacobiPreconditioner(J, npartitions, device, noverlaps)
end
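# Hedged usage sketch (illustration only; the `demo_` helper is not part of
# the package): build a CPU block-Jacobi preconditioner, factorize its blocks
# with `update!`, and apply it with `mul!`.
function demo_blockjacobi(n=100)
    A = sprandn(n, n, 0.05) + 10.0 * I    # safely diagonally dominant
    P = BlockJacobiPreconditioner(A; nblocks=4)
    update!(P, A)         # invert the diagonal blocks; required before mul!
    b = rand(n)
    y = zeros(n)
    mul!(y, P, b)         # y = P * b, an approximation of A \ b
    return y
end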
Base.eltype(::BlockJacobiPreconditioner) = Float64
# NOTE: Custom kernel to implement block-vector multiplication.
# The blocks have very unbalanced sizes, leading to load imbalance
# between the different threads.
# CUBLAS.gemm_strided_batched has been tested as well, but is
# overall 3x slower than this custom kernel: due to the various sizes
# of the blocks, gemm_strided performs too many unnecessary operations,
# impairing its performance.
@kernel function mblock_b_kernel!(y, b, p_len, rp_len, part, blocks)
j, i = @index(Global, NTuple)
@inbounds len = p_len[i]
@inbounds rlen = rp_len[i]
if j <= rlen
accum = 0.0
idxA = @inbounds part[j, i]
for k=1:len
idxB = @inbounds part[k, i]
@inbounds accum = accum + blocks[j, k, i]*b[idxB]
end
@inbounds y[idxA] = accum
end
end
@kernel function mblock_B_kernel!(y, b, p_len, rp_len, part, blocks)
p = size(b, 2)
i, j = @index(Global, NTuple)
len = p_len[i]
rlen = rp_len[i]
if j <= rlen
for ℓ=1:p
accum = 0.0
idxA = @inbounds part[j, i]
for k=1:len
idxB = @inbounds part[k, i]
@inbounds accum = accum + blocks[j, k, i]*b[idxB,ℓ]
end
@inbounds y[idxA,ℓ] = accum
end
end
end
function LinearAlgebra.mul!(y, C::BlockJacobiPreconditioner, b::Vector{T}) where T
n = size(b, 1)
fill!(y, zero(T))
for i=1:C.nblocks
rlen = C.lpartitions[i]
part = C.partitions[1:rlen, i]
blck = C.blocks[1:rlen, 1:rlen, i]
for j=1:C.rest_size[i]
idx = part[j]
y[idx] += dot(blck[j, :], b[part])
end
end
    return y
end
function LinearAlgebra.mul!(Y, C::BlockJacobiPreconditioner, B::Matrix{T}) where T
n, p = size(B)
fill!(Y, zero(T))
for i=1:C.nblocks
rlen = C.lpartitions[i]
part = C.partitions[1:rlen, i]
blck = C.blocks[1:rlen, 1:rlen, i]
for rhs=1:p
for j=1:C.rest_size[i]
idx = part[j]
Y[idx,rhs] += dot(blck[j, :], B[part,rhs])
end
end
end
    return Y
end
function LinearAlgebra.mul!(y, C::BlockJacobiPreconditioner, b::AbstractVector{T}) where T
device = KA.get_backend(b)
n = size(b, 1)
fill!(y, zero(T))
max_rlen = maximum(C.rest_size)
ndrange = (max_rlen, C.nblocks)
C.timer_update += @elapsed begin mblock_b_kernel!(device)(
y, b, C.culpartitions, C.curest_size,
C.cupartitions, C.cublocks,
ndrange=ndrange,
)
KA.synchronize(device)
end
    return y
end
function LinearAlgebra.mul!(Y, C::BlockJacobiPreconditioner, B::AbstractMatrix{T}) where T
device = KA.get_backend(B)
n, p = size(B)
fill!(Y, zero(T))
max_rlen = maximum(C.rest_size)
ndrange = (C.nblocks, max_rlen)
C.timer_update += @elapsed begin mblock_B_kernel!(device)(
Y, B, C.culpartitions, C.curest_size,
C.cupartitions, C.cublocks,
ndrange=ndrange,
)
KA.synchronize(device)
end
    return Y
end
"""
build_adjmatrix
Build the adjacency matrix of a matrix A corresponding to the undirected graph
"""
function build_adjmatrix(A)
rows = Int64[]
cols = Int64[]
vals = Float64[]
rowsA = rowvals(A)
m, n = size(A)
for i = 1:n
for j in nzrange(A, i)
push!(rows, rowsA[j])
push!(cols, i)
push!(vals, 1.0)
push!(rows, i)
push!(cols, rowsA[j])
push!(vals, 1.0)
end
end
return sparse(rows,cols,vals,size(A,1),size(A,2))
end
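# Hedged sketch (illustration only): the sparsity pattern is symmetrized, so
# each stored entry (i, j) contributes both edges (i, j) and (j, i).
function demo_adjacency()
    A = sparse([1, 2], [2, 3], [5.0, 7.0], 3, 3)
    return build_adjmatrix(A)   # nonzeros at (1,2), (2,1), (2,3), (3,2)
end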
"""
_fillblock_gpu
Fill the dense blocks of the preconditioner from the sparse CSR matrix arrays
"""
@kernel function _fillblock_gpu!(blocks, blocksize, partition, map, rowPtr, colVal, nzVal, part, lpartitions, id)
b,k = @index(Global, NTuple)
for i in 1:blocksize
blocks[k,i,b] = id[k,i]
end
@synchronize
@inbounds if k <= lpartitions[b]
# select row
i = partition[k, b]
# iterate matrix
for row_ptr in rowPtr[i]:(rowPtr[i + 1] - 1)
# retrieve column value
col = colVal[row_ptr]
# iterate partition list and see if pertains to it
for j in 1:lpartitions[b]
if col == partition[j, b]
@inbounds blocks[k, j, b] = nzVal[row_ptr]
end
end
end
end
end
"""
function update!(p, J::SparseMatrixCSC)
Update the preconditioner `p` from the sparse Jacobian `J` in CSC format for the CPU
Note that this implements the same algorithm as for the GPU and becomes very slow on CPU with growing number of blocks.
"""
function update!(p::BlockJacobiPreconditioner, J::SparseMatrixCSC)
# TODO: Enabling threading leads to a crash here
for b in 1:p.nblocks
p.blocks[:,:,b] = p.id[:,:]
for k in 1:p.lpartitions[b]
i = p.partitions[k,b]
for j in J.colptr[i]:J.colptr[i+1]-1
if b == p.part[J.rowval[j]]
p.blocks[p.map[J.rowval[j]], p.map[i], b] = J.nzval[j]
end
end
end
end
for b in 1:p.nblocks
# Invert blocks
p.blocks[:,:,b] .= inv(p.blocks[:,:,b])
end
end
function Base.show(precond::BlockJacobiPreconditioner)
    nblocks = precond.nblocks
    blocksize = precond.blocksize
    println("#partitions: $nblocks, Blocksize: n = ", blocksize,
            " Mbytes = ", (blocksize*blocksize*nblocks*8.0)/(1024.0*1024.0))
    println("Block Jacobi block size: $blocksize")
end
| KrylovPreconditioners | https://github.com/JuliaSmoothOptimizers/KrylovPreconditioners.jl.git |
|
[
"MPL-2.0"
] | 0.3.0 | f49af35a8dd097d4dccabf94bd2053afbfdab3a4 | code | 79 | export kp_ic0
kp_ic0(A) = error("Not implemented for this type $(typeof(A))")
| KrylovPreconditioners | https://github.com/JuliaSmoothOptimizers/KrylovPreconditioners.jl.git |
|
[
"MPL-2.0"
] | 0.3.0 | f49af35a8dd097d4dccabf94bd2053afbfdab3a4 | code | 81 | export kp_ilu0
kp_ilu0(A) = error("Not implemented for this type $(typeof(A))")
| KrylovPreconditioners | https://github.com/JuliaSmoothOptimizers/KrylovPreconditioners.jl.git |
|
[
"MPL-2.0"
] | 0.3.0 | f49af35a8dd097d4dccabf94bd2053afbfdab3a4 | code | 605 | @kernel function scaling_csr_kernel!(rowPtr, nzVal, b)
m = @index(Global, Linear)
max = 0.0
@inbounds for i = rowPtr[m]:(rowPtr[m + 1] - 1)
absnzVal = abs(nzVal[i])
        # Using the absolute value works better in practice in ExaPF; it was
        # initially suspected to be a bug. The plain-value alternative would be:
        # absnzVal = nzVal[i]
if absnzVal > max
max = absnzVal
end
end
if max < 1.0
b[m] /= max
@inbounds for i = rowPtr[m]:(rowPtr[m + 1] - 1)
nzVal[i] /= max
end
end
end
function scaling_csr!(A, b, backend::KA.Backend)
scaling_csr_kernel!(backend)(A.rowPtr, A.nzVal, b; ndrange=length(b))
synchronize(backend)
end
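# Hedged CPU analogue of the kernel above (illustration only, not part of the
# API): scale each CSR row and the matching right-hand side entry by the row's
# maximum absolute value whenever that maximum is below one. Plain vectors
# stand in for the device-side CSR buffers.
function demo_scaling_cpu()
    rowPtr = [1, 3, 4]             # two rows: entries 1:2 and 3:3
    nzVal  = [0.2, 0.4, 3.0]
    b      = [1.0, 1.0]
    for m in 1:length(b)
        mx = maximum(abs, view(nzVal, rowPtr[m]:rowPtr[m+1]-1))
        if mx < 1.0
            b[m] /= mx
            nzVal[rowPtr[m]:rowPtr[m+1]-1] ./= mx
        end
    end
    return nzVal, b                # row 1 scaled by 1/0.4; row 2 unchanged
end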
| KrylovPreconditioners | https://github.com/JuliaSmoothOptimizers/KrylovPreconditioners.jl.git |
|
[
"MPL-2.0"
] | 0.3.0 | f49af35a8dd097d4dccabf94bd2053afbfdab3a4 | code | 465 | using LinearAlgebra: Factorization, AdjointFactorization, LowerTriangular, UnitLowerTriangular, UpperTriangular
using SparseArrays
using Base: @propagate_inbounds
struct ILUFactorization{Tv,Ti} <: Factorization{Tv}
L::SparseMatrixCSC{Tv,Ti}
U::SparseMatrixCSC{Tv,Ti}
end
include("sorted_set.jl")
include("linked_list.jl")
include("sparse_vector_accumulator.jl")
include("insertion_sort_update_vector.jl")
include("application.jl")
include("crout_ilu.jl")
| KrylovPreconditioners | https://github.com/JuliaSmoothOptimizers/KrylovPreconditioners.jl.git |
|
[
"MPL-2.0"
] | 0.3.0 | f49af35a8dd097d4dccabf94bd2053afbfdab3a4 | code | 5853 | import SparseArrays: nnz
import LinearAlgebra: ldiv!
import Base.\
export forward_substitution!, backward_substitution!
export adjoint_forward_substitution!, adjoint_backward_substitution!
"""
Returns the number of nonzeros of the `L` and `U`
factor combined.
Excludes the unit diagonal of the `L` factor,
which is not stored.
"""
nnz(F::ILUFactorization) = nnz(F.L) + nnz(F.U)
function ldiv!(F::ILUFactorization, y::AbstractVecOrMat)
forward_substitution!(F, y)
backward_substitution!(F, y)
end
function ldiv!(F::AdjointFactorization{<:Any,<:ILUFactorization}, y::AbstractVecOrMat)
adjoint_forward_substitution!(F.parent, y)
adjoint_backward_substitution!(F.parent, y)
end
function ldiv!(y::AbstractVector, F::ILUFactorization, x::AbstractVector)
y .= x
ldiv!(F, y)
end
function ldiv!(y::AbstractVector, F::AdjointFactorization{<:Any,<:ILUFactorization}, x::AbstractVector)
y .= x
ldiv!(F, y)
end
function ldiv!(y::AbstractMatrix, F::ILUFactorization, x::AbstractMatrix)
y .= x
ldiv!(F, y)
end
function ldiv!(y::AbstractMatrix, F::AdjointFactorization{<:Any,<:ILUFactorization}, x::AbstractMatrix)
y .= x
ldiv!(F, y)
end
(\)(F::ILUFactorization, y::AbstractVecOrMat) = ldiv!(F, copy(y))
(\)(F::AdjointFactorization{<:Any,<:ILUFactorization}, y::AbstractVecOrMat) = ldiv!(F, copy(y))
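# `F \ b` applies forward substitution with the unit lower factor L, then
# backward substitution with U (both defined below), i.e. it solves
# (I + L) U x = b. A hedged sketch of the equivalence (illustration only):
function demo_ldiv(F::ILUFactorization, b::AbstractVector)
    z = copy(b)
    forward_substitution!(F, z)     # solves (I + L) z = b (unit diagonal)
    backward_substitution!(F, z)    # solves U x = z in place
    return z                        # same result as F \ b
end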
"""
Applies in-place backward substitution with the U factor of F, under the assumptions:
1. U is stored transposed / row-wise
2. U has no lower-triangular elements stored
3. U has (nonzero) diagonal elements stored.
"""
function backward_substitution!(F::ILUFactorization, y::AbstractVector)
U = F.U
@inbounds for col = U.n : -1 : 1
# Substitutions
for idx = U.colptr[col + 1] - 1 : -1 : U.colptr[col] + 1
y[col] -= U.nzval[idx] * y[U.rowval[idx]]
end
# Final value for y[col]
y[col] /= U.nzval[U.colptr[col]]
end
y
end
function backward_substitution!(F::ILUFactorization, y::AbstractMatrix)
U = F.U
p = size(y, 2)
@inbounds for c = 1 : p
@inbounds for col = U.n : -1 : 1
# Substitutions
for idx = U.colptr[col + 1] - 1 : -1 : U.colptr[col] + 1
y[col,c] -= U.nzval[idx] * y[U.rowval[idx],c]
end
# Final value for y[col,c]
y[col,c] /= U.nzval[U.colptr[col]]
end
end
y
end
function backward_substitution!(v::AbstractVector, F::ILUFactorization, y::AbstractVector)
v .= y
backward_substitution!(F, v)
end
function backward_substitution!(v::AbstractMatrix, F::ILUFactorization, y::AbstractMatrix)
v .= y
backward_substitution!(F, v)
end
function adjoint_backward_substitution!(F::ILUFactorization, y::AbstractVector)
L = F.L
@inbounds for col = L.n - 1 : -1 : 1
# Substitutions
for idx = L.colptr[col + 1] - 1 : -1 : L.colptr[col]
y[col] -= L.nzval[idx] * y[L.rowval[idx]]
end
end
y
end
function adjoint_backward_substitution!(F::ILUFactorization, y::AbstractMatrix)
L = F.L
p = size(y, 2)
@inbounds for c = 1 : p
@inbounds for col = L.n - 1 : -1 : 1
# Substitutions
for idx = L.colptr[col + 1] - 1 : -1 : L.colptr[col]
y[col,c] -= L.nzval[idx] * y[L.rowval[idx],c]
end
end
end
y
end
function adjoint_backward_substitution!(v::AbstractVector, F::ILUFactorization, y::AbstractVector)
v .= y
adjoint_backward_substitution!(F, v)
end
function adjoint_backward_substitution!(v::AbstractMatrix, F::ILUFactorization, y::AbstractMatrix)
v .= y
adjoint_backward_substitution!(F, v)
end
"""
Applies in-place forward substitution with the L factor of F, under the assumptions:
1. L is stored column-wise (unlike U)
2. L has no upper triangular elements
3. L has *no* diagonal elements
"""
function forward_substitution!(F::ILUFactorization, y::AbstractVector)
L = F.L
@inbounds for col = 1 : L.n - 1
for idx = L.colptr[col] : L.colptr[col + 1] - 1
y[L.rowval[idx]] -= L.nzval[idx] * y[col]
end
end
y
end
function forward_substitution!(F::ILUFactorization, y::AbstractMatrix)
L = F.L
p = size(y, 2)
@inbounds for c = 1 : p
@inbounds for col = 1 : L.n - 1
for idx = L.colptr[col] : L.colptr[col + 1] - 1
y[L.rowval[idx],c] -= L.nzval[idx] * y[col,c]
end
end
end
y
end
function forward_substitution!(v::AbstractVector, F::ILUFactorization, y::AbstractVector)
v .= y
forward_substitution!(F, v)
end
function forward_substitution!(v::AbstractMatrix, F::ILUFactorization, y::AbstractMatrix)
v .= y
forward_substitution!(F, v)
end
function adjoint_forward_substitution!(F::ILUFactorization, y::AbstractVector)
U = F.U
@inbounds for col = 1 : U.n
# Final value for y[col]
y[col] /= U.nzval[U.colptr[col]]
for idx = U.colptr[col] + 1 : U.colptr[col + 1] - 1
y[U.rowval[idx]] -= U.nzval[idx] * y[col]
end
end
y
end
function adjoint_forward_substitution!(F::ILUFactorization, y::AbstractMatrix)
U = F.U
p = size(y, 2)
@inbounds for c = 1 : p
@inbounds for col = 1 : U.n
# Final value for y[col,c]
y[col,c] /= U.nzval[U.colptr[col]]
for idx = U.colptr[col] + 1 : U.colptr[col + 1] - 1
y[U.rowval[idx],c] -= U.nzval[idx] * y[col,c]
end
end
end
y
end
function adjoint_forward_substitution!(v::AbstractVector, F::ILUFactorization, y::AbstractVector)
v .= y
adjoint_forward_substitution!(F, v)
end
function adjoint_forward_substitution!(v::AbstractMatrix, F::ILUFactorization, y::AbstractMatrix)
v .= y
adjoint_forward_substitution!(F, v)
end
| KrylovPreconditioners | https://github.com/JuliaSmoothOptimizers/KrylovPreconditioners.jl.git |
|
[
"MPL-2.0"
] | 0.3.0 | f49af35a8dd097d4dccabf94bd2053afbfdab3a4 | code | 3193 | export ilu
function lutype(T::Type)
    UT = typeof(oneunit(T) - oneunit(T) * (oneunit(T) / (oneunit(T) + zero(T))))
    LT = typeof(oneunit(UT) / oneunit(UT))
    promote_type(T, LT, UT)
end
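# Hedged examples (illustration only): `lutype` widens element types that are
# not closed under division, so the factors can store the quotients they produce.
function demo_lutype()
    @assert lutype(Int64) == Float64     # 1/1 promotes Int64 to Float64
    @assert lutype(Float32) == Float32   # already closed under division
end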
function ilu(A::SparseMatrixCSC{ATv,Ti}; τ = 1e-3) where {ATv,Ti}
n = size(A, 1)
Tv = lutype(ATv)
L = spzeros(Tv, Ti, n, n)
U = spzeros(Tv, Ti, n, n)
U_row = SparseVectorAccumulator{Tv,Ti}(n)
L_col = SparseVectorAccumulator{Tv,Ti}(n)
A_reader = RowReader(A)
L_reader = RowReader(L, Val{false})
U_reader = RowReader(U, Val{false})
@inbounds for k = Ti(1) : Ti(n)
##
## Copy the new row into U_row and the new column into L_col
##
col::Int = first_in_row(A_reader, k)
while is_column(col)
add!(U_row, nzval(A_reader, col), col)
next_col = next_column(A_reader, col)
next_row!(A_reader, col)
# Check if the next nonzero in this column
# is still above the diagonal
if has_next_nonzero(A_reader, col) && nzrow(A_reader, col) ≤ col
enqueue_next_nonzero!(A_reader, col)
end
col = next_col
end
# Copy the remaining part of the column into L_col
axpy!(one(Tv), A, k, nzidx(A_reader, k), L_col)
##
## Combine the vectors:
##
# U_row[k:n] -= L[k,i] * U[i,k:n] for i = 1 : k - 1
col = first_in_row(L_reader, k)
while is_column(col)
axpy!(-nzval(L_reader, col), U, col, nzidx(U_reader, col), U_row)
next_col = next_column(L_reader, col)
next_row!(L_reader, col)
if has_next_nonzero(L_reader, col)
enqueue_next_nonzero!(L_reader, col)
end
col = next_col
end
# Nothing is happening here when k = n, maybe remove?
# L_col[k+1:n] -= U[i,k] * L[i,k+1:n] for i = 1 : k - 1
if k < n
col = first_in_row(U_reader, k)
while is_column(col)
axpy!(-nzval(U_reader, col), L, col, nzidx(L_reader, col), L_col)
next_col = next_column(U_reader, col)
next_row!(U_reader, col)
if has_next_nonzero(U_reader, col)
enqueue_next_nonzero!(U_reader, col)
end
col = next_col
end
end
##
## Apply a drop rule
##
        U_diag_element = U_row.nzval[k]
# Append the columns
append_col!(U, U_row, k, τ)
append_col!(L, L_col, k, τ, inv(U_diag_element))
# Add the new row and column to U_nonzero_col, L_nonzero_row, U_first, L_first
# (First index *after* the diagonal)
U_reader.next_in_column[k] = U.colptr[k] + 1
if U.colptr[k] < U.colptr[k + 1] - 1
enqueue_next_nonzero!(U_reader, k)
end
L_reader.next_in_column[k] = L.colptr[k]
if L.colptr[k] < L.colptr[k + 1]
enqueue_next_nonzero!(L_reader, k)
end
end
return ILUFactorization(L, U)
end
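# Hedged usage sketch (illustration only; the `demo_` helper is not part of
# the package): factorize a sparse matrix with a drop tolerance τ and solve
# with the resulting incomplete factors.
function demo_ilu(n=100)
    A = sprandn(n, n, 0.05) + 10.0 * I   # safely diagonally dominant
    F = ilu(A; τ = 1e-3)
    b = rand(n)
    x = F \ b        # forward then backward substitution (see application.jl)
    return x, nnz(F)
end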
| KrylovPreconditioners | https://github.com/JuliaSmoothOptimizers/KrylovPreconditioners.jl.git |
|
[
"MPL-2.0"
] | 0.3.0 | f49af35a8dd097d4dccabf94bd2053afbfdab3a4 | code | 2609 | import Base: getindex, setindex!, empty!, Vector
import LinearAlgebra: axpy!
"""
`InsertableSparseVector` accumulates the sparse vector
result from SpMV. Initialization requires O(N) work,
therefore the data structure is reused. Insertion
requires O(nnz) at worst, as insertion sort is used.
"""
struct InsertableSparseVector{Tv}
values::Vector{Tv}
indices::SortedSet
InsertableSparseVector{Tv}(n::Int) where {Tv} = new(Vector{Tv}(undef, n), SortedSet(n))
end
@propagate_inbounds getindex(v::InsertableSparseVector{Tv}, idx::Int) where {Tv} = v.values[idx]
@propagate_inbounds setindex!(v::InsertableSparseVector{Tv}, value::Tv, idx::Int) where {Tv} = v.values[idx] = value
@inline indices(v::InsertableSparseVector) = Vector(v.indices)
function Vector(v::InsertableSparseVector{Tv}) where {Tv}
vals = zeros(Tv, v.indices.N - 1)
for index in v.indices
@inbounds vals[index] = v.values[index]
end
return vals
end
"""
Sets `v[idx] += a` when `idx` is occupied, or sets `v[idx] = a`.
Complexity is O(nnz). The `prev_idx` can be used to start the linear
search at `prev_idx`, useful when multiple already sorted values
are added.
"""
function add!(v::InsertableSparseVector, a, idx::Integer, prev_idx::Integer)
if push!(v.indices, idx, prev_idx)
@inbounds v[idx] = a
else
@inbounds v[idx] += a
end
v
end
"""
Add without providing a previous index.
"""
@propagate_inbounds add!(v::InsertableSparseVector, a, idx::Integer) = add!(v, a, idx, v.indices.N)
function axpy!(a, A::SparseMatrixCSC, column::Integer, start::Integer, y::InsertableSparseVector)
prev_index = y.indices.N
@inbounds for idx = start : A.colptr[column + 1] - 1
add!(y, a * A.nzval[idx], A.rowval[idx], prev_index)
prev_index = A.rowval[idx]
end
y
end
"""
Empties the InsertableSparseVector in O(1) operations.
"""
@inline empty!(v::InsertableSparseVector) = empty!(v.indices)
"""
Basically `A[:, j] = scale * drop(y)`, where drop removes
values less than `drop`.
Resets the `InsertableSparseVector`.
Note: does *not* update `A.colptr` for columns > j + 1,
as that is done during the steps.
"""
function append_col!(A::SparseMatrixCSC{Tv}, y::InsertableSparseVector{Tv}, j::Int, drop::Tv, scale::Tv = one(Tv)) where {Tv}
total = 0
@inbounds for row = y.indices
if abs(y[row]) ≥ drop || row == j
push!(A.rowval, row)
push!(A.nzval, scale * y[row])
total += 1
end
end
@inbounds A.colptr[j + 1] = A.colptr[j] + total
empty!(y)
nothing
end
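# Hedged sketch (illustration only, relying on the package's `SortedSet`
# semantics from sorted_set.jl): accumulate entries; repeated indices are
# summed, and `Vector` densifies the current contents.
function demo_insertable_vector()
    v = InsertableSparseVector{Float64}(5)
    add!(v, 1.0, 3)
    add!(v, 2.0, 3)    # accumulates: v[3] == 3.0
    add!(v, 0.5, 1)
    return Vector(v)   # expected [0.5, 0.0, 3.0, 0.0, 0.0]
end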
| KrylovPreconditioners | https://github.com/JuliaSmoothOptimizers/KrylovPreconditioners.jl.git |