| licenses | version | tree_hash | path | type | size | text | package_name | repo |
|---|---|---|---|---|---|---|---|---|
| sequencelengths 1–3 | stringclasses 677 values | stringlengths 40–40 | stringclasses 1 value | stringclasses 2 values | stringlengths 2–8 | stringlengths 25–67.1M | stringlengths 2–41 | stringlengths 33–86 |
| ["Apache-2.0"] | 0.1.13 | 6b474016099e29cea6f7a910c9c20eb1b539b48c | code | 3175 |
function anim_initialize!(
canvas::Canvas, renderer::GridworldRenderer, domain::Domain, state::State;
callback=nothing, overlay=nothing, options...
)
if canvas.state !== nothing
return anim_transition!(canvas, renderer, domain, state;
callback=callback, overlay=overlay, options...)
end
options = merge(renderer.anim_options, options)
# Render state with caption if provided
captions = get(options, :captions, nothing)
caption = isnothing(captions) ? nothing : get(captions, 1, nothing)
render_state!(canvas, renderer, domain, state; caption=caption, options...)
# Add trail tracks if trail length is non-zero
trail_length = get(options, :trail_length, 0)
if trail_length > 0
trail = Observable([state])
trail_options = merge(renderer.trajectory_options, options)
agent_color = get(trail_options, :agent_color, :black) |> to_color
trail_options[:agent_start_color] = set_alpha(agent_color, 0.0)
object_colors = get(trail_options, :object_colors, Symbol[]) .|> to_color
trail_options[:object_start_colors] =
[set_alpha(c, 0.0) for c in object_colors]
type_colors = get(trail_options, :type_colors, Symbol[]) .|> to_color
trail_options[:type_start_colors] =
[set_alpha(c, 0.0) for c in type_colors]
render_trajectory!(canvas, renderer, domain, trail; trail_options...)
canvas.observables[:trail] = trail
end
# Run callbacks
overlay !== nothing && overlay(canvas)
callback !== nothing && callback(canvas)
return canvas
end
function anim_transition!(
canvas::Canvas, renderer::GridworldRenderer, domain::Domain,
state::State, action::Term = PDDL.no_op, t::Int = 1;
callback=nothing, overlay=nothing, options...
)
options = merge(renderer.anim_options, options)
# Update canvas with new state
canvas.state[] = state
# Update captions if provided
captions = get(options, :captions, nothing)
if !isnothing(captions)
caption = get(captions, t, nothing)
if !isnothing(caption)
canvas.observables[:caption][] = caption
end
end
# Update trail tracks if trail length is non-zero
trail_length = get(options, :trail_length, 0)
if trail_length > 0 && haskey(canvas.observables, :trail)
trail = canvas.observables[:trail]
push!(trail[], state)
if length(trail[]) > trail_length
popfirst!(trail[])
end
notify(trail)
end
# Run callbacks
overlay !== nothing && overlay(canvas)
callback !== nothing && callback(canvas)
return canvas
end
"""
- `captions = nothing`: Captions to display for each timestep, e.g.,
`["t=1", "t=2", ...]`. Can be provided as a vector of strings, or a dictionary
mapping timesteps to strings. If `nothing`, no captions are displayed.
- `trail_length = 0`: Length of trail tracks to display for each agent or
tracked object. If `0`, no trail tracks are displayed.
"""
default_anim_options(R::Type{GridworldRenderer}) = Dict{Symbol,Any}(
:captions => nothing,
:trail_length => 0,
)
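# Usage sketch (illustrative, not from the package docs): these animation options
# can be passed as keyword arguments to the animation functions, e.g. `anim_plan`:
#   anim = anim_plan(renderer, domain, state, plan;
#                    captions = ["t=$t" for t in 1:length(plan)+1],
#                    trail_length = 5)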
| PDDLViz | https://github.com/JuliaPlanners/PDDLViz.jl.git |
| ["Apache-2.0"] | 0.1.13 | 6b474016099e29cea6f7a910c9c20eb1b539b48c | code | 4497 |
export GridworldRenderer
"""
GridworldRenderer(; options...)
Customizable renderer for 2D gridworld domains.
# General options
$(TYPEDFIELDS)
"""
@kwdef mutable struct GridworldRenderer <: Renderer
"Default figure resolution, in pixels."
resolution::Tuple{Int, Int} = (800, 800)
"PDDL fluents that represent the grid layers (walls, etc)."
grid_fluents::Vector{Term} = [pddl"(walls)"]
"Colors for each grid layer."
grid_colors::Vector = [:black]
"Whether the domain has an agent not associated with a PDDL object."
has_agent::Bool = true
"Function that returns the PDDL fluent for the agent's x position."
get_agent_x::Function = () -> pddl"(xpos)"
"Function that returns the PDDL fluent for the agent's y position."
get_agent_y::Function = () -> pddl"(ypos)"
"Takes an object constant and returns the PDDL fluent for its x position."
get_obj_x::Function = obj -> Compound(:xloc, [obj])
"Takes an object constant and returns the PDDL fluent for its y position."
get_obj_y::Function = obj -> Compound(:yloc, [obj])
"Agent renderer, of the form `(domain, state) -> Graphic`."
agent_renderer::Function = (d, s) -> CircleShape(0, 0, 0.3, color=:black)
"Per-type object renderers, of the form `(domain, state, obj) -> Graphic`."
obj_renderers::Dict{Symbol, Function} = Dict{Symbol, Function}(
:object => (d, s, o) -> SquareShape(0, 0, 0.2, color=:gray)
)
"Z-order for object types, from bottom to top."
obj_type_z_order::Vector{Symbol} = collect(keys(obj_renderers))
"List of `(x, y, label, color)` tuples to label locations on the grid."
locations::Vector{Tuple} = Tuple[]
"Whether to show an object inventory for each function in `inventory_fns`."
show_inventory::Bool = false
"Inventory indicator functions of the form `(domain, state, obj) -> Bool`."
inventory_fns::Vector{Function} = Function[]
"Types of objects that can be each inventory."
inventory_types::Vector{Symbol} = Symbol[]
"Axis titles / labels for each inventory."
inventory_labels::Vector{String} = String[]
"Inventory label font size."
inventory_labelsize::Real = 20
"Default options for state rendering."
state_options::Dict{Symbol, Any} =
default_state_options(GridworldRenderer)
"Default options for trajectory rendering."
trajectory_options::Dict{Symbol, Any} =
default_trajectory_options(GridworldRenderer)
"Default options for animation rendering."
anim_options::Dict{Symbol, Any} =
default_anim_options(GridworldRenderer)
end
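# Construction sketch (illustrative field choices; all other fields keep the
# defaults listed above):
#   renderer = GridworldRenderer(
#       resolution = (600, 700),
#       agent_renderer = (d, s) -> HumanGraphic(color=:black),
#       show_inventory = false
#   )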
function new_canvas(renderer::GridworldRenderer)
figure = Figure()
resize!(figure.scene, renderer.resolution)
layout = GridLayout(figure[1,1])
return Canvas(figure, layout)
end
new_canvas(renderer::GridworldRenderer, figure::Figure) =
Canvas(figure, GridLayout(figure[1,1]))
new_canvas(renderer::GridworldRenderer, gridpos::GridPosition) =
Canvas(Makie.get_top_parent(gridpos), GridLayout(gridpos))
function gw_agent_loc(
renderer::GridworldRenderer, state::State,
height = size(state[renderer.grid_fluents[1]], 1)
)
x = state[renderer.get_agent_x()]
y = height - state[renderer.get_agent_y()] + 1
return (x, y)
end
function gw_obj_loc(
renderer::GridworldRenderer, state::State, obj::Const,
height = size(state[renderer.grid_fluents[1]], 1)
)
x = state[renderer.get_obj_x(obj)]
y = height - state[renderer.get_obj_y(obj)] + 1
return (x, y)
end
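# Worked example of the y-flip above: PDDL grids index row 1 at the top, while
# Makie's y axis grows upward, so with `height == 8` a PDDL y-position of 1 maps
# to plot row 8 - 1 + 1 == 8, and a PDDL y-position of 8 maps to plot row 1.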
# State and trajectory rendering / animation
include("state.jl")
include("trajectory.jl")
include("animate.jl")
# Solution rendering
include("path_search.jl")
include("policy.jl")
# Animated solving / planning
include("anim_forward.jl")
include("anim_rtdp.jl")
include("anim_rths.jl")
# Add documentation for auxiliary options
Base.with_logger(Base.NullLogger()) do
@doc """
$(@doc GridworldRenderer)
# State options
These options can be passed as keyword arguments to [`render_state`](@ref):
$(Base.doc(default_state_options, Tuple{Type{GridworldRenderer}}))
# Trajectory options
These options can be passed as keyword arguments to [`render_trajectory`](@ref):
$(Base.doc(default_trajectory_options, Tuple{Type{GridworldRenderer}}))
# Animation options
These options can be passed as keyword arguments to animation functions:
$(Base.doc(default_anim_options, Tuple{Type{GridworldRenderer}}))
"""
GridworldRenderer
end
| PDDLViz | https://github.com/JuliaPlanners/PDDLViz.jl.git |
| ["Apache-2.0"] | 0.1.13 | 6b474016099e29cea6f7a910c9c20eb1b539b48c | code | 7464 |
using SymbolicPlanners: PathNode
function render_sol!(
canvas::Canvas, renderer::GridworldRenderer, domain::Domain,
state::Observable, sol::Observable{<:PathSearchSolution};
options...
)
# Render initial state if not already on canvas
if canvas.state === nothing
render_state!(canvas, renderer, domain, state; options...)
end
# Extract main axis
ax = canvas.blocks[1]
# Update options
options = merge(renderer.trajectory_options, options)
# Render search tree
if get(options, :show_search, true) && !isnothing(sol[].search_tree)
# Set up observables for agent
if renderer.has_agent
agent_locs = Observable(Point2f[])
agent_dirs = Observable(Point2f[])
else
agent_locs = nothing
agent_dirs = nothing
end
# Set up observables for tracked objects
objects = get(options, :tracked_objects, Const[])
types = get(options, :tracked_types, Symbol[])
for ty in types
objs = PDDL.get_objects(domain, state[], ty)
append!(objects, objs)
end
obj_locs = [Observable(Point2f[]) for _ in 1:length(objects)]
obj_dirs = [Observable(Point2f[]) for _ in 1:length(objects)]
# Update observables
on(sol; update = true) do sol
# Rebuild observables for search tree
node_id = isnothing(sol.trajectory) || isempty(sol.trajectory) ?
nothing : hash(sol.trajectory[end])
_build_tree!(agent_locs, agent_dirs, objects, obj_locs, obj_dirs,
renderer, sol, node_id)
# Trigger updates
if renderer.has_agent
notify(agent_locs); notify(agent_dirs)
end
for (ls, ds) in zip(obj_locs, obj_dirs)
notify(ls); notify(ds)
end
end
# Create arrow plots for agent and tracked objects
node_marker = get(options, :search_marker, '⦿')
node_size = get(options, :search_size, 0.3)
edge_arrow = get(options, :search_arrow, '▷')
cmap = get(options, :search_colormap, cgrad([:blue, :red]))
if renderer.has_agent
colors = @lift 1:length($agent_locs)
canvas.plots[:agent_search_nodes] = arrows!(
ax, agent_locs, agent_dirs, colormap=cmap, color=colors,
arrowsize=node_size, arrowhead=node_marker,
markerspace=:data, align=:head
)
edge_locs = @lift $agent_locs .- ($agent_dirs .* 0.5)
edge_rotations = @lift [atan(d[2], d[1]) for d in $agent_dirs]
edge_markers = @lift map($agent_dirs) do d
d == Point2f(0, 0) ? node_marker : edge_arrow
end
canvas.plots[:agent_search_arrows] = scatter!(
ax, edge_locs, rotation=edge_rotations,
marker=edge_markers, markersize=node_size, markerspace=:data,
colormap=cmap, color=colors
)
end
for (obj, ls, ds) in zip(objects, obj_locs, obj_dirs)
colors = @lift 1:length($ls)
canvas.plots[Symbol("$(obj)_search_nodes")] = arrows!(
ax, ls, ds, colormap=cmap, color=colors, markerspace=:data,
arrowsize=node_size, arrowhead=node_marker, align=:head
)
e_ls = @lift $ls .- ($ds .* 0.5)
e_rs = @lift [atan(d[2], d[1]) for d in $ds]
e_ms = @lift map($ds) do d
d == Point2f(0, 0) ? node_marker : edge_arrow
end
canvas.plots[Symbol("$(obj)_search_arrows")] = scatter!(
ax, e_ls, rotation=e_rs, marker=e_ms, markersize=node_size,
markerspace=:data, colormap=cmap, color=colors
)
end
end
# Render trajectory
if get(options, :show_trajectory, true) && !isnothing(sol[].trajectory)
trajectory = @lift($sol.trajectory)
render_trajectory!(canvas, renderer, domain, trajectory; options...)
end
return canvas
end
@inline function _build_tree!(
agent_locs, agent_dirs, objects, obj_locs, obj_dirs,
renderer::Renderer,
sol::PathSearchSolution,
node_id::Union{Nothing, UInt} = nothing;
)
# Determine node expansion order
if !isnothing(sol.search_order)
node_ids = sol.status == :in_progress ?
sol.search_order : copy(sol.search_order)
elseif keytype(sol.search_tree) == keytype(sol.search_frontier)
# Expanded nodes are those in the search tree but not on the frontier
node_ids = collect(keys(sol.search_tree))
setdiff!(node_ids, keys(sol.search_frontier))
elseif keytype(sol.search_tree) == eltype(sol.search_frontier)
node_ids = collect(keys(sol.search_tree))
setdiff!(node_ids, sol.search_frontier)
end
if sol.status != :in_progress && !isnothing(node_id)
push!(node_ids, node_id)
end
# Add nodes to tree in order
_build_tree!(agent_locs, agent_dirs, objects, obj_locs, obj_dirs,
renderer, sol.search_tree, node_ids)
return nothing
end
@inline function _build_tree!(
agent_locs, agent_dirs, objects, obj_locs, obj_dirs,
renderer::Renderer,
search_tree::Dict{UInt,<:PathNode},
node_ids::Vector{UInt}
)
# Empty existing observables
if renderer.has_agent
empty!(agent_locs[])
empty!(agent_dirs[])
end
for i in eachindex(objects)
empty!(obj_locs[i][])
empty!(obj_dirs[i][])
end
# Iterate over nodes in search tree (in order if available)
for id in node_ids
_add_node_to_tree!(agent_locs, agent_dirs, objects, obj_locs, obj_dirs,
renderer, search_tree, id)
end
return nothing
end
@inline function _add_node_to_tree!(
agent_locs, agent_dirs, objects, obj_locs, obj_dirs,
renderer::Renderer, sol::PathSearchSolution, node_id::UInt
)
_add_node_to_tree!(agent_locs, agent_dirs, objects, obj_locs, obj_dirs,
renderer, sol.search_tree, node_id)
return nothing
end
@inline function _add_node_to_tree!(
agent_locs, agent_dirs, objects, obj_locs, obj_dirs,
renderer::Renderer, search_tree::Dict{UInt,<:PathNode}, node_id::UInt
)
# Extract current and previous states
node = search_tree[node_id]
state = node.state
prev_state = isnothing(node.parent) ?
state : search_tree[node.parent.id].state
# Update agent observables with current node
_add_node_to_tree!(agent_locs, agent_dirs, objects, obj_locs, obj_dirs,
renderer, state, prev_state)
return nothing
end
@inline function _add_node_to_tree!(
agent_locs, agent_dirs, objects, obj_locs, obj_dirs,
renderer::Renderer, state::State, prev_state::State = state
)
height = size(state[renderer.grid_fluents[1]], 1)
# Update agent observables with current node
if renderer.has_agent
loc = gw_agent_loc(renderer, state, height)
prev_loc = gw_agent_loc(renderer, prev_state, height)
push!(agent_locs[], loc)
push!(agent_dirs[], loc .- prev_loc)
end
# Update object observables with current node
for (i, obj) in enumerate(objects)
loc = gw_obj_loc(renderer, state, obj, height)
prev_loc = gw_obj_loc(renderer, prev_state, obj, height)
push!(obj_locs[i][], loc)
push!(obj_dirs[i][], loc .- prev_loc)
end
return nothing
end
| PDDLViz | https://github.com/JuliaPlanners/PDDLViz.jl.git |
| ["Apache-2.0"] | 0.1.13 | 6b474016099e29cea6f7a910c9c20eb1b539b48c | code | 13607 |
using SymbolicPlanners:
get_value, has_cached_value, get_action, best_action
function render_sol!(
canvas::Canvas, renderer::GridworldRenderer, domain::Domain,
state::Observable, sol::Observable{<:PolicySolution};
options...
)
render_policy_heatmap!(canvas, renderer, domain, state, sol; options...)
return canvas
end
function render_sol!(
canvas::Canvas, renderer::GridworldRenderer, domain::Domain,
state::Observable, sol::Observable{<:ReusableTreePolicy};
options...
)
# Render heatmap
render_policy_heatmap!(canvas, renderer, domain, state, sol; options...)
# Render search tree
if get(options, :show_search, true)
search_sol = @lift($sol.search_sol)
render_sol!(canvas, renderer, domain, state, search_sol;
show_search=true, options...)
end
# Render reusable tree of paths to the goal
if get(options, :show_goal_tree, false)
render_goal_tree!(canvas, renderer, domain, state, sol; options...)
end
return canvas
end
function render_policy_heatmap!(
canvas::Canvas, renderer::GridworldRenderer, domain::Domain,
state::Observable, sol::Observable{<:PolicySolution};
options...
)
# Render initial state if not already on canvas
if canvas.state === nothing
render_state!(canvas, renderer, domain, state; options...)
end
# Extract main axis
ax = canvas.blocks[1]
# Update options
options = merge(renderer.trajectory_options, options)
max_states = get(options, :max_policy_states, 200)
arrowmarker = get(options, :track_arrowmarker, '▶')
stopmarker = get(options, :track_stopmarker, '⦿')
# Set up observables for agent
if renderer.has_agent
agent_locs = Observable(Point2f[])
agent_values = Observable(Float64[])
agent_markers = Observable(Char[])
agent_rotations = Observable(Float64[])
end
# Set up observables for tracked objects
objects = get(options, :tracked_objects, Const[])
types = get(options, :tracked_types, Symbol[])
for ty in types
objs = PDDL.get_objects(domain, state[], ty)
append!(objects, objs)
end
obj_locs = [Observable(Point2f[]) for _ in 1:length(objects)]
obj_values = [Observable(Float64[]) for _ in 1:length(objects)]
# Update observables for reachable states
show_cached_only = get(options, :show_cached_only, false)
onany(sol, state) do sol, init_state
# Update agent observables
if renderer.has_agent
# Clear previous values
empty!(agent_locs[])
empty!(agent_markers[])
empty!(agent_rotations[])
empty!(agent_values[])
# Iterate over reachable agent locations up to limit
queue = [init_state]
visited = Set{UInt}()
while !isempty(queue) && length(visited) < max_states
state = popfirst!(queue)
state_id = hash(state)
state_id in visited && continue
push!(visited, state_id)
# Get agent location
height = size(state[renderer.grid_fluents[1]], 1)
loc = Point2f(gw_agent_loc(renderer, state, height))
loc_exists = loc in agent_locs[]
if show_cached_only && state != init_state
# Terminate if state has no cached value
!has_cached_value(sol, state) && continue
else
# Terminate if location has already been encountered
loc_exists && continue
end
# Get state value and best action
val = get_value(sol, state)
best_act = best_action(sol, state)
# Append agent location and value, etc.
next_state = transition(domain, state, best_act)
if !loc_exists # Only add if this location is new
push!(agent_locs[], loc)
next_loc = Point2f(gw_agent_loc(renderer, next_state, height))
marker = loc == next_loc ? stopmarker : arrowmarker
push!(agent_markers[], marker)
rotation = atan(next_loc[2] - loc[2], next_loc[1] - loc[1])
push!(agent_rotations[], rotation)
push!(agent_values[], val)
end
# Add next states to queue
push!(queue, next_state)
for act in available(domain, state)
next_state = transition(domain, state, act)
push!(queue, next_state)
end
end
# Trigger updates
notify(agent_locs)
notify(agent_markers)
notify(agent_rotations)
notify(agent_values)
end
# Update observables for tracked objects
for (obj, locs, vals) in zip(objects, obj_locs, obj_values)
# Clear previous values
empty!(locs[])
empty!(vals[])
# Add initial location and value
if show_cached_only && has_cached_value(sol, init_state)
push!(locs[], Point2f(gw_obj_loc(renderer, init_state, obj)))
push!(vals[], get_value(sol, init_state))
end
# Add locations and values of neighboring states
for act in available(domain, init_state)
next_state = transition(domain, init_state, act)
show_cached_only && !has_cached_value(sol, next_state) && continue
next_loc = Point2f(gw_obj_loc(renderer, next_state, obj))
next_loc in locs[] && continue
push!(locs[], next_loc)
push!(vals[], get_value(sol, next_state))
end
# Trigger updates
notify(locs)
notify(vals)
end
end
notify(sol)
# Render state value heatmap
if get(options, :show_value_heatmap, true)
cmap = get(options, :value_colormap) do
cgrad(Makie.ColorSchemes.viridis, alpha=0.5)
end
if renderer.has_agent
marker = _policy_heatmap_marker()
plt = scatter!(ax, agent_locs, color=agent_values, colormap=cmap,
marker=marker, markerspace=:data, markersize=1.0)
Makie.translate!(plt, 0.0, 0.0, -0.5)
canvas.plots[:agent_policy_values] = plt
end
for (i, obj) in enumerate(objects)
marker = _policy_heatmap_marker(length(objects), i)
locs, vals = obj_locs[i], obj_values[i]
plt = scatter!(ax, locs, color=vals, colormap=cmap,
marker=marker, markerspace=:data, markersize=1.0)
Makie.translate!(plt, 0.0, 0.0, -0.5)
canvas.plots[Symbol("$(obj)_policy_values")] = plt
end
end
# Render best agent actions at each location
if get(options, :show_actions, true) && renderer.has_agent
markersize = get(options, :track_markersize, 0.3)
color = get(options, :agent_color, :black)
plt = scatter!(ax, agent_locs, marker=agent_markers,
rotations=agent_rotations, markersize=markersize,
color=color, markerspace=:data)
canvas.plots[:agent_policy_actions] = plt
end
# Render state value labels at each location
if get(options, :show_value_labels, true)
if renderer.has_agent
offset = _policy_label_offset()
label_locs = @lift $agent_locs .+ offset
labels = @lift map($agent_values) do val
@sprintf("%.1f", val)
end
plt = text!(ax, label_locs; text=labels, color=:black,
fontsize=0.2, markerspace=:data,
align=(:center, :center))
canvas.plots[:agent_policy_labels] = plt
end
for (i, obj) in enumerate(objects)
locs, vals = obj_locs[i], obj_values[i]
label_locs = @lift $locs .+ _policy_label_offset(length(objects), i)
labels = @lift map($vals) do val
@sprintf("%.1f", val)
end
fontsize = length(objects) > 2 ? 0.15 : 0.2
plt = text!(ax, label_locs; text=labels, color=:black,
fontsize=fontsize, markerspace=:data,
align=(:center, :center))
canvas.plots[Symbol("$(obj)_policy_labels")] = plt
end
end
return canvas
end
@inline function _policy_heatmap_marker(n::Int = 1, i::Int = 1)
if n <= 1 # Square marker for single agent
return Polygon(Point2f.([(-.5, -.5), (-.5, .5), (.5, .5), (.5, -.5)]))
elseif n <= 2 # Bottom left and top right triangles for 2 agents
if i == 1
return Polygon(Point2f.([(-.5, -.5), (-.5, .5), (.5, -.5)]))
elseif i == 2
return Polygon(Point2f.([(.5, .5), (.5, -.5), (-.5, .5)]))
end
elseif n <= 4 # Four triangles for 4 or less agents
if i == 1
return Polygon(Point2f.([(-.5, -.5), (-.5, .5), (0.0, 0.0)]))
elseif i == 2
return Polygon(Point2f.([(-.5, .5), (.5, .5), (0.0, 0.0)]))
elseif i == 3
return Polygon(Point2f.([(.5, .5), (.5, -.5), (0.0, 0.0)]))
elseif i == 4
return Polygon(Point2f.([(.5, -.5), (-.5, -.5), (0.0, 0.0)]))
end
else # Circle marker for more than 4 agents
angle = 2*pi*i/n
x, y = 2/n*cos(angle), 2/n*sin(angle)
points = decompose(Point2f, Circle(Point2f(x, y), 1/n))
return Polygon(points)
end
end
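# Illustrative behavior of the splitting above: with n == 2 the unit cell is
# divided into two complementary triangles, so `_policy_heatmap_marker(2, 1)` and
# `_policy_heatmap_marker(2, 2)` tile the cell without overlap; for n > 4 each
# entity instead gets a small circle offset around the cell center.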
@inline function _policy_label_offset(n::Int=1, i::Int=1)
if n <= 1
return Point2f(0.0, 0.25)
elseif n <= 2
if i == 1
return Point2f(-0.2, -0.2)
elseif i == 2
return Point2f(0.2, 0.2)
end
elseif n <= 4
if i == 1
return Point2f(-0.3, 0.0)
elseif i == 2
return Point2f(0.0, 0.3)
elseif i == 3
return Point2f(0.3, 0.0)
elseif i == 4
return Point2f(0.0, -0.3)
end
else
angle = 2*pi*i/n
x, y = 2/n*cos(angle), 2/n*sin(angle)
return Point2f(x, y)
end
end
function render_goal_tree!(
canvas::Canvas, renderer::GridworldRenderer, domain::Domain,
state::Observable, sol::Observable{<:ReusableTreePolicy};
options...
)
# Extract main axis
ax = canvas.blocks[1]
# Set up observables for agent
if renderer.has_agent
agent_locs = Observable(Point2f[])
agent_dirs = Observable(Point2f[])
else
agent_locs = nothing
agent_dirs = nothing
end
# Set up observables for tracked objects
objects = get(options, :tracked_objects, Const[])
types = get(options, :tracked_types, Symbol[])
for ty in types
objs = PDDL.get_objects(domain, state[], ty)
append!(objects, objs)
end
obj_locs = [Observable(Point2f[]) for _ in 1:length(objects)]
obj_dirs = [Observable(Point2f[]) for _ in 1:length(objects)]
# Update observables
on(sol; update = true) do sol
# Rebuild observables for reusable tree
node_ids = collect(keys(sol.goal_tree))
_build_tree!(agent_locs, agent_dirs, objects, obj_locs, obj_dirs,
renderer, sol.goal_tree, node_ids)
# Add current state to tree only if goal tree is empty
if isempty(node_ids)
_add_node_to_tree!(agent_locs, agent_dirs,
objects, obj_locs, obj_dirs, renderer, state[])
end
# Trigger updates
if renderer.has_agent
notify(agent_locs); notify(agent_dirs)
end
for (ls, ds) in zip(obj_locs, obj_dirs)
notify(ls); notify(ds)
end
end
# Create arrow plots for agent and tracked objects
node_marker = get(options, :goal_tree_marker, '⦿')
node_size = get(options, :goal_tree_size, 0.3)
edge_arrow = get(options, :goal_tree_arrow, '◁')
color = get(options, :goal_tree_color, to_color_obs(:black))
if renderer.has_agent
canvas.plots[:agent_goal_tree_nodes] = arrows!(
ax, agent_locs, agent_dirs, color=color,
arrowsize=node_size, arrowhead=node_marker,
markerspace=:data, align=:head
)
edge_locs = @lift $agent_locs .- ($agent_dirs .* 0.5)
edge_rotations = @lift [atan(d[2], d[1]) for d in $agent_dirs]
edge_markers = @lift map($agent_dirs) do d
d == Point2f(0, 0) ? node_marker : edge_arrow
end
canvas.plots[:agent_goal_tree_arrows] = scatter!(
ax, edge_locs, rotation=edge_rotations, color=color,
marker=edge_markers, markersize=node_size, markerspace=:data,
)
end
for (obj, ls, ds) in zip(objects, obj_locs, obj_dirs)
canvas.plots[Symbol("$(obj)_goal_tree_nodes")] = arrows!(
ax, ls, ds, color=color, markerspace=:data,
arrowsize=node_size, arrowhead=node_marker, align=:head
)
e_ls = @lift $ls .- ($ds .* 0.5)
e_rs = @lift [atan(d[2], d[1]) for d in $ds]
e_ms = @lift map($ds) do d
d == Point2f(0, 0) ? node_marker : edge_arrow
end
canvas.plots[Symbol("$(obj)_goal_tree_arrows")] = scatter!(
ax, e_ls, rotation=e_rs, marker=e_ms, markersize=node_size,
markerspace=:data, color=color
)
end
return canvas
end
| PDDLViz | https://github.com/JuliaPlanners/PDDLViz.jl.git |
| ["Apache-2.0"] | 0.1.13 | 6b474016099e29cea6f7a910c9c20eb1b539b48c | code | 7773 |
function render_state!(
canvas::Canvas, renderer::GridworldRenderer,
domain::Domain, state::Observable;
replace::Bool=true, options...
)
# Update options
options = merge(renderer.state_options, options)
# Set canvas state observable (replacing any previous state)
if replace || canvas.state === nothing
canvas.state = state
end
# Extract or construct main axis
ax = get(canvas.blocks, 1) do
_ax = Axis(canvas.layout[1,1], aspect=DataAspect(),
xzoomlock=true, xpanlock=true, xrectzoom=false,
yzoomlock=true, ypanlock=true, yrectzoom=false,
xgridstyle=:dash, ygridstyle=:dash,
xgridcolor=:black, ygridcolor=:black)
hidedecorations!(_ax, grid=false)
push!(canvas.blocks, _ax)
return _ax
end
# Get grid dimensions from PDDL state
base_grid = @lift $state[renderer.grid_fluents[1]]
height = @lift size($base_grid, 1)
width = @lift size($base_grid, 2)
# Render grid variables as heatmaps
if get(options, :show_grid, true)
for (i, grid_fluent) in enumerate(renderer.grid_fluents)
grid = @lift reverse(transpose(float($state[grid_fluent])), dims=2)
cmap = cgrad([:transparent, renderer.grid_colors[i]])
crange = @lift (min(minimum($grid), 0), max(maximum($grid), 1))
plt = heatmap!(ax, grid, colormap=cmap, colorrange=crange)
canvas.plots[Symbol("grid_$(grid_fluent)")] = plt
end
end
# Set ticks to show grid
map!(w -> (1:w-1) .+ 0.5, ax.xticks, width)
map!(h -> (1:h-1) .+ 0.5, ax.yticks, height)
xlims!(ax, 0.5, width[] + 0.5)
ylims!(ax, 0.5, height[] + 0.5)
# Render locations
if get(options, :show_locations, true)
for (x, y, label, color) in renderer.locations
_y = @lift $height - y + 1
fontsize = 1 / (1.5*length(label)^0.5)
text!(ax, x, _y; text=label, color=color, align=(:center, :center),
markerspace=:data, fontsize=fontsize)
end
end
# Render objects
default_obj_renderer(d, s, o) = SquareShape(0, 0, 0.2, color=:gray)
if get(options, :show_objects, true)
# Render objects with type-specific graphics
for type in renderer.obj_type_z_order
for obj in PDDL.get_objects(domain, state[], type)
r = get(renderer.obj_renderers, type, default_obj_renderer)
graphic = @lift begin
x, y = gw_obj_loc(renderer, $state, obj, $height)
translate(r(domain, $state, obj), x, y)
end
plt = graphicplot!(ax, graphic)
canvas.plots[Symbol("$(obj)_graphic")] = plt
end
end
end
# Render agent
if renderer.has_agent && get(options, :show_agent, true)
graphic = @lift begin
x, y = gw_agent_loc(renderer, $state, $height)
translate(renderer.agent_renderer(domain, $state), x, y)
end
plt = graphicplot!(ax, graphic)
canvas.plots[:agent_graphic] = plt
end
# Render inventories
if renderer.show_inventory && get(options, :show_inventory, true)
inventory_labelsize = renderer.inventory_labelsize
colsize!(canvas.layout, 1, Auto(1))
rowsize!(canvas.layout, 1, Auto(1))
for (i, inventory_fn) in enumerate(renderer.inventory_fns)
# Extract objects
ty = get(renderer.inventory_types, i, :object)
sorted_objs = sort(PDDL.get_objects(domain, state[], ty), by=string)
# Extract or construct axis for each inventory
ax_i = get(canvas.blocks, i+1) do
title = get(renderer.inventory_labels, i, "Inventory")
_ax = Axis(canvas.layout[i+1, 1], aspect=DataAspect(),
title=title, titlealign=:left,
titlefont=:regular, titlesize=inventory_labelsize,
xzoomlock=true, xpanlock=true, xrectzoom=false,
yzoomlock=true, ypanlock=true, yrectzoom=false,
xgridstyle=:solid, ygridstyle=:solid,
xgridcolor=:black, ygridcolor=:black)
hidedecorations!(_ax, grid=false)
push!(canvas.blocks, _ax)
return _ax
end
# Render inventory as heatmap
inventory_size = @lift max(length(sorted_objs), $width)
cmap = cgrad([:transparent, :black])
heatmap!(ax_i, @lift(zeros($inventory_size, 1)),
colormap=cmap, colorrange=(0, 1))
map!(w -> (1:w-1) .+ 0.5, ax_i.xticks, inventory_size)
map!(ax_i.limits, inventory_size) do w
return ((0.5, w + 0.5), nothing)
end
ax_i.yticks = [0.5, 1.5]
# Compute object locations
obj_locs = @lift begin
locs = Int[]
n = 0
for obj in sorted_objs
if inventory_fn(domain, $state, obj)
push!(locs, n += 1)
else
push!(locs, -1)
end
end
return locs
end
# Render objects in inventory
for (j, obj) in enumerate(sorted_objs)
type = PDDL.get_objtype(state[], obj)
r = get(renderer.obj_renderers, type, default_obj_renderer)
graphic = @lift begin
x = $obj_locs[j]
g = translate(r(domain, $state, obj), x, 1)
g.attributes[:visible] = x > 0
g
end
graphicplot!(ax_i, graphic)
end
# Resize row
row_height = 1/height[] * width[]/inventory_size[]
rowsize!(canvas.layout, i+1, Auto(row_height))
end
rowgap!(canvas.layout, 10)
resize_to_layout!(canvas.figure)
end
# Render caption
if get(options, :caption, nothing) !== nothing
caption = options[:caption]
_ax = canvas.blocks[end]
_ax.xlabel = caption
_ax.xlabelvisible = true
_ax.xlabelfont = get(options, :caption_font, :regular)
_ax.xlabelsize = get(options, :caption_size, 24)
_ax.xlabelcolor = get(options, :caption_color, :black)
_ax.xlabelpadding = get(options, :caption_padding, 12)
_ax.xlabelrotation = get(options, :caption_rotation, 0)
# Store observable for caption in canvas
canvas.observables[:caption] = _ax.xlabel
end
# Return the canvas
return canvas
end
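# Usage sketch (illustrative): the state options documented below can be passed
# as keyword arguments, e.g. to render a state with a caption and without the
# inventory panel:
#   render_state!(canvas, renderer, domain, state;
#                 caption = "t = 0", show_inventory = false)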
"""
- `show_grid::Bool = true`: Whether to show grid variables (walls, etc).
- `show_agent::Bool = true`: Whether to show the agent.
- `show_objects::Bool = true`: Whether to show objects.
- `show_locations::Bool = true`: Whether to show locations.
- `show_inventory::Bool = true`: Whether to show inventories.
- `caption = nothing`: Caption to display below the figure.
- `caption_font = :regular`: Font for the caption.
- `caption_size = 24`: Font size for the caption.
- `caption_color = :black`: Font color for the caption.
- `caption_padding = 12`: Padding for the caption.
- `caption_rotation = 0`: Rotation for the caption.
"""
default_state_options(R::Type{GridworldRenderer}) = Dict{Symbol,Any}(
:show_grid => true,
:show_agent => true,
:show_objects => true,
:show_locations => true,
:show_inventory => true,
:caption => nothing,
:caption_font => :regular,
:caption_size => 24,
:caption_color => :black,
:caption_padding => 12,
:caption_rotation => 0
)
)
| PDDLViz | https://github.com/JuliaPlanners/PDDLViz.jl.git |
| ["Apache-2.0"] | 0.1.13 | 6b474016099e29cea6f7a910c9c20eb1b539b48c | code | 6485 |
function render_trajectory!(
canvas::Canvas, renderer::GridworldRenderer,
domain::Domain, trajectory::Observable;
options...
)
# Render initial state if not already on canvas
if canvas.state === nothing
render_state!(canvas, renderer, domain, trajectory[][1]; options...)
end
# Update options
options = merge(renderer.trajectory_options, options)
# Extract main axis and grid height
ax = canvas.blocks[1]
# Determine set of objects to track
state = trajectory[][1]
objects = get(options, :tracked_objects, Const[])
obj_colors = get(options, :object_colors, Symbol[]) .|> to_color_obs
obj_s_colors = get(options, :object_start_colors, obj_colors) .|> to_color_obs
types = get(options, :tracked_types, Symbol[])
type_colors = get(options, :type_colors, Symbol[]) .|> to_color_obs
type_s_colors = get(options, :type_start_colors, type_colors) .|> to_color_obs
for (ty, col, s_col) in zip(types, type_colors, type_s_colors)
objs = PDDL.get_objects(domain, state, ty)
append!(objects, objs)
append!(obj_colors, fill(col, length(objs)))
append!(obj_s_colors, fill(s_col, length(objs)))
end
# Construct observables for object locations and markers
obj_locations = [Observable(Point2f[]) for _ in 1:length(objects)]
obj_markers = [Observable(Char[]) for _ in 1:length(objects)]
obj_rotations = [Observable(Float64[]) for _ in 1:length(objects)]
# Construct observables for agent locations and markers
locations = Observable(Point2f[])
markers = Observable(Char[])
rotations = Observable(Float64[])
# Fill observables
arrowmarker = get(options, :track_arrowmarker, '▶')
stopmarker = get(options, :track_stopmarker, '⦿')
on(trajectory; update = true) do trajectory
# Clear previous locations and markers
for (ls, ms, rs) in zip(obj_locations, obj_markers, obj_rotations)
empty!(ls[]); empty!(ms[]); empty!(rs[])
end
if renderer.has_agent
empty!(locations[]); empty!(markers[]); empty!(rotations[])
end
# Add locations and markers for each timestep
for (t, state) in enumerate(trajectory)
next_state = trajectory[min(t+1, length(trajectory))]
height = size(state[renderer.grid_fluents[1]], 1)
# Add markers for tracked objects
for (i, obj) in enumerate(objects)
loc = gw_obj_loc(renderer, state, obj, height)
next_loc = gw_obj_loc(renderer, next_state, obj, height)
push!(obj_locations[i][], loc)
marker = loc == next_loc ? stopmarker : arrowmarker
push!(obj_markers[i][], marker)
rotation = atan(next_loc[2] - loc[2], next_loc[1] - loc[1])
push!(obj_rotations[i][], rotation)
end
# Add markers for agent
if renderer.has_agent
loc = gw_agent_loc(renderer, state, height)
next_loc = gw_agent_loc(renderer, next_state, height)
push!(locations[], loc)
marker = loc == next_loc ? stopmarker : arrowmarker
push!(markers[], marker)
rotation = atan(next_loc[2] - loc[2], next_loc[1] - loc[1])
push!(rotations[], rotation)
end
end
# Trigger updates
for (ls, ms, rs) in zip(obj_locations, obj_markers, obj_rotations)
notify(ls); notify(ms); notify(rs)
end
if renderer.has_agent
notify(locations); notify(markers); notify(rotations)
end
end
markersize = get(options, :track_markersize, 0.3)
# Plot agent locations over time
if renderer.has_agent
stop_color = get(options, :agent_color, :black) |> to_color_obs
start_color = get(options, :agent_start_color, stop_color) |> to_color_obs
if start_color != stop_color
color = @lift if length($trajectory) > 1
cmap = cgrad([$start_color, $stop_color])
cmap[range(0, 1; length=length($trajectory))]
else
[$stop_color]
end
else
color = stop_color
end
plt = scatter!(ax, locations, marker=markers, rotations=rotations,
markersize=markersize, color=color, markerspace=:data)
canvas.plots[:agent_trajectory] = plt
end
# Plot tracked object locations over time
for (i, (col1, col2)) in enumerate(zip(obj_s_colors, obj_colors))
if col1 != col2
color = @lift if length($trajectory) > 1
cmap = cgrad([$col1, $col2])
cmap[range(0, 1; length=length($trajectory))]
else
[$col2]
end
else
color = col2
end
plt = scatter!(ax, obj_locations[i], marker=obj_markers[i],
rotations=obj_rotations[i], markersize=markersize,
color=color, markerspace=:data)
canvas.plots[Symbol("$(objects[i])_trajectory")] = plt
end
# Return the canvas
return canvas
end
"""
- `:agent_color = :black`: Marker color of agent tracks.
- `:agent_start_color = agent_color`: Marker color of agent tracks at the start
of the trajectory, which fade into the main color.
- `:tracked_objects = Const[]`: Moving objects to plot marker tracks for.
- `:object_colors = Symbol[]`: Marker colors to use for tracked objects.
- `:object_start_colors = object_colors`: Marker colors to use for tracked
objects at the start of the trajectory, which fade into the main color.
- `:tracked_types = Symbol[]`: Types of objects to track.
- `:type_colors = Symbol[]`: Marker colors to use for tracked object types.
- `:type_start_colors = type_colors`: Marker colors to use for tracked object
types at the start of the trajectory, which fade into the main color.
- `:track_arrowmarker = '▶'`: Marker to use for directed tracks.
- `:track_stopmarker = '⦿'`: Marker to use for stationary tracks.
- `:track_markersize = 0.3`: Size of track markers.
"""
default_trajectory_options(R::Type{GridworldRenderer}) = Dict{Symbol,Any}(
:agent_color => :black,
:tracked_objects => Const[],
:object_colors => Symbol[],
:tracked_types => Symbol[],
:type_colors => Symbol[],
:track_arrowmarker => '▶',
:track_stopmarker => '⦿',
:track_markersize => 0.3,
)
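# Usage sketch (illustrative colors): fade the agent's track in from transparent
# by giving the start of the trajectory a zero-alpha variant of its color:
#   render_trajectory!(canvas, renderer, domain, trajectory;
#                      agent_color = :red,
#                      agent_start_color = set_alpha(to_color(:red), 0.0))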
| PDDLViz | https://github.com/JuliaPlanners/PDDLViz.jl.git |
| ["Apache-2.0"] | 0.1.13 | 6b474016099e29cea6f7a910c9c20eb1b539b48c | code | 158 |
using Test
@testset "GridworldRenderer" begin
include("gridworld/test.jl")
end
@testset "GraphworldRenderer" begin
include("graphworld/test.jl")
end
| PDDLViz | https://github.com/JuliaPlanners/PDDLViz.jl.git |
| ["Apache-2.0"] | 0.1.13 | 6b474016099e29cea6f7a910c9c20eb1b539b48c | code | 839 |
using PDDLViz, GLMakie, GraphMakie
using PDDL, SymbolicPlanners, PlanningDomains
# Load blocksworld domain and problem
domain = load_domain(:blocksworld)
problem = load_problem(:blocksworld, 5)
# Construct initial state from domain and problem
state = initstate(domain, problem)
# Construct blocksworld renderer
renderer = BlocksworldRenderer()
# Render initial state
canvas = renderer(domain, state)
# Render animation
plan = @pddl(
"(unstack f e)", "(put-down f)",
"(unstack e b)", "(put-down e)",
"(unstack d a)", "(stack d e)",
"(unstack a c)", "(stack a f)",
"(pick-up c)", "(stack c d)",
"(pick-up b)", "(stack b c)",
"(unstack a f)", "(stack a b)"
)
anim = anim_plan!(canvas, renderer, domain, state, plan,
move_speed=0.4, framerate=24, showrate=Inf)
save("blocksworld.mp4", anim)
| PDDLViz | https://github.com/JuliaPlanners/PDDLViz.jl.git |
| ["Apache-2.0"] | 0.1.13 | 6b474016099e29cea6f7a910c9c20eb1b539b48c | code | 127 |
@testset "blocksworld" begin
include("blocksworld.jl")
end
@testset "zeno-travel" begin
include("zeno_travel.jl")
end
| PDDLViz | https://github.com/JuliaPlanners/PDDLViz.jl.git |
| ["Apache-2.0"] | 0.1.13 | 6b474016099e29cea6f7a910c9c20eb1b539b48c | code | 3318 |
using PDDLViz, GLMakie, GraphMakie
using PDDL, SymbolicPlanners, PlanningDomains
# Load example graph-based domain and problem
domain = load_domain(:zeno_travel)
problem = load_problem(:zeno_travel, 3)
# Construct initial state from domain and problem
state = initstate(domain, problem)
# Construct graphworld renderer
cmap = PDDLViz.colorschemes[:vibrant]
renderer = GraphworldRenderer(
has_mov_edges = true,
location_types = [:city],
movable_types = [:movable],
loc_edge_fn = (d, s, a, b) -> a != b,
loc_edge_label_fn = (d, s, a, b) -> string(s[Compound(:distance, [a, b])]),
mov_loc_edge_fn = (d, s, x, loc) -> s[Compound(:at, [x, loc])],
mov_edge_fn = (d, s, x, y) -> begin
terms = [Compound(:person, Term[x]), Compound(:aircraft, Term[y]),
Compound(:in, Term[x, y])]
return satisfy(d, s, terms)
end,
loc_type_renderers = Dict{Symbol, Function}(
:city => (d, s, loc) -> CityGraphic(
0, 0, 0.25, color=cmap[parse(Int, string(loc.name)[end])+1]
)
),
mov_type_renderers = Dict{Symbol, Function}(
:person => (d, s, o) -> HumanGraphic(
0, 0, 0.15, color=cmap[parse(Int, string(o.name)[end])]
),
:aircraft => (d, s, o) -> MarkerGraphic(
'✈', 0, 0, 0.2, color=cmap[parse(Int, string(o.name)[end])]
)
),
state_options = Dict{Symbol, Any}(
:show_location_labels => true,
:show_movable_labels => true,
:show_edge_labels => true,
:show_location_graphics => true,
:show_movable_graphics => true,
:label_offset => 0.15,
:movable_node_color => (:black, 0.0),
),
axis_options = Dict{Symbol, Any}(
:aspect => 1,
:autolimitaspect => 1,
:xautolimitmargin => (0.2, 0.2),
:yautolimitmargin => (0.2, 0.2),
:hidedecorations => true
),
graph_options = Dict{Symbol, Any}(
:node_size => 0.03,
:node_attr => (markerspace=:data,),
:nlabels_fontsize => 20,
:nlabels_align => (:center, :center),
:elabels_fontsize => 16,
)
)
# Render initial state
canvas = renderer(domain, state)
# Render animation
plan = @pddl("(refuel plane1)", "(fly plane1 city0 city2)",
"(board person1 plane1 city2)", "(fly plane1 city2 city1)",
"(debark person1 plane1 city1)", "(fly plane1 city1 city2)")
renderer.state_options[:show_edge_labels] = false
anim = anim_plan!(canvas, renderer, domain, state, plan, framerate=1)
save("zeno_travel.mp4", anim)
# Convert animation frames to storyboard
storyboard = render_storyboard(
anim, [1, 3, 4, 5, 6, 7], figscale=0.65, n_rows=2,
xlabels=["t=1", "t=3", "t=4", "t=5", "t=6", "t=7"],
subtitles=["(i) Initial state", "(ii) Plane flies to city 2",
"(iii) Person 1 boards plane", "(iv) Plane flies to city 1",
"(v) Person 1 debarks plane", "(vi) Plane flies back to city 2"],
xlabelsize=18, subtitlesize=22
)
# Render animation with linearly interpolated transitions
canvas = renderer(domain, state)
anim = anim_plan!(canvas, renderer, domain, state, plan,
transition=PDDLViz.LinearTransition(),
frames_per_step=12, framerate=12, showrate=Inf)
save("zeno_travel_smooth.mp4", anim)
| PDDLViz | https://github.com/JuliaPlanners/PDDLViz.jl.git |
| ["Apache-2.0"] | 0.1.13 | 6b474016099e29cea6f7a910c9c20eb1b539b48c | code | 4595 |
# Test gridworld rendering
using PDDLViz, GLMakie
using PDDL, SymbolicPlanners, PlanningDomains
# Load example gridworld domain and problem
domain = load_domain(:doors_keys_gems)
problem = load_problem(:doors_keys_gems, 3)
# Load array extension to PDDL
PDDL.Arrays.register!()
# Construct initial state from domain and problem
state = initstate(domain, problem)
# Construct gridworld renderer
gem_colors = PDDLViz.colorschemes[:vibrant]
renderer = GridworldRenderer(
resolution = (600, 700),
agent_renderer = (d, s) -> HumanGraphic(color=:black),
obj_renderers = Dict(
:key => (d, s, o) -> KeyGraphic(
visible=!s[Compound(:has, [o])]
),
:door => (d, s, o) -> LockedDoorGraphic(
visible=s[Compound(:locked, [o])]
),
:gem => (d, s, o) -> GemGraphic(
visible=!s[Compound(:has, [o])],
color=gem_colors[parse(Int, string(o.name)[end])]
)
),
show_inventory = true,
inventory_fns = [(d, s, o) -> s[Compound(:has, [o])]],
inventory_types = [:item]
)
# Render initial state
canvas = renderer(domain, state)
# Render plan
plan = @pddl("(right)", "(right)", "(right)", "(up)", "(up)")
renderer(canvas, domain, state, plan)
# Render trajectory
trajectory = PDDL.simulate(domain, state, plan)
canvas = renderer(domain, trajectory)
# Render path search solution
astar = AStarPlanner(GoalCountHeuristic(), save_search=true,
save_search_order=true, max_nodes=100)
sol = astar(domain, state, pddl"(has gem2)")
canvas = renderer(domain, state, sol, show_search=true)
# Render policy solution
heuristic = PlannerHeuristic(AStarPlanner(GoalCountHeuristic(), max_nodes=20))
rtdp = RTDP(heuristic=heuristic, n_rollouts=5, max_depth=20)
policy = rtdp(domain, state, pddl"(has gem1)")
canvas = renderer(domain, state, policy)
# Render reusable tree policy
heuristic = GoalCountHeuristic()
rths = RTHS(heuristic=heuristic, n_iters=1, max_nodes=20)
policy = rths(domain, state, pddl"(has gem1)")
canvas = renderer(domain, state, policy, show_goal_tree=false)
new_state = copy(state)
new_state[pddl"(xpos)"] = 4
new_state[pddl"(ypos)"] = 4
policy = refine!(policy, rths, domain, new_state, pddl"(has gem1)")
canvas = renderer(domain, new_state, policy, show_goal_tree=true)
# Render multi-solution
rths_bfs = RTHS(GoalCountHeuristic(), h_mult=0.0, max_nodes=10)
rths_astar = RTHS(GoalCountHeuristic(), h_mult=1.0, max_nodes=20)
arths = AlternatingRTHS(rths_bfs, rths_astar)
new_state = copy(state)
new_state[pddl"(xpos)"] = 4
new_state[pddl"(ypos)"] = 4
policy = arths(domain, new_state, pddl"(has gem1)")
canvas = renderer(domain, new_state, policy, show_goal_tree=false)
# Animate plan
plan = collect(sol)
anim = anim_plan(renderer, domain, state, plan; trail_length=10)
save("doors_keys_gems.mp4", anim)
# Animate path search planning
canvas = renderer(domain, state)
sol_anim, sol = anim_solve!(canvas, renderer, astar,
domain, state, pddl"(has gem1)")
save("doors_keys_gems_astar.mp4", sol_anim)
# Animate RTDP planning
canvas = renderer(domain, state)
sol_anim, sol = anim_solve!(canvas, renderer, rtdp,
domain, state, pddl"(has gem2)")
save("doors_keys_gems_rtdp.mp4", sol_anim)
# Animate RTHS planning
rths = RTHS(GoalCountHeuristic(), n_iters=5, max_nodes=15, reuse_paths=false)
canvas = renderer(domain, state)
sol_anim, sol = anim_solve!(canvas, renderer, rths,
domain, state, pddl"(has gem1)")
save("doors_keys_gems_rths.mp4", sol_anim)
# Convert animation frames to storyboard
storyboard = render_storyboard(
anim, [1, 14, 17, 24], figscale=0.75,
xlabels=["t=1", "t=14", "t=17", "t=24"],
subtitles=["(i) Initial state", "(ii) Agent picks up key",
"(iii) Agent unlocks door", "(iv) Agent picks up gem"],
xlabelsize=18, subtitlesize=22
)
# Construct multiple canvases on the same figure
figure = Figure()
resize!(figure, 1200, 700)
canvas1 = new_canvas(renderer, figure[1, 1])
canvas2 = new_canvas(renderer, figure[1, 2])
renderer(canvas1, domain, state)
renderer(canvas2, domain, state, plan)
# Add controller
canvas = renderer(domain, state)
recorder = ControlRecorder()
controller = KeyboardController(
Keyboard.up => pddl"(up)",
Keyboard.down => pddl"(down)",
Keyboard.left => pddl"(left)",
Keyboard.right => pddl"(right)",
Keyboard.z, Keyboard.x, Keyboard.c, Keyboard.v;
callback = recorder
)
add_controller!(canvas, controller, domain, state; show_controls=true)
remove_controller!(canvas, controller)
| PDDLViz | https://github.com/JuliaPlanners/PDDLViz.jl.git |
| ["Apache-2.0"] | 0.1.13 | 6b474016099e29cea6f7a910c9c20eb1b539b48c | code | 71 |
@testset "doors-keys-gems" begin
include("doors_keys_gems.jl")
end
| PDDLViz | https://github.com/JuliaPlanners/PDDLViz.jl.git |
| ["Apache-2.0"] | 0.1.13 | 6b474016099e29cea6f7a910c9c20eb1b539b48c | docs | 2740 |
# PDDLViz.jl
A library for visualizing, animating, and interacting with PDDL domains, built on top of [Makie.jl](https://github.com/MakieOrg/Makie.jl).
| Doors, Keys & Gems | Blocksworld | Zeno Travel |
| --- | --- | --- |
| ![Example gridworld animation](assets/gridworld.gif) | ![Example blocksworld animation](assets/blocksworld.gif) | ![Example zeno-travel animation](assets/zeno_travel.gif) |
## Installation
Press `]` at the Julia REPL to enter the package manager, then install this package along with `PDDL` and a `Makie` backend of your choice (e.g. `GLMakie`):
```
add PDDLViz
add PDDL GLMakie
```
To install the development version, replace `PDDLViz` above with `https://github.com/JuliaPlanners/PDDLViz.jl.git`.
## Usage
`PDDLViz.jl` provides a number of built-in renderer types for certain classes of domains, such as [`GridworldRenderer`](test/gridworld/doors_keys_gems.jl), [`GraphworldRenderer`](test/graphworld/zeno_travel.jl) or [`BlocksworldRenderer`](test/graphworld/blocksworld.jl). Each renderer can be customized for a specific domain by passing in options to its constructor:
```julia
using PDDLViz, GLMakie
# Construct gridworld renderer
gem_colors = PDDLViz.colorschemes[:vibrant]
renderer = GridworldRenderer(
resolution = (600, 700),
agent_renderer = (d, s) -> HumanGraphic(color=:black),
obj_renderers = Dict(
:key => (d, s, o) -> KeyGraphic(
visible=!s[Compound(:has, [o])]
),
:door => (d, s, o) -> LockedDoorGraphic(
visible=s[Compound(:locked, [o])]
),
:gem => (d, s, o) -> GemGraphic(
visible=!s[Compound(:has, [o])],
color=gem_colors[parse(Int, string(o.name)[end])]
)
),
show_inventory = true,
inventory_fns = [(d, s, o) -> s[Compound(:has, [o])]],
inventory_types = [:item]
)
```
A renderer can then be used to render PDDL states:
```julia
using PDDL, PlanningDomains
# Load example gridworld domain and problem
domain = load_domain(:doors_keys_gems)
problem = load_problem(:doors_keys_gems, 3)
# Load array extension to PDDL
PDDL.Arrays.register!()
# Construct initial state from domain and problem
state = initstate(domain, problem)
# Render initial state
canvas = renderer(domain, state)
# Save rendered canvas to file
save("gridworld.png", canvas)
```
The rendered image is below:
![Example gridworld rendered by PDDLViz.jl](assets/gridworld.png)
Renderers can also be used to create animations:
![Example gridworld trajectory animated by PDDLViz.jl](assets/gridworld.gif)
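For example, an animation like the one above can be produced with a short sketch along these lines (reusing the `renderer`, `domain`, and `state` from above):

```julia
plan = @pddl("(right)", "(right)", "(right)", "(up)", "(up)")
anim = anim_plan(renderer, domain, state, plan; trail_length=10)
save("gridworld.gif", anim)
```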
See the [`test`](test/) folder for examples of how to render plans, trajectories and planner solutions, how to animate trajectories, and how to enable interactive controls.
| PDDLViz | https://github.com/JuliaPlanners/PDDLViz.jl.git |
| ["MIT"] | 0.2.0 | 89ef2431dd59344ebaf052d0737205854ded0c62 | code | 3334 |
module VersionCheck
using Pkg, Logging, Dates, Random
using JSON3, UrlDownload
using Scratch
const version_info_bounds = r"start-versions-->(.*)<!--end-versions"s
const usersettings_filename = "usersettings.json"
# changelog_url = "https://genieframework.com/CHANGELOG.html"
usersettings = Dict()
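# Illustrative sketch of the CHANGELOG.html payload that `version_info_bounds`
# extracts; the exact schema is an assumption based on how `versioninfo` below
# indexes the parsed JSON:
#   <!--start-versions-->
#   {"packages": {"MyPackage": {"releases": [{"version": "1.2.3"}]}}}
#   <!--end-versions-->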
"""
Returns the `Pkg.Types.PackageInfo` for `pkgname` in the active project, or `nothing` if it cannot be found.
"""
function dependencyinfo(pkgname::String) :: Union{Pkg.Types.PackageInfo,Nothing}
try
Pkg.dependencies()[Pkg.project().dependencies[pkgname]]
catch ex
nothing
end
end
"""
Extracts the information about the latest version of `pkgname` using a special CHANGELOG.html file
"""
function versioninfo(pkgname::String; url::String) :: Union{JSON3.Object,Nothing}
try
changelog(url)[:packages][pkgname][:releases][1]
catch ex
nothing
end
end
"""
Checks if a new version is available for `pkgname`
"""
function newversion(pkgname::String; show_message = true, url::String) :: Bool
usersettings["enabled"] || return
vinfo = versioninfo(pkgname; url = url)
pinfo = dependencyinfo(pkgname)
if pinfo.version < VersionNumber(vinfo[:version])
if show_message && ((time() - usersettings["last_check"]) > usersettings["warn_frequency"] * 60 * 60)
@info "A new version ($(vinfo.version)) of $pkgname is available. You use version $(pinfo.version)."
end
save_usersettings("last_check" => time())
true
else
false
end
end
"""
Custom CHANGELOG.html parser for UrlDownload
"""
function textparser(content::Vector{UInt8}) :: JSON3.Object
try
match(version_info_bounds, String(content))[1] |> JSON3.read
catch
error("Invalid CHANGELOG.html document")
end
end
"""
Downloads the CHANGELOG.html file from `url`
"""
function changelog(url::String) :: JSON3.Object
url = (occursin("?", url) ? url * "&" : url * "?") * "id=$(usersettings["id"])"
urldownload(url, parser = textparser)
end
function default_usersettings()
Dict(
"enabled" => true,
"warn_frequency" => 24, # hours
"last_check" => 0.0, # time()
"id" => (randstring(24) |> uppercase)
)
end
function valid_usersettings(d::T) where {T<:AbstractDict}
issubset(collect(keys(default_usersettings())), string.(collect(keys(d))))
end
"""
Retrieves user settings from scratch
"""
function get_usersettings()
settings_file = joinpath(@get_scratch!("downloaded_files"), usersettings_filename)
defaults = default_usersettings()
if ! isfile(settings_file)
defaults |> save_usersettings
else
try
us = read(settings_file, String) |> JSON3.read
valid_usersettings(us) || error("Invalid usersettings file")
us
catch ex
# @error ex
defaults |> save_usersettings
end
end
end
"""
Persists user settings to scratch
"""
function save_usersettings(us::T) where {T<:AbstractDict}
settings_file = joinpath(@get_scratch!("downloaded_files"), usersettings_filename)
open(settings_file, "w") do io
JSON3.write(io, us)
end
global usersettings = us
end
function save_usersettings(p::Pair)
usersettings[p[1]] = p[2]
save_usersettings(usersettings)
end
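# Usage sketch (illustrative): persistently disable update checks by flipping the
# stored flag via the `Pair` method above:
#   VersionCheck.save_usersettings("enabled" => false)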
function __init__()
global usersettings = get_usersettings()
end
module Changelog
function generate()
@warn "TODO: implement"
end
function exportmd()
@warn "TODO: implement"
end
end
end
| VersionCheck | https://github.com/GenieFramework/VersionCheck.jl.git |
| ["MIT"] | 0.2.0 | 89ef2431dd59344ebaf052d0737205854ded0c62 | code | 97 |
using VersionCheck
using Test
@testset "VersionCheck.jl" begin
# Write your tests here.
end
| VersionCheck | https://github.com/GenieFramework/VersionCheck.jl.git |
| ["MIT"] | 0.2.0 | 89ef2431dd59344ebaf052d0737205854ded0c62 | docs | 618 |
# VersionCheck
Utility package for checking if a new version of a Julia package is available. It uses the current `Project.toml` file and a special `CHANGELOG.html` file to determine the latest versions.
## Usage
Create a `CHANGELOG.html` file similar to the `CHANGELOG_sample.html` file included in this package. Host the `CHANGELOG.html` file on a publicly accessible web server.
In your package, add a check like the following:
```julia
module MyPackage
import VersionCheck
function __init__()
try
@async VersionCheck.newversion("MyPackage", url = "<URL to CHANGELOG.html>")
catch
end
end
end
```
| VersionCheck | https://github.com/GenieFramework/VersionCheck.jl.git |
| ["MIT"] | 0.1.1 | 0b2c7d9d5c16629f165d03b8769a07ffd44c6ad7 | code | 674 |
using VisualStringDistances
using Documenter
makedocs(;
modules=[VisualStringDistances],
authors="Eric P. Hanson",
repo="https://github.com/ericphanson/VisualStringDistances.jl/blob/{commit}{path}#L{line}",
sitename="VisualStringDistances.jl",
format=Documenter.HTML(;
prettyurls=get(ENV, "CI", "false") == "true",
canonical="https://ericphanson.github.io/VisualStringDistances.jl",
assets=String[],
),
pages=[
"Home" => "index.md",
"Visualizations" => "visualizations.md",
"Package names" => "packagenames.md",
],
)
deploydocs(;
repo="github.com/ericphanson/VisualStringDistances.jl",
)
| VisualStringDistances | https://github.com/ericphanson/VisualStringDistances.jl.git |
| ["MIT"] | 0.1.1 | 0b2c7d9d5c16629f165d03b8769a07ffd44c6ad7 | code | 5465 |
@info "Loading packages..."
@time begin
using VisualStringDistances, UnbalancedOptimalTransport
using AbstractPlotting: px
using UnbalancedOptimalTransport: KL, Balanced
using Makie, MakieLayout
using GeometryBasics: Point2f0
using PlotUtils: RGBA, RGB
end
function hide_decorations!(ax)
ax.xticksvisible=false
ax.yticksvisible=false
ax.xticklabelsvisible=false
ax.yticklabelsvisible=false
ax.bottomspinevisible = false
ax.leftspinevisible = false
ax.topspinevisible = false
ax.rightspinevisible = false
ax.xgridvisible=false
ax.ygridvisible=false
end
function imgrot(v)
Point2f0(v[2], 1-v[1])
end
# transparent to black for 0 to 1, then becomes redder from 1 to 2
const CMAP = cgrad([RGBA{Float64}(0.0,0.0,0.0,0.0), RGBA{Float64}(0.0,0.0,0.0,1.0), RGBA{Float64}(1.0,0.0,0.0,1.0)], [0,1,1.5])
density_to_color(d) = get(CMAP, d/2)
function animate_words(string1, string2;
normalize_density = false,
D = KL(1.0),
random_colors = false,
kwargs...
)
w1 = word_measure(string1)
w2 = word_measure(string2)
if normalize_density
w1 = DiscreteMeasure(w1.density / sum(w1.density), w1.set)
w2 = DiscreteMeasure(w2.density / sum(w2.density), w2.set)
end
if random_colors
# doesn't matter how we choose `π`
π = ones(length(w1.set), length(w2.set))
else
π = optimal_coupling!(D, w1, w2)
end
scene, layout = layoutscene()
layout[1,1] = ax = LAxis(scene)
display(scene)
animate_coupling!(scene, ax, π, w1.set, w2.set; random_colors = random_colors, kwargs...)
return
end
function animate_coupling!(scene, ax, π, coords1, coords2;
duration = 2,
total_frames = 50*duration,
pause_frames = total_frames ÷ 4,
move_frames = total_frames - 2*pause_frames,
markersize = 20px,
random_colors = false,
save_path = nothing,
α = 0.7,
compression = 20
)
x_min = min(minimum([x[1] for x in imgrot.(coords1)]), minimum([x[1] for x in imgrot.(coords2)]))
x_max = max(maximum([x[1] for x in imgrot.(coords1)]), maximum([x[1] for x in imgrot.(coords2)]))
y_min = min(minimum([x[2] for x in imgrot.(coords1)]), minimum([x[2] for x in imgrot.(coords2)]))
y_max = max(maximum([x[2] for x in imgrot.(coords1)]), maximum([x[2] for x in imgrot.(coords2)]))
hide_decorations!(ax)
s1, s2 = size(π)
if random_colors
cvals = collect(Iterators.Flatten(Iterators.repeated([ RGBA(rand(RGB), α) for _ = 1:s1], s2)))
else
scale = sqrt(prod(size(π)))
cvals = density_to_color.(scale * vec(π) ./ sum(π))
end
t = 0
locations = Node([ imgrot( (1-t)*coords1[i] + t*coords2[j]) for j = 1:size(π,2) for i = 1:size(π,1)])
anim_plt = scatter!(ax, locations, color = cvals, markersize=markersize,
transparency=true)
x_pad = (x_max - x_min)*.05
y_pad = (y_max - y_min)*.05
limits!(ax, x_min - x_pad, x_max + x_pad, y_min - y_pad, y_max + y_pad)
move_ts = range(0, 1; length=move_frames)
do_frame = function(j)
if (j <= pause_frames) || (j > total_frames - pause_frames)
save_path === nothing && sleep(duration/total_frames)
else
t = move_ts[j - pause_frames]
locations[] = [ imgrot((1-t)*coords1[i] + t*coords2[j]) for j = 1:size(π,2) for i = 1:size(π,1)]
save_path === nothing && sleep(duration/total_frames)
end
end
if save_path === nothing
map(do_frame, 1:total_frames)
else
record(do_frame, scene, save_path, 1:total_frames, framerate = round(Int, total_frames / duration), compression=compression)
end
return
end
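# The block below monkey-patches `AbstractPlotting.save` for this session so that
# the video encoders accept the `compression` keyword used by `animate_coupling!`.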
@eval AbstractPlotting begin
function save(path::String, io::VideoStream;
framerate::Int = 24, compression = 20)
close(io.process)
wait(io.process)
p, typ = splitext(path)
if typ == ".mkv"
cp(io.path, path, force=true)
elseif typ == ".mp4"
ffmpeg_exe(`-loglevel quiet -i $(io.path) -crf $compression -c:v libx264 -preset slow -r $framerate -pix_fmt yuv420p -c:a libvo_aacenc -b:a 128k -y $path`)
elseif typ == ".webm"
ffmpeg_exe(`-loglevel quiet -i $(io.path) -crf $compression -c:v libvpx-vp9 -threads 16 -b:v 2000k -c:a libvorbis -threads 16 -r $framerate -vf scale=iw:ih -y $path`)
elseif typ == ".gif"
filters = "fps=$framerate,scale=iw:ih:flags=lanczos"
palette_path = dirname(io.path)
pname = joinpath(palette_path, "palette.bmp")
isfile(pname) && rm(pname, force = true)
ffmpeg_exe(`-loglevel quiet -i $(io.path) -vf "$filters,palettegen" -y $pname`)
ffmpeg_exe(`-loglevel quiet -i $(io.path) -r $framerate -f image2 $(palette_path)/image_%06d.png`)
ffmpeg_exe(`-loglevel quiet -framerate $framerate -i $(palette_path)/image_%06d.png -i $pname -lavfi "$filters [x]; [x][1:v] paletteuse" -y $path`)
rm(pname, force = true)
else
rm(io.path)
error("Video type $typ not known")
end
rm(io.path)
return path
end
end
@info "Generating animations..."
@time begin
animate_words("hello", "heIIo"; D = KL(1.0), save_path=abspath(joinpath(@__DIR__, "..", "..", "docs", "assets", "hello_heIIo.gif")))
animate_words("hello", "heIIo"; D = Balanced(), normalize_density=true, save_path=abspath(joinpath(@__DIR__, "..", "..", "docs", "assets", "hello_heIIo_balanced.gif")))
end
@info "Done!"
| VisualStringDistances | https://github.com/ericphanson/VisualStringDistances.jl.git |
|
[
"MIT"
] | 0.1.1 | 0b2c7d9d5c16629f165d03b8769a07ffd44c6ad7 | code | 3008 | using Pkg, VisualStringDistances
using VisualStringDistances: KL, sinkhorn_divergence!
using StringDistances
using DataFrames
using Transducers
const DL = DamerauLevenshtein()
function get_all_package_names(registry_dir::AbstractString)
packages = [x["name"] for x in values(Pkg.TOML.parsefile(joinpath(registry_dir, "Registry.toml"))["packages"])]
sort!(packages)
unique!(packages)
return packages
end
names = get_all_package_names(expanduser("~/.julia/registries/General"))
filter!(x -> !endswith(x, "_jll"), names)
@info "Loaded list of non-JLL package names ($(length(names)) names)"
normalized_dl_cutoff = .2
dl_cutoff = 1
@info "Computing list of pairs of package names within $(dl_cutoff) in DL distance or $(normalized_dl_cutoff) in normalized DL distance"
@time df = DataFrame(tcollect( (name1=names[i],name2=names[j]) for i = 1:length(names) for j = 1:(i-1) if (normalize(DL)(names[i], names[j]) <= normalized_dl_cutoff) || DL(names[i], names[j]) <= dl_cutoff))
df.longest_length = max.(length.(df.name1), length.(df.name2))
function compute_sd(df, penalty, ϵ)
tcollect( sinkhorn_divergence!(penalty, word_measure(df.name1[i]), word_measure(df.name2[i]), ϵ) for i = 1:size(df,1))
end
@info "Computing DL distances for pairs..."
col_dict = Dict{Any, String}()
col = "DL unnormalized"
col_dict[(name = :DL, normalization = :unnormalized, params=tuple())] = col
df[!, col] = DL.(df.name1, df.name2)
i = 0
for ϵ in (0.1, 0.5, 1.0), ρ in (1.0, 5.0, 10.0)
global i += 1
@info "($(i)/9) Computing sinkhorn divergences with ϵ=$ϵ, ρ=$ρ..."
col = "SD ϵ=$(ϵ) ρ=$(ρ) unnormalized"
col_dict[(name = :SD, normalization = :unnormalized, params=(ϵ=ϵ, ρ=ρ))] = col
@time df[!, col] = compute_sd(df, KL(ρ), ϵ)
end
@info "Computing normalized distances..."
@time begin
for ϵ in (0.1, 0.5, 1.0), ρ in (1.0, 5.0, 10.0)
col = "SD ϵ=$(ϵ) ρ=$(ρ) normalized"
col_dict[(name = :SD, normalization = :normalized, params=(ϵ=ϵ, ρ=ρ))] = col
df[!, col] = df[!, col_dict[(name = :SD, normalization = :unnormalized, params=(ϵ=ϵ, ρ=ρ))]] ./ df[!, :longest_length]
col = "SD ϵ=$(ϵ) ρ=$(ρ) sqrt normalized"
col_dict[(name = :SD, normalization = :sqrt_normalized, params=(ϵ=ϵ, ρ=ρ))] = col
df[!, col] = df[!, col_dict[(name = :SD, normalization = :unnormalized, params=(ϵ=ϵ, ρ=ρ))]] ./ sqrt.(df[!, :longest_length])
end
end
col = "DL sqrt normalized"
col_dict[(name = :DL, normalization = :sqrt_normalized, params=tuple())] = col
df[!, col] = df[!, col_dict[(name = :DL, normalization = :unnormalized, params=tuple())]] ./ sqrt.(df[!, :longest_length])
col = "DL normalized"
col_dict[(name = :DL, normalization = :normalized, params=tuple())] = col
df[!, col] = df[!, col_dict[(name = :DL, normalization = :unnormalized, params=tuple())]] ./ df[!, :longest_length]
using Serialization
@info "Serializing..."
serialize(joinpath(@__DIR__, "names_df.jls"), Dict{Any, Any}(:df => df, :col_dict => col_dict))
@info "Done!"
| VisualStringDistances | https://github.com/ericphanson/VisualStringDistances.jl.git |
|
[
"MIT"
] | 0.1.1 | 0b2c7d9d5c16629f165d03b8769a07ffd44c6ad7 | code | 2078 | using DataFrames, UnicodePlots, Test, Random, StatsBase
input = deserialize(joinpath(@__DIR__, "names_df.jls"))
df = input[:df]
col_dict = input[:col_dict]
# naive O(n^2) algorithm
function kendall_tau_distance(σ1, σ2)
n = length(σ1)
n == length(σ2) || throw(DimensionMismatch())
D = 0
for j = 1:n
for i = 1:(j-1)
D += ( (σ1[i] < σ1[j]) & (σ2[i] > σ2[j]) ) |
( (σ1[i] > σ1[j]) & (σ2[i] < σ2[j]) )
end
end
return D
end
# Example borrowed from http://arxiv.org/abs/1905.02752
@test kendall_tau_distance([2, 4, 1, 3], [4, 1, 3, 2]) == 5
π = randperm(4)
@test kendall_tau_distance([2, 4, 1, 3][π], [4, 1, 3, 2][π]) == 5
function normalized_kendall_tau_distance(σ1, σ2)
n = length(σ1)
2*kendall_tau_distance(σ1, σ2) / (n * (n-1))
end
kendall_tau_coeff(x,y) = 1 - normalized_kendall_tau_distance(x,y)
function corrs(coeff, suffix)
cols = [v for (k, v) in pairs(col_dict) if k[2] == suffix ]
[ coeff(df[!, c1] , df[!, c2]) for c1 in cols, c2 in cols]
end
all_cols = collect(values(col_dict))
sort!(all_cols)
corrs(coeff) = [ coeff(df[!, s1] , df[!, s2]) for s1 in all_cols, s2 in all_cols ]
suffix = :sqrt_normalized
for coeff in (corkendall, corspearman, kendall_tau_coeff)
cols = [v for (k, v) in pairs(col_dict) if k.normalization == suffix ]
@info cols
@info "$coeff with $suffix" corrs(coeff, suffix) heatmap(corrs(coeff, suffix))
end
for coeff in (corkendall, corspearman, kendall_tau_coeff)
@info all_cols
@info "$coeff" corrs(coeff) heatmap(corrs(coeff))
end
for (k, v) in pairs(col_dict)
k.normalization == :sqrt_normalized || continue
if !isempty(k.params)
k.params.ϵ > .5 && continue
k.params.ρ > 5 && continue
end
n = size(df, 1)
# Shuffle dataframe
p = randperm(n)
for c in propertynames(df)
permute!(df[!, c], p)
end
top5 = sort!(df, v)[1:5, ["name1", "name2", v]]
num = count(x -> x <= df[5, v], df[!, v])
@info "$k; showing random top 5 with $(num) candidates to show" top5
end
| VisualStringDistances | https://github.com/ericphanson/VisualStringDistances.jl.git |
|
[
"MIT"
] | 0.1.1 | 0b2c7d9d5c16629f165d03b8769a07ffd44c6ad7 | code | 1469 | using Random, Pkg, VisualStringDistances
# Define our distance measure
d(s1, s2) = visual_distance(s1, s2; normalize=x -> 2 + sqrt(x))
d((s1,s2)) = d(s1, s2)
confusable_names = [
("DifferentialEquations", "DifferentIalEquations"),
("jellyfish", "jeIlyfish"), # example from python
("ANOVA", "AN0VA"),
("ODEInterfaceDiffEq", "0DEInterfaceDiffEq"),
("ValueOrientedRiskManagementInsurance", "ValueOrientedRiskManagementlnsurance"),
("IsoPkg", "lsoPkg"),
("DiffEqNoiseProcess", "DiffEgNoiseProcess"),
("Graph500", "Graph5O0")
]
@show d.(confusable_names)
function get_all_package_names(registry_dir::AbstractString)
packages = [x["name"] for x in values(Pkg.TOML.parsefile(joinpath(registry_dir, "Registry.toml"))["packages"])]
sort!(packages)
unique!(packages)
return packages
end
names = get_all_package_names(expanduser("~/.julia/registries/General"))
filter!(x -> !endswith(x, "_jll"), names)
confusable = ["O" => "0", "I" => "l", "I" => "1", "g" => "q"]
append!(confusable, reverse.(confusable))
function gen_list(names; N = 10)
list = Tuple{String, String}[]
while length(list) < N
name = rand(names)
swap = rand(confusable)
if occursin(first(swap), name)
new_name = replace(name, swap; count=1)
push!(list, (name, new_name))
end
end
return list
end
list = gen_list(names)
dists = d.(list)
dist, idx = findmax(dists)
@show dist, list[idx]
| VisualStringDistances | https://github.com/ericphanson/VisualStringDistances.jl.git |
|
[
"MIT"
] | 0.1.1 | 0b2c7d9d5c16629f165d03b8769a07ffd44c6ad7 | code | 275 | using Remark, FileWatching
while true
Remark.slideshow(@__DIR__; options = Dict("ratio" => "16:9"), credit=false,
title = "How similar do two strings look? Visual distances in Julia")
@info "Rebuilt"
FileWatching.watch_folder(joinpath(@__DIR__, "src"))
end
| VisualStringDistances | https://github.com/ericphanson/VisualStringDistances.jl.git |
|
[
"MIT"
] | 0.1.1 | 0b2c7d9d5c16629f165d03b8769a07ffd44c6ad7 | code | 5727 | # # How similar do two strings look?
# ## VisualStringDistances.jl
# <img src="assets/julia_visual.gif" style="width: 65%" class="center" />
# ---
# ## Let's compare strings
# <br />
using StringDistances
# How many single-character edits are needed to turn "Julia" into "JuIia"?
StringDistances.Levenshtein()("Julia", "JuIia")
# What about "Julia" into "JuQia"?
StringDistances.Levenshtein()("Julia", "JuQia")
# We can also compare based on how many times consecutive pairs of letters appear in each string...
StringDistances.QGram(2)("Julia", "JuIia"), StringDistances.QGram(2)("Julia", "JuQia")
# ---
# ## Visual distances
# <br />
# But none of these take into account that "Julia" and "JuIia" look pretty similar, while "Julia" and "JuQia" look pretty different.
using VisualStringDistances: VisualStringDistances
const VSD = VisualStringDistances
VSD.visual_distance("Julia", "JuIia"), VSD.visual_distance("Julia", "JuQia")
# <br />
# That seems better! But how do we know it does something reasonable in other cases too? And how does it work?
# <br />
# Just need two tools:
# 1. <p> A way to translate strings into images </p>
# 2. <p> A way to compare images </p>
# ---
# ## 1. A way to translate strings into images: GNU Unifont
VSD.printglyph("GNU Unifont"; symbols=("#", "-"))
# A bitmap font!
# ---
# Unifont stores characters as bitmaps, making things quite easy for us:
VSD.Glyph("Julia")
# <br />
# (see also FreeTypeAbstraction.jl to render bitmaps from many fonts!)
# ---
# It is low resolution, but simple and comprehensive, with 57086 supported characters, including...
chars = [VSD.get_char(k) for k in rand(collect(keys(VSD.UNIFONT_LOOKUP)), 5)];
permutedims(chars)
# Which render as:
VSD.printglyph(join(chars, " "))
# ---
VSD.printglyph("Julia vs JuIia"); VSD.printglyph("Julia vs JuQia") # hide
# ---
# ## 2. A way to compare images: Optimal transport
# <br />
# * <p> you have $a(x_1)$ amount of stuff at site $x_1$, $a(x_2)$ amount of stuff at $x_2$, ..., $a(x_n)$ stuff at $x_n$. </p>
# * <p> you want to move it around until you have $b(y_1)$ stuff at site $y_1$, $b(y_2)$ stuff at $y_2$, ..., $b(y_m)$ stuff at $y_m$ </p>
# * <p> it costs $c(x_i, y_j)$ to move one unit of mass from $x_i$ to $y_j$ </p>
# ```math
# \begin{aligned}
# \operatorname{OT}(a,b) := \text{minimize} \quad & \sum\_{x,y} π(x,y)\, c(x,y)\\\\
# \text{such that} \quad & a(x) = \sum\_{y} \pi(x,y)\\\\
# & b(y) = \sum\_{x} \pi(x,y) \\\\
# & \pi(x,y) \geq 0
# \end{aligned}
# ```
# * <p> We optimize to find the variables $\pi(x,y)$ (how much stuff to move from $x$ to $y$) </p>
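# A tiny made-up instance (the sites and masses here are illustrative, not from a font),
# solved with the same package this talk uses under the hood:

using UnbalancedOptimalTransport: DiscreteMeasure, Balanced, optimal_coupling!
a = DiscreteMeasure([1.0, 1.0], [[0.0, 0.0], [0.0, 1.0]]) # two unit masses
b = DiscreteMeasure([2.0], [[0.0, 0.5]])                  # all the mass at one site
optimal_coupling!(Balanced(), a, b, 0.1)                  # the matrix of masses moved, π(x,y)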
# ---
# ## How does optimal transport relate to our problem?
# <br />
# - <p> If we have a black pixel in the 3rd column and 2nd row of the bitmap, we can see that as $a(1) = 1$ unit of mass at site $x_1 = (2,3)$. </p>
# - <p> In this way, we can translate the bitmap representation of the string into the language of optimal transport (see the snippet below). </p>
# - <p> $c(x,y)$ is just the distance between those points </p>
# - <p> Note: we do two modifications to this </p>
# - <p> we solve an approximate version for speed ("entropic regularization") </p>
# - <p> add penalties for creating/destroying stuff for the case $\sum_x a(x) \neq \sum_y b(y)$ [1]. </p>
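# For instance, `word_measure` (exported by VisualStringDistances.jl) performs exactly this
# translation, returning a `DiscreteMeasure` with one unit of mass per black pixel:

using VisualStringDistances: word_measure
m = word_measure("a")
sum(m.density), length(m.set) # total mass equals the number of black pixels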
# <br />
# <br />
# <br />
# [1]: Séjourné, T., Feydy, J., Vialard, F.-X., Trouvé, A., Peyré, G., 2019. *Sinkhorn Divergences for Unbalanced Optimal Transport*. https://arxiv.org/abs/1910.12958.
# ---
# ## What use does this have?
# Making gifs!
# ---
# ## What use does this have?
# Adding a check for new packages being added to the General registry, to try to prevent malicious impersonation of another package.
# Two main concerns:
# 1. Possibly, one will make a typo, and end up at the wrong package ("typosquatting") $\leadsto$ edit distance check
# 2. Possibly, one will copy a malicious tutorial that has mimicked the appearance of the name of a popular package $\leadsto$ visual distance
# <img src="assets/FIux.gif" style="width: 45%" class="center" />
# ---
# ## Is this the right visual distance for an automated registry check?
# I'm not sure.
# * <p> Human perception is actually a bit different </p>
# * e.g. we mix up "p" vs "q" more than "a" vs "e" [2], but `visual_distance` says "p" and "q" are further apart than "a" and "e"
# * <p> optimal transport is a bit slow (though not prohibitively so, with entropic regularization and the low resolution font) </p>
# * <p> there are several parameters and cutoffs to tune </p>
#
#
# Possibly a perceptually-weighted edit distance is more sensible.
# <br />
# <br />
# [2]: Courrieu, Pierre, Fernand Farioli, and Jonathan Grainger. *Inverse Discrimination Time as a Perceptual Distance for Alphabetic Characters*. Visual Cognition 11, no. 7 (October 2004): 901–19. https://doi.org/10.1080/13506280444000049.
# ---
# ## References & Notes
# * Package for `visual_distance`, `printglyph`, etc: VisualStringDistances.jl
# * <p> Package with the underlying algorithm optimal transport algorithm: UnbalancedOptimalTransport.jl </p>
# <br />
# <br />
# References:
# <br />
# [1]: Séjourné, T., Feydy, J., Vialard, F.-X., Trouvé, A., Peyré, G., 2019. *Sinkhorn Divergences for Unbalanced Optimal Transport*. https://arxiv.org/abs/1910.12958.
#
# [2]: Courrieu, Pierre, Fernand Farioli, and Jonathan Grainger. *Inverse Discrimination Time as a Perceptual Distance for Alphabetic Characters*. Visual Cognition 11, no. 7 (October 2004): 901–19. https://doi.org/10.1080/13506280444000049.
# <br />
# Slides made with the help of Remark.jl, Literate.jl, and Documenter.jl; gifs made with Makie.jl.
# Thanks to Stefan Karpinski for suggesting GNU Unifont.
| VisualStringDistances | https://github.com/ericphanson/VisualStringDistances.jl.git |
|
[
"MIT"
] | 0.1.1 | 0b2c7d9d5c16629f165d03b8769a07ffd44c6ad7 | code | 1370 | include("plotting.jl")
# animate_words("hello", "heIIo"; D = KL(1.0), save_path=abspath(joinpath(@__DIR__, "hello_heIIo.gif")))
# animate_words("Julia", "Strings"; D = Balanced(), normalize_density=true, save_path=abspath(joinpath(@__DIR__, "julia_strings.gif")), duration=4)
# animate_words("Julia", "distances"; D = Balanced(), normalize_density=true, save_path=abspath(joinpath(@__DIR__, "Julia_distances.gif")))
# animate_words("DifferentialEquations", "DifferentiaIEquations"; D = KL(1.0), save_path=abspath(joinpath(@__DIR__, "DifferentiaIEquations.gif")), duration=4)
# animate_words("FIux", "Flux"; D = KL(5.0), save_path=abspath(joinpath(@__DIR__, "FIux.gif")), duration=4)
# animate_words("Julia", "Visual"; D = KL(1.0), save_path=abspath(joinpath(@__DIR__, "FIux.gif")), duration=4)
animate_words("Julia", "visual"; D = Balanced(), normalize_density=true, save_path=abspath(joinpath(@__DIR__, "julia_visual.gif")), duration=4,
total_frames = 50*4,
pause_frames = 50*4 ÷ 3)
# animate_words("Julia", "visual"; D = KL(5), save_path=abspath(joinpath(@__DIR__, "julia_visual_unbalanced.gif")), duration=4)
# julia> calculate_contributions(KL(5.0), word_measure("Flux"), word_measure("FIux"), 0.1)
# (transport_cost = 4.074841771685898, regularization = 496.6252591146831, marginal_1_penalty = 0.1502096716498963, marginal_2_penalty = 1.2417285590962812)
| VisualStringDistances | https://github.com/ericphanson/VisualStringDistances.jl.git |
|
[
"MIT"
] | 0.1.1 | 0b2c7d9d5c16629f165d03b8769a07ffd44c6ad7 | code | 5817 | using VisualStringDistances, UnbalancedOptimalTransport
using AbstractPlotting
using AbstractPlotting: px
using UnbalancedOptimalTransport: KL, Balanced, cost_matrix
using MakieLayout
using GLMakie
using GeometryBasics: Point2f0
using PlotUtils: RGBA, RGB
function hide_decorations!(ax)
ax.xticksvisible=false
ax.yticksvisible=false
ax.xticklabelsvisible=false
ax.yticklabelsvisible=false
ax.bottomspinevisible = false
ax.leftspinevisible = false
ax.topspinevisible = false
ax.rightspinevisible = false
ax.xgridvisible=false
ax.ygridvisible=false
end
function imgrot(v)
Point2f0(v[2], 1-v[1])
end
# transparent to black for 0 to 1, then becomes redder from 1 to 2
const CMAP = cgrad([RGBA{Float64}(0.0,0.0,0.0,0.0), RGBA{Float64}(0.0,0.0,0.0,1.0), RGBA{Float64}(1.0,0.0,0.0,1.0)], [0,1,1.5])
density_to_color(d) = get(CMAP, d/2)
function animate_words(string1, string2;
normalize_density = false,
D = KL(1.0),
random_colors = false,
display_plot=true,
ϵ=0.1,
kwargs...
)
w1 = word_measure(string1)
w2 = word_measure(string2)
if normalize_density
w1 = DiscreteMeasure(w1.density / sum(w1.density), w1.set)
w2 = DiscreteMeasure(w2.density / sum(w2.density), w2.set)
end
if random_colors
# doesn't matter how we choose `π`
π = ones(length(w1.set), length(w2.set))
else
π = optimal_coupling!(D, w1, w2, ϵ)
end
scene, layout = layoutscene()
layout[1,1] = ax = LAxis(scene)
if display_plot
display(scene)
end
animate_coupling!(scene, ax, π, w1.set, w2.set; random_colors = random_colors, kwargs...)
return
end
function animate_coupling!(scene, ax, π, coords1, coords2;
duration = 2,
total_frames = 50*duration,
pause_frames = total_frames ÷ 4,
move_frames = total_frames - 2*pause_frames,
markersize = 20px,
random_colors = false,
save_path = nothing,
α = 0.7,
compression = 20
)
x_min = min(minimum([x[1] for x in imgrot.(coords1)]), minimum([x[1] for x in imgrot.(coords2)]))
x_max = max(maximum([x[1] for x in imgrot.(coords1)]), maximum([x[1] for x in imgrot.(coords2)]))
y_min = min(minimum([x[2] for x in imgrot.(coords1)]), minimum([x[2] for x in imgrot.(coords2)]))
y_max = max(maximum([x[2] for x in imgrot.(coords1)]), maximum([x[2] for x in imgrot.(coords2)]))
x_min, x_max = minmax(x_min, x_max)
y_min, y_max = minmax(y_min, y_max)
hide_decorations!(ax)
s1, s2 = size(π)
if random_colors
cvals = collect(Iterators.Flatten(Iterators.repeated([ RGBA(rand(RGB), α) for _ = 1:s1], s2)))
else
scale = sqrt(prod(size(π)))
cvals = density_to_color.(scale * vec(π) ./ sum(π))
end
t = 0
locations = Node([ imgrot( (1-t)*coords1[i] + t*coords2[j]) for j = 1:size(π,2) for i = 1:size(π,1)])
anim_plt = scatter!(ax, locations, color = cvals, markersize=markersize,
transparency=true)
x_pad = (x_max - x_min)*.05
y_pad = (y_max - y_min)*.05
limits!(ax, x_min - x_pad, x_max + x_pad, y_min - y_pad, y_max + y_pad)
move_ts = range(0, 1; length=move_frames)
do_frame = function(j)
if (j <= pause_frames) || (j > total_frames - pause_frames)
save_path === nothing && sleep(duration/total_frames)
else
t = move_ts[j - pause_frames]
locations[] = [ imgrot((1-t)*coords1[i] + t*coords2[j]) for j = 1:size(π,2) for i = 1:size(π,1)]
save_path === nothing && sleep(duration/total_frames)
end
end
if save_path === nothing
map(do_frame, 1:total_frames)
else
record(do_frame, scene, save_path, 1:total_frames, framerate = round(Int, total_frames / duration), compression=compression)
end
return
end
@eval AbstractPlotting begin
function save(path::String, io::VideoStream;
framerate::Int = 24, compression = 20)
close(io.process)
wait(io.process)
p, typ = splitext(path)
if typ == ".mkv"
cp(io.path, path, force=true)
elseif typ == ".mp4"
ffmpeg_exe(`-loglevel quiet -i $(io.path) -crf $compression -c:v libx264 -preset slow -r $framerate -pix_fmt yuv420p -c:a libvo_aacenc -b:a 128k -y $path`)
elseif typ == ".webm"
ffmpeg_exe(`-loglevel quiet -i $(io.path) -crf $compression -c:v libvpx-vp9 -threads 16 -b:v 2000k -c:a libvorbis -threads 16 -r $framerate -vf scale=iw:ih -y $path`)
elseif typ == ".gif"
filters = "fps=$framerate,scale=iw:ih:flags=lanczos"
palette_path = dirname(io.path)
pname = joinpath(palette_path, "palette.bmp")
isfile(pname) && rm(pname, force = true)
ffmpeg_exe(`-loglevel quiet -i $(io.path) -vf "$filters,palettegen" -y $pname`)
ffmpeg_exe(`-loglevel quiet -i $(io.path) -r $framerate -f image2 $(palette_path)/image_%06d.png`)
ffmpeg_exe(`-loglevel quiet -framerate $framerate -i $(palette_path)/image_%06d.png -i $pname -lavfi "$filters [x]; [x][1:v] paletteuse" -y $path`)
rm(pname, force = true)
else
rm(io.path)
error("Video type $typ not known")
end
rm(io.path)
return path
end
end
using LinearAlgebra
lg(x) = x <= 0 ? zero(x) : log(x)
entropy(a::AbstractArray) = sum(x -> -x * lg(x), a)
function divergence(::KL{ρ}, a, b) where {ρ}
ρ * (-entropy(a) - dot(a, lg.(b)) + sum(b - a))
end
function calculate_contributions(D, a, b, ϵ)
π = optimal_coupling!(D, a, b, ϵ)
π_1 = sum(π, dims = 2)
π_2 = vec(sum(π, dims = 1))
return (transport_cost=dot(cost_matrix(a, b), π),
regularization=ϵ * divergence(KL(), vec(π), kron(b.density, a.density)),
marginal_1_penalty=divergence(D, π_1, a.density),
marginal_2_penalty=divergence(D, π_2, b.density))
end
| VisualStringDistances | https://github.com/ericphanson/VisualStringDistances.jl.git |
|
[
"MIT"
] | 0.1.1 | 0b2c7d9d5c16629f165d03b8769a07ffd44c6ad7 | code | 4408 | module VisualStringDistances
using LinearAlgebra
using UnbalancedOptimalTransport: UnbalancedOptimalTransport, DiscreteMeasure, KL,
sinkhorn_divergence!
using DelimitedFiles
using StaticArrays
export printglyph, word_measure, visual_distance
include("compat.jl")
include("glyphs.jl")
include("glyphcoordinates.jl")
include("glue.jl")
"""
visual_distance(::Type{T}, s::Union{Char,AbstractString},
t::Union{Char,AbstractString}; D=KL(one(T)), ϵ=T(0.1),
normalize=nothing) where {T}
Computes a measure of distance between the strings `s` and `t` in terms of their visual representation
as rendered by GNU Unifont and quantified by an unbalanced Sinkhorn divergence from UnbalancedOptimalTransport.jl.
* The keyword argument `D` chooses the `UnbalancedOptimalTransport.AbstractDivergence` used to penalize the creation
or destruction of "mass" (black pixels). For `D = VisualStringDistances.KL(ρ)` for some number `ρ ≥ 0`,
the distance is non-negative and zero if and only if the two visual representations of the strings
are the same, as is generally desired.
* The keyword argument `ϵ` sets the "entropic regularization" in the Sinkhorn divergence; see the
[documentation](https://ericphanson.github.io/UnbalancedOptimalTransport.jl/stable/optimal_transport/)
there for more information. In short, smaller `ϵ` computes a quantity more directly related to the cost
of moving mass, but takes longer to compute.
* The keyword argument `normalize` can be chosen to be a function which returns a normalizing constant
given the maximum length of the two strings. The choice `normalize=identity` thus divides the result
by the maximum length of the two strings. The choice `normalize=sqrt` has been found to give
a good balance in some settings.
One may use [`printglyph`](@ref) to see the visual representation of the strings as rendered by GNU Unifont.
!!! note
At the time of this writing, GNU Unifont is capable of rendering 57086 different unicode characters.
However, it renders some unicode characters with the same graphical representation; specifically,
689 distinct unicode characters have duplicate representations. Here's a set of six duplicates, for
example:
* 'Ꮋ': Unicode U+13BB (category Lu: Letter, uppercase)
* 'Н': Unicode U+041D (category Lu: Letter, uppercase)
* 'ꓧ': Unicode U+A4E7 (category Lo: Letter, other)
* 'Ⲏ': Unicode U+2C8E (category Lu: Letter, uppercase)
* 'Η': Unicode U+0397 (category Lu: Letter, uppercase)
* 'H': ASCII/Unicode U+0048 (category Lu: Letter, uppercase)
The visual distance between these, therefore, is returned as zero (up to numerical error).
## Example
```julia
julia> using VisualStringDistances
julia> printglyph("abc")
------------------------
------------------------
------------------------
---------#--------------
---------#--------------
---------#--------------
--####---#-###----####--
-#----#--##---#--#----#-
------#--#----#--#------
--#####--#----#--#------
-#----#--#----#--#------
-#----#--#----#--#------
-#---##--##---#--#----#-
--###-#--#-###----####--
------------------------
------------------------
julia> printglyph("def")
------------------------
------------------------
------------------------
------#-------------##--
------#------------#----
------#------------#----
--###-#---####-----#----
-#---##--#----#--#####--
-#----#--#----#----#----
-#----#--######----#----
-#----#--#---------#----
-#----#--#---------#----
-#---##--#----#----#----
--###-#---####-----#----
------------------------
------------------------
julia> visual_distance("abc", "def")
31.57060117541754
julia> visual_distance("abc", "abe")
4.979840716647487
```
"""
function visual_distance(::Type{T}, s::Union{Char,AbstractString},
t::Union{Char,AbstractString}; D=KL(one(T)), ϵ=T(0.1),
normalize=nothing) where {T}
d = sinkhorn_divergence!(D, word_measure(T, s), word_measure(T, t), ϵ)
if normalize !== nothing
d = d / normalize(max(length(s), length(t)))
end
return d
end
# `Float64` default.
function visual_distance(s::Union{Char,AbstractString}, t::Union{Char,AbstractString};
D=KL(1.0), ϵ=0.1, normalize=nothing)
visual_distance(Float64, s, t; D=D, ϵ=ϵ, normalize=normalize)
end
end
| VisualStringDistances | https://github.com/ericphanson/VisualStringDistances.jl.git |
|
[
"MIT"
] | 0.1.1 | 0b2c7d9d5c16629f165d03b8769a07ffd44c6ad7 | code | 123 | @static if v"0.7" <= VERSION < v"1.1.0-DEV.792"
eachrow(A::AbstractVecOrMat) = (view(A, i, :) for i in axes(A, 1))
end
| VisualStringDistances | https://github.com/ericphanson/VisualStringDistances.jl.git |
|
[
"MIT"
] | 0.1.1 | 0b2c7d9d5c16629f165d03b8769a07ffd44c6ad7 | code | 1013 | struct ConstantVector{c,T} <: AbstractVector{T}
len::Int
end
Base.size(v::ConstantVector) = (v.len,)
Base.getindex(::ConstantVector{c,T}, i::Int) where {c,T} = T(c)
Base.sum(v::ConstantVector{c}) where {c} = c * length(v)
LinearAlgebra.dot(::ConstantVector{c}, v::AbstractVector) where {c} = conj(c) * sum(v)
LinearAlgebra.dot(v::AbstractVector, ::ConstantVector{c}) where {c} = c * conj(sum(v))
function UnbalancedOptimalTransport.fdot(f, ::ConstantVector{c},
v::AbstractVector) where {c}
conj(c) * sum(f, v)
end
function UnbalancedOptimalTransport.fdot(f, v::AbstractVector,
::ConstantVector{c}) where {c}
conj(sum(v)) * f(c)
end
function word_measure(::Type{T}, s::Union{Char,AbstractString}) where {T}
gc = GlyphCoordinates{T}(s)
n = length(gc)
DiscreteMeasure(ConstantVector{one(T),T}(n), ConstantVector{zero(T),T}(n), gc)
end
word_measure(s::Union{Char,AbstractString}) = word_measure(Float64, s)
| VisualStringDistances | https://github.com/ericphanson/VisualStringDistances.jl.git |
|
[
"MIT"
] | 0.1.1 | 0b2c7d9d5c16629f165d03b8769a07ffd44c6ad7 | code | 1750 |
"""
GlyphCoordinates{T} <: AbstractVector{T}
A sparse representation of a [`Glyph`](@ref).
"""
struct GlyphCoordinates{T} <: AbstractVector{T}
v::Vector{SVector{2,T}}
sz::Tuple{Int,Int}
end
Base.size(g::GlyphCoordinates) = size(g.v)
Base.getindex(g::GlyphCoordinates, i::Int) = getindex(g.v, i)
Base.getindex(g::GlyphCoordinates, I...) = getindex(g.v, I...)
Base.IndexStyle(g::GlyphCoordinates) = IndexLinear()
GlyphCoordinates(args...) = GlyphCoordinates{Float64}(args...)
function GlyphCoordinates{T}(g::Glyph) where {T}
GlyphCoordinates([SVector{2,T}(Tuple(ci))
for ci in CartesianIndices(g) if !iszero(g[ci])], size(g))
end
const COORDS_CACHE = Dict{Char,GlyphCoordinates{Float64}}()
function GlyphCoordinates{Float64}(c::Char)
get!(COORDS_CACHE, c) do
GlyphCoordinates{Float64}(Glyph(c))
end
end
# fallback for generic types
GlyphCoordinates{T}(c::Char) where {T} = GlyphCoordinates{T}(Glyph(c))
# Use the character cache `COORDS_CACHE`
function GlyphCoordinates{T}(s::AbstractString) where {T}
gcs = [GlyphCoordinates{T}(c) for c in s]
L = sum(length, gcs)
v = Vector{SVector{2,T}}(undef, L)
shift = SVector{2,Int}(0, 0)
j = 1
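    # Lay the glyphs out left to right: shift each character's pixel coordinates by the running width.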
for gc in gcs
l = length(gc.v)
v[j:j+l-1] .= gc.v .+ Ref(shift)
j += l
shift += SVector{2,Int}(0, gc.sz[2])
end
GlyphCoordinates{T}(v, Tuple(shift + SVector(16, 0)))
end
function printglyph(io, g::GlyphCoordinates{T}; symbols=("#", " ")) where {T}
for r = 1:g.sz[1]
for c = 1:g.sz[2]
if SVector{2,T}(r, c) ∈ g.v
print(io, symbols[1])
else
print(io, symbols[2])
end
end
println(io)
end
end
| VisualStringDistances | https://github.com/ericphanson/VisualStringDistances.jl.git |
|
[
"MIT"
] | 0.1.1 | 0b2c7d9d5c16629f165d03b8769a07ffd44c6ad7 | code | 3203 | """
Glyph <: AbstractArray{Bool,2}
Holds the bitmap associated to a Unifont glyph in a packed format.
"""
struct Glyph <: AbstractArray{Bool,2}
data::Matrix{UInt8}
end
Base.size(A::Glyph) = (size(A.data, 1), size(A.data, 2) * 8)
function Base.getindex(A::Glyph, i::Int, j::Int)
block = A.data[i, cld(j, 8)]
k = (j - 1) % 8
Bool((block >> k) & 1)
end
Base.hcat(a::Glyph, b::Glyph) = Glyph(hcat(a.data, b.data))
# https://discourse.julialang.org/t/convert-integer-to-bits-array/26663/7
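# Reverse a byte's bit order: Unifont's hex rows store the leftmost pixel in the most
# significant bit, while `Glyph` indexing reads bits least-significant-first.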
function revbits(z::UInt8)
z = (((z & 0xaa) >> 1) | ((z & 0x55) << 1))
z = (((z & 0xcc) >> 2) | ((z & 0x33) << 2))
z = (((z & 0xf0) >> 4) | ((z & 0x0f) << 4))
return z
end
"""
glyph!(v::Vector{UInt8}) -> Glyph
Creates a [`Glyph`](@ref) from a vector of bytes, assuming the vector represents a single Unifont character (16 bytes for a half-width glyph, 32 for full-width). Modifies `v` and may share its memory.
"""
function glyph!(v::Vector{UInt8})
v .= revbits.(v)
if length(v) == 16
Glyph(reshape(v, 16, 1))
elseif length(v) == 32
Glyph(transpose(reshape(v, 2, 16)))
else
throw(ArgumentError("Input vector must have length 16 or 32, corresponding to a single character."))
end
end
const UNIFONT_LOOKUP = let file = readdlm(joinpath(@__DIR__, "..", "data",
"unifont-12.1.04.hex"), ':', String)
Dict{String,String}(r[1] => r[2] for r in eachrow(file))
end
const GLYPH_CHAR_CACHE = Dict{Char,Glyph}()
function key(c::Char)
u = codepoint(c)
return uppercase(string(u, base=16, pad=u ≤ 0xffff ? 4 : 6))
end
function get_char(k::AbstractString)
    # Parse as UInt32 so six-hex-digit keys (codepoints above 0xffff) don't overflow.
    Char(parse(UInt32, k, base=16))
end
function Glyph(c::Char)
get!(GLYPH_CHAR_CACHE, c) do
k = key(c)
        haskey(UNIFONT_LOOKUP, k) || error("GNU Unifont doesn't know how to render '$c'.")
return glyph!(hex2bytes(UNIFONT_LOOKUP[k]))
end
end
"""
Glyph(s::AbstractString) --> Glyph
Construct a `Glyph` from a string.
# Examples
```julia-repl
julia> Glyph("abc")
------------------------
------------------------
------------------------
---------#--------------
---------#--------------
---------#--------------
--####---#-###----####--
-#----#--##---#--#----#-
------#--#----#--#------
--#####--#----#--#------
-#----#--#----#--#------
-#----#--#----#--#------
-#---##--##---#--#----#-
--###-#--#-###----####--
------------------------
------------------------
```
"""
function Glyph(s::AbstractString)
foldl(hcat, (Glyph(c) for c in s))
end
"""
printglyph([io=stdout], g::Union{Char, AbstractString, Glyph})
Prints a visual representation of `g` to `io`.
"""
function printglyph end
function printglyph(io::IO, g::Glyph; symbols=("#", " "))
rep = s -> s ? symbols[1] : symbols[2]
for r in eachrow(g)
println(io, mapreduce(rep, *, r))
end
end
printglyph(g; kwargs...) = printglyph(stdout, g; kwargs...)
printglyph(io::IO, s::Union{Char, AbstractString}; kwargs...) = printglyph(io, Glyph(s); kwargs...)
printglyph(s::Union{Char, AbstractString}; kwargs...) = printglyph(stdout, s; kwargs...)
# Base.show(io::IO, ::MIME"text/plain", g::Glyph) = printglyph(io, g)
# Base.show(io::IO, g::Glyph) = printglyph(io, g)
| VisualStringDistances | https://github.com/ericphanson/VisualStringDistances.jl.git |
|
[
"MIT"
] | 0.1.1 | 0b2c7d9d5c16629f165d03b8769a07ffd44c6ad7 | code | 5771 | using VisualStringDistances
using Test
using VisualStringDistances: glyph!, Glyph, GlyphCoordinates, ConstantVector
using UnbalancedOptimalTransport: fdot, KL, sinkhorn_divergence!
using LinearAlgebra: dot
printglyph_dashes = (io, g) -> printglyph(io, g; symbols=("#", "-"))
@testset "VisualStringDistances.jl" begin
@testset "Glyphs" begin
g = glyph!(hex2bytes("0000000018242442427E424242420000"))
@test sprint(printglyph_dashes, g) == """
--------
--------
--------
--------
---##---
--#--#--
--#--#--
-#----#-
-#----#-
-######-
-#----#-
-#----#-
-#----#-
-#----#-
--------
--------
"""
@test_throws ArgumentError glyph!(hex2bytes("0000000018242442427E424242420"))
@test_throws ErrorException Glyph(Char(0x12480))
g = glyph!(hex2bytes("00000000000003C0042004200840095008E01040100010002000200000000000"))
@test sprint(printglyph_dashes, g) == """
----------------
----------------
----------------
------####------
-----#----#-----
-----#----#-----
----#----#------
----#--#-#-#----
----#---###-----
---#-----#------
---#------------
---#------------
--#-------------
--#-------------
----------------
----------------
"""
end
@testset "Printing abc many ways" begin
abc_printed_rep = """
------------------------
------------------------
------------------------
---------#--------------
---------#--------------
---------#--------------
--####---#-###----####--
-#----#--##---#--#----#-
------#--#----#--#------
--#####--#----#--#------
-#----#--#----#--#------
-#----#--#----#--#------
-#---##--##---#--#----#-
--###-#--#-###----####--
------------------------
------------------------
"""
@test sprint(printglyph_dashes, Glyph("abc")) == abc_printed_rep
@test sprint(printglyph_dashes, "abc") == abc_printed_rep
@test sprint(printglyph_dashes, hcat(Glyph("a"), Glyph("bc"))) == abc_printed_rep
@test sprint(printglyph_dashes, GlyphCoordinates("abc")) == abc_printed_rep
abc_substring = Glyph(SubString("abcd", 1:3))
@test sprint(printglyph_dashes, abc_substring) == abc_printed_rep
end
@testset "More GlyphCoordinates" begin
@test GlyphCoordinates('a') ==
GlyphCoordinates("a") ==
GlyphCoordinates{Float64}("a")
@test length(GlyphCoordinates('a')) ≈ sum(!iszero, Glyph("a"))
# test indexing
@test GlyphCoordinates('a')[1] == collect(Tuple(findfirst(!iszero, Glyph("a"))))
gc = GlyphCoordinates('a')
@test gc[1:length(gc)] == gc[:] == gc.v
gcF32 = GlyphCoordinates{Float32}('a')
@test gcF32[:] ≈ gc[:]
end
@testset "ConstantVector" begin
for constant in (5.0, 2.2 + im * 3.2, 1f0)
T = typeof(constant)
c = ConstantVector{constant,T}(10)
@test collect(c) isa Vector{T}
@test length(c) == 10
@test c == fill(constant, 10) == collect(c)
@test sum(c) ≈ sum(collect(c))
f(x) = sin(x) + 10.0
d = randn(10)
@test fdot(f, c, d) ≈ fdot(f, collect(c), d)
@test fdot(f, d, c) ≈ fdot(f, d, collect(c))
@test dot(c, d) ≈ dot(collect(c), d)
@test dot(d, c) ≈ dot(d, collect(c))
end
end
@testset "`visual_distance`" begin
for T in (Float32, Float64), ϵ in (T(0.1), T(0.2)), ρ in (T(1.0), T(5.0))
v1 = visual_distance(T, "abc", "def"; D=KL(ρ), ϵ=ϵ)
v2 = visual_distance(T, "def", "ghi"; D=KL(ρ), ϵ=ϵ)
v3 = visual_distance(T, "abc", "ghi"; D=KL(ρ), ϵ=ϵ)
@test v1 >= 0
@test v1 ≈ visual_distance(T, "def", "abc"; D=KL(ρ), ϵ=ϵ) rtol = 1e-3
# Note: triangle inequality doesn't necessary hold in general
# (it's not proven, as far as I know)
# However, it does in this case!
@test v3 <= v1 + v2
v4 = visual_distance(T, "abc", "abd"; D=KL(ρ), ϵ=ϵ)
@test v4 <= v1
end
# Defaults
v1 = visual_distance("abc", "def")
v2 = visual_distance("def", "ghi")
v3 = visual_distance("abc", "ghi")
@test v1 >= 0
@test v1 ≈ visual_distance("def", "abc") rtol = 1e-3
@test v3 <= v1 + v2
v4 = visual_distance("abc", "abd")
@test v4 <= v1
abc_measure = word_measure("abc")
def_measure = word_measure("def")
@test v1 ≈ sinkhorn_divergence!(KL(1.0), abc_measure, def_measure, 0.1)
# Make sure we can use non-String types
abc_substring = SubString("abcd", 1:3)
def_substring = SubString("defh", 1:3)
@test v1 ≈ visual_distance(abc_substring, def_substring)
# Test normalization
@test visual_distance("abc", "def", normalize=sqrt) ≈ v1 / sqrt(3)
@test visual_distance("abc", "def", normalize=identity) ≈ v1 / 3
end
end
| VisualStringDistances | https://github.com/ericphanson/VisualStringDistances.jl.git |
|
[
"MIT"
] | 0.1.1 | 0b2c7d9d5c16629f165d03b8769a07ffd44c6ad7 | docs | 4327 | # VisualStringDistances
[![Build Status](https://github.com/ericphanson/VisualStringDistances.jl/workflows/CI/badge.svg)](https://github.com/ericphanson/VisualStringDistances.jl/actions)
[![Coverage](https://codecov.io/gh/ericphanson/VisualStringDistances.jl/branch/master/graph/badge.svg)](https://codecov.io/gh/ericphanson/VisualStringDistances.jl)
[![Stable](https://img.shields.io/badge/docs-stable-blue.svg)](https://ericphanson.github.io/VisualStringDistances.jl/stable)
[![Dev](https://img.shields.io/badge/docs-dev-blue.svg)](https://ericphanson.github.io/VisualStringDistances.jl/dev)
Provides a notion of "visual distance" between two strings, via the exported function `visual_distance`.
This package was the subject of the 2020 JuliaCon talk [How similar do two strings look? Visual distances in Julia](https://www.youtube.com/watch?v=hf2b9ganGxE),
so check that out if you like video explanations and animated gifs. For a text explanation, keep reading.
There are lots of ways to calculate distances between strings; [StringDistances.jl](https://github.com/matthieugomez/StringDistances.jl)
includes many of them, including edit distances which count how many "edits" of various kinds are needed to turn one string into another.
This package provides a distance measure via a very different mechanism. It tries to quantify how visually different two strings *look*.
It does this by rendering both strings with a font (GNU Unifont, in this case) to get a pixel bitmap, i.e. a matrix of 0s and 1s indicating
which pixels should be colored white or black in order to display a representation of the string.
Then these bitmaps are compared by a technique called *optimal transport*. In this technique, we see the 1s as units of mass setting at various
locations (corresponding to their indices in the matrix). We ask: how much mass do we need to move, and how far,
to turn the first bitmap into the second? We can formulate this as an optimization problem and solve it to give a notion of distance.
One subtlety we need to address is that if two strings have different amounts of black pixels in their bitmap, we cannot simply move mass around
to turn one bitmap into the other. We in fact need to create or destroy mass. We do this by adding a penalty term in our optimization problem
corresponding to creation or destruction of mass.
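To get a feel for that knob, one can vary `ρ` in the `KL(ρ)` divergence (the default penalty family; see the `visual_distance` docstring). A quick sketch, with the resulting numbers omitted since they also depend on the regularization parameter `ϵ`:

```julia
using VisualStringDistances

# "l" has much less ink than "W", so mass must be created to match them up.
# A larger ρ makes creating/destroying mass more expensive, which typically
# increases the reported distance:
visual_distance("l", "W"; D=VisualStringDistances.KL(0.5))
visual_distance("l", "W"; D=VisualStringDistances.KL(5.0))
```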
The actual optimization is performed by [UnbalancedOptimalTransport.jl](https://github.com/ericphanson/UnbalancedOptimalTransport.jl), and
the [docs](https://ericphanson.github.io/UnbalancedOptimalTransport.jl/stable/optimal_transport/)
for that package go into a lot more detail about optimal transport. In particular, we are actually computing the Sinkhorn divergence
corresponding to an entropically-regularized unbalanced optimal transport problem, following the algorithm of [SFVTP19].
[SFVTP19] Séjourné, T., Feydy, J., Vialard, F.-X., Trouvé, A., Peyré, G., 2019. Sinkhorn Divergences for Unbalanced Optimal Transport. [arXiv:1910.12958](https://arxiv.org/abs/1910.12958).
*Note*: While this package's source code is MIT licensed, it relies on GNU Unifont, which is GPL-licensed.
## Quick demo
```julia
julia> using VisualStringDistances
julia> printglyph("aaa")
#### #### ####
# # # # # #
# # #
##### ##### #####
# # # # # #
# # # # # #
# ## # ## # ##
### # ### # ### #
julia> printglyph("ZZZ")
###### ###### ######
# # #
# # #
# # #
# # #
# # #
# # #
# # #
# # #
###### ###### ######
julia> visual_distance("aaa", "ZZZ")
51.169602195312166
julia> printglyph("III")
##### ##### #####
# # #
# # #
# # #
# # #
# # #
# # #
# # #
# # #
##### ##### #####
julia> printglyph("lll")
## ## ##
# # #
# # #
# # #
# # #
# # #
# # #
# # #
# # #
# # #
##### ##### #####
julia> visual_distance("III", "lll")
9.7349485622592
```
| VisualStringDistances | https://github.com/ericphanson/VisualStringDistances.jl.git |
|
[
"MIT"
] | 0.1.1 | 0b2c7d9d5c16629f165d03b8769a07ffd44c6ad7 | docs | 145 | ```@meta
CurrentModule = VisualStringDistances
```
# VisualStringDistances
```@index
```
```@autodocs
Modules = [VisualStringDistances]
```
| VisualStringDistances | https://github.com/ericphanson/VisualStringDistances.jl.git |
|
[
"MIT"
] | 0.1.1 | 0b2c7d9d5c16629f165d03b8769a07ffd44c6ad7 | docs | 4429 | # Package names
One of the motivations for this package was to investigate using visual distances to look out for issues similar
to [typosquatting](https://en.wikipedia.org/wiki/Typosquatting) in the Julia General package registry.
The problem of interest is the following: say a user is following a Julia tutorial online, but a malicious
person has substituted a popular package name for a similiar-looking one in the tutorial. When the unsuspecting
user copy-pastes the commands to install the package, they don't realize they are installing the malicious one.
To prevent this kind of abuse, it could be useful to add an automated check to the registry process to check
that new package registrations' names aren't very close visually to existing packages, and to perhaps
issue a warning when they are.
[`visual_distance`](@ref) provides a means of evaluating how close two strings look. Let's investigate
it in the context of package names.
Let us consider some visually-confusable names, and compute their visual distances, as well as a simple
edit distance (the Damerau-Levenshtein distance).
```@repl pkgnames
using VisualStringDistances, DataFrames, StringDistances
const DL = DamerauLevenshtein();
# Define our distance measure
d(s1, s2) = visual_distance(s1, s2; normalize=x -> 5 + sqrt(x))
d((s1,s2)) = d(s1, s2)
df_subs = DataFrame([
("jellyfish", "jeIlyfish"), # https://developer-tech.com/news/2019/dec/05/python-libraries-dateutil-jellyfish-stealing-ssh-gpg-keys/
("DifferentialEquations", "DifferentIalEquations"),
("ANOVA", "AN0VA"),
("ODEInterfaceDiffEq", "0DEInterfaceDiffEq"),
("ValueOrientedRiskManagementInsurance", "ValueOrientedRiskManagementlnsurance"),
("IsoPkg", "lsoPkg"),
("DiffEqNoiseProcess", "DiffEgNoiseProcess"),
("Graph500", "Graph5O0")
]);
rename!(df_subs, [:name1, :name2]);
df_subs.DL = DL.(df_subs.name1, df_subs.name2);
df_subs.sqrt_normalized_DL = df_subs.DL ./ ( 5 .+ sqrt.(max.(length.(df_subs.name1), length.(df_subs.name2))) );
df_subs.sqrt_normalized_visual_dist = d.(df_subs.name1, df_subs.name2);
sort!(df_subs, :sqrt_normalized_visual_dist);
```
```@example pkgnames
df_subs
```
We can see that all the pairs have a DL distance of 1, since they are one edit apart. Their normalized
DL distances thus depend only on their length. However, their visual distances vary,
depending on which substitution was made. Note that GNU Unifont renders zeros with a slash through
the middle, and hence VisualStringDistances.jl sees "O" and "0" as fairly different.
Let us compare to some real package names from the registry. We will in fact consider all
package names, but then filter them down to a manageable list via the edit distance.
```@repl pkgnames
using Pkg
function get_all_package_names(registry_dir::AbstractString)
packages = [x["name"] for x in values(Pkg.TOML.parsefile(joinpath(registry_dir, "Registry.toml"))["packages"])]
sort!(packages)
unique!(packages)
return packages
end
names = get_all_package_names(expanduser("~/.julia/registries/General"));
filter!(x -> !endswith(x, "_jll"), names);
@info "Loaded list of non-JLL package names ($(length(names)) names)"
normalized_dl_cutoff = .2;
dl_cutoff = 1;
@info "Computing list of pairs of package names within $(dl_cutoff) in DL distance or $(normalized_dl_cutoff) in normalized DL distance..."
@time df = DataFrame(collect( (name1=names[i],name2=names[j]) for i = 1:length(names) for j = 1:(i-1) if (normalize(DL)(names[i], names[j]) <= normalized_dl_cutoff) || DL(names[i], names[j]) <= dl_cutoff));
@info "Found $(size(df,1)) pairs of packages meeting the criteria.";
df.DL = DL.(df.name1, df.name2);
df.sqrt_normalized_DL = df.DL ./ ( 5 .+ sqrt.(max.(length.(df.name1), length.(df.name2))) );
@time df.sqrt_normalized_visual_dist = d.(df.name1, df.name2);
```
Let's look at the 5 closest pairs according to the normalized visual distance.
```@example pkgnames
sort!(df, :sqrt_normalized_visual_dist);
df[1:5, :]
```
Here, we see that by this measurement, the closest pair of packages is "Modia"
and "Media". Indeed they look fairly similar, although they are not as easy
to mistake for each other as many of the earlier examples.
Let's compare to the 5 closest pairs according to the normalized edit distance.
```@example pkgnames
sort!(df, :sqrt_normalized_DL);
df[1:5, :]
```
These are just the longest package names that are 1 edit away from each other.
| VisualStringDistances | https://github.com/ericphanson/VisualStringDistances.jl.git |
|
[
"MIT"
] | 0.1.1 | 0b2c7d9d5c16629f165d03b8769a07ffd44c6ad7 | docs | 965 | # Visualizations
The script [`scripts/anim/plotting.jl`](../../scripts/anim/plotting.jl) can be used
to generate pictures showing the transport from one string to another.
First, we can use balanced optimal transport to visualize the difference between "hello" (spelled the usual way)
and "heIIo" (with uppercase I's ("eyes") in place of lowercase l's ("ells")).
```julia
using VisualStringDistances
using UnbalancedOptimalTransport: KL, Balanced
include(joinpath(@__DIR__, "..", "plotting.jl"))
animate_words("hello", "heIIo"; D = Balanced(), normalize_density=true, save_path="hello_heIIo_balanced.gif")
```
![](../assets/hello_heIIo_balanced.gif)
We see that mass has to move from all the letters in order to create part of the I's. In contrast, let us
try an unbalanced method that instead allows creation or destruction of mass with a penalty.
```julia
animate_words("hello", "heIIo"; D = KL(1.0), save_path="hello_heIIo.gif")
```
![](../assets/hello_heIIo.gif)
| VisualStringDistances | https://github.com/ericphanson/VisualStringDistances.jl.git |
|
[
"MIT"
] | 1.0.2 | 53bb909d1151e57e2484c3d1b53e19552b887fb2 | code | 6631 | # This file is a part of Julia. License is MIT: https://julialang.org/license
module Grisu
export print_shortest
export DIGITS, DIGITSs, grisu
const SHORTEST = 1
const FIXED = 2
const PRECISION = 3
include("float.jl")
include("fastshortest.jl")
include("fastprecision.jl")
include("fastfixed.jl")
include("bignums.jl")
include("bignum.jl")
const DIGITS = Vector{UInt8}(undef, 309+17)
const BIGNUMS = [Bignums.Bignum(),Bignums.Bignum(),Bignums.Bignum(),Bignums.Bignum()]
# NOTE: DIGITS[s] is deprecated; you should use getbuf() instead.
const DIGITSs = [DIGITS]
const BIGNUMSs = [BIGNUMS]
function __init__()
Threads.resize_nthreads!(DIGITSs)
Threads.resize_nthreads!(BIGNUMSs)
end
function getbuf()
tls = task_local_storage()
d = get(tls, :DIGITS, nothing)
if d === nothing
d = Vector{UInt8}(undef, 309+17)
tls[:DIGITS] = d
end
return d::Vector{UInt8}
end
"""
(len, point, neg) = Grisu.grisu(v::AbstractFloat, mode, requested_digits, [buffer], [bignums])
Convert the number `v` to decimal using the Grisu algorithm.
`mode` can be one of:
- `Grisu.SHORTEST`: convert to the shortest decimal representation which can be "round-tripped" back to `v`.
- `Grisu.FIXED`: round to `requested_digits` digits.
- `Grisu.PRECISION`: round to `requested_digits` significant digits.
The characters are written as bytes to `buffer`, with a terminating NUL byte, and `bignums` are used internally as part of the correction step. You can call `Grisu.getbuf()` to obtain a suitable task-local buffer.
The returned tuple contains:
- `len`: the number of digits written to `buffer` (excluding NUL)
- `point`: the location of the radix point relative to the start of the array (e.g. if
`point == 3`, then the radix point should be inserted between the 3rd and 4th
digit). Note that this can be negative (for very small values), or greater than `len`
(for very large values).
- `neg`: the signbit of `v` (see [`signbit`](@ref)).
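# Examples
A quick illustration (the digits land in `buffer` as ASCII bytes):
```julia-repl
julia> buf = Grisu.getbuf();

julia> Grisu.grisu(3.25, Grisu.SHORTEST, 0, buf)
(3, 1, false)

julia> String(buf[1:3])
"325"
```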
"""
function grisu(v::AbstractFloat,mode,requested_digits,buffer=DIGITSs[Threads.threadid()],bignums=BIGNUMSs[Threads.threadid()])
if signbit(v)
neg = true
v = -v
else
neg = false
end
if mode == PRECISION && requested_digits == 0
buffer[1] = 0x00
len = 0
return 0, 0, neg
end
if v == 0.0
buffer[1] = 0x30
buffer[2] = 0x00
len = point = 1
return len, point, neg
end
if mode == SHORTEST
status,len,point = fastshortest(v,buffer)
elseif mode == FIXED
status,len,point = fastfixedtoa(v,0,requested_digits,buffer)
elseif mode == PRECISION
status,len,point = fastprecision(v,requested_digits,buffer)
end
status && return len-1, point, neg
status, len, point = bignumdtoa(v,mode,requested_digits,buffer,bignums)
return len-1, point, neg
end
nanstr(x::AbstractFloat) = "NaN"
nanstr(x::Float32) = "NaN32"
nanstr(x::Float16) = "NaN16"
infstr(x::AbstractFloat) = "Inf"
infstr(x::Float32) = "Inf32"
infstr(x::Float16) = "Inf16"
function _show(io::IO, x::AbstractFloat, mode, n::Int, typed, compact)
isnan(x) && return print(io, typed ? nanstr(x) : "NaN")
if isinf(x)
signbit(x) && print(io,'-')
print(io, typed ? infstr(x) : "Inf")
return
end
typed && isa(x,Float16) && print(io, "Float16(")
buffer = getbuf()
len, pt, neg = grisu(x,mode,n,buffer)
pdigits = pointer(buffer)
if mode == PRECISION
while len > 1 && buffer[len] == 0x30
len -= 1
end
end
neg && print(io,'-')
exp_form = pt <= -4 || pt > 6
exp_form = exp_form || (pt >= len && abs(mod(x + 0.05, 10^(pt - len)) - 0.05) > 0.05) # see issue #6608
if exp_form # .00001 to 100000.
# => #.#######e###
# assumes ASCII/UTF8 encoding of digits is okay for out:
unsafe_write(io, pdigits, 1)
print(io, '.')
if len > 1
unsafe_write(io, pdigits+1, len-1)
else
print(io, '0')
end
print(io, (typed && isa(x,Float32)) ? 'f' : 'e')
print(io, string(pt - 1))
typed && isa(x,Float16) && print(io, ")")
return
elseif pt <= 0
# => 0.00########
print(io, "0.")
while pt < 0
print(io, '0')
pt += 1
end
unsafe_write(io, pdigits, len)
elseif pt >= len
# => ########00.0
unsafe_write(io, pdigits, len)
while pt > len
print(io, '0')
len += 1
end
print(io, ".0")
else # => ####.####
unsafe_write(io, pdigits, pt)
print(io, '.')
unsafe_write(io, pdigits+pt, len-pt)
end
typed && !compact && isa(x,Float32) && print(io, "f0")
typed && isa(x,Float16) && print(io, ")")
nothing
end
# normal:
# 0 < pt < len ####.#### len+1
# pt <= 0 0.000######## len-pt+1
# len <= pt (dot) ########000. pt+1
# len <= pt (no dot) ########000 pt
# exponential:
# pt <= 0 ########e-### len+k+2
# 0 < pt ########e### len+k+1
function _print_shortest(io::IO, x::AbstractFloat, dot::Bool, mode, n::Int)
isnan(x) && return print(io, "NaN")
x < 0 && print(io,'-')
isinf(x) && return print(io, "Inf")
buffer = getbuf()
len, pt, neg = grisu(x,mode,n,buffer)
pdigits = pointer(buffer)
e = pt-len
k = -9<=e<=9 ? 1 : 2
if -pt > k+1 || e+dot > k+1
# => ########e###
unsafe_write(io, pdigits+0, len)
print(io, 'e')
print(io, string(e))
return
elseif pt <= 0
# => 0.000########
print(io, "0.")
while pt < 0
print(io, '0')
pt += 1
end
unsafe_write(io, pdigits+0, len)
elseif e >= dot
# => ########000.
unsafe_write(io, pdigits+0, len)
while e > 0
print(io, '0')
e -= 1
end
if dot
print(io, '.')
end
else # => ####.####
unsafe_write(io, pdigits+0, pt)
print(io, '.')
unsafe_write(io, pdigits+pt, len-pt)
end
nothing
end
"""
print_shortest(io::IO, x)
Print the shortest possible representation, with the minimum number of consecutive non-zero
digits, of number `x`, ensuring that it would parse to the exact same number.
"""
print_shortest(io::IO, x::AbstractFloat, dot::Bool) = _print_shortest(io, x, dot, SHORTEST, 0)
print_shortest(io::IO, x::Union{AbstractFloat,Integer}) = print_shortest(io, float(x), false)
end # module
| Grisu | https://github.com/JuliaAttic/Grisu.jl.git |
|
[
"MIT"
] | 1.0.2 | 53bb909d1151e57e2484c3d1b53e19552b887fb2 | code | 9170 | # This file is a part of Julia, but is derived from
# https://github.com/google/double-conversion which has the following license
#
# Copyright 2006-2014, the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
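# Shift the significand left until its hidden bit is set (normalizing subnormals),
# decrementing the exponent to match.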
function normalizedexponent(significand, exponent::Int32)
significand = UInt64(significand)
while (significand & HiddenBit(Float64)) == 0
significand <<= UInt64(1)
exponent -= Int32(1)
end
return exponent
end
function bignumdtoa(v,mode,requested_digits::Int,buffer,bignums)
significand = _significand(v)
exponent = _exponent(v)
lower_boundary_is_closer = lowerboundaryiscloser(v)
need_boundary_deltas = mode == SHORTEST
is_even = (significand & 1) == 0
normalized_exponent = normalizedexponent(significand, exponent)
estimated_power = estimatepower(Int(normalized_exponent))
if mode == FIXED && -estimated_power - 1 > requested_digits
buffer[1] = 0
len = 1
decimal_point = -requested_digits
return true, len, decimal_point
end
num, den, minus, plus = bignums[1], bignums[2], bignums[3], bignums[4]
initialscaledstartvalues!(significand,exponent,lower_boundary_is_closer,
estimated_power,need_boundary_deltas,
num,den,minus,plus)
decimal_point = fixupmultiply10!(estimated_power,is_even,num,den,minus,plus)
if mode == SHORTEST
len = generateshortestdigits!(num,den,minus,plus,is_even,buffer)
elseif mode == FIXED
len, decimal_point = bignumtofixed!(requested_digits,num,den,buffer,decimal_point)
elseif mode == PRECISION
len, decimal_point = generatecounteddigits!(requested_digits,num,den,buffer,decimal_point)
end
buffer[len] = 0
return true, len, decimal_point
end
function generateshortestdigits!(num,den,minus,plus,is_even,buffer)
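    # If the two boundary deltas are equal, alias them so the scaling below only updates one object.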
minus == plus && (plus = minus)
len = 1
while true
digit = Bignums.dividemodulointbignum!(num,den)
buffer[len] = 0x30 + (digit % UInt8)
len += 1
in_delta_room_minus = is_even ?
Bignums.lessequal(num,minus) : Bignums.less(num,minus)
in_delta_room_plus = is_even ?
Bignums.pluscompare(num,plus,den) >= 0 : Bignums.pluscompare(num,plus,den) > 0
if !in_delta_room_minus && !in_delta_room_plus
Bignums.times10!(num)
Bignums.times10!(minus)
minus != plus && Bignums.times10!(plus)
elseif in_delta_room_minus && in_delta_room_plus
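        # Compare 2*num against den: < 0 rounds the last digit down, > 0 rounds it up,
        # and == 0 is an exact tie, broken by rounding to even.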
compare = Bignums.pluscompare(num,num,den)
if compare < 0
elseif compare > 0
buffer[len - 1] += 1
else
if (buffer[len - 1] - 0x30) % 2 == 0
else
buffer[len - 1] += 1
end
end
return len
elseif in_delta_room_minus
return len
else
buffer[len - 1] += 1
return len
end
end
end
function generatecounteddigits!(count,num,den,buffer,decimal_point)
for i = 1:(count-1)
digit = Bignums.dividemodulointbignum!(num,den)
buffer[i] = 0x30 + (digit % UInt8)
Bignums.times10!(num)
end
digit = Bignums.dividemodulointbignum!(num,den)
if Bignums.pluscompare(num,num,den) >= 0
digit += 1
end
buffer[count] = 0x30 + (digit % UInt8)
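    # If rounding bumped the last digit to "10", propagate the carry toward the front.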
for i = count:-1:2
buffer[i] != 0x30 + 10 && break
buffer[i] = 0x30
buffer[i - 1] += 1
end
if buffer[1] == 0x30 + 10
buffer[1] = 0x31
decimal_point += 1
end
len = count+1
return len, decimal_point
end
function bignumtofixed!(requested_digits,num,den,buffer,decimal_point)
if -decimal_point > requested_digits
decimal_point = -requested_digits
len = 1
return len, decimal_point
elseif -decimal_point == requested_digits
Bignums.times10!(den)
if Bignums.pluscompare(num,num,den) >= 0
buffer[1] = 0x31
len = 2
decimal_point += 1
else
len = 1
end
return len, decimal_point
else
needed_digits = decimal_point + requested_digits
len, decimal_point = generatecounteddigits!(
needed_digits,num,den,buffer,decimal_point)
end
return len, decimal_point
end
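# Estimate the decimal exponent k with 10^(k-1) <= v < 10^k from the normalized binary
# exponent; the 1e-10 fudge nudges borderline cases downward (as in double-conversion).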
const k1Log10 = 0.30102999566398114
const kSignificandSize = SignificandSize(Float64)
estimatepower(exponent::Int) = ceil(Int,(exponent + kSignificandSize - 1) * k1Log10 - 1e-10)
function init3!(
significand,exponent,estimated_power,need_boundary_deltas,
num,den,minus,plus)
Bignums.assignuint64!(num,UInt64(significand))
Bignums.shiftleft!(num,exponent)
Bignums.assignpoweruint16!(den,UInt16(10),estimated_power)
if need_boundary_deltas
Bignums.shiftleft!(den,1)
Bignums.shiftleft!(num,1)
Bignums.assignuint16!(plus,UInt16(1))
Bignums.shiftleft!(plus,exponent)
Bignums.assignuint16!(minus,UInt16(1))
Bignums.shiftleft!(minus,exponent)
else
Bignums.zero!(plus)
Bignums.zero!(minus)
end
return
end
function init1!(
significand,exponent,estimated_power,need_boundary_deltas,
num,den,minus,plus)
Bignums.assignuint64!(num,UInt64(significand))
Bignums.assignpoweruint16!(den,UInt16(10),estimated_power)
Bignums.shiftleft!(den,-exponent)
if need_boundary_deltas
Bignums.shiftleft!(den,1)
Bignums.shiftleft!(num,1)
Bignums.assignuint16!(plus,UInt16(1))
Bignums.assignuint16!(minus,UInt16(1))
else
Bignums.zero!(plus)
Bignums.zero!(minus)
end
return
end
function init2!(
significand,exponent,estimated_power,need_boundary_deltas,
num,den,minus,plus)
power_ten = num
Bignums.assignpoweruint16!(power_ten,UInt16(10),-estimated_power)
if need_boundary_deltas
Bignums.assignbignum!(plus,power_ten)
Bignums.assignbignum!(minus,power_ten)
else
Bignums.zero!(plus)
Bignums.zero!(minus)
end
Bignums.multiplybyuint64!(num,UInt64(significand))
Bignums.assignuint16!(den,UInt16(1))
Bignums.shiftleft!(den,-exponent)
if need_boundary_deltas
Bignums.shiftleft!(num,1)
Bignums.shiftleft!(den,1)
end
return
end
function initialscaledstartvalues!(significand,
exponent,lower_boundary_is_closer,estimated_power,
need_boundary_deltas,num,den,minus,plus)
if exponent >= 0
init3!(significand, exponent, estimated_power, need_boundary_deltas,num,den,minus,plus)
elseif estimated_power >= 0
init1!(significand, exponent, estimated_power, need_boundary_deltas,num,den,minus,plus)
else
init2!(significand, exponent, estimated_power, need_boundary_deltas,num,den,minus,plus)
end
if need_boundary_deltas && lower_boundary_is_closer
Bignums.shiftleft!(den,1)
Bignums.shiftleft!(num,1)
Bignums.shiftleft!(plus,1)
end
return
end
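# After this call the invariant (up to a common power-of-two scaling applied
# when boundary deltas are tracked) is v = num/den * 10^estimated_power, with
# minus and plus holding the distances to the neighbouring representable
# values, also relative to den. The three initializers split by sign:
# init3! covers exponent >= 0, init1! a negative exponent with a nonnegative
# power estimate, and init2! the case where both are negative.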
function fixupmultiply10!(estimated_power,is_even,num,den,minus,plus)
in_range = is_even ? Bignums.pluscompare(num,plus,den) >= 0 :
Bignums.pluscompare(num,plus,den) > 0
if in_range
decimal_point = estimated_power + 1
else
decimal_point = estimated_power
Bignums.times10!(num)
if minus == plus
Bignums.times10!(minus)
Bignums.assignbignum!(plus,minus)
else
Bignums.times10!(minus)
Bignums.times10!(plus)
end
end
return decimal_point
end
# This file is a part of Julia, but is derived from
# https://github.com/google/double-conversion which has the following license
#
# Copyright 2006-2014, the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
module Bignums
import Base: ==, <
export Bignum
const kMaxSignificantBits = 3584
const Chunk = UInt32
const DoubleChunk = UInt64
const kChunkSize = sizeof(Chunk) * 8
const kDoubleChunkSize = sizeof(DoubleChunk) * 8
# With a bigit size of 28 we lose some bits, but a double still fits easily
# into two chunks, and more importantly we can use the Comba multiplication.
const kBigitSize = 28
const kBigitMask = Chunk((1 << kBigitSize) - 1)
# Every instance allocates kBigitCapacity chunks up front. Bignums cannot
# grow; there are no checks that the preallocated space is sufficient.
const kBigitCapacity = div(kMaxSignificantBits, kBigitSize)
mutable struct Bignum
bigits::Vector{UInt32}
used_digits::Int32
exponent::Int32
function Bignum()
bigits = Vector{UInt32}(undef, kBigitCapacity)
@inbounds for i = 1:kBigitCapacity
bigits[i] = 0
end
new(bigits,0,0)
end
end
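# A Bignum stores its value in base 2^kBigitSize with an exponent measured in
# bigits, i.e. value = sum(bigits[i] * 2^(kBigitSize*(exponent + i - 1)) for
# i = 1:used_digits).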
==(a::Bignum,b::Bignum) = compare(a,b) == 0
<(a::Bignum,b::Bignum) = compare(a,b) < 0
times10!(x::Bignum) = multiplybyuint32!(x,UInt32(10))
plusequal(a,b,c) = pluscompare(a,b,c) == 0
pluslessequal(a,b,c) = pluscompare(a,b,c) <= 0
plusless(a,b,c) = pluscompare(a,b,c) < 0
lessequal(a::Bignum,b::Bignum) = compare(a,b) <= 0
less(a::Bignum,b::Bignum) = compare(a,b) < 0
bigitlength(x::Bignum) = x.used_digits + x.exponent
bitsize(value) = 8 * sizeof(value)
function zero!(x::Bignum)
for i = 1:x.used_digits
@inbounds x.bigits[i] = 0
end
x.used_digits = 0
x.exponent = 0
return
end
function clamp!(x::Bignum)
@inbounds while (x.used_digits > 0 && x.bigits[x.used_digits] == 0)
x.used_digits -= 1
end
x.used_digits == 0 && (x.exponent = 0)
return
end
isclamped(x::Bignum) = x.used_digits == 0 || x.bigits[x.used_digits] != 0
function align!(x::Bignum,other::Bignum)
@inbounds if x.exponent > other.exponent
zero_digits = x.exponent - other.exponent
for i = x.used_digits:-1:1
x.bigits[i + zero_digits] = x.bigits[i]
end
for i = 1:zero_digits
x.bigits[i] = 0
end
x.used_digits += zero_digits
x.exponent -= zero_digits
end
return
end
function bigitshiftleft!(x::Bignum,shift_amount)
carry::UInt32 = 0
@inbounds begin
for i = 1:x.used_digits
new_carry::Chunk = x.bigits[i] >> (kBigitSize - shift_amount)
x.bigits[i] = ((x.bigits[i] << shift_amount) + carry) & kBigitMask
carry = new_carry
end
if carry != 0
x.bigits[x.used_digits+1] = carry
x.used_digits += 1
end
end
return
end
function subtracttimes!(x::Bignum,other::Bignum,factor)
if factor < 3
for i = 1:factor
subtractbignum!(x,other)
end
return
end
borrow::Chunk = 0
exponent_diff = other.exponent - x.exponent
@inbounds begin
for i = 1:other.used_digits
product::DoubleChunk = DoubleChunk(factor) * other.bigits[i]
remove::DoubleChunk = borrow + product
difference::Chunk = (x.bigits[i+exponent_diff] - (remove & kBigitMask)) % Chunk
x.bigits[i+exponent_diff] = difference & kBigitMask
borrow = ((difference >> (kChunkSize - 1)) + (remove >> kBigitSize)) % Chunk
end
for i = (other.used_digits + exponent_diff + 1):x.used_digits
borrow == 0 && return
difference::Chunk = x.bigits[i] - borrow
x.bigits[i] = difference & kBigitMask
borrow = difference >> (kChunkSize - 1)
end
end
clamp!(x)
end
function assignuint16!(x::Bignum,value::UInt16)
zero!(x)
value == 0 && return
x.bigits[1] = value
x.used_digits = 1
return
end
const kUInt64Size = 64
function assignuint64!(x::Bignum,value::UInt64)
zero!(x)
value == 0 && return
needed_bigits = div(kUInt64Size,kBigitSize) + 1
@inbounds for i = 1:needed_bigits
x.bigits[i] = value & kBigitMask
value >>= kBigitSize
end
x.used_digits = needed_bigits
clamp!(x)
end
function assignbignum!(x::Bignum,other::Bignum)
x.exponent = other.exponent
@inbounds begin
for i = 1:other.used_digits
x.bigits[i] = other.bigits[i]
end
for i = (other.used_digits+1):x.used_digits
x.bigits[i] = 0
end
end
x.used_digits = other.used_digits
return
end
function adduint64!(x::Bignum,operand::UInt64)
operand == 0 && return
other = Bignum()
assignuint64!(other,operand)
addbignum!(x,other)
end
function addbignum!(x::Bignum,other::Bignum)
align!(x,other)
carry::Chunk = 0
bigit_pos = other.exponent - x.exponent
@inbounds for i = 1:other.used_digits
sum::Chunk = x.bigits[bigit_pos+1] + other.bigits[i] + carry
x.bigits[bigit_pos+1] = sum & kBigitMask
carry = sum >> kBigitSize
bigit_pos += 1
end
@inbounds while carry != 0
sum = x.bigits[bigit_pos+1] + carry
x.bigits[bigit_pos+1] = sum & kBigitMask
carry = sum >> kBigitSize
bigit_pos += 1
end
x.used_digits = max(bigit_pos,x.used_digits)
return
end
function subtractbignum!(x::Bignum,other::Bignum)
align!(x,other)
offset = other.exponent - x.exponent
borrow = Chunk(0)
@inbounds begin
for i = 1:other.used_digits
difference = x.bigits[i+offset] - other.bigits[i] - borrow
x.bigits[i+offset] = difference & kBigitMask
borrow = difference >> (kChunkSize - 1)
end
i = other.used_digits+1
while borrow != 0
difference = x.bigits[i+offset] - borrow
x.bigits[i+offset] = difference & kBigitMask
borrow = difference >> (kChunkSize - 1)
i += 1
end
end
clamp!(x)
end
function shiftleft!(x::Bignum,shift_amount)
x.used_digits == 0 && return
x.exponent += div(shift_amount,kBigitSize)
local_shift = shift_amount % kBigitSize
bigitshiftleft!(x,local_shift)
end
function multiplybyuint32!(x::Bignum,factor::UInt32)
factor == 1 && return
if factor == 0
zero!(x)
return
end
x.used_digits == 0 && return
carry::DoubleChunk = 0
@inbounds begin
for i = 1:x.used_digits
product::DoubleChunk = (factor % DoubleChunk) * x.bigits[i] + carry
x.bigits[i] = (product & kBigitMask) % Chunk
carry = product >> kBigitSize
end
while carry != 0
x.bigits[x.used_digits+1] = carry & kBigitMask
x.used_digits += 1
carry >>= kBigitSize
end
end
return
end
function multiplybyuint64!(x::Bignum,factor::UInt64)
factor == 1 && return
if factor == 0
zero!(x)
return
end
carry::UInt64 = 0
low::UInt64 = factor & 0xFFFFFFFF
high::UInt64 = factor >> 32
@inbounds begin
for i = 1:x.used_digits
product_low::UInt64 = low * x.bigits[i]
product_high::UInt64 = high * x.bigits[i]
tmp::UInt64 = (carry & kBigitMask) + product_low
x.bigits[i] = tmp & kBigitMask
carry = (carry >> kBigitSize) + (tmp >> kBigitSize) +
(product_high << (32 - kBigitSize))
end
while carry != 0
x.bigits[x.used_digits+1] = carry & kBigitMask
x.used_digits += 1
carry >>= kBigitSize
end
end
return
end
const kFive27 = UInt64(0x6765c793fa10079d)
const kFive1 = UInt16(5)
const kFive2 = UInt16(kFive1 * 5)
const kFive3 = UInt16(kFive2 * 5)
const kFive4 = UInt16(kFive3 * 5)
const kFive5 = UInt16(kFive4 * 5)
const kFive6 = UInt16(kFive5 * 5)
const kFive7 = UInt32(kFive6 * 5)
const kFive8 = UInt32(kFive7 * 5)
const kFive9 = UInt32(kFive8 * 5)
const kFive10 = UInt32(kFive9 * 5)
const kFive11 = UInt32(kFive10 * 5)
const kFive12 = UInt32(kFive11 * 5)
const kFive13 = UInt32(kFive12 * 5)
const kFive1_to_12 = UInt32[kFive1, kFive2, kFive3, kFive4, kFive5, kFive6,
kFive7, kFive8, kFive9, kFive10, kFive11, kFive12]
function multiplybypoweroften!(x::Bignum,exponent)
exponent == 0 && return
x.used_digits == 0 && return
remaining_exponent = exponent
while remaining_exponent >= 27
multiplybyuint64!(x,kFive27)
remaining_exponent -= 27
end
while remaining_exponent >= 13
multiplybyuint32!(x,kFive13)
remaining_exponent -= 13
end
remaining_exponent > 0 && multiplybyuint32!(x,
kFive1_to_12[remaining_exponent])
shiftleft!(x,exponent)
end
function square!(x::Bignum)
product_length = 2 * x.used_digits
(1 << (2 * (kChunkSize - kBigitSize))) <= x.used_digits && error("unimplemented")
accumulator::DoubleChunk = 0
copy_offset = x.used_digits
@inbounds begin
for i = 1:x.used_digits
x.bigits[copy_offset + i] = x.bigits[i]
end
for i = 1:x.used_digits
bigit_index1 = i-1
bigit_index2 = 0
while bigit_index1 >= 0
chunk1::Chunk = x.bigits[copy_offset + bigit_index1 + 1]
chunk2::Chunk = x.bigits[copy_offset + bigit_index2 + 1]
accumulator += (chunk1 % DoubleChunk) * chunk2
bigit_index1 -= 1
bigit_index2 += 1
end
x.bigits[i] = (accumulator % Chunk) & kBigitMask
accumulator >>= kBigitSize
end
for i = x.used_digits+1:product_length
bigit_index1 = x.used_digits - 1
bigit_index2 = i - bigit_index1 - 1
while bigit_index2 < x.used_digits
chunk1::Chunk = x.bigits[copy_offset + bigit_index1 + 1]
chunk2::Chunk = x.bigits[copy_offset + bigit_index2 + 1]
accumulator += (chunk1 % DoubleChunk) * chunk2
bigit_index1 -= 1
bigit_index2 += 1
end
x.bigits[i] = (accumulator % Chunk) & kBigitMask
accumulator >>= kBigitSize
end
end
x.used_digits = product_length
x.exponent *= 2
clamp!(x)
end
function assignpoweruint16!(x::Bignum,base::UInt16,power_exponent::Int)
if power_exponent == 0
assignuint16!(x,UInt16(1))
return
end
zero!(x)
shifts::Int = 0
while base & UInt16(1) == UInt16(0)
base >>= UInt16(1)
shifts += 1
end
bit_size::Int = 0
    tmp_base::Int = base
while tmp_base != 0
tmp_base >>= 1
bit_size += 1
end
final_size = bit_size * power_exponent
mask::Int = 1
while power_exponent >= mask
mask <<= 1
end
mask >>= 2
this_value::UInt64 = base
delayed_multiplication = false
max_32bits::UInt64 = 0xFFFFFFFF
while mask != 0 && this_value <= max_32bits
this_value *= this_value
if (power_exponent & mask) != 0
base_bits_mask::UInt64 = ~(UInt64(1) << (64 - bit_size) - 1)
high_bits_zero = (this_value & base_bits_mask) == 0
if high_bits_zero
this_value *= base
else
delayed_multiplication = true
end
end
mask >>= 1
end
assignuint64!(x,this_value)
delayed_multiplication && multiplybyuint32!(x,UInt32(base))
while mask != 0
square!(x)
(power_exponent & mask) != 0 && multiplybyuint32!(x,UInt32(base))
mask >>= 1
end
shiftleft!(x,shifts * power_exponent)
end
function dividemodulointbignum!(x::Bignum,other::Bignum)
bigitlength(x) < bigitlength(other) && return UInt16(0)
align!(x,other)
result::UInt16 = 0
@inbounds begin
while bigitlength(x) > bigitlength(other)
result += x.bigits[x.used_digits] % UInt16
subtracttimes!(x,other,x.bigits[x.used_digits])
end
this_bigit::Chunk = x.bigits[x.used_digits]
other_bigit::Chunk = other.bigits[other.used_digits]
if other.used_digits == 1
quotient = reinterpret(Int32,div(this_bigit,other_bigit))
x.bigits[x.used_digits] = this_bigit - other_bigit * reinterpret(UInt32,quotient)
result += quotient % UInt16
clamp!(x)
return result
end
end
division_estimate = reinterpret(Int32,div(this_bigit,other_bigit+Chunk(1)))
result += division_estimate % UInt16
subtracttimes!(x,other,division_estimate)
other_bigit * (division_estimate+1) > this_bigit && return result
while lessequal(other, x)
subtractbignum!(x,other)
result += UInt16(1)
end
return result
end
function pluscompare(a::Bignum,b::Bignum,c::Bignum)
bigitlength(a) < bigitlength(b) && return pluscompare(b,a,c)
bigitlength(a) + 1 < bigitlength(c) && return -1
bigitlength(a) > bigitlength(c) && return 1
a.exponent >= bigitlength(b) && bigitlength(a) < bigitlength(c) && return -1
borrow::Chunk = 0
min_exponent = min(a.exponent,b.exponent,c.exponent)
for i = (bigitlength(c)-1):-1:min_exponent
chunk_a::Chunk = bigitat(a,i)
chunk_b::Chunk = bigitat(b,i)
chunk_c::Chunk = bigitat(c,i)
sum::Chunk = chunk_a + chunk_b
if sum > chunk_c + borrow
return 1
else
borrow = chunk_c + borrow - sum
borrow > 1 && return -1
borrow <<= kBigitSize
end
end
borrow == 0 && return 0
return -1
end
function compare(a::Bignum,b::Bignum)
bigit_length_a = bigitlength(a)
bigit_length_b = bigitlength(b)
bigit_length_a < bigit_length_b && return -1
bigit_length_a > bigit_length_b && return 1
for i = (bigit_length_a-1):-1:min(a.exponent,b.exponent)
bigit_a::Chunk = bigitat(a,i)
bigit_b::Chunk = bigitat(b,i)
bigit_a < bigit_b && return -1
bigit_a > bigit_b && return 1
end
return 0
end
function bigitat(x::Bignum,index)
index >= bigitlength(x) && return Chunk(0)
index < x.exponent && return Chunk(0)
@inbounds ret = x.bigits[index - x.exponent+1]::Chunk
return ret
end
end # module
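# A minimal usage sketch of the module above (names as defined in it):
#
#     x = Grisu.Bignums.Bignum()
#     Grisu.Bignums.assignuint64!(x, UInt64(42))
#     Grisu.Bignums.times10!(x)              # x now represents 420
#     y = Grisu.Bignums.Bignum()
#     Grisu.Bignums.assignuint16!(y, UInt16(420))
#     x == y                                 # true, via compare(x, y) == 0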
# This file is a part of Julia, but is derived from
# https://github.com/google/double-conversion (same license header as above).
const kDoubleSignificandSize = 53
function filldigits32fixedlength(n1,requested_len,buffer,len)
for i = (requested_len-1):-1:0
buffer[len+i] = 0x30 + n1 % 10
n1 = div(n1,10)
end
return len + requested_len
end
function filldigits32(n,buffer,len)
n_len = 0
while n != 0
digit = n % 10
n = div(n,10)
buffer[len+n_len] = 0x30 + digit
n_len += 1
end
i,j = len, len + n_len - 1
while i < j
buffer[i], buffer[j] = buffer[j], buffer[i]
i += 1
j -= 1
end
return len + n_len
end
function filldigits64fixedlength(n2,buffer,len)
kTen7 = 10000000
part2 = n2 % kTen7
n2 = div(n2,kTen7)
part0, part1 = divrem(n2,kTen7)
len = filldigits32fixedlength(part0, 3, buffer, len)
len = filldigits32fixedlength(part1, 7, buffer, len)
len = filldigits32fixedlength(part2, 7, buffer, len)
return len
end
function filldigits64(n3,buffer,len)
kTen7 = 10000000
part2 = n3 % kTen7
n3 = div(n3,kTen7)
part0, part1 = divrem(n3,kTen7)
if part0 != 0
len = filldigits32(part0, buffer, len)
len = filldigits32fixedlength(part1, 7, buffer, len)
len = filldigits32fixedlength(part2, 7, buffer, len)
elseif part1 != 0
len = filldigits32(part1, buffer, len)
len = filldigits32fixedlength(part2, 7, buffer, len)
else
len = filldigits32(part2, buffer, len)
end
return len
end
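# filldigits64 splits n into three base-10^7 limbs so that each part fits
# comfortably in 32-bit arithmetic; e.g. n = 1234567890123456 yields the
# parts 12, 3456789 and 0123456, printed as "12" * "3456789" * "0123456".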
function roundup(buffer, len, decimal_point)
if len == 1
buffer[1] = 0x31
decimal_point = 1
len = 2
return len, decimal_point
end
buffer[len - 1] += 1
for i = (len-1):-1:2
buffer[i] != 0x30 + 10 && return len, decimal_point
buffer[i] = 0x30
buffer[i - 1] += 1
end
if buffer[1] == 0x30 + 10
buffer[1] = 0x31
decimal_point += 1
end
return len, decimal_point
end
function fillfractionals(fractionals, exponent,
fractional_count, buffer,
len, decimal_point)
if -exponent <= 64
point = -exponent
for i = 1:fractional_count
fractionals == 0 && break
fractionals *= 5
point -= 1
digit = fractionals >> point
buffer[len] = 0x30 + digit
len += 1
fractionals -= UInt64(digit) << point
end
if ((fractionals >> (point - 1)) & 1) == 1
len, decimal_point = roundup(buffer, len, decimal_point)
end
else
fract128 = UInt128(fractionals) << 64
fract128 = shift(fract128,-exponent - 64)
point = 128
for i = 1:fractional_count
fract128 == 0 && break
fract128 *= 5
point -= 1
digit, fract128 = divrem2(fract128,point)
buffer[len] = 0x30 + digit
len += 1
end
if bitat(fract128,point - 1) == 1
len, decimal_point = roundup(buffer, len, decimal_point)
end
end
return len, decimal_point
end
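# The loops above turn a binary fraction into decimal digits by multiplying
# by 5 and moving the binary point one bit to the right (point -= 1), which
# together amounts to a multiplication by 10; the next digit is whatever
# ended up above the point. The UInt128 branch is the same idea for
# fractions wider than 64 bits.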
low(x) = UInt64(x&0xffffffffffffffff)
high(x) = UInt64(x >>> 64)
bitat(x::UInt128,y) = y >= 64 ? (Int32(high(x) >> (y-64)) & 1) : (Int32(low(x) >> y) & 1)
function divrem2(x,power)
h = high(x)
l = low(x)
if power >= 64
result = Int32(h >> (power - 64))
h -= UInt64(result) << (power - 64)
return result, (UInt128(h) << 64) + l
else
part_low::UInt64 = l >> power
part_high::UInt64 = h << (64 - power)
result = Int32(part_low + part_high)
return result, UInt128(l - (part_low << power))
end
end
function shift(x::UInt128,amt)
if amt == 0
return x
elseif amt == -64
return x << 64
elseif amt == 64
return x >> 64
elseif amt <= 0
h = high(x); l = low(x)
h <<= -amt
h += l >> (64 + amt)
l <<= -amt
return (UInt128(h) << 64) + l
else
h = high(x); l = low(x)
l >>= amt
l += h << (64 - amt)
h >>= amt
return (UInt128(h) << 64) + l
end
end
function trimzeros(buffer, len, decimal_point)
while len > 1 && buffer[len - 1] == 0x30
len -= 1
end
first_non_zero::Int32 = 1
while first_non_zero < len && buffer[first_non_zero] == 0x30
first_non_zero += 1
end
if first_non_zero != 1
for i = first_non_zero:(len-1)
buffer[i - first_non_zero + 1] = buffer[i]
end
len -= first_non_zero-1
decimal_point -= first_non_zero-1
end
return len, decimal_point
end
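# Example: digits "0250" with decimal_point = 0 (i.e. 0.0250) are trimmed to
# "25" with decimal_point = -1, which still encodes 0.025.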
function fastfixedtoa(v,mode,fractional_count,buffer)
v = Float64(v)
significand::UInt64 = _significand(v)
exponent = _exponent(v)
exponent > 20 && return false, 0, 0
fractional_count > 20 && return false, 0, 0
len = 1
if exponent + kDoubleSignificandSize > 64
        divisor = Int64(5)^17  # kFive17
divisor_power = 17
dividend = significand
if exponent > divisor_power
dividend <<= exponent - divisor_power
quotient = div(dividend,divisor)
remainder = (dividend % divisor) << divisor_power
else
divisor <<= divisor_power - exponent
quotient = div(dividend,divisor)
remainder = (dividend % divisor) << exponent
end
len = filldigits32(quotient, buffer, len)
len = filldigits64fixedlength(remainder, buffer, len)
decimal_point = len-1
elseif exponent >= 0
significand <<= exponent
len = filldigits64(significand, buffer, len)
decimal_point = len-1
elseif exponent > -kDoubleSignificandSize
integrals = significand >> -exponent
fractionals = significand - (integrals << -exponent)
if integrals > 0xFFFFFFFF
len = filldigits64(integrals,buffer,len)
else
len = filldigits32(integrals%UInt32,buffer,len)
end
decimal_point = len-1
len, decimal_point = fillfractionals(fractionals,exponent,fractional_count,
buffer,len, decimal_point)
elseif exponent < -128
len = 1
decimal_point = -fractional_count
else
decimal_point = 0
len, decimal_point = fillfractionals(significand,exponent,fractional_count,
buffer,len, decimal_point)
end
len, decimal_point = trimzeros(buffer,len,decimal_point)
buffer[len] = 0
if (len-1) == 0
decimal_point = -fractional_count
end
return true, len, decimal_point
end
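# A hedged usage sketch, mirroring the tests shipped with this package:
#
#     buffer = Vector{UInt8}(undef, 500)
#     status, len, point = Grisu.fastfixedtoa(1.55, 0, 1, buffer)
#     # status == true; the buffer holds "16" and point == 1, i.e. 1.6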
# This file is a part of Julia, but is derived from
# https://github.com/google/double-conversion (same license header as above).
function roundweed(buffer,len,rest,tk,unit,kappa)
unit >= tk && return false, kappa
tk - unit <= unit && return false, kappa
tk - rest > rest && (tk - 2 * rest >= 2 * unit) && return true, kappa
if rest > unit && (tk - (rest - unit) <= (rest - unit))
buffer[len-1] += 1
for i = (len-1):-1:2
buffer[i] != 0x30 + 10 && break
buffer[i] = 0x30
buffer[i-1] += 1
end
if buffer[1] == 0x30 + 10
buffer[1] = 0x31
kappa += 1
end
return true, kappa
end
return false, kappa
end
function digitgen(w,buffer,requested_digits=1000)
unit::UInt64 = 1
one = Float(unit << -w.e, w.e)
integrals = w.s >> -one.e
fractionals = w.s & (one.s-1)
divisor, kappa = bigpowten(integrals, 64 + one.e)
len = 1
rest = 0
while kappa > 0
digit = div(integrals,divisor)
buffer[len] = 0x30 + digit
len += 1
requested_digits -= 1
integrals %= divisor
kappa -= 1
if requested_digits == 0
rest = (UInt64(integrals) << -one.e) + fractionals
r, kappa = roundweed(buffer, len, rest, UInt64(divisor) << -one.e,
unit,kappa)
return r, kappa, len
end
divisor = div(divisor,10)
end
while requested_digits > 0 && fractionals > unit
fractionals *= 10
unit *= 10
digit = fractionals >> -one.e
buffer[len] = 0x30 + digit
len += 1
requested_digits -= 1
fractionals &= one.s - 1
kappa -= 1
end
requested_digits != 0 && return false, kappa, len
r, kappa = roundweed(buffer,len,fractionals,one.s,
unit,kappa)
return r, kappa, len
end
function fastprecision(v, requested_digits, buffer = Vector{UInt8}(undef, 100))
f = normalize(Float64(v))
ten_mk_min_exp = kMinExp - (f.e + FloatSignificandSize)
ten_mk_max_exp = kMaxExp - (f.e + FloatSignificandSize)
cp = binexp_cache(ten_mk_min_exp,ten_mk_max_exp)
scaled_w = f * cp
r, kappa, len = digitgen(scaled_w,buffer,requested_digits)
decimal_exponent = -cp.de + kappa
return r, len, decimal_exponent+len-1
end
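# A hedged usage sketch, mirroring the tests shipped with this package:
#
#     buffer = Vector{UInt8}(undef, 100)
#     status, len, point = Grisu.fastprecision(4.1855804968213567e298, 17, buffer)
#     # status == true; the buffer holds "41855804968213567" and point == 299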
# This file is a part of Julia, but is derived from
# https://github.com/google/double-conversion (same license header as above).
const kMinExp = -60
const kMaxExp = -32
function roundweed(buffer,len,rest,tk,unit,kappa,too_high::UInt64,unsafe_interval::UInt64)
small = too_high - unit
big = too_high + unit
while rest < small &&
unsafe_interval - rest >= tk &&
(rest + tk < small ||
small - rest >= rest + tk - small)
buffer[len-1] -= 1
rest += tk
end
if rest < big &&
unsafe_interval - rest >= tk &&
(rest + tk < big ||
big - rest > rest + tk - big)
return false, kappa
end
return (2 * unit <= rest) && (rest <= unsafe_interval - 4 * unit), kappa
end
const SmallPowersOfTen = [
0, 1, 10, 100, 1000, 10000, 100000,
1000000, 10000000, 100000000, 1000000000]
function bigpowten(n,n_bits)
guess = ((n_bits + 1) * 1233) >> 12
guess += 1
i = SmallPowersOfTen[guess+1]
return n < i ? (SmallPowersOfTen[guess], guess-1) : (i,guess)
end
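# bigpowten returns the largest power of ten not exceeding n together with
# its decimal digit count, given that n occupies n_bits bits; 1233/4096
# approximates log10(2). For example, bigpowten(1234567, 21) == (1000000, 7).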
function digitgen(low,w,high,buffer)
unit::UInt64 = 1
one = Float(unit << -w.e, w.e)
too_high = Float(high.s+unit,high.e)
unsafe_interval = too_high - Float(low.s-unit,low.e)
integrals = too_high.s >> -one.e
fractionals = too_high.s & (one.s-1)
divisor, kappa = bigpowten(integrals, 64 + one.e)
len = 1
rest = UInt64(0)
while kappa > 0
digit = div(integrals,divisor)
buffer[len] = 0x30 + digit
len += 1
integrals %= divisor
kappa -= 1
rest = (UInt64(integrals) << -one.e) + fractionals
if rest < unsafe_interval.s
r, kappa = roundweed(buffer, len, rest, UInt64(divisor) << -one.e,
unit,kappa,(too_high - w).s,unsafe_interval.s)
return r, kappa, len
end
divisor = div(divisor,10)
end
while true
fractionals *= 10
unit *= 10
unsafe_interval = Float(unsafe_interval.s*10,unsafe_interval.e)
digit = fractionals >> -one.e
buffer[len] = 0x30 + digit
len += 1
fractionals &= one.s - 1
kappa -= 1
if fractionals < unsafe_interval.s
r, kappa = roundweed(buffer,len,fractionals,one.s,
unit,kappa,(too_high - w).s*unit,unsafe_interval.s)
return r, kappa, len
end
end
end
function fastshortest(v, buffer = Vector{UInt8}(undef, 17))
f = normalize(Float64(v))
bound_minus, bound_plus = normalizedbound(v)
ten_mk_min_exp = kMinExp - (f.e + FloatSignificandSize)
ten_mk_max_exp = kMaxExp - (f.e + FloatSignificandSize)
cp = binexp_cache(ten_mk_min_exp,ten_mk_max_exp)
scaled_w = f * cp
scaled_bound_minus = bound_minus * cp
scaled_bound_plus = bound_plus * cp
r, kappa, len = digitgen(scaled_bound_minus,scaled_w,
scaled_bound_plus,buffer)
decimal_exponent = -cp.de + kappa
return r, len, decimal_exponent+len-1
end
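# A hedged usage sketch, mirroring the tests shipped with this package
# (digits d1…dk and point p encode v ≈ 0.d1…dk × 10^p):
#
#     buffer = Vector{UInt8}(undef, 17)
#     status, len, point = Grisu.fastshortest(4294967272.0, buffer)
#     # status == true; the buffer holds "4294967272" and point == 10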
# This file is a part of Julia, but is derived from
# https://github.com/google/double-conversion (same license header as above).
import Base: -, *
struct Float
s::UInt64
e::Int32
de::Int32
end
Float() = Float(0,0,0)
Float(x,y) = Float(x,y,Int32(0))
Float(d::AbstractFloat) = Float(_significand(d), _exponent(d))
# Consts
const Float10MSBits = 0xFFC0000000000000 # used in normalize(Float)
const FloatSignMask = 0x8000000000000000 # used in normalize(Float)
const FloatSignificandSize = Int32(64)
function normalize(v::Float)
f = v.s
e::Int32 = v.e
while (f & Float10MSBits) == 0
f <<= 10
e -= 10
end
while (f & FloatSignMask) == 0
f <<= 1
e -= 1
end
return Float(f,e)
end
function normalize(v::Float64)
s = _significand(v); e = _exponent(v)
while (s & HiddenBit(Float64)) == 0
s <<= UInt64(1)
e -= Int32(1)
end
s <<= UInt64(FloatSignificandSize - SignificandSize(Float64))
e -= Int32( FloatSignificandSize - SignificandSize(Float64))
return Float(s, e)
end
# Float128
#DenormalExponent(::Type{Float128}) = Int32(-ExponentBias(Float128) + 1)
#ExponentMask(::Type{Float128}) = 0x7fff0000000000000000000000000000
#PhysicalSignificandSize(::Type{Float128}) = Int32(112)
#SignificandSize(::Type{Float128}) = Int32(113)
#ExponentBias(::Type{Float128}) = Int32(0x00003fff + PhysicalSignificandSize(Float128))
#SignificandMask(::Type{Float128}) = 0x0000ffffffffffffffffffffffffffff
#HiddenBit(::Type{Float128}) = 0x00010000000000000000000000000000
#uint_t(d::Float128) = reinterpret(UInt128,d)
# Float64
DenormalExponent(::Type{Float64}) = Int32(-ExponentBias(Float64) + 1)
ExponentMask(::Type{Float64}) = 0x7FF0000000000000
PhysicalSignificandSize(::Type{Float64}) = Int32(52)
SignificandSize(::Type{Float64}) = Int32(53)
ExponentBias(::Type{Float64}) = Int32(0x3FF + PhysicalSignificandSize(Float64))
SignificandMask(::Type{Float64}) = 0x000FFFFFFFFFFFFF
HiddenBit(::Type{Float64}) = 0x0010000000000000
uint_t(d::Float64) = reinterpret(UInt64,d)
# Float32
DenormalExponent(::Type{Float32}) = Int32(-ExponentBias(Float32) + 1)
ExponentMask(::Type{Float32}) = 0x7F800000
PhysicalSignificandSize(::Type{Float32}) = Int32(23)
SignificandSize(::Type{Float32}) = Int32(24)
ExponentBias(::Type{Float32}) = Int32(0x7F + PhysicalSignificandSize(Float32))
SignificandMask(::Type{Float32}) = 0x007FFFFF
HiddenBit(::Type{Float32}) = 0x00800000
uint_t(d::Float32) = reinterpret(UInt32,d)
# Float16
DenormalExponent(::Type{Float16}) = Int32(-ExponentBias(Float16) + 1)
ExponentMask(::Type{Float16}) = 0x7c00
PhysicalSignificandSize(::Type{Float16}) = Int32(10)
SignificandSize(::Type{Float16}) = Int32(11)
ExponentBias(::Type{Float16}) = Int32(0x000f + PhysicalSignificandSize(Float16))
SignificandMask(::Type{Float16}) = 0x03ff
HiddenBit(::Type{Float16}) = 0x0400
uint_t(d::Float16) = reinterpret(UInt16,d)
function _exponent(d::T) where T<:AbstractFloat
isdenormal(d) && return DenormalExponent(T)
biased_e::Int32 = Int32((uint_t(d) & ExponentMask(T)) >> PhysicalSignificandSize(T))
return Int32(biased_e - ExponentBias(T))
end
function _significand(d::T) where T<:AbstractFloat
s = uint_t(d) & SignificandMask(T)
return !isdenormal(d) ? s + HiddenBit(T) : s
end
isdenormal(d::T) where {T<:AbstractFloat} = (uint_t(d) & ExponentMask(T)) == 0
function normalizedbound(f::AbstractFloat)
v = Float(_significand(f),_exponent(f))
m_plus = normalize(Float((v.s << 1) + 1, v.e - 1))
if lowerboundaryiscloser(f)
m_minus = Float((v.s << 2) - 1, v.e - 2)
else
m_minus = Float((v.s << 1) - 1, v.e - 1)
end
return Float(m_minus.s << (m_minus.e - m_plus.e), m_plus.e), m_plus
end
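# The boundaries are v plus/minus half a unit in the last place, normalized
# to a common exponent; when the significand is a power of two the lower
# neighbour is closer, so the lower boundary sits only a quarter ulp below
# (the v.s << 2 branch above).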
function lowerboundaryiscloser(f::T) where T<:AbstractFloat
physical_significand_is_zero = (uint_t(f) & SignificandMask(T)) == 0
return physical_significand_is_zero && (_exponent(f) != DenormalExponent(T))
end
(-)(a::Float,b::Float) = Float(a.s - b.s,a.e,a.de)
const FloatM32 = 0xFFFFFFFF
function (*)(this::Float,other::Float)
a::UInt64 = this.s >> 32
b::UInt64 = this.s & FloatM32
c::UInt64 = other.s >> 32
d::UInt64 = other.s & FloatM32
ac::UInt64 = a * c
bc::UInt64 = b * c
ad::UInt64 = a * d
bd::UInt64 = b * d
tmp::UInt64 = (bd >> 32) + (ad & FloatM32) + (bc & FloatM32)
    # By adding UInt64(1) << 31 to tmp we round the final result.
    # Halfway cases will be rounded up.
tmp += UInt64(1) << 31
result_f::UInt64 = ac + (ad >> 32) + (bc >> 32) + (tmp >> 32)
return Float(result_f,this.e + other.e + 64,this.de)
end
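# This emulates a 64x64 -> 128-bit multiply that keeps the upper 64 bits,
# rounding halfway cases up; equivalent (assuming UInt128 arithmetic) to:
#
#     Float(UInt64((UInt128(this.s) * other.s + (UInt128(1) << 63)) >> 64),
#           this.e + other.e + 64, this.de)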
const CachedPowers = Float[
Float(0xfa8fd5a0081c0288, -1220, -348),
Float(0xbaaee17fa23ebf76, -1193, -340),
Float(0x8b16fb203055ac76, -1166, -332),
Float(0xcf42894a5dce35ea, -1140, -324),
Float(0x9a6bb0aa55653b2d, -1113, -316),
Float(0xe61acf033d1a45df, -1087, -308),
Float(0xab70fe17c79ac6ca, -1060, -300),
Float(0xff77b1fcbebcdc4f, -1034, -292),
Float(0xbe5691ef416bd60c, -1007, -284),
Float(0x8dd01fad907ffc3c, -980, -276),
Float(0xd3515c2831559a83, -954, -268),
Float(0x9d71ac8fada6c9b5, -927, -260),
Float(0xea9c227723ee8bcb, -901, -252),
Float(0xaecc49914078536d, -874, -244),
Float(0x823c12795db6ce57, -847, -236),
Float(0xc21094364dfb5637, -821, -228),
Float(0x9096ea6f3848984f, -794, -220),
Float(0xd77485cb25823ac7, -768, -212),
Float(0xa086cfcd97bf97f4, -741, -204),
Float(0xef340a98172aace5, -715, -196),
Float(0xb23867fb2a35b28e, -688, -188),
Float(0x84c8d4dfd2c63f3b, -661, -180),
Float(0xc5dd44271ad3cdba, -635, -172),
Float(0x936b9fcebb25c996, -608, -164),
Float(0xdbac6c247d62a584, -582, -156),
Float(0xa3ab66580d5fdaf6, -555, -148),
Float(0xf3e2f893dec3f126, -529, -140),
Float(0xb5b5ada8aaff80b8, -502, -132),
Float(0x87625f056c7c4a8b, -475, -124),
Float(0xc9bcff6034c13053, -449, -116),
Float(0x964e858c91ba2655, -422, -108),
Float(0xdff9772470297ebd, -396, -100),
Float(0xa6dfbd9fb8e5b88f, -369, -92),
Float(0xf8a95fcf88747d94, -343, -84),
Float(0xb94470938fa89bcf, -316, -76),
Float(0x8a08f0f8bf0f156b, -289, -68),
Float(0xcdb02555653131b6, -263, -60),
Float(0x993fe2c6d07b7fac, -236, -52),
Float(0xe45c10c42a2b3b06, -210, -44),
Float(0xaa242499697392d3, -183, -36),
Float(0xfd87b5f28300ca0e, -157, -28),
Float(0xbce5086492111aeb, -130, -20),
Float(0x8cbccc096f5088cc, -103, -12),
Float(0xd1b71758e219652c, -77, -4),
Float(0x9c40000000000000, -50, 4),
Float(0xe8d4a51000000000, -24, 12),
Float(0xad78ebc5ac620000, 3, 20),
Float(0x813f3978f8940984, 30, 28),
Float(0xc097ce7bc90715b3, 56, 36),
Float(0x8f7e32ce7bea5c70, 83, 44),
Float(0xd5d238a4abe98068, 109, 52),
Float(0x9f4f2726179a2245, 136, 60),
Float(0xed63a231d4c4fb27, 162, 68),
Float(0xb0de65388cc8ada8, 189, 76),
Float(0x83c7088e1aab65db, 216, 84),
Float(0xc45d1df942711d9a, 242, 92),
Float(0x924d692ca61be758, 269, 100),
Float(0xda01ee641a708dea, 295, 108),
Float(0xa26da3999aef774a, 322, 116),
Float(0xf209787bb47d6b85, 348, 124),
Float(0xb454e4a179dd1877, 375, 132),
Float(0x865b86925b9bc5c2, 402, 140),
Float(0xc83553c5c8965d3d, 428, 148),
Float(0x952ab45cfa97a0b3, 455, 156),
Float(0xde469fbd99a05fe3, 481, 164),
Float(0xa59bc234db398c25, 508, 172),
Float(0xf6c69a72a3989f5c, 534, 180),
Float(0xb7dcbf5354e9bece, 561, 188),
Float(0x88fcf317f22241e2, 588, 196),
Float(0xcc20ce9bd35c78a5, 614, 204),
Float(0x98165af37b2153df, 641, 212),
Float(0xe2a0b5dc971f303a, 667, 220),
Float(0xa8d9d1535ce3b396, 694, 228),
Float(0xfb9b7cd9a4a7443c, 720, 236),
Float(0xbb764c4ca7a44410, 747, 244),
Float(0x8bab8eefb6409c1a, 774, 252),
Float(0xd01fef10a657842c, 800, 260),
Float(0x9b10a4e5e9913129, 827, 268),
Float(0xe7109bfba19c0c9d, 853, 276),
Float(0xac2820d9623bf429, 880, 284),
Float(0x80444b5e7aa7cf85, 907, 292),
Float(0xbf21e44003acdd2d, 933, 300),
Float(0x8e679c2f5e44ff8f, 960, 308),
Float(0xd433179d9c8cb841, 986, 316),
Float(0x9e19db92b4e31ba9, 1013, 324),
Float(0xeb96bf6ebadf77d9, 1039, 332),
Float(0xaf87023b9bf0ee6b, 1066, 340)]
const CachedPowersLength = length(CachedPowers)
const CachedPowersOffset = 348 # -1 * the first decimal_exponent.
const D_1_LOG2_10 = 0.30102999566398114 # 1 / lg(10)
# Difference between the decimal exponents in the table above.
const DecimalExponentDistance = 8
const MinDecimalExponent = -348
const MaxDecimalExponent = 340
function binexp_cache(min_exponent,max_exponent)
k = ceil(Integer,(min_exponent+63)*D_1_LOG2_10)
index = div(CachedPowersOffset+k-1,DecimalExponentDistance) + 1
cp = CachedPowers[index+1]
return cp
end
using Test
using Grisu
function trimrep(buffer)
len = length(unsafe_string(pointer(buffer)))
ind = len
for i = len:-1:1
buffer[i] != 0x30 && break
ind -= 1
end
buffer[ind+1] = 0
return unsafe_string(pointer(buffer))
end
const bufsize = 500
buffer = Vector{UInt8}(undef, bufsize)
fill!(buffer,0)
bignums = [Grisu.Bignums.Bignum(),Grisu.Bignums.Bignum(),Grisu.Bignums.Bignum(),Grisu.Bignums.Bignum()]
# Start by checking the byte-order.
ordered = 0x0123456789ABCDEF
@test 3512700564088504e-318 == reinterpret(Float64,ordered)
min_double64 = 0x0000000000000001
@test 5e-324 == reinterpret(Float64,min_double64)
max_double64 = 0x7fefffffffffffff
@test 1.7976931348623157e308 == reinterpret(Float64,max_double64)
# Start by checking the byte-order.
ordered = 0x01234567
@test Float32(2.9988165487136453e-38) == reinterpret(Float32,ordered)
min_float32 = 0x00000001
@test Float32(1.4e-45) == reinterpret(Float32,min_float32)
max_float32 = 0x7f7fffff
@test Float32(3.4028234e38) == reinterpret(Float32,max_float32)
ordered = 0x0123456789ABCDEF
diy_fp = Grisu.Float(reinterpret(Float64,ordered))
@test UInt64(0x12) - UInt64(0x3FF) - 52 == diy_fp.e % UInt64
# The 52 mantissa bits, plus the implicit 1 in bit 52, as a UInt64.
@test 0x0013456789ABCDEF == diy_fp.s
min_double64 = 0x0000000000000001
diy_fp = Grisu.Float(reinterpret(Float64,min_double64))
@test -UInt64(0x3FF) - Int64(52) + Int64(1) == diy_fp.e % UInt64
# This is a denormal so no hidden bit.
@test 1 == diy_fp.s
max_double64 = 0x7fefffffffffffff
diy_fp = Grisu.Float(reinterpret(Float64,max_double64))
@test 0x7FE - 0x3FF - 52 == diy_fp.e % UInt64
@test 0x001fffffffffffff == diy_fp.s
ordered = 0x01234567
diy_fp = Grisu.Float(reinterpret(Float32,ordered))
@test UInt64(0x2) - UInt64(0x7F) - 23 == diy_fp.e % UInt64
# The 23 mantissa bits, plus the implicit 1 in bit 24, as a UInt32.
@test 0xA34567 == UInt64(diy_fp.s)
min_float32 = 0x00000001
diy_fp = Grisu.Float(reinterpret(Float32,min_float32))
@test -UInt64(0x7F) - 23 + 1 == diy_fp.e % UInt64
# This is a denormal so no hidden bit.
@test 1 == UInt64(diy_fp.s)
max_float32 = 0x7f7fffff
diy_fp = Grisu.Float(reinterpret(Float32,max_float32))
@test 0xFE - 0x7F - 23 == diy_fp.e % UInt64
@test 0x00ffffff == UInt64(diy_fp.s)
ordered = 0x0123456789ABCDEF
diy_fp = Grisu.normalize(Grisu.Float(reinterpret(Float64,ordered)))
@test UInt64(0x12) - UInt64(0x3FF) - 52 - 11 == diy_fp.e % UInt64
@test 0x0013456789ABCDEF << 11 == diy_fp.s
min_double64 = 0x0000000000000001
diy_fp = Grisu.normalize(Grisu.Float(reinterpret(Float64,min_double64)))
@test -UInt64(0x3FF) - 52 + 1 - 63 == diy_fp.e % UInt64
# This is a denormal so no hidden bit.
@test 0x8000000000000000 == diy_fp.s
max_double64 = 0x7fefffffffffffff
diy_fp = Grisu.normalize(Grisu.Float(reinterpret(Float64,max_double64)))
@test 0x7FE - 0x3FF - 52 - 11 == diy_fp.e % UInt64
@test (0x001fffffffffffff << 11) == diy_fp.s
min_double64 = 0x0000000000000001
@test Grisu.isdenormal(reinterpret(Float64,min_double64))
float_bits = 0x000FFFFFFFFFFFFF
@test Grisu.isdenormal(reinterpret(Float64,float_bits))
float_bits = 0x0010000000000000
@test !Grisu.isdenormal(reinterpret(Float64,float_bits))
min_float32 = 0x00000001
@test Grisu.isdenormal(reinterpret(Float32,min_float32))
float_bits = 0x007FFFFF
@test Grisu.isdenormal(reinterpret(Float32,float_bits))
float_bits = 0x00800000
@test !Grisu.isdenormal(reinterpret(Float32,float_bits))
diy_fp = Grisu.normalize(Grisu.Float(1.5))
boundary_minus, boundary_plus = Grisu.normalizedbound(1.5)
@test diy_fp.e == boundary_minus.e
@test diy_fp.e == boundary_plus.e
# 1.5 does not have a significand of the form 2^p (for some p).
# Therefore its boundaries are at the same distance.
@test diy_fp.s - boundary_minus.s == boundary_plus.s - diy_fp.s
@test (1 << 10) == diy_fp.s - boundary_minus.s
diy_fp = Grisu.normalize(Grisu.Float(1.0))
boundary_minus, boundary_plus = Grisu.normalizedbound(1.0)
@test diy_fp.e == boundary_minus.e
@test diy_fp.e == boundary_plus.e
# 1.0 does have a significand of the form 2^p (for some p).
# Therefore its lower boundary is twice as close as the upper boundary.
@test boundary_plus.s - diy_fp.s > diy_fp.s - boundary_minus.s
@test (1 << 9) == diy_fp.s - boundary_minus.s
@test (1 << 10) == boundary_plus.s - diy_fp.s
min_double64 = 0x0000000000000001
diy_fp = Grisu.normalize(Grisu.Float(reinterpret(Float64,min_double64)))
boundary_minus, boundary_plus = Grisu.normalizedbound(reinterpret(Float64,min_double64))
@test diy_fp.e == boundary_minus.e
@test diy_fp.e == boundary_plus.e
# min-value does not have a significand of the form 2^p (for some p).
# Therefore its boundaries are at the same distance.
@test diy_fp.s - boundary_minus.s == boundary_plus.s - diy_fp.s
# Denormals have their boundaries much closer.
@test (UInt64(1) << 62) == diy_fp.s - boundary_minus.s
smallest_normal64 = 0x0010000000000000
diy_fp = Grisu.normalize(reinterpret(Float64,smallest_normal64))
boundary_minus, boundary_plus = Grisu.normalizedbound(reinterpret(Float64,smallest_normal64))
@test diy_fp.e == boundary_minus.e
@test diy_fp.e == boundary_plus.e
# Even though the significand is of the form 2^p (for some p), its boundaries
# are at the same distance. (This is the only exception).
@test diy_fp.s - boundary_minus.s == boundary_plus.s - diy_fp.s
@test (1 << 10) == diy_fp.s - boundary_minus.s
largest_denormal64 = 0x000FFFFFFFFFFFFF
diy_fp = Grisu.normalize(reinterpret(Float64,largest_denormal64))
boundary_minus, boundary_plus = Grisu.normalizedbound(reinterpret(Float64,largest_denormal64))
@test diy_fp.e == boundary_minus.e
@test diy_fp.e == boundary_plus.e
@test diy_fp.s - boundary_minus.s == boundary_plus.s - diy_fp.s
@test (1 << 11) == diy_fp.s - boundary_minus.s
max_double64 = 0x7fefffffffffffff
diy_fp = Grisu.normalize(reinterpret(Float64,max_double64))
boundary_minus, boundary_plus = Grisu.normalizedbound(reinterpret(Float64,max_double64))
@test diy_fp.e == boundary_minus.e
@test diy_fp.e == boundary_plus.e
# max-value does not have a significand of the form 2^p (for some p).
# Therefore its boundaries are at the same distance.
@test diy_fp.s - boundary_minus.s == boundary_plus.s - diy_fp.s
@test (1 << 10) == diy_fp.s - boundary_minus.s
kOne64 = UInt64(1)
diy_fp = Grisu.normalize(Grisu.Float(Float32(1.5)))
boundary_minus, boundary_plus = Grisu.normalizedbound(Float32(1.5))
@test diy_fp.e == boundary_minus.e
@test diy_fp.e == boundary_plus.e
# 1.5 does not have a significand of the form 2^p (for some p).
# Therefore its boundaries are at the same distance.
@test diy_fp.s - boundary_minus.s == boundary_plus.s - diy_fp.s
# Normalization shifts the significand by 8 bits. Add 32 bits for the bigger
# data-type, and remove 1 because boundaries are at half a ULP.
@test (kOne64 << 39) == diy_fp.s - boundary_minus.s
diy_fp = Grisu.normalize(Grisu.Float(Float32(1.0)))
boundary_minus, boundary_plus = Grisu.normalizedbound(Float32(1.0))
@test diy_fp.e == boundary_minus.e
@test diy_fp.e == boundary_plus.e
# 1.0 does have a significand of the form 2^p (for some p).
# Therefore its lower boundary is twice as close as the upper boundary.
@test boundary_plus.s - diy_fp.s > diy_fp.s - boundary_minus.s
@test (kOne64 << 38) == diy_fp.s - boundary_minus.s
@test (kOne64 << 39) == boundary_plus.s - diy_fp.s
min_float32 = 0x00000001
diy_fp = Grisu.normalize(Grisu.Float(reinterpret(Float32,min_float32)))
boundary_minus, boundary_plus = Grisu.normalizedbound(reinterpret(Float32,min_float32))
@test diy_fp.e == boundary_minus.e
@test diy_fp.e == boundary_plus.e
# min-value does not have a significand of the form 2^p (for some p).
# Therefore its boundaries are at the same distance.
@test diy_fp.s - boundary_minus.s == boundary_plus.s - diy_fp.s
# Denormals have their boundaries much closer.
@test (kOne64 << 62) == diy_fp.s - boundary_minus.s
smallest_normal32 = 0x00800000
diy_fp = Grisu.normalize(Grisu.Float(reinterpret(Float32,smallest_normal32)))
boundary_minus, boundary_plus = Grisu.normalizedbound(reinterpret(Float32,smallest_normal32))
@test diy_fp.e == boundary_minus.e
@test diy_fp.e == boundary_plus.e
# Even though the significand is of the form 2^p (for some p), its boundaries
# are at the same distance. (This is the only exception).
@test diy_fp.s - boundary_minus.s == boundary_plus.s - diy_fp.s
@test (kOne64 << 39) == diy_fp.s - boundary_minus.s
largest_denormal32 = 0x007FFFFF
diy_fp = Grisu.normalize(Grisu.Float(reinterpret(Float32,largest_denormal32)))
boundary_minus, boundary_plus = Grisu.normalizedbound(reinterpret(Float32,largest_denormal32))
@test diy_fp.e == boundary_minus.e
@test diy_fp.e == boundary_plus.e
@test diy_fp.s - boundary_minus.s == boundary_plus.s - diy_fp.s
@test (kOne64 << 40) == diy_fp.s - boundary_minus.s
max_float32 = 0x7f7fffff
diy_fp = Grisu.normalize(Grisu.Float(reinterpret(Float32,max_float32)))
boundary_minus, boundary_plus = Grisu.normalizedbound(reinterpret(Float32,max_float32))
@test diy_fp.e == boundary_minus.e
@test diy_fp.e == boundary_plus.e
# max-value does not have a significand of the form 2^p (for some p).
# Therefore its boundaries are at the same distance.
@test diy_fp.s - boundary_minus.s == boundary_plus.s - diy_fp.s
@test (kOne64 << 39) == diy_fp.s - boundary_minus.s
#fastshortest
min_double = 5e-324
status,len,point = Grisu.fastshortest(min_double, buffer)
@test status
@test "5" == trimrep(buffer)
@test -323 == point
fill!(buffer,0)
max_double = 1.7976931348623157e308
status,len,point = Grisu.fastshortest(max_double, buffer)
@test status
@test "17976931348623157" == trimrep(buffer)
@test 309 == point
fill!(buffer,0)
status,len,point = Grisu.fastshortest(4294967272.0, buffer)
@test status
@test "4294967272" == trimrep(buffer)
@test 10 == point
fill!(buffer,0)
status,len,point = Grisu.fastshortest(4.1855804968213567e298, buffer)
@test status
@test "4185580496821357" == trimrep(buffer)
@test 299 == point
fill!(buffer,0)
status,len,point = Grisu.fastshortest(5.5626846462680035e-309, buffer)
@test status
@test "5562684646268003" == trimrep(buffer)
@test -308 == point
fill!(buffer,0)
status,len,point = Grisu.fastshortest(2147483648.0, buffer)
@test status
@test "2147483648" == trimrep(buffer)
@test 10 == point
fill!(buffer,0)
status,len,point = Grisu.fastshortest(3.5844466002796428e+298, buffer)
@test !status # Not all Grisu.fastshortest variants manage to compute this number.
if status
@test "35844466002796428" == trimrep(buffer)
@test 299 == point
fill!(buffer,0)
end
smallest_normal64 = 0x0010000000000000
v = reinterpret(Float64,smallest_normal64)
status,len,point = Grisu.fastshortest(v, buffer)
if status
@test "22250738585072014" == trimrep(buffer)
@test -307 == point
fill!(buffer,0)
end
largest_denormal64 = 0x000FFFFFFFFFFFFF
v = reinterpret(Float64,largest_denormal64)
status,len,point = Grisu.fastshortest(v, buffer)
if status
@test "2225073858507201" == trimrep(buffer)
@test -307 == point
fill!(buffer,0)
end
min_float = Float32(1e-45)
status,len,point = Grisu.fastshortest(min_float, buffer)
@test status
@test "1" == trimrep(buffer)
@test -44 == point
fill!(buffer,0)
max_float = 3.4028234f38 #Float32(3.4028234e38)
status,len,point = Grisu.fastshortest(max_float, buffer)
@test status
@test "34028235" == trimrep(buffer)
@test 39 == point
fill!(buffer,0)
status,len,point = Grisu.fastshortest(Float32(4294967272.0), buffer)
@test status
@test "42949673" == trimrep(buffer)
@test 10 == point
fill!(buffer,0)
status,len,point = Grisu.fastshortest(Float32(3.32306998946228968226e+35), buffer)
@test status
@test "332307" == trimrep(buffer)
@test 36 == point
fill!(buffer,0)
status,len,point = Grisu.fastshortest(Float32(1.2341e-41), buffer)
@test status
@test "12341" == trimrep(buffer)
@test -40 == point
fill!(buffer,0)
status,len,point = Grisu.fastshortest(Float32(3.3554432e7), buffer)
@test status
@test "33554432" == trimrep(buffer)
@test 8 == point
fill!(buffer,0)
status,len,point = Grisu.fastshortest(Float32(3.26494756798464e14), buffer)
@test status
@test "32649476" == trimrep(buffer)
@test 15 == point
fill!(buffer,0)
status,len,point = Grisu.fastshortest(Float32(3.91132223637771935344e37), buffer)
if status # Not all Grisu.fastshortest variants manage to compute this number.
@test "39113222" == trimrep(buffer)
@test 38 == point
fill!(buffer,0)
end
smallest_normal32 = 0x00800000
v = reinterpret(Float32,smallest_normal32)
status,len,point = Grisu.fastshortest(v, buffer)
if status
@test "11754944" == trimrep(buffer)
@test -37 == point
fill!(buffer,0)
end
largest_denormal32 = 0x007FFFFF
v = reinterpret(Float32,largest_denormal32)
status,len,point = Grisu.fastshortest(v, buffer)
@test status
@test "11754942" == trimrep(buffer)
@test -37 == point
fill!(buffer,0)
status,len,point = Grisu.fastprecision(1.0, 3, buffer)
@test status
@test 3 >= len-1
@test "1" == trimrep(buffer)
@test 1 == point
fill!(buffer,0)
status,len,point = Grisu.fastprecision(1.5, 10, buffer)
if status
@test 10 >= len-1
@test "15" == trimrep(buffer)
@test 1 == point
fill!(buffer,0)
end
min_double = 5e-324
status,len,point = Grisu.fastprecision(min_double, 5,buffer)
@test status
@test "49407" == trimrep(buffer)
@test -323 == point
fill!(buffer,0)
max_double = 1.7976931348623157e308
status,len,point = Grisu.fastprecision(max_double, 7,buffer)
@test status
@test "1797693" == trimrep(buffer)
@test 309 == point
fill!(buffer,0)
status,len,point = Grisu.fastprecision(4294967272.0, 14,buffer)
if status
@test 14 >= len-1
@test "4294967272" == trimrep(buffer)
@test 10 == point
fill!(buffer,0)
end
status,len,point = Grisu.fastprecision(4.1855804968213567e298, 17,buffer)
@test status
@test "41855804968213567" == trimrep(buffer)
@test 299 == point
fill!(buffer,0)
status,len,point = Grisu.fastprecision(5.5626846462680035e-309, 1,buffer)
@test status
@test "6" == trimrep(buffer)
@test -308 == point
fill!(buffer,0)
status,len,point = Grisu.fastprecision(2147483648.0, 5,buffer)
@test status
@test "21475" == trimrep(buffer)
@test 10 == point
fill!(buffer,0)
status,len,point = Grisu.fastprecision(3.5844466002796428e+298, 10,buffer)
@test status
@test 10 >= len-1
@test "35844466" == trimrep(buffer)
@test 299 == point
fill!(buffer,0)
smallest_normal64 = 0x0010000000000000
v = reinterpret(Float64,smallest_normal64)
status,len,point = Grisu.fastprecision(v, 17, buffer)
@test status
@test "22250738585072014" == trimrep(buffer)
@test -307 == point
fill!(buffer,0)
largest_denormal64 = 0x000FFFFFFFFFFFFF
v = reinterpret(Float64,largest_denormal64)
status,len,point = Grisu.fastprecision(v, 17, buffer)
@test status
@test 20 >= len-1
@test "22250738585072009" == trimrep(buffer)
@test -307 == point
fill!(buffer,0)
v = 3.3161339052167390562200598e-237
status,len,point = Grisu.fastprecision(v, 18, buffer)
@test status
@test "331613390521673906" == trimrep(buffer)
@test -236 == point
fill!(buffer,0)
v = 7.9885183916008099497815232e+191
status,len,point = Grisu.fastprecision(v, 4, buffer)
@test status
@test "7989" == trimrep(buffer)
@test 192 == point
fill!(buffer,0)
#fastfixedtoa
status,len,point = Grisu.fastfixedtoa(1.0, 0,1, buffer)
@test "1" == unsafe_string(pointer(buffer))
@test 1 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(1.0, 0,15, buffer)
@test "1" == unsafe_string(pointer(buffer))
@test 1 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(1.0, 0,0, buffer)
@test "1" == unsafe_string(pointer(buffer))
@test 1 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(0xFFFFFFFF, 0,5, buffer)
@test "4294967295" == unsafe_string(pointer(buffer))
@test 10 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(4294967296.0, 0,5, buffer)
@test "4294967296" == unsafe_string(pointer(buffer)) #todo
@test 10 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(1e21, 0,5, buffer)
@test "1" == unsafe_string(pointer(buffer)) #todo extra '0's
@test 22 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(999999999999999868928.00, 0,2, buffer)
@test "999999999999999868928" == unsafe_string(pointer(buffer)) #todo extra '0'
@test 21 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(6.9999999999999989514240000e+21, 0,5, buffer)
@test "6999999999999998951424" == unsafe_string(pointer(buffer)) #todo short several '9's
@test 22 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(1.5, 0,5, buffer)
@test "15" == unsafe_string(pointer(buffer))
@test 1 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(1.55, 0,5, buffer)
@test "155" == unsafe_string(pointer(buffer))
@test 1 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(1.55, 0,1, buffer)
@test "16" == unsafe_string(pointer(buffer))
@test 1 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(1.00000001, 0,15, buffer)
@test "100000001" == unsafe_string(pointer(buffer))
@test 1 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(0.1, 0,10, buffer)
@test "1" == unsafe_string(pointer(buffer))
@test 0 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(0.01, 0,10, buffer)
@test "1" == unsafe_string(pointer(buffer))
@test -1 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(0.001, 0,10, buffer)
@test "1" == unsafe_string(pointer(buffer))
@test -2 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(0.0001, 0,10, buffer) #todo
@test "1" == unsafe_string(pointer(buffer))
@test -3 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(0.00001, 0,10, buffer) #todo
@test "1" == unsafe_string(pointer(buffer))
@test -4 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(0.000001, 0,10, buffer)
@test "1" == unsafe_string(pointer(buffer))
@test -5 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(0.0000001, 0,10, buffer)
@test "1" == unsafe_string(pointer(buffer))
@test -6 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(0.00000001, 0,10, buffer)
@test "1" == unsafe_string(pointer(buffer))
@test -7 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(0.000000001, 0,10, buffer)
@test "1" == unsafe_string(pointer(buffer))
@test -8 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(0.0000000001, 0,15, buffer)
@test "1" == unsafe_string(pointer(buffer))
@test -9 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(0.00000000001, 0,15, buffer)
@test "1" == unsafe_string(pointer(buffer))
@test -10 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(0.000000000001, 0,15, buffer)
@test "1" == unsafe_string(pointer(buffer))
@test -11 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(0.0000000000001, 0,15, buffer)
@test "1" == unsafe_string(pointer(buffer))
@test -12 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(0.00000000000001, 0,15, buffer)
@test "1" == unsafe_string(pointer(buffer))
@test -13 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(0.000000000000001, 0,20, buffer)
@test "1" == unsafe_string(pointer(buffer))
@test -14 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(0.0000000000000001, 0,20, buffer)
@test "1" == unsafe_string(pointer(buffer))
@test -15 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(0.00000000000000001, 0,20, buffer)
@test "1" == unsafe_string(pointer(buffer))
@test -16 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(0.000000000000000001, 0,20, buffer)
@test "1" == unsafe_string(pointer(buffer))
@test -17 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(0.0000000000000000001, 0,20, buffer)
@test "1" == unsafe_string(pointer(buffer))
@test -18 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(0.00000000000000000001, 0,20, buffer)
@test "1" == unsafe_string(pointer(buffer))
@test -19 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(0.10000000004, 0,10, buffer)
@test "1" == unsafe_string(pointer(buffer))
@test 0 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(0.01000000004, 0,10, buffer)
@test "1" == unsafe_string(pointer(buffer))
@test -1 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(0.00100000004, 0,10, buffer)
@test "1" == unsafe_string(pointer(buffer))
@test -2 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(0.00010000004, 0,10, buffer) #todo
@test "1" == unsafe_string(pointer(buffer))
@test -3 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(0.00001000004, 0,10, buffer)
@test "1" == unsafe_string(pointer(buffer))
@test -4 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(0.00000100004, 0,10, buffer)
@test "1" == unsafe_string(pointer(buffer))
@test -5 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(0.00000010004, 0,10, buffer)
@test "1" == unsafe_string(pointer(buffer))
@test -6 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(0.00000001004, 0,10, buffer)
@test "1" == unsafe_string(pointer(buffer))
@test -7 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(0.00000000104, 0,10, buffer)
@test "1" == unsafe_string(pointer(buffer))
@test -8 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(0.0000000001000004, 0,15, buffer)
@test "1" == unsafe_string(pointer(buffer))
@test -9 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(0.0000000000100004, 0,15, buffer)
@test "1" == unsafe_string(pointer(buffer))
@test -10 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(0.0000000000010004, 0,15, buffer)
@test "1" == unsafe_string(pointer(buffer))
@test -11 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(0.0000000000001004, 0,15, buffer)
@test "1" == unsafe_string(pointer(buffer))
@test -12 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(0.0000000000000104, 0,15, buffer)
@test "1" == unsafe_string(pointer(buffer))
@test -13 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(0.000000000000001000004, 0,20, buffer)
@test "1" == unsafe_string(pointer(buffer))
@test -14 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(0.000000000000000100004, 0,20, buffer)
@test "1" == unsafe_string(pointer(buffer))
@test -15 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(0.000000000000000010004, 0,20, buffer)
@test "1" == unsafe_string(pointer(buffer))
@test -16 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(0.000000000000000001004, 0,20, buffer)
@test "1" == unsafe_string(pointer(buffer))
@test -17 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(0.000000000000000000104, 0,20, buffer)
@test "1" == unsafe_string(pointer(buffer))
@test -18 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(0.000000000000000000014, 0,20, buffer)
@test "1" == unsafe_string(pointer(buffer))
@test -19 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(0.10000000006, 0,10, buffer)
@test "1000000001" == unsafe_string(pointer(buffer))
@test 0 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(0.01000000006, 0,10, buffer)
@test "100000001" == unsafe_string(pointer(buffer))
@test -1 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(0.00100000006, 0,10, buffer)
@test "10000001" == unsafe_string(pointer(buffer))
@test -2 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(0.00010000006, 0,10, buffer)
@test "1000001" == unsafe_string(pointer(buffer))
@test -3 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(0.00001000006, 0,10, buffer)
@test "100001" == unsafe_string(pointer(buffer))
@test -4 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(0.00000100006, 0,10, buffer)
@test "10001" == unsafe_string(pointer(buffer))
@test -5 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(0.00000010006, 0,10, buffer)
@test "1001" == unsafe_string(pointer(buffer))
@test -6 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(0.00000001006, 0,10, buffer)
@test "101" == unsafe_string(pointer(buffer))
@test -7 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(0.00000000106, 0,10, buffer)
@test "11" == unsafe_string(pointer(buffer))
@test -8 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(0.0000000001000006, 0,15, buffer)
@test "100001" == unsafe_string(pointer(buffer))
@test -9 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(0.0000000000100006, 0,15, buffer)
@test "10001" == unsafe_string(pointer(buffer))
@test -10 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(0.0000000000010006, 0,15, buffer)
@test "1001" == unsafe_string(pointer(buffer))
@test -11 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(0.0000000000001006, 0,15, buffer)
@test "101" == unsafe_string(pointer(buffer))
@test -12 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(0.0000000000000106, 0,15, buffer)
@test "11" == unsafe_string(pointer(buffer))
@test -13 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(0.000000000000001000006, 0,20, buffer)
@test "100001" == unsafe_string(pointer(buffer))
@test -14 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(0.000000000000000100006, 0,20, buffer)
@test "10001" == unsafe_string(pointer(buffer))
@test -15 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(0.000000000000000010006, 0,20, buffer)
@test "1001" == unsafe_string(pointer(buffer))
@test -16 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(0.000000000000000001006, 0,20, buffer)
@test "101" == unsafe_string(pointer(buffer))
@test -17 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(0.000000000000000000106, 0,20, buffer)
@test "11" == unsafe_string(pointer(buffer))
@test -18 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(0.000000000000000000016, 0,20, buffer)
@test "2" == unsafe_string(pointer(buffer))
@test -19 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(0.6, 0,0, buffer)
@test "1" == unsafe_string(pointer(buffer))
@test 1 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(0.96, 0,1, buffer)
@test "1" == unsafe_string(pointer(buffer))
@test 1 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(0.996, 0,2, buffer)
@test "1" == unsafe_string(pointer(buffer))
@test 1 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(0.9996, 0,3, buffer)
@test "1" == unsafe_string(pointer(buffer))
@test 1 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(0.99996, 0,4, buffer)
@test "1" == unsafe_string(pointer(buffer))
@test 1 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(0.999996, 0,5, buffer)
@test "1" == unsafe_string(pointer(buffer))
@test 1 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(0.9999996, 0,6, buffer)
@test "1" == unsafe_string(pointer(buffer))
@test 1 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(0.99999996, 0,7, buffer)
@test "1" == unsafe_string(pointer(buffer))
@test 1 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(0.999999996, 0,8, buffer)
@test "1" == unsafe_string(pointer(buffer))
@test 1 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(0.9999999996, 0,9, buffer)
@test "1" == unsafe_string(pointer(buffer))
@test 1 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(0.99999999996, 0,10, buffer)
@test "1" == unsafe_string(pointer(buffer))
@test 1 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(0.999999999996, 0,11, buffer)
@test "1" == unsafe_string(pointer(buffer))
@test 1 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(0.9999999999996, 0,12, buffer)
@test "1" == unsafe_string(pointer(buffer))
@test 1 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(0.99999999999996, 0,13, buffer)
@test "1" == unsafe_string(pointer(buffer))
@test 1 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(0.999999999999996, 0,14, buffer)
@test "1" == unsafe_string(pointer(buffer))
@test 1 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(0.9999999999999996, 0,15, buffer)
@test "1" == unsafe_string(pointer(buffer))
@test 1 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(0.00999999999999996, 0,16, buffer)
@test "1" == unsafe_string(pointer(buffer))
@test -1 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(0.000999999999999996, 0,17, buffer)
@test "1" == unsafe_string(pointer(buffer))
@test -2 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(0.0000999999999999996, 0,18, buffer)
@test "1" == unsafe_string(pointer(buffer))
@test -3 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(0.00000999999999999996, 0,19, buffer)
@test "1" == unsafe_string(pointer(buffer))
@test -4 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(0.000000999999999999996, 0,20, buffer)
@test "1" == unsafe_string(pointer(buffer))
@test -5 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(323423.234234, 0,10, buffer)
@test "323423234234" == unsafe_string(pointer(buffer))
@test 6 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(12345678.901234, 0,4, buffer)
@test "123456789012" == unsafe_string(pointer(buffer))
@test 8 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(98765.432109, 0,5, buffer)
@test "9876543211" == unsafe_string(pointer(buffer))
@test 5 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(42, 0,20, buffer)
@test "42" == unsafe_string(pointer(buffer))
@test 2 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(0.5, 0,0, buffer)
@test "1" == unsafe_string(pointer(buffer))
@test 1 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(1e-23, 0,10, buffer)
@test "" == unsafe_string(pointer(buffer))
@test -10 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(1e-123, 0,2, buffer)
@test "" == unsafe_string(pointer(buffer))
@test -2 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(1e-123, 0,0, buffer)
@test "" == unsafe_string(pointer(buffer))
@test 0 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(1e-23, 0,20, buffer)
@test "" == unsafe_string(pointer(buffer))
@test -20 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(1e-21, 0,20, buffer)
@test "" == unsafe_string(pointer(buffer))
@test -20 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(1e-22, 0,20, buffer)
@test "" == unsafe_string(pointer(buffer))
@test -20 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(6e-21, 0,20, buffer)
@test "1" == unsafe_string(pointer(buffer))
@test -19 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(9.1193616301674545152000000e+19, 0,0,buffer)
@test "91193616301674545152" == unsafe_string(pointer(buffer))
@test 20 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(4.8184662102767651659096515e-04, 0,19,buffer)
@test "4818466210276765" == unsafe_string(pointer(buffer))
@test -3 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(1.9023164229540652612705182e-23, 0,8,buffer)
@test "" == unsafe_string(pointer(buffer))
@test -8 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(1000000000000000128.0, 0,0,buffer)
@test "1000000000000000128" == unsafe_string(pointer(buffer))
@test 19 == point
fill!(buffer,0)
# bignumdtoa: arbitrary-precision (bignum) fallback digit generation
status,len,point = Grisu.bignumdtoa(1.0, Grisu.SHORTEST, 0, buffer,bignums)
@test "1" == trimrep(buffer)
@test 1 == point
fill!(buffer,0)
map(x->Grisu.Bignums.zero!(x),bignums)
status,len,point = Grisu.bignumdtoa(1.0, Grisu.FIXED, 3, buffer,bignums)
@test 3 >= len - 1 - point
@test "1" == trimrep(buffer)
@test 1 == point
fill!(buffer,0)
map(x->Grisu.Bignums.zero!(x),bignums)
status,len,point = Grisu.bignumdtoa(1.0, Grisu.PRECISION, 3, buffer,bignums)
@test 3 >= len - 1
@test "1" == trimrep(buffer)
@test 1 == point
fill!(buffer,0)
map(x->Grisu.Bignums.zero!(x),bignums)
status,len,point = Grisu.bignumdtoa(1.5, Grisu.SHORTEST, 0, buffer,bignums)
@test "15" == trimrep(buffer)
@test 1 == point
fill!(buffer,0)
map(x->Grisu.Bignums.zero!(x),bignums)
status,len,point = Grisu.bignumdtoa(1.5, Grisu.FIXED, 10, buffer,bignums)
@test 10 >= len - 1 - point
@test "15" == trimrep(buffer)
@test 1 == point
fill!(buffer,0)
map(x->Grisu.Bignums.zero!(x),bignums)
status,len,point = Grisu.bignumdtoa(1.5, Grisu.PRECISION, 10, buffer,bignums)
@test 10 >= len - 1
@test "15" == trimrep(buffer)
@test 1 == point
fill!(buffer,0)
map(x->Grisu.Bignums.zero!(x),bignums)
min_double = 5e-324
status,len,point = Grisu.bignumdtoa(min_double, Grisu.SHORTEST, 0, buffer,bignums)
@test "5" == trimrep(buffer)
@test -323 == point
fill!(buffer,0)
map(x->Grisu.Bignums.zero!(x),bignums)
status,len,point = Grisu.bignumdtoa(min_double, Grisu.FIXED, 5, buffer,bignums)
@test 5 >= len - 1 - point
@test "" == trimrep(buffer)
status,len,point = Grisu.bignumdtoa(min_double, Grisu.PRECISION, 5, buffer,bignums)
@test 5 >= len - 1
@test "49407" == trimrep(buffer)
@test -323 == point
fill!(buffer,0)
map(x->Grisu.Bignums.zero!(x),bignums)
max_double = 1.7976931348623157e308
status,len,point = Grisu.bignumdtoa(max_double, Grisu.SHORTEST, 0, buffer,bignums)
@test "17976931348623157" == trimrep(buffer)
@test 309 == point
fill!(buffer,0)
map(x->Grisu.Bignums.zero!(x),bignums)
status,len,point = Grisu.bignumdtoa(max_double, Grisu.PRECISION, 7, buffer,bignums)
@test 7 >= len - 1
@test "1797693" == trimrep(buffer)
@test 309 == point
fill!(buffer,0)
map(x->Grisu.Bignums.zero!(x),bignums)
status,len,point = Grisu.bignumdtoa(4294967272.0, Grisu.SHORTEST, 0, buffer,bignums)
@test "4294967272" == unsafe_string(pointer(buffer))
@test 10 == point
fill!(buffer,0)
map(x->Grisu.Bignums.zero!(x),bignums)
status,len,point = Grisu.bignumdtoa(4294967272.0, Grisu.FIXED, 5, buffer,bignums)
@test "429496727200000" == unsafe_string(pointer(buffer))
@test 10 == point
fill!(buffer,0)
map(x->Grisu.Bignums.zero!(x),bignums)
status,len,point = Grisu.bignumdtoa(4294967272.0, Grisu.PRECISION, 14, buffer,bignums)
@test 14 >= len - 1
@test "4294967272" == trimrep(buffer)
@test 10 == point
fill!(buffer,0)
map(x->Grisu.Bignums.zero!(x),bignums)
status,len,point = Grisu.bignumdtoa(4.1855804968213567e298, Grisu.SHORTEST, 0,buffer,bignums)
@test "4185580496821357" == trimrep(buffer)
@test 299 == point
fill!(buffer,0)
map(x->Grisu.Bignums.zero!(x),bignums)
status,len,point = Grisu.bignumdtoa(4.1855804968213567e298, Grisu.PRECISION, 20,buffer,bignums)
@test 20 >= len - 1
@test "41855804968213567225" == trimrep(buffer)
@test 299 == point
fill!(buffer,0)
map(x->Grisu.Bignums.zero!(x),bignums)
status,len,point = Grisu.bignumdtoa(5.5626846462680035e-309, Grisu.SHORTEST, 0, buffer,bignums)
@test "5562684646268003" == trimrep(buffer)
@test -308 == point
fill!(buffer,0)
map(x->Grisu.Bignums.zero!(x),bignums)
status,len,point = Grisu.bignumdtoa(5.5626846462680035e-309, Grisu.PRECISION, 1, buffer,bignums)
@test 1 >= len - 1
@test "6" == trimrep(buffer)
@test -308 == point
fill!(buffer,0)
map(x->Grisu.Bignums.zero!(x),bignums)
status,len,point = Grisu.bignumdtoa(2147483648.0, Grisu.SHORTEST, 0, buffer,bignums)
@test "2147483648" == trimrep(buffer)
@test 10 == point
fill!(buffer,0)
map(x->Grisu.Bignums.zero!(x),bignums)
status,len,point = Grisu.bignumdtoa(2147483648.0, Grisu.FIXED, 2, buffer,bignums)
@test 2 >= len - 1 - point
@test "2147483648" == trimrep(buffer)
@test 10 == point
fill!(buffer,0)
map(x->Grisu.Bignums.zero!(x),bignums)
status,len,point = Grisu.bignumdtoa(2147483648.0, Grisu.PRECISION, 5, buffer,bignums)
@test 5 >= len - 1
@test "21475" == trimrep(buffer)
@test 10 == point
fill!(buffer,0)
map(x->Grisu.Bignums.zero!(x),bignums)
status,len,point = Grisu.bignumdtoa(3.5844466002796428e+298, Grisu.SHORTEST, 0, buffer,bignums)
@test "35844466002796428" == trimrep(buffer)
@test 299 == point
fill!(buffer,0)
map(x->Grisu.Bignums.zero!(x),bignums)
status,len,point = Grisu.bignumdtoa(3.5844466002796428e+298, Grisu.PRECISION, 10, buffer,bignums)
@test 10 >= len - 1
@test "35844466" == trimrep(buffer)
@test 299 == point
fill!(buffer,0)
map(x->Grisu.Bignums.zero!(x),bignums)
v = reinterpret(Float64,0x0010000000000000)
status,len,point = Grisu.bignumdtoa(v, Grisu.SHORTEST, 0, buffer,bignums)
@test "22250738585072014" == trimrep(buffer)
@test -307 == point
fill!(buffer,0)
map(x->Grisu.Bignums.zero!(x),bignums)
status,len,point = Grisu.bignumdtoa(v, Grisu.PRECISION, 20, buffer,bignums)
@test 20 >= len - 1
@test "22250738585072013831" == trimrep(buffer)
@test -307 == point
fill!(buffer,0)
map(x->Grisu.Bignums.zero!(x),bignums)
v = reinterpret(Float64,0x000FFFFFFFFFFFFF)
status,len,point = Grisu.bignumdtoa(v, Grisu.SHORTEST, 0, buffer,bignums)
@test "2225073858507201" == trimrep(buffer)
@test -307 == point
fill!(buffer,0)
map(x->Grisu.Bignums.zero!(x),bignums)
status,len,point = Grisu.bignumdtoa(v, Grisu.PRECISION, 20, buffer,bignums)
@test 20 >= len - 1
@test "2225073858507200889" == trimrep(buffer)
@test -307 == point
fill!(buffer,0)
map(x->Grisu.Bignums.zero!(x),bignums)
status,len,point = Grisu.bignumdtoa(4128420500802942e-24, Grisu.SHORTEST, 0, buffer,bignums)
@test "4128420500802942" == trimrep(buffer)
@test -8 == point
fill!(buffer,0)
map(x->Grisu.Bignums.zero!(x),bignums)
v = 3.9292015898194142585311918e-10
status,len,point = Grisu.bignumdtoa(v, Grisu.SHORTEST, 0, buffer,bignums)
@test "39292015898194143" == trimrep(buffer)
v = 4194304.0
status,len,point = Grisu.bignumdtoa(v, Grisu.FIXED, 5, buffer,bignums)
@test 5 >= len - 1 - point
@test "4194304" == trimrep(buffer)
v = 3.3161339052167390562200598e-237
status,len,point = Grisu.bignumdtoa(v, Grisu.PRECISION, 19, buffer,bignums)
@test 19 >= len - 1
@test "3316133905216739056" == trimrep(buffer)
@test -236 == point
fill!(buffer,0)
map(x->Grisu.Bignums.zero!(x),bignums)
v = 7.9885183916008099497815232e+191
status,len,point = Grisu.bignumdtoa(v, Grisu.PRECISION, 4, buffer,bignums)
@test 4 >= len - 1
@test "7989" == trimrep(buffer)
@test 192 == point
fill!(buffer,0)
map(x->Grisu.Bignums.zero!(x),bignums)
v = 1.0000000000000012800000000e+17
status,len,point = Grisu.bignumdtoa(v, Grisu.FIXED, 1, buffer,bignums)
@test 1 >= len - 1 - point
@test "100000000000000128" == trimrep(buffer)
@test 18 == point
fill!(buffer,0)
map(x->Grisu.Bignums.zero!(x),bignums)
min_float = Float32(1e-45)
status,len,point = Grisu.bignumdtoa(min_float, Grisu.SHORTEST, 0, buffer,bignums)
@test "1" == trimrep(buffer)
@test -44 == point
fill!(buffer,0)
map(x->Grisu.Bignums.zero!(x),bignums)
max_float = Float32(3.4028234e38)
status,len,point = Grisu.bignumdtoa(max_float, Grisu.SHORTEST, 0, buffer,bignums)
@test "34028235" == trimrep(buffer)
@test 39 == point
fill!(buffer,0)
map(x->Grisu.Bignums.zero!(x),bignums)
status,len,point = Grisu.bignumdtoa(Float32(4294967272.0), Grisu.SHORTEST, 0, buffer,bignums)
@test "42949673" == trimrep(buffer)
@test 10 == point
fill!(buffer,0)
map(x->Grisu.Bignums.zero!(x),bignums)
status,len,point = Grisu.bignumdtoa(Float32(3.32306998946228968226e+35), Grisu.SHORTEST, 0, buffer,bignums)
@test "332307" == trimrep(buffer)
@test 36 == point
fill!(buffer,0)
map(x->Grisu.Bignums.zero!(x),bignums)
status,len,point = Grisu.bignumdtoa(Float32(1.2341e-41), Grisu.SHORTEST, 0, buffer,bignums)
@test "12341" == trimrep(buffer)
@test -40 == point
fill!(buffer,0)
map(x->Grisu.Bignums.zero!(x),bignums)
status,len,point = Grisu.bignumdtoa(Float32(3.3554432e7), Grisu.SHORTEST, 0, buffer,bignums)
@test "33554432" == trimrep(buffer)
@test 8 == point
fill!(buffer,0)
map(x->Grisu.Bignums.zero!(x),bignums)
status,len,point = Grisu.bignumdtoa(Float32(3.26494756798464e14), Grisu.SHORTEST, 0, buffer,bignums)
@test "32649476" == trimrep(buffer)
@test 15 == point
fill!(buffer,0)
map(x->Grisu.Bignums.zero!(x),bignums)
status,len,point = Grisu.bignumdtoa(Float32(3.91132223637771935344e37), Grisu.SHORTEST, 0, buffer,bignums)
@test "39113222" == trimrep(buffer)
@test 38 == point
fill!(buffer,0)
map(x->Grisu.Bignums.zero!(x),bignums)
v = reinterpret(Float32,0x00800000)
status,len,point = Grisu.bignumdtoa(v, Grisu.SHORTEST, 0, buffer,bignums)
@test "11754944" == trimrep(buffer)
@test -37 == point
fill!(buffer,0)
map(x->Grisu.Bignums.zero!(x),bignums)
v = reinterpret(Float32,0x007FFFFF)
status,len,point = Grisu.bignumdtoa(v, Grisu.SHORTEST, 0, buffer,bignums)
@test "11754942" == trimrep(buffer)
@test -37 == point
fill!(buffer,0)
map(x->Grisu.Bignums.zero!(x),bignums)
# Float16 inputs
min_double = floatmin(Float16)
status,len,point = Grisu.fastshortest(min_double,buffer)
@test status
@test "6104" == trimrep(buffer)
@test -4 == point
fill!(buffer,0)
max_double = floatmax(Float16)
status,len,point = Grisu.fastshortest(max_double,buffer)
@test status
@test "655" == trimrep(buffer)
@test 5 == point
fill!(buffer,0)
status,len,point = Grisu.fastprecision(Float16(1.0), 3, buffer)
@test status
@test 3 >= len-1
@test "1" == trimrep(buffer)
@test 1 == point
fill!(buffer,0)
status,len,point = Grisu.fastprecision(Float16(1.5), 10, buffer)
if status
@test 10 >= len-1
@test "15" == trimrep(buffer)
@test 1 == point
fill!(buffer,0)
end
status,len,point = Grisu.fastfixedtoa(Float16(1.0), 0,1, buffer)
@test "1" == unsafe_string(pointer(buffer))
@test 1 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(Float16(1.0), 0,15, buffer)
@test "1" == unsafe_string(pointer(buffer))
@test 1 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(Float16(1.0), 0,0, buffer)
@test "1" == unsafe_string(pointer(buffer))
@test 1 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(Float16(1.5), 0,5, buffer)
@test "15" == unsafe_string(pointer(buffer))
@test 1 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(Float16(1.55), 0,5, buffer)
@test "15498" == unsafe_string(pointer(buffer)) #todo
@test 1 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(Float16(1.55), 0,1, buffer)
@test "15" == unsafe_string(pointer(buffer))
@test 1 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(Float16(1.00000001), 0,15, buffer)
@test "1" == unsafe_string(pointer(buffer))
@test 1 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(Float16(0.1), 0,10, buffer)
@test "999755859" == unsafe_string(pointer(buffer))
@test -1 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(Float16(0.01), 0,10, buffer)
@test "100021362" == unsafe_string(pointer(buffer))
@test -1 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(Float16(0.001), 0,10, buffer)
@test "10004044" == unsafe_string(pointer(buffer))
@test -2 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(Float16(0.0001), 0,10, buffer) #todo
@test "1000166" == unsafe_string(pointer(buffer))
@test -3 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(Float16(0.00001), 0,10, buffer) #todo
@test "100136" == unsafe_string(pointer(buffer))
@test -4 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(Float16(0.000001), 0,10, buffer)
@test "10133" == unsafe_string(pointer(buffer))
@test -5 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(Float16(0.0000001), 0,10, buffer)
@test "1192" == unsafe_string(pointer(buffer))
@test -6 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(Float16(0.6), 0,0, buffer)
@test "1" == unsafe_string(pointer(buffer))
@test 1 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(Float16(0.96), 0,1, buffer)
@test "1" == unsafe_string(pointer(buffer))
@test 1 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(Float16(0.996), 0,2, buffer)
@test "1" == unsafe_string(pointer(buffer))
@test 1 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(Float16(0.9996), 0,3, buffer)
@test "1" == unsafe_string(pointer(buffer))
@test 1 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(Float16(0.99996), 0,4, buffer)
@test "1" == unsafe_string(pointer(buffer))
@test 1 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(Float16(0.999996), 0,5, buffer)
@test "1" == unsafe_string(pointer(buffer))
@test 1 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(Float16(0.9999996), 0,6, buffer)
@test "1" == unsafe_string(pointer(buffer))
@test 1 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(Float16(0.99999996), 0,7, buffer)
@test "1" == unsafe_string(pointer(buffer))
@test 1 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(Float16(42), 0,20, buffer)
@test "42" == unsafe_string(pointer(buffer))
@test 2 == point
fill!(buffer,0)
status,len,point = Grisu.fastfixedtoa(Float16(0.5), 0,0, buffer)
@test "1" == unsafe_string(pointer(buffer))
@test 1 == point
fill!(buffer,0)
# grisu: the combined dtoa driver (returns len, point, neg)
len,point,neg = Grisu.grisu(0.0, Grisu.SHORTEST, 0, buffer)
@test "0" == unsafe_string(pointer(buffer))
@test 1 == point
fill!(buffer,0)
len,point,neg = Grisu.grisu(Float32(0.0), Grisu.SHORTEST, 0, buffer)
@test "0" == unsafe_string(pointer(buffer))
@test 1 == point
fill!(buffer,0)
len,point,neg = Grisu.grisu(0.0, Grisu.FIXED, 2, buffer)
@test 1 >= len-1
@test "0" == unsafe_string(pointer(buffer))
@test 1 == point
fill!(buffer,0)
len,point,neg = Grisu.grisu(0.0, Grisu.PRECISION, 3, buffer)
@test 1 >= len-1
@test "0" == unsafe_string(pointer(buffer))
@test 1 == point
fill!(buffer,0)
len,point,neg = Grisu.grisu(1.0, Grisu.SHORTEST, 0, buffer)
@test "1" == unsafe_string(pointer(buffer))
@test 1 == point
fill!(buffer,0)
len,point,neg = Grisu.grisu(Float32(1.0), Grisu.SHORTEST, 0, buffer)
@test "1" == unsafe_string(pointer(buffer))
@test 1 == point
fill!(buffer,0)
len,point,neg = Grisu.grisu(1.0, Grisu.FIXED, 3, buffer)
@test 3 >= len-1-point
@test "1" == trimrep(buffer)
@test 1 == point
fill!(buffer,0)
len,point,neg = Grisu.grisu(1.0, Grisu.PRECISION, 3, buffer)
@test 3 >= len-1
@test "1" == trimrep(buffer)
@test 1 == point
fill!(buffer,0)
len,point,neg = Grisu.grisu(1.5, Grisu.SHORTEST, 0, buffer)
@test "15" == unsafe_string(pointer(buffer))
@test 1 == point
fill!(buffer,0)
len,point,neg = Grisu.grisu(Float32(1.5), Grisu.SHORTEST, 0, buffer)
@test "15" == unsafe_string(pointer(buffer))
@test 1 == point
fill!(buffer,0)
len,point,neg = Grisu.grisu(1.5, Grisu.FIXED, 10, buffer)
@test 10 >= len-1-point
@test "15" == trimrep(buffer)
@test 1 == point
fill!(buffer,0)
len,point,neg = Grisu.grisu(1.5, Grisu.PRECISION, 10, buffer)
@test 10 >= len-1
@test "15" == trimrep(buffer)
@test 1 == point
fill!(buffer,0)
min_double = 5e-324
len,point,neg = Grisu.grisu(min_double, Grisu.SHORTEST, 0, buffer)
@test "5" == unsafe_string(pointer(buffer))
@test -323 == point
fill!(buffer,0)
min_float = 1e-45
len,point,neg = Grisu.grisu(Float32(min_float), Grisu.SHORTEST, 0, buffer)
@test "1" == unsafe_string(pointer(buffer))
@test -44 == point
fill!(buffer,0)
len,point,neg = Grisu.grisu(min_double, Grisu.FIXED, 5, buffer)
@test 5 >= len-1-point
@test "" == trimrep(buffer)
@test -5 == point
fill!(buffer,0)
len,point,neg = Grisu.grisu(min_double, Grisu.PRECISION, 5, buffer)
@test 5 >= len-1
@test "49407" == trimrep(buffer)
@test -323 == point
fill!(buffer,0)
max_double = 1.7976931348623157e308
len,point,neg = Grisu.grisu(max_double, Grisu.SHORTEST, 0, buffer)
@test "17976931348623157" == unsafe_string(pointer(buffer))
@test 309 == point
fill!(buffer,0)
max_float = 3.4028234e38
len,point,neg = Grisu.grisu(Float32(max_float), Grisu.SHORTEST, 0, buffer)
@test "34028235" == unsafe_string(pointer(buffer))
@test 39 == point
fill!(buffer,0)
len,point,neg = Grisu.grisu(max_double, Grisu.PRECISION, 7, buffer)
@test 7 >= len-1
@test "1797693" == trimrep(buffer)
@test 309 == point
fill!(buffer,0)
len,point,neg = Grisu.grisu(4294967272.0, Grisu.SHORTEST, 0, buffer)
@test "4294967272" == unsafe_string(pointer(buffer))
@test 10 == point
fill!(buffer,0)
len,point,neg = Grisu.grisu(Float32(4294967272.0), Grisu.SHORTEST, 0, buffer)
@test "42949673" == unsafe_string(pointer(buffer))
@test 10 == point
fill!(buffer,0)
len,point,neg = Grisu.grisu(4294967272.0, Grisu.FIXED, 5, buffer)
@test 5 >= len-1-point
@test "4294967272" == trimrep(buffer)
@test 10 == point
fill!(buffer,0)
len,point,neg = Grisu.grisu(4294967272.0, Grisu.PRECISION, 14, buffer)
@test 14 >= len-1
@test "4294967272" == trimrep(buffer)
@test 10 == point
fill!(buffer,0)
len,point,neg = Grisu.grisu(4.1855804968213567e298, Grisu.SHORTEST, 0, buffer)
@test "4185580496821357" == unsafe_string(pointer(buffer))
@test 299 == point
fill!(buffer,0)
len,point,neg = Grisu.grisu(4.1855804968213567e298, Grisu.PRECISION, 20, buffer)
@test 20 >= len-1
@test "41855804968213567225" == trimrep(buffer)
@test 299 == point
fill!(buffer,0)
len,point,neg = Grisu.grisu(5.5626846462680035e-309, Grisu.SHORTEST, 0, buffer)
@test "5562684646268003" == unsafe_string(pointer(buffer))
@test -308 == point
fill!(buffer,0)
len,point,neg = Grisu.grisu(5.5626846462680035e-309, Grisu.PRECISION, 1, buffer)
@test 1 >= len-1
@test "6" == trimrep(buffer)
@test -308 == point
fill!(buffer,0)
len,point,neg = Grisu.grisu(-2147483648.0, Grisu.SHORTEST, 0, buffer)
@test 1 == neg
@test "2147483648" == unsafe_string(pointer(buffer))
@test 10 == point
fill!(buffer,0)
len,point,neg = Grisu.grisu(Float32(-2147483648.), Grisu.SHORTEST, 0, buffer)
@test 1 == neg
@test "21474836" == unsafe_string(pointer(buffer))
@test 10 == point
fill!(buffer,0)
len,point,neg = Grisu.grisu(-2147483648.0, Grisu.FIXED, 2, buffer)
@test 2 >= len-1-point
@test "2147483648" == trimrep(buffer)
@test 10 == point
fill!(buffer,0)
len,point,neg = Grisu.grisu(-2147483648.0, Grisu.PRECISION, 5, buffer)
@test 5 >= len-1
@test "21475" == trimrep(buffer)
@test 10 == point
fill!(buffer,0)
len,point,neg = Grisu.grisu(-3.5844466002796428e+298, Grisu.SHORTEST, 0, buffer)
@test 1 == neg
@test "35844466002796428" == unsafe_string(pointer(buffer))
@test 299 == point
fill!(buffer,0)
len,point,neg = Grisu.grisu(-3.5844466002796428e+298, Grisu.PRECISION, 10, buffer)
@test 1 == neg
@test 10 >= len-1
@test "35844466" == trimrep(buffer)
@test 299 == point
fill!(buffer,0)
v = reinterpret(Float64,0x0010000000000000)
len,point,neg = Grisu.grisu(v, Grisu.SHORTEST, 0, buffer)
@test "22250738585072014" == unsafe_string(pointer(buffer))
@test -307 == point
fill!(buffer,0)
f = reinterpret(Float32,0x00800000)
len,point,neg = Grisu.grisu(f, Grisu.SHORTEST, 0, buffer)
@test "11754944" == unsafe_string(pointer(buffer))
@test -37 == point
fill!(buffer,0)
len,point,neg = Grisu.grisu(v, Grisu.PRECISION, 20, buffer)
@test 20 >= len-1
@test "22250738585072013831" == trimrep(buffer)
@test -307 == point
fill!(buffer,0)
v = reinterpret(Float64,0x000FFFFFFFFFFFFF)
len,point,neg = Grisu.grisu(v, Grisu.SHORTEST, 0, buffer)
@test "2225073858507201" == unsafe_string(pointer(buffer))
@test -307 == point
fill!(buffer,0)
f = reinterpret(Float32,0x007FFFFF)
len,point,neg = Grisu.grisu(f, Grisu.SHORTEST, 0, buffer)
@test "11754942" == unsafe_string(pointer(buffer))
@test -37 == point
fill!(buffer,0)
len,point,neg = Grisu.grisu(v, Grisu.PRECISION, 20, buffer)
@test 20 >= len-1
@test "2225073858507200889" == trimrep(buffer)
@test -307 == point
fill!(buffer,0)
len,point,neg = Grisu.grisu(4128420500802942e-24, Grisu.SHORTEST, 0, buffer)
@test 0 == neg
@test "4128420500802942" == unsafe_string(pointer(buffer))
@test -8 == point
fill!(buffer,0)
v = -3.9292015898194142585311918e-10
len,point,neg = Grisu.grisu(v, Grisu.SHORTEST, 0, buffer)
@test "39292015898194143" == unsafe_string(pointer(buffer))
fill!(buffer,0)
f = Float32(-3.9292015898194142585311918e-10)
len,point,neg = Grisu.grisu(f, Grisu.SHORTEST, 0, buffer)
@test "39292017" == unsafe_string(pointer(buffer))
fill!(buffer,0)
v = 4194304.0
len,point,neg = Grisu.grisu(v, Grisu.FIXED, 5, buffer)
@test 5 >= len-1-point
@test "4194304" == trimrep(buffer)
fill!(buffer,0)
v = 3.3161339052167390562200598e-237
len,point,neg = Grisu.grisu(v, Grisu.PRECISION, 19, buffer)
@test 19 >= len-1
@test "3316133905216739056" == trimrep(buffer)
@test -236 == point
fill!(buffer,0)
len,point,neg = Grisu.grisu(0.0, Grisu.SHORTEST, 0, buffer)
@test !neg
len,point,neg = Grisu.grisu(-0.0, Grisu.SHORTEST, 0, buffer)
@test neg
len,point,neg = Grisu.grisu(1.0, Grisu.SHORTEST, 0, buffer)
@test !neg
len,point,neg = Grisu.grisu(-1.0, Grisu.SHORTEST, 0, buffer)
@test neg
len,point,neg = Grisu.grisu(Float32(0.0), Grisu.SHORTEST, 0, buffer)
@test !neg
len,point,neg = Grisu.grisu(-Float32(0.0), Grisu.SHORTEST, 0, buffer)
@test neg
len,point,neg = Grisu.grisu(Float32(1.0), Grisu.SHORTEST, 0, buffer)
@test !neg
len,point,neg = Grisu.grisu(-Float32(1.0), Grisu.SHORTEST, 0, buffer)
@test neg
len,point,neg = Grisu.grisu(0.0, Grisu.PRECISION, 1, buffer)
@test !neg
len,point,neg = Grisu.grisu(-0.0, Grisu.PRECISION, 1, buffer)
@test neg
len,point,neg = Grisu.grisu(1.0, Grisu.PRECISION, 1, buffer)
@test !neg
len,point,neg = Grisu.grisu(-1.0, Grisu.PRECISION, 1, buffer)
@test neg
len,point,neg = Grisu.grisu(0.0, Grisu.FIXED, 1, buffer)
@test !neg
len,point,neg = Grisu.grisu(-0.0, Grisu.FIXED, 1, buffer)
@test neg
len,point,neg = Grisu.grisu(1.0, Grisu.FIXED, 1, buffer)
@test !neg
len,point,neg = Grisu.grisu(-1.0, Grisu.FIXED, 1, buffer)
@test neg
len,point,neg = Grisu.grisu(0.0, Grisu.PRECISION, 0, buffer)
@test 0 >= len-1
@test "" == unsafe_string(pointer(buffer))
@test !neg
len,point,neg = Grisu.grisu(1.0, Grisu.PRECISION, 0, buffer)
@test 0 >= len-1
@test "" == unsafe_string(pointer(buffer))
@test !neg
len,point,neg = Grisu.grisu(0.0, Grisu.FIXED, 0, buffer)
@test 1 >= len-1
@test "0" == unsafe_string(pointer(buffer))
@test !neg
len,point,neg = Grisu.grisu(1.0, Grisu.FIXED, 0, buffer)
@test 1 >= len-1
@test "1" == unsafe_string(pointer(buffer))
@test !neg
# issue #29885
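# a float printed to a pipe whose buffer is full must arrive intact once the pipe
# is drained, even while another task concurrently prints a float to a second pipe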
@sync let p = Pipe(), q = Pipe()
Base.link_pipe!(p, reader_supports_async=true, writer_supports_async=true)
Base.link_pipe!(q, reader_supports_async=true, writer_supports_async=true)
@async write(p, zeros(UInt8, 2^18))
@async (print(p, 12.345); close(p.in))
@async print(q, 9.8)
read(p, 2^18)
@test read(p, String) == "12.345"
end
| Grisu | https://github.com/JuliaAttic/Grisu.jl.git |
|
[
"MIT"
] | 1.0.2 | 53bb909d1151e57e2484c3d1b53e19552b887fb2 | docs | 514 | # Grisu
[![Build Status](https://travis-ci.com/JuliaAttic/Grisu.jl.svg?branch=master)](https://travis-ci.com/JuliaAttic/Grisu.jl)
The (internal) Grisu module was removed in Julia 1.6. However, some packages
still rely on this module. To keep them working, the Grisu module was extracted
into a normal package that can be depended on.
Use it as follows: add a dependency on Grisu and load it with this snippet
instead of importing it directly:
```julia
if isdefined(Base, :Grisu)
import Base.Grisu
else
import Grisu
end
```
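For example, after this conditional import the low-level digit-generation
routines work the same way in either case. A minimal sketch (the 500-byte,
zero-filled buffer follows the convention of the package's own tests):

```julia
buffer = zeros(UInt8, 500)
# shortest round-trippable digits of 1.5; `point` is the decimal-point
# position and `neg` the sign flag
len, point, neg = Grisu.grisu(1.5, Grisu.SHORTEST, 0, buffer)
unsafe_string(pointer(buffer)) # "15", with point == 1
```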
| Grisu | https://github.com/JuliaAttic/Grisu.jl.git |
|
[
"MIT"
] | 0.7.3 | 4257575512979d48424448b1cb44e29d6fd5fd4a | code | 1640 | using DeconvOptim, TestImages, Images, FFTW, Noise, ImageView
using Plots
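# helper: centered, peak-normalized magnitude spectrum of x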
function norm_fft(x)
x = fftshift(abs.(fft(x)))
x ./= maximum(x)
end
function ideal_freq()
img = zeros((512, 512))
for i = 1:1
img[rand((1:512)), rand((1:512))] = 1
end
img = fftshift(img)
N_phot = 376
img ./= maximum(img)
img = convert(Array{Float32}, img .* N_phot)
dist = [sqrt((-1 + i - size(img)[1] / 2)^2 + (-1 + j - size(img)[2] / 2)^2)
for i = 1:size(img)[1], j = 1:size(img)[2]]
psf = ifftshift(exp.(-dist .^2 ./ 5.0 .^2))
psf ./= sum(psf)
psf = convert(Array{Float32}, psf)
#img_b = center_extract(conv(center_set!(copy(z1), img), ifftshift(center_set!(z, fftshift(psf))), [1, 2]), size(img))
img_b = conv(img, psf, [1, 2])
img_n = poisson(img_b, N_phot)
reg = DeconvOptim.TV(num_dims=2, sum_dims=[1, 2])
@time res, o = deconvolution(img_n, psf, iterations=10, λ=0.001f0,
loss=Poisson(), regularizer=reg, padding=0.00, plan_fft=true)
img_ft = norm_fft(img)[:, 257]
img_n_ft = norm_fft(img_n)[:, 257]
res_ft = norm_fft(res)[:, 257]
freq = fftshift(fftfreq(512, 1))
mtf = norm_fft(psf)[:, 257]
plot(freq, mtf, xlabel="Frequency in 1/pixel", ylabel = "Normalized intensity in Frequency space", label="OTF", dpi=300)
plot!(freq, img_ft,label="Image with Constant Frequency content")
plot!(freq, img_n_ft, label="Blurry image with noise", linestyle = :dot)
plot!(freq, res_ft, label="Deconvolved image")
savefig("src/assets/ideal_frequencies.png")
end
ideal_freq()
| DeconvOptim | https://github.com/roflmaostc/DeconvOptim.jl.git |
|
[
"MIT"
] | 0.7.3 | 4257575512979d48424448b1cb44e29d6fd5fd4a | code | 1690 | using Documenter, DocumenterCitations, DeconvOptim
cite_bib = CitationBibliography(joinpath(@__DIR__, "../paper/ref.bib"))
DocMeta.setdocmeta!(DeconvOptim, :DocTestSetup, :(using DeconvOptim); recursive=true)
makedocs(modules=[DeconvOptim],
plugins=[cite_bib],
sitename="DeconvOptim.jl",
doctest = false,
warnonly=true,
pages = Any[
"DeconvOptim.jl" => "index.md",
"Workflow" => Any[
"workflow/basic_workflow.md",
"workflow/changing_regularizers.md",
"workflow/changing_loss.md",
"workflow/3D_dataset.md",
"workflow/cuda.md",
"workflow/flexible_invert.md",
"workflow/performance_tips.md",
],
"Background" => Any[
"background/physical_background.md",
"background/mathematical_optimization.md",
"background/loss_functions.md",
"background/regularizer.md",
],
"Function references" => Any[
"function_references/deconvolution.md",
"function_references/loss.md",
"function_references/mapping.md",
"function_references/regularizer.md",
"function_references/utils.md",
"function_references/analysis.md",
],
"References" => "references.md"
],
)
deploydocs(repo = "github.com/roflmaostc/DeconvOptim.jl.git")
| DeconvOptim | https://github.com/roflmaostc/DeconvOptim.jl.git |
|
[
"MIT"
] | 0.7.3 | 4257575512979d48424448b1cb44e29d6fd5fd4a | code | 4032 | # here we compare various deconvolution options in terms of image quality
using IndexFunArrays, LinearAlgebra, Random, Noise, TestImages, FourierTools
using DeconvOptim, FFTW, Optim, LineSearches
using View5D, Plots
obj = 100f0 .* Float32.(testimage("resolution_test_512"))
# simulate a simple PSF
sz = size(obj);
R_max = sz[1] ./ 12.0;
psf = generate_psf(sz, R_max);
# simulate a perfect, noise-free blurred image
conv_img = DeconvOptim.conv(obj, psf);
# set a fixed point for the measured data quality
max_photons = 1000
Random.seed!(42)
measured = poisson(conv_img, max_photons);
opt_options = nothing
iterations = 100
function get_data(summary)
return (summary["best_ncc_img"], summary["best_nvar_img"])
end
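# plot the normalized cross correlation (NCC) trace of a run and mark the
# iteration where the best NCC was reached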
function show_ncc!(summary, title="", dpi=300)
nccs = summary["nccs"]
plt = plot!(nccs, label=title*" NCC")
col = plt[1][end].plotattributes[:markercolor]
vline!([summary["best_ncc_idx"]], line=:dash, color=col, label=title*"_best NCC", dpi=dpi)
println("best ncc index is $(summary["best_ncc_idx"])")
println("best ncc is $(summary["best_ncc"])")
println("best ncc loss is $(summary["losses"][summary["best_ncc_idx"]])")
println("final ncc is $(summary["nccs"][end])")
println("final loss is $(summary["losses"][end])")
xlabel!("iteration")
ylabel!("normalized cross correlation")
end
function show_nvar!(summary, title="")
nvars = summary["nvars"]
nvars_norm = nvars ./ nvars[1]
plt = plot!(nvars_norm, label=title*" NVAR")
col = plt[1][end].plotattributes[:markercolor]
vline!([summary["best_nvar_idx"]], line=:dash, color=col, label=title*"_best NVAR")
println("best nvar index is $(summary["best_nvar_idx"])")
println("best nvar is $(summary["best_nvar"])")
println("best nvar loss is $(summary["losses"][summary["best_nvar_idx"]])")
println("final nvar is $(summary["nvars"][end])")
println("final loss is $(summary["losses"][end])")
xlabel!("iteration")
ylabel!("normalized variance")
end
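# plot the loss trace, log-compressed relative to the lowest loss seen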
function show_loss!(summary, addCurves="", lowest_loss=minimum(summary["losses"]))
losses = summary["losses"]
log_losses = log.(losses .- lowest_loss .+ 1)
rel_losses = log_losses # .- maximum(log_losses)
plot!(rel_losses[1:end-1], label=addCurves)
xlabel!("iteration")
ylabel!("log loss")
end
opt_options, noreg_summary = DeconvOptim.options_trace_deconv(obj, iterations, Non_negative());
res_noreg = deconvolution(measured, psf;
regularizer=nothing, mapping=Non_negative(),
opt_options=opt_options, debug_f=nothing)
opt_grad, grad_summary = DeconvOptim.options_trace_deconv(obj, iterations, Non_negative());
res_grad = deconvolution(measured, psf;
regularizer=nothing, mapping=Non_negative(),
opt=GradientDescent(), opt_options=opt_grad, debug_f=nothing)
opt_gr, gr_summary = DeconvOptim.options_trace_deconv(obj, iterations, Non_negative());
res_gr = deconvolution(measured, psf;
regularizer=DeconvOptim.GR(), λ=1e-3,
mapping=Non_negative(), opt_options=opt_gr, debug_f=nothing)
opt_tv, tv_summary = DeconvOptim.options_trace_deconv(obj, iterations, Non_negative())
res_tv = deconvolution(measured, psf;
regularizer=DeconvOptim.TV(), λ=1e-3,
mapping=Non_negative(), opt_options=opt_tv, debug_f=nothing)
plot()
title!("Regularization")
do_display=false
show_ncc!(noreg_summary, "NoReg")
show_nvar!(noreg_summary, "NoReg")
show_ncc!(gr_summary, "GR")
show_nvar!(gr_summary, "GR")
show_ncc!(tv_summary, "TV")
show_nvar!(tv_summary, "TV")
plot()
title!("Optimization")
show_loss!(noreg_summary, "LBFGS", minimum(noreg_summary["losses"]))
show_loss!(grad_summary, "SteepestDecent", minimum(noreg_summary["losses"]))
show_loss!(tv_summary, "TV", minimum(noreg_summary["losses"]))
show_loss!(gr_summary, "GR", minimum(noreg_summary["losses"]))
best_ncc_img, best_nvar_img = get_data(tv_summary)
@vt obj
@vt measured
@vt best_ncc_img
@vt best_nvar_img
@vt res_noreg
| DeconvOptim | https://github.com/roflmaostc/DeconvOptim.jl.git |
|
[
"MIT"
] | 0.7.3 | 4257575512979d48424448b1cb44e29d6fd5fd4a | code | 8646 | ### A Pluto.jl notebook ###
# v0.14.5
using Markdown
using InteractiveUtils
# This Pluto notebook uses @bind for interactivity. When running this notebook outside of Pluto, the following 'mock version' of @bind gives bound variables a default value (instead of an error).
macro bind(def, element)
quote
local el = $(esc(element))
global $(esc(def)) = Core.applicable(Base.get, el) ? Base.get(el) : missing
el
end
end
# ╔═╡ 0507f8ed-8b64-48af-a6c4-ff7c4211b9e9
begin
using Pkg
Pkg.activate(".")
end
# ╔═╡ 3310d2f7-450a-4cd1-9c3a-46d02d23a7c6
using Revise
# ╔═╡ d27b2d72-d264-11eb-0be5-13dcacfd2adc
using DeconvOptim, TestImages, ImageShow, Plots, LinearAlgebra, IndexFunArrays, Noise, FourierTools, SpecialFunctions, FFTW, LaTeXStrings, PlutoUI, Images, Tullio
# ╔═╡ 952b251c-207b-4412-b6e1-268fce1647d9
begin
img = Float32.(testimage("fabio_gray"));
img_1D = img[:, 200]
end;
# ╔═╡ fa8cd9c9-a2fd-495c-8d22-ada7bb9c39f6
otf(x, Δx=1) = begin
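	# 1D cut through an incoherent OTF model: the classic circular-pupil term
	# 2/π * (acos(ρ) - ρ*sqrt(1 - ρ^2)) with ρ = x/Δx, multiplied here by a
	# jinc factor; zero beyond the cutoff frequency Δx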
x = abs(x)
if x <= Δx
SpecialFunctions.jinc(x * Δx *(1-x/Δx)) .* 2 / π * (acos(x/Δx) - x/Δx * sqrt(1-(x/Δx)^2))
else
zero(x)
end
end
# ╔═╡ 9a07bc88-be76-4531-bc91-df0d20c3221c
begin
x = range(-1.5, 1.5, length=size(img, 1))
freqs = fftshift(fftfreq(size(img_1D, 1), 1))
psf = Float32.(DeconvOptim.generate_psf(size(img), 20))
psf_1D = psf[1, :]
psf_1D ./= sum(psf_1D)
otf_1D = abs.(ffts(psf_1D))
end;
# ╔═╡ 24fc86da-0b7c-493c-8977-2a19ef6dc133
img_n = Float32.(poisson(DeconvOptim.conv(img, psf), 1000));
# ╔═╡ 3bda922d-552b-42ec-9055-33a141b5841a
blur(x, otf=otf_1D) = iffts(ffts(x) .* otf)
# ╔═╡ 308e2562-5a20-4082-8a6d-9fb135738c49
md"
Mathematically:
$(S * \text{PSF})(\mathbf r) = \int_{-\infty}^{\infty} S(\mathbf r - \mathbf x) \cdot \text{PSF}(\mathbf x) \, \mathrm d \mathbf x$
"
# ╔═╡ b542187a-3ae7-4430-a81d-f968d9000427
img_blurry = DeconvOptim.conv(img, psf);
# ╔═╡ 88d83629-f1b2-4d69-928f-d4acfbc76b70
reg_1D = TV(num_dims=1);
# ╔═╡ d6d0f436-48fd-4e8a-863c-3e9d3850cc91
begin
reg_tik = Tikhonov()
reg_TV = TV()
reg_GR = DeconvOptim.GR()
end
# ╔═╡ 061faf49-662c-4508-83b4-ddcf0970ed0d
md"### DeconvOptim.jl: Microscopy Image Deconvolution
"
# ╔═╡ 23829201-9756-4ef8-90c9-3917b761fe4b
load("../docs/src/assets/logo.png")
# ╔═╡ 30f21bb8-6d09-4fce-9d2a-568bfaf3ff7a
md"
* **Felix Wechsler:** Master's student at the Leibniz Institute of Photonic Technology in Jena, Germany
* https://github.com/roflmaostc/DeconvOptim.jl
* `]add DeconvOptim`
"
# ╔═╡ 7a0a44c9-fb07-44e6-9a8b-8f720b84e6f6
md"""### Image Convolution
* Typical description of isotropic blur of an image
* The blurring kernel describes blur
* In optics/microscopy a finite sized dot called Point Spread Function (**PSF**)
* often a Gaussian function used in image processing
* cigarre shaped object for motion blur
Discrete version:
$(S * \text{PSF})[i] = \sum_{m} S[i-m] \cdot \text{PSF}[m]$
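
A direct sketch of this sum for a 1D signal with circular boundaries (illustrative only; the notebook itself evaluates convolutions via the FFT):

```julia
function conv_direct(S, psf)
    out = zero.(S)
    for i in eachindex(S), m in eachindex(psf)
        out[i] += S[mod1(i - m + 1, length(S))] * psf[m]
    end
    return out
end
```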
"""
# ╔═╡ 49686d9a-1683-428f-83ba-a9131c2ad432
[Gray.(img) Gray.(DeconvOptim.conv(img, psf))]
# ╔═╡ 491ec7cb-1663-4acf-b81f-9acafba2b63d
md"## Convolution Theorem
$(S * \text{PSF})(\mathbf r) = \mathcal{F}^{-1}\bigg[ \mathcal{F}[S] \cdot \mathcal{F}[\text{PSF}] \bigg]$
* we can express the convolution with a Fast Fourier Transform (FFT) which only takes $\mathcal O(N \log(N))$ operations
* For large kernels (especially in 3D), direct sliding-window convolution is slower
* $\mathcal{F}[\text{PSF}]$ is called the $\text{OTF}$
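
A one-line sketch of FFT-based circular convolution (illustrative; assumes the FFT functions of FFTW are in scope):

```julia
conv_fft(S, psf) = real.(ifft(fft(S) .* fft(psf)))
```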
"
# ╔═╡ b6f9e42e-5a23-40ad-9e73-8f02da48f69c
md"### Optical systems act as a low-pass filter
* $\text{OTF}$ shows the frequency throughput
"
# ╔═╡ e31f4704-5bce-4c8d-b3a7-873a3460d9f8
plot(x, otf.(x), xlabel="frequency / maximum frequency", ylabel="contrast")
# ╔═╡ dc8cf4e2-a87c-47ea-8247-3ae772852241
md"## Frequency spectrum of blurred sample $Y(\mathbf r)$
Blurred sample:
$Y(\mathbf r) = (S * \text{PSF})(\mathbf r)$
"
# ╔═╡ f8033d13-faee-41f5-bdd6-ae8721e8b8a8
begin
	plot(freqs, abs.(ffts(img_blurry)[:, 128]), yaxis=:log, ylabel="abs of FFT output in AU", xlabel="frequency in 1/px", ylims=(1e-4, 1e2), label="blurred")
plot!(freqs, abs.(ffts(img)[:, 128]), ylabel="abs of FFT output in AU",
xlabel="frequency in 1/px", yaxis=:log,
ylims=(1e-4, 1e4), label="ground truth")
#plot!(freqs, abs.(ffts(DeconvOptim.conv(img_1D, psf_1D))))
end
# ╔═╡ fe7e0292-31f6-43b5-83a5-a38698a87563
md"## Deconvolution Pipeline
* based on:
* Zygote.jl
* Optim.jl
* Tullio.jl
* CUDA.jl
"
# ╔═╡ e9ef5ba4-56c0-4595-bc28-e04882f44a9a
load("../docs/src/assets/tex/pipeline.png")
# ╔═╡ 90e5708c-05a4-46e4-b1e4-9a61c96dae32
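# total variation of a 2D image via Tullio: the sum of local gradient magnitudes,
# with ε = 1f-8 inside the sqrt to keep it differentiable at zero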
TV_by_hand(x) = @tullio r = sqrt(1f-8 + abs2(x[i, j] - x[i+1, j]) +
abs2(x[i, j] - x[i, j+1]))
# ╔═╡ 03139ac5-3525-4fad-abf1-84421492b763
DeconvOptim.generate_TV(4, [1,2, 3], [1,1, 1], 1, 0)[1]
# ╔═╡ 4e84e739-9c59-4939-8b04-aec7dc069d67
md"
### Deconvolve with DeconvOptim.jl
"
# ╔═╡ 9d9a5da2-14df-46e6-b7e6-5a33aade1754
@bind reg_list2 Select(["1" => ("Tikhonov"), "2" => ("Total Variation TV"), "3" => ("Good's Roughness GR")])
# ╔═╡ 5ae1a6a3-4505-4123-9f1e-8a1d4ac0b4e1
reg = [reg_tik, reg_TV, reg_GR][parse(Int, reg_list2)]
# ╔═╡ 15d54e5e-64f4-4a1d-8cc8-9334b2e3784f
md"
iterations =
$(@bind iter Slider(0:50, show_value=true))
λ = $(@bind λ Slider(0:0.001:0.3, show_value=true))
regularizer = $(@bind reg_bool CheckBox())"
# ╔═╡ 803368e6-53fd-4413-b3f5-ffe46ee8983e
img_deconv, res_img = deconvolution(img_blurry, psf, regularizer=reg_bool ? reg : nothing, iterations=iter, λ=λ);
# ╔═╡ e58e1f63-2c81-48fb-866a-4bb70bd428a6
Gray.(img_deconv)
# ╔═╡ 438a6639-bd35-464f-a81d-d98eb65e006e
res_1D, o = deconvolution(real(blur(img_1D)), psf_1D, iterations=iter, regularizer=reg_1D, λ=0.01);
# ╔═╡ 7a16df73-95ad-47f5-907c-6fd23c6000cf
[Gray.(img) Gray.(img_blurry) Gray.(img_deconv)]
# ╔═╡ 5c8030b7-8805-4771-9329-23abb2744544
begin
plot(freqs, abs.(ffts(img_blurry)[:, 128]), yaxis=:log, ylabel="abs of FFT output in AU", xlabel="frequency in 1/px", ylims=(1e-4, 1e2), label="blurred")
plot!(freqs, abs.(ffts(img)[:, 128]), yaxis=:log,
ylims=(1e-4, 1e4), label="ground truth")
plot!(freqs, abs.(ffts(img_deconv)[:, 128]), label="deconvolved image")
end
# ╔═╡ ff9af79c-f06c-42ac-9c8d-6f09d2ff4056
[Gray.(img_1D); Gray.(res_1D)];
# ╔═╡ d8aee845-e922-41da-a17e-37ffa3e692f0
md"### Real Microscopy Data"
# ╔═╡ fc93fa3d-8599-463b-b9e6-8043b90e9d63
load("figures/real_data_large.png")
# ╔═╡ 97c45bfd-d1ef-49ad-908d-7360c03b0170
md"Image taken from [DeconvolutionLab2](http://bigwww.epfl.ch/deconvolution/deconvolutionlab2/)."
# ╔═╡ bf37664a-f726-46b6-9592-da419165af91
md"## Conclusion - DeconvOptim.jl
"
# ╔═╡ f8117250-bac8-43a1-aae4-5c8bab3a522d
[Gray.(ones(130, 012)) load("../docs/src/assets/logo.png")]
# ╔═╡ b5a70276-e9b9-46e8-8c67-0cad2cfa19da
md"* Flexible Image Deconvolution Software
* N-dimensional signal deconvolution
* Works on both CPUs and GPUs
* GPUs usually 5-15x speed improvement
"
# ╔═╡ Cell order:
# ╠═3310d2f7-450a-4cd1-9c3a-46d02d23a7c6
# ╠═0507f8ed-8b64-48af-a6c4-ff7c4211b9e9
# ╠═d27b2d72-d264-11eb-0be5-13dcacfd2adc
# ╠═952b251c-207b-4412-b6e1-268fce1647d9
# ╠═24fc86da-0b7c-493c-8977-2a19ef6dc133
# ╠═3bda922d-552b-42ec-9055-33a141b5841a
# ╟─fa8cd9c9-a2fd-495c-8d22-ada7bb9c39f6
# ╠═9a07bc88-be76-4531-bc91-df0d20c3221c
# ╟─308e2562-5a20-4082-8a6d-9fb135738c49
# ╠═b542187a-3ae7-4430-a81d-f968d9000427
# ╠═88d83629-f1b2-4d69-928f-d4acfbc76b70
# ╠═e58e1f63-2c81-48fb-866a-4bb70bd428a6
# ╠═d6d0f436-48fd-4e8a-863c-3e9d3850cc91
# ╠═803368e6-53fd-4413-b3f5-ffe46ee8983e
# ╠═5ae1a6a3-4505-4123-9f1e-8a1d4ac0b4e1
# ╠═438a6639-bd35-464f-a81d-d98eb65e006e
# ╟─061faf49-662c-4508-83b4-ddcf0970ed0d
# ╟─23829201-9756-4ef8-90c9-3917b761fe4b
# ╟─30f21bb8-6d09-4fce-9d2a-568bfaf3ff7a
# ╟─7a0a44c9-fb07-44e6-9a8b-8f720b84e6f6
# ╟─49686d9a-1683-428f-83ba-a9131c2ad432
# ╟─491ec7cb-1663-4acf-b81f-9acafba2b63d
# ╟─b6f9e42e-5a23-40ad-9e73-8f02da48f69c
# ╟─e31f4704-5bce-4c8d-b3a7-873a3460d9f8
# ╟─dc8cf4e2-a87c-47ea-8247-3ae772852241
# ╟─f8033d13-faee-41f5-bdd6-ae8721e8b8a8
# ╟─fe7e0292-31f6-43b5-83a5-a38698a87563
# ╟─e9ef5ba4-56c0-4595-bc28-e04882f44a9a
# ╠═90e5708c-05a4-46e4-b1e4-9a61c96dae32
# ╠═03139ac5-3525-4fad-abf1-84421492b763
# ╟─4e84e739-9c59-4939-8b04-aec7dc069d67
# ╟─9d9a5da2-14df-46e6-b7e6-5a33aade1754
# ╟─15d54e5e-64f4-4a1d-8cc8-9334b2e3784f
# ╟─7a16df73-95ad-47f5-907c-6fd23c6000cf
# ╟─5c8030b7-8805-4771-9329-23abb2744544
# ╟─ff9af79c-f06c-42ac-9c8d-6f09d2ff4056
# ╟─d8aee845-e922-41da-a17e-37ffa3e692f0
# ╟─fc93fa3d-8599-463b-b9e6-8043b90e9d63
# ╟─97c45bfd-d1ef-49ad-908d-7360c03b0170
# ╟─bf37664a-f726-46b6-9592-da419165af91
# ╟─f8117250-bac8-43a1-aae4-5c8bab3a522d
# ╟─b5a70276-e9b9-46e8-8c67-0cad2cfa19da
| DeconvOptim | https://github.com/roflmaostc/DeconvOptim.jl.git |
|
[
"MIT"
] | 0.7.3 | 4257575512979d48424448b1cb44e29d6fd5fd4a | code | 1708 | ### A Pluto.jl notebook ###
# v0.14.8
using Markdown
using InteractiveUtils
# ╔═╡ 0552a944-cdbe-11eb-2e63-49f12bee1624
using FourierTools, TestImages, Colors, ImageShow, ImageCore, Statistics
# ╔═╡ 9a73fe7d-d298-4791-8f29-5a962d1242d1
gauss(y, x, σ=1) = 1 / σ / √(2π) * exp(-0.5 * (x^2 + y^2) / σ^2)
# ╔═╡ b8b2b314-4fed-420e-9061-6c2716c134c8
begin
y = fftpos(512, 512)
x = y'
end;
# ╔═╡ 97dc4b9d-0b32-4057-8a22-85cb40ae1ebf
begin
kernel = ifftshift_view(gauss.(y, x, Ref(3)))
kernel ./= sum(kernel)
end;
# ╔═╡ 7500e8d2-16a7-4ea3-9192-78b4acfa327e
function conv_pad(A, B, value=zero(eltype(A)), pad=10)
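    # embed A (offset by the constant `value`) in a larger array, zero-pad the
    # kernel to match, convolve, then crop back to the original size;
    # this suppresses wrap-around artifacts at the borders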
A_1 = value .+ FourierTools.select_region(A .- value, new_size=size(A) .+ pad)
B_1 = ifftshift_view(FourierTools.select_region(fftshift_view(B), new_size=size(A) .+ pad))
return FourierTools.select_region(conv(A_1, B_1), new_size=size(A))
end
# ╔═╡ 44370313-62d3-4774-aed6-72a51410503d
img = Float64.(Gray.(testimage("house")));
# ╔═╡ d190772c-885f-405b-b0c9-bd11b6ac992c
img_blurry = Gray.(conv(kernel, img))
# ╔═╡ 0e57683e-7af8-4940-8494-032d6190f2cf
img_blurry[end-100:end-51, 1:50]
# ╔═╡ b8a8adfb-af4a-4ff9-b739-a040f05e2896
img_blurry2 = Gray.(conv_pad(img, kernel, 0.7, 10))
# ╔═╡ 94816b91-0ff3-41ef-9146-b71f150be161
img_blurry2[end-100:end-51, 1:50]
# ╔═╡ Cell order:
# ╠═0552a944-cdbe-11eb-2e63-49f12bee1624
# ╠═9a73fe7d-d298-4791-8f29-5a962d1242d1
# ╠═b8b2b314-4fed-420e-9061-6c2716c134c8
# ╠═97dc4b9d-0b32-4057-8a22-85cb40ae1ebf
# ╠═7500e8d2-16a7-4ea3-9192-78b4acfa327e
# ╠═44370313-62d3-4774-aed6-72a51410503d
# ╠═d190772c-885f-405b-b0c9-bd11b6ac992c
# ╠═0e57683e-7af8-4940-8494-032d6190f2cf
# ╠═b8a8adfb-af4a-4ff9-b739-a040f05e2896
# ╠═94816b91-0ff3-41ef-9146-b71f150be161
| DeconvOptim | https://github.com/roflmaostc/DeconvOptim.jl.git |
|
[
"MIT"
] | 0.7.3 | 4257575512979d48424448b1cb44e29d6fd5fd4a | code | 1248 | module DeconvOptim
export gpu_or_cpu
# to check whether CUDA is enabled
using Requires
# for fast array regularizers
using Tullio
# optional CUDA dependency
include("requires.jl")
# for optimization
using Optim
#mean
using Statistics
using StatsBase
using FFTW
FFTW.set_num_threads(12)
using LineSearches
# possible up_sampling
using Interpolations
# for defining custom derivatives
using ChainRulesCore
using LinearAlgebra
using FillArrays
using PrecompileTools
include("forward_models.jl")
include("lossfunctions.jl")
include("mappings.jl")
# special CUDA regularizers
include("regularizer_cuda.jl")
include("regularizer.jl")
include("utils.jl")
include("conv.jl")
include("generic_invert.jl")
include("lucy_richardson.jl")
include("deconvolution.jl")
include("analysis_tools.jl")
# refresh Zygote to load the custom rrules defined with ChainRulesCore
using Zygote: gradient
# precompilation doesn't save much load time, but a little
@setup_workload begin
img = abs.(randn((4,4,2)))
psf = abs.(randn((4,4,2)))
@compile_workload begin
deconvolution(Float32.(img), Float32.(psf), regularizer=TV(num_dims=3), iterations=2)
deconvolution(img, psf, regularizer=TV(num_dims=3), iterations=2)
end
end
# end module
end
| DeconvOptim | https://github.com/roflmaostc/DeconvOptim.jl.git |
|
[
"MIT"
] | 0.7.3 | 4257575512979d48424448b1cb44e29d6fd5fd4a | code | 9013 |
"""
relative_energy_regain(ground_truth, rec)
Calculates the relative energy regain between the `ground_truth`
and the reconstruction `rec`.
Assumes that both arrays are 2-dimensional.
# Reference
* [Rainer Heintzmann, \"Estimating missing information by maximum likelihood deconvolution\"](https://www.sciencedirect.com/science/article/abs/pii/S0968432806001272)
"""
function relative_energy_regain(ground_truth, rec)
T = eltype(ground_truth)
# go to fourier space
ground_truth_fft = fft(ground_truth)
rec_fft = fft(rec)
# a dict to store the values for certain frequencies
# we store a list since some (rounded) frequencies occur more than once
ΔE_R_dict = Dict{T,Vector{T}}()
E_R_dict = Dict{T,Vector{T}}()
# round the frequencies to 3 digits; an alternative would be to bin
round4(x) = T(round(x, digits=3))
# iterate over the frequencies and calculate the relative energy regain
for (i₂, f₂) in enumerate(fftfreq(size(rec_fft, 2)))
for (i₁, f₁) in enumerate(fftfreq(size(rec_fft, 1)))
f_res = round4(√(f₁^2 + f₂^2))
Δ_E_R = abs2(ground_truth_fft[i₁, i₂] - rec_fft[i₁, i₂])
E_R = abs2(ground_truth_fft[i₁, i₂])
update_dict_list!(ΔE_R_dict, f_res, Δ_E_R)
update_dict_list!(E_R_dict, f_res, E_R)
end
end
# finally transform everything into a list of frequencies and
# a list of relative energy regains
freqs = T[]
G_R_list = T[]
for f in sort(T.(keys(ΔE_R_dict)))
push!(freqs, f)
mean_ΔE_r = mean(ΔE_R_dict[f])
mean_E_r = mean(E_R_dict[f])
push!(G_R_list, (mean_E_r - mean_ΔE_r) / mean_E_r)
end
return freqs, G_R_list
end
"""
update_dict_list!(d, k, v)
Updates the dict `d` which stores a list.
If `k` is in the keys of `d`, we simply push `v` to the list,
otherwise we create a new list `[v]`.
"""
function update_dict_list!(d, k, v)
if haskey(d, k)
push!(d[k], v)
else
d[k] = [v]
end
return d
end
"""
normalized_cross_correlation(ground_truth, measured)
Calculates the normalized cross correlation.
External links:
* [Wikipedia](https://en.wikipedia.org/wiki/Cross-correlation)
* [StatsBase.jl](https://juliastats.org/StatsBase.jl/stable/signalcorr/#StatsBase.crosscor)
"""
function normalized_cross_correlation(ground_truth, measured)
fl(x) = collect(Iterators.flatten(x))
ground_truth = fl(ground_truth)
measured = fl(measured)
ncc = crosscor(ground_truth, measured, [0], demean=true)[begin]
return ncc
end
"""
normalized_variance(a, b)
Calculates the mean squared difference between two arrays, normalizing array `a` to the same mean as array `b`.
"""
function normalized_variance(a, b)
factor = sum(b) / sum(a)
sum(abs2.(a .* factor .- b)) ./ prod(size(a))
end
function reset_summary!(summary)
summary["losses"] = []
summary["best_ncc"] = -Inf
summary["best_ncc_idx"] = 0
summary["best_ncc_img"] = []
summary["nccs"] = []
summary["best_nvar"] = Inf
summary["best_nvar_idx"] = 0
summary["best_nvar_img"] = []
summary["times"] = []
summary["step_sizes"] = []
summary["nvars"] = []
end
"""
options_trace_deconv(ground_truth, iterations, mapping, every=1; more_options...)
A useful routine to simplify performance checks of deconvolution on simulated data.
Returns an Options structure to be used with the deconvolution routine as an argument to `opt_options` and
a summary dictionary with all the performance metrics calculated, which is reset and updated during deconvolution.
This can then be plotted or visualized.
The summary dictionary has the following content:
"best_nvar" => the lowest normalized variance compared to the `ground_truth` that was achieved.
"best_nvar_img" => the reconstruction result corresponding to this lowest normalized variance
"best_nvar_idx" => the corresponding index where this was achieved. `(best_nvar_idx-1)*every+1` approximated the iteration number.
"best_ncc" => the highest normalized crosscorrelation compared to the `ground_truth` that was achieved.
"best_ncc_img" => the reconstruction result corresponding to this highest normalized crosscorrelation
"losses" => the vector of losses evaluated at each of `every` iterations.
"nccs" => the vector of normalized cross correlations calculated at each of `every` iterations.
"best_ncc_idx" => the corresponding index where this was achieved. `(best_ncc_idx-1)*every+1` approximated the iteration number.
"nvars" => the vector of normalized variances calculated at each of `every` iterations.
For an example of how to plot the results, see the file `` in the `examples` folder.
# Arguments
- `ground_truth`: The underlying ground truth data. Note that this scaling is unimportant due to the normalized norms used for comparison,
whereas the relative offset matters.
- `iterations`: The maximal number of iterations to perform. If convergence is reached, the result may use fewer iterations
- `mapping`: If mappings such as positivity constraints (e.g. `Non_negative()`) are used in the deconvolution routine, they also
need to be provided here. Otherwise select `nothing`.
- `every`: This option selects how often (i.e. every how many iterations) the evaluation is performed. Note that the results do not keep track
of this iteration number.
The argument list can be followed by a semicolon and any number of named arguments, which will be passed to the options structure.
E.g. `opt_noreg, show_noreg = options_trace_deconv(ground_truth, iterations, mapping; x_tol=0.001, f_tol=0.001, f_calls_limit=100);`
# Example
```julia-repl
julia> using DeconvOptim, TestImages, Noise, Plots;
julia> obj = Float32.(testimage("resolution_test_512"));
julia> psf = Float32.(generate_psf(size(obj), 30));
julia> img_b = conv(obj, psf);
julia> img_n = poisson(img_b, 300);
julia> iterations = 100;
julia> opt_noreg, show_noreg = options_trace_deconv(obj, iterations, Non_negative());
julia> res_noreg, o = deconvolution(img_n, psf, regularizer = nothing, opt_options=opt_noreg);
julia> opt_GR, show_GR = options_trace_deconv(obj, iterations, Non_negative());
julia> res_GR, o = deconvolution(img_n, psf, λ=1e-2, regularizer=DeconvOptim.GR(), opt_options=opt_GR);
julia> opt_TV, show_TV = options_trace_deconv(obj, iterations, Non_negative());
julia> res_TV, o = deconvolution(img_n, psf, λ=1e-3, regularizer=DeconvOptim.TV(), opt_options=opt_TV);
julia> plot()
julia> show_noreg(false,"NoReg")
julia> show_GR(false,"GR")
julia> show_TV(false,"TV")
julia> using View5D
julia> @vt (ground_truth, best_ncc_img, best_nvar_img) = show_noreg(true)
```
"""
function options_trace_deconv(ground_truth, iterations, mapping, every=1; more_options...)
@show more_options
summary = Dict()
reset_summary!(summary)
summary["ground_truth"] = ground_truth # needs to be accessible
idx = 1
cb = tr -> begin
img = (mapping === nothing) ? tr[end].metadata["x"] : mapping[1](tr[end].metadata["x"])
img *= mean(summary["ground_truth"])
record_progress!(summary, img, idx, tr[end].value,
tr[end].metadata["time"], tr[end].metadata["Current step size"])
idx += 1
false
end
opt_options = Optim.Options(callback=cb, iterations=iterations, show_every=every, store_trace=true, extended_trace=true; more_options...)
return (opt_options, summary)
end
"""
record_progress!(summary, img, idx, loss, mytime, stepsize)
helper function for recording the iteration progress in a summary dictionary.
"""
function record_progress!(summary, img, idx, loss, mytime, stepsize)
# iteration always starts with index 0 (before 1st iteration)
if idx == 0
reset_summary!(summary)
end
push!(summary["losses"], loss)
push!(summary["times"], mytime)
push!(summary["step_sizes"], stepsize)
# current image:
# the line below is needed, since in the iterations, the measurement is rescaled to a mean of one.
# see deconvolution.jl. This rescaling is only an estimate and does not affect the norms.
ground_truth = summary["ground_truth"]
ncc = DeconvOptim.normalized_cross_correlation(ground_truth, img)
push!(summary["nccs"], ncc)
summary["best_ncc"], summary["best_ncc_img"], summary["best_ncc_idx"] = let
if ncc > summary["best_ncc"]
(ncc, img, idx)
else
(summary["best_ncc"], summary["best_ncc_img"], summary["best_ncc_idx"])
end
end
nvar = normalized_variance(img, ground_truth)
push!(summary["nvars"], nvar)
summary["best_nvar"], summary["best_nvar_img"], summary["best_nvar_idx"] = let
if nvar < summary["best_nvar"]
(nvar, img, idx)
else
(summary["best_nvar"], summary["best_nvar_img"], summary["best_nvar_idx"])
end
end
end
| DeconvOptim | https://github.com/roflmaostc/DeconvOptim.jl.git |
|
[
"MIT"
] | 0.7.3 | 4257575512979d48424448b1cb44e29d6fd5fd4a | code | 6156 | export conv, plan_conv, conv_psf, plan_conv_psf
"""
conv(u, v[, dims])
Convolve `u` with `v` over `dims` dimensions with an FFT based method.
Note that this method introduces wrap-around artifacts without
proper padding/windowing.
# Arguments
* `u` is an array in real space.
* `v` is the array to be convolved in real space as well.
* Per default `dims=ntuple(+, min(N, M))` means that we perform the convolution
over all dimensions of the array which has fewer dimensions.
If `dims` is an array of integers, we perform the convolution
only over these dimensions. E.g. `dims=[1,3]` would perform the convolution
over the first and third dimension; the second dimension is not convolved.
If `u` and `v` are both real-valued arrays, we use `rfft` and hence
the output is real as well.
If either `u` or `v` is complex, we use `fft` and the output is hence complex.
# Examples
1D with FFT over all dimensions. We choose `v` to be a delta peak.
Therefore convolution should act as identity.
```jldoctest
julia> u = [1 2 3 4 5]
1×5 Matrix{Int64}:
1 2 3 4 5
julia> v = [0 0 1 0 0]
1×5 Matrix{Int64}:
0 0 1 0 0
julia> conv(u, v)
1×5 Matrix{Float64}:
4.0 5.0 1.0 2.0 3.0
```
2D with FFT with different `dims` arguments.
```jldoctest
julia> u = 1im .* [1 2 3; 4 5 6]
2×3 Matrix{Complex{Int64}}:
0+1im 0+2im 0+3im
0+4im 0+5im 0+6im
julia> v = [1im 0 0; 1im 0 0]
2×3 Matrix{Complex{Int64}}:
0+1im 0+0im 0+0im
0+1im 0+0im 0+0im
julia> conv(u, v)
2×3 Matrix{ComplexF64}:
-5.0+0.0im -7.0+0.0im -9.0+0.0im
-5.0+0.0im -7.0+0.0im -9.0+0.0im
```
"""
function conv(u::AbstractArray{T, N}, v::AbstractArray{D, M}, dims=ntuple(+, min(N, M))) where {T, D, N, M}
return ifft(fft(u, dims) .* fft(v, dims), dims)
end
function conv(u::AbstractArray{<:Real, N}, v::AbstractArray{<:Real, M}, dims=ntuple(+, min(N, M))) where {N, M}
return irfft(rfft(u, dims) .* rfft(v, dims), size(u, dims[1]), dims)
end
"""
conv_psf(u, psf[, dims])
`conv_psf` is a shorthand for `conv(u,ifftshift(psf))`. For examples see `conv`.
"""
function conv_psf(u::AbstractArray{T, N}, psf::AbstractArray{D, M}, dims=ntuple(+, min(N, M))) where {T, D, N, M}
return conv(u, ifftshift(psf, dims), dims)
end
# define custom adjoint for conv
# so far only defined for the derivative regarding the first component
function ChainRulesCore.rrule(::typeof(conv), u::AbstractArray{T, N}, v::AbstractArray{D, M},
dims=ntuple(+, min(N, M))) where {T, D, N, M}
Y = conv(u, v, dims)
function conv_pullback(barx)
return NoTangent(), conv(barx, conj(v), dims), NoTangent(), NoTangent()
end
return Y, conv_pullback
end
"""
plan_conv(u, v [, dims])
Pre-plan an optimized convolution for arrays shaped like `u` and `v` (based on pre-plan FFT)
along the given dimensions `dims`.
`dims = 1:ndims(u)` per default.
The zero frequency of `v` must be located at the first entry.
We return two arguments:
The first one is `v_ft` (obtained by `fft(v)` or `rfft(v)`).
The second return is the convolution function `pconv`.
`pconv` itself has two arguments: `pconv(u, v_ft=v_ft)`, where `u` is the object and `v_ft` the Fourier transform of `v` (the precomputed one by default).
This function achieves faster convolution than `conv(u, v)`.
Depending on whether `u` is real or complex, we use `rfft`s or `fft`s.
# Warning
The resulting output of the `pconv` function is a reference to an internal, allocated array.
If you use the `pconv` function for different tasks,
a new call to `pconv` will change the previous result (since the previous result was only a reference, not a new array).
# Examples
```jldoctest
julia> u = [1 2 3 4 5]
1×5 Matrix{Int64}:
1 2 3 4 5
julia> v = [1 0 0 0 0]
1×5 Matrix{Int64}:
1 0 0 0 0
julia> v_ft, pconv = plan_conv(u, v);
julia> pconv(u, v_ft)
1×5 Matrix{Float64}:
1.0 2.0 3.0 4.0 5.0
julia> pconv(u)
1×5 Matrix{Float64}:
1.0 2.0 3.0 4.0 5.0
```
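Since the output buffer is reused, `copy` the result if you need it after a later call
(a sketch continuing the example above):
```julia-repl
julia> r1 = copy(pconv(u));  # detach the result from the internal buffer
julia> r2 = pconv(u .+ 1);   # this call reuses the buffer, but `r1` stays valid
```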
"""
function plan_conv(u::AbstractArray{T, N}, v::AbstractArray{T, M}, dims=ntuple(+, N)) where {T, N, M}
plan = get_plan(T)
# do the preplanning step
P = plan(u, dims)
u_ft_stor = P * u
P_inv = inv(P)
v_ft = fft_or_rfft(T)(v, dims)
out = similar(u)
# construct the efficient conv function
# P and P_inv can be understood like matrices
# but their computation is fast
conv(u, v_ft=v_ft) = p_conv_aux!(P, P_inv, u, v_ft, u_ft_stor, out)
return v_ft, conv
end
"""
plan_conv_psf(u, psf [, dims])
`plan_conv_psf` is a shorthand for `plan_conv(u, ifftshift(psf))`. For examples see `plan_conv`.
"""
function plan_conv_psf(u::AbstractArray{T, N}, psf::AbstractArray{T, M}, dims=ntuple(+, N)) where {T, N, M}
return plan_conv(u, ifftshift(psf, dims), dims)
end
function p_conv_aux!(P, P_inv, u, v_ft, u_ft_stor, out)
#return P_inv.scale .* (P_inv.p * ((P * u) .* v_ft))
mul!(u_ft_stor, P, u)
u_ft_stor .*= v_ft
mul!(out, P_inv.p, u_ft_stor)
#out2 = out .* P_inv.scale
out .*= P_inv.scale
return out
end
function ChainRulesCore.rrule(::typeof(p_conv_aux!), P, P_inv, u, v, u_ft_stor, out)
Y = p_conv_aux!(P, P_inv, u, v, u_ft_stor, out)
function conv_pullback(barx)
conj_v = let
if eltype(v) <: Real
v
else
conj(v)
end
end
barx = let
if typeof(barx) <: FillArrays.Fill
collect(eltype(u).(barx))
else
barx
end
end
∇ = p_conv_aux!(P, P_inv, barx, conj_v, u_ft_stor, copy(out))
return NoTangent(), NoTangent(), NoTangent(), ∇, NoTangent(), NoTangent(), NoTangent()
end
return Y, conv_pullback
end
"""
fft_or_rfft(T)
Small helper function to decide whether a real
or a complex valued FFT is appropriate.
"""
function fft_or_rfft(::Type{<:Real})
return rfft
end
function fft_or_rfft(::Type{T}) where T
return fft
end
"""
get_plan(T)
Small helper function to decide whether a real
or a complex valued FFT plan is appropriate.
"""
function get_plan(::Type{<:Real})
return plan_rfft
end
function get_plan(::Type{T}) where T
return plan_fft
end
| DeconvOptim | https://github.com/roflmaostc/DeconvOptim.jl.git |
|
[
"MIT"
] | 0.7.3 | 4257575512979d48424448b1cb44e29d6fd5fd4a | code | 7169 | export deconvolution
"""
deconvolution(measured, psf; <keyword arguments>)
Computes the deconvolution of `measured` and `psf`. Return parameter is a tuple with
two elements. The first entry is the deconvolved image. The second return parameter
is the output of the optimization of Optim.jl
Multiple keyword arguments can be specified for different loss functions,
regularizers and mappings.
# Arguments
- `loss=Poisson()`: the loss function taking a vector the same shape as measured.
- `regularizer=nothing`: A regularizer function, same form as `loss`.
See `GR`, `TV`, `Tikhonov` and the help page for different regularizers.
- `λ=0.05`: A float indicating the total weighting of the regularizer with
respect to the global loss function
- `background=0`: A float indicating a background intensity level.
- `mapping=Non_negative()`: Applies a mapping of the optimizer weight. Default is a
parabola which achieves a non-negativity constraint.
- `iterations=nothing`: Specifies the number of iterations after which the optimization
definitely should stop. By default, 20 iterations will be selected by generic_invert.jl
if `nothing` is provided.
- `conv_dims`: A tuple indicating over which dimensions the convolution should happen.
per default `conv_dims=1:ndims(psf)`
- `plan_fft=true`: Boolean whether plan_fft is used. Gives a slight speed improvement.
- `padding=0`: a float indicating the amount (fraction of the size in that dimension)
of padded regions around the reconstruction. Prevents wrap around effects of the FFT.
A array with `size(arr)=(400, 400)` with `padding=0.05` would result in reconstruction size of
`(440, 440)`. However, for positive paddings we only return the reconstruction cropped to the original size.
For negative paddings, the absolute value is used, but the result maintains the padded size.
`padding=0` disables any padding.
- `opt_package=Opt_Optim`: decides which backend for the optimizer is used.
- `opt=LBFGS()`: The chosen optimizer which must fit to `opt_package`
- `opt_options=nothing`: Can be a options file required by Optim.jl. Will overwrite iterations.
- `initial=mean(measured)`: defines a value (or array) with the initial guess. This will be pulled through the inverse mapping function
and extended with a mean value (if border regions are used).
- `debug_f=nothing`: A debug function which must take a single argument, the current reconstruction.
!!! note
If you want to provide your own PSF model, ensure that it is centered around the first entry of the array (`psf[1]`).
You may need to use `ifftshift` for a PSF model or a measured PSF.
# Example
```julia-repl
julia> using DeconvOptim, TestImages, Colors, Noise;
julia> img = Float32.(testimage("resolution_test_512"));
julia> psf = Float32.(generate_psf(size(img), 30));
julia> img_b = conv(img, psf);
julia> img_n = poisson(img_b, 300);
julia> res, o = deconvolution(img_n, psf);
```
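To suppress wrap-around artifacts of the FFT, a padded reconstruction can be requested;
the result is cropped back to the original size (a sketch continuing the example above):
```julia-repl
julia> res_pad, o = deconvolution(img_n, psf, padding=0.05);
```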
"""
function deconvolution(measured::AbstractArray{T, N}, psf;
loss=Poisson(),
regularizer=GR(),
λ=T(0.05),
background=zero(T),
mapping=Non_negative(),
iterations=nothing,
conv_dims = ntuple(+, ndims(psf)),
padding=0.00,
opt_options=nothing,
opt=LBFGS(linesearch=BackTracking()),
initial=mean(measured),
debug_f=nothing,
opt_package=Opt_Optim) where {T, N}
# rec0 will be an array storing the final reconstruction
# we choose it larger than the measured array to reduce
# wrap around artifacts of the Fourier Transform
# we create a array size_padded which stores a new array size
# our reconstruction array will be larger than measured
# to prevent wrap around artifacts
size_padded = []
for i = 1:ndims(measured)
# if the size of the i-th dimension is 1
# don't do any padding because there won't be any
# convolution in that dimension
if size(measured)[i] == 1
push!(size_padded, 1)
else
# only pad, if padding is true
if ~iszero(padding)
# 2 * ensures symmetric padding
# minimum padding is 2 on each side (4 in total)
x = next_fast_fft_size(max(4, 2 * round(Int, size(measured)[i] * abs(padding))))
else
x = 0
end
push!(size_padded, size(measured)[i] + x)
end
end
# we divide by the mean to normalize
rescaling = mean(measured)
measured = measured ./ rescaling
initial = initial ./ rescaling
# create rec0 which will be the initial guess for the reconstruction
rec0 = similar(measured, (size_padded)...)
fill!(rec0, one(eltype(measured)))
# alternative rec0_center, unused at the moment
#rec0_center = m_invf(abs.(conv(measured, psf, conv_dims)))
#
# take the mean as the initial guess
# therefore has the same total energy at the initial guess as
# measured
csize = isa(initial, AbstractArray) ? size(initial) : size(measured)
one_arr = similar(measured, size(measured))
fill!(one_arr, mean(measured))
center_set!(rec0, one_arr .* initial)
mf, mf_inv = get_mapping(mapping)
rec0 = mf_inv(rec0)
# psf_n is the psf with the same size as rec0 but only in that dimensions
# that were supported by the initial psf. Broadcasting of psf with less
# dimensions is still supported
# we put the small psf into the new one
# it is important to pad the PSF instead of the OTF
psf_new_size = Array{Int}(undef, 0)
for i = 1:ndims(psf)
push!(psf_new_size, size(rec0)[i])
end
psf_new_size = tuple(psf_new_size...)
psf_n = similar(rec0, psf_new_size)
fill!(psf_n, zero(eltype(rec0)))
psf_n = center_set!(psf_n, fftshift(psf))
psf = ifftshift(psf_n)
# the psf should be normalized to 1
psf ./= sum(psf)
otf, conv_temp = plan_conv(rec0, psf, conv_dims)
# forward model is a convolution
# due to numerics, we need to clip at 0
# analytically it's a convolution psf ≥ 0 and image ≥ 0
# so it must be conv(psf, image) ≥ 0
forward(x) =
let
if iszero(background)
center_extract((conv_aux(conv_temp, x, otf)), size(measured))
else
center_extract((conv_aux(conv_temp, x, otf) .+ background), size(measured))
end
end
# pass to more general optimization
res_out, res = invert(measured, rec0, forward;
iterations=iterations, λ=λ,
regularizer=regularizer,
opt=opt,
opt_options=opt_options,
mapping=mapping,
loss=loss,
debug_f=debug_f, opt_package=opt_package)
res_out .*= rescaling
# since we do some padding we need to extract the center part
# for negative paddings, keep the large size.
if padding > 0.0
res_out = center_extract(res_out, size(measured))
end
return res_out, res
end
| DeconvOptim | https://github.com/roflmaostc/DeconvOptim.jl.git |
|
[
"MIT"
] | 0.7.3 | 4257575512979d48424448b1cb44e29d6fd5fd4a | code | 698 | """
conv_aux(conv_otf, rec, otf)
Calculate the convolution between `rec` and `otf`.
The used convolution function is `conv_otf`.
`conv_otf` can be exchanged to be a rfft, fft or plan_fft based routine.
This function is just defined to speed up automatic differentiation
and it's custom defined adjoint.
"""
function conv_aux(conv_otf, rec, otf)
return conv_otf(rec, otf)
end
# define custom adjoint for conv_aux
function ChainRulesCore.rrule(::typeof(conv_aux), conv, rec, otf)
Y = conv_aux(conv, rec, otf)
function conv_aux_pullback(barx)
return zero(eltype(rec)), zero(eltype(rec)), conv(barx, conj(otf)), zero(eltype(rec))
end
return Y, conv_aux_pullback
end
| DeconvOptim | https://github.com/roflmaostc/DeconvOptim.jl.git |
|
[
"MIT"
] | 0.7.3 | 4257575512979d48424448b1cb44e29d6fd5fd4a | code | 5103 | export invert, OptInterface, Opt_Optim, Opt_OptimPackNextGen
abstract type OptInterface end # To accommodate multiple optimizers which are incompatible
struct Opt_Optim <: OptInterface end # backend: Optim.jl
struct Opt_OptimPackNextGen <: OptInterface end # backend: OptimPackNextGen.jl
"""
invert(measured, rec0, forward; <keyword arguments>)
Tries to invert the `forward` model. `forward` is a function taking
an input with the shape of `rec0` and returning an object which has the
same shape as `measured`.
Multiple keyword arguments can be specified for different loss functions,
regularizers and mappings.
# Arguments
- `loss=Poisson()`: the loss function used to compare the forward model with `measured`.
- `regularizer=nothing`: A regularizer function, same form as `loss`.
- `λ=0.05`: A float indicating the total weighting of the regularizer with
respect to the global loss function
- `mapping=Non_negative()`: Applies a mapping of the optimizer weight. Default is a
parabola which achieves a non-negativity constraint.
- `iterations=nothing`: Specifies the number of iterations after which the optimization
definitely should stop. Will be overwritten if `opt_options` is provided. Default: 20
- `opt_package=Opt_Optim`: decides which backend for the optimizer is used.
- `opt=LBFGS()`: The chosen optimizer which must fit to `opt_package`.
- `opt_options=nothing`: Can be a options file required by Optim.jl. Will overwrite iterations.
- `debug_f=nothing`: A debug function which must take a single argument, the current reconstruction.
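# Example
A minimal sketch with a toy linear forward model (the data here is random and purely illustrative):
```julia-repl
julia> using DeconvOptim;
julia> measured = abs.(randn(Float32, (32, 32)));
julia> forward(x) = 2 .* x;
julia> res, o = invert(measured, ones(Float32, (32, 32)), forward; regularizer=nothing, iterations=10);
```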
"""
function invert(measured, rec0, forward;
iterations=nothing, λ=eltype(rec0)(0.05),
regularizer=nothing,
opt=LBFGS(linesearch=LineSearches.BackTracking()),
opt_options=nothing,
mapping=Non_negative(),
loss=Poisson(),
debug_f=nothing,
opt_package=Opt_Optim)
# if no special options are given, just restrict the number of iterations
if opt_package <: Opt_Optim && opt_options !== nothing && iterations !== nothing
error("If `opt_options` are provided you need to include the iterations as part of these instead of providing the `iterations` argument.")
end
iterations = (iterations === nothing) ? 20 : iterations
if opt_package <: Opt_Optim
if opt_options === nothing
opt_options = Optim.Options(iterations=iterations)
end
end
# Get the mapping functions to achieve constraints
# like non negativity
mf, m_invf = get_mapping(mapping)
regularizer = get_regularizer(regularizer, eltype(rec0))
debug_f_n(x) =
let
if isnothing(debug_f)
identity(x)
else
debug_f(mf(x))
end
end
storage_μ = deepcopy(measured)
function total_loss(rec)
# handle if there is a provided mapping function
mf_rec = mf(rec)
forward_v = forward(mf_rec)
loss_v = sum(loss(forward_v, measured, storage_μ))
loss_v += λ .* regularizer(mf_rec)
return loss_v
end
# nice precompilation before calling Zygote etc.
Base.invokelatest(total_loss, rec0)
# this is the function which will be provided to Optim.optimize
# check Optim's documentation for the purpose of F and G
# but simply speaking F is the loss value and G its gradient
# depending whether one of them is nothing, we skip some computations
# we need to call Base.invokelatest because the regularizer is a function
# generated at runtime with eval.
# This leads to the common "world age problem" in Julia
# for more details on that check:
# https://discourse.julialang.org/t/dynamically-create-a-function-initial-idea-with-eval-failed-due-to-world-age-issue/49139/17
function fg!(F, G, rec)
# Zygote calculates both derivative and loss, therefore do everything in one step
if G !== nothing
# apply debug function
debug_f_n(rec)
y, back = Base.invokelatest(Zygote._pullback, total_loss, rec)
# calculate gradient
G .= Base.invokelatest(back, 1)[2]
if F !== nothing
return y
end
end
if F !== nothing
return Base.invokelatest(total_loss, rec)
end
end
if isa(opt_package, Type{Opt_Optim})
if opt_options === nothing
opt_options = Optim.Options(iterations=iterations)
end
# do the optimization with LBGFS
res = Optim.optimize(Optim.only_fg!(fg!), rec0, opt, opt_options)
res_out = mf(Optim.minimizer(res))
# supports a different interface as for example used in OptimPackNextGen for the function 'vmlmb!'
elseif isa(opt_package, Type{Opt_OptimPackNextGen})
res = copy(rec0)
if isnothing(opt_options)
opt((x, g) -> fg!(true, g, x), res; maxiter=iterations)
else
opt((x, g) -> fg!(true, g, x), res; maxiter=iterations, opt_options...)
end
res_out = mf(res)
else
error("Unknown optimizer interface $(typeof(opt_package))")
end
return res_out, res
end
| DeconvOptim | https://github.com/roflmaostc/DeconvOptim.jl.git |
|
[
"MIT"
] | 0.7.3 | 4257575512979d48424448b1cb44e29d6fd5fd4a | code | 2412 | export HS
# References:
# * Lefkimmiatis, Stamatios, John Paul Ward, and Michael Unser. "Hessian Schatten-norm regularization for linear inverse problems." IEEE transactions on image processing 22.5 (2013): 1873-1888.
# * Lefkimmiatis, Stamatios, and Michael Unser. "Poisson image reconstruction with Hessian Schatten-norm regularization." IEEE transactions on image processing 22.11 (2013): 4314-4327.
function Δr1r1(x)
return @tullio res[i, j] := x[i+2, j+0] - 2 * x[i+1, j] + x[i, j] (i in 1:size(x)[1]-2, j in 1:size(x)[2]-2)
end
function Δr2r2(x)
return @tullio res[i, j] := x[i+0, j+2] - 2 * x[i, j+1] + x[i, j] (i in 1:size(x)[1]-2, j in 1:size(x)[2]-2)
end
function Δr1r2(x)
return @tullio res[i, j] := x[i+1, j+1] - x[i+1, j] - x[i, j+1] + x[i, j] (i in 1:size(x)[1]-2, j in 1:size(x)[2]-2)
end
"""
HS(; p=1)
Hessian Schatten norm. `p` determines which Schatten norm is used.
This regularizer only works with 2D arrays at the moment.
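# Examples
A minimal sketch with random input (output omitted):
```julia-repl
julia> reg = HS(p=1);
julia> loss_value = reg(rand(Float32, (32, 32)));
```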
"""
function HS(;p=1)
if isone(p)
return HS1
end
f(x) = HSp(x, p=p)
return f
end
"""
Hessian Schatten norm for `p=1`, computed efficiently with Tullio.
"""
function HS1(arr)
H11 = Δr1r1(arr)
H22 = Δr2r2(arr)
return schatten_norm_1(H11, H22)
end
function schatten_norm_1(a, d)
@tullio A[i, j] := a[i, j] + d[i, j]
@tullio res = abs(1f-8 + A[i, j])
end
"""
Hessian Schatten norm for general `p`.
Not as fast as the special case `p=1`.
"""
function HSp(arr; p=1)
H11 = Δr1r1(arr)
H22 = Δr2r2(arr)
H12 = Δr1r2(arr)
res = schatten_norm_tullio(H11, H12, H22, p)
return sum(res)
end
function schatten_norm(H11, H12, H22, p)
λ₁, λ₂ = eigvals_symmetric(H11, H12, H22)
return (λ₁^p + λ₂^p )^(1/p)
end
function schatten_norm_tullio(H11, H12, H22, p)
λ₁, λ₂ = eigvals_symmetric_tullio(H11, H12, H22)
return @tullio res = abs((1f-8 + λ₁[i, j]^p + λ₂[i, j]^p))^(1/p)
end
"""
eigvals_symmetric(a, b, d)
Calculate the eigenvalues of the symmetric matrix
`[a b; b d]` analytically.
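# Examples
```jldoctest
julia> DeconvOptim.eigvals_symmetric(2, 1, 2)
(3.0, 1.0)
```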
"""
function eigvals_symmetric(a, b, d)
A = a+d
B = sqrt((a-d)^2+4*b^2)
λ₁ = 0.5 * (A + B)
λ₂ = 0.5 * (A - B)
return λ₁, λ₂
end
function eigvals_symmetric_tullio(a, b, d)
@tullio A[i, j] := a[i, j] + d[i, j]
@tullio B[i, j] := sqrt(1f-8 + (a[i, j]-d[i, j])^2+4*b[i, j]^2)
@tullio λ₁[i, j] := 0.5 * (A[i, j] + B[i, j])
@tullio λ₂[i, j] := 0.5 * (A[i, j] - B[i, j])
return λ₁, λ₂
end
| DeconvOptim | https://github.com/roflmaostc/DeconvOptim.jl.git |
|
[
"MIT"
] | 0.7.3 | 4257575512979d48424448b1cb44e29d6fd5fd4a | code | 5219 | export Poisson, poisson_aux
export Gauss, gauss_aux
export ScaledGauss, scaled_gauss_aux
export Anscombe, anscombe_aux
"""
poisson_aux(μ, meas, storage=similar(μ))
Calculates the Poisson loss for `μ` and `meas`.
`μ` can be of larger size than `meas`. In that case
we extract a centered region from `μ` of the same size as `meas`.
"""
function poisson_aux(μ, meas, storage=similar(μ))
# due to numerical errors, μ can be negative or 0
if minimum(μ) <= 0
μ .= μ .+ eps(maximum(μ)) .+ abs.(minimum(μ))
end
storage .= μ .- meas .* log.(μ)
return sum(storage)
end
# define custom gradient for speed-up
# ChainRulesCore offers the possibility to define a backward AD rule
# which can be used by several different AD systems
function ChainRulesCore.rrule(::typeof(poisson_aux), μ, meas, storage)
Y = poisson_aux(μ, meas, storage)
function poisson_aux_pullback(xbar)
storage .= xbar .* (one(eltype(μ)) .- meas ./ μ)
return NoTangent(), storage, (ChainRulesCore.@not_implemented "Save computation"), (ChainRulesCore.@not_implemented "Save computation")
end
return Y, poisson_aux_pullback
end
"""
Poisson()
Returns a function to calculate Poisson loss
Check the help of `poisson_aux`.
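# Examples
A small numerical sketch:
```julia-repl
julia> loss = Poisson();
julia> loss([1.0, 1.0], [1.0, 1.0])
2.0
```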
"""
function Poisson()
return poisson_aux
end
"""
gauss_aux(μ, meas, storage=similar(μ))
Calculates the Gauss loss for `μ` and `meas`.
`μ` can be of larger size than `meas`. In that case
we extract a centered region from `μ` of the same size as `meas`.
"""
function gauss_aux(μ, meas, storage=similar(μ))
storage .= abs2.(μ - meas)
return sum(storage)
end
# define custom gradient for speed-up
function ChainRulesCore.rrule(::typeof(gauss_aux), μ, meas, storage)
Y = gauss_aux(μ, meas)
function gauss_aux_pullback(xbar)
return NoTangent(), 2 .* xbar .* (μ - meas), (ChainRulesCore.@not_implemented "Save computation"), (ChainRulesCore.@not_implemented "Save computation")
end
return Y, gauss_aux_pullback
end
"""
Gauss()
Returns a function to calculate Gauss loss.
Check the help of `gauss_aux`.
"""
function Gauss()
return gauss_aux
end
"""
scaled_gauss_aux(μ, meas, storage=similar(μ); read_var=0)
Calculates the scaled Gauss loss for `μ` and `meas`.
`read_var=0` is the readout noise variance of the sensor.
`μ` can be of larger size than `meas`. In that case
we extract a centered region from `μ` of the same size as `meas`.
"""
function scaled_gauss_aux(μ, meas, storage=similar(μ); read_var=0)
μ[μ .<= 1f-8] .= 1f-8
storage .= log.(μ .+ read_var) .+ (meas .- μ).^2 ./ ((μ .+ read_var))
return sum(storage)
end
# define custom gradient for speed-up
function ChainRulesCore.rrule(::typeof(scaled_gauss_aux), μ, meas, storage; read_var=0)
Y = scaled_gauss_aux(μ, meas; read_var=read_var)
function scaled_gauss_aux_pullback(xbar)
∇ = xbar .* (μ.^2 .- meas.^2 .+ μ .+ read_var.*(1 .- 2 .* (meas .- µ)))./((μ .+read_var).^2)
∇[μ .<= 1f-8] .= 0
return NoTangent(), ∇, (ChainRulesCore.@not_implemented "Save computation"), (ChainRulesCore.@not_implemented "Save computation"), (ChainRulesCore.@not_implemented "Save computation")
end
return Y, scaled_gauss_aux_pullback
end
"""
ScaledGauss()
Returns a function to calculate scaled Gauss loss.
Check the help of `scaled_gauss_aux`.
"""
function ScaledGauss(read_var=0)
return (µ, meas, storage=similar(µ)) -> scaled_gauss_aux(µ, meas, storage, read_var=read_var)
end
"""
anscombe_aux(μ, meas, storage=similar(μ); b=0)
Calculates the Poisson loss using the Anscombe-based norm for `μ` and `meas`.
`μ` can be of larger size than `meas`. In that case
we extract a centered region from `μ` of the same size as `meas`.
`b=0` is the optional parameter under the `√`.
Note that the data will be normalized to the mean of the data, which means that you have to
divide this parameter also by the mean of the data, i.e. b=3.0/8.0/mean(measured).
"""
function anscombe_aux(μ, meas, storage=similar(μ); b=0)
# we cannot divide b here by meas, since meas is already normalized
# due to numerical errors, μ can be negative or 0
mm = minimum(μ)
if mm <= 0
μ .= μ .+ eps(maximum(μ)) .+ abs.(mm)
end
storage .= abs2.(sqrt.(meas .+ b) .- sqrt.(μ .+ b))
return sum(storage)
end
# define custom gradient for speed-up
# ChainRulesCore offers the possibility to define a backward AD rule
# which can be used by several different AD systems
function ChainRulesCore.rrule(::typeof(anscombe_aux), μ, meas, storage; b=0)
Y = anscombe_aux(μ, meas, storage, b=b)
function anscombe_aux_pullback(xbar)
storage .= xbar .* (one(eltype(μ)) .- sqrt.((meas .+ b) ./ (μ.+b)))
return NoTangent(), storage, (ChainRulesCore.@not_implemented "Save computation"), (ChainRulesCore.@not_implemented "Save computation"), (ChainRulesCore.@not_implemented "Save computation")
end
return Y, anscombe_aux_pullback
end
"""
Anscombe(b=0)
Returns a function to calculate Poisson loss using the Anscombe transform
Check the help of `anscombe_aux`.
"""
function Anscombe(b=0)
(μ, meas, storage=similar(μ)) -> anscombe_aux(μ, meas, storage, b=b)
end
| DeconvOptim | https://github.com/roflmaostc/DeconvOptim.jl.git |
|
[
"MIT"
] | 0.7.3 | 4257575512979d48424448b1cb44e29d6fd5fd4a | code | 2964 | export richardson_lucy_iterative
"""
richardson_lucy_iterative(measured, psf; <keyword arguments>)
Classical iterative Richardson-Lucy iteration scheme for deconvolution.
`measured` is the measured array and `psf` the point spread function.
Converges slower than the optimization approach of `deconvolution`
# Keyword Arguments
- `regularizer=GR()`: A regularizer function. Can be exchanged
- `λ=0.05`: A float indicating the total weighting of the regularizer with
respect to the global loss function
- `iterations=100`: Specifies number of iterations.
- `progress`: if not `nothing`, the progress will be monitored in a summary dictionary as obtained by
DeconvOptim.options_trace_deconv()
# Example
```julia-repl
julia> using DeconvOptim, TestImages, Colors, Noise;
julia> img = Float32.(testimage("resolution_test_512"));
julia> psf = Float32.(generate_psf(size(img), 30));
julia> img_b = conv(img, psf);
julia> img_n = poisson(img_b, 300);
julia> @time res = richardson_lucy_iterative(img_n, psf);
```
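To monitor the convergence, pass a summary dictionary as obtained from
`options_trace_deconv` (a sketch continuing the example above; the returned
`opt_options` is not needed here):
```julia-repl
julia> opt_options, summary = DeconvOptim.options_trace_deconv(img, 100, Non_negative());
julia> res = richardson_lucy_iterative(img_n, psf, progress=summary);
julia> summary["losses"];
```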
"""
function richardson_lucy_iterative(measured, psf;
regularizer=GR(),
λ=0.05,
iterations=100,
conv_dims=1:ndims(psf),
progress = nothing)
otf, conv_temp = plan_conv(measured, psf, conv_dims)
otf_conj = conj.(otf)
# initializer
rec = abs.(conv_temp(measured, otf)) # alternative initializer: ones(eltype(measured), size(measured))
# buffer for gradient
# we need Base.invokelatest because of world age issues with generated
# regularizers
buffer_grad = let
if !isnothing(regularizer)
Base.invokelatest(gradient, regularizer, rec)[1]
else
nothing
end
end
∇reg(x) = buffer_grad .= Base.invokelatest(gradient, regularizer, x)[1]
buffer = copy(measured)
iter_without_reg(rec) = begin
buffer .= measured ./ (conv_temp(rec, otf))
conv_temp(buffer, otf_conj)
end
iter_with_reg(rec) = buffer .= (iter_without_reg(rec) .- λ .* ∇reg(rec))
iter = isnothing(regularizer) ? iter_without_reg : iter_with_reg
# the loss function is only needed for logging, not for LR itself
loss(myrec) = begin
fwd = conv_temp(myrec, otf)
return sum(fwd .- measured .* log.(fwd))
end
# logging part
tmp_time = 0.0
if progress !== nothing
record_progress!(progress, rec, 0, loss(rec), 0.0, 1.0)
tmp_time=time()
end
code_time = 0.0
# do actual optimization
for i in 1:iterations
rec .*= iter(rec)
if progress !== nothing
# do not count the time for evaluating the loss here.
code_time += time() .- tmp_time
record_progress!(progress, copy(rec), i, loss(rec), code_time, 1.0)
tmp_time=time()
end
end
return rec
end
| DeconvOptim | https://github.com/roflmaostc/DeconvOptim.jl.git |
|
[
"MIT"
] | 0.7.3 | 4257575512979d48424448b1cb44e29d6fd5fd4a | code | 2490 | export Non_negative
export Map_0_1
export Piecewise_positive
export Pow4_positive
export Abs_positive
# All these functions return a mapping function and
# the inverse of it
# they are used to map the real numbers to non-negative real numbers
"""
Non_negative()
Returns a function and an inverse function
to map numbers to non-negative numbers.
We use a parabola.
# Examples
```julia-repl
julia> p, p_inv = Non_negative()
(DeconvOptim.parab, DeconvOptim.parab_inv)
julia> x = [-1, 2, -3]
3-element Array{Int64,1}:
-1
2
-3
julia> p(x)
3-element Array{Int64,1}:
1
4
9
julia> p_inv(p(x))
3-element Array{Float64,1}:
1.0
2.0
3.0
```
"""
function Non_negative()
#return x -> map(abs2, x), parab_inv
return parab, parab_inv
end
parab(x) = abs2.(x)
parab_inv(x) = sqrt.(x)
# define custom adjoint for parab because of
# slow broadcasting
function ChainRulesCore.rrule(::typeof(parab), x)
Y = parab(x)
function aux_pullback(barx)
return zero(eltype(Y)), (2 .* barx) .* x
end
return Y, aux_pullback
end
"""
Map_0_1()
Returns a function and an inverse function
to map numbers to the interval between 0 and 1
via an exponential function.
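# Examples
A round-trip sketch:
```julia-repl
julia> m, m_inv = Map_0_1();
julia> m_inv(m([0.5, 2.0])) ≈ [0.5, 2.0]
true
```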
"""
function Map_0_1()
return f01, f01_inv
end
f01(x) = 1 .- exp.(.- x.^2)
f01_inv(y) = sqrt.(.- log.(1 .- y))
"""
Piecewise_positive()
Returns a function and an inverse function
to map numbers to values larger than 0 via
two functions stitched together.
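# Examples
A round-trip sketch:
```julia-repl
julia> m, m_inv = Piecewise_positive();
julia> m_inv(m([-2.0, 0.0, 3.0])) ≈ [-2.0, 0.0, 3.0]
true
```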
"""
function Piecewise_positive()
return f_pw_pos, f_pw_pos_inv
end
f_pw_pos(x) = ifelse.(x .> 0, one(eltype(x)) .+ x,one(eltype(x))./(one(eltype(x)).-x))
f_pw_pos_grad(x) = ifelse.(x .> 0, one(eltype(x)) , one(eltype(x))./abs2.(one(eltype(x)).-x))
f_pw_pos_inv(y) = ifelse.(y .> 1, y .- one(eltype(y)), one(eltype(y)) .- one(eltype(y))./y)
function ChainRulesCore.rrule(::typeof(f_pw_pos), x)
Y = f_pw_pos(x)
function aux_pullback(barx)
return zero(eltype(Y)), barx .* f_pw_pos_grad(x)
end
return Y, aux_pullback
end
"""
Pow4_positive()
Returns a function and an inverse function
to map numbers to non-negative values with `abs2.(abs2.(x))`.
"""
function Pow4_positive()
return f_pow4, f_pow4_inv
end
f_pow4(x) = abs2.(abs2.(x))
f_pow4_inv(y) = sqrt.(sqrt.(y))
"""
Abs_positive()
Returns a function and an inverse function
to map numbers to non-negative values.
"""
function Abs_positive()
return f_abs, f_abs_inv
end
f_abs(x) = abs.(x)
f_abs_inv(y) = abs.(y)
| DeconvOptim | https://github.com/roflmaostc/DeconvOptim.jl.git |
|
[
"MIT"
] | 0.7.3 | 4257575512979d48424448b1cb44e29d6fd5fd4a | code | 12871 | using Zygote
using Tullio
export Tikhonov, GR, TV, TH
export generate_spatial_grad_square, generate_GR, generate_TV
include("hessian_schatten_norm.jl")
# General hint
# for the creation of the regularizers we are using meta programming
# because the fastest way for automatic differentiation and Zygote
# is Tullio.jl at the moment.
# Our metaprogramming code was initially based on
# https://github.com/mcabbott/Tullio.jl/issues/11
#
"""
generate_indices(num_dims, d, ind1, ind2)
Generates a list of symbols which can be used to generate Tullio expressions
via metaprogramming.
`num_dims` is the total number of dimensions.
`d` is the dimension where there is an offset in the index.
`ind1` and `ind2` are the offsets each.
# Examples
```julia-repl
julia> a, b = generate_indices(5, 2, 1, 1)
(Any[:i1, :(i2 + 1), :i3, :i4, :i5], Any[:i1, :(i2 + 1), :i3, :i4, :i5])
```
"""
function generate_indices(num_dims, d, ind1, ind2)
# create initial symbol
ind = :i
# create the array of symbols for each dimension
inds1 = map(1:num_dims) do di
# map over numbers and append the number of the position to symbol i
i = Symbol(ind, di)
# at the dimension where we want to do the step, add $ind1
di == d ? :($i + $ind1) : i
end
inds2 = map(1:num_dims) do di
i = Symbol(ind, di)
# here we do the step but in the other dimension. ind2 should be
# (-1) * ind1 or negative
di == d ? :($i + $ind2) : i
end
return inds1, inds2
end
"""
generate_laplace(num_dims, sum_dims_arr, weights)
Generate the Tullio statement for computing the `abs2` of the Laplacian.
`num_dims` is the dimension of the array.
`sum_dims_arr` is an array indicating which dimensions we must sum over.
`weights` is an array with a weight for each of these dimensions.
"""
function generate_laplace(num_dims, sum_dims_arr, weights; debug=false)
# create out list for the final expression
# add accumulates the different add expressions
out, add = [], []
# loop over all dimensions which we want to sum. for each dimension must
# be a weight provided
for (d, w) in zip(sum_dims_arr, weights)
# get the two lists of indices
inds1, inds2 = generate_indices(num_dims, d, 1, -1)
# critical part where we actually add the two expressions for
# the steps in the dimension to the add array
push!(add, :($w * arr[$(inds1...)] + $w * arr[$(inds2...)]))
end
# for laplace we need one final negative term at the position itself
inds = map(1:num_dims) do di
i = Symbol(:i, di)
end
# subtract this final term
pre_factor = 2 .* sum(weights)
push!(add, :(-$:($pre_factor * arr[$(inds...)])))
# create final expressions by adding all elements of the add list
if debug
push!(out, :(res = abs2(+$(add...))))
else
push!(out, :(@tullio res = abs2(+$(add...))))
end
return out
end
"""
create_Ndim_regularizer(expr, num_dims, sum_dims_arr, weights, ind1, ind2)
A helper function to create a N-dimensional regularizer. In principle
the same as `generate_laplace` but more general
`expr` needs to be a function which takes `inds1`, `inds2` and a weight `w`.
`num_dims` is the total number of dimensions.
`sum_dims_arr` is an array indicating which dimensions we must sum over.
`weights` is an array with a weight for each of these dimensions.
"""
function create_Ndim_regularizer(expr, num_dims, sum_dims_arr, weights,
ind1, ind2)
out, add = [], []
for (d, w) in zip(sum_dims_arr, weights)
inds1, inds2 = generate_indices(num_dims, d, ind1, ind2)
push!(add, expr(inds1, inds2, w))
end
push!(out, :(@tullio res = +($(add...))))
return out
end
"""
generate_spatial_grad_square(num_dims, sum_dims_arr, weights)
Generate the Tullio statement for calculating the squared spatial gradient
over n dimensions.
`num_dims` is the dimension of the array.
`sum_dims_arr` is an array indicating which dimensions we must sum over.
`weights` is an array with a weight for each of these dimensions.
The index offsets for the difference are fixed to `1` and `-1`.
"""
function generate_spatial_grad_square(num_dims, sum_dims_arr, weights)
expr(inds1, inds2, w) = :($w * abs2(arr[$(inds1...)] - arr[$(inds2...)]))
@eval x = arr -> ($(create_Ndim_regularizer(expr, num_dims, sum_dims_arr,
weights, 1, -1)...))
return x
end
"""
Tikhonov(; <keyword arguments>)
This function returns a function to calculate the Tikhonov regularizer
of a n-dimensional array.
# Arguments
- `num_dims=2`: Dimension of the array that should be regularized
- `sum_dims=[1, 2]`: An array containing the dimensions we want to sum over
- `weights=nothing`: An array containing weights to weight the contribution of
different dimensions. If `weights=nothing` all dimensions are weighted equally.
- `step=1`: An integer indicating the step width for the array indexing
- `mode="laplace"`: Either `"laplace"`, `"spatial_grad_square"`, `"identity"` accounting for different
modes of the Tikhonov regularizer. Default is `"laplace"`.
# Examples
To create a 2D regularizer which sums over both dimensions and uses the `"identity"` mode:
```julia-repl
julia> reg = Tikhonov(num_dims=2, sum_dims=[1, 2], weights=[1, 1], mode="identity");
julia> reg([1 2 3; 4 5 6; 7 8 9])
285
```
"""
function Tikhonov(;num_dims=2, sum_dims=1:num_dims, weights=[1, 1], step=1, mode="laplace")
if weights == nothing
weights = ones(Int, num_dims)
end
if mode == "laplace"
Γ = @eval arr -> ($(generate_laplace(num_dims, sum_dims, weights)...))
elseif mode == "spatial_grad_square"
expr(inds1, inds2, w) = :($w * abs2(arr[$(inds1...)] - arr[$(inds2...)]))
Γ = @eval arr -> ($(create_Ndim_regularizer(expr, num_dims, sum_dims,
weights, step, (-1) * step)...))
elseif mode == "identity"
Γ = arr -> sum(abs2.(arr))
else
throw(ArgumentError("The provided mode is not valid."))
end
return Γ
end
"""
generate_GR(num_dims, sum_dims_arr, weights, ind1, ind2)
Generate the Tullio statement for computing the Good's roughness.
`num_dims` is the dimension of the array. `sum_dims_arr` is an array
indicating which dimensions we must sum over.
`weights` is an array with a weight for each of these dimensions.
`ind1` and `ind2` are the offsets for the difference.
"""
function generate_GR(num_dims, sum_dims_arr, weights, ind1, ind2; debug=false)
out, add = [], []
inds = map(1:num_dims) do di
i = Symbol(:i, di)
end
for (d, w) in zip(sum_dims_arr, weights)
inds1, inds2 = generate_indices(num_dims, d, ind1, ind2)
push!(add, :($w * (arr[$(inds1...)] + arr[$(inds2...)])))
end
prefactor = - 4 / (abs(ind1) + abs(ind2))
diff_factor = -sum(weights) * 2
push!(add, :($diff_factor *arr[$(inds...)]))
if debug
push!(out, :(res = $prefactor * arr[$(inds...)] * +($(add...))))
else
push!(out, :(@tullio res = $prefactor * arr[$(inds...)] * +($(add...))))
end
return out
end
"""
GR(; <keyword arguments>)
This function returns a function to calculate the Good's roughness regularizer
of a n-dimensional array.
# Arguments
- `num_dims=2`: Dimension of the array that should be regularized
- `sum_dims=[1, 2]`: An array containing the dimensions we want to sum over
- `weights=nothing`: An array containing weights to weight the contribution of
different dimensions. If `weights=nothing` all dimensions are weighted equally.
- `step=1`: An integer indicating the step width for the array indexing
- `mode="forward"`: Either `"central"` or `"forward"` accounting for different
modes of the spatial gradient. Default is "forward".
- `ϵ=1f-8` is a smoothness variable, to make it differentiable
# Examples
To create a 2D regularizer which sums over both dimensions.
For the derivative we use forward mode.
```julia-repl
julia> reg = GR(num_dims=2, sum_dims=[1, 2], weights=[1, 1], mode="forward");
julia> reg([1 2 3; 4 5 6; 7 8 9])
-26.36561871738898
```
"""
function GR(; num_dims=2, sum_dims=1:num_dims, weights=[1, 1], step=1,
mode="forward", ϵ=1f-8)
if weights == nothing
weights = ones(Int, num_dims)
end
if mode == "central"
GRf = @eval arr2 -> begin
arr = sqrt.(arr2 .+ $ϵ)
$(generate_GR(num_dims, sum_dims, weights,
step, (-1) * step)...)
end
elseif mode == "forward"
GRf = @eval arr2 -> begin
arr = sqrt.(arr2 .+ $ϵ)
($(generate_GR(num_dims, sum_dims, weights,
step, 0)...))
end
else
throw(ArgumentError("The provided mode is not valid."))
end
# we need to add an ϵ to prevent NaN in the derivative
return GRf
end
"""
generate_TV(num_dims, sum_dims_arr, weights, ind1, ind2, ϵ)
Generate the Tullio statement for computing the total variation.
`num_dims` is the dimension of the array.
`sum_dims_arr` is an array
indicating which dimensions we must sum over.
`weights` is an array with a weight for each of these dimensions.
`ind1` and `ind2` are the offsets for the difference.
`ϵ` is a numerical constant to prevent division by zero.
This is important for the gradient.
"""
function generate_TV(num_dims, sum_dims_arr, weights, ind1, ind2, ϵ=1f-8; debug=false)
out, add = [], []
for (d, w) in zip(sum_dims_arr, weights)
inds1, inds2 = generate_indices(num_dims, d, ind1, ind2)
push!(add, :($w * abs2(arr[$(inds1...)] - arr[$(inds2...)])))
end
push!(add, ϵ)
if debug
push!(out, :(res = sqrt(+($(add...)))))
else
push!(out, :(@tullio res = sqrt(+($(add...)))))
end
return out
end
"""
TV(; <keyword arguments>)
This function returns a function to calculate the Total Variation regularizer
of a n-dimensional array.
# Arguments
- `num_dims=2`: Dimension of the array that should be regularized
- `sum_dims=1:num_dims`: An array containing the dimensions we want to sum over
- `weights=nothing`: An array containing weights to weight the contribution of
different dimensions. If `weights=nothing` all dimensions are weighted equally.
- `step=1`: An integer indicating the step width for the array indexing
- `mode="forward"`: Either `"central"` or `"forward"` accounting for different
modes of the spatial gradient. Default is "forward".
- `ϵ=1f-8` is a smoothness variable, to make it differentiable
# Examples
To create a 2D regularizer which sums over both dimensions.
For the derivative we use forward mode.
```julia-repl
julia> reg = TV(num_dims=2, sum_dims=[1, 2], weights=[1, 1], mode="forward");
julia> reg([1 2 3; 4 5 6; 7 8 9])
12.649111f0
```
"""
function TV(; num_dims=2, sum_dims=1:num_dims, weights=nothing, step=1, mode="forward", ϵ=1f-8)
if weights == nothing
weights = ones(Int, num_dims)
end
if mode == "central"
total_var = @eval arr -> ($(generate_TV(num_dims, sum_dims, weights,
step, (-1) * step, ϵ)...))
elseif mode == "forward"
total_var = @eval arr -> ($(generate_TV(num_dims, sum_dims, weights,
step, 0, ϵ)...))
else
throw(ArgumentError("The provided mode is not valid."))
end
return total_var
end
"""
TH(; <keyword arguments>)
This function returns a function to calculate the Total Hessian norm
of a n-dimensional array.
# Arguments
- `num_dims=2`: Dimension of the array, either 2 or 3
- `ϵ=1f-8` is a smoothness variable, to make it differentiable
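# Examples
A minimal sketch with random input (output omitted):
```julia-repl
julia> reg = TH(num_dims=2);
julia> loss_value = reg(rand(Float32, (32, 32)));
```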
"""
function TH(; num_dims=2, ϵ=1f-8)
if num_dims == 3
reg_HES = x -> @tullio res = sqrt(ϵ + abs2(x[i+1,j,k] + x[i-1,j,k] - 2* x[i,j,k]) +
abs2(x[i,j+1,k] + x[i,j-1,k] - 2* x[i,j,k]) +
abs2(x[i,j,k+1] + x[i,j,k-1] - 2* x[i,j,k]) +
2 * abs2(x[i+1,j+1,k] - x[i+1,j,k] - x[i,j+1,k] + x[i, j,k]) +
2 * abs2(x[i+1,j,k+1] - x[i+1,j,k] - x[i,j,k+1] + x[i, j,k]) +
2 * abs2(x[i,j+1,k+1] - x[i,j+1,k] - x[i,j,k+1] + x[i, j,k]))
return reg_HES
elseif num_dims == 2
reg_HES = x -> @tullio res = sqrt(ϵ + abs2(x[i+1, j] + x[i-1, j] - 2* x[i, j]) +
abs2(x[i,j+1] + x[i, j-1] - 2* x[i, j]) +
2 * abs2(x[i+1, j+1] - x[i+1, j] - x[i, j+1] + x[i, j]))
return reg_HES
else
throw(ArgumentError("num_dims must be 2 or 3"))
end
end
| DeconvOptim | https://github.com/roflmaostc/DeconvOptim.jl.git |
|
[
"MIT"
] | 0.7.3 | 4257575512979d48424448b1cb44e29d6fd5fd4a | code | 1598 | export TV_cuda
f_inds(rs, b) = ntuple(i -> i == b ? rs[i] .+ 1 : rs[i], length(rs))
"""
TV_cuda(; num_dims=2)
This function returns a function to calculate the Total Variation regularizer
of a 2 or 3 dimensional array.
`num_dims` can be either `2` or `3`.
```julia-repl
julia> using CUDA
julia> reg = TV_cuda(num_dims=2);
julia> reg(CuArray([1 2 3; 4 5 6; 7 8 9]))
12.649111f0
```
"""
function TV_cuda(; num_dims=2, weights=ones(Float32, num_dims), ϵ=1f-8)
if num_dims == 3
return arr -> TV_3D_view(arr, weights, ϵ)
elseif num_dims == 2
return arr -> TV_2D_view(arr, weights, ϵ)
else
throw(ArgumentError("num_dims must be 2 or 3"))
end
end
function TV_2D_view(arr::AbstractArray{T, N}, weights, ϵ=1f-8) where {T, N}
as = ntuple(i -> axes(arr, i), Val(N))
rs = map(x -> first(x):last(x)-1, as)
arr0 = view(arr, f_inds(rs, 0)...)
arr1 = view(arr, f_inds(rs, 1)...)
arr2 = view(arr, f_inds(rs, 2)...)
return @fastmath sum(sqrt.(ϵ .+ weights[1] .* (arr1 .- arr0).^2 .+ weights[2] .* (arr0 .- arr2).^2))
end
function TV_3D_view(arr::AbstractArray{T, N}, weights, ϵ=1f-8) where {T, N}
as = ntuple(i -> axes(arr, i), Val(N))
rs = map(x -> first(x):last(x)-1, as)
arr0 = view(arr, f_inds(rs, 0)...)
arr1 = view(arr, f_inds(rs, 1)...)
arr2 = view(arr, f_inds(rs, 2)...)
arr3 = view(arr, f_inds(rs, 3)...)
return @fastmath sum(sqrt.(ϵ .+ weights[1] .* (arr1 .- arr0).^2 .+
weights[2] .* (arr2 .- arr0).^2 .+ weights[3] .* (arr3 .- arr0).^2 ))
end
| DeconvOptim | https://github.com/roflmaostc/DeconvOptim.jl.git |
|
[
"MIT"
] | 0.7.3 | 4257575512979d48424448b1cb44e29d6fd5fd4a | code | 827 |
isgpu(x) = false
function __init__()
@require CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba" begin
@info "DeconvOptim.jl: CUDA.jl is loaded, so include GPU functionality"
gpu_or_cpu(x) = CUDA.CuArray
isgpu(x::CUDA.CuArray) = true
# prevent slow scalar indexing on GPU
CUDA.allowscalar(false);
# we need to fix some operations so that they are fast on GPUs
# # Reference: https://discourse.julialang.org/t/cuarray-and-optim/14053
# LinearAlgebra.norm1(x::CUDA.CuArray{T,N}) where {T,N} = sum(abs, x); # specializes the one-norm
# LinearAlgebra.normInf(x::CUDA.CuArray{T,N}) where {T,N} = maximum(abs, x); # specializes the one-norm
# Optim.maxdiff(x::CUDA.CuArray{T,N},y::CUDA.CuArray{T,N}) where {T,N} = maximum(abs.(x-y));
end
end
| DeconvOptim | https://github.com/roflmaostc/DeconvOptim.jl.git |
|
[
"MIT"
] | 0.7.3 | 4257575512979d48424448b1cb44e29d6fd5fd4a | code | 9566 | export generate_psf
export generate_downsample, my_interpolate
export center_extract, center_set!, get_indices_around_center, center_pos
"""
generate_downsample(num_dim, downsample_dims, factor)
Generate a function (based on Tullio.jl) which can be used to downsample arrays.
`num_dim` (Integer) are the dimensions of the array.
`downsample_dims` is a list of which dimensions should be downsampled.
`factor` is a downsampling factor. It needs to be an integer number.
# Examples
```jldoctest
julia> ds = generate_downsample(2, [1, 2], 2)
[...]
julia> ds([1 2; 3 4; 5 6; 7 8])
2×1 Array{Float64,2}:
2.5
6.5
julia> ds = generate_downsample(2, [1], 2)
[...]
julia> ds([1 2; 3 5; 5 6; 7 8])
2×2 Array{Float64,2}:
2.0 3.5
6.0 7.0
```
"""
function generate_downsample(num_dim, downsample_dims, factor)
@assert num_dim ≥ length(downsample_dims)
# create unit cell with Cartesian Index
# dims_units contains a 1 at every dimension where the downsampling should happen
dims_units = zeros(Int, num_dim)
# here we set which dimensions should be downsampled
dims_units[downsample_dims] .= 1
# the unit cell expressed in CartesianIndex
one = CartesianIndex(dims_units...)
# create a list of symbols
# these list represents the symbols to access the arrays
ind = :i
inds_out = map(1:num_dim) do di
i = Symbol(ind, di)
end
# output list for the add commands
add = []
# via CartesianIndex we can loop over all rectangular neighbours
# we loop only over the neighbours in the downsample_dims
for n = one:one*factor
# for each index calculate the offset to the neighbour
inds = map(1:num_dim) do di
i = Symbol(ind, di)
if n[di] == 0
di = i
else
expr = :($factor * $i)
diff = -factor + n[di]
di = :($expr + $diff)
end
end
# push this single neighbour to add list
push!(add, :(arr[$(inds...)]))
end
# combine the different parts and divide for averaging
expr = [:(@tullio res[$(inds_out...)] := (+($(add...))) / $factor^$(length(downsample_dims)))]
#= return expr =#
# evaluate to function
@eval f = arr -> ($(expr...))
return f
end
"""
my_interpolate(arr, size_n, [interp_type])
Interpolates `arr` to the sizes provided in `size_n`.
Therefore it holds `ndims(arr) == length(size_n)`.
`interp_type` specifies the interpolation type.
See Interpolations.jl for all options.
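# Examples
A minimal sketch, bilinearly upsampling a 2×2 array:
```julia-repl
julia> out = my_interpolate([1.0 2.0; 3.0 4.0], (3, 3));
julia> size(out)
(3, 3)
```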
"""
function my_interpolate(arr, size_n, interp_type=BSpline(Linear()))
# we construct an array which includes the interpolation
# type for each dimension
interp = []
for s in size_n
# if the output size of the s-th dimension is 1,
# do NoInterp
if s == 1
push!(interp, NoInterp())
else
push!(interp, interp_type)
end
end
# prepare the interpolation
arr_n = interpolate(arr, Tuple(interp))
# interpolate introduces fractional indices
# via LinRange we access these fractional indices
inds = []
for d = 1:ndims(arr)
push!(inds, LinRange(1, size(arr)[d], size_n[d]))
end
# return the new array sampled at the positions of inds
# this accessing actually interpolates the data
return arr_n(inds...)
end
"""
get_indices_around_center(i_in, i_out)
A function which provides two output indices `i1` and `i2`
where `i2 - i1 + 1 = i_out`
The indices are chosen in a way that the set `i1:i2`
cuts the interval `1:i_in` in a way that the center frequency
stays at the center position.
Works for both odd and even sizes.
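# Examples
```jldoctest
julia> DeconvOptim.get_indices_around_center(5, 3)
(2, 4)
julia> DeconvOptim.get_indices_around_center(6, 3)
(3, 5)
```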
"""
function get_indices_around_center(i_in, i_out)
if (mod(i_in, 2) == 0 && mod(i_out, 2) == 0
||
mod(i_in, 2) == 1 && mod(i_out, 2) == 1)
x = (i_in - i_out) ÷ 2
return 1 + x, i_in - x
elseif mod(i_in, 2) == 1 && mod(i_out, 2) == 0
x = (i_in - 1 - i_out) ÷ 2
return 1 + x, i_in - x - 1
elseif mod(i_in, 2) == 0 && mod(i_out, 2) == 1
x = (i_in - (i_out - 1)) ÷ 2
return 1 + x, i_in - (x - 1)
end
end
"""
center_extract(arr, new_size_array)
Extracts a center of an array.
`new_size_array` must be list of sizes indicating the output
size of each dimension. Centered means that a center frequency
stays at the center position. Works for even and uneven sizes.
If `length(new_size_array) < length(ndims(arr))` the remaining dimensions
are untouched and copied.
# Examples
```jldoctest
julia> DeconvOptim.center_extract([1 2; 3 4], [1])
1×2 Array{Int64,2}:
3 4
julia> DeconvOptim.center_extract([1 2; 3 4], [1, 1])
1×1 Array{Int64,2}:
4
julia> DeconvOptim.center_extract([1 2 3; 3 4 5; 6 7 8], [2 2])
2×2 Array{Int64,2}:
1 2
3 4
```
"""
function center_extract(arr::AbstractArray, new_size_array)
if size(arr) == new_size_array
return arr
end
new_size_array = collect(new_size_array)
# we construct two lists
# the reason is, that we don't change higher dimensions which are not
# specified in new_size_array
out_indices1 = [get_indices_around_center(size(arr)[x], new_size_array[x])
for x = 1:length(new_size_array)]
out_indices1 = [x[1]:x[2] for x = out_indices1]
# out_indices2 contains just ranges covering the full size of each dimension
out_indices2 = [1:size(arr)[i] for i = (1+length(new_size_array)):ndims(arr)]
return view(arr, out_indices1..., out_indices2...)
end
# custom reverse rule: the adjoint of center extraction embeds the cotangent, zero-padded, back at the center
function ChainRulesCore.rrule(::typeof(center_extract), arr, new_size_array)
new_arr = center_extract(arr, new_size_array)
function aux_pullback(xbar)
if size(arr) == new_size_array
return zero(eltype(arr)), xbar, zero(eltype(arr))
else
# embed the cotangent into a zero array of the full input size
∇ = similar(arr, size(arr))
fill!(∇, zero(eltype(arr)))
o = similar(arr, new_size_array)
fill!(o, one(eltype(arr)))
o .*= xbar
center_set!(∇, o)
return zero(eltype(arr)), ∇, zero(eltype(arr))
end
end
return new_arr, aux_pullback
end
"""
center_set!(arr_large, arr_small)
Places `arr_small` centered into `arr_large`.
The convention for the center position is the same
as for FFT-based centering.
Works for both even and odd array sizes.
# Examples
```jldoctest
julia> DeconvOptim.center_set!([1, 1, 1, 1, 1, 1], [5, 5, 5])
6-element Array{Int64,1}:
1
1
5
5
5
1
```
"""
function center_set!(arr_large, arr_small)
out_is = []
for i = 1:ndims(arr_large)
a, b = get_indices_around_center(size(arr_large)[i], size(arr_small)[i])
push!(out_is, a:b)
end
arr_large[out_is...] .= arr_small
return arr_large
end
"""
center_pos(x)
Calculate the position of the center frequency.
`x` is the size of the array along one dimension.
# Examples
```jldoctest
julia> DeconvOptim.center_pos(3)
2
julia> DeconvOptim.center_pos(4)
3
```
"""
function center_pos(x::Integer)
# integer division
return div(x, 2) + 1
end
"""
generate_psf(psf_size, radius)
Generation of an approximate 2D PSF.
`psf_size` is the output size of the PSF. The PSF will be centered
around the point [1, 1].
`radius` indicates the pupil radius in pixels from which the PSF is generated.
!!! note
The returned 2D PSF is `fftshift`ed in contrast to models you may find in
the literature.
# Examples
```julia-repl
julia> generate_psf([5, 5], 2)
5×5 Array{Float64,2}:
0.36 0.104721 0.0152786 0.0152786 0.104721
0.104721 0.0304627 0.00444444 0.00444444 0.0304627
0.0152786 0.00444444 0.000648436 0.000648436 0.00444444
0.0152786 0.00444444 0.000648436 0.000648436 0.00444444
0.104721 0.0304627 0.00444444 0.00444444 0.0304627
```
"""
function generate_psf(psf_size, radius)
mask = rr_2D(psf_size) .<= radius
mask_ft = fft(mask)
psf = abs2.(mask_ft)
return psf ./ sum(psf)
end
# 3D analogue of `rr_2D`: distance of each entry to the center position
function rr_3D(s)
rarr = zeros((s...))
for k = 1:s[3]
for j = 1:s[2]
for i = 1:s[1]
rarr[i, j, k] = sqrt((i - center_pos(s[1]))^2 + (j - center_pos(s[2]))^2 + (k - center_pos(s[3]))^2)
end
end
end
return rarr
end
"""
rr_2D(s)
Generate an image whose values are the distance to the center pixel.
`s` specifies the output size of the 2D array.
# Examples
```julia-repl
julia> DeconvOptim.rr_2D((6, 6))
6×6 Array{Float64,2}:
4.24264 3.60555 3.16228 3.0 3.16228 3.60555
3.60555 2.82843 2.23607 2.0 2.23607 2.82843
3.16228 2.23607 1.41421 1.0 1.41421 2.23607
3.0 2.0 1.0 0.0 1.0 2.0
3.16228 2.23607 1.41421 1.0 1.41421 2.23607
3.60555 2.82843 2.23607 2.0 2.23607 2.82843
```
"""
function rr_2D(s)
rarr = zeros((s...))
for j = 1:s[2]
for i = 1:s[1]
rarr[i, j] = sqrt((i - center_pos(s[1]))^2 + (j - center_pos(s[2]))^2)
end
end
return rarr
end
# a user-supplied mapping is a (map, inverse map) tuple
function get_mapping(mapping)
return mapping[1], mapping[2]
end
# no mapping provided: use the identity in both directions
function get_mapping(mapping::Nothing)
return identity, identity
end
function get_regularizer(reg, etype)
return reg
end
# no regularizer provided: contribute a constant zero to the loss
function get_regularizer(reg::Nothing, etype)
return x -> zero(etype)
end
"""
next_fast_fft_size(x)
`x` is an integer size or a tuple of sizes.
It rounds each size up to the next fast FFT size.
FFTs are especially fast for sizes which factor into the small primes 2, 3, 5 and 7.
"""
function next_fast_fft_size(x)
nextprod([2, 3, 5, 7], x)
end
function next_fast_fft_size(x::Tuple)
next_fast_fft_size.(x)
end
| DeconvOptim | https://github.com/roflmaostc/DeconvOptim.jl.git |
|
[
"MIT"
] | 0.7.3 | 4257575512979d48424448b1cb44e29d6fd5fd4a | code | 1201 |
@testset "Analysis tools" begin
# minimalistic test
@testset "Relative Energy regain" begin
x = [1f0 -2; 4 5; 7 8 ]
freqs, regain = DeconvOptim.relative_energy_regain(x, x .* 1)
@test freqs ≈ Float32[0, 1/3, 1/2, sqrt(13)/6]
@test regain ≈ Float32[1, 1, 1, 1]
freqs2, regain2 = DeconvOptim.relative_energy_regain(x, x .* 0.5)
@test freqs2 ≈ Float32[0, 1/3, 1/2, sqrt(13)/6]
@test regain2 ≈ Float32[0.75, 0.75, 0.75, 0.75]
end
# minimalistic test
@testset "Normalized Cross Correlation" begin
x = [1f0 -2; 4 5; 7 8 ]
@test DeconvOptim.normalized_cross_correlation(x, x) == 1.0f0
end
@testset "Trace Deconvolution" begin
sz = (10,10)
x = rand(sz...)
psf = rand(10,10)
psf /= sum(psf)
y = DeconvOptim.conv(x,psf)
# test whether starting with the ground truth really yield the perfect reconstruction after 0 iterations
opt_options, summary = DeconvOptim.options_trace_deconv(x, 0, nothing)
res = deconvolution(y,psf; initial=x, mapping=nothing, padding=0.0, opt_options=opt_options)
@test summary["nccs"][1] > 0.999
@test summary["nvars"][1] < 0.001
@test (summary["best_nvar_img"] ≈ x)
end
end
| DeconvOptim | https://github.com/roflmaostc/DeconvOptim.jl.git |
|
[
"MIT"
] | 0.7.3 | 4257575512979d48424448b1cb44e29d6fd5fd4a | code | 3406 | @testset "Convolution methods" begin
conv_gen(u, v, dims) = real(ifft(fft(u, dims) .* fft(v, dims), dims))
function conv_test(psf, img, img_out, dims, s)
otf = fft(psf, dims)
otf_r = rfft(psf, dims)
otf_p, conv_p = plan_conv(img, psf, dims)
otf_p2, conv_p2 = plan_conv(img .+ 0.0im, 0.0im .+ psf, dims)
otf_p3, conv_p3 = plan_conv_psf(img, fftshift(psf,dims), dims)
@testset "$s" begin
@test img_out ≈ conv(0.0im .+ img, psf, dims)
@test img_out ≈ conv(img, psf, dims)
@test img_out ≈ conv_p(img, otf_p)
@test img_out ≈ conv_p(img)
@test img_out ≈ conv_p2(img .+ 0.0im, otf_p2)
@test img_out ≈ conv_p2(img .+ 0.0im)
@test img_out ≈ conv_psf(img, fftshift(psf, dims), dims)
@test img_out ≈ conv_p3(img)
end
end
N = 5
psf = zeros((N, N))
psf[1, 1] = 1
img = randn((N, N))
conv_test(psf, img, img, [1,2], "Convolution random image with delta peak")
N = 5
psf = zeros((N, N))
psf[1, 1] = 1
img = randn((N, N, N))
conv_test(psf, img, img, [1,2], "Convolution with different dimensions psf, img delta")
N = 5
psf = abs.(randn((N, N, 2)))
img = randn((N, N, 2))
dims = [1, 2]
img_out = conv_gen(img, psf, dims)
conv_test(psf, img, img_out, dims, "Convolution with random 3D PSF and random 3D image over 2D dimensions")
N = 5
psf = abs.(randn((N, N, N, N, N)))
img = randn((N, N, N, N, N))
dims = [1, 2, 3, 4]
img_out = conv_gen(img, psf, dims)
conv_test(psf, img, img_out, dims, "Convolution with random 5D PSF and random 5D image over 4 Dimensions")
N = 5
psf = abs.(zeros((N, N, N, N, N)))
for i = 1:N
psf[1,1,1,1, i] = 1
end
img = randn((N, N, N, N, N))
dims = [1, 2, 3, 4]
img_out = conv_gen(img, psf, dims)
conv_test(psf, img, img, dims, "Convolution with 5D delta peak and random 5D image over 4 Dimensions")
@testset "Check types" begin
N = 10
img = randn(Float32, (N, N))
psf = abs.(randn(Float32, (N, N)))
dims = [1, 2]
@test typeof(conv_gen(img, psf, dims)) == typeof(conv(img, psf))
@test typeof(conv_gen(img, psf, dims)) != typeof(conv(img .+ 0f0im, psf))
@test conv_gen(img, psf, dims) .+ 1f0im ≈ 1f0im .+ conv(img .+ 0f0im, psf)
end
@testset "Check type get_plan" begin
@test plan_rfft === DeconvOptim.get_plan(typeof(1f0))
@test plan_fft === DeconvOptim.get_plan(typeof(1im))
end
@testset "dims argument nothing" begin
N = 5
psf = abs.(randn((N, N, N, N, N)))
img = randn((N, N, N, N, N))
dims = [1,2,3,4,5]
@test conv(psf, img) ≈ conv(img, psf, dims)
@test conv(psf, img) ≈ conv(psf, img, dims)
@test conv(img, psf) ≈ conv(img, psf, dims)
end
@testset "adjoint convolution" begin
x = randn(ComplexF32, (5,6))
y = randn(ComplexF32, (5,6))
y_ft, p = plan_conv(x, y)
@test ≈(exp(1im * 1.23) .+ conv(ones(eltype(y), size(x)), conj.(y)), exp(1im * 1.23) .+ Zygote.gradient(x -> sum(real(conv(x, y))), x)[1], rtol=1e-4)
@test ≈(exp(1im * 1.23) .+ conv(ones(ComplexF32, size(x)), conj.(y)), exp(1im * 1.23) .+ Zygote.gradient(x -> sum(real(p(x))), x)[1], rtol=1e-4)
end
end
| DeconvOptim | https://github.com/roflmaostc/DeconvOptim.jl.git |
|
[
"MIT"
] | 0.7.3 | 4257575512979d48424448b1cb44e29d6fd5fd4a | code | 328 | @testset "Forward Models: Convolution" begin
N = 5
psf = zeros((N, N))
psf[1, 1] = 1
img = randn((N, N))
c(img, psf) = conv(img, psf, [1, 2])
conv_temp = c
@test conv_temp(img, psf) ≈ img
s(img, psf) = sum(conv_temp(img, psf))
@test all(1 .≈ Zygote.gradient(s, img, psf)[1])
end
| DeconvOptim | https://github.com/roflmaostc/DeconvOptim.jl.git |
|
[
"MIT"
] | 0.7.3 | 4257575512979d48424448b1cb44e29d6fd5fd4a | code | 669 |
@testset "Test eigvals" begin
function f(a1::T,b1,c1) where T
a = Array{T, 2}(undef, 1, 1)
a[1,1] = a1
b = Array{T, 2}(undef, 1, 1)
b[1,1] = b1
c = Array{T, 2}(undef, 1, 1)
c[1,1] = c1
@test all(.≈(DeconvOptim.eigvals_symmetric_tullio(a,b,c), DeconvOptim.eigvals_symmetric(a,b,c)))
end
f(10.0, 20.0, -10.0)
f(0f0, -12f0, 13f0)
end
@testset "Schatten norm consistent" begin
x = [1 2 3; 1 1 1; 0 0 -1f0]
@test DeconvOptim.HSp(x, p = 1) ≈ 0.9999999900000001
@test DeconvOptim.HSp(x, p = 2) ≈ 1.732050831647567
@test abs.(DeconvOptim.HSp(x, p = 1)) ≈ DeconvOptim.HS1(x)
end
| DeconvOptim | https://github.com/roflmaostc/DeconvOptim.jl.git |
|
[
"MIT"
] | 0.7.3 | 4257575512979d48424448b1cb44e29d6fd5fd4a | code | 1110 | @testset "Poisson loss" begin
N = 6
img = abs.(randn((N, N)))
@test 0.22741127776021886 ≈ poisson_aux([1.,2.], [3.,4.])
@test all(0 .≈ Zygote.gradient(poisson_aux, img, img)[1])
@test all( -1 .≈ Zygote.gradient(poisson_aux, [1.], [2.])[1])
end
@testset "Gaussian Loss" begin
N = 6
img = abs.(randn((N, N)))
gauss = Gauss()
@test 0 ≈ gauss(img, img)
@test 0 ≈ gauss_aux(img, img)
@test all(0 .≈ Zygote.gradient(gauss_aux, img, img)[1])
end
@testset "Scaled Gauss" begin
N = 6
img = abs.(randn((N, N)))
scaled_gauss = ScaledGauss(0)
@test 3.4094379124341003 ≈ scaled_gauss([5.], [2.])
@test 3.4094379124341003 ≈ scaled_gauss_aux([5.], [2.], read_var=0)
@test all( -0.75 .≈ Zygote.gradient((a, b) -> scaled_gauss_aux(a, b, read_var=1), [1.], [2.])[1])
end
@testset "Anscombe Loss" begin
N = 6
img = abs.(randn((N, N)))
anscombe = Anscombe(1.0)
@test 0 ≈ anscombe(img, img)
@test 0 ≈ anscombe_aux(img, img,b=1)
@test all(0 .≈ Zygote.gradient(im1 -> anscombe_aux(im1,img, b=1), img)[1])
end
| DeconvOptim | https://github.com/roflmaostc/DeconvOptim.jl.git |
|
[
"MIT"
] | 0.7.3 | 4257575512979d48424448b1cb44e29d6fd5fd4a | code | 4515 | @testset "Testing Main deconvolution code" begin
Random.seed!(42)
img = [0.5560268761463861 0.29948409035891055 0.46860588216767457; 0.444383357109696 1.7778610980573246 0.15614346264074028; 0.027155338009193845 1.14490153172882 2.641991008076796]
psf = [1.0033099014594844 0.5181487878771377 0.8862052960481365; 1.0823812056084292 1.4913791170403063 0.6845647041648603; 0.18702790710363 0.3675627461748204 1.590579974922555]
res = [2.4393835034275493 0.013696697097634842 0.0002833052222499294; 0.07541628019133978 1.0066536888249171 0.02222160874466724; 0.0004945773667781262 0.008547708184955495 3.717245734531717]
#@show deconvolution(img, psf, λ=0.01)[1]
@test all(≈(res, deconvolution(img, psf, λ=0.01)[1], rtol=0.1))
@test all(≈(res, deconvolution(img, psf, λ=0.01)[1], rtol=0.1))
@test all(≈(res, deconvolution(img, psf, λ=0.01, iterations=20)[1], rtol=0.1))
# testing regularizer
res2 = [4.188270526990536 5.999388400251461e-10 2.8299849680327642e-8; 1.725273124171714e-7 2.54195544512864 2.0216187854619135e-9; 9.594324085846738e-10 1.2000166997002865e-8 0.7863126081711094]
@test all(≈(res2, deconvolution(img, psf, regularizer=nothing, padding=0.0)[1], rtol=0.1))
# testing padding
img = [8.21306764808666 10.041589152470781 86.74936458947307; 17.126996611046078 4.324960146596254 11.39657297820361; 21.019754207225656 14.128485444028698 28.441178191470662]
psf = [0.37240087577993225 0.562668812321259 0.6810849274435286; 0.36901028455183293 0.10686911035365092 1.3391251213773154; 0.007612980079313577 0.5694584949295476 0.23828371819888622]
#@show deconvolution(img, psf, regularizer=nothing, padding=0.1)[1]
res3 = [3.700346192004195 16.163004402781457 0.0005317269571170434; 0.3852386808335988 14.378882057906575 0.04691706650167405; 1.5059695704739982 16.94303953714345 22.72731111751148]
@test all(≈(res3, deconvolution(img, psf, regularizer=nothing, padding=0.1)[1], rtol=1e-2))
# test without mapping
res4 = [-13.085096066984729 34.39174935297922 -11.353417020874208; -13.01079706662783 43.76596781851713 -7.565144283495296; 16.740081837985805 -7.488374542587605 37.666978022259336]
#= @show deconvolution(img, psf, regularizer=nothing, padding=0.1, mapping=nothing)[1] =#
@test all(≈(res4, deconvolution(img, psf, regularizer=nothing, padding=0.1, mapping=nothing)[1], rtol=0.1))
# test OptimPackNextGen optimizers (which is currently not officially released yet)
# TODO: temporarily excluded
#res5 = [0.49633 16.4936 0.00862045; 0.0139499 15.8587 2.31716; 0.000227487 9.263 19.5289]
#@test all(≈(res5, deconvolution(img, psf, opt=vmlmb!, opt_options=(mem=20, lower=0, lnsrch=OptimPackNextGen.LineSearches.MoreThuenteLineSearch()),
#regularizer=nothing, padding=0.1, mapping=nothing, opt_package=Opt_OptimPackNextGen)[1], rtol=0.1))
# test broadcasting with image having more dimensions
img = zeros((3, 3, 2))
imgc = abs.(randn((3, 3, 1)))
img[:, :, 1] = imgc
img[:, :, 2] = imgc
psf = zeros((3, 3))
psf[1,1] = 1
res = deconvolution(img, psf, regularizer=GR(num_dims=3, sum_dims=[1,2]))[1]
@test all(res[:, :, 1] .≈ res[:, :, 2])
img = zeros((3, 3, 2, 1))
imgc = abs.(randn((3, 3, 1, 1)))
img[:, :, 1, 1] = imgc
img[:, :, 2, 1] = imgc
psf = zeros((3, 3))
psf[1,1] = 1
res = deconvolution(img, psf, regularizer=GR(num_dims=4, sum_dims=[1,2]))[1]
@test all(≈(res[:, :, 1, :], res[:, :, 2, :], rtol=0.1))
end
@testset "Compare optimization with iterative lucy richardson scheme" begin
img = Float32.(testimage("resolution_test_512"));
psf = Float32.(generate_psf(size(img), 30));
img_b = conv(img, psf);
img_n = poisson(img_b, 300);
reg = GR()
# don't provide argument to test for world age bugs
res = richardson_lucy_iterative(img_n, psf, iterations=200);
res2, o = deconvolution(img_n, psf, regularizer=reg, iterations=30);
@test ≈(res .+ 1, res2 .+ 1, rtol=0.003)
reg = TV()
res = richardson_lucy_iterative(img_n, psf, iterations=500, λ=0.005, regularizer=reg);
res2, o = deconvolution(img_n, psf, iterations=35, regularizer=reg, λ=0.005);
@test ≈(res2 .+1, res .+ 1, rtol=0.02)
reg = nothing
res = richardson_lucy_iterative(img_n, psf, regularizer=reg, iterations=400);
res2, o = deconvolution(img_n, psf, regularizer=reg, iterations=40);
@test ≈(res2 .+1, res .+ 1, rtol=0.02)
end
| DeconvOptim | https://github.com/roflmaostc/DeconvOptim.jl.git |
|
[
"MIT"
] | 0.7.3 | 4257575512979d48424448b1cb44e29d6fd5fd4a | code | 1310 |
@testset "Non_negative" begin
p, p_inv = Non_negative()
x = abs.(randn((10, 10)))
x2 = 100 .*randn((10, 10))
@test x ≈ p(p_inv(x))
@test x ≈ p_inv(p(x))
@test all(p(x2) .>= 0)
end
@testset "Map_0_1" begin
p, p_inv = Map_0_1()
x = abs.(randn((10, 10)))
x2 = 100 .*randn((10, 10))
@test x ≈ p_inv(p(x))
@test all(p(x2) .>= 0)
@test all(p(x2) .<= 1)
end
@testset "Pow4_positive" begin
p, p_inv = Pow4_positive()
x = abs.(randn((10, 10)))
x2 = 100 .*randn((10, 10))
@test x ≈ p(p_inv(x))
@test x ≈ p_inv(p(x))
@test all(p(x2) .>= 0)
end
@testset "Piecewise_positive" begin
p, p_inv = Piecewise_positive()
x = abs.(randn((10, 10)))
x2 = 100 .*randn((10, 10))
@test x ≈ p(p_inv(x))
@test x ≈ p_inv(p(x))
@test all(p(x2) .>= 0)
function f(x)
@test all(.≈(Zygote.gradient(x -> sum(p(x)), x)[1], (p(x .+ 1e-8) .- p(x))./1e-8, rtol=1e-4))
@test Zygote.gradient(x -> sum(p(x)), x)[1] ≈ DeconvOptim.f_pw_pos_grad(x)
end
f([1.1, 12312.2, -10.123, 22.2, -123.23, 0])
end
@testset "Abs_positive" begin
p, p_inv = Abs_positive()
x = abs.(randn((10, 10)))
x2 = 100 .*randn((10, 10))
@test x ≈ p(p_inv(x))
@test x ≈ p_inv(p(x))
@test all(p(x2) .>= 0)
end
| DeconvOptim | https://github.com/roflmaostc/DeconvOptim.jl.git |
|
[
"MIT"
] | 0.7.3 | 4257575512979d48424448b1cb44e29d6fd5fd4a | code | 1884 | @testset "generate indices" begin
@test DeconvOptim.generate_indices(5, 2, 1, 5) == (Any[:i1, :(i2 + 1), :i3, :i4, :i5], Any[:i1, :(i2 + 5), :i3, :i4, :i5])
end
@testset "generate_laplace" begin
x = DeconvOptim.generate_laplace(2, [1, 2], [4 , 5], debug=true)
@test x==Any[:(res = abs2((4 * arr[i1 + 1, i2] + 4 * arr[i1 + -1, i2]) + (5 * arr[i1, i2 + 1] + 5 * arr[i1, i2 + -1]) + -(18* arr[i1, i2])))]
x = DeconvOptim.generate_laplace(2, [1, 2], [1 , 1], debug=true)
@test x==Any[:(res = abs2((1 * arr[i1 + 1, i2] + 1 * arr[i1 + -1, i2]) + (1 * arr[i1, i2 + 1] + 1 * arr[i1, i2 + -1]) + -(4 * arr[i1, i2])))]
end
@testset "Tikhonov" begin
x = [1,2,3,1,3,1,12.0,2,2,3,2.0]
reg = Tikhonov(num_dims=1, sum_dims=[1], weights=[1])
@test 756 ≈ reg(x)
reg = Tikhonov(num_dims=1, mode="spatial_grad_square")
@test 188 ≈ reg(x)
reg = Tikhonov(num_dims=1, mode="identity")
@test 190 ≈ reg(x)
end
@testset "Good's roughness" begin
x = generate_GR(5, [1,2], [4, 5], 1, -1, debug=true)
@test x == Any[:(res = -2.0 * arr[i1, i2, i3, i4, i5] * (4 * (arr[i1 + 1, i2, i3, i4, i5] + arr[i1 + -1, i2, i3, i4, i5]) + 5 * (arr[i1, i2 + 1, i3, i4, i5] + arr[i1, i2 + -1, i3, i4, i5]) + -18 * arr[i1, i2, i3, i4, i5]))]
x = [1,2,3,1,3,1,12.0,2,2,3,2.0]
reg = GR(num_dims=1, sum_dims=[1], weights=[1])
@test 22.71233466779126 ≈ reg(x)
end
@testset "TV" begin
x = [1,2,3,1,3,1,12.0,2,2,3,2.0]
reg = TV(num_dims=1, sum_dims=[1], weights=[1])
@test 31.00010002845424 ≈ reg(x)
@test TV_cuda(num_dims=2)(x) ≈ reg(x)
@test TV_cuda(num_dims=3)(x) ≈ reg(x)
x = generate_TV(4, [1,2], [5, 7], 1, -1, debug=true)
@test x == Any[:(res = sqrt(5 * abs2(arr[i1 + 1, i2, i3, i4] - arr[i1 + -1, i2, i3, i4]) + 7 * abs2(arr[i1, i2 + 1, i3, i4] - arr[i1, i2 + -1, i3, i4]) + 1.0f-8))]
end
| DeconvOptim | https://github.com/roflmaostc/DeconvOptim.jl.git |
|
[
"MIT"
] | 0.7.3 | 4257575512979d48424448b1cb44e29d6fd5fd4a | code | 585 | using DeconvOptim
using Test
using FFTW, Noise, Statistics, Zygote
using Random
using TestImages, Noise
using Pkg
#Pkg.add(url="https://github.com/emmt/OptimPackNextGen.jl")
#using OptimPackNextGen
# fix seed for reproducibility
Random.seed!(42)
@testset "Utils" begin
include("utils.jl")
end
include("analysis_tools.jl")
include("hessian_schatten_norm.jl")
include("conv.jl")
include("mappings.jl")
include("forward_models.jl")
include("lossfunctions.jl")
# testing is rather hard, but include at least some basic testing
include("regularizer.jl")
include("main.jl")
| DeconvOptim | https://github.com/roflmaostc/DeconvOptim.jl.git |
|
[
"MIT"
] | 0.7.3 | 4257575512979d48424448b1cb44e29d6fd5fd4a | code | 2629 |
function center_test(x1, x2, x3, y1, y2, y3)
arr1 = randn((x1, x2, x3))
arr2 = zeros((y1, y2, y3))
center_set!(arr2, arr1)
arr3 = center_extract(arr2, (x1, x2, x3))
@test arr1 ≈ arr3
end
# test center set and center extract methods
@testset "center methods" begin
center_test(4, 4, 4, 6,7,4)
center_test(5, 4, 4, 7, 8, 4)
center_test(5, 4, 4, 8, 8, 8)
center_test(6, 4, 4, 7, 8, 8)
@test 1 == center_pos(1)
@test 2 == center_pos(2)
@test 2 == center_pos(3)
@test 3 == center_pos(4)
@test 3 == center_pos(5)
@test 513 == center_pos(1024)
end
@testset "interpolate methods" begin
x = [12,2,2,1,2,4,3,1]
y = [12, 7, 2, 2, 2, 1.5, 1, 1.5, 2, 3, 4, 3.5, 3, 2, 1]
@test y ≈ my_interpolate(x, (15))
x = collect(0:0.05:3)
y = my_interpolate(x, (2 * size(x)[1]))
@test isapprox(mean(exp.(y)), mean(exp.(x)), rtol=1e-2)
x = sin.(collect(0:0.001:3))
y = my_interpolate(x, size(x)[1] * 3 + 2)
y2 = my_interpolate(y, size(x)[1])
@test isapprox(y2, x, rtol=1e-6)
x = [12,2,2,1,2,4,3,1]
y = [12, 7, 2, 2, 2, 1.5, 1, 1.5, 2, 3, 4, 3.5, 3, 2, 1]
y_2d = reshape(y, 1, :)
x_2d = my_interpolate(reshape(x, 1, :), size(y_2d))
@test isapprox(x_2d, y_2d, rtol=1e-6)
end
@testset "Generate downsample" begin
ds = generate_downsample(2, [1,2], 2)
@test [2.5] ≈ ds([1 2; 3 4])
ds = generate_downsample(2, [2], 2)
@test [1.5; 3.5; 5.5; 7.5] ≈ ds([1 2; 3 4; 5 6; 7 8])
ds = generate_downsample(2, [1], 2)
@test [2.0 3.0; 6.0 7.0] ≈ ds([1 2; 3 4; 5 6; 7 8])
end
@testset "Generate PSF method" begin
# large aperture is delta peak
out = zeros((5, 5))
out[1,1] = 1
@test out ≈ generate_psf((5, 5), 100)
# pinhole aperture
out = ones((10, 10))
out ./= sum(out)
@test out ≈ generate_psf((10, 10), 0.01)
# normalized
@test 1 ≈ sum(generate_psf((100, 100), 10))
end
@testset "rr methods" begin
out = [1.4142135623730951 1.0 1.4142135623730951; 1.0 0.0 1.0; 1.4142135623730951 1.0 1.4142135623730951]
@test out ≈ DeconvOptim.rr_2D((3,3))
@test [0] ≈ DeconvOptim.rr_2D((1, 1))
@test [2,1,0,1,2] ≈ DeconvOptim.rr_2D((5, 1))
@test [3,2,1,0,1,2] ≈ DeconvOptim.rr_2D((6, 1))
out = [1.7320508075688772 1.4142135623730951; 1.4142135623730951 1.0; 1.4142135623730951 1.0; 1.0 0.0]
out = reshape(out, (2,2,2))
@test out ≈ DeconvOptim.rr_3D((2,2,2))
end
@testset "next fast fft size" begin
@test DeconvOptim.next_fast_fft_size(23) == 24
@test DeconvOptim.next_fast_fft_size((23, 46)) == (24, 48)
end
| DeconvOptim | https://github.com/roflmaostc/DeconvOptim.jl.git |
|
[
"MIT"
] | 0.7.3 | 4257575512979d48424448b1cb44e29d6fd5fd4a | docs | 5404 | # DeconvOptim.jl
<br>
<a name="logo"/>
<div align="left">
<a href="https://roflmaostc.github.io/DeconvOptim.jl/stable/" target="_blank">
<img src="docs/src/assets/logo.svg" alt="DeconvOptim Logo" width="150"></img>
</a>
</div>
<br>
A package for microscopy image based deconvolution via Optim.jl. This package works with N dimensional <a href="https://github.com/RainerHeintzmann/PointSpreadFunctions.jl">Point Spread Functions</a> and images.
The package was created with microscopy in mind but since the code base is quite general it is possible to deconvolve different kernels as well.
<br>
| **Documentation** | **Build Status** | **Code Coverage** | **Publication** |
|:---------------------------------------:|:-----------------------------------------:|:-------------------------------:|:-----------------------:|
| [![][docs-stable-img]][docs-stable-url] [![][docs-dev-img]][docs-dev-url] | [![][CI-img]][CI-url] | [![][codecov-img]][codecov-url] |[![DOI](https://proceedings.juliacon.org/papers/10.21105/jcon.00099/status.svg)](https://doi.org/10.21105/jcon.00099)|
## Installation
Type `]`in the REPL to get to the package manager:
```julia
julia> ] add DeconvOptim
```
## Documentation
The documentation of the latest release is [here][docs-stable-url].
The documentation of the current master is [here][docs-dev-url].
For a quick introduction you can also watch the presentation from JuliaCon 2021.
<a href="https://www.youtube.com/watch?v=FodpnOhccis"><img src="docs/src/assets/julia_con.jpg" width="300"></a>
## Usage
A quick example is shown below.
```julia
using DeconvOptim, TestImages, Colors, ImageIO, Noise, ImageShow
# load test image
img = Float32.(testimage("resolution_test_512"))
# generate simple Point Spread Function of aperture radius 30
psf = Float32.(generate_psf(size(img), 30))
# create a blurred, noisy version of that image
img_b = conv(img, psf)
img_n = poisson(img_b, 300)
# deconvolve 2D with default options
@time res, o = deconvolution(img_n, psf)
# deconvolve 2D with no regularizer
@time res_no_reg, o = deconvolution(img_n, psf, regularizer=nothing)
# show final results next to original and blurred version
Gray.([img img_n res])
```
![Results Quick Example](docs/src/assets/quick_example_results.png)
## Examples
Have a quick look into the [examples folder](examples).
We demonstrate the effect of different regularizers. There is also a [CUDA example](examples/cuda_2D.ipynb).
Using regularizers together with a CUDA GPU is faster, but unfortunately only by a factor of ~5-10.
For [3D](examples/cuda_3D.ipynb) the speed-up is larger.
## CUDA
For CUDA we only provide a total variation regularizer via `TV_cuda`. The reason is that Tullio.jl is currently not very fast with `CuArray`s, especially
for the derivatives of such functions.
## Performance Tips
### Regularizers
The regularizers are generated with metaprogramming when `TV()` (or any other regularizer) is called. To prevent this code from being
compiled anew every time, define the regularizer once and use it multiple times without newly defining it:
```julia
reg = TV()
```
And in the new cell then use:
```julia
res, o = deconvolution(img_n, psf, regularizer=reg)
```
## Development
Feel free to file an issue regarding problems, suggestions or improvement ideas for this package!
We would be happy to deconvolve *real* data! File an issue if we can help deconvolving an image/stack. We would be also excited to adapt DeconvOptim.jl to your special needs!
## Citation
If you use this package, please cite our paper:
```bibtex
@article{Wechsler2023,
doi = {10.21105/jcon.00099},
url = {https://doi.org/10.21105/jcon.00099},
year = {2023},
publisher = {The Open Journal},
volume = {1},
number = {1},
pages = {99},
author = {Felix Wechsler and Rainer Heintzmann},
title = {DeconvOptim.jl - Signal Deconvolution with Julia},
journal = {Proceedings of the JuliaCon Conferences}
}
```
## Contributions
I would like to thank [Rainer Heintzmann](https://nanoimaging.de/) for the great support and discussions during development.
Furthermore without [Tullio.jl](https://github.com/mcabbott/Tullio.jl) and [@mcabbott](https://github.com/mcabbott/) this package wouldn't be as fast as it is. His package and ideas are the basis for the implementations of the regularizers.
## Related Packages
* [ThreeDeconv](https://github.com/computational-imaging/ThreeDeconv.jl): works great, CPU performance is much slower, GPU performance is slower
* [Deconvolution.jl](https://github.com/JuliaDSP/Deconvolution.jl): rather simple package with Wiener and Lucy Richardson deconvolution.
* [PointSpreadFunctions.jl](https://github.com/RainerHeintzmann/PointSpreadFunctions.jl): generates point spread functions for microscopy applications
[docs-dev-img]: https://img.shields.io/badge/docs-dev-orange.svg
[docs-dev-url]: https://roflmaostc.github.io/DeconvOptim.jl/dev/
[docs-stable-img]: https://img.shields.io/badge/docs-stable-blue.svg
[docs-stable-url]: https://roflmaostc.github.io/DeconvOptim.jl/stable/
[codecov-img]: https://codecov.io/gh/roflmaostc/DeconvOptim.jl/branch/master/graph/badge.svg
[codecov-url]: https://codecov.io/gh/roflmaostc/DeconvOptim.jl
[CI-img]: https://github.com/roflmaostc/DeconvOptim.jl/workflows/CI/badge.svg
[CI-url]: https://github.com/roflmaostc/DeconvOptim.jl/actions?query=workflow%3ACI
| DeconvOptim | https://github.com/roflmaostc/DeconvOptim.jl.git |
|
[
"MIT"
] | 0.7.3 | 4257575512979d48424448b1cb44e29d6fd5fd4a | docs | 2555 | # DeconvOptim.jl
A framework for deconvolution of images convolved with a Point Spread Function (PSF).
## Overview
In optics, especially in microscopy, measurements are done with lenses. These lenses support only certain frequencies
and weaken the contrast of high frequency content. Furthermore, in many cases Poisson or Gaussian noise is introduced by the
quantum nature of light (Poisson shot noise) or sensors (readout noise).
[DeconvOptim.jl](https://github.com/roflmaostc/DeconvOptim.jl) is a Julia solution for deconvolution, reducing the blur of lenses and denoising the image.
Our framework relies on several other tools:
The deconvolution problem is stated as a convex optimization problem via a loss function. Hence we make use of [Optim.jl](https://github.com/JuliaNLSolvers/Optim.jl/) and especially fast solvers like [L-BFGS](https://julianlsolvers.github.io/Optim.jl/stable/#algo/lbfgs/).
Since such solvers require gradients (of the loss function) we use automatic differentiation (AD) offered by [Zygote.jl](https://github.com/FluxML/Zygote.jl) for that.
Of course, one could derive the gradient by hand, however that's error-prone and for some regularizers hard to do by hand.
Furthermore, fast AD of the regularizers is hard to achieve if the gradients are written with for loops.
Fortunately [Tullio.jl](https://github.com/mcabbott/Tullio.jl) provides an extensive and fast framework to get expressions which can be differentiated by the AD at acceptable speed.
## Installation
To get the latest stable release of DeconvOptim.jl type `]` in the Julia REPL:
```
] add DeconvOptim
```
## Quick Example
Below is a quick example of how to deconvolve an image which is blurred with a point spread function.
```@jldoctest
using DeconvOptim, TestImages, Colors, ImageIO, Noise, ImageShow
# load test image
img = Float32.(testimage("resolution_test_512"))
# generate simple Point Spread Function of aperture radius 30
psf = Float32.(generate_psf(size(img), 30))
# create a blurred, noisy version of that image
img_b = conv(img, psf)
img_n = poisson(img_b, 300)
# deconvolve 2D with default options
@time res, o = deconvolution(img_n, psf)
# deconvolve 2D with no regularizer
@time res_no_reg, o = deconvolution(img_n, psf, regularizer=nothing)
# show final results next to original and blurred version
Gray.([img img_n res])
```
The left image is the sample. In the middle we display the noisy and blurred version captured with an optical system. The right image is the deconvolved image with default options.
![](assets/quick_example_results.png)
| DeconvOptim | https://github.com/roflmaostc/DeconvOptim.jl.git |
|
[
"MIT"
] | 0.7.3 | 4257575512979d48424448b1cb44e29d6fd5fd4a | docs | 69 | # References
See here for a list of references
```@bibliography
```
| DeconvOptim | https://github.com/roflmaostc/DeconvOptim.jl.git |
|
[
"MIT"
] | 0.7.3 | 4257575512979d48424448b1cb44e29d6fd5fd4a | docs | 4761 | # Loss functions
Loss functions are generally introduced in mathematical optimization theory.
The purpose is to map a certain optimization problem onto a real number.
By minimizing this real number, one hopes that the obtained parameters provide
a useful result for the problem.
One common loss function (especially in deep learning) is simply the $L^2$ norm between measurement and prediction.
So far we provide three adapted loss functions with our package. However, it is relatively easy to incorporate
custom defined loss functions or import them from packages like [Flux.jl](https://fluxml.ai/Flux.jl/stable/models/losses/).
The interface from Flux.jl is the same as for our loss functions.
## Poisson Loss
As mentioned in [Noise Model](@ref), Poisson shot noise is usually the dominant source of noise.
Therefore one achieves good results by choosing a loss function which considers both the difference between measurement and reconstruction and the noise process.
See [Verveer:98](@cite) and [Mertz:2019](@cite) for more details on that.
As key idea we interpret the measurement as a stochastic process. Our aim is to find a deconvolved image which describes as accurate as possible the measured image.
Mathematically the probability for a certain measurement $Y$ is
$$p(Y(r)|\mu(r)) = \prod_r \frac{\mu(r)^{Y(r)}}{\Gamma(Y(r) + 1)} \exp(- \mu(r))$$
where $Y$ is the measurement, $\mu$ is the expected measurement (ideal measurement without noise) and $\Gamma$ is the generalized factorial function.
In the deconvolution process we get $Y$ as input and want to find the ideal specimen $S$ which results in a measurement $\mu(r) = (S * \text{PSF})(r)$.
Since we want to find the best reconstruction, we look for a $\mu(r)$ so that $p(Y(r) | \mu(r))$ gets as large as possible, because that means
that we find the specimen which describes the measurement with the highest probability.
Instead of maximizing $p(Y(r) | \mu(r))$ a common trick is to minimize $- \log(p(Y(r)|\mu(r)))$.
Mathematically, the optimization of both functions provides the same result, but the latter is numerically more stable.
$$\underset{S(r)}{\arg \min} (- \log(p(Y(r)|\mu(r)))) = \underset{S(r)}{\arg \min} \sum_r \left(\mu(r) + \log(\Gamma(Y(r) + 1)) - Y(r) \log(\mu(r))\right)$$
which is equivalent to
$$\underset{S(r)}{\arg \min}\, L = \underset{S(r)}{\arg \min} \sum_r \left(\mu(r) - Y(r) \log(\mu(r))\right)$$
since the second term only depends on $Y(r)$ but not on $\mu(r)$.
The gradient of $L$ with respect to $\mu(r)$ is simply
$$\nabla L = 1 - \frac{Y(r)}{\mu(r)}.$$
The function $L$ and the gradient $\nabla L$ are needed for any gradient descent optimization algorithm.
The numerical evaluation of the Poisson loss can lead to issues. Since $\mu(r)=0$ can happen for a measurement with zero intensity background. However, the loss is not defined for $\mu \leq 0$. In our source code we set all intensity values below a certain threshold $\epsilon$ to $\epsilon$ itself. This prevents the evaluation of the logarithm at undefined values.
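As a minimal sketch (not the package's internal implementation), the loss and its gradient with respect to $\mu$ can be written elementwise, including the clamping by $\epsilon$ described above:

```julia
# Sketch of L = Σ (μ - Y log μ) and ∇L = 1 - Y/μ, where `μ` is the current
# forward-model output and `Y` the measurement.
function poisson_loss_sketch(μ, Y; ϵ=1e-8)
    μc = max.(μ, ϵ)                  # avoid evaluating log at values ≤ 0
    loss = sum(μc .- Y .* log.(μc))
    grad = 1 .- Y ./ μc
    return loss, grad
end
```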
## Scaled Gaussian Loss
It is well known that the Poisson density function behaves similarly to a Gaussian density function for $\mu\gg 1$. This approximation is valid for almost all use cases in microscopy, since regions of interest in an image usually consist of many photons rather than a single measured photon.
Mathematically the Poisson probability can be approximately (using [Stirling's formula](https://en.wikipedia.org/wiki/Stirling's_approximation) in the derivation) expressed as:
$$p(Y(r)|\mu(r)) \approx \prod_r \frac{\exp \left(-\frac{(Y(r)-\mu(r))^2}{2 \mu(r) }\right)}{\sqrt{2 \pi \mu(r) }}$$
Applying the negative logarithm we get for the loss function:
$$\underset{S(r)}{\arg \min}\, L = \underset{S(r)}{\arg \min} \sum_r \frac12 \log(\mu(r)) + \frac{(Y(r)-\mu(r))^2}{2 \mu(r)}$$
The gradient is given by:
$\nabla L = \frac{\mu(r) + \mu(r)^2 - Y(r)^2}{2 \mu(r)^2}$
## Gaussian Loss
A very common loss in optimization (and Deep Learning) is a simple Gaussian loss. However, this loss is not recommended for low intensity microscopy since it doesn't consider Poisson noise.
However, combined with a suitable regularizer, reasonable results can still be achieved.
The probability is defined as
$$p(Y(r)|\mu(r)) = \prod_r \frac1{\sqrt{2 \pi \sigma^2}} \exp\left(- \frac{(Y(r) - \mu(r))^2}{2 \sigma ^2} \right)$$
where $\sigma$ is the standard deviation of the Gaussian.
Applying the negative logarithm we can simplify the loss to be minimized:
$$\underset{S(r)}{\arg \min}\, L = \underset{S(r)}{\arg \min} \sum_r (Y(r) - \mu(r))^2$$
Since we are looking for the $\mu(r)$ minimizing this expression, $\sigma$ is just a constant offset that is irrelevant for the solution.
This expression is also called *L2 loss*.
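As a sketch, the sum above is a one-liner (the package exposes this loss as `Gauss()` and `gauss_aux`, possibly up to normalization):

```julia
gauss_loss(μ, Y) = sum(abs2, Y .- μ)   # the L2 loss above, up to constant factors
```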
| DeconvOptim | https://github.com/roflmaostc/DeconvOptim.jl.git |
|
[
"MIT"
] | 0.7.3 | 4257575512979d48424448b1cb44e29d6fd5fd4a | docs | 1991 | # Mathematical Optimization
Deconvolution was already described as an optimization problem in the 1970s by [lucy:74](@cite), [Richardson:72](@cite).
Since then, many variants and different kinds of deconvolution algorithms have been presented, mainly based on the Lucy-Richardson concept.
We formulate deconvolution as an inverse physical problem and solve it via a convex loss function so that we can use
fast optimizers to find the optimum. The variables we optimize for are the pixels of the reconstruction $S(r)$. Therefore our reconstruction problem consists of several thousand to billions of variables.
Mathematically the optimization can be written as:
$\underset{S(r)}{\arg \min}\, L(\text{Fwd}(S(r))) + \text{Reg}(S(r))$
where $\text{Fwd}$ represents the forward model (in our case convolution of $S(r)$ with the $\text{PSF}$), $S(r)$ is ideal reconstruction, $L$ the loss function and $\text{Reg}$ is a regularizer. The regularizer
puts in some prior information about the structure of the object.
See the following sections for more details about each part.
## Map Functions
In some cases we want to restrict the optimizer to solutions with $S(r) \geq 0$. Usually one uses box-constrained optimizers or penalties to prevent negativity.
However, in some cases $S(r) < 0$ can lead to issues during the optimization process. For that purpose we can introduce a mapping function.
Instead of optimizing for $S(r)$ we can optimize for some $\hat S(r)$, where $M$ is the mapping function connecting the two:
$S(r)= M(\hat S(r)).$
A simple mapping function leading to $S(r) \geq 0$ is
$M(\hat S(r)) = \hat S(r)^2$
The optimization problem is then given by
$\underset{\hat S(r)}{\arg \min}\, L(\text{Fwd}(M(\hat S(r)))) + \text{Reg}(M(\hat S(r)))$
After the optimization we need to apply $M$ on $\hat S$ to get the reconstructed sample
$S(r) = M(\hat S(r))$
One could also choose different functions $M$ to obtain reconstructions restricted to certain intensity intervals.
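As a toy sketch of the square mapping above (the names here are illustrative; the packaged version of this idea is the `Non_negative()` mapping):

```julia
# Optimize over Ŝ; the reported reconstruction S = M(Ŝ) is non-negative.
M(Ŝ)     = Ŝ .^ 2             # mapping applied before the forward model
M_inv(S) = sqrt.(S)           # map a non-negative initial guess into Ŝ-space
Ŝ₀ = M_inv(fill(1.0, 8, 8))   # hand Ŝ₀ and (loss ∘ forward ∘ M) to the optimizer
```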
| DeconvOptim | https://github.com/roflmaostc/DeconvOptim.jl.git |
|
[
"MIT"
] | 0.7.3 | 4257575512979d48424448b1cb44e29d6fd5fd4a | docs | 3563 | # Physical Background
We want to provide some physical background to the process of (de)convolution in optics.
Optical systems like brightfield microscopes can only collect a certain amount of light emitted by a specimen. This effect (diffraction) leads to a blurred image of that specimen.
Mathematically the lens has a certain frequency support. Within that frequency range, transmission of light is supported.
Information (light) outside of this frequency support (equivalent to high frequency information) is lost.
In the following picture we can see several curves in the frequency domain.
The orange line is a artificial object with a constant frequency spectrum (delta peak in real space).
If such a delta peak is transferred through an optical lens, in real space the object is convolved with the point spread function (PSF).
In frequency space such a convolution is a multiplication of the OTF (OTF is the Fourier transform of the PSF) and the frequency spectrum of the object.
The green dotted curve is the captured image after transmission through the system. Additionally some noise was introduced which can be recognized through some bumps outside of the
OTF support.
![Frequency spectrum](../assets/ideal_frequencies.png)
## Forward Model
Mathematically an ideal imaging process of specimen emitting incoherent light by a lens (or any optical system in general) can be described as:
$Y(r) = (S * \text{PSF})(r)$
where $*$ being a convolution operation, $r$ being the position, $S$ being the sample and $\text{PSF}$ being the point spread function of the system.
One can also introduce a background term $b$ independent of the position, which models a constant signal offset of the imaging sensor:
$Y(r) = (S * \text{PSF})(r) + b$
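In code, this forward model is a single convolution plus an offset. A sketch using the package's `conv` and `generate_psf` (the sample `S` and background `b` here are made up for illustration):

```julia
using DeconvOptim

S = rand(Float32, 64, 64)                   # assumed specimen
psf = Float32.(generate_psf(size(S), 10))   # pupil radius of 10 pixels
b = 0.1f0                                   # constant background offset
Y = conv(S, psf) .+ b                       # ideal (noise-free) measurement
```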
In frequency space (Fourier transforming the above equation) the equation with $b=0$ is:
$\tilde Y(k) = (\tilde S \cdot \tilde{\text{PSF}})(k),$
where $k$ is the spatial frequency and $\cdot$ represents term-wise multiplication (this is due to the [convolution theorem of the Fourier transform](https://en.wikipedia.org/wiki/Convolution_theorem)). From that equation it is clear why the green and blue lines in the plot look very similar: the orange line is constant, so we basically multiply the OTF with a constant.
## Noise Model
However, the physical description (forward model) should also contain a noise term to reflect the measurement process in reality more accurately.
$Y(r) = (S * \text{PSF})(r) + N(r) = \mu(r) + N(r)$
where $N$ is a noise term.
In fluorescence microscopy the dominant noise is usually *Poisson shot noise* (see [Mertz:2019](@cite)).
The origin of that noise is the quantum nature of photons. Since the measurement process spans a time $T$, only a discrete number of photons is detected (in a real experiment the number of photons per pixel is usually on the order of $10^1 - 10^3$). Note that this noise is not introduced by the sensor; it is purely an effect of the quantum nature of light.
We can interpret every sensor pixel as a discrete random variable $X$. The expected value of that pixel would be $\mu(r)$ (the true specimen convolved with the $\text{PSF}$). Due to noise, the system randomly measures a signal for $X$ according to the Poisson distribution:
$f(y, \mu) = \frac{\mu^y \exp(-\mu)}{\Gamma(y + 1)}$
where $f$ is the probability density distribution, $y$ the measured value of the sensor, $\mu$ the expected value and $\Gamma$ the generalized factorial function ([Gamma function](https://en.wikipedia.org/wiki/Gamma_function)).
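Simulating this measurement process only requires drawing one Poisson sample per pixel. A sketch using Noise.jl, as in the package examples (the second argument scales the photon budget):

```julia
using DeconvOptim, Noise

μ = conv(rand(Float32, 64, 64), Float32.(generate_psf((64, 64), 10)))
Y = poisson(μ, 300)   # one Poisson sample per pixel, ≈300 photons at peak
```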
| DeconvOptim | https://github.com/roflmaostc/DeconvOptim.jl.git |
|
[
"MIT"
] | 0.7.3 | 4257575512979d48424448b1cb44e29d6fd5fd4a | docs | 2185 | # Regularizer
Regularizers are commonly used in inverse problems and especially in deconvolution to obtain solutions which are optimal with respect to some prior.
So far we have included three common regularizers. A regularizer takes the current reconstruction $S(r)$ as argument and returns a scalar value. This value is added
to the loss function and minimized along with it.
Each regularizer produces some characteristic image styles.
# Good's Roughness (GR)
The Good's roughness definition was taken from [Good:71](@cite) and [Verveer:98](@cite).
For Good's roughness several identical expressions can be derived. We implemented the following one:
$\text{Reg}(S(r)) = \sum_r \sqrt{S(r)} (\Delta_N \sqrt{S})(r)$
where $N$ is the dimension of $S(r)$. $\sqrt S$ is applied elementwise.
$(\Delta_N \sqrt{S})(r)$ is the $N$-dimensional discrete Laplace operator. As a 2D example where $r = (x,y)$:
$(\Delta_N \sqrt{S})(r) = \frac{\sqrt{S(x + s_x, y)} + \sqrt{S(x - s_x, y)} + \sqrt{S(x, y+s_y)} + \sqrt{S(x, y-s_y)} - 4 \cdot \sqrt{S(x, y)}}{s_x \cdot s_y}$
where $s_x$ and $s_y$ are the stencil widths in the respective dimensions. The Laplace operator can be straightforwardly generalized to $N$ dimensions.
# Total Variation (TV)
As the name suggests, Total variation tries to penalize variation in the image intensity. Therefore it sums up the gradient strength at each point
of the image. In 2D this is:
$\text{Reg}(S(r)) = \sum_r |(\nabla S)(r)|$
Since we sum the Euclidean magnitude of the gradient at each point, this is the isotropic form of total variation.
In 2D this is:
$\text{Reg}(S(r)) = \sum_{x,y} \sqrt{|S(x + 1, y) - S(x, y)|^2 + |S(x, y + 1) - S(x, y)|^2}$
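A direct (unoptimized) sketch of this 2D sum, skipping the boundary pixels; the `TV()` regularizer instead generates a fast Tullio kernel:

```julia
# Naive 2D total variation matching the sum above (interior pixels only).
function tv_sketch(S)
    return sum(sqrt(abs2(S[x+1, y] - S[x, y]) + abs2(S[x, y+1] - S[x, y]))
               for x in 1:size(S, 1)-1, y in 1:size(S, 2)-1)
end
```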
# Tikhonov Regularization
The Tikhonov regularizer is not as specifically defined as Good's Roughness or Total Variation. In general, Tikhonov regularization is defined by:
$\text{Reg}(S(r)) = \| (\Gamma S)(r) \|_2^2$
where $\Gamma$ is an operator which can be chosen freely. A common option is the identity operator, which therefore just penalizes high intensity values. Another option is the spatial gradient, which results
in an operator similar to TV. The last option we implemented is the spatial Laplace.
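For instance, with $\Gamma$ chosen as the identity, the regularizer collapses to a plain squared $L^2$ penalty (a sketch of the `mode="identity"` case):

```julia
tikhonov_identity(S) = sum(abs2, S)   # ‖S‖₂², penalizes large intensity values
```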
| DeconvOptim | https://github.com/roflmaostc/DeconvOptim.jl.git |
|
[
"MIT"
] | 0.7.3 | 4257575512979d48424448b1cb44e29d6fd5fd4a | docs | 136 | # Analysis functions
## Quantitative Criteria
```@docs
DeconvOptim.relative_energy_regain
DeconvOptim.normalized_cross_correlation
```
| DeconvOptim | https://github.com/roflmaostc/DeconvOptim.jl.git |
|
[
"MIT"
] | 0.7.3 | 4257575512979d48424448b1cb44e29d6fd5fd4a | docs | 131 | # Deconvolution
```@docs
deconvolution
DeconvOptim.richardson_lucy_iterative
```
## More generic alternative
```@docs
invert
```
| DeconvOptim | https://github.com/roflmaostc/DeconvOptim.jl.git |
|
[
"MIT"
] | 0.7.3 | 4257575512979d48424448b1cb44e29d6fd5fd4a | docs | 95 | # Loss Functions
```@docs
Poisson
poisson_aux
Gauss
gauss_aux
ScaledGauss
scaled_gauss_aux
```
| DeconvOptim | https://github.com/roflmaostc/DeconvOptim.jl.git |
|
[
"MIT"
] | 0.7.3 | 4257575512979d48424448b1cb44e29d6fd5fd4a | docs | 101 | # Mapping Functions
```@docs
Non_negative
Map_0_1
Piecewise_positive
Pow4_positive
Abs_positive
```
| DeconvOptim | https://github.com/roflmaostc/DeconvOptim.jl.git |
|
[
"MIT"
] | 0.7.3 | 4257575512979d48424448b1cb44e29d6fd5fd4a | docs | 89 | # Regularizers
## CPU
```@docs
TV
Tikhonov
GR
TH
HS
```
## CUDA
```@docs
TV_cuda
```
| DeconvOptim | https://github.com/roflmaostc/DeconvOptim.jl.git |
|
[
"MIT"
] | 0.7.3 | 4257575512979d48424448b1cb44e29d6fd5fd4a | docs | 333 | # Util functions
## Convolution Functions
```@docs
conv
conv_psf
plan_conv
plan_conv_psf
DeconvOptim.next_fast_fft_size
```
## Point Spread Function
```@docs
generate_psf
```
## Interpolation and downsampling
```@docs
generate_downsample
my_interpolate
```
## Center Methods
```@docs
center_extract
center_set!
center_pos
```
| DeconvOptim | https://github.com/roflmaostc/DeconvOptim.jl.git |
|
[
"MIT"
] | 0.7.3 | 4257575512979d48424448b1cb44e29d6fd5fd4a | docs | 2187 | # 3D Dataset
We can also deconvolve a 3D dataset with a 3D PSF.
The workflow, especially for the regularizers, must be adapted slightly for 3D.
## Code Example
This example is also hosted in a notebook on [GitHub](https://github.com/roflmaostc/DeconvOptim.jl/blob/master/examples/3D_example.ipynb).
First, load the 3D PSF and image.
```@jldoctest
using Revise, DeconvOptim, TestImages, Images, FFTW, Noise, ImageView
img = convert(Array{Float32}, channelview(load("obj.tif")))
psf = ifftshift(convert(Array{Float32}, channelview(load("psf.tif"))))
psf ./= sum(psf)
# create a blurred, noisy version of that image
img_b = conv(img, psf, [1, 2, 3])
img_n = poisson(img_b, 300);
```
As the next step we need to create the regularizers. With `num_dims` we define how many dimensions our reconstruction image has.
With `sum_dims` we specify which dimensions of those should be included in the regularizing process.
```@jldoctest
reg1 = TV(num_dims=3, sum_dims=[1, 2, 3])
reg2 = Tikhonov(num_dims=3, sum_dims=[1, 2, 3], mode="identity")
```
We can then invoke the deconvolution. For `Tikhonov` using the `identity` mode, a smaller $\lambda$ produces better results. In the first reconstruction we also specified `padding`. This parameter adds some spacing around the reconstructed image to prevent wrap-around effects of the FFT-based deconvolution. However, since we don't have bright objects at the boundary of the image, we don't see an impact of that parameter.
```@jldoctest
@time res, ores = deconvolution(img, psf, regularizer=reg1, loss=Poisson(),
λ=0.05, padding=0.2, iterations=10);
@time res2, ores = deconvolution(img, psf, regularizer=reg2, loss=Poisson(),
λ=0.001, padding=0.0, iterations=10);
```
Finally we can inspect the results:
```@jldoctest
img_comb1 = [img[:, : ,32] res2[:, :, 32] res[:, :, 32] img_n[:, :, 32]]
img_comb2 = [img[:, : ,38] res2[:, :, 38] res[:, :, 38] img_n[:, :, 38]]
img_comb = cat(img_comb1, img_comb2, dims=1)
img_comb ./= maximum(img_comb)
imshow([img[:, :, 20:end] res2[:, :, 20:end] res[:, :, 20:end] img_n[:, :, 20:end]])
colorview(Gray, img_comb)
```
![](../assets/3D_comparison.png)
| DeconvOptim | https://github.com/roflmaostc/DeconvOptim.jl.git |
|
[
"MIT"
] | 0.7.3 | 4257575512979d48424448b1cb44e29d6fd5fd4a | docs | 1607 | # Basic Workflow
In this section we show the workflow for deconvolution of 2D and 3D images using different regularizers.
From these examples one can also understand the different effects of the regularizers.
The picture below shows the general principle of DeconvOptim.jl.
Since we interpret deconvolution as an optimization we initialize the reconstruction variables *rec*.
*rec* is an array of pixels which are the variables we are optimizing for.
Then we can apply some mapping, e.g. to reconstruct only pixels having non-negative intensity values.
Afterwards we compose the *total loss* function. It consists of a regularizing part (weighted with $\lambda$) and a *loss* part.
The latter one compares the current reconstruction with the measured image.
*Total loss* adds both values to a single scalar value. Using Zygote.jl we calculate the gradient with respect to all pixel values of *rec*.
Note that Zygote.jl calculates the gradient in reverse mode. From a performance point of view this is necessary, since the loss function is a mapping from many pixels to a single value ($\text{total loss}: \mathbb{R}^N \mapsto \mathbb{R}$).
We can plug this gradient and the *loss* function into Optim.jl. Optim.jl then minimizes this loss function.
The different parts of the pipeline (mapping, forward, regularizer) can be exchanged and adapted to the users needs.
In most cases changing the regularizer or the number of iterations is enough.
![](../assets/tex/pipeline.svg)
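Conceptually, the pieces compose as in the following sketch, which re-assembles the pipeline by hand with illustrative names and random data; the real `deconvolution` call wires all of this up for you (the regularizer is omitted here for brevity):

```julia
using DeconvOptim, Zygote, Optim, LineSearches

img, psf = rand(Float32, 32, 32), Float32.(generate_psf((32, 32), 8))
measured = conv(img, psf)

mapping(rec)    = rec .^ 2                        # enforce non-negativity
forward(S)      = conv(S, psf)                    # convolutional forward model
total_loss(rec) = poisson_aux(forward(mapping(rec)), measured)

g!(G, rec) = G .= Zygote.gradient(total_loss, rec)[1]
res = Optim.optimize(total_loss, g!, sqrt.(measured),
                     LBFGS(linesearch=BackTracking()),
                     Optim.Options(iterations=15))
rec = mapping(Optim.minimizer(res))
```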
For all options, see the function references.
Via the help of Julia (typing `?` in the REPL) we can also access extensive help.
| DeconvOptim | https://github.com/roflmaostc/DeconvOptim.jl.git |
|
[
"MIT"
] | 0.7.3 | 4257575512979d48424448b1cb44e29d6fd5fd4a | docs | 1856 | # Changing Loss Function: 2D Example
We can also change the loss function. However, the loss is the most important part guaranteeing good results. Therefore choosing different loss functions than the
provided ones, will most likely lead to worse results.
We now compare all implemented loss functions of DeconvOptim.jl.
However, we could also include loss functions of Flux.jl since they have the same interface as our loss functions.
`Poisson()` will most likely produce the best results in presence of Poisson Noise. For Gaussian Noise, `Gauss()` is a suitable option.
`ScaledGauss()` is a mathematical approximation of `Poisson()`.
At the moment `ScaledGauss()` is not recommended because of artifacts in certain images.
## Code Example
This example is also hosted in a notebook on [GitHub](https://github.com/roflmaostc/DeconvOptim.jl/blob/master/examples/changing_loss.ipynb).
```@jldoctest
using Revise, DeconvOptim, TestImages, Images, FFTW, Noise, ImageView
# custom image views
imshow_m(args...) = imshow(cat(args..., dims=3))
h_view(args...) = begin
img = cat(args..., dims=2)
img ./= maximum(img)
colorview(Gray, img)
end
# load test images
img = convert(Array{Float32}, channelview(testimage("resolution_test_512")))
psf = generate_psf(size(img), 30)
# create a blurred, noisy version of that image
img_b = conv(img, psf, [1, 2])
img_n = poisson(img_b, 300);
@time resP, optim_res = deconvolution(img_n, psf, loss=Poisson(), iterations=10)
@show optim_res
@time resG, optim_res = deconvolution(img_n, psf, loss=Gauss(), iterations=10)
@show optim_res
@time resSG, optim_res = deconvolution(img_n, psf, loss=ScaledGauss(), iterations=10)
@show optim_res
h_view(resP, resG, resSG)
```
The left image is `Poisson()`, in the middle `Gauss()`. The right image is `ScaledGauss()`.
![](../assets/loss_comparison.png)
| DeconvOptim | https://github.com/roflmaostc/DeconvOptim.jl.git |
|
[
"MIT"
] | 0.7.3 | 4257575512979d48424448b1cb44e29d6fd5fd4a | docs | 4044 | # Changing Regularizers: 2D Example
In this section we show how to change the regularizer and what are the different effects of it.
The arguments of `deconvolution` we consider here are `regularizer` and $\lambda$. `regularizer` specifies which regularizer is used.
$\lambda$ specifies how strong the regularizer is weighted. The larger $\lambda$ the more you see the typical styles introduced by the regularizers.
## Initializing
This example is also hosted in a notebook on [GitHub](https://github.com/roflmaostc/DeconvOptim.jl/blob/master/examples/changing_regularizers.ipynb).
Load the required modules for these examples:
```@jldoctest
using DeconvOptim, TestImages, Images, FFTW, Noise, ImageView
# custom image views
imshow_m(args...) = imshow(cat(args..., dims=3))
h_view(args...) = begin
img = cat(args..., dims=2)
img ./= maximum(img)
colorview(Gray, img)
end
```
As the next step we can prepare a noisy, blurred image.
```@jldoctest
# load test images
img = convert(Array{Float32}, channelview(testimage("resolution_test_512")))
psf = generate_psf(size(img), 30)
# create a blurred, noisy version of that image
img_b = conv(img, psf, [1, 2])
img_n = poisson(img_b, 300);
h_view(img, img_b, img_n)
```
![](../assets/input_comparison.png)
## Let's test Good's roughness (GR)
In this part we look at the results produced with a GR regularizer. After inspecting the results, it becomes clear that the benefit of 100 iterations is not really visible.
In most cases $\approx 15$ iterations produce good results. By executing `GR()` we in fact create a function which takes an array and returns
a single value.
```jldoctest
@time resGR100, optim_res = deconvolution(img_n, psf, regularizer=GR(), iterations=100)
@show optim_res
@time resGR15, optim_res = deconvolution(img_n, psf, regularizer=GR(), iterations=15)
@show optim_res
@time resGR15_2, optim_res = deconvolution(img_n, psf, λ=0.05, regularizer=GR(), iterations=15)
@show optim_res
h_view(img_n, resGR100, resGR15, resGR15_2)
```
![](../assets/GR_comparison.png)
## Let's test Total Variation (TV)
TV produces characteristic staircase artifacts. However, the results it produces are usually noise free and clear.
```@jldoctest
@time resTV50, optim_res = deconvolution(img_n, psf, regularizer=TV(), iterations=50)
@show optim_res
@time resTV15, optim_res = deconvolution(img_n, psf, regularizer=TV(), iterations=15)
@show optim_res
@time resTV15_2, optim_res = deconvolution(img_n, psf, λ=0.005, regularizer=TV(), iterations=15)
@show optim_res
h_view(img_n, resTV50, resTV15, resTV15_2)
```
![](../assets/TV_comparison.png)
## Let's test Tikhonov
Tikhonov is not defined as precisely as the other two regularizers. Therefore we offer three different modes which differ quite a lot from each other. However, the results all look very similar.
```@jldoctest
@time resTik1, optim_res = deconvolution(img_n, psf, λ=0.001, regularizer=Tikhonov(), iterations=15)
@show optim_res
@time resTik2, optim_res = deconvolution(img_n, psf, λ=0.0001,
regularizer=Tikhonov(mode="spatial_grad_square"), iterations=15)
@show optim_res
@time resTik3, optim_res = deconvolution(img_n, psf, λ=0.0001,
regularizer=Tikhonov(mode="identity"), iterations=15)
@show optim_res
h_view(img_n, resTik1, resTik2, resTik3)
```
![](../assets/Tik_comparison.png)
## Let's test without regularizers
Usually optimizing without a regularizer does not produce good results. The reason is that the deconvolution tries to enhance high frequencies more and more with increasing iteration number.
However, high frequencies have low contrast and therefore the algorithm mostly enhances noise content (which is present in all frequency regions).
```@jldoctest
@time res100, optim_res = deconvolution(img_n, psf, regularizer=nothing, iterations=50)
@show optim_res
@time res15, optim_res = deconvolution(img_n, psf, regularizer=nothing, iterations=15)
@show optim_res
h_view(img_n, 0.7 .* res100, res15)
```
![](../assets/no_reg_comparison.png)
| DeconvOptim | https://github.com/roflmaostc/DeconvOptim.jl.git |
|
[
"MIT"
] | 0.7.3 | 4257575512979d48424448b1cb44e29d6fd5fd4a | docs | 848 | # CUDA
We also support [CUDA.jl](https://github.com/JuliaGPU/CUDA.jl).
## Load
Before using a `CuArray`, simply invoke:
```julia
using CUDA
```
Our routines require that the input arrays are either all `Array`s or all `CuArray`s. To get the deconvolution running on the GPU, both the PSF and the measured
array need to be `CuArray`s.
See also [our 3D example here](https://github.com/roflmaostc/DeconvOptim.jl/blob/master/examples/cuda_3D.ipynb).
## Issues with Regularizers
However, our approach to express the regularizers with [Tullio.jl](https://github.com/mcabbott/Tullio.jl) is currently not performant with GPUs.
Therefore, to use `CuArray`s with regularizers, you need to choose [`TV_cuda`](@ref).
Other regularizers are not yet supported; we hope that Tullio.jl will one day be mature enough to produce
reasonably fast gradients for CUDA kernels as well.
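A minimal sketch, assuming a CUDA-capable GPU and a measured image `img_n` and PSF `psf` living on the CPU:

```julia
using DeconvOptim, CUDA

img_c, psf_c = CuArray(img_n), CuArray(psf)
reg = TV_cuda(num_dims=2)                 # the GPU-compatible TV regularizer
res, o = deconvolution(img_c, psf_c, regularizer=reg)
res_cpu = Array(res)                      # copy the result back to the CPU
```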
| DeconvOptim | https://github.com/roflmaostc/DeconvOptim.jl.git |
|
[
"MIT"
] | 0.7.3 | 4257575512979d48424448b1cb44e29d6fd5fd4a | docs | 534 | # More complex Invert
We also provide functionality to invert problems which are not a straightforward deconvolution
like multi view deconvolution or a problem where several measurements with different properties and forward models are available.
The idea is that a `forward` model, a initial guess and the according measurements are in principle enough to invert the problem.
## Example
Look into the [examples folder](https://github.com/roflmaostc/DeconvOptim.jl/tree/master/examples/generic_invert.ipynb) to see how it can work.
| DeconvOptim | https://github.com/roflmaostc/DeconvOptim.jl.git |
|
[
"MIT"
] | 0.7.3 | 4257575512979d48424448b1cb44e29d6fd5fd4a | docs | 1451 | # Performance Tips
## Regularizer
The regularizers are generated via metaprogramming when called. Every time you call `TV()` it creates a new version which is evaluated with
`eval`, and the first `deconvolution` call then has to compile this piece of code.
To avoid recompiling it every time, define the regularizer once and use it multiple times without newly defining it:
```julia
reg = TV()
```
which is then later used as a variable. In a notebook or REPL environment just define it in a different cell.
## No Regularizer
Often the results are good without a regularizer, but the optimization then needs to be stopped early (e.g. with `iterations=20`).
This increases the performance drastically, but might lead to more artifacts in certain regions.
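For example (assuming `img_n` and `psf` as in the README example):

```julia
res, o = deconvolution(img_n, psf, regularizer=nothing, iterations=20)
```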
## Optimizer
### L-BFGS
You can also try to adjust the settings of the [L-BFGS algorithm](https://julianlsolvers.github.io/Optim.jl/stable/#algo/lbfgs/) of Optim.jl.
Try changing `m` in `opt=LBFGS(linesearch=BackTracking(), m=10)`.
`m` is the history length of the L-BFGS algorithm. Smaller is usually faster, but might lead to worse results.
See also [Wikipedia](https://en.wikipedia.org/wiki/Limited-memory_BFGS).
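A sketch of passing a tuned optimizer to `deconvolution` (assuming `img_n` and `psf` are given):

```julia
using DeconvOptim, Optim, LineSearches

opt = LBFGS(linesearch=BackTracking(), m=5)   # smaller history, usually faster
res, o = deconvolution(img_n, psf, opt=opt, iterations=20)
```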
### Line Search
L-BFGS uses [LineSearches.jl](https://github.com/JuliaNLSolvers/LineSearches.jl). In our examples, `BackTracking` turned
out to be the fastest, but it might be worth trying different ones.
### Iterations
Set the keyword `iterations` to a lower number (e.g. `iterations=20`) if you want to stop the deconvolution early.
Of course, the results might be worse in that case.
| DeconvOptim | https://github.com/roflmaostc/DeconvOptim.jl.git |
|
[
"MPL-2.0"
] | 0.5.6 | 576c5936c2017dc614ae8001efd872d50909e041 | code | 534 | using Documenter, SuiteSparseMatrixCollection
makedocs(
modules = [SuiteSparseMatrixCollection],
doctest = true,
linkcheck = true,
format = Documenter.HTML(
assets = ["assets/style.css"],
prettyurls = get(ENV, "CI", nothing) == "true",
ansicolor = true,
),
sitename = "SuiteSparseMatrixCollection.jl",
pages = ["Home" => "index.md", "Reference" => "reference.md"],
)
deploydocs(
repo = "github.com/JuliaSmoothOptimizers/SuiteSparseMatrixCollection.jl.git",
push_preview = true,
devbranch = "main",
)
| SuiteSparseMatrixCollection | https://github.com/JuliaSmoothOptimizers/SuiteSparseMatrixCollection.jl.git |
|
[
"MPL-2.0"
] | 0.5.6 | 576c5936c2017dc614ae8001efd872d50909e041 | code | 596 | using SuiteSparseMatrixCollection
# real rectangular matrices
rect = ssmc[
(100 .≤ ssmc.nrows .≤ 1000) .& (100 .≤ ssmc.ncols .≤ 1000) .& (ssmc.nrows .!= ssmc.ncols) .& (ssmc.real .== true),
:,
]
# all symmetric positive definite matrices
posdef = ssmc[
(ssmc.numerical_symmetry .== 1) .& (ssmc.positive_definite .== true) .& (ssmc.real .== true),
:,
]
# small symmetric positive definite matrices
posdef_small = ssmc[
(ssmc.numerical_symmetry .== 1) .& (ssmc.positive_definite .== true) .& (ssmc.real .== true) .& (ssmc.nrows .≤ 200),
:,
]
fetch_ssmc(posdef_small, format = "MM")
| SuiteSparseMatrixCollection | https://github.com/JuliaSmoothOptimizers/SuiteSparseMatrixCollection.jl.git |
|
[
"MPL-2.0"
] | 0.5.6 | 576c5936c2017dc614ae8001efd872d50909e041 | code | 585 | module SuiteSparseMatrixCollection
using Pkg.Artifacts
using DataFrames
using JLD2
import REPL.TerminalMenus
import Base.format_bytes, Base.SHA1
import Printf.@sprintf
export ssmc_db, fetch_ssmc, ssmc_matrices, ssmc_formats, installed_ssmc
export delete_ssmc, delete_all_ssmc, manage_ssmc
const ssmc_jld2 = joinpath(@__DIR__, "..", "src", "ssmc.jld2") |> normpath
const ssmc_artifacts = joinpath(@__DIR__, "..", "Artifacts.toml") |> normpath
"Formats in which matrices are available."
const ssmc_formats = ("MM", "RB")
include("ssmc_database.jl")
include("ssmc_manager.jl")
end
| SuiteSparseMatrixCollection | https://github.com/JuliaSmoothOptimizers/SuiteSparseMatrixCollection.jl.git |
|
[
"MPL-2.0"
] | 0.5.6 | 576c5936c2017dc614ae8001efd872d50909e041 | code | 2169 | """
ssmc_db(; verbose::Bool=false)
Load the database of the SuiteSparseMatrixCollection.
A summary of the statistics available for each matrix can be found at https://www.cise.ufl.edu/research/sparse/matrices/stats.html.
"""
function ssmc_db(; verbose::Bool = false)
file = jldopen(ssmc_jld2, "r")
ssmc = file["df"]
last_rev_date = file["last_rev_date"]
close(file)
verbose && println("loaded database with revision date $last_rev_date")
return ssmc
end
"""
fetch_ssmc(group::AbstractString, name::AbstractString; format="MM")
Download the matrix with name `name` in group `group`.
Return the path where the matrix is stored.
"""
function fetch_ssmc(group::AbstractString, name::AbstractString; format = "MM")
group_and_name = group * "/" * name * "." * format
# download lazy artifact if not already done and obtain path
loc = ensure_artifact_installed(group_and_name, ssmc_artifacts)
return joinpath(loc, name)
end
"""
fetch_ssmc(matrices; format="MM")
Download matrices from the SuiteSparseMatrixCollection.
The argument `matrices` should be a `DataFrame` or `DataFrameRow`.
An array of strings is returned with the paths where the matrices are stored.
"""
function fetch_ssmc(matrices; format = "MM")
format ∈ ssmc_formats || error("unknown format $format")
paths = String[]
for (group, name) ∈ zip(matrices.group, matrices.name)
push!(paths, fetch_ssmc(group, name, format = format))
end
return paths
end
"""
ssmc_matrices(ssmc, group, name)
Return a `DataFrame` of matrices whose group contains the string `group` and whose
name contains the string `name`.
ssmc_matrices(ssmc, name)
ssmc_matrices(ssmc, "", name)
Return a `DataFrame` of matrices whose name contains the string `name`.
ssmc_matrices(ssmc, group, "")
Return a `DataFrame` of matrices whose group contains the string `group`.
Example: `ssmc_matrices(ssmc, "HB", "bcsstk")`.
"""
function ssmc_matrices(ssmc::DataFrame, group::AbstractString, name::AbstractString)
ssmc[occursin.(group, ssmc.group) .& occursin.(name, ssmc.name), :]
end
ssmc_matrices(ssmc::DataFrame, name::AbstractString) = ssmc_matrices(ssmc, "", name)
| SuiteSparseMatrixCollection | https://github.com/JuliaSmoothOptimizers/SuiteSparseMatrixCollection.jl.git |
|
[
"MPL-2.0"
] | 0.5.6 | 576c5936c2017dc614ae8001efd872d50909e041 | code | 5184 | """
installed_ssmc()
Return a vector of tuples `(group, name, format)` of all installed matrices from the SuiteSparseMatrixCollection.
"""
function installed_ssmc()
database = Artifacts.select_downloadable_artifacts(ssmc_artifacts, include_lazy = true)
installed_matrices = Tuple{String, String, String}[]
for artifact_name in keys(database)
hash = Base.SHA1(database[artifact_name]["git-tree-sha1"])
if artifact_exists(hash)
matrix = tuple(split(artifact_name, ['/', '.'])...)
push!(installed_matrices, matrix)
end
end
return installed_matrices
end
"""
delete_ssmc(group::AbstractString, name::AbstractString, format = "MM")
Remove the matrix with name `name` in group `group` and format `format`.
"""
function delete_ssmc(group::AbstractString, name::AbstractString, format = "MM")
artifact_name = group * "/" * name * "." * format
meta = artifact_meta(artifact_name, ssmc_artifacts)
(meta === nothing) && error("Cannot locate artifact $(artifact_name) in Artifacts.toml.")
hash = Base.SHA1(meta["git-tree-sha1"])
if !artifact_exists(hash)
println("The artifact $(artifact_name) was not found on the disk.")
else
ssmc_nbytes = artifact_path(hash) |> totalsize
remove_artifact(hash)
println("The artifact $(artifact_name) has been deleted, freeing up $(ssmc_nbytes |> format_bytes).")
end
end
"""
delete_all_ssmc()
Remove all matrices from the SuiteSparseMatrixCollection.
"""
function delete_all_ssmc()
hashes = Artifacts.extract_all_hashes(ssmc_artifacts, include_lazy = true)
ssmc_nbytes = 0
for hash in hashes
if artifact_exists(hash)
ssmc_nbytes += artifact_path(hash) |> totalsize
remove_artifact(hash)
end
end
if ssmc_nbytes == 0
println("No matrices to remove. All SSMC matrices from the SuiteSparseMatrixCollection have already been cleared.")
else
println("All matrices from the SuiteSparseMatrixCollection have been deleted for a total of $(ssmc_nbytes |> format_bytes).")
end
end
"""
manage_ssmc(; sort_by::Symbol=:name, rev::Bool=false)
Opens a prompt allowing the user to selectively remove matrices from the SuiteSparseMatrixCollection.
By default, the matrices are sorted by name.
Alternatively, you can sort them by file size on disk by specifying `sort_by=:size`.
Use `rev=true` to reverse the sort order.
"""
function manage_ssmc(; sort_by::Symbol = :name, rev::Bool = false)
# Get all installed ssmc matrices
ssmc_hashes = SHA1[]
ssmc_matrices = String[]
ssmc_sizes = Int[]
database = Artifacts.select_downloadable_artifacts(ssmc_artifacts, include_lazy = true)
for ssmc_matrix in keys(database)
ssmc_hash = SHA1(database[ssmc_matrix]["git-tree-sha1"])
if artifact_exists(ssmc_hash)
push!(ssmc_hashes, ssmc_hash)
push!(ssmc_matrices, ssmc_matrix)
ssmc_nbytes = artifact_path(ssmc_hash) |> totalsize
push!(ssmc_sizes, ssmc_nbytes)
end
end
if isempty(ssmc_matrices)
println("No matrices to remove. All SSMC matrices from the SuiteSparseMatrixCollection have already been cleared.")
else
# Sort ssmc_problems and ssmc_sizes
if sort_by === :name
perm = sortperm(ssmc_matrices; rev)
elseif sort_by === :size
perm = sortperm(ssmc_sizes; rev)
else
error("unsupported sort value: :$sort_by (allowed: :name, :size)")
end
ssmc_hashes = ssmc_hashes[perm]
ssmc_matrices = ssmc_matrices[perm]
ssmc_sizes = ssmc_sizes[perm]
# Build menu items
menu_items = similar(ssmc_matrices)
for i in eachindex(ssmc_matrices, ssmc_sizes)
menu_items[i] = @sprintf("%-30s (%s)", ssmc_matrices[i], ssmc_sizes[i] |> Base.format_bytes)
end
# Prompt user
ts = @sprintf("%s", sum(ssmc_sizes) |> Base.format_bytes)
manage_ssmc_menu = TerminalMenus.request(
"Which problems should be removed (total size on disk: $ts)?",
TerminalMenus.MultiSelectMenu(menu_items; pagesize = 10, charset = :ascii),
)
# Handle no selection
if isempty(manage_ssmc_menu)
println("No matrices have been removed.")
else
# Otherwise prompt for confirmation
println("\nThe following matrices have been marked for removal:\n")
index_items = Int.(manage_ssmc_menu)
for item in menu_items[sort(index_items)]
println(" ", item)
end
print("\nAre you sure that these should be removed? [Y/n]: ")
answer = strip(readline()) |> lowercase
# If removal is confirmed, delete the relevant files
if isempty(answer) || answer == "yes" || answer == "y"
for index_item in index_items
ssmc_hash = ssmc_hashes[index_item]
remove_artifact(ssmc_hash)
end
println("Removed ", length(manage_ssmc_menu), " matrices.")
else
println("Removed 0 matrices.")
end
end
end
end
# Return the total size on disk of a file or directory
function totalsize(path::String)
if !isdir(path)
return filesize(path)
end
total = 0
for (root, dirs, files) in walkdir(path)
total += root |> filesize
for file in files
total += joinpath(root, file) |> filesize
end
end
return total
end
| SuiteSparseMatrixCollection | https://github.com/JuliaSmoothOptimizers/SuiteSparseMatrixCollection.jl.git |
|
[
"MPL-2.0"
] | 0.5.6 | 576c5936c2017dc614ae8001efd872d50909e041 | code | 2398 | using SuiteSparseMatrixCollection
using Test
@testset "fetch test" begin
ssmc = ssmc_db()
matrices = ssmc[
(ssmc.numerical_symmetry .== 1) .& (ssmc.positive_definite .== false) .& (ssmc.real .== true) .& (ssmc.nrows .≤ 10),
:,
]
@test size(matrices, 1) == 3
for (group, name) ∈ zip(matrices.group, matrices.name)
for format ∈ SuiteSparseMatrixCollection.ssmc_formats
path = fetch_ssmc(group, name, format = format)
@test isdir(path)
ext = format == "MM" ? "mtx" : "rb"
@test isfile(joinpath(path, "$(name).$(ext)"))
end
end
end
@testset "select test" begin
ssmc = ssmc_db()
bcsstk = ssmc_matrices(ssmc, "bcsstk")
@test size(bcsstk, 1) == 40
bcsstk_small = bcsstk[bcsstk.nrows .≤ 100, :]
@test size(bcsstk_small, 1) == 2
hb_bcsstk = ssmc_matrices(ssmc, "HB", "bcsstk")
@test size(hb_bcsstk, 1) == 33
hb_matrices = ssmc_matrices(ssmc, "HB", "")
@test size(hb_matrices, 1) == 292
end
@testset "fetch by name test" begin
ssmc = ssmc_db()
subset = ssmc_matrices(ssmc, "Belcastro", "")
@test size(subset, 1) == 3
matrices = ssmc_matrices(ssmc, "Belcastro", "")
for (group, name) ∈ zip(matrices.group, matrices.name)
for format ∈ SuiteSparseMatrixCollection.ssmc_formats
path = fetch_ssmc(group, name, format = format)
@test isdir(path)
ext = format == "MM" ? "mtx" : "rb"
@test isfile(joinpath(path, "$(name).$(ext)"))
end
end
end
@testset "installed test" begin
downloaded_matrices = installed_ssmc()
for matrix ∈ [
("Pajek", "Stranke94", "RB"),
("Belcastro", "human_gene2", "MM"),
("Mycielski", "mycielskian2", "RB"),
("Mycielski", "mycielskian2", "MM"),
("Pajek", "Stranke94", "MM"),
("Mycielski", "mycielskian3", "MM"),
("Mycielski", "mycielskian3", "RB"),
("Belcastro", "human_gene2", "RB"),
("Belcastro", "mouse_gene", "RB"),
("Belcastro", "mouse_gene", "MM"),
("Belcastro", "human_gene1", "MM"),
("Belcastro", "human_gene1", "RB"),
]
@test matrix ∈ downloaded_matrices
end
end
@testset "delete test" begin
path = fetch_ssmc("HB", "1138_bus", format = "MM")
delete_ssmc("HB", "1138_bus", "MM")
@test !isdir(path)
path = fetch_ssmc("HB", "illc1033", format = "RB")
delete_ssmc("HB", "illc1033", "RB")
@test !isdir(path)
delete_all_ssmc()
@test installed_ssmc() == Tuple{String, String, String}[]
end
| SuiteSparseMatrixCollection | https://github.com/JuliaSmoothOptimizers/SuiteSparseMatrixCollection.jl.git |
|
[
"MPL-2.0"
] | 0.5.6 | 576c5936c2017dc614ae8001efd872d50909e041 | code | 929 | using Pkg.Artifacts
using ArtifactUtils
using DataFrames
using JLD2
const ssmc_url = "https://sparse.tamu.edu"
const ssmc_jld2 = joinpath(@__DIR__, "..", "src", "ssmc.jld2")
db = jldopen(ssmc_jld2, "r")
matrices = db["df"]
# mat files are not recognized as artifacts
formats = ("MM", "RB")
nmatrices = size(matrices, 1) * length(formats)
const artifacts_toml = joinpath(@__DIR__, "..", "Artifacts.toml")
global k = 0
fails = String[]
for format ∈ formats
global k
for matrix ∈ eachrow(matrices)
k += 1
url = "$ssmc_url/$format/$(matrix.group)/$(matrix.name).tar.gz"
println("$k/$nmatrices: ", url)
try
add_artifact!(
artifacts_toml,
"$(matrix.group)/$(matrix.name).$(format)",
url,
lazy = true,
force = true,
)
catch
push!(fails, matrix.name)
end
end
end
length(fails) > 0 && @warn "the following matrices could not be downloaded" fails
| SuiteSparseMatrixCollection | https://github.com/JuliaSmoothOptimizers/SuiteSparseMatrixCollection.jl.git |