selectors.insert(GadgetType::VarDivRoundBig3, vec![selector]);
GadgetConfig {
columns,
tables,
selectors,
..gadget_config
}
}
}
impl<F: PrimeField> Gadget<F> for VarDivRoundBig3Chip<F> {
fn name(&self) -> String {
"VarDivBig3RoundChip".to_string()
}
fn num_cols_per_op(&self) -> usize {
Self::num_cols_per_op()
}
fn num_inputs_per_row(&self) -> usize {
(self.config.columns.len() - 1) / self.num_cols_per_op()
}
fn num_outputs_per_row(&self) -> usize {
self.num_inputs_per_row()
}
fn op_row_region(
&self,
region: &mut Region<F>,
row_offset: usize,
vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
single_inputs: &Vec<&AssignedCell<F, F>>,
) -> Result<Vec<AssignedCell<F, F>>, Error> {
let a_vec = &vec_inputs[0];
let b = &single_inputs[1];
let c_shift_base = (-(1_i64 << 62)) as i128;
let num_rows = self.config.num_rows as i128;
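// Rounded variable division a / b: `a` is first shifted by `c_shift`, the largest multiple of
// `b` not exceeding 2^62, so the shifted dividend is non-negative; the rounded quotient is then
// un-shifted by `c_shift / b`. The remainder witness r = 2 * (a_pos - c_pos * b) + b and its
// complement 2b - r are each split into three base-`num_rows` limbs, presumably for lookup-based
// range checks.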
if self.config.use_selectors {
let selector = self
.config
.selectors
.get(&GadgetType::VarDivRoundBig3)
.unwrap()[0];
selector.enable(region, row_offset)?;
}
b.copy_advice(
|| "",
region,
self.config.columns[self.config.columns.len() - 1],
row_offset,
)?;
let mut div_out = vec![];
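// Per-op column layout within each block of num_cols_per_op() columns:
// [a, c (rounded quotient), r, (2b - r) limbs p2/p1/p0, r limbs p2/p1/p0].
// The divisor b is copied once into the last column of the row and shared by every op.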
for (i, a) in a_vec.iter().enumerate() {
let offset = i * self.num_cols_per_op();
a.copy_advice(|| "", region, self.config.columns[offset], row_offset)
.unwrap();
let div_mod = a.value().zip(b.value()).map(|(a, b)| {
let b = convert_to_u128(b);
let c_shift = (-c_shift_base) as u128 / b * b;
let div_inp_min_val_pos = F::from(c_shift as u64);
let a_pos = *a + div_inp_min_val_pos;
let a = convert_to_u128(&a_pos);
let c_pos = a.rounded_div(b);
let c = c_pos as i128 - (c_shift / b) as i128;
let rem_floor = (a as i128) - (c_pos * b) as i128;
let r = 2 * rem_floor + (b as i128);
(c, r)
});
let br_split = div_mod.zip(b.value()).map(|((_, r), b)| {
let b = convert_to_u128(b) as i128;
let val = 2 * b - r;
let p2 = val / (num_rows * num_rows);
let p1 = (val % (num_rows * num_rows)) / num_rows;
let p0 = val % num_rows;
(p2, p1, p0)
});
let r_split = div_mod.map(|(_, r)| {
let p2 = r / (num_rows * num_rows);
let p1 = (r % (num_rows * num_rows)) / num_rows;
let p0 = r % num_rows;
(p2, p1, p0)
});
let div_cell = region.assign_advice(
|| "",
self.config.columns[offset + 1],
row_offset,
|| {
div_mod.map(|(c, _)| {
let offset = F::from(-c_shift_base as u64);
let c = F::from((c - c_shift_base) as u64);
c - offset
})
},
)?;
let _mod_cell = region.assign_advice(
|| "",
self.config.columns[offset + 2],
row_offset,
|| div_mod.map(|(_, r)| F::from(r as u64)),
)?;
let _br_split_cell_2 = region.assign_advice(
|| "",
self.config.columns[offset + 3],
row_offset,
|| br_split.map(|(p2, _, _)| F::from(p2 as u64)),
)?;
let _br_split_cell_1 = region.assign_advice(
|| "",
self.config.columns[offset + 4],
row_offset,
|| br_split.map(|(_, p1, _)| F::from(p1 as u64)),
)?;
let _br_split_cell_0 = region.assign_advice(
|| "",
self.config.columns[offset + 5],
row_offset,
|| br_split.map(|(_, _, p0)| F::from(p0 as u64)),
)?;
let _r_split_cell_2 = region.assign_advice(
|| "",
self.config.columns[offset + 6],
row_offset,
|| r_split.map(|(p2, _, _)| F::from(p2 as u64)),
)?;
let _r_split_cell_1 = region.assign_advice(
|| "",
self.config.columns[offset + 7],
row_offset,
|| r_split.map(|(_, p1, _)| F::from(p1 as u64)),
)?;
let _r_split_cell_0 = region.assign_advice(
|| "",
self.config.columns[offset + 8],
row_offset,
|| r_split.map(|(_, _, p0)| F::from(p0 as u64)),
)?;
div_out.push(div_cell);
}
Ok(div_out)
}
fn forward(
&self,
mut layouter: impl Layouter<F>,
vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
single_inputs: &Vec<&AssignedCell<F, F>>,
) -> Result<Vec<AssignedCell<F, F>>, Error> {
let mut inps = vec_inputs[0].clone();
let initial_len = inps.len();
let default = &single_inputs[0];
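// Pad the inputs with the first single input (typically the zero constant) so the op count
// fills complete rows; the outputs produced for the padding are dropped below.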
while inps.len() % self.num_inputs_per_row() != 0 {
inps.push(&default);
}
let res = self.op_aligned_rows(
layouter.namespace(|| "var_div_big3"),
&vec![inps],
single_inputs,
)?;
Ok(res[..initial_len].to_vec())
}
}
// Generics
pub mod averager;
pub mod arithmetic;
pub mod shape;
// Concrete implementations
pub mod avg_pool_2d;
pub mod batch_mat_mul;
pub mod conv2d;
pub mod div_fixed;
pub mod fully_connected;
pub mod logistic;
pub mod max_pool_2d;
pub mod mean;
pub mod noop;
pub mod pow;
pub mod rsqrt;
pub mod softmax;
pub mod sqrt;
pub mod square;
pub mod squared_diff;
pub mod tanh;
pub mod update;
// Special: dag
pub mod dag;
// Special: layer
pub mod layer;
use std::{collections::HashMap, rc::Rc};
use halo2_proofs::{
circuit::{AssignedCell, Layouter},
halo2curves::ff::PrimeField,
plonk::Error,
};
use crate::{gadgets::gadget::GadgetConfig, utils::helpers::broadcast};
use super::layer::{AssignedTensor, CellRc};
pub mod add;
pub mod div_var;
pub mod mul;
pub mod sub;
pub trait Arithmetic<F: PrimeField> {
fn gadget_forward(
&self,
layouter: impl Layouter<F>,
vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
constants: &Vec<&AssignedCell<F, F>>,
gadget_config: Rc<GadgetConfig>,
) -> Result<Vec<AssignedCell<F, F>>, Error>;
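// Shared element-wise driver: broadcast both tensors to a common shape, flatten them, and hand
// the paired cells (plus the zero constant) to the concrete chip's `gadget_forward`.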
fn arithmetic_forward(
&self,
mut layouter: impl Layouter<F>,
tensors: &Vec<AssignedTensor<F>>,
constants: &HashMap<i64, CellRc<F>>,
gadget_config: Rc<GadgetConfig>,
) -> Result<(Vec<CellRc<F>>, Vec<usize>), Error> {
assert_eq!(tensors.len(), 2);
// println!("tensors: {:?} {:?}", tensors[0].shape(), tensors[1].shape());
let (inp1, inp2) = broadcast(&tensors[0], &tensors[1]);
let out_shape = inp1.shape().clone();
assert_eq!(inp1.shape(), inp2.shape());
let zero = constants.get(&0).unwrap().as_ref();
let inp1_vec = inp1.iter().map(|x| x.as_ref()).collect::<Vec<_>>();
let inp2_vec = inp2.iter().map(|x| x.as_ref()).collect::<Vec<_>>();
let vec_inputs = vec![inp1_vec, inp2_vec];
let constants = vec![zero];
let out = self.gadget_forward(
layouter.namespace(|| ""),
&vec_inputs,
&constants,
gadget_config.clone(),
)?;
let out = out.into_iter().map(|x| Rc::new(x)).collect::<Vec<_>>();
Ok((out, out_shape.to_vec()))
}
}
use std::{collections::HashMap, rc::Rc, vec};
use halo2_proofs::{
circuit::{AssignedCell, Layouter},
halo2curves::ff::PrimeField,
plonk::Error,
};
use ndarray::{Array, IxDyn};
use crate::{
gadgets::{
add_pairs::AddPairsChip,
gadget::{Gadget, GadgetConfig, GadgetType},
nonlinear::relu::ReluChip,
},
layers::layer::{ActivationType, AssignedTensor, CellRc, GadgetConsumer},
};
use super::{
super::layer::{Layer, LayerConfig},
Arithmetic,
};
pub struct AddChip {}
impl AddChip {
fn get_activation(&self, layer_params: &Vec<i64>) -> ActivationType {
let activation = layer_params[0];
match activation {
0 => ActivationType::None,
1 => ActivationType::Relu,
_ => panic!("Unsupported activation type for add"),
}
}
}
impl<F: PrimeField> Arithmetic<F> for AddChip {
fn gadget_forward(
&self,
mut layouter: impl Layouter<F>,
vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
constants: &Vec<&AssignedCell<F, F>>,
gadget_config: Rc<GadgetConfig>,
) -> Result<Vec<AssignedCell<F, F>>, Error> {
let add_pairs_chip = AddPairsChip::<F>::construct(gadget_config);
let out = add_pairs_chip.forward(layouter.namespace(|| "add chip"), &vec_inputs, constants)?;
Ok(out)
}
}
impl<F: PrimeField> Layer<F> for AddChip {
fn forward(
&self,
mut layouter: impl Layouter<F>,
tensors: &Vec<AssignedTensor<F>>,
constants: &HashMap<i64, CellRc<F>>,
gadget_config: Rc<GadgetConfig>,
layer_config: &LayerConfig,
) -> Result<Vec<AssignedTensor<F>>, Error> {
let activation = self.get_activation(&layer_config.layer_params);
let (out, out_shape) = self.arithmetic_forward(
layouter.namespace(|| ""),
tensors,
constants,
gadget_config.clone(),
)?;
let out = if activation == ActivationType::Relu {
let zero = constants.get(&0).unwrap();
let single_inps = vec![zero.as_ref()];
let out = out.iter().map(|x| x.as_ref()).collect::<Vec<_>>();
let relu_chip = ReluChip::<F>::construct(gadget_config);
let out = relu_chip.forward(layouter.namespace(|| "relu"), &vec![out], &single_inps)?;
let out = out.into_iter().map(|x| Rc::new(x)).collect::<Vec<_>>();
out
} else if activation == ActivationType::None {
out
} else {
panic!("Unsupported activation type for add");
};
let out = Array::from_shape_vec(IxDyn(out_shape.as_slice()), out).unwrap();
Ok(vec![out])
}
}
impl GadgetConsumer for AddChip {
fn used_gadgets(&self, layer_params: Vec<i64>) -> Vec<crate::gadgets::gadget::GadgetType> {
let activation = self.get_activation(&layer_params);
let mut outp = vec![GadgetType::AddPairs];
match activation {
ActivationType::Relu => outp.push(GadgetType::Relu),
ActivationType::None => (),
_ => panic!("Unsupported activation type for add"),
}
outp
}
}
use std::{collections::HashMap, rc::Rc, vec};
use halo2_proofs::{
circuit::{AssignedCell, Layouter},
halo2curves::ff::PrimeField,
plonk::Error,
};
use ndarray::{Array, IxDyn};
use crate::{
gadgets::{
gadget::{Gadget, GadgetConfig, GadgetType},
mul_pairs::MulPairsChip,
var_div::VarDivRoundChip,
},
layers::layer::{AssignedTensor, CellRc, GadgetConsumer, Layer},
};
use super::Arithmetic;
pub struct DivVarChip {}
impl<F: PrimeField> Arithmetic<F> for DivVarChip {
fn gadget_forward(
&self,
mut layouter: impl Layouter<F>,
vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
constants: &Vec<&AssignedCell<F, F>>,
gadget_config: Rc<GadgetConfig>,
) -> Result<Vec<AssignedCell<F, F>>, Error> {
let mul_pairs_chip = MulPairsChip::<F>::construct(gadget_config.clone());
let out = mul_pairs_chip.forward(
layouter.namespace(|| "mul pairs chip"),
&vec_inputs,
constants,
)?;
Ok(out)
}
}
impl<F: PrimeField> Layer<F> for DivVarChip {
fn forward(
&self,
mut layouter: impl Layouter<F>,
tensors: &Vec<AssignedTensor<F>>,
constants: &HashMap<i64, CellRc<F>>,
gadget_config: Rc<GadgetConfig>,
_layer_config: &crate::layers::layer::LayerConfig,
) -> Result<Vec<AssignedTensor<F>>, Error> {
assert_eq!(tensors.len(), 2);
assert_eq!(tensors[1].shape().len(), 1);
assert_eq!(tensors[1].shape()[0], 1);
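// tensors[1] holds a single divisor cell. To preserve the fixed-point scale, the input is first
// multiplied by the scale factor (arithmetic_forward dispatches to MulPairs here) and then
// divided by the divisor with the rounding-division gadget below.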
let sf = constants
.get(&(gadget_config.scale_factor as i64))
.unwrap()
.as_ref();
let sf_tensor = Array::from_shape_vec(IxDyn(&[1]), vec![Rc::new(sf.clone())]).unwrap();
let (out, out_shape) = self.arithmetic_forward(
layouter.namespace(|| ""),
&vec![tensors[0].clone(), sf_tensor],
constants,
gadget_config.clone(),
)?;
let var_div_chip = VarDivRoundChip::<F>::construct(gadget_config.clone());
let div = tensors[1].iter().next().unwrap().as_ref();
let zero = constants.get(&0).unwrap().as_ref();
let single_inputs = vec![zero, div];
let out = out.iter().map(|x| x.as_ref()).collect::<Vec<_>>();
let out = var_div_chip.forward(layouter.namespace(|| "mul div"), &vec![out], &single_inputs)?;
let out = out.into_iter().map(|x| Rc::new(x)).collect::<Vec<_>>();
let out = Array::from_shape_vec(IxDyn(out_shape.as_slice()), out).unwrap();
Ok(vec![out])
}
}
impl GadgetConsumer for DivVarChip {
fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<crate::gadgets::gadget::GadgetType> {
vec![
GadgetType::MulPairs,
GadgetType::VarDivRound,
GadgetType::InputLookup,
]
}
}
use std::{collections::HashMap, rc::Rc, vec};
use halo2_proofs::{
circuit::{AssignedCell, Layouter},
halo2curves::ff::PrimeField,
plonk::Error,
};
use ndarray::{Array, IxDyn};
use crate::{
gadgets::{
gadget::{Gadget, GadgetConfig, GadgetType},
mul_pairs::MulPairsChip,
var_div::VarDivRoundChip,
},
layers::layer::{AssignedTensor, CellRc, GadgetConsumer},
};
use super::{
super::layer::{Layer, LayerConfig},
Arithmetic,
};
pub struct MulChip {}
impl<F: PrimeField> Arithmetic<F> for MulChip {
fn gadget_forward(
&self,
mut layouter: impl Layouter<F>,
vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
constants: &Vec<&AssignedCell<F, F>>,
gadget_config: Rc<GadgetConfig>,
) -> Result<Vec<AssignedCell<F, F>>, Error> {
let mul_pairs_chip = MulPairsChip::<F>::construct(gadget_config.clone());
let out = mul_pairs_chip.forward(
layouter.namespace(|| "mul pairs chip"),
&vec_inputs,
constants,
)?;
Ok(out)
}
}
impl<F: PrimeField> Layer<F> for MulChip {
fn forward(
&self,
mut layouter: impl Layouter<F>,
tensors: &Vec<AssignedTensor<F>>,
constants: &HashMap<i64, CellRc<F>>,
gadget_config: Rc<GadgetConfig>,
_layer_config: &LayerConfig,
) -> Result<Vec<AssignedTensor<F>>, Error> {
let (out, out_shape) = self.arithmetic_forward(
layouter.namespace(|| ""),
tensors,
constants,
gadget_config.clone(),
)?;
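// The product of two scale-factor-scaled inputs carries scale_factor^2, so divide once by the
// scale factor (with rounding) to bring the result back to the canonical scale.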
let var_div_chip = VarDivRoundChip::<F>::construct(gadget_config.clone());
let div = constants
.get(&(gadget_config.scale_factor as i64))
.unwrap()
.as_ref();
let zero = constants.get(&0).unwrap().as_ref();
let single_inputs = vec![zero, div];
let out = out.iter().map(|x| x.as_ref()).collect::<Vec<_>>();
let out = var_div_chip.forward(layouter.namespace(|| "mul div"), &vec![out], &single_inputs)?;
let out = out.into_iter().map(|x| Rc::new(x)).collect::<Vec<_>>();
let out = Array::from_shape_vec(IxDyn(out_shape.as_slice()), out).unwrap();
Ok(vec![out])
}
}
impl GadgetConsumer for MulChip {
fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<crate::gadgets::gadget::GadgetType> {
vec![
GadgetType::MulPairs,
GadgetType::VarDivRound,
GadgetType::InputLookup,
]
}
}
use std::{collections::HashMap, rc::Rc, vec};
use halo2_proofs::{
circuit::{AssignedCell, Layouter},
halo2curves::ff::PrimeField,
plonk::Error,
};
use ndarray::{Array, IxDyn};
use crate::{
gadgets::{
gadget::{Gadget, GadgetConfig, GadgetType},
sub_pairs::SubPairsChip,
},
layers::layer::{AssignedTensor, CellRc, GadgetConsumer},
};
use super::{
super::layer::{Layer, LayerConfig},
Arithmetic,
};
#[derive(Clone, Debug)]
pub struct SubChip {}
impl<F: PrimeField> Arithmetic<F> for SubChip {
fn gadget_forward(
&self,
mut layouter: impl Layouter<F>,
vec_inputs: &Vec<Vec<&AssignedCell<F, F>>>,
constants: &Vec<&AssignedCell<F, F>>,
gadget_config: Rc<GadgetConfig>,
) -> Result<Vec<AssignedCell<F, F>>, Error> {
let sub_pairs_chip = SubPairsChip::<F>::construct(gadget_config);
let out = sub_pairs_chip.forward(layouter.namespace(|| "sub chip"), &vec_inputs, constants)?;
Ok(out)
}
}
impl<F: PrimeField> Layer<F> for SubChip {
fn forward(
&self,
mut layouter: impl Layouter<F>,
tensors: &Vec<AssignedTensor<F>>,
constants: &HashMap<i64, CellRc<F>>,
gadget_config: Rc<GadgetConfig>,
_layer_config: &LayerConfig,
) -> Result<Vec<AssignedTensor<F>>, Error> {
let (out, out_shape) = self.arithmetic_forward(
layouter.namespace(|| ""),
tensors,
constants,
gadget_config.clone(),
)?;
let out = Array::from_shape_vec(IxDyn(out_shape.as_slice()), out).unwrap();
Ok(vec![out])
}
}
impl GadgetConsumer for SubChip {
fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<crate::gadgets::gadget::GadgetType> {
vec![GadgetType::SubPairs]
}
}
use std::{collections::HashMap, rc::Rc};
use halo2_proofs::{
circuit::{AssignedCell, Layouter},
halo2curves::ff::PrimeField,
plonk::Error,
};
use crate::gadgets::gadget::Gadget;
use crate::gadgets::{adder::AdderChip, gadget::GadgetConfig, var_div::VarDivRoundChip};
use super::layer::{AssignedTensor, CellRc, LayerConfig};
pub trait Averager<F: PrimeField> {
fn splat(&self, input: &AssignedTensor<F>, layer_config: &LayerConfig) -> Vec<Vec<CellRc<F>>>;
fn get_div_val(
&self,
layouter: impl Layouter<F>,
tensors: &Vec<AssignedTensor<F>>,
gadget_config: Rc<GadgetConfig>,
layer_config: &LayerConfig,
) -> Result<AssignedCell<F, F>, Error>;
fn avg_forward(
&self,
mut layouter: impl Layouter<F>,
tensors: &Vec<AssignedTensor<F>>,
constants: &HashMap<i64, CellRc<F>>,
gadget_config: Rc<GadgetConfig>,
layer_config: &LayerConfig,
) -> Result<Vec<CellRc<F>>, Error> {
let zero = constants.get(&0).unwrap().as_ref();
let inp = &tensors[0];
let splat_inp = self.splat(inp, layer_config);
let adder_chip = AdderChip::<F>::construct(gadget_config.clone());
let single_inputs = vec![zero];
let mut added = vec![];
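// Sum each splatted group with the adder gadget, then divide every sum by the layer-specific
// divisor (e.g. the pooling window area) using the rounding-division gadget.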
for i in 0..splat_inp.len() {
let tmp = splat_inp[i].iter().map(|x| x.as_ref()).collect::<Vec<_>>();
let tmp = adder_chip.forward(
layouter.namespace(|| format!("average {}", i)),
&vec![tmp],
&single_inputs,
)?;
added.push(tmp[0].clone());
}
let div = self.get_div_val(
layouter.namespace(|| "average div"),
tensors,
gadget_config.clone(),
layer_config,
)?;
let var_div_chip = VarDivRoundChip::<F>::construct(gadget_config.clone());
let single_inputs = vec![zero, &div];
let added = added.iter().map(|x| x).collect::<Vec<_>>();
let dived = var_div_chip.forward(
layouter.namespace(|| "average div"),
&vec![added],
&single_inputs,
)?;
let dived = dived.into_iter().map(|x| Rc::new(x)).collect::<Vec<_>>();
Ok(dived)
}
}
use std::{collections::HashMap, rc::Rc};
use halo2_proofs::{
circuit::{AssignedCell, Layouter, Value},
halo2curves::ff::PrimeField,
plonk::Error,
};
use ndarray::{Array, IxDyn};
use crate::{
gadgets::gadget::{GadgetConfig, GadgetType},
layers::max_pool_2d::MaxPool2DChip,
};
use super::{
averager::Averager,
layer::{AssignedTensor, CellRc, GadgetConsumer, Layer, LayerConfig},
};
pub struct AvgPool2DChip {}
impl<F: PrimeField> Averager<F> for AvgPool2DChip {
fn splat(&self, input: &AssignedTensor<F>, layer_config: &LayerConfig) -> Vec<Vec<CellRc<F>>> {
assert_eq!(input.shape().len(), 4);
assert_eq!(input.shape()[0], 1);
MaxPool2DChip::splat(input, layer_config).unwrap()
}
fn get_div_val(
&self,
mut layouter: impl Layouter<F>,
_tensors: &Vec<AssignedTensor<F>>,
gadget_config: Rc<GadgetConfig>,
layer_config: &LayerConfig,
) -> Result<AssignedCell<F, F>, Error> {
let div = layer_config.layer_params[0] * layer_config.layer_params[1];
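// The divisor is the pooling window area: layer_params[0] * layer_params[1] (kernel height x width).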
let div = F::from(div as u64);
let div = layouter
.assign_region(
|| "avg pool 2d div",
|mut region| {
let div = region
.assign_advice(
|| "avg pool 2d div",
gadget_config.columns[0],
0,
|| Value::known(div),
)
.unwrap();
Ok(div)
},
)
.unwrap();
Ok(div)
}
}
impl<F: PrimeField> Layer<F> for AvgPool2DChip {
fn forward(
&self,
layouter: impl Layouter<F>,
tensors: &Vec<AssignedTensor<F>>,
constants: &HashMap<i64, CellRc<F>>,
gadget_config: Rc<GadgetConfig>,
layer_config: &LayerConfig,
) -> Result<Vec<AssignedTensor<F>>, Error> {
let dived = self
.avg_forward(layouter, tensors, constants, gadget_config, layer_config)
.unwrap();
let inp = &tensors[0];
let out_xy = MaxPool2DChip::shape(inp, layer_config);
let out_shape = vec![1, out_xy.0, out_xy.1, inp.shape()[3]];
println!("out_shape: {:?}", out_shape);
let out = Array::from_shape_vec(IxDyn(&out_shape), dived).unwrap();
Ok(vec![out])
}
}
impl GadgetConsumer for AvgPool2DChip {
fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<crate::gadgets::gadget::GadgetType> {
vec![
GadgetType::Adder,
GadgetType::VarDivRound,
GadgetType::InputLookup,
]
}
}
use std::{collections::HashMap, marker::PhantomData, rc::Rc};
use halo2_proofs::{circuit::Layouter, halo2curves::ff::PrimeField, plonk::Error};
use ndarray::{Array, Axis, IxDyn};
use crate::{
gadgets::gadget::{GadgetConfig, GadgetType},
layers::fully_connected::FullyConnectedConfig,
};
use super::{
fully_connected::FullyConnectedChip,
layer::{AssignedTensor, CellRc, GadgetConsumer, Layer, LayerConfig},
};
pub struct BatchMatMulChip {}
impl<F: PrimeField> Layer<F> for BatchMatMulChip {
fn forward(
&self,
mut layouter: impl Layouter<F>,
tensors: &Vec<AssignedTensor<F>>,
constants: &HashMap<i64, CellRc<F>>,
gadget_config: Rc<GadgetConfig>,
layer_config: &LayerConfig,
) -> Result<Vec<AssignedTensor<F>>, Error> {
let inp1 = &tensors[0];
let inp2 = &tensors[1];
println!("inp1: {:?}", inp1.shape());
println!("inp2: {:?}", inp2.shape());
assert_eq!(inp1.ndim(), 3);
assert_eq!(inp2.ndim(), 3);
assert_eq!(inp1.shape()[0], inp2.shape()[0]);
let adj_y = layer_config.layer_params[1] == 1;
if adj_y {
assert_eq!(inp1.shape()[2], inp2.shape()[2]);
} else {
assert_eq!(inp1.shape()[2], inp2.shape()[1]);
}
let out_shape = if adj_y {
vec![inp1.shape()[0], inp1.shape()[1], inp2.shape()[1]]
} else {
vec![inp1.shape()[0], inp1.shape()[1], inp2.shape()[2]]
};
let fc_chip = FullyConnectedChip::<F> {
_marker: PhantomData,
config: FullyConnectedConfig::construct(true),
};
let mut outp: Vec<CellRc<F>> = vec![];
for i in 0..inp1.shape()[0] {
let inp1_slice = inp1.index_axis(Axis(0), i).to_owned();
let inp2_slice = if adj_y {
inp2.index_axis(Axis(0), i).to_owned()
} else {
inp2.index_axis(Axis(0), i).t().to_owned()
};
println!("inp1_slice: {:?}", inp1_slice.shape());
println!("inp2_slice: {:?}", inp2_slice.shape());
let tmp_config = LayerConfig {
layer_params: vec![0],
..layer_config.clone()
};
let outp_slice = fc_chip.forward(
layouter.namespace(|| ""),
&vec![inp1_slice, inp2_slice],
constants,
gadget_config.clone(),
&tmp_config,
)?;
outp.extend(outp_slice[0].iter().map(|x| x.clone()).collect::<Vec<_>>());
}
let outp = Array::from_shape_vec(IxDyn(out_shape.as_slice()), outp).unwrap();
Ok(vec![outp])
}
}
impl GadgetConsumer for BatchMatMulChip {
fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<crate::gadgets::gadget::GadgetType> {
vec![
GadgetType::Adder,
GadgetType::DotProduct,
GadgetType::VarDivRound,
GadgetType::InputLookup,
]
}
}
use std::{collections::HashMap, marker::PhantomData, rc::Rc};
use halo2_proofs::{
circuit::{AssignedCell, Layouter},
halo2curves::ff::PrimeField,
plonk::Error,
};
use ndarray::{Array, IxDyn};
use crate::{
gadgets::{
bias_div_round_relu6::BiasDivRoundRelu6Chip,
dot_prod::DotProductChip,
gadget::{Gadget, GadgetConfig, GadgetType},
nonlinear::relu::ReluChip,
},
layers::{
fully_connected::{FullyConnectedChip, FullyConnectedConfig},
shape::pad::pad,
},
};
use super::layer::{ActivationType, AssignedTensor, GadgetConsumer, Layer, LayerConfig};
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum PaddingEnum {
Same,
Valid,
}
pub enum ConvLayerEnum {
Conv2D,
DepthwiseConv2D,
}
pub struct Conv2DConfig {
pub conv_type: ConvLayerEnum,
pub padding: PaddingEnum,
pub activation: ActivationType,
pub stride: (usize, usize),
}
pub struct Conv2DChip<F: PrimeField> {
pub config: LayerConfig,
pub _marker: PhantomData<F>,
}
impl<F: PrimeField> Conv2DChip<F> {
pub fn param_vec_to_config(layer_params: Vec<i64>) -> Conv2DConfig {
let conv_type = match layer_params[0] {
0 => ConvLayerEnum::Conv2D,
1 => ConvLayerEnum::DepthwiseConv2D,
_ => panic!("Invalid conv type"),
};
let padding = match layer_params[1] {
0 => PaddingEnum::Same,
1 => PaddingEnum::Valid,
_ => panic!("Invalid padding"),
};
let activation = match layer_params[2] {
0 => ActivationType::None,
1 => ActivationType::Relu,
3 => ActivationType::Relu6,
_ => panic!("Invalid activation type"),
};
let stride = (layer_params[3] as usize, layer_params[4] as usize);
Conv2DConfig {
conv_type,
padding,
activation,
stride,
}
}
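// TensorFlow-style SAME padding. Worked example of the formulas below: h = 5, si = 2, ci = 3
// gives h % si = 1, so ph = max(3 - 1, 0) = 2, split into (1, 1); under SAME padding out_hw
// then returns ceil(5 / 2) = 3 for that dimension.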
pub fn get_padding(
h: usize,
w: usize,
si: usize,
sj: usize,
ci: usize,
cj: usize,
) -> ((usize, usize), (usize, usize)) {
let ph = if h % si == 0 {
(ci as i64 - si as i64).max(0)
} else {
(ci as i64 - (h % si) as i64).max(0)
} as usize;
let pw = if w % sj == 0 {
(cj as i64 - sj as i64).max(0)
} else {
(cj as i64 - (w % sj) as i64).max(0)
} as usize;
((ph / 2, ph - ph / 2), (pw / 2, pw - pw / 2))
}
pub fn out_hw(
h: usize,
w: usize,
si: usize,
sj: usize,
ch: usize,
cw: usize,
padding: PaddingEnum,
) -> (usize, usize) {
/*
println!(
"H: {}, W: {}, SI: {}, SJ: {}, CH: {}, CW: {}",
h, w, si, sj, ch, cw
);
*/
match padding {
PaddingEnum::Same => ((h + si - 1) / si, (w + sj - 1) / sj),
PaddingEnum::Valid => ((h - ch) / si + 1, (w - cw) / sj + 1),
}
}
pub fn splat<G: Clone>(
&self,
tensors: &Vec<Array<Rc<G>, IxDyn>>,
zero: Rc<G>,
) -> (Vec<Vec<Rc<G>>>, Vec<Vec<Rc<G>>>, Vec<Rc<G>>) {
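// im2col-style splat: one row per (batch, output y, output x) holding that output's receptive
// field, one row per output channel holding the flattened kernel, and one bias entry per output
// element (the zero cell when no bias tensor is supplied).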
assert!(tensors.len() <= 3);
let conv_config = &Self::param_vec_to_config(self.config.layer_params.clone());
let inp = &tensors[0];
let weights = &tensors[1];
let zero_arr = Array::from_elem(IxDyn(&vec![1]), zero.clone());
let biases = if tensors.len() == 3 {
&tensors[2]
} else {
&zero_arr
};
let h: usize = inp.shape()[1];
let w: usize = inp.shape()[2];
let ch: usize = weights.shape()[1];
let cw: usize = weights.shape()[2];
let (si, sj) = conv_config.stride;
assert_eq!(inp.shape().len(), 4);
let (ph, pw) = if conv_config.padding == PaddingEnum::Same {
Self::get_padding(h, w, si, sj, ch, cw)
} else {
((0, 0), (0, 0))
};
let padding = vec![[0, 0], [ph.0, ph.1], [pw.0, pw.1], [0, 0]];
let inp_pad = pad(&inp, padding, &zero);
let (oh, ow) = Self::out_hw(h, w, si, sj, ch, cw, conv_config.padding);
let mut inp_cells = vec![];
let mut weights_cells = vec![];
let mut biases_cells = vec![];
let mut input_row_idx = 0;
let mut weight_row_idx = 0;
for chan_out in 0..weights.shape()[0] {
weights_cells.push(vec![]);
for ci in 0..weights.shape()[1] {
for cj in 0..weights.shape()[2] {
for ck in 0..weights.shape()[3] {
weights_cells[weight_row_idx].push(weights[[chan_out, ci, cj, ck]].clone());
}
}
}
weight_row_idx += 1;
}
for batch in 0..inp.shape()[0] {
for i in 0..oh {
for j in 0..ow {
inp_cells.push(vec![]);
for ci in 0..weights.shape()[1] {
for cj in 0..weights.shape()[2] {
for ck in 0..weights.shape()[3] {
let idx_i = i * si + ci;
let idx_j = j * sj + cj;
inp_cells[input_row_idx].push(inp_pad[[batch, idx_i, idx_j, ck]].clone());
}
}
}
input_row_idx += 1;
}
}
}
for _batch in 0..inp.shape()[0] {
for _ in 0..oh {
for _ in 0..ow {
for chan_out in 0..weights.shape()[0] {
if tensors.len() == 3 {
biases_cells.push(biases[chan_out].clone());
} else {
biases_cells.push(zero.clone());
}
}
}
}
}
(inp_cells, weights_cells, biases_cells)
}
pub fn splat_depthwise<G: Clone>(
&self,
tensors: &Vec<Array<Rc<G>, IxDyn>>,
zero: Rc<G>,
) -> (Vec<Vec<Rc<G>>>, Vec<Vec<Rc<G>>>, Vec<Rc<G>>) {
let input = &tensors[0];
let weights = &tensors[1];
let biases = &tensors[2];
assert_eq!(tensors.len(), 3);
assert_eq!(input.shape().len(), 4);
assert_eq!(weights.shape().len(), 4);
assert_eq!(input.shape()[0], 1);
let conv_config = &Self::param_vec_to_config(self.config.layer_params.clone());
let strides = conv_config.stride;
let h: usize = input.shape()[1];
let w: usize = input.shape()[2];
let ch: usize = weights.shape()[1];
let cw: usize = weights.shape()[2];
let (si, sj) = conv_config.stride;
let (oh, ow) = Self::out_hw(h, w, si, sj, ch, cw, conv_config.padding);
let (ph, pw) = if conv_config.padding == PaddingEnum::Same {
Self::get_padding(h, w, si, sj, ch, cw)
} else {
((0, 0), (0, 0))
};
let padding = vec![[0, 0], [ph.0, ph.1], [pw.0, pw.1], [0, 0]];
let inp_pad = pad(&input, padding, &zero);
let mut inp_cells = vec![];
let mut weight_cells = vec![];
let mut biases_cells = vec![];
let mut row_idx = 0;
for i in 0..oh {
for j in 0..ow {
for chan_out in 0..weights.shape()[3] {
inp_cells.push(vec![]);
weight_cells.push(vec![]);
biases_cells.push(biases[[chan_out]].clone());
for ci in 0..weights.shape()[1] {
for cj in 0..weights.shape()[2] {
let idx_i = i * strides.0 + ci;
let idx_j = j * strides.1 + cj;
inp_cells[row_idx].push(inp_pad[[0, idx_i, idx_j, chan_out]].clone());
weight_cells[row_idx].push(weights[[0, ci, cj, chan_out]].clone());
}
}
row_idx += 1;
}
}
}
(inp_cells, weight_cells, biases_cells)
}
}
impl<F: PrimeField> Layer<F> for Conv2DChip<F> {
fn forward(
&self,
mut layouter: impl Layouter<F>,
tensors: &Vec<AssignedTensor<F>>,
constants: &HashMap<i64, Rc<AssignedCell<F, F>>>,
gadget_config: Rc<GadgetConfig>,
layer_config: &LayerConfig,
) -> Result<Vec<AssignedTensor<F>>, Error> {
let conv_config = &Self::param_vec_to_config(self.config.layer_params.clone());
let zero = constants.get(&0).unwrap();
let inp = &tensors[0];
let weights = &tensors[1];
let (oh, ow) = Self::out_hw(
inp.shape()[1],
inp.shape()[2],
conv_config.stride.0,
conv_config.stride.1,
weights.shape()[1],
weights.shape()[2],
conv_config.padding,
);
let batch_size = inp.shape()[0];
let (splat_inp, splat_weights, splat_biases) = match conv_config.conv_type {
ConvLayerEnum::Conv2D => self.splat(tensors, zero.clone()),
ConvLayerEnum::DepthwiseConv2D => self.splat_depthwise(tensors, zero.clone()),
};
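// Standard Conv2D is lowered to a single matrix multiply between the flattened kernels
// ([out_channels, conv_size]) and the im2col'd input ([batch * oh * ow, conv_size]) via the
// fully-connected chip (normalization off; bias and rescaling happen in BiasDivRoundRelu6
// below). DepthwiseConv2D instead runs one dot product per splatted window.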
let outp_flat: Vec<AssignedCell<F, F>> = match conv_config.conv_type {
ConvLayerEnum::Conv2D => {
let fc_chip = FullyConnectedChip::<F> {
_marker: PhantomData,
config: FullyConnectedConfig::construct(false),
};
let conv_size = splat_inp[0].len();
let flattened_inp: Vec<_> = splat_inp.into_iter().flat_map(|x| x.into_iter()).collect();
let flattened_weights = splat_weights
.into_iter()
.flat_map(|x| x.into_iter())
.collect::<Vec<_>>();
let out_channels = weights.shape()[0];
let inp_array =
Array::from_shape_vec(IxDyn(&vec![batch_size * oh * ow, conv_size]), flattened_inp)
.unwrap();
let weights_array =
Array::from_shape_vec(IxDyn(&vec![out_channels, conv_size]), flattened_weights).unwrap();
let outp_slice = fc_chip
.forward(
layouter.namespace(|| ""),
&vec![weights_array, inp_array],
constants,
gadget_config.clone(),
layer_config,
)
.unwrap();
let outp_flat = outp_slice[0]
.t()
.into_iter()
.map(|x| (**x).clone())
.collect::<Vec<_>>();
outp_flat
}
ConvLayerEnum::DepthwiseConv2D => {
let dot_prod_chip = DotProductChip::<F>::construct(gadget_config.clone());
let mut outp_flat = vec![];
for (inp_vec, weight_vec) in splat_inp.iter().zip(splat_weights.iter()) {
let inp_vec = inp_vec.iter().map(|x| x.as_ref()).collect::<Vec<_>>();
let weight_vec = weight_vec.iter().map(|x| x.as_ref()).collect::<Vec<_>>();
let vec_inputs = vec![inp_vec, weight_vec];
let constants = vec![zero.as_ref()];
let outp = dot_prod_chip
.forward(layouter.namespace(|| "dot_prod"), &vec_inputs, &constants)
.unwrap();
outp_flat.push(outp[0].clone());
}
outp_flat
}
};
let mut biases = vec![];
for bias in splat_biases.iter() {
biases.push(bias.as_ref());
}
let bdr_chip = BiasDivRoundRelu6Chip::<F>::construct(gadget_config.clone());
let tmp = vec![zero.as_ref()];
let outp_flat = outp_flat.iter().map(|x| x).collect::<Vec<_>>();
let outp = bdr_chip
.forward(
layouter.namespace(|| "bias_div_relu"),
&vec![outp_flat, biases],
&tmp,
)
.unwrap();
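// BiasDivRoundRelu6 appears to emit two cells per input: the ReLU6-clipped, rescaled value at
// even indices and the plain rescaled value at odd indices. The activation type selects which
// stream to keep; plain ReLU takes the unclipped stream and applies a separate ReluChip pass.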
let outp = if conv_config.activation == ActivationType::Relu6 {
outp
.into_iter()
.step_by(2)
.map(|x| Rc::new(x))
.collect::<Vec<_>>()
} else if conv_config.activation == ActivationType::None {
outp
.into_iter()
.skip(1)
.step_by(2)
.map(|x| Rc::new(x))
.collect::<Vec<_>>()
} else if conv_config.activation == ActivationType::Relu {
let dived = outp.iter().skip(1).step_by(2).collect::<Vec<_>>();
let relu_chip = ReluChip::<F>::construct(gadget_config.clone());
let relu_outp = relu_chip
.forward(layouter.namespace(|| "relu"), &vec![dived], &tmp)
.unwrap();
let relu_outp = relu_outp
.into_iter()
.map(|x| Rc::new(x))
.collect::<Vec<_>>();
relu_outp
} else {
panic!("Unsupported activation type");
};
let oc = match conv_config.conv_type {
ConvLayerEnum::Conv2D => weights.shape()[0],
ConvLayerEnum::DepthwiseConv2D => weights.shape()[3],
};
let out_shape = vec![batch_size, oh, ow, oc];
let outp = Array::from_shape_vec(IxDyn(&out_shape), outp).unwrap();
Ok(vec![outp])
}
}
impl<F: PrimeField> GadgetConsumer for Conv2DChip<F> {
fn used_gadgets(&self, layer_params: Vec<i64>) -> Vec<crate::gadgets::gadget::GadgetType> {
let conv_config = &Self::param_vec_to_config(layer_params.clone());
let mut outp = vec![
GadgetType::Adder,
GadgetType::DotProduct,
GadgetType::InputLookup,
GadgetType::BiasDivRoundRelu6,
];
if conv_config.activation == ActivationType::Relu {
outp.push(GadgetType::Relu);
}
outp
}
}
use std::{collections::HashMap, fs::File, io::BufWriter, marker::PhantomData, rc::Rc};
use halo2_proofs::{circuit::Layouter, halo2curves::ff::PrimeField, plonk::Error};
use crate::{
gadgets::gadget::{convert_to_u64, GadgetConfig},
layers::{
arithmetic::{add::AddChip, div_var::DivVarChip, mul::MulChip, sub::SubChip},
batch_mat_mul::BatchMatMulChip,
div_fixed::DivFixedChip,
fully_connected::{FullyConnectedChip, FullyConnectedConfig},
logistic::LogisticChip,
max_pool_2d::MaxPool2DChip,
mean::MeanChip,
noop::NoopChip,
pow::PowChip,
rsqrt::RsqrtChip,
shape::{
broadcast::BroadcastChip, concatenation::ConcatenationChip, mask_neg_inf::MaskNegInfChip,
pack::PackChip, pad::PadChip, permute::PermuteChip, reshape::ReshapeChip,
resize_nn::ResizeNNChip, rotate::RotateChip, slice::SliceChip, split::SplitChip,
transpose::TransposeChip,
},
softmax::SoftmaxChip,
sqrt::SqrtChip,
square::SquareChip,
squared_diff::SquaredDiffChip,
tanh::TanhChip,
update::UpdateChip,
},
utils::helpers::print_assigned_arr,
};
use super::{
avg_pool_2d::AvgPool2DChip,
conv2d::Conv2DChip,
layer::{AssignedTensor, CellRc, GadgetConsumer, Layer, LayerConfig, LayerType},
};
pub struct DAGLayerConfig {
pub ops: Vec<LayerConfig>,
pub inp_idxes: Vec<Vec<usize>>,
pub out_idxes: Vec<Vec<usize>>,
pub final_out_idxes: Vec<usize>,
}
pub struct DAGLayerChip<F: PrimeField + Ord> {
dag_config: DAGLayerConfig,
_marker: PhantomData<F>,
}
impl<F: PrimeField + Ord> DAGLayerChip<F> {
pub fn construct(dag_config: DAGLayerConfig) -> Self {
Self {
dag_config,
_marker: PhantomData,
}
}
pub fn forward(
&self,
mut layouter: impl Layouter<F>,
tensors: &Vec<AssignedTensor<F>>,
constants: &HashMap<i64, CellRc<F>>,
gadget_config: Rc<GadgetConfig>,
_layer_config: &LayerConfig,
) -> Result<(HashMap<usize, AssignedTensor<F>>, Vec<AssignedTensor<F>>), Error> {
let mut tensor_map = HashMap::new();
for (idx, tensor) in tensors.iter().enumerate() {
tensor_map.insert(idx, tensor.clone());
}
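// Execute the DAG ops in order: each op reads its inputs from tensor_map via inp_idxes and
// writes its outputs back under out_idxes, so later layers can consume earlier results.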
for (layer_idx, layer_config) in self.dag_config.ops.iter().enumerate() {
let layer_type = &layer_config.layer_type;
let inp_idxes = &self.dag_config.inp_idxes[layer_idx];
let out_idxes = &self.dag_config.out_idxes[layer_idx];
println!(
"Processing layer {}, type: {:?}, inp_idxes: {:?}, out_idxes: {:?}, layer_params: {:?}",
layer_idx, layer_type, inp_idxes, out_idxes, layer_config.layer_params
);
let vec_inps = inp_idxes
.iter()
.map(|idx| tensor_map.get(idx).unwrap().clone())
.collect::<Vec<_>>();
let out = match layer_type {
LayerType::Add => {
let add_chip = AddChip {};
add_chip.forward(
layouter.namespace(|| "dag add"),
&vec_inps,
constants,
gadget_config.clone(),
&layer_config,
)?
}
LayerType::AvgPool2D => {
let avg_pool_2d_chip = AvgPool2DChip {};
avg_pool_2d_chip.forward(
layouter.namespace(|| "dag avg pool 2d"),
&vec_inps,
constants,
gadget_config.clone(),
&layer_config,
)?
}
LayerType::MaxPool2D => {
let max_pool_2d_chip = MaxPool2DChip {
marker: PhantomData::<F>,
};
max_pool_2d_chip.forward(
layouter.namespace(|| "dag max pool 2d"),
&vec_inps,
constants,
gadget_config.clone(),
&layer_config,
)?
}
LayerType::BatchMatMul => {
let batch_mat_mul_chip = BatchMatMulChip {};
batch_mat_mul_chip.forward(
layouter.namespace(|| "dag batch mat mul"),
&vec_inps,
constants,
gadget_config.clone(),
&layer_config,
)?
}
LayerType::Broadcast => {
let broadcast_chip = BroadcastChip {};
broadcast_chip.forward(
layouter.namespace(|| "dag batch mat mul"),
&vec_inps,
constants,
gadget_config.clone(),
&layer_config,
)?
}
LayerType::Conv2D => {
let conv_2d_chip = Conv2DChip {
config: layer_config.clone(),
_marker: PhantomData,
};
conv_2d_chip.forward(
layouter.namespace(|| "dag conv 2d"),
&vec_inps,
constants,
gadget_config.clone(),
&layer_config,
)?
}
LayerType::DivFixed => {
let div_fixed_chip = DivFixedChip {};
div_fixed_chip.forward(
layouter.namespace(|| "dag div"),
&vec_inps,
constants,
gadget_config.clone(),
&layer_config,
)?
}
LayerType::DivVar => {
let div_var_chip = DivVarChip {};
div_var_chip.forward(
layouter.namespace(|| "dag div"),
&vec_inps,
constants,
gadget_config.clone(),
&layer_config,
)?
}
LayerType::FullyConnected => {
let fc_chip = FullyConnectedChip {
_marker: PhantomData,
config: FullyConnectedConfig::construct(true),
};
fc_chip.forward(
layouter.namespace(|| "dag fully connected"),
&vec_inps,
constants,
gadget_config.clone(),
&layer_config,
)?
}
LayerType::Softmax => {
let softmax_chip = SoftmaxChip {};
softmax_chip.forward(
layouter.namespace(|| "dag softmax"),
&vec_inps,
constants,
gadget_config.clone(),
&layer_config,
)?
}
LayerType::Mean => {
let mean_chip = MeanChip {};
mean_chip.forward(
layouter.namespace(|| "dag mean"),
&vec_inps,
constants,
gadget_config.clone(),
&layer_config,
)?
}
LayerType::Pad => {
let pad_chip = PadChip {};
pad_chip.forward(
layouter.namespace(|| "dag pad"),
&vec_inps,
constants,
gadget_config.clone(),
&layer_config,
)?
}
LayerType::Permute => {
let pad_chip = PermuteChip {};
pad_chip.forward(
layouter.namespace(|| "dag permute"),
&vec_inps,
constants,
gadget_config.clone(),
&layer_config,
)?
}
LayerType::SquaredDifference => {
let squared_diff_chip = SquaredDiffChip {};
squared_diff_chip.forward(
layouter.namespace(|| "dag squared diff"),
&vec_inps,
constants,
gadget_config.clone(),
&layer_config,
)?
}
LayerType::Rsqrt => {
let rsqrt_chip = RsqrtChip {};
rsqrt_chip.forward(
layouter.namespace(|| "dag rsqrt"),
&vec_inps,
constants,
gadget_config.clone(),
&layer_config,
)?
}
LayerType::Sqrt => {
let sqrt_chip = SqrtChip {};
sqrt_chip.forward(
layouter.namespace(|| "dag sqrt"),
&vec_inps,
constants,
gadget_config.clone(),
&layer_config,
)?
}
LayerType::Logistic => {
let logistic_chip = LogisticChip {};
logistic_chip.forward(
layouter.namespace(|| "dag logistic"),
&vec_inps,
constants,
gadget_config.clone(),
&layer_config,
)?
}
LayerType::Pow => {
let pow_chip = PowChip {};
pow_chip.forward(
layouter.namespace(|| "dag logistic"),
&vec_inps,
constants,
gadget_config.clone(),
&layer_config,
)?
}
LayerType::Tanh => {
let tanh_chip = TanhChip {};
tanh_chip.forward(
layouter.namespace(|| "dag tanh"),
&vec_inps,
constants,
gadget_config.clone(),
&layer_config,
)?
}
LayerType::Mul => {
let mul_chip = MulChip {};
mul_chip.forward(
layouter.namespace(|| "dag mul"),
&vec_inps,
constants,
gadget_config.clone(),
&layer_config,
)?
}
LayerType::Sub => {
let sub_chip = SubChip {};
sub_chip.forward(
layouter.namespace(|| "dag sub"),
&vec_inps,
constants,
gadget_config.clone(),
&layer_config,
)?
}
LayerType::Noop => {
let noop_chip = NoopChip {};
noop_chip.forward(
layouter.namespace(|| "dag noop"),
&vec_inps,
constants,
gadget_config.clone(),
&layer_config,
)?
}
LayerType::Transpose => {
let transpose_chip = TransposeChip {};
transpose_chip.forward(
layouter.namespace(|| "dag transpose"),
&vec_inps,
constants,
gadget_config.clone(),
&layer_config,
)?
}
LayerType::Reshape => {
let reshape_chip = ReshapeChip {};
reshape_chip.forward(
layouter.namespace(|| "dag reshape"),
&vec_inps,
constants,
gadget_config.clone(),
&layer_config,
)?
}
LayerType::ResizeNN => {
let resize_nn_chip = ResizeNNChip {};
resize_nn_chip.forward(
layouter.namespace(|| "dag resize nn"),
&vec_inps,
constants,
gadget_config.clone(),
&layer_config,
)?
}
LayerType::Rotate => {
let rotate_chip = RotateChip {};
rotate_chip.forward(
layouter.namespace(|| "dag rotate"),
&vec_inps,
constants,
gadget_config.clone(),
&layer_config,
)?
}
LayerType::Concatenation => {
let concat_chip = ConcatenationChip {};
concat_chip.forward(
layouter.namespace(|| "dag concatenation"),
&vec_inps,
constants,
gadget_config.clone(),
&layer_config,
)?
}
LayerType::Pack => {
let pack_chip = PackChip {};
pack_chip.forward(
layouter.namespace(|| "dag pack"),
&vec_inps,
constants,
gadget_config.clone(),
&layer_config,
)?
}
LayerType::Split => {
let split_chip = SplitChip {};
split_chip.forward(
layouter.namespace(|| "dag split"),
&vec_inps,
constants,
gadget_config.clone(),
&layer_config,
)?
}
LayerType::Update => {
let split_chip = UpdateChip {};
split_chip.forward(
layouter.namespace(|| "dag update"),
&vec_inps,
constants,
gadget_config.clone(),
&layer_config,
)?
}
LayerType::Slice => {
let slice_chip = SliceChip {};
slice_chip.forward(
layouter.namespace(|| "dag slice"),
&vec_inps,
constants,
gadget_config.clone(),
&layer_config,
)?
}
LayerType::MaskNegInf => {
let mask_neg_inf_chip = MaskNegInfChip {};
mask_neg_inf_chip.forward(
layouter.namespace(|| "dag mask neg inf"),
&vec_inps,
constants,
gadget_config.clone(),
&layer_config,
)?
}
LayerType::Square => {
let square_chip = SquareChip {};
square_chip.forward(
layouter.namespace(|| "dag square"),
&vec_inps,
constants,
gadget_config.clone(),
&layer_config,
)?
}
};
for (idx, tensor_idx) in out_idxes.iter().enumerate() {
println!("Out {} shape: {:?}", idx, out[idx].shape());
tensor_map.insert(*tensor_idx, out[idx].clone());
}
println!();
}
let mut final_out = vec![];
for idx in self.dag_config.final_out_idxes.iter() {
final_out.push(tensor_map.get(idx).unwrap().clone());
}
let print_arr = if final_out.len() > 0 {
&final_out[0]
} else {
if self.dag_config.ops.len() > 0 {
let last_layer_idx = self.dag_config.ops.len() - 1;
let out_idx = self.dag_config.out_idxes[last_layer_idx][0];
tensor_map.get(&out_idx).unwrap()
} else {
tensor_map.get(&0).unwrap()
}
};
let tmp = print_arr.iter().map(|x| x.as_ref()).collect::<Vec<_>>();
print_assigned_arr("final out", &tmp.to_vec(), gadget_config.scale_factor);
println!("final out idxes: {:?}", self.dag_config.final_out_idxes);
let mut x = vec![];
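// Recover signed integers from the output field elements: add a 2^60 bias so small negative
// values become small positive ones, convert to u64, then subtract the bias again.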
for cell in print_arr.iter() {
cell.value().map(|v| {
let bias = 1_i64 << 60;
let v_pos = *v + F::from(bias as u64);
let v = convert_to_u64(&v_pos) as i64 - bias;
x.push(v);
});
}
if x.len() > 0 {
let out_fname = "out.msgpack";
let f = File::create(out_fname).unwrap();
let mut buf = BufWriter::new(f);
rmp_serde::encode::write_named(&mut buf, &x).unwrap();
}
Ok((tensor_map, final_out))
}
}
impl<F: PrimeField + Ord> GadgetConsumer for DAGLayerChip<F> {
fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<crate::gadgets::gadget::GadgetType> {
vec![]
}
}
use std::{collections::HashMap, rc::Rc, vec};
use halo2_proofs::{
circuit::{AssignedCell, Layouter, Value},
halo2curves::ff::PrimeField,
plonk::Error,
};
use ndarray::{Array, IxDyn};
use crate::gadgets::{
gadget::{Gadget, GadgetConfig, GadgetType},
var_div::VarDivRoundChip,
};
use super::layer::{AssignedTensor, CellRc, GadgetConsumer, Layer, LayerConfig};
pub struct DivFixedChip {}
impl DivFixedChip {
fn get_div_val<F: PrimeField>(
&self,
mut layouter: impl Layouter<F>,
_tensors: &Vec<AssignedTensor<F>>,
gadget_config: Rc<GadgetConfig>,
layer_config: &LayerConfig,
) -> Result<AssignedCell<F, F>, Error> {
let div = layer_config.layer_params[0];
let div = F::from(div as u64);
let div = layouter
.assign_region(
|| "division",
|mut region| {
let div = region
.assign_advice(
|| "avg pool 2d div",
gadget_config.columns[0],
0,
|| Value::known(div),
)
.unwrap();
Ok(div)
},
)
.unwrap();
Ok(div)
}
}
impl<F: PrimeField> Layer<F> for DivFixedChip {
fn forward(
&self,
mut layouter: impl Layouter<F>,
tensors: &Vec<AssignedTensor<F>>,
constants: &HashMap<i64, CellRc<F>>,
gadget_config: Rc<GadgetConfig>,
layer_config: &LayerConfig,
) -> Result<Vec<AssignedTensor<F>>, Error> {
let inp = &tensors[0];
let inp_flat = inp.iter().map(|x| x.as_ref()).collect::<Vec<_>>();
let zero = constants.get(&0).unwrap().as_ref();
let shape = inp.shape();
let div = self.get_div_val(
layouter.namespace(|| "average div"),
tensors,
gadget_config.clone(),
layer_config,
)?;
let var_div_chip = VarDivRoundChip::<F>::construct(gadget_config.clone());
let dived = var_div_chip.forward(
layouter.namespace(|| "average div"),
&vec![inp_flat],
&vec![zero, &div],
)?;
let dived = dived.into_iter().map(|x| Rc::new(x)).collect::<Vec<_>>();
let out = Array::from_shape_vec(IxDyn(shape), dived).unwrap();
Ok(vec![out])
}
}
impl GadgetConsumer for DivFixedChip {
fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<crate::gadgets::gadget::GadgetType> {
vec![GadgetType::VarDivRound]
}
}
use std::{collections::HashMap, marker::PhantomData, rc::Rc};
use halo2_proofs::{
circuit::{AssignedCell, Layouter, Region, Value},
halo2curves::ff::PrimeField,
plonk::{Advice, Column, Error},
};
use ndarray::{Array, ArrayView, Axis, IxDyn};
use crate::{
gadgets::{
add_pairs::AddPairsChip,
dot_prod::DotProductChip,
gadget::{Gadget, GadgetConfig, GadgetType},
nonlinear::relu::ReluChip,
var_div::VarDivRoundChip,
},
layers::layer::ActivationType,
utils::helpers::RAND_START_IDX,
};
use super::layer::{AssignedTensor, CellRc, GadgetConsumer, Layer, LayerConfig};
pub struct FullyConnectedConfig {
pub normalize: bool,
}
impl FullyConnectedConfig {
pub fn construct(normalize: bool) -> Self {
Self { normalize }
}
}
pub struct FullyConnectedChip<F: PrimeField> {
pub _marker: PhantomData<F>,
pub config: FullyConnectedConfig,
}
impl<F: PrimeField> FullyConnectedChip<F> {
pub fn compute_mm(
input: &ArrayView<CellRc<F>, IxDyn>,
weight: &AssignedTensor<F>,
) -> Array<Value<F>, IxDyn> {
assert_eq!(input.ndim(), 2);
assert_eq!(weight.ndim(), 2);
assert_eq!(input.shape()[1], weight.shape()[0]);
let mut outp = vec![];
for i in 0..input.shape()[0] {
for j in 0..weight.shape()[1] {
let mut sum = input[[i, 0]].value().map(|x: &F| *x) * weight[[0, j]].value();
for k in 1..input.shape()[1] {
sum = sum + input[[i, k]].value().map(|x: &F| *x) * weight[[k, j]].value();
}
outp.push(sum);
}
}
let out_shape = [input.shape()[0], weight.shape()[1]];
Array::from_shape_vec(IxDyn(out_shape.as_slice()), outp).unwrap()
}
pub fn assign_array(
columns: &Vec<Column<Advice>>,
region: &mut Region<F>,
array: &Array<Value<F>, IxDyn>,
) -> Result<Array<AssignedCell<F, F>, IxDyn>, Error> {
assert_eq!(array.ndim(), 2);
let mut outp = vec![];
for (idx, val) in array.iter().enumerate() {
let row_idx = idx / columns.len();
let col_idx = idx % columns.len();
let cell = region
.assign_advice(|| "assign array", columns[col_idx], row_idx, || *val)
.unwrap();
outp.push(cell);
}
let out_shape = [array.shape()[0], array.shape()[1]];
Ok(Array::from_shape_vec(IxDyn(out_shape.as_slice()), outp).unwrap())
}
pub fn random_vector(
constants: &HashMap<i64, CellRc<F>>,
size: usize,
) -> Result<Vec<CellRc<F>>, Error> {
let mut outp = vec![];
for idx in 0..size {
let idx = RAND_START_IDX + (idx as i64);
if !constants.contains_key(&idx) {
println!("Random vector is too small: {:?}", size); |
}
let cell = constants.get(&idx).unwrap().clone();
outp.push(cell);
}
Ok(outp)
}
fn get_activation(&self, layer_params: &Vec<i64>) -> ActivationType {
let activation = layer_params[0];
match activation {
0 => ActivationType::None,
1 => ActivationType::Relu,
_ => panic!("Unsupported activation type for fully connected"),
}
}
}
impl<F: PrimeField> Layer<F> for FullyConnectedChip<F> {
fn forward(
&self,
mut layouter: impl Layouter<F>,
tensors: &Vec<AssignedTensor<F>>,
constants: &HashMap<i64, CellRc<F>>,
gadget_config: Rc<GadgetConfig>,
layer_config: &LayerConfig,
) -> Result<Vec<AssignedTensor<F>>, Error> {
assert!(tensors.len() <= 3);
let activation = self.get_activation(&layer_config.layer_params);
let input = &tensors[0];
let ndim = input.ndim();
let input = if ndim == 2 {
ArrayView::from(input)
} else {
input.index_axis(Axis(0), 0)
};
let weight = &tensors[1].t().into_owned();
let zero = constants.get(&0).unwrap().as_ref();
let mm_result = layouter
.assign_region(
|| "compute and assign mm",
|mut region| {
let mm_result = Self::compute_mm(&input, weight);
let mm_result =
Self::assign_array(&gadget_config.columns, &mut region, &mm_result).unwrap();
Ok(mm_result)
},
)
.unwrap();
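// Freivalds-style randomized check of the unconstrained matrix multiply: with random vectors
// r1, r2 taken from the constants at RAND_START_IDX, constrain r1^T * M * r2 ==
// (r1^T * input) * (weight * r2), where each side is built from dot-product gadgets.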
let r1 = Self::random_vector(constants, mm_result.shape()[0]).unwrap();
let r2 = Self::random_vector(constants, mm_result.shape()[1]).unwrap();
let dot_prod_chip = DotProductChip::<F>::construct(gadget_config.clone());
let r1_ref = r1.iter().map(|x| x.as_ref()).collect::<Vec<_>>();
let r2_ref = r2.iter().map(|x| x.as_ref()).collect::<Vec<_>>();
let mut r1_res = vec![];
for i in 0..mm_result.shape()[1] {
let tmp = mm_result.index_axis(Axis(1), i);
let mm_ci = tmp.iter().collect::<Vec<_>>();
let r1_res_i = dot_prod_chip
.forward(
layouter.namespace(|| format!("r1_res_{}", i)),
&vec![mm_ci, r1_ref.clone()],
&vec![zero],
)
.unwrap();
r1_res.push(r1_res_i[0].clone());
}
let r1_res_ref = r1_res.iter().collect::<Vec<_>>();
let r1_res_r2 = dot_prod_chip
.forward(
layouter.namespace(|| "r1_res_r2"),
&vec![r1_res_ref, r2_ref.clone()],
&vec![zero],
)
.unwrap();
let r1_res_r2 = r1_res_r2[0].clone();
let mut r1_input = vec![];
for i in 0..input.shape()[1] {
let tmp = input.index_axis(Axis(1), i);
let input_ci = tmp.iter().map(|x| x.as_ref()).collect::<Vec<_>>();
let r1_input_i = dot_prod_chip
.forward(
layouter.namespace(|| format!("r1_input_{}", i)),
&vec![input_ci, r1_ref.clone()],
&vec![zero],
)
.unwrap();
r1_input.push(r1_input_i[0].clone());
}
let mut weight_r2 = vec![];
for i in 0..weight.shape()[0] {
let tmp = weight.index_axis(Axis(0), i);
let weight_ci = tmp.iter().map(|x| x.as_ref()).collect::<Vec<_>>();
let weight_r2_i = dot_prod_chip
.forward(
layouter.namespace(|| format!("weight_r2_{}", i)),
&vec![weight_ci, r2_ref.clone()],
&vec![zero],
)
.unwrap();
weight_r2.push(weight_r2_i[0].clone());
}
let r1_input_ref = r1_input.iter().collect::<Vec<_>>();
let weight_r2_ref = weight_r2.iter().collect::<Vec<_>>();
let r1_inp_weight_r2 = dot_prod_chip
.forward(
layouter.namespace(|| "r1_inp_weight_r2"),
&vec![r1_input_ref, weight_r2_ref],
&vec![zero],
)
.unwrap();
let r1_inp_weight_r2 = r1_inp_weight_r2[0].clone();
layouter
.assign_region(
|| "fc equality check",
|mut region| {
let t1 = r1_res_r2
.copy_advice(|| "", &mut region, gadget_config.columns[0], 0)
.unwrap();
let t2 = r1_inp_weight_r2
.copy_advice(|| "", &mut region, gadget_config.columns[0], 1)
.unwrap();
region.constrain_equal(t1.cell(), t2.cell()).unwrap();
Ok(())
},
)
.unwrap();
let shape = [mm_result.shape()[0], mm_result.shape()[1]];
let final_result_flat = if self.config.normalize {
let mm_flat = mm_result.iter().collect::<Vec<_>>();
let var_div_chip = VarDivRoundChip::<F>::construct(gadget_config.clone());
let sf = constants
.get(&(gadget_config.scale_factor as i64))
.unwrap()
.as_ref();
let mm_div = var_div_chip
.forward(
layouter.namespace(|| "mm_div"),
&vec![mm_flat],
&vec![zero, sf],
)
.unwrap();
let mm_div = if tensors.len() == 3 {
let bias = tensors[2].broadcast(shape.clone()).unwrap();
let bias = bias.iter().map(|x| x.as_ref()).collect::<Vec<_>>();
let mm_div = mm_div.iter().collect::<Vec<_>>();
let adder_chip = AddPairsChip::<F>::construct(gadget_config.clone());
let mm_bias = adder_chip
.forward(
layouter.namespace(|| "mm_bias"),
&vec![mm_div, bias],
&vec![zero],
)
.unwrap();
mm_bias
} else {
mm_div
};
let mm_div = if activation == ActivationType::Relu {
let relu_chip = ReluChip::<F>::construct(gadget_config.clone());
let mm_div = mm_div.iter().collect::<Vec<_>>();
let vec_inputs = vec![mm_div];
relu_chip
.forward(layouter.namespace(|| "relu"), &vec_inputs, &vec![zero])
.unwrap()
} else if activation == ActivationType::None {
mm_div
} else {
panic!("Unsupported activation type");
};
mm_div.into_iter().map(|x| Rc::new(x)).collect::<Vec<_>>()
} else {
mm_result
.into_iter()
.map(|x| Rc::new(x))
.collect::<Vec<_>>()
};
let final_result = Array::from_shape_vec(IxDyn(&shape), final_result_flat).unwrap();
Ok(vec![final_result])
}
}
impl<F: PrimeField> GadgetConsumer for FullyConnectedChip<F> {
fn used_gadgets(&self, layer_params: Vec<i64>) -> Vec<crate::gadgets::gadget::GadgetType> {
let activation = self.get_activation(&layer_params);
let mut outp = vec![
GadgetType::Adder,
GadgetType::AddPairs,
GadgetType::DotProduct,
GadgetType::VarDivRound,
GadgetType::InputLookup,
];
match activation {
ActivationType::Relu => outp.push(GadgetType::Relu),
ActivationType::None => (),
_ => panic!("Unsupported activation type"),
}
outp
}
}
use std::{collections::HashMap, rc::Rc};
use halo2_proofs::{
circuit::{AssignedCell, Layouter},
halo2curves::ff::PrimeField,
plonk::Error,
};
use ndarray::{Array, IxDyn};
use crate::gadgets::gadget::{GadgetConfig, GadgetType};
#[derive(Clone, Copy, Debug, Default, Hash, Eq, PartialEq)]
pub enum LayerType {
Add,
AvgPool2D,
BatchMatMul,
Broadcast,
Concatenation,
Conv2D,
DivVar,
DivFixed,
FullyConnected,
Logistic,
MaskNegInf,
MaxPool2D,
Mean,
Mul,
#[default]
Noop,
Pack,
Pad,
Pow,
Permute,
Reshape,
ResizeNN,
Rotate,
Rsqrt,
Slice,
Softmax,
Split,
Sqrt,
Square,
SquaredDifference,
Sub,
Tanh,
Transpose,
Update,
}
// NOTE: This is the same order as the TFLite schema
// Must not be changed
#[derive(Clone, Debug, Default, Hash, Eq, PartialEq)]
pub enum ActivationType {
#[default]
None,
Relu,
ReluN1To1,
Relu6,
Tanh,
SignBit,
}
#[derive(Clone, Debug, Default)]
pub struct LayerConfig {
pub layer_type: LayerType,
pub layer_params: Vec<i64>, // This is turned into layer specific configurations at runtime
pub inp_shapes: Vec<Vec<usize>>,
pub out_shapes: Vec<Vec<usize>>,
pub mask: Vec<i64>,
}
pub type CellRc<F> = Rc<AssignedCell<F, F>>;
pub type AssignedTensor<F> = Array<CellRc<F>, IxDyn>;
// General issue with rust: I'm not sure how to pass named arguments to a trait...
// Currently, the caller must be aware of the order of the tensors and results
pub trait Layer<F: PrimeField> {
fn forward(
&self,
layouter: impl Layouter<F>,
tensors: &Vec<AssignedTensor<F>>,
constants: &HashMap<i64, CellRc<F>>,
gadget_config: Rc<GadgetConfig>,
layer_config: &LayerConfig,
) -> Result<Vec<AssignedTensor<F>>, Error>;
}
pub trait GadgetConsumer {
fn used_gadgets(&self, layer_params: Vec<i64>) -> Vec<GadgetType>;
}
use std::{collections::HashMap, rc::Rc, vec};
use halo2_proofs::{circuit::Layouter, halo2curves::ff::PrimeField, plonk::Error};
use ndarray::{Array, IxDyn};
use crate::gadgets::{
gadget::{Gadget, GadgetConfig, GadgetType},
nonlinear::logistic::LogisticGadgetChip,
};
use super::layer::{AssignedTensor, CellRc, GadgetConsumer, Layer, LayerConfig};
#[derive(Clone, Debug)]
pub struct LogisticChip {}
impl<F: PrimeField> Layer<F> for LogisticChip {
fn forward(
&self,
mut layouter: impl Layouter<F>,
tensors: &Vec<AssignedTensor<F>>,
constants: &HashMap<i64, CellRc<F>>,
gadget_config: Rc<GadgetConfig>,
_layer_config: &LayerConfig,
) -> Result<Vec<AssignedTensor<F>>, Error> {
let inp = &tensors[0];
let inp_vec = inp.iter().map(|x| x.as_ref()).collect::<Vec<_>>();
let zero = constants.get(&0).unwrap().as_ref();
let logistic_chip = LogisticGadgetChip::<F>::construct(gadget_config.clone());
let vec_inps = vec![inp_vec];
let constants = vec![zero];
let out = logistic_chip.forward(
layouter.namespace(|| "logistic chip"),
&vec_inps,
&constants,
)?;
let out = out.into_iter().map(|x| Rc::new(x)).collect::<Vec<_>>();
let out = Array::from_shape_vec(IxDyn(inp.shape()), out).unwrap();
Ok(vec![out])
}
}
impl GadgetConsumer for LogisticChip {
fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<crate::gadgets::gadget::GadgetType> {
vec![GadgetType::Logistic, GadgetType::InputLookup]
}
}
use std::{collections::HashMap, rc::Rc};
use halo2_proofs::{circuit::Layouter, halo2curves::ff::PrimeField, plonk::Error};
use ndarray::{Array, IxDyn};
use crate::{
gadgets::{
gadget::{Gadget, GadgetConfig, GadgetType},
max::MaxChip,
},
layers::conv2d::{Conv2DChip, PaddingEnum},
};
use super::layer::{AssignedTensor, CellRc, GadgetConsumer, Layer, LayerConfig};
pub struct MaxPool2DChip<F: PrimeField> {
pub marker: std::marker::PhantomData<F>,
}
impl<F: PrimeField> MaxPool2DChip<F> {
pub fn shape(inp: &AssignedTensor<F>, layer_config: &LayerConfig) -> (usize, usize) {
let params = &layer_config.layer_params;
let (fx, fy) = (params[0], params[1]);
let (fx, fy) = (fx as usize, fy as usize);
let (sx, sy) = (params[2], params[3]);
let (sx, sy) = (sx as usize, sy as usize);
assert_eq!(inp.shape()[0], 1);
let out_shape = Conv2DChip::<F>::out_hw(
inp.shape()[1],
inp.shape()[2],
sx,
sy,
fx,
fy,
PaddingEnum::Valid,
);
out_shape
}
pub fn splat(
inp: &AssignedTensor<F>,
layer_config: &LayerConfig,
) -> Result<Vec<Vec<CellRc<F>>>, Error> {
let params = &layer_config.layer_params;
let (fx, fy) = (params[0], params[1]);
let (fx, fy) = (fx as usize, fy as usize);
let (sx, sy) = (params[2], params[3]);
let (sx, sy) = (sx as usize, sy as usize);
assert_eq!(inp.shape()[0], 1);
let out_shape = Self::shape(inp, layer_config);
let mut splat = vec![];
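// Gather each pooling window; positions that fall outside the input (when a window overruns the
// right or bottom edge) are simply skipped.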
for i in 0..out_shape.0 {
for j in 0..out_shape.1 {
for k in 0..inp.shape()[3] {
let mut tmp = vec![];
for x in 0..fx {
for y in 0..fy {
let x = i * sx + x;
let y = j * sy + y;
if x < inp.shape()[1] && y < inp.shape()[2] {
tmp.push(inp[[0, x, y, k]].clone());
}
}
}
splat.push(tmp);
}
}
}
Ok(splat)
}
}
impl<F: PrimeField> Layer<F> for MaxPool2DChip<F> {
fn forward(
&self,
mut layouter: impl Layouter<F>,
tensors: &Vec<AssignedTensor<F>>,
_constants: &HashMap<i64, CellRc<F>>,
gadget_config: Rc<GadgetConfig>,
layer_config: &LayerConfig,
) -> Result<Vec<AssignedTensor<F>>, Error> {
let inp = &tensors[0];
let splat = Self::splat(inp, layer_config).unwrap();
let max_chip = MaxChip::<F>::construct(gadget_config.clone());
let mut out = vec![];
for i in 0..splat.len() {
let inps = &splat[i];
let inps = inps.iter().map(|x| x.as_ref()).collect();
let max = max_chip
.forward(
layouter.namespace(|| format!("max {}", i)),
&vec![inps],
&vec![],
)
.unwrap();
out.push(max[0].clone());
}
let out = out.into_iter().map(|x| Rc::new(x)).collect();
let out_xy = Self::shape(inp, layer_config);
let out_shape = vec![1, out_xy.0, out_xy.1, inp.shape()[3]];
let out = Array::from_shape_vec(IxDyn(&out_shape), out).unwrap();
Ok(vec![out])
}
}
impl<F: PrimeField> GadgetConsumer for MaxPool2DChip<F> {
fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<GadgetType> {
vec![GadgetType::Max, GadgetType::InputLookup]
}
}
use std::{collections::HashMap, rc::Rc};
use halo2_proofs::{
circuit::{AssignedCell, Layouter, Value},
halo2curves::ff::PrimeField,
plonk::Error,
};
use ndarray::{Array, Axis, IxDyn};
use crate::gadgets::gadget::{GadgetConfig, GadgetType};
use super::{
averager::Averager,
layer::{AssignedTensor, CellRc, GadgetConsumer, Layer, LayerConfig},
};
pub struct MeanChip {}
impl MeanChip {
pub fn get_keep_axis(&self, layer_config: &LayerConfig) -> usize {
let inp_shape = &layer_config.inp_shapes[0];
let out_shape = &layer_config.out_shapes[0];
assert_eq!(inp_shape[0], 1);
assert_eq!(out_shape[0], 1);
let mut keep_axes = (1..inp_shape.len()).collect::<Vec<_>>();
for mean_axis in layer_config.layer_params.iter() {
keep_axes.retain(|&x| x != *mean_axis as usize);
}
assert_eq!(keep_axes.len(), 1);
keep_axes[0]
/*
let mut num_same = 0;
let mut keep_axis: i64 = -1;
for i in 1..inp_shape.len() {
if inp_shape[i] == out_shape[i] {
keep_axis = i as i64;
num_same += 1;
}
}
if keep_axis == -1 {
panic!("All axes are different");
}
if num_same > 1 {
panic!("More than one axis is the same");
}
keep_axis as usize
*/
}
}
impl<F: PrimeField> Averager<F> for MeanChip {
fn splat(&self, input: &AssignedTensor<F>, layer_config: &LayerConfig) -> Vec<Vec<CellRc<F>>> {
assert_eq!(input.shape()[0], 1);
let keep_axis = self.get_keep_axis(layer_config);
let mut splat = vec![];
for i in 0..input.shape()[keep_axis] {
let mut tmp = vec![];
for x in input.index_axis(Axis(keep_axis), i).iter() {
tmp.push(x.clone());
}
splat.push(tmp);
}
splat
}
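// The divisor for the mean is the number of elements reduced per output:
// the product of all input dimensions except the kept axis, assigned here as
// a single advice cell.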
fn get_div_val(
&self,
mut layouter: impl Layouter<F>,
tensors: &Vec<AssignedTensor<F>>,
gadget_config: Rc<GadgetConfig>,
layer_config: &LayerConfig,
) -> Result<AssignedCell<F, F>, Error> {
let inp = &tensors[0];
let keep_axis = self.get_keep_axis(layer_config);
let mut div = 1;
for i in 0..inp.shape().len() {
if i != keep_axis {
div *= inp.shape()[i];
}
}
let div = F::from(div as u64);
let div = layouter.assign_region(
|| "mean div",
|mut region| {
let div = region.assign_advice(
|| "mean div",
gadget_config.columns[0],
0,
|| Value::known(div),
)?;
Ok(div)
},
)?;
Ok(div)
}
}
impl<F: PrimeField> Layer<F> for MeanChip {
fn forward(
&self,
layouter: impl Layouter<F>,
tensors: &Vec<AssignedTensor<F>>,
constants: &HashMap<i64, CellRc<F>>,
gadget_config: Rc<GadgetConfig>,
layer_config: &LayerConfig,
) -> Result<Vec<AssignedTensor<F>>, Error> {
let dived = self.avg_forward(layouter, tensors, constants, gadget_config, layer_config)?;
let out_shape = layer_config.out_shapes[0]
.iter()
.map(|x| *x as usize)
.collect::<Vec<_>>();
let out = Array::from_shape_vec(IxDyn(&out_shape), dived).unwrap();
Ok(vec![out])
}
}
impl GadgetConsumer for MeanChip {
fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<crate::gadgets::gadget::GadgetType> {
vec![
GadgetType::Adder,
GadgetType::VarDivRound,
GadgetType::InputLookup,
]
}
}
use std::{collections::HashMap, rc::Rc};
use halo2_proofs::{circuit::Layouter, halo2curves::ff::PrimeField, plonk::Error};
use crate::gadgets::gadget::GadgetConfig;
use super::layer::{AssignedTensor, CellRc, GadgetConsumer, Layer, LayerConfig};
pub struct NoopChip {}
impl<F: PrimeField> Layer<F> for NoopChip {
fn forward(
&self,
_layouter: impl Layouter<F>,
tensors: &Vec<AssignedTensor<F>>,
_constants: &HashMap<i64, CellRc<F>>,
_gadget_config: Rc<GadgetConfig>,
layer_config: &LayerConfig,
) -> Result<Vec<AssignedTensor<F>>, Error> {
let ret_idx = layer_config.layer_params[0] as usize;
Ok(vec![tensors[ret_idx].clone()])
}
}
impl GadgetConsumer for NoopChip {
fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<crate::gadgets::gadget::GadgetType> {
vec![]
}
}
use std::{collections::HashMap, rc::Rc, vec};
use halo2_proofs::{circuit::Layouter, halo2curves::ff::PrimeField, plonk::Error};
use ndarray::{Array, IxDyn};
use crate::gadgets::{
gadget::{Gadget, GadgetConfig, GadgetType},
nonlinear::pow::PowGadgetChip,
};
use super::layer::{AssignedTensor, CellRc, GadgetConsumer, Layer, LayerConfig};
#[derive(Clone, Debug)]
pub struct PowChip {}
impl<F: PrimeField> Layer<F> for PowChip {
fn forward(
&self,
mut layouter: impl Layouter<F>,
tensors: &Vec<AssignedTensor<F>>,
constants: &HashMap<i64, CellRc<F>>,
gadget_config: Rc<GadgetConfig>,
_layer_config: &LayerConfig,
) -> Result<Vec<AssignedTensor<F>>, Error> {
let inp = &tensors[0];
let inp_vec = inp.iter().map(|x| x.as_ref()).collect::<Vec<_>>();
let zero = constants.get(&0).unwrap().as_ref();
let pow_chip = PowGadgetChip::<F>::construct(gadget_config.clone());
let vec_inps = vec![inp_vec];
let constants = vec![zero];
let out = pow_chip.forward(layouter.namespace(|| "pow chip"), &vec_inps, &constants)?;
let out = out.into_iter().map(|x| Rc::new(x)).collect::<Vec<_>>();
let out = Array::from_shape_vec(IxDyn(inp.shape()), out).unwrap();
Ok(vec![out])
}
}
impl GadgetConsumer for PowChip {
fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<crate::gadgets::gadget::GadgetType> {
vec![GadgetType::Pow, GadgetType::InputLookup]
}
}
use std::{collections::HashMap, rc::Rc, vec};
use halo2_proofs::{circuit::Layouter, halo2curves::ff::PrimeField, plonk::Error};
use ndarray::{Array, IxDyn};
use crate::gadgets::{
gadget::{Gadget, GadgetConfig, GadgetType},
nonlinear::rsqrt::RsqrtGadgetChip,
};
use super::layer::{AssignedTensor, CellRc, GadgetConsumer, Layer, LayerConfig};
pub struct RsqrtChip {}
impl<F: PrimeField> Layer<F> for RsqrtChip {
fn forward(
&self,
mut layouter: impl Layouter<F>,
tensors: &Vec<AssignedTensor<F>>,
constants: &HashMap<i64, CellRc<F>>,
gadget_config: Rc<GadgetConfig>,
layer_config: &LayerConfig,
) -> Result<Vec<AssignedTensor<F>>, Error> {
let inp = &tensors[0];
let mut inp_vec = vec![];
let mask = &layer_config.mask;
let mut mask_map = HashMap::new();
for i in 0..mask.len() / 2 {
mask_map.insert(mask[2 * i], mask[2 * i + 1]);
}
let min_val = gadget_config.min_val;
let min_val = constants.get(&min_val).unwrap().as_ref();
let max_val = gadget_config.max_val;
let max_val = constants.get(&max_val).unwrap().as_ref();
for (i, val) in inp.iter().enumerate() {
let i = i as i64;
if mask_map.contains_key(&i) {
let mask_val = *mask_map.get(&i).unwrap();
if mask_val == 1 {
inp_vec.push(max_val);
} else if mask_val == -1 {
inp_vec.push(min_val);
} else {
panic!();
}
} else {
inp_vec.push(val.as_ref());
}
}
let zero = constants.get(&0).unwrap().as_ref();
let rsqrt_chip = RsqrtGadgetChip::<F>::construct(gadget_config.clone());
let vec_inps = vec![inp_vec];
let constants = vec![zero, min_val, max_val];
let out = rsqrt_chip.forward(layouter.namespace(|| "rsqrt chip"), &vec_inps, &constants)?;
let out = out.into_iter().map(|x| Rc::new(x)).collect::<Vec<_>>();
let out = Array::from_shape_vec(IxDyn(inp.shape()), out).unwrap();
Ok(vec![out])
}
}
impl GadgetConsumer for RsqrtChip {
fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<crate::gadgets::gadget::GadgetType> {
vec![GadgetType::Rsqrt, GadgetType::InputLookup]
}
}
pub mod broadcast;
pub mod concatenation;
pub mod mask_neg_inf;
pub mod pack;
pub mod pad;
pub mod permute;
pub mod reshape;
pub mod resize_nn;
pub mod rotate;
pub mod slice;
pub mod split;
pub mod transpose;
//
// Broadcast is used as a temporary measure to represent the backprop
// of a full-kernel AvgPool2D
//
use std::{collections::HashMap, rc::Rc};
use halo2_proofs::{circuit::Layouter, halo2curves::ff::PrimeField, plonk::Error};
use ndarray::Array;
use crate::{
gadgets::gadget::GadgetConfig,
layers::layer::{AssignedTensor, CellRc, GadgetConsumer},
};
use super::super::layer::{Layer, LayerConfig};
pub struct BroadcastChip {}
// TODO: Fix this after demo
impl<F: PrimeField> Layer<F> for BroadcastChip {
fn forward(
&self,
_layouter: impl Layouter<F>,
tensors: &Vec<AssignedTensor<F>>,
_constants: &HashMap<i64, CellRc<F>>,
_gadget_config: Rc<GadgetConfig>,
layer_config: &LayerConfig,
) -> Result<Vec<AssignedTensor<F>>, Error> {
let inp = &tensors[0];
let shape = inp.shape();
let output_shape = layer_config.out_shapes[0].clone();
// Check that we only broadcast dimensions with shape 1
assert!(shape.len() == output_shape.len());
assert!(shape.len() == 4);
for (inp, outp) in shape.iter().zip(output_shape.iter()) {
if *inp != *outp && !(*inp == 1) {
panic!();
}
}
let mut output_flat = vec![];
for i in 0..output_shape[0] {
for j in 0..output_shape[1] {
for k in 0..output_shape[2] {
for l in 0..output_shape[3] {
let indexes = [i, j, k, l]
.iter()
.enumerate()
.map(|(idx, x)| if shape[idx] == 1 { 0 } else { *x })
.collect::<Vec<_>>();
output_flat.push(inp[[indexes[0], indexes[1], indexes[2], indexes[3]]].clone());
}
}
}
}
println!("Broadcast : {:?} -> {:?}", inp.shape(), output_shape);
let out = Array::from_shape_vec(output_shape, output_flat).unwrap();
Ok(vec![out])
}
}
impl GadgetConsumer for BroadcastChip {
fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<crate::gadgets::gadget::GadgetType> {
vec![]
}
}
use std::{collections::HashMap, rc::Rc};
use halo2_proofs::{circuit::Layouter, halo2curves::ff::PrimeField, plonk::Error};
use ndarray::{concatenate, Axis};
use crate::{
gadgets::gadget::{GadgetConfig, GadgetType},
layers::layer::{AssignedTensor, CellRc, GadgetConsumer},
};
use super::super::layer::{Layer, LayerConfig};
pub struct ConcatenationChip {}
impl<F: PrimeField> Layer<F> for ConcatenationChip {
fn forward(
&self,
_layouter: impl Layouter<F>,
tensors: &Vec<AssignedTensor<F>>,
_constants: &HashMap<i64, CellRc<F>>,
_gadget_config: Rc<GadgetConfig>,
layer_config: &LayerConfig,
) -> Result<Vec<AssignedTensor<F>>, Error> {
let axis = layer_config.layer_params[0] as usize;
let views = tensors.iter().map(|x| x.view()).collect::<Vec<_>>();
// TODO: this is a bit of a hack
let out = concatenate(Axis(axis), views.as_slice()).unwrap_or(tensors[0].clone());
Ok(vec![out])
}
}
impl GadgetConsumer for ConcatenationChip {
fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<GadgetType> {
vec![]
}
}
use std::{collections::HashMap, rc::Rc};
use halo2_proofs::{circuit::Layouter, halo2curves::ff::PrimeField, plonk::Error};
use ndarray::{Array, IxDyn};
use crate::{
gadgets::gadget::GadgetConfig,
layers::layer::{AssignedTensor, CellRc, GadgetConsumer},
};
use super::super::layer::{Layer, LayerConfig};
pub struct MaskNegInfChip {}
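// Substitutes min_val (standing in for negative infinity) wherever the mask,
// broadcast to the input shape, is nonzero; unmasked values pass through.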
impl<F: PrimeField> Layer<F> for MaskNegInfChip {
fn forward(
&self,
_layouter: impl Layouter<F>,
tensors: &Vec<AssignedTensor<F>>,
constants: &HashMap<i64, CellRc<F>>,
gadget_config: Rc<GadgetConfig>,
layer_config: &LayerConfig,
) -> Result<Vec<AssignedTensor<F>>, Error> {
let inp = &tensors[0];
let mask_ndim = layer_config.layer_params[0] as usize;
let mask_shape = layer_config.layer_params[1..mask_ndim + 1]
.iter()
.map(|x| *x as usize)
.collect::<Vec<_>>();
let mask_vec = layer_config.layer_params[mask_ndim + 1..].to_vec();
let mask = Array::from_shape_vec(IxDyn(&mask_shape), mask_vec).unwrap();
let mask = mask.broadcast(inp.raw_dim()).unwrap();
let min_val = gadget_config.min_val;
let min_val = constants.get(&min_val).unwrap().clone();
let mut out_vec = vec![];
for (val, to_mask) in inp.iter().zip(mask.iter()) {
if *to_mask == 0 {
out_vec.push(val.clone());
} else {
out_vec.push(min_val.clone());
}
}
let outp = Array::from_shape_vec(inp.raw_dim(), out_vec).unwrap();
Ok(vec![outp])
}
}
impl GadgetConsumer for MaskNegInfChip {
fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<crate::gadgets::gadget::GadgetType> {
vec![]
}
}
use std::{collections::HashMap, rc::Rc};
use halo2_proofs::{circuit::Layouter, halo2curves::ff::PrimeField, plonk::Error};
use ndarray::{concatenate, Axis};
use crate::{
gadgets::gadget::{GadgetConfig, GadgetType},
layers::layer::{AssignedTensor, CellRc, GadgetConsumer},
};
use super::super::layer::{Layer, LayerConfig};
pub struct PackChip {}
impl<F: PrimeField> Layer<F> for PackChip {
fn forward(
&self,
_layouter: impl Layouter<F>,
tensors: &Vec<AssignedTensor<F>>,
_constants: &HashMap<i64, CellRc<F>>,
_gadget_config: Rc<GadgetConfig>,
layer_config: &LayerConfig,
) -> Result<Vec<AssignedTensor<F>>, Error> {
let axis = layer_config.layer_params[0] as usize;
if axis > 1 {
panic!("Pack only supports axis=0 or axis=1");
}
let expanded = tensors
.into_iter()
.map(|x| x.clone().insert_axis(Axis(axis)))
.collect::<Vec<_>>();
let views = expanded.iter().map(|x| x.view()).collect::<Vec<_>>();
// TODO: in some cases, the pack is unnecessary. Simply return the first tensor in this case
let out = concatenate(Axis(axis), views.as_slice()).unwrap_or(tensors[0].clone());
Ok(vec![out])
}
}
impl GadgetConsumer for PackChip {
fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<GadgetType> {
vec![]
}
}
use std::{collections::HashMap, rc::Rc};
use halo2_proofs::{
circuit::{AssignedCell, Layouter},
halo2curves::ff::PrimeField,
plonk::Error,
};
use ndarray::{Array, Axis, IxDyn, Slice};
use crate::{
gadgets::gadget::GadgetConfig,
layers::layer::{AssignedTensor, GadgetConsumer},
};
use super::super::layer::{Layer, LayerConfig};
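// Pads an ndarray of shared cells with `pad_val`; padding[ax] = [pad_lo, pad_hi]
// gives the amount added before and after axis `ax`.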
pub fn pad<G: Clone>(
input: &Array<Rc<G>, IxDyn>,
padding: Vec<[usize; 2]>,
pad_val: &Rc<G>,
) -> Array<Rc<G>, IxDyn> {
let tmp = input.iter().collect();
let input = Array::from_shape_vec(input.raw_dim(), tmp).unwrap();
assert_eq!(input.ndim(), padding.len());
let mut padded_shape = input.raw_dim();
for (ax, (&ax_len, &[pad_lo, pad_hi])) in input.shape().iter().zip(&padding).enumerate() {
padded_shape[ax] = ax_len + pad_lo + pad_hi;
}
let mut padded = Array::from_elem(padded_shape, pad_val);
let padded_dim = padded.raw_dim();
{
let mut orig_portion = padded.view_mut();
for (ax, &[pad_lo, pad_hi]) in padding.iter().enumerate() {
orig_portion.slice_axis_inplace(
Axis(ax),
Slice::from(pad_lo as isize..padded_dim[ax] as isize - (pad_hi as isize)),
);
}
orig_portion.assign(&input.view());
}
let dim = padded.raw_dim();
let tmp = padded.into_iter().map(|x| x.clone()).collect();
let padded = Array::from_shape_vec(dim, tmp).unwrap();
padded
}
pub struct PadChip {}
pub struct PadConfig {
pub padding: Vec<[usize; 2]>,
}
impl PadChip {
pub fn param_vec_to_config(layer_params: Vec<i64>) -> PadConfig {
assert!(layer_params.len() % 2 == 0);
let padding = layer_params
.chunks(2)
.map(|chunk| [chunk[0] as usize, chunk[1] as usize])
.collect();
PadConfig { padding }
}
}
impl<F: PrimeField> Layer<F> for PadChip {
fn forward(
&self,
_layouter: impl Layouter<F>,
tensors: &Vec<AssignedTensor<F>>,
constants: &HashMap<i64, Rc<AssignedCell<F, F>>>,
_gadget_config: Rc<GadgetConfig>,
layer_config: &LayerConfig,
) -> Result<Vec<AssignedTensor<F>>, Error> {
let input = &tensors[0];
let zero = constants.get(&0).unwrap().clone();
let padding = PadChip::param_vec_to_config(layer_config.layer_params.clone());
let padded = pad(input, padding.padding, &zero);
Ok(vec![padded])
}
}
impl GadgetConsumer for PadChip {
fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<crate::gadgets::gadget::GadgetType> {
vec![]
}
}
use std::{collections::HashMap, rc::Rc};
use halo2_proofs::{circuit::Layouter, halo2curves::ff::PrimeField, plonk::Error};
use ndarray::IxDyn;
use crate::{
gadgets::gadget::GadgetConfig,
layers::layer::{AssignedTensor, CellRc, GadgetConsumer},
};
use super::super::layer::{Layer, LayerConfig};
pub struct PermuteChip {}
impl<F: PrimeField> Layer<F> for PermuteChip {
fn forward(
&self,
_layouter: impl Layouter<F>,
tensors: &Vec<AssignedTensor<F>>,
_constants: &HashMap<i64, CellRc<F>>,
_gadget_config: Rc<GadgetConfig>,
layer_config: &LayerConfig,
) -> Result<Vec<AssignedTensor<F>>, Error> {
let inp = &tensors[0];
let params = &layer_config
.layer_params
.iter()
.map(|x| *x as usize)
.collect::<Vec<_>>()[..];
assert!(inp.ndim() == params.len());
let out = inp.clone();
let out = out.permuted_axes(IxDyn(params));
Ok(vec![out])
}
}
impl GadgetConsumer for PermuteChip {
fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<crate::gadgets::gadget::GadgetType> {
vec![]
}
}
use std::{collections::HashMap, rc::Rc};
use halo2_proofs::{circuit::Layouter, halo2curves::ff::PrimeField, plonk::Error};
use ndarray::Array;
use crate::{
gadgets::gadget::GadgetConfig,
layers::layer::{AssignedTensor, CellRc, GadgetConsumer},
};
use super::super::layer::{Layer, LayerConfig};
pub struct ReshapeChip {}
impl<F: PrimeField> Layer<F> for ReshapeChip {
fn forward(
&self,
_layouter: impl Layouter<F>,
tensors: &Vec<AssignedTensor<F>>,
_constants: &HashMap<i64, CellRc<F>>,
_gadget_config: Rc<GadgetConfig>,
layer_config: &LayerConfig,
) -> Result<Vec<AssignedTensor<F>>, Error> {
let inp = &tensors[0];
let shape = layer_config.out_shapes[0].clone();
println!("Reshape: {:?} -> {:?}", inp.shape(), shape);
let flat = inp.iter().map(|x| x.clone()).collect();
let out = Array::from_shape_vec(shape, flat).unwrap();
Ok(vec![out])
}
}
impl GadgetConsumer for ReshapeChip {
fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<crate::gadgets::gadget::GadgetType> {
vec![]
}
}
use std::{collections::HashMap, rc::Rc};
use halo2_proofs::{circuit::Layouter, halo2curves::ff::PrimeField, plonk::Error};
use ndarray::{Array, IxDyn};
use crate::{
gadgets::gadget::GadgetConfig,
layers::layer::{AssignedTensor, CellRc, GadgetConsumer},
};
use super::super::layer::{Layer, LayerConfig};
pub struct ResizeNNChip {}
// TODO: this does not work in general
impl<F: PrimeField> Layer<F> for ResizeNNChip {
fn forward(
&self,
_layouter: impl Layouter<F>,
tensors: &Vec<AssignedTensor<F>>,
_constants: &HashMap<i64, CellRc<F>>,
_gadget_config: Rc<GadgetConfig>,
layer_config: &LayerConfig,
) -> Result<Vec<AssignedTensor<F>>, Error> {
let inp = &tensors[0];
let output_shape = layer_config.out_shapes[0].clone();
assert_eq!(inp.ndim(), 4);
assert_eq!(inp.shape()[0], 1);
assert_eq!(inp.shape()[3], output_shape[3]);
let mut flat = vec![];
// Do nearest neighbor interpolation over batch, h, w, c
// The interpolation is over h and w
for b in 0..inp.shape()[0] {
for h in 0..output_shape[1] {
let h_in = (h as f64 * (inp.shape()[1] as f64 / output_shape[1] as f64)) as usize;
for w in 0..output_shape[2] {
let w_in = (w as f64 * (inp.shape()[2] as f64 / output_shape[2] as f64)) as usize;
for c in 0..inp.shape()[3] {
flat.push(inp[[b, h_in, w_in, c]].clone());
}
}
}
}
let outp = Array::from_shape_vec(IxDyn(&output_shape), flat).unwrap();
Ok(vec![outp])
}
}
impl GadgetConsumer for ResizeNNChip {
fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<crate::gadgets::gadget::GadgetType> {
vec![]
}
}
// TODO: The implementation is not ideal.
use std::{collections::HashMap, rc::Rc};
use halo2_proofs::{circuit::Layouter, halo2curves::ff::PrimeField, plonk::Error};
use crate::{
gadgets::gadget::GadgetConfig,
layers::layer::{AssignedTensor, CellRc, GadgetConsumer},
};
use super::super::layer::{Layer, LayerConfig};
pub struct RotateChip {}
// Example:
// input:
// [1 2 3 4]
// [5 6 7 8]
//
// params: [1] -- flip axis 1 only
// output:
// [4 3 2 1]
// [8 7 6 5]
impl<F: PrimeField> Layer<F> for RotateChip {
fn forward(
&self,
_layouter: impl Layouter<F>,
tensors: &Vec<AssignedTensor<F>>,
_constants: &HashMap<i64, CellRc<F>>,
_gadget_config: Rc<GadgetConfig>,
layer_config: &LayerConfig,
) -> Result<Vec<AssignedTensor<F>>, Error> {
let inp = &tensors[0];
let params = &layer_config.layer_params;
assert!(inp.shape().len() == 4);
let mut flip = vec![false; 4];
for p in params {
flip[*p as usize] = true;
}
let shape = inp.shape();
println!("Rotate: {:?} -> {:?}", inp.shape(), shape);
let mut out = inp.clone();
for i in 0..shape[0] {
for j in 0..shape[1] {
for k in 0..shape[2] {
for l in 0..shape[3] {
let [ix, jx, kx, lx]: [usize; 4] = [i, j, k, l]
.iter()
.enumerate()
.map(|(idx, x)| if flip[idx] { shape[idx] - 1 - *x } else { *x })
.collect::<Vec<_>>()
.try_into()
.unwrap();
out[[ix, jx, kx, lx]] = inp[[i, j, k, l]].clone();
}
}
}
}
Ok(vec![out])
}
}
impl GadgetConsumer for RotateChip {
fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<crate::gadgets::gadget::GadgetType> {
vec![]
}
}
use std::{collections::HashMap, rc::Rc};
use halo2_proofs::{circuit::Layouter, halo2curves::ff::PrimeField, plonk::Error};
use ndarray::Slice;
use crate::{
gadgets::gadget::{GadgetConfig, GadgetType},
layers::layer::{AssignedTensor, CellRc, GadgetConsumer},
};
use super::super::layer::{Layer, LayerConfig};
pub struct SliceChip {}
impl<F: PrimeField> Layer<F> for SliceChip {
fn forward(
&self,
_layouter: impl Layouter<F>,
tensors: &Vec<AssignedTensor<F>>,
_constants: &HashMap<i64, CellRc<F>>,
_gadget_config: Rc<GadgetConfig>,
layer_config: &LayerConfig,
) -> Result<Vec<AssignedTensor<F>>, Error> {
let params = &layer_config.layer_params;
assert_eq!(params.len() % 2, 0);
let num_axes = params.len() / 2;
let starts = ¶ms[0..num_axes];
let sizes = ¶ms[num_axes..];
let inp = &tensors[0];
let outp = inp.slice_each_axis(|ax| {
let start = starts[ax.axis.0] as usize;
let size = sizes[ax.axis.0];
if size == -1 {
Slice::from(start..)
} else {
Slice::from(start..(start + size as usize))
}
});
Ok(vec![outp.to_owned()])
}
}
impl GadgetConsumer for SliceChip {
fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<GadgetType> {
vec![]
}
}
use std::{collections::HashMap, rc::Rc};
use halo2_proofs::{circuit::Layouter, halo2curves::ff::PrimeField, plonk::Error};
use ndarray::{Axis, Slice};
use crate::{
gadgets::gadget::{GadgetConfig, GadgetType},
layers::layer::{AssignedTensor, CellRc, GadgetConsumer},
};
use super::super::layer::{Layer, LayerConfig};
pub struct SplitChip {}
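// Splits the value tensor (read from tensors[1]) into `num_splits` equal
// slices along the axis given by layer_params[0].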
impl<F: PrimeField> Layer<F> for SplitChip {
fn forward(
&self,
_layouter: impl Layouter<F>,
tensors: &Vec<AssignedTensor<F>>,
_constants: &HashMap<i64, CellRc<F>>,
_gadget_config: Rc<GadgetConfig>,
layer_config: &LayerConfig,
) -> Result<Vec<AssignedTensor<F>>, Error> {
let axis = layer_config.layer_params[0] as usize;
let num_splits = layer_config.layer_params[1] as usize;
let inp = &tensors[1];
let mut out = vec![];
let split_len = inp.shape()[axis] / num_splits;
for i in 0..num_splits {
let slice = inp
.slice_axis(
Axis(axis),
Slice::from((i * split_len)..((i + 1) * split_len)),
)
.to_owned();
out.push(slice.to_owned());
}
Ok(out)
}
}
impl GadgetConsumer for SplitChip {
fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<GadgetType> {
vec![]
}
}
use std::{collections::HashMap, rc::Rc};
use halo2_proofs::{circuit::Layouter, halo2curves::ff::PrimeField, plonk::Error};
use ndarray::{Array, IxDyn};
use crate::{
gadgets::gadget::GadgetConfig,
layers::layer::{AssignedTensor, CellRc, GadgetConsumer},
};
use super::super::layer::{Layer, LayerConfig};
pub struct TransposeChip {}
impl<F: PrimeField> Layer<F> for TransposeChip {
fn forward(
&self,
_layouter: impl Layouter<F>,
tensors: &Vec<AssignedTensor<F>>,
_constants: &HashMap<i64, CellRc<F>>,
_gadget_config: Rc<GadgetConfig>,
layer_config: &LayerConfig,
) -> Result<Vec<AssignedTensor<F>>, Error> {
assert_eq!(layer_config.layer_params.len() % 2, 0);
let ndim = layer_config.layer_params.len() / 2;
let inp_shape = layer_config.layer_params[0..ndim]
.to_vec()
.iter()
.map(|x| *x as usize)
.collect::<Vec<_>>();
let permutation = layer_config.layer_params[ndim..]
.to_vec()
.iter()
.map(|x| *x as usize)
.collect::<Vec<_>>();
let inp = &tensors[0];
// Required because of memory layout issues
let inp_flat = inp.iter().cloned().collect::<Vec<_>>();
let inp = Array::from_shape_vec(IxDyn(&inp_shape), inp_flat).unwrap();
let inp = inp.permuted_axes(IxDyn(&permutation));
Ok(vec![inp])
}
}
impl GadgetConsumer for TransposeChip {
fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<crate::gadgets::gadget::GadgetType> {
vec![]
}
}
use std::{collections::HashMap, rc::Rc, vec};
use halo2_proofs::{
circuit::{AssignedCell, Layouter},
halo2curves::ff::PrimeField,
plonk::Error,
};
use ndarray::{s, Array, IxDyn};
use crate::gadgets::{
adder::AdderChip,
gadget::{Gadget, GadgetConfig, GadgetType},
max::MaxChip,
nonlinear::exp::ExpGadgetChip,
sub_pairs::SubPairsChip,
var_div_big3::VarDivRoundBig3Chip,
};
use super::layer::{AssignedTensor, CellRc, GadgetConsumer, Layer, LayerConfig};
pub struct SoftmaxChip {}
impl SoftmaxChip {
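// Computes a fixed-point softmax over one flattened slice. Masked positions
// are skipped, the max is subtracted for stability, exp is applied via lookup,
// and each exponentiated value is divided by (sum / scale_factor) so the
// result keeps the global scale. Masked positions are filled back in with zero.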
pub fn softmax_flat<F: PrimeField>(
mut layouter: impl Layouter<F>,
constants: &HashMap<i64, CellRc<F>>,
inp_flat: Vec<&AssignedCell<F, F>>,
gadget_config: Rc<GadgetConfig>,
mask: &Vec<i64>,
) -> Result<Vec<AssignedCell<F, F>>, Error> {
let exp_chip = ExpGadgetChip::<F>::construct(gadget_config.clone());
let adder_chip = AdderChip::<F>::construct(gadget_config.clone());
let sub_pairs_chip = SubPairsChip::<F>::construct(gadget_config.clone());
let max_chip = MaxChip::<F>::construct(gadget_config.clone());
let var_div_big_chip = VarDivRoundBig3Chip::<F>::construct(gadget_config.clone());
let zero = constants.get(&0).unwrap().as_ref();
let sf = constants
.get(&(gadget_config.scale_factor as i64))
.unwrap()
.as_ref();
let inp_take = inp_flat
.iter()
.enumerate()
.filter(|(i, _)| mask[*i] == 0)
.map(|(_, x)| *x)
.collect::<Vec<_>>();
let max = max_chip
.forward(
layouter.namespace(|| format!("max")),
&vec![inp_take.clone()],
&vec![zero],
)
.unwrap();
let max = &max[0];
let max_flat = vec![max; inp_take.len()];
let sub = sub_pairs_chip.forward(
layouter.namespace(|| format!("sub")),
&vec![inp_take, max_flat],
&vec![zero],
)?;
let sub = sub.iter().collect::<Vec<_>>();
let exp_slice = exp_chip.forward(
layouter.namespace(|| format!("exp")),
&vec![sub],
&vec![zero],
)?;
let sum = adder_chip.forward(
layouter.namespace(|| format!("sum")),
&vec![exp_slice.iter().collect()],
&vec![zero],
)?;
let sum = sum[0].clone();
let sum_div_sf = var_div_big_chip.forward(
layouter.namespace(|| format!("sum div sf")),
&vec![vec![&sum]],
&vec![zero, sf],
)?;
let sum_div_sf = sum_div_sf[0].clone();
let dived = var_div_big_chip.forward(
layouter.namespace(|| format!("div")),
&vec![exp_slice.iter().collect()],
&vec![zero, &sum_div_sf],
)?;
let mut div_idx = 0;
let dived = mask
.iter()
.map(|x| {
if *x == 1 {
zero.clone()
} else {
let tmp = dived[div_idx].clone();
div_idx = div_idx + 1;
tmp
}
})
.collect();
Ok(dived)
}
}
impl<F: PrimeField> Layer<F> for SoftmaxChip {
fn forward(
&self,
mut layouter: impl Layouter<F>,
tensors: &Vec<AssignedTensor<F>>,
constants: &HashMap<i64, CellRc<F>>,
gadget_config: Rc<GadgetConfig>,
layer_config: &LayerConfig,
) -> Result<Vec<AssignedTensor<F>>, Error> {
let inp = &tensors[0];
assert!(inp.ndim() == 2 || inp.ndim() == 3 || inp.ndim() == 4);
if inp.ndim() == 4 {
assert_eq!(inp.shape()[0], 1);
}
let inp_shape = inp.shape().iter().map(|x| *x).collect::<Vec<_>>();
let mask = if layer_config.layer_params.len() == 0 {
Array::from_shape_fn(IxDyn(&inp_shape), |_| 0)
} else {
let mask_shape_len = layer_config.layer_params[0] as usize;
let mask_shape = layer_config.layer_params[1..(1 + mask_shape_len)]
.iter()
.map(|x| *x as usize)
.collect::<Vec<_>>();
let mask = layer_config.layer_params[(1 + mask_shape_len)..].to_vec();
let mask = Array::from_shape_vec(IxDyn(&mask_shape), mask).unwrap();
let mask = mask.broadcast(IxDyn(&inp_shape)).unwrap().to_owned();
mask
};
let shape = if inp.ndim() == 2 || inp.ndim() == 3 {
inp.shape().iter().map(|x| *x).collect::<Vec<_>>()
} else {
vec![inp.shape()[1], inp.shape()[2], inp.shape()[3]]
};
let inp = inp.to_owned().into_shape(shape.clone()).unwrap();
let mask = mask.into_shape(shape.clone()).unwrap();
let mut outp = vec![];
if inp.ndim() == 2 {
for i in 0..shape[0] {
let inp_slice = inp.slice(s![i, ..]);
let inp_flat = inp_slice.iter().map(|x| x.as_ref()).collect::<Vec<_>>();
let mask_slice = mask.slice(s![i, ..]);
let mask_flat = mask_slice.iter().map(|x| *x as i64).collect::<Vec<_>>();
let dived = Self::softmax_flat(
layouter.namespace(|| format!("softmax {}", i)),
constants,
inp_flat,
gadget_config.clone(),
&mask_flat,
)
.unwrap();
outp.extend(dived);
}
} else if inp.ndim() == 3 {
for i in 0..shape[0] {
for j in 0..shape[1] {
let inp_slice = inp.slice(s![i, j, ..]);
let inp_flat = inp_slice.iter().map(|x| x.as_ref()).collect::<Vec<_>>();
let mask_slice = mask.slice(s![i, j, ..]);
let mask_flat = mask_slice.iter().map(|x| *x as i64).collect::<Vec<_>>();
let dived = Self::softmax_flat(
layouter.namespace(|| format!("softmax {} {}", i, j)),
constants,
inp_flat,
gadget_config.clone(),
&mask_flat,
)
.unwrap();
outp.extend(dived);
}
}
} else {
panic!("Not implemented");
}
let outp = outp.into_iter().map(|x| Rc::new(x)).collect::<Vec<_>>();
let outp = Array::from_shape_vec(IxDyn(inp.shape()), outp).unwrap();
Ok(vec![outp])
}
}
impl GadgetConsumer for SoftmaxChip {
fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<crate::gadgets::gadget::GadgetType> {
vec![
GadgetType::Exp,
GadgetType::Adder,
GadgetType::VarDivRoundBig3,
GadgetType::Max,
GadgetType::SubPairs,
GadgetType::InputLookup,
]
}
}
use std::{collections::HashMap, rc::Rc, vec};
use halo2_proofs::{circuit::Layouter, halo2curves::ff::PrimeField, plonk::Error};
use ndarray::{Array, IxDyn};
use crate::gadgets::{
gadget::{Gadget, GadgetConfig, GadgetType},
nonlinear::sqrt::SqrtGadgetChip,
};
use super::layer::{AssignedTensor, CellRc, GadgetConsumer, Layer, LayerConfig};
pub struct SqrtChip {}
impl<F: PrimeField> Layer<F> for SqrtChip {
fn forward(
&self,
mut layouter: impl Layouter<F>,
tensors: &Vec<AssignedTensor<F>>,
constants: &HashMap<i64, CellRc<F>>,
gadget_config: Rc<GadgetConfig>,
layer_config: &LayerConfig,
) -> Result<Vec<AssignedTensor<F>>, Error> {
let inp = &tensors[0];
let mut inp_vec = vec![];
let mask = &layer_config.mask;
let mut mask_map = HashMap::new();
for i in 0..mask.len() / 2 {
mask_map.insert(mask[2 * i], mask[2 * i + 1]);
}
let min_val = gadget_config.min_val;
let min_val = constants.get(&min_val).unwrap().as_ref();
let max_val = gadget_config.max_val;
let max_val = constants.get(&max_val).unwrap().as_ref();
for (i, val) in inp.iter().enumerate() {
let i = i as i64;
if mask_map.contains_key(&i) {
let mask_val = *mask_map.get(&i).unwrap();
if mask_val == 1 {
inp_vec.push(max_val);
} else if mask_val == -1 {
inp_vec.push(min_val);
} else {
panic!();
}
} else {
inp_vec.push(val.as_ref());
}
}
let zero = constants.get(&0).unwrap().as_ref();
let sqrt_chip = SqrtGadgetChip::<F>::construct(gadget_config.clone());
let vec_inps = vec![inp_vec];
let constants = vec![zero, min_val, max_val];
let out = sqrt_chip.forward(layouter.namespace(|| "sqrt chip"), &vec_inps, &constants)?;
let out = out.into_iter().map(|x| Rc::new(x)).collect::<Vec<_>>();
let out = Array::from_shape_vec(IxDyn(inp.shape()), out).unwrap();
Ok(vec![out])
}
}
impl GadgetConsumer for SqrtChip {
fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<crate::gadgets::gadget::GadgetType> {
vec![GadgetType::Sqrt, GadgetType::InputLookup]
}
}
use std::{collections::HashMap, rc::Rc, vec};
use halo2_proofs::{circuit::Layouter, halo2curves::ff::PrimeField, plonk::Error};
use ndarray::{Array, IxDyn};
use crate::gadgets::{
gadget::{Gadget, GadgetConfig, GadgetType},
square::SquareGadgetChip,
var_div::VarDivRoundChip,
};
use super::layer::{AssignedTensor, CellRc, GadgetConsumer, Layer, LayerConfig};
#[derive(Clone, Debug)]
pub struct SquareChip {}
impl<F: PrimeField> Layer<F> for SquareChip {
fn forward(
&self,
mut layouter: impl Layouter<F>,
tensors: &Vec<AssignedTensor<F>>,
constants: &HashMap<i64, CellRc<F>>,
gadget_config: Rc<GadgetConfig>,
_layer_config: &LayerConfig,
) -> Result<Vec<AssignedTensor<F>>, Error> {
assert_eq!(tensors.len(), 1);
let inp = &tensors[0];
let zero = constants.get(&0).unwrap().as_ref();
let square_chip = SquareGadgetChip::<F>::construct(gadget_config.clone());
let inp_vec = inp.iter().map(|x| x.as_ref()).collect::<Vec<_>>();
let vec_inputs = vec![inp_vec];
let single_inps = vec![zero];
let out = square_chip.forward(
layouter.namespace(|| "square chip"),
&vec_inputs,
&single_inps,
)?;
let var_div_chip = VarDivRoundChip::<F>::construct(gadget_config.clone());
let div = constants
.get(&(gadget_config.scale_factor as i64))
.unwrap()
.as_ref();
let single_inps = vec![zero, div];
let out = out.iter().collect::<Vec<_>>();
let vec_inputs = vec![out];
let out = var_div_chip.forward(
layouter.namespace(|| "var div chip"),
&vec_inputs,
&single_inps,
)?;
let out = out.into_iter().map(|x| Rc::new(x)).collect::<Vec<_>>();
let out = Array::from_shape_vec(IxDyn(inp.shape()), out).unwrap();
Ok(vec![out])
}
}
impl GadgetConsumer for SquareChip {
fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<crate::gadgets::gadget::GadgetType> {
vec![
GadgetType::Square,
GadgetType::VarDivRound,
GadgetType::InputLookup,
]
}
}
use std::{collections::HashMap, rc::Rc, vec};
use halo2_proofs::{circuit::Layouter, halo2curves::ff::PrimeField, plonk::Error};
use ndarray::{Array, IxDyn};
use crate::{
gadgets::{
gadget::{Gadget, GadgetConfig, GadgetType},
squared_diff::SquaredDiffGadgetChip,
var_div::VarDivRoundChip,
},
utils::helpers::broadcast,
};
use super::layer::{AssignedTensor, CellRc, GadgetConsumer, Layer, LayerConfig};
pub struct SquaredDiffChip {}
impl<F: PrimeField> Layer<F> for SquaredDiffChip {
fn forward(
&self,
mut layouter: impl Layouter<F>,
tensors: &Vec<AssignedTensor<F>>,
constants: &HashMap<i64, CellRc<F>>,
gadget_config: Rc<GadgetConfig>,
_layer_config: &LayerConfig,
) -> Result<Vec<AssignedTensor<F>>, Error> {
assert_eq!(tensors.len(), 2);
let inp1 = &tensors[0];
let inp2 = &tensors[1];
let (inp1, inp2) = broadcast(inp1, inp2);
let zero = constants.get(&0).unwrap().as_ref();
let sq_diff_chip = SquaredDiffGadgetChip::<F>::construct(gadget_config.clone());
let inp1_vec = inp1.iter().map(|x| x.as_ref()).collect::<Vec<_>>();
let inp2_vec = inp2.iter().map(|x| x.as_ref()).collect::<Vec<_>>();
let vec_inputs = vec![inp1_vec, inp2_vec];
let tmp_constants = vec![zero];
let out = sq_diff_chip.forward(
layouter.namespace(|| "sq diff chip"),
&vec_inputs,
&tmp_constants,
)?;
let var_div_chip = VarDivRoundChip::<F>::construct(gadget_config.clone());
let div = constants
.get(&(gadget_config.scale_factor as i64))
.unwrap()
.as_ref();
let single_inputs = vec![zero, div];
let out = out.iter().map(|x| x).collect::<Vec<_>>();
let out = var_div_chip.forward(
layouter.namespace(|| "sq diff div"),
&vec![out],
&single_inputs,
)?;
let out = out.into_iter().map(|x| Rc::new(x)).collect::<Vec<_>>();
let out = Array::from_shape_vec(IxDyn(inp1.shape()), out).unwrap();
Ok(vec![out])
}
}
impl GadgetConsumer for SquaredDiffChip {
fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<crate::gadgets::gadget::GadgetType> {
vec![
GadgetType::SquaredDiff,
GadgetType::VarDivRound,
GadgetType::InputLookup,
]
}
}
use std::{collections::HashMap, rc::Rc, vec};
use halo2_proofs::{circuit::Layouter, halo2curves::ff::PrimeField, plonk::Error};
use ndarray::{Array, IxDyn};
use crate::gadgets::{
gadget::{Gadget, GadgetConfig, GadgetType},
nonlinear::tanh::TanhGadgetChip,
};
use super::layer::{AssignedTensor, CellRc, GadgetConsumer, Layer, LayerConfig};
#[derive(Clone, Debug)]
pub struct TanhChip {}
impl<F: PrimeField> Layer<F> for TanhChip {
fn forward(
&self,
mut layouter: impl Layouter<F>,
tensors: &Vec<AssignedTensor<F>>,
constants: &HashMap<i64, CellRc<F>>,
gadget_config: Rc<GadgetConfig>,
_layer_config: &LayerConfig,
) -> Result<Vec<AssignedTensor<F>>, Error> {
let inp = &tensors[0];
let inp_vec = inp.iter().map(|x| x.as_ref()).collect::<Vec<_>>();
let zero = constants.get(&0).unwrap().as_ref();
let tanh_chip = TanhGadgetChip::<F>::construct(gadget_config.clone());
let vec_inps = vec![inp_vec];
let constants = vec![zero];
let out = tanh_chip.forward(layouter.namespace(|| "tanh chip"), &vec_inps, &constants)?;
let out = out.into_iter().map(|x| Rc::new(x)).collect::<Vec<_>>();
let out = Array::from_shape_vec(IxDyn(inp.shape()), out).unwrap();
Ok(vec![out])
}
}
impl GadgetConsumer for TanhChip {
fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<crate::gadgets::gadget::GadgetType> {
vec![GadgetType::Tanh, GadgetType::InputLookup]
}
}
use std::{collections::HashMap, rc::Rc, vec};
use halo2_proofs::{circuit::Layouter, halo2curves::ff::PrimeField, plonk::Error};
use ndarray::{Array, IxDyn};
use crate::gadgets::{
gadget::{Gadget, GadgetConfig, GadgetType},
update::UpdateGadgetChip,
};
use super::layer::{AssignedTensor, CellRc, GadgetConsumer, Layer, LayerConfig};
#[derive(Clone, Debug)]
pub struct UpdateChip {}
impl<F: PrimeField + Ord> Layer<F> for UpdateChip {
fn forward(
&self,
mut layouter: impl Layouter<F>,
tensors: &Vec<AssignedTensor<F>>,
constants: &HashMap<i64, CellRc<F>>,
gadget_config: Rc<GadgetConfig>,
_layer_config: &LayerConfig,
) -> Result<Vec<AssignedTensor<F>>, Error> {
let w = &tensors[0];
let dw = &tensors[1];
let zero = constants.get(&0).unwrap().as_ref();
let update_chip = UpdateGadgetChip::<F>::construct((*gadget_config).clone());
let flattened_w = w.into_iter().map(|x| (**x).clone()).collect::<Vec<_>>();
let flattened_dw = dw.into_iter().map(|x| (**x).clone()).collect::<Vec<_>>();
let flattened_w_ref = flattened_w.iter().collect::<Vec<_>>();
let flattened_dw_ref = flattened_dw.iter().collect::<Vec<_>>();
let vec_inps = vec![flattened_w_ref, flattened_dw_ref];
let constants = vec![zero];
let out = update_chip.forward(layouter.namespace(|| "update chip"), &vec_inps, &constants)?;
let out = out.into_iter().map(|x| Rc::new(x)).collect::<Vec<_>>();
let out = Array::from_shape_vec(IxDyn(w.shape()), out).unwrap();
Ok(vec![out])
}
}
impl GadgetConsumer for UpdateChip {
fn used_gadgets(&self, _layer_params: Vec<i64>) -> Vec<crate::gadgets::gadget::GadgetType> {
vec![GadgetType::Update]
}
}
#![feature(int_roundings)]
pub mod commitments;
pub mod gadgets;
pub mod layers;
pub mod model;
pub mod utils;
use std::{
collections::{BTreeMap, BTreeSet, HashMap},
marker::PhantomData,
rc::Rc,
sync::{Arc, Mutex},
};
use halo2_proofs::{
circuit::{Layouter, SimpleFloorPlanner, Value},
halo2curves::ff::{FromUniformBytes, PrimeField},
plonk::{Advice, Circuit, Column, ConstraintSystem, Error, Instance},
};
use lazy_static::lazy_static;
use ndarray::{Array, IxDyn};
use num_bigint::BigUint;
use crate::{
commitments::{
commit::Commit,
packer::PackerChip,
poseidon_commit::{PoseidonCommitChip, L, RATE, WIDTH},
},
gadgets::{
add_pairs::AddPairsChip,
adder::AdderChip,
bias_div_round_relu6::BiasDivRoundRelu6Chip,
dot_prod::DotProductChip,
gadget::{Gadget, GadgetConfig, GadgetType},
input_lookup::InputLookupChip,
max::MaxChip,
mul_pairs::MulPairsChip,
nonlinear::{exp::ExpGadgetChip, pow::PowGadgetChip, relu::ReluChip, tanh::TanhGadgetChip},
nonlinear::{logistic::LogisticGadgetChip, rsqrt::RsqrtGadgetChip, sqrt::SqrtGadgetChip},
sqrt_big::SqrtBigChip,
square::SquareGadgetChip,
squared_diff::SquaredDiffGadgetChip,
sub_pairs::SubPairsChip,
update::UpdateGadgetChip,
var_div::VarDivRoundChip,
var_div_big::VarDivRoundBigChip,
var_div_big3::VarDivRoundBig3Chip,
},
layers::{
arithmetic::{add::AddChip, div_var::DivVarChip, mul::MulChip, sub::SubChip},
avg_pool_2d::AvgPool2DChip,
batch_mat_mul::BatchMatMulChip,
conv2d::Conv2DChip,
dag::{DAGLayerChip, DAGLayerConfig},
fully_connected::{FullyConnectedChip, FullyConnectedConfig},
layer::{AssignedTensor, CellRc, GadgetConsumer, LayerConfig, LayerType},
logistic::LogisticChip,
max_pool_2d::MaxPool2DChip,
mean::MeanChip,
noop::NoopChip,
pow::PowChip,
rsqrt::RsqrtChip,
shape::{
broadcast::BroadcastChip, concatenation::ConcatenationChip, mask_neg_inf::MaskNegInfChip,
pack::PackChip, pad::PadChip, permute::PermuteChip, reshape::ReshapeChip,
resize_nn::ResizeNNChip, rotate::RotateChip, slice::SliceChip, split::SplitChip,
transpose::TransposeChip,
},
softmax::SoftmaxChip,
sqrt::SqrtChip,
square::SquareChip,
squared_diff::SquaredDiffChip,
tanh::TanhChip,
update::UpdateChip,
},
utils::{
helpers::{convert_to_bigint, RAND_START_IDX},
loader::{load_model_msgpack, ModelMsgpack},
},
};
lazy_static! {
pub static ref GADGET_CONFIG: Mutex<GadgetConfig> = Mutex::new(GadgetConfig::default());
pub static ref PUBLIC_VALS: Mutex<Vec<BigUint>> = Mutex::new(vec![]);
}
pub struct ModelCircuit<F: PrimeField> {
pub used_gadgets: Arc<BTreeSet<GadgetType>>,
pub dag_config: DAGLayerConfig,
pub tensors: BTreeMap<i64, Array<F, IxDyn>>,
pub commit_before: Vec<Vec<i64>>,
pub commit_after: Vec<Vec<i64>>,
pub k: usize,
pub bits_per_elem: usize,
pub inp_idxes: Vec<i64>,
pub num_random: i64,
}
pub struct ModelConfig<F: PrimeField + Ord + FromUniformBytes<64>> {
pub gadget_config: Rc<GadgetConfig>,
pub public_col: Column<Instance>,
pub hasher: Option<PoseidonCommitChip<F, WIDTH, RATE, L>>,
pub _marker: PhantomData<F>,
}
impl<F: PrimeField + Ord + FromUniformBytes<64>> ModelCircuit<F> {
pub fn assign_tensors_map(
&self,
mut layouter: impl Layouter<F>,
columns: &Vec<Column<Advice>>,
tensors: &BTreeMap<i64, Array<F, IxDyn>>,
) -> Result<BTreeMap<i64, AssignedTensor<F>>, Error> {
let tensors = layouter.assign_region(
|| "asssignment",
|mut region| {
let mut cell_idx = 0;
let mut assigned_tensors = BTreeMap::new();
for (tensor_idx, tensor) in tensors.iter() {
let mut flat = vec![];
for val in tensor.iter() {
let row_idx = cell_idx / columns.len();
let col_idx = cell_idx % columns.len();
let cell = region
.assign_advice(
|| "assignment",
columns[col_idx],
row_idx,
|| Value::known(*val),
)
.unwrap();
flat.push(Rc::new(cell));
cell_idx += 1;
}
let tensor = Array::from_shape_vec(tensor.shape(), flat).unwrap();
assigned_tensors.insert(*tensor_idx, tensor);
}
Ok(assigned_tensors)
},
)?;
Ok(tensors)
}
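// Flattens the sparse map of assigned tensors into a Vec indexed by tensor id;
// ids with no tensor are filled with the smallest assigned tensor as a
// placeholder so indexing stays dense.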
pub fn tensor_map_to_vec(
&self,
tensor_map: &BTreeMap<i64, Array<CellRc<F>, IxDyn>>,
) -> Result<Vec<AssignedTensor<F>>, Error> {
let smallest_tensor = tensor_map
.iter()
.min_by_key(|(_, tensor)| tensor.len())
.unwrap()
.1;
let max_tensor_key = tensor_map
.iter()
.max_by_key(|(key, _)| *key)
.unwrap()
.0
.clone();
let mut tensors = vec![];
for i in 0..max_tensor_key + 1 {
let tensor = tensor_map.get(&i).unwrap_or(smallest_tensor);
tensors.push(tensor.clone());
}
Ok(tensors)
}
pub fn assign_tensors_vec(
&self,
mut layouter: impl Layouter<F>,
columns: &Vec<Column<Advice>>,
tensors: &BTreeMap<i64, Array<F, IxDyn>>,
) -> Result<Vec<AssignedTensor<F>>, Error> {
let tensor_map = self
.assign_tensors_map(
layouter.namespace(|| "assign_tensors_map"),
columns,
tensors,
)
.unwrap();
self.tensor_map_to_vec(&tensor_map)
}
pub fn assign_constants(
&self,
mut layouter: impl Layouter<F>,
gadget_config: Rc<GadgetConfig>,
) -> Result<HashMap<i64, CellRc<F>>, Error> {
let sf = gadget_config.scale_factor;
let min_val = gadget_config.min_val;
let max_val = gadget_config.max_val;
let constants = layouter.assign_region(
|| "constants",
|mut region| {
let mut constants: HashMap<i64, CellRc<F>> = HashMap::new();
let vals = vec![0 as i64, 1, sf as i64, min_val, max_val];
let shift_val_i64 = -min_val * 2;
let shift_val_f = F::from(shift_val_i64 as u64);
for (i, val) in vals.iter().enumerate() {
let cell = region.assign_fixed(
|| format!("constant_{}", i),
gadget_config.fixed_columns[0],
i,
|| Value::known(F::from((val + shift_val_i64) as u64) - shift_val_f),
)?;
constants.insert(*val, Rc::new(cell));
}
let r_base = F::from(0x123456789abcdef);
let mut r = r_base.clone();
for i in 0..self.num_random {
let rand = region.assign_fixed(
|| format!("rand_{}", i),
gadget_config.fixed_columns[0],
constants.len(),
|| Value::known(r),
)?;
r = r * r_base;
constants.insert(RAND_START_IDX + (i as i64), Rc::new(rand));
}
Ok(constants)
},
)?;
Ok(constants)
}
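// Assigns the same constants and per-circuit randomness again, this time in
// advice columns, and constrains each cell to equal its counterpart from
// assign_constants.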
pub fn assign_constants2(
&self,
mut layouter: impl Layouter<F>,
gadget_config: Rc<GadgetConfig>,
fixed_constants: &HashMap<i64, CellRc<F>>,
) -> Result<HashMap<i64, CellRc<F>>, Error> {
let sf = gadget_config.scale_factor;
let min_val = gadget_config.min_val;
let max_val = gadget_config.max_val;
let constants = layouter.assign_region(
|| "constants",
|mut region| {
let mut constants: HashMap<i64, CellRc<F>> = HashMap::new();
let vals = vec![0 as i64, 1, sf as i64, min_val, max_val];
let shift_val_i64 = -min_val * 2;
let shift_val_f = F::from(shift_val_i64 as u64);
for (i, val) in vals.iter().enumerate() {
let assignment_idx = i as usize;
let row_idx = assignment_idx / gadget_config.columns.len();
let col_idx = assignment_idx % gadget_config.columns.len();
let cell = region.assign_advice(
|| format!("constant_{}", i),
gadget_config.columns[col_idx],
row_idx,
|| Value::known(F::from((val + shift_val_i64) as u64) - shift_val_f),
)?;
constants.insert(*val, Rc::new(cell));
}
let r_base = F::from(0x123456789abcdef);
let mut r = r_base.clone();
for i in 0..self.num_random {
let assignment_idx = constants.len();
let row_idx = assignment_idx / gadget_config.columns.len();
let col_idx = assignment_idx % gadget_config.columns.len();
let rand = region.assign_advice(
|| format!("rand_{}", i),
gadget_config.columns[col_idx],
row_idx,
|| Value::known(r),
)?;
r = r * r_base;
constants.insert(RAND_START_IDX + (i as i64), Rc::new(rand));
}
for (k, v) in fixed_constants.iter() {
let v2 = constants.get(k).unwrap();
region.constrain_equal(v.cell(), v2.cell()).unwrap();
}
Ok(constants)
},
)?;
Ok(constants)
}
pub fn generate_from_file(config_file: &str, inp_file: &str) -> ModelCircuit<F> {
let config = load_model_msgpack(config_file, inp_file);
Self::generate_from_msgpack(config, true)
}
pub fn generate_from_msgpack(config: ModelMsgpack, panic_empty_tensor: bool) -> ModelCircuit<F> {
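// Maps a possibly negative i64 into the field by shifting through a positive
// bias, since F::from only accepts u64.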
let to_field = |x: i64| {
let bias = 1 << 31;
let x_pos = x + bias;
F::from(x_pos as u64) - F::from(bias as u64)
};
let match_layer = |x: &str| match x {
"AveragePool2D" => LayerType::AvgPool2D,
"Add" => LayerType::Add,
"BatchMatMul" => LayerType::BatchMatMul,
"Broadcast" => LayerType::Broadcast,
"Concatenation" => LayerType::Concatenation,
"Conv2D" => LayerType::Conv2D,
"Div" => LayerType::DivFixed,
"DivVar" => LayerType::DivVar,
"FullyConnected" => LayerType::FullyConnected,
"Logistic" => LayerType::Logistic,
"MaskNegInf" => LayerType::MaskNegInf,
"MaxPool2D" => LayerType::MaxPool2D,
"Mean" => LayerType::Mean,
"Mul" => LayerType::Mul,
"Noop" => LayerType::Noop,
"Pack" => LayerType::Pack,
"Pad" => LayerType::Pad,
"Pow" => LayerType::Pow,
"Permute" => LayerType::Permute,
"Reshape" => LayerType::Reshape,
"ResizeNearestNeighbor" => LayerType::ResizeNN,
"Rotate" => LayerType::Rotate,
"Rsqrt" => LayerType::Rsqrt, |
"Slice" => LayerType::Slice,
"Softmax" => LayerType::Softmax,
"Split" => LayerType::Split,
"Sqrt" => LayerType::Sqrt,
"Square" => LayerType::Square,
"SquaredDifference" => LayerType::SquaredDifference,
"Sub" => LayerType::Sub,
"Tanh" => LayerType::Tanh,
"Transpose" => LayerType::Transpose,
"Update" => LayerType::Update,
_ => panic!("unknown op: {}", x),
};
let mut tensors = BTreeMap::new();
for flat in config.tensors {
let value_flat = flat.data.iter().map(|x| to_field(*x)).collect::<Vec<_>>();
let shape = flat.shape.iter().map(|x| *x as usize).collect::<Vec<_>>();
let num_el: usize = shape.iter().product();
if panic_empty_tensor && num_el != value_flat.len() {
panic!("tensor shape and data length mismatch");
}
if num_el == value_flat.len() {
let tensor = Array::from_shape_vec(IxDyn(&shape), value_flat).unwrap();
tensors.insert(flat.idx, tensor);
} else {
};
}
let i64_to_usize = |x: &Vec<i64>| x.iter().map(|x| *x as usize).collect::<Vec<_>>();
let mut used_gadgets = BTreeSet::new();
let dag_config = {
let ops = config
.layers
.iter()
.map(|layer| {
let layer_type = match_layer(&layer.layer_type);
let layer_gadgets = match layer_type {
LayerType::Add => Box::new(AddChip {}) as Box<dyn GadgetConsumer>,
LayerType::AvgPool2D => Box::new(AvgPool2DChip {}) as Box<dyn GadgetConsumer>,
LayerType::BatchMatMul => Box::new(BatchMatMulChip {}) as Box<dyn GadgetConsumer>,
LayerType::Broadcast => Box::new(BroadcastChip {}) as Box<dyn GadgetConsumer>,
LayerType::Concatenation => Box::new(ConcatenationChip {}) as Box<dyn GadgetConsumer>,
LayerType::DivFixed => Box::new(ConcatenationChip {}) as Box<dyn GadgetConsumer>,
LayerType::DivVar => Box::new(DivVarChip {}) as Box<dyn GadgetConsumer>,
LayerType::Conv2D => Box::new(Conv2DChip {
config: LayerConfig::default(),
_marker: PhantomData::<F>,
}) as Box<dyn GadgetConsumer>,
LayerType::FullyConnected => Box::new(FullyConnectedChip {
config: FullyConnectedConfig { normalize: true },
_marker: PhantomData::<F>,
}) as Box<dyn GadgetConsumer>,
LayerType::Logistic => Box::new(LogisticChip {}) as Box<dyn GadgetConsumer>,
LayerType::MaskNegInf => Box::new(MaskNegInfChip {}) as Box<dyn GadgetConsumer>,
LayerType::MaxPool2D => Box::new(MaxPool2DChip {
marker: PhantomData::<F>,
}) as Box<dyn GadgetConsumer>,
LayerType::Mean => Box::new(MeanChip {}) as Box<dyn GadgetConsumer>,
LayerType::Mul => Box::new(MulChip {}) as Box<dyn GadgetConsumer>,
LayerType::Noop => Box::new(NoopChip {}) as Box<dyn GadgetConsumer>,
LayerType::Pack => Box::new(PackChip {}) as Box<dyn GadgetConsumer>,
LayerType::Pad => Box::new(PadChip {}) as Box<dyn GadgetConsumer>,
LayerType::Pow => Box::new(PowChip {}) as Box<dyn GadgetConsumer>,
LayerType::Permute => Box::new(PermuteChip {}) as Box<dyn GadgetConsumer>,
LayerType::Reshape => Box::new(ReshapeChip {}) as Box<dyn GadgetConsumer>,
LayerType::ResizeNN => Box::new(ResizeNNChip {}) as Box<dyn GadgetConsumer>,
LayerType::Rotate => Box::new(RotateChip {}) as Box<dyn GadgetConsumer>,
LayerType::Rsqrt => Box::new(RsqrtChip {}) as Box<dyn GadgetConsumer>,
LayerType::Slice => Box::new(SliceChip {}) as Box<dyn GadgetConsumer>,
LayerType::Softmax => Box::new(SoftmaxChip {}) as Box<dyn GadgetConsumer>,
LayerType::Split => Box::new(SplitChip {}) as Box<dyn GadgetConsumer>,
LayerType::Sqrt => Box::new(SqrtChip {}) as Box<dyn GadgetConsumer>,
LayerType::Square => Box::new(SquareChip {}) as Box<dyn GadgetConsumer>,
LayerType::SquaredDifference => Box::new(SquaredDiffChip {}) as Box<dyn GadgetConsumer>,
LayerType::Sub => Box::new(SubChip {}) as Box<dyn GadgetConsumer>,
LayerType::Tanh => Box::new(TanhChip {}) as Box<dyn GadgetConsumer>,
LayerType::Transpose => Box::new(TransposeChip {}) as Box<dyn GadgetConsumer>,
LayerType::Update => Box::new(UpdateChip {}) as Box<dyn GadgetConsumer>,
}
.used_gadgets(layer.params.clone());
for gadget in layer_gadgets {
used_gadgets.insert(gadget);
}
LayerConfig {
layer_type,
layer_params: layer.params.clone(),
inp_shapes: layer.inp_shapes.iter().map(|x| i64_to_usize(x)).collect(),
out_shapes: layer.out_shapes.iter().map(|x| i64_to_usize(x)).collect(),
mask: layer.mask.clone(),
}
})
.collect::<Vec<_>>();
let inp_idxes = config
.layers
.iter()
.map(|layer| i64_to_usize(&layer.inp_idxes))
.collect::<Vec<_>>();
let out_idxes = config
.layers
.iter()
.map(|layer| i64_to_usize(&layer.out_idxes))
.collect::<Vec<_>>();
let final_out_idxes = config
.out_idxes
.iter()
.map(|x| *x as usize)
.collect::<Vec<_>>();
DAGLayerConfig {
inp_idxes,
out_idxes,
ops,
final_out_idxes,
}
};
used_gadgets.insert(GadgetType::InputLookup);
let used_gadgets = Arc::new(used_gadgets);
let gadget = &GADGET_CONFIG;
let cloned_gadget = gadget.lock().unwrap().clone();
*gadget.lock().unwrap() = GadgetConfig {
scale_factor: config.global_sf as u64,
shift_min_val: -(config.global_sf * config.global_sf * (1 << 17)),
div_outp_min_val: -(1 << (config.k - 1)),
min_val: -(1 << (config.k - 1)),
max_val: (1 << (config.k - 1)) - 10,
k: config.k as usize,
num_rows: (1 << config.k) - 10 + 1,
num_cols: config.num_cols as usize,
used_gadgets: used_gadgets.clone(),
commit_before: config.commit_before.clone().unwrap_or(vec![]),
commit_after: config.commit_after.clone().unwrap_or(vec![]),
use_selectors: config.use_selectors.unwrap_or(true),
num_bits_per_elem: config.bits_per_elem.unwrap_or(config.k),
..cloned_gadget
};
ModelCircuit {
tensors,
dag_config,
used_gadgets,
k: config.k as usize,
bits_per_elem: config.bits_per_elem.unwrap_or(config.k) as usize,
inp_idxes: config.inp_idxes.clone(),
commit_after: config.commit_after.unwrap_or(vec![]),
commit_before: config.commit_before.unwrap_or(vec![]),
num_random: config.num_random.unwrap_or(0),
}
}
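// Assigns the given tensors, packs their elements into field elements of
// bits_per_elem bits each, and Poseidon-commits to the packed values.
// Returns the assigned tensors together with the single commitment cell.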
pub fn assign_and_commit(
&self,
mut layouter: impl Layouter<F>,
constants: &HashMap<i64, CellRc<F>>,
config: &ModelConfig<F>,
tensors: &BTreeMap<i64, Array<F, IxDyn>>,
) -> (BTreeMap<i64, AssignedTensor<F>>, CellRc<F>) {
let num_bits = self.bits_per_elem;
let packer_config = PackerChip::<F>::construct(num_bits, config.gadget_config.as_ref());
let packer_chip = PackerChip::<F> {
config: packer_config,
};
let (tensor_map, packed) = packer_chip
.assign_and_pack(
layouter.namespace(|| "packer"),
config.gadget_config.clone(),
constants,
tensors,
)
.unwrap();
let zero = constants.get(&0).unwrap().clone();
let commit_chip = config.hasher.clone().unwrap();
let commitments = commit_chip
.commit(
layouter.namespace(|| "commit"),
config.gadget_config.clone(),
constants,
&packed,
zero.clone(),
)
.unwrap();
assert_eq!(commitments.len(), 1);
(tensor_map, commitments[0].clone())
}
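// Like assign_and_commit, but for tensors whose cells are already assigned:
// the packer copies the existing cells and only the commitment is returned.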
pub fn copy_and_commit(
&self,
mut layouter: impl Layouter<F>,
constants: &HashMap<i64, CellRc<F>>,
config: &ModelConfig<F>,
tensors: &BTreeMap<i64, AssignedTensor<F>>,
) -> CellRc<F> {
let num_bits = self.bits_per_elem;
let packer_config = PackerChip::<F>::construct(num_bits, config.gadget_config.as_ref());
let packer_chip = PackerChip::<F> {
config: packer_config,
};
let packed = packer_chip
.copy_and_pack(
layouter.namespace(|| "packer"),
config.gadget_config.clone(),
constants,
tensors,
)
.unwrap();
let zero = constants.get(&0).unwrap().clone();
let commit_chip = config.hasher.clone().unwrap();
let commitments = commit_chip
.commit(
layouter.namespace(|| "commit"),
config.gadget_config.clone(),
constants,
&packed,
zero.clone(),
)
.unwrap();
assert_eq!(commitments.len(), 1);
commitments[0].clone()
}
}
impl<F: PrimeField + Ord + FromUniformBytes<64>> Circuit<F> for ModelCircuit<F> {
type Config = ModelConfig<F>;
type FloorPlanner = SimpleFloorPlanner;
type Params = ();
fn without_witnesses(&self) -> Self {
todo!()
}
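// Creates the advice, instance, and fixed columns, lets every gadget used by
// the model configure its gates and lookups, and sets up the packer and
// Poseidon hasher only when commitments are requested.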
fn configure(meta: &mut ConstraintSystem<F>) -> Self::Config {
let mut gadget_config = crate::model::GADGET_CONFIG.lock().unwrap().clone();
let columns = (0..gadget_config.num_cols)
.map(|_| meta.advice_column())
.collect::<Vec<_>>();
for col in columns.iter() {
meta.enable_equality(*col);
}
gadget_config.columns = columns;
let public_col = meta.instance_column();
meta.enable_equality(public_col);
gadget_config.fixed_columns = vec![meta.fixed_column()];
meta.enable_equality(gadget_config.fixed_columns[0]);
gadget_config = InputLookupChip::<F>::configure(meta, gadget_config);
let used_gadgets = gadget_config.used_gadgets.clone();
for gadget_type in used_gadgets.iter() {
gadget_config = match gadget_type {
GadgetType::AddPairs => AddPairsChip::<F>::configure(meta, gadget_config),
GadgetType::Adder => AdderChip::<F>::configure(meta, gadget_config),
GadgetType::BiasDivRoundRelu6 => BiasDivRoundRelu6Chip::<F>::configure(meta, gadget_config),
GadgetType::BiasDivFloorRelu6 => panic!(),
GadgetType::DotProduct => DotProductChip::<F>::configure(meta, gadget_config),
GadgetType::Exp => ExpGadgetChip::<F>::configure(meta, gadget_config),
GadgetType::Logistic => LogisticGadgetChip::<F>::configure(meta, gadget_config),
GadgetType::Max => MaxChip::<F>::configure(meta, gadget_config),
GadgetType::MulPairs => MulPairsChip::<F>::configure(meta, gadget_config),
GadgetType::Pow => PowGadgetChip::<F>::configure(meta, gadget_config),
GadgetType::Relu => ReluChip::<F>::configure(meta, gadget_config),
GadgetType::Rsqrt => RsqrtGadgetChip::<F>::configure(meta, gadget_config),
GadgetType::Sqrt => SqrtGadgetChip::<F>::configure(meta, gadget_config),
GadgetType::SqrtBig => SqrtBigChip::<F>::configure(meta, gadget_config),
GadgetType::Square => SquareGadgetChip::<F>::configure(meta, gadget_config),
GadgetType::SquaredDiff => SquaredDiffGadgetChip::<F>::configure(meta, gadget_config),
GadgetType::SubPairs => SubPairsChip::<F>::configure(meta, gadget_config),
GadgetType::Tanh => TanhGadgetChip::<F>::configure(meta, gadget_config),
GadgetType::VarDivRound => VarDivRoundChip::<F>::configure(meta, gadget_config),
GadgetType::VarDivRoundBig => VarDivRoundBigChip::<F>::configure(meta, gadget_config),
GadgetType::VarDivRoundBig3 => VarDivRoundBig3Chip::<F>::configure(meta, gadget_config),
GadgetType::InputLookup => gadget_config,
GadgetType::Update => UpdateGadgetChip::<F>::configure(meta, gadget_config),
GadgetType::Packer => panic!(),
};
}
let hasher = if gadget_config.commit_before.len() + gadget_config.commit_after.len() > 0 {
let packer_config =
PackerChip::<F>::construct(gadget_config.num_bits_per_elem as usize, &gadget_config);
gadget_config = PackerChip::<F>::configure(meta, packer_config, gadget_config);
let input = gadget_config.columns[0..L].try_into().unwrap();
let state = gadget_config.columns[L..L + WIDTH].try_into().unwrap();
let partial_sbox = gadget_config.columns[L + WIDTH].into();
Some(PoseidonCommitChip::<F, WIDTH, RATE, L>::configure(
meta,
input,
state,
partial_sbox,
))
} else {
None
};
ModelConfig {
gadget_config: gadget_config.into(),
public_col,
hasher,
_marker: PhantomData,
}
}
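// Loads the lookup tables for every used gadget, assigns the constants (fixed
// and advice copies), assigns and optionally commits to the model tensors, and
// then runs the DAG of layers.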
fn synthesize(&self, config: Self::Config, mut layouter: impl Layouter<F>) -> Result<(), Error> {
let gadget_rc: Rc<GadgetConfig> = config.gadget_config.clone().into();
for gadget in self.used_gadgets.iter() {
match gadget {
GadgetType::AddPairs => {
let chip = AddPairsChip::<F>::construct(gadget_rc.clone());
chip.load_lookups(layouter.namespace(|| "add pairs lookup"))?;
}
GadgetType::Adder => {
let chip = AdderChip::<F>::construct(gadget_rc.clone());
chip.load_lookups(layouter.namespace(|| "adder lookup"))?;
}
GadgetType::BiasDivRoundRelu6 => {
let chip = BiasDivRoundRelu6Chip::<F>::construct(gadget_rc.clone());
chip.load_lookups(layouter.namespace(|| "bias div round relu6 lookup"))?;
}
GadgetType::DotProduct => {
let chip = DotProductChip::<F>::construct(gadget_rc.clone());
chip.load_lookups(layouter.namespace(|| "dot product lookup"))?;
}
GadgetType::VarDivRound => {
let chip = VarDivRoundChip::<F>::construct(gadget_rc.clone());
chip.load_lookups(layouter.namespace(|| "var div lookup"))?;
}
GadgetType::Pow => {
let chip = PowGadgetChip::<F>::construct(gadget_rc.clone());
chip.load_lookups(layouter.namespace(|| "pow lookup"))?;
}
GadgetType::Relu => {
let chip = ReluChip::<F>::construct(gadget_rc.clone());
chip.load_lookups(layouter.namespace(|| "relu lookup"))?;
}
GadgetType::Rsqrt => {
let chip = RsqrtGadgetChip::<F>::construct(gadget_rc.clone());
chip.load_lookups(layouter.namespace(|| "rsqrt lookup"))?;
}
GadgetType::Sqrt => {
let chip = SqrtGadgetChip::<F>::construct(gadget_rc.clone());
chip.load_lookups(layouter.namespace(|| "sqrt lookup"))?;
}
GadgetType::Tanh => {
let chip = TanhGadgetChip::<F>::construct(gadget_rc.clone());
chip.load_lookups(layouter.namespace(|| "tanh lookup"))?;
}
GadgetType::Exp => {
let chip = ExpGadgetChip::<F>::construct(gadget_rc.clone());
chip.load_lookups(layouter.namespace(|| "exp lookup"))?;
}
GadgetType::Logistic => {
let chip = LogisticGadgetChip::<F>::construct(gadget_rc.clone());
chip.load_lookups(layouter.namespace(|| "logistic lookup"))?;
}
GadgetType::InputLookup => {
let chip = InputLookupChip::<F>::construct(gadget_rc.clone());
chip.load_lookups(layouter.namespace(|| "input lookup"))?;
}
GadgetType::VarDivRoundBig => {}
GadgetType::VarDivRoundBig3 => {}
GadgetType::Max => {}
GadgetType::MulPairs => {}
GadgetType::SqrtBig => {}
GadgetType::Square => {}
GadgetType::SquaredDiff => {}
GadgetType::SubPairs => {}
GadgetType::Update => {}
_ => panic!("unsupported gadget {:?}", gadget),
}
}
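// Fixed constants are assigned once (in two passes) and shared by every layer.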
let constants_base = self
.assign_constants(
layouter.namespace(|| "constants"),
config.gadget_config.clone(),
)
.unwrap();
let constants = self
.assign_constants2(
layouter.namespace(|| "constants 2"),
config.gadget_config.clone(),
&constants_base,
)
.unwrap();
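// Tensors listed in commit_before are assigned and committed to first; the
// remaining tensors are assigned normally and the two maps are merged.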
let mut commitments = vec![];
let tensors = if self.commit_before.len() > 0 {
let mut tensor_map = BTreeMap::new();
let mut ignore_idxes: Vec<i64> = vec![];
for commit_idxes in self.commit_before.iter() {
let to_commit = BTreeMap::from_iter(
commit_idxes
.iter()
.map(|idx| (*idx, self.tensors.get(idx).unwrap().clone())),
);
let (mut committed_tensors, commitment) = self.assign_and_commit(
layouter.namespace(|| "commit"),
&constants,
&config,
&to_commit,
);
commitments.push(commitment);
tensor_map.append(&mut committed_tensors);
ignore_idxes.extend(commit_idxes.iter());
}
let mut assign_map = BTreeMap::new();
for (idx, tensor) in self.tensors.iter() {
if ignore_idxes.contains(idx) {
continue;
}
assign_map.insert(*idx, tensor.clone());
}
let mut remainder_tensor_map = self
.assign_tensors_map(
layouter.namespace(|| "assignment"),
&config.gadget_config.columns,
&assign_map,
)
.unwrap();
tensor_map.append(&mut remainder_tensor_map);
self.tensor_map_to_vec(&tensor_map).unwrap()
} else {
self
.assign_tensors_vec(
layouter.namespace(|| "assignment"),
&config.gadget_config.columns,
&self.tensors,
)
.unwrap()
};
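// Run the model DAG over the assigned tensors; this returns the full tensor map
// together with the final output tensors.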
let dag_chip = DAGLayerChip::<F>::construct(self.dag_config.clone());
let (final_tensor_map, result) = dag_chip.forward(
layouter.namespace(|| "dag"),
&tensors,
&constants,
config.gadget_config.clone(),
&LayerConfig::default(),
)?;
if self.commit_after.len() > 0 {
for commit_idxes in self.commit_after.iter() {
let to_commit = BTreeMap::from_iter(commit_idxes.iter().map(|idx| {
(
*idx,
final_tensor_map.get(&(*idx as usize)).unwrap().clone(),
)
}));
let commitment = self.copy_and_commit(
layouter.namespace(|| "commit"),
&constants,
&config,
&to_commit,
);
commitments.push(commitment);
}
}
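// Commitments first, then the model outputs, are constrained against the public
// instance column. A copy of each value is stored in PUBLIC_VALS so the prover
// can later reconstruct the public inputs.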
let mut pub_layouter = layouter.namespace(|| "public");
let mut total_idx = 0;
let mut new_public_vals = vec![];
for cell in commitments.iter() {
pub_layouter
.constrain_instance(cell.as_ref().cell(), config.public_col, total_idx)
.unwrap();
let val = convert_to_bigint(cell.value().map(|x| x.to_owned()));
new_public_vals.push(val);
total_idx += 1;
}
for tensor in result {
for cell in tensor.iter() {
pub_layouter
.constrain_instance(cell.as_ref().cell(), config.public_col, total_idx)
.unwrap();
let val = convert_to_bigint(cell.value().map(|x| x.to_owned()));
new_public_vals.push(val);
total_idx += 1;
}
}
*PUBLIC_VALS.lock().unwrap() = new_public_vals;
Ok(())
}
}
pub mod helpers;
pub mod loader;
pub mod proving_ipa;
pub mod proving_kzg;
use halo2_proofs::{
circuit::{AssignedCell, Value},
halo2curves::ff::PrimeField,
};
use ndarray::{Array, IxDyn};
use num_bigint::BigUint;
use crate::{gadgets::gadget::convert_to_u128, model::PUBLIC_VALS};
pub const RAND_START_IDX: i64 = i64::MIN;
pub const NUM_RANDOMS: i64 = 20001;
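/// Converts a field element wrapped in a `Value` into a `BigUint` via its
/// little-endian byte representation; an unknown `Value` yields zero.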
pub fn convert_to_bigint<F: PrimeField>(x: Value<F>) -> BigUint {
let mut big = Default::default();
x.map(|x| {
big = BigUint::from_bytes_le(x.to_repr().as_ref());
});
big
}
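/// Interprets a field element as a signed integer by adding a 2^60 bias before the
/// conversion and subtracting it afterwards, so negative values (encoded as p - x)
/// round-trip correctly as long as they are smaller than the bias in magnitude.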
pub fn convert_pos_int<F: PrimeField>(x: Value<F>) -> i128 {
let bias = 1 << 60;
let x_pos = x + Value::known(F::from(bias as u64));
let mut outp: i128 = 0;
x_pos.map(|x| {
let x_pos = convert_to_u128(&x);
let tmp = x_pos as i128 - bias;
outp = tmp;
});
return outp;
}
pub fn print_pos_int<F: PrimeField>(prefix: &str, x: Value<F>, scale_factor: u64) {
let tmp = convert_pos_int(x);
let tmp_float = tmp as f64 / scale_factor as f64;
println!("{} x: {} ({})", prefix, tmp, tmp_float);
}
pub fn print_assigned_arr<F: PrimeField>(
prefix: &str,
arr: &Vec<&AssignedCell<F, F>>,
scale_factor: u64,
) {
for (idx, x) in arr.iter().enumerate() {
print_pos_int(
&format!("{}[{}]", prefix, idx),
x.value().map(|x: &F| x.to_owned()),
scale_factor,
);
}
}
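/// Reads the global PUBLIC_VALS written during synthesis and converts each decimal
/// string back into a field element.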
pub fn get_public_values<F: PrimeField>() -> Vec<F> {
let mut public_vals = vec![];
for val in PUBLIC_VALS.lock().unwrap().iter() {
let val = F::from_str_vartime(&val.to_str_radix(10));
public_vals.push(val.unwrap());
}
public_vals
}
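// Shape helpers for numpy-style broadcasting: `shape_dominates` checks whether
// every dimension of s1 is at least as large as the corresponding dimension of s2
// (same rank required).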
fn shape_dominates(s1: &[usize], s2: &[usize]) -> bool {
if s1.len() != s2.len() {
return false;
}
for (x1, x2) in s1.iter().zip(s2.iter()) {
if x1 < x2 {
return false;
}
}
true
}
fn intermediate_shape(s1: &[usize], s2: &[usize]) -> Vec<usize> {
let mut res = vec![1; s2.len() - s1.len()];
for s in s1.iter() {
res.push(*s);
}
res
}
fn final_shape(s1: &[usize], s2: &[usize]) -> Vec<usize> {
let mut res = vec![];
for (x1, x2) in s1.iter().zip(s2.iter()) {
res.push(std::cmp::max(*x1, *x2));
}
res
}
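/// Broadcasts two arrays to a common shape, numpy-style: the lower-rank array is
/// first padded with leading 1-dimensions, then both arrays are broadcast to the
/// element-wise maximum of the two shapes. E.g. shapes (3,) and (2, 3) both
/// broadcast to (2, 3).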
pub fn broadcast<G: Clone>(
x1: &Array<G, IxDyn>,
x2: &Array<G, IxDyn>,
) -> (Array<G, IxDyn>, Array<G, IxDyn>) {
if x1.shape() == x2.shape() {
return (x1.clone(), x2.clone());
}
if x1.ndim() == x2.ndim() {
let s1 = x1.shape();
let s2 = x2.shape();
if shape_dominates(s1, s2) {
return (x1.clone(), x2.broadcast(s1).unwrap().into_owned());
} else if shape_dominates(x2.shape(), x1.shape()) {
return (x1.broadcast(s2).unwrap().into_owned(), x2.clone());
}
}
let (tmp1, tmp2) = if x1.ndim() < x2.ndim() {
(x1, x2)
} else {
(x2, x1)
};
let s1 = tmp1.shape();
let s2 = tmp2.shape();
let s = intermediate_shape(s1, s2);
let final_shape = final_shape(s2, s.as_slice());
let tmp1 = tmp1.broadcast(s.clone()).unwrap().into_owned();
let tmp1 = tmp1.broadcast(final_shape.as_slice()).unwrap().into_owned();
let tmp2 = tmp2.broadcast(final_shape.as_slice()).unwrap().into_owned();
if x1.ndim() < x2.ndim() {
return (tmp1, tmp2);
} else {
return (tmp2, tmp1);
}
}
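// Illustrative sketch, not part of the original source: a small unit test
// exercising `broadcast` with a rank-1 and a rank-2 array.
#[cfg(test)]
mod broadcast_tests {
  use super::*;
  use ndarray::{Array, IxDyn};

  #[test]
  fn broadcast_vector_against_matrix() {
    // A length-3 vector broadcast against a 2x3 matrix should yield two 2x3 arrays.
    let v = Array::from_shape_vec(IxDyn(&[3]), vec![1, 2, 3]).unwrap();
    let m = Array::from_shape_vec(IxDyn(&[2, 3]), vec![0; 6]).unwrap();
    let (b1, b2) = broadcast(&v, &m);
    assert_eq!(b1.shape(), &[2, 3]);
    assert_eq!(b2.shape(), &[2, 3]);
  }
}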