diff --git a/bellman/.gitignore b/bellman/.gitignore
new file mode 100644
index 0000000..9c858b5
--- /dev/null
+++ b/bellman/.gitignore
@@ -0,0 +1,3 @@
+target
+Cargo.lock
+pkg
\ No newline at end of file
diff --git a/bellman/COPYRIGHT b/bellman/COPYRIGHT
new file mode 100644
index 0000000..8b5f8cf
--- /dev/null
+++ b/bellman/COPYRIGHT
@@ -0,0 +1,14 @@
+Copyrights in the "bellman" library are retained by their contributors. No
+copyright assignment is required to contribute to the "bellman" library.
+
+The "bellman" library is licensed under either of
+
+ * Apache License, Version 2.0, (see ./LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0)
+ * MIT license (see ./LICENSE-MIT or http://opensource.org/licenses/MIT)
+
+at your option.
+
+Unless you explicitly state otherwise, any contribution intentionally
+submitted for inclusion in the work by you, as defined in the Apache-2.0
+license, shall be dual licensed as above, without any additional terms or
+conditions.
diff --git a/bellman/Cargo.toml b/bellman/Cargo.toml
new file mode 100644
index 0000000..da3504c
--- /dev/null
+++ b/bellman/Cargo.toml
@@ -0,0 +1,44 @@
+[package]
+authors = ["Sean Bowe ", "Alex Vlasov ", "Alex Gluchowski
+pub trait Circuit<E: Engine> {
+    /// Synthesize the circuit into a rank-1 quadratic constraint system
+    fn synthesize<CS: ConstraintSystem<E>>(
+        self,
+        cs: &mut CS
+    ) -> Result<(), SynthesisError>;
+}
+
+/// Represents a variable in our constraint system.
+#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)]
+pub struct Variable(pub(crate) Index);
+
+impl Variable {
+    /// This constructs a variable with an arbitrary index.
+    /// Circuit implementations are not recommended to use this.
+    pub fn new_unchecked(idx: Index) -> Variable {
+        Variable(idx)
+    }
+
+    /// This returns the index underlying the variable.
+    /// Circuit implementations are not recommended to use this.
+    pub fn get_unchecked(&self) -> Index {
+        self.0
+    }
+}
+
+/// Represents the index of either an input variable or
+/// auxiliary variable.
+#[derive(Copy, Clone, PartialEq, Debug, Hash, Eq)]
+pub enum Index {
+    Input(usize),
+    Aux(usize)
+}
+
+/// This represents a linear combination of some variables, with coefficients
+/// in the scalar field of a pairing-friendly elliptic curve group.
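+///
+/// A usage sketch (assuming `a` and `b` are previously allocated `Variable`s,
+/// `E` is some `Engine`, and `CS::one()` is the constant-one input of some
+/// `ConstraintSystem<E>`), building the combination `2*a + b - 1`:
+///
+/// ```ignore
+/// let mut two = E::Fr::one();
+/// two.double();
+/// let lc = LinearCombination::<E>::zero()
+///     + (two, a)
+///     + b
+///     - (E::Fr::one(), CS::one());
+/// ```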
+#[derive(Clone)]
+pub struct LinearCombination<E: Engine>(pub(crate) Vec<(Variable, E::Fr)>);
+
+impl<E: Engine> AsRef<[(Variable, E::Fr)]> for LinearCombination<E> {
+    fn as_ref(&self) -> &[(Variable, E::Fr)] {
+        &self.0
+    }
+}
+
+impl<E: Engine> LinearCombination<E> {
+    pub fn zero() -> LinearCombination<E> {
+        LinearCombination(vec![])
+    }
+}
+
+impl<E: Engine> Add<(E::Fr, Variable)> for LinearCombination<E> {
+    type Output = LinearCombination<E>;
+
+    fn add(mut self, (coeff, var): (E::Fr, Variable)) -> LinearCombination<E> {
+        self.0.push((var, coeff));
+
+        self
+    }
+}
+
+impl<E: Engine> Sub<(E::Fr, Variable)> for LinearCombination<E> {
+    type Output = LinearCombination<E>;
+
+    fn sub(self, (mut coeff, var): (E::Fr, Variable)) -> LinearCombination<E> {
+        coeff.negate();
+
+        self + (coeff, var)
+    }
+}
+
+impl<E: Engine> Add<Variable> for LinearCombination<E> {
+    type Output = LinearCombination<E>;
+
+    fn add(self, other: Variable) -> LinearCombination<E> {
+        self + (E::Fr::one(), other)
+    }
+}
+
+impl<E: Engine> Sub<Variable> for LinearCombination<E> {
+    type Output = LinearCombination<E>;
+
+    fn sub(self, other: Variable) -> LinearCombination<E> {
+        self - (E::Fr::one(), other)
+    }
+}
+
+impl<'a, E: Engine> Add<&'a LinearCombination<E>> for LinearCombination<E> {
+    type Output = LinearCombination<E>;
+
+    fn add(mut self, other: &'a LinearCombination<E>) -> LinearCombination<E> {
+        for s in &other.0 {
+            self = self + (s.1, s.0);
+        }
+
+        self
+    }
+}
+
+impl<'a, E: Engine> Sub<&'a LinearCombination<E>> for LinearCombination<E> {
+    type Output = LinearCombination<E>;
+
+    fn sub(mut self, other: &'a LinearCombination<E>) -> LinearCombination<E> {
+        for s in &other.0 {
+            self = self - (s.1, s.0);
+        }
+
+        self
+    }
+}
+
+impl<'a, E: Engine> Add<(E::Fr, &'a LinearCombination<E>)> for LinearCombination<E> {
+    type Output = LinearCombination<E>;
+
+    fn add(mut self, (coeff, other): (E::Fr, &'a LinearCombination<E>)) -> LinearCombination<E> {
+        for s in &other.0 {
+            let mut tmp = s.1;
+            tmp.mul_assign(&coeff);
+            self = self + (tmp, s.0);
+        }
+
+        self
+    }
+}
+
+impl<'a, E: Engine> Sub<(E::Fr, &'a LinearCombination<E>)> for LinearCombination<E> {
+    type Output = LinearCombination<E>;
+
+    fn sub(mut self, (coeff, other): (E::Fr, &'a LinearCombination<E>)) -> LinearCombination<E> {
+        for s in &other.0 {
+            let mut tmp = s.1;
+            tmp.mul_assign(&coeff);
+            self = self - (tmp, s.0);
+        }
+
+        self
+    }
+}
+
+/// This is an error that could occur during circuit synthesis contexts,
+/// such as CRS generation, proving or verification.
+#[derive(Debug)]
+pub enum SynthesisError {
+    /// During synthesis, we lacked knowledge of a variable assignment.
+    AssignmentMissing,
+    /// During synthesis, we divided by zero.
+    DivisionByZero,
+    /// During synthesis, we constructed an unsatisfiable constraint system.
+    Unsatisfiable,
+    /// During synthesis, our polynomials ended up being of too high a degree.
+    PolynomialDegreeTooLarge,
+    /// During proof generation, we encountered an identity in the CRS.
+    UnexpectedIdentity,
+    /// During proof generation, we encountered an I/O error with the CRS.
+    IoError(io::Error),
+    /// During verification, our verifying key was malformed.
+    MalformedVerifyingKey,
+    /// During CRS generation, we observed an unconstrained auxiliary variable
+    UnconstrainedVariable
+}
+
+impl From<io::Error> for SynthesisError {
+    fn from(e: io::Error) -> SynthesisError {
+        SynthesisError::IoError(e)
+    }
+}
+
+impl Error for SynthesisError {
+    fn description(&self) -> &str {
+        match *self {
+            SynthesisError::AssignmentMissing => "an assignment for a variable could not be computed",
+            SynthesisError::DivisionByZero => "division by zero",
+            SynthesisError::Unsatisfiable => "unsatisfiable constraint system",
+            SynthesisError::PolynomialDegreeTooLarge => "polynomial degree is too large",
+            SynthesisError::UnexpectedIdentity => "encountered an identity element in the CRS",
+            SynthesisError::IoError(_) => "encountered an I/O error",
+            SynthesisError::MalformedVerifyingKey => "malformed verifying key",
+            SynthesisError::UnconstrainedVariable => "auxiliary variable was unconstrained"
+        }
+    }
+}
+
+impl fmt::Display for SynthesisError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
+        if let &SynthesisError::IoError(ref e) = self {
+            write!(f, "I/O error: ")?;
+            e.fmt(f)
+        } else {
+            write!(f, "{}", self.description())
+        }
+    }
+}
+
+/// Represents a constraint system which can have new variables
+/// allocated and constraints between them formed.
+pub trait ConstraintSystem<E: Engine>: Sized {
+    /// Represents the type of the "root" of this constraint system
+    /// so that nested namespaces can minimize indirection.
+    type Root: ConstraintSystem<E>;
+
+    /// Return the "one" input variable
+    fn one() -> Variable {
+        Variable::new_unchecked(Index::Input(0))
+    }
+
+    /// Allocate a private variable in the constraint system. The provided function is used to
+    /// determine the assignment of the variable. The given `annotation` function is invoked
+    /// in testing contexts in order to derive a unique name for this variable in the current
+    /// namespace.
+    fn alloc<F, A, AR>(
+        &mut self,
+        annotation: A,
+        f: F
+    ) -> Result<Variable, SynthesisError>
+        where F: FnOnce() -> Result<E::Fr, SynthesisError>, A: FnOnce() -> AR, AR: Into<String>;
+
+    /// Allocate a public variable in the constraint system. The provided function is used to
+    /// determine the assignment of the variable.
+    fn alloc_input<F, A, AR>(
+        &mut self,
+        annotation: A,
+        f: F
+    ) -> Result<Variable, SynthesisError>
+        where F: FnOnce() -> Result<E::Fr, SynthesisError>, A: FnOnce() -> AR, AR: Into<String>;
+
+    /// Enforce that `A` * `B` = `C`. The `annotation` function is invoked in testing contexts
+    /// in order to derive a unique name for the constraint in the current namespace.
+    fn enforce<A, AR, LA, LB, LC>(
+        &mut self,
+        annotation: A,
+        a: LA,
+        b: LB,
+        c: LC
+    )
+        where A: FnOnce() -> AR, AR: Into<String>,
+              LA: FnOnce(LinearCombination<E>) -> LinearCombination<E>,
+              LB: FnOnce(LinearCombination<E>) -> LinearCombination<E>,
+              LC: FnOnce(LinearCombination<E>) -> LinearCombination<E>;
+
+    /// Create a new (sub)namespace and enter into it. Not intended
+    /// for downstream use; use `namespace` instead.
+    fn push_namespace<NR, N>(&mut self, name_fn: N)
+        where NR: Into<String>, N: FnOnce() -> NR;
+
+    /// Exit out of the existing namespace. Not intended for
+    /// downstream use; use `namespace` instead.
+    fn pop_namespace(&mut self);
+
+    /// Gets the "root" constraint system, bypassing the namespacing.
+    /// Not intended for downstream use; use `namespace` instead.
+    fn get_root(&mut self) -> &mut Self::Root;
+
+    /// Begin a namespace for this constraint system.
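+    ///
+    /// A usage sketch (assuming `cs` is some `ConstraintSystem<E>` and the closures
+    /// shown are placeholders):
+    ///
+    /// ```ignore
+    /// let mut ns = cs.namespace(|| "my gadget");
+    /// let v = ns.alloc(|| "v", || Ok(E::Fr::one()))?;
+    /// // the namespace is popped automatically when `ns` is dropped
+    /// ```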
+ fn namespace<'a, NR, N>( + &'a mut self, + name_fn: N + ) -> Namespace<'a, E, Self::Root> + where NR: Into, N: FnOnce() -> NR + { + self.get_root().push_namespace(name_fn); + + Namespace(self.get_root(), PhantomData) + } +} + +/// This is a "namespaced" constraint system which borrows a constraint system (pushing +/// a namespace context) and, when dropped, pops out of the namespace context. +pub struct Namespace<'a, E: Engine, CS: ConstraintSystem + 'a>(&'a mut CS, PhantomData); + +impl<'cs, E: Engine, CS: ConstraintSystem> ConstraintSystem for Namespace<'cs, E, CS> { + type Root = CS::Root; + + fn one() -> Variable { + CS::one() + } + + fn alloc( + &mut self, + annotation: A, + f: F + ) -> Result + where F: FnOnce() -> Result, A: FnOnce() -> AR, AR: Into + { + self.0.alloc(annotation, f) + } + + fn alloc_input( + &mut self, + annotation: A, + f: F + ) -> Result + where F: FnOnce() -> Result, A: FnOnce() -> AR, AR: Into + { + self.0.alloc_input(annotation, f) + } + + fn enforce( + &mut self, + annotation: A, + a: LA, + b: LB, + c: LC + ) + where A: FnOnce() -> AR, AR: Into, + LA: FnOnce(LinearCombination) -> LinearCombination, + LB: FnOnce(LinearCombination) -> LinearCombination, + LC: FnOnce(LinearCombination) -> LinearCombination + { + self.0.enforce(annotation, a, b, c) + } + + // Downstream users who use `namespace` will never interact with these + // functions and they will never be invoked because the namespace is + // never a root constraint system. + + fn push_namespace(&mut self, _: N) + where NR: Into, N: FnOnce() -> NR + { + panic!("only the root's push_namespace should be called"); + } + + fn pop_namespace(&mut self) + { + panic!("only the root's pop_namespace should be called"); + } + + fn get_root(&mut self) -> &mut Self::Root + { + self.0.get_root() + } +} + +impl<'a, E: Engine, CS: ConstraintSystem> Drop for Namespace<'a, E, CS> { + fn drop(&mut self) { + self.get_root().pop_namespace() + } +} + +/// Convenience implementation of ConstraintSystem for mutable references to +/// constraint systems. +impl<'cs, E: Engine, CS: ConstraintSystem> ConstraintSystem for &'cs mut CS { + type Root = CS::Root; + + fn one() -> Variable { + CS::one() + } + + fn alloc( + &mut self, + annotation: A, + f: F + ) -> Result + where F: FnOnce() -> Result, A: FnOnce() -> AR, AR: Into + { + (**self).alloc(annotation, f) + } + + fn alloc_input( + &mut self, + annotation: A, + f: F + ) -> Result + where F: FnOnce() -> Result, A: FnOnce() -> AR, AR: Into + { + (**self).alloc_input(annotation, f) + } + + fn enforce( + &mut self, + annotation: A, + a: LA, + b: LB, + c: LC + ) + where A: FnOnce() -> AR, AR: Into, + LA: FnOnce(LinearCombination) -> LinearCombination, + LB: FnOnce(LinearCombination) -> LinearCombination, + LC: FnOnce(LinearCombination) -> LinearCombination + { + (**self).enforce(annotation, a, b, c) + } + + fn push_namespace(&mut self, name_fn: N) + where NR: Into, N: FnOnce() -> NR + { + (**self).push_namespace(name_fn) + } + + fn pop_namespace(&mut self) + { + (**self).pop_namespace() + } + + fn get_root(&mut self) -> &mut Self::Root + { + (**self).get_root() + } +} \ No newline at end of file diff --git a/bellman/src/domain.rs b/bellman/src/domain.rs new file mode 100644 index 0000000..7d3d07a --- /dev/null +++ b/bellman/src/domain.rs @@ -0,0 +1,552 @@ +//! This module contains an `EvaluationDomain` abstraction for +//! performing various kinds of polynomial arithmetic on top of +//! the scalar field. +//! +//! In pairing-based SNARKs like Groth16, we need to calculate +//! 
a quotient polynomial over a target polynomial with roots +//! at distinct points associated with each constraint of the +//! constraint system. In order to be efficient, we choose these +//! roots to be the powers of a 2^n root of unity in the field. +//! This allows us to perform polynomial operations in O(n) +//! by performing an O(n log n) FFT over such a domain. + +use crate::pairing::{ + Engine, + CurveProjective +}; + +use crate::pairing::ff::{ + Field, + PrimeField +}; + +use super::{ + SynthesisError +}; + +use super::worker::Worker; +pub use super::group::*; + +pub struct EvaluationDomain> { + coeffs: Vec, + exp: u32, + omega: E::Fr, + omegainv: E::Fr, + geninv: E::Fr, + minv: E::Fr +} + +impl> EvaluationDomain { + pub fn as_ref(&self) -> &[G] { + &self.coeffs + } + + pub fn as_mut(&mut self) -> &mut [G] { + &mut self.coeffs + } + + pub fn into_coeffs(self) -> Vec { + self.coeffs + } + + pub fn from_coeffs(mut coeffs: Vec) -> Result, SynthesisError> + { + use crate::pairing::ff::PrimeField; + // Compute the size of our evaluation domain + + let coeffs_len = coeffs.len(); + + // m is a size of domain where Z polynomial does NOT vanish + // in normal domain Z is in a form of (X-1)(X-2)...(X-N) + let mut m = 1; + let mut exp = 0; + let mut omega = E::Fr::root_of_unity(); + let max_degree = (1 << E::Fr::S) - 1; + + if coeffs_len > max_degree { + return Err(SynthesisError::PolynomialDegreeTooLarge) + } + + while m < coeffs_len { + m *= 2; + exp += 1; + + // The pairing-friendly curve may not be able to support + // large enough (radix2) evaluation domains. + if exp > E::Fr::S { + return Err(SynthesisError::PolynomialDegreeTooLarge) + } + } + + // If full domain is not needed - limit it, + // e.g. if (2^N)th power is not required, just double omega and get 2^(N-1)th + // Compute omega, the 2^exp primitive root of unity + for _ in exp..E::Fr::S { + omega.square(); + } + + // Extend the coeffs vector with zeroes if necessary + coeffs.resize(m, G::group_zero()); + + Ok(EvaluationDomain { + coeffs: coeffs, + exp: exp, + omega: omega, + omegainv: omega.inverse().unwrap(), + geninv: E::Fr::multiplicative_generator().inverse().unwrap(), + minv: E::Fr::from_str(&format!("{}", m)).unwrap().inverse().unwrap() + }) + } + + // this one does expect coefficients to be smaller than `num_roots_of_unity/2` as we expect multiplication + pub fn from_coeffs_into_sized(mut coeffs: Vec, size: usize) -> Result, SynthesisError> + { + use crate::pairing::ff::PrimeField; + // Compute the size of our evaluation domain + + assert!(size >= coeffs.len()); + + let coeffs_len = size; + + // m is a size of domain where Z polynomial does NOT vanish + // in normal domain Z is in a form of (X-1)(X-2)...(X-N) + let mut m = 1; + let mut exp = 0; + let mut omega = E::Fr::root_of_unity(); + let max_degree = (1 << E::Fr::S) - 1; + + if coeffs_len > max_degree { + return Err(SynthesisError::PolynomialDegreeTooLarge) + } + + while m < coeffs_len { + m *= 2; + exp += 1; + + // The pairing-friendly curve may not be able to support + // large enough (radix2) evaluation domains. + if exp > E::Fr::S { + return Err(SynthesisError::PolynomialDegreeTooLarge) + } + } + + // If full domain is not needed - limit it, + // e.g. 
if (2^N)th power is not required, just double omega and get 2^(N-1)th + // Compute omega, the 2^exp primitive root of unity + for _ in exp..E::Fr::S { + omega.square(); + } + + // Extend the coeffs vector with zeroes if necessary + coeffs.resize(m, G::group_zero()); + + Ok(EvaluationDomain { + coeffs: coeffs, + exp: exp, + omega: omega, + omegainv: omega.inverse().unwrap(), + geninv: E::Fr::multiplicative_generator().inverse().unwrap(), + minv: E::Fr::from_str(&format!("{}", m)).unwrap().inverse().unwrap() + }) + } + + + pub fn fft(&mut self, worker: &Worker) + { + best_fft(&mut self.coeffs, worker, &self.omega, self.exp); + } + + pub fn ifft(&mut self, worker: &Worker) + { + best_fft(&mut self.coeffs, worker, &self.omegainv, self.exp); + + worker.scope(self.coeffs.len(), |scope, chunk| { + let minv = self.minv; + + for v in self.coeffs.chunks_mut(chunk) { + scope.spawn(move |_| { + for v in v { + v.group_mul_assign(&minv); + } + }); + } + }); + } + + pub fn distribute_powers(&mut self, worker: &Worker, g: E::Fr) + { + worker.scope(self.coeffs.len(), |scope, chunk| { + for (i, v) in self.coeffs.chunks_mut(chunk).enumerate() { + scope.spawn(move |_| { + let mut u = g.pow(&[(i * chunk) as u64]); + for v in v.iter_mut() { + v.group_mul_assign(&u); + u.mul_assign(&g); + } + }); + } + }); + } + + pub fn coset_fft(&mut self, worker: &Worker) + { + self.distribute_powers(worker, E::Fr::multiplicative_generator()); + self.fft(worker); + } + + pub fn icoset_fft(&mut self, worker: &Worker) + { + let geninv = self.geninv; + + self.ifft(worker); + self.distribute_powers(worker, geninv); + } + + /// This evaluates t(tau) for this domain, which is + /// tau^m - 1 for these radix-2 domains. + pub fn z(&self, tau: &E::Fr) -> E::Fr { + let mut tmp = tau.pow(&[self.coeffs.len() as u64]); + tmp.sub_assign(&E::Fr::one()); + + tmp + } + + /// The target polynomial is the zero polynomial in our + /// evaluation domain, so we must perform division over + /// a coset. + pub fn divide_by_z_on_coset(&mut self, worker: &Worker) + { + let i = self.z(&E::Fr::multiplicative_generator()).inverse().unwrap(); + + worker.scope(self.coeffs.len(), |scope, chunk| { + for v in self.coeffs.chunks_mut(chunk) { + scope.spawn(move |_| { + for v in v { + v.group_mul_assign(&i); + } + }); + } + }); + } + + /// Perform O(n) multiplication of two polynomials in the domain. + pub fn mul_assign(&mut self, worker: &Worker, other: &EvaluationDomain>) { + assert_eq!(self.coeffs.len(), other.coeffs.len()); + + worker.scope(self.coeffs.len(), |scope, chunk| { + for (a, b) in self.coeffs.chunks_mut(chunk).zip(other.coeffs.chunks(chunk)) { + scope.spawn(move |_| { + for (a, b) in a.iter_mut().zip(b.iter()) { + a.group_mul_assign(&b.0); + } + }); + } + }); + } + + /// Perform O(n) subtraction of one polynomial from another in the domain. 
+ pub fn sub_assign(&mut self, worker: &Worker, other: &EvaluationDomain) { + assert_eq!(self.coeffs.len(), other.coeffs.len()); + + worker.scope(self.coeffs.len(), |scope, chunk| { + for (a, b) in self.coeffs.chunks_mut(chunk).zip(other.coeffs.chunks(chunk)) { + scope.spawn(move |_| { + for (a, b) in a.iter_mut().zip(b.iter()) { + a.group_sub_assign(&b); + } + }); + } + }); + } +} + +pub(crate) fn best_fft>(a: &mut [T], worker: &Worker, omega: &E::Fr, log_n: u32) +{ + let log_cpus = worker.log_num_cpus(); + + if log_n <= log_cpus { + serial_fft(a, omega, log_n); + } else { + parallel_fft(a, worker, omega, log_n, log_cpus); + } +} + +pub(crate) fn serial_fft>(a: &mut [T], omega: &E::Fr, log_n: u32) +{ + fn bitreverse(mut n: u32, l: u32) -> u32 { + let mut r = 0; + for _ in 0..l { + r = (r << 1) | (n & 1); + n >>= 1; + } + r + } + + let n = a.len() as u32; + assert_eq!(n, 1 << log_n); + + for k in 0..n { + let rk = bitreverse(k, log_n); + if k < rk { + a.swap(rk as usize, k as usize); + } + } + + let mut m = 1; + for _ in 0..log_n { + let w_m = omega.pow(&[(n / (2*m)) as u64]); + + let mut k = 0; + while k < n { + let mut w = E::Fr::one(); + for j in 0..m { + let mut t = a[(k+j+m) as usize]; + t.group_mul_assign(&w); + let mut tmp = a[(k+j) as usize]; + tmp.group_sub_assign(&t); + a[(k+j+m) as usize] = tmp; + a[(k+j) as usize].group_add_assign(&t); + w.mul_assign(&w_m); + } + + k += 2*m; + } + + m *= 2; + } +} + +pub(crate) fn parallel_fft>( + a: &mut [T], + worker: &Worker, + omega: &E::Fr, + log_n: u32, + log_cpus: u32 +) +{ + assert!(log_n >= log_cpus); + + let num_cpus = 1 << log_cpus; + let log_new_n = log_n - log_cpus; + let mut tmp = vec![vec![T::group_zero(); 1 << log_new_n]; num_cpus]; + let new_omega = omega.pow(&[num_cpus as u64]); + + worker.scope(0, |scope, _| { + let a = &*a; + + for (j, tmp) in tmp.iter_mut().enumerate() { + scope.spawn(move |_| { + // Shuffle into a sub-FFT + let omega_j = omega.pow(&[j as u64]); + let omega_step = omega.pow(&[(j as u64) << log_new_n]); + + let mut elt = E::Fr::one(); + for i in 0..(1 << log_new_n) { + for s in 0..num_cpus { + let idx = (i + (s << log_new_n)) % (1 << log_n); + let mut t = a[idx]; + t.group_mul_assign(&elt); + tmp[i].group_add_assign(&t); + elt.mul_assign(&omega_step); + } + elt.mul_assign(&omega_j); + } + + // Perform sub-FFT + serial_fft(tmp, &new_omega, log_new_n); + }); + } + }); + + // TODO: does this hurt or help? + worker.scope(a.len(), |scope, chunk| { + let tmp = &tmp; + + for (idx, a) in a.chunks_mut(chunk).enumerate() { + scope.spawn(move |_| { + let mut idx = idx * chunk; + let mask = (1 << log_cpus) - 1; + for a in a { + *a = tmp[idx & mask][idx >> log_cpus]; + idx += 1; + } + }); + } + }); +} + +// Test multiplying various (low degree) polynomials together and +// comparing with naive evaluations. 
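+
+// A minimal sketch (not exercised by the tests below) of the multiplication workflow
+// that those tests check: pad both coefficient vectors to the product length, move the
+// polynomials into the evaluation domain, multiply the evaluations pointwise, and
+// interpolate back. `Scalar` is the `Group` wrapper for field elements re-exported
+// from the `group` module.
+#[cfg(test)]
+#[allow(dead_code)]
+fn mul_polynomials_sketch<E: Engine>(
+    worker: &Worker,
+    mut a: Vec<Scalar<E>>,
+    mut b: Vec<Scalar<E>>
+) -> Result<Vec<Scalar<E>>, SynthesisError> {
+    // Without this padding the result would wrap around the radix-2 domain.
+    let product_len = a.len() + b.len();
+    a.resize(product_len, Scalar(E::Fr::zero()));
+    b.resize(product_len, Scalar(E::Fr::zero()));
+
+    let mut a = EvaluationDomain::from_coeffs(a)?;
+    let mut b = EvaluationDomain::from_coeffs(b)?;
+    a.fft(worker);
+    b.fft(worker);
+    a.mul_assign(worker, &b); // pointwise product of the evaluations
+    a.ifft(worker);           // back to coefficients of the product polynomial
+
+    Ok(a.into_coeffs())
+}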
+#[test] +fn polynomial_arith() { + use crate::pairing::bls12_381::Bls12; + use rand::{self, Rand}; + + fn test_mul(rng: &mut R) + { + let worker = Worker::new(); + + for coeffs_a in 0..70 { + for coeffs_b in 0..70 { + let mut a: Vec<_> = (0..coeffs_a).map(|_| Scalar::(E::Fr::rand(rng))).collect(); + let mut b: Vec<_> = (0..coeffs_b).map(|_| Scalar::(E::Fr::rand(rng))).collect(); + + // naive evaluation + let mut naive = vec![Scalar(E::Fr::zero()); coeffs_a + coeffs_b]; + for (i1, a) in a.iter().enumerate() { + for (i2, b) in b.iter().enumerate() { + let mut prod = *a; + prod.group_mul_assign(&b.0); + naive[i1 + i2].group_add_assign(&prod); + } + } + + a.resize(coeffs_a + coeffs_b, Scalar(E::Fr::zero())); + b.resize(coeffs_a + coeffs_b, Scalar(E::Fr::zero())); + + let mut a = EvaluationDomain::from_coeffs(a).unwrap(); + let mut b = EvaluationDomain::from_coeffs(b).unwrap(); + + a.fft(&worker); + b.fft(&worker); + a.mul_assign(&worker, &b); + a.ifft(&worker); + + for (naive, fft) in naive.iter().zip(a.coeffs.iter()) { + assert!(naive == fft); + } + } + } + } + + let rng = &mut rand::thread_rng(); + + test_mul::(rng); +} + +#[test] +fn fft_composition() { + use crate::pairing::bls12_381::Bls12; + use rand; + + fn test_comp(rng: &mut R) + { + let worker = Worker::new(); + + for coeffs in 0..10 { + let coeffs = 1 << coeffs; + + let mut v = vec![]; + for _ in 0..coeffs { + v.push(Scalar::(rng.gen())); + } + + let mut domain = EvaluationDomain::from_coeffs(v.clone()).unwrap(); + domain.ifft(&worker); + domain.fft(&worker); + assert!(v == domain.coeffs); + domain.fft(&worker); + domain.ifft(&worker); + assert!(v == domain.coeffs); + domain.icoset_fft(&worker); + domain.coset_fft(&worker); + assert!(v == domain.coeffs); + domain.coset_fft(&worker); + domain.icoset_fft(&worker); + assert!(v == domain.coeffs); + } + } + + let rng = &mut rand::thread_rng(); + + test_comp::(rng); +} + +#[test] +fn parallel_fft_consistency() { + use crate::pairing::bls12_381::Bls12; + use rand::{self, Rand}; + use std::cmp::min; + + fn test_consistency(rng: &mut R) + { + let worker = Worker::new(); + + for _ in 0..5 { + for log_d in 0..10 { + let d = 1 << log_d; + + let v1 = (0..d).map(|_| Scalar::(E::Fr::rand(rng))).collect::>(); + let mut v1 = EvaluationDomain::from_coeffs(v1).unwrap(); + let mut v2 = EvaluationDomain::from_coeffs(v1.coeffs.clone()).unwrap(); + + for log_cpus in log_d..min(log_d+1, 3) { + parallel_fft(&mut v1.coeffs, &worker, &v1.omega, log_d, log_cpus); + serial_fft(&mut v2.coeffs, &v2.omega, log_d); + + assert!(v1.coeffs == v2.coeffs); + } + } + } + } + + let rng = &mut rand::thread_rng(); + + test_consistency::(rng); +} + +#[test] +fn test_field_element_multiplication_bn256() { + use rand::{self, Rand}; + use crate::pairing::bn256::Bn256; + use crate::pairing::bn256::Fr; + use num_cpus; + + let cpus = num_cpus::get(); + const SAMPLES: usize = 1 << 22; + + let rng = &mut rand::thread_rng(); + let v1 = (0..SAMPLES).map(|_| Scalar::(Fr::rand(rng))).collect::>(); + let v2 = (0..SAMPLES).map(|_| Scalar::(Fr::rand(rng))).collect::>(); + + let mut v1 = EvaluationDomain::from_coeffs(v1).unwrap(); + let v2 = EvaluationDomain::from_coeffs(v2).unwrap(); + + let pool = Worker::new(); + + let start = std::time::Instant::now(); + + v1.mul_assign(&pool, &v2); + + let duration_ns = start.elapsed().as_nanos() as f64; + println!("Elapsed {} ns for {} samples", duration_ns, SAMPLES); + let time_per_sample = duration_ns/(SAMPLES as f64); + println!("Tested on {} samples on {} CPUs with {} ns per field element 
multiplication", SAMPLES, cpus, time_per_sample); +} + +#[test] +fn test_fft_bn256() { + use rand::{self, Rand}; + use crate::pairing::bn256::Bn256; + use crate::pairing::bn256::Fr; + use num_cpus; + + let cpus = num_cpus::get(); + const SAMPLES: usize = 1 << 27; + + let rng = &mut rand::thread_rng(); + let v1 = (0..SAMPLES).map(|_| Scalar::(Fr::rand(rng))).collect::>(); + + let mut v1 = EvaluationDomain::from_coeffs(v1).unwrap(); + + let pool = Worker::new(); + + let start = std::time::Instant::now(); + + v1.ifft(&pool); + + let duration_ns = start.elapsed().as_nanos() as f64; + println!("Elapsed {} ns for {} samples", duration_ns, SAMPLES); + let time_per_sample = duration_ns/(SAMPLES as f64); + println!("Tested on {} samples on {} CPUs with {} ns per field element multiplication", SAMPLES, cpus, time_per_sample); +} \ No newline at end of file diff --git a/bellman/src/gm17/generator.rs b/bellman/src/gm17/generator.rs new file mode 100644 index 0000000..f1dd3f0 --- /dev/null +++ b/bellman/src/gm17/generator.rs @@ -0,0 +1,700 @@ +use super::super::verbose_flag; + +use rand::Rng; + +use std::sync::Arc; + +use crate::pairing::{ + Engine, + Wnaf, + CurveProjective, + CurveAffine +}; + +use crate::pairing::ff::{ + PrimeField, + Field +}; + +use super::{ + Parameters, + VerifyingKey +}; + +use crate::{ + SynthesisError, + Circuit, + ConstraintSystem, + LinearCombination, + Variable, + Index +}; + +use crate::domain::{ + EvaluationDomain, + Scalar +}; + +use crate::worker::{ + Worker +}; + +// /// Generates a random common reference string for +// /// a circuit. +// pub fn generate_random_parameters( +// circuit: C, +// rng: &mut R +// ) -> Result, SynthesisError> +// where E: Engine, C: Circuit, R: Rng +// { +// let g1 = rng.gen(); +// let g2 = rng.gen(); +// let alpha = rng.gen(); +// let beta = rng.gen(); +// let gamma = rng.gen(); +// let delta = rng.gen(); +// let tau = rng.gen(); + +// generate_parameters::( +// circuit, +// g1, +// g2, +// alpha, +// beta, +// gamma, +// delta, +// tau +// ) +// } + +/// This is our assembly structure that we'll use to synthesize the +/// circuit into a SAP. Square arithmetic problem is different from QAP in a form: +/// it's A*A - C = 0 instead of A*B - C = 0 +struct KeypairAssembly { + num_inputs: usize, + num_aux: usize, + num_constraints: usize, + num_r1cs_aux: usize, + num_r1cs_constraints: usize, + at_inputs: Vec>, + ct_inputs: Vec>, + at_aux: Vec>, + ct_aux: Vec> +} + +impl ConstraintSystem for KeypairAssembly { + type Root = Self; + + fn alloc( + &mut self, + _: A, + _: F + ) -> Result + where F: FnOnce() -> Result, A: FnOnce() -> AR, AR: Into + { + // There is no assignment, so we don't even invoke the + // function for obtaining one. + + let index = self.num_aux; + self.num_aux += 1; + + self.num_r1cs_aux += 1; + + self.at_aux.push(vec![]); + self.ct_aux.push(vec![]); + + Ok(Variable(Index::Aux(index))) + } + + fn alloc_input( + &mut self, + _: A, + _: F + ) -> Result + where F: FnOnce() -> Result, A: FnOnce() -> AR, AR: Into + { + // There is no assignment, so we don't even invoke the + // function for obtaining one. 
+ + let index = self.num_inputs; + self.num_inputs += 1; + + self.at_inputs.push(vec![]); + self.ct_inputs.push(vec![]); + + Ok(Variable(Index::Input(index))) + } + + fn enforce( + &mut self, + _: A, + a: LA, + b: LB, + c: LC + ) + where A: FnOnce() -> AR, AR: Into, + LA: FnOnce(LinearCombination) -> LinearCombination, + LB: FnOnce(LinearCombination) -> LinearCombination, + LC: FnOnce(LinearCombination) -> LinearCombination + { + use std::ops::{Add, Sub}; + + // this is where reduction happens. First we need to re-arrange initial constraints + // from the form * = to an artificial + // * = y + // * = 4* + y + + fn quadruple( + coeff: E::Fr + ) -> E::Fr { + let mut tmp = coeff; + tmp.double(); + tmp.double(); + + tmp + } + + fn eval( + l: LinearCombination, + inputs: &mut [Vec<(E::Fr, usize)>], + aux: &mut [Vec<(E::Fr, usize)>], + this_constraint: usize + ) + { + for (index, coeff) in l.0 { + match index { + Variable(Index::Input(id)) => inputs[id].push((coeff, this_constraint)), + Variable(Index::Aux(id)) => aux[id].push((coeff, this_constraint)) + } + } + } + + // * = x_i + let i = self.num_constraints; + let y = self.alloc( + || format!("SAP reduction y_{}", i), + || Ok(E::Fr::one()) + ).expect("must allocate SAP reduction variable"); + self.num_r1cs_aux -= 1; + + let lc_a = a(LinearCombination::zero()); + let lc_b = b(LinearCombination::zero()); + let lc_c = c(LinearCombination::zero()); + + let lc_a_minus_b = lc_a.clone().sub(&lc_b); + + let mut lc_y: LinearCombination = LinearCombination::zero(); + lc_y = lc_y.add(y); + + eval(lc_a_minus_b, &mut self.at_inputs, &mut self.at_aux, self.num_constraints); + eval(lc_y, &mut self.ct_inputs, &mut self.ct_aux, self.num_constraints); + + self.num_constraints += 1; + + // * = 4* + y + let lc_a_plus_b = lc_a.add(&lc_b); + + let mut lc_c_quadrupled: LinearCombination = LinearCombination::zero(); + for s in &lc_c.0 { + let tmp = quadruple::(s.1); + lc_c_quadrupled = lc_c_quadrupled + (tmp, s.0); + } + lc_c_quadrupled = lc_c_quadrupled.add(y); + + eval(lc_a_plus_b, &mut self.at_inputs, &mut self.at_aux, self.num_constraints); + eval(lc_c_quadrupled, &mut self.ct_inputs, &mut self.ct_aux, self.num_constraints); + + self.num_constraints += 1; + + self.num_r1cs_constraints += 1; + } + + fn push_namespace(&mut self, _: N) + where NR: Into, N: FnOnce() -> NR + { + // Do nothing; we don't care about namespaces in this context. + } + + fn pop_namespace(&mut self) + { + // Do nothing; we don't care about namespaces in this context. + } + + fn get_root(&mut self) -> &mut Self::Root { + self + } +} + +/// Create parameters for a circuit, given some toxic waste. +pub fn generate_parameters( + circuit: C, + g1: E::G1, + g2: E::G2, + alpha: E::Fr, + beta: E::Fr, + gamma: E::Fr, + // delta: E::Fr, + tau: E::Fr +) -> Result<(), SynthesisError> +// Result, SynthesisError> + where E: Engine, C: Circuit +{ + let verbose = verbose_flag(); + + let mut assembly = KeypairAssembly { + num_inputs: 0, + num_aux: 0, + num_constraints: 0, + num_r1cs_aux: 0, + num_r1cs_constraints: 0, + at_inputs: vec![], + ct_inputs: vec![], + at_aux: vec![], + ct_aux: vec![] + }; + + // Allocate the "one" input variable + let input_0 = assembly.alloc_input(|| "", || Ok(E::Fr::one()))?; + + // Synthesize the circuit. 
+ circuit.synthesize(&mut assembly)?; + + let num_inputs_without_identity = assembly.num_inputs - 1; + + // inputs must be constrained manually in SAP style, + // so input 0 (identity) is constrained as 1*1=1 + { + use std::ops::{Add, Sub}; + + fn eval_lc( + l: LinearCombination, + inputs: &mut [Vec<(E::Fr, usize)>], + aux: &mut [Vec<(E::Fr, usize)>], + this_constraint: usize + ) + { + for (index, coeff) in l.0 { + match index { + Variable(Index::Input(id)) => inputs[id].push((coeff, this_constraint)), + Variable(Index::Aux(id)) => aux[id].push((coeff, this_constraint)) + } + } + } + + let mut lc_input_0_a: LinearCombination = LinearCombination::zero(); + lc_input_0_a = lc_input_0_a.add(input_0.clone()); + eval_lc(lc_input_0_a, &mut assembly.at_inputs, &mut assembly.at_aux, assembly.num_constraints); + + assembly.num_constraints += 1; + } + + let num_constraints_before_inputs_constraining = assembly.num_constraints; + let num_aux_before_inputs_constraining = assembly.num_aux; + + // Other inputs are constrained as x_i * 1 = x_i where + // 1 is actually input number 0 (identity) + + for i in 1..assembly.num_inputs { + assembly.enforce(|| "", + |lc| lc + Variable(Index::Input(i)), + |lc| lc + Variable(Index::Input(0)), + |lc| lc + Variable(Index::Input(i)), + ); + } + + // check that each input generates 2 constraints + assert_eq!(num_inputs_without_identity * 2 + + num_constraints_before_inputs_constraining, + assembly.num_constraints, + "each input must produce two extra constraints"); + // and that it creates one extra variable + assert_eq!(num_inputs_without_identity + + num_aux_before_inputs_constraining, + assembly.num_aux, + "each input must generate an extra variable"); + + assert_eq!(assembly.num_inputs + assembly.num_r1cs_constraints + assembly.num_r1cs_aux, + assembly.num_inputs + assembly.num_aux, + "each constraint in principle adds one variable"); + + if verbose {eprintln!("Constraint system size is {}", assembly.num_constraints)}; + // Create bases for blind evaluation of polynomials at tau + let powers_of_tau = vec![Scalar::(E::Fr::zero()); assembly.num_constraints]; + let mut domain = EvaluationDomain::from_coeffs(powers_of_tau)?; + + // Compute G1 window table + let mut g1_wnaf = Wnaf::new(); + let g1_wnaf = g1_wnaf.base(g1, { + 2*(assembly.num_inputs + assembly.num_r1cs_constraints + assembly.num_r1cs_aux) + + assembly.num_r1cs_constraints + assembly.num_r1cs_aux + + 2*(assembly.num_inputs + assembly.num_r1cs_constraints) + }); + + // Compute gamma*G2 window table + let mut g2_wnaf = Wnaf::new(); + // let gamma_g2 = g2.into_affine().mul(gamma.into_repr()); + let g2_wnaf = g2_wnaf.base(g2, { + // B query + assembly.num_inputs + assembly.num_aux + // alternatively expressed as + // assembly.num_inputs + assembly.num_r1cs_constraints + assembly.num_r1cs_aux + }); + + let worker = Worker::new(); + + // let z_at_tau = { + // // Compute powers of tau + // if verbose {eprintln!("computing powers of tau...")}; + + // let start = std::time::Instant::now(); + + // { + // let domain = domain.as_mut(); + // worker.scope(domain.len(), |scope, chunk| { + // for (i, subdomain) in domain.chunks_mut(chunk).enumerate() + // { + // scope.spawn(move || { + // let mut current_power = tau.pow(&[(i*chunk) as u64]); + + // for p in subdomain { + // p.0 = current_power; + // current_power.mul_assign(&tau); + // } + // }); + // } + // }); + // } + // if verbose {eprintln!("powers of tau stage 1 done in {} s", start.elapsed().as_millis() as f64 / 1000.0);}; + + // // z_at_tau = t(x) + // let 
z_at_tau = domain.z(&tau); + + // z_at_tau + // }; + + let domain_length = domain.as_ref().len(); + + if verbose {eprintln!("Domain length is {} ", domain_length)}; + + // G1^{gamma^2 * Z(t) * t^i} for 0 <= i < 2^m - 1 for 2^m domains + let mut gamma2_z_t_g1 = vec![E::G1::zero(); domain.as_ref().len() - 1]; + let mut z_at_tau = E::Fr::zero(); + + { + // Compute powers of tau + if verbose {eprintln!("computing powers of tau...")}; + + let start = std::time::Instant::now(); + + { + let domain = domain.as_mut(); + worker.scope(domain.len(), |scope, chunk| { + for (i, subdomain) in domain.chunks_mut(chunk).enumerate() + { + scope.spawn(move |_| { + let mut current_power = tau.pow(&[(i*chunk) as u64]); + + for p in subdomain { + p.0 = current_power; + current_power.mul_assign(&tau); + } + }); + } + }); + } + if verbose {eprintln!("powers of tau stage 1 done in {} s", start.elapsed().as_millis() as f64 / 1000.0);}; + + // z_at_tau = t(x) + z_at_tau = domain.z(&tau); + + let mut gamma2_z_t = z_at_tau; + gamma2_z_t.mul_assign(&gamma); + gamma2_z_t.mul_assign(&gamma); + + if verbose {eprintln!("computing the `G1^(gamma^2 * Z(t) * t^i)` query with multiple threads...")}; + + let start = std::time::Instant::now(); + + // Compute the H query with multiple threads + worker.scope(gamma2_z_t_g1.len(), |scope, chunk| { + for (gamma2_z_t_g1, p) in gamma2_z_t_g1.chunks_mut(chunk).zip(domain.as_ref().chunks(chunk)) + { + let mut g1_wnaf = g1_wnaf.shared(); + scope.spawn(move |_| { + // Set values of the H query to g1^{(tau^i * t(tau)) / delta} + for (gamma2_z_t_g1, p) in gamma2_z_t_g1.iter_mut().zip(p.iter()) + { + // Compute final exponent + let mut exp = p.0; + exp.mul_assign(&gamma2_z_t); + + // Exponentiate + *gamma2_z_t_g1 = g1_wnaf.scalar(exp.into_repr()); + } + + // Batch normalize + E::G1::batch_normalization(gamma2_z_t_g1); + }); + } + }); + if verbose {eprintln!("computing the `G1^(gamma^2 * Z(t) * t^i)` query done in {} s", start.elapsed().as_millis() as f64 / 1000.0);}; + } + + // G1^{gamma * A_i(t)} for 0 <= i <= num_variables + let mut a_g1 = vec![E::G1::zero(); assembly.num_inputs + assembly.num_aux]; + // G2^{gamma * A_i(t)} for 0 <= i <= num_variables + let mut a_g2 = vec![E::G2::zero(); assembly.num_inputs + assembly.num_aux]; + + // G1^{gamma^2 * C_i(t) + (alpha + beta) * gamma * A_i(t)} + // for num_inputs + 1 < i <= num_variables + let mut c_1_g1 = vec![E::G1::zero(); assembly.num_inputs + assembly.num_aux]; + // G1^{2 * gamma^2 * Z(t) * A_i(t)} for 0 <= i <= num_variables + let mut c_2_g1 = vec![E::G1::zero(); assembly.num_inputs + assembly.num_aux]; + + // G1^{gamma * Z(t)} + let mut gamma_zt = gamma; + gamma_zt.mul_assign(&z_at_tau); + + let gamma_z = g1.into_affine().mul(gamma.into_repr()); + // G2^{gamma * Z(t)} + let gamma_z_g2 = g2.into_affine().mul(gamma.into_repr()); + + let mut ab_gamma = alpha; + ab_gamma.add_assign(&beta); + ab_gamma.mul_assign(&gamma); + // G1^{(alpha + beta) * gamma * Z(t)} + let ab_gamma_z_g1 = g1.into_affine().mul(ab_gamma.into_repr()); + + let mut gamma2_z2 = gamma; + gamma2_z2.mul_assign(&z_at_tau); + gamma2_z2.square(); + // G1^{gamma^2 * Z(t)^2} + let gamma2_z2_g1 = g1.into_affine().mul(gamma2_z2.into_repr()); + + // G^{gamma^2 * Z(t) * t^i} for 0 <= i < 2^m - 1 for 2^m domains + let mut gamma2_z_t = vec![E::G1::zero(); domain.as_ref().len() - 1]; + + if verbose {eprintln!("using inverse FFT to convert to intepolation coefficients...")}; + + let start = std::time::Instant::now(); + + // Use inverse FFT to convert to intepolation coefficients + 
domain.ifft(&worker); + let powers_of_tau = domain.into_coeffs(); + // domain is now a set of scalars + + if verbose {eprintln!("powers of tau evaluation in radix2 domain in {} s", start.elapsed().as_millis() as f64 / 1000.0)}; + + if verbose {eprintln!("evaluating polynomials...")}; + let start = std::time::Instant::now(); + + // overall strategy: + // a_g1, a_g2, c_1_g1, c_2_g1 should be combined together by computing + // ab = (alpha + beta) + // g_2 = gamma^2 + // t0 = gamma*A_i(t) + // t1 = g_2*C_t(t) + // a_g1 = t0*G1 + // a_g2 = t0*G2 + // c_1_g1 = (t1 + ab*t0)*G1 + // c_2_g1 = (2*gamma*z_at_tau*t0)*G1 + + fn eval_stage_1( + // wNAF window tables + g1_wnaf: &Wnaf>, + g2_wnaf: &Wnaf>, + + // powers of tau coefficients + powers_of_tau: &[Scalar], + + // SAP polynomials + at: &[Vec<(E::Fr, usize)>], + ct: &[Vec<(E::Fr, usize)>], + + // Resulting evaluated SAP polynomials + a_g1: &mut [E::G1], + a_g2: &mut [E::G2], + c_1_g1: &mut [E::G1], + c_2_g1: &mut [E::G1], + + // Trapdoors + alpha: &E::Fr, + beta: &E::Fr, + gamma: &E::Fr, + z_at_tau: &E::Fr, + + // Worker + worker: &Worker + ) + + { + // Sanity check + assert_eq!(a_g1.len(), at.len()); + assert_eq!(a_g1.len(), ct.len()); + assert_eq!(a_g1.len(), a_g2.len()); + assert_eq!(a_g1.len(), c_1_g1.len()); + assert_eq!(a_g1.len(), c_2_g1.len()); + + // compute once + let mut ab = *alpha; + ab.add_assign(&beta); + + let mut gamma2 = *gamma; + gamma2.square(); + + // Evaluate polynomials in multiple threads + worker.scope(a_g1.len(), |scope, chunk| { + for (((((a_g1, a_g2), c_1_g1), c_2_g1), at), ct) in a_g1.chunks_mut(chunk) + .zip(a_g2.chunks_mut(chunk)) + .zip(c_1_g1.chunks_mut(chunk)) + .zip(c_2_g1.chunks_mut(chunk)) + .zip(at.chunks(chunk)) + .zip(ct.chunks(chunk)) + { + let mut g1_wnaf = g1_wnaf.shared(); + let mut g2_wnaf = g2_wnaf.shared(); + + scope.spawn(move |_| { + for (((((a_g1, a_g2), c_1_g1), c_2_g1), at), ct) in a_g1.iter_mut() + .zip(a_g2.iter_mut()) + .zip(c_1_g1.iter_mut()) + .zip(c_2_g1.iter_mut()) + .zip(at.iter()) + .zip(ct.iter()) + { + fn eval_at_tau( + powers_of_tau: &[Scalar], + p: &[(E::Fr, usize)] + ) -> E::Fr + { + let mut acc = E::Fr::zero(); + + for &(ref coeff, index) in p { + let mut n = powers_of_tau[index].0; + n.mul_assign(coeff); + acc.add_assign(&n); + } + + acc + } + + // Evaluate SAP polynomials at tau + // t0 = gamma*A_i(t) + let mut t0 = eval_at_tau(powers_of_tau, at); + t0.mul_assign(&gamma); + // t1 = gamma^2*C_t(t) + let mut t1 = eval_at_tau(powers_of_tau, ct); + t1.mul_assign(&gamma2); + + // a_g1 = t0*G1 + // a_g2 = t0*G2 + // c_1_g1 = (t1 + ab*t0)*G1 + // c_2_g1 = (2*gamma*z_at_tau*t0)*G1 + + // Compute a_g1 and a_g2 + if !t0.is_zero() { + *a_g1 = g1_wnaf.scalar(t0.into_repr()); + *a_g2 = g2_wnaf.scalar(t0.into_repr()); + } + + let mut c_1_g1_factor = t0; + c_1_g1_factor.mul_assign(&ab); + c_1_g1_factor.add_assign(&t1); + + // (2*gamma*z_at_tau*t0) inplace + t0.mul_assign(&z_at_tau); + t0.mul_assign(&gamma); + t0.double(); + + *c_1_g1 = g1_wnaf.scalar(c_1_g1_factor.into_repr()); + *c_2_g1 = g1_wnaf.scalar(t0.into_repr()); + } + + // Batch normalize + E::G1::batch_normalization(a_g1); + E::G2::batch_normalization(a_g2); + E::G1::batch_normalization(c_1_g1); + E::G1::batch_normalization(c_2_g1); + }); + }; + }); + } + + // Evaluate for inputs. 
+ eval_stage_1( + &g1_wnaf, + &g2_wnaf, + &powers_of_tau, + &assembly.at_inputs, + &assembly.ct_inputs, + &mut a_g1[0..assembly.num_inputs], + &mut a_g2[0..assembly.num_inputs], + &mut c_1_g1[0..assembly.num_inputs], + &mut c_2_g1[0..assembly.num_inputs], + &alpha, + &beta, + &gamma, + &z_at_tau, + &worker + ); + + // Evaluate for inputs. + eval_stage_1( + &g1_wnaf, + &g2_wnaf, + &powers_of_tau, + &assembly.at_aux, + &assembly.ct_aux, + &mut a_g1[assembly.num_inputs..], + &mut a_g2[assembly.num_inputs..], + &mut c_1_g1[assembly.num_inputs..], + &mut c_2_g1[assembly.num_inputs..], + &alpha, + &beta, + &gamma, + &z_at_tau, + &worker + ); + + // for _ in 0..assembly.num_inputs { + // c_1_g1.remove(0); + // } + + if verbose {eprintln!("evaluating polynomials done in {} s", start.elapsed().as_millis() as f64 / 1000.0);}; + + // // Don't allow any elements be unconstrained, so that + // // the L query is always fully dense. + // for e in l.iter() { + // if e.is_zero() { + // return Err(SynthesisError::UnconstrainedVariable); + // } + // } + + // let g1 = g1.into_affine(); + // let g2 = g2.into_affine(); + + // let vk = VerifyingKey:: { + // alpha_g1: g1.mul(alpha).into_affine(), + // beta_g1: g1.mul(beta).into_affine(), + // beta_g2: g2.mul(beta).into_affine(), + // gamma_g2: g2.mul(gamma).into_affine(), + // delta_g1: g1.mul(delta).into_affine(), + // delta_g2: g2.mul(delta).into_affine(), + // ic: ic.into_iter().map(|e| e.into_affine()).collect() + // }; + + println!("Has generated {} points", a_g1.len()); + + Ok(()) + + // Ok(Parameters { + // vk: vk, + // h: Arc::new(h.into_iter().map(|e| e.into_affine()).collect()), + // l: Arc::new(l.into_iter().map(|e| e.into_affine()).collect()), + + // // Filter points at infinity away from A/B queries + // a: Arc::new(a.into_iter().filter(|e| !e.is_zero()).map(|e| e.into_affine()).collect()), + // b_g1: Arc::new(b_g1.into_iter().filter(|e| !e.is_zero()).map(|e| e.into_affine()).collect()), + // b_g2: Arc::new(b_g2.into_iter().filter(|e| !e.is_zero()).map(|e| e.into_affine()).collect()) + // }) +} diff --git a/bellman/src/gm17/mod.rs b/bellman/src/gm17/mod.rs new file mode 100644 index 0000000..dc500d7 --- /dev/null +++ b/bellman/src/gm17/mod.rs @@ -0,0 +1,563 @@ +use crate::pairing::{ + Engine, + CurveAffine, + EncodedPoint +}; + +use crate::{ + SynthesisError +}; + +use crate::source::SourceBuilder; +use std::io::{self, Read, Write}; +use std::sync::Arc; +use byteorder::{BigEndian, WriteBytesExt, ReadBytesExt}; + +#[cfg(test)] +mod tests; + +mod generator; +// mod prover; +// mod verifier; + +pub use self::generator::*; +// pub use self::prover::*; +// pub use self::verifier::*; + +#[derive(Debug, Clone)] +pub struct Proof { + pub a: E::G1Affine, + pub b: E::G2Affine, + pub c: E::G1Affine +} + +impl PartialEq for Proof { + fn eq(&self, other: &Self) -> bool { + self.a == other.a && + self.b == other.b && + self.c == other.c + } +} + +impl Proof { + pub fn write( + &self, + mut writer: W + ) -> io::Result<()> + { + writer.write_all(self.a.into_compressed().as_ref())?; + writer.write_all(self.b.into_compressed().as_ref())?; + writer.write_all(self.c.into_compressed().as_ref())?; + + Ok(()) + } + + pub fn read( + mut reader: R + ) -> io::Result + { + let mut g1_repr = ::Compressed::empty(); + let mut g2_repr = ::Compressed::empty(); + + reader.read_exact(g1_repr.as_mut())?; + let a = g1_repr + .into_affine() + .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e)) + .and_then(|e| if e.is_zero() { + Err(io::Error::new(io::ErrorKind::InvalidData, 
"point at infinity")) + } else { + Ok(e) + })?; + + reader.read_exact(g2_repr.as_mut())?; + let b = g2_repr + .into_affine() + .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e)) + .and_then(|e| if e.is_zero() { + Err(io::Error::new(io::ErrorKind::InvalidData, "point at infinity")) + } else { + Ok(e) + })?; + + reader.read_exact(g1_repr.as_mut())?; + let c = g1_repr + .into_affine() + .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e)) + .and_then(|e| if e.is_zero() { + Err(io::Error::new(io::ErrorKind::InvalidData, "point at infinity")) + } else { + Ok(e) + })?; + + Ok(Proof { + a: a, + b: b, + c: c + }) + } +} + +#[derive(Clone)] +pub struct VerifyingKey { + pub h_g2: E::G2Affine, + + // alpha in g1 for verifying and for creating A/C elements of + // proof. Never the point at infinity. + pub alpha_g1: E::G1Affine, + + // beta in g2 for verifying. Never the point at infinity. + pub beta_g2: E::G2Affine, + + // gamma in g1 for verifying. Never the point at infinity. + pub gamma_g1: E::G1Affine, + + // gamma in g2 for verifying. Never the point at infinity. + pub gamma_g2: E::G2Affine, + + // Elements of the form G^{gamma * A_i(t) + (alpha + beta) * A_i(t)} + // for all public inputs. Because all public inputs have a dummy constraint, + // this is the same size as the number of inputs, and never contains points + // at infinity. + pub ic: Vec +} + +impl PartialEq for VerifyingKey { + fn eq(&self, other: &Self) -> bool { + self.h_g2 == other.h_g2 && + self.alpha_g1 == other.alpha_g1 && + self.beta_g2 == other.beta_g2 && + self.gamma_g1 == other.gamma_g1 && + self.gamma_g2 == other.gamma_g2 && + self.ic == other.ic + } +} + +impl VerifyingKey { + pub fn write( + &self, + mut writer: W + ) -> io::Result<()> + { + writer.write_all(self.h_g2.into_uncompressed().as_ref())?; + writer.write_all(self.alpha_g1.into_uncompressed().as_ref())?; + writer.write_all(self.beta_g2.into_uncompressed().as_ref())?; + writer.write_all(self.gamma_g1.into_uncompressed().as_ref())?; + writer.write_all(self.gamma_g2.into_uncompressed().as_ref())?; + writer.write_u32::(self.ic.len() as u32)?; + for ic in &self.ic { + writer.write_all(ic.into_uncompressed().as_ref())?; + } + + Ok(()) + } + + pub fn read( + mut reader: R + ) -> io::Result + { + let mut g1_repr = ::Uncompressed::empty(); + let mut g2_repr = ::Uncompressed::empty(); + + reader.read_exact(g2_repr.as_mut())?; + let h_h2 = g2_repr.into_affine().map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?; + + reader.read_exact(g1_repr.as_mut())?; + let alpha_g1 = g1_repr.into_affine().map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?; + + reader.read_exact(g2_repr.as_mut())?; + let beta_g2 = g2_repr.into_affine().map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?; + + reader.read_exact(g1_repr.as_mut())?; + let gamma_g1 = g1_repr.into_affine().map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?; + + reader.read_exact(g2_repr.as_mut())?; + let gamma_g2 = g2_repr.into_affine().map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?; + + let ic_len = reader.read_u32::()? 
as usize; + + let mut ic = vec![]; + + for _ in 0..ic_len { + reader.read_exact(g1_repr.as_mut())?; + let g1 = g1_repr + .into_affine() + .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e)) + .and_then(|e| if e.is_zero() { + Err(io::Error::new(io::ErrorKind::InvalidData, "point at infinity")) + } else { + Ok(e) + })?; + + ic.push(g1); + } + + Ok(VerifyingKey { + h_g2: h_h2, + alpha_g1: alpha_g1, + beta_g2: beta_g2, + gamma_g1: gamma_g1, + gamma_g2: gamma_g2, + ic: ic + }) + } +} + +#[derive(Clone)] +pub struct Parameters { + pub vk: VerifyingKey, + pub a_g1: Arc>, + pub a_g2: Arc>, + + pub c_1_g1: Arc>, + pub c_2_g1: Arc>, + + pub gamma_z: E::G1Affine, + pub gamma_z_g2: E::G2Affine, + + pub ab_gamma_z_g1: E::G1Affine, + pub gamma2_z2_g1: E::G1Affine, + + pub gamma2_z_t: Arc>, +} + +impl PartialEq for Parameters { + fn eq(&self, other: &Self) -> bool { + self.vk == other.vk && + self.a_g1 == other.a_g1 && + self.a_g2 == other.a_g2 && + self.c_1_g1 == other.c_1_g1 && + self.c_2_g1 == other.c_2_g1 && + self.gamma_z == other.gamma_z && + self.gamma_z_g2 == other.gamma_z_g2 && + self.ab_gamma_z_g1 == other.ab_gamma_z_g1 && + self.gamma2_z2_g1 == other.gamma2_z2_g1 && + self.gamma2_z_t == other.gamma2_z_t + } +} + +// impl Parameters { +// pub fn write( +// &self, +// mut writer: W +// ) -> io::Result<()> +// { +// self.vk.write(&mut writer)?; + +// writer.write_u32::(self.h.len() as u32)?; +// for g in &self.h[..] { +// writer.write_all(g.into_uncompressed().as_ref())?; +// } + +// writer.write_u32::(self.l.len() as u32)?; +// for g in &self.l[..] { +// writer.write_all(g.into_uncompressed().as_ref())?; +// } + +// writer.write_u32::(self.a.len() as u32)?; +// for g in &self.a[..] { +// writer.write_all(g.into_uncompressed().as_ref())?; +// } + +// writer.write_u32::(self.b_g1.len() as u32)?; +// for g in &self.b_g1[..] { +// writer.write_all(g.into_uncompressed().as_ref())?; +// } + +// writer.write_u32::(self.b_g2.len() as u32)?; +// for g in &self.b_g2[..] { +// writer.write_all(g.into_uncompressed().as_ref())?; +// } + +// Ok(()) +// } + +// pub fn read( +// mut reader: R, +// checked: bool +// ) -> io::Result +// { +// let read_g1 = |reader: &mut R| -> io::Result { +// let mut repr = ::Uncompressed::empty(); +// reader.read_exact(repr.as_mut())?; + +// if checked { +// repr +// .into_affine() +// } else { +// repr +// .into_affine_unchecked() +// } +// .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e)) +// .and_then(|e| if e.is_zero() { +// Err(io::Error::new(io::ErrorKind::InvalidData, "point at infinity")) +// } else { +// Ok(e) +// }) +// }; + +// let read_g2 = |reader: &mut R| -> io::Result { +// let mut repr = ::Uncompressed::empty(); +// reader.read_exact(repr.as_mut())?; + +// if checked { +// repr +// .into_affine() +// } else { +// repr +// .into_affine_unchecked() +// } +// .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e)) +// .and_then(|e| if e.is_zero() { +// Err(io::Error::new(io::ErrorKind::InvalidData, "point at infinity")) +// } else { +// Ok(e) +// }) +// }; + +// let vk = VerifyingKey::::read(&mut reader)?; + +// let mut h = vec![]; +// let mut l = vec![]; +// let mut a = vec![]; +// let mut b_g1 = vec![]; +// let mut b_g2 = vec![]; + +// { +// let len = reader.read_u32::()? as usize; +// for _ in 0..len { +// h.push(read_g1(&mut reader)?); +// } +// } + +// { +// let len = reader.read_u32::()? as usize; +// for _ in 0..len { +// l.push(read_g1(&mut reader)?); +// } +// } + +// { +// let len = reader.read_u32::()? 
as usize; +// for _ in 0..len { +// a.push(read_g1(&mut reader)?); +// } +// } + +// { +// let len = reader.read_u32::()? as usize; +// for _ in 0..len { +// b_g1.push(read_g1(&mut reader)?); +// } +// } + +// { +// let len = reader.read_u32::()? as usize; +// for _ in 0..len { +// b_g2.push(read_g2(&mut reader)?); +// } +// } + +// Ok(Parameters { +// vk: vk, +// h: Arc::new(h), +// l: Arc::new(l), +// a: Arc::new(a), +// b_g1: Arc::new(b_g1), +// b_g2: Arc::new(b_g2) +// }) +// } +// } + +// pub struct PreparedVerifyingKey { +// /// Pairing result of alpha*beta +// alpha_g1_beta_g2: E::Fqk, +// /// -gamma in G2 +// neg_gamma_g2: ::Prepared, +// /// -delta in G2 +// neg_delta_g2: ::Prepared, +// /// Copy of IC from `VerifiyingKey`. +// ic: Vec +// } + +// pub trait ParameterSource { +// type G1Builder: SourceBuilder; +// type G2Builder: SourceBuilder; + +// fn get_vk( +// &mut self, +// num_ic: usize +// ) -> Result, SynthesisError>; +// fn get_h( +// &mut self, +// num_h: usize +// ) -> Result; +// fn get_l( +// &mut self, +// num_l: usize +// ) -> Result; +// fn get_a( +// &mut self, +// num_inputs: usize, +// num_aux: usize +// ) -> Result<(Self::G1Builder, Self::G1Builder), SynthesisError>; +// fn get_b_g1( +// &mut self, +// num_inputs: usize, +// num_aux: usize +// ) -> Result<(Self::G1Builder, Self::G1Builder), SynthesisError>; +// fn get_b_g2( +// &mut self, +// num_inputs: usize, +// num_aux: usize +// ) -> Result<(Self::G2Builder, Self::G2Builder), SynthesisError>; +// } + +// impl<'a, E: Engine> ParameterSource for &'a Parameters { +// type G1Builder = (Arc>, usize); +// type G2Builder = (Arc>, usize); + +// fn get_vk( +// &mut self, +// _: usize +// ) -> Result, SynthesisError> +// { +// Ok(self.vk.clone()) +// } + +// fn get_h( +// &mut self, +// _: usize +// ) -> Result +// { +// Ok((self.h.clone(), 0)) +// } + +// fn get_l( +// &mut self, +// _: usize +// ) -> Result +// { +// Ok((self.l.clone(), 0)) +// } + +// fn get_a( +// &mut self, +// num_inputs: usize, +// _: usize +// ) -> Result<(Self::G1Builder, Self::G1Builder), SynthesisError> +// { +// Ok(((self.a.clone(), 0), (self.a.clone(), num_inputs))) +// } + +// fn get_b_g1( +// &mut self, +// num_inputs: usize, +// _: usize +// ) -> Result<(Self::G1Builder, Self::G1Builder), SynthesisError> +// { +// Ok(((self.b_g1.clone(), 0), (self.b_g1.clone(), num_inputs))) +// } + +// fn get_b_g2( +// &mut self, +// num_inputs: usize, +// _: usize +// ) -> Result<(Self::G2Builder, Self::G2Builder), SynthesisError> +// { +// Ok(((self.b_g2.clone(), 0), (self.b_g2.clone(), num_inputs))) +// } +// } + +// #[cfg(test)] +// mod test_with_bls12_381 { +// use super::*; +// use {Circuit, SynthesisError, ConstraintSystem}; + +// use rand::{Rand, thread_rng}; +// use crate::pairing::ff::{Field}; +// use crate::pairing::bls12_381::{Bls12, Fr}; + +// #[test] +// fn serialization() { +// struct MySillyCircuit { +// a: Option, +// b: Option +// } + +// impl Circuit for MySillyCircuit { +// fn synthesize>( +// self, +// cs: &mut CS +// ) -> Result<(), SynthesisError> +// { +// let a = cs.alloc(|| "a", || self.a.ok_or(SynthesisError::AssignmentMissing))?; +// let b = cs.alloc(|| "b", || self.b.ok_or(SynthesisError::AssignmentMissing))?; +// let c = cs.alloc_input(|| "c", || { +// let mut a = self.a.ok_or(SynthesisError::AssignmentMissing)?; +// let b = self.b.ok_or(SynthesisError::AssignmentMissing)?; + +// a.mul_assign(&b); +// Ok(a) +// })?; + +// cs.enforce( +// || "a*b=c", +// |lc| lc + a, +// |lc| lc + b, +// |lc| lc + c +// ); + +// Ok(()) 
+// } +// } + +// let rng = &mut thread_rng(); + +// let params = generate_random_parameters::( +// MySillyCircuit { a: None, b: None }, +// rng +// ).unwrap(); + +// { +// let mut v = vec![]; + +// params.write(&mut v).unwrap(); +// assert_eq!(v.len(), 2136); + +// let de_params = Parameters::read(&v[..], true).unwrap(); +// assert!(params == de_params); + +// let de_params = Parameters::read(&v[..], false).unwrap(); +// assert!(params == de_params); +// } + +// let pvk = prepare_verifying_key::(¶ms.vk); + +// for _ in 0..100 { +// let a = Fr::rand(rng); +// let b = Fr::rand(rng); +// let mut c = a; +// c.mul_assign(&b); + +// let proof = create_random_proof( +// MySillyCircuit { +// a: Some(a), +// b: Some(b) +// }, +// ¶ms, +// rng +// ).unwrap(); + +// let mut v = vec![]; +// proof.write(&mut v).unwrap(); + +// assert_eq!(v.len(), 192); + +// let de_proof = Proof::read(&v[..]).unwrap(); +// assert!(proof == de_proof); + +// assert!(verify_proof(&pvk, &proof, &[c]).unwrap()); +// assert!(!verify_proof(&pvk, &proof, &[a]).unwrap()); +// } +// } +// } \ No newline at end of file diff --git a/bellman/src/gm17/tests/mod.rs b/bellman/src/gm17/tests/mod.rs new file mode 100644 index 0000000..697065a --- /dev/null +++ b/bellman/src/gm17/tests/mod.rs @@ -0,0 +1,329 @@ +use crate::pairing::{ + Engine +}; + +use crate::pairing::ff:: { + Field, + PrimeField, +}; + +use super::super::tests::dummy_engine::*; +use super::super::tests::XORDemo; + +use std::marker::PhantomData; + +use crate::{ + Circuit, + ConstraintSystem, + SynthesisError +}; + +use super::{ + generate_parameters, + // prepare_verifying_key, + // create_proof, + // verify_proof +}; + +#[test] +fn test_gm17_xordemo() { + let g1 = Fr::one(); + let g2 = Fr::one(); + let alpha = Fr::from_str("48577").unwrap(); + let beta = Fr::from_str("22580").unwrap(); + let gamma = Fr::from_str("53332").unwrap(); + // let delta = Fr::from_str("5481").unwrap(); + let tau = Fr::from_str("3673").unwrap(); + + let params = { + let c = XORDemo:: { + a: None, + b: None, + _marker: PhantomData + }; + + generate_parameters( + c, + g1, + g2, + alpha, + beta, + gamma, + tau + ).unwrap() + }; + + // // This will synthesize the constraint system: + // // + // // public inputs: a_0 = 1, a_1 = c + // // aux inputs: a_2 = a, a_3 = b + // // constraints: + // // (a_0 - a_2) * (a_2) = 0 + // // (a_0 - a_3) * (a_3) = 0 + // // (a_2 + a_2) * (a_3) = (a_2 + a_3 - a_1) + // // (a_0) * 0 = 0 + // // (a_1) * 0 = 0 + + // // The evaluation domain is 8. The H query should + // // have 7 elements (it's a quotient polynomial) + // assert_eq!(7, params.h.len()); + + // let mut root_of_unity = Fr::root_of_unity(); + + // // We expect this to be a 2^10 root of unity + // assert_eq!(Fr::one(), root_of_unity.pow(&[1 << 10])); + + // // Let's turn it into a 2^3 root of unity. + // root_of_unity = root_of_unity.pow(&[1 << 7]); + // assert_eq!(Fr::one(), root_of_unity.pow(&[1 << 3])); + // assert_eq!(Fr::from_str("20201").unwrap(), root_of_unity); + + // // Let's compute all the points in our evaluation domain. + // let mut points = Vec::with_capacity(8); + // for i in 0..8 { + // points.push(root_of_unity.pow(&[i])); + // } + + // // Let's compute t(tau) = (tau - p_0)(tau - p_1)... 
+ // // = tau^8 - 1 + // let mut t_at_tau = tau.pow(&[8]); + // t_at_tau.sub_assign(&Fr::one()); + // { + // let mut tmp = Fr::one(); + // for p in &points { + // let mut term = tau; + // term.sub_assign(p); + // tmp.mul_assign(&term); + // } + // assert_eq!(tmp, t_at_tau); + // } + + // // We expect our H query to be 7 elements of the form... + // // {tau^i t(tau) / delta} + // let delta_inverse = delta.inverse().unwrap(); + // let gamma_inverse = gamma.inverse().unwrap(); + // { + // let mut coeff = delta_inverse; + // coeff.mul_assign(&t_at_tau); + + // let mut cur = Fr::one(); + // for h in params.h.iter() { + // let mut tmp = cur; + // tmp.mul_assign(&coeff); + + // assert_eq!(*h, tmp); + + // cur.mul_assign(&tau); + // } + // } + + // // The density of the IC query is 2 (2 inputs) + // assert_eq!(2, params.vk.ic.len()); + + // // The density of the L query is 2 (2 aux variables) + // assert_eq!(2, params.l.len()); + + // // The density of the A query is 4 (each variable is in at least one A term) + // assert_eq!(4, params.a.len()); + + // // The density of the B query is 2 (two variables are in at least one B term) + // assert_eq!(2, params.b_g1.len()); + // assert_eq!(2, params.b_g2.len()); + + // /* + // Lagrange interpolation polynomials in our evaluation domain: + + // ,-------------------------------. ,-------------------------------. ,-------------------------------. + // | A TERM | | B TERM | | C TERM | + // `-------------------------------. `-------------------------------' `-------------------------------' + // | a_0 | a_1 | a_2 | a_3 | | a_0 | a_1 | a_2 | a_3 | | a_0 | a_1 | a_2 | a_3 | + // | 1 | 0 | 64512 | 0 | | 0 | 0 | 1 | 0 | | 0 | 0 | 0 | 0 | + // | 1 | 0 | 0 | 64512 | | 0 | 0 | 0 | 1 | | 0 | 0 | 0 | 0 | + // | 0 | 0 | 2 | 0 | | 0 | 0 | 0 | 1 | | 0 | 64512 | 1 | 1 | + // | 1 | 0 | 0 | 0 | | 0 | 0 | 0 | 0 | | 0 | 0 | 0 | 0 | + // | 0 | 1 | 0 | 0 | | 0 | 0 | 0 | 0 | | 0 | 0 | 0 | 0 | + // `-------'-------'-------'-------' `-------'-------'-------'-------' `-------'-------'-------'-------' + + // Example for u_0: + + // sage: r = 64513 + // sage: Fr = GF(r) + // sage: omega = (Fr(5)^63)^(2^7) + // sage: tau = Fr(3673) + // sage: R. 
= PolynomialRing(Fr, 'x') + // sage: def eval(tau, c0, c1, c2, c3, c4): + // ....: p = R.lagrange_polynomial([(omega^0, c0), (omega^1, c1), (omega^2, c2), (omega^3, c3), (omega^4, c4), (omega^5, 0), (omega^6, 0), (omega^7, 0)]) + // ....: return p.substitute(tau) + // sage: eval(tau, 1, 1, 0, 1, 0) + // 59158 + // */ + + // let u_i = [59158, 48317, 21767, 10402].iter().map(|e| { + // Fr::from_str(&format!("{}", e)).unwrap() + // }).collect::>(); + // let v_i = [0, 0, 60619, 30791].iter().map(|e| { + // Fr::from_str(&format!("{}", e)).unwrap() + // }).collect::>(); + // let w_i = [0, 23320, 41193, 41193].iter().map(|e| { + // Fr::from_str(&format!("{}", e)).unwrap() + // }).collect::>(); + + // for (u, a) in u_i.iter() + // .zip(¶ms.a[..]) + // { + // assert_eq!(u, a); + // } + + // for (v, b) in v_i.iter() + // .filter(|&&e| e != Fr::zero()) + // .zip(¶ms.b_g1[..]) + // { + // assert_eq!(v, b); + // } + + // for (v, b) in v_i.iter() + // .filter(|&&e| e != Fr::zero()) + // .zip(¶ms.b_g2[..]) + // { + // assert_eq!(v, b); + // } + + // for i in 0..4 { + // let mut tmp1 = beta; + // tmp1.mul_assign(&u_i[i]); + + // let mut tmp2 = alpha; + // tmp2.mul_assign(&v_i[i]); + + // tmp1.add_assign(&tmp2); + // tmp1.add_assign(&w_i[i]); + + // if i < 2 { + // // Check the correctness of the IC query elements + // tmp1.mul_assign(&gamma_inverse); + + // assert_eq!(tmp1, params.vk.ic[i]); + // } else { + // // Check the correctness of the L query elements + // tmp1.mul_assign(&delta_inverse); + + // assert_eq!(tmp1, params.l[i - 2]); + // } + // } + + // // Check consistency of the other elements + // assert_eq!(alpha, params.vk.alpha_g1); + // assert_eq!(beta, params.vk.beta_g1); + // assert_eq!(beta, params.vk.beta_g2); + // assert_eq!(gamma, params.vk.gamma_g2); + // assert_eq!(delta, params.vk.delta_g1); + // assert_eq!(delta, params.vk.delta_g2); + + // let pvk = prepare_verifying_key(¶ms.vk); + + // let r = Fr::from_str("27134").unwrap(); + // let s = Fr::from_str("17146").unwrap(); + + // let proof = { + // let c = XORDemo { + // a: Some(true), + // b: Some(false), + // _marker: PhantomData + // }; + + // create_proof( + // c, + // ¶ms, + // r, + // s + // ).unwrap() + // }; + + // // A(x) = + // // a_0 * (44865*x^7 + 56449*x^6 + 44865*x^5 + 8064*x^4 + 3520*x^3 + 56449*x^2 + 3520*x + 40321) + + // // a_1 * (8064*x^7 + 56449*x^6 + 8064*x^5 + 56449*x^4 + 8064*x^3 + 56449*x^2 + 8064*x + 56449) + + // // a_2 * (16983*x^7 + 24192*x^6 + 63658*x^5 + 56449*x^4 + 16983*x^3 + 24192*x^2 + 63658*x + 56449) + + // // a_3 * (5539*x^7 + 27797*x^6 + 6045*x^5 + 56449*x^4 + 58974*x^3 + 36716*x^2 + 58468*x + 8064) + + // { + // // proof A = alpha + A(tau) + delta * r + // let mut expected_a = delta; + // expected_a.mul_assign(&r); + // expected_a.add_assign(&alpha); + // expected_a.add_assign(&u_i[0]); // a_0 = 1 + // expected_a.add_assign(&u_i[1]); // a_1 = 1 + // expected_a.add_assign(&u_i[2]); // a_2 = 1 + // // a_3 = 0 + // assert_eq!(proof.a, expected_a); + // } + + // // B(x) = + // // a_0 * (0) + + // // a_1 * (0) + + // // a_2 * (56449*x^7 + 56449*x^6 + 56449*x^5 + 56449*x^4 + 56449*x^3 + 56449*x^2 + 56449*x + 56449) + + // // a_3 * (31177*x^7 + 44780*x^6 + 21752*x^5 + 42255*x^3 + 35861*x^2 + 33842*x + 48385) + // { + // // proof B = beta + B(tau) + delta * s + // let mut expected_b = delta; + // expected_b.mul_assign(&s); + // expected_b.add_assign(&beta); + // expected_b.add_assign(&v_i[0]); // a_0 = 1 + // expected_b.add_assign(&v_i[1]); // a_1 = 1 + // expected_b.add_assign(&v_i[2]); // a_2 = 1 + // // 
a_3 = 0 + // assert_eq!(proof.b, expected_b); + // } + + // // C(x) = + // // a_0 * (0) + + // // a_1 * (27797*x^7 + 56449*x^6 + 36716*x^5 + 8064*x^4 + 27797*x^3 + 56449*x^2 + 36716*x + 8064) + + // // a_2 * (36716*x^7 + 8064*x^6 + 27797*x^5 + 56449*x^4 + 36716*x^3 + 8064*x^2 + 27797*x + 56449) + + // // a_3 * (36716*x^7 + 8064*x^6 + 27797*x^5 + 56449*x^4 + 36716*x^3 + 8064*x^2 + 27797*x + 56449) + // // + // // If A * B = C at each point in the domain, then the following polynomial... + // // P(x) = A(x) * B(x) - C(x) + // // = 49752*x^14 + 13914*x^13 + 29243*x^12 + 27227*x^11 + 62362*x^10 + 35703*x^9 + 4032*x^8 + 14761*x^6 + 50599*x^5 + 35270*x^4 + 37286*x^3 + 2151*x^2 + 28810*x + 60481 + // // + // // ... should be divisible by t(x), producing the quotient polynomial: + // // h(x) = P(x) / t(x) + // // = 49752*x^6 + 13914*x^5 + 29243*x^4 + 27227*x^3 + 62362*x^2 + 35703*x + 4032 + // { + // let mut expected_c = Fr::zero(); + + // // A * s + // let mut tmp = proof.a; + // tmp.mul_assign(&s); + // expected_c.add_assign(&tmp); + + // // B * r + // let mut tmp = proof.b; + // tmp.mul_assign(&r); + // expected_c.add_assign(&tmp); + + // // delta * r * s + // let mut tmp = delta; + // tmp.mul_assign(&r); + // tmp.mul_assign(&s); + // expected_c.sub_assign(&tmp); + + // // L query answer + // // a_2 = 1, a_3 = 0 + // expected_c.add_assign(¶ms.l[0]); + + // // H query answer + // for (i, coeff) in [5040, 11763, 10755, 63633, 128, 9747, 8739].iter().enumerate() { + // let coeff = Fr::from_str(&format!("{}", coeff)).unwrap(); + + // let mut tmp = params.h[i]; + // tmp.mul_assign(&coeff); + // expected_c.add_assign(&tmp); + // } + + // assert_eq!(expected_c, proof.c); + // } + + // assert!(verify_proof( + // &pvk, + // &proof, + // &[Fr::one()] + // ).unwrap()); +} diff --git a/bellman/src/groth16/generator.rs b/bellman/src/groth16/generator.rs new file mode 100644 index 0000000..e1dfdaf --- /dev/null +++ b/bellman/src/groth16/generator.rs @@ -0,0 +1,510 @@ +use crate::log::Stopwatch; + +use rand::Rng; + +use std::sync::Arc; + +use crate::pairing::{ + Engine, + Wnaf, + CurveProjective, + CurveAffine +}; + +use crate::pairing::ff::{ + PrimeField, + Field +}; + +use super::{ + Parameters, + VerifyingKey +}; + +use crate::{ + SynthesisError, + Circuit, + ConstraintSystem, + LinearCombination, + Variable, + Index +}; + +use crate::domain::{ + EvaluationDomain, + Scalar +}; + +use crate::worker::{ + Worker +}; + +/// Generates a random common reference string for +/// a circuit. +pub fn generate_random_parameters( + circuit: C, + rng: &mut R +) -> Result, SynthesisError> + where E: Engine, C: Circuit, R: Rng +{ + let g1 = rng.gen(); + let g2 = rng.gen(); + let alpha = rng.gen(); + let beta = rng.gen(); + let gamma = rng.gen(); + let delta = rng.gen(); + let tau = rng.gen(); + + generate_parameters::( + circuit, + g1, + g2, + alpha, + beta, + gamma, + delta, + tau + ) +} + +/// This is our assembly structure that we'll use to synthesize the +/// circuit into a QAP. +struct KeypairAssembly { + num_inputs: usize, + num_aux: usize, + num_constraints: usize, + at_inputs: Vec>, + bt_inputs: Vec>, + ct_inputs: Vec>, + at_aux: Vec>, + bt_aux: Vec>, + ct_aux: Vec> +} + +impl ConstraintSystem for KeypairAssembly { + type Root = Self; + + fn alloc( + &mut self, + _: A, + _: F + ) -> Result + where F: FnOnce() -> Result, A: FnOnce() -> AR, AR: Into + { + // There is no assignment, so we don't even invoke the + // function for obtaining one. 
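+        // All we record at keypair-generation time is the shape of the
+        // constraint system: reserve an index for the new auxiliary variable
+        // and give it empty coefficient rows in the A, B and C matrices.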
+ + let index = self.num_aux; + self.num_aux += 1; + + self.at_aux.push(vec![]); + self.bt_aux.push(vec![]); + self.ct_aux.push(vec![]); + + Ok(Variable(Index::Aux(index))) + } + + fn alloc_input( + &mut self, + _: A, + _: F + ) -> Result + where F: FnOnce() -> Result, A: FnOnce() -> AR, AR: Into + { + // There is no assignment, so we don't even invoke the + // function for obtaining one. + + let index = self.num_inputs; + self.num_inputs += 1; + + self.at_inputs.push(vec![]); + self.bt_inputs.push(vec![]); + self.ct_inputs.push(vec![]); + + Ok(Variable(Index::Input(index))) + } + + fn enforce( + &mut self, + _: A, + a: LA, + b: LB, + c: LC + ) + where A: FnOnce() -> AR, AR: Into, + LA: FnOnce(LinearCombination) -> LinearCombination, + LB: FnOnce(LinearCombination) -> LinearCombination, + LC: FnOnce(LinearCombination) -> LinearCombination + { + fn eval( + l: LinearCombination, + inputs: &mut [Vec<(E::Fr, usize)>], + aux: &mut [Vec<(E::Fr, usize)>], + this_constraint: usize + ) + { + for (index, coeff) in l.0 { + match index { + Variable(Index::Input(id)) => inputs[id].push((coeff, this_constraint)), + Variable(Index::Aux(id)) => aux[id].push((coeff, this_constraint)) + } + } + } + + eval(a(LinearCombination::zero()), &mut self.at_inputs, &mut self.at_aux, self.num_constraints); + eval(b(LinearCombination::zero()), &mut self.bt_inputs, &mut self.bt_aux, self.num_constraints); + eval(c(LinearCombination::zero()), &mut self.ct_inputs, &mut self.ct_aux, self.num_constraints); + + self.num_constraints += 1; + } + + fn push_namespace(&mut self, _: N) + where NR: Into, N: FnOnce() -> NR + { + // Do nothing; we don't care about namespaces in this context. + } + + fn pop_namespace(&mut self) + { + // Do nothing; we don't care about namespaces in this context. + } + + fn get_root(&mut self) -> &mut Self::Root { + self + } +} + +/// Create parameters for a circuit, given some toxic waste. +pub fn generate_parameters( + circuit: C, + g1: E::G1, + g2: E::G2, + alpha: E::Fr, + beta: E::Fr, + gamma: E::Fr, + delta: E::Fr, + tau: E::Fr +) -> Result, SynthesisError> + where E: Engine, C: Circuit +{ + let mut assembly = KeypairAssembly { + num_inputs: 0, + num_aux: 0, + num_constraints: 0, + at_inputs: vec![], + bt_inputs: vec![], + ct_inputs: vec![], + at_aux: vec![], + bt_aux: vec![], + ct_aux: vec![] + }; + + // Allocate the "one" input variable + assembly.alloc_input(|| "", || Ok(E::Fr::one()))?; + + // Synthesize the circuit. 
+ circuit.synthesize(&mut assembly)?; + + // Input constraints to ensure full density of IC query + // x * 0 = 0 + for i in 0..assembly.num_inputs { + assembly.enforce(|| "", + |lc| lc + Variable(Index::Input(i)), + |lc| lc, + |lc| lc, + ); + } + + elog_verbose!("Making {} powers of tau", assembly.num_constraints); + // Create bases for blind evaluation of polynomials at tau + let powers_of_tau = vec![Scalar::(E::Fr::zero()); assembly.num_constraints]; + let mut powers_of_tau = EvaluationDomain::from_coeffs(powers_of_tau)?; + + // Compute G1 window table + let mut g1_wnaf = Wnaf::new(); + let g1_wnaf = g1_wnaf.base(g1, { + // H query + (powers_of_tau.as_ref().len() - 1) + // IC/L queries + + assembly.num_inputs + assembly.num_aux + // A query + + assembly.num_inputs + assembly.num_aux + // B query + + assembly.num_inputs + assembly.num_aux + }); + + // Compute G2 window table + let mut g2_wnaf = Wnaf::new(); + let g2_wnaf = g2_wnaf.base(g2, { + // B query + assembly.num_inputs + assembly.num_aux + }); + + let gamma_inverse = gamma.inverse().ok_or(SynthesisError::UnexpectedIdentity)?; + let delta_inverse = delta.inverse().ok_or(SynthesisError::UnexpectedIdentity)?; + + let worker = Worker::new(); + + let mut h = vec![E::G1::zero(); powers_of_tau.as_ref().len() - 1]; + { + // Compute powers of tau + elog_verbose!("computing powers of tau..."); + + let stopwatch = Stopwatch::new(); + + { + let powers_of_tau = powers_of_tau.as_mut(); + worker.scope(powers_of_tau.len(), |scope, chunk| { + for (i, powers_of_tau) in powers_of_tau.chunks_mut(chunk).enumerate() + { + scope.spawn(move |_| { + let mut current_tau_power = tau.pow(&[(i*chunk) as u64]); + + for p in powers_of_tau { + p.0 = current_tau_power; + current_tau_power.mul_assign(&tau); + } + }); + } + }); + } + elog_verbose!("powers of tau stage 1 done in {} s", stopwatch.elapsed()); + + // coeff = t(x) / delta + let mut coeff = powers_of_tau.z(&tau); + coeff.mul_assign(&delta_inverse); + + elog_verbose!("computing the H query with multiple threads..."); + + let stopwatch = Stopwatch::new(); + + // Compute the H query with multiple threads + worker.scope(h.len(), |scope, chunk| { + for (h, p) in h.chunks_mut(chunk).zip(powers_of_tau.as_ref().chunks(chunk)) + { + let mut g1_wnaf = g1_wnaf.shared(); + scope.spawn(move |_| { + // Set values of the H query to g1^{(tau^i * t(tau)) / delta} + for (h, p) in h.iter_mut().zip(p.iter()) + { + // Compute final exponent + let mut exp = p.0; + exp.mul_assign(&coeff); + + // Exponentiate + *h = g1_wnaf.scalar(exp.into_repr()); + } + + // Batch normalize + E::G1::batch_normalization(h); + }); + } + }); + elog_verbose!("computing the H query done in {} s", stopwatch.elapsed()); + } + + elog_verbose!("using inverse FFT to convert powers of tau to Lagrange coefficients..."); + + let stopwatch = Stopwatch::new(); + + // Use inverse FFT to convert powers of tau to Lagrange coefficients + powers_of_tau.ifft(&worker); + let powers_of_tau = powers_of_tau.into_coeffs(); + + elog_verbose!("powers of tau stage 2 done in {} s", stopwatch.elapsed()); + let mut a = vec![E::G1::zero(); assembly.num_inputs + assembly.num_aux]; + let mut b_g1 = vec![E::G1::zero(); assembly.num_inputs + assembly.num_aux]; + let mut b_g2 = vec![E::G2::zero(); assembly.num_inputs + assembly.num_aux]; + let mut ic = vec![E::G1::zero(); assembly.num_inputs]; + let mut l = vec![E::G1::zero(); assembly.num_aux]; + + elog_verbose!("evaluating polynomials..."); + let stopwatch = Stopwatch::new(); + + fn eval( + // wNAF window tables + g1_wnaf: 
&Wnaf>, + g2_wnaf: &Wnaf>, + + // Lagrange coefficients for tau + powers_of_tau: &[Scalar], + + // QAP polynomials + at: &[Vec<(E::Fr, usize)>], + bt: &[Vec<(E::Fr, usize)>], + ct: &[Vec<(E::Fr, usize)>], + + // Resulting evaluated QAP polynomials + a: &mut [E::G1], + b_g1: &mut [E::G1], + b_g2: &mut [E::G2], + ext: &mut [E::G1], + + // Inverse coefficient for ext elements + inv: &E::Fr, + + // Trapdoors + alpha: &E::Fr, + beta: &E::Fr, + + // Worker + worker: &Worker + ) + + { + // Sanity check + assert_eq!(a.len(), at.len()); + assert_eq!(a.len(), bt.len()); + assert_eq!(a.len(), ct.len()); + assert_eq!(a.len(), b_g1.len()); + assert_eq!(a.len(), b_g2.len()); + assert_eq!(a.len(), ext.len()); + + // Evaluate polynomials in multiple threads + worker.scope(a.len(), |scope, chunk| { + for ((((((a, b_g1), b_g2), ext), at), bt), ct) in a.chunks_mut(chunk) + .zip(b_g1.chunks_mut(chunk)) + .zip(b_g2.chunks_mut(chunk)) + .zip(ext.chunks_mut(chunk)) + .zip(at.chunks(chunk)) + .zip(bt.chunks(chunk)) + .zip(ct.chunks(chunk)) + { + let mut g1_wnaf = g1_wnaf.shared(); + let mut g2_wnaf = g2_wnaf.shared(); + + scope.spawn(move |_| { + for ((((((a, b_g1), b_g2), ext), at), bt), ct) in a.iter_mut() + .zip(b_g1.iter_mut()) + .zip(b_g2.iter_mut()) + .zip(ext.iter_mut()) + .zip(at.iter()) + .zip(bt.iter()) + .zip(ct.iter()) + { + fn eval_at_tau( + powers_of_tau: &[Scalar], + p: &[(E::Fr, usize)] + ) -> E::Fr + { + let mut acc = E::Fr::zero(); + + for &(ref coeff, index) in p { + let mut n = powers_of_tau[index].0; + n.mul_assign(coeff); + acc.add_assign(&n); + } + + acc + } + + // Evaluate QAP polynomials at tau + let mut at = eval_at_tau(powers_of_tau, at); + let mut bt = eval_at_tau(powers_of_tau, bt); + let ct = eval_at_tau(powers_of_tau, ct); + + // Compute A query (in G1) + if !at.is_zero() { + *a = g1_wnaf.scalar(at.into_repr()); + } + + // Compute B query (in G1/G2) + if !bt.is_zero() { + let bt_repr = bt.into_repr(); + *b_g1 = g1_wnaf.scalar(bt_repr); + *b_g2 = g2_wnaf.scalar(bt_repr); + } + + at.mul_assign(&beta); + bt.mul_assign(&alpha); + + let mut e = at; + e.add_assign(&bt); + e.add_assign(&ct); + e.mul_assign(inv); + + *ext = g1_wnaf.scalar(e.into_repr()); + } + + // Batch normalize + E::G1::batch_normalization(a); + E::G1::batch_normalization(b_g1); + E::G2::batch_normalization(b_g2); + E::G1::batch_normalization(ext); + }); + }; + }); + } + + // Evaluate for inputs. + eval( + &g1_wnaf, + &g2_wnaf, + &powers_of_tau, + &assembly.at_inputs, + &assembly.bt_inputs, + &assembly.ct_inputs, + &mut a[0..assembly.num_inputs], + &mut b_g1[0..assembly.num_inputs], + &mut b_g2[0..assembly.num_inputs], + &mut ic, + &gamma_inverse, + &alpha, + &beta, + &worker + ); + + // Evaluate for auxillary variables. + eval( + &g1_wnaf, + &g2_wnaf, + &powers_of_tau, + &assembly.at_aux, + &assembly.bt_aux, + &assembly.ct_aux, + &mut a[assembly.num_inputs..], + &mut b_g1[assembly.num_inputs..], + &mut b_g2[assembly.num_inputs..], + &mut l, + &delta_inverse, + &alpha, + &beta, + &worker + ); + + elog_verbose!("evaluating polynomials done in {} s", stopwatch.elapsed()); + + // Don't allow any elements be unconstrained, so that + // the L query is always fully dense. 
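+    // An auxiliary variable that appears in no constraint evaluates to zero
+    // above, i.e. to the point at infinity in the L query, which would break
+    // the invariant (documented on `Parameters::l`) that the query never
+    // contains such points, so the circuit is rejected outright.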
+ for e in l.iter() { + if e.is_zero() { + return Err(SynthesisError::UnconstrainedVariable); + } + } + + let g1 = g1.into_affine(); + let g2 = g2.into_affine(); + + let vk = VerifyingKey:: { + alpha_g1: g1.mul(alpha).into_affine(), + beta_g1: g1.mul(beta).into_affine(), + beta_g2: g2.mul(beta).into_affine(), + gamma_g2: g2.mul(gamma).into_affine(), + delta_g1: g1.mul(delta).into_affine(), + delta_g2: g2.mul(delta).into_affine(), + ic: ic.into_iter().map(|e| e.into_affine()).collect() + }; + + log!("Has generated {} points", a.len()); + + Ok(Parameters { + vk: vk, + h: Arc::new(h.into_iter().map(|e| e.into_affine()).collect()), + l: Arc::new(l.into_iter().map(|e| e.into_affine()).collect()), + + // Filter points at infinity away from A/B queries + a: Arc::new(a.into_iter().filter(|e| !e.is_zero()).map(|e| e.into_affine()).collect()), + b_g1: Arc::new(b_g1.into_iter().filter(|e| !e.is_zero()).map(|e| e.into_affine()).collect()), + b_g2: Arc::new(b_g2.into_iter().filter(|e| !e.is_zero()).map(|e| e.into_affine()).collect()) + }) +} diff --git a/bellman/src/groth16/mod.rs b/bellman/src/groth16/mod.rs new file mode 100644 index 0000000..7abdc24 --- /dev/null +++ b/bellman/src/groth16/mod.rs @@ -0,0 +1,576 @@ +use crate::pairing::{ + Engine, + CurveAffine, + EncodedPoint +}; + +use crate::{ + SynthesisError +}; + +use crate::source::SourceBuilder; +use std::io::{self, Read, Write}; +use std::sync::Arc; +use byteorder::{BigEndian, WriteBytesExt, ReadBytesExt}; + +#[cfg(test)] +mod tests; + +mod generator; +mod prover; +mod verifier; + +pub use self::generator::*; +pub use self::prover::*; +pub use self::verifier::*; + +#[derive(Debug, Clone)] +pub struct Proof { + pub a: E::G1Affine, + pub b: E::G2Affine, + pub c: E::G1Affine +} + +impl PartialEq for Proof { + fn eq(&self, other: &Self) -> bool { + self.a == other.a && + self.b == other.b && + self.c == other.c + } +} + +impl Proof { + pub fn write( + &self, + mut writer: W + ) -> io::Result<()> + { + writer.write_all(self.a.into_compressed().as_ref())?; + writer.write_all(self.b.into_compressed().as_ref())?; + writer.write_all(self.c.into_compressed().as_ref())?; + + Ok(()) + } + + pub fn read( + mut reader: R + ) -> io::Result + { + let mut g1_repr = ::Compressed::empty(); + let mut g2_repr = ::Compressed::empty(); + + reader.read_exact(g1_repr.as_mut())?; + let a = g1_repr + .into_affine() + .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e)) + .and_then(|e| if e.is_zero() { + Err(io::Error::new(io::ErrorKind::InvalidData, "point at infinity")) + } else { + Ok(e) + })?; + + reader.read_exact(g2_repr.as_mut())?; + let b = g2_repr + .into_affine() + .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e)) + .and_then(|e| if e.is_zero() { + Err(io::Error::new(io::ErrorKind::InvalidData, "point at infinity")) + } else { + Ok(e) + })?; + + reader.read_exact(g1_repr.as_mut())?; + let c = g1_repr + .into_affine() + .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e)) + .and_then(|e| if e.is_zero() { + Err(io::Error::new(io::ErrorKind::InvalidData, "point at infinity")) + } else { + Ok(e) + })?; + + Ok(Proof { + a: a, + b: b, + c: c + }) + } +} + +#[derive(Clone)] +pub struct VerifyingKey { + // alpha in g1 for verifying and for creating A/C elements of + // proof. Never the point at infinity. + pub alpha_g1: E::G1Affine, + + // beta in g1 and g2 for verifying and for creating B/C elements + // of proof. Never the point at infinity. + pub beta_g1: E::G1Affine, + pub beta_g2: E::G2Affine, + + // gamma in g2 for verifying. 
Never the point at infinity. + pub gamma_g2: E::G2Affine, + + // delta in g1/g2 for verifying and proving, essentially the magic + // trapdoor that forces the prover to evaluate the C element of the + // proof with only components from the CRS. Never the point at + // infinity. + pub delta_g1: E::G1Affine, + pub delta_g2: E::G2Affine, + + // Elements of the form (beta * u_i(tau) + alpha v_i(tau) + w_i(tau)) / gamma + // for all public inputs. Because all public inputs have a dummy constraint, + // this is the same size as the number of inputs, and never contains points + // at infinity. + pub ic: Vec +} + +impl PartialEq for VerifyingKey { + fn eq(&self, other: &Self) -> bool { + self.alpha_g1 == other.alpha_g1 && + self.beta_g1 == other.beta_g1 && + self.beta_g2 == other.beta_g2 && + self.gamma_g2 == other.gamma_g2 && + self.delta_g1 == other.delta_g1 && + self.delta_g2 == other.delta_g2 && + self.ic == other.ic + } +} + +impl VerifyingKey { + pub fn write( + &self, + mut writer: W + ) -> io::Result<()> + { + writer.write_all(self.alpha_g1.into_uncompressed().as_ref())?; + writer.write_all(self.beta_g1.into_uncompressed().as_ref())?; + writer.write_all(self.beta_g2.into_uncompressed().as_ref())?; + writer.write_all(self.gamma_g2.into_uncompressed().as_ref())?; + writer.write_all(self.delta_g1.into_uncompressed().as_ref())?; + writer.write_all(self.delta_g2.into_uncompressed().as_ref())?; + writer.write_u32::(self.ic.len() as u32)?; + for ic in &self.ic { + writer.write_all(ic.into_uncompressed().as_ref())?; + } + + Ok(()) + } + + pub fn read( + mut reader: R + ) -> io::Result + { + let mut g1_repr = ::Uncompressed::empty(); + let mut g2_repr = ::Uncompressed::empty(); + + reader.read_exact(g1_repr.as_mut())?; + let alpha_g1 = g1_repr.into_affine().map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?; + + reader.read_exact(g1_repr.as_mut())?; + let beta_g1 = g1_repr.into_affine().map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?; + + reader.read_exact(g2_repr.as_mut())?; + let beta_g2 = g2_repr.into_affine().map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?; + + reader.read_exact(g2_repr.as_mut())?; + let gamma_g2 = g2_repr.into_affine().map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?; + + reader.read_exact(g1_repr.as_mut())?; + let delta_g1 = g1_repr.into_affine().map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?; + + reader.read_exact(g2_repr.as_mut())?; + let delta_g2 = g2_repr.into_affine().map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?; + + let ic_len = reader.read_u32::()? as usize; + + let mut ic = vec![]; + + for _ in 0..ic_len { + reader.read_exact(g1_repr.as_mut())?; + let g1 = g1_repr + .into_affine() + .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e)) + .and_then(|e| if e.is_zero() { + Err(io::Error::new(io::ErrorKind::InvalidData, "point at infinity")) + } else { + Ok(e) + })?; + + ic.push(g1); + } + + Ok(VerifyingKey { + alpha_g1: alpha_g1, + beta_g1: beta_g1, + beta_g2: beta_g2, + gamma_g2: gamma_g2, + delta_g1: delta_g1, + delta_g2: delta_g2, + ic: ic + }) + } +} + +#[derive(Clone)] +pub struct Parameters { + pub vk: VerifyingKey, + + // Elements of the form ((tau^i * t(tau)) / delta) for i between 0 and + // m-2 inclusive. Never contains points at infinity. + pub h: Arc>, + + // Elements of the form (beta * u_i(tau) + alpha v_i(tau) + w_i(tau)) / delta + // for all auxillary inputs. Variables can never be unconstrained, so this + // never contains points at infinity. 
+ pub l: Arc>, + + // QAP "A" polynomials evaluated at tau in the Lagrange basis. Never contains + // points at infinity: polynomials that evaluate to zero are omitted from + // the CRS and the prover can deterministically skip their evaluation. + pub a: Arc>, + + // QAP "B" polynomials evaluated at tau in the Lagrange basis. Needed in + // G1 and G2 for C/B queries, respectively. Never contains points at + // infinity for the same reason as the "A" polynomials. + pub b_g1: Arc>, + pub b_g2: Arc> +} + +impl PartialEq for Parameters { + fn eq(&self, other: &Self) -> bool { + self.vk == other.vk && + self.h == other.h && + self.l == other.l && + self.a == other.a && + self.b_g1 == other.b_g1 && + self.b_g2 == other.b_g2 + } +} + +impl Parameters { + pub fn write( + &self, + mut writer: W + ) -> io::Result<()> + { + self.vk.write(&mut writer)?; + + writer.write_u32::(self.h.len() as u32)?; + for g in &self.h[..] { + writer.write_all(g.into_uncompressed().as_ref())?; + } + + writer.write_u32::(self.l.len() as u32)?; + for g in &self.l[..] { + writer.write_all(g.into_uncompressed().as_ref())?; + } + + writer.write_u32::(self.a.len() as u32)?; + for g in &self.a[..] { + writer.write_all(g.into_uncompressed().as_ref())?; + } + + writer.write_u32::(self.b_g1.len() as u32)?; + for g in &self.b_g1[..] { + writer.write_all(g.into_uncompressed().as_ref())?; + } + + writer.write_u32::(self.b_g2.len() as u32)?; + for g in &self.b_g2[..] { + writer.write_all(g.into_uncompressed().as_ref())?; + } + + Ok(()) + } + + pub fn read( + mut reader: R, + checked: bool + ) -> io::Result + { + let read_g1 = |reader: &mut R| -> io::Result { + let mut repr = ::Uncompressed::empty(); + reader.read_exact(repr.as_mut())?; + + if checked { + repr + .into_affine() + } else { + repr + .into_affine_unchecked() + } + .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e)) + .and_then(|e| if e.is_zero() { + Err(io::Error::new(io::ErrorKind::InvalidData, "point at infinity")) + } else { + Ok(e) + }) + }; + + let read_g2 = |reader: &mut R| -> io::Result { + let mut repr = ::Uncompressed::empty(); + reader.read_exact(repr.as_mut())?; + + if checked { + repr + .into_affine() + } else { + repr + .into_affine_unchecked() + } + .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e)) + .and_then(|e| if e.is_zero() { + Err(io::Error::new(io::ErrorKind::InvalidData, "point at infinity")) + } else { + Ok(e) + }) + }; + + let vk = VerifyingKey::::read(&mut reader)?; + + let mut h = vec![]; + let mut l = vec![]; + let mut a = vec![]; + let mut b_g1 = vec![]; + let mut b_g2 = vec![]; + + { + let len = reader.read_u32::()? as usize; + for _ in 0..len { + h.push(read_g1(&mut reader)?); + } + } + + { + let len = reader.read_u32::()? as usize; + for _ in 0..len { + l.push(read_g1(&mut reader)?); + } + } + + { + let len = reader.read_u32::()? as usize; + for _ in 0..len { + a.push(read_g1(&mut reader)?); + } + } + + { + let len = reader.read_u32::()? as usize; + for _ in 0..len { + b_g1.push(read_g1(&mut reader)?); + } + } + + { + let len = reader.read_u32::()? as usize; + for _ in 0..len { + b_g2.push(read_g2(&mut reader)?); + } + } + + Ok(Parameters { + vk: vk, + h: Arc::new(h), + l: Arc::new(l), + a: Arc::new(a), + b_g1: Arc::new(b_g1), + b_g2: Arc::new(b_g2) + }) + } +} + +pub struct PreparedVerifyingKey { + /// Pairing result of alpha*beta + alpha_g1_beta_g2: E::Fqk, + /// -gamma in G2 + neg_gamma_g2: ::Prepared, + /// -delta in G2 + neg_delta_g2: ::Prepared, + /// Copy of IC from `VerifiyingKey`. 
+ ic: Vec +} + +pub trait ParameterSource { + type G1Builder: SourceBuilder; + type G2Builder: SourceBuilder; + + fn get_vk( + &mut self, + num_ic: usize + ) -> Result, SynthesisError>; + fn get_h( + &mut self, + num_h: usize + ) -> Result; + fn get_l( + &mut self, + num_l: usize + ) -> Result; + fn get_a( + &mut self, + num_inputs: usize, + num_aux: usize + ) -> Result<(Self::G1Builder, Self::G1Builder), SynthesisError>; + fn get_b_g1( + &mut self, + num_inputs: usize, + num_aux: usize + ) -> Result<(Self::G1Builder, Self::G1Builder), SynthesisError>; + fn get_b_g2( + &mut self, + num_inputs: usize, + num_aux: usize + ) -> Result<(Self::G2Builder, Self::G2Builder), SynthesisError>; +} + +impl<'a, E: Engine> ParameterSource for &'a Parameters { + type G1Builder = (Arc>, usize); + type G2Builder = (Arc>, usize); + + fn get_vk( + &mut self, + _: usize + ) -> Result, SynthesisError> + { + Ok(self.vk.clone()) + } + + fn get_h( + &mut self, + _: usize + ) -> Result + { + Ok((self.h.clone(), 0)) + } + + fn get_l( + &mut self, + _: usize + ) -> Result + { + Ok((self.l.clone(), 0)) + } + + fn get_a( + &mut self, + num_inputs: usize, + _: usize + ) -> Result<(Self::G1Builder, Self::G1Builder), SynthesisError> + { + Ok(((self.a.clone(), 0), (self.a.clone(), num_inputs))) + } + + fn get_b_g1( + &mut self, + num_inputs: usize, + _: usize + ) -> Result<(Self::G1Builder, Self::G1Builder), SynthesisError> + { + Ok(((self.b_g1.clone(), 0), (self.b_g1.clone(), num_inputs))) + } + + fn get_b_g2( + &mut self, + num_inputs: usize, + _: usize + ) -> Result<(Self::G2Builder, Self::G2Builder), SynthesisError> + { + Ok(((self.b_g2.clone(), 0), (self.b_g2.clone(), num_inputs))) + } +} + +#[cfg(test)] +mod test_with_bls12_381 { + use super::*; + use crate::{Circuit, SynthesisError, ConstraintSystem}; + + use rand::{Rand, thread_rng}; + use crate::pairing::ff::{Field}; + use crate::pairing::bls12_381::{Bls12, Fr}; + + #[test] + fn serialization() { + struct MySillyCircuit { + a: Option, + b: Option + } + + impl Circuit for MySillyCircuit { + fn synthesize>( + self, + cs: &mut CS + ) -> Result<(), SynthesisError> + { + let a = cs.alloc(|| "a", || self.a.ok_or(SynthesisError::AssignmentMissing))?; + let b = cs.alloc(|| "b", || self.b.ok_or(SynthesisError::AssignmentMissing))?; + let c = cs.alloc_input(|| "c", || { + let mut a = self.a.ok_or(SynthesisError::AssignmentMissing)?; + let b = self.b.ok_or(SynthesisError::AssignmentMissing)?; + + a.mul_assign(&b); + Ok(a) + })?; + + cs.enforce( + || "a*b=c", + |lc| lc + a, + |lc| lc + b, + |lc| lc + c + ); + + Ok(()) + } + } + + let rng = &mut thread_rng(); + + let params = generate_random_parameters::( + MySillyCircuit { a: None, b: None }, + rng + ).unwrap(); + + { + let mut v = vec![]; + + params.write(&mut v).unwrap(); + assert_eq!(v.len(), 2136); + + let de_params = Parameters::read(&v[..], true).unwrap(); + assert!(params == de_params); + + let de_params = Parameters::read(&v[..], false).unwrap(); + assert!(params == de_params); + } + + let pvk = prepare_verifying_key::(¶ms.vk); + + for _ in 0..100 { + let a = Fr::rand(rng); + let b = Fr::rand(rng); + let mut c = a; + c.mul_assign(&b); + + let proof = create_random_proof( + MySillyCircuit { + a: Some(a), + b: Some(b) + }, + ¶ms, + rng + ).unwrap(); + + let mut v = vec![]; + proof.write(&mut v).unwrap(); + + assert_eq!(v.len(), 192); + + let de_proof = Proof::read(&v[..]).unwrap(); + assert!(proof == de_proof); + + assert!(verify_proof(&pvk, &proof, &[c]).unwrap()); + assert!(!verify_proof(&pvk, &proof, &[a]).unwrap()); 
+ } + } +} \ No newline at end of file diff --git a/bellman/src/groth16/prover.rs b/bellman/src/groth16/prover.rs new file mode 100644 index 0000000..e46f4d4 --- /dev/null +++ b/bellman/src/groth16/prover.rs @@ -0,0 +1,463 @@ +use crate::log::Stopwatch; + +use rand::Rng; + +use std::sync::Arc; + +use futures::Future; + +use crate::pairing::{ + Engine, + CurveProjective, + CurveAffine +}; + +use crate::pairing::ff::{ + PrimeField, + Field +}; + +use super::{ + ParameterSource, + Proof +}; + +use crate::{ + SynthesisError, + Circuit, + ConstraintSystem, + LinearCombination, + Variable, + Index +}; + +use crate::domain::{ + EvaluationDomain, + Scalar +}; + +use crate::source::{ + DensityTracker, + FullDensity +}; + +use crate::multiexp::*; + +use crate::worker::{ + Worker +}; + +fn eval( + lc: &LinearCombination, + mut input_density: Option<&mut DensityTracker>, + mut aux_density: Option<&mut DensityTracker>, + input_assignment: &[E::Fr], + aux_assignment: &[E::Fr] +) -> E::Fr +{ + let mut acc = E::Fr::zero(); + + for &(index, coeff) in lc.0.iter() { + let mut tmp; + + match index { + Variable(Index::Input(i)) => { + tmp = input_assignment[i]; + if let Some(ref mut v) = input_density { + v.inc(i); + } + }, + Variable(Index::Aux(i)) => { + tmp = aux_assignment[i]; + if let Some(ref mut v) = aux_density { + v.inc(i); + } + } + } + + if coeff == E::Fr::one() { + acc.add_assign(&tmp); + } else { + tmp.mul_assign(&coeff); + acc.add_assign(&tmp); + } + } + + acc +} + +pub(crate) fn field_elements_into_representations( + worker: &Worker, + scalars: Vec +) -> Result::Repr>, SynthesisError> +{ + let mut representations = vec![::Repr::default(); scalars.len()]; + worker.scope(scalars.len(), |scope, chunk| { + for (scalar, repr) in scalars.chunks(chunk) + .zip(representations.chunks_mut(chunk)) { + scope.spawn(move |_| { + for (scalar, repr) in scalar.iter() + .zip(repr.iter_mut()) { + *repr = scalar.into_repr(); + } + }); + } + }); + + Ok(representations) +} + +pub(crate) fn scalars_into_representations( + worker: &Worker, + scalars: Vec> +) -> Result::Repr>, SynthesisError> +{ + let mut representations = vec![::Repr::default(); scalars.len()]; + worker.scope(scalars.len(), |scope, chunk| { + for (scalar, repr) in scalars.chunks(chunk) + .zip(representations.chunks_mut(chunk)) { + scope.spawn(move |_| { + for (scalar, repr) in scalar.iter() + .zip(repr.iter_mut()) { + *repr = scalar.0.into_repr(); + } + }); + } + }); + + Ok(representations) +} + +// This is a proving assignment with densities precalculated +pub struct PreparedProver{ + assignment: ProvingAssignment, +} + +#[derive(Clone)] +struct ProvingAssignment { + // Density of queries + a_aux_density: DensityTracker, + b_input_density: DensityTracker, + b_aux_density: DensityTracker, + + // Evaluations of A, B, C polynomials + a: Vec>, + b: Vec>, + c: Vec>, + + // Assignments of variables + input_assignment: Vec, + aux_assignment: Vec +} + +pub fn prepare_prover( + circuit: C, +) -> Result, SynthesisError> + where E: Engine, C: Circuit +{ + let mut prover = ProvingAssignment { + a_aux_density: DensityTracker::new(), + b_input_density: DensityTracker::new(), + b_aux_density: DensityTracker::new(), + a: vec![], + b: vec![], + c: vec![], + input_assignment: vec![], + aux_assignment: vec![] + }; + + prover.alloc_input(|| "", || Ok(E::Fr::one()))?; + + circuit.synthesize(&mut prover)?; + + for i in 0..prover.input_assignment.len() { + prover.enforce(|| "", + |lc| lc + Variable(Index::Input(i)), + |lc| lc, + |lc| lc, + ); + } + + let prepared = 
PreparedProver { + assignment: prover + }; + + return Ok(prepared) +} + +impl PreparedProver { + pub fn create_random_proof>( + self, + params: P, + rng: &mut R + ) -> Result, SynthesisError> + where R: Rng + { + let r = rng.gen(); + let s = rng.gen(); + + self.create_proof(params, r, s) + } + + pub fn create_proof>( + self, + mut params: P, + r: E::Fr, + s: E::Fr + ) -> Result, SynthesisError> + { + let prover = self.assignment; + let worker = Worker::new(); + + let vk = params.get_vk(prover.input_assignment.len())?; + + let stopwatch = Stopwatch::new(); + + let h = { + let mut a = EvaluationDomain::from_coeffs(prover.a)?; + let mut b = EvaluationDomain::from_coeffs(prover.b)?; + let mut c = EvaluationDomain::from_coeffs(prover.c)?; + elog_verbose!("H query domain size is {}", a.as_ref().len()); + + // here a coset is a domain where denominator (z) does not vanish + // inverse FFT is an interpolation + a.ifft(&worker); + // evaluate in coset + a.coset_fft(&worker); + // same is for B and C + b.ifft(&worker); + b.coset_fft(&worker); + c.ifft(&worker); + c.coset_fft(&worker); + + // do A*B-C in coset + a.mul_assign(&worker, &b); + drop(b); + a.sub_assign(&worker, &c); + drop(c); + // z does not vanish in coset, so we divide by non-zero + a.divide_by_z_on_coset(&worker); + // interpolate back in coset + a.icoset_fft(&worker); + let mut a = a.into_coeffs(); + let a_len = a.len() - 1; + a.truncate(a_len); + // TODO: parallelize if it's even helpful + // TODO: in large settings it may worth to parallelize + let a = Arc::new(scalars_into_representations::(&worker, a)?); + // let a = Arc::new(a.into_iter().map(|s| s.0.into_repr()).collect::>()); + + multiexp(&worker, params.get_h(a.len())?, FullDensity, a) + }; + + elog_verbose!("{} seconds for prover for H evaluation (mostly FFT)", stopwatch.elapsed()); + + let stopwatch = Stopwatch::new(); + + // TODO: Check that difference in operations for different chunks is small + + let input_len = prover.input_assignment.len(); + let aux_len = prover.aux_assignment.len(); + + let input_assignment = Arc::new(field_elements_into_representations::(&worker, prover.input_assignment)?); + let aux_assignment = Arc::new(field_elements_into_representations::(&worker, prover.aux_assignment)?); + + // TODO: parallelize if it's even helpful + // TODO: in large settings it may worth to parallelize + // let input_assignment = Arc::new(prover.input_assignment.into_iter().map(|s| s.into_repr()).collect::>()); + // let aux_assignment = Arc::new(prover.aux_assignment.into_iter().map(|s| s.into_repr()).collect::>()); + + // let input_len = input_assignment.len(); + // let aux_len = aux_assignment.len(); + elog_verbose!("H query is dense in G1,\nOther queries are {} elements in G1 and {} elements in G2", + 2*(input_len + aux_len) + aux_len, input_len + aux_len); + + // Run a dedicated process for dense vector + let l = multiexp(&worker, params.get_l(aux_assignment.len())?, FullDensity, aux_assignment.clone()); + + let a_aux_density_total = prover.a_aux_density.get_total_density(); + + let (a_inputs_source, a_aux_source) = params.get_a(input_assignment.len(), a_aux_density_total)?; + + let a_inputs = multiexp(&worker, a_inputs_source, FullDensity, input_assignment.clone()); + let a_aux = multiexp(&worker, a_aux_source, Arc::new(prover.a_aux_density), aux_assignment.clone()); + + let b_input_density = Arc::new(prover.b_input_density); + let b_input_density_total = b_input_density.get_total_density(); + let b_aux_density = Arc::new(prover.b_aux_density); + let 
b_aux_density_total = b_aux_density.get_total_density(); + + let (b_g1_inputs_source, b_g1_aux_source) = params.get_b_g1(b_input_density_total, b_aux_density_total)?; + + let b_g1_inputs = multiexp(&worker, b_g1_inputs_source, b_input_density.clone(), input_assignment.clone()); + let b_g1_aux = multiexp(&worker, b_g1_aux_source, b_aux_density.clone(), aux_assignment.clone()); + + let (b_g2_inputs_source, b_g2_aux_source) = params.get_b_g2(b_input_density_total, b_aux_density_total)?; + + let b_g2_inputs = multiexp(&worker, b_g2_inputs_source, b_input_density, input_assignment); + let b_g2_aux = multiexp(&worker, b_g2_aux_source, b_aux_density, aux_assignment); + + if vk.delta_g1.is_zero() || vk.delta_g2.is_zero() { + // If this element is zero, someone is trying to perform a + // subversion-CRS attack. + return Err(SynthesisError::UnexpectedIdentity); + } + + let mut g_a = vk.delta_g1.mul(r); + g_a.add_assign_mixed(&vk.alpha_g1); + let mut g_b = vk.delta_g2.mul(s); + g_b.add_assign_mixed(&vk.beta_g2); + let mut g_c; + { + let mut rs = r; + rs.mul_assign(&s); + + g_c = vk.delta_g1.mul(rs); + g_c.add_assign(&vk.alpha_g1.mul(s)); + g_c.add_assign(&vk.beta_g1.mul(r)); + } + let mut a_answer = a_inputs.wait()?; + a_answer.add_assign(&a_aux.wait()?); + g_a.add_assign(&a_answer); + a_answer.mul_assign(s); + g_c.add_assign(&a_answer); + + let mut b1_answer = b_g1_inputs.wait()?; + b1_answer.add_assign(&b_g1_aux.wait()?); + let mut b2_answer = b_g2_inputs.wait()?; + b2_answer.add_assign(&b_g2_aux.wait()?); + + g_b.add_assign(&b2_answer); + b1_answer.mul_assign(r); + g_c.add_assign(&b1_answer); + g_c.add_assign(&h.wait()?); + g_c.add_assign(&l.wait()?); + + elog_verbose!("{} seconds for prover for point multiplication", stopwatch.elapsed()); + + Ok(Proof { + a: g_a.into_affine(), + b: g_b.into_affine(), + c: g_c.into_affine() + }) + } +} + + +impl ConstraintSystem for ProvingAssignment { + type Root = Self; + + fn alloc( + &mut self, + _: A, + f: F + ) -> Result + where F: FnOnce() -> Result, A: FnOnce() -> AR, AR: Into + { + self.aux_assignment.push(f()?); + self.a_aux_density.add_element(); + self.b_aux_density.add_element(); + + Ok(Variable(Index::Aux(self.aux_assignment.len() - 1))) + } + + fn alloc_input( + &mut self, + _: A, + f: F + ) -> Result + where F: FnOnce() -> Result, A: FnOnce() -> AR, AR: Into + { + self.input_assignment.push(f()?); + self.b_input_density.add_element(); + + Ok(Variable(Index::Input(self.input_assignment.len() - 1))) + } + + fn enforce( + &mut self, + _: A, + a: LA, + b: LB, + c: LC + ) + where A: FnOnce() -> AR, AR: Into, + LA: FnOnce(LinearCombination) -> LinearCombination, + LB: FnOnce(LinearCombination) -> LinearCombination, + LC: FnOnce(LinearCombination) -> LinearCombination + { + let a = a(LinearCombination::zero()); + let b = b(LinearCombination::zero()); + let c = c(LinearCombination::zero()); + + self.a.push(Scalar(eval( + &a, + // Inputs have full density in the A query + // because there are constraints of the + // form x * 0 = 0 for each input. + None, + Some(&mut self.a_aux_density), + &self.input_assignment, + &self.aux_assignment + ))); + self.b.push(Scalar(eval( + &b, + Some(&mut self.b_input_density), + Some(&mut self.b_aux_density), + &self.input_assignment, + &self.aux_assignment + ))); + self.c.push(Scalar(eval( + &c, + // There is no C polynomial query, + // though there is an (beta)A + (alpha)B + C + // query for all aux variables. + // However, that query has full density. 
+ None, + None, + &self.input_assignment, + &self.aux_assignment + ))); + } + + fn push_namespace(&mut self, _: N) + where NR: Into, N: FnOnce() -> NR + { + // Do nothing; we don't care about namespaces in this context. + } + + fn pop_namespace(&mut self) + { + // Do nothing; we don't care about namespaces in this context. + } + + fn get_root(&mut self) -> &mut Self::Root { + self + } +} + +pub fn create_random_proof>( + circuit: C, + params: P, + rng: &mut R +) -> Result, SynthesisError> + where E: Engine, C: Circuit, R: Rng +{ + let r = rng.gen(); + let s = rng.gen(); + + create_proof::(circuit, params, r, s) +} + +pub fn create_proof>( + circuit: C, + params: P, + r: E::Fr, + s: E::Fr +) -> Result, SynthesisError> + where E: Engine, C: Circuit +{ + let prover = prepare_prover(circuit)?; + + prover.create_proof(params, r, s) +} diff --git a/bellman/src/groth16/tests/mod.rs b/bellman/src/groth16/tests/mod.rs new file mode 100644 index 0000000..307f813 --- /dev/null +++ b/bellman/src/groth16/tests/mod.rs @@ -0,0 +1,330 @@ +use crate::pairing::{ + Engine +}; + +use crate::pairing::ff:: { + Field, + PrimeField, +}; + +use super::super::tests::dummy_engine::*; +use super::super::tests::XORDemo; + +use std::marker::PhantomData; + +use crate::{ + Circuit, + ConstraintSystem, + SynthesisError +}; + +use super::{ + generate_parameters, + prepare_verifying_key, + create_proof, + verify_proof +}; + +#[test] +fn test_xordemo() { + let g1 = Fr::one(); + let g2 = Fr::one(); + let alpha = Fr::from_str("48577").unwrap(); + let beta = Fr::from_str("22580").unwrap(); + let gamma = Fr::from_str("53332").unwrap(); + let delta = Fr::from_str("5481").unwrap(); + let tau = Fr::from_str("3673").unwrap(); + + let params = { + let c = XORDemo:: { + a: None, + b: None, + _marker: PhantomData + }; + + generate_parameters( + c, + g1, + g2, + alpha, + beta, + gamma, + delta, + tau + ).unwrap() + }; + + // This will synthesize the constraint system: + // + // public inputs: a_0 = 1, a_1 = c + // aux inputs: a_2 = a, a_3 = b + // constraints: + // (a_0 - a_2) * (a_2) = 0 + // (a_0 - a_3) * (a_3) = 0 + // (a_2 + a_2) * (a_3) = (a_2 + a_3 - a_1) + // (a_0) * 0 = 0 + // (a_1) * 0 = 0 + + // The evaluation domain is 8. The H query should + // have 7 elements (it's a quotient polynomial) + assert_eq!(7, params.h.len()); + + let mut root_of_unity = Fr::root_of_unity(); + + // We expect this to be a 2^10 root of unity + assert_eq!(Fr::one(), root_of_unity.pow(&[1 << 10])); + + // Let's turn it into a 2^3 root of unity. + root_of_unity = root_of_unity.pow(&[1 << 7]); + assert_eq!(Fr::one(), root_of_unity.pow(&[1 << 3])); + assert_eq!(Fr::from_str("20201").unwrap(), root_of_unity); + + // Let's compute all the points in our evaluation domain. + let mut points = Vec::with_capacity(8); + for i in 0..8 { + points.push(root_of_unity.pow(&[i])); + } + + // Let's compute t(tau) = (tau - p_0)(tau - p_1)... + // = tau^8 - 1 + let mut t_at_tau = tau.pow(&[8]); + t_at_tau.sub_assign(&Fr::one()); + { + let mut tmp = Fr::one(); + for p in &points { + let mut term = tau; + term.sub_assign(p); + tmp.mul_assign(&term); + } + assert_eq!(tmp, t_at_tau); + } + + // We expect our H query to be 7 elements of the form... 
+ // {tau^i t(tau) / delta} + let delta_inverse = delta.inverse().unwrap(); + let gamma_inverse = gamma.inverse().unwrap(); + { + let mut coeff = delta_inverse; + coeff.mul_assign(&t_at_tau); + + let mut cur = Fr::one(); + for h in params.h.iter() { + let mut tmp = cur; + tmp.mul_assign(&coeff); + + assert_eq!(*h, tmp); + + cur.mul_assign(&tau); + } + } + + // The density of the IC query is 2 (2 inputs) + assert_eq!(2, params.vk.ic.len()); + + // The density of the L query is 2 (2 aux variables) + assert_eq!(2, params.l.len()); + + // The density of the A query is 4 (each variable is in at least one A term) + assert_eq!(4, params.a.len()); + + // The density of the B query is 2 (two variables are in at least one B term) + assert_eq!(2, params.b_g1.len()); + assert_eq!(2, params.b_g2.len()); + + /* + Lagrange interpolation polynomials in our evaluation domain: + + ,-------------------------------. ,-------------------------------. ,-------------------------------. + | A TERM | | B TERM | | C TERM | + `-------------------------------. `-------------------------------' `-------------------------------' + | a_0 | a_1 | a_2 | a_3 | | a_0 | a_1 | a_2 | a_3 | | a_0 | a_1 | a_2 | a_3 | + | 1 | 0 | 64512 | 0 | | 0 | 0 | 1 | 0 | | 0 | 0 | 0 | 0 | + | 1 | 0 | 0 | 64512 | | 0 | 0 | 0 | 1 | | 0 | 0 | 0 | 0 | + | 0 | 0 | 2 | 0 | | 0 | 0 | 0 | 1 | | 0 | 64512 | 1 | 1 | + | 1 | 0 | 0 | 0 | | 0 | 0 | 0 | 0 | | 0 | 0 | 0 | 0 | + | 0 | 1 | 0 | 0 | | 0 | 0 | 0 | 0 | | 0 | 0 | 0 | 0 | + `-------'-------'-------'-------' `-------'-------'-------'-------' `-------'-------'-------'-------' + + Example for u_0: + + sage: r = 64513 + sage: Fr = GF(r) + sage: omega = (Fr(5)^63)^(2^7) + sage: tau = Fr(3673) + sage: R. = PolynomialRing(Fr, 'x') + sage: def eval(tau, c0, c1, c2, c3, c4): + ....: p = R.lagrange_polynomial([(omega^0, c0), (omega^1, c1), (omega^2, c2), (omega^3, c3), (omega^4, c4), (omega^5, 0), (omega^6, 0), (omega^7, 0)]) + ....: return p.substitute(tau) + sage: eval(tau, 1, 1, 0, 1, 0) + 59158 + */ + + let u_i = [59158, 48317, 21767, 10402].iter().map(|e| { + Fr::from_str(&format!("{}", e)).unwrap() + }).collect::>(); + let v_i = [0, 0, 60619, 30791].iter().map(|e| { + Fr::from_str(&format!("{}", e)).unwrap() + }).collect::>(); + let w_i = [0, 23320, 41193, 41193].iter().map(|e| { + Fr::from_str(&format!("{}", e)).unwrap() + }).collect::>(); + + for (u, a) in u_i.iter() + .zip(¶ms.a[..]) + { + assert_eq!(u, a); + } + + for (v, b) in v_i.iter() + .filter(|&&e| e != Fr::zero()) + .zip(¶ms.b_g1[..]) + { + assert_eq!(v, b); + } + + for (v, b) in v_i.iter() + .filter(|&&e| e != Fr::zero()) + .zip(¶ms.b_g2[..]) + { + assert_eq!(v, b); + } + + for i in 0..4 { + let mut tmp1 = beta; + tmp1.mul_assign(&u_i[i]); + + let mut tmp2 = alpha; + tmp2.mul_assign(&v_i[i]); + + tmp1.add_assign(&tmp2); + tmp1.add_assign(&w_i[i]); + + if i < 2 { + // Check the correctness of the IC query elements + tmp1.mul_assign(&gamma_inverse); + + assert_eq!(tmp1, params.vk.ic[i]); + } else { + // Check the correctness of the L query elements + tmp1.mul_assign(&delta_inverse); + + assert_eq!(tmp1, params.l[i - 2]); + } + } + + // Check consistency of the other elements + assert_eq!(alpha, params.vk.alpha_g1); + assert_eq!(beta, params.vk.beta_g1); + assert_eq!(beta, params.vk.beta_g2); + assert_eq!(gamma, params.vk.gamma_g2); + assert_eq!(delta, params.vk.delta_g1); + assert_eq!(delta, params.vk.delta_g2); + + let pvk = prepare_verifying_key(¶ms.vk); + + let r = Fr::from_str("27134").unwrap(); + let s = Fr::from_str("17146").unwrap(); 
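+    // Fixed blinding scalars keep the proof deterministic, so the A, B and C
+    // elements computed below can be checked against values derived by hand
+    // from the trapdoor (alpha, beta, gamma, delta, tau) chosen above.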
+ + let proof = { + let c = XORDemo { + a: Some(true), + b: Some(false), + _marker: PhantomData + }; + + create_proof( + c, + ¶ms, + r, + s + ).unwrap() + }; + + // A(x) = + // a_0 * (44865*x^7 + 56449*x^6 + 44865*x^5 + 8064*x^4 + 3520*x^3 + 56449*x^2 + 3520*x + 40321) + + // a_1 * (8064*x^7 + 56449*x^6 + 8064*x^5 + 56449*x^4 + 8064*x^3 + 56449*x^2 + 8064*x + 56449) + + // a_2 * (16983*x^7 + 24192*x^6 + 63658*x^5 + 56449*x^4 + 16983*x^3 + 24192*x^2 + 63658*x + 56449) + + // a_3 * (5539*x^7 + 27797*x^6 + 6045*x^5 + 56449*x^4 + 58974*x^3 + 36716*x^2 + 58468*x + 8064) + + { + // proof A = alpha + A(tau) + delta * r + let mut expected_a = delta; + expected_a.mul_assign(&r); + expected_a.add_assign(&alpha); + expected_a.add_assign(&u_i[0]); // a_0 = 1 + expected_a.add_assign(&u_i[1]); // a_1 = 1 + expected_a.add_assign(&u_i[2]); // a_2 = 1 + // a_3 = 0 + assert_eq!(proof.a, expected_a); + } + + // B(x) = + // a_0 * (0) + + // a_1 * (0) + + // a_2 * (56449*x^7 + 56449*x^6 + 56449*x^5 + 56449*x^4 + 56449*x^3 + 56449*x^2 + 56449*x + 56449) + + // a_3 * (31177*x^7 + 44780*x^6 + 21752*x^5 + 42255*x^3 + 35861*x^2 + 33842*x + 48385) + { + // proof B = beta + B(tau) + delta * s + let mut expected_b = delta; + expected_b.mul_assign(&s); + expected_b.add_assign(&beta); + expected_b.add_assign(&v_i[0]); // a_0 = 1 + expected_b.add_assign(&v_i[1]); // a_1 = 1 + expected_b.add_assign(&v_i[2]); // a_2 = 1 + // a_3 = 0 + assert_eq!(proof.b, expected_b); + } + + // C(x) = + // a_0 * (0) + + // a_1 * (27797*x^7 + 56449*x^6 + 36716*x^5 + 8064*x^4 + 27797*x^3 + 56449*x^2 + 36716*x + 8064) + + // a_2 * (36716*x^7 + 8064*x^6 + 27797*x^5 + 56449*x^4 + 36716*x^3 + 8064*x^2 + 27797*x + 56449) + + // a_3 * (36716*x^7 + 8064*x^6 + 27797*x^5 + 56449*x^4 + 36716*x^3 + 8064*x^2 + 27797*x + 56449) + // + // If A * B = C at each point in the domain, then the following polynomial... + // P(x) = A(x) * B(x) - C(x) + // = 49752*x^14 + 13914*x^13 + 29243*x^12 + 27227*x^11 + 62362*x^10 + 35703*x^9 + 4032*x^8 + 14761*x^6 + 50599*x^5 + 35270*x^4 + 37286*x^3 + 2151*x^2 + 28810*x + 60481 + // + // ... 
should be divisible by t(x), producing the quotient polynomial: + // h(x) = P(x) / t(x) + // = 49752*x^6 + 13914*x^5 + 29243*x^4 + 27227*x^3 + 62362*x^2 + 35703*x + 4032 + { + let mut expected_c = Fr::zero(); + + // A * s + let mut tmp = proof.a; + tmp.mul_assign(&s); + expected_c.add_assign(&tmp); + + // B * r + let mut tmp = proof.b; + tmp.mul_assign(&r); + expected_c.add_assign(&tmp); + + // delta * r * s + let mut tmp = delta; + tmp.mul_assign(&r); + tmp.mul_assign(&s); + expected_c.sub_assign(&tmp); + + // L query answer + // a_2 = 1, a_3 = 0 + expected_c.add_assign(¶ms.l[0]); + + // H query answer + for (i, coeff) in [5040, 11763, 10755, 63633, 128, 9747, 8739].iter().enumerate() { + let coeff = Fr::from_str(&format!("{}", coeff)).unwrap(); + + let mut tmp = params.h[i]; + tmp.mul_assign(&coeff); + expected_c.add_assign(&tmp); + } + + assert_eq!(expected_c, proof.c); + } + + assert!(verify_proof( + &pvk, + &proof, + &[Fr::one()] + ).unwrap()); +} diff --git a/bellman/src/groth16/verifier.rs b/bellman/src/groth16/verifier.rs new file mode 100644 index 0000000..03de2b9 --- /dev/null +++ b/bellman/src/groth16/verifier.rs @@ -0,0 +1,67 @@ +use crate::pairing::{ + Engine, + CurveProjective, + CurveAffine +}; + +use crate::pairing::ff::{PrimeField}; + +use super::{ + Proof, + VerifyingKey, + PreparedVerifyingKey +}; + +use crate::{ + SynthesisError +}; + +pub fn prepare_verifying_key( + vk: &VerifyingKey +) -> PreparedVerifyingKey +{ + let mut gamma = vk.gamma_g2; + gamma.negate(); + let mut delta = vk.delta_g2; + delta.negate(); + + PreparedVerifyingKey { + alpha_g1_beta_g2: E::pairing(vk.alpha_g1, vk.beta_g2), + neg_gamma_g2: gamma.prepare(), + neg_delta_g2: delta.prepare(), + ic: vk.ic.clone() + } +} + +pub fn verify_proof<'a, E: Engine>( + pvk: &'a PreparedVerifyingKey, + proof: &Proof, + public_inputs: &[E::Fr] +) -> Result +{ + if (public_inputs.len() + 1) != pvk.ic.len() { + return Err(SynthesisError::MalformedVerifyingKey); + } + + let mut acc = pvk.ic[0].into_projective(); + + for (i, b) in public_inputs.iter().zip(pvk.ic.iter().skip(1)) { + acc.add_assign(&b.mul(i.into_repr())); + } + + // The original verification equation is: + // A * B = alpha * beta + inputs * gamma + C * delta + // ... however, we rearrange it so that it is: + // A * B - inputs * gamma - C * delta = alpha * beta + // or equivalently: + // A * B + inputs * (-gamma) + C * (-delta) = alpha * beta + // which allows us to do a single final exponentiation. 
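+    // Concretely, with acc = ic_0 + sum_i input_i * ic_i computed above, the
+    // check below accepts iff
+    //     e(A, B) * e(acc, -gamma) * e(C, -delta) == e(alpha, beta),
+    // evaluated as one Miller loop over the three prepared pairs followed by
+    // a single final exponentiation, compared against the precomputed
+    // alpha_g1_beta_g2.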
+ + Ok(E::final_exponentiation( + &E::miller_loop([ + (&proof.a.prepare(), &proof.b.prepare()), + (&acc.into_affine().prepare(), &pvk.neg_gamma_g2), + (&proof.c.prepare(), &pvk.neg_delta_g2) + ].into_iter()) + ).unwrap() == pvk.alpha_g1_beta_g2) +} diff --git a/bellman/src/group.rs b/bellman/src/group.rs new file mode 100644 index 0000000..8671d76 --- /dev/null +++ b/bellman/src/group.rs @@ -0,0 +1,82 @@ +use crate::pairing::{ + Engine, + CurveProjective +}; + +use crate::pairing::ff::{ + Field, + PrimeField +}; + +use super::{ + SynthesisError +}; + +pub trait Group: Sized + Copy + Clone + Send + Sync { + fn group_zero() -> Self; + fn group_mul_assign(&mut self, by: &E::Fr); + fn group_add_assign(&mut self, other: &Self); + fn group_sub_assign(&mut self, other: &Self); +} + +pub struct Point(pub G); + +impl PartialEq for Point { + fn eq(&self, other: &Point) -> bool { + self.0 == other.0 + } +} + +impl Copy for Point { } + +impl Clone for Point { + fn clone(&self) -> Point { + *self + } +} + +impl Group for Point { + fn group_zero() -> Self { + Point(G::zero()) + } + fn group_mul_assign(&mut self, by: &G::Scalar) { + self.0.mul_assign(by.into_repr()); + } + fn group_add_assign(&mut self, other: &Self) { + self.0.add_assign(&other.0); + } + fn group_sub_assign(&mut self, other: &Self) { + self.0.sub_assign(&other.0); + } +} + +pub struct Scalar(pub E::Fr); + +impl PartialEq for Scalar { + fn eq(&self, other: &Scalar) -> bool { + self.0 == other.0 + } +} + +impl Copy for Scalar { } + +impl Clone for Scalar { + fn clone(&self) -> Scalar { + *self + } +} + +impl Group for Scalar { + fn group_zero() -> Self { + Scalar(E::Fr::zero()) + } + fn group_mul_assign(&mut self, by: &E::Fr) { + self.0.mul_assign(by); + } + fn group_add_assign(&mut self, other: &Self) { + self.0.add_assign(&other.0); + } + fn group_sub_assign(&mut self, other: &Self) { + self.0.sub_assign(&other.0); + } +} \ No newline at end of file diff --git a/bellman/src/lib.rs b/bellman/src/lib.rs new file mode 100644 index 0000000..8256168 --- /dev/null +++ b/bellman/src/lib.rs @@ -0,0 +1,54 @@ +#![allow(unused_imports)] +#![allow(unused_macros)] +#[macro_use] + +extern crate cfg_if; +pub extern crate pairing; +extern crate rand; +extern crate bit_vec; +extern crate byteorder; + +#[macro_use] +mod log; + +pub mod domain; +pub mod groth16; + +#[cfg(feature = "gm17")] +pub mod gm17; +#[cfg(feature = "sonic")] +pub mod sonic; + +mod group; +mod source; +mod multiexp; + +#[cfg(test)] +mod tests; + +cfg_if! { + if #[cfg(feature = "multicore")] { + #[cfg(feature = "wasm")] + compile_error!("Multicore feature is not yet compatible with wasm target arch"); + + mod multicore; + mod worker { + pub use crate::multicore::*; + } + } else { + mod singlecore; + mod worker { + pub use crate::singlecore::*; + } + } +} + +mod cs; +pub use self::cs::*; + +use std::str::FromStr; +use std::env; + +fn verbose_flag() -> bool { + option_env!("BELLMAN_VERBOSE").unwrap_or("0") == "1" +} \ No newline at end of file diff --git a/bellman/src/log.rs b/bellman/src/log.rs new file mode 100644 index 0000000..b912a16 --- /dev/null +++ b/bellman/src/log.rs @@ -0,0 +1,70 @@ +#[allow(unused_macros)] + +cfg_if! { + if #[cfg(feature = "wasm")] { + use web_sys; + use web_sys::Performance; + + macro_rules! log { + ($($t:tt)*) => (web_sys::console::log_1(&format_args!($($t)*).to_string().into())) + } + + macro_rules! elog { + ($($t:tt)*) => (web_sys::console::log_1(&format_args!($($t)*).to_string().into())) + } + + macro_rules! 
log_verbose { + ($($t:tt)*) => (if $crate::verbose_flag() { web_sys::console::log_1(&format_args!($($t)*).to_string().into()) }) + } + + macro_rules! elog_verbose { + ($($t:tt)*) => (if $crate::verbose_flag() { web_sys::console::log_1(&format_args!($($t)*).to_string().into()) }) + } + + pub struct Stopwatch { + start: f64, + perf: Performance + } + + impl Stopwatch { + pub fn new() -> Stopwatch { + let perf = web_sys::window().unwrap().performance().unwrap(); + Stopwatch { start: perf.now(), perf } + } + + pub fn elapsed(&self) -> f64 { + (self.perf.now() - self.start) / 1000.0 + } + } + } else { + macro_rules! log { + ($($t:tt)*) => (println!($($t)*)) + } + + macro_rules! elog { + ($($t:tt)*) => (eprintln!($($t)*)) + } + + macro_rules! log_verbose { + ($($t:tt)*) => (if $crate::verbose_flag() { println!($($t)*) }) + } + + macro_rules! elog_verbose { + ($($t:tt)*) => (if $crate::verbose_flag() { eprintln!($($t)*) }) + } + + pub struct Stopwatch { + start: std::time::Instant + } + + impl Stopwatch { + pub fn new() -> Stopwatch { + Stopwatch { start: std::time::Instant::now() } + } + + pub fn elapsed(&self) -> f64 { + self.start.elapsed().as_millis() as f64 / 1000.0 + } + } + } +} \ No newline at end of file diff --git a/bellman/src/multicore.rs b/bellman/src/multicore.rs new file mode 100644 index 0000000..bb113c6 --- /dev/null +++ b/bellman/src/multicore.rs @@ -0,0 +1,110 @@ +//! This is an interface for dealing with the kinds of +//! parallel computations involved in bellman. It's +//! currently just a thin wrapper around CpuPool and +//! crossbeam but may be extended in the future to +//! allow for various parallelism strategies. + +extern crate num_cpus; +extern crate futures; +extern crate futures_cpupool; +extern crate crossbeam; + +use self::futures::{Future, IntoFuture, Poll}; +use self::futures_cpupool::{CpuPool, CpuFuture}; +use self::crossbeam::thread::{Scope}; + +#[derive(Clone)] +pub struct Worker { + cpus: usize, + pool: CpuPool +} + +impl Worker { + // We don't expose this outside the library so that + // all `Worker` instances have the same number of + // CPUs configured. 
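+    //
+    // A minimal usage sketch of the API below (`values` and the closure body
+    // are placeholders, not part of this crate): `compute` submits one
+    // closure to the CPU pool and yields a `WorkerFuture`, while `scope`
+    // splits an indexed workload into chunks of roughly `elements / cpus`
+    // items and runs them on crossbeam scoped threads:
+    //
+    //     let worker = Worker::new();
+    //     worker.scope(values.len(), |scope, chunk| {
+    //         for slice in values.chunks_mut(chunk) {
+    //             scope.spawn(move |_| {
+    //                 // process `slice` here
+    //             });
+    //         }
+    //     });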
+ pub(crate) fn new_with_cpus(cpus: usize) -> Worker { + Worker { + cpus: cpus, + pool: CpuPool::new(cpus) + } + } + + pub fn new() -> Worker { + Self::new_with_cpus(num_cpus::get()) + } + + pub fn log_num_cpus(&self) -> u32 { + log2_floor(self.cpus) + } + + pub fn compute( + &self, f: F + ) -> WorkerFuture + where F: FnOnce() -> R + Send + 'static, + R: IntoFuture + 'static, + R::Future: Send + 'static, + R::Item: Send + 'static, + R::Error: Send + 'static + { + WorkerFuture { + future: self.pool.spawn_fn(f) + } + } + + pub fn scope<'a, F, R>( + &self, + elements: usize, + f: F + ) -> R + where F: FnOnce(&Scope<'a>, usize) -> R + { + let chunk_size = if elements < self.cpus { + 1 + } else { + elements / self.cpus + }; + + crossbeam::scope(|scope| { + f(scope, chunk_size) + }).expect("must run") + } +} + +pub struct WorkerFuture { + future: CpuFuture +} + +impl Future for WorkerFuture { + type Item = T; + type Error = E; + + fn poll(&mut self) -> Poll + { + self.future.poll() + } +} + +fn log2_floor(num: usize) -> u32 { + assert!(num > 0); + + let mut pow = 0; + + while (1 << (pow+1)) <= num { + pow += 1; + } + + pow +} + +#[test] +fn test_log2_floor() { + assert_eq!(log2_floor(1), 0); + assert_eq!(log2_floor(2), 1); + assert_eq!(log2_floor(3), 1); + assert_eq!(log2_floor(4), 2); + assert_eq!(log2_floor(5), 2); + assert_eq!(log2_floor(6), 2); + assert_eq!(log2_floor(7), 2); + assert_eq!(log2_floor(8), 3); +} diff --git a/bellman/src/multiexp.rs b/bellman/src/multiexp.rs new file mode 100644 index 0000000..c09b4e9 --- /dev/null +++ b/bellman/src/multiexp.rs @@ -0,0 +1,619 @@ +use crate::pairing::{ + CurveAffine, + CurveProjective, + Engine +}; + +use crate::pairing::ff::{ + PrimeField, + Field, + PrimeFieldRepr, + ScalarEngine}; + +use std::sync::Arc; +use super::source::*; +use futures::{Future}; +use super::worker::Worker; + +use super::SynthesisError; + +use cfg_if; + +/// This genious piece of code works in the following way: +/// - choose `c` - the bit length of the region that one thread works on +/// - make `2^c - 1` buckets and initialize them with `G = infinity` (that's equivalent of zero) +/// - there is no bucket for "zero" cause it's not necessary +/// - go over the pairs `(base, scalar)` +/// - for each scalar calculate `scalar % 2^c` and add the base (without any multiplications!) 
to the +/// corresponding bucket +/// - at the end each bucket will have an accumulated value that should be multiplied by the corresponding factor +/// between `1` and `2^c - 1` to get the right value +/// - here comes the first trick - you don't need to do multiplications at all, just add all the buckets together +/// starting from the first one `(a + b + c + ...)` and than add to the first sum another sum of the form +/// `(b + c + d + ...)`, and than the third one `(c + d + ...)`, that will result in the proper prefactor infront of every +/// accumulator, without any multiplication operations at all +/// - that's of course not enough, so spawn the next thread +/// - this thread works with the same bit width `c`, but SKIPS lowers bits completely, so it actually takes values +/// in the form `(scalar >> c) % 2^c`, so works on the next region +/// - spawn more threads until you exhaust all the bit length +/// - you will get roughly `[bitlength / c] + 1` inaccumulators +/// - double the highest accumulator enough times, add to the next one, double the result, add the next accumulator, continue +/// +/// Demo why it works: +/// ``` +/// a * G + b * H = (a_2 * (2^c)^2 + a_1 * (2^c)^1 + a_0) * G + (b_2 * (2^c)^2 + b_1 * (2^c)^1 + b_0) * H +/// ``` +/// - make buckets over `0` labeled coefficients +/// - make buckets over `1` labeled coefficients +/// - make buckets over `2` labeled coefficients +/// - accumulators over each set of buckets will have an implicit factor of `(2^c)^i`, so before summing thme up +/// "higher" accumulators must be doubled `c` times +/// +#[cfg(not(feature = "nightly"))] +fn multiexp_inner( + pool: &Worker, + bases: S, + density_map: D, + exponents: Arc::Repr>>, + // exponents: Arc::Fr as PrimeField>::Repr>>, + mut skip: u32, + c: u32, + handle_trivial: bool +) -> Box::Projective, Error=SynthesisError>> + where for<'a> &'a Q: QueryDensity, + D: Send + Sync + 'static + Clone + AsRef, + G: CurveAffine, + S: SourceBuilder +{ + // Perform this region of the multiexp + let this = { + let bases = bases.clone(); + let exponents = exponents.clone(); + let density_map = density_map.clone(); + + // This is a Pippenger’s algorithm + pool.compute(move || { + // Accumulate the result + let mut acc = G::Projective::zero(); + + // Build a source for the bases + let mut bases = bases.new(); + + // Create buckets to place remainders s mod 2^c, + // it will be 2^c - 1 buckets (no bucket for zeroes) + + // Create space for the buckets + let mut buckets = vec![::Projective::zero(); (1 << c) - 1]; + + let zero = ::Fr::zero().into_repr(); + let one = ::Fr::one().into_repr(); + + // Sort the bases into buckets + for (&exp, density) in exponents.iter().zip(density_map.as_ref().iter()) { + // Go over density and exponents + if density { + if exp == zero { + bases.skip(1)?; + } else if exp == one { + if handle_trivial { + bases.add_assign_mixed(&mut acc)?; + } else { + bases.skip(1)?; + } + } else { + // Place multiplication into the bucket: Separate s * P as + // (s/2^c) * P + (s mod 2^c) P + // First multiplication is c bits less, so one can do it, + // sum results from different buckets and double it c times, + // then add with (s mod 2^c) P parts + let mut exp = exp; + exp.shr(skip); + let exp = exp.as_ref()[0] % (1 << c); + + if exp != 0 { + bases.add_assign_mixed(&mut buckets[(exp - 1) as usize])?; + } else { + bases.skip(1)?; + } + } + } + } + + // Summation by parts + // e.g. 
3a + 2b + 1c = a + + // (a) + b + + // ((a) + b) + c + let mut running_sum = G::Projective::zero(); + for exp in buckets.into_iter().rev() { + running_sum.add_assign(&exp); + acc.add_assign(&running_sum); + } + + Ok(acc) + }) + }; + + skip += c; + + if skip >= ::Fr::NUM_BITS { + // There isn't another region. + Box::new(this) + } else { + // There's another region more significant. Calculate and join it with + // this region recursively. + Box::new( + this.join(multiexp_inner(pool, bases, density_map, exponents, skip, c, false)) + .map(move |(this, mut higher)| { + for _ in 0..c { + higher.double(); + } + + higher.add_assign(&this); + + higher + }) + ) + } +} + + +cfg_if! { + if #[cfg(feature = "nightly")] { + #[inline(always)] + fn multiexp_inner_impl( + pool: &Worker, + bases: S, + density_map: D, + exponents: Arc::Repr>>, + skip: u32, + c: u32, + handle_trivial: bool + ) -> Box::Projective, Error=SynthesisError>> + where for<'a> &'a Q: QueryDensity, + D: Send + Sync + 'static + Clone + AsRef, + G: CurveAffine, + S: SourceBuilder + { + multiexp_inner_with_prefetch(pool, bases, density_map, exponents, skip, c, handle_trivial) + } + } else { + #[inline(always)] + fn multiexp_inner_impl( + pool: &Worker, + bases: S, + density_map: D, + exponents: Arc::Repr>>, + skip: u32, + c: u32, + handle_trivial: bool + ) -> Box::Projective, Error=SynthesisError>> + where for<'a> &'a Q: QueryDensity, + D: Send + Sync + 'static + Clone + AsRef, + G: CurveAffine, + S: SourceBuilder + { + multiexp_inner(pool, bases, density_map, exponents, skip, c, handle_trivial) + } + } +} + + + +#[cfg(feature = "nightly")] +extern crate prefetch; + +#[cfg(feature = "nightly")] +fn multiexp_inner_with_prefetch( + pool: &Worker, + bases: S, + density_map: D, + exponents: Arc::Repr>>, + mut skip: u32, + c: u32, + handle_trivial: bool +) -> Box::Projective, Error=SynthesisError>> + where for<'a> &'a Q: QueryDensity, + D: Send + Sync + 'static + Clone + AsRef, + G: CurveAffine, + S: SourceBuilder +{ + use prefetch::prefetch::*; + // Perform this region of the multiexp + let this = { + let bases = bases.clone(); + let exponents = exponents.clone(); + let density_map = density_map.clone(); + + // This is a Pippenger’s algorithm + pool.compute(move || { + // Accumulate the result + let mut acc = G::Projective::zero(); + + // Build a source for the bases + let mut bases = bases.new(); + + // Create buckets to place remainders s mod 2^c, + // it will be 2^c - 1 buckets (no bucket for zeroes) + + // Create space for the buckets + let mut buckets = vec![::Projective::zero(); (1 << c) - 1]; + + let zero = ::Fr::zero().into_repr(); + let one = ::Fr::one().into_repr(); + let padding = Arc::new(vec![zero]); + + let mask = 1 << c; + + // Sort the bases into buckets + for ((&exp, &next_exp), density) in exponents.iter() + .zip(exponents.iter().skip(1).chain(padding.iter())) + .zip(density_map.as_ref().iter()) { + // no matter what happens - prefetch next bucket + if next_exp != zero && next_exp != one { + let mut next_exp = next_exp; + next_exp.shr(skip); + let next_exp = next_exp.as_ref()[0] % mask; + if next_exp != 0 { + let p: *const ::Projective = &buckets[(next_exp - 1) as usize]; + prefetch::(p); + } + + } + // Go over density and exponents + if density { + if exp == zero { + bases.skip(1)?; + } else if exp == one { + if handle_trivial { + bases.add_assign_mixed(&mut acc)?; + } else { + bases.skip(1)?; + } + } else { + // Place multiplication into the bucket: Separate s * P as + // (s/2^c) * P + (s mod 2^c) P + // First 
multiplication is c bits less, so one can do it, + // sum results from different buckets and double it c times, + // then add with (s mod 2^c) P parts + let mut exp = exp; + exp.shr(skip); + let exp = exp.as_ref()[0] % mask; + + if exp != 0 { + bases.add_assign_mixed(&mut buckets[(exp - 1) as usize])?; + } else { + bases.skip(1)?; + } + } + } + } + + // Summation by parts + // e.g. 3a + 2b + 1c = a + + // (a) + b + + // ((a) + b) + c + let mut running_sum = G::Projective::zero(); + for exp in buckets.into_iter().rev() { + running_sum.add_assign(&exp); + acc.add_assign(&running_sum); + } + + Ok(acc) + }) + }; + + skip += c; + + if skip >= ::Fr::NUM_BITS { + // There isn't another region. + Box::new(this) + } else { + // There's another region more significant. Calculate and join it with + // this region recursively. + Box::new( + this.join(multiexp_inner_with_prefetch(pool, bases, density_map, exponents, skip, c, false)) + .map(move |(this, mut higher)| { + for _ in 0..c { + higher.double(); + } + + higher.add_assign(&this); + + higher + }) + ) + } +} + +/// Perform multi-exponentiation. The caller is responsible for ensuring the +/// query size is the same as the number of exponents. +pub fn multiexp( + pool: &Worker, + bases: S, + density_map: D, + exponents: Arc::Fr as PrimeField>::Repr>> +) -> Box::Projective, Error=SynthesisError>> + where for<'a> &'a Q: QueryDensity, + D: Send + Sync + 'static + Clone + AsRef, + G: CurveAffine, + S: SourceBuilder +{ + let c = if exponents.len() < 32 { + 3u32 + } else { + (f64::from(exponents.len() as u32)).ln().ceil() as u32 + }; + + if let Some(query_size) = density_map.as_ref().get_query_size() { + // If the density map has a known query size, it should not be + // inconsistent with the number of exponents. + + assert!(query_size == exponents.len()); + } + + multiexp_inner_impl(pool, bases, density_map, exponents, 0, c, true) +} + + +/// Perform multi-exponentiation. The caller is responsible for ensuring that +/// the number of bases is the same as the number of exponents. +#[allow(dead_code)] +pub fn dense_multiexp( + pool: &Worker, + bases: & [G], + exponents: & [<::Fr as PrimeField>::Repr] +) -> Result<::Projective, SynthesisError> +{ + if exponents.len() != bases.len() { + return Err(SynthesisError::AssignmentMissing); + } + let c = if exponents.len() < 32 { + 3u32 + } else { + (f64::from(exponents.len() as u32)).ln().ceil() as u32 + }; + + dense_multiexp_inner(pool, bases, exponents, 0, c, true) +} + +fn dense_multiexp_inner( + pool: &Worker, + bases: & [G], + exponents: & [<::Fr as PrimeField>::Repr], + mut skip: u32, + c: u32, + handle_trivial: bool +) -> Result<::Projective, SynthesisError> +{ + use std::sync::{Mutex}; + // Perform this region of the multiexp. We use a different strategy - go over region in parallel, + // then over another region, etc. 
No Arc required + let this = { + // let mask = (1u64 << c) - 1u64; + let this_region = Mutex::new(::Projective::zero()); + let arc = Arc::new(this_region); + pool.scope(bases.len(), |scope, chunk| { + for (base, exp) in bases.chunks(chunk).zip(exponents.chunks(chunk)) { + let this_region_rwlock = arc.clone(); + // let handle = + scope.spawn(move |_| { + let mut buckets = vec![::Projective::zero(); (1 << c) - 1]; + // Accumulate the result + let mut acc = G::Projective::zero(); + let zero = ::Fr::zero().into_repr(); + let one = ::Fr::one().into_repr(); + + for (base, &exp) in base.iter().zip(exp.iter()) { + // let index = (exp.as_ref()[0] & mask) as usize; + + // if index != 0 { + // buckets[index - 1].add_assign_mixed(base); + // } + + // exp.shr(c as u32); + + if exp != zero { + if exp == one { + if handle_trivial { + acc.add_assign_mixed(base); + } + } else { + let mut exp = exp; + exp.shr(skip); + let exp = exp.as_ref()[0] % (1 << c); + if exp != 0 { + buckets[(exp - 1) as usize].add_assign_mixed(base); + } + } + } + } + + // buckets are filled with the corresponding accumulated value, now sum + let mut running_sum = G::Projective::zero(); + for exp in buckets.into_iter().rev() { + running_sum.add_assign(&exp); + acc.add_assign(&running_sum); + } + + let mut guard = match this_region_rwlock.lock() { + Ok(guard) => guard, + Err(_) => { + panic!("poisoned!"); + // poisoned.into_inner() + } + }; + + (*guard).add_assign(&acc); + }); + + } + }); + + let this_region = Arc::try_unwrap(arc).unwrap(); + let this_region = this_region.into_inner().unwrap(); + + this_region + }; + + skip += c; + + if skip >= ::Fr::NUM_BITS { + // There isn't another region, and this will be the highest region + return Ok(this); + } else { + // next region is actually higher than this one, so double it enough times + let mut next_region = dense_multiexp_inner( + pool, bases, exponents, skip, c, false).unwrap(); + for _ in 0..c { + next_region.double(); + } + + next_region.add_assign(&this); + + return Ok(next_region); + } +} + + + +#[test] +fn test_with_bls12() { + fn naive_multiexp( + bases: Arc>, + exponents: Arc::Repr>> + ) -> G::Projective + { + assert_eq!(bases.len(), exponents.len()); + + let mut acc = G::Projective::zero(); + + for (base, exp) in bases.iter().zip(exponents.iter()) { + acc.add_assign(&base.mul(*exp)); + } + + acc + } + + use rand::{self, Rand}; + use crate::pairing::bls12_381::Bls12; + + const SAMPLES: usize = 1 << 14; + + let rng = &mut rand::thread_rng(); + let v = Arc::new((0..SAMPLES).map(|_| ::Fr::rand(rng).into_repr()).collect::>()); + let g = Arc::new((0..SAMPLES).map(|_| ::G1::rand(rng).into_affine()).collect::>()); + + let naive = naive_multiexp(g.clone(), v.clone()); + + let pool = Worker::new(); + + let fast = multiexp( + &pool, + (g, 0), + FullDensity, + v + ).wait().unwrap(); + + assert_eq!(naive, fast); +} + +#[test] +fn test_speed_with_bn256() { + use rand::{self, Rand}; + use crate::pairing::bn256::Bn256; + use num_cpus; + + let cpus = num_cpus::get(); + const SAMPLES: usize = 1 << 22; + + let rng = &mut rand::thread_rng(); + let v = Arc::new((0..SAMPLES).map(|_| ::Fr::rand(rng).into_repr()).collect::>()); + let g = Arc::new((0..SAMPLES).map(|_| ::G1::rand(rng).into_affine()).collect::>()); + + let pool = Worker::new(); + + let start = std::time::Instant::now(); + + let _fast = multiexp( + &pool, + (g, 0), + FullDensity, + v + ).wait().unwrap(); + + + let duration_ns = start.elapsed().as_nanos() as f64; + println!("Elapsed {} ns for {} samples", duration_ns, SAMPLES); + let 
time_per_sample = duration_ns/(SAMPLES as f64); + println!("Tested on {} samples on {} CPUs with {} ns per multiplication", SAMPLES, cpus, time_per_sample); +} + + +#[test] +fn test_dense_multiexp() { + use rand::{XorShiftRng, SeedableRng, Rand, Rng}; + use crate::pairing::bn256::Bn256; + use num_cpus; + + // const SAMPLES: usize = 1 << 22; + const SAMPLES: usize = 1 << 16; + let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]); + + let v = (0..SAMPLES).map(|_| ::Fr::rand(rng).into_repr()).collect::>(); + let g = (0..SAMPLES).map(|_| ::G1::rand(rng).into_affine()).collect::>(); + + println!("Done generating test points and scalars"); + + let pool = Worker::new(); + + let start = std::time::Instant::now(); + + let dense = dense_multiexp( + &pool, &g, &v.clone()).unwrap(); + + let duration_ns = start.elapsed().as_nanos() as f64; + println!("{} ns for dense for {} samples", duration_ns, SAMPLES); + + let start = std::time::Instant::now(); + + let sparse = multiexp( + &pool, + (Arc::new(g), 0), + FullDensity, + Arc::new(v) + ).wait().unwrap(); + + let duration_ns = start.elapsed().as_nanos() as f64; + println!("{} ns for sparse for {} samples", duration_ns, SAMPLES); + + assert_eq!(dense, sparse); +} + + +#[test] +fn test_bench_sparse_multiexp() { + use rand::{XorShiftRng, SeedableRng, Rand, Rng}; + use crate::pairing::bn256::Bn256; + use num_cpus; + + const SAMPLES: usize = 1 << 22; + let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]); + + let v = (0..SAMPLES).map(|_| ::Fr::rand(rng).into_repr()).collect::>(); + let g = (0..SAMPLES).map(|_| ::G1::rand(rng).into_affine()).collect::>(); + + println!("Done generating test points and scalars"); + + let pool = Worker::new(); + let start = std::time::Instant::now(); + + let _sparse = multiexp( + &pool, + (Arc::new(g), 0), + FullDensity, + Arc::new(v) + ).wait().unwrap(); + + let duration_ns = start.elapsed().as_nanos() as f64; + println!("{} ms for sparse for {} samples", duration_ns/1000.0f64, SAMPLES); +} \ No newline at end of file diff --git a/bellman/src/singlecore.rs b/bellman/src/singlecore.rs new file mode 100644 index 0000000..816ceef --- /dev/null +++ b/bellman/src/singlecore.rs @@ -0,0 +1,93 @@ +//! This is a dummy interface to substitute multicore worker +//! in environments like WASM +extern crate futures; + +use std::marker::PhantomData; + +use self::futures::{Future, IntoFuture, Poll}; +use self::futures::future::{result, FutureResult}; + +#[derive(Clone)] +pub struct Worker { + cpus: usize, +} + +impl Worker { + // We don't expose this outside the library so that + // all `Worker` instances have the same number of + // CPUs configured. 
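+    //
+    // Note (descriptive only): in this fallback the pool is simulated
+    // synchronously. `compute` runs the closure immediately and wraps the
+    // already-finished result in a `FutureResult`, and `scope` hands the
+    // caller a single chunk equal to `elements`, so `spawn` below simply
+    // invokes its closure inline on the current thread.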
+ pub(crate) fn new_with_cpus(_cpus: usize) -> Worker { + Worker { + cpus: 1, + } + } + + pub fn new() -> Worker { + Self::new_with_cpus(1) + } + + pub fn log_num_cpus(&self) -> u32 { + 0u32 + } + + pub fn compute( + &self, f: F + ) -> WorkerFuture + where F: FnOnce() -> R + Send + 'static, + R: IntoFuture + 'static, + R::Future: Send + 'static, + R::Item: Send + 'static, + R::Error: Send + 'static + { + let future = f().into_future(); + + WorkerFuture { + future: result(future.wait()) + } + } + + pub fn scope<'a, F, R>( + &self, + elements: usize, + f: F + ) -> R + where F: FnOnce(&Scope<'a>, usize) -> R + { + let chunk_size = elements; + + let scope = Scope{ + _marker: PhantomData + }; + + f(&scope, chunk_size) + } +} +#[derive(Clone)] +pub struct Scope<'a> { + _marker: PhantomData<& 'a usize> +} + +impl<'a> Scope<'a> { + pub fn spawn( + &self, + f: F + ) -> R + where F: FnOnce(&Scope<'a>) -> R + { + f(&self) + } +} + +pub struct WorkerFuture { + future: FutureResult +} + +impl Future for WorkerFuture { + type Item = T; + type Error = E; + + fn poll(&mut self) -> Poll + { + self.future.poll() + } +} \ No newline at end of file diff --git a/bellman/src/sonic/README.md b/bellman/src/sonic/README.md new file mode 100644 index 0000000..8c4cf47 --- /dev/null +++ b/bellman/src/sonic/README.md @@ -0,0 +1,28 @@ +# Description + +Initial SONIC proof system integration using the code from the [original implementation](https://github.com/zknuckles/sonic.git). It's here for experimental reasons and evaluation of the following properties: + +- How applicable is "helped" procedure for a case of Ethereum +- What is a final verification cost for "helped" and "unhelped" procedures +- Prover efficiency in both cases +- Implementation of a memory constrained prover and helper +- Smart-contract implementation of verifiers +- Code cleanup +- Migration for smart-contract compatible transcripts + +## Current state + +Beta - fast and valid, but breaking API changes are expected + +## Completed + +- Basic proof modes (helped/unhelped) +- Succinct `S` polynomial evaluation using permutation argument +- High-level API for non-succinct mode that can produce "large enough" SRS from a "global" SRS +- Proving/verifying keys that have additional information about the circuit such as number of gates, linear constraints and public inputs +- Implement non-assigning backends for faster estimation of circuit parameters in un-cached cases + +## TODO Plan +- [ ] Make caching proving/verifying key for succinct mode +- [ ] Fix high-level API for both modes +- [ ] Re-structure the package itself \ No newline at end of file diff --git a/bellman/src/sonic/cs/lc.rs b/bellman/src/sonic/cs/lc.rs new file mode 100644 index 0000000..0bb32e5 --- /dev/null +++ b/bellman/src/sonic/cs/lc.rs @@ -0,0 +1,149 @@ +use crate::pairing::ff::{Field}; +use crate::pairing::{Engine}; +use std::ops::{Add, Sub, Neg}; + +/// This represents a linear combination of some variables, with coefficients +/// in the scalar field of a pairing-friendly elliptic curve group. 
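+///
+/// A hypothetical usage sketch, using the `Variable` and `Coeff` types
+/// defined later in this module (`cs`, `a`, `b`, `c` and `k` are
+/// placeholders):
+///
+/// ```ignore
+/// let lc = LinearCombination::from(a) + b - (Coeff::Full(k), c);
+/// cs.enforce_zero(lc);
+/// ```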
+#[derive(Clone)] +pub struct LinearCombination(Vec<(Variable, Coeff)>); + +impl From for LinearCombination { + fn from(var: Variable) -> LinearCombination { + LinearCombination::::zero() + var + } +} + +impl AsRef<[(Variable, Coeff)]> for LinearCombination { + fn as_ref(&self) -> &[(Variable, Coeff)] { + &self.0 + } +} + +impl LinearCombination { + pub fn zero() -> LinearCombination { + LinearCombination(vec![]) + } +} + +impl Add<(Coeff, Variable)> for LinearCombination { + type Output = LinearCombination; + + fn add(mut self, (coeff, var): (Coeff, Variable)) -> LinearCombination { + self.0.push((var, coeff)); + + self + } +} + +impl Sub<(Coeff, Variable)> for LinearCombination { + type Output = LinearCombination; + + fn sub(self, (coeff, var): (Coeff, Variable)) -> LinearCombination { + self + (-coeff, var) + } +} + +impl Add for LinearCombination { + type Output = LinearCombination; + + fn add(self, other: Variable) -> LinearCombination { + self + (Coeff::One, other) + } +} + +impl Sub for LinearCombination { + type Output = LinearCombination; + + fn sub(self, other: Variable) -> LinearCombination { + self - (Coeff::One, other) + } +} + +impl<'a, E: Engine> Add<&'a LinearCombination> for LinearCombination { + type Output = LinearCombination; + + fn add(mut self, other: &'a LinearCombination) -> LinearCombination { + for s in &other.0 { + self = self + (s.1, s.0); + } + + self + } +} + +impl<'a, E: Engine> Sub<&'a LinearCombination> for LinearCombination { + type Output = LinearCombination; + + fn sub(mut self, other: &'a LinearCombination) -> LinearCombination { + for s in &other.0 { + self = self - (s.1, s.0); + } + + self + } +} + +#[derive(Copy, Clone, Debug)] +pub enum Variable { + A(usize), + B(usize), + C(usize), +} + +impl Variable { + pub(crate) fn get_index(&self) -> usize { + match *self { + Variable::A(index) => index, + Variable::B(index) => index, + Variable::C(index) => index, + } + } +} + +#[derive(Debug)] +pub enum Coeff { + Zero, + One, + NegativeOne, + Full(E::Fr), +} + +impl Coeff { + pub fn multiply(&self, with: &mut E::Fr) { + match self { + Coeff::Zero => { + *with = E::Fr::zero(); + }, + Coeff::One => {}, + Coeff::NegativeOne => { + with.negate(); + }, + Coeff::Full(val) => { + with.mul_assign(val); + } + } + } +} + +impl Copy for Coeff {} +impl Clone for Coeff { + fn clone(&self) -> Self { + *self + } +} + +impl Neg for Coeff { + type Output = Coeff; + + fn neg(self) -> Self { + match self { + Coeff::Zero => Coeff::Zero, + Coeff::One => Coeff::NegativeOne, + Coeff::NegativeOne => Coeff::One, + Coeff::Full(mut a) => { + a.negate(); + Coeff::Full(a) + } + } + } +} \ No newline at end of file diff --git a/bellman/src/sonic/cs/mod.rs b/bellman/src/sonic/cs/mod.rs new file mode 100644 index 0000000..d7ad271 --- /dev/null +++ b/bellman/src/sonic/cs/mod.rs @@ -0,0 +1,73 @@ +use crate::pairing::ff::{Field}; +use crate::pairing::{Engine}; + +use crate::{SynthesisError}; +use std::marker::PhantomData; + +mod lc; +pub use self::lc::{Coeff, Variable, LinearCombination}; + +pub trait Circuit { + fn synthesize>(&self, cs: &mut CS) -> Result<(), SynthesisError>; +} + +pub trait ConstraintSystem { + const ONE: Variable; + + fn alloc(&mut self, value: F) -> Result + where + F: FnOnce() -> Result; + + fn alloc_input(&mut self, value: F) -> Result + where + F: FnOnce() -> Result; + + fn enforce_zero(&mut self, lc: LinearCombination); + + fn multiply(&mut self, values: F) -> Result<(Variable, Variable, Variable), SynthesisError> + where + F: FnOnce() -> Result<(E::Fr, E::Fr, 
E::Fr), SynthesisError>; + + // TODO: get rid of this + fn get_value(&self, _var: Variable) -> Result { + Err(()) + } +} + + + +/// This is a backend for the `SynthesisDriver` to relay information about +/// the concrete circuit. One backend might just collect basic information +/// about the circuit for verification, while another actually constructs +/// a witness. +pub trait Backend { + type LinearConstraintIndex; + + /// Get the value of a variable. Can return None if we don't know. + fn get_var(&self, _variable: Variable) -> Option { None } + + /// Set the value of a variable. Might error if this backend expects to know it. + fn set_var(&mut self, _variable: Variable, _value: F) -> Result<(), SynthesisError> + where F: FnOnce() -> Result { Ok(()) } + + /// Create a new multiplication gate. + fn new_multiplication_gate(&mut self) { } + + /// Create a new linear constraint, returning the power of Y for caching purposes. + fn new_linear_constraint(&mut self) -> Self::LinearConstraintIndex; + + /// Insert a term into a linear constraint. TODO: bad name of function + fn insert_coefficient(&mut self, _var: Variable, _coeff: Coeff, _y: &Self::LinearConstraintIndex) { } + + /// Compute a `LinearConstraintIndex` from `q`. + fn get_for_q(&self, q: usize) -> Self::LinearConstraintIndex; + + /// Mark y^{_index} as the power of y cooresponding to the public input + /// coefficient for the next public input, in the k(Y) polynomial. + fn new_k_power(&mut self, _index: usize) { } +} + +/// This is an abstraction which synthesizes circuits. +pub trait SynthesisDriver { + fn synthesize, B: Backend>(backend: B, circuit: &C) -> Result<(), SynthesisError>; +} \ No newline at end of file diff --git a/bellman/src/sonic/discussion.pdf b/bellman/src/sonic/discussion.pdf new file mode 100644 index 0000000..6db1686 Binary files /dev/null and b/bellman/src/sonic/discussion.pdf differ diff --git a/bellman/src/sonic/helped/adapted_helper.rs b/bellman/src/sonic/helped/adapted_helper.rs new file mode 100644 index 0000000..197f1b1 --- /dev/null +++ b/bellman/src/sonic/helped/adapted_helper.rs @@ -0,0 +1,33 @@ +use crate::pairing::ff::{Field}; +use crate::pairing::{Engine, CurveProjective}; +use std::marker::PhantomData; + +use rand::{Rand, Rng}; + +use super::{Proof, SxyAdvice}; +use super::batch::Batch; +use super::poly::{SxEval, SyEval}; +use super::parameters::{Parameters}; +use super::helper::{Aggregate}; + +use crate::SynthesisError; + +use crate::sonic::transcript::{Transcript, TranscriptProtocol}; +use crate::sonic::util::*; +use crate::sonic::cs::{Backend, SynthesisDriver}; +use crate::{Circuit}; +use crate::sonic::sonic::AdaptorCircuit; +use crate::sonic::srs::SRS; +use crate::sonic::sonic::Nonassigning; +use super::helper::create_aggregate as create_aggregate_sonic_circuit; + +pub fn create_aggregate + Clone>( + circuit: C, + inputs: &[(Proof, SxyAdvice)], + params: &Parameters, +) -> Aggregate +{ + let adapted_circuit = AdaptorCircuit(circuit); + + create_aggregate_sonic_circuit::<_, _, Nonassigning>(&adapted_circuit, inputs, params) +} diff --git a/bellman/src/sonic/helped/adapted_prover.rs b/bellman/src/sonic/helped/adapted_prover.rs new file mode 100644 index 0000000..62b6b12 --- /dev/null +++ b/bellman/src/sonic/helped/adapted_prover.rs @@ -0,0 +1,137 @@ +use crate::pairing::ff::{Field}; +use crate::pairing::{Engine, CurveProjective}; +use std::marker::PhantomData; + +use super::{Proof, SxyAdvice}; +use super::batch::Batch; +use super::poly::{SxEval, SyEval}; +use super::parameters::{Parameters}; + 
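+
+// Descriptive note on this adapter module (inferred from the functions
+// below): a caller-supplied bellman `Circuit` is wrapped in `AdaptorCircuit`
+// and driven through the sonic synthesis drivers (`Basic`, and `Nonassigning`
+// for parameter counting), so bellman-style circuits can be proven with the
+// sonic prover without being rewritten against the sonic `ConstraintSystem`
+// trait.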
+use crate::SynthesisError; + +use crate::sonic::transcript::{Transcript, TranscriptProtocol}; +use crate::sonic::util::*; +use crate::sonic::cs::{Backend, SynthesisDriver}; +use crate::{Circuit}; +use crate::sonic::sonic::AdaptorCircuit; +use crate::sonic::srs::SRS; +use crate::sonic::sonic::Basic; +use super::prover::create_advice as create_advice_sonic_circuit; +use super::prover::create_advice_on_information_and_srs as create_advice_on_information_and_srs_sonic_circuit; +use super::prover::create_proof_on_srs as create_proof_on_srs_sonic_circuit; +use crate::sonic::sonic::CountN; + +// pub fn create_advice_on_information_and_srs + Clone, S: SynthesisDriver>( +pub fn create_advice_on_information_and_srs + Clone>( + circuit: C, + proof: &Proof, + srs: &SRS, + n: usize +) -> Result, SynthesisError> +{ + let adapted_circuit = AdaptorCircuit(circuit); + + create_advice_on_information_and_srs_sonic_circuit::<_, _, Basic>(&adapted_circuit, proof, srs, n) +} + +// pub fn create_advice + Clone, S: SynthesisDriver>( +pub fn create_advice + Clone>( + circuit: C, + proof: &Proof, + parameters: &Parameters, +) -> Result, SynthesisError> +{ + let n = parameters.vk.n; + create_advice_on_information_and_srs::(circuit, proof, ¶meters.srs, n) +} + +// pub fn create_advice_on_srs + Clone, S: SynthesisDriver>( +pub fn create_advice_on_srs + Clone>( + circuit: C, + proof: &Proof, + srs: &SRS +) -> Result, SynthesisError> +{ + use crate::sonic::sonic::Nonassigning; + + let adapted_circuit = AdaptorCircuit(circuit.clone()); + // annoying, but we need n to compute s(z, y), and this isn't + // precomputed anywhere yet + let n = { + let mut tmp = CountN::::new(); + Nonassigning::synthesize(&mut tmp, &adapted_circuit)?; + + tmp.n + }; + + create_advice_on_information_and_srs::(circuit, proof, srs, n) +} + +// pub fn create_proof + Clone, S: SynthesisDriver>( +pub fn create_proof + Clone>( + circuit: C, + parameters: &Parameters +) -> Result, SynthesisError> { + create_proof_on_srs::(circuit, ¶meters.srs) +} + +// pub fn create_proof_on_srs + Clone, S: SynthesisDriver>( +pub fn create_proof_on_srs + Clone>( + circuit: C, + srs: &SRS +) -> Result, SynthesisError> +{ + let adapted_circuit = AdaptorCircuit(circuit); + + create_proof_on_srs_sonic_circuit::<_, _, Basic>(&adapted_circuit, srs) +} + +// #[test] +// fn my_fun_circuit_test() { +// use crate::pairing::ff::PrimeField; +// use crate::pairing::bls12_381::{Bls12, Fr}; +// use super::*; +// use crate::sonic::cs::{Basic, ConstraintSystem, LinearCombination}; + +// struct MyCircuit; + +// impl Circuit for MyCircuit { +// fn synthesize>(&self, cs: &mut CS) -> Result<(), SynthesisError> { +// let (a, b, _) = cs.multiply(|| { +// Ok(( +// E::Fr::from_str("10").unwrap(), +// E::Fr::from_str("20").unwrap(), +// E::Fr::from_str("200").unwrap(), +// )) +// })?; + +// cs.enforce_zero(LinearCombination::from(a) + a - b); + +// //let multiplier = cs.alloc_input(|| Ok(E::Fr::from_str("20").unwrap()))?; + +// //cs.enforce_zero(LinearCombination::from(b) - multiplier); + +// Ok(()) +// } +// } + +// let srs = SRS::::new( +// 20, +// Fr::from_str("22222").unwrap(), +// Fr::from_str("33333333").unwrap(), +// ); +// let proof = create_proof_on_srs::(&MyCircuit, &srs).unwrap(); + +// use std::time::{Instant}; +// let start = Instant::now(); +// let mut batch = MultiVerifier::::new(MyCircuit, &srs).unwrap(); + +// for _ in 0..1 { +// batch.add_proof(&proof, &[/*Fr::from_str("20").unwrap()*/], |_, _| None); +// } + +// assert!(batch.check_all()); + +// let elapsed = 
start.elapsed(); +// println!("time to verify: {:?}", elapsed); +// } diff --git a/bellman/src/sonic/helped/adapted_verifier.rs b/bellman/src/sonic/helped/adapted_verifier.rs new file mode 100644 index 0000000..c796506 --- /dev/null +++ b/bellman/src/sonic/helped/adapted_verifier.rs @@ -0,0 +1,102 @@ +use crate::pairing::ff::{Field}; +use crate::pairing::{Engine, CurveProjective}; +use std::marker::PhantomData; + +use rand::{Rand, Rng}; + +use super::{Proof, SxyAdvice}; +use super::batch::Batch; +use super::poly::{SxEval, SyEval}; +use super::parameters::{Parameters}; +use super::helper::{Aggregate}; + +use crate::SynthesisError; + +use crate::sonic::transcript::{Transcript, TranscriptProtocol}; +use crate::sonic::util::*; +use crate::sonic::cs::{Backend, SynthesisDriver}; +use crate::{Circuit}; +use crate::sonic::sonic::AdaptorCircuit; +use crate::sonic::srs::SRS; +use crate::sonic::sonic::Nonassigning; +use super::verifier::verify_aggregate_on_srs as verify_aggregate_on_srs_sonic_circuit; +use super::verifier::verify_proofs_on_srs as verify_proofs_on_srs_sonic_circuit; + +pub fn verify_proofs + Clone, R: Rng>( + proofs: &[Proof], + inputs: &[Vec], + circuit: C, + rng: R, + params: &Parameters, +) -> Result +{ + let adapted_circuit = AdaptorCircuit(circuit); + + verify_proofs_on_srs_sonic_circuit::<_, _, Nonassigning, _>(proofs, inputs, adapted_circuit, rng, ¶ms.srs) +} + +/// Check multiple proofs with aggregation. Verifier's work is +/// not succint due to `S(X, Y)` evaluation +pub fn verify_aggregate + Clone, R: Rng>( + proofs: &[(Proof, SxyAdvice)], + aggregate: &Aggregate, + inputs: &[Vec], + circuit: C, + rng: R, + params: &Parameters, +) -> Result { + let adapted_circuit = AdaptorCircuit(circuit); + + verify_aggregate_on_srs_sonic_circuit::<_, _, Nonassigning, _>(proofs, aggregate, inputs, adapted_circuit, rng, ¶ms.srs) +} + + +// #[test] +// fn my_fun_circuit_test() { +// use crate::pairing::ff::PrimeField; +// use crate::pairing::bls12_381::{Bls12, Fr}; +// use super::*; +// use crate::sonic::cs::{Basic, ConstraintSystem, LinearCombination}; + +// struct MyCircuit; + +// impl Circuit for MyCircuit { +// fn synthesize>(&self, cs: &mut CS) -> Result<(), SynthesisError> { +// let (a, b, _) = cs.multiply(|| { +// Ok(( +// E::Fr::from_str("10").unwrap(), +// E::Fr::from_str("20").unwrap(), +// E::Fr::from_str("200").unwrap(), +// )) +// })?; + +// cs.enforce_zero(LinearCombination::from(a) + a - b); + +// //let multiplier = cs.alloc_input(|| Ok(E::Fr::from_str("20").unwrap()))?; + +// //cs.enforce_zero(LinearCombination::from(b) - multiplier); + +// Ok(()) +// } +// } + +// let srs = SRS::::new( +// 20, +// Fr::from_str("22222").unwrap(), +// Fr::from_str("33333333").unwrap(), +// ); +// let proof = create_proof_on_srs::(&MyCircuit, &srs).unwrap(); + +// use std::time::{Instant}; +// let start = Instant::now(); +// let mut batch = MultiVerifier::::new(MyCircuit, &srs).unwrap(); + +// for _ in 0..1 { +// batch.add_proof(&proof, &[/*Fr::from_str("20").unwrap()*/], |_, _| None); +// } + +// assert!(batch.check_all()); + +// let elapsed = start.elapsed(); +// println!("time to verify: {:?}", elapsed); +// } diff --git a/bellman/src/sonic/helped/batch.rs b/bellman/src/sonic/helped/batch.rs new file mode 100644 index 0000000..6160e23 --- /dev/null +++ b/bellman/src/sonic/helped/batch.rs @@ -0,0 +1,169 @@ +//! Our protocol allows the verification of multiple proofs and even +//! of individual proofs to batch the pairing operations such that +//! 
only a smaller, fixed number of pairings must occur for an entire +//! batch of proofs. This is possible because G2 elements are fixed +//! in our protocol and never appear in proofs; everything can be +//! combined probabilistically. +//! +//! This submodule contains the `Batch` abstraction for creating a +//! context for batch verification. + +use crate::pairing::ff::{Field}; +use crate::pairing::{Engine, CurveAffine, CurveProjective}; + +use crate::SynthesisError; + +use crate::sonic::cs::{Backend, SynthesisDriver}; +use crate::sonic::cs::{Circuit}; + +use super::parameters::VerifyingKey; + +use crate::sonic::srs::SRS; +use crate::sonic::util::multiexp; + +use std::marker::PhantomData; + +// One of the primary functions of the `Batch` abstraction is handling +// Kate commitment openings: +// +// e(P', [\alpha(x - z)] H) = e(P, H) e([-v] G, [\alpha] H) +// ==> e(P', [\alpha x] H) e([-z] P', [\alpha] H) = e(P, H) e([-v] G, [\alpha] H) +// +// Many of these can be opened simultaneously by sampling random `r` and +// accumulating... +// +// e([r] P', [\alpha x] H) +// e([-rz] P', [\alpha] H) +// e([r] P, -H) +// e([rv] G, [\alpha] H) +// +// ... and checking that the result is the identity in the target group. +pub struct Batch { + alpha_x: Vec<(E::G1Affine, E::Fr)>, + alpha_x_precomp: ::Prepared, + + alpha: Vec<(E::G1Affine, E::Fr)>, + alpha_precomp: ::Prepared, + + neg_h: Vec<(E::G1Affine, E::Fr)>, + neg_h_precomp: ::Prepared, + + neg_x_n_minus_d: Vec<(E::G1Affine, E::Fr)>, + neg_x_n_minus_d_precomp: ::Prepared, + + // The value paired with [\alpha] H, accumulated in the field + // to save group operations. + value: E::Fr, + g: E::G1Affine, +} + +impl Batch { + pub fn new(srs: &SRS, n: usize) -> Self { + Batch { + alpha_x: vec![], + alpha_x_precomp: srs.h_positive_x_alpha[1].prepare(), + + alpha: vec![], + alpha_precomp: srs.h_positive_x_alpha[0].prepare(), + + neg_h: vec![], + neg_h_precomp: { + let mut tmp = srs.h_negative_x[0]; + tmp.negate(); + tmp.prepare() + }, + + neg_x_n_minus_d: vec![], + neg_x_n_minus_d_precomp: { + let mut tmp = srs.h_negative_x[srs.d - n]; + tmp.negate(); + tmp.prepare() + }, + + value: E::Fr::zero(), + g: srs.g_positive_x[0], + } + } + + pub fn new_from_key(vk: &VerifyingKey) -> Self { + Batch { + alpha_x: vec![], + alpha_x_precomp: vk.alpha_x.prepare(), + + alpha: vec![], + alpha_precomp: vk.alpha.prepare(), + + neg_h: vec![], + neg_h_precomp: vk.neg_h.prepare(), + + neg_x_n_minus_d: vec![], + neg_x_n_minus_d_precomp: vk.neg_x_n_minus_d.prepare(), + + value: E::Fr::zero(), + g: E::G1Affine::one(), + } + } + + /// add `(r*P) to the h^(alpha*x) terms, add -(r*point)*P to h^(alpha) terms + pub fn add_opening(&mut self, p: E::G1Affine, mut r: E::Fr, point: E::Fr) { + self.alpha_x.push((p, r)); + r.mul_assign(&point); + r.negate(); + self.alpha.push((p, r)); + } + + /// add (r*P) to -h^(x) terms + pub fn add_commitment(&mut self, p: E::G1Affine, r: E::Fr) { + self.neg_h.push((p, r)); + } + + /// add (r*P) to -h^(d-n) terms + pub fn add_commitment_max_n(&mut self, p: E::G1Affine, r: E::Fr) { + self.neg_x_n_minus_d.push((p, r)); + } + + /// add (r*point) to g terms for later pairing with h^(alpha) + pub fn add_opening_value(&mut self, mut r: E::Fr, point: E::Fr) { + r.mul_assign(&point); + self.value.add_assign(&r); + } + + pub fn check_all(mut self) -> bool { + self.alpha.push((self.g, self.value)); + + let alpha_x = multiexp( + self.alpha_x.iter().map(|x| &x.0), + self.alpha_x.iter().map(|x| &x.1), + ).into_affine(); + + let alpha_x = alpha_x.prepare(); + + let 
alpha = multiexp( + self.alpha.iter().map(|x| &x.0), + self.alpha.iter().map(|x| &x.1), + ).into_affine(); + + let alpha = alpha.prepare(); + + let neg_h = multiexp( + self.neg_h.iter().map(|x| &x.0), + self.neg_h.iter().map(|x| &x.1), + ).into_affine(); + + let neg_h = neg_h.prepare(); + + let neg_x_n_minus_d = multiexp( + self.neg_x_n_minus_d.iter().map(|x| &x.0), + self.neg_x_n_minus_d.iter().map(|x| &x.1), + ).into_affine(); + + let neg_x_n_minus_d = neg_x_n_minus_d.prepare(); + + E::final_exponentiation(&E::miller_loop(&[ + (&alpha_x, &self.alpha_x_precomp), + (&alpha, &self.alpha_precomp), + (&neg_h, &self.neg_h_precomp), + (&neg_x_n_minus_d, &self.neg_x_n_minus_d_precomp), + ])).unwrap() == E::Fqk::one() + } +} \ No newline at end of file diff --git a/bellman/src/sonic/helped/generator.rs b/bellman/src/sonic/helped/generator.rs new file mode 100644 index 0000000..6ef9efc --- /dev/null +++ b/bellman/src/sonic/helped/generator.rs @@ -0,0 +1,635 @@ +use rand::Rng; + +use std::sync::Arc; + +use crate::pairing::{ + Engine, + Wnaf, + CurveProjective, + CurveAffine +}; + +use crate::pairing::ff::{ + PrimeField, + Field +}; + +use super::{ + Parameters, + VerifyingKey +}; + +use crate::{ + SynthesisError, + Circuit, + ConstraintSystem, + LinearCombination, + Variable, + Index +}; + +use crate::domain::{ + Scalar +}; + +use crate::multicore::{ + Worker +}; + +use std::marker::PhantomData; + +use crate::sonic::cs::{Backend, SynthesisDriver}; +use crate::sonic::srs::SRS; +use crate::sonic::cs::LinearCombination as SonicLinearCombination; +use crate::sonic::cs::Circuit as SonicCircuit; +use crate::sonic::cs::ConstraintSystem as SonicConstraintSystem; +use crate::sonic::cs::Variable as SonicVariable; +use crate::sonic::cs::Coeff; +use crate::sonic::sonic::{AdaptorCircuit}; +use super::parameters::NUM_BLINDINGS; +use crate::sonic::sonic::NonassigningSynthesizer; +use crate::sonic::sonic::PermutationSynthesizer; +use crate::sonic::sonic::{Basic, Preprocess}; + +use crate::verbose_flag; + +/// Generates a random common reference string for +/// a circuit. 
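+///
+/// A usage sketch under the assumption that `MyCircuit` implements bellman's
+/// `Circuit` trait and `rng` is a `rand::Rng`; both names are placeholders
+/// and type annotations are elided. The prover entry point `create_proof`
+/// lives in the sibling `adapted_prover` module.
+///
+/// ```ignore
+/// let params = generate_random_parameters(MyCircuit, &mut rng)?;
+/// let proof = create_proof(MyCircuit, &params)?;
+/// ```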
+pub fn generate_random_parameters( + circuit: C, + rng: &mut R +) -> Result, SynthesisError> + where E: Engine, C: Circuit, R: Rng +{ + let alpha = rng.gen(); + let x = rng.gen(); + + generate_parameters::( + circuit, + alpha, + x + ) +} + +/// This is our assembly structure that we'll use to synthesize the +/// circuit into +#[derive(Clone, Debug)] +pub struct CircuitParameters { + pub num_inputs: usize, + pub num_aux: usize, + pub num_constraints: usize, + pub k_map: Vec, + pub n: usize, + pub q: usize, + _marker: PhantomData +} + +/// This is our assembly structure that we'll use to synthesize the +/// circuit into +struct GeneratorAssembly<'a, E: Engine, CS: SonicConstraintSystem + 'a> { + cs: &'a mut CS, + num_inputs: usize, + num_aux: usize, + num_constraints: usize, + _marker: PhantomData +} + +impl<'a, E: Engine, CS: SonicConstraintSystem + 'a> crate::ConstraintSystem + for GeneratorAssembly<'a, E, CS> +{ + type Root = Self; + + // this is an important change + fn one() -> crate::Variable { + crate::Variable::new_unchecked(crate::Index::Input(1)) + } + + fn alloc(&mut self, _: A, f: F) -> Result + where + F: FnOnce() -> Result, + A: FnOnce() -> AR, + AR: Into, + { + self.num_aux += 1; + + let var = self.cs.alloc(|| { + f().map_err(|_| crate::SynthesisError::AssignmentMissing) + }).map_err(|_| crate::SynthesisError::AssignmentMissing)?; + + Ok(match var { + SonicVariable::A(index) => crate::Variable::new_unchecked(crate::Index::Input(index)), + SonicVariable::B(index) => crate::Variable::new_unchecked(crate::Index::Aux(index)), + _ => unreachable!(), + }) + } + + fn alloc_input( + &mut self, + _: A, + f: F, + ) -> Result + where + F: FnOnce() -> Result, + A: FnOnce() -> AR, + AR: Into, + { + self.num_inputs += 1; + + let var = self.cs.alloc_input(|| { + f().map_err(|_| crate::SynthesisError::AssignmentMissing) + }).map_err(|_| crate::SynthesisError::AssignmentMissing)?; + + Ok(match var { + SonicVariable::A(index) => crate::Variable::new_unchecked(crate::Index::Input(index)), + SonicVariable::B(index) => crate::Variable::new_unchecked(crate::Index::Aux(index)), + _ => unreachable!(), + }) + } + + fn enforce(&mut self, _: A, a: LA, b: LB, c: LC) + where + A: FnOnce() -> AR, + AR: Into, + LA: FnOnce(crate::LinearCombination) -> crate::LinearCombination, + LB: FnOnce(crate::LinearCombination) -> crate::LinearCombination, + LC: FnOnce(crate::LinearCombination) -> crate::LinearCombination, + { + fn convert(lc: crate::LinearCombination) -> SonicLinearCombination { + let mut ret = SonicLinearCombination::zero(); + + for &(v, coeff) in lc.as_ref().iter() { + let var = match v.get_unchecked() { + crate::Index::Input(i) => SonicVariable::A(i), + crate::Index::Aux(i) => SonicVariable::B(i), + }; + + ret = ret + (Coeff::Full(coeff), var); + } + + ret + } + + fn eval>( + lc: &SonicLinearCombination, + cs: &CS, + ) -> Option { + let mut ret = E::Fr::zero(); + + for &(v, coeff) in lc.as_ref().iter() { + let mut tmp = match cs.get_value(v) { + Ok(tmp) => tmp, + Err(_) => return None, + }; + coeff.multiply(&mut tmp); + ret.add_assign(&tmp); + } + + Some(ret) + } + + self.num_constraints += 1; + + let a_lc = convert(a(crate::LinearCombination::zero())); + let a_value = eval(&a_lc, &*self.cs); + let b_lc = convert(b(crate::LinearCombination::zero())); + let b_value = eval(&b_lc, &*self.cs); + let c_lc = convert(c(crate::LinearCombination::zero())); + let c_value = eval(&c_lc, &*self.cs); + + let (a, b, c) = self + .cs + .multiply(|| Ok((a_value.unwrap(), b_value.unwrap(), c_value.unwrap()))) + 
.unwrap(); + + self.cs.enforce_zero(a_lc - a); + self.cs.enforce_zero(b_lc - b); + self.cs.enforce_zero(c_lc - c); + } + + fn push_namespace(&mut self, _: N) + where + NR: Into, + N: FnOnce() -> NR, + { + // Do nothing; we don't care about namespaces in this context. + } + + fn pop_namespace(&mut self) { + // Do nothing; we don't care about namespaces in this context. + } + + fn get_root(&mut self) -> &mut Self::Root { + self + } +} + + + +/// Get circuit information such as number of input, variables, +/// constraints, and the corresponding SONIC parameters +/// k_map, n, q +pub fn get_circuit_parameters( + circuit: C, +) -> Result, SynthesisError> + where E: Engine, C: Circuit + +{ + let mut preprocess = Preprocess::new(); + + let (num_inputs, num_aux, num_constraints) = { + + let mut cs: NonassigningSynthesizer> = NonassigningSynthesizer::new(&mut preprocess); + + let one = cs.alloc_input(|| Ok(E::Fr::one())).expect("should have no issues"); + + match (one, > as SonicConstraintSystem>::ONE) { + (SonicVariable::A(1), SonicVariable::A(1)) => {}, + _ => return Err(SynthesisError::UnconstrainedVariable) + } + + let mut assembly = GeneratorAssembly::<'_, E, _> { + cs: &mut cs, + num_inputs: 0, + num_aux: 0, + num_constraints: 0, + _marker: PhantomData + }; + + circuit.synthesize(&mut assembly)?; + + (assembly.num_inputs, assembly.num_aux, assembly.num_constraints) + }; + + Ok(CircuitParameters { + num_inputs: num_inputs, + num_aux: num_aux, + num_constraints: num_constraints, + k_map: preprocess.k_map, + n: preprocess.n, + q: preprocess.q, + _marker: PhantomData + }) +} + +/// Get circuit information such as number of input, variables, +/// constraints, and the corresponding SONIC parameters +/// k_map, n, q +pub fn get_circuit_parameters_for_succinct_sonic( + circuit: C, +) -> Result, SynthesisError> + where E: Engine, C: Circuit + +{ + let mut preprocess = Preprocess::new(); + + let (num_inputs, num_aux, num_constraints) = { + + let mut cs: PermutationSynthesizer> = PermutationSynthesizer::new(&mut preprocess); + + let one = cs.alloc_input(|| Ok(E::Fr::one())).expect("should have no issues"); + + match (one, > as SonicConstraintSystem>::ONE) { + (SonicVariable::A(1), SonicVariable::A(1)) => {}, + _ => return Err(SynthesisError::UnconstrainedVariable) + } + + let mut assembly = GeneratorAssembly::<'_, E, _> { + cs: &mut cs, + num_inputs: 0, + num_aux: 0, + num_constraints: 0, + _marker: PhantomData + }; + + circuit.synthesize(&mut assembly)?; + + (assembly.num_inputs, assembly.num_aux, assembly.num_constraints) + }; + + Ok(CircuitParameters { + num_inputs: num_inputs, + num_aux: num_aux, + num_constraints: num_constraints, + k_map: preprocess.k_map, + n: preprocess.n, + q: preprocess.q, + _marker: PhantomData + }) +} + +pub fn generate_parameters( + circuit: C, + alpha: E::Fr, + x: E::Fr +) -> Result, SynthesisError> + where E: Engine, C: Circuit +{ + let circuit_parameters = get_circuit_parameters::(circuit)?; + let min_d = circuit_parameters.n * 4 + 2*NUM_BLINDINGS; + + let srs = generate_srs(alpha, x, min_d)?; + + let parameters = generate_parameters_on_srs_and_information::(&srs, circuit_parameters)?; + + Ok(parameters) +} + +pub fn generate_parameters_on_srs( + circuit: C, + srs: &SRS, +) -> Result, SynthesisError> + where E: Engine, C: Circuit +{ + let circuit_parameters = get_circuit_parameters::(circuit)?; + let parameters = generate_parameters_on_srs_and_information(&srs, circuit_parameters)?; + + Ok(parameters) +} + +pub fn generate_parameters_on_srs_and_information( + srs: &SRS, 
+ information: CircuitParameters +) -> Result, SynthesisError> +{ + assert!(srs.d >= information.n * 4 + 2*NUM_BLINDINGS); + let min_d = information.n * 4 + 2*NUM_BLINDINGS; + + let trimmed_srs: SRS = SRS { + d: min_d, + g_negative_x: srs.g_negative_x[0..min_d+1].to_vec(), + g_positive_x: srs.g_positive_x[0..min_d+1].to_vec().clone(), + + h_negative_x: srs.h_negative_x[0..min_d+1].to_vec(), + h_positive_x: srs.h_positive_x[0..min_d+1].to_vec(), + + g_negative_x_alpha: srs.g_negative_x_alpha[0..min_d].to_vec(), + g_positive_x_alpha: srs.g_positive_x_alpha[0..min_d].to_vec(), + + h_negative_x_alpha: srs.h_negative_x_alpha[0..min_d+1].to_vec(), + h_positive_x_alpha: srs.h_positive_x_alpha[0..min_d+1].to_vec(), + + }; + + let vk = VerifyingKey { + alpha_x: trimmed_srs.h_positive_x_alpha[1], + + alpha: trimmed_srs.h_positive_x_alpha[0], + + neg_h: { + let mut tmp = trimmed_srs.h_negative_x[0]; + tmp.negate(); + + tmp + }, + + neg_x_n_minus_d: { + let mut tmp = trimmed_srs.h_negative_x[trimmed_srs.d - information.n]; + tmp.negate(); + + tmp + }, + + k_map: information.k_map, + n: information.n, + q: information.q + }; + + Ok(Parameters{ + vk: vk, + srs: trimmed_srs + }) +} + +pub fn generate_srs( + alpha: E::Fr, + x: E::Fr, + d: usize +) -> Result, SynthesisError> { + let verbose = verbose_flag(); + + let g1 = E::G1Affine::one().into_projective(); + let g2 = E::G2Affine::one().into_projective(); + + // Compute G1 window table + let mut g1_wnaf = Wnaf::new(); + let g1_wnaf = g1_wnaf.base(g1, 4*d); + + // Compute G2 window table + let mut g2_wnaf = Wnaf::new(); + let g2_wnaf = g2_wnaf.base(g2, 4*d); + + let x_inverse = x.inverse().ok_or(SynthesisError::UnexpectedIdentity)?; + + let worker = Worker::new(); + + let mut x_powers_positive = vec![Scalar::(E::Fr::zero()); d]; + let mut x_powers_negative = vec![Scalar::(E::Fr::zero()); d]; + { + // Compute powers of tau + if verbose {eprintln!("computing powers of x...")}; + + let start = std::time::Instant::now(); + + { + worker.scope(d, |scope, chunk| { + for (i, x_powers) in x_powers_positive.chunks_mut(chunk).enumerate() + { + scope.spawn(move |_| { + let mut current_power = x.pow(&[(i*chunk + 1) as u64]); + + for p in x_powers { + p.0 = current_power; + current_power.mul_assign(&x); + } + }); + } + }); + } + { + worker.scope(d, |scope, chunk| { + for (i, x_powers) in x_powers_negative.chunks_mut(chunk).enumerate() + { + scope.spawn(move |_| { + let mut current_power = x_inverse.pow(&[(i*chunk + 1) as u64]); + + for p in x_powers { + p.0 = current_power; + current_power.mul_assign(&x_inverse); + } + }); + } + }); + } + if verbose {eprintln!("powers of x done in {} s", start.elapsed().as_millis() as f64 / 1000.0);}; + } + + // we will later add zero powers to g_x, h_x, h_x_alpha + let mut g_negative_x = vec![E::G1::one(); d]; + let mut g_positive_x = vec![E::G1::one(); d]; + + let mut h_negative_x = vec![E::G2::one(); d]; + let mut h_positive_x = vec![E::G2::one(); d]; + + let mut g_negative_x_alpha = vec![E::G1::one(); d]; + let mut g_positive_x_alpha = vec![E::G1::one(); d]; + + let mut h_negative_x_alpha = vec![E::G2::one(); d]; + let mut h_positive_x_alpha = vec![E::G2::one(); d]; + + fn eval( + // wNAF window tables + g1_wnaf: &Wnaf>, + g2_wnaf: &Wnaf>, + + powers_of_x: &[Scalar], + + g_x: &mut [E::G1], + g_x_alpha: &mut [E::G1], + h_x: &mut [E::G2], + h_x_alpha: &mut [E::G2], + + // Trapdoors + alpha: &E::Fr, + + // Worker + worker: &Worker + ) + + { + // Sanity check + assert_eq!(g_x.len(), powers_of_x.len()); + assert_eq!(g_x.len(), 
g_x_alpha.len()); + assert_eq!(g_x.len(), h_x.len()); + assert_eq!(g_x.len(), h_x_alpha.len()); + + // Evaluate polynomials in multiple threads + worker.scope(g_x.len(), |scope, chunk| { + for ((((x, g_x), g_x_alpha), h_x), h_x_alpha) in powers_of_x.chunks(chunk) + .zip(g_x.chunks_mut(chunk)) + .zip(g_x_alpha.chunks_mut(chunk)) + .zip(h_x.chunks_mut(chunk)) + .zip(h_x_alpha.chunks_mut(chunk)) + { + let mut g1_wnaf = g1_wnaf.shared(); + let mut g2_wnaf = g2_wnaf.shared(); + + scope.spawn(move |_| { + for ((((x, g_x), g_x_alpha), h_x), h_x_alpha) in x.iter() + .zip(g_x.iter_mut()) + .zip(g_x_alpha.iter_mut()) + .zip(h_x.iter_mut()) + .zip(h_x_alpha.iter_mut()) + { + let mut x_alpha = x.0; + x_alpha.mul_assign(&alpha); + + *g_x = g1_wnaf.scalar(x.0.into_repr()); + *h_x = g2_wnaf.scalar(x.0.into_repr()); + + *g_x_alpha = g1_wnaf.scalar(x_alpha.into_repr()); + *h_x_alpha = g2_wnaf.scalar(x_alpha.into_repr()); + } + + // Batch normalize + E::G1::batch_normalization(g_x); + E::G1::batch_normalization(g_x_alpha); + E::G2::batch_normalization(h_x); + E::G2::batch_normalization(h_x_alpha); + }); + }; + }); + } + + let start = std::time::Instant::now(); + + // Evaluate for positive powers. + eval( + &g1_wnaf, + &g2_wnaf, + &x_powers_positive, + &mut g_positive_x[..], + &mut g_positive_x_alpha[..], + &mut h_positive_x[..], + &mut h_positive_x_alpha[..], + &alpha, + &worker + ); + + // Evaluate for negative powers + eval( + &g1_wnaf, + &g2_wnaf, + &x_powers_negative, + &mut g_negative_x[..], + &mut g_negative_x_alpha[..], + &mut h_negative_x[..], + &mut h_negative_x_alpha[..], + &alpha, + &worker + ); + + if verbose {eprintln!("evaluating points done in {} s", start.elapsed().as_millis() as f64 / 1000.0);}; + + let g1 = g1.into_affine(); + let g2 = g2.into_affine(); + + let h_alpha = g2.mul(alpha.into_repr()).into_affine(); + + let g_negative_x = { + let mut tmp = vec![g1]; + tmp.extend(g_negative_x.into_iter().map(|e| e.into_affine())); + + tmp + }; + let g_positive_x = { + let mut tmp = vec![g1]; + tmp.extend(g_positive_x.into_iter().map(|e| e.into_affine())); + + tmp + }; + + let h_negative_x = { + let mut tmp = vec![g2]; + tmp.extend(h_negative_x.into_iter().map(|e| e.into_affine())); + + tmp + }; + let h_positive_x = { + let mut tmp = vec![g2]; + tmp.extend(h_positive_x.into_iter().map(|e| e.into_affine())); + + tmp + }; + + let g_negative_x_alpha = g_negative_x_alpha.into_iter().map(|e| e.into_affine()).collect(); + let g_positive_x_alpha = g_positive_x_alpha.into_iter().map(|e| e.into_affine()).collect(); + + let h_negative_x_alpha = { + let mut tmp = vec![h_alpha]; + tmp.extend(h_negative_x_alpha.into_iter().map(|e| e.into_affine())); + + tmp + }; + let h_positive_x_alpha = { + let mut tmp = vec![h_alpha]; + tmp.extend(h_positive_x_alpha.into_iter().map(|e| e.into_affine())); + + tmp + }; + + Ok(SRS { + d: d, + g_negative_x: g_negative_x, + g_positive_x: g_positive_x, + + h_negative_x: h_negative_x, + h_positive_x: h_positive_x, + + g_negative_x_alpha: g_negative_x_alpha, + g_positive_x_alpha: g_positive_x_alpha, + + h_negative_x_alpha: h_negative_x_alpha, + h_positive_x_alpha: h_positive_x_alpha, + } + ) +} \ No newline at end of file diff --git a/bellman/src/sonic/helped/helper.rs b/bellman/src/sonic/helped/helper.rs new file mode 100644 index 0000000..30a0a3a --- /dev/null +++ b/bellman/src/sonic/helped/helper.rs @@ -0,0 +1,220 @@ +use crate::pairing::ff::{Field}; +use crate::pairing::{Engine, CurveProjective}; +use std::marker::PhantomData; + +use super::{Proof, SxyAdvice}; +use 
super::batch::Batch; +use super::poly::{SxEval, SyEval}; +use super::Parameters; + +use crate::SynthesisError; + +use crate::sonic::transcript::{Transcript, TranscriptProtocol}; +use crate::sonic::util::*; +use crate::sonic::cs::{Backend, SynthesisDriver}; +use crate::sonic::cs::{Circuit, Variable, Coeff}; +use crate::sonic::srs::SRS; +use crate::sonic::sonic::CountNandQ; + +#[derive(Clone)] +pub struct Aggregate { + // Commitment to s(z, Y) + pub c: E::G1Affine, + // We have to open each of the S commitments to a random point `z` + pub s_opening: E::G1Affine, + // We have to open C to each constituent `y` + pub c_openings: Vec<(E::G1Affine, E::Fr)>, + // Then we have to finally open C + pub opening: E::G1Affine, + + pub z: E::Fr, + pub w: E::Fr, +} + +pub fn create_aggregate, S: SynthesisDriver>( + circuit: &C, + inputs: &[(Proof, SxyAdvice)], + params: &Parameters, +) -> Aggregate +{ + let n = params.vk.n; + let q = params.vk.q; + + create_aggregate_on_srs_using_information::(circuit, inputs, ¶ms.srs, n, q) +} + +pub fn create_aggregate_on_srs, S: SynthesisDriver>( + circuit: &C, + inputs: &[(Proof, SxyAdvice)], + srs: &SRS, +) -> Aggregate +{ + // TODO: precompute this? + let (n, q) = { + let mut tmp = CountNandQ::::new(); + + S::synthesize(&mut tmp, circuit).unwrap(); // TODO + + (tmp.n, tmp.q) + }; + + create_aggregate_on_srs_using_information::(circuit, inputs, srs, n, q) +} + +pub fn create_aggregate_on_srs_using_information, S: SynthesisDriver>( + circuit: &C, + inputs: &[(Proof, SxyAdvice)], + srs: &SRS, + n: usize, + q: usize, +) -> Aggregate +{ + let mut transcript = Transcript::new(&[]); + let mut y_values: Vec = Vec::with_capacity(inputs.len()); + for &(ref proof, ref sxyadvice) in inputs { + { + let mut transcript = Transcript::new(&[]); + transcript.commit_point(&proof.r); + y_values.push(transcript.get_challenge_scalar()); + } + + transcript.commit_point(&sxyadvice.s); + } + + let z: E::Fr = transcript.get_challenge_scalar(); + + // Compute s(z, Y) + let (s_poly_negative, s_poly_positive) = { + let mut tmp = SyEval::new(z, n, q); + S::synthesize(&mut tmp, circuit).unwrap(); // TODO + + tmp.poly() + }; + + // Compute C = g^{s(z, x)} + let c = multiexp( + srs.g_positive_x_alpha[0..(n + q)] + .iter() + .chain_ext(srs.g_negative_x_alpha[0..n].iter()), + s_poly_positive.iter().chain_ext(s_poly_negative.iter()) + ).into_affine(); + + transcript.commit_point(&c); + + // Open C at w + let w: E::Fr = transcript.get_challenge_scalar(); + + let value = compute_value::(&w, &s_poly_positive, &s_poly_negative); + + let opening = { + let mut value = value; + value.negate(); + + polynomial_commitment_opening( + n, + 0, + s_poly_negative.iter().rev().chain_ext(Some(value).iter()).chain_ext(s_poly_positive.iter()), + w, + &srs + ) + }; + + // Let's open up C to every y. 
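+    // A hedged note on the helper below: `compute_value` evaluates the already-synthesized
+    // polynomial s(z, Y) at a concrete point Y = y by summing its positive- and
+    // negative-power parts,
+    //
+    //     s(z, y) = sum_{i >= 1} p_i * y^i  +  sum_{i >= 1} m_i * y^{-i},
+    //
+    // where p_i are the entries of `poly_positive` and m_i the entries of `poly_negative`.
+    // This reading assumes `evaluate_at_consequitive_powers(coeffs, first, base)` from
+    // `sonic::util` (not shown in this hunk) returns
+    // coeffs[0]*first + coeffs[1]*first*base + coeffs[2]*first*base^2 + ... .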
+ fn compute_value(y: &E::Fr, poly_positive: &[E::Fr], poly_negative: &[E::Fr]) -> E::Fr { + let mut value = E::Fr::zero(); + let yinv = y.inverse().unwrap(); // TODO + + let positive_powers_contrib = evaluate_at_consequitive_powers(poly_positive, *y, *y); + let negative_powers_contrib = evaluate_at_consequitive_powers(poly_negative, yinv, yinv); + value.add_assign(&positive_powers_contrib); + value.add_assign(&negative_powers_contrib); + + value + } + + use std::time::Instant; + let start = Instant::now(); + + let mut c_openings = vec![]; + for y in &y_values { + let value = compute_value::(y, &s_poly_positive, &s_poly_negative); + + let opening = { + let mut value = value; + value.negate(); + + polynomial_commitment_opening( + n, + 0, + s_poly_negative.iter().rev().chain_ext(Some(value).iter()).chain_ext(s_poly_positive.iter()), + *y, + &srs + ) + }; + + c_openings.push((opening, value)); + } + + println!("Evaluation of s(z, Y) taken {:?}", start.elapsed()); + + // Okay, great. Now we need to open up each S at the same point z to the same value. + // Since we're opening up all the S's at the same point, we create a bunch of random + // challenges instead and open up a random linear combination. + + let mut poly_negative = vec![E::Fr::zero(); n]; + let mut poly_positive = vec![E::Fr::zero(); 2*n]; + let mut expected_value = E::Fr::zero(); + + // TODO: this part can be further parallelized due to synthesis of S(X, y) being singlethreaded + let start = Instant::now(); + + for (y, c_opening) in y_values.iter().zip(c_openings.iter()) { + // Compute s(X, y_i) + let (s_poly_negative, s_poly_positive) = { + let mut tmp = SxEval::new(*y, n); + S::synthesize(&mut tmp, circuit).unwrap(); // TODO + + tmp.poly() + }; + + let mut value = c_opening.1; + let r: E::Fr = transcript.get_challenge_scalar(); + value.mul_assign(&r); + expected_value.add_assign(&value); + + mul_add_polynomials(& mut poly_negative[..], &s_poly_negative[..], r); + mul_add_polynomials(& mut poly_positive[..], &s_poly_positive[..], r); + + } + + println!("Re-evaluation of {} S polynomials taken {:?}", y_values.len(), start.elapsed()); + + let s_opening = { + let mut value = expected_value; + value.negate(); + + polynomial_commitment_opening( + n, + 0, + poly_negative.iter().rev().chain_ext(Some(value).iter()).chain_ext(poly_positive.iter()), + z, + &srs + ) + + }; + + Aggregate { + // Commitment to s(z, Y) + c, + // We have to open each of the S commitments to a random point `z` + s_opening, + // We have to open C to each constituent `y` + c_openings, + // Then we have to finally open C + opening, + + z: z, + + w: w + } +} \ No newline at end of file diff --git a/bellman/src/sonic/helped/mod.rs b/bellman/src/sonic/helped/mod.rs new file mode 100644 index 0000000..c73396a --- /dev/null +++ b/bellman/src/sonic/helped/mod.rs @@ -0,0 +1,51 @@ +use crate::pairing::ff::{Field}; +use crate::pairing::{Engine, CurveProjective}; +use std::marker::PhantomData; + +pub mod batch; +pub mod poly; +pub mod prover; +pub mod verifier; +pub mod helper; +pub mod parameters; +pub mod generator; +mod adapted_prover; +mod adapted_verifier; +mod adapted_helper; + +pub use self::batch::{Batch}; +pub use self::verifier::{MultiVerifier}; + +pub use self::generator::{ + CircuitParameters, + generate_parameters, + generate_parameters_on_srs, + generate_parameters_on_srs_and_information, + generate_random_parameters, + generate_srs, + get_circuit_parameters, + get_circuit_parameters_for_succinct_sonic +}; +pub use self::parameters::{ + Proof, + SxyAdvice, + 
Parameters, + VerifyingKey, + PreparedVerifyingKey +}; +pub use self::adapted_prover::{ + create_advice, + create_advice_on_srs, + create_advice_on_information_and_srs, + create_proof, + create_proof_on_srs, +}; + +pub use self::adapted_verifier::{ + verify_proofs, + verify_aggregate +}; + +pub use self::adapted_helper::{ + create_aggregate +}; \ No newline at end of file diff --git a/bellman/src/sonic/helped/parameters.rs b/bellman/src/sonic/helped/parameters.rs new file mode 100644 index 0000000..cdf6e0f --- /dev/null +++ b/bellman/src/sonic/helped/parameters.rs @@ -0,0 +1,469 @@ +use crate::pairing::ff::{ + Field, + PrimeField, + PrimeFieldRepr +}; + +use crate::pairing::{ + Engine, + CurveAffine, + EncodedPoint +}; + +use crate::{ + SynthesisError +}; + +use crate::source::SourceBuilder; +use std::io::{self, Read, Write}; +use std::sync::Arc; +use byteorder::{BigEndian, WriteBytesExt, ReadBytesExt}; + +pub const NUM_BLINDINGS: usize = 6; +// pub const NUM_BLINDINGS: usize = 0; + +#[derive(Clone, Debug, Eq)] +pub struct SxyAdvice { + pub s: E::G1Affine, + pub opening: E::G1Affine, + pub szy: E::Fr, +} + +impl PartialEq for SxyAdvice { + fn eq(&self, other: &SxyAdvice) -> bool { + self.s == other.s && + self.opening == other.opening && + self.szy == other.szy + } +} + +#[derive(Clone, Debug, Eq)] +pub struct Proof { + pub r: E::G1Affine, + pub t: E::G1Affine, + pub rz: E::Fr, + pub rzy: E::Fr, + pub z_opening: E::G1Affine, + pub zy_opening: E::G1Affine +} + +impl PartialEq for Proof { + fn eq(&self, other: &Proof) -> bool { + self.r == other.r && + self.t == other.t && + self.rz == other.rz && + self.rzy == other.rzy && + self.z_opening == other.z_opening && + self.zy_opening == other.zy_opening + } +} + +impl Proof { + pub fn write( + &self, + mut writer: W + ) -> io::Result<()> + { + use crate::pairing::ff::{PrimeField, PrimeFieldRepr}; + writer.write_all(self.r.into_compressed().as_ref())?; + writer.write_all(self.t.into_compressed().as_ref())?; + let mut buffer = vec![]; + self.rz.into_repr().write_be(&mut buffer)?; + writer.write_all(&buffer[..])?; + let mut buffer = vec![]; + self.rzy.into_repr().write_be(&mut buffer)?; + writer.write_all(&buffer[..])?; + writer.write_all(self.z_opening.into_compressed().as_ref())?; + writer.write_all(self.zy_opening.into_compressed().as_ref())?; + + Ok(()) + } + + pub fn read( + mut reader: R + ) -> io::Result + { + let mut g1_repr = ::Compressed::empty(); + let mut fr_repr = E::Fr::zero().into_repr(); + + reader.read_exact(g1_repr.as_mut())?; + let r = g1_repr + .into_affine() + .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e)) + .and_then(|e| if e.is_zero() { + Err(io::Error::new(io::ErrorKind::InvalidData, "point at infinity")) + } else { + Ok(e) + })?; + + reader.read_exact(g1_repr.as_mut())?; + let t = g1_repr + .into_affine() + .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e)) + .and_then(|e| if e.is_zero() { + Err(io::Error::new(io::ErrorKind::InvalidData, "point at infinity")) + } else { + Ok(e) + })?; + + fr_repr.read_be(&mut reader)?; + let rz = E::Fr::from_repr(fr_repr) + .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e)) + .and_then(|e| if e.is_zero() { + Err(io::Error::new(io::ErrorKind::InvalidData, "field element is zero")) + } else { + Ok(e) + })?; + + fr_repr.read_be(&mut reader)?; + let rzy = E::Fr::from_repr(fr_repr) + .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e)) + .and_then(|e| if e.is_zero() { + Err(io::Error::new(io::ErrorKind::InvalidData, "field element is zero")) + } else { + 
Ok(e) + })?; + + + reader.read_exact(g1_repr.as_mut())?; + let z_opening = g1_repr + .into_affine() + .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e)) + .and_then(|e| if e.is_zero() { + Err(io::Error::new(io::ErrorKind::InvalidData, "point at infinity")) + } else { + Ok(e) + })?; + + reader.read_exact(g1_repr.as_mut())?; + let zy_opening = g1_repr + .into_affine() + .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e)) + .and_then(|e| if e.is_zero() { + Err(io::Error::new(io::ErrorKind::InvalidData, "point at infinity")) + } else { + Ok(e) + })?; + + Ok(Proof { + r: r, + t: t, + rz: rz, + rzy: rzy, + z_opening: z_opening, + zy_opening: zy_opening + }) + } +} + +#[derive(Clone, Debug, Eq)] +pub struct VerifyingKey { + pub alpha_x: E::G2Affine, + + pub alpha: E::G2Affine, + + pub neg_h: E::G2Affine, + + pub neg_x_n_minus_d: E::G2Affine, + + pub k_map: Vec, + + pub n: usize, + + pub q: usize +} + +impl PartialEq for VerifyingKey { + fn eq(&self, other: &VerifyingKey) -> bool { + self.alpha_x == other.alpha_x && + self.alpha == other.alpha && + self.neg_h == other.neg_h && + self.neg_x_n_minus_d == other.neg_x_n_minus_d && + self.k_map == other.k_map && + self.n == other.n && + self.q == other.q + } +} + +impl VerifyingKey { + pub fn write( + &self, + mut writer: W + ) -> io::Result<()> + { + writer.write_all(self.alpha_x.into_uncompressed().as_ref())?; + writer.write_all(self.alpha.into_uncompressed().as_ref())?; + writer.write_all(self.neg_h.into_uncompressed().as_ref())?; + writer.write_all(self.neg_x_n_minus_d.into_uncompressed().as_ref())?; + + writer.write_u32::(self.k_map.len() as u32)?; + for k in &self.k_map { + writer.write_u32::(*k as u32)?; + } + writer.write_u32::(self.n as u32)?; + writer.write_u32::(self.q as u32)?; + + Ok(()) + } + + pub fn read( + mut reader: R + ) -> io::Result + { + let mut g2_repr = ::Uncompressed::empty(); + + reader.read_exact(g2_repr.as_mut())?; + let alpha_x = g2_repr.into_affine().map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?; + + reader.read_exact(g2_repr.as_mut())?; + let alpha = g2_repr.into_affine().map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?; + + reader.read_exact(g2_repr.as_mut())?; + let neg_h = g2_repr.into_affine().map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?; + + reader.read_exact(g2_repr.as_mut())?; + let neg_x_n_minus_d = g2_repr.into_affine().map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?; + + let k_map_len = reader.read_u32::()? as usize; + + let mut k_map = vec![]; + + for _ in 0..k_map_len { + let k = reader.read_u32::()? as usize; + + k_map.push(k); + } + + let n = reader.read_u32::()? as usize; + + let q = reader.read_u32::()? 
as usize; + + Ok(VerifyingKey { + alpha_x: alpha_x, + alpha: alpha, + neg_h: neg_h, + neg_x_n_minus_d: neg_x_n_minus_d, + k_map: k_map, + n: n, + q: q + }) + } +} + +use crate::sonic::cs::{Backend, SynthesisDriver}; +use crate::sonic::srs::SRS; +use crate::sonic::cs::Circuit as SonicCircuit; +use crate::sonic::sonic::{Basic, Preprocess}; +use std::marker::PhantomData; + + +impl VerifyingKey { + pub fn new, S: SynthesisDriver>(circuit: C, srs: &SRS) -> Result { + let mut preprocess = Preprocess::new(); + + S::synthesize(&mut preprocess, &circuit)?; + + Ok(Self { + alpha_x: srs.h_positive_x_alpha[1], + + alpha: srs.h_positive_x_alpha[0], + + neg_h: { + let mut tmp = srs.h_negative_x[0]; + tmp.negate(); + + tmp + }, + + neg_x_n_minus_d: { + let mut tmp = srs.h_negative_x[srs.d - preprocess.n]; + tmp.negate(); + + tmp + }, + + k_map: preprocess.k_map, + n: preprocess.n, + q: preprocess.q + }) + } +} + +pub struct PreparedVerifyingKey { + alpha_x: ::Prepared, + alpha: ::Prepared, + neg_h: ::Prepared, + neg_x_n_minus_d: ::Prepared, + k_map: Vec, + n: usize, + q: usize +} + +#[derive(Clone, Eq)] +pub struct Parameters { + pub vk: VerifyingKey, + + pub srs: SRS, + // pub d: usize, + + // // g^{x^0}, g^{x^{-1}}, g^{x^{-2}}, ..., g^{x^{-d}} + // pub g_negative_x: Arc>, + + // // g^{x^0}, g^{x^{1}}, g^{x^{2}}, ..., g^{x^{d}} + // pub g_positive_x: Arc>, + + // // g^{x^0}, g^{x^{-1}}, g^{x^{-2}}, ..., g^{x^{-d}} + // pub h_negative_x: Arc>, + + // // g^{x^0}, g^{x^{1}}, g^{x^{2}}, ..., g^{x^{d}} + // pub h_positive_x: Arc>, + + // // alpha*(g^{x^{-1}}, g^{x^{-2}}, ..., g^{x^{-d}}) + // pub g_negative_x_alpha: Arc>, + + // // alpha*(g^{x^{1}}, g^{x^{2}}, ..., g^{x^{d}}) + // pub g_positive_x_alpha: Arc>, + + // // alpha*(h^{x^0}, h^{x^{-1}}, g^{x^{-2}}, ..., g^{x^{-d}}) + // pub h_negative_x_alpha: Arc>, + + // // alpha*(h^{x^0}, g^{x^{1}}, g^{x^{2}}, ..., g^{x^{d}}) + // pub h_positive_x_alpha: Arc>, +} + +impl PartialEq for Parameters { + fn eq(&self, other: &Parameters) -> bool { + self.vk == other.vk && + self.srs == other.srs + } +} + +impl Parameters { + pub fn write( + &self, + mut writer: W + ) -> io::Result<()> + { + self.vk.write(&mut writer)?; + self.srs.write(&mut writer)?; + + Ok(()) + } + + pub fn read( + mut reader: R, + checked: bool + ) -> io::Result + { + let vk = VerifyingKey::::read(&mut reader)?; + let srs = SRS::::read(&mut reader, checked)?; + + Ok(Parameters { + vk: vk, + srs: srs + }) + } +} + +#[test] +fn parameters_generation() { + use crate::{ConstraintSystem, Circuit}; + + use crate::pairing::bls12_381::{Bls12, Fr}; + + #[derive(Clone)] + struct MySillyCircuit { + a: Option, + b: Option + } + + impl Circuit for MySillyCircuit { + fn synthesize>( + self, + cs: &mut CS + ) -> Result<(), SynthesisError> + { + let a = cs.alloc(|| "a", || self.a.ok_or(SynthesisError::AssignmentMissing))?; + let b = cs.alloc(|| "b", || self.b.ok_or(SynthesisError::AssignmentMissing))?; + let c = cs.alloc_input(|| "c", || { + let mut a = self.a.ok_or(SynthesisError::AssignmentMissing)?; + let b = self.b.ok_or(SynthesisError::AssignmentMissing)?; + + a.mul_assign(&b); + Ok(a) + })?; + + cs.enforce( + || "a*b=c", + |lc| lc + a, + |lc| lc + b, + |lc| lc + c + ); + + Ok(()) + } + } + + use rand::{Rng, Rand, thread_rng}; + use super::{generate_parameters, get_circuit_parameters, generate_srs, generate_parameters_on_srs_and_information}; + use super::adapted_prover::create_proof; + + let info = get_circuit_parameters::(MySillyCircuit { a: None, b: None }).expect("Must get circuit info"); + 
println!("{:?}", info); + let rng = &mut thread_rng(); + + let x: Fr = rng.gen(); + let alpha: Fr = rng.gen(); + + let params = generate_parameters::(MySillyCircuit { a: None, b: None }, alpha, x).unwrap(); + let srs = generate_srs::(alpha, x, info.n * 100).unwrap(); + let naive_srs = SRS::::new( + info.n * 100, + x, + alpha, + ); + + assert!(srs == naive_srs); + + let params_on_srs = generate_parameters_on_srs_and_information::(&srs, info.clone()).unwrap(); + + assert!(params == params_on_srs); + + { + let mut v = vec![]; + + params.write(&mut v).unwrap(); + + let de_params = Parameters::read(&v[..], true).unwrap(); + assert!(params == de_params); + + let de_params = Parameters::read(&v[..], false).unwrap(); + assert!(params == de_params); + } + + for _ in 0..100 { + let a = Fr::rand(rng); + let b = Fr::rand(rng); + let mut c = a; + c.mul_assign(&b); + + let proof = create_proof ( + MySillyCircuit { + a: Some(a), + b: Some(b) + }, + ¶ms, + ).unwrap(); + + let mut v = vec![]; + proof.write(&mut v).unwrap(); + + assert_eq!(v.len(), 256); + + let de_proof = Proof::read(&v[..]).unwrap(); + assert!(proof == de_proof); + + // assert!(verify_proof(&pvk, &proof, &[c]).unwrap()); + // assert!(!verify_proof(&pvk, &proof, &[a]).unwrap()); + } +} \ No newline at end of file diff --git a/bellman/src/sonic/helped/poly.rs b/bellman/src/sonic/helped/poly.rs new file mode 100644 index 0000000..e54b276 --- /dev/null +++ b/bellman/src/sonic/helped/poly.rs @@ -0,0 +1,323 @@ +use crate::pairing::ff::{Field}; +use crate::pairing::{Engine, CurveProjective}; +use std::marker::PhantomData; + +use crate::sonic::cs::{Backend}; +use crate::sonic::cs::{Coeff, Variable, LinearCombination}; +use crate::sonic::util::*; + +/* +s(X, Y) = \sum\limits_{i=1}^N u_i(Y) X^{-i} + + \sum\limits_{i=1}^N v_i(Y) X^{i} + + \sum\limits_{i=1}^N w_i(Y) X^{i+N} + +where + + u_i(Y) = \sum\limits_{q=1}^Q Y^{q+N} u_{i,q} + v_i(Y) = \sum\limits_{q=1}^Q Y^{q+N} v_{i,q} + w_i(Y) = -Y^i + -Y^{-i} + \sum\limits_{q=1}^Q Y^{q+N} w_{i,q} + +*/ +#[derive(Clone)] +pub struct SxEval { + y: E::Fr, + + // current value of y^{q+N} + yqn: E::Fr, + + // x^{-i} (\sum\limits_{q=1}^Q y^{q+N} u_{q,i}) + u: Vec, + // x^{i} (\sum\limits_{q=1}^Q y^{q+N} v_{q,i}) + v: Vec, + // x^{i+N} (-y^i -y^{-i} + \sum\limits_{q=1}^Q y^{q+N} w_{q,i}) + w: Vec, + + max_n: usize, +} + +impl SxEval { + pub fn new(y: E::Fr, n: usize) -> Self { + let y_inv = y.inverse().unwrap(); // TODO + + let yqn = y.pow(&[n as u64]); + + let u = vec![E::Fr::zero(); n]; + let v = vec![E::Fr::zero(); n]; + + let mut minus_one = E::Fr::one(); + minus_one.negate(); + + let mut w = vec![minus_one; n]; + let mut w_neg = vec![minus_one; n]; + mut_distribute_consequitive_powers(&mut w[..], y, y); + mut_distribute_consequitive_powers(&mut w_neg[..], y_inv, y_inv); + add_polynomials(&mut w[..], &w_neg[..]); + + // let mut w = vec![E::Fr::zero(); n]; + // let mut tmp1 = y; + // let mut tmp2 = y_inv; + // for w in &mut w { + // let mut new = tmp1; + // new.add_assign(&tmp2); + // new.negate(); + // *w = new; + // tmp1.mul_assign(&y); + // tmp2.mul_assign(&y_inv); + // } + + SxEval { + y, + yqn, + u, + v, + w, + max_n: n + } + } + + pub fn poly(mut self) -> (Vec, Vec) { + self.v.extend(self.w); + + (self.u, self.v) + } + + pub fn finalize(self, x: E::Fr) -> E::Fr { + let x_inv = x.inverse().unwrap(); // TODO + + let mut acc = E::Fr::zero(); + + let tmp = x_inv; + acc.add_assign(&evaluate_at_consequitive_powers(& self.u[..], tmp, tmp)); + let tmp = x; + acc.add_assign(&evaluate_at_consequitive_powers(& 
self.v[..], tmp, tmp)); + let tmp = x.pow(&[(self.v.len()+1) as u64]); + acc.add_assign(&evaluate_at_consequitive_powers(& self.w[..], tmp, x)); + + // let mut tmp = x_inv; + // for mut u in self.u { + // u.mul_assign(&tmp); + // acc.add_assign(&u); + // tmp.mul_assign(&x_inv); + // } + + // let mut tmp = x; + // for mut v in self.v { + // v.mul_assign(&tmp); + // acc.add_assign(&v); + // tmp.mul_assign(&x); + // } + // for mut w in self.w { + // w.mul_assign(&tmp); + // acc.add_assign(&w); + // tmp.mul_assign(&x); + // } + + acc + } +} + +impl<'a, E: Engine> Backend for &'a mut SxEval { + type LinearConstraintIndex = E::Fr; + + fn new_linear_constraint(&mut self) -> E::Fr { + self.yqn.mul_assign(&self.y); + + self.yqn + } + + fn get_for_q(&self, q: usize) -> Self::LinearConstraintIndex { + self.y.pow(&[(self.max_n + q) as u64]) + } + + fn insert_coefficient(&mut self, var: Variable, coeff: Coeff, y: &E::Fr) { + let acc = match var { + Variable::A(index) => { + &mut self.u[index - 1] + } + Variable::B(index) => { + &mut self.v[index - 1] + } + Variable::C(index) => { + &mut self.w[index - 1] + } + }; + + match coeff { + Coeff::Zero => { }, + Coeff::One => { + acc.add_assign(&y); + }, + Coeff::NegativeOne => { + acc.sub_assign(&y); + }, + Coeff::Full(mut val) => { + val.mul_assign(&y); + acc.add_assign(&val); + } + } + } +} + +/* +s(X, Y) = \sum\limits_{i=1}^N \sum\limits_{q=1}^Q Y^{q+N} u_{i,q} X^{-i} + + \sum\limits_{i=1}^N \sum\limits_{q=1}^Q Y^{q+N} v_{i,q} X^{i} + + \sum\limits_{i=1}^N \sum\limits_{q=1}^Q Y^{q+N} w_{i,q} X^{i+N} + - \sum\limits_{i=1}^N Y^i X^{i+N} + - \sum\limits_{i=1}^N Y^{-i} X^{i+N} +*/ +pub struct SyEval { + max_n: usize, + current_q: usize, + + // x^{-1}, ..., x^{-N} + a: Vec, + + // x^1, ..., x^{N} + b: Vec, + + // x^{N+1}, ..., x^{2*N} + c: Vec, + + // coeffs for y^1, ..., y^{N+Q} + positive_coeffs: Vec, + + // coeffs for y^{-1}, y^{-2}, ..., y^{-N} + negative_coeffs: Vec, +} + + +impl SyEval { + pub fn new(x: E::Fr, n: usize, q: usize) -> Self { + let xinv = x.inverse().unwrap(); + let mut a = vec![E::Fr::one(); n]; + let mut b = vec![E::Fr::one(); n]; + + mut_distribute_consequitive_powers(&mut a[..], xinv, xinv); + mut_distribute_consequitive_powers(&mut b[..], x, x); + + let mut c = vec![E::Fr::one(); n]; + mut_distribute_consequitive_powers(&mut c[..], x.pow(&[(n+1) as u64]), x); + + let mut minus_one = E::Fr::one(); + minus_one.negate(); + + let mut positive_coeffs = vec![minus_one; n]; + mut_distribute_consequitive_powers(&mut positive_coeffs[..], x.pow(&[(n+1) as u64]), x); + let negative_coeffs = positive_coeffs.clone(); + + positive_coeffs.resize(n + q, E::Fr::zero()); + + // let mut tmp = E::Fr::one(); + // let mut a = vec![E::Fr::zero(); n]; + // for a in &mut a { + // tmp.mul_assign(&xinv); // tmp = x^{-i} + // *a = tmp; + // } + + // let mut tmp = E::Fr::one(); + // let mut b = vec![E::Fr::zero(); n]; + // for b in &mut b { + // tmp.mul_assign(&x); // tmp = x^{i} + // *b = tmp; + // } + + // let mut positive_coeffs = vec![E::Fr::zero(); n + q]; + // let mut negative_coeffs = vec![E::Fr::zero(); n]; + + // let mut c = vec![E::Fr::zero(); n]; + // for ((c, positive_coeff), negative_coeff) in c.iter_mut().zip(&mut positive_coeffs).zip(&mut negative_coeffs) { + // tmp.mul_assign(&x); // tmp = x^{i+N} + // *c = tmp; + + // // - \sum\limits_{i=1}^N Y^i X^{i+N} + // let mut tmp = tmp; + // tmp.negate(); + // *positive_coeff = tmp; + + // // - \sum\limits_{i=1}^N Y^{-i} X^{i+N} + // *negative_coeff = tmp; + // } + + SyEval { + a, + b, + c, + 
positive_coeffs, + negative_coeffs, + current_q: 0, + max_n: n, + } + } + + pub fn poly(self) -> (Vec, Vec) { + (self.negative_coeffs, self.positive_coeffs) + } + + pub fn finalize(self, y: E::Fr) -> E::Fr { + let mut acc = E::Fr::zero(); + let yinv = y.inverse().unwrap(); // TODO + + let positive_powers_contrib = evaluate_at_consequitive_powers(& self.positive_coeffs[..], y, y); + let negative_powers_contrib = evaluate_at_consequitive_powers(& self.negative_coeffs[..], yinv, yinv); + acc.add_assign(&positive_powers_contrib); + acc.add_assign(&negative_powers_contrib); + + // let mut tmp = y; + // for mut coeff in self.positive_coeffs { + // coeff.mul_assign(&tmp); + // acc.add_assign(&coeff); + // tmp.mul_assign(&y); + // } + + // let mut tmp = yinv; + // for mut coeff in self.negative_coeffs { + // coeff.mul_assign(&tmp); + // acc.add_assign(&coeff); + // tmp.mul_assign(&yinv); + // } + + acc + } +} + +impl<'a, E: Engine> Backend for &'a mut SyEval { + type LinearConstraintIndex = usize; + + fn new_linear_constraint(&mut self) -> usize { + self.current_q += 1; + self.current_q + } + + fn get_for_q(&self, q: usize) -> Self::LinearConstraintIndex { + q + } + + fn insert_coefficient(&mut self, var: Variable, coeff: Coeff, q: &usize) { + match var { + Variable::A(index) => { + let index = index - 1; + // Y^{q+N} += X^{-i} * coeff + let mut tmp = self.a[index]; + coeff.multiply(&mut tmp); + let yindex = *q + self.max_n; + self.positive_coeffs[yindex - 1].add_assign(&tmp); + } + Variable::B(index) => { + let index = index - 1; + // Y^{q+N} += X^{i} * coeff + let mut tmp = self.b[index]; + coeff.multiply(&mut tmp); + let yindex = *q + self.max_n; + self.positive_coeffs[yindex - 1].add_assign(&tmp); + } + Variable::C(index) => { + let index = index - 1; + // Y^{q+N} += X^{i+N} * coeff + let mut tmp = self.c[index]; + coeff.multiply(&mut tmp); + let yindex = *q + self.max_n; + self.positive_coeffs[yindex - 1].add_assign(&tmp); + } + }; + } +} \ No newline at end of file diff --git a/bellman/src/sonic/helped/prover.rs b/bellman/src/sonic/helped/prover.rs new file mode 100644 index 0000000..eec3e1e --- /dev/null +++ b/bellman/src/sonic/helped/prover.rs @@ -0,0 +1,440 @@ +use crate::pairing::ff::{Field}; +use crate::pairing::{Engine, CurveProjective}; +use std::marker::PhantomData; + +use super::{Proof, SxyAdvice}; +use super::batch::Batch; +use super::poly::{SxEval, SyEval}; +use super::parameters::{Parameters, NUM_BLINDINGS}; + +use crate::SynthesisError; + +use crate::sonic::transcript::{Transcript, TranscriptProtocol}; +use crate::sonic::util::*; +use crate::sonic::cs::{Backend, SynthesisDriver}; +use crate::sonic::cs::{Circuit, Variable, Coeff}; +use crate::sonic::srs::SRS; +use crate::sonic::sonic::{CountN, Basic}; + +pub fn create_advice_on_information_and_srs, S: SynthesisDriver>( + circuit: &C, + proof: &Proof, + srs: &SRS, + n: usize +) -> Result, SynthesisError> +{ + let z: E::Fr; + let y: E::Fr; + { + let mut transcript = Transcript::new(&[]); + transcript.commit_point(&proof.r); + y = transcript.get_challenge_scalar(); + transcript.commit_point(&proof.t); + z = transcript.get_challenge_scalar(); + } + + let z_inv = z.inverse().ok_or(SynthesisError::DivisionByZero)?; + + let (s_poly_negative, s_poly_positive) = { + let mut tmp = SxEval::new(y, n); + S::synthesize(&mut tmp, circuit)?; + + tmp.poly() + }; + + // Compute S commitment + let s = multiexp( + srs.g_positive_x_alpha[0..(2 * n)] + .iter() + .chain_ext(srs.g_negative_x_alpha[0..(n)].iter()), + 
s_poly_positive.iter().chain_ext(s_poly_negative.iter()) + ).into_affine(); + + // Compute s(z, y) + let mut szy = E::Fr::zero(); + { + szy.add_assign(& evaluate_at_consequitive_powers(& s_poly_positive[..], z, z)); + szy.add_assign(& evaluate_at_consequitive_powers(& s_poly_negative[..], z_inv, z_inv)); + } + + // let mut szy = E::Fr::zero(); + // { + // let mut tmp = z; + // for &p in &s_poly_positive { + // let mut p = p; + // p.mul_assign(&tmp); + // szy.add_assign(&p); + // tmp.mul_assign(&z); + // } + // let mut tmp = z_inv; + // for &p in &s_poly_negative { + // let mut p = p; + // p.mul_assign(&tmp); + // szy.add_assign(&p); + // tmp.mul_assign(&z_inv); + // } + // } + + // Compute kate opening + let opening = { + let mut open = szy; + open.negate(); + + let poly = kate_divison( + s_poly_negative.iter().rev().chain_ext(Some(open).iter()).chain_ext(s_poly_positive.iter()), + z, + ); + + let negative_poly = poly[0..n].iter().rev(); + let positive_poly = poly[n..].iter(); + multiexp( + srs.g_negative_x[1..(negative_poly.len() + 1)].iter().chain_ext( + srs.g_positive_x[0..positive_poly.len()].iter() + ), + negative_poly.chain_ext(positive_poly) + ).into_affine() + }; + + Ok(SxyAdvice { + s, + szy, + opening + }) +} + +pub fn create_advice, S: SynthesisDriver>( + circuit: &C, + proof: &Proof, + parameters: &Parameters, +) -> Result, SynthesisError> +{ + let n = parameters.vk.n; + create_advice_on_information_and_srs::(circuit, proof, ¶meters.srs, n) +} + +pub fn create_advice_on_srs, S: SynthesisDriver>( + circuit: &C, + proof: &Proof, + srs: &SRS +) -> Result, SynthesisError> +{ + // annoying, but we need n to compute s(z, y), and this isn't + // precomputed anywhere yet + let n = { + let mut tmp = CountN::::new(); + S::synthesize(&mut tmp, circuit)?; + + tmp.n + }; + + create_advice_on_information_and_srs::(circuit, proof, srs, n) +} + +pub fn create_proof, S: SynthesisDriver>( + circuit: &C, + parameters: &Parameters +) -> Result, SynthesisError> { + create_proof_on_srs::(circuit, ¶meters.srs) +} + +extern crate rand; +use self::rand::{Rand, Rng, thread_rng}; +use crate::sonic::sonic::Wires; + +pub fn create_proof_on_srs, S: SynthesisDriver>( + circuit: &C, + srs: &SRS +) -> Result, SynthesisError> +{ + let mut wires = Wires::new(); + + S::synthesize(&mut wires, circuit)?; + + let n = wires.a.len(); + + let mut transcript = Transcript::new(&[]); + + let rng = &mut thread_rng(); + + // c_{n+1}, c_{n+2}, c_{n+3}, c_{n+4} + let blindings: Vec = (0..NUM_BLINDINGS).into_iter().map(|_| E::Fr::rand(rng)).collect(); + + // r is a commitment to r(X, 1) + let r = polynomial_commitment::( + n, + 2*n + NUM_BLINDINGS, + n, + &srs, + blindings.iter().rev() + .chain_ext(wires.c.iter().rev()) + .chain_ext(wires.b.iter().rev()) + .chain_ext(Some(E::Fr::zero()).iter()) + .chain_ext(wires.a.iter()), + ); + + transcript.commit_point(&r); + + let y: E::Fr = transcript.get_challenge_scalar(); + + // create r(X, 1) by observation that it's just a series of coefficients. 
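+    // A hedged note on the layout built below: after the reverse, entry j of `rx1` holds the
+    // coefficient of X^{j - (2n + NUM_BLINDINGS)}, so the blindings occupy the most negative
+    // powers X^{-(2n+NUM_BLINDINGS)}..X^{-(2n+1)}, the c wires the powers X^{-2n}..X^{-(n+1)},
+    // the b wires X^{-n}..X^{-1}, the constant term X^0 is zero, and the a wires sit at
+    // X^{1}..X^{n}.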
+ // Used representation is for powers X^{-2n}...X^{-n-1}, X^{-n}...X^{-1}, X^{1}...X^{n} + // Same representation is ok for r(X, Y) too cause powers always match + let mut rx1 = wires.b; + rx1.extend(wires.c); + rx1.extend(blindings.clone()); + rx1.reverse(); + rx1.push(E::Fr::zero()); + rx1.extend(wires.a); + + let mut rxy = rx1.clone(); + + let y_inv = y.inverse().ok_or(SynthesisError::DivisionByZero)?; + + // y^(-2n - num blindings) + let tmp = y_inv.pow(&[(2*n + NUM_BLINDINGS) as u64]); + mut_distribute_consequitive_powers( + &mut rxy, + tmp, + y, + ); + + // negative powers [-1, -2n], positive [1, n] + let (mut s_poly_negative, s_poly_positive) = { + let mut tmp = SxEval::new(y, n); + S::synthesize(&mut tmp, circuit)?; + + tmp.poly() + }; + + // r'(X, y) = r(X, y) + s(X, y). Note `y` - those are evaluated at the point already + let mut rxy_prime = rxy.clone(); + { + // extend to have powers [n+1, 2n] + rxy_prime.resize(4 * n + 1 + NUM_BLINDINGS, E::Fr::zero()); + s_poly_negative.reverse(); + + let neg_poly_len = s_poly_negative.len(); + add_polynomials(&mut rxy_prime[(NUM_BLINDINGS+neg_poly_len)..(2 * n + NUM_BLINDINGS)], &s_poly_negative[..]); + s_poly_negative.reverse(); + + add_polynomials(&mut rxy_prime[(2 * n + 1 + NUM_BLINDINGS)..], &s_poly_positive[..]) + + // // add coefficients in front of X^{-2n}...X^{-n-1}, X^{-n}...X^{-1} + // for (r, s) in rxy_prime[NUM_BLINDINGS..(2 * n + NUM_BLINDINGS)] + // .iter_mut() + // .rev() + // .zip(s_poly_negative) + // { + // r.add_assign(&s); + // } + // // add coefficients in front of X^{1}...X^{n}, X^{n+1}...X^{2*n} + // for (r, s) in rxy_prime[(2 * n + 1 + NUM_BLINDINGS)..].iter_mut().zip(s_poly_positive) { + // r.add_assign(&s); + // } + } + + // by this point all R related polynomials are blinded and evaluated for Y variable + + // t(X, y) = r'(X, y)*r(X, 1) and will be later evaluated at z + // contained degree in respect to X are from -4*n to 3*n including X^0 + let mut txy = multiply_polynomials::(rx1.clone(), rxy_prime); + txy[4 * n + 2 * NUM_BLINDINGS] = E::Fr::zero(); // -k(y) + + // commit to t(X, y) to later open at z + let t = polynomial_commitment( + srs.d, + (4 * n) + 2*NUM_BLINDINGS, + 3 * n, + srs, + // skip what would be zero power + txy[0..(4 * n) + 2*NUM_BLINDINGS].iter() + .chain_ext(txy[(4 * n + 2*NUM_BLINDINGS + 1)..].iter()), + ); + + transcript.commit_point(&t); + + let z: E::Fr = transcript.get_challenge_scalar(); + let z_inv = z.inverse().ok_or(SynthesisError::DivisionByZero)?; + + let rz = { + let tmp = z_inv.pow(&[(2*n + NUM_BLINDINGS) as u64]); + + evaluate_at_consequitive_powers(&rx1, tmp, z) + }; + + // rzy is evaluation of r(X, Y) at z, y + let rzy = { + let tmp = z_inv.pow(&[(2*n + NUM_BLINDINGS) as u64]); + + evaluate_at_consequitive_powers(&rxy, tmp, z) + }; + + transcript.commit_scalar(&rz); + transcript.commit_scalar(&rzy); + + let r1: E::Fr = transcript.get_challenge_scalar(); + + let zy_opening = { + // r(X, 1) - r(z, y) + // subtract constant term from R(X, 1) + rx1[(2 * n + NUM_BLINDINGS)].sub_assign(&rzy); + + let mut point = y; + point.mul_assign(&z); + + polynomial_commitment_opening( + 2 * n + NUM_BLINDINGS, + n, + &rx1, + point, + srs + ) + }; + + assert_eq!(rx1.len(), 3*n + NUM_BLINDINGS + 1); + + // it's an opening of t(X, y) at z + let z_opening = { + rx1[(2 * n + NUM_BLINDINGS)].add_assign(&rzy); // restore + + let rx1_len = rx1.len(); + mul_add_polynomials(&mut txy[(2 * n + NUM_BLINDINGS)..(2 * n + NUM_BLINDINGS + rx1_len)], &rx1[..], r1); + + // // skip powers from until reach -2n - 
NUM_BLINDINGS + // for (t, &r) in txy[(2 * n + NUM_BLINDINGS)..].iter_mut().zip(rx1.iter()) { + // let mut r = r; + // r.mul_assign(&r1); + // t.add_assign(&r); + // } + + let val = { + let tmp = z_inv.pow(&[(4*n + 2*NUM_BLINDINGS) as u64]); + + evaluate_at_consequitive_powers(&txy, tmp, z) + }; + + txy[(4 * n + 2*NUM_BLINDINGS)].sub_assign(&val); + + polynomial_commitment_opening( + 4*n + 2*NUM_BLINDINGS, + 3*n, + &txy, + z, + srs) + }; + + Ok(Proof { + r, rz, rzy, t, z_opening, zy_opening + }) +} + +#[test] +fn my_fun_circuit_test() { + use crate::pairing::ff::PrimeField; + use crate::pairing::bls12_381::{Bls12, Fr}; + use super::*; + use crate::sonic::cs::{ConstraintSystem, LinearCombination}; + use crate::sonic::sonic::Basic; + use rand::{thread_rng}; + + struct MyCircuit; + + impl Circuit for MyCircuit { + fn synthesize>(&self, cs: &mut CS) -> Result<(), SynthesisError> { + let (a, b, _) = cs.multiply(|| { + Ok(( + E::Fr::from_str("10").unwrap(), + E::Fr::from_str("20").unwrap(), + E::Fr::from_str("200").unwrap(), + )) + })?; + + cs.enforce_zero(LinearCombination::from(a) + a - b); + + //let multiplier = cs.alloc_input(|| Ok(E::Fr::from_str("20").unwrap()))?; + + //cs.enforce_zero(LinearCombination::from(b) - multiplier); + + Ok(()) + } + } + + let srs = SRS::::new( + 20, + Fr::from_str("22222").unwrap(), + Fr::from_str("33333333").unwrap(), + ); + let proof = self::create_proof_on_srs::(&MyCircuit, &srs).unwrap(); + + use std::time::{Instant}; + let start = Instant::now(); + let rng = thread_rng(); + let mut batch = MultiVerifier::::new(MyCircuit, &srs, rng).unwrap(); + + for _ in 0..1 { + batch.add_proof(&proof, &[/*Fr::from_str("20").unwrap()*/], |_, _| None); + } + + assert!(batch.check_all()); + + let elapsed = start.elapsed(); + println!("time to verify: {:?}", elapsed); +} + +#[test] +fn polynomial_commitment_test() { + use crate::pairing::ff::PrimeField; + use crate::pairing::ff::PrimeFieldRepr; + use crate::pairing::bls12_381::{Bls12, Fr}; + use super::*; + use crate::sonic::cs::{ConstraintSystem, LinearCombination}; + use crate::sonic::sonic::Basic; + use rand::{thread_rng}; + use crate::pairing::{CurveAffine}; + + let srs = SRS::::new( + 20, + Fr::from_str("22222").unwrap(), + Fr::from_str("33333333").unwrap(), + ); + + let mut rng = thread_rng(); + // x^-4 + x^-3 + x^-2 + x^-1 + x + x^2 + let mut poly = vec![Fr::one(), Fr::one(), Fr::one(), Fr::one(), Fr::zero(), Fr::one(), Fr::one()]; + // make commitment to the poly + let commitment = polynomial_commitment(2, 4, 2, &srs, poly.iter()); + let point: Fr = rng.gen(); + let mut tmp = point.inverse().unwrap(); + tmp.square(); + let value = evaluate_at_consequitive_powers(&poly, tmp, point); + // evaluate f(z) + poly[4] = value; + poly[4].negate(); + // f(x) - f(z) + + let opening = polynomial_commitment_opening(4, 2, poly.iter(), point, &srs); + + // e(W , hα x )e(g^{v} * W{-z} , hα ) = e(F , h^{x^{−d +max}} ) + + let alpha_x_precomp = srs.h_positive_x_alpha[1].prepare(); + let alpha_precomp = srs.h_positive_x_alpha[0].prepare(); + let mut neg_x_n_minus_d_precomp = srs.h_negative_x[srs.d - 2]; + neg_x_n_minus_d_precomp.negate(); + let neg_x_n_minus_d_precomp = neg_x_n_minus_d_precomp.prepare(); + // let neg_x_n_minus_d_precomp = srs.h_negative_x[0].prepare(); + + let w = opening.prepare(); + let mut gv = srs.g_positive_x[0].mul(value.into_repr()); + let mut z_neg = point; + z_neg.negate(); + let w_minus_z = opening.mul(z_neg.into_repr()); + gv.add_assign(&w_minus_z); + + let gv = gv.into_affine().prepare(); + + 
assert!(Bls12::final_exponentiation(&Bls12::miller_loop(&[ + (&w, &alpha_x_precomp), + (&gv, &alpha_precomp), + (&commitment.prepare(), &neg_x_n_minus_d_precomp), + ])).unwrap() == ::Fqk::one()); +} diff --git a/bellman/src/sonic/helped/verifier.rs b/bellman/src/sonic/helped/verifier.rs new file mode 100644 index 0000000..da150e7 --- /dev/null +++ b/bellman/src/sonic/helped/verifier.rs @@ -0,0 +1,313 @@ +use crate::pairing::ff::{Field}; +use crate::pairing::{Engine, CurveProjective}; +use std::marker::PhantomData; +use rand::{Rand, Rng}; + +use super::{Proof, SxyAdvice}; +use super::batch::Batch; +use super::poly::{SxEval, SyEval}; +use super::helper::Aggregate; +use super::parameters::{Parameters}; + +use crate::SynthesisError; + +use crate::sonic::transcript::{Transcript, TranscriptProtocol}; +use crate::sonic::util::*; +use crate::sonic::cs::{Backend, SynthesisDriver}; +use crate::sonic::cs::{Circuit, Variable, Coeff}; +use crate::sonic::srs::SRS; +use crate::sonic::sonic::Preprocess; + +pub struct MultiVerifier, S: SynthesisDriver, R: Rng> { + circuit: C, + pub(crate) batch: Batch, + k_map: Vec, + n: usize, + q: usize, + randomness_source: R, + _marker: PhantomData<(E, S)> +} + +impl, S: SynthesisDriver, R: Rng> MultiVerifier { + // This constructor consumes randomness source cause it's later used internally + pub fn new(circuit: C, srs: &SRS, rng: R) -> Result { + let mut preprocess = Preprocess::new(); + + S::synthesize(&mut preprocess, &circuit)?; + + Ok(MultiVerifier { + circuit, + batch: Batch::new(srs, preprocess.n), + k_map: preprocess.k_map, + n: preprocess.n, + q: preprocess.q, + randomness_source: rng, + _marker: PhantomData + }) + } + + pub fn add_aggregate( + &mut self, + proofs: &[(Proof, SxyAdvice)], + aggregate: &Aggregate, + ) + { + let mut transcript = Transcript::new(&[]); + let mut y_values: Vec = Vec::with_capacity(proofs.len()); + for &(ref proof, ref sxyadvice) in proofs { + { + let mut transcript = Transcript::new(&[]); + transcript.commit_point(&proof.r); + y_values.push(transcript.get_challenge_scalar()); + } + + transcript.commit_point(&sxyadvice.s); + } + + let z: E::Fr = transcript.get_challenge_scalar(); + + transcript.commit_point(&aggregate.c); + + let w: E::Fr = transcript.get_challenge_scalar(); + + let szw = { + let mut tmp = SxEval::new(w, self.n); + S::synthesize(&mut tmp, &self.circuit).unwrap(); // TODO + + tmp.finalize(z) + }; + + { + let random: E::Fr = self.randomness_source.gen(); + + self.batch.add_opening(aggregate.opening, random, w); + self.batch.add_commitment(aggregate.c, random); + self.batch.add_opening_value(szw, random); + } + + for ((opening, value), &y) in aggregate.c_openings.iter().zip(y_values.iter()) { + let random: E::Fr = self.randomness_source.gen(); + + self.batch.add_opening(*opening, random, y); + self.batch.add_commitment(aggregate.c, random); + self.batch.add_opening_value(*value, random); + } + + let random: E::Fr = self.randomness_source.gen(); + + let mut expected_value = E::Fr::zero(); + for ((_, advice), c_opening) in proofs.iter().zip(aggregate.c_openings.iter()) { + let mut r: E::Fr = transcript.get_challenge_scalar(); + + // expected value of the later opening + { + let mut tmp = c_opening.1; + tmp.mul_assign(&r); + expected_value.add_assign(&tmp); + } + + r.mul_assign(&random); + + self.batch.add_commitment(advice.s, r); + } + + self.batch.add_opening_value(expected_value, random); + self.batch.add_opening(aggregate.s_opening, random, z); + } + + /// Caller must ensure to add aggregate after adding a proof + pub 
fn add_proof_with_advice( + &mut self, + proof: &Proof, + inputs: &[E::Fr], + advice: &SxyAdvice, + ) + { + let mut z = None; + + self.add_proof(proof, inputs, |_z, _y| { + z = Some(_z); + Some(advice.szy) + }); + + let z = z.unwrap(); + + // We need to open up SxyAdvice.s at z using SxyAdvice.opening + let mut transcript = Transcript::new(&[]); + transcript.commit_point(&advice.opening); + transcript.commit_point(&advice.s); + transcript.commit_scalar(&advice.szy); + let random: E::Fr = self.randomness_source.gen(); + + self.batch.add_opening(advice.opening, random, z); + self.batch.add_commitment(advice.s, random); + self.batch.add_opening_value(advice.szy, random); + } + + pub fn add_proof( + &mut self, + proof: &Proof, + inputs: &[E::Fr], + sxy: F + ) + where F: FnOnce(E::Fr, E::Fr) -> Option + { + let mut transcript = Transcript::new(&[]); + + transcript.commit_point(&proof.r); + + let y: E::Fr = transcript.get_challenge_scalar(); + + transcript.commit_point(&proof.t); + + let z: E::Fr = transcript.get_challenge_scalar(); + + transcript.commit_scalar(&proof.rz); + transcript.commit_scalar(&proof.rzy); + + let r1: E::Fr = transcript.get_challenge_scalar(); + + transcript.commit_point(&proof.z_opening); + transcript.commit_point(&proof.zy_opening); + + // First, the easy one. Let's open up proof.r at zy, using proof.zy_opening + // as the evidence and proof.rzy as the opening. + { + let random: E::Fr = self.randomness_source.gen(); + let mut zy = z; + zy.mul_assign(&y); + self.batch.add_opening(proof.zy_opening, random, zy); + self.batch.add_commitment_max_n(proof.r, random); + self.batch.add_opening_value(proof.rzy, random); + } + + // Now we need to compute t(z, y) with what we have. Let's compute k(y). + let mut ky = E::Fr::zero(); + for (exp, input) in self.k_map.iter().zip(Some(E::Fr::one()).iter().chain(inputs.iter())) { + let mut term = y.pow(&[(*exp + self.n) as u64]); + term.mul_assign(input); + ky.add_assign(&term); + } + + // Compute s(z, y) + let szy = sxy(z, y).unwrap_or_else(|| { + let mut tmp = SxEval::new(y, self.n); + S::synthesize(&mut tmp, &self.circuit).unwrap(); // TODO + + tmp.finalize(z) + + // let mut tmp = SyEval::new(z, self.n, self.q); + // S::synthesize(&mut tmp, &self.circuit).unwrap(); // TODO + + // tmp.finalize(y) + }); + + // Finally, compute t(z, y) + // t(z, y) = (r(z, y) + s(z,y))*r(z, 1) - k(y) + let mut tzy = proof.rzy; + tzy.add_assign(&szy); + tzy.mul_assign(&proof.rz); + tzy.sub_assign(&ky); + + // We open these both at the same time by keeping their commitments + // linearly independent (using r1). + { + let mut random: E::Fr = self.randomness_source.gen(); + + self.batch.add_opening(proof.z_opening, random, z); + self.batch.add_opening_value(tzy, random); + self.batch.add_commitment(proof.t, random); + + random.mul_assign(&r1); + + self.batch.add_opening_value(proof.rz, random); + self.batch.add_commitment_max_n(proof.r, random); + } + } + + pub fn get_k_map(&self) -> Vec { + return self.k_map.clone(); + } + + pub fn get_n(&self) -> usize { + return self.n; + } + + pub fn get_q(&self) -> usize { + return self.q; + } + + pub fn check_all(self) -> bool { + self.batch.check_all() + } +} + +/// Check multiple proofs without aggregation. 
Verifier's work is +/// not succint due to `S(X, Y)` evaluation +pub fn verify_proofs, S: SynthesisDriver, R: Rng>( + proofs: &[Proof], + inputs: &[Vec], + circuit: C, + rng: R, + params: &Parameters, +) -> Result { + verify_proofs_on_srs::(proofs, inputs, circuit, rng, ¶ms.srs) +} + +/// Check multiple proofs without aggregation. Verifier's work is +/// not succint due to `S(X, Y)` evaluation +pub fn verify_proofs_on_srs, S: SynthesisDriver, R: Rng>( + proofs: &[Proof], + inputs: &[Vec], + circuit: C, + rng: R, + srs: &SRS, +) -> Result { + let mut verifier = MultiVerifier::::new(circuit, srs, rng)?; + let expected_inputs_size = verifier.get_k_map().len() - 1; + for (proof, inputs) in proofs.iter().zip(inputs.iter()) { + if inputs.len() != expected_inputs_size { + return Err(SynthesisError::Unsatisfiable); + } + verifier.add_proof(proof, &inputs, |_, _| None); + } + + Ok(verifier.check_all()) +} + +/// Check multiple proofs with aggregation. Verifier's work is +/// not succint due to `S(X, Y)` evaluation +pub fn verify_aggregate, S: SynthesisDriver,R: Rng>( + proofs: &[(Proof, SxyAdvice)], + aggregate: &Aggregate, + inputs: &[Vec], + circuit: C, + rng: R, + params: &Parameters, +) -> Result { + verify_aggregate_on_srs::(proofs, aggregate, inputs, circuit, rng, ¶ms.srs) +} + +/// Check multiple proofs with aggregation. Verifier's work is +/// not succint due to `S(X, Y)` evaluation +pub fn verify_aggregate_on_srs, S: SynthesisDriver, R: Rng>( + proofs: &[(Proof, SxyAdvice)], + aggregate: &Aggregate, + inputs: &[Vec], + circuit: C, + rng: R, + srs: &SRS, +) -> Result { + let mut verifier = MultiVerifier::::new(circuit, srs, rng)?; + let expected_inputs_size = verifier.get_k_map().len() - 1; + for ((proof, advice), inputs) in proofs.iter().zip(inputs.iter()) { + if inputs.len() != expected_inputs_size { + return Err(SynthesisError::Unsatisfiable); + } + verifier.add_proof_with_advice(proof, &inputs, &advice); + } + verifier.add_aggregate(proofs, aggregate); + + Ok(verifier.check_all()) +} + diff --git a/bellman/src/sonic/mod.rs b/bellman/src/sonic/mod.rs new file mode 100644 index 0000000..54c2005 --- /dev/null +++ b/bellman/src/sonic/mod.rs @@ -0,0 +1,15 @@ +pub use crate::{SynthesisError}; + +pub mod sonic; +pub mod srs; +pub mod util; +pub mod helped; +pub mod cs; +pub mod unhelped; + +mod transcript; + +#[cfg(test)] +mod tests; + + diff --git a/bellman/src/sonic/paper.rs b/bellman/src/sonic/paper.rs new file mode 100644 index 0000000..cebe6fc --- /dev/null +++ b/bellman/src/sonic/paper.rs @@ -0,0 +1,310 @@ + +#[test] +fn test_paper_results() { + use crate::pairing::bls12_381::{Bls12, Fr}; + use std::time::{Instant}; + + let srs_x = Fr::from_str("23923").unwrap(); + let srs_alpha = Fr::from_str("23728792").unwrap(); + println!("making srs"); + let start = Instant::now(); + let srs = SRS::::dummy(830564, srs_x, srs_alpha); + println!("done in {:?}", start.elapsed()); + + struct PedersenHashPreimageCircuit<'a, E: sapling_crypto::jubjub::JubjubEngine + 'a> { + preimage: Vec>, + params: &'a E::Params, + } + + impl<'a, E: sapling_crypto::jubjub::JubjubEngine + 'a> Clone for PedersenHashPreimageCircuit<'a, E> { + fn clone(&self) -> Self { + PedersenHashPreimageCircuit { + preimage: self.preimage.clone(), + params: self.params + } + } + } + + impl<'a, E: sapling_crypto::jubjub::JubjubEngine> bellman::Circuit for PedersenHashPreimageCircuit<'a, E> { + fn synthesize>( + self, + cs: &mut CS + ) -> Result<(), bellman::SynthesisError> + { + //use bellman::ConstraintSystem; + use 
sapling_crypto::circuit::boolean::{AllocatedBit, Boolean}; + use sapling_crypto::circuit::pedersen_hash; + + let mut preimage = vec![]; + + for &bit in self.preimage.iter() { + preimage.push(Boolean::from(AllocatedBit::alloc(&mut* cs, bit)?)); + } + + pedersen_hash::pedersen_hash( + &mut* cs, pedersen_hash::Personalization::NoteCommitment, &preimage, self.params)?; + + Ok(()) + } + } + + #[derive(Clone)] + struct SHA256PreimageCircuit { + preimage: Vec>, + } + + impl bellman::Circuit for SHA256PreimageCircuit { + fn synthesize>( + self, + cs: &mut CS, + ) -> Result<(), bellman::SynthesisError> { + //use bellman::ConstraintSystem; + use sapling_crypto::circuit::boolean::{AllocatedBit, Boolean}; + use sapling_crypto::circuit::sha256::sha256_block_no_padding; + + let mut preimage = vec![]; + + for &bit in self.preimage.iter() { + preimage.push(Boolean::from(AllocatedBit::alloc(&mut *cs, bit)?)); + } + + sha256_block_no_padding(&mut *cs, &preimage)?; + sha256_block_no_padding(&mut *cs, &preimage)?; + sha256_block_no_padding(&mut *cs, &preimage)?; + // sha256_block_no_padding(&mut *cs, &preimage)?; + + Ok(()) + } + } + + { + use crate::pairing::{CurveAffine}; + use crate::pairing::bls12_381::{G1Affine, G2Affine}; + let a = G1Affine::one(); + let b = G2Affine::one(); + let c = G1Affine::one(); + + let alpha = G1Affine::one(); + let beta = G2Affine::one(); + let iv = G1Affine::one(); + let gamma = G2Affine::one().prepare(); + let delta = G2Affine::one().prepare(); + + let alphabeta = ::pairing(alpha, beta); + + println!("verifying an idealized groth16 proof"); + let start = Instant::now(); + assert!(::final_exponentiation( + &::miller_loop([ + (&a.prepare(), &b.prepare()), + (&iv.prepare(), &gamma), + (&c.prepare(), &delta), + ].into_iter()) + ).unwrap() != alphabeta); + println!("done in {:?}", start.elapsed()); + } + + { + use sonic::util::multiexp; + use crate::pairing::{CurveAffine}; + use crate::pairing::bls12_381::{G1Affine, G2Affine}; + // e([\alpha G], [\beta H]) = e(A, B) e(IV, [\gamma] H) e(C, [\delta] H) + let a = G1Affine::one(); + let b = G2Affine::one(); + let c = vec![G1Affine::one(); 100]; + let mut tmp = Fr::one(); + tmp.double(); + tmp = tmp.inverse().unwrap(); + let cscalars = (0..100).map(|_| {tmp.square(); tmp}).collect::>(); + + let alpha = G1Affine::one(); + let beta = G2Affine::one(); + let iv = G1Affine::one(); + let gamma = G2Affine::one().prepare(); + let delta = G2Affine::one().prepare(); + + let alphabeta = ::pairing(alpha, beta); + + println!("verifying 100 idealized groth16 proofs"); + let start = Instant::now(); + let c = multiexp( + c.iter(), + cscalars.iter(), + ).into_affine(); + assert!(::final_exponentiation( + &::miller_loop([ + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), 
&b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&a.prepare(), &b.prepare()), + (&iv.prepare(), &gamma), + (&c.prepare(), &delta), + ].into_iter()) + ).unwrap() != alphabeta); + println!("done in {:?}", start.elapsed()); + } + + { + let samples: usize = 100; + + const NUM_BITS: usize = 384; + + let params = sapling_crypto::jubjub::JubjubBls12::new(); + let circuit = PedersenHashPreimageCircuit { + preimage: vec![Some(true); NUM_BITS], + params: ¶ms + }; + + println!("creating proof"); + let start = Instant::now(); + let proof = create_proof::(&AdaptorCircuit(circuit.clone()), &srs).unwrap(); + println!("done in {:?}", start.elapsed()); + + println!("creating advice"); + let start = Instant::now(); + let advice = create_advice::(&AdaptorCircuit(circuit.clone()), &proof, &srs); + println!("done in {:?}", start.elapsed()); + + println!("creating aggregate for {} proofs", samples); + let start = Instant::now(); + let proofs: Vec<_> = (0..samples).map(|_| (proof.clone(), advice.clone())).collect(); + let aggregate = create_aggregate::(&AdaptorCircuit(circuit.clone()), &proofs, &srs); + println!("done in {:?}", start.elapsed()); + + { + let mut verifier = MultiVerifier::::new(AdaptorCircuit(circuit.clone()), &srs).unwrap(); + println!("verifying 1 proof without 
advice"); + let start = Instant::now(); + { + for _ in 0..1 { + verifier.add_proof(&proof, &[], |_, _| None); + } + assert_eq!(verifier.check_all(), true); // TODO + } + println!("done in {:?}", start.elapsed()); + } + + { + let mut verifier = MultiVerifier::::new(AdaptorCircuit(circuit.clone()), &srs).unwrap(); + println!("verifying {} proofs without advice", samples); + let start = Instant::now(); + { + for _ in 0..samples { + verifier.add_proof(&proof, &[], |_, _| None); + } + assert_eq!(verifier.check_all(), true); // TODO + } + println!("done in {:?}", start.elapsed()); + } + + { + let mut verifier = MultiVerifier::::new(AdaptorCircuit(circuit.clone()), &srs).unwrap(); + println!("verifying 100 proofs with advice"); + let start = Instant::now(); + { + for (ref proof, ref advice) in &proofs { + verifier.add_proof_with_advice(proof, &[], advice); + } + verifier.add_aggregate(&proofs, &aggregate); + assert_eq!(verifier.check_all(), true); // TODO + } + println!("done in {:?}", start.elapsed()); + } + } +} diff --git a/bellman/src/sonic/sonic/adaptor.rs b/bellman/src/sonic/sonic/adaptor.rs new file mode 100644 index 0000000..e57fb37 --- /dev/null +++ b/bellman/src/sonic/sonic/adaptor.rs @@ -0,0 +1,162 @@ +use crate::pairing::ff::{Field, PrimeField}; +use crate::pairing::{Engine, CurveProjective}; + +// this one is for all external interfaces +// use crate::{LinearCombination, ConstraintSystem, Circuit, Variable}; + +use crate::SynthesisError; + +use crate::sonic::srs::SRS; +use crate::sonic::cs::LinearCombination as SonicLinearCombination; +use crate::sonic::cs::Circuit as SonicCircuit; +use crate::sonic::cs::ConstraintSystem as SonicConstraintSystem; +use crate::sonic::cs::Variable as SonicVariable; +use crate::sonic::cs::Coeff; +use std::marker::PhantomData; + +pub struct Adaptor<'a, E: Engine, CS: SonicConstraintSystem + 'a> { + cs: &'a mut CS, + _marker: PhantomData, +} + +impl<'a, E: Engine, CS: SonicConstraintSystem + 'a> crate::ConstraintSystem + for Adaptor<'a, E, CS> +{ + type Root = Self; + + // this is an important change + fn one() -> crate::Variable { + crate::Variable::new_unchecked(crate::Index::Input(1)) + } + + fn alloc(&mut self, _: A, f: F) -> Result + where + F: FnOnce() -> Result, + A: FnOnce() -> AR, + AR: Into, + { + let var = self.cs.alloc(|| { + f().map_err(|_| crate::SynthesisError::AssignmentMissing) + }).map_err(|_| crate::SynthesisError::AssignmentMissing)?; + + Ok(match var { + SonicVariable::A(index) => crate::Variable::new_unchecked(crate::Index::Input(index)), + SonicVariable::B(index) => crate::Variable::new_unchecked(crate::Index::Aux(index)), + _ => unreachable!(), + }) + } + + fn alloc_input( + &mut self, + _: A, + f: F, + ) -> Result + where + F: FnOnce() -> Result, + A: FnOnce() -> AR, + AR: Into, + { + let var = self.cs.alloc_input(|| { + f().map_err(|_| crate::SynthesisError::AssignmentMissing) + }).map_err(|_| crate::SynthesisError::AssignmentMissing)?; + + Ok(match var { + SonicVariable::A(index) => crate::Variable::new_unchecked(crate::Index::Input(index)), + SonicVariable::B(index) => crate::Variable::new_unchecked(crate::Index::Aux(index)), + _ => unreachable!(), + }) + } + + fn enforce(&mut self, _: A, a: LA, b: LB, c: LC) + where + A: FnOnce() -> AR, + AR: Into, + LA: FnOnce(crate::LinearCombination) -> crate::LinearCombination, + LB: FnOnce(crate::LinearCombination) -> crate::LinearCombination, + LC: FnOnce(crate::LinearCombination) -> crate::LinearCombination, + { + fn convert(lc: crate::LinearCombination) -> SonicLinearCombination { + let 
mut ret = SonicLinearCombination::zero(); + + for &(v, coeff) in lc.as_ref().iter() { + let var = match v.get_unchecked() { + crate::Index::Input(i) => SonicVariable::A(i), + crate::Index::Aux(i) => SonicVariable::B(i), + }; + + ret = ret + (Coeff::Full(coeff), var); + } + + ret + } + + fn eval>( + lc: &SonicLinearCombination, + cs: &CS, + ) -> Option { + let mut ret = E::Fr::zero(); + + for &(v, coeff) in lc.as_ref().iter() { + let mut tmp = match cs.get_value(v) { + Ok(tmp) => tmp, + Err(_) => return None, + }; + coeff.multiply(&mut tmp); + ret.add_assign(&tmp); + } + + Some(ret) + } + + let a_lc = convert(a(crate::LinearCombination::zero())); + let a_value = eval(&a_lc, &*self.cs); + let b_lc = convert(b(crate::LinearCombination::zero())); + let b_value = eval(&b_lc, &*self.cs); + let c_lc = convert(c(crate::LinearCombination::zero())); + let c_value = eval(&c_lc, &*self.cs); + + let (a, b, c) = self + .cs + .multiply(|| Ok((a_value.unwrap(), b_value.unwrap(), c_value.unwrap()))) + .unwrap(); + + self.cs.enforce_zero(a_lc - a); + self.cs.enforce_zero(b_lc - b); + self.cs.enforce_zero(c_lc - c); + } + + fn push_namespace(&mut self, _: N) + where + NR: Into, + N: FnOnce() -> NR, + { + // Do nothing; we don't care about namespaces in this context. + } + + fn pop_namespace(&mut self) { + // Do nothing; we don't care about namespaces in this context. + } + + fn get_root(&mut self) -> &mut Self::Root { + self + } +} + +#[derive(Clone)] +pub struct AdaptorCircuit(pub T); + +impl<'a, E: Engine, C: crate::Circuit + Clone> SonicCircuit for AdaptorCircuit { + fn synthesize>(&self, cs: &mut CS) -> Result<(), SynthesisError> { + let mut adaptor = Adaptor { + cs: cs, + _marker: PhantomData, + }; + + match self.0.clone().synthesize(&mut adaptor) { + Err(_) => return Err(SynthesisError::AssignmentMissing), + Ok(_) => {} + }; + + Ok(()) + } +} \ No newline at end of file diff --git a/bellman/src/sonic/sonic/backends.rs b/bellman/src/sonic/sonic/backends.rs new file mode 100644 index 0000000..f8b3e48 --- /dev/null +++ b/bellman/src/sonic/sonic/backends.rs @@ -0,0 +1,170 @@ +use crate::pairing::{Engine}; +use crate::sonic::cs::Backend; +use std::marker::PhantomData; +use crate::SynthesisError; +use crate::sonic::cs::SynthesisDriver; + +use crate::sonic::cs::{Circuit, ConstraintSystem, Variable, LinearCombination}; + +use crate::pairing::ff::Field; + +pub struct Preprocess { + pub k_map: Vec, + pub n: usize, + pub q: usize, + _marker: PhantomData +} + +impl<'a, E: Engine> Backend for &'a mut Preprocess { + type LinearConstraintIndex = (); + + fn get_for_q(&self, _q: usize) -> Self::LinearConstraintIndex { () } + + fn new_k_power(&mut self, index: usize) { + self.k_map.push(index); + } + + fn new_multiplication_gate(&mut self) { + self.n += 1; + } + + fn new_linear_constraint(&mut self) { + self.q += 1; + + () + } +} + +impl Preprocess { + pub fn new() -> Self { + Preprocess { + k_map: vec![], + n: 0, + q: 0, + _marker: PhantomData + } + } +} + +pub struct Wires { + pub a: Vec, + pub b: Vec, + pub c: Vec +} + +impl<'a, E: Engine> Backend for &'a mut Wires { + type LinearConstraintIndex = (); + + fn new_linear_constraint(&mut self) -> Self::LinearConstraintIndex { () } + + fn get_for_q(&self, _q: usize) -> Self::LinearConstraintIndex { () } + + fn new_multiplication_gate(&mut self) { + self.a.push(E::Fr::zero()); + self.b.push(E::Fr::zero()); + self.c.push(E::Fr::zero()); + } + + fn get_var(&self, variable: Variable) -> Option { + Some(match variable { + Variable::A(index) => { + self.a[index - 1] + }, + 
Variable::B(index) => { + self.b[index - 1] + }, + Variable::C(index) => { + self.c[index - 1] + } + }) + } + + fn set_var(&mut self, variable: Variable, value: F) -> Result<(), SynthesisError> + where F: FnOnce() -> Result + { + let value = value()?; + + match variable { + Variable::A(index) => { + self.a[index - 1] = value; + }, + Variable::B(index) => { + self.b[index - 1] = value; + }, + Variable::C(index) => { + self.c[index - 1] = value; + } + } + + Ok(()) + } +} + +impl Wires { + pub fn new() -> Self { + Wires { + a: vec![], + b: vec![], + c: vec![], + } + } +} + +pub struct CountNandQ { + pub n: usize, + pub q: usize, + _marker: std::marker::PhantomData +} + +impl<'a, E: Engine, S: SynthesisDriver> Backend for &'a mut CountNandQ { + type LinearConstraintIndex = (); + + fn get_for_q(&self, _q: usize) -> Self::LinearConstraintIndex { () } + + fn new_multiplication_gate(&mut self) { + self.n += 1; + } + + fn new_linear_constraint(&mut self) -> Self::LinearConstraintIndex { + self.q += 1; + + () + } +} + +impl CountNandQ { + pub fn new() -> Self { + Self { + n: 0, + q: 0, + _marker: std::marker::PhantomData + } + } +} + +pub struct CountN { + pub n: usize, + _marker: std::marker::PhantomData +} + +impl<'a, E: Engine, S: SynthesisDriver> Backend for &'a mut CountN { + type LinearConstraintIndex = (); + + fn new_linear_constraint(&mut self) -> Self::LinearConstraintIndex { () } + + fn get_for_q(&self, _q: usize) -> Self::LinearConstraintIndex { () } + + fn new_multiplication_gate(&mut self) { + self.n += 1; + } +} + +impl CountN { + pub fn new() -> Self { + Self { + n: 0, + _marker: std::marker::PhantomData + } + } +} + diff --git a/bellman/src/sonic/sonic/constraint_systems.rs b/bellman/src/sonic/sonic/constraint_systems.rs new file mode 100644 index 0000000..1bdf4f7 --- /dev/null +++ b/bellman/src/sonic/sonic/constraint_systems.rs @@ -0,0 +1,638 @@ +use crate::pairing::{Engine}; +use crate::sonic::cs::Backend; +use std::marker::PhantomData; +use std::iter::Peekable; + +use crate::SynthesisError; +use crate::sonic::cs::SynthesisDriver; + +use crate::sonic::cs::{Circuit, ConstraintSystem, Variable, LinearCombination, Coeff}; + +use crate::pairing::ff::Field; + +use super::M; + +pub struct NonassigningSynthesizer> { + backend: B, + current_variable: Option, + _marker: PhantomData, + q: usize, + n: usize, +} + +impl>NonassigningSynthesizer { + pub fn new(backend: B) -> Self { + Self { + backend: backend, + current_variable: None, + _marker: PhantomData, + q: 0, + n: 0, + } + } +} + +impl> ConstraintSystem for NonassigningSynthesizer { + const ONE: Variable = Variable::A(1); + + fn alloc(&mut self, _value: F) -> Result + where + F: FnOnce() -> Result + { + match self.current_variable.take() { + Some(index) => { + let var_b = Variable::B(index); + + self.current_variable = None; + + Ok(var_b) + }, + None => { + self.n += 1; + let index = self.n; + self.backend.new_multiplication_gate(); + + let var_a = Variable::A(index); + + self.current_variable = Some(index); + + Ok(var_a) + } + } + } + + fn alloc_input(&mut self, value: F) -> Result + where + F: FnOnce() -> Result + { + let input_var = self.alloc(value)?; + + self.enforce_zero(LinearCombination::zero() + input_var); + self.backend.new_k_power(self.q); + + Ok(input_var) + } + + fn enforce_zero(&mut self, lc: LinearCombination) + { + self.q += 1; + let y = self.backend.new_linear_constraint(); + + for (var, coeff) in lc.as_ref() { + self.backend.insert_coefficient(*var, *coeff, &y); + } + } + + fn multiply(&mut self, _values: F) -> 
Result<(Variable, Variable, Variable), SynthesisError> + where + F: FnOnce() -> Result<(E::Fr, E::Fr, E::Fr), SynthesisError> + { + self.n += 1; + let index = self.n; + self.backend.new_multiplication_gate(); + + let a = Variable::A(index); + let b = Variable::B(index); + let c = Variable::C(index); + + Ok((a, b, c)) + } + + fn get_value(&self, var: Variable) -> Result { + self.backend.get_var(var).ok_or(()) + } +} + +pub struct Synthesizer> { + backend: B, + current_variable: Option, + _marker: PhantomData, + q: usize, + n: usize, +} + +impl> ConstraintSystem for Synthesizer { + const ONE: Variable = Variable::A(1); + + fn alloc(&mut self, value: F) -> Result + where + F: FnOnce() -> Result + { + match self.current_variable.take() { + Some(index) => { + let var_a = Variable::A(index); + let var_b = Variable::B(index); + let var_c = Variable::C(index); + + let mut product = None; + + let value_a = self.backend.get_var(var_a); + + self.backend.set_var(var_b, || { + let value_b = value()?; + product = Some(value_a.ok_or(SynthesisError::AssignmentMissing)?); + product.as_mut().map(|product| product.mul_assign(&value_b)); + + Ok(value_b) + })?; + + self.backend.set_var(var_c, || { + product.ok_or(SynthesisError::AssignmentMissing) + })?; + + self.current_variable = None; + + Ok(var_b) + }, + None => { + self.n += 1; + let index = self.n; + self.backend.new_multiplication_gate(); + + let var_a = Variable::A(index); + + self.backend.set_var(var_a, value)?; + + self.current_variable = Some(index); + + Ok(var_a) + } + } + } + + fn alloc_input(&mut self, value: F) -> Result + where + F: FnOnce() -> Result + { + let input_var = self.alloc(value)?; + + self.enforce_zero(LinearCombination::zero() + input_var); + self.backend.new_k_power(self.q); + + Ok(input_var) + } + + fn enforce_zero(&mut self, lc: LinearCombination) + { + self.q += 1; + let y = self.backend.new_linear_constraint(); + + for (var, coeff) in lc.as_ref() { + self.backend.insert_coefficient(*var, *coeff, &y); + } + } + + fn multiply(&mut self, values: F) -> Result<(Variable, Variable, Variable), SynthesisError> + where + F: FnOnce() -> Result<(E::Fr, E::Fr, E::Fr), SynthesisError> + { + self.n += 1; + let index = self.n; + self.backend.new_multiplication_gate(); + + let a = Variable::A(index); + let b = Variable::B(index); + let c = Variable::C(index); + + let mut b_val = None; + let mut c_val = None; + + self.backend.set_var(a, || { + let (a, b, c) = values()?; + + b_val = Some(b); + c_val = Some(c); + + Ok(a) + })?; + + self.backend.set_var(b, || { + b_val.ok_or(SynthesisError::AssignmentMissing) + })?; + + self.backend.set_var(c, || { + c_val.ok_or(SynthesisError::AssignmentMissing) + })?; + + Ok((a, b, c)) + } + + fn get_value(&self, var: Variable) -> Result { + self.backend.get_var(var).ok_or(()) + } +} + +impl>Synthesizer { + pub fn new(backend: B) -> Self { + Self { + backend: backend, + current_variable: None, + _marker: PhantomData, + q: 0, + n: 0, + } + } +} + +pub struct PermutationSynthesizer> { + backend: B, + current_variable: Option, + _marker: PhantomData, + q: usize, + n: usize, + + // These vectors will hold, for all of the wires, the terms related to these + // wires for each of the M permutation polynomials. The Coeff is the + // coefficient, and the usize is q, the index of the linear constraint and is + // related to the power of Y in the s_1(X, Y) polynomial. 
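    // (Concretely: for wire A_i, `a[i - 1][j]` is `Some((coeff, q))` when that
    // wire appears with coefficient `coeff` in linear constraint number `q`
    // via the j-th of the M permutation polynomials, and `None` while slot j
    // of that wire is still free; `b` and `c` keep the same bookkeeping for
    // the B- and C-wires.)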
+ pub a: Vec<[Option<(Coeff, usize)>; M]>, + pub b: Vec<[Option<(Coeff, usize)>; M]>, + pub c: Vec<[Option<(Coeff, usize)>; M]>, +} + +impl> ConstraintSystem for PermutationSynthesizer { + const ONE: Variable = Variable::A(1); + + fn alloc(&mut self, value: F) -> Result + where + F: FnOnce() -> Result + { + match self.current_variable.take() { + Some(index) => { + let var_a = Variable::A(index); + let var_b = Variable::B(index); + let var_c = Variable::C(index); + + let mut product = None; + + let value_a = self.backend.get_var(var_a); + + self.backend.set_var(var_b, || { + let value_b = value()?; + product = Some(value_a.ok_or(SynthesisError::AssignmentMissing)?); + product.as_mut().map(|product| product.mul_assign(&value_b)); + + Ok(value_b) + })?; + + self.backend.set_var(var_c, || { + product.ok_or(SynthesisError::AssignmentMissing) + })?; + + self.current_variable = None; + + Ok(var_b) + }, + None => { + self.n += 1; + let index = self.n; + self.backend.new_multiplication_gate(); + + // Create slots for the new wires. + self.a.push([None; M]); + self.b.push([None; M]); + self.c.push([None; M]); + + let var_a = Variable::A(index); + + self.backend.set_var(var_a, value)?; + + self.current_variable = Some(index); + + Ok(var_a) + } + } + } + + fn alloc_input(&mut self, value: F) -> Result + where + F: FnOnce() -> Result + { + let input_var = self.alloc(value)?; + + self.enforce_zero(LinearCombination::zero() + input_var); + // The new variable has all free slots, so this shouldn't create + // more than one linear combination. + self.backend.new_k_power(self.q); + + Ok(input_var) + } + + fn enforce_zero(&mut self, lc: LinearCombination) + { + // We just redirect things into the (recursing) enforce_equals method which + // does the actual work. Annoyingly, we need to use dynamic dispatch on the + // underlying iterator because once you've taken a Peekable you can't get + // the underlying iterator (since .next() may have been called on it) so + // at each depth of recursion we'd end up with a new type, which is + // impossible for the compiler to reason about. + let lc = lc.as_ref(); + let lc: &mut Iterator)> = &mut lc.into_iter(); + let lc = lc.peekable(); + + self.enforce_equals(lc, None); + } + + fn multiply(&mut self, values: F) -> Result<(Variable, Variable, Variable), SynthesisError> + where + F: FnOnce() -> Result<(E::Fr, E::Fr, E::Fr), SynthesisError> + { + self.n += 1; + let index = self.n; + self.backend.new_multiplication_gate(); + + // Create slots for the new wires. + self.a.push([None; M]); + self.b.push([None; M]); + self.c.push([None; M]); + + let a = Variable::A(index); + let b = Variable::B(index); + let c = Variable::C(index); + + let mut b_val = None; + let mut c_val = None; + + self.backend.set_var(a, || { + let (a, b, c) = values()?; + + b_val = Some(b); + c_val = Some(c); + + Ok(a) + })?; + + self.backend.set_var(b, || { + b_val.ok_or(SynthesisError::AssignmentMissing) + })?; + + self.backend.set_var(c, || { + c_val.ok_or(SynthesisError::AssignmentMissing) + })?; + + Ok((a, b, c)) + } + + fn get_value(&self, var: Variable) -> Result { + self.backend.get_var(var).ok_or(()) + } +} + +impl> PermutationSynthesizer { + pub fn new(backend: B) -> Self { + Self { + backend: backend, + current_variable: None, + _marker: PhantomData, + q: 0, + n: 0, + + a: vec![], + b: vec![], + c: vec![], + } + } + + // Enforces that the value of `lhs` equals the value + // of `rhs`, returning the value of the left hand side + // as determined by the assignment. 
If rhs is none, it + // is interpreted to be zero. + fn enforce_equals<'a>( + &mut self, + mut lhs: Peekable<&mut Iterator)>>, + rhs: Option + ) -> Option + { + // First, let's create a new linear constraint. We'll save its y value + // for the backend and q as well. + self.q += 1; + let q = self.q; + let y = self.backend.new_linear_constraint(); + let mut slots_available = [true; M]; + let mut num_slots_available = M; + + // If the caller is enforce_equals we need to return the value of the lhs + // so that rhs can be assigned properly, so we keep track of it here. + let mut current_value = if rhs.is_some() { Some(E::Fr::zero()) } else { None }; + + // If rhs is Some, then we _need_ to involve it in this + // linear constraint, so let's just handle it right away. (This also + // helps avoid an infinite recursion issue described later.) + if let Some(rhs) = rhs { + self.emplace_variable(&mut slots_available, &y, rhs, Coeff::NegativeOne, q); + num_slots_available -= 1; + } + + // Iterate through the linear combination + loop { + if let Some(term) = lhs.next() { + assert!(num_slots_available > 0); + + if num_slots_available == 1 && lhs.peek().is_some() { + // We'll be out of slots if we add this variable to the linear + // combination; instead, create an ephemeral variable to hold + // the value of the remaining terms and use that. Temporarily, + // give the variable "zero" value. + let ephemeral = self.alloc(|| Ok(E::Fr::zero())).expect("assignment is provided so this should not fail"); + + // One of the annoying "tricks" we have to embrace is that the ephemeral + // variable has all of its slots available, and so because it's the rhs + // when we recursively call `enforce_equals` we know that it will not trigger + // a condition in `emplace_variable` that results in the variable being + // duplicated; otherwise, the duplicate variable will have a value of zero + // and we'd have to somehow track all of the duplicates when we later assign. + let mut iter = Some(term).into_iter().chain(lhs); + let iter: &mut Iterator)> = &mut iter; + let value = self.enforce_equals(iter.peekable(), Some(ephemeral)); + + // Set the correct ephemeral value right away + self.backend.set_var(ephemeral, || { + value.ok_or(SynthesisError::AssignmentMissing) + }).expect("assignment is provided so this should not fail"); + + // Fix the underlying assignment -- the c-wire value will change if the ephemeral + // value was a b-wire. + self.fix_variable_assignment(ephemeral); + + // Now we emplace the variable into the linear combination. + self.emplace_variable(&mut slots_available, &y, ephemeral, Coeff::One, q); + num_slots_available -= 1; + + match (&mut current_value, &value) { + (Some(ref mut current_value), Some(ref value)) => { + current_value.add_assign(&value); + }, + _ => { + current_value = None; + } + } + + assert!(num_slots_available == 0); + + // We're done, so return. + return current_value; + } else { + self.emplace_variable(&mut slots_available, &y, term.0, term.1, q); + num_slots_available -= 1; + + match (&mut current_value, self.backend.get_var(term.0)) { + (Some(ref mut current_value), Some(mut value)) => { + term.1.multiply(&mut value); + current_value.add_assign(&value); + }, + _ => { + current_value = None; + } + } + } + } else { + // We're done, so return. + return current_value; + } + } + } + + // This takes a variable and coefficient and places it into a linear combination, + // given a set of slots that are available, and updates the slot availability to + // reflect which slot was chosen. 
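    // A minimal sketch of the slot-picking rule that the method below starts
    // from (illustrative only, not the full procedure): prefer the first of
    // the M slots that is free both in the linear constraint being assembled
    // and on the wire itself; when no such slot exists, `emplace_variable`
    // falls back to the ephemeral-wire construction described above.
    fn first_shared_free_slot(constraint_slots: &[bool; M], wire_slots: &[bool; M]) -> Option<usize> {
        (0..M).find(|&i| constraint_slots[i] && wire_slots[i])
    }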
+ fn emplace_variable(&mut self, slots_available: &mut [bool; M], y: &B::LinearConstraintIndex, var: Variable, coeff: Coeff, q: usize) + { + // Get the slots for this wire. + let wire_slots = self.get_wire_slots(var); + + // Let's handle the simple case where the linear combination and the + // variable have a slot that coincides. + let mut available_i = None; + for i in 0..M { + if slots_available[i] { + available_i = Some(i); + + if wire_slots[i] { + self.emplace_slot(var, i, coeff, y, q); + slots_available[i] = false; + return; + } + } + } + + let available_i = available_i.expect("there is always at least one slot open"); + + // available_i corresponds to a slot that is available in the linear + // combination; clearly, it is not available for the wire. In order + // to rectify this, we will create a new wire with the same value. + let ephemeral_value = self.backend.get_var(var); + let ephemeral = self.alloc(|| { + ephemeral_value.ok_or(SynthesisError::AssignmentMissing) + }).expect("assignment is provided so this should not fail"); + + // Now, we'll emplace the slot for _this_ variable. + self.emplace_slot(ephemeral, available_i, coeff, y, q); + slots_available[available_i] = false; + + // Next, we'll free up a slot in the original wire + let free_i = (available_i + 1) % M; + + // by moving the term to the ephemeral wire. + self.move_slot(free_i, var, ephemeral); + + // The original wire has slot free_i available now, and + // the new wire has only available_i and (available_i + 1) % M + // occupied. As long as M>=3, this means available_i + 2 % M + // is a free wire for the ephemeral and it is distinct from + // free_i! So, we can relate the ephemeral variable to the + // original. + let iter = [(var, Coeff::One), (ephemeral, Coeff::NegativeOne)]; + let mut iter = iter.into_iter(); + let iter: &mut Iterator)> = &mut iter; + self.enforce_equals(iter.peekable(), None); + } + + // Move slot value from wire to another + fn move_slot(&mut self, slot: usize, from: Variable, to: Variable) { + let slot_val; + { + let from_vals = match from { + Variable::A(index) => &mut self.a[index - 1], + Variable::B(index) => &mut self.b[index - 1], + Variable::C(index) => &mut self.c[index - 1], + }; + + if from_vals[slot].is_none() { + // In this case, we do nothing. + return; + } + + slot_val = from_vals[slot].unwrap(); + from_vals[slot] = None; + } + + // We need the backend to compute the cached y^q value for us, + // if it needs it. 
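        // `slot_val` is the `(coeff, q)` pair recorded for this slot, so
        // `get_for_q(slot_val.1)` recovers the y-power of that linear
        // constraint; the term is then cancelled on `from` by re-inserting it
        // with a negated coefficient, and added to `to` unchanged.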
+ let y = self.backend.get_for_q(slot_val.1); + + self.backend.insert_coefficient(from, -slot_val.0, &y); // Negate coefficient to undo + + { + let to_vals = match to { + Variable::A(index) => &mut self.a[index - 1], + Variable::B(index) => &mut self.b[index - 1], + Variable::C(index) => &mut self.c[index - 1], + }; + + to_vals[slot] = Some(slot_val); + self.backend.insert_coefficient(to, slot_val.0, &y); + } + } + + // Place a coefficient in a slot + fn emplace_slot(&mut self, var: Variable, slot_index: usize, coeff: Coeff, y: &B::LinearConstraintIndex, q: usize) + { + let vals = match var { + Variable::A(index) => &mut self.a[index - 1], + Variable::B(index) => &mut self.b[index - 1], + Variable::C(index) => &mut self.c[index - 1], + }; + + vals[slot_index] = Some((coeff, q)); + + self.backend.insert_coefficient(var, coeff, &y); + } + + // Get available slots for a wire + fn get_wire_slots(&self, var: Variable) -> [bool; M] { + let vals = match var { + Variable::A(index) => &self.a[index - 1], + Variable::B(index) => &self.b[index - 1], + Variable::C(index) => &self.c[index - 1], + }; + + let mut slots = [true; M]; + for i in 0..M { + if vals[i].is_some() { + slots[i] = false; + } + } + + slots + } + + // If a variable changes value, we probably need to adjust. + fn fix_variable_assignment(&mut self, var: Variable) { + let index = var.get_index(); + + let a_value = self.backend.get_var(Variable::A(index)); + let b_value = self.backend.get_var(Variable::B(index)); + + let c_value = match (a_value, b_value) { + (Some(mut a), Some(b)) => { + a.mul_assign(&b); + Some(a) + }, + _ => { None } + }; + + self.backend.set_var(Variable::C(index), || { + c_value.ok_or(SynthesisError::AssignmentMissing) + }).expect("assignment exists if the closure is called"); + } +} \ No newline at end of file diff --git a/bellman/src/sonic/sonic/mod.rs b/bellman/src/sonic/sonic/mod.rs new file mode 100644 index 0000000..00f32c4 --- /dev/null +++ b/bellman/src/sonic/sonic/mod.rs @@ -0,0 +1,11 @@ +mod adaptor; +mod synthesis_drivers; +mod backends; +mod constraint_systems; + +pub use self::adaptor::{Adaptor, AdaptorCircuit}; +pub use self::synthesis_drivers::{Basic, Nonassigning, Permutation3}; +pub use self::backends::{CountNandQ, CountN, Preprocess, Wires}; +pub use self::constraint_systems::{NonassigningSynthesizer, Synthesizer, PermutationSynthesizer}; + +pub const M: usize = 3; \ No newline at end of file diff --git a/bellman/src/sonic/sonic/synthesis_drivers.rs b/bellman/src/sonic/sonic/synthesis_drivers.rs new file mode 100644 index 0000000..1ca7e5c --- /dev/null +++ b/bellman/src/sonic/sonic/synthesis_drivers.rs @@ -0,0 +1,126 @@ +use std::marker::PhantomData; +use crate::sonic::cs::{Backend, SynthesisDriver}; +use crate::pairing::{Engine}; +use crate::sonic::cs::{Variable, Circuit, ConstraintSystem, LinearCombination}; +use crate::SynthesisError; + +use crate::pairing::ff::{Field}; + +use super::constraint_systems::{NonassigningSynthesizer, Synthesizer, PermutationSynthesizer}; + +pub struct Basic; + +impl SynthesisDriver for Basic { + fn synthesize, B: Backend>(backend: B, circuit: &C) -> Result<(), SynthesisError> { + let mut tmp: Synthesizer = Synthesizer::new(backend); + + let one = tmp.alloc_input(|| Ok(E::Fr::one())).expect("should have no issues"); + + match (one, as ConstraintSystem>::ONE) { + (Variable::A(1), Variable::A(1)) => {}, + _ => panic!("one variable is incorrect") + } + + circuit.synthesize(&mut tmp)?; + + Ok(()) + } +} + +pub struct Nonassigning; + +impl SynthesisDriver for Nonassigning 
{ + fn synthesize, B: Backend>(backend: B, circuit: &C) -> Result<(), SynthesisError> { + let mut tmp: NonassigningSynthesizer = NonassigningSynthesizer::new(backend); + + let one = tmp.alloc_input(|| Ok(E::Fr::one())).expect("should have no issues"); + + match (one, as ConstraintSystem>::ONE) { + (Variable::A(1), Variable::A(1)) => {}, + _ => panic!("one variable is incorrect") + } + + circuit.synthesize(&mut tmp)?; + + Ok(()) + } +} + +/* + +In order to use the fully succinct version of Sonic, the resulting s(X, Y) polynomial +must be in a more "trivial" form + +s(X, Y) = X^{-N - 1} Y^N s_1(X, Y) - X^N s_2(X, Y) + +where + +s_1(X, Y) = \sum\limits_{i=1}^N u'_i(Y) X^{-i + N + 1} + + \sum\limits_{i=1}^N v'_i(Y) X^{i + N + 1} + + \sum\limits_{i=1}^N w'_i(Y) X^{i + 2N + 1} +s_2(X, Y) = \sum\limits_{i=1}^N (Y^i + Y^{-i}) X^i + +u'_i(Y) = \sum\limits_{q=1}^Q Y^q u_{q,i} +v'_i(Y) = \sum\limits_{q=1}^Q Y^q v_{q,i} +w'_i(Y) = \sum\limits_{q=1}^Q Y^q w_{q,i} + +such that s_1(X, Y) can be expressed as the sum of M permutation polynomials. + +It is trivial for the verifier to evaluate s_2(X, Y), since polynomials of the form +x + x^2 + x^3 + ... can be evaluated with a logarithmic number of field operations. + +In order to get s_1(X, Y) into the form needed, each constituent permutation polynomial +is effectively of the form + +s_j(X, Y) = \sum\limits_{i=1}^{3N+1} c_i X^i Y^\sigma_j(i) + +where \sigma_j(i) defines the permutation. The X^i corresponds to the wire, and the +Y^\sigma_j(i) corresponds to the index of the linear constraint. + +This effectively means that within each polynomial there can be only one particular +X^i term, and so wires can only appear in M different linear combinations. Further, +because there is only ever a particular Y^i term in each M permutation polynomial, +linear combinations can have only M wires. + +In order to synthesize a constraint system into a form that supports this wonky +arrangement, we need M>=3. The general goal is to treat each permutation polynomial +as a "slot" and, when constructing linear constraints, keep track of which slots are +"occupied" by wires, either with respect to the wires themselves or with respect to +the linear combination as it is being assembled. + +If the linear combination has more than M terms, then we need to recursively +construct ephemeral wires to hold the values of the remaining terms, and relate those +wires to those terms in new linear combinations. + +Once our linear combinations are small enough to fit the terms into the M slots, +we eagerly shove the terms in. The easy case is when a slot is available for both +the wire and the linear combination. The remaining cases can be addressed generally +by imagining that the wire has no available slots. We will create a new ephemeral +wire that holds the same value as the original wire and use this wire to insert the +linear combination. Then, we'll swap one of the terms from another slot into the new +ephemeral wire, freeing a slot in the original wire. Then, we trivially have that the +new wire and old wire have distinct slots free (since M>=3) and so we can now force +that they become equal. + +In terms of actually implementing this, things can get tricky. We don't want to end +up in a circumstance where we are infinitely recursing, which can happen depending on +the order we create linear combinations for the ephemeral variables. 
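As a concrete note on the claim above that s_2(X, Y) is cheap for the verifier:
at a point (x, y) it splits into two geometric series,

s_2(x, y) = \sum\limits_{i=1}^N (x y)^i + \sum\limits_{i=1}^N (x y^{-1})^i,

and each series \sum\limits_{i=1}^N t^i equals t (t^N - 1) / (t - 1) whenever
t != 1 (and is simply N when t = 1), so evaluating it takes one exponentiation
by N via square-and-multiply, one inversion and a few multiplications: a
logarithmic number of field operations in N, as stated.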
+*/ +pub struct Permutation3; + +impl SynthesisDriver for Permutation3 { + fn synthesize, B: Backend>(backend: B, circuit: &C) -> Result<(), SynthesisError> { + let mut tmp: PermutationSynthesizer = PermutationSynthesizer::new(backend); + + let one = tmp.alloc_input(|| Ok(E::Fr::one())).expect("should have no issues"); + + match (one, as ConstraintSystem>::ONE) { + (Variable::A(1), Variable::A(1)) => {}, + _ => panic!("one variable is incorrect") + } + + circuit.synthesize(&mut tmp)?; + + Ok(()) + } +} diff --git a/bellman/src/sonic/srs/mod.rs b/bellman/src/sonic/srs/mod.rs new file mode 100644 index 0000000..db33799 --- /dev/null +++ b/bellman/src/sonic/srs/mod.rs @@ -0,0 +1,2 @@ +mod srs; +pub use self::srs::SRS; \ No newline at end of file diff --git a/bellman/src/sonic/srs/srs.rs b/bellman/src/sonic/srs/srs.rs new file mode 100644 index 0000000..16c8887 --- /dev/null +++ b/bellman/src/sonic/srs/srs.rs @@ -0,0 +1,274 @@ +use crate::pairing::ff::{Field, PrimeField}; +use crate::pairing::{CurveAffine, CurveProjective, Engine, Wnaf}; + +use std::io::{self, Read, Write}; +use std::sync::Arc; +use byteorder::{BigEndian, WriteBytesExt, ReadBytesExt}; + +#[derive(Clone, Eq)] +pub struct SRS { + pub d: usize, + + // g^{x^0}, g^{x^{-1}}, g^{x^{-2}}, ..., g^{x^{-d}} + pub g_negative_x: Vec, + + // g^{x^0}, g^{x^{1}}, g^{x^{2}}, ..., g^{x^{d}} + pub g_positive_x: Vec, + + // g^{x^0}, g^{x^{-1}}, g^{x^{-2}}, ..., g^{x^{-d}} + pub h_negative_x: Vec, + + // g^{x^0}, g^{x^{1}}, g^{x^{2}}, ..., g^{x^{d}} + pub h_positive_x: Vec, + + // alpha*(g^{x^{-1}}, g^{x^{-2}}, ..., g^{x^{-d}}) + pub g_negative_x_alpha: Vec, + + // alpha*(g^{x^{1}}, g^{x^{2}}, ..., g^{x^{d}}) + pub g_positive_x_alpha: Vec, + + // alpha*(h^{x^0}, h^{x^{-1}}, g^{x^{-2}}, ..., g^{x^{-d}}) + pub h_negative_x_alpha: Vec, + + // alpha*(h^{x^0}, g^{x^{1}}, g^{x^{2}}, ..., g^{x^{d}}) + pub h_positive_x_alpha: Vec, +} + +impl PartialEq for SRS { + fn eq(&self, other: &SRS) -> bool { + self.d == other.d && + self.g_negative_x == other.g_negative_x && + self.g_positive_x == other.g_positive_x && + self.h_negative_x == other.h_negative_x && + self.h_positive_x == other.h_positive_x && + self.g_negative_x_alpha == other.g_negative_x_alpha && + self.g_positive_x_alpha == other.g_positive_x_alpha && + self.h_negative_x_alpha == other.h_negative_x_alpha && + self.h_positive_x_alpha == other.h_positive_x_alpha + } +} + +impl SRS { + pub fn dummy(d: usize, _: E::Fr, _: E::Fr) -> Self { + SRS { + d: d, + g_negative_x: vec![E::G1Affine::one(); d + 1], + g_positive_x: vec![E::G1Affine::one(); d + 1], + + h_negative_x: vec![E::G2Affine::one(); d + 1], + h_positive_x: vec![E::G2Affine::one(); d + 1], + + g_negative_x_alpha: vec![E::G1Affine::one(); d], + g_positive_x_alpha: vec![E::G1Affine::one(); d], + + h_negative_x_alpha: vec![E::G2Affine::one(); d + 1], + h_positive_x_alpha: vec![E::G2Affine::one(); d + 1], + } + } + + pub fn new(d: usize, x: E::Fr, alpha: E::Fr) -> Self { + let mut g1 = Wnaf::new(); + let mut g1 = g1.base(E::G1::one(), d * 4); + let mut g2 = Wnaf::new(); + let mut g2 = g2.base(E::G2::one(), d * 4); + + fn table( + mut cur: C::Scalar, + step: C::Scalar, + num: usize, + table: &mut Wnaf>, + ) -> Vec { + let mut v = vec![]; + for _ in 0..num { + v.push(table.scalar(cur.into_repr())); + cur.mul_assign(&step); + } + C::Projective::batch_normalization(&mut v); + let v = v.into_iter().map(|e| e.into_affine()).collect(); + v + } + + let x_inv = x.inverse().unwrap(); + + let mut x_alpha = x; + x_alpha.mul_assign(&alpha); + + let mut 
inv_x_alpha = x_inv; + inv_x_alpha.mul_assign(&alpha); + + SRS { + d: d, + g_negative_x: table(E::Fr::one(), x_inv, d + 1, &mut g1), + g_positive_x: table(E::Fr::one(), x, d + 1, &mut g1), + + h_negative_x: table(E::Fr::one(), x_inv, d + 1, &mut g2), + h_positive_x: table(E::Fr::one(), x, d + 1, &mut g2), + + g_negative_x_alpha: table(inv_x_alpha, x_inv, d, &mut g1), + g_positive_x_alpha: table(x_alpha, x, d, &mut g1), + + h_negative_x_alpha: table(alpha, x_inv, d + 1, &mut g2), + h_positive_x_alpha: table(alpha, x, d + 1, &mut g2), + } + } +} + +impl SRS { + pub fn write( + &self, + mut writer: W + ) -> io::Result<()> + { + assert_eq!(self.d + 1, self.g_negative_x.len()); + assert_eq!(self.d + 1, self.g_positive_x.len()); + + assert_eq!(self.d + 1, self.h_negative_x.len()); + assert_eq!(self.d + 1, self.h_positive_x.len()); + + assert_eq!(self.d, self.g_negative_x_alpha.len()); + assert_eq!(self.d, self.g_positive_x_alpha.len()); + + assert_eq!(self.d + 1, self.h_negative_x_alpha.len()); + assert_eq!(self.d + 1, self.h_positive_x_alpha.len()); + + writer.write_u32::(self.d as u32)?; + + for g in &self.g_negative_x[..] { + writer.write_all(g.into_uncompressed().as_ref())?; + } + for g in &self.g_positive_x[..] { + writer.write_all(g.into_uncompressed().as_ref())?; + } + + for g in &self.h_negative_x[..] { + writer.write_all(g.into_uncompressed().as_ref())?; + } + for g in &self.h_positive_x[..] { + writer.write_all(g.into_uncompressed().as_ref())?; + } + + for g in &self.g_negative_x_alpha[..] { + writer.write_all(g.into_uncompressed().as_ref())?; + } + for g in &self.g_positive_x_alpha[..] { + writer.write_all(g.into_uncompressed().as_ref())?; + } + + for g in &self.h_negative_x_alpha[..] { + writer.write_all(g.into_uncompressed().as_ref())?; + } + for g in &self.h_positive_x_alpha[..] { + writer.write_all(g.into_uncompressed().as_ref())?; + } + + Ok(()) + } + + pub fn read( + mut reader: R, + checked: bool + ) -> io::Result + { + use crate::pairing::EncodedPoint; + + let read_g1 = |reader: &mut R| -> io::Result { + let mut repr = ::Uncompressed::empty(); + reader.read_exact(repr.as_mut())?; + + if checked { + repr + .into_affine() + } else { + repr + .into_affine_unchecked() + } + .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e)) + .and_then(|e| if e.is_zero() { + Err(io::Error::new(io::ErrorKind::InvalidData, "point at infinity")) + } else { + Ok(e) + }) + }; + + let read_g2 = |reader: &mut R| -> io::Result { + let mut repr = ::Uncompressed::empty(); + reader.read_exact(repr.as_mut())?; + + if checked { + repr + .into_affine() + } else { + repr + .into_affine_unchecked() + } + .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e)) + .and_then(|e| if e.is_zero() { + Err(io::Error::new(io::ErrorKind::InvalidData, "point at infinity")) + } else { + Ok(e) + }) + }; + + let mut g_negative_x = vec![]; + let mut g_positive_x = vec![]; + + let mut h_negative_x = vec![]; + let mut h_positive_x = vec![]; + + let mut g_negative_x_alpha = vec![]; + let mut g_positive_x_alpha = vec![]; + + let mut h_negative_x_alpha = vec![]; + let mut h_positive_x_alpha = vec![]; + + let d = reader.read_u32::()? 
as usize; + + { + for _ in 0..(d+1) { + g_negative_x.push(read_g1(&mut reader)?); + } + for _ in 0..(d+1) { + g_positive_x.push(read_g1(&mut reader)?); + } + } + + { + for _ in 0..(d+1) { + h_negative_x.push(read_g2(&mut reader)?); + } + for _ in 0..(d+1) { + h_positive_x.push(read_g2(&mut reader)?); + } + } + + { + for _ in 0..d { + g_negative_x_alpha.push(read_g1(&mut reader)?); + } + for _ in 0..d { + g_positive_x_alpha.push(read_g1(&mut reader)?); + } + } + + { + for _ in 0..(d+1) { + h_negative_x_alpha.push(read_g2(&mut reader)?); + } + for _ in 0..(d+1) { + h_positive_x_alpha.push(read_g2(&mut reader)?); + } + } + + Ok(Self { + d: d, + g_negative_x: g_negative_x, + g_positive_x: g_positive_x, + h_negative_x: h_negative_x, + h_positive_x: h_positive_x, + g_negative_x_alpha: g_negative_x_alpha, + g_positive_x_alpha: g_positive_x_alpha, + h_negative_x_alpha: h_negative_x_alpha, + h_positive_x_alpha: h_positive_x_alpha + }) + } +} \ No newline at end of file diff --git a/bellman/src/sonic/tests/mod.rs b/bellman/src/sonic/tests/mod.rs new file mode 100644 index 0000000..075a109 --- /dev/null +++ b/bellman/src/sonic/tests/mod.rs @@ -0,0 +1 @@ +mod sonics; \ No newline at end of file diff --git a/bellman/src/sonic/tests/sonics.rs b/bellman/src/sonic/tests/sonics.rs new file mode 100644 index 0000000..3c406df --- /dev/null +++ b/bellman/src/sonic/tests/sonics.rs @@ -0,0 +1,871 @@ +extern crate rand; + +// For randomness (during paramgen and proof generation) +use rand::{thread_rng, Rng}; + +// For benchmarking +use std::time::{Duration, Instant}; + +// Bring in some tools for using pairing-friendly curves +use crate::pairing::{ + Engine +}; + +use crate::pairing::ff::{ + Field, +}; + +// We're going to use the BLS12-381 pairing-friendly elliptic curve. +use crate::pairing::bls12_381::{ + Bls12 +}; + +use crate::pairing::bn256::{ + Bn256 +}; + +// We'll use these interfaces to construct our circuit. +use crate::{ + Circuit, + ConstraintSystem, + SynthesisError +}; + +// const MIMC_ROUNDS: usize = 322; + +const MIMC_ROUNDS: usize = 1000000; + +fn mimc( + mut xl: E::Fr, + mut xr: E::Fr, + constants: &[E::Fr] +) -> E::Fr +{ + assert_eq!(constants.len(), MIMC_ROUNDS); + + for i in 0..MIMC_ROUNDS { + let mut tmp1 = xl; + tmp1.add_assign(&constants[i]); + let mut tmp2 = tmp1; + tmp2.square(); + tmp2.mul_assign(&tmp1); + tmp2.add_assign(&xr); + xr = xl; + xl = tmp2; + } + + xl +} + +/// This is our demo circuit for proving knowledge of the +/// preimage of a MiMC hash invocation. +#[derive(Clone)] +struct MiMCDemo<'a, E: Engine> { + xl: Option, + xr: Option, + constants: &'a [E::Fr] +} + +/// Our demo circuit implements this `Circuit` trait which +/// is used during paramgen and proving in order to +/// synthesize the constraint system. +impl<'a, E: Engine> Circuit for MiMCDemo<'a, E> { + fn synthesize>( + self, + cs: &mut CS + ) -> Result<(), SynthesisError> + { + assert_eq!(self.constants.len(), MIMC_ROUNDS); + + // Allocate the first component of the preimage. + let mut xl_value = self.xl; + let mut xl = cs.alloc(|| "preimage xl", || { + xl_value.ok_or(SynthesisError::AssignmentMissing) + })?; + + // Allocate the second component of the preimage. 
+ let mut xr_value = self.xr; + let mut xr = cs.alloc(|| "preimage xr", || { + xr_value.ok_or(SynthesisError::AssignmentMissing) + })?; + + for i in 0..MIMC_ROUNDS { + // xL, xR := xR + (xL + Ci)^3, xL + let cs = &mut cs.namespace(|| format!("round {}", i)); + + // tmp = (xL + Ci)^2 + let tmp_value = xl_value.map(|mut e| { + e.add_assign(&self.constants[i]); + e.square(); + e + }); + let tmp = cs.alloc(|| "tmp", || { + tmp_value.ok_or(SynthesisError::AssignmentMissing) + })?; + + cs.enforce( + || "tmp = (xL + Ci)^2", + |lc| lc + xl + (self.constants[i], CS::one()), + |lc| lc + xl + (self.constants[i], CS::one()), + |lc| lc + tmp + ); + + // new_xL = xR + (xL + Ci)^3 + // new_xL = xR + tmp * (xL + Ci) + // new_xL - xR = tmp * (xL + Ci) + let new_xl_value = xl_value.map(|mut e| { + e.add_assign(&self.constants[i]); + e.mul_assign(&tmp_value.unwrap()); + e.add_assign(&xr_value.unwrap()); + e + }); + + let new_xl = if i == (MIMC_ROUNDS-1) { + // This is the last round, xL is our image and so + // we allocate a public input. + cs.alloc_input(|| "image", || { + new_xl_value.ok_or(SynthesisError::AssignmentMissing) + })? + } else { + cs.alloc(|| "new_xl", || { + new_xl_value.ok_or(SynthesisError::AssignmentMissing) + })? + }; + + cs.enforce( + || "new_xL = xR + (xL + Ci)^3", + |lc| lc + tmp, + |lc| lc + xl + (self.constants[i], CS::one()), + |lc| lc + new_xl - xr + ); + + // xR = xL + xr = xl; + xr_value = xl_value; + + // xL = new_xL + xl = new_xl; + xl_value = new_xl_value; + } + + Ok(()) + } +} + +/// This is our demo circuit for proving knowledge of the +/// preimage of a MiMC hash invocation. +#[derive(Clone)] +struct MiMCDemoNoInputs<'a, E: Engine> { + xl: Option, + xr: Option, + image: Option, + constants: &'a [E::Fr] +} + +/// Our demo circuit implements this `Circuit` trait which +/// is used during paramgen and proving in order to +/// synthesize the constraint system. +impl<'a, E: Engine> Circuit for MiMCDemoNoInputs<'a, E> { + fn synthesize>( + self, + cs: &mut CS + ) -> Result<(), SynthesisError> + { + assert_eq!(self.constants.len(), MIMC_ROUNDS); + + // Allocate the first component of the preimage. + let mut xl_value = self.xl; + let mut xl = cs.alloc(|| "preimage xl", || { + xl_value.ok_or(SynthesisError::AssignmentMissing) + })?; + + // Allocate the second component of the preimage. + let mut xr_value = self.xr; + let mut xr = cs.alloc(|| "preimage xr", || { + xr_value.ok_or(SynthesisError::AssignmentMissing) + })?; + + for i in 0..MIMC_ROUNDS { + // xL, xR := xR + (xL + Ci)^3, xL + let cs = &mut cs.namespace(|| format!("round {}", i)); + + // tmp = (xL + Ci)^2 + let tmp_value = xl_value.map(|mut e| { + e.add_assign(&self.constants[i]); + e.square(); + e + }); + let tmp = cs.alloc(|| "tmp", || { + tmp_value.ok_or(SynthesisError::AssignmentMissing) + })?; + + cs.enforce( + || "tmp = (xL + Ci)^2", + |lc| lc + xl + (self.constants[i], CS::one()), + |lc| lc + xl + (self.constants[i], CS::one()), + |lc| lc + tmp + ); + + // new_xL = xR + (xL + Ci)^3 + // new_xL = xR + tmp * (xL + Ci) + // new_xL - xR = tmp * (xL + Ci) + let new_xl_value = xl_value.map(|mut e| { + e.add_assign(&self.constants[i]); + e.mul_assign(&tmp_value.unwrap()); + e.add_assign(&xr_value.unwrap()); + e + }); + + let new_xl = if i == (MIMC_ROUNDS-1) { + // This is the last round, xL is our image and so + // we use the image + let image_value = self.image; + cs.alloc(|| "image", || { + image_value.ok_or(SynthesisError::AssignmentMissing) + })? 
+ } else { + cs.alloc(|| "new_xl", || { + new_xl_value.ok_or(SynthesisError::AssignmentMissing) + })? + }; + + cs.enforce( + || "new_xL = xR + (xL + Ci)^3", + |lc| lc + tmp, + |lc| lc + xl + (self.constants[i], CS::one()), + |lc| lc + new_xl - xr + ); + + // xR = xL + xr = xl; + xr_value = xl_value; + + // xL = new_xL + xl = new_xl; + xl_value = new_xl_value; + } + + Ok(()) + } +} + +#[test] +fn test_sonic_mimc() { + use crate::pairing::ff::{Field, PrimeField}; + use crate::pairing::{Engine, CurveAffine, CurveProjective}; + use crate::pairing::bls12_381::{Bls12, Fr}; + use std::time::{Instant}; + use crate::sonic::srs::SRS; + + let srs_x = Fr::from_str("23923").unwrap(); + let srs_alpha = Fr::from_str("23728792").unwrap(); + println!("making srs"); + let start = Instant::now(); + let srs = SRS::::dummy(830564, srs_x, srs_alpha); + println!("done in {:?}", start.elapsed()); + + { + // This may not be cryptographically safe, use + // `OsRng` (for example) in production software. + let rng = &mut thread_rng(); + + // Generate the MiMC round constants + let constants = (0..MIMC_ROUNDS).map(|_| rng.gen()).collect::>(); + let samples: usize = 100; + + let xl = rng.gen(); + let xr = rng.gen(); + let image = mimc::(xl, xr, &constants); + + // Create an instance of our circuit (with the + // witness) + let circuit = MiMCDemoNoInputs { + xl: Some(xl), + xr: Some(xr), + image: Some(image), + constants: &constants + }; + + use crate::sonic::sonic::Basic; + use crate::sonic::sonic::AdaptorCircuit; + use crate::sonic::helped::prover::{create_advice_on_srs, create_proof_on_srs}; + use crate::sonic::helped::{MultiVerifier, get_circuit_parameters}; + use crate::sonic::helped::helper::{create_aggregate_on_srs}; + + println!("creating proof"); + let start = Instant::now(); + let proof = create_proof_on_srs::(&AdaptorCircuit(circuit.clone()), &srs).unwrap(); + println!("done in {:?}", start.elapsed()); + + println!("creating advice"); + let start = Instant::now(); + let advice = create_advice_on_srs::(&AdaptorCircuit(circuit.clone()), &proof, &srs).unwrap(); + println!("done in {:?}", start.elapsed()); + + println!("creating aggregate for {} proofs", samples); + let start = Instant::now(); + let proofs: Vec<_> = (0..samples).map(|_| (proof.clone(), advice.clone())).collect(); + let aggregate = create_aggregate_on_srs::(&AdaptorCircuit(circuit.clone()), &proofs, &srs); + println!("done in {:?}", start.elapsed()); + + { + let rng = thread_rng(); + let mut verifier = MultiVerifier::::new(AdaptorCircuit(circuit.clone()), &srs, rng).unwrap(); + println!("verifying 1 proof without advice"); + let start = Instant::now(); + { + for _ in 0..1 { + verifier.add_proof(&proof, &[], |_, _| None); + } + assert_eq!(verifier.check_all(), true); // TODO + } + println!("done in {:?}", start.elapsed()); + } + + { + let rng = thread_rng(); + let mut verifier = MultiVerifier::::new(AdaptorCircuit(circuit.clone()), &srs, rng).unwrap(); + println!("verifying {} proofs without advice", samples); + let start = Instant::now(); + { + for _ in 0..samples { + verifier.add_proof(&proof, &[], |_, _| None); + } + assert_eq!(verifier.check_all(), true); // TODO + } + println!("done in {:?}", start.elapsed()); + } + + { + let rng = thread_rng(); + let mut verifier = MultiVerifier::::new(AdaptorCircuit(circuit.clone()), &srs, rng).unwrap(); + println!("verifying 100 proofs with advice"); + let start = Instant::now(); + { + for (ref proof, ref advice) in &proofs { + verifier.add_proof_with_advice(proof, &[], advice); + } + 
verifier.add_aggregate(&proofs, &aggregate); + assert_eq!(verifier.check_all(), true); // TODO + } + println!("done in {:?}", start.elapsed()); + } + } +} + + +#[test] +fn test_sonic_mimc_in_permutation_driver() { + use crate::pairing::ff::{Field, PrimeField}; + use crate::pairing::{Engine, CurveAffine, CurveProjective}; + use crate::pairing::bls12_381::{Bls12, Fr}; + use std::time::{Instant}; + use crate::sonic::srs::SRS; + + let srs_x = Fr::from_str("23923").unwrap(); + let srs_alpha = Fr::from_str("23728792").unwrap(); + println!("making srs"); + let start = Instant::now(); + let srs = SRS::::dummy(830564, srs_x, srs_alpha); + println!("done in {:?}", start.elapsed()); + + { + // This may not be cryptographically safe, use + // `OsRng` (for example) in production software. + let rng = &mut thread_rng(); + + // Generate the MiMC round constants + let constants = (0..MIMC_ROUNDS).map(|_| rng.gen()).collect::>(); + let samples: usize = 100; + + let xl = rng.gen(); + let xr = rng.gen(); + let image = mimc::(xl, xr, &constants); + + // Create an instance of our circuit (with the + // witness) + let circuit = MiMCDemoNoInputs { + xl: Some(xl), + xr: Some(xr), + image: Some(image), + constants: &constants + }; + + use crate::sonic::sonic::Basic; + use crate::sonic::sonic::AdaptorCircuit; + use crate::sonic::helped::prover::{create_advice_on_srs, create_proof_on_srs}; + use crate::sonic::helped::{MultiVerifier, get_circuit_parameters}; + use crate::sonic::helped::helper::{create_aggregate_on_srs}; + use crate::sonic::sonic::Permutation3; + + println!("creating proof"); + let start = Instant::now(); + let proof = create_proof_on_srs::(&AdaptorCircuit(circuit.clone()), &srs).unwrap(); + println!("done in {:?}", start.elapsed()); + + println!("creating advice"); + let start = Instant::now(); + let advice = create_advice_on_srs::(&AdaptorCircuit(circuit.clone()), &proof, &srs).unwrap(); + println!("done in {:?}", start.elapsed()); + + println!("creating aggregate for {} proofs", samples); + let start = Instant::now(); + let proofs: Vec<_> = (0..samples).map(|_| (proof.clone(), advice.clone())).collect(); + let aggregate = create_aggregate_on_srs::(&AdaptorCircuit(circuit.clone()), &proofs, &srs); + println!("done in {:?}", start.elapsed()); + + { + let rng = thread_rng(); + let mut verifier = MultiVerifier::::new(AdaptorCircuit(circuit.clone()), &srs, rng).unwrap(); + println!("verifying 1 proof without advice"); + let start = Instant::now(); + { + for _ in 0..1 { + verifier.add_proof(&proof, &[], |_, _| None); + } + assert_eq!(verifier.check_all(), true); // TODO + } + println!("done in {:?}", start.elapsed()); + } + + { + let rng = thread_rng(); + let mut verifier = MultiVerifier::::new(AdaptorCircuit(circuit.clone()), &srs, rng).unwrap(); + println!("verifying {} proofs without advice", samples); + let start = Instant::now(); + { + for _ in 0..samples { + verifier.add_proof(&proof, &[], |_, _| None); + } + assert_eq!(verifier.check_all(), true); // TODO + } + println!("done in {:?}", start.elapsed()); + } + + { + let rng = thread_rng(); + let mut verifier = MultiVerifier::::new(AdaptorCircuit(circuit.clone()), &srs, rng).unwrap(); + println!("verifying 100 proofs with advice"); + let start = Instant::now(); + { + for (ref proof, ref advice) in &proofs { + verifier.add_proof_with_advice(proof, &[], advice); + } + verifier.add_aggregate(&proofs, &aggregate); + assert_eq!(verifier.check_all(), true); // TODO + } + println!("done in {:?}", start.elapsed()); + } + } +} + +#[test] +fn 
test_succinct_sonic_mimc() { + use crate::pairing::ff::{Field, PrimeField}; + use crate::pairing::{Engine, CurveAffine, CurveProjective}; + use crate::pairing::bls12_381::{Bls12, Fr}; + use std::time::{Instant}; + use crate::sonic::srs::SRS; + + let srs_x = Fr::from_str("23923").unwrap(); + let srs_alpha = Fr::from_str("23728792").unwrap(); + println!("making srs"); + let start = Instant::now(); + // let srs = SRS::::dummy(830564, srs_x, srs_alpha); + let srs = SRS::::dummy(40000000, srs_x, srs_alpha); + println!("done in {:?}", start.elapsed()); + + { + use rand::{XorShiftRng, SeedableRng, Rand, Rng}; + let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]); + + // Generate the MiMC round constants + // let constants = (0..MIMC_ROUNDS).map(|_| rng.gen()).collect::>(); + let constants = (0..MIMC_ROUNDS).map(|_| Fr::one()).collect::>(); + let samples: usize = 100; + + let xl = rng.gen(); + let xr = rng.gen(); + let image = mimc::(xl, xr, &constants); + + // Create an instance of our circuit (with the + // witness) + let circuit = MiMCDemoNoInputs:: { + xl: Some(xl), + xr: Some(xr), + image: Some(image), + constants: &constants + }; + + use crate::sonic::sonic::Basic; + use crate::sonic::sonic::AdaptorCircuit; + use crate::sonic::helped::prover::{create_advice_on_srs, create_proof_on_srs}; + use crate::sonic::helped::{get_circuit_parameters_for_succinct_sonic, MultiVerifier}; + use crate::sonic::sonic::Permutation3; + use crate::sonic::unhelped::permutation_structure::*; + use crate::sonic::unhelped::SuccinctMultiVerifier; + use crate::sonic::unhelped::{create_aggregate_on_srs}; + + use crate::sonic::cs::{Circuit, ConstraintSystem, LinearCombination, Coeff}; + + let perm_structure = create_permutation_structure::(&AdaptorCircuit(circuit.clone())); + let s1_srs = perm_structure.create_permutation_special_reference(&srs); + // let s2_srs = perm_structure.calculate_s2_commitment_value(&srs); + + let info = get_circuit_parameters_for_succinct_sonic::(circuit.clone()).expect("Must get circuit info"); + println!("{:?}", info); + + println!("creating proof"); + let start = Instant::now(); + let proof = create_proof_on_srs::(&AdaptorCircuit(circuit.clone()), &srs).unwrap(); + println!("done in {:?}", start.elapsed()); + + println!("creating advice"); + let start = Instant::now(); + let advice = create_advice_on_srs::(&AdaptorCircuit(circuit.clone()), &proof, &srs).unwrap(); + println!("done in {:?}", start.elapsed()); + + println!("creating aggregate for {} proofs", samples); + let start = Instant::now(); + let proofs: Vec<_> = (0..samples).map(|_| (proof.clone(), advice.clone())).collect(); + let aggregate = create_aggregate_on_srs::(&AdaptorCircuit(circuit.clone()), &proofs, &srs, &s1_srs); + println!("done in {:?}", start.elapsed()); + + // { + // let rng = thread_rng(); + // let mut verifier = MultiVerifier::::new(AdaptorCircuit(circuit.clone()), &srs, rng).unwrap(); + // println!("verifying 1 proof without advice"); + // let start = Instant::now(); + // { + // for _ in 0..1 { + // verifier.add_proof(&proof, &[], |_, _| None); + // } + // assert_eq!(verifier.check_all(), true); // TODO + // } + // println!("done in {:?}", start.elapsed()); + // } + + // { + // let rng = thread_rng(); + // let mut verifier = MultiVerifier::::new(AdaptorCircuit(circuit.clone()), &srs, rng).unwrap(); + // println!("verifying {} proofs without advice", samples); + // let start = Instant::now(); + // { + // for _ in 0..samples { + // verifier.add_proof(&proof, &[], |_, _| None); + // 
} + // assert_eq!(verifier.check_all(), true); // TODO + // } + // println!("done in {:?}", start.elapsed()); + // } + + { + use rand::{XorShiftRng, SeedableRng, Rand, Rng}; + let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]); + + let mut verifier = SuccinctMultiVerifier::::new(AdaptorCircuit(circuit.clone()), &srs, rng).unwrap(); + println!("verifying 100 proofs with succinct advice"); + let start = Instant::now(); + { + for (ref proof, ref advice) in &proofs { + verifier.add_proof_with_advice(proof, &[], advice); + } + verifier.add_aggregate( + &proofs, + &aggregate, + &srs, + ); + assert_eq!(verifier.check_all(), true); // TODO + } + println!("done in {:?}", start.elapsed()); + } + } +} + +#[test] +fn test_inputs_into_sonic_mimc() { + use crate::pairing::ff::{Field, PrimeField}; + use crate::pairing::{Engine, CurveAffine, CurveProjective}; + use crate::pairing::bn256::{Bn256, Fr}; + // use crate::pairing::bls12_381::{Bls12, Fr}; + use std::time::{Instant}; + use crate::sonic::srs::SRS; + + let srs_x = Fr::from_str("23923").unwrap(); + let srs_alpha = Fr::from_str("23728792").unwrap(); + println!("making srs"); + let start = Instant::now(); + let srs = SRS::::dummy(830564, srs_x, srs_alpha); + println!("done in {:?}", start.elapsed()); + + { + // This may not be cryptographically safe, use + // `OsRng` (for example) in production software. + let rng = &mut thread_rng(); + + // Generate the MiMC round constants + let constants = (0..MIMC_ROUNDS).map(|_| rng.gen()).collect::>(); + let samples: usize = 100; + + let xl = rng.gen(); + let xr = rng.gen(); + let image = mimc::(xl, xr, &constants); + + // Create an instance of our circuit (with the + // witness) + let circuit = MiMCDemo { + xl: Some(xl), + xr: Some(xr), + constants: &constants + }; + + use crate::sonic::sonic::Basic; + use crate::sonic::sonic::AdaptorCircuit; + use crate::sonic::helped::prover::{create_advice_on_srs, create_proof_on_srs}; + use crate::sonic::helped::{MultiVerifier, get_circuit_parameters}; + use crate::sonic::helped::helper::{create_aggregate_on_srs}; + + let info = get_circuit_parameters::(circuit.clone()).expect("Must get circuit info"); + println!("{:?}", info); + + println!("creating proof"); + let start = Instant::now(); + let proof = create_proof_on_srs::(&AdaptorCircuit(circuit.clone()), &srs).unwrap(); + println!("done in {:?}", start.elapsed()); + + println!("creating advice"); + let start = Instant::now(); + let advice = create_advice_on_srs::(&AdaptorCircuit(circuit.clone()), &proof, &srs).unwrap(); + println!("done in {:?}", start.elapsed()); + + println!("creating aggregate for {} proofs", samples); + let start = Instant::now(); + let proofs: Vec<_> = (0..samples).map(|_| (proof.clone(), advice.clone())).collect(); + let aggregate = create_aggregate_on_srs::(&AdaptorCircuit(circuit.clone()), &proofs, &srs); + println!("done in {:?}", start.elapsed()); + + { + let rng = thread_rng(); + let mut verifier = MultiVerifier::::new(AdaptorCircuit(circuit.clone()), &srs, rng).unwrap(); + println!("verifying 1 proof without advice"); + let start = Instant::now(); + { + for _ in 0..1 { + verifier.add_proof(&proof, &[image], |_, _| None); + } + assert_eq!(verifier.check_all(), true); // TODO + } + println!("done in {:?}", start.elapsed()); + } + + { + let rng = thread_rng(); + let mut verifier = MultiVerifier::::new(AdaptorCircuit(circuit.clone()), &srs, rng).unwrap(); + println!("verifying {} proofs without advice", samples); + let start = Instant::now(); + { + for _ in 
0..samples { + verifier.add_proof(&proof, &[image], |_, _| None); + } + assert_eq!(verifier.check_all(), true); // TODO + } + println!("done in {:?}", start.elapsed()); + } + + { + let rng = thread_rng(); + let mut verifier = MultiVerifier::::new(AdaptorCircuit(circuit.clone()), &srs, rng).unwrap(); + println!("verifying 100 proofs with advice and aggregate"); + let start = Instant::now(); + { + for (ref proof, ref advice) in &proofs { + verifier.add_proof_with_advice(proof, &[image], advice); + } + verifier.add_aggregate(&proofs, &aggregate); + assert_eq!(verifier.check_all(), true); // TODO + } + println!("done in {:?}", start.elapsed()); + } + } +} + +#[test] +fn test_high_level_sonic_api() { + use crate::pairing::bn256::{Bn256}; + use std::time::{Instant}; + use crate::sonic::helped::{ + generate_random_parameters, + verify_aggregate, + verify_proofs, + create_proof, + create_advice, + create_aggregate, + get_circuit_parameters + }; + + { + // This may not be cryptographically safe, use + // `OsRng` (for example) in production software. + let mut rng = &mut thread_rng(); + + // Generate the MiMC round constants + let constants = (0..MIMC_ROUNDS).map(|_| rng.gen()).collect::>(); + let samples: usize = 100; + + let xl = rng.gen(); + let xr = rng.gen(); + let image = mimc::(xl, xr, &constants); + + // Create an instance of our circuit (with the + // witness) + let circuit = MiMCDemo { + xl: Some(xl), + xr: Some(xr), + constants: &constants + }; + + let info = get_circuit_parameters::(circuit.clone()).expect("Must get circuit info"); + println!("{:?}", info); + + let params = generate_random_parameters(circuit.clone(), &mut rng).unwrap(); + + println!("creating proof"); + let start = Instant::now(); + let proof = create_proof(circuit.clone(), ¶ms).unwrap(); + println!("done in {:?}", start.elapsed()); + + println!("creating advice"); + let start = Instant::now(); + let advice = create_advice(circuit.clone(), &proof, ¶ms).unwrap(); + println!("done in {:?}", start.elapsed()); + + println!("creating aggregate for {} proofs", samples); + let proofs: Vec<_> = (0..samples).map(|_| (proof.clone(), advice.clone())).collect(); + + let start = Instant::now(); + let aggregate = create_aggregate::(circuit.clone(), &proofs, ¶ms); + println!("done in {:?}", start.elapsed()); + + { + println!("verifying 1 proof without advice"); + let rng = thread_rng(); + let start = Instant::now(); + assert_eq!(verify_proofs(&vec![proof.clone()], &vec![vec![image.clone()]], circuit.clone(), rng, ¶ms).unwrap(), true); + println!("done in {:?}", start.elapsed()); + } + + { + println!("verifying {} proofs without advice", samples); + let rng = thread_rng(); + let start = Instant::now(); + assert_eq!(verify_proofs(&vec![proof.clone(); 100], &vec![vec![image.clone()]; 100], circuit.clone(), rng, ¶ms).unwrap(), true); + println!("done in {:?}", start.elapsed()); + } + + { + println!("verifying 100 proofs with advice and aggregate"); + let rng = thread_rng(); + let start = Instant::now(); + assert_eq!(verify_aggregate(&vec![(proof.clone(), advice.clone()); 100], &aggregate, &vec![vec![image.clone()]; 100], circuit.clone(), rng, ¶ms).unwrap(), true); + println!("done in {:?}", start.elapsed()); + } + } +} + +// #[test] +// fn test_constraints_info() { +// use crate::pairing::bn256::{Bn256}; +// use std::time::{Instant}; +// use crate::sonic::unhelped::padding::{constraints_info}; +// { +// // This may not be cryptographically safe, use +// // `OsRng` (for example) in production software. 
+// let mut rng = &mut thread_rng(); + +// // Generate the MiMC round constants +// let constants = (0..MIMC_ROUNDS).map(|_| rng.gen()).collect::>(); + +// let xl = rng.gen(); +// let xr = rng.gen(); +// let image = mimc::(xl, xr, &constants); + +// // Create an instance of our circuit (with the +// // witness) +// let circuit = MiMCDemo { +// xl: Some(xl), +// xr: Some(xr), +// constants: &constants +// }; + +// constraints_info::(circuit.clone()); +// } +// } + +// #[test] +// fn test_padding_using_mimc() { +// use crate::pairing::ff::{Field, PrimeField}; +// use crate::pairing::{Engine, CurveAffine, CurveProjective}; +// use crate::pairing::bls12_381::{Bls12, Fr}; +// use std::time::{Instant}; +// use crate::sonic::srs::SRS; + +// let srs_x = Fr::from_str("23923").unwrap(); +// let srs_alpha = Fr::from_str("23728792").unwrap(); +// println!("making srs"); +// let start = Instant::now(); +// let srs = SRS::::dummy(830564, srs_x, srs_alpha); +// println!("done in {:?}", start.elapsed()); + +// { +// // This may not be cryptographically safe, use +// // `OsRng` (for example) in production software. +// let rng = &mut thread_rng(); + +// // Generate the MiMC round constants +// let constants = (0..MIMC_ROUNDS).map(|_| rng.gen()).collect::>(); +// let samples: usize = 100; + +// let xl = rng.gen(); +// let xr = rng.gen(); +// let image = mimc::(xl, xr, &constants); + +// // Create an instance of our circuit (with the +// // witness) +// let circuit = MiMCDemoNoInputs { +// xl: Some(xl), +// xr: Some(xr), +// image: Some(image), +// constants: &constants +// }; + +// use crate::sonic::cs::Basic; +// use crate::sonic::sonic::AdaptorCircuit; +// use crate::sonic::helped::prover::{create_advice_on_srs, create_proof_on_srs}; +// use crate::sonic::helped::{MultiVerifier, get_circuit_parameters}; +// use crate::sonic::helped::helper::{create_aggregate_on_srs}; +// use crate::sonic::unhelped::padding::Padding; + +// let info = get_circuit_parameters::(circuit.clone()).expect("Must get circuit info"); +// println!("{:?}", info); + +// println!("creating proof"); +// let start = Instant::now(); +// let proof = create_proof_on_srs::(&AdaptorCircuit(circuit.clone()), &srs).unwrap(); +// println!("done in {:?}", start.elapsed()); + +// { +// let rng = thread_rng(); +// let mut verifier = MultiVerifier::::new(AdaptorCircuit(circuit.clone()), &srs, rng).unwrap(); +// println!("K map = {:?}", verifier.get_k_map()); +// println!("verifying 1 proof without advice"); +// let start = Instant::now(); +// { +// for _ in 0..1 { +// verifier.add_proof(&proof, &[], |_, _| None); +// } +// assert_eq!(verifier.check_all(), true); // TODO +// } +// println!("done in {:?}", start.elapsed()); +// } +// } +// } \ No newline at end of file diff --git a/bellman/src/sonic/transcript/hasher.rs b/bellman/src/sonic/transcript/hasher.rs new file mode 100644 index 0000000..351a3a9 --- /dev/null +++ b/bellman/src/sonic/transcript/hasher.rs @@ -0,0 +1,74 @@ +extern crate tiny_keccak; +extern crate blake2_rfc; + +use self::tiny_keccak::Keccak; +use self::blake2_rfc::blake2s::{Blake2s, blake2s}; + +pub trait Hasher { + fn new(personalization: &[u8]) -> Self; + fn update(&mut self, data: &[u8]); + fn finalize(&mut self) -> Vec; +} + +#[derive(Clone)] +pub struct BlakeHasher { + h: Blake2s +} + +impl Hasher for BlakeHasher { + fn new(personalization: &[u8]) -> Self { + let mut h = Blake2s::new(32); + h.update(personalization); + + Self { + h: h + } + } + + fn update(&mut self, data: &[u8]) { + self.h.update(data); + } + + fn 
finalize(&mut self) -> Vec { + use std::mem; + + let new_h = Blake2s::new(32); + let h = std::mem::replace(&mut self.h, new_h); + + let result = h.finalize(); + + result.as_ref().to_vec().clone() + } +} + +#[derive(Clone)] +pub struct Keccak256Hasher { + h: Keccak +} + +impl Hasher for Keccak256Hasher { + fn new(personalization: &[u8]) -> Self { + let mut h = Keccak::new_keccak256(); + h.update(personalization); + + Self { + h: h + } + } + + fn update(&mut self, data: &[u8]) { + self.h.update(data); + } + + fn finalize(&mut self) -> Vec { + use std::mem; + + let new_h = Keccak::new_keccak256(); + let h = std::mem::replace(&mut self.h, new_h); + + let mut res: [u8; 32] = [0; 32]; + h.finalize(&mut res); + + res[..].to_vec() + } +} \ No newline at end of file diff --git a/bellman/src/sonic/transcript/mod.rs b/bellman/src/sonic/transcript/mod.rs new file mode 100644 index 0000000..744fdc7 --- /dev/null +++ b/bellman/src/sonic/transcript/mod.rs @@ -0,0 +1,134 @@ +use crate::pairing::ff::{Field, PrimeField, PrimeFieldRepr}; +use crate::pairing::{CurveAffine, CurveProjective, Engine}; +use std::io; + +mod hasher; + +use self::hasher::{Hasher, Keccak256Hasher, BlakeHasher}; + +#[derive(Clone)] +pub struct Transcript { + transcriptor: RollingHashTranscript +} + +impl Transcript { + pub fn new(personalization: &[u8]) -> Self { + Self { + transcriptor: RollingHashTranscript::new(personalization) + } + } +} + +impl TranscriptProtocol for Transcript { + fn commit_point(&mut self, point: &G) { + self.transcriptor.commit_point(point); + } + + fn commit_scalar(&mut self, scalar: &F) { + self.transcriptor.commit_scalar(scalar); + } + + fn get_challenge_scalar(&mut self) -> F { + self.transcriptor.get_challenge_scalar() + } +} + +use std::marker::PhantomData; + +#[derive(Clone)] +pub struct RollingHashTranscript { + buffer: Vec, + last_finalized_value: Vec, + repeated_request_nonce: u32, + _marker: PhantomData +} + +impl RollingHashTranscript { + pub fn new(personalization: &[u8]) -> Self { + let mut h = H::new(personalization); + let buffer = h.finalize(); + + Self { + buffer: buffer, + last_finalized_value: vec![], + repeated_request_nonce: 0u32, + _marker: PhantomData + } + } + + pub fn commit_bytes(&mut self, personalization: &[u8], bytes: &[u8]) { + let mut h = H::new(&[]); + h.update(&self.buffer); + h.update(personalization); + h.update(bytes); + + self.buffer = h.finalize(); + } + + pub fn get_challenge_bytes(&mut self, nonce: &[u8]) -> Vec { + let challenge_bytes = &self.buffer; + + let mut h = H::new(&[]); + h.update(challenge_bytes); + h.update(nonce); + + let challenge_bytes = h.finalize(); + + challenge_bytes + } +} + +pub trait TranscriptProtocol { + fn commit_point(&mut self, point: &G); + fn commit_scalar(&mut self, scalar: &F); + fn get_challenge_scalar(&mut self) -> F; +} + +impl TranscriptProtocol for RollingHashTranscript { + fn commit_point(&mut self, point: &G) { + self.commit_bytes(b"point", point.into_uncompressed().as_ref()); + // self.commit_bytes(b"point", point.into_compressed().as_ref()); + self.repeated_request_nonce = 0u32; + } + + fn commit_scalar(&mut self, scalar: &F) { + let mut v = vec![]; + scalar.into_repr().write_be(&mut v).unwrap(); + // scalar.into_repr().write_le(&mut v).unwrap(); + + self.commit_bytes(b"scalar", &v); + self.repeated_request_nonce = 0u32; + } + + fn get_challenge_scalar(&mut self) -> F { + use byteorder::ByteOrder; + let mut nonce = self.repeated_request_nonce; + loop { + let mut nonce_bytes = vec![0u8; 4]; + byteorder::BigEndian::write_u32(&mut 
nonce_bytes, nonce); + let mut repr: F::Repr = Default::default(); + let challenge_bytes = self.get_challenge_bytes(&nonce_bytes); + repr.read_be(&challenge_bytes[..]).unwrap(); + + if let Ok(result) = F::from_repr(repr) { + // println!("Got a challenge {} for nonce = {}", result, nonce); + self.repeated_request_nonce = nonce + 1u32; + return result; + } + if nonce == (0xffffffff as u32) { + panic!("can not make challenge scalar"); + } + nonce += 1; + } + } +} + +// struct TranscriptReader<'a, H:Hasher>(&'a mut Transcript); + +// impl<'a, H:Hasher> io::Read for TranscriptReader<'a, H: Hasher> { +// fn read(&mut self, buf: &mut [u8]) -> io::Result { +// self.0.challenge_bytes(b"read", buf); + +// Ok(buf.len()) +// } +// } \ No newline at end of file diff --git a/bellman/src/sonic/unhelped/aggregate.rs b/bellman/src/sonic/unhelped/aggregate.rs new file mode 100644 index 0000000..1375c4c --- /dev/null +++ b/bellman/src/sonic/unhelped/aggregate.rs @@ -0,0 +1,264 @@ +use crate::pairing::ff::{Field}; +use crate::pairing::{Engine, CurveProjective}; +use std::marker::PhantomData; + +use crate::sonic::helped::{Proof, SxyAdvice}; +use crate::sonic::helped::batch::Batch; +use crate::sonic::helped::poly::{SxEval, SyEval}; +use crate::sonic::helped::Parameters; + +use crate::SynthesisError; + +use crate::sonic::transcript::{Transcript, TranscriptProtocol}; +use crate::sonic::util::*; +use crate::sonic::cs::{Backend, SynthesisDriver}; +use crate::sonic::cs::{Circuit, Variable, Coeff}; +use crate::sonic::srs::SRS; +use crate::sonic::sonic::CountNandQ; +use crate::sonic::sonic::M; +use super::s2_proof::{S2Eval, S2Proof}; +use super::permutation_structure::create_permutation_structure; +use super::permutation_argument::PermutationArgument; +use super::permutation_argument::SignatureOfCorrectComputation; +use super::permutation_argument::SpecializedSRS; + +#[derive(Clone)] +pub struct SuccinctAggregate { + pub signature: SignatureOfCorrectComputation, + pub s2_proof: S2Proof, + pub c: E::G1Affine, + // We have to open each of the S commitments to a random point `z` + pub s_opening: E::G1Affine, + // We have to open C to each constituent `y` + pub c_openings: Vec<(E::G1Affine, E::Fr)>, + // Then we have to finally open C + pub opening: E::G1Affine, + + pub z: E::Fr, + pub w: E::Fr, + +} + +// pub fn create_aggregate, S: SynthesisDriver>( +// circuit: &C, +// inputs: &[(Proof, SxyAdvice)], +// params: &Parameters, +// ) -> SuccinctAggregate +// { +// let n = params.vk.n; +// let q = params.vk.q; + +// create_aggregate_on_srs_using_information::(circuit, inputs, ¶ms.srs, n, q) +// } + +pub fn create_aggregate_on_srs, S: SynthesisDriver>( + circuit: &C, + inputs: &[(Proof, SxyAdvice)], + srs: &SRS, + specialized_srs: &SpecializedSRS +) -> SuccinctAggregate +{ + // TODO: precompute this? 
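+    // Note added for clarity (not part of the original protocol flow): `CountNandQ`
+    // is a counting-only backend, so the synthesis below is a dry run that records
+    // n (the number of multiplication gates) and q (the number of linear constraints)
+    // without assigning witness values. These sizes determine how much of the SRS is
+    // consumed further down: s(z, X) is committed with n negative and n + q positive
+    // powers. The TODO above refers to caching this (n, q) pair per circuit so that
+    // repeated aggregation does not need to re-synthesize just to count.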
+ let (n, q) = { + let mut tmp = CountNandQ::::new(); + + S::synthesize(&mut tmp, circuit).unwrap(); // TODO + + (tmp.n, tmp.q) + }; + + create_aggregate_on_srs_using_information::(circuit, inputs, srs, specialized_srs, n, q) +} + +pub fn create_aggregate_on_srs_using_information, S: SynthesisDriver>( + circuit: &C, + inputs: &[(Proof, SxyAdvice)], + srs: &SRS, + _specialized_srs: &SpecializedSRS, + n: usize, + q: usize, +) -> SuccinctAggregate +{ + use std::time::Instant; + let start = Instant::now(); + // take few proofs that are to be evaluated at some y_i and make an aggregate from them + let mut transcript = Transcript::new(&[]); + let mut y_values: Vec = Vec::with_capacity(inputs.len()); + for &(ref proof, ref sxyadvice) in inputs { + { + let mut transcript = Transcript::new(&[]); + transcript.commit_point(&proof.r); + y_values.push(transcript.get_challenge_scalar()); + } + + transcript.commit_point(&sxyadvice.s); + } + + let z: E::Fr = transcript.get_challenge_scalar(); + + // Compute s(z, Y) for opening of the previous commitments at the same `z` + let (s_poly_negative, s_poly_positive) = { + let mut tmp = SyEval::new(z, n, q); + S::synthesize(&mut tmp, circuit).unwrap(); // TODO + + tmp.poly() + }; + + // Compute C = g^{s(z, x)} + let c = multiexp( + srs.g_positive_x_alpha[0..(n + q)] + .iter() + .chain_ext(srs.g_negative_x_alpha[0..n].iter()), + s_poly_positive.iter().chain_ext(s_poly_negative.iter()) + ).into_affine(); + + transcript.commit_point(&c); + + // Open C at w + let w: E::Fr = transcript.get_challenge_scalar(); + + let value = compute_value::(&w, &s_poly_positive, &s_poly_negative); + + let opening = { + let mut value = value; + value.negate(); + + polynomial_commitment_opening( + n, + 0, + s_poly_negative.iter().rev().chain_ext(Some(value).iter()).chain_ext(s_poly_positive.iter()), + w, + &srs + ) + }; + + println!("Commit and opening of for s(z, w) taken {:?}", start.elapsed()); + + // now we need signature of correct computation. For this purpose + // verifier already knows specialized SRS, so we can just commit to + // s1 and s2 parts of such signature to get `w` and later open at this point! + + // Commit! + + // TODO: Precompute! + // this will internally synthesize a circuit and structure of permutations + + let start = Instant::now(); + + let s2_eval = S2Eval::new(n); + let s2_proof = s2_eval.evaluate(z, w, &srs); + + println!("S2 proof taken {:?}", start.elapsed()); + let start = Instant::now(); + + let permutation_structure = create_permutation_structure(circuit); + let (non_permuted_coeffs, permutations) = permutation_structure.create_permutation_vectors(); + + println!("Permutation vectors synthesis taken {:?}", start.elapsed()); + let start = Instant::now(); + + let signature = PermutationArgument::make_signature( + non_permuted_coeffs, + permutations, + w, + z, + &srs, + ); + + println!("Succinct signature for s(z, Y) taken {:?}", start.elapsed()); + + // Let's open up C to every y. 
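+    // Explanatory sketch (added comment, assuming the usual Kate-style opening used
+    // throughout this crate): `compute_value` below evaluates the committed Laurent
+    // polynomial at a point y,
+    //     s(z, y) = \sum_{i >= 1} s_i * y^i + \sum_{i >= 1} s_{-i} * y^{-i},
+    // via `evaluate_at_consequitive_powers` on the positive and negative halves.
+    // Each call to `polynomial_commitment_opening` then produces the quotient witness
+    // for "s(z, X) - v is divisible by (X - point)", i.e. a commitment to
+    //     (s(z, X) - v) / (X - point),
+    // which is why the constant term `value` is negated and spliced between the
+    // negative and positive coefficient slices before opening.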
+ fn compute_value(y: &E::Fr, poly_positive: &[E::Fr], poly_negative: &[E::Fr]) -> E::Fr { + let mut value = E::Fr::zero(); + let yinv = y.inverse().unwrap(); // TODO + + let positive_powers_contrib = evaluate_at_consequitive_powers(poly_positive, *y, *y); + let negative_powers_contrib = evaluate_at_consequitive_powers(poly_negative, yinv, yinv); + value.add_assign(&positive_powers_contrib); + value.add_assign(&negative_powers_contrib); + + value + } + + let start = Instant::now(); + + // we still need to re-open previous commitments at the same new z + + let mut c_openings = vec![]; + for y in &y_values { + let value = compute_value::(y, &s_poly_positive, &s_poly_negative); + + let opening = { + let mut value = value; + value.negate(); + + polynomial_commitment_opening( + n, + 0, + s_poly_negative.iter().rev().chain_ext(Some(value).iter()).chain_ext(s_poly_positive.iter()), + *y, + &srs + ) + }; + + c_openings.push((opening, value)); + } + + println!("Re-Evaluation and re-opening of s(z, Y) taken {:?}", start.elapsed()); + + // Okay, great. Now we need to open up each S at the same point z to the same value. + // Since we're opening up all the S's at the same point, we create a bunch of random + // challenges instead and open up a random linear combination. + + let mut poly_negative = vec![E::Fr::zero(); n]; + let mut poly_positive = vec![E::Fr::zero(); 2*n]; + let mut expected_value = E::Fr::zero(); + + // TODO: this part can be further parallelized due to synthesis of S(X, y) being singlethreaded + let start = Instant::now(); + + for (y, c_opening) in y_values.iter().zip(c_openings.iter()) { + // Compute s(X, y_i) + let (s_poly_negative, s_poly_positive) = { + let mut tmp = SxEval::new(*y, n); + S::synthesize(&mut tmp, circuit).unwrap(); // TODO + + tmp.poly() + }; + + let mut value = c_opening.1; + let r: E::Fr = transcript.get_challenge_scalar(); + value.mul_assign(&r); + expected_value.add_assign(&value); + + mul_add_polynomials(& mut poly_negative[..], &s_poly_negative[..], r); + mul_add_polynomials(& mut poly_positive[..], &s_poly_positive[..], r); + } + + println!("Re-evaluation of {} S polynomials taken {:?}", y_values.len(), start.elapsed()); + + let s_opening = { + let mut value = expected_value; + value.negate(); + + polynomial_commitment_opening( + n, + 0, + poly_negative.iter().rev().chain_ext(Some(value).iter()).chain_ext(poly_positive.iter()), + z, + &srs + ) + }; + + SuccinctAggregate { + signature, + s2_proof, + c, + s_opening, + c_openings, + opening, + + z: z, + w: w, + } +} \ No newline at end of file diff --git a/bellman/src/sonic/unhelped/grand_product_argument.rs b/bellman/src/sonic/unhelped/grand_product_argument.rs new file mode 100644 index 0000000..3c31ea0 --- /dev/null +++ b/bellman/src/sonic/unhelped/grand_product_argument.rs @@ -0,0 +1,851 @@ +/// One must prove that for commitments to two polynomials of degree n products of the coefficients +/// in those two polynomials are equal (part of the permutation argument) with additional assumption that +/// those coefficients are never equal to zero + +use crate::pairing::ff::{Field, PrimeField, PrimeFieldRepr}; +use crate::pairing::{Engine, CurveProjective, CurveAffine}; +use std::marker::PhantomData; + +use crate::sonic::srs::SRS; +use crate::sonic::util::*; +use crate::sonic::transcript::{Transcript, TranscriptProtocol}; +use super::wellformed_argument::{WellformednessSignature, WellformednessArgument}; + +#[derive(Clone)] +pub struct GrandProductArgument { + a_polynomials: Vec>, + c_polynomials: Vec>, + 
v_elements: Vec, + t_polynomial: Option>, + n: usize +} + +#[derive(Clone)] +pub struct GrandProductProof { + pub t_opening: E::G1Affine, + pub e_zinv: E::Fr, + pub e_opening: E::G1Affine, + pub f_y: E::Fr, + pub f_opening: E::G1Affine, +} + +#[derive(Clone)] +pub struct GrandProductSignature { + pub c_commitments: Vec<(E::G1Affine, E::Fr)>, + pub t_commitment: E::G1Affine, + pub grand_product_openings: Vec<(E::Fr, E::G1Affine)>, + pub proof: GrandProductProof, + pub wellformedness_signature: WellformednessSignature, +} + +impl GrandProductArgument { + pub fn create_signature( + transcript: &mut Transcript, + grand_products: Vec<(Vec, Vec)>, + y: E::Fr, + z: E::Fr, + srs: &SRS, + ) -> GrandProductSignature { + let mut grand_product_challenges = vec![]; + + for _ in 0..grand_products.len() { + let c = transcript.get_challenge_scalar(); + grand_product_challenges.push(c); + } + + let mut all_polys = vec![]; + let mut wellformed_challenges = vec![]; + for _ in 0..(grand_products.len()*2) { + let c = transcript.get_challenge_scalar(); + wellformed_challenges.push(c); + } + + for p in grand_products.iter() { + let (a, b) = p; + all_polys.push(a.clone()); + all_polys.push(b.clone()); + } + + let wellformedness_signature = WellformednessArgument::create_signature( + all_polys, + wellformed_challenges, + &srs + ); + + let mut grand_product_argument = GrandProductArgument::new(grand_products); + let c_commitments = grand_product_argument.commit_to_individual_c_polynomials(&srs); + let t_commitment = grand_product_argument.commit_to_t_polynomial(&grand_product_challenges, y, &srs); + let grand_product_openings = grand_product_argument.open_commitments_for_grand_product(y, z, &srs); + let a_zy: Vec = grand_product_openings.iter().map(|el| el.0.clone()).collect(); + let proof = grand_product_argument.make_argument(&a_zy, &grand_product_challenges, y, z, &srs); + + GrandProductSignature { + c_commitments, + t_commitment, + grand_product_openings, + // a_zy, + proof, + wellformedness_signature + } + + } + + + pub fn new(polynomials: Vec<(Vec, Vec)>) -> Self { + assert!(polynomials.len() > 0); + + let n = polynomials[0].0.len(); + let mut a_polynomials = vec![]; + let mut c_polynomials = vec![]; + let mut v_elements = vec![]; + + // a_{1..n} = first poly + // a_{n+1..2n+1} = b_{1..n} = second poly + + // c_1 = a_1 + // c_2 = a_2 * c_1 = a_2 * a_1 + // c_3 = a_3 * c_2 = a_3 * a_2 * a_1 + // ... + // c_n = a_n * c_{n-1} = \prod a_i + // a_{n+1} = c_{n}^-1 + // c_{n+1} = 1 + // c_{n+1} = a_{n+2} * c_{n+1} = a_{n+2} + // ... + // c_{2n+1} = \prod a_{n+1+i} = \prod b_{i} + // v = c_{n}^-1 + + // calculate c, serially for now + + for p in polynomials.into_iter() { + let (p0, p1) = p; + assert!(p0.len() == p1.len()); + assert!(p0.len() == n); + let mut c_poly: Vec = Vec::with_capacity(2*n + 1); + let mut a_poly: Vec = Vec::with_capacity(2*n + 1); + let mut c_coeff = E::Fr::one(); + // add a + for a in p0.iter() { + c_coeff.mul_assign(a); + c_poly.push(c_coeff); + } + assert_eq!(c_poly.len(), n); + a_poly.extend(p0); + assert_eq!(a_poly.len(), n); + + // v = a_{n+1} = c_{n}^-1 + // let v = c_poly[n-1].inverse().unwrap(); + let v = c_coeff.inverse().unwrap(); + + // ! 
IMPORTANT + // This line is indeed assigning a_{n+1} to zero instead of v + // for the practical purpose later we manually evaluate T polynomial + // and assign v to the term X^{n+1} + a_poly.push(E::Fr::zero()); + // a_poly.push(v); + + // add c_{n+1} + let mut c_coeff = E::Fr::one(); + c_poly.push(c_coeff); + // add b + for b in p1.iter() { + c_coeff.mul_assign(b); + c_poly.push(c_coeff); + } + assert_eq!(c_poly.len(), 2*n + 1); + a_poly.extend(p1); + + assert_eq!(c_poly[n-1], c_poly[2*n]); + assert_eq!(c_poly[n], E::Fr::one()); + + a_polynomials.push(a_poly); + c_polynomials.push(c_poly); + v_elements.push(v); + } + + GrandProductArgument { + a_polynomials: a_polynomials, + c_polynomials: c_polynomials, + v_elements: v_elements, + t_polynomial: None, + n: n + } + } + + // // Make a commitment to a polynomial in a form A*B^{x+1} = [a_1...a_{n}, 0, b_1...b_{n}] + // pub fn commit_for_grand_product(a: &[E::Fr], b: &[E::Fr], srs: &SRS) -> E::G1Affine { + // assert_eq!(a.len(), b.len()); + + // let n = a.len(); + + // multiexp( + // srs.g_positive_x_alpha[0..(2*n+1)].iter(), + // a.iter() + // .chain_ext(Some(E::Fr::zero()).iter()) + // .chain_ext(b.iter()) + // ).into_affine() + // } + + + pub fn commit_for_individual_products(a: &[E::Fr], b: &[E::Fr], srs: &SRS) -> (E::G1Affine, E::G1Affine) { + assert_eq!(a.len(), b.len()); + + let n = a.len(); + + let a = multiexp( + srs.g_positive_x_alpha[0..n].iter(), + a.iter()).into_affine(); + + + let b = multiexp( + srs.g_positive_x_alpha[0..n].iter(), + b.iter()).into_affine(); + + (a, b) + } + + pub fn open_commitments_for_grand_product(&self, y: E::Fr, z: E::Fr, srs: &SRS) -> Vec<(E::Fr, E::G1Affine)> { + let n = self.n; + + let mut yz = y; + yz.mul_assign(&z); + + let mut results = vec![]; + + for a_poly in self.a_polynomials.iter() { + assert_eq!(a_poly[n], E::Fr::zero()); // there is no term for n+1 power + let val = evaluate_at_consequitive_powers(&a_poly[..], yz, yz); + + // let a = & a_poly[0..n]; // powers [1, n] + // let b = & a_poly[(n+1)..]; // there is no n+1 term (numerated as `n`), skip it and start b + // assert_eq!(a.len(), n); + // assert_eq!(b.len(), n); + // let mut val = evaluate_at_consequitive_powers(a, yz, yz); + // { + // let tmp = yz.pow([(n+2) as u64]); + // let v = evaluate_at_consequitive_powers(b, tmp, yz); + // val.add_assign(&v); + // } + + let mut constant_term = val; + constant_term.negate(); + + let opening = polynomial_commitment_opening( + 0, + 2*n + 1, + Some(constant_term).iter() + .chain_ext(a_poly.iter()), + yz, + &srs + ); + + // let opening = polynomial_commitment_opening( + // 0, + // 2*n + 1, + // Some(constant_term).iter() + // .chain_ext(a.iter()) + // .chain_ext(Some(E::Fr::zero()).iter()) + // .chain_ext(b.iter()), + // yz, + // &srs); + + results.push((val, opening)); + + } + + results + } + + // Make a commitment for the begining of the protocol, returns commitment and `v` scalar + pub fn commit_to_individual_c_polynomials(&self, srs: &SRS) -> Vec<(E::G1Affine, E::Fr)> { + + let mut results = vec![]; + + let two_n_plus_1 = self.c_polynomials[0].len(); + + for (p, v) in self.c_polynomials.iter().zip(self.v_elements.iter()) { + let n = self.n; + assert_eq!(p[n], E::Fr::one(), "C_(n+1) must be one"); + + let c = multiexp( + srs.g_positive_x_alpha[0..two_n_plus_1].iter(), + p.iter() + ).into_affine(); + + results.push((c, *v)); + } + + results + } + + // Argument is based on an approach of main SONIC construction, but with a custom S(X,Y) polynomial of a simple form + pub fn 
commit_to_t_polynomial(&mut self, challenges: & Vec, y: E::Fr, srs: &SRS) -> E::G1Affine { + assert_eq!(challenges.len(), self.a_polynomials.len()); + + let n = self.n; + + let mut t_polynomial: Option> = None; + + for (((a, c), v), challenge) in self.a_polynomials.iter() + .zip(self.c_polynomials.iter()) + .zip(self.v_elements.iter()) + .zip(challenges.iter()) + { + let mut a_xy = a.clone(); + let c_xy = c.clone(); + let v = *v; + + assert_eq!(a_xy.len(), 2*n + 1); + assert_eq!(c_xy.len(), 2*n + 1); + + // make a T polynomial + + let r: Vec = { + // p_a(X,Y)*Y + let mut tmp = y; + tmp.square(); + mut_distribute_consequitive_powers(&mut a_xy[..], tmp, y); + + // add extra terms + //v*(XY)^{n+1}*Y + X^{n+2} + X^{n+1}Y − X^{2n+2}*Y + + // n+1 term v*(XY)^{n+1}*Y + X^{n+1}Y + let tmp = y.pow(&[(n+2) as u64]); + let mut x_n_plus_one_term = v; + x_n_plus_one_term.mul_assign(&tmp); + x_n_plus_one_term.add_assign(&y); + a_xy[n].add_assign(&x_n_plus_one_term); + + // n+2 term + a_xy[n+1].add_assign(&E::Fr::one()); + + // 2n+2 term + let mut tmp = y; + tmp.negate(); + + a_xy.push(tmp); + + assert_eq!(a_xy.len(), 2*n + 2); + + let mut r = vec![E::Fr::zero(); 2*n + 3]; + r.extend(a_xy); + + r + }; + + let r_prime: Vec = { + let mut c_prime: Vec = c_xy.iter().rev().map(|el| *el).collect(); + c_prime.push(E::Fr::one()); + c_prime.push(E::Fr::zero()); + + assert_eq!(c_prime.len(), 2*n + 3); + + c_prime + }; + + // multiply polynomials with powers [-2n-2, -1] and [1, 2n+2], + // expect result to be [-2n+1, 2n+1] + let mut t: Vec = multiply_polynomials::(r, r_prime); + + assert_eq!(t.len(), 6*n + 7); + + // drain first powers due to the padding and last element due to requirement of being zero + for (i, el) in t[0..(2*n+3)].iter().enumerate() { + assert_eq!(*el, E::Fr::zero(), "{}", format!("Element {} is non-zero", i)); + } + + t.drain(0..(2*n+3)); + let last = t.pop(); + assert_eq!(last.unwrap(), E::Fr::zero(), "last element should be zero"); + + assert_eq!(t.len(), 4*n + 3); + + let mut val = { + let mut tmp = y; + tmp.square(); + evaluate_at_consequitive_powers(&c_xy, tmp, y) + }; + + val.add_assign(&E::Fr::one()); + + // subtract a constant term + assert_eq!(t[2*n+1], val); + + t[2*n+1].sub_assign(&val); + + if t_polynomial.is_some() { + if let Some(t_poly) = t_polynomial.as_mut() { + mul_add_polynomials(&mut t_poly[..], &t, *challenge); + } + } else { + mul_polynomial_by_scalar(&mut t, *challenge); + t_polynomial = Some(t); + } + } + + let t_polynomial = t_polynomial.unwrap(); + + let c = multiexp(srs.g_negative_x_alpha[0..(2*n+1)].iter().rev() + .chain_ext(srs.g_positive_x_alpha[0..(2*n+1)].iter()), + t_polynomial[0..(2*n+1)].iter() + .chain_ext(t_polynomial[(2*n+2)..].iter())).into_affine(); + + self.t_polynomial = Some(t_polynomial); + + c + } + + // Argument is based on an approach of main SONIC construction, but with a custom S(X,Y) polynomial of a simple form + pub fn make_argument(self, a_zy: & Vec, challenges: & Vec, y: E::Fr, z: E::Fr, srs: &SRS) -> GrandProductProof { + assert_eq!(a_zy.len(), self.a_polynomials.len()); + assert_eq!(challenges.len(), self.a_polynomials.len()); + + let n = self.n; + + let c_polynomials = self.c_polynomials; + let mut e_polynomial: Option> = None; + let mut f_polynomial: Option> = None; + + let mut yz = y; + yz.mul_assign(&z); + + let z_inv = z.inverse().unwrap(); + + let mut t_subcomponent = E::Fr::zero(); + + for (((a, c), challenge), v) in a_zy.iter() + .zip(c_polynomials.into_iter()) + .zip(challenges.iter()) + .zip(self.v_elements.iter()) + { + // cj = 
((aj + vj(yz)n+1)y + zn+2 + zn+1y − z2n+2y)z−1 + let mut c_zy = yz.pow([(n + 1) as u64]); + c_zy.mul_assign(v); + c_zy.add_assign(a); + c_zy.mul_assign(&y); + + let mut z_n_plus_1 = z.pow([(n + 1) as u64]); + + let mut z_n_plus_2 = z_n_plus_1; + z_n_plus_2.mul_assign(&z); + + let mut z_2n_plus_2 = z_n_plus_1; + z_2n_plus_2.square(); + z_2n_plus_2.mul_assign(&y); + + z_n_plus_1.mul_assign(&y); + + c_zy.add_assign(&z_n_plus_1); + c_zy.add_assign(&z_n_plus_2); + c_zy.sub_assign(&z_2n_plus_2); + + c_zy.mul_assign(&z_inv); + + let mut rc = c_zy; + rc.mul_assign(challenge); + + let mut ry = y; + ry.mul_assign(challenge); + + t_subcomponent.add_assign(&rc); + t_subcomponent.sub_assign(&challenge); + + if e_polynomial.is_some() && f_polynomial.is_some() { + if let Some(e_poly) = e_polynomial.as_mut() { + if let Some(f_poly) = f_polynomial.as_mut() { + mul_add_polynomials(&mut e_poly[..], &c, rc); + mul_add_polynomials(&mut f_poly[..], &c, ry); + } + } + } else { + let mut e = c.clone(); + let mut f = c; + mul_polynomial_by_scalar(&mut e, rc); + mul_polynomial_by_scalar(&mut f, ry); + e_polynomial = Some(e); + f_polynomial = Some(f); + } + } + + let e_polynomial = e_polynomial.unwrap(); + let f_polynomial = f_polynomial.unwrap(); + + // evaluate e at z^-1 + + let mut e_val = evaluate_at_consequitive_powers(&e_polynomial, z_inv, z_inv); + e_val.negate(); + + // evaluate f at y + + let mut f_val = evaluate_at_consequitive_powers(&f_polynomial, y, y); + f_val.negate(); + + let e_opening = polynomial_commitment_opening( + 0, + 2*n + 1, + Some(e_val).iter().chain_ext(e_polynomial.iter()), + z_inv, + srs); + + let f_opening = polynomial_commitment_opening( + 0, + 2*n + 1, + Some(f_val).iter().chain_ext(f_polynomial.iter()), + y, + srs); + + e_val.negate(); + f_val.negate(); + + t_subcomponent.add_assign(&e_val); + t_subcomponent.sub_assign(&f_val); + + let mut t_poly = self.t_polynomial.unwrap(); + assert_eq!(t_poly.len(), 4*n + 3); + + assert!(t_poly[2*n + 1].is_zero()); + + // largest negative power of t is -2n-1 + let t_zy = { + let tmp = z_inv.pow([(2*n+1) as u64]); + evaluate_at_consequitive_powers(&t_poly, tmp, z) + }; + + assert_eq!(t_zy, t_subcomponent); + + assert!(t_poly[2*n + 1].is_zero()); + + t_poly[2*n + 1].sub_assign(&t_zy); + + let t_opening = polynomial_commitment_opening( + 2*n + 1, + 2*n + 1, + t_poly.iter(), + z, + srs); + + GrandProductProof { + t_opening: t_opening, + e_zinv: e_val, + e_opening: e_opening, + f_y: f_val, + f_opening: f_opening, + } + } + + pub fn verify_ab_commitment( + n: usize, + randomness: & Vec, + a_commitments: &Vec, + b_commitments: &Vec, + openings: &Vec<(E::Fr, E::G1Affine)>, + y: E::Fr, + z: E::Fr, + srs: &SRS + ) -> bool { + assert_eq!(randomness.len(), a_commitments.len()); + assert_eq!(openings.len(), a_commitments.len()); + assert_eq!(b_commitments.len(), a_commitments.len()); + + // e(Dj,hαx)e(D−yz,hα) = e(Aj,h)e(Bj,hxn+1)e(g−aj ,hα) + + let g = srs.g_positive_x[0]; + + let h_alpha_x_precomp = srs.h_positive_x_alpha[1].prepare(); + + let h_alpha_precomp = srs.h_positive_x_alpha[0].prepare(); + + // H^(x^(n+1)) is n+1 indexed + let mut h_x_n_plus_one_precomp = srs.h_positive_x[n+1]; + h_x_n_plus_one_precomp.negate(); + let h_x_n_plus_one_precomp = h_x_n_plus_one_precomp.prepare(); + + let mut h_prep = srs.h_positive_x[0]; + h_prep.negate(); + let h_prep = h_prep.prepare(); + + let a = multiexp( + a_commitments.iter(), + randomness.iter(), + ).into_affine(); + + let a = a.prepare(); + + let b = multiexp( + b_commitments.iter(), + randomness.iter(), + 
).into_affine(); + + let b = b.prepare(); + + let mut yz_neg = y; + yz_neg.mul_assign(&z); + yz_neg.negate(); + + let mut ops = vec![]; + let mut value = E::Fr::zero(); + + for (el, r) in openings.iter().zip(randomness.iter()) { + let (v, o) = el; + ops.push(o.clone()); + let mut val = *v; + val.mul_assign(&r); + value.add_assign(&val); + } + + let value = g.mul(value.into_repr()).into_affine().prepare(); + + let openings = multiexp( + ops.iter(), + randomness.iter(), + ).into_affine(); + + let openings_zy = openings.mul(yz_neg.into_repr()).into_affine().prepare(); + let openings = openings.prepare(); + + + // e(Dj,hαx)e(D−yz,hα) = e(Aj,h)e(Bj,hxn+1)e(g−aj ,hα) + + E::final_exponentiation(&E::miller_loop(&[ + (&openings, &h_alpha_x_precomp), + (&openings_zy, &h_alpha_precomp), + (&a, &h_prep), + (&b, &h_x_n_plus_one_precomp), + (&value, &h_alpha_precomp) + ])).unwrap() == E::Fqk::one() + } + + pub fn verify( + n: usize, + randomness: & Vec, + a_zy: & Vec, + challenges: &Vec, + t_commitment: E::G1Affine, + commitments: &Vec<(E::G1Affine, E::Fr)>, + proof: &GrandProductProof, + y: E::Fr, + z: E::Fr, + srs: &SRS + ) -> bool { + assert_eq!(randomness.len(), 3); + assert_eq!(a_zy.len(), challenges.len()); + assert_eq!(commitments.len(), challenges.len()); + + let g = srs.g_positive_x[0]; + + let h_alpha_x_precomp = srs.h_positive_x_alpha[1].prepare(); + + let h_alpha_precomp = srs.h_positive_x_alpha[0].prepare(); + + let mut h_prep = srs.h_positive_x[0]; + h_prep.negate(); + let h_prep = h_prep.prepare(); + + // first re-calculate cj and t(z,y) + + let mut yz = y; + yz.mul_assign(&z); + + let z_inv = z.inverse().unwrap(); + + let mut t_zy = E::Fr::zero(); + t_zy.add_assign(&proof.e_zinv); + t_zy.sub_assign(&proof.f_y); + + let mut commitments_points = vec![]; + let mut rc_vec = vec![]; + let mut ry_vec = vec![]; + + for ((r, commitment), a) in challenges.iter() + .zip(commitments.iter()) + .zip(a_zy.iter()) { + let (c, v) = commitment; + commitments_points.push(c.clone()); + + // cj = ((aj + vj(yz)n+1)y + zn+2 + zn+1y − z2n+2y)z−1 + let mut c_zy = yz.pow([(n + 1) as u64]); + c_zy.mul_assign(v); + c_zy.add_assign(a); + c_zy.mul_assign(&y); + + let mut z_n_plus_1 = z.pow([(n + 1) as u64]); + + let mut z_n_plus_2 = z_n_plus_1; + z_n_plus_2.mul_assign(&z); + + let mut z_2n_plus_2 = z_n_plus_1; + z_2n_plus_2.square(); + z_2n_plus_2.mul_assign(&y); + + z_n_plus_1.mul_assign(&y); + + c_zy.add_assign(&z_n_plus_1); + c_zy.add_assign(&z_n_plus_2); + c_zy.sub_assign(&z_2n_plus_2); + + c_zy.mul_assign(&z_inv); + + let mut rc = c_zy; + rc.mul_assign(&r); + rc_vec.push(rc); + + let mut ry = y; + ry.mul_assign(&r); + ry_vec.push(ry); + + let mut val = rc; + val.sub_assign(r); + t_zy.add_assign(&val); + } + + let c_rc = multiexp( + commitments_points.iter(), + rc_vec.iter(), + ).into_affine(); + + let c_ry = multiexp( + commitments_points.iter(), + ry_vec.iter(), + ).into_affine(); + + let mut minus_y = y; + minus_y.negate(); + + let mut f_y = proof.f_opening.mul(minus_y.into_repr()); + let g_f = g.mul(proof.f_y.into_repr()); + f_y.add_assign(&g_f); + + let mut minus_z = z; + minus_z.negate(); + + let mut t_z = proof.t_opening.mul(minus_z.into_repr()); + let g_tzy = g.mul(t_zy.into_repr()); + t_z.add_assign(&g_tzy); + + let mut minus_z_inv = z_inv; + minus_z_inv.negate(); + + let mut e_z_inv = proof.e_opening.mul(minus_z_inv.into_repr()); + let g_e = g.mul(proof.e_zinv.into_repr()); + e_z_inv.add_assign(&g_e); + + let h_alpha_term = multiexp( + vec![e_z_inv.into_affine(), f_y.into_affine(), 
t_z.into_affine()].iter(), + randomness.iter(), + ).into_affine(); + + let h_alpha_x_term = multiexp( + Some(proof.e_opening).iter() + .chain_ext(Some(proof.f_opening).iter()) + .chain_ext(Some(proof.t_opening).iter()), + randomness.iter(), + ).into_affine(); + + + let h_term = multiexp( + Some(c_rc).iter() + .chain_ext(Some(c_ry).iter()) + .chain_ext(Some(t_commitment).iter()), + randomness.iter(), + ).into_affine(); + + E::final_exponentiation(&E::miller_loop(&[ + (&h_alpha_x_term.prepare(), &h_alpha_x_precomp), + (&h_alpha_term.prepare(), &h_alpha_precomp), + (&h_term.prepare(), &h_prep), + ])).unwrap() == E::Fqk::one() + + } +} + +#[test] +fn test_grand_product_argument() { + use crate::pairing::bls12_381::{Fr, G1Affine, G1, Bls12}; + use rand::{XorShiftRng, SeedableRng, Rand, Rng}; + use crate::sonic::srs::SRS; + + let srs_x = Fr::from_str("23923").unwrap(); + let srs_alpha = Fr::from_str("23728792").unwrap(); + // let srs = SRS::::dummy(830564, srs_x, srs_alpha); + let srs = SRS::::new(128, srs_x, srs_alpha); + + let n: usize = 1 << 5; + let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]); + let coeffs = (1..=n).map(|_| Fr::rand(rng)).collect::>(); + let mut permutation = coeffs.clone(); + rng.shuffle(&mut permutation); + + let coeffs_product = coeffs.iter().fold(Fr::one(), |mut sum, x| { + sum.mul_assign(&x); + + sum + }); + + let permutation_product = permutation.iter().fold(Fr::one(), |mut sum, x| { + sum.mul_assign(&x); + + sum + }); + + assert_eq!(coeffs_product, permutation_product); + assert!(!coeffs_product.is_zero()); + + let a_commitment = multiexp(srs.g_positive_x_alpha[0..n].iter(), coeffs.iter()).into_affine(); + let b_commitment = multiexp(srs.g_positive_x_alpha[0..n].iter(), permutation.iter()).into_affine(); + + let (a, b) = GrandProductArgument::commit_for_individual_products(&coeffs[..], &permutation[..], &srs); + + assert_eq!(a_commitment, a); + assert_eq!(b_commitment, b); + + let mut argument = GrandProductArgument::new(vec![(coeffs, permutation)]); + + let commitments_and_v_values = argument.commit_to_individual_c_polynomials(&srs); + + assert_eq!(commitments_and_v_values.len(), 1); + + let y : Fr = rng.gen(); + + let challenges = (0..1).map(|_| Fr::rand(rng)).collect::>(); + + let t_commitment = argument.commit_to_t_polynomial(&challenges, y, &srs); + + let z : Fr = rng.gen(); + + let grand_product_openings = argument.open_commitments_for_grand_product(y, z, &srs); + + let randomness = (0..1).map(|_| Fr::rand(rng)).collect::>(); + + let valid = GrandProductArgument::verify_ab_commitment( + n, + &randomness, + &vec![a_commitment], + &vec![b_commitment], + &grand_product_openings, + y, + z, + &srs + ); + + assert!(valid, "grand product commitments should be valid"); + + let a_zy: Vec = grand_product_openings.iter().map(|el| el.0.clone()).collect(); + + let proof = argument.make_argument(&a_zy, &challenges, y, z, &srs); + + let randomness = (0..3).map(|_| Fr::rand(rng)).collect::>(); + + let valid = GrandProductArgument::verify( + n, + &randomness, + &a_zy, + &challenges, + t_commitment, + &commitments_and_v_values, + &proof, + y, + z, + &srs); + + assert!(valid, "t commitment should be valid"); +} + diff --git a/bellman/src/sonic/unhelped/mod.rs b/bellman/src/sonic/unhelped/mod.rs new file mode 100644 index 0000000..454a48d --- /dev/null +++ b/bellman/src/sonic/unhelped/mod.rs @@ -0,0 +1,17 @@ +/// Largeley this module is implementation of provable evaluation of s(z, y), that is represented in two parts +/// s2(X, Y) = 
\sum_{i=1}^{N} (Y^{-i} + Y^{i})X^{i} +/// s1(X, Y) = ... +/// s1 part requires grand product and permutation arguments, that are also implemented + +mod s2_proof; +mod wellformed_argument; +pub mod grand_product_argument; +mod permutation_argument; +mod verifier; +pub mod permutation_structure; +mod aggregate; + +pub use self::wellformed_argument::{WellformednessArgument, WellformednessProof}; +pub use self::permutation_argument::{PermutationArgument, PermutationProof, PermutationArgumentProof}; +pub use self::verifier::SuccinctMultiVerifier; +pub use self::aggregate::*; \ No newline at end of file diff --git a/bellman/src/sonic/unhelped/padding.rs b/bellman/src/sonic/unhelped/padding.rs new file mode 100644 index 0000000..99b471a --- /dev/null +++ b/bellman/src/sonic/unhelped/padding.rs @@ -0,0 +1,686 @@ +use crate::pairing::ff::{Field}; +use crate::pairing::{Engine, CurveProjective}; +use std::marker::PhantomData; + +use crate::sonic::cs::{Backend}; +use crate::sonic::cs::{Coeff, Variable, LinearCombination}; +use crate::sonic::util::*; +use crate::sonic::util::*; +use crate::sonic::cs::{SynthesisDriver}; +use crate::Circuit as BellmanCircuit; +use crate::sonic::sonic::AdaptorCircuit; +use crate::sonic::cs::Circuit; +use crate::sonic::cs::ConstraintSystem; +use crate::sonic::cs::Nonassigning; +use crate::SynthesisError; + +/* +s_1(X, Y) = \sum\limits_{i=1}^N u_i(Y) X^{N + 1 - i} + + \sum\limits_{i=1}^N v_i(Y) X^{N + 1 + i} + + \sum\limits_{i=1}^N w_i(Y) X^{2N + 1 + i} + +where + + u_i(Y) = \sum\limits_{q=1}^Q Y^{q} u_{i,q} + v_i(Y) = \sum\limits_{q=1}^Q Y^{q} v_{i,q} + w_i(Y) = \sum\limits_{q=1}^Q Y^{q} w_{i,q} + +s_1(X, Y) = \sum\limits_{i=1}^(3N + 1) [u_{N + 1 - i}(Y), v_{i - N - 1}(Y), w_{i - 2N - 1}(Y)] X^{i} + +where [] means concatenation + +if we open up both sums a little it would look like + +// q = 1, +Y * ( X * u_{N, 1} + X^{N + 1} * v_{1, 1} + X^{2N + 1} * w{1, 1}) = Y * (k_0 * X + k_1 * X^{N + 1} + k_2 * X^{2N + 1}) +and for permutation where should exist another term over Y that would have the same structure, but with coefficients permuted, e.g. 
+Y^{p_1} * (k_1 * X + k_2 * X^{N + 1} + k_0 * X^{2N + 1}) and Y^{p_2} * (k_2 * X + k_0 * X^{N + 1} + k_1 * X^{2N + 1}) +that would result in a sum + + X * (k_0 * Y + k_1 * Y^{p_1} + k_2 * Y^{p_2}) ++ X^{N + 1} * (k_1 * Y + k_2 * Y^{p_1} + k_0 * Y^{p_2}) ++ X^{2N + 1} * (k_2 * Y + k_0 * Y^{p_1} + k_1 * Y^{p_2}) + +and permutations would look like + [k_0, k_1, k_2] + [1 , p_1, p_2] + + [k_0, k_1, k_2] + [p_2, 1 , p_1] + + [k_0, k_1, k_2] + [p_1, p_2, 1 ] + +that would naively mean that k_0 should appear in constraint number 1 for variable number 1 + constraint number p_1 for variable number N + 1 + constraint number p_2 for variable number 2N + 1 + +restructuring strategy: + +where u_{i, q} is a coefficient in a linear constraint for an A type variable number i +that corresponds to the qth multiplication gate + +to make s_1 representable as a permutation we first must synthesize all the normal constraints, +then make what would look like a cyclic shift + expansion + +- imagine that there were originally N variables +- variable A(i) in linear constraint number q had a coefficient of u{i, q} +- add a variable B(i+n) that would have a number + +*/ + +pub struct Debugging { + constraint_num: usize, + u: Vec, + v: Vec, + w: Vec, + _marker: std::marker::PhantomData +} + +impl<'a, E: Engine> Backend for &'a mut Debugging { + fn new_linear_constraint(&mut self) { + self.constraint_num += 1; + self.u.push("".to_string()); + self.v.push("".to_string()); + self.w.push("".to_string()); + } + + fn insert_coefficient(&mut self, var: Variable, coeff: Coeff) { + let one = E::Fr::one(); + let mut minus_one = one; + minus_one.negate(); + match var { + Variable::A(index) => { + let acc = &mut self.u[self.constraint_num - 1]; + match coeff { + Coeff::Zero => { }, + Coeff::One => { + acc.push_str(&format!(" + A{}", index)); + }, + Coeff::NegativeOne => { + acc.push_str(&format!(" - A{}", index)); + }, + Coeff::Full(val) => { + if val == one { + acc.push_str(&format!(" + A{}", index)); + } else if val == minus_one { + acc.push_str(&format!(" - A{}", index)); + } else { + acc.push_str(&format!(" + {}*A{}", val, index)); + } + } + } + } + Variable::B(index) => { + let acc = &mut self.v[self.constraint_num - 1]; + match coeff { + Coeff::Zero => { }, + Coeff::One => { + acc.push_str(&format!(" + B{}", index)); + }, + Coeff::NegativeOne => { + acc.push_str(&format!(" - B{}", index)); + }, + Coeff::Full(val) => { + if val == one { + acc.push_str(&format!(" + B{}", index)); + } else if val == minus_one { + acc.push_str(&format!(" - B{}", index)); + } else { + acc.push_str(&format!(" + {}*B{}", val, index)); + } + } + } + } + Variable::C(index) => { + let acc = &mut self.w[self.constraint_num - 1]; + match coeff { + Coeff::Zero => { }, + Coeff::One => { + acc.push_str(&format!(" + C{}", index)); + }, + Coeff::NegativeOne => { + acc.push_str(&format!(" - C{}", index)); + }, + Coeff::Full(val) => { + if val == one { + acc.push_str(&format!(" + C{}", index)); + } else if val == minus_one { + acc.push_str(&format!(" - C{}", index)); + } else { + acc.push_str(&format!(" + {}*C{}", val, index)); + } + } + } + } + }; + } +} + +pub struct Padding; + +impl SynthesisDriver for Padding { + fn synthesize, B: Backend>(backend: B, circuit: &C) -> Result<(), SynthesisError> { + struct Synthesizer> { + backend: B, + current_variable: Option, + _marker: PhantomData, + q: usize, + n: usize, + } + + impl>Synthesizer { + fn purge_current_var(&mut self) { + match self.current_variable.take() { + Some(index) => { + let var_a = 
Variable::A(index); + let var_b = Variable::B(index); + let var_c = Variable::C(index); + + let mut product = None; + + let value_a = self.backend.get_var(var_a); + + self.backend.set_var(var_b, || { + let value_b = E::Fr::one(); + product = Some(value_a.ok_or(SynthesisError::AssignmentMissing)?); + product.as_mut().map(|product| product.mul_assign(&value_b)); + + Ok(value_b) + }).expect("should exist by now"); + + self.backend.set_var(var_c, || { + product.ok_or(SynthesisError::AssignmentMissing) + }).expect("should exist by now"); + + self.current_variable = None; + }, + _ => {} + } + } + + fn alloc_one(&mut self) -> Variable { + self.n += 1; + let index = self.n; + assert_eq!(index, 1); + self.backend.new_multiplication_gate(); + + let var_a = Variable::A(1); + let var_b = Variable::B(1); + let var_c = Variable::C(1); + + self.backend.set_var(var_a, || { + Ok(E::Fr::one()) + }).expect("should exist by now"); + + self.backend.set_var(var_b, || { + Ok(E::Fr::one()) + }).expect("should exist by now"); + + self.backend.set_var(var_c, || { + Ok(E::Fr::one()) + }).expect("should exist by now"); + + self.q += 1; + self.backend.new_linear_constraint(); + self.backend.insert_coefficient(var_a, Coeff::One); + self.backend.insert_coefficient(var_b, Coeff::One); + self.backend.insert_coefficient(var_c, Coeff::NegativeOne); + self.backend.new_k_power(self.q); + + var_a + } + } + + impl> ConstraintSystem for Synthesizer { + const ONE: Variable = Variable::A(1); + + fn alloc(&mut self, value: F) -> Result + where + F: FnOnce() -> Result + { + match self.current_variable.take() { + Some(index) => { + let var_a = Variable::A(index); + let var_b = Variable::B(index); + let var_c = Variable::C(index); + + let mut product = None; + + let value_a = self.backend.get_var(var_a); + + self.backend.set_var(var_b, || { + let value_b = value()?; + product = Some(value_a.ok_or(SynthesisError::AssignmentMissing)?); + product.as_mut().map(|product| product.mul_assign(&value_b)); + + Ok(value_b) + })?; + + self.backend.set_var(var_c, || { + product.ok_or(SynthesisError::AssignmentMissing) + })?; + + self.current_variable = None; + + Ok(var_b) + }, + None => { + self.n += 1; + let index = self.n; + self.backend.new_multiplication_gate(); + + let var_a = Variable::A(index); + + self.backend.set_var(var_a, value)?; + + self.current_variable = Some(index); + + Ok(var_a) + } + } + } + + // TODO: allocate input without spawning extra constraints + fn alloc_input(&mut self, value: F) -> Result + where + F: FnOnce() -> Result + { + // self.purge_current_var(); + // self.n += 1; + // self.backend.new_multiplication_gate(); + + // let index = self.n; + + // let var = Variable::A::(index); + + // self.q += 1; + // self.backend.new_k_power(self.q); + // self.backend.self.backend.insert_coefficient(new_var, Coeff::One); + + // it's always going to be + let input_var = self.alloc(value)?; + + self.enforce_zero(LinearCombination::zero() + input_var); + self.backend.new_k_power(self.q-2); + self.backend.new_k_power(self.q-1); + self.backend.new_k_power(self.q); + + Ok(input_var) + } + + fn enforce_zero(&mut self, lc: LinearCombination) + { + self.q += 1; + self.backend.new_linear_constraint(); + + for (var, coeff) in lc.as_ref() { + self.backend.insert_coefficient(*var, *coeff); + } + + // now we need to "rotate" a linear constraint by allocating more dummy variables, so ensuring + // that if for some q (index of LC) there is a coefficient C in front of a variable A(i) (that will result in a term ~ C*Y^{q}*X^{i}) + // then there will 
be some other q' where there is a coefficient C in front of the variable B(i) + // (that will result in a term ~ C*Y^{q'}*X^{i+N}) and another q'' with C in front of C(i) + // (that will result in a term ~ C*Y^{q''}*X^{i+2N}), so S polynomial is indeed a permutation + + // allocate at max 1 variable to later work with whole gates directly + + self.purge_current_var(); + + use std::collections::HashMap; + + // A -> B, B -> C, C -> A + { + self.q += 1; + self.backend.new_linear_constraint(); + + let mut allocation_map = HashMap::with_capacity(lc.as_ref().len()); + let mut expected_new_index = self.n + 1; + + // determine size of the map + for (var, _) in lc.as_ref() { + match var { + Variable::A(index) => { + if allocation_map.get(index).is_none() && *index != 1 { + allocation_map.insert(*index, expected_new_index); + expected_new_index += 1; + println!("A{} -> B{}", index, expected_new_index); + } + }, + Variable::B(index) => { + if allocation_map.get(index).is_none() && *index != 2 { + allocation_map.insert(*index, expected_new_index); + expected_new_index += 1; + println!("B{} -> C{}", index, expected_new_index); + } + }, + Variable::C(index) => { + if allocation_map.get(index).is_none() && *index != 3 { + allocation_map.insert(*index, expected_new_index); + expected_new_index += 1; + println!("C{} -> A{}", index, expected_new_index); + } + } + } + } + + for _ in 0..allocation_map.len() { + self.backend.new_multiplication_gate(); + self.n += 1; + } + + for (index, new_index) in allocation_map.iter() { + let var_a = Variable::A(*new_index); + let var_b = Variable::B(*new_index); + let var_c = Variable::C(*new_index); + + // A -> B, B -> C, C -> A + let b_val = self.backend.get_var(Variable::A(*index)); + let c_val = self.backend.get_var(Variable::B(*index)); + let a_val = self.backend.get_var(Variable::C(*index)); + + self.backend.set_var(var_a, || { + let value = a_val.ok_or(SynthesisError::AssignmentMissing)?; + + Ok(value) + }).expect("should exist by now"); + + self.backend.set_var(var_b, || { + let value = b_val.ok_or(SynthesisError::AssignmentMissing)?; + + Ok(value) + }).expect("should exist by now"); + + self.backend.set_var(var_c, || { + let value = c_val.ok_or(SynthesisError::AssignmentMissing)?; + + Ok(value) + }).expect("should exist by now"); + + } + + // A -> B, B -> C, C -> A + for (var, coeff) in lc.as_ref() { + let new_var = match var { + Variable::A(index) => { + let var = if *index == 1 { + Variable::B(2) + } else { + let new_index = allocation_map.get(index).unwrap(); + Variable::B(*new_index) + }; + + var + }, + Variable::B(index) => { + let var = if *index == 2 { + Variable::C(3) + } else { + let new_index = allocation_map.get(index).unwrap(); + Variable::C(*new_index) + }; + + var + }, + Variable::C(index) => { + let var = if *index == 3 { + Variable::A(1) + } else { + let new_index = allocation_map.get(index).unwrap(); + Variable::A(*new_index) + }; + + var + } + }; + + self.backend.insert_coefficient(new_var, *coeff); + } + } + + // A -> C, B -> A, C -> B + { + self.q += 1; + self.backend.new_linear_constraint(); + + let mut allocation_map = HashMap::with_capacity(lc.as_ref().len()); + let mut expected_new_index = self.n + 1; + + // determine size of the map + for (var, _) in lc.as_ref() { + match var { + Variable::A(index) => { + if allocation_map.get(index).is_none() && *index != 1 { + allocation_map.insert(*index, expected_new_index); + expected_new_index += 1; + println!("A{} -> C{}", index, expected_new_index); + } + }, + Variable::B(index) => { + if 
allocation_map.get(index).is_none() && *index != 2 { + allocation_map.insert(*index, expected_new_index); + expected_new_index += 1; + println!("B{} -> A{}", index, expected_new_index); + } + }, + Variable::C(index) => { + if allocation_map.get(index).is_none() && *index != 3 { + allocation_map.insert(*index, expected_new_index); + expected_new_index += 1; + println!("C{} -> B{}", index, expected_new_index); + } + } + } + } + + for _ in 0..allocation_map.len() { + self.backend.new_multiplication_gate(); + self.n += 1; + } + + // A -> C, B -> A, C -> B + for (index, new_index) in allocation_map.iter() { + let var_a = Variable::A(*new_index); + let var_b = Variable::B(*new_index); + let var_c = Variable::C(*new_index); + + let b_val = self.backend.get_var(Variable::C(*index)); + let c_val = self.backend.get_var(Variable::A(*index)); + let a_val = self.backend.get_var(Variable::B(*index)); + + self.backend.set_var(var_a, || { + let value = a_val.ok_or(SynthesisError::AssignmentMissing)?; + + Ok(value) + }).expect("should exist by now"); + + self.backend.set_var(var_b, || { + let value = b_val.ok_or(SynthesisError::AssignmentMissing)?; + + Ok(value) + }).expect("should exist by now"); + + self.backend.set_var(var_c, || { + let value = c_val.ok_or(SynthesisError::AssignmentMissing)?; + + Ok(value) + }).expect("should exist by now"); + } + + // A -> C, B -> A, C -> B + for (var, coeff) in lc.as_ref() { + let new_var = match var { + Variable::A(index) => { + let var = if *index == 1 { + Variable::C(3) + } else { + let new_index = allocation_map.get(index).unwrap(); + Variable::C(*new_index) + }; + + var + }, + Variable::B(index) => { + let var = if *index == 2 { + Variable::A(1) + } else { + let new_index = allocation_map.get(index).unwrap(); + Variable::A(*new_index) + }; + + var + }, + Variable::C(index) => { + let var = if *index == 3 { + Variable::B(2) + } else { + let new_index = allocation_map.get(index).unwrap(); + Variable::B(*new_index) + }; + + var + } + }; + + self.backend.insert_coefficient(new_var, *coeff); + } + } + } + + fn multiply(&mut self, values: F) -> Result<(Variable, Variable, Variable), SynthesisError> + where + F: FnOnce() -> Result<(E::Fr, E::Fr, E::Fr), SynthesisError> + { + self.n += 1; + let index = self.n; + self.backend.new_multiplication_gate(); + + let a = Variable::A(index); + let b = Variable::B(index); + let c = Variable::C(index); + + let mut b_val = None; + let mut c_val = None; + + self.backend.set_var(a, || { + let (a, b, c) = values()?; + + b_val = Some(b); + c_val = Some(c); + + Ok(a) + })?; + + self.backend.set_var(b, || { + b_val.ok_or(SynthesisError::AssignmentMissing) + })?; + + self.backend.set_var(c, || { + c_val.ok_or(SynthesisError::AssignmentMissing) + })?; + + Ok((a, b, c)) + } + + fn get_value(&self, var: Variable) -> Result { + self.backend.get_var(var).ok_or(()) + } + } + + let mut tmp: Synthesizer = Synthesizer { + backend: backend, + current_variable: None, + _marker: PhantomData, + q: 0, + n: 0, + }; + + let one = tmp.alloc_input(|| Ok(E::Fr::one())).expect("should have no issues"); + + match (one, as ConstraintSystem>::ONE) { + (Variable::A(1), Variable::A(1)) => {}, + _ => panic!("one variable is incorrect") + } + + circuit.synthesize(&mut tmp)?; + + println!("Done synthesizing, N = {}, Q = {}", tmp.n, tmp.q); + + Ok(()) + } +} + +pub fn constraints_info + Clone>( + circuit: C, +) +{ + let adapted_circuit = AdaptorCircuit(circuit); + + create_constraints_info::<_, _, Nonassigning>(&adapted_circuit) +} + +pub fn constraints_padding_info + 
Clone>( + circuit: C, +) +{ + let adapted_circuit = AdaptorCircuit(circuit); + + create_constraints_info::<_, _, Padding>(&adapted_circuit) +} + +pub fn create_constraints_info, S: SynthesisDriver>( + circuit: &C, +) +{ + let mut backend = Debugging:: { + constraint_num: 0, + u: vec![], + v: vec![], + w: vec![], + _marker: std::marker::PhantomData + }; + + S::synthesize(&mut backend, circuit).unwrap(); + + + for (i, ((u, v), w)) in backend.u.iter() + .zip(backend.v.iter()) + .zip(backend.w.iter()) + .enumerate() + { + println!("Constraint {}: 0 = {}{}{}", i, u, v, w); + } +} + +#[test] +fn my_fun_circuit_test() { + use crate::pairing::ff::PrimeField; + use crate::pairing::bls12_381::{Bls12, Fr}; + + struct MyCircuit; + + impl Circuit for MyCircuit { + fn synthesize>(&self, cs: &mut CS) -> Result<(), SynthesisError> { + let (a, b, _) = cs.multiply(|| { + Ok(( + E::Fr::from_str("10").unwrap(), + E::Fr::from_str("20").unwrap(), + E::Fr::from_str("200").unwrap(), + )) + })?; + + cs.enforce_zero(LinearCombination::from(a) + a - b); + + let multiplier = cs.alloc_input(|| Ok(E::Fr::from_str("20").unwrap()))?; + + cs.enforce_zero(LinearCombination::from(b) - multiplier); + + Ok(()) + } + } + + create_constraints_info::(&MyCircuit); + println!("---------------"); + create_constraints_info::(&MyCircuit); +} \ No newline at end of file diff --git a/bellman/src/sonic/unhelped/permutation_argument.rs b/bellman/src/sonic/unhelped/permutation_argument.rs new file mode 100644 index 0000000..5c3466e --- /dev/null +++ b/bellman/src/sonic/unhelped/permutation_argument.rs @@ -0,0 +1,853 @@ +/// Permutation argument allows to prove that a commitment to a vector A is +/// actually a commitment to a vector of values that are equal to `(s^{perm})_i * y^{perm(i)}` +/// for some fixed permutation `perm` + +use crate::pairing::ff::{Field, PrimeField, PrimeFieldRepr, ScalarEngine}; +use crate::pairing::{Engine, CurveProjective, CurveAffine}; +use std::marker::PhantomData; + +use crate::sonic::srs::SRS; +use crate::sonic::util::*; +use super::wellformed_argument::{WellformednessArgument, WellformednessProof}; +use super::grand_product_argument::{GrandProductArgument, GrandProductSignature}; + +use crate::sonic::transcript::{Transcript, TranscriptProtocol}; + +#[derive(Clone)] +pub struct SpecializedSRS { + pub p_1: E::G1Affine, + pub p_2: Vec, + pub p_3: E::G1Affine, + pub p_4: Vec, + n: usize +} + +#[derive(Clone)] +pub struct PermutationArgument { + non_permuted_coefficients: Vec>, + non_permuted_at_y_coefficients: Vec>, + permuted_at_y_coefficients: Vec>, + inverse_permuted_at_y_coefficients: Vec>, + permutations: Vec>, + n: usize +} + +#[derive(Clone)] +pub struct PermutationProof { + pub v_zy: E::Fr, + pub e_opening: E::G1Affine, + pub f_opening: E::G1Affine, +} + +#[derive(Clone)] +pub struct PermutationArgumentProof { + pub j: usize, + pub s_opening: E::G1Affine, + pub s_zy: E::Fr +} + +#[derive(Clone)] +pub struct SignatureOfCorrectComputation { + pub s_commitments: Vec, + pub s_prime_commitments: Vec, + pub perm_argument_proof: PermutationArgumentProof, + pub perm_proof: PermutationProof, + pub grand_product_signature: GrandProductSignature +} + +// fn permute(coeffs: &[F], permutation: & [usize]) -> Vec{ +// assert_eq!(coeffs.len(), permutation.len()); +// let mut result: Vec = vec![F::zero(); coeffs.len()]; +// for (i, j) in permutation.iter().enumerate() { +// // if *j < 1 { +// // // if permutation information is missing coefficient itself must be zero! 
+// // assert!(coeffs[i].is_zero()); +// // continue; +// // } +// result[*j - 1] = coeffs[i]; +// } +// result +// } + +fn permute_inverse(permuted_coeffs: &[F], permutation: & [usize]) -> Vec{ + assert_eq!(permuted_coeffs.len(), permutation.len()); + let mut result: Vec = vec![F::zero(); permuted_coeffs.len()]; + for (i, j) in permutation.iter().enumerate() { + // if *j < 1 { + // // if permutation information is missing coefficient itself must be zero! + // assert!(coeffs[i].is_zero()); + // continue; + // } + result[i] = permuted_coeffs[*j - 1]; + } + result +} + +impl PermutationArgument { + pub fn new(coefficients: Vec>, permutations: Vec>) -> Self { + assert!(coefficients.len() > 0); + assert_eq!(coefficients.len(), permutations.len()); + + let n = coefficients[0].len(); + + for (c, p) in coefficients.iter().zip(permutations.iter()) { + assert!(c.len() == p.len()); + assert!(c.len() == n); + } + + PermutationArgument { + non_permuted_coefficients: coefficients, + non_permuted_at_y_coefficients: vec![], + // permuted_coefficients: vec![], + permuted_at_y_coefficients: vec![], + inverse_permuted_at_y_coefficients: vec![], + permutations: permutations, + n: n + } + } + + pub fn make_specialized_srs(non_permuted_coefficients: &Vec>, permutations: &Vec>, srs: &SRS) -> SpecializedSRS { + assert!(non_permuted_coefficients.len() > 0); + assert_eq!(non_permuted_coefficients.len(), permutations.len()); + + let n = non_permuted_coefficients[0].len(); + + // p1 is just a commitment to the powers of x. It's indexed from 0 cause there is no g^0 + let p_1 = multiexp(srs.g_positive_x_alpha[0..n].iter(), vec![E::Fr::one(); n].iter()).into_affine(); + + let mut p_2 = vec![]; + + let p_3 = { + let values: Vec = (1..=n).map(|el| { + let mut repr = <::Fr as PrimeField>::Repr::default(); + repr.as_mut()[0] = el as u64; + let fe = E::Fr::from_repr(repr).unwrap(); + + fe + }).collect(); + + multiexp(srs.g_positive_x_alpha[0..n].iter(), values.iter()).into_affine() + }; + + let mut p_4 = vec![]; + + for (c, p) in non_permuted_coefficients.iter().zip(permutations.iter()) { + assert!(c.len() == p.len()); + assert!(c.len() == n); + + // p2 is a commitment to the s^{perm}_i * x^i + { + let p2 = multiexp(srs.g_positive_x_alpha[0..n].iter(), c.iter()).into_affine(); + p_2.push(p2); + } + + { + let values: Vec = p.iter().map(|el| { + let mut repr = <::Fr as PrimeField>::Repr::default(); + repr.as_mut()[0] = *el as u64; + let fe = E::Fr::from_repr(repr).unwrap(); + + fe + }).collect(); + + let p4 = multiexp(srs.g_positive_x_alpha[0..n].iter(), values.iter()).into_affine(); + + p_4.push(p4); + } + } + + SpecializedSRS { + p_1: p_1, + p_2: p_2, + p_3: p_3, + p_4: p_4, + n: n + } + } + + // commit to s and s' at y. 
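+    // With a fixed challenge y, for each permutation sigma_j the prover forms two
+    // polynomials in X (a sketch of the relation implemented in `commit` below,
+    // with coefficients and permutation written 1-indexed):
+    //
+    //   s'_j(X) = sum_{i=1..n}  c_i            * y^i            * X^i   (non-permuted)
+    //   s_j(X)  = sum_{i=1..n}  c_{sigma_j(i)} * y^{sigma_j(i)} * X^i   (inverse-permuted)
+    //
+    // i.e. s_j is obtained by applying `permute_inverse` to the y-scaled coefficients
+    // of s'_j before committing.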
Mutates the state + pub fn commit(&mut self, y: E::Fr, srs: &SRS) -> Vec<(E::G1Affine, E::G1Affine)> { + assert!(self.inverse_permuted_at_y_coefficients.len() == 0); + let mut result = vec![]; + + let n = self.non_permuted_coefficients[0].len(); + + let mut non_permuted_at_y_coefficients = vec![]; + // let mut permuted_coefficients = vec![]; + // let mut permuted_at_y_coefficients = vec![]; + let mut inverse_permuted_at_y_coefficients = vec![]; + + // naive algorithms + // for every permutation poly + // -- go throught all variable_idx + // - take coeff from non-permuted coeffs[permutation[variable_idx]] + // - mul by Y^{permutation[variable_idx]} + // - mul by X^{variable_idx + 1} + + for (c, p) in self.non_permuted_coefficients.iter().zip(self.permutations.iter()) { + let mut non_permuted_at_y = c.clone(); + mut_distribute_consequitive_powers(&mut non_permuted_at_y[..], y, y); + let s_prime = multiexp(srs.g_positive_x_alpha[0..n].iter(), non_permuted_at_y.iter()).into_affine(); + + // if we pretend that non_permuted_at_y[sigma[i]] = coeffs[sigma[i]] * Y^sigma[i], + // then inverse_permuted_at_y[i] = coeffs[sigma[i]] * Y^sigma[i] + let inverse_permuted_at_y = permute_inverse(&non_permuted_at_y[..], &p[..]); + + // let mut t = vec![E::Fr::zero(); inverse_permuted_at_y.len()]; + // for i in 0..t.len() { + // let coeff = c[i]; + // let sigma_i = p[i]; + // let y_sigma_i = y.pow([sigma_i as u64]); + // t[i] = coeff; + // t[i].mul_assign(&y_sigma_i); + // } + + // and commit to S + let s = multiexp(srs.g_positive_x_alpha[0..n].iter(), inverse_permuted_at_y.iter()).into_affine(); + + // let s = multiexp(srs.g_positive_x_alpha[0..n].iter(), t.iter()).into_affine(); + + result.push((s, s_prime)); + + non_permuted_at_y_coefficients.push(non_permuted_at_y); + // permuted_coefficients.push(permuted); + // permuted_at_y_coefficients.push(t); + // permuted_at_y_coefficients.push(permuted_at_y); + inverse_permuted_at_y_coefficients.push(inverse_permuted_at_y); + } + + self.non_permuted_at_y_coefficients = non_permuted_at_y_coefficients; + // self.permuted_coefficients = permuted_coefficients; + // self.permuted_at_y_coefficients = permuted_at_y_coefficients; + self.inverse_permuted_at_y_coefficients = inverse_permuted_at_y_coefficients; + + result + } + + pub fn open_commitments_to_s_prime( + &self, + challenges: &Vec, + y: E::Fr, + z_prime: E::Fr, + srs: &SRS + ) -> PermutationProof { + let n = self.non_permuted_coefficients[0].len(); + + let mut yz = y; + yz.mul_assign(&z_prime); + + let mut polynomial: Option> = None; + + for (p, r) in self.non_permuted_coefficients.iter() + .zip(challenges.iter()) { + if polynomial.is_some() { + if let Some(poly) = polynomial.as_mut() { + mul_add_polynomials(&mut poly[..], &p[..], *r); + } + } else { + let mut poly = p.clone(); + mul_polynomial_by_scalar(&mut poly[..], *r); + polynomial = Some(poly); + } + } + + let mut polynomial = polynomial.unwrap(); + let v = evaluate_at_consequitive_powers(&polynomial[..], yz, yz); + + let mut v_neg = v; + v_neg.negate(); + + let f = polynomial_commitment_opening( + 0, + n, + Some(v_neg).iter().chain_ext(polynomial.iter()), + yz, + &srs + ); + + mut_distribute_consequitive_powers(&mut polynomial[..], y, y); + + let e = polynomial_commitment_opening( + 0, + n, + Some(v_neg).iter().chain_ext(polynomial.iter()), + z_prime, + &srs + ); + + PermutationProof { + v_zy: v, + e_opening: e, + f_opening: f + } + } + + // Argument a permutation argument. 
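+    // The two openings above rely on the following identity: evaluating the raw
+    // coefficients at the product point y*z' gives the same value as first scaling
+    // coefficient i by y^i and then evaluating at z'. Tiny numeric check
+    // (c = [1, 2, 3], y = 2, z' = 3, powers starting at 1):
+    //
+    //   sum_i c_i (y z')^i    = 1*6 + 2*36 + 3*216 = 726
+    //   sum_i (c_i y^i) z'^i  = 2*3 + 8*9  + 24*27 = 726
+    //
+    // which is why `f` is an opening at y*z' of the unscaled combination and `e` is
+    // an opening at z' of the y-scaled one, both against the same value v.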
Current implementation consumes, cause extra arguments are required + pub fn make_argument(self, + beta: E::Fr, + gamma: E::Fr, + grand_product_challenges: & Vec, + wellformed_challenges: & Vec, + y: E::Fr, + z: E::Fr, + _specialized_srs: &SpecializedSRS, + srs: &SRS + ) -> PermutationArgumentProof { + // Sj(P4j)β(P1j)γ is equal to the product of the coefficients of Sj′(P3j)β(P1j)γ + // also open s = \sum self.permuted_coefficients(X, y) at z + + let n = self.n; + let j = self.non_permuted_coefficients.len(); + assert_eq!(j, grand_product_challenges.len()); + assert_eq!(2*j, wellformed_challenges.len()); + + let mut s_polynomial: Option> = None; + + for c in self.inverse_permuted_at_y_coefficients.iter() + { + if s_polynomial.is_some() { + if let Some(poly) = s_polynomial.as_mut() { + add_polynomials(&mut poly[..], & c[..]); + } + } else { + s_polynomial = Some(c.clone()); + } + } + let s_polynomial = s_polynomial.unwrap(); + // evaluate at z + let s_zy = evaluate_at_consequitive_powers(& s_polynomial[..], z, z); + + let mut s_zy_neg = s_zy; + s_zy_neg.negate(); + + let s_zy_opening = polynomial_commitment_opening( + 0, + n, + Some(s_zy_neg).iter().chain_ext(s_polynomial.iter()), + z, + &srs + ); + + // Sj(P4j)^β (P1j)^γ is equal to the product of the coefficients of Sj′(P3j)^β (P1j)^γ + + let p_1_values = vec![E::Fr::one(); n]; + let p_3_values: Vec = (1..=n).map(|el| { + let mut repr = <::Fr as PrimeField>::Repr::default(); + repr.as_mut()[0] = el as u64; + let fe = E::Fr::from_repr(repr).unwrap(); + + fe + }).collect(); + + let mut grand_products = vec![]; + + for ((non_permuted, inv_permuted), permutation) in self.non_permuted_at_y_coefficients.into_iter() + .zip(self.inverse_permuted_at_y_coefficients.into_iter()) + .zip(self.permutations.into_iter()) + + { + // in S combination at the place i there should be term coeff[sigma(i)] * Y^sigma(i), that we can take + // from non-permuted by inverse_permuting it + // let mut s_combination = permute_inverse(&non_permuted[..], &permutation); + let mut s_combination = inv_permuted; + { + let p_4_values: Vec = permutation.into_iter().map(|el| { + let mut repr = <::Fr as PrimeField>::Repr::default(); + repr.as_mut()[0] = el as u64; + let fe = E::Fr::from_repr(repr).unwrap(); + + fe + }).collect(); + mul_add_polynomials(&mut s_combination[..], & p_4_values[..], beta); + mul_add_polynomials(&mut s_combination[..], & p_1_values[..], gamma); + } + + // combination of coeff[i]*Y^i + beta * i + gamma + let mut s_prime_combination = non_permuted.clone(); + { + + mul_add_polynomials(&mut s_prime_combination[..], & p_3_values[..], beta); + mul_add_polynomials(&mut s_prime_combination[..], & p_1_values[..], gamma); + } + + // Sanity check + let s_prime_product = s_prime_combination.iter().fold(E::Fr::one(), |mut sum, x| + { + sum.mul_assign(&x); + + sum + }); + + let s_product = s_combination.iter().fold(E::Fr::one(), |mut sum, x| + { + sum.mul_assign(&x); + + sum + }); + + assert_eq!(s_product, s_prime_product, "product of coefficients must be the same"); + + grand_products.push((s_combination, s_prime_combination)); + } + + let mut a_commitments = vec![]; + let mut b_commitments = vec![]; + + for (a, b) in grand_products.iter() { + let (c_a, c_b) = GrandProductArgument::commit_for_individual_products(& a[..], & b[..], &srs); + a_commitments.push(c_a); + b_commitments.push(c_b); + } + + { + let mut all_polys = vec![]; + for p in grand_products.iter() { + let (a, b) = p; + all_polys.push(a.clone()); + all_polys.push(b.clone()); + } + + let 
wellformed_argument = WellformednessArgument::new(all_polys); + let commitments = wellformed_argument.commit(&srs); + let proof = wellformed_argument.make_argument(wellformed_challenges.clone(), &srs); + let valid = WellformednessArgument::verify(n, &wellformed_challenges, &commitments, &proof, &srs); + + assert!(valid, "wellformedness argument must be valid"); + } + + let mut grand_product_argument = GrandProductArgument::new(grand_products); + let c_commitments = grand_product_argument.commit_to_individual_c_polynomials(&srs); + let t_commitment = grand_product_argument.commit_to_t_polynomial(&grand_product_challenges, y, &srs); + let grand_product_openings = grand_product_argument.open_commitments_for_grand_product(y, z, &srs); + let a_zy: Vec = grand_product_openings.iter().map(|el| el.0.clone()).collect(); + let proof = grand_product_argument.make_argument(&a_zy, &grand_product_challenges, y, z, &srs); + + { + use rand::{XorShiftRng, SeedableRng, Rand, Rng}; + let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]); + let randomness = (0..j).map(|_| E::Fr::rand(rng)).collect::>(); + + let valid = GrandProductArgument::verify_ab_commitment(n, + & randomness, + & a_commitments, + & b_commitments, + &grand_product_openings, + y, + z, + &srs); + assert!(valid, "ab part of grand product argument must be valid"); + + let randomness = (0..3).map(|_| E::Fr::rand(rng)).collect::>(); + let valid = GrandProductArgument::verify(n, + &randomness, + &a_zy, + &grand_product_challenges, + t_commitment, + &c_commitments, + &proof, + y, + z, + &srs); + + assert!(valid, "grand product argument must be valid"); + } + + PermutationArgumentProof { + j: j, + s_opening: s_zy_opening, + s_zy: s_zy + } + } + + pub fn verify_s_prime_commitment( + _n: usize, + randomness: & Vec, + challenges: & Vec, + commitments: &Vec, + proof: &PermutationProof, + y: E::Fr, + z_prime: E::Fr, + specialized_srs: &SpecializedSRS, + srs: &SRS + ) -> bool { + assert_eq!(randomness.len(), 2); + assert_eq!(challenges.len(), commitments.len()); + + // e(E,hαx)e(E−z′,hα) = e(􏰗Mj=1Sj′rj,h)e(g−v,hα) + // e(F,hαx)e(F−yz′,hα) = e(􏰗Mj=1P2jrj,h)e(g−v,hα) + + let g = srs.g_positive_x[0]; + + let h_alpha_x_precomp = srs.h_positive_x_alpha[1].prepare(); + + let h_alpha_precomp = srs.h_positive_x_alpha[0].prepare(); + + let mut h_prep = srs.h_positive_x[0]; + h_prep.negate(); + let h_prep = h_prep.prepare(); + + let mut value = E::Fr::zero(); + + for r in randomness.iter() { + value.add_assign(&r); + } + value.mul_assign(&proof.v_zy); + + let mut minus_yz = z_prime; + minus_yz.mul_assign(&y); + minus_yz.negate(); + + let mut minus_z_prime = z_prime; + minus_z_prime.negate(); + + let f_yz = proof.f_opening.mul(minus_yz.into_repr()); + let e_z = proof.e_opening.mul(minus_z_prime.into_repr()); + + let mut h_alpha_term = multiexp( + vec![e_z.into_affine(), f_yz.into_affine()].iter(), + randomness.iter(), + ); + + let g_v = g.mul(value.into_repr()); + + h_alpha_term.add_assign(&g_v); + + let h_alpha_x_term = multiexp( + Some(proof.e_opening).iter() + .chain_ext(Some(proof.f_opening).iter()), + randomness.iter(), + ).into_affine(); + + let s_r = multiexp( + commitments.iter(), + challenges.iter() + ).into_affine(); + + let p2_r = multiexp( + specialized_srs.p_2.iter(), + challenges.iter() + ).into_affine(); + + let h_term = multiexp( + Some(s_r).iter() + .chain_ext(Some(p2_r).iter()), + randomness.iter() + ).into_affine(); + + E::final_exponentiation(&E::miller_loop(&[ + (&h_alpha_x_term.prepare(), &h_alpha_x_precomp), 
+ (&h_alpha_term.into_affine().prepare(), &h_alpha_precomp), + (&h_term.prepare(), &h_prep), + ])).unwrap() == E::Fqk::one() + } + + pub fn verify( + s_commitments: &Vec, + proof: &PermutationArgumentProof, + z: E::Fr, + srs: &SRS + ) -> bool { + + let g = srs.g_positive_x[0]; + + let h_alpha_x_precomp = srs.h_positive_x_alpha[1].prepare(); + + let h_alpha_precomp = srs.h_positive_x_alpha[0].prepare(); + + let mut h_prep = srs.h_positive_x[0]; + h_prep.negate(); + let h_prep = h_prep.prepare(); + + let mut minus_z = z; + minus_z.negate(); + + let opening_z = proof.s_opening.mul(minus_z.into_repr()); + + let mut h_alpha_term = opening_z; + let g_s = g.mul(proof.s_zy.into_repr()); + + h_alpha_term.add_assign(&g_s); + + let h_alpha_x_term = proof.s_opening; + + let mut s = E::G1::zero(); + for p in s_commitments { + s.add_assign_mixed(&p); + } + + let h_term = s.into_affine(); + + E::final_exponentiation(&E::miller_loop(&[ + (&h_alpha_x_term.prepare(), &h_alpha_x_precomp), + (&h_alpha_term.into_affine().prepare(), &h_alpha_precomp), + (&h_term.prepare(), &h_prep), + ])).unwrap() == E::Fqk::one() + } + + pub fn make_signature( + coefficients: Vec>, + permutations: Vec>, + y: E::Fr, + z: E::Fr, + srs: &SRS, + ) -> SignatureOfCorrectComputation { + let mut argument = PermutationArgument::new(coefficients, permutations); + let commitments = argument.commit(y, &srs); + let mut transcript = Transcript::new(&[]); + + let mut s_commitments = vec![]; + let mut s_prime_commitments = vec![]; + let mut challenges = vec![]; + let num_commitments = commitments.len(); + for (s, s_prime) in commitments.into_iter() { + transcript.commit_point(&s); + transcript.commit_point(&s_prime); + s_commitments.push(s); + s_prime_commitments.push(s_prime); + } + + // get challenges for a full batch + for _ in 0..num_commitments { + let c: E::Fr = transcript.get_challenge_scalar(); + challenges.push(c); + } + + let z_prime = transcript.get_challenge_scalar(); + + let s_prime_commitments_opening = argument.open_commitments_to_s_prime(&challenges, y, z_prime, &srs); + + let (proof, grand_product_signature) = { + let (proof, grand_product_signature) = argument.make_argument_with_transcript( + &mut transcript, + y, + z, + &srs + ); + + (proof, grand_product_signature) + }; + + SignatureOfCorrectComputation { + s_commitments, + s_prime_commitments, + perm_argument_proof: proof, + perm_proof: s_prime_commitments_opening, + grand_product_signature + } + + } + + // Argument a permutation argument. 
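+    // Transcript order used by `make_signature` above (and mirrored by the verifier):
+    // commit every (S_j, S'_j) pair, then draw one batching challenge per pair, then
+    // draw z', open the S' commitments, and finally hand the same transcript to
+    // `make_argument_with_transcript` below, which draws (beta_j, gamma_j) per permutation.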
Current implementation consumes, cause extra arguments are required + pub fn make_argument_with_transcript(self, + transcript: &mut Transcript, + y: E::Fr, + z: E::Fr, + srs: &SRS + ) -> (PermutationArgumentProof, GrandProductSignature) { + // create random beta and gamma for every single permutation argument + let mut betas = vec![]; + let mut gammas = vec![]; + + for _ in 0..self.permutations.len() { + let beta: E::Fr = transcript.get_challenge_scalar(); + let gamma: E::Fr = transcript.get_challenge_scalar(); + + betas.push(beta); + gammas.push(gamma); + } + + // Sj(P4j)β(P1j)γ is equal to the product of the coefficients of Sj′(P3j)β(P1j)γ + // also open s = \sum self.permuted_coefficients(X, y) at z + + let n = self.n; + let j = self.non_permuted_coefficients.len(); + + let mut s_polynomial: Option> = None; + + for c in self.inverse_permuted_at_y_coefficients.iter() + { + if s_polynomial.is_some() { + if let Some(poly) = s_polynomial.as_mut() { + add_polynomials(&mut poly[..], & c[..]); + } + } else { + s_polynomial = Some(c.clone()); + } + } + let s_polynomial = s_polynomial.unwrap(); + // evaluate at z + let s_zy = evaluate_at_consequitive_powers(& s_polynomial[..], z, z); + + let mut s_zy_neg = s_zy; + s_zy_neg.negate(); + + let s_zy_opening = polynomial_commitment_opening( + 0, + n, + Some(s_zy_neg).iter().chain_ext(s_polynomial.iter()), + z, + &srs + ); + + // Sj(P4j)^β (P1j)^γ is equal to the product of the coefficients of Sj′(P3j)^β (P1j)^γ + + let p_1_values = vec![E::Fr::one(); n]; + let p_3_values: Vec = (1..=n).map(|el| { + let mut repr = <::Fr as PrimeField>::Repr::default(); + repr.as_mut()[0] = el as u64; + let fe = E::Fr::from_repr(repr).unwrap(); + + fe + }).collect(); + + let mut grand_products = vec![]; + + for ((((non_permuted, inv_permuted), permutation), beta), gamma) in + self.non_permuted_at_y_coefficients.into_iter() + .zip(self.inverse_permuted_at_y_coefficients.into_iter()) + .zip(self.permutations.into_iter()) + .zip(betas.into_iter()) + .zip(gammas.into_iter()) + + { + // in S combination at the place i there should be term coeff[sigma(i)] * Y^sigma(i), that we can take + // from non-permuted by inverse_permuting it + // let mut s_combination = permute_inverse(&non_permuted[..], &permutation); + let mut s_combination = inv_permuted; + { + let p_4_values: Vec = permutation.into_iter().map(|el| { + let mut repr = <::Fr as PrimeField>::Repr::default(); + repr.as_mut()[0] = el as u64; + let fe = E::Fr::from_repr(repr).unwrap(); + + fe + }).collect(); + mul_add_polynomials(&mut s_combination[..], & p_4_values[..], beta); + mul_add_polynomials(&mut s_combination[..], & p_1_values[..], gamma); + } + + // combination of coeff[i]*Y^i + beta * i + gamma + let mut s_prime_combination = non_permuted.clone(); + { + + mul_add_polynomials(&mut s_prime_combination[..], & p_3_values[..], beta); + mul_add_polynomials(&mut s_prime_combination[..], & p_1_values[..], gamma); + } + + // Sanity check + let s_prime_product = s_prime_combination.iter().fold(E::Fr::one(), |mut sum, x| + { + sum.mul_assign(&x); + + sum + }); + + let s_product = s_combination.iter().fold(E::Fr::one(), |mut sum, x| + { + sum.mul_assign(&x); + + sum + }); + + assert_eq!(s_product, s_prime_product, "product of coefficients must be the same"); + assert!(!s_product.is_zero(), "grand products must not be zero"); + + grand_products.push((s_combination, s_prime_combination)); + } + + let grand_product_signature = GrandProductArgument::create_signature( + transcript, + grand_products, + y, + z, + &srs + ); + + 
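+        // Why the grand product works: permuting a sequence does not change the
+        // product of its entries, so for an honest witness
+        //
+        //   prod_i ( c_{sigma(i)} * y^{sigma(i)} + beta * sigma(i) + gamma )
+        //     == prod_i ( c_i * y^i + beta * i + gamma ),
+        //
+        // while the beta/gamma terms bind each value to its index. Tiny integer
+        // sketch: the factors {1*2 + 3, 2*5 + 3, 3*7 + 3} = {5, 13, 24} multiply to
+        // 1560 no matter in which order they are listed.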
let proof = PermutationArgumentProof { + j: j, + s_opening: s_zy_opening, + s_zy: s_zy + }; + + (proof, grand_product_signature) + } + +} + +#[test] +fn test_permutation_argument() { + use crate::pairing::bls12_381::{Fr, G1Affine, G1, Bls12}; + use rand::{XorShiftRng, SeedableRng, Rand, Rng}; + use crate::sonic::srs::SRS; + + let srs_x = Fr::from_str("23923").unwrap(); + let srs_alpha = Fr::from_str("23728792").unwrap(); + // let srs = SRS::::dummy(830564, srs_x, srs_alpha); + let srs = SRS::::new(128, srs_x, srs_alpha); + + let n: usize = 1 << 4; + let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]); + let mut coeffs = (0..n).map(|_| Fr::rand(rng)).collect::>(); + coeffs[2] = Fr::zero(); // edge case + let mut permutation = (1..=n).collect::>(); + rng.shuffle(&mut permutation); + + let coeffs = vec![coeffs]; + let permutations = vec![permutation]; + + let specialized_srs = PermutationArgument::make_specialized_srs(&coeffs, &permutations, &srs); + + let mut argument = PermutationArgument::new(coeffs, permutations); + + let y : Fr = rng.gen(); + + let challenges = (0..1).map(|_| Fr::rand(rng)).collect::>(); + + let commitments = argument.commit(y, &srs); + let mut s_commitments = vec![]; + let mut s_prime_commitments = vec![]; + for (s, s_prime) in commitments.into_iter() { + s_commitments.push(s); + s_prime_commitments.push(s_prime); + } + + let z_prime : Fr = rng.gen(); + + let opening = argument.open_commitments_to_s_prime(&challenges, y, z_prime, &srs); + + let randomness = (0..2).map(|_| Fr::rand(rng)).collect::>(); + + let valid = PermutationArgument::verify_s_prime_commitment(n, + &randomness, + &challenges, + &s_prime_commitments, + &opening, + y, + z_prime, + &specialized_srs, + &srs); + + assert!(valid, "s' commitment must be valid"); + + let beta : Fr = rng.gen(); + let gamma : Fr = rng.gen(); + + let grand_product_challenges = (0..1).map(|_| Fr::rand(rng)).collect::>(); + let wellformed_challenges = (0..2).map(|_| Fr::rand(rng)).collect::>(); + + let z : Fr = rng.gen(); + + let proof = argument.make_argument( + beta, + gamma, + & grand_product_challenges, + & wellformed_challenges, + y, + z, + &specialized_srs, &srs); + + let valid = PermutationArgument::verify(&s_commitments, &proof, z, &srs); + + assert!(valid, "permutation argument must be valid"); +} + diff --git a/bellman/src/sonic/unhelped/permutation_structure.rs b/bellman/src/sonic/unhelped/permutation_structure.rs new file mode 100644 index 0000000..f7270ef --- /dev/null +++ b/bellman/src/sonic/unhelped/permutation_structure.rs @@ -0,0 +1,743 @@ +use crate::pairing::ff::{Field}; +use crate::pairing::{Engine, CurveProjective}; +use std::marker::PhantomData; + +use crate::sonic::helped::{Proof, SxyAdvice}; +use crate::sonic::helped::batch::Batch; +use crate::sonic::helped::poly::{SxEval, SyEval}; +use crate::sonic::helped::Parameters; + +use crate::SynthesisError; + +use crate::sonic::transcript::{Transcript, TranscriptProtocol}; +use crate::sonic::util::*; +use crate::sonic::cs::{Backend, SynthesisDriver, ConstraintSystem}; +use crate::sonic::cs::{Circuit, Variable, Coeff}; +use crate::sonic::srs::SRS; +use crate::sonic::sonic::Preprocess; +use crate::sonic::sonic::M; +use crate::sonic::sonic::PermutationSynthesizer; + +use super::s2_proof::*; +use super::permutation_argument::*; + +#[derive(Clone)] +pub struct PermutationStructure { + pub n: usize, + pub q: usize, + pub a: Vec<[Option<(Coeff, usize)>; M]>, + pub b: Vec<[Option<(Coeff, usize)>; M]>, + pub c: Vec<[Option<(Coeff, 
usize)>; M]>, +} + +pub fn create_permutation_structure>( + circuit: &C, +) -> PermutationStructure +{ + let mut backend: Preprocess = Preprocess::new(); + + let (a, b, c) = { + + let mut cs: PermutationSynthesizer> = PermutationSynthesizer::new(&mut backend); + + let one = cs.alloc_input(|| Ok(E::Fr::one())).expect("should have no issues"); + + match (one, > as ConstraintSystem>::ONE) { + (Variable::A(1), Variable::A(1)) => {}, + _ => panic!("one variable is incorrect") + } + + circuit.synthesize(&mut cs).expect("should synthesize"); + + + (cs.a, cs.b, cs.c) + }; + + let n = backend.n; + let q = backend.q; + + // println!("Will have {} gates and {} linear constraints", n, q); + + PermutationStructure:: { + n: n, + q: q, + a: a, + b: b, + c: c + } +} + +use rand::{Rng, Rand}; + +impl PermutationStructure { + pub fn calculate_s2_commitment_value(&self, srs: &SRS) -> E::G1Affine { + S2Eval::calculate_commitment_element(self.n, srs) + } + + pub fn calculate_s2_proof(&self, x: E::Fr, y: E::Fr, srs: &SRS) -> S2Proof { + let s2_eval = S2Eval::new(self.n); + + s2_eval.evaluate(x, y, &srs) + } + + pub fn create_inverse_permutation_vectors(&self) -> (Vec>, Vec>) { + // we have to form non-permuted coefficients, as well as permutation structures; + let n = self.n; + let mut non_permuted_coeffs = vec![vec![E::Fr::zero(); 3*n+1]; M]; + let mut permutations = vec![vec![0usize; 3*n+1]; M]; + + let one = E::Fr::one(); + let mut minus_one = E::Fr::one(); + minus_one.negate(); + + let mut not_empty = [false; M]; + // go other the permutations + for (gate_index, info) in self.a.iter().enumerate() { + let offset = n-1; + for i in 0..M { + // coefficients of A are placed at the offset = 0 from the beginning of the vector + if let Some((coeff, place)) = info[i].as_ref() { + // place it + assert!(*place != 0); + let array_position = offset - gate_index; // special for A + let place_coeff_into = &mut non_permuted_coeffs[i]; + let place_permutation_into = &mut permutations[i]; + match coeff { + Coeff::Zero => { + }, + Coeff::One => { + not_empty[i] = true; + place_coeff_into[array_position] = one; + place_permutation_into[array_position] = *place; + }, + Coeff::NegativeOne => { + not_empty[i] = true; + place_coeff_into[array_position] = minus_one; + place_permutation_into[array_position] = *place; + }, + Coeff::Full(value) => { + not_empty[i] = true; + place_coeff_into[array_position] = *value; + place_permutation_into[array_position] = *place; + } + } + } + } + } + + for (gate_index, info) in self.b.iter().enumerate() { + let offset = n + 1; + for i in 0..M { + if let Some((coeff, place)) = info[i].as_ref() { + // place it + assert!(*place != 0); + let array_position = offset + gate_index; + let place_coeff_into = &mut non_permuted_coeffs[i]; + let place_permutation_into = &mut permutations[i]; + match coeff { + Coeff::Zero => { + }, + Coeff::One => { + not_empty[i] = true; + place_coeff_into[array_position] = one; + place_permutation_into[array_position] = *place; + }, + Coeff::NegativeOne => { + not_empty[i] = true; + place_coeff_into[array_position] = minus_one; + place_permutation_into[array_position] = *place; + }, + Coeff::Full(value) => { + not_empty[i] = true; + place_coeff_into[array_position] = *value; + place_permutation_into[array_position] = *place; + } + } + } + } + } + + for (gate_index, info) in self.c.iter().enumerate() { + let offset = 2*n + 1; + for i in 0..M { + // coefficients of A are placed at the offset = 0 from the beginning of the vector + if let Some((coeff, place)) = info[i].as_ref() 
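+            // Layout note (applies to this function and to `create_permutation_vectors`
+            // below): the length-(3n+1) vectors are indexed by wire position, with the
+            // A-wire of 0-based gate g at slot (n - 1) - g (reversed order), slot n
+            // reserved for the constant term, the B-wire of gate g at (n + 1) + g and
+            // the C-wire at (2n + 1) + g; `place` is the 1-based index of the linear
+            // constraint the wire occurs in (in `create_permutation_vectors` the
+            // coefficient itself is stored at `place - 1`, only the permutation entry
+            // uses the wire slot).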
{ + // place it + assert!(*place != 0); + let array_position = offset + gate_index; + let place_coeff_into = &mut non_permuted_coeffs[i]; + let place_permutation_into = &mut permutations[i]; + match coeff { + Coeff::Zero => { + }, + Coeff::One => { + not_empty[i] = true; + place_coeff_into[array_position] = one; + place_permutation_into[array_position] = *place; + }, + Coeff::NegativeOne => { + not_empty[i] = true; + place_coeff_into[array_position] = minus_one; + place_permutation_into[array_position] = *place; + }, + Coeff::Full(value) => { + not_empty[i] = true; + place_coeff_into[array_position] = *value; + place_permutation_into[array_position] = *place; + } + } + } + } + } + + Self::print_constraints(n, self.q, &non_permuted_coeffs, &permutations); + + // need to fill arrays with non-zero indexes just to have full permutation, even while it's just zero coefficient + + // TODO: fix + + let mut m = M; + for i in (0..M).into_iter().rev() { + // these are no constant terms + assert!(non_permuted_coeffs[i][n].is_zero()); + assert!(permutations[i][n] == 0); + } + + for i in (0..M).into_iter().rev() { + if !not_empty[i] { + non_permuted_coeffs.pop(); + permutations.pop(); + m -= 1; + } + } + + assert!(m != 0); + + // find something faster, although it's still linear + + for i in 0..m { + let mut fillers: Vec = (1..=(3*n+1)).map(|el| el).collect(); + for (p, c) in permutations[i].iter_mut().zip(non_permuted_coeffs[i].iter()) { + if *p == 0 { + assert!(c.is_zero()); + } else { + fillers[*p - 1] = 0; + } + } + let mut fill_from = 0; + for p in permutations[i].iter_mut() { + if *p == 0 { + loop { + if fillers[fill_from] != 0 { + *p = fillers[fill_from]; + fill_from += 1; + break; + } else { + fill_from += 1; + } + } + } + } + } + + (non_permuted_coeffs, permutations) + } + + pub fn create_permutation_vectors(&self) -> (Vec>, Vec>) { + // we have to form non-permuted coefficients, as well as permutation structures; + let n = self.n; + let mut non_permuted_coeffs = vec![vec![E::Fr::zero(); 3*n+1]; M]; + let mut permutations = vec![vec![0usize; 3*n+1]; M]; + + let one = E::Fr::one(); + let mut minus_one = E::Fr::one(); + minus_one.negate(); + + let mut not_empty = [false; M]; + // go other the permutations + for (gate_index, info) in self.a.iter().enumerate() { + let offset = n-1; + for i in 0..M { + // coefficients of A are placed at the offset = 0 from the beginning of the vector + if let Some((coeff, place)) = info[i].as_ref() { + // place it + assert!(*place != 0); + let array_position = offset - gate_index; // special for A + let coeff_position = *place - 1; + let place_coeff_into = &mut non_permuted_coeffs[i]; + let place_permutation_into = &mut permutations[i]; + match coeff { + Coeff::Zero => { + }, + Coeff::One => { + not_empty[i] = true; + place_coeff_into[coeff_position] = one; + place_permutation_into[array_position] = *place; + }, + Coeff::NegativeOne => { + not_empty[i] = true; + place_coeff_into[coeff_position] = minus_one; + place_permutation_into[array_position] = *place; + }, + Coeff::Full(value) => { + not_empty[i] = true; + place_coeff_into[coeff_position] = *value; + place_permutation_into[array_position] = *place; + } + } + } + } + } + + for (gate_index, info) in self.b.iter().enumerate() { + let offset = n + 1; + for i in 0..M { + if let Some((coeff, place)) = info[i].as_ref() { + // place it + assert!(*place != 0); + let array_position = offset + gate_index; + let coeff_position = *place - 1; + let place_coeff_into = &mut non_permuted_coeffs[i]; + let place_permutation_into 
= &mut permutations[i]; + match coeff { + Coeff::Zero => { + }, + Coeff::One => { + not_empty[i] = true; + place_coeff_into[coeff_position] = one; + place_permutation_into[array_position] = *place; + }, + Coeff::NegativeOne => { + not_empty[i] = true; + place_coeff_into[coeff_position] = minus_one; + place_permutation_into[array_position] = *place; + }, + Coeff::Full(value) => { + not_empty[i] = true; + place_coeff_into[coeff_position] = *value; + place_permutation_into[array_position] = *place; + } + } + } + } + } + + for (gate_index, info) in self.c.iter().enumerate() { + let offset = 2*n + 1; + for i in 0..M { + // coefficients of A are placed at the offset = 0 from the beginning of the vector + if let Some((coeff, place)) = info[i].as_ref() { + // place it + assert!(*place != 0); + let array_position = offset + gate_index; + let coeff_position = *place - 1; + let place_coeff_into = &mut non_permuted_coeffs[i]; + let place_permutation_into = &mut permutations[i]; + match coeff { + Coeff::Zero => { + }, + Coeff::One => { + not_empty[i] = true; + place_coeff_into[coeff_position] = one; + place_permutation_into[array_position] = *place; + }, + Coeff::NegativeOne => { + not_empty[i] = true; + place_coeff_into[coeff_position] = minus_one; + place_permutation_into[array_position] = *place; + }, + Coeff::Full(value) => { + not_empty[i] = true; + place_coeff_into[coeff_position] = *value; + place_permutation_into[array_position] = *place; + } + } + } + } + } + + // Self::print_constraints(n, self.q, &non_permuted_coeffs, &permutations); + + // need to fill arrays with non-zero indexes just to have full permutation, even while it's just zero coefficient + + // TODO: fix + + let mut m = M; + // for i in (0..M).into_iter().rev() { + // // these are no constant terms + // assert!(non_permuted_coeffs[i][n].is_zero()); + // assert!(permutations[i][n] == 0); + // } + + for i in (0..M).into_iter().rev() { + if !not_empty[i] { + non_permuted_coeffs.pop(); + permutations.pop(); + m -= 1; + } + } + + assert!(m != 0); + + // find something faster, although it's still linear + + for i in 0..m { + let mut fillers: Vec = (1..=(3*n+1)).map(|el| el).collect(); + for (p, _c) in permutations[i].iter_mut().zip(non_permuted_coeffs[i].iter()) { + if *p == 0 { + continue; + // assert!(c.is_zero()); + } else { + fillers[*p - 1] = 0; + } + } + let mut fill_from = 0; + for p in permutations[i].iter_mut() { + if *p == 0 { + loop { + if fillers[fill_from] != 0 { + *p = fillers[fill_from]; + fill_from += 1; + break; + } else { + fill_from += 1; + } + } + } + } + } + + (non_permuted_coeffs, permutations) + } + + pub fn print_constraints(n:usize, q: usize, coeffs: &Vec>, permutations: &Vec>) { + let m = coeffs.len(); + + for constraint_idx in 1..=q { + println!("Constraint {} (term for Y^{})", constraint_idx, constraint_idx); + let mut terms = vec![]; + for p_idx in 0..m { + if let Some(variable_idx) = permutations[p_idx].iter().position(|&s| s == constraint_idx) { + let coeff = coeffs[p_idx][variable_idx]; + terms.push((variable_idx, coeff)); + } + } + for (var_idx, coeff) in terms.into_iter() { + if var_idx < n + 1 { + print!("{} * A({})", coeff, n - var_idx); + } else if var_idx < 2*n + 1 { + print!("{} * B({})", coeff, var_idx - n); + } else { + print!("{} * C({})", coeff, var_idx - 2*n); + } + print!("\n"); + } + } + } + + pub fn create_permutation_special_reference(&self, srs: &SRS) -> SpecializedSRS + { + let (non_permuted_coeffs, permutations) = self.create_permutation_vectors(); + + let specialized_srs = 
PermutationArgument::make_specialized_srs( + &non_permuted_coeffs, + &permutations, + &srs + ); + + specialized_srs + } + + pub fn make_signature(&self, y: E::Fr, z: E::Fr, srs: &SRS) -> SignatureOfCorrectComputation { + let (non_permuted_coeffs, permutations) = self.create_permutation_vectors(); + + let mut s_contrib = E::Fr::zero(); + for permutation_index in 0..permutations.len() { + for (variable_index, sigma_i) in permutations[permutation_index].iter().enumerate() { + let y_power = y.pow([*sigma_i as u64]); + let x_power = z.pow([(variable_index+1) as u64]); + let coeff = non_permuted_coeffs[permutation_index][*sigma_i - 1]; + + let mut result = coeff; + result.mul_assign(&x_power); + result.mul_assign(&y_power); + s_contrib.add_assign(&result); + } + } + + let z_n_plus_1_inv = z.pow([(self.n + 1) as u64]).inverse().unwrap(); + let y_n = y.pow([self.n as u64]); + + println!("Naive S contribution = {}", s_contrib); + + s_contrib.mul_assign(&z_n_plus_1_inv); + s_contrib.mul_assign(&y_n); + + println!("Naive S contribution scaled = {}", s_contrib); + + // let specialized_srs = PermutationArgument::make_specialized_srs( + // &non_permuted_coeffs, + // &permutations, + // &srs + // ); + + let signature = PermutationArgument::make_signature( + non_permuted_coeffs, + permutations, + y, + z, + &srs, + ); + + signature + } + + pub fn create_permutation_arguments(&self, y: E::Fr, z: E::Fr, rng: &mut R, srs: &SRS) + -> (Vec<(E::G1Affine, E::G1Affine)>, Vec, PermutationProof, PermutationArgumentProof, E::Fr, usize, E::Fr) + { + // we have to form non-permuted coefficients, as well as permutation structures; + let n = self.n; + + let (non_permuted_coeffs, permutations) = self.create_permutation_vectors(); + + let m = non_permuted_coeffs.len(); + + println!("Will need {} permutation polynomials", m); + + let specialized_srs = PermutationArgument::make_specialized_srs( + &non_permuted_coeffs, + &permutations, + &srs + ); + + // evaluate S naively + + let mut s_contrib = E::Fr::zero(); + for permutation_index in 0..m { + for (variable_index, sigma_i) in permutations[permutation_index].iter().enumerate() { + let y_power = y.pow([*sigma_i as u64]); + let x_power = z.pow([(variable_index+1) as u64]); + let coeff = non_permuted_coeffs[permutation_index][*sigma_i - 1]; + + let mut result = coeff; + result.mul_assign(&x_power); + result.mul_assign(&y_power); + s_contrib.add_assign(&result); + } + } + + println!("Naive S contribution = {}", s_contrib); + + let mut argument = PermutationArgument::new(non_permuted_coeffs, permutations); + let challenges = (0..m).map(|_| E::Fr::rand(rng)).collect::>(); + + let commitments = argument.commit(y, &srs); + let mut s_commitments = vec![]; + let mut s_prime_commitments = vec![]; + for (s, s_prime) in commitments.clone().into_iter() { + s_commitments.push(s); + // println!("S' = {}", s_prime); + s_prime_commitments.push(s_prime); + + } + + let z_prime : E::Fr = rng.gen(); + + let opening = argument.open_commitments_to_s_prime(&challenges, y, z_prime, &srs); + + let randomness = (0..2).map(|_| E::Fr::rand(rng)).collect::>(); + + let valid = PermutationArgument::verify_s_prime_commitment(n, + &randomness, + &challenges, + &s_prime_commitments, + &opening, + y, + z_prime, + &specialized_srs, + &srs); + + assert!(valid, "s' commitment must be valid"); + + let beta : E::Fr = rng.gen(); + let gamma : E::Fr = rng.gen(); + + let grand_product_challenges = (0..m).map(|_| E::Fr::rand(rng)).collect::>(); + let wellformed_challenges = (0..(2*m)).map(|_| 
E::Fr::rand(rng)).collect::>(); + + let proof = argument.make_argument( + beta, + gamma, + & grand_product_challenges, + & wellformed_challenges, + y, + z, + &specialized_srs, &srs); + + let valid = PermutationArgument::verify(&s_commitments, &proof, z, &srs); + + assert!(valid, "permutation argument must be valid"); + + (commitments, challenges, opening, proof, z_prime, m, s_contrib) + } +} + +#[test] +fn test_simple_succinct_sonic() { + use crate::pairing::ff::{Field, PrimeField}; + use crate::pairing::{Engine, CurveAffine, CurveProjective}; + use crate::pairing::bls12_381::{Bls12, Fr}; + use std::time::{Instant}; + use crate::sonic::srs::SRS; + use crate::sonic::cs::{Circuit, ConstraintSystem, LinearCombination}; + + struct MyCircuit; + + impl Circuit for MyCircuit { + fn synthesize>(&self, cs: &mut CS) -> Result<(), SynthesisError> { + let (a, b, c) = cs.multiply(|| { + Ok(( + E::Fr::from_str("10").unwrap(), + E::Fr::from_str("20").unwrap(), + E::Fr::from_str("200").unwrap(), + )) + })?; + + cs.enforce_zero(LinearCombination::zero() + (Coeff::Full(E::Fr::from_str("2").unwrap()), a) - b); + cs.enforce_zero(LinearCombination::zero() + (Coeff::Full(E::Fr::from_str("20").unwrap()), a) - c); + cs.enforce_zero(LinearCombination::zero() + (Coeff::Full(E::Fr::from_str("10").unwrap()), b) - c); + + // let multiplier = cs.alloc_input(|| Ok(E::Fr::from_str("20").unwrap()))?; + + // cs.enforce_zero(LinearCombination::from(b) - multiplier); + + // let (a1, b1, _) = cs.multiply(|| { + // Ok(( + // E::Fr::from_str("5").unwrap(), + // E::Fr::from_str("5").unwrap(), + // E::Fr::from_str("25").unwrap(), + // )) + // })?; + + // cs.enforce_zero(LinearCombination::zero() + (Coeff::Full(E::Fr::from_str("2").unwrap()), b1) - a); + // cs.enforce_zero(LinearCombination::zero() + (Coeff::Full(E::Fr::from_str("4").unwrap()), a1) - b); + // cs.enforce_zero(LinearCombination::zero() + (Coeff::Full(E::Fr::from_str("40").unwrap()), b1) - c); + + Ok(()) + } + } + + let srs_x = Fr::from_str("23923").unwrap(); + let srs_alpha = Fr::from_str("23728792").unwrap(); + println!("making srs"); + let start = Instant::now(); + // let srs = SRS::::dummy(830564, srs_x, srs_alpha); + let srs = SRS::::new(100, srs_x, srs_alpha); + println!("done in {:?}", start.elapsed()); + + { + use rand::{XorShiftRng, SeedableRng, Rand, Rng}; + let _rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]); + + use crate::sonic::sonic::Basic; + use crate::sonic::sonic::AdaptorCircuit; + use crate::sonic::helped::prover::{create_advice_on_srs, create_proof_on_srs}; + use crate::sonic::helped::{MultiVerifier, get_circuit_parameters_for_succinct_sonic}; + use crate::sonic::helped::helper::{create_aggregate_on_srs}; + use crate::sonic::sonic::Permutation3; + use crate::sonic::unhelped::permutation_structure::*; + + // let z: Fr = rng.gen(); + // let y: Fr = rng.gen(); + + let z: Fr = Fr::from_str("2").unwrap(); + let y: Fr = Fr::one(); + + let perm_structure = create_permutation_structure::(&MyCircuit); + let (non_permuted_coeffs, permutations) = perm_structure.create_permutation_vectors(); + println!("Non-permuted = {:?}", non_permuted_coeffs[0]); + println!("Permutation = {:?}", permutations[0]); + println!("N = {}, Q = {}", perm_structure.n, perm_structure.q); + let n = perm_structure.n; + let szy = { + let mut tmp = SxEval::::new(y, n); + Permutation3::synthesize(&mut tmp, &MyCircuit).unwrap(); // TODO + tmp.finalize(z) + }; + + let naive_s1 = { + let mut res = Fr::zero(); + for j in 0..permutations.len() { + for i in 
0..non_permuted_coeffs[j].len() { + let sigma_i = permutations[j][i]; + let coeff_i = non_permuted_coeffs[j][i]; + // let coeff_sigma_i = non_permuted_coeffs[j][sigma_i - 1]; + + let y_power = y.pow([sigma_i as u64]); + let x_power = z.pow([(i+1) as u64]); + // let mut result = coeff_sigma_i; + let mut result = coeff_i; + result.mul_assign(&y_power); + result.mul_assign(&x_power); + + res.add_assign(&result); + } + } + + res + }; + + println!("Naive s1 = {}", naive_s1); + + // perm_structure.create_permutation_arguments(y, z, rng, &srs); + let signature = perm_structure.make_signature(y, z, &srs); + let s2 = S2Eval::new(perm_structure.n); + let s2 = s2.evaluate(z, y, &srs); + let mut s2_value = s2.c_value; + s2_value.add_assign(&s2.d_value); + + let mut expected_s2_value = Fr::zero(); + let y_inv = y.inverse().unwrap(); + let mut p1 = y; + p1.add_assign(&y_inv); + p1.mul_assign(&z); + expected_s2_value.add_assign(&p1); + + let mut t0 = y; + t0.square(); + + let mut t1 = y_inv; + t1.square(); + + let mut p2 = t0; + p2.add_assign(&t1); + p2.mul_assign(&z); + p2.mul_assign(&z); + + expected_s2_value.add_assign(&p2); + + let z_n = z.pow([n as u64]); + let z_n_plus_1_inv = z.pow([(n + 1) as u64]).inverse().unwrap(); + let y_n = y.pow([n as u64]); + + assert!(expected_s2_value == s2_value); + + s2_value.mul_assign(&z_n); + + let mut s1 = signature.perm_argument_proof.s_zy; + println!("S1 = {}", s1); + s1.mul_assign(&z_n_plus_1_inv); + s1.mul_assign(&y_n); + + s1.sub_assign(&s2_value); + + let mut naive_s1 = naive_s1; + naive_s1.mul_assign(&z_n_plus_1_inv); + naive_s1.mul_assign(&y_n); + naive_s1.sub_assign(&s2_value); + + println!("S1(?) = {}", naive_s1); + + assert_eq!(s1, szy); + } +} \ No newline at end of file diff --git a/bellman/src/sonic/unhelped/prover.rs b/bellman/src/sonic/unhelped/prover.rs new file mode 100644 index 0000000..970e411 --- /dev/null +++ b/bellman/src/sonic/unhelped/prover.rs @@ -0,0 +1,310 @@ +use crate::pairing::ff::{Field}; +use crate::pairing::{Engine, CurveProjective}; +use std::marker::PhantomData; + +use super::{Proof, SxyAdvice}; +use super::batch::Batch; +use super::poly::{SxEval, SyEval}; +use super::parameters::{Parameters, NUM_BLINDINGS}; + +use crate::SynthesisError; + +use crate::sonic::transcript::{Transcript, TranscriptProtocol}; +use crate::sonic::util::*; +use crate::sonic::cs::{Backend, SynthesisDriver}; +use crate::sonic::cs::{Circuit, Variable, Coeff}; +use crate::sonic::srs::SRS; +use crate::sonic::sonic::{CountN, Basic}; + +pub fn create_advice_on_information_and_srs, S: SynthesisDriver>( + circuit: &C, + proof: &Proof, + srs: &SRS, + n: usize +) -> Result, SynthesisError> +{ + let z: E::Fr; + let y: E::Fr; + { + let mut transcript = Transcript::new(&[]); + transcript.commit_point(&proof.r); + y = transcript.get_challenge_scalar(); + transcript.commit_point(&proof.t); + z = transcript.get_challenge_scalar(); + } + let z_inv = z.inverse().ok_or(SynthesisError::DivisionByZero)?; + + let (s_poly_negative, s_poly_positive) = { + let mut tmp = SxEval::new(y, n); + S::synthesize(&mut tmp, circuit)?; + + tmp.poly() + }; + + // Compute S commitment + let s = multiexp( + srs.g_positive_x_alpha[0..(2 * n)] + .iter() + .chain_ext(srs.g_negative_x_alpha[0..(n)].iter()), + s_poly_positive.iter().chain_ext(s_poly_negative.iter()) + ).into_affine(); + + // Compute s(z, y) + let mut szy = E::Fr::zero(); + { + szy.add_assign(& evaluate_at_consequitive_powers(& s_poly_positive[..], z, z)); + szy.add_assign(& evaluate_at_consequitive_powers(& s_poly_negative[..], 
z_inv, z_inv)); + } + + // Compute kate opening + let opening = { + let mut open = szy; + open.negate(); + + let poly = kate_divison( + s_poly_negative.iter().rev().chain_ext(Some(open).iter()).chain_ext(s_poly_positive.iter()), + z, + ); + + let negative_poly = poly[0..n].iter().rev(); + let positive_poly = poly[n..].iter(); + multiexp( + srs.g_negative_x[1..(negative_poly.len() + 1)].iter().chain_ext( + srs.g_positive_x[0..positive_poly.len()].iter() + ), + negative_poly.chain_ext(positive_poly) + ).into_affine() + }; + + Ok(SxyAdvice { + s, + szy, + opening + }) +} + +pub fn create_advice, S: SynthesisDriver>( + circuit: &C, + proof: &Proof, + parameters: &Parameters, +) -> Result, SynthesisError> +{ + let n = parameters.vk.n; + create_advice_on_information_and_srs::(circuit, proof, ¶meters.srs, n) +} + +pub fn create_advice_on_srs, S: SynthesisDriver>( + circuit: &C, + proof: &Proof, + srs: &SRS +) -> Result, SynthesisError> +{ + // annoying, but we need n to compute s(z, y), and this isn't + // precomputed anywhere yet + let n = { + let mut tmp = CountN::::new(); + S::synthesize(&mut tmp, circuit)?; + + tmp.n + }; + + create_advice_on_information_and_srs::(circuit, proof, srs, n) +} + +pub fn create_proof, S: SynthesisDriver>( + circuit: &C, + parameters: &Parameters +) -> Result, SynthesisError> { + create_proof_on_srs::(circuit, ¶meters.srs) +} + +extern crate rand; +use self::rand::{Rand, Rng, thread_rng}; +use crate::sonic::sonic::Wires; + +pub fn create_proof_on_srs, S: SynthesisDriver>( + circuit: &C, + srs: &SRS +) -> Result, SynthesisError> +{ + let mut wires = Wires::new(); + + S::synthesize(&mut wires, circuit)?; + + let n = wires.a.len(); + + let mut transcript = Transcript::new(&[]); + + let rng = &mut thread_rng(); + + // c_{n+1}, c_{n+2}, c_{n+3}, c_{n+4} + let blindings: Vec = (0..NUM_BLINDINGS).into_iter().map(|_| E::Fr::rand(rng)).collect(); + + // r is a commitment to r(X, 1) + let r = polynomial_commitment::( + n, + 2*n + NUM_BLINDINGS, + n, + &srs, + blindings.iter().rev() + .chain_ext(wires.c.iter().rev()) + .chain_ext(wires.b.iter().rev()) + .chain_ext(Some(E::Fr::zero()).iter()) + .chain_ext(wires.a.iter()), + ); + + transcript.commit_point(&r); + + let y: E::Fr = transcript.get_challenge_scalar(); + + // create r(X, 1) by observation that it's just a series of coefficients. + // Used representation is for powers X^{-2n}...X^{-n-1}, X^{-n}...X^{-1}, X^{1}...X^{n} + // Same representation is ok for r(X, Y) too cause powers always match + let mut rx1 = wires.b; + rx1.extend(wires.c); + rx1.extend(blindings.clone()); + rx1.reverse(); + rx1.push(E::Fr::zero()); + rx1.extend(wires.a); + + let mut rxy = rx1.clone(); + + let y_inv = y.inverse().ok_or(SynthesisError::DivisionByZero)?; + + // y^(-2n - num blindings) + let tmp = y_inv.pow(&[(2*n + NUM_BLINDINGS) as u64]); + mut_distribute_consequitive_powers( + &mut rxy, + tmp, + y, + ); + + // negative powers [-1, -2n], positive [1, n] + let (mut s_poly_negative, s_poly_positive) = { + let mut tmp = SxEval::new(y, n); + S::synthesize(&mut tmp, circuit)?; + + tmp.poly() + }; + + // r'(X, y) = r(X, y) + s(X, y). 
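+    // t(X, y) = r(X, 1) * r'(X, y) is a Laurent polynomial in X; for a satisfying
+    // assignment its constant coefficient equals k(y), which is why the X^0 slot is
+    // zeroed below (see the `// -k(y)` line) and skipped when committing. Sketch of
+    // the mechanism: for a(X) = a_{-1} X^{-1} + a_1 X and b(X) = b_{-1} X^{-1} + b_1 X,
+    // the X^0 coefficient of a(X) * b(X) is a_{-1} b_1 + a_1 b_{-1}, i.e. exactly the
+    // "inner product across mirrored powers" that encodes the gate checks.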
Note `y` - those are evaluated at the point already + let mut rxy_prime = rxy.clone(); + { + // extend to have powers [n+1, 2n] + rxy_prime.resize(4 * n + 1 + NUM_BLINDINGS, E::Fr::zero()); + s_poly_negative.reverse(); + + let neg_poly_len = s_poly_negative.len(); + add_polynomials(&mut rxy_prime[(NUM_BLINDINGS+neg_poly_len)..(2 * n + NUM_BLINDINGS)], &s_poly_negative[..]); + s_poly_negative.reverse(); + + add_polynomials(&mut rxy_prime[(2 * n + 1 + NUM_BLINDINGS)..], &s_poly_positive[..]) + + // // add coefficients in front of X^{-2n}...X^{-n-1}, X^{-n}...X^{-1} + // for (r, s) in rxy_prime[NUM_BLINDINGS..(2 * n + NUM_BLINDINGS)] + // .iter_mut() + // .rev() + // .zip(s_poly_negative) + // { + // r.add_assign(&s); + // } + // // add coefficients in front of X^{1}...X^{n}, X^{n+1}...X^{2*n} + // for (r, s) in rxy_prime[(2 * n + 1 + NUM_BLINDINGS)..].iter_mut().zip(s_poly_positive) { + // r.add_assign(&s); + // } + } + + // by this point all R related polynomials are blinded and evaluated for Y variable + + // t(X, y) = r'(X, y)*r(X, 1) and will be later evaluated at z + // contained degree in respect to X are from -4*n to 3*n including X^0 + let mut txy = multiply_polynomials::(rx1.clone(), rxy_prime); + txy[4 * n + 2 * NUM_BLINDINGS] = E::Fr::zero(); // -k(y) + + // commit to t(X, y) to later open at z + let t = polynomial_commitment( + srs.d, + (4 * n) + 2*NUM_BLINDINGS, + 3 * n, + srs, + // skip what would be zero power + txy[0..(4 * n) + 2*NUM_BLINDINGS].iter() + .chain_ext(txy[(4 * n + 2*NUM_BLINDINGS + 1)..].iter()), + ); + + transcript.commit_point(&t); + + let z: E::Fr = transcript.get_challenge_scalar(); + let z_inv = z.inverse().ok_or(SynthesisError::DivisionByZero)?; + + let rz = { + let tmp = z_inv.pow(&[(2*n + NUM_BLINDINGS) as u64]); + + evaluate_at_consequitive_powers(&rx1, tmp, z) + }; + + // rzy is evaluation of r(X, Y) at z, y + let rzy = { + let tmp = z_inv.pow(&[(2*n + NUM_BLINDINGS) as u64]); + + evaluate_at_consequitive_powers(&rxy, tmp, z) + }; + + transcript.commit_scalar(&rz); + transcript.commit_scalar(&rzy); + + let r1: E::Fr = transcript.get_challenge_scalar(); + + let zy_opening = { + // r(X, 1) - r(z, y) + // subtract constant term from R(X, 1) + rx1[(2 * n + NUM_BLINDINGS)].sub_assign(&rzy); + + let mut point = y; + point.mul_assign(&z); + + polynomial_commitment_opening( + 2 * n + NUM_BLINDINGS, + n, + &rx1, + point, + srs + ) + }; + + assert_eq!(rx1.len(), 3*n + NUM_BLINDINGS + 1); + + // it's an opening of t(X, y) at z + let z_opening = { + rx1[(2 * n + NUM_BLINDINGS)].add_assign(&rzy); // restore + + let rx1_len = rx1.len(); + mul_add_polynomials(&mut txy[(2 * n + NUM_BLINDINGS)..(2 * n + NUM_BLINDINGS + rx1_len)], &rx1[..], r1); + + // // skip powers from until reach -2n - NUM_BLINDINGS + // for (t, &r) in txy[(2 * n + NUM_BLINDINGS)..].iter_mut().zip(rx1.iter()) { + // let mut r = r; + // r.mul_assign(&r1); + // t.add_assign(&r); + // } + + let val = { + let tmp = z_inv.pow(&[(4*n + 2*NUM_BLINDINGS) as u64]); + + evaluate_at_consequitive_powers(&txy, tmp, z) + }; + + txy[(4 * n + 2*NUM_BLINDINGS)].sub_assign(&val); + + polynomial_commitment_opening( + 4*n + 2*NUM_BLINDINGS, + 3*n, + &txy, + z, + srs) + }; + + Ok(Proof { + r, rz, rzy, t, z_opening, zy_opening + }) +} diff --git a/bellman/src/sonic/unhelped/s2_proof.rs b/bellman/src/sonic/unhelped/s2_proof.rs new file mode 100644 index 0000000..6cbaf9b --- /dev/null +++ b/bellman/src/sonic/unhelped/s2_proof.rs @@ -0,0 +1,167 @@ +use crate::pairing::ff::{Field, PrimeField, PrimeFieldRepr}; +use 
crate::pairing::{Engine, CurveProjective, CurveAffine}; +use std::marker::PhantomData; + +use crate::sonic::srs::SRS; +use crate::sonic::util::*; + +#[derive(Clone)] +pub struct S2Eval { + n: usize, + _marker: PhantomData +} + +#[derive(Clone)] +pub struct S2Proof { + o: E::G1Affine, + pub c_value: E::Fr, + pub d_value: E::Fr, + pub c_opening: E::G1Affine, + pub d_opening: E::G1Affine +} + +impl S2Eval { + pub fn calculate_commitment_element(n: usize, srs: &SRS) -> E::G1Affine { + // TODO: parallelize + let mut o = E::G1::zero(); + for i in 0..n { + o.add_assign_mixed(&srs.g_positive_x_alpha[i]); + } + + o.into_affine() + } + + pub fn new(n: usize) -> Self { + S2Eval { + n: n, + _marker: PhantomData + } + } + + pub fn evaluate(&self, x: E::Fr, y: E::Fr, srs: &SRS) -> S2Proof { + // create a reference element first + + let o = Self::calculate_commitment_element(self.n, &srs); + + let mut poly = vec![E::Fr::one(); self.n+1]; + + let (c, c_opening) = { + let mut point = y; + point.mul_assign(&x); + let val = evaluate_at_consequitive_powers(&poly[1..], point, point); + poly[0] = val; + poly[0].negate(); + let opening = polynomial_commitment_opening(0, self.n, poly.iter(), point, &srs); + + (val, opening) + }; + + let (d, d_opening) = { + let mut point = y.inverse().unwrap(); + point.mul_assign(&x); + let val = evaluate_at_consequitive_powers(&poly[1..], point, point); + poly[0] = val; + poly[0].negate(); + let opening = polynomial_commitment_opening(0, self.n, poly.iter(), point, &srs); + + (val, opening) + }; + + + S2Proof { + o: o, + c_value: c, + d_value: d, + c_opening: c_opening, + d_opening: d_opening + } + } + + pub fn verify(x: E::Fr, y: E::Fr, proof: &S2Proof, srs: &SRS) -> bool { + + // e(C,hαx)e(C−yz,hα) = e(O,h)e(g−c,hα) + + let alpha_x_precomp = srs.h_positive_x_alpha[1].prepare(); + let alpha_precomp = srs.h_positive_x_alpha[0].prepare(); + let mut h_prep = srs.h_positive_x[0]; + h_prep.negate(); + let h_prep = h_prep.prepare(); + + let mut minus_xy = x; + minus_xy.mul_assign(&y); + minus_xy.negate(); + + let mut h_alpha_term = proof.c_opening.mul(minus_xy.into_repr()); + let g_in_c = E::G1Affine::one().mul(proof.c_value); + h_alpha_term.add_assign(&g_in_c); + + let h_alpha_term = h_alpha_term.into_affine(); + + let valid = E::final_exponentiation(&E::miller_loop(&[ + (&proof.c_opening.prepare(), &alpha_x_precomp), + (&h_alpha_term.prepare(), &alpha_precomp), + (&proof.o.prepare(), &h_prep), + ])).unwrap() == E::Fqk::one(); + + if !valid { + return false; + } + + // e(D,hαx)e(D−y−1z,hα) = e(O,h)e(g−d,hα) + + let mut minus_x_y_inv = x; + minus_x_y_inv.mul_assign(&y.inverse().unwrap()); + minus_x_y_inv.negate(); + + let mut h_alpha_term = proof.d_opening.mul(minus_x_y_inv.into_repr()); + let g_in_d = E::G1Affine::one().mul(proof.d_value); + h_alpha_term.add_assign(&g_in_d); + + let h_alpha_term = h_alpha_term.into_affine(); + + let valid = E::final_exponentiation(&E::miller_loop(&[ + (&proof.d_opening.prepare(), &alpha_x_precomp), + (&h_alpha_term.prepare(), &alpha_precomp), + (&proof.o.prepare(), &h_prep), + ])).unwrap() == E::Fqk::one(); + + if !valid { + return false; + } + + true + } +} + + +#[test] +fn test_s2_proof() { + use crate::pairing::ff::{Field, PrimeField}; + use crate::pairing::{Engine, CurveAffine, CurveProjective}; + use crate::pairing::bls12_381::{Bls12, Fr}; + use std::time::{Instant}; + use crate::sonic::srs::SRS; + use crate::sonic::cs::{Circuit, ConstraintSystem, LinearCombination}; + + let srs_x = Fr::from_str("23923").unwrap(); + let srs_alpha = 
Fr::from_str("23728792").unwrap(); + println!("making srs"); + let start = Instant::now(); + let srs = SRS::::dummy(830564, srs_x, srs_alpha); + println!("done in {:?}", start.elapsed()); + + { + use rand::{XorShiftRng, SeedableRng, Rand, Rng}; + let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]); + + let x: Fr = rng.gen(); + let y: Fr = rng.gen(); + + let proof = S2Eval::new(1024); + let proof = proof.evaluate(x, y, &srs); + + let valid = S2Eval::verify(x, y, &proof, &srs); + + assert!(valid); + } +} \ No newline at end of file diff --git a/bellman/src/sonic/unhelped/verifier.rs b/bellman/src/sonic/unhelped/verifier.rs new file mode 100644 index 0000000..7718681 --- /dev/null +++ b/bellman/src/sonic/unhelped/verifier.rs @@ -0,0 +1,761 @@ +use crate::pairing::ff::{Field}; +use crate::pairing::{Engine, CurveProjective}; +use std::marker::PhantomData; +use rand::{Rand, Rng}; + +use crate::sonic::helped::{Proof, SxyAdvice}; +use crate::sonic::helped::batch::Batch; +use crate::sonic::helped::poly::{SxEval, SyEval}; +use crate::sonic::helped::helper::Aggregate; +use crate::sonic::helped::parameters::{Parameters}; + +use crate::SynthesisError; + +use crate::sonic::transcript::{Transcript, TranscriptProtocol}; +use crate::sonic::util::*; +use crate::sonic::cs::{Backend, SynthesisDriver}; +use crate::sonic::cs::{Circuit, Variable, Coeff}; +use crate::sonic::srs::SRS; +use crate::sonic::sonic::Preprocess; + +use super::s2_proof::{S2Proof, S2Eval}; +use super::aggregate::SuccinctAggregate; +use super::permutation_structure::create_permutation_structure; +use super::permutation_argument::{ + PermutationArgumentProof, + PermutationProof, + PermutationArgument, + SpecializedSRS +}; + +pub struct SuccinctMultiVerifier, S: SynthesisDriver, R: Rng> { + circuit: C, + s1_special_reference: SpecializedSRS, + s2_special_reference: E::G1Affine, + pub(crate) batch: Batch, + k_map: Vec, + n: usize, + q: usize, + randomness_source: R, + _marker: PhantomData<(E, S)> +} + +impl, S: SynthesisDriver, R: Rng> SuccinctMultiVerifier { + // This constructor consumes randomness source cause it's later used internally + pub fn new(circuit: C, srs: &SRS, rng: R) -> Result { + let (n, q, k_map) = { + let mut preprocess = Preprocess::new(); + S::synthesize(&mut preprocess, &circuit)?; + + (preprocess.n, preprocess.q, preprocess.k_map) + }; + + // also calculate special reference for s1 + + let permutation_structure = create_permutation_structure(&circuit); + let s2_special_reference = permutation_structure.calculate_s2_commitment_value(&srs); + let s1_special_reference = permutation_structure.create_permutation_special_reference(&srs); + + Ok(SuccinctMultiVerifier { + circuit, + s1_special_reference, + s2_special_reference, + batch: Batch::new(srs, n), + k_map: k_map, + n: n, + q: q, + randomness_source: rng, + _marker: PhantomData + }) + } + + pub fn add_aggregate( + &mut self, + proofs: &[(Proof, SxyAdvice)], + aggregate: &SuccinctAggregate, + srs: &SRS + ) + { + let mut transcript = Transcript::new(&[]); + let mut y_values: Vec = Vec::with_capacity(proofs.len()); + for &(ref proof, ref sxyadvice) in proofs { + { + let mut transcript = Transcript::new(&[]); + transcript.commit_point(&proof.r); + y_values.push(transcript.get_challenge_scalar()); + } + + transcript.commit_point(&sxyadvice.s); + } + + let z: E::Fr = transcript.get_challenge_scalar(); + + transcript.commit_point(&aggregate.c); + + let w: E::Fr = transcript.get_challenge_scalar(); + + let szw = { + // prover will supply s1 
and s2, need to calculate + // s(z, w) = X^-(N+1) * Y^N * s1 - X^N * s2 + + let x_n = z.pow(&[self.n as u64]); + let mut x_n_plus_1 = x_n; + x_n_plus_1.mul_assign(&z); + let x_n_plus_1_inv = x_n_plus_1.inverse().unwrap(); + + let y_n = w.pow(&[self.n as u64]); + + // simultaneously add components to the batch verifier + + // this is s2 contribution itself + let s2_proof = &aggregate.s2_proof; + let mut s2_part = s2_proof.c_value; + s2_part.add_assign(&s2_proof.d_value); + s2_part.mul_assign(&x_n); + + // add terms for S2 for verification + + { + let random: E::Fr = self.randomness_source.gen(); + + // e(C,hαx)e(C−yz,hα) = e(O,h)e(g−c,hα) that is + // e(C,hαx)e(C^−yz,hα)*e(O,-h)e(g^c,hα) = 1 + + let mut xy = z; + xy.mul_assign(&w); + + self.batch.add_opening(s2_proof.c_opening, random, xy); + self.batch.add_opening_value(random, s2_proof.c_value); + self.batch.add_commitment(self.s2_special_reference, random); + + } + + { + let random: E::Fr = self.randomness_source.gen(); + + // e(D,hαx)e(D−y−1z,hα) = e(O,h)e(g−d,hα) that is + // e(D,hαx)e(D^−y-1z,hα)*e(O,-h)e(g^d,hα) = 1 + + let mut y_inv_by_x = z; + y_inv_by_x.mul_assign(&w.inverse().unwrap()); + + self.batch.add_opening(s2_proof.d_opening, random, y_inv_by_x); + self.batch.add_opening_value(random, s2_proof.d_value); + self.batch.add_commitment(self.s2_special_reference, random); + + } + + // now work with s1 part + + let mut s1_part = aggregate.signature.perm_argument_proof.s_zy; + s1_part.mul_assign(&x_n_plus_1_inv); + s1_part.mul_assign(&y_n); + + let mut szw = s1_part; + szw.sub_assign(&s2_part); + + // verify commitments for s' and s + + { + let mut transcript = Transcript::new(&[]); + + // let s_commitments = &aggregate.signature.s_commitments; + // let s_prime_commitments = &aggregate.signature.s_prime_commitments; + + let mut challenges = vec![]; + for (s, s_prime) in aggregate.signature.s_commitments.iter() + .zip(aggregate.signature.s_prime_commitments.iter()) { + transcript.commit_point(s); + transcript.commit_point(s_prime); + } + + for _ in 0..aggregate.signature.s_commitments.len() { + let challenge = transcript.get_challenge_scalar(); + challenges.push(challenge); + } + + let z_prime: E::Fr = transcript.get_challenge_scalar(); + + // we expect M permutation proofs, add them all into verification + // using batching with random challenges and extra randomness for pairing equation + { + // e(E,hαx)e(E−z′,hα) = e(􏰇Mj=1Sj′rj,h)e(g−v,hα) + let perm_proof = &aggregate.signature.perm_proof; + + let s_r = multiexp( + aggregate.signature.s_prime_commitments.iter(), + challenges.iter() + ).into_affine(); + + let p2_r = multiexp( + self.s1_special_reference.p_2.iter(), + challenges.iter() + ).into_affine(); + + + let value = perm_proof.v_zy; + + let random: E::Fr = self.randomness_source.gen(); + + self.batch.add_opening(perm_proof.e_opening, random, z_prime); + self.batch.add_opening_value(random, value); + self.batch.add_commitment(s_r, random); + + + // e(F,hαx)e(F−yz′,hα) = e(􏰇Mj=1P2jrj,h)e(g−v,hα) + + let mut y_z_prime = z_prime; + y_z_prime.mul_assign(&w); + + let random: E::Fr = self.randomness_source.gen(); + + self.batch.add_opening(perm_proof.f_opening, random, y_z_prime); + self.batch.add_opening_value(random, value); + self.batch.add_commitment(p2_r, random); + + } + + // now we can actually take an opening of S commitments and + + { + // e(I,hαx)e(I−z,hα) = e(􏰇Mj=1 Sj,h)e(g−s,hα) + + let value = aggregate.signature.perm_argument_proof.s_zy; + let mut s_commitment = E::G1::zero(); + + for s in 
aggregate.signature.s_commitments.iter() { + s_commitment.add_assign_mixed(s); + } + + let random: E::Fr = self.randomness_source.gen(); + + self.batch.add_opening(aggregate.signature.perm_argument_proof.s_opening, random, z); + self.batch.add_opening_value(random, value); + self.batch.add_commitment(s_commitment.into_affine(), random); + + } + + // TODO: Add grand product argument! + + // for each of the grand product arguments create a corresponding commitment + // from already known elements + + let mut betas = vec![]; + let mut gammas = vec![]; + + let mut a_commitments = vec![]; + let mut b_commitments = vec![]; + + for _ in 0..aggregate.signature.s_commitments.len() { + let beta: E::Fr = transcript.get_challenge_scalar(); + let gamma: E::Fr = transcript.get_challenge_scalar(); + + betas.push(beta); + gammas.push(gamma); + } + + let mut wellformedness_argument_commitments = vec![]; + + use crate::pairing::CurveAffine; + use crate::pairing::ff::PrimeField; + + for (j, (((s, s_prime), beta), gamma)) in aggregate.signature.s_commitments.iter() + .zip(aggregate.signature.s_prime_commitments.iter()) + .zip(betas.iter()) + .zip(gammas.iter()) + .enumerate() + + { + // Sj(P4j)β(P1j)γ + + let mut a = s.into_projective(); + a.add_assign(&self.s1_special_reference.p_4[j].mul(beta.into_repr())); + a.add_assign(&self.s1_special_reference.p_1.mul(gamma.into_repr())); + let a = a.into_affine(); + + // Sj′(P3j)β(P1j)γ + + let mut b = s_prime.into_projective(); + b.add_assign(&self.s1_special_reference.p_3.mul(beta.into_repr())); + b.add_assign(&self.s1_special_reference.p_1.mul(gamma.into_repr())); + let b = b.into_affine(); + + a_commitments.push(a); + b_commitments.push(b); + wellformedness_argument_commitments.push(a); + wellformedness_argument_commitments.push(b); + } + + // commitments to invidvidual grand products are assembled, now check first part of a grand + // product argument + + // Now perform an actual check + { + let randomness: Vec = (0..aggregate.signature.s_commitments.len()).map(|_| self.randomness_source.gen()).collect(); + // e(Dj,hαx)e(D−yz,hα) = e(Aj,h)e(Bj,hxn+1)e(g−aj ,hα) + + let g = srs.g_positive_x[0]; + let h_alpha_x_precomp = srs.h_positive_x_alpha[1].prepare(); + let h_alpha_precomp = srs.h_positive_x_alpha[0].prepare(); + + let mut h_x_n_plus_one_precomp = srs.h_positive_x[self.n+1]; + h_x_n_plus_one_precomp.negate(); + let h_x_n_plus_one_precomp = h_x_n_plus_one_precomp.prepare(); + + let mut h_prep = srs.h_positive_x[0]; + h_prep.negate(); + let h_prep = h_prep.prepare(); + + let a = multiexp( + a_commitments.iter(), + randomness.iter(), + ).into_affine(); + + let a = a.prepare(); + + let b = multiexp( + b_commitments.iter(), + randomness.iter(), + ).into_affine(); + + let b = b.prepare(); + + let mut yz_neg = w; + yz_neg.mul_assign(&z); + yz_neg.negate(); + + let mut ops = vec![]; + let mut value = E::Fr::zero(); + + for (el, r) in aggregate.signature.grand_product_signature.grand_product_openings.iter().zip(randomness.iter()) { + let (v, o) = el; + ops.push(o.clone()); + let mut val = *v; + val.mul_assign(&r); + value.add_assign(&val); + } + + let value = g.mul(value.into_repr()).into_affine().prepare(); + + let openings = multiexp( + ops.iter(), + randomness.iter(), + ).into_affine(); + + let openings_zy = openings.mul(yz_neg.into_repr()).into_affine().prepare(); + let openings = openings.prepare(); + + // e(Dj,hαx)e(D−yz,hα) = e(Aj,h)e(Bj,hxn+1)e(g−aj ,hα) + + let valid = E::final_exponentiation(&E::miller_loop(&[ + (&openings, &h_alpha_x_precomp), + 
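// Readable form of the relation quoted in the comment above: with the random
// weights r_j already folded into `openings`, `a`, `b` and `value`, this single
// Miller loop batches, for all j at once, the per-commitment check
//   e(D_j, h^{alpha x}) * e(D_j^{-yz}, h^{alpha})
//     = e(A_j, h) * e(B_j, h^{x^{n+1}}) * e(g^{-a_j}, h^{alpha}).
// `h_prep` and `h_x_n_plus_one_precomp` are the negated h and h^{x^{n+1}}, and
// `value` enters with a positive sign, so the whole product must equal one.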
(&openings_zy, &h_alpha_precomp), + (&a, &h_prep), + (&b, &h_x_n_plus_one_precomp), + (&value, &h_alpha_precomp) + ])).unwrap() == E::Fqk::one(); + + // TODO + assert!(valid, "grand product arguments must be valid for individual commitments"); + + } + + // Now the second part of the grand product argument + + { + let mut grand_product_challenges = vec![]; + + for _ in 0..aggregate.signature.grand_product_signature.c_commitments.len() { + let c: E::Fr = transcript.get_challenge_scalar(); + grand_product_challenges.push(c); + } + // first re-calculate cj and t(z,y) + + let mut yz = w; + yz.mul_assign(&z); + + let z_inv = z.inverse().unwrap(); + + let mut t_zy = E::Fr::zero(); + + let mut commitments_points = vec![]; + let mut rc_vec = vec![]; + let mut ry_vec = vec![]; + + // in grand product arguments n is not a number of gates, but 3n+1 - number of variables + 1 + let three_n_plus_1 = 3*self.n + 1; + + for ((r, commitment), (a, _)) in grand_product_challenges.iter() + .zip(aggregate.signature.grand_product_signature.c_commitments.iter()) + .zip(aggregate.signature.grand_product_signature.grand_product_openings.iter()) + { + let (c, v) = commitment; + commitments_points.push(*c); + + // cj = ((aj + vj(yz)n+1)y + zn+2 + zn+1y − z2n+2y)z−1 + let mut c_zy = yz.pow([(three_n_plus_1 + 1) as u64]); + c_zy.mul_assign(v); + c_zy.add_assign(a); + c_zy.mul_assign(&w); + + let mut z_n_plus_1 = z.pow([(three_n_plus_1 + 1) as u64]); + + let mut z_n_plus_2 = z_n_plus_1; + z_n_plus_2.mul_assign(&z); + + let mut z_2n_plus_2 = z_n_plus_1; + z_2n_plus_2.square(); + z_2n_plus_2.mul_assign(&w); + + z_n_plus_1.mul_assign(&w); + + c_zy.add_assign(&z_n_plus_1); + c_zy.add_assign(&z_n_plus_2); + c_zy.sub_assign(&z_2n_plus_2); + + c_zy.mul_assign(&z_inv); + + let mut rc = c_zy; + rc.mul_assign(&r); + rc_vec.push(rc); + + let mut ry = w; + ry.mul_assign(&r); + ry_vec.push(ry); + + let mut val = rc; + val.sub_assign(&r); + t_zy.add_assign(&val); + } + + t_zy.add_assign(&aggregate.signature.grand_product_signature.proof.e_zinv); + t_zy.sub_assign(&aggregate.signature.grand_product_signature.proof.f_y); + + // t(z, y) is now calculated + + let c_rc = multiexp( + commitments_points.iter(), + rc_vec.iter(), + ).into_affine(); + + let c_ry = multiexp( + commitments_points.iter(), + ry_vec.iter(), + ).into_affine(); + + // e(E,h^alphax)e(E^-z^-1,h^alpha) = e(\sumCj^(rj*cj),h)e(g^-e,h^alpha) + + { + let random: E::Fr = self.randomness_source.gen(); + + self.batch.add_opening(aggregate.signature.grand_product_signature.proof.e_opening, random, z_inv); + self.batch.add_opening_value(random, aggregate.signature.grand_product_signature.proof.e_zinv); + self.batch.add_commitment(c_rc, random); + } + + // e(F,h^alphax)e(F^-y,h) = e(\sumCj^(rj&y),h)e(g^-f,h^alpha) + + { + let random: E::Fr = self.randomness_source.gen(); + + self.batch.add_opening(aggregate.signature.grand_product_signature.proof.f_opening, random, w); + self.batch.add_opening_value(random, aggregate.signature.grand_product_signature.proof.f_y); + self.batch.add_commitment(c_ry, random); + } + + // e(T′,hαx)e(T′−z,hα) = e(T,h)e(g−t(z,y),hα) + + { + let random: E::Fr = self.randomness_source.gen(); + + self.batch.add_opening(aggregate.signature.grand_product_signature.proof.t_opening, random, z); + self.batch.add_opening_value(random, t_zy); + self.batch.add_commitment(aggregate.signature.grand_product_signature.t_commitment, random); + } + } + + // finally check the wellformedness arguments + + { + let mut wellformedness_challenges = vec![]; + + for _ in 
0..wellformedness_argument_commitments.len() { + let c: E::Fr = transcript.get_challenge_scalar(); + wellformedness_challenges.push(c); + } + + let d = srs.d; + let n = 3*self.n + 1; // same as for grand products + + let alpha_x_d_precomp = srs.h_positive_x_alpha[d].prepare(); + // TODO: not strictly required + assert!(n < d); + let d_minus_n = d - n; + let alpha_x_n_minus_d_precomp = srs.h_negative_x_alpha[d_minus_n].prepare(); + let mut h_prep = srs.h_positive_x[0]; + h_prep.negate(); + let h_prep = h_prep.prepare(); + + let a = multiexp( + wellformedness_argument_commitments.iter(), + wellformedness_challenges.iter(), + ).into_affine(); + + let r1: E::Fr = self.randomness_source.gen(); + let r2: E::Fr = self.randomness_source.gen(); + + let mut r = r1; + r.add_assign(&r2); + let l_r1 = aggregate.signature.grand_product_signature.wellformedness_signature.proof.l.mul(r1.into_repr()).into_affine(); + let r_r2 = aggregate.signature.grand_product_signature.wellformedness_signature.proof.r.mul(r2.into_repr()).into_affine(); + + let a_r = a.mul(r.into_repr()).into_affine(); + + let valid = E::final_exponentiation(&E::miller_loop(&[ + (&a_r.prepare(), &h_prep), + (&l_r1.prepare(), &alpha_x_d_precomp), + (&r_r2.prepare(), &alpha_x_n_minus_d_precomp) + ])).unwrap() == E::Fqk::one(); + + assert!(valid, "wellformedness argument must be valid"); + } + + } + + szw + }; + + { + let random: E::Fr = self.randomness_source.gen(); + + self.batch.add_opening(aggregate.opening, random, w); + self.batch.add_commitment(aggregate.c, random); + self.batch.add_opening_value(szw, random); + } + + for ((opening, value), &y) in aggregate.c_openings.iter().zip(y_values.iter()) { + let random: E::Fr = self.randomness_source.gen(); + + self.batch.add_opening(*opening, random, y); + self.batch.add_commitment(aggregate.c, random); + self.batch.add_opening_value(*value, random); + } + + let random: E::Fr = self.randomness_source.gen(); + + let mut expected_value = E::Fr::zero(); + for ((_, advice), c_opening) in proofs.iter().zip(aggregate.c_openings.iter()) { + let mut r: E::Fr = transcript.get_challenge_scalar(); + + // expected value of the later opening + { + let mut tmp = c_opening.1; + tmp.mul_assign(&r); + expected_value.add_assign(&tmp); + } + + r.mul_assign(&random); + + self.batch.add_commitment(advice.s, r); + } + + self.batch.add_opening_value(expected_value, random); + self.batch.add_opening(aggregate.s_opening, random, z); + } + + /// Caller must ensure to add aggregate after adding a proof + pub fn add_proof_with_advice( + &mut self, + proof: &Proof, + inputs: &[E::Fr], + advice: &SxyAdvice, + ) + { + let mut z = None; + + self.add_proof(proof, inputs, |_z, _y| { + z = Some(_z); + Some(advice.szy) + }); + + let z = z.unwrap(); + + // We need to open up SxyAdvice.s at z using SxyAdvice.opening + let mut transcript = Transcript::new(&[]); + transcript.commit_point(&advice.opening); + transcript.commit_point(&advice.s); + transcript.commit_scalar(&advice.szy); + let random: E::Fr = self.randomness_source.gen(); + + self.batch.add_opening(advice.opening, random, z); + self.batch.add_commitment(advice.s, random); + self.batch.add_opening_value(advice.szy, random); + } + + pub fn add_proof( + &mut self, + proof: &Proof, + inputs: &[E::Fr], + sxy: F + ) + where F: FnOnce(E::Fr, E::Fr) -> Option + { + let mut transcript = Transcript::new(&[]); + + transcript.commit_point(&proof.r); + + let y: E::Fr = transcript.get_challenge_scalar(); + + transcript.commit_point(&proof.t); + + let z: E::Fr = 
transcript.get_challenge_scalar(); + + transcript.commit_scalar(&proof.rz); + transcript.commit_scalar(&proof.rzy); + + let r1: E::Fr = transcript.get_challenge_scalar(); + + transcript.commit_point(&proof.z_opening); + transcript.commit_point(&proof.zy_opening); + + // First, the easy one. Let's open up proof.r at zy, using proof.zy_opening + // as the evidence and proof.rzy as the opening. + { + let random: E::Fr = self.randomness_source.gen(); + let mut zy = z; + zy.mul_assign(&y); + self.batch.add_opening(proof.zy_opening, random, zy); + self.batch.add_commitment_max_n(proof.r, random); + self.batch.add_opening_value(proof.rzy, random); + } + + // Now we need to compute t(z, y) with what we have. Let's compute k(y). + let mut ky = E::Fr::zero(); + for (exp, input) in self.k_map.iter().zip(Some(E::Fr::one()).iter().chain(inputs.iter())) { + let mut term = y.pow(&[(*exp + self.n) as u64]); + term.mul_assign(input); + ky.add_assign(&term); + } + + // Compute s(z, y) + let szy = sxy(z, y).unwrap_or_else(|| { + let mut tmp = SxEval::new(y, self.n); + S::synthesize(&mut tmp, &self.circuit).unwrap(); // TODO + + tmp.finalize(z) + + // let mut tmp = SyEval::new(z, self.n, self.q); + // S::synthesize(&mut tmp, &self.circuit).unwrap(); // TODO + + // tmp.finalize(y) + }); + + // Finally, compute t(z, y) + // t(z, y) = (r(z, y) + s(z,y))*r(z, 1) - k(y) + let mut tzy = proof.rzy; + tzy.add_assign(&szy); + tzy.mul_assign(&proof.rz); + tzy.sub_assign(&ky); + + // We open these both at the same time by keeping their commitments + // linearly independent (using r1). + { + let mut random: E::Fr = self.randomness_source.gen(); + + self.batch.add_opening(proof.z_opening, random, z); + self.batch.add_opening_value(tzy, random); + self.batch.add_commitment(proof.t, random); + + random.mul_assign(&r1); + + self.batch.add_opening_value(proof.rz, random); + self.batch.add_commitment_max_n(proof.r, random); + } + } + + pub fn get_k_map(&self) -> Vec { + return self.k_map.clone(); + } + + pub fn get_n(&self) -> usize { + return self.n; + } + + pub fn get_q(&self) -> usize { + return self.q; + } + + pub fn check_all(self) -> bool { + self.batch.check_all() + } +} + +// /// Check multiple proofs without aggregation. Verifier's work is +// /// not succint due to `S(X, Y)` evaluation +// pub fn verify_proofs, S: SynthesisDriver, R: Rng>( +// proofs: &[Proof], +// inputs: &[Vec], +// circuit: C, +// rng: R, +// params: &Parameters, +// ) -> Result { +// verify_proofs_on_srs::(proofs, inputs, circuit, rng, ¶ms.srs) +// } + +// /// Check multiple proofs without aggregation. Verifier's work is +// /// not succint due to `S(X, Y)` evaluation +// pub fn verify_proofs_on_srs, S: SynthesisDriver, R: Rng>( +// proofs: &[Proof], +// inputs: &[Vec], +// circuit: C, +// rng: R, +// srs: &SRS, +// ) -> Result { +// let mut verifier = MultiVerifier::::new(circuit, srs, rng)?; +// let expected_inputs_size = verifier.get_k_map().len() - 1; +// for (proof, inputs) in proofs.iter().zip(inputs.iter()) { +// if inputs.len() != expected_inputs_size { +// return Err(SynthesisError::Unsatisfiable); +// } +// verifier.add_proof(proof, &inputs, |_, _| None); +// } + +// Ok(verifier.check_all()) +// } + +// /// Check multiple proofs with aggregation. 
Verifier's work is +// /// not succint due to `S(X, Y)` evaluation +// pub fn verify_aggregate, S: SynthesisDriver,R: Rng>( +// proofs: &[(Proof, SxyAdvice)], +// aggregate: &Aggregate, +// inputs: &[Vec], +// circuit: C, +// rng: R, +// params: &Parameters, +// ) -> Result { +// verify_aggregate_on_srs::(proofs, aggregate, inputs, circuit, rng, ¶ms.srs) +// } + +// /// Check multiple proofs with aggregation. Verifier's work is +// /// not succint due to `S(X, Y)` evaluation +// pub fn verify_aggregate_on_srs, S: SynthesisDriver, R: Rng>( +// proofs: &[(Proof, SxyAdvice)], +// aggregate: &Aggregate, +// inputs: &[Vec], +// circuit: C, +// rng: R, +// srs: &SRS, +// ) -> Result { +// let mut verifier = MultiVerifier::::new(circuit, srs, rng)?; +// let expected_inputs_size = verifier.get_k_map().len() - 1; +// for ((proof, advice), inputs) in proofs.iter().zip(inputs.iter()) { +// if inputs.len() != expected_inputs_size { +// return Err(SynthesisError::Unsatisfiable); +// } +// verifier.add_proof_with_advice(proof, &inputs, &advice); +// } +// verifier.add_aggregate(proofs, aggregate); + +// Ok(verifier.check_all()) +// } + diff --git a/bellman/src/sonic/unhelped/wellformed_argument.rs b/bellman/src/sonic/unhelped/wellformed_argument.rs new file mode 100644 index 0000000..c91c867 --- /dev/null +++ b/bellman/src/sonic/unhelped/wellformed_argument.rs @@ -0,0 +1,213 @@ +/// Wellformedness argument allows to verify that some committment was to multivariate polynomial of degree n, +/// with no constant term and negative powers + +use crate::pairing::ff::{Field, PrimeField, PrimeFieldRepr}; +use crate::pairing::{Engine, CurveProjective, CurveAffine}; +use std::marker::PhantomData; + +use crate::sonic::srs::SRS; +use crate::sonic::util::*; +use crate::sonic::transcript::{Transcript, TranscriptProtocol}; + +#[derive(Clone)] +pub struct WellformednessArgument { + polynomials: Vec> +} + +#[derive(Clone)] +pub struct WellformednessProof { + pub l: E::G1Affine, + pub r: E::G1Affine +} + +#[derive(Clone)] +pub struct WellformednessSignature { + pub proof: WellformednessProof +} + +impl WellformednessArgument { + + pub fn create_signature( + all_polys: Vec>, + wellformed_challenges: Vec, + srs: &SRS + ) -> WellformednessSignature { + let wellformed_argument = WellformednessArgument::new(all_polys); + + let proof = wellformed_argument.make_argument(wellformed_challenges, &srs); + + WellformednessSignature { + proof + } + } + + pub fn new(polynomials: Vec>) -> Self { + assert!(polynomials.len() > 0); + + let length = polynomials[0].len(); + for p in polynomials.iter() { + assert!(p.len() == length); + } + + WellformednessArgument { + polynomials: polynomials + } + } + + pub fn commit(&self, srs: &SRS) -> Vec { + + let mut results = vec![]; + + let n = self.polynomials[0].len(); + + for p in self.polynomials.iter() { + let c = multiexp( + srs.g_positive_x_alpha[0..n].iter(), + p.iter() + ).into_affine(); + + results.push(c); + } + + results + } + + pub fn make_argument(self, challenges: Vec, srs: &SRS) -> WellformednessProof { + assert_eq!(challenges.len(), self.polynomials.len()); + let mut polynomials = self.polynomials; + let mut challenges = challenges; + + let mut p0 = polynomials.pop().unwrap(); + let r0 = challenges.pop().unwrap(); + let n = p0.len(); + mul_polynomial_by_scalar(&mut p0[..], r0); + + let m = polynomials.len(); + + for _ in 0..m { + let p = polynomials.pop().unwrap(); + let r = challenges.pop().unwrap(); + mul_add_polynomials(&mut p0[..], & p[..], r); + } + + let d = srs.d; + + // TODO: 
it's not necessary to have n < d, fix later + + assert!(n < d); + + // here the multiplier is x^-d, so largest negative power is -(d - 1), smallest negative power is - (d - n) + // H^{x^k} are labeled from 0 power, so we need to use proper indexes + let l = multiexp( + srs.g_negative_x[(d - n)..=(d - 1)].iter().rev(), + p0.iter() + ).into_affine(); + + // here the multiplier is x^d-n, so largest positive power is d, smallest positive power is d - n + 1 + + let r = multiexp( + srs.g_positive_x[(d - n + 1)..=d].iter(), + p0.iter() + ).into_affine(); + + WellformednessProof { + l: l, + r: r + } + } + + pub fn verify(n: usize, challenges: &Vec, commitments: &Vec, proof: &WellformednessProof, srs: &SRS) -> bool { + let d = srs.d; + + let alpha_x_d_precomp = srs.h_positive_x_alpha[d].prepare(); + // TODO: not strictly required + assert!(n < d); + let d_minus_n = d - n; + let alpha_x_n_minus_d_precomp = srs.h_negative_x_alpha[d_minus_n].prepare(); + let mut h_prep = srs.h_positive_x[0]; + h_prep.negate(); + let h_prep = h_prep.prepare(); + + let a = multiexp( + commitments.iter(), + challenges.iter(), + ).into_affine(); + + let a = a.prepare(); + + let valid = E::final_exponentiation(&E::miller_loop(&[ + (&a, &h_prep), + (&proof.l.prepare(), &alpha_x_d_precomp) + ])).unwrap() == E::Fqk::one(); + + if !valid { + return false; + } + + let valid = E::final_exponentiation(&E::miller_loop(&[ + (&a, &h_prep), + (&proof.r.prepare(), &alpha_x_n_minus_d_precomp) + ])).unwrap() == E::Fqk::one(); + + if !valid { + return false; + } + + true + } +} + +#[test] +fn test_argument() { + use crate::pairing::bls12_381::{Fr, G1Affine, G1, Bls12}; + use rand::{XorShiftRng, SeedableRng, Rand, Rng}; + use crate::sonic::srs::SRS; + + let srs_x = Fr::from_str("23923").unwrap(); + let srs_alpha = Fr::from_str("23728792").unwrap(); + // let srs = SRS::::dummy(830564, srs_x, srs_alpha); + let srs = SRS::::new(128, srs_x, srs_alpha); + + let n: usize = 1 << 5; + let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]); + let coeffs = (1..=n).map(|_| Fr::rand(rng)).collect::>(); + + let argument = WellformednessArgument::new(vec![coeffs]); + let challenges = (0..1).map(|_| Fr::rand(rng)).collect::>(); + + let commitments = argument.commit(&srs); + + let proof = argument.make_argument(challenges.clone(), &srs); + + let valid = WellformednessArgument::verify(n, &challenges, &commitments, &proof, &srs); + + assert!(valid); +} + +#[test] +fn test_argument_soundness() { + use crate::pairing::bls12_381::{Fr, G1Affine, G1, Bls12}; + use rand::{XorShiftRng, SeedableRng, Rand, Rng}; + use crate::sonic::srs::SRS; + + let srs_x = Fr::from_str("23923").unwrap(); + let srs_alpha = Fr::from_str("23728792").unwrap(); + let srs = SRS::::dummy(830564, srs_x, srs_alpha); + + let n: usize = 1 << 8; + let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]); + let coeffs = (1..=n).map(|_| Fr::rand(rng)).collect::>(); + + let argument = WellformednessArgument::new(vec![coeffs]); + let commitments = argument.commit(&srs); + + let coeffs = (1..=n).map(|_| Fr::rand(rng)).collect::>(); + let argument = WellformednessArgument::new(vec![coeffs]); + let challenges = (0..1).map(|_| Fr::rand(rng)).collect::>(); + + let proof = argument.make_argument(challenges.clone(), &srs); + + let valid = WellformednessArgument::verify(n, &challenges, &commitments, &proof, &srs); + + assert!(!valid); +} \ No newline at end of file diff --git a/bellman/src/sonic/util.rs b/bellman/src/sonic/util.rs new file 
mode 100644 index 0000000..a91cfa8 --- /dev/null +++ b/bellman/src/sonic/util.rs @@ -0,0 +1,1161 @@ +use crate::SynthesisError; +use crate::pairing::ff::{Field, PrimeField, PrimeFieldRepr, ScalarEngine}; +use crate::pairing::{CurveAffine, CurveProjective, Engine}; +use super::srs::SRS; + +pub trait ChainExt: Iterator { + fn chain_ext(self, other: U) -> Chain + where + Self: Sized, + U: IntoIterator, + { + Chain { + t: self, + u: other.into_iter(), + } + } +} + +impl ChainExt for I {} + +#[derive(Clone)] +pub struct Chain { + t: T, + u: U, +} + +impl Iterator for Chain +where + T: Iterator, + U: Iterator, +{ + type Item = T::Item; + + fn next(&mut self) -> Option { + match self.t.next() { + Some(v) => Some(v), + None => match self.u.next() { + Some(v) => Some(v), + None => None, + }, + } + } +} + +impl ExactSizeIterator for Chain +where + T: Iterator, + U: Iterator, + T: ExactSizeIterator, + U: ExactSizeIterator, +{ + fn len(&self) -> usize { + self.t.len() + self.u.len() + } +} + +impl DoubleEndedIterator for Chain +where + T: Iterator, + U: Iterator, + T: DoubleEndedIterator, + U: DoubleEndedIterator, +{ + fn next_back(&mut self) -> Option { + match self.u.next_back() { + Some(v) => Some(v), + None => match self.t.next_back() { + Some(v) => Some(v), + None => None, + }, + } + } +} + +pub fn polynomial_commitment< + 'a, + E: Engine, + IS: IntoIterator, + >( + max: usize, + largest_negative_power: usize, + largest_positive_power: usize, + srs: &'a SRS, + s: IS, + ) -> E::G1Affine + where + IS::IntoIter: ExactSizeIterator, + { + // smallest power is d - max - largest_negative_power; It should either be 0 for use of positive powers only, + // of we should use part of the negative powers + let d = srs.d; + assert!(max >= largest_positive_power); + // use both positive and negative powers for commitment + if d < max + largest_negative_power + 1 { + let min_power = largest_negative_power + max - d; + let max_power = d + largest_positive_power - max; + // need to use negative powers to make a proper commitment + return multiexp( + srs.g_negative_x_alpha[0..min_power].iter().rev() + .chain_ext(srs.g_positive_x_alpha[..max_power].iter()), + s + ).into_affine(); + } else { + return multiexp( + srs.g_positive_x_alpha[(srs.d - max - largest_negative_power - 1)..].iter(), + s + ).into_affine(); + } + } + + +/// For now this function MUST take a polynomial in a form f(x) - f(z) +pub fn polynomial_commitment_opening< + 'a, + E: Engine, + I: IntoIterator + >( + largest_negative_power: usize, + _largest_positive_power: usize, + polynomial_coefficients: I, + point: E::Fr, + srs: &'a SRS, + ) -> E::G1Affine + where I::IntoIter: DoubleEndedIterator + ExactSizeIterator, + { + // let poly = parallel_kate_divison::(polynomial_coefficients, point); + + // use std::time::Instant; + // let start = Instant::now(); + + let poly = kate_divison( + polynomial_coefficients, + point, + ); + + // println!("Kate division of size {} taken {:?}", poly.len(), start.elapsed()); + + let negative_poly = poly[0..largest_negative_power].iter().rev(); + let positive_poly = poly[largest_negative_power..].iter(); + multiexp( + srs.g_negative_x[1..(negative_poly.len() + 1)].iter().chain_ext( + srs.g_positive_x[0..positive_poly.len()].iter() + ), + negative_poly.chain_ext(positive_poly) + ).into_affine() + } + +extern crate crossbeam; +use self::crossbeam::channel::{unbounded}; + +pub fn evaluate_at_consequitive_powers<'a, F: Field> ( + coeffs: &[F], + first_power: F, + base: F +) -> F + { + use crate::multicore::Worker; + + let (s, r) 
= unbounded(); + + let worker = Worker::new(); + + worker.scope(coeffs.len(), |scope, chunk| { + for (i, coeffs) in coeffs.chunks(chunk).enumerate() + { + let s = s.clone(); + scope.spawn(move |_| { + let mut current_power = base.pow(&[(i*chunk) as u64]); + current_power.mul_assign(&first_power); + + let mut acc = F::zero(); + + for p in coeffs { + let mut tmp = *p; + tmp.mul_assign(¤t_power); + acc.add_assign(&tmp); + + current_power.mul_assign(&base); + } + + s.send(acc).expect("must send"); + }); + } + }); + + drop(s); + + // all threads in a scope have done working, so we can safely read + let mut result = F::zero(); + + loop { + if r.is_empty() { + break; + } + let value = r.recv().expect("must not be empty"); + result.add_assign(&value); + } + + result +} + +pub fn mut_evaluate_at_consequitive_powers<'a, F: Field> ( + coeffs: &mut [F], + first_power: F, + base: F +) -> F + { + use crate::multicore::Worker; + + let (s, r) = unbounded(); + + let worker = Worker::new(); + + worker.scope(coeffs.len(), |scope, chunk| { + for (i, coeffs) in coeffs.chunks_mut(chunk).enumerate() + { + let s = s.clone(); + scope.spawn(move |_| { + let mut current_power = base.pow(&[(i*chunk) as u64]); + current_power.mul_assign(&first_power); + + let mut acc = F::zero(); + + for p in coeffs { + p.mul_assign(¤t_power); + acc.add_assign(&p); + + current_power.mul_assign(&base); + } + + s.send(acc).expect("must send"); + }); + } + }); + + drop(s); + + // all threads in a scope have done working, so we can safely read + let mut result = F::zero(); + + loop { + if r.is_empty() { + break; + } + let value = r.recv().expect("must not be empty"); + result.add_assign(&value); + } + + result +} + +/// Multiply each coefficient by some power of the base in a form +/// `first_power * base^{i}` +pub fn mut_distribute_consequitive_powers<'a, F: Field> ( + coeffs: &mut [F], + first_power: F, + base: F +) + { + use crate::multicore::Worker; + + let worker = Worker::new(); + + worker.scope(coeffs.len(), |scope, chunk| { + for (i, coeffs_chunk) in coeffs.chunks_mut(chunk).enumerate() + { + scope.spawn(move |_| { + let mut current_power = base.pow(&[(i*chunk) as u64]); + current_power.mul_assign(&first_power); + + for p in coeffs_chunk { + p.mul_assign(¤t_power); + + current_power.mul_assign(&base); + } + }); + } + }); +} + +// pub fn multiexp< +// 'a, +// G: CurveAffine, +// IB: IntoIterator, +// IS: IntoIterator, +// >( +// g: IB, +// s: IS, +// ) -> G::Projective +// where +// IB::IntoIter: ExactSizeIterator + Clone, +// IS::IntoIter: ExactSizeIterator, +// { +// use crate::multicore::Worker; +// use crate::multiexp::dense_multiexp; + +// use std::time::Instant; +// let start = Instant::now(); + +// let s: Vec<::Repr> = s.into_iter().map(|e| e.into_repr()).collect::>(); +// let g: Vec = g.into_iter().map(|e| *e).collect::>(); + +// println!("Multiexp collecting taken {:?}", start.elapsed()); + +// assert_eq!(s.len(), g.len(), "scalars and exponents must have the same length"); + +// let start = Instant::now(); +// let pool = Worker::new(); +// println!("Multiexp pool creation taken {:?}", start.elapsed()); + +// let start = Instant::now(); + +// let result = dense_multiexp( +// &pool, +// &g, +// &s +// ).unwrap(); + +// println!("Multiexp taken {:?}", start.elapsed()); + +// result +// } + +pub fn multiexp< + 'a, + G: CurveAffine, + IB: IntoIterator, + IS: IntoIterator, +>( + g: IB, + s: IS, +) -> G::Projective +where + IB::IntoIter: ExactSizeIterator + Clone, + IS::IntoIter: ExactSizeIterator, +{ + use 
crate::multicore::Worker; + use crate::multiexp::multiexp; + use crate::source::FullDensity; + use futures::Future; + use std::sync::Arc; + + let s: Vec<::Repr> = s.into_iter().map(|e| e.into_repr()).collect::>(); + let g: Vec = g.into_iter().map(|e| *e).collect::>(); + + assert_eq!(s.len(), g.len(), "scalars and exponents must have the same length"); + + let pool = Worker::new(); + + // use std::time::Instant; + // let start = Instant::now(); + + let result = multiexp( + &pool, + (Arc::new(g), 0), + FullDensity, + Arc::new(s) + ).wait().unwrap(); + + // println!("Multiexp taken {:?}", start.elapsed()); + + result +} + + + + +pub fn multiexp_serial< + 'a, + G: CurveAffine, + IB: IntoIterator, + IS: IntoIterator, +>( + g: IB, + s: IS, +) -> G::Projective +where + IB::IntoIter: ExactSizeIterator + Clone, + IS::IntoIter: ExactSizeIterator, +{ + let g = g.into_iter(); + let s = s.into_iter(); + assert_eq!(g.len(), s.len()); + + let c = if s.len() < 32 { + 3u32 + } else { + (f64::from(s.len() as u32)).ln().ceil() as u32 + }; + + // Convert all of the scalars into representations + let mut s = s.map(|s| s.into_repr()).collect::>(); + + let mut windows = vec![]; + let mut buckets = vec![]; + + let mask = (1u64 << c) - 1u64; + let mut cur = 0; + let num_bits = ::Fr::NUM_BITS; + while cur <= num_bits { + let mut acc = G::Projective::zero(); + + buckets.truncate(0); + buckets.resize((1 << c) - 1, G::Projective::zero()); + + let g = g.clone(); + + for (s, g) in s.iter_mut().zip(g) { + let index = (s.as_ref()[0] & mask) as usize; + + if index != 0 { + buckets[index - 1].add_assign_mixed(g); + } + + s.shr(c as u32); + } + + let mut running_sum = G::Projective::zero(); + for exp in buckets.iter().rev() { + running_sum.add_assign(exp); + acc.add_assign(&running_sum); + } + + windows.push(acc); + + cur += c; + } + + let mut acc = G::Projective::zero(); + + for window in windows.into_iter().rev() { + for _ in 0..c { + acc.double(); + } + + acc.add_assign(&window); + } + + acc +} + +/// Divides polynomial `a` in `x` by `x - b` with +/// no remainder. +pub fn kate_divison<'a, F: Field, I: IntoIterator>(a: I, mut b: F) -> Vec +where + I::IntoIter: DoubleEndedIterator + ExactSizeIterator, +{ + b.negate(); + let a = a.into_iter(); + + let mut q = vec![F::zero(); a.len() - 1]; + + let mut tmp = F::zero(); + for (q, r) in q.iter_mut().rev().zip(a.rev()) { + let mut lead_coeff = *r; + lead_coeff.sub_assign(&tmp); + *q = lead_coeff; + tmp = lead_coeff; + tmp.mul_assign(&b); + } + + q +} + +/// Divides polynomial `a` in `x` by `x - b` with +/// no remainder using fft. 
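//
// The recursion below relies on the identity
//   x^2 / (x - b) = (x + b) + b^2 / (x - b),
// so multiplying the dividend by the "reciprocal" (x + b), dropping the two
// lowest-order coefficients and recursing on the b^2-scaled remainder
// reconstructs the quotient; the result agrees with the serial `kate_divison`
// (see the `test_*_parallel_kate_division` tests at the end of this file).
//
// Illustrative usage (a sketch mirroring those tests), assuming `poly` already
// encodes f(x) - f(z), i.e. the value at the division point has been subtracted:
//
//     let q = parallel_kate_divison::<Bls12, _>(&poly, z);
//     // q(x) * (x - z) == poly(x)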
+pub fn parallel_kate_divison<'a, E: Engine, I: IntoIterator>(a: I, b: E::Fr) -> Vec +where + I::IntoIter: DoubleEndedIterator + ExactSizeIterator, +{ + // this implementation is only for division by `x - b` form polynomail, + // so we can manuall calculate the reciproical poly of the form `x^2/(x-b)` + // and the reminder + + // x^2 /(x - b) = x + b*x/(x - b) = (x + b) + b^2/(x - b) + + let reciproical = vec![b, E::Fr::one()]; // x + b + + // and remainder b^2 + let mut b_squared = b; + b_squared.square(); + + let mut b_neg = b; + b_neg.negate(); + + let divisor = vec![b_neg, E::Fr::one()]; + + let poly: Vec = a.into_iter().map(|el| el.clone()).collect(); + + let (q, _) = kate_divison_inner::(poly, divisor, reciproical, b_squared); + + // assert_eq!(r.len(), 0); + + q +} + +fn kate_divison_inner( + poly: Vec, + divisor: Vec, + reciproical: Vec, + remainder: E::Fr + ) -> (Vec, Vec) { + if poly.len() == 1 { + return (vec![], poly); + } + // TODO: Change generic multiplications by multiplications by degree 1 polynomial + let poly_degree = poly.len() - 1; + let mut q = multiply_polynomials::(poly.clone(), reciproical.clone()); + q.drain(0..2); + // recursion step + if poly_degree > 2 { + let mut rec_step = poly.clone(); + mul_polynomial_by_scalar(&mut rec_step[..], remainder); + // truncate low order terms + rec_step.drain(0..2); + let (q2, _) = kate_divison_inner::(rec_step, divisor.clone(), reciproical, remainder); + // length of q2 is smaller + add_polynomials(&mut q[..q2.len()], &q2[..]); + } + + // although r must be zero, calculate it for now + if q.len() == 0 { + return (q, poly); + } + + // r = u - v*q + let mut poly = poly; + let tmp = multiply_polynomials::(divisor, q.clone()); + sub_polynomials(&mut poly[..], &tmp[..]); + + return (q, poly); +} + +/// Convenience function to check polynomail commitment +pub fn check_polynomial_commitment( + commitment: &E::G1Affine, + point: &E::Fr, + value: &E::Fr, + opening: &E::G1Affine, + max: usize, + srs: &SRS +) -> bool { + // e(W , hα x )e(g^{v} * W{-z} , hα ) = e(F , h^{x^{−d +max}} ) + if srs.d < max { + return false; + } + let alpha_x_precomp = srs.h_positive_x_alpha[1].prepare(); + let alpha_precomp = srs.h_positive_x_alpha[0].prepare(); + let mut neg_x_n_minus_d_precomp = srs.h_negative_x[srs.d - max]; + neg_x_n_minus_d_precomp.negate(); + let neg_x_n_minus_d_precomp = neg_x_n_minus_d_precomp.prepare(); + + let w = opening.prepare(); + let mut gv = srs.g_positive_x[0].mul(value.into_repr()); + let mut z_neg = *point; + z_neg.negate(); + let w_minus_z = opening.mul(z_neg.into_repr()); + gv.add_assign(&w_minus_z); + + let gv = gv.into_affine().prepare(); + + E::final_exponentiation(&E::miller_loop(&[ + (&w, &alpha_x_precomp), + (&gv, &alpha_precomp), + (&commitment.prepare(), &neg_x_n_minus_d_precomp), + ])).unwrap() == E::Fqk::one() +} + +#[test] +fn laurent_division() { + use crate::pairing::ff::PrimeField; + use crate::pairing::bls12_381::{Fr}; + + let mut poly = vec![ + Fr::from_str("328947234").unwrap(), + Fr::from_str("3545623451111").unwrap(), + Fr::from_str("112").unwrap(), + Fr::from_str("55555").unwrap(), + Fr::from_str("1235685").unwrap(), + ]; + + fn eval(poly: &[Fr], point: Fr) -> Fr { + let point_inv = point.inverse().unwrap(); + + let mut acc = Fr::zero(); + let mut tmp = Fr::one(); + for p in &poly[2..] 
{ + let mut t = *p; + t.mul_assign(&tmp); + acc.add_assign(&t); + tmp.mul_assign(&point); + } + let mut tmp = point_inv; + for p in poly[0..2].iter().rev() { + let mut t = *p; + t.mul_assign(&tmp); + acc.add_assign(&t); + tmp.mul_assign(&point_inv); + } + + acc + } + + let x = Fr::from_str("23").unwrap(); + let z = Fr::from_str("2000").unwrap(); + + let p_at_x = eval(&poly, x); + let p_at_z = eval(&poly, z); + + // poly = poly(X) - poly(z) + poly[2].sub_assign(&p_at_z); + + let quotient_poly = kate_divison(&poly, z); + + let quotient = eval("ient_poly, x); + + // check that + // quotient * (x - z) = p_at_x - p_at_z + + let mut lhs = x; + lhs.sub_assign(&z); + lhs.mul_assign("ient); + + let mut rhs = p_at_x; + rhs.sub_assign(&p_at_z); + + assert_eq!(lhs, rhs); +} + +pub fn multiply_polynomials(a: Vec, b: Vec) -> Vec { + let result_len = a.len() + b.len() - 1; + + use crate::multicore::Worker; + use crate::domain::{EvaluationDomain, Scalar}; + + let worker = Worker::new(); + let scalars_a: Vec> = a.into_iter().map(|e| Scalar::(e)).collect(); + let mut domain_a = EvaluationDomain::from_coeffs_into_sized(scalars_a, result_len).unwrap(); + + let scalars_b: Vec> = b.into_iter().map(|e| Scalar::(e)).collect(); + let mut domain_b = EvaluationDomain::from_coeffs_into_sized(scalars_b, result_len).unwrap(); + + domain_a.fft(&worker); + domain_b.fft(&worker); + + domain_a.mul_assign(&worker, &domain_b); + drop(domain_b); + + domain_a.ifft(&worker); + + let mut mul_result: Vec = domain_a.into_coeffs().iter().map(|e| e.0).collect(); + + mul_result.truncate(result_len); + + mul_result +} + + +// alternative implementation that does not require an `Evaluation domain` struct +pub fn multiply_polynomials_fft(a: Vec, b: Vec) -> Vec { + use crate::multicore::Worker; + use crate::domain::{best_fft, Scalar}; + use crate::group::Group; + + let result_len = a.len() + b.len() - 1; + + // m is a size of domain where Z polynomial does NOT vanish + // in normal domain Z is in a form of (X-1)(X-2)...(X-N) + let mut m = 1; + let mut exp = 0; + let mut omega = E::Fr::root_of_unity(); + let max_degree = (1 << E::Fr::S) - 1; + + if result_len > max_degree { + panic!("multiplication result degree is too large"); + } + + while m < result_len { + m *= 2; + exp += 1; + + // The pairing-friendly curve may not be able to support + // large enough (radix2) evaluation domains. + if exp > E::Fr::S { + panic!("multiplication result degree is too large"); + } + } + + // If full domain is not needed - limit it, + // e.g. 
if (2^N)th power is not required, just double omega and get 2^(N-1)th + // Compute omega, the 2^exp primitive root of unity + for _ in exp..E::Fr::S { + omega.square(); + } + + let omegainv = omega.inverse().unwrap(); + let minv = E::Fr::from_str(&format!("{}", m)).unwrap().inverse().unwrap(); + + let worker = Worker::new(); + + let mut scalars_a: Vec> = a.into_iter().map(|e| Scalar::(e)).collect(); + let mut scalars_b: Vec> = b.into_iter().map(|e| Scalar::(e)).collect(); + scalars_a.resize(m, Scalar::(E::Fr::zero())); + scalars_b.resize(m, Scalar::(E::Fr::zero())); + + + best_fft(&mut scalars_a[..], &worker, &omega, exp); + best_fft(&mut scalars_b[..], &worker, &omega, exp); + + // do the convolution + worker.scope(scalars_a.len(), |scope, chunk| { + for (a, b) in scalars_a.chunks_mut(chunk).zip(scalars_b.chunks(chunk)) { + scope.spawn(move |_| { + for (a, b) in a.iter_mut().zip(b.iter()) { + a.group_mul_assign(&b.0); + } + }); + } + }); + + // no longer need it + drop(scalars_b); + + best_fft(&mut scalars_a[..], &worker, &omegainv, exp); + worker.scope(scalars_a.len(), |scope, chunk| { + for v in scalars_a.chunks_mut(chunk) { + scope.spawn(move |_| { + for v in v { + v.group_mul_assign(&minv); + } + }); + } + }); + + let mut mul_result: Vec = scalars_a.into_iter().map(|e| e.0).collect(); + + mul_result.truncate(result_len); + + mul_result +} + +pub fn multiply_polynomials_serial(mut a: Vec, mut b: Vec) -> Vec { + let result_len = a.len() + b.len() - 1; + + // Compute the size of our evaluation domain + let mut m = 1; + let mut exp = 0; + while m < result_len { + m *= 2; + exp += 1; + + // The pairing-friendly curve may not be able to support + // large enough (radix2) evaluation domains. + if exp >= E::Fr::S { + panic!("polynomial too large") + } + } + + // Compute omega, the 2^exp primitive root of unity + let mut omega = E::Fr::root_of_unity(); + for _ in exp..E::Fr::S { + omega.square(); + } + + // Extend with zeroes + a.resize(m, E::Fr::zero()); + b.resize(m, E::Fr::zero()); + + serial_fft::(&mut a[..], &omega, exp); + serial_fft::(&mut b[..], &omega, exp); + + for (a, b) in a.iter_mut().zip(b.iter()) { + a.mul_assign(b); + } + + serial_fft::(&mut a[..], &omega.inverse().unwrap(), exp); + + a.truncate(result_len); + + let minv = E::Fr::from_str(&format!("{}", m)) + .unwrap() + .inverse() + .unwrap(); + + for a in a.iter_mut() { + a.mul_assign(&minv); + } + + a +} + +// add polynomails in coefficient form +pub fn add_polynomials(a: &mut [F], b: &[F]) { + use crate::multicore::Worker; + use crate::domain::{EvaluationDomain, Scalar}; + + let worker = Worker::new(); + + assert_eq!(a.len(), b.len()); + + worker.scope(a.len(), |scope, chunk| { + for (a, b) in a.chunks_mut(chunk).zip(b.chunks(chunk)) + { + scope.spawn(move |_| { + for (a, b) in a.iter_mut().zip(b.iter()) { + a.add_assign(b); + } + }); + } + }); +} + +// subtract polynomails in coefficient form +pub fn sub_polynomials(a: &mut [F], b: &[F]) { + use crate::multicore::Worker; + use crate::domain::{EvaluationDomain, Scalar}; + + let worker = Worker::new(); + + assert_eq!(a.len(), b.len()); + + worker.scope(a.len(), |scope, chunk| { + for (a, b) in a.chunks_mut(chunk).zip(b.chunks(chunk)) + { + scope.spawn(move |_| { + for (a, b) in a.iter_mut().zip(b.iter()) { + a.sub_assign(b); + } + }); + } + }); +} + +// multiply coefficients of the polynomial by the scalar +pub fn mul_polynomial_by_scalar(a: &mut [F], b: F) { + use crate::multicore::Worker; + use crate::domain::{EvaluationDomain, Scalar}; + + let worker = Worker::new(); + + 
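    // Same parallel pattern as the other coefficient-wise helpers in this file:
    // split the slice into chunks, hand each chunk to a thread in the worker
    // pool, and apply the operation (here, multiply every coefficient by `b`)
    // element by element. For example, `mul_polynomial_by_scalar(&mut coeffs, r)`
    // scales the whole polynomial by `r` in place.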
worker.scope(a.len(), |scope, chunk| { + for a in a.chunks_mut(chunk) + { + scope.spawn(move |_| { + for a in a.iter_mut() { + a.mul_assign(&b); + } + }); + } + }); +} + +// elementwise add coeffs of one polynomial with coeffs of other, that are +// first multiplied by a scalar +pub fn mul_add_polynomials(a: &mut [F], b: &[F], c: F) { + use crate::multicore::Worker; + use crate::domain::{EvaluationDomain, Scalar}; + + let worker = Worker::new(); + + assert_eq!(a.len(), b.len()); + + worker.scope(a.len(), |scope, chunk| { + for (a, b) in a.chunks_mut(chunk).zip(b.chunks(chunk)) + { + scope.spawn(move |_| { + for (a, b) in a.iter_mut().zip(b.iter()) { + let mut r = *b; + r.mul_assign(&c); + + a.add_assign(&r); + } + }); + } + }); +} + +fn serial_fft(a: &mut [E::Fr], omega: &E::Fr, log_n: u32) { + fn bitreverse(mut n: u32, l: u32) -> u32 { + let mut r = 0; + for _ in 0..l { + r = (r << 1) | (n & 1); + n >>= 1; + } + r + } + + let n = a.len() as u32; + assert_eq!(n, 1 << log_n); + + for k in 0..n { + let rk = bitreverse(k, log_n); + if k < rk { + a.swap(rk as usize, k as usize); + } + } + + let mut m = 1; + for _ in 0..log_n { + let w_m = omega.pow(&[(n / (2 * m)) as u64]); + + let mut k = 0; + while k < n { + let mut w = E::Fr::one(); + for j in 0..m { + let mut t = a[(k + j + m) as usize]; + t.mul_assign(&w); + let mut tmp = a[(k + j) as usize]; + tmp.sub_assign(&t); + a[(k + j + m) as usize] = tmp; + a[(k + j) as usize].add_assign(&t); + w.mul_assign(&w_m); + } + + k += 2 * m; + } + + m *= 2; + } +} + +pub trait OptionExt { + fn get(self) -> Result; +} + +impl OptionExt for Option { + fn get(self) -> Result { + match self { + Some(t) => Ok(t), + None => Err(SynthesisError::AssignmentMissing), + } + } +} + +#[test] +fn test_mul() { + use rand::{self, Rand}; + use crate::pairing::bls12_381::Bls12; + use crate::pairing::bls12_381::Fr; + + const SAMPLES: usize = 100; + + let rng = &mut rand::thread_rng(); + let a = (0..SAMPLES).map(|_| Fr::rand(rng)).collect::>(); + let b = (0..SAMPLES).map(|_| Fr::rand(rng)).collect::>(); + + let serial_res = multiply_polynomials_serial::(a.clone(), b.clone()); + let parallel_res = multiply_polynomials::(a, b); + + assert_eq!(serial_res.len(), parallel_res.len()); + assert_eq!(serial_res, parallel_res); +} + +#[test] +fn test_eval_at_powers() { + use rand::{self, Rand, Rng}; + use crate::pairing::bls12_381::Bls12; + use crate::pairing::bls12_381::Fr; + + const SAMPLES: usize = 100000; + + let rng = &mut rand::thread_rng(); + let a = (0..SAMPLES).map(|_| Fr::rand(rng)).collect::>(); + let x: Fr = rng.gen(); + let n: u32 = rng.gen(); + + let mut acc = Fr::zero(); + + { + let mut tmp = x.pow(&[n as u64]); + + for coeff in a.iter() { + let mut c = *coeff; + c.mul_assign(&tmp); + acc.add_assign(&c); + tmp.mul_assign(&x); + } + } + + let first_power = x.pow(&[n as u64]); + let acc_parallel = evaluate_at_consequitive_powers(&a[..], first_power, x); + + assert_eq!(acc_parallel, acc); +} + +#[test] +fn test_mut_eval_at_powers() { + use rand::{self, Rand, Rng}; + use crate::pairing::bls12_381::Bls12; + use crate::pairing::bls12_381::Fr; + + const SAMPLES: usize = 100000; + + let rng = &mut rand::thread_rng(); + let mut a = (0..SAMPLES).map(|_| Fr::rand(rng)).collect::>(); + let mut b = a.clone(); + let x: Fr = rng.gen(); + let n: u32 = rng.gen(); + + let mut acc = Fr::zero(); + + { + let mut tmp = x.pow(&[n as u64]); + + for coeff in a.iter_mut() { + coeff.mul_assign(&tmp); + acc.add_assign(&coeff); + tmp.mul_assign(&x); + } + } + + let first_power = x.pow(&[n as u64]); 
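    // The in-place variant must both return the same evaluation as the serial
    // loop above and leave `b` scaled by consecutive powers, i.e. equal to the
    // mutated `a`; both properties are checked by the assertions below.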
+ let acc_parallel = mut_evaluate_at_consequitive_powers(&mut b[..], first_power, x); + + assert_eq!(acc_parallel, acc); + assert!(a == b); +} + +#[test] +fn test_mut_distribute_powers() { + use rand::{self, Rand, Rng}; + use crate::pairing::bls12_381::Bls12; + use crate::pairing::bls12_381::Fr; + + const SAMPLES: usize = 100000; + + let rng = &mut rand::thread_rng(); + let mut a = (0..SAMPLES).map(|_| Fr::rand(rng)).collect::>(); + let mut b = a.clone(); + let x: Fr = rng.gen(); + let n: u32 = rng.gen(); + + { + let mut tmp = x.pow(&[n as u64]); + + for coeff in a.iter_mut() { + coeff.mul_assign(&tmp); + tmp.mul_assign(&x); + } + } + + let first_power = x.pow(&[n as u64]); + mut_distribute_consequitive_powers(&mut b[..], first_power, x); + + assert!(a == b); +} + + +#[test] +fn test_trivial_parallel_kate_division() { + use crate::pairing::ff::PrimeField; + use crate::pairing::bls12_381::{Bls12, Fr}; + + let mut minus_one = Fr::one(); + minus_one.negate(); + + let z = Fr::one(); + + // this is x^2 - 1 + let poly = vec![ + minus_one, + Fr::from_str("0").unwrap(), + Fr::from_str("1").unwrap(), + ]; + + let quotient_poly = kate_divison(&poly, z); + + let parallel_q_poly = parallel_kate_divison::(&poly, z); + + assert_eq!(quotient_poly, parallel_q_poly); +} + +#[test] +fn test_less_trivial_parallel_kate_division() { + use crate::pairing::ff::PrimeField; + use crate::pairing::bls12_381::{Bls12, Fr}; + + let z = Fr::one(); + + let mut poly = vec![ + Fr::from_str("328947234").unwrap(), + Fr::from_str("3545623451111").unwrap(), + Fr::from_str("5").unwrap(), + Fr::from_str("55555").unwrap(), + Fr::from_str("1235685").unwrap(), + ]; + + fn eval(poly: &[Fr], point: Fr) -> Fr { + let mut acc = Fr::zero(); + let mut tmp = Fr::one(); + for p in &poly[..] { + let mut t = *p; + t.mul_assign(&tmp); + acc.add_assign(&t); + tmp.mul_assign(&point); + } + + acc + } + + let p_at_z = eval(&poly, z); + + // poly = poly(X) - poly(z) + poly[0].sub_assign(&p_at_z); + + let quotient_poly = kate_divison(&poly, z); + + let parallel_q_poly = parallel_kate_divison::(&poly, z); + + assert_eq!(quotient_poly, parallel_q_poly); +} + + +#[test] +fn test_parallel_kate_division() { + use crate::pairing::ff::PrimeField; + use crate::pairing::bls12_381::{Bls12, Fr}; + + let mut poly = vec![ + Fr::from_str("328947234").unwrap(), + Fr::from_str("3545623451111").unwrap(), + Fr::from_str("0").unwrap(), + Fr::from_str("55555").unwrap(), + Fr::from_str("1235685").unwrap(), + ]; + + fn eval(poly: &[Fr], point: Fr) -> Fr { + let point_inv = point.inverse().unwrap(); + + let mut acc = Fr::zero(); + let mut tmp = Fr::one(); + for p in &poly[2..] 
{ + let mut t = *p; + t.mul_assign(&tmp); + acc.add_assign(&t); + tmp.mul_assign(&point); + } + let mut tmp = point_inv; + for p in poly[0..2].iter().rev() { + let mut t = *p; + t.mul_assign(&tmp); + acc.add_assign(&t); + tmp.mul_assign(&point_inv); + } + + acc + } + + let z = Fr::from_str("2000").unwrap(); + + let p_at_z = eval(&poly, z); + + // poly = poly(X) - poly(z) + poly[2].sub_assign(&p_at_z); + + let quotient_poly = kate_divison(&poly, z); + + let parallel_q_poly = parallel_kate_divison::(&poly, z); + + assert_eq!(quotient_poly, parallel_q_poly); +} \ No newline at end of file diff --git a/bellman/src/source.rs b/bellman/src/source.rs new file mode 100644 index 0000000..ad4c065 --- /dev/null +++ b/bellman/src/source.rs @@ -0,0 +1,141 @@ +use crate::pairing::{ + CurveAffine, + CurveProjective, + Engine +}; + +use crate::pairing::ff::{ + PrimeField, + Field, + PrimeFieldRepr, + ScalarEngine}; + +use std::sync::Arc; +use std::io; +use bit_vec::{self, BitVec}; +use std::iter; + +use super::SynthesisError; + +/// An object that builds a source of bases. +pub trait SourceBuilder: Send + Sync + 'static + Clone { + type Source: Source; + + fn new(self) -> Self::Source; +} + +/// A source of bases, like an iterator. +pub trait Source { + /// Parses the element from the source. Fails if the point is at infinity. + fn add_assign_mixed(&mut self, to: &mut ::Projective) -> Result<(), SynthesisError>; + + /// Skips `amt` elements from the source, avoiding deserialization. + fn skip(&mut self, amt: usize) -> Result<(), SynthesisError>; +} + +impl SourceBuilder for (Arc>, usize) { + type Source = (Arc>, usize); + + fn new(self) -> (Arc>, usize) { + (self.0.clone(), self.1) + } +} + +impl Source for (Arc>, usize) { + fn add_assign_mixed(&mut self, to: &mut ::Projective) -> Result<(), SynthesisError> { + if self.0.len() <= self.1 { + return Err(io::Error::new(io::ErrorKind::UnexpectedEof, "expected more bases when adding from source").into()); + } + + if self.0[self.1].is_zero() { + return Err(SynthesisError::UnexpectedIdentity) + } + + to.add_assign_mixed(&self.0[self.1]); + + self.1 += 1; + + Ok(()) + } + + fn skip(&mut self, amt: usize) -> Result<(), SynthesisError> { + if self.0.len() <= self.1 { + return Err(io::Error::new(io::ErrorKind::UnexpectedEof, "expected more bases skipping from source").into()); + } + + self.1 += amt; + + Ok(()) + } +} + +pub trait QueryDensity { + /// Returns whether the base exists. 
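    // (Sketch of intent, not upstream documentation:) `QueryDensity` describes
    // which bases actually carry a non-zero scalar. `FullDensity` below answers
    // `true` for every base, while `DensityTracker` keeps a bitmap plus a running
    // count, so a multiexp implementation can skip the unused bases.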
+ type Iter: Iterator; + + fn iter(self) -> Self::Iter; + fn get_query_size(self) -> Option; +} + +#[derive(Clone)] +pub struct FullDensity; + +impl AsRef for FullDensity { + fn as_ref(&self) -> &FullDensity { + self + } +} + +impl<'a> QueryDensity for &'a FullDensity { + type Iter = iter::Repeat; + + fn iter(self) -> Self::Iter { + iter::repeat(true) + } + + fn get_query_size(self) -> Option { + None + } +} + +#[derive(Clone)] +pub struct DensityTracker { + bv: BitVec, + total_density: usize +} + +impl<'a> QueryDensity for &'a DensityTracker { + type Iter = bit_vec::Iter<'a>; + + fn iter(self) -> Self::Iter { + self.bv.iter() + } + + fn get_query_size(self) -> Option { + Some(self.bv.len()) + } +} + +impl DensityTracker { + pub fn new() -> DensityTracker { + DensityTracker { + bv: BitVec::new(), + total_density: 0 + } + } + + pub fn add_element(&mut self) { + self.bv.push(false); + } + + pub fn inc(&mut self, idx: usize) { + if !self.bv.get(idx).unwrap() { + self.bv.set(idx, true); + self.total_density += 1; + } + } + + pub fn get_total_density(&self) -> usize { + self.total_density + } +} \ No newline at end of file diff --git a/bellman/src/tests/dummy_engine.rs b/bellman/src/tests/dummy_engine.rs new file mode 100644 index 0000000..7b0954a --- /dev/null +++ b/bellman/src/tests/dummy_engine.rs @@ -0,0 +1,488 @@ +use crate::pairing::{ + Engine, + CurveProjective, + CurveAffine, + GroupDecodingError, + RawEncodable, + EncodedPoint +}; + +use crate::pairing::ff::{ + PrimeField, + PrimeFieldRepr, + Field, + SqrtField, + LegendreSymbol, + ScalarEngine, + PrimeFieldDecodingError, +}; + +use std::cmp::Ordering; +use std::fmt; +use rand::{Rand, Rng}; +use std::num::Wrapping; + +const MODULUS_R: Wrapping = Wrapping(64513); + +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub struct Fr(Wrapping); + +impl fmt::Display for Fr { + fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { + write!(f, "{}", (self.0).0) + } +} + +impl Rand for Fr { + fn rand(rng: &mut R) -> Self { + Fr(Wrapping(rng.gen()) % MODULUS_R) + } +} + +impl Field for Fr { + fn zero() -> Self { + Fr(Wrapping(0)) + } + + fn one() -> Self { + Fr(Wrapping(1)) + } + + fn is_zero(&self) -> bool { + (self.0).0 == 0 + } + + fn square(&mut self) { + self.0 = (self.0 * self.0) % MODULUS_R; + } + + fn double(&mut self) { + self.0 = (self.0 << 1) % MODULUS_R; + } + + fn negate(&mut self) { + if !::is_zero(self) { + self.0 = MODULUS_R - self.0; + } + } + + fn add_assign(&mut self, other: &Self) { + self.0 = (self.0 + other.0) % MODULUS_R; + } + + fn sub_assign(&mut self, other: &Self) { + self.0 = ((MODULUS_R + self.0) - other.0) % MODULUS_R; + } + + fn mul_assign(&mut self, other: &Self) { + self.0 = (self.0 * other.0) % MODULUS_R; + } + + fn inverse(&self) -> Option { + if ::is_zero(self) { + None + } else { + Some(self.pow(&[(MODULUS_R.0 as u64) - 2])) + } + } + + fn frobenius_map(&mut self, _: usize) { + // identity + } +} + +impl RawEncodable for Fr { + fn into_raw_uncompressed_le(&self) -> Self::Uncompressed { + Self::Uncompressed::empty() + } + + fn from_raw_uncompressed_le_unchecked( + _encoded: &Self::Uncompressed, + _infinity: bool + ) -> Result { + Ok(::zero()) + } + + fn from_raw_uncompressed_le(encoded: &Self::Uncompressed, _infinity: bool) -> Result { + Self::from_raw_uncompressed_le_unchecked(&encoded, _infinity) + } +} + +impl SqrtField for Fr { + fn legendre(&self) -> LegendreSymbol { + // s = self^((r - 1) // 2) + let s = self.pow([32256]); + if s == ::zero() { LegendreSymbol::Zero } + else if s == ::one() { 
LegendreSymbol::QuadraticResidue } + else { LegendreSymbol::QuadraticNonResidue } + } + + fn sqrt(&self) -> Option { + // Tonelli-Shank's algorithm for q mod 16 = 1 + // https://eprint.iacr.org/2012/685.pdf (page 12, algorithm 5) + match self.legendre() { + LegendreSymbol::Zero => Some(*self), + LegendreSymbol::QuadraticNonResidue => None, + LegendreSymbol::QuadraticResidue => { + let mut c = Fr::root_of_unity(); + // r = self^((t + 1) // 2) + let mut r = self.pow([32]); + // t = self^t + let mut t = self.pow([63]); + let mut m = Fr::S; + + while t != ::one() { + let mut i = 1; + { + let mut t2i = t; + t2i.square(); + loop { + if t2i == ::one() { + break; + } + t2i.square(); + i += 1; + } + } + + for _ in 0..(m - i - 1) { + c.square(); + } + ::mul_assign(&mut r, &c); + c.square(); + ::mul_assign(&mut t, &c); + m = i; + } + + Some(r) + } + } + } +} + +#[derive(Copy, Clone, Debug, Eq, PartialEq)] +pub struct FrRepr([u64; 1]); + +impl Ord for FrRepr { + fn cmp(&self, other: &FrRepr) -> Ordering { + (self.0)[0].cmp(&(other.0)[0]) + } +} + +impl PartialOrd for FrRepr { + fn partial_cmp(&self, other: &FrRepr) -> Option { + Some(self.cmp(other)) + } +} + +impl Rand for FrRepr { + fn rand(rng: &mut R) -> Self { + FrRepr([rng.gen()]) + } +} + +impl fmt::Display for FrRepr { + fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { + write!(f, "{}", (self.0)[0]) + } +} + +impl From for FrRepr { + fn from(v: u64) -> FrRepr { + FrRepr([v]) + } +} + +impl From for FrRepr { + fn from(v: Fr) -> FrRepr { + FrRepr([(v.0).0 as u64]) + } +} + +impl AsMut<[u64]> for FrRepr { + fn as_mut(&mut self) -> &mut [u64] { + &mut self.0[..] + } +} + +impl AsRef<[u64]> for FrRepr { + fn as_ref(&self) -> &[u64] { + &self.0[..] + } +} + +impl Default for FrRepr { + fn default() -> FrRepr { + FrRepr::from(0u64) + } +} + +impl PrimeFieldRepr for FrRepr { + fn sub_noborrow(&mut self, other: &Self) { + self.0[0] = self.0[0].wrapping_sub(other.0[0]); + } + fn add_nocarry(&mut self, other: &Self) { + self.0[0] = self.0[0].wrapping_add(other.0[0]); + } + fn num_bits(&self) -> u32 { + 64 - self.0[0].leading_zeros() + } + fn is_zero(&self) -> bool { + self.0[0] == 0 + } + fn is_odd(&self) -> bool { + !self.is_even() + } + fn is_even(&self) -> bool { + self.0[0] % 2 == 0 + } + fn div2(&mut self) { + self.shr(1) + } + fn shr(&mut self, amt: u32) { + self.0[0] >>= amt; + } + fn mul2(&mut self) { + self.shl(1) + } + fn shl(&mut self, amt: u32) { + self.0[0] <<= amt; + } +} + +impl PrimeField for Fr { + type Repr = FrRepr; + + const NUM_BITS: u32 = 16; + const CAPACITY: u32 = 15; + const S: u32 = 10; + + fn from_repr(repr: FrRepr) -> Result { + if repr.0[0] >= (MODULUS_R.0 as u64) { + Err(PrimeFieldDecodingError::NotInField(format!("{}", repr))) + } else { + Ok(Fr(Wrapping(repr.0[0] as u32))) + } + } + + fn from_raw_repr(repr: FrRepr) -> Result { + if repr.0[0] >= (MODULUS_R.0 as u64) { + Err(PrimeFieldDecodingError::NotInField(format!("{}", repr))) + } else { + Ok(Fr(Wrapping(repr.0[0] as u32))) + } + } + + fn into_repr(&self) -> FrRepr { + FrRepr::from(*self) + } + + fn char() -> FrRepr { + Fr(MODULUS_R).into() + } + + fn into_raw_repr(&self) -> FrRepr { + FrRepr::from(*self) + } + + fn multiplicative_generator() -> Fr { + Fr(Wrapping(5)) + } + + fn root_of_unity() -> Fr { + Fr(Wrapping(57751)) + } +} + +#[derive(Clone)] +pub struct DummyEngine; + +impl ScalarEngine for DummyEngine { + type Fr = Fr; +} + +impl Engine for DummyEngine { + type G1 = Fr; + type G1Affine = Fr; + type G2 = Fr; + type G2Affine = Fr; + type Fq = 
+
+#[derive(Copy, Clone, Debug, Eq, PartialEq)]
+pub struct FrRepr([u64; 1]);
+
+impl Ord for FrRepr {
+    fn cmp(&self, other: &FrRepr) -> Ordering {
+        (self.0)[0].cmp(&(other.0)[0])
+    }
+}
+
+impl PartialOrd for FrRepr {
+    fn partial_cmp(&self, other: &FrRepr) -> Option<Ordering> {
+        Some(self.cmp(other))
+    }
+}
+
+impl Rand for FrRepr {
+    fn rand<R: Rng>(rng: &mut R) -> Self {
+        FrRepr([rng.gen()])
+    }
+}
+
+impl fmt::Display for FrRepr {
+    fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
+        write!(f, "{}", (self.0)[0])
+    }
+}
+
+impl From<u64> for FrRepr {
+    fn from(v: u64) -> FrRepr {
+        FrRepr([v])
+    }
+}
+
+impl From<Fr> for FrRepr {
+    fn from(v: Fr) -> FrRepr {
+        FrRepr([(v.0).0 as u64])
+    }
+}
+
+impl AsMut<[u64]> for FrRepr {
+    fn as_mut(&mut self) -> &mut [u64] {
+        &mut self.0[..]
+    }
+}
+
+impl AsRef<[u64]> for FrRepr {
+    fn as_ref(&self) -> &[u64] {
+        &self.0[..]
+    }
+}
+
+impl Default for FrRepr {
+    fn default() -> FrRepr {
+        FrRepr::from(0u64)
+    }
+}
+
+impl PrimeFieldRepr for FrRepr {
+    fn sub_noborrow(&mut self, other: &Self) {
+        self.0[0] = self.0[0].wrapping_sub(other.0[0]);
+    }
+    fn add_nocarry(&mut self, other: &Self) {
+        self.0[0] = self.0[0].wrapping_add(other.0[0]);
+    }
+    fn num_bits(&self) -> u32 {
+        64 - self.0[0].leading_zeros()
+    }
+    fn is_zero(&self) -> bool {
+        self.0[0] == 0
+    }
+    fn is_odd(&self) -> bool {
+        !self.is_even()
+    }
+    fn is_even(&self) -> bool {
+        self.0[0] % 2 == 0
+    }
+    fn div2(&mut self) {
+        self.shr(1)
+    }
+    fn shr(&mut self, amt: u32) {
+        self.0[0] >>= amt;
+    }
+    fn mul2(&mut self) {
+        self.shl(1)
+    }
+    fn shl(&mut self, amt: u32) {
+        self.0[0] <<= amt;
+    }
+}
+
+impl PrimeField for Fr {
+    type Repr = FrRepr;
+
+    const NUM_BITS: u32 = 16;
+    const CAPACITY: u32 = 15;
+    const S: u32 = 10;
+
+    fn from_repr(repr: FrRepr) -> Result<Self, PrimeFieldDecodingError> {
+        if repr.0[0] >= (MODULUS_R.0 as u64) {
+            Err(PrimeFieldDecodingError::NotInField(format!("{}", repr)))
+        } else {
+            Ok(Fr(Wrapping(repr.0[0] as u32)))
+        }
+    }
+
+    fn from_raw_repr(repr: FrRepr) -> Result<Self, PrimeFieldDecodingError> {
+        if repr.0[0] >= (MODULUS_R.0 as u64) {
+            Err(PrimeFieldDecodingError::NotInField(format!("{}", repr)))
+        } else {
+            Ok(Fr(Wrapping(repr.0[0] as u32)))
+        }
+    }
+
+    fn into_repr(&self) -> FrRepr {
+        FrRepr::from(*self)
+    }
+
+    fn char() -> FrRepr {
+        Fr(MODULUS_R).into()
+    }
+
+    fn into_raw_repr(&self) -> FrRepr {
+        FrRepr::from(*self)
+    }
+
+    fn multiplicative_generator() -> Fr {
+        Fr(Wrapping(5))
+    }
+
+    fn root_of_unity() -> Fr {
+        Fr(Wrapping(57751))
+    }
+}
+
+#[derive(Clone)]
+pub struct DummyEngine;
+
+impl ScalarEngine for DummyEngine {
+    type Fr = Fr;
+}
+
+impl Engine for DummyEngine {
+    type G1 = Fr;
+    type G1Affine = Fr;
+    type G2 = Fr;
+    type G2Affine = Fr;
+    type Fq = Fr;
+    type Fqe = Fr;
+
+    // TODO: This should be F_645131 or something. Doesn't matter for now.
+    type Fqk = Fr;
+
+    fn miller_loop<'a, I>(i: I) -> Self::Fqk
+        where I: IntoIterator<Item=&'a (
+                                    &'a <Self::G1Affine as CurveAffine>::Prepared,
+                                    &'a <Self::G2Affine as CurveAffine>::Prepared
+                               )>
+    {
+        let mut acc = <Fr as Field>::zero();
+
+        for &(a, b) in i {
+            let mut tmp = *a;
+            <Fr as Field>::mul_assign(&mut tmp, b);
+            <Fr as Field>::add_assign(&mut acc, &tmp);
+        }
+
+        acc
+    }
+
+    /// Perform final exponentiation of the result of a miller loop.
+    fn final_exponentiation(this: &Self::Fqk) -> Option<Self::Fqk>
+    {
+        Some(*this)
+    }
+}
+
+impl CurveProjective for Fr {
+    type Affine = Fr;
+    type Base = Fr;
+    type Scalar = Fr;
+    type Engine = DummyEngine;
+
+    fn zero() -> Self {
+        <Fr as Field>::zero()
+    }
+
+    fn one() -> Self {
+        <Fr as Field>::one()
+    }
+
+    fn is_zero(&self) -> bool {
+        <Fr as Field>::is_zero(self)
+    }
+
+    fn batch_normalization(_: &mut [Self]) {
+
+    }
+
+    fn is_normalized(&self) -> bool {
+        true
+    }
+
+    fn double(&mut self) {
+        <Fr as Field>::double(self);
+    }
+
+    fn add_assign(&mut self, other: &Self) {
+        <Fr as Field>::add_assign(self, other);
+    }
+
+    fn add_assign_mixed(&mut self, other: &Self) {
+        <Fr as Field>::add_assign(self, other);
+    }
+
+    fn negate(&mut self) {
+        <Fr as Field>::negate(self);
+    }
+
+    fn mul_assign<S: Into<<Self::Scalar as PrimeField>::Repr>>(&mut self, other: S)
+    {
+        let tmp = Fr::from_repr(other.into()).unwrap();
+
+        <Fr as Field>::mul_assign(self, &tmp);
+    }
+
+    fn into_affine(&self) -> Fr {
+        *self
+    }
+
+    fn recommended_wnaf_for_scalar(_: <Self::Scalar as PrimeField>::Repr) -> usize {
+        3
+    }
+
+    fn recommended_wnaf_for_num_scalars(_: usize) -> usize {
+        3
+    }
+}
+
+#[derive(Copy, Clone)]
+pub struct FakePoint;
+
+impl AsMut<[u8]> for FakePoint {
+    fn as_mut(&mut self) -> &mut [u8] {
+        unimplemented!()
+    }
+}
+
+impl AsRef<[u8]> for FakePoint {
+    fn as_ref(&self) -> &[u8] {
+        unimplemented!()
+    }
+}
+
+impl EncodedPoint for FakePoint {
+    type Affine = Fr;
+
+    fn empty() -> Self {
+        unimplemented!()
+    }
+
+    fn size() -> usize {
+        unimplemented!()
+    }
+
+    fn into_affine(&self) -> Result<Self::Affine, GroupDecodingError> {
+        unimplemented!()
+    }
+
+    fn into_affine_unchecked(&self) -> Result<Self::Affine, GroupDecodingError> {
+        unimplemented!()
+    }
+
+    fn from_affine(_: Self::Affine) -> Self {
+        unimplemented!()
+    }
+}
+
+impl CurveAffine for Fr {
+    type Pair = Fr;
+    type PairingResult = Fr;
+    type Compressed = FakePoint;
+    type Uncompressed = FakePoint;
+    type Prepared = Fr;
+    type Projective = Fr;
+    type Base = Fr;
+    type Scalar = Fr;
+    type Engine = DummyEngine;
+
+    fn zero() -> Self {
+        <Fr as Field>::zero()
+    }
+
+    fn one() -> Self {
+        <Fr as Field>::one()
+    }
+
+    fn is_zero(&self) -> bool {
+        <Fr as Field>::is_zero(self)
+    }
+
+    fn negate(&mut self) {
+        <Fr as Field>::negate(self);
+    }
+
+    fn mul<S: Into<<Self::Scalar as PrimeField>::Repr>>(&self, other: S) -> Self::Projective
+    {
+        let mut res = *self;
+        let tmp = Fr::from_repr(other.into()).unwrap();
+
+        <Fr as Field>::mul_assign(&mut res, &tmp);
+
+        res
+    }
+
+    fn prepare(&self) -> Self::Prepared {
+        *self
+    }
+
+    fn pairing_with(&self, other: &Self::Pair) -> Self::PairingResult {
+        self.mul(*other)
+    }
+
+    fn into_projective(&self) -> Self::Projective {
+        *self
+    }
+}
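Editor's note: because every group and extension field in `DummyEngine` is the same 16-bit prime field, the "pairing" degenerates to multiplication mod 64513, which is exactly enough structure for the Groth16 equations to be exercised in unit tests. A bilinearity check, written as a sketch that assumes it sits in a test module where `dummy_engine`'s items are already imported:

```rust
// Sketch: e(2a, b) == e(a, 2b) collapses to (2a)*b == a*(2b) in the dummy engine.
use crate::pairing::{CurveAffine, CurveProjective};
use crate::pairing::ff::PrimeField;

#[test]
fn dummy_pairing_is_bilinear() {
    let a = Fr::from_repr(FrRepr::from(123)).unwrap();
    let b = Fr::from_repr(FrRepr::from(456)).unwrap();

    // "Scalar multiplication" is just field multiplication here.
    let a2 = a.mul(FrRepr::from(2)).into_affine();
    let b2 = b.mul(FrRepr::from(2)).into_affine();

    assert_eq!(a2.pairing_with(&b), a.pairing_with(&b2));
}
```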
diff --git a/bellman/src/tests/mod.rs b/bellman/src/tests/mod.rs
new file mode 100644
index 0000000..843d2cf
--- /dev/null
+++ b/bellman/src/tests/mod.rs
@@ -0,0 +1,93 @@
+use crate::pairing::{
+    Engine
+};
+
+use crate::pairing::ff::{
+    Field,
+    PrimeField,
+};
+
+pub mod dummy_engine;
+use self::dummy_engine::*;
+
+use std::marker::PhantomData;
+
+use crate::{
+    Circuit,
+    ConstraintSystem,
+    SynthesisError
+};
+
+pub(crate) struct XORDemo<E: Engine> {
+    pub(crate) a: Option<bool>,
+    pub(crate) b: Option<bool>,
+    pub(crate) _marker: PhantomData<E>
+}
+
+impl<E: Engine> Circuit<E> for XORDemo<E> {
+    fn synthesize<CS: ConstraintSystem<E>>(
+        self,
+        cs: &mut CS
+    ) -> Result<(), SynthesisError>
+    {
+        let a_var = cs.alloc(|| "a", || {
+            if self.a.is_some() {
+                if self.a.unwrap() {
+                    Ok(E::Fr::one())
+                } else {
+                    Ok(E::Fr::zero())
+                }
+            } else {
+                Err(SynthesisError::AssignmentMissing)
+            }
+        })?;
+
+        cs.enforce(
+            || "a_boolean_constraint",
+            |lc| lc + CS::one() - a_var,
+            |lc| lc + a_var,
+            |lc| lc
+        );
+
+        let b_var = cs.alloc(|| "b", || {
+            if self.b.is_some() {
+                if self.b.unwrap() {
+                    Ok(E::Fr::one())
+                } else {
+                    Ok(E::Fr::zero())
+                }
+            } else {
+                Err(SynthesisError::AssignmentMissing)
+            }
+        })?;
+
+        cs.enforce(
+            || "b_boolean_constraint",
+            |lc| lc + CS::one() - b_var,
+            |lc| lc + b_var,
+            |lc| lc
+        );
+
+        let c_var = cs.alloc_input(|| "c", || {
+            if self.a.is_some() && self.b.is_some() {
+                if self.a.unwrap() ^ self.b.unwrap() {
+                    Ok(E::Fr::one())
+                } else {
+                    Ok(E::Fr::zero())
+                }
+            } else {
+                Err(SynthesisError::AssignmentMissing)
+            }
+        })?;
+
+        cs.enforce(
+            || "c_xor_constraint",
+            |lc| lc + a_var + a_var,
+            |lc| lc + b_var,
+            |lc| lc + a_var + b_var - c_var
+        );
+
+        Ok(())
+    }
+}
+
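Editor's note: `XORDemo` enforces booleanity of `a` and `b` via (1 - x) * x = 0 and encodes c = a XOR b as the single rank-1 constraint (a + a) * b = a + b - c. A truth-table check of that identity over the integers (a sketch, independent of the crate):

```rust
// Sketch: the XOR constraint (a + a) * b = a + b - c holds exactly when c = a ^ b.
fn main() {
    for a in 0u8..2 {
        for b in 0u8..2 {
            let c = a ^ b;
            assert_eq!((a + a) * b, a + b - c, "must hold for a={}, b={}", a, b);
        }
    }
    // A wrong claimed output violates it: a = 1, b = 0, claimed c = 0.
    assert_ne!((1 + 1) * 0, 1 + 0 - 0);
}
```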
diff --git a/bellman/tests/mimc.rs b/bellman/tests/mimc.rs
new file mode 100644
index 0000000..623c1ee
--- /dev/null
+++ b/bellman/tests/mimc.rs
@@ -0,0 +1,341 @@
+// For randomness (during paramgen and proof generation)
+use rand::{thread_rng, Rng};
+
+// For benchmarking
+use std::time::{Duration, Instant};
+
+// Bring in some tools for using pairing-friendly curves
+use bellman_ce::pairing::{
+    Engine
+};
+
+use bellman_ce::pairing::ff::{
+    Field,
+};
+
+// We're going to use the BLS12-381 pairing-friendly elliptic curve.
+use bellman_ce::pairing::bls12_381::{
+    Bls12
+};
+
+use bellman_ce::pairing::bn256::{
+    Bn256
+};
+
+// We'll use these interfaces to construct our circuit.
+use bellman_ce::{
+    Circuit,
+    ConstraintSystem,
+    SynthesisError
+};
+
+// We're going to use the Groth16 proving system.
+use bellman_ce::groth16::{
+    Proof,
+    generate_random_parameters,
+    prepare_verifying_key,
+    create_random_proof,
+    verify_proof,
+};
+
+const MIMC_ROUNDS: usize = 322;
+
+// const MIMC_ROUNDS: usize = 1000000;
+
+/// This is an implementation of MiMC, specifically a
+/// variant named `LongsightF322p3` for BLS12-381.
+/// See http://eprint.iacr.org/2016/492 for more
+/// information about this construction.
+///
+/// ```
+/// function LongsightF322p3(xL ⦂ Fp, xR ⦂ Fp) {
+///     for i from 0 up to 321 {
+///         xL, xR := xR + (xL + Ci)^3, xL
+///     }
+///     return xL
+/// }
+/// ```
+fn mimc<E: Engine>(
+    mut xl: E::Fr,
+    mut xr: E::Fr,
+    constants: &[E::Fr]
+) -> E::Fr
+{
+    assert_eq!(constants.len(), MIMC_ROUNDS);
+
+    for i in 0..MIMC_ROUNDS {
+        let mut tmp1 = xl;
+        tmp1.add_assign(&constants[i]);
+        let mut tmp2 = tmp1;
+        tmp2.square();
+        tmp2.mul_assign(&tmp1);
+        tmp2.add_assign(&xr);
+        xr = xl;
+        xl = tmp2;
+    }
+
+    xl
+}
+
+/// This is our demo circuit for proving knowledge of the
+/// preimage of a MiMC hash invocation.
+#[derive(Clone)]
+struct MiMCDemo<'a, E: Engine> {
+    xl: Option<E::Fr>,
+    xr: Option<E::Fr>,
+    constants: &'a [E::Fr]
+}
+
+/// Our demo circuit implements this `Circuit` trait which
+/// is used during paramgen and proving in order to
+/// synthesize the constraint system.
+impl<'a, E: Engine> Circuit<E> for MiMCDemo<'a, E> {
+    fn synthesize<CS: ConstraintSystem<E>>(
+        self,
+        cs: &mut CS
+    ) -> Result<(), SynthesisError>
+    {
+        assert_eq!(self.constants.len(), MIMC_ROUNDS);
+
+        // Allocate the first component of the preimage.
+        let mut xl_value = self.xl;
+        let mut xl = cs.alloc(|| "preimage xl", || {
+            xl_value.ok_or(SynthesisError::AssignmentMissing)
+        })?;
+
+        // Allocate the second component of the preimage.
+        let mut xr_value = self.xr;
+        let mut xr = cs.alloc(|| "preimage xr", || {
+            xr_value.ok_or(SynthesisError::AssignmentMissing)
+        })?;
+
+        for i in 0..MIMC_ROUNDS {
+            // xL, xR := xR + (xL + Ci)^3, xL
+            let cs = &mut cs.namespace(|| format!("round {}", i));
+
+            // tmp = (xL + Ci)^2
+            let tmp_value = xl_value.map(|mut e| {
+                e.add_assign(&self.constants[i]);
+                e.square();
+                e
+            });
+            let tmp = cs.alloc(|| "tmp", || {
+                tmp_value.ok_or(SynthesisError::AssignmentMissing)
+            })?;
+
+            cs.enforce(
+                || "tmp = (xL + Ci)^2",
+                |lc| lc + xl + (self.constants[i], CS::one()),
+                |lc| lc + xl + (self.constants[i], CS::one()),
+                |lc| lc + tmp
+            );
+
+            // new_xL = xR + (xL + Ci)^3
+            // new_xL = xR + tmp * (xL + Ci)
+            // new_xL - xR = tmp * (xL + Ci)
+            let new_xl_value = xl_value.map(|mut e| {
+                e.add_assign(&self.constants[i]);
+                e.mul_assign(&tmp_value.unwrap());
+                e.add_assign(&xr_value.unwrap());
+                e
+            });
+
+            let new_xl = if i == (MIMC_ROUNDS-1) {
+                // This is the last round, xL is our image and so
+                // we allocate a public input.
+                cs.alloc_input(|| "image", || {
+                    new_xl_value.ok_or(SynthesisError::AssignmentMissing)
+                })?
+            } else {
+                cs.alloc(|| "new_xl", || {
+                    new_xl_value.ok_or(SynthesisError::AssignmentMissing)
+                })?
+            };
+
+            cs.enforce(
+                || "new_xL = xR + (xL + Ci)^3",
+                |lc| lc + tmp,
+                |lc| lc + xl + (self.constants[i], CS::one()),
+                |lc| lc + new_xl - xr
+            );
+
+            // xR = xL
+            xr = xl;
+            xr_value = xl_value;
+
+            // xL = new_xL
+            xl = new_xl;
+            xl_value = new_xl_value;
+        }
+
+        Ok(())
+    }
+}
+
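Editor's note: each round of the circuit above splits the cube into two rank-1 constraints, tmp = (xL + Ci)^2 and tmp * (xL + Ci) = new_xL - xR, which recombine to new_xL = xR + (xL + Ci)^3, the same update the native `mimc` function applies. A consistency check over a toy prime (a sketch; 101 merely stands in for the real scalar field):

```rust
// Sketch: the two per-round constraints agree with new_xl = xr + (xl + c)^3 mod P.
const P: u64 = 101;

fn round_native(xl: u64, xr: u64, c: u64) -> u64 {
    let t = (xl + c) % P;
    (xr + t * t % P * t) % P
}

fn main() {
    let (xl, xr, c) = (17u64, 42u64, 7u64);
    let t = (xl + c) % P;

    // Witness values the prover would assign for this round.
    let tmp = t * t % P;                  // constraint 1: (xL + Ci)^2 = tmp
    let new_xl = round_native(xl, xr, c);

    // Constraint 2: tmp * (xL + Ci) = new_xL - xR (mod P).
    assert_eq!(tmp * t % P, (new_xl + P - xr) % P);
}
```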
+#[test]
+fn test_mimc_bls12() {
+    // This may not be cryptographically safe, use
+    // `OsRng` (for example) in production software.
+    let rng = &mut thread_rng();
+
+    // Generate the MiMC round constants
+    let constants = (0..MIMC_ROUNDS).map(|_| rng.gen()).collect::<Vec<_>>();
+
+    println!("Creating parameters...");
+
+    // Create parameters for our circuit
+    let params = {
+        let c = MiMCDemo::<Bls12> {
+            xl: None,
+            xr: None,
+            constants: &constants
+        };
+
+        generate_random_parameters(c, rng).unwrap()
+    };
+
+    // Prepare the verification key (for proof verification)
+    let pvk = prepare_verifying_key(&params.vk);
+
+    println!("Creating proofs...");
+
+    // Let's benchmark stuff!
+    const SAMPLES: u32 = 1;
+    let mut total_proving = Duration::new(0, 0);
+    let mut total_verifying = Duration::new(0, 0);
+
+    // Just a place to put the proof data, so we can
+    // benchmark deserialization.
+    let mut proof_vec = vec![];
+
+    for _ in 0..SAMPLES {
+        // Generate a random preimage and compute the image
+        let xl = rng.gen();
+        let xr = rng.gen();
+        let image = mimc::<Bls12>(xl, xr, &constants);
+
+        proof_vec.truncate(0);
+
+        let start = Instant::now();
+        {
+            // Create an instance of our circuit (with the
+            // witness)
+            let c = MiMCDemo {
+                xl: Some(xl),
+                xr: Some(xr),
+                constants: &constants
+            };
+
+            // Create a groth16 proof with our parameters.
+            let proof = create_random_proof(c, &params, rng).unwrap();
+
+            proof.write(&mut proof_vec).unwrap();
+        }
+
+        total_proving += start.elapsed();
+
+        let start = Instant::now();
+        let proof = Proof::read(&proof_vec[..]).unwrap();
+        // Check the proof
+        assert!(verify_proof(
+            &pvk,
+            &proof,
+            &[image]
+        ).unwrap());
+        total_verifying += start.elapsed();
+    }
+    let proving_avg = total_proving / SAMPLES;
+    let proving_avg = proving_avg.subsec_nanos() as f64 / 1_000_000_000f64
+                      + (proving_avg.as_secs() as f64);
+
+    let verifying_avg = total_verifying / SAMPLES;
+    let verifying_avg = verifying_avg.subsec_nanos() as f64 / 1_000_000_000f64
+                      + (verifying_avg.as_secs() as f64);
+
+    println!("Average proving time: {:?} seconds", proving_avg);
+    println!("Average verifying time: {:?} seconds", verifying_avg);
+}
+
+#[test]
+fn test_mimc_bn256() {
+    // This may not be cryptographically safe, use
+    // `OsRng` (for example) in production software.
+    let rng = &mut thread_rng();
+
+    // Generate the MiMC round constants
+    let constants = (0..MIMC_ROUNDS).map(|_| rng.gen()).collect::<Vec<_>>();
+
+    println!("Creating parameters...");
+
+    // Create parameters for our circuit
+    let params = {
+        let c = MiMCDemo::<Bn256> {
+            xl: None,
+            xr: None,
+            constants: &constants
+        };
+
+        generate_random_parameters(c, rng).unwrap()
+    };
+
+    // Prepare the verification key (for proof verification)
+    let pvk = prepare_verifying_key(&params.vk);
+
+    println!("Creating proofs...");
+
+    // Let's benchmark stuff!
+    const SAMPLES: u32 = 50;
+    let mut total_proving = Duration::new(0, 0);
+    let mut total_verifying = Duration::new(0, 0);
+
+    // Just a place to put the proof data, so we can
+    // benchmark deserialization.
+    let mut proof_vec = vec![];
+
+    for _ in 0..SAMPLES {
+        // Generate a random preimage and compute the image
+        let xl = rng.gen();
+        let xr = rng.gen();
+        let image = mimc::<Bn256>(xl, xr, &constants);
+
+        proof_vec.truncate(0);
+
+        let start = Instant::now();
+        {
+            // Create an instance of our circuit (with the
+            // witness)
+            let c = MiMCDemo {
+                xl: Some(xl),
+                xr: Some(xr),
+                constants: &constants
+            };
+
+            // Create a groth16 proof with our parameters.
+            let proof = create_random_proof(c, &params, rng).unwrap();
+
+            proof.write(&mut proof_vec).unwrap();
+        }
+
+        total_proving += start.elapsed();
+
+        let start = Instant::now();
+        let proof = Proof::read(&proof_vec[..]).unwrap();
+        // Check the proof
+        assert!(verify_proof(
+            &pvk,
+            &proof,
+            &[image]
+        ).unwrap());
+        total_verifying += start.elapsed();
+    }
+    let proving_avg = total_proving / SAMPLES;
+    let proving_avg = proving_avg.subsec_nanos() as f64 / 1_000_000_000f64
+                      + (proving_avg.as_secs() as f64);
+
+    let verifying_avg = total_verifying / SAMPLES;
+    let verifying_avg = verifying_avg.subsec_nanos() as f64 / 1_000_000_000f64
+                      + (verifying_avg.as_secs() as f64);
+
+    println!("Average proving time: {:?} seconds", proving_avg);
+    println!("Average verifying time: {:?} seconds", verifying_avg);
+}
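Editor's note: the averaging code above reconstructs fractional seconds from `subsec_nanos` and `as_secs`. Where the crate's toolchain permits (Rust 1.38 and later), `Duration::as_secs_f64` expresses the same average more directly; a sketch:

```rust
use std::time::Duration;

// Sketch: equivalent of the per-sample averaging done in the benchmarks above.
fn average_seconds(total: Duration, samples: u32) -> f64 {
    (total / samples).as_secs_f64()
}

fn main() {
    let total = Duration::new(3, 500_000_000); // 3.5 s over 7 samples
    assert!((average_seconds(total, 7) - 0.5).abs() < 1e-9);
}
```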