use group::{
ff::{BatchInvert, PrimeField},
Curve, GroupOpsOwned, ScalarMulOwned,
};
pub use halo2_middleware::ff::Field;
use halo2_middleware::multicore;
use halo2curves::fft::best_fft;
pub use halo2curves::{CurveAffine, CurveExt};
/// Marker trait bundling the bounds an element type needs to participate in an
/// FFT: cheap copies, cross-thread sharing, group operations with itself
/// (owned or borrowed), and scalar multiplication by `Scalar`.
#[allow(dead_code)]
pub(crate) trait FftGroup<Scalar: Field>:
    Copy + Send + Sync + 'static + GroupOpsOwned + ScalarMulOwned<Scalar>
{
}
// Blanket impl: any type that satisfies the bounds is automatically an
// `FftGroup`; nothing ever implements the trait by hand.
impl<T, Scalar> FftGroup<Scalar> for T
where
    Scalar: Field,
    T: Copy + Send + Sync + 'static + GroupOpsOwned + ScalarMulOwned<Scalar>,
{
}
/// Converts a vector of `2^k` curve points from the coefficient basis to the
/// Lagrange basis via an inverse FFT, returning the results in affine form.
pub(crate) fn g_to_lagrange<C: CurveAffine>(g_projective: Vec<C::Curve>, k: u32) -> Vec<C> {
    // n_inv = (1/2)^k = 1/n, the scaling factor of the inverse FFT.
    let n_inv = C::Scalar::TWO_INV.pow_vartime([k as u64, 0, 0, 0]);
    // Square the 2^S-th root-of-unity inverse down to a 2^k-th root of unity.
    let mut omega_inv = C::Scalar::ROOT_OF_UNITY_INV;
    for _ in k..C::Scalar::S {
        omega_inv = omega_inv.square();
    }
    let mut g_lagrange_projective = g_projective;
    // Running the FFT with omega^{-1} performs the inverse transform, up to
    // the 1/n factor applied below.
    best_fft(&mut g_lagrange_projective, omega_inv, k);
    // Scale every point by 1/n, in parallel.
    parallelize(&mut g_lagrange_projective, |g, _| {
        for g in g.iter_mut() {
            *g *= n_inv;
        }
    });
    // Batch-convert the projective points to affine, in parallel chunks.
    let mut g_lagrange = vec![C::identity(); 1 << k];
    parallelize(&mut g_lagrange, |g_lagrange, starts| {
        C::Curve::batch_normalize(
            &g_lagrange_projective[starts..(starts + g_lagrange.len())],
            g_lagrange,
        );
    });
    g_lagrange
}
/// Evaluates the polynomial with coefficients `poly` (constant term first) at
/// `point`, splitting the work across available threads for large inputs.
pub(crate) fn eval_polynomial<F: Field>(poly: &[F], point: F) -> F {
    // Horner's rule over a single coefficient slice.
    fn horner<F: Field>(coeffs: &[F], x: F) -> F {
        coeffs.iter().rev().fold(F::ZERO, |acc, c| acc * x + c)
    }
    let n = poly.len();
    let num_threads = multicore::current_num_threads();
    if n * 2 < num_threads {
        // Too little work to justify spawning threads.
        horner(poly, point)
    } else {
        let chunk_size = (n + num_threads - 1) / num_threads;
        let mut partials = vec![F::ZERO; num_threads];
        multicore::scope(|scope| {
            for (idx, (out, chunk)) in partials
                .iter_mut()
                .zip(poly.chunks(chunk_size))
                .enumerate()
            {
                scope.spawn(move |_| {
                    // This chunk begins at coefficient index idx * chunk_size,
                    // so its partial result is shifted by point^(idx * chunk_size).
                    let shift = (idx * chunk_size) as u64;
                    *out = horner(chunk, point) * point.pow_vartime([shift, 0, 0, 0]);
                });
            }
        });
        partials.into_iter().fold(F::ZERO, |acc, p| acc + p)
    }
}
/// Returns the inner product of `a` and `b`, i.e. the sum of `a[i] * b[i]`.
///
/// # Panics
/// Panics if the slices differ in length.
pub(crate) fn compute_inner_product<F: Field>(a: &[F], b: &[F]) -> F {
    assert_eq!(a.len(), b.len());
    a.iter()
        .zip(b.iter())
        .fold(F::ZERO, |acc, (x, y)| acc + (*x * *y))
}
/// Divides the polynomial `a` (coefficients constant term first) by the linear
/// factor `(X - b)` using synthetic division, returning the quotient and
/// discarding the remainder. The input must have at least one coefficient.
pub(crate) fn kate_division<'a, F: Field, I: IntoIterator<Item = &'a F>>(a: I, mut b: F) -> Vec<F>
where
    I::IntoIter: DoubleEndedIterator + ExactSizeIterator,
{
    // Negate once so each step below is a subtract followed by a multiply.
    b = -b;
    let coeffs = a.into_iter();
    // The quotient has exactly one fewer coefficient than the dividend.
    let mut quotient = vec![F::ZERO; coeffs.len() - 1];

    // Walk from the leading coefficient downward; `carry` holds the running
    // value propagated into the next (lower-degree) position. The constant
    // term of `a` is never paired (the zip stops first) — it only contributes
    // to the remainder, which is dropped.
    let mut carry = F::ZERO;
    for (q, &coeff) in quotient.iter_mut().rev().zip(coeffs.rev()) {
        let value = coeff - carry;
        *q = value;
        carry = value * b;
    }
    quotient
}
/// Runs `f` over disjoint mutable chunks of `v` in parallel. The second
/// argument passed to `f` is the starting index of that chunk within `v`.
///
/// The slice is split into `len % num_threads` chunks holding one extra
/// element each, followed by chunks of the base size, so every element is
/// covered exactly once using at most `num_threads` chunks.
pub fn parallelize<T: Send, F: Fn(&mut [T], usize) + Send + Sync + Clone>(v: &mut [T], f: F) {
    let f = &f;
    let len = v.len();
    let num_threads = multicore::current_num_threads();
    let small_size = len / num_threads;
    // This many leading chunks receive one extra element to absorb the remainder.
    let num_big = len % num_threads;
    let boundary = num_big * (small_size + 1);
    let (big_part, small_part) = v.split_at_mut(boundary);
    multicore::scope(|scope| {
        if num_big != 0 {
            for (i, chunk) in big_part.chunks_exact_mut(small_size + 1).enumerate() {
                let offset = i * (small_size + 1);
                scope.spawn(move |_| f(chunk, offset));
            }
        }
        if small_size != 0 {
            for (i, chunk) in small_part.chunks_exact_mut(small_size).enumerate() {
                let offset = boundary + i * small_size;
                scope.spawn(move |_| f(chunk, offset));
            }
        }
    });
}
/// Returns the coefficients (constant term first) of the unique polynomial of
/// degree < `points.len()` that interpolates `(points[i], evals[i])` for
/// every `i`, computed via Lagrange interpolation.
///
/// # Panics
/// Panics if `points` and `evals` differ in length. Results are unspecified
/// if two entries of `points` coincide (the basis denominators vanish).
pub(crate) fn lagrange_interpolate<F: Field>(points: &[F], evals: &[F]) -> Vec<F> {
    assert_eq!(points.len(), evals.len());
    if points.len() == 1 {
        // A single point is interpolated by the constant polynomial.
        vec![evals[0]]
    } else {
        // denoms[j] collects (x_j - x_k) for all k != j: the denominators of
        // the j-th Lagrange basis polynomial.
        let mut denoms = Vec::with_capacity(points.len());
        for (j, x_j) in points.iter().enumerate() {
            let mut denom = Vec::with_capacity(points.len() - 1);
            for x_k in points
                .iter()
                .enumerate()
                .filter(|&(k, _)| k != j)
                .map(|a| a.1)
            {
                denom.push(*x_j - x_k);
            }
            denoms.push(denom);
        }
        // Invert all denominators in one batch rather than one at a time.
        denoms.iter_mut().flat_map(|v| v.iter_mut()).batch_invert();
        let mut final_poly = vec![F::ZERO; points.len()];
        for (j, (denoms, eval)) in denoms.into_iter().zip(evals.iter()).enumerate() {
            // Expand the basis polynomial
            //   l_j(X) = prod_{k != j} (X - x_k) / (x_j - x_k)
            // into coefficient form in `tmp`, multiplying in one scaled linear
            // factor per iteration; `product` is the scratch buffer.
            let mut tmp: Vec<F> = Vec::with_capacity(points.len());
            let mut product = Vec::with_capacity(points.len() - 1);
            tmp.push(F::ONE);
            for (x_k, denom) in points
                .iter()
                .enumerate()
                .filter(|&(k, _)| k != j)
                .map(|a| a.1)
                .zip(denoms.into_iter())
            {
                product.resize(tmp.len() + 1, F::ZERO);
                // product = tmp * (denom * X - denom * x_k). Chaining a ZERO on
                // each side of `tmp` realizes the shift-by-one for the X term.
                for ((a, b), product) in tmp
                    .iter()
                    .chain(std::iter::once(&F::ZERO))
                    .zip(std::iter::once(&F::ZERO).chain(tmp.iter()))
                    .zip(product.iter_mut())
                {
                    *product = *a * (-denom * x_k) + *b * denom;
                }
                std::mem::swap(&mut tmp, &mut product);
            }
            assert_eq!(tmp.len(), points.len());
            assert_eq!(product.len(), points.len() - 1);
            // Accumulate eval_j * l_j(X) into the final polynomial.
            for (final_coeff, interpolation_coeff) in final_poly.iter_mut().zip(tmp.into_iter()) {
                *final_coeff += interpolation_coeff * eval;
            }
        }
        final_poly
    }
}
/// Evaluates prod_i (z - roots[i]) — the vanishing polynomial with the given
/// roots — at the point `z`, parallelized for large root sets.
pub(crate) fn evaluate_vanishing_polynomial<F: Field>(roots: &[F], z: F) -> F {
    // Sequential product over one chunk of roots.
    fn product_of<F: Field>(roots: &[F], z: F) -> F {
        roots.iter().fold(F::ONE, |acc, r| acc * (z - r))
    }
    let n = roots.len();
    let num_threads = multicore::current_num_threads();
    if n * 2 < num_threads {
        // Too little work to justify spawning threads.
        product_of(roots, z)
    } else {
        let chunk_size = (n + num_threads - 1) / num_threads;
        // Slots with no corresponding chunk stay at ONE, the identity.
        let mut partials = vec![F::ONE; num_threads];
        multicore::scope(|scope| {
            for (out, chunk) in partials.iter_mut().zip(roots.chunks(chunk_size)) {
                scope.spawn(move |_| *out = product_of(chunk, z));
            }
        });
        partials.into_iter().fold(F::ONE, |acc, p| acc * p)
    }
}
/// Returns an unbounded iterator yielding 1, base, base^2, base^3, ...
pub(crate) fn powers<F: Field>(base: F) -> impl Iterator<Item = F> {
    std::iter::repeat(()).scan(F::ONE, move |acc, ()| {
        let current = *acc;
        *acc *= base;
        Some(current)
    })
}
#[cfg(test)]
use rand_core::OsRng;
#[cfg(test)]
use halo2curves::pasta::Fp;
#[test]
fn test_lagrange_interpolate() {
    let rng = OsRng;
    // Random interpolation targets; each iteration below reuses a prefix.
    let xs: Vec<Fp> = (0..5).map(|_| Fp::random(rng)).collect();
    let ys: Vec<Fp> = (0..5).map(|_| Fp::random(rng)).collect();
    for size in 0..5 {
        let points = &xs[0..size];
        let evals = &ys[0..size];
        let poly = lagrange_interpolate(points, evals);
        // One coefficient per interpolation point.
        assert_eq!(poly.len(), points.len());
        // The interpolant must reproduce every target evaluation exactly.
        for (point, eval) in points.iter().zip(evals) {
            assert_eq!(eval_polynomial(&poly, *point), *eval);
        }
    }
}