pub trait IndexedParallelIterator: ParallelIterator {
Show 33 methods // Required methods fn len(&self) -> usize; fn drive<C>(self, consumer: C) -> <C as Consumer<Self::Item>>::Result where C: Consumer<Self::Item>; fn with_producer<CB>( self, callback: CB ) -> <CB as ProducerCallback<Self::Item>>::Output where CB: ProducerCallback<Self::Item>; // Provided methods fn by_exponential_blocks(self) -> ExponentialBlocks<Self> { ... } fn by_uniform_blocks(self, block_size: usize) -> UniformBlocks<Self> { ... } fn collect_into_vec(self, target: &mut Vec<Self::Item>) { ... } fn unzip_into_vecs<A, B>(self, left: &mut Vec<A>, right: &mut Vec<B>) where Self: IndexedParallelIterator<Item = (A, B)>, A: Send, B: Send { ... } fn zip<Z>(self, zip_op: Z) -> Zip<Self, <Z as IntoParallelIterator>::Iter> where Z: IntoParallelIterator, <Z as IntoParallelIterator>::Iter: IndexedParallelIterator { ... } fn zip_eq<Z>( self, zip_op: Z ) -> ZipEq<Self, <Z as IntoParallelIterator>::Iter> where Z: IntoParallelIterator, <Z as IntoParallelIterator>::Iter: IndexedParallelIterator { ... } fn interleave<I>( self, other: I ) -> Interleave<Self, <I as IntoParallelIterator>::Iter> where I: IntoParallelIterator<Item = Self::Item>, <I as IntoParallelIterator>::Iter: IndexedParallelIterator<Item = Self::Item> { ... } fn interleave_shortest<I>( self, other: I ) -> InterleaveShortest<Self, <I as IntoParallelIterator>::Iter> where I: IntoParallelIterator<Item = Self::Item>, <I as IntoParallelIterator>::Iter: IndexedParallelIterator<Item = Self::Item> { ... } fn chunks(self, chunk_size: usize) -> Chunks<Self> { ... } fn fold_chunks<T, ID, F>( self, chunk_size: usize, identity: ID, fold_op: F ) -> FoldChunks<Self, ID, F> where ID: Fn() -> T + Send + Sync, F: Fn(T, Self::Item) -> T + Send + Sync, T: Send { ... } fn fold_chunks_with<T, F>( self, chunk_size: usize, init: T, fold_op: F ) -> FoldChunksWith<Self, T, F> where T: Send + Clone, F: Fn(T, Self::Item) -> T + Send + Sync { ... 
} fn cmp<I>(self, other: I) -> Ordering where I: IntoParallelIterator<Item = Self::Item>, <I as IntoParallelIterator>::Iter: IndexedParallelIterator, Self::Item: Ord { ... } fn partial_cmp<I>(self, other: I) -> Option<Ordering> where I: IntoParallelIterator, <I as IntoParallelIterator>::Iter: IndexedParallelIterator, Self::Item: PartialOrd<<I as IntoParallelIterator>::Item> { ... } fn eq<I>(self, other: I) -> bool where I: IntoParallelIterator, <I as IntoParallelIterator>::Iter: IndexedParallelIterator, Self::Item: PartialEq<<I as IntoParallelIterator>::Item> { ... } fn ne<I>(self, other: I) -> bool where I: IntoParallelIterator, <I as IntoParallelIterator>::Iter: IndexedParallelIterator, Self::Item: PartialEq<<I as IntoParallelIterator>::Item> { ... } fn lt<I>(self, other: I) -> bool where I: IntoParallelIterator, <I as IntoParallelIterator>::Iter: IndexedParallelIterator, Self::Item: PartialOrd<<I as IntoParallelIterator>::Item> { ... } fn le<I>(self, other: I) -> bool where I: IntoParallelIterator, <I as IntoParallelIterator>::Iter: IndexedParallelIterator, Self::Item: PartialOrd<<I as IntoParallelIterator>::Item> { ... } fn gt<I>(self, other: I) -> bool where I: IntoParallelIterator, <I as IntoParallelIterator>::Iter: IndexedParallelIterator, Self::Item: PartialOrd<<I as IntoParallelIterator>::Item> { ... } fn ge<I>(self, other: I) -> bool where I: IntoParallelIterator, <I as IntoParallelIterator>::Iter: IndexedParallelIterator, Self::Item: PartialOrd<<I as IntoParallelIterator>::Item> { ... } fn enumerate(self) -> Enumerate<Self> { ... } fn step_by(self, step: usize) -> StepBy<Self> { ... } fn skip(self, n: usize) -> Skip<Self> { ... } fn take(self, n: usize) -> Take<Self> { ... } fn position_any<P>(self, predicate: P) -> Option<usize> where P: Fn(Self::Item) -> bool + Sync + Send { ... } fn position_first<P>(self, predicate: P) -> Option<usize> where P: Fn(Self::Item) -> bool + Sync + Send { ... 
} fn position_last<P>(self, predicate: P) -> Option<usize> where P: Fn(Self::Item) -> bool + Sync + Send { ... } fn positions<P>(self, predicate: P) -> Positions<Self, P> where P: Fn(Self::Item) -> bool + Sync + Send { ... } fn rev(self) -> Rev<Self> { ... } fn with_min_len(self, min: usize) -> MinLen<Self> { ... } fn with_max_len(self, max: usize) -> MaxLen<Self> { ... }
}
Expand description

An iterator that supports “random access” to its data, meaning that you can split it at arbitrary indices and draw data from those points.

Note: Not implemented for u64, i64, u128, or i128 ranges

Required Methods§

fn len(&self) -> usize

Produces an exact count of how many items this iterator will produce, presuming no panic occurs.

§Examples
use rayon::prelude::*;

let par_iter = (0..100).into_par_iter().zip(vec![0; 10]);
assert_eq!(par_iter.len(), 10);

let vec: Vec<_> = par_iter.collect();
assert_eq!(vec.len(), 10);

fn drive<C>(self, consumer: C) -> <C as Consumer<Self::Item>>::Result
where C: Consumer<Self::Item>,

Internal method used to define the behavior of this parallel iterator. You should not need to call this directly.

This method causes the iterator self to start producing items and to feed them to the consumer consumer one by one. It may split the consumer before doing so to create the opportunity to produce in parallel. If a split does happen, it will inform the consumer of the index where the split should occur (unlike ParallelIterator::drive_unindexed()).

See the README for more details on the internals of parallel iterators.

fn with_producer<CB>( self, callback: CB ) -> <CB as ProducerCallback<Self::Item>>::Output
where CB: ProducerCallback<Self::Item>,

Internal method used to define the behavior of this parallel iterator. You should not need to call this directly.

This method converts the iterator into a producer P and then invokes callback.callback() with P. Note that the type of this producer is not defined as part of the API, since callback must be defined generically for all producers. This allows the producer type to contain references; it also means that parallel iterators can adjust that type without causing a breaking change.

See the README for more details on the internals of parallel iterators.

Provided Methods§

fn by_exponential_blocks(self) -> ExponentialBlocks<Self>

Divides an iterator into sequential blocks of exponentially-increasing size.

Normally, parallel iterators are recursively divided into tasks in parallel. This adaptor changes the default behavior by splitting the iterator into a sequence of parallel iterators of increasing sizes. Sizes grow exponentially in order to avoid creating too many blocks. This also allows balancing the current block with all previous ones.

This can have many applications but the most notable ones are:

  • better performance with find_first()
  • more predictable performance with find_any() or any interruptible computation
§Examples
use rayon::prelude::*;
assert_eq!((0..10_000).into_par_iter()
                      .by_exponential_blocks()
                      .find_first(|&e| e==4_999), Some(4_999))

In this example, without blocks, rayon will split the initial range into two but all work on the right hand side (from 5,000 onwards) is useless since the sequential algorithm never goes there. This means that if two threads are used there will be no speedup at all.

by_exponential_blocks on the other hand will start with the leftmost range from 0 to p (threads number), continue with p to 3p, then 3p to 7p…

Each subrange is treated in parallel, while all subranges are treated sequentially. We therefore ensure a logarithmic number of blocks (and overhead) while guaranteeing we stop at the first block containing the searched data.

fn by_uniform_blocks(self, block_size: usize) -> UniformBlocks<Self>

Divides an iterator into sequential blocks of the given size.

Normally, parallel iterators are recursively divided into tasks in parallel. This adaptor changes the default behavior by splitting the iterator into a sequence of parallel iterators of given block_size. The main application is to obtain better memory locality (especially if the reduce operation re-uses folded data).

Panics if block_size is 0.

§Example
use rayon::prelude::*;
// during most reductions v1 and v2 fit the cache
let v = (0u32..10_000_000)
    .into_par_iter()
    .by_uniform_blocks(1_000_000)
    .fold(Vec::new, |mut v, e| { v.push(e); v})
    .reduce(Vec::new, |mut v1, mut v2| { v1.append(&mut v2); v1});
assert_eq!(v, (0u32..10_000_000).collect::<Vec<u32>>());

fn collect_into_vec(self, target: &mut Vec<Self::Item>)

Collects the results of the iterator into the specified vector. The vector is always cleared before execution begins. If possible, reusing the vector across calls can lead to better performance since it reuses the same backing buffer.

§Examples
use rayon::prelude::*;

// any prior data will be cleared
let mut vec = vec![-1, -2, -3];

(0..5).into_par_iter()
    .collect_into_vec(&mut vec);

assert_eq!(vec, [0, 1, 2, 3, 4]);

fn unzip_into_vecs<A, B>(self, left: &mut Vec<A>, right: &mut Vec<B>)
where Self: IndexedParallelIterator<Item = (A, B)>, A: Send, B: Send,

Unzips the results of the iterator into the specified vectors. The vectors are always cleared before execution begins. If possible, reusing the vectors across calls can lead to better performance since they reuse the same backing buffer.

§Examples
use rayon::prelude::*;

// any prior data will be cleared
let mut left = vec![42; 10];
let mut right = vec![-1; 10];

(10..15).into_par_iter()
    .enumerate()
    .unzip_into_vecs(&mut left, &mut right);

assert_eq!(left, [0, 1, 2, 3, 4]);
assert_eq!(right, [10, 11, 12, 13, 14]);

fn zip<Z>(self, zip_op: Z) -> Zip<Self, <Z as IntoParallelIterator>::Iter>

Iterates over tuples (A, B), where the items A are from this iterator and B are from the iterator given as argument. Like the zip method on ordinary iterators, if the two iterators are of unequal length, you only get the items they have in common.

§Examples
use rayon::prelude::*;

let result: Vec<_> = (1..4)
    .into_par_iter()
    .zip(vec!['a', 'b', 'c'])
    .collect();

assert_eq!(result, [(1, 'a'), (2, 'b'), (3, 'c')]);

fn zip_eq<Z>(self, zip_op: Z) -> ZipEq<Self, <Z as IntoParallelIterator>::Iter>

The same as Zip, but requires that both iterators have the same length.

§Panics

Will panic if self and zip_op are not the same length.

use rayon::prelude::*;

let one = [1u8];
let two = [2u8, 2];
let one_iter = one.par_iter();
let two_iter = two.par_iter();

// this will panic
let zipped: Vec<(&u8, &u8)> = one_iter.zip_eq(two_iter).collect();

// we should never get here
assert_eq!(1, zipped.len());

fn interleave<I>( self, other: I ) -> Interleave<Self, <I as IntoParallelIterator>::Iter>
where I: IntoParallelIterator<Item = Self::Item>, <I as IntoParallelIterator>::Iter: IndexedParallelIterator<Item = Self::Item>,

Interleaves elements of this iterator and the other given iterator. Alternately yields elements from this iterator and the given iterator, until both are exhausted. If one iterator is exhausted before the other, the last elements are provided from the other.

§Examples
use rayon::prelude::*;
let (x, y) = (vec![1, 2], vec![3, 4, 5, 6]);
let r: Vec<i32> = x.into_par_iter().interleave(y).collect();
assert_eq!(r, vec![1, 3, 2, 4, 5, 6]);

fn interleave_shortest<I>( self, other: I ) -> InterleaveShortest<Self, <I as IntoParallelIterator>::Iter>
where I: IntoParallelIterator<Item = Self::Item>, <I as IntoParallelIterator>::Iter: IndexedParallelIterator<Item = Self::Item>,

Interleaves elements of this iterator and the other given iterator, until one is exhausted.

§Examples
use rayon::prelude::*;
let (x, y) = (vec![1, 2, 3, 4], vec![5, 6]);
let r: Vec<i32> = x.into_par_iter().interleave_shortest(y).collect();
assert_eq!(r, vec![1, 5, 2, 6, 3]);

fn chunks(self, chunk_size: usize) -> Chunks<Self>

Splits an iterator up into fixed-size chunks.

Returns an iterator that returns Vecs of the given number of elements. If the number of elements in the iterator is not divisible by chunk_size, the last chunk may be shorter than chunk_size.

See also par_chunks() and par_chunks_mut() for similar behavior on slices, without having to allocate intermediate Vecs for the chunks.

Panics if chunk_size is 0.

§Examples
use rayon::prelude::*;
let a = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
let r: Vec<Vec<i32>> = a.into_par_iter().chunks(3).collect();
assert_eq!(r, vec![vec![1,2,3], vec![4,5,6], vec![7,8,9], vec![10]]);

fn fold_chunks<T, ID, F>( self, chunk_size: usize, identity: ID, fold_op: F ) -> FoldChunks<Self, ID, F>
where ID: Fn() -> T + Send + Sync, F: Fn(T, Self::Item) -> T + Send + Sync, T: Send,

Splits an iterator into fixed-size chunks, performing a sequential fold() on each chunk.

Returns an iterator that produces a folded result for each chunk of items produced by this iterator.

This works essentially like:

iter.chunks(chunk_size)
    .map(|chunk|
        chunk.into_iter()
            .fold(identity, fold_op)
    )

except there is no per-chunk allocation overhead.

Panics if chunk_size is 0.

§Examples
use rayon::prelude::*;
let nums = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
let chunk_sums = nums.into_par_iter().fold_chunks(2, || 0, |a, n| a + n).collect::<Vec<_>>();
assert_eq!(chunk_sums, vec![3, 7, 11, 15, 19]);

fn fold_chunks_with<T, F>( self, chunk_size: usize, init: T, fold_op: F ) -> FoldChunksWith<Self, T, F>
where T: Send + Clone, F: Fn(T, Self::Item) -> T + Send + Sync,

Splits an iterator into fixed-size chunks, performing a sequential fold() on each chunk.

Returns an iterator that produces a folded result for each chunk of items produced by this iterator.

This works essentially like fold_chunks(chunk_size, || init.clone(), fold_op), except it doesn’t require the init type to be Sync, nor any other form of added synchronization.

Panics if chunk_size is 0.

§Examples
use rayon::prelude::*;
let nums = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
let chunk_sums = nums.into_par_iter().fold_chunks_with(2, 0, |a, n| a + n).collect::<Vec<_>>();
assert_eq!(chunk_sums, vec![3, 7, 11, 15, 19]);

fn cmp<I>(self, other: I) -> Ordering

Lexicographically compares the elements of this ParallelIterator with those of another.

§Examples
use rayon::prelude::*;
use std::cmp::Ordering::*;

let x = vec![1, 2, 3];
assert_eq!(x.par_iter().cmp(&vec![1, 3, 0]), Less);
assert_eq!(x.par_iter().cmp(&vec![1, 2, 3]), Equal);
assert_eq!(x.par_iter().cmp(&vec![1, 2]), Greater);

fn partial_cmp<I>(self, other: I) -> Option<Ordering>

Lexicographically compares the elements of this ParallelIterator with those of another.

§Examples
use rayon::prelude::*;
use std::cmp::Ordering::*;
use std::f64::NAN;

let x = vec![1.0, 2.0, 3.0];
assert_eq!(x.par_iter().partial_cmp(&vec![1.0, 3.0, 0.0]), Some(Less));
assert_eq!(x.par_iter().partial_cmp(&vec![1.0, 2.0, 3.0]), Some(Equal));
assert_eq!(x.par_iter().partial_cmp(&vec![1.0, 2.0]), Some(Greater));
assert_eq!(x.par_iter().partial_cmp(&vec![1.0, NAN]), None);

fn eq<I>(self, other: I) -> bool

Determines if the elements of this ParallelIterator are equal to those of another

fn ne<I>(self, other: I) -> bool

Determines if the elements of this ParallelIterator are unequal to those of another

fn lt<I>(self, other: I) -> bool

Determines if the elements of this ParallelIterator are lexicographically less than those of another.

fn le<I>(self, other: I) -> bool

Determines if the elements of this ParallelIterator are less or equal to those of another.

fn gt<I>(self, other: I) -> bool

Determines if the elements of this ParallelIterator are lexicographically greater than those of another.

fn ge<I>(self, other: I) -> bool

Determines if the elements of this ParallelIterator are lexicographically greater than or equal to those of another.

fn enumerate(self) -> Enumerate<Self>

Yields an index along with each item.

§Examples
use rayon::prelude::*;

let chars = vec!['a', 'b', 'c'];
let result: Vec<_> = chars
    .into_par_iter()
    .enumerate()
    .collect();

assert_eq!(result, [(0, 'a'), (1, 'b'), (2, 'c')]);

fn step_by(self, step: usize) -> StepBy<Self>

Creates an iterator that steps by the given amount

§Examples
use rayon::prelude::*;

let range = (3..10);
let result: Vec<i32> = range
   .into_par_iter()
   .step_by(3)
   .collect();

assert_eq!(result, [3, 6, 9])

fn skip(self, n: usize) -> Skip<Self>

Creates an iterator that skips the first n elements.

§Examples
use rayon::prelude::*;

let result: Vec<_> = (0..100)
    .into_par_iter()
    .skip(95)
    .collect();

assert_eq!(result, [95, 96, 97, 98, 99]);

fn take(self, n: usize) -> Take<Self>

Creates an iterator that yields the first n elements.

§Examples
use rayon::prelude::*;

let result: Vec<_> = (0..100)
    .into_par_iter()
    .take(5)
    .collect();

assert_eq!(result, [0, 1, 2, 3, 4]);

fn position_any<P>(self, predicate: P) -> Option<usize>
where P: Fn(Self::Item) -> bool + Sync + Send,

Searches for some item in the parallel iterator that matches the given predicate, and returns its index. Like ParallelIterator::find_any, the parallel search will not necessarily find the first match, and once a match is found we’ll attempt to stop processing any more.

§Examples
use rayon::prelude::*;

let a = [1, 2, 3, 3];

let i = a.par_iter().position_any(|&x| x == 3).expect("found");
assert!(i == 2 || i == 3);

assert_eq!(a.par_iter().position_any(|&x| x == 100), None);

fn position_first<P>(self, predicate: P) -> Option<usize>
where P: Fn(Self::Item) -> bool + Sync + Send,

Searches for the sequentially first item in the parallel iterator that matches the given predicate, and returns its index.

Like ParallelIterator::find_first, once a match is found, all attempts to the right of the match will be stopped, while attempts to the left must continue in case an earlier match is found.

Note that not all parallel iterators have a useful order, much like sequential HashMap iteration, so “first” may be nebulous. If you just want the first match discovered anywhere in the iterator, position_any is a better choice.

§Examples
use rayon::prelude::*;

let a = [1, 2, 3, 3];

assert_eq!(a.par_iter().position_first(|&x| x == 3), Some(2));

assert_eq!(a.par_iter().position_first(|&x| x == 100), None);

fn position_last<P>(self, predicate: P) -> Option<usize>
where P: Fn(Self::Item) -> bool + Sync + Send,

Searches for the sequentially last item in the parallel iterator that matches the given predicate, and returns its index.

Like ParallelIterator::find_last, once a match is found, all attempts to the left of the match will be stopped, while attempts to the right must continue in case a later match is found.

Note that not all parallel iterators have a useful order, much like sequential HashMap iteration, so “last” may be nebulous. When the order doesn’t actually matter to you, position_any is a better choice.

§Examples
use rayon::prelude::*;

let a = [1, 2, 3, 3];

assert_eq!(a.par_iter().position_last(|&x| x == 3), Some(3));

assert_eq!(a.par_iter().position_last(|&x| x == 100), None);

fn positions<P>(self, predicate: P) -> Positions<Self, P>
where P: Fn(Self::Item) -> bool + Sync + Send,

Searches for items in the parallel iterator that match the given predicate, and returns their indices.

§Examples
use rayon::prelude::*;

let primes = vec![2, 3, 5, 7, 11, 13, 17, 19, 23, 29];

// Find the positions of primes congruent to 1 modulo 6
let p1mod6: Vec<_> = primes.par_iter().positions(|&p| p % 6 == 1).collect();
assert_eq!(p1mod6, [3, 5, 7]); // primes 7, 13, and 19

// Find the positions of primes congruent to 5 modulo 6
let p5mod6: Vec<_> = primes.par_iter().positions(|&p| p % 6 == 5).collect();
assert_eq!(p5mod6, [2, 4, 6, 8, 9]); // primes 5, 11, 17, 23, and 29

fn rev(self) -> Rev<Self>

Produces a new iterator with the elements of this iterator in reverse order.

§Examples
use rayon::prelude::*;

let result: Vec<_> = (0..5)
    .into_par_iter()
    .rev()
    .collect();

assert_eq!(result, [4, 3, 2, 1, 0]);

fn with_min_len(self, min: usize) -> MinLen<Self>

Sets the minimum length of iterators desired to process in each rayon job. Rayon will not split any smaller than this length, but of course an iterator could already be smaller to begin with.

Producers like zip and interleave will use the greater of the two minimums. Chained iterators and iterators inside flat_map may each use their own minimum length.

§Examples
use rayon::prelude::*;

let min = (0..1_000_000)
    .into_par_iter()
    .with_min_len(1234)
    .fold(|| 0, |acc, _| acc + 1) // count how many are in this segment
    .min().unwrap();

assert!(min >= 1234);

fn with_max_len(self, max: usize) -> MaxLen<Self>

Sets the maximum length of iterators desired to process in each rayon job. Rayon will try to split at least below this length, unless that would put it below the length from with_min_len(). For example, given min=10 and max=15, a length of 16 will not be split any further.

Producers like zip and interleave will use the lesser of the two maximums. Chained iterators and iterators inside flat_map may each use their own maximum length.

§Examples
use rayon::prelude::*;

let max = (0..1_000_000)
    .into_par_iter()
    .with_max_len(1234)
    .fold(|| 0, |acc, _| acc + 1) // count how many are in this segment
    .max().unwrap();

assert!(max <= 1234);

Object Safety§

This trait is not object safe.

Implementations on Foreign Types§

§

impl<L, R> IndexedParallelIterator for Either<L, R>

§

fn drive<C>( self, consumer: C ) -> <C as Consumer<<Either<L, R> as ParallelIterator>::Item>>::Result
where C: Consumer<<Either<L, R> as ParallelIterator>::Item>,

§

fn len(&self) -> usize

§

fn with_producer<CB>( self, callback: CB ) -> <CB as ProducerCallback<<Either<L, R> as ParallelIterator>::Item>>::Output
where CB: ProducerCallback<<Either<L, R> as ParallelIterator>::Item>,

Implementors§

§

impl IndexedParallelIterator for Iter<char>

§

impl IndexedParallelIterator for Iter<char>

§

impl<'a, T> IndexedParallelIterator for Drain<'a, T>
where T: Ord + Send,

§

impl<'a, T> IndexedParallelIterator for Drain<'a, T>
where T: Send,

§

impl<'a, T> IndexedParallelIterator for Iter<'a, T>
where T: Ord + Sync + 'a,

§

impl<'a, T> IndexedParallelIterator for Iter<'a, T>
where T: Sync + 'a,

§

impl<'a, T> IndexedParallelIterator for Iter<'a, T>
where T: Sync + 'a,

§

impl<'a, T> IndexedParallelIterator for Iter<'a, T>
where T: Sync + 'a,

§

impl<'a, T> IndexedParallelIterator for IterMut<'a, T>
where T: Send + 'a,

§

impl<'a, T> IndexedParallelIterator for IterMut<'a, T>
where T: Send + 'a,

§

impl<'a, T> IndexedParallelIterator for IterMut<'a, T>
where T: Send + 'a,

§

impl<'a, T, I> IndexedParallelIterator for Cloned<I>
where I: IndexedParallelIterator<Item = &'a T>, T: 'a + Clone + Send + Sync,

§

impl<'a, T, I> IndexedParallelIterator for Copied<I>
where I: IndexedParallelIterator<Item = &'a T>, T: 'a + Copy + Send + Sync,

§

impl<'data, T> IndexedParallelIterator for Chunks<'data, T>
where T: Sync + 'data,

§

impl<'data, T> IndexedParallelIterator for ChunksExact<'data, T>
where T: Sync + 'data,

§

impl<'data, T> IndexedParallelIterator for ChunksExactMut<'data, T>
where T: Send + 'data,

§

impl<'data, T> IndexedParallelIterator for ChunksMut<'data, T>
where T: Send + 'data,

§

impl<'data, T> IndexedParallelIterator for Drain<'data, T>
where T: Send,

§

impl<'data, T> IndexedParallelIterator for Iter<'data, T>
where T: Sync + 'data,

§

impl<'data, T> IndexedParallelIterator for IterMut<'data, T>
where T: Send + 'data,

§

impl<'data, T> IndexedParallelIterator for RChunks<'data, T>
where T: Sync + 'data,

§

impl<'data, T> IndexedParallelIterator for RChunksExact<'data, T>
where T: Sync + 'data,

§

impl<'data, T> IndexedParallelIterator for RChunksExactMut<'data, T>
where T: Send + 'data,

§

impl<'data, T> IndexedParallelIterator for RChunksMut<'data, T>
where T: Send + 'data,

§

impl<'data, T> IndexedParallelIterator for Windows<'data, T>
where T: Sync + 'data,

§

impl<A> IndexedParallelIterator for MultiZip<(A,)>

§

impl<A, B> IndexedParallelIterator for Chain<A, B>

§

impl<A, B> IndexedParallelIterator for MultiZip<(A, B)>

§

impl<A, B> IndexedParallelIterator for Zip<A, B>

§

impl<A, B> IndexedParallelIterator for ZipEq<A, B>

§

impl<A, B, C> IndexedParallelIterator for MultiZip<(A, B, C)>

§

impl<A, B, C, D> IndexedParallelIterator for MultiZip<(A, B, C, D)>

§

impl<A, B, C, D, E> IndexedParallelIterator for MultiZip<(A, B, C, D, E)>

§

impl<A, B, C, D, E, F> IndexedParallelIterator for MultiZip<(A, B, C, D, E, F)>

§

impl<A, B, C, D, E, F, G> IndexedParallelIterator for MultiZip<(A, B, C, D, E, F, G)>

§

impl<A, B, C, D, E, F, G, H> IndexedParallelIterator for MultiZip<(A, B, C, D, E, F, G, H)>

§

impl<A, B, C, D, E, F, G, H, I> IndexedParallelIterator for MultiZip<(A, B, C, D, E, F, G, H, I)>

§

impl<A, B, C, D, E, F, G, H, I, J> IndexedParallelIterator for MultiZip<(A, B, C, D, E, F, G, H, I, J)>

§

impl<A, B, C, D, E, F, G, H, I, J, K> IndexedParallelIterator for MultiZip<(A, B, C, D, E, F, G, H, I, J, K)>

§

impl<A, B, C, D, E, F, G, H, I, J, K, L> IndexedParallelIterator for MultiZip<(A, B, C, D, E, F, G, H, I, J, K, L)>

§

impl<I> IndexedParallelIterator for Chunks<I>

§

impl<I> IndexedParallelIterator for Enumerate<I>

§

impl<I> IndexedParallelIterator for Intersperse<I>

§

impl<I> IndexedParallelIterator for MaxLen<I>

§

impl<I> IndexedParallelIterator for MinLen<I>

§

impl<I> IndexedParallelIterator for PanicFuse<I>

§

impl<I> IndexedParallelIterator for Rev<I>

§

impl<I> IndexedParallelIterator for Skip<I>

§

impl<I> IndexedParallelIterator for StepBy<I>

§

impl<I> IndexedParallelIterator for Take<I>

§

impl<I, F> IndexedParallelIterator for Inspect<I, F>

§

impl<I, F> IndexedParallelIterator for Update<I, F>
where I: IndexedParallelIterator, F: Fn(&mut <I as ParallelIterator>::Item) + Send + Sync,

§

impl<I, F, R> IndexedParallelIterator for Map<I, F>
where I: IndexedParallelIterator, F: Fn(<I as ParallelIterator>::Item) -> R + Sync + Send, R: Send,

§

impl<I, ID, U, F> IndexedParallelIterator for FoldChunks<I, ID, F>
where I: IndexedParallelIterator, ID: Fn() -> U + Send + Sync, F: Fn(U, <I as ParallelIterator>::Item) -> U + Send + Sync, U: Send,

§

impl<I, INIT, T, F, R> IndexedParallelIterator for MapInit<I, INIT, F>
where I: IndexedParallelIterator, INIT: Fn() -> T + Sync + Send, F: Fn(&mut T, <I as ParallelIterator>::Item) -> R + Sync + Send, R: Send,

§

impl<I, J> IndexedParallelIterator for Interleave<I, J>

§

impl<I, J> IndexedParallelIterator for InterleaveShortest<I, J>

§

impl<I, T, F, R> IndexedParallelIterator for MapWith<I, T, F>
where I: IndexedParallelIterator, T: Send + Clone, F: Fn(&mut T, <I as ParallelIterator>::Item) -> R + Sync + Send, R: Send,

§

impl<I, U, F> IndexedParallelIterator for FoldChunksWith<I, U, F>
where I: IndexedParallelIterator, U: Send + Clone, F: Fn(U, <I as ParallelIterator>::Item) -> U + Send + Sync,

§

impl<T> IndexedParallelIterator for Empty<T>
where T: Send,

§

impl<T> IndexedParallelIterator for IntoIter<T>
where T: Ord + Send,

§

impl<T> IndexedParallelIterator for IntoIter<T>
where T: Send,

§

impl<T> IndexedParallelIterator for IntoIter<T>
where T: Send,

§

impl<T> IndexedParallelIterator for IntoIter<T>
where T: Send,

§

impl<T> IndexedParallelIterator for IntoIter<T>
where T: Send,

§

impl<T> IndexedParallelIterator for Iter<T>
where T: IndexedRangeInteger,

§

impl<T> IndexedParallelIterator for Iter<T>
where T: IndexedRangeInteger,

§

impl<T> IndexedParallelIterator for Once<T>
where T: Send,

§

impl<T> IndexedParallelIterator for RepeatN<T>
where T: Clone + Send,

§

impl<T, const N: usize> IndexedParallelIterator for IntoIter<T, N>
where T: Send,