Skip to main content

hydro_lang/live_collections/stream/
mod.rs

1//! Definitions for the [`Stream`] live collection.
2
3use std::cell::RefCell;
4use std::future::Future;
5use std::hash::Hash;
6use std::marker::PhantomData;
7use std::ops::Deref;
8use std::rc::Rc;
9
10use stageleft::{IntoQuotedMut, QuotedWithContext, QuotedWithContextWithProps, q, quote_type};
11use tokio::time::Instant;
12
13use super::boundedness::{Bounded, Boundedness, IsBounded, Unbounded};
14use super::keyed_singleton::KeyedSingleton;
15use super::keyed_stream::{Generate, KeyedStream};
16use super::optional::Optional;
17use super::singleton::Singleton;
18use crate::compile::builder::{CycleId, FlowState};
19use crate::compile::ir::{
20    CollectionKind, HydroIrOpMetadata, HydroNode, HydroRoot, SharedNode, StreamOrder, StreamRetry,
21};
22#[cfg(stageleft_runtime)]
23use crate::forward_handle::{CycleCollection, CycleCollectionWithInitial, ReceiverComplete};
24use crate::forward_handle::{ForwardRef, TickCycle};
25use crate::live_collections::batch_atomic::BatchAtomic;
26use crate::live_collections::singleton::SingletonBound;
27#[cfg(stageleft_runtime)]
28use crate::location::dynamic::{DynLocation, LocationId};
29use crate::location::tick::{Atomic, DeferTick, NoAtomic};
30use crate::location::{Location, NoTick, Tick, check_matching_location};
31use crate::manual_expr::ManualExpr;
32use crate::nondet::{NonDet, nondet};
33use crate::prelude::manual_proof;
34use crate::properties::{
35    AggFuncAlgebra, ApplyMonotoneStream, ValidCommutativityFor, ValidIdempotenceFor,
36};
37
/// Operators for sending [`Stream`]s over the network between locations.
pub mod networking;
39
/// A trait implemented by valid ordering markers ([`TotalOrder`] and [`NoOrder`]).
///
/// The supertrait bounds encode the two-element lattice of ordering guarantees:
/// every marker is the minimum of itself with itself, combining with [`TotalOrder`]
/// preserves the marker, and combining with [`NoOrder`] always yields [`NoOrder`].
#[sealed::sealed]
pub trait Ordering:
    MinOrder<Self, Min = Self> + MinOrder<TotalOrder, Min = Self> + MinOrder<NoOrder, Min = NoOrder>
{
    /// The [`StreamOrder`] corresponding to this type.
    const ORDERING_KIND: StreamOrder;
}
48
/// Marks the stream as being totally ordered, which means that there are
/// no sources of non-determinism (other than intentional ones) that will
/// affect the order of elements.
// Uninhabited marker type: used only at the type level, never instantiated.
pub enum TotalOrder {}

#[sealed::sealed]
impl Ordering for TotalOrder {
    const ORDERING_KIND: StreamOrder = StreamOrder::TotalOrder;
}
58
/// Marks the stream as having no order, which means that the order of
/// elements may be affected by non-determinism.
///
/// This restricts certain operators, such as `fold` and `reduce`, to only
/// be used with commutative aggregation functions.
// Uninhabited marker type: used only at the type level, never instantiated.
pub enum NoOrder {}

#[sealed::sealed]
impl Ordering for NoOrder {
    const ORDERING_KIND: StreamOrder = StreamOrder::NoOrder;
}
70
/// Marker trait for an [`Ordering`] that is available when `Self` is a weaker guarantee than
/// `Other`, which means that a stream with `Other` guarantees can be safely converted to
/// have `Self` guarantees instead.
#[sealed::sealed]
pub trait WeakerOrderingThan<Other: ?Sized>: Ordering {}
// Blanket impl: `O` is weaker than `O2` exactly when the minimum of the two is `O` itself.
#[sealed::sealed]
impl<O: Ordering, O2: Ordering> WeakerOrderingThan<O2> for O where O: MinOrder<O2, Min = O> {}
78
/// Helper trait for determining the weakest of two orderings.
#[sealed::sealed]
pub trait MinOrder<Other: ?Sized> {
    /// The weaker of the two orderings.
    // NOTE(review): unlike `MinRetries::Min`, this associated type is not bounded by
    // `WeakerOrderingThan<Self> + WeakerOrderingThan<Other>` — confirm whether the
    // asymmetry with the retries lattice is intentional.
    type Min: Ordering;
}
85
// `TotalOrder` is the strongest guarantee, so the minimum is whatever the other side is.
#[sealed::sealed]
impl<O: Ordering> MinOrder<O> for TotalOrder {
    type Min = O;
}

// `NoOrder` is the weakest guarantee, so it absorbs anything it is combined with.
#[sealed::sealed]
impl<O: Ordering> MinOrder<O> for NoOrder {
    type Min = NoOrder;
}
95
/// A trait implemented by valid retries markers ([`ExactlyOnce`] and [`AtLeastOnce`]).
///
/// The supertrait bounds encode the two-element lattice of delivery guarantees:
/// every marker is the minimum of itself with itself, combining with [`ExactlyOnce`]
/// preserves the marker, and combining with [`AtLeastOnce`] always yields [`AtLeastOnce`].
#[sealed::sealed]
pub trait Retries:
    MinRetries<Self, Min = Self>
    + MinRetries<ExactlyOnce, Min = Self>
    + MinRetries<AtLeastOnce, Min = AtLeastOnce>
{
    /// The [`StreamRetry`] corresponding to this type.
    const RETRIES_KIND: StreamRetry;
}
106
/// Marks the stream as having deterministic message cardinality, with no
/// possibility of duplicates.
// Uninhabited marker type: used only at the type level, never instantiated.
pub enum ExactlyOnce {}

#[sealed::sealed]
impl Retries for ExactlyOnce {
    const RETRIES_KIND: StreamRetry = StreamRetry::ExactlyOnce;
}
115
/// Marks the stream as having non-deterministic message cardinality, which
/// means that duplicates may occur, but messages will not be dropped.
// Uninhabited marker type: used only at the type level, never instantiated.
pub enum AtLeastOnce {}

#[sealed::sealed]
impl Retries for AtLeastOnce {
    const RETRIES_KIND: StreamRetry = StreamRetry::AtLeastOnce;
}
124
/// Marker trait for a [`Retries`] that is available when `Self` is a weaker guarantee than
/// `Other`, which means that a stream with `Other` guarantees can be safely converted to
/// have `Self` guarantees instead.
#[sealed::sealed]
pub trait WeakerRetryThan<Other: ?Sized>: Retries {}
// Blanket impl: `R` is weaker than `R2` exactly when the minimum of the two is `R` itself.
#[sealed::sealed]
impl<R: Retries, R2: Retries> WeakerRetryThan<R2> for R where R: MinRetries<R2, Min = R> {}
132
/// Helper trait for determining the weakest of two retry guarantees.
#[sealed::sealed]
pub trait MinRetries<Other: ?Sized> {
    /// The weaker of the two retry guarantees.
    ///
    /// The bounds guarantee that both operands can be safely weakened to the minimum.
    type Min: Retries + WeakerRetryThan<Self> + WeakerRetryThan<Other>;
}
139
// `ExactlyOnce` is the strongest guarantee, so the minimum is whatever the other side is.
#[sealed::sealed]
impl<R: Retries> MinRetries<R> for ExactlyOnce {
    type Min = R;
}

// `AtLeastOnce` is the weakest guarantee, so it absorbs anything it is combined with.
#[sealed::sealed]
impl<R: Retries> MinRetries<R> for AtLeastOnce {
    type Min = AtLeastOnce;
}
149
#[sealed::sealed]
#[diagnostic::on_unimplemented(
    message = "The input stream must be totally-ordered (`TotalOrder`), but has order `{Self}`. Strengthen the order upstream or consider a different API.",
    label = "required here",
    note = "To intentionally process the stream by observing a non-deterministic (shuffled) order of elements, use `.assume_ordering`. This introduces non-determinism so avoid unless necessary."
)]
/// Marker trait that is implemented for the [`TotalOrder`] ordering guarantee.
pub trait IsOrdered: Ordering {}

#[sealed::sealed]
// Hides this impl from rustc's suggestions so the custom `on_unimplemented`
// diagnostic above stays front-and-center in error output.
#[diagnostic::do_not_recommend]
impl IsOrdered for TotalOrder {}
162
#[sealed::sealed]
#[diagnostic::on_unimplemented(
    message = "The input stream must be exactly-once (`ExactlyOnce`), but has retries `{Self}`. Strengthen the retries guarantee upstream or consider a different API.",
    label = "required here",
    note = "To intentionally process the stream by observing non-deterministic (randomly duplicated) retries, use `.assume_retries`. This introduces non-determinism so avoid unless necessary."
)]
/// Marker trait that is implemented for the [`ExactlyOnce`] retries guarantee.
pub trait IsExactlyOnce: Retries {}

#[sealed::sealed]
// Hides this impl from rustc's suggestions so the custom `on_unimplemented`
// diagnostic above stays front-and-center in error output.
#[diagnostic::do_not_recommend]
impl IsExactlyOnce for ExactlyOnce {}
175
/// Streaming sequence of elements with type `Type`.
///
/// This live collection represents a growing sequence of elements, with new elements being
/// asynchronously appended to the end of the sequence. This can be used to model the arrival
/// of network input, such as API requests, or streaming ingestion.
///
/// By default, all streams have deterministic ordering and each element is materialized exactly
/// once. But streams can also capture non-determinism via the `Order` and `Retries` type
/// parameters. When the ordering / retries guarantee is relaxed, fewer APIs will be available
/// on the stream. For example, if the stream is unordered, you cannot invoke [`Stream::first`].
///
/// Type Parameters:
/// - `Type`: the type of elements in the stream
/// - `Loc`: the location where the stream is being materialized
/// - `Bound`: the boundedness of the stream, which is either [`Bounded`] or [`Unbounded`]
/// - `Order`: the ordering of the stream, which is either [`TotalOrder`] or [`NoOrder`]
///   (default is [`TotalOrder`])
/// - `Retries`: the retry guarantee of the stream, which is either [`ExactlyOnce`] or
///   [`AtLeastOnce`] (default is [`ExactlyOnce`])
pub struct Stream<
    Type,
    Loc,
    Bound: Boundedness = Unbounded,
    Order: Ordering = TotalOrder,
    Retry: Retries = ExactlyOnce,
> {
    // The location handle this stream was created at.
    pub(crate) location: Loc,
    // IR node backing this stream; consuming operators swap in `HydroNode::Placeholder`.
    pub(crate) ir_node: RefCell<HydroNode>,
    // Flow graph under construction, cloned from `location.flow_state()`.
    pub(crate) flow_state: FlowState,

    // Ties the struct to its type parameters without storing values of them.
    _phantom: PhantomData<(Type, Loc, Bound, Order, Retry)>,
}
208
impl<T, L, B: Boundedness, O: Ordering, R: Retries> Drop for Stream<T, L, B, O, R> {
    // When a stream handle is dropped without being consumed, its IR node is registered
    // as a `HydroRoot::Null` so the dangling computation stays attached to the flow.
    fn drop(&mut self) {
        let ir_node = self.ir_node.replace(HydroNode::Placeholder);
        // `Placeholder` means a consuming operator already moved the node out; nodes
        // shared with other handles (e.g. behind a `Tee`) are kept alive by those handles.
        if !matches!(ir_node, HydroNode::Placeholder) && !ir_node.is_shared_with_others() {
            self.flow_state.borrow_mut().try_push_root(HydroRoot::Null {
                input: Box::new(ir_node),
                op_metadata: HydroIrOpMetadata::new(),
            });
        }
    }
}
220
221impl<'a, T, L, O: Ordering, R: Retries> From<Stream<T, L, Bounded, O, R>>
222    for Stream<T, L, Unbounded, O, R>
223where
224    L: Location<'a>,
225{
226    fn from(stream: Stream<T, L, Bounded, O, R>) -> Stream<T, L, Unbounded, O, R> {
227        let new_meta = stream
228            .location
229            .new_node_metadata(Stream::<T, L, Unbounded, O, R>::collection_kind());
230
231        Stream {
232            location: stream.location.clone(),
233            flow_state: stream.flow_state.clone(),
234            ir_node: RefCell::new(HydroNode::Cast {
235                inner: Box::new(stream.ir_node.replace(HydroNode::Placeholder)),
236                metadata: new_meta,
237            }),
238            _phantom: PhantomData,
239        }
240    }
241}
242
// A totally-ordered stream trivially satisfies the weaker `NoOrder` contract.
impl<'a, T, L, B: Boundedness, R: Retries> From<Stream<T, L, B, TotalOrder, R>>
    for Stream<T, L, B, NoOrder, R>
where
    L: Location<'a>,
{
    fn from(stream: Stream<T, L, B, TotalOrder, R>) -> Stream<T, L, B, NoOrder, R> {
        stream.weaken_ordering()
    }
}
252
// An exactly-once stream trivially satisfies the weaker `AtLeastOnce` contract.
impl<'a, T, L, B: Boundedness, O: Ordering> From<Stream<T, L, B, O, ExactlyOnce>>
    for Stream<T, L, B, O, AtLeastOnce>
where
    L: Location<'a>,
{
    fn from(stream: Stream<T, L, B, O, ExactlyOnce>) -> Stream<T, L, B, O, AtLeastOnce> {
        stream.weaken_retries()
    }
}
262
impl<'a, T, L, O: Ordering, R: Retries> DeferTick for Stream<T, Tick<L>, Bounded, O, R>
where
    L: Location<'a>,
{
    fn defer_tick(self) -> Self {
        // Qualified call dispatches to the inherent `defer_tick` method (defined in
        // another impl block), not recursively back to this trait method.
        Stream::defer_tick(self)
    }
}
271
272impl<'a, T, L, O: Ordering, R: Retries> CycleCollection<'a, TickCycle>
273    for Stream<T, Tick<L>, Bounded, O, R>
274where
275    L: Location<'a>,
276{
277    type Location = Tick<L>;
278
279    fn create_source(cycle_id: CycleId, location: Tick<L>) -> Self {
280        Stream::new(
281            location.clone(),
282            HydroNode::CycleSource {
283                cycle_id,
284                metadata: location.new_node_metadata(Self::collection_kind()),
285            },
286        )
287    }
288}
289
impl<'a, T, L, O: Ordering, R: Retries> CycleCollectionWithInitial<'a, TickCycle>
    for Stream<T, Tick<L>, Bounded, O, R>
where
    L: Location<'a>,
{
    type Location = Tick<L>;

    // Builds the receive side of a tick cycle that is seeded with `initial` values.
    fn create_source_with_initial(cycle_id: CycleId, initial: Self, location: Tick<L>) -> Self {
        // Values sent into the cycle become visible one tick later (`DeferTick`).
        let from_previous_tick: Stream<T, Tick<L>, Bounded, O, R> = Stream::new(
            location.clone(),
            HydroNode::DeferTick {
                input: Box::new(HydroNode::CycleSource {
                    cycle_id,
                    metadata: location.new_node_metadata(Self::collection_kind()),
                }),
                metadata: location.new_node_metadata(Self::collection_kind()),
            },
        );

        // The initial values are gated to appear only on the very first tick; from the
        // second tick onward, only the deferred cycle contents flow through.
        from_previous_tick.chain(initial.filter_if(location.optional_first_tick(q!(())).is_some()))
    }
}
312
impl<'a, T, L, O: Ordering, R: Retries> ReceiverComplete<'a, TickCycle>
    for Stream<T, Tick<L>, Bounded, O, R>
where
    L: Location<'a>,
{
    // Connects this stream as the sink side of a tick cycle.
    //
    // Panics if the stream's location does not match `expected_location`.
    fn complete(self, cycle_id: CycleId, expected_location: LocationId) {
        assert_eq!(
            Location::id(&self.location),
            expected_location,
            "locations do not match"
        );
        self.location
            .flow_state()
            .borrow_mut()
            .push_root(HydroRoot::CycleSink {
                cycle_id,
                // Taking the node out leaves a `Placeholder`, so `Drop` won't re-root it.
                input: Box::new(self.ir_node.replace(HydroNode::Placeholder)),
                op_metadata: HydroIrOpMetadata::new(),
            });
    }
}
334
impl<'a, T, L, B: Boundedness, O: Ordering, R: Retries> CycleCollection<'a, ForwardRef>
    for Stream<T, L, B, O, R>
where
    L: Location<'a> + NoTick,
{
    type Location = L;

    // Materializes the receive side of a forward reference as a `CycleSource` IR node.
    fn create_source(cycle_id: CycleId, location: L) -> Self {
        Stream::new(
            location.clone(),
            HydroNode::CycleSource {
                cycle_id,
                metadata: location.new_node_metadata(Self::collection_kind()),
            },
        )
    }
}
352
353impl<'a, T, L, B: Boundedness, O: Ordering, R: Retries> ReceiverComplete<'a, ForwardRef>
354    for Stream<T, L, B, O, R>
355where
356    L: Location<'a> + NoTick,
357{
358    fn complete(self, cycle_id: CycleId, expected_location: LocationId) {
359        assert_eq!(
360            Location::id(&self.location),
361            expected_location,
362            "locations do not match"
363        );
364        self.location
365            .flow_state()
366            .borrow_mut()
367            .push_root(HydroRoot::CycleSink {
368                cycle_id,
369                input: Box::new(self.ir_node.replace(HydroNode::Placeholder)),
370                op_metadata: HydroIrOpMetadata::new(),
371            });
372    }
373}
374
impl<'a, T, L, B: Boundedness, O: Ordering, R: Retries> Clone for Stream<T, L, B, O, R>
where
    T: Clone,
    L: Location<'a>,
{
    fn clone(&self) -> Self {
        // On the first clone, rewrite this handle's node into a `Tee` over a shared,
        // reference-counted inner node, so every handle observes the same upstream.
        if !matches!(self.ir_node.borrow().deref(), HydroNode::Tee { .. }) {
            let orig_ir_node = self.ir_node.replace(HydroNode::Placeholder);
            *self.ir_node.borrow_mut() = HydroNode::Tee {
                inner: SharedNode(Rc::new(RefCell::new(orig_ir_node))),
                metadata: self.location.new_node_metadata(Self::collection_kind()),
            };
        }

        // The node is now guaranteed to be a `Tee`; the clone gets its own `Tee`
        // wrapper that bumps the refcount on the shared inner node.
        if let HydroNode::Tee { inner, metadata } = self.ir_node.borrow().deref() {
            Stream {
                location: self.location.clone(),
                flow_state: self.flow_state.clone(),
                ir_node: HydroNode::Tee {
                    inner: SharedNode(inner.0.clone()),
                    metadata: metadata.clone(),
                }
                .into(),
                _phantom: PhantomData,
            }
        } else {
            unreachable!()
        }
    }
}
405
406impl<'a, T, L, B: Boundedness, O: Ordering, R: Retries> Stream<T, L, B, O, R>
407where
408    L: Location<'a>,
409{
    /// Wraps an existing IR node as a stream handle at `location`.
    ///
    /// Debug builds verify that the node's metadata already matches both the
    /// location and this stream type's [`CollectionKind`].
    pub(crate) fn new(location: L, ir_node: HydroNode) -> Self {
        debug_assert_eq!(ir_node.metadata().location_id, Location::id(&location));
        debug_assert_eq!(ir_node.metadata().collection_kind, Self::collection_kind());

        let flow_state = location.flow_state().clone();
        Stream {
            location,
            flow_state,
            ir_node: RefCell::new(ir_node),
            _phantom: PhantomData,
        }
    }
422
    /// Returns the [`Location`] where this stream is being materialized.
    ///
    /// This borrows the same location handle the stream was created with.
    pub fn location(&self) -> &L {
        &self.location
    }
427
    /// The [`CollectionKind`] describing this stream type: its boundedness, ordering,
    /// retry guarantee, and element type. Used to tag the metadata of IR nodes.
    pub(crate) fn collection_kind() -> CollectionKind {
        CollectionKind::Stream {
            bound: B::BOUND_KIND,
            order: O::ORDERING_KIND,
            retry: R::RETRIES_KIND,
            element_type: quote_type::<T>().into(),
        }
    }
436
437    /// Produces a stream based on invoking `f` on each element.
438    /// If you do not want to modify the stream and instead only want to view
439    /// each item use [`Stream::inspect`] instead.
440    ///
441    /// # Example
442    /// ```rust
443    /// # #[cfg(feature = "deploy")] {
444    /// # use hydro_lang::prelude::*;
445    /// # use futures::StreamExt;
446    /// # tokio_test::block_on(hydro_lang::test_util::stream_transform_test(|process| {
447    /// let words = process.source_iter(q!(vec!["hello", "world"]));
448    /// words.map(q!(|x| x.to_uppercase()))
449    /// # }, |mut stream| async move {
450    /// # for w in vec!["HELLO", "WORLD"] {
451    /// #     assert_eq!(stream.next().await.unwrap(), w);
452    /// # }
453    /// # }));
454    /// # }
455    /// ```
456    pub fn map<U, F>(self, f: impl IntoQuotedMut<'a, F, L>) -> Stream<U, L, B, O, R>
457    where
458        F: Fn(T) -> U + 'a,
459    {
460        let f = f.splice_fn1_ctx(&self.location).into();
461        Stream::new(
462            self.location.clone(),
463            HydroNode::Map {
464                f,
465                input: Box::new(self.ir_node.replace(HydroNode::Placeholder)),
466                metadata: self
467                    .location
468                    .new_node_metadata(Stream::<U, L, B, O, R>::collection_kind()),
469            },
470        )
471    }
472
473    /// For each item `i` in the input stream, transform `i` using `f` and then treat the
474    /// result as an [`Iterator`] to produce items one by one. The implementation for [`Iterator`]
475    /// for the output type `U` must produce items in a **deterministic** order.
476    ///
477    /// For example, `U` could be a `Vec`, but not a `HashSet`. If the order of the items in `U` is
478    /// not deterministic, use [`Stream::flat_map_unordered`] instead.
479    ///
480    /// # Example
481    /// ```rust
482    /// # #[cfg(feature = "deploy")] {
483    /// # use hydro_lang::prelude::*;
484    /// # use futures::StreamExt;
485    /// # tokio_test::block_on(hydro_lang::test_util::stream_transform_test(|process| {
486    /// process
487    ///     .source_iter(q!(vec![vec![1, 2], vec![3, 4]]))
488    ///     .flat_map_ordered(q!(|x| x))
489    /// # }, |mut stream| async move {
490    /// // 1, 2, 3, 4
491    /// # for w in (1..5) {
492    /// #     assert_eq!(stream.next().await.unwrap(), w);
493    /// # }
494    /// # }));
495    /// # }
496    /// ```
497    pub fn flat_map_ordered<U, I, F>(self, f: impl IntoQuotedMut<'a, F, L>) -> Stream<U, L, B, O, R>
498    where
499        I: IntoIterator<Item = U>,
500        F: Fn(T) -> I + 'a,
501    {
502        let f = f.splice_fn1_ctx(&self.location).into();
503        Stream::new(
504            self.location.clone(),
505            HydroNode::FlatMap {
506                f,
507                input: Box::new(self.ir_node.replace(HydroNode::Placeholder)),
508                metadata: self
509                    .location
510                    .new_node_metadata(Stream::<U, L, B, O, R>::collection_kind()),
511            },
512        )
513    }
514
515    /// Like [`Stream::flat_map_ordered`], but allows the implementation of [`Iterator`]
516    /// for the output type `U` to produce items in any order.
517    ///
518    /// # Example
519    /// ```rust
520    /// # #[cfg(feature = "deploy")] {
521    /// # use hydro_lang::{prelude::*, live_collections::stream::{NoOrder, ExactlyOnce}};
522    /// # use futures::StreamExt;
523    /// # tokio_test::block_on(hydro_lang::test_util::stream_transform_test::<_, _, _, NoOrder, ExactlyOnce>(|process| {
524    /// process
525    ///     .source_iter(q!(vec![
526    ///         std::collections::HashSet::<i32>::from_iter(vec![1, 2]),
527    ///         std::collections::HashSet::from_iter(vec![3, 4]),
528    ///     ]))
529    ///     .flat_map_unordered(q!(|x| x))
530    /// # }, |mut stream| async move {
531    /// // 1, 2, 3, 4, but in no particular order
532    /// # let mut results = Vec::new();
533    /// # for w in (1..5) {
534    /// #     results.push(stream.next().await.unwrap());
535    /// # }
536    /// # results.sort();
537    /// # assert_eq!(results, vec![1, 2, 3, 4]);
538    /// # }));
539    /// # }
540    /// ```
541    pub fn flat_map_unordered<U, I, F>(
542        self,
543        f: impl IntoQuotedMut<'a, F, L>,
544    ) -> Stream<U, L, B, NoOrder, R>
545    where
546        I: IntoIterator<Item = U>,
547        F: Fn(T) -> I + 'a,
548    {
549        let f = f.splice_fn1_ctx(&self.location).into();
550        Stream::new(
551            self.location.clone(),
552            HydroNode::FlatMap {
553                f,
554                input: Box::new(self.ir_node.replace(HydroNode::Placeholder)),
555                metadata: self
556                    .location
557                    .new_node_metadata(Stream::<U, L, B, NoOrder, R>::collection_kind()),
558            },
559        )
560    }
561
    /// For each item `i` in the input stream, treat `i` as an [`Iterator`] and produce its items one by one.
    /// The implementation for [`Iterator`] for the element type `T` must produce items in a **deterministic** order.
    ///
    /// For example, `T` could be a `Vec`, but not a `HashSet`. If the order of the items in `T` is
    /// not deterministic, use [`Stream::flatten_unordered`] instead.
    ///
    /// # Example
    /// ```rust
    /// # #[cfg(feature = "deploy")] {
    /// # use hydro_lang::prelude::*;
    /// # use futures::StreamExt;
    /// # tokio_test::block_on(hydro_lang::test_util::stream_transform_test(|process| {
    /// process
    ///     .source_iter(q!(vec![vec![1, 2], vec![3, 4]]))
    ///     .flatten_ordered()
    /// # }, |mut stream| async move {
    /// // 1, 2, 3, 4
    /// # for w in (1..5) {
    /// #     assert_eq!(stream.next().await.unwrap(), w);
    /// # }
    /// # }));
    /// # }
    /// ```
    pub fn flatten_ordered<U>(self) -> Stream<U, L, B, O, R>
    where
        T: IntoIterator<Item = U>,
    {
        // Flattening is just flat-mapping with the identity function.
        self.flat_map_ordered(q!(|d| d))
    }
590
    /// Like [`Stream::flatten_ordered`], but allows the implementation of [`Iterator`]
    /// for the element type `T` to produce items in any order.
    ///
    /// # Example
    /// ```rust
    /// # #[cfg(feature = "deploy")] {
    /// # use hydro_lang::{prelude::*, live_collections::stream::{NoOrder, ExactlyOnce}};
    /// # use futures::StreamExt;
    /// # tokio_test::block_on(hydro_lang::test_util::stream_transform_test::<_, _, _, NoOrder, ExactlyOnce>(|process| {
    /// process
    ///     .source_iter(q!(vec![
    ///         std::collections::HashSet::<i32>::from_iter(vec![1, 2]),
    ///         std::collections::HashSet::from_iter(vec![3, 4]),
    ///     ]))
    ///     .flatten_unordered()
    /// # }, |mut stream| async move {
    /// // 1, 2, 3, 4, but in no particular order
    /// # let mut results = Vec::new();
    /// # for w in (1..5) {
    /// #     results.push(stream.next().await.unwrap());
    /// # }
    /// # results.sort();
    /// # assert_eq!(results, vec![1, 2, 3, 4]);
    /// # }));
    /// # }
    /// ```
    pub fn flatten_unordered<U>(self) -> Stream<U, L, B, NoOrder, R>
    where
        T: IntoIterator<Item = U>,
    {
        // Flattening is just flat-mapping with the identity function.
        self.flat_map_unordered(q!(|d| d))
    }
623
624    /// For each item in the input stream, apply `f` to produce a [`futures::stream::Stream`],
625    /// then emit the elements of that stream one by one. When the inner stream yields
626    /// `Pending`, this operator yields as well.
627    pub fn flat_map_stream_blocking<U, S, F>(
628        self,
629        f: impl IntoQuotedMut<'a, F, L>,
630    ) -> Stream<U, L, B, O, R>
631    where
632        S: futures::Stream<Item = U>,
633        F: Fn(T) -> S + 'a,
634    {
635        let f = f.splice_fn1_ctx(&self.location).into();
636        Stream::new(
637            self.location.clone(),
638            HydroNode::FlatMapStreamBlocking {
639                f,
640                input: Box::new(self.ir_node.replace(HydroNode::Placeholder)),
641                metadata: self
642                    .location
643                    .new_node_metadata(Stream::<U, L, B, O, R>::collection_kind()),
644            },
645        )
646    }
647
    /// For each item in the input stream, treat it as a [`futures::stream::Stream`] and
    /// emit its elements one by one. When the inner stream yields `Pending`, this operator
    /// yields as well.
    pub fn flatten_stream_blocking<U>(self) -> Stream<U, L, B, O, R>
    where
        T: futures::Stream<Item = U>,
    {
        // Flattening is just flat-mapping with the identity function.
        self.flat_map_stream_blocking(q!(|d| d))
    }
657
658    /// Creates a stream containing only the elements of the input stream that satisfy a predicate
659    /// `f`, preserving the order of the elements.
660    ///
661    /// The closure `f` receives a reference `&T` rather than an owned value `T` because filtering does
662    /// not modify or take ownership of the values. If you need to modify the values while filtering
663    /// use [`Stream::filter_map`] instead.
664    ///
665    /// # Example
666    /// ```rust
667    /// # #[cfg(feature = "deploy")] {
668    /// # use hydro_lang::prelude::*;
669    /// # use futures::StreamExt;
670    /// # tokio_test::block_on(hydro_lang::test_util::stream_transform_test(|process| {
671    /// process
672    ///     .source_iter(q!(vec![1, 2, 3, 4]))
673    ///     .filter(q!(|&x| x > 2))
674    /// # }, |mut stream| async move {
675    /// // 3, 4
676    /// # for w in (3..5) {
677    /// #     assert_eq!(stream.next().await.unwrap(), w);
678    /// # }
679    /// # }));
680    /// # }
681    /// ```
682    pub fn filter<F>(self, f: impl IntoQuotedMut<'a, F, L>) -> Self
683    where
684        F: Fn(&T) -> bool + 'a,
685    {
686        let f = f.splice_fn1_borrow_ctx(&self.location).into();
687        Stream::new(
688            self.location.clone(),
689            HydroNode::Filter {
690                f,
691                input: Box::new(self.ir_node.replace(HydroNode::Placeholder)),
692                metadata: self.location.new_node_metadata(Self::collection_kind()),
693            },
694        )
695    }
696
    /// Splits the stream into two streams based on a predicate, without cloning elements.
    ///
    /// Elements for which `f` returns `true` are sent to the first output stream,
    /// and elements for which `f` returns `false` are sent to the second output stream.
    ///
    /// Unlike using `filter` twice, this only evaluates the predicate once per element
    /// and does not require `T: Clone`.
    ///
    /// The closure `f` receives a reference `&T` rather than an owned value `T` because
    /// the predicate is only used for routing; the element itself is moved to the
    /// appropriate output stream.
    ///
    /// # Example
    /// ```rust
    /// # #[cfg(feature = "deploy")] {
    /// # use hydro_lang::prelude::*;
    /// # use hydro_lang::live_collections::stream::{NoOrder, ExactlyOnce};
    /// # use futures::StreamExt;
    /// # tokio_test::block_on(hydro_lang::test_util::stream_transform_test::<_, _, _, NoOrder, ExactlyOnce>(|process| {
    /// let numbers: Stream<_, _, Unbounded> = process.source_iter(q!(vec![1, 2, 3, 4, 5, 6])).into();
    /// let (evens, odds) = numbers.partition(q!(|&x| x % 2 == 0));
    /// // evens: 2, 4, 6 tagged with true; odds: 1, 3, 5 tagged with false
    /// evens.map(q!(|x| (x, true)))
    ///     .merge_unordered(odds.map(q!(|x| (x, false))))
    /// # }, |mut stream| async move {
    /// # let mut results = Vec::new();
    /// # for _ in 0..6 {
    /// #     results.push(stream.next().await.unwrap());
    /// # }
    /// # results.sort();
    /// # assert_eq!(results, vec![(1, false), (2, true), (3, false), (4, true), (5, false), (6, true)]);
    /// # }));
    /// # }
    /// ```
    #[expect(
        clippy::type_complexity,
        reason = "return type mirrors the input stream type"
    )]
    pub fn partition<F>(
        self,
        f: impl IntoQuotedMut<'a, F, L>,
    ) -> (Stream<T, L, B, O, R>, Stream<T, L, B, O, R>)
    where
        F: Fn(&T) -> bool + 'a,
    {
        let f: crate::compile::ir::DebugExpr = f.splice_fn1_borrow_ctx(&self.location).into();
        // Both outputs read from the same shared upstream node, so the input is
        // evaluated once and each element is routed to exactly one side.
        let shared = SharedNode(Rc::new(RefCell::new(
            self.ir_node.replace(HydroNode::Placeholder),
        )));

        // Each `Partition` node gets its own metadata (separate `new_node_metadata` calls)
        // but shares the upstream via the `Rc`-backed `SharedNode`.
        let true_stream = Stream::new(
            self.location.clone(),
            HydroNode::Partition {
                inner: SharedNode(shared.0.clone()),
                f: f.clone(),
                is_true: true,
                metadata: self.location.new_node_metadata(Self::collection_kind()),
            },
        );

        let false_stream = Stream::new(
            self.location.clone(),
            HydroNode::Partition {
                inner: SharedNode(shared.0),
                f,
                is_true: false,
                metadata: self.location.new_node_metadata(Self::collection_kind()),
            },
        );

        (true_stream, false_stream)
    }
769
770    /// An operator that both filters and maps. It yields only the items for which the supplied closure `f` returns `Some(value)`.
771    ///
772    /// # Example
773    /// ```rust
774    /// # #[cfg(feature = "deploy")] {
775    /// # use hydro_lang::prelude::*;
776    /// # use futures::StreamExt;
777    /// # tokio_test::block_on(hydro_lang::test_util::stream_transform_test(|process| {
778    /// process
779    ///     .source_iter(q!(vec!["1", "hello", "world", "2"]))
780    ///     .filter_map(q!(|s| s.parse::<usize>().ok()))
781    /// # }, |mut stream| async move {
782    /// // 1, 2
783    /// # for w in (1..3) {
784    /// #     assert_eq!(stream.next().await.unwrap(), w);
785    /// # }
786    /// # }));
787    /// # }
788    /// ```
789    pub fn filter_map<U, F>(self, f: impl IntoQuotedMut<'a, F, L>) -> Stream<U, L, B, O, R>
790    where
791        F: Fn(T) -> Option<U> + 'a,
792    {
793        let f = f.splice_fn1_ctx(&self.location).into();
794        Stream::new(
795            self.location.clone(),
796            HydroNode::FilterMap {
797                f,
798                input: Box::new(self.ir_node.replace(HydroNode::Placeholder)),
799                metadata: self
800                    .location
801                    .new_node_metadata(Stream::<U, L, B, O, R>::collection_kind()),
802            },
803        )
804    }
805
    /// Generates a stream that maps each input element `i` to a tuple `(i, x)`,
    /// where `x` is the final value of `other`, a bounded [`Singleton`] or [`Optional`].
    /// If `other` is an empty [`Optional`], no values will be produced.
    ///
    /// # Example
    /// ```rust
    /// # #[cfg(feature = "deploy")] {
    /// # use hydro_lang::prelude::*;
    /// # use futures::StreamExt;
    /// # tokio_test::block_on(hydro_lang::test_util::stream_transform_test(|process| {
    /// let tick = process.tick();
    /// let batch = process
    ///   .source_iter(q!(vec![1, 2, 3, 4]))
    ///   .batch(&tick, nondet!(/** test */));
    /// let count = batch.clone().count(); // `count()` returns a singleton
    /// batch.cross_singleton(count).all_ticks()
    /// # }, |mut stream| async move {
    /// // (1, 4), (2, 4), (3, 4), (4, 4)
    /// # for w in vec![(1, 4), (2, 4), (3, 4), (4, 4)] {
    /// #     assert_eq!(stream.next().await.unwrap(), w);
    /// # }
    /// # }));
    /// # }
    /// ```
    pub fn cross_singleton<O2>(
        self,
        other: impl Into<Optional<O2, L, Bounded>>,
    ) -> Stream<(T, O2), L, B, O, R>
    where
        O2: Clone,
    {
        let other: Optional<O2, L, Bounded> = other.into();
        // Both operands must live at the same location to be combined.
        check_matching_location(&self.location, &other.location);

        Stream::new(
            self.location.clone(),
            HydroNode::CrossSingleton {
                left: Box::new(self.ir_node.replace(HydroNode::Placeholder)),
                right: Box::new(other.ir_node.replace(HydroNode::Placeholder)),
                metadata: self
                    .location
                    .new_node_metadata(Stream::<(T, O2), L, B, O, R>::collection_kind()),
            },
        )
    }
851
852    /// Passes this stream through if the boolean signal is `true`, otherwise the output is empty.
853    ///
854    /// # Example
855    /// ```rust
856    /// # #[cfg(feature = "deploy")] {
857    /// # use hydro_lang::prelude::*;
858    /// # use futures::StreamExt;
859    /// # tokio_test::block_on(hydro_lang::test_util::stream_transform_test(|process| {
860    /// let tick = process.tick();
861    /// // ticks are lazy by default, forces the second tick to run
862    /// tick.spin_batch(q!(1)).all_ticks().for_each(q!(|_| {}));
863    ///
864    /// let signal = tick.optional_first_tick(q!(())).is_some(); // true on tick 1, false on tick 2
865    /// let batch_first_tick = process
866    ///   .source_iter(q!(vec![1, 2, 3, 4]))
867    ///   .batch(&tick, nondet!(/** test */));
868    /// let batch_second_tick = process
869    ///   .source_iter(q!(vec![5, 6, 7, 8]))
870    ///   .batch(&tick, nondet!(/** test */))
871    ///   .defer_tick();
872    /// batch_first_tick.chain(batch_second_tick)
873    ///   .filter_if(signal)
874    ///   .all_ticks()
875    /// # }, |mut stream| async move {
876    /// // [1, 2, 3, 4]
877    /// # for w in vec![1, 2, 3, 4] {
878    /// #     assert_eq!(stream.next().await.unwrap(), w);
879    /// # }
880    /// # }));
881    /// # }
882    /// ```
883    pub fn filter_if(self, signal: Singleton<bool, L, Bounded>) -> Stream<T, L, B, O, R> {
884        self.cross_singleton(signal.filter(q!(|b| *b)))
885            .map(q!(|(d, _)| d))
886    }
887
    /// Passes this stream through if the argument (a [`Bounded`] [`Optional`]) is non-null, otherwise the output is empty.
    ///
    /// Useful for gating the release of elements based on a condition, such as only processing requests if you are the
    /// leader of a cluster.
    ///
    /// # Example
    /// ```rust
    /// # #[cfg(feature = "deploy")] {
    /// # use hydro_lang::prelude::*;
    /// # use futures::StreamExt;
    /// # tokio_test::block_on(hydro_lang::test_util::stream_transform_test(|process| {
    /// let tick = process.tick();
    /// // ticks are lazy by default, forces the second tick to run
    /// tick.spin_batch(q!(1)).all_ticks().for_each(q!(|_| {}));
    ///
    /// let batch_first_tick = process
    ///   .source_iter(q!(vec![1, 2, 3, 4]))
    ///   .batch(&tick, nondet!(/** test */));
    /// let batch_second_tick = process
    ///   .source_iter(q!(vec![5, 6, 7, 8]))
    ///   .batch(&tick, nondet!(/** test */))
    ///   .defer_tick(); // appears on the second tick
    /// let some_on_first_tick = tick.optional_first_tick(q!(()));
    /// batch_first_tick.chain(batch_second_tick)
    ///   .filter_if_some(some_on_first_tick)
    ///   .all_ticks()
    /// # }, |mut stream| async move {
    /// // [1, 2, 3, 4]
    /// # for w in vec![1, 2, 3, 4] {
    /// #     assert_eq!(stream.next().await.unwrap(), w);
    /// # }
    /// # }));
    /// # }
    /// ```
    #[deprecated(note = "use `filter_if` with `Optional::is_some()` instead")]
    pub fn filter_if_some<U>(self, signal: Optional<U, L, Bounded>) -> Stream<T, L, B, O, R> {
        // Collapse presence of `signal` to a boolean and delegate to `filter_if`.
        self.filter_if(signal.is_some())
    }
926
927    /// Passes this stream through if the argument (a [`Bounded`] [`Optional`]`) is null, otherwise the output is empty.
928    ///
929    /// Useful for gating the release of elements based on a condition, such as triggering a protocol if you are missing
930    /// some local state.
931    ///
932    /// # Example
933    /// ```rust
934    /// # #[cfg(feature = "deploy")] {
935    /// # use hydro_lang::prelude::*;
936    /// # use futures::StreamExt;
937    /// # tokio_test::block_on(hydro_lang::test_util::stream_transform_test(|process| {
938    /// let tick = process.tick();
939    /// // ticks are lazy by default, forces the second tick to run
940    /// tick.spin_batch(q!(1)).all_ticks().for_each(q!(|_| {}));
941    ///
942    /// let batch_first_tick = process
943    ///   .source_iter(q!(vec![1, 2, 3, 4]))
944    ///   .batch(&tick, nondet!(/** test */));
945    /// let batch_second_tick = process
946    ///   .source_iter(q!(vec![5, 6, 7, 8]))
947    ///   .batch(&tick, nondet!(/** test */))
948    ///   .defer_tick(); // appears on the second tick
949    /// let some_on_first_tick = tick.optional_first_tick(q!(()));
950    /// batch_first_tick.chain(batch_second_tick)
951    ///   .filter_if_none(some_on_first_tick)
952    ///   .all_ticks()
953    /// # }, |mut stream| async move {
954    /// // [5, 6, 7, 8]
955    /// # for w in vec![5, 6, 7, 8] {
956    /// #     assert_eq!(stream.next().await.unwrap(), w);
957    /// # }
958    /// # }));
959    /// # }
960    /// ```
961    #[deprecated(note = "use `filter_if` with `!Optional::is_some()` instead")]
962    pub fn filter_if_none<U>(self, other: Optional<U, L, Bounded>) -> Stream<T, L, B, O, R> {
963        self.filter_if(other.is_none())
964    }
965
    /// Forms the cross-product (Cartesian product, cross-join) of the items in the 2 input streams,
    /// returning all tupled pairs.
    ///
    /// When the right side is [`Bounded`], it is accumulated first and the left side streams
    /// through, preserving the left side's ordering. When both sides are [`Unbounded`], a
    /// symmetric hash join is used and ordering is [`NoOrder`].
    ///
    /// # Example
    /// ```rust
    /// # #[cfg(feature = "deploy")] {
    /// # use hydro_lang::prelude::*;
    /// # use std::collections::HashSet;
    /// # use futures::StreamExt;
    /// # tokio_test::block_on(hydro_lang::test_util::stream_transform_test(|process| {
    /// let tick = process.tick();
    /// let stream1 = process.source_iter(q!(vec![1, 2]));
    /// let stream2 = process.source_iter(q!(vec!['a', 'b']));
    /// stream1.cross_product(stream2)
    /// # }, |mut stream| async move {
    /// // (1, 'a'), (1, 'b'), (2, 'a'), (2, 'b') in any order
    /// # let expected = HashSet::from([(1, 'a'), (1, 'b'), (2, 'a'), (2, 'b')]);
    /// # stream.map(|i| assert!(expected.contains(&i)));
    /// # }));
    /// # }
    /// ```
    pub fn cross_product<T2, B2: Boundedness, O2: Ordering>(
        self,
        other: Stream<T2, L, B2, O2, R>,
    ) -> Stream<(T, T2), L, B, B2::PreserveOrderIfBounded<O>, R>
    where
        T: Clone,
        T2: Clone,
    {
        // Key every element on the unit key `()` so that `join` pairs all elements
        // of both sides, then strip the key back off.
        self.map(q!(|v| ((), v)))
            .join(other.map(q!(|v| ((), v))))
            .map(q!(|((), (v1, v2))| (v1, v2)))
    }
1002
1003    /// Takes one stream as input and filters out any duplicate occurrences. The output
1004    /// contains all unique values from the input.
1005    ///
1006    /// # Example
1007    /// ```rust
1008    /// # #[cfg(feature = "deploy")] {
1009    /// # use hydro_lang::prelude::*;
1010    /// # use futures::StreamExt;
1011    /// # tokio_test::block_on(hydro_lang::test_util::stream_transform_test(|process| {
1012    /// let tick = process.tick();
1013    /// process.source_iter(q!(vec![1, 2, 3, 2, 1, 4])).unique()
1014    /// # }, |mut stream| async move {
1015    /// # for w in vec![1, 2, 3, 4] {
1016    /// #     assert_eq!(stream.next().await.unwrap(), w);
1017    /// # }
1018    /// # }));
1019    /// # }
1020    /// ```
1021    pub fn unique(self) -> Stream<T, L, B, O, ExactlyOnce>
1022    where
1023        T: Eq + Hash,
1024    {
1025        Stream::new(
1026            self.location.clone(),
1027            HydroNode::Unique {
1028                input: Box::new(self.ir_node.replace(HydroNode::Placeholder)),
1029                metadata: self
1030                    .location
1031                    .new_node_metadata(Stream::<T, L, B, O, ExactlyOnce>::collection_kind()),
1032            },
1033        )
1034    }
1035
1036    /// Outputs everything in this stream that is *not* contained in the `other` stream.
1037    ///
1038    /// The `other` stream must be [`Bounded`], since this function will wait until
1039    /// all its elements are available before producing any output.
1040    /// # Example
1041    /// ```rust
1042    /// # #[cfg(feature = "deploy")] {
1043    /// # use hydro_lang::prelude::*;
1044    /// # use futures::StreamExt;
1045    /// # tokio_test::block_on(hydro_lang::test_util::stream_transform_test(|process| {
1046    /// let tick = process.tick();
1047    /// let stream = process
1048    ///   .source_iter(q!(vec![ 1, 2, 3, 4 ]))
1049    ///   .batch(&tick, nondet!(/** test */));
1050    /// let batch = process
1051    ///   .source_iter(q!(vec![1, 2]))
1052    ///   .batch(&tick, nondet!(/** test */));
1053    /// stream.filter_not_in(batch).all_ticks()
1054    /// # }, |mut stream| async move {
1055    /// # for w in vec![3, 4] {
1056    /// #     assert_eq!(stream.next().await.unwrap(), w);
1057    /// # }
1058    /// # }));
1059    /// # }
1060    /// ```
1061    pub fn filter_not_in<O2: Ordering, B2>(self, other: Stream<T, L, B2, O2, R>) -> Self
1062    where
1063        T: Eq + Hash,
1064        B2: IsBounded,
1065    {
1066        check_matching_location(&self.location, &other.location);
1067
1068        Stream::new(
1069            self.location.clone(),
1070            HydroNode::Difference {
1071                pos: Box::new(self.ir_node.replace(HydroNode::Placeholder)),
1072                neg: Box::new(other.ir_node.replace(HydroNode::Placeholder)),
1073                metadata: self
1074                    .location
1075                    .new_node_metadata(Stream::<T, L, Bounded, O, R>::collection_kind()),
1076            },
1077        )
1078    }
1079
1080    /// An operator which allows you to "inspect" each element of a stream without
1081    /// modifying it. The closure `f` is called on a reference to each item. This is
1082    /// mainly useful for debugging, and should not be used to generate side-effects.
1083    ///
1084    /// # Example
1085    /// ```rust
1086    /// # #[cfg(feature = "deploy")] {
1087    /// # use hydro_lang::prelude::*;
1088    /// # use futures::StreamExt;
1089    /// # tokio_test::block_on(hydro_lang::test_util::stream_transform_test(|process| {
1090    /// let nums = process.source_iter(q!(vec![1, 2]));
1091    /// // prints "1 * 10 = 10" and "2 * 10 = 20"
1092    /// nums.inspect(q!(|x| println!("{} * 10 = {}", x, x * 10)))
1093    /// # }, |mut stream| async move {
1094    /// # for w in vec![1, 2] {
1095    /// #     assert_eq!(stream.next().await.unwrap(), w);
1096    /// # }
1097    /// # }));
1098    /// # }
1099    /// ```
1100    pub fn inspect<F>(self, f: impl IntoQuotedMut<'a, F, L>) -> Self
1101    where
1102        F: Fn(&T) + 'a,
1103    {
1104        let f = f.splice_fn1_borrow_ctx(&self.location).into();
1105
1106        Stream::new(
1107            self.location.clone(),
1108            HydroNode::Inspect {
1109                f,
1110                input: Box::new(self.ir_node.replace(HydroNode::Placeholder)),
1111                metadata: self.location.new_node_metadata(Self::collection_kind()),
1112            },
1113        )
1114    }
1115
1116    /// Executes the provided closure for every element in this stream.
1117    ///
1118    /// Because the closure may have side effects, the stream must have deterministic order
1119    /// ([`TotalOrder`]) and no retries ([`ExactlyOnce`]). If the side effects can tolerate
1120    /// out-of-order or duplicate execution, use [`Stream::assume_ordering`] and
1121    /// [`Stream::assume_retries`] with an explanation for why this is the case.
1122    pub fn for_each<F: Fn(T) + 'a>(self, f: impl IntoQuotedMut<'a, F, L>)
1123    where
1124        O: IsOrdered,
1125        R: IsExactlyOnce,
1126    {
1127        let f = f.splice_fn1_ctx(&self.location).into();
1128        self.location
1129            .flow_state()
1130            .borrow_mut()
1131            .push_root(HydroRoot::ForEach {
1132                input: Box::new(self.ir_node.replace(HydroNode::Placeholder)),
1133                f,
1134                op_metadata: HydroIrOpMetadata::new(),
1135            });
1136    }
1137
1138    /// Sends all elements of this stream to a provided [`futures::Sink`], such as an external
1139    /// TCP socket to some other server. You should _not_ use this API for interacting with
1140    /// external clients, instead see [`Location::bidi_external_many_bytes`] and
1141    /// [`Location::bidi_external_many_bincode`]. This should be used for custom, low-level
1142    /// interaction with asynchronous sinks.
1143    pub fn dest_sink<S>(self, sink: impl QuotedWithContext<'a, S, L>)
1144    where
1145        O: IsOrdered,
1146        R: IsExactlyOnce,
1147        S: 'a + futures::Sink<T> + Unpin,
1148    {
1149        self.location
1150            .flow_state()
1151            .borrow_mut()
1152            .push_root(HydroRoot::DestSink {
1153                sink: sink.splice_typed_ctx(&self.location).into(),
1154                input: Box::new(self.ir_node.replace(HydroNode::Placeholder)),
1155                op_metadata: HydroIrOpMetadata::new(),
1156            });
1157    }
1158
1159    /// Maps each element `x` of the stream to `(i, x)`, where `i` is the index of the element.
1160    ///
1161    /// # Example
1162    /// ```rust
1163    /// # #[cfg(feature = "deploy")] {
1164    /// # use hydro_lang::{prelude::*, live_collections::stream::{TotalOrder, ExactlyOnce}};
1165    /// # use futures::StreamExt;
1166    /// # tokio_test::block_on(hydro_lang::test_util::stream_transform_test::<_, _, _, TotalOrder, ExactlyOnce>(|process| {
1167    /// let tick = process.tick();
1168    /// let numbers = process.source_iter(q!(vec![1, 2, 3, 4]));
1169    /// numbers.enumerate()
1170    /// # }, |mut stream| async move {
1171    /// // (0, 1), (1, 2), (2, 3), (3, 4)
1172    /// # for w in vec![(0, 1), (1, 2), (2, 3), (3, 4)] {
1173    /// #     assert_eq!(stream.next().await.unwrap(), w);
1174    /// # }
1175    /// # }));
1176    /// # }
1177    /// ```
1178    pub fn enumerate(self) -> Stream<(usize, T), L, B, O, R>
1179    where
1180        O: IsOrdered,
1181        R: IsExactlyOnce,
1182    {
1183        Stream::new(
1184            self.location.clone(),
1185            HydroNode::Enumerate {
1186                input: Box::new(self.ir_node.replace(HydroNode::Placeholder)),
1187                metadata: self.location.new_node_metadata(Stream::<
1188                    (usize, T),
1189                    L,
1190                    B,
1191                    TotalOrder,
1192                    ExactlyOnce,
1193                >::collection_kind()),
1194            },
1195        )
1196    }
1197
1198    /// Combines elements of the stream into a [`Singleton`], by starting with an intitial value,
1199    /// generated by the `init` closure, and then applying the `comb` closure to each element in the stream.
1200    /// Unlike iterators, `comb` takes the accumulator by `&mut` reference, so that it can be modified in place.
1201    ///
1202    /// Depending on the input stream guarantees, the closure may need to be commutative
1203    /// (for unordered streams) or idempotent (for streams with non-deterministic duplicates).
1204    ///
1205    /// # Example
1206    /// ```rust
1207    /// # #[cfg(feature = "deploy")] {
1208    /// # use hydro_lang::prelude::*;
1209    /// # use futures::StreamExt;
1210    /// # tokio_test::block_on(hydro_lang::test_util::stream_transform_test(|process| {
1211    /// let words = process.source_iter(q!(vec!["HELLO", "WORLD"]));
1212    /// words
1213    ///     .fold(q!(|| String::new()), q!(|acc, x| acc.push_str(x)))
1214    ///     .into_stream()
1215    /// # }, |mut stream| async move {
1216    /// // "HELLOWORLD"
1217    /// # assert_eq!(stream.next().await.unwrap(), "HELLOWORLD");
1218    /// # }));
1219    /// # }
1220    /// ```
    pub fn fold<A, I, F, C, Idemp, M, B2: SingletonBound>(
        self,
        init: impl IntoQuotedMut<'a, I, L>,
        comb: impl IntoQuotedMut<'a, F, L, AggFuncAlgebra<C, Idemp, M>>,
    ) -> Singleton<A, L, B2>
    where
        I: Fn() -> A + 'a,
        F: Fn(&mut A, T),
        C: ValidCommutativityFor<O>,
        Idemp: ValidIdempotenceFor<R>,
        B: ApplyMonotoneStream<M, B2>,
    {
        // Splice both staged closures in this location's context. The combinator carries
        // algebraic-property markers (`AggFuncAlgebra`) whose proof is registered here.
        let init = init.splice_fn0_ctx(&self.location).into();
        let (comb, proof) = comb.splice_fn2_borrow_mut_ctx_props(&self.location);
        proof.register_proof(&comb);

        // The `ValidCommutativityFor` / `ValidIdempotenceFor` bounds above justify erasing
        // the order/retry markers before building the IR node.
        let nondet = nondet!(/** the combinator function is commutative and idempotent */);
        let ordered_etc: Stream<T, L, B> = self.assume_retries(nondet).assume_ordering(nondet);

        let core = HydroNode::Fold {
            init,
            acc: comb.into(),
            input: Box::new(ordered_etc.ir_node.replace(HydroNode::Placeholder)),
            metadata: ordered_etc
                .location
                .new_node_metadata(Singleton::<A, L, B2>::collection_kind()),
        };

        Singleton::new(ordered_etc.location.clone(), core)
    }
1251
1252    /// Combines elements of the stream into an [`Optional`], by starting with the first element in the stream,
1253    /// and then applying the `comb` closure to each element in the stream. The [`Optional`] will be empty
1254    /// until the first element in the input arrives. Unlike iterators, `comb` takes the accumulator by `&mut`
1255    /// reference, so that it can be modified in place.
1256    ///
1257    /// Depending on the input stream guarantees, the closure may need to be commutative
1258    /// (for unordered streams) or idempotent (for streams with non-deterministic duplicates).
1259    ///
1260    /// # Example
1261    /// ```rust
1262    /// # #[cfg(feature = "deploy")] {
1263    /// # use hydro_lang::prelude::*;
1264    /// # use futures::StreamExt;
1265    /// # tokio_test::block_on(hydro_lang::test_util::stream_transform_test(|process| {
1266    /// let bools = process.source_iter(q!(vec![false, true, false]));
1267    /// bools.reduce(q!(|acc, x| *acc |= x)).into_stream()
1268    /// # }, |mut stream| async move {
1269    /// // true
1270    /// # assert_eq!(stream.next().await.unwrap(), true);
1271    /// # }));
1272    /// # }
1273    /// ```
    pub fn reduce<F, C, Idemp>(
        self,
        comb: impl IntoQuotedMut<'a, F, L, AggFuncAlgebra<C, Idemp>>,
    ) -> Optional<T, L, B>
    where
        F: Fn(&mut T, T) + 'a,
        C: ValidCommutativityFor<O>,
        Idemp: ValidIdempotenceFor<R>,
    {
        // Splice the combinator; its algebraic-property obligations travel as props and
        // are registered as a proof here.
        let (f, proof) = comb.splice_fn2_borrow_mut_ctx_props(&self.location);
        proof.register_proof(&f);

        // The `Valid*For` bounds above justify erasing the order/retry markers before
        // building the IR node.
        let nondet = nondet!(/** the combinator function is commutative and idempotent */);
        let ordered_etc: Stream<T, L, B> = self.assume_retries(nondet).assume_ordering(nondet);

        let core = HydroNode::Reduce {
            f: f.into(),
            input: Box::new(ordered_etc.ir_node.replace(HydroNode::Placeholder)),
            metadata: ordered_etc
                .location
                .new_node_metadata(Optional::<T, L, B>::collection_kind()),
        };

        Optional::new(ordered_etc.location.clone(), core)
    }
1299
1300    /// Computes the maximum element in the stream as an [`Optional`], which
1301    /// will be empty until the first element in the input arrives.
1302    ///
1303    /// # Example
1304    /// ```rust
1305    /// # #[cfg(feature = "deploy")] {
1306    /// # use hydro_lang::prelude::*;
1307    /// # use futures::StreamExt;
1308    /// # tokio_test::block_on(hydro_lang::test_util::stream_transform_test(|process| {
1309    /// let tick = process.tick();
1310    /// let numbers = process.source_iter(q!(vec![1, 2, 3, 4]));
1311    /// let batch = numbers.batch(&tick, nondet!(/** test */));
1312    /// batch.max().all_ticks()
1313    /// # }, |mut stream| async move {
1314    /// // 4
1315    /// # assert_eq!(stream.next().await.unwrap(), 4);
1316    /// # }));
1317    /// # }
1318    /// ```
1319    pub fn max(self) -> Optional<T, L, B>
1320    where
1321        T: Ord,
1322    {
1323        self.assume_retries_trusted::<ExactlyOnce>(nondet!(/** max is idempotent */))
1324            .assume_ordering_trusted_bounded::<TotalOrder>(
1325                nondet!(/** max is commutative, but order affects intermediates */),
1326            )
1327            .reduce(q!(|curr, new| {
1328                if new > *curr {
1329                    *curr = new;
1330                }
1331            }))
1332    }
1333
1334    /// Computes the minimum element in the stream as an [`Optional`], which
1335    /// will be empty until the first element in the input arrives.
1336    ///
1337    /// # Example
1338    /// ```rust
1339    /// # #[cfg(feature = "deploy")] {
1340    /// # use hydro_lang::prelude::*;
1341    /// # use futures::StreamExt;
1342    /// # tokio_test::block_on(hydro_lang::test_util::stream_transform_test(|process| {
1343    /// let tick = process.tick();
1344    /// let numbers = process.source_iter(q!(vec![1, 2, 3, 4]));
1345    /// let batch = numbers.batch(&tick, nondet!(/** test */));
1346    /// batch.min().all_ticks()
1347    /// # }, |mut stream| async move {
1348    /// // 1
1349    /// # assert_eq!(stream.next().await.unwrap(), 1);
1350    /// # }));
1351    /// # }
1352    /// ```
1353    pub fn min(self) -> Optional<T, L, B>
1354    where
1355        T: Ord,
1356    {
1357        self.assume_retries_trusted::<ExactlyOnce>(nondet!(/** min is idempotent */))
1358            .assume_ordering_trusted_bounded::<TotalOrder>(
1359                nondet!(/** max is commutative, but order affects intermediates */),
1360            )
1361            .reduce(q!(|curr, new| {
1362                if new < *curr {
1363                    *curr = new;
1364                }
1365            }))
1366    }
1367
1368    /// Computes the first element in the stream as an [`Optional`], which
1369    /// will be empty until the first element in the input arrives.
1370    ///
1371    /// This requires the stream to have a [`TotalOrder`] guarantee, otherwise
1372    /// re-ordering of elements may cause the first element to change.
1373    ///
1374    /// # Example
1375    /// ```rust
1376    /// # #[cfg(feature = "deploy")] {
1377    /// # use hydro_lang::prelude::*;
1378    /// # use futures::StreamExt;
1379    /// # tokio_test::block_on(hydro_lang::test_util::stream_transform_test(|process| {
1380    /// let tick = process.tick();
1381    /// let numbers = process.source_iter(q!(vec![1, 2, 3, 4]));
1382    /// let batch = numbers.batch(&tick, nondet!(/** test */));
1383    /// batch.first().all_ticks()
1384    /// # }, |mut stream| async move {
1385    /// // 1
1386    /// # assert_eq!(stream.next().await.unwrap(), 1);
1387    /// # }));
1388    /// # }
1389    /// ```
    pub fn first(self) -> Optional<T, L, B>
    where
        O: IsOrdered,
    {
        // `Generate::Return(item)` emits the first element and terminates the generator,
        // so at most one item flows downstream; the no-op `reduce` merely wraps that
        // single item into an `Optional` (empty until the first element arrives).
        self.make_totally_ordered()
            .assume_retries_trusted::<ExactlyOnce>(nondet!(/** first is idempotent */))
            .generator(q!(|| ()), q!(|_, item| Generate::Return(item)))
            .reduce(q!(|_, _| {}))
    }
1399
1400    /// Computes the last element in the stream as an [`Optional`], which
1401    /// will be empty until an element in the input arrives.
1402    ///
1403    /// This requires the stream to have a [`TotalOrder`] guarantee, otherwise
1404    /// re-ordering of elements may cause the last element to change.
1405    ///
1406    /// # Example
1407    /// ```rust
1408    /// # #[cfg(feature = "deploy")] {
1409    /// # use hydro_lang::prelude::*;
1410    /// # use futures::StreamExt;
1411    /// # tokio_test::block_on(hydro_lang::test_util::stream_transform_test(|process| {
1412    /// let tick = process.tick();
1413    /// let numbers = process.source_iter(q!(vec![1, 2, 3, 4]));
1414    /// let batch = numbers.batch(&tick, nondet!(/** test */));
1415    /// batch.last().all_ticks()
1416    /// # }, |mut stream| async move {
1417    /// // 4
1418    /// # assert_eq!(stream.next().await.unwrap(), 4);
1419    /// # }));
1420    /// # }
1421    /// ```
    pub fn last(self) -> Optional<T, L, B>
    where
        O: IsOrdered,
    {
        // Overwrite the accumulator with every new element; after the stream is
        // exhausted the accumulator holds the final (last) element.
        self.make_totally_ordered()
            .assume_retries_trusted::<ExactlyOnce>(nondet!(/** last is idempotent */))
            .reduce(q!(|curr, new| *curr = new))
    }
1430
1431    /// Returns a stream containing at most the first `n` elements of the input stream,
1432    /// preserving the original order. Similar to `LIMIT` in SQL.
1433    ///
1434    /// This requires the stream to have a [`TotalOrder`] guarantee and [`ExactlyOnce`]
1435    /// retries, since the result depends on the order and cardinality of elements.
1436    ///
1437    /// # Example
1438    /// ```rust
1439    /// # #[cfg(feature = "deploy")] {
1440    /// # use hydro_lang::prelude::*;
1441    /// # use futures::StreamExt;
1442    /// # tokio_test::block_on(hydro_lang::test_util::stream_transform_test(|process| {
1443    /// let numbers = process.source_iter(q!(vec![10, 20, 30, 40, 50]));
1444    /// numbers.limit(q!(3))
1445    /// # }, |mut stream| async move {
1446    /// // 10, 20, 30
1447    /// # for w in vec![10, 20, 30] {
1448    /// #     assert_eq!(stream.next().await.unwrap(), w);
1449    /// # }
1450    /// # }));
1451    /// # }
1452    /// ```
    pub fn limit(
        self,
        n: impl QuotedWithContext<'a, usize, L> + Copy + 'a,
    ) -> Stream<T, L, B, TotalOrder, ExactlyOnce>
    where
        O: IsOrdered,
        R: IsExactlyOnce,
    {
        // Stateful generator that counts emitted elements:
        // - `count == n` on entry (covers `n == 0`): stop immediately, emitting nothing more.
        // - after incrementing, if this is the n-th element: `Return` emits it and terminates.
        // - otherwise: `Yield` emits it and keeps the generator alive.
        self.generator(
            q!(|| 0usize),
            q!(move |count, item| {
                if *count == n {
                    Generate::Break
                } else {
                    *count += 1;
                    if *count == n {
                        Generate::Return(item)
                    } else {
                        Generate::Yield(item)
                    }
                }
            }),
        )
    }
1477
1478    /// Collects all the elements of this stream into a single [`Vec`] element.
1479    ///
1480    /// If the input stream is [`Unbounded`], the output [`Singleton`] will be [`Unbounded`] as
1481    /// well, which means that the value of the [`Vec`] will asynchronously grow as new elements
1482    /// are added. On such a value, you can use [`Singleton::snapshot`] to grab an instance of
1483    /// the vector at an arbitrary point in time.
1484    ///
1485    /// # Example
1486    /// ```rust
1487    /// # #[cfg(feature = "deploy")] {
1488    /// # use hydro_lang::prelude::*;
1489    /// # use futures::StreamExt;
1490    /// # tokio_test::block_on(hydro_lang::test_util::stream_transform_test(|process| {
1491    /// let tick = process.tick();
1492    /// let numbers = process.source_iter(q!(vec![1, 2, 3, 4]));
1493    /// let batch = numbers.batch(&tick, nondet!(/** test */));
1494    /// batch.collect_vec().all_ticks() // emit each tick's Vec into an unbounded stream
1495    /// # }, |mut stream| async move {
1496    /// // [ vec![1, 2, 3, 4] ]
1497    /// # for w in vec![vec![1, 2, 3, 4]] {
1498    /// #     assert_eq!(stream.next().await.unwrap(), w);
1499    /// # }
1500    /// # }));
1501    /// # }
1502    /// ```
1503    pub fn collect_vec(self) -> Singleton<Vec<T>, L, B>
1504    where
1505        O: IsOrdered,
1506        R: IsExactlyOnce,
1507    {
1508        self.make_totally_ordered().make_exactly_once().fold(
1509            q!(|| vec![]),
1510            q!(|acc, v| {
1511                acc.push(v);
1512            }),
1513        )
1514    }
1515
1516    /// Applies a function to each element of the stream, maintaining an internal state (accumulator)
1517    /// and emitting each intermediate result.
1518    ///
1519    /// Unlike `fold` which only returns the final accumulated value, `scan` produces a new stream
1520    /// containing all intermediate accumulated values. The scan operation can also terminate early
1521    /// by returning `None`.
1522    ///
1523    /// The function takes a mutable reference to the accumulator and the current element, and returns
1524    /// an `Option<U>`. If the function returns `Some(value)`, `value` is emitted to the output stream.
1525    /// If the function returns `None`, the stream is terminated and no more elements are processed.
1526    ///
1527    /// # Examples
1528    ///
1529    /// Basic usage - running sum:
1530    /// ```rust
1531    /// # #[cfg(feature = "deploy")] {
1532    /// # use hydro_lang::prelude::*;
1533    /// # use futures::StreamExt;
1534    /// # tokio_test::block_on(hydro_lang::test_util::stream_transform_test(|process| {
1535    /// process.source_iter(q!(vec![1, 2, 3, 4])).scan(
1536    ///     q!(|| 0),
1537    ///     q!(|acc, x| {
1538    ///         *acc += x;
1539    ///         Some(*acc)
1540    ///     }),
1541    /// )
1542    /// # }, |mut stream| async move {
1543    /// // Output: 1, 3, 6, 10
1544    /// # for w in vec![1, 3, 6, 10] {
1545    /// #     assert_eq!(stream.next().await.unwrap(), w);
1546    /// # }
1547    /// # }));
1548    /// # }
1549    /// ```
1550    ///
1551    /// Early termination example:
1552    /// ```rust
1553    /// # #[cfg(feature = "deploy")] {
1554    /// # use hydro_lang::prelude::*;
1555    /// # use futures::StreamExt;
1556    /// # tokio_test::block_on(hydro_lang::test_util::stream_transform_test(|process| {
1557    /// process.source_iter(q!(vec![1, 2, 3, 4])).scan(
1558    ///     q!(|| 1),
1559    ///     q!(|state, x| {
1560    ///         *state = *state * x;
1561    ///         if *state > 6 {
1562    ///             None // Terminate the stream
1563    ///         } else {
1564    ///             Some(-*state)
1565    ///         }
1566    ///     }),
1567    /// )
1568    /// # }, |mut stream| async move {
1569    /// // Output: -1, -2, -6
1570    /// # for w in vec![-1, -2, -6] {
1571    /// #     assert_eq!(stream.next().await.unwrap(), w);
1572    /// # }
1573    /// # }));
1574    /// # }
1575    /// ```
1576    pub fn scan<A, U, I, F>(
1577        self,
1578        init: impl IntoQuotedMut<'a, I, L>,
1579        f: impl IntoQuotedMut<'a, F, L>,
1580    ) -> Stream<U, L, B, TotalOrder, ExactlyOnce>
1581    where
1582        O: IsOrdered,
1583        R: IsExactlyOnce,
1584        I: Fn() -> A + 'a,
1585        F: Fn(&mut A, T) -> Option<U> + 'a,
1586    {
1587        let init = init.splice_fn0_ctx(&self.location).into();
1588        let f = f.splice_fn2_borrow_mut_ctx(&self.location).into();
1589
1590        Stream::new(
1591            self.location.clone(),
1592            HydroNode::Scan {
1593                init,
1594                acc: f,
1595                input: Box::new(self.ir_node.replace(HydroNode::Placeholder)),
1596                metadata: self.location.new_node_metadata(
1597                    Stream::<U, L, B, TotalOrder, ExactlyOnce>::collection_kind(),
1598                ),
1599            },
1600        )
1601    }
1602
1603    /// Async version of [`Stream::scan`]. Applies an async function to each element of the
1604    /// stream, maintaining an internal state (accumulator) and emitting the values returned
1605    /// by the function.
1606    ///
1607    /// The closure runs synchronously (so it can mutate the accumulator), then returns a
1608    /// future. The future is polled to completion. If it resolves to `Some`, the value is
1609    /// emitted. If it resolves to `None`, the item is filtered out.
1610    ///
1611    /// # Examples
1612    ///
1613    /// ```rust
1614    /// # #[cfg(feature = "deploy")] {
1615    /// # use hydro_lang::prelude::*;
1616    /// # use futures::StreamExt;
1617    /// # tokio_test::block_on(hydro_lang::test_util::stream_transform_test(|process| {
1618    /// process
1619    ///     .source_iter(q!(vec![1, 2, 3, 4]))
1620    ///     .scan_async_blocking(
1621    ///         q!(|| 0),
1622    ///         q!(|acc, x| {
1623    ///             *acc += x;
1624    ///             let val = *acc;
1625    ///             async move { Some(val) }
1626    ///         }),
1627    ///     )
1628    /// # }, |mut stream| async move {
1629    /// // Output: 1, 3, 6, 10
1630    /// # for w in vec![1, 3, 6, 10] {
1631    /// #     assert_eq!(stream.next().await.unwrap(), w);
1632    /// # }
1633    /// # }));
1634    /// # }
1635    /// ```
1636    pub fn scan_async_blocking<A, U, I, F, Fut>(
1637        self,
1638        init: impl IntoQuotedMut<'a, I, L>,
1639        f: impl IntoQuotedMut<'a, F, L>,
1640    ) -> Stream<U, L, B, TotalOrder, ExactlyOnce>
1641    where
1642        O: IsOrdered,
1643        R: IsExactlyOnce,
1644        I: Fn() -> A + 'a,
1645        F: Fn(&mut A, T) -> Fut + 'a,
1646        Fut: Future<Output = Option<U>> + 'a,
1647    {
1648        let init = init.splice_fn0_ctx(&self.location).into();
1649        let f = f.splice_fn2_borrow_mut_ctx(&self.location).into();
1650
1651        Stream::new(
1652            self.location.clone(),
1653            HydroNode::ScanAsyncBlocking {
1654                init,
1655                acc: f,
1656                input: Box::new(self.ir_node.replace(HydroNode::Placeholder)),
1657                metadata: self.location.new_node_metadata(
1658                    Stream::<U, L, B, TotalOrder, ExactlyOnce>::collection_kind(),
1659                ),
1660            },
1661        )
1662    }
1663
1664    /// Iteratively processes the elements of the stream using a state machine that can yield
1665    /// elements as it processes its inputs. This is designed to mirror the unstable generator
1666    /// syntax in Rust, without requiring special syntax.
1667    ///
1668    /// Like [`Stream::scan`], this function takes in an initializer that emits the initial
1669    /// state. The second argument defines the processing logic, taking in a mutable reference
1670    /// to the state and the value to be processed. It emits a [`Generate`] value, whose
1671    /// variants define what is emitted and whether further inputs should be processed.
1672    ///
1673    /// # Example
1674    /// ```rust
1675    /// # #[cfg(feature = "deploy")] {
1676    /// # use hydro_lang::prelude::*;
1677    /// # use futures::StreamExt;
1678    /// # tokio_test::block_on(hydro_lang::test_util::stream_transform_test(|process| {
1679    /// process.source_iter(q!(vec![1, 3, 100, 10])).generator(
1680    ///     q!(|| 0),
1681    ///     q!(|acc, x| {
1682    ///         *acc += x;
1683    ///         if *acc > 100 {
1684    ///             hydro_lang::live_collections::keyed_stream::Generate::Return("done!".to_owned())
1685    ///         } else if *acc % 2 == 0 {
1686    ///             hydro_lang::live_collections::keyed_stream::Generate::Yield("even".to_owned())
1687    ///         } else {
1688    ///             hydro_lang::live_collections::keyed_stream::Generate::Continue
1689    ///         }
1690    ///     }),
1691    /// )
1692    /// # }, |mut stream| async move {
1693    /// // Output: "even", "done!"
1694    /// # let mut results = Vec::new();
1695    /// # for _ in 0..2 {
1696    /// #     results.push(stream.next().await.unwrap());
1697    /// # }
1698    /// # results.sort();
1699    /// # assert_eq!(results, vec!["done!".to_owned(), "even".to_owned()]);
1700    /// # }));
1701    /// # }
1702    /// ```
    pub fn generator<A, U, I, F>(
        self,
        init: impl IntoQuotedMut<'a, I, L> + Copy,
        f: impl IntoQuotedMut<'a, F, L> + Copy,
    ) -> Stream<U, L, B, TotalOrder, ExactlyOnce>
    where
        O: IsOrdered,
        R: IsExactlyOnce,
        I: Fn() -> A + 'a,
        F: Fn(&mut A, T) -> Generate<U> + 'a,
    {
        // Defer splicing of the user's quoted closures so they can be embedded
        // inside the larger quoted scan closure built below.
        let init: ManualExpr<I, _> = ManualExpr::new(move |ctx: &L| init.splice_fn0_ctx(ctx));
        let f: ManualExpr<F, _> = ManualExpr::new(move |ctx: &L| f.splice_fn2_borrow_mut_ctx(ctx));

        // The bounds O: IsOrdered / R: IsExactlyOnce make these no-op casts.
        let this = self.make_totally_ordered().make_exactly_once();

        // State is Option<Option<A>>:
        //   None = not yet initialized
        //   Some(Some(a)) = active with state a
        //   Some(None) = terminated
        let scan_init = q!(|| None)
            .splice_fn0_ctx::<Option<Option<A>>>(&this.location)
            .into();
        // The scan closure returns Option<Option<U>>: the outer None terminates
        // the scan entirely, while an inner None (from Continue) emits nothing
        // for this element once flattened below.
        let scan_f = q!(move |state: &mut Option<Option<_>>, v| {
            if state.is_none() {
                *state = Some(Some(init()));
            }
            match state {
                Some(Some(state_value)) => match f(state_value, v) {
                    Generate::Yield(out) => Some(Some(out)),
                    Generate::Return(out) => {
                        *state = Some(None);
                        Some(Some(out))
                    }
                    // Unlike KeyedStream, we can terminate the scan directly on
                    // Break/Return because there is only one state (no other keys
                    // that still need processing).
                    Generate::Break => None,
                    Generate::Continue => Some(None),
                },
                // State is Some(None) after Return; terminate the scan.
                _ => None,
            }
        })
        .splice_fn2_borrow_mut_ctx::<Option<Option<A>>, T, _>(&this.location)
        .into();

        // Scan producing Option<U> elements (one per surviving input).
        let scan_node = HydroNode::Scan {
            init: scan_init,
            acc: scan_f,
            input: Box::new(this.ir_node.replace(HydroNode::Placeholder)),
            metadata: this.location.new_node_metadata(Stream::<
                Option<U>,
                L,
                B,
                TotalOrder,
                ExactlyOnce,
            >::collection_kind()),
        };

        // Identity flat-map over Option<U>: drops the `None`s produced by
        // `Generate::Continue` and unwraps the yielded values.
        let flatten_f = q!(|d| d)
            .splice_fn1_ctx::<Option<U>, _>(&this.location)
            .into();
        let flatten_node = HydroNode::FlatMap {
            f: flatten_f,
            input: Box::new(scan_node),
            metadata: this
                .location
                .new_node_metadata(Stream::<U, L, B, TotalOrder, ExactlyOnce>::collection_kind()),
        };

        Stream::new(this.location.clone(), flatten_node)
    }
1776
1777    /// Given a time interval, returns a stream corresponding to samples taken from the
1778    /// stream roughly at that interval. The output will have elements in the same order
1779    /// as the input, but with arbitrary elements skipped between samples. There is also
1780    /// no guarantee on the exact timing of the samples.
1781    ///
1782    /// # Non-Determinism
1783    /// The output stream is non-deterministic in which elements are sampled, since this
1784    /// is controlled by a clock.
1785    pub fn sample_every(
1786        self,
1787        interval: impl QuotedWithContext<'a, std::time::Duration, L> + Copy + 'a,
1788        nondet: NonDet,
1789    ) -> Stream<T, L, Unbounded, O, AtLeastOnce>
1790    where
1791        L: NoTick + NoAtomic,
1792    {
1793        let samples = self.location.source_interval(interval, nondet);
1794
1795        let tick = self.location.tick();
1796        self.batch(&tick, nondet)
1797            .filter_if(samples.batch(&tick, nondet).first().is_some())
1798            .all_ticks()
1799            .weaken_retries()
1800    }
1801
1802    /// Given a timeout duration, returns an [`Optional`]  which will have a value if the
1803    /// stream has not emitted a value since that duration.
1804    ///
1805    /// # Non-Determinism
1806    /// Timeout relies on non-deterministic sampling of the stream, so depending on when
1807    /// samples take place, timeouts may be non-deterministically generated or missed,
1808    /// and the notification of the timeout may be delayed as well. There is also no
1809    /// guarantee on how long the [`Optional`] will have a value after the timeout is
1810    /// detected based on when the next sample is taken.
1811    pub fn timeout(
1812        self,
1813        duration: impl QuotedWithContext<'a, std::time::Duration, Tick<L>> + Copy + 'a,
1814        nondet: NonDet,
1815    ) -> Optional<(), L, Unbounded>
1816    where
1817        L: NoTick + NoAtomic,
1818    {
1819        let tick = self.location.tick();
1820
1821        let latest_received = self.assume_retries::<ExactlyOnce>(nondet).fold(
1822            q!(|| None),
1823            q!(
1824                |latest, _| {
1825                    *latest = Some(Instant::now());
1826                },
1827                commutative = manual_proof!(/** TODO */)
1828            ),
1829        );
1830
1831        latest_received
1832            .snapshot(&tick, nondet)
1833            .filter_map(q!(move |latest_received| {
1834                if let Some(latest_received) = latest_received {
1835                    if Instant::now().duration_since(latest_received) > duration {
1836                        Some(())
1837                    } else {
1838                        None
1839                    }
1840                } else {
1841                    Some(())
1842                }
1843            }))
1844            .latest()
1845    }
1846
1847    /// Shifts this stream into an atomic context, which guarantees that any downstream logic
1848    /// will all be executed synchronously before any outputs are yielded (in [`Stream::end_atomic`]).
1849    ///
1850    /// This is useful to enforce local consistency constraints, such as ensuring that a write is
1851    /// processed before an acknowledgement is emitted.
1852    pub fn atomic(self) -> Stream<T, Atomic<L>, B, O, R> {
1853        let id = self.location.flow_state().borrow_mut().next_clock_id();
1854        let out_location = Atomic {
1855            tick: Tick {
1856                id,
1857                l: self.location.clone(),
1858            },
1859        };
1860        Stream::new(
1861            out_location.clone(),
1862            HydroNode::BeginAtomic {
1863                inner: Box::new(self.ir_node.replace(HydroNode::Placeholder)),
1864                metadata: out_location
1865                    .new_node_metadata(Stream::<T, Atomic<L>, B, O, R>::collection_kind()),
1866            },
1867        )
1868    }
1869
1870    /// Given a tick, returns a stream corresponding to a batch of elements segmented by
1871    /// that tick. These batches are guaranteed to be contiguous across ticks and preserve
1872    /// the order of the input. The output stream will execute in the [`Tick`] that was
1873    /// used to create the atomic section.
1874    ///
1875    /// # Non-Determinism
1876    /// The batch boundaries are non-deterministic and may change across executions.
1877    pub fn batch(self, tick: &Tick<L>, _nondet: NonDet) -> Stream<T, Tick<L>, Bounded, O, R> {
1878        assert_eq!(Location::id(tick.outer()), Location::id(&self.location));
1879        Stream::new(
1880            tick.clone(),
1881            HydroNode::Batch {
1882                inner: Box::new(self.ir_node.replace(HydroNode::Placeholder)),
1883                metadata: tick
1884                    .new_node_metadata(Stream::<T, Tick<L>, Bounded, O, R>::collection_kind()),
1885            },
1886        )
1887    }
1888
1889    /// An operator which allows you to "name" a `HydroNode`.
1890    /// This is only used for testing, to correlate certain `HydroNode`s with IDs.
1891    pub fn ir_node_named(self, name: &str) -> Stream<T, L, B, O, R> {
1892        {
1893            let mut node = self.ir_node.borrow_mut();
1894            let metadata = node.metadata_mut();
1895            metadata.tag = Some(name.to_owned());
1896        }
1897        self
1898    }
1899
1900    /// Turns this [`Stream`] into a [`Optional`], under the invariant assumption that there is at
1901    /// most one element. If this invariant is broken, the program may exhibit undefined behavior,
1902    /// so uses must be carefully vetted.
1903    pub(crate) fn cast_at_most_one_element(self) -> Optional<T, L, B>
1904    where
1905        B: IsBounded,
1906    {
1907        Optional::new(
1908            self.location.clone(),
1909            HydroNode::Cast {
1910                inner: Box::new(self.ir_node.replace(HydroNode::Placeholder)),
1911                metadata: self
1912                    .location
1913                    .new_node_metadata(Optional::<T, L, B>::collection_kind()),
1914            },
1915        )
1916    }
1917
1918    pub(crate) fn use_ordering_type<O2: Ordering>(self) -> Stream<T, L, B, O2, R> {
1919        if O::ORDERING_KIND == O2::ORDERING_KIND {
1920            Stream::new(
1921                self.location.clone(),
1922                self.ir_node.replace(HydroNode::Placeholder),
1923            )
1924        } else {
1925            panic!(
1926                "Runtime ordering {:?} did not match requested cast {:?}.",
1927                O::ORDERING_KIND,
1928                O2::ORDERING_KIND
1929            )
1930        }
1931    }
1932
1933    /// Explicitly "casts" the stream to a type with a different ordering
1934    /// guarantee. Useful in unsafe code where the ordering cannot be proven
1935    /// by the type-system.
1936    ///
1937    /// # Non-Determinism
1938    /// This function is used as an escape hatch, and any mistakes in the
1939    /// provided ordering guarantee will propagate into the guarantees
1940    /// for the rest of the program.
1941    pub fn assume_ordering<O2: Ordering>(self, _nondet: NonDet) -> Stream<T, L, B, O2, R> {
1942        if O::ORDERING_KIND == O2::ORDERING_KIND {
1943            self.use_ordering_type()
1944        } else if O2::ORDERING_KIND == StreamOrder::NoOrder {
1945            // We can always weaken the ordering guarantee
1946            Stream::new(
1947                self.location.clone(),
1948                HydroNode::Cast {
1949                    inner: Box::new(self.ir_node.replace(HydroNode::Placeholder)),
1950                    metadata: self
1951                        .location
1952                        .new_node_metadata(Stream::<T, L, B, O2, R>::collection_kind()),
1953                },
1954            )
1955        } else {
1956            Stream::new(
1957                self.location.clone(),
1958                HydroNode::ObserveNonDet {
1959                    inner: Box::new(self.ir_node.replace(HydroNode::Placeholder)),
1960                    trusted: false,
1961                    metadata: self
1962                        .location
1963                        .new_node_metadata(Stream::<T, L, B, O2, R>::collection_kind()),
1964                },
1965            )
1966        }
1967    }
1968
1969    // like `assume_ordering_trusted`, but only if the input stream is bounded and therefore
1970    // intermediate states will not be revealed
1971    fn assume_ordering_trusted_bounded<O2: Ordering>(
1972        self,
1973        nondet: NonDet,
1974    ) -> Stream<T, L, B, O2, R> {
1975        if B::BOUNDED {
1976            self.assume_ordering_trusted(nondet)
1977        } else {
1978            self.assume_ordering(nondet)
1979        }
1980    }
1981
1982    // only for internal APIs that have been carefully vetted to ensure that the non-determinism
1983    // is not observable
1984    pub(crate) fn assume_ordering_trusted<O2: Ordering>(
1985        self,
1986        _nondet: NonDet,
1987    ) -> Stream<T, L, B, O2, R> {
1988        if O::ORDERING_KIND == O2::ORDERING_KIND {
1989            Stream::new(
1990                self.location.clone(),
1991                self.ir_node.replace(HydroNode::Placeholder),
1992            )
1993        } else if O2::ORDERING_KIND == StreamOrder::NoOrder {
1994            // We can always weaken the ordering guarantee
1995            Stream::new(
1996                self.location.clone(),
1997                HydroNode::Cast {
1998                    inner: Box::new(self.ir_node.replace(HydroNode::Placeholder)),
1999                    metadata: self
2000                        .location
2001                        .new_node_metadata(Stream::<T, L, B, O2, R>::collection_kind()),
2002                },
2003            )
2004        } else {
2005            Stream::new(
2006                self.location.clone(),
2007                HydroNode::ObserveNonDet {
2008                    inner: Box::new(self.ir_node.replace(HydroNode::Placeholder)),
2009                    trusted: true,
2010                    metadata: self
2011                        .location
2012                        .new_node_metadata(Stream::<T, L, B, O2, R>::collection_kind()),
2013                },
2014            )
2015        }
2016    }
2017
2018    #[deprecated = "use `weaken_ordering::<NoOrder>()` instead"]
2019    /// Weakens the ordering guarantee provided by the stream to [`NoOrder`],
2020    /// which is always safe because that is the weakest possible guarantee.
2021    pub fn weakest_ordering(self) -> Stream<T, L, B, NoOrder, R> {
2022        self.weaken_ordering::<NoOrder>()
2023    }
2024
2025    /// Weakens the ordering guarantee provided by the stream to `O2`, with the type-system
2026    /// enforcing that `O2` is weaker than the input ordering guarantee.
2027    pub fn weaken_ordering<O2: WeakerOrderingThan<O>>(self) -> Stream<T, L, B, O2, R> {
2028        let nondet = nondet!(/** this is a weaker ordering guarantee, so it is safe to assume */);
2029        self.assume_ordering::<O2>(nondet)
2030    }
2031
2032    /// Strengthens the ordering guarantee to `TotalOrder`, given that `O: IsOrdered`, which
2033    /// implies that `O == TotalOrder`.
2034    pub fn make_totally_ordered(self) -> Stream<T, L, B, TotalOrder, R>
2035    where
2036        O: IsOrdered,
2037    {
2038        self.assume_ordering(nondet!(/** no-op */))
2039    }
2040
2041    /// Explicitly "casts" the stream to a type with a different retries
2042    /// guarantee. Useful in unsafe code where the lack of retries cannot
2043    /// be proven by the type-system.
2044    ///
2045    /// # Non-Determinism
2046    /// This function is used as an escape hatch, and any mistakes in the
2047    /// provided retries guarantee will propagate into the guarantees
2048    /// for the rest of the program.
2049    pub fn assume_retries<R2: Retries>(self, _nondet: NonDet) -> Stream<T, L, B, O, R2> {
2050        if R::RETRIES_KIND == R2::RETRIES_KIND {
2051            Stream::new(
2052                self.location.clone(),
2053                self.ir_node.replace(HydroNode::Placeholder),
2054            )
2055        } else if R2::RETRIES_KIND == StreamRetry::AtLeastOnce {
2056            // We can always weaken the retries guarantee
2057            Stream::new(
2058                self.location.clone(),
2059                HydroNode::Cast {
2060                    inner: Box::new(self.ir_node.replace(HydroNode::Placeholder)),
2061                    metadata: self
2062                        .location
2063                        .new_node_metadata(Stream::<T, L, B, O, R2>::collection_kind()),
2064                },
2065            )
2066        } else {
2067            Stream::new(
2068                self.location.clone(),
2069                HydroNode::ObserveNonDet {
2070                    inner: Box::new(self.ir_node.replace(HydroNode::Placeholder)),
2071                    trusted: false,
2072                    metadata: self
2073                        .location
2074                        .new_node_metadata(Stream::<T, L, B, O, R2>::collection_kind()),
2075                },
2076            )
2077        }
2078    }
2079
2080    // only for internal APIs that have been carefully vetted to ensure that the non-determinism
2081    // is not observable
2082    fn assume_retries_trusted<R2: Retries>(self, _nondet: NonDet) -> Stream<T, L, B, O, R2> {
2083        if R::RETRIES_KIND == R2::RETRIES_KIND {
2084            Stream::new(
2085                self.location.clone(),
2086                self.ir_node.replace(HydroNode::Placeholder),
2087            )
2088        } else if R2::RETRIES_KIND == StreamRetry::AtLeastOnce {
2089            // We can always weaken the retries guarantee
2090            Stream::new(
2091                self.location.clone(),
2092                HydroNode::Cast {
2093                    inner: Box::new(self.ir_node.replace(HydroNode::Placeholder)),
2094                    metadata: self
2095                        .location
2096                        .new_node_metadata(Stream::<T, L, B, O, R2>::collection_kind()),
2097                },
2098            )
2099        } else {
2100            Stream::new(
2101                self.location.clone(),
2102                HydroNode::ObserveNonDet {
2103                    inner: Box::new(self.ir_node.replace(HydroNode::Placeholder)),
2104                    trusted: true,
2105                    metadata: self
2106                        .location
2107                        .new_node_metadata(Stream::<T, L, B, O, R2>::collection_kind()),
2108                },
2109            )
2110        }
2111    }
2112
2113    #[deprecated = "use `weaken_retries::<AtLeastOnce>()` instead"]
2114    /// Weakens the retries guarantee provided by the stream to [`AtLeastOnce`],
2115    /// which is always safe because that is the weakest possible guarantee.
2116    pub fn weakest_retries(self) -> Stream<T, L, B, O, AtLeastOnce> {
2117        self.weaken_retries::<AtLeastOnce>()
2118    }
2119
2120    /// Weakens the retries guarantee provided by the stream to `R2`, with the type-system
2121    /// enforcing that `R2` is weaker than the input retries guarantee.
2122    pub fn weaken_retries<R2: WeakerRetryThan<R>>(self) -> Stream<T, L, B, O, R2> {
2123        let nondet = nondet!(/** this is a weaker retry guarantee, so it is safe to assume */);
2124        self.assume_retries::<R2>(nondet)
2125    }
2126
2127    /// Strengthens the retry guarantee to `ExactlyOnce`, given that `R: IsExactlyOnce`, which
2128    /// implies that `R == ExactlyOnce`.
2129    pub fn make_exactly_once(self) -> Stream<T, L, B, O, ExactlyOnce>
2130    where
2131        R: IsExactlyOnce,
2132    {
2133        self.assume_retries(nondet!(/** no-op */))
2134    }
2135
2136    /// Strengthens the boundedness guarantee to `Bounded`, given that `B: IsBounded`, which
2137    /// implies that `B == Bounded`.
2138    pub fn make_bounded(self) -> Stream<T, L, Bounded, O, R>
2139    where
2140        B: IsBounded,
2141    {
2142        self.weaken_boundedness()
2143    }
2144
    /// Casts the boundedness guarantee of the stream to an arbitrary `B2`. When `B2`
    /// matches the current boundedness this is a no-op; otherwise a `Cast` node is
    /// inserted into the IR.
    ///
    /// NOTE(review): the previous doc claimed "given that `B: IsBounded`", but no such
    /// bound exists on this function. As written, `B = Unbounded, B2 = Bounded` also
    /// takes the cast path, which would *strengthen* the guarantee rather than weaken
    /// it — confirm whether an `IsBounded` bound is intended here.
    pub fn weaken_boundedness<B2: Boundedness>(self) -> Stream<T, L, B2, O, R> {
        if B::BOUNDED == B2::BOUNDED {
            // Same boundedness: reuse the IR node unchanged.
            Stream::new(
                self.location.clone(),
                self.ir_node.replace(HydroNode::Placeholder),
            )
        } else {
            // Differing boundedness: insert a cast. Bounded -> Unbounded is a safe
            // weakening; see the NOTE(review) above for the opposite direction.
            Stream::new(
                self.location.clone(),
                HydroNode::Cast {
                    inner: Box::new(self.ir_node.replace(HydroNode::Placeholder)),
                    metadata: self
                        .location
                        .new_node_metadata(Stream::<T, L, B2, O, R>::collection_kind()),
                },
            )
        }
    }
2166}
2167
2168impl<'a, T, L, B: Boundedness, O: Ordering, R: Retries> Stream<&T, L, B, O, R>
2169where
2170    L: Location<'a>,
2171{
2172    /// Clone each element of the stream; akin to `map(q!(|d| d.clone()))`.
2173    ///
2174    /// # Example
2175    /// ```rust
2176    /// # #[cfg(feature = "deploy")] {
2177    /// # use hydro_lang::prelude::*;
2178    /// # use futures::StreamExt;
2179    /// # tokio_test::block_on(hydro_lang::test_util::stream_transform_test(|process| {
2180    /// process.source_iter(q!(&[1, 2, 3])).cloned()
2181    /// # }, |mut stream| async move {
2182    /// // 1, 2, 3
2183    /// # for w in vec![1, 2, 3] {
2184    /// #     assert_eq!(stream.next().await.unwrap(), w);
2185    /// # }
2186    /// # }));
2187    /// # }
2188    /// ```
2189    pub fn cloned(self) -> Stream<T, L, B, O, R>
2190    where
2191        T: Clone,
2192    {
2193        self.map(q!(|d| d.clone()))
2194    }
2195}
2196
2197impl<'a, T, L, B: Boundedness, O: Ordering> Stream<T, L, B, O, ExactlyOnce>
2198where
2199    L: Location<'a>,
2200{
2201    /// Computes the number of elements in the stream as a [`Singleton`].
2202    ///
2203    /// # Example
2204    /// ```rust
2205    /// # #[cfg(feature = "deploy")] {
2206    /// # use hydro_lang::prelude::*;
2207    /// # use futures::StreamExt;
2208    /// # tokio_test::block_on(hydro_lang::test_util::stream_transform_test(|process| {
2209    /// let tick = process.tick();
2210    /// let numbers = process.source_iter(q!(vec![1, 2, 3, 4]));
2211    /// let batch = numbers.batch(&tick, nondet!(/** test */));
2212    /// batch.count().all_ticks()
2213    /// # }, |mut stream| async move {
2214    /// // 4
2215    /// # assert_eq!(stream.next().await.unwrap(), 4);
2216    /// # }));
2217    /// # }
2218    /// ```
2219    pub fn count(self) -> Singleton<usize, L, B::StreamToMonotone> {
2220        self.assume_ordering_trusted::<TotalOrder>(nondet!(
2221            /// Order does not affect eventual count, and also does not affect intermediate states.
2222        ))
2223        .fold(
2224            q!(|| 0usize),
2225            q!(
2226                |count, _| *count += 1,
2227                monotone = manual_proof!(/** += 1 is monotone */)
2228            ),
2229        )
2230    }
2231}
2232
2233impl<'a, T, L: Location<'a> + NoTick, O: Ordering, R: Retries> Stream<T, L, Unbounded, O, R> {
2234    /// Produces a new stream that merges the elements of the two input streams.
2235    /// The result has [`NoOrder`] because the order of merging is not guaranteed.
2236    ///
2237    /// Currently, both input streams must be [`Unbounded`]. When the streams are
2238    /// [`Bounded`], you can use [`Stream::chain`] instead.
2239    ///
2240    /// # Example
2241    /// ```rust
2242    /// # #[cfg(feature = "deploy")] {
2243    /// # use hydro_lang::prelude::*;
2244    /// # use futures::StreamExt;
2245    /// # tokio_test::block_on(hydro_lang::test_util::stream_transform_test(|process| {
2246    /// let numbers: Stream<i32, _, Unbounded> = // 1, 2, 3, 4
2247    /// # process.source_iter(q!(vec![1, 2, 3, 4])).into();
2248    /// numbers.clone().map(q!(|x| x + 1)).merge_unordered(numbers)
2249    /// # }, |mut stream| async move {
2250    /// // 2, 3, 4, 5, and 1, 2, 3, 4 merged in unknown order
2251    /// # for w in vec![2, 3, 4, 5, 1, 2, 3, 4] {
2252    /// #     assert_eq!(stream.next().await.unwrap(), w);
2253    /// # }
2254    /// # }));
2255    /// # }
2256    /// ```
2257    pub fn merge_unordered<O2: Ordering, R2: Retries>(
2258        self,
2259        other: Stream<T, L, Unbounded, O2, R2>,
2260    ) -> Stream<T, L, Unbounded, NoOrder, <R as MinRetries<R2>>::Min>
2261    where
2262        R: MinRetries<R2>,
2263    {
2264        Stream::new(
2265            self.location.clone(),
2266            HydroNode::Chain {
2267                first: Box::new(self.ir_node.replace(HydroNode::Placeholder)),
2268                second: Box::new(other.ir_node.replace(HydroNode::Placeholder)),
2269                metadata: self.location.new_node_metadata(Stream::<
2270                    T,
2271                    L,
2272                    Unbounded,
2273                    NoOrder,
2274                    <R as MinRetries<R2>>::Min,
2275                >::collection_kind()),
2276            },
2277        )
2278    }
2279
2280    /// Deprecated: use [`Stream::merge_unordered`] instead.
2281    #[deprecated(note = "use `merge_unordered` instead")]
2282    pub fn interleave<O2: Ordering, R2: Retries>(
2283        self,
2284        other: Stream<T, L, Unbounded, O2, R2>,
2285    ) -> Stream<T, L, Unbounded, NoOrder, <R as MinRetries<R2>>::Min>
2286    where
2287        R: MinRetries<R2>,
2288    {
2289        self.merge_unordered(other)
2290    }
2291}
2292
impl<'a, T, L: Location<'a> + NoTick, R: Retries> Stream<T, L, Unbounded, TotalOrder, R> {
    /// Produces a new stream that combines the elements of the two input streams,
    /// preserving the relative order of elements within each input.
    ///
    /// Currently, both input streams must be [`Unbounded`]. When the streams are
    /// [`Bounded`], you can use [`Stream::chain`] instead.
    ///
    /// # Non-Determinism
    /// The order in which elements *across* the two streams will be interleaved is
    /// non-deterministic, so the order of elements will vary across runs. If the output order
    /// is irrelevant, use [`Stream::merge_unordered`] instead, which is deterministic but emits an
    /// unordered stream.
    ///
    /// # Example
    /// ```rust
    /// # #[cfg(feature = "deploy")] {
    /// # use hydro_lang::prelude::*;
    /// # use futures::StreamExt;
    /// # tokio_test::block_on(hydro_lang::test_util::stream_transform_test(|process| {
    /// let numbers: Stream<i32, _, Unbounded> = // 1, 3
    /// # process.source_iter(q!(vec![1, 3])).into();
    /// numbers.clone().merge_ordered(numbers.map(q!(|x| x + 1)), nondet!(/** example */))
    /// # }, |mut stream| async move {
    /// // 1, 3 and 2, 4 in some order, preserving the original local order
    /// # for w in vec![1, 3, 2, 4] {
    /// #     assert_eq!(stream.next().await.unwrap(), w);
    /// # }
    /// # }));
    /// # }
    /// ```
    pub fn merge_ordered<R2: Retries>(
        self,
        other: Stream<T, L, Unbounded, TotalOrder, R2>,
        nondet: NonDet,
    ) -> Stream<T, L, Unbounded, TotalOrder, <R as MinRetries<R2>>::Min>
    where
        R: MinRetries<R2>,
    {
        // NOTE(review): `sliced!` appears to batch both inputs per tick and `chain`
        // the per-tick batches (self's batch first), preserving each input's internal
        // order while leaving the cross-stream interleaving to the non-deterministic
        // batch boundaries — confirm against the `sliced` module.
        super::sliced::sliced! {
            let self_batch = use(self, nondet);
            let other_batch = use(other, nondet);
            self_batch.chain(other_batch)
        }
    }
}
2338
impl<'a, T, L, B: Boundedness, O: Ordering, R: Retries> Stream<T, L, B, O, R>
where
    L: Location<'a>,
{
    /// Produces a new stream that emits the input elements in sorted order.
    ///
    /// The input stream can have any ordering guarantee, but the output stream
    /// will have a [`TotalOrder`] guarantee. This operator will block until all
    /// elements in the input stream are available, so it requires the input stream
    /// to be [`Bounded`].
    ///
    /// # Example
    /// ```rust
    /// # #[cfg(feature = "deploy")] {
    /// # use hydro_lang::prelude::*;
    /// # use futures::StreamExt;
    /// # tokio_test::block_on(hydro_lang::test_util::stream_transform_test(|process| {
    /// let tick = process.tick();
    /// let numbers = process.source_iter(q!(vec![4, 2, 3, 1]));
    /// let batch = numbers.batch(&tick, nondet!(/** test */));
    /// batch.sort().all_ticks()
    /// # }, |mut stream| async move {
    /// // 1, 2, 3, 4
    /// # for w in (1..5) {
    /// #     assert_eq!(stream.next().await.unwrap(), w);
    /// # }
    /// # }));
    /// # }
    /// ```
    pub fn sort(self) -> Stream<T, L, Bounded, TotalOrder, R>
    where
        B: IsBounded,
        T: Ord,
    {
        let this = self.make_bounded();
        // Wrap the input in a `Sort` IR node; the output's collection kind records
        // the upgraded `TotalOrder` guarantee.
        Stream::new(
            this.location.clone(),
            HydroNode::Sort {
                input: Box::new(this.ir_node.replace(HydroNode::Placeholder)),
                metadata: this
                    .location
                    .new_node_metadata(Stream::<T, L, Bounded, TotalOrder, R>::collection_kind()),
            },
        )
    }

    /// Produces a new stream that first emits the elements of the `self` stream,
    /// and then emits the elements of the `other` stream. The output stream has
    /// a [`TotalOrder`] guarantee if and only if both input streams have a
    /// [`TotalOrder`] guarantee.
    ///
    /// The `self` stream must be [`Bounded`], because this operator blocks until
    /// all of its elements are available before emitting `other`. The `other`
    /// stream may have any boundedness, which is propagated to the output stream.
    ///
    /// # Example
    /// ```rust
    /// # #[cfg(feature = "deploy")] {
    /// # use hydro_lang::prelude::*;
    /// # use futures::StreamExt;
    /// # tokio_test::block_on(hydro_lang::test_util::stream_transform_test(|process| {
    /// let tick = process.tick();
    /// let numbers = process.source_iter(q!(vec![1, 2, 3, 4]));
    /// let batch = numbers.batch(&tick, nondet!(/** test */));
    /// batch.clone().map(q!(|x| x + 1)).chain(batch).all_ticks()
    /// # }, |mut stream| async move {
    /// // 2, 3, 4, 5, 1, 2, 3, 4
    /// # for w in vec![2, 3, 4, 5, 1, 2, 3, 4] {
    /// #     assert_eq!(stream.next().await.unwrap(), w);
    /// # }
    /// # }));
    /// # }
    /// ```
    pub fn chain<O2: Ordering, R2: Retries, B2: Boundedness>(
        self,
        other: Stream<T, L, B2, O2, R2>,
    ) -> Stream<T, L, B2, <O as MinOrder<O2>>::Min, <R as MinRetries<R2>>::Min>
    where
        B: IsBounded,
        O: MinOrder<O2>,
        R: MinRetries<R2>,
    {
        check_matching_location(&self.location, &other.location);

        // The output's ordering and retry guarantees are the weaker (`Min`) of the
        // two inputs' guarantees.
        Stream::new(
            self.location.clone(),
            HydroNode::Chain {
                first: Box::new(self.ir_node.replace(HydroNode::Placeholder)),
                second: Box::new(other.ir_node.replace(HydroNode::Placeholder)),
                metadata: self.location.new_node_metadata(Stream::<
                    T,
                    L,
                    B2,
                    <O as MinOrder<O2>>::Min,
                    <R as MinRetries<R2>>::Min,
                >::collection_kind()),
            },
        )
    }

    /// Forms the cross-product (Cartesian product, cross-join) of the items in the 2 input streams.
    /// Unlike [`Stream::cross_product`], the output order is totally ordered when the inputs are
    /// because this is compiled into a nested loop.
    pub fn cross_product_nested_loop<T2, O2: Ordering + MinOrder<O>>(
        self,
        other: Stream<T2, L, Bounded, O2, R>,
    ) -> Stream<(T, T2), L, Bounded, <O2 as MinOrder<O>>::Min, R>
    where
        B: IsBounded,
        T: Clone,
        T2: Clone,
    {
        let this = self.make_bounded();
        check_matching_location(&this.location, &other.location);

        // `self` drives the outer loop (left) and `other` the inner loop (right).
        Stream::new(
            this.location.clone(),
            HydroNode::CrossProduct {
                left: Box::new(this.ir_node.replace(HydroNode::Placeholder)),
                right: Box::new(other.ir_node.replace(HydroNode::Placeholder)),
                metadata: this.location.new_node_metadata(Stream::<
                    (T, T2),
                    L,
                    Bounded,
                    <O2 as MinOrder<O>>::Min,
                    R,
                >::collection_kind()),
            },
        )
    }

    /// Creates a [`KeyedStream`] with the same set of keys as `keys`, but with the elements in
    /// `self` used as the values for *each* key.
    ///
    /// This is helpful when "broadcasting" a set of values so that all the keys have the same
    /// values. For example, it can be used to send the same set of elements to several cluster
    /// members, if the membership information is available as a [`KeyedSingleton`].
    ///
    /// # Example
    /// ```rust
    /// # #[cfg(feature = "deploy")] {
    /// # use hydro_lang::prelude::*;
    /// # use futures::StreamExt;
    /// # tokio_test::block_on(hydro_lang::test_util::stream_transform_test(|process| {
    /// # let tick = process.tick();
    /// let keyed_singleton = // { 1: (), 2: () }
    /// # process
    /// #     .source_iter(q!(vec![(1, ()), (2, ())]))
    /// #     .into_keyed()
    /// #     .batch(&tick, nondet!(/** test */))
    /// #     .first();
    /// let stream = // [ "a", "b" ]
    /// # process
    /// #     .source_iter(q!(vec!["a".to_owned(), "b".to_owned()]))
    /// #     .batch(&tick, nondet!(/** test */));
    /// stream.repeat_with_keys(keyed_singleton)
    /// # .entries().all_ticks()
    /// # }, |mut stream| async move {
    /// // { 1: ["a", "b" ], 2: ["a", "b"] }
    /// # let mut results = Vec::new();
    /// # for _ in 0..4 {
    /// #     results.push(stream.next().await.unwrap());
    /// # }
    /// # results.sort();
    /// # assert_eq!(results, vec![(1, "a".to_owned()), (1, "b".to_owned()), (2, "a".to_owned()), (2, "b".to_owned())]);
    /// # }));
    /// # }
    /// ```
    pub fn repeat_with_keys<K, V2>(
        self,
        keys: KeyedSingleton<K, V2, L, Bounded>,
    ) -> KeyedStream<K, T, L, Bounded, O, R>
    where
        B: IsBounded,
        K: Clone,
        T: Clone,
    {
        // Pair every key with every element of `self` via a nested-loop cross
        // product, then re-group by the key. Assuming total order on the key set
        // is safe because grouping into a keyed stream discards key order.
        keys.keys()
            .weaken_retries()
            .assume_ordering_trusted::<TotalOrder>(
                nondet!(/** keyed stream does not depend on ordering of keys */),
            )
            .cross_product_nested_loop(self.make_bounded())
            .into_keyed()
    }

    /// Consumes a stream of `Future<T>`, resolving each future while blocking subgraph
    /// execution until all results are available. The output order is based on when futures
    /// complete, and may be different than the input order.
    ///
    /// Unlike [`Stream::resolve_futures`], which allows the subgraph to continue executing
    /// while futures are pending, this variant blocks until the futures resolve.
    ///
    /// # Example
    /// ```rust
    /// # #[cfg(feature = "deploy")] {
    /// # use std::collections::HashSet;
    /// # use futures::StreamExt;
    /// # use hydro_lang::prelude::*;
    /// # tokio_test::block_on(hydro_lang::test_util::stream_transform_test(|process| {
    /// process
    ///     .source_iter(q!([2, 3, 1, 9, 6, 5, 4, 7, 8]))
    ///     .map(q!(|x| async move {
    ///         tokio::time::sleep(tokio::time::Duration::from_millis(10)).await;
    ///         x
    ///     }))
    ///     .resolve_futures_blocking()
    /// #   },
    /// #   |mut stream| async move {
    /// // 1, 2, 3, 4, 5, 6, 7, 8, 9 (in any order)
    /// #       let mut output = HashSet::new();
    /// #       for _ in 1..10 {
    /// #           output.insert(stream.next().await.unwrap());
    /// #       }
    /// #       assert_eq!(
    /// #           output,
    /// #           HashSet::<i32>::from_iter(1..10)
    /// #       );
    /// #   },
    /// # ));
    /// # }
    /// ```
    pub fn resolve_futures_blocking(self) -> Stream<T::Output, L, B, NoOrder, R>
    where
        T: Future,
    {
        // Completion order is arbitrary, so the output is marked `NoOrder`.
        Stream::new(
            self.location.clone(),
            HydroNode::ResolveFuturesBlocking {
                input: Box::new(self.ir_node.replace(HydroNode::Placeholder)),
                metadata: self
                    .location
                    .new_node_metadata(Stream::<T::Output, L, B, NoOrder, R>::collection_kind()),
            },
        )
    }

    /// Returns a [`Singleton`] containing `true` if the stream has no elements, or `false` otherwise.
    ///
    /// # Example
    /// ```rust
    /// # #[cfg(feature = "deploy")] {
    /// # use hydro_lang::prelude::*;
    /// # use futures::StreamExt;
    /// # tokio_test::block_on(hydro_lang::test_util::stream_transform_test(|process| {
    /// let tick = process.tick();
    /// let empty: Stream<i32, _, Bounded> = process
    ///   .source_iter(q!(Vec::<i32>::new()))
    ///   .batch(&tick, nondet!(/** test */));
    /// empty.is_empty().all_ticks()
    /// # }, |mut stream| async move {
    /// // true
    /// # assert_eq!(stream.next().await.unwrap(), true);
    /// # }));
    /// # }
    /// ```
    #[expect(clippy::wrong_self_convention, reason = "stream function naming")]
    pub fn is_empty(self) -> Singleton<bool, L, Bounded>
    where
        B: IsBounded,
    {
        // Emptiness is order-insensitive, so assuming total order to take the
        // first element (and check for its absence) is safe.
        self.make_bounded()
            .assume_ordering_trusted::<TotalOrder>(
                nondet!(/** is_empty intermediates unaffected by order */),
            )
            .first()
            .is_none()
    }
}
2608
impl<'a, K, V1, L, B: Boundedness, O: Ordering, R: Retries> Stream<(K, V1), L, B, O, R>
where
    L: Location<'a>,
{
    #[expect(clippy::type_complexity, reason = "ordering / retries propagation")]
    /// Given two streams of pairs `(K, V1)` and `(K, V2)`, produces a new stream of nested pairs `(K, (V1, V2))`
    /// by equi-joining the two streams on the key attribute `K`.
    ///
    /// When the right-hand side is [`Bounded`], the join accumulates the right side first
    /// and streams the left side through, preserving the left side's ordering. When both
    /// sides are [`Unbounded`], a symmetric hash join is used and ordering is [`NoOrder`].
    ///
    /// # Example
    /// ```rust
    /// # #[cfg(feature = "deploy")] {
    /// # use hydro_lang::prelude::*;
    /// # use std::collections::HashSet;
    /// # use futures::StreamExt;
    /// # tokio_test::block_on(hydro_lang::test_util::stream_transform_test(|process| {
    /// let tick = process.tick();
    /// let stream1 = process.source_iter(q!(vec![(1, 'a'), (2, 'b')]));
    /// let stream2 = process.source_iter(q!(vec![(1, 'x'), (2, 'y')]));
    /// stream1.join(stream2)
    /// # }, |mut stream| async move {
    /// // (1, ('a', 'x')), (2, ('b', 'y'))
    /// # let expected = HashSet::from([(1, ('a', 'x')), (2, ('b', 'y'))]);
    /// # stream.map(|i| assert!(expected.contains(&i)));
    /// # }));
    /// # }
    /// ```
    pub fn join<V2, B2: Boundedness, O2: Ordering, R2: Retries>(
        self,
        n: Stream<(K, V2), L, B2, O2, R2>,
    ) -> Stream<(K, (V1, V2)), L, B, B2::PreserveOrderIfBounded<O>, <R as MinRetries<R2>>::Min>
    where
        K: Eq + Hash + Clone,
        R: MinRetries<R2>,
        V1: Clone,
        V2: Clone,
    {
        check_matching_location(&self.location, &n.location);

        // Pick the join strategy based on the right side's boundedness: a bounded
        // right side allows a half join (accumulate right, stream left through),
        // otherwise fall back to a full symmetric join.
        let ir_node = if B2::BOUNDED {
            HydroNode::JoinHalf {
                left: Box::new(self.ir_node.replace(HydroNode::Placeholder)),
                right: Box::new(n.ir_node.replace(HydroNode::Placeholder)),
                metadata: self.location.new_node_metadata(Stream::<
                    (K, (V1, V2)),
                    L,
                    B,
                    B2::PreserveOrderIfBounded<O>,
                    <R as MinRetries<R2>>::Min,
                >::collection_kind()),
            }
        } else {
            HydroNode::Join {
                left: Box::new(self.ir_node.replace(HydroNode::Placeholder)),
                right: Box::new(n.ir_node.replace(HydroNode::Placeholder)),
                metadata: self.location.new_node_metadata(Stream::<
                    (K, (V1, V2)),
                    L,
                    B,
                    B2::PreserveOrderIfBounded<O>,
                    <R as MinRetries<R2>>::Min,
                >::collection_kind()),
            }
        };

        Stream::new(self.location.clone(), ir_node)
    }

    /// Given a stream of pairs `(K, V1)` and a bounded stream of keys `K`,
    /// computes the anti-join of the items in the input -- i.e. returns
    /// unique items in the first input that do not have a matching key
    /// in the second input.
    ///
    /// # Example
    /// ```rust
    /// # #[cfg(feature = "deploy")] {
    /// # use hydro_lang::prelude::*;
    /// # use futures::StreamExt;
    /// # tokio_test::block_on(hydro_lang::test_util::stream_transform_test(|process| {
    /// let tick = process.tick();
    /// let stream = process
    ///   .source_iter(q!(vec![ (1, 'a'), (2, 'b'), (3, 'c'), (4, 'd') ]))
    ///   .batch(&tick, nondet!(/** test */));
    /// let batch = process
    ///   .source_iter(q!(vec![1, 2]))
    ///   .batch(&tick, nondet!(/** test */));
    /// stream.anti_join(batch).all_ticks()
    /// # }, |mut stream| async move {
    /// # for w in vec![(3, 'c'), (4, 'd')] {
    /// #     assert_eq!(stream.next().await.unwrap(), w);
    /// # }
    /// # }));
    /// # }
    /// ```
    pub fn anti_join<O2: Ordering, R2: Retries>(
        self,
        n: Stream<K, L, Bounded, O2, R2>,
    ) -> Stream<(K, V1), L, B, O, R>
    where
        K: Eq + Hash,
    {
        check_matching_location(&self.location, &n.location);

        // `pos` is the stream being filtered, `neg` the bounded set of keys to drop.
        Stream::new(
            self.location.clone(),
            HydroNode::AntiJoin {
                pos: Box::new(self.ir_node.replace(HydroNode::Placeholder)),
                neg: Box::new(n.ir_node.replace(HydroNode::Placeholder)),
                metadata: self
                    .location
                    .new_node_metadata(Stream::<(K, V1), L, B, O, R>::collection_kind()),
            },
        )
    }
}
2725
2726impl<'a, K, V, L: Location<'a>, B: Boundedness, O: Ordering, R: Retries>
2727    Stream<(K, V), L, B, O, R>
2728{
2729    /// Transforms this stream into a [`KeyedStream`], where the first element of each tuple
2730    /// is used as the key and the second element is added to the entries associated with that key.
2731    ///
2732    /// Because [`KeyedStream`] lazily groups values into buckets, this operator has zero computational
2733    /// cost and _does not_ require that the key type is hashable. Keyed streams are useful for
2734    /// performing grouped aggregations, but also for more precise ordering guarantees such as
2735    /// total ordering _within_ each group but no ordering _across_ groups.
2736    ///
2737    /// # Example
2738    /// ```rust
2739    /// # #[cfg(feature = "deploy")] {
2740    /// # use hydro_lang::prelude::*;
2741    /// # use futures::StreamExt;
2742    /// # tokio_test::block_on(hydro_lang::test_util::stream_transform_test(|process| {
2743    /// process
2744    ///     .source_iter(q!(vec![(1, 2), (1, 3), (2, 4)]))
2745    ///     .into_keyed()
2746    /// #   .entries()
2747    /// # }, |mut stream| async move {
2748    /// // { 1: [2, 3], 2: [4] }
2749    /// # for w in vec![(1, 2), (1, 3), (2, 4)] {
2750    /// #     assert_eq!(stream.next().await.unwrap(), w);
2751    /// # }
2752    /// # }));
2753    /// # }
2754    /// ```
2755    pub fn into_keyed(self) -> KeyedStream<K, V, L, B, O, R> {
2756        KeyedStream::new(
2757            self.location.clone(),
2758            HydroNode::Cast {
2759                inner: Box::new(self.ir_node.replace(HydroNode::Placeholder)),
2760                metadata: self
2761                    .location
2762                    .new_node_metadata(KeyedStream::<K, V, L, B, O, R>::collection_kind()),
2763            },
2764        )
2765    }
2766}
2767
impl<'a, K, V, L, O: Ordering, R: Retries> Stream<(K, V), Tick<L>, Bounded, O, R>
where
    K: Eq + Hash,
    L: Location<'a>,
{
    /// Given a stream of pairs `(K, V)`, produces a new stream of unique keys `K`.
    /// # Example
    /// ```rust
    /// # #[cfg(feature = "deploy")] {
    /// # use hydro_lang::prelude::*;
    /// # use futures::StreamExt;
    /// # tokio_test::block_on(hydro_lang::test_util::stream_transform_test(|process| {
    /// let tick = process.tick();
    /// let numbers = process.source_iter(q!(vec![(1, 2), (2, 3), (1, 3), (2, 4)]));
    /// let batch = numbers.batch(&tick, nondet!(/** test */));
    /// batch.keys().all_ticks()
    /// # }, |mut stream| async move {
    /// // 1, 2
    /// # assert_eq!(stream.next().await.unwrap(), 1);
    /// # assert_eq!(stream.next().await.unwrap(), 2);
    /// # }));
    /// # }
    /// ```
    pub fn keys(self) -> Stream<K, Tick<L>, Bounded, NoOrder, ExactlyOnce> {
        // Deduplicate keys by folding each key's values into `()` (values are
        // discarded, so the fold is trivially commutative and idempotent), then
        // take the keys of the resulting keyed singleton.
        self.into_keyed()
            .fold(
                q!(|| ()),
                q!(
                    |_, _| {},
                    commutative = manual_proof!(/** values are ignored */),
                    idempotent = manual_proof!(/** values are ignored */)
                ),
            )
            .keys()
    }
}
2804
2805impl<'a, T, L, B: Boundedness, O: Ordering, R: Retries> Stream<T, Atomic<L>, B, O, R>
2806where
2807    L: Location<'a> + NoTick,
2808{
2809    /// Returns a stream corresponding to the latest batch of elements being atomically
2810    /// processed. These batches are guaranteed to be contiguous across ticks and preserve
2811    /// the order of the input.
2812    ///
2813    /// # Non-Determinism
2814    /// The batch boundaries are non-deterministic and may change across executions.
2815    pub fn batch_atomic(
2816        self,
2817        tick: &Tick<L>,
2818        _nondet: NonDet,
2819    ) -> Stream<T, Tick<L>, Bounded, O, R> {
2820        Stream::new(
2821            tick.clone(),
2822            HydroNode::Batch {
2823                inner: Box::new(self.ir_node.replace(HydroNode::Placeholder)),
2824                metadata: tick
2825                    .new_node_metadata(Stream::<T, Tick<L>, Bounded, O, R>::collection_kind()),
2826            },
2827        )
2828    }
2829
2830    /// Yields the elements of this stream back into a top-level, asynchronous execution context.
2831    /// See [`Stream::atomic`] for more details.
2832    pub fn end_atomic(self) -> Stream<T, L, B, O, R> {
2833        Stream::new(
2834            self.location.tick.l.clone(),
2835            HydroNode::EndAtomic {
2836                inner: Box::new(self.ir_node.replace(HydroNode::Placeholder)),
2837                metadata: self
2838                    .location
2839                    .tick
2840                    .l
2841                    .new_node_metadata(Stream::<T, L, B, O, R>::collection_kind()),
2842            },
2843        )
2844    }
2845}
2846
impl<'a, F, T, L, B: Boundedness, O: Ordering, R: Retries> Stream<F, L, B, O, R>
where
    L: Location<'a> + NoTick + NoAtomic,
    F: Future<Output = T>,
{
    /// Consumes a stream of `Future<T>`, produces a new stream of the resulting `T` outputs.
    /// Future outputs are produced as available, regardless of input arrival order.
    ///
    /// # Example
    /// ```rust
    /// # #[cfg(feature = "deploy")] {
    /// # use std::collections::HashSet;
    /// # use futures::StreamExt;
    /// # use hydro_lang::prelude::*;
    /// # tokio_test::block_on(hydro_lang::test_util::stream_transform_test(|process| {
    /// process.source_iter(q!([2, 3, 1, 9, 6, 5, 4, 7, 8]))
    ///     .map(q!(|x| async move {
    ///         tokio::time::sleep(tokio::time::Duration::from_millis(10)).await;
    ///         x
    ///     }))
    ///     .resolve_futures()
    /// #   },
    /// #   |mut stream| async move {
    /// // 1, 2, 3, 4, 5, 6, 7, 8, 9 (in any order)
    /// #       let mut output = HashSet::new();
    /// #       for _ in 1..10 {
    /// #           output.insert(stream.next().await.unwrap());
    /// #       }
    /// #       assert_eq!(
    /// #           output,
    /// #           HashSet::<i32>::from_iter(1..10)
    /// #       );
    /// #   },
    /// # ));
    /// # }
    /// ```
    pub fn resolve_futures(self) -> Stream<T, L, Unbounded, NoOrder, R> {
        // Outputs are emitted in completion order, so the result is `NoOrder`.
        Stream::new(
            self.location.clone(),
            HydroNode::ResolveFutures {
                input: Box::new(self.ir_node.replace(HydroNode::Placeholder)),
                metadata: self
                    .location
                    .new_node_metadata(Stream::<T, L, Unbounded, NoOrder, R>::collection_kind()),
            },
        )
    }

    /// Consumes a stream of `Future<T>`, produces a new stream of the resulting `T` outputs.
    /// Future outputs are produced in the same order as the input stream.
    ///
    /// # Example
    /// ```rust
    /// # #[cfg(feature = "deploy")] {
    /// # use std::collections::HashSet;
    /// # use futures::StreamExt;
    /// # use hydro_lang::prelude::*;
    /// # tokio_test::block_on(hydro_lang::test_util::stream_transform_test(|process| {
    /// process.source_iter(q!([2, 3, 1, 9, 6, 5, 4, 7, 8]))
    ///     .map(q!(|x| async move {
    ///         tokio::time::sleep(tokio::time::Duration::from_millis(10)).await;
    ///         x
    ///     }))
    ///     .resolve_futures_ordered()
    /// #   },
    /// #   |mut stream| async move {
    /// // 2, 3, 1, 9, 6, 5, 4, 7, 8
    /// #       let mut output = Vec::new();
    /// #       for _ in 1..10 {
    /// #           output.push(stream.next().await.unwrap());
    /// #       }
    /// #       assert_eq!(
    /// #           output,
    /// #           vec![2, 3, 1, 9, 6, 5, 4, 7, 8]
    /// #       );
    /// #   },
    /// # ));
    /// # }
    /// ```
    pub fn resolve_futures_ordered(self) -> Stream<T, L, Unbounded, O, R> {
        // Outputs are emitted in input order, so the input's ordering `O` carries over.
        Stream::new(
            self.location.clone(),
            HydroNode::ResolveFuturesOrdered {
                input: Box::new(self.ir_node.replace(HydroNode::Placeholder)),
                metadata: self
                    .location
                    .new_node_metadata(Stream::<T, L, Unbounded, O, R>::collection_kind()),
            },
        )
    }
}
2936
impl<'a, T, L, O: Ordering, R: Retries> Stream<T, Tick<L>, Bounded, O, R>
where
    L: Location<'a>,
{
    /// Asynchronously yields this batch of elements outside the tick as an unbounded stream,
    /// which will stream all the elements across _all_ tick iterations by concatenating the batches.
    pub fn all_ticks(self) -> Stream<T, L, Unbounded, O, R> {
        // `YieldConcat` concatenates each tick's batch into the outer (non-tick) stream.
        Stream::new(
            self.location.outer().clone(),
            HydroNode::YieldConcat {
                inner: Box::new(self.ir_node.replace(HydroNode::Placeholder)),
                metadata: self
                    .location
                    .outer()
                    .new_node_metadata(Stream::<T, L, Unbounded, O, R>::collection_kind()),
            },
        )
    }

    /// Synchronously yields this batch of elements outside the tick as an unbounded stream,
    /// which will stream all the elements across _all_ tick iterations by concatenating the batches.
    ///
    /// Unlike [`Stream::all_ticks`], this preserves synchronous execution, as the output stream
    /// is emitted in an [`Atomic`] context that will process elements synchronously with the input
    /// stream's [`Tick`] context.
    pub fn all_ticks_atomic(self) -> Stream<T, Atomic<L>, Unbounded, O, R> {
        // The output's atomic context is tied to this stream's own tick.
        let out_location = Atomic {
            tick: self.location.clone(),
        };

        Stream::new(
            out_location.clone(),
            HydroNode::YieldConcat {
                inner: Box::new(self.ir_node.replace(HydroNode::Placeholder)),
                metadata: out_location
                    .new_node_metadata(Stream::<T, Atomic<L>, Unbounded, O, R>::collection_kind()),
            },
        )
    }

    /// Transforms the stream using the given closure in "stateful" mode, where stateful operators
    /// such as `fold` retain their memory across ticks rather than resetting across batches of
    /// input.
    ///
    /// This API is particularly useful for stateful computation on batches of data, such as
    /// maintaining an accumulated state that is up to date with the current batch.
    ///
    /// # Example
    /// ```rust
    /// # #[cfg(feature = "deploy")] {
    /// # use hydro_lang::prelude::*;
    /// # use futures::StreamExt;
    /// # tokio_test::block_on(hydro_lang::test_util::stream_transform_test(|process| {
    /// let tick = process.tick();
    /// # // ticks are lazy by default, forces the second tick to run
    /// # tick.spin_batch(q!(1)).all_ticks().for_each(q!(|_| {}));
    /// # let batch_first_tick = process
    /// #   .source_iter(q!(vec![1, 2, 3, 4]))
    /// #  .batch(&tick, nondet!(/** test */));
    /// # let batch_second_tick = process
    /// #   .source_iter(q!(vec![5, 6, 7]))
    /// #   .batch(&tick, nondet!(/** test */))
    /// #   .defer_tick(); // appears on the second tick
    /// let input = // [1, 2, 3, 4 (first batch), 5, 6, 7 (second batch)]
    /// # batch_first_tick.chain(batch_second_tick).all_ticks();
    ///
    /// input.batch(&tick, nondet!(/** test */))
    ///     .across_ticks(|s| s.count()).all_ticks()
    /// # }, |mut stream| async move {
    /// // [4, 7]
    /// assert_eq!(stream.next().await.unwrap(), 4);
    /// assert_eq!(stream.next().await.unwrap(), 7);
    /// # }));
    /// # }
    /// ```
    pub fn across_ticks<Out: BatchAtomic>(
        self,
        thunk: impl FnOnce(Stream<T, Atomic<L>, Unbounded, O, R>) -> Out,
    ) -> Out::Batched {
        // Lift into an atomic context (so state persists across ticks), apply the
        // user transformation, then batch the result back into the tick.
        thunk(self.all_ticks_atomic()).batched_atomic()
    }

    /// Shifts the elements in `self` to the **next tick**, so that the returned stream at tick `T`
    /// always has the elements of `self` at tick `T - 1`.
    ///
    /// At tick `0`, the output stream is empty, since there is no previous tick.
    ///
    /// This operator enables stateful iterative processing with ticks, by sending data from one
    /// tick to the next. For example, you can use it to compare inputs across consecutive batches.
    ///
    /// # Example
    /// ```rust
    /// # #[cfg(feature = "deploy")] {
    /// # use hydro_lang::prelude::*;
    /// # use futures::StreamExt;
    /// # tokio_test::block_on(hydro_lang::test_util::stream_transform_test(|process| {
    /// let tick = process.tick();
    /// // ticks are lazy by default, forces the second tick to run
    /// tick.spin_batch(q!(1)).all_ticks().for_each(q!(|_| {}));
    ///
    /// let batch_first_tick = process
    ///   .source_iter(q!(vec![1, 2, 3, 4]))
    ///   .batch(&tick, nondet!(/** test */));
    /// let batch_second_tick = process
    ///   .source_iter(q!(vec![0, 3, 4, 5, 6]))
    ///   .batch(&tick, nondet!(/** test */))
    ///   .defer_tick(); // appears on the second tick
    /// let changes_across_ticks = batch_first_tick.chain(batch_second_tick);
    ///
    /// changes_across_ticks.clone().filter_not_in(
    ///     changes_across_ticks.defer_tick() // the elements from the previous tick
    /// ).all_ticks()
    /// # }, |mut stream| async move {
    /// // [1, 2, 3, 4 /* first tick */, 0, 5, 6 /* second tick */]
    /// # for w in vec![1, 2, 3, 4, 0, 5, 6] {
    /// #     assert_eq!(stream.next().await.unwrap(), w);
    /// # }
    /// # }));
    /// # }
    /// ```
    pub fn defer_tick(self) -> Stream<T, Tick<L>, Bounded, O, R> {
        Stream::new(
            self.location.clone(),
            HydroNode::DeferTick {
                input: Box::new(self.ir_node.replace(HydroNode::Placeholder)),
                metadata: self
                    .location
                    .new_node_metadata(Stream::<T, Tick<L>, Bounded, O, R>::collection_kind()),
            },
        )
    }
}
3069
3070#[cfg(test)]
3071mod tests {
3072    #[cfg(feature = "deploy")]
3073    use futures::{SinkExt, StreamExt};
3074    #[cfg(feature = "deploy")]
3075    use hydro_deploy::Deployment;
3076    #[cfg(feature = "deploy")]
3077    use serde::{Deserialize, Serialize};
3078    #[cfg(any(feature = "deploy", feature = "sim"))]
3079    use stageleft::q;
3080
3081    #[cfg(any(feature = "deploy", feature = "sim"))]
3082    use crate::compile::builder::FlowBuilder;
3083    #[cfg(feature = "deploy")]
3084    use crate::live_collections::sliced::sliced;
3085    #[cfg(feature = "deploy")]
3086    use crate::live_collections::stream::ExactlyOnce;
3087    #[cfg(feature = "sim")]
3088    use crate::live_collections::stream::NoOrder;
3089    #[cfg(any(feature = "deploy", feature = "sim"))]
3090    use crate::live_collections::stream::TotalOrder;
3091    #[cfg(any(feature = "deploy", feature = "sim"))]
3092    use crate::location::Location;
3093    #[cfg(feature = "sim")]
3094    use crate::networking::TCP;
3095    #[cfg(any(feature = "deploy", feature = "sim"))]
3096    use crate::nondet::nondet;
3097
3098    mod backtrace_chained_ops;
3099
3100    #[cfg(feature = "deploy")]
3101    struct P1 {}
3102    #[cfg(feature = "deploy")]
3103    struct P2 {}
3104
3105    #[cfg(feature = "deploy")]
3106    #[derive(Serialize, Deserialize, Debug)]
3107    struct SendOverNetwork {
3108        n: u32,
3109    }
3110
3111    #[cfg(feature = "deploy")]
3112    #[tokio::test]
3113    async fn first_ten_distributed() {
3114        use crate::networking::TCP;
3115
3116        let mut deployment = Deployment::new();
3117
3118        let mut flow = FlowBuilder::new();
3119        let first_node = flow.process::<P1>();
3120        let second_node = flow.process::<P2>();
3121        let external = flow.external::<P2>();
3122
3123        let numbers = first_node.source_iter(q!(0..10));
3124        let out_port = numbers
3125            .map(q!(|n| SendOverNetwork { n }))
3126            .send(&second_node, TCP.fail_stop().bincode())
3127            .send_bincode_external(&external);
3128
3129        let nodes = flow
3130            .with_process(&first_node, deployment.Localhost())
3131            .with_process(&second_node, deployment.Localhost())
3132            .with_external(&external, deployment.Localhost())
3133            .deploy(&mut deployment);
3134
3135        deployment.deploy().await.unwrap();
3136
3137        let mut external_out = nodes.connect(out_port).await;
3138
3139        deployment.start().await.unwrap();
3140
3141        for i in 0..10 {
3142            assert_eq!(external_out.next().await.unwrap().n, i);
3143        }
3144    }
3145
    // `.first()` on a three-element stream must yield exactly one element per
    // tick, so the externally observed count is 1.
    #[cfg(feature = "deploy")]
    #[tokio::test]
    async fn first_cardinality() {
        let mut deployment = Deployment::new();

        let mut flow = FlowBuilder::new();
        let node = flow.process::<()>();
        let external = flow.external::<()>();

        let node_tick = node.tick();
        // singleton([1,2,3]) -> stream -> flatten -> first() -> one element.
        let count = node_tick
            .singleton(q!([1, 2, 3]))
            .into_stream()
            .flatten_ordered()
            .first()
            .into_stream()
            .count()
            .all_ticks()
            .send_bincode_external(&external);

        let nodes = flow
            .with_process(&node, deployment.Localhost())
            .with_external(&external, deployment.Localhost())
            .deploy(&mut deployment);

        deployment.deploy().await.unwrap();

        let mut external_out = nodes.connect(count).await;

        deployment.start().await.unwrap();

        assert_eq!(external_out.next().await.unwrap(), 1);
    }
3179
    // An unbounded `reduce` must carry its accumulator across inputs: after
    // sending 1 then 2, the sampled outputs are the running sums 1 and 3.
    #[cfg(feature = "deploy")]
    #[tokio::test]
    async fn unbounded_reduce_remembers_state() {
        let mut deployment = Deployment::new();

        let mut flow = FlowBuilder::new();
        let node = flow.process::<()>();
        let external = flow.external::<()>();

        let (input_port, input) = node.source_external_bincode(&external);
        let out = input
            .reduce(q!(|acc, v| *acc += v))
            .sample_eager(nondet!(/** test */))
            .send_bincode_external(&external);

        let nodes = flow
            .with_process(&node, deployment.Localhost())
            .with_external(&external, deployment.Localhost())
            .deploy(&mut deployment);

        deployment.deploy().await.unwrap();

        let mut external_in = nodes.connect(input_port).await;
        let mut external_out = nodes.connect(out).await;

        deployment.start().await.unwrap();

        external_in.send(1).await.unwrap();
        assert_eq!(external_out.next().await.unwrap(), 1);

        external_in.send(2).await.unwrap();
        assert_eq!(external_out.next().await.unwrap(), 3);
    }
3213
    // `cross_singleton` against a fold over a bounded source: the fold of
    // [1, 2, 3] is 6, so every external input pairs with 6.
    #[cfg(feature = "deploy")]
    #[tokio::test]
    async fn top_level_bounded_cross_singleton() {
        let mut deployment = Deployment::new();

        let mut flow = FlowBuilder::new();
        let node = flow.process::<()>();
        let external = flow.external::<()>();

        let (input_port, input) =
            node.source_external_bincode::<_, _, TotalOrder, ExactlyOnce>(&external);

        let out = input
            .cross_singleton(
                node.source_iter(q!(vec![1, 2, 3]))
                    .fold(q!(|| 0), q!(|acc, v| *acc += v)),
            )
            .send_bincode_external(&external);

        let nodes = flow
            .with_process(&node, deployment.Localhost())
            .with_external(&external, deployment.Localhost())
            .deploy(&mut deployment);

        deployment.deploy().await.unwrap();

        let mut external_in = nodes.connect(input_port).await;
        let mut external_out = nodes.connect(out).await;

        deployment.start().await.unwrap();

        external_in.send(1).await.unwrap();
        assert_eq!(external_out.next().await.unwrap(), (1, 6));

        external_in.send(2).await.unwrap();
        assert_eq!(external_out.next().await.unwrap(), (2, 6));
    }
3251
    // A reduce over a bounded source, used inside `sliced!`, yields exactly one
    // value, so `count()` is 1 for every external input.
    #[cfg(feature = "deploy")]
    #[tokio::test]
    async fn top_level_bounded_reduce_cardinality() {
        let mut deployment = Deployment::new();

        let mut flow = FlowBuilder::new();
        let node = flow.process::<()>();
        let external = flow.external::<()>();

        let (input_port, input) =
            node.source_external_bincode::<_, _, TotalOrder, ExactlyOnce>(&external);

        let out = sliced! {
            let input = use(input, nondet!(/** test */));
            let v = use(node.source_iter(q!(vec![1, 2, 3])).reduce(q!(|acc, v| *acc += v)), nondet!(/** test */));
            input.cross_singleton(v.into_stream().count())
        }
        .send_bincode_external(&external);

        let nodes = flow
            .with_process(&node, deployment.Localhost())
            .with_external(&external, deployment.Localhost())
            .deploy(&mut deployment);

        deployment.deploy().await.unwrap();

        let mut external_in = nodes.connect(input_port).await;
        let mut external_out = nodes.connect(out).await;

        deployment.start().await.unwrap();

        external_in.send(1).await.unwrap();
        assert_eq!(external_out.next().await.unwrap(), (1, 1));

        external_in.send(2).await.unwrap();
        assert_eq!(external_out.next().await.unwrap(), (2, 1));
    }
3289
    // Same as `top_level_bounded_reduce_cardinality`, but routes the reduce
    // through `into_singleton()` first; cardinality must still be exactly 1.
    #[cfg(feature = "deploy")]
    #[tokio::test]
    async fn top_level_bounded_into_singleton_cardinality() {
        let mut deployment = Deployment::new();

        let mut flow = FlowBuilder::new();
        let node = flow.process::<()>();
        let external = flow.external::<()>();

        let (input_port, input) =
            node.source_external_bincode::<_, _, TotalOrder, ExactlyOnce>(&external);

        let out = sliced! {
            let input = use(input, nondet!(/** test */));
            let v = use(node.source_iter(q!(vec![1, 2, 3])).reduce(q!(|acc, v| *acc += v)).into_singleton(), nondet!(/** test */));
            input.cross_singleton(v.into_stream().count())
        }
        .send_bincode_external(&external);

        let nodes = flow
            .with_process(&node, deployment.Localhost())
            .with_external(&external, deployment.Localhost())
            .deploy(&mut deployment);

        deployment.deploy().await.unwrap();

        let mut external_in = nodes.connect(input_port).await;
        let mut external_out = nodes.connect(out).await;

        deployment.start().await.unwrap();

        external_in.send(1).await.unwrap();
        assert_eq!(external_out.next().await.unwrap(), (1, 1));

        external_in.send(2).await.unwrap();
        assert_eq!(external_out.next().await.unwrap(), (2, 1));
    }
3327
    // A fold computed inside an atomic section, snapshotted per tick: every
    // batched external input must pair with the full sum 6 of [1, 2, 3].
    #[cfg(feature = "deploy")]
    #[tokio::test]
    async fn atomic_fold_replays_each_tick() {
        let mut deployment = Deployment::new();

        let mut flow = FlowBuilder::new();
        let node = flow.process::<()>();
        let external = flow.external::<()>();

        let (input_port, input) =
            node.source_external_bincode::<_, _, TotalOrder, ExactlyOnce>(&external);
        let tick = node.tick();

        let out = input
            .batch(&tick, nondet!(/** test */))
            .cross_singleton(
                node.source_iter(q!(vec![1, 2, 3]))
                    .atomic()
                    .fold(q!(|| 0), q!(|acc, v| *acc += v))
                    .snapshot_atomic(&tick, nondet!(/** test */)),
            )
            .all_ticks()
            .send_bincode_external(&external);

        let nodes = flow
            .with_process(&node, deployment.Localhost())
            .with_external(&external, deployment.Localhost())
            .deploy(&mut deployment);

        deployment.deploy().await.unwrap();

        let mut external_in = nodes.connect(input_port).await;
        let mut external_out = nodes.connect(out).await;

        deployment.start().await.unwrap();

        // Both inputs land in different ticks yet see the same snapshot value.
        external_in.send(1).await.unwrap();
        assert_eq!(external_out.next().await.unwrap(), (1, 6));

        external_in.send(2).await.unwrap();
        assert_eq!(external_out.next().await.unwrap(), (2, 6));
    }
3370
    // An unbounded `scan` must keep its accumulator across inputs: sending 1
    // then 2 emits the running sums 1 and 3.
    #[cfg(feature = "deploy")]
    #[tokio::test]
    async fn unbounded_scan_remembers_state() {
        let mut deployment = Deployment::new();

        let mut flow = FlowBuilder::new();
        let node = flow.process::<()>();
        let external = flow.external::<()>();

        let (input_port, input) = node.source_external_bincode(&external);
        let out = input
            .scan(
                q!(|| 0),
                q!(|acc, v| {
                    *acc += v;
                    Some(*acc)
                }),
            )
            .send_bincode_external(&external);

        let nodes = flow
            .with_process(&node, deployment.Localhost())
            .with_external(&external, deployment.Localhost())
            .deploy(&mut deployment);

        deployment.deploy().await.unwrap();

        let mut external_in = nodes.connect(input_port).await;
        let mut external_out = nodes.connect(out).await;

        deployment.start().await.unwrap();

        external_in.send(1).await.unwrap();
        assert_eq!(external_out.next().await.unwrap(), 1);

        external_in.send(2).await.unwrap();
        assert_eq!(external_out.next().await.unwrap(), 3);
    }
3409
3410    #[cfg(feature = "deploy")]
3411    #[tokio::test]
3412    async fn unbounded_enumerate_remembers_state() {
3413        let mut deployment = Deployment::new();
3414
3415        let mut flow = FlowBuilder::new();
3416        let node = flow.process::<()>();
3417        let external = flow.external::<()>();
3418
3419        let (input_port, input) = node.source_external_bincode(&external);
3420        let out = input.enumerate().send_bincode_external(&external);
3421
3422        let nodes = flow
3423            .with_process(&node, deployment.Localhost())
3424            .with_external(&external, deployment.Localhost())
3425            .deploy(&mut deployment);
3426
3427        deployment.deploy().await.unwrap();
3428
3429        let mut external_in = nodes.connect(input_port).await;
3430        let mut external_out = nodes.connect(out).await;
3431
3432        deployment.start().await.unwrap();
3433
3434        external_in.send(1).await.unwrap();
3435        assert_eq!(external_out.next().await.unwrap(), (0, 1));
3436
3437        external_in.send(2).await.unwrap();
3438        assert_eq!(external_out.next().await.unwrap(), (1, 2));
3439    }
3440
    // `unique` must remember values seen in earlier arrivals: resending 1 is
    // suppressed, so the next emitted value after it is 3.
    #[cfg(feature = "deploy")]
    #[tokio::test]
    async fn unbounded_unique_remembers_state() {
        let mut deployment = Deployment::new();

        let mut flow = FlowBuilder::new();
        let node = flow.process::<()>();
        let external = flow.external::<()>();

        let (input_port, input) =
            node.source_external_bincode::<_, _, TotalOrder, ExactlyOnce>(&external);
        let out = input.unique().send_bincode_external(&external);

        let nodes = flow
            .with_process(&node, deployment.Localhost())
            .with_external(&external, deployment.Localhost())
            .deploy(&mut deployment);

        deployment.deploy().await.unwrap();

        let mut external_in = nodes.connect(input_port).await;
        let mut external_out = nodes.connect(out).await;

        deployment.start().await.unwrap();

        external_in.send(1).await.unwrap();
        assert_eq!(external_out.next().await.unwrap(), 1);

        external_in.send(2).await.unwrap();
        assert_eq!(external_out.next().await.unwrap(), 2);

        // Duplicate 1 produces no output; 3 is the next distinct value.
        external_in.send(1).await.unwrap();
        external_in.send(3).await.unwrap();
        assert_eq!(external_out.next().await.unwrap(), 3);
    }
3476
    // Batch sizes are nondeterministic: exhaustive simulation must find some
    // schedule where a tick's batch holds fewer than all 3 elements, making the
    // assertion fail (hence `should_panic`).
    #[cfg(feature = "sim")]
    #[test]
    #[should_panic]
    fn sim_batch_nondet_size() {
        let mut flow = FlowBuilder::new();
        let node = flow.process::<()>();

        let (in_send, input) = node.sim_input::<_, TotalOrder, _>();

        let tick = node.tick();
        let out_recv = input
            .batch(&tick, nondet!(/** test */))
            .count()
            .all_ticks()
            .sim_output();

        flow.sim().exhaustive(async || {
            in_send.send(());
            in_send.send(());
            in_send.send(());

            assert_eq!(out_recv.next().await.unwrap(), 3); // fails with nondet batching
        });
    }
3501
3502    #[cfg(feature = "sim")]
3503    #[test]
3504    fn sim_batch_preserves_order() {
3505        let mut flow = FlowBuilder::new();
3506        let node = flow.process::<()>();
3507
3508        let (in_send, input) = node.sim_input();
3509
3510        let tick = node.tick();
3511        let out_recv = input
3512            .batch(&tick, nondet!(/** test */))
3513            .all_ticks()
3514            .sim_output();
3515
3516        flow.sim().exhaustive(async || {
3517            in_send.send(1);
3518            in_send.send(2);
3519            in_send.send(3);
3520
3521            out_recv.assert_yields_only([1, 2, 3]).await;
3522        });
3523    }
3524
    // For a `NoOrder` stream, batching may shuffle elements across ticks. The
    // closure panics (as required by `should_panic`) once it observes the
    // batching {1, 3} then {2}, whose per-tick (min, max) pairs are (1, 3) and
    // (2, 2) — proving a shuffled schedule was explored.
    #[cfg(feature = "sim")]
    #[test]
    #[should_panic]
    fn sim_batch_unordered_shuffles() {
        let mut flow = FlowBuilder::new();
        let node = flow.process::<()>();

        let (in_send, input) = node.sim_input::<_, NoOrder, _>();

        let tick = node.tick();
        let batch = input.batch(&tick, nondet!(/** test */));
        let out_recv = batch
            .clone()
            .min()
            .zip(batch.max())
            .all_ticks()
            .sim_output();

        flow.sim().exhaustive(async || {
            in_send.send_many_unordered([1, 2, 3]);

            if out_recv.collect::<Vec<_>>().await == vec![(1, 3), (2, 2)] {
                panic!("saw both (1, 3) and (2, 2), so batching must have shuffled the order");
            }
        });
    }
3551
    // Pins the size of the exhaustive search space for batching 4 unordered
    // elements: the number of ordered set partitions of 4 elements, 75.
    #[cfg(feature = "sim")]
    #[test]
    fn sim_batch_unordered_shuffles_count() {
        let mut flow = FlowBuilder::new();
        let node = flow.process::<()>();

        let (in_send, input) = node.sim_input::<_, NoOrder, _>();

        let tick = node.tick();
        let batch = input.batch(&tick, nondet!(/** test */));
        let out_recv = batch.all_ticks().sim_output();

        let instance_count = flow.sim().exhaustive(async || {
            in_send.send_many_unordered([1, 2, 3, 4]);
            out_recv.assert_yields_only_unordered([1, 2, 3, 4]).await;
        });

        assert_eq!(
            instance_count,
            75 // ∑ (k=1 to 4) S(4,k) × k! = 75
        )
    }
3574
    // `assume_ordering` on a `NoOrder` stream does not make the order real: the
    // simulator still explores shuffles, so asserting the send order fails in
    // some instance (hence `should_panic`).
    #[cfg(feature = "sim")]
    #[test]
    #[should_panic]
    fn sim_observe_order_batched() {
        let mut flow = FlowBuilder::new();
        let node = flow.process::<()>();

        let (in_send, input) = node.sim_input::<_, NoOrder, _>();

        let tick = node.tick();
        let batch = input.batch(&tick, nondet!(/** test */));
        let out_recv = batch
            .assume_ordering::<TotalOrder>(nondet!(/** test */))
            .all_ticks()
            .sim_output();

        flow.sim().exhaustive(async || {
            in_send.send_many_unordered([1, 2, 3, 4]);
            out_recv.assert_yields_only([1, 2, 3, 4]).await; // fails with assume_ordering
        });
    }
3596
    // Pins the exploration space when the order of a batched `NoOrder` stream
    // is observed: 4! permutations × 2^3 batch-boundary placements = 192.
    #[cfg(feature = "sim")]
    #[test]
    fn sim_observe_order_batched_count() {
        let mut flow = FlowBuilder::new();
        let node = flow.process::<()>();

        let (in_send, input) = node.sim_input::<_, NoOrder, _>();

        let tick = node.tick();
        let batch = input.batch(&tick, nondet!(/** test */));
        let out_recv = batch
            .assume_ordering::<TotalOrder>(nondet!(/** test */))
            .all_ticks()
            .sim_output();

        let instance_count = flow.sim().exhaustive(async || {
            in_send.send_many_unordered([1, 2, 3, 4]);
            let _ = out_recv.collect::<Vec<_>>().await;
        });

        assert_eq!(
            instance_count,
            192 // 4! * 2^{4 - 1}
        )
    }
3622
    // Snapshotting a running count of 4 unordered elements: each of the 4
    // intermediate prefixes may or may not be snapshotted (2^4 = 16 instances),
    // but the final count of 4 must always be observed.
    #[cfg(feature = "sim")]
    #[test]
    fn sim_unordered_count_instance_count() {
        let mut flow = FlowBuilder::new();
        let node = flow.process::<()>();

        let (in_send, input) = node.sim_input::<_, NoOrder, _>();

        let tick = node.tick();
        let out_recv = input
            .count()
            .snapshot(&tick, nondet!(/** test */))
            .all_ticks()
            .sim_output();

        let instance_count = flow.sim().exhaustive(async || {
            in_send.send_many_unordered([1, 2, 3, 4]);
            assert!(out_recv.collect::<Vec<_>>().await.last().unwrap() == &4);
        });

        assert_eq!(
            instance_count,
            16 // 2^4, { 0, 1, 2, 3 } can be a snapshot and 4 is always included
        )
    }
3648
    // A top-level `assume_ordering` over 3 unordered elements: the simulator
    // explores every permutation (3! = 6 instances), and each instance still
    // delivers the same multiset of values.
    #[cfg(feature = "sim")]
    #[test]
    fn sim_top_level_assume_ordering() {
        let mut flow = FlowBuilder::new();
        let node = flow.process::<()>();

        let (in_send, input) = node.sim_input::<_, NoOrder, _>();

        let out_recv = input
            .assume_ordering::<TotalOrder>(nondet!(/** test */))
            .sim_output();

        let instance_count = flow.sim().exhaustive(async || {
            in_send.send_many_unordered([1, 2, 3]);
            let mut out = out_recv.collect::<Vec<_>>().await;
            out.sort();
            assert_eq!(out, vec![1, 2, 3]);
        });

        assert_eq!(instance_count, 6)
    }
3670
    // `assume_ordering` on a stream that feeds back into itself through a
    // network round-trip (node -> node2 -> node): even values spawn odd
    // successors (v + 1), and some explored schedule must interleave the
    // fed-back 1 between the inputs 0 and 2.
    #[cfg(feature = "sim")]
    #[test]
    fn sim_top_level_assume_ordering_cycle_back() {
        let mut flow = FlowBuilder::new();
        let node = flow.process::<()>();
        let node2 = flow.process::<()>();

        let (in_send, input) = node.sim_input::<_, NoOrder, _>();

        let (complete_cycle_back, cycle_back) =
            node.forward_ref::<super::Stream<_, _, _, NoOrder>>();
        let ordered = input
            .merge_unordered(cycle_back)
            .assume_ordering::<TotalOrder>(nondet!(/** test */));
        // Feedback: only odd v+1 values (i.e. successors of even inputs) loop
        // back, so the recursion terminates after one hop.
        complete_cycle_back.complete(
            ordered
                .clone()
                .map(q!(|v| v + 1))
                .filter(q!(|v| v % 2 == 1))
                .send(&node2, TCP.fail_stop().bincode())
                .send(&node, TCP.fail_stop().bincode()),
        );

        let out_recv = ordered.sim_output();

        let mut saw = false;
        let instance_count = flow.sim().exhaustive(async || {
            in_send.send_many_unordered([0, 2]);
            let out = out_recv.collect::<Vec<_>>().await;

            if out.starts_with(&[0, 1, 2]) {
                saw = true;
            }
        });

        assert!(saw, "did not see an instance with 0, 1, 2 in order");
        assert_eq!(instance_count, 6);
    }
3709
    // Same feedback cycle as `sim_top_level_assume_ordering_cycle_back`, but
    // the fed-back stream passes through a tick batch, which multiplies the
    // nondeterministic schedules (58 instances instead of 6).
    #[cfg(feature = "sim")]
    #[test]
    fn sim_top_level_assume_ordering_cycle_back_tick() {
        let mut flow = FlowBuilder::new();
        let node = flow.process::<()>();
        let node2 = flow.process::<()>();

        let (in_send, input) = node.sim_input::<_, NoOrder, _>();

        let (complete_cycle_back, cycle_back) =
            node.forward_ref::<super::Stream<_, _, _, NoOrder>>();
        let ordered = input
            .merge_unordered(cycle_back)
            .assume_ordering::<TotalOrder>(nondet!(/** test */));
        complete_cycle_back.complete(
            ordered
                .clone()
                .batch(&node.tick(), nondet!(/** test */))
                .all_ticks()
                .map(q!(|v| v + 1))
                .filter(q!(|v| v % 2 == 1))
                .send(&node2, TCP.fail_stop().bincode())
                .send(&node, TCP.fail_stop().bincode()),
        );

        let out_recv = ordered.sim_output();

        let mut saw = false;
        let instance_count = flow.sim().exhaustive(async || {
            in_send.send_many_unordered([0, 2]);
            let out = out_recv.collect::<Vec<_>>().await;

            if out.starts_with(&[0, 1, 2]) {
                saw = true;
            }
        });

        assert!(saw, "did not see an instance with 0, 1, 2 in order");
        assert_eq!(instance_count, 58);
    }
3750
    // Two stacked `assume_ordering` points (the second also merges a second,
    // never-fed input): checks the simulator explores both, including a
    // schedule where the fed-back 3 lands between the inputs 0 and 1.
    #[cfg(feature = "sim")]
    #[test]
    fn sim_top_level_assume_ordering_multiple() {
        let mut flow = FlowBuilder::new();
        let node = flow.process::<()>();
        let node2 = flow.process::<()>();

        let (in_send, input) = node.sim_input::<_, NoOrder, _>();
        let (_, input2) = node.sim_input::<_, NoOrder, _>();

        let (complete_cycle_back, cycle_back) =
            node.forward_ref::<super::Stream<_, _, _, NoOrder>>();
        let input1_ordered = input
            .clone()
            .merge_unordered(cycle_back)
            .assume_ordering::<TotalOrder>(nondet!(/** test */));
        let foo = input1_ordered
            .clone()
            .map(q!(|v| v + 3))
            .weaken_ordering::<NoOrder>()
            .merge_unordered(input2)
            .assume_ordering::<TotalOrder>(nondet!(/** test */));

        // Only the value 3 (mapped from input 0) feeds back into the cycle.
        complete_cycle_back.complete(
            foo.filter(q!(|v| *v == 3))
                .send(&node2, TCP.fail_stop().bincode())
                .send(&node, TCP.fail_stop().bincode()),
        );

        let out_recv = input1_ordered.sim_output();

        let mut saw = false;
        let instance_count = flow.sim().exhaustive(async || {
            in_send.send_many_unordered([0, 1]);
            let out = out_recv.collect::<Vec<_>>().await;

            if out.starts_with(&[0, 3, 1]) {
                saw = true;
            }
        });

        assert!(saw, "did not see an instance with 0, 3, 1 in order");
        assert_eq!(instance_count, 24);
    }
3795
    // The feedback cycle again, but with `assume_ordering` inside an atomic
    // section: every schedule must still produce all 4 values (inputs 0, 2 plus
    // fed-back 1, 3), and the atomicity changes the instance count to 22.
    #[cfg(feature = "sim")]
    #[test]
    fn sim_atomic_assume_ordering_cycle_back() {
        let mut flow = FlowBuilder::new();
        let node = flow.process::<()>();
        let node2 = flow.process::<()>();

        let (in_send, input) = node.sim_input::<_, NoOrder, _>();

        let (complete_cycle_back, cycle_back) =
            node.forward_ref::<super::Stream<_, _, _, NoOrder>>();
        let ordered = input
            .merge_unordered(cycle_back)
            .atomic()
            .assume_ordering::<TotalOrder>(nondet!(/** test */))
            .end_atomic();
        complete_cycle_back.complete(
            ordered
                .clone()
                .map(q!(|v| v + 1))
                .filter(q!(|v| v % 2 == 1))
                .send(&node2, TCP.fail_stop().bincode())
                .send(&node, TCP.fail_stop().bincode()),
        );

        let out_recv = ordered.sim_output();

        let instance_count = flow.sim().exhaustive(async || {
            in_send.send_many_unordered([0, 2]);
            let out = out_recv.collect::<Vec<_>>().await;
            assert_eq!(out.len(), 4);
        });
        assert_eq!(instance_count, 22);
    }
3830
3831    #[cfg(feature = "deploy")]
3832    #[tokio::test]
3833    async fn partition_evens_odds() {
3834        let mut deployment = Deployment::new();
3835
3836        let mut flow = FlowBuilder::new();
3837        let node = flow.process::<()>();
3838        let external = flow.external::<()>();
3839
3840        let numbers = node.source_iter(q!(vec![1i32, 2, 3, 4, 5, 6]));
3841        let (evens, odds) = numbers.partition(q!(|x: &i32| x % 2 == 0));
3842        let evens_port = evens.send_bincode_external(&external);
3843        let odds_port = odds.send_bincode_external(&external);
3844
3845        let nodes = flow
3846            .with_process(&node, deployment.Localhost())
3847            .with_external(&external, deployment.Localhost())
3848            .deploy(&mut deployment);
3849
3850        deployment.deploy().await.unwrap();
3851
3852        let mut evens_out = nodes.connect(evens_port).await;
3853        let mut odds_out = nodes.connect(odds_port).await;
3854
3855        deployment.start().await.unwrap();
3856
3857        let mut even_results = Vec::new();
3858        for _ in 0..3 {
3859            even_results.push(evens_out.next().await.unwrap());
3860        }
3861        even_results.sort();
3862        assert_eq!(even_results, vec![2, 4, 6]);
3863
3864        let mut odd_results = Vec::new();
3865        for _ in 0..3 {
3866            odd_results.push(odds_out.next().await.unwrap());
3867        }
3868        odd_results.sort();
3869        assert_eq!(odd_results, vec![1, 3, 5]);
3870    }
3871
    // An `.inspect()` whose return value is dropped must still be wired into
    // the compiled flow and execute its side effect (printing to stdout).
    #[cfg(feature = "deploy")]
    #[tokio::test]
    async fn unconsumed_inspect_still_runs() {
        use crate::deploy::DeployCrateWrapper;

        let mut deployment = Deployment::new();

        let mut flow = FlowBuilder::new();
        let node = flow.process::<()>();

        // The return value of .inspect() is intentionally dropped.
        // Before the Null-root fix, this would silently do nothing.
        node.source_iter(q!(0..5))
            .inspect(q!(|x| println!("inspect: {}", x)));

        let nodes = flow
            .with_process(&node, deployment.Localhost())
            .deploy(&mut deployment);

        deployment.deploy().await.unwrap();

        let mut stdout = nodes.get_process(&node).stdout();

        deployment.start().await.unwrap();

        // Collect the five printed lines; sort because stdout interleaving
        // order is not part of the contract being tested.
        let mut lines = Vec::new();
        for _ in 0..5 {
            lines.push(stdout.recv().await.unwrap());
        }
        lines.sort();
        assert_eq!(
            lines,
            vec![
                "inspect: 0",
                "inspect: 1",
                "inspect: 2",
                "inspect: 3",
                "inspect: 4",
            ]
        );
    }
3913
3914    #[cfg(feature = "sim")]
3915    #[test]
3916    fn sim_limit() {
3917        let mut flow = FlowBuilder::new();
3918        let node = flow.process::<()>();
3919
3920        let (in_send, input) = node.sim_input();
3921
3922        let out_recv = input.limit(q!(3)).sim_output();
3923
3924        flow.sim().exhaustive(async || {
3925            in_send.send(1);
3926            in_send.send(2);
3927            in_send.send(3);
3928            in_send.send(4);
3929            in_send.send(5);
3930
3931            out_recv.assert_yields_only([1, 2, 3]).await;
3932        });
3933    }
3934
3935    #[cfg(feature = "sim")]
3936    #[test]
3937    fn sim_limit_zero() {
3938        let mut flow = FlowBuilder::new();
3939        let node = flow.process::<()>();
3940
3941        let (in_send, input) = node.sim_input();
3942
3943        let out_recv = input.limit(q!(0)).sim_output();
3944
3945        flow.sim().exhaustive(async || {
3946            in_send.send(1);
3947            in_send.send(2);
3948
3949            out_recv.assert_yields_only::<i32, _>([]).await;
3950        });
3951    }
3952
    // `merge_ordered` interleaves two ordered inputs nondeterministically: the
    // exhaustive search must visit an interleaved schedule ([1, 3, 2, 4]) while
    // every schedule preserves the combined multiset; 26 instances total.
    #[cfg(feature = "sim")]
    #[test]
    fn sim_merge_ordered() {
        let mut flow = FlowBuilder::new();
        let node = flow.process::<()>();

        let (in_send, input) = node.sim_input();
        let (in_send2, input2) = node.sim_input();

        let out_recv = input
            .merge_ordered(input2, nondet!(/** test */))
            .sim_output();

        let mut saw_out_of_order = false;
        let instances = flow.sim().exhaustive(async || {
            in_send.send(1);
            in_send.send(2);
            in_send2.send(3);
            in_send2.send(4);

            let mut out = out_recv.collect::<Vec<_>>().await;

            if out == [1, 3, 2, 4] {
                saw_out_of_order = true;
            }

            out.sort();
            assert_eq!(out, vec![1, 2, 3, 4]);
        });

        assert!(saw_out_of_order);
        assert_eq!(instances, 26);
    }
3986
    // A fold declared monotone (via `manual_proof!`) can drive
    // `threshold_greater_or_equal`: the output fires once the running sum of
    // [1..=6] reaches the threshold 7, and the asserted value is 7.
    #[cfg(feature = "deploy")]
    #[tokio::test]
    async fn monotone_fold_threshold() {
        use crate::properties::manual_proof;

        let mut deployment = Deployment::new();

        let mut flow = FlowBuilder::new();
        let node = flow.process::<()>();
        let external = flow.external::<()>();

        let in_unbounded: super::Stream<_, _> =
            node.source_iter(q!(vec![1i32, 2, 3, 4, 5, 6])).into();
        let sum = in_unbounded.fold(
            q!(|| 0),
            q!(
                |sum, v| {
                    *sum += v;
                },
                monotone = manual_proof!(/** test */)
            ),
        );

        let threshold_out = sum
            .threshold_greater_or_equal(node.singleton(q!(7)))
            .send_bincode_external(&external);

        let nodes = flow
            .with_process(&node, deployment.Localhost())
            .with_external(&external, deployment.Localhost())
            .deploy(&mut deployment);

        deployment.deploy().await.unwrap();

        let mut threshold_out = nodes.connect(threshold_out).await;

        deployment.start().await.unwrap();

        assert_eq!(threshold_out.next().await.unwrap(), 7);
    }
4027
4028    #[cfg(feature = "deploy")]
4029    #[tokio::test]
4030    async fn monotone_count_threshold() {
4031        let mut deployment = Deployment::new();
4032
4033        let mut flow = FlowBuilder::new();
4034        let node = flow.process::<()>();
4035        let external = flow.external::<()>();
4036
4037        let in_unbounded: super::Stream<_, _> =
4038            node.source_iter(q!(vec![1i32, 2, 3, 4, 5, 6])).into();
4039        let sum = in_unbounded.count();
4040
4041        let threshold_out = sum
4042            .threshold_greater_or_equal(node.singleton(q!(3)))
4043            .send_bincode_external(&external);
4044
4045        let nodes = flow
4046            .with_process(&node, deployment.Localhost())
4047            .with_external(&external, deployment.Localhost())
4048            .deploy(&mut deployment);
4049
4050        deployment.deploy().await.unwrap();
4051
4052        let mut threshold_out = nodes.connect(threshold_out).await;
4053
4054        deployment.start().await.unwrap();
4055
4056        assert_eq!(threshold_out.next().await.unwrap(), 3);
4057    }
4058
4059    #[cfg(feature = "deploy")]
4060    #[tokio::test]
4061    async fn monotone_map_order_preserving_threshold() {
4062        use crate::properties::manual_proof;
4063
4064        let mut deployment = Deployment::new();
4065
4066        let mut flow = FlowBuilder::new();
4067        let node = flow.process::<()>();
4068        let external = flow.external::<()>();
4069
4070        let in_unbounded: super::Stream<_, _> =
4071            node.source_iter(q!(vec![1i32, 2, 3, 4, 5, 6])).into();
4072        let sum = in_unbounded.fold(
4073            q!(|| 0),
4074            q!(
4075                |sum, v| {
4076                    *sum += v;
4077                },
4078                monotone = manual_proof!(/** test */)
4079            ),
4080        );
4081
4082        // map with order_preserving should preserve monotonicity
4083        let doubled = sum.map(q!(
4084            |v| v * 2,
4085            order_preserving = manual_proof!(/** doubling preserves order */)
4086        ));
4087
4088        let threshold_out = doubled
4089            .threshold_greater_or_equal(node.singleton(q!(14)))
4090            .send_bincode_external(&external);
4091
4092        let nodes = flow
4093            .with_process(&node, deployment.Localhost())
4094            .with_external(&external, deployment.Localhost())
4095            .deploy(&mut deployment);
4096
4097        deployment.deploy().await.unwrap();
4098
4099        let mut threshold_out = nodes.connect(threshold_out).await;
4100
4101        deployment.start().await.unwrap();
4102
4103        assert_eq!(threshold_out.next().await.unwrap(), 14);
4104    }
4105
4106    // === Compile-time type tests for join/cross_product ordering ===
4107
4108    #[cfg(any(feature = "deploy", feature = "sim"))]
4109    mod join_ordering_type_tests {
4110        use crate::live_collections::boundedness::{Bounded, Unbounded};
4111        use crate::live_collections::stream::{ExactlyOnce, NoOrder, Stream, TotalOrder};
4112        use crate::location::{Location, Process};
4113
4114        #[expect(dead_code, reason = "compile-time type test")]
4115        fn join_unbounded_with_bounded_preserves_order<'a>(
4116            left: Stream<(i32, char), Process<'a>, Unbounded, TotalOrder, ExactlyOnce>,
4117            right: Stream<(i32, char), Process<'a>, Bounded, TotalOrder, ExactlyOnce>,
4118        ) -> Stream<(i32, (char, char)), Process<'a>, Unbounded, TotalOrder, ExactlyOnce> {
4119            left.join(right)
4120        }
4121
4122        #[expect(dead_code, reason = "compile-time type test")]
4123        fn join_unbounded_with_unbounded_is_no_order<'a>(
4124            left: Stream<(i32, char), Process<'a>, Unbounded, TotalOrder, ExactlyOnce>,
4125            right: Stream<(i32, char), Process<'a>, Unbounded, TotalOrder, ExactlyOnce>,
4126        ) -> Stream<(i32, (char, char)), Process<'a>, Unbounded, NoOrder, ExactlyOnce> {
4127            left.join(right)
4128        }
4129
4130        #[expect(dead_code, reason = "compile-time type test")]
4131        fn join_bounded_with_bounded_preserves_order<'a, L: Location<'a>>(
4132            left: Stream<(i32, char), L, Bounded, TotalOrder, ExactlyOnce>,
4133            right: Stream<(i32, char), L, Bounded, TotalOrder, ExactlyOnce>,
4134        ) -> Stream<(i32, (char, char)), L, Bounded, TotalOrder, ExactlyOnce> {
4135            left.join(right)
4136        }
4137
4138        #[expect(dead_code, reason = "compile-time type test")]
4139        fn join_unbounded_noorder_with_bounded<'a>(
4140            left: Stream<(i32, char), Process<'a>, Unbounded, NoOrder, ExactlyOnce>,
4141            right: Stream<(i32, char), Process<'a>, Bounded, NoOrder, ExactlyOnce>,
4142        ) -> Stream<(i32, (char, char)), Process<'a>, Unbounded, NoOrder, ExactlyOnce> {
4143            left.join(right)
4144        }
4145
4146        // === Compile-time type tests for cross_product ordering ===
4147
4148        #[expect(dead_code, reason = "compile-time type test")]
4149        fn cross_product_unbounded_with_bounded_preserves_order<'a>(
4150            left: Stream<i32, Process<'a>, Unbounded, TotalOrder, ExactlyOnce>,
4151            right: Stream<char, Process<'a>, Bounded, TotalOrder, ExactlyOnce>,
4152        ) -> Stream<(i32, char), Process<'a>, Unbounded, TotalOrder, ExactlyOnce> {
4153            left.cross_product(right)
4154        }
4155
4156        #[expect(dead_code, reason = "compile-time type test")]
4157        fn cross_product_bounded_with_bounded_preserves_order<'a>(
4158            left: Stream<i32, Process<'a>, Bounded, TotalOrder, ExactlyOnce>,
4159            right: Stream<char, Process<'a>, Bounded, TotalOrder, ExactlyOnce>,
4160        ) -> Stream<(i32, char), Process<'a>, Bounded, TotalOrder, ExactlyOnce> {
4161            left.cross_product(right)
4162        }
4163
4164        #[expect(dead_code, reason = "compile-time type test")]
4165        fn cross_product_unbounded_with_unbounded_is_no_order<'a>(
4166            left: Stream<i32, Process<'a>, Unbounded, TotalOrder, ExactlyOnce>,
4167            right: Stream<char, Process<'a>, Unbounded, TotalOrder, ExactlyOnce>,
4168        ) -> Stream<(i32, char), Process<'a>, Unbounded, NoOrder, ExactlyOnce> {
4169            left.cross_product(right)
4170        }
4171    } // mod join_ordering_type_tests
4172
4173    // === Runtime correctness tests for bounded join/cross_product ===
4174
4175    #[cfg(feature = "sim")]
4176    #[test]
4177    fn cross_product_mixed_boundedness_correctness() {
4178        use stageleft::q;
4179
4180        use crate::compile::builder::FlowBuilder;
4181        use crate::nondet::nondet;
4182
4183        let mut flow = FlowBuilder::new();
4184        let process = flow.process::<()>();
4185        let tick = process.tick();
4186
4187        let left = process.source_iter(q!(vec![1, 2]));
4188        let right = process
4189            .source_iter(q!(vec!['a', 'b']))
4190            .batch(&tick, nondet!(/** test */))
4191            .all_ticks();
4192
4193        let out = left.cross_product(right).sim_output();
4194
4195        flow.sim().exhaustive(async || {
4196            out.assert_yields_only_unordered(vec![(1, 'a'), (1, 'b'), (2, 'a'), (2, 'b')])
4197                .await;
4198        });
4199    }
4200
4201    #[cfg(feature = "sim")]
4202    #[test]
4203    fn join_mixed_boundedness_correctness() {
4204        use stageleft::q;
4205
4206        use crate::compile::builder::FlowBuilder;
4207        use crate::nondet::nondet;
4208
4209        let mut flow = FlowBuilder::new();
4210        let process = flow.process::<()>();
4211        let tick = process.tick();
4212
4213        let left = process.source_iter(q!(vec![(1, 'a'), (2, 'b')]));
4214        let right = process
4215            .source_iter(q!(vec![(1, 'x'), (2, 'y')]))
4216            .batch(&tick, nondet!(/** test */))
4217            .all_ticks();
4218
4219        let out = left.join(right).sim_output();
4220
4221        flow.sim().exhaustive(async || {
4222            out.assert_yields_only_unordered(vec![(1, ('a', 'x')), (2, ('b', 'y'))])
4223                .await;
4224        });
4225    }
4226}