Commit c54929d

Fix the doctest
1 parent 704ffce commit c54929d

24 files changed: +101 additions, -68 deletions


core/src/DocTestDataMutArray.hs

Lines changed: 2 additions & 1 deletion
@@ -6,5 +6,6 @@
 
 For APIs that have not been released yet.
 
->>> import Streamly.Internal.Data.MutArray as MutArray
+>>> import qualified Streamly.Internal.Data.Fold as Fold
+>>> import qualified Streamly.Internal.Data.MutArray as MutArray
 -}

core/src/DocTestDataStream.hs

Lines changed: 4 additions & 0 deletions
@@ -13,6 +13,7 @@
 >>> import Data.Functor.Identity (runIdentity)
 >>> import Data.IORef
 >>> import Data.Semigroup (cycle1)
+>>> import Data.Word (Word8, Word16)
 >>> import GHC.Exts (Ptr (Ptr))
 >>> import System.IO (stdout, hSetBuffering, BufferMode(LineBuffering))
 
@@ -31,6 +32,9 @@
 
 For APIs that have not been released yet.
 
+>>> import qualified Streamly.Internal.FileSystem.Path as Path
+>>> import qualified Streamly.Internal.Data.Scanr as Scanr
+>>> import qualified Streamly.Internal.Data.Scanl as Scanl
 >>> import qualified Streamly.Internal.Data.Fold as Fold
 >>> import qualified Streamly.Internal.Data.Parser as Parser
 >>> import qualified Streamly.Internal.Data.Stream as Stream

core/src/DocTestDataStreamK.hs

Lines changed: 1 addition & 0 deletions
@@ -16,6 +16,7 @@
 
 For APIs that have not been released yet.
 
+>>> import qualified Streamly.Internal.FileSystem.Path as Path
 >>> import qualified Streamly.Internal.Data.StreamK as StreamK
 >>> import qualified Streamly.Internal.FileSystem.DirIO as Dir
 -}

core/src/Streamly/Internal/Data/Array/Type.hs

Lines changed: 9 additions & 9 deletions
@@ -447,7 +447,7 @@ fromListRev xs = unsafePerformIO $ unsafeFreeze <$> MA.fromListRev xs
 -- allocated to size N, if the stream terminates before N elements then the
 -- array may hold less than N elements.
 --
--- >>> fromStreamN n = Stream.fold (Array.writeN n)
+-- >>> fromStreamN n = Stream.fold (Array.createOf n)
 --
 -- /Pre-release/
 {-# INLINE_NORMAL fromStreamN #-}
@@ -466,7 +466,7 @@ fromStreamDN = fromStreamN
 -- single array from a stream of unknown size. 'writeN' is at least twice
 -- as efficient when the size is already known.
 --
--- >>> fromStream = Stream.fold Array.write
+-- >>> fromStream = Stream.fold Array.create
 --
 -- Note that if the input stream is too large memory allocation for the array
 -- may fail. When the stream size is not known, `chunksOf` followed by
@@ -502,7 +502,7 @@ bufferChunks = buildChunks
 --
 -- Same as the following but may be more efficient:
 --
--- >>> chunksOf n = Stream.foldMany (Array.writeN n)
+-- >>> chunksOf n = Stream.foldMany (Array.createOf n)
 --
 -- /Pre-release/
 {-# INLINE_NORMAL chunksOf #-}
@@ -550,7 +550,7 @@ chunksEndByLn' = chunksEndBy' (== fromIntegral (ord '\n'))
 
 -- | Convert a stream of arrays into a stream of their elements.
 --
--- >>> concat = Stream.unfoldMany Array.reader
+-- >>> concat = Stream.unfoldEach Array.reader
 --
 {-# INLINE_NORMAL concat #-}
 concat :: (Monad m, Unbox a) => Stream m (Array a) -> Stream m a
@@ -567,7 +567,7 @@ flattenArrays = concat
 -- | Convert a stream of arrays into a stream of their elements reversing the
 -- contents of each array before flattening.
 --
--- >>> concatRev = Stream.unfoldMany Array.readerRev
+-- >>> concatRev = Stream.unfoldEach Array.readerRev
 --
 {-# INLINE_NORMAL concatRev #-}
 concatRev :: forall m a. (Monad m, Unbox a)
@@ -591,7 +591,7 @@ flattenArraysRev = concatRev
 -- arrays would have no capacity to append, therefore, a copy will be forced
 -- anyway.
 
--- | Fold @createCompactBySizeGE n@ coalesces adjacent arrays in the input
+-- | Fold @createCompactMin n@ coalesces adjacent arrays in the input
 -- stream until the size becomes greater than or equal to n.
 --
 -- Generates unpinned arrays irrespective of the pinning status of input
@@ -618,7 +618,7 @@ fPinnedCompactGE = createCompactMin
 -- | @compactBySize n stream@ coalesces adjacent arrays in the @stream@ until
 -- the size becomes greater than or equal to @n@.
 --
--- >>> compactBySize n = Stream.foldMany (Array.createCompactBySizeGE n)
+-- >>> compactBySize n = Stream.foldMany (Array.createCompactMin n)
 --
 -- Generates unpinned arrays irrespective of the pinning status of input
 -- arrays.
@@ -633,7 +633,7 @@ RENAME(compactGE,compactMin)
 
 -- | Like 'compactBySizeGE' but for transforming folds instead of stream.
 --
--- >>> lCompactBySizeGE n = Fold.many (Array.createCompactBySizeGE n)
+-- >>> lCompactBySizeGE n = Fold.many (Array.createCompactMin n)
 --
 -- Generates unpinned arrays irrespective of the pinning status of input
 -- arrays.
@@ -987,7 +987,7 @@ pinnedWrite = pinnedCreate
 -- could be unsafe and dangerous. This is dangerous especially when used with
 -- foldMany like operations.
 --
--- >>> unsafePureWrite = Array.unsafeMakePure Array.write
+-- >>> unsafePureWrite = Array.unsafeMakePure Array.create
 --
 {-# INLINE unsafeMakePure #-}
 unsafeMakePure :: Monad m => Fold IO a b -> Fold m a b
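
The renamed doctests above restate the array operations in terms of the newer fold/unfold names (create, createOf, unfoldEach, createCompactMin). A minimal sketch of the chunk-and-flatten round trip implied by the chunksOf and concat doctests, assuming the internal modules and the renamed identifiers as used in this commit (they may differ in released versions):

import qualified Streamly.Internal.Data.Array as Array
import qualified Streamly.Internal.Data.Stream as Stream

-- Split a stream into arrays of at most 3 elements, then flatten them back.
-- chunksOf n = Stream.foldMany (Array.createOf n)
-- concat     = Stream.unfoldEach Array.reader
roundTrip :: IO [Int]
roundTrip =
    Stream.toList
        $ Stream.unfoldEach Array.reader
        $ Stream.foldMany (Array.createOf 3)
        $ Stream.fromList [1..10]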

core/src/Streamly/Internal/Data/Fold/Combinators.hs

Lines changed: 3 additions & 1 deletion
@@ -648,7 +648,7 @@ the = foldt' step initial id
 -- identity (@0@) when the stream is empty. Note that this is not numerically
 -- stable for floating point numbers.
 --
--- >>> sum = Fold.cumulative Fold.windowSum
+-- >>> sum = Fold.fromScanl (Scanl.cumulativeScan Scanl.incrSum)
 --
 -- Same as following but numerically stable:
 --
@@ -1936,6 +1936,8 @@ partitionByMUsing t f fld1 fld2 =
 --
 -- Example, send input to the two folds in a proportion of 2:1:
 --
+-- >>> :set -fno-warn-unrecognised-warning-flags
+-- >>> :set -fno-warn-x-partial
 -- >>> :{
 -- proportionately m n = do
 -- ref <- newIORef $ cycle $ concat [replicate m Left, replicate n Right]
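
The two :set lines added before the proportionately example deal with GHC 9.8's x-partial warning, which is enabled by default and fires on uses of partial functions such as head or tail; the first line keeps older GHCs, which do not recognise -fno-warn-x-partial, from rejecting the second. A standalone sketch of the same preamble (flag names as used in this diff; the head use below is only an illustration):

>>> :set -fno-warn-unrecognised-warning-flags
>>> :set -fno-warn-x-partial
>>> head [1, 2, 3]
1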

core/src/Streamly/Internal/Data/Fold/Window.hs

Lines changed: 1 addition & 0 deletions
@@ -88,6 +88,7 @@ import Prelude hiding (length, sum, minimum, maximum)
 -- | Map a function on the incoming as well as outgoing element of a rolling
 -- window fold.
 --
+-- >>> :set -fno-warn-deprecations
 -- >>> lmap f = Fold.lmap (bimap f (f <$>))
 --
 {-# INLINE windowLmap #-}
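
The added -fno-warn-deprecations keeps this doctest quiet, presumably because it references deprecated window-fold names. The doctest itself maps a function over both slots of the rolling-window input, a pair of the incoming element and a Maybe of the outgoing one (the pair shape is an assumption based on the doctest, not a quoted signature); bimap f (f <$>) applies f to both, as in this small pure sketch:

>>> import Data.Bifunctor (bimap)
>>> bimap (+ 1) ((+ 1) <$>) (10, Just 3) :: (Int, Maybe Int)
(11,Just 4)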

core/src/Streamly/Internal/Data/MutArray/Type.hs

Lines changed: 1 addition & 4 deletions
@@ -2355,9 +2355,6 @@ writeRevN = revCreateOf
 -- | @pinnedWriteNAligned align n@ folds a maximum of @n@ elements from the
 -- input stream to a 'MutArray' aligned to the given size.
 --
--- >>> pinnedWriteNAligned align = MutArray.createWithOf (MutArray.pinnedNewAligned align)
--- >>> pinnedWriteNAligned align n = MutArray.appendN n (MutArray.pinnedNewAligned align n)
---
 -- /Pre-release/
 --
 {-# INLINE_NORMAL pinnedWriteNAligned #-}
@@ -3107,7 +3104,7 @@ cmp = byteCmp
 
 -- | Byte equality of two arrays.
 --
--- >>> byteEq arr1 arr2 = (==) EQ $ MArray.byteCmp arr1 arr2
+-- >>> byteEq arr1 arr2 = (==) EQ <$> MutArray.byteCmp arr1 arr2
 --
 -- /Unsafe/: See 'byteCmp'.
 {-# INLINE byteEq #-}
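
The byteEq fix replaces $ with <$> because byteCmp on mutable arrays runs in a monad, so the Ordering it produces has to be compared under fmap; it also corrects the qualifier to MutArray, matching the import added to DocTestDataMutArray.hs. A hedged sketch of the corrected shape (byteCmp's monadic signature is inferred from the doctest, not quoted from the library):

import qualified Streamly.Internal.Data.MutArray as MutArray

-- True when the two arrays contain identical bytes.
bytesEqual :: MutArray.MutArray Int -> MutArray.MutArray Int -> IO Bool
bytesEqual arr1 arr2 = (==) EQ <$> MutArray.byteCmp arr1 arr2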

core/src/Streamly/Internal/Data/Parser/Type.hs

Lines changed: 2 additions & 2 deletions
@@ -883,8 +883,8 @@ data AltParseState sl sr = AltParseL !Int !sl | AltParseR !sr
 -- result, if a parser is defined recursively using this, it may cause an
 -- infintie loop. The following example checks the strictness:
 --
--- >>> p = Parser.satisfy (> 0) `Parser.alt` undefined
--- >>> Stream.parse p $ Stream.fromList [1..10]
+-- >> p = Parser.satisfy (> 0) `Parser.alt` undefined
+-- >> Stream.parse p $ Stream.fromList [1..10]
 -- *** Exception: Prelude.undefined
 --
 -- CAVEAT 2: QUADRATIC TIME COMPLEXITY. Static composition is fast due to

core/src/Streamly/Internal/Data/Scanl/Combinators.hs

Lines changed: 2 additions & 0 deletions
@@ -1798,6 +1798,8 @@ data PartState sL sR = PartLeft !sL !sR | PartRight !sL !sR
 --
 -- Example, send input to the two folds in a proportion of 2:1:
 --
+-- >>> :set -fno-warn-unrecognised-warning-flags
+-- >>> :set -fno-warn-x-partial
 -- >>> :{
 -- proportionately m n = do
 -- ref <- newIORef $ cycle $ concat [replicate m Left, replicate n Right]

core/src/Streamly/Internal/Data/Scanl/Window.hs

Lines changed: 1 addition & 1 deletion
@@ -226,7 +226,7 @@ incrScan n f = incrScanWith n (Scanl.lmap fst f)
 -- | Convert an incremental scan to a cumulative scan using the entire input
 -- stream as a single window.
 --
--- >>> cumulativeScan = Scanl.lmap Insert
+-- >>> cumulativeScan = Scanl.lmap Scanl.Insert
 --
 {-# INLINE cumulativeScan #-}
 cumulativeScan :: Scanl m (Incr a) b -> Scanl m a b
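
This change qualifies the Insert constructor of the incremental-scan input type with the doctest's Scanl alias. Together with the sum doctest change in Fold/Combinators.hs, the pattern is: an incremental window scan is turned into a cumulative scan over the whole stream and then run as a fold. A hedged sketch using only names that appear in this commit's doctests (Scanl.incrSum, Scanl.cumulativeScan, Fold.fromScanl); whether these are all re-exported from the modules imported below is an assumption:

import qualified Streamly.Internal.Data.Fold as Fold
import qualified Streamly.Internal.Data.Scanl as Scanl
import qualified Streamly.Internal.Data.Stream as Stream

-- Sum a stream via the cumulative form of the incremental window sum.
sumViaScanl :: IO Double
sumViaScanl =
    Stream.fold
        (Fold.fromScanl (Scanl.cumulativeScan Scanl.incrSum))
        (Stream.fromList [1, 2, 3, 4])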
