-- hbs2/hbs2-tests/test/PeerMain.hs
{-# Language RankNTypes #-}
{-# Language TemplateHaskell #-}
{-# Language AllowAmbiguousTypes #-}
module Main where
import HBS2.Prelude.Plated
import HBS2.Clock
import HBS2.Hash
-- import HBS2.Net.Messaging
import HBS2.Net.Proto
import HBS2.Net.Proto.BlockInfo
import HBS2.Net.Proto.BlockChunks
import HBS2.Net.Messaging
import HBS2.Net.Messaging.Fake
import HBS2.Actors.Peer
import HBS2.Defaults
import HBS2.Storage
import HBS2.Storage.Simple
import HBS2.Storage.Simple.Extra
import HBS2.Actors.ChunkWriter
-- import Test.Tasty hiding (Timeout)
import Test.Tasty.HUnit
import Codec.Serialise
import Control.Concurrent.Async
import Control.Monad
import Control.Monad.Trans.Maybe
import Data.ByteString.Lazy (ByteString)
import Data.ByteString.Lazy.Char8 qualified as B8
import Data.Cache (Cache)
import Data.Cache qualified as Cache
import Data.Foldable
import Data.Hashable
import Data.Map (Map)
import Data.Map qualified as Map
import Data.Maybe
import Data.Word
import Lens.Micro.Platform
import Prettyprinter
import System.Directory
import System.Exit
import System.FilePath.Posix
import System.IO
import Control.Concurrent
import Control.Concurrent.STM
import Control.Concurrent.STM.TQueue qualified as Q
debug :: (MonadIO m) => Doc ann -> m ()
debug p = liftIO $ hPrint stderr p
-- FIXME: peer should be a part of the key
-- therefore, key is ( p | cookie )
-- but client's cookie in protocol should be just ( cookie :: Word32 )
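-- BlockDownload is the per-transfer state kept for a (peer, cookie) session:
-- the block being fetched, the negotiated chunk size, and the progress so far.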
data BlockDownload m =
  BlockDownload
  { _sBlockHash      :: Hash HbSync
  , _sBlockChunkSize :: ChunkSize
  , _sBlockOffset    :: Offset
  , _sBlockWritten   :: Size
  , _sOnBlockReady   :: OnBlockReady HbSync m
  }

data MySessions e m =
  Sessions
  { _sBlockDownload :: Cache (Peer e, Cookie e) (BlockDownload m)
  , _sBlockSizes    :: Cache (Hash HbSync) (Map (Peer e) Size)
  , _sBlockSize     :: Cache (Hash HbSync) Size
  }

makeLenses 'Sessions
makeLenses 'BlockDownload

newBlockDownload :: forall m . MonadIO m
                 => Hash HbSync
                 -> OnBlockReady HbSync m
                 -> BlockDownload m
newBlockDownload h = BlockDownload h 0 0 0
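-- 'Fake' is the transport tag used by these tests: peers are plain Word8
-- identifiers and all traffic goes over the in-memory fake bus (newFakeP2P).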
data Fake
instance HasPeer Fake where
  newtype instance Peer Fake = FakePeer Word8
    deriving newtype (Hashable,Num,Enum,Real,Integral)
    deriving stock (Eq,Ord,Show)

instance Pretty (Peer Fake) where
  pretty (FakePeer n) = parens ("peer" <+> pretty n)
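-- The two HasProtocol instances below give each protocol a distinct
-- ProtocolId (1 for block-size queries, 2 for chunk transfer) and serialise
-- messages with 'serialise' / 'deserialiseOrFail' over lazy ByteString.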
instance HasProtocol Fake (BlockSize Fake) where
  type instance ProtocolId (BlockSize Fake) = 1
  type instance Encoded Fake = ByteString
  decode = either (const Nothing) Just . deserialiseOrFail
  encode = serialise

instance HasProtocol Fake (BlockChunks Fake) where
  type instance ProtocolId (BlockChunks Fake) = 2
  type instance Encoded Fake = ByteString
  decode = either (const Nothing) Just . deserialiseOrFail
  encode = serialise
main :: IO ()
main = do
  hSetBuffering stderr LineBuffering
  test1
  -- defaultMain $
  --   testGroup "root"
  --     [
  --       testCase "test1" test1
  --     ]

emptySessions :: forall e m . MonadIO m => m (MySessions e m)
emptySessions = liftIO $
  Sessions <$> Cache.newCache (Just defCookieTimeout)
           <*> Cache.newCache (Just defBlockInfoTimeout)
           <*> Cache.newCache (Just defBlockInfoTimeout)

newSession :: (Eq k, Hashable k, MonadIO m)
           => s
           -> Getting (Cache k v) s (Cache k v)
           -> k
           -> v
           -> m ()
newSession se l k v = do
  let cache = view l se
  liftIO $ Cache.insert cache k v

getSession' se l k fn = do
  let cache = view l se
  liftIO $ Cache.lookup cache k <&> fmap fn

getSession se l k = getSession' se l k id

updSession se def l k fn = liftIO do
  let cache = view l se
  v <- Cache.fetchWithCache cache k (const $ pure def)
  Cache.insert cache k (fn v)

delSession se l k = liftIO do
  Cache.delete (view l se) k

expireSession se l = liftIO do
  Cache.purgeExpired (view l se)
-- A questionable FIX to avoid "orphan instance" complaints
data Adapted e = Adapted

-- newtype FullPeerM m a = RealPeerM { fromRealPeerM :: ReaderT }
runFakePeer :: forall e b . ( e ~ Fake
                            -- , m ~ IO
                            , Messaging b e ByteString
                            -- , MonadIO m
                            -- , Response e p m
                            -- , EngineM e m
                            )
            => Peer e
            -> b
            -> EngineM e IO ()
            -> IO ()

runFakePeer p bus work = do

  env <- newEnv p bus

  se <- emptySessions @e

  let pid = fromIntegral (hash (env ^. self)) :: Word8

  dir <- liftIO $ canonicalizePath ( ".peers" </> show pid)
  let chDir = dir </> "tmp-chunks"
  liftIO $ createDirectoryIfMissing True dir

  let opts = [ StoragePrefix dir
             ]

  storage <- simpleStorageInit opts -- :: IO (SimpleStorage HbSync)

  w <- liftIO $ async $ simpleStorageWorker storage

  cww <- newChunkWriterIO storage (Just chDir)

  cw <- async $ runChunkWriter cww

  let size = 1024*1024

  let blk = B8.concat [ fromString (take 1 $ show x)
                      | x <- replicate size (fromIntegral pid :: Int)
                      ]

  root <- putAsMerkle storage blk

  debug $ "I'm" <+> pretty pid <+> pretty root
  let handleBlockInfo (p, h, sz') = do
        maybe1 sz' (pure ()) $ \sz -> do
          let bsz = fromIntegral sz
          -- here we cache block size information
          updSession se mempty sBlockSizes h (Map.insert p bsz)
          updSession se bsz sBlockSize h (const bsz)
          debug $ pretty p <+> "has block" <+> pretty h <+> pretty sz'
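  -- BlockChunksI wires the chunk protocol to local storage and to the
  -- per-(peer, cookie) download sessions kept in 'se'.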
  let adapter =
        BlockChunksI
        { blkSize        = hasBlock storage
        , blkChunk       = getChunk storage
        , blkGetHash     = \c -> getSession' se sBlockDownload c (view sBlockHash)

        -- AS SOON AS ALL CHUNKS ARE RECEIVED (THE LAST CHUNK HAS ARRIVED):
        -- COMPUTE THE HASH OF WHAT WE ENDED UP WITH
        -- IF IT CAME OUT RIGHT, THEN:
        --   WRITE THE BLOCK INTO STORAGE
        --   TELL THE WAITING SIDE THAT THE BLOCK HAS BEEN ACCEPTED?
        --   DELETE THE COOKIE?
        , blkAcceptChunk = \(c,p,h,n,bs) -> void $ runMaybeT $ do

            let cKey = (p,c)

            -- check if there is a session
            void $ MaybeT $ getSession' se sBlockDownload cKey id

            let def = newBlockDownload h dontHandle

            let bslen = fromIntegral $ B8.length bs

            -- TODO: log this situation
            mbSize   <- MaybeT $ getSession' se sBlockSizes h (Map.lookup p) <&> fromMaybe Nothing
            mbChSize <- MaybeT $ getSession' se sBlockDownload cKey (view sBlockChunkSize)

            let offset = fromIntegral n * fromIntegral mbChSize :: Offset

            updSession se def sBlockDownload cKey (over sBlockOffset (max offset))

            liftIO $ do
              writeChunk cww cKey h offset bs

            updSession se def sBlockDownload cKey (over sBlockWritten (+bslen))

            dwnld <- MaybeT $ getSession' se sBlockDownload cKey id

            let maxOff  = view sBlockOffset dwnld
            let written = view sBlockWritten dwnld
            let notify  = view sOnBlockReady dwnld

            let mbDone =  (maxOff + fromIntegral mbChSize) > fromIntegral mbSize
                       && written >= mbSize

            when mbDone $ lift do
              deferred (Proxy @(BlockChunks e)) $ do
                h1 <- liftIO $ getHash cww cKey h

                -- COMPUTE THE HASH
                -- IF IT MATCHES   - FINALIZE THE BLOCK
                -- IF IT DOES NOT  - KEEP WAITING

                when ( h1 == h ) $ do
                  lift $ commitBlock cww cKey h
                  lift $ notify h
                  delSession se sBlockDownload cKey

            when (written > mbSize * defBlockDownloadThreshold) $ do
              debug $ "SESSION DELETED BECAUSE THAT PEER IS JERK:" <+> pretty p
              delSession se sBlockDownload cKey

            -- IF WE SIT HERE FOR TOO LONG WE CAN BE DDOSED: SOMEONE MAY SEND
            -- NOT ALL CHUNKS OF A BLOCK, OR SEND INDIVIDUAL CHUNKS MANY TIMES
            -- OVER, WHILE WE KEEP COMPUTING HASHES. THAT WON'T DO.
            -- SO WAIT HERE FOR, SAY, 2*mbSize AND THEN GIVE UP

        }
  peer <- async $ runPeer env
    [ makeResponse (blockSizeProto (hasBlock storage) handleBlockInfo)
    , makeResponse (blockChunksProto adapter)
    ]

  runEngineM env work

  simpleStorageStop storage
  stopChunkWriter cww

  mapM_ cancel [w,cw,peer]
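-- test1 below spins up two fake peers over the shared in-memory bus; peer 0
-- asks peer 1 for a couple of block sizes, while the full chunk-download
-- round trip is still left commented out.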
test1 :: IO ()
test1 = do

  hSetBuffering stderr LineBuffering

  fake <- newFakeP2P True

  void $ race (pause (2 :: Timeout 'Seconds)) $ do

    let p0 = 0 :: Peer Fake
    let p1 = 1 :: Peer Fake

    p1Thread <- async $ runFakePeer p1 fake (liftIO $ forever yield)

    p0Thread <- async $ runFakePeer p0 fake $ do
      request p1 (GetBlockSize @Fake (fromString "81JeD7LNR6Q7RYfyWBxfjJn1RsWzvegkUXae6FUNgrMZ"))
      request p1 (GetBlockSize @Fake (fromString "5KP4vM6RuEX6RA1ywthBMqZV5UJDLANC17UrF6zuWdRt"))

      let h = fromString "81JeD7LNR6Q7RYfyWBxfjJn1RsWzvegkUXae6FUNgrMZ"

      -- updateSession cookie (id)
      -- se <- getSession cookie (lens)
      -- cookie <- newSession ???
      -- newCookie <- genCookie @Fake (p1, h) -- <<~~~ FIXME: generate a good session id!
      -- let cKey@(_, cookie) = (p1, newCookie)

      pure ()

    let peerz = p0Thread : [p1Thread]

    -- peerz <- mapM (async . uncurry runFakePeer) ee

    --runEngineM e0 $ do

    --  -- TODO: #ASAP generate unique cookie!!
    --  --
    --  -- FIXME: withAllCrap $ do ...

    --  let s0 = (fst . head) ee

    --  newCookie <- genCookie @Fake (p1, h) -- <<~~~ FIXME: generate a good session id!
    --  let cKey@(_, cookie) = (p1, newCookie)
    --  let chsz = defChunkSize

    --  debug $ "new cookie:" <+> pretty cookie

    --  qblk <- liftIO Q.newTQueueIO

    --  let onBlockReady bh = do
    --        liftIO $ atomically $ Q.writeTQueue qblk bh

    --  let def = newBlockDownload h onBlockReady

    --  -- create sessions before requesting anything
    --  updSession s0 def sBlockDownload cKey (set sBlockChunkSize chsz)

    --  request p1 (GetBlockSize @Fake (fromString "81JeD7LNR6Q7RYfyWBxfjJn1RsWzvegkUXae6FUNgrMZ"))
    --  request p1 (GetBlockSize @Fake (fromString "5KP4vM6RuEX6RA1ywthBMqZV5UJDLANC17UrF6zuWdRt"))

    --  request p0 (GetBlockSize @Fake (fromString "81JeD7LNR6Q7RYfyWBxfjJn1RsWzvegkUXae6FUNgrMZ"))
    --  request p0 (GetBlockSize @Fake (fromString "5KP4vM6RuEX6RA1ywthBMqZV5UJDLANC17UrF6zuWdRt"))

    --  -- TODO: #ASAP block ready notification

    --  debug $ "REQUEST BLOCK:" <+> pretty h <+> "from" <+> pretty p1

    --  request p1 (BlockChunks @Fake cookie (BlockGetAllChunks h chsz))

    --  blk <- liftIO $ atomically $ Q.readTQueue qblk

    --  debug $ "BLOCK READY:" <+> pretty blk

    --  -- TODO: look at what kind of block this is
    --  --       if it is a Merkle tree - download it recursively
    --  --       if it is a reference   - see what kind of reference it is,
    --  --                                check the proofs, download recursively

    --  -- let mbLink = deserialiseOrFail @Merkle obj

    --  pure ()

    mapM_ cancel peerz

    (_, e) <- waitAnyCatchCancel peerz

    debug (pretty $ show e)

  debug "we're done"

  assertBool "success" True

  exitSuccess

  assertBool "failed" False