Dmitry Zuikov 2023-01-18 14:24:06 +03:00
parent b8696cc9db
commit d3a40299d6
2 changed files with 31 additions and 16 deletions

View File

@@ -196,12 +196,15 @@ simpleRefFileName ss h = path
 --
 -- So, the block MUST be small
 --
-simpleGetBlockLazy :: IsKey h
+simpleGetBlockLazy :: (IsKey h, Pretty (Key h))
                    => SimpleStorage h
                    -> Hash h
                    -> IO (Maybe LBS.ByteString)
 simpleGetBlockLazy s key = do
+  liftIO $ print $ "simpleGetBlockLazy" <+> pretty key
   resQ <- TBMQ.newTBMQueueIO 1 :: IO (TBMQueue (Maybe LBS.ByteString))
   let fn = simpleBlockFileName s key
   let action = do
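Note on the signature change: the new debug line pretty-prints the lookup key, and because that key type comes from an associated type family, the constraint has to name the family application itself, hence Pretty (Key h). A hypothetical, self-contained sketch of the same pattern (class, types and the Prettyprinter import path are my assumptions, not taken from the commit):

{-# LANGUAGE TypeFamilies #-}
module KeySketch where

import Prettyprinter (Pretty (..), (<+>))

-- Hypothetical stand-in for the storage's key machinery: the concrete
-- key type is an associated type family of the hash scheme h.
class IsKeyLike h where
  type KeyOf h

-- Hypothetical stand-in for SimpleStorage h; it only serves to pin h.
data StoreOf h = StoreOf

-- Pretty-printing a key of the family forces a constraint on the family
-- application itself, mirroring the added (IsKey h, Pretty (Key h)).
debugLookup :: (IsKeyLike h, Pretty (KeyOf h)) => StoreOf h -> KeyOf h -> IO ()
debugLookup _ k = print (pretty ("lookup" :: String) <+> pretty k)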

View File

@@ -40,12 +40,12 @@ debug :: (MonadIO m) => Doc ann -> m ()
 debug p = liftIO $ hPrint stderr p

 newtype ChunkSize = ChunkSize Word16
-  deriving newtype (Num,Enum,Real,Integral)
+  deriving newtype (Num,Enum,Real,Integral,Pretty)
   deriving stock (Eq,Ord,Show,Data,Generic)

 newtype ChunkNum = ChunkNum Word16
-  deriving newtype (Num,Enum,Real,Integral)
+  deriving newtype (Num,Enum,Real,Integral,Pretty)
   deriving stock (Eq,Ord,Show,Data,Generic)
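The change above only extends the newtype-derived classes with Pretty, so chunk sizes and numbers can go straight into debug output. A minimal, simplified sketch of that deriving style (module name and the Prettyprinter import path are assumptions; Data/Generic deriving is dropped here to keep the sketch small):

{-# LANGUAGE DerivingStrategies #-}
{-# LANGUAGE GeneralizedNewtypeDeriving #-}
module ChunkSketch where

import Data.Word (Word16)
import Prettyprinter (Pretty (..), (<+>))

-- The newtype strategy reuses the Pretty instance of the underlying
-- Word16, so no hand-written instance is needed.
newtype ChunkSize = ChunkSize Word16
  deriving newtype (Num, Enum, Real, Integral, Pretty)
  deriving stock (Eq, Ord, Show)

newtype ChunkNum = ChunkNum Word16
  deriving newtype (Num, Enum, Real, Integral, Pretty)
  deriving stock (Eq, Ord, Show)

demo :: IO ()
demo = print (pretty (ChunkNum 3) <+> pretty (ChunkSize 1024))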
@@ -56,13 +56,13 @@ newtype Sessions e =

 makeLenses 'Sessions

-type GetBlockChunk h = forall m . MonadIO m => Hash h -> Offset -> Size -> m (Maybe ByteString)
+type GetBlockChunk h m = Hash h -> Offset -> Size -> m (Maybe ByteString)

 data BlockChunksI e m =
   BlockChunksI
   { blkSize :: GetBlockSize HbSync m
-  , blkChunk :: GetBlockChunk HbSync
+  , blkChunk :: GetBlockChunk HbSync m
  , blkGetHash :: Cookie e -> m (Maybe (Hash HbSync))
  , blkAcceptChunk :: (Hash HbSync, ChunkNum, ByteString) -> m ()
  }
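The GetBlockChunk change drops the field's own forall m . MonadIO m quantifier and reuses the record's m instead, so every callback in BlockChunksI runs in the same monad. A reduced, self-contained sketch of the before/after shape (types and names simplified, not the project's definitions):

{-# LANGUAGE RankNTypes #-}
module ChunkIfaceSketch where

-- Before: the getter carried its own quantifier, so the record field
-- was higher-rank and m was chosen anew at every call site.
type GetChunkOld h = forall m . Monad m => h -> Int -> Int -> m (Maybe String)

-- After: m is a parameter of the synonym and is fixed by the record.
type GetChunkNew h m = h -> Int -> Int -> m (Maybe String)

data IfaceOld h = IfaceOld
  { chunkOld :: GetChunkOld h        -- needs RankNTypes
  }

data IfaceNew h m = IfaceNew
  { chunkNew :: GetChunkNew h m      -- plain Haskell 2010 field
  }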
@@ -90,6 +90,7 @@ instance Serialise (BlockChunks e)
 blockChunksProto :: forall e m . ( MonadIO m
                                  , Response e (BlockChunks e) m
+                                 , Pretty (Peer e)
                                  )
                  => BlockChunksI e m
                  -> BlockChunks e
@@ -100,10 +101,13 @@ blockChunksProto adapter (BlockChunks c p) =
     BlockGetAllChunks h size -> deferred proto do
       bsz <- blkSize adapter h
+      debug $ "bzs" <+> pretty bsz
       let offsets' = calcChunks (fromJust bsz) (fromIntegral size) :: [(Offset, Size)]
       let offsets = zip offsets' [0..]
       for_ offsets $ \((o,sz),i) -> do
+        p <- thatPeer proto
         chunk <- blkChunk adapter h o sz
         maybe (pure ()) (response_ . BlockChunk @e i) chunk
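For orientation: the handler asks the adapter for the block size, slices the block into (offset, size) pairs with calcChunks, numbers the chunks by zipping with [0..], and replies with one BlockChunk per pair. calcChunks itself is not part of this diff, so the following is only an illustrative stand-in that produces the same shape of result, assuming a positive chunk size:

module ChunkPlanSketch where

-- Illustrative stand-in for calcChunks (not the project's definition):
-- split a block of totalSize bytes into (offset, size) pairs of at most
-- chunkSize bytes each; chunkSize is assumed to be > 0.
chunkOffsets :: Integer -> Integer -> [(Integer, Integer)]
chunkOffsets totalSize chunkSize =
  [ (o, min chunkSize (totalSize - o)) | o <- [0, chunkSize .. totalSize - 1] ]

-- e.g. chunkOffsets 10 4 == [(0,4),(4,4),(8,2)]

-- Numbering mirrors the zip in the handler: chunk i starts at offset o.
numbered :: Integer -> Integer -> [((Integer, Integer), Int)]
numbered totalSize chunkSize = zip (chunkOffsets totalSize chunkSize) [0 ..]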
@@ -170,16 +174,22 @@ emptySessions = do
          { _sBlockHash = bh
          }

-newSession :: (Eq k, Hashable k)
+newSession :: (Eq k, Hashable k, MonadIO m)
            => s
            -> Getting (Cache k v) s (Cache k v)
            -> k
            -> v
-           -> IO ()
+           -> m ()

-newSession se l x = do
+newSession se l k v = do
   let cache = view l se
-  Cache.insert cache x
+  liftIO $ Cache.insert cache k v
+
+withNewSession se l k v m = newSession se l k v >> m
+
+getSession se l k = do
+  let cache = view l se
+  liftIO $ Cache.lookup cache k

 runFakePeer :: forall e . e ~ Fake => Sessions e -> EngineEnv e -> IO ()

 runFakePeer se env = do
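The commit adds withNewSession and getSession without type annotations. Below is a hedged, self-contained sketch of the signatures they plausibly carry, inferred from newSession and from Data.Cache's insert/lookup; the constraints, imports and module layout are my inference, not part of the commit:

module SessionSketch where

import Control.Lens (Getting, view)
import Control.Monad.IO.Class (MonadIO (..))
import Data.Cache (Cache)
import qualified Data.Cache as Cache
import Data.Hashable (Hashable)

-- Register a key/value pair in the cache selected by the optic l.
newSession :: (Eq k, Hashable k, MonadIO m)
           => s -> Getting (Cache k v) s (Cache k v) -> k -> v -> m ()
newSession se l k v = liftIO $ Cache.insert (view l se) k v

-- Register the session, then run the continuation.
withNewSession :: (Eq k, Hashable k, MonadIO m)
               => s -> Getting (Cache k v) s (Cache k v) -> k -> v -> m a -> m a
withNewSession se l k v action = newSession se l k v >> action

-- Look a session up in the same cache.
getSession :: (Eq k, Hashable k, MonadIO m)
           => s -> Getting (Cache k v) s (Cache k v) -> k -> m (Maybe v)
getSession se l k = liftIO $ Cache.lookup (view l se) k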
@@ -207,8 +217,6 @@ runFakePeer se env = do
   debug $ "I'm" <+> pretty pid <+> pretty root

-  simpleStorageStop storage

   let handleBlockInfo (p, h, sz) = do
         debug $ pretty p <+> "has block" <+> pretty h <+> pretty sz
@@ -218,8 +226,10 @@ runFakePeer se env = do
       BlockChunksI
       { blkSize = hasBlock storage
       , blkChunk = getChunk storage
-      , blkGetHash = liftIO . Cache.lookup (se ^. sBlockHash)
+      , blkGetHash = getSession se sBlockHash
-      , blkAcceptChunk = dontHandle
+      , blkAcceptChunk = \(h,n,bs) -> debug $ "got chunk" <+> pretty h
+                                                          <+> pretty n
+                                                          <+> pretty (B8.length bs)
       }

   runPeer env
@@ -229,6 +239,8 @@ runFakePeer se env = do
   cancel w

+  simpleStorageStop storage
+
   pure ()
@@ -261,9 +273,9 @@ test1 = do
   let cookie = 0
   let s0 = (fst . head) ee

-  liftIO $ newSession s0 sBlockHash cookie h
-  request p1 (BlockChunks @Fake cookie (BlockGetAllChunks h defChunkSize))
+  withNewSession s0 sBlockHash cookie h $ do
+    request p1 (BlockChunks @Fake cookie (BlockGetAllChunks h defChunkSize))

   pure ()
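The test change is a direct substitution using the helper added above: withNewSession registers the (cookie, block hash) session in s0's cache and only then runs the continuation, so the request cannot race ahead of the session entry.

-- Equivalence behind the test1 change (from the definition of withNewSession above):
--   withNewSession s0 sBlockHash cookie h $ do act
--     ==  newSession s0 sBlockHash cookie h >> act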
@@ -290,7 +302,7 @@ test1 = do
   -- How do we handle a cookie that may not be supported by a subprotocol?
   -- Require HasCookie from everyone?

-  pause ( 0.5 :: Timeout 'Seconds)
+  pause ( 1 :: Timeout 'Seconds)

   mapM_ cancel peerz