mirror of https://github.com/voidlizard/hbs2

commit 5d3d60778d (parent ec417fe3ef)

    gonna-fuckup
@@ -24,7 +24,7 @@ data Pipeline m a =
   , toQueue :: TBMQueue ( m a )
   }
 
-newPipeline :: forall a (m :: Type -> Type) . MonadIO m => Int -> m (Pipeline m a)
+newPipeline :: forall a (m1 :: Type -> Type) (m :: Type -> Type) . (MonadIO m, MonadIO m1) => Int -> m (Pipeline m1 a)
 newPipeline size = do
   tv <- liftIO $ TVar.newTVarIO False
   liftIO $ TBMQ.newTBMQueueIO size <&> Pipeline tv
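Note on the hunk above: the pipeline's element monad is now a separate type variable m1, so the monad that constructs the pipeline no longer has to be the monad whose actions the queue carries. A minimal sketch of what the generalized signature permits (hypothetical caller code, assuming the Pipeline type and newPipeline from the hunk above are in scope):

-- Hypothetical helper, not part of the diff: a pipeline of IO actions
-- can now be built from any MonadIO context, not only from IO itself.
mkIOPipeline :: MonadIO m => m (Pipeline IO ())
mkIOPipeline = newPipeline 100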
@@ -39,7 +39,7 @@ class Typeable a => Unkey a where
 instance Typeable a => Unkey a where
   unfuck _ = fromDynamic @a
 
-newSKey :: forall a . (Eq a, Typeable a, Unkey a, Hashable a, Show a) => a -> SKey
+newSKey :: forall a . (Eq a, Typeable a, Unkey a, Hashable a) => a -> SKey
 newSKey s = SKey (Proxy @a) (toDyn s)
 
 
@@ -143,8 +143,6 @@ instance ( MonadIO m
          , Typeable (SessionKey e p)
          , Typeable (SessionData e p)
          , Hashable (SessionKey e p)
-         , Show (SessionData e p)
-         , Show (SessionKey e p)
          ) => Sessions e p (ResponseM e m) where
 
   fetch i d k f = flip runEngineM (fetch i d k f) =<< asks (view engine)
@@ -160,8 +158,6 @@ instance ( MonadIO m
          , Typeable (SessionKey e p)
          , Typeable (SessionData e p)
          , Hashable (SessionKey e p)
-         , Show (SessionData e p)
-         , Show (SessionKey e p)
          ) => Sessions e p (EngineM e m) where
 
 
@@ -170,8 +166,6 @@ instance ( MonadIO m
     let sk = newSKey @(SessionKey e p) k
     let ddef = toDyn def
 
-    liftIO $ print ("fetch!", show k)
-
     r <- liftIO $ Cache.lookup se sk
 
     case r of
@@ -183,10 +177,7 @@ instance ( MonadIO m
   update def k f = do
     se <- asks (view sessions)
     val <- fetch @e @p True def k id
-    liftIO $ print "UPDATE !!!!"
     liftIO $ Cache.insert se (newSKey @(SessionKey e p) k) (toDyn (f val))
-    z <- liftIO $ Cache.lookup se (newSKey k)
-    liftIO $ print $ ("INSERTED SHIT", z)
 
   expire k = do
     se <- asks (view sessions)
@@ -266,10 +257,5 @@ runPeer env@(EngineEnv {bus = pipe, defer = d}) hh = do
             , handle = h
             }) -> maybe (pure ()) (runResponseM ee pip . h) (decoder msg)
 
--- FIXME: slow and dumb
-instance {-# OVERLAPPABLE #-} (MonadIO m, Num (Cookie e)) => GenCookie e (EngineM e m) where
-  genCookie salt = do
-    r <- liftIO $ Random.randomIO @Int
-    pure $ fromInteger $ fromIntegral $ asWord32 $ hash32 (hash salt + r)
 
 
@@ -19,6 +19,9 @@ defPipelineSize = 100
 defChunkWriterQ :: Integral a => a
 defChunkWriterQ = 100
 
+defBlockDownloadQ :: Integral a => a
+defBlockDownloadQ = 100
+
 defBlockDownloadThreshold :: Integral a => a
 defBlockDownloadThreshold = 2
 
@@ -1,6 +1,7 @@
 {-# Language TypeFamilyDependencies #-}
 {-# Language FunctionalDependencies #-}
 {-# Language AllowAmbiguousTypes #-}
+{-# Language UndecidableInstances #-}
 module HBS2.Net.Proto.Types
   ( module HBS2.Net.Proto.Types
   ) where
@@ -11,6 +12,8 @@ import Data.Proxy
 import Data.Hashable
 import Control.Monad.IO.Class
 import Data.Typeable
+import System.Random qualified as Random
+import Data.Digest.Murmur32
 
 -- e -> Transport (like, UDP or TChan)
 -- p -> L4 Protocol (like Ping/Pong)
@@ -93,3 +96,10 @@ class (KnownNat (ProtocolId p), HasPeer e) => HasProtocol e p | p -> e where
   decode :: Encoded e -> Maybe p
   encode :: p -> Encoded e
 
+
+-- FIXME: slow and dumb
+instance {-# OVERLAPPABLE #-} (MonadIO m, Num (Cookie e)) => GenCookie e m where
+  genCookie salt = do
+    r <- liftIO $ Random.randomIO @Int
+    pure $ fromInteger $ fromIntegral $ asWord32 $ hash32 (hash salt + r)
+
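The instance added above is the GenCookie instance removed from the engine module earlier in this diff, re-homed next to the protocol types and generalized from EngineM e m to any MonadIO m. A standalone sketch of the same cookie derivation, assuming the Data.Digest.Murmur32 and Data.Hashable APIs imported above; sketchCookie is a hypothetical name, not part of the diff:

{-# LANGUAGE TypeApplications #-}
import Data.Digest.Murmur32 (asWord32, hash32)
import Data.Hashable (Hashable, hash)
import Data.Word (Word32)
import qualified System.Random as Random

-- Mix fresh randomness into the salt's hash, then truncate the Murmur32
-- digest to a Word32, mirroring genCookie above.
sketchCookie :: Hashable salt => salt -> IO Word32
sketchCookie salt = do
  r <- Random.randomIO @Int
  pure $ asWord32 $ hash32 (hash salt + r)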
@@ -7,6 +7,7 @@ module Main where
 import HBS2.Prelude.Plated
 import HBS2.Clock
 import HBS2.Hash
+import HBS2.Actors
 -- import HBS2.Net.Messaging
 import HBS2.Net.Proto
 import HBS2.Net.Proto.BlockInfo
@@ -48,9 +49,12 @@ import Control.Concurrent
 import Data.Default
 import Control.Monad.Reader
 import Data.Dynamic
+import Data.Kind
 
 import Control.Concurrent.STM
 import Control.Concurrent.STM.TQueue qualified as Q
+import Control.Concurrent.STM.TBQueue qualified as TBQ
+import Control.Concurrent.STM.TBQueue (TBQueue)
 
 debug :: (MonadIO m) => Doc ann -> m ()
 debug p = liftIO $ hPrint stderr p
@@ -62,35 +66,19 @@ debug p = liftIO $ hPrint stderr p
 -- but client's cookie in protocol should be just ( cookie :: Word32 )
 
 
-data BlockDownload m =
+data BlockDownload =
   BlockDownload
   { _sBlockHash :: Hash HbSync
   , _sBlockChunkSize :: ChunkSize
   , _sBlockOffset :: Offset
   , _sBlockWritten :: Size
-  , _sOnBlockReady :: OnBlockReady HbSync m
   }
 
-data MySessions e m =
-  Sessions
-  { _sBlockDownload :: Cache (Peer e, Cookie e) (BlockDownload m)
-  , _sBlockSizes :: Cache (Hash HbSync) (Map (Peer e) Size)
-  , _sBlockSize :: Cache (Hash HbSync) Size
-  }
-
-
-makeLenses 'Sessions
 makeLenses 'BlockDownload
 
-newBlockDownload :: forall m . MonadIO m
-  => Hash HbSync
-  -> OnBlockReady HbSync m
-  -> BlockDownload m
+newBlockDownload :: Hash HbSync -> BlockDownload
 
 newBlockDownload h = BlockDownload h 0 0 0
 
 
 
 data Fake
 
 instance HasPeer Fake where
|
||||||
|
|
||||||
|
|
||||||
type instance SessionData Fake (BlockSize Fake) = BlockSizeSession Fake
|
type instance SessionData Fake (BlockSize Fake) = BlockSizeSession Fake
|
||||||
|
type instance SessionData Fake (BlockChunks Fake) = BlockDownload
|
||||||
|
|
||||||
|
newtype instance SessionKey Fake (BlockChunks Fake) =
|
||||||
|
DownloadSessionKey (Peer Fake, Cookie Fake)
|
||||||
|
deriving newtype (Eq, Hashable)
|
||||||
|
deriving stock (Generic)
|
||||||
|
|
||||||
newtype BlockSizeSession e =
|
newtype BlockSizeSession e =
|
||||||
BlockSizeSession
|
BlockSizeSession
|
||||||
|
@@ -143,63 +137,56 @@ main = do
   -- ]
 
 
-emptySessions :: forall e m . MonadIO m => m (MySessions e m)
-emptySessions = liftIO $
-  Sessions <$> Cache.newCache (Just defCookieTimeout)
-           <*> Cache.newCache (Just defBlockInfoTimeout)
-           <*> Cache.newCache (Just defBlockInfoTimeout)
+newtype PeerEvents e (m :: Type -> Type) =
+  PeerEvents
+  { onBlockSize :: TVar (Map (Hash HbSync) [HasBlockEvent HbSync e m])
+  }
 
-newSession :: (Eq k, Hashable k,MonadIO m)
-  => s
-  -> Getting (Cache k v) s (Cache k v)
-  -> k
-  -> v
+newPeerEventsIO :: forall e m . MonadIO m => IO (PeerEvents e m)
+newPeerEventsIO = PeerEvents <$> newTVarIO mempty
+
+addBlockSizeEventNotify :: forall e m . (MonadIO m)
+  => PeerEvents e m
+  -> Hash HbSync
+  -> HasBlockEvent HbSync e m
   -> m ()
 
-newSession se l k v = do
-  let cache = view l se
-  liftIO $ Cache.insert cache k v
+addBlockSizeEventNotify pe h e = do
+  void $ liftIO $ atomically $ modifyTVar' (onBlockSize pe) (Map.insertWith (<>) h [e])
 
-getSession' se l k fn = do
-  let cache = view l se
-  liftIO $ Cache.lookup cache k <&> fmap fn
+emitBlockSizeEvent :: MonadIO m
+  => PeerEvents e m
+  -> Hash HbSync
+  -> (Peer e, Hash HbSync, Maybe Integer)
+  -> m ()
 
-getSession se l k = getSession' se l k id
+emitBlockSizeEvent pe h event = do
+  ev <- liftIO $ atomically $ stateTVar (onBlockSize pe) alter
+  for_ ev $ \e -> e event
 
-updSession se def l k fn = liftIO do
-  let cache = view l se
-  v <- Cache.fetchWithCache cache k (const $ pure def)
-  Cache.insert cache k (fn v)
+  where
+    alter m =
+      let ev = Map.lookup h m
+      in (mconcat (maybeToList ev), Map.delete h m)
 
-delSession se l k = liftIO do
-  Cache.delete (view l se) k
-
-expireSession se l = liftIO do
-  Cache.purgeExpired (view l se)
-
--- A questionable FIX to avoid "orphans" complains
-data Adapted e = Adapted
 
 
--- newtype FullPeerM m a = RealPeerM { fromRealPeerM :: ReaderT }
-runFakePeer :: forall e b . ( e ~ Fake
--- , m ~ IO
+runFakePeer :: forall e b m . ( e ~ Fake
+  , MonadIO m
   , Messaging b e ByteString
   -- , MonadIO m
   -- , Response e p m
   -- , EngineM e m
   )
-  => Peer e
+  => PeerEvents e m
+  -> Peer e
   -> b
-  -> EngineM e IO ()
+  -> EngineM e m ()
   -> IO ()
-runFakePeer p0 bus work = do
+runFakePeer ev p0 bus work = do
 
   env <- newEnv p0 bus
 
-  se <- emptySessions @e
-
   let pid = fromIntegral (hash (env ^. self)) :: Word8
 
   dir <- liftIO $ canonicalizePath ( ".peers" </> show pid)
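The PeerEvents machinery introduced above replaces the ad-hoc MySessions caches with a subscribe/emit scheme: addBlockSizeEventNotify registers a callback keyed by block hash, and emitBlockSizeEvent atomically drains the callbacks registered for that hash before running them, so each subscriber fires at most once. A minimal standalone sketch of the same one-shot listener pattern, using plain String keys and IO callbacks instead of Hash HbSync and HasBlockEvent (all names here are hypothetical, not part of the diff):

import Control.Concurrent.STM (TVar, atomically, modifyTVar', newTVarIO, stateTVar)
import Data.Foldable (for_)
import Data.Map (Map)
import qualified Data.Map as Map
import Data.Maybe (maybeToList)

type Listener a = a -> IO ()

-- register a callback under a key, keeping any callbacks already there
subscribe :: TVar (Map String [Listener a]) -> String -> Listener a -> IO ()
subscribe tv k f = atomically $ modifyTVar' tv (Map.insertWith (<>) k [f])

-- atomically remove the callbacks for a key, then run them;
-- a second emit for the same key finds nothing and is a no-op
emit :: TVar (Map String [Listener a]) -> String -> a -> IO ()
emit tv k x = do
  ls <- atomically $ stateTVar tv $ \m ->
          (mconcat (maybeToList (Map.lookup k m)), Map.delete k m)
  for_ ls ($ x)

main :: IO ()
main = do
  tv <- newTVarIO mempty
  subscribe tv "block-1" print
  emit tv "block-1" (42 :: Int)  -- prints 42
  emit tv "block-1" (43 :: Int)  -- listeners already drained; prints nothing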
@@ -233,25 +220,14 @@ runPeer env@(EngineEnv {bus = pipe, defer = d}) hh = do
     maybe1 sz' (pure ()) $ \sz -> do
       let bsz = fromIntegral sz
 
-      z <- fetch @e False def (BlockSizeKey h) id
-      liftIO $ print ("QQQQQ", pretty p0, pretty p, z)
-
       update @e def (BlockSizeKey h) (over bsBlockSizes (Map.insert p bsz))
+      emitBlockSizeEvent ev h (p, h, Just sz)
 
-      -- here we cache block size information
-      updSession se mempty sBlockSizes h (Map.insert p bsz)
-      updSession se bsz sBlockSize h (const bsz)
-
-      debug $ pretty p <+> "has block" <+> pretty h <+> pretty sz'
-
-      z <- fetch @e False def (BlockSizeKey h) id
-      liftIO $ print ("BEBEBE", pretty p0, pretty p, z)
-
   let adapter =
         BlockChunksI
         { blkSize = hasBlock storage
         , blkChunk = getChunk storage
-        , blkGetHash = \c -> getSession' se sBlockDownload c (view sBlockHash)
+        , blkGetHash = error "FUCK" -- FIXME! \c -> getSession' se sBlockDownload c (view sBlockHash)
 
        -- AS SOON AS ALL THE CHUNKS ARE IN (THE LAST CHUNK HAS ARRIVED):
        -- COMPUTE THE HASH OF WHAT WE GOT
@@ -261,55 +237,58 @@ runPeer env@(EngineEnv {bus = pipe, defer = d}) hh = do
        -- DROP THE COOKIE?
        , blkAcceptChunk = \(c,p,h,n,bs) -> void $ runMaybeT $ do
 
-           let cKey = (p,c)
+           let cKey = DownloadSessionKey (p,c)
 
           -- check if there is a session
-           void $ MaybeT $ getSession' se sBlockDownload cKey id
+           -- FIXME
+           -- void $ MaybeT $ getSession' se sBlockDownload cKey id
 
-           let def = newBlockDownload h dontHandle
+           let de = newBlockDownload h
 
           let bslen = fromIntegral $ B8.length bs
           -- TODO: log this situation
-           mbSize <- MaybeT $ getSession' se sBlockSizes h (Map.lookup p) <&> fromMaybe Nothing
-           mbChSize <- MaybeT $ getSession' se sBlockDownload cKey (view sBlockChunkSize)
+           -- FIXME
+           -- mbSize <- MaybeT $ getSession' se sBlockSizes h (Map.lookup p) <&> fromMaybe Nothing
+           -- mbChSize <- MaybeT $ getSession' se sBlockDownload cKey (view sBlockChunkSize)
 
-           let offset = fromIntegral n * fromIntegral mbChSize :: Offset
+           -- let offset = fromIntegral n * fromIntegral mbChSize :: Offset
 
-           updSession se def sBlockDownload cKey (over sBlockOffset (max offset))
+           -- updSession se de sBlockDownload cKey (over sBlockOffset (max offset))
 
-           liftIO $ do
-             writeChunk cww cKey h offset bs
-             updSession se def sBlockDownload cKey (over sBlockWritten (+bslen))
+           -- liftIO $ do
+           -- writeChunk cww cKey h offset bs
+           -- updSession se de sBlockDownload cKey (over sBlockWritten (+bslen))
 
-           dwnld <- MaybeT $ getSession' se sBlockDownload cKey id
+           -- dwnld <- MaybeT $ getSession' se sBlockDownload cKey id
 
-           let maxOff = view sBlockOffset dwnld
-           let written = view sBlockWritten dwnld
-           let notify = view sOnBlockReady dwnld
+           -- let maxOff = view sBlockOffset dwnld
+           -- let written = view sBlockWritten dwnld
+           -- let notify = view sOnBlockReady dwnld
 
-           let mbDone = (maxOff + fromIntegral mbChSize) > fromIntegral mbSize
-                          && written >= mbSize
+           -- let mbDone = (maxOff + fromIntegral mbChSize) > fromIntegral mbSize
+           -- && written >= mbSize
 
-           when mbDone $ lift do
-             deferred (Proxy @(BlockChunks e)) $ do
-               h1 <- liftIO $ getHash cww cKey h
+           -- when mbDone $ lift do
+           -- deferred (Proxy @(BlockChunks e)) $ do
+           -- h1 <- liftIO $ getHash cww cKey h
 
-               -- COMPUTE THE HASH
-               -- IF IT MATCHES - FINALIZE THE BLOCK
-               -- IF IT DOESN'T MATCH - KEEP WAITING
-               when ( h1 == h ) $ do
-                 lift $ commitBlock cww cKey h
-                 lift $ notify h
-                 delSession se sBlockDownload cKey
+           -- -- COMPUTE THE HASH
+           -- -- IF IT MATCHES - FINALIZE THE BLOCK
+           -- -- IF IT DOESN'T MATCH - KEEP WAITING
+           -- when ( h1 == h ) $ do
+           -- lift $ commitBlock cww cKey h
+           -- lift $ notify h
+           -- delSession se sBlockDownload cKey
 
-           when (written > mbSize * defBlockDownloadThreshold) $ do
-             debug $ "SESSION DELETED BECAUSE THAT PEER IS JERK:" <+> pretty p
-             delSession se sBlockDownload cKey
+           -- when (written > mbSize * defBlockDownloadThreshold) $ do
+           -- debug $ "SESSION DELETED BECAUSE THAT PEER IS JERK:" <+> pretty p
+           -- delSession se sBlockDownload cKey
           -- IF WE HANG HERE TOO LONG, WE CAN BE DDOSED
           -- BY SENDING ONLY SOME OF A BLOCK'S CHUNKS, OR BY SENDING
          -- INDIVIDUAL CHUNKS OVER AND OVER, WHILE WE KEEP COMPUTING HASHES.
          -- THAT WON'T DO
          -- SO WE WAIT FOR, SAY, 2*mbSize HERE AND THEN BAIL OUT
+           pure ()
        }
 
   peer <- async $ runPeer env
@@ -333,45 +312,61 @@ test1 = do
 
   fake <- newFakeP2P True
 
-  void $ race (pause (10 :: Timeout 'Seconds)) $ do
+  void $ race (pause (2 :: Timeout 'Seconds)) $ do
 
     let p0 = 0 :: Peer Fake
     let p1 = 1 :: Peer Fake
 
-    p1Thread <- async $ runFakePeer p1 fake (liftIO $ forever yield)
+    ev1 <- liftIO newPeerEventsIO
+    ev0 <- liftIO newPeerEventsIO
 
-    p0Thread <- async $ runFakePeer p0 fake $ do
+    p1Thread <- async $ runFakePeer ev1 p1 fake (liftIO $ forever yield)
 
-      let h1 = "5KP4vM6RuEX6RA1ywthBMqZV5UJDLANC17UrF6zuWdRt"
-      let h0 = "81JeD7LNR6Q7RYfyWBxfjJn1RsWzvegkUXae6FUNgrMZ"
+    let ini = [ "5KP4vM6RuEX6RA1ywthBMqZV5UJDLANC17UrF6zuWdRt"
+              , "81JeD7LNR6Q7RYfyWBxfjJn1RsWzvegkUXae6FUNgrMZ"
+              ]
 
-      -- fetch @Fake @(BlockSize Fake) True def h id
-      -- update @Fake @(BlockSize Fake) def (fromString h1) (over bsBlockSizes (Map.insert p1 111))
-      update @Fake @(BlockSize Fake) def (BlockSizeKey (fromString h0)) (over bsBlockSizes (Map.insert p0 100))
+    blkQ <- liftIO $ do
+      b <- newTBQueueIO defBlockDownloadQ
+      traverse_ (atomically . TBQ.writeTBQueue b) ini
+      pure b
 
-      -- request p1 (GetBlockSize @Fake (fromString h1))
-      request p0 (GetBlockSize @Fake (fromString h0))
+    p0Thread <- async $ runFakePeer ev0 p0 fake $ do
 
-      se1 <- fetch @Fake @(BlockSize Fake) False def (BlockSizeKey (fromString h0)) id
-      -- se2 <- fetch @Fake @(BlockSize Fake) False def (fromString h1) id
+      let knownPeers = [p1]
 
-      jopa <- asks (view sessions)
+      fix \next -> do
 
-      wtf <- liftIO $ Cache.lookup jopa (newSKey @(SessionKey Fake (BlockSize Fake)) (BlockSizeKey (fromString h0)))
+        -- ACTUALLY WE DON'T NEED TO WAIT FOR THE BLOCK INFO.
+        -- WE SHOULD START DOWNLOADING THE BLOCK AS SOON AS
+        -- ANY INFO ABOUT IT SHOWS UP
 
-      pause ( 2 :: Timeout 'Seconds)
+        blkHash <- liftIO $ atomically $ TBQ.readTBQueue blkQ
 
-      liftIO $ print $ (p0, "AAAAAA", se1, fromDynamic @(SessionData Fake (BlockSize Fake)) (fromJust wtf))
-      -- updateSession cookie (id)
-      -- se <- getSession cookie (lens)
-      -- cookie <- newSession ???
+        -- TODO: track this, the block may already have been found
+        -- or ask every peer
 
-      -- newCookie <- genCookie @Fake (p1, h) -- <<~~~ FIXME: generate a good session id!
-      -- let cKey@(_, cookie) = (p1, newCookie)
 
+        addBlockSizeEventNotify ev0 blkHash $ \case
+          (p, h, Just _) -> do
+            -- coo <- genCookie (p,blkHash)
+            -- let key = DownloadSessionKey (p, coo)
+            -- let new = newBlockDownload blkHash
+            -- update @Fake new key id
+            -- (over bsBlockSizes (Map.insert p bsz))
+            request p (GetBlockSize @Fake blkHash)
+            -- liftIO $ print $ "DAVAI BLOCK!" <+> pretty h
+            -- update
+            -- let q = pure ()
            pure ()
 
+          _ -> pure ()
+
+        -- FIRST COME, FIRST SERVED
+        for_ knownPeers $ \who ->
+          request who (GetBlockSize @Fake blkHash)
+
+        next
+
     let peerz = p0Thread : [p1Thread]
 
     -- peerz <- mapM (async . uncurry runFakePeer) ee
@@ -427,7 +422,7 @@ test1 = do
 
   -- pure ()
 
-  pause ( 5 :: Timeout 'Seconds)
+  pause ( 1 :: Timeout 'Seconds)
 
   mapM_ cancel peerz
 