maffia committed
Commit 92b0f4c
1 Parent(s): 958201d

Upload dht_utils.py

Files changed (1)
dht_utils.py +132 -0
dht_utils.py ADDED
@@ -0,0 +1,132 @@
+ """
+ Utilities for declaring and retrieving active model layers using a shared DHT.
+ """
+ from __future__ import annotations
+
+ from functools import partial
+ from typing import Dict, List, Optional, Sequence, Union
+
+ from hivemind.dht import DHT, DHTNode, DHTValue
+ from hivemind.moe.client.remote_expert_worker import RemoteExpertWorker
+ from hivemind.p2p import P2P, PeerID
+ from hivemind.utils import DHTExpiration, MPFuture, get_dht_time, get_logger, use_hivemind_log_handler
+
+ import src
+ from src.data_structures import CHAIN_DELIMITER, UID_DELIMITER, ModuleUID, RemoteModuleInfo
+
+ use_hivemind_log_handler("in_root_logger")
+ logger = get_logger(__file__)
+
+
+ def declare_active_modules(
+     dht: DHT,
+     uids: Sequence[ModuleUID],
+     expiration_time: DHTExpiration,
+     throughput: Optional[float] = None,
+     wait: bool = True,
+ ) -> Union[Dict[ModuleUID, bool], MPFuture[Dict[ModuleUID, bool]]]:
+     """
+     Declare that your node serves the specified modules; update timestamps if declared previously.
+
+     :param uids: a list of module ids to declare
+     :param wait: if True, wait for the declaration to finish; otherwise run it in the background
+     :param throughput: optionally specify your performance in terms of compute throughput
+     :param expiration_time: declared modules will be visible until this point in time (as given by get_dht_time)
+     :returns: if wait is True, returns the store status for every key (True = store succeeded, False = store rejected)
+     """
+     if isinstance(uids, str):
+         uids = [uids]
+     if not isinstance(uids, list):
+         uids = list(uids)
+     for uid in uids:
+         assert isinstance(uid, ModuleUID) and UID_DELIMITER in uid and CHAIN_DELIMITER not in uid
+     return dht.run_coroutine(
+         partial(_declare_active_modules, uids=uids, expiration_time=expiration_time, throughput=throughput),
+         return_future=not wait,
+     )
+
+
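For context, a minimal server-side usage sketch (illustrative only: the DHT below is a fresh single-node instance, and the UIDs are made up, assuming UID_DELIMITER is "."):

    from hivemind import DHT
    from hivemind.utils import get_dht_time

    dht = DHT(start=True)  # illustration; a real server would join an existing swarm
    statuses = declare_active_modules(
        dht,
        uids=["bloom.h.0", "bloom.h.1"],      # hypothetical UIDs containing UID_DELIMITER
        expiration_time=get_dht_time() + 60,  # visible for roughly 60 seconds
        throughput=1.0,
    )
    assert all(statuses.values())  # True = the store succeeded for that UID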
+ async def _declare_active_modules(
+     dht: DHT,
+     node: DHTNode,
+     uids: List[ModuleUID],
+     expiration_time: DHTExpiration,
+     throughput: Optional[float] = None,
+ ) -> Dict[ModuleUID, bool]:
+     num_workers = len(uids) if dht.num_workers is None else min(len(uids), dht.num_workers)
+     return await node.store_many(
+         keys=uids,
+         subkeys=[dht.peer_id.to_base58()] * len(uids),
+         values=[throughput] * len(uids),
+         expiration_time=expiration_time,
+         num_workers=num_workers,
+     )
+
+
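Each store above writes one dictionary-valued DHT record per module UID; the layout sketched below is inferred from the arguments to node.store_many (the concrete UID and throughput are made up):

    # key:     "bloom.h.0"              <- the ModuleUID being declared
    # subkey:  dht.peer_id.to_base58()  <- the declaring server's peer id
    # value:   1.0                      <- the declared throughput (may be None)
    # expires: the expiration_time passed in by the caller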
+ def get_remote_module(
+     dht: DHT,
+     uid_or_uids: Union[ModuleUID, List[ModuleUID]],
+     expiration_time: Optional[DHTExpiration] = None,
+     return_future: bool = False,
+ ) -> Union[List[Optional[src.RemoteTransformerBlock]], MPFuture[List[Optional[src.RemoteTransformerBlock]]]]:
+     """
+     :param uid_or_uids: find one or more modules with these ids from across the DHT
+     :param expiration_time: if specified, return modules that expire no sooner than this (based on get_dht_time)
+     :param return_future: if False (default), return when finished. Otherwise return MPFuture and run in background.
+     :returns: a single RemoteTransformerBlock (or None) if given one uid, else a list of [RemoteTransformerBlock if found else None]
+     """
+     single_uid = isinstance(uid_or_uids, ModuleUID)
+     uids = [uid_or_uids] if single_uid else uid_or_uids
+     infos = dht.run_coroutine(
+         partial(_get_remote_module_infos, uids=uids, expiration_time=expiration_time), return_future
+     )
+
+     if return_future:
+
+         async def _unpack(infos_future: MPFuture, dht: DHT):
+             p2p = await dht.replicate_p2p()
+             modules = _create_remote_modules_from_infos(await infos_future, p2p)
+             return modules[0] if single_uid else modules
+
+         return RemoteExpertWorker.run_coroutine(_unpack(infos, dht), return_future)
+     p2p = RemoteExpertWorker.run_coroutine(dht.replicate_p2p())
+     modules = _create_remote_modules_from_infos(infos, p2p)
+     return modules[0] if single_uid else modules
+
+
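A matching client-side sketch (same hypothetical UIDs as above; an entry comes back as None when no live server has declared it recently):

    uids = ["bloom.h.0", "bloom.h.1"]
    blocks = get_remote_module(dht, uids)
    for uid, block in zip(uids, blocks):
        if block is None:
            print(f"{uid}: no live server found")

Note that passing a single uid string returns a single block (or None) rather than a one-element list.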
+ async def _get_remote_module_infos(
+     dht: DHT, node: DHTNode, uids: List[ModuleUID], expiration_time: Optional[DHTExpiration]
+ ) -> List[Optional[RemoteModuleInfo]]:
+     if expiration_time is None:
+         expiration_time = get_dht_time()
+     num_workers = len(uids) if dht.num_workers is None else min(len(uids), dht.num_workers)
+     found: Dict[ModuleUID, DHTValue] = await node.get_many(uids, expiration_time, num_workers=num_workers)
+
+     modules: List[Optional[RemoteModuleInfo]] = [None] * len(uids)
+     for i, uid in enumerate(uids):
+         metadata = found[uid]
+         if metadata is None or not isinstance(metadata.value, dict):
+             if metadata is not None:
+                 logger.error(f"Incorrect metadata for {uid}: {metadata}")
+             continue
+         valid_entries = set()
+         for maybe_peer_id, _unused_value in metadata.value.items():
+             try:
+                 valid_entries.add(PeerID.from_base58(maybe_peer_id))
+             except Exception:
+                 logger.error(f"Incorrect peer entry for {uid}: {maybe_peer_id}")
+         if valid_entries:
+             modules[i] = RemoteModuleInfo(uid, valid_entries)
+     return modules
+
+
+ def _create_remote_modules_from_infos(
+     infos: Sequence[Optional[RemoteModuleInfo]], p2p: P2P
+ ) -> List[Optional[src.RemoteTransformerBlock]]:
+     modules: List[Optional[src.RemoteTransformerBlock]] = []
+     for info in infos:
+         if info is not None:
+             modules.append(src.RemoteTransformerBlock(info, p2p))
+         else:
+             modules.append(None)
+     return modules
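Finally, a sketch of the non-blocking path through get_remote_module, which hands back a hivemind MPFuture instead of blocking the caller (the UID is again hypothetical):

    future = get_remote_module(dht, "bloom.h.0", return_future=True)
    # ... do other work while the lookup runs in the background ...
    block = future.result()  # src.RemoteTransformerBlock, or None if not found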