code (string, lengths 23-981k) | language (string, 2 classes) | AST_depth (int64, -1 to 40) | alphanumeric_fraction (float64, 0 to 1) | max_line_length (int64, 0 to 632k) | avg_line_length (float64, 0 to 15.4k) | num_lines (int64, 0 to 3.86k) | original_docstring (string, lengths 7-42.9k) | source (string, 1 class) |
---|---|---|---|---|---|---|---|---|
class TimeSeriesResponse:
"""Grafana timeseries response"""
target: str
datapoints: List[List[Union[int, float]]]
def serialize(self):
return {
"target": self.target,
"datapoints": self.datapoints
} | python | 15 | 0.588235 | 45 | 24.6 | 10 | Grafana timeseries response | class |
class TableResponse:
"""Grafana table response"""
columns: List[Dict[str, str]]
rows: List[List[Union[int, float, str]]]
type: str = "table"
def serialize(self):
return {
"type": self.type,
"columns": self.columns,
"rows": self.rows
} | python | 15 | 0.530945 | 44 | 24.666667 | 12 | Grafana table response | class |
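A minimal usage sketch for the two Grafana response classes above, assuming they are in scope. Neither declares an __init__, so attributes are assigned after instantiation; returning a JSON list of serialized responses is the usual JSON-datasource convention and is not shown in the original code.

import json

ts = TimeSeriesResponse()
ts.target = "cpu.load"
ts.datapoints = [[0.42, 1609459200000], [0.57, 1609459260000]]

table = TableResponse()
table.columns = [{"text": "host", "type": "string"}]
table.rows = [["web-01"], ["web-02"]]

# A /query handler would typically return a JSON list of serialized responses.
print(json.dumps([ts.serialize(), table.serialize()]))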
class CaffeCudaContext:
"""See doc of `caffe_cuda_context`"""
def __init__(self):
import caffe
if not caffe.check_mode_gpu():
raise ValueError(
"PyCuda cannot be used if Caffe is not in GPU mode.")
self.ctx_ = cuda.Context.attach()
def __del__(self):
self.ctx_.detach() | python | 11 | 0.555556 | 69 | 30.181818 | 11 | See doc of `caffe_cuda_context` | class |
class AbstractCredentialValidator:
"""An abstract CredentialValidator, when inherited it must validate self.user credentials
    against self.action"""
encoding_map = {
b'0': 'gsm0338',
b'1': 'ascii',
b'2': None, #OCTET_UNSPECIFIED
b'3': 'iso-8859-1',
b'4': None, # OCTET_UNSPECIFIED_COMMON
b'5': 'shift_jis',
b'6': 'iso-8859-5',
b'7': 'iso-8859-8',
b'8': 'utf_16_be',
b'9': 'shift_jis', #https://en.wikipedia.org/wiki/Short_Message_Peer-to-Peer#Unclear_support_for_Shift-JIS_encoding
b'10': 'iso2022_jp',
        # jisx0212 doesn't exist in python codecs but it looks like it's decodable/encodable in iso2022_jp assuming the escape patterns are included
# https://github.com/python/cpython/blob/master/Modules/cjkcodecs/_codecs_iso2022.c#L51
b'13': 'iso2022_jp',
b'14': 'ksc5601'
}
def __init__(self, action, user):
self.action = action
self.user = user
def updatePDUWithUserDefaults(self, PDU):
"""Must update PDU.params from User credential defaults whenever a
PDU.params item is None"""
raise NotImplementedError()
def validate(self):
"Must validate requests through Authorizations and ValueFilters credential check"
raise NotImplementedError() | python | 8 | 0.630208 | 146 | 36.361111 | 36 | An abstract CredentialValidator, when inherited it must validate self.user credentials
against self.action | class |
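A hypothetical concrete validator sketched against the abstract class above, assuming it is in scope; the permissive behaviour is illustrative only and not part of the original code.

class AllowAllCredentialValidator(AbstractCredentialValidator):
    def updatePDUWithUserDefaults(self, PDU):
        # A real implementation would fill None PDU.params from user defaults.
        return PDU

    def validate(self):
        # A real implementation would check Authorizations and ValueFilters.
        return True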
class KukurFlightServer:
"""KukurFlightServer exposes the data sources provided by a SourceFactory over Arrow Flight."""
def __init__(self, source: Source):
self.__source = source
def search(self, _, action: fl.Action) -> Generator[bytes, None, None]:
"""Search a data source for time series.
This returns either a SeriesSelector or Metadata as JSON, depending on
what is supported by the source."""
request = json.loads(action.body.to_pybytes())
selector = SeriesSelector(request["source"], request["name"])
for result in self.__source.search(selector):
if isinstance(result, Metadata):
assert result.series.name is not None
metadata = result.to_data()
yield json.dumps(metadata).encode()
else:
assert result.name is not None
series = {
"source": result.source,
"name": result.name,
}
yield json.dumps(series).encode()
def get_metadata(self, _, action: fl.Action) -> List[bytes]:
"""Return metadata for the given time series as JSON."""
request = json.loads(action.body.to_pybytes())
selector = SeriesSelector(request["source"], request["name"])
metadata = self.__source.get_metadata(selector).to_data()
return [json.dumps(metadata).encode()]
def get_data(self, _, request) -> Any:
"""Return time series data as Arrow data."""
selector = SeriesSelector(
request["selector"]["source"], request["selector"]["name"]
)
start_date = parse_date(request["start_date"])
end_date = parse_date(request["end_date"])
data = self.__source.get_data(selector, start_date, end_date)
return fl.RecordBatchStream(data) | python | 16 | 0.592712 | 99 | 43.452381 | 42 | KukurFlightServer exposes the data sources provided by a SourceFactory over Arrow Flight. | class |
class EnvironmentDescriptor:
"""
This class should describe an instance of an environment.
The class has to be easily picklable and able to create new environments.
It also has to be able to describe itself using metadata.
"""
def create_environment(self, workdir: Path) -> Environment:
raise NotImplementedError
def name(self) -> str:
raise NotImplementedError
def parameters(self) -> Dict[str, Any]:
raise NotImplementedError
def metadata(self) -> Dict[str, Any]:
return {} | python | 8 | 0.676417 | 77 | 29.444444 | 18 |
This class should describe an instance of an environment.
The class has to be easily picklable and able to create new environments.
It also has to be able to describe itself using metadata.
| class |
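A hypothetical concrete descriptor sketched against the interface above, assuming the base class, Path, Dict, Any and an Environment type are in scope; ShellEnvironment is a stand-in and not part of the original code.

class ShellEnvironmentDescriptor(EnvironmentDescriptor):
    def __init__(self, command: str):
        self.command = command

    def create_environment(self, workdir: Path) -> Environment:
        # Stand-in: construct whatever Environment implementation the project provides.
        return ShellEnvironment(self.command, workdir)

    def name(self) -> str:
        return "shell"

    def parameters(self) -> Dict[str, Any]:
        return {"command": self.command}

    def metadata(self) -> Dict[str, Any]:
        return {"picklable": True}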
class User:
"""
class that generates new instances of users
"""
users_list =[] #Empty user list
def __init__(self,first_name,last_name,password):
"""
Method to define the properties of the object
"""
self.first_name = first_name
self.last_name = last_name
self.password = password
def save_user(self):
"""
save user details method into users_list
"""
User.users_list.append(self) | python | 9 | 0.566735 | 53 | 22.238095 | 21 |
class that generates new instances of users
| class |
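A short usage sketch, assuming the User class above is in scope.

user = User("Ada", "Lovelace", "s3cret")
user.save_user()
print(len(User.users_list))   # 1 -- saved instances accumulate on the class attribute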
class BaseWorkflowTransformer:
"""
Base class for all transformers
"""
def process_workflow_after_parse_workflow_xml(self, workflow: Workflow):
pass
def process_workflow_after_convert_nodes(self, workflow: Workflow, props: PropertySet):
pass | python | 7 | 0.691756 | 91 | 27 | 10 |
Base class for all transformers
| class |
class KMeans:
'''
k = number of groups / clusters / ... (group)
tolerance = acceptable level of variation in precision (tol)
Iteration : repetition of process
'''
  ## you could use other tolerance stop criteria, such as:
  #error : ((actual - forecast)/forecast)*100
#accuracy : 1-error
#Note : centroid, center of mass and geometric center could be different.
def __init__(self, group=2, maxTolerance=0.001, iteration=300):
self.k = group
self.tol = maxTolerance
self.iteration = iteration
self.fig = plt.figure('K-Means PLOT',figsize=(9, 6))
self.ax = self.fig.add_subplot(111)#1*1 grid , no.1
self.colors = 200*["r","g","b","k","c"]
def fit(self,data):
self.centroids = {}
#start with first k data as centroids
self.centroids={i:data[i] for i in range(self.k)}
for _ in range(self.iteration):
self.classes={i:[] for i in range(self.k)}
for j in data:#j : featureset
distances = [np.linalg.norm(j-self.centroids[i]) for i in self.centroids]
self.classes[np.argmin(distances)].append(j)#min as cluster
pc = self.centroids #pc : prev_centroids
self.centroids={i:np.average(self.classes[i],axis=0) for i in self.classes}
print(self.centroids)
op=[False for c in self.centroids if np.sum(self.centroids[c]-pc[c]) > self.tol]
if op==[] : break #not op : optimum
def predict(self,data):
distances = [np.linalg.norm(data-self.centroids[i]) for i in self.centroids]
self.ax.scatter(data[0], data[1], marker="*",
color=self.colors[np.argmin(distances)], s=150, linewidths=2)
return np.argmin(distances)
def visualize(self):
    for centroid in self.centroids:
      self.ax.scatter(self.centroids[centroid][0], self.centroids[centroid][1],
                  marker="$C$", color="k", s=100, linewidths=2)
    for j in self.classes:
      [plt.scatter(i[0],i[1],marker="x",color=self.colors[j],s=150,linewidth=2) for i in self.classes[j]]
self.ax.set_title('K-Means clustering, untagged data',fontsize=14)
self.ax.set_xlabel('X1',fontsize=12)
self.ax.set_ylabel('X2',fontsize=12)
customLines = [Line2D([0], [0], color='w', marker='*',
markersize=15,markerfacecolor='k'),
Line2D([0], [0], color='w', marker='$x$',
markersize=15,markerfacecolor='k'),
Line2D([0], [0], color='w', marker='$C$',
markersize=15,markerfacecolor='k')]
self.ax.legend(customLines,['new data','data','Center'],
loc='upper center', shadow=True) | python | 17 | 0.558075 | 110 | 52.132075 | 53 |
k = number of groups / clusters / ... (group)
tolerance = acceptable level of variation in precision (tol)
Iteration : repetition of process
| class |
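A usage sketch, assuming the KMeans class above is in scope together with the module-level imports it relies on (np, plt, Line2D); the data is synthetic.

import numpy as np

data = np.array([[1.0, 2.0], [1.5, 1.8], [8.0, 8.0], [9.0, 9.5], [1.0, 0.6]])
clf = KMeans(group=2, maxTolerance=0.001, iteration=100)
clf.fit(data)                              # prints the centroids per iteration
print(clf.predict(np.array([0.5, 1.0])))   # index of the closest centroid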
class CGateway:
"""
COrgan Gateway Interface
    This class is used to handle HTTP requests from other parts of this
    solution project and perform them. Each HTTP request will be decomposed
    into key-value pairs which represent the action and its parameters.
    The action will be executed and the result will be presented as JSON.
    Note that all requests from any part of the solution ought
    to be passed here as a web service request parameter dictionary and
    return a JSON response string.
"""
"""
CController Static Instance
"""
core = CController.CControllerCore
def __init__(self):
pass
"""
Helper methods
"""
@staticmethod
def _DumpEntityHandler(obj):
"""
Handle how to dump a Luminous entity into JSON serializable object.
:param obj: entity object
:return: JSON serializable object
"""
if isinstance(obj, datetime):
return obj.strftime('%Y-%m-%d %H:%M:%S')
else:
raise TypeError('%r is not JSON serializable' % obj)
@staticmethod
def _DumpResponse(response_dict):
"""
Dump the response dictionary into structure XML string
:param response_dict: A dictionary, key is the label and value is the body
:return: JSON string
"""
dumpResp = json.dumps(response_dict, skipkeys=True, ensure_ascii=False, default=lambda t: t.__dict__)
return u"%s\n" % dumpResp
@staticmethod
def _SuccessResponse(args_dict=None):
"""
Dump the response of simple success.
:param args_dict: a dict, contains optional response arguments
:return: the success JSON string
"""
if args_dict is None:
args_dict = {}
args_dict["code"] = "OK"
return CGateway._DumpResponse(args_dict)
@staticmethod
def _FailureResponse(args_dict=None):
"""
Dump the response of simple failure.
:param args_dict: a dict, contains optional response arguments
:return: the failure JSON string
"""
if args_dict is None:
args_dict = {}
args_dict["code"] = "Fail"
return CGateway._DumpResponse(args_dict)
@staticmethod
def _ExceptionResponse(args_dict=None):
"""
Dump the response of simple exception.
:param args_dict: a dict, contains optional response arguments
:return: the failure JSON string
"""
if args_dict is None:
args_dict = {}
args_dict["code"] = "Exception"
return CGateway._DumpResponse(args_dict)
@staticmethod
def _UnauthorizedServiceResponse(session):
"""
Dump the response of unauthorized exception.
:param session: session id
:return: the unauthorized exception JSON string
"""
return CGateway._DumpResponse({"code": "Unauthorized",
"return": "Unauthorized Service Request. session: %s" % session})
@staticmethod
def _HandleExceptionAndUnauthorized(flagVal, retVal, session=None):
"""
Handle the standard validation of CController return values.
:param flagVal: flag of Exception-Raise-While-Execution-In-Engine
:param retVal: return value package
:param session: session id
:return: immediate return JSON
"""
if flagVal is False and retVal == GCC.UNAUTHORIZED:
return CGateway._UnauthorizedServiceResponse(session)
if flagVal is False:
return CGateway._ExceptionResponse()
return None
"""
Authority API
"""
@staticmethod
def Connect(**argd):
"""
Restful API for authority connection.
:param argd: request argument dictionary
:return: dumped json string
"""
flag, ret = CController.CController.Connect(argd["username"], EncryptUtil.EncryptSHA256(argd["password"]))
if flag is False:
return CGateway._ExceptionResponse()
if ret is None:
return CGateway._FailureResponse({"return": "invalid user id or password"})
return CGateway._SuccessResponse({"session": ret})
@staticmethod
def CheckConnect(**argd):
"""
Restful API for authority token validation check.
:param argd: request argument dictionary
:return: dumped json string
"""
flag, ret = CController.CController.CheckConnect(argd["session"])
xFlag = CGateway._HandleExceptionAndUnauthorized(flag, ret, argd["session"])
if xFlag is not None:
return xFlag
return CGateway._SuccessResponse() if ret is True else CGateway._FailureResponse()
@staticmethod
def Disconnect(**argd):
"""
Restful API for authority token destroy.
:param argd: request argument dictionary
:return: dumped json string
"""
flag, ret = CController.CController.Disconnect(argd["session"])
xFlag = CGateway._HandleExceptionAndUnauthorized(flag, ret, argd["session"])
if xFlag is not None:
return xFlag
return CGateway._SuccessResponse() if ret is True else CGateway._FailureResponse()
"""
Data Retrieving API
"""
@staticmethod
def GetOrganization(**argd):
"""
Restful API for getting organization name.
:param argd: request argument dictionary
:return: dumped json string
"""
flag, ret = CGateway.core.GetOrganizationName(argd["session"])
xFlag = CGateway._HandleExceptionAndUnauthorized(flag, ret, argd["session"])
if xFlag is not None:
return xFlag
return CGateway._SuccessResponse({'return': ret})
@staticmethod
def GetDataVersion(**argd):
"""
Restful API for getting data version string.
:param argd: request argument dictionary
:return: dumped json string
"""
flag, ret = CGateway.core.GetCurrentDataVersion(argd["session"])
xFlag = CGateway._HandleExceptionAndUnauthorized(flag, ret, argd["session"])
if xFlag is not None:
return xFlag
return CGateway._SuccessResponse({'return': ret})
@staticmethod
def RetrieveAllHuman(**argd):
"""
Restful API for getting all human.
:param argd: request argument dictionary
:return: dumped json string
"""
flag, ret = CGateway.core.RetrieveAllHuman(argd["session"])
xFlag = CGateway._HandleExceptionAndUnauthorized(flag, ret, argd["session"])
if xFlag is not None:
return xFlag
hmBuilder = []
for hm in ret:
hmBuilder.append(hm.ToJsonDict())
return CGateway._SuccessResponse({'return': hmBuilder})
@staticmethod
def RetrieveAllAgent(**argd):
"""
Restful API for getting all agent.
:param argd: request argument dictionary
:return: dumped json string
"""
flag, ret = CGateway.core.RetrieveAllAgent(argd["session"])
xFlag = CGateway._HandleExceptionAndUnauthorized(flag, ret, argd["session"])
if xFlag is not None:
return xFlag
hmBuilder = []
for hm in ret:
hmBuilder.append(hm.ToJsonDict())
return CGateway._SuccessResponse({'return': hmBuilder})
@staticmethod
def RetrieveAllGroups(**argd):
"""
Restful API for getting all group.
:param argd: request argument dictionary
:return: dumped json string
"""
flag, ret = CGateway.core.RetrieveAllGroup(argd["session"])
xFlag = CGateway._HandleExceptionAndUnauthorized(flag, ret, argd["session"])
if xFlag is not None:
return xFlag
hmBuilder = []
for hm in ret:
hmBuilder.append(hm.ToJsonDict())
return CGateway._SuccessResponse({'return': hmBuilder})
@staticmethod
def RetrieveAllPositions(**argd):
"""
Restful API for getting all position.
:param argd: request argument dictionary
:return: dumped json string
"""
flag, ret = CGateway.core.RetrieveAllPosition(argd["session"])
xFlag = CGateway._HandleExceptionAndUnauthorized(flag, ret, argd["session"])
if xFlag is not None:
return xFlag
hmBuilder = []
for hm in ret:
hmBuilder.append(hm.ToJsonDict())
return CGateway._SuccessResponse({'return': hmBuilder})
@staticmethod
def RetrieveAllCapabilities(**argd):
"""
Restful API for getting all capability.
:param argd: request argument dictionary
:return: dumped json string
"""
flag, ret = CGateway.core.RetrieveAllCapabilities(argd["session"])
xFlag = CGateway._HandleExceptionAndUnauthorized(flag, ret, argd["session"])
if xFlag is not None:
return xFlag
hmBuilder = []
for hm in ret:
hmBuilder.append(hm.ToJsonDict())
return CGateway._SuccessResponse({'return': hmBuilder})
@staticmethod
def RetrieveHumanInWhatGroup(**argd):
"""
Restful API for getting a set of groups that a specific human in.
:param argd: request argument dictionary
:return: dumped json string
"""
flag, ret = CGateway.core.RetrieveHumanInWhatGroup(argd["session"], argd["personId"])
xFlag = CGateway._HandleExceptionAndUnauthorized(flag, ret, argd["session"])
if xFlag is not None:
return xFlag
return CGateway._SuccessResponse({'return': ret})
@staticmethod
def RetrieveHumanInWhatPosition(**argd):
"""
Restful API for getting a set of positions that a specific human at.
:param argd: request argument dictionary
:return: dumped json string
"""
flag, ret = CGateway.core.RetrieveHumanInWhatPosition(argd["session"], argd["personId"])
xFlag = CGateway._HandleExceptionAndUnauthorized(flag, ret, argd["session"])
if xFlag is not None:
return xFlag
return CGateway._SuccessResponse({'return': ret})
@staticmethod
def RetrieveHumanWithWhatCapability(**argd):
"""
Restful API for getting a set of capabilities that a specific human with.
:param argd: request argument dictionary
:return: dumped json string
"""
flag, ret = CGateway.core.RetrieveHumanWithWhatCapability(argd["session"], argd["personId"])
xFlag = CGateway._HandleExceptionAndUnauthorized(flag, ret, argd["session"])
if xFlag is not None:
return xFlag
return CGateway._SuccessResponse({'return': ret})
@staticmethod
def RetrieveAgentInWhatGroup(**argd):
"""
Restful API for getting a set of groups that a specific agent in.
:param argd: request argument dictionary
:return: dumped json string
"""
flag, ret = CGateway.core.RetrieveAgentInWhatGroup(argd["session"], argd["personId"])
xFlag = CGateway._HandleExceptionAndUnauthorized(flag, ret, argd["session"])
if xFlag is not None:
return xFlag
return CGateway._SuccessResponse({'return': ret})
@staticmethod
def RetrieveAgentInWhatPosition(**argd):
"""
Restful API for getting a set of positions that a specific agent at.
:param argd: request argument dictionary
:return: dumped json string
"""
flag, ret = CGateway.core.RetrieveAgentInWhatPosition(argd["session"], argd["personId"])
xFlag = CGateway._HandleExceptionAndUnauthorized(flag, ret, argd["session"])
if xFlag is not None:
return xFlag
return CGateway._SuccessResponse({'return': ret})
@staticmethod
def RetrieveAgentWithWhatCapability(**argd):
"""
Restful API for getting a set of capabilities that a specific agent with.
:param argd: request argument dictionary
:return: dumped json string
"""
flag, ret = CGateway.core.RetrieveAgentWithWhatCapability(argd["session"], argd["personId"])
xFlag = CGateway._HandleExceptionAndUnauthorized(flag, ret, argd["session"])
if xFlag is not None:
return xFlag
return CGateway._SuccessResponse({'return': ret})
@staticmethod
def RetrieveHumanInGroup(**argd):
"""
Restful API for getting a set of human that a specific group contains.
:param argd: request argument dictionary
:return: dumped json string
"""
flag, ret = CGateway.core.RetrieveHumanInGroup(argd["session"], argd["name"])
xFlag = CGateway._HandleExceptionAndUnauthorized(flag, ret, argd["session"])
if xFlag is not None:
return xFlag
return CGateway._SuccessResponse({'return': ret})
@staticmethod
def RetrieveAgentInGroup(**argd):
"""
Restful API for getting a set of agent that a specific group contains.
:param argd: request argument dictionary
:return: dumped json string
"""
flag, ret = CGateway.core.RetrieveAgentInGroup(argd["session"], argd["name"])
xFlag = CGateway._HandleExceptionAndUnauthorized(flag, ret, argd["session"])
if xFlag is not None:
return xFlag
return CGateway._SuccessResponse({'return': ret})
@staticmethod
def RetrieveHumanInPosition(**argd):
"""
Restful API for getting a set of humans that a specific position contains.
:param argd: request argument dictionary
:return: dumped json string
"""
flag, ret = CGateway.core.RetrieveHumanInPosition(argd["session"], argd["name"])
xFlag = CGateway._HandleExceptionAndUnauthorized(flag, ret, argd["session"])
if xFlag is not None:
return xFlag
return CGateway._SuccessResponse({'return': ret})
@staticmethod
def RetrieveAgentInPosition(**argd):
"""
Restful API for getting a set of agents that a specific position contains.
:param argd: request argument dictionary
:return: dumped json string
"""
flag, ret = CGateway.core.RetrieveAgentInPosition(argd["session"], argd["name"])
xFlag = CGateway._HandleExceptionAndUnauthorized(flag, ret, argd["session"])
if xFlag is not None:
return xFlag
return CGateway._SuccessResponse({'return': ret})
@staticmethod
def RetrieveHumanWithCapability(**argd):
"""
Restful API for getting a set of humans that a specific capability category contains.
:param argd: request argument dictionary
:return: dumped json string
"""
flag, ret = CGateway.core.RetrieveHumanWithCapability(argd["session"], argd["name"])
xFlag = CGateway._HandleExceptionAndUnauthorized(flag, ret, argd["session"])
if xFlag is not None:
return xFlag
return CGateway._SuccessResponse({'return': ret})
@staticmethod
def RetrieveAgentWithCapability(**argd):
"""
Restful API for getting a set of agents that a specific capability category contains.
:param argd: request argument dictionary
:return: dumped json string
"""
flag, ret = CGateway.core.RetrieveAgentWithCapability(argd["session"], argd["name"])
xFlag = CGateway._HandleExceptionAndUnauthorized(flag, ret, argd["session"])
if xFlag is not None:
return xFlag
return CGateway._SuccessResponse({'return': ret})
@staticmethod
def RetrieveWorkerInGroup(**argd):
"""
Restful API for getting a set of workers that a specific group contains, ONLY USE BY NAME SERVICE.
:param argd: request argument dictionary
:return: dumped json string
"""
checkSign = argd["nsid"] + "," + argd["renid"]
token = EncryptUtil.DecodeURLSafeBase64(argd["token"])
try:
tokenRet = EncryptUtil.VerifySign(checkSign, token, GlobalConfigContext.AUTH_NameService_PublicKey)
except:
tokenRet = False
if tokenRet is False:
return CGateway._UnauthorizedServiceResponse(token)
flag1, ret1 = CGateway.core.RetrieveHumanInGroup(GlobalConfigContext.AUTH_INTERNAL_SESSION, argd["groupName"])
flag2, ret2 = CGateway.core.RetrieveAgentInGroup(GlobalConfigContext.AUTH_INTERNAL_SESSION, argd["groupName"])
return CGateway._DumpResponse(ret1 + ret2)
@staticmethod
def RetrieveWorkerInPosition(**argd):
"""
Restful API for getting a set of workers that a specific position contains, ONLY USE BY NAME SERVICE.
:param argd: request argument dictionary
:return: dumped json string
"""
checkSign = argd["nsid"] + "," + argd["renid"]
token = EncryptUtil.DecodeURLSafeBase64(argd["token"])
try:
tokenRet = EncryptUtil.VerifySign(checkSign, token, GlobalConfigContext.AUTH_NameService_PublicKey)
except:
tokenRet = False
if tokenRet is False:
return CGateway._UnauthorizedServiceResponse(token)
flag1, ret1 = CGateway.core.RetrieveHumanInPosition(GlobalConfigContext.AUTH_INTERNAL_SESSION, argd["positionName"])
flag2, ret2 = CGateway.core.RetrieveAgentInPosition(GlobalConfigContext.AUTH_INTERNAL_SESSION, argd["positionName"])
return CGateway._DumpResponse(ret1 + ret2)
@staticmethod
def RetrieveWorkerInCapability(**argd):
"""
Restful API for getting a set of workers that a specific capability contains, ONLY USE BY NAME SERVICE.
:param argd: request argument dictionary
:return: dumped json string
"""
checkSign = argd["nsid"] + "," + argd["renid"]
token = EncryptUtil.DecodeURLSafeBase64(argd["token"])
try:
tokenRet = EncryptUtil.VerifySign(checkSign, token, GlobalConfigContext.AUTH_NameService_PublicKey)
except:
tokenRet = False
if tokenRet is False:
return CGateway._UnauthorizedServiceResponse(token)
flag1, ret1 = CGateway.core.RetrieveHumanWithCapability(GlobalConfigContext.AUTH_INTERNAL_SESSION, argd["capabilityName"])
flag2, ret2 = CGateway.core.RetrieveAgentWithCapability(GlobalConfigContext.AUTH_INTERNAL_SESSION, argd["capabilityName"])
return CGateway._DumpResponse(ret1 + ret2)
@staticmethod
def RetrieveWorkerByOrganizable(**argd):
"""
Restful API for getting workers in a organizable in the COrgan, ONLY USE BY NAME SERVICE.
:param argd: request argument dictionary
:return: dumped json string
"""
checkSign = argd["nsid"] + "," + argd["renid"]
token = EncryptUtil.DecodeURLSafeBase64(argd["token"])
try:
tokenRet = EncryptUtil.VerifySign(checkSign, token, GlobalConfigContext.AUTH_NameService_PublicKey)
except:
tokenRet = False
if tokenRet is False:
return CGateway._UnauthorizedServiceResponse(token)
flag, ret = CGateway.core.RetrieveWorkerByOrganizableGid(GlobalConfigContext.AUTH_INTERNAL_SESSION, argd["gid"])
xFlag = CGateway._HandleExceptionAndUnauthorized(flag, ret, GlobalConfigContext.AUTH_INTERNAL_SESSION)
if xFlag is not None:
return xFlag
return CGateway._DumpResponse(ret)
@staticmethod
def RetrieveAllEntity(**argd):
"""
Restful API for getting all entities in the COrgan, ONLY USE BY NAME SERVICE.
:param argd: request argument dictionary
:return: dumped json string
"""
checkSign = argd["nsid"] + "," + argd["renid"]
token = EncryptUtil.DecodeURLSafeBase64(argd["token"])
try:
tokenRet = EncryptUtil.VerifySign(checkSign, token, GlobalConfigContext.AUTH_NameService_PublicKey)
except:
tokenRet = False
if tokenRet is False:
return CGateway._UnauthorizedServiceResponse(token)
flag1, ret1 = CGateway.core.RetrieveAllHuman(GlobalConfigContext.AUTH_INTERNAL_SESSION)
flag2, ret2 = CGateway.core.RetrieveAllAgent(GlobalConfigContext.AUTH_INTERNAL_SESSION)
flag3, ret3 = CGateway.core.RetrieveAllGroup(GlobalConfigContext.AUTH_INTERNAL_SESSION)
flag4, ret4 = CGateway.core.RetrieveAllPosition(GlobalConfigContext.AUTH_INTERNAL_SESSION)
flag5, ret5 = CGateway.core.RetrieveAllCapabilities(GlobalConfigContext.AUTH_INTERNAL_SESSION)
retDict = dict()
retDict["human"] = ret1
retDict["agent"] = ret2
retDict["group"] = ret3
retDict["position"] = ret4
retDict["capability"] = ret5
return CGateway._DumpResponse(retDict)
@staticmethod
def RetrieveWorkerEntityByGid(**argd):
"""
Restful API for getting a list of worker entity, ONLY USE BY NAME SERVICE.
:param argd: request argument dictionary
:return: dumped json string
"""
checkSign = argd["nsid"] + "," + argd["renid"]
token = EncryptUtil.DecodeURLSafeBase64(argd["token"])
try:
tokenRet = EncryptUtil.VerifySign(checkSign, token, GlobalConfigContext.AUTH_NameService_PublicKey)
except:
tokenRet = False
if tokenRet is False:
return CGateway._UnauthorizedServiceResponse(token)
flag, ret = CGateway.core.RetrieveWorkersEntity(GlobalConfigContext.AUTH_INTERNAL_SESSION, argd["gids"])
return CGateway._DumpResponse(ret)
@staticmethod
def RetrieveAllConnection(**argd):
"""
Restful API for getting all connections in the COrgan, ONLY USE BY NAME SERVICE.
:param argd: request argument dictionary
:return: dumped json string
"""
checkSign = argd["nsid"] + "," + argd["renid"]
token = EncryptUtil.DecodeURLSafeBase64(argd["token"])
try:
tokenRet = EncryptUtil.VerifySign(checkSign, token, GlobalConfigContext.AUTH_NameService_PublicKey)
except:
tokenRet = False
if tokenRet is False:
return CGateway._UnauthorizedServiceResponse(token)
flag, ret = CGateway.core.RetrieveAllConnection(GlobalConfigContext.AUTH_INTERNAL_SESSION)
xFlag = CGateway._HandleExceptionAndUnauthorized(flag, ret, GlobalConfigContext.AUTH_INTERNAL_SESSION)
if xFlag is not None:
return xFlag
return CGateway._DumpResponse(ret)
@staticmethod
def RetrieveDataVersionGid(**argd):
"""
Restful API for getting data version of COrgan, ONLY USE BY NAME SERVICE.
:param argd: request argument dictionary
:return: dumped json string
"""
checkSign = argd["nsid"] + "," + argd["renid"]
token = EncryptUtil.DecodeURLSafeBase64(argd["token"])
try:
tokenRet = EncryptUtil.VerifySign(checkSign, token, GlobalConfigContext.AUTH_NameService_PublicKey)
except:
tokenRet = False
if tokenRet is False:
return CGateway._UnauthorizedServiceResponse(token)
flag1, ret1 = CGateway.core.GetCurrentDataVersion(GlobalConfigContext.AUTH_INTERNAL_SESSION)
flag2, ret2 = CGateway.core.GetOrganizationId(GlobalConfigContext.AUTH_INTERNAL_SESSION)
xFlag = CGateway._HandleExceptionAndUnauthorized(flag1 & flag2, ret1, GlobalConfigContext.AUTH_INTERNAL_SESSION)
if xFlag is not None:
return xFlag
return CGateway._DumpResponse("%s,%s" % (ret1, ret2)) | python | 14 | 0.642466 | 130 | 39.848537 | 581 |
COrgan Gateway Interface
This class is used to handle HTTP requests from other parts of this
solution project and perform them. Each HTTP request will be decomposed
into key-value pairs which represent the action and its parameters.
The action will be executed and the result will be presented as JSON.
Note that all requests from any part of the solution ought
to be passed here as a web service request parameter dictionary and
return a JSON response string.
| class |
class SimControl:
"""
TODO:
Class cleanup.
3D.
"""
"""
ANALYSIS:
There are different layer types:
Physical, CAD
Physical, BOM
Physical, Simulation
Simulation, Flag
Simulation, Integer
Simulation, Float
The simulation layers are in Mesh2D.py and are data-driven constructors working from SimControl json configuration.
The physical CAD layers are described in matls.js, layers.js, and vias.js.
This description data is loaded in Matls.py, Layers.py, and Vias.py.
  To see them all at once and verify correctness, an HTML table is generated for each one and displayed by SimControl.py.
"""
"""
TODO:
Need clean structure of the JSON with:
Simulation controls: HTTP server, settings
Input data: geometry, materials, layers, components, power inputs, boundary conditions
Intermediate data: mesh, solver, matrix in, matrix out
Output raw data: HDF5, Spice, and PNG specifications, basically
Visualization: What to create, where to put it
Need a higher level for reports, which can have a collection of simulations.
These can show design tradeoffs such as what-if thicker copper or larger vias.
They can also show benchmarks from a test set of simulations.
"""
"""
TODO:
There needs to be a top level directory for program outputs.
Directories, such as for PNG files, go beneath this level.
Directories that do not exist need to be created.
These names should all go in the JSON config file.
roq/therm/onous/layers/layer1.png
The main program should launch the controller in a location.
The controller discovers that it is in an existing design and loads it,
or it discovers that it is in a new location and initializes it.
"""
def __init__(self):
parser = argparse.ArgumentParser()
parser.add_argument('cfg', nargs='?', type=argparse.FileType('r'), default=sys.stdin)
args = parser.parse_args()
print "Config file is: " + str(args.cfg)
self.configJSON= args.cfg.read()
self.config= yaml.load(self.configJSON)
def loadModel(self):
self.matls = Matls.Matls(self.config['matl_config'])
# BOM Layers: Manufacturer's BOM, for example with three layers that stack to make a single core.
# CAD Layers: Data from CAD source, could also include drills, for example.
# Physical Layers: PhyLayers: 2D multilayer bitmap representation of the design. This is a solid model.
# Thermal Simulation Layers: Thermapythia layers: Values used to load the matrix, flags, etc.
self.lyr = Layers.Layers(self.config['layer_config'])
self.via = Vias.Vias(self.config['via_config'])
self.createWebPage(self.config['webPageFileName'])
# TODO: Consider refactoring to split mesh into geometry and mesh
self.mesh = Mesh2D.Mesh(self.config['mesh_config'], self.lyr, self.matls)
return
def createWebPage(self, webPageFileName):
# np.set_printoptions(threshold='nan', linewidth=10000)
f= open(webPageFileName, 'w')
self.webpage()
f.write(self.html)
f.close()
def webpage(self):
h = Html.Html()
head = h.title("Stackup")
body = h.h1("Materials")
body += self.matls.genHTMLMatlTable(h)
body += h.h1("Layers")
body += self.lyr.genHTMLLayersTable(self.matls, h)
body += h.h1("Vias")
body += self.via.genHTMLViaTable(self.matls, self.lyr, h)
self.html= h.html(h.head(head) + h.body(body))
def solveModel(self):
self.solv = Solver2D.Solver2D(self.config['solver'], self.mesh.nodeCount)
self.solv.solve(self.lyr, self.mesh, self.matls)
return
def loadView(self):
self.plots= InteractivePlot.InteractivePlot(self.config, self.solv, self.lyr, self.mesh)
self.plots.plotAll()
return
def loadController(self):
self.http = Http.Http(self.config)
self.http.openWebBrowser(1)
return
def launchController(self):
# This does not return until the web server is stopped.
self.http.startServer()
return | python | 12 | 0.69366 | 128 | 36.398148 | 108 |
TODO:
Class cleanup.
3D.
| class |
class defaultdict:
"""
Default Dict Implementation.
    DefaultDict that returns the key if the key is not found in the dictionary (see
unswap in karma-lib):
>>> d = defaultdict(default=lambda key: key)
>>> d['foo'] = 'bar'
>>> d['foo']
'bar'
>>> d['baz']
'baz'
DefaultDict that returns an empty string if the key is not found (see
prefix in karma-lib for typical usage):
>>> d = defaultdict(default=lambda key: '')
>>> d['foo'] = 'bar'
>>> d['foo']
'bar'
>>> d['baz']
''
Representation of a default dict:
>>> defaultdict([('foo', 'bar')])
defaultdict(None, {'foo': 'bar'})
"""
@staticmethod
# pylint: disable=W0613
def __new__(cls, default_factory=None, **kwargs):
self = super(defaultdict, cls).__new__(cls)
# pylint: disable=C0103
self.d = {}
return self
def __init__(self, default_factory=None, **kwargs):
self.d = kwargs
self.default_factory = default_factory
def __getitem__(self, key):
try:
return self.d[key]
except KeyError:
val = self.__missing__(key)
self.d[key] = val
return val
def __setitem__(self, key, val):
self.d[key] = val
def __delitem__(self, key):
del self.d[key]
def __contains__(self, key):
return key in self.d
def __missing__(self, key):
if self.default_factory is None:
raise KeyError(key)
return self.default_factory() | python | 12 | 0.543322 | 80 | 25.482759 | 58 |
Default Dict Implementation.
DefaultDict that returns the key if the key is not found in the dictionary (see
unswap in karma-lib):
>>> d = defaultdict(default=lambda key: key)
>>> d['foo'] = 'bar'
>>> d['foo']
'bar'
>>> d['baz']
'baz'
DefaultDict that returns an empty string if the key is not found (see
prefix in karma-lib for typical usage):
>>> d = defaultdict(default=lambda key: '')
>>> d['foo'] = 'bar'
>>> d['foo']
'bar'
>>> d['baz']
''
Representation of a default dict:
>>> defaultdict([('foo', 'bar')])
defaultdict(None, {'foo': 'bar'})
| class |
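A usage sketch, assuming the class above is in scope. Note that this implementation calls default_factory with no arguments, so the factory passed here takes none.

d = defaultdict(default_factory=lambda: "")
d["foo"] = "bar"
print(d["foo"])    # 'bar'
print(d["baz"])    # ''  (missing keys fall back to the zero-argument factory)
print("baz" in d)  # True -- the fallback value was cached by __getitem__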
class ListSplitter:
"""
Collect list of objects and split it on train and valid (if possible)
Returns list of objects for train and valid
"""
def __init__(self, object_list, valid_size=0.5, is_sort=True, seed=None):
self.object_list = object_list
self.valid_size = valid_size
self.is_sort = is_sort
self.seed = seed
self._n = len(self.object_list)
self._train_files = None
self._valid_files = None
self.split()
self.sort()
@property
def train(self):
return self._train_files
@property
def valid(self):
return self._valid_files
def size_select(self):
if self.valid_size == 0.0:
return self._n, 0
_valid = self._n * self.valid_size
if _valid < 1.0:
_valid = 1
else:
_valid = int(round(_valid))
_train = self._n - _valid
if _train == 0:
raise AttributeError(', '.join([
'Incorrect train size',
f'N: {self._n}',
f'train: {_train}',
f'valid: {_valid}',
f'rate: {self.valid_size:.3f}',
]))
return _train, _valid
def split(self):
train_size, valid_size = self.size_select()
if valid_size == 0:
self._train_files = self.object_list
self._valid_files = None
else:
valid_ix = np.arange(self._n)
rs = np.random.RandomState(self.seed)
valid_ix = set(rs.choice(valid_ix, size=valid_size, replace=False).tolist())
self._train_files = [rec for i, rec in enumerate(self.object_list) if i not in valid_ix]
self._valid_files = [rec for i, rec in enumerate(self.object_list) if i in valid_ix]
def sort(self):
if not self.is_sort:
return
if self._train_files is not None:
self._train_files = sorted(self._train_files)
if self._valid_files is not None:
self._valid_files = sorted(self._valid_files) | python | 17 | 0.530651 | 100 | 29.720588 | 68 |
Collect list of objects and split it on train and valid (if possible)
Returns list of objects for train and valid
| class |
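A usage sketch, assuming the ListSplitter class above and its numpy import (np) are in scope.

files = ["d.csv", "a.csv", "c.csv", "b.csv"]
splitter = ListSplitter(files, valid_size=0.25, seed=42)
print(splitter.train)   # three sorted file names
print(splitter.valid)   # the single held-out validation file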
class CreateReport:
"Creates a static .html file with charts using Google Charts API"
def __init__(self,title):
self.title = title
self.html_string = ""
self.partitionsFilled = 0.0
self.chartCount=0
self.partition=1.0
def setupNewChart(self,width):
#insert data input validation in list
if (width=="full"): self.partition = 1.0
if (width=="half"): self.partition = 2.0
if (width=="third"): self.partition = 3.0
if (width=="2-thirds"): self.partition = 3.0/2.0
if (width=="quarter"): self.partition = 4.0
if (round(self.partitionsFilled + 1.0/self.partition,2) > 1):
self.html_string += "</div>\n"
self.partitionsFilled = 0
if (self.partitionsFilled==0):
self.html_string += "<div class='flex-container'>\n"
self.partitionsFilled += 1.0/self.partition
self.chartCount+=1
def text(self,html_text,width="full"):
self.setupNewChart(width);
self.html_string += "<div style='width:" + "{:.2%}".format(1.0/self.partition) + ";'><p>" + html_text + "</p></div>\n"
def plot(self,data,x,y,type="line",width="full",height=400,options={}):
self.setupNewChart(width);
self.html_string += '''
<div id="chart_div''' + str(self.chartCount) + '''" style="width:''' + "{:.2%}".format(1.0/self.partition) + ''';height:''' + str(height) + '''px;"></div>
<script type="text/javascript">
google.charts.load('current', {packages: ['corechart']});
google.charts.setOnLoadCallback(drawChart''' + str(self.chartCount) + ''');
function drawChart''' + str(self.chartCount) + '''() {
var data = new google.visualization.DataTable();\n''';
if (is_datetime64_any_dtype(data[x])): self.html_string += "data.addColumn('date', '" + x + "');\n";
elif (is_numeric_dtype(data[x])): self.html_string += "data.addColumn('number', '" + x + "');\n";
else: self.html_string += "data.addColumn('string', '" + x + "');\n";
for a in y:
if (is_numeric_dtype(data[a])): self.html_string += "data.addColumn('number', '" + a + "');\n";
else: self.html_string += "data.addColumn('string', '" + a + "');\n";
self.html_string += "data.addRows([\n";
for index, row in data.iterrows():
if (is_datetime64_any_dtype(data[x])): self.html_string+="[new Date("+str(row[x].year)+","+str(row[x].month-1)+","+str(row[x].day)+"),";
else: self.html_string += "[" + str(row[x]) + ",";
for a in y:
if (isna(row[a])): self.html_string += "null,";
else: self.html_string += str(row[a]) + ",";
self.html_string += "],\n"
self.html_string += "]);\n";
self.html_string += " var options = " + str(options) + ";\n"
types={'line':'LineChart','column':'ColumnChart'}
self.html_string += "var chart = new google.visualization.{}".format(types[type]);
self.html_string += "(document.getElementById('chart_div" + str(self.chartCount) + ''''));
chart.draw(data, options);}
</script>
'''
def exporthtml(self,full_path):
f = open(full_path, "w")
html_string_full = '''
<!DOCTYPE html>
<!-- This .html file was created using Python Library plot2html -->
<head>
<meta name="viewport" content="width=device-width, initial-scale=1">
<style>
body {
margin: 0;
padding-top: 80px; /* is overwriten by javascript */
font-family: Arial, Helvetica, sans-serif;
color: #444444;
}
.fixed-header {
width: 100%;
margin: 0 ;
padding: 0;
display: flex;
justify-content: space-between;
position: fixed;
background: #DDDDDD;
border-bottom: 1px solid #666;
box-shadow: 0px 1px 4px 1px #999;
top: 0;
font-weight: bold;
font-size: 24px;
}
.fixed-header > div {
text-align: center;
padding: 20px 16px;
}
.content {
padding: 10px 16px;
}
.flex-container {
display: flex;
flex-wrap: nowrap;
}
</style>
<script type="text/javascript" src="https://www.gstatic.com/charts/loader.js"></script>
</head>
<body>
<div class="fixed-header" id="myHeader">
<div></div>
<div>''' + self.title + '''</div>
<div>''' + datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S") + ''' UTC</div>
</div>
<script type="text/javascript">
function adjustHeaderHeight(){
document.body.style.paddingTop = document.getElementById('myHeader').clientHeight + "px";
}
window.addEventListener("resize", adjustHeaderHeight);
adjustHeaderHeight();
</script>
<div class="content">
''' + self.html_string
if (self.partitionsFilled > 0):
html_string_full += "</div>\n"
html_string_full += "</div></body></html>"
f.write(html_string_full)
f.close()
print("File exported to "+full_path); | python | 21 | 0.581415 | 154 | 37.269841 | 126 | Creates a static .html file with charts using Google Charts API | class |
class ExponentialMovingAverage:
"""
Maintains (exponential) moving average of a set of parameters.
"""
def __init__(self, parameters, decay, use_num_updates=True):
"""
Args:
parameters: Iterable of `torch.nn.Parameter`; usually the result of
`model.parameters()`.
decay: The exponential decay.
use_num_updates: Whether to use number of updates when computing
averages.
"""
if decay < 0.0 or decay > 1.0:
raise ValueError('Decay must be between 0 and 1')
self.decay = decay
self.num_updates = 0 if use_num_updates else None
self.shadow_params = [
p.clone().detach().cuda() for p in parameters
]
self.collected_params = []
def update(self, parameters):
"""
Update currently maintained parameters.
Call this every time the parameters are updated, such as the result of
the `optimizer.step()` call.
Args:
parameters: Iterable of `torch.nn.Parameter`; usually the same set of
parameters used to initialize this object.
"""
decay = self.decay
if self.num_updates is not None:
self.num_updates += 1
decay = min(decay, (1 + self.num_updates) / (10 + self.num_updates))
one_minus_decay = 1.0 - decay
with torch.no_grad():
parameters = list(parameters)
for s_param, param in zip(self.shadow_params, parameters):
s_param.sub_(one_minus_decay * (s_param - param))
def copy_to(self, parameters):
"""
Copy current parameters into given collection of parameters.
Args:
parameters: Iterable of `torch.nn.Parameter`; the parameters to be
updated with the stored moving averages.
"""
for s_param, param in zip(self.shadow_params, parameters):
param.data.copy_(s_param.data)
def store(self, parameters):
"""
Save the current parameters for restoring later.
Args:
parameters: Iterable of `torch.nn.Parameter`; the parameters to be
temporarily stored.
"""
self.collected_params = [param.clone()
for param in parameters]
def restore(self, parameters):
"""
Restore the parameters stored with the `store` method.
Useful to validate the model with EMA parameters without affecting the
original optimization process. Store the parameters before the
`copy_to` method. After validation (or model saving), use this to
restore the former parameters.
Args:
parameters: Iterable of `torch.nn.Parameter`; the parameters to be
updated with the stored parameters.
"""
for c_param, param in zip(self.collected_params, parameters):
param.data.copy_(c_param.data) | python | 15 | 0.594851 | 80 | 38.905405 | 74 |
Maintains (exponential) moving average of a set of parameters.
| class |
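A usage sketch, assuming the class above is in scope and a CUDA device is available (the shadow parameters are moved to the GPU in __init__).

import torch

model = torch.nn.Linear(4, 2).cuda()
ema = ExponentialMovingAverage(model.parameters(), decay=0.999)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

for _ in range(10):
    loss = model(torch.randn(8, 4).cuda()).pow(2).mean()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    ema.update(model.parameters())   # refresh the moving averages

ema.store(model.parameters())        # stash the raw weights
ema.copy_to(model.parameters())      # evaluate with the EMA weights here
ema.restore(model.parameters())      # then switch back for further training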
class Loc:
"""
A single location in a Layout. Attributes are a coordinate (2-d tuple)
and a dict of direction names, each of which will be mapped to a
destination (neighbor) Loc.
"""
def __init__(self, coord=None):
self.direction_map = {}
self.coord = (None, None) if coord is None else coord
def set_dir(self, direction, dest=None):
""" Set the neighboring Loc that corresponds to the specified direction. """
self.direction_map[direction] = dest
def get_dest(self, direction):
""" Get the neighboring Loc that corresponds to the specified direction. """
return self.direction_map.get(direction, None)
def get_neighbors(self):
return [self.direction_map[direction]
for direction, _ in self.direction_map.items()
if self.direction_map.get(direction, None)]
def get_neighbor_dirs(self):
return [direction for direction in self.direction_map.keys()
if self.direction_map.get(direction, None)]
def __str__(self):
return f'Loc {self.coord}'
def __repr__(self):
return f'Loc {self.coord} ({len(self.get_neighbors())} neighbors)' | python | 12 | 0.623558 | 84 | 36.96875 | 32 |
A single location in a Layout. Attributes are a coordinate (2-d tuple)
and a dict of direction names, each of which will be mapped to a
destination (neighbor) Loc.
| class |
class Layout:
"""
Creates a dict of Locs corresponding to the coordinates of this Layout.
Initializes each Loc with its neighboring Locs corresponding to each of
the directions in the layout. Coords can either be an iterable of (x,y)
tuples, or a string such as '2x2' specifying the number of rows and
columns for a rectangular grid. Defaults to a 3x3 grid of square locs
(with neighbors in n, s, e, w directions).
"""
def __init__(self, coords='3x3', direction_map=None):
self.loc_dict = {}
if 'x' in coords:
numrows, _, numcols = coords.partition('x')
self.coords = [(i, j) for j in range(int(numrows)) \
for i in range(int(numcols))]
else:
self.coords = coords
for coord in self.coords:
self.loc_dict[coord] = Loc(coord)
if direction_map is None:
self.direction_map = {
'n':(0, -1), 's':(0, 1), 'e':(1, 0), 'w':(-1, 0)
}
else:
self.direction_map = direction_map
# construct direction pairs from the direction map
reverse_direction_map = {v: k for k, v in self.direction_map.items()}
self.direction_pairs = {}
for direction, dirvec in self.direction_map.items():
reverse_dirvec = tuple(-1 * d for d in dirvec)
self.direction_pairs[direction] = reverse_direction_map[reverse_dirvec]
self.inner_edges = 0
for loc in self.locs:
for direction, dirvec in self.direction_map.items():
try_coord = (loc.coord[0] + dirvec[0],
loc.coord[1] + dirvec[1])
dest = (self.loc_dict[try_coord] if try_coord in self.coords
else None)
loc.set_dir(direction, dest)
self.inner_edges += 1 if dest else 0
@property
def locs(self):
return self.loc_dict.values()
def get_paired_dir(self, dir):
return self.direction_pairs.get(dir, None)
def __repr__(self):
return (f'Layout ({len(self.coords)} coords, '
f'{len(self.direction_map)} directions)') | python | 15 | 0.556106 | 83 | 39.363636 | 55 |
Creates a dict of Locs corresponding to the coordinates of this Layout.
Initializes each Loc with its neighboring Locs corresponding to each of
the directions in the layout. Coords can either be an iterable of (x,y)
tuples, or a string such as '2x2' specifying the number of rows and
columns for a rectangular grid. Defaults to a 3x3 grid of square locs
(with neighbors in n, s, e, w directions).
| class |
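A usage sketch, assuming the Loc and Layout classes above are in scope.

layout = Layout('2x2')
corner = layout.loc_dict[(0, 0)]
print(corner.get_neighbor_dirs())   # ['s', 'e'] -- only in-grid directions remain
print(layout.get_paired_dir('n'))   # 's', the reciprocal direction used for matching
print(layout.inner_edges)           # 8 directed edges inside a 2x2 grid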
class Symbol:
"""Class to represent attributes of each symbol contained on a Tile."""
_rare_dict = {}
def __init__(self, sym_type, side, rare=False):
self.sym_type = sym_type
self.side = side
self._sym = (sym_type, side)
self.rare = rare
@property
def rare(self):
return Symbol._rare_dict.get(self._sym, None)
@rare.setter
def rare(self, rare):
Symbol._rare_dict[self._sym] = rare
def __eq__(self, other):
return self._sym == other._sym
def __hash__(self):
return hash(self._sym)
def __repr__(self):
return f'({self.sym_type}|{self.side})' | python | 10 | 0.5586 | 75 | 24.307692 | 26 | Class to represent attributes of each symbol contained on a Tile. | class |
class Tile:
"""
Contains assignments of symbols to "canonical" directions (rotation 0).
Helper queries return symbol / dir assignments for other possible rotations.
directions is a list that must be in sequential order of rotations.
symbols is a list that must be in order corresponding to directions.
"""
id_gen = itertools.count()
def __init__(self, tile_id=None, directions=None, symbols=None):
self.tile_id = next(self.id_gen) if tile_id is None else tile_id
self.directions = (['n', 'e', 's', 'w'] if directions is None
else directions)
self.symbols = ([None for d in self.directions] if symbols is None
else symbols)
def get_dir(self, symbol, rotation=0):
"""Get the direction corresponding to the given symbol and rotation."""
idx = self.symbols.index(symbol)
return self.directions[(idx + rotation) % len(self.symbols)]
def get_symbol(self, direction, rotation=0):
"""Get the symbol corresponding to the given direction and rotation."""
idx = self.directions.index(direction)
return self.symbols[(idx - rotation) % len(self.directions)]
def get_rotations(self, symbol, dir):
"""
Returns a list of rotations (since a Tile can contain more
than one instance of a symbol) that would be required to place
each matching symbol into the specified direction.
"""
d = self.directions.index(dir)
return [(d - i) % len(self.directions)
for i, s in enumerate(self.symbols)
if s == symbol]
def set_symbol(self, symbol, direction=None):
if direction:
idx = self.directions.index(direction)
self.symbols[idx] = symbol
else:
self.symbols.append(symbol)
def __lt__(self, other):
return self.tile_id < other.tile_id
def __str__(self):
return f'Tile {self.tile_id}'
def __repr__(self):
symlist = ", ".join([f'{direction}: {self.symbols[i]}'
for i, direction in enumerate(self.directions)])
return f'Tile {self.tile_id} ({symlist})' | python | 14 | 0.605418 | 80 | 40.037037 | 54 |
Contains assignments of symbols to "canonical" directions (rotation 0).
Helper queries return symbol / dir assignments for other possible rotations.
directions is a list that must be in sequential order of rotations.
symbols is a list that must be in order corresponding to directions.
| class |
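A usage sketch, assuming the Symbol and Tile classes above are in scope (Tile also needs the module's itertools import).

top = Symbol("spider", "top")
bottom = Symbol("spider", "bottom")
tile = Tile(symbols=[top, bottom, top, bottom])
print(tile.get_symbol("n"))              # (spider|top) at rotation 0
print(tile.get_dir(bottom, rotation=1))  # 's', where the first matching 'bottom' lands
print(tile.get_rotations(top, "e"))      # [1, 3], rotations that put a 'top' to the east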
class Assignment:
"""
Class to represent an assignment of a specific Tile with specific
rotation to a specific Loc.
"""
loc: Loc
tile: Tile
rotation: int
validated: bool = False
def __repr__(self):
return (f'Assignment ({self.loc} | {self.tile} | '
f'rot={self.rotation} | {self.validated})'
) | python | 11 | 0.559459 | 69 | 25.5 | 14 |
Class to represent an assignment of a specific Tile with specific
rotation to a specific Loc.
| class |
class Board:
"""
Class to represent a possible board configuration as a list of
Assignments. A validated Board is one in which all Assignments are
consistent with each other (tile symbols match across edges).
A solved Board is a validated Board that includes either all of the Locs
or all of the Tiles in a Game (none remain available to be assigned).
"""
def __init__(self, game=None, assignments=None):
self.game = game
self.assignments = [] if not assignments else assignments
def validate(self):
for assignment in self.assignments:
if assignment.validated:
continue
# look at each direction of this assignment's tile
tile = assignment.tile
for direction in tile.directions:
sym = tile.get_symbol(direction, assignment.rotation)
# find the layout loc associated with the current direction
destloc = assignment.loc.get_dest(direction)
# rare symbols may not match an "edge" with no associated loc
if not destloc:
if Globals.USE_RARES and sym.rare:
return False
else:
continue
# find the assignment that is associated with the dest loc
destassign = next((da for da in self.assignments
if da.loc == destloc), None)
if not destassign:
continue
# find the symbol in the reciprocal dir of the dest assignment
otherdir = self.game.layout.get_paired_dir(direction)
othertile = destassign.tile
othersym = othertile.get_symbol(otherdir, destassign.rotation)
# is the reciprocal sym the matching pair to the current sym?
if self.game.sympairs.get(sym, None) != othersym:
return False
assignment.validated = True
return True
def check_solved(self):
for assignment in self.assignments:
if not assignment.validated:
return False
if (len(self.assignments) == len(self.game.tiles) or
len(self.assignments) == len(self.game.layout.locs)):
return True
else:
return False
def extend_board(self):
"""
Construct a list of next board candidates by finding all Assignment
directions that point to an open Loc, then finding all matching Tiles
        that would fit that Assignment.
"""
next_boards = []
if Globals.DETERMINISTIC:
unassigned_tiles = list(set(self.game.tiles) -
{a.tile for a in self.assignments})
unassigned_tiles.sort()
else:
unassigned_tiles = (set(self.game.tiles) -
{a.tile for a in self.assignments})
for assignment in self.assignments:
if not assignment.validated:
raise ValueError(
"Can only extend a Board that is fully validated.")
tile = assignment.tile
# look at each direction of this assignment's tile
for direction in tile.directions:
# find the layout loc associated with the current direction
destloc = assignment.loc.get_dest(direction)
if not destloc:
continue
# is there an assignment already associated with the dest loc?
destassign = next((da for da in self.assignments
if da.loc == destloc), None)
if destassign:
continue
# which reciprocal dir and sym match the current dir and sym?
sym = tile.get_symbol(direction, assignment.rotation)
otherdir = self.game.layout.get_paired_dir(direction)
othersym = self.game.sympairs.get(sym, None)
if not othersym:
continue
for othertile in unassigned_tiles:
rots = othertile.get_rotations(othersym, otherdir)
for rot in rots:
newassign = Assignment(loc=destloc, tile=othertile,
rotation=rot, validated=False)
next_boards.append(Board(self.game,
self.assignments+[newassign]))
return next_boards
def memoize(self):
self.game.boards_visited.add(frozenset(self.assignments))
def check_memo(self):
return frozenset(self.assignments) in self.game.boards_visited
def __str__(self):
asgns = sorted(self.assignments,
key=lambda a:(a.loc.coord[1], a.loc.coord[0]))
strs = [f'{a.loc.__str__()}: {a.tile.__str__()}, '
f'rot={a.rotation}' for a in asgns]
return '\n'.join(strs)
def __repr__(self):
return self.assignments.__repr__() | python | 20 | 0.548894 | 79 | 44.219298 | 114 |
Class to represent a possible board configuration as a list of
Assignments. A validated Board is one in which all Assignments are
consistent with each other (tile symbols match across edges).
A solved Board is a validated Board that includes either all of the Locs
or all of the Tiles in a Game (none remain available to be assigned).
| class |
class Game:
"""
Class to track the state of a Game (Layout, Tiles, Board candidate stack
for solution in progress.)
"""
def __init__(self, layout=None, tiles=None):
self.layout = Layout() if not layout else layout
if not tiles:
raise ValueError('Tiles must be supplied at Game initialization.')
else:
self.tiles = tiles
self._symlist = list(
itertools.chain.from_iterable(p.symbols for p in self.tiles))
self._symfreq = Counter(self._symlist)
self.symbols = set(self._symlist)
self.boards_visited = set()
self.stack = []
if Globals.USE_RARES:
for sym in self.symbols:
if self._symfreq[sym] <= (self.layout.inner_edges
/ len(self.symbols)):
sym.rare = True
self.symsides = set(sym.side for sym in self.symbols)
if len(self.symsides) != 2:
raise ValueError(
f'There should be exactly two types of symbol sides. '
f'(e.g. "top" and "bottom").')
self.sympairs = {}
for sym in self.symbols:
otherside = next(
(pair for pair in self.symbols
if sym.sym_type == pair.sym_type and sym.side != pair.side)
, None)
if otherside:
self.sympairs[sym] = otherside
def solve(self):
"""Solve the game and return one or more Boards with valid solutions."""
def print_update():
print(f'solution_length: {len(board.assignments)}, '
f'trials: {trials}, valid_trials: {valid_trials}, '
f'stack depth = {len(self.stack)}')
print(f'boards visited: {len(self.boards_visited)}, '
f'revisits: {revisits}')
trials, valid_trials, revisits = 0,0,0
if not Globals.FIRST_ONLY:
boards = []
# Initialize the stack by placing all possible tile rotations
# onto one initial location.
if Globals.DETERMINISTIC:
loc = next(iter(self.layout.locs))
else:
loc = random.choice(list(self.layout.locs))
for tile in self.tiles:
for rotation, _ in enumerate(tile.directions):
assignment = Assignment(loc=loc, tile=tile,
rotation=rotation, validated=False)
board = Board(self, [assignment])
self.stack.insert(0, board)
if not Globals.DETERMINISTIC:
random.shuffle(self.stack)
while self.stack:
board = self.stack.pop()
trials += 1
if trials % Globals.LOG_FREQUENCY == 0:
print_update()
if not board.validate():
continue
valid_trials += 1
if Globals.MEMOIZE:
if board.check_memo():
revisits += 1
continue
else:
board.memoize()
if board.check_solved():
print_update()
if Globals.FIRST_ONLY:
return board
else:
boards.append(board)
for new_board in board.extend_board():
self.stack.append(new_board)
print_update()
if Globals.FIRST_ONLY:
return board
else:
return boards | python | 16 | 0.508276 | 80 | 36.688172 | 93 |
Class to track the state of a Game (Layout, Tiles, Board candidate stack
for solution in progress.)
| class |
class Ariia:
"""
    Main class of the project, the domotic assistant
"""
def __init__(self):
"""
Constructor
"""
self.keyEventListener = kel.KeyEventListener()
self.audioDeviceManager = adm.AudioDeviceManager(self.keyEventListener)
self.talkManager = tm.TalkManager()
self.audio = None
self.speech = None
self.answer = None
self.request = list()
self.cityList = list()
self.meteoScrapper = ms.MeteoScrapper()
self.historyScrapper = hs.HistoryScrapper()
self.keywords = dict()
self.resetKeywords()
def cleanStop(self):
"""
Used to correctly stop all the active tasks
"""
self.keyEventListener.unregisterListener()
def interaction(self):
"""
Interact with the user, main interface
between Ariia and the user.
"""
self.speech = self.audioDeviceManager.listenAndCreateSpeech()
self.analyseSpeech()
self.audioDeviceManager.speakAnswer(self.answer)
def resetKeywords(self):
"""
Reset the keywords
"""
        # Keyword boolean dictionary
self.keywords["Aria"] = False
self.keywords["date"] = False
self.keywords["jour"] = False
self.keywords["heure"] = False
self.keywords["meteo"] = False
self.keywords["lHeure"] = False
self.keywords["liste"] = False
self.keywords["courses"] = False
self.keywords["est"] = False
self.keywords["es"] = False
self.keywords["sommesNous"] = False
self.keywords["es-tu"] = False
self.keywords["etait"] = False
self.keywords["suis-je"] = False
self.keywords["tAppelles"] = False
self.keywords["sais"] = False
self.keywords["comment"] = False
self.keywords["qui"] = False
self.keywords["faire"] = False
self.keywords["tu"] = False
self.keywords["de"] = False
        # Flush the keyword lists
del self.request[:]
del self.cityList[:]
def analyseSpeech(self):
"""
Analyse the speech of the user and trigger answering methods
"""
self.answer = ""
self.resetKeywords()
try:
assert self.speech is not None
except AssertionError:
self.speech = "nsSpeechText"
for word in self.speech.split(" "):
self.request.append(word)
for word in self.request:
if word.lower() == "aria":
self.keywords["Aria"] = True
elif word.lower() == "heure":
self.keywords["heure"] = True
elif word.lower() == "l'heure":
self.keywords["lHeure"] = True
elif word.lower() == "est":
self.keywords["est"] = True
elif word.lower() == "date":
self.keywords["date"] = True
elif word.lower() == "jour":
self.keywords["jour"] = True
elif word.lower() == "sommes-nous":
self.keywords["sommesNous"] = True
elif word.lower() == unicode("météo", 'utf-8'):
self.keywords["meteo"] = True
elif word.lower() == "comment":
self.keywords["comment"] = True
elif word.lower() == "t'appelles":
self.keywords["tAppelles"] = True
elif word.lower() == "sais":
self.keywords["sais"] = True
elif word.lower() == "faire":
self.keywords["faire"] = True
elif word.lower() == "es":
self.keywords["es"] = True
elif word.lower() == "qui":
self.keywords["qui"] = True
elif word.lower() == "tu":
self.keywords["tu"] = True
elif word.lower() == "es-tu":
self.keywords["es-tu"] = True
elif word.lower() == "suis-je":
self.keywords["suis-je"] = True
elif word.lower() == unicode("était", 'utf-8'):
self.keywords["etait"] = True
elif word.lower() == "liste":
self.keywords["liste"] = True
elif word.lower() == "de":
self.keywords["de"] = True
elif word.lower() == "courses":
self.keywords["courses"] = True
if self.keywords["heure"] and self.keywords["est"] or self.keywords["heure"] or self.keywords["lHeure"]:
self.giveHour()
if self.keywords["date"] and self.keywords["est"] or self.keywords["date"] or self.keywords["jour"] and self.keywords["sommesNous"]:
self.giveDate()
if self.keywords["meteo"]:
self.giveMeteo()
if self.keywords["qui"] and self.keywords["etait"]:
self.giveHistory()
if self.keywords["liste"] and self.keywords["de"] and self.keywords["courses"]:
self.manageShoppingLists()
# if self.keywords["comment"] and self.keywords["tAppelles"]:
# self.basicAnswer("aria")
# if len(self.request) == 1 and self.keywords["Aria"]:
# self.basicAnswer("oui")
# if self.keywords["tu"] and self.keywords["sais"] and self.keywords["faire"]:
# self.basicAnswer("jeSaisFaire")
# if self.keywords["qui"] and self.keywords["es-tu"]:
# self.basicAnswer("presentationAria")
# if self.keywords["qui"] and self.keywords["suis-je"]:
# self.basicAnswer("presentationHumain")
if self.answer == "":
self.answer = self.talkManager.getTalk(self.speech)
return self.answer
def basicAnswer(self, answerFlag):
"""
Gives a basic answer
Parameters :
answerFlag - The flag given for a specific answer
"""
if answerFlag == "oui":
self.answer = "Oui ?"
elif answerFlag == "aria":
self.answer = " Je m'appelle Aria, avec deux iii."
elif answerFlag == "jeSaisFaire":
self.answer = u"Pour le moment, je ne sais pas faire grand chose. Mais je vais m'améliorer ! ".encode('utf-8')
elif answerFlag == "presentationAria":
self.answer = "Je m'appelle Aria, et je suis une assistance domotique."
elif answerFlag == "presentationHumain":
self.answer = u"Je suis presque sûre que tu es un humain !".encode('utf-8')
def giveHour(self):
"""
Gives the hour
"""
currentTime = time.localtime()
self.answer += " Il est actuellement " + str(currentTime[3]) + " heure " + str(currentTime[4]) + "."
def giveDate(self):
"""
Gives the date
"""
currentTime = time.localtime()
        months = ["janvier", "fevrier", "mars", "avril", "mai", "juin",
                  "juillet", "aout", "septembre", "octobre", "novembre",
                  "decembre"]
        month = months[currentTime[1] - 1]
        self.answer += " Nous sommes le " + str(currentTime[2]) + " " + month + " " + str(currentTime[0]) + "."
def giveMeteo(self):
"""
Gives meteo data for a specific city
        Note: For now this method is in alpha version; it can retrieve meteo data from cities in "ile de france".
"""
for word in self.request:
if word is not "Aria":
for letter in word:
letterUpper = letter.upper()
if letter == letterUpper:
self.cityList.append(word)
break
self.answer += u" voici la météo : ".encode('utf-8')
for city in self.cityList:
try:
sky, temperature, wind = self.meteoScrapper.getMeteo(city)
self.answer += u" à ".encode('utf-8') + city.encode('utf-8') + ": "
                if self.meteoScrapper.sky != "":
self.answer += " le ciel est "
self.answer += sky
self.answer += ", "
                if self.meteoScrapper.temperature != "":
self.answer += " Il fait"
self.answer += temperature
self.answer += u" degrés".encode('utf-8')
self.answer += ", "
                if self.meteoScrapper.wind != "":
self.answer += " Il y a "
self.answer += wind
self.answer += "."
except urllib2.HTTPError:
self.answer += u" Je n'ai pas de données météo pour la ville : ".encode('utf-8')
self.answer += " " + city.encode('utf-8')
self.answer += "."
def giveHistory(self):
"""
Gives historical data
"""
historicName = list()
historicNameStr = ""
for word in self.request:
if word is not "Aria":
for letter in word:
letterUpper = letter.upper()
if letter == letterUpper:
historicName.append(word)
historicNameStr += word + " "
break
try:
self.answer += self.historyScrapper.getHistoricDescription(historicName)
except Exception:
self.answer += u"Je n'ai pas de données historiques pour le personnage : ".encode('utf-8')
self.answer += historicNameStr.encode('utf-8')
self.answer += u", ou ces données sont corrompues.".encode('utf-8')
def manageShoppingLists(self):
"""
Manage the shopping lists of the user.
"""
self.shoppingListManager = slm.ShoppingListManager(self.audioDeviceManager)
self.shoppingListManager.manageShoppingLists()
self.answer = "Je ferme mon application de liste de courses." | python | 17 | 0.50911 | 140 | 28.487603 | 363 |
Motherclass of the project, the domotic assistant
| class |
class RemoteAnonServer:
"""An anonymization server that can be talked to via the API"""
def __init__(self, name, url):
"""Create a Remote anon server entry
Parameters
----------
name: str
short keyword to identify this server
url: str
full url to a valid Anonymization server web API
"""
self.name = name
self.url = url
def to_dict(self):
"""Dictionary representation of this server
Returns
-------
Dict
"""
return {"name": self.name, "url": self.url}
@classmethod
def from_dict(cls, dict_in):
"""Load instance from output of to_dict
Returns
-------
RemoteAnonServer
"""
return cls(name=dict_in["name"], url=dict_in["url"])
def __str__(self):
return f"{self.name}: {self.url}" | python | 12 | 0.521643 | 67 | 22.128205 | 39 | An anonymization server that can be talked to via the API | class |
class Spinner:
'''
A simple spinner
Args:
title (str): The title for the spinner
'''
def __init__(self, title: str):
from sys import stdout
        self.stdout = stdout
        self.step: int = 0
        self.steps = ['-', '\\', '|', '/']
self.stdout.write(title + ': ' + self.steps[self.step] + chr(8))
self.stdout.flush()
def __call__(self):
''' Advance the spinner '''
self.step += 1
self.step %= 4
self.stdout.write(self.steps[self.step] + chr(8))
self.stdout.flush()
def __del__(self):
''' Finish using the spinner and clean up '''
self.stdout.write(self.steps[self.step] + '\n')
self.stdout.flush() | python | 12 | 0.605634 | 66 | 21.857143 | 28 |
A simple spinner
Args:
title (str): The title for the spinner
| class |
class Connector:
"""connect the data (from the ncfiles/local sources) to larda
Args:
system (str): system identifier
system_info (dict): dict info loaded from toml
valid_dates (list of lists): list of begin and end datetime
description_dir (optional): dir with the description rst
"""
def __init__(self, system, system_info, valid_dates, description_dir=None):
self.system = system
self.system_info = system_info
self.valid_dates = valid_dates
self.params_list = list(system_info["params"].keys())
self.description_dir = description_dir
logger.info("params in this connector {} {}".format(self.system, self.params_list))
logger.debug('connector.system_info {}'.format(system_info))
def __str__(self):
s = "connector for system {} \ncontains parameters: ".format(self.system)
s += " ".join(self.params_list)
return s
def build_filehandler(self):
"""scrape the directories and build the filehandler
"""
pathdict = self.system_info['path']
filehandler = {}
for key, pathinfo in pathdict.items():
all_files = []
current_regex = pathinfo['matching_subdirs'] if 'matching_subdirs' in pathinfo else ''
for root, dirs, files in os.walk(pathinfo['base_dir']):
#print(root, dirs, len(files), files[:5], files[-5:] )
abs_filepaths = [root + f if (root[-1] == '/') else root + '/' + f for f in files if
re.search(current_regex, root + '/' + f)]
logger.debug("valid_files {} {}".format(root, [f for f in files if re.search(current_regex, root + "/" + f)]))
#print("skipped_files {} {}".format(root, [f for f in files if not re.search(current_regex, root + "/" + f)]))
all_files += abs_filepaths
#files = [f for f in os.listdir('.') if re.match(r'[0-9]+.*\.jpg', f)]
# remove basedir (not sure if that is a good idea)
all_files = [p.replace(pathinfo['base_dir'], "./") for p in all_files]
logger.debug('filelist {} {}'.format(len(all_files), all_files[:10]))
dates = [convert_to_datestring(pathinfo["date_in_filename"], f)\
for f in all_files]
all_files = [f for _, f in sorted(zip(dates, all_files), key=lambda pair: pair[0])]
dates = sorted(dates)
if dates:
if len(dates) > 1:
guessed_duration = (datetime.datetime.strptime(dates[-1],'%Y%m%d-%H%M%S') -
datetime.datetime.strptime(dates[-2],'%Y%m%d-%H%M%S'))
else:
guessed_duration = datetime.timedelta(seconds=(23*60*60)-1)
# quick fix guessed duration not longer than 24 h
if guessed_duration >= datetime.timedelta(hours=24):
guessed_duration = datetime.timedelta(seconds=(24*60*60)-1)
last_data = (
datetime.datetime.strptime(dates[-1],'%Y%m%d-%H%M%S') + guessed_duration
).strftime("%Y%m%d-%H%M%S")
date_pairs = list(zip(dates, dates[1:]+[last_data]))
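                # date_pairs pairs each file's start date with the next file's
                # start; the last one ends at the guessed duration computed above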
else:
date_pairs = []
#singlehandler = zip(date_pairs, all_files)
valid_date_filter = setup_valid_date_filter(self.valid_dates)
singlehandler = list(filter(
valid_date_filter,
list(zip(date_pairs, all_files))))
filehandler[key] = singlehandler
#pprint.pprint(filehandler)
self.filehandler = filehandler
def save_filehandler(self, path, camp_name):
"""save the filehandler to json file"""
savename = 'connector_{}.json'.format(self.system)
pretty = {'indent': 2, 'sort_keys':True}
#pretty = {}
if not os.path.isdir(path+'/'+camp_name):
os.makedirs(path+'/'+camp_name)
with open(path+'/'+camp_name+'/'+savename, 'w') as outfile:
json.dump(self.filehandler, outfile, **pretty)
logger.info('saved connector to {}/{}/{}'.format(path,camp_name,savename))
def load_filehandler(self, path, camp_name):
"""load the filehandler from the json file"""
filename = "connector_{}.json".format(self.system)
with open(path+'/'+camp_name+'/'+filename) as json_data:
self.filehandler = json.load(json_data)
def collect(self, param, time_interval, *further_intervals, **kwargs):
"""collect the data from a parameter for the given intervals
Args:
param (str) identifying the parameter
time_interval: list of begin and end datetime
*further_intervals: range, velocity, ...
**interp_rg_join: interpolate range during join
"""
paraminfo = self.system_info["params"][param]
if 'interp_rg_join' not in paraminfo:
# default value
paraminfo['interp_rg_join'] = False
if 'interp_rg_join' in kwargs:
paraminfo['interp_rg_join'] = kwargs['interp_rg_join']
base_dir = self.system_info['path'][paraminfo['which_path']]["base_dir"]
logger.debug("paraminfo at collect {}".format(paraminfo))
if len(time_interval) == 2:
begin, end = [dt.strftime("%Y%m%d-%H%M%S") for dt in time_interval]
# cover all three cases: 1. file only covers first part
# 2. file covers middle part 3. file covers end
#print(begin, end)
flist = [e for e in self.filehandler[paraminfo['which_path']] \
if (e[0][0] <= begin < e[0][1])
or (e[0][0] > begin and e[0][1] < end)
or (e[0][0] <= end <= e[0][1])]
assert len(flist) > 0, "no files available"
elif len(time_interval) == 1:
begin = time_interval[0].strftime("%Y%m%d-%H%M%S")
flist = [e for e in self.filehandler[paraminfo['which_path']] if e[0][0] <= begin < e[0][1]]
assert len(flist) == 1, "flist too long or too short: {}".format(len(flist))
#[print(e, (e[0][0] <= begin and e[0][1] > begin), (e[0][0] > begin and e[0][1] < end), (e[0][0] <= end and e[0][1] >= end)) for e in flist]
load_data = setupreader(paraminfo)
datalist = [load_data(base_dir + e[1], time_interval, *further_intervals) for e in flist]
# [print(e.keys) if e != None else print("NONE!") for e in datalist]
# reader returns none, if it detects no data prior to begin
# now these none values are filtered from the list
assert len(datalist) > 0, 'No data found for parameter: {}'.format(param)
        datalist = list(filter(lambda x: x is not None, datalist))
#Transf.join(datalist[0], datalist[1])
data = functools.reduce(Transf.join, datalist)
return data
def description(self, param):
paraminfo = self.system_info["params"][param]
print('connector local paraminfo: ' + paraminfo['variable_name'])
# Prints the nicely formatted dictionary
# this is the python pprint function, not the larda.helpers function
pp = pprint2.PrettyPrinter(indent=4)
pp.pprint(paraminfo)
if 'description_file' not in paraminfo:
return 'no description file defined in config'
        if self.description_dir is None:
return 'description dir not set'
description_file = self.description_dir + paraminfo['description_file']
logger.info('load description file {}'.format(description_file))
with open(description_file, 'r', encoding="utf-8") as f:
descr = f.read()
return descr
def get_as_plain_dict(self):
"""put the most important information of the connector into a plain dict (for http tranfer)
.. code::
{params: {param_name: fileidentifier, ...},
avail: {fileidentifier: {"YYYYMMDD": no_files, ...}, ...}``
Returns:
``dict``
"""
return {
'params': {e: self.system_info['params'][e]['which_path'] for e in self.params_list},
'avail': {k: self.files_per_day(k) for k in self.filehandler.keys()}
}
def get_matching_files(self, begin_time, end_time):
""" """
matching_files = []
begin_day = datetime.datetime.utcfromtimestamp(begin_time).date()
end_day = datetime.datetime.utcfromtimestamp(end_time).date()
for i in range(len(self.datelist)):
if self.datelist[i] >= begin_day and self.datelist[i] <= end_day:
matching_files.append(self.filelist[i])
if len(matching_files) == 0:
raise Exception("no files found for " + self.param_info.system_type + " " + self.param_info.variable_name)
return matching_files
def files_per_day(self, which_path):
"""replaces ``days_available`` and ``day_available``
Returns:
``dict``: ``{'YYYYMMDD': no of files, ...}``
"""
fh = self.filehandler[which_path]
groupedby_day = collections.defaultdict(list)
for d, f in fh:
groupedby_day[d[0][:8]] += [f]
no_files_per_day = {k: len(v) for k, v in groupedby_day.items()}
return no_files_per_day | python | 21 | 0.561179 | 148 | 43.85782 | 211 | connect the data (from the ncfiles/local sources) to larda
Args:
system (str): system identifier
system_info (dict): dict info loaded from toml
valid_dates (list of lists): list of begin and end datetime
description_dir (optional): dir with the description rst
| class |
class CommanderProcess(Process):  # Process base assumed from the daemon/run()/super().__init__() usage
"""Process that sends messages with information on how to create new data"""
def __init__(
self,
commander: Commander,
buffer_state_memory: BufferStateMemory,
message_queue: Queue,
buffer_id_sender: Optional[Connection],
):
"""Init
Args:
            commander (Commander): builds the messages that describe how to create new data
            buffer_state_memory (BufferStateMemory): contains the states of the buffers
message_queue (Queue): queue that is used to send information on how to create data
buffer_id_sender (Optional[Connection]): connection that is used to send the buffer_id, used to ensure determinstic behaviour.
"""
super().__init__()
self.daemon = True
self._commander = commander
self._buffer_state_memory = buffer_state_memory
self._message_queue = message_queue
self._buffer_id_sender = buffer_id_sender
def run(self):
self._commander.build()
while True:
buffer_id = self._buffer_state_memory.get_free_buffer_id()
if buffer_id is None:
continue
self._message(buffer_id)
def _message(self, buffer_id, *args, **kwargs):
message = self._commander.create_message(*args, **kwargs)
message[BUFFER_ID_KEY] = buffer_id
self._message_queue.put(message)
if self._buffer_id_sender is not None:
self._buffer_id_sender.send(buffer_id) | python | 12 | 0.610497 | 138 | 35.225 | 40 | Process that sends messages with information on how to create new data | class |
class BasePolicy:
"""BasePolicy
    Base Policy class. Randomly samples from the action state. You can provide an action state as an argument (args[0]). If no action state is provided, the policy is initialized with an action state with a single 'None' action.
"""
def __init__(self, *args, action_state=None, **kwargs):
# If a state is provided, use it; else create one (important not to lose the reference w/r the game_state)
if action_state is None:
action_state = State()
action_state["action"] = StateElement(
values=None,
spaces=Space([numpy.array([None], dtype=object)]),
)
self.action_state = action_state
self.host = None
# https://stackoverflow.com/questions/1015307/python-bind-an-unbound-method
def _bind(self, func, as_name=None):
if as_name is None:
as_name = func.__name__
bound_method = func.__get__(self, self.__class__)
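        # func.__get__(self, cls) runs the function's descriptor protocol and
        # returns a method bound to this instance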
setattr(self, as_name, bound_method)
return bound_method
def __content__(self):
return self.__class__.__name__
@property
def observation(self):
"""observation
Return the last observation.
:return: last observation
:rtype: `State<coopihc.space.State.State>`
"""
return self.host.inference_engine.buffer[-1]
@property
def action(self):
"""action
Return the last action.
:return: last action
:rtype: `State<coopihc.space.StateElement.StateElement>`
"""
return self.action_state["action"]
@property
def new_action(self):
"""new action (copy)
Return a copy of the last action.
:return: last action
:rtype: `StateElement<coopihc.space.StateElement.StateElement>`
"""
return copy.copy(self.action_state["action"])
@property
def unwrapped(self):
return self
def reset(self):
self.action.reset()
def sample(self, observation=None):
"""sample
(Randomly) Sample from the policy
:return: (action, action reward)
:rtype: (StateElement<coopihc.space.StateElement.StateElement>, float)
"""
self.action.reset()
return self.action, 0
def __repr__(self):
try:
return self.action_state.__str__()
except AttributeError:
return "Policy--unreadable" | python | 18 | 0.591977 | 233 | 27.37931 | 87 | BasePolicy
Base Policy class. Randomly samples from the action state. You have can provide an action state as an argument (args[0]). If no action state is provided, the policy is initialized with an action state with a single 'None' action.
| class |
class ChannelResults:
""" Performance parameters for one optical element for one channel """
def __init__(self):
""" Constructor """
self.temp = None
self.refl = None
self.spil = None
self.scat = None
self.spil_temp = None
self.scat_temp = None
self.abso = None
self.emiss = None
self.effic = None
#@Function
@staticmethod
def emission(freqs, abso, spil, spil_temp, scat, scat_temp, temp): #pylint: disable=too-many-arguments
""" Compute the emission for this element """
return abso +\
spil * physics.pow_frac(spil_temp, temp, freqs) +\
scat * physics.pow_frac(scat_temp, temp, freqs)
#@Function
@staticmethod
def efficiency(refl, abso, spil, scat):
""" Compute the transmission for this element """
return (1-refl)*(1-abso)*(1-spil)*(1-scat)
def calculate(self, freqs):
""" Compute the results for the frequencies of interest for a given channel """
emiss_shape = np.broadcast(freqs, self.abso, self.spil, self.spil_temp, self.scat, self.scat_temp, self.temp).shape
self.emiss = self.emission(freqs, self.abso, self.spil, self.spil_temp, self.scat, self.scat_temp, self.temp).reshape(emiss_shape)
effic_shape = np.broadcast(self.refl, self.abso, self.spil, self.scat).shape
self.effic = self.efficiency(self.refl, self.abso, self.spil, self.scat).reshape(effic_shape)
def __call__(self):
""" Return key parameters """
return (self.effic, self.emiss, self.temp) | python | 12 | 0.621944 | 138 | 39.923077 | 39 | Performance parameters for one optical element for one channel | class |
class Printer:
"""
Concatenates a prefix to the input
Consumer: Gets a value from the provided queue
Producer: Puts the result into the provided queue
"""
# They're all public in this example, but should probably be private
# Globals must be initialized here ("compile"-time)
# They could be initialized at runtime before starting the process but this is HIGHLY DISCOURAGED
prefixStart = "PrinterGlobal "
def __init__(self, prefix):
self.prefix = self.prefixStart + prefix
# The working function
def print(self, inData):
# Pretending we are hard at work
time.sleep(0.1)
# Validate input
# Incoming data MUST be checked in this class,
# not the wrapper function!
# It should be more sophisticated than this,
# probably something like class method for checking and unpacking
        if inData is None:
return False, None
suffix = str(inData)
# Print string
stringToPrint = self.prefix + suffix
# Outgoing data MUST be in this form
# so that bad data isn't going in the queue
# Data must be packed into one variable before going out to the wrapper function!
return True, stringToPrint | python | 9 | 0.650432 | 101 | 30.85 | 40 |
Concatenates a prefix to the input
Consumer: Gets a value from the provided queue
Producer: Puts the result into the provided queue
| class |
class TestSend:
"""AWS task status post."""
def test_not_stopped(self, task):
"""Task is not known to stopped."""
send_fn = mock.Mock()
kw = {"a": 42, "b": "spam"}
task._send(send_fn, **kw)
send_fn.assert_called_once_with(taskToken="taskToken", **kw)
def test_stopped(self, task):
"""Task is stopped."""
task._request_stop = True
send_fn = mock.Mock()
kw = {"a": 42, "b": "spam"}
task._send(send_fn, **kw)
send_fn.assert_not_called() | python | 10 | 0.472881 | 72 | 35.9375 | 16 | AWS task status post. | class |
class TestJoin:
"""Waiting for worker to finish."""
def test_succeeds(self, worker):
"""Worker finishes successfully."""
worker._poller = mock.Mock(spec=threading.Thread)
worker.join()
assert worker._request_finish is False
worker._poller.join.assert_called_once_with()
def test_cancelled(self, worker):
"""Worker is cancelled by main thread."""
worker._poller = mock.Mock(spec=threading.Thread)
exc = KeyboardInterrupt()
worker._poller.join.side_effect = exc
worker.join()
assert worker._request_finish is True
worker._poller.join.assert_called_once_with()
def test_error(self, worker):
"""Unknown failure on worker joining."""
worker._poller = mock.Mock(spec=threading.Thread)
exc = ValueError("spambla42")
worker._poller.join.side_effect = exc
with pytest.raises(ValueError) as e:
worker.join()
assert e.value == exc
assert worker._request_finish is True
worker._poller.join.assert_called_once_with()
def test_poller_error(self, worker):
"""Failure during polling/execution."""
worker._poller = mock.Mock(spec=threading.Thread)
worker._exc = ValueError("spambla42")
with pytest.raises(ValueError) as e:
worker.join()
assert e.value == worker._exc
worker._poller.join.assert_called_once_with() | python | 11 | 0.573325 | 61 | 41.783784 | 37 | Waiting for worker to finish. | class |
class Segment:
"""
A collection of parts that appears in the output file.
This class is meant to be subclassed with a PARTS class member.
"""
    PARTS: ClassVar[Dict[str, str]] # virtual member
def __init__(self):
self.raw_parts = []
self.activated = set() # so we don't add the same part twice
def add_part(self, part_name):
if part_name in self.activated:
return
val = self.PARTS[part_name]
self.raw_parts.append(val)
self.activated.add(part_name)
def add_raw(self, part_string):
self.raw_parts.append(part_string)
def partial_part(self, part_name):
return partial(self.add_part, part_name)
def __iter__(self):
yield from self.raw_parts
def join(self, sep='\n'):
return sep.join(self) | python | 13 | 0.600962 | 69 | 26.766667 | 30 |
A collection of parts that appears in the output file.
This class is meant to be subclassed with a PARTS class member.
| class |
class JurisDiction:
"""Scraper for Juris Diction news source, Queen's Law Newspaper.
Site is currently located at <http://juris-diction.ca>.
"""
host = 'http://www.juris-diction.ca'
slug = 'jurisdiction'
scraper = Scraper()
logger = scraper.logger
@staticmethod
def scrape(deep=False, location='./dumps/news', *args, **kwargs):
"""Scrape information custom to Juris Diction.
Args:
            deep: If True, scrape every archive; otherwise only the current year.
location (optional): String location of output files.
"""
JurisDiction.logger.info('Starting JurisDiction scrape')
try:
archive_month_urls = get_urls_on_depth(
JurisDiction._get_archive_month_urls(),
JurisDiction.logger,
deep
)
for archive_month_url in archive_month_urls:
try:
JurisDiction.logger.debug('ARCHIVE: %s', archive_month_url)
archive_page_urls = JurisDiction._get_archive_page_urls(
archive_month_url)
page_num = 1
for archive_page_url in archive_page_urls:
try:
archive_page = JurisDiction.scraper.http_request(
archive_page_url)
JurisDiction.logger.debug('Page %s', page_num)
article_rel_urls = (
JurisDiction._get_rel_article_urls(
archive_page)
)
for article_rel_url in article_rel_urls:
try:
article_page, article_url = (
get_article_page(
JurisDiction.scraper,
JurisDiction.host,
JurisDiction.logger,
article_rel_url,
)
)
article_data = (
JurisDiction._parse_article_data(
article_page, article_url)
)
if article_data:
save_article(
JurisDiction.scraper,
article_data,
location,
)
JurisDiction.scraper.wait()
except Exception:
JurisDiction.scraper.handle_error()
page_num += 1
except Exception:
JurisDiction.scraper.handle_error()
except Exception:
JurisDiction.scraper.handle_error()
except Exception:
JurisDiction.scraper.handle_error()
JurisDiction.logger.info('Completed JurisDiction scrape')
@staticmethod
def _get_archive_month_urls():
# Request main URL and extract all archived month URLs.
soup = JurisDiction.scraper.http_request(JurisDiction.host)
archives = soup.find('div', id='archives-3').find_all('li')
archive_month_urls = [arch.find('a')['href'] for arch in archives]
return archive_month_urls
@staticmethod
def _get_archive_page_urls(archive_month_url):
# Requests an archive month's URL and crawls the archive for any
# additional paginated 'next' URLs, if they exist.
archive_page_urls = [archive_month_url]
archive_page = JurisDiction.scraper.http_request(archive_month_url)
# Paginate until we no longer see a 'next' button.
while archive_page.find('a', 'next'):
archive_page_url = archive_page.find('a', 'next')['href']
archive_page_urls.append(archive_page_url)
archive_page = JurisDiction.scraper.http_request(archive_page_url)
return archive_page_urls
@staticmethod
def _get_rel_article_urls(archive_page):
# Extract every article's relative URL from the current archive page.
article_section = archive_page.find('div', 'vw-isotope')
articles = article_section.find_all('h3', 'vw-post-box-title')
article_rel_urls = [article.find('a')['href'] for article in articles]
return article_rel_urls
@staticmethod
def _parse_article_data(article_page, article_url):
title = article_page.find('h1', 'entry-title').text.strip()
# Queen's Juris Diction uses HTML5 element 'time', which already
# contains ISO format in 'datetime' attribute.
published_iso = article_page.find(
'div', class_='vw-post-meta-inner').find('time')['datetime']
# Multiple authors are listed with commas, except for last author with
# 'and' such as 'John, Alex and Jason'.
authors_raw = article_page.find('a', 'author-name')
authors = (
authors_raw.text.replace(' and', ',').split(', ')
if authors_raw else []
)
content = article_page.find('div', 'vw-post-content').text.strip()
content_raw = str(article_page.find('div', 'vw-post-content'))
data = {
'title': title,
'slug': JurisDiction.slug,
'url': article_url[:-1],
'published': published_iso,
'updated': published_iso,
'authors': authors,
'content': content,
'content_raw': content_raw,
}
return OrderedDict(data) | python | 26 | 0.491661 | 79 | 36.018519 | 162 | Scraper for Juris Diction news source, Queen's Law Newspaper.
Site is currently located at <http://juris-diction.ca>.
| class |
class PlottingOptions:
"""
    Class to use with the plotter script.
    Stores options related to having many solution workdirs to draw from for plotting.
"""
nplot : int = 25
ntotal : int = 1024
plot_type : str = "mc_results" # [ snapshot , video , mc_results ]
snapshot_num : int = 20
mc_workdir_name : str = "mc_" | python | 6 | 0.626822 | 85 | 27.666667 | 12 |
class to use with plotter script
stores options related to having many solution workdirs to draw from for plotting
| class |
class Meta:
'''
    Sets the table name in the database.
'''
db_table = "login" | python | 6 | 0.45 | 37 | 19.2 | 5 |
to set table name in database
| class |
class MockUSBContext:
"""This class mocks the behaviour of usb.core.Context."""
def dispose(self, device: "MockUSBServoBoardDevice") -> None:
"""Dispose of the device."""
pass | python | 8 | 0.65 | 65 | 32.5 | 6 | This class mocks the behaviour of usb.core.Context. | class |
class MockUSBServoBoardDevice:
"""This class mocks the behaviour of a USB device for a Servo Board."""
def __init__(self, serial_number: str, fw_version: int = 2):
self.serial = serial_number
self.firmware_version = fw_version
self._ctx = MockUSBContext() # Used by PyUSB when cleaning up the device.
@property
def serial_number(self) -> str:
"""Get the serial number of the USB device."""
return self.serial
def ctrl_transfer(
self,
bmRequestType: int,
bRequest: int,
wValue: int = 0,
wIndex: int = 0,
data_or_wLength: Optional[Union[int, bytes]] = None,
timeout: Optional[int] = None,
) -> bytes:
"""Mock a control transfer."""
assert bRequest == 64 # This is the same for read and write.
if bmRequestType == 0x80:
assert isinstance(data_or_wLength, int)
return self.read_data(wValue, wIndex, data_or_wLength, timeout)
if bmRequestType == 0x00:
assert isinstance(data_or_wLength, bytes)
self.write_data(wValue, wIndex, data_or_wLength, timeout)
return b""
raise ValueError("Invalid Request Type for mock device.")
def read_data(
self,
wValue: int = 0,
wIndex: int = 0,
wLength: int = 0,
timeout: Optional[int] = None,
) -> bytes:
"""Mock reading data from a device."""
assert wValue == 0 # Always 0 on read.
if wIndex == 9:
return self.read_fw(wLength)
raise NotImplementedError
def read_fw(self, wLength: int) -> bytes:
"""Mock reading the firmware number."""
assert wLength == 4
return struct.pack("<I", self.firmware_version)
def write_data(
self,
wValue: int = 0,
wIndex: int = 0,
data: bytes = b"",
timeout: Optional[int] = None,
) -> None:
"""Mock writing data to a device."""
if 0 <= wIndex < 12:
# Set Servo.
return self.write_servo(wValue, data)
if wIndex == 12:
# Initialise. Behaviour unknown currently
return
raise NotImplementedError
def write_servo(self, wValue: int, data: bytes) -> None:
"""Set the value of a servo."""
assert -100 <= wValue <= 100
assert data == b'' | python | 13 | 0.560518 | 82 | 30.539474 | 76 | This class mocks the behaviour of a USB device for a Servo Board. | class |
class Matrix:
"""
Implementation of matrices.
"""
def __init__(self, matrix):
super().__init__()
# self.matrix = [[1, 2, 3], [4, 5, 6]]
self.matrix = matrix
self.used = []
# self.shape = self.get_shape()
self.__all__ = []
self.__all__.extend("MatrixDef")
    def get_shape(self):
        # TODO(aaronhma): Stabilize this:
        self.__all__.append("MatrixShape")
        # Return (rows, columns) as a tuple; a set would collapse square
        # matrices to a single value and has no guaranteed order.
        return (len(self.matrix), len(self.matrix[0]))
def test_valid_matrix(self) -> bool:
"""
Returns if matrix is valid.
"""
self.__all__.extend("ValidateMatrix")
X, y = self.get_shape()
check_valid_row = False
check_valid_column = False
if X == 2:
check_valid_row = True
if y >= 1:
check_valid_column = True
if check_valid_column and check_valid_row:
return True
else:
raise MatrixError("Invalid matrix!")
    def transform(self):
        self.__all__.append("TransformMatrix")
        # X, y = self.get_shape()
        try:
            assert self.test_valid_matrix()
        except (AssertionError, MatrixError):
            raise MathlyAssertError("The matrix is invalid!")
        return self.get_shape()
def transpose(self):
self.__all__.extend("TransposeMatrix")
X, y = self.get_shape() | python | 12 | 0.506925 | 61 | 27.333333 | 51 |
Implementation of matrices.
| class |
class Admin:
''' Administrative tasks for my own servers '''
def __init__(self, bot):
self.bot = bot
self.flags = FlagFramework()
async def on_member_join(self, member):
server = member.server
if server.id == rc.ADMIN_SERVER:
role = discord.utils.get(member.server.roles, name=rc.ADD_ROLE)
await self.bot.add_roles(member, role)
@commands.command(pass_context=True)
async def amIAdmin(self, ctx):
admin = AdminFramework(ctx.message.server)
if admin.check(ctx.message.author):
await self.bot.say("Yes")
else:
await self.bot.say("No")
@commands.command(pass_context=True, timeout=10)
async def promote(self, ctx, user):
p = Phrasebook(ctx, self.bot)
admin = AdminFramework(ctx.message.server)
if not admin.check(ctx.message.author):
await self.bot.say(p.pickPhrase('admin', 'notauthorized'))
return
mem = ctx.message.server.get_member_named(user)
if not mem:
await self.bot.say(p.pickPhrase('admin', 'notfound', user))
else:
await self.bot.say(p.pickPhrase('admin', 'confirmpromote', \
mem.name, mem.discriminator, mem.id)\
)
msg = await self.bot.wait_for_message(author=ctx.message.author)
            if msg and ('yes' in msg.content.lower() or 'confirm' in msg.content.lower()):
admin.promote(mem)
await self.bot.say(p.pickPhrase('admin', 'promote', mem.mention))
else:
await self.bot.say(p.pickPhrase('admin', 'abort'))
@commands.command(pass_context=True, timeout=10)
async def demote(self, ctx, user):
p = Phrasebook(ctx, self.bot)
admin = AdminFramework(ctx.message.server)
if not admin.check(ctx.message.author):
await self.bot.say(p.pickPhrase('admin', 'notauthorized'))
return
mem = ctx.message.server.get_member_named(user)
if not mem:
await self.bot.say(p.pickPhrase('admin', 'notfound', user))
else:
await self.bot.say(p.pickPhrase('admin', 'confirmdemote', \
mem.name, mem.discriminator, mem.id)\
)
msg = await self.bot.wait_for_message(author=ctx.message.author)
            if msg and ('yes' in msg.content.lower() or 'confirm' in msg.content.lower()):
admin.demote(mem)
await self.bot.say(p.pickPhrase('admin', 'demote', mem.mention))
else:
await self.bot.say(p.pickPhrase('admin', 'abort'))
@commands.command(pass_context=True)
async def dumpUsers(self, ctx):
s = 'Member list:\n```'
for member in ctx.message.server.members:
s = s + str(member) + '(id: ' + member.id + ') '
if member.server_permissions == discord.Permissions.all():
s = s + '[ADMIN]'
s = s + '\n'
s = s + '```'
await self.bot.say(s) | python | 19 | 0.677596 | 79 | 33.173333 | 75 | Administrative tasks for my own servers | class |
class CircleApproximation:
"""A class to track the position of the robot in a system of coordinates
using only encoders as feedback, using a combination of line and circle
approximation methods."""
def __init__(self, axis_width, l_encoder, r_encoder):
"""Saves input values, initializes class variables."""
self.axis_width = axis_width
self.l_encoder, self.r_encoder = l_encoder, r_encoder
# previous values for the encoder position and heading
self.prev_l, self.prev_r, self.prev_heading = 0, 0, 0
# starting position of the robot
self.x, self.y = 0, 0
def update(self):
"""Update the position of the robot."""
# get sensor values and the previous heading
l, r, heading = self.l_encoder(), self.r_encoder(), self.prev_heading
# calculate encoder deltas (differences from this and previous readings)
l_delta, r_delta = l - self.prev_l, r - self.prev_r
        # calculate the change in heading over this update
h_delta = (r_delta - l_delta) / self.axis_width
# either approximate if we're going (almost) straight or calculate arc
if abs(l_delta - r_delta) < 1e-5:
self.x += l_delta * cos(heading)
self.y += r_delta * sin(heading)
else:
# calculate the radius of ICC
R = (self.axis_width / 2) * (r_delta + l_delta) / (r_delta - l_delta)
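            # This follows from l_delta = (R - w/2) * h_delta and
            # r_delta = (R + w/2) * h_delta, where w is the axis width.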
# calculate the robot position by finding a point that is rotated
# around ICC by heading delta
self.x += R * sin(h_delta + heading) - R * sin(heading)
self.y += - R * cos(h_delta + heading) + R * cos(heading)
# set previous values to current values
self.prev_l, self.prev_r, self.prev_heading = l, r, heading + h_delta
def get_position(self):
"""Return the position of the robot."""
return (self.x, self.y) | python | 15 | 0.588386 | 81 | 40.347826 | 46 | A class to track the position of the robot in a system of coordinates
using only encoders as feedback, using a combination of line and circle
approximation methods. | class |
class Node:
"""This defines a Node in the Rule decision tree."""
arg_types = ("RLOC", "SPAWN", "HP")
comp_ops = ("LT", "GT")
operation_types = ("AND", "OR")
rloc_types = ("ENEMY", "SPAWN", "INVALID", "EMPTY", "ALLY", "ALLY_FUT", "ATT_FUT")
directions = ("UP", "DOWN", "RIGHT", "LEFT")
rloc_w_hp = ("ENEMY", "ALLY", "ALLY_FUT")
def __init__(self, operation, arg_type, par=None, arg_hp=0, arg_comp_op="LT",
arg_spawn=0, arg_rloc_type="EMPTY", arg_rloc=(0,0), arg_dir=None,
arg_not=False, arg_root=False):
self.type = arg_type
self.root = arg_root
self.op = operation
self.parent = par
self.children = []
self.direction = arg_dir
self.not_op = arg_not
self.rloc_type = arg_rloc_type
self.rloc = arg_rloc
self.hp = arg_hp
self.comp_op = arg_comp_op
self.turns_since_spawn = arg_spawn
@staticmethod
def new_random_node(root=False):
"""Creates a random Node."""
hp = 0
comp_op = ""
spawn = 0
rloc = (0, 0)
rloc_type = "EMPTY"
dir = random.choice(Node.directions)
not_op = (random.random() < 0.5)
op = random.choice(Node.operation_types)
type = random.choice(Node.arg_types)
if type == "RLOC":
rloc = Rule.gen_rloc_gaussian()
rloc_type = random.choice(Node.rloc_types)
if any(rloc_type == s for s in Node.rloc_w_hp):
hp = random.randint(1, 50)
comp_op = random.choice(Node.comp_ops)
elif type == "SPAWN":
spawn = random.randint(0, 9)
comp_op = random.choice(Node.comp_ops)
elif type == "HP":
hp = random.randint(1, 50)
comp_op = random.choice(Node.comp_ops)
return Node(op, type, None, hp, comp_op, spawn, rloc_type, rloc, dir, not_op, root)
def insert_child_randomly(self, child):
"""Inserts the child at random in this Node's children list."""
last_idx = len(self.children) - 1
if last_idx < 0:
self.children.append(child)
else:
self.children.insert((random.randint(0, (len(self.children) - 1))), child)
def remove_child(self, child):
"""Removes the child from this Node's children list."""
self.children.remove(child)
def remove_child_by_idx(self, idx):
"""Removes the child at idx in Node's children list."""
del self.children[idx]
def is_root(self):
return self.root
def make_copy(self):
"""Creates copy of this node.
Does not copy parent and children pointers."""
return Node(self.op, self.type, None, self.hp, self.comp_op,
self.turns_since_spawn, self.rloc_type, (self.rloc[0], self.rloc[1]),
self.direction, self.not_op, self.root)
def evaluate(self, game, robot, ally_fut, attack_fut):
"""Recursive evaluation of this Node and it's children given game state."""
result = False
dir_result = self.direction
# First handle this node's expression
if self.type == "SPAWN":
if self.comp_op == "LT" and self.turns_since_spawn > (game['turn'] % 10):
result = True
elif self.comp_op == "GT" and self.turns_since_spawn < (game['turn'] % 10):
result = True
elif self.type == "HP":
if self.comp_op == "LT" and robot.hp < self.hp:
result = True
elif self.comp_op == "GT" and robot.hp > self.hp:
result = True
elif self.type == "RLOC":
true_loc = (robot.location[0] + self.rloc[0], robot.location[1] + self.rloc[1])
bots = game.get('robots')
check_hp = False
hp_to_check = 0
if self.rloc_type == "ENEMY":
if true_loc in bots.keys():
bot = bots[true_loc]
if bot.player_id != robot.player_id:
check_hp = True
hp_to_check = bot.hp
elif self.rloc_type == "SPAWN":
if 'spawn' in rg.loc_types(true_loc):
result = True
elif self.rloc_type == "INVALID":
if any(x in rg.loc_types(true_loc) for x in ['obstacle', 'invalid']):
result = True
elif self.rloc_type == "EMPTY":
if not any(x in rg.loc_types(true_loc) for x in ['obstacle', 'invalid']):
if true_loc not in bots.keys():
result = True
elif self.rloc_type == "ALLY":
if true_loc in bots.keys():
bot = bots[true_loc]
if bot.player_id == robot.player_id:
check_hp = True
hp_to_check = bot.hp
elif self.rloc_type == "ALLY_FUT":
if not any(x in rg.loc_types(true_loc) for x in ['obstacle', 'invalid']):
hp_to_check = ally_fut[true_loc[0]][true_loc[1]]
if hp_to_check > 0:
check_hp = True
elif self.rloc_type == "ATT_FUT":
if not any(x in rg.loc_types(true_loc) for x in ['obstacle', 'invalid']):
result = (attack_fut[true_loc[0]][true_loc[1]] > 0)
if check_hp:
if self.comp_op == "LT" and hp_to_check < self.hp:
result = True
elif self.comp_op == "GT" and hp_to_check > self.hp:
result = True
if self.not_op:
result = not result
# Now check children Nodes
# Precedence of True evalutations:
# True OR child > True AND children + True self > No AND children + True self
# Direction is set by the "lowest in the tree" True node of the highest
# precedence condition. For example, a True node with all True AND
# children and two True OR children will use the direction of the first True
# OR child. Assuming non-contradictory Node expressions, this should allow
# every Node to determine the direction in at least one game state.
and_still_possible = True
for child in self.children:
ch_result = child.evaluate(game, robot, ally_fut, attack_fut)
if result and (child.op == "AND"):
and_still_possible = (and_still_possible and ch_result[0])
dir_result = ch_result[1]
if child.op == "OR" and ch_result[0]:
return ch_result
return ((result and and_still_possible), dir_result)
def debug_print(self, indent=0):
i_str = ""
for x in range(indent):
i_str = i_str + " "
print i_str + "parent:", self.parent
print i_str + "root:", self.root, "op:", self.op, "not:", self.not_op, \
"type:", self.type, \
"rloc:", self.rloc, "rloc_type:", self.rloc_type, "comp_op:", self.comp_op, \
"hp:", self.hp, "spawn:", self.turns_since_spawn, "dir:", self.direction
print i_str + "num children:", len(self.children)
for child in self.children:
print i_str + "child:", child
child.debug_print(indent+1) | python | 20 | 0.508244 | 93 | 42.325714 | 175 | This defines a Node in the Rule decision tree. | class |
class LineElement:
"""LineElement represents a finite arc in the discretization of the domain boundary."""
def __init__(self, a, b, n, is_fault):
"""Constructor.
:param a: Start point
:param b: End point
:param n: Outward-pointing normal
:param is_fault: Flag this element as fault
"""
self.a = np.array(a)
self.h = np.array(b) - self.a
self.h_norm = np.linalg.norm(self.h)
self.n = np.array(n)
self.n = self.n / np.linalg.norm(self.n)
self.is_fault = is_fault
def xi(self, theta):
"""Map from interval [-1, 1] to line a-b.
:param theta: Scalar in [-1, 1].
"""
# TODO - done
ThetaInv = self.h * (theta + 1.0)/2.0 + self.a
#return np.array([0.0, 0.0])
return ThetaInv
def basis(self, theta):
"""Basis function evaluated at theta.
:param theta: Scalar in [-1, 1]
"""
# TODO: done
#return 0.0
return 1
def factor(self, theta):
"""Integration factor.
Must return basis(theta) * |xi'(theta)|
:param theta: Scalar in [-1, 1]
"""
# TODO: done
#return 0.0
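        # basis(theta) = 1 and |xi'(theta)| = |h| / 2, since xi(theta) = a + h * (theta + 1) / 2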
        return self.h_norm / 2
def collocation_point(self):
"""Returns midpoint of line."""
return self.xi(0)
def __repr__(self):
return 'LineElement({}, {})'.format(self.a, self.a + self.h) | python | 12 | 0.523151 | 91 | 26.320755 | 53 | LineElement represents a finite arc in the discretization of the domain boundary. | class |
class InfiniteLineElement:
"""InfiniteLineElement represents an infinite arc in the discretization of the domain boundary."""
def __init__(self, a, n):
"""Constructor.
:param a: Start point (line direction is also a)
:param n: Outward-pointing normal
:param is_fault: Flag this element as fault
"""
self.a = np.array(a)
self.a_norm = np.linalg.norm(self.a)
self.n = np.array(n)
self.n = self.n / np.linalg.norm(self.n)
self.is_fault = False
def xi(self, theta):
"""Map from interval [-1, 1] to line starting at "a" with direction "a" extending to infinity.
:param theta: Scalar in [-1, 1].
"""
# TODO: done
        ChiMap = self.a * (theta + 3) / (1 - theta)
#return np.array([0.0, 0.0])
return ChiMap
def basis(self, theta):
"""Basis function evaluated at theta.
:param theta: Scalar in [-1, 1]
"""
# TODO: done
#return 0.0
return ((1-theta)/(theta + 3))**2
def factor(self, theta):
"""Integration factor.
Must return basis(theta) * |xi'(theta)|
:param theta: Scalar in [-1, 1]
"""
# TODO: done
#return 0.0
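        # basis(theta) * |xi'(theta)| = ((1 - theta) / (theta + 3))**2 * 4 * |a| / (1 - theta)**2
        #                             = 4 * |a| / (theta + 3)**2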
        return 4 * self.a_norm / (theta + 3)**2
def collocation_point(self):
"""Returns start point of line."""
return self.xi(-1)
def __repr__(self):
return 'InfiniteLineElement({})'.format(self.a) | python | 11 | 0.541862 | 102 | 27.730769 | 52 | InfiniteLineElement represents an infinite arc in the discretization of the domain boundary. | class |
class SolutionV2:
"""
Time complexity O(k*n), and faster, but not _that_ fast.
Strategy: Result is minimal, if digits are removed
from high order to low order, and if a given digit is
greater than its neighbour digit on the right.
"""
def removeKdigits(self, num: str, k: int) -> str:
if len(num) == k:
return "0"
for _ in range(k):
for i in range(len(num) - 1):
if num[i] > num[i + 1]:
num = num[:i] + num[i + 1:]
break
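            # for/else: the else branch runs only if no removal happened above
            # (the digits are non-decreasing), so drop the last digit instead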
else:
num = num[:-1]
return str(int(num)) | python | 17 | 0.492777 | 60 | 31.842105 | 19 |
Time complexity O(k*n), and faster, but not _that_ fast.
Strategy: Result is minimal, if digits are removed
from high order to low order, and if a given digit is
greater than its neighbour digit on the right.
| class |
class SolutionV3:
"""
An elegant O(n) solution from the LeetCode discussion.
It is relatively fast, and nice to read.
    It appends digits from 'num' to the output, digit by digit,
    but removes digits from the output before appending the next one
    if the next digit has a lower value than the most recent output
    digit (as long as we have something left to remove, i.e. 'k' is not
    exhausted).
    The return expression covers the corner cases:
    - the digit count of 'num' equals 'k' (in that case the output is empty), - or -
    - the output has leading zeros, - or -
    - deletions are still necessary at the right end of 'num'.
"""
def removeKdigits(self, num: str, k: int) -> str:
output = []
for d in num:
while k and output and output[-1] > d:
output.pop()
k -= 1
output.append(d)
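        # output[:-k or None]: if k removals remain, drop them from the right end;
        # 'or None' keeps the whole list when k == 0. lstrip('0') removes leading
        # zeros, and the final "or '0'" covers an empty result.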
return ''.join(output[:-k or None]).lstrip('0') or '0' | python | 15 | 0.607635 | 80 | 40.043478 | 23 |
An elegant O(n) solution from the LeetCode discussion.
It is relatively fast, and nice to read.
It appends digits from 'num' to the output string, Digit by digit,
but removes digits from the output before appending the next,
if the next digit has a lower value than the most recent output
digit (as long as we have something to remove, i.e. 'k' is not
exhausted.
The return expression covers corner cases:
- digits count of 'num' equals 'k' (in case, output will be empty), - or -
- output has leading zeros, - or -
- deletions are necessary at the right end of 'num'.
| class |
class SocketInterface:
"""Implement of the BSD socket interface."""
def __init__(self, destination, protocol, socket_options=(), buffer_size=2048):
"""Creates a network socket to exchange messages."""
self.__socket = None
try:
self.__destination = socket.gethostbyname(destination)
except socket.gaierror as error:
raise RuntimeError(f'Cannot resolve the address {destination}, try verify your DNS or host file.\n{error}')
self.__protocol = socket.getprotobyname(protocol)
self.__buffer_size = buffer_size
self.__socket = socket.socket(socket.AF_INET, socket.SOCK_RAW, self.protocol)
if socket_options:
self.socket.setsockopt(*socket_options)
def __enter__(self):
"""Return this object."""
return self
def __exit__(self, *args):
"""Call the `close_socket` method."""
self.close_socket()
def __del__(self):
self.close_socket()
def close_socket(self):
"""Safe socket cleanup after all references to the object have been deleted."""
try:
if hasattr(self, 'socket') and not self.is_closed:
self.socket.close()
self.socket = None
except AttributeError:
raise AttributeError('Attribute error because of failed socket init. Make sure you have the root '
'privilege. This error may also be caused from DNS resolution problems.')
@property
def is_closed(self):
"""Indicate whether the socket is closed."""
return self.socket is None
@property
def destination(self):
return self.__destination
@property
def buffer_size(self):
return self.__buffer_size
@property
def protocol(self):
return self.__protocol
@property
def socket(self):
"""Return the socket object."""
return self.__socket
@socket.setter
def socket(self, value):
"""Set the socket object."""
self.__socket = value
def send_packet(self, packet):
"""Sends a raw packet on the stream."""
self.socket.sendto(packet, (self.destination, 0))
def receive_packet(self, timeout=2):
"""Listen for incoming packets until timeout."""
time_left = timeout
while time_left > 0:
start_time = time.perf_counter()
data_ready = select.select([self.socket], [], [], time_left)
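            # select() blocks until the socket is readable or time_left expires;
            # an empty ready-list means the wait timed out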
time_elapse_in_select = time.perf_counter() - start_time
time_left -= time_elapse_in_select
if not data_ready[0]:
return b'', '', time_left
packet_received, address = self.socket.recvfrom(self.buffer_size)
return packet_received, address, time_left
@property
def dont_fragment(self):
"""Specifies whether sockets cannot be fragmented.
Datagrams require fragmentation when their size exceeds the Maximum Transfer Unit (MTU) of the transmission
medium. Datagrams may be fragmented by the sending host (all Internet Protocol versions) or an intermediate
router (Internet Protocol Version 4 only). If a datagram must be fragmented, and the DontFragment option is
set, the datagram is discarded, and an Internet Control Message Protocol (ICMPPacket) error message is
sent back to the sender of the datagram."""
return socket.SOL_IP, 10, 1 | python | 13 | 0.614938 | 119 | 29.905983 | 117 | Implement of the BSD socket interface. | class |
class Param:
"""
Trainable parameter of the model
Captures both parameter value and the gradient
"""
def __init__(self, value):
self.value = value
self.grad = None
self.grad_clear()
def grad_clear(self):
self.grad = np.zeros_like(self.value) | python | 10 | 0.591973 | 50 | 22.076923 | 13 |
Trainable parameter of the model
Captures both parameter value and the gradient
| class |
class PolyProcess:
"""This is the PolyProcess class
This class takes two arguments
1. PolyTracker produced json containing CFG, taint sets, and version number. (PolyMerge has better version parsing)
2. PolyTracker raw taint forest
PolyProcess will take these files and:
1. Reconstruct the CFG, taint forest and taint sets
2. Process the taint sets to produce a final json containing the byte offsets touched in each function
"""
def __init__(self, polytracker_json_path: str, polytracker_forest_path: str):
if polytracker_json_path is None or polytracker_forest_path is None:
raise ValueError("Error: Path cannot be None")
if not os.path.exists(polytracker_json_path):
raise ValueError("Error: Could not find polytracker json path")
if not os.path.exists(polytracker_forest_path):
raise ValueError("Error: Could not find polytracker forest path")
self.json_file = open(polytracker_json_path, "r")
self.json_size = os.path.getsize(polytracker_json_path)
self.polytracker_json = json.loads(self.json_file.read(self.json_size))
self.processed_taint_sets: Dict[str, Dict[str, Dict[str, List[int]]]] = {}
if "tainted_functions" not in self.polytracker_json:
self.taint_sets = None
else:
self.taint_sets = self.polytracker_json["tainted_functions"]
if "tainted_input_blocks" not in self.polytracker_json:
self.tainted_input_blocks = None
else:
self.tainted_input_blocks = self.polytracker_json["tainted_input_blocks"]
self.forest_file = open(polytracker_forest_path, "rb")
self.forest_file_size = os.path.getsize(polytracker_forest_path)
self.taint_forest: nx.DiGraph = nx.DiGraph()
self.source_metadata: Dict[str, SourceMetadata] = {}
self.canonical_mapping: Dict[int, Tuple[str, int]] = {}
self.process_source_metadata()
self.process_canonical_mapping()
self.process_forest()
self.outfile = "polytracker.json"
def process_source_metadata(self):
source_info = self.polytracker_json["taint_sources"]
source_prog_bar = tqdm(source_info)
source_prog_bar.set_description("Processing source metadata")
for source in source_prog_bar:
self.source_metadata[source] = SourceMetadata(source_info[source]["start_byte"], source_info[source]["end_byte"])
def process_canonical_mapping(self):
canonical_map = self.polytracker_json["canonical_mapping"]
source_prog_bar = tqdm(canonical_map)
source_prog_bar.set_description("Processing canonical mapping")
for source in source_prog_bar:
for label_offset_pair in canonical_map[source]:
self.canonical_mapping[label_offset_pair[0]] = (source, label_offset_pair[1])
def set_output_filepath(self, filepath: str):
self.outfile = filepath
def validate_forest(self) -> bool:
return self.forest_file_size % taint_node_size == 0
def max_node(self) -> int:
return self.forest_file_size // taint_node_size
def process_forest(self):
"""This function reads the taint forest file and converts it to a networkX graph
The taint forest file is a bunch of raw bytes, where each sizeof(dfsan_label) chunk
represents a taint_node struct. The actual definition of the struct can be found in
include/dfsan_types.h
The function that produces this taint forest is outputRawTaintForest
Note that the taint info here (and anything of type label) is 1 indexed, because 0 is the null index.
The way we convert back to canonical bytes is by travelling up the forest to a node with two null parents
Then subtract one from that label, which gets you the original offset.
"""
logger.log(logging.DEBUG, "Processing taint forest!")
is_valid = self.validate_forest()
if not is_valid:
raise Exception("Invalid taint forest!")
nodes_to_process = tqdm(range(self.max_node()))
nodes_to_process.set_description("Processing taint forest")
for curr_node in nodes_to_process:
taint_forest_entry = struct.unpack("=II", self.forest_file.read(taint_node_size))
parent_1: int = taint_forest_entry[0]
parent_2: int = taint_forest_entry[1]
self.taint_forest.add_node(curr_node)
# Parents for canonical labels should have parents of label 0
assert parent_1 == parent_2 == 0 or (parent_1 != 0 and parent_2 != 0)
if parent_1 != 0 and parent_2 != 0:
self.taint_forest.add_edge(curr_node, parent_1)
self.taint_forest.add_edge(curr_node, parent_2)
def draw_forest(self):
logger.log(logging.DEBUG, "Drawing forest")
pos = nx.nx_agraph.graphviz_layout(self.taint_forest)
nx.draw(self.taint_forest, pos=pos)
write_dot(self.taint_forest, "taint_forest.dot")
check_call(["dot", "-Tpdf", "taint_forest.dot", "-o", "taint_forest.pdf"])
def is_canonical_label(self, label: int) -> bool:
try:
out_edges = self.taint_forest.edges(label)
if len(out_edges) == 0:
return True
except nx.exception.NetworkXError:
raise
return False
def get_canonical_offset(self, label: int) -> int:
return self.canonical_mapping[label][1]
def get_canonical_source(self, label: int) -> str:
return self.canonical_mapping[label][0]
def process_taint_sets(self):
if self.taint_sets is None:
print("Warning! No taint information to process")
return
taint_sets = tqdm(self.taint_sets)
processed_labels = defaultdict(lambda: defaultdict(lambda: defaultdict(set)))
for function in taint_sets:
taint_sets.set_description(f"Processing {function}")
label_list = self.taint_sets[function]["input_bytes"]
# Function --> Input_bytes/Cmp bytes --> Source --> labels
for label in label_list:
# Canonical labels
canonical_labels = set(
label for label in nx.dfs_preorder_nodes(self.taint_forest, label) if self.is_canonical_label(label)
)
# Now partition based on taint source
for can_label in canonical_labels:
offset = self.get_canonical_offset(can_label)
source = self.get_canonical_source(can_label)
processed_labels[function]["input_bytes"][source].add(offset)
# Check if this function has cmp bytes/if we should add the label
if "cmp_bytes" in self.taint_sets[function]:
if label in self.taint_sets[function]["cmp_bytes"]:
processed_labels[function]["cmp_bytes"][source].add(offset)
# Now that we have constructed the input_bytes sources, convert it to a sorted list:
for source in processed_labels[function]["input_bytes"]:
processed_labels[function]["input_bytes"][source] = list(
sorted(processed_labels[function]["input_bytes"][source])
)
processed_labels[function]["cmp_bytes"][source] = list(sorted(processed_labels[function]["cmp_bytes"][source]))
self.processed_taint_sets = processed_labels
def output_processed_json(self):
# Remove canonical mapping
processed_json = defaultdict(dict)
processed_json["tainted_functions"] = self.processed_taint_sets
processed_json["runtime_cfg"] = self.polytracker_json["runtime_cfg"]
processed_json["version"] = self.polytracker_json["version"]
processed_json["taint_sources"] = self.polytracker_json["taint_sources"]
processed_json["tainted_input_blocks"] = self.polytracker_json["tainted_input_blocks"]
with open(self.outfile, "w") as out_fd:
json.dump(processed_json, out_fd, indent=4) | python | 21 | 0.63496 | 127 | 48.150602 | 166 | This is the PolyProcess class
This class takes two arguments
1. PolyTracker produced json containing CFG, taint sets, and version number. (PolyMerge has better version parsing)
2. PolyTracker raw taint forest
PolyProcess will take these files and:
1. Reconstruct the CFG, taint forest and taint sets
2. Process the taint sets to produce a final json containing the byte offsets touched in each function
| class |
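The process_forest docstring above spells out the raw forest layout: one taint_node per label, holding two uint32 parents, with label 0 as the null parent and canonical nodes having two null parents. The following is a minimal standalone sketch of that description (not part of the dataset row above); the file name is a placeholder and the offset rule (label - 1) is taken directly from the docstring.

import struct

def read_forest(path="polytracker.forest"):  # placeholder path
    """Read the raw taint forest: one (parent_1, parent_2) pair of uint32 per node."""
    parents = []
    with open(path, "rb") as forest_fd:
        while True:
            chunk = forest_fd.read(8)  # sizeof(taint_node) == two uint32 parents
            if len(chunk) < 8:
                break
            parents.append(struct.unpack("=II", chunk))
    return parents

def canonical_ancestors(parents, label):
    """Yield every canonical ancestor (both parents == 0) reachable from `label`."""
    stack, seen = [label], set()
    while stack:
        node = stack.pop()
        if node in seen:
            continue
        seen.add(node)
        parent_1, parent_2 = parents[node]
        if parent_1 == 0 and parent_2 == 0:
            yield node  # the original input offset would be node - 1 per the docstring
        else:
            stack.extend((parent_1, parent_2))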
class SensitiveManager:
"""
This class provides utility functions to remove & restore
sensitive values from the experiment extension
"""
def __init__(self):
self.sensitive = {}
def remove_sensitive_values(self, experiment: Experiment,
keys: List[str] = None):
if keys is None:
return
for key in keys:
self.sensitive[key] = get_extension_value(experiment, key)
del_extension_value(experiment, key, silent=True)
def restore_sensitive_values(self, experiment: Experiment,
keys: List[str] = None):
if keys is None:
return
for key in keys:
set_extension_value(experiment, key, self.sensitive[key]) | python | 12 | 0.574144 | 70 | 31.916667 | 24 |
This class provides utility functions to remove & restore
sensitive values from the experiment extension
| class |
class Dataset:
"""Mock the MindSpore Dataset class."""
def __init__(self, dataset_size=None, dataset_path=None):
self.dataset_size = dataset_size
self.dataset_path = dataset_path
self.inputs = []
def get_dataset_size(self):
"""Mocked get_dataset_size."""
return self.dataset_size
def get_batch_size(self):
"""Mocked get_batch_size"""
return 32 | python | 8 | 0.598086 | 61 | 26.933333 | 15 | Mock the MindSpore Dataset class. | class |
class inverse:
    r"""Open an inverse scope
    Inside a ``with inverse`` scope, the quantum operations are applied backwards.
:Usage:
.. code-block:: ket
with inverse():
...
"""
def __enter__ (self):
adj_begin()
def __exit__ (self, type, value, tb):
adj_end() | python | 8 | 0.480597 | 74 | 17.666667 | 18 | r"""Open a inverse scope
Inside a ``with inverse`` scope, the quantum operations are applied backwards.
:Usage:
.. code-block:: ket
with inverse():
...
| class |
class TextDistiller:
"""
Instances of this class are used to 'distill' a text corpus using
basic NLP techniques.
"""
def __init__(self, raw_fulltext):
self.crp_raw = raw_fulltext
self.crp_clean = self.clean_text(self.crp_raw)
stopword_list = config.tech_stopwords + config.eng_stopwords
# When tokenizing the corpus, ignore stop words and words of
# length = 1, which also excludes abbreviations like "e.g."
# once the punctuation has been stripped.
self.crp_toks = [word for word in word_tokenize(self.crp_clean)
if word not in stopword_list
and len(word) > 1]
self.word_freqs = FreqDist(word for word in self.crp_toks)
self.wordcloud = self.gen_wordcloud()
def clean_text(self, raw_text):
# This function scrubs extra white spaces and non-letter characters from
# the input text, and converts it to lowercase.
processed_text = re.sub(r"\s+", " ", raw_text) # Delete extra white spaces
processed_text = processed_text.lower() # Convert to lowercase
processed_text = re.sub(r"[^a-zA-Z]", " ", processed_text) # allow only letters
return processed_text
def gen_bigrams(self, min_freq = config.bg_min_freq):
# Generate ranked bigrams as a list of tuples from the tokenized corpus.
bg_finder = BigramCollocationFinder.from_words(self.crp_toks,
window_size = config.bg_win)
bg_finder.apply_freq_filter(min_freq)
bg_ranked = bg_finder.nbest(BigramAssocMeasures.pmi, config.bg_count)
return bg_ranked
def gen_wordcloud(self):
# Build a word cloud using the tokenized patent text. Stopwords have been removed by this point.
# Generate the word cloud & convert to a matplotlib plot
wc = WordCloud(max_words=250,
margin=10,
random_state=1,
mode = "RGBA",
background_color = None,
collocations = False).generate(" ".join(self.crp_toks))
        recolor = get_single_color_func("midnightblue")
        wc.recolor(color_func=recolor)
        # Convert the word cloud to a PNG image held in memory
        # (avoids AxesImage.write_png, which newer matplotlib releases no longer provide)
        wc_png_img = io.BytesIO()
        wc.to_image().save(wc_png_img, format="PNG")
# Encode PNG image to base64 string, then return the string.
wc_png_img_str = "data:image/png;base64,"
wc_png_img_str += base64.b64encode(wc_png_img.getvalue()).decode('utf8')
return wc_png_img_str | python | 13 | 0.587078 | 104 | 43.163934 | 61 |
Instances of this class are used to 'distill' a text corpus using
basic NLP techniques.
| class |
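The gen_bigrams method above relies on NLTK's collocation finder; the snippet below is a self-contained sketch of that step with toy tokens, and the window size and frequency threshold are chosen purely for illustration (they are not the config values the class reads).

from nltk.collocations import BigramAssocMeasures, BigramCollocationFinder

tokens = ["patent", "claim", "wireless", "sensor", "wireless", "sensor", "network"]
finder = BigramCollocationFinder.from_words(tokens, window_size=2)
finder.apply_freq_filter(2)                      # drop bigrams seen fewer than 2 times
print(finder.nbest(BigramAssocMeasures.pmi, 5))  # top pairs ranked by pointwise mutual information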
class Results:
"""
The Results class works closely with the Observation class. It holds information regarding the state, action, observation, reward, and error.
    Functions keep track of errors that happen and enable external sources to copy the information regarding the current action-observation, as well
as printing the results.
"""
def __init__(self,
observation: dict = None,
done: bool = None,
reward: float = None,
info=None,
parameter_mask=None,
action_space=None,
error: Exception = None,
error_msg: str = None,
next_observation=None,
action=None,
action_name: str = None):
self.observation = observation
self.next_observation = next_observation
self.done = done
self.reward = reward
self.action = action
self.info = info
self.parameter_mask = parameter_mask
self.action_space = action_space
self.error = error
self.error_msg = error_msg
self.action_name = action_name
self.selection_masks = None
def has_error(self):
return self.error is not None
def copy(self):
copy_kwargs = {
"done": self.done,
"reward": self.reward,
"error": deepcopy(self.error),
"error_msg": deepcopy(self.error_msg),
"action": deepcopy(self.action),
"info": deepcopy(self.info),
"action_space": deepcopy(self.action_space)
}
if isinstance(self.observation, Observation):
copy_kwargs["observation"] = self.observation.copy()
else:
copy_kwargs["observation"] = deepcopy(self.observation)
if isinstance(self.next_observation, Observation):
copy_kwargs["next_observation"] = self.next_observation.copy()
else:
copy_kwargs["next_observation"] = deepcopy(self.next_observation)
return Results(**copy_kwargs)
def __str__(self):
output = [f"{self.__class__.__name__}:"]
for attr, v in self.__dict__.items():
if v is None:
continue
if isinstance(v, dict):
v_str = pprint.pformat(v)
else:
v_str = str(v)
output.append(f"{attr}={v_str}")
return "\n".join(output)
def __eq__(self, other):
if not isinstance(other, type(self)):
return False
for k, v in self.__dict__.items():
if k not in other.__dict__:
return False
if v != other.__dict__[k]:
return False
return True | python | 14 | 0.539763 | 149 | 33.75 | 80 |
The Results class works closely with the Observation class. It holds information regarding the state, action, observation, reward, and error.
Functions keep track of errors that happen and enable external sources to copy the information regarding the current action-observation, as well
as printing the results.
| class |
class Quantizer:
"""Handles quantization and dequantization of data
"""
    def __init__(self, num_levels=5):
        """Initialization
Args:
num_levels (int, optional): Number of quantization levels. Defaults to 5.
"""
# use tuple for immutability
self.num_levels = num_levels
"""number of quantization levels"""
labels = tuple(string.ascii_uppercase[:num_levels])
self.labels = {label: idx for idx, label in enumerate(labels)}
"""ex. `{A: 0, B: 1, ...}`"""
self.variable_bin_map = {}
"""key-value pairs `{biome_name: quantization map}`"""
self.column_names = None
"""a list of columns in the format `{biome}_{week}`"""
self.subject_id_column = None
"""cache this column to add back to the label matrix with `self.add_meta_to_matrix`"""
self.random_forest_dict = {}
"""key-value pairs `{biome_name: sklearn.ensemble.RandomForestRegressor}`"""
def save_quantizer_states(self, out_fname):
"""Save `self.column_names, self.subject_id_column, self.variable_bin_map, self.random_forest_dict`. Call this after calling `self.quantize_df`
Args:
out_fname (str): output file name
"""
states = {
'column_names': self.column_names,
'subject_id_column': self.subject_id_column,
'variable_bin_map': self.variable_bin_map,
'random_forest_dict': self.random_forest_dict
}
with open(out_fname, 'wb') as f:
pickle.dump(states, f, protocol=pickle.HIGHEST_PROTOCOL)
    def load_quantizer_states(self, in_fname):
        """Load `self.column_names, self.subject_id_column, self.variable_bin_map, self.random_forest_dict` from file
Args:
in_fname (str): input file name
"""
with open(in_fname, 'rb') as f:
states = pickle.load(f)
self.column_names = states['column_names']
self.subject_id_column = states['subject_id_column']
self.variable_bin_map = states['variable_bin_map']
self.random_forest_dict = states['random_forest_dict']
def pivot_into_quantize_format(self, data):
"""Pivot the data into a format the quantizer can quantize
Input data format, produced by `DataFormatter.load_data`:
| sample_id | subject_id | variable | week | value |
|:----------------|-------------:|:-----------------|-------:|---------:|
| MBSMPL0020-6-10 | 1 | Actinobacteriota | 27 | 0.36665 |
| MBSMPL0020-6-10 | 1 | Bacteroidota | 27 | 0.507248 |
| MBSMPL0020-6-10 | 1 | Campilobacterota | 27 | 0.002032 |
| MBSMPL0020-6-10 | 1 | Desulfobacterota | 27 | 0.005058 |
| MBSMPL0020-6-10 | 1 | Firmicutes | 27 | 0.057767 |
Output format:
| subject_id | Acidobacteriota_35 | Actinobacteriota_1 | Actinobacteriota_2 |
|-------------:|---------------------:|:---------------------|---------------------:|
| 1 | nan | 0.36665 | nan |
| 10 | nan | 0.36665 | nan |
| 11 | nan | 0.36665 | nan |
Args:
data (pandas.DataFrame): see format above
Returns:
pandas.DataFrame: see format above
"""
# some hacky intermediate format used by quantizer only
# so this probably shouldn't go into DataFormatter
melted = pd.concat([
data.subject_id,
data.variable + '_' + data.week.astype(str),
data.value
], axis=1).rename(columns={0: 'variable'})
to_quantize = melted.pivot_table(
index='subject_id', columns='variable', dropna=False)['value'].reset_index()
return to_quantize
def quantize_df(self, data):
"""This function must be called before calling any of the dequantization procedures. It populates `self.column_names, self.subject_id_column, self.variable_bin_map`
Input data format, produced by `DataFormatter.load_data`:
| sample_id | subject_id | variable | week | value |
|:----------------|-------------:|:-----------------|-------:|---------:|
| MBSMPL0020-6-10 | 1 | Actinobacteriota | 27 | 0.36665 |
| MBSMPL0020-6-10 | 1 | Bacteroidota | 27 | 0.507248 |
| MBSMPL0020-6-10 | 1 | Campilobacterota | 27 | 0.002032 |
| MBSMPL0020-6-10 | 1 | Desulfobacterota | 27 | 0.005058 |
| MBSMPL0020-6-10 | 1 | Firmicutes | 27 | 0.057767 |
Output data format:
| subject_id | Acidobacteriota_35 | Actinobacteriota_1 | Actinobacteriota_2 |
|-------------:|---------------------:|:---------------------|---------------------:|
| 1 | nan | A | nan |
| 10 | nan | A | nan |
| 11 | nan | A | nan |
| 12 | nan | D | nan |
| 14 | nan | A | nan |
Args:
data (pandas.DataFrame): see format above
Returns:
pandas.DataFrame: see format above
"""
to_quantize = self.pivot_into_quantize_format(data)
self.column_names = to_quantize.columns[1:] # skip subject_id, only biome names
# cache the subject_id column to add back to a dequantized matrix
self.subject_id_column = to_quantize.subject_id
return self._quantize_df(to_quantize)
def _quantize_df(self, to_quantize):
"""Quantize a data frame in quantizable format
Input data format:
| subject_id | Acidobacteriota_35 | Actinobacteriota_1 | Actinobacteriota_2 |
|-------------:|---------------------:|:---------------------|---------------------:|
| 1 | nan | 0.36665 | nan |
| 10 | nan | 0.36665 | nan |
| 11 | nan | 0.36665 | nan |
Output data format:
| subject_id | Acidobacteriota_35 | Actinobacteriota_1 | Actinobacteriota_2 |
|-------------:|---------------------:|:---------------------|---------------------:|
| 1 | nan | A | nan |
| 10 | nan | A | nan |
| 11 | nan | A | nan |
Args:
to_quantize (pd.DataFrame): data frame in quantizable format
Returns:
pandas.DataFrame: see format above
"""
quantized = pd.DataFrame() # return df
if not self.variable_bin_map:
for col in self.column_names:
cut, bins = pd.cut(to_quantize[col], self.num_levels,
labels=list(self.labels.keys()), retbins=True)
quantized[col] = cut
self.variable_bin_map[col] = bins
else: # use existing bins
for col in self.column_names:
cut = pd.cut(to_quantize[col], self.variable_bin_map[col],
labels=list(self.labels.keys()))
quantized[col] = cut
# sort the columns by name in a natural order
quantized = quantized.reindex(sorted(quantized.columns, key=_natural_keys), axis=1)
quantized.insert(0, 'subject_id', to_quantize.subject_id)
return quantized
def get_qnet_inputs(self, quantized_df):
"""Retrieve the feature names and data matrix from a quantized data frame produced by `self.quantize_df`
Args:
quantized_df (pandas.DataFrame): a quantized data frame produced by `self.quantize_df`
Returns:
list: a list of feature names, ex. `['Acidobacteriota_35', 'Actinobacteriota_1', 'Actinobacteriota_2']`
numpy.ndarray: a 2D data array of quantized labels (`'A', 'B', ...,` or empty string `''` for NaN)
"""
# skip subject_id column
df = quantized_df.drop(columns='subject_id')
# matrix = df.astype(str).replace('nan', '').to_numpy(dtype=str)
# matrix = df.astype(str).fillna('').to_numpy(dtype=str)
matrix = df.astype(str).replace('nan', '').fillna('').to_numpy(dtype=str)
# sanity-check matrix contains only empty strings and label strings
valid_labels = list(self.labels.keys()) + ['']
is_valid = np.isin(np.unique(matrix), valid_labels).all()
#if not is_valid:
# raise Exception('The label matrix contains strings that are neither the empty string nor the label strings')
return df.columns, matrix
def quantize_new_subject(self, subject_data, subject_id=None):
"""Construct and quantize a new subject with missing data
Input format:
| subject_id | variable | week | value |
|-------------:|:-----------------|-------:|---------:|
| 1 | Actinobacteriota | 1 | 0.36665 |
| 1 | Bacteroidota | 1 | 0.507248 |
| 1 | Campilobacterota | 1 | 0.002032 |
| 1 | Desulfobacterota | 1 | 0.005058 |
| 1 | Firmicutes | 1 | 0.057767 |
Output format:
| subject_id | Acidobacteriota_35 | Actinobacteriota_1 | Actinobacteriota_2 |
|-------------:|---------------------:|:---------------------|---------------------:|
| 1 | nan | A | nan |
| 1 | nan | A | nan |
| 1 | nan | A | nan |
| 1 | nan | D | nan |
Args:
subject_data ([type]): subject data frame with some but maybe not all the timestamps
subject_id (str, optional): if not None, add the subject_id as a column; if None, assume that the input has a column named subject_id. Defaults to None.
Returns:
pd.DataFrame: quantized subject data frame with complete timestamps, see format above
"""
if subject_id is None and not 'subject_id' in subject_data.columns:
raise Exception('You must provide a subject_id if there is none in the input data frame')
if subject_id is not None:
subject_data['subject_id'] = subject_id
new_subject = self.pivot_into_quantize_format(subject_data)
# add columns that are in self.column_names but not in pivoted as np.nan
for column in self.column_names:
if column not in new_subject.columns:
new_subject[column] = np.nan
return self._quantize_df(new_subject)
def get_bin_array_of_index(self, idx):
"""Return the `pandas.cut` bin array corresponding to the sequence index by looking up `self.variable_bin_map[self.column_names[idx]]`
Args:
idx (int): index into `self.column_names`
Returns:
numpy.ndarray: bins
"""
col = self.column_names[idx]
bin_arr = self.variable_bin_map[col]
return bin_arr
def quantize_value(self, val, bin_arr):
"""Quantize a numeric value into a label. This function is the inverse of `self.dequantize_label`
Args:
val (float): number to quantize
bin_arr (numpy.ndarray): bins produced by `pandas.cut` or retrieved using `self.get_bin_array_of_index`
Returns:
str: quantized label string
"""
label = pd.cut([val], bin_arr, labels=list(self.labels.keys()))[0]
return label
# procedures and helpers for dequantization follows
def _fit_random_forest_one_biome(self, x, y):
idx_old = np.arange(len(x))
fx = interpolate.interp1d(idx_old, x, fill_value='extrapolate')
fy = interpolate.interp1d(idx_old, y, fill_value='extrapolate')
idx = np.arange(0, len(x), 0.01)
X = fx(idx)[:, np.newaxis]
Y = fy(idx)
model = RandomForestRegressor()
model.fit(X, Y)
return model
def compute_average_df(self, df):
"""Take the average over the input data frame by grouping by `variable, week`
Input data format:
| sample_id | subject_id | variable | week | value |
|:----------------|-------------:|:-----------------|-------:|---------:|
| MBSMPL0020-6-10 | 1 | Actinobacteriota | 27 | 0.36665 |
| MBSMPL0020-6-10 | 1 | Bacteroidota | 27 | 0.507248 |
| MBSMPL0020-6-10 | 1 | Campilobacterota | 27 | 0.002032 |
| MBSMPL0020-6-10 | 1 | Desulfobacterota | 27 | 0.005058 |
| MBSMPL0020-6-10 | 1 | Firmicutes | 27 | 0.057767 |
Output data format:
| variable | week | value |
|:-----------------|-------:|---------:|
| Actinobacteriota | 27 | 0.36665 |
| Bacteroidota | 27 | 0.507248 |
| Campilobacterota | 27 | 0.002032 |
Args:
df (pandas.DataFrame): see format above
Returns:
pandas.DataFrame: the average data frame, see format above
"""
avg = df[['variable', 'week', 'value']].groupby(
by=['variable', 'week']).mean().reset_index()
return avg
    def fit_random_forest(self, data, dequantized_data):
        """Fit a random forest regressor for each biome. Use the average of the dequantized data as input and fit the regressor to the average of the original data as output. Populates `self.random_forest_dict`.
Input format for both data frames:
| subject_id | variable | week | value |
|-------------:|:-----------------|-------:|---------:|
| 1 | Actinobacteriota | 27 | 0.36665 |
| 1 | Bacteroidota | 27 | 0.507248 |
| 1 | Campilobacterota | 27 | 0.002032 |
Args:
data (pandas.DataFrame): see format above
dequantized_data (pandas.DataFrame): see format above
"""
if self.random_forest_dict: # already populated
return
# take avg of data and dequantized_data, grouped by week and biome
# want to map dequantized to original, hence dequantized is input
inputs = self.compute_average_df(dequantized_data)
outputs = self.compute_average_df(data)
for biome in inputs.variable.unique():
x = inputs[inputs.variable == biome].value
y = outputs[outputs.variable == biome].value
model = self._fit_random_forest_one_biome(x, y)
self.random_forest_dict[biome] = model
def dequantize_label(self, label, bin_arr):
"""Dequantize a label string into a numeric value. This function is the inverse of `self.quantize_value`. If the input is an empty string, the return value will be `numpy.nan`
Args:
label (str): label string
bin_arr (numpy.ndarray): bins produced by `pandas.cut` or retrieved using `self.get_bin_array_of_index`
Returns:
float: the dequantized numeric value
"""
if label is np.nan or label.lower() == 'nan' or label not in self.labels:
return np.nan
low = self.labels[label]
high = low + 1
val = (bin_arr[low] + bin_arr[high]) / 2
return val
def dequantize_sequence(self, label_seq):
"""Dequantize an entire label sequence
Args:
label_seq (numpy.ndarray): 1D array of label strings
Returns:
numpy.ndarray: 1D array of floats
"""
numeric_seq = np.empty(label_seq.shape)
for idx, label in enumerate(label_seq):
bin_arr = self.get_bin_array_of_index(idx)
numeric_seq[idx] = self.dequantize_label(label, bin_arr)
return numeric_seq
def dequantize_to_df(self, matrix):
"""Dequantize a label matrix (with no column names, just the qnet input matrix) into a data frame with numeric values. To make the output data frame into a format `seaborn` can easily plot, apply `self.melt_into_plot_format`
Output format:
| subject_id | Acidobacteriota_35 | Actinobacteriota_1 | Actinobacteriota_2 |
|-------------:|---------------------:|:---------------------|---------------------:|
| 1 | nan | 0.36665 | nan |
| 10 | nan | 0.36665 | nan |
| 11 | nan | 0.36665 | nan |
Args:
matrix (numpy.ndarray): 2D matrix of label strings
Returns:
pandas.DataFrame: see format above
"""
numeric_matrix = np.empty(matrix.shape)
for idx, seq in enumerate(matrix):
numeric_matrix[idx] = self.dequantize_sequence(seq)
df = self.add_meta_to_matrix(numeric_matrix)
return df
    def add_meta_to_matrix(self, matrix, add_subject_id=True):
        """Add back `self.subject_id_column` and `self.column_names` to the data matrix to convert it into a data frame
Output format:
| subject_id | Acidobacteriota_35 | Actinobacteriota_1 | Actinobacteriota_2 |
|-------------:|---------------------:|:---------------------|---------------------:|
| 1 | nan | A | nan |
| 10 | nan | A | nan |
| 11 | nan | A | nan |
| 12 | nan | D | nan |
| 14 | nan | A | nan |
Args:
matrix (np.ndarray): 2D matrix of either label strings or numeric values
add_subject_id (bool, optional): whether to add back the cached subject_id column. Defaults to True.
Returns:
pandas.DataFrame: see format above
"""
df = pd.DataFrame(matrix, columns=self.column_names)
if add_subject_id:
df = pd.concat([self.subject_id_column, df], axis=1)
return df
def melt_into_plot_format(self, data):
"""Melt data into a format that `seaborn` can easily plot
Input format:
| subject_id | Acidobacteriota_35 | Actinobacteriota_1 | Actinobacteriota_2 |
|-------------:|---------------------:|:---------------------|---------------------:|
| 1 | nan | 0.36665 | nan |
| 10 | nan | 0.36665 | nan |
| 11 | nan | 0.36665 | nan |
Output format:
| subject_id | variable | week | value |
|-------------:|:-----------------|-------:|---------:|
| 1 | Actinobacteriota | 27 | 0.36665 |
| 1 | Bacteroidota | 27 | 0.507248 |
| 1 | Campilobacterota | 27 | 0.002032 |
Args:
data (pandas.DataFrame): numeric data, see format above
Returns:
pandas.DataFrame: see format above
"""
# pivot into plottable format
melted = data.melt(id_vars='subject_id')
# split variable names
splitted = melted.variable.str.extract(r'([\D|\d]+)_(\d+)', expand=True)
splitted.rename(columns={0: 'variable', 1: 'week'}, inplace=True)
splitted.week = splitted.week.astype(int)
plot_df = pd.concat([
melted.subject_id, splitted, melted.value
], axis=1)
return plot_df
def apply_random_forest_regressor(self, data):
"""Apply the trained biome regressor on the data to reduce the conversion distortion resulted from quantization-dequantization. If the data frame has columns other than `variable, week, value`, the extra columns will be averaged over using `self.compute_average_df`
Data formats for the input and the output are the same:
| variable | week | value |
|:-----------------|-------:|---------:|
| Actinobacteriota | 27 | 0.36665 |
| Bacteroidota | 27 | 0.507248 |
| Campilobacterota | 27 | 0.002032 |
Args:
data (pandas.DataFrame): see format above
Raises:
Exception: `self.random_forest_dict` hasn't been populated. You need to call `self.fit_random_forest` first
Returns:
pandas.DataFrame: with columns `variable, week, value`
"""
if not self.random_forest_dict:
raise Exception('No random forest models. First train with fit_random_forest')
avg_data = self.compute_average_df(data)
dataframes = []
for biome in avg_data.variable.unique():
x = avg_data[avg_data.variable == biome].value
# check if there is NaN
if x.isnull().any():
raise Exception('There are NaNs in the inputs. Please run the forecaster to fill in all the NaNs first')
x = x.to_numpy()[:, np.newaxis]
model = self.random_forest_dict[biome]
pred = model.predict(x)
df = pd.DataFrame({
'variable': biome,
'week': avg_data[avg_data.variable == biome].week,
'value': pred
})
dataframes.append(df)
ret = pd.concat(dataframes)
return ret | python | 20 | 0.491578 | 273 | 44.635815 | 497 | Handles quantization and dequantization of data
| class |
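The Quantizer docstrings above describe a pandas.cut based scheme: numeric values are binned into labels A-E and a label is dequantized back to the midpoint of its bin. The snippet below is a minimal round-trip sketch of that idea on made-up values, independent of the class itself.

import pandas as pd

values = pd.Series([0.01, 0.05, 0.2, 0.4, 0.9])
labels = list("ABCDE")
cut, bins = pd.cut(values, 5, labels=labels, retbins=True)
print(list(cut))                        # e.g. ['A', 'A', 'B', 'C', 'E']

def dequantize(label):
    i = labels.index(label)
    return (bins[i] + bins[i + 1]) / 2  # bin midpoint, as in dequantize_label above

print(dequantize("C"))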
class Config:
'''
General configuration parent class
'''
SECRET_KEY = os.environ.get('SECRET_KEY')
SQLALCHEMY_DATABASE_URI = 'postgres+psycopg2://postgres:psql@localhost/notebook'
API_BASE_URL='http://quotes.stormconsultancy.co.uk/random.json'
SQLALCHEMY_TRACK_MODIFICATIONS=False | python | 8 | 0.717532 | 84 | 37.625 | 8 |
General configuration parent class
| class |
class TapTempoGui:
"""Tk-App to tap-in a tempo."""
def __init__(self, root, tempo=TapTempo()):
"""Build the Gui."""
# Model and Stingvariables
self.tempo = tempo
self.strBPM = tk.StringVar()
self.strBPMAVG = tk.StringVar()
self.update()
self.root = root
# main app
self.mainframe = ttk.Frame(self.root, padding="4 4 4 4")
self.mainframe.grid(column=0, row=0)
# midi status bar
self.midi = MidiInterface(self.tap)
self.statusBar = MidiStatusBar(self.root, self.midi)
self.statusBar.grid(column=0, row=1, sticky="nwse")
# BPM Labels
ttk.Label(self.mainframe, text="BPM:", justify="right").grid(column=0, row=0)
self.labelBPM = ttk.Label(self.mainframe, textvariable=self.strBPM)
self.labelBPM.grid(column=1, row=0)
# Average BPM Labels
ttk.Label(self.mainframe, text="Average BPM:", justify="right").grid(column=0, row=1)
self.labelBPMAVG = ttk.Label(self.mainframe, textvariable=self.strBPMAVG)
self.labelBPMAVG.grid(column=1, row=1)
# Buttons
self.buttonTap = ttk.Button(self.mainframe, text="Tap")
        # a button's command usually fires only when the mouse button is
        # released; for tapping, reacting to the press event is more
        # appropriate
self.buttonTap.bind("<ButtonPress-1>", lambda e: self.tap())
self.buttonTap.grid(column=0, row=2)
self.buttonReset = ttk.Button(self.mainframe, text="Reset", command=self.reset)
self.buttonReset.grid(column=1, row=2)
# polish
for child in self.mainframe.winfo_children():
child.grid_configure(padx=5, pady=5)
def update(self):
"""Update all labels."""
self.strBPM.set("{:.2f}".format(self.tempo.bpm))
self.strBPMAVG.set("{:.2f}".format(self.tempo.bpmAvg))
def tap(self):
"""Perform a single tap and update the tempo labels."""
self.tempo.tap()
self.update()
def reset(self):
"""Reset everything to zero."""
self.tempo.reset()
self.update()
def run(self):
"""Run TK-Mainloop."""
self.root.mainloop() | python | 12 | 0.597943 | 93 | 31.897059 | 68 | Tk-App to tap-in a tempo. | class |
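The comment in __init__ above notes that a button's command only fires on release, so the tap handler is bound to the press event instead. The fragment below is a bare-bones, standalone illustration of that binding choice; it simply prints a timestamp per press.

import time
import tkinter as tk
from tkinter import ttk

root = tk.Tk()
tap_button = ttk.Button(root, text="Tap")
tap_button.bind("<ButtonPress-1>", lambda event: print("tap at", time.time()))
tap_button.pack(padx=20, pady=20)
root.mainloop()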
class MetadataProcessorFactory:
    """ An abstract factory to create MetadataProcessor objects based on their name """
    processorTypeMap = {}
@classmethod
    def registerProcessorClass(cls, processorName, ProcessorClass):
        cls.processorTypeMap[processorName] = ProcessorClass
@classmethod
    def createProcessor(cls, processorType, config):
        if processorType not in cls.processorTypeMap:
            raise KeyError("Metadata processor type \"%s\" not known." % processorType)
        processorClass = cls.processorTypeMap[processorType]
        newObj = processorClass(config)
return newObj | python | 14 | 0.712949 | 89 | 41.8 | 15 | An abstract factory to create MetadataProcessor objects based on their name | class |
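A short usage sketch of the factory above; DummyProcessor and its config dictionary are invented stand-ins, not real processors from the code base.

class DummyProcessor:
    def __init__(self, config):
        self.config = config

MetadataProcessorFactory.registerProcessorClass("dummy", DummyProcessor)
processor = MetadataProcessorFactory.createProcessor("dummy", {"verbose": True})
assert isinstance(processor, DummyProcessor)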
class descriptive_stats:
"""A series of operations for descriptive stats."""
def __init__(self, df, colname):
self.df = df
self.colname = colname
self.cat_dict = {}
def count_issues(self):
"""Calculate number of unique issues."""
for tweet in self.df[self.colname]:
single_issues = str(tweet).split(', ')
for single_issue in single_issues:
single_issue = single_issue.strip()
try:
self.cat_dict[single_issue] += 1
except KeyError:
self.cat_dict[single_issue] = 1
print('There are {} unique issues.\n'.format(len(self.cat_dict)))
    def bar_plot(self):
        """Plot a bar graph organized by issue counts."""
sorted_cat_dict = sorted(self.cat_dict.items(),
key=lambda x: x[1], reverse=True)
sorted_cat_dict = OrderedDict(sorted_cat_dict)
plt.figure(figsize=(12, 12))
sns.barplot(list(sorted_cat_dict.values()),
list(sorted_cat_dict.keys()),
alpha=0.8, orient='h')
        # with orient='h' the counts run along x and the issue names along y
        plt.xlabel('Number of Occurrences', fontsize=12)
        plt.ylabel('Issues', fontsize=12)
plot_dir = create_dir('plots')
plt.savefig(plot_dir +'/issue_counts.png')
print('Descriptive plot saved in this directory:\n{}\n'.format(plot_dir)) | python | 16 | 0.551237 | 81 | 40.647059 | 34 | A series of operations for descriptive stats. | class |
class CompositionGenerator:
"""
kmer frequencies generator
"""
def __init__(self, pairs_file, fasta=None,
batch_size=64, kmer=4, rc=False, threads=1):
self.i = 0
self.kmer = kmer
self.pairs = np.load(pairs_file)
self.n_batches = max(1, len(self.pairs) // max(batch_size, 1))
self.batch_size = batch_size if batch_size > 0 else len(self.pairs)
self.rc = rc
if fasta is not None:
self.set_genomes(fasta)
self.pool = Pool(threads)
def set_genomes(self, fasta):
contigs = set(self.pairs['sp'].flatten())
self.genomes = dict()
with open(fasta, 'r') as handle:
for (name, seq) in SimpleFastaParser(handle):
ctg_id = name.split()[0]
if ctg_id in contigs:
self.genomes[ctg_id] = seq
def __iter__(self):
return self
def __len__(self):
return self.n_batches
def __next__(self):
if self.i < self.n_batches:
pairs_batch = self.pairs[self.i*self.batch_size:(self.i+1)*self.batch_size]
get_kmer_frequency_with_args = partial(
get_kmer_frequency, kmer=self.kmer, rc=self.rc
)
fragments_a = [self.genomes[spA][startA:endA] for (spA, startA, endA), _ in pairs_batch]
fragments_b = [self.genomes[spB][startB:endB] for _, (spB, startB, endB) in pairs_batch]
x1 = self.pool.map(get_kmer_frequency_with_args, fragments_a)
x2 = self.pool.map(get_kmer_frequency_with_args, fragments_b)
self.i += 1
if self.i >= self.n_batches:
self.pool.close()
return (format_array(np.array(x1, dtype='float32')),
format_array(np.array(x2, dtype='float32')))
raise StopIteration() | python | 15 | 0.545067 | 100 | 32.5 | 56 |
kmer frequencies generator
| class |
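CompositionGenerator above fans per-fragment k-mer counting out over a multiprocessing Pool via functools.partial. The standalone sketch below shows only that batching pattern; count_kmers is a simplified stand-in for the real get_kmer_frequency helper and the fragments are toy sequences.

from collections import Counter
from functools import partial
from multiprocessing import Pool

def count_kmers(seq, kmer=4):
    return Counter(seq[i:i + kmer] for i in range(len(seq) - kmer + 1))

if __name__ == "__main__":
    fragments = ["ACGTACGTACGT", "TTGCATGCAATT"]
    with Pool(2) as pool:
        freqs = pool.map(partial(count_kmers, kmer=4), fragments)
    print(freqs[0].most_common(3))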
class CoverageGenerator:
"""
    Generator for the coverage feature.
It loads the coverage every [load_batch] batches.
"""
def __init__(self, pairs_file, coverage_h5=None,
batch_size=64, load_batch=1000, wsize=16, wstep=8):
self.i = 0
self.pairs = np.load(pairs_file)
self.coverage_h5 = coverage_h5
self.n_batches = max(1, len(self.pairs) // max(batch_size, 1))
self.batch_size = batch_size if batch_size > 0 else len(self.pairs)
self.load_batch = load_batch
self.wsize = wsize
self.wstep = wstep
def __iter__(self):
return self
def __len__(self):
return self.n_batches
def load(self):
"""
Extract coverage for next pair batch
"""
pairs = self.pairs[self.i*self.batch_size : (self.i + self.load_batch)*self.batch_size]
self.x1, self.x2 = get_coverage(pairs, self.coverage_h5, self.wsize, self.wstep)
def __next__(self):
if self.i < self.n_batches:
if self.i % self.load_batch == 0:
self.load()
idx_inf = (self.i % self.load_batch) * self.batch_size
idx_sup = idx_inf + self.batch_size
self.i += 1
return (
format_array(self.x1[idx_inf:idx_sup, :, :]),
format_array(self.x2[idx_inf:idx_sup, :, :])
)
raise StopIteration() | python | 13 | 0.543676 | 95 | 29.468085 | 47 |
Generator for the coverage feature.
It loads the coverage every [load_batch] batches.
| class |
class QueryLucene:
"""Match documents from the corpus with queries using lucene"""
def __init__(self, index_path=os.path.join(ROOT_DIR, 'corpus/indexRI')):
"""
Lucene components initialization
:param index_path: path of the index
"""
self.analyzer = StandardAnalyzer()
self.index = SimpleFSDirectory(File(index_path).toPath())
self.reader = DirectoryReader.open(self.index)
self.searcher = IndexSearcher(self.reader)
self.constrained_query = BooleanQuery.Builder()
self.parser = Parser()
def query_parser_filter(self, field_values, field_filter=['Vector']):
"""
Filtering queries according to field values
:param field_values: values of the fields
:param field_filter: fields to filter
"""
assert len(field_filter) == len(field_values), "Number of fields different from number of values"
for i in range(len(field_filter)):
query_parser = QueryParser(field_filter[i], self.analyzer)
query = query_parser.parse(field_values[i])
self.constrained_query.add(query, BooleanClause.Occur.FILTER)
def query_parser_must(self, field_values, field_must=['Text']):
"""
The values that the fields must match
:param field_values: values of the fields
:param field_must: fields that must match
"""
assert len(field_must) == len(field_values), "Number of fields different from number of values"
for i in range(len(field_must)):
query_parser = QueryParser(field_must[i], self.analyzer)
query = query_parser.parse(field_values[i])
self.constrained_query.add(query, BooleanClause.Occur.MUST)
def remove_duplicates(self, hits):
"""
remove duplicates (regarding the text field) from a scoreDocs object
:param hits: the scoreDocs object resulting from a query
:return: the scoreDocs object without duplicates
"""
seen = set()
keep = []
for i in range(len(hits)):
if hits[i]["Text"] not in seen:
seen.add(hits[i]["Text"])
keep.append(hits[i])
return keep
def get_results(self, nb_results=1000):
"""
        Get results that match the query
        :param nb_results: maximum number of documents to retrieve
        :return: list of matching documents as field-name/value dictionaries, with duplicates removed
"""
docs = self.searcher.search(self.constrained_query.build(), nb_results).scoreDocs
self.constrained_query = BooleanQuery.Builder()
hits = []
for i in range(len(docs)):
hits.append({})
for field in self.reader.document(docs[i].doc).getFields():
hits[i][field.name()] = field.stringValue()
hits = self.remove_duplicates(hits)
return hits
def rerank_results(self, results, user_vector, user_gender, user_location, user_sentiment):
"""
reranks the results of a query by using the similarity between the user thematic vector and the vector from the tweets
:param results: the documents resulting from a query
:param user_vector: the thematic vector of a user
:param user_gender: the gender of a user
:param user_location: the location of a user
:param user_sentiment: the sentiment of a user
:return: the reranked list of documents
"""
reranked = []
user_vec = ProfileOneHotEncoder.add_info_to_vec(user_vector, user_gender, user_location,
user_sentiment).reshape(1, -1)
for i in range(len(results)):
doc_infos = Tweet.load(int(results[i]['TweetID']))
if doc_infos is None:
reranked.append({'doc': results[i], 'sim': 0.})
else:
doc_vector = ProfileOneHotEncoder.add_info_to_vec(doc_infos.vector, doc_infos.gender, doc_infos.country,
doc_infos.sentiment).reshape(1, -1)
sim = cosine_similarity(user_vec, doc_vector)
reranked.append({'doc': doc_infos, 'sim': sim[0][0]})
reranked = sorted(reranked, key=lambda k: k['sim'], reverse=True)
return [x['doc'] for x in reranked]
def close_reader(self):
self.reader.close()
def link_tweets(self, results):
return [Tweet.load(r['TweetID']) for r in results] | python | 17 | 0.689386 | 120 | 36.330097 | 103 | Match documents from the corpus with queries using lucene | class |
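A hedged usage sketch of the class above. It assumes the PyLucene JVM has been started with lucene.initVM() and that an index exists at the default path; the query string and result count are illustrative only.

import lucene

lucene.initVM()                              # PyLucene must be initialised once per process
searcher = QueryLucene()
searcher.query_parser_must(["climate change"], field_must=["Text"])
hits = searcher.get_results(nb_results=100)  # field-name/value dicts, deduplicated on "Text"
for hit in hits[:5]:
    print(hit.get("TweetID"), hit.get("Text", "")[:80])
searcher.close_reader()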
class Rsvp:
"""
Contains models for the rsvp"""
def __init__(self):
"""
Initializes the rsvp class
"""
self.db = RSVP
def set_rsvp(self, meetup_id, user_id, response):
"""
Sets the rsvp status for a meetup and adds it to the DB
"""
rsvp = {
"id": len(self.db) + 1,
"meetup": meetup_id,
"user": user_id,
"response": response
}
RSVP.append(rsvp) | python | 13 | 0.460285 | 63 | 20.391304 | 23 |
Contains models for the rsvp | class |
class Connection:
"Class that encapsulates all the interface to the DB."
def __init__(self):
self.connection = None
self.cursor = None
def commit(self, *args, **kwargs) -> None:
return self.connection.commit(*args, **kwargs)
def connect(self, path: Optional[str] = "/usr/share/parentopticon/db.sqlite"):
self.connection = sqlite3.connect(
path,
detect_types=sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES,
)
self.cursor = self.connection.cursor()
def execute(self, *args) -> Iterable[Tuple[Any]]:
return self.cursor.execute(*args)
def execute_commit_return(self, *args) -> int:
"Execute a statement, commit it, return the rowid."
self.cursor.execute(*args)
self.connection.commit()
return self.cursor.lastrowid | python | 12 | 0.713333 | 79 | 30.291667 | 24 | Class that encapsulates all the interface to the DB. | class |
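A small usage sketch of the wrapper above; the in-memory path and the program table are illustrative, not part of parentopticon's actual schema.

conn = Connection()
conn.connect(path=":memory:")  # sqlite3 accepts ":memory:" for a throwaway database
conn.execute("CREATE TABLE program (id INTEGER PRIMARY KEY, name TEXT)")
row_id = conn.execute_commit_return("INSERT INTO program (name) VALUES (?)", ("firefox",))
print(row_id, list(conn.execute("SELECT name FROM program")))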
class RevCorrData:
"""Restructure input NDVars into arrays for reverse correlation
Attributes
----------
y : NDVar
Dependent variable.
x : NDVar | sequence of NDVar
Predictors.
segments : np.ndarray
``(n_segments, 2)`` array of segment ``[start, stop]`` indices.
cv_segments : Sequence
Sequence of ``(all_segments, train, test)`` tuples, where each is a
2d-array of ``[start, stop]`` indices.
cv_indexes : Sequence
Only available for segmented data. For each partition, the index into
:attr:`.segments` used as test set.
"""
def __init__(self, y, x, error, scale_data, ds=None):
y = asndvar(y, ds=ds)
if isinstance(x, (tuple, list, Iterator)):
x = (asndvar(x_, ds=ds) for x_ in x)
else:
x = asndvar(x, ds=ds)
# scale_data param
if isinstance(scale_data, bool):
scale_in_place = False
elif isinstance(scale_data, str):
if scale_data == 'inplace':
scale_in_place = True
else:
raise ValueError("scale_data=%r" % (scale_data,))
else:
raise TypeError("scale_data=%r, need bool or str" % (scale_data,))
# check y and x
if isinstance(x, NDVar):
x_name = x.name
x = (x,)
multiple_x = False
else:
x = tuple(x)
assert all(isinstance(x_, NDVar) for x_ in x)
x_name = tuple(x_.name for x_ in x)
multiple_x = True
time_dim = y.get_dim('time')
if any(x_.get_dim('time') != time_dim for x_ in x):
raise ValueError("Not all NDVars have the same time dimension")
n_times = len(time_dim)
# determine cases (used as segments)
n_cases = segments = None
for x_ in x:
# determine cases
if n_cases is None:
if x_.has_case:
n_cases = len(x_)
# check y
if not y.has_case:
raise ValueError(f'y={y!r}: x has case dimension but y does not')
elif len(y) != n_cases:
raise ValueError(f'y={y!r}: different number of cases from x {n_cases}')
# prepare segment index
seg_i = np.arange(0, n_cases * n_times + 1, n_times, np.int64)[:, newaxis]
segments = np.hstack((seg_i[:-1], seg_i[1:]))
else:
n_cases = 0
elif n_cases:
if len(x_) != n_cases:
raise ValueError(f'x={x}: not all components have same number of cases')
else:
assert not x_.has_case, 'some but not all x have case'
case_to_segments = n_cases > 0
# vector dimension
vector_dims = [dim.name for dim in y.dims if dim._connectivity_type == 'vector']
if not vector_dims:
vector_dim = None
elif len(vector_dims) == 1:
vector_dim = vector_dims.pop()
else:
raise NotImplementedError(f"y={y!r}: more than one vector dimension ({', '.join(vector_dims)})")
# y_data: flatten to ydim x time array
last = ('time',)
n_ydims = -1
if case_to_segments:
last = ('case',) + last
n_ydims -= 1
if vector_dim:
last = (vector_dim,) + last
y_dimnames = y.get_dimnames(last=last)
ydims = y.get_dims(y_dimnames[:n_ydims])
n_times_flat = n_cases * n_times if case_to_segments else n_times
n_flat = reduce(mul, map(len, ydims), 1)
shape = (n_flat, n_times_flat)
y_data = y.get_data(y_dimnames).reshape(shape)
# shape for exposing vector dimension
if vector_dim:
if not scale_data:
raise NotImplementedError("Vector data without scaling")
n_flat_prevector = reduce(mul, map(len, ydims[:-1]), 1)
n_vector = len(ydims[-1])
assert n_vector > 1
vector_shape = (n_flat_prevector, n_vector, n_times_flat)
else:
vector_shape = None
# x_data: predictor x time array
x_data = []
x_meta = []
x_names = []
n_x = 0
for x_ in x:
ndim = x_.ndim - bool(n_cases)
if ndim == 1:
xdim = None
dimnames = ('case' if n_cases else newaxis, 'time')
data = x_.get_data(dimnames)
index = n_x
x_names.append(dataobj_repr(x_))
elif ndim == 2:
dimnames = x_.get_dimnames(last='time')
xdim = x_.get_dim(dimnames[-2])
if n_cases:
dimnames = (xdim.name, 'case', 'time')
data = x_.get_data(dimnames)
index = slice(n_x, n_x + len(data))
x_repr = dataobj_repr(x_)
for v in xdim:
x_names.append("%s-%s" % (x_repr, v))
else:
raise NotImplementedError("x with more than 2 dimensions")
if n_cases:
data = data.reshape((-1, n_cases * n_times))
x_data.append(data)
x_meta.append((x_.name, xdim, index))
n_x += len(data)
if len(x_data) == 1:
x_data = x_data[0]
x_is_copy = False
else:
x_data = np.concatenate(x_data)
x_is_copy = True
if scale_data:
if not scale_in_place:
y_data = y_data.copy()
if not x_is_copy:
x_data = x_data.copy()
x_is_copy = True
y_mean = y_data.mean(1)
x_mean = x_data.mean(1)
y_data -= y_mean[:, newaxis]
x_data -= x_mean[:, newaxis]
# for vector data, scale by vector norm
if vector_shape:
y_data_vector_shape = y_data.reshape(vector_shape)
y_data_scale = norm(y_data_vector_shape, axis=1)
else:
y_data_vector_shape = None
y_data_scale = y_data
if error == 'l1':
y_scale = np.abs(y_data_scale).mean(-1)
x_scale = np.abs(x_data).mean(-1)
elif error == 'l2':
y_scale = (y_data_scale ** 2).mean(-1) ** 0.5
x_scale = (x_data ** 2).mean(-1) ** 0.5
else:
raise RuntimeError(f"error={error!r}")
if vector_shape:
y_data_vector_shape /= y_scale[:, newaxis, newaxis]
else:
y_data /= y_scale[:, newaxis]
x_data /= x_scale[:, newaxis]
# for data-check
y_check = y_scale
x_check = x_scale
# zero-padding for convolution
x_pads = -x_mean / x_scale
else:
y_mean = x_mean = y_scale = x_scale = None
y_check = y_data.var(1)
x_check = x_data.var(1)
x_pads = np.zeros(n_x)
# check for flat data
zero_var = [y.name or 'y'] if np.any(y_check == 0) else []
zero_var.extend(x_name[i] for i, v in enumerate(x_check) if v == 0)
if zero_var:
raise ValueError("Flat data: " + ', '.join(zero_var))
# check for NaN
has_nan = [y.name] if np.isnan(y_check.sum()) else []
has_nan.extend(x_name[i] for i, v in enumerate(x_check) if np.isnan(v))
if has_nan:
raise ValueError("Data with NaN: " + ', '.join(has_nan))
self.error = error
self.time = time_dim
self.segments = segments
self.cv_segments = self.cv_indexes = self.partitions = self.model = None
self._scale_data = bool(scale_data)
self.shortest_segment_n_times = n_times
# y
self.y = y_data # (n_signals, n_times)
self.y_mean = y_mean
self.y_scale = y_scale
self.y_name = y.name
self.y_info = _info.copy(y.info)
self.ydims = ydims # without case and time
self.yshape = tuple(map(len, ydims))
self.full_y_dims = y.get_dims(y_dimnames)
self.vector_dim = vector_dim # vector dimension name
self.vector_shape = vector_shape # flat shape with vector dim separate
# x
self.x = x_data # (n_predictors, n_times)
self.x_mean = x_mean
self.x_scale = x_scale
self.x_name = x_name
self._x_meta = x_meta # [(x.name, xdim, index), ...]; index is int or slice
self._multiple_x = multiple_x
self._x_is_copy = x_is_copy
self.x_pads = x_pads
def apply_basis(self, basis, basis_window):
"Apply basis to x"
if not basis:
return
n = int(round(basis / self.time.tstep))
w = scipy.signal.get_window(basis_window, n, False)
w /= w.sum()
for xi in self.x:
xi[:] = scipy.signal.convolve(xi, w, 'same')
def prefit(self, res):
if not res:
return
from ._boosting import convolve
hs = (res.h_source,) if isinstance(res.h_source, NDVar) else res.h_source
n_y = self.y.shape[0]
n_x = self.x.shape[0]
# check that names are unique
x_names = [name for name, *_ in self._x_meta]
if len(set(x_names)) != len(x_names):
raise ValueError(f"prefit={res}: prefit requires that all predictors have unique names; x has names {x_names}")
# check that prefit matches y dims
h0 = hs[0]
index = {}
for ydim in self.ydims:
hdim = h0.get_dim(ydim.name)
if hdim == ydim:
continue
elif not hdim._is_superset_of(ydim):
raise ValueError(f"prefit: y dimension {ydim.name} has elements that are not contained in the prefit")
index[ydim.name] = hdim.index_into_dim(ydim)
if index:
hs = [h.sub(**index) for h in hs]
# check predictor dims
y_dimnames = [dim.name for dim in self.ydims]
meta = {name: (dim, index) for name, dim, index in self._x_meta}
for h in hs:
if h.name not in meta:
raise ValueError(f"prefit: {h.name!r} not in x")
dim, index = meta[h.name]
need_dimnames = (*y_dimnames, 'time') if dim is None else (*y_dimnames, dim.name, 'time')
if h.dimnames != need_dimnames:
raise ValueError(f"prefit: {h.name!r} dimension mismatch, has {h.dimnames}, needs {need_dimnames}")
if dim is not None and h.dims[-2] != dim:
raise ValueError(f"prefit: {h.name!r} {dim.name} dimension mismatch")
# generate flat h
h_n_times = len(h0.get_dim('time'))
h_flat = []
h_index = []
for h in hs:
dimnames = h.get_dimnames(first=y_dimnames, last='time')
h_data = h.get_data(dimnames)
index = meta[h.name][1]
if isinstance(index, int):
h_flat.append(h_data.reshape((n_y, 1, h_n_times)))
h_index.append(index)
else:
n_hdim = index.stop - index.start
h_flat.append(h_data.reshape((n_y, n_hdim, h_n_times)))
h_index.extend(range(index.start, index.stop))
h_flat = np.concatenate(h_flat, 1)
# assert scaling equivalent
# assert np.all(res.x_mean == self.x_mean[h_index])
# assert np.all(res.x_scale == self.x_scale[h_index])
# subset to relevant data
x = self.x[h_index]
x_pads = self.x_pads[h_index]
# subtract prefit predictions
i_start = int(round(res.tstart / self.time.tstep))
for y, h in zip(self.y, h_flat):
y -= convolve(h, x, x_pads, i_start, self.segments)
# remove prefit predictors
keep = np.setdiff1d(np.arange(n_x), h_index)
self.x = self.x[keep]
self.x_mean = self.x_mean[keep]
self.x_scale = self.x_scale[keep]
self.x_pads = self.x_pads[keep]
self._x_is_copy = True
# update x meta-information
target_index = np.empty(n_x, int)
target_index.fill(-1)
target_index[keep] = np.arange(len(keep))
new_meta = []
self.x_name = []
for name, xdim, index in self._x_meta:
if isinstance(index, int):
new_index = target_index[index]
if new_index < 0:
continue
else:
new_start = target_index[index.start]
if new_start < 0:
continue
new_stop = target_index[index.stop - 1] + 1
new_index = slice(new_start, new_stop)
new_meta.append((name, xdim, new_index))
self.x_name.append(name)
self._x_meta = new_meta
self._multiple_x = len(self._x_meta) > 1
def initialize_cross_validation(self, partitions=None, model=None, ds=None):
if partitions is not None and partitions <= 1:
raise ValueError(f"partitions={partitions}")
cv_segments = [] # list of (segments, train, test)
n_times = len(self.time)
if self.segments is None:
if model is not None:
raise TypeError(f'model={dataobj_repr(model)!r}: model cannot be specified in unsegmented data')
if partitions is None:
partitions = 10
seg_n_times = int(floor(n_times / partitions))
# first
for i in range(partitions):
test = ((seg_n_times * i, seg_n_times * (i + 1)),)
if i == 0: # first
train = ((seg_n_times, n_times),)
elif i == partitions - 1: # last
train = ((0, n_times - seg_n_times),)
else:
train = ((0, seg_n_times * i),
(seg_n_times * (i + 1), n_times))
cv_segments.append((np.vstack((train, test)), train, test))
cv_segments = (tuple(np.array(s, np.int64) for s in cv) for cv in cv_segments)
else:
n_total = len(self.segments)
if model is None:
cell_indexes = [np.arange(n_total)]
else:
model = ascategorial(model, ds=ds, n=n_total)
cell_indexes = [np.flatnonzero(model == cell) for cell in model.cells]
cell_sizes = [len(i) for i in cell_indexes]
cell_size = min(cell_sizes)
cell_sizes_are_equal = len(set(cell_sizes)) == 1
if partitions is None:
if cell_sizes_are_equal:
if 3 <= cell_size <= 10:
partitions = cell_size
else:
raise NotImplementedError(f"Automatic partition for {cell_size} cases")
else:
raise NotImplementedError(f'Automatic partition for variable cell size {tuple(cell_sizes)}')
if partitions > cell_size:
if not cell_sizes_are_equal:
raise ValueError(f'partitions={partitions}: > smallest cell size ({cell_size}) with unequal cell sizes')
elif partitions % cell_size:
raise ValueError(f'partitions={partitions}: not a multiple of cell_size ({cell_size})')
elif len(cell_sizes) > 1:
raise NotImplementedError(f'partitions={partitions} with more than one cell')
n_parts = partitions // cell_size
segments = []
for start, stop in self.segments:
d = (stop - start) / n_parts
starts = [int(round(start + i * d)) for i in range(n_parts)]
starts.append(stop)
for i in range(n_parts):
segments.append((starts[i], starts[i+1]))
segments = np.array(segments, np.int64)
index_range = np.arange(partitions)
indexes = [index_range == i for i in range(partitions)]
else:
segments = self.segments
indexes = []
for i in range(partitions):
index = np.zeros(n_total, bool)
for cell_index in cell_indexes:
index[cell_index[i::partitions]] = True
indexes.append(index)
cv_segments = ((segments, segments[np.invert(i)], segments[i]) for i in indexes)
self.cv_indexes = tuple(indexes)
self.partitions = partitions
self.cv_segments = tuple(cv_segments)
self.model = dataobj_repr(model)
def data_scale_ndvars(self):
if self._scale_data:
# y
if self.yshape:
y_mean = NDVar(self.y_mean.reshape(self.yshape), self.ydims, self.y_info, self.y_name)
else:
y_mean = self.y_mean[0]
# scale does not include vector dim
if self.vector_dim:
dims = self.ydims[:-1]
shape = self.yshape[:-1]
else:
dims = self.ydims
shape = self.yshape
if shape:
y_scale = NDVar(self.y_scale.reshape(shape), dims, self.y_info, self.y_name)
else:
y_scale = self.y_scale[0]
# x
x_mean = []
x_scale = []
for name, dim, index in self._x_meta:
if dim is None:
x_mean.append(self.x_mean[index])
x_scale.append(self.x_scale[index])
else:
dims = (dim,)
x_mean.append(NDVar(self.x_mean[index], dims, {}, name))
x_scale.append(NDVar(self.x_scale[index], dims, {}, name))
if self._multiple_x:
x_mean = tuple(x_mean)
x_scale = tuple(x_scale)
else:
x_mean = x_mean[0]
x_scale = x_scale[0]
else:
y_mean = y_scale = x_mean = x_scale = None
return y_mean, y_scale, x_mean, x_scale
def package_kernel(self, h, tstart):
"""Package kernel as NDVar
Parameters
----------
h : array (n_y, n_x, n_times)
Kernel data.
"""
h_time = UTS(tstart, self.time.tstep, h.shape[-1])
hs = []
if self._scale_data:
info = _info.for_normalized_data(self.y_info, 'Response')
else:
info = self.y_info
for name, dim, index in self._x_meta:
x = h[:, index, :]
if dim is None:
dims = (h_time,)
else:
dims = (dim, h_time)
if self.ydims:
dims = self.ydims + dims
if len(self.ydims) > 1:
x = x.reshape(self.yshape + x.shape[1:])
else:
x = x[0]
hs.append(NDVar(x, dims, info, name))
if self._multiple_x:
return tuple(hs)
else:
return hs[0]
def package_value(
self,
value: np.ndarray, # data
name: str, # NDVar name
info: dict = None, # NDVar info
meas: str = None, # for NDVar info
):
if not self.yshape:
return value[0]
# shape
has_vector = value.shape[0] > self.yshape[0]
if self.vector_dim and not has_vector:
dims = self.ydims[:-1]
shape = self.yshape[:-1]
else:
dims = self.ydims
shape = self.yshape
if not dims:
return value[0]
elif len(shape) > 1:
value = value.reshape(shape)
# info
if meas:
info = _info.for_stat_map(meas, old=info)
elif info is None:
info = self.y_info
return NDVar(value, dims, info, name)
def package_y_like(self, data, name):
shape = tuple(map(len, self.full_y_dims))
data = data.reshape(shape)
# roll Case to first axis
for axis, dim in enumerate(self.full_y_dims):
if isinstance(dim, Case):
data = np.rollaxis(data, axis)
dims = list(self.full_y_dims)
dims.insert(0, dims.pop(axis))
break
else:
dims = self.full_y_dims
return NDVar(data, dims, {}, name)
def vector_correlation(self, y, y_pred):
"Correlation for vector data"
# shape (..., space, time)
assert self._scale_data
assert self.error in ('l1', 'l2')
assert y.ndim == y_pred.ndim == 3
y_pred_norm = norm(y_pred, axis=1)
y_norm = norm(y, axis=1)
# l2 correlation
y_pred_scale = (y_pred_norm ** 2).mean(1) ** 0.5
y_pred_scale[y_pred_scale == 0] = 1
y_pred_l2 = y_pred / y_pred_scale[:, newaxis, newaxis]
if self.error == 'l1':
y_scale = (y_norm ** 2).mean(1) ** 0.5
y_l2 = y / y_scale[:, newaxis, newaxis]
else:
y_l2 = y
r_l2 = np.multiply(y_l2, y_pred_l2, out=y_pred_l2).sum(1).mean(1)
# l1 correlation
if self.error == 'l1':
y_pred_scale = y_pred_norm.mean(1)
y_pred_scale[y_pred_scale == 0] = 1
y_pred_l1 = y_pred / y_pred_scale[:, newaxis, newaxis]
# E|X| = 1 --> E√XX = 1
yy = np.multiply(y, y_pred_l1, out=y_pred_l1).sum(1)
sign = np.sign(yy)
np.abs(yy, out=yy)
yy **= 0.5
yy *= sign
r_l1 = yy.mean(1)
else:
r_l1 = None
return r_l2, r_l1 | python | 21 | 0.491214 | 124 | 38.548736 | 554 | Restructure input NDVars into arrays for reverse correlation
Attributes
----------
y : NDVar
Dependent variable.
x : NDVar | sequence of NDVar
Predictors.
segments : np.ndarray
``(n_segments, 2)`` array of segment ``[start, stop]`` indices.
cv_segments : Sequence
Sequence of ``(all_segments, train, test)`` tuples, where each is a
2d-array of ``[start, stop]`` indices.
cv_indexes : Sequence
Only available for segmented data. For each partition, the index into
:attr:`.segments` used as test set.
| class |
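When scale_data is set, the constructor above demeans each row of y and x and divides by either the mean absolute value (error='l1') or the RMS (error='l2'). The snippet below is a minimal NumPy-only sketch of that per-channel scaling on random data, separate from the class.

import numpy as np

y = np.random.default_rng(0).normal(size=(3, 100))  # (n_signals, n_times)
y = y - y.mean(1, keepdims=True)                    # demean each channel
l1_scale = np.abs(y).mean(-1)                       # error='l1'
l2_scale = (y ** 2).mean(-1) ** 0.5                 # error='l2'
y_l1 = y / l1_scale[:, np.newaxis]
y_l2 = y / l2_scale[:, np.newaxis]
print(np.abs(y_l1).mean(1), (y_l2 ** 2).mean(1))    # both come out ≈ 1 for every channel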
class RedBlackTree:
"""
A class used to represent a red-black binary search tree.
Attributes:
Methods:
insert(key: key)
Inserts an element into the search tree.
left_rotation(key)
Reorganizes a section of the search tree so the parent node, x,
        becomes the left child of its original right child, y, and y
becomes the parent of x.
right_rotation(key)
Reorganizes a section of the search tree so the parent node, x,
becomes the right child of its original left child, y, and y
becomes the parent of x.
recolor(key)
Recolors nodes starting at the key to ensure the red-black tree
invariants are maintained when a change to the search tree occurs.
delete(key)
Deletes a node from the search tree if the key exists.
traverse() -> list
Prints the keys of the search tree in ascending order, for example,
1, 2, 3, 4, ..., n.
successor(key: key) -> key
Provides the given key's closest node in value that is greater than
the key if it exists in the search tree.
predecessor(key: key) -> key
Provides the given key's closest node in value that is less than the
key if it exists in the search tree.
max() -> key
Provides the maximum value that exists in the search tree.
min() -> key
        Provides the minimum value that exists in the search tree.
contains(key) -> Tuple[key, bool]
Checks if a value exists in the search tree.
"""
def __init__(self):
"""
Parameters:
None
"""
self.root = None
def insert(self, key: key):
"""
Inserts a node into the search tree.
Parameters:
key : key
The key of the node you wish to insert
"""
# Search for a node with the value of key in the search tree
# Red-black tree specific
# If no successor the Node is the root and must be set to black
pass
def left_rotation(self, key: key):
"""
"""
pass
def right_rotation(self, key: key):
"""
"""
pass
def delete(self, key: key):
"""
"""
pass
def traverse(self):
"""
"""
pass
def successor(self, key: key) -> key:
"""
"""
pass
def predecessor(self, key: key) -> key:
"""
"""
pass
def max(self) -> key:
"""
"""
pass
def min(self) -> key:
"""
"""
pass
    def contains(self, v, k: key) -> Tuple[key, bool]:
"""
Checks if the given value is in the search tree.
Parameters:
v: The value you wish to check for.
k: The key of a comparison node.
Returns:
key: The value of the key if the search tree contains it, otherwise 0.
bool: True if the search tree contains key, otherwise False.
"""
# Start at the root
# Traverse left/right child pointers as needed. That is:
# if k < Node.key compared go left
# else go right
# Return node with k or null (k is not in the search tree) as appropriate
# if k == None:
# return None, False
# if v < k:
# contains(v, Node.left)
# elif v > k:
# contains(v, Node.right)
# else:
# return v, True
| python | 8 | 0.504604 | 82 | 25.550725 | 138 |
A class used to represent a red-black binary search tree.
Attributes:
Methods:
insert(key: key)
Inserts an element into the search tree.
left_rotation(key)
Reorganizes a section of the search tree so the parent node, x,
becomes the left child of its original right child, y, and y
becomes the parent of x.
right_rotation(key)
Reorganizes a section of the search tree so the parent node, x,
becomes the right child of its original left child, y, and y
becomes the parent of x.
recolor(key)
Recolors nodes starting at the key to ensure the red-black tree
invariants are maintained when a change to the search tree occurs.
delete(key)
Deletes a node from the search tree if the key exists.
traverse() -> list
Prints the keys of the search tree in ascending order, for example,
1, 2, 3, 4, ..., n.
successor(key: key) -> key
Provides the given key's closest node in value that is greater than
the key if it exists in the search tree.
predecessor(key: key) -> key
Provides the given key's closest node in value that is less than the
key if it exists in the search tree.
max() -> key
Provides the maximum value that exists in the search tree.
min() -> key
Provides the minimum value that exists in the search tree.
contains(key) -> Tuple[key, bool]
Checks if a value exists in the search tree.
| class |
class Node:
"""
A class used to represent a Node in a red-black search tree.
Attributes:
key: The key is the value the node shall be sorted on. The key can be an integer,
float, string, anything capable of being sorted.
color (str): The color attribute records whether a node is red or black.
"""
def __init__(self, key):
"""
        Parameters:
            key: The key is the value the node shall be sorted on. The key can be an integer,
            float, string, anything capable of being sorted.
        Attributes:
            left: The pointer to the left child node.
            right: The pointer to the right child node.
            color (str): The color attribute keeps track of whether a node is red or black.
"""
self.key = key
self.left = None
self.right = None
self.color = "red"
def setColor(self):
"""
Sets the color of a Node.
"""
pass | python | 8 | 0.554672 | 93 | 30.516129 | 31 |
A class used to represent a Node in a red-black search tree.
Attributes:
key: The key is the value the node shall be sorted on. The key can be an integer,
float, string, anything capable of being sorted.
color (str): The color attribute records whether a node is red or black.
| class |
class State:
"""State of Lsystem"""
width: int
color: tuple
angle: int
y: int
x: int
def __init__(self):
"""Initialisation of state
>>> State().x
0
>>> State().y
0
>>> State().angle
0
>>> State().color
(255, 255, 255)
>>> State().width
0"""
self.x = 0
self.y = 0
self.angle = 0
self.color = (255, 255, 255)
self.width = 0
def __str__(self):
return str(self.x)
def __repr__(self):
return self.__str__() | python | 9 | 0.415385 | 36 | 17.3125 | 32 | State of Lsystem | class |
class Indentor:
"""This class manages indentation, for use with context manager
It is used to correctly indent the definition node tree hierarchy
"""
_index = 0
def __init__(self):
Indentor._index += 1
def __del__(self):
Indentor._index -= 1
@classmethod
def indent(cls,the_file):
for i in range(Indentor._index):
the_file.write(' ') | python | 12 | 0.598504 | 69 | 29.923077 | 13 | This class manages indentation, for use with context manager
It is used to correctly indent the definition node tree hierarchy
| class |
class DefsTraverser:
"""Traverse the ecflow.Defs definition and write to file.
This demonstrates that all nodes in the node tree and all attributes are accessible.
Additionally the state data is also accessible. This class will write state data as
comments. If the definition was returned from the server, it allows access to latest
snapshot of the state data held in the server.
"""
def __init__(self,defs,file_name):
assert (isinstance(defs,ecflow.Defs)),"Expected ecflow.Defs as first argument"
assert (isinstance(file_name,str)),"Expected a string argument. Representing a file name"
self.__defs = defs
self.__file = open(file_name, 'w')
def write_to_file(self):
for extern in self.__defs.externs:
self.__writeln("extern " + extern)
for suite in self.__defs:
self.__write("suite ")
self.__print_node(suite)
clock = suite.get_clock()
if clock:
indent = Indentor()
self.__writeln(str(clock))
del indent
self.__print_nc(suite)
self.__writeln("endsuite")
self.__file.close()
def __print_nc(self,node_container):
indent = Indentor()
for node in node_container:
if isinstance(node, ecflow.Task):
self.__write("task ")
self.__print_node(node)
self.__print_alias(node)
else:
self.__write("family ")
self.__print_node(node)
self.__print_nc(node)
self.__writeln("endfamily")
del indent
def __print_alias(self,task):
indent = Indentor()
for alias in task:
self.__write("alias ")
self.__print_node(alias)
self.__writeln("endalias")
del indent
def __print_node(self,node):
self.__file.write(node.name() + " # state:" + str(node.get_state()) + "\n")
indent = Indentor()
defStatus = node.get_defstatus()
if defStatus != ecflow.DState.queued:
self.__writeln("defstatus " + str(defStatus))
autocancel = node.get_autocancel()
if autocancel: self.__writeln(str(autocancel))
autoarchive = node.get_autoarchive()
if autoarchive: self.__writeln(str(autoarchive))
autorestore = node.get_autorestore()
if autorestore: self.__writeln(str(autorestore))
repeat = node.get_repeat()
if not repeat.empty(): self.__writeln(str(repeat) + " # value: " + str(repeat.value()))
late = node.get_late()
if late: self.__writeln(str(late) + " # is_late: " + str(late.is_late()))
complete_expr = node.get_complete()
if complete_expr:
for part_expr in complete_expr.parts:
trig = "complete "
if part_expr.and_expr(): trig = trig + "-a "
if part_expr.or_expr(): trig = trig + "-o "
self.__write(trig)
self.__file.write( part_expr.get_expression() + "\n")
trigger_expr = node.get_trigger()
if trigger_expr:
for part_expr in trigger_expr.parts:
trig = "trigger "
if part_expr.and_expr(): trig = trig + "-a "
if part_expr.or_expr(): trig = trig + "-o "
self.__write(trig)
self.__file.write( part_expr.get_expression() + "\n")
for var in node.variables: self.__writeln("edit " + var.name() + " '" + var.value() + "'")
for meter in node.meters: self.__writeln(str(meter) + " # value: " + str(meter.value()))
for event in node.events: self.__writeln(str(event) + " # value: " + str(event.value()))
for label in node.labels: self.__writeln(str(label) + " # value: " + label.new_value())
for limit in node.limits: self.__writeln(str(limit) + " # value: " + str(limit.value()))
for inlimit in node.inlimits: self.__writeln(str(inlimit))
for the_time in node.times: self.__writeln(str(the_time))
for today in node.todays: self.__writeln(str(today))
for date in node.dates: self.__writeln(str(date))
for day in node.days: self.__writeln(str(day))
for cron in node.crons: self.__writeln(str(cron))
for verify in node.verifies: self.__writeln(str(verify))
for zombie in node.zombies: self.__writeln(str(zombie))
for queue in node.queues: self.__writeln(str(queue))
for generic in node.generics: self.__writeln(str(generic))
del indent
def __write(self,the_string):
Indentor.indent(self.__file)
self.__file.write(the_string)
def __writeln(self,the_string):
Indentor.indent(self.__file)
self.__file.write(the_string + "\n") | python | 16 | 0.54759 | 101 | 41.939655 | 116 | Traverse the ecflow.Defs definition and write to file.
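
# Illustrative usage sketch (editor's addition, not part of the original source).
# It assumes the usual ecflow Python API (Defs/add_suite/add_task) is available
# and writes the definition, with state comments, to a made-up file name.
if __name__ == "__main__":
    defs = ecflow.Defs()
    suite = defs.add_suite("demo")
    suite.add_task("t1")
    DefsTraverser(defs, "defs_with_state.def").write_to_file()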
This demonstrates that all nodes in the node tree and all attributes are accessible.
Additionally the state data is also accessible. This class will write state data as
comments. If the definition was returned from the server, it allows access to latest
snapshot of the state data held in the server.
| class |
class AverageMeter:
"""
Stores the current value, average and sum of a variable
over `count` steps.
Taken from:
https://github.com/abhishekkrthakur/wtfml/blob/master/wtfml/utils/average_meter.py
"""
def __init__(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count | python | 9 | 0.536379 | 86 | 21.769231 | 26 |
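
# Illustrative usage sketch (editor's addition): track a running average of a
# per-batch loss, weighting each update by the batch size. The numbers are made up.
meter = AverageMeter()
for batch_loss, batch_size in [(0.90, 32), (0.70, 32), (0.50, 16)]:
    meter.update(batch_loss, n=batch_size)
print(meter.val, meter.avg, meter.count)   # 0.5, the size-weighted mean (0.74), 80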
Stores the current value, average and sum of a variable
over `count` steps.
Taken from:
https://github.com/abhishekkrthakur/wtfml/blob/master/wtfml/utils/average_meter.py
| class |
class NeuralNetwork:
"""
define the NeuralNetwork class
"""
def __init__(self, nx, nodes):
"""initialize variables and methods"""
if not isinstance(nx, int):
raise TypeError('nx must be an integer')
if nx < 1:
raise ValueError('nx must be a positive integer')
if not isinstance(nodes, int):
raise TypeError('nodes must be an integer')
if nodes < 1:
raise ValueError('nodes must be a positive integer')
self.nx = nx
self.nodes = nodes
self.W1 = np.random.normal(loc=0.0, scale=1.0, size=(nodes, nx))
self.b1 = np.zeros(nodes).reshape(nodes, 1)
self.A1 = 0
self.W2 = np.random.normal(
loc=0.0, scale=1.0, size=nodes).reshape(1, nodes)
self.b2 = 0
self.A2 = 0 | python | 12 | 0.555556 | 72 | 33.916667 | 24 |
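
# Illustrative usage sketch (editor's addition): the sizes are arbitrary and numpy
# is assumed to be imported as np, as the class body already expects.
# 784 input features and 16 hidden nodes give a (16, 784) W1, a (16, 1) bias
# column b1 and a (1, 16) output weight row W2.
network = NeuralNetwork(784, 16)
print(network.W1.shape, network.b1.shape, network.W2.shape)   # (16, 784) (16, 1) (1, 16)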
define the NeuralNetwork class
| class |
class KnowledgeUnit:
"""Definition of a knowledge unit.
Attributes:
parameters (dict): parameters for the knowledge unit.
"""
def __init__(self, **parameters):
"""
Initialize the knowledge unit.
Args:
parameters (dict): parameters for the knowledge unit.
"""
self.parameters = parameters
def to_dict(self):
"""
Knowledge unit to dictionary.
Returns:
dict: dictionary representing the knowledge unit.
"""
dict_representation = copy.deepcopy(self.parameters)
dict_representation['_id'] = self.get_id()
return dict_representation
@staticmethod
    def from_dict(dict_representation):
"""
Create a knowledge unit from a dictionary.
Args:
dict: dictionary representing the knowledge unit.
Returns:
KnowledgeUnit: a knowledge unit.
"""
_ = dict_representation.pop('_id', None)
return KnowledgeUnit(**dict_representation)
def get_id(self, length=32):
"""
Get id by hashing the knowledge unit using the MD5
checksum of the parameters dumped in JSON.
Args:
length (int, optional): length of the id.
Defaults to 32, since it's a MD5 checksum.
Returns:
dict: dictionary representing the knowledge unit.
"""
json_string = str(self)
return hashlib.md5(json_string.encode()).hexdigest()[:length]
def __str__(self):
"""
Knowledge unit to string.
Returns:
str: string representing the knowledge unit.
"""
return json.dumps(self.parameters)
def __hash__(self):
"""Hash function for a KnowledgeUnit."""
return hash(self.get_id())
def __eq__(self, other):
"""
Test equality between KnowledgeUnits.
Args:
other (KnowledgeUnit): other knowledge unit.
Returns:
bool: true in case of equality, false otherwise.
"""
if not isinstance(other, type(self)):
raise NotImplementedError
return self.get_id() == other.get_id() | python | 13 | 0.569762 | 69 | 26.195122 | 82 | Definition of a knowledge unit.
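
# Illustrative usage sketch (editor's addition): the parameter names are invented.
# A unit round-trips through its dictionary form; from_dict strips the '_id'
# field before rebuilding, so the restored unit compares and hashes equal.
ku = KnowledgeUnit(subject="enzyme", predicate="catalyzes", confidence=0.9)
as_dict = ku.to_dict()                       # parameters plus an MD5-based '_id'
restored = KnowledgeUnit.from_dict(as_dict)
assert restored == ku and hash(restored) == hash(ku)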
Attributes:
parameters (dict): parameters for the knowledge unit.
| class |
class ContextClient:
"""Class to be subclassed by discord.py Client to register EventContext hooks."""
def __init__(self, *args, **kwargs):
# noinspection PyTypeChecker
EventContext.ctx.set_client(self)
super().__init__(*args, **kwargs)
def dispatch(self, event_name, *args, **kwargs):
EventContext.ctx.event_hook(event_name, *args, **kwargs)
super().dispatch(event_name, *args, **kwargs) | python | 10 | 0.641723 | 85 | 43.2 | 10 | Class to be subclassed by discord.py Client to register EventContext hooks. | class |
class command_result:
"""Class for keeping track of an item in inventory."""
returncode: int
stdout: str
stderr: str | python | 6 | 0.666667 | 58 | 25.6 | 5 | Class for keeping track of an item in inventory. | class |
class PicServ:
""" Simple webserver script to handle GET requests """
def __init__(self, addr, port, root_path):
self.root_path = root_path # './webres'
self.port = port
self.addr = addr
self.server_socket = socket.socket(family=socket.AF_INET,
type=socket.SOCK_STREAM)
# TODO: implement TLS
# SSLContext.wrap_socket()
# ssl.wrap_socket(self.server_socket, keyfile, certfile)
def start_async(self):
_thread.start_new_thread(self.start, ())
def stop(self):
self.active = False
self.server_socket.close()
def start(self):
self.server_socket.bind((self.addr, self.port))
self.server_socket.listen(5)
self.active = True
print('PicServ instance started on {0!s}:{1!s}'.format(self.addr,
self.port))
try:
while self.active:
(client_socket, address) = self.server_socket.accept()
print('client connected on port {0!s}, starting new thread'
.format(self.port))
_thread.start_new_thread(self._process_request, (client_socket,
address))
finally:
self.server_socket.close()
def _process_request(self, socket, address):
print('client connected on {0!s}:{1!s}'.format(address, self.port))
try:
# TODO: get rid of the decode - just use the plain bytes
# to prevent decoding errors
msg = self._receive_msg(socket).decode('utf-8')
print('message received:')
print(msg)
if msg[:3] == 'GET':
# send requested resource to client
# get requested resource path from msg
print('GET request received')
req_source_path = self._get_path(msg)
if (self._exists_resource(req_source_path) or
req_source_path == '/'):
print('requested resource:')
print(req_source_path)
file_bytes = self._get_res_file_bytes(req_source_path)
self._send(socket, file_bytes)
# else:
# not yet supported
finally: # be sure to close the socket
socket.close()
def _get_path(self, msg):
msg_parts = re.findall('[^ ]*', msg)
filtered_msg_parts = []
for s in msg_parts:
if len(s) > 0:
filtered_msg_parts.append(s)
if len(filtered_msg_parts) >= 3:
return filtered_msg_parts[1]
else:
print('unable to read message')
def _exists_resource(self, path):
complete_path = self.root_path + path
return os.path.isfile(complete_path)
def _get_res_file_bytes(self, path):
file_bytes = None
if path == '/':
path = '/index.html'
with open(self.root_path + path, 'rb') as f:
file_bytes = f.read()
return file_bytes
def _receive_msg(self, socket):
# recvbytes = []
# recv = [0]
# while len(recv) != 0 and recv != 0:
# TODO: check if there is a message to be received by the host...
# else the server will wait forever
recv = socket.recv(4096) # TODO: find a way to be sure that all bytes
# were received
# print(recv)
# recvbytes += recv
# print('msg completely received')
return recv
def _send(self, socket, msg_bytes):
msg_size = len(msg_bytes)
totalsent = 0
while totalsent < msg_size:
sent = socket.send(msg_bytes[totalsent:])
            if sent == 0:
                print('Client closed connection')
                break
            totalsent += sent
print('removed client on port {0!s}'.format(self.port)) | python | 15 | 0.50875 | 79 | 37.647619 | 105 | Simple webserver script to handle GET requests | class |
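
# Illustrative usage sketch (editor's addition): the address, port and document
# root are made-up values, and './webres' must contain an index.html for GET /.
if __name__ == "__main__":
    server = PicServ('127.0.0.1', 8080, './webres')
    server.start_async()    # accept connections in a background thread
    input("Serving http://127.0.0.1:8080/ - press Enter to stop\n")
    server.stop()           # closes the listening socket, ending the accept loop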
class LoadFromFile:
''' Collection of static methods for getting stuff out of files. '''
@staticmethod
def get_json_dict(filename):
''' Returns the entire JSON dict in a given file. '''
with open(filename, encoding="utf8") as infile:
return json.load(infile)
@staticmethod
def tweets_at(feed_index, filename):
"""
Loads a tweet or chain of tweets at feed_index
and returns them along with the total tweets available.
"""
next_tweets = []
feed_data = LoadFromFile.get_json_dict(filename)
feed_length = len(feed_data)
if feed_index >= feed_length:
return (None, feed_length)
next_tweets.append(feed_data[feed_index])
        while feed_data[feed_index]['chain'] and feed_index + 1 < len(feed_data):
feed_index += 1
next_tweets.append(feed_data[feed_index])
return (next_tweets, feed_length)
@staticmethod
def load_last_feed_index(filename):
"""
Loads the feed index saved from a previous session if
feed stats exist
"""
stats = LoadFromFile.get_json_dict(filename)
if stats:
return stats['feed_index']
return 0 | python | 14 | 0.598558 | 77 | 32.756757 | 37 | Collection of static methods for getting stuff out of files. | class |
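
# Illustrative usage sketch (editor's addition): the feed layout is an assumption,
# beyond the 'chain' key the code above requires.
import json, os, tempfile
feed = [{"text": "hello", "chain": True}, {"text": "world", "chain": False}]
with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as f:
    json.dump(feed, f)
tweets, total = LoadFromFile.tweets_at(0, f.name)   # both entries, total == 2
os.remove(f.name)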
class Solver:
"""
Class to solve sudoku with backtracking
"""
def __init__(self, sudoku: Sudoku):
"""
        :param sudoku: the Sudoku puzzle instance to be solved
"""
self.sudoku = sudoku
def solve(self, state: Sudoku, i=0, j=0):
"""
:param state: object of type Sudoku
:param i: row index
:param j: column index
:return: tuple representing a boolean value to indicate if it is solved and the solved state
"""
number_found = False
for number in range(1, 10):
if state.puzzle[i][j] == 0:
if self.sudoku.check_if_number_ok(i, j, number):
number_found = True
state.puzzle[i][j] = number
if i == 8 and j == 8:
return True, state
elif j == 8:
solvable, ret_state = self.solve(state, i+1, 0)
else:
solvable, ret_state = self.solve(state, i, j+1)
if not solvable:
number_found = False
state.puzzle[i][j] = 0
continue
else:
return True, ret_state
else:
number_found = True
if i == 8 and j == 8:
return True, state
elif j == 8:
return self.solve(state, i + 1, 0)
else:
return self.solve(state, i, j + 1)
if not number_found:
return False, state | python | 19 | 0.422098 | 100 | 31.897959 | 49 |
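
# Illustrative usage sketch (editor's addition). DemoPuzzle is a made-up stand-in
# providing only the two members Solver relies on (`puzzle` and
# check_if_number_ok); the real Sudoku class referenced in the annotations above
# is assumed to offer at least the same interface.
class DemoPuzzle:
    def __init__(self, grid):
        self.puzzle = grid
    def check_if_number_ok(self, row, col, n):
        if n in self.puzzle[row]:                         # row clash
            return False
        if n in (self.puzzle[r][col] for r in range(9)):  # column clash
            return False
        br, bc = 3 * (row // 3), 3 * (col // 3)           # 3x3 box clash
        return all(n != self.puzzle[r][c]
                   for r in range(br, br + 3) for c in range(bc, bc + 3))

demo = DemoPuzzle([[0] * 9 for _ in range(9)])            # empty grid
solved, result = Solver(demo).solve(demo)
print(solved)                                             # True, result.puzzle is filled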
Class to solve sudoku with backtracking
| class |
class GraphCompositeIdAssigner:
"""
Each edge and final node is associated with a compositeid.
Each file node is associated with a group id.
target ids associated with a group id are unique.
A reset also increments the file id.
Reset points are algorithmically determined by detected pixel
    changes for an edge that occurs on more than one path.
    In the future, the algorithm should get the reset points
    from the probe construction where transforms themselves communicate
pixel changes along a path. HOWEVER, that interjects responsibility
in that transformation code. So, instead, post analysis is done here
to maintain some abstraction over efficiency.
"""
def __init__(self, graph, probes):
"""
:param graph:
:param probes:
@type graph: ImageGraph
@type probes : list of Probe
"""
self.graph = graph
self.repository = dict()
self.probe_target = dict()
for probe in probes:
self.repository[probe.edgeId] = dict()
if (probe.edgeId[0], probe.edgeId[1]) not in self.probe_target:
self.probe_target[(probe.edgeId[0], probe.edgeId[1])] = dict()
self.probe_target[(probe.edgeId[0], probe.edgeId[1])][probe.finalNodeId] = np.asarray(probe.targetMaskImage)
self.buildProbeEdgeIds(set([probe.targetBaseNodeId for probe in probes]))
def updateProbes(self, probes,builder):
for probe in probes:
idsPerFinalNode = self.repository[probe.edgeId]
idtuple = idsPerFinalNode[probe.finalNodeId]
probe.composites[builder] = {
'groupid': idtuple[0],
'bit number': idtuple[1]
}
return probes
def __recurseDFSLavelResetPoints(self, nodename, probe_resets):
"""
        Determine reset points. A reset point is the first node, walking up from a final node,
        where two final node masks diverge for the same edge.
        :param nodename: name of the node to start the depth-first traversal from
        :param probe_resets: set collecting the nodes at which the composite group id must reset
        :return: paths from final node up to the current provided node
        @type nodename: str
        @type probe_resets: set of str
"""
successors = self.graph.successors(nodename)
if successors is None or len(successors) == 0:
return [[nodename]]
finalPaths = list()
for successor in self.graph.successors(nodename):
edge = self.graph.get_edge(nodename, successor)
if edge['op'] == 'Donor':
continue
edgeId = (nodename, successor)
childFinalPaths = self.__recurseDFSLavelResetPoints(successor, probe_resets)
last_array = None
last_path = None
for path in childFinalPaths:
current_path = path + [nodename]
finalPaths.append(current_path)
if edgeId in self.probe_target:
imarray = self.probe_target[edgeId][path[0]]
if last_array is not None and (last_array.shape != imarray.shape or sum(sum(abs(last_array - imarray))) != 0):
probe_resets.add([i for i in current_path if i in last_path][0])
last_array = imarray
last_path= current_path
return finalPaths
def __incementGroup(self,group, group_counters, local_counters):
"""
        Manages target id counters for each group.
        Increments the target id if it is not already associated with the given group,
        thus enforcing that a target id is used only once per group.
:param group:
:param group_counters: group associated with IntObject counter
:param local_counters: group associated last target id
:return:
@type group: int
@type group_counters: dict int:IntObject
@type local_counters: dict int:int
"""
if group in local_counters:
return local_counters[group]
if group not in group_counters:
group_counters[group] = IntObject()
local_counters[group] = group_counters[group].increment()
return local_counters[group]
def __recurseDFSProbeEdgeIds(self, nodename, group_counters,groupid, probe_resets):
"""
Each edge and final node is associated with a target id and a group id.
target ids associated with a group id are unique.
        group ids reset if the current node participates in a reset
:param nodename:
        :param group_counters: association of group ids to target id counters
:param groupid: holds the current id value for group id
:param probe_resets: set of reset nodes
:return: list of (final node name, group id)
@type nodename: str
@type group_counters: dict of int:IntObject
@type groupid: IntObject
@type probe_resets: set of str
        @rtype: list of (str, int)
"""
successors = self.graph.successors(nodename)
if successors is None or len(successors) == 0:
return [(nodename, groupid.value)]
finalNodes = set()
qualifies = nodename in probe_resets
for successor in self.graph.successors(nodename):
local_counters = {}
edge = self.graph.get_edge(nodename, successor)
if edge['op'] == 'Donor':
continue
if qualifies:
groupid.increment()
childFinalNodes = self.__recurseDFSProbeEdgeIds(successor, group_counters,groupid, probe_resets)
for finalNodeNameTuple in childFinalNodes:
if (nodename, successor) in self.repository:
self.repository[(nodename, successor)][finalNodeNameTuple[0]] = \
(finalNodeNameTuple[1],
self.__incementGroup(finalNodeNameTuple[1],group_counters,local_counters))
finalNodes.add(finalNodeNameTuple)
return finalNodes
def buildProbeEdgeIds(self, baseNodes):
fileid = IntObject()
for node_name in self.graph.get_nodes():
node = self.graph.get_node(node_name)
if node['nodetype'] == 'base' or node_name in baseNodes:
reset_points = set()
group_counters = {}
self.__recurseDFSLavelResetPoints(node_name, reset_points)
self.__recurseDFSProbeEdgeIds(node_name, group_counters,fileid,reset_points)
fileid.increment() | python | 23 | 0.610152 | 130 | 44.840278 | 144 |
Each edge and final node is associated with a compositeid.
Each file node is associated with a group id.
target ids associated with a group id are unique.
A reset also increments the file id.
Reset points are algorithmically determined by detected pixel
    changes for an edge that occurs on more than one path.
    In the future, the algorithm should get the reset points
    from the probe construction where transforms themselves communicate
pixel changes along a path. HOWEVER, that interjects responsibility
in that transformation code. So, instead, post analysis is done here
to maintain some abstraction over efficiency.
| class |
class LolGamePlayerSnapshotChampionStats:
"""
Champion stats at a given snapshot for a player
"""
abilityHaste: int = None
abilityPower: int = None
armor: int = None
armorPen: int = None
armorPenPercent: int = None
attackDamage: int = None
attackSpeed: int = None
bonusArmorPenPercent: int = None
bonusMagicPenPercent: int = None
ccReduction: int = None
cooldownReduction: int = None
health: int = None
healthMax: int = None
healthRegen: int = None
lifesteal: int = None
magicPen: int = None
magicPenPercent: int = None
magicResist: int = None
movementSpeed: int = None
omnivamp: int = None
physicalVamp: int = None
power: int = None
powerMax: int = None
powerRegen: int = None
spellVamp: int = None | python | 6 | 0.652709 | 51 | 26.1 | 30 |
Champion stats at a given snapshot for a player
| class |
class LolGamePlayerSnapshotDamageStats:
"""
Damage stats at a given snapshot for a player
"""
magicDamageDone: int = None
magicDamageDoneToChampions: int = None
magicDamageTaken: int = None
physicalDamageDone: int = None
physicalDamageDoneToChampions: int = None
physicalDamageTaken: int = None
totalDamageDone: int = None
totalDamageDoneToChampions: int = None
totalDamageTaken: int = None
trueDamageDone: int = None
trueDamageDoneToChampions: int = None
trueDamageTaken: int = None | python | 6 | 0.720588 | 49 | 31.058824 | 17 |
Damage stats at a given snapshot for a player
| class |
class LolGamePlayerSnapshot:
"""
Information about a player at a specific point in the game
Riot's API gives this information with a 1 minute granularity in its MatchTimeline object
"""
timestamp: float # Timestamp of the event expressed in seconds from the game start, with possible ms precision
# Player position, None for the last "snapshot" in Riot's API
position: Position = None
currentGold: int = None # Current gold (at the time of the snapshot)
totalGold: int = None # Total gold earned
xp: int = None # Current experience
level: int = None # Current champion level
cs: int = None # Total number of minions and monsters killed
monstersKilled: int = None # Total monsters (neutral minions) killed
# Whether or not the player is alive at the time of the snapshot
isAlive: bool = None
    # Whether or not each summoner spell is available
spell1Available: bool = None
spell2Available: bool = None
# Ultimate availability
ultimateAvailable: bool = None
# Absolutely no clue what this is supposed to be, match-v5 field
timeEnemySpentControlled: int = None
# New snapshot fields from match-v5
championStats: LolGamePlayerSnapshotChampionStats = field(
default_factory=LolGamePlayerSnapshotChampionStats
)
damageStats: LolGamePlayerSnapshotDamageStats = field(
default_factory=LolGamePlayerSnapshotDamageStats
) | python | 8 | 0.718407 | 115 | 33.690476 | 42 |
Information about a player at a specific point in the game
Riot's API gives this information with a 1 minute granularity in its MatchTimeline object
| class |
class LolGamePlayerEndOfGameStats:
"""End of game stats for a player in a game"""
# As first blood is player-specific, this does not appear in Team objects.
firstBlood: bool = None # True if the player performed the first blood
firstBloodAssist: bool = None # True if the player assisted the first blood kill
# True if the player dealt the last hit to the first turret kill
firstTurret: bool = None
firstTurretAssist: bool = None # True if the player assisted the first turret kill
# True if the player dealt the last hit to the first inhibitor kill
firstInhibitor: bool = None
# True if the player assisted in the first inhibitor kill
firstInhibitorAssist: bool = None
# TODO Add a proper description for every field
# All statistics here refer to end of game stats, so we do not preface them by anything.
kills: int = None
deaths: int = None
assists: int = None
gold: int = None
cs: int = None
level: int = None
# Warding-related statistics
wardsPlaced: int = None
wardsKilled: int = None
visionWardsBought: int = None
visionScore: int = None
# Kills-related statistics
    killingSprees: int = None # Number of times a player has initiated a killing spree (2 or more consecutive kills)
# Largest consecutive kills, above 0 only if it reached at least 2
largestKillingSpree: int = None
doubleKills: int = None
tripleKills: int = None
quadraKills: int = None
pentaKills: int = None
turretKills: int = None
inhibitorKills: int = None
# Using modern Riot nomenclature of monsters for "neutral minions"
monsterKills: int = None
monsterKillsInAlliedJungle: int = None
monsterKillsInEnemyJungle: int = None
# Damage-related statistics
# Total true damage dealt can be calculated by subtracting physical and magic damage to the total
totalDamageDealt: int = None # Includes damage to minions and monsters
physicalDamageDealt: int = None
magicDamageDealt: int = None
# Total true damage dealt to champions can be calculated by subtracting physical and magic damage to the total
totalDamageDealtToChampions: int = None
physicalDamageDealtToChampions: int = None
magicDamageDealtToChampions: int = None
# Total true damage taken can be calculated by subtracting physical and magic damage to the total
totalDamageTaken: int = None
physicalDamageTaken: int = None
magicDamageTaken: int = None
# Other damage statistics
damageDealtToObjectives: int = None
damageDealtToBuildings: int = None
damageDealtToTurrets: int = None
# Spell uses statistics, accessible in match-v5
    # I hate the format, but am not sure where else it would make sense to put it
spell1Casts: int = None
spell2Casts: int = None
spell3Casts: int = None
spell4Casts: int = None
# Really random statistics
longestTimeSpentLiving: int = None # Expressed in seconds
largestCriticalStrike: int = None # Full raw damage of the largest critical strike
goldSpent: int = None # Can be useful to try and identify AFK players?
# The following fields need to have their behaviour properly explained as part of the specification
totalHeal: int = None
totalDamageShieldedOnTeammates: int = None
totalUnitsHealed: int = None
damageSelfMitigated: int = None
totalTimeCCDealt: int = None
timeCCingOthers: int = None
# New match-v5 end of game stats
xp: int = None
bountyLevel: int = None
baronKills: int = None
championTransform: int = None
consumablesPurchased: int = None
detectorWardsPlaced: int = None
dragonKills: int = None
inhibitorTakedowns: int = None
itemsPurchased: int = None
nexusKills: int = None
nexusTakedowns: int = None
objectivesStolen: int = None
objectivesStolenAssists: int = None
sightWardsBoughtInGame: int = None
totalHealsOnTeammates: int = None
totalTimeSpentDead: int = None
turretTakedowns: int = None
# Items are simply a list with the 'slot' field defining which item slot they occupied.
# The list cannot be simply indexed on this 'slot' as many players have empty slots at the end of games.
# List of end of game items
items: List[LolGamePlayerItem] = field(default_factory=list) | python | 9 | 0.714515 | 118 | 37.324561 | 114 | End of game stats for a player in a game | class |
class Teacher_Knowledge:
"""Tracker Based Class
"""
def __init__(self):
self.data = None
def childanalyzer(self):
"""
TODO: Some kind of data Analyzer
"""
pass
def get_features(self):
"""
Returns:
Name of Features
"""
return self.data.columns
def check_feature_length(self):
"""
Returns:
Length of Features
"""
return len(self.data.columns) | python | 10 | 0.490909 | 40 | 18.84 | 25 | Tracker Based Class
| class |
class ParallelGroupBy:
"""Fast parallel group by"""
def __init__(self, keys: torch.Tensor):
n = keys.shape[0]
# sort by key (keep key in GPU device)
relative_key = keys + torch.linspace(0, 0.9, n, dtype=torch.double, device=keys.device)
sorted_keys, sorted_indices = torch.sort(relative_key)
sorted_keys, sorted_indices = sorted_keys.int(), sorted_indices.cpu()
# get group boundary
diff = sorted_keys[1:] - sorted_keys[:-1]
boundary = (diff.nonzero(as_tuple=True)[0] + 1).tolist()
boundary = np.array([0] + boundary + [n])
# get inverse indices
width = np.diff(boundary).max()
groups = len(boundary) - 1
inverse_indices = sorted_indices.new_full((groups, width), n + 1)
for start, end, i in zip(boundary[:-1], boundary[1:], range(groups)):
inverse_indices[i, 0:(end - start)] = sorted_indices[start:end]
# keep inverse_indices in GPU for sort
inverse_indices = inverse_indices.flatten().to(keys.device, non_blocking=True)
inverse_indices = torch.sort(inverse_indices)[1][:n]
# for fast split
take_indices = sorted_indices.new_full((groups, width), -1)
for start, end, i in zip(boundary[:-1], boundary[1:], range(groups)):
take_indices[i, 0:(end - start)] = sorted_indices[start:end]
take_indices = take_indices.to(keys.device, non_blocking=True)
# class members
self._boundary = boundary
self._sorted_indices = take_indices
self._padding_mask = take_indices == -1
self._inverse_indices = inverse_indices
self._width = width
self._groups = groups
self._data_shape = (groups, width)
def split(self, data: torch.Tensor) -> torch.Tensor:
ret = torch.take(data, self._sorted_indices)
assert ret.dtype not in {torch.int8, torch.int16, torch.int32, torch.int64}, \
'tensor cannot be any type of int, recommended to use float32'
ret.masked_fill_(self._padding_mask, np.nan)
return ret
def revert(self, split_data: torch.Tensor, dbg_str='None') -> torch.Tensor:
if tuple(split_data.shape) != self._data_shape:
if tuple(split_data.shape[:2]) == self._data_shape[:2]:
raise ValueError('The downstream needs shape{2}, and the input factor "{1}" is '
'shape{0}. Look like this factor has multiple return values, '
'use slice to select a value before using it, for example: '
'`factor[0]`.'
.format(tuple(split_data.shape), dbg_str, self._data_shape))
else:
raise ValueError('The return data shape{} of Factor `{}` must same as input{}.'
.format(tuple(split_data.shape), dbg_str, self._data_shape))
return torch.take(split_data, self._inverse_indices)
def create(self, dtype, values, nan_fill=np.nan):
ret = self._sorted_indices.new_full(self._sorted_indices.shape, values, dtype=dtype)
ret.masked_fill_(self._padding_mask, nan_fill)
return ret | python | 18 | 0.589241 | 96 | 52.616667 | 60 | Fast parallel group by | class |
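
# Illustrative usage sketch (editor's addition): demean a float series within
# integer-keyed groups, then map the result back to the original row order.
# Tensor.nanmean needs a reasonably recent PyTorch.
import torch
keys = torch.tensor([0, 1, 0, 1, 0])
data = torch.tensor([1.0, 10.0, 2.0, 20.0, 3.0])
grouper = ParallelGroupBy(keys)
split = grouper.split(data)                        # (groups, width), NaN-padded
demeaned = split - split.nanmean(dim=1, keepdim=True)
print(grouper.revert(demeaned, dbg_str='demean'))  # tensor([-1., -5., 0., 5., 1.])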
class SqlExecutor:
"""
This is the executor for the SQL query.
"""
def __init__(self):
self.parser = None
self.config = DbestConfig() # model-related configuration
self.runtime_config = RUNTIME_CONF
self.last_config = None
self.model_catalog = DBEstModelCatalog()
self.init_slaves()
self.init_model_catalog()
self.save_sample = False
# self.table_header = None
self.n_total_records = None
self.use_kde = True
def init_model_catalog(self):
# search the warehouse, and add all available models.
n_model = 0
t1 = datetime.now()
for file_name in os.listdir(self.config.get_config()['warehousedir']):
# load simple models
if file_name.endswith(self.runtime_config["model_suffix"]):
if n_model == 0:
print("start loading pre-existing models.")
with open(self.config.get_config()['warehousedir'] + "/" + file_name, 'rb') as f:
model = dill.load(f)
self.model_catalog.model_catalog[model.init_pickle_file_name(
self.runtime_config)] = model
n_model += 1
# # load group by models
# if os.path.isdir(self.config.get_config()['warehousedir'] + "/" + file_name):
# n_models_in_groupby = 0
# if n_model == 0:
# print("start loading pre-existing models.")
# for model_name in os.listdir(self.config.get_config()['warehousedir'] + "/" + file_name):
# if model_name.endswith(self.runtime_config["model_suffix"]):
# with open(self.config.get_config()['warehousedir'] + "/" + file_name + "/" + model_name, 'rb') as f:
# model = dill.load(f)
# n_models_in_groupby += 1
# if n_models_in_groupby == 1:
# groupby_model_wrapper = GroupByModelWrapper(model.mdl, model.tbl, model.x, model.y,
# model.groupby_attribute,
# x_min_value=model.x_min_value,
# x_max_value=model.x_max_value)
# groupby_model_wrapper.add_simple_model(model)
# self.model_catalog.model_catalog[file_name] = groupby_model_wrapper.models
# n_model += 1
if n_model > 0:
print("Loaded " + str(n_model) + " models.", end=" ")
if self.runtime_config["b_show_latency"]:
t2 = datetime.now()
print("time cost ", (t2-t1).total_seconds(), "s")
else:
print()
def init_slaves(self):
file_name = os.path.join(self.config.config["warehousedir"], "slaves")
if os.path.exists(file_name) and os.path.getsize(file_name) > 0:
with open(file_name, "r") as f:
for line in f:
if "#" not in line:
self.runtime_config["slaves"].add(Slave(line))
if self.runtime_config['v']:
print("Cluster mode is on, slaves are " +
self.runtime_config["slaves"].to_string())
else:
if self.runtime_config['v']:
print("Local mode is on, as no slaves are provided.")
def execute(self, sql):
# b_use_gg=False, n_per_gg=10, result2file=None,n_mdn_layer_node = 10, encoding = "onehot",n_jobs = 4, b_grid_search = True,device = "cpu", n_division = 20
# prepare the parser
if type(sql) == str:
self.parser = DBEstParser()
self.parser.parse(sql)
elif type(sql) == DBEstParser:
self.parser = sql
else:
print("Unrecognized SQL! Please check it!")
exit(-1)
# execute the query
if self.parser.if_nested_query():
warnings.warn("Nested query is currently not supported!")
else:
sql_type = self.parser.get_query_type()
if sql_type == "create": # process create query
# initialize the configure for each model creation.
if self.last_config:
self.config = self.last_config
else:
self.config = DbestConfig()
# DDL, create the model as requested
mdl = self.parser.get_ddl_model_name()
tbl = self.parser.get_from_name()
if self.parser.if_model_need_filter():
self.config.set_parameter("accept_filter", True)
                # remove unnecessary character '
tbl = tbl.replace("'", "")
if os.path.isfile(tbl): # the absolute path is provided
original_data_file = tbl
                else: # the file is in the warehouse directory
original_data_file = self.config.get_config()[
'warehousedir'] + "/" + tbl
yheader = self.parser.get_y()
xheader_continous, xheader_categorical = self.parser.get_x()
ratio = self.parser.get_sampling_ratio()
method = self.parser.get_sampling_method()
table_header = self.config.get_config()['table_header']
# print("table_header", table_header)
if table_header is not None:
table_header = table_header.split(
self.config.get_config()['csv_split_char'])
# make samples
if not self.parser.if_contain_groupby(): # if group by is not involved
sampler = DBEstSampling(
headers=table_header, usecols={"y": yheader, "x_continous": xheader_continous, "x_categorical": xheader_categorical, "gb": None})
else:
groupby_attribute = self.parser.get_groupby_value()
sampler = DBEstSampling(headers=table_header, usecols={
"y": yheader, "x_continous": xheader_continous, "x_categorical": xheader_categorical, "gb": groupby_attribute})
# print(self.config)
if os.path.exists(os.path.join(self.config.get_config()['warehousedir'], mdl + self.runtime_config["model_suffix"])):
print(
"Model {0} exists in the warehouse, please use"
" another model name to train it.".format(mdl))
return
# if self.parser.if_contain_groupby():
# groupby_attribute = self.parser.get_groupby_value()
# if os.path.exists(self.config['warehousedir'] + "/" + mdl + "_groupby_" + groupby_attribute):
# print(
# "Model {0} exists in the warehouse, please use"
# " another model name to train it.".format(mdl))
# return
print("Start creating model " + mdl)
time1 = datetime.now()
if self.save_sample:
sampler.make_sample(
original_data_file, ratio, method, split_char=self.config.get_config()[
'csv_split_char'],
file2save=self.config.get_config()['warehousedir'] +
"/" + mdl + '.csv',
num_total_records=self.n_total_records)
else:
sampler.make_sample(
original_data_file, ratio, method, split_char=self.config.get_config()[
'csv_split_char'],
num_total_records=self.n_total_records)
# set the n_total_point and scaling factor for each model.
# self.config.set_parameter(
# "n_total_point", sampler.n_total_point)
# self.config.set_parameter(
# "scaling_factor", sampler.scaling_factor)
# print("scaling_factor is ", sampler.scaling_factor)
if not self.parser.if_contain_groupby(): # if group by is not involved
# n_total_point = sampler.n_total_point
# xys = sampler.getyx(yheader, xheader_continous)
# simple_model_wrapper = SimpleModelTrainer(mdl, tbl, xheader_continous, yheader,
# n_total_point, ratio, config=self.config.copy()).fit_from_df(
# xys, self.runtime_config)
# reg = simple_model_wrapper.reg
# density = simple_model_wrapper.density
# n_sample_point = int(simple_model_wrapper.n_sample_point)
# n_total_point = int(simple_model_wrapper.n_total_point)
# x_min_value = float(simple_model_wrapper.x_min_value)
# x_max_value = float(simple_model_wrapper.x_max_value)
# query_engine = QueryEngine(mdl, reg, density, n_sample_point,
# n_total_point, x_min_value, x_max_value, xheader_continous[
# 0],
# self.config)
sampler.sample.sampledf["dummy_gb"] = "dummy"
sampler.sample.usecols = {"y": yheader, "x_continous": xheader_continous,
"x_categorical": xheader_categorical, "gb": "dummy_gb"}
n_total_point, xys = sampler.get_groupby_frequency_data()
# if not n_total_point['if_contain_x_categorical']:
n_total_point.pop("if_contain_x_categorical")
kdeModelWrapper = KdeModelTrainer(
mdl, tbl, xheader_continous[0], yheader,
groupby_attribute=["dummy_gb"],
groupby_values=list(
n_total_point.keys()),
n_total_point=n_total_point,
x_min_value=-np.inf, x_max_value=np.inf,
config=self.config.copy()).fit_from_df(
xys["data"], self.runtime_config, network_size="large")
qe_mdn = MdnQueryEngine(
kdeModelWrapper, config=self.config.copy())
qe_mdn.serialize2warehouse(
self.config.get_config()['warehousedir'], self.runtime_config)
self.model_catalog.add_model_wrapper(
qe_mdn, self.runtime_config)
else: # if group by is involved in the query
if self.config.get_config()['reg_type'] == "qreg":
xys = sampler.getyx(yheader, xheader_continous)
n_total_point = get_group_count_from_table(
original_data_file, groupby_attribute, sep=self.config.get_config()[
'csv_split_char'],
headers=table_header)
n_sample_point = get_group_count_from_df(
xys, groupby_attribute)
groupby_model_wrapper = GroupByModelTrainer(mdl, tbl, xheader_continous, yheader, groupby_attribute,
n_total_point, n_sample_point,
x_min_value=-np.inf, x_max_value=np.inf,
config=self.config.copy()).fit_from_df(
xys, self.runtime_config)
groupby_model_wrapper.serialize2warehouse(
self.config.get_config()['warehousedir'] + "/" + groupby_model_wrapper.dir)
self.model_catalog.model_catalog[groupby_model_wrapper.dir] = groupby_model_wrapper.models
else: # "mdn"
xys = sampler.getyx(
yheader, xheader_continous, groupby=groupby_attribute)
# xys[groupby_attribute] = pd.to_numeric(xys[groupby_attribute], errors='coerce')
# xys=xys.dropna(subset=[yheader, xheader,groupby_attribute])
# n_total_point = get_group_count_from_table(
# original_data_file, groupby_attribute, sep=',',#self.config['csv_split_char'],
# headers=self.table_header)
if isinstance(ratio, str):
frequency_file = self.config.get_config()[
'warehousedir'] + "/" + ratio
# "/num_of_points.csv"
if os.path.exists(frequency_file):
n_total_point = get_group_count_from_summary_file(
frequency_file, sep=',')
n_total_point_sample, xys = sampler.get_groupby_frequency_data()
n_total_point["if_contain_x_categorical"] = n_total_point_sample["if_contain_x_categorical"]
else:
raise FileNotFoundError(
"scaling factor should come from the " +
ratio + " in the warehouse folder, as"
" stated in the SQL. However, the file is not found.")
else:
n_total_point, xys = sampler.get_groupby_frequency_data()
# print(n_total_point)
# for cases when the data file is treated as a sample, we need to scale up the frequency for each group.
if ratio > 1:
file_size = sampler.n_total_point
ratio = float(ratio)/file_size
# if 0 < ratio < 1:
scaled_n_total_point = {}
if "if_contain_x_categorical" in n_total_point:
scaled_n_total_point["if_contain_x_categorical"] = n_total_point.pop(
"if_contain_x_categorical")
if "categorical_distinct_values" in n_total_point:
scaled_n_total_point["categorical_distinct_values"] = n_total_point.pop(
"categorical_distinct_values")
if "x_categorical_columns" in n_total_point:
scaled_n_total_point["x_categorical_columns"] = n_total_point.pop(
"x_categorical_columns")
for key in n_total_point:
# print("key", key, n_total_point[key])
if not isinstance(n_total_point[key], dict):
scaled_n_total_point[key] = n_total_point[key]/ratio
else:
scaled_n_total_point[key] = {}
for sub_key in n_total_point[key]:
scaled_n_total_point[key][sub_key] = n_total_point[key][sub_key]/ratio
n_total_point = scaled_n_total_point
# print("scaled_n_total_point", scaled_n_total_point)
# no categorical x attributes
if not n_total_point['if_contain_x_categorical']:
if not self.config.get_config()["b_use_gg"]:
n_total_point.pop(
"if_contain_x_categorical")
# xys.pop("if_contain_x_categorical")
kdeModelWrapper = KdeModelTrainer(
mdl, tbl, xheader_continous[0], yheader,
groupby_attribute=groupby_attribute,
groupby_values=list(
n_total_point.keys()),
n_total_point=n_total_point,
x_min_value=-np.inf, x_max_value=np.inf,
config=self.config.copy()).fit_from_df(
xys["data"], self.runtime_config, network_size=None)
qe_mdn = MdnQueryEngine(
kdeModelWrapper, config=self.config.copy())
qe_mdn.serialize2warehouse(
self.config.get_config()['warehousedir'], self.runtime_config)
# kdeModelWrapper.serialize2warehouse()
self.model_catalog.add_model_wrapper(
qe_mdn, self.runtime_config)
else:
# print("n_total_point ", n_total_point)
queryEngineBundle = MdnQueryEngineGoGs(
config=self.config.copy()).fit(xys["data"], groupby_attribute,
n_total_point, mdl, tbl,
xheader_continous[0], yheader,
self.runtime_config) # n_per_group=n_per_gg,n_mdn_layer_node = n_mdn_layer_node,encoding = encoding,b_grid_search = b_grid_search
self.model_catalog.add_model_wrapper(
queryEngineBundle, self.runtime_config)
queryEngineBundle.serialize2warehouse(
self.config.get_config()['warehousedir'], self.runtime_config)
else: # x has categorical attributes
# if not self.config.get_config()["b_use_gg"]:
# use a single model to support categorical conditions.
if self.config.config["one_model"]:
qe = MdnQueryEngineXCategoricalOneModel(
self.config.copy())
usecols = {
"y": yheader, "x_continous": xheader_continous,
"x_categorical": xheader_categorical, "gb": groupby_attribute}
useCols = UseCols(usecols)
# get the training data from samples.
gbs, xs, ys = useCols.get_gb_x_y_cols_for_one_model()
gbs_data, xs_data, ys_data = sampler.sample.get_columns_from_original_sample(
gbs, xs, ys)
n_total_point = sampler.sample.get_frequency_of_categorical_columns_for_gbs(
groupby_attribute, xheader_categorical)
# print("n_total_point-----------before",
# n_total_point)
# print("ratio is ", ratio)
scaled_n_total_point = {}
for key in n_total_point:
scaled_n_total_point[key] = {}
for sub_key in n_total_point[key]:
scaled_n_total_point[key][sub_key] = n_total_point[key][sub_key]/ratio
n_total_point = scaled_n_total_point
# print("n_total_point-----------after",
# n_total_point)
# raise
qe.fit(mdl, tbl, gbs_data, xs_data, ys_data, n_total_point, usecols=usecols,
runtime_config=self.runtime_config)
else:
qe = MdnQueryEngineXCategorical(
self.config.copy())
qe.fit(mdl, tbl, xys, n_total_point, usecols={
"y": yheader, "x_continous": xheader_continous,
"x_categorical": xheader_categorical, "gb": groupby_attribute}, runtime_config=self.runtime_config
) # device=device, encoding=encoding, b_grid_search=b_grid_search
qe.serialize2warehouse(
self.config.get_config()['warehousedir'], self.runtime_config)
self.model_catalog.add_model_wrapper(
qe, self.runtime_config)
# else:
# raise ValueError(
# "GoG support for categorical attributes is not supported.")
qe.serialize2warehouse(
self.config.get_config()['warehousedir'], self.runtime_config)
self.model_catalog.add_model_wrapper(
qe, self.runtime_config)
time2 = datetime.now()
t = (time2 - time1).seconds
if self.runtime_config['b_show_latency']:
print("time cost: " + str(t) + "s.")
print("------------------------")
# rest config
self.last_config = None
return
elif sql_type == "select": # process SELECT query
start_time = datetime.now()
predictions = None
# DML, provide the prediction using models
mdl = self.parser.get_from_name()
gb_to_print, [
func, yheader, distinct_condition] = self.parser.get_dml_aggregate_function_and_variable()
if self.parser.if_where_exists():
print("OK")
where_conditions = self.parser.get_dml_where_categorical_equal_and_range()
# xheader, x_lb, x_ub = self.parser.get_dml_where_categorical_equal_and_range()
model = self.model_catalog.model_catalog[mdl +
self.runtime_config["model_suffix"]]
x_header_density = model.density_column
# print("where_conditions", where_conditions)
[x_lb, x_ub] = [where_conditions[2][x_header_density][i]
for i in [0, 1]]
filter_dbest = dict(where_conditions[2])
filter_dbest = [filter_dbest[next(iter(filter_dbest))][i]
for i in [0, 1]]
# print("where_conditions",where_conditions)
# print("filter_dbest",filter_dbest)
predictions = model.predicts(func, x_lb, x_ub, where_conditions,
self.runtime_config, groups=None, filter_dbest=filter_dbest)
# predictions = model.predict_one_pass(
# func, x_lb, x_ub, n_jobs=n_jobs)
elif func == "var":
print("var!!")
model = self.model_catalog.model_catalog[mdl +
self.runtime_config["model_suffix"]]
x_header_density = model.density_column
# print(x_header_density)
predictions = model.predicts("var",runtime_config=self.runtime_config)
# return predictions
else:
print(
"support for query without where clause is not implemented yet! abort!")
return
# if not self.parser.if_contain_groupby(): # if group by is not involved in the query
# simple_model_wrapper = self.model_catalog.model_catalog[get_pickle_file_name(
# mdl)]
# reg = simple_model_wrapper.reg
# density = simple_model_wrapper.density
# n_sample_point = int(simple_model_wrapper.n_sample_point)
# n_total_point = int(simple_model_wrapper.n_total_point)
# x_min_value = float(simple_model_wrapper.x_min_value)
# x_max_value = float(simple_model_wrapper.x_max_value)
# query_engine = QueryEngine(reg, density, n_sample_point,
# n_total_point, x_min_value, x_max_value,
# self.config)
# p, t = query_engine.predict(func, x_lb=x_lb, x_ub=x_ub)
# print("OK")
# print(p)
# if self.config.get_config()['verbose']:
# print("time cost: " + str(t))
# print("------------------------")
# return p, t
# else: # if group by is involved in the query
# if self.config.get_config()['reg_type'] == "qreg":
# start = datetime.now()
# predictions = {}
# groupby_attribute = self.parser.get_groupby_value()
# groupby_key = mdl + "_groupby_" + groupby_attribute
# for group_value, model_wrapper in self.model_catalog.model_catalog[groupby_key].items():
# reg = model_wrapper.reg
# density = model_wrapper.density
# n_sample_point = int(model_wrapper.n_sample_point)
# n_total_point = int(model_wrapper.n_total_point)
# x_min_value = float(model_wrapper.x_min_value)
# x_max_value = float(model_wrapper.x_max_value)
# query_engine = QueryEngine(reg, density, n_sample_point, n_total_point, x_min_value,
# x_max_value,
# self.config)
# predictions[model_wrapper.groupby_value] = query_engine.predict(
# func, x_lb=x_lb, x_ub=x_ub)[0]
# print("OK")
# for key, item in predictions.items():
# print(key, item)
# else: # use mdn models to give the predictions.
# start = datetime.now()
# # predictions = {}
# groupby_attribute = self.parser.get_groupby_value()
# # no categorical x attributes
# # x_categorical_attributes, x_categorical_values, x_categorical_conditions = self.parser.get_dml_where_categorical_equal_and_range()
# x_categorical_conditions = self.parser.get_dml_where_categorical_equal_and_range()
# # no x categrical attributes, use a single model to predict.
# if not x_categorical_conditions[0]:
# if not self.config.get_config()["b_use_gg"]:
# # qe_mdn = MdnQueryEngine(self.model_catalog.model_catalog[mdl + ".pkl"],
# # self.config)
# where_conditions = self.parser.get_dml_where_categorical_equal_and_range()
# # xheader, x_lb, x_ub = self.parser.get_dml_where_categorical_equal_and_range()
# qe_mdn = self.model_catalog.model_catalog[mdl + ".pkl"]
# x_header_density = qe_mdn.density_column
# [x_lb, x_ub] = [where_conditions[2][x_header_density][i]
# for i in [0, 1]]
# print("OK")
# predictions = qe_mdn.predict_one_pass(func, x_lb=x_lb, x_ub=x_ub,
# n_jobs=n_jobs, ) # result2file=result2file,n_division=n_division
# else:
# qe_mdn = self.model_catalog.model_catalog[mdl + ".pkl"]
# # qe_mdn = MdnQueryEngine(qe_mdn, self.config)
# print("OK")
# predictions = qe_mdn.predicts(func, x_lb=x_lb, x_ub=x_ub,
# n_jobs=n_jobs, )
# else:
# pass
# # print("OK")
# # if not self.config.get_config()["b_use_gg"]:
# # # print("x_categorical_values",
# # # x_categorical_values)
# # # print(",".join(x_categorical_values))
# # filter_dbest = self.parser.get_filter()
# # self.model_catalog.model_catalog[mdl + '.pkl'].predicts(
# # func, x_lb, x_ub, x_categorical_conditions, n_jobs=1, filter_dbest=filter_dbest) # ",".join(x_categorical_values)
# # else:
# # pass
if self.runtime_config['b_show_latency']:
end_time = datetime.now()
time_cost = (end_time - start_time).total_seconds()
print("Time cost: %.4fs." % time_cost)
print("------------------------")
return predictions
elif sql_type == "set": # process SET query
if self.last_config:
self.config = self.last_config
else:
self.config = DbestConfig()
try:
key, value = self.parser.get_set_variable_value()
if key in self.config.get_config():
# check variable value before assignment
if key.lower() == "encoder":
value = value.lower()
if value not in ["onehot", "binary", "embedding"]:
value = "binary"
print(
"encoder is not set to a proper value, use default encoding type: binary.")
self.config.get_config()[key] = value
print("OK, " + key + " is updated.")
else: # if variable is within runtime_config
# check if "device" is set. we need to make usre when GPU is not availabe, cpu is used instead.
if key.lower() == "device":
value = value.lower()
if value in ["cpu", "gpu"]:
if torch.cuda.is_available():
if value == "gpu":
value = "cuda:0"
try:
set_start_method_torch('spawn')
except RuntimeError:
print("Fail to set start method as spawn for pytorch multiprocessing, " +
"use default in advance. (see queryenginemdn "
"for more info.)")
else:
set_start_method_cpu("spawn")
if self.runtime_config["v"]:
print("device is set to " + value)
else:
if value == "gpu":
print(
"GPU is not available, use CPU instead")
value = "cpu"
if value == "cpu":
if self.runtime_config["v"]:
print("device is set to " + value)
else:
print("Only GPU or CPU is supported.")
return
self.runtime_config[key] = value
if key in self.runtime_config:
print("OK, " + key + " is updated.")
else:
print("OK, local variable "+key+" is defined.")
except TypeError:
# self.parser.get_set_variable_value() does not return correctly
print("Parameter is not changed. Please check your SQL!")
# save the config
self.last_config = self.config
return
elif sql_type == "drop": # process DROP query
model_name = self.parser.drop_get_model()
model_path = os.path.join(self.config.get_config(
)["warehousedir"], model_name+self.runtime_config["model_suffix"])
if os.path.isfile(model_path):
os.remove(model_path)
print("OK. model is dropped.")
return True
else:
print("Model does not exist!")
return False
elif sql_type == "show":
print("OK")
t_start = datetime.now()
if self.runtime_config['b_print_to_screen']:
for key in self.model_catalog.model_catalog:
print(key.replace(
self.runtime_config["model_suffix"], ''))
if self.runtime_config["v"]:
t_end = datetime.now()
time_cost = (t_end - t_start).total_seconds()
print("Time cost: %.4fs." % time_cost)
else:
print("Unsupported query type, please check your SQL.")
return
def set_table_counts(self, dic):
self.n_total_records = dic | python | 33 | 0.436209 | 197 | 56.791531 | 614 |
This is the executor for the SQL query.
| class |
class FakeReport:
"""Fake report to return some dummy data."""
def __init__(self, data):
"""Does nothing."""
pass
def run(self):
"""Run some dummy data."""
d = OrderedDict()
d['keya'] = 'value1'
d['keyb'] = 'value2'
return [d]
def columns(self):
"""Return dummy data columns."""
return ['keya', 'keyb'] | python | 9 | 0.491094 | 48 | 22.176471 | 17 | Fake report to return some dummy data. | class |
class AnsibleTestHost:
"""Represents the result of an Ansible task when it got run on a host."""
name: str
"""Name of the host."""
action: str
"""Description of the task that was performed on the host."""
result: AnsibleTestStatus = AnsibleTestStatus.UNKNOWN
"""Result of the task."""
msg: Optional[str] = None
"""Additional information on the task result.""" | python | 9 | 0.665823 | 77 | 38.6 | 10 | Represents the result of an Ansible task when it got run on a host. | class |
class AnsibleTest:
"""Represents an Ansible task that got run as part of a build."""
phase: str
"""Build phase when the task got executed. For example: 'Pre' or 'Run'."""
name: str
"""Name of the task."""
duration: float = 0
"""Time, in seconds, the task took to complete."""
url: Optional[str] = None
"""Page where to find more info of the task's execution."""
hosts: List[AnsibleTestHost] = field(default_factory=lambda: [])
"""Contains the results of the execution of the task on many hosts.""" | python | 10 | 0.64695 | 78 | 44.166667 | 12 | Represents an Ansible task that got run as part of a build. | class |
class ChecksumMixin:
"""
CSV mixin that will create and verify a checksum column in the CSV file
Specify a list checksum_columns in the subclass.
"""
secret = settings.SECRET_KEY
checksum_columns = []
checksum_fieldname = 'csum'
checksum_size = 4
def _get_checksum(self, row):
to_check = ''.join(str(row[key] if row[key] is not None else '') for key in self.checksum_columns)
to_check += self.secret
checksum = hashlib.md5(to_check.encode('utf8')).hexdigest()[:self.checksum_size]
return f'@{checksum!s}'
def preprocess_export_row(self, row):
"""
Set the checksum column in the row.
"""
row[self.checksum_fieldname] = self._get_checksum(row)
def validate_row(self, row):
"""
Verifies that the calculated checksum matches the stored checksum.
"""
if self._get_checksum(row) != row[self.checksum_fieldname]:
raise ValidationError(
_("Checksum mismatch. Required columns cannot be edited: {}").format(
','.join(self.checksum_columns)
)
) | python | 15 | 0.594102 | 106 | 35.0625 | 32 |
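
# Illustrative usage sketch (editor's addition): a minimal subclass with made-up
# column names. Django settings must already be configured, since ChecksumMixin
# reads settings.SECRET_KEY at class-definition time.
class GradeRowChecksum(ChecksumMixin):
    checksum_columns = ['user_id', 'grade']

row = {'user_id': 42, 'grade': 0.95}
checker = GradeRowChecksum()
checker.preprocess_export_row(row)   # adds row['csum'], e.g. '@1a2b'
checker.validate_row(row)            # passes; editing 'user_id' or 'grade' would raise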
CSV mixin that will create and verify a checksum column in the CSV file
Specify a list checksum_columns in the subclass.
| class |
class DeferrableMixin:
"""
Mixin that automatically commits data using celery.
Subclasses should specify `size_to_defer` to tune when to
run the commit synchronously or asynchronously.
Subclasses must override get_unique_path to uniquely identify
this task.
"""
# if the number of rows is greater than size_to_defer,
    # run the task asynchronously. Otherwise, commit immediately.
# 0 means: always run in a celery task
size_to_defer = 0
def get_unique_path(self):
raise NotImplementedError()
def save(self, operation_name=None, operating_user=None):
"""
Save the state of this object to django storage.
Clients may pass an optional ``operating_user`` kwarg to
indicate the ``auth.User`` who is saving this operation state.
Otherwise, the current request's (if any) user will be recorded.
"""
state = self.__dict__.copy()
for k in list(state):
v = state[k]
if k.startswith('_'):
del state[k]
elif isinstance(v, set):
state[k] = list(v)
state['__class__'] = (self.__class__.__module__, self.__class__.__name__)
if not operation_name:
operation_name = 'stage' if self.can_commit else 'commit'
operation = CSVOperation.record_operation(
self,
self.get_unique_path(),
operation_name,
json.dumps(state),
original_filename=state.get('filename', ''),
user=operating_user or get_current_user(),
)
return operation
@classmethod
def load(cls, operation_id, load_subclasses=False):
"""
Load the CSVProcessor from the saved state.
"""
operation = CSVOperation.objects.get(pk=operation_id)
log.info('Loading CSV state %s', operation.data.name)
state = json.load(operation.data)
module_name, classname = state.pop('__class__')
if classname != cls.__name__:
if not load_subclasses:
# this could indicate tampering
raise ValueError(f'{classname!s} != {cls.__name__!s}')
cls = getattr(importlib.import_module(module_name), classname) # pylint: disable=self-cls-assignment
instance = cls(**state)
return instance
@classmethod
def get_deferred_result(cls, result_id):
"""
Return the celery result for the given id.
"""
return AsyncResult(result_id)
def status(self):
"""
Return a status dict.
"""
status = super().status()
status['result_id'] = getattr(self, 'result_id', None)
status['saved_error_id'] = getattr(self, 'saved_error_id', None)
status['waiting'] = bool(status['result_id'])
status.update(getattr(self, '_status', {}))
return status
def preprocess_file(self, reader):
super().preprocess_file(reader)
if self.error_messages:
operation = self.save('error')
self.saved_error_id = operation.id
def commit(self, running_task=None):
"""
Automatically defer the commit to a celery task
if the number of rows is greater than self.size_to_defer
"""
if running_task or len(self.stage) <= self.size_to_defer:
# Either an async task is already in process,
# or the size of the request is small enough to commit synchronously
self.save()
super().commit()
else:
# We'll enqueue an async celery task.
try:
with transaction.atomic():
# We have to make sure that a CSVOperation record
# is created and committed before the task starts,
# because the task will look for that CSVOperation
# in the database outside of the context of the
# current transaction.
operation = self.save()
except DatabaseError:
log.exception("Error saving DeferrableMixin: %s", self)
raise
# Now enqueue the async task.
result = do_deferred_commit.delay(operation.id)
if not result.ready():
self.result_id = result.id
log.info('Queued task %s %r', operation.id, result)
else:
self._status = result.get()
def get_committed_history(self):
"""
Get the history of all committed CSV upload operations.
Returns a list of dictionaries.
"""
all_history = CSVOperation.get_all_history(self, self.get_unique_path())
committed_history = all_history.filter(operation='commit')
history_with_users = CSVOperationSerializer.get_related_queryset(committed_history).order_by('-created')
return CSVOperationSerializer(history_with_users, many=True).data | python | 16 | 0.581046 | 113 | 36.818182 | 132 |
Mixin that automatically commits data using celery.
Subclasses should specify `size_to_defer` to tune when to
run the commit synchronously or asynchronously.
Subclasses must override get_unique_path to uniquely identify
this task.
| class |
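A sketch of how a concrete processor might opt into the deferral behaviour above. The CSVProcessor base class and the course_id field are assumptions about the surrounding library; only size_to_defer and get_unique_path come from the mixin's contract.

# Hypothetical subclass; CSVProcessor is assumed to come from the same library.
class GradeCSVProcessor(DeferrableMixin, CSVProcessor):
    size_to_defer = 100              # small uploads commit synchronously

    def __init__(self, course_id=None, **kwargs):
        self.course_id = course_id
        super().__init__(**kwargs)

    def get_unique_path(self):
        # Required by DeferrableMixin: uniquely identifies saved operations.
        return f'grades/{self.course_id}'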
class CadastreApi:
"""
CadastreApi is a raw connection for the Spain Cadastre API.
    Use CadastreService to get the models associated with cadastre info
"""
def get_info_by_rc(self, rc: str):
"""
Get cadastre information from RC
"""
try:
response = requests.post(
url=BASE_URI + "OVCCallejero.asmx/Consulta_DNPRC",
data={"Provincia": "", "Municipio": "", "RC": rc.upper()},
timeout=TIMEOUT,
)
response.raise_for_status()
xml = response.text
return CadastreXml(xml)
except HTTPError as http_err:
logger.exception(f"HTTPError occurred: {http_err}. RC={rc}")
except Exception as err:
logger.exception(f"Error occurred: {err}. RC={rc}")
def get_info_by_address(
self,
province: str,
municipality: str,
type_road: str,
road: str,
number: str,
block: str,
stairs: str,
floor: str,
door: str,
):
"""
        Get cadastre information from an address
"""
try:
response = requests.post(
url=BASE_URI + "OVCCallejero.asmx/Consulta_DNPLOC",
data={
"Provincia": province,
"Municipio": municipality,
"Sigla": type_road,
"Calle": road,
"Numero": number,
"Bloque": block,
"Escalera": stairs,
"Planta": floor,
"Puerta": door,
},
timeout=TIMEOUT,
)
response.raise_for_status()
xml = response.text
return CadastreXml(xml)
        except HTTPError as http_err:
            logger.exception(
                f"HTTPError occurred: {http_err}. "
                f"Address={road} {number}, {municipality} ({province})"
            )
        except Exception as err:
            logger.exception(
                f"Error occurred: {err}. "
                f"Address={road} {number}, {municipality} ({province})"
            )
def get_plot_location(self, plot_rc: str):
"""
Get location from plot RC
"""
try:
response = requests.post(
url=BASE_URI + "OVCCoordenadas.asmx/Consulta_CPMRC",
data={
"Provincia": "",
"Municipio": "",
"RC": plot_rc.upper(),
"SRS": SRS_CODE,
},
timeout=TIMEOUT,
)
response.raise_for_status()
xml = response.text
return LocationXml(xml)
except HTTPError as http_err:
logger.exception(f"HTTPError occurred: {http_err}. RC={plot_rc}")
except Exception as err:
logger.exception(f"Error occurred: {err}. RC={plot_rc}")
def get_roads(self, province: str, muncipality: str):
"""
Get all roads from municipality
"""
try:
response = requests.post(
url=BASE_URI + "OVCCallejero.asmx/ConsultaVia",
data={
"Provincia": province,
"Municipio": muncipality,
"TipoVia": "",
"NombreVia": "",
},
timeout=300,
)
response.raise_for_status()
xml = response.text
return RoadsXml(xml)
except HTTPError as http_err:
logger.exception(
f"HTTPError occurred: {http_err}. Province={province} Municipality={muncipality}"
)
except Exception as err:
logger.exception(
f"Error occurred: {err}. Province={province} Municipality={muncipality}"
) | python | 16 | 0.475554 | 97 | 32.428571 | 112 |
CadastreApi is a raw connection for the Spain Cadastre API.
    Use CadastreService to get the models associated with cadastre info
| class |
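A brief usage sketch for the wrapper above. The cadastral reference is a made-up placeholder, and the sketch deliberately stops at the returned CadastreXml/LocationXml objects, since their attributes are defined elsewhere.

api = CadastreApi()

# Look up a parcel by cadastral reference (placeholder value).
info = api.get_info_by_rc("9872023VH5797S0001WX")
if info is not None:                 # the wrapper returns None after logging errors
    location = api.get_plot_location("9872023VH5797S")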
class UserAccount:
"""
    Class that generates new instances of user accounts.
"""
account_list = [] # Empty account list
    # __init__ method is defined at the bottom of this class
def save_account(self):
'''
save_account method saves account objects into account_list
'''
UserAccount.account_list.append(self)
def delete_account(self):
'''
delete_account method deletes a saved account from the account_list
'''
UserAccount.account_list.remove(self)
@classmethod
def find_by_username(cls,userName):
'''
        Method that takes in a username and returns an account that matches that username.
Args:
userName: User Name to search for
Returns :
Account of person that matches the user name.
'''
for account in cls.account_list:
if account.userName == userName:
return account
@classmethod
def account_exist(cls,userName):
'''
Method that checks if an account exists from the account list.
Args:
            userName: User Name to check for existence
Returns :
Boolean: True or false depending if the account exists
'''
for account in cls.account_list:
if account.userName == userName:
return True
return False
@classmethod
def display_accounts(cls):
'''
method that returns the account list
'''
return cls.account_list
@classmethod
def copy_userName(cls,userName):
account_found = UserAccount.find_by_username(userName)
pyperclip.copy(account_found.userName)
def __init__(self,userName,Pword):
# docstring removed for simplicity
self.userName = userName
self.Pword = Pword | python | 11 | 0.589799 | 89 | 25.342857 | 70 |
    Class that generates new instances of user accounts.
| class |
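A short usage sketch for the class above; it assumes pyperclip is importable and a clipboard is available, which copy_userName needs.

account = UserAccount("jdoe", "s3cr3t")
account.save_account()

if UserAccount.account_exist("jdoe"):
    found = UserAccount.find_by_username("jdoe")
    UserAccount.copy_userName("jdoe")    # copies the user name to the clipboard
print(UserAccount.display_accounts())    # list of saved UserAccount objects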
class PaginatedQuery:
"""
Represents the paginated results of a query.
Attributes:
        animes: The [anime](./anime.md)s received.
        characters: The [character](./character.md)s received.
        staffs: The [staff](./staff.md)s received.
        studios: The [studio](./studio.md)s received.
        users: The [user](./user.md)s received.
"""
def __init__(self, client, payload):
self.animes: t.List[Anime] = []
self.characters: t.List[Character] = []
self.staffs: t.List[Staff] = []
self.studios: t.List[Studio] = []
self.users: t.List[User] = []
self._payload = payload
self._page = -1
self._client = client
self._fill_lists()
self.pag_type = (
self.animes or self.characters or self.staffs or self.users or self.studios
)
def _fill_lists(self):
list_types = {
"media": (Anime, "animes"),
"characters": (Character, "characters"),
"staff": (Staff, "staffs"),
"users": (User, "users"),
"studios": (Studio, "studios"),
}
for item_type in self._payload["data"]["Page"]:
if item_type in list_types:
class_type = list_types[item_type][0]
setattr(
self,
list_types[item_type][1],
[
class_type(self._client, data)
for data in self._payload["data"]["Page"][item_type]
],
)
self._type = list_types[item_type]
def find(self, iterable: Iterable, check: t.Callable) -> list:
"""
This method returns a list of results that pass the check function from an iterable
Args:
iterable (Iterable): The iterable to search through
check (t.Callable): The check an item needs to pass
Returns:
A list of results that pass the check function
```python
        print(paginator.find(paginator.animes, lambda a: a.status == "FINISHED"))
```
"""
return [item for item in iterable if check(item)]
def walk(self, attribute: str, iterable: t.Iterable = None) -> list:
"""
This method walks through the paginator and returns the attribute that was passed in.
Args:
attribute (str): The attribute to look for and ultimately return
            iterable (t.Iterable): The iterable to walk through. Defaults to whichever result list is populated
        Returns:
            A list of the requested attribute taken from each item in the iterable
"""
iterable = iterable or self.pag_type
return [getattr(item, attribute) for item in iterable]
def from_id(
self, id: int
) -> t.Optional[t.Union[Anime, Character, Staff, User, Studio]]:
"""
        This method looks through all the items that were received
        and returns the instance that corresponds to the given id.
Args:
id (int): The id to search for.
Returns:
The instance that corresponds to the given id.
"""
search_from = (
self.animes or self.characters or self.staffs or self.users or self.studios
)
res = self.find(search_from, lambda item: item.id == id) or None
if res is not None:
return res[0]
async def next(self):
return await self.__anext__()
def __repr__(self) -> str:
return f"<PaginatedQuery type={self._type[1]}>"
def __aiter__(self) -> PaginatedQuery:
return self
async def __anext__(self) -> t.Union[Anime, Character, Staff, User, Studio]:
self._page += 1
if self._page >= len(self.pag_type):
            raise StopAsyncIteration
return self.pag_type[self._page] | python | 18 | 0.55668 | 118 | 33.982301 | 113 |
Represents the paginated results of a query.
Attributes:
        animes: The [anime](./anime.md)s received.
        characters: The [character](./character.md)s received.
        staffs: The [staff](./staff.md)s received.
        studios: The [studio](./studio.md)s received.
        users: The [user](./user.md)s received.
| class |
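A sketch of consuming the paginator above. It assumes `paginator` was produced elsewhere by the client, and that the contained Anime objects expose `status` and `title` attributes (the `status == "FINISHED"` check is taken from the docstring example; `title` is an assumption).

# Assume `paginator` is a PaginatedQuery returned by the surrounding client.
finished = paginator.find(paginator.animes, lambda a: a.status == "FINISHED")
titles = paginator.walk("title")     # defaults to whichever result list is populated
first = paginator.from_id(1)

async def print_all(paginator):
    # Asynchronous iteration yields the page's items one by one.
    async for item in paginator:
        print(item)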
class Misspell:
"""Detect misspelled words."""
def __init__(self, config):
"""Setup whitelist."""
try:
options = toml.load(config)
except FileNotFoundError:
options = {}
self.sensitive = options.get('sensitive', [])
self.insensitive = options.get('insensitive', [])
def is_misspelled(self, token):
"""Detect if token is misspelled."""
if (
token.like_url
or token.like_num
or token.like_email
or token.text in self.sensitive
or token.lower_ in self.insensitive
):
return False
return token.is_oov | python | 12 | 0.533923 | 57 | 28.521739 | 23 | Detect misspelled words. | class |
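A short sketch of driving the checker above with spaCy. The pipeline name and whitelist file are placeholders; a model with word vectors (for example en_core_web_md) is assumed so that token.is_oov is meaningful.

import spacy

nlp = spacy.load("en_core_web_md")      # placeholder pipeline with vectors
checker = Misspell("whitelist.toml")    # a missing file just means no whitelist
doc = nlp("Thiss sentence contains a mispelled word.")
print([t.text for t in doc if checker.is_misspelled(t)])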
class Flux:
"""Contain a flux of a fluxogram."""
def __init__(self, name, grid_size, from_storage, to_storage, amount=0):
"""Initialize a flux.
Arguments are:
- name: name of the flux
- grid_size: grid size of the diagram
- from_storage: storage the flux is originating from
- to_storage: storage the flux is going into
- amount: how much stuff fluxes.
"""
self.name = name
self.from_storage = from_storage
self.to_storage = to_storage
self.amount = amount
self.grid_size = grid_size
(self.x_start, self.y_start, self.x_end, self.y_end, self.d_x,
self.d_y, self.dire) = (self.calc_start_end_dx_dy())
def update_flux(self, amount):
"""Update the amount of the flux."""
self.amount = amount
def calc_start_end_dx_dy(self):
"""Scale the arrows.
Calculate the starting and ending point of an arrow depending on the
order and offset of the starting and ending storages. This helps
        determine the direction of the arrow.
        Returns the start and end xy coordinates of the arrow as tuples.
"""
# arrow pointing to left up
if (self.from_storage.offset > self.to_storage.offset
and self.from_storage.order > self.to_storage.order):
x_start = self.from_storage.x_p + 0.85 * self.grid_size
y_start = self.from_storage.y_p - self.grid_size * 0.5
x_end = self.to_storage.x_p + self.grid_size * 0.65
y_end = self.to_storage.y_p - 0.7 * self.grid_size
d_x = abs(x_start - x_end) * (-1)
d_y = abs(y_start - y_end)
dire = 'lup'
# arrow pointing up
elif (self.from_storage.offset == self.to_storage.offset
and self.from_storage.order > self.to_storage.order):
x_start = self.from_storage.x_p + 0.85 * self.grid_size
y_start = self.from_storage.y_p - 0.5 * self.grid_size
x_end = self.to_storage.x_p + 0.85 * self.grid_size
y_end = self.to_storage.y_p - 0.25 * self.grid_size
d_x = abs(x_start - x_end)
d_y = abs(y_start - y_end)
dire = 'up'
# arrow pointing right up
elif (self.from_storage.offset < self.to_storage.offset
and self.from_storage.order > self.to_storage.order):
x_start = (self.from_storage.x_p + self.grid_size)
y_start = self.from_storage.y_p - 0.5 * self.grid_size
x_end = self.to_storage.x_p + 0.05 * self.grid_size
y_end = self.to_storage.y_p - 0.75 * self.grid_size
d_x = abs(x_start - x_end)
d_y = abs(y_start - y_end)
dire = 'rup'
# arrow pointing right
elif (self.from_storage.offset < self.to_storage.offset
and self.from_storage.order == self.to_storage.order):
x_start = (self.from_storage.x_p + self.grid_size)
y_start = self.from_storage.y_p - 0.8 * self.grid_size
x_end = self.to_storage.x_p + 1.25 * self.grid_size
y_end = self.to_storage.y_p - 0.8 * self.grid_size
d_x = abs(x_start - x_end)
d_y = abs(y_start - y_end)
dire = 'r'
# arrow pointing right down
elif (self.from_storage.offset < self.to_storage.offset
and self.from_storage.order < self.to_storage.order):
x_start = (self.from_storage.x_p + 0.85 * self.grid_size)
y_start = self.from_storage.y_p - 1.12 * self.grid_size
x_end = self.to_storage.x_p + 0.85 * self.grid_size
y_end = self.to_storage.y_p - 0.9 * self.grid_size
d_x = abs(x_start - x_end)
d_y = abs(y_start - y_end) * (-1)
dire = 'rdn'
# arrow pointing down
elif (self.from_storage.offset == self.to_storage.offset
and self.from_storage.order < self.to_storage.order):
x_start = self.from_storage.x_p + 0.8 * self.grid_size
y_start = (self.from_storage.y_p - 1.12 * self.grid_size)
x_end = self.to_storage.x_p + 0.8 * self.grid_size
y_end = self.to_storage.y_p - 1.4 * self.grid_size
d_x = abs(x_start - x_end)
d_y = abs(y_start - y_end) * (-1)
dire = 'dn'
# arrow pointing left down
elif (self.from_storage.offset > self.to_storage.offset
and self.from_storage.order < self.to_storage.order):
x_start = self.from_storage.x_p + 0.75 * self.grid_size
y_start = (self.from_storage.y_p - 1.1 * self.grid_size)
x_end = self.to_storage.x_p + 0.6 * self.grid_size
y_end = self.to_storage.y_p - 0.9 * self.grid_size
d_x = abs(x_start - x_end) * (-1)
d_y = abs(y_start - y_end) * (-1)
dire = 'ldn'
# arrow pointing left
elif (self.from_storage.offset > self.to_storage.offset
and self.from_storage.order == self.to_storage.order):
x_start = self.from_storage.x_p + 0.5 * self.grid_size
y_start = self.from_storage.y_p - 0.75 * self.grid_size
x_end = self.to_storage.x_p + 0.25 * self.grid_size
y_end = self.to_storage.y_p - 0.75 * self.grid_size
d_x = abs(x_start - x_end) * (-1)
d_y = abs(y_start - y_end)
dire = 'l'
        # multiply by 0.75 so there is a gap between storages and arrows
d_x = d_x * 0.75
d_y = d_y * 0.75
return x_start, y_start, x_end, y_end, d_x, d_y, dire | python | 14 | 0.54263 | 76 | 47.42735 | 117 | Contain a flux of a fluxogram. | class |
class Storage:
"""Contain a storage of a fluxogram."""
def __init__(self, name, grid_size, number, amount=0, order=0, offset=0):
"""Initialize a storage.
Arguments are:
- name: name of the storage
- number: consecutive number
- grid_size of the diagram
- amount: how much stuff is in it
        - order: how far down it is in the hierarchy (starts with 0)
        - offset: how much the storage is offset to the left/right
in relationship to the center.
"""
self.name = name
self.amount = amount
self.number = number
self.order = order
self.offset = offset
self.grid_size = grid_size
self.x_p, self.y_p = self.calculate_xy()
def update_storage(self, amount):
"""Update the amount of the storage."""
self.amount = amount
def calculate_xy(self):
"""Provide coordinates of the blocks in the diagram.
Calculate the xy coordinates of the starting point from where
the rectangle is drawn. The additional multiplication by two is
to produce the gaps in the diagram.
"""
x_p = self.offset * self.grid_size * 2
# multiply by -1 to draw the diagram from top to bottom
y_p = self.order * self.grid_size * 2 * -1
return x_p, y_p | python | 11 | 0.571429 | 77 | 36.052632 | 38 | Contain a storage of a fluxogram. | class |
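A minimal sketch wiring the two building blocks above together: two storages at different grid positions and one flux between them; the names, amounts, and grid size are arbitrary.

grid_size = 100
rain = Storage("Rain", grid_size, number=0, amount=50, order=0, offset=0)
soil = Storage("Soil", grid_size, number=1, amount=200, order=1, offset=0)

# Same offset, higher order for the target: the arrow points straight down ('dn').
infiltration = Flux("Infiltration", grid_size, rain, soil, amount=30)
print(infiltration.dire, infiltration.x_start, infiltration.y_start)
infiltration.update_flux(25)             # amounts can be refreshed later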
class PrepareTexts:
'''
    class forms three files:
1) .dict of text file - mapping of words
2) .mm file - vectorised text
3) .lsi - trained model
Arguments:
-------------
path - os path to file
stoplist - list of words
    newNames - name for the files produced by gensim.
n - number of topics
    repo - folder in the working directory where trained models are saved. When specifying a folder, don't forget the trailing '/', e.g. repo='newdata/'
-------------
    The object does its best to save RAM resources and does not store full files in memory.
    Developed for Science Search (Monitoring) purposes.
-------------------------------
TODO: add lda, word2vec models.
-------------------------------
'''
def __init__(self, path, stoplist=stoplist, newNames='processedFiles', n=5, repo=''):
'''
        path to the initial text. It's assumed that separate docs are separated by a newline (\n).
'''
self.path = path
self.stop = stoplist
self.name = newNames
self.dictionary = None
self.n = n
self.corpus = None
self.lsi = None
self.repo = repo
        if repo != '' and not os.path.exists(repo): # create the target directory if it does not exist yet
os.mkdir(repo)
#self.six = __import__('six')
def diction(self):
'''
        Form dictionary from gensim.corpora. The corpora module should be imported.'''
dictionary = corpora.Dictionary(line.lower().split() for line in open(self.path)) # memory friendly way to make dictionary
        stop_ids = [dictionary.token2id[stopword] for stopword in self.stop if stopword in dictionary.token2id] # collect ids of stop words present in the dictionary
        once_ids = [tokenid for tokenid, docfreq in dictionary.dfs.items() if docfreq == 1] # ids of words that appear only once
dictionary.filter_tokens(stop_ids + once_ids) # remove stop words and words that appear only once
self.dictionary = dictionary # make attribute
dictionary.save(self.repo+self.name+'.dict') # save .dict file
return dictionary # return to assign new variable
def bow(self):
'''
        Method for BOW creation. Run after creating the dictionary.
'''
b = MyCorpus(self.path, self.dictionary) # memory friendly way to convert document to bow
corpora.MmCorpus.serialize(self.repo+self.name+'.mm', b) # save to .mm file
self.corpus = b # make attribute
return b # return to assign new variable
def lsi_modeling(self):
'''
LSI model training. Run after corpus creation (bow method).
'''
tfidf = models.TfidfModel(self.corpus) # class initialization
corpus_tfidf = tfidf[self.corpus] # model training
lsi = models.LsiModel(corpus_tfidf, id2word=self.dictionary, num_topics=self.n) # model object creation
lsi.save(self.repo+self.name+'.lsi') # save model as separate file
self.lsi = lsi # make attribute
return lsi # return to assign new variable
def run(self):
self.diction()
self.bow()
self.lsi_modeling() | python | 13 | 0.613329 | 158 | 43.816901 | 71 |
    class forms three files:
1) .dict of text file - mapping of words
2) .mm file - vectorised text
3) .lsi - trained model
Arguments:
-------------
path - os path to file
stoplist - list of words
    newNames - name for the files produced by gensim.
    n - number of topics
    repo - folder in the working directory where trained models are saved. When specifying a folder, don't forget the trailing '/', e.g. repo='newdata/'
-------------
    The object does its best to save RAM resources and does not store full files in memory.
    Developed for Science Search (Monitoring) purposes.
-------------------------------
TODO: add lda, word2vec models.
-------------------------------
| class |
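A usage sketch for the pipeline above. It assumes gensim is installed, that MyCorpus and stoplist are defined in the same module, and that corpus.txt (a placeholder name) holds one document per line.

prep = PrepareTexts("corpus.txt", newNames="articles", n=10, repo="models/")
prep.run()                               # writes models/articles.dict, .mm and .lsi

# The trained pieces stay available on the object for further queries.
print(prep.lsi.print_topics(num_topics=3))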
class PStokenizer:
"""cursor to read a string token by token"""
def __init__(self, data, startstring=None, eattokensep=1, tokenseps=" \t\r\n", tokenstarts="()<>[]{}/%"):
"""creates a cursor for the string data
        startstring is a string at which the cursor should start. The first
        occurrence of startstring is used. When startstring is not in data, an
        exception is raised, otherwise the cursor is set to the position right
        after the startstring. When eattokensep is set, startstring must be
followed by a tokensep and this first tokensep is also consumed.
tokenseps is a string containing characters to be used as token
separators. tokenstarts is a string containing characters which
directly (even without intermediate token separator) start a new token.
"""
self.data = data
if startstring is not None:
self.pos = self.data.index(startstring) + len(startstring)
else:
self.pos = 0
self.tokenseps = tokenseps
self.tokenstarts = tokenstarts
if eattokensep:
if self.data[self.pos] not in self.tokenstarts:
if self.data[self.pos] not in self.tokenseps:
raise ValueError("cursor initialization string is not followed by a token separator")
self.pos += 1
def gettoken(self):
"""get the next token
Leading token separators and comments are silently consumed. The first token
separator after the token is also silently consumed."""
while self.data[self.pos] in self.tokenseps:
self.pos += 1
# ignore comments including subsequent whitespace characters
while self.data[self.pos] == "%":
while self.data[self.pos] not in "\r\n":
self.pos += 1
while self.data[self.pos] in self.tokenseps:
self.pos += 1
startpos = self.pos
while self.data[self.pos] not in self.tokenseps:
# any character in self.tokenstarts ends the token
if self.pos>startpos and self.data[self.pos] in self.tokenstarts:
break
self.pos += 1
result = self.data[startpos:self.pos]
if self.data[self.pos] in self.tokenseps:
self.pos += 1 # consume a single tokensep
return result
def getint(self):
"""get the next token as an integer"""
return int(self.gettoken())
def getbytes(self, count):
"""get the next count bytes"""
startpos = self.pos
self.pos += count
return self.data[startpos: self.pos] | python | 15 | 0.616973 | 109 | 42.672131 | 61 | cursor to read a string token by token | class |
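A small sketch of pulling tokens out of an embedded PostScript-like string with the cursor above; the input data is made up.

data = "%%BeginData\n12 24 moveto\n(hello) show\n"
cursor = PStokenizer(data, startstring="%%BeginData")
x = cursor.getint()           # 12
y = cursor.getint()           # 24
op = cursor.gettoken()        # 'moveto'
print(x, y, op)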
class FileStore:
"""
A file based implementation of a key value store.
"""
def __init__(self,
dbDir,
dbName,
isLineNoKey: bool=False,
storeContentHash: bool=True,
ensureDurability: bool=True,
delimiter="\t",
lineSep="\r\n",
defaultFile=None):
"""
:param dbDir: The directory where the file storing the data would be
present
:param dbName: The name of the file that is used to store the data
:param isLineNoKey: If false then each line has the key followed by a
delimiter followed by the value
:param storeContentHash: Whether to store a hash of the value or not.
Storing hash can make it really fast to compare the value for equality
        :param ensureDurability: Should the file be fsynced after every write.
        This can ensure durability in most of the cases, but makes
writes extremely slow. See testMeasureWriteTime. For frequent writes,
it makes sense to disable flush and fsync on every write
:param delimiter: delimiter between key and value
:param lineSep: line separator - defaults to \r\n
:param defaultFile: file or dir to use for initialization
"""
self.delimiter = delimiter
self.lineSep = lineSep
self.isLineNoKey = isLineNoKey
self.storeContentHash = storeContentHash
self.ensureDurability = ensureDurability
self._defaultFile = defaultFile
def _prepareFiles(self, dbDir, dbName, defaultFile):
if not defaultFile:
return
if not os.path.exists(defaultFile):
errMessage = "File that should be used for " \
"initialization does not exist: {}"\
.format(defaultFile)
logging.warning(errMessage)
raise ValueError(errMessage)
dataLocation = os.path.join(self.dbDir, dbName)
copy = shutil.copy if os.path.isfile(defaultFile) else shutil.copytree
copy(defaultFile, dataLocation)
def _prepareDBLocation(self, dbDir, dbName):
self.dbDir = dbDir
self.dbName = dbName
if not os.path.exists(self.dbDir):
os.makedirs(self.dbDir)
if not os.path.exists(os.path.join(dbDir, dbName)):
self._prepareFiles(dbDir, dbName, self._defaultFile)
def _initDB(self, dbDir, dbName):
self._prepareDBLocation(dbDir, dbName)
# noinspection PyUnresolvedReferences
def put(self, value, key=None):
# If line no is not treated as key then write the key and then the
# delimiter
if not self.isLineNoKey:
if key is None:
raise ValueError("Key must be provided for storing the value")
self.dbFile.write(key)
self.dbFile.write(self.delimiter)
self.dbFile.write(value)
if self.storeContentHash:
self.dbFile.write(self.delimiter)
if isinstance(value, str):
value = value.encode()
hexedHash = sha256(value).hexdigest()
self.dbFile.write(hexedHash)
self.dbFile.write(self.lineSep)
        # A smarter strategy, such as flushing every 2 seconds,
        # every 10 writes, or every 1 KB, may be a better idea.
        # Make sure data gets written to the disk.
# Even flush slows down writes significantly
self.dbFile.flush()
if self.ensureDurability:
# fsync takes too much time on Windows.
            # This is the reason for the slowness of the test_merkle_proof tests on Windows.
# Even on Linux using fsync slows down the test by at least 2
# orders of magnitude. See testMeasureWriteTime
os.fsync(self.dbFile.fileno())
def get(self, key):
for k, v in self.iterator():
if k == key:
return v
def _keyIterator(self, lines, prefix=None):
return self._baseIterator(lines, prefix, True, False)
def _valueIterator(self, lines, prefix=None):
return self._baseIterator(lines, prefix, False, True)
def _keyValueIterator(self, lines, prefix=None):
return self._baseIterator(lines, prefix, True, True)
def _parse_line(self, line, prefix=None, returnKey: bool=True,
returnValue: bool=True, key=None):
if self.isLineNoKey:
k = key
v = line
else:
k, v = line.split(self.delimiter, 1)
if returnValue:
if self.storeContentHash:
value, _ = v.rsplit(self.delimiter, 1)
else:
value = v
if not prefix or k.startswith(prefix):
if returnKey and returnValue:
return k, value
elif returnKey:
return k
elif returnValue:
return value
# noinspection PyUnresolvedReferences
def _baseIterator(self, lines, prefix, returnKey: bool, returnValue: bool):
i = 1
for line in lines:
k = str(i)
yield self._parse_line(line, prefix, returnKey, returnValue, k)
if self.isLineNoKey:
i += 1
def _lines(self):
raise NotImplementedError()
# noinspection PyUnresolvedReferences
def iterator(self, includeKey=True, includeValue=True, prefix=None):
if not (includeKey or includeValue):
raise ValueError("At least one of includeKey or includeValue "
"should be true")
# Move to the beginning of file
self.dbFile.seek(0)
lines = self._lines()
if includeKey and includeValue:
return self._keyValueIterator(lines, prefix=prefix)
elif includeValue:
return self._valueIterator(lines, prefix=prefix)
else:
return self._keyIterator(lines, prefix=prefix)
def is_valid_range(self, start=None, end=None):
assert self.isLineNoKey
if start and end:
assert start <= end
def get_range(self, start=None, end=None):
self.is_valid_range(start, end)
for k, value in self.iterator():
k = int(k)
if (start is None or k >= start) and (end is None or k <= end):
yield k, value
if end is not None and k > end:
break
@property
def lastKey(self):
        # TODO: use the more efficient approach of seeking to the end and moving
        # back until the 2nd newline is encountered (the 1st newline comes up
        # immediately, unless the file is blank), then reading ahead to the
        # delimiter, or splitting the string read so far on the delimiter
k = None
for k, v in self.iterator():
pass
return k
def appendNewLineIfReq(self):
try:
logging.debug("new line check for file: {}".format(self.dbPath))
with open(self.dbPath, 'a+b') as f:
size = f.tell()
if size > 0:
f.seek(-len(self.lineSep), 2) # last character in file
if f.read().decode() != self.lineSep:
linesep = self.lineSep if isinstance(self.lineSep, bytes) else self.lineSep.encode()
f.write(linesep)
logging.debug(
"new line added for file: {}".format(self.dbPath))
except FileNotFoundError:
pass
@property
def numKeys(self):
return sum(1 for l in self.iterator())
# noinspection PyUnresolvedReferences
def close(self):
self.dbFile.close()
# noinspection PyUnresolvedReferences
@property
def closed(self):
return self.dbFile.closed
# noinspection PyUnresolvedReferences
def reset(self):
self.dbFile.truncate(0)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close() | python | 19 | 0.58686 | 108 | 36.077982 | 218 |
A file based implementation of a key value store.
| class |