code | docstring | text |
---|---|---|
def upload(sess_id_or_alias, files):
"""
Upload files to user's home folder.
\b
SESSID: Session ID or its alias given when creating the session.
FILES: Path to upload.
"""
if len(files) < 1:
return
with Session() as session:
try:
print_wait('Uploading files...')
kernel = session.Kernel(sess_id_or_alias)
kernel.upload(files, show_progress=True)
print_done('Uploaded.')
except Exception as e:
print_error(e)
sys.exit(1) | Upload files to user's home folder.
\b
SESSID: Session ID or its alias given when creating the session.
FILES: Path to upload. | Below is the instruction that describes the task:
### Input:
Upload files to user's home folder.
\b
SESSID: Session ID or its alias given when creating the session.
FILES: Path to upload.
### Response:
def upload(sess_id_or_alias, files):
"""
Upload files to user's home folder.
\b
SESSID: Session ID or its alias given when creating the session.
FILES: Path to upload.
"""
if len(files) < 1:
return
with Session() as session:
try:
print_wait('Uploading files...')
kernel = session.Kernel(sess_id_or_alias)
kernel.upload(files, show_progress=True)
print_done('Uploaded.')
except Exception as e:
print_error(e)
sys.exit(1) |
def lon360to180(lon):
"""Convert longitude from (0, 360) to (-180, 180)
"""
if np.any(lon > 360.0) or np.any(lon < 0.0):
print("Warning: lon outside expected range")
lon = wraplon(lon)
#lon[lon > 180.0] -= 360.0
#lon180 = (lon+180) - np.floor((lon+180)/360)*360 - 180
lon = lon - (lon.astype(int) // 180) * 360.0  # floor division so the shift is a whole multiple of 360
return lon | Convert longitude from (0, 360) to (-180, 180) | Below is the instruction that describes the task:
### Input:
Convert longitude from (0, 360) to (-180, 180)
### Response:
def lon360to180(lon):
"""Convert longitude from (0, 360) to (-180, 180)
"""
if np.any(lon > 360.0) or np.any(lon < 0.0):
print("Warning: lon outside expected range")
lon = wraplon(lon)
#lon[lon > 180.0] -= 360.0
#lon180 = (lon+180) - np.floor((lon+180)/360)*360 - 180
lon = lon - (lon.astype(int) // 180) * 360.0  # floor division so the shift is a whole multiple of 360
return lon |
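For reference, a minimal usage sketch of the conversion above (assuming NumPy is imported as np and the input is already inside (0, 360)):
import numpy as np
lon = np.array([10.0, 90.0, 180.0, 270.0, 350.0])
lon180 = lon - (lon.astype(int) // 180) * 360.0  # same arithmetic as the function body
print(lon180)  # -> [ 10.  90. -180.  -90.  -10.]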
def show_error(cls, error=True):
"""
Show `error` around the conspect elements. If the `error` is ``False``,
hide it.
"""
if error:
cls.input_el.style.border = "2px solid red"
cls.conspect_el.style.border = "2px solid red"
cls.subconspect_el.style.border = "2px solid red"
else:
cls.input_el.style.border = "0"
cls.conspect_el.style.border = "0"
cls.subconspect_el.style.border = "0" | Show `error` around the conspect elements. If the `error` is ``False``,
hide it. | Below is the instruction that describes the task:
### Input:
Show `error` around the conspect elements. If the `error` is ``False``,
hide it.
### Response:
def show_error(cls, error=True):
"""
Show `error` around the conspect elements. If the `error` is ``False``,
hide it.
"""
if error:
cls.input_el.style.border = "2px solid red"
cls.conspect_el.style.border = "2px solid red"
cls.subconspect_el.style.border = "2px solid red"
else:
cls.input_el.style.border = "0"
cls.conspect_el.style.border = "0"
cls.subconspect_el.style.border = "0" |
def read_stdout(self):
"""
Reads the standard output of the QEMU process.
Only use when the process has been stopped or has crashed.
"""
output = ""
if self._stdout_file:
try:
with open(self._stdout_file, "rb") as file:
output = file.read().decode("utf-8", errors="replace")
except OSError as e:
log.warning("Could not read {}: {}".format(self._stdout_file, e))
return output | Reads the standard output of the QEMU process.
Only use when the process has been stopped or has crashed. | Below is the instruction that describes the task:
### Input:
Reads the standard output of the QEMU process.
Only use when the process has been stopped or has crashed.
### Response:
def read_stdout(self):
"""
Reads the standard output of the QEMU process.
Only use when the process has been stopped or has crashed.
"""
output = ""
if self._stdout_file:
try:
with open(self._stdout_file, "rb") as file:
output = file.read().decode("utf-8", errors="replace")
except OSError as e:
log.warning("Could not read {}: {}".format(self._stdout_file, e))
return output |
def issuer(self):
"""The certificate issuer field as :py:class:`~django_ca.subject.Subject`."""
return Subject([(s.oid, s.value) for s in self.x509.issuer]) | The certificate issuer field as :py:class:`~django_ca.subject.Subject`. | Below is the instruction that describes the task:
### Input:
The certificate issuer field as :py:class:`~django_ca.subject.Subject`.
### Response:
def issuer(self):
"""The certificate issuer field as :py:class:`~django_ca.subject.Subject`."""
return Subject([(s.oid, s.value) for s in self.x509.issuer]) |
def call(subcommand, args):
"""Call a subcommand passing the args."""
args['<napp>'] = parse_napps(args['<napp>'])
func = getattr(NAppsAPI, subcommand)
func(args) | Call a subcommand passing the args. | Below is the instruction that describes the task:
### Input:
Call a subcommand passing the args.
### Response:
def call(subcommand, args):
"""Call a subcommand passing the args."""
args['<napp>'] = parse_napps(args['<napp>'])
func = getattr(NAppsAPI, subcommand)
func(args) |
def interpret_pixel_data(data, dc, pixel_array, invert=True):
'''Takes the pixel raw data and interprets them. This includes consistency checks and pixel/data matching.
The data has to come from one double column only but can have more than one pixel bit (e.g. TDAC = 5 bit).
Parameters
----------
data : numpy.ndarray
The raw data words.
dc : int
The double column where the data is from.
pixel_array : numpy.ma.ndarray
The masked numpy.ndarrays to be filled. The masked is set to zero for pixels with valid data.
invert : boolean
Invert the read pixel data.
'''
# data validity cut, VR has to follow an AR
index_value = np.where(is_address_record(data))[0] + 1 # assume value record follows address record
index_value = index_value[is_value_record(data[index_value])] # delete all non value records
index_address = index_value - 1 # calculate address record indices that are followed by an value record
# create the pixel address/value arrays
address = get_address_record_address(data[index_address])
value = get_value_record(data[index_address + 1])
# split array for each bit in pixel data, split is done on decreasing address values
address_split = np.array_split(address, np.where(np.diff(address.astype(np.int32)) < 0)[0] + 1)
value_split = np.array_split(value, np.where(np.diff(address.astype(np.int32)) < 0)[0] + 1)
if len(address_split) > 5:
pixel_array.mask[dc * 2, :] = True
pixel_array.mask[dc * 2 + 1, :] = True
logging.warning('Invalid pixel data for DC %d', dc)
return
mask = np.empty_like(pixel_array.data) # BUG in numpy: pixel_array is de-masked if not .data is used
mask[:] = len(address_split)
for bit, (bit_address, bit_value) in enumerate(zip(address_split, value_split)): # loop over all bits of the pixel data
# error output, pixel data is often corrupt for FE-I4A
if len(bit_address) == 0:
logging.warning('No pixel data for DC %d', dc)
continue
if len(bit_address) != 42:
logging.warning('Some pixel data missing for DC %d', dc)
if (np.any(bit_address > 672)):
raise RuntimeError('Pixel data corrupt for DC %d' % dc)
# set pixel that occurred in the data stream
pixel = []
for i in bit_address:
pixel.extend(range(i - 15, i + 1))
pixel = np.array(pixel)
# create bit set array
value_new = bit_value.view(np.uint8) # interpret 32 bit numpy array as uint8 to be able to use bit unpacking; byte unpacking is not supported yet
if invert:
value_new = np.invert(value_new) # read back values are inverted
value_new = np.insert(value_new[::4], np.arange(len(value_new[1::4])), value_new[1::4]) # delete 0 padding
value_bit = np.unpackbits(value_new, axis=0)
if len(address_split) == 5: # detect TDAC data, here the bit order is flipped
bit_set = len(address_split) - bit - 1
else:
bit_set = bit
pixel_array.data[dc * 2, pixel[pixel >= 336] - 336] = np.bitwise_or(pixel_array.data[dc * 2, pixel[pixel >= 336] - 336], np.left_shift(value_bit[pixel >= 336], bit_set))
pixel_array.data[dc * 2 + 1, pixel[pixel < 336]] = np.bitwise_or(pixel_array.data[dc * 2 + 1, pixel[pixel < 336]], np.left_shift(value_bit[pixel < 336], bit_set)[::-1])
mask[dc * 2, pixel[pixel >= 336] - 336] = mask[dc * 2, pixel[pixel >= 336] - 336] - 1
mask[dc * 2 + 1, pixel[pixel < 336]] = mask[dc * 2 + 1, pixel[pixel < 336]] - 1
pixel_array.mask[np.equal(mask, 0)] = False | Takes the pixel raw data and interprets them. This includes consistency checks and pixel/data matching.
The data has to come from one double column only but can have more than one pixel bit (e.g. TDAC = 5 bit).
Parameters
----------
data : numpy.ndarray
The raw data words.
dc : int
The double column where the data is from.
pixel_array : numpy.ma.ndarray
The masked numpy.ndarrays to be filled. The masked is set to zero for pixels with valid data.
invert : boolean
Invert the read pixel data. | Below is the instruction that describes the task:
### Input:
Takes the pixel raw data and interprets them. This includes consistency checks and pixel/data matching.
The data has to come from one double column only but can have more than one pixel bit (e.g. TDAC = 5 bit).
Parameters
----------
data : numpy.ndarray
The raw data words.
dc : int
The double column where the data is from.
pixel_array : numpy.ma.ndarray
The masked numpy.ndarrays to be filled. The masked is set to zero for pixels with valid data.
invert : boolean
Invert the read pixel data.
### Response:
def interpret_pixel_data(data, dc, pixel_array, invert=True):
'''Takes the pixel raw data and interprets them. This includes consistency checks and pixel/data matching.
The data has to come from one double column only but can have more than one pixel bit (e.g. TDAC = 5 bit).
Parameters
----------
data : numpy.ndarray
The raw data words.
dc : int
The double column where the data is from.
pixel_array : numpy.ma.ndarray
The masked numpy.ndarrays to be filled. The masked is set to zero for pixels with valid data.
invert : boolean
Invert the read pixel data.
'''
# data validity cut, VR has to follow an AR
index_value = np.where(is_address_record(data))[0] + 1 # assume value record follows address record
index_value = index_value[is_value_record(data[index_value])] # delete all non value records
index_address = index_value - 1 # calculate address record indices that are followed by an value record
# create the pixel address/value arrays
address = get_address_record_address(data[index_address])
value = get_value_record(data[index_address + 1])
# split array for each bit in pixel data, split is done on decreasing address values
address_split = np.array_split(address, np.where(np.diff(address.astype(np.int32)) < 0)[0] + 1)
value_split = np.array_split(value, np.where(np.diff(address.astype(np.int32)) < 0)[0] + 1)
if len(address_split) > 5:
pixel_array.mask[dc * 2, :] = True
pixel_array.mask[dc * 2 + 1, :] = True
logging.warning('Invalid pixel data for DC %d', dc)
return
mask = np.empty_like(pixel_array.data) # BUG in numpy: pixel_array is de-masked if not .data is used
mask[:] = len(address_split)
for bit, (bit_address, bit_value) in enumerate(zip(address_split, value_split)): # loop over all bits of the pixel data
# error output, pixel data is often corrupt for FE-I4A
if len(bit_address) == 0:
logging.warning('No pixel data for DC %d', dc)
continue
if len(bit_address) != 42:
logging.warning('Some pixel data missing for DC %d', dc)
if (np.any(bit_address > 672)):
raise RuntimeError('Pixel data corrupt for DC %d' % dc)
# set pixel that occurred in the data stream
pixel = []
for i in bit_address:
pixel.extend(range(i - 15, i + 1))
pixel = np.array(pixel)
# create bit set array
value_new = bit_value.view(np.uint8) # interpret 32 bit numpy array as uint8 to be able to use bit unpacking; byte unpacking is not supported yet
if invert:
value_new = np.invert(value_new) # read back values are inverted
value_new = np.insert(value_new[::4], np.arange(len(value_new[1::4])), value_new[1::4]) # delete 0 padding
value_bit = np.unpackbits(value_new, axis=0)
if len(address_split) == 5: # detect TDAC data, here the bit order is flipped
bit_set = len(address_split) - bit - 1
else:
bit_set = bit
pixel_array.data[dc * 2, pixel[pixel >= 336] - 336] = np.bitwise_or(pixel_array.data[dc * 2, pixel[pixel >= 336] - 336], np.left_shift(value_bit[pixel >= 336], bit_set))
pixel_array.data[dc * 2 + 1, pixel[pixel < 336]] = np.bitwise_or(pixel_array.data[dc * 2 + 1, pixel[pixel < 336]], np.left_shift(value_bit[pixel < 336], bit_set)[::-1])
mask[dc * 2, pixel[pixel >= 336] - 336] = mask[dc * 2, pixel[pixel >= 336] - 336] - 1
mask[dc * 2 + 1, pixel[pixel < 336]] = mask[dc * 2 + 1, pixel[pixel < 336]] - 1
pixel_array.mask[np.equal(mask, 0)] = False |
def get_area(self, geojson):
"""Read the first feature from the geojson and return it as a Polygon
object.
"""
geojson = json.load(open(geojson, 'r'))
self.area = Polygon(geojson['features'][0]['geometry']['coordinates'][0]) | Read the first feature from the geojson and return it as a Polygon
object. | Below is the instruction that describes the task:
### Input:
Read the first feature from the geojson and return it as a Polygon
object.
### Response:
def get_area(self, geojson):
"""Read the first feature from the geojson and return it as a Polygon
object.
"""
geojson = json.load(open(geojson, 'r'))
self.area = Polygon(geojson['features'][0]['geometry']['coordinates'][0]) |
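A self-contained sketch of the same parsing step (this assumes Polygon is shapely.geometry.Polygon, which the snippet above does not state explicitly, and parses an in-memory GeoJSON string instead of a file):
import json
from shapely.geometry import Polygon

geojson = json.loads('{"features": [{"geometry": {"coordinates": [[[0, 0], [1, 0], [1, 1], [0, 1], [0, 0]]]}}]}')
area = Polygon(geojson["features"][0]["geometry"]["coordinates"][0])
print(area.area)  # 1.0 for the unit square above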
def debug(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'DEBUG'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.debug("Houston, we have a %s", "thorny problem", exc_info=1)
"""
self._baseLogger.debug(self, self.getExtendedMsg(msg), *args, **kwargs) | Log 'msg % args' with severity 'DEBUG'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.debug("Houston, we have a %s", "thorny problem", exc_info=1) | Below is the the instruction that describes the task:
### Input:
Log 'msg % args' with severity 'DEBUG'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.debug("Houston, we have a %s", "thorny problem", exc_info=1)
### Response:
def debug(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'DEBUG'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.debug("Houston, we have a %s", "thorny problem", exc_info=1)
"""
self._baseLogger.debug(self, self.getExtendedMsg(msg), *args, **kwargs) |
def _ilshift(self, n):
"""Shift bits by n to the left in place. Return self."""
assert 0 < n <= self.len
self._append(Bits(n))
self._truncatestart(n)
return self | Shift bits by n to the left in place. Return self. | Below is the instruction that describes the task:
### Input:
Shift bits by n to the left in place. Return self.
### Response:
def _ilshift(self, n):
"""Shift bits by n to the left in place. Return self."""
assert 0 < n <= self.len
self._append(Bits(n))
self._truncatestart(n)
return self |
def synctree(src, dst, onexist=None):
"""Recursively sync files at directory src to dst
This is more or less equivalent to::
cp -n -R ${src}/ ${dst}/
If a file at the same path exists in src and dst, it is NOT overwritten
in dst. Pass ``onexist`` in order to raise an error on such conditions.
Args:
src (path-like): source directory
dst (path-like): destination directory, does not need to exist
onexist (callable): function to call if file exists at destination,
takes the full path to destination file as only argument
"""
src = pathlib.Path(src).resolve()
dst = pathlib.Path(dst).resolve()
if not src.is_dir():
raise ValueError
if dst.exists() and not dst.is_dir():
raise ValueError
if onexist is None:
def onexist(path): pass  # default no-op takes the destination path, matching the documented callback signature
_synctree(src, dst, onexist) | Recursively sync files at directory src to dst
This is more or less equivalent to::
cp -n -R ${src}/ ${dst}/
If a file at the same path exists in src and dst, it is NOT overwritten
in dst. Pass ``onexist`` in order to raise an error on such conditions.
Args:
src (path-like): source directory
dst (path-like): destination directory, does not need to exist
onexist (callable): function to call if file exists at destination,
takes the full path to destination file as only argument | Below is the instruction that describes the task:
### Input:
Recursively sync files at directory src to dst
This is more or less equivalent to::
cp -n -R ${src}/ ${dst}/
If a file at the same path exists in src and dst, it is NOT overwritten
in dst. Pass ``onexist`` in order to raise an error on such conditions.
Args:
src (path-like): source directory
dst (path-like): destination directory, does not need to exist
onexist (callable): function to call if file exists at destination,
takes the full path to destination file as only argument
### Response:
def synctree(src, dst, onexist=None):
"""Recursively sync files at directory src to dst
This is more or less equivalent to::
cp -n -R ${src}/ ${dst}/
If a file at the same path exists in src and dst, it is NOT overwritten
in dst. Pass ``onexist`` in order to raise an error on such conditions.
Args:
src (path-like): source directory
dst (path-like): destination directory, does not need to exist
onexist (callable): function to call if file exists at destination,
takes the full path to destination file as only argument
"""
src = pathlib.Path(src).resolve()
dst = pathlib.Path(dst).resolve()
if not src.is_dir():
raise ValueError
if dst.exists() and not dst.is_dir():
raise ValueError
if onexist is None:
def onexist(path): pass  # default no-op takes the destination path, matching the documented callback signature
_synctree(src, dst, onexist) |
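A small usage sketch under the documented contract (the directory names are hypothetical; the callback receives the destination path of any file that already exists):
def fail_on_existing(dest_path):
    # abort the sync instead of silently skipping files that already exist
    raise FileExistsError(dest_path)

synctree("assets/source", "build/assets", onexist=fail_on_existing)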
def current_session():
"""
Returns the :class:`Session` for the current driver and app, instantiating one if needed.
Returns:
Session: The :class:`Session` for the current driver and app.
"""
driver = current_driver or default_driver
session_key = "{driver}:{session}:{app}".format(
driver=driver, session=session_name, app=str(id(app)))
session = _session_pool.get(session_key, None)
if session is None:
from capybara.session import Session
session = Session(driver, app)
_session_pool[session_key] = session
return session | Returns the :class:`Session` for the current driver and app, instantiating one if needed.
Returns:
Session: The :class:`Session` for the current driver and app. | Below is the instruction that describes the task:
### Input:
Returns the :class:`Session` for the current driver and app, instantiating one if needed.
Returns:
Session: The :class:`Session` for the current driver and app.
### Response:
def current_session():
"""
Returns the :class:`Session` for the current driver and app, instantiating one if needed.
Returns:
Session: The :class:`Session` for the current driver and app.
"""
driver = current_driver or default_driver
session_key = "{driver}:{session}:{app}".format(
driver=driver, session=session_name, app=str(id(app)))
session = _session_pool.get(session_key, None)
if session is None:
from capybara.session import Session
session = Session(driver, app)
_session_pool[session_key] = session
return session |
def merge_includes(code):
"""Merge all includes recursively."""
pattern = '\#\s*include\s*"(?P<filename>[a-zA-Z0-9\_\-\.\/]+)"'
regex = re.compile(pattern)
includes = []
def replace(match):
filename = match.group("filename")
if filename not in includes:
includes.append(filename)
path = glsl.find(filename)
if not path:
logger.critical('"%s" not found' % filename)
raise RuntimeError("File not found", filename)
text = '\n// --- start of "%s" ---\n' % filename
with open(path) as fh:
text += fh.read()
text += '// --- end of "%s" ---\n' % filename
return text
return ''
# Limit recursion to depth 10
for i in range(10):
if re.search(regex, code):
code = re.sub(regex, replace, code)
else:
break
return code | Merge all includes recursively. | Below is the instruction that describes the task:
### Input:
Merge all includes recursively.
### Response:
def merge_includes(code):
"""Merge all includes recursively."""
pattern = '\#\s*include\s*"(?P<filename>[a-zA-Z0-9\_\-\.\/]+)"'
regex = re.compile(pattern)
includes = []
def replace(match):
filename = match.group("filename")
if filename not in includes:
includes.append(filename)
path = glsl.find(filename)
if not path:
logger.critical('"%s" not found' % filename)
raise RuntimeError("File not found", filename)
text = '\n// --- start of "%s" ---\n' % filename
with open(path) as fh:
text += fh.read()
text += '// --- end of "%s" ---\n' % filename
return text
return ''
# Limit recursion to depth 10
for i in range(10):
if re.search(regex, code):
code = re.sub(regex, replace, code)
else:
break
return code |
async def add_unit(self, count=1, to=None):
"""Add one or more units to this application.
:param int count: Number of units to add
:param str to: Placement directive, e.g.::
'23' - machine 23
'lxc:7' - new lxc container on machine 7
'24/lxc/3' - lxc container 3 or machine 24
If None, a new machine is provisioned.
"""
app_facade = client.ApplicationFacade.from_connection(self.connection)
log.debug(
'Adding %s unit%s to %s',
count, '' if count == 1 else 's', self.name)
result = await app_facade.AddUnits(
application=self.name,
placement=parse_placement(to) if to else None,
num_units=count,
)
return await asyncio.gather(*[
asyncio.ensure_future(self.model._wait_for_new('unit', unit_id))
for unit_id in result.units
]) | Add one or more units to this application.
:param int count: Number of units to add
:param str to: Placement directive, e.g.::
'23' - machine 23
'lxc:7' - new lxc container on machine 7
'24/lxc/3' - lxc container 3 or machine 24
If None, a new machine is provisioned. | Below is the instruction that describes the task:
### Input:
Add one or more units to this application.
:param int count: Number of units to add
:param str to: Placement directive, e.g.::
'23' - machine 23
'lxc:7' - new lxc container on machine 7
'24/lxc/3' - lxc container 3 or machine 24
If None, a new machine is provisioned.
### Response:
async def add_unit(self, count=1, to=None):
"""Add one or more units to this application.
:param int count: Number of units to add
:param str to: Placement directive, e.g.::
'23' - machine 23
'lxc:7' - new lxc container on machine 7
'24/lxc/3' - lxc container 3 or machine 24
If None, a new machine is provisioned.
"""
app_facade = client.ApplicationFacade.from_connection(self.connection)
log.debug(
'Adding %s unit%s to %s',
count, '' if count == 1 else 's', self.name)
result = await app_facade.AddUnits(
application=self.name,
placement=parse_placement(to) if to else None,
num_units=count,
)
return await asyncio.gather(*[
asyncio.ensure_future(self.model._wait_for_new('unit', unit_id))
for unit_id in result.units
]) |
def add_validation_message(self, message):
"""
Adds a message to the messages dict
:param message:
"""
if message.file not in self.messages:
self.messages[message.file] = []
self.messages[message.file].append(message) | Adds a message to the messages dict
:param message: | Below is the instruction that describes the task:
### Input:
Adds a message to the messages dict
:param message:
### Response:
def add_validation_message(self, message):
"""
Adds a message to the messages dict
:param message:
"""
if message.file not in self.messages:
self.messages[message.file] = []
self.messages[message.file].append(message) |
def update(cls, domain, source, dest_add, dest_del):
"""Update a domain mail forward destinations."""
result = None
if dest_add or dest_del:
current_destinations = cls.get_destinations(domain, source)
fwds = current_destinations[:]
if dest_add:
for dest in dest_add:
if dest not in fwds:
fwds.append(dest)
if dest_del:
for dest in dest_del:
if dest in fwds:
fwds.remove(dest)
if ((len(current_destinations) != len(fwds))
or (current_destinations != fwds)):
cls.echo('Updating mail forward %s@%s' % (source, domain))
options = {'destinations': fwds}
result = cls.call('domain.forward.update', domain, source,
options)
return result | Update a domain mail forward destinations. | Below is the instruction that describes the task:
### Input:
Update a domain mail forward destinations.
### Response:
def update(cls, domain, source, dest_add, dest_del):
"""Update a domain mail forward destinations."""
result = None
if dest_add or dest_del:
current_destinations = cls.get_destinations(domain, source)
fwds = current_destinations[:]
if dest_add:
for dest in dest_add:
if dest not in fwds:
fwds.append(dest)
if dest_del:
for dest in dest_del:
if dest in fwds:
fwds.remove(dest)
if ((len(current_destinations) != len(fwds))
or (current_destinations != fwds)):
cls.echo('Updating mail forward %s@%s' % (source, domain))
options = {'destinations': fwds}
result = cls.call('domain.forward.update', domain, source,
options)
return result |
def Print(self, x, data, message, **kwargs): # pylint: disable=invalid-name
"""call tf.Print.
Args:
x: a LaidOutTensor
data: a list of LaidOutTensor
message: a string
**kwargs: keyword arguments to tf.print
Returns:
a LaidOutTensor
"""
tf.logging.info("PlacementMeshImpl::Print")
new_slices = x.tensor_list[:]
with tf.device(self._devices[0]):
new_slices[0] = tf.Print(
new_slices[0], [t for d in data for t in d.tensor_list],
message, **kwargs)
return self.LaidOutTensor(new_slices) | call tf.Print.
Args:
x: a LaidOutTensor
data: a list of LaidOutTensor
message: a string
**kwargs: keyword arguments to tf.print
Returns:
a LaidOutTensor | Below is the instruction that describes the task:
### Input:
call tf.Print.
Args:
x: a LaidOutTensor
data: a list of LaidOutTensor
message: a string
**kwargs: keyword arguments to tf.print
Returns:
a LaidOutTensor
### Response:
def Print(self, x, data, message, **kwargs): # pylint: disable=invalid-name
"""call tf.Print.
Args:
x: a LaidOutTensor
data: a list of LaidOutTensor
message: a string
**kwargs: keyword arguments to tf.print
Returns:
a LaidOutTensor
"""
tf.logging.info("PlacementMeshImpl::Print")
new_slices = x.tensor_list[:]
with tf.device(self._devices[0]):
new_slices[0] = tf.Print(
new_slices[0], [t for d in data for t in d.tensor_list],
message, **kwargs)
return self.LaidOutTensor(new_slices) |
def directory():
"""Construct hitman_dir from os name"""
home = os.path.expanduser('~')
if platform.system() == 'Linux':
hitman_dir = os.path.join(home, '.hitman')
elif platform.system() == 'Darwin':
hitman_dir = os.path.join(home, 'Library', 'Application Support',
'hitman')
elif platform.system() == 'Windows':
hitman_dir = os.path.join(os.environ['appdata'], 'hitman')
else:
hitman_dir = os.path.join(home, '.hitman')
if not os.path.isdir(hitman_dir):
os.mkdir(hitman_dir)
return hitman_dir | Construct hitman_dir from os name | Below is the instruction that describes the task:
### Input:
Construct hitman_dir from os name
### Response:
def directory():
"""Construct hitman_dir from os name"""
home = os.path.expanduser('~')
if platform.system() == 'Linux':
hitman_dir = os.path.join(home, '.hitman')
elif platform.system() == 'Darwin':
hitman_dir = os.path.join(home, 'Library', 'Application Support',
'hitman')
elif platform.system() == 'Windows':
hitman_dir = os.path.join(os.environ['appdata'], 'hitman')
else:
hitman_dir = os.path.join(home, '.hitman')
if not os.path.isdir(hitman_dir):
os.mkdir(hitman_dir)
return hitman_dir |
def __EncodedAttribute_generic_encode_rgb24(self, rgb24, width=0, height=0, quality=0, format=_ImageFormat.RawImage):
"""Internal usage only"""
if not is_seq(rgb24):
raise TypeError("Expected sequence (str, numpy.ndarray, list, tuple "
"or bytearray) as first argument")
is_str = is_pure_str(rgb24)
if is_str:
if not width or not height:
raise ValueError("When giving a string as data, you must also "
"supply width and height")
if np and isinstance(rgb24, np.ndarray):
if rgb24.ndim != 3:
if not width or not height:
raise ValueError("When giving a non 2D numpy array, width and "
"height must be supplied")
if rgb24.nbytes / 3 != width * height:
raise ValueError("numpy array size mismatch")
else:
if rgb24.itemsize != 1:
raise TypeError("Expected numpy array with itemsize == 1")
if not rgb24.flags.c_contiguous:
raise TypeError("Currently, only contiguous, aligned numpy arrays "
"are supported")
if not rgb24.flags.aligned:
raise TypeError("Currently, only contiguous, aligned numpy arrays "
"are supported")
if not is_str and (not width or not height):
height = len(rgb24)
if height < 1:
raise IndexError("Expected sequence with at least one row")
row0 = rgb24[0]
if not is_seq(row0):
raise IndexError("Expected sequence (str, numpy.ndarray, list, tuple or "
"bytearray) inside a sequence")
width = len(row0)
if is_pure_str(row0) or type(row0) == bytearray:
width /= 3
if format == _ImageFormat.RawImage:
self._encode_rgb24(rgb24, width, height)
elif format == _ImageFormat.JpegImage:
self._encode_jpeg_rgb24(rgb24, width, height, quality) | Internal usage only | Below is the instruction that describes the task:
### Input:
Internal usage only
### Response:
def __EncodedAttribute_generic_encode_rgb24(self, rgb24, width=0, height=0, quality=0, format=_ImageFormat.RawImage):
"""Internal usage only"""
if not is_seq(rgb24):
raise TypeError("Expected sequence (str, numpy.ndarray, list, tuple "
"or bytearray) as first argument")
is_str = is_pure_str(rgb24)
if is_str:
if not width or not height:
raise ValueError("When giving a string as data, you must also "
"supply width and height")
if np and isinstance(rgb24, np.ndarray):
if rgb24.ndim != 3:
if not width or not height:
raise ValueError("When giving a non 2D numpy array, width and "
"height must be supplied")
if rgb24.nbytes / 3 != width * height:
raise ValueError("numpy array size mismatch")
else:
if rgb24.itemsize != 1:
raise TypeError("Expected numpy array with itemsize == 1")
if not rgb24.flags.c_contiguous:
raise TypeError("Currently, only contiguous, aligned numpy arrays "
"are supported")
if not rgb24.flags.aligned:
raise TypeError("Currently, only contiguous, aligned numpy arrays "
"are supported")
if not is_str and (not width or not height):
height = len(rgb24)
if height < 1:
raise IndexError("Expected sequence with at least one row")
row0 = rgb24[0]
if not is_seq(row0):
raise IndexError("Expected sequence (str, numpy.ndarray, list, tuple or "
"bytearray) inside a sequence")
width = len(row0)
if is_pure_str(row0) or type(row0) == bytearray:
width /= 3
if format == _ImageFormat.RawImage:
self._encode_rgb24(rgb24, width, height)
elif format == _ImageFormat.JpegImage:
self._encode_jpeg_rgb24(rgb24, width, height, quality) |
def _lonlat_from_geos_angle(x, y, geos_area):
"""Get lons and lats from x, y in projection coordinates."""
h = (geos_area.proj_dict['h'] + geos_area.proj_dict['a']) / 1000
b__ = (geos_area.proj_dict['a'] / geos_area.proj_dict['b']) ** 2
sd = np.sqrt((h * np.cos(x) * np.cos(y)) ** 2 -
(np.cos(y)**2 + b__ * np.sin(y)**2) *
(h**2 - (geos_area.proj_dict['a'] / 1000)**2))
# sd = 0
sn = (h * np.cos(x) * np.cos(y) - sd) / (np.cos(y)**2 + b__ * np.sin(y)**2)
s1 = h - sn * np.cos(x) * np.cos(y)
s2 = sn * np.sin(x) * np.cos(y)
s3 = -sn * np.sin(y)
sxy = np.sqrt(s1**2 + s2**2)
lons = np.rad2deg(np.arctan2(s2, s1)) + geos_area.proj_dict.get('lon_0', 0)
lats = np.rad2deg(-np.arctan2(b__ * s3, sxy))
return lons, lats | Get lons and lats from x, y in projection coordinates. | Below is the instruction that describes the task:
### Input:
Get lons and lats from x, y in projection coordinates.
### Response:
def _lonlat_from_geos_angle(x, y, geos_area):
"""Get lons and lats from x, y in projection coordinates."""
h = (geos_area.proj_dict['h'] + geos_area.proj_dict['a']) / 1000
b__ = (geos_area.proj_dict['a'] / geos_area.proj_dict['b']) ** 2
sd = np.sqrt((h * np.cos(x) * np.cos(y)) ** 2 -
(np.cos(y)**2 + b__ * np.sin(y)**2) *
(h**2 - (geos_area.proj_dict['a'] / 1000)**2))
# sd = 0
sn = (h * np.cos(x) * np.cos(y) - sd) / (np.cos(y)**2 + b__ * np.sin(y)**2)
s1 = h - sn * np.cos(x) * np.cos(y)
s2 = sn * np.sin(x) * np.cos(y)
s3 = -sn * np.sin(y)
sxy = np.sqrt(s1**2 + s2**2)
lons = np.rad2deg(np.arctan2(s2, s1)) + geos_area.proj_dict.get('lon_0', 0)
lats = np.rad2deg(-np.arctan2(b__ * s3, sxy))
return lons, lats |
def _dump_query_timestamps(self, current_time: float):
"""Output the number of GraphQL queries grouped by their query_hash within the last time."""
windows = [10, 11, 15, 20, 30, 60]
print("GraphQL requests:", file=sys.stderr)
for query_hash, times in self._graphql_query_timestamps.items():
print(" {}".format(query_hash), file=sys.stderr)
for window in windows:
reqs_in_sliding_window = sum(t > current_time - window * 60 for t in times)
print(" last {} minutes: {} requests".format(window, reqs_in_sliding_window), file=sys.stderr) | Output the number of GraphQL queries grouped by their query_hash within the last time. | Below is the the instruction that describes the task:
### Input:
Output the number of GraphQL queries grouped by their query_hash within the last time.
### Response:
def _dump_query_timestamps(self, current_time: float):
"""Output the number of GraphQL queries grouped by their query_hash within the last time."""
windows = [10, 11, 15, 20, 30, 60]
print("GraphQL requests:", file=sys.stderr)
for query_hash, times in self._graphql_query_timestamps.items():
print(" {}".format(query_hash), file=sys.stderr)
for window in windows:
reqs_in_sliding_window = sum(t > current_time - window * 60 for t in times)
print(" last {} minutes: {} requests".format(window, reqs_in_sliding_window), file=sys.stderr) |
def secp256k1():
"""
create the secp256k1 curve
"""
GFp = FiniteField(2 ** 256 - 2 ** 32 - 977) # This is P from below... aka FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F
ec = EllipticCurve(GFp, 0, 7)
return ECDSA(ec, ec.point(0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798, 0x483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8), 2 ** 256 - 432420386565659656852420866394968145599) | create the secp256k1 curve | Below is the instruction that describes the task:
### Input:
create the secp256k1 curve
### Response:
def secp256k1():
"""
create the secp256k1 curve
"""
GFp = FiniteField(2 ** 256 - 2 ** 32 - 977) # This is P from below... aka FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F
ec = EllipticCurve(GFp, 0, 7)
return ECDSA(ec, ec.point(0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798, 0x483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8), 2 ** 256 - 432420386565659656852420866394968145599) |
def estimate_tx_operational_gas(self, safe_address: str, data_bytes_length: int):
"""
Estimates the gas for the verification of the signatures and other safe related tasks
before and after executing a transaction.
Calculation will be the sum of:
- Base cost of 15000 gas
- 100 of gas per word of `data_bytes`
- Validate the signatures 5000 * threshold (ecrecover for ecdsa ~= 4K gas)
:param safe_address: Address of the safe
:param data_bytes_length: Length of the data (in bytes, so `len(HexBytes('0x12'))` would be `1`
:return: gas costs per signature * threshold of Safe
"""
threshold = self.retrieve_threshold(safe_address)
return 15000 + data_bytes_length // 32 * 100 + 5000 * threshold | Estimates the gas for the verification of the signatures and other safe related tasks
before and after executing a transaction.
Calculation will be the sum of:
- Base cost of 15000 gas
- 100 of gas per word of `data_bytes`
- Validate the signatures 5000 * threshold (ecrecover for ecdsa ~= 4K gas)
:param safe_address: Address of the safe
:param data_bytes_length: Length of the data (in bytes, so `len(HexBytes('0x12'))` would be `1`
:return: gas costs per signature * threshold of Safe | Below is the instruction that describes the task:
### Input:
Estimates the gas for the verification of the signatures and other safe related tasks
before and after executing a transaction.
Calculation will be the sum of:
- Base cost of 15000 gas
- 100 of gas per word of `data_bytes`
- Validate the signatures 5000 * threshold (ecrecover for ecdsa ~= 4K gas)
:param safe_address: Address of the safe
:param data_bytes_length: Length of the data (in bytes, so `len(HexBytes('0x12'))` would be `1`
:return: gas costs per signature * threshold of Safe
### Response:
def estimate_tx_operational_gas(self, safe_address: str, data_bytes_length: int):
"""
Estimates the gas for the verification of the signatures and other safe related tasks
before and after executing a transaction.
Calculation will be the sum of:
- Base cost of 15000 gas
- 100 of gas per word of `data_bytes`
- Validate the signatures 5000 * threshold (ecrecover for ecdsa ~= 4K gas)
:param safe_address: Address of the safe
:param data_bytes_length: Length of the data (in bytes, so `len(HexBytes('0x12'))` would be `1`
:return: gas costs per signature * threshold of Safe
"""
threshold = self.retrieve_threshold(safe_address)
return 15000 + data_bytes_length // 32 * 100 + 5000 * threshold |
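A worked instance of the formula above with hypothetical numbers (a 2-of-N Safe and 160 bytes of call data):
threshold = 2
data_bytes_length = 160
estimate = 15000 + data_bytes_length // 32 * 100 + 5000 * threshold
print(estimate)  # 15000 + 500 + 10000 = 25500 gas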
def _handle_subscription(self, topics):
"""Handle subscription of topics."""
if not isinstance(topics, list):
topics = [topics]
for topic in topics:
topic_levels = topic.split('/')
try:
qos = int(topic_levels[-2])
except ValueError:
qos = 0
try:
_LOGGER.debug('Subscribing to: %s, qos: %s', topic, qos)
self._sub_callback(topic, self.recv, qos)
except Exception as exception: # pylint: disable=broad-except
_LOGGER.exception(
'Subscribe to %s failed: %s', topic, exception) | Handle subscription of topics. | Below is the instruction that describes the task:
### Input:
Handle subscription of topics.
### Response:
def _handle_subscription(self, topics):
"""Handle subscription of topics."""
if not isinstance(topics, list):
topics = [topics]
for topic in topics:
topic_levels = topic.split('/')
try:
qos = int(topic_levels[-2])
except ValueError:
qos = 0
try:
_LOGGER.debug('Subscribing to: %s, qos: %s', topic, qos)
self._sub_callback(topic, self.recv, qos)
except Exception as exception: # pylint: disable=broad-except
_LOGGER.exception(
'Subscribe to %s failed: %s', topic, exception) |
def pre_build(self, traj, brian_list, network_dict):
"""Pre-builds the connections.
Pre-build is only performed if none of the
relevant parameters is explored and the relevant neuron groups
exist.
:param traj: Trajectory container
:param brian_list:
List of objects passed to BRIAN network constructor.
Adds:
Connections, amount depends on clustering
:param network_dict:
Dictionary of elements shared among the components
Expects:
'neurons_i': Inhibitory neuron group
'neurons_e': Excitatory neuron group
Adds:
Connections, amount depends on clustering
"""
self._pre_build = not _explored_parameters_in_group(traj, traj.parameters.connections)
self._pre_build = (self._pre_build and 'neurons_i' in network_dict and
'neurons_e' in network_dict)
if self._pre_build:
self._build_connections(traj, brian_list, network_dict) | Pre-builds the connections.
Pre-build is only performed if none of the
relevant parameters is explored and the relevant neuron groups
exist.
:param traj: Trajectory container
:param brian_list:
List of objects passed to BRIAN network constructor.
Adds:
Connections, amount depends on clustering
:param network_dict:
Dictionary of elements shared among the components
Expects:
'neurons_i': Inhibitory neuron group
'neurons_e': Excitatory neuron group
Adds:
Connections, amount depends on clustering | Below is the instruction that describes the task:
### Input:
Pre-builds the connections.
Pre-build is only performed if none of the
relevant parameters is explored and the relevant neuron groups
exist.
:param traj: Trajectory container
:param brian_list:
List of objects passed to BRIAN network constructor.
Adds:
Connections, amount depends on clustering
:param network_dict:
Dictionary of elements shared among the components
Expects:
'neurons_i': Inhibitory neuron group
'neurons_e': Excitatory neuron group
Adds:
Connections, amount depends on clustering
### Response:
def pre_build(self, traj, brian_list, network_dict):
"""Pre-builds the connections.
Pre-build is only performed if none of the
relevant parameters is explored and the relevant neuron groups
exist.
:param traj: Trajectory container
:param brian_list:
List of objects passed to BRIAN network constructor.
Adds:
Connections, amount depends on clustering
:param network_dict:
Dictionary of elements shared among the components
Expects:
'neurons_i': Inhibitory neuron group
'neurons_e': Excitatory neuron group
Adds:
Connections, amount depends on clustering
"""
self._pre_build = not _explored_parameters_in_group(traj, traj.parameters.connections)
self._pre_build = (self._pre_build and 'neurons_i' in network_dict and
'neurons_e' in network_dict)
if self._pre_build:
self._build_connections(traj, brian_list, network_dict) |
def _init_map(self, record_types=None, **kwargs):
"""Initialize form map"""
osid_objects.OsidObjectForm._init_map(self, record_types=record_types)
self._my_map['assignedBinIds'] = [str(kwargs['bin_id'])]
self._my_map['group'] = self._group_default
self._my_map['avatarId'] = self._avatar_default | Initialize form map | Below is the instruction that describes the task:
### Input:
Initialize form map
### Response:
def _init_map(self, record_types=None, **kwargs):
"""Initialize form map"""
osid_objects.OsidObjectForm._init_map(self, record_types=record_types)
self._my_map['assignedBinIds'] = [str(kwargs['bin_id'])]
self._my_map['group'] = self._group_default
self._my_map['avatarId'] = self._avatar_default |
def add_tot_length(self, qname, sname, value, sym=True):
"""Add a total length value to self.alignment_lengths."""
self.alignment_lengths.loc[qname, sname] = value
if sym:
self.alignment_lengths.loc[sname, qname] = value | Add a total length value to self.alignment_lengths. | Below is the instruction that describes the task:
### Input:
Add a total length value to self.alignment_lengths.
### Response:
def add_tot_length(self, qname, sname, value, sym=True):
"""Add a total length value to self.alignment_lengths."""
self.alignment_lengths.loc[qname, sname] = value
if sym:
self.alignment_lengths.loc[sname, qname] = value |
def http_query(self, method, path, data={}, params={}, timeout=300):
"""
Make a query to the docker daemon
:param method: HTTP method
:param path: Endpoint in API
:param data: Dictionnary with the body. Will be transformed to a JSON
:param params: Parameters added as a query arg
:param timeout: Timeout
:returns: HTTP response
"""
data = json.dumps(data)
if timeout is None:
timeout = 60 * 60 * 24 * 31 # One month timeout
if path == 'version':
url = "http://docker/v1.12/" + path # API of docker v1.0
else:
url = "http://docker/v" + DOCKER_MINIMUM_API_VERSION + "/" + path
try:
if path != "version": # version is use by check connection
yield from self._check_connection()
if self._session is None or self._session.closed:
connector = self.connector()
self._session = aiohttp.ClientSession(connector=connector)
response = yield from self._session.request(
method,
url,
params=params,
data=data,
headers={"content-type": "application/json", },
timeout=timeout
)
except (aiohttp.ClientResponseError, aiohttp.ClientOSError) as e:
raise DockerError("Docker has returned an error: {}".format(str(e)))
except (asyncio.TimeoutError):
raise DockerError("Docker timeout " + method + " " + path)
if response.status >= 300:
body = yield from response.read()
try:
body = json.loads(body.decode("utf-8"))["message"]
except ValueError:
pass
log.debug("Query Docker %s %s params=%s data=%s Response: %s", method, path, params, data, body)
if response.status == 304:
raise DockerHttp304Error("Docker has returned an error: {} {}".format(response.status, body))
elif response.status == 404:
raise DockerHttp404Error("Docker has returned an error: {} {}".format(response.status, body))
else:
raise DockerError("Docker has returned an error: {} {}".format(response.status, body))
return response | Make a query to the docker daemon
:param method: HTTP method
:param path: Endpoint in API
:param data: Dictionnary with the body. Will be transformed to a JSON
:param params: Parameters added as a query arg
:param timeout: Timeout
:returns: HTTP response | Below is the instruction that describes the task:
### Input:
Make a query to the docker daemon
:param method: HTTP method
:param path: Endpoint in API
:param data: Dictionnary with the body. Will be transformed to a JSON
:param params: Parameters added as a query arg
:param timeout: Timeout
:returns: HTTP response
### Response:
def http_query(self, method, path, data={}, params={}, timeout=300):
"""
Make a query to the docker daemon
:param method: HTTP method
:param path: Endpoint in API
:param data: Dictionnary with the body. Will be transformed to a JSON
:param params: Parameters added as a query arg
:param timeout: Timeout
:returns: HTTP response
"""
data = json.dumps(data)
if timeout is None:
timeout = 60 * 60 * 24 * 31 # One month timeout
if path == 'version':
url = "http://docker/v1.12/" + path # API of docker v1.0
else:
url = "http://docker/v" + DOCKER_MINIMUM_API_VERSION + "/" + path
try:
if path != "version": # version is use by check connection
yield from self._check_connection()
if self._session is None or self._session.closed:
connector = self.connector()
self._session = aiohttp.ClientSession(connector=connector)
response = yield from self._session.request(
method,
url,
params=params,
data=data,
headers={"content-type": "application/json", },
timeout=timeout
)
except (aiohttp.ClientResponseError, aiohttp.ClientOSError) as e:
raise DockerError("Docker has returned an error: {}".format(str(e)))
except (asyncio.TimeoutError):
raise DockerError("Docker timeout " + method + " " + path)
if response.status >= 300:
body = yield from response.read()
try:
body = json.loads(body.decode("utf-8"))["message"]
except ValueError:
pass
log.debug("Query Docker %s %s params=%s data=%s Response: %s", method, path, params, data, body)
if response.status == 304:
raise DockerHttp304Error("Docker has returned an error: {} {}".format(response.status, body))
elif response.status == 404:
raise DockerHttp404Error("Docker has returned an error: {} {}".format(response.status, body))
else:
raise DockerError("Docker has returned an error: {} {}".format(response.status, body))
return response |
def get_editorTab(self, editor):
"""
Returns the **Script_Editor_tabWidget** Widget tab associated with the given editor.
:param editor: Editor to search tab for.
:type editor: Editor
:return: Tab index.
:rtype: Editor
"""
for i in range(self.Script_Editor_tabWidget.count()):
if not self.get_widget(i) == editor:
continue
LOGGER.debug("> Editor '{0}': Tab index '{1}'.".format(editor, i))
return i | Returns the **Script_Editor_tabWidget** Widget tab associated with the given editor.
:param editor: Editor to search tab for.
:type editor: Editor
:return: Tab index.
:rtype: Editor | Below is the instruction that describes the task:
### Input:
Returns the **Script_Editor_tabWidget** Widget tab associated with the given editor.
:param editor: Editor to search tab for.
:type editor: Editor
:return: Tab index.
:rtype: Editor
### Response:
def get_editorTab(self, editor):
"""
Returns the **Script_Editor_tabWidget** Widget tab associated with the given editor.
:param editor: Editor to search tab for.
:type editor: Editor
:return: Tab index.
:rtype: Editor
"""
for i in range(self.Script_Editor_tabWidget.count()):
if not self.get_widget(i) == editor:
continue
LOGGER.debug("> Editor '{0}': Tab index '{1}'.".format(editor, i))
return i |
def gaussian(x, mu, sigma):
"""
Gaussian function of the form :math:`\\frac{1}{\\sqrt{2 \\pi}\\sigma} e^{-\\frac{(x-\\mu)^2}{2\\sigma^2}}`.
.. versionadded:: 1.5
Parameters
----------
x : float
Function variable :math:`x`.
mu : float
Mean of the Gaussian function.
sigma : float
Standard deviation of the Gaussian function.
"""
return _np.exp(-(x-mu)**2/(2*sigma**2)) / (_np.sqrt(2*_np.pi) * sigma) | Gaussian function of the form :math:`\\frac{1}{\\sqrt{2 \\pi}\\sigma} e^{-\\frac{(x-\\mu)^2}{2\\sigma^2}}`.
.. versionadded:: 1.5
Parameters
----------
x : float
Function variable :math:`x`.
mu : float
Mean of the Gaussian function.
sigma : float
Standard deviation of the Gaussian function. | Below is the instruction that describes the task:
### Input:
Gaussian function of the form :math:`\\frac{1}{\\sqrt{2 \\pi}\\sigma} e^{-\\frac{(x-\\mu)^2}{2\\sigma^2}}`.
.. versionadded:: 1.5
Parameters
----------
x : float
Function variable :math:`x`.
mu : float
Mean of the Gaussian function.
sigma : float
Standard deviation of the Gaussian function.
### Response:
def gaussian(x, mu, sigma):
"""
Gaussian function of the form :math:`\\frac{1}{\\sqrt{2 \\pi}\\sigma} e^{-\\frac{(x-\\mu)^2}{2\\sigma^2}}`.
.. versionadded:: 1.5
Parameters
----------
x : float
Function variable :math:`x`.
mu : float
Mean of the Gaussian function.
sigma : float
Standard deviation of the Gaussian function.
"""
return _np.exp(-(x-mu)**2/(2*sigma**2)) / (_np.sqrt(2*_np.pi) * sigma) |
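A quick numerical sanity check of the normalization (standalone NumPy rather than the module's _np alias): summing the density times the grid spacing over a wide range should give roughly 1.
import numpy as np

x = np.linspace(-5.0, 5.0, 2001)
mu, sigma = 0.0, 1.0
pdf = np.exp(-(x - mu) ** 2 / (2 * sigma ** 2)) / (np.sqrt(2 * np.pi) * sigma)
print(pdf.sum() * (x[1] - x[0]))  # ~= 1.0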
def _replicate(n, tensor):
"""Replicate the input tensor n times along a new (major) dimension."""
# TODO(axch) Does this already exist somewhere? Should it get contributed?
multiples = tf.concat([[n], tf.ones_like(tensor.shape)], axis=0)
return tf.tile(tf.expand_dims(tensor, axis=0), multiples) | Replicate the input tensor n times along a new (major) dimension. | Below is the instruction that describes the task:
### Input:
Replicate the input tensor n times along a new (major) dimension.
### Response:
def _replicate(n, tensor):
"""Replicate the input tensor n times along a new (major) dimension."""
# TODO(axch) Does this already exist somewhere? Should it get contributed?
multiples = tf.concat([[n], tf.ones_like(tensor.shape)], axis=0)
return tf.tile(tf.expand_dims(tensor, axis=0), multiples) |
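For intuition, the same replication expressed in NumPy (a sketch of the effect, not the TensorFlow code path above):
import numpy as np

t = np.array([[1, 2], [3, 4]])
replicated = np.tile(t[np.newaxis, ...], (3, 1, 1))  # add a new leading axis, then repeat 3 times
print(replicated.shape)  # (3, 2, 2)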
def determine_file_type(self, z):
"""Determine file type."""
content = z.read('[Content_Types].xml')
with io.BytesIO(content) as b:
encoding = self._analyze_file(b)
if encoding is None:
encoding = 'utf-8'
b.seek(0)
text = b.read().decode(encoding)
soup = bs4.BeautifulSoup(text, 'xml')
for o in soup.find_all('Override'):
name = o.attrs.get('PartName')
for k, v in MIMEMAP.items():
if name.startswith('/{}/'.format(k)):
self.type = v
break
if self.type:
break
self.filepattern = DOC_PARAMS[self.type]['filepattern']
self.namespaces = DOC_PARAMS[self.type]['namespaces']
self.captures = sv.compile(DOC_PARAMS[self.type]['captures'], DOC_PARAMS[self.type]['namespaces']) | Determine file type. | Below is the instruction that describes the task:
### Input:
Determine file type.
### Response:
def determine_file_type(self, z):
"""Determine file type."""
content = z.read('[Content_Types].xml')
with io.BytesIO(content) as b:
encoding = self._analyze_file(b)
if encoding is None:
encoding = 'utf-8'
b.seek(0)
text = b.read().decode(encoding)
soup = bs4.BeautifulSoup(text, 'xml')
for o in soup.find_all('Override'):
name = o.attrs.get('PartName')
for k, v in MIMEMAP.items():
if name.startswith('/{}/'.format(k)):
self.type = v
break
if self.type:
break
self.filepattern = DOC_PARAMS[self.type]['filepattern']
self.namespaces = DOC_PARAMS[self.type]['namespaces']
self.captures = sv.compile(DOC_PARAMS[self.type]['captures'], DOC_PARAMS[self.type]['namespaces']) |
def write_table(self, table):
"""Send DDL to create the specified `table`
:Parameters:
- `table`: an instance of a :py:class:`mysql2pgsql.lib.mysql_reader.MysqlReader.Table` object that represents the table to read/write.
Returns None
"""
table_sql, serial_key_sql = super(PostgresDbWriter, self).write_table(table)
for sql in serial_key_sql + table_sql:
self.execute(sql) | Send DDL to create the specified `table`
:Parameters:
- `table`: an instance of a :py:class:`mysql2pgsql.lib.mysql_reader.MysqlReader.Table` object that represents the table to read/write.
Returns None | Below is the instruction that describes the task:
### Input:
Send DDL to create the specified `table`
:Parameters:
- `table`: an instance of a :py:class:`mysql2pgsql.lib.mysql_reader.MysqlReader.Table` object that represents the table to read/write.
Returns None
### Response:
def write_table(self, table):
"""Send DDL to create the specified `table`
:Parameters:
- `table`: an instance of a :py:class:`mysql2pgsql.lib.mysql_reader.MysqlReader.Table` object that represents the table to read/write.
Returns None
"""
table_sql, serial_key_sql = super(PostgresDbWriter, self).write_table(table)
for sql in serial_key_sql + table_sql:
self.execute(sql) |
def render_pull_base_image(self):
"""Configure pull_base_image"""
phase = 'prebuild_plugins'
plugin = 'pull_base_image'
if self.user_params.parent_images_digests.value:
self.pt.set_plugin_arg(phase, plugin, 'parent_images_digests',
self.user_params.parent_images_digests.value) | Configure pull_base_image | Below is the instruction that describes the task:
### Input:
Configure pull_base_image
### Response:
def render_pull_base_image(self):
"""Configure pull_base_image"""
phase = 'prebuild_plugins'
plugin = 'pull_base_image'
if self.user_params.parent_images_digests.value:
self.pt.set_plugin_arg(phase, plugin, 'parent_images_digests',
self.user_params.parent_images_digests.value) |
def make_pinwheel(radial_std, tangential_std, num_classes, num_per_class, rate,
rs=npr.RandomState(0)):
"""Based on code by Ryan P. Adams."""
rads = np.linspace(0, 2*np.pi, num_classes, endpoint=False)
features = rs.randn(num_classes*num_per_class, 2) \
* np.array([radial_std, tangential_std])
features[:, 0] += 1
labels = np.repeat(np.arange(num_classes), num_per_class)
angles = rads[labels] + rate * np.exp(features[:,0])
rotations = np.stack([np.cos(angles), -np.sin(angles), np.sin(angles), np.cos(angles)])
rotations = np.reshape(rotations.T, (-1, 2, 2))
return np.einsum('ti,tij->tj', features, rotations) | Based on code by Ryan P. Adams. | Below is the instruction that describes the task:
### Input:
Based on code by Ryan P. Adams.
### Response:
def make_pinwheel(radial_std, tangential_std, num_classes, num_per_class, rate,
rs=npr.RandomState(0)):
"""Based on code by Ryan P. Adams."""
rads = np.linspace(0, 2*np.pi, num_classes, endpoint=False)
features = rs.randn(num_classes*num_per_class, 2) \
* np.array([radial_std, tangential_std])
features[:, 0] += 1
labels = np.repeat(np.arange(num_classes), num_per_class)
angles = rads[labels] + rate * np.exp(features[:,0])
rotations = np.stack([np.cos(angles), -np.sin(angles), np.sin(angles), np.cos(angles)])
rotations = np.reshape(rotations.T, (-1, 2, 2))
return np.einsum('ti,tij->tj', features, rotations) |
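A usage sketch, assuming the module-level imports implied by the signature (numpy as np, numpy.random as npr) are present:
data = make_pinwheel(radial_std=0.3, tangential_std=0.05, num_classes=5,
                     num_per_class=100, rate=0.25)
print(data.shape)  # (500, 2): five spiral arms of 100 points each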
def fuse_batchnorm_weights(gamma, beta, mean, var, epsilon):
# https://github.com/Tencent/ncnn/blob/master/src/layer/batchnorm.cpp
""" float sqrt_var = sqrt(var_data[i]);
a_data[i] = bias_data[i] - slope_data[i] * mean_data[i] / sqrt_var;
b_data[i] = slope_data[i] / sqrt_var;
...
ptr[i] = b * ptr[i] + a;
"""
scale = gamma / np.sqrt(var + epsilon)
bias = beta - gamma * mean / np.sqrt(var + epsilon)
return [scale, bias] | float sqrt_var = sqrt(var_data[i]);
a_data[i] = bias_data[i] - slope_data[i] * mean_data[i] / sqrt_var;
b_data[i] = slope_data[i] / sqrt_var;
...
ptr[i] = b * ptr[i] + a; | Below is the instruction that describes the task:
### Input:
float sqrt_var = sqrt(var_data[i]);
a_data[i] = bias_data[i] - slope_data[i] * mean_data[i] / sqrt_var;
b_data[i] = slope_data[i] / sqrt_var;
...
ptr[i] = b * ptr[i] + a;
### Response:
def fuse_batchnorm_weights(gamma, beta, mean, var, epsilon):
# https://github.com/Tencent/ncnn/blob/master/src/layer/batchnorm.cpp
""" float sqrt_var = sqrt(var_data[i]);
a_data[i] = bias_data[i] - slope_data[i] * mean_data[i] / sqrt_var;
b_data[i] = slope_data[i] / sqrt_var;
...
ptr[i] = b * ptr[i] + a;
"""
scale = gamma / np.sqrt(var + epsilon)
bias = beta - gamma * mean / np.sqrt(var + epsilon)
return [scale, bias] |
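A small check that the fused scale/bias reproduce batch normalization (hypothetical per-channel statistics):
import numpy as np

gamma, beta = np.array([1.5]), np.array([0.2])
mean, var, epsilon = np.array([0.3]), np.array([4.0]), 1e-5

scale = gamma / np.sqrt(var + epsilon)
bias = beta - gamma * mean / np.sqrt(var + epsilon)

x = np.array([1.0])
batchnorm = gamma * (x - mean) / np.sqrt(var + epsilon) + beta
assert np.allclose(scale * x + bias, batchnorm)  # the fused b * x + a form matches batchnorm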
def iterate_forever(func, *args, **kwargs):
"""Iterate over a finite iterator forever
When the iterator is exhausted will call the function again to generate a
new iterator and keep iterating.
"""
output = func(*args, **kwargs)
while True:
try:
playlist_item = next(output)
playlist_item.prepare_playback()
yield playlist_item
except StopIteration:
output = func(*args, **kwargs) | Iterate over a finite iterator forever
When the iterator is exhausted will call the function again to generate a
new iterator and keep iterating. | Below is the instruction that describes the task:
### Input:
Iterate over a finite iterator forever
When the iterator is exhausted will call the function again to generate a
new iterator and keep iterating.
### Response:
def iterate_forever(func, *args, **kwargs):
"""Iterate over a finite iterator forever
When the iterator is exhausted will call the function again to generate a
new iterator and keep iterating.
"""
output = func(*args, **kwargs)
while True:
try:
playlist_item = next(output)
playlist_item.prepare_playback()
yield playlist_item
except StopIteration:
output = func(*args, **kwargs) |
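A small sketch of the looping behaviour; Track and fetch_playlist are hypothetical stand-ins, and prepare_playback exists only because the generator above calls it on every item:
class Track:
    def __init__(self, name):
        self.name = name
    def prepare_playback(self):
        pass  # hypothetical no-op; a real playlist item would do its setup here
def fetch_playlist():
    return iter([Track('a'), Track('b')])
looping = iterate_forever(fetch_playlist)
print([next(looping).name for _ in range(5)])  # ['a', 'b', 'a', 'b', 'a']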
def emit(self, event, data=None, room=None, include_self=True,
namespace=None, callback=None):
"""Emit a custom event to one or more connected clients."""
return self.socketio.emit(event, data, room=room,
include_self=include_self,
namespace=namespace or self.namespace,
callback=callback) | Emit a custom event to one or more connected clients. | Below is the the instruction that describes the task:
### Input:
Emit a custom event to one or more connected clients.
### Response:
def emit(self, event, data=None, room=None, include_self=True,
namespace=None, callback=None):
"""Emit a custom event to one or more connected clients."""
return self.socketio.emit(event, data, room=room,
include_self=include_self,
namespace=namespace or self.namespace,
callback=callback) |
def get_storage(self):
'''Get the storage instance.
:return Redis: Redis instance
'''
if self.storage:
return self.storage
self.storage = self.reconnect_redis()
return self.storage | Get the storage instance.
:return Redis: Redis instance | Below is the the instruction that describes the task:
### Input:
Get the storage instance.
:return Redis: Redis instance
### Response:
def get_storage(self):
'''Get the storage instance.
:return Redis: Redis instance
'''
if self.storage:
return self.storage
self.storage = self.reconnect_redis()
return self.storage |
def __envelope(x, hop):
'''Compute the max-envelope of x at a stride/frame length of h'''
return util.frame(x, hop_length=hop, frame_length=hop).max(axis=0) | Compute the max-envelope of x at a stride/frame length of h | Below is the the instruction that describes the task:
### Input:
Compute the max-envelope of x at a stride/frame length of h
### Response:
def __envelope(x, hop):
'''Compute the max-envelope of x at a stride/frame length of h'''
return util.frame(x, hop_length=hop, frame_length=hop).max(axis=0) |
def CSS_setMediaText(self, styleSheetId, range, text):
"""
Function path: CSS.setMediaText
Domain: CSS
Method name: setMediaText
Parameters:
Required arguments:
'styleSheetId' (type: StyleSheetId) -> No description
'range' (type: SourceRange) -> No description
'text' (type: string) -> No description
Returns:
'media' (type: CSSMedia) -> The resulting CSS media rule after modification.
Description: Modifies the rule selector.
"""
assert isinstance(text, (str,)
), "Argument 'text' must be of type '['str']'. Received type: '%s'" % type(
text)
subdom_funcs = self.synchronous_command('CSS.setMediaText', styleSheetId=
styleSheetId, range=range, text=text)
return subdom_funcs | Function path: CSS.setMediaText
Domain: CSS
Method name: setMediaText
Parameters:
Required arguments:
'styleSheetId' (type: StyleSheetId) -> No description
'range' (type: SourceRange) -> No description
'text' (type: string) -> No description
Returns:
'media' (type: CSSMedia) -> The resulting CSS media rule after modification.
Description: Modifies the rule selector. | Below is the the instruction that describes the task:
### Input:
Function path: CSS.setMediaText
Domain: CSS
Method name: setMediaText
Parameters:
Required arguments:
'styleSheetId' (type: StyleSheetId) -> No description
'range' (type: SourceRange) -> No description
'text' (type: string) -> No description
Returns:
'media' (type: CSSMedia) -> The resulting CSS media rule after modification.
Description: Modifies the rule selector.
### Response:
def CSS_setMediaText(self, styleSheetId, range, text):
"""
Function path: CSS.setMediaText
Domain: CSS
Method name: setMediaText
Parameters:
Required arguments:
'styleSheetId' (type: StyleSheetId) -> No description
'range' (type: SourceRange) -> No description
'text' (type: string) -> No description
Returns:
'media' (type: CSSMedia) -> The resulting CSS media rule after modification.
Description: Modifies the rule selector.
"""
assert isinstance(text, (str,)
), "Argument 'text' must be of type '['str']'. Received type: '%s'" % type(
text)
subdom_funcs = self.synchronous_command('CSS.setMediaText', styleSheetId=
styleSheetId, range=range, text=text)
return subdom_funcs |
def batch_iter(data, batch_size, num_epochs):
"""Generates a batch iterator for a dataset."""
data = np.array(data)
data_size = len(data)
num_batches_per_epoch = int(len(data)/batch_size) + 1
for epoch in range(num_epochs):
# Shuffle the data at each epoch
shuffle_indices = np.random.permutation(np.arange(data_size))
shuffled_data = data[shuffle_indices]
for batch_num in range(num_batches_per_epoch):
start_index = batch_num * batch_size
end_index = min((batch_num + 1) * batch_size, data_size)
yield shuffled_data[start_index:end_index] | Generates a batch iterator for a dataset. | Below is the the instruction that describes the task:
### Input:
Generates a batch iterator for a dataset.
### Response:
def batch_iter(data, batch_size, num_epochs):
"""Generates a batch iterator for a dataset."""
data = np.array(data)
data_size = len(data)
num_batches_per_epoch = int(len(data)/batch_size) + 1
for epoch in range(num_epochs):
# Shuffle the data at each epoch
shuffle_indices = np.random.permutation(np.arange(data_size))
shuffled_data = data[shuffle_indices]
for batch_num in range(num_batches_per_epoch):
start_index = batch_num * batch_size
end_index = min((batch_num + 1) * batch_size, data_size)
yield shuffled_data[start_index:end_index] |
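An illustrative run of batch_iter on toy data; note that because num_batches_per_epoch rounds up, the last slice of an epoch can be shorter (or empty when the sizes divide evenly):
import numpy as np
data = list(zip(np.arange(10), np.arange(10) * 2))  # ten toy (x, y) pairs
for batch in batch_iter(data, batch_size=4, num_epochs=1):
    print(batch.shape)  # (4, 2), (4, 2), then (2, 2) for the leftover rows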
def shift(schedule: ScheduleComponent, time: int, name: str = None) -> Schedule:
"""Return schedule shifted by `time`.
Args:
schedule: The schedule to shift
time: The time to shift by
name: Name of shifted schedule. Defaults to name of `schedule`
"""
if name is None:
name = schedule.name
return union((time, schedule), name=name) | Return schedule shifted by `time`.
Args:
schedule: The schedule to shift
time: The time to shift by
name: Name of shifted schedule. Defaults to name of `schedule` | Below is the the instruction that describes the task:
### Input:
Return schedule shifted by `time`.
Args:
schedule: The schedule to shift
time: The time to shift by
name: Name of shifted schedule. Defaults to name of `schedule`
### Response:
def shift(schedule: ScheduleComponent, time: int, name: str = None) -> Schedule:
"""Return schedule shifted by `time`.
Args:
schedule: The schedule to shift
time: The time to shift by
name: Name of shifted schedule. Defaults to name of `schedule`
"""
if name is None:
name = schedule.name
return union((time, schedule), name=name) |
def _get_dvportgroup_dict(pg_ref):
'''
Returns a dictionary with a distributed virutal portgroup data
pg_ref
Portgroup reference
'''
props = salt.utils.vmware.get_properties_of_managed_object(
pg_ref, ['name', 'config.description', 'config.numPorts',
'config.type', 'config.defaultPortConfig'])
pg_dict = {'name': props['name'],
'description': props.get('config.description'),
'num_ports': props['config.numPorts'],
'type': props['config.type']}
if props['config.defaultPortConfig']:
dpg = props['config.defaultPortConfig']
if dpg.vlan and \
isinstance(dpg.vlan,
vim.VmwareDistributedVirtualSwitchVlanIdSpec):
pg_dict.update({'vlan_id': dpg.vlan.vlanId})
pg_dict.update({'out_shaping':
_get_dvportgroup_out_shaping(
props['name'],
props['config.defaultPortConfig'])})
pg_dict.update({'security_policy':
_get_dvportgroup_security_policy(
props['name'],
props['config.defaultPortConfig'])})
pg_dict.update({'teaming':
_get_dvportgroup_teaming(
props['name'],
props['config.defaultPortConfig'])})
return pg_dict | Returns a dictionary with a distributed virutal portgroup data
pg_ref
Portgroup reference | Below is the the instruction that describes the task:
### Input:
Returns a dictionary with a distributed virutal portgroup data
pg_ref
Portgroup reference
### Response:
def _get_dvportgroup_dict(pg_ref):
'''
Returns a dictionary with a distributed virutal portgroup data
pg_ref
Portgroup reference
'''
props = salt.utils.vmware.get_properties_of_managed_object(
pg_ref, ['name', 'config.description', 'config.numPorts',
'config.type', 'config.defaultPortConfig'])
pg_dict = {'name': props['name'],
'description': props.get('config.description'),
'num_ports': props['config.numPorts'],
'type': props['config.type']}
if props['config.defaultPortConfig']:
dpg = props['config.defaultPortConfig']
if dpg.vlan and \
isinstance(dpg.vlan,
vim.VmwareDistributedVirtualSwitchVlanIdSpec):
pg_dict.update({'vlan_id': dpg.vlan.vlanId})
pg_dict.update({'out_shaping':
_get_dvportgroup_out_shaping(
props['name'],
props['config.defaultPortConfig'])})
pg_dict.update({'security_policy':
_get_dvportgroup_security_policy(
props['name'],
props['config.defaultPortConfig'])})
pg_dict.update({'teaming':
_get_dvportgroup_teaming(
props['name'],
props['config.defaultPortConfig'])})
return pg_dict |
def open(self):
"""initialize visit variables"""
self.stats = self.linter.add_stats()
self._returns = []
self._branches = defaultdict(int)
self._stmts = [] | initialize visit variables | Below is the the instruction that describes the task:
### Input:
initialize visit variables
### Response:
def open(self):
"""initialize visit variables"""
self.stats = self.linter.add_stats()
self._returns = []
self._branches = defaultdict(int)
self._stmts = [] |
def write(self, fn=None):
"""copy the zip file from its filename to the given filename."""
fn = fn or self.fn
if not os.path.exists(os.path.dirname(fn)):
os.makedirs(os.path.dirname(fn))
f = open(self.fn, 'rb')
b = f.read()
f.close()
f = open(fn, 'wb')
f.write(b)
f.close() | copy the zip file from its filename to the given filename. | Below is the the instruction that describes the task:
### Input:
copy the zip file from its filename to the given filename.
### Response:
def write(self, fn=None):
"""copy the zip file from its filename to the given filename."""
fn = fn or self.fn
if not os.path.exists(os.path.dirname(fn)):
os.makedirs(os.path.dirname(fn))
f = open(self.fn, 'rb')
b = f.read()
f.close()
f = open(fn, 'wb')
f.write(b)
f.close() |
def qzordered(A,B,crit=1.0):
"Eigenvalues bigger than crit are sorted in the top-left."
TOL = 1e-10
def select(alpha, beta):
return alpha**2>crit*beta**2
[S,T,alpha,beta,U,V] = ordqz(A,B,output='real',sort=select)
eigval = abs(numpy.diag(S)/numpy.diag(T))
return [S,T,U,V,eigval] | Eigenvalues bigger than crit are sorted in the top-left. | Below is the the instruction that describes the task:
### Input:
Eigenvalues bigger than crit are sorted in the top-left.
### Response:
def qzordered(A,B,crit=1.0):
"Eigenvalues bigger than crit are sorted in the top-left."
TOL = 1e-10
def select(alpha, beta):
return alpha**2>crit*beta**2
[S,T,alpha,beta,U,V] = ordqz(A,B,output='real',sort=select)
eigval = abs(numpy.diag(S)/numpy.diag(T))
return [S,T,U,V,eigval] |
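A sanity-check sketch, assuming SciPy's ordqz conventions (A = U @ S @ V.T and B = U @ T @ V.T for real output); the matrices are illustrative:
import numpy as np
A = np.array([[2.0, 1.0],
              [0.0, 0.5]])
B = np.eye(2)
S, T, U, V, eigval = qzordered(A, B, crit=1.0)
print(eigval)  # generalized eigenvalue magnitudes, ~[2.0, 0.5], large ones first
assert np.allclose(U @ S @ V.T, A)
assert np.allclose(U @ T @ V.T, B)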
def get_available_gpus():
"""
Returns a list of string names of all available GPUs
"""
local_device_protos = device_lib.list_local_devices()
return [x.name for x in local_device_protos if x.device_type == 'GPU'] | Returns a list of string names of all available GPUs | Below is the the instruction that describes the task:
### Input:
Returns a list of string names of all available GPUs
### Response:
def get_available_gpus():
"""
Returns a list of string names of all available GPUs
"""
local_device_protos = device_lib.list_local_devices()
return [x.name for x in local_device_protos if x.device_type == 'GPU'] |
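Illustrative only; the output depends on the machine and on TensorFlow exposing device_lib as imported by the surrounding module:
gpus = get_available_gpus()
print(gpus)  # e.g. ['/device:GPU:0', '/device:GPU:1'] on a two-GPU box, [] on CPU-only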
def get_configdir(name):
"""
Return the string representing the configuration directory.
The directory is chosen as follows:
1. If the ``name.upper() + CONFIGDIR`` environment variable is supplied,
choose that.
2a. On Linux, choose `$HOME/.config`.
2b. On other platforms, choose `$HOME/.matplotlib`.
3. If the chosen directory exists, use that as the
configuration directory.
4. A directory: return None.
Notes
-----
This function is taken from the matplotlib [1] module
References
----------
[1]: http://matplotlib.org/api/"""
configdir = os.environ.get('%sCONFIGDIR' % name.upper())
if configdir is not None:
return os.path.abspath(configdir)
p = None
h = _get_home()
if ((sys.platform.startswith('linux') or
sys.platform.startswith('darwin')) and h is not None):
p = os.path.join(h, '.config/' + name)
elif h is not None:
p = os.path.join(h, '.' + name)
if not os.path.exists(p):
os.makedirs(p)
return p | Return the string representing the configuration directory.
The directory is chosen as follows:
1. If the ``name.upper() + CONFIGDIR`` environment variable is supplied,
choose that.
2a. On Linux, choose `$HOME/.config`.
2b. On other platforms, choose `$HOME/.matplotlib`.
3. If the chosen directory exists, use that as the
configuration directory.
4. A directory: return None.
Notes
-----
This function is taken from the matplotlib [1] module
References
----------
[1]: http://matplotlib.org/api/ | Below is the the instruction that describes the task:
### Input:
Return the string representing the configuration directory.
The directory is chosen as follows:
1. If the ``name.upper() + CONFIGDIR`` environment variable is supplied,
choose that.
2a. On Linux, choose `$HOME/.config`.
2b. On other platforms, choose `$HOME/.matplotlib`.
3. If the chosen directory exists, use that as the
configuration directory.
4. A directory: return None.
Notes
-----
This function is taken from the matplotlib [1] module
References
----------
[1]: http://matplotlib.org/api/
### Response:
def get_configdir(name):
"""
Return the string representing the configuration directory.
The directory is chosen as follows:
1. If the ``name.upper() + CONFIGDIR`` environment variable is supplied,
choose that.
2a. On Linux, choose `$HOME/.config`.
2b. On other platforms, choose `$HOME/.matplotlib`.
3. If the chosen directory exists, use that as the
configuration directory.
4. A directory: return None.
Notes
-----
This function is taken from the matplotlib [1] module
References
----------
[1]: http://matplotlib.org/api/"""
configdir = os.environ.get('%sCONFIGDIR' % name.upper())
if configdir is not None:
return os.path.abspath(configdir)
p = None
h = _get_home()
if ((sys.platform.startswith('linux') or
sys.platform.startswith('darwin')) and h is not None):
p = os.path.join(h, '.config/' + name)
elif h is not None:
p = os.path.join(h, '.' + name)
if not os.path.exists(p):
os.makedirs(p)
return p |
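A hypothetical call; the application name and resulting path are assumptions, and a MYAPPCONFIGDIR environment variable would take precedence if set:
cfg_dir = get_configdir('myapp')
print(cfg_dir)  # e.g. /home/alice/.config/myapp on Linux, ~/.myapp on other platforms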
def exclude_range(self, field, start="*", stop="*", inclusive=True, new_group=False):
"""Exclude a ``field:[some range]`` term from the query.
Matches will not have any ``value`` in the range in the ``field``.
Arguments:
field (str): The field to check for the value.
The field must be namespaced according to Elasticsearch rules
using the dot syntax.
For example, ``"mdf.source_name"`` is the ``source_name`` field
of the ``mdf`` dictionary.
start (str or int): The starting value, or ``None`` for no lower bound.
**Default:** ``None``.
stop (str or int): The ending value, or ``None`` for no upper bound.
**Default:** ``None``.
inclusive (bool): If ``True``, the ``start`` and ``stop`` values will be excluded
from the search.
If ``False``, the ``start`` and ``stop`` values will not be excluded
from the search.
**Default:** ``True``.
new_group (bool): If ``True``, will separate the term into a new parenthetical group.
If ``False``, will not.
**Default:** ``False``.
Returns:
SearchHelper: Self
"""
# Accept None as *
if start is None:
start = "*"
if stop is None:
stop = "*"
# *-* is the same as field doesn't exist
if start == "*" and stop == "*":
return self.match_not_exists(field, new_group=new_group)
if inclusive:
value = "[" + str(start) + " TO " + str(stop) + "]"
else:
value = "{" + str(start) + " TO " + str(stop) + "}"
return self.exclude_field(field, value, new_group=new_group) | Exclude a ``field:[some range]`` term from the query.
Matches will not have any ``value`` in the range in the ``field``.
Arguments:
field (str): The field to check for the value.
The field must be namespaced according to Elasticsearch rules
using the dot syntax.
For example, ``"mdf.source_name"`` is the ``source_name`` field
of the ``mdf`` dictionary.
start (str or int): The starting value, or ``None`` for no lower bound.
**Default:** ``None``.
stop (str or int): The ending value, or ``None`` for no upper bound.
**Default:** ``None``.
inclusive (bool): If ``True``, the ``start`` and ``stop`` values will be excluded
from the search.
If ``False``, the ``start`` and ``stop`` values will not be excluded
from the search.
**Default:** ``True``.
new_group (bool): If ``True``, will separate the term into a new parenthetical group.
If ``False``, will not.
**Default:** ``False``.
Returns:
SearchHelper: Self | Below is the the instruction that describes the task:
### Input:
Exclude a ``field:[some range]`` term from the query.
Matches will not have any ``value`` in the range in the ``field``.
Arguments:
field (str): The field to check for the value.
The field must be namespaced according to Elasticsearch rules
using the dot syntax.
For example, ``"mdf.source_name"`` is the ``source_name`` field
of the ``mdf`` dictionary.
start (str or int): The starting value, or ``None`` for no lower bound.
**Default:** ``None``.
stop (str or int): The ending value, or ``None`` for no upper bound.
**Default:** ``None``.
inclusive (bool): If ``True``, the ``start`` and ``stop`` values will be excluded
from the search.
If ``False``, the ``start`` and ``stop`` values will not be excluded
from the search.
**Default:** ``True``.
new_group (bool): If ``True``, will separate the term into a new parenthetical group.
If ``False``, will not.
**Default:** ``False``.
Returns:
SearchHelper: Self
### Response:
def exclude_range(self, field, start="*", stop="*", inclusive=True, new_group=False):
"""Exclude a ``field:[some range]`` term from the query.
Matches will not have any ``value`` in the range in the ``field``.
Arguments:
field (str): The field to check for the value.
The field must be namespaced according to Elasticsearch rules
using the dot syntax.
For example, ``"mdf.source_name"`` is the ``source_name`` field
of the ``mdf`` dictionary.
start (str or int): The starting value, or ``None`` for no lower bound.
**Default:** ``None``.
stop (str or int): The ending value, or ``None`` for no upper bound.
**Default:** ``None``.
inclusive (bool): If ``True``, the ``start`` and ``stop`` values will be excluded
from the search.
If ``False``, the ``start`` and ``stop`` values will not be excluded
from the search.
**Default:** ``True``.
new_group (bool): If ``True``, will separate the term into a new parenthetical group.
If ``False``, will not.
**Default:** ``False``.
Returns:
SearchHelper: Self
"""
# Accept None as *
if start is None:
start = "*"
if stop is None:
stop = "*"
# *-* is the same as field doesn't exist
if start == "*" and stop == "*":
return self.match_not_exists(field, new_group=new_group)
if inclusive:
value = "[" + str(start) + " TO " + str(stop) + "]"
else:
value = "{" + str(start) + " TO " + str(stop) + "}"
return self.exclude_field(field, value, new_group=new_group) |
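A hedged usage sketch, assuming sh is an already-constructed SearchHelper instance; the field name and bounds are made up:
# Exclude records whose mdf.elements value lies in [1 TO 5] (inclusive bounds).
sh.exclude_range("mdf.elements", start=1, stop=5)
# None is treated as *, so this excludes everything from 10 upward.
sh.exclude_range("mdf.elements", start=10, stop=None)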
def generate_fault_source_model(self):
'''
Creates a resulting `openquake.hmtk` fault source set.
:returns:
source_model - list of instances of either the :class:
`openquake.hmtk.sources.simple_fault_source.mtkSimpleFaultSource`
or :class:
`openquake.hmtk.sources.complex_fault_source.mtkComplexFaultSource`
model_weight - Corresponding weights for each source model
'''
source_model = []
model_weight = []
for iloc in range(0, self.get_number_mfd_models()):
model_mfd = EvenlyDiscretizedMFD(
self.mfd[0][iloc].min_mag,
self.mfd[0][iloc].bin_width,
self.mfd[0][iloc].occur_rates.tolist())
if isinstance(self.geometry, ComplexFaultGeometry):
# Complex fault class
source = mtkComplexFaultSource(
self.id,
self.name,
self.trt,
self.geometry.surface,
self.mfd[2][iloc],
self.rupt_aspect_ratio,
model_mfd,
self.rake)
source.fault_edges = self.geometry.trace
else:
# Simple Fault source
source = mtkSimpleFaultSource(
self.id,
self.name,
self.trt,
self.geometry.surface,
self.geometry.dip,
self.geometry.upper_depth,
self.geometry.lower_depth,
self.mfd[2][iloc],
self.rupt_aspect_ratio,
model_mfd,
self.rake)
source.fault_trace = self.geometry.trace
source_model.append(source)
model_weight.append(self.mfd[1][iloc])
return source_model, model_weight | Creates a resulting `openquake.hmtk` fault source set.
:returns:
source_model - list of instances of either the :class:
`openquake.hmtk.sources.simple_fault_source.mtkSimpleFaultSource`
or :class:
`openquake.hmtk.sources.complex_fault_source.mtkComplexFaultSource`
model_weight - Corresponding weights for each source model | Below is the the instruction that describes the task:
### Input:
Creates a resulting `openquake.hmtk` fault source set.
:returns:
source_model - list of instances of either the :class:
`openquake.hmtk.sources.simple_fault_source.mtkSimpleFaultSource`
or :class:
`openquake.hmtk.sources.complex_fault_source.mtkComplexFaultSource`
model_weight - Corresponding weights for each source model
### Response:
def generate_fault_source_model(self):
'''
Creates a resulting `openquake.hmtk` fault source set.
:returns:
source_model - list of instances of either the :class:
`openquake.hmtk.sources.simple_fault_source.mtkSimpleFaultSource`
or :class:
`openquake.hmtk.sources.complex_fault_source.mtkComplexFaultSource`
model_weight - Corresponding weights for each source model
'''
source_model = []
model_weight = []
for iloc in range(0, self.get_number_mfd_models()):
model_mfd = EvenlyDiscretizedMFD(
self.mfd[0][iloc].min_mag,
self.mfd[0][iloc].bin_width,
self.mfd[0][iloc].occur_rates.tolist())
if isinstance(self.geometry, ComplexFaultGeometry):
# Complex fault class
source = mtkComplexFaultSource(
self.id,
self.name,
self.trt,
self.geometry.surface,
self.mfd[2][iloc],
self.rupt_aspect_ratio,
model_mfd,
self.rake)
source.fault_edges = self.geometry.trace
else:
# Simple Fault source
source = mtkSimpleFaultSource(
self.id,
self.name,
self.trt,
self.geometry.surface,
self.geometry.dip,
self.geometry.upper_depth,
self.geometry.lower_depth,
self.mfd[2][iloc],
self.rupt_aspect_ratio,
model_mfd,
self.rake)
source.fault_trace = self.geometry.trace
source_model.append(source)
model_weight.append(self.mfd[1][iloc])
return source_model, model_weight |
def _check_flag_meanings(self, ds, name):
'''
Check a variable's flag_meanings attribute for compliance under CF
- flag_meanings exists
- flag_meanings is a string
- flag_meanings elements are valid strings
:param netCDF4.Dataset ds: An open netCDF dataset
:param str name: Variable name
:rtype: compliance_checker.base.Result
'''
variable = ds.variables[name]
flag_meanings = getattr(variable, 'flag_meanings', None)
valid_meanings = TestCtx(BaseCheck.HIGH, self.section_titles['3.5'])
valid_meanings.assert_true(flag_meanings is not None,
"{}'s flag_meanings attribute is required for flag variables".format(name))
valid_meanings.assert_true(isinstance(flag_meanings, basestring),
"{}'s flag_meanings attribute must be a string".format(name))
# We can't perform any additional checks if it's not a string
if not isinstance(flag_meanings, basestring):
return valid_meanings.to_result()
valid_meanings.assert_true(len(flag_meanings) > 0,
"{}'s flag_meanings can't be empty".format(name))
flag_regx = regex.compile(r"^[0-9A-Za-z_\-.+@]+$")
meanings = flag_meanings.split()
for meaning in meanings:
if flag_regx.match(meaning) is None:
valid_meanings.assert_true(False,
"{}'s flag_meanings attribute defined an illegal flag meaning ".format(name)+\
"{}".format(meaning))
return valid_meanings.to_result() | Check a variable's flag_meanings attribute for compliance under CF
- flag_meanings exists
- flag_meanings is a string
- flag_meanings elements are valid strings
:param netCDF4.Dataset ds: An open netCDF dataset
:param str name: Variable name
:rtype: compliance_checker.base.Result | Below is the the instruction that describes the task:
### Input:
Check a variable's flag_meanings attribute for compliance under CF
- flag_meanings exists
- flag_meanings is a string
- flag_meanings elements are valid strings
:param netCDF4.Dataset ds: An open netCDF dataset
:param str name: Variable name
:rtype: compliance_checker.base.Result
### Response:
def _check_flag_meanings(self, ds, name):
'''
Check a variable's flag_meanings attribute for compliance under CF
- flag_meanings exists
- flag_meanings is a string
- flag_meanings elements are valid strings
:param netCDF4.Dataset ds: An open netCDF dataset
:param str name: Variable name
:rtype: compliance_checker.base.Result
'''
variable = ds.variables[name]
flag_meanings = getattr(variable, 'flag_meanings', None)
valid_meanings = TestCtx(BaseCheck.HIGH, self.section_titles['3.5'])
valid_meanings.assert_true(flag_meanings is not None,
"{}'s flag_meanings attribute is required for flag variables".format(name))
valid_meanings.assert_true(isinstance(flag_meanings, basestring),
"{}'s flag_meanings attribute must be a string".format(name))
# We can't perform any additional checks if it's not a string
if not isinstance(flag_meanings, basestring):
return valid_meanings.to_result()
valid_meanings.assert_true(len(flag_meanings) > 0,
"{}'s flag_meanings can't be empty".format(name))
flag_regx = regex.compile(r"^[0-9A-Za-z_\-.+@]+$")
meanings = flag_meanings.split()
for meaning in meanings:
if flag_regx.match(meaning) is None:
valid_meanings.assert_true(False,
"{}'s flag_meanings attribute defined an illegal flag meaning ".format(name)+\
"{}".format(meaning))
return valid_meanings.to_result() |
def Stephan_Abdelsalam(rhol, rhog, mul, kl, Cpl, Hvap, sigma, Tsat, Te=None,
q=None, kw=401, rhow=8.96, Cpw=384, angle=None,
correlation='general'):
r'''Calculates heat transfer coefficient for a evaporator operating
in the nucleate boiling regime according to [2]_ as presented in [1]_.
Five variants are possible.
Either heat flux or excess temperature is required. The forms for `Te` are
not shown here, but are similar to those of the other functions.
.. math::
h = 0.23X_1^{0.674} X_2^{0.35} X_3^{0.371} X_5^{0.297} X_8^{-1.73} k_L/d_B
X1 = \frac{q D_d}{K_L T_{sat}}
X2 = \frac{\alpha^2 \rho_L}{\sigma D_d}
X3 = \frac{C_{p,L} T_{sat} D_d^2}{\alpha^2}
X4 = \frac{H_{vap} D_d^2}{\alpha^2}
X5 = \frac{\rho_V}{\rho_L}
X6 = \frac{C_{p,l} \mu_L}{k_L}
X7 = \frac{\rho_W C_{p,W} k_W}{\rho_L C_{p,L} k_L}
X8 = \frac{\rho_L-\rho_V}{\rho_L}
D_b = 0.0146\theta\sqrt{\frac{2\sigma}{g(\rho_L-\rho_g)}}
Respectively, the following four correlations are for water, hydrocarbons,
cryogenic fluids, and refrigerants.
.. math::
h = 0.246\times 10^7 X1^{0.673} X4^{-1.58} X3^{1.26}X8^{5.22}k_L/d_B
h = 0.0546 X5^{0.335} X1^{0.67} X8^{-4.33} X4^{0.248}k_L/d_B
h = 4.82 X1^{0.624} X7^{0.117} X3^{0.374} X4^{-0.329}X5^{0.257} k_L/d_B
h = 207 X1^{0.745} X5^{0.581} X6^{0.533} k_L/d_B
Parameters
----------
rhol : float
Density of the liquid [kg/m^3]
rhog : float
Density of the produced gas [kg/m^3]
mul : float
Viscosity of liquid [Pa*s]
kl : float
Thermal conductivity of liquid [W/m/K]
Cpl : float
Heat capacity of liquid [J/kg/K]
Hvap : float
Heat of vaporization of the fluid at P, [J/kg]
sigma : float
Surface tension of liquid [N/m]
Tsat : float
Saturation temperature at operating pressure [Pa]
Te : float, optional
Excess wall temperature, [K]
q : float, optional
Heat flux, [W/m^2]
kw : float, optional
Thermal conductivity of wall (only for cryogenics) [W/m/K]
rhow : float, optional
Density of the wall (only for cryogenics) [kg/m^3]
Cpw : float, optional
Heat capacity of wall (only for cryogenics) [J/kg/K]
angle : float, optional
Contact angle of bubble with wall [degrees]
correlation : str, optional
Any of 'general', 'water', 'hydrocarbon', 'cryogenic', or 'refrigerant'
Returns
-------
h : float
Heat transfer coefficient [W/m^2/K]
Notes
-----
If cryogenic correlation is selected, metal properties are used. Default
values are the properties of copper at STP.
The angle is selected automatically if a correlation is selected; if angle
is provided anyway, the automatic selection is ignored. A IndexError
exception is raised if the correlation is not in the dictionary
_angles_Stephan_Abdelsalam.
Examples
--------
Example is from [3]_ and matches.
>>> Stephan_Abdelsalam(Te=16.2, Tsat=437.5, Cpl=2730., kl=0.086, mul=156E-6,
... sigma=0.0082, Hvap=272E3, rhol=567, rhog=18.09, angle=35)
26722.441071108373
References
----------
.. [1] Cao, Eduardo. Heat Transfer in Process Engineering.
McGraw Hill Professional, 2009.
.. [2] Stephan, K., and M. Abdelsalam. "Heat-Transfer Correlations for
Natural Convection Boiling." International Journal of Heat and Mass
Transfer 23, no. 1 (January 1980): 73-87.
doi:10.1016/0017-9310(80)90140-4.
.. [3] Serth, R. W., Process Heat Transfer: Principles,
Applications and Rules of Thumb. 2E. Amsterdam: Academic Press, 2014.
'''
if Te is None and q is None:
raise Exception('Either q or Te is needed for this correlation')
angle = _angles_Stephan_Abdelsalam[correlation]
db = 0.0146*angle*(2*sigma/g/(rhol-rhog))**0.5
diffusivity_L = kl/rhol/Cpl
if Te:
X1 = db/kl/Tsat*Te
else:
X1 = db/kl/Tsat*q
X2 = diffusivity_L**2*rhol/sigma/db
X3 = Hvap*db**2/diffusivity_L**2
X4 = Hvap*db**2/diffusivity_L**2
X5 = rhog/rhol
X6 = Cpl*mul/kl
X7 = rhow*Cpw*kw/(rhol*Cpl*kl)
X8 = (rhol-rhog)/rhol
if correlation == 'general':
if Te:
h = (0.23*X1**0.674*X2**0.35*X3**0.371*X5**0.297*X8**-1.73*kl/db)**(1/0.326)
else:
h = (0.23*X1**0.674*X2**0.35*X3**0.371*X5**0.297*X8**-1.73*kl/db)
elif correlation == 'water':
if Te:
h = (0.246E7*X1**0.673*X4**-1.58*X3**1.26*X8**5.22*kl/db)**(1/0.327)
else:
h = (0.246E7*X1**0.673*X4**-1.58*X3**1.26*X8**5.22*kl/db)
elif correlation == 'hydrocarbon':
if Te:
h = (0.0546*X5**0.335*X1**0.67*X8**-4.33*X4**0.248*kl/db)**(1/0.33)
else:
h = (0.0546*X5**0.335*X1**0.67*X8**-4.33*X4**0.248*kl/db)
elif correlation == 'cryogenic':
if Te:
h = (4.82*X1**0.624*X7**0.117*X3**0.374*X4**-0.329*X5**0.257*kl/db)**(1/0.376)
else:
h = (4.82*X1**0.624*X7**0.117*X3**0.374*X4**-0.329*X5**0.257*kl/db)
else:
if Te:
h = (207*X1**0.745*X5**0.581*X6**0.533*kl/db)**(1/0.255)
else:
h = (207*X1**0.745*X5**0.581*X6**0.533*kl/db)
return h | r'''Calculates heat transfer coefficient for a evaporator operating
in the nucleate boiling regime according to [2]_ as presented in [1]_.
Five variants are possible.
Either heat flux or excess temperature is required. The forms for `Te` are
not shown here, but are similar to those of the other functions.
.. math::
h = 0.23X_1^{0.674} X_2^{0.35} X_3^{0.371} X_5^{0.297} X_8^{-1.73} k_L/d_B
X1 = \frac{q D_d}{K_L T_{sat}}
X2 = \frac{\alpha^2 \rho_L}{\sigma D_d}
X3 = \frac{C_{p,L} T_{sat} D_d^2}{\alpha^2}
X4 = \frac{H_{vap} D_d^2}{\alpha^2}
X5 = \frac{\rho_V}{\rho_L}
X6 = \frac{C_{p,l} \mu_L}{k_L}
X7 = \frac{\rho_W C_{p,W} k_W}{\rho_L C_{p,L} k_L}
X8 = \frac{\rho_L-\rho_V}{\rho_L}
D_b = 0.0146\theta\sqrt{\frac{2\sigma}{g(\rho_L-\rho_g)}}
Respectively, the following four correlations are for water, hydrocarbons,
cryogenic fluids, and refrigerants.
.. math::
h = 0.246\times 10^7 X1^{0.673} X4^{-1.58} X3^{1.26}X8^{5.22}k_L/d_B
h = 0.0546 X5^{0.335} X1^{0.67} X8^{-4.33} X4^{0.248}k_L/d_B
h = 4.82 X1^{0.624} X7^{0.117} X3^{0.374} X4^{-0.329}X5^{0.257} k_L/d_B
h = 207 X1^{0.745} X5^{0.581} X6^{0.533} k_L/d_B
Parameters
----------
rhol : float
Density of the liquid [kg/m^3]
rhog : float
Density of the produced gas [kg/m^3]
mul : float
Viscosity of liquid [Pa*s]
kl : float
Thermal conductivity of liquid [W/m/K]
Cpl : float
Heat capacity of liquid [J/kg/K]
Hvap : float
Heat of vaporization of the fluid at P, [J/kg]
sigma : float
Surface tension of liquid [N/m]
Tsat : float
Saturation temperature at operating pressure [Pa]
Te : float, optional
Excess wall temperature, [K]
q : float, optional
Heat flux, [W/m^2]
kw : float, optional
Thermal conductivity of wall (only for cryogenics) [W/m/K]
rhow : float, optional
Density of the wall (only for cryogenics) [kg/m^3]
Cpw : float, optional
Heat capacity of wall (only for cryogenics) [J/kg/K]
angle : float, optional
Contact angle of bubble with wall [degrees]
correlation : str, optional
Any of 'general', 'water', 'hydrocarbon', 'cryogenic', or 'refrigerant'
Returns
-------
h : float
Heat transfer coefficient [W/m^2/K]
Notes
-----
If cryogenic correlation is selected, metal properties are used. Default
values are the properties of copper at STP.
The angle is selected automatically if a correlation is selected; if angle
is provided anyway, the automatic selection is ignored. A IndexError
exception is raised if the correlation is not in the dictionary
_angles_Stephan_Abdelsalam.
Examples
--------
Example is from [3]_ and matches.
>>> Stephan_Abdelsalam(Te=16.2, Tsat=437.5, Cpl=2730., kl=0.086, mul=156E-6,
... sigma=0.0082, Hvap=272E3, rhol=567, rhog=18.09, angle=35)
26722.441071108373
References
----------
.. [1] Cao, Eduardo. Heat Transfer in Process Engineering.
McGraw Hill Professional, 2009.
.. [2] Stephan, K., and M. Abdelsalam. "Heat-Transfer Correlations for
Natural Convection Boiling." International Journal of Heat and Mass
Transfer 23, no. 1 (January 1980): 73-87.
doi:10.1016/0017-9310(80)90140-4.
.. [3] Serth, R. W., Process Heat Transfer: Principles,
Applications and Rules of Thumb. 2E. Amsterdam: Academic Press, 2014. | Below is the the instruction that describes the task:
### Input:
r'''Calculates heat transfer coefficient for a evaporator operating
in the nucleate boiling regime according to [2]_ as presented in [1]_.
Five variants are possible.
Either heat flux or excess temperature is required. The forms for `Te` are
not shown here, but are similar to those of the other functions.
.. math::
h = 0.23X_1^{0.674} X_2^{0.35} X_3^{0.371} X_5^{0.297} X_8^{-1.73} k_L/d_B
X1 = \frac{q D_d}{K_L T_{sat}}
X2 = \frac{\alpha^2 \rho_L}{\sigma D_d}
X3 = \frac{C_{p,L} T_{sat} D_d^2}{\alpha^2}
X4 = \frac{H_{vap} D_d^2}{\alpha^2}
X5 = \frac{\rho_V}{\rho_L}
X6 = \frac{C_{p,l} \mu_L}{k_L}
X7 = \frac{\rho_W C_{p,W} k_W}{\rho_L C_{p,L} k_L}
X8 = \frac{\rho_L-\rho_V}{\rho_L}
D_b = 0.0146\theta\sqrt{\frac{2\sigma}{g(\rho_L-\rho_g)}}
Respectively, the following four correlations are for water, hydrocarbons,
cryogenic fluids, and refrigerants.
.. math::
h = 0.246\times 10^7 X1^{0.673} X4^{-1.58} X3^{1.26}X8^{5.22}k_L/d_B
h = 0.0546 X5^{0.335} X1^{0.67} X8^{-4.33} X4^{0.248}k_L/d_B
h = 4.82 X1^{0.624} X7^{0.117} X3^{0.374} X4^{-0.329}X5^{0.257} k_L/d_B
h = 207 X1^{0.745} X5^{0.581} X6^{0.533} k_L/d_B
Parameters
----------
rhol : float
Density of the liquid [kg/m^3]
rhog : float
Density of the produced gas [kg/m^3]
mul : float
Viscosity of liquid [Pa*s]
kl : float
Thermal conductivity of liquid [W/m/K]
Cpl : float
Heat capacity of liquid [J/kg/K]
Hvap : float
Heat of vaporization of the fluid at P, [J/kg]
sigma : float
Surface tension of liquid [N/m]
Tsat : float
Saturation temperature at operating pressure [Pa]
Te : float, optional
Excess wall temperature, [K]
q : float, optional
Heat flux, [W/m^2]
kw : float, optional
Thermal conductivity of wall (only for cryogenics) [W/m/K]
rhow : float, optional
Density of the wall (only for cryogenics) [kg/m^3]
Cpw : float, optional
Heat capacity of wall (only for cryogenics) [J/kg/K]
angle : float, optional
Contact angle of bubble with wall [degrees]
correlation : str, optional
Any of 'general', 'water', 'hydrocarbon', 'cryogenic', or 'refrigerant'
Returns
-------
h : float
Heat transfer coefficient [W/m^2/K]
Notes
-----
If cryogenic correlation is selected, metal properties are used. Default
values are the properties of copper at STP.
The angle is selected automatically if a correlation is selected; if angle
is provided anyway, the automatic selection is ignored. A IndexError
exception is raised if the correlation is not in the dictionary
_angles_Stephan_Abdelsalam.
Examples
--------
Example is from [3]_ and matches.
>>> Stephan_Abdelsalam(Te=16.2, Tsat=437.5, Cpl=2730., kl=0.086, mul=156E-6,
... sigma=0.0082, Hvap=272E3, rhol=567, rhog=18.09, angle=35)
26722.441071108373
References
----------
.. [1] Cao, Eduardo. Heat Transfer in Process Engineering.
McGraw Hill Professional, 2009.
.. [2] Stephan, K., and M. Abdelsalam. "Heat-Transfer Correlations for
Natural Convection Boiling." International Journal of Heat and Mass
Transfer 23, no. 1 (January 1980): 73-87.
doi:10.1016/0017-9310(80)90140-4.
.. [3] Serth, R. W., Process Heat Transfer: Principles,
Applications and Rules of Thumb. 2E. Amsterdam: Academic Press, 2014.
### Response:
def Stephan_Abdelsalam(rhol, rhog, mul, kl, Cpl, Hvap, sigma, Tsat, Te=None,
q=None, kw=401, rhow=8.96, Cpw=384, angle=None,
correlation='general'):
r'''Calculates heat transfer coefficient for a evaporator operating
in the nucleate boiling regime according to [2]_ as presented in [1]_.
Five variants are possible.
Either heat flux or excess temperature is required. The forms for `Te` are
not shown here, but are similar to those of the other functions.
.. math::
h = 0.23X_1^{0.674} X_2^{0.35} X_3^{0.371} X_5^{0.297} X_8^{-1.73} k_L/d_B
X1 = \frac{q D_d}{K_L T_{sat}}
X2 = \frac{\alpha^2 \rho_L}{\sigma D_d}
X3 = \frac{C_{p,L} T_{sat} D_d^2}{\alpha^2}
X4 = \frac{H_{vap} D_d^2}{\alpha^2}
X5 = \frac{\rho_V}{\rho_L}
X6 = \frac{C_{p,l} \mu_L}{k_L}
X7 = \frac{\rho_W C_{p,W} k_W}{\rho_L C_{p,L} k_L}
X8 = \frac{\rho_L-\rho_V}{\rho_L}
D_b = 0.0146\theta\sqrt{\frac{2\sigma}{g(\rho_L-\rho_g)}}
Respectively, the following four correlations are for water, hydrocarbons,
cryogenic fluids, and refrigerants.
.. math::
h = 0.246\times 10^7 X1^{0.673} X4^{-1.58} X3^{1.26}X8^{5.22}k_L/d_B
h = 0.0546 X5^{0.335} X1^{0.67} X8^{-4.33} X4^{0.248}k_L/d_B
h = 4.82 X1^{0.624} X7^{0.117} X3^{0.374} X4^{-0.329}X5^{0.257} k_L/d_B
h = 207 X1^{0.745} X5^{0.581} X6^{0.533} k_L/d_B
Parameters
----------
rhol : float
Density of the liquid [kg/m^3]
rhog : float
Density of the produced gas [kg/m^3]
mul : float
Viscosity of liquid [Pa*s]
kl : float
Thermal conductivity of liquid [W/m/K]
Cpl : float
Heat capacity of liquid [J/kg/K]
Hvap : float
Heat of vaporization of the fluid at P, [J/kg]
sigma : float
Surface tension of liquid [N/m]
Tsat : float
Saturation temperature at operating pressure [Pa]
Te : float, optional
Excess wall temperature, [K]
q : float, optional
Heat flux, [W/m^2]
kw : float, optional
Thermal conductivity of wall (only for cryogenics) [W/m/K]
rhow : float, optional
Density of the wall (only for cryogenics) [kg/m^3]
Cpw : float, optional
Heat capacity of wall (only for cryogenics) [J/kg/K]
angle : float, optional
Contact angle of bubble with wall [degrees]
correlation : str, optional
Any of 'general', 'water', 'hydrocarbon', 'cryogenic', or 'refrigerant'
Returns
-------
h : float
Heat transfer coefficient [W/m^2/K]
Notes
-----
If cryogenic correlation is selected, metal properties are used. Default
values are the properties of copper at STP.
The angle is selected automatically if a correlation is selected; if angle
is provided anyway, the automatic selection is ignored. A IndexError
exception is raised if the correlation is not in the dictionary
_angles_Stephan_Abdelsalam.
Examples
--------
Example is from [3]_ and matches.
>>> Stephan_Abdelsalam(Te=16.2, Tsat=437.5, Cpl=2730., kl=0.086, mul=156E-6,
... sigma=0.0082, Hvap=272E3, rhol=567, rhog=18.09, angle=35)
26722.441071108373
References
----------
.. [1] Cao, Eduardo. Heat Transfer in Process Engineering.
McGraw Hill Professional, 2009.
.. [2] Stephan, K., and M. Abdelsalam. "Heat-Transfer Correlations for
Natural Convection Boiling." International Journal of Heat and Mass
Transfer 23, no. 1 (January 1980): 73-87.
doi:10.1016/0017-9310(80)90140-4.
.. [3] Serth, R. W., Process Heat Transfer: Principles,
Applications and Rules of Thumb. 2E. Amsterdam: Academic Press, 2014.
'''
if Te is None and q is None:
raise Exception('Either q or Te is needed for this correlation')
angle = _angles_Stephan_Abdelsalam[correlation]
db = 0.0146*angle*(2*sigma/g/(rhol-rhog))**0.5
diffusivity_L = kl/rhol/Cpl
if Te:
X1 = db/kl/Tsat*Te
else:
X1 = db/kl/Tsat*q
X2 = diffusivity_L**2*rhol/sigma/db
X3 = Hvap*db**2/diffusivity_L**2
X4 = Hvap*db**2/diffusivity_L**2
X5 = rhog/rhol
X6 = Cpl*mul/kl
X7 = rhow*Cpw*kw/(rhol*Cpl*kl)
X8 = (rhol-rhog)/rhol
if correlation == 'general':
if Te:
h = (0.23*X1**0.674*X2**0.35*X3**0.371*X5**0.297*X8**-1.73*kl/db)**(1/0.326)
else:
h = (0.23*X1**0.674*X2**0.35*X3**0.371*X5**0.297*X8**-1.73*kl/db)
elif correlation == 'water':
if Te:
h = (0.246E7*X1**0.673*X4**-1.58*X3**1.26*X8**5.22*kl/db)**(1/0.327)
else:
h = (0.246E7*X1**0.673*X4**-1.58*X3**1.26*X8**5.22*kl/db)
elif correlation == 'hydrocarbon':
if Te:
h = (0.0546*X5**0.335*X1**0.67*X8**-4.33*X4**0.248*kl/db)**(1/0.33)
else:
h = (0.0546*X5**0.335*X1**0.67*X8**-4.33*X4**0.248*kl/db)
elif correlation == 'cryogenic':
if Te:
h = (4.82*X1**0.624*X7**0.117*X3**0.374*X4**-0.329*X5**0.257*kl/db)**(1/0.376)
else:
h = (4.82*X1**0.624*X7**0.117*X3**0.374*X4**-0.329*X5**0.257*kl/db)
else:
if Te:
h = (207*X1**0.745*X5**0.581*X6**0.533*kl/db)**(1/0.255)
else:
h = (207*X1**0.745*X5**0.581*X6**0.533*kl/db)
return h |
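A sketch of the heat-flux form of the same call; the flux value and the 'hydrocarbon' variant are illustrative, and the bubble contact angle is assumed to come from the module-level _angles_Stephan_Abdelsalam table:
# Hypothetical flux-driven call; returns h in W/m^2/K for the assumed inputs.
h = Stephan_Abdelsalam(q=3.0E4, Tsat=437.5, Cpl=2730., kl=0.086, mul=156E-6,
                       sigma=0.0082, Hvap=272E3, rhol=567, rhog=18.09,
                       correlation='hydrocarbon')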
def get_template_names(self):
"""Returns the template name to use for this request."""
if self.request.is_ajax():
template = self.ajax_template_name
else:
template = self.template_name
return template | Returns the template name to use for this request. | Below is the the instruction that describes the task:
### Input:
Returns the template name to use for this request.
### Response:
def get_template_names(self):
"""Returns the template name to use for this request."""
if self.request.is_ajax():
template = self.ajax_template_name
else:
template = self.template_name
return template |
def rhsm_register(self, rhsm):
"""Register the host on the RHSM.
:param rhsm: a dict of parameters (login, password, pool_id)
"""
# Get rhsm credentials
login = rhsm.get('login')
password = rhsm.get('password', os.environ.get('RHN_PW'))
pool_id = rhsm.get('pool_id')
# Ensure the RHEL beta channel are disabled
self.run('rm /etc/pki/product/69.pem', ignore_error=True)
custom_log = 'subscription-manager register --username %s --password *******' % login
self.run(
'subscription-manager register --username %s --password "%s"' % (
login, password),
success_status=(0, 64),
custom_log=custom_log,
retry=3)
if pool_id:
self.run('subscription-manager attach --pool %s' % pool_id)
else:
self.run('subscription-manager attach --auto')
self.rhsm_active = True | Register the host on the RHSM.
:param rhsm: a dict of parameters (login, password, pool_id) | Below is the the instruction that describes the task:
### Input:
Register the host on the RHSM.
:param rhsm: a dict of parameters (login, password, pool_id)
### Response:
def rhsm_register(self, rhsm):
"""Register the host on the RHSM.
:param rhsm: a dict of parameters (login, password, pool_id)
"""
# Get rhsm credentials
login = rhsm.get('login')
password = rhsm.get('password', os.environ.get('RHN_PW'))
pool_id = rhsm.get('pool_id')
# Ensure the RHEL beta channel are disabled
self.run('rm /etc/pki/product/69.pem', ignore_error=True)
custom_log = 'subscription-manager register --username %s --password *******' % login
self.run(
'subscription-manager register --username %s --password "%s"' % (
login, password),
success_status=(0, 64),
custom_log=custom_log,
retry=3)
if pool_id:
self.run('subscription-manager attach --pool %s' % pool_id)
else:
self.run('subscription-manager attach --auto')
self.rhsm_active = True |
def diff(s1, s2):
''' --word-diff=porcelain clone'''
delta = difflib.Differ().compare(s1.split(), s2.split())
difflist = []
fullline = ''
for line in delta:
if line[0] == '?':
continue
elif line[0] == ' ':
fullline += line.strip() + ' '
else:
if fullline:
difflist.append(fullline[:-1])
fullline = ''
difflist.append(line)
if fullline:
difflist.append(fullline[:-1])
return [l[:] for l in '\n'.join(difflist).splitlines() if l] | --word-diff=porcelain clone | Below is the the instruction that describes the task:
### Input:
--word-diff=porcelain clone
### Response:
def diff(s1, s2):
''' --word-diff=porcelain clone'''
delta = difflib.Differ().compare(s1.split(), s2.split())
difflist = []
fullline = ''
for line in delta:
if line[0] == '?':
continue
elif line[0] == ' ':
fullline += line.strip() + ' '
else:
if fullline:
difflist.append(fullline[:-1])
fullline = ''
difflist.append(line)
if fullline:
difflist.append(fullline[:-1])
return [l[:] for l in '\n'.join(difflist).splitlines() if l] |
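A short run of the word-diff helper on made-up strings; unchanged words are grouped onto one line while '-'/'+' entries carry removed and added words, roughly mirroring git's --word-diff=porcelain output:
for token in diff("the quick brown fox", "the quick red fox jumps"):
    print(token)
# the quick
# - brown
# + red
# fox
# + jumps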
def sort_values(
self,
by,
axis=0,
ascending=True,
inplace=False,
kind="quicksort",
na_position="last",
):
"""Sorts by a column/row or list of columns/rows.
Args:
by: A list of labels for the axis to sort over.
axis: The axis to sort.
ascending: Sort in ascending or descending order.
inplace: If true, do the operation inplace.
kind: How to sort.
na_position: Where to put np.nan values.
Returns:
A sorted DataFrame.
"""
axis = self._get_axis_number(axis)
if not is_list_like(by):
by = [by]
# Currently, sort_values will just reindex based on the sorted values.
# TODO create a more efficient way to sort
if axis == 0:
broadcast_value_dict = {col: self[col] for col in by}
broadcast_values = pandas.DataFrame(broadcast_value_dict, index=self.index)
new_index = broadcast_values.sort_values(
by=by,
axis=axis,
ascending=ascending,
kind=kind,
na_position=na_position,
).index
return self.reindex(index=new_index, copy=not inplace)
else:
broadcast_value_list = [
self[row :: len(self.index)]._to_pandas() for row in by
]
index_builder = list(zip(broadcast_value_list, by))
broadcast_values = pandas.concat(
[row for row, idx in index_builder], copy=False
)
broadcast_values.columns = self.columns
new_columns = broadcast_values.sort_values(
by=by,
axis=axis,
ascending=ascending,
kind=kind,
na_position=na_position,
).columns
return self.reindex(columns=new_columns, copy=not inplace) | Sorts by a column/row or list of columns/rows.
Args:
by: A list of labels for the axis to sort over.
axis: The axis to sort.
ascending: Sort in ascending or descending order.
inplace: If true, do the operation inplace.
kind: How to sort.
na_position: Where to put np.nan values.
Returns:
A sorted DataFrame. | Below is the the instruction that describes the task:
### Input:
Sorts by a column/row or list of columns/rows.
Args:
by: A list of labels for the axis to sort over.
axis: The axis to sort.
ascending: Sort in ascending or descending order.
inplace: If true, do the operation inplace.
kind: How to sort.
na_position: Where to put np.nan values.
Returns:
A sorted DataFrame.
### Response:
def sort_values(
self,
by,
axis=0,
ascending=True,
inplace=False,
kind="quicksort",
na_position="last",
):
"""Sorts by a column/row or list of columns/rows.
Args:
by: A list of labels for the axis to sort over.
axis: The axis to sort.
ascending: Sort in ascending or descending order.
inplace: If true, do the operation inplace.
kind: How to sort.
na_position: Where to put np.nan values.
Returns:
A sorted DataFrame.
"""
axis = self._get_axis_number(axis)
if not is_list_like(by):
by = [by]
# Currently, sort_values will just reindex based on the sorted values.
# TODO create a more efficient way to sort
if axis == 0:
broadcast_value_dict = {col: self[col] for col in by}
broadcast_values = pandas.DataFrame(broadcast_value_dict, index=self.index)
new_index = broadcast_values.sort_values(
by=by,
axis=axis,
ascending=ascending,
kind=kind,
na_position=na_position,
).index
return self.reindex(index=new_index, copy=not inplace)
else:
broadcast_value_list = [
self[row :: len(self.index)]._to_pandas() for row in by
]
index_builder = list(zip(broadcast_value_list, by))
broadcast_values = pandas.concat(
[row for row, idx in index_builder], copy=False
)
broadcast_values.columns = self.columns
new_columns = broadcast_values.sort_values(
by=by,
axis=axis,
ascending=ascending,
kind=kind,
na_position=na_position,
).columns
return self.reindex(columns=new_columns, copy=not inplace) |
def get_json(self, prettyprint=False, translate=True):
"""
Get the data in JSON form
"""
j = []
if translate:
d = self.get_translated_data()
else:
d = self.data
for k in d:
j.append(d[k])
if prettyprint:
j = json.dumps(j, indent=2, separators=(',',': '))
else:
j = json.dumps(j)
return j | Get the data in JSON form | Below is the the instruction that describes the task:
### Input:
Get the data in JSON form
### Response:
def get_json(self, prettyprint=False, translate=True):
"""
Get the data in JSON form
"""
j = []
if translate:
d = self.get_translated_data()
else:
d = self.data
for k in d:
j.append(d[k])
if prettyprint:
j = json.dumps(j, indent=2, separators=(',',': '))
else:
j = json.dumps(j)
return j |
def MakeRequest(http, http_request, retries=7, max_retry_wait=60,
redirections=5,
retry_func=HandleExceptionsAndRebuildHttpConnections,
check_response_func=CheckResponse):
"""Send http_request via the given http, performing error/retry handling.
Args:
http: An httplib2.Http instance, or a http multiplexer that delegates to
an underlying http, for example, HTTPMultiplexer.
http_request: A Request to send.
retries: (int, default 7) Number of retries to attempt on retryable
replies (such as 429 or 5XX).
max_retry_wait: (int, default 60) Maximum number of seconds to wait
when retrying.
redirections: (int, default 5) Number of redirects to follow.
retry_func: Function to handle retries on exceptions. Argument is an
ExceptionRetryArgs tuple.
check_response_func: Function to validate the HTTP response.
Arguments are (Response, response content, url).
Raises:
InvalidDataFromServerError: if there is no response after retries.
Returns:
A Response object.
"""
retry = 0
first_req_time = time.time()
while True:
try:
return _MakeRequestNoRetry(
http, http_request, redirections=redirections,
check_response_func=check_response_func)
# retry_func will consume the exception types it handles and raise.
# pylint: disable=broad-except
except Exception as e:
retry += 1
if retry >= retries:
raise
else:
total_wait_sec = time.time() - first_req_time
retry_func(ExceptionRetryArgs(http, http_request, e, retry,
max_retry_wait, total_wait_sec)) | Send http_request via the given http, performing error/retry handling.
Args:
http: An httplib2.Http instance, or a http multiplexer that delegates to
an underlying http, for example, HTTPMultiplexer.
http_request: A Request to send.
retries: (int, default 7) Number of retries to attempt on retryable
replies (such as 429 or 5XX).
max_retry_wait: (int, default 60) Maximum number of seconds to wait
when retrying.
redirections: (int, default 5) Number of redirects to follow.
retry_func: Function to handle retries on exceptions. Argument is an
ExceptionRetryArgs tuple.
check_response_func: Function to validate the HTTP response.
Arguments are (Response, response content, url).
Raises:
InvalidDataFromServerError: if there is no response after retries.
Returns:
A Response object. | Below is the the instruction that describes the task:
### Input:
Send http_request via the given http, performing error/retry handling.
Args:
http: An httplib2.Http instance, or a http multiplexer that delegates to
an underlying http, for example, HTTPMultiplexer.
http_request: A Request to send.
retries: (int, default 7) Number of retries to attempt on retryable
replies (such as 429 or 5XX).
max_retry_wait: (int, default 60) Maximum number of seconds to wait
when retrying.
redirections: (int, default 5) Number of redirects to follow.
retry_func: Function to handle retries on exceptions. Argument is an
ExceptionRetryArgs tuple.
check_response_func: Function to validate the HTTP response.
Arguments are (Response, response content, url).
Raises:
InvalidDataFromServerError: if there is no response after retries.
Returns:
A Response object.
### Response:
def MakeRequest(http, http_request, retries=7, max_retry_wait=60,
redirections=5,
retry_func=HandleExceptionsAndRebuildHttpConnections,
check_response_func=CheckResponse):
"""Send http_request via the given http, performing error/retry handling.
Args:
http: An httplib2.Http instance, or a http multiplexer that delegates to
an underlying http, for example, HTTPMultiplexer.
http_request: A Request to send.
retries: (int, default 7) Number of retries to attempt on retryable
replies (such as 429 or 5XX).
max_retry_wait: (int, default 60) Maximum number of seconds to wait
when retrying.
redirections: (int, default 5) Number of redirects to follow.
retry_func: Function to handle retries on exceptions. Argument is an
ExceptionRetryArgs tuple.
check_response_func: Function to validate the HTTP response.
Arguments are (Response, response content, url).
Raises:
InvalidDataFromServerError: if there is no response after retries.
Returns:
A Response object.
"""
retry = 0
first_req_time = time.time()
while True:
try:
return _MakeRequestNoRetry(
http, http_request, redirections=redirections,
check_response_func=check_response_func)
# retry_func will consume the exception types it handles and raise.
# pylint: disable=broad-except
except Exception as e:
retry += 1
if retry >= retries:
raise
else:
total_wait_sec = time.time() - first_req_time
retry_func(ExceptionRetryArgs(http, http_request, e, retry,
max_retry_wait, total_wait_sec)) |
def makeAB(self):
""" Munge A and B reads into single serial block with only unique fields."""
for fld in self.m_blk_a:
compare_fld = fld.upper()
if not "RESERVED" in compare_fld and not "CRC" in compare_fld:
self.m_req[fld] = self.m_blk_a[fld]
for fld in self.m_blk_b:
compare_fld = fld.upper()
if not "RESERVED" in compare_fld and not "CRC" in compare_fld:
self.m_req[fld] = self.m_blk_b[fld]
pass | Munge A and B reads into single serial block with only unique fields. | Below is the the instruction that describes the task:
### Input:
Munge A and B reads into single serial block with only unique fields.
### Response:
def makeAB(self):
""" Munge A and B reads into single serial block with only unique fields."""
for fld in self.m_blk_a:
compare_fld = fld.upper()
if not "RESERVED" in compare_fld and not "CRC" in compare_fld:
self.m_req[fld] = self.m_blk_a[fld]
for fld in self.m_blk_b:
compare_fld = fld.upper()
if not "RESERVED" in compare_fld and not "CRC" in compare_fld:
self.m_req[fld] = self.m_blk_b[fld]
pass |
def _get_album_or_image(json, imgur):
"""Return a gallery image/album depending on what the json represent."""
if json['is_album']:
return Gallery_album(json, imgur, has_fetched=False)
return Gallery_image(json, imgur) | Return a gallery image/album depending on what the json represent. | Below is the the instruction that describes the task:
### Input:
Return a gallery image/album depending on what the json represent.
### Response:
def _get_album_or_image(json, imgur):
"""Return a gallery image/album depending on what the json represent."""
if json['is_album']:
return Gallery_album(json, imgur, has_fetched=False)
return Gallery_image(json, imgur) |
def send_at_position(self, what, useSelection, where="range"):
"""Ask the server to perform an operation on a range (sometimes named point)
`what` is used as the prefix for the typehint.
If `useSelection` is `False` the range is calculated based on the word under de
cursor. Current selection start and end is used as the range otherwise.
`where` defines the name of the property holding the range info within the request.
Default value is 'range' but 'point' is sometimes used
"""
self.log.debug('send_at_position: in')
b, e = self.editor.selection_pos() if useSelection else self.editor.word_under_cursor_pos()
self.log.debug('useSelection: {}, beg: {}, end: {}'.format(useSelection, b, e))
beg = self.get_position(b[0], b[1])
end = self.get_position(e[0], e[1])
self.send_request(
{"typehint": what + "AtPointReq",
"file": self.editor.path(),
where: {"from": beg, "to": end}}) | Ask the server to perform an operation on a range (sometimes named point)
`what` is used as the prefix for the typehint.
If `useSelection` is `False` the range is calculated based on the word under de
cursor. Current selection start and end is used as the range otherwise.
`where` defines the name of the property holding the range info within the request.
Default value is 'range' but 'point' is sometimes used | Below is the the instruction that describes the task:
### Input:
Ask the server to perform an operation on a range (sometimes named point)
`what` is used as the prefix for the typehint.
If `useSelection` is `False` the range is calculated based on the word under de
cursor. Current selection start and end is used as the range otherwise.
`where` defines the name of the property holding the range info within the request.
Default value is 'range' but 'point' is sometimes used
### Response:
def send_at_position(self, what, useSelection, where="range"):
"""Ask the server to perform an operation on a range (sometimes named point)
`what` is used as the prefix for the typehint.
If `useSelection` is `False` the range is calculated based on the word under de
cursor. Current selection start and end is used as the range otherwise.
`where` defines the name of the property holding the range info within the request.
Default value is 'range' but 'point' is sometimes used
"""
self.log.debug('send_at_position: in')
b, e = self.editor.selection_pos() if useSelection else self.editor.word_under_cursor_pos()
self.log.debug('useSelection: {}, beg: {}, end: {}'.format(useSelection, b, e))
beg = self.get_position(b[0], b[1])
end = self.get_position(e[0], e[1])
self.send_request(
{"typehint": what + "AtPointReq",
"file": self.editor.path(),
where: {"from": beg, "to": end}}) |
def xy2rd(input,x=None,y=None,coords=None, coordfile=None,colnames=None,separator=None,
hms=True, precision=6,output=None,verbose=True):
""" Primary interface to perform coordinate transformations from
pixel to sky coordinates using STWCS and full distortion models
read from the input image header.
"""
single_coord = False
# Only use value provided in `coords` if nothing has been specified for coordfile
if coords is not None and coordfile is None:
coordfile = coords
warnings.simplefilter('always',DeprecationWarning)
warnings.warn("Please update calling code to pass in `coordfile` instead of `coords`.",
category=DeprecationWarning)
warnings.simplefilter('default',DeprecationWarning)
if coordfile is not None:
if colnames in blank_list:
colnames = ['c1','c2']
# Determine columns which contain pixel positions
cols = util.parse_colnames(colnames,coordfile)
# read in columns from input coordinates file
xyvals = np.loadtxt(coordfile,usecols=cols,delimiter=separator)
if xyvals.ndim == 1: # only 1 entry in coordfile
xlist = [xyvals[0].copy()]
ylist = [xyvals[1].copy()]
else:
xlist = xyvals[:,0].copy()
ylist = xyvals[:,1].copy()
del xyvals
else:
if isinstance(x, np.ndarray):
xlist = x.tolist()
ylist = y.tolist()
elif not isinstance(x,list):
xlist = [x]
ylist = [y]
single_coord = True
else:
xlist = x
ylist = y
# start by reading in WCS+distortion info for input image
inwcs = wcsutil.HSTWCS(input)
if inwcs.wcs.is_unity():
print("####\nNo valid WCS found in {}.\n Results may be invalid.\n####\n".format(input))
# Now, convert pixel coordinates into sky coordinates
dra,ddec = inwcs.all_pix2world(xlist,ylist,1)
# convert to HH:MM:SS.S format, if specified
if hms:
ra,dec = wcs_functions.ddtohms(dra,ddec,precision=precision)
rastr = ra
decstr = dec
else:
# add formatting based on precision here...
rastr = []
decstr = []
fmt = "%."+repr(precision)+"f"
for r,d in zip(dra,ddec):
rastr.append(fmt%r)
decstr.append(fmt%d)
ra = dra
dec = ddec
if verbose or (not verbose and util.is_blank(output)):
print('# Coordinate transformations for ',input)
print('# X Y RA Dec\n')
for x,y,r,d in zip(xlist,ylist,rastr,decstr):
print("%.4f %.4f %s %s"%(x,y,r,d))
# Create output file, if specified
if output:
f = open(output,mode='w')
f.write("# Coordinates converted from %s\n"%input)
for r,d in zip(rastr,decstr):
f.write('%s %s\n'%(r,d))
f.close()
print('Wrote out results to: ',output)
if single_coord:
ra = ra[0]
dec = dec[0]
return ra,dec | Primary interface to perform coordinate transformations from
pixel to sky coordinates using STWCS and full distortion models
    read from the input image header. | Below is the instruction that describes the task:
### Input:
Primary interface to perform coordinate transformations from
pixel to sky coordinates using STWCS and full distortion models
read from the input image header.
### Response:
def xy2rd(input,x=None,y=None,coords=None, coordfile=None,colnames=None,separator=None,
hms=True, precision=6,output=None,verbose=True):
""" Primary interface to perform coordinate transformations from
pixel to sky coordinates using STWCS and full distortion models
read from the input image header.
"""
single_coord = False
# Only use value provided in `coords` if nothing has been specified for coordfile
if coords is not None and coordfile is None:
coordfile = coords
warnings.simplefilter('always',DeprecationWarning)
warnings.warn("Please update calling code to pass in `coordfile` instead of `coords`.",
category=DeprecationWarning)
warnings.simplefilter('default',DeprecationWarning)
if coordfile is not None:
if colnames in blank_list:
colnames = ['c1','c2']
# Determine columns which contain pixel positions
cols = util.parse_colnames(colnames,coordfile)
# read in columns from input coordinates file
xyvals = np.loadtxt(coordfile,usecols=cols,delimiter=separator)
if xyvals.ndim == 1: # only 1 entry in coordfile
xlist = [xyvals[0].copy()]
ylist = [xyvals[1].copy()]
else:
xlist = xyvals[:,0].copy()
ylist = xyvals[:,1].copy()
del xyvals
else:
if isinstance(x, np.ndarray):
xlist = x.tolist()
ylist = y.tolist()
elif not isinstance(x,list):
xlist = [x]
ylist = [y]
single_coord = True
else:
xlist = x
ylist = y
# start by reading in WCS+distortion info for input image
inwcs = wcsutil.HSTWCS(input)
if inwcs.wcs.is_unity():
print("####\nNo valid WCS found in {}.\n Results may be invalid.\n####\n".format(input))
# Now, convert pixel coordinates into sky coordinates
dra,ddec = inwcs.all_pix2world(xlist,ylist,1)
# convert to HH:MM:SS.S format, if specified
if hms:
ra,dec = wcs_functions.ddtohms(dra,ddec,precision=precision)
rastr = ra
decstr = dec
else:
# add formatting based on precision here...
rastr = []
decstr = []
fmt = "%."+repr(precision)+"f"
for r,d in zip(dra,ddec):
rastr.append(fmt%r)
decstr.append(fmt%d)
ra = dra
dec = ddec
if verbose or (not verbose and util.is_blank(output)):
print('# Coordinate transformations for ',input)
print('# X Y RA Dec\n')
for x,y,r,d in zip(xlist,ylist,rastr,decstr):
print("%.4f %.4f %s %s"%(x,y,r,d))
# Create output file, if specified
if output:
f = open(output,mode='w')
f.write("# Coordinates converted from %s\n"%input)
for r,d in zip(rastr,decstr):
f.write('%s %s\n'%(r,d))
f.close()
print('Wrote out results to: ',output)
if single_coord:
ra = ra[0]
dec = dec[0]
return ra,dec |
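A short, hedged usage sketch may help here; the FITS filename and pixel positions below are placeholders, not values taken from this record.
# Hypothetical call: convert two pixel positions in an HST image to sky coordinates.
ra, dec = xy2rd('j8bt06nyq_flt.fits[sci,1]',
                x=[100.0, 200.0], y=[150.0, 250.0],
                hms=True, precision=6, verbose=False)
# With hms=True the returned ra/dec are formatted strings; with hms=False they are floats.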
def libvlc_media_add_option(p_md, psz_options):
'''Add an option to the media.
This option will be used to determine how the media_player will
read the media. This allows to use VLC's advanced
reading/streaming options on a per-media basis.
@note: The options are listed in 'vlc --long-help' from the command line,
e.g. "-sout-all". Keep in mind that available options and their semantics
vary across LibVLC versions and builds.
@warning: Not all options affects L{Media} objects:
Specifically, due to architectural issues most audio and video options,
such as text renderer options, have no effects on an individual media.
These options must be set through L{libvlc_new}() instead.
@param p_md: the media descriptor.
@param psz_options: the options (as a string).
'''
f = _Cfunctions.get('libvlc_media_add_option', None) or \
_Cfunction('libvlc_media_add_option', ((1,), (1,),), None,
None, Media, ctypes.c_char_p)
return f(p_md, psz_options) | Add an option to the media.
This option will be used to determine how the media_player will
read the media. This allows to use VLC's advanced
reading/streaming options on a per-media basis.
@note: The options are listed in 'vlc --long-help' from the command line,
e.g. "-sout-all". Keep in mind that available options and their semantics
vary across LibVLC versions and builds.
@warning: Not all options affects L{Media} objects:
Specifically, due to architectural issues most audio and video options,
such as text renderer options, have no effects on an individual media.
These options must be set through L{libvlc_new}() instead.
@param p_md: the media descriptor.
    @param psz_options: the options (as a string). | Below is the instruction that describes the task:
### Input:
Add an option to the media.
This option will be used to determine how the media_player will
read the media. This allows to use VLC's advanced
reading/streaming options on a per-media basis.
@note: The options are listed in 'vlc --long-help' from the command line,
e.g. "-sout-all". Keep in mind that available options and their semantics
vary across LibVLC versions and builds.
@warning: Not all options affects L{Media} objects:
Specifically, due to architectural issues most audio and video options,
such as text renderer options, have no effects on an individual media.
These options must be set through L{libvlc_new}() instead.
@param p_md: the media descriptor.
@param psz_options: the options (as a string).
### Response:
def libvlc_media_add_option(p_md, psz_options):
'''Add an option to the media.
This option will be used to determine how the media_player will
read the media. This allows to use VLC's advanced
reading/streaming options on a per-media basis.
@note: The options are listed in 'vlc --long-help' from the command line,
e.g. "-sout-all". Keep in mind that available options and their semantics
vary across LibVLC versions and builds.
@warning: Not all options affects L{Media} objects:
Specifically, due to architectural issues most audio and video options,
such as text renderer options, have no effects on an individual media.
These options must be set through L{libvlc_new}() instead.
@param p_md: the media descriptor.
@param psz_options: the options (as a string).
'''
f = _Cfunctions.get('libvlc_media_add_option', None) or \
_Cfunction('libvlc_media_add_option', ((1,), (1,),), None,
None, Media, ctypes.c_char_p)
return f(p_md, psz_options) |
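As a hedged illustration only (the media path and option string are placeholders, and a LibVLC instance is assumed to have been created already), the call pattern looks like this:
# Assumed setup: instance = libvlc_new(0, None) has already succeeded.
media = libvlc_media_new_path(instance, b'/tmp/example.mp4')
libvlc_media_add_option(media, b'input-repeat=2')  # per-media option, applied when the media is read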
def rename(self):
"""Renames media file to formatted name.
After parsing data from initial media filename and searching
for additional data to using a data service, a formatted
filename will be generated and the media file will be renamed
to the generated name and optionally relocated.
"""
renamer.execute(self.original, self.out_location)
if cfg.CONF.move_files_enabled:
LOG.debug('relocated: %s', self)
else:
LOG.debug('renamed: %s', self) | Renames media file to formatted name.
After parsing data from initial media filename and searching
for additional data to using a data service, a formatted
filename will be generated and the media file will be renamed
    to the generated name and optionally relocated. | Below is the instruction that describes the task:
### Input:
Renames media file to formatted name.
After parsing data from initial media filename and searching
for additional data to using a data service, a formatted
filename will be generated and the media file will be renamed
to the generated name and optionally relocated.
### Response:
def rename(self):
"""Renames media file to formatted name.
After parsing data from initial media filename and searching
for additional data to using a data service, a formatted
filename will be generated and the media file will be renamed
to the generated name and optionally relocated.
"""
renamer.execute(self.original, self.out_location)
if cfg.CONF.move_files_enabled:
LOG.debug('relocated: %s', self)
else:
LOG.debug('renamed: %s', self) |
def getHourTable(date, pos):
""" Returns an HourTable object. """
table = hourTable(date, pos)
    return HourTable(table, date) | Returns an HourTable object. | Below is the instruction that describes the task:
### Input:
Returns an HourTable object.
### Response:
def getHourTable(date, pos):
""" Returns an HourTable object. """
table = hourTable(date, pos)
return HourTable(table, date) |
def fetch_list_members(list_url):
""" Get all members of the list specified by the given url. E.g., https://twitter.com/lore77/lists/libri-cultura-education """
match = re.match(r'.+twitter\.com\/(.+)\/lists\/(.+)', list_url)
if not match:
print('cannot parse list url %s' % list_url)
return []
screen_name, slug = match.groups()
print('collecting list %s/%s' % (screen_name, slug))
    return twutil.collect.list_members(slug, screen_name) | Get all members of the list specified by the given url. E.g., https://twitter.com/lore77/lists/libri-cultura-education | Below is the instruction that describes the task:
### Input:
Get all members of the list specified by the given url. E.g., https://twitter.com/lore77/lists/libri-cultura-education
### Response:
def fetch_list_members(list_url):
""" Get all members of the list specified by the given url. E.g., https://twitter.com/lore77/lists/libri-cultura-education """
match = re.match(r'.+twitter\.com\/(.+)\/lists\/(.+)', list_url)
if not match:
print('cannot parse list url %s' % list_url)
return []
screen_name, slug = match.groups()
print('collecting list %s/%s' % (screen_name, slug))
return twutil.collect.list_members(slug, screen_name) |
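To make the URL-parsing step concrete, here is a standalone check of the same regular expression with an example list URL; no Twitter API call is involved.
import re
m = re.match(r'.+twitter\.com\/(.+)\/lists\/(.+)',
             'https://twitter.com/lore77/lists/libri-cultura-education')
screen_name, slug = m.groups()
print(screen_name, slug)  # -> lore77 libri-cultura-education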
def get_output(self, buildroot_id):
"""
Build the 'output' section of the metadata.
:return: list, Output instances
"""
def add_buildroot_id(output):
logfile, metadata = output
metadata.update({'buildroot_id': buildroot_id})
return Output(file=logfile, metadata=metadata)
def add_log_type(output, arch):
logfile, metadata = output
metadata.update({'type': 'log', 'arch': arch})
return Output(file=logfile, metadata=metadata)
arch = os.uname()[4]
output_files = [add_log_type(add_buildroot_id(metadata), arch)
for metadata in self.get_logs()]
# Parent of squashed built image is base image
image_id = self.workflow.builder.image_id
parent_id = None
if not self.workflow.builder.base_from_scratch:
parent_id = self.workflow.builder.base_image_inspect['Id']
# Read config from the registry using v2 schema 2 digest
registries = self.workflow.push_conf.docker_registries
if registries:
config = copy.deepcopy(registries[0].config)
else:
config = {}
# We don't need container_config section
if config and 'container_config' in config:
del config['container_config']
repositories, typed_digests = self.get_repositories_and_digests()
tags = set(image.tag for image in self.workflow.tag_conf.images)
metadata, output = self.get_image_output()
metadata.update({
'arch': arch,
'type': 'docker-image',
'components': self.get_image_components(),
'extra': {
'image': {
'arch': arch,
},
'docker': {
'id': image_id,
'parent_id': parent_id,
'repositories': repositories,
'layer_sizes': self.workflow.layer_sizes,
'tags': list(tags),
'config': config,
'digests': typed_digests
},
},
})
if self.workflow.builder.base_from_scratch:
del metadata['extra']['docker']['parent_id']
if not config:
del metadata['extra']['docker']['config']
if not typed_digests:
del metadata['extra']['docker']['digests']
# Add the 'docker save' image to the output
image = add_buildroot_id(output)
output_files.append(image)
# add operator manifests to output
operator_manifests_path = (self.workflow.postbuild_results
.get(PLUGIN_EXPORT_OPERATOR_MANIFESTS_KEY))
if operator_manifests_path:
operator_manifests_file = open(operator_manifests_path)
manifests_metadata = self.get_output_metadata(operator_manifests_path,
OPERATOR_MANIFESTS_ARCHIVE)
operator_manifests_output = Output(file=operator_manifests_file,
metadata=manifests_metadata)
# We use log type here until a more appropriate type name is supported by koji
operator_manifests_output.metadata.update({'arch': arch, 'type': 'log'})
operator_manifests = add_buildroot_id(operator_manifests_output)
output_files.append(operator_manifests)
return output_files | Build the 'output' section of the metadata.
    :return: list, Output instances | Below is the instruction that describes the task:
### Input:
Build the 'output' section of the metadata.
:return: list, Output instances
### Response:
def get_output(self, buildroot_id):
"""
Build the 'output' section of the metadata.
:return: list, Output instances
"""
def add_buildroot_id(output):
logfile, metadata = output
metadata.update({'buildroot_id': buildroot_id})
return Output(file=logfile, metadata=metadata)
def add_log_type(output, arch):
logfile, metadata = output
metadata.update({'type': 'log', 'arch': arch})
return Output(file=logfile, metadata=metadata)
arch = os.uname()[4]
output_files = [add_log_type(add_buildroot_id(metadata), arch)
for metadata in self.get_logs()]
# Parent of squashed built image is base image
image_id = self.workflow.builder.image_id
parent_id = None
if not self.workflow.builder.base_from_scratch:
parent_id = self.workflow.builder.base_image_inspect['Id']
# Read config from the registry using v2 schema 2 digest
registries = self.workflow.push_conf.docker_registries
if registries:
config = copy.deepcopy(registries[0].config)
else:
config = {}
# We don't need container_config section
if config and 'container_config' in config:
del config['container_config']
repositories, typed_digests = self.get_repositories_and_digests()
tags = set(image.tag for image in self.workflow.tag_conf.images)
metadata, output = self.get_image_output()
metadata.update({
'arch': arch,
'type': 'docker-image',
'components': self.get_image_components(),
'extra': {
'image': {
'arch': arch,
},
'docker': {
'id': image_id,
'parent_id': parent_id,
'repositories': repositories,
'layer_sizes': self.workflow.layer_sizes,
'tags': list(tags),
'config': config,
'digests': typed_digests
},
},
})
if self.workflow.builder.base_from_scratch:
del metadata['extra']['docker']['parent_id']
if not config:
del metadata['extra']['docker']['config']
if not typed_digests:
del metadata['extra']['docker']['digests']
# Add the 'docker save' image to the output
image = add_buildroot_id(output)
output_files.append(image)
# add operator manifests to output
operator_manifests_path = (self.workflow.postbuild_results
.get(PLUGIN_EXPORT_OPERATOR_MANIFESTS_KEY))
if operator_manifests_path:
operator_manifests_file = open(operator_manifests_path)
manifests_metadata = self.get_output_metadata(operator_manifests_path,
OPERATOR_MANIFESTS_ARCHIVE)
operator_manifests_output = Output(file=operator_manifests_file,
metadata=manifests_metadata)
# We use log type here until a more appropriate type name is supported by koji
operator_manifests_output.metadata.update({'arch': arch, 'type': 'log'})
operator_manifests = add_buildroot_id(operator_manifests_output)
output_files.append(operator_manifests)
return output_files |
def _convert_to_indexer(self, obj, axis=None, is_setter=False,
raise_missing=False):
"""
Convert indexing key into something we can use to do actual fancy
indexing on an ndarray
Examples
ix[:5] -> slice(0, 5)
ix[[1,2,3]] -> [1,2,3]
ix[['foo', 'bar', 'baz']] -> [i, j, k] (indices of foo, bar, baz)
Going by Zen of Python?
'In the face of ambiguity, refuse the temptation to guess.'
raise AmbiguousIndexError with integer labels?
- No, prefer label-based indexing
"""
if axis is None:
axis = self.axis or 0
labels = self.obj._get_axis(axis)
if isinstance(obj, slice):
return self._convert_slice_indexer(obj, axis)
# try to find out correct indexer, if not type correct raise
try:
obj = self._convert_scalar_indexer(obj, axis)
except TypeError:
# but we will allow setting
if is_setter:
pass
# see if we are positional in nature
is_int_index = labels.is_integer()
is_int_positional = is_integer(obj) and not is_int_index
# if we are a label return me
try:
return labels.get_loc(obj)
except LookupError:
if isinstance(obj, tuple) and isinstance(labels, MultiIndex):
if is_setter and len(obj) == labels.nlevels:
return {'key': obj}
raise
except TypeError:
pass
except (ValueError):
if not is_int_positional:
raise
# a positional
if is_int_positional:
# if we are setting and its not a valid location
# its an insert which fails by definition
if is_setter:
# always valid
if self.name == 'loc':
return {'key': obj}
# a positional
if (obj >= self.obj.shape[axis] and
not isinstance(labels, MultiIndex)):
raise ValueError("cannot set by positional indexing with "
"enlargement")
return obj
if is_nested_tuple(obj, labels):
return labels.get_locs(obj)
elif is_list_like_indexer(obj):
if com.is_bool_indexer(obj):
obj = check_bool_indexer(labels, obj)
inds, = obj.nonzero()
return inds
else:
# When setting, missing keys are not allowed, even with .loc:
kwargs = {'raise_missing': True if is_setter else
raise_missing}
return self._get_listlike_indexer(obj, axis, **kwargs)[1]
else:
try:
return labels.get_loc(obj)
except LookupError:
# allow a not found key only if we are a setter
if not is_list_like_indexer(obj) and is_setter:
return {'key': obj}
raise | Convert indexing key into something we can use to do actual fancy
indexing on an ndarray
Examples
ix[:5] -> slice(0, 5)
ix[[1,2,3]] -> [1,2,3]
ix[['foo', 'bar', 'baz']] -> [i, j, k] (indices of foo, bar, baz)
Going by Zen of Python?
'In the face of ambiguity, refuse the temptation to guess.'
raise AmbiguousIndexError with integer labels?
    - No, prefer label-based indexing | Below is the instruction that describes the task:
### Input:
Convert indexing key into something we can use to do actual fancy
indexing on an ndarray
Examples
ix[:5] -> slice(0, 5)
ix[[1,2,3]] -> [1,2,3]
ix[['foo', 'bar', 'baz']] -> [i, j, k] (indices of foo, bar, baz)
Going by Zen of Python?
'In the face of ambiguity, refuse the temptation to guess.'
raise AmbiguousIndexError with integer labels?
- No, prefer label-based indexing
### Response:
def _convert_to_indexer(self, obj, axis=None, is_setter=False,
raise_missing=False):
"""
Convert indexing key into something we can use to do actual fancy
indexing on an ndarray
Examples
ix[:5] -> slice(0, 5)
ix[[1,2,3]] -> [1,2,3]
ix[['foo', 'bar', 'baz']] -> [i, j, k] (indices of foo, bar, baz)
Going by Zen of Python?
'In the face of ambiguity, refuse the temptation to guess.'
raise AmbiguousIndexError with integer labels?
- No, prefer label-based indexing
"""
if axis is None:
axis = self.axis or 0
labels = self.obj._get_axis(axis)
if isinstance(obj, slice):
return self._convert_slice_indexer(obj, axis)
# try to find out correct indexer, if not type correct raise
try:
obj = self._convert_scalar_indexer(obj, axis)
except TypeError:
# but we will allow setting
if is_setter:
pass
# see if we are positional in nature
is_int_index = labels.is_integer()
is_int_positional = is_integer(obj) and not is_int_index
# if we are a label return me
try:
return labels.get_loc(obj)
except LookupError:
if isinstance(obj, tuple) and isinstance(labels, MultiIndex):
if is_setter and len(obj) == labels.nlevels:
return {'key': obj}
raise
except TypeError:
pass
except (ValueError):
if not is_int_positional:
raise
# a positional
if is_int_positional:
# if we are setting and its not a valid location
# its an insert which fails by definition
if is_setter:
# always valid
if self.name == 'loc':
return {'key': obj}
# a positional
if (obj >= self.obj.shape[axis] and
not isinstance(labels, MultiIndex)):
raise ValueError("cannot set by positional indexing with "
"enlargement")
return obj
if is_nested_tuple(obj, labels):
return labels.get_locs(obj)
elif is_list_like_indexer(obj):
if com.is_bool_indexer(obj):
obj = check_bool_indexer(labels, obj)
inds, = obj.nonzero()
return inds
else:
# When setting, missing keys are not allowed, even with .loc:
kwargs = {'raise_missing': True if is_setter else
raise_missing}
return self._get_listlike_indexer(obj, axis, **kwargs)[1]
else:
try:
return labels.get_loc(obj)
except LookupError:
# allow a not found key only if we are a setter
if not is_list_like_indexer(obj) and is_setter:
return {'key': obj}
raise |
def sign(self, tx=None, wifs=[]):
""" Sign a provided transaction witht he provided key(s)
:param dict tx: The transaction to be signed and returned
:param string wifs: One or many wif keys to use for signing
a transaction. If not present, the keys will be loaded
from the wallet as defined in "missing_signatures" key
of the transactions.
"""
if tx:
txbuffer = self.transactionbuilder_class(tx, blockchain_instance=self)
else:
txbuffer = self.txbuffer
txbuffer.appendWif(wifs)
txbuffer.appendMissingSignatures()
txbuffer.sign()
    return txbuffer.json() | Sign a provided transaction with the provided key(s)
:param dict tx: The transaction to be signed and returned
:param string wifs: One or many wif keys to use for signing
a transaction. If not present, the keys will be loaded
from the wallet as defined in "missing_signatures" key
            of the transactions. | Below is the instruction that describes the task:
### Input:
Sign a provided transaction with the provided key(s)
:param dict tx: The transaction to be signed and returned
:param string wifs: One or many wif keys to use for signing
a transaction. If not present, the keys will be loaded
from the wallet as defined in "missing_signatures" key
of the transactions.
### Response:
def sign(self, tx=None, wifs=[]):
""" Sign a provided transaction witht he provided key(s)
:param dict tx: The transaction to be signed and returned
:param string wifs: One or many wif keys to use for signing
a transaction. If not present, the keys will be loaded
from the wallet as defined in "missing_signatures" key
of the transactions.
"""
if tx:
txbuffer = self.transactionbuilder_class(tx, blockchain_instance=self)
else:
txbuffer = self.txbuffer
txbuffer.appendWif(wifs)
txbuffer.appendMissingSignatures()
txbuffer.sign()
return txbuffer.json() |
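A hedged call sketch follows; `chain` stands in for an instance of the class defining sign(), and `unsigned_tx` and the WIF string are placeholders rather than real values.
# Sign an externally built transaction with an explicit key (placeholders throughout).
signed_tx = chain.sign(tx=unsigned_tx, wifs=['5J...placeholder-wif...'])
# Calling chain.sign() with no arguments signs the internal txbuffer and pulls missing keys from the wallet.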
def normalize(self, stats:Collection[Tensor]=None, do_x:bool=True, do_y:bool=False)->None:
"Add normalize transform using `stats` (defaults to `DataBunch.batch_stats`)"
if getattr(self,'norm',False): raise Exception('Can not call normalize twice')
if stats is None: self.stats = self.batch_stats()
else: self.stats = stats
self.norm,self.denorm = normalize_funcs(*self.stats, do_x=do_x, do_y=do_y)
self.add_tfm(self.norm)
    return self | Add normalize transform using `stats` (defaults to `DataBunch.batch_stats`) | Below is the instruction that describes the task:
### Input:
Add normalize transform using `stats` (defaults to `DataBunch.batch_stats`)
### Response:
def normalize(self, stats:Collection[Tensor]=None, do_x:bool=True, do_y:bool=False)->None:
"Add normalize transform using `stats` (defaults to `DataBunch.batch_stats`)"
if getattr(self,'norm',False): raise Exception('Can not call normalize twice')
if stats is None: self.stats = self.batch_stats()
else: self.stats = stats
self.norm,self.denorm = normalize_funcs(*self.stats, do_x=do_x, do_y=do_y)
self.add_tfm(self.norm)
return self |
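A brief fastai-style sketch of how this method is usually chained; the dataset path and transforms are assumptions, not part of this record.
# Hedged sketch (fastai v1 conventions); `path` is a placeholder image folder.
data = ImageDataBunch.from_folder(path, ds_tfms=get_transforms(), size=224)
data.normalize()  # stats default to data.batch_stats(); only inputs are normalized (do_x=True)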
def parse_arguments(argv):
"""Parse command line arguments.
Args:
    argv: list of command line arguments, including program name.
Returns:
An argparse Namespace object.
"""
parser = argparse.ArgumentParser(
description='Runs Preprocessing on structured CSV data.')
parser.add_argument('--input-file-pattern',
type=str,
required=True,
help='Input CSV file names. May contain a file pattern')
parser.add_argument('--output-dir',
type=str,
required=True,
help='Google Cloud Storage which to place outputs.')
parser.add_argument('--schema-file',
type=str,
required=True,
help=('BigQuery json schema file'))
args = parser.parse_args(args=argv[1:])
# Make sure the output folder exists if local folder.
file_io.recursive_create_dir(args.output_dir)
return args | Parse command line arguments.
Args:
  argv: list of command line arguments, including program name.
Returns:
  An argparse Namespace object. | Below is the instruction that describes the task:
### Input:
Parse command line arguments.
Args:
  argv: list of command line arguments, including program name.
Returns:
An argparse Namespace object.
### Response:
def parse_arguments(argv):
"""Parse command line arguments.
Args:
    argv: list of command line arguments, including program name.
Returns:
An argparse Namespace object.
"""
parser = argparse.ArgumentParser(
description='Runs Preprocessing on structured CSV data.')
parser.add_argument('--input-file-pattern',
type=str,
required=True,
help='Input CSV file names. May contain a file pattern')
parser.add_argument('--output-dir',
type=str,
required=True,
help='Google Cloud Storage which to place outputs.')
parser.add_argument('--schema-file',
type=str,
required=True,
help=('BigQuery json schema file'))
args = parser.parse_args(args=argv[1:])
# Make sure the output folder exists if local folder.
file_io.recursive_create_dir(args.output_dir)
return args |
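A hypothetical direct invocation with placeholder paths; note the function also creates the output directory it is given.
args = parse_arguments(['preprocess.py',
                        '--input-file-pattern', 'gs://my-bucket/raw/train-*.csv',
                        '--output-dir', '/tmp/preproc_out',
                        '--schema-file', 'schema.json'])
print(args.input_file_pattern, args.output_dir, args.schema_file)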
def init_app(self, app=None, blueprint=None, additional_blueprints=None):
"""Update flask application with our api
:param Application app: a flask application
"""
if app is not None:
self.app = app
if blueprint is not None:
self.blueprint = blueprint
for resource in self.resources:
self.route(resource['resource'],
resource['view'],
*resource['urls'],
url_rule_options=resource['url_rule_options'])
if self.blueprint is not None:
self.app.register_blueprint(self.blueprint)
if additional_blueprints is not None:
for blueprint in additional_blueprints:
self.app.register_blueprint(blueprint)
self.app.config.setdefault('PAGE_SIZE', 30) | Update flask application with our api
    :param Application app: a flask application | Below is the instruction that describes the task:
### Input:
Update flask application with our api
:param Application app: a flask application
### Response:
def init_app(self, app=None, blueprint=None, additional_blueprints=None):
"""Update flask application with our api
:param Application app: a flask application
"""
if app is not None:
self.app = app
if blueprint is not None:
self.blueprint = blueprint
for resource in self.resources:
self.route(resource['resource'],
resource['view'],
*resource['urls'],
url_rule_options=resource['url_rule_options'])
if self.blueprint is not None:
self.app.register_blueprint(self.blueprint)
if additional_blueprints is not None:
for blueprint in additional_blueprints:
self.app.register_blueprint(blueprint)
self.app.config.setdefault('PAGE_SIZE', 30) |
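A hedged wiring sketch, assuming `api` is an instance of the class defining init_app (a flask-rest-jsonapi-style Api object) with its resources already registered.
from flask import Flask
app = Flask(__name__)
api.init_app(app)  # registers the stored routes and blueprints and defaults PAGE_SIZE to 30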
def _medianindex(self,v):
"""
find new position of vertex v according to adjacency in layer l+dir.
position is given by the median value of adjacent positions.
median heuristic is proven to achieve at most 3 times the minimum
of crossings (while barycenter achieve in theory the order of |V|)
"""
assert self.prevlayer()!=None
N = self._neighbors(v)
g=self.layout.grx
pos = [g[x].pos for x in N]
lp = len(pos)
if lp==0: return []
pos.sort()
pos = pos[::self.layout.dirh]
i,j = divmod(lp-1,2)
return [pos[i]] if j==0 else [pos[i],pos[i+j]] | find new position of vertex v according to adjacency in layer l+dir.
position is given by the median value of adjacent positions.
median heuristic is proven to achieve at most 3 times the minimum
    of crossings (while barycenter achieve in theory the order of |V|) | Below is the instruction that describes the task:
### Input:
find new position of vertex v according to adjacency in layer l+dir.
position is given by the median value of adjacent positions.
median heuristic is proven to achieve at most 3 times the minimum
of crossings (while barycenter achieve in theory the order of |V|)
### Response:
def _medianindex(self,v):
"""
find new position of vertex v according to adjacency in layer l+dir.
position is given by the median value of adjacent positions.
median heuristic is proven to achieve at most 3 times the minimum
of crossings (while barycenter achieve in theory the order of |V|)
"""
assert self.prevlayer()!=None
N = self._neighbors(v)
g=self.layout.grx
pos = [g[x].pos for x in N]
lp = len(pos)
if lp==0: return []
pos.sort()
pos = pos[::self.layout.dirh]
i,j = divmod(lp-1,2)
return [pos[i]] if j==0 else [pos[i],pos[i+j]] |
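The median rule can be illustrated on its own, independent of the layout machinery; the neighbor positions below are invented.
pos = sorted([2, 5, 7, 11])       # positions of neighbors in the adjacent layer
i, j = divmod(len(pos) - 1, 2)    # len=4 -> i=1, j=1, so two middle values are kept
candidates = [pos[i]] if j == 0 else [pos[i], pos[i + j]]
print(candidates)                 # -> [5, 7]; an odd number of neighbors yields a single median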
def transport_param(image):
""" Parse DockerImage info into skopeo parameter
:param image: DockerImage
:return: string. skopeo parameter specifying image
"""
transports = {SkopeoTransport.CONTAINERS_STORAGE: "containers-storage:",
SkopeoTransport.DIRECTORY: "dir:",
SkopeoTransport.DOCKER: "docker://",
SkopeoTransport.DOCKER_ARCHIVE: "docker-archive",
SkopeoTransport.DOCKER_DAEMON: "docker-daemon:",
SkopeoTransport.OCI: "oci:",
SkopeoTransport.OSTREE: "ostree:"}
transport = image.transport
tag = image.tag
repository = image.name
path = image.path
if not transport:
transport = SkopeoTransport.DOCKER
command = transports[transport]
path_required = [SkopeoTransport.DIRECTORY, SkopeoTransport.DOCKER_ARCHIVE, SkopeoTransport.OCI]
if transport in path_required and path is None:
raise ValueError(transports[transport] + " path is required to be specified")
if transport == SkopeoTransport.DIRECTORY:
return command + path
if transport == SkopeoTransport.DOCKER_ARCHIVE:
command += path
if repository is None:
return command
command += ":"
if transport in [SkopeoTransport.CONTAINERS_STORAGE, SkopeoTransport.DOCKER,
SkopeoTransport.DOCKER_ARCHIVE, transport.DOCKER_DAEMON]:
return command + repository + ":" + tag
if transport == SkopeoTransport.OCI:
return command + path + ":" + tag
if transport == SkopeoTransport.OSTREE:
return command + repository + ("@" + path if path else "")
raise ConuException("This transport is not supported") | Parse DockerImage info into skopeo parameter
:param image: DockerImage
    :return: string. skopeo parameter specifying image | Below is the instruction that describes the task:
### Input:
Parse DockerImage info into skopeo parameter
:param image: DockerImage
:return: string. skopeo parameter specifying image
### Response:
def transport_param(image):
""" Parse DockerImage info into skopeo parameter
:param image: DockerImage
:return: string. skopeo parameter specifying image
"""
transports = {SkopeoTransport.CONTAINERS_STORAGE: "containers-storage:",
SkopeoTransport.DIRECTORY: "dir:",
SkopeoTransport.DOCKER: "docker://",
SkopeoTransport.DOCKER_ARCHIVE: "docker-archive",
SkopeoTransport.DOCKER_DAEMON: "docker-daemon:",
SkopeoTransport.OCI: "oci:",
SkopeoTransport.OSTREE: "ostree:"}
transport = image.transport
tag = image.tag
repository = image.name
path = image.path
if not transport:
transport = SkopeoTransport.DOCKER
command = transports[transport]
path_required = [SkopeoTransport.DIRECTORY, SkopeoTransport.DOCKER_ARCHIVE, SkopeoTransport.OCI]
if transport in path_required and path is None:
raise ValueError(transports[transport] + " path is required to be specified")
if transport == SkopeoTransport.DIRECTORY:
return command + path
if transport == SkopeoTransport.DOCKER_ARCHIVE:
command += path
if repository is None:
return command
command += ":"
if transport in [SkopeoTransport.CONTAINERS_STORAGE, SkopeoTransport.DOCKER,
SkopeoTransport.DOCKER_ARCHIVE, transport.DOCKER_DAEMON]:
return command + repository + ":" + tag
if transport == SkopeoTransport.OCI:
return command + path + ":" + tag
if transport == SkopeoTransport.OSTREE:
return command + repository + ("@" + path if path else "")
raise ConuException("This transport is not supported") |
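A hedged end-to-end check with a minimal stand-in object; the attribute names mirror what the function reads, and the image name is arbitrary.
class _FakeImage:                      # stand-in for conu's DockerImage
    transport = SkopeoTransport.DOCKER
    name = 'fedora'
    tag = 'latest'
    path = None
print(transport_param(_FakeImage()))   # -> docker://fedora:latest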
def addNewMainWindow(self, settings=None, inspectorFullName=None):
""" Creates and shows a new MainWindow.
If inspectorFullName is set, it will set the identifier from that name.
If the inspector identifier is not found in the registry, a KeyError is raised.
"""
mainWindow = MainWindow(self)
self.mainWindows.append(mainWindow)
self.windowActionGroup.addAction(mainWindow.activateWindowAction)
self.repopulateAllWindowMenus()
if settings:
mainWindow.readViewSettings(settings)
if inspectorFullName:
inspectorId = nameToIdentifier(inspectorFullName)
mainWindow.setInspectorById(inspectorId)
if mainWindow.inspectorRegItem: # can be None at start
inspectorId = mainWindow.inspectorRegItem.identifier
mainWindow.getInspectorActionById(inspectorId).setChecked(True)
logger.info("Created new window with inspector: {}"
.format(mainWindow.inspectorRegItem.fullName))
else:
logger.info("Created new window without inspector")
mainWindow.drawInspectorContents(reason=UpdateReason.NEW_MAIN_WINDOW)
mainWindow.show()
if sys.platform.startswith('darwin'):
# Calling raise before the QApplication.exec_ only shows the last window
# that was added. Therefore we also call activeWindow. However, this may not
# always be desirable. TODO: make optional?
mainWindow.raise_()
pass
return mainWindow | Creates and shows a new MainWindow.
If inspectorFullName is set, it will set the identifier from that name.
        If the inspector identifier is not found in the registry, a KeyError is raised. | Below is the instruction that describes the task:
### Input:
Creates and shows a new MainWindow.
If inspectorFullName is set, it will set the identifier from that name.
If the inspector identifier is not found in the registry, a KeyError is raised.
### Response:
def addNewMainWindow(self, settings=None, inspectorFullName=None):
""" Creates and shows a new MainWindow.
If inspectorFullName is set, it will set the identifier from that name.
If the inspector identifier is not found in the registry, a KeyError is raised.
"""
mainWindow = MainWindow(self)
self.mainWindows.append(mainWindow)
self.windowActionGroup.addAction(mainWindow.activateWindowAction)
self.repopulateAllWindowMenus()
if settings:
mainWindow.readViewSettings(settings)
if inspectorFullName:
inspectorId = nameToIdentifier(inspectorFullName)
mainWindow.setInspectorById(inspectorId)
if mainWindow.inspectorRegItem: # can be None at start
inspectorId = mainWindow.inspectorRegItem.identifier
mainWindow.getInspectorActionById(inspectorId).setChecked(True)
logger.info("Created new window with inspector: {}"
.format(mainWindow.inspectorRegItem.fullName))
else:
logger.info("Created new window without inspector")
mainWindow.drawInspectorContents(reason=UpdateReason.NEW_MAIN_WINDOW)
mainWindow.show()
if sys.platform.startswith('darwin'):
# Calling raise before the QApplication.exec_ only shows the last window
# that was added. Therefore we also call activeWindow. However, this may not
# always be desirable. TODO: make optional?
mainWindow.raise_()
pass
return mainWindow |
def mirror(self, axes='x'):
"""
Generates a symmetry of the Polyhedron respect global axes.
:param axes: 'x', 'y', 'z', 'xy', 'xz', 'yz'...
:type axes: str
:returns: ``pyny.Polyhedron``
"""
polygon = np.array([[0,0], [0,1], [1,1]])
space = Space(Place(polygon, polyhedra=self))
return space.mirror(axes, inplace=False)[0].polyhedra[0] | Generates a symmetry of the Polyhedron respect global axes.
:param axes: 'x', 'y', 'z', 'xy', 'xz', 'yz'...
:type axes: str
    :returns: ``pyny.Polyhedron`` | Below is the instruction that describes the task:
### Input:
Generates a symmetry of the Polyhedron respect global axes.
:param axes: 'x', 'y', 'z', 'xy', 'xz', 'yz'...
:type axes: str
:returns: ``pyny.Polyhedron``
### Response:
def mirror(self, axes='x'):
"""
Generates a symmetry of the Polyhedron respect global axes.
:param axes: 'x', 'y', 'z', 'xy', 'xz', 'yz'...
:type axes: str
:returns: ``pyny.Polyhedron``
"""
polygon = np.array([[0,0], [0,1], [1,1]])
space = Space(Place(polygon, polyhedra=self))
return space.mirror(axes, inplace=False)[0].polyhedra[0] |
def __get_merged_api_info(self, services):
"""Builds a description of an API.
Args:
services: List of protorpc.remote.Service instances implementing an
api/version.
Returns:
The _ApiInfo object to use for the API that the given services implement.
Raises:
ApiConfigurationError: If there's something wrong with the API
configuration, such as a multiclass API decorated with different API
descriptors (see the docstring for api()).
"""
merged_api_info = services[0].api_info
# Verify that, if there are multiple classes here, they're allowed to
# implement the same API.
for service in services[1:]:
if not merged_api_info.is_same_api(service.api_info):
raise api_exceptions.ApiConfigurationError(
_MULTICLASS_MISMATCH_ERROR_TEMPLATE % (service.api_info.name,
service.api_info.api_version))
return merged_api_info | Builds a description of an API.
Args:
services: List of protorpc.remote.Service instances implementing an
api/version.
Returns:
The _ApiInfo object to use for the API that the given services implement.
Raises:
ApiConfigurationError: If there's something wrong with the API
configuration, such as a multiclass API decorated with different API
        descriptors (see the docstring for api()). | Below is the instruction that describes the task:
### Input:
Builds a description of an API.
Args:
services: List of protorpc.remote.Service instances implementing an
api/version.
Returns:
The _ApiInfo object to use for the API that the given services implement.
Raises:
ApiConfigurationError: If there's something wrong with the API
configuration, such as a multiclass API decorated with different API
descriptors (see the docstring for api()).
### Response:
def __get_merged_api_info(self, services):
"""Builds a description of an API.
Args:
services: List of protorpc.remote.Service instances implementing an
api/version.
Returns:
The _ApiInfo object to use for the API that the given services implement.
Raises:
ApiConfigurationError: If there's something wrong with the API
configuration, such as a multiclass API decorated with different API
descriptors (see the docstring for api()).
"""
merged_api_info = services[0].api_info
# Verify that, if there are multiple classes here, they're allowed to
# implement the same API.
for service in services[1:]:
if not merged_api_info.is_same_api(service.api_info):
raise api_exceptions.ApiConfigurationError(
_MULTICLASS_MISMATCH_ERROR_TEMPLATE % (service.api_info.name,
service.api_info.api_version))
return merged_api_info |
def delete_session(self, ticket):
'''
Delete a session record associated with a service ticket.
'''
assert isinstance(self.session_storage_adapter, CASSessionAdapter)
logging.debug('[CAS] Deleting session for ticket {}'.format(ticket))
    self.session_storage_adapter.delete(ticket) | Delete a session record associated with a service ticket. | Below is the instruction that describes the task:
### Input:
Delete a session record associated with a service ticket.
### Response:
def delete_session(self, ticket):
'''
Delete a session record associated with a service ticket.
'''
assert isinstance(self.session_storage_adapter, CASSessionAdapter)
logging.debug('[CAS] Deleting session for ticket {}'.format(ticket))
self.session_storage_adapter.delete(ticket) |
def pyquil_to_circuit(program: pyquil.Program) -> Circuit:
"""Convert a protoquil pyQuil program to a QuantumFlow Circuit"""
circ = Circuit()
for inst in program.instructions:
# print(type(inst))
if isinstance(inst, pyquil.Declare): # Ignore
continue
if isinstance(inst, pyquil.Halt): # Ignore
continue
if isinstance(inst, pyquil.Pragma): # TODO Barrier?
continue
elif isinstance(inst, pyquil.Measurement):
circ += Measure(inst.qubit.index)
# elif isinstance(inst, pyquil.ResetQubit): # TODO
# continue
elif isinstance(inst, pyquil.Gate):
defgate = STDGATES[inst.name]
gate = defgate(*inst.params)
qubits = [q.index for q in inst.qubits]
gate = gate.relabel(qubits)
circ += gate
else:
raise ValueError('PyQuil program is not protoquil')
    return circ | Convert a protoquil pyQuil program to a QuantumFlow Circuit | Below is the instruction that describes the task:
### Input:
Convert a protoquil pyQuil program to a QuantumFlow Circuit
### Response:
def pyquil_to_circuit(program: pyquil.Program) -> Circuit:
"""Convert a protoquil pyQuil program to a QuantumFlow Circuit"""
circ = Circuit()
for inst in program.instructions:
# print(type(inst))
if isinstance(inst, pyquil.Declare): # Ignore
continue
if isinstance(inst, pyquil.Halt): # Ignore
continue
if isinstance(inst, pyquil.Pragma): # TODO Barrier?
continue
elif isinstance(inst, pyquil.Measurement):
circ += Measure(inst.qubit.index)
# elif isinstance(inst, pyquil.ResetQubit): # TODO
# continue
elif isinstance(inst, pyquil.Gate):
defgate = STDGATES[inst.name]
gate = defgate(*inst.params)
qubits = [q.index for q in inst.qubits]
gate = gate.relabel(qubits)
circ += gate
else:
raise ValueError('PyQuil program is not protoquil')
return circ |
def getVertices(self,data):
"""
Returns the vertices of this region already transformed and ready-to-use.
Internally uses :py:meth:`Bone.transformVertices()`\ .
"""
return self.bone.transformVertices(data,self.vertices,self.dims) | Returns the vertices of this region already transformed and ready-to-use.
Internally uses :py:meth:`Bone.transformVertices()`\ . | Below is the the instruction that describes the task:
### Input:
Returns the vertices of this region already transformed and ready-to-use.
Internally uses :py:meth:`Bone.transformVertices()`\ .
### Response:
def getVertices(self,data):
"""
Returns the vertices of this region already transformed and ready-to-use.
Internally uses :py:meth:`Bone.transformVertices()`\ .
"""
return self.bone.transformVertices(data,self.vertices,self.dims) |
def namer(cls, imageUrl, pageUrl):
"""Use page URL sequence which is apparently increasing."""
num = pageUrl.split('/')[-1]
ext = imageUrl.rsplit('.', 1)[1]
return "thethinhline-%s.%s" % (num, ext) | Use page URL sequence which is apparently increasing. | Below is the the instruction that describes the task:
### Input:
Use page URL sequence which is apparently increasing.
### Response:
def namer(cls, imageUrl, pageUrl):
"""Use page URL sequence which is apparently increasing."""
num = pageUrl.split('/')[-1]
ext = imageUrl.rsplit('.', 1)[1]
return "thethinhline-%s.%s" % (num, ext) |
def write_bvec_file(bvecs, bvec_file):
"""
Write an array of bvecs to a bvec file
:param bvecs: array with the vectors
:param bvec_file: filepath to write to
"""
if bvec_file is None:
return
logger.info('Saving BVEC file: %s' % bvec_file)
with open(bvec_file, 'w') as text_file:
# Map a dicection to string join them using a space and write to the file
text_file.write('%s\n' % ' '.join(map(str, bvecs[:, 0])))
text_file.write('%s\n' % ' '.join(map(str, bvecs[:, 1])))
text_file.write('%s\n' % ' '.join(map(str, bvecs[:, 2]))) | Write an array of bvecs to a bvec file
:param bvecs: array with the vectors
    :param bvec_file: filepath to write to | Below is the instruction that describes the task:
### Input:
Write an array of bvecs to a bvec file
:param bvecs: array with the vectors
:param bvec_file: filepath to write to
### Response:
def write_bvec_file(bvecs, bvec_file):
"""
Write an array of bvecs to a bvec file
:param bvecs: array with the vectors
:param bvec_file: filepath to write to
"""
if bvec_file is None:
return
logger.info('Saving BVEC file: %s' % bvec_file)
with open(bvec_file, 'w') as text_file:
# Map a dicection to string join them using a space and write to the file
text_file.write('%s\n' % ' '.join(map(str, bvecs[:, 0])))
text_file.write('%s\n' % ' '.join(map(str, bvecs[:, 1])))
text_file.write('%s\n' % ' '.join(map(str, bvecs[:, 2]))) |
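A small usage sketch with a made-up three-direction gradient table; the function writes the x, y and z components as three space-separated rows.
import numpy as np
bvecs = np.array([[1.0, 0.0, 0.0],
                  [0.0, 1.0, 0.0],
                  [0.0, 0.0, 1.0]])
write_bvec_file(bvecs, '/tmp/example.bvec')  # path is a placeholder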
def get_users(self, usernames):
"""Fetch user info for given usernames
:param username: The usernames you want metadata for (max. 50)
"""
if self.standard_grant_type is not "authorization_code":
raise DeviantartError("Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.")
response = self._req('/user/whois', post_data={
"usernames":usernames
})
users = []
for item in response['results']:
u = User()
u.from_dict(item)
users.append(u)
return users | Fetch user info for given usernames
    :param username: The usernames you want metadata for (max. 50) | Below is the instruction that describes the task:
### Input:
Fetch user info for given usernames
:param username: The usernames you want metadata for (max. 50)
### Response:
def get_users(self, usernames):
"""Fetch user info for given usernames
:param username: The usernames you want metadata for (max. 50)
"""
if self.standard_grant_type is not "authorization_code":
raise DeviantartError("Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.")
response = self._req('/user/whois', post_data={
"usernames":usernames
})
users = []
for item in response['results']:
u = User()
u.from_dict(item)
users.append(u)
return users |
def needkwargs(*argnames):
"""Function decorator which checks that the decorated function is called
with a set of required kwargs.
Args:
*argnames: String keyword argument names.
Raises:
ValueError: If a required kwarg is missing in the decorated function
call.
"""
required = set(argnames)
def decorator(func):
def inner(*args, **kwargs):
missing = required - set(kwargs)
if missing:
err = "%s kwargs are missing." % list(missing)
raise ValueError(err)
return func(*args, **kwargs)
return inner
return decorator | Function decorator which checks that the decorated function is called
with a set of required kwargs.
Args:
*argnames: String keyword argument names.
Raises:
ValueError: If a required kwarg is missing in the decorated function
            call. | Below is the instruction that describes the task:
### Input:
Function decorator which checks that the decorated function is called
with a set of required kwargs.
Args:
*argnames: String keyword argument names.
Raises:
ValueError: If a required kwarg is missing in the decorated function
call.
### Response:
def needkwargs(*argnames):
"""Function decorator which checks that the decorated function is called
with a set of required kwargs.
Args:
*argnames: String keyword argument names.
Raises:
ValueError: If a required kwarg is missing in the decorated function
call.
"""
required = set(argnames)
def decorator(func):
def inner(*args, **kwargs):
missing = required - set(kwargs)
if missing:
err = "%s kwargs are missing." % list(missing)
raise ValueError(err)
return func(*args, **kwargs)
return inner
return decorator |
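A self-contained usage sketch of the decorator; the decorated function and its kwargs are invented for illustration.
@needkwargs('host', 'port')
def connect(**kwargs):
    return '{host}:{port}'.format(**kwargs)

connect(host='localhost', port=8080)  # returns 'localhost:8080'
connect(host='localhost')             # raises ValueError: ['port'] kwargs are missing.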
def _endpoint(self, endpoint, action, *url_args):
"""Return the URL for the action.
:param str endpoint: The controller
:param str action: The action provided by the controller
:param url_args: Additional endpoints(for endpoints that take part of
the url as option)
:return: Full URL for the requested action
"""
args = (self.api_base, endpoint, action)
if action == '':
args = (self.api_base, endpoint)
api_url = "/".join(args)
if url_args:
if len(url_args) == 1:
api_url += "/" + url_args[0]
else:
api_url += "/".join(url_args)
return api_url | Return the URL for the action.
:param str endpoint: The controller
:param str action: The action provided by the controller
:param url_args: Additional endpoints(for endpoints that take part of
the url as option)
    :return: Full URL for the requested action | Below is the instruction that describes the task:
### Input:
Return the URL for the action.
:param str endpoint: The controller
:param str action: The action provided by the controller
:param url_args: Additional endpoints(for endpoints that take part of
the url as option)
:return: Full URL for the requested action
### Response:
def _endpoint(self, endpoint, action, *url_args):
"""Return the URL for the action.
:param str endpoint: The controller
:param str action: The action provided by the controller
:param url_args: Additional endpoints(for endpoints that take part of
the url as option)
:return: Full URL for the requested action
"""
args = (self.api_base, endpoint, action)
if action == '':
args = (self.api_base, endpoint)
api_url = "/".join(args)
if url_args:
if len(url_args) == 1:
api_url += "/" + url_args[0]
else:
api_url += "/".join(url_args)
return api_url |
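To make the joining rules concrete, here is a standalone sketch of the same logic with placeholder values (outside the class, so the base URL is set by hand).
api_base = 'https://api.example.com/v1'
endpoint, action, url_args = 'reports', 'daily', ('2024-01-01',)
api_url = '/'.join((api_base, endpoint, action))
if url_args and len(url_args) == 1:
    api_url += '/' + url_args[0]
print(api_url)  # -> https://api.example.com/v1/reports/daily/2024-01-01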
def until_any_child_in_state(self, state, timeout=None):
"""Return a tornado Future; resolves when any client is in specified state"""
return until_any(*[r.until_state(state) for r in dict.values(self.children)],
                         timeout=timeout) | Return a tornado Future; resolves when any client is in specified state | Below is the instruction that describes the task:
### Input:
Return a tornado Future; resolves when any client is in specified state
### Response:
def until_any_child_in_state(self, state, timeout=None):
"""Return a tornado Future; resolves when any client is in specified state"""
return until_any(*[r.until_state(state) for r in dict.values(self.children)],
timeout=timeout) |
def clustering_gmm(data,
n_clusters,
tol=1e-7,
min_covar=None,
scale='logicle'):
"""
Find clusters in an array using a Gaussian Mixture Model.
Before clustering, `data` can be automatically rescaled as specified by
the `scale` argument.
Parameters
----------
data : FCSData or array_like
Data to cluster.
n_clusters : int
Number of clusters to find.
tol : float, optional
Tolerance for convergence. Directly passed to either
``GaussianMixture`` or ``GMM``, depending on ``scikit-learn``'s
version.
min_covar : float, optional
The minimum trace that the initial covariance matrix will have. If
``scikit-learn``'s version is older than 0.18, `min_covar` is also
passed directly to ``GMM``.
scale : str, optional
Rescaling applied to `data` before performing clustering. Can be
either ``linear`` (no rescaling), ``log``, or ``logicle``.
Returns
-------
labels : array
Nx1 array with labels for each element in `data`, assigning
``data[i]`` to cluster ``labels[i]``.
Notes
-----
A Gaussian Mixture Model finds clusters by fitting a linear combination
of `n_clusters` Gaussian probability density functions (pdf) to `data`
using Expectation Maximization (EM).
This method can be fairly sensitive to the initial parameter choice. To
generate a reasonable set of initial conditions, `clustering_gmm`
first divides all points in `data` into `n_clusters` groups of the
same size based on their Euclidean distance to the minimum value. Then,
for each group, the 50% samples farther away from the mean are
discarded. The mean and covariance are calculated from the remaining
samples of each group, and used as initial conditions for the GMM EM
algorithm.
`clustering_gmm` internally uses a `GaussianMixture` object from the
``scikit-learn`` library (``GMM`` if ``scikit-learn``'s version is
lower than 0.18), with full covariance matrices for each cluster. For
more information, consult ``scikit-learn``'s documentation.
"""
# Initialize min_covar parameter
# Parameter is initialized differently depending on scikit's version
if min_covar is None:
if packaging.version.parse(sklearn.__version__) \
>= packaging.version.parse('0.18'):
min_covar = 1e-3
else:
min_covar = 5e-5
# Copy events before rescaling
data = data.copy()
# Apply rescaling
if scale=='linear':
# No rescaling
pass
elif scale=='log':
# Logarithm of zero and negatives is undefined. Therefore, saturate
# any non-positives to a small positive value.
# The machine epsilon `eps` is the smallest number such that
# `1.0 + eps != eps`. For a 64-bit floating point, `eps ~= 1e-15`.
data[data < 1e-15] = 1e-15
# Rescale
data = np.log10(data)
elif scale=='logicle':
# Use the logicle transform class in the plot module, and transform
# data one channel at a time.
for ch in range(data.shape[1]):
# We need a transformation from "data value" to "display scale"
# units. To do so, we use an inverse logicle transformation.
t = FlowCal.plot._LogicleTransform(data=data, channel=ch).inverted()
data[:,ch] = t.transform_non_affine(data[:,ch],
mask_out_of_range=False)
else:
raise ValueError("scale {} not supported".format(scale))
###
# Parameter initialization
###
weights = np.tile(1.0 / n_clusters, n_clusters)
means = []
covars = []
# Calculate distance to minimum value. Then, sort based on this distance.
dist = np.sum((data - np.min(data, axis=0))**2., axis=1)
sorted_idx = np.argsort(dist)
# Expected number of elements per cluster
n_per_cluster = data.shape[0]/float(n_clusters)
# Get means and covariances per cluster
# We will just use a fraction of ``1 - discard_frac`` of the data.
# Data at the edges that actually corresponds to another cluster can
# really mess up the final result.
discard_frac = 0.5
for i in range(n_clusters):
il = int((i + discard_frac/2)*n_per_cluster)
ih = int((i + 1 - discard_frac/2)*n_per_cluster)
sorted_idx_cluster = sorted_idx[il:ih]
data_cluster = data[sorted_idx_cluster]
# Calculate means and covariances
means.append(np.mean(data_cluster, axis=0))
if data.shape[1] == 1:
cov = np.cov(data_cluster.T).reshape(1,1)
else:
cov = np.cov(data_cluster.T)
# Add small number to diagonal to avoid near-singular covariances
cov += np.eye(data.shape[1]) * min_covar
covars.append(cov)
# Means should be an array
means = np.array(means)
###
# Run Gaussian Mixture Model Clustering
###
if packaging.version.parse(sklearn.__version__) \
>= packaging.version.parse('0.18'):
# GaussianMixture uses precisions, the inverse of covariances.
# To get the inverse, we solve the linear equation C*P = I. We also
# use the fact that C is positive definite.
precisions = [scipy.linalg.solve(c,
np.eye(c.shape[0]),
assume_a='pos')
for c in covars]
precisions = np.array(precisions)
# Initialize GaussianMixture object
gmm = GaussianMixture(n_components=n_clusters,
tol=tol,
covariance_type='full',
weights_init=weights,
means_init=means,
precisions_init=precisions,
max_iter=500)
else:
# Initialize GMM object
gmm = GMM(n_components=n_clusters,
tol=tol,
min_covar=min_covar,
covariance_type='full',
params='mc',
init_params='')
# Set initial parameters
gmm.weight_ = weights
gmm.means_ = means
gmm.covars_ = covars
# Fit
gmm.fit(data)
# Get labels by sampling from the responsibilities
# This avoids the complete elimination of a cluster if two or more
# clusters have very similar means.
resp = gmm.predict_proba(data)
labels = [np.random.choice(range(n_clusters), p=ri) for ri in resp]
return labels | Find clusters in an array using a Gaussian Mixture Model.
Before clustering, `data` can be automatically rescaled as specified by
the `scale` argument.
Parameters
----------
data : FCSData or array_like
Data to cluster.
n_clusters : int
Number of clusters to find.
tol : float, optional
Tolerance for convergence. Directly passed to either
``GaussianMixture`` or ``GMM``, depending on ``scikit-learn``'s
version.
min_covar : float, optional
The minimum trace that the initial covariance matrix will have. If
``scikit-learn``'s version is older than 0.18, `min_covar` is also
passed directly to ``GMM``.
scale : str, optional
Rescaling applied to `data` before performing clustering. Can be
either ``linear`` (no rescaling), ``log``, or ``logicle``.
Returns
-------
labels : array
Nx1 array with labels for each element in `data`, assigning
``data[i]`` to cluster ``labels[i]``.
Notes
-----
A Gaussian Mixture Model finds clusters by fitting a linear combination
of `n_clusters` Gaussian probability density functions (pdf) to `data`
using Expectation Maximization (EM).
This method can be fairly sensitive to the initial parameter choice. To
generate a reasonable set of initial conditions, `clustering_gmm`
first divides all points in `data` into `n_clusters` groups of the
same size based on their Euclidean distance to the minimum value. Then,
for each group, the 50% of samples farthest from the mean are
discarded. The mean and covariance are calculated from the remaining
samples of each group, and used as initial conditions for the GMM EM
algorithm.
`clustering_gmm` internally uses a `GaussianMixture` object from the
``scikit-learn`` library (``GMM`` if ``scikit-learn``'s version is
lower than 0.18), with full covariance matrices for each cluster. For
more information, consult ``scikit-learn``'s documentation. | Below is the instruction that describes the task:
### Input:
Find clusters in an array using a Gaussian Mixture Model.
Before clustering, `data` can be automatically rescaled as specified by
the `scale` argument.
Parameters
----------
data : FCSData or array_like
Data to cluster.
n_clusters : int
Number of clusters to find.
tol : float, optional
Tolerance for convergence. Directly passed to either
``GaussianMixture`` or ``GMM``, depending on ``scikit-learn``'s
version.
min_covar : float, optional
The minimum trace that the initial covariance matrix will have. If
``scikit-learn``'s version is older than 0.18, `min_covar` is also
passed directly to ``GMM``.
scale : str, optional
Rescaling applied to `data` before performing clustering. Can be
either ``linear`` (no rescaling), ``log``, or ``logicle``.
Returns
-------
labels : array
Nx1 array with labels for each element in `data`, assigning
``data[i]`` to cluster ``labels[i]``.
Notes
-----
A Gaussian Mixture Model finds clusters by fitting a linear combination
of `n_clusters` Gaussian probability density functions (pdf) to `data`
using Expectation Maximization (EM).
This method can be fairly sensitive to the initial parameter choice. To
generate a reasonable set of initial conditions, `clustering_gmm`
first divides all points in `data` into `n_clusters` groups of the
same size based on their Euclidean distance to the minimum value. Then,
for each group, the 50% of samples farthest from the mean are
discarded. The mean and covariance are calculated from the remaining
samples of each group, and used as initial conditions for the GMM EM
algorithm.
`clustering_gmm` internally uses a `GaussianMixture` object from the
``scikit-learn`` library (``GMM`` if ``scikit-learn``'s version is
lower than 0.18), with full covariance matrices for each cluster. For
more information, consult ``scikit-learn``'s documentation.
### Response:
def clustering_gmm(data,
n_clusters,
tol=1e-7,
min_covar=None,
scale='logicle'):
"""
Find clusters in an array using a Gaussian Mixture Model.
Before clustering, `data` can be automatically rescaled as specified by
the `scale` argument.
Parameters
----------
data : FCSData or array_like
Data to cluster.
n_clusters : int
Number of clusters to find.
tol : float, optional
Tolerance for convergence. Directly passed to either
``GaussianMixture`` or ``GMM``, depending on ``scikit-learn``'s
version.
min_covar : float, optional
The minimum trace that the initial covariance matrix will have. If
``scikit-learn``'s version is older than 0.18, `min_covar` is also
passed directly to ``GMM``.
scale : str, optional
Rescaling applied to `data` before performing clustering. Can be
either ``linear`` (no rescaling), ``log``, or ``logicle``.
Returns
-------
labels : array
Nx1 array with labels for each element in `data`, assigning
``data[i]`` to cluster ``labels[i]``.
Notes
-----
A Gaussian Mixture Model finds clusters by fitting a linear combination
of `n_clusters` Gaussian probability density functions (pdf) to `data`
using Expectation Maximization (EM).
This method can be fairly sensitive to the initial parameter choice. To
generate a reasonable set of initial conditions, `clustering_gmm`
first divides all points in `data` into `n_clusters` groups of the
same size based on their Euclidean distance to the minimum value. Then,
    for each group, the 50% of samples farthest from the mean are
discarded. The mean and covariance are calculated from the remaining
samples of each group, and used as initial conditions for the GMM EM
algorithm.
`clustering_gmm` internally uses a `GaussianMixture` object from the
``scikit-learn`` library (``GMM`` if ``scikit-learn``'s version is
lower than 0.18), with full covariance matrices for each cluster. For
more information, consult ``scikit-learn``'s documentation.
"""
# Initialize min_covar parameter
# Parameter is initialized differently depending on scikit's version
if min_covar is None:
if packaging.version.parse(sklearn.__version__) \
>= packaging.version.parse('0.18'):
min_covar = 1e-3
else:
min_covar = 5e-5
# Copy events before rescaling
data = data.copy()
# Apply rescaling
if scale=='linear':
# No rescaling
pass
elif scale=='log':
# Logarithm of zero and negatives is undefined. Therefore, saturate
# any non-positives to a small positive value.
# The machine epsilon `eps` is the smallest number such that
# `1.0 + eps != eps`. For a 64-bit floating point, `eps ~= 1e-15`.
data[data < 1e-15] = 1e-15
# Rescale
data = np.log10(data)
elif scale=='logicle':
# Use the logicle transform class in the plot module, and transform
# data one channel at a time.
for ch in range(data.shape[1]):
# We need a transformation from "data value" to "display scale"
# units. To do so, we use an inverse logicle transformation.
t = FlowCal.plot._LogicleTransform(data=data, channel=ch).inverted()
data[:,ch] = t.transform_non_affine(data[:,ch],
mask_out_of_range=False)
else:
raise ValueError("scale {} not supported".format(scale))
###
# Parameter initialization
###
weights = np.tile(1.0 / n_clusters, n_clusters)
means = []
covars = []
# Calculate distance to minimum value. Then, sort based on this distance.
dist = np.sum((data - np.min(data, axis=0))**2., axis=1)
sorted_idx = np.argsort(dist)
# Expected number of elements per cluster
n_per_cluster = data.shape[0]/float(n_clusters)
# Get means and covariances per cluster
# We will just use a fraction of ``1 - discard_frac`` of the data.
# Data at the edges that actually corresponds to another cluster can
# really mess up the final result.
discard_frac = 0.5
for i in range(n_clusters):
il = int((i + discard_frac/2)*n_per_cluster)
ih = int((i + 1 - discard_frac/2)*n_per_cluster)
sorted_idx_cluster = sorted_idx[il:ih]
data_cluster = data[sorted_idx_cluster]
# Calculate means and covariances
means.append(np.mean(data_cluster, axis=0))
if data.shape[1] == 1:
cov = np.cov(data_cluster.T).reshape(1,1)
else:
cov = np.cov(data_cluster.T)
# Add small number to diagonal to avoid near-singular covariances
cov += np.eye(data.shape[1]) * min_covar
covars.append(cov)
# Means should be an array
means = np.array(means)
###
# Run Gaussian Mixture Model Clustering
###
if packaging.version.parse(sklearn.__version__) \
>= packaging.version.parse('0.18'):
# GaussianMixture uses precisions, the inverse of covariances.
# To get the inverse, we solve the linear equation C*P = I. We also
# use the fact that C is positive definite.
precisions = [scipy.linalg.solve(c,
np.eye(c.shape[0]),
assume_a='pos')
for c in covars]
precisions = np.array(precisions)
# Initialize GaussianMixture object
gmm = GaussianMixture(n_components=n_clusters,
tol=tol,
covariance_type='full',
weights_init=weights,
means_init=means,
precisions_init=precisions,
max_iter=500)
else:
# Initialize GMM object
gmm = GMM(n_components=n_clusters,
tol=tol,
min_covar=min_covar,
covariance_type='full',
params='mc',
init_params='')
# Set initial parameters
gmm.weight_ = weights
gmm.means_ = means
gmm.covars_ = covars
# Fit
gmm.fit(data)
# Get labels by sampling from the responsibilities
# This avoids the complete elimination of a cluster if two or more
# clusters have very similar means.
resp = gmm.predict_proba(data)
labels = [np.random.choice(range(n_clusters), p=ri) for ri in resp]
return labels |
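A minimal usage sketch for clustering_gmm on synthetic two-cluster data, using scale='linear' so the logicle transform is never invoked; it assumes the function above and its module-level imports (numpy, scipy, scikit-learn, packaging) are in scope.

import numpy as np

np.random.seed(0)
cluster_a = np.random.normal(loc=[100., 100.], scale=10., size=(500, 2))
cluster_b = np.random.normal(loc=[1000., 1000.], scale=50., size=(500, 2))
data = np.vstack([cluster_a, cluster_b])

# Cluster on a linear scale; labels come back as one entry per event.
labels = np.array(clustering_gmm(data, n_clusters=2, scale='linear'))
print(np.bincount(labels))   # roughly [500 500]; cluster order may vary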
def get_receive(self, script_list):
"""Return a list of received events contained in script_list."""
events = defaultdict(set)
for script in script_list:
if self.script_start_type(script) == self.HAT_WHEN_I_RECEIVE:
event = script.blocks[0].args[0].lower()
events[event].add(script)
        return events | Return a list of received events contained in script_list. | Below is the instruction that describes the task:
### Input:
Return a list of received events contained in script_list.
### Response:
def get_receive(self, script_list):
"""Return a list of received events contained in script_list."""
events = defaultdict(set)
for script in script_list:
if self.script_start_type(script) == self.HAT_WHEN_I_RECEIVE:
event = script.blocks[0].args[0].lower()
events[event].add(script)
return events |
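The grouping above depends on the caller's Script and Block objects. A standalone sketch of the same defaultdict pattern, with hypothetical stand-in classes:

from collections import defaultdict

# Hypothetical stand-ins for the Script/Block objects used by get_receive.
class _Block(object):
    def __init__(self, args):
        self.args = args

class _Script(object):
    def __init__(self, event):
        self.blocks = [_Block([event])]

scripts = [_Script('Start Game'), _Script('start game'), _Script('Game Over')]
events = defaultdict(set)
for script in scripts:
    # Same normalization as above: the event name is lower-cased before grouping.
    events[script.blocks[0].args[0].lower()].add(script)

print(sorted((name, len(group)) for name, group in events.items()))
# [('game over', 1), ('start game', 2)]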
def conv(arg,default=None,func=None):
'''
essentially, the generalization of
arg if arg else default
or
func(arg) if arg else default
'''
if func:
return func(arg) if arg else default;
else:
return arg if arg else default; | essentially, the generalization of
arg if arg else default
or
    func(arg) if arg else default | Below is the instruction that describes the task:
### Input:
essentially, the generalization of
arg if arg else default
or
func(arg) if arg else default
### Response:
def conv(arg,default=None,func=None):
'''
essentially, the generalization of
arg if arg else default
or
func(arg) if arg else default
'''
if func:
return func(arg) if arg else default;
else:
return arg if arg else default; |
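A few usage examples for conv, assuming the function above is in scope; the results follow directly from its truthiness checks.

# Usage examples, assuming conv as defined above is in scope.
print(conv('42', default=0, func=int))   # 42    (truthy arg, func applied)
print(conv('', default=0, func=int))     # 0     (falsy arg, default returned)
print(conv(None, default='n/a'))         # 'n/a' (no func, falsy arg)
print(conv('x', default='n/a'))          # 'x'   (no func, truthy arg)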
def householder(self):
"""Return Matrices u,b,v with self = ubv and b is in bidiagonal form
The algorithm uses householder transformations.
:return tuple (u,b,v): A tuple with the Matrix u, b and v.
and self = ubv (except some rounding errors)
u is a unitary matrix
b is a bidiagonal matrix.
v is a unitary matrix.
    :note: Currently the algorithm only works for square matrices
    :todo: Make sure that the bidiagonal matrix is 0.0 except on the bidiagonal.
Due to rounding errors, this is currently not ensured
"""
# copy instance to transform it to bidiagonal form.
bidiagMatrix = Matrix.from_two_dim_array(self.get_width(), self.get_height(), self.matrix)
# build identity matrix, which is used to calculate householder transformations
identityMatrixRow = Matrix(self.get_height(), self.get_height())
for i in xrange(self.get_height()):
identityMatrixRow.set_value(i, i, 1.0)
identityMatrixCol = Matrix(self.get_width(), self.get_width())
for i in xrange(self.get_width()):
identityMatrixCol.set_value(i, i, 1.0)
# zero out the k'th column and row
for k in xrange(self.get_width() - 1):
# vector with the values of the k'th column (first k-1 rows are 0)
x = Vector(self.get_height())
y = Vector(self.get_height())
if k > 0:
x.set_value(0, k - 1, bidiagMatrix.get_value(k, k - 1))
y.set_value(0, k - 1, bidiagMatrix.get_value(k, k - 1))
s = 0.0
for i in xrange(k, self.get_height()):
val = bidiagMatrix.get_value(k, i)
x.set_value(0, i, val)
s += (val ** 2)
s = sqrt(s)
# y must have same length as x
y.set_value(0, k, s)
tmp = x - y
norm = sqrt(sum(i[0] ** 2 for i in tmp.get_array()))
# calculate w = (x-y)/(|x-y|)
w = tmp / norm
# uk is the k'th householder matrix for the column
uk = identityMatrixRow - 2 * (w * w.transform())
bidiagMatrix = uk * bidiagMatrix
if k == 0:
# set u in first iteration.
u = uk
else:
u = u * uk
        # zero out the row
if k < self.get_width() - 2:
x = Vector(self.get_width())
y = Vector(self.get_width())
x.set_value(0, k, bidiagMatrix.get_value(k, k))
y.set_value(0, k, bidiagMatrix.get_value(k, k))
s = 0.0
for i in xrange(k + 1, bidiagMatrix.get_width()):
val = bidiagMatrix.get_value(i, k)
x.set_value(0, i, val)
s += (val ** 2)
# length of vector x ignoring the k'th value
s = sqrt(s)
# y must have same length as x, since k'th value is equal
# set k+1 value to s
y.set_value(0, k + 1, s)
tmp = x - y
norm = sqrt(sum(i[0] ** 2 for i in tmp.get_array()))
w = tmp / norm
# vk is the k'th householder matrix for the row
vk = identityMatrixCol - (2 * (w * w.transform()))
bidiagMatrix = bidiagMatrix * vk
if k == 0:
# set v in first iteration
v = vk
else:
v = vk * v
return (u, bidiagMatrix, v) | Return Matrices u,b,v with self = ubv and b is in bidiagonal form
The algorithm uses householder transformations.
:return tuple (u,b,v): A tuple with the Matrix u, b and v.
and self = ubv (except some rounding errors)
u is a unitary matrix
b is a bidiagonal matrix.
v is a unitary matrix.
:note: Currently the algorithm only works for square matrices
:todo: Make sure that the bidiagonal matrix is 0.0 except on the bidiagonal.
Due to rounding errors, this is currently not ensured | Below is the instruction that describes the task:
### Input:
Return Matrices u,b,v with self = ubv and b is in bidiagonal form
The algorithm uses householder transformations.
:return tuple (u,b,v): A tuple with the Matrix u, b and v.
and self = ubv (except some rounding errors)
u is a unitary matrix
b is a bidiagonal matrix.
v is a unitary matrix.
:note: Currently the algorithm only works for square matrices
:todo: Make sure that the bidiagonal matrix is 0.0 except on the bidiagonal.
Due to rounding errors, this is currently not ensured
### Response:
def householder(self):
"""Return Matrices u,b,v with self = ubv and b is in bidiagonal form
The algorithm uses householder transformations.
:return tuple (u,b,v): A tuple with the Matrix u, b and v.
and self = ubv (except some rounding errors)
u is a unitary matrix
b is a bidiagonal matrix.
v is a unitary matrix.
    :note: Currently the algorithm only works for square matrices
    :todo: Make sure that the bidiagonal matrix is 0.0 except on the bidiagonal.
Due to rounding errors, this is currently not ensured
"""
# copy instance to transform it to bidiagonal form.
bidiagMatrix = Matrix.from_two_dim_array(self.get_width(), self.get_height(), self.matrix)
# build identity matrix, which is used to calculate householder transformations
identityMatrixRow = Matrix(self.get_height(), self.get_height())
for i in xrange(self.get_height()):
identityMatrixRow.set_value(i, i, 1.0)
identityMatrixCol = Matrix(self.get_width(), self.get_width())
for i in xrange(self.get_width()):
identityMatrixCol.set_value(i, i, 1.0)
# zero out the k'th column and row
for k in xrange(self.get_width() - 1):
# vector with the values of the k'th column (first k-1 rows are 0)
x = Vector(self.get_height())
y = Vector(self.get_height())
if k > 0:
x.set_value(0, k - 1, bidiagMatrix.get_value(k, k - 1))
y.set_value(0, k - 1, bidiagMatrix.get_value(k, k - 1))
s = 0.0
for i in xrange(k, self.get_height()):
val = bidiagMatrix.get_value(k, i)
x.set_value(0, i, val)
s += (val ** 2)
s = sqrt(s)
# y must have same length as x
y.set_value(0, k, s)
tmp = x - y
norm = sqrt(sum(i[0] ** 2 for i in tmp.get_array()))
# calculate w = (x-y)/(|x-y|)
w = tmp / norm
# uk is the k'th householder matrix for the column
uk = identityMatrixRow - 2 * (w * w.transform())
bidiagMatrix = uk * bidiagMatrix
if k == 0:
# set u in first iteration.
u = uk
else:
u = u * uk
        # zero out the row
if k < self.get_width() - 2:
x = Vector(self.get_width())
y = Vector(self.get_width())
x.set_value(0, k, bidiagMatrix.get_value(k, k))
y.set_value(0, k, bidiagMatrix.get_value(k, k))
s = 0.0
for i in xrange(k + 1, bidiagMatrix.get_width()):
val = bidiagMatrix.get_value(i, k)
x.set_value(0, i, val)
s += (val ** 2)
# length of vector x ignoring the k'th value
s = sqrt(s)
# y must have same length as x, since k'th value is equal
# set k+1 value to s
y.set_value(0, k + 1, s)
tmp = x - y
norm = sqrt(sum(i[0] ** 2 for i in tmp.get_array()))
w = tmp / norm
# vk is the k'th householder matrix for the row
vk = identityMatrixCol - (2 * (w * w.transform()))
bidiagMatrix = bidiagMatrix * vk
if k == 0:
# set v in first iteration
v = vk
else:
v = vk * v
return (u, bidiagMatrix, v) |
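Each elimination step above applies a reflector of the form I - 2*w*w^T with w = (x - y)/|x - y|. A small self-contained numpy check of that identity, independent of the custom Matrix and Vector classes:

import numpy as np

# The reflector H = I - 2*w*w^T with w = (x - y)/|x - y| maps x onto y when
# |x| == |y|, and H is orthogonal; this is the elimination step used above.
x = np.array([3.0, 1.0, 4.0, 1.0])
y = np.zeros_like(x)
y[0] = np.linalg.norm(x)                       # same length as x, zeros below the pivot

w = (x - y) / np.linalg.norm(x - y)
H = np.eye(len(x)) - 2.0 * np.outer(w, w)

print(np.allclose(H @ x, y))                   # True: entries below the pivot are zeroed
print(np.allclose(H @ H.T, np.eye(len(x))))    # True: the reflector is orthogonal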
def models(cls, api_version=DEFAULT_API_VERSION):
"""Module depends on the API version:
* 2016-02-01: :mod:`v2016_02_01.models<azure.mgmt.resource.resources.v2016_02_01.models>`
* 2016-09-01: :mod:`v2016_09_01.models<azure.mgmt.resource.resources.v2016_09_01.models>`
* 2017-05-10: :mod:`v2017_05_10.models<azure.mgmt.resource.resources.v2017_05_10.models>`
* 2018-02-01: :mod:`v2018_02_01.models<azure.mgmt.resource.resources.v2018_02_01.models>`
* 2018-05-01: :mod:`v2018_05_01.models<azure.mgmt.resource.resources.v2018_05_01.models>`
"""
if api_version == '2016-02-01':
from .v2016_02_01 import models
return models
elif api_version == '2016-09-01':
from .v2016_09_01 import models
return models
elif api_version == '2017-05-10':
from .v2017_05_10 import models
return models
elif api_version == '2018-02-01':
from .v2018_02_01 import models
return models
elif api_version == '2018-05-01':
from .v2018_05_01 import models
return models
raise NotImplementedError("APIVersion {} is not available".format(api_version)) | Module depends on the API version:
* 2016-02-01: :mod:`v2016_02_01.models<azure.mgmt.resource.resources.v2016_02_01.models>`
* 2016-09-01: :mod:`v2016_09_01.models<azure.mgmt.resource.resources.v2016_09_01.models>`
* 2017-05-10: :mod:`v2017_05_10.models<azure.mgmt.resource.resources.v2017_05_10.models>`
* 2018-02-01: :mod:`v2018_02_01.models<azure.mgmt.resource.resources.v2018_02_01.models>`
* 2018-05-01: :mod:`v2018_05_01.models<azure.mgmt.resource.resources.v2018_05_01.models>` | Below is the instruction that describes the task:
### Input:
Module depends on the API version:
* 2016-02-01: :mod:`v2016_02_01.models<azure.mgmt.resource.resources.v2016_02_01.models>`
* 2016-09-01: :mod:`v2016_09_01.models<azure.mgmt.resource.resources.v2016_09_01.models>`
* 2017-05-10: :mod:`v2017_05_10.models<azure.mgmt.resource.resources.v2017_05_10.models>`
* 2018-02-01: :mod:`v2018_02_01.models<azure.mgmt.resource.resources.v2018_02_01.models>`
* 2018-05-01: :mod:`v2018_05_01.models<azure.mgmt.resource.resources.v2018_05_01.models>`
### Response:
def models(cls, api_version=DEFAULT_API_VERSION):
"""Module depends on the API version:
* 2016-02-01: :mod:`v2016_02_01.models<azure.mgmt.resource.resources.v2016_02_01.models>`
* 2016-09-01: :mod:`v2016_09_01.models<azure.mgmt.resource.resources.v2016_09_01.models>`
* 2017-05-10: :mod:`v2017_05_10.models<azure.mgmt.resource.resources.v2017_05_10.models>`
* 2018-02-01: :mod:`v2018_02_01.models<azure.mgmt.resource.resources.v2018_02_01.models>`
* 2018-05-01: :mod:`v2018_05_01.models<azure.mgmt.resource.resources.v2018_05_01.models>`
"""
if api_version == '2016-02-01':
from .v2016_02_01 import models
return models
elif api_version == '2016-09-01':
from .v2016_09_01 import models
return models
elif api_version == '2017-05-10':
from .v2017_05_10 import models
return models
elif api_version == '2018-02-01':
from .v2018_02_01 import models
return models
elif api_version == '2018-05-01':
from .v2018_05_01 import models
return models
raise NotImplementedError("APIVersion {} is not available".format(api_version)) |
def register(self, item):
"""
Registers a new orb object to this schema. This could be a column, index, or collector -- including
a virtual object defined through the orb.virtual decorator.
:param item: <variant>
:return:
"""
if callable(item) and hasattr(item, '__orb__'):
item = item.__orb__
key = item.name()
model = self.__model
# create class methods for indexes
if isinstance(item, orb.Index):
self.__indexes[key] = item
item.setSchema(self)
if model and not hasattr(model, key):
setattr(model, key, classmethod(item))
# create instance methods for collectors
elif isinstance(item, orb.Collector):
self.__collectors[key] = item
item.setSchema(self)
# create instance methods for columns
elif isinstance(item, orb.Column):
self.__columns[key] = item
item.setSchema(self) | Registers a new orb object to this schema. This could be a column, index, or collector -- including
a virtual object defined through the orb.virtual decorator.
:param item: <variant>
:return: | Below is the instruction that describes the task:
### Input:
Registers a new orb object to this schema. This could be a column, index, or collector -- including
a virtual object defined through the orb.virtual decorator.
:param item: <variant>
:return:
### Response:
def register(self, item):
"""
Registers a new orb object to this schema. This could be a column, index, or collector -- including
a virtual object defined through the orb.virtual decorator.
:param item: <variant>
:return:
"""
if callable(item) and hasattr(item, '__orb__'):
item = item.__orb__
key = item.name()
model = self.__model
# create class methods for indexes
if isinstance(item, orb.Index):
self.__indexes[key] = item
item.setSchema(self)
if model and not hasattr(model, key):
setattr(model, key, classmethod(item))
# create instance methods for collectors
elif isinstance(item, orb.Collector):
self.__collectors[key] = item
item.setSchema(self)
# create instance methods for columns
elif isinstance(item, orb.Column):
self.__columns[key] = item
item.setSchema(self) |
def get_microscope_files(self, plate_name, acquisition_name):
'''Gets status and name of files that have been registered for upload.
Parameters
----------
plate_name: str
name of the parent plate
acquisition_name: str
name of the parent acquisition
Returns
-------
List[Dict[str, str]]
names and status of uploaded files
See also
--------
:func:`tmserver.api.acquisition.get_microscope_image_files_information`
:func:`tmserver.api.acquisition.get_microscope_metadata_file_information`
:class:`tmlib.models.acquisition.Acquisition`
:class:`tmlib.models.file.MicroscopeImageFile`
:class:`tmlib.models.file.MicroscopeMetadataFile`
'''
logger.info(
'get names of already uploaded files for experiment "%s", '
'plate "%s" and acquisition "%s"', self.experiment_name, plate_name,
acquisition_name
)
acquisition_id = self._get_acquisition_id(plate_name, acquisition_name)
image_files = self._get_image_files(acquisition_id)
metadata_files = self._get_metadata_files(acquisition_id)
return image_files + metadata_files | Gets status and name of files that have been registered for upload.
Parameters
----------
plate_name: str
name of the parent plate
acquisition_name: str
name of the parent acquisition
Returns
-------
List[Dict[str, str]]
names and status of uploaded files
See also
--------
:func:`tmserver.api.acquisition.get_microscope_image_files_information`
:func:`tmserver.api.acquisition.get_microscope_metadata_file_information`
:class:`tmlib.models.acquisition.Acquisition`
:class:`tmlib.models.file.MicroscopeImageFile`
:class:`tmlib.models.file.MicroscopeMetadataFile` | Below is the instruction that describes the task:
### Input:
Gets status and name of files that have been registered for upload.
Parameters
----------
plate_name: str
name of the parent plate
acquisition_name: str
name of the parent acquisition
Returns
-------
List[Dict[str, str]]
names and status of uploaded files
See also
--------
:func:`tmserver.api.acquisition.get_microscope_image_files_information`
:func:`tmserver.api.acquisition.get_microscope_metadata_file_information`
:class:`tmlib.models.acquisition.Acquisition`
:class:`tmlib.models.file.MicroscopeImageFile`
:class:`tmlib.models.file.MicroscopeMetadataFile`
### Response:
def get_microscope_files(self, plate_name, acquisition_name):
'''Gets status and name of files that have been registered for upload.
Parameters
----------
plate_name: str
name of the parent plate
acquisition_name: str
name of the parent acquisition
Returns
-------
List[Dict[str, str]]
names and status of uploaded files
See also
--------
:func:`tmserver.api.acquisition.get_microscope_image_files_information`
:func:`tmserver.api.acquisition.get_microscope_metadata_file_information`
:class:`tmlib.models.acquisition.Acquisition`
:class:`tmlib.models.file.MicroscopeImageFile`
:class:`tmlib.models.file.MicroscopeMetadataFile`
'''
logger.info(
'get names of already uploaded files for experiment "%s", '
'plate "%s" and acquisition "%s"', self.experiment_name, plate_name,
acquisition_name
)
acquisition_id = self._get_acquisition_id(plate_name, acquisition_name)
image_files = self._get_image_files(acquisition_id)
metadata_files = self._get_metadata_files(acquisition_id)
return image_files + metadata_files |
def randomPositions(input, nside_pix, n=1):
"""
Generate n random positions within a full HEALPix mask of booleans, or a set of (lon, lat) coordinates.
Parameters:
-----------
input : (1) full HEALPix mask of booleans, or (2) a set of (lon, lat) coordinates for catalog objects that define the occupied pixels.
nside_pix : nside_pix is meant to be at coarser resolution than the input mask or catalog object positions
so that gaps from star holes, bleed trails, cosmic rays, etc. are filled in.
Returns:
--------
lon,lat,area : Return the longitude and latitude of the random positions (deg) and the total area (deg^2).
"""
input = np.array(input)
if len(input.shape) == 1:
if hp.npix2nside(len(input)) < nside_pix:
logger.warning('Expected coarser resolution nside_pix in skymap.randomPositions')
subpix = np.nonzero(input)[0] # All the valid pixels in the mask at the NSIDE for the input mask
lon, lat = pix2ang(hp.npix2nside(len(input)), subpix)
elif len(input.shape) == 2:
lon, lat = input[0], input[1] # All catalog object positions
else:
logger.warning('Unexpected input dimensions for skymap.randomPositions')
pix = surveyPixel(lon, lat, nside_pix)
# Area with which the random points are thrown
area = len(pix) * hp.nside2pixarea(nside_pix, degrees=True)
# Create mask at the coarser resolution
mask = np.tile(False, hp.nside2npix(nside_pix))
mask[pix] = True
# Estimate the number of points that need to be thrown based off
# coverage fraction of the HEALPix mask
coverage_fraction = float(np.sum(mask)) / len(mask)
n_throw = int(n / coverage_fraction)
lon, lat = [], []
count = 0
while len(lon) < n:
lon_throw = np.random.uniform(0., 360., n_throw)
lat_throw = np.degrees(np.arcsin(np.random.uniform(-1., 1., n_throw)))
pix_throw = ugali.utils.healpix.angToPix(nside_pix, lon_throw, lat_throw)
cut = mask[pix_throw].astype(bool)
lon = np.append(lon, lon_throw[cut])
lat = np.append(lat, lat_throw[cut])
count += 1
if count > 10:
raise RuntimeError('Too many loops...')
return lon[0:n], lat[0:n], area | Generate n random positions within a full HEALPix mask of booleans, or a set of (lon, lat) coordinates.
Parameters:
-----------
input : (1) full HEALPix mask of booleans, or (2) a set of (lon, lat) coordinates for catalog objects that define the occupied pixels.
nside_pix : nside_pix is meant to be at coarser resolution than the input mask or catalog object positions
so that gaps from star holes, bleed trails, cosmic rays, etc. are filled in.
Returns:
--------
lon,lat,area : Return the longitude and latitude of the random positions (deg) and the total area (deg^2). | Below is the instruction that describes the task:
### Input:
Generate n random positions within a full HEALPix mask of booleans, or a set of (lon, lat) coordinates.
Parameters:
-----------
input : (1) full HEALPix mask of booleans, or (2) a set of (lon, lat) coordinates for catalog objects that define the occupied pixels.
nside_pix : nside_pix is meant to be at coarser resolution than the input mask or catalog object positions
so that gaps from star holes, bleed trails, cosmic rays, etc. are filled in.
Returns:
--------
lon,lat,area : Return the longitude and latitude of the random positions (deg) and the total area (deg^2).
### Response:
def randomPositions(input, nside_pix, n=1):
"""
Generate n random positions within a full HEALPix mask of booleans, or a set of (lon, lat) coordinates.
Parameters:
-----------
input : (1) full HEALPix mask of booleans, or (2) a set of (lon, lat) coordinates for catalog objects that define the occupied pixels.
nside_pix : nside_pix is meant to be at coarser resolution than the input mask or catalog object positions
so that gaps from star holes, bleed trails, cosmic rays, etc. are filled in.
Returns:
--------
lon,lat,area : Return the longitude and latitude of the random positions (deg) and the total area (deg^2).
"""
input = np.array(input)
if len(input.shape) == 1:
if hp.npix2nside(len(input)) < nside_pix:
logger.warning('Expected coarser resolution nside_pix in skymap.randomPositions')
subpix = np.nonzero(input)[0] # All the valid pixels in the mask at the NSIDE for the input mask
lon, lat = pix2ang(hp.npix2nside(len(input)), subpix)
elif len(input.shape) == 2:
lon, lat = input[0], input[1] # All catalog object positions
else:
logger.warning('Unexpected input dimensions for skymap.randomPositions')
pix = surveyPixel(lon, lat, nside_pix)
# Area with which the random points are thrown
area = len(pix) * hp.nside2pixarea(nside_pix, degrees=True)
# Create mask at the coarser resolution
mask = np.tile(False, hp.nside2npix(nside_pix))
mask[pix] = True
# Estimate the number of points that need to be thrown based off
# coverage fraction of the HEALPix mask
coverage_fraction = float(np.sum(mask)) / len(mask)
n_throw = int(n / coverage_fraction)
lon, lat = [], []
count = 0
while len(lon) < n:
lon_throw = np.random.uniform(0., 360., n_throw)
lat_throw = np.degrees(np.arcsin(np.random.uniform(-1., 1., n_throw)))
pix_throw = ugali.utils.healpix.angToPix(nside_pix, lon_throw, lat_throw)
cut = mask[pix_throw].astype(bool)
lon = np.append(lon, lon_throw[cut])
lat = np.append(lat, lat_throw[cut])
count += 1
if count > 10:
raise RuntimeError('Too many loops...')
return lon[0:n], lat[0:n], area |
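A dependency-free sketch of the sampling scheme used above: uniform longitudes, latitudes drawn as arcsin of a uniform variable (area-preserving on the sphere), then rejection against an allowed region; the lon/lat box below is only an illustrative stand-in for the HEALPix mask.

import numpy as np

np.random.seed(1)
n_throw = 10000
# Uniform points on the sphere: lon uniform in [0, 360), lat = arcsin(uniform).
lon_throw = np.random.uniform(0., 360., n_throw)
lat_throw = np.degrees(np.arcsin(np.random.uniform(-1., 1., n_throw)))

# Rejection step: keep only points inside the allowed region (a box here,
# standing in for the boolean HEALPix mask used above).
cut = (lon_throw > 30.) & (lon_throw < 60.) & (lat_throw > -10.) & (lat_throw < 10.)
lon_kept, lat_kept = lon_throw[cut], lat_throw[cut]
print(len(lon_kept), 'points kept out of', n_throw)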
def encode(self, pdu):
"""encode the contents of the APCI into the PDU."""
if _debug: APCI._debug("encode %r", pdu)
PCI.update(pdu, self)
if (self.apduType == ConfirmedRequestPDU.pduType):
# PDU type
buff = self.apduType << 4
if self.apduSeg:
buff += 0x08
if self.apduMor:
buff += 0x04
if self.apduSA:
buff += 0x02
pdu.put(buff)
pdu.put((self.apduMaxSegs << 4) + self.apduMaxResp)
pdu.put(self.apduInvokeID)
if self.apduSeg:
pdu.put(self.apduSeq)
pdu.put(self.apduWin)
pdu.put(self.apduService)
elif (self.apduType == UnconfirmedRequestPDU.pduType):
pdu.put(self.apduType << 4)
pdu.put(self.apduService)
elif (self.apduType == SimpleAckPDU.pduType):
pdu.put(self.apduType << 4)
pdu.put(self.apduInvokeID)
pdu.put(self.apduService)
elif (self.apduType == ComplexAckPDU.pduType):
# PDU type
buff = self.apduType << 4
if self.apduSeg:
buff += 0x08
if self.apduMor:
buff += 0x04
pdu.put(buff)
pdu.put(self.apduInvokeID)
if self.apduSeg:
pdu.put(self.apduSeq)
pdu.put(self.apduWin)
pdu.put(self.apduService)
elif (self.apduType == SegmentAckPDU.pduType):
# PDU type
buff = self.apduType << 4
if self.apduNak:
buff += 0x02
if self.apduSrv:
buff += 0x01
pdu.put(buff)
pdu.put(self.apduInvokeID)
pdu.put(self.apduSeq)
pdu.put(self.apduWin)
elif (self.apduType == ErrorPDU.pduType):
pdu.put(self.apduType << 4)
pdu.put(self.apduInvokeID)
pdu.put(self.apduService)
elif (self.apduType == RejectPDU.pduType):
pdu.put(self.apduType << 4)
pdu.put(self.apduInvokeID)
pdu.put(self.apduAbortRejectReason)
elif (self.apduType == AbortPDU.pduType):
# PDU type
buff = self.apduType << 4
if self.apduSrv:
buff += 0x01
pdu.put(buff)
pdu.put(self.apduInvokeID)
pdu.put(self.apduAbortRejectReason)
else:
        raise ValueError("invalid APCI.apduType") | encode the contents of the APCI into the PDU. | Below is the instruction that describes the task:
### Input:
encode the contents of the APCI into the PDU.
### Response:
def encode(self, pdu):
"""encode the contents of the APCI into the PDU."""
if _debug: APCI._debug("encode %r", pdu)
PCI.update(pdu, self)
if (self.apduType == ConfirmedRequestPDU.pduType):
# PDU type
buff = self.apduType << 4
if self.apduSeg:
buff += 0x08
if self.apduMor:
buff += 0x04
if self.apduSA:
buff += 0x02
pdu.put(buff)
pdu.put((self.apduMaxSegs << 4) + self.apduMaxResp)
pdu.put(self.apduInvokeID)
if self.apduSeg:
pdu.put(self.apduSeq)
pdu.put(self.apduWin)
pdu.put(self.apduService)
elif (self.apduType == UnconfirmedRequestPDU.pduType):
pdu.put(self.apduType << 4)
pdu.put(self.apduService)
elif (self.apduType == SimpleAckPDU.pduType):
pdu.put(self.apduType << 4)
pdu.put(self.apduInvokeID)
pdu.put(self.apduService)
elif (self.apduType == ComplexAckPDU.pduType):
# PDU type
buff = self.apduType << 4
if self.apduSeg:
buff += 0x08
if self.apduMor:
buff += 0x04
pdu.put(buff)
pdu.put(self.apduInvokeID)
if self.apduSeg:
pdu.put(self.apduSeq)
pdu.put(self.apduWin)
pdu.put(self.apduService)
elif (self.apduType == SegmentAckPDU.pduType):
# PDU type
buff = self.apduType << 4
if self.apduNak:
buff += 0x02
if self.apduSrv:
buff += 0x01
pdu.put(buff)
pdu.put(self.apduInvokeID)
pdu.put(self.apduSeq)
pdu.put(self.apduWin)
elif (self.apduType == ErrorPDU.pduType):
pdu.put(self.apduType << 4)
pdu.put(self.apduInvokeID)
pdu.put(self.apduService)
elif (self.apduType == RejectPDU.pduType):
pdu.put(self.apduType << 4)
pdu.put(self.apduInvokeID)
pdu.put(self.apduAbortRejectReason)
elif (self.apduType == AbortPDU.pduType):
# PDU type
buff = self.apduType << 4
if self.apduSrv:
buff += 0x01
pdu.put(buff)
pdu.put(self.apduInvokeID)
pdu.put(self.apduAbortRejectReason)
else:
raise ValueError("invalid APCI.apduType") |
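A plain-Python sketch of the header-octet packing in the confirmed-request branch above; the flag values are illustrative, and using 0x0 for the confirmed-request PDU type is an assumption of the sketch.

# Illustrative packing of the first octet of a confirmed request: PDU type in
# the high nibble, SEG/MOR/SA flags in bits 3..1 (0x0 as the type is an assumption).
pdu_type = 0x0
seg, mor, sa = True, False, True

buff = pdu_type << 4
if seg:
    buff += 0x08
if mor:
    buff += 0x04
if sa:
    buff += 0x02

print(format(buff, '08b'))   # '00001010' -> type 0 with SEG and SA set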
def pauli_constraints(X, Y, Z):
"""Return a set of constraints that define Pauli spin operators.
:param X: List of Pauli X operator on sites.
:type X: list of :class:`sympy.physics.quantum.operator.HermitianOperator`.
:param Y: List of Pauli Y operator on sites.
:type Y: list of :class:`sympy.physics.quantum.operator.HermitianOperator`.
:param Z: List of Pauli Z operator on sites.
:type Z: list of :class:`sympy.physics.quantum.operator.HermitianOperator`.
:returns: tuple of substitutions and equalities.
"""
substitutions = {}
n_vars = len(X)
for i in range(n_vars):
# They square to the identity
substitutions[X[i] * X[i]] = 1
substitutions[Y[i] * Y[i]] = 1
substitutions[Z[i] * Z[i]] = 1
# Anticommutation relations
substitutions[Y[i] * X[i]] = - X[i] * Y[i]
substitutions[Z[i] * X[i]] = - X[i] * Z[i]
substitutions[Z[i] * Y[i]] = - Y[i] * Z[i]
# Commutation relations.
# equalities.append(X[i]*Y[i] - 1j*Z[i])
# equalities.append(X[i]*Z[i] + 1j*Y[i])
# equalities.append(Y[i]*Z[i] - 1j*X[i])
# They commute between the sites
for j in range(i + 1, n_vars):
substitutions[X[j] * X[i]] = X[i] * X[j]
substitutions[Y[j] * Y[i]] = Y[i] * Y[j]
substitutions[Y[j] * X[i]] = X[i] * Y[j]
substitutions[Y[i] * X[j]] = X[j] * Y[i]
substitutions[Z[j] * Z[i]] = Z[i] * Z[j]
substitutions[Z[j] * X[i]] = X[i] * Z[j]
substitutions[Z[i] * X[j]] = X[j] * Z[i]
substitutions[Z[j] * Y[i]] = Y[i] * Z[j]
substitutions[Z[i] * Y[j]] = Y[j] * Z[i]
return substitutions | Return a set of constraints that define Pauli spin operators.
:param X: List of Pauli X operator on sites.
:type X: list of :class:`sympy.physics.quantum.operator.HermitianOperator`.
:param Y: List of Pauli Y operator on sites.
:type Y: list of :class:`sympy.physics.quantum.operator.HermitianOperator`.
:param Z: List of Pauli Z operator on sites.
:type Z: list of :class:`sympy.physics.quantum.operator.HermitianOperator`.
:returns: tuple of substitutions and equalities. | Below is the instruction that describes the task:
### Input:
Return a set of constraints that define Pauli spin operators.
:param X: List of Pauli X operator on sites.
:type X: list of :class:`sympy.physics.quantum.operator.HermitianOperator`.
:param Y: List of Pauli Y operator on sites.
:type Y: list of :class:`sympy.physics.quantum.operator.HermitianOperator`.
:param Z: List of Pauli Z operator on sites.
:type Z: list of :class:`sympy.physics.quantum.operator.HermitianOperator`.
:returns: tuple of substitutions and equalities.
### Response:
def pauli_constraints(X, Y, Z):
"""Return a set of constraints that define Pauli spin operators.
:param X: List of Pauli X operator on sites.
:type X: list of :class:`sympy.physics.quantum.operator.HermitianOperator`.
:param Y: List of Pauli Y operator on sites.
:type Y: list of :class:`sympy.physics.quantum.operator.HermitianOperator`.
:param Z: List of Pauli Z operator on sites.
:type Z: list of :class:`sympy.physics.quantum.operator.HermitianOperator`.
:returns: tuple of substitutions and equalities.
"""
substitutions = {}
n_vars = len(X)
for i in range(n_vars):
# They square to the identity
substitutions[X[i] * X[i]] = 1
substitutions[Y[i] * Y[i]] = 1
substitutions[Z[i] * Z[i]] = 1
# Anticommutation relations
substitutions[Y[i] * X[i]] = - X[i] * Y[i]
substitutions[Z[i] * X[i]] = - X[i] * Z[i]
substitutions[Z[i] * Y[i]] = - Y[i] * Z[i]
# Commutation relations.
# equalities.append(X[i]*Y[i] - 1j*Z[i])
# equalities.append(X[i]*Z[i] + 1j*Y[i])
# equalities.append(Y[i]*Z[i] - 1j*X[i])
# They commute between the sites
for j in range(i + 1, n_vars):
substitutions[X[j] * X[i]] = X[i] * X[j]
substitutions[Y[j] * Y[i]] = Y[i] * Y[j]
substitutions[Y[j] * X[i]] = X[i] * Y[j]
substitutions[Y[i] * X[j]] = X[j] * Y[i]
substitutions[Z[j] * Z[i]] = Z[i] * Z[j]
substitutions[Z[j] * X[i]] = X[i] * Z[j]
substitutions[Z[i] * X[j]] = X[j] * Z[i]
substitutions[Z[j] * Y[i]] = Y[i] * Z[j]
substitutions[Z[i] * Y[j]] = Y[j] * Z[i]
return substitutions |
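A short usage sketch, assuming pauli_constraints above is in scope and sympy is installed; it builds Pauli operator symbols for two sites and inspects two of the generated substitution rules.

from sympy.physics.quantum.operator import HermitianOperator

# Usage sketch, assuming pauli_constraints above is in scope.
n_sites = 2
X = [HermitianOperator('X%d' % i) for i in range(n_sites)]
Y = [HermitianOperator('Y%d' % i) for i in range(n_sites)]
Z = [HermitianOperator('Z%d' % i) for i in range(n_sites)]

substitutions = pauli_constraints(X, Y, Z)
print(substitutions[X[0] * X[0]])   # 1      (Pauli operators square to the identity)
print(substitutions[Y[0] * X[0]])   # -X0*Y0 (anticommutation on the same site)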
def sort_name(self):
""" Get the sorting name of this category """
if self._record and self._record.sort_name:
return self._record.sort_name
        return self.name | Get the sorting name of this category | Below is the instruction that describes the task:
### Input:
Get the sorting name of this category
### Response:
def sort_name(self):
""" Get the sorting name of this category """
if self._record and self._record.sort_name:
return self._record.sort_name
return self.name |
def register():
"""Plugin registration."""
from pelican import signals
signals.initialized.connect(setup_git)
    signals.article_generator_finalized.connect(replace_git_url) | Plugin registration. | Below is the instruction that describes the task:
### Input:
Plugin registration.
### Response:
def register():
"""Plugin registration."""
from pelican import signals
signals.initialized.connect(setup_git)
signals.article_generator_finalized.connect(replace_git_url) |
def merge_dicts(*dicts, **kwargs):
""" merge_dicts(*dicts, cls=None)
Takes multiple *dicts* and returns a single merged dict. The merging takes place in order of the
    passed dicts and therefore values of later dicts take precedence in case of field collisions.
The class of the returned merged dict is configurable via *cls*. If it is *None*, the class is
inferred from the first dict object in *dicts*.
"""
# get or infer the class
cls = kwargs.get("cls", None)
if cls is None:
for d in dicts:
if isinstance(d, dict):
cls = d.__class__
break
else:
raise TypeError("cannot infer cls as none of the passed objects is of type dict")
# start merging
merged_dict = cls()
for d in dicts:
if isinstance(d, dict):
merged_dict.update(d)
return merged_dict | merge_dicts(*dicts, cls=None)
Takes multiple *dicts* and returns a single merged dict. The merging takes place in order of the
passed dicts and therefore values of later dicts take precedence in case of field collisions.
The class of the returned merged dict is configurable via *cls*. If it is *None*, the class is
inferred from the first dict object in *dicts*. | Below is the instruction that describes the task:
### Input:
merge_dicts(*dicts, cls=None)
Takes multiple *dicts* and returns a single merged dict. The merging takes place in order of the
passed dicts and therefore values of later dicts take precedence in case of field collisions.
The class of the returned merged dict is configurable via *cls*. If it is *None*, the class is
inferred from the first dict object in *dicts*.
### Response:
def merge_dicts(*dicts, **kwargs):
""" merge_dicts(*dicts, cls=None)
Takes multiple *dicts* and returns a single merged dict. The merging takes place in order of the
    passed dicts and therefore values of later dicts take precedence in case of field collisions.
The class of the returned merged dict is configurable via *cls*. If it is *None*, the class is
inferred from the first dict object in *dicts*.
"""
# get or infer the class
cls = kwargs.get("cls", None)
if cls is None:
for d in dicts:
if isinstance(d, dict):
cls = d.__class__
break
else:
raise TypeError("cannot infer cls as none of the passed objects is of type dict")
# start merging
merged_dict = cls()
for d in dicts:
if isinstance(d, dict):
merged_dict.update(d)
return merged_dict |
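A brief usage sketch, assuming merge_dicts above is in scope; it shows that later dicts win on colliding keys and that the result class is inferred from the first dict.

from collections import OrderedDict

# Usage sketch, assuming merge_dicts above is in scope.
defaults = OrderedDict([('host', 'localhost'), ('port', 8080)])
overrides = {'port': 9000}

merged = merge_dicts(defaults, overrides)
print(merged)           # port is 9000: later dicts win on collisions
print(type(merged))     # OrderedDict, inferred from the first dict passed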