Dataset schema. Each column is listed with its type and the minimum/maximum value (for integers) or string length (for strings) reported by the viewer:

| column | type | min | max |
|---|---|---|---|
| complexity | int64 | 1 | 139 |
| fun_name | string (length) | 1 | 80 |
| code | string (length) | 101 | 62.2k |
| commit_id | string (length) | 40 | 40 |
| ast_errors | string (length) | 0 | 3.11k |
| ast_levels | int64 | 6 | 36 |
| file_name | string (length) | 5 | 79 |
| n_ast_nodes | int64 | 17 | 19.2k |
| commit_message | string (length) | 3 | 15.3k |
| d_id | int64 | 12 | 121k |
| n_ast_errors | int64 | 0 | 9 |
| n_whitespaces | int64 | 4 | 10.8k |
| token_counts | int64 | 5 | 3.06k |
| vocab_size | int64 | 4 | 1.11k |
| id | int64 | 20 | 338k |
| n_words | int64 | 4 | 4.82k |
| repo | string (length) | 3 | 22 |
| n_identifiers | int64 | 2 | 176 |
| path | string (length) | 7 | 134 |
| language | string (1 class) | n/a | n/a |
| nloc | int64 | 1 | 413 |
| documentation | dict | n/a | n/a |
| url | string (length) | 31 | 59 |

Each record below is one row of the dataset. Scalar fields are labelled inline following the schema above; the code, commit_message and documentation fields are reproduced in full.
complexity: 9 | fun_name: save_model | code:
def save_model(model, filepath, weights_format="h5"):
if not filepath.endswith(".keras"):
raise ValueError(
"Invalid filename: expected a `.keras` extension. "
f"Received: filepath={filepath}"
)
if weights_format == "h5" and h5py is None:
raise ImportError("h5py must be installed in order to save a model.")
if not model.built:
warnings.warn(
"You are saving a model that has not yet been built. "
"It might not contain any weights yet. "
"Consider building the model first by calling it "
"on some data.",
stacklevel=2,
)
saving_v3_enabled_value = getattr(_SAVING_V3_ENABLED, "value", False)
_SAVING_V3_ENABLED.value = True
serialized_model_dict = serialize_keras_object(model)
config_json = json.dumps(serialized_model_dict)
metadata_json = json.dumps(
{
"keras_version": keras.__version__,
"date_saved": datetime.datetime.now().strftime("%Y-%m-%d@%H:%M:%S"),
}
)
try:
with zipfile.ZipFile(filepath, "w") as zf:
with zf.open(_METADATA_FILENAME, "w") as f:
f.write(metadata_json.encode())
with zf.open(_CONFIG_FILENAME, "w") as f:
f.write(config_json.encode())
if weights_format == "h5":
weights_store = H5IOStore(
_VARS_FNAME + ".h5", archive=zf, mode="w"
)
elif weights_format == "npz":
weights_store = NpzIOStore(
_VARS_FNAME + ".npz", archive=zf, mode="w"
)
else:
raise ValueError(
"Unknown weights_format. Expected 'h5' or 'npz'. "
f"Received: {weights_format}"
)
asset_store = DiskIOStore(_ASSETS_DIRNAME, archive=zf, mode="w")
_save_state(
model,
weights_handler=weights_store,
assets_handler=asset_store,
inner_path="",
visited_trackables=set(),
)
weights_store.close()
asset_store.close()
except Exception as e:
raise e
finally:
_SAVING_V3_ENABLED.value = saving_v3_enabled_value

commit_id: e6f739a31247c43a86c37c33b0b8b2ba6be6a5f6 | ast_levels: 17 | file_name: saving_lib.py | n_ast_nodes: 521
commit_message: - Add standalone weights file saving/loading functionality.
- Switch to in-memory, single write / single read archive saving for better performance.
- Remove ability to pick between zipping or not zipping a Keras saved artifact: it's always a zip archive now.
PiperOrigin-RevId: 483705728
d_id: 83,285 | n_ast_errors: 0 | n_whitespaces: 770 | token_counts: 291 | vocab_size: 129 | id: 280,200 | n_words: 181 | repo: keras | n_identifiers: 54 | path: keras/saving/experimental/saving_lib.py | language: Python | nloc: 59
documentation: {
"docstring": "Save a zip-archive representing a Keras model to the given filepath.\n\n The zip-based archive contains the following structure:\n\n - JSON-based configuration file (config.json): Records of model, layer, and\n other trackables' configuration.\n - NPZ-based trackable state files, found in respective directories, such as\n model/states.npz, model/dense_layer/states.npz, etc.\n - Metadata file.\n\n The states of Keras trackables (layers, optimizers, loss, and metrics) are\n automatically saved as long as they can be discovered through the attributes\n returned by `dir(Model)`. Typically, the state includes the variables\n associated with the trackable, but some specially purposed layers may\n contain more such as the vocabularies stored in the hashmaps. The trackables\n define how their states are saved by exposing `save_state()` and\n `load_state()` APIs.\n\n For the case of layer states, the variables will be visited as long as\n they are either 1) referenced via layer attributes, or 2) referenced via a\n container (list, tuple, or dict), and the container is referenced via a\n layer attribute.\n ",
"language": "en",
"n_whitespaces": 217,
"n_words": 155,
"vocab_size": 106
} | https://github.com/keras-team/keras.git |
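The record's docstring describes the zip-archive layout this function writes. A rough usage sketch follows; the import path is an assumption based on the record's `path` field, and the module was experimental at the time of this commit:

```python
# Hedged sketch, not part of the record: assumes the experimental saving_lib
# module is importable at the path shown above, and that h5py is installed
# (required for the default "h5" weights format).
import keras
from keras.saving.experimental.saving_lib import save_model  # assumed path

# A small model that is already built, so the "not yet been built" warning
# in the function above is not triggered.
model = keras.Sequential([keras.layers.Dense(4, input_shape=(8,))])

# The filename must end in ".keras"; anything else hits the ValueError branch.
save_model(model, "demo_model.keras", weights_format="h5")
```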
|
complexity: 3 | fun_name: update_dtype | code:
def update_dtype(self, dtype) -> SparseDtype:
cls = type(self)
dtype = pandas_dtype(dtype)
if not isinstance(dtype, cls):
if not isinstance(dtype, np.dtype):
raise TypeError("sparse arrays of extension dtypes not supported")
fvarr = astype_nansafe(np.array(self.fill_value), dtype)
# NB: not fv_0d.item(), as that casts dt64->int
fill_value = fvarr[0]
dtype = cls(dtype, fill_value=fill_value)
return dtype

commit_id: eb2abb86616978ef6e4971b600849ccabc686de4 | ast_levels: 13 | file_name: dtype.py | n_ast_nodes: 130
commit_message: CLN: address xfails (#46287)
d_id: 39,677 | n_ast_errors: 0 | n_whitespaces: 153 | token_counts: 80 | vocab_size: 36 | id: 165,545 | n_words: 48 | repo: pandas | n_identifiers: 14 | path: pandas/core/arrays/sparse/dtype.py | language: Python | nloc: 47
documentation: {
"docstring": "\n Convert the SparseDtype to a new dtype.\n\n This takes care of converting the ``fill_value``.\n\n Parameters\n ----------\n dtype : Union[str, numpy.dtype, SparseDtype]\n The new dtype to use.\n\n * For a SparseDtype, it is simply returned\n * For a NumPy dtype (or str), the current fill value\n is converted to the new dtype, and a SparseDtype\n with `dtype` and the new fill value is returned.\n\n Returns\n -------\n SparseDtype\n A new SparseDtype with the correct `dtype` and fill value\n for that `dtype`.\n\n Raises\n ------\n ValueError\n When the current fill value cannot be converted to the\n new `dtype` (e.g. trying to convert ``np.nan`` to an\n integer dtype).\n\n\n Examples\n --------\n >>> SparseDtype(int, 0).update_dtype(float)\n Sparse[float64, 0.0]\n\n >>> SparseDtype(int, 1).update_dtype(SparseDtype(float, np.nan))\n Sparse[float64, nan]\n ",
"language": "en",
"n_whitespaces": 357,
"n_words": 116,
"vocab_size": 71
} | https://github.com/pandas-dev/pandas.git |
|
complexity: 4 | fun_name: convert_to_legacy_optimizer | code:
def convert_to_legacy_optimizer(optimizer):
if not isinstance(optimizer, base_optimizer.Optimizer):
raise ValueError(
"`convert_to_legacy_optimizer` should only be called "
"on instances of `tf.keras.optimizers.Optimizer`, but "
f"received {optimizer} of type {type(optimizer)}."
)
optimizer_name = optimizer.__class__.__name__.lower()
config = optimizer.get_config()
# Remove fields that only exist in experimental optimizer.
keys_to_remove = [
"weight_decay",
"use_ema",
"ema_momentum",
"ema_overwrite_frequency",
"jit_compile",
"is_legacy_optimizer",
]
for key in keys_to_remove:
config.pop(key, None)
# Learning rate can be a custom LearningRateSchedule, which is stored as
# a dict in config, and cannot be deserialized.
if isinstance(
optimizer._learning_rate, learning_rate_schedule.LearningRateSchedule
):
config["learning_rate"] = optimizer._learning_rate
legacy_optimizer_config = {
"class_name": optimizer_name,
"config": config,
}
return deserialize(legacy_optimizer_config, use_legacy_optimizer=True)
@keras_export("keras.optimizers.get") | 5a105aadbdc6fde2c2529280c4789864adbb81c7 | @keras_export("keras.optimizers.get") | 14 | __init__.py | 220 | Move new optimizer out of optimizer_experimental/ directory.
PiperOrigin-RevId: 488998585 | 83,358 | 1 | 266 | 113 | 82 | 280,501 | 98 | keras | 23 | keras/optimizers/__init__.py | Python | 28 | {
"docstring": "Convert experimental optimizer to legacy optimizer.\n\n This function takes in a `tf.keras.optimizers.experimental.Optimizer`\n instance and converts it to the corresponding\n `tf.keras.optimizers.legacy.Optimizer` instance.\n For example, `tf.keras.optimizers.experimental.Adam(...)` to\n `tf.keras.optimizers.legacy.Adam(...)`.\n\n Args:\n optimizer: An instance of `tf.keras.optimizers.experimental.Optimizer`.\n ",
"language": "en",
"n_whitespaces": 60,
"n_words": 32,
"vocab_size": 29
} | https://github.com/keras-team/keras.git |
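Per its docstring, this helper maps an experimental optimizer onto its legacy counterpart. A hedged sketch of how it might be called; the public export name and the dual optimizer namespaces are assumptions about the TensorFlow release, not facts from the record:

```python
# Hypothetical usage; the exact exported name may differ between releases.
import tensorflow as tf

exp_adam = tf.keras.optimizers.experimental.Adam(learning_rate=1e-3)
legacy_adam = tf.keras.optimizers.convert_to_legacy_optimizer(exp_adam)

# Expected: a legacy Adam carrying over the shared config fields, with the
# experimental-only keys (weight_decay, use_ema, ...) stripped as shown above.
print(type(legacy_adam).__name__, legacy_adam.get_config()["learning_rate"])
```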
complexity: 7 | fun_name: is_symbolic_tensor | code:
def is_symbolic_tensor(tensor):
if isinstance(tensor, tf.Tensor):
return hasattr(tensor, "graph")
elif is_extension_type(tensor):
component_tensors = tf.nest.flatten(tensor, expand_composites=True)
return any(hasattr(t, "graph") for t in component_tensors)
elif isinstance(tensor, tf.Variable):
# Variables that are output of a Keras Layer in Functional API mode
# should be considered symbolic.
# TODO(omalleyt): We need a better way to check this in order to
# enable `run_eagerly=True` for Models containing Layers that
# return Variables as outputs.
return (
getattr(tensor, "_keras_history", False)
or not tf.executing_eagerly()
)
elif isinstance(tensor, tuple(_user_convertible_tensor_types)):
tensor = ops.convert_to_tensor_or_composite(tensor)
return is_symbolic_tensor(tensor)
else:
return False
@keras_export("keras.__internal__.utils.register_symbolic_tensor_type", v1=[]) | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | @keras_export("keras.__internal__.utils.register_symbolic_tensor_type", v1=[]) | 13 | tf_utils.py | 205 | Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | 81,861 | 1 | 220 | 113 | 68 | 277,089 | 90 | keras | 22 | keras/utils/tf_utils.py | Python | 16 | {
"docstring": "Returns whether a tensor is symbolic (from a TF graph) or an eager tensor.\n\n A Variable can be seen as either: it is considered symbolic\n when we are in a graph scope, and eager when we are in an eager scope.\n\n Args:\n tensor: A tensor instance to test.\n\n Returns:\n True for symbolic tensors, False for eager tensors.\n ",
"language": "en",
"n_whitespaces": 82,
"n_words": 57,
"vocab_size": 41
} | https://github.com/keras-team/keras.git |
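The function's core check is whether a tensor exposes a `graph` attribute. A small standalone illustration of that distinction (assumed TF 2.x behaviour, not taken from the record):

```python
# Illustrative only: in eager mode, accessing .graph on a tensor raises
# AttributeError, which is exactly why the hasattr() test above comes out False
# for eager tensors and True for graph-built ones.
import tensorflow as tf

eager_t = tf.constant([1.0, 2.0])
print(hasattr(eager_t, "graph"))   # expected: False while executing eagerly

graph = tf.Graph()
with graph.as_default():
    sym_t = tf.constant([1.0, 2.0])  # created in graph mode -> symbolic tensor
print(hasattr(sym_t, "graph"))     # expected: True -- it belongs to `graph`
```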
complexity: 1 | fun_name: sys_info | code:
def sys_info(self) -> GPUInfo:
return GPUInfo(vram=self._vram,
driver=self._driver,
devices=self._device_names,
devices_active=self._active_devices)

commit_id: bdbbad4d310fb606b6f412aa81e9f57ccd994e97 | ast_levels: 9 | file_name: _base.py | n_ast_nodes: 54
commit_message: Refactor lib.gpu_stats (#1218)
* inital gpu_stats refactor
* Add dummy CPU Backend
* Update Sphinx documentation
d_id: 19,996 | n_ast_errors: 0 | n_whitespaces: 89 | token_counts: 35 | vocab_size: 9 | id: 100,532 | n_words: 9 | repo: faceswap | n_identifiers: 11 | path: lib/gpu_stats/_base.py | language: Python | nloc: 20
documentation: {
"docstring": " dict: GPU Stats that are required for system information logging.\n\n The dictionary contains the following data:\n\n **vram** (`list`): the total amount of VRAM in Megabytes for each GPU as pertaining to\n :attr:`_handles`\n\n **driver** (`str`): The GPU driver version that is installed on the OS\n\n **devices** (`list`): The device name of each GPU on the system as pertaining\n to :attr:`_handles`\n\n **devices_active** (`list`): The device name of each active GPU on the system as\n pertaining to :attr:`_handles`\n ",
"language": "en",
"n_whitespaces": 167,
"n_words": 75,
"vocab_size": 42
} | https://github.com/deepfakes/faceswap.git |
|
complexity: 9 | fun_name: _normalize_feature_columns | code:
def _normalize_feature_columns(feature_columns):
if isinstance(
feature_columns, tf.__internal__.feature_column.FeatureColumn
):
feature_columns = [feature_columns]
if isinstance(feature_columns, collections.abc.Iterator):
feature_columns = list(feature_columns)
if isinstance(feature_columns, dict):
raise ValueError("Expected feature_columns to be iterable, found dict.")
for column in feature_columns:
if not isinstance(column, tf.__internal__.feature_column.FeatureColumn):
raise ValueError(
"Items of feature_columns must be a FeatureColumn. "
"Given (type {}): {}.".format(type(column), column)
)
if not feature_columns:
raise ValueError("feature_columns must not be empty.")
name_to_column = {}
for column in feature_columns:
if column.name in name_to_column:
raise ValueError(
"Duplicate feature column name found for columns: {} "
"and {}. This usually means that these columns refer to "
"same base feature. Either one must be discarded or a "
"duplicated but renamed item must be inserted in "
"features dict.".format(column, name_to_column[column.name])
)
name_to_column[column.name] = column
return sorted(feature_columns, key=lambda x: x.name)

commit_id: 84afc5193d38057e2e2badf9c889ea87d80d8fbf | ast_levels: 15 | file_name: base_feature_layer.py | n_ast_nodes: 266
commit_message: Reformatting the codebase with black.
PiperOrigin-RevId: 450093126
d_id: 80,944 | n_ast_errors: 0 | n_whitespaces: 360 | token_counts: 160 | vocab_size: 81 | id: 272,032 | n_words: 125 | repo: keras | n_identifiers: 21 | path: keras/feature_column/base_feature_layer.py | language: Python | nloc: 29
documentation: {
"docstring": "Normalizes the `feature_columns` input.\n\n This method converts the `feature_columns` to list type as best as it can. In\n addition, verifies the type and other parts of feature_columns, required by\n downstream library.\n\n Args:\n feature_columns: The raw feature columns, usually passed by users.\n\n Returns:\n The normalized feature column list.\n\n Raises:\n ValueError: for any invalid inputs, such as empty, duplicated names, etc.\n ",
"language": "en",
"n_whitespaces": 95,
"n_words": 59,
"vocab_size": 50
} | https://github.com/keras-team/keras.git |
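As the docstring states, the helper accepts a single column, an iterator, or a list, rejects dicts and duplicate names, and returns the columns sorted by name. A hedged sketch, assuming the function above is importable and that TF's `feature_column` API is available; the column names are made up:

```python
# Hypothetical call for illustration only.
import tensorflow as tf

cols = iter([
    tf.feature_column.numeric_column("b"),
    tf.feature_column.numeric_column("a"),
])  # an iterator is accepted and normalised to a list
normalized = _normalize_feature_columns(cols)
print([c.name for c in normalized])  # expected: ['a', 'b'] -- sorted by name
```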
|
complexity: 1 | fun_name: test_loadtxt_converter_with_unicode_dtype | code:
def test_loadtxt_converter_with_unicode_dtype():
txt = StringIO('abc,def\nrst,xyz')
conv = bytes.upper
res = np.loadtxt(txt, dtype=np.dtype("U3"), converters=conv, delimiter=",")
expected = np.array([['ABC', 'DEF'], ['RST', 'XYZ']])
assert_equal(res, expected)

commit_id: 66a61b03658f3c9f312505dcf7eab07e4cf91ac6 | ast_levels: 12 | file_name: test_io.py | n_ast_nodes: 118
commit_message: Port over tests from npreadtext test suite
- Add test for parsing scientific notation.
- Add multiple-char comment test.
- Port over tests for structured dtypes.
- Add tests for exceptions on skiprows/max_rows.
- port over ndmin tests.
- Make structured data reusable, add unpack tests.
- Port over delimiter tests.
- Port over maxrows test w/ various dtypes.
- Port over test of exception msg on parse failure.
- Port over test for converters w/neg indices.
- Port over usecols tests
- Port over unicode tests.
- Port over more converter tests.
- Port over test for large rows.
- Port over test for string-len discovery.
- Port over float conversion accuracy test.
- Port over bool test.
- Add test for implicit float->int conversion.
- Port over complex parsing tests.
- Port over tests for reading from generator.
- Port over object cleanup test.
- Port over bytes incompat test.
- Port over converters tests.
Co-authored-by: Warren Weckesser <warren.weckesser@gmail.com>
Co-authored-by: Sebastian Berg <sebastian@sipsolutions.net>
d_id: 38,421 | n_ast_errors: 0 | n_whitespaces: 40 | token_counts: 67 | vocab_size: 19 | id: 159,776 | n_words: 22 | repo: numpy | n_identifiers: 15 | path: numpy/lib/tests/test_io.py | language: Python | nloc: 6
documentation: {
"docstring": "\n With the default 'bytes' encoding, tokens are encoded prior to being passed\n to the converter. This means that the output of the converter may be bytes\n instead of unicode as expected by `read_rows`.\n\n This test checks that outputs from the above scenario are properly decoded\n prior to parsing by `read_rows`.\n ",
"language": "en",
"n_whitespaces": 69,
"n_words": 50,
"vocab_size": 37
} | https://github.com/numpy/numpy.git |
|
complexity: 1 | fun_name: required_resources | code:
def required_resources(self) -> Dict[str, float]:
return _sum_bundles(self._bundles)

commit_id: 96cceb08e8bf73df990437002e25883c5a72d30c | ast_levels: 8 | file_name: placement_groups.py | n_ast_nodes: 33
commit_message: [tune] Raise error in PGF if head and worker bundles are empty (#28445)
Scheduling empty placement groups is not supported by Ray core (see e.g. #28443), so we shouldn't allow them to be created in the first place.
If we need fully empty resource requests, we can include this in the upcoming execution/resource refactor.
Signed-off-by: Kai Fricke <kai@anyscale.com>
d_id: 28,458 | n_ast_errors: 0 | n_whitespaces: 21 | token_counts: 20 | vocab_size: 7 | id: 127,516 | n_words: 7 | repo: ray | n_identifiers: 7 | path: python/ray/tune/execution/placement_groups.py | language: Python | nloc: 3
documentation: {
"docstring": "Returns a dict containing the sums of all resources",
"language": "en",
"n_whitespaces": 8,
"n_words": 9,
"vocab_size": 9
} | https://github.com/ray-project/ray.git |
|
complexity: 1 | fun_name: test_supported_features_ignore_cache | code:
async def test_supported_features_ignore_cache(hass, client):
mock_restore_cache(
hass,
[
State(
ENTITY_ID,
STATE_OFF,
attributes={
ATTR_SUPPORTED_FEATURES: SUPPORT_WEBOSTV | SUPPORT_WEBOSTV_VOLUME,
},
)
],
)
await setup_webostv(hass)
supported = (
SUPPORT_WEBOSTV | SUPPORT_WEBOSTV_VOLUME | MediaPlayerEntityFeature.VOLUME_SET
)
attrs = hass.states.get(ENTITY_ID).attributes
assert attrs[ATTR_SUPPORTED_FEATURES] == supported

commit_id: 0ac581a0b1fa438a53f048adfab9b787884a63f6 | ast_levels: 14 | file_name: test_media_player.py | n_ast_nodes: 107
commit_message: Cleanup EntityFeature in tests (#78859)
d_id: 106,957 | n_ast_errors: 0 | n_whitespaces: 190 | token_counts: 69 | vocab_size: 30 | id: 308,196 | n_words: 37 | repo: core | n_identifiers: 18 | path: tests/components/webostv/test_media_player.py | language: Python | nloc: 19
documentation: {
"docstring": "Test ignore cached supported features if device is on at startup.",
"language": "en",
"n_whitespaces": 10,
"n_words": 11,
"vocab_size": 11
} | https://github.com/home-assistant/core.git |
|
complexity: 6 | fun_name: search_next | code:
def search_next(self, count=1):
tab = self._current_widget()
window_text = self._tabbed_browser.search_text
window_options = self._tabbed_browser.search_options
if window_text is None:
raise cmdutils.CommandError("No search done yet.")
tab.scroller.before_jump_requested.emit()
if window_text is not None and window_text != tab.search.text:
tab.search.clear()
tab.search.search(window_text, **window_options)
count -= 1
if count == 0:
return
cb = functools.partial(self._search_cb, tab=tab,
old_match=tab.search.match,
options=window_options, text=window_text,
prev=False)
for _ in range(count - 1):
tab.search.next_result()
tab.search.next_result(result_cb=cb)

commit_id: 265b018c172f8c1f6d9e7f8850256363f0629f82 | ast_levels: 11 | file_name: commands.py | n_ast_nodes: 243
commit_message: Add a SearchMatch helper class
d_id: 117,401 | n_ast_errors: 0 | n_whitespaces: 291 | token_counts: 154 | vocab_size: 48 | id: 320,860 | n_words: 58 | repo: qutebrowser | n_identifiers: 30 | path: qutebrowser/browser/commands.py | language: Python | nloc: 20
documentation: {
"docstring": "Continue the search to the ([count]th) next term.\n\n Args:\n count: How many elements to ignore.\n ",
"language": "en",
"n_whitespaces": 40,
"n_words": 15,
"vocab_size": 13
} | https://github.com/qutebrowser/qutebrowser.git |
|
complexity: 1 | fun_name: test_logentry_change_message_localized_datetime_input | code:
def test_logentry_change_message_localized_datetime_input(self):
post_data = {
"site": self.site.pk,
"title": "Changed",
"hist": "Some content",
"created_0": "12/03/2008",
"created_1": "11:54",
}
with translation.override("fr"):
change_url = reverse(
"admin:admin_utils_article_change", args=[quote(self.a1.pk)]
)
response = self.client.post(change_url, post_data)
self.assertRedirects(
response, reverse("admin:admin_utils_article_changelist")
)
logentry = LogEntry.objects.filter(
content_type__model__iexact="article"
).latest("id")
self.assertEqual(logentry.get_change_message(), "Changed Title and History.")

commit_id: 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | ast_levels: 16 | file_name: test_logentry.py | n_ast_nodes: 206
commit_message: Refs #33476 -- Reformatted code with Black.
d_id: 51,961 | n_ast_errors: 0 | n_whitespaces: 243 | token_counts: 113 | vocab_size: 39 | id: 207,420 | n_words: 43 | repo: django | n_identifiers: 24 | path: tests/admin_utils/test_logentry.py | language: Python | nloc: 20
documentation: {
"docstring": "\n Localized date/time inputs shouldn't affect changed form data detection.\n ",
"language": "en",
"n_whitespaces": 24,
"n_words": 9,
"vocab_size": 9
} | https://github.com/django/django.git |
|
complexity: 1 | fun_name: test_find_next_time_expression_microseconds | code:
def test_find_next_time_expression_microseconds():
hour_minute_second = (None, "5", "10")
test_time = datetime(2022, 5, 13, 0, 5, 9, tzinfo=dt_util.UTC)
matching_hours, matching_minutes, matching_seconds = _get_matches(
*hour_minute_second
)
next_time = dt_util.find_next_time_expression_time(
test_time, matching_seconds, matching_minutes, matching_hours
)
assert next_time == datetime(2022, 5, 13, 0, 5, 10, tzinfo=dt_util.UTC)
next_time_last_microsecond_plus_one = next_time.replace(
microsecond=999999
) + timedelta(seconds=1)
time_after = dt_util.find_next_time_expression_time(
next_time_last_microsecond_plus_one,
matching_seconds,
matching_minutes,
matching_hours,
)
assert time_after == datetime(2022, 5, 13, 1, 5, 10, tzinfo=dt_util.UTC)

commit_id: 4e9bc9eaffd464f192d187a01771a86699b2f932 | ast_levels: 10 | file_name: test_dt.py | n_ast_nodes: 197
commit_message: Small cleanups to find_next_time_expression and addition of tests (#71845)
d_id: 99,553 | n_ast_errors: 0 | n_whitespaces: 154 | token_counts: 139 | vocab_size: 36 | id: 300,693 | n_words: 66 | repo: core | n_identifiers: 19 | path: tests/util/test_dt.py | language: Python | nloc: 20
documentation: {
"docstring": "Test finding next time expression with microsecond clock drift.",
"language": "en",
"n_whitespaces": 8,
"n_words": 9,
"vocab_size": 9
} | https://github.com/home-assistant/core.git |
|
complexity: 2 | fun_name: test_float32_float64_equivalence | code:
def test_float32_float64_equivalence(is_sparse):
rng = np.random.RandomState(0)
X = rng.rand(10, 2)
if is_sparse:
X[X < 0.8] = 0
X = sp.csr_matrix(X)
km64 = BisectingKMeans(n_clusters=3, random_state=0).fit(X)
km32 = BisectingKMeans(n_clusters=3, random_state=0).fit(X.astype(np.float32))
assert_allclose(km32.cluster_centers_, km64.cluster_centers_)
assert_array_equal(km32.labels_, km64.labels_)

commit_id: 0822851f5cb17827939a7d7b4f8c84f43184ae89 | ast_levels: 11 | file_name: test_bisect_k_means.py | n_ast_nodes: 167
commit_message: FEA Bisecting K-Means (#20031)
Co-authored-by: Gael Varoquaux <gael.varoquaux@normalesup.org>
Co-authored-by: Tom Dupré la Tour <tom.dupre-la-tour@m4x.org>
Co-authored-by: Julien Jerphanion <git@jjerphan.xyz>
Co-authored-by: Jérémie du Boisberranger <34657725+jeremiedbb@users.noreply.github.com>
d_id: 75,910 | n_ast_errors: 0 | n_whitespaces: 69 | token_counts: 108 | vocab_size: 24 | id: 259,765 | n_words: 31 | repo: scikit-learn | n_identifiers: 22 | path: sklearn/cluster/tests/test_bisect_k_means.py | language: Python | nloc: 10
documentation: {
"docstring": "Check that the results are the same between float32 and float64.",
"language": "en",
"n_whitespaces": 10,
"n_words": 11,
"vocab_size": 10
} | https://github.com/scikit-learn/scikit-learn.git |
|
complexity: 1 | fun_name: test_all_day_reader_access | code:
async def test_all_day_reader_access(hass, mock_events_list_items, component_setup):
week_from_today = dt_util.now().date() + datetime.timedelta(days=7)
end_event = week_from_today + datetime.timedelta(days=1)
event = {
**TEST_EVENT,
"start": {"date": week_from_today.isoformat()},
"end": {"date": end_event.isoformat()},
}
mock_events_list_items([event])
assert await component_setup()
state = hass.states.get(TEST_ENTITY)
assert state.name == TEST_ENTITY_NAME
assert state.state == STATE_OFF
assert dict(state.attributes) == {
"friendly_name": TEST_ENTITY_NAME,
"message": event["summary"],
"all_day": True,
"offset_reached": False,
"start_time": week_from_today.strftime(DATE_STR_FORMAT),
"end_time": end_event.strftime(DATE_STR_FORMAT),
"location": event["location"],
"description": event["description"],
}
@pytest.mark.parametrize("calendar_access_role", ["reader", "freeBusyReader"])

commit_id: 5d1ca73a3491f0abf5925e01465c4525a49dafef | ast_errors: @pytest.mark.parametrize("calendar_access_role", ["reader", "freeBusyReader"]) | ast_levels: 12 | file_name: test_calendar.py | n_ast_nodes: 312
commit_message: Add create and delete for Google Calendar events (#83034)
* Add Google Calendar create/delete support
Includes editing for recurring events
* Fix default calendar access role
* Formatting improvements
* Address other details that have changed due to local sync
* Update tests/components/google/test_calendar.py
Co-authored-by: Martin Hjelmare <marhje52@gmail.com>
* Update tests/components/google/test_calendar.py
Co-authored-by: Martin Hjelmare <marhje52@gmail.com>
* Update tests/components/google/test_calendar.py
Co-authored-by: Martin Hjelmare <marhje52@gmail.com>
* Increase test coverage
Co-authored-by: Martin Hjelmare <marhje52@gmail.com>
d_id: 95,868 | n_ast_errors: 1 | n_whitespaces: 177 | token_counts: 167 | vocab_size: 52 | id: 296,896 | n_words: 65 | repo: core | n_identifiers: 29 | path: tests/components/google/test_calendar.py | language: Python | nloc: 23
documentation: {
"docstring": "Test that reader / freebusy reader access can load properly.",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 9
} | https://github.com/home-assistant/core.git |
complexity: 4 | fun_name: _set_resize_callback | code:
def _set_resize_callback(self):
if self._full_size:
logger.debug("Setting resize callback for actual size display")
for fig, size in self._images.values():
self._resize_ids.append((fig, fig.canvas.mpl_connect("resize_event",
self._on_resize)))
fig.set_size_inches(size)
else:
logger.debug("Removing resize callback for screen-fit display")
for fig, cid in self._resize_ids:
fig.canvas.mpl_disconnect(cid)
self._resize_ids = []

commit_id: 7b9fc0454d982a2425ec44e90e5b05a87d149953 | ast_levels: 15 | file_name: train.py | n_ast_nodes: 151
commit_message: Live Preview - Replace cv2 with matplotlib viewer
d_id: 20,476 | n_ast_errors: 0 | n_whitespaces: 225 | token_counts: 90 | vocab_size: 27 | id: 101,037 | n_words: 36 | repo: faceswap | n_identifiers: 17 | path: scripts/train.py | language: Python | nloc: 12
documentation: {
"docstring": " Sets the resize callback if displaying preview at actual size or removes it if\n displaying at screen-fit size. ",
"language": "en",
"n_whitespaces": 26,
"n_words": 18,
"vocab_size": 15
} | https://github.com/deepfakes/faceswap.git |
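The method toggles between registering and removing matplotlib `resize_event` callbacks. A self-contained sketch of that connect/disconnect pattern (plain matplotlib; the figure and handler names are illustrative, not from the record):

```python
import matplotlib.pyplot as plt

def on_resize(event):
    # ResizeEvent reports the new canvas size in pixels.
    print(f"figure resized to {event.width}x{event.height}")

fig, ax = plt.subplots()
cid = fig.canvas.mpl_connect("resize_event", on_resize)  # "actual size" mode
# ...later, when returning to screen-fit display, drop the callback again:
fig.canvas.mpl_disconnect(cid)
```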
|
complexity: 2 | fun_name: block | code:
def block(self, *, extra = None):
self.write(":")
if extra:
self.write(extra)
self._indent += 1
yield
self._indent -= 1

commit_id: 8198943edd73a363c266633e1aa5b2a9e9c9f526 | ast_levels: 9 | file_name: ast.py | n_ast_nodes: 67
commit_message: add python 3.10.4 for windows
d_id: 55,949 | n_ast_errors: 0 | n_whitespaces: 70 | token_counts: 38 | vocab_size: 15 | id: 220,242 | n_words: 17 | repo: XX-Net | n_identifiers: 5 | path: python3.10.4/Lib/ast.py | language: Python | nloc: 7
documentation: {
"docstring": "A context manager for preparing the source for blocks. It adds\n the character':', increases the indentation on enter and decreases\n the indentation on exit. If *extra* is given, it will be directly\n appended after the colon character.\n ",
"language": "en",
"n_whitespaces": 65,
"n_words": 37,
"vocab_size": 30
} | https://github.com/XX-net/XX-Net.git |
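This is the indentation-managing context manager from a vendored copy of CPython's `Lib/ast.py` unparser. A self-contained sketch of the same pattern outside that class (all names here are illustrative):

```python
from contextlib import contextmanager

class TinyWriter:
    # Minimal stand-in for the unparser: collects text and tracks indentation.

    def __init__(self):
        self._indent = 0
        self._parts = []

    def write(self, text):
        self._parts.append(text)

    def fill(self, text=""):
        # Start a new line at the current indentation level.
        self.write("\n" + "    " * self._indent + text)

    @contextmanager
    def block(self, *, extra=None):
        self.write(":")          # open the block
        if extra:
            self.write(extra)    # e.g. a trailing comment after the colon
        self._indent += 1
        yield
        self._indent -= 1

w = TinyWriter()
w.fill("if x")
with w.block():
    w.fill("pass")
print("".join(w._parts))  # '\nif x:\n    pass'
```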
|
complexity: 2 | fun_name: get_avail_mem_per_ray_worker_node | code:
def get_avail_mem_per_ray_worker_node(spark, object_store_memory_per_node):
num_cpus_per_spark_task = int(
spark.sparkContext.getConf().get("spark.task.cpus", "1")
)

commit_id: e76ccee69aaa7583be1a9d81cf7b2aa72cf25647 | ast_levels: 13 | file_name: utils.py | n_ast_nodes: 49
commit_message: Ray on spark implementation (#28771)
REP: ray-project/enhancements#14
d_id: 31,216 | n_ast_errors: 0 | n_whitespaces: 25 | token_counts: 83 | vocab_size: 9 | id: 137,681 | n_words: 9 | repo: ray | n_identifiers: 8 | path: python/ray/util/spark/utils.py | language: Python | nloc: 20
documentation: {
"docstring": "\n Return the available heap memory and object store memory for each ray worker.\n NB: We have one ray node per spark task.\n ",
"language": "en",
"n_whitespaces": 32,
"n_words": 22,
"vocab_size": 20
} | https://github.com/ray-project/ray.git |
|
complexity: 2 | fun_name: get_total_accepted_amount | code:
def get_total_accepted_amount(scorecard):
supplier = frappe.get_doc("Supplier", scorecard.supplier)
# Look up all PO Items with delivery dates between our dates
data = frappe.db.sql(
,
{"supplier": supplier.name, "start_date": scorecard.start_date, "end_date": scorecard.end_date},
as_dict=0,
)[0][0]
if not data:
data = 0
return data

commit_id: 494bd9ef78313436f0424b918f200dab8fc7c20b | ast_levels: 13 | file_name: supplier_scorecard_variable.py | n_ast_nodes: 114
commit_message: style: format code with black
d_id: 13,930 | n_ast_errors: 0 | n_whitespaces: 27 | token_counts: 68 | vocab_size: 33 | id: 65,553 | n_words: 38 | repo: erpnext | n_identifiers: 12 | path: erpnext/buying/doctype/supplier_scorecard_variable/supplier_scorecard_variable.py | language: Python | nloc: 20
documentation: {
"docstring": "Gets the total amount (in company currency) accepted in the period (based on Purchase Receipts)\n\t\t\tSELECT\n\t\t\t\tSUM(pr_item.qty * pr_item.base_rate)\n\t\t\tFROM\n\t\t\t\t`tabPurchase Receipt Item` pr_item,\n\t\t\t\t`tabPurchase Receipt` pr\n\t\t\tWHERE\n\t\t\t\tpr.supplier = %(supplier)s\n\t\t\t\tAND pr.posting_date BETWEEN %(start_date)s AND %(end_date)s\n\t\t\t\tAND pr_item.docstatus = 1\n\t\t\t\tAND pr_item.parent = pr.name",
"language": "en",
"n_whitespaces": 34,
"n_words": 45,
"vocab_size": 38
} | https://github.com/frappe/erpnext.git |
|
complexity: 4 | fun_name: icon | code:
def icon(name=None, classname=None, title=None, wrapped=False, class_name=None):
if not name:
raise ValueError("You must supply an icon name")
return {
"name": name,
# supporting class_name for backwards compatibility
"classname": classname or class_name or "icon",
"title": title,
"wrapped": wrapped,
}
@register.filter()

commit_id: 3d484e133dbf59ebc36da9a40172a454315b95b7 | ast_errors: @register.filter() | ast_levels: 10 | file_name: wagtailadmin_tags.py | n_ast_nodes: 106
commit_message: Update icon template to allow `classname`
- Preserve the existing `class_name` behaviour in most other cases
- Update only docs reference to use `classname`
- Relates to #6107 & #6028
d_id: 17,004 | n_ast_errors: 1 | n_whitespaces: 91 | token_counts: 56 | vocab_size: 36 | id: 80,081 | n_words: 38 | repo: wagtail | n_identifiers: 9 | path: wagtail/admin/templatetags/wagtailadmin_tags.py | language: Python | nloc: 9
documentation: {
"docstring": "\n Abstracts away the actual icon implementation.\n\n Usage:\n {% load wagtailadmin_tags %}\n ...\n {% icon name=\"cogs\" classname=\"icon--red\" title=\"Settings\" %}\n\n :param name: the icon name/id, required (string)\n :param classname: defaults to 'icon' if not provided (string)\n :param title: accessible label intended for screen readers (string)\n :return: Rendered template snippet (string)\n ",
"language": "en",
"n_whitespaces": 91,
"n_words": 48,
"vocab_size": 38
} | https://github.com/wagtail/wagtail.git |
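The tag function only assembles the template context dict; the point of the commit is the `classname`/`class_name` fallback. In a template it is used as `{% icon name="cogs" classname="icon--red" title="Settings" %}`; calling the function directly (with made-up values) shows both paths:

```python
# Direct calls for illustration only; values are hypothetical.
print(icon(name="cogs", classname="icon--red", title="Settings"))
# {'name': 'cogs', 'classname': 'icon--red', 'title': 'Settings', 'wrapped': False}

print(icon(name="cogs", class_name="legacy-class")["classname"])
# 'legacy-class' -- the older class_name kwarg still wins over the default 'icon'
```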
complexity: 1 | fun_name: test_querysets | code:
def test_querysets(self):
self.assertQuerysetEqual(
Employee.objects.filter(pk=123),
[
"Dan Jones",
],
str,
)
self.assertQuerysetEqual(
Employee.objects.filter(employee_code=123),
[
"Dan Jones",
],
str,
)
self.assertQuerysetEqual(
Employee.objects.filter(pk__in=[123, 456]),
[
"Fran Bones",
"Dan Jones",
],
str,
)
self.assertQuerysetEqual(
Employee.objects.all(),
[
"Fran Bones",
"Dan Jones",
],
str,
)
self.assertQuerysetEqual(
Business.objects.filter(name="Sears"), ["Sears"], lambda b: b.name
)
self.assertQuerysetEqual(
Business.objects.filter(pk="Sears"),
[
"Sears",
],
lambda b: b.name,
)

commit_id: 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | ast_levels: 11 | file_name: tests.py | n_ast_nodes: 250
commit_message: Refs #33476 -- Reformatted code with Black.
d_id: 50,144 | n_ast_errors: 0 | n_whitespaces: 482 | token_counts: 157 | vocab_size: 24 | id: 202,523 | n_words: 55 | repo: django | n_identifiers: 14 | path: tests/custom_pk/tests.py | language: Python | nloc: 41
documentation: {
"docstring": "\n Both pk and custom attribute_name can be used in filter and friends\n ",
"language": "en",
"n_whitespaces": 27,
"n_words": 12,
"vocab_size": 11
} | https://github.com/django/django.git |
|
complexity: 4 | fun_name: _set_session_summary | code:
def _set_session_summary(self, message):
if self._thread is None:
logger.debug("Setting session summary. (message: '%s')", message)
self._thread = LongRunningTask(target=self._summarise_data,
args=(Session, ),
widget=self)
self._thread.start()
self.after(1000, lambda msg=message: self._set_session_summary(msg))
elif not self._thread.complete.is_set():
logger.debug("Data not yet available")
self.after(1000, lambda msg=message: self._set_session_summary(msg))
else:
logger.debug("Retrieving data from thread")
result = self._thread.get_result()
if result is None:
logger.debug("No result from session summary. Clearing analysis view")
self._clear_session()
return
self._summary = result
self._thread = None
self.set_info(f"Session: {message}")
self._stats.tree_insert_data(self._summary)

commit_id: adb5975c94f0fb10296ef7f0c8d087d03a436e3c | ast_levels: 13 | file_name: display_analysis.py | n_ast_nodes: 283
commit_message: Graph popup - Always open in same position
d_id: 19,968 | n_ast_errors: 0 | n_whitespaces: 366 | token_counts: 168 | vocab_size: 47 | id: 100,497 | n_words: 66 | repo: faceswap | n_identifiers: 24 | path: lib/gui/display_analysis.py | language: Python | nloc: 22
documentation: {
"docstring": " Set the summary data and info message.\n\n Parameters\n ----------\n message: str\n The information message to set\n ",
"language": "en",
"n_whitespaces": 56,
"n_words": 16,
"vocab_size": 16
} | https://github.com/deepfakes/faceswap.git |
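The method keeps the Tk event loop responsive by running the summary in a worker thread and re-checking it with `after(1000, ...)`. A stripped-down sketch of that polling pattern (plain `tkinter` + `threading`; all names are illustrative and a display is required to run it):

```python
import threading
import time
import tkinter as tk

def slow_job(result):
    time.sleep(2)                  # stand-in for the expensive summary build
    result["value"] = "done"

root = tk.Tk()
result = {}
worker = threading.Thread(target=slow_job, args=(result,), daemon=True)
worker.start()

def poll():
    if worker.is_alive():
        root.after(1000, poll)     # not ready yet -- check again in a second
    else:
        print("worker finished:", result["value"])

root.after(1000, poll)
root.mainloop()
```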
|
complexity: 4 | fun_name: slice_filter | code:
def slice_filter(value, arg):
try:
bits = []
for x in str(arg).split(":"):
if not x:
bits.append(None)
else:
bits.append(int(x))
return value[slice(*bits)]
except (ValueError, TypeError):
return value # Fail silently.
@register.filter(is_safe=True, needs_autoescape=True)

commit_id: 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | ast_errors: @register.filter(is_safe=True, needs_autoescape=True) | ast_levels: 16 | file_name: defaultfilters.py | n_ast_nodes: 132
commit_message: Refs #33476 -- Reformatted code with Black.
d_id: 51,440 | n_ast_errors: 1 | n_whitespaces: 118 | token_counts: 66 | vocab_size: 28 | id: 206,249 | n_words: 29 | repo: django | n_identifiers: 16 | path: django/template/defaultfilters.py | language: Python | nloc: 11
documentation: {
"docstring": "\n Return a slice of the list using the same syntax as Python's list slicing.\n ",
"language": "en",
"n_whitespaces": 21,
"n_words": 14,
"vocab_size": 12
} | https://github.com/django/django.git |
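Because the body is plain Python, the behaviour described in the docstring can be seen by calling the filter function directly; template-side it is written as `{{ items|slice:":2" }}`:

```python
# Direct calls for illustration only; values are made up.
print(slice_filter([1, 2, 3, 4, 5], ":2"))   # [1, 2]
print(slice_filter([1, 2, 3, 4, 5], "1:4"))  # [2, 3, 4]
print(slice_filter("abcdef", "::2"))         # 'ace'
print(slice_filter([1, 2, 3], "bad:arg"))    # invalid arg: fails silently, returns [1, 2, 3]
```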
complexity: 1 | fun_name: clear | code:
def clear(self) -> AwaitRemove:
await_remove = self.query("ListView > ListItem").remove()
self.index = None
return await_remove

commit_id: 853d05631d69044c17dfbc568bb887128d704a1a | ast_levels: 11 | file_name: _list_view.py | n_ast_nodes: 49
commit_message: PR feedback
d_id: 45,298 | n_ast_errors: 0 | n_whitespaces: 42 | token_counts: 27 | vocab_size: 12 | id: 186,022 | n_words: 14 | repo: textual | n_identifiers: 7 | path: src/textual/widgets/_list_view.py | language: Python | nloc: 10
documentation: {
"docstring": "Clear all items from the ListView.\n\n Returns:\n AwaitRemove: An awaitable that yields control to the event loop until\n the DOM has been updated to reflect all children being removed.\n ",
"language": "en",
"n_whitespaces": 69,
"n_words": 29,
"vocab_size": 25
} | https://github.com/Textualize/textual.git |
|
complexity: 8 | fun_name: _get_images | code:
def _get_images(self):
logger.debug("Getting image paths")
images = {}
for side in ("a", "b"):
image_dir = getattr(self._args, f"input_{side}")
if not os.path.isdir(image_dir):
logger.error("Error: '%s' does not exist", image_dir)
sys.exit(1)
images[side] = get_image_paths(image_dir, ".png")
if not images[side]:
logger.error("Error: '%s' contains no images", image_dir)
sys.exit(1)
# Validate the first image is a detected face
test_image = next(img for img in images[side])
meta = read_image_meta(test_image)
logger.debug("Test file: (filename: %s, metadata: %s)", test_image, meta)
if "itxt" not in meta or "alignments" not in meta["itxt"]:
logger.error("The input folder '%s' contains images that are not extracted faces.",
image_dir)
logger.error("You can only train a model on faces generated from Faceswap's "
"extract process. Please check your sources and try again.")
sys.exit(1)
logger.info("Model %s Directory: '%s' (%s images)",
side.upper(), image_dir, len(images[side]))
logger.debug("Got image paths: %s", [(key, str(len(val)) + " images")
for key, val in images.items()])
self._validate_image_counts(images)
return images

commit_id: 0f7ee1603f093e70496da1585f137f268c0c5f87 | ast_levels: 14 | file_name: train.py | n_ast_nodes: 375
commit_message: training - Enable resize in popup preview image
d_id: 20,070 | n_ast_errors: 0 | n_whitespaces: 525 | token_counts: 219 | vocab_size: 105 | id: 100,607 | n_words: 138 | repo: faceswap | n_identifiers: 29 | path: scripts/train.py | language: Python | nloc: 27
documentation: {
"docstring": " Check the image folders exist and contains valid extracted faces. Obtain image paths.\n\n Returns\n -------\n dict\n The image paths for each side. The key is the side, the value is the list of paths\n for that side.\n ",
"language": "en",
"n_whitespaces": 88,
"n_words": 37,
"vocab_size": 27
} | https://github.com/deepfakes/faceswap.git |