title (string, 2-169 chars) | diff (string, 235-19.5k chars) | body (string, 0-30.5k chars) | url (string, 48-84 chars) | created_at (string, 20 chars) | closed_at (string, 20 chars) | merged_at (string, 20 chars) | updated_at (string, 20 chars) | diff_len (float64, 101-3.99k) | repo_name (string, 83 distinct values) | __index_level_0__ (int64, 15-52.7k)
---|---|---|---|---|---|---|---|---|---|---|
Import latest upstream augeas lens | diff --git a/letsencrypt-apache/letsencrypt_apache/augeas_lens/httpd.aug b/letsencrypt-apache/letsencrypt_apache/augeas_lens/httpd.aug
index 0f2cb7b4551..edaca3fef1b 100644
--- a/letsencrypt-apache/letsencrypt_apache/augeas_lens/httpd.aug
+++ b/letsencrypt-apache/letsencrypt_apache/augeas_lens/httpd.aug
@@ -106,11 +106,17 @@ let section (body:lens) =
let inner = (sep_spc . argv arg_sec)? . sep_osp .
dels ">" . opt_eol . ((body|comment) . (body|empty|comment)*)? .
indent . dels "</" in
- let kword = key word in
- let dword = del word "a" in
+ let kword = key (word - /perl/i) in
+ let dword = del (word - /perl/i) "a" in
[ indent . dels "<" . square kword inner dword . del />[ \t\n\r]*/ ">\n" ]
+let perl_section = [ indent . label "Perl" . del /<perl>/i "<Perl>"
+ . store /[^<]*/
+ . del /<\/perl>/i "</Perl>" . eol ]
+
+
let rec content = section (content|directive)
+ | perl_section
let lns = (content|directive|comment|empty)*
@@ -121,6 +127,7 @@ let filter = (incl "/etc/apache2/apache2.conf") .
(incl "/etc/apache2/conf-available/*.conf") .
(incl "/etc/apache2/mods-available/*") .
(incl "/etc/apache2/sites-available/*") .
+ (incl "/etc/apache2/vhosts.d/*.conf") .
(incl "/etc/httpd/conf.d/*.conf") .
(incl "/etc/httpd/httpd.conf") .
(incl "/etc/httpd/conf/httpd.conf") .
| - Handles Perl scripts embedded in Apache conf files
- Fixes: #2079
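A hedged sketch of exercising the updated lens through python-augeas (the module/lens names and the config path are assumptions based on the file above):

```python
import augeas

# Skip autoloading so Augeas only uses the Httpd lens we point it at.
aug = augeas.Augeas(flags=augeas.Augeas.NO_MODL_AUTOLOAD)
aug.set("/augeas/load/Httpd/lens", "Httpd.lns")
aug.set("/augeas/load/Httpd/incl", "/etc/apache2/sites-available/modperl.conf")
aug.load()

# With the new perl_section rule, a <Perl>...</Perl> block parses into a
# single "Perl" node instead of making the whole file fail to load.
print(aug.match("/files/etc/apache2/sites-available/modperl.conf/Perl"))
```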
| https://api.github.com/repos/certbot/certbot/pulls/2219 | 2016-01-18T19:45:26Z | 2016-01-19T21:36:20Z | 2016-01-19T21:36:20Z | 2016-05-06T19:21:56Z | 457 | certbot/certbot | 2,596 |
Create 2to00000010.py | diff --git a/Decimal to binary/2to00000010.py b/Decimal to binary/2to00000010.py
new file mode 100644
index 0000000000..ed7443cbad
--- /dev/null
+++ b/Decimal to binary/2to00000010.py
@@ -0,0 +1,30 @@
+# this uses GPL V3 LICENSE
+# code by @JymPatel
+
+import sys
+
+binary = '$' # just starting var
+n = 15 # can get 2**16 numbers
+
+# get an integer as input which is less than the limit 2**16
+limit = 2**(n + 1)
+try:
+    number = int(input("What is your Decimal Number?"))
+    if number >= limit:
+        raise ValueError
+except ValueError:
+    print("Please put an integer in input! & less than", limit)
+    sys.exit()
+
+# main algorithm
+while n >= 0:
+    if number < 2**n:
+        binary = binary + '0'
+    else:
+        binary = binary + '1'
+        number = number - 2**n
+    n = n - 1
+
+print(binary)
+
+# get it at https://github.com/JymPatel/Python3-FirstEdition
+print("get it at https://github.com/JymPatel/Python3-FirstEdition")
| Converts a decimal number to binary. | https://api.github.com/repos/geekcomputers/Python/pulls/1437 | 2021-11-22T12:03:20Z | 2021-11-24T17:18:46Z | 2021-11-24T17:18:46Z | 2021-11-24T17:18:46Z | 311 | geekcomputers/Python | 31,422 |
Deprecate all tls-sni related objects in acme module | diff --git a/CHANGELOG.md b/CHANGELOG.md
index 9761ad3e98a..2cc823d1c41 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -15,6 +15,8 @@ Certbot adheres to [Semantic Versioning](https://semver.org/).
### Changed
* Support for TLS-SNI-01 has been removed from all official Certbot plugins.
+* Attributes related to the TLS-SNI-01 challenge in `acme.challenges` and `acme.standalone`
+ modules are deprecated and will be removed soon.
* CLI flags `--tls-sni-01-port` and `--tls-sni-01-address` are now no-op, will
generate a deprecation warning if used, and will be removed soon.
* Options `tls-sni` and `tls-sni-01` in `--preferred-challenges` flag are now no-op,
diff --git a/acme/acme/__init__.py b/acme/acme/__init__.py
index d91072a3b12..20c008d6433 100644
--- a/acme/acme/__init__.py
+++ b/acme/acme/__init__.py
@@ -6,6 +6,7 @@
"""
import sys
+import warnings
# This code exists to keep backwards compatibility with people using acme.jose
# before it became the standalone josepy package.
@@ -20,3 +21,30 @@
# preserved (acme.jose.* is josepy.*)
if mod == 'josepy' or mod.startswith('josepy.'):
sys.modules['acme.' + mod.replace('josepy', 'jose', 1)] = sys.modules[mod]
+
+
+# This class takes a similar approach to the cryptography project to deprecate attributes
+# in public modules. See the _ModuleWithDeprecation class here:
+# https://github.com/pyca/cryptography/blob/91105952739442a74582d3e62b3d2111365b0dc7/src/cryptography/utils.py#L129
+class _TLSSNI01DeprecationModule(object):
+ """
+ Internal class delegating to a module, and displaying warnings when
+ attributes related to TLS-SNI-01 are accessed.
+ """
+ def __init__(self, module):
+ self.__dict__['_module'] = module
+
+ def __getattr__(self, attr):
+ if 'TLSSNI01' in attr:
+ warnings.warn('{0} attribute is deprecated, and will be removed soon.'.format(attr),
+ DeprecationWarning, stacklevel=2)
+ return getattr(self._module, attr)
+
+ def __setattr__(self, attr, value): # pragma: no cover
+ setattr(self._module, attr, value)
+
+ def __delattr__(self, attr): # pragma: no cover
+ delattr(self._module, attr)
+
+ def __dir__(self): # pragma: no cover
+ return ['_module'] + dir(self._module)
diff --git a/acme/acme/challenges.py b/acme/acme/challenges.py
index 6f2b3757b76..36e7ab41ca8 100644
--- a/acme/acme/challenges.py
+++ b/acme/acme/challenges.py
@@ -4,7 +4,7 @@
import hashlib
import logging
import socket
-import warnings
+import sys
from cryptography.hazmat.primitives import hashes # type: ignore
import josepy as jose
@@ -15,6 +15,7 @@
from acme import errors
from acme import crypto_util
from acme import fields
+from acme import _TLSSNI01DeprecationModule
logger = logging.getLogger(__name__)
@@ -515,8 +516,6 @@ class TLSSNI01(KeyAuthorizationChallenge):
#n = jose.Field("n", encoder=int, decoder=int)
def __init__(self, *args, **kwargs):
- warnings.warn("TLS-SNI-01 is deprecated, and will stop working soon.",
- DeprecationWarning, stacklevel=2)
super(TLSSNI01, self).__init__(*args, **kwargs)
def validation(self, account_key, **kwargs):
@@ -641,3 +640,7 @@ def check_validation(self, chall, account_public_key):
"""
return chall.check_validation(self.validation, account_public_key)
+
+
+# Patching ourselves to warn about TLS-SNI challenge deprecation and removal.
+sys.modules[__name__] = _TLSSNI01DeprecationModule(sys.modules[__name__])
diff --git a/acme/acme/challenges_test.py b/acme/acme/challenges_test.py
index 4b905c1e5de..edfaa34237d 100644
--- a/acme/acme/challenges_test.py
+++ b/acme/acme/challenges_test.py
@@ -1,6 +1,5 @@
"""Tests for acme.challenges."""
import unittest
-import warnings
import josepy as jose
import mock
@@ -374,25 +373,16 @@ def setUp(self):
'type': 'tls-sni-01',
'token': 'a82d5ff8ef740d12881f6d3c2277ab2e',
}
-
- def _msg(self):
from acme.challenges import TLSSNI01
- with warnings.catch_warnings(record=True) as warn:
- warnings.simplefilter("always")
- msg = TLSSNI01(
- token=jose.b64decode('a82d5ff8ef740d12881f6d3c2277ab2e'))
- assert warn is not None # using a raw assert for mypy
- self.assertTrue(len(warn) == 1)
- self.assertTrue(issubclass(warn[-1].category, DeprecationWarning))
- self.assertTrue('deprecated' in str(warn[-1].message))
- return msg
+ self.msg = TLSSNI01(
+ token=jose.b64decode('a82d5ff8ef740d12881f6d3c2277ab2e'))
def test_to_partial_json(self):
- self.assertEqual(self.jmsg, self._msg().to_partial_json())
+ self.assertEqual(self.jmsg, self.msg.to_partial_json())
def test_from_json(self):
from acme.challenges import TLSSNI01
- self.assertEqual(self._msg(), TLSSNI01.from_json(self.jmsg))
+ self.assertEqual(self.msg, TLSSNI01.from_json(self.jmsg))
def test_from_json_hashable(self):
from acme.challenges import TLSSNI01
@@ -407,10 +397,18 @@ def test_from_json_invalid_token_length(self):
@mock.patch('acme.challenges.TLSSNI01Response.gen_cert')
def test_validation(self, mock_gen_cert):
mock_gen_cert.return_value = ('cert', 'key')
- self.assertEqual(('cert', 'key'), self._msg().validation(
+ self.assertEqual(('cert', 'key'), self.msg.validation(
KEY, cert_key=mock.sentinel.cert_key))
mock_gen_cert.assert_called_once_with(key=mock.sentinel.cert_key)
+ def test_deprecation_message(self):
+ with mock.patch('acme.warnings.warn') as mock_warn:
+ from acme.challenges import TLSSNI01
+ assert TLSSNI01
+ self.assertEqual(mock_warn.call_count, 1)
+ self.assertTrue('deprecated' in mock_warn.call_args[0][0])
+
+
class TLSALPN01ResponseTest(unittest.TestCase):
# pylint: disable=too-many-instance-attributes
diff --git a/acme/acme/crypto_util.py b/acme/acme/crypto_util.py
index c88cab9431d..c47e88e73ae 100644
--- a/acme/acme/crypto_util.py
+++ b/acme/acme/crypto_util.py
@@ -18,17 +18,14 @@
logger = logging.getLogger(__name__)
-# TLSSNI01 certificate serving and probing is not affected by SSL
-# vulnerabilities: prober needs to check certificate for expected
-# contents anyway. Working SNI is the only thing that's necessary for
-# the challenge and thus scoping down SSL/TLS method (version) would
-# cause interoperability issues: TLSv1_METHOD is only compatible with
+# Default SSL method selected here is the most compatible, while secure
+# SSL method: TLSv1_METHOD is only compatible with
# TLSv1_METHOD, while SSLv23_METHOD is compatible with all other
# methods, including TLSv2_METHOD (read more at
# https://www.openssl.org/docs/ssl/SSLv23_method.html). _serve_sni
# should be changed to use "set_options" to disable SSLv2 and SSLv3,
# in case it's used for things other than probing/serving!
-_DEFAULT_TLSSNI01_SSL_METHOD = SSL.SSLv23_METHOD # type: ignore
+_DEFAULT_SSL_METHOD = SSL.SSLv23_METHOD # type: ignore
class SSLSocket(object): # pylint: disable=too-few-public-methods
@@ -40,7 +37,7 @@ class SSLSocket(object): # pylint: disable=too-few-public-methods
:ivar method: See `OpenSSL.SSL.Context` for allowed values.
"""
- def __init__(self, sock, certs, method=_DEFAULT_TLSSNI01_SSL_METHOD):
+ def __init__(self, sock, certs, method=_DEFAULT_SSL_METHOD):
self.sock = sock
self.certs = certs
self.method = method
@@ -112,7 +109,7 @@ def accept(self): # pylint: disable=missing-docstring
def probe_sni(name, host, port=443, timeout=300,
- method=_DEFAULT_TLSSNI01_SSL_METHOD, source_address=('', 0)):
+ method=_DEFAULT_SSL_METHOD, source_address=('', 0)):
"""Probe SNI server for SSL certificate.
:param bytes name: Byte string to send as the server name in the
diff --git a/acme/acme/messages.py b/acme/acme/messages.py
index 7c82c85071a..d8684603c8f 100644
--- a/acme/acme/messages.py
+++ b/acme/acme/messages.py
@@ -3,7 +3,7 @@
import json
try:
from collections.abc import Hashable # pylint: disable=no-name-in-module
-except ImportError:
+except ImportError: # pragma: no cover
from collections import Hashable
import josepy as jose
diff --git a/acme/acme/standalone.py b/acme/acme/standalone.py
index ff91599335e..c82967897b9 100644
--- a/acme/acme/standalone.py
+++ b/acme/acme/standalone.py
@@ -17,6 +17,7 @@
from acme import challenges
from acme import crypto_util
from acme.magic_typing import List # pylint: disable=unused-import, no-name-in-module
+from acme import _TLSSNI01DeprecationModule
logger = logging.getLogger(__name__)
@@ -37,7 +38,7 @@ def __init__(self, *args, **kwargs):
self.certs = kwargs.pop("certs", {})
self.method = kwargs.pop(
# pylint: disable=protected-access
- "method", crypto_util._DEFAULT_TLSSNI01_SSL_METHOD)
+ "method", crypto_util._DEFAULT_SSL_METHOD)
self.allow_reuse_address = kwargs.pop("allow_reuse_address", True)
socketserver.TCPServer.__init__(self, *args, **kwargs)
@@ -296,5 +297,9 @@ def simple_tls_sni_01_server(cli_args, forever=True):
server.handle_request()
+# Patching ourselves to warn about TLS-SNI challenge deprecation and removal.
+sys.modules[__name__] = _TLSSNI01DeprecationModule(sys.modules[__name__])
+
+
if __name__ == "__main__":
sys.exit(simple_tls_sni_01_server(sys.argv)) # pragma: no cover
diff --git a/acme/acme/standalone_test.py b/acme/acme/standalone_test.py
index ee527782ab6..953df40d411 100644
--- a/acme/acme/standalone_test.py
+++ b/acme/acme/standalone_test.py
@@ -1,13 +1,15 @@
"""Tests for acme.standalone."""
+import multiprocessing
import os
import shutil
import socket
import threading
import tempfile
import unittest
+import time
+from contextlib import closing
from six.moves import http_client # pylint: disable=import-error
-from six.moves import queue # pylint: disable=import-error
from six.moves import socketserver # type: ignore # pylint: disable=import-error
import josepy as jose
@@ -16,6 +18,7 @@
from acme import challenges
from acme import crypto_util
+from acme import errors
from acme import test_util
from acme.magic_typing import Set # pylint: disable=unused-import, no-name-in-module
@@ -248,7 +251,6 @@ def test_http01_not_found(self):
self.assertFalse(self._test_http01(add=False))
-@test_util.broken_on_windows
class TestSimpleTLSSNI01Server(unittest.TestCase):
"""Tests for acme.standalone.simple_tls_sni_01_server."""
@@ -263,35 +265,41 @@ def setUp(self):
shutil.copy(test_util.vector_path('rsa2048_key.pem'),
os.path.join(localhost_dir, 'key.pem'))
+ with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
+ sock.bind(('', 0))
+ sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+ self.port = sock.getsockname()[1]
+
from acme.standalone import simple_tls_sni_01_server
- self.thread = threading.Thread(
- target=simple_tls_sni_01_server, kwargs={
- 'cli_args': ('filename',),
- 'forever': False,
- },
- )
+ self.process = multiprocessing.Process(target=simple_tls_sni_01_server,
+ args=(['path', '-p', str(self.port)],))
self.old_cwd = os.getcwd()
os.chdir(self.test_cwd)
def tearDown(self):
os.chdir(self.old_cwd)
- self.thread.join()
+ if self.process.is_alive():
+ self.process.terminate()
shutil.rmtree(self.test_cwd)
- @mock.patch('acme.standalone.logger')
- def test_it(self, mock_logger):
- # Use a Queue because mock objects aren't thread safe.
- q = queue.Queue() # type: queue.Queue[int]
- # Add port number to the queue.
- mock_logger.info.side_effect = lambda *args: q.put(args[-1])
- self.thread.start()
-
- # After the timeout, an exception is raised if the queue is empty.
- port = q.get(timeout=5)
- cert = crypto_util.probe_sni(b'localhost', b'0.0.0.0', port)
+ @mock.patch('acme.standalone.TLSSNI01Server.handle_request')
+ def test_mock(self, handle):
+ from acme.standalone import simple_tls_sni_01_server
+ simple_tls_sni_01_server(cli_args=['path', '-p', str(self.port)], forever=False)
+ self.assertEqual(handle.call_count, 1)
+
+ def test_live(self):
+ self.process.start()
+ cert = None
+ for _ in range(50):
+ time.sleep(0.1)
+ try:
+ cert = crypto_util.probe_sni(b'localhost', b'127.0.0.1', self.port)
+ break
+ except errors.Error: # pragma: no cover
+ pass
self.assertEqual(jose.ComparableX509(cert),
- test_util.load_comparable_cert(
- 'rsa2048_cert.pem'))
+ test_util.load_comparable_cert('rsa2048_cert.pem'))
if __name__ == "__main__":
diff --git a/acme/acme/test_util.py b/acme/acme/test_util.py
index f9761470025..1a0b6705604 100644
--- a/acme/acme/test_util.py
+++ b/acme/acme/test_util.py
@@ -4,7 +4,6 @@
"""
import os
-import sys
import pkg_resources
import unittest
@@ -95,11 +94,3 @@ def skip_unless(condition, reason): # pragma: no cover
return lambda cls: cls
else:
return lambda cls: None
-
-def broken_on_windows(function):
- """Decorator to skip temporarily a broken test on Windows."""
- reason = 'Test is broken and ignored on windows but should be fixed.'
- return unittest.skipIf(
- sys.platform == 'win32'
- and os.environ.get('SKIP_BROKEN_TESTS_ON_WINDOWS', 'true') == 'true',
- reason)(function)
diff --git a/pytest.ini b/pytest.ini
index 49db7da09b8..2531e50d276 100644
--- a/pytest.ini
+++ b/pytest.ini
@@ -13,6 +13,6 @@
filterwarnings =
error
ignore:decodestring:DeprecationWarning
- ignore:TLS-SNI-01:DeprecationWarning
+ ignore:(TLSSNI01|TLS-SNI-01):DeprecationWarning
ignore:.*collections\.abc:DeprecationWarning
ignore:The `color_scheme` argument is deprecated:DeprecationWarning:IPython.*
| This PR is a part of the tls-sni-01 removal plan described in #6849.
As `acme` is a library, we need to put in some effort to make a decent deprecation path before totally removing tls-sni from it. While initialization of `acme.challenges.TLSSNI01` was already creating a deprecation warning, not all cases were covered.
For instance, an innocent call like this ...
```python
if not isinstance(challenge, acme.challenges.TLSSNI01):
print('I am not using this TLS-SNI deprecated stuff, what could possibly go wrong?')
```
... would break if we suddenly remove all objects related to this challenge.
So, I use the _Deprecator Warning Machine, Let's Pacify this Technical Debt_ (Guido ®), to make `acme.challenges` and `acme.standalone` patch themselves, and display a deprecation warning on stderr for any access to the tls-sni challenge objects.
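As a quick illustration of the effect, a hedged sketch assuming the patched `acme` package is importable:

```python
import warnings
import acme.challenges

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    acme.challenges.TLSSNI01  # mere attribute access triggers the warning
assert any(issubclass(w.category, DeprecationWarning) for w in caught)
```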
No dev should be able to avoid the deprecation warning. I set it up with the idea of removing the code in `0.34.0`, but the exact deprecation window is of course open to discussion. | https://api.github.com/repos/certbot/certbot/pulls/6859 | 2019-03-14T17:06:19Z | 2019-03-27T01:26:38Z | 2019-03-27T01:26:38Z | 2019-05-02T09:43:43Z | 3,964 | certbot/certbot | 2,782 |
[AIRFLOW-5553] MySqlToGoogleCloudStorageOperator: Bytes value check done based on the type of the variable | diff --git a/airflow/operators/mysql_to_gcs.py b/airflow/operators/mysql_to_gcs.py
index fef2c20b05f12..d7c07c81936b0 100644
--- a/airflow/operators/mysql_to_gcs.py
+++ b/airflow/operators/mysql_to_gcs.py
@@ -119,6 +119,6 @@ def convert_type(self, value, schema_type):
return value.total_seconds()
if isinstance(value, Decimal):
return float(value)
- if schema_type == "BYTES":
+ if isinstance(value, bytes) or schema_type == "BYTES":
return base64.standard_b64encode(value).decode('ascii')
return value
| Make sure you have checked _all_ steps below.
### Jira
- [x] My PR addresses the following [Airflow Jira](https://issues.apache.org/jira/browse/AIRFLOW) issues and references them in the PR title. For example, "\[AIRFLOW-XXX\] My Airflow PR"
- https://issues.apache.org/jira/browse/AIRFLOW-5553
- In case you are fixing a typo in the documentation you can prepend your commit with \[AIRFLOW-XXX\], code changes always need a Jira issue.
- In case you are proposing a fundamental code change, you need to create an Airflow Improvement Proposal ([AIP](https://cwiki.apache.org/confluence/display/AIRFLOW/Airflow+Improvements+Proposals)).
- In case you are adding a dependency, check if the license complies with the [ASF 3rd Party License Policy](https://www.apache.org/legal/resolved.html#category-x).
### Description
- [x] Here are some details about my PR, including screenshots of any UI changes:
Changed the condition to decode bytes values so they will now be dealt with without having to specify a BQ schema
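In other words, a `bytes` value is now base64-encoded even without a schema hint; a minimal standalone sketch of the updated branch (mirrors the diff above):

```python
import base64

def convert_type(value, schema_type=None):
    # simplified extract of the operator's conversion logic
    if isinstance(value, bytes) or schema_type == "BYTES":
        return base64.standard_b64encode(value).decode('ascii')
    return value

print(convert_type(b'\x00\xff'))        # encoded even though no schema is given
print(convert_type('plain', 'STRING'))  # non-bytes values pass through unchanged
```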
### Tests
- [x] My PR adds the following unit tests __OR__ does not need testing for this extremely good reason:
It's a trivial one line change adding a single condition to do the same task that had been done before.
### Commits
- [x] My commits all reference Jira issues in their subject lines, and I have squashed multiple commits if they address the same issue. In addition, my commits follow the guidelines from "[How to write a good git commit message](http://chris.beams.io/posts/git-commit/)":
1. Subject is separated from body by a blank line
1. Subject is limited to 50 characters (not including Jira issue reference)
1. Subject does not end with a period
1. Subject uses the imperative mood ("add", not "adding")
1. Body wraps at 72 characters
1. Body explains "what" and "why", not "how"
### Documentation
- [x] In case of new functionality, my PR adds documentation that describes how to use it.
- All the public functions and the classes in the PR contain docstrings that explain what it does
- If you implement backwards incompatible changes, please leave a note in the [Updating.md](https://github.com/apache/airflow/blob/master/UPDATING.md) so we can assign it to a appropriate release
| https://api.github.com/repos/apache/airflow/pulls/6183 | 2019-09-25T18:27:13Z | 2019-09-28T11:24:25Z | 2019-09-28T11:24:25Z | 2019-10-02T16:48:56Z | 153 | apache/airflow | 14,525 |
Changing Environment Docstrings | diff --git a/gym/core.py b/gym/core.py
index 797f9216049..8d957ba6e20 100644
--- a/gym/core.py
+++ b/gym/core.py
@@ -65,7 +65,7 @@ def step(self, action: ActType) -> Tuple[ObsType, float, bool, dict]:
observation (object): agent's observation of the current environment
reward (float) : amount of reward returned after previous action
done (bool): whether the episode has ended, in which case further step() calls will return undefined results
- info (dict): contains auxiliary diagnostic information (helpful for debugging, and sometimes learning)
+ info (dict): contains auxiliary diagnostic information (helpful for debugging, logging, and sometimes learning)
"""
raise NotImplementedError
@@ -76,11 +76,13 @@ def reset(
"""Resets the environment to an initial state and returns an initial
observation.
- Note that this function should not reset the environment's random
- number generator(s); random variables in the environment's state should
- be sampled independently between multiple calls to `reset()`. In other
- words, each call of `reset()` should yield an environment suitable for
- a new episode, independent of previous episodes.
+ This method should also reset the environment's random number
+ generator(s) if `seed` is an integer or if the environment has not
+ yet initialized a random number generator. If the environment already
+ has a random number generator and `reset` is called with `seed=None`,
+ the RNG should not be reset.
+ Moreover, `reset` should (in the typical use case) be called with an
+ integer seed right after initialization and then never again.
Returns:
observation (object): the initial observation.
@@ -94,8 +96,8 @@ def render(self, mode="human"):
"""Renders the environment.
The set of supported modes varies per environment. (And some
- environments do not support rendering at all.) By convention,
- if mode is:
+ third-party environments may not support rendering at all.)
+ By convention, if mode is:
- human: render to the current display or terminal and
return nothing. Usually for human consumption.
| I have made some changes to the docstrings of the `Env` methods.
`reset`'s docstring states that
> this function should not reset the environment's random number generator(s);
If I understand correctly, this used to be true before `seed()` was deprecated. In the current code, it seems that `reset()` is supposed to reset the RNG if
- No RNG exists yet
- The keyword argument `seed` is an integer
I have changed `reset()`'s docstring accordingly. Is my interpretation of the code correct?
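A hedged sketch of that seeding logic as I understand it (simplified; the real implementation uses `gym.utils.seeding` and differs in details):

```python
import numpy as np

class ToyEnv:
    def __init__(self):
        self._np_random = None  # no RNG until the first reset

    def reset(self, *, seed=None):
        # Re-seed when an integer seed is given, or when no RNG exists yet;
        # reset(seed=None) on an already-seeded env keeps the RNG as-is.
        if seed is not None or self._np_random is None:
            self._np_random = np.random.default_rng(seed)
        return self._np_random.random()  # stand-in for an observation
```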
Moreover, @RedTachyon suggested some changes to the Gym documentation in https://github.com/Farama-Foundation/gym-docs/pull/77 that are also applicable to the docstrings:
- Mention that `info` may contain metrics for logging
- Change "And some environments do not support rendering at all." to "And some third-party environments may not support rendering at all." because all native Gym environments should support rendering in some form.
Formatting of the docstrings seems to be very inconsistent in general, and some docstrings don't list their method's arguments. I have not made any changes in those cases, but this might be an avenue for future improvements. | https://api.github.com/repos/openai/gym/pulls/2611 | 2022-02-12T16:54:41Z | 2022-02-13T00:39:03Z | 2022-02-13T00:39:03Z | 2022-02-13T00:39:03Z | 503 | openai/gym | 5,254 |
TYP ensure bool_t is always used in pandas/core/generic.py | diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index d927be76843e1..7988012498db7 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -206,3 +206,8 @@ repos:
files: ^pandas/core/
exclude: ^pandas/core/api\.py$
types: [python]
+ - id: no-bool-in-core-generic
+ name: Use bool_t instead of bool in pandas/core/generic.py
+ entry: python scripts/no_bool_in_generic.py
+ language: python
+ files: ^pandas/core/generic\.py$
diff --git a/LICENSES/PYUPGRADE_LICENSE b/LICENSES/PYUPGRADE_LICENSE
new file mode 100644
index 0000000000000..522fbe20b8991
--- /dev/null
+++ b/LICENSES/PYUPGRADE_LICENSE
@@ -0,0 +1,19 @@
+Copyright (c) 2017 Anthony Sottile
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index b3262c61a0597..8e20eeb16c7a8 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -232,7 +232,7 @@ class NDFrame(PandasObject, SelectionMixin, indexing.IndexingMixin):
def __init__(
self,
data: Manager,
- copy: bool = False,
+ copy: bool_t = False,
attrs: Mapping[Hashable, Any] | None = None,
):
# copy kwarg is retained for mypy compat, is not used
@@ -249,7 +249,7 @@ def __init__(
@classmethod
def _init_mgr(
- cls, mgr, axes, dtype: Dtype | None = None, copy: bool = False
+ cls, mgr, axes, dtype: Dtype | None = None, copy: bool_t = False
) -> Manager:
""" passed a manager and a axes dict """
for a, axe in axes.items():
@@ -377,8 +377,8 @@ def flags(self) -> Flags:
def set_flags(
self: FrameOrSeries,
*,
- copy: bool = False,
- allows_duplicate_labels: bool | None = None,
+ copy: bool_t = False,
+ allows_duplicate_labels: bool_t | None = None,
) -> FrameOrSeries:
"""
Return a new object with updated flags.
@@ -467,7 +467,7 @@ def _data(self):
_stat_axis_name = "index"
_AXIS_ORDERS: list[str]
_AXIS_TO_AXIS_NUMBER: dict[Axis, int] = {0: 0, "index": 0, "rows": 0}
- _AXIS_REVERSED: bool
+ _AXIS_REVERSED: bool_t
_info_axis_number: int
_info_axis_name: str
_AXIS_LEN: int
@@ -494,7 +494,7 @@ def _construct_axes_dict(self, axes=None, **kwargs):
@final
@classmethod
def _construct_axes_from_arguments(
- cls, args, kwargs, require_all: bool = False, sentinel=None
+ cls, args, kwargs, require_all: bool_t = False, sentinel=None
):
"""
Construct and returns axes if supplied in args/kwargs.
@@ -714,11 +714,11 @@ def set_axis(self: FrameOrSeries, labels, *, inplace: Literal[True]) -> None:
@overload
def set_axis(
- self: FrameOrSeries, labels, axis: Axis = ..., inplace: bool = ...
+ self: FrameOrSeries, labels, axis: Axis = ..., inplace: bool_t = ...
) -> FrameOrSeries | None:
...
- def set_axis(self, labels, axis: Axis = 0, inplace: bool = False):
+ def set_axis(self, labels, axis: Axis = 0, inplace: bool_t = False):
"""
Assign desired index to given axis.
@@ -749,7 +749,7 @@ def set_axis(self, labels, axis: Axis = 0, inplace: bool = False):
return self._set_axis_nocheck(labels, axis, inplace)
@final
- def _set_axis_nocheck(self, labels, axis: Axis, inplace: bool):
+ def _set_axis_nocheck(self, labels, axis: Axis, inplace: bool_t):
# NDFrame.rename with inplace=False calls set_axis(inplace=True) on a copy.
if inplace:
setattr(self, self._get_axis_name(axis), labels)
@@ -995,8 +995,8 @@ def rename(
index: Renamer | None = None,
columns: Renamer | None = None,
axis: Axis | None = None,
- copy: bool = True,
- inplace: bool = False,
+ copy: bool_t = True,
+ inplace: bool_t = False,
level: Level | None = None,
errors: str = "ignore",
) -> FrameOrSeries | None:
@@ -1402,13 +1402,13 @@ def _set_axis_name(self, name, axis=0, inplace=False):
# Comparison Methods
@final
- def _indexed_same(self, other) -> bool:
+ def _indexed_same(self, other) -> bool_t:
return all(
self._get_axis(a).equals(other._get_axis(a)) for a in self._AXIS_ORDERS
)
@final
- def equals(self, other: object) -> bool:
+ def equals(self, other: object) -> bool_t:
"""
Test whether two objects contain the same elements.
@@ -5071,7 +5071,7 @@ def filter(
return self.reindex(**{name: [r for r in items if r in labels]})
elif like:
- def f(x) -> bool:
+ def f(x) -> bool_t:
assert like is not None # needed for mypy
return like in ensure_str(x)
@@ -5079,7 +5079,7 @@ def f(x) -> bool:
return self.loc(axis=axis)[values]
elif regex:
- def f(x) -> bool:
+ def f(x) -> bool_t:
return matcher.search(ensure_str(x)) is not None
matcher = re.compile(regex)
diff --git a/scripts/no_bool_in_generic.py b/scripts/no_bool_in_generic.py
new file mode 100644
index 0000000000000..f80eff56b2729
--- /dev/null
+++ b/scripts/no_bool_in_generic.py
@@ -0,0 +1,92 @@
+"""
+Check that pandas/core/generic.py doesn't use bool as a type annotation.
+
+There is already the method `bool`, so the alias `bool_t` should be used instead.
+
+This is meant to be run as a pre-commit hook - to run it manually, you can do:
+
+ pre-commit run no-bool-in-core-generic --all-files
+
+The function `visit` is adapted from a function by the same name in pyupgrade:
+https://github.com/asottile/pyupgrade/blob/5495a248f2165941c5d3b82ac3226ba7ad1fa59d/pyupgrade/_data.py#L70-L113
+"""
+
+import argparse
+import ast
+import collections
+from typing import (
+ Dict,
+ List,
+ Optional,
+ Sequence,
+ Tuple,
+)
+
+
+def visit(tree: ast.Module) -> Dict[int, List[int]]:
+ "Step through tree, recording when nodes are in annotations."
+ in_annotation = False
+ nodes: List[Tuple[bool, ast.AST]] = [(in_annotation, tree)]
+ to_replace = collections.defaultdict(list)
+
+ while nodes:
+ in_annotation, node = nodes.pop()
+
+ if isinstance(node, ast.Name) and in_annotation and node.id == "bool":
+ to_replace[node.lineno].append(node.col_offset)
+
+ for name in reversed(node._fields):
+ value = getattr(node, name)
+ if name in {"annotation", "returns"}:
+ next_in_annotation = True
+ else:
+ next_in_annotation = in_annotation
+ if isinstance(value, ast.AST):
+ nodes.append((next_in_annotation, value))
+ elif isinstance(value, list):
+ for value in reversed(value):
+ if isinstance(value, ast.AST):
+ nodes.append((next_in_annotation, value))
+
+ return to_replace
+
+
+def replace_bool_with_bool_t(to_replace, content: str) -> str:
+ new_lines = []
+
+ for n, line in enumerate(content.splitlines(), start=1):
+ if n in to_replace:
+ for col_offset in reversed(to_replace[n]):
+ line = line[:col_offset] + "bool_t" + line[col_offset + 4 :]
+ new_lines.append(line)
+ return "\n".join(new_lines)
+
+
+def check_for_bool_in_generic(content: str) -> Tuple[bool, str]:
+ tree = ast.parse(content)
+ to_replace = visit(tree)
+
+ if not to_replace:
+ mutated = False
+ return mutated, content
+
+ mutated = True
+ return mutated, replace_bool_with_bool_t(to_replace, content)
+
+
+def main(argv: Optional[Sequence[str]] = None) -> None:
+ parser = argparse.ArgumentParser()
+ parser.add_argument("paths", nargs="*")
+ args = parser.parse_args(argv)
+
+ for path in args.paths:
+ with open(path, encoding="utf-8") as fd:
+ content = fd.read()
+ mutated, new_content = check_for_bool_in_generic(content)
+ if mutated:
+ with open(path, "w", encoding="utf-8") as fd:
+ fd.write(new_content)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/scripts/tests/test_no_bool_in_generic.py b/scripts/tests/test_no_bool_in_generic.py
new file mode 100644
index 0000000000000..0bc91c5d1cf1e
--- /dev/null
+++ b/scripts/tests/test_no_bool_in_generic.py
@@ -0,0 +1,20 @@
+from scripts.no_bool_in_generic import check_for_bool_in_generic
+
+BAD_FILE = "def foo(a: bool) -> bool:\n return bool(0)"
+GOOD_FILE = "def foo(a: bool_t) -> bool_t:\n return bool(0)"
+
+
+def test_bad_file_with_replace():
+ content = BAD_FILE
+ mutated, result = check_for_bool_in_generic(content)
+ expected = GOOD_FILE
+ assert result == expected
+ assert mutated
+
+
+def test_good_file_with_replace():
+ content = GOOD_FILE
+ mutated, result = check_for_bool_in_generic(content)
+ expected = content
+ assert result == expected
+ assert not mutated
| - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
Currently, lots of methods use the `bool` annotation in that file. I'm not sure it affects `mypy` (some experiments with `reveal_type` suggest it doesn't) - however, it does confuse VSCode, which takes you to the method `bool` if you hover over one of these annotations.
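The new pre-commit hook rewrites such annotations automatically; a quick illustration that mirrors the unit tests added in the diff above:

```python
from scripts.no_bool_in_generic import check_for_bool_in_generic

src = "def foo(a: bool) -> bool:\n    return bool(0)"
mutated, fixed = check_for_bool_in_generic(src)
assert mutated
# only annotation positions are rewritten; the call to bool() is untouched
assert fixed == "def foo(a: bool_t) -> bool_t:\n    return bool(0)"
```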
The file already contains
```python
bool_t = bool # Need alias because NDFrame has def bool:
```
(added in #26024), but it's not always used. | https://api.github.com/repos/pandas-dev/pandas/pulls/40175 | 2021-03-02T18:43:20Z | 2021-04-08T14:43:39Z | 2021-04-08T14:43:39Z | 2021-04-08T14:59:56Z | 2,699 | pandas-dev/pandas | 45,080 |
Add documentation about legacy cb-auto files | diff --git a/letsencrypt-auto-source/README.md b/letsencrypt-auto-source/README.md
new file mode 100644
index 00000000000..f37f3dfbd44
--- /dev/null
+++ b/letsencrypt-auto-source/README.md
@@ -0,0 +1,12 @@
+# Legacy letsencrypt-auto files
+
+`certbot-auto` and `letsencrypt-auto` were two names for the same self-updating
+shell script that wrapped Certbot. Old versions of the script continue to rely
+on pulling `letsencrypt-auto` and `letsencrypt-auto.sig` from this directory hosted on Github to download and
+verify updates. We're keeping these files and the tests for them around to
+prevent these old scripts from breaking.
+
+If we need or want to remove these files and tests in the future, we can, but
+before we do, we should write a Let's Encrypt forum post describing the error
+message users will see and how they can work around the problem. See
+https://github.com/certbot/certbot/issues/8812 for more info.
diff --git a/tests/modification-check.py b/tests/modification-check.py
index c1530d1f981..d685225a838 100755
--- a/tests/modification-check.py
+++ b/tests/modification-check.py
@@ -10,12 +10,11 @@
# taken from our v1.14.0 tag which was the last release we intended to make
# changes to certbot-auto.
#
-# Deleting letsencrypt-auto-source/letsencrypt-auto and
-# letsencrypt-auto-source/letsencrypt-auto.sig can be done once we're
-# comfortable breaking any certbot-auto scripts that haven't already updated to
-# the last version. See
-# https://opensource.eff.org/eff-open-source/pl/65geri7c4tr6iqunc1rpb3mpna for
-# more info.
+# We can delete this script and the files under letsencrypt-auto-source when
+# we're comfortable breaking any old certbot-auto scripts that haven't updated
+# to the last version of the script yet. See
+# https://opensource.eff.org/eff-open-source/pl/65geri7c4tr6iqunc1rpb3mpna and
+# letsencrypt-auto-source/README.md for more info.
EXPECTED_FILES = {
os.path.join('letsencrypt-auto-source', 'letsencrypt-auto'):
'b997e3608526650a08e36e682fc3bf0c29903c06fa5ba4cc49308c43832450c2',
| Fixes https://github.com/certbot/certbot/issues/8812 by implementing the plan Erica and I discussed there.
| https://api.github.com/repos/certbot/certbot/pulls/9011 | 2021-08-27T17:43:59Z | 2021-09-09T20:21:47Z | 2021-09-09T20:21:47Z | 2021-09-14T03:33:20Z | 570 | certbot/certbot | 3,029 |
Subaru Legacy 22 fingerprint | diff --git a/selfdrive/car/subaru/values.py b/selfdrive/car/subaru/values.py
index 7d8365a11f3435..d04e5f2cc62322 100644
--- a/selfdrive/car/subaru/values.py
+++ b/selfdrive/car/subaru/values.py
@@ -106,18 +106,23 @@ class SubaruCarInfo(CarInfo):
CAR.LEGACY: {
(Ecu.esp, 0x7b0, None): [
b'\xa1\\ x04\x01',
+ b'\xa1 \x03\x03'
],
(Ecu.eps, 0x746, None): [
b'\x9b\xc0\x11\x00',
+ b'\x9b\xc0\x11\x02'
],
(Ecu.fwdCamera, 0x787, None): [
b'\x00\x00e\x80\x00\x1f@ \x19\x00',
+ b'\x00\x00e\x9a\x00\x00\x00\x00\x00\x00'
],
(Ecu.engine, 0x7e0, None): [
b'\xde\"a0\x07',
+ b'\xe2"aq\x07'
],
(Ecu.transmission, 0x7e1, None): [
b'\xa5\xf6\x05@\x00',
+ b'\xa7\xf6\x04@\x00'
],
},
CAR.IMPREZA: {
| 2022 Subaru Legacy fingerprint
c56e69bbc74b8fad|2022-09-02--19-23-21--0 | https://api.github.com/repos/commaai/openpilot/pulls/25665 | 2022-09-05T03:12:22Z | 2022-09-05T20:24:17Z | 2022-09-05T20:24:17Z | 2022-12-28T19:01:29Z | 336 | commaai/openpilot | 8,938 |
Specify comma containers come from ghcr.io in docker pull commands. | diff --git a/tools/sim/build_container.sh b/tools/sim/build_container.sh
index 7fb94ba855b0ca..8a9d0e0e29e2a3 100755
--- a/tools/sim/build_container.sh
+++ b/tools/sim/build_container.sh
@@ -3,7 +3,7 @@
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
cd $DIR/../../
-docker pull commaai/openpilot-base:latest
+docker pull ghcr.io/commaai/openpilot-base:latest
docker build \
--cache-from commaai/openpilot-sim:latest \
-t commaai/openpilot-sim:latest \
diff --git a/tools/sim/start_openpilot_docker.sh b/tools/sim/start_openpilot_docker.sh
index 97119c36d0a1d6..4a3988394e7f16 100755
--- a/tools/sim/start_openpilot_docker.sh
+++ b/tools/sim/start_openpilot_docker.sh
@@ -3,7 +3,7 @@
# expose X to the container
xhost +local:root
-docker pull commaai/openpilot-sim:latest
+docker pull ghcr.io/commaai/openpilot-sim:latest
docker run --net=host\
--name openpilot_client \
| **Description**
This is a pull request to fix the pull URL of the openpilot docker containers.
I'm not sure what the `build_container.sh` script is used for, but maybe the change is correct for it too.
Please correct me if I'm wrong about this being a fix.
**Verification**
I tried starting the simulation environment by following https://github.com/commaai/openpilot#testing-on-pc but I couldn't download the openpilot docker containers. I saw from the `Dockerfile.sim` that they are pulled from ghcr.io instead of the default docker repository, so I tried that and it worked. | https://api.github.com/repos/commaai/openpilot/pulls/20342 | 2021-03-14T13:16:29Z | 2021-03-14T19:59:05Z | 2021-03-14T19:59:05Z | 2021-03-14T19:59:05Z | 303 | commaai/openpilot | 9,767 |
test_models: check consitency between panda and openpilot for velocity | diff --git a/panda b/panda
index bb75afc84e4d33..7e2b312fa86e6c 160000
--- a/panda
+++ b/panda
@@ -1 +1 @@
-Subproject commit bb75afc84e4d33af1554ada81851547db648e33e
+Subproject commit 7e2b312fa86e6cdfeba496f261407cb47cb005d1
diff --git a/selfdrive/car/tests/test_models.py b/selfdrive/car/tests/test_models.py
index aaaa6b9d79589d..3a76eff3ab6f77 100755
--- a/selfdrive/car/tests/test_models.py
+++ b/selfdrive/car/tests/test_models.py
@@ -23,6 +23,7 @@
from openpilot.tools.lib.route import Route, SegmentName, RouteName
from panda.tests.libpanda import libpanda_py
+from panda.tests.safety.common import VEHICLE_SPEED_FACTOR
EventName = car.CarEvent.EventName
PandaType = log.PandaState.PandaType
@@ -345,6 +346,11 @@ def test_panda_safety_carstate(self):
checks['brakePressed'] += brake_pressed != self.safety.get_brake_pressed_prev()
checks['regenBraking'] += CS.regenBraking != self.safety.get_regen_braking_prev()
+ # Verify that panda has the correct velocity for cars that use it (angle based cars)
+ if self.CP.steerControlType in [car.CarParams.SteerControlType.angle] and not self.CP.notCar:
+ panda_velocity = self.safety.get_vehicle_speed_last() / VEHICLE_SPEED_FACTOR
+ checks['vEgo'] += abs(panda_velocity - CS.vEgoRaw) > 0.2
+
if self.CP.pcmCruise:
# On most pcmCruise cars, openpilot's state is always tied to the PCM's cruise state.
# On Honda Nidec, we always engage on the rising edge of the PCM cruise state, but
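For context, a hedged sketch of the unit handling in the new check; the factor's literal value below is an assumption for illustration (the test imports it from `panda.tests.safety.common`):

```python
VEHICLE_SPEED_FACTOR = 100.0  # assumed scale; panda tracks speed in fixed-point units

panda_speed_raw = 1234                                   # e.g. get_vehicle_speed_last()
panda_velocity = panda_speed_raw / VEHICLE_SPEED_FACTOR  # -> 12.34 m/s
v_ego_raw = 12.40                                        # CS.vEgoRaw from openpilot
assert abs(panda_velocity - v_ego_raw) <= 0.2            # tolerance used in the test
```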
| Good check to have for angle-based cars that require it as part of the safety (wanted to check it for the new Subarus). | https://api.github.com/repos/commaai/openpilot/pulls/30750 | 2023-12-15T21:55:04Z | 2023-12-15T22:50:37Z | 2023-12-15T22:50:37Z | 2023-12-16T01:18:32Z | 457 | commaai/openpilot | 9,287 |
Lower case "console" in import makes example fail | diff --git a/docs/source/syntax.rst b/docs/source/syntax.rst
index 2b0f1425f..536a3de69 100644
--- a/docs/source/syntax.rst
+++ b/docs/source/syntax.rst
@@ -6,7 +6,7 @@ Rich can syntax highlight various programming languages with line numbers.
To syntax highlight code, construct a :class:`~rich.syntax.Syntax` object and print it to the console. Here's an example::
- from rich.console import console
+ from rich.console import Console
from rich.syntax import Syntax
console = Console()
@@ -22,4 +22,4 @@ You can use this class from the command line. Here's how you would syntax highli
For the full list of arguments, run the following::
python -m rich.syntax -h
-
\ No newline at end of file
+
| ## Type of changes
- [ ] Bug fix
- [ ] New feature
- [X] Documentation / docstrings
- [ ] Tests
- [ ] Other
## Checklist
- [ ] I've run the latest [black](https://github.com/ambv/black) with default args on new code.
- [ ] I've updated CHANGELOG.md and CONTRIBUTORS.md where appropriate.
- [ ] I've added tests for new code.
- [X] I accept that @willmcgugan may be pedantic in the code review.
## Description
This is a tiny fix to the code in the docs so the syntax example code works - hopefully you're fine I didn't run black, update CHANGELOG.md etc (given how small change is)
BTW: this library is awesome :sunglasses: - thank you for creating it!
| https://api.github.com/repos/Textualize/rich/pulls/105 | 2020-06-06T00:58:21Z | 2020-06-06T11:18:11Z | 2020-06-06T11:18:11Z | 2020-06-09T01:21:58Z | 195 | Textualize/rich | 48,487 |
get_sans_from_csr using pyOpenSSL | diff --git a/letsencrypt/crypto_util.py b/letsencrypt/crypto_util.py
index f89f281cbc0..94617eef65d 100644
--- a/letsencrypt/crypto_util.py
+++ b/letsencrypt/crypto_util.py
@@ -1,4 +1,4 @@
-"""Let's Encrypt client crypto utility functions
+"""Let's Encrypt client crypto utility functions.
.. todo:: Make the transition to use PSS rather than PKCS1_v1_5 when the server
is capable of handling the signatures.
@@ -13,6 +13,7 @@
import Crypto.Signature.PKCS1_v1_5
import M2Crypto
+import OpenSSL
from letsencrypt import le_util
@@ -231,3 +232,44 @@ def make_ss_cert(key_str, domains, not_before=None,
assert cert.verify()
# print check_purpose(,0
return cert.as_pem()
+
+
+def _request_san(req): # TODO: implement directly in PyOpenSSL!
+ # constants based on implementation of
+ # OpenSSL.crypto.X509Error._subjectAltNameString
+ parts_separator = ", "
+ part_separator = ":"
+ extension_short_name = "subjectAltName"
+
+ # pylint: disable=protected-access,no-member
+ label = OpenSSL.crypto.X509Extension._prefixes[OpenSSL.crypto._lib.GEN_DNS]
+ assert parts_separator not in label
+ prefix = label + part_separator
+
+ extensions = [ext._subjectAltNameString().split(parts_separator)
+ for ext in req.get_extensions()
+ if ext.get_short_name() == extension_short_name]
+ # WARNING: this function assumes that no SAN can include
+ # parts_separator, hence the split!
+
+ return [part.split(part_separator)[1] for parts in extensions
+ for part in parts if part.startswith(prefix)]
+
+
+def get_sans_from_csr(csr, typ=OpenSSL.crypto.FILETYPE_PEM):
+ """Get list of Subject Alternative Names from signing request.
+
+ :param str csr: Certificate Signing Request in PEM format (must contain
+ one or more subjectAlternativeNames, or the function will fail,
+ raising ValueError)
+
+ :returns: List of referenced subject alternative names
+ :rtype: list
+
+ """
+ try:
+ request = OpenSSL.crypto.load_certificate_request(typ, csr)
+ except OpenSSL.crypto.Error as error:
+ logging.exception(error)
+ raise
+ return _request_san(request)
diff --git a/letsencrypt/tests/crypto_util_test.py b/letsencrypt/tests/crypto_util_test.py
index bdd67da6ada..92cb4014bc9 100644
--- a/letsencrypt/tests/crypto_util_test.py
+++ b/letsencrypt/tests/crypto_util_test.py
@@ -7,6 +7,7 @@
import unittest
import M2Crypto
+import OpenSSL
import mock
@@ -150,5 +151,41 @@ def test_it(self): # pylint: disable=no-self-use
make_ss_cert(RSA512_KEY, ['example.com', 'www.example.com'])
+class GetSansFromCsrTest(unittest.TestCase):
+ """Tests for letsencrypt.crypto_util.get_sans_from_csr."""
+ def test_extract_one_san(self):
+ from letsencrypt.crypto_util import get_sans_from_csr
+ csr = pkg_resources.resource_string(
+ __name__, os.path.join('testdata', 'csr.pem'))
+ self.assertEqual(get_sans_from_csr(csr), ['example.com'])
+
+ def test_extract_two_sans(self):
+ from letsencrypt.crypto_util import get_sans_from_csr
+ csr = pkg_resources.resource_string(
+ __name__, os.path.join('testdata', 'csr-san.pem'))
+ self.assertEqual(get_sans_from_csr(csr), ['example.com',
+ 'www.example.com'])
+
+ def test_extract_six_sans(self):
+ from letsencrypt.crypto_util import get_sans_from_csr
+ csr = pkg_resources.resource_string(
+ __name__, os.path.join('testdata', 'csr-6sans.pem'))
+ self.assertEqual(get_sans_from_csr(csr),
+ ["example.com", "example.org", "example.net",
+ "example.info", "subdomain.example.com",
+ "other.subdomain.example.com"])
+
+ def test_parse_non_csr(self):
+ from letsencrypt.crypto_util import get_sans_from_csr
+ self.assertRaises(OpenSSL.crypto.Error, get_sans_from_csr,
+ "hello there")
+
+ def test_parse_no_sans(self):
+ from letsencrypt.crypto_util import get_sans_from_csr
+ csr = pkg_resources.resource_string(
+ __name__, os.path.join('testdata', 'csr-nosans.pem'))
+ self.assertEqual([], get_sans_from_csr(csr))
+
+
if __name__ == '__main__':
unittest.main() # pragma: no cover
diff --git a/letsencrypt/tests/testdata/csr-6sans.pem b/letsencrypt/tests/testdata/csr-6sans.pem
new file mode 100644
index 00000000000..8f6b52bd77d
--- /dev/null
+++ b/letsencrypt/tests/testdata/csr-6sans.pem
@@ -0,0 +1,12 @@
+-----BEGIN CERTIFICATE REQUEST-----
+MIIBuzCCAWUCAQAweTELMAkGA1UEBhMCVVMxETAPBgNVBAgTCE1pY2hpZ2FuMRIw
+EAYDVQQHEwlBbm4gQXJib3IxDDAKBgNVBAoTA0VGRjEfMB0GA1UECxMWVW5pdmVy
+c2l0eSBvZiBNaWNoaWdhbjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wXDANBgkqhkiG
+9w0BAQEFAANLADBIAkEA9LYRcVE3Nr+qleecEcX8JwVDnjeG1X7ucsCasuuZM0e0
+9cmYuUzxIkMjO/9x4AVcvXXRXPEV+LzWWkfkTlzRMwIDAQABoIGGMIGDBgkqhkiG
+9w0BCQ4xdjB0MHIGA1UdEQRrMGmCC2V4YW1wbGUuY29tggtleGFtcGxlLm9yZ4IL
+ZXhhbXBsZS5uZXSCDGV4YW1wbGUuaW5mb4IVc3ViZG9tYWluLmV4YW1wbGUuY29t
+ghtvdGhlci5zdWJkb21haW4uZXhhbXBsZS5jb20wDQYJKoZIhvcNAQELBQADQQBd
+k4BE5qvEvkYoZM/2++Xd9RrQ6wsdj0QiJQCozfsI4lQx6ZJnbtNc7HpDrX4W6XIv
+IvzVBz/nD11drfz/RNuX
+-----END CERTIFICATE REQUEST-----
diff --git a/letsencrypt/tests/testdata/csr-nosans.pem b/letsencrypt/tests/testdata/csr-nosans.pem
new file mode 100644
index 00000000000..813db67b0be
--- /dev/null
+++ b/letsencrypt/tests/testdata/csr-nosans.pem
@@ -0,0 +1,8 @@
+-----BEGIN CERTIFICATE REQUEST-----
+MIIBFTCBwAIBADBbMQswCQYDVQQGEwJBVTETMBEGA1UECAwKU29tZS1TdGF0ZTEh
+MB8GA1UECgwYSW50ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMRQwEgYDVQQDDAtleGFt
+cGxlLm9yZzBcMA0GCSqGSIb3DQEBAQUAA0sAMEgCQQD0thFxUTc2v6qV55wRxfwn
+BUOeN4bVfu5ywJqy65kzR7T1yZi5TPEiQyM7/3HgBVy9ddFc8RX4vNZaR+ROXNEz
+AgMBAAGgADANBgkqhkiG9w0BAQsFAANBAMikGL8Ch7hQCStXH7chhDp6+pt2+VSo
+wgsrPQ2Bw4veDMlSemUrH+4e0TwbbntHfvXTDHWs9P3BiIDJLxFrjuA=
+-----END CERTIFICATE REQUEST-----
diff --git a/setup.py b/setup.py
index c719bea60e2..815403b5ab1 100644
--- a/setup.py
+++ b/setup.py
@@ -38,7 +38,8 @@ def read_file(filename, encoding='utf8'):
'psutil>=2.1.0', # net_connections introduced in 2.1.0
'pyasn1', # urllib3 InsecurePlatformWarning (#304)
'pycrypto',
- 'PyOpenSSL',
+ # https://pyopenssl.readthedocs.org/en/latest/api/crypto.html#OpenSSL.crypto.X509Req.get_extensions
+ 'PyOpenSSL>=0.15',
'pyparsing>=1.5.5', # Python3 support; perhaps unnecessary?
'pyrfc3339',
'python-augeas',
| Supersedes #247. Still hacky (using unexposed API, and using some, quite reasonable, assumptions), but we're getting there! I will try to submit appropriate patches upstream.
| https://api.github.com/repos/certbot/certbot/pulls/399 | 2015-05-10T09:45:17Z | 2015-05-18T17:28:55Z | 2015-05-18T17:28:55Z | 2016-05-06T19:21:29Z | 2,166 | certbot/certbot | 1,978 |
Sound stability test | diff --git a/selfdrive/modeld/test/polyfit/__init__.py b/selfdrive/modeld/test/polyfit/__init__.py
new file mode 100644
index 00000000000000..e69de29bb2d1d6
diff --git a/selfdrive/ui/test/__init__.py b/selfdrive/ui/test/__init__.py
new file mode 100644
index 00000000000000..e69de29bb2d1d6
diff --git a/selfdrive/ui/test/test_sound_stability.py b/selfdrive/ui/test/test_sound_stability.py
new file mode 100755
index 00000000000000..f0d51ec960dea1
--- /dev/null
+++ b/selfdrive/ui/test/test_sound_stability.py
@@ -0,0 +1,49 @@
+#!/usr/bin/env python3
+import os
+import random
+import subprocess
+import time
+from pathlib import Path
+from common.basedir import BASEDIR
+
+os.environ["LD_LIBRARY_PATH"] = ""
+
+# pull this from the provisioning tests
+play_sound = os.path.join(BASEDIR, "selfdrive/ui/test/play_sound")
+waste = os.path.join(BASEDIR, "scripts/waste")
+sound_path = Path(os.path.join(BASEDIR, "selfdrive/assets/sounds"))
+
+def sound_test():
+
+ # max volume
+ vol = 15
+ sound_files = [p.absolute() for p in sound_path.iterdir() if str(p).endswith(".wav")]
+
+ # start waste
+ p = subprocess.Popen([waste], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
+
+ start_time = time.monotonic()
+ frame = 0
+ while True:
+ # start a few processes
+ procs = []
+ for _ in range(random.randint(5, 20)):
+ sound = random.choice(sound_files)
+ p = subprocess.Popen([play_sound, str(sound), str(vol)], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
+ procs.append(p)
+ time.sleep(random.uniform(0, 0.75))
+
+ # and kill them
+ time.sleep(random.uniform(0, 5))
+ for p in procs:
+ p.terminate()
+
+ # write stats
+ stats = f"running time {time.monotonic() - start_time}s, cycle {frame}"
+ with open("/tmp/sound_stats.txt", "a") as f:
+ f.write(stats)
+ print(stats)
+ frame += 1
+
+if __name__ == "__main__":
+ sound_test()
| https://api.github.com/repos/commaai/openpilot/pulls/2089 | 2020-08-27T00:45:37Z | 2020-08-27T22:50:49Z | 2020-08-27T22:50:49Z | 2020-08-27T22:50:50Z | 586 | commaai/openpilot | 8,927 |
|
Add SunTV support | diff --git a/README.md b/README.md
index 3130a33fc2..911197ac70 100644
--- a/README.md
+++ b/README.md
@@ -71,6 +71,7 @@ Fork me on GitHub: <https://github.com/soimort/you-get>
* Sohu (搜狐视频) <http://tv.sohu.com>
* SongTaste <http://www.songtaste.com>
* SoundCloud <http://soundcloud.com>
* SunTV (阳光卫视) <http://www.isuntv.com/>
* TED <http://www.ted.com>
* Tudou (土豆) <http://www.tudou.com>
* Tumblr <http://www.tumblr.com>
diff --git a/src/you_get/common.py b/src/you_get/common.py
index 8cca99e687..9da66c35a5 100755
--- a/src/you_get/common.py
+++ b/src/you_get/common.py
@@ -1023,6 +1023,7 @@ def url_to_module(url):
sohu,
songtaste,
soundcloud,
+ suntv,
ted,
theplatform,
tucao,
@@ -1082,6 +1083,7 @@ def url_to_module(url):
'instagram': instagram,
'iqilu': iqilu,
'iqiyi': iqiyi,
+ 'isuntv': suntv,
'joy': joy,
'jpopsuki': jpopsuki,
'kankanews': bilibili,
diff --git a/src/you_get/extractors/__init__.py b/src/you_get/extractors/__init__.py
index 419169cf9f..e460772b57 100755
--- a/src/you_get/extractors/__init__.py
+++ b/src/you_get/extractors/__init__.py
@@ -46,6 +46,7 @@
from .sohu import *
from .songtaste import *
from .soundcloud import *
+from .suntv import *
from .theplatform import *
from .tucao import *
from .tudou import *
diff --git a/src/you_get/extractors/suntv.py b/src/you_get/extractors/suntv.py
new file mode 100644
index 0000000000..0b50644038
--- /dev/null
+++ b/src/you_get/extractors/suntv.py
@@ -0,0 +1,40 @@
+#!/usr/bin/env python
+
+__all__ = ['suntv_download']
+
+from ..common import *
+import urllib.request
+import re
+
+def suntv_download(url, output_dir = '.', merge = True, info_only = False, **kwargs):
+    if re.match(r'http://www.isuntv.com/\w+', url):
+        API_URL = "http://www.isuntv.com/ajaxpro/SunTv.pro_vod_playcatemp4,App_Web_playcatemp4.ascx.9f08f04f.ashx"
+
+        itemid = match1(url, r'http://www.isuntv.com/pro/ct(\d+).html')
+        values = {"itemid": itemid, "vodid": ""}
+
+        data = str(values).replace("'", '"')
+        data = data.encode('utf-8')
+        req = urllib.request.Request(API_URL, data)
+        req.add_header('AjaxPro-Method', 'ToPlay')  # important!
+        resp = urllib.request.urlopen(req)
+        respData = resp.read()
+        respData = respData.decode('ascii').strip('"')  # Ahhhhhhh!
+
+        video_url = 'http://www.isuntv.com' + str(respData)
+
+        html = get_content(url, decoded=False)
+        html = html.decode('gbk')
+        title = match1(html, '<title>([^<]+)').strip()  # strip trailing \r\n
+
+        type_, ext, size = url_info(video_url)
+
+        print_info(site_info, title, type_, size)
+        if not info_only:
+            # download the resolved media URL, not the page URL
+            download_urls([video_url], title, 'mp4', size, output_dir, merge=merge)
+
+site_info = "SunTV"
+download = suntv_download
+download_playlist = playlist_not_supported('suntv')
Sun TV (阳光卫视): good luck actually watching this one from mainland China.
It nearly killed me: upstream and downstream are split in two.
Gayhub won't even let you keep two forks of the same project, so I had to delete mine right after submitting this PR.
```
python3 you-get http://www.isuntv.com/pro/ct3124.html
Video Site: SunTV
Title: 王利波：掩埋
Type: Unknown type (None)
Size: 154.26 MiB (161756715 Bytes)
Downloading 王利波：掩埋.mp4 ...
0.0% ( 0.0/154.3MB) [ ] 1/1
```
The stream has no format, so the type shows as None. But it downloads just fine.
<!-- Reviewable:start -->
[<img src="https://reviewable.io/review_button.png" height=40 alt="Review on Reviewable"/>](https://reviewable.io/reviews/soimort/you-get/699)
<!-- Reviewable:end -->
| https://api.github.com/repos/soimort/you-get/pulls/699 | 2015-10-14T21:43:56Z | 2015-10-16T23:21:56Z | 2015-10-16T23:21:56Z | 2015-10-16T23:21:56Z | 980 | soimort/you-get | 20,972 |
fix helm scheduler deployment / scheduler logs | diff --git a/chart/templates/scheduler/scheduler-deployment.yaml b/chart/templates/scheduler/scheduler-deployment.yaml
index 45be2ff7d5a50..9a928a6641086 100644
--- a/chart/templates/scheduler/scheduler-deployment.yaml
+++ b/chart/templates/scheduler/scheduler-deployment.yaml
@@ -173,7 +173,7 @@ spec:
- name: scheduler-logs
image: {{ template "airflow_image" . }}
imagePullPolicy: {{ .Values.images.airflow.pullPolicy }}
- args: ["airflow", "serve_logs"]
+ args: ["serve_logs"]
ports:
- name: worker-logs
containerPort: {{ .Values.ports.workerLogs }}
diff --git a/chart/tests/test_basic_helm_chart.py b/chart/tests/test_basic_helm_chart.py
index dcf20cf195ae8..cc7b8192179b6 100644
--- a/chart/tests/test_basic_helm_chart.py
+++ b/chart/tests/test_basic_helm_chart.py
@@ -16,6 +16,7 @@
# under the License.
import unittest
+from typing import Any, Dict, List, Union
from tests.helm_template_generator import render_chart
@@ -64,3 +65,33 @@ def test_basic_deployment_without_default_users(self):
]
self.assertNotIn(('Job', 'TEST-BASIC-create-user'), list_of_kind_names_tuples)
self.assertEqual(OBJECT_COUNT_IN_BASIC_DEPLOYMENT - 1, len(k8s_objects))
+
+ def test_chart_is_consistent_with_official_airflow_image(self):
+ def get_k8s_objs_with_image(obj: Union[List[Any], Dict[str, Any]]) -> List[Dict[str, Any]]:
+ """
+ Recursive helper to retrieve all the k8s objects that have an "image" key
+ inside k8s obj or list of k8s obj
+ """
+ out = []
+ if isinstance(obj, list):
+ for item in obj:
+ out += get_k8s_objs_with_image(item)
+ if isinstance(obj, dict):
+ if "image" in obj:
+ out += [obj]
+ # include sub objs, just in case
+ for val in obj.values():
+ out += get_k8s_objs_with_image(val)
+ return out
+
+ image_repo = "test-airflow-repo/airflow"
+ k8s_objects = render_chart("TEST-BASIC", {"defaultAirflowRepository": image_repo})
+
+ objs_with_image = get_k8s_objs_with_image(k8s_objects)
+ for obj in objs_with_image:
+ image: str = obj["image"] # pylint: disable=invalid-sequence-index
+ if image.startswith(image_repo):
+ # Make sure that a command is not specified
+ self.assertNotIn("command", obj)
+ # Make sure that the first arg is never airflow
+ self.assertNotEqual(obj["args"][0], "airflow") # pylint: disable=invalid-sequence-index
| Hello again,
Another very small fix to make the latest chart work.
Based on the airflow image entrypoint, we should use airflow commands directly (just like for everywhere else in the current chart).
The container exists otherwise.
<!--
Thank you for contributing! Please make sure that your code changes
are covered with tests. And in case of new features or big changes
remember to adjust the documentation.
Feel free to ping committers for the review!
In case of existing issue, reference it using one of the following:
closes: #ISSUE
related: #ISSUE
How to write a good git commit message:
http://chris.beams.io/posts/git-commit/
-->
---
**^ Add meaningful description above**
Read the **[Pull Request Guidelines](https://github.com/apache/airflow/blob/master/CONTRIBUTING.rst#pull-request-guidelines)** for more information.
In case of fundamental code change, Airflow Improvement Proposal ([AIP](https://cwiki.apache.org/confluence/display/AIRFLOW/Airflow+Improvements+Proposals)) is needed.
In case of a new dependency, check compliance with the [ASF 3rd Party License Policy](https://www.apache.org/legal/resolved.html#category-x).
In case of backwards incompatible changes please leave a note in [UPDATING.md](https://github.com/apache/airflow/blob/master/UPDATING.md).
| https://api.github.com/repos/apache/airflow/pulls/11685 | 2020-10-20T11:52:31Z | 2020-10-31T17:55:51Z | 2020-10-31T17:55:51Z | 2020-11-15T12:13:25Z | 668 | apache/airflow | 13,946 |
[doc] updated documentation version list | diff --git a/.github/workflows/doc_build_after_merge.yml b/.github/workflows/doc_build_after_merge.yml
index dae3b70e1f4c..2f7b708ab3a8 100644
--- a/.github/workflows/doc_build_after_merge.yml
+++ b/.github/workflows/doc_build_after_merge.yml
@@ -5,6 +5,7 @@ on:
pull_request:
paths:
- 'version.txt'
+ - 'docs/'
types:
- closed
@@ -16,7 +17,6 @@ jobs:
steps:
- name: trigger workflow in ColossalAI-Documentation
run: |
- gh
curl \
-X POST \
-H "Accept: application/vnd.github+json" \
diff --git a/docs/versions.json b/docs/versions.json
index dde32982b798..49a0fab2bd55 100644
--- a/docs/versions.json
+++ b/docs/versions.json
@@ -1,3 +1,3 @@
[
- "current"
+ "v0.2.4"
]
| ## ๐ Checklist before creating the PR
- [ ] I have created an issue for this PR for traceability
- [x] The title follows the standard format: `[doc/gemini/tensor/...]: A concise description`
- [x] I have added relevant tags if possible for us to better distinguish different PRs
## ๐จ Issue number
> Link this PR to your issue with words like fixed to automatically close the linked issue upon merge
>
> e.g. `fixed #1234`, `closed #1234`, `resolved #1234`
N/A
## ๐ What does this PR do?
> Summarize your work here.
> if you have any plots/diagrams/screenshots/tables, please attach them here.
This PR updated the version list to show the documentation for the latest version.
## ๐ฅ Checklist before requesting a review
- [ ] I have linked my PR to an issue ([instruction](https://docs.github.com/en/issues/tracking-your-work-with-issues/linking-a-pull-request-to-an-issue))
- [ ] My issue clearly describes the problem/feature/proposal, with diagrams/charts/table/code if possible
- [x] I have performed a self-review of my code
- [ ] I have added thorough tests.
- [ ] I have added docstrings for all the functions/methods I implemented
## โญ๏ธ Do you enjoy contributing to Colossal-AI?
- [x] ๐ Yes, I do.
- [ ] ๐ No, I don't.
Tell us more if you don't enjoy contributing to Colossal-AI.
| https://api.github.com/repos/hpcaitech/ColossalAI/pulls/2715 | 2023-02-15T03:21:33Z | 2023-02-15T03:24:19Z | 2023-02-15T03:24:19Z | 2023-02-15T03:24:24Z | 243 | hpcaitech/ColossalAI | 11,064 |
Added unit test, travis file | diff --git a/.travis.yml b/.travis.yml
new file mode 100644
index 00000000000..57605897861
--- /dev/null
+++ b/.travis.yml
@@ -0,0 +1,20 @@
+language: python
+# Setup anaconda
+before_install:
+ - wget http://repo.continuum.io/miniconda/Miniconda-latest-Linux-x86_64.sh -O miniconda.sh
+ - chmod +x miniconda.sh
+ - ./miniconda.sh -b
+ - export PATH=/home/travis/miniconda/bin:$PATH
+ - conda update --yes conda
+ # The next couple lines fix a crash with multiprocessing on Travis and are not specific to using Miniconda
+ - sudo rm -rf /dev/shm
+ - sudo ln -s /run/shm /dev/shm
+python:
+ - "3.4"
+# command to install dependencies
+install:
+ - conda install --yes python=$TRAVIS_PYTHON_VERSION numpy scipy matplotlib pandas pytest
+ # Coverage packages are on my binstar channel
+ - python setup.py install
+# command to run tests
+script: py.test
\ No newline at end of file
diff --git a/keras/activations.py b/keras/activations.py
index e0bd897eb51..5f0f90feeb6 100644
--- a/keras/activations.py
+++ b/keras/activations.py
@@ -27,6 +27,9 @@ def hard_sigmoid(x):
return T.nnet.hard_sigmoid(x)
def linear(x):
+ '''
+ The function returns the variable that is passed in, so all types work
+ '''
return x
from .utils.generic_utils import get_from_module
diff --git a/tests/auto/keras/test_activations.py b/tests/auto/keras/test_activations.py
new file mode 100644
index 00000000000..60d908d8adc
--- /dev/null
+++ b/tests/auto/keras/test_activations.py
@@ -0,0 +1,104 @@
+import math
+
+import keras
+import theano
+import theano.tensor as T
+
+import numpy
+
+def list_assert_equal(a, b, round_to=7):
+ '''
+ This will do a pairwise, rounded equality test across two lists of
+ numbers.
+ '''
+ pairs = zip(a, b)
+ for i, j in pairs:
+ assert round(i, round_to) == round(j, round_to)
+
+def get_standard_values():
+ '''
+ These are just a set of floats used for testing the activation
+ functions, and are useful in multiple tests.
+ '''
+
+ return [0,0.1,0.5,0.9,1.0]
+
+def test_softmax():
+
+ from keras.activations import softmax as s
+
+ # Test using a reference implementation of softmax
+ def softmax(values):
+ m = max(values)
+ values = numpy.array(values)
+ e = numpy.exp(values - m)
+ dist = list(e / numpy.sum(e))
+
+ return dist
+
+ x = T.vector()
+ exp = s(x)
+ f = theano.function([x], exp)
+ test_values=get_standard_values()
+
+ result = f(test_values)
+ expected = softmax(test_values)
+
+ print(str(result))
+ print(str(expected))
+
+ list_assert_equal(result, expected)
+
+def test_relu():
+ '''
+ Relu implementation doesn't depend on the value being
+ a theano variable. Testing ints, floats and theano tensors.
+ '''
+
+ from keras.activations import relu as r
+
+ assert r(5) == 5
+ assert r(-5) == 0
+ assert r(-0.1) == 0
+ assert r(0.1) == 0.1
+
+ x = T.vector()
+ exp = r(x)
+ f = theano.function([x], exp)
+
+ test_values = get_standard_values()
+ result = f(test_values)
+
+ list_assert_equal(result, test_values) # because no negatives in test values
+
+
+def test_tanh():
+
+ from keras.activations import tanh as t
+ test_values = get_standard_values()
+
+ x = T.vector()
+ exp = t(x)
+ f = theano.function([x], exp)
+
+ result = f(test_values)
+ expected = [math.tanh(v) for v in test_values]
+
+ print(result)
+ print(expected)
+
+ list_assert_equal(result, expected)
+
+
+def test_linear():
+ '''
+ This function does no input validation, it just returns the thing
+ that was passed in.
+ '''
+
+ from keras.activations import linear as l
+
+ xs = [1, 5, True, None, 'foo']
+
+ for x in xs:
+ assert x == l(x)
diff --git a/tests/auto/test_lossweights.py b/tests/auto/test_lossweights.py
deleted file mode 100644
index caff3ddb521..00000000000
--- a/tests/auto/test_lossweights.py
+++ /dev/null
@@ -1,79 +0,0 @@
-from __future__ import absolute_import
-from __future__ import print_function
-from keras.datasets import mnist
-from keras.models import Sequential
-from keras.layers.core import Dense, Activation
-from keras.utils import np_utils
-import numpy as np
-import unittest
-
-nb_classes = 10
-batch_size = 128
-nb_epoch = 5
-weighted_class = 9
-standard_weight = 1
-high_weight = 5
-max_train_samples = 5000
-max_test_samples = 1000
-
-np.random.seed(1337) # for reproducibility
-
-# the data, shuffled and split between tran and test sets
-(X_train, y_train), (X_test, y_test) = mnist.load_data()
-X_train = X_train.reshape(60000, 784)[:max_train_samples]
-X_test = X_test.reshape(10000, 784)[:max_test_samples]
-X_train = X_train.astype("float32") / 255
-X_test = X_test.astype("float32") / 255
-
-# convert class vectors to binary class matrices
-y_train = y_train[:max_train_samples]
-y_test = y_test[:max_test_samples]
-Y_train = np_utils.to_categorical(y_train, nb_classes)
-Y_test = np_utils.to_categorical(y_test, nb_classes)
-test_ids = np.where(y_test == np.array(weighted_class))[0]
-
-def create_model():
- model = Sequential()
- model.add(Dense(784, 50))
- model.add(Activation('relu'))
- model.add(Dense(50, 10))
- model.add(Activation('softmax'))
- return model
-
-def test_weights(model, class_weight=None, sample_weight=None):
- model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, \
- class_weight=class_weight, sample_weight=sample_weight)
- score = model.evaluate(X_test[test_ids, :], Y_test[test_ids, :], verbose=0)
- return score
-
-class TestConcatenation(unittest.TestCase):
-
- def test_loss_weighting(self):
- class_weight = dict([(i, standard_weight) for i in range(nb_classes)])
- class_weight[weighted_class] = high_weight
-
- sample_weight = np.ones((y_train.shape[0])) * standard_weight
- sample_weight[y_train == weighted_class] = high_weight
-
- for loss in ['mae', 'mse', 'categorical_crossentropy']:
- print('loss:', loss)
- # no weights: reference point
- model = create_model()
- model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
- standard_score = test_weights(model)
- # test class_weight
- model = create_model()
- model.compile(loss=loss, optimizer='rmsprop')
- score = test_weights(model, class_weight=class_weight)
- print('score:', score, ' vs.', standard_score)
- self.assertTrue(score < standard_score)
- # test sample_weight
- model = create_model()
- model.compile(loss=loss, optimizer='rmsprop')
- score = test_weights(model, sample_weight=sample_weight)
- print('score:', score, ' vs.', standard_score)
- self.assertTrue(score < standard_score)
-
-if __name__ == '__main__':
- print('Test class_weight and sample_weight')
- unittest.main()
\ No newline at end of file
| I should have split this up into multiple commits, but..
-- Added 'travis.yml' which can be used to integrate travis (done for my fork)
-- Removed failing test file completely as you suggested
-- Added a single unit test covering the softmax function (to show the approach)
| https://api.github.com/repos/keras-team/keras/pulls/280 | 2015-06-26T02:32:55Z | 2015-06-27T01:13:59Z | 2015-06-27T01:13:59Z | 2015-06-27T01:21:34Z | 1,950 | keras-team/keras | 47,217 |
Bump exllama module to 0.0.7 | diff --git a/requirements.txt b/requirements.txt
index 6382ea4315..cbcb8320b5 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -26,5 +26,5 @@ llama-cpp-python==0.1.73; platform_system != "Windows"
https://github.com/abetlen/llama-cpp-python/releases/download/v0.1.73/llama_cpp_python-0.1.73-cp310-cp310-win_amd64.whl; platform_system == "Windows"
https://github.com/PanQiWei/AutoGPTQ/releases/download/v0.3.0/auto_gptq-0.3.0+cu117-cp310-cp310-win_amd64.whl; platform_system == "Windows"
https://github.com/PanQiWei/AutoGPTQ/releases/download/v0.3.0/auto_gptq-0.3.0+cu117-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64"
-https://github.com/jllllll/exllama/releases/download/0.0.6/exllama-0.0.6+cu117-cp310-cp310-win_amd64.whl; platform_system == "Windows"
-https://github.com/jllllll/exllama/releases/download/0.0.6/exllama-0.0.6+cu117-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64"
+https://github.com/jllllll/exllama/releases/download/0.0.7/exllama-0.0.7+cu117-cp310-cp310-win_amd64.whl; platform_system == "Windows"
+https://github.com/jllllll/exllama/releases/download/0.0.7/exllama-0.0.7+cu117-cp310-cp310-linux_x86_64.whl; platform_system == "Linux" and platform_machine == "x86_64"
| Adds support for LLaMa 2 | https://api.github.com/repos/oobabooga/text-generation-webui/pulls/3211 | 2023-07-19T23:22:48Z | 2023-07-20T01:24:47Z | 2023-07-20T01:24:47Z | 2023-07-20T20:56:50Z | 460 | oobabooga/text-generation-webui | 26,235 |
Update tensorflow to 2.4.0rc4 | diff --git a/lib/cli/launcher.py b/lib/cli/launcher.py
index d1030fb405..ad483bf868 100644
--- a/lib/cli/launcher.py
+++ b/lib/cli/launcher.py
@@ -53,10 +53,10 @@ def _test_for_tf_version(self):
Raises
------
FaceswapError
- If Tensorflow is not found, or is not between versions 2.2 and 2.2
+ If Tensorflow is not found, or is not between versions 2.2 and 2.4
"""
min_ver = 2.2
- max_ver = 2.2
+ max_ver = 2.4
try:
# Ensure tensorflow doesn't pin all threads to one core when using Math Kernel Library
os.environ["TF_MIN_GPU_MULTIPROCESSOR_COUNT"] = "4"
diff --git a/lib/gui/stats.py b/lib/gui/stats.py
index 7cbb323526..41ec71604c 100644
--- a/lib/gui/stats.py
+++ b/lib/gui/stats.py
@@ -17,7 +17,7 @@
import numpy as np
import tensorflow as tf
-from tensorflow.python import errors_impl as tf_errors # pylint:disable=no-name-in-module
+from tensorflow.python.framework import errors_impl as tf_errors # pylint:disable=no-name-in-module
from tensorflow.core.util import event_pb2
from lib.serializer import get_serializer
diff --git a/plugins/extract/_base.py b/plugins/extract/_base.py
index 7c984281cb..530fc3840e 100644
--- a/plugins/extract/_base.py
+++ b/plugins/extract/_base.py
@@ -6,7 +6,7 @@
import os
import sys
-from tensorflow.python import errors_impl as tf_errors # pylint:disable=no-name-in-module
+from tensorflow.python.framework import errors_impl as tf_errors # pylint:disable=no-name-in-module
from lib.multithreading import MultiThread
from lib.queue_manager import queue_manager
diff --git a/plugins/extract/align/_base.py b/plugins/extract/align/_base.py
index 77920f6055..5f871e94ac 100644
--- a/plugins/extract/align/_base.py
+++ b/plugins/extract/align/_base.py
@@ -17,7 +17,7 @@
import cv2
import numpy as np
-from tensorflow.python import errors_impl as tf_errors # pylint:disable=no-name-in-module
+from tensorflow.python.framework import errors_impl as tf_errors # pylint:disable=no-name-in-module
from lib.utils import get_backend, FaceswapError
from plugins.extract._base import Extractor, logger, ExtractMedia
diff --git a/plugins/extract/detect/_base.py b/plugins/extract/detect/_base.py
index e4fcb89c62..522e5fab87 100644
--- a/plugins/extract/detect/_base.py
+++ b/plugins/extract/detect/_base.py
@@ -18,7 +18,7 @@
import cv2
import numpy as np
-from tensorflow.python import errors_impl as tf_errors # pylint:disable=no-name-in-module
+from tensorflow.python.framework import errors_impl as tf_errors # pylint:disable=no-name-in-module
from lib.faces_detect import DetectedFace
from lib.utils import get_backend, FaceswapError
diff --git a/plugins/extract/mask/_base.py b/plugins/extract/mask/_base.py
index 433b7af148..ac092d5fa0 100644
--- a/plugins/extract/mask/_base.py
+++ b/plugins/extract/mask/_base.py
@@ -16,7 +16,7 @@
import cv2
import numpy as np
-from tensorflow.python import errors_impl as tf_errors # pylint:disable=no-name-in-module
+from tensorflow.python.framework import errors_impl as tf_errors # pylint:disable=no-name-in-module
from lib.utils import get_backend, FaceswapError
from plugins.extract._base import Extractor, ExtractMedia, logger
diff --git a/plugins/train/model/_base.py b/plugins/train/model/_base.py
index 1bce2d545f..9676897e6a 100644
--- a/plugins/train/model/_base.py
+++ b/plugins/train/model/_base.py
@@ -385,7 +385,7 @@ def _compile_model(self):
self.config.get("clipnorm", False),
self._args).optimizer
if self._settings.use_mixed_precision:
- optimizer = self._settings.LossScaleOptimizer(optimizer, loss_scale="dynamic")
+ optimizer = self._settings.LossScaleOptimizer(optimizer, True)
if get_backend() == "amd":
self._rewrite_plaid_outputs()
self._loss.configure(self._model)
@@ -643,7 +643,7 @@ def __init__(self, arguments, mixed_precision, allow_growth, is_predict):
use_mixed_precision = not is_predict and mixed_precision and get_backend() == "nvidia"
if use_mixed_precision:
- self._mixed_precision = tf.keras.mixed_precision.experimental
+ self._mixed_precision = tf.keras.mixed_precision
else:
self._mixed_precision = None
@@ -746,7 +746,7 @@ def _set_keras_mixed_precision(self, use_mixed_precision, skip_check):
# TODO remove this hacky fix to disable mixed precision compatibility testing if/when
# fixed upstream.
# pylint:disable=import-outside-toplevel,protected-access
- from tensorflow.python.keras.mixed_precision.experimental import \
+ from tensorflow.python.keras.mixed_precision import \
device_compatibility_check
logger.debug("Overriding tensorflow _logged_compatibility_check parameter. Initial "
"value: %s", device_compatibility_check._logged_compatibility_check)
@@ -754,7 +754,7 @@ def _set_keras_mixed_precision(self, use_mixed_precision, skip_check):
logger.debug("New value: %s", device_compatibility_check._logged_compatibility_check)
policy = self._mixed_precision.Policy('mixed_float16')
- self._mixed_precision.set_policy(policy)
+ self._mixed_precision.set_global_policy(policy)
logger.debug("Enabled mixed precision. (Compute dtype: %s, variable_dtype: %s)",
policy.compute_dtype, policy.variable_dtype)
return True
diff --git a/plugins/train/trainer/_base.py b/plugins/train/trainer/_base.py
index b79fffb7fb..9761236dc5 100644
--- a/plugins/train/trainer/_base.py
+++ b/plugins/train/trainer/_base.py
@@ -18,7 +18,7 @@
import numpy as np
import tensorflow as tf
-from tensorflow.python import errors_impl as tf_errors # pylint:disable=no-name-in-module
+from tensorflow.python.framework import errors_impl as tf_errors # pylint:disable=no-name-in-module
from tqdm import tqdm
from lib.alignments import Alignments
diff --git a/requirements_nvidia.txt b/requirements_nvidia.txt
index f695054174..97623dfc31 100644
--- a/requirements_nvidia.txt
+++ b/requirements_nvidia.txt
@@ -1,2 +1,2 @@
-r _requirements_base.txt
-tensorflow-gpu>=2.2.0,<2.3.0
+tensorflow-gpu==2.4.0rc4
| I know this is only a draft, many things don't work like Analysis and learning graph. But I think this is a good point for starting supporting RTX 3080. I think you should create a new branch to allow people to work on that change. Best! PS. I will try to work on this but I'm not a Python person. | https://api.github.com/repos/deepfakes/faceswap/pulls/1086 | 2020-11-12T08:54:01Z | 2020-12-08T12:49:50Z | 2020-12-08T12:49:50Z | 2020-12-08T12:49:50Z | 1,586 | deepfakes/faceswap | 18,860 |
Fix junos netconf port issue in integration test (#32610) | diff --git a/test/integration/targets/junos_netconf/tests/cli/changeport.yaml b/test/integration/targets/junos_netconf/tests/cli/changeport.yaml
index ef0c2288f0617d..73ec15e2747b1b 100644
--- a/test/integration/targets/junos_netconf/tests/cli/changeport.yaml
+++ b/test/integration/targets/junos_netconf/tests/cli/changeport.yaml
@@ -12,7 +12,7 @@
- name: Change port
junos_netconf:
state: present
- netconf_port: 22
+ netconf_port: 8080
register: result
- assert:
@@ -22,23 +22,19 @@
- name: idempotent tests
junos_netconf:
state: present
- netconf_port: 22
+ netconf_port: 8080
register: result
- assert:
that:
- "result.changed == false"
-- name: wait for persistent socket to timeout, this ensures new socket creation with connection type netconf
- pause:
- seconds: 120
-
-- name: Ensure we can communicate over 22
+- name: Ensure we can communicate over 8080
junos_command:
rpcs:
- get-software-information
provider: "{{ netconf }}"
- port: 22
+ port: 8080
# This protects against the port override above not being honoured and a bug setting the port
- name: Ensure we can NOT communicate over default port
@@ -53,10 +49,6 @@
- "result.failed == true"
- "'unable to open shell' in result.msg"
-- name: wait for persistent socket to timeout, this ensures new socket creation with connection type netconf
- pause:
- seconds: 120
-
- name: Set back netconf to default port
junos_netconf:
state: present
diff --git a/test/integration/targets/junos_netconf/tests/cli/netconf.yaml b/test/integration/targets/junos_netconf/tests/cli/netconf.yaml
index ed4f91e6f0fb26..92c0a4f430a967 100644
--- a/test/integration/targets/junos_netconf/tests/cli/netconf.yaml
+++ b/test/integration/targets/junos_netconf/tests/cli/netconf.yaml
@@ -45,7 +45,7 @@
- name: wait for persistent socket to timeout
pause:
- seconds: 120
+ seconds: 150
- name: Ensure we can NOT talk via netconf
junos_command:
|
##### SUMMARY
<!--- Describe the change, including rationale and design decisions -->
(cherry picked from commit 6d1d06e0f7e1c7dd1e379a70375ea28a5ca6268d)
Integration test fix
<!---
If you are fixing an existing issue, please include "Fixes #nnn" in your
commit message and your description; but you should still explain what
the change does.
-->
##### ISSUE TYPE
<!--- Pick one below and delete the rest: -->
- Test Pull Request
##### COMPONENT NAME
<!--- Name of the module/plugin/module/task -->
##### ANSIBLE VERSION
<!--- Paste verbatim output from "ansible --version" between quotes below -->
```
2.4
```
##### ADDITIONAL INFORMATION
<!---
Include additional information to help people understand the change here.
For bugs that don't have a linked bug report, a step-by-step reproduction
of the problem is helpful.
-->
<!--- Paste verbatim command output below, e.g. before and after your change -->
```
```
| https://api.github.com/repos/ansible/ansible/pulls/32668 | 2017-11-08T08:52:39Z | 2017-11-10T15:02:07Z | 2017-11-10T15:02:07Z | 2019-04-26T23:16:33Z | 573 | ansible/ansible | 49,576 |
[fx]add uniform policy | diff --git a/colossalai/fx/passes/adding_split_node_pass.py b/colossalai/fx/passes/adding_split_node_pass.py
index 4d34c2b56410..91005fe6bd51 100644
--- a/colossalai/fx/passes/adding_split_node_pass.py
+++ b/colossalai/fx/passes/adding_split_node_pass.py
@@ -32,6 +32,35 @@ def balanced_split_pass(gm: torch.fx.GraphModule, pp_size: int):
return gm
+def uniform_split_pass(gm: torch.fx.GraphModule, pp_size: int):
+ mod_graph = gm.graph
+ valid_children_size = 0
+ valid_children = []
+ for module in mod_graph.owning_module.children():
+ valid_children_size += 1
+ valid_children.append(module)
+
+ if valid_children_size < pp_size:
+ # If valid children is not enough to shard, we will use balanced policy instead of uniform policy.
+ return balanced_split_pass(gm, pp_size)
+ layers_per_partition = valid_children_size // pp_size
+ accumulate_layer_amount = 0
+ for node in mod_graph.nodes:
+ if pp_size <= 1:
+ break
+ if node.op == "call_module":
+ target_module = node.graph.owning_module.get_submodule(node.target)
+ if target_module in valid_children:
+ accumulate_layer_amount += 1
+ if accumulate_layer_amount == layers_per_partition:
+ accumulate_layer_amount = 0
+ pp_size -= 1
+ with mod_graph.inserting_after(node):
+ split_node = mod_graph.create_node('call_function', pipe_split)
+ gm.recompile()
+ return gm
+
+
def split_with_split_nodes_pass(annotated_gm: torch.fx.GraphModule):
part_idx = 0
diff --git a/tests/test_fx/test_pipeline_passes.py b/tests/test_fx/test_pipeline_passes.py
new file mode 100644
index 000000000000..228fcb880344
--- /dev/null
+++ b/tests/test_fx/test_pipeline_passes.py
@@ -0,0 +1,48 @@
+import torch
+import torch.nn as nn
+import colossalai
+import colossalai.nn as col_nn
+from torch.fx import symbolic_trace
+from colossalai.fx.passes.adding_split_node_pass import split_with_split_nodes_pass, balanced_split_pass, \
+ uniform_split_pass
+
+MODEL_DIM = 16
+BATCH_SIZE = 8
+PIPELINE_SIZE = 2
+
+
+class MLP(torch.nn.Module):
+
+ def __init__(self, dim: int):
+ super().__init__()
+ self.linear1 = torch.nn.Linear(dim, dim)
+ self.linear2 = torch.nn.Linear(dim, dim)
+ self.linear3 = torch.nn.Linear(dim, dim)
+ self.linear4 = torch.nn.Linear(dim, dim)
+
+ def forward(self, x):
+ x = self.linear1(x)
+ x = self.linear2(x)
+ x = self.linear3(x)
+ x = self.linear4(x)
+ return x
+
+
+def pipeline_pass_test_helper(model, data, pass_func):
+ origin_output = model(data)
+ symbolic_traced = symbolic_trace(model)
+ annotated_model = pass_func(symbolic_traced, PIPELINE_SIZE)
+ split_model, split_submodules = split_with_split_nodes_pass(annotated_model)
+ output = split_model(data)
+ assert output.equal(origin_output)
+
+
+def test_pipeline_passes():
+ model = MLP(MODEL_DIM)
+ data = torch.rand(BATCH_SIZE, MODEL_DIM)
+ pipeline_pass_test_helper(model, data, balanced_split_pass)
+ pipeline_pass_test_helper(model, data, uniform_split_pass)
+
+
+if __name__ == '__main__':
+ test_pipeline_passes()
| https://api.github.com/repos/hpcaitech/ColossalAI/pulls/1208 | 2022-07-06T03:16:13Z | 2022-07-06T05:48:11Z | 2022-07-06T05:48:11Z | 2022-07-06T05:48:11Z | 863 | hpcaitech/ColossalAI | 11,720 |
|
Show tqdm progress bar | diff --git a/README.md b/README.md
index 6dd972314f..65b077f26c 100644
--- a/README.md
+++ b/README.md
@@ -264,7 +264,8 @@ torchrun --nproc_per_node=4 --master_port=20001 fastchat/train/train_mem.py \
--tf32 True \
--model_max_length 2048 \
--gradient_checkpointing True \
- --lazy_preprocess True
+ --lazy_preprocess True \
+ --disable_tqdm False
```
- If you meet out-of-memory due to "FSDP Warning: When using FSDP, it is efficient and recommended... ", see solutions [here](https://github.com/huggingface/transformers/issues/24724#issuecomment-1645189539).
diff --git a/fastchat/train/llama_flash_attn_monkey_patch.py b/fastchat/train/llama_flash_attn_monkey_patch.py
index 031546502e..9b3392ed62 100644
--- a/fastchat/train/llama_flash_attn_monkey_patch.py
+++ b/fastchat/train/llama_flash_attn_monkey_patch.py
@@ -8,8 +8,9 @@
from transformers.models.llama.modeling_llama import apply_rotary_pos_emb
from einops import rearrange
-
-from flash_attn.flash_attn_interface import flash_attn_varlen_qkvpacked_func
+from flash_attn.flash_attn_interface import ( # pip3 install "flash-attn>=2.0"
+ flash_attn_varlen_qkvpacked_func,
+)
from flash_attn.bert_padding import unpad_input, pad_input
diff --git a/scripts/test_readme_train.sh b/scripts/test_readme_train.sh
index 769f2c678a..6c2c7019e7 100644
--- a/scripts/test_readme_train.sh
+++ b/scripts/test_readme_train.sh
@@ -21,4 +21,5 @@ torchrun --nproc_per_node=4 --master_port=20001 fastchat/train/train_mem.py \
--tf32 True \
--model_max_length 2048 \
--gradient_checkpointing True \
- --lazy_preprocess True
+ --lazy_preprocess True \
+ --disable_tqdm False
| https://api.github.com/repos/lm-sys/FastChat/pulls/2065 | 2023-07-24T18:14:28Z | 2023-07-24T18:14:56Z | 2023-07-24T18:14:56Z | 2023-07-24T18:15:35Z | 505 | lm-sys/FastChat | 41,444 |
|
python compress and decompress string solution | diff --git a/scripts/random_question.py b/scripts/random_question.py
index 7bf743891..8bf36fa3b 100644
--- a/scripts/random_question.py
+++ b/scripts/random_question.py
@@ -2,6 +2,7 @@
import optparse
import os
+
def main():
"""Reads through README.md for question/answer pairs and adds them to a
list to randomly select from and quiz yourself.
diff --git a/topics/python/solutions/compress_string_solution.md b/topics/python/solutions/compress_string_solution.md
new file mode 100644
index 000000000..b1a137e47
--- /dev/null
+++ b/topics/python/solutions/compress_string_solution.md
@@ -0,0 +1,41 @@
+## Compress String Solution
+
+1. Write a function that gets a string and compresses it
+ - 'aaaabbccc' -> 'a4b2c3'
+ - 'abbbc' -> 'a1b3c1'
+
+```
+def compress_str(mystr: str) -> str:
+
+ result = ''
+
+ if mystr:
+ prevchar = mystr[0]
+ else:
+ return result
+
+ count = 1
+ for nextchar in mystr[1:]:
+ if nextchar == prevchar:
+ count += 1
+ else:
+ result += prevchar + str(count)
+ count = 1
+ prevchar = nextchar
+
+ result += prevchar + str(count)
+ return result
+```
+
+
+2. Write a function that decompresses a given string
+ - 'a4b2c3' -> 'aaaabbccc'
+ - 'a1b3c1' -> 'abbbc'
+
+```
+def decompress_str(mystr: str) -> str:
+ result = ''
+ for index in range(0, len(mystr), 2):
+ result += mystr[index] * int(mystr[index + 1])
+ return result
+```
| https://api.github.com/repos/bregman-arie/devops-exercises/pulls/309 | 2022-11-07T05:36:41Z | 2022-11-07T06:58:06Z | 2022-11-07T06:58:06Z | 2022-11-07T06:58:06Z | 455 | bregman-arie/devops-exercises | 17,653 |
|
๐ Add Chinese translation for `docs/zh/docs/advanced/additional-responses.md` | diff --git a/docs/zh/docs/advanced/additional-responses.md b/docs/zh/docs/advanced/additional-responses.md
new file mode 100644
index 0000000000000..2a1e1ed891a79
--- /dev/null
+++ b/docs/zh/docs/advanced/additional-responses.md
@@ -0,0 +1,219 @@
+# OPENAPI ไธญ็ๅ
ถไปๅๅบ
+
+ๆจๅฏไปฅๅฃฐๆ้ๅ ๅๅบ๏ผๅ
ๆฌ้ๅ ็ถๆไปฃ็ ใๅชไฝ็ฑปๅใๆ่ฟฐ็ญใ
+
+่ฟไบ้ขๅค็ๅๅบๅฐๅ
ๅซๅจOpenAPIๆจกๅผไธญ๏ผๅ ๆญคๅฎไปฌไนๅฐๅบ็ฐๅจAPIๆๆกฃไธญใ
+
+ไฝๆฏๅฏนไบ้ฃไบ้ขๅค็ๅๅบ๏ผไฝ ๅฟ
้กป็กฎไฟไฝ ็ดๆฅ่ฟๅไธไธชๅ `JSONResponse` ไธๆ ท็ `Response` ๏ผๅนถๅ
ๅซไฝ ็็ถๆไปฃ็ ๅๅ
ๅฎนใ
+
+## `model`้ๅ ๅๅบ
+ๆจๅฏไปฅๅ่ทฏๅพๆไฝ่ฃ
้ฅฐๅจไผ ้ๅๆฐ `responses` ใ
+
+ๅฎๆฅๆถไธไธช `dict`๏ผ้ฎๆฏๆฏไธชๅๅบ็็ถๆไปฃ็ ๏ผๅฆ`200`๏ผ๏ผๅผๆฏๅ
ๅซๆฏไธชๅๅบไฟกๆฏ็ๅ
ถไป `dict`ใ
+
+ๆฏไธชๅๅบๅญๅ
ธ้ฝๅฏไปฅๆไธไธชๅ
ณ้ฎๆจกๅ๏ผๅ
ถไธญๅ
ๅซไธไธช `Pydantic` ๆจกๅ๏ผๅฐฑๅ `response_model` ไธๆ ทใ
+
+**FastAPI**ๅฐ้็จ่ฏฅๆจกๅ๏ผ็ๆๅ
ถ`JSON Schema`ๅนถๅฐๅ
ถๅ
ๅซๅจ`OpenAPI`ไธญ็ๆญฃ็กฎไฝ็ฝฎใ
+
+ไพๅฆ๏ผ่ฆๅฃฐๆๅฆไธไธชๅ
ทๆ็ถๆ็ `404` ๅ`Pydantic`ๆจกๅ `Message` ็ๅๅบ๏ผๅฏไปฅๅ๏ผ
+```Python hl_lines="18 22"
+{!../../../docs_src/additional_responses/tutorial001.py!}
+```
+
+
+!!! Note
+ ่ฏท่ฎฐไฝ๏ผๆจๅฟ
้กป็ดๆฅ่ฟๅ `JSONResponse` ใ
+
+!!! Info
+ `model` ๅฏ้ฅไธๆฏOpenAPI็ไธ้จๅใ
+ **FastAPI**ๅฐไป้ฃ้่ทๅ`Pydantic`ๆจกๅ๏ผ็ๆ` JSON Schema` ๏ผๅนถๅฐๅ
ถๆพๅจๆญฃ็กฎ็ไฝ็ฝฎใ
+ - ๆญฃ็กฎ็ไฝ็ฝฎๆฏ๏ผ
+ - ๅจ้ฎ `content` ไธญ๏ผๅ
ถๅ
ทๆๅฆไธไธช`JSON`ๅฏน่ฑก๏ผ `dict` ๏ผไฝไธบๅผ๏ผ่ฏฅ`JSON`ๅฏน่ฑกๅ
ๅซ๏ผ
+ - ๅชไฝ็ฑปๅ็ๅฏ้ฅ๏ผไพๅฆ `application/json` ๏ผๅฎๅ
ๅซๅฆไธไธช`JSON`ๅฏน่ฑกไฝไธบๅผ๏ผ่ฏฅๅฏน่ฑกๅ
ๅซ๏ผ
+ - ไธไธช้ฎ` schema` ๏ผๅฎ็ๅผๆฏๆฅ่ชๆจกๅ็`JSON Schema`๏ผๆญฃ็กฎ็ไฝ็ฝฎๅจ่ฟ้ใ
+ - **FastAPI**ๅจ่ฟ้ๆทปๅ ไบๅฏนOpenAPIไธญๅฆไธไธชๅฐๆน็ๅ
จๅฑJSONๆจกๅผ็ๅผ็จ๏ผ่ไธๆฏ็ดๆฅๅ
ๅซๅฎใ่ฟๆ ท๏ผๅ
ถไปๅบ็จ็จๅบๅๅฎขๆท็ซฏๅฏไปฅ็ดๆฅไฝฟ็จ่ฟไบJSONๆจกๅผ๏ผๆไพๆดๅฅฝ็ไปฃ็ ็ๆๅทฅๅ
ท็ญใ
+
+
+**ๅจOpenAPIไธญไธบ่ฏฅ่ทฏๅพๆไฝ็ๆ็ๅๅบๅฐๆฏ๏ผ**
+
+```json hl_lines="3-12"
+{
+ "responses": {
+ "404": {
+ "description": "Additional Response",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/Message"
+ }
+ }
+ }
+ },
+ "200": {
+ "description": "Successful Response",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/Item"
+ }
+ }
+ }
+ },
+ "422": {
+ "description": "Validation Error",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/HTTPValidationError"
+ }
+ }
+ }
+ }
+ }
+}
+
+```
+**ๆจกๅผ่ขซๅผ็จๅฐOpenAPIๆจกๅผไธญ็ๅฆไธไธชไฝ็ฝฎ๏ผ**
+```json hl_lines="4-16"
+{
+ "components": {
+ "schemas": {
+ "Message": {
+ "title": "Message",
+ "required": [
+ "message"
+ ],
+ "type": "object",
+ "properties": {
+ "message": {
+ "title": "Message",
+ "type": "string"
+ }
+ }
+ },
+ "Item": {
+ "title": "Item",
+ "required": [
+ "id",
+ "value"
+ ],
+ "type": "object",
+ "properties": {
+ "id": {
+ "title": "Id",
+ "type": "string"
+ },
+ "value": {
+ "title": "Value",
+ "type": "string"
+ }
+ }
+ },
+ "ValidationError": {
+ "title": "ValidationError",
+ "required": [
+ "loc",
+ "msg",
+ "type"
+ ],
+ "type": "object",
+ "properties": {
+ "loc": {
+ "title": "Location",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "msg": {
+ "title": "Message",
+ "type": "string"
+ },
+ "type": {
+ "title": "Error Type",
+ "type": "string"
+ }
+ }
+ },
+ "HTTPValidationError": {
+ "title": "HTTPValidationError",
+ "type": "object",
+ "properties": {
+ "detail": {
+ "title": "Detail",
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/ValidationError"
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+```
+## ไธปๅๅบ็ๅ
ถไปๅชไฝ็ฑปๅ
+
+ๆจๅฏไปฅไฝฟ็จ็ธๅ็ `responses` ๅๆฐไธบ็ธๅ็ไธปๅๅบๆทปๅ ไธๅ็ๅชไฝ็ฑปๅใ
+
+ไพๅฆ๏ผๆจๅฏไปฅๆทปๅ ไธไธช้ขๅค็ๅชไฝ็ฑปๅ` image/png` ๏ผๅฃฐๆๆจ็่ทฏๅพๆไฝๅฏไปฅ่ฟๅJSONๅฏน่ฑก๏ผๅชไฝ็ฑปๅ `application/json` ๏ผๆPNGๅพๅ๏ผ
+
+```Python hl_lines="19-24 28"
+{!../../../docs_src/additional_responses/tutorial002.py!}
+```
+
+!!! Note
+ - ่ฏทๆณจๆ๏ผๆจๅฟ
้กป็ดๆฅไฝฟ็จ `FileResponse` ่ฟๅๅพๅใ
+
+!!! Info
+ - ้ค้ๅจ `responses` ๅๆฐไธญๆ็กฎๆๅฎไธๅ็ๅชไฝ็ฑปๅ๏ผๅฆๅ**FastAPI**ๅฐๅๅฎๅๅบไธไธปๅๅบ็ฑปๅ
ทๆ็ธๅ็ๅชไฝ็ฑปๅ๏ผ้ป่ฎคไธบ` application/json` ๏ผใ
+ - ไฝๆฏๅฆๆๆจๆๅฎไบไธไธช่ชๅฎไนๅๅบ็ฑป๏ผๅนถๅฐ `None `ไฝไธบๅ
ถๅชไฝ็ฑปๅ๏ผ**FastAPI**ๅฐไฝฟ็จ `application/json` ไฝไธบๅ
ทๆๅ
ณ่ๆจกๅ็ไปปไฝๅ
ถไปๅๅบใ
+
+## ็ปๅไฟกๆฏ
+ๆจ่ฟๅฏไปฅ่ๅๆฅๆถๆฅ่ชๅคไธชไฝ็ฝฎ็ๅๅบไฟกๆฏ๏ผๅ
ๆฌ `response_model `ใ `status_code` ๅ `responses `ๅๆฐใ
+
+ๆจๅฏไปฅไฝฟ็จ้ป่ฎค็็ถๆ็ `200` ๏ผๆ่
ๆจ้่ฆ็่ชๅฎไน็ถๆ็ ๏ผๅฃฐๆไธไธช `response_model `๏ผ็ถๅ็ดๆฅๅจOpenAPIๆจกๅผไธญๅจ `responses` ไธญๅฃฐๆ็ธๅๅๅบ็ๅ
ถไปไฟกๆฏใ
+
+**FastAPI**ๅฐไฟ็ๆฅ่ช `responses` ็้ๅ ไฟกๆฏ๏ผๅนถๅฐๅ
ถไธๆจกๅไธญ็JSON Schema็ปๅ่ตทๆฅใ
+
+ไพๅฆ๏ผๆจๅฏไปฅไฝฟ็จ็ถๆ็ `404` ๅฃฐๆๅๅบ๏ผ่ฏฅๅๅบไฝฟ็จ`Pydantic`ๆจกๅๅนถๅ
ทๆ่ชๅฎไน็` description` ใ
+
+ไปฅๅไธไธช็ถๆ็ ไธบ `200` ็ๅๅบ๏ผๅฎไฝฟ็จๆจ็ `response_model` ๏ผไฝๅ
ๅซ่ชๅฎไน็ `example` ๏ผ
+
+```Python hl_lines="20-31"
+{!../../../docs_src/additional_responses/tutorial003.py!}
+```
+
+ๆๆ่ฟไบ้ฝๅฐ่ขซๅๅนถๅนถๅ
ๅซๅจๆจ็OpenAPIไธญ๏ผๅนถๅจAPIๆๆกฃไธญๆพ็คบ๏ผ
+
+## ่ๅ้ขๅฎไนๅๅบๅ่ชๅฎไนๅๅบ
+
+ๆจๅฏ่ฝๅธๆๆไธไบๅบ็จไบ่ฎธๅค่ทฏๅพๆไฝ็้ขๅฎไนๅๅบ๏ผไฝๆฏไฝ ๆณๅฐไธๅ็่ทฏๅพๅ่ชๅฎไน็็ธๅบ็ปๅๅจไธๅใ
+ๅฏนไบ่ฟไบๆ
ๅต๏ผไฝ ๅฏไปฅไฝฟ็จPython็ๆๆฏ๏ผๅฐ `dict` ไธ `**dict_to_unpack` ่งฃๅ
๏ผ
+```Python
+old_dict = {
+ "old key": "old value",
+ "second old key": "second old value",
+}
+new_dict = {**old_dict, "new key": "new value"}
+```
+
+่ฟ้๏ผ new_dict ๅฐๅ
ๅซๆฅ่ช old_dict ็ๆๆ้ฎๅผๅฏนๅ ไธๆฐ็้ฎๅผๅฏน๏ผ
+```python
+{
+ "old key": "old value",
+ "second old key": "second old value",
+ "new key": "new value",
+}
+```
+ๆจๅฏไปฅไฝฟ็จ่ฏฅๆๆฏๅจ่ทฏๅพๆไฝไธญ้็จไธไบ้ขๅฎไน็ๅๅบ๏ผๅนถๅฐๅฎไปฌไธๅ
ถไป่ชๅฎไนๅๅบ็ธ็ปๅใ
+**ไพๅฆ๏ผ**
+```Python hl_lines="13-17 26"
+{!../../../docs_src/additional_responses/tutorial004.py!}
+```
+## ๆๅ
ณOpenAPIๅๅบ็ๆดๅคไฟกๆฏ
+
+่ฆไบ่งฃๆจๅฏไปฅๅจๅๅบไธญๅ
ๅซๅชไบๅ
ๅฎน๏ผๆจๅฏไปฅๆฅ็OpenAPI่ง่ไธญ็ไปฅไธ้จๅ๏ผ
+ + [OpenAPIๅๅบๅฏน่ฑก](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.1.0.md#responsesObject)๏ผๅฎๅ
ๆฌ Response Object ใ
+ + [OpenAPIๅๅบๅฏน่ฑก](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.1.0.md#responseObject)๏ผๆจๅฏไปฅ็ดๆฅๅจ `responses` ๅๆฐไธญ็ๆฏไธชๅๅบไธญๅ
ๅซไปปไฝๅ
ๅฎนใๅ
ๆฌ `description` ใ `headers` ใ `content` ๏ผๅ
ถไธญๆฏๅฃฐๆไธๅ็ๅชไฝ็ฑปๅๅJSON Schemas๏ผๅ `links` ใ
| see the commit | https://api.github.com/repos/tiangolo/fastapi/pulls/10325 | 2023-09-26T01:55:49Z | 2024-01-09T15:53:39Z | 2024-01-09T15:53:39Z | 2024-01-09T15:53:39Z | 2,157 | tiangolo/fastapi | 22,673 |
[docs] [testing] distributed training | diff --git a/docs/source/testing.rst b/docs/source/testing.rst
index 4f294be9ea569..3b1d97f573045 100644
--- a/docs/source/testing.rst
+++ b/docs/source/testing.rst
@@ -451,6 +451,24 @@ Inside tests:
+Distributed training
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+``pytest`` can't deal with distributed training directly. If this is attempted - the sub-processes don't do the right thing and end up thinking they are ``pytest`` and start running the test suite in loops. It works, however, if one spawns a normal process that then spawns off multiple workers and manages the IO pipes.
+
+This is still under development but you can study 2 different tests that perform this successfully:
+
+* `test_seq2seq_examples_multi_gpu.py <https://github.com/huggingface/transformers/blob/master/examples/seq2seq/test_seq2seq_examples_multi_gpu.py>`__ - a ``pytorch-lightning``-running test (had to use PL's ``ddp`` spawning method which is the default)
+* `test_finetune_trainer.py <https://github.com/huggingface/transformers/blob/master/examples/seq2seq/test_finetune_trainer.py>`__ - a normal (non-PL) test
+
+To jump right into the execution point, search for the ``execute_async_std`` function in those tests.
+
+You will need at least 2 GPUs to see these tests in action:
+
+.. code-block:: bash
+
+ CUDA_VISIBLE_DEVICES="0,1" RUN_SLOW=1 pytest -sv examples/seq2seq/test_finetune_trainer.py \
+ examples/seq2seq/test_seq2seq_examples_multi_gpu.py
Output capture
| We figured out how to support distributed training with `pytest`, this is a preliminary doc snippet to help those in need to find the current implementation. I'm sure it will evolve as we have more tests with varying needs, but for now that's all we have.
@sgugger | https://api.github.com/repos/huggingface/transformers/pulls/7993 | 2020-10-22T21:25:36Z | 2020-10-26T12:15:06Z | 2020-10-26T12:15:06Z | 2020-10-26T17:20:24Z | 388 | huggingface/transformers | 12,471 |
[refactor] moving grad acc logic to engine | diff --git a/colossalai/utils/gradient_accumulation/__init__.py b/colossalai/engine/gradient_accumulation/__init__.py
similarity index 94%
rename from colossalai/utils/gradient_accumulation/__init__.py
rename to colossalai/engine/gradient_accumulation/__init__.py
index d125b26eb36e..4585b9a2529c 100644
--- a/colossalai/utils/gradient_accumulation/__init__.py
+++ b/colossalai/engine/gradient_accumulation/__init__.py
@@ -6,6 +6,11 @@
from torch.optim.lr_scheduler import _LRScheduler
from ._gradient_accumulation import GradAccumDataloader, GradAccumOptimizer, GradAccumLrSchedulerByStep, GradAccumGradientHandler
+__all__ = [
+ 'accumulate_gradient', 'GradAccumDataloader', 'GradAccumOptimizer', 'GradAccumLrSchedulerByStep',
+ 'GradAccumGradientHandler'
+]
+
def accumulate_gradient(model: nn.Module,
optimizer: Optimizer,
@@ -43,7 +48,3 @@ def accumulate_gradient(model: nn.Module,
lr_scheduler = GradAccumLrSchedulerByStep(lr_scheduler, accumulate_size=accumulate_size)
return optimizer, dataloader, gradient_handlers, lr_scheduler
-
-
-__all__ = ['accumulate_gradient', 'GradAccumDataloader', 'GradAccumOptimizer',
- 'GradAccumLrSchedulerByStep', 'GradAccumGradientHandler']
diff --git a/colossalai/utils/gradient_accumulation/_gradient_accumulation.py b/colossalai/engine/gradient_accumulation/_gradient_accumulation.py
similarity index 100%
rename from colossalai/utils/gradient_accumulation/_gradient_accumulation.py
rename to colossalai/engine/gradient_accumulation/_gradient_accumulation.py
diff --git a/colossalai/engine/schedule/_pipeline_schedule.py b/colossalai/engine/schedule/_pipeline_schedule.py
index 06997f1a1ad5..77131ab7970e 100644
--- a/colossalai/engine/schedule/_pipeline_schedule.py
+++ b/colossalai/engine/schedule/_pipeline_schedule.py
@@ -12,6 +12,7 @@
from colossalai.logging import get_dist_logger
from colossalai.utils import switch_virtual_pipeline_parallel_rank
from colossalai.utils.cuda import get_current_device
+from colossalai.zero.sharded_model.sharded_model_v2 import ShardedModelV2
from ._base_schedule import BaseSchedule
@@ -115,7 +116,7 @@ def load_micro_batch(self):
def pre_processing(self, engine):
# TODO: remove this after testing new zero with pipeline parallelism
model = engine.model
- if isinstance(model, (NaiveAMPModel)) or hasattr(model, 'colo_attr'):
+ if isinstance(model, (NaiveAMPModel, ShardedModelV2)):
self.dtype = torch.half
model = model.model
sig = inspect.signature(model.forward)
@@ -386,8 +387,7 @@ def __init__(self,
self.num_model_chunks = num_model_chunks
def pre_processing(self, engine):
- # FIXME(jiaruifang) we shall not use ShardedModelV2 in pipeline mode, due to circular dependency.
- if hasattr(engine.model, 'colo_attr'):
+ if isinstance(engine.model, ShardedModelV2):
self.dtype = torch.half
elif isinstance(engine.model[0], NaiveAMPModel):
self.dtype = torch.half
diff --git a/colossalai/gemini/tensor_placement_policy.py b/colossalai/gemini/tensor_placement_policy.py
index 5d26b19175b2..3e0851c3e6f6 100644
--- a/colossalai/gemini/tensor_placement_policy.py
+++ b/colossalai/gemini/tensor_placement_policy.py
@@ -2,8 +2,9 @@
from typing import List, Optional
import torch
from colossalai.utils import get_current_device
-from colossalai.zero.sharded_param.tensor_utils import colo_model_data_tensor_move_inline, colo_tensor_mem_usage
from colossalai.utils.memory import colo_device_memory_capacity
+
+from colossalai.zero.sharded_param.tensor_utils import colo_model_data_tensor_move_inline, colo_tensor_mem_usage
from colossalai.zero.sharded_param.tensorful_state import StatefulTensor
from colossalai.gemini.memory_tracer import MemStatsCollector
from colossalai.gemini.memory_tracer import GLOBAL_MODEL_DATA_TRACER
diff --git a/colossalai/initialize.py b/colossalai/initialize.py
index f870477bfebf..b806356e4ae2 100644
--- a/colossalai/initialize.py
+++ b/colossalai/initialize.py
@@ -15,21 +15,26 @@
from torch.optim.optimizer import Optimizer
from torch.utils.data import DataLoader
+from colossalai.core import global_context as gpc
+from colossalai.context.moe_context import MOE_CONTEXT
+
+from colossalai.logging import get_dist_logger
+
+from colossalai.engine.schedule import NonPipelineSchedule, PipelineSchedule, InterleavedPipelineSchedule, get_tensor_shape
+from colossalai.engine import Engine
+from colossalai.engine.ophooks import BaseOpHook
+
+from colossalai.utils import (get_current_device, is_using_ddp, is_using_pp, is_using_sequence, sync_model_param)
+from colossalai.utils.moe import sync_moe_model_param
+
from colossalai.amp import AMP_TYPE, convert_to_amp
from colossalai.amp.naive_amp import NaiveAMPModel
from colossalai.builder.builder import build_gradient_handler
from colossalai.context import Config, ConfigException, ParallelMode
-from colossalai.core import global_context as gpc
-from colossalai.engine.schedule import NonPipelineSchedule, PipelineSchedule, InterleavedPipelineSchedule, get_tensor_shape
+from colossalai.engine.gradient_accumulation import accumulate_gradient
-from colossalai.context.moe_context import MOE_CONTEXT
-from colossalai.engine import Engine
-from colossalai.engine.ophooks import BaseOpHook
-from colossalai.logging import get_dist_logger
from colossalai.nn.optimizer.colossalai_optimizer import ColossalaiOptimizer
-from colossalai.utils import (accumulate_gradient, get_current_device, is_using_ddp, is_using_pp, is_using_sequence,
- sync_model_param)
-from colossalai.utils.moe import sync_moe_model_param
+
from colossalai.zero import convert_to_zero_v2
from colossalai.zero.sharded_optim.sharded_optim_v2 import ShardedOptimizerV2
diff --git a/colossalai/utils/__init__.py b/colossalai/utils/__init__.py
index 2fd5a79512c6..fa69a33f65e0 100644
--- a/colossalai/utils/__init__.py
+++ b/colossalai/utils/__init__.py
@@ -7,7 +7,6 @@
param_is_not_tensor_parallel_duplicate, print_rank_0, switch_virtual_pipeline_parallel_rank,
sync_model_param, disposable)
from .data_sampler import DataParallelSampler, get_dataloader
-from .gradient_accumulation import accumulate_gradient
from .memory import report_memory_usage, colo_device_memory_used, colo_set_process_memory_fraction, colo_device_memory_capacity
from .timer import MultiTimer, Timer
from .tensor_detector import TensorDetector
@@ -18,7 +17,7 @@
'is_model_parallel_parameter', 'clip_grad_norm_fp32', 'count_zeros_fp32', 'copy_tensor_parallel_attributes',
'param_is_not_tensor_parallel_duplicate', 'get_current_device', 'synchronize', 'empty_cache', 'set_to_cuda',
'report_memory_usage', 'colo_device_memory_capacity', 'colo_device_memory_used', 'colo_set_process_memory_fraction',
- 'Timer', 'MultiTimer', 'multi_tensor_applier', 'accumulate_gradient', 'DataParallelSampler', 'get_dataloader',
+ 'Timer', 'MultiTimer', 'multi_tensor_applier', 'DataParallelSampler', 'get_dataloader',
'switch_virtual_pipeline_parallel_rank', 'TensorDetector', 'load_checkpoint', 'save_checkpoint',
'ensure_path_exists', 'disposable'
]
diff --git a/tests/test_data_pipeline_tensor_parallel/test_cifar_with_data_pipeline_tensor.py b/tests/test_data_pipeline_tensor_parallel/test_cifar_with_data_pipeline_tensor.py
index ed5753d9809f..965d722b4aa9 100644
--- a/tests/test_data_pipeline_tensor_parallel/test_cifar_with_data_pipeline_tensor.py
+++ b/tests/test_data_pipeline_tensor_parallel/test_cifar_with_data_pipeline_tensor.py
@@ -14,7 +14,7 @@
from colossalai.nn.loss import CrossEntropyLoss
from colossalai.trainer import Trainer, hooks
from colossalai.utils import free_port, get_dataloader
-from colossalai.utils.gradient_accumulation import GradAccumLrSchedulerByStep
+from colossalai.engine.gradient_accumulation import GradAccumLrSchedulerByStep
from colossalai.testing import rerun_if_address_is_in_use
from model_zoo.vit import vit_tiny_patch4_32
from torchvision import transforms
diff --git a/tests/test_utils/test_gradient_accumluation.py b/tests/test_engine/test_gradient_accumluation.py
similarity index 100%
rename from tests/test_utils/test_gradient_accumluation.py
rename to tests/test_engine/test_gradient_accumluation.py
| Avoid circle import. | https://api.github.com/repos/hpcaitech/ColossalAI/pulls/804 | 2022-04-19T02:18:20Z | 2022-04-19T06:03:21Z | 2022-04-19T06:03:21Z | 2022-04-19T06:03:24Z | 2,027 | hpcaitech/ColossalAI | 11,779 |
fix(ui): Project details transactions check | diff --git a/src/sentry/static/sentry/app/views/projectDetail/projectDetail.tsx b/src/sentry/static/sentry/app/views/projectDetail/projectDetail.tsx
index f0b0fcf8b1d5e..bae713845f79b 100644
--- a/src/sentry/static/sentry/app/views/projectDetail/projectDetail.tsx
+++ b/src/sentry/static/sentry/app/views/projectDetail/projectDetail.tsx
@@ -169,7 +169,8 @@ class ProjectDetail extends AsyncView<Props, State> {
} = this.props;
const project = this.project;
const {hasSessions} = this.state;
- const hasTransactions = project?.firstTransactionEvent;
+ const hasPerformance = organization.features.includes('performance-view');
+ const hasTransactions = hasPerformance && project?.firstTransactionEvent;
const isProjectStabilized = this.isProjectStabilized();
const visibleCharts = ['chart1'];
| https://api.github.com/repos/getsentry/sentry/pulls/24927 | 2021-04-02T16:16:21Z | 2021-04-02T16:38:44Z | 2021-04-02T16:38:44Z | 2021-04-18T00:02:34Z | 203 | getsentry/sentry | 44,480 |
|
fix: correctly set preset config and loras in meta parser | diff --git a/modules/config.py b/modules/config.py
index ba2a76fb7..76ffd3488 100644
--- a/modules/config.py
+++ b/modules/config.py
@@ -124,14 +124,6 @@ def try_get_preset_content(preset):
print(e)
return {}
-
-try:
- with open(os.path.abspath(f'./presets/default.json'), "r", encoding="utf-8") as json_file:
- config_dict.update(json.load(json_file))
-except Exception as e:
- print(f'Load default preset failed.')
- print(e)
-
available_presets = get_presets()
preset = args_manager.args.preset
config_dict.update(try_get_preset_content(preset))
diff --git a/modules/meta_parser.py b/modules/meta_parser.py
index 15f0ad7bc..10bc68967 100644
--- a/modules/meta_parser.py
+++ b/modules/meta_parser.py
@@ -169,11 +169,20 @@ def get_freeu(key: str, fallback: str | None, source_dict: dict, results: list,
def get_lora(key: str, fallback: str | None, source_dict: dict, results: list):
try:
- n, w = source_dict.get(key, source_dict.get(fallback)).split(' : ')
- w = float(w)
- results.append(True)
- results.append(n)
- results.append(w)
+ split_data = source_dict.get(key, source_dict.get(fallback)).split(' : ')
+ enabled = True
+ name = split_data[0]
+ weight = split_data[1]
+
+ if len(split_data) == 3:
+ enabled = split_data[0] == 'True'
+ name = split_data[1]
+ weight = split_data[2]
+
+ weight = float(weight)
+ results.append(enabled)
+ results.append(name)
+ results.append(weight)
except:
results.append(True)
results.append('None')
| fixes https://github.com/lllyasviel/Fooocus/issues/2566 | https://api.github.com/repos/lllyasviel/Fooocus/pulls/2588 | 2024-03-20T20:14:20Z | 2024-03-20T20:16:03Z | 2024-03-20T20:16:03Z | 2024-03-20T20:16:03Z | 438 | lllyasviel/Fooocus | 7,267 |
[Wrappers]: RescaleAction | diff --git a/gym/wrappers/__init__.py b/gym/wrappers/__init__.py
index 5bc7c688600..7d028f34f56 100644
--- a/gym/wrappers/__init__.py
+++ b/gym/wrappers/__init__.py
@@ -4,6 +4,7 @@
from gym.wrappers.dict import FlattenDictWrapper
from gym.wrappers.filter_observation import FilterObservation
from gym.wrappers.atari_preprocessing import AtariPreprocessing
+from gym.wrappers.rescale_action import RescaleAction
from gym.wrappers.flatten_observation import FlattenObservation
from gym.wrappers.gray_scale_observation import GrayScaleObservation
from gym.wrappers.frame_stack import LazyFrames
diff --git a/gym/wrappers/rescale_action.py b/gym/wrappers/rescale_action.py
new file mode 100644
index 00000000000..cf2e063e0d7
--- /dev/null
+++ b/gym/wrappers/rescale_action.py
@@ -0,0 +1,32 @@
+import numpy as np
+
+import gym
+from gym import spaces
+
+
+class RescaleAction(gym.ActionWrapper):
+ r"""Rescales the continuous action space of the environment to a range [a,b].
+
+ Example::
+
+ >>> RescaleAction(env, a, b).action_space == Box(a,b)
+ True
+
+ """
+ def __init__(self, env, a, b):
+ assert isinstance(env.action_space, spaces.Box), (
+ "expected Box action space, got {}".format(type(env.action_space)))
+ assert np.less_equal(a, b).all(), (a, b)
+ super(RescaleAction, self).__init__(env)
+ self.a = np.zeros(env.action_space.shape, dtype=env.action_space.dtype) + a
+ self.b = np.zeros(env.action_space.shape, dtype=env.action_space.dtype) + b
+ self.action_space = spaces.Box(low=a, high=b, shape=env.action_space.shape, dtype=env.action_space.dtype)
+
+ def action(self, action):
+ assert np.all(np.greater_equal(action, self.a)), (action, self.a)
+ assert np.all(np.less_equal(action, self.b)), (action, self.b)
+ low = self.env.action_space.low
+ high = self.env.action_space.high
+ action = low + (high - low)*((action - self.a)/(self.b - self.a))
+ action = np.clip(action, low, high)
+ return action
diff --git a/gym/wrappers/test_rescale_action.py b/gym/wrappers/test_rescale_action.py
new file mode 100644
index 00000000000..9eceadd7676
--- /dev/null
+++ b/gym/wrappers/test_rescale_action.py
@@ -0,0 +1,32 @@
+import pytest
+
+import numpy as np
+
+import gym
+from gym.wrappers import RescaleAction
+
+
+def test_rescale_action():
+ env = gym.make('CartPole-v1')
+ with pytest.raises(AssertionError):
+ env = RescaleAction(env, -1, 1)
+ del env
+
+ env = gym.make('Pendulum-v0')
+ wrapped_env = RescaleAction(gym.make('Pendulum-v0'), -1, 1)
+
+ seed = 0
+ env.seed(seed)
+ wrapped_env.seed(seed)
+
+ obs = env.reset()
+ wrapped_obs = wrapped_env.reset()
+ assert np.allclose(obs, wrapped_obs)
+
+ obs, reward, _, _ = env.step([1.5])
+ with pytest.raises(AssertionError):
+ wrapped_env.step([1.5])
+ wrapped_obs, wrapped_reward, _, _ = wrapped_env.step([0.75])
+
+ assert np.allclose(obs, wrapped_obs)
+ assert np.allclose(reward, wrapped_reward)
| https://api.github.com/repos/openai/gym/pulls/1491 | 2019-05-14T13:19:37Z | 2019-10-25T21:20:54Z | 2019-10-25T21:20:54Z | 2019-10-25T21:20:54Z | 893 | openai/gym | 5,505 |
|
Add flask-restless | diff --git a/README.md b/README.md
index 19031bb72..f3fc512f9 100644
--- a/README.md
+++ b/README.md
@@ -464,6 +464,7 @@ A curated list of awesome Python frameworks, libraries and software. Inspired by
* [django-formapi](https://github.com/5monkeys/django-formapi) - Create JSON APIs with HMAC authentication and Django form-validation.
* [flask-api](http://www.flaskapi.org/) - An implementation of the same web browsable APIs that django-rest-framework provides.
* [flask-restful](http://flask-restful.readthedocs.org/) - An extension for Flask that adds support for quickly building REST APIs.
+* [flask-restless](https://flask-restless.readthedocs.org/en/latest/) - A Flask extension for generating ReSTful APIs for database models defined with SQLAlchemy (or Flask-SQLAlchemy).
* [flask-api-utils](https://github.com/marselester/flask-api-utils) - Flask extension that takes care of API representation and authentication.
* [falcon](http://falconframework.org/) - A high-performance Python framework for building cloud APIs and web app backends.
* [eve](https://github.com/nicolaiarocci/eve) - REST API framework powered by Flask, MongoDB and good intentions.
| [Flask-Restless](https://flask-restless.readthedocs.org/en/latest/) provides simple generation of ReSTful APIs for database models defined using SQLAlchemy (or Flask-SQLAlchemy). The generated APIs send and receive messages in JSON format.
| https://api.github.com/repos/vinta/awesome-python/pulls/249 | 2014-10-22T07:45:06Z | 2014-10-25T05:33:38Z | 2014-10-25T05:33:38Z | 2014-10-25T05:33:38Z | 294 | vinta/awesome-python | 27,028 |
Add brotlicffi support | diff --git a/scrapy/downloadermiddlewares/httpcompression.py b/scrapy/downloadermiddlewares/httpcompression.py
index aa3abe85379..0e5e215ac8e 100644
--- a/scrapy/downloadermiddlewares/httpcompression.py
+++ b/scrapy/downloadermiddlewares/httpcompression.py
@@ -29,7 +29,10 @@
ACCEPTED_ENCODINGS: List[bytes] = [b"gzip", b"deflate"]
try:
- import brotli # noqa: F401
+ try:
+ import brotli # noqa: F401
+ except ImportError:
+ import brotlicffi # noqa: F401
except ImportError:
pass
else:
diff --git a/scrapy/utils/_compression.py b/scrapy/utils/_compression.py
index 7c40d0a02d1..84c255c28f9 100644
--- a/scrapy/utils/_compression.py
+++ b/scrapy/utils/_compression.py
@@ -5,7 +5,10 @@
from scrapy.exceptions import ScrapyDeprecationWarning
try:
- import brotli
+ try:
+ import brotli
+ except ImportError:
+ import brotlicffi as brotli
except ImportError:
pass
else:
@@ -17,9 +20,9 @@
"You have brotlipy installed, and Scrapy will use it, but "
"Scrapy support for brotlipy is deprecated and will stop "
"working in a future version of Scrapy. brotlipy itself is "
- "deprecated, it has been superseded by brotlicffi (not "
- "currently supported by Scrapy). Please, uninstall brotlipy "
- "and install brotli instead. brotlipy has the same import "
+ "deprecated, it has been superseded by brotlicffi. "
+ "Please, uninstall brotlipy "
+ "and install brotli or brotlicffi instead. brotlipy has the same import "
"name as brotli, so keeping both installed is strongly "
"discouraged."
),
diff --git a/tests/requirements.txt b/tests/requirements.txt
index 5b75674f513..ca5f6ddbd93 100644
--- a/tests/requirements.txt
+++ b/tests/requirements.txt
@@ -11,8 +11,7 @@ uvloop; platform_system != "Windows"
bpython # optional for shell wrapper tests
brotli; implementation_name != 'pypy' # optional for HTTP compress downloader middleware tests
-# 1.1.0 is broken on PyPy: https://github.com/google/brotli/issues/1072
-brotli==1.0.9; implementation_name == 'pypy' # optional for HTTP compress downloader middleware tests
+brotlicffi; implementation_name == 'pypy' # optional for HTTP compress downloader middleware tests
zstandard; implementation_name != 'pypy' # optional for HTTP compress downloader middleware tests
ipython
pywin32; sys_platform == "win32"
diff --git a/tests/test_downloadermiddleware_httpcompression.py b/tests/test_downloadermiddleware_httpcompression.py
index ae5569d0a8a..7c36f748e35 100644
--- a/tests/test_downloadermiddleware_httpcompression.py
+++ b/tests/test_downloadermiddleware_httpcompression.py
@@ -130,7 +130,10 @@ def test_process_response_gzip(self):
def test_process_response_br(self):
try:
- import brotli # noqa: F401
+ try:
+ import brotli # noqa: F401
+ except ImportError:
+ import brotlicffi # noqa: F401
except ImportError:
raise SkipTest("no brotli")
response = self._getresponse("br")
@@ -448,7 +451,10 @@ def _test_compression_bomb_setting(self, compression_id):
def test_compression_bomb_setting_br(self):
try:
- import brotli # noqa: F401
+ try:
+ import brotli # noqa: F401
+ except ImportError:
+ import brotlicffi # noqa: F401
except ImportError:
raise SkipTest("no brotli")
self._test_compression_bomb_setting("br")
@@ -486,7 +492,10 @@ class DownloadMaxSizeSpider(Spider):
def test_compression_bomb_spider_attr_br(self):
try:
- import brotli # noqa: F401
+ try:
+ import brotli # noqa: F401
+ except ImportError:
+ import brotlicffi # noqa: F401
except ImportError:
raise SkipTest("no brotli")
self._test_compression_bomb_spider_attr("br")
@@ -522,7 +531,10 @@ def _test_compression_bomb_request_meta(self, compression_id):
def test_compression_bomb_request_meta_br(self):
try:
- import brotli # noqa: F401
+ try:
+ import brotli # noqa: F401
+ except ImportError:
+ import brotlicffi # noqa: F401
except ImportError:
raise SkipTest("no brotli")
self._test_compression_bomb_request_meta("br")
@@ -568,7 +580,10 @@ def _test_download_warnsize_setting(self, compression_id):
def test_download_warnsize_setting_br(self):
try:
- import brotli # noqa: F401
+ try:
+ import brotli # noqa: F401
+ except ImportError:
+ import brotlicffi # noqa: F401
except ImportError:
raise SkipTest("no brotli")
self._test_download_warnsize_setting("br")
@@ -616,7 +631,10 @@ class DownloadWarnSizeSpider(Spider):
def test_download_warnsize_spider_attr_br(self):
try:
- import brotli # noqa: F401
+ try:
+ import brotli # noqa: F401
+ except ImportError:
+ import brotlicffi # noqa: F401
except ImportError:
raise SkipTest("no brotli")
self._test_download_warnsize_spider_attr("br")
@@ -662,7 +680,10 @@ def _test_download_warnsize_request_meta(self, compression_id):
def test_download_warnsize_request_meta_br(self):
try:
- import brotli # noqa: F401
+ try:
+ import brotli # noqa: F401
+ except ImportError:
+ import brotlicffi # noqa: F401
except ImportError:
raise SkipTest("no brotli")
self._test_download_warnsize_request_meta("br")
| Fix #6263 | https://api.github.com/repos/scrapy/scrapy/pulls/6269 | 2024-03-06T01:20:27Z | 2024-03-06T14:04:43Z | 2024-03-06T14:04:42Z | 2024-03-06T14:04:43Z | 1,517 | scrapy/scrapy | 34,417 |
Adds wemake-python-styleguide | diff --git a/README.md b/README.md
index d9510375a..f97025b71 100644
--- a/README.md
+++ b/README.md
@@ -221,6 +221,7 @@ Inspired by [awesome-php](https://github.com/ziadoz/awesome-php).
* [awesome-flake8-extensions](https://github.com/DmytroLitvinov/awesome-flake8-extensions)
* [pylint](https://www.pylint.org/) - A fully customizable source code analyzer.
* [pylama](https://github.com/klen/pylama) - A code audit tool for Python and JavaScript.
+ * [wemake-python-styleguide](https://github.com/wemake-services/wemake-python-styleguide) - The strictest and most opinionated python linter ever.
* Code Formatters
* [black](https://github.com/python/black) - The uncompromising Python code formatter.
* [yapf](https://github.com/google/yapf) - Yet another Python code formatter from Google.
| ## What is this Python project?
The strictest and most opinionated python linter ever!
Repo: https://github.com/wemake-services/wemake-python-styleguide
Docs: https://wemake-python-stylegui.de/en/latest/
PyPI: https://pypi.org/project/wemake-python-styleguide
## What's the difference between this Python project and similar ones?
<img width="866" alt="ะกะฝะธะผะพะบ ัะบัะฐะฝะฐ 2019-09-09 ะฒ 14 50 12" src="https://user-images.githubusercontent.com/4660275/64528428-29297b80-d311-11e9-932a-007d89204ab7.png">
--
Anyone who agrees with this pull request could vote for it by adding a 👍 to it, and usually, the maintainer will merge it when votes reach 20. | https://api.github.com/repos/vinta/awesome-python/pulls/1348 | 2019-09-09T11:50:46Z | 2019-10-03T16:11:47Z | 2019-10-03T16:11:47Z | 2019-10-03T16:11:47Z | 234 | vinta/awesome-python | 26,987
Make dockerfiles Google Shell Guide Compliant | diff --git a/scripts/ci/dockerfiles/bats/build_and_push.sh b/scripts/ci/dockerfiles/bats/build_and_push.sh
index 61d069480fcb2..5197ca1467577 100755
--- a/scripts/ci/dockerfiles/bats/build_and_push.sh
+++ b/scripts/ci/dockerfiles/bats/build_and_push.sh
@@ -18,18 +18,28 @@
set -euo pipefail
DOCKERHUB_USER=${DOCKERHUB_USER:="apache"}
DOCKERHUB_REPO=${DOCKERHUB_REPO:="airflow"}
+readonly DOCKERHUB_USER
+readonly DOCKERHUB_REPO
+
BATS_VERSION="1.2.1"
BATS_ASSERT_VERSION="2.0.0"
BATS_SUPPORT_VERSION="0.3.0"
BATS_FILE_VERSION="0.3.0"
+readonly BATS_VERSION
+readonly BATS_ASSERT_VERSION
+readonly BATS_SUPPORT_VERSION
+readonly BATS_FILE_VERSION
AIRFLOW_BATS_VERSION="2020.09.05"
+readonly AIRFLOW_BATS_VERSION
COMMIT_SHA=$(git rev-parse HEAD)
+readonly COMMIT_SHA
cd "$( dirname "${BASH_SOURCE[0]}" )" || exit 1
TAG="${DOCKERHUB_USER}/${DOCKERHUB_REPO}:bats-${AIRFLOW_BATS_VERSION}-${BATS_VERSION}"
+readonly TAG
docker build . \
--pull \
|
---
**^ Add meaningful description above**
Read the **[Pull Request Guidelines](https://github.com/apache/airflow/blob/master/CONTRIBUTING.rst#pull-request-guidelines)** for more information.
In case of fundamental code change, Airflow Improvement Proposal ([AIP](https://cwiki.apache.org/confluence/display/AIRFLOW/Airflow+Improvements+Proposals)) is needed.
In case of a new dependency, check compliance with the [ASF 3rd Party License Policy](https://www.apache.org/legal/resolved.html#category-x).
In case of backwards incompatible changes please leave a note in [UPDATING.md](https://github.com/apache/airflow/blob/master/UPDATING.md).
| https://api.github.com/repos/apache/airflow/pulls/10734 | 2020-09-04T23:11:53Z | 2020-09-09T12:04:17Z | 2020-09-09T12:04:17Z | 2020-11-14T16:46:03Z | 296 | apache/airflow | 14,007 |
TICK_SIZE - coinone | diff --git a/js/coinone.js b/js/coinone.js
index c21b027dcf5c..6e4e67743235 100644
--- a/js/coinone.js
+++ b/js/coinone.js
@@ -4,6 +4,7 @@
const Exchange = require ('./base/Exchange');
const { BadSymbol, BadRequest, ExchangeError, ArgumentsRequired, OrderNotFound, OnMaintenance } = require ('./base/errors');
+const { TICK_SIZE } = require ('./base/functions/number');
const Precise = require ('./base/Precise');
// ---------------------------------------------------------------------------
@@ -113,10 +114,11 @@ module.exports = class coinone extends Exchange {
},
},
'precision': {
- 'price': 4,
- 'amount': 4,
- 'cost': 8,
+ 'price': 0.0001,
+ 'amount': 0.0001,
+ 'cost': 0.00000001,
},
+ 'precisionMode': TICK_SIZE,
'exceptions': {
'405': OnMaintenance, // {"errorCode":"405","status":"maintenance","result":"error"}
'104': OrderNotFound, // {"errorCode":"104","errorMsg":"Order id is not exist","result":"error"}
| https://api.github.com/repos/ccxt/ccxt/pulls/13773 | 2022-06-10T20:19:44Z | 2022-06-10T22:15:47Z | 2022-06-10T22:15:47Z | 2022-06-11T07:21:18Z | 280 | ccxt/ccxt | 13,161 |
|
Fix #4382, add console integration tests | diff --git a/mitmproxy/master.py b/mitmproxy/master.py
index 720e19c3c1..f34e66f673 100644
--- a/mitmproxy/master.py
+++ b/mitmproxy/master.py
@@ -73,7 +73,7 @@ def run_loop(self, loop):
self.addons.trigger("done")
- def run(self, func=None):
+ def run(self):
loop = asyncio.get_event_loop()
self.run_loop(loop.run_forever)
diff --git a/mitmproxy/tools/console/consoleaddons.py b/mitmproxy/tools/console/consoleaddons.py
index 7d31811d6f..6d15056026 100644
--- a/mitmproxy/tools/console/consoleaddons.py
+++ b/mitmproxy/tools/console/consoleaddons.py
@@ -60,18 +60,6 @@ def websocket_end(self, f):
f.close_message,
f.close_reason))
- def tcp_message(self, f):
- message = f.messages[-1]
- direction = "->" if message.from_client else "<-"
- ctx.log.info("{client_host}:{client_port} {direction} tcp {direction} {server_host}:{server_port}".format(
- client_host=f.client_conn.peername[0],
- client_port=f.client_conn.peername[1],
- server_host=f.server_conn.address[0],
- server_port=f.server_conn.address[1],
- direction=direction,
- ))
- ctx.log.debug(strutils.bytes_to_escaped_str(message.content))
-
class ConsoleAddon:
"""
diff --git a/mitmproxy/tools/console/flowdetailview.py b/mitmproxy/tools/console/flowdetailview.py
index aa3d449c86..0844ac4d62 100644
--- a/mitmproxy/tools/console/flowdetailview.py
+++ b/mitmproxy/tools/console/flowdetailview.py
@@ -69,7 +69,7 @@ def flowdetails(state, flow: mitmproxy.flow.Flow):
]
if c.altnames:
- parts.append(("Alt names", ", ".join(strutils.bytes_to_escaped_str(x) for x in c.altnames)))
+ parts.append(("Alt names", ", ".join(c.altnames)))
text.extend(
common.format_keyvals(parts, indent=4)
)
diff --git a/mitmproxy/tools/console/master.py b/mitmproxy/tools/console/master.py
index 940f379fd4..f3d9887b3a 100644
--- a/mitmproxy/tools/console/master.py
+++ b/mitmproxy/tools/console/master.py
@@ -48,7 +48,6 @@ def __init__(self, opts):
self.view_stack = []
- signals.call_in.connect(self.sig_call_in)
self.addons.add(*addons.default_addons())
self.addons.add(
intercept.Intercept(),
@@ -193,6 +192,7 @@ def run(self):
"Please run mitmproxy in an interactive shell environment.", file=sys.stderr)
sys.exit(1)
+ signals.call_in.connect(self.sig_call_in)
self.ui = window.Screen()
self.ui.set_terminal_properties(256)
self.set_palette(self.options, None)
diff --git a/mitmproxy/tools/console/window.py b/mitmproxy/tools/console/window.py
index fb2e8c1e6a..bb8bdb28cf 100644
--- a/mitmproxy/tools/console/window.py
+++ b/mitmproxy/tools/console/window.py
@@ -1,18 +1,18 @@
import re
import urwid
+from mitmproxy.tools.console import commands
from mitmproxy.tools.console import common
-from mitmproxy.tools.console import signals
-from mitmproxy.tools.console import statusbar
+from mitmproxy.tools.console import eventlog
from mitmproxy.tools.console import flowlist
from mitmproxy.tools.console import flowview
-from mitmproxy.tools.console import commands
+from mitmproxy.tools.console import grideditor
+from mitmproxy.tools.console import help
from mitmproxy.tools.console import keybindings
from mitmproxy.tools.console import options
from mitmproxy.tools.console import overlay
-from mitmproxy.tools.console import help
-from mitmproxy.tools.console import grideditor
-from mitmproxy.tools.console import eventlog
+from mitmproxy.tools.console import signals
+from mitmproxy.tools.console import statusbar
class StackWidget(urwid.Frame):
@@ -52,23 +52,23 @@ class WindowStack:
def __init__(self, master, base):
self.master = master
self.windows = dict(
- flowlist = flowlist.FlowListBox(master),
- flowview = flowview.FlowView(master),
- commands = commands.Commands(master),
- keybindings = keybindings.KeyBindings(master),
- options = options.Options(master),
- help = help.HelpView(master),
- eventlog = eventlog.EventLog(master),
-
- edit_focus_query = grideditor.QueryEditor(master),
- edit_focus_cookies = grideditor.CookieEditor(master),
- edit_focus_setcookies = grideditor.SetCookieEditor(master),
- edit_focus_setcookie_attrs = grideditor.CookieAttributeEditor(master),
+ flowlist=flowlist.FlowListBox(master),
+ flowview=flowview.FlowView(master),
+ commands=commands.Commands(master),
+ keybindings=keybindings.KeyBindings(master),
+ options=options.Options(master),
+ help=help.HelpView(master),
+ eventlog=eventlog.EventLog(master),
+
+ edit_focus_query=grideditor.QueryEditor(master),
+ edit_focus_cookies=grideditor.CookieEditor(master),
+ edit_focus_setcookies=grideditor.SetCookieEditor(master),
+ edit_focus_setcookie_attrs=grideditor.CookieAttributeEditor(master),
edit_focus_multipart_form=grideditor.RequestMultipartEditor(master),
edit_focus_urlencoded_form=grideditor.RequestUrlEncodedEditor(master),
- edit_focus_path = grideditor.PathEditor(master),
- edit_focus_request_headers = grideditor.RequestHeaderEditor(master),
- edit_focus_response_headers = grideditor.ResponseHeaderEditor(master),
+ edit_focus_path=grideditor.PathEditor(master),
+ edit_focus_request_headers=grideditor.RequestHeaderEditor(master),
+ edit_focus_response_headers=grideditor.ResponseHeaderEditor(master),
)
self.stack = [base]
self.overlay = None
@@ -127,8 +127,8 @@ def __init__(self, master):
self.statusbar = statusbar.StatusBar(master)
super().__init__(
None,
- header = None,
- footer = urwid.AttrWrap(self.statusbar, "background")
+ header=None,
+ footer=urwid.AttrWrap(self.statusbar, "background")
)
self.master = master
self.master.view.sig_view_refresh.connect(self.view_changed)
@@ -295,8 +295,8 @@ def mouse_event(self, *args, **kwargs):
if not k:
if args[1] == "mouse drag":
signals.status_message.send(
- message = "Hold down fn, shift, alt or ctrl to select text or use the --set console_mouse=false parameter.",
- expire = 1
+ message="Hold down fn, shift, alt or ctrl to select text or use the --set console_mouse=false parameter.",
+ expire=1
)
elif args[1] == "mouse press" and args[2] == 4:
self.keypress(args[0], "up")
diff --git a/test/mitmproxy/tools/console/test_integration.py b/test/mitmproxy/tools/console/test_integration.py
new file mode 100644
index 0000000000..feac177467
--- /dev/null
+++ b/test/mitmproxy/tools/console/test_integration.py
@@ -0,0 +1,46 @@
+import re
+import sys
+from typing import List
+
+import pytest
+
+import mitmproxy.options
+from mitmproxy import master
+from mitmproxy.tools.console import window
+from mitmproxy.tools.console.master import ConsoleMaster
+from test.conftest import skip_windows
+
+
+def tokenize(input: str) -> List[str]:
+ keys = []
+ for i, k in enumerate(re.split("[<>]", input)):
+ if i % 2:
+ keys.append(k)
+ else:
+ keys.extend(k)
+ return keys
+
+
+class ConsoleTestMaster(ConsoleMaster):
+ def type(self, input: str) -> None:
+ for key in tokenize(input):
+ self.window.keypress(self.ui.get_cols_rows(), key)
+
+
+@pytest.fixture
+def console(monkeypatch):
+ monkeypatch.setattr(window.Screen, "get_cols_rows", lambda self: (120, 120))
+ monkeypatch.setattr(master.Master, "run_loop", lambda *_: True)
+ monkeypatch.setattr(ConsoleTestMaster, "sig_call_in", lambda *_, **__: True)
+ monkeypatch.setattr(sys.stdout, "isatty", lambda: True)
+
+ opts = mitmproxy.options.Options()
+ m = ConsoleTestMaster(opts)
+ m.run()
+ return m
+
+
+@skip_windows
+def test_integration(tdata, console):
+ console.type(f":view.flows.load {tdata.path('mitmproxy/data/dumpfile-018.bin')}<enter>")
+ console.type("<enter><tab><tab>")
| This PR fixes #4382, and (more importantly) adds a harness to drive integration tests for the UI. We can expand on tests here in the future, but we already catch problems like #4382 now!
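For anyone extending these tests later: the harness drives the TUI purely through `keypress`, and the `<...>` key syntax is split by the `tokenize` helper in the new test file. A minimal standalone sketch of that behaviour (re-implemented here so it runs without mitmproxy installed):

```python
import re
from typing import List


def tokenize(input: str) -> List[str]:
    # Same logic as the helper in test_integration.py: text between
    # angle brackets becomes one named key ("enter", "tab", ...),
    # everything else is fed through character by character.
    keys: List[str] = []
    for i, k in enumerate(re.split("[<>]", input)):
        if i % 2:
            keys.append(k)
        else:
            keys.extend(k)
    return keys


assert tokenize(":quit<enter>") == [":", "q", "u", "i", "t", "enter"]
assert tokenize("<tab><tab>x") == ["tab", "tab", "x"]
```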
/cc @Prinzhorn | https://api.github.com/repos/mitmproxy/mitmproxy/pulls/4383 | 2021-01-04T16:01:22Z | 2021-01-04T19:31:43Z | 2021-01-04T19:31:43Z | 2021-01-05T16:12:18Z | 2,041 | mitmproxy/mitmproxy | 28,399
[Wrappers]: add ClipReward | diff --git a/gym/wrappers/__init__.py b/gym/wrappers/__init__.py
index 8aaf686b427..701db24dd23 100644
--- a/gym/wrappers/__init__.py
+++ b/gym/wrappers/__init__.py
@@ -3,4 +3,5 @@
from gym.wrappers.time_limit import TimeLimit
from gym.wrappers.dict import FlattenDictWrapper
from gym.wrappers.atari_preprocessing import AtariPreprocessing
-from gym.wrappers.sign_reward import SignReward
+from gym.wrappers.clip_reward import ClipReward
+from gym.wrappers.sign_reward import SignReward
\ No newline at end of file
diff --git a/gym/wrappers/clip_reward.py b/gym/wrappers/clip_reward.py
new file mode 100644
index 00000000000..19cc1390900
--- /dev/null
+++ b/gym/wrappers/clip_reward.py
@@ -0,0 +1,14 @@
+import numpy as np
+
+from gym import RewardWrapper
+
+
+class ClipReward(RewardWrapper):
+ r""""Clip reward to [min, max]. """
+ def __init__(self, env, min_r, max_r):
+ super(ClipReward, self).__init__(env)
+ self.min_r = min_r
+ self.max_r = max_r
+
+ def reward(self, reward):
+ return np.clip(reward, self.min_r, self.max_r)
diff --git a/gym/wrappers/test_clip_reward.py b/gym/wrappers/test_clip_reward.py
new file mode 100644
index 00000000000..f1cbef209c6
--- /dev/null
+++ b/gym/wrappers/test_clip_reward.py
@@ -0,0 +1,21 @@
+import pytest
+
+import gym
+from gym.wrappers import ClipReward
+
+
+@pytest.mark.parametrize('env_id', ['CartPole-v1', 'Pendulum-v0', 'MountainCar-v0'])
+def test_clip_reward(env_id):
+ env = gym.make(env_id)
+ wrapped_env = ClipReward(env, -0.0005, 0.0002)
+
+ env.reset()
+ wrapped_env.reset()
+
+ action = env.action_space.sample()
+
+ _, reward, _, _ = env.step(action)
+ _, wrapped_reward, _, _ = wrapped_env.step(action)
+
+ assert abs(wrapped_reward) < abs(reward)
+ assert wrapped_reward == -0.0005 or wrapped_reward == 0.0002
| https://api.github.com/repos/openai/gym/pulls/1484 | 2019-05-14T12:45:14Z | 2019-05-24T22:42:11Z | 2019-05-24T22:42:11Z | 2019-05-24T22:42:11Z | 575 | openai/gym | 5,534 |
|
Change black.vim error message to specify its origin. | diff --git a/plugin/black.vim b/plugin/black.vim
index 385d0e1bd98..b174e598452 100644
--- a/plugin/black.vim
+++ b/plugin/black.vim
@@ -14,7 +14,7 @@
" - restore cursor/window position after formatting
if v:version < 700 || !has('python3')
- echo "This script requires vim7.0+ with Python 3.6 support."
+ echo "The black.vim plugin requires vim7.0+ with Python 3.6 support."
finish
endif
| Fixes #1230.
This just changes the error message shown when the black.vim plugin is unable to load, so that the user sees it comes from "The black.vim plugin" rather than "This script" when viewing the message in vim. | https://api.github.com/repos/psf/black/pulls/1240 | 2020-01-21T04:25:19Z | 2020-01-23T16:18:58Z | 2020-01-23T16:18:58Z | 2020-01-23T19:10:25Z | 130 | psf/black | 23,802
Rename rnd to round in I.1 | diff --git a/CppCoreGuidelines.md b/CppCoreGuidelines.md
index 3d5dd8755..9dd3a94bf 100644
--- a/CppCoreGuidelines.md
+++ b/CppCoreGuidelines.md
@@ -1223,12 +1223,12 @@ Correctness. Assumptions not stated in an interface are easily overlooked and ha
Controlling the behavior of a function through a global (namespace scope) variable (a call mode) is implicit and potentially confusing. For example:
- int rnd(double d)
+ int round(double d)
{
- return (rnd_up) ? ceil(d) : d; // don't: "invisible" dependency
+ return (round_up) ? ceil(d) : d; // don't: "invisible" dependency
}
-It will not be obvious to a caller that the meaning of two calls of `rnd(7.2)` might give different results.
+It will not be obvious to a caller that the meaning of two calls of `round(7.2)` might give different results.
##### Exception
| At first, I assumed that this was the "rand" function, not the "round" function, and was confused about why exactly it is a problem that it returns different values. While it's easy to notice when actually reading the code, this slows down comprehension, and bad function naming is not the point of that particular guideline. | https://api.github.com/repos/isocpp/CppCoreGuidelines/pulls/958 | 2017-06-10T10:10:22Z | 2017-06-12T01:44:55Z | 2017-06-12T01:44:55Z | 2017-06-12T01:44:55Z | 242 | isocpp/CppCoreGuidelines | 15,999
Make pre commit google shell guide compatible | diff --git a/scripts/ci/pre_commit/pre_commit_bat_tests.sh b/scripts/ci/pre_commit/pre_commit_bat_tests.sh
index 9531d5e841690..58078623552cb 100755
--- a/scripts/ci/pre_commit/pre_commit_bat_tests.sh
+++ b/scripts/ci/pre_commit/pre_commit_bat_tests.sh
@@ -21,10 +21,10 @@ export PRINT_INFO_FROM_SCRIPTS="false"
export SKIP_CHECK_REMOTE_IMAGE="true"
if [[ $# -eq 0 ]]; then
- PARAMS=("tests/bats")
+ params=("tests/bats")
else
- PARAMS=("${@}")
+ params=("${@}")
fi
# shellcheck source=scripts/ci/static_checks/bats_tests.sh
-. "$( dirname "${BASH_SOURCE[0]}" )/../static_checks/bats_tests.sh" "${PARAMS[@]}"
+. "$( dirname "${BASH_SOURCE[0]}" )/../static_checks/bats_tests.sh" "${params[@]}"
diff --git a/scripts/ci/pre_commit/pre_commit_breeze_cmd_line.sh b/scripts/ci/pre_commit/pre_commit_breeze_cmd_line.sh
index 92ec950378fd6..66955a89b6a6b 100755
--- a/scripts/ci/pre_commit/pre_commit_breeze_cmd_line.sh
+++ b/scripts/ci/pre_commit/pre_commit_breeze_cmd_line.sh
@@ -64,15 +64,16 @@ if (( MAX_LEN > MAX_SCREEN_WIDTH + 2 )); then
fi
BREEZE_RST_FILE="${AIRFLOW_SOURCES}/BREEZE.rst"
+readonly BREEZE_RST_FILE
-LEAD='^ \.\. START BREEZE HELP MARKER$'
-TAIL='^ \.\. END BREEZE HELP MARKER$'
+lead_marker='^ \.\. START BREEZE HELP MARKER$'
+tail_marker='^ \.\. END BREEZE HELP MARKER$'
-BEGIN_GEN=$(grep -n "${LEAD}" <"${BREEZE_RST_FILE}" | sed 's/\(.*\):.*/\1/g')
-END_GEN=$(grep -n "${TAIL}" <"${BREEZE_RST_FILE}" | sed 's/\(.*\):.*/\1/g')
-cat <(head -n "${BEGIN_GEN}" "${BREEZE_RST_FILE}") \
+beginning_of_generated_help_line_number=$(grep -n "${lead_marker}" <"${BREEZE_RST_FILE}" | sed 's/\(.*\):.*/\1/g')
+end_beginning_of_generated_help_line_number=$(grep -n "${tail_marker}" <"${BREEZE_RST_FILE}" | sed 's/\(.*\):.*/\1/g')
+cat <(head -n "${beginning_of_generated_help_line_number}" "${BREEZE_RST_FILE}") \
"${TMP_FILE}" \
- <(tail -n +"${END_GEN}" "${BREEZE_RST_FILE}") \
+ <(tail -n +"${end_beginning_of_generated_help_line_number}" "${BREEZE_RST_FILE}") \
>"${TMP_OUTPUT}"
mv "${TMP_OUTPUT}" "${BREEZE_RST_FILE}"
diff --git a/scripts/ci/pre_commit/pre_commit_build_providers_dependencies.sh b/scripts/ci/pre_commit/pre_commit_build_providers_dependencies.sh
index e5235e4b6d1c3..3aa606b6c5a4b 100755
--- a/scripts/ci/pre_commit/pre_commit_build_providers_dependencies.sh
+++ b/scripts/ci/pre_commit/pre_commit_build_providers_dependencies.sh
@@ -18,13 +18,18 @@
set -euo pipefail
PRE_COMMIT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+readonly PRE_COMMIT_DIR
+
AIRFLOW_SOURCES=$(cd "${PRE_COMMIT_DIR}/../../../" && pwd);
+readonly AIRFLOW_SOURCES
cd "${AIRFLOW_SOURCES}" || exit 1
+
export PRINT_INFO_FROM_SCRIPTS="false"
export SKIP_CHECK_REMOTE_IMAGE="true"
PYTHONPATH="$(pwd)"
export PYTHONPATH
+
find airflow/providers -name '*.py' -print0 | \
xargs -0 python3 tests/build_provider_packages_dependencies.py \
--provider-dependencies-file "airflow/providers/dependencies.json" \
diff --git a/scripts/ci/pre_commit/pre_commit_local_yml_mounts.sh b/scripts/ci/pre_commit/pre_commit_local_yml_mounts.sh
index 6e8b0a750ad00..dbf82b72412dd 100755
--- a/scripts/ci/pre_commit/pre_commit_local_yml_mounts.sh
+++ b/scripts/ci/pre_commit/pre_commit_local_yml_mounts.sh
@@ -22,20 +22,22 @@ export SKIP_CHECK_REMOTE_IMAGE="true"
. "$( dirname "${BASH_SOURCE[0]}" )/../libraries/_script_init.sh"
TMP_OUTPUT=$(mktemp)
+readonly TMP_OUTPUT
# Remove temp file if it's hanging around
traps::add_trap "rm -rf -- '${TMP_OUTPUT}' 2>/dev/null" EXIT HUP INT TERM
LOCAL_YML_FILE="${AIRFLOW_SOURCES}/scripts/ci/docker-compose/local.yml"
+readonly LOCAL_YML_FILE
-LEAD=' # START automatically generated volumes from LOCAL_MOUNTS in _local_mounts.sh'
-TAIL=' # END automatically generated volumes from LOCAL_MOUNTS in _local_mounts.sh'
+lead_marker=' # START automatically generated volumes from LOCAL_MOUNTS in _local_mounts.sh'
+tail_marker=' # END automatically generated volumes from LOCAL_MOUNTS in _local_mounts.sh'
local_mounts::generate_local_mounts_list " - ../../../"
-sed "/$LEAD/q" "${LOCAL_YML_FILE}" > "${TMP_OUTPUT}"
+sed "/$lead_marker/q" "${LOCAL_YML_FILE}" > "${TMP_OUTPUT}"
printf '%s\n' "${LOCAL_MOUNTS[@]}" >> "${TMP_OUTPUT}"
-sed -ne "/$TAIL/,\$ p" "${LOCAL_YML_FILE}" >> "${TMP_OUTPUT}"
+sed -ne "/$tail_marker/,\$ p" "${LOCAL_YML_FILE}" >> "${TMP_OUTPUT}"
mv "${TMP_OUTPUT}" "${LOCAL_YML_FILE}"
diff --git a/scripts/ci/pre_commit/pre_commit_mermaid.sh b/scripts/ci/pre_commit/pre_commit_mermaid.sh
index 4485cce9c3d1c..ebef846a460d6 100755
--- a/scripts/ci/pre_commit/pre_commit_mermaid.sh
+++ b/scripts/ci/pre_commit/pre_commit_mermaid.sh
@@ -16,7 +16,7 @@
# specific language governing permissions and limitations
# under the License.
set -euo pipefail
-export NO_TERMINAL_OUTPUT_FROM_SCRIPTS="true"
+export PRINT_INFO_FROM_SCRIPTS="false"
# shellcheck source=scripts/ci/libraries/_script_init.sh
. "$( dirname "${BASH_SOURCE[0]}" )/../libraries/_script_init.sh"
@@ -28,29 +28,38 @@ if ! command -v npm; then
exit 0
fi
-tmp_file="${CACHE_TMP_FILE_DIR}/tmp.mermaid"
+TMP_FILE="${CACHE_TMP_FILE_DIR}/tmp.mermaid"
+readonly TMP_FILE
+
cd "${AIRFLOW_SOURCES}"
MERMAID_INSTALLATION_DIR="${AIRFLOW_SOURCES}/.build/mermaid/"
+readonly MERMAID_INSTALLATION_DIR
+
MERMAID_CONFIG_FILE="${MERMAID_INSTALLATION_DIR}/mermaid-config.json"
+readonly MERMAID_CONFIG_FILE
+
MERMAID_CLI="${MERMAID_INSTALLATION_DIR}/node_modules/.bin/mmdc"
+readonly MERMAID_CLI
+
export NODE_VIRTUAL_ENV="${MERMAID_INSTALLATION_DIR}"
+readonly NODE_VIRTUAL_ENV
if [[ -f "${MERMAID_CLI}" ]]; then
- MERMAID_INSTALLED="true"
+ mermaid_installed="true"
else
- MERMAID_INSTALLED="false"
+ mermaid_installed="false"
fi
# shellcheck disable=SC2064
-traps::add_trap "rm -rf '${tmp_file}'" EXIT HUP INT TERM
+traps::add_trap "rm -rf '${TMP_FILE}'" EXIT HUP INT TERM
for file in "${@}"
do
basename_file=${AIRFLOW_SOURCES}/"$(dirname "${file}")/$(basename "${file}" .mermaid)"
md5sum_file="${basename_file}.md5"
if ! diff "${md5sum_file}" <(md5sum "${file}"); then
- if [[ ${MERMAID_INSTALLED} != "true" ]]; then
+ if [[ ${mermaid_installed} != "true" ]]; then
echo "Installing mermaid"
mkdir -p "${MERMAID_INSTALLATION_DIR}/node_modules"
pushd "${MERMAID_INSTALLATION_DIR}"
@@ -61,7 +70,7 @@ do
"themeCSS": ".label foreignObject { overflow: visible; }"
}
EOF
- MERMAID_INSTALLED="true"
+ mermaid_installed="true"
popd
fi
echo "Running generation for ${file}"
@@ -70,11 +79,11 @@ EOF
# unfortunately mermaid does not handle well multiline comments and we need licence comment
# Stripping them manually :(. Multiline comments are coming in the future
# https://github.com/mermaid-js/mermaid/issues/1249
- grep -v "^%%" <"${file}" > "${tmp_file}"
+ grep -v "^%%" <"${file}" > "${TMP_FILE}"
mkdir -p "${MERMAID_INSTALLATION_DIR}"
"${MERMAID_CLI}" \
- -i "${tmp_file}" \
+ -i "${TMP_FILE}" \
-w 2048 \
-o "${basename_file}.png" \
-c "${MERMAID_CONFIG_FILE}"
diff --git a/scripts/ci/pre_commit/pre_commit_setup_cfg_file.sh b/scripts/ci/pre_commit/pre_commit_setup_cfg_file.sh
index ede294cc71450..e741f9ea4b6ef 100755
--- a/scripts/ci/pre_commit/pre_commit_setup_cfg_file.sh
+++ b/scripts/ci/pre_commit/pre_commit_setup_cfg_file.sh
@@ -16,26 +16,36 @@
# specific language governing permissions and limitations
# under the License.
set -euo pipefail
+export PRINT_INFO_FROM_SCRIPTS="false"
+readonly PRINT_INFO_FROM_SCRIPTS
+
PRE_COMMIT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+readonly PRE_COMMIT_DIR
+
AIRFLOW_SOURCES=$(cd "${PRE_COMMIT_DIR}/../../../" && pwd);
+readonly AIRFLOW_SOURCES
+
cd "${AIRFLOW_SOURCES}" || exit 1
-export PRINT_INFO_FROM_SCRIPTS="false"
TMP_FILE=$(mktemp)
+readonly TMP_FILE
+
TMP_OUTPUT=$(mktemp)
+readonly TMP_OUTPUT
find "licenses" -type f -exec echo " " {} \; | LC_ALL=C sort >>"${TMP_FILE}"
SETUP_CFG_FILE="${AIRFLOW_SOURCES}/setup.cfg"
+readonly SETUP_CFG_FILE
-LEAD='^# Start of licenses generated automatically$'
-TAIL='^# End of licences generated automatically$'
+lead_marker='^# Start of licenses generated automatically$'
+tail_marker='^# End of licences generated automatically$'
-BEGIN_GEN=$(grep -n "${LEAD}" <"${SETUP_CFG_FILE}" | sed 's/\(.*\):.*/\1/g')
-END_GEN=$(grep -n "${TAIL}" <"${SETUP_CFG_FILE}" | sed 's/\(.*\):.*/\1/g')
-cat <(head -n "${BEGIN_GEN}" "${SETUP_CFG_FILE}") \
+beginning_of_generated_help_line_number=$(grep -n "${lead_marker}" <"${SETUP_CFG_FILE}" | sed 's/\(.*\):.*/\1/g')
+end_of_generated_help_line_number=$(grep -n "${tail_marker}" <"${SETUP_CFG_FILE}" | sed 's/\(.*\):.*/\1/g')
+cat <(head -n "${beginning_of_generated_help_line_number}" "${SETUP_CFG_FILE}") \
"${TMP_FILE}" \
- <(tail -n +"${END_GEN}" "${SETUP_CFG_FILE}") \
+ <(tail -n +"${end_of_generated_help_line_number}" "${SETUP_CFG_FILE}") \
>"${TMP_OUTPUT}"
mv "${TMP_OUTPUT}" "${SETUP_CFG_FILE}"
| Part of #10576
---
**^ Add meaningful description above**
Read the **[Pull Request Guidelines](https://github.com/apache/airflow/blob/master/CONTRIBUTING.rst#pull-request-guidelines)** for more information.
In case of fundamental code change, Airflow Improvement Proposal ([AIP](https://cwiki.apache.org/confluence/display/AIRFLOW/Airflow+Improvements+Proposals)) is needed.
In case of a new dependency, check compliance with the [ASF 3rd Party License Policy](https://www.apache.org/legal/resolved.html#category-x).
In case of backwards incompatible changes please leave a note in [UPDATING.md](https://github.com/apache/airflow/blob/master/UPDATING.md).
| https://api.github.com/repos/apache/airflow/pulls/10748 | 2020-09-05T09:24:09Z | 2020-09-06T18:00:55Z | 2020-09-06T18:00:55Z | 2020-11-14T16:36:27Z | 2,670 | apache/airflow | 14,118 |
Retry failures to start boulder | diff --git a/.travis.yml b/.travis.yml
index 0ca0f3b5c8f..b33019ecbff 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -161,7 +161,9 @@ addons:
- libapache2-mod-macro
install: "travis_retry pip install tox coveralls"
-script: 'travis_retry tox && ([ "xxx$BOULDER_INTEGRATION" = "xxx" ] || ./tests/travis-integration.sh)'
+script:
+ - travis_retry tox
+ - '[ -z "${BOULDER_INTEGRATION+x}" ] || (travis_retry tests/boulder-fetch.sh && tests/tox-boulder-integration.sh)'
after_success: '[ "$TOXENV" == "cover" ] && coveralls'
diff --git a/tests/boulder-fetch.sh b/tests/boulder-fetch.sh
index 60538362e48..08eb736c2ef 100755
--- a/tests/boulder-fetch.sh
+++ b/tests/boulder-fetch.sh
@@ -17,3 +17,9 @@ FAKE_DNS=$(ifconfig docker0 | grep "inet addr:" | cut -d: -f2 | awk '{ print $1}
[ -z "$FAKE_DNS" ] && echo Unable to find the IP for docker0 && exit 1
sed -i "s/FAKE_DNS: .*/FAKE_DNS: ${FAKE_DNS}/" docker-compose.yml
docker-compose up -d
+
+set +x # reduce verbosity while waiting for boulder
+until curl http://localhost:4000/directory 2>/dev/null; do
+ echo waiting for boulder
+ sleep 1
+done
diff --git a/tests/tox-boulder-integration.sh b/tests/tox-boulder-integration.sh
new file mode 100755
index 00000000000..8c8a967fd49
--- /dev/null
+++ b/tests/tox-boulder-integration.sh
@@ -0,0 +1,12 @@
+#!/bin/bash -e
+# A simple wrapper around tests/boulder-integration.sh that activates the tox
+# virtual environment defined by the environment variable TOXENV before running
+# integration tests.
+
+if [ -z "${TOXENV+x}" ]; then
+ echo "The environment variable TOXENV must be set to use this script!" >&2
+ exit 1
+fi
+
+source .tox/$TOXENV/bin/activate
+tests/boulder-integration.sh
diff --git a/tests/travis-integration.sh b/tests/travis-integration.sh
deleted file mode 100755
index b42617400b1..00000000000
--- a/tests/travis-integration.sh
+++ /dev/null
@@ -1,14 +0,0 @@
-#!/bin/bash
-
-set -o errexit
-
-./tests/boulder-fetch.sh
-
-source .tox/$TOXENV/bin/activate
-
-until curl http://boulder:4000/directory 2>/dev/null; do
- echo waiting for boulder
- sleep 1
-done
-
-./tests/boulder-integration.sh
| Occasionally a network error prevents Docker from starting boulder, causing Travis tests to fail, as it did at https://travis-ci.org/certbot/certbot/jobs/282923098. This works around the problem by using travis_retry to try starting boulder again if it fails.
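The wait logic that moves into tests/boulder-fetch.sh (see the next paragraph) is just poll-until-the-ACME-directory-answers; a rough Python rendering of the same idea, purely to illustrate what the shell `until curl ...; do sleep 1; done` loop does:

```python
import time
import urllib.request


def wait_for_boulder(url="http://localhost:4000/directory", timeout=300):
    # Poll boulder's directory endpoint until it starts answering.
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        try:
            with urllib.request.urlopen(url) as resp:
                if resp.status == 200:
                    return True
        except OSError:
            print("waiting for boulder")
        time.sleep(1)
    return False
```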
This also moves the logic of waiting for boulder to start into tests/boulder-fetch.sh so people running integration tests locally can benefit. | https://api.github.com/repos/certbot/certbot/pulls/5176 | 2017-10-06T01:06:21Z | 2017-10-13T00:00:14Z | 2017-10-13T00:00:14Z | 2017-10-13T00:00:16Z | 706 | certbot/certbot | 1,824 |
bug(replays): Do not double-encode replay `?referrer=` query param | diff --git a/static/app/components/events/eventReplay/replayContent.spec.tsx b/static/app/components/events/eventReplay/replayContent.spec.tsx
index 90ba09c9e61d82..f9b1f69302ff54 100644
--- a/static/app/components/events/eventReplay/replayContent.spec.tsx
+++ b/static/app/components/events/eventReplay/replayContent.spec.tsx
@@ -22,7 +22,7 @@ const mockEvent = {
};
const mockButtonHref =
- '/organizations/sentry-emerging-tech/replays/replays:761104e184c64d439ee1014b72b4d83b/?referrer=%252Forganizations%252F%253AorgId%252Fissues%252F%253AgroupId%252Freplays%252F&t=62&t_main=console';
+ '/organizations/sentry-emerging-tech/replays/replays:761104e184c64d439ee1014b72b4d83b/?referrer=%2Forganizations%2F%3AorgId%2Fissues%2F%3AgroupId%2Freplays%2F&t=62&t_main=console';
// Mock screenfull library
jest.mock('screenfull', () => ({
diff --git a/static/app/components/events/eventReplay/replayContent.tsx b/static/app/components/events/eventReplay/replayContent.tsx
index 213e7baeeb0118..01f56b3a1b0ffe 100644
--- a/static/app/components/events/eventReplay/replayContent.tsx
+++ b/static/app/components/events/eventReplay/replayContent.tsx
@@ -62,7 +62,7 @@ function ReplayContent({orgSlug, replaySlug, event}: Props) {
const fullReplayUrl = {
pathname: `/organizations/${orgSlug}/replays/${replaySlug}/`,
query: {
- referrer: encodeURIComponent(getRouteStringFromRoutes(routes)),
+ referrer: getRouteStringFromRoutes(routes),
t_main: 'console',
t: initialTimeOffset,
},
diff --git a/static/app/views/eventsV2/table/tableView.tsx b/static/app/views/eventsV2/table/tableView.tsx
index 01ec26ff0b2841..c1634109c81f13 100644
--- a/static/app/views/eventsV2/table/tableView.tsx
+++ b/static/app/views/eventsV2/table/tableView.tsx
@@ -305,10 +305,10 @@ class TableView extends Component<TableViewProps & WithRouterProps> {
} else if (columnKey === 'replayId') {
if (dataRow.replayId) {
const replaySlug = `${dataRow['project.name']}:${dataRow.replayId}`;
- const referrer = encodeURIComponent(getRouteStringFromRoutes(this.props.routes));
+ const referrer = getRouteStringFromRoutes(this.props.routes);
const target = {
- pathname: `/organizations/${organization.slug}/replays/${replaySlug}`,
+ pathname: `/organizations/${organization.slug}/replays/${replaySlug}/`,
query: {
referrer,
},
diff --git a/static/app/views/performance/transactionSummary/utils.tsx b/static/app/views/performance/transactionSummary/utils.tsx
index 026e94f2b6d026..6c7ced4b519c19 100644
--- a/static/app/views/performance/transactionSummary/utils.tsx
+++ b/static/app/views/performance/transactionSummary/utils.tsx
@@ -147,11 +147,11 @@ export function generateReplayLink(routes: PlainRoute<any>[]) {
}
const replaySlug = `${tableRow['project.name']}:${replayId}`;
- const referrer = encodeURIComponent(getRouteStringFromRoutes(routes));
+ const referrer = getRouteStringFromRoutes(routes);
if (!tableRow.timestamp) {
return {
- pathname: `/organizations/${organization.slug}/replays/${replaySlug}`,
+ pathname: `/organizations/${organization.slug}/replays/${replaySlug}/`,
query: {
referrer,
},
@@ -164,7 +164,7 @@ export function generateReplayLink(routes: PlainRoute<any>[]) {
transactionTimestamp - (tableRow['transaction.duration'] as number);
return {
- pathname: `/organizations/${organization.slug}/replays/${replaySlug}`,
+ pathname: `/organizations/${organization.slug}/replays/${replaySlug}/`,
query: {
event_t: transactionStartTimestamp,
referrer,
diff --git a/static/app/views/replays/replayTable.tsx b/static/app/views/replays/replayTable.tsx
index fa12af5ff76a7b..fff968297a4ccb 100644
--- a/static/app/views/replays/replayTable.tsx
+++ b/static/app/views/replays/replayTable.tsx
@@ -100,7 +100,7 @@ function ReplayTable({
showSlowestTxColumn = false,
}: Props) {
const routes = useRoutes();
- const referrer = encodeURIComponent(getRouteStringFromRoutes(routes));
+ const referrer = getRouteStringFromRoutes(routes);
const organization = useOrganization();
const theme = useTheme();
@@ -222,7 +222,12 @@ function ReplayTableRow({
avatarSize={32}
displayName={
<Link
- to={`/organizations/${organization.slug}/replays/${project?.slug}:${replay.id}/?referrer=${referrer}`}
+ to={{
+ pathname: `/organizations/${organization.slug}/replays/${project?.slug}:${replay.id}/`,
+ query: {
+ referrer,
+ },
+ }}
>
{replay.user.displayName || ''}
</Link>
| Before we would manually call `encodeURIComponent` and then also render `<Link>` with a `LocationDescriptorObject`.
When you use a LocationDescriptorObject, all query params are automatically URI-encoded. The result is that our URLs were double-encoded and looked like this:
<img width="413" alt="url - before" src="https://user-images.githubusercontent.com/187460/197599503-fa0674bc-5ae1-40db-a896-fe74ee986266.png">
Notice the `%252f`
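For the curious, `%252f` is just `%2f` whose percent sign got escaped a second time; a quick sketch of the mechanics (in Python for brevity — the application code is TypeScript):

```python
from urllib.parse import quote

referrer = "/organizations/:orgId/issues/"
once = quote(referrer, safe="")   # what the router does for us
twice = quote(once, safe="")      # what the extra encodeURIComponent added

print(once)   # %2Forganizations%2F%3AorgId%2Fissues%2F
print(twice)  # %252Forganizations%252F%253AorgId%252Fissues%252F
```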
When we logged the data before, we would log the URL-encoded string:
<img width="736" alt="log - before" src="https://user-images.githubusercontent.com/187460/197599515-7ed8822d-be3e-468d-9802-0492a4ac8297.png">
Going forward we can use LocationDescriptorObject everywhere and avoid calling encodeURIComponent manually.
Fixed:
<img width="485" alt="url - after" src="https://user-images.githubusercontent.com/187460/197599436-0b725eb4-11dd-471a-bdd6-168eef222ee1.png">
<img width="822" alt="log - after" src="https://user-images.githubusercontent.com/187460/197599456-27c30bf9-9ae5-4d2e-85b0-bd22d47957b5.png">
Fixes #39706 | https://api.github.com/repos/getsentry/sentry/pulls/40455 | 2022-10-24T18:32:21Z | 2022-10-25T15:19:24Z | 2022-10-25T15:19:24Z | 2022-11-10T00:02:47Z | 1,238 | getsentry/sentry | 44,042 |
Added more SNS related headers | diff --git a/localstack/services/sns/sns_listener.py b/localstack/services/sns/sns_listener.py
index 39b5b7495ebc1..d6ebf7627e5bd 100644
--- a/localstack/services/sns/sns_listener.py
+++ b/localstack/services/sns/sns_listener.py
@@ -222,7 +222,12 @@ def publish_message(topic_arn, req_data, subscription_arn=None):
subscriber['Endpoint'],
headers={
'Content-Type': 'text/plain',
- 'x-amz-sns-message-type': msg_type
+ # AWS headers according to
+ # https://docs.aws.amazon.com/sns/latest/dg/sns-message-and-json-formats.html#http-header
+ 'x-amz-sns-message-type': msg_type,
+ 'x-amz-sns-topic-arn': subscriber['TopicArn'],
+ 'x-amz-sns-subscription-arn': subscriber['SubscriptionArn'],
+ 'User-Agent': 'Amazon Simple Notification Service Agent'
},
data=message_body,
verify=False)
| **Please refer to the contribution guidelines in the README when submitting PRs.**
| https://api.github.com/repos/localstack/localstack/pulls/1905 | 2019-12-27T19:45:22Z | 2019-12-30T17:39:35Z | 2019-12-30T17:39:35Z | 2019-12-30T17:39:35Z | 237 | localstack/localstack | 28,549 |
Fix another case where we format dummy implementation for non-functions/classes | diff --git a/CHANGES.md b/CHANGES.md
index 62caea41c31..dcf6613b70c 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -21,7 +21,7 @@
docstring (#4060)
- Fix crash in preview mode when using a short `--line-length` (#4086)
- Keep suites consisting of only an ellipsis on their own lines if they are not
- functions or class definitions (#4066)
+ functions or class definitions (#4066) (#4103)
### Configuration
diff --git a/src/black/linegen.py b/src/black/linegen.py
index 6934823d340..245be235231 100644
--- a/src/black/linegen.py
+++ b/src/black/linegen.py
@@ -42,6 +42,7 @@
is_atom_with_invisible_parens,
is_docstring,
is_empty_tuple,
+ is_function_or_class,
is_lpar_token,
is_multiline_string,
is_name_token,
@@ -299,11 +300,12 @@ def visit_simple_stmt(self, node: Node) -> Iterator[Line]:
wrap_in_parentheses(node, child, visible=False)
prev_type = child.type
- is_suite_like = node.parent and node.parent.type in STATEMENT
- if is_suite_like:
- if (
- self.mode.is_pyi or Preview.dummy_implementations in self.mode
- ) and is_stub_body(node):
+ if node.parent and node.parent.type in STATEMENT:
+ if Preview.dummy_implementations in self.mode:
+ condition = is_function_or_class(node.parent)
+ else:
+ condition = self.mode.is_pyi
+ if condition and is_stub_body(node):
yield from self.visit_default(node)
else:
yield from self.line(+1)
diff --git a/src/black/nodes.py b/src/black/nodes.py
index 9b8d9a97835..a4f555b4032 100644
--- a/src/black/nodes.py
+++ b/src/black/nodes.py
@@ -736,15 +736,18 @@ def is_funcdef(node: Node) -> bool:
return node.type == syms.funcdef
+def is_function_or_class(node: Node) -> bool:
+ return node.type in {syms.funcdef, syms.classdef, syms.async_funcdef}
+
+
def is_stub_suite(node: Node, mode: Mode) -> bool:
"""Return True if `node` is a suite with a stub body."""
- if node.parent is not None:
- if Preview.dummy_implementations in mode and node.parent.type not in (
- syms.funcdef,
- syms.async_funcdef,
- syms.classdef,
- ):
- return False
+ if (
+ node.parent is not None
+ and Preview.dummy_implementations in mode
+ and not is_function_or_class(node.parent)
+ ):
+ return False
# If there is a comment, we want to keep it.
if node.prefix.strip():
diff --git a/tests/data/cases/preview_dummy_implementations.py b/tests/data/cases/preview_dummy_implementations.py
index 113ac36cdc5..28b23bb8609 100644
--- a/tests/data/cases/preview_dummy_implementations.py
+++ b/tests/data/cases/preview_dummy_implementations.py
@@ -56,6 +56,8 @@ def has_comment():
if some_condition:
...
+if already_dummy: ...
+
# output
from typing import NoReturn, Protocol, Union, overload
@@ -116,3 +118,6 @@ def has_comment(): ... # still a dummy
if some_condition:
...
+
+if already_dummy:
+ ...
| https://api.github.com/repos/psf/black/pulls/4103 | 2023-12-11T21:50:30Z | 2023-12-11T22:41:41Z | 2023-12-11T22:41:41Z | 2023-12-11T22:41:45Z | 844 | psf/black | 23,719 |
|
add references to typescript package | diff --git a/README.md b/README.md
index 40ea0b52ec691..2517b78ba4352 100644
--- a/README.md
+++ b/README.md
@@ -6,6 +6,8 @@ PyPI:
- LlamaIndex: https://pypi.org/project/llama-index/.
- GPT Index (duplicate): https://pypi.org/project/gpt-index/.
+LlamaIndex.TS (Typescript/Javascript): https://github.com/run-llama/LlamaIndexTS.
+
Documentation: https://gpt-index.readthedocs.io/.
Twitter: https://twitter.com/llama_index.
diff --git a/docs/index.rst b/docs/index.rst
index 96956c9f3486d..064eef0d159e1 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -48,6 +48,8 @@ Our documentation includes detailed `Installation Instructions <./getting_starte
Once you're up and running, `High-Level Concepts <./getting_started/concepts.html>`_ has an overview of LlamaIndex's modular architecture. For more hands-on practical examples, look through our `End-to-End Tutorials <./end_to_end_tutorials/use_cases.html>`_ or learn how to `customize <./getting_started/customization.html>`_ components to fit your specific needs.
+**NOTE**: We have a Typescript package too! [[Repo]](https://github.com/run-llama/LlamaIndexTS) [[Docs]](https://ts.llamaindex.ai/)
+
๐บ๏ธ Ecosystem
************
@@ -59,6 +61,11 @@ To download or contribute, find LlamaIndex on:
- LlamaIndex: https://pypi.org/project/llama-index/.
- GPT Index (duplicate): https://pypi.org/project/gpt-index/.
+- NPM (Typescript/Javascript):
+ - Github: https://github.com/run-llama/LlamaIndexTS
+ - Docs: https://ts.llamaindex.ai/
+ - LlamaIndex.TS: https://www.npmjs.com/package/llamaindex
+
Community
---------
Need help? Have a feature suggestion? Join the LlamaIndex community:
| https://api.github.com/repos/run-llama/llama_index/pulls/7155 | 2023-08-04T14:56:07Z | 2023-08-04T15:16:52Z | 2023-08-04T15:16:52Z | 2023-08-04T15:16:53Z | 494 | run-llama/llama_index | 6,637 |
|
add header and update douyutv api fix StatusCode 403 | diff --git a/src/you_get/extractors/douyutv.py b/src/you_get/extractors/douyutv.py
index ae719e0d40..3b64066afc 100644
--- a/src/you_get/extractors/douyutv.py
+++ b/src/you_get/extractors/douyutv.py
@@ -40,31 +40,34 @@ def douyutv_download(url, output_dir = '.', merge = True, info_only = False, **k
douyutv_video_download(url, output_dir=output_dir, merge=merge, info_only=info_only, **kwargs)
return
- html = get_content(url)
+ headers = {
+ 'user-agent': 'Mozilla/5.0 (iPad; CPU OS 8_1_3 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) Version/8.0 Mobile/12B466 Safari/600.1.4'
+ }
+ html = get_content(url, headers)
room_id_patt = r'"room_id"\s*:\s*(\d+),'
room_id = match1(html, room_id_patt)
if room_id == "0":
room_id = url[url.rfind('/')+1:]
- json_request_url = "http://m.douyu.com/html5/live?roomId=%s" % room_id
- content = get_content(json_request_url)
+ api_url = "http://www.douyutv.com/api/v1/"
+ args = "room/%s?aid=wp&client_sys=wp&time=%d" % (room_id, int(time.time()))
+ auth_md5 = (args + "zNzMV1y4EMxOHS6I5WKm").encode("utf-8")
+ auth_str = hashlib.md5(auth_md5).hexdigest()
+ json_request_url = "%s%s&auth=%s" % (api_url, args, auth_str)
+
+ content = get_content(json_request_url, headers)
json_content = json.loads(content)
data = json_content['data']
server_status = json_content.get('error',0)
if server_status is not 0:
raise ValueError("Server returned error:%s" % server_status)
- room_info_url = "http://open.douyucdn.cn/api/RoomApi/room/%s" % room_id
- room_info_content = get_content(room_info_url)
- room_info_obj = json.loads(room_info_content)
- room_info_data = room_info_obj.get('data')
-
- title = room_info_data.get('room_name')
- show_status = room_info_data.get('room_status')
+ title = data.get('room_name')
+ show_status = data.get('show_status')
if show_status is not "1":
raise ValueError("The live stream is not online! (Errno:%s)" % server_status)
- real_url = data.get('hls_url')
+ real_url = data.get('rtmp_url') + '/' + data.get('rtmp_live')
print_info(site_info, title, 'flv', float('inf'))
if not info_only:
| Thank you for your pull request! `you-get` is a steadily growing open-source project; thank you for contributing.
A quick checklist for you to double-check:
- [X] If you intend to propose two or more unrelated patches, please open a separate pull request for each instead of a single one;
- [X] All pull requests should be based on the latest `develop` branch;
- [X] The branch you propose a pull request from should have a meaningful name, e.g. `add-this-shining-feature` rather than `develop`;
- [X] All commit messages and code comments should be written in understandable English.
As a contributor, you should be aware that
- [X] You agree to contribute your code under the MIT license, so that anyone may freely use or redistribute it; of course, you still retain the copyright to your code
- [X] You must not contribute code that you did not write yourself, unless it is in the public domain or licensed under MIT.
Not all pull requests get merged; however, I consider merged and unmerged patches equally important: if you think a patch matters, others may well think so too, and they can pick up the work from your fork and benefit from it. In any case, thank you for taking the trouble to contribute to this project.
Best regards,
Mort
| https://api.github.com/repos/soimort/you-get/pulls/2485 | 2017-11-08T06:41:52Z | 2017-11-17T15:54:31Z | 2017-11-17T15:54:31Z | 2017-11-17T16:01:19Z | 709 | soimort/you-get | 20,990 |
Replacing flake8 with ruff in main.yml | diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
index c1ac2548a..b822fb522 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/main.yml
@@ -20,14 +20,14 @@ jobs:
- name: Install Dependencies
run: |
python -m pip install --upgrade pip
- pip install ruff flake8 pytest
+ pip install ruff pytest
if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
- name: Lint with ruff
run: |
# stop the build if there are Python syntax errors or undefined names
ruff . --format=github --select=E9,F63,F7,F82
# exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
- flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
+ ruff . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
- name: Sherlock Site Detect Tests
run: |
cd sherlock && python -m unittest tests.all.SherlockDetectTests --verbose
| Replaced flake8 with ruff. | https://api.github.com/repos/sherlock-project/sherlock/pulls/1760 | 2023-03-29T13:00:03Z | 2023-03-29T13:01:08Z | 2023-03-29T13:01:08Z | 2023-12-21T09:00:52Z | 275 | sherlock-project/sherlock | 36,377
Fix json serialization in Lambda layer | diff --git a/keras/layers/core.py b/keras/layers/core.py
index 0846d0880b1..e5b9aa69b01 100644
--- a/keras/layers/core.py
+++ b/keras/layers/core.py
@@ -460,9 +460,9 @@ def get_config(self):
if isinstance(self._output_shape, python_types.LambdaType):
if py3:
- output_shape = marshal.dumps(self._output_shape.__code__)
+ output_shape = marshal.dumps(self._output_shape.__code__).decode('raw_unicode_escape')
else:
- output_shape = marshal.dumps(self._output_shape.func_code)
+ output_shape = marshal.dumps(self._output_shape.func_code).decode('raw_unicode_escape')
output_shape_type = 'lambda'
elif callable(self._output_shape):
output_shape = self._output_shape.__name__
@@ -494,7 +494,7 @@ def from_config(cls, config):
if output_shape_type == 'function':
output_shape = globals()[config['output_shape']]
elif output_shape_type == 'lambda':
- output_shape = marshal.loads(config['output_shape'])
+ output_shape = marshal.loads(config['output_shape'].encode('raw_unicode_escape'))
output_shape = python_types.FunctionType(output_shape, globals())
else:
output_shape = config['output_shape']
| Fix #2582
Fix #3001
here is a sample code: https://gist.github.com/henry0312/e183da836489eb9a548a9bd095ec9bbb
| https://api.github.com/repos/keras-team/keras/pulls/3012 | 2016-06-18T02:46:44Z | 2016-06-18T04:26:46Z | 2016-06-18T04:26:46Z | 2016-11-19T15:06:32Z | 297 | keras-team/keras | 47,663 |
Update print_colors.py | diff --git a/Colors/print_colors.py b/Colors/print_colors.py
index 8a86356a1b..b1e90e7447 100644
--- a/Colors/print_colors.py
+++ b/Colors/print_colors.py
@@ -10,7 +10,7 @@ class colors:
def printc(color, message):
print(color + message + colors.ENDC)
-
+#color which we print or import
printc(colors.CYAN, sys.argv[1])
printc(colors.GREEN, sys.argv[1])
printc(colors.YELLOW, sys.argv[1])
| Description for colors.py | https://api.github.com/repos/geekcomputers/Python/pulls/1397 | 2021-10-01T15:19:41Z | 2021-10-16T12:03:00Z | 2021-10-16T12:03:00Z | 2021-10-16T12:03:00Z | 126 | geekcomputers/Python | 31,775 |
fix(browser-starfish): don't group by file_extension resource module | diff --git a/static/app/views/performance/browser/resources/jsCssView/resourceTable.tsx b/static/app/views/performance/browser/resources/jsCssView/resourceTable.tsx
index 6fbf9ce5fd732..967dcbf455d67 100644
--- a/static/app/views/performance/browser/resources/jsCssView/resourceTable.tsx
+++ b/static/app/views/performance/browser/resources/jsCssView/resourceTable.tsx
@@ -40,7 +40,6 @@ const {SPM} = SpanFunction;
type Row = {
'avg(http.response_content_length)': number;
'avg(span.self_time)': number;
- file_extension: string;
'http.decoded_response_content_length': number;
'project.id': number;
'resource.render_blocking_status': string;
@@ -133,7 +132,7 @@ function ResourceTable({sort, defaultResourceTypes}: Props) {
return <DurationCell milliseconds={row[key]} />;
}
if (key === SPAN_OP) {
- const fileExtension = row[FILE_EXTENSION];
+ const fileExtension = row[SPAN_DESCRIPTION].split('.').pop() || '';
const spanOp = row[key];
if (fileExtension === 'js' || spanOp === 'resource.script') {
return <span>{t('JavaScript')}</span>;
diff --git a/static/app/views/performance/browser/resources/utils/useResourcesQuery.ts b/static/app/views/performance/browser/resources/utils/useResourcesQuery.ts
index 8c77c4bb5997d..61d191efccb27 100644
--- a/static/app/views/performance/browser/resources/utils/useResourcesQuery.ts
+++ b/static/app/views/performance/browser/resources/utils/useResourcesQuery.ts
@@ -80,7 +80,6 @@ export const useResourcesQuery = ({sort, defaultResourceTypes, query, limit}: Pr
'project.id',
`${TIME_SPENT_PERCENTAGE}()`,
`sum(${SPAN_SELF_TIME})`,
- FILE_EXTENSION,
],
name: 'Resource module - resource table',
query: queryConditions.join(' '),
@@ -124,7 +123,6 @@ export const useResourcesQuery = ({sort, defaultResourceTypes, query, limit}: Pr
[`time_spent_percentage()`]: row[`${TIME_SPENT_PERCENTAGE}()`] as number,
['count_unique(transaction)']: row['count_unique(transaction)'] as number,
[`sum(span.self_time)`]: row[`sum(${SPAN_SELF_TIME})`] as number,
- [FILE_EXTENSION]: row[FILE_EXTENSION]?.toString(),
}));
return {...result, data: data || []};
| We can't group by file_extension here because we only recently added that tag; grouping by extension results in duplicate entries for the same resource: one entry for rows that are missing the extension and one for rows that have it.
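Instead, the table now derives the extension from the span description at render time; roughly (Python for brevity — the app code is TypeScript, and the example URLs are made up):

```python
def file_extension(description: str) -> str:
    # Rough analogue of `row[SPAN_DESCRIPTION].split('.').pop() || ''`
    # from the diff, with an explicit no-extension guard.
    parts = description.split(".")
    return parts[-1] if len(parts) > 1 else ""


assert file_extension("https://cdn.example.com/app.bundle.js") == "js"
assert file_extension("inline-script") == ""
```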
I also removed the alpha badges in this PR
Before
<img width="312" alt="image" src="https://github.com/getsentry/sentry/assets/44422760/e1f86485-eb71-4338-974a-ba88652c4365">
After
<img width="371" alt="image" src="https://github.com/getsentry/sentry/assets/44422760/939fb2a9-9fac-434c-9005-8e681897ca5a"> | https://api.github.com/repos/getsentry/sentry/pulls/60314 | 2023-11-20T21:42:21Z | 2023-11-20T22:12:54Z | 2023-11-20T22:12:54Z | 2024-03-15T21:09:10Z | 555 | getsentry/sentry | 44,679 |
[rutv] fix vbr for empty string value | diff --git a/youtube_dl/extractor/rutv.py b/youtube_dl/extractor/rutv.py
index d2713c19a05..05f3193968c 100644
--- a/youtube_dl/extractor/rutv.py
+++ b/youtube_dl/extractor/rutv.py
@@ -6,7 +6,8 @@
from .common import InfoExtractor
from ..utils import (
ExtractorError,
- int_or_none
+ int_or_none,
+ str_to_int
)
@@ -179,7 +180,7 @@ def _real_extract(self, url):
'player_url': 'http://player.rutv.ru/flash3v/osmf.swf?i=22',
'rtmp_live': True,
'ext': 'flv',
- 'vbr': int(quality),
+ 'vbr': str_to_int(quality),
'preference': preference,
}
elif transport == 'm3u8':
| ## Please follow the guide below
- You will be asked some questions, please read them **carefully** and answer honestly
- Put an `x` into all the boxes [ ] relevant to your *pull request* (like that [x])
- Use *Preview* tab to see how your *pull request* will actually look like
---
### Before submitting a *pull request* make sure you have:
- [x] [Searched](https://github.com/ytdl-org/youtube-dl/search?q=is%3Apr&type=Issues) the bugtracker for similar pull requests
- [x] Read [adding new extractor tutorial](https://github.com/ytdl-org/youtube-dl#adding-support-for-a-new-site)
- [x] Read [youtube-dl coding conventions](https://github.com/ytdl-org/youtube-dl#youtube-dl-coding-conventions) and adjusted the code to meet them
- [x] Covered the code with tests (note that PRs without tests will be REJECTED)
- [x] Checked the code with [flake8](https://pypi.python.org/pypi/flake8)
### In order to be accepted and merged into youtube-dl each piece of code must be in public domain or released under [Unlicense](http://unlicense.org/). Check one of the following options:
- [x] I am the original author of this code and I am willing to release it under [Unlicense](http://unlicense.org/)
- [x] I am not the original author of this code but it is in public domain or released under [Unlicense](http://unlicense.org/) (provide reliable evidence)
### What is the purpose of your *pull request*?
- [x] Bug fix
- [x] Improvement
- [ ] New extractor
- [ ] New feature
---
### Description of your *pull request* and other information
https://smotrim.ru/live/channel/2961
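For context, the old `int(quality)` call blows up on that response, while a `str_to_int`-style helper degrades to `None`; a rough standalone rendering of the difference (simplified stand-in, not the exact helper from `youtube_dl.utils`):

```python
import re


def str_to_int(int_str):
    # Simplified stand-in for youtube_dl.utils.str_to_int.
    if not int_str:
        return None
    int_str = re.sub(r'[,\.\+]', '', int_str)
    return int(int_str) if int_str.isdigit() else None


quality = ''  # what the API returns for this live stream
try:
    int(quality)  # old code path
except ValueError as exc:
    print('int() fails:', exc)

print(str_to_int(quality))  # None, so extraction continues with vbr unset
```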
It fails because the API returns quality as an empty string. | https://api.github.com/repos/ytdl-org/youtube-dl/pulls/30623 | 2022-02-09T15:43:01Z | 2022-02-14T17:54:31Z | 2022-02-14T17:54:31Z | 2023-03-01T02:40:03Z | 220 | ytdl-org/youtube-dl | 50,421
Improve long values in dict literals | diff --git a/CHANGES.md b/CHANGES.md
index f6040359623..de88e7edaf6 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -17,6 +17,9 @@
- Fix a crash in preview style with assert + parenthesized string (#3415)
- Do not put the closing quotes in a docstring on a separate line, even if the line is
too long (#3430)
+- Long values in dict literals are now wrapped in parentheses; correspondingly
+ unnecessary parentheses around short values in dict literals are now removed; long
+ string lambda values are now wrapped in parentheses (#3440)
### Configuration
diff --git a/src/black/linegen.py b/src/black/linegen.py
index 644824a3c86..244dbe77eb5 100644
--- a/src/black/linegen.py
+++ b/src/black/linegen.py
@@ -179,6 +179,23 @@ def visit_stmt(
yield from self.visit(child)
+ def visit_dictsetmaker(self, node: Node) -> Iterator[Line]:
+ if Preview.wrap_long_dict_values_in_parens in self.mode:
+ for i, child in enumerate(node.children):
+ if i == 0:
+ continue
+ if node.children[i - 1].type == token.COLON:
+ if child.type == syms.atom and child.children[0].type == token.LPAR:
+ if maybe_make_parens_invisible_in_atom(
+ child,
+ parent=node,
+ remove_brackets_around_comma=False,
+ ):
+ wrap_in_parentheses(node, child, visible=False)
+ else:
+ wrap_in_parentheses(node, child, visible=False)
+ yield from self.visit_default(node)
+
def visit_funcdef(self, node: Node) -> Iterator[Line]:
"""Visit function definition."""
if Preview.annotation_parens not in self.mode:
diff --git a/src/black/mode.py b/src/black/mode.py
index a3ce20b8619..bcd35b4d4be 100644
--- a/src/black/mode.py
+++ b/src/black/mode.py
@@ -157,8 +157,11 @@ class Preview(Enum):
one_element_subscript = auto()
remove_block_trailing_newline = auto()
remove_redundant_parens = auto()
+ # NOTE: string_processing requires wrap_long_dict_values_in_parens
+ # for https://github.com/psf/black/issues/3117 to be fixed.
string_processing = auto()
skip_magic_trailing_comma_in_subscript = auto()
+ wrap_long_dict_values_in_parens = auto()
class Deprecated(UserWarning):
diff --git a/src/black/trans.py b/src/black/trans.py
index 8893ab02aab..b08a6d243d8 100644
--- a/src/black/trans.py
+++ b/src/black/trans.py
@@ -1638,6 +1638,8 @@ class StringParenWrapper(BaseStringSplitter, CustomSplitMapMixin):
* The line is a dictionary key assignment where some valid key is being
assigned the value of some string.
OR
+ * The line is an lambda expression and the value is a string.
+ OR
* The line starts with an "atom" string that prefers to be wrapped in
parens. It's preferred to be wrapped when the string is surrounded by
commas (or is the first/last child).
@@ -1683,7 +1685,7 @@ def do_splitter_match(self, line: Line) -> TMatchResult:
or self._else_match(LL)
or self._assert_match(LL)
or self._assign_match(LL)
- or self._dict_match(LL)
+ or self._dict_or_lambda_match(LL)
or self._prefer_paren_wrap_match(LL)
)
@@ -1841,22 +1843,23 @@ def _assign_match(LL: List[Leaf]) -> Optional[int]:
return None
@staticmethod
- def _dict_match(LL: List[Leaf]) -> Optional[int]:
+ def _dict_or_lambda_match(LL: List[Leaf]) -> Optional[int]:
"""
Returns:
string_idx such that @LL[string_idx] is equal to our target (i.e.
matched) string, if this line matches the dictionary key assignment
- statement requirements listed in the 'Requirements' section of this
- classes' docstring.
+ statement or lambda expression requirements listed in the
+ 'Requirements' section of this classes' docstring.
OR
None, otherwise.
"""
- # If this line is apart of a dictionary key assignment...
- if syms.dictsetmaker in [parent_type(LL[0]), parent_type(LL[0].parent)]:
+ # If this line is a part of a dictionary key assignment or lambda expression...
+ parent_types = [parent_type(LL[0]), parent_type(LL[0].parent)]
+ if syms.dictsetmaker in parent_types or syms.lambdef in parent_types:
is_valid_index = is_valid_index_factory(LL)
for i, leaf in enumerate(LL):
- # We MUST find a colon...
+ # We MUST find a colon, it can either be dict's or lambda's colon...
if leaf.type == token.COLON:
idx = i + 2 if is_empty_par(LL[i + 1]) else i + 1
@@ -1951,6 +1954,25 @@ def do_transform(self, line: Line, string_idx: int) -> Iterator[TResult[Line]]:
f" (left_leaves={left_leaves}, right_leaves={right_leaves})"
)
old_rpar_leaf = right_leaves.pop()
+ elif right_leaves and right_leaves[-1].type == token.RPAR:
+ # Special case for lambda expressions as dict's value, e.g.:
+ # my_dict = {
+ # "key": lambda x: f"formatted: {x},
+ # }
+ # After wrapping the dict's value with parentheses, the string is
+ # followed by a RPAR but its opening bracket is lambda's, not
+ # the string's:
+ # "key": (lambda x: f"formatted: {x}),
+ opening_bracket = right_leaves[-1].opening_bracket
+ if opening_bracket is not None and opening_bracket in left_leaves:
+ index = left_leaves.index(opening_bracket)
+ if (
+ index > 0
+ and index < len(left_leaves) - 1
+ and left_leaves[index - 1].type == token.COLON
+ and left_leaves[index + 1].value == "lambda"
+ ):
+ right_leaves.pop()
append_leaves(string_line, line, right_leaves)
diff --git a/tests/data/preview/long_dict_values.py b/tests/data/preview/long_dict_values.py
new file mode 100644
index 00000000000..f23c5d3dad1
--- /dev/null
+++ b/tests/data/preview/long_dict_values.py
@@ -0,0 +1,53 @@
+my_dict = {
+ "something_something":
+ r"Lorem ipsum dolor sit amet, an sed convenire eloquentiam \t"
+ r"signiferumque, duo ea vocibus consetetur scriptorem. Facer \t"
+ r"signiferumque, duo ea vocibus consetetur scriptorem. Facer \t",
+}
+
+my_dict = {
+ "a key in my dict": a_very_long_variable * and_a_very_long_function_call() / 100000.0
+}
+
+my_dict = {
+ "a key in my dict": a_very_long_variable * and_a_very_long_function_call() * and_another_long_func() / 100000.0
+}
+
+my_dict = {
+ "a key in my dict": MyClass.some_attribute.first_call().second_call().third_call(some_args="some value")
+}
+
+
+# output
+
+
+my_dict = {
+ "something_something": (
+ r"Lorem ipsum dolor sit amet, an sed convenire eloquentiam \t"
+ r"signiferumque, duo ea vocibus consetetur scriptorem. Facer \t"
+ r"signiferumque, duo ea vocibus consetetur scriptorem. Facer \t"
+ ),
+}
+
+my_dict = {
+ "a key in my dict": (
+ a_very_long_variable * and_a_very_long_function_call() / 100000.0
+ )
+}
+
+my_dict = {
+ "a key in my dict": (
+ a_very_long_variable
+ * and_a_very_long_function_call()
+ * and_another_long_func()
+ / 100000.0
+ )
+}
+
+my_dict = {
+ "a key in my dict": (
+ MyClass.some_attribute.first_call()
+ .second_call()
+ .third_call(some_args="some value")
+ )
+}
diff --git a/tests/data/preview/long_strings.py b/tests/data/preview/long_strings.py
index 9288b253b60..9c78f675b8f 100644
--- a/tests/data/preview/long_strings.py
+++ b/tests/data/preview/long_strings.py
@@ -278,6 +278,15 @@ def foo():
"........................................................................... \\N{LAO KO LA}"
)
+msg = lambda x: f"this is a very very very long lambda value {x} that doesn't fit on a single line"
+
+dict_with_lambda_values = {
+ "join": lambda j: (
+ f"{j.__class__.__name__}({some_function_call(j.left)}, "
+ f"{some_function_call(j.right)})"
+ ),
+}
+
# output
@@ -362,9 +371,8 @@ def foo():
"A %s %s"
% ("formatted", "string"): (
"This is a really really really long string that has to go inside of a"
- " dictionary. It is %s bad (#%d)."
- )
- % ("soooo", 2),
+ " dictionary. It is %s bad (#%d)." % ("soooo", 2)
+ ),
}
D5 = { # Test for https://github.com/psf/black/issues/3261
@@ -806,3 +814,17 @@ def foo():
"..........................................................................."
" \\N{LAO KO LA}"
)
+
+msg = (
+ lambda x: (
+ f"this is a very very very long lambda value {x} that doesn't fit on a single"
+ " line"
+ )
+)
+
+dict_with_lambda_values = {
+ "join": lambda j: (
+ f"{j.__class__.__name__}({some_function_call(j.left)}, "
+ f"{some_function_call(j.right)})"
+ ),
+}
diff --git a/tests/data/preview/long_strings__regression.py b/tests/data/preview/long_strings__regression.py
index 8b00e76f40e..6d56dcc635d 100644
--- a/tests/data/preview/long_strings__regression.py
+++ b/tests/data/preview/long_strings__regression.py
@@ -524,6 +524,13 @@ async def foo(self):
},
)
+# Regression test for https://github.com/psf/black/issues/3117.
+some_dict = {
+ "something_something":
+ r"Lorem ipsum dolor sit amet, an sed convenire eloquentiam \t"
+ r"signiferumque, duo ea vocibus consetetur scriptorem. Facer \t",
+}
+
# output
@@ -1178,3 +1185,11 @@ async def foo(self):
),
},
)
+
+# Regression test for https://github.com/psf/black/issues/3117.
+some_dict = {
+ "something_something": (
+ r"Lorem ipsum dolor sit amet, an sed convenire eloquentiam \t"
+ r"signiferumque, duo ea vocibus consetetur scriptorem. Facer \t"
+ ),
+}
| <!-- Hello! Thanks for submitting a PR. To help make things go a bit more
smoothly we would appreciate that you go through this template. -->
### Description
<!-- Good things to put here include: reasoning for the change (please link
any relevant issues!), any noteworthy (or hacky) choices to be aware of,
or what the problem resolved here looked like ... we won't mind a ranty
story :) -->
This PR changes the preview style:
- Wraps long values in dict literals in parentheses. Fixes #620. Fixes #808.
- Correspondingly, parentheses around short values in dict literals are removed.
- In preview string processing, long lambda string values are now wrapped in parentheses.
- Wrapping dict values happens to fix #3117 where implicitly concatenated r-string dict values crash in string processing. Fixes #3117
Usually I'd like to keep a PR's scope minimal, but the lambda change and the dict-value change depend on each other, so it's hard to split them into two PRs.
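To make the dict-value change concrete, here is one of the new test cases from `tests/data/preview/long_dict_values.py` in this diff, before and after:

```python
# Input:
my_dict = {
    "a key in my dict": a_very_long_variable * and_a_very_long_function_call() / 100000.0
}

# Output under the new preview style: the long value is wrapped in parentheses
my_dict = {
    "a key in my dict": (
        a_very_long_variable * and_a_very_long_function_call() / 100000.0
    )
}
```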
### Checklist - did you ...
<!-- If any of the following items aren't relevant for your contribution
please still tick them so we know you've gone through the checklist.
All user-facing changes should get an entry. Otherwise, signal to us
this should get the magical label to silence the CHANGELOG entry check.
Tests are required for bugfixes and new features. Documentation changes
are necessary for formatting and most enhancement changes. -->
- [x] Add an entry in `CHANGES.md` if necessary?
- [x] Add / update tests if necessary?
- [x] Add new / update outdated documentation?
<!-- Just as a reminder, everyone in all psf/black spaces including PRs
must follow the PSF Code of Conduct (link below).
Finally, once again thanks for your time and effort. If you have any
feedback in regards to your experience contributing here, please
let us know!
Helpful links:
PSF COC: https://www.python.org/psf/conduct/
Contributing docs: https://black.readthedocs.io/en/latest/contributing/index.html
Chat on Python Discord: https://discord.gg/RtVdv86PrH -->
| https://api.github.com/repos/psf/black/pulls/3440 | 2022-12-14T07:09:54Z | 2022-12-15T16:25:28Z | 2022-12-15T16:25:28Z | 2022-12-16T01:44:05Z | 2,744 | psf/black | 23,746 |
Bump actions/cache from 3.2.6 to 3.3.1 | diff --git a/.github/workflows/tests.yaml b/.github/workflows/tests.yaml
index 73a32f29c8..55d57f8d0e 100644
--- a/.github/workflows/tests.yaml
+++ b/.github/workflows/tests.yaml
@@ -49,7 +49,7 @@ jobs:
pip install -U setuptools
python -m pip install -U pip
- name: cache mypy
- uses: actions/cache@69d9d449aced6a2ede0bc19182fadc3a0a42d2b0
+ uses: actions/cache@88522ab9f39a2ea568f7027eddc7d8d8bc9d59c8
with:
path: ./.mypy_cache
key: mypy|${{ matrix.python }}|${{ hashFiles('pyproject.toml') }}
| Bumps [actions/cache](https://github.com/actions/cache) from 3.2.6 to 3.3.1.
<details>
<summary>Release notes</summary>
<p><em>Sourced from <a href="https://github.com/actions/cache/releases">actions/cache's releases</a>.</em></p>
<blockquote>
<h2>v3.3.1</h2>
<h2>What's Changed</h2>
<ul>
<li>Reduced download segment size to 128 MB and timeout to 10 minutes by <a href="https://github.com/kotewar"><code>@kotewar</code></a> in <a href="https://redirect.github.com/actions/cache/pull/1129">actions/cache#1129</a></li>
</ul>
<p><strong>Full Changelog</strong>: <a href="https://github.com/actions/cache/compare/v3...v3.3.1">https://github.com/actions/cache/compare/v3...v3.3.1</a></p>
<h2>v3.3.0</h2>
<h2>What's Changed</h2>
<ul>
<li>Bug: Permission is missing in cache delete example by <a href="https://github.com/kotokaze"><code>@โkotokaze</code></a> in <a href="https://redirect.github.com/actions/cache/pull/1123">actions/cache#1123</a></li>
<li>Add <code>lookup-only</code> option by <a href="https://github.com/cdce8p"><code>@โcdce8p</code></a> in <a href="https://redirect.github.com/actions/cache/pull/1041">actions/cache#1041</a></li>
</ul>
<h2>New Contributors</h2>
<ul>
<li><a href="https://github.com/kotokaze"><code>@kotokaze</code></a> made their first contribution in <a href="https://redirect.github.com/actions/cache/pull/1123">actions/cache#1123</a></li>
</ul>
<p><strong>Full Changelog</strong>: <a href="https://github.com/actions/cache/compare/v3...v3.3.0">https://github.com/actions/cache/compare/v3...v3.3.0</a></p>
</blockquote>
</details>
<details>
<summary>Changelog</summary>
<p><em>Sourced from <a href="https://github.com/actions/cache/blob/main/RELEASES.md">actions/cache's changelog</a>.</em></p>
<blockquote>
<h1>Releases</h1>
<h3>3.0.0</h3>
<ul>
<li>Updated minimum runner version support from node 12 -> node 16</li>
</ul>
<h3>3.0.1</h3>
<ul>
<li>Added support for caching from GHES 3.5.</li>
<li>Fixed download issue for files > 2GB during restore.</li>
</ul>
<h3>3.0.2</h3>
<ul>
<li>Added support for dynamic cache size cap on GHES.</li>
</ul>
<h3>3.0.3</h3>
<ul>
<li>Fixed avoiding empty cache save when no files are available for caching. (<a href="https://redirect.github.com/actions/cache/issues/624">issue</a>)</li>
</ul>
<h3>3.0.4</h3>
<ul>
<li>Fixed tar creation error while trying to create tar with path as <code>~/</code> home folder on <code>ubuntu-latest</code>. (<a href="https://redirect.github.com/actions/cache/issues/689">issue</a>)</li>
</ul>
<h3>3.0.5</h3>
<ul>
<li>Removed error handling by consuming actions/cache 3.0 toolkit, Now cache server error handling will be done by toolkit. (<a href="https://redirect.github.com/actions/cache/pull/834">PR</a>)</li>
</ul>
<h3>3.0.6</h3>
<ul>
<li>Fixed <a href="https://redirect.github.com/actions/cache/issues/809">#809</a> - zstd -d: no such file or directory error</li>
<li>Fixed <a href="https://redirect.github.com/actions/cache/issues/833">#833</a> - cache doesn't work with github workspace directory</li>
</ul>
<h3>3.0.7</h3>
<ul>
<li>Fixed <a href="https://redirect.github.com/actions/cache/issues/810">#810</a> - download stuck issue. A new timeout is introduced in the download process to abort the download if it gets stuck and doesn't finish within an hour.</li>
</ul>
<h3>3.0.8</h3>
<ul>
<li>Fix zstd not working for windows on gnu tar in issues <a href="https://redirect.github.com/actions/cache/issues/888">#888</a> and <a href="https://redirect.github.com/actions/cache/issues/891">#891</a>.</li>
<li>Allowing users to provide a custom timeout as input for aborting download of a cache segment using an environment variable <code>SEGMENT_DOWNLOAD_TIMEOUT_MINS</code>. Default is 60 minutes.</li>
</ul>
<h3>3.0.9</h3>
<ul>
<li>Enhanced the warning message for cache unavailablity in case of GHES.</li>
</ul>
<h3>3.0.10</h3>
<ul>
<li>Fix a bug with sorting inputs.</li>
<li>Update definition for restore-keys in README.md</li>
</ul>
<!-- raw HTML omitted -->
</blockquote>
<p>... (truncated)</p>
</details>
<details>
<summary>Commits</summary>
<ul>
<li><a href="https://github.com/actions/cache/commit/88522ab9f39a2ea568f7027eddc7d8d8bc9d59c8"><code>88522ab</code></a> Reduced download segment size to 128 MB and timeout to 10 minutes (<a href="https://redirect.github.com/actions/cache/issues/1129">#1129</a>)</li>
<li><a href="https://github.com/actions/cache/commit/940f3d7cf195ba83374c77632d1e2cbb2f24ae68"><code>940f3d7</code></a> Add <code>lookup-only</code> option (<a href="https://redirect.github.com/actions/cache/issues/1041">#1041</a>)</li>
<li><a href="https://github.com/actions/cache/commit/e0d62270e20d6eeecf2fd6397a1b8871b6269e38"><code>e0d6227</code></a> docs: Add missing permission in cache delete example (<a href="https://redirect.github.com/actions/cache/issues/1123">#1123</a>)</li>
<li>See full diff in <a href="https://github.com/actions/cache/compare/69d9d449aced6a2ede0bc19182fadc3a0a42d2b0...88522ab9f39a2ea568f7027eddc7d8d8bc9d59c8">compare view</a></li>
</ul>
</details>
<br />
[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=actions/cache&package-manager=github_actions&previous-version=3.2.6&new-version=3.3.1)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)
Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.
[//]: # (dependabot-automerge-start)
[//]: # (dependabot-automerge-end)
---
<details>
<summary>Dependabot commands and options</summary>
<br />
You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually
- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
</details> | https://api.github.com/repos/pallets/flask/pulls/5040 | 2023-04-01T16:57:12Z | 2023-04-03T13:18:57Z | 2023-04-03T13:18:57Z | 2023-04-18T00:05:33Z | 198 | pallets/flask | 20,855 |
Extend chapter extraction for single chapter VODs | diff --git a/yt_dlp/extractor/twitch.py b/yt_dlp/extractor/twitch.py
index a0cb0be0266..43b4220fe6b 100644
--- a/yt_dlp/extractor/twitch.py
+++ b/yt_dlp/extractor/twitch.py
@@ -204,7 +204,13 @@ class TwitchVodIE(TwitchBaseIE):
'uploader_id': 'riotgames',
'view_count': int,
'start_time': 310,
- 'chapters': [],
+ 'chapters': [
+ {
+ 'start_time': 0,
+ 'end_time': 17208,
+ 'title': 'League of Legends'
+ }
+ ],
'live_status': 'was_live',
},
'params': {
@@ -321,6 +327,33 @@ class TwitchVodIE(TwitchBaseIE):
'format': 'mhtml',
'skip_download': True
}
+ }, {
+ 'note': 'VOD with single chapter',
+ 'url': 'https://www.twitch.tv/videos/1536751224',
+ 'info_dict': {
+ 'id': 'v1536751224',
+ 'ext': 'mp4',
+ 'title': 'Porter Robinson Star Guardian Stream Tour with LilyPichu',
+ 'duration': 8353,
+ 'uploader': 'Riot Games',
+ 'uploader_id': 'riotgames',
+ 'timestamp': 1658267731,
+ 'upload_date': '20220719',
+ 'chapters': [
+ {
+ 'start_time': 0,
+ 'end_time': 8353,
+ 'title': 'League of Legends'
+ }
+ ],
+ 'live_status': 'was_live',
+ 'thumbnail': r're:^https?://.*\.jpg$',
+ 'view_count': int,
+ },
+ 'params': {
+ 'skip_download': True
+ },
+ 'expected_warnings': ['Unable to download JSON metadata: HTTP Error 403: Forbidden']
}]
def _download_info(self, item_id):
@@ -392,8 +425,14 @@ def _extract_info(self, info):
'was_live': True,
}
- def _extract_moments(self, info, item_id):
- for moment in info.get('moments') or []:
+ def _extract_chapters(self, info, item_id):
+ if not info.get('moments'):
+ game = traverse_obj(info, ('game', 'displayName'))
+ if game:
+ yield {'title': game}
+ return
+
+ for moment in info['moments']:
start_time = int_or_none(moment.get('positionMilliseconds'), 1000)
duration = int_or_none(moment.get('durationMilliseconds'), 1000)
name = str_or_none(moment.get('description'))
@@ -432,7 +471,7 @@ def _extract_info_gql(self, info, item_id):
'uploader_id': try_get(info, lambda x: x['owner']['login'], compat_str),
'timestamp': unified_timestamp(info.get('publishedAt')),
'view_count': int_or_none(info.get('viewCount')),
- 'chapters': list(self._extract_moments(info, item_id)),
+ 'chapters': list(self._extract_chapters(info, item_id)),
'is_live': is_live,
'was_live': True,
}
| <details open><summary>Template</summary> <!-- OPEN is intentional -->
<!--
# PLEASE FOLLOW THE GUIDE BELOW
- You will be asked some questions, please read them **carefully** and answer honestly
- Put an `x` into all the boxes `[ ]` relevant to your *pull request* (like [x])
- Use *Preview* tab to see how your *pull request* will actually look like
-->
### Before submitting a *pull request* make sure you have:
- [x] At least skimmed through [contributing guidelines](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#developer-instructions) including [yt-dlp coding conventions](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#yt-dlp-coding-conventions)
- [x] [Searched](https://github.com/yt-dlp/yt-dlp/search?q=is%3Apr&type=Issues) the bugtracker for similar pull requests
- [x] Checked the code with [flake8](https://pypi.python.org/pypi/flake8) and [ran relevant tests](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#developer-instructions)
### In order to be accepted and merged into yt-dlp each piece of code must be in public domain or released under [Unlicense](http://unlicense.org/). Check one of the following options:
- [x] I am the original author of this code and I am willing to release it under [Unlicense](http://unlicense.org/)
- [ ] I am not the original author of this code but it is in public domain or released under [Unlicense](http://unlicense.org/) (provide reliable evidence)
### What is the purpose of your *pull request*?
- [x] Fix or improvement to an extractor (Make sure to add/update tests)
- [ ] New extractor ([Piracy websites will not be accepted](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#is-the-website-primarily-used-for-piracy))
- [ ] Core bug fix/improvement
- [ ] New feature (It is strongly [recommended to open an issue first](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#adding-new-feature-or-making-overarching-changes))
### Description of your *pull request* and other information
</details>
<!--
Explanation of your *pull request* in arbitrary form goes here. Please **make sure the description explains the purpose and effect** of your *pull request* and is worded well enough to be understood. Provide as much **context and examples** as possible
-->
Notes for the reviewer:
- I have updated the test case for the VOD with id `v6528877`, because it tested for an empty chapters array
- The test case I added for the new feature fails because of a 403 on the storyboard JSON URL. What should I do? If I open the VOD in a browser, the devtools show the same 403 error on the URL twice
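For reviewers, a stripped-down sketch of the fallback this PR adds (field names are taken from the diff; the real `_extract_chapters` also derives start times and durations from `moments`):

```python
def extract_chapters(info):
    moments = info.get('moments')
    if not moments:
        # Single-chapter VODs carry no moments, so fall back to the game name
        game = (info.get('game') or {}).get('displayName')
        if game:
            yield {'title': game}
        return
    for moment in moments:
        yield {'title': moment.get('description')}

print(list(extract_chapters({'game': {'displayName': 'League of Legends'}})))
# -> [{'title': 'League of Legends'}]
```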
Fixes #4421 | https://api.github.com/repos/yt-dlp/yt-dlp/pulls/4453 | 2022-07-26T21:28:16Z | 2022-07-30T16:11:28Z | 2022-07-30T16:11:28Z | 2022-07-30T16:14:54Z | 774 | yt-dlp/yt-dlp | 7,706 |
Installs goose for boulder's create_db.sh | diff --git a/tests/boulder-start.sh b/tests/boulder-start.sh
index ccc79f8c299..e17716b54f5 100755
--- a/tests/boulder-start.sh
+++ b/tests/boulder-start.sh
@@ -7,6 +7,8 @@ export GOPATH="${GOPATH:-/tmp/go}"
# see `go help packages`
go get -d github.com/letsencrypt/boulder/...
cd $GOPATH/src/github.com/letsencrypt/boulder
+# goose is needed for ./test/create_db.sh
+go get bitbucket.org/liamstask/goose/cmd/goose
./test/create_db.sh
./start.py &
# Hopefully start.py bootstraps before integration test is started...
| The recent addition of `goose` in boulder's `create_db.sh` is causing our integration tests to fail. I wrote up #719 about doing a better job of coordinating such changes between the two repos, but this fixes the problem for now.
| https://api.github.com/repos/certbot/certbot/pulls/720 | 2015-08-27T15:55:19Z | 2015-08-27T18:44:21Z | 2015-08-27T18:44:21Z | 2016-05-06T19:22:28Z | 157 | certbot/certbot | 928 |
[MRG+1] Retry stats | diff --git a/scrapy/downloadermiddlewares/retry.py b/scrapy/downloadermiddlewares/retry.py
index c9c512be8a6..549d74f46f6 100644
--- a/scrapy/downloadermiddlewares/retry.py
+++ b/scrapy/downloadermiddlewares/retry.py
@@ -22,6 +22,7 @@
from scrapy.exceptions import NotConfigured
from scrapy.utils.response import response_status_message
from scrapy.core.downloader.handlers.http11 import TunnelError
+from scrapy.utils.python import global_object_name
logger = logging.getLogger(__name__)
@@ -62,6 +63,7 @@ def process_exception(self, request, exception, spider):
def _retry(self, request, reason, spider):
retries = request.meta.get('retry_times', 0) + 1
+ stats = spider.crawler.stats
if retries <= self.max_retry_times:
logger.debug("Retrying %(request)s (failed %(retries)d times): %(reason)s",
{'request': request, 'retries': retries, 'reason': reason},
@@ -70,8 +72,15 @@ def _retry(self, request, reason, spider):
retryreq.meta['retry_times'] = retries
retryreq.dont_filter = True
retryreq.priority = request.priority + self.priority_adjust
+
+ if isinstance(reason, Exception):
+ reason = global_object_name(reason.__class__)
+
+ stats.inc_value('retry/count')
+ stats.inc_value('retry/reason_count/%s' % reason)
return retryreq
else:
+ stats.inc_value('retry/max_reached')
logger.debug("Gave up retrying %(request)s (failed %(retries)d times): %(reason)s",
{'request': request, 'retries': retries, 'reason': reason},
extra={'spider': spider})
diff --git a/scrapy/downloadermiddlewares/stats.py b/scrapy/downloadermiddlewares/stats.py
index 9c0ad90a597..ef0aafce0fe 100644
--- a/scrapy/downloadermiddlewares/stats.py
+++ b/scrapy/downloadermiddlewares/stats.py
@@ -1,6 +1,8 @@
from scrapy.exceptions import NotConfigured
from scrapy.utils.request import request_httprepr
from scrapy.utils.response import response_httprepr
+from scrapy.utils.python import global_object_name
+
class DownloaderStats(object):
@@ -27,6 +29,6 @@ def process_response(self, request, response, spider):
return response
def process_exception(self, request, exception, spider):
- ex_class = "%s.%s" % (exception.__class__.__module__, exception.__class__.__name__)
+ ex_class = global_object_name(exception.__class__)
self.stats.inc_value('downloader/exception_count', spider=spider)
self.stats.inc_value('downloader/exception_type_count/%s' % ex_class, spider=spider)
diff --git a/scrapy/utils/misc.py b/scrapy/utils/misc.py
index 30c9e5058bf..35f8550072f 100644
--- a/scrapy/utils/misc.py
+++ b/scrapy/utils/misc.py
@@ -113,7 +113,7 @@ def md5sum(file):
m.update(d)
return m.hexdigest()
+
def rel_has_nofollow(rel):
"""Return True if link rel attribute has nofollow type"""
return True if rel is not None and 'nofollow' in rel.split() else False
-
diff --git a/scrapy/utils/python.py b/scrapy/utils/python.py
index 42fbbda7fb1..4c500abf4cc 100644
--- a/scrapy/utils/python.py
+++ b/scrapy/utils/python.py
@@ -344,3 +344,14 @@ def without_none_values(iterable):
return {k: v for k, v in six.iteritems(iterable) if v is not None}
except AttributeError:
return type(iterable)((v for v in iterable if v is not None))
+
+
+def global_object_name(obj):
+ """
+ Return full name of a global object.
+
+ >>> from scrapy import Request
+ >>> global_object_name(Request)
+ 'scrapy.http.request.Request'
+ """
+ return "%s.%s" % (obj.__module__, obj.__name__)
diff --git a/scrapy/utils/response.py b/scrapy/utils/response.py
index deb5741be1a..bf276b5caa9 100644
--- a/scrapy/utils/response.py
+++ b/scrapy/utils/response.py
@@ -43,7 +43,8 @@ def get_meta_refresh(response):
def response_status_message(status):
"""Return status code plus status text descriptive message
"""
- return '%s %s' % (status, to_native_str(http.RESPONSES.get(int(status), "Unknown Status")))
+ message = http.RESPONSES.get(int(status), "Unknown Status")
+ return '%s %s' % (status, to_native_str(message))
def response_httprepr(response):
diff --git a/tests/test_downloadermiddleware_retry.py b/tests/test_downloadermiddleware_retry.py
index e129b71f8bf..b833cb4488b 100644
--- a/tests/test_downloadermiddleware_retry.py
+++ b/tests/test_downloadermiddleware_retry.py
@@ -13,9 +13,9 @@
class RetryTest(unittest.TestCase):
def setUp(self):
- crawler = get_crawler(Spider)
- self.spider = crawler._create_spider('foo')
- self.mw = RetryMiddleware.from_crawler(crawler)
+ self.crawler = get_crawler(Spider)
+ self.spider = self.crawler._create_spider('foo')
+ self.mw = RetryMiddleware.from_crawler(self.crawler)
self.mw.max_retry_times = 2
def test_priority_adjust(self):
@@ -70,6 +70,10 @@ def test_503(self):
# discard it
assert self.mw.process_response(req, rsp, self.spider) is rsp
+ assert self.crawler.stats.get_value('retry/max_reached') == 1
+ assert self.crawler.stats.get_value('retry/reason_count/503 Service Unavailable') == 2
+ assert self.crawler.stats.get_value('retry/count') == 2
+
def test_twistederrors(self):
exceptions = [defer.TimeoutError, TCPTimedOutError, TimeoutError,
DNSLookupError, ConnectionRefusedError, ConnectionDone,
@@ -79,6 +83,11 @@ def test_twistederrors(self):
req = Request('http://www.scrapytest.org/%s' % exc.__name__)
self._test_retry_exception(req, exc('foo'))
+ stats = self.crawler.stats
+ assert stats.get_value('retry/max_reached') == len(exceptions)
+ assert stats.get_value('retry/count') == len(exceptions) * 2
+ assert stats.get_value('retry/reason_count/twisted.internet.defer.TimeoutError') == 2
+
def _test_retry_exception(self, req, exception):
# first retry
req = self.mw.process_exception(req, exception, self.spider)
diff --git a/tests/test_proxy_connect.py b/tests/test_proxy_connect.py
index 0f06fd53dea..6213a51e866 100644
--- a/tests/test_proxy_connect.py
+++ b/tests/test_proxy_connect.py
@@ -101,7 +101,9 @@ def test_https_noconnect_auth_error(self):
self._assert_got_response_code(407, l)
def _assert_got_response_code(self, code, log):
+ print(log)
self.assertEqual(str(log).count('Crawled (%d)' % code), 1)
def _assert_got_tunnel_error(self, log):
- self.assertEqual(str(log).count('TunnelError'), 1)
+ print(log)
+ self.assertIn('TunnelError', str(log))
| What do you think about providing retry counts in scrapy stats?
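For illustration, these are the stat keys this change populates (key names come from the new tests; the counts here are made up):

```python
stats_snapshot = {
    'retry/count': 2,                                 # total retries issued
    'retry/reason_count/503 Service Unavailable': 2,  # per-reason breakdown
    'retry/max_reached': 1,                           # requests given up on
}
```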
~~This PR is not backwards compatible for anyone who used `RetryMiddleware.__init__` directly.~~ | https://api.github.com/repos/scrapy/scrapy/pulls/2543 | 2017-02-07T13:26:26Z | 2017-02-27T16:03:36Z | 2017-02-27T16:03:36Z | 2017-02-27T19:40:00Z | 1,726 | scrapy/scrapy | 34,224 |
get func args partial support - based on #504 | diff --git a/scrapy/tests/test_contrib_loader.py b/scrapy/tests/test_contrib_loader.py
index afd740a86db..734bc31d1c6 100644
--- a/scrapy/tests/test_contrib_loader.py
+++ b/scrapy/tests/test_contrib_loader.py
@@ -1,4 +1,5 @@
import unittest
+from functools import partial
from scrapy.contrib.loader import ItemLoader
from scrapy.contrib.loader.processor import Join, Identity, TakeFirst, \
@@ -332,6 +333,29 @@ class TestItemLoader(NameItemLoader):
item = il.load_item()
self.assertEqual(item['name'], u'Mart')
+ def test_partial_processor(self):
+ def join(values, sep=None, loader_context=None, ignored=None):
+ if sep is not None:
+ return sep.join(values)
+ elif loader_context and 'sep' in loader_context:
+ return loader_context['sep'].join(values)
+ else:
+ return ''.join(values)
+
+ class TestItemLoader(NameItemLoader):
+ name_out = Compose(partial(join, sep='+'))
+ url_out = Compose(partial(join, loader_context={'sep': '.'}))
+ summary_out = Compose(partial(join, ignored='foo'))
+
+ il = TestItemLoader()
+ il.add_value('name', [u'rabbit', u'hole'])
+ il.add_value('url', [u'rabbit', u'hole'])
+ il.add_value('summary', [u'rabbit', u'hole'])
+ item = il.load_item()
+ self.assertEqual(item['name'], u'rabbit+hole')
+ self.assertEqual(item['url'], u'rabbit.hole')
+ self.assertEqual(item['summary'], u'rabbithole')
+
class ProcessorsTest(unittest.TestCase):
diff --git a/scrapy/tests/test_utils_python.py b/scrapy/tests/test_utils_python.py
index 24e96275426..9122cf1be9e 100644
--- a/scrapy/tests/test_utils_python.py
+++ b/scrapy/tests/test_utils_python.py
@@ -1,3 +1,4 @@
+import functools
import operator
import unittest
from itertools import count
@@ -64,7 +65,7 @@ def test_isbinarytext(self):
assert not isbinarytext("hello")
# utf-16 strings contain null bytes
- assert not isbinarytext(u"hello".encode('utf-16'))
+ assert not isbinarytext(u"hello".encode('utf-16'))
# one with encoding
assert not isbinarytext("<div>Price \xa3</div>")
@@ -175,11 +176,17 @@ def __call__(self, a, b, c):
a = A(1, 2, 3)
cal = Callable()
+ partial_f1 = functools.partial(f1, None)
+ partial_f2 = functools.partial(f1, b=None)
+ partial_f3 = functools.partial(partial_f2, None)
self.assertEqual(get_func_args(f1), ['a', 'b', 'c'])
self.assertEqual(get_func_args(f2), ['a', 'b', 'c'])
self.assertEqual(get_func_args(A), ['a', 'b', 'c'])
self.assertEqual(get_func_args(a.method), ['a', 'b', 'c'])
+ self.assertEqual(get_func_args(partial_f1), ['b', 'c'])
+ self.assertEqual(get_func_args(partial_f2), ['a', 'c'])
+ self.assertEqual(get_func_args(partial_f3), ['c'])
self.assertEqual(get_func_args(cal), ['a', 'b', 'c'])
self.assertEqual(get_func_args(object), [])
diff --git a/scrapy/utils/python.py b/scrapy/utils/python.py
index 8347730722a..566b50f17c2 100644
--- a/scrapy/utils/python.py
+++ b/scrapy/utils/python.py
@@ -10,7 +10,7 @@
import inspect
import weakref
import errno
-from functools import wraps
+from functools import partial, wraps
from sgmllib import SGMLParser
@@ -156,6 +156,9 @@ def get_func_args(func, stripself=False):
return get_func_args(func.__func__, True)
elif inspect.ismethoddescriptor(func):
return []
+ elif isinstance(func, partial):
+ return [x for x in get_func_args(func.func)[len(func.args):]
+ if not (func.keywords and x in func.keywords)]
elif hasattr(func, '__call__'):
if inspect.isroutine(func):
return []
| fixes #368 based on #504.
| https://api.github.com/repos/scrapy/scrapy/pulls/506 | 2013-12-30T11:28:33Z | 2013-12-30T16:45:38Z | 2013-12-30T16:45:38Z | 2014-06-12T16:07:58Z | 997 | scrapy/scrapy | 34,993 |
Bump sphinx from 4.4.0 to 4.5.0 | diff --git a/docs/requirements.txt b/docs/requirements.txt
index 193114721..6688094b2 100644
--- a/docs/requirements.txt
+++ b/docs/requirements.txt
@@ -1,4 +1,4 @@
alabaster==0.7.12
-Sphinx==4.4.0
+Sphinx==4.5.0
sphinx-rtd-theme==1.0.0
sphinx-copybutton==0.5.0
| Bumps [sphinx](https://github.com/sphinx-doc/sphinx) from 4.4.0 to 4.5.0.
<details>
<summary>Release notes</summary>
<p><em>Sourced from <a href="https://github.com/sphinx-doc/sphinx/releases">sphinx's releases</a>.</em></p>
<blockquote>
<h2>v4.5.0</h2>
<p>Changelog: <a href="https://www.sphinx-doc.org/en/master/changes.html">https://www.sphinx-doc.org/en/master/changes.html</a></p>
</blockquote>
</details>
<details>
<summary>Changelog</summary>
<p><em>Sourced from <a href="https://github.com/sphinx-doc/sphinx/blob/4.x/CHANGES">sphinx's changelog</a>.</em></p>
<blockquote>
<h1>Release 4.5.0 (released Mar 28, 2022)</h1>
<h2>Incompatible changes</h2>
<ul>
<li><a href="https://github-redirect.dependabot.com/sphinx-doc/sphinx/issues/10112">#10112</a>: extlinks: Disable hardcoded links detector by default</li>
<li><a href="https://github-redirect.dependabot.com/sphinx-doc/sphinx/issues/9993">#9993</a>, <a href="https://github-redirect.dependabot.com/sphinx-doc/sphinx/issues/10177">#10177</a>: std domain: Disallow to refer an inline target via
:rst:role:<code>ref</code> role</li>
</ul>
<h2>Deprecated</h2>
<ul>
<li><code>sphinx.ext.napoleon.docstring.GoogleDocstring._qualify_name()</code></li>
</ul>
<h2>Features added</h2>
<ul>
<li><a href="https://github-redirect.dependabot.com/sphinx-doc/sphinx/issues/10260">#10260</a>: Enable <code>FORCE_COLOR</code> and <code>NO_COLOR</code> for terminal colouring</li>
<li><a href="https://github-redirect.dependabot.com/sphinx-doc/sphinx/issues/10234">#10234</a>: autosummary: Add "autosummary" CSS class to summary tables</li>
<li><a href="https://github-redirect.dependabot.com/sphinx-doc/sphinx/issues/10125">#10125</a>: extlinks: Improve suggestion message for a reference having title</li>
<li><a href="https://github-redirect.dependabot.com/sphinx-doc/sphinx/issues/10112">#10112</a>: extlinks: Add :confval:<code>extlinks_detect_hardcoded_links</code> to enable
hardcoded links detector feature</li>
<li><a href="https://github-redirect.dependabot.com/sphinx-doc/sphinx/issues/9494">#9494</a>, <a href="https://github-redirect.dependabot.com/sphinx-doc/sphinx/issues/9456">#9456</a>: html search: Add a config variable
:confval:<code>html_show_search_summary</code> to enable/disable the search summaries</li>
<li><a href="https://github-redirect.dependabot.com/sphinx-doc/sphinx/issues/9337">#9337</a>: HTML theme, add option <code>enable_search_shortcuts</code> that enables :kbd:'/' as
a Quick search shortcut and :kbd:<code>Esc</code> shortcut that
removes search highlighting.</li>
<li><a href="https://github-redirect.dependabot.com/sphinx-doc/sphinx/issues/10107">#10107</a>: i18n: Allow to suppress translation warnings by adding <code>#noqa</code>
comment to the tail of each translation message</li>
<li><a href="https://github-redirect.dependabot.com/sphinx-doc/sphinx/issues/10252">#10252</a>: C++, support attributes on classes, unions, and enums.</li>
<li><a href="https://github-redirect.dependabot.com/sphinx-doc/sphinx/issues/10253">#10253</a>: :rst:dir:<code>pep</code> role now generates URLs based on peps.python.org</li>
</ul>
<h2>Bugs fixed</h2>
<ul>
<li><a href="https://github-redirect.dependabot.com/sphinx-doc/sphinx/issues/9876">#9876</a>: autodoc: Failed to document an imported class that is built from native
binary module</li>
<li><a href="https://github-redirect.dependabot.com/sphinx-doc/sphinx/issues/10133">#10133</a>: autodoc: Crashed when mocked module is used for type annotation</li>
<li><a href="https://github-redirect.dependabot.com/sphinx-doc/sphinx/issues/10146">#10146</a>: autodoc: :confval:<code>autodoc_default_options</code> does not support
<code>no-value</code> option</li>
<li><a href="https://github-redirect.dependabot.com/sphinx-doc/sphinx/issues/9971">#9971</a>: autodoc: TypeError is raised when the target object is annotated by
unhashable object</li>
<li><a href="https://github-redirect.dependabot.com/sphinx-doc/sphinx/issues/10205">#10205</a>: extlinks: Failed to compile regexp on checking hardcoded links</li>
<li><a href="https://github-redirect.dependabot.com/sphinx-doc/sphinx/issues/10277">#10277</a>: html search: Could not search short words (ex. "use")</li>
<li><a href="https://github-redirect.dependabot.com/sphinx-doc/sphinx/issues/9529">#9529</a>: LaTeX: named auto numbered footnote (ex. <code>[#named]</code>) that is referred
multiple times was rendered to a question mark</li>
<li><a href="https://github-redirect.dependabot.com/sphinx-doc/sphinx/issues/9924">#9924</a>: LaTeX: multi-line :rst:dir:<code>cpp:function</code> directive has big vertical
spacing in Latexpdf</li>
<li><a href="https://github-redirect.dependabot.com/sphinx-doc/sphinx/issues/10158">#10158</a>: LaTeX: excessive whitespace since v4.4.0 for undocumented</li>
</ul>
<!-- raw HTML omitted -->
</blockquote>
<p>... (truncated)</p>
</details>
<details>
<summary>Commits</summary>
<ul>
<li><a href="https://github.com/sphinx-doc/sphinx/commit/2329fdef8c20c6c75194f5d842b8f62ebad5c79d"><code>2329fde</code></a> Bump to 4.5.0 final</li>
<li><a href="https://github.com/sphinx-doc/sphinx/commit/c34444149d7703ae2bf415adbec97324a5842632"><code>c344441</code></a> Update CHANGES for PR <a href="https://github-redirect.dependabot.com/sphinx-doc/sphinx/issues/10263">#10263</a></li>
<li><a href="https://github.com/sphinx-doc/sphinx/commit/746df6145239fd525056907deccb431f89bf8e41"><code>746df61</code></a> Merge pull request <a href="https://github-redirect.dependabot.com/sphinx-doc/sphinx/issues/10263">#10263</a> from nicoa/escape_base_uri_in_extlinks</li>
<li><a href="https://github.com/sphinx-doc/sphinx/commit/81830cc77047ce39eab056a70b44a2d97848550c"><code>81830cc</code></a> Fix a flake8 warning</li>
<li><a href="https://github.com/sphinx-doc/sphinx/commit/aee4e42b81d56c57e1311176ce175ba3374baa0a"><code>aee4e42</code></a> extlink: Strip a leading backslash on compiling pattern</li>
<li><a href="https://github.com/sphinx-doc/sphinx/commit/8a1830ca36ddea80f8bcbc20c1090280a0a5197a"><code>8a1830c</code></a> Update CHANGES for PR <a href="https://github-redirect.dependabot.com/sphinx-doc/sphinx/issues/10178">#10178</a></li>
<li><a href="https://github.com/sphinx-doc/sphinx/commit/4a496bfc98feced56e9e84eb6cf96264982d0e7a"><code>4a496bf</code></a> Merge pull request <a href="https://github-redirect.dependabot.com/sphinx-doc/sphinx/issues/10178">#10178</a> from stephenfin/issue-10177</li>
<li><a href="https://github.com/sphinx-doc/sphinx/commit/c93b95d685aea6c8e4392d624e4668587b9e5726"><code>c93b95d</code></a> Merge CHANGES entry for 4.4.1 to 4.5.0</li>
<li><a href="https://github.com/sphinx-doc/sphinx/commit/a001bf47d66ae804a9a6e5d754de9b5eda4d0eb9"><code>a001bf4</code></a> Update CHANGES for PR <a href="https://github-redirect.dependabot.com/sphinx-doc/sphinx/issues/10107">#10107</a></li>
<li><a href="https://github.com/sphinx-doc/sphinx/commit/b20e04968e73234da9fff7d19b12dfbeebebe944"><code>b20e049</code></a> Merge pull request <a href="https://github-redirect.dependabot.com/sphinx-doc/sphinx/issues/10107">#10107</a> from Jean-Abou-Samra/intl-warnings</li>
<li>Additional commits viewable in <a href="https://github.com/sphinx-doc/sphinx/compare/v4.4.0...v4.5.0">compare view</a></li>
</ul>
</details>
<br />
[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=sphinx&package-manager=pip&previous-version=4.4.0&new-version=4.5.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)
Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.
[//]: # (dependabot-automerge-start)
[//]: # (dependabot-automerge-end)
---
<details>
<summary>Dependabot commands and options</summary>
<br />
You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually
- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
</details> | https://api.github.com/repos/Textualize/rich/pulls/2124 | 2022-03-28T13:36:30Z | 2022-03-29T16:02:26Z | 2022-03-29T16:02:26Z | 2022-03-29T16:02:27Z | 105 | Textualize/rich | 48,422 |
Fixed #27361 -- Cleaned up forms api documentation | diff --git a/docs/ref/forms/api.txt b/docs/ref/forms/api.txt
index 8849ce284dd81..194c390484ee0 100644
--- a/docs/ref/forms/api.txt
+++ b/docs/ref/forms/api.txt
@@ -396,11 +396,11 @@ When the ``Form`` is valid, ``cleaned_data`` will include a key and value for
fields. In this example, the data dictionary doesn't include a value for the
``nick_name`` field, but ``cleaned_data`` includes it, with an empty value::
- >>> from django.forms import Form
- >>> class OptionalPersonForm(Form):
- ... first_name = CharField()
- ... last_name = CharField()
- ... nick_name = CharField(required=False)
+ >>> from django import forms
+ >>> class OptionalPersonForm(forms.Form):
+ ... first_name = forms.CharField()
+ ... last_name = forms.CharField()
+ ... nick_name = forms.CharField(required=False)
>>> data = {'first_name': 'John', 'last_name': 'Lennon'}
>>> f = OptionalPersonForm(data)
>>> f.is_valid()
@@ -540,7 +540,7 @@ it calls its ``as_table()`` method behind the scenes::
>>> f = ContactForm()
>>> f.as_table()
'<tr><th><label for="id_subject">Subject:</label></th><td><input id="id_subject" type="text" name="subject" maxlength="100" required /></td></tr>\n<tr><th><label for="id_message">Message:</label></th><td><input type="text" name="message" id="id_message" required /></td></tr>\n<tr><th><label for="id_sender">Sender:</label></th><td><input type="email" name="sender" id="id_sender" required /></td></tr>\n<tr><th><label for="id_cc_myself">Cc myself:</label></th><td><input type="checkbox" name="cc_myself" id="id_cc_myself" /></td></tr>'
- >>> print(f.as_table())
+ >>> print(f)
<tr><th><label for="id_subject">Subject:</label></th><td><input id="id_subject" type="text" name="subject" maxlength="100" required /></td></tr>
<tr><th><label for="id_message">Message:</label></th><td><input type="text" name="message" id="id_message" required /></td></tr>
<tr><th><label for="id_sender">Sender:</label></th><td><input type="email" name="sender" id="id_sender" required /></td></tr>
@@ -563,9 +563,9 @@ attributes to required rows or to rows with errors: simply set the
:attr:`Form.error_css_class` and/or :attr:`Form.required_css_class`
attributes::
- from django.forms import Form
+ from django import forms
- class ContactForm(Form):
+ class ContactForm(forms.Form):
error_css_class = 'error'
required_css_class = 'required'
@@ -1158,14 +1158,14 @@ example, ``BeatleForm`` subclasses both ``PersonForm`` and ``InstrumentForm``
(in that order), and its field list includes the fields from the parent
classes::
- >>> from django.forms import Form
- >>> class PersonForm(Form):
- ... first_name = CharField()
- ... last_name = CharField()
- >>> class InstrumentForm(Form):
- ... instrument = CharField()
- >>> class BeatleForm(PersonForm, InstrumentForm):
- ... haircut_type = CharField()
+ >>> from django import forms
+ >>> class PersonForm(forms.Form):
+ ... first_name = forms.CharField()
+ ... last_name = forms.CharField()
+ >>> class InstrumentForm(forms.Form):
+ ... instrument = forms.CharField()
+ >>> class BeatleForm(InstrumentForm, PersonForm):
+ ... haircut_type = forms.CharField()
>>> b = BeatleForm(auto_id=False)
>>> print(b.as_ul())
<li>First name: <input type="text" name="first_name" required /></li>
| https://api.github.com/repos/django/django/pulls/7407 | 2016-10-19T06:37:34Z | 2016-10-19T13:55:21Z | 2016-10-19T13:55:21Z | 2016-10-19T13:55:22Z | 933 | django/django | 51,026 |
|
Update mappings.json | diff --git a/llama-index-cli/llama_index/cli/upgrade/mappings.json b/llama-index-cli/llama_index/cli/upgrade/mappings.json
index 9ad04974d42bb..1acb4e7bcef6f 100644
--- a/llama-index-cli/llama_index/cli/upgrade/mappings.json
+++ b/llama-index-cli/llama_index/cli/upgrade/mappings.json
@@ -333,10 +333,12 @@
"VectorStoreQueryResult": "llama_index.core.vector_stores",
"MetadataFilters": "llama_index.core.vector_stores",
"MetadataFilter": "llama_index.core.vector_stores",
+ "MetadataInfo": "llama_index.core.vector_stores",
"ExactMatchFilter": "llama_index.core.vector_stores",
"FilterCondition": "llama_index.core.vector_stores",
"FilterOperator": "llama_index.core.vector_stores",
"SimpleVectorStore": "llama_index.core.vector_stores",
+ "VectorStoreInfo": "llama_index.core.vector_stores",
"AutoMergingRetrieverPack": "llama_index.packs.auto_merging_retriever",
"ChainOfTablePack": "llama_index.packs.tables",
"MixSelfConsistencyPack": "llama_index.packs.tables",
@@ -414,6 +416,7 @@
"GoogleVectorStore": "llama_index.vector_stores.google",
"MetalVectorStore": "llama_index.vector_stores.metal",
"PathwayRetriever": "llama_index.retrievers.pathway",
+ "VideoDBRetriever": "llama_index.retrievers.videodb",
"YouRetriever": "llama_index.retrievers.you",
"ZillizCloudPipelineIndex": "llama_index.indices.managed.zilliz",
"ZillizCloudPipelineRetriever": "llama_index.indices.managed.zilliz",
@@ -487,6 +490,9 @@
"DashScopeBatchTextEmbeddingModels": "llama_index.embeddings.dashscope",
"DashScopeEmbedding": "llama_index.embeddings.dashscope",
"DashScopeMultiModalEmbeddingModels": "llama_index.embeddings.dashscope",
+ "VertexTextEmbedding": "llama_index.embeddings.vertex",
+ "VertexMultiModalEmbedding": "llama_index.embeddings.vertex",
+ "VertexEmbeddingMode": "llama_index.embeddings.vertex",
"HuggingFaceEmbedding": "llama_index.embeddings.huggingface",
"HuggingFaceInferenceAPIEmbedding": "llama_index.embeddings.huggingface",
"HuggingFaceInferenceAPIEmbeddings": "llama_index.embeddings.huggingface",
@@ -519,11 +525,11 @@
"TextEmbeddingsInference": "llama_index.embeddings.text_embeddings_inference",
"UpTrainCallbackHandler": "llama_index.callbacks.uptrain",
"deepeval_callback_handler": "llama_index.callbacks.deepeval",
- "langfuse_callback_handler": "llama_index.callbacks.langfuse",
"OpenInferenceCallbackHandler": "llama_index.callbacks.openinference",
"WandbCallbackHandler": "llama_index.callbacks.wandb",
"argilla_callback_handler": "llama_index.callbacks.argilla",
"honeyhive_callback_handler": "llama_index.callbacks.honeyhive",
+ "langfuse_callback_handler": "llama_index.callbacks.langfuse",
"arize_phoenix_callback_handler": "llama_index.callbacks.arize_phoenix",
"AimCallback": "llama_index.callbacks.aim",
"PromptLayerHandler": "llama_index.callbacks.promptlayer",
@@ -543,6 +549,7 @@
"AzureOpenAIMultiModal": "llama_index.multi_modal_llms.azure_openai",
"ReplicateMultiModal": "llama_index.multi_modal_llms.replicate",
"GeminiMultiModal": "llama_index.multi_modal_llms.gemini",
+ "AnthropicMultiModal": "llama_index.multi_modal_llms.anthropic",
"DashScopeMultiModal": "llama_index.multi_modal_llms.dashscope",
"DashScopeMultiModalModels": "llama_index.multi_modal_llms.dashscope",
"OpenAIAgent": "llama_index.agent.openai",
@@ -671,6 +678,7 @@
"format_list_to_string": "llama_index.readers.myscale",
"GuruReader": "llama_index.readers.guru",
"LinearReader": "llama_index.readers.linear",
+ "FeishuWikiReader": "llama_index.readers.feishu_wiki",
"TelegramReader": "llama_index.readers.telegram",
"SteamshipFileReader": "llama_index.readers.steamship",
"OpenMap": "llama_index.readers.maps",
@@ -846,6 +854,8 @@
"SelfRAGPack": "llama_index.packs.self_rag",
"SelfRAGQueryEngine": "llama_index.packs.self_rag",
"SelfDiscoverPack": "llama_index.packs.self_discover",
+ "RaptorPack": "llama_index.packs.raptor",
+ "RaptorRetriever": "llama_index.packs.raptor",
"LlamaDatasetMetadataPack": "llama_index.packs.llama_dataset_metadata",
"StockMarketDataQueryEnginePack": "llama_index.packs.stock_market_data_query_engine",
"RagEvaluatorPack": "llama_index.packs.rag_evaluator",
@@ -883,6 +893,7 @@
"AgentSearchRetrieverPack": "llama_index.packs.agent_search_retriever",
"HybridFusionRetrieverPack": "llama_index.packs.fusion_retriever",
"QueryRewritingRetrieverPack": "llama_index.packs.fusion_retriever",
+ "FinanceChatPack": "llama_index.packs.finchat",
"BaseNode": "llama_index.core.schema",
"TextNode": "llama_index.core.schema",
"ImageNode": "llama_index.core.schema",
@@ -891,7 +902,5 @@
"run_jobs": "llama_index.core.async_utils",
"DecomposeQueryTransform": "llama_index.core.query.query_transform.base",
"get_eval_results": "llama_index.core.evaluation.eval_utils",
- "VectorStoreInfo": "llama_index.core.vector_stores",
- "MetadataInfo": "llama_index.core.vector_stores",
"REPLICATE_MULTI_MODAL_LLM_MODELS": "llama_index.multi_modal_llms.replicate.base"
}
diff --git a/llama-index-core/llama_index/core/command_line/mappings.json b/llama-index-core/llama_index/core/command_line/mappings.json
index 9ad04974d42bb..1acb4e7bcef6f 100644
--- a/llama-index-core/llama_index/core/command_line/mappings.json
+++ b/llama-index-core/llama_index/core/command_line/mappings.json
@@ -333,10 +333,12 @@
"VectorStoreQueryResult": "llama_index.core.vector_stores",
"MetadataFilters": "llama_index.core.vector_stores",
"MetadataFilter": "llama_index.core.vector_stores",
+ "MetadataInfo": "llama_index.core.vector_stores",
"ExactMatchFilter": "llama_index.core.vector_stores",
"FilterCondition": "llama_index.core.vector_stores",
"FilterOperator": "llama_index.core.vector_stores",
"SimpleVectorStore": "llama_index.core.vector_stores",
+ "VectorStoreInfo": "llama_index.core.vector_stores",
"AutoMergingRetrieverPack": "llama_index.packs.auto_merging_retriever",
"ChainOfTablePack": "llama_index.packs.tables",
"MixSelfConsistencyPack": "llama_index.packs.tables",
@@ -414,6 +416,7 @@
"GoogleVectorStore": "llama_index.vector_stores.google",
"MetalVectorStore": "llama_index.vector_stores.metal",
"PathwayRetriever": "llama_index.retrievers.pathway",
+ "VideoDBRetriever": "llama_index.retrievers.videodb",
"YouRetriever": "llama_index.retrievers.you",
"ZillizCloudPipelineIndex": "llama_index.indices.managed.zilliz",
"ZillizCloudPipelineRetriever": "llama_index.indices.managed.zilliz",
@@ -487,6 +490,9 @@
"DashScopeBatchTextEmbeddingModels": "llama_index.embeddings.dashscope",
"DashScopeEmbedding": "llama_index.embeddings.dashscope",
"DashScopeMultiModalEmbeddingModels": "llama_index.embeddings.dashscope",
+ "VertexTextEmbedding": "llama_index.embeddings.vertex",
+ "VertexMultiModalEmbedding": "llama_index.embeddings.vertex",
+ "VertexEmbeddingMode": "llama_index.embeddings.vertex",
"HuggingFaceEmbedding": "llama_index.embeddings.huggingface",
"HuggingFaceInferenceAPIEmbedding": "llama_index.embeddings.huggingface",
"HuggingFaceInferenceAPIEmbeddings": "llama_index.embeddings.huggingface",
@@ -519,11 +525,11 @@
"TextEmbeddingsInference": "llama_index.embeddings.text_embeddings_inference",
"UpTrainCallbackHandler": "llama_index.callbacks.uptrain",
"deepeval_callback_handler": "llama_index.callbacks.deepeval",
- "langfuse_callback_handler": "llama_index.callbacks.langfuse",
"OpenInferenceCallbackHandler": "llama_index.callbacks.openinference",
"WandbCallbackHandler": "llama_index.callbacks.wandb",
"argilla_callback_handler": "llama_index.callbacks.argilla",
"honeyhive_callback_handler": "llama_index.callbacks.honeyhive",
+ "langfuse_callback_handler": "llama_index.callbacks.langfuse",
"arize_phoenix_callback_handler": "llama_index.callbacks.arize_phoenix",
"AimCallback": "llama_index.callbacks.aim",
"PromptLayerHandler": "llama_index.callbacks.promptlayer",
@@ -543,6 +549,7 @@
"AzureOpenAIMultiModal": "llama_index.multi_modal_llms.azure_openai",
"ReplicateMultiModal": "llama_index.multi_modal_llms.replicate",
"GeminiMultiModal": "llama_index.multi_modal_llms.gemini",
+ "AnthropicMultiModal": "llama_index.multi_modal_llms.anthropic",
"DashScopeMultiModal": "llama_index.multi_modal_llms.dashscope",
"DashScopeMultiModalModels": "llama_index.multi_modal_llms.dashscope",
"OpenAIAgent": "llama_index.agent.openai",
@@ -671,6 +678,7 @@
"format_list_to_string": "llama_index.readers.myscale",
"GuruReader": "llama_index.readers.guru",
"LinearReader": "llama_index.readers.linear",
+ "FeishuWikiReader": "llama_index.readers.feishu_wiki",
"TelegramReader": "llama_index.readers.telegram",
"SteamshipFileReader": "llama_index.readers.steamship",
"OpenMap": "llama_index.readers.maps",
@@ -846,6 +854,8 @@
"SelfRAGPack": "llama_index.packs.self_rag",
"SelfRAGQueryEngine": "llama_index.packs.self_rag",
"SelfDiscoverPack": "llama_index.packs.self_discover",
+ "RaptorPack": "llama_index.packs.raptor",
+ "RaptorRetriever": "llama_index.packs.raptor",
"LlamaDatasetMetadataPack": "llama_index.packs.llama_dataset_metadata",
"StockMarketDataQueryEnginePack": "llama_index.packs.stock_market_data_query_engine",
"RagEvaluatorPack": "llama_index.packs.rag_evaluator",
@@ -883,6 +893,7 @@
"AgentSearchRetrieverPack": "llama_index.packs.agent_search_retriever",
"HybridFusionRetrieverPack": "llama_index.packs.fusion_retriever",
"QueryRewritingRetrieverPack": "llama_index.packs.fusion_retriever",
+ "FinanceChatPack": "llama_index.packs.finchat",
"BaseNode": "llama_index.core.schema",
"TextNode": "llama_index.core.schema",
"ImageNode": "llama_index.core.schema",
@@ -891,7 +902,5 @@
"run_jobs": "llama_index.core.async_utils",
"DecomposeQueryTransform": "llama_index.core.query.query_transform.base",
"get_eval_results": "llama_index.core.evaluation.eval_utils",
- "VectorStoreInfo": "llama_index.core.vector_stores",
- "MetadataInfo": "llama_index.core.vector_stores",
"REPLICATE_MULTI_MODAL_LLM_MODELS": "llama_index.multi_modal_llms.replicate.base"
}
| # Description
- update mappings.json in both `llama-index-core` and `llama-index-cli`
Fixes #11689
## Type of Change
- [x] Bug fix (non-breaking change which fixes an issue)
# How Has This Been Tested?
- [x] I stared at the code and made sure it makes sense
| https://api.github.com/repos/run-llama/llama_index/pulls/11699 | 2024-03-06T16:28:31Z | 2024-03-06T16:38:24Z | 2024-03-06T16:38:24Z | 2024-03-06T16:38:25Z | 2,933 | run-llama/llama_index | 6,079 |
check if the player exists or not, and print informative error message if not | diff --git a/src/you_get/common.py b/src/you_get/common.py
index 7818216303..6d5764ff57 100755
--- a/src/you_get/common.py
+++ b/src/you_get/common.py
@@ -273,7 +273,15 @@ def matchall(text, patterns):
def launch_player(player, urls):
import subprocess
import shlex
- subprocess.call(shlex.split(player) + list(urls))
+ if (sys.version_info >= (3, 3)):
+ import shutil
+ exefile=shlex.split(player)[0]
+ if shutil.which(exefile) is not None:
+ subprocess.call(shlex.split(player) + list(urls))
+ else:
+ log.wtf('[Failed] Cannot find player "%s"' % exefile)
+ else:
+ subprocess.call(shlex.split(player) + list(urls))
def parse_query_param(url, param):
| **(PLEASE DELETE ALL THESE AFTER READING)**
Thank you for the pull request! `you-get` is a growing open source project, which would not have been possible without contributors like you.
Here are some simple rules to follow, please recheck them before sending the pull request:
- [ ] If you want to propose two or more unrelated patches, please open separate pull requests for them, instead of one;
- [ ] All pull requests should be based upon the latest `develop` branch;
- [ ] Name your branch (from which you will send the pull request) properly; use a meaningful name like `add-this-shining-feature` rather than just `develop`;
- [ ] All commit messages, as well as comments in code, should be written in understandable English.
As a contributor, you must be aware that
- [ ] You agree to contribute your code to this project, under the terms of the MIT license, so that any person may freely use or redistribute them; of course, you will still reserve the copyright for your own authorship.
- [ ] You may not contribute any code not authored by yourself, unless they are licensed under either public domain or the MIT license, literally.
Not all pull requests can eventually be merged. I consider merged / unmerged patches as equally important for the community: as long as you think a patch would be helpful, someone else might find it helpful, too, therefore they could take your fork and benefit in some way. In any case, I would like to thank you in advance for taking your time to contribute to this project.
Cheers,
Mort
**(PLEASE REPLACE ALL ABOVE WITH A DETAILED DESCRIPTION OF YOUR PULL REQUEST)**
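For context, a condensed sketch of the check this diff adds, with `sys.exit` standing in for you-get's `log.wtf` helper so the snippet stays self-contained:

```python
import shlex
import shutil
import subprocess
import sys

def launch_player(player, urls):
    args = shlex.split(player)
    # shutil.which only exists on Python >= 3.3; older interpreters fall
    # through to the plain subprocess call, exactly as in the patch.
    if sys.version_info >= (3, 3) and shutil.which(args[0]) is None:
        sys.exit('[Failed] Cannot find player "%s"' % args[0])
    subprocess.call(args + list(urls))
```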
| https://api.github.com/repos/soimort/you-get/pulls/2673 | 2019-01-02T00:35:46Z | 2019-01-14T12:46:24Z | 2019-01-14T12:46:24Z | 2019-01-14T12:46:24Z | 207 | soimort/you-get | 21,215 |
Don't use PEP 604 type hints, to stay compatible with Python<3.10. | diff --git a/execution.py b/execution.py
index e91e9a410e..bc5cfe55c5 100644
--- a/execution.py
+++ b/execution.py
@@ -751,7 +751,7 @@ def task_done(self, item_id, outputs,
if len(self.history) > MAXIMUM_HISTORY_SIZE:
self.history.pop(next(iter(self.history)))
- status_dict: dict|None = None
+ status_dict: Optional[dict] = None
if status is not None:
status_dict = copy.deepcopy(status._asdict())
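A quick illustration of the two annotation styles (the commented-out line is the PEP 604 form this patch removes):

```python
from typing import Optional

status_dict: Optional[dict] = None   # evaluates fine on Python 3.8/3.9 and later
# status_dict: dict | None = None    # raises TypeError at runtime before Python 3.10
```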
| `status_dict: dict|None = None` relies on PEP 604 to use the `|` to mean "or". Previously (Python<3.10) one had to use `Union` or `Optional`. The previous change (https://github.com/comfyanonymous/ComfyUI/commit/1b3d65bd84c8026dea234643861491279886218c) is probably breaking ComfyUI for some people who are using older versions of python. This PR fixes the issue. | https://api.github.com/repos/comfyanonymous/ComfyUI/pulls/2578 | 2024-01-17T22:21:25Z | 2024-01-18T00:55:43Z | 2024-01-18T00:55:43Z | 2024-01-18T00:55:44Z | 129 | comfyanonymous/ComfyUI | 17,971 |
[extractor/reddit] Extract video embeds in text posts | diff --git a/yt_dlp/extractor/reddit.py b/yt_dlp/extractor/reddit.py
index 171affb9323..f1a5c852af0 100644
--- a/yt_dlp/extractor/reddit.py
+++ b/yt_dlp/extractor/reddit.py
@@ -1,15 +1,15 @@
import random
-from urllib.parse import urlparse
+import urllib.parse
from .common import InfoExtractor
from ..utils import (
ExtractorError,
- int_or_none,
float_or_none,
+ int_or_none,
+ traverse_obj,
try_get,
unescapeHTML,
url_or_none,
- traverse_obj
)
@@ -56,6 +56,14 @@ class RedditIE(InfoExtractor):
'comment_count': int,
'age_limit': 0,
},
+ }, {
+ # videos embedded in reddit text post
+ 'url': 'https://www.reddit.com/r/KamenRider/comments/wzqkxp/finale_kamen_rider_revice_episode_50_family_to/',
+ 'playlist_count': 2,
+ 'info_dict': {
+ 'id': 'wzqkxp',
+ 'title': 'md5:72d3d19402aa11eff5bd32fc96369b37',
+ },
}, {
'url': 'https://www.reddit.com/r/videos/comments/6rrwyj',
'only_matching': True,
@@ -102,10 +110,6 @@ def _real_extract(self, url):
data = data[0]['data']['children'][0]['data']
video_url = data['url']
- # Avoid recursing into the same reddit URL
- if 'reddit.com/' in video_url and '/%s/' % video_id in video_url:
- raise ExtractorError('No media found', expected=True)
-
over_18 = data.get('over_18')
if over_18 is True:
age_limit = 18
@@ -148,6 +152,32 @@ def add_thumbnail(src):
'age_limit': age_limit,
}
+ parsed_url = urllib.parse.urlparse(video_url)
+
+ # Check for embeds in text posts, or else raise to avoid recursing into the same reddit URL
+ if 'reddit.com' in parsed_url.netloc and f'/{video_id}/' in parsed_url.path:
+ entries = []
+ for media in traverse_obj(data, ('media_metadata', ...), expected_type=dict):
+ if not media.get('id') or media.get('e') != 'RedditVideo':
+ continue
+ formats = []
+ if media.get('hlsUrl'):
+ formats.extend(self._extract_m3u8_formats(
+ unescapeHTML(media['hlsUrl']), video_id, 'mp4', m3u8_id='hls', fatal=False))
+ if media.get('dashUrl'):
+ formats.extend(self._extract_mpd_formats(
+ unescapeHTML(media['dashUrl']), video_id, mpd_id='dash', fatal=False))
+ if formats:
+ entries.append({
+ 'id': media['id'],
+ 'display_id': video_id,
+ 'formats': formats,
+ **info,
+ })
+ if entries:
+ return self.playlist_result(entries, video_id, info.get('title'))
+ raise ExtractorError('No media found', expected=True)
+
# Check if media is hosted on reddit:
reddit_video = traverse_obj(data, (('media', 'secure_media'), 'reddit_video'), get_all=False)
if reddit_video:
@@ -189,7 +219,6 @@ def add_thumbnail(src):
'duration': int_or_none(reddit_video.get('duration')),
}
- parsed_url = urlparse(video_url)
if parsed_url.netloc == 'v.redd.it':
self.raise_no_formats('This video is processing', expected=True, video_id=video_id)
return {
| Adds support for Reddit text submissions that contain video embeds.
Closes #5612
<details open><summary>Template</summary> <!-- OPEN is intentional -->
<!--
# PLEASE FOLLOW THE GUIDE BELOW
- You will be asked some questions, please read them **carefully** and answer honestly
- Put an `x` into all the boxes `[ ]` relevant to your *pull request* (like [x])
- Use *Preview* tab to see how your *pull request* will actually look like
-->
### Before submitting a *pull request* make sure you have:
- [x] At least skimmed through [contributing guidelines](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#developer-instructions) including [yt-dlp coding conventions](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#yt-dlp-coding-conventions)
- [x] [Searched](https://github.com/yt-dlp/yt-dlp/search?q=is%3Apr&type=Issues) the bugtracker for similar pull requests
- [x] Checked the code with [flake8](https://pypi.python.org/pypi/flake8) and [ran relevant tests](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#developer-instructions)
### In order to be accepted and merged into yt-dlp each piece of code must be in public domain or released under [Unlicense](http://unlicense.org/). Check one of the following options:
- [x] I am the original author of this code and I am willing to release it under [Unlicense](http://unlicense.org/)
- [ ] I am not the original author of this code but it is in public domain or released under [Unlicense](http://unlicense.org/) (provide reliable evidence)
### What is the purpose of your *pull request*?
- [x] Fix or improvement to an extractor (Make sure to add/update tests)
- [ ] New extractor ([Piracy websites will not be accepted](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#is-the-website-primarily-used-for-piracy))
- [ ] Core bug fix/improvement
- [ ] New feature (It is strongly [recommended to open an issue first](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#adding-new-feature-or-making-overarching-changes))
| https://api.github.com/repos/yt-dlp/yt-dlp/pulls/5677 | 2022-11-30T04:26:26Z | 2022-12-01T04:04:32Z | 2022-12-01T04:04:32Z | 2022-12-01T04:04:37Z | 877 | yt-dlp/yt-dlp | 7,965 |
Axis - improved internal parameter handling | diff --git a/homeassistant/components/axis/__init__.py b/homeassistant/components/axis/__init__.py
index 7d9cabd09fa3c3..e9e8a158a3be38 100644
--- a/homeassistant/components/axis/__init__.py
+++ b/homeassistant/components/axis/__init__.py
@@ -64,12 +64,9 @@ async def async_unload_entry(hass, config_entry):
async def async_populate_options(hass, config_entry):
"""Populate default options for device."""
- from axis.vapix import VAPIX_IMAGE_FORMAT
-
device = await get_device(hass, config_entry.data[CONF_DEVICE])
- supported_formats = device.vapix.get_param(VAPIX_IMAGE_FORMAT)
-
+ supported_formats = device.vapix.params.image_format
camera = bool(supported_formats)
options = {
diff --git a/homeassistant/components/axis/config_flow.py b/homeassistant/components/axis/config_flow.py
index 54d93f768d20a9..0c175de20c76b1 100644
--- a/homeassistant/components/axis/config_flow.py
+++ b/homeassistant/components/axis/config_flow.py
@@ -66,7 +66,6 @@ async def async_step_user(self, user_input=None):
Manage device specific parameters.
"""
- from axis.vapix import VAPIX_MODEL_ID, VAPIX_SERIAL_NUMBER
errors = {}
if user_input is not None:
@@ -79,13 +78,12 @@ async def async_step_user(self, user_input=None):
}
device = await get_device(self.hass, self.device_config)
- self.serial_number = device.vapix.get_param(
- VAPIX_SERIAL_NUMBER)
+ self.serial_number = device.vapix.params.system_serialnumber
if self.serial_number in configured_devices(self.hass):
raise AlreadyConfigured
- self.model = device.vapix.get_param(VAPIX_MODEL_ID)
+ self.model = device.vapix.params.prodnbr
return await self._create_entry()
diff --git a/homeassistant/components/axis/device.py b/homeassistant/components/axis/device.py
index 155d1c4760875f..48577799a1330d 100644
--- a/homeassistant/components/axis/device.py
+++ b/homeassistant/components/axis/device.py
@@ -67,13 +67,9 @@ async def async_update_device_registry(self):
async def async_setup(self):
"""Set up the device."""
- from axis.vapix import VAPIX_FW_VERSION, VAPIX_PROD_TYPE
-
- hass = self.hass
-
try:
self.api = await get_device(
- hass, self.config_entry.data[CONF_DEVICE])
+ self.hass, self.config_entry.data[CONF_DEVICE])
except CannotConnect:
raise ConfigEntryNotReady
@@ -83,8 +79,8 @@ async def async_setup(self):
'Unknown error connecting with Axis device on %s', self.host)
return False
- self.fw_version = self.api.vapix.get_param(VAPIX_FW_VERSION)
- self.product_type = self.api.vapix.get_param(VAPIX_PROD_TYPE)
+ self.fw_version = self.api.vapix.params.firmware_version
+ self.product_type = self.api.vapix.params.prodtype
if self.config_entry.options[CONF_CAMERA]:
self.hass.async_create_task(
@@ -188,9 +184,14 @@ async def get_device(hass, config):
password=config[CONF_PASSWORD],
port=config[CONF_PORT], web_proto='http')
+ device.vapix.initialize_params(preload_data=False)
+
try:
with async_timeout.timeout(15):
- await hass.async_add_executor_job(device.vapix.load_params)
+ await hass.async_add_executor_job(
+ device.vapix.params.update_brand)
+ await hass.async_add_executor_job(
+ device.vapix.params.update_properties)
return device
except axis.Unauthorized:
diff --git a/homeassistant/components/axis/manifest.json b/homeassistant/components/axis/manifest.json
index 0f2b39b9760e9a..4d102590184b6c 100644
--- a/homeassistant/components/axis/manifest.json
+++ b/homeassistant/components/axis/manifest.json
@@ -2,11 +2,7 @@
"domain": "axis",
"name": "Axis",
"documentation": "https://www.home-assistant.io/components/axis",
- "requirements": [
- "axis==20"
- ],
+ "requirements": ["axis==21"],
"dependencies": [],
- "codeowners": [
- "@kane610"
- ]
+ "codeowners": ["@kane610"]
}
diff --git a/requirements_all.txt b/requirements_all.txt
index 89054e68d62ebe..92627b444f7b48 100644
--- a/requirements_all.txt
+++ b/requirements_all.txt
@@ -192,7 +192,7 @@ av==6.1.2
# avion==0.10
# homeassistant.components.axis
-axis==20
+axis==21
# homeassistant.components.baidu
baidu-aip==1.6.6
diff --git a/requirements_test_all.txt b/requirements_test_all.txt
index 30903911e4f893..0b7da328ee1aba 100644
--- a/requirements_test_all.txt
+++ b/requirements_test_all.txt
@@ -61,7 +61,7 @@ apns2==0.3.0
av==6.1.2
# homeassistant.components.axis
-axis==20
+axis==21
# homeassistant.components.zha
bellows-homeassistant==0.7.2
diff --git a/tests/components/axis/test_config_flow.py b/tests/components/axis/test_config_flow.py
index 0ce5757578d0b6..1a83e9be8b54df 100644
--- a/tests/components/axis/test_config_flow.py
+++ b/tests/components/axis/test_config_flow.py
@@ -6,8 +6,6 @@
from tests.common import mock_coro, MockConfigEntry
-import axis as axis_lib
-
async def test_configured_devices(hass):
"""Test that configured devices works as expected."""
@@ -37,13 +35,9 @@ def mock_constructor(
mock_device.port = port
return mock_device
- def mock_get_param(param):
- """Fake get param method."""
- return param
-
mock_device.side_effect = mock_constructor
- mock_device.vapix.load_params.return_value = Mock()
- mock_device.vapix.get_param.side_effect = mock_get_param
+ mock_device.vapix.params.system_serialnumber = 'serialnumber'
+ mock_device.vapix.params.prodnbr = 'prodnbr'
result = await hass.config_entries.flow.async_init(
config_flow.DOMAIN,
@@ -59,23 +53,22 @@ def mock_get_param(param):
config_flow.CONF_HOST: '1.2.3.4',
config_flow.CONF_USERNAME: 'user',
config_flow.CONF_PASSWORD: 'pass',
- config_flow.CONF_PORT: 81
+ config_flow.CONF_PORT: 80
}
)
assert result['type'] == 'create_entry'
- assert result['title'] == '{} - {}'.format(
- axis_lib.vapix.VAPIX_MODEL_ID, axis_lib.vapix.VAPIX_SERIAL_NUMBER)
+ assert result['title'] == '{} - {}'.format('prodnbr', 'serialnumber')
assert result['data'] == {
axis.CONF_DEVICE: {
config_flow.CONF_HOST: '1.2.3.4',
config_flow.CONF_USERNAME: 'user',
config_flow.CONF_PASSWORD: 'pass',
- config_flow.CONF_PORT: 81
+ config_flow.CONF_PORT: 80
},
- config_flow.CONF_MAC: axis_lib.vapix.VAPIX_SERIAL_NUMBER,
- config_flow.CONF_MODEL: axis_lib.vapix.VAPIX_MODEL_ID,
- config_flow.CONF_NAME: 'Brand.ProdNbr 0'
+ config_flow.CONF_MAC: 'serialnumber',
+ config_flow.CONF_MODEL: 'prodnbr',
+ config_flow.CONF_NAME: 'prodnbr 0'
}
@@ -89,7 +82,7 @@ async def test_flow_fails_already_configured(hass):
entry.add_to_hass(hass)
mock_device = Mock()
- mock_device.vapix.get_param.return_value = '1234'
+ mock_device.vapix.params.system_serialnumber = '1234'
with patch('homeassistant.components.axis.config_flow.get_device',
return_value=mock_coro(mock_device)):
@@ -97,7 +90,7 @@ async def test_flow_fails_already_configured(hass):
config_flow.CONF_HOST: '1.2.3.4',
config_flow.CONF_USERNAME: 'user',
config_flow.CONF_PASSWORD: 'pass',
- config_flow.CONF_PORT: 81
+ config_flow.CONF_PORT: 80
})
assert result['errors'] == {'base': 'already_configured'}
@@ -114,7 +107,7 @@ async def test_flow_fails_faulty_credentials(hass):
config_flow.CONF_HOST: '1.2.3.4',
config_flow.CONF_USERNAME: 'user',
config_flow.CONF_PASSWORD: 'pass',
- config_flow.CONF_PORT: 81
+ config_flow.CONF_PORT: 80
})
assert result['errors'] == {'base': 'faulty_credentials'}
@@ -131,7 +124,7 @@ async def test_flow_fails_device_unavailable(hass):
config_flow.CONF_HOST: '1.2.3.4',
config_flow.CONF_USERNAME: 'user',
config_flow.CONF_PASSWORD: 'pass',
- config_flow.CONF_PORT: 81
+ config_flow.CONF_PORT: 80
})
assert result['errors'] == {'base': 'device_unavailable'}
@@ -207,13 +200,7 @@ def mock_constructor(
mock_device.port = port
return mock_device
- def mock_get_param(param):
- """Fake get param method."""
- return param
-
mock_device.side_effect = mock_constructor
- mock_device.vapix.load_params.return_value = Mock()
- mock_device.vapix.get_param.side_effect = mock_get_param
result = await hass.config_entries.flow.async_init(
config_flow.DOMAIN,
@@ -303,13 +290,9 @@ def mock_constructor(
mock_device.port = port
return mock_device
- def mock_get_param(param):
- """Fake get param method."""
- return param
-
mock_device.side_effect = mock_constructor
- mock_device.vapix.load_params.return_value = Mock()
- mock_device.vapix.get_param.side_effect = mock_get_param
+ mock_device.vapix.params.system_serialnumber = 'serialnumber'
+ mock_device.vapix.params.prodnbr = 'prodnbr'
result = await hass.config_entries.flow.async_init(
config_flow.DOMAIN,
@@ -324,8 +307,7 @@ def mock_get_param(param):
)
assert result['type'] == 'create_entry'
- assert result['title'] == '{} - {}'.format(
- axis_lib.vapix.VAPIX_MODEL_ID, axis_lib.vapix.VAPIX_SERIAL_NUMBER)
+ assert result['title'] == '{} - {}'.format('prodnbr', 'serialnumber')
assert result['data'] == {
axis.CONF_DEVICE: {
config_flow.CONF_HOST: '1.2.3.4',
@@ -333,7 +315,7 @@ def mock_get_param(param):
config_flow.CONF_PASSWORD: 'pass',
config_flow.CONF_PORT: 80
},
- config_flow.CONF_MAC: axis_lib.vapix.VAPIX_SERIAL_NUMBER,
- config_flow.CONF_MODEL: axis_lib.vapix.VAPIX_MODEL_ID,
+ config_flow.CONF_MAC: 'serialnumber',
+ config_flow.CONF_MODEL: 'prodnbr',
config_flow.CONF_NAME: 'name'
}
diff --git a/tests/components/axis/test_device.py b/tests/components/axis/test_device.py
index f6d17a3ef38ec6..d95352abe9c01b 100644
--- a/tests/components/axis/test_device.py
+++ b/tests/components/axis/test_device.py
@@ -190,8 +190,10 @@ async def test_shutdown():
async def test_get_device(hass):
"""Successful call."""
- with patch('axis.vapix.Vapix.load_params',
- return_value=mock_coro()):
+ with patch('axis.param_cgi.Params.update_brand',
+ return_value=mock_coro()), \
+ patch('axis.param_cgi.Params.update_properties',
+ return_value=mock_coro()):
assert await device.get_device(hass, DEVICE_DATA)
@@ -199,7 +201,7 @@ async def test_get_device_fails(hass):
"""Device unauthorized yields authentication required error."""
import axis
- with patch('axis.vapix.Vapix.load_params',
+ with patch('axis.param_cgi.Params.update_brand',
side_effect=axis.Unauthorized), \
pytest.raises(errors.AuthenticationRequired):
await device.get_device(hass, DEVICE_DATA)
@@ -209,7 +211,7 @@ async def test_get_device_device_unavailable(hass):
"""Device unavailable yields cannot connect error."""
import axis
- with patch('axis.vapix.Vapix.load_params',
+ with patch('axis.param_cgi.Params.update_brand',
side_effect=axis.RequestError), \
pytest.raises(errors.CannotConnect):
await device.get_device(hass, DEVICE_DATA)
@@ -219,7 +221,7 @@ async def test_get_device_unknown_error(hass):
"""Device yield unknown error."""
import axis
- with patch('axis.vapix.Vapix.load_params',
+ with patch('axis.param_cgi.Params.update_brand',
side_effect=axis.AxisException), \
pytest.raises(errors.AuthenticationRequired):
await device.get_device(hass, DEVICE_DATA)
| <!--
## Breaking Change:
-->
<!-- What is breaking and why we have to break it. Remove this section only if it was NOT a breaking change. -->
## Description:
This will result in a faster startup per entry since less network data will be retrieved.
<!--
**Related issue (if applicable):** fixes #<home-assistant issue number goes here>
**Pull request in [home-assistant.io](https://github.com/home-assistant/home-assistant.io) with documentation (if applicable):** home-assistant/home-assistant.io#<home-assistant.io PR number goes here>
-->
## Checklist:
- [x] The code change is tested and works locally.
- [x] Local tests pass with `tox`. **Your PR cannot be merged unless tests pass**
- [x] There is no commented out code in this PR.
<!--
If user exposed functionality or configuration variables are added/changed:
- [ ] Documentation added/updated in [home-assistant.io](https://github.com/home-assistant/home-assistant.io)
If the code communicates with devices, web services, or third-party tools:
- [ ] [_The manifest file_][manifest-docs] has all fields filled out correctly ([example][ex-manifest]).
- [ ] New dependencies have been added to `requirements` in the manifest ([example][ex-requir]).
- [ ] New dependencies are only imported inside functions that use them ([example][ex-import]).
- [ ] New or updated dependencies have been added to `requirements_all.txt` by running `script/gen_requirements_all.py`.
- [ ] New files were added to `.coveragerc`.
-->
If the code does not interact with devices:
- [x] Tests have been added to verify that the new code works.
[ex-manifest]: https://github.com/home-assistant/home-assistant/blob/dev/homeassistant/components/mobile_app/manifest.json
[ex-requir]: https://github.com/home-assistant/home-assistant/blob/dev/homeassistant/components/mobile_app/manifest.json#L5
[ex-import]: https://github.com/home-assistant/home-assistant/blob/dev/homeassistant/components/keyboard/__init__.py#L23
[manifest-docs]: https://developers.home-assistant.io/docs/en/development_checklist.html#_the-manifest-file_ | https://api.github.com/repos/home-assistant/core/pulls/23122 | 2019-04-15T15:43:18Z | 2019-04-16T08:46:29Z | 2019-04-16T08:46:29Z | 2019-04-16T08:49:03Z | 3,159 | home-assistant/core | 39,206 |
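A hedged sketch of the loading pattern the Axis diff above adopts — initialize without preloading, then fetch only the needed parameter groups under a timeout. `FakeParams` stands in for `axis.param_cgi.Params`; its attributes and values are invented:

```python
import asyncio

class FakeParams:
    """Stand-in for the library's Params object -- not the real API."""
    def update_brand(self):       # a blocking network call in reality
        self.prodnbr = "M1065-L"
    def update_properties(self):  # a blocking network call in reality
        self.firmware_version = "9.80.1"

async def load_minimal_params(params):
    loop = asyncio.get_running_loop()
    # Fetch just the two groups needed at setup instead of every group,
    # each guarded by a timeout, as in get_device() above.
    for fetch in (params.update_brand, params.update_properties):
        await asyncio.wait_for(loop.run_in_executor(None, fetch), timeout=15)
    return params

params = asyncio.run(load_minimal_params(FakeParams()))
print(params.prodnbr, params.firmware_version)  # -> M1065-L 9.80.1
```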
chore: fix font not loaded | diff --git a/style.css b/style.css
index 49978a771a5..fe74ec41fc0 100644
--- a/style.css
+++ b/style.css
@@ -1,6 +1,6 @@
/* temporary fix to load default gradio font in frontend instead of backend */
-@import url('webui-assets/css/sourcesanspro.css');
+@import url('/webui-assets/css/sourcesanspro.css');
/* temporary fix to hide gradio crop tool until it's fixed https://github.com/gradio-app/gradio/issues/3810 */
| fix #15182
## Description
It's a bug relative to 2f98a35fc4508494355c01ec45f5bec725f570a6
UI is looking for `http://localhost:7863/file=/app/webui-assets/css/sourcesanspro.css` but the assets repo is mounted at `http://localhost:7863/webui-assets`. It is because `style.css` is loaded at `http://localhost:7863/file=/app/style.css` and `@import url()` with relative path.
## Screenshots/videos:
## Checklist:
- [X] I have read [contributing wiki page](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Contributing)
- [X] I have performed a self-review of my own code
- [X] My code follows the [style guidelines](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Contributing#code-style)
- [x] My code passes [tests](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Tests)
| https://api.github.com/repos/AUTOMATIC1111/stable-diffusion-webui/pulls/15183 | 2024-03-08T18:11:11Z | 2024-03-08T18:58:41Z | 2024-03-08T18:58:41Z | 2024-03-08T18:58:41Z | 122 | AUTOMATIC1111/stable-diffusion-webui | 40,369 |
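The URL arithmetic behind the one-character fix above can be checked with `urllib.parse.urljoin`; this is just an illustration of how browsers resolve the import, not code from the repo:

```python
from urllib.parse import urljoin

stylesheet = "http://localhost:7863/file=/app/style.css"

# Relative import: resolved against the stylesheet's URL -> the broken path
print(urljoin(stylesheet, "webui-assets/css/sourcesanspro.css"))
# http://localhost:7863/file=/app/webui-assets/css/sourcesanspro.css

# Root-relative import (leading slash): resolved against the origin -> works
print(urljoin(stylesheet, "/webui-assets/css/sourcesanspro.css"))
# http://localhost:7863/webui-assets/css/sourcesanspro.css
```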
updated ruff syntax | diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
index c1ac2548a..dfa53be66 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/main.yml
@@ -25,7 +25,7 @@ jobs:
- name: Lint with ruff
run: |
# stop the build if there are Python syntax errors or undefined names
- ruff . --format=github --select=E9,F63,F7,F82
+ ruff . -- --format=github --select=E9,F63,F7,F82
# exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
- name: Sherlock Site Detect Tests
diff --git a/sherlock/sherlock.py b/sherlock/sherlock.py
index 9fbc8c14a..bf6868966 100644
--- a/sherlock/sherlock.py
+++ b/sherlock/sherlock.py
@@ -368,11 +368,11 @@ def sherlock(
# Attempt to get request information
try:
http_status = r.status_code
- except:
+ except Exception:
http_status = "?"
try:
response_text = r.text.encode(r.encoding or "UTF-8")
- except:
+ except Exception:
response_text = ""
query_status = QueryStatus.UNKNOWN
| https://api.github.com/repos/sherlock-project/sherlock/pulls/1959 | 2023-12-21T19:37:24Z | 2023-12-21T19:38:20Z | 2023-12-21T19:38:20Z | 2023-12-21T19:38:21Z | 336 | sherlock-project/sherlock | 36,329 |
|
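A small illustration (not from the sherlock codebase) of why the diff above narrows the bare `except:`: a bare clause also swallows `KeyboardInterrupt` and `SystemExit`, while `except Exception:` still catches the encoding errors this code cares about:

```python
def encode_response(text, encoding):
    try:
        return text.encode(encoding or "UTF-8")
    except Exception:  # e.g. LookupError for an unknown codec
        return b""     # ...but Ctrl-C (KeyboardInterrupt) still propagates

print(encode_response("hi", "utf-8"))    # -> b'hi'
print(encode_response("hi", "no-such"))  # -> b''
```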
Avoid WorkerProcess._new_stdin FD sharing (#51623) | diff --git a/lib/ansible/executor/process/worker.py b/lib/ansible/executor/process/worker.py
index 32ea239a965b93..af831a44dd941e 100644
--- a/lib/ansible/executor/process/worker.py
+++ b/lib/ansible/executor/process/worker.py
@@ -67,25 +67,36 @@ def __init__(self, final_q, task_vars, host, task, play_context, loader, variabl
self._variable_manager = variable_manager
self._shared_loader_obj = shared_loader_obj
- if sys.stdin.isatty():
- # dupe stdin, if we have one
- self._new_stdin = sys.stdin
- try:
- fileno = sys.stdin.fileno()
- if fileno is not None:
- try:
- self._new_stdin = os.fdopen(os.dup(fileno))
- except OSError:
- # couldn't dupe stdin, most likely because it's
- # not a valid file descriptor, so we just rely on
- # using the one that was passed in
- pass
- except (AttributeError, ValueError):
- # couldn't get stdin's fileno, so we just carry on
- pass
- else:
- # set to /dev/null
- self._new_stdin = os.devnull
+ def _save_stdin(self):
+ self._new_stdin = os.devnull
+ try:
+ if sys.stdin.isatty() and sys.stdin.fileno() is not None:
+ try:
+ self._new_stdin = os.fdopen(os.dup(sys.stdin.fileno()))
+ except OSError:
+ # couldn't dupe stdin, most likely because it's
+ # not a valid file descriptor, so we just rely on
+ # using the one that was passed in
+ pass
+ except (AttributeError, ValueError):
+ # couldn't get stdin's fileno, so we just carry on
+ pass
+
+ def start(self):
+ '''
+ multiprocessing.Process replaces the worker's stdin with a new file
+ opened on os.devnull, but we wish to preserve it if it is connected to
+ a terminal. Therefore dup a copy prior to calling the real start(),
+ ensuring the descriptor is preserved somewhere in the new child, and
+ make sure it is closed in the parent when start() completes.
+ '''
+
+ self._save_stdin()
+ try:
+ return super(WorkerProcess, self).start()
+ finally:
+ if self._new_stdin != os.devnull:
+ self._new_stdin.close()
def run(self):
'''
|
##### SUMMARY
This avoids holding open _new_stdin within the parent process, where
subsequent WorkerProcess forks will duplicate it, producing significant
noise in the FD table of every worker.
Fix by overriding start() and moving the work there, with a finally:
block to ensure the parent's FD is closed after start().
Fixes #51623
##### ISSUE TYPE
- Bugfix Pull Request
##### COMPONENT NAME
lib/ansible/executor/process/worker.py
##### ADDITIONAL INFORMATION | https://api.github.com/repos/ansible/ansible/pulls/51624 | 2019-02-01T18:19:56Z | 2019-03-26T14:41:46Z | 2019-03-26T14:41:46Z | 2019-07-25T16:40:46Z | 600 | ansible/ansible | 49,218 |
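A condensed sketch of the dup-then-close pattern the ansible patch describes; the surrounding fork machinery is omitted:

```python
import os
import sys

def dup_stdin():
    """Duplicate stdin if it is a TTY, else fall back to /dev/null."""
    try:
        if sys.stdin.isatty() and sys.stdin.fileno() is not None:
            return os.fdopen(os.dup(sys.stdin.fileno()))
    except (AttributeError, ValueError, OSError):
        pass  # stdin unusable; behave as if it were /dev/null
    return os.devnull

new_stdin = dup_stdin()
try:
    pass  # start() the worker here; the child inherits the duplicate
finally:
    if new_stdin != os.devnull:
        new_stdin.close()  # the parent must not keep its copy open
```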
Update multimodal anthropic docs | diff --git a/docs/module_guides/models/multi_modal.md b/docs/module_guides/models/multi_modal.md
index 16b09e3bb4df8..5e92d596423f3 100644
--- a/docs/module_guides/models/multi_modal.md
+++ b/docs/module_guides/models/multi_modal.md
@@ -148,7 +148,7 @@ Below table lists some vector stores supporting Multi-Modal use cases. Our Llama
## Multi-Modal LLM Modules
-We support integrations with GPT4-V, CLIP (OpenAI), BLIP (Salesforce), and Replicate (LLaVA, Fuyu-8B, MiniGPT-4, CogVLM), and more.
+We support integrations with GPT4-V, Anthropic (Opus, Sonnet), Gemini (Google), CLIP (OpenAI), BLIP (Salesforce), and Replicate (LLaVA, Fuyu-8B, MiniGPT-4, CogVLM), and more.
```{toctree}
---
@@ -160,6 +160,7 @@ maxdepth: 1
/examples/multi_modal/multi_modal_pydantic.ipynb
/examples/multi_modal/gpt4v_experiments_cot.ipynb
/examples/multi_modal/llava_multi_modal_tesla_10q.ipynb
+/examples/multi_modal/anthropic_multi_modal.ipynb
```
## Multi-Modal Retrieval Augmented Generation
| # Description
PR to update the Anthropic multi-modal docs.
Fixes # (issue)
## Type of Change
Please delete options that are not relevant.
- [x] This change requires a documentation update
# How Has This Been Tested?
Please describe the tests that you ran to verify your changes. Provide instructions so we can reproduce. Please also list any relevant details for your test configuration
- [ ] Added new unit/integration tests
- [ ] Added new notebook (that tests end-to-end)
- [ ] I stared at the code and made sure it makes sense
# Suggested Checklist:
- [ ] I have performed a self-review of my own code
- [ ] I have commented my code, particularly in hard-to-understand areas
- [ ] I have made corresponding changes to the documentation
- [ ] I have added Google Colab support for the newly added notebooks.
- [ ] My changes generate no new warnings
- [ ] I have added tests that prove my fix is effective or that my feature works
- [ ] New and existing unit tests pass locally with my changes
- [ ] I ran `make format; make lint` to appease the lint gods
| https://api.github.com/repos/run-llama/llama_index/pulls/11643 | 2024-03-05T10:32:11Z | 2024-03-05T14:48:11Z | 2024-03-05T14:48:11Z | 2024-03-05T14:48:11Z | 325 | run-llama/llama_index | 6,924 |
Remove note about Python 3.10 support availability | diff --git a/README.md b/README.md
index 3be7e3f527801..ab34326eea364 100644
--- a/README.md
+++ b/README.md
@@ -104,9 +104,6 @@ MariaDB is not tested/recommended.
**Note**: SQLite is used in Airflow tests. Do not use it in production. We recommend
using the latest stable version of SQLite for local development.
-**Note**: Support for Python v3.10 will be available from Airflow 2.3.0. The `main` (development) branch
-already supports Python 3.10.
-
**Note**: Airflow currently can be run on POSIX-compliant Operating Systems. For development it is regularly
tested on fairly modern Linux Distros and recent versions of MacOS.
On Windows you can run it via WSL2 (Windows Subsystem for Linux 2) or via Linux Containers.
| Airflow 2.3+ has been released with official Python 3.10 support already.
---
**^ Add meaningful description above**
Read the **[Pull Request Guidelines](https://github.com/apache/airflow/blob/main/CONTRIBUTING.rst#pull-request-guidelines)** for more information.
In case of fundamental code changes, an Airflow Improvement Proposal ([AIP](https://cwiki.apache.org/confluence/display/AIRFLOW/Airflow+Improvements+Proposals)) is needed.
In case of a new dependency, check compliance with the [ASF 3rd Party License Policy](https://www.apache.org/legal/resolved.html#category-x).
In case of backwards incompatible changes please leave a note in a newsfragment file, named `{pr_number}.significant.rst`, in [newsfragments](https://github.com/apache/airflow/tree/main/newsfragments).
| https://api.github.com/repos/apache/airflow/pulls/24861 | 2022-07-06T07:12:32Z | 2022-07-06T10:59:29Z | 2022-07-06T10:59:29Z | 2022-07-06T15:17:04Z | 198 | apache/airflow | 14,177 |
[extractor/tempo] Add IVXPlayer extractor | diff --git a/yt_dlp/extractor/_extractors.py b/yt_dlp/extractor/_extractors.py
index 2fe15f6d286..a0534d2b9be 100644
--- a/yt_dlp/extractor/_extractors.py
+++ b/yt_dlp/extractor/_extractors.py
@@ -1817,7 +1817,7 @@
)
from .teletask import TeleTaskIE
from .telewebion import TelewebionIE
-from .tempo import TempoIE
+from .tempo import TempoIE, IVXPlayerIE
from .tencent import (
IflixEpisodeIE,
IflixSeriesIE,
diff --git a/yt_dlp/extractor/tempo.py b/yt_dlp/extractor/tempo.py
index 1cfb956e50b..9318d6f9ad8 100644
--- a/yt_dlp/extractor/tempo.py
+++ b/yt_dlp/extractor/tempo.py
@@ -1,5 +1,81 @@
+import re
+
from .common import InfoExtractor
-from ..utils import int_or_none, parse_iso8601, str_or_none, traverse_obj
+from ..utils import (
+ int_or_none,
+ parse_iso8601,
+ traverse_obj,
+ try_call
+)
+
+
+class IVXPlayerIE(InfoExtractor):
+ _VALID_URL = r'ivxplayer:(?P<video_id>\d+):(?P<player_key>\w+)'
+ _TESTS = [{
+ 'url': 'ivxplayer:2366065:4a89dfe6bc8f002596b1dfbd600730b1',
+ 'info_dict': {
+ 'id': '2366065',
+ 'ext': 'mp4',
+ 'duration': 112,
+ 'upload_date': '20221204',
+ 'title': 'Film Indonesia di Disney Content Showcase Asia Pacific 2022',
+ 'timestamp': 1670151746,
+ 'thumbnail': 'https://ivx-image.ivideosmart.com/serve/image/video/2366065?width=300'
+ }
+ }]
+ _WEBPAGE_TESTS = [{
+ 'url': 'https://www.cantika.com/video/31737/film-indonesia-di-disney-content-showcase-asia-pacific-2022',
+ 'info_dict': {
+ 'id': '2374200',
+ 'ext': 'mp4',
+ 'duration': 110,
+ 'title': 'Serial Indonesia di Disney Content Showcase Asia Pacific 2022',
+ 'timestamp': 1670639416,
+ 'upload_date': '20221210',
+ 'thumbnail': 'https://ivx-image.ivideosmart.com/serve/image/video/2374200?width=300'
+ }
+ }, {
+ 'url': 'https://www.gooto.com/video/11437/wuling-suv-ramai-dikunjungi-di-giias-2018',
+ 'info_dict': {
+ 'id': '892109',
+ 'ext': 'mp4',
+ 'title': 'Wuling SUV Ramai Dikunjungi di GIIAS 2018',
+ 'upload_date': '20180811',
+ 'description': 'md5:6d901483d0aacc664aecb4489719aafa',
+ 'duration': 75,
+ 'timestamp': 1534011263,
+ 'thumbnail': 'https://ivx-image.ivideosmart.com/serve/image/video/892109?width=300'
+ }
+ }]
+
+ @classmethod
+ def _extract_embed_urls(cls, url, webpage):
+ # more info at https://player.ivideosmart.com/ivsplayer/v4/dist/js/loader.js
+ mobj = re.search(
+ r'<ivs-player\s*[^>]+data-ivs-key\s*=\s*"(?P<player_key>[\w]+)\s*[^>]+\bdata-ivs-vid="(?P<video_id>[\w-]+)',
+ webpage)
+ if mobj:
+ yield f'ivxplayer:{mobj.group("video_id")}:{mobj.group("player_key")}'
+ raise cls.StopExtraction()
+
+ def _real_extract(self, url):
+ video_id, player_key = self._match_valid_url(url).group('video_id', 'player_key')
+ json_data = self._download_json(
+ f'https://ivxplayer.ivideosmart.com/prod/video/{video_id}?key={player_key}', video_id)
+
+ formats, subtitles = self._extract_m3u8_formats_and_subtitles(
+ json_data['player']['video_url'], video_id)
+
+ return {
+ 'id': str(json_data['ivx']['id']),
+ 'title': traverse_obj(json_data, ('ivx', 'name')),
+ 'description': traverse_obj(json_data, ('ivx', 'description')),
+ 'duration': int_or_none(traverse_obj(json_data, ('ivx', 'duration'))),
+ 'timestamp': parse_iso8601(traverse_obj(json_data, ('ivx', 'published_at'))),
+ 'formats': formats,
+ 'subtitles': subtitles,
+ 'thumbnail': traverse_obj(json_data, ('ivx', 'thumbnail_url'))
+ }
class TempoIE(InfoExtractor):
@@ -7,14 +83,14 @@ class TempoIE(InfoExtractor):
_TESTS = [{
'url': 'https://video.tempo.co/read/30058/anies-baswedan-ajukan-banding-putusan-ptun-batalkan-ump-dki',
'info_dict': {
- 'id': '2144438',
+ 'id': '2144275',
+ 'display_id': 'anies-baswedan-ajukan-banding-putusan-ptun-batalkan-ump-dki',
'ext': 'mp4',
'title': 'Anies Baswedan Ajukan Banding Putusan PTUN Batalkan UMP DKI',
- 'display_id': 'anies-baswedan-ajukan-banding-putusan-ptun-batalkan-ump-dki',
- 'duration': 84,
+ 'duration': 85,
'description': 'md5:a6822b7c4c874fa7e5bd63e96a387b66',
'thumbnail': 'https://statik.tempo.co/data/2022/07/27/id_1128287/1128287_720.jpg',
- 'timestamp': 1658911277,
+ 'timestamp': 1658907970,
'upload_date': '20220727',
'tags': ['Anies Baswedan', ' PTUN', ' PTUN | Pengadilan Tata Usaha Negara', ' PTUN Batalkan UMP DKI', ' UMP DKI'],
}
@@ -24,30 +100,15 @@ def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
- player_key, widget_id = self._search_regex(
- r'<ivs-player\s*[^>]+data-ivs-key\s*=\s*"(?P<player_key>[\w]+)[^>]+\bdata-ivs-wid="(?P<widget_id>[\w-]+)',
- webpage, 'player_key, widget_id', group=('player_key', 'widget_id'))
+ _, video_id, player_key = next(IVXPlayerIE._extract_embed_urls(url, webpage)).split(':')
json_ld_data = self._search_json_ld(webpage, display_id)
- json_data = self._download_json(
- f'https://ivxplayer.ivideosmart.com/prod/widget/{widget_id}',
- display_id, query={'key': player_key})
- formats, subtitles = self._extract_m3u8_formats_and_subtitles(
- json_data['player']['video_url'], display_id, ext='mp4')
-
- return {
- 'id': str(json_data['ivx']['id']),
- 'display_id': display_id,
- 'formats': formats,
- 'subtitles': subtitles,
- 'title': (self._html_search_meta('twitter:title', webpage) or self._og_search_title(webpage)
- or traverse_obj(json_data, ('ivx', 'name'))),
- 'duration': int_or_none(traverse_obj(json_data, ('ivx', 'duration'))),
- 'thumbnail': (self._html_search_meta('twitter:image:src', webpage) or self._og_search_thumbnail(webpage)
- or traverse_obj(json_data, ('ivx', 'thumbnail_url'))),
- 'description': (json_ld_data.get('description') or self._html_search_meta(['description', 'twitter:description'], webpage)
- or self._og_search_description(webpage)),
- 'timestamp': parse_iso8601(traverse_obj(json_data, ('ivx', 'created_at'))),
- 'tags': str_or_none(self._html_search_meta('keywords', webpage), '').split(','),
- }
+ return self.url_result(
+ f'ivxplayer:{video_id}:{player_key}', display_id=display_id,
+ thumbnail=self._html_search_meta('twitter:image:src', webpage) or self._og_search_thumbnail(webpage),
+ tags=try_call(lambda: self._html_search_meta('keywords', webpage).split(',')),
+ description=(json_ld_data.get('description')
+ or self._html_search_meta(('description', 'twitter:description'), webpage)
+ or self._og_search_description(webpage)),
+ url_transparent=True)
| **IMPORTANT**: PRs without the template will be CLOSED
### Description of your *pull request* and other information
</details>
<!--
Explanation of your *pull request* in arbitrary form goes here. Please **make sure the description explains the purpose and effect** of your *pull request* and is worded well enough to be understood. Provide as much **context and examples** as possible
-->
This PR changes the code in tempo.py to be more general, as Tempo uses IVXPlayer as its backend. IVXPlayer is also used by other websites such as cantika.com and gooto.com. This change may alter the current Tempo video ID, as the old ID seems inconsistent.
Fixes #
<details open><summary>Template</summary> <!-- OPEN is intentional -->
<!--
# PLEASE FOLLOW THE GUIDE BELOW
- You will be asked some questions, please read them **carefully** and answer honestly
- Put an `x` into all the boxes `[ ]` relevant to your *pull request* (like [x])
- Use *Preview* tab to see how your *pull request* will actually look like
-->
### Before submitting a *pull request* make sure you have:
- [x] At least skimmed through [contributing guidelines](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#developer-instructions) including [yt-dlp coding conventions](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#yt-dlp-coding-conventions)
- [x] [Searched](https://github.com/yt-dlp/yt-dlp/search?q=is%3Apr&type=Issues) the bugtracker for similar pull requests
- [x] Checked the code with [flake8](https://pypi.python.org/pypi/flake8) and [ran relevant tests](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#developer-instructions)
### In order to be accepted and merged into yt-dlp each piece of code must be in public domain or released under [Unlicense](http://unlicense.org/). Check one of the following options:
- [x] I am the original author of this code and I am willing to release it under [Unlicense](http://unlicense.org/)
- [ ] I am not the original author of this code but it is in public domain or released under [Unlicense](http://unlicense.org/) (provide reliable evidence)
### What is the purpose of your *pull request*?
- [x] Fix or improvement to an extractor (Make sure to add/update tests)
- [x] New extractor ([Piracy websites will not be accepted](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#is-the-website-primarily-used-for-piracy))
- [ ] Core bug fix/improvement
- [ ] New feature (It is strongly [recommended to open an issue first](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#adding-new-feature-or-making-overarching-changes))
| https://api.github.com/repos/yt-dlp/yt-dlp/pulls/5837 | 2022-12-20T03:24:37Z | 2023-02-17T09:16:47Z | 2023-02-17T09:16:47Z | 2023-02-17T09:16:47Z | 2,170 | yt-dlp/yt-dlp | 7,490 |
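To make the `ivxplayer:<video_id>:<player_key>` convention concrete, here is a toy version of the embed detection run against a hypothetical HTML snippet (the key and ID come from the PR's test data; the regex is simplified from the diff):

```python
import re

html = ('<ivs-player data-ivs-key="4a89dfe6bc8f002596b1dfbd600730b1" '
        'data-ivs-vid="2366065"></ivs-player>')

m = re.search(
    r'<ivs-player\s*[^>]+data-ivs-key\s*=\s*"(?P<player_key>\w+)"'
    r'[^>]*\bdata-ivs-vid="(?P<video_id>[\w-]+)', html)
if m:
    # This pseudo-URL is what TempoIE hands off to IVXPlayerIE
    print(f'ivxplayer:{m.group("video_id")}:{m.group("player_key")}')
    # -> ivxplayer:2366065:4a89dfe6bc8f002596b1dfbd600730b1
```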
Update aiohttp to 2.3.1 | diff --git a/homeassistant/components/http/__init__.py b/homeassistant/components/http/__init__.py
index c9de284067fdf2..0a7bd3b7b79e35 100644
--- a/homeassistant/components/http/__init__.py
+++ b/homeassistant/components/http/__init__.py
@@ -181,8 +181,6 @@ def __init__(self, hass, development, api_password, ssl_certificate,
use_x_forwarded_for, trusted_networks,
login_threshold, is_ban_enabled):
"""Initialize the WSGI Home Assistant server."""
- import aiohttp_cors
-
middlewares = [auth_middleware, staticresource_middleware]
if is_ban_enabled:
@@ -207,6 +205,8 @@ def __init__(self, hass, development, api_password, ssl_certificate,
self.server = None
if cors_origins:
+ import aiohttp_cors
+
self.cors = aiohttp_cors.setup(self.app, defaults={
host: aiohttp_cors.ResourceOptions(
allow_headers=ALLOWED_CORS_HEADERS,
@@ -336,7 +336,9 @@ def start(self):
_LOGGER.error("Failed to create HTTP server at port %d: %s",
self.server_port, error)
- self.app._frozen = False # pylint: disable=protected-access
+ # pylint: disable=protected-access
+ self.app._middlewares = tuple(self.app._prepare_middleware())
+ self.app._frozen = False
@asyncio.coroutine
def stop(self):
@@ -346,7 +348,7 @@ def stop(self):
yield from self.server.wait_closed()
yield from self.app.shutdown()
if self._handler:
- yield from self._handler.finish_connections(60.0)
+ yield from self._handler.shutdown(10)
yield from self.app.cleanup()
diff --git a/homeassistant/components/http/auth.py b/homeassistant/components/http/auth.py
index 4b971c883d3dd6..ce5bfca3ac1851 100644
--- a/homeassistant/components/http/auth.py
+++ b/homeassistant/components/http/auth.py
@@ -5,6 +5,7 @@
import logging
from aiohttp import hdrs
+from aiohttp.web import middleware
from homeassistant.const import HTTP_HEADER_HA_AUTH
from .util import get_real_ip
@@ -15,47 +16,37 @@
_LOGGER = logging.getLogger(__name__)
+@middleware
@asyncio.coroutine
-def auth_middleware(app, handler):
+def auth_middleware(request, handler):
"""Authenticate as middleware."""
# If no password set, just always set authenticated=True
- if app['hass'].http.api_password is None:
- @asyncio.coroutine
- def no_auth_middleware_handler(request):
- """Auth middleware to approve all requests."""
- request[KEY_AUTHENTICATED] = True
- return handler(request)
-
- return no_auth_middleware_handler
-
- @asyncio.coroutine
- def auth_middleware_handler(request):
- """Auth middleware to check authentication."""
- # Auth code verbose on purpose
- authenticated = False
-
- if (HTTP_HEADER_HA_AUTH in request.headers and
- validate_password(
- request, request.headers[HTTP_HEADER_HA_AUTH])):
- # A valid auth header has been set
- authenticated = True
+ if request.app['hass'].http.api_password is None:
+ request[KEY_AUTHENTICATED] = True
+ return handler(request)
- elif (DATA_API_PASSWORD in request.query and
- validate_password(request, request.query[DATA_API_PASSWORD])):
- authenticated = True
+ # Check authentication
+ authenticated = False
- elif (hdrs.AUTHORIZATION in request.headers and
- validate_authorization_header(request)):
- authenticated = True
+ if (HTTP_HEADER_HA_AUTH in request.headers and
+ validate_password(
+ request, request.headers[HTTP_HEADER_HA_AUTH])):
+ # A valid auth header has been set
+ authenticated = True
- elif is_trusted_ip(request):
- authenticated = True
+ elif (DATA_API_PASSWORD in request.query and
+ validate_password(request, request.query[DATA_API_PASSWORD])):
+ authenticated = True
- request[KEY_AUTHENTICATED] = authenticated
+ elif (hdrs.AUTHORIZATION in request.headers and
+ validate_authorization_header(request)):
+ authenticated = True
- return handler(request)
+ elif is_trusted_ip(request):
+ authenticated = True
- return auth_middleware_handler
+ request[KEY_AUTHENTICATED] = authenticated
+ return handler(request)
def is_trusted_ip(request):
diff --git a/homeassistant/components/http/ban.py b/homeassistant/components/http/ban.py
index aa01ccde8d763c..f636ad80c36e67 100644
--- a/homeassistant/components/http/ban.py
+++ b/homeassistant/components/http/ban.py
@@ -6,6 +6,7 @@
import logging
import os
+from aiohttp.web import middleware
from aiohttp.web_exceptions import HTTPForbidden, HTTPUnauthorized
import voluptuous as vol
@@ -32,35 +33,32 @@
})
+@middleware
@asyncio.coroutine
-def ban_middleware(app, handler):
+def ban_middleware(request, handler):
"""IP Ban middleware."""
- if not app[KEY_BANS_ENABLED]:
- return handler
+ if not request.app[KEY_BANS_ENABLED]:
+ return (yield from handler(request))
- if KEY_BANNED_IPS not in app:
- hass = app['hass']
- app[KEY_BANNED_IPS] = yield from hass.async_add_job(
+ if KEY_BANNED_IPS not in request.app:
+ hass = request.app['hass']
+ request.app[KEY_BANNED_IPS] = yield from hass.async_add_job(
load_ip_bans_config, hass.config.path(IP_BANS_FILE))
- @asyncio.coroutine
- def ban_middleware_handler(request):
- """Verify if IP is not banned."""
- ip_address_ = get_real_ip(request)
-
- is_banned = any(ip_ban.ip_address == ip_address_
- for ip_ban in request.app[KEY_BANNED_IPS])
+ # Verify if IP is not banned
+ ip_address_ = get_real_ip(request)
- if is_banned:
- raise HTTPForbidden()
+ is_banned = any(ip_ban.ip_address == ip_address_
+ for ip_ban in request.app[KEY_BANNED_IPS])
- try:
- return (yield from handler(request))
- except HTTPUnauthorized:
- yield from process_wrong_login(request)
- raise
+ if is_banned:
+ raise HTTPForbidden()
- return ban_middleware_handler
+ try:
+ return (yield from handler(request))
+ except HTTPUnauthorized:
+ yield from process_wrong_login(request)
+ raise
@asyncio.coroutine
diff --git a/homeassistant/components/http/static.py b/homeassistant/components/http/static.py
index 21e955fc9686ef..7ff8b2420426a6 100644
--- a/homeassistant/components/http/static.py
+++ b/homeassistant/components/http/static.py
@@ -3,7 +3,7 @@
import re
from aiohttp import hdrs
-from aiohttp.web import FileResponse
+from aiohttp.web import FileResponse, middleware
from aiohttp.web_exceptions import HTTPNotFound
from aiohttp.web_urldispatcher import StaticResource
from yarl import unquote
@@ -64,21 +64,17 @@ def sendfile(request, fobj, count):
self._sendfile = sendfile
+@middleware
@asyncio.coroutine
-def staticresource_middleware(app, handler):
+def staticresource_middleware(request, handler):
"""Middleware to strip out fingerprint from fingerprinted assets."""
- @asyncio.coroutine
- def static_middleware_handler(request):
- """Strip out fingerprints from resource names."""
- if not request.path.startswith('/static/'):
- return handler(request)
-
- fingerprinted = _FINGERPRINT.match(request.match_info['filename'])
+ if not request.path.startswith('/static/'):
+ return handler(request)
- if fingerprinted:
- request.match_info['filename'] = \
- '{}.{}'.format(*fingerprinted.groups())
+ fingerprinted = _FINGERPRINT.match(request.match_info['filename'])
- return handler(request)
+ if fingerprinted:
+ request.match_info['filename'] = \
+ '{}.{}'.format(*fingerprinted.groups())
- return static_middleware_handler
+ return handler(request)
diff --git a/homeassistant/package_constraints.txt b/homeassistant/package_constraints.txt
index 7da871606845ab..00df81290e5b14 100644
--- a/homeassistant/package_constraints.txt
+++ b/homeassistant/package_constraints.txt
@@ -5,7 +5,7 @@ pip>=8.0.3
jinja2>=2.9.6
voluptuous==0.10.5
typing>=3,<4
-aiohttp==2.2.5
+aiohttp==2.3.1
async_timeout==2.0.0
chardet==3.0.4
astral==1.4
diff --git a/requirements_all.txt b/requirements_all.txt
index 3d899ecbee7aee..2f0e7f8e1adeeb 100644
--- a/requirements_all.txt
+++ b/requirements_all.txt
@@ -6,7 +6,7 @@ pip>=8.0.3
jinja2>=2.9.6
voluptuous==0.10.5
typing>=3,<4
-aiohttp==2.2.5
+aiohttp==2.3.1
async_timeout==2.0.0
chardet==3.0.4
astral==1.4
diff --git a/setup.py b/setup.py
index cd7043650ad245..74ce4c4d2e3bd3 100755
--- a/setup.py
+++ b/setup.py
@@ -22,7 +22,7 @@
'jinja2>=2.9.6',
'voluptuous==0.10.5',
'typing>=3,<4',
- 'aiohttp==2.2.5',
+ 'aiohttp==2.3.1',
'async_timeout==2.0.0',
'chardet==3.0.4',
'astral==1.4',
diff --git a/tests/components/http/test_init.py b/tests/components/http/test_init.py
index 4428b5043fdd4f..95c192b47e2232 100644
--- a/tests/components/http/test_init.py
+++ b/tests/components/http/test_init.py
@@ -139,22 +139,10 @@ def test_registering_view_while_running(hass, test_client):
}
)
- yield from setup.async_setup_component(hass, 'api')
-
yield from hass.async_start()
-
- yield from hass.async_block_till_done()
-
+ # This raises a RuntimeError if app is frozen
hass.http.register_view(TestView)
- client = yield from test_client(hass.http.app)
-
- resp = yield from client.get('/hello')
- assert resp.status == 200
-
- text = yield from resp.text()
- assert text == 'hello'
-
@asyncio.coroutine
def test_api_base_url_with_domain(hass):
| ## Description:
Update aiohttp to version 2.3.1
http://aiohttp.readthedocs.io/en/stable/changes.html
If the code does not interact with devices:
- [ ] Local tests with `tox` run successfully. **Your PR cannot be merged unless tests pass**
- [ ] Tests have been added to verify that the new code works.
[ex-requir]: https://github.com/home-assistant/home-assistant/blob/dev/homeassistant/components/keyboard.py#L14
[ex-import]: https://github.com/home-assistant/home-assistant/blob/dev/homeassistant/components/keyboard.py#L54
| https://api.github.com/repos/home-assistant/core/pulls/10139 | 2017-10-25T15:21:21Z | 2017-11-06T02:42:32Z | 2017-11-06T02:42:32Z | 2018-03-02T23:42:16Z | 2,574 | home-assistant/core | 38,852 |
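For context on the aiohttp change driving the Home Assistant diff above: 2.3 replaced factory-style middlewares with flat `@middleware` coroutines. A minimal sketch in modern async/await syntax (the PR itself still used `@asyncio.coroutine`/`yield from`, and the handler logic here is invented):

```python
from aiohttp import web

# Old factory style (pre-2.3): coroutine(app, handler) returning a wrapper
async def auth_factory(app, handler):
    async def wrapper(request):
        request["authenticated"] = app.get("api_password") is None
        return await handler(request)
    return wrapper

# New style (aiohttp >= 2.3): one coroutine(request, handler), decorated
@web.middleware
async def auth_middleware(request, handler):
    request["authenticated"] = request.app.get("api_password") is None
    return await handler(request)

app = web.Application(middlewares=[auth_middleware])
```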
fixed typos for CAPTCHA | diff --git a/removed_sites.md b/removed_sites.md
index bc91236c2..3be680be5 100644
--- a/removed_sites.md
+++ b/removed_sites.md
@@ -700,7 +700,7 @@ If an alternative way to check for usernames is found then it will added.
```
## YandexCollection
-As of 2020-08-11, YandexCollection presents us with a rechapta which prevents us from checking for usernames
+As of 2020-08-11, YandexCollection presents us with a recaptcha which prevents us from checking for usernames
```json
"YandexCollection": {
"errorType": "status_code",
@@ -1024,7 +1024,7 @@ As of 2021-06-27, GDProfiles takes way too long to respond. Must be an issue on
## AllTrails
-As of 2021-06-27, AllTrails has a chapta which prevents us from checking for usernames on the site.
+As of 2021-06-27, AllTrails has a captcha which prevents us from checking for usernames on the site.
```json
"AllTrails": {
"errorMsg": "class=\"home index\"",
@@ -1082,7 +1082,7 @@ As of 2021-06-27, Kali Community requires us to be logged in order to check if a
## NameMC
-As of 2021-06-27, NameMC uses chapta through CloudFlare which prevents us from checking if usernames exists on the site.
+As of 2021-06-27, NameMC uses captcha through CloudFlare which prevents us from checking if usernames exists on the site.
```json
"NameMC (Minecraft.net skins)": {
@@ -1097,7 +1097,7 @@ As of 2021-06-27, NameMC uses chapta through CloudFlare which prevents us from c
## SteamID
-As of 2021-06-27, Steam uses chapta through CloudFlare which prevents us from checking if usernames exists on the site.
+As of 2021-06-27, Steam uses captcha through CloudFlare which prevents us from checking if usernames exists on the site.
```json
"Steamid": {
"errorMsg": "<link rel=\"canonical\" href=\"https://steamid.uk\" />",
| https://api.github.com/repos/sherlock-project/sherlock/pulls/1587 | 2022-10-26T08:22:16Z | 2023-02-04T18:05:29Z | 2023-02-04T18:05:29Z | 2023-02-04T18:05:29Z | 522 | sherlock-project/sherlock | 36,482 |
|
๐ Fix JSON Schema accepting bools as valid JSON Schemas, e.g. `additionalProperties: false` | diff --git a/fastapi/openapi/models.py b/fastapi/openapi/models.py
index 7420d3b55a097..a2ea536073301 100644
--- a/fastapi/openapi/models.py
+++ b/fastapi/openapi/models.py
@@ -114,27 +114,30 @@ class Schema(BaseModel):
dynamicAnchor: Optional[str] = Field(default=None, alias="$dynamicAnchor")
ref: Optional[str] = Field(default=None, alias="$ref")
dynamicRef: Optional[str] = Field(default=None, alias="$dynamicRef")
- defs: Optional[Dict[str, "Schema"]] = Field(default=None, alias="$defs")
+ defs: Optional[Dict[str, "SchemaOrBool"]] = Field(default=None, alias="$defs")
comment: Optional[str] = Field(default=None, alias="$comment")
# Ref: JSON Schema 2020-12: https://json-schema.org/draft/2020-12/json-schema-core.html#name-a-vocabulary-for-applying-s
# A Vocabulary for Applying Subschemas
- allOf: Optional[List["Schema"]] = None
- anyOf: Optional[List["Schema"]] = None
- oneOf: Optional[List["Schema"]] = None
- not_: Optional["Schema"] = Field(default=None, alias="not")
- if_: Optional["Schema"] = Field(default=None, alias="if")
- then: Optional["Schema"] = None
- else_: Optional["Schema"] = Field(default=None, alias="else")
- dependentSchemas: Optional[Dict[str, "Schema"]] = None
- prefixItems: Optional[List["Schema"]] = None
- items: Optional[Union["Schema", List["Schema"]]] = None
- contains: Optional["Schema"] = None
- properties: Optional[Dict[str, "Schema"]] = None
- patternProperties: Optional[Dict[str, "Schema"]] = None
- additionalProperties: Optional["Schema"] = None
- propertyNames: Optional["Schema"] = None
- unevaluatedItems: Optional["Schema"] = None
- unevaluatedProperties: Optional["Schema"] = None
+ allOf: Optional[List["SchemaOrBool"]] = None
+ anyOf: Optional[List["SchemaOrBool"]] = None
+ oneOf: Optional[List["SchemaOrBool"]] = None
+ not_: Optional["SchemaOrBool"] = Field(default=None, alias="not")
+ if_: Optional["SchemaOrBool"] = Field(default=None, alias="if")
+ then: Optional["SchemaOrBool"] = None
+ else_: Optional["SchemaOrBool"] = Field(default=None, alias="else")
+ dependentSchemas: Optional[Dict[str, "SchemaOrBool"]] = None
+ prefixItems: Optional[List["SchemaOrBool"]] = None
+ # TODO: uncomment and remove below when deprecating Pydantic v1
+    # It generates a list of schemas for tuples, before prefixItems was available
+ # items: Optional["SchemaOrBool"] = None
+ items: Optional[Union["SchemaOrBool", List["SchemaOrBool"]]] = None
+ contains: Optional["SchemaOrBool"] = None
+ properties: Optional[Dict[str, "SchemaOrBool"]] = None
+ patternProperties: Optional[Dict[str, "SchemaOrBool"]] = None
+ additionalProperties: Optional["SchemaOrBool"] = None
+ propertyNames: Optional["SchemaOrBool"] = None
+ unevaluatedItems: Optional["SchemaOrBool"] = None
+ unevaluatedProperties: Optional["SchemaOrBool"] = None
# Ref: JSON Schema Validation 2020-12: https://json-schema.org/draft/2020-12/json-schema-validation.html#name-a-vocabulary-for-structural
# A Vocabulary for Structural Validation
type: Optional[str] = None
@@ -164,7 +167,7 @@ class Schema(BaseModel):
# A Vocabulary for the Contents of String-Encoded Data
contentEncoding: Optional[str] = None
contentMediaType: Optional[str] = None
- contentSchema: Optional["Schema"] = None
+ contentSchema: Optional["SchemaOrBool"] = None
# Ref: JSON Schema Validation 2020-12: https://json-schema.org/draft/2020-12/json-schema-validation.html#name-a-vocabulary-for-basic-meta
# A Vocabulary for Basic Meta-Data Annotations
title: Optional[str] = None
@@ -191,6 +194,11 @@ class Config:
extra: str = "allow"
+# Ref: https://json-schema.org/draft/2020-12/json-schema-core.html#name-json-schema-documents
+# A JSON Schema MUST be an object or a boolean.
+SchemaOrBool = Union[Schema, bool]
+
+
class Example(BaseModel):
summary: Optional[str] = None
description: Optional[str] = None
diff --git a/tests/test_additional_properties_bool.py b/tests/test_additional_properties_bool.py
new file mode 100644
index 0000000000000..e35c263420627
--- /dev/null
+++ b/tests/test_additional_properties_bool.py
@@ -0,0 +1,115 @@
+from typing import Union
+
+from fastapi import FastAPI
+from fastapi.testclient import TestClient
+from pydantic import BaseModel
+
+
+class FooBaseModel(BaseModel):
+ class Config:
+ extra = "forbid"
+
+
+class Foo(FooBaseModel):
+ pass
+
+
+app = FastAPI()
+
+
+@app.post("/")
+async def post(
+ foo: Union[Foo, None] = None,
+):
+ return foo
+
+
+client = TestClient(app)
+
+
+def test_call_invalid():
+ response = client.post("/", json={"foo": {"bar": "baz"}})
+ assert response.status_code == 422
+
+
+def test_call_valid():
+ response = client.post("/", json={})
+ assert response.status_code == 200
+ assert response.json() == {}
+
+
+def test_openapi_schema():
+ response = client.get("/openapi.json")
+ assert response.status_code == 200, response.text
+ assert response.json() == {
+ "openapi": "3.1.0",
+ "info": {"title": "FastAPI", "version": "0.1.0"},
+ "paths": {
+ "/": {
+ "post": {
+ "summary": "Post",
+ "operationId": "post__post",
+ "requestBody": {
+ "content": {
+ "application/json": {
+ "schema": {"$ref": "#/components/schemas/Foo"}
+ }
+ }
+ },
+ "responses": {
+ "200": {
+ "description": "Successful Response",
+ "content": {"application/json": {"schema": {}}},
+ },
+ "422": {
+ "description": "Validation Error",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/HTTPValidationError"
+ }
+ }
+ },
+ },
+ },
+ }
+ }
+ },
+ "components": {
+ "schemas": {
+ "Foo": {
+ "properties": {},
+ "additionalProperties": False,
+ "type": "object",
+ "title": "Foo",
+ },
+ "HTTPValidationError": {
+ "properties": {
+ "detail": {
+ "items": {"$ref": "#/components/schemas/ValidationError"},
+ "type": "array",
+ "title": "Detail",
+ }
+ },
+ "type": "object",
+ "title": "HTTPValidationError",
+ },
+ "ValidationError": {
+ "properties": {
+ "loc": {
+ "items": {
+ "anyOf": [{"type": "string"}, {"type": "integer"}]
+ },
+ "type": "array",
+ "title": "Location",
+ },
+ "msg": {"type": "string", "title": "Message"},
+ "type": {"type": "string", "title": "Error Type"},
+ },
+ "type": "object",
+ "required": ["loc", "msg", "type"],
+ "title": "ValidationError",
+ },
+ }
+ },
+ }
| ๐ Fix JSON Schema accepting bools as valid JSON Schemas, e.g. `additionalProperties: false`
This solves: https://github.com/tiangolo/fastapi/issues/9780
---
A "valid JSON Schema" includes a `bool` (i.e. `true` and `false`).
`additionalProperties` doesn't have to be a JSON object; it can be `false`, meaning no additional properties are allowed.
When I upgraded the JSON Schema models to include the new types and fields for JSON Schema 2020-12, I removed `bool` as a valid JSON Schema.
I reviewed the whole spec again; this updates all the other fields that would allow `bool` as a valid value. | https://api.github.com/repos/tiangolo/fastapi/pulls/9781 | 2023-07-02T15:34:24Z | 2023-07-02T15:58:23Z | 2023-07-02T15:58:23Z | 2023-07-02T15:58:25Z | 1,882 | tiangolo/fastapi | 23,605 |
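The behaviour under discussion is easy to demonstrate with the third-party `jsonschema` package (an illustration only — FastAPI itself doesn't depend on it):

```python
from jsonschema import ValidationError, validate

# `additionalProperties: false` -- a boolean used where a schema is expected
schema = {"type": "object", "properties": {}, "additionalProperties": False}

validate({}, schema)  # empty object: accepted, as in the PR's new test
try:
    validate({"bar": "baz"}, schema)
except ValidationError as err:
    print(err.message)  # extra property rejected -> the 422 in the test
```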
fix prompts | diff --git a/metagpt/prompts/generate_skill.md b/metagpt/prompts/generate_skill.md
index 74948cd15..e96f8181a 100644
--- a/metagpt/prompts/generate_skill.md
+++ b/metagpt/prompts/generate_skill.md
@@ -10,7 +10,7 @@ For instance, if the context is:
from typing import Optional
from abc import ABC
from metagpt.llm import LLM # Large language model, similar to GPT
-n
+
class Action(ABC):
def __init__(self, name='', context=None, llm: LLM = LLM()):
self.name = name
| https://api.github.com/repos/geekan/MetaGPT/pulls/545 | 2023-12-11T02:42:50Z | 2023-12-12T12:46:22Z | 2023-12-12T12:46:22Z | 2023-12-12T12:46:23Z | 148 | geekan/MetaGPT | 17,016 |
|
Update peft requirement from ==0.6.* to ==0.7.* | diff --git a/requirements.txt b/requirements.txt
index f4cbaeacb1..e15fb25c58 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -8,7 +8,7 @@ markdown
numpy==1.24.*
optimum==1.14.0
pandas
-peft==0.6.*
+peft==0.7.*
Pillow>=9.5.0
pyyaml
requests
diff --git a/requirements_amd.txt b/requirements_amd.txt
index 892cae7ce6..e7648b685f 100644
--- a/requirements_amd.txt
+++ b/requirements_amd.txt
@@ -8,7 +8,7 @@ markdown
numpy==1.24.*
optimum==1.14.0
pandas
-peft==0.6.*
+peft==0.7.*
Pillow>=9.5.0
pyyaml
requests
diff --git a/requirements_amd_noavx2.txt b/requirements_amd_noavx2.txt
index 228c51724f..4747ca8772 100644
--- a/requirements_amd_noavx2.txt
+++ b/requirements_amd_noavx2.txt
@@ -8,7 +8,7 @@ markdown
numpy==1.24.*
optimum==1.14.0
pandas
-peft==0.6.*
+peft==0.7.*
Pillow>=9.5.0
pyyaml
requests
diff --git a/requirements_apple_intel.txt b/requirements_apple_intel.txt
index 377373f117..9ee4ec6e61 100644
--- a/requirements_apple_intel.txt
+++ b/requirements_apple_intel.txt
@@ -8,7 +8,7 @@ markdown
numpy==1.24.*
optimum==1.14.0
pandas
-peft==0.6.*
+peft==0.7.*
Pillow>=9.5.0
pyyaml
requests
diff --git a/requirements_apple_silicon.txt b/requirements_apple_silicon.txt
index e280108ba5..08297db448 100644
--- a/requirements_apple_silicon.txt
+++ b/requirements_apple_silicon.txt
@@ -8,7 +8,7 @@ markdown
numpy==1.24.*
optimum==1.14.0
pandas
-peft==0.6.*
+peft==0.7.*
Pillow>=9.5.0
pyyaml
requests
diff --git a/requirements_cpu_only.txt b/requirements_cpu_only.txt
index 7e83c66205..28ea2ca39f 100644
--- a/requirements_cpu_only.txt
+++ b/requirements_cpu_only.txt
@@ -8,7 +8,7 @@ markdown
numpy==1.24.*
optimum==1.14.0
pandas
-peft==0.6.*
+peft==0.7.*
Pillow>=9.5.0
pyyaml
requests
diff --git a/requirements_cpu_only_noavx2.txt b/requirements_cpu_only_noavx2.txt
index b31eec1435..45ea916e94 100644
--- a/requirements_cpu_only_noavx2.txt
+++ b/requirements_cpu_only_noavx2.txt
@@ -8,7 +8,7 @@ markdown
numpy==1.24.*
optimum==1.14.0
pandas
-peft==0.6.*
+peft==0.7.*
Pillow>=9.5.0
pyyaml
requests
diff --git a/requirements_noavx2.txt b/requirements_noavx2.txt
index c00d9bd169..6269fd2115 100644
--- a/requirements_noavx2.txt
+++ b/requirements_noavx2.txt
@@ -8,7 +8,7 @@ markdown
numpy==1.24.*
optimum==1.14.0
pandas
-peft==0.6.*
+peft==0.7.*
Pillow>=9.5.0
pyyaml
requests
diff --git a/requirements_nowheels.txt b/requirements_nowheels.txt
index b285e4c0ae..30ad3f5dbe 100644
--- a/requirements_nowheels.txt
+++ b/requirements_nowheels.txt
@@ -8,7 +8,7 @@ markdown
numpy==1.24.*
optimum==1.14.0
pandas
-peft==0.6.*
+peft==0.7.*
Pillow>=9.5.0
pyyaml
requests
| Updates the requirements on [peft](https://github.com/huggingface/peft) to permit the latest version.
<details>
<summary>Release notes</summary>
<p><em>Sourced from <a href="https://github.com/huggingface/peft/releases">peft's releases</a>.</em></p>
<blockquote>
<h2>v0.7.0: Orthogonal Fine-Tuning, Megatron support, better initialization, safetensors, and more</h2>
<h1>Highlights</h1>
<ul>
<li>Orthogonal Fine-Tuning (OFT): A new adapter that is similar to LoRA and shows a lot of promise for Stable Diffusion, especially with regard to controllability and compositionality. <a href="https://github.com/huggingface/peft/blob/main/examples/oft_dreambooth/train_dreambooth.py">Give it a try</a>! By <a href="https://github.com/okotaku"><code>@โokotaku</code></a> in <a href="https://redirect.github.com/huggingface/peft/pull/1160">huggingface/peft#1160</a></li>
<li>Support for parallel linear LoRA layers using Megatron. This should lead to a speed up when using LoRA with Megatron. By <a href="https://github.com/zhangsheng377"><code>@โzhangsheng377</code></a> in <a href="https://redirect.github.com/huggingface/peft/pull/1092">huggingface/peft#1092</a></li>
<li>LoftQ provides a new method to <a href="https://huggingface.co/docs/peft/main/en/conceptual_guides/lora#initialization-options">initialize LoRA layers</a> of quantized models. The big advantage is that the LoRA layer weights are chosen in a way to minimize the quantization error, as described here: <a href="https://arxiv.org/abs/2310.08659">https://arxiv.org/abs/2310.08659</a>. By <a href="https://github.com/yxli2123"><code>@โyxli2123</code></a> in <a href="https://redirect.github.com/huggingface/peft/pull/1150">huggingface/peft#1150</a>.</li>
</ul>
<h2>Other notable additions</h2>
<ul>
<li>It is now possible to choose which adapters are merged when calling <code>merge</code> (<a href="https://redirect.github.com/huggingface/peft/issues/1132">#1132</a>)</li>
<li>IAยณ now supports adapter deletion, by <a href="https://github.com/alexrs"><code>@โalexrs</code></a> (<a href="https://redirect.github.com/huggingface/peft/issues/1153">#1153</a>)</li>
<li>A new initialization method for LoRA has been added, <code>"gaussian"</code> (<a href="https://redirect.github.com/huggingface/peft/issues/1189">#1189</a>)</li>
<li>When training PEFT models with new tokens being added to the embedding layers, the embedding layer is now saved by default (<a href="https://redirect.github.com/huggingface/peft/issues/1147">#1147</a>)</li>
<li>It is now possible to mix certain adapters like LoRA and LoKr in the same model, see <a href="https://huggingface.co/docs/peft/main/en/developer_guides/mixed_models">the docs</a> (<a href="https://redirect.github.com/huggingface/peft/issues/1163">#1163</a>)</li>
<li>We started an initiative to improve the documenation, some of which should already be reflected in the current docs. Still, help by the community is always welcome. Check out <a href="https://redirect.github.com/huggingface/peft/issues/1089">this issue</a> to get going.</li>
</ul>
<h2>Migration to v0.7.0</h2>
<ul>
<li><a href="https://huggingface.co/docs/safetensors/index">Safetensors</a> are now the <a href="https://redirect.github.com/huggingface/peft/pull/1088">default format</a> for PEFT adapters. In practice, users should not have to change anything in their code, PEFT takes care of everything -- just be aware that instead of creating a file <code>adapter_model.bin</code>, calling <code>save_pretrained</code> now creates <code>adapter_model.safetensors</code>. Safetensors have numerous advantages over pickle files (which is the PyTorch default format) and well supported on Hugging Face Hub.</li>
<li>When merging multiple LoRA adapter weights together using <code>add_weighted_adapter</code> with the option <code>combination_type="linear"</code>, the scaling of the adapter weights is now <a href="https://redirect.github.com/huggingface/peft/pull/1169">performed differently</a>, leading to improved results.</li>
<li>There was a big refactor of the inner workings of some PEFT adapters. For the vast majority of users, this should not make any difference (except making some code run faster). However, if your code is relying on PEFT internals, be aware that the inheritance structure of certain adapter layers has changed (e.g. <code>peft.lora.Linear</code> is no longer a subclass of <code>nn.Linear</code>, so <code>isinstance</code> checks may need updating). Also, to retrieve the original weight of an adapted layer, now use <code>self.get_base_layer().weight</code>, not <code>self.weight</code> (same for <code>bias</code>).</li>
</ul>
<h2>What's Changed</h2>
<p>As always, a bunch of small improvements, bug fixes and doc improvements were added. We thank all the external contributors, both new and recurring. Below is the list of all changes since the last release.</p>
<ul>
<li>After release: Bump version to 0.7.0.dev0 by <a href="https://github.com/BenjaminBossan"><code>@โBenjaminBossan</code></a> in <a href="https://redirect.github.com/huggingface/peft/pull/1074">huggingface/peft#1074</a></li>
<li>FIX: Skip adaption prompt tests with new transformers versions by <a href="https://github.com/BenjaminBossan"><code>@โBenjaminBossan</code></a> in <a href="https://redirect.github.com/huggingface/peft/pull/1077">huggingface/peft#1077</a></li>
<li>FIX: fix adaptation prompt CI and compatibility with latest transformers (4.35.0) by <a href="https://github.com/younesbelkada"><code>@โyounesbelkada</code></a> in <a href="https://redirect.github.com/huggingface/peft/pull/1084">huggingface/peft#1084</a></li>
<li>Improve documentation for IAยณ by <a href="https://github.com/SumanthRH"><code>@โSumanthRH</code></a> in <a href="https://redirect.github.com/huggingface/peft/pull/984">huggingface/peft#984</a></li>
<li>[<code>Docker</code>] Update Dockerfile to force-use transformers main by <a href="https://github.com/younesbelkada"><code>@โyounesbelkada</code></a> in <a href="https://redirect.github.com/huggingface/peft/pull/1085">huggingface/peft#1085</a></li>
<li>Update the release checklist by <a href="https://github.com/BenjaminBossan"><code>@โBenjaminBossan</code></a> in <a href="https://redirect.github.com/huggingface/peft/pull/1075">huggingface/peft#1075</a></li>
<li>fix-gptq-training by <a href="https://github.com/SunMarc"><code>@โSunMarc</code></a> in <a href="https://redirect.github.com/huggingface/peft/pull/1086">huggingface/peft#1086</a></li>
<li>fix the failing CI tests by <a href="https://github.com/pacman100"><code>@โpacman100</code></a> in <a href="https://redirect.github.com/huggingface/peft/pull/1094">huggingface/peft#1094</a></li>
<li>Fix f-string in import_utils by <a href="https://github.com/KCFindstr"><code>@โKCFindstr</code></a> in <a href="https://redirect.github.com/huggingface/peft/pull/1091">huggingface/peft#1091</a></li>
<li>Fix IA3 config for Falcon models by <a href="https://github.com/SumanthRH"><code>@โSumanthRH</code></a> in <a href="https://redirect.github.com/huggingface/peft/pull/1007">huggingface/peft#1007</a></li>
<li>FIX: Failing nightly CI tests due to IA3 config by <a href="https://github.com/BenjaminBossan"><code>@โBenjaminBossan</code></a> in <a href="https://redirect.github.com/huggingface/peft/pull/1100">huggingface/peft#1100</a></li>
<li>[<code>core</code>] Fix safetensors serialization for shared tensors by <a href="https://github.com/younesbelkada"><code>@โyounesbelkada</code></a> in <a href="https://redirect.github.com/huggingface/peft/pull/1101">huggingface/peft#1101</a></li>
<li>Change to 0.6.1.dev0 by <a href="https://github.com/younesbelkada"><code>@โyounesbelkada</code></a> in <a href="https://redirect.github.com/huggingface/peft/pull/1102">huggingface/peft#1102</a></li>
<li>Release: 0.6.1 by <a href="https://github.com/younesbelkada"><code>@โyounesbelkada</code></a> in <a href="https://redirect.github.com/huggingface/peft/pull/1103">huggingface/peft#1103</a></li>
<li>set dev version by <a href="https://github.com/younesbelkada"><code>@โyounesbelkada</code></a> in <a href="https://redirect.github.com/huggingface/peft/pull/1104">huggingface/peft#1104</a></li>
<li>avoid unnecessary import by <a href="https://github.com/winglian"><code>@โwinglian</code></a> in <a href="https://redirect.github.com/huggingface/peft/pull/1109">huggingface/peft#1109</a></li>
<li>Refactor adapter deletion by <a href="https://github.com/BenjaminBossan"><code>@โBenjaminBossan</code></a> in <a href="https://redirect.github.com/huggingface/peft/pull/1105">huggingface/peft#1105</a></li>
<li>Added num_dataloader_workers arg to fix Windows issue by <a href="https://github.com/lukaskuhn-lku"><code>@โlukaskuhn-lku</code></a> in <a href="https://redirect.github.com/huggingface/peft/pull/1107">huggingface/peft#1107</a></li>
<li>Fix import issue transformers with <code>id_tensor_storage</code> by <a href="https://github.com/younesbelkada"><code>@โyounesbelkada</code></a> in <a href="https://redirect.github.com/huggingface/peft/pull/1116">huggingface/peft#1116</a></li>
<li>Correctly deal with <code>ModulesToSaveWrapper</code> when using Low-level API by <a href="https://github.com/younesbelkada"><code>@โyounesbelkada</code></a> in <a href="https://redirect.github.com/huggingface/peft/pull/1112">huggingface/peft#1112</a></li>
<li>fix doc typo by <a href="https://github.com/coding-famer"><code>@โcoding-famer</code></a> in <a href="https://redirect.github.com/huggingface/peft/pull/1121">huggingface/peft#1121</a></li>
<li>Release: v0.6.2 by <a href="https://github.com/pacman100"><code>@โpacman100</code></a> in <a href="https://redirect.github.com/huggingface/peft/pull/1125">huggingface/peft#1125</a></li>
<li>Release: v0.6.3.dev0 by <a href="https://github.com/pacman100"><code>@โpacman100</code></a> in <a href="https://redirect.github.com/huggingface/peft/pull/1128">huggingface/peft#1128</a></li>
<li>FIX: Adding 2 adapters when target_modules is a str fails by <a href="https://github.com/BenjaminBossan"><code>@โBenjaminBossan</code></a> in <a href="https://redirect.github.com/huggingface/peft/pull/1111">huggingface/peft#1111</a></li>
</ul>
<!-- raw HTML omitted -->
</blockquote>
<p>... (truncated)</p>
</details>
<details>
<summary>Commits</summary>
<ul>
<li><a href="https://github.com/huggingface/peft/commit/2665f80a1738b315dce42da8f1a9c7fe793aa9ca"><code>2665f80</code></a> Release: 0.7.0 (<a href="https://redirect.github.com/huggingface/peft/issues/1214">#1214</a>)</li>
<li><a href="https://github.com/huggingface/peft/commit/9fd788bedbb8aeed2797a7521118801c7552a4a6"><code>9fd788b</code></a> TST: Add regression tests 2 (<a href="https://redirect.github.com/huggingface/peft/issues/1115">#1115</a>)</li>
<li><a href="https://github.com/huggingface/peft/commit/2336780f9e8e25f4d40670afbf1b6707b736367a"><code>2336780</code></a> Raise error when <code>modules_to_save</code> is specified and multiple adapters are bei...</li>
<li><a href="https://github.com/huggingface/peft/commit/c22a8e5d47774b3c9ed7352994ccd34e757fff58"><code>c22a8e5</code></a> DOC: How to configure new transformers models (<a href="https://redirect.github.com/huggingface/peft/issues/1195">#1195</a>)</li>
<li><a href="https://github.com/huggingface/peft/commit/1a7433b1362241699090daacfb09d8d80dc031e5"><code>1a7433b</code></a> TST Improve test for SD LoHa and OFT (<a href="https://redirect.github.com/huggingface/peft/issues/1210">#1210</a>)</li>
<li><a href="https://github.com/huggingface/peft/commit/70d559d029a17af8979841aa05ded602cd46cab1"><code>70d559d</code></a> DOC Initialization options for LoRA (<a href="https://redirect.github.com/huggingface/peft/issues/1218">#1218</a>)</li>
<li><a href="https://github.com/huggingface/peft/commit/bffbbbf76ab3af4563fa4c6079840d19dc423a9b"><code>bffbbbf</code></a> MNT Delete the delete doc workflows (<a href="https://redirect.github.com/huggingface/peft/issues/1213">#1213</a>)</li>
<li><a href="https://github.com/huggingface/peft/commit/9c70468a3c9efcd1aadc106642f59d38fccb8a5c"><code>9c70468</code></a> [docs] API docs (<a href="https://redirect.github.com/huggingface/peft/issues/1196">#1196</a>)</li>
<li><a href="https://github.com/huggingface/peft/commit/f7cf460f7c99b7c19fcf1b1874a733941562fa2c"><code>f7cf460</code></a> [docs] Update index and quicktour (<a href="https://redirect.github.com/huggingface/peft/issues/1191">#1191</a>)</li>
<li><a href="https://github.com/huggingface/peft/commit/1b1091c15835f5112d4c5460c80883f0136ea7af"><code>1b1091c</code></a> remove HF tokens (<a href="https://redirect.github.com/huggingface/peft/issues/1207">#1207</a>)</li>
<li>Additional commits viewable in <a href="https://github.com/huggingface/peft/compare/v0.6.0...v0.7.0">compare view</a></li>
</ul>
</details>
<br />
Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.
[//]: # (dependabot-automerge-start)
[//]: # (dependabot-automerge-end)
---
<details>
<summary>Dependabot commands and options</summary>
<br />
You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually
- `@dependabot show <dependency name> ignore conditions` will show all of the ignore conditions of the specified dependency
- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
</details> | https://api.github.com/repos/oobabooga/text-generation-webui/pulls/4886 | 2023-12-11T20:22:09Z | 2023-12-12T05:31:30Z | 2023-12-12T05:31:30Z | 2023-12-12T05:31:39Z | 1,002 | oobabooga/text-generation-webui | 25,982 |
fix "cuda out of memory" when resuming training | diff --git a/train.py b/train.py
index 30d0145ef..3c405241a 100644
--- a/train.py
+++ b/train.py
@@ -189,6 +189,7 @@ def get_batch(split):
optimizer = model.configure_optimizers(weight_decay, learning_rate, (beta1, beta2), device_type)
if init_from == 'resume':
optimizer.load_state_dict(checkpoint['optimizer'])
+checkpoint = None # free up memory
# compile the model
if compile:
| ## The problem:
When training a large model that takes up almost the entire memory capacity of my GPU, I am unable to resume training from a checkpoint (despite being able to initialize and train the model from scratch), because CUDA keeps running out of memory.
After the checkpoint is loaded, `model.py` produces this error on the first forward pass of the model:
```
torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 20.00 MiB
GPU 0; 8.00 GiB total capacity; 7.20 GiB already allocated; 0 bytes free; 7.30 GiB reserved in total by PyTorch
If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation.
See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
```
## The solution:
The cause of the problem is how the checkpoint is being loaded in `train.py` here:
https://github.com/karpathy/nanoGPT/blob/a82b33b525ca9855d705656387698e13eb8e8d4b/train.py#L152
We map `checkpoint` directly to GPU memory to initialize the model and optimizer on the GPU, but after they've been initialized, `checkpoint` is still taking up precious memory, so it's necessary to set `checkpoint = None` after we're done with all the `.load_state_dict()` calls so that python can clean up that memory.
Adding this line of code resolves the issue for me, and doesn't produce any side-effects.
## System:
GPU: Nvidia RTX 3060 Ti 8GB
OS: Windows 10
Python: 3.9.13
Cuda: 11.8
Pytorch: 2.1.0.dev20230328
## Config:
batch_size = 2
block_size = 512
n_layer = 16
n_head = 16
n_embd = 1024
device = 'cuda'
dtype = 'bfloat16'
compile = False | https://api.github.com/repos/karpathy/nanoGPT/pulls/236 | 2023-04-05T22:11:51Z | 2023-04-13T05:09:43Z | 2023-04-13T05:09:43Z | 2023-04-13T05:09:48Z | 110 | karpathy/nanoGPT | 40,971 |
community[patch]: Invoke callback prior to yielding token | diff --git a/libs/community/langchain_community/llms/tongyi.py b/libs/community/langchain_community/llms/tongyi.py
index a11cf9c5153b31..3734e2f3a697c5 100644
--- a/libs/community/langchain_community/llms/tongyi.py
+++ b/libs/community/langchain_community/llms/tongyi.py
@@ -285,13 +285,13 @@ def _stream(
)
for stream_resp in stream_generate_with_retry(self, prompt=prompt, **params):
chunk = GenerationChunk(**self._generation_from_qwen_resp(stream_resp))
- yield chunk
if run_manager:
run_manager.on_llm_new_token(
chunk.text,
chunk=chunk,
verbose=self.verbose,
)
+ yield chunk
async def _astream(
self,
@@ -307,13 +307,13 @@ async def _astream(
self, prompt=prompt, **params
):
chunk = GenerationChunk(**self._generation_from_qwen_resp(stream_resp))
- yield chunk
if run_manager:
await run_manager.on_llm_new_token(
chunk.text,
chunk=chunk,
verbose=self.verbose,
)
+ yield chunk
def _invocation_params(self, stop: Any, **kwargs: Any) -> Dict[str, Any]:
params = {
| ## PR title
community[patch]: Invoke callback prior to yielding token
## PR message
- Description: Invoke callback prior to yielding token in _stream method in llms/tongyi.
- Issue: https://github.com/langchain-ai/langchain/issues/16913
- Dependencies: None | https://api.github.com/repos/langchain-ai/langchain/pulls/18448 | 2024-03-03T11:04:17Z | 2024-03-03T22:14:22Z | 2024-03-03T22:14:22Z | 2024-03-03T22:14:22Z | 298 | langchain-ai/langchain | 43,430 |
Do not require mock in Python 3 in nginx module | diff --git a/certbot-nginx/setup.py b/certbot-nginx/setup.py
index 0d62e7d5571..0e6deceb35f 100644
--- a/certbot-nginx/setup.py
+++ b/certbot-nginx/setup.py
@@ -1,5 +1,7 @@
+from distutils.version import StrictVersion
import sys
+from setuptools import __version__ as setuptools_version
from setuptools import find_packages
from setuptools import setup
from setuptools.command.test import test as TestCommand
@@ -11,13 +13,21 @@
install_requires = [
'acme>=1.4.0.dev0',
'certbot>=1.4.0.dev0',
- 'mock',
'PyOpenSSL',
'pyparsing>=1.5.5', # Python3 support
'setuptools',
'zope.interface',
]
+setuptools_known_environment_markers = (StrictVersion(setuptools_version) >= StrictVersion('36.2'))
+if setuptools_known_environment_markers:
+ install_requires.append('mock ; python_version < "3.3"')
+elif 'bdist_wheel' in sys.argv[1:]:
+ raise RuntimeError('Error, you are trying to build certbot wheels using an old version '
+ 'of setuptools. Version 36.2+ of setuptools is required.')
+elif sys.version_info < (3,3):
+ install_requires.append('mock')
+
class PyTest(TestCommand):
user_options = []
diff --git a/certbot-nginx/tests/configurator_test.py b/certbot-nginx/tests/configurator_test.py
index 0a04a22a494..2c3264a5fc4 100644
--- a/certbot-nginx/tests/configurator_test.py
+++ b/certbot-nginx/tests/configurator_test.py
@@ -1,7 +1,10 @@
"""Test for certbot_nginx._internal.configurator."""
import unittest
-import mock
+try:
+ import mock
+except ImportError: # pragma: no cover
+ from unittest import mock # type: ignore
import OpenSSL
from acme import challenges
diff --git a/certbot-nginx/tests/http_01_test.py b/certbot-nginx/tests/http_01_test.py
index 6418a884182..8f0673c1f1b 100644
--- a/certbot-nginx/tests/http_01_test.py
+++ b/certbot-nginx/tests/http_01_test.py
@@ -2,7 +2,10 @@
import unittest
import josepy as jose
-import mock
+try:
+ import mock
+except ImportError: # pragma: no cover
+ from unittest import mock # type: ignore
import six
from acme import challenges
diff --git a/certbot-nginx/tests/parser_obj_test.py b/certbot-nginx/tests/parser_obj_test.py
index bb7834701c7..8262c5f52c9 100644
--- a/certbot-nginx/tests/parser_obj_test.py
+++ b/certbot-nginx/tests/parser_obj_test.py
@@ -2,7 +2,10 @@
import unittest
-import mock
+try:
+ import mock
+except ImportError: # pragma: no cover
+ from unittest import mock # type: ignore
from certbot_nginx._internal.parser_obj import COMMENT_BLOCK
from certbot_nginx._internal.parser_obj import parse_raw
diff --git a/certbot-nginx/tests/test_util.py b/certbot-nginx/tests/test_util.py
index 4c9da84bdea..4b26f793501 100644
--- a/certbot-nginx/tests/test_util.py
+++ b/certbot-nginx/tests/test_util.py
@@ -4,7 +4,10 @@
import tempfile
import josepy as jose
-import mock
+try:
+ import mock
+except ImportError: # pragma: no cover
+ from unittest import mock # type: ignore
import pkg_resources
import zope.component
| Part of #7886.
This PR conditionally installs `mock` in `nginx/setup.py` based on setuptools version and python version, when possible. It then updates `nginx` tests to use `unittest.mock` when `mock` isn't available. | https://api.github.com/repos/certbot/certbot/pulls/7898 | 2020-04-13T20:51:14Z | 2020-04-15T18:39:45Z | 2020-04-15T18:39:45Z | 2020-04-15T18:40:05Z | 876 | certbot/certbot | 3,724 |
add support for netease cloud music | diff --git a/src/you_get/extractor/__init__.py b/src/you_get/extractor/__init__.py
index f4b6d379e9..9c99b80f3d 100644
--- a/src/you_get/extractor/__init__.py
+++ b/src/you_get/extractor/__init__.py
@@ -21,6 +21,7 @@
from .jpopsuki import *
from .ku6 import *
from .kugou import *
+from .kuwo import *
from .letv import *
from .magisto import *
from .miomio import *
diff --git a/src/you_get/extractor/__main__.py b/src/you_get/extractor/__main__.py
index 1d52d3ab92..5d16f7bf7c 100644
--- a/src/you_get/extractor/__main__.py
+++ b/src/you_get/extractor/__main__.py
@@ -41,6 +41,7 @@ def url_to_module(url):
'kankanews': bilibili,
'ku6': ku6,
'kugou':kugou,
+ 'kuwo':kuwo,
'letv': letv,
'magisto': magisto,
'miomio': miomio,
diff --git a/src/you_get/extractor/kuwo.py b/src/you_get/extractor/kuwo.py
new file mode 100644
index 0000000000..16a79567ef
--- /dev/null
+++ b/src/you_get/extractor/kuwo.py
@@ -0,0 +1,38 @@
+#!/usr/bin/env python
+
+__all__ = ['kuwo_download']
+
+from ..common import *
+import re
+
+def kuwo_download_by_rid(rid, output_dir = '.', merge = True, info_only = False):
+ html=get_content("http://player.kuwo.cn/webmusic/st/getNewMuiseByRid?rid=MUSIC_%s"%rid)
+ title=match1(html,r"<name>(.*)</name>")
+ #to get title
+ #format =aac|mp3 ->to get aac format=mp3 ->to get mp3
+ url=get_content("http://antiserver.kuwo.cn/anti.s?format=mp3&rid=MUSIC_%s&type=convert_url&response=url"%rid)
+ songtype, ext, size = url_info(url)
+ print_info(site_info, title, songtype, size)
+ if not info_only:
+ download_urls([url], title, ext, size, output_dir)
+
+def kuwo_playlist_download(url, output_dir = '.', merge = True, info_only = False):
+ html=get_content(url)
+ matched=set(re.compile("yinyue/(\d+)").findall(html))#reduce duplicated
+ for rid in matched:
+ kuwo_download_by_rid(rid,output_dir,merge,info_only)
+
+
+
+def kuwo_download(url, output_dir = '.', merge = True, info_only = False):
+ if "www.kuwo.cn/yinyue" in url:
+ rid=match1(url,'yinyue/(\d+)')
+ kuwo_download_by_rid(rid,output_dir, merge, info_only)
+ else:
+ kuwo_playlist_download(url,output_dir,merge,info_only)
+
+site_info = "kuwo.cn"
+download = kuwo_download
+# download_playlist = playlist_not_supported("kugou")
+# download_playlist=playlist_not_supported("kuwo")
+download_playlist=kuwo_playlist_download
diff --git a/src/you_get/extractor/netease.py b/src/you_get/extractor/netease.py
index 1321ba0fca..14dd5be7df 100644
--- a/src/you_get/extractor/netease.py
+++ b/src/you_get/extractor/netease.py
@@ -3,38 +3,65 @@
__all__ = ['netease_download']
from ..common import *
+from json import loads
+
+def netease_cloud_music_download(url, output_dir = '.', merge = True, info_only = False):
+ rid=match1(url,r'id=(.*)')
+ if "album" in url:
+ j=loads(get_content("http://music.163.com/api/album/%s?id=%s&csrf_token="%(rid,rid),headers={"Referer":"http://music.163.com/"}))
+ for i in j['album']['songs']:
+ title=i['name']
+ url=i['mp3Url']
+ songtype, ext, size = url_info(url)
+ print_info(site_info, title, songtype, size)
+ if not info_only:
+ download_urls([url], title, ext, size, output_dir)
+
+ elif "song" in url:
+ j=loads(get_content("http://music.163.com/api/song/detail/?id=%s&ids=[%s]&csrf_token="%(rid,rid),headers={"Referer":"http://music.163.com/"}))
+ title=j["songs"][0]['name']
+ url=j["songs"][0]['mp3Url']
+ songtype, ext, size = url_info(url)
+ print_info(site_info, title, songtype, size)
+ if not info_only:
+ download_urls([url], title, ext, size, output_dir)
+
+
def netease_download(url, output_dir = '.', merge = True, info_only = False):
- html = get_decoded_html(url)
-
- title = r1('movieDescription=\'([^\']+)\'', html) or r1('<title>(.+)</title>', html)
-
- if title[0] == ' ':
- title = title[1:]
-
- src = r1(r'<source src="([^"]+)"', html) or r1(r'<source type="[^"]+" src="([^"]+)"', html)
-
- if src:
- sd_url = r1(r'(.+)-mobile.mp4', src) + ".flv"
- _, _, sd_size = url_info(sd_url)
+ if "music.163.com" in url:
+ netease_cloud_music_download(url,output_dir,merge,info_only)
+ else:
+ html = get_decoded_html(url)
+
+ title = r1('movieDescription=\'([^\']+)\'', html) or r1('<title>(.+)</title>', html)
+
+ if title[0] == ' ':
+ title = title[1:]
- hd_url = re.sub('/SD/', '/HD/', sd_url)
- _, _, hd_size = url_info(hd_url)
+ src = r1(r'<source src="([^"]+)"', html) or r1(r'<source type="[^"]+" src="([^"]+)"', html)
- if hd_size > sd_size:
- url, size = hd_url, hd_size
+ if src:
+ sd_url = r1(r'(.+)-mobile.mp4', src) + ".flv"
+ _, _, sd_size = url_info(sd_url)
+
+ hd_url = re.sub('/SD/', '/HD/', sd_url)
+ _, _, hd_size = url_info(hd_url)
+
+ if hd_size > sd_size:
+ url, size = hd_url, hd_size
+ else:
+ url, size = sd_url, sd_size
+ ext = 'flv'
+
else:
- url, size = sd_url, sd_size
- ext = 'flv'
+ url = (r1(r'["\'](.+)-list.m3u8["\']', html) or r1(r'["\'](.+).m3u8["\']', html)) + ".mp4"
+ _, _, size = url_info(url)
+ ext = 'mp4'
- else:
- url = (r1(r'["\'](.+)-list.m3u8["\']', html) or r1(r'["\'](.+).m3u8["\']', html)) + ".mp4"
- _, _, size = url_info(url)
- ext = 'mp4'
-
- print_info(site_info, title, ext, size)
- if not info_only:
- download_urls([url], title, ext, size, output_dir = output_dir, merge = merge)
+ print_info(site_info, title, ext, size)
+ if not info_only:
+ download_urls([url], title, ext, size, output_dir = output_dir, merge = merge)
site_info = "163.com"
download = netease_download
| refer to #340
| https://api.github.com/repos/soimort/you-get/pulls/359 | 2014-07-12T07:21:46Z | 2014-07-13T23:57:26Z | 2014-07-13T23:57:26Z | 2014-07-13T23:58:32Z | 1,947 | soimort/you-get | 21,271 |
fix 404 of "%s/%s.html" in qq.py | diff --git a/src/you_get/extractor/qq.py b/src/you_get/extractor/qq.py
index b59c68bcbd..3ca87a5836 100644
--- a/src/you_get/extractor/qq.py
+++ b/src/you_get/extractor/qq.py
@@ -6,9 +6,9 @@
def qq_download_by_id(id, title = None, output_dir = '.', merge = True, info_only = False):
url = 'http://vsrc.store.qq.com/%s.flv' % id
-
+
_, _, size = url_info(url)
-
+
print_info(site_info, title, 'flv', size)
if not info_only:
download_urls([url], title, 'flv', size, output_dir = output_dir, merge = merge)
@@ -17,31 +17,39 @@ def qq_download(url, output_dir = '.', merge = True, info_only = False):
if re.match(r'http://v.qq.com/([^\?]+)\?vid', url):
aid = r1(r'(.*)\.html', url)
vid = r1(r'http://v.qq.com/[^\?]+\?vid=(\w+)', url)
- url = "%s/%s.html" % (aid, vid)
-
+ url = 'http://sns.video.qq.com/tvideo/fcgi-bin/video?vid=%s' % vid
+
if re.match(r'http://y.qq.com/([^\?]+)\?vid', url):
vid = r1(r'http://y.qq.com/[^\?]+\?vid=(\w+)', url)
-
+
url = "http://v.qq.com/page/%s.html" % vid
-
+
r_url = r1(r'<meta http-equiv="refresh" content="0;url=([^"]*)', get_html(url))
if r_url:
aid = r1(r'(.*)\.html', r_url)
url = "%s/%s.html" % (aid, vid)
-
+
if re.match(r'http://static.video.qq.com/.*vid=', url):
vid = r1(r'http://static.video.qq.com/.*vid=(\w+)', url)
url = "http://v.qq.com/page/%s.html" % vid
-
+
+ if re.match(r'http://v.qq.com/cover/.*\.html', url):
+ html = get_html(url)
+ vid = r1(r'vid:"([^"]+)"', html)
+ url = 'http://sns.video.qq.com/tvideo/fcgi-bin/video?vid=%s' % vid
+
html = get_html(url)
-
- title = r1(r'title:"([^"]+)"', html)
+
+ title = match1(html, r'<title>(.+?)</title>', r'title:"([^"]+)"')[0].strip()
assert title
title = unescape_html(title)
title = escape_file_path(title)
-
- id = r1(r'vid:"([^"]+)"', html)
-
+
+ try:
+ id = vid
+ except:
+ id = r1(r'vid:"([^"]+)"', html)
+
qq_download_by_id(id, title, output_dir = output_dir, merge = merge, info_only = info_only)
site_info = "QQ.com"
| Happy new year ๆMort, long time no PR for the project.
I've tested this fix in
```
http://v.qq.com/cover/j/j873qv1h7x54oqb.html?vid=i00139tqu0r&start=155
http://y.qq.com/y/static/mv/mv_play.html?vid=g0013ztbsf2
http://static.video.qq.com/TPout.swf?auto=1&vid=q0111msw4jo
```
It's not quiet easy for me to fix this, I'm very happy when found the video title hidden in http://sns.video.qq.com/tvideo/fcgi-bin/video?vid= in wireshark(seems easier than searching in dev console), so I hope that you could write sth. like your [post before](http://www.soimort.org/posts/149) about grabbing videos , thanks a lot for the project.
I've also removed some spaces, they are not nice in vim with vividchalk color scheme. It would be great if you could remove them through the project.
| https://api.github.com/repos/soimort/you-get/pulls/285 | 2013-12-31T14:09:11Z | 2014-01-05T19:36:37Z | 2014-01-05T19:36:37Z | 2014-06-29T06:18:57Z | 743 | soimort/you-get | 21,076 |
Contet-Type snippets for webroot | diff --git a/letsencrypt/plugins/webroot.py b/letsencrypt/plugins/webroot.py
index f11325f5771..f58c3397044 100644
--- a/letsencrypt/plugins/webroot.py
+++ b/letsencrypt/plugins/webroot.py
@@ -1,4 +1,43 @@
-"""Webroot plugin."""
+"""Webroot plugin.
+
+Content-Type
+------------
+
+This plugin requires your webserver to use a specific `Content-Type`
+header in the HTTP response.
+
+Apache2
+~~~~~~~
+
+.. note:: Instructions written and tested for Debian Jessie. Other
+ operating systems might use something very similar, but you might
+ still need to readjust some commands.
+
+Create ``/etc/apache2/conf-available/letsencrypt-simplehttp.conf``, with
+the following contents::
+
+ <IfModule mod_headers.c>
+ <LocationMatch "/.well-known/acme-challenge/*">
+ Header set Content-Type "application/jose+json"
+ </LocationMatch>
+ </IfModule>
+
+and then run ``a2enmod headers; a2enconf letsencrypt``; depending on the
+output you will have to either ``service apache2 restart`` or ``service
+apache2 reload``.
+
+nginx
+~~~~~
+
+Use the following snippet in your ``server{...}`` stanza::
+
+ location ~ /.well-known/acme-challenge/(.*) {
+ default_type application/jose+json;
+ }
+
+and reload your daemon.
+
+"""
import errno
import logging
import os
| I'll probably move those docs around after #1137.
cc @centminmod @chriscroome
| https://api.github.com/repos/certbot/certbot/pulls/1268 | 2015-10-31T20:34:56Z | 2015-11-02T06:33:29Z | 2015-11-02T06:33:29Z | 2016-05-06T19:21:37Z | 350 | certbot/certbot | 1,368 |
Add HFSummaryPairs class & fix AnthropicRLHF parsing | diff --git a/model/model_training/configs/config_rm.yaml b/model/model_training/configs/config_rm.yaml
index 447b75acca..1b66a1bb70 100644
--- a/model/model_training/configs/config_rm.yaml
+++ b/model/model_training/configs/config_rm.yaml
@@ -85,7 +85,7 @@ oasst-rm-1-pythia-1.4b:
- webgpt:
val_split: 0.05
max_val_set: 1000
- - hf_summary:
+ - hf_summary_pairs:
fraction: 0.1
max_val_set: 250
use_custom_sampler: true
@@ -97,10 +97,10 @@ oasst-rm-1-pythia-1.4b:
dtype: float32
max_length: 2048
use_flash_attention: true
- warmup_steps: 10
- gradient_accumulation_steps: 2
- per_device_train_batch_size: 2
- per_device_eval_batch_size: 6
+ warmup_steps: 50
+ gradient_accumulation_steps: 4
+ per_device_train_batch_size: 1
+ per_device_eval_batch_size: 5
num_train_epochs: 2
eval_steps: 500
save_steps: 1000
diff --git a/model/model_training/custom_datasets/__init__.py b/model/model_training/custom_datasets/__init__.py
index 01b415d4ec..2b7e085442 100644
--- a/model/model_training/custom_datasets/__init__.py
+++ b/model/model_training/custom_datasets/__init__.py
@@ -19,7 +19,7 @@
WebGPT,
)
from model_training.custom_datasets.rank_datasets import AugmentedOA
-from model_training.custom_datasets.summarization import HFSummary, SummarizationDataset
+from model_training.custom_datasets.summarization import HFSummary, HFSummaryPairs, SummarizationDataset
from model_training.custom_datasets.toxic_conversation import ProsocialDialogue, ProsocialDialogueExplaination
from model_training.custom_datasets.translation import WMT2019, DiveMT, TEDTalk
from sklearn.model_selection import train_test_split
@@ -50,6 +50,7 @@
"private_tuning",
"alpaca",
"hf_summary",
+ "hf_summary_pairs",
]
RM_DATASETS = [
@@ -57,6 +58,7 @@
"augment_oasst",
"anthropic_rlhf",
"hf_summary",
+ "hf_summary_pairs",
"shp",
"hellaswag",
"webgpt",
@@ -140,6 +142,9 @@ def get_one_dataset(
elif dataset_name == "hf_summary":
train = HFSummary(split="train", mode=mode)
eval = HFSummary(split="valid1", mode=mode)
+ elif dataset_name == "hf_summary_pairs":
+ train = HFSummaryPairs(split="train", mode=mode)
+ eval = HFSummaryPairs(split="valid1", mode=mode)
elif dataset_name == "augment_oasst":
# reward model mode only
assert mode == "rm"
diff --git a/model/model_training/custom_datasets/rank_datasets.py b/model/model_training/custom_datasets/rank_datasets.py
index 54a0510e29..a45c7f237e 100644
--- a/model/model_training/custom_datasets/rank_datasets.py
+++ b/model/model_training/custom_datasets/rank_datasets.py
@@ -183,10 +183,15 @@ class AnthropicRLHF(Dataset):
name = "anthropic_rlhf"
@staticmethod
- def _split_dialogue(text: str):
+ def _split_dialogue(text: str) -> list[tuple[str, str]]:
lines = text.split("\n\n")
- dialogue = []
+ dialogue: list[tuple[str, str]] = []
+
+ # go over messages and combine consecutive messages from the
+ # same speaker (OA v1 expects alternating roles)
+ role = None
+ messages = []
for line in lines:
if line.startswith("Human:"):
speaker = "Human"
@@ -196,16 +201,25 @@ def _split_dialogue(text: str):
message = line[11:]
else:
continue
- dialogue.append((speaker, message.strip()))
+ if role != speaker:
+ if role is not None:
+ dialogue.append((role, "\n".join(messages)))
+ messages = []
+ role = speaker
+ messages.append(message.strip())
+
+ if role is not None and len(messages) > 0:
+ dialogue.append((role, "\n".join(messages)))
return dialogue
- def __init__(self, split="train") -> None:
+ def __init__(self, split: str = "train") -> None:
super().__init__()
assert split in ("train", "test")
self.split = split
self.data = []
dataset = load_dataset("Anthropic/hh-rlhf")[split]
+
for entry in dataset:
chosen = entry["chosen"]
@@ -215,14 +229,17 @@ def __init__(self, split="train") -> None:
rejected = entry["rejected"]
chosen = self._split_dialogue(chosen)
rejected = self._split_dialogue(rejected)
+ assert rejected[0][0] == "Human" and chosen[0][0] == "Human"
- prefix = [line for (speaker, line) in chosen[:-1]]
- good_reply = chosen[-1][1] # last part of dialog, the text
- bad_reply = rejected[-1][1] # last part of dialog, the text
- self.data.append((prefix, [good_reply, bad_reply]))
+ # only very few items have non matching lengths
+ if len(rejected) == len(chosen):
+ prefix = [line for (speaker, line) in chosen[:-1]]
+ good_reply = chosen[-1][1] # last part of dialog, the text
+ bad_reply = rejected[-1][1] # last part of dialog, the text
+ self.data.append((prefix, [good_reply, bad_reply]))
- def __len__(self):
+ def __len__(self) -> int:
return len(self.data)
- def __getitem__(self, index):
+ def __getitem__(self, index: int) -> tuple[str, list[str]]:
return self.data[index]
diff --git a/model/model_training/custom_datasets/summarization.py b/model/model_training/custom_datasets/summarization.py
index e681110bc4..d9a7efdfe0 100644
--- a/model/model_training/custom_datasets/summarization.py
+++ b/model/model_training/custom_datasets/summarization.py
@@ -80,6 +80,89 @@ def __getitem__(self, idx):
return (context, summary)
+SUMMARIZATION_PROMPTS = [
+ "Please summarize the following content:\n{}",
+ "Write me a summary for the following article:\n{}",
+ "Kindly sum up the following information: {}",
+ "Please summarize the following text for me:\n{}",
+ "Give me a summary of the following text:\n\n{}",
+ "Describe the following information in brief: {}",
+ "Will you kindly summarize the following paragraph for me?\n{}",
+ "Summarize this: {}",
+ "TLDR this: {}",
+ "{}\nTLDR;",
+ "{}\n\nTL;DR",
+ "{} tl;dr:",
+ "{}\nPlease summarize the content above",
+ "{} Please summarize the preceding statements.",
+]
+
+
+class HFSummaryPairs(Dataset):
+ """
+ Simplified version of the HFSummary class which uses the original examples
+ of the OpenAI dataset.
+ https://huggingface.co/datasets/openai/summarize_from_feedback
+ """
+
+ def __init__(self, split="train", mode="sft", conf_threshold=-1) -> None:
+ super().__init__()
+ assert split in ("train", "valid1", "valid2", "test")
+ assert mode in ("sft", "rm", "rl")
+ self.mode = mode
+
+ self.posts = []
+ self.summary_pairs = []
+
+ major_split = split if "train" == split else "validation"
+ dataset = load_dataset("openai/summarize_from_feedback", "comparisons")[major_split]
+ for data in dataset:
+ if (
+ "extra" in data
+ and "confidence" in data["extra"]
+ and data["extra"]["confidence"] is not None
+ and conf_threshold > data["extra"]["confidence"]
+ ):
+ print("skipping {}".format(data["info"]["id"]))
+ continue
+
+ if split != "train" and split != data["split"]:
+ continue
+
+ if "article" in data["info"] and data["info"]["article"] is not None:
+ context = data["info"]["article"]
+ elif "post" in data["info"]:
+ context = data["info"]["post"]
+
+ self.posts.append(context)
+ pos, neg = (0, 1) if data["choice"] == 0 else (1, 0)
+ self.summary_pairs.append((data["summaries"][pos]["text"].strip(), data["summaries"][neg]["text"].strip()))
+
+ def __len__(self) -> int:
+ return len(self.posts)
+
+ def __getitem__(self, index: int) -> tuple | list:
+ if index < 0 or index >= len(self.posts):
+ raise IndexError()
+
+ context = self.posts[index]
+ # return pairs of comparison
+ good_summary, bad_summary = self.summary_pairs[index]
+ prompt = random.choice(SUMMARIZATION_PROMPTS)
+
+ # pair very big
+ # we are going to do some sampling
+ # not optimal but good for now
+ if self.mode == "sft":
+ return [prompt.format(context), good_summary]
+ elif self.mode == "rl":
+ return (prompt.format(context),)
+ elif self.mode == "rm":
+ return [prompt.format(context)], [good_summary, bad_summary]
+
+ raise RuntimeError(f"Unsupported mode '{self.mode}'")
+
+
class HFSummary(Dataset):
"""
Human feedback data from OpenAI
@@ -90,13 +173,6 @@ class HFSummary(Dataset):
"""
- PROMPTS = [
- "Please summarize the following content:\n{}",
- "{}\nTLDR;",
- "{}\nPlease summarize the content above",
- "Write a summary for the following article:\n{}",
- ]
-
def __init__(self, split="train", mode="sft", conf_threshold=-1, max_comparison_per_sample=5) -> None:
super().__init__()
assert split in ("train", "valid1", "valid2", "test")
@@ -165,17 +241,17 @@ def get_sorted_ranks(comparison_pairs):
return sorted_elements
- def __len__(self):
+ def __len__(self) -> int:
return len(self.index2summary)
- def __getitem__(self, index):
- if index >= len(self.index2summary):
+ def __getitem__(self, index) -> tuple | list:
+ if index < 0 or index >= len(self.index2summary):
raise IndexError()
- context = self.index2summary.get(index)
+ context = self.index2summary[index]
# return pairs of comparison
rows = self.summaries[context]
- prompt = random.choice(self.PROMPTS)
+ prompt = random.choice(SUMMARIZATION_PROMPTS)
# pair very big
# we are going to do some sampling
diff --git a/model/model_training/tests/test_ranking_collator.py b/model/model_training/tests/test_ranking_collator.py
index 363beb0be8..fb8938e6f8 100644
--- a/model/model_training/tests/test_ranking_collator.py
+++ b/model/model_training/tests/test_ranking_collator.py
@@ -12,17 +12,17 @@ def test_rm_datasets():
# dummy configuration
config = Namespace(cache_dir=".cache", model_name="EleutherAI/pythia-70m-deduped")
- dataset_names = ["webgpt", "hf_summary", "hellaswag", "shp", "anthropic_rlhf"]
+ dataset_names = ["anthropic_rlhf", "hf_summary_pairs", "webgpt", "hellaswag", "shp", "hf_summary"]
for name in dataset_names:
train, val = get_one_dataset(conf=config, dataset_name=name, mode="rm")
- print(f"dataset: {name} (train ({type(train)}): {len(train)}, val({type(val)}): {len(val)})")
+ print(f"dataset: '{name}' (train ({type(train)}): {len(train)}, val({type(val)}): {len(val)})")
avg_number_continuations = sum(len(x[1]) for x in train) / len(train)
num_more_than_two = sum(1 if len(x[1]) > 2 else 0 for x in train)
print(f"Average number of continuations: {avg_number_continuations} (with >2: {num_more_than_two})")
- for i in range(2):
- item = train[i]
+ for i in range(10):
+ item = train[i + 100]
print(f"[{i}] Prefix: {item[0]}")
continuations = item[1]
print(f"[{i}] Continuations ({len(continuations)}):")
| - add simpler version of HFSummaries which directly uses the pairs of the OpenAI dataset
- combine consecutive messages of same speaker during loading of AnthropicRLHF dataset, ignore examples with different structure between positive & negative example (only very few) | https://api.github.com/repos/LAION-AI/Open-Assistant/pulls/2362 | 2023-04-07T00:11:58Z | 2023-04-07T02:20:53Z | 2023-04-07T02:20:53Z | 2023-04-07T02:20:54Z | 3,096 | LAION-AI/Open-Assistant | 37,447 |
[2.7] bpo-31675: Fix memory leaks in Tkinter's methods splitlist() and split() (GH-3866) | diff --git a/Lib/test/test_tcl.py b/Lib/test/test_tcl.py
index 921d094d5a78fe..f0c9877862ec0b 100644
--- a/Lib/test/test_tcl.py
+++ b/Lib/test/test_tcl.py
@@ -705,25 +705,25 @@ def test_huge_unicode_builtins(self, size):
self.check_huge_string_builtins(value)
def check_huge_string_builtins(self, value):
- self.assertRaises(OverflowError, self.interp.tk.getint, value)
- self.assertRaises(OverflowError, self.interp.tk.getdouble, value)
- self.assertRaises(OverflowError, self.interp.tk.getboolean, value)
- self.assertRaises(OverflowError, self.interp.eval, value)
- self.assertRaises(OverflowError, self.interp.evalfile, value)
- self.assertRaises(OverflowError, self.interp.record, value)
- self.assertRaises(OverflowError, self.interp.adderrorinfo, value)
- self.assertRaises(OverflowError, self.interp.setvar, value, 'x', 'a')
- self.assertRaises(OverflowError, self.interp.setvar, 'x', value, 'a')
- self.assertRaises(OverflowError, self.interp.unsetvar, value)
- self.assertRaises(OverflowError, self.interp.unsetvar, 'x', value)
- self.assertRaises(OverflowError, self.interp.adderrorinfo, value)
- self.assertRaises(OverflowError, self.interp.exprstring, value)
- self.assertRaises(OverflowError, self.interp.exprlong, value)
- self.assertRaises(OverflowError, self.interp.exprboolean, value)
- self.assertRaises(OverflowError, self.interp.splitlist, value)
- self.assertRaises(OverflowError, self.interp.split, value)
- self.assertRaises(OverflowError, self.interp.createcommand, value, max)
- self.assertRaises(OverflowError, self.interp.deletecommand, value)
+ tk = self.interp.tk
+ self.assertRaises(OverflowError, tk.getint, value)
+ self.assertRaises(OverflowError, tk.getdouble, value)
+ self.assertRaises(OverflowError, tk.getboolean, value)
+ self.assertRaises(OverflowError, tk.eval, value)
+ self.assertRaises(OverflowError, tk.evalfile, value)
+ self.assertRaises(OverflowError, tk.record, value)
+ self.assertRaises(OverflowError, tk.adderrorinfo, value)
+ self.assertRaises(OverflowError, tk.setvar, value, 'x', 'a')
+ self.assertRaises(OverflowError, tk.setvar, 'x', value, 'a')
+ self.assertRaises(OverflowError, tk.unsetvar, value)
+ self.assertRaises(OverflowError, tk.unsetvar, 'x', value)
+ self.assertRaises(OverflowError, tk.exprstring, value)
+ self.assertRaises(OverflowError, tk.exprlong, value)
+ self.assertRaises(OverflowError, tk.exprboolean, value)
+ self.assertRaises(OverflowError, tk.splitlist, value)
+ self.assertRaises(OverflowError, tk.split, value)
+ self.assertRaises(OverflowError, tk.createcommand, value, max)
+ self.assertRaises(OverflowError, tk.deletecommand, value)
def setUpModule():
diff --git a/Misc/NEWS.d/next/Library/2017-10-03-15-06-24.bpo-31675.Nh7jJ3.rst b/Misc/NEWS.d/next/Library/2017-10-03-15-06-24.bpo-31675.Nh7jJ3.rst
new file mode 100644
index 00000000000000..4e4430773af894
--- /dev/null
+++ b/Misc/NEWS.d/next/Library/2017-10-03-15-06-24.bpo-31675.Nh7jJ3.rst
@@ -0,0 +1,2 @@
+Fixed memory leaks in Tkinter's methods splitlist() and split() when pass a
+string larger than 2 GiB.
diff --git a/Modules/_tkinter.c b/Modules/_tkinter.c
index 6053e4bde44473..444c268c0b4738 100644
--- a/Modules/_tkinter.c
+++ b/Modules/_tkinter.c
@@ -2332,7 +2332,11 @@ Tkapp_SplitList(PyObject *self, PyObject *args)
if (!PyArg_ParseTuple(args, "et:splitlist", "utf-8", &list))
return NULL;
- CHECK_STRING_LENGTH(list);
+ if (strlen(list) >= INT_MAX) {
+ PyErr_SetString(PyExc_OverflowError, "string is too long");
+ PyMem_Free(list);
+ return NULL;
+ }
if (Tcl_SplitList(Tkapp_Interp(self), list,
&argc, &argv) == TCL_ERROR) {
PyMem_Free(list);
@@ -2394,7 +2398,11 @@ Tkapp_Split(PyObject *self, PyObject *args)
if (!PyArg_ParseTuple(args, "et:split", "utf-8", &list))
return NULL;
- CHECK_STRING_LENGTH(list);
+ if (strlen(list) >= INT_MAX) {
+ PyErr_SetString(PyExc_OverflowError, "string is too long");
+ PyMem_Free(list);
+ return NULL;
+ }
v = Split(list);
PyMem_Free(list);
return v;
| when pass a string larger than 2 GiB.
(cherry picked from commit 27c623c845dd6e4b8e1782666ca3a956636da266)
<!-- issue-number: bpo-31675 -->
https://bugs.python.org/issue31675
<!-- /issue-number -->
| https://api.github.com/repos/python/cpython/pulls/3876 | 2017-10-03T21:41:14Z | 2017-10-04T18:37:54Z | 2017-10-04T18:37:54Z | 2017-10-04T18:37:58Z | 1,214 | python/cpython | 4,682 |
Added Redis and Redis Insight to Ansible playbook. | diff --git a/ansible/README.md b/ansible/README.md
new file mode 100644
index 0000000000..2ab1943eaa
--- /dev/null
+++ b/ansible/README.md
@@ -0,0 +1,7 @@
+To test the ansible playbook on localhost run
+`ansible-playbook -i test.inventory.ini dev.yaml`.\
+In case you're missing the ansible docker depencency install it with `ansible-galaxy collection install community.docker`.\
+Point Redis Insights to the Redis database by visiting localhost:8001 in a
+browser and select "I already have a database" followed by "Connect to a Redis
+Database".\
+For host, port and name fill in `oasst-redis`, `6379` and `redis`.
diff --git a/ansible/dev.yaml b/ansible/dev.yaml
index 577abd68da..90f7a85ab8 100644
--- a/ansible/dev.yaml
+++ b/ansible/dev.yaml
@@ -10,6 +10,39 @@
state: present
driver: bridge
+ - name: Copy redis.conf to managed node
+ ansible.builtin.copy:
+ src: ./redis.conf
+ dest: ./redis.conf
+
+ - name: Set up Redis
+ community.docker.docker_container:
+ name: oasst-redis
+ image: redis
+ state: started
+ restart_policy: always
+ network_mode: oasst
+ ports:
+ - 6379:6379
+ healthcheck:
+ test: ["CMD-SHELL", "redis-cli ping | grep PONG"]
+ interval: 2s
+ timeout: 2s
+ retries: 10
+ command: redis-server /usr/local/etc/redis/redis.conf
+ volumes:
+ - "./redis.conf:/usr/local/etc/redis/redis.conf"
+
+ - name: Set up Redis Insights
+ community.docker.docker_container:
+ name: oasst-redis-insights
+ image: redislabs/redisinsight:latest
+ state: started
+ restart_policy: always
+ network_mode: oasst
+ ports:
+ - 8001:8001
+
- name: Create postgres containers
community.docker.docker_container:
name: "{{ item.name }}"
@@ -51,6 +84,7 @@
network_mode: oasst
env:
POSTGRES_HOST: oasst-postgres
+ REDIS_HOST: oasst-redis
DEBUG_ALLOW_ANY_API_KEY: "true"
DEBUG_USE_SEED_DATA: "true"
MAX_WORKERS: "1"
diff --git a/ansible/redis.conf b/ansible/redis.conf
new file mode 100644
index 0000000000..58da1e0573
--- /dev/null
+++ b/ansible/redis.conf
@@ -0,0 +1,2 @@
+maxmemory 100mb
+maxmemory-policy allkeys-lru
diff --git a/ansible/test.inventory.ini b/ansible/test.inventory.ini
new file mode 100644
index 0000000000..bfe6d93fd5
--- /dev/null
+++ b/ansible/test.inventory.ini
@@ -0,0 +1,2 @@
+[test]
+dev ansible_connection=local
| I basically converted the [changes](https://github.com/LAION-AI/Open-Assistant/pull/187/files#diff-3fde9d1a396e140fefc7676e1bd237d67b6864552b6f45af1ebcc27bcd0bb6e9) in the ```docker-compose.yaml``` to the ansible playbook and tweaked it a bit.
I further added an ansible inventory file for local testing and docs. | https://api.github.com/repos/LAION-AI/Open-Assistant/pulls/405 | 2023-01-05T10:51:17Z | 2023-01-09T07:11:22Z | 2023-01-09T07:11:22Z | 2023-01-09T07:11:22Z | 746 | LAION-AI/Open-Assistant | 37,737 |
Remove duplicate answers in DNS queries | diff --git a/CHANGELOG.md b/CHANGELOG.md
index 44b4369036..8e613d5da0 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -13,6 +13,8 @@
([#6599](https://github.com/mitmproxy/mitmproxy/pull/6599), @basedBaba)
* Add an arm64 variant for the precompiled macOS app.
([#6633](https://github.com/mitmproxy/mitmproxy/pull/6633), @mhils)
+* Fix duplicate answers being returned in DNS queries.
+ ([#6648](https://github.com/mitmproxymitmproxy/pull/6648), @sujaldev)
## 21 January 2024: mitmproxy 10.2.2
diff --git a/mitmproxy/addons/dns_resolver.py b/mitmproxy/addons/dns_resolver.py
index 63718050eb..e46ee52b04 100644
--- a/mitmproxy/addons/dns_resolver.py
+++ b/mitmproxy/addons/dns_resolver.py
@@ -26,7 +26,9 @@ async def resolve_question_by_name(
ip: Callable[[str], ipaddress.IPv4Address | ipaddress.IPv6Address],
) -> Iterable[dns.ResourceRecord]:
try:
- addrinfos = await loop.getaddrinfo(host=question.name, port=0, family=family)
+ addrinfos = await loop.getaddrinfo(
+ host=question.name, port=0, family=family, type=socket.SOCK_STREAM
+ )
except socket.gaierror as e:
if e.errno == socket.EAI_NONAME:
raise ResolveError(dns.response_codes.NXDOMAIN)
diff --git a/test/mitmproxy/addons/test_dns_resolver.py b/test/mitmproxy/addons/test_dns_resolver.py
index a72d5ed257..a998ef264e 100644
--- a/test/mitmproxy/addons/test_dns_resolver.py
+++ b/test/mitmproxy/addons/test_dns_resolver.py
@@ -41,18 +41,18 @@ async def getnameinfo(self, socketaddr: Address, flags: int = 0):
e.errno = socket.EAI_NONAME
raise e
- async def getaddrinfo(self, host: str, port: int, *, family: int):
+ async def getaddrinfo(self, host: str, port: int, *, family: int, type: int):
e = socket.gaierror()
e.errno = socket.EAI_NONAME
if family == socket.AF_INET:
if host == "dns.google":
- return [(socket.AF_INET, None, None, None, ("8.8.8.8", port))]
+ return [(socket.AF_INET, type, None, None, ("8.8.8.8", port))]
elif family == socket.AF_INET6:
if host == "dns.google":
return [
(
socket.AF_INET6,
- None,
+ type,
None,
None,
("2001:4860:4860::8888", port, None, None),
diff --git a/test/mitmproxy/addons/test_proxyserver.py b/test/mitmproxy/addons/test_proxyserver.py
index 2f22736a64..0269dcd403 100644
--- a/test/mitmproxy/addons/test_proxyserver.py
+++ b/test/mitmproxy/addons/test_proxyserver.py
@@ -270,9 +270,9 @@ class DummyResolver:
async def dns_request(self, flow: dns.DNSFlow) -> None:
flow.response = await dns_resolver.resolve_message(flow.request, self)
- async def getaddrinfo(self, host: str, port: int, *, family: int):
+ async def getaddrinfo(self, host: str, port: int, *, family: int, type: int):
if family == socket.AF_INET and host == "dns.google":
- return [(socket.AF_INET, None, None, None, ("8.8.8.8", port))]
+ return [(socket.AF_INET, type, None, None, ("8.8.8.8", port))]
e = socket.gaierror()
e.errno = socket.EAI_NONAME
raise e
| #### Description
Fixes #6647 by assuming all DNS queries are made over UDP, will need to be reworked when TCP support is added.
#### Checklist
- [x] I have updated tests where applicable.
- [x] I have added an entry to the CHANGELOG.
| https://api.github.com/repos/mitmproxy/mitmproxy/pulls/6648 | 2024-02-06T15:39:48Z | 2024-02-13T09:58:35Z | 2024-02-13T09:58:35Z | 2024-02-13T09:58:35Z | 947 | mitmproxy/mitmproxy | 27,760 |
Update tiny_vit.py to fix bug | diff --git a/timm/models/tiny_vit.py b/timm/models/tiny_vit.py
index 4b5836584c..96a88db7f3 100644
--- a/timm/models/tiny_vit.py
+++ b/timm/models/tiny_vit.py
@@ -378,6 +378,7 @@ def __init__(
super().__init__()
self.depth = depth
+ self.out_dim = out_dim
# patch merging layer
if downsample is not None:
| There was a minor error when attempting to use TinyVit. I found an undeclared variable and have fixed it. | https://api.github.com/repos/huggingface/pytorch-image-models/pulls/1964 | 2023-09-23T04:22:59Z | 2023-09-23T17:05:52Z | 2023-09-23T17:05:52Z | 2023-09-23T17:05:52Z | 111 | huggingface/pytorch-image-models | 16,277 |
add missing backtick | diff --git a/Methodology and Resources/Network Pivoting Techniques.md b/Methodology and Resources/Network Pivoting Techniques.md
index ebc262ee8b..90a60c0e85 100644
--- a/Methodology and Resources/Network Pivoting Techniques.md
+++ b/Methodology and Resources/Network Pivoting Techniques.md
@@ -256,7 +256,7 @@ unzip ngrok-stable-linux-amd64.zip
| :------------- | :------------------------------------------ |
| ncat | `ncat localhost 8080 -c "ncat localhost 9090"` |
| socat | `socat -v tcp-connect:localhost:8080,reuseaddr tcp-connect:localhost:9090` |
-| remote host 1 | `ncat -l -p 8080 < file |
+| remote host 1 | `ncat -l -p 8080 < file` |
| remote host 2 | `ncat -l -p 9090 > newfile` |
## References
@@ -265,4 +265,4 @@ unzip ngrok-stable-linux-amd64.zip
* [Port Forwarding in Windows - Windows OS Hub](http://woshub.com/port-forwarding-in-windows/)
* [Using the SSH "Konami Code" (SSH Control Sequences) - Jeff McJunkin](https://pen-testing.sans.org/blog/2015/11/10/protected-using-the-ssh-konami-code-ssh-control-sequences)
* [A Red Teamer's guide to pivoting- Mar 23, 2017 - Artem Kondratenko](https://artkond.com/2017/03/23/pivoting-guide/)
-* [Pivoting Meterpreter](https://www.information-security.fr/pivoting-meterpreter/)
\ No newline at end of file
+* [Pivoting Meterpreter](https://www.information-security.fr/pivoting-meterpreter/)
| https://api.github.com/repos/swisskyrepo/PayloadsAllTheThings/pulls/87 | 2019-08-29T07:49:21Z | 2019-08-29T08:31:48Z | 2019-08-29T08:31:48Z | 2019-08-29T13:53:07Z | 430 | swisskyrepo/PayloadsAllTheThings | 8,316 |
|
Fix 168 | diff --git a/letsencrypt/client/apache/parser.py b/letsencrypt/client/apache/parser.py
index 792257b5a03..efc692d97ca 100644
--- a/letsencrypt/client/apache/parser.py
+++ b/letsencrypt/client/apache/parser.py
@@ -240,27 +240,47 @@ def fnmatch_to_re(self, clean_fn_match): # pylint: disable=no-self-use
regex = regex + letter
return regex
- def _parse_file(self, file_path):
+ def _parse_file(self, filepath):
"""Parse file with Augeas
Checks to see if file_path is parsed by Augeas
- If file_path isn't parsed, the file is added and Augeas is reloaded
+ If filepath isn't parsed, the file is added and Augeas is reloaded
- :param str file_path: Apache config file path
+ :param str filepath: Apache config file path
"""
# Test if augeas included file for Httpd.lens
# Note: This works for augeas globs, ie. *.conf
inc_test = self.aug.match(
- "/augeas/load/Httpd/incl [. ='%s']" % file_path)
+ "/augeas/load/Httpd/incl [. ='%s']" % filepath)
if not inc_test:
# Load up files
- # self.httpd_incl.append(file_path)
- # self.aug.add_transform("Httpd.lns",
- # self.httpd_incl, None, self.httpd_excl)
- self._add_httpd_transform(file_path)
+ # This doesn't seem to work on TravisCI
+ # self.aug.add_transform("Httpd.lns", [filepath])
+ self._add_httpd_transform(filepath)
self.aug.load()
+ def _add_httpd_transform(self, incl):
+ """Add a transform to Augeas.
+
+ This function will correctly add a transform to augeas
+ The existing augeas.add_transform in python doesn't seem to work for
+ Travis CI as it loads in libaugeas.so.0.10.0
+
+ :param str incl: filepath to include for transform
+
+ """
+ last_include = self.aug.match("/augeas/load/Httpd/incl [last()]")
+ if last_include:
+ # Insert a new node immediately after the last incl
+ self.aug.insert(last_include[0], "incl", False)
+ self.aug.set("/augeas/load/Httpd/incl[last()]", incl)
+ # On first use... must load lens and add file to incl
+ else:
+ # Augeas uses base 1 indexing... insert at beginning...
+ self.aug.set("/augeas/load/Httpd/lens", "Httpd.lns")
+ self.aug.set("/augeas/load/Httpd/incl", incl)
+
def standardize_excl(self):
"""Standardize the excl arguments for the Httpd lens in Augeas.
@@ -293,19 +313,6 @@ def standardize_excl(self):
self.aug.load()
- def _add_httpd_transform(self, incl):
- """Add a transform to Augeas.
-
- This function will correctly add a transform to augeas
- The existing augeas.add_transform in python is broken.
-
- :param str incl: TODO
-
- """
- last_include = self.aug.match("/augeas/load/Httpd/incl [last()]")
- self.aug.insert(last_include[0], "incl", False)
- self.aug.set("/augeas/load/Httpd/incl[last()]", incl)
-
def _set_locations(self, ssl_options):
"""Set default location for directives.
diff --git a/letsencrypt/client/augeas_configurator.py b/letsencrypt/client/augeas_configurator.py
index 1c366c60e2b..793b141d6c5 100644
--- a/letsencrypt/client/augeas_configurator.py
+++ b/letsencrypt/client/augeas_configurator.py
@@ -36,10 +36,11 @@ def __init__(self, direc=None):
"progress": CONFIG.IN_PROGRESS_DIR}
self.direc = direc
- # TODO: this instantiation can be optimized to only load
- # relevant files - I believe -> NO_MODL_AUTOLOAD
- # Set Augeas flags to save backup
- self.aug = augeas.Augeas(flags=augeas.Augeas.NONE)
+
+ # Set Augeas flags to not save backup (we do it ourselves)
+ # Set Augeas to not load anything by default
+ my_flags = augeas.Augeas.NONE | augeas.Augeas.NO_MODL_AUTOLOAD
+ self.aug = augeas.Augeas(flags=my_flags)
self.save_notes = ""
def check_parsing_errors(self, lens):
@@ -187,7 +188,7 @@ def rollback_checkpoints(self, rollback=1):
self.aug.load()
- def show_config_changes(self):
+ def view_config_changes(self):
"""Displays all saved checkpoints.
All checkpoints are printed to the console.
| Should fix #168 and #169.
The broken `add_transform` call was a leftover from before the project started manually adding all of the Augeas transforms itself.
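For anyone who needs the workaround in isolation, this is the gist of `_add_httpd_transform` as a standalone sketch (the paths, flags, and calls are the ones from the diff; the wrapper around them is illustrative):

```python
import augeas

# Skip lens autoloading; we register the Httpd lens ourselves below.
aug = augeas.Augeas(flags=augeas.Augeas.NONE | augeas.Augeas.NO_MODL_AUTOLOAD)

def add_httpd_transform(aug, incl):
    """Register `incl` with the Httpd lens without augeas.add_transform()."""
    last_include = aug.match("/augeas/load/Httpd/incl [last()]")
    if last_include:
        # Insert a new incl node immediately after the last one.
        aug.insert(last_include[0], "incl", False)
        aug.set("/augeas/load/Httpd/incl[last()]", incl)
    else:
        # First use: load the lens, then add the first included file.
        aug.set("/augeas/load/Httpd/lens", "Httpd.lns")
        aug.set("/augeas/load/Httpd/incl", incl)
    aug.load()

add_httpd_transform(aug, "/etc/apache2/apache2.conf")
```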
| https://api.github.com/repos/certbot/certbot/pulls/170 | 2015-01-20T22:03:39Z | 2015-01-21T06:16:49Z | 2015-01-21T06:16:49Z | 2016-05-06T19:22:11Z | 1,180 | certbot/certbot | 670 |
Added a favicon for the docs | diff --git a/docs/_static/flask-favicon.ico b/docs/_static/flask-favicon.ico
new file mode 100644
index 0000000000..bf0a961573
Binary files /dev/null and b/docs/_static/flask-favicon.ico differ
diff --git a/docs/conf.py b/docs/conf.py
index c971a57dee..feed359fb5 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -125,7 +125,7 @@
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
-#html_favicon = None
+html_favicon = "flask-favicon.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
| Added a favicon for the docs as requested in issue https://github.com/mitsuhiko/flask/issues/934
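For reference, the Sphinx side of this is a single setting (sketch; per the comment in `conf.py`, the `.ico` must live within the static path, here `docs/_static/`):

```python
# docs/conf.py — Sphinx links this file as the favicon on every generated page.
html_favicon = "flask-favicon.ico"
```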
| https://api.github.com/repos/pallets/flask/pulls/943 | 2014-01-06T16:22:44Z | 2014-01-06T16:28:03Z | 2014-01-06T16:28:03Z | 2020-11-14T05:33:44Z | 212 | pallets/flask | 20,399 |
Update 2.7.x from 2.7.4 release | diff --git a/acme/setup.py b/acme/setup.py
index 85f6ef62f81..d292f8def5e 100644
--- a/acme/setup.py
+++ b/acme/setup.py
@@ -3,7 +3,7 @@
from setuptools import find_packages
from setuptools import setup
-version = '2.7.3'
+version = '2.7.4'
install_requires = [
'cryptography>=3.2.1',
diff --git a/certbot-apache/setup.py b/certbot-apache/setup.py
index 7e4fe144301..3e708146e7c 100644
--- a/certbot-apache/setup.py
+++ b/certbot-apache/setup.py
@@ -1,7 +1,7 @@
from setuptools import find_packages
from setuptools import setup
-version = '2.7.3'
+version = '2.7.4'
install_requires = [
# We specify the minimum acme and certbot version as the current plugin
diff --git a/certbot-compatibility-test/setup.py b/certbot-compatibility-test/setup.py
index d7e9a871302..35a820751d1 100644
--- a/certbot-compatibility-test/setup.py
+++ b/certbot-compatibility-test/setup.py
@@ -1,7 +1,7 @@
from setuptools import find_packages
from setuptools import setup
-version = '2.7.3'
+version = '2.7.4'
install_requires = [
'certbot',
diff --git a/certbot-dns-cloudflare/setup.py b/certbot-dns-cloudflare/setup.py
index 04fcf61d348..bf5a36557f5 100644
--- a/certbot-dns-cloudflare/setup.py
+++ b/certbot-dns-cloudflare/setup.py
@@ -4,7 +4,7 @@
from setuptools import find_packages
from setuptools import setup
-version = '2.7.3'
+version = '2.7.4'
install_requires = [
'cloudflare>=1.5.1',
diff --git a/certbot-dns-digitalocean/setup.py b/certbot-dns-digitalocean/setup.py
index 390af42bc1a..fd3483ff572 100644
--- a/certbot-dns-digitalocean/setup.py
+++ b/certbot-dns-digitalocean/setup.py
@@ -4,7 +4,7 @@
from setuptools import find_packages
from setuptools import setup
-version = '2.7.3'
+version = '2.7.4'
install_requires = [
'python-digitalocean>=1.11', # 1.15.0 or newer is recommended for TTL support
diff --git a/certbot-dns-dnsimple/setup.py b/certbot-dns-dnsimple/setup.py
index 5e3eb6bb8c9..106324f8940 100644
--- a/certbot-dns-dnsimple/setup.py
+++ b/certbot-dns-dnsimple/setup.py
@@ -4,7 +4,7 @@
from setuptools import find_packages
from setuptools import setup
-version = '2.7.3'
+version = '2.7.4'
install_requires = [
# This version of lexicon is required to address the problem described in
diff --git a/certbot-dns-dnsmadeeasy/setup.py b/certbot-dns-dnsmadeeasy/setup.py
index e5a58e65894..3617c46ffa3 100644
--- a/certbot-dns-dnsmadeeasy/setup.py
+++ b/certbot-dns-dnsmadeeasy/setup.py
@@ -4,7 +4,7 @@
from setuptools import find_packages
from setuptools import setup
-version = '2.7.3'
+version = '2.7.4'
install_requires = [
'dns-lexicon>=3.14.1',
diff --git a/certbot-dns-gehirn/setup.py b/certbot-dns-gehirn/setup.py
index 5294895b35e..be4cfbc0798 100644
--- a/certbot-dns-gehirn/setup.py
+++ b/certbot-dns-gehirn/setup.py
@@ -4,7 +4,7 @@
from setuptools import find_packages
from setuptools import setup
-version = '2.7.3'
+version = '2.7.4'
install_requires = [
'dns-lexicon>=3.14.1',
diff --git a/certbot-dns-google/setup.py b/certbot-dns-google/setup.py
index e328cb629aa..b0f78d12879 100644
--- a/certbot-dns-google/setup.py
+++ b/certbot-dns-google/setup.py
@@ -4,7 +4,7 @@
from setuptools import find_packages
from setuptools import setup
-version = '2.7.3'
+version = '2.7.4'
install_requires = [
'google-api-python-client>=1.6.5',
diff --git a/certbot-dns-linode/setup.py b/certbot-dns-linode/setup.py
index 47c108e8570..337b6b4d853 100644
--- a/certbot-dns-linode/setup.py
+++ b/certbot-dns-linode/setup.py
@@ -4,7 +4,7 @@
from setuptools import find_packages
from setuptools import setup
-version = '2.7.3'
+version = '2.7.4'
install_requires = [
'dns-lexicon>=3.14.1',
diff --git a/certbot-dns-luadns/setup.py b/certbot-dns-luadns/setup.py
index 8cce68501c2..5acece1ab1a 100644
--- a/certbot-dns-luadns/setup.py
+++ b/certbot-dns-luadns/setup.py
@@ -4,7 +4,7 @@
from setuptools import find_packages
from setuptools import setup
-version = '2.7.3'
+version = '2.7.4'
install_requires = [
'dns-lexicon>=3.14.1',
diff --git a/certbot-dns-nsone/setup.py b/certbot-dns-nsone/setup.py
index b05b7bb6617..2b6a1948f16 100644
--- a/certbot-dns-nsone/setup.py
+++ b/certbot-dns-nsone/setup.py
@@ -4,7 +4,7 @@
from setuptools import find_packages
from setuptools import setup
-version = '2.7.3'
+version = '2.7.4'
install_requires = [
'dns-lexicon>=3.14.1',
diff --git a/certbot-dns-ovh/setup.py b/certbot-dns-ovh/setup.py
index 479337e8f79..85be5c5bc9e 100644
--- a/certbot-dns-ovh/setup.py
+++ b/certbot-dns-ovh/setup.py
@@ -4,7 +4,7 @@
from setuptools import find_packages
from setuptools import setup
-version = '2.7.3'
+version = '2.7.4'
install_requires = [
'dns-lexicon>=3.15.1',
diff --git a/certbot-dns-rfc2136/setup.py b/certbot-dns-rfc2136/setup.py
index 7a2d66ba6e9..c5374dc0453 100644
--- a/certbot-dns-rfc2136/setup.py
+++ b/certbot-dns-rfc2136/setup.py
@@ -4,7 +4,7 @@
from setuptools import find_packages
from setuptools import setup
-version = '2.7.3'
+version = '2.7.4'
install_requires = [
'dnspython>=1.15.0',
diff --git a/certbot-dns-route53/setup.py b/certbot-dns-route53/setup.py
index d17793c9e76..dd1f03a5b0d 100644
--- a/certbot-dns-route53/setup.py
+++ b/certbot-dns-route53/setup.py
@@ -4,7 +4,7 @@
from setuptools import find_packages
from setuptools import setup
-version = '2.7.3'
+version = '2.7.4'
install_requires = [
'boto3>=1.15.15',
diff --git a/certbot-dns-sakuracloud/setup.py b/certbot-dns-sakuracloud/setup.py
index 289de7a5839..455c4745c61 100644
--- a/certbot-dns-sakuracloud/setup.py
+++ b/certbot-dns-sakuracloud/setup.py
@@ -4,7 +4,7 @@
from setuptools import find_packages
from setuptools import setup
-version = '2.7.3'
+version = '2.7.4'
install_requires = [
'dns-lexicon>=3.14.1',
diff --git a/certbot-nginx/setup.py b/certbot-nginx/setup.py
index a0c9680d504..775a361be58 100644
--- a/certbot-nginx/setup.py
+++ b/certbot-nginx/setup.py
@@ -1,7 +1,7 @@
from setuptools import find_packages
from setuptools import setup
-version = '2.7.3'
+version = '2.7.4'
install_requires = [
# We specify the minimum acme and certbot version as the current plugin
diff --git a/certbot/CHANGELOG.md b/certbot/CHANGELOG.md
index 1faadc4c467..9c87266f2c8 100644
--- a/certbot/CHANGELOG.md
+++ b/certbot/CHANGELOG.md
@@ -2,7 +2,7 @@
Certbot adheres to [Semantic Versioning](https://semver.org/).
-## 2.7.4 - master
+## 2.7.4 - 2023-11-01
### Fixed
diff --git a/certbot/certbot/__init__.py b/certbot/certbot/__init__.py
index a84a327fd69..fc427a33d37 100644
--- a/certbot/certbot/__init__.py
+++ b/certbot/certbot/__init__.py
@@ -3,7 +3,7 @@
import warnings
# version number like 1.2.3a0, must have at least 2 parts, like 1.2
-__version__ = '2.7.3'
+__version__ = '2.7.4'
if sys.version_info[:2] == (3, 7):
warnings.warn(
diff --git a/certbot/docs/cli-help.txt b/certbot/docs/cli-help.txt
index f191740b218..b190fb6b880 100644
--- a/certbot/docs/cli-help.txt
+++ b/certbot/docs/cli-help.txt
@@ -122,7 +122,7 @@ optional arguments:
case, and to know when to deprecate support for past
Python versions and flags. If you wish to hide this
information from the Let's Encrypt server, set this to
- "". (default: CertbotACMEClient/2.7.3 (certbot;
+ "". (default: CertbotACMEClient/2.7.4 (certbot;
OS_NAME OS_VERSION) Authenticator/XXX Installer/YYY
(SUBCOMMAND; flags: FLAGS) Py/major.minor.patchlevel).
The flags encoded in the user agent are: --duplicate,
| This PR should not be squashed to preserve the signed and tagged commit. | https://api.github.com/repos/certbot/certbot/pulls/9833 | 2023-11-01T14:53:11Z | 2023-11-01T18:09:43Z | 2023-11-01T18:09:43Z | 2023-11-01T18:09:44Z | 2,643 | certbot/certbot | 3,569 |
Refining contributing.rst | diff --git a/docs/contributing.rst b/docs/contributing.rst
index 5ec44470d36..e83657386a5 100644
--- a/docs/contributing.rst
+++ b/docs/contributing.rst
@@ -96,11 +96,32 @@ Integration testing with the boulder CA
Generally it is sufficient to open a pull request and let Github and Travis run
integration tests for you.
-Mac OS X users: Run `./tests/mac-bootstrap.sh` instead of `boulder-start.sh` to
-install dependencies, configure the environment, and start boulder.
+Mac OS X users: Run ``./tests/mac-bootstrap.sh`` instead of
+``boulder-start.sh`` to install dependencies, configure the
+environment, and start boulder.
-Otherwise, install `Go`_ 1.5, libtool-ltdl, mariadb-server and
-rabbitmq-server and then start Boulder_, an ACME CA server::
+Otherwise, install `Go`_ 1.5, ``libtool-ltdl``, ``mariadb-server`` and
+``rabbitmq-server`` and then start Boulder_, an ACME CA server.
+
+If you can't get packages of Go 1.5 for your Linux system,
+you can execute the following commands to install it:
+
+.. code-block:: shell
+
+ wget https://storage.googleapis.com/golang/go1.5.3.linux-amd64.tar.gz -P /tmp/
+ sudo tar -C /usr/local -xzf /tmp/go1.5.3.linux-amd64.tar.gz
+ if ! grep -Fxq "export GOROOT=/usr/local/go" ~/.profile ; then echo "export GOROOT=/usr/local/go" >> ~/.profile; fi
+ if ! grep -Fxq "export PATH=\\$GOROOT/bin:\\$PATH" ~/.profile ; then echo "export PATH=\\$GOROOT/bin:\\$PATH" >> ~/.profile; fi
+
+These commands download `Go`_ 1.5.3 to ``/tmp/``, extracts to ``/usr/local``,
+and then adds the export lines required to execute ``boulder-start.sh`` to
+``~/.profile`` if they were not previously added
+
+Make sure you execute the following command after `Go`_ finishes installing::
+
+ if ! grep -Fxq "export GOPATH=\\$HOME/go" ~/.profile ; then echo "export GOPATH=\\$HOME/go" >> ~/.profile; fi
+
+Afterwards, you'd be able to start Boulder_ using the following command::
./tests/boulder-start.sh
| Adds instructions to contributing.rst on how to install Go 1.5.3 on Linux.
| https://api.github.com/repos/certbot/certbot/pulls/2232 | 2016-01-19T18:46:57Z | 2016-01-25T18:20:07Z | 2016-01-25T18:20:07Z | 2016-05-06T19:22:22Z | 579 | certbot/certbot | 2,592 |
Update readme | diff --git a/README.md b/README.md
index 8342d052f0..0b204a48f3 100644
--- a/README.md
+++ b/README.md
@@ -80,11 +80,16 @@ python3 -m fastchat.serve.cli --model-name /path/to/vicuna/weights --num-gpus 2
```
#### CPU Only
-This runs on CPU only. It requires around 60GB of CPU memory for Vicuna-13B.
+This runs on the CPU only and does not require GPU. It requires around 60GB of CPU memory for Vicuna-13B.
```
python3 -m fastchat.serve.cli --model-name /path/to/vicuna/weights --device cpu
```
+#### Others (Quantization, More Platforms)
+Currently, we only provide some basic commands for running the model.
+We are actively exploring methods to make the model easier to run on more platforms.
+Contributions and pull requests are welcome.
+
### Web UI
#### Launch a controller
@@ -99,7 +104,7 @@ python3 -m fastchat.serve.model_worker --model-path /path/to/vicuna/weights
#### Send a test message
```bash
-python3 -m fastchat.serve.test_message
+python3 -m fastchat.serve.test_message --model-name vicuna-13b
```
#### Launch a gradio web server.
| https://api.github.com/repos/lm-sys/FastChat/pulls/148 | 2023-04-03T18:12:11Z | 2023-04-03T18:12:17Z | 2023-04-03T18:12:17Z | 2023-04-03T18:12:22Z | 309 | lm-sys/FastChat | 41,720 |
|
spelling, whitespace, capitalization consistency | diff --git a/README.md b/README.md
index c4612406..e8bda13c 100644
--- a/README.md
+++ b/README.md
@@ -134,7 +134,7 @@ For a list of free-to-attend meetups and local events, go [here](https://github.
#### General-Purpose Machine Learning
* [Darknet](https://github.com/pjreddie/darknet) - Darknet is an open source neural network framework written in C and CUDA. It is fast, easy to install, and supports CPU and GPU computation.
* [Recommender](https://github.com/GHamrouni/Recommender) - A C library for product recommendations/suggestions using collaborative filtering (CF).
-* [Hybrid Recommender System](https://github.com/SeniorSA/hybrid-rs-trainner) - A hybrid recomender system based upon scikit-learn algorithms.
+* [Hybrid Recommender System](https://github.com/SeniorSA/hybrid-rs-trainner) - A hybrid recommender system based upon scikit-learn algorithms.
<a name="c-cv"></a>
#### Computer Vision
@@ -165,7 +165,7 @@ For a list of free-to-attend meetups and local events, go [here](https://github.
* [CUDA](https://code.google.com/p/cuda-convnet/) - This is a fast C++/CUDA implementation of convolutional [DEEP LEARNING]
* [CXXNET](https://github.com/antinucleon/cxxnet) - Yet another deep learning framework with less than 1000 lines core code [DEEP LEARNING]
* [DeepDetect](https://github.com/beniz/deepdetect) - A machine learning API and server written in C++11. It makes state of the art machine learning easy to work with and integrate into existing applications.
-* [Disrtibuted Machine learning Tool Kit (DMTK)](http://www.dmtk.io/) - A distributed machine learning (parameter server) framework by Microsoft. Enables training models on large data sets across multiple machines. Current tools bundled with it include: LightLDA and Distributed (Multisense) Word Embedding.
+* [Distributed Machine learning Tool Kit (DMTK)](http://www.dmtk.io/) - A distributed machine learning (parameter server) framework by Microsoft. Enables training models on large data sets across multiple machines. Current tools bundled with it include: LightLDA and Distributed (Multisense) Word Embedding.
* [DLib](http://dlib.net/ml.html) - A suite of ML tools designed to be easy to imbed in other applications
* [DSSTNE](https://github.com/amznlabs/amazon-dsstne) - A software library created by Amazon for training and deploying deep neural networks using GPUs which emphasizes speed and scale over experimental flexibility.
* [DyNet](https://github.com/clab/dynet) - A dynamic neural network library working well with networks that have dynamic structures that change for every training instance. Written in C++ with bindings in Python.
@@ -234,7 +234,7 @@ For a list of free-to-attend meetups and local events, go [here](https://github.
* [Touchstone](https://github.com/ptaoussanis/touchstone) - Clojure A/B testing library
* [Clojush](https://github.com/lspector/Clojush) - The Push programming language and the PushGP genetic programming system implemented in Clojure
-* [Infer](https://github.com/aria42/infer) - Inference and machine learning in clojure
+* [Infer](https://github.com/aria42/infer) - Inference and machine learning in Clojure
* [Clj-ML](https://github.com/antoniogarrote/clj-ml) - A machine learning library for Clojure built on top of Weka and friends
* [DL4CLJ](https://github.com/engagor/dl4clj/) - Clojure wrapper for Deeplearning4j
* [Encog](https://github.com/jimpil/enclog) - Clojure wrapper for Encog (v3) (Machine-Learning framework that specializes in neural-nets)
@@ -293,8 +293,8 @@ For a list of free-to-attend meetups and local events, go [here](https://github.
* [go-pr](https://github.com/daviddengcn/go-pr) - Pattern recognition package in Go lang.
* [go-ml](https://github.com/alonsovidales/go_ml) - Linear / Logistic regression, Neural Networks, Collaborative Filtering and Gaussian Multivariate Distribution
* [bayesian](https://github.com/jbrukh/bayesian) - Naive Bayesian Classification for Golang.
-* [go-galib](https://github.com/thoj/go-galib) - Genetic Algorithms library written in Go / golang
-* [Cloudforest](https://github.com/ryanbressler/CloudForest) - Ensembles of decision trees in go/golang.
+* [go-galib](https://github.com/thoj/go-galib) - Genetic Algorithms library written in Go / Golang
+* [Cloudforest](https://github.com/ryanbressler/CloudForest) - Ensembles of decision trees in go/Golang.
* [gobrain](https://github.com/goml/gobrain) - Neural Networks written in go
* [GoNN](https://github.com/fxsjy/gonn) - GoNN is an implementation of Neural Network in Go Language, which includes BPNN, RBF, PCN
* [MXNet](https://github.com/dmlc/mxnet) - Lightweight, Portable, Flexible Distributed/Mobile Deep Learning with Dynamic, Mutation-aware Dataflow Dep Scheduler; for Python, R, Julia, Go, Javascript and more.
@@ -304,7 +304,7 @@ For a list of free-to-attend meetups and local events, go [here](https://github.
<a name="go-data-analysis"></a>
#### Data Analysis / Data Visualization
-* [go-graph](https://github.com/StepLg/go-graph) - Graph library for Go/golang language.
+* [go-graph](https://github.com/StepLg/go-graph) - Graph library for Go/Golang language.
* [SVGo](http://www.svgopen.org/2011/papers/34-SVGo_a_Go_Library_for_SVG_generation/) - The Go Language library for SVG generation
* [RF](https://github.com/fxsjy/RF.go) - Random forests implementation in Go
@@ -359,7 +359,7 @@ For a list of free-to-attend meetups and local events, go [here](https://github.
* [FlinkML in Apache Flink](https://ci.apache.org/projects/flink/flink-docs-master/apis/batch/libs/ml/index.html) - Distributed machine learning library in Flink
* [H2O](https://github.com/h2oai/h2o-3) - ML engine that supports distributed learning on Hadoop, Spark or your laptop via APIs in R, Python, Scala, REST/JSON.
* [htm.java](https://github.com/numenta/htm.java) - General Machine Learning library using Numenta’s Cortical Learning Algorithm
-* [java-deeplearning](https://github.com/deeplearning4j/deeplearning4j) - Distributed Deep Learning Platform for Java, Clojure,Scala
+* [java-deeplearning](https://github.com/deeplearning4j/deeplearning4j) - Distributed Deep Learning Platform for Java, Clojure, Scala
* [Mahout](https://github.com/apache/mahout) - Distributed machine learning
* [Meka](http://meka.sourceforge.net/) - An open source implementation of methods for multi-label classification and evaluation (extension to Weka).
* [MLlib in Apache Spark](http://spark.apache.org/docs/latest/mllib-guide.html) - Distributed machine learning library in Spark
@@ -447,19 +447,19 @@ For a list of free-to-attend meetups and local events, go [here](https://github.
* [Gaussian Mixture Model](https://github.com/lukapopijac/gaussian-mixture-model) - Unsupervised machine learning with multivariate Gaussian mixture model
* [Node-fann](https://github.com/rlidwka/node-fann) - FANN (Fast Artificial Neural Network Library) bindings for Node.js
* [Kmeans.js](https://github.com/emilbayes/kMeans.js) - Simple Javascript implementation of the k-means algorithm, for node.js and the browser
-* [LDA.js](https://github.com/primaryobjects/lda) - LDA topic modeling for node.js
+* [LDA.js](https://github.com/primaryobjects/lda) - LDA topic modeling for Node.js
* [Learning.js](https://github.com/yandongliu/learningjs) - Javascript implementation of logistic regression/c4.5 decision tree
* [Machine Learning](http://joonku.com/project/machine_learning) - Machine learning library for Node.js
* [machineJS](https://github.com/ClimbsRocks/machineJS) - Automated machine learning, data formatting, ensembling, and hyperparameter optimization for competitions and exploration- just give it a .csv file!
* [mil-tokyo](https://github.com/mil-tokyo) - List of several machine learning libraries
-* [Node-SVM](https://github.com/nicolaspanel/node-svm) - Support Vector Machine for nodejs
+* [Node-SVM](https://github.com/nicolaspanel/node-svm) - Support Vector Machine for Node.js
* [Brain](https://github.com/harthur/brain) - Neural networks in JavaScript **[Deprecated]**
* [Bayesian-Bandit](https://github.com/omphalos/bayesian-bandit.js) - Bayesian bandit implementation for Node and the browser.
-* [Synaptic](https://github.com/cazala/synaptic) - Architecture-free neural network library for node.js and the browser
+* [Synaptic](https://github.com/cazala/synaptic) - Architecture-free neural network library for Node.js and the browser
* [kNear](https://github.com/NathanEpstein/kNear) - JavaScript implementation of the k nearest neighbors algorithm for supervised learning
* [NeuralN](https://github.com/totemstech/neuraln) - C++ Neural Network library for Node.js. It has advantage on large dataset and multi-threaded training.
* [kalman](https://github.com/itamarwe/kalman) - Kalman filter for Javascript.
-* [shaman](https://github.com/luccastera/shaman) - node.js library with support for both simple and multiple linear regression.
+* [shaman](https://github.com/luccastera/shaman) - Node.js library with support for both simple and multiple linear regression.
* [ml.js](https://github.com/mljs/ml) - Machine learning and numerical analysis tools for Node.js and the Browser!
* [Pavlov.js](https://github.com/NathanEpstein/Pavlov.js) - Reinforcement learning using Markov Decision Processes
* [MXNet](https://github.com/dmlc/mxnet) - Lightweight, Portable, Flexible Distributed/Mobile Deep Learning with Dynamic, Mutation-aware Dataflow Dep Scheduler; for Python, R, Julia, Go, Javascript and more.
@@ -468,7 +468,7 @@ For a list of free-to-attend meetups and local events, go [here](https://github.
#### Misc
* [sylvester](https://github.com/jcoglan/sylvester) - Vector and Matrix math for JavaScript.
-* [simple-statistics](https://github.com/simple-statistics/simple-statistics) - A JavaScript implementation of descriptive, regression, and inference statistics. Implemented in literate JavaScript with no dependencies, designed to work in all modern browsers (including IE) as well as in node.js.
+* [simple-statistics](https://github.com/simple-statistics/simple-statistics) - A JavaScript implementation of descriptive, regression, and inference statistics. Implemented in literate JavaScript with no dependencies, designed to work in all modern browsers (including IE) as well as in Node.js.
* [regression-js](https://github.com/Tom-Alexander/regression-js) - A javascript library containing a collection of least squares fitting methods for finding a trend in a set of data.
* [Lyric](https://github.com/flurry/Lyric) - Linear Regression library.
* [GreatCircle](https://github.com/mwgg/GreatCircle) - Library for calculating great circle distance.
@@ -498,7 +498,7 @@ For a list of free-to-attend meetups and local events, go [here](https://github.
* [GLMNet](https://github.com/simonster/GLMNet.jl) - Julia wrapper for fitting Lasso/ElasticNet GLM models using glmnet
* [Clustering](https://github.com/JuliaStats/Clustering.jl) - Basic functions for clustering data: k-means, dp-means, etc.
* [SVM](https://github.com/JuliaStats/SVM.jl) - SVM's for Julia
-* [Kernal Density](https://github.com/JuliaStats/KernelDensity.jl) - Kernel density estimators for julia
+* [Kernel Density](https://github.com/JuliaStats/KernelDensity.jl) - Kernel density estimators for julia
* [Dimensionality Reduction](https://github.com/JuliaStats/DimensionalityReduction.jl) - Methods for dimensionality reduction
* [NMF](https://github.com/JuliaStats/NMF.jl) - A Julia package for non-negative matrix factorization
* [ANN](https://github.com/EricChiang/ANN.jl) - Julia artificial neural networks
@@ -645,7 +645,7 @@ on MNIST digits[DEEP LEARNING]
* [Spider](http://people.kyb.tuebingen.mpg.de/spider/) - The spider is intended to be a complete object orientated environment for machine learning in Matlab.
* [LibSVM](http://www.csie.ntu.edu.tw/~cjlin/libsvm/#matlab) - A Library for Support Vector Machines
* [LibLinear](http://www.csie.ntu.edu.tw/~cjlin/liblinear/#download) - A Library for Large Linear Classification
-* [Machine Learning Module](https://github.com/josephmisiti/machine-learning-module) - Class on machine w/ PDF,lectures,code
+* [Machine Learning Module](https://github.com/josephmisiti/machine-learning-module) - Class on machine w/ PDF, lectures, code
* [Caffe](http://caffe.berkeleyvision.org) - A deep learning framework developed with cleanliness, readability, and speed in mind.
* [Pattern Recognition Toolbox](https://github.com/covartech/PRT) - A complete object-oriented environment for machine learning in Matlab.
* [Pattern Recognition and Machine Learning](https://github.com/PRML/PRMLT) - This package contains the matlab implementation of the algorithms described in the book Pattern Recognition and Machine Learning by C. Bishop.
@@ -847,7 +847,7 @@ be
* [topik](https://github.com/ContinuumIO/topik) - Topic modelling toolkit
* [PyBrain](https://github.com/pybrain/pybrain) - Another Python Machine Learning Library.
* [Brainstorm](https://github.com/IDSIA/brainstorm) - Fast, flexible and fun neural networks. This is the successor of PyBrain.
-* [Crab](https://github.com/muricoca/crab) - A ๏ฌexible, fast recommender engine.
+* [Crab](https://github.com/muricoca/crab) - A flexible, fast recommender engine.
* [python-recsys](https://github.com/ocelma/python-recsys) - A Python library for implementing a Recommender System.
* [thinking bayes](https://github.com/AllenDowney/ThinkBayes) - Book on Bayesian Analysis
* [Image-to-Image Translation with Conditional Adversarial Networks](https://github.com/williamFalcon/pix2pix-keras) - Implementation of image to image (pix2pix) translation from the paper by [isola et al](https://arxiv.org/pdf/1611.07004.pdf).[DEEP LEARNING]
| https://api.github.com/repos/josephmisiti/awesome-machine-learning/pulls/401 | 2017-07-14T13:45:23Z | 2017-07-16T15:37:18Z | 2017-07-16T15:37:18Z | 2017-07-16T15:37:22Z | 3,576 | josephmisiti/awesome-machine-learning | 52,150 |
|
feat(grouping): Additional message parameterizations | diff --git a/src/sentry/grouping/strategies/message.py b/src/sentry/grouping/strategies/message.py
index 5692027d75c347..d3a17b939181db 100644
--- a/src/sentry/grouping/strategies/message.py
+++ b/src/sentry/grouping/strategies/message.py
@@ -15,7 +15,7 @@
from sentry.utils import metrics
_parameterization_regex = re.compile(
- # The `(?x)` tells the regex compiler to ingore comments and unescaped whitespace,
+ # The `(?x)` tells the regex compiler to ignore comments and unescaped whitespace,
# so we can use newlines and indentation for better legibility.
r"""(?x)
(?P<email>
@@ -64,6 +64,32 @@
\b[0-9a-fA-F]{32}\b
) |
(?P<date>
+ # No word boundaries required around dates. Should there be?
+ # RFC822, RFC1123, RFC1123Z
+ ((?:Sun|Mon|Tue|Wed|Thu|Fri|Sat),\s\d{1,2}\s(?:Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)\s\d{2,4}\s\d{1,2}:\d{1,2}(:\d{1,2})?\s([-\+][\d]{2}[0-5][\d]|(?:UT|GMT|(?:E|C|M|P)(?:ST|DT)|[A-IK-Z])))
+ |
+ # RFC850
+ ((?:Sunday|Monday|Tuesday|Wednesday|Thursday|Friday|Saturday),\s\d{2}-(?:Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)-\d{2}\s\d{2}:\d{2}:\d{2}\s(?:UT|GMT|(?:E|C|M|P)(?:ST|DT)|[A-IK-Z]))
+ |
+ # RFC3339, RFC3339Nano
+ (\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d+)?Z?([+-]?\d{2}:\d{2})?)
+ |
+ # LongDate
+ ((?:Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)\s+[0-3]\d,\s+\d{4})
+ |
+ # Datetime
+ (\d{4}-[01]\d-[0-3]\d\s[0-2]\d:[0-5]\d:[0-5]\d)
+ |
+ # Kitchen
+ (\d{1,2}:\d{2}(:\d{2})?(?: [aApP][Mm])?)
+ |
+ # Date
+ (\d{4}-[01]\d-[0-3]\d)
+ |
+ # Time
+ ([0-2]\d:[0-5]\d:[0-5]\d)
+ |
+ # Old Date Formats, TODO: possibly safe to remove?
(
(\d{4}-[01]\d-[0-3]\dT[0-2]\d:[0-5]\d:[0-5]\d\.\d+([+-][0-2]\d:[0-5]\d|Z))|
(\d{4}-[01]\d-[0-3]\dT[0-2]\d:[0-5]\d:[0-5]\d([+-][0-2]\d:[0-5]\d|Z))|
@@ -87,6 +113,9 @@
) |
(datetime.datetime\(.*?\))
) |
+ (?P<duration>
+ \b\d+ms\b
+ ) |
(?P<hex>
\b0[xX][0-9a-fA-F]+\b
) |
@@ -142,7 +171,12 @@ def _handle_match(match: Match[str]) -> str:
# For `quoted_str` and `bool` we want to preserve the `=` symbol, which we include in
# the match in order not to replace random quoted strings and the words 'true' and 'false'
# in contexts other than key-value pairs
- return f"=<{key}>" if key in ["quoted_str", "bool"] else f"<{key}>"
+ if key in ["quoted_str", "bool"]:
+ return f"=<{key}>"
+ elif key == "json_str_val":
+ return f": <{key}>"
+ else:
+ return f"<{key}>"
return ""
return _parameterization_regex.sub(_handle_match, trimmed)
diff --git a/tests/sentry/grouping/test_normalize_message.py b/tests/sentry/grouping/test_normalize_message.py
new file mode 100644
index 00000000000000..567ad1c9f91aa3
--- /dev/null
+++ b/tests/sentry/grouping/test_normalize_message.py
@@ -0,0 +1,94 @@
+import pytest
+
+from sentry.grouping.strategies.message import normalize_message_for_grouping
+
+
+@pytest.mark.parametrize(
+ ("name", "input", "expected"),
+ [
+ ("email", """blah test@email.com had a problem""", """blah <email> had a problem"""),
+ ("url", """blah http://some.email.com had a problem""", """blah <url> had a problem"""),
+ (
+ "url - existing behavior",
+ """blah tcp://user:pass@email.com:10 had a problem""",
+ """blah tcp://user:<email>:<int> had a problem""",
+ ),
+ ("ip", """blah 0.0.0.0 had a problem""", """blah <ip> had a problem"""),
+ (
+ "UUID",
+ """blah 7c1811ed-e98f-4c9c-a9f9-58c757ff494f had a problem""",
+ """blah <uuid> had a problem""",
+ ),
+ (
+ "SHA1",
+ """blah 5fc35719b9cf96ec602dbc748ff31c587a46961d had a problem""",
+ """blah <sha1> had a problem""",
+ ),
+ (
+ "MD5",
+ """blah 0751007cd28df267e8e051b51f918c60 had a problem""",
+ """blah <md5> had a problem""",
+ ),
+ (
+ "Date",
+ """blah 2024-02-20T22:16:36 had a problem""",
+ """blah <date> had a problem""",
+ ),
+ (
+ "Date RFC822",
+ """blah Mon, 02 Jan 06 15:04 MST had a problem""",
+ """blah <date> had a problem""",
+ ),
+ (
+ "Date RFC822Z",
+ """blah Mon, 02 Jan 06 15:04 -0700 had a problem""",
+ """blah <date> had a problem""",
+ ),
+ (
+ "Date RFC850",
+ """blah Monday, 02-Jan-06 15:04:05 MST had a problem""",
+ """blah <date> had a problem""",
+ ),
+ (
+ "Date RFC1123",
+ """blah Mon, 02 Jan 2006 15:04:05 MST had a problem""",
+ """blah <date> had a problem""",
+ ),
+ (
+ "Date RFC1123Z",
+ """blah Mon, 02 Jan 2006 15:04:05 -0700 had a problem""",
+ """blah <date> had a problem""",
+ ),
+ (
+ "Date RFC3339",
+ """blah 2006-01-02T15:04:05Z07:00 had a problem""",
+ """blah <date> had a problem""",
+ ),
+ (
+ "Date RFC3339Nano",
+ """blah 2006-01-02T15:04:05.999999999Z07:00 had a problem""",
+ """blah <date> had a problem""",
+ ),
+ ("Date plain", """blah 2006-01-02 had a problem""", """blah <date> had a problem"""),
+ ("Date - long", """blah Jan 18, 2019 had a problem""", """blah <date> had a problem"""),
+ (
+ "Date - Datetime",
+ """blah 2006-01-02 15:04:05 had a problem""",
+ """blah <date> had a problem""",
+ ),
+ ("Date - Kitchen", """blah 3:04PM had a problem""", """blah <date> had a problem"""),
+ ("Date - Time", """blah 15:04:05 had a problem""", """blah <date> had a problem"""),
+ ("hex", """blah 0x9af8c3b had a problem""", """blah <hex> had a problem"""),
+ ("float", """blah 0.23 had a problem""", """blah <float> had a problem"""),
+ ("int", """blah 23 had a problem""", """blah <int> had a problem"""),
+ ("quoted str", """blah b="1" had a problem""", """blah b=<quoted_str> had a problem"""),
+ ("bool", """blah a=true had a problem""", """blah a=<bool> had a problem"""),
+ (
+ "Duration - ms",
+ """blah connection failed after 12345ms""",
+ """blah connection failed after <duration>""",
+ ),
+ ],
+)
+def test_normalize_message(name, input, expected):
+ assert expected == normalize_message_for_grouping(input), f"Case {name} Failed"
| Add additional parameterization regular expressions.
This proposes small, discrete improvements to parameterization. This PR contains the uncontroversial ones; the controversial ones now live in: https://github.com/getsentry/sentry/pull/65679
* Support more date formats. Notably, RFC822, RFC1123, RFC1123Z, RFC850, RFC3339, RFC3339Nano, and the run-of-the-mill "Kitchen" format (e.g. 3:49PM) were missing (see the sketch below).
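A minimal sketch of the idea (a toy pattern, not the actual regex in `message.py`, which covers the full list of formats):

```python
import re

_DATE_RE = re.compile(r"""(?x)
    \d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(?:\.\d+)?Z?(?:[+-]\d{2}:\d{2})?  # RFC3339-ish
    |
    \d{1,2}:\d{2}(?::\d{2})?(?:\s?[AaPp][Mm])?                           # Kitchen, e.g. 3:49PM
""")

def normalize(message: str) -> str:
    # Messages that differ only by timestamp should group together.
    return _DATE_RE.sub("<date>", message)

print(normalize("blah 2024-02-20T22:16:36 had a problem"))  # blah <date> had a problem
```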
It also adds a non-exhaustive test suite in `test_normalize_message`. | https://api.github.com/repos/getsentry/sentry/pulls/65574 | 2024-02-21T20:59:18Z | 2024-02-26T17:19:55Z | 2024-02-26T17:19:55Z | 2024-03-13T00:40:51Z | 2,245 | getsentry/sentry | 44,423 |
Bump pypa/cibuildwheel from 2.16.2 to 2.16.4 | diff --git a/.github/workflows/pypi_upload.yml b/.github/workflows/pypi_upload.yml
index 8e3eb67a10d..52525419f0a 100644
--- a/.github/workflows/pypi_upload.yml
+++ b/.github/workflows/pypi_upload.yml
@@ -89,7 +89,7 @@ jobs:
steps:
- uses: actions/checkout@v4
- - uses: pypa/cibuildwheel@v2.16.2
+ - uses: pypa/cibuildwheel@v2.16.4
with:
only: ${{ matrix.only }}
| Bumps [pypa/cibuildwheel](https://github.com/pypa/cibuildwheel) from 2.16.2 to 2.16.4.
<details>
<summary>Release notes</summary>
<p><em>Sourced from <a href="https://github.com/pypa/cibuildwheel/releases">pypa/cibuildwheel's releases</a>.</em></p>
<blockquote>
<h2>v2.16.4</h2>
<p>๐ Update manylinux pins to upgrade from a problematic PyPy version. (<a href="https://redirect.github.com/pypa/cibuildwheel/issues/1737">#1737</a>)</p>
<h2>v2.16.3</h2>
<ul>
<li>๐ Fix a bug when building from sdist, where relative paths to files in the package didn't work because the working directory was wrong (<a href="https://redirect.github.com/pypa/cibuildwheel/issues/1687">#1687</a>)</li>
<li>๐ Adds the ability to disable mounting the host filesystem in containers to <code>/host</code>, through the <code>disable_host_mount</code> suboption on <a href="https://cibuildwheel.readthedocs.io/en/stable/options/#container-engine"><code>CIBW_CONTAINER_ENGINE</code></a>.</li>
<li>๐ A lot of docs improvements! (<a href="https://redirect.github.com/pypa/cibuildwheel/issues/1708">#1708</a>, <a href="https://redirect.github.com/pypa/cibuildwheel/issues/1705">#1705</a>, <a href="https://redirect.github.com/pypa/cibuildwheel/issues/1686">#1686</a>, <a href="https://redirect.github.com/pypa/cibuildwheel/issues/1679">#1679</a>, <a href="https://redirect.github.com/pypa/cibuildwheel/issues/1667">#1667</a>, <a href="https://redirect.github.com/pypa/cibuildwheel/issues/1665">#1665</a>)</li>
</ul>
</blockquote>
</details>
<details>
<summary>Changelog</summary>
<p><em>Sourced from <a href="https://github.com/pypa/cibuildwheel/blob/main/docs/changelog.md">pypa/cibuildwheel's changelog</a>.</em></p>
<blockquote>
<h3>v2.16.4</h3>
<p><em>28 January 2024</em></p>
<ul>
<li>๐ Update manylinux pins to upgrade from a problematic PyPy version. (<a href="https://redirect.github.com/pypa/cibuildwheel/issues/1737">#1737</a>)</li>
</ul>
<h3>v2.16.3</h3>
<p><em>26 January 2024</em></p>
<ul>
<li>๐ Fix a bug when building from sdist, where relative paths to files in the package didn't work because the working directory was wrong (<a href="https://redirect.github.com/pypa/cibuildwheel/issues/1687">#1687</a>)</li>
<li>๐ Adds the ability to disable mounting the host filesystem in containers to <code>/host</code>, through the <code>disable_host_mount</code> suboption on <a href="https://cibuildwheel.readthedocs.io/en/stable/options/#container-engine"><code>CIBW_CONTAINER_ENGINE</code></a>.</li>
<li>๐ A lot of docs improvements! (<a href="https://redirect.github.com/pypa/cibuildwheel/issues/1708">#1708</a>, <a href="https://redirect.github.com/pypa/cibuildwheel/issues/1705">#1705</a>, <a href="https://redirect.github.com/pypa/cibuildwheel/issues/1686">#1686</a>, <a href="https://redirect.github.com/pypa/cibuildwheel/issues/1679">#1679</a>, <a href="https://redirect.github.com/pypa/cibuildwheel/issues/1667">#1667</a>, <a href="https://redirect.github.com/pypa/cibuildwheel/issues/1665">#1665</a>)</li>
</ul>
</blockquote>
</details>
<details>
<summary>Commits</summary>
<ul>
<li><a href="https://github.com/pypa/cibuildwheel/commit/0b04ab1040366101259658b355777e4ff2d16f83"><code>0b04ab1</code></a> Bump version: v2.16.4</li>
<li><a href="https://github.com/pypa/cibuildwheel/commit/34b049f1389fd9cfef5de1d1781ec67759770eea"><code>34b049f</code></a> [Bot] Update dependencies (<a href="https://redirect.github.com/pypa/cibuildwheel/issues/1737">#1737</a>)</li>
<li><a href="https://github.com/pypa/cibuildwheel/commit/4a79413fe6d6fd88b4317bcf0e80252292fd34f5"><code>4a79413</code></a> Remove the Cirrus CI badge from readme</li>
<li><a href="https://github.com/pypa/cibuildwheel/commit/e250df5d5da8c45226a8de1a80e6bfbbf46f5e4b"><code>e250df5</code></a> Bump version: v2.16.3</li>
<li><a href="https://github.com/pypa/cibuildwheel/commit/fd0aae31e315073be9015f63972bd30f6f0fb80c"><code>fd0aae3</code></a> Merge pull request <a href="https://redirect.github.com/pypa/cibuildwheel/issues/1686">#1686</a> from doronz88/refactor/doc-linux-clarification</li>
<li><a href="https://github.com/pypa/cibuildwheel/commit/2a83588552e256468770b630f6d7cc4e798a5d00"><code>2a83588</code></a> Apply suggestions from code review</li>
<li><a href="https://github.com/pypa/cibuildwheel/commit/a1e3efb81c738c54142149bf8e04a7818645e279"><code>a1e3efb</code></a> fix: correct path when building sdist package (<a href="https://redirect.github.com/pypa/cibuildwheel/issues/1687">#1687</a>)</li>
<li><a href="https://github.com/pypa/cibuildwheel/commit/c0e4b16ab9dde5dab19f7b6cf2d21e13f58058f0"><code>c0e4b16</code></a> [Bot] Update dependencies (<a href="https://redirect.github.com/pypa/cibuildwheel/issues/1728">#1728</a>)</li>
<li><a href="https://github.com/pypa/cibuildwheel/commit/40b63b834dd311eb4712ef784a5dfc63f8305d4f"><code>40b63b8</code></a> [pre-commit.ci] pre-commit autoupdate (<a href="https://redirect.github.com/pypa/cibuildwheel/issues/1729">#1729</a>)</li>
<li><a href="https://github.com/pypa/cibuildwheel/commit/23a6e88b0cc95cf581a8304293950a60939ce002"><code>23a6e88</code></a> Merge pull request <a href="https://redirect.github.com/pypa/cibuildwheel/issues/1672">#1672</a> from pypa/disable-host-mount</li>
<li>Additional commits viewable in <a href="https://github.com/pypa/cibuildwheel/compare/v2.16.2...v2.16.4">compare view</a></li>
</ul>
</details>
<br />
[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=pypa/cibuildwheel&package-manager=github_actions&previous-version=2.16.2&new-version=2.16.4)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)
Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.
[//]: # (dependabot-automerge-start)
[//]: # (dependabot-automerge-end)
---
<details>
<summary>Dependabot commands and options</summary>
<br />
You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually
- `@dependabot show <dependency name> ignore conditions` will show all of the ignore conditions of the specified dependency
- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
</details> | https://api.github.com/repos/psf/black/pulls/4191 | 2024-01-29T06:46:03Z | 2024-01-29T07:07:34Z | 2024-01-29T07:07:34Z | 2024-01-29T07:07:35Z | 145 | psf/black | 24,082 |
[shardformer] integrate with dist layer | diff --git a/colossalai/shardformer/policies/bert.py b/colossalai/shardformer/policies/bert.py
index 568f0e854125..10f11929de82 100644
--- a/colossalai/shardformer/policies/bert.py
+++ b/colossalai/shardformer/policies/bert.py
@@ -8,12 +8,6 @@
from .basepolicy import ModulePolicyDescription, Policy, SubModuleReplacementDescription
-class ParallelModule():
-
- def __init__(self):
- pass
-
-
class BertPolicy(Policy):
def preprocess(self, shard_config: ShardConfig = None):
@@ -49,7 +43,27 @@ def module_policy(self, shard_config: ShardConfig = None):
sub_module_replacement=[
SubModuleReplacementDescription(
suffix="attention.self.query",
- target_module=ParallelModule,
+ target_module=col_nn.Linear1D_Col,
+ ),
+ SubModuleReplacementDescription(
+ suffix="attention.self.key",
+ target_module=col_nn.Linear1D_Col,
+ ),
+ SubModuleReplacementDescription(
+ suffix="attention.self.value",
+ target_module=col_nn.Linear1D_Col,
+ ),
+ SubModuleReplacementDescription(
+ suffix="attention.output.dense",
+ target_module=col_nn.Linear1D_Row,
+ ),
+ SubModuleReplacementDescription(
+ suffix="intermediate.dense",
+ target_module=col_nn.Linear1D_Col,
+ ),
+ SubModuleReplacementDescription(
+ suffix="output.dense",
+ target_module=col_nn.Linear1D_Row,
),
])
}
diff --git a/colossalai/shardformer/shard/sharder.py b/colossalai/shardformer/shard/sharder.py
index 8eee3c6a3b7e..eb8300d5998e 100644
--- a/colossalai/shardformer/shard/sharder.py
+++ b/colossalai/shardformer/shard/sharder.py
@@ -7,8 +7,8 @@
from colossalai.cluster.process_group_manager import ProcessGroupManager
from ..policies.autopolicy import get_autopolicy
-from ..policies.basepolicy import Policy
-from ..utils.utils import setattr_
+from ..policies.basepolicy import Policy, SubModuleReplacementDescription
+from ..utils.utils import getattr_, setattr_
from .shard_config import ShardConfig
__all__ = ['ModelSharder', 'shard_model']
@@ -90,9 +90,7 @@ def replace_module(self,) -> None:
Args:
model (:class:`torch.nn.Module`): The model to shard
"""
- print(self.policy)
module_descriptions = self.policy.module_policy(self.shard_config)
- print(f"*******{module_descriptions}")
for module_description in module_descriptions.items():
origin_layer_cls = module_description[0]
attr_replacement = module_description[1].attribute_replacement
@@ -160,7 +158,7 @@ def _replace_param(
def _replace_sub_module(
self,
org_layer: nn.Module,
- sub_module_replacement: List[Callable],
+ sub_module_replacement: List[SubModuleReplacementDescription],
) -> None:
r"""
Shard one layer according to the policy, the layer should be the same class as the key in policy's argument_policy return dict
@@ -177,7 +175,8 @@ def _replace_sub_module(
assert target_module is not None, 'target_module should not be None'
- # TODO: integrate with new layer
- # replace_layer = target_module.from_native_layer(org_layer, self.pg_manager)
- replace_layer = None
+ # TODO: support different parallel mode
+ native_sub_module = getattr_(org_layer, suffix)
+ replace_layer = target_module.from_native_module(native_sub_module, self.pg_manager.pg_store['tp1d'])
+
setattr_(org_layer, suffix, replace_layer)
diff --git a/tests/test_shardformer/test_model/test_shard_bert.py b/tests/test_shardformer/test_model/test_shard_bert.py
index 55b78d040505..2ae68bf93f21 100644
--- a/tests/test_shardformer/test_model/test_shard_bert.py
+++ b/tests/test_shardformer/test_model/test_shard_bert.py
@@ -1,5 +1,5 @@
+import copy
import os
-import random
import pytest
import torch
@@ -7,7 +7,7 @@
import colossalai
from colossalai.logging import disable_existing_loggers
-from colossalai.shardformer.shard import ShardConfig, shard_model
+from colossalai.shardformer import ShardConfig, ShardFormer
from colossalai.testing import rerun_if_address_is_in_use, spawn
os.environ['TRANSFORMERS_NO_ADVISORY_WARNINGS'] = 'true'
@@ -20,15 +20,21 @@ def build_model(rank, world_size):
config.hidden_dropout_prob = 0
config.attention_probs_dropout_prob = 0
- org_model = BertForMaskedLM.from_pretrained('bert-base-uncased', config=config).to('cuda')
-
- shardconfig = ShardConfig(
- rank=rank,
- world_size=world_size,
- gather_output=True,
- )
- sharded_model = shard_model(BertForMaskedLM.from_pretrained('bert-base-uncased', config=config),
- shardconfig).to('cuda')
+ org_model = BertForMaskedLM.from_pretrained('bert-base-uncased', config=config)
+ org_model_forshard = copy.deepcopy(org_model)
+
+ org_model.to('cuda')
+ # TODO: no need to transfer to cuda
+ org_model_forshard.to('cuda')
+ shard_config = ShardConfig(tensor_parallel_size=2,
+ data_parallel_size=1,
+ pipeline_parallel_size=1,
+ tensor_parallel_mode='1d',
+ inference_only=True,
+ gather_output=True)
+ shard_former = ShardFormer(shard_config=shard_config)
+ shard_former.init_distributed()
+ sharded_model = shard_former.shard_model(org_model_forshard).to('cuda')
return org_model, sharded_model
| ## ๐ Checklist before creating the PR
- [x] I have created an issue for this PR for traceability
- [x] The title follows the standard format: `[doc/gemini/tensor/...]: A concise description`
- [x] I have added relevant tags if possible for us to better distinguish different PRs
## ๐จ Issue number
Fixes #4010
## ๐ What does this PR do?
Integrates ShardFormer with the distributed layers (`Linear1D_Col`/`Linear1D_Row`) for BERT; rough usage is sketched below.
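Usage distilled from the updated test (treat this as a sketch — the API is still in flux, and `org_model` stands in for any supported Hugging Face model):

```python
import copy

from colossalai.shardformer import ShardConfig, ShardFormer

# 1D tensor parallelism across 2 devices, inference only for now.
shard_config = ShardConfig(tensor_parallel_size=2,
                           data_parallel_size=1,
                           pipeline_parallel_size=1,
                           tensor_parallel_mode='1d',
                           inference_only=True,
                           gather_output=True)
shard_former = ShardFormer(shard_config=shard_config)
shard_former.init_distributed()
sharded_model = shard_former.shard_model(copy.deepcopy(org_model)).to('cuda')
```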
## ๐ฅ Checklist before requesting a review
- [x] I have linked my PR to an issue ([instruction](https://docs.github.com/en/issues/tracking-your-work-with-issues/linking-a-pull-request-to-an-issue))
- [x] My issue clearly describes the problem/feature/proposal, with diagrams/charts/table/code if possible
- [x] I have performed a self-review of my code
- [x] I have added thorough tests.
- [x] I have added docstrings for all the functions/methods I implemented
## โญ๏ธ Do you enjoy contributing to Colossal-AI?
- [x] ๐ Yes, I do.
- [ ] ๐ No, I don't.
Tell us more if you don't enjoy contributing to Colossal-AI.
| https://api.github.com/repos/hpcaitech/ColossalAI/pulls/4011 | 2023-06-16T03:12:41Z | 2023-06-16T03:23:30Z | 2023-06-16T03:23:30Z | 2023-06-16T03:24:21Z | 1,382 | hpcaitech/ColossalAI | 11,568 |
Cover our deprecation policy in the documentation | diff --git a/docs/contributing.rst b/docs/contributing.rst
index 7b901dd0018..525ad3497c2 100644
--- a/docs/contributing.rst
+++ b/docs/contributing.rst
@@ -108,6 +108,11 @@ Well-written patches should:
tox -e docs-coverage
+* if you are removing deprecated code, first make sure that at least 1 year
+ (12 months) has passed since the release that introduced the deprecation.
+ See :ref:`deprecation-policy`.
+
+
.. _submitting-patches:
Submitting patches
diff --git a/docs/versioning.rst b/docs/versioning.rst
index 227085f024e..57643ea9a7a 100644
--- a/docs/versioning.rst
+++ b/docs/versioning.rst
@@ -1,7 +1,7 @@
.. _versioning:
============================
-Versioning and API Stability
+Versioning and API stability
============================
Versioning
@@ -34,7 +34,7 @@ For example:
production)
-API Stability
+API stability
=============
API stability was one of the major goals for the *1.0* release.
@@ -47,5 +47,23 @@ new methods or functionality but the existing methods should keep working the
same way.
+.. _deprecation-policy:
+
+Deprecation policy
+==================
+
+We aim to maintain support for deprecated Scrapy features for at least 1 year.
+
+For example, if a feature is deprecated in a Scrapy version released on
+June 15th 2020, that feature should continue to work in versions released on
+June 14th 2021 or before that.
+
+Any new Scrapy release after a year *may* remove support for that deprecated
+feature.
+
+All deprecated features removed in a Scrapy release are explicitly mentioned in
+the :ref:`release notes <news>`.
+
+
.. _odd-numbered versions for development releases: https://en.wikipedia.org/wiki/Software_versioning#Odd-numbered_versions_for_development_releases
| https://api.github.com/repos/scrapy/scrapy/pulls/4705 | 2020-07-30T12:03:40Z | 2020-08-13T19:27:38Z | 2020-08-13T19:27:38Z | 2020-08-13T19:27:38Z | 467 | scrapy/scrapy | 34,273 |
|
Make all doc URLs point to docs.streamlit.io (new domain) | diff --git a/Makefile b/Makefile
index b218c8f2ef0a..1a899a1fb4f1 100644
--- a/Makefile
+++ b/Makefile
@@ -181,30 +181,21 @@ devel-docs: docs
publish-docs: docs
cd docs/_build; \
aws s3 sync \
- --acl public-read html s3://streamlit.io/docs/ \
- --profile streamlit
-
- # For now, continue publishing to secret/docs.
- # TODO: Remove after 2020-01-01
- cd docs/_build; \
- aws s3 sync \
- --acl public-read html s3://streamlit.io/secret/docs/ \
+ --acl public-read html s3://docs.streamlit.io \
--profile streamlit
# The line below uses the distribution ID obtained with
# $ aws cloudfront list-distributions | \
# jq '.DistributionList.Items[] | \
# select(.Aliases.Items[0] | \
- # contains("www.streamlit.io")) | \
+ # contains("docs.streamlit.io")) | \
# .Id'
aws cloudfront create-invalidation \
- --distribution-id=E5G9JPT7IOJDV \
+ --distribution-id=E16K3UXOWYZ8U7 \
--paths \
- '/docs/*' \
- '/docs/tutorial/*' \
- '/secret/docs/*' \
- '/secret/docs/tutorial/*' \
+ '/*' \
+ '/tutorial/*' \
--profile streamlit
.PHONY: protobuf
diff --git a/README.md b/README.md
index d7aa75be6a2b..f2793b8eacb0 100644
--- a/README.md
+++ b/README.md
@@ -45,7 +45,7 @@ streamlit run https://raw.githubusercontent.com/streamlit/demo-self-driving/mast
- Our [launch post](https://towardsdatascience.com/coding-ml-tools-like-you-code-ml-models-ddba3357eace)
- Our lovely [community](https://discuss.streamlit.io/)
-- Streamlit [documentation](https://streamlit.io/docs)
+- Streamlit [documentation](https://docs.streamlit.io/)
- More [demo projects](https://github.com/streamlit/)
- If you would like to contribute, see [instructions here](https://github.com/streamlit/streamlit/wiki/Contributing)
diff --git a/docs/_templates/layout.html b/docs/_templates/layout.html
deleted file mode 100644
index 38f5535fdaea..000000000000
--- a/docs/_templates/layout.html
+++ /dev/null
@@ -1,7 +0,0 @@
-{% extends "!layout.html" %} {% block extrahead %}
-<script>
- if (document.location.pathname.indexOf("/secret") == 0) {
- document.location = document.location.href.replace("/secret", "");
- }
-</script>
-{% endblock %}
diff --git a/docs/changelog.md b/docs/changelog.md
index 0f20b8368e1a..d24645e854e6 100644
--- a/docs/changelog.md
+++ b/docs/changelog.md
@@ -29,7 +29,7 @@ _Release date: December 20, 2019_
**Highlights:**
- ๐ค Preview release of the file uploader widget. To try it out just call
- [`st.file_uploader`](https://streamlit.io/docs/api.html#streamlit.file_uploader)!
+ [`st.file_uploader`](https://docs.streamlit.io/api.html#streamlit.file_uploader)!
_Note that as a **preview release** things may change in the near future.
Looking forward to hearing input from the community before we stabilize the
API!_
@@ -44,7 +44,7 @@ _Release date: December 20, 2019_
having to call
[`pyplot.clf`](https://matplotlib.org/3.1.1/api/_as_gen/matplotlib.pyplot.clf.html)
every time. If you want to turn this behavior off, use
- [`st.pyplot(clear_figure=False)`](https://streamlit.io/docs/api.html#streamlit.pyplot)
+ [`st.pyplot(clear_figure=False)`](https://docs.streamlit.io/api.html#streamlit.pyplot)
- ๐ฃ `st.cache` no longer checks for input mutations. This is the first change
of our ongoing effort to simplify the caching system and prepare Streamlit
for the launch of other caching primitives like Session State!
@@ -156,7 +156,7 @@ _Release date: September 19, 2019_
**Highlights:**
- โจ Magic commands! Use `st.write` without typing `st.write`. See
- https://streamlit.io/docs/api.html#magic-commands
+ https://docs.streamlit.io/api.html#magic-commands
- ๐๏ธ New `st.multiselect` widget.
- ๐ Fixed numerous install issues so now you can use `pip install streamlit`
even in Conda! We've therefore deactivated our Conda repo.
diff --git a/docs/index.md b/docs/index.md
index 03f1c14ab8b2..feb287479f57 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -4,7 +4,7 @@
:maxdepth: 2
:hidden:
- Home <https://streamlit.io/docs/>
+ Home <https://docs.streamlit.io/>
main_concepts
getting_started
diff --git a/docs/tutorial/create_a_data_explorer_app.md b/docs/tutorial/create_a_data_explorer_app.md
index 35afcfea84c3..6e6d89d27dfc 100644
--- a/docs/tutorial/create_a_data_explorer_app.md
+++ b/docs/tutorial/create_a_data_explorer_app.md
@@ -1,7 +1,7 @@
# Tutorial: Create a data explorer app
If you've made it this far, chances are you've
-[installed Streamlit](https://streamlit.io/docs/#install-streamlit) and
+[installed Streamlit](https://docs.streamlit.io/#install-streamlit) and
run through the basics in our [get started guide](../getting_started.md). If
not, now is a good time to take a look.
diff --git a/frontend/src/components/core/MainMenu/MainMenu.tsx b/frontend/src/components/core/MainMenu/MainMenu.tsx
index 6da84590b057..230609e991f5 100644
--- a/frontend/src/components/core/MainMenu/MainMenu.tsx
+++ b/frontend/src/components/core/MainMenu/MainMenu.tsx
@@ -25,7 +25,7 @@ import {
import Icon from "components/shared/Icon"
import "./MainMenu.scss"
-const ONLINE_DOCS_URL = "https://streamlit.io/docs"
+const ONLINE_DOCS_URL = "https://docs.streamlit.io"
const COMMUNITY_URL = "https://discuss.streamlit.io"
const TEAMS_URL = "https://streamlit.io/forteams"
const BUG_URL = "https://github.com/streamlit/streamlit/issues/new/choose"
diff --git a/lib/streamlit/DeltaGenerator.py b/lib/streamlit/DeltaGenerator.py
index 8f4fe2ad4d36..07865792ec07 100644
--- a/lib/streamlit/DeltaGenerator.py
+++ b/lib/streamlit/DeltaGenerator.py
@@ -500,12 +500,14 @@ def markdown(self, element, body, unsafe_allow_html=False):
information can be found at: https://github.github.com/gfm.
This also supports:
+
* Emoji shortcodes, such as `:+1:` and `:sunglasses:`.
- For a list of all supported codes,
- see https://www.webfx.com/tools/emoji-cheat-sheet/.
+ For a list of all supported codes,
+ see https://www.webfx.com/tools/emoji-cheat-sheet/.
+
* LaTeX expressions, by just wrapping them in "$" or "$$" (the "$$"
- must be on their own lines). Supported LaTeX functions are listed
- at https://katex.org/docs/supported.html.
+ must be on their own lines). Supported LaTeX functions are listed
+ at https://katex.org/docs/supported.html.
unsafe_allow_html : bool
By default, any HTML tags found in the body will be escaped and
@@ -2401,7 +2403,7 @@ def map(self, element, data, zoom=None):
To get a token for yourself, create an account at
https://mapbox.com. It's free! (for moderate usage levels) See
- https://streamlit.io/docs/cli.html#view-all-config-options for more
+ https://docs.streamlit.io/cli.html#view-all-config-options for more
info on how to set config options.
Parameters
@@ -2449,7 +2451,7 @@ def deck_gl_chart(self, element, spec=None, **kwargs):
To get a token for yourself, create an account at
https://mapbox.com. It's free! (for moderate usage levels) See
- https://streamlit.io/docs/cli.html#view-all-config-options for more
+ https://docs.streamlit.io/cli.html#view-all-config-options for more
info on how to set config options.
Parameters
diff --git a/lib/streamlit/__init__.py b/lib/streamlit/__init__.py
index c4d935dd9594..329b0b3ded69 100644
--- a/lib/streamlit/__init__.py
+++ b/lib/streamlit/__init__.py
@@ -38,7 +38,7 @@
$ streamlit hello
-For more detailed info, see https://streamlit.io/docs.
+For more detailed info, see https://docs.streamlit.io.
"""
# IMPORTANT: Prefix with an underscore anything that the user shouldn't see.
diff --git a/lib/streamlit/caching.py b/lib/streamlit/caching.py
index b2af7180cbc9..33d0b5e162cc 100644
--- a/lib/streamlit/caching.py
+++ b/lib/streamlit/caching.py
@@ -203,8 +203,7 @@ def _get_mutated_output_error_message():
By default, Streamlitโs cache is immutable. You received this warning
because Streamlit thinks you modified a cached object.
- [Click here to see how to fix this issue.]
- (https://streamlit.io/docs/advanced_concepts.html#advanced-caching)
+ [Click here to see how to fix this issue.](https://docs.streamlit.io/advanced_concepts.html#advanced-caching)
"""
).strip("\n")
diff --git a/lib/streamlit/cli.py b/lib/streamlit/cli.py
index 92ed8f6270a7..bce2af943e7a 100644
--- a/lib/streamlit/cli.py
+++ b/lib/streamlit/cli.py
@@ -173,7 +173,7 @@ def main_docs():
print("Showing help page in browser...")
from streamlit import util
- util.open_browser("https://streamlit.io/docs")
+ util.open_browser("https://docs.streamlit.io")
@main.command("hello")
@@ -212,7 +212,7 @@ def main_run(target, args=None, **kwargs):
_, extension = os.path.splitext(target)
if extension[1:] not in ACCEPTED_FILE_EXTENSIONS:
raise click.BadArgumentUsage(
- "Streamlit requires raw Python (.py) files, not %s.\nFor more information, please see https://streamlit.io/docs"
+ "Streamlit requires raw Python (.py) files, not %s.\nFor more information, please see https://docs.streamlit.io"
% extension
)
diff --git a/lib/streamlit/hello/demos.py b/lib/streamlit/hello/demos.py
index 955733063f20..fdc9516d7640 100644
--- a/lib/streamlit/hello/demos.py
+++ b/lib/streamlit/hello/demos.py
@@ -34,7 +34,7 @@ def intro():
### Want to learn more?
- Check out [streamlit.io](https://streamlit.io)
- - Jump into our [documentation](https://streamlit.io/docs)
+ - Jump into our [documentation](https://docs.streamlit.io)
- Ask a question in our [community
forums](https://discuss.streamlit.io)
diff --git a/lib/streamlit/hello/hello.py b/lib/streamlit/hello/hello.py
index cb2b79286612..ffa53f656b2b 100644
--- a/lib/streamlit/hello/hello.py
+++ b/lib/streamlit/hello/hello.py
@@ -58,7 +58,7 @@
demos.mapping_demo,
"""
This demo shows how to use
-[`st.deck_gl_chart`](https://streamlit.io/docs/api.html#streamlit.deck_gl_chart)
+[`st.deck_gl_chart`](https://docs.streamlit.io/api.html#streamlit.deck_gl_chart)
to display geospatial data.
""",
),
diff --git a/lib/streamlit/util.py b/lib/streamlit/util.py
index 9a71b58fd876..fcd0e7117537 100644
--- a/lib/streamlit/util.py
+++ b/lib/streamlit/util.py
@@ -29,7 +29,7 @@
from streamlit import env_util
# URL of Streamlit's help page.
-HELP_DOC = "https://streamlit.io/docs/"
+HELP_DOC = "https://docs.streamlit.io/"
# Make functools.wraps() in Python 2 set the __wrapped__ attribute, as
# is done in Python 3. This is required in st.cache.
diff --git a/lib/tests/streamlit/write_test.py b/lib/tests/streamlit/write_test.py
index 0a1411b52a4d..300571e16954 100644
--- a/lib/tests/streamlit/write_test.py
+++ b/lib/tests/streamlit/write_test.py
@@ -31,7 +31,7 @@
class StreamlitWriteTest(unittest.TestCase):
"""Test st.write.
- Unit tests for https://streamlit.io/docs/api/text.html#streamlit.write
+ Unit tests for https://docs.streamlit.io/api/text.html#streamlit.write
Because we're going to test st.markdown, st.pyplot, st.altair_chart
later on, we don't have to test it in st.write In st.write, all we're
| https://api.github.com/repos/streamlit/streamlit/pulls/918 | 2020-01-05T08:15:07Z | 2020-01-05T08:34:43Z | 2020-01-05T08:34:43Z | 2020-01-05T08:34:47Z | 3,231 | streamlit/streamlit | 22,343 |
Refs #29983 -- Added test for FIXTURES_DIRS pathlib support. | diff --git a/tests/fixtures_regress/tests.py b/tests/fixtures_regress/tests.py
index 1cac151367f03..cc4cee3de42e5 100644
--- a/tests/fixtures_regress/tests.py
+++ b/tests/fixtures_regress/tests.py
@@ -3,6 +3,7 @@
import os
import re
from io import StringIO
+from pathlib import Path
from django.core import management, serializers
from django.core.exceptions import ImproperlyConfigured
@@ -517,6 +518,11 @@ def test_loaddata_with_valid_fixture_dirs(self):
verbosity=0,
)
+ @override_settings(FIXTURE_DIRS=[Path(_cur_dir) / 'fixtures_1'])
+ def test_fixtures_dir_pathlib(self):
+ management.call_command('loaddata', 'inner/absolute.json', verbosity=0)
+ self.assertQuerysetEqual(Absolute.objects.all(), [1], transform=lambda o: o.pk)
+
class NaturalKeyFixtureTests(TestCase):
| https://api.github.com/repos/django/django/pulls/12026 | 2019-11-06T03:28:34Z | 2019-11-06T07:06:56Z | 2019-11-06T07:06:56Z | 2019-11-08T10:13:56Z | 215 | django/django | 51,266 |
|
Fix index delete - again! | diff --git a/gpt_index/data_structs/data_structs_v2.py b/gpt_index/data_structs/data_structs_v2.py
index 9dbd4ef8b7ae5..12e8d3cc4e41c 100644
--- a/gpt_index/data_structs/data_structs_v2.py
+++ b/gpt_index/data_structs/data_structs_v2.py
@@ -183,7 +183,6 @@ class IndexDict(V2IndexStruct):
def add_node(
self,
node: Node,
- # NOTE: unused
text_id: Optional[str] = None,
) -> str:
"""Add text to table, return current position in list."""
@@ -201,9 +200,10 @@ def add_node(
def delete(self, doc_id: str) -> None:
"""Delete a Node."""
if doc_id not in self.doc_id_dict:
- raise ValueError("doc_id not found in doc_id_dict")
+ return
for vector_id in self.doc_id_dict[doc_id]:
del self.nodes_dict[vector_id]
+ del self.doc_id_dict[doc_id]
@classmethod
def get_type(cls) -> IndexStructType:
diff --git a/tests/indices/vector_store/test_base.py b/tests/indices/vector_store/test_base.py
index e2c729bcb1f89..f094191c2ae7f 100644
--- a/tests/indices/vector_store/test_base.py
+++ b/tests/indices/vector_store/test_base.py
@@ -7,7 +7,7 @@
import numpy as np
import pytest
-from gpt_index.data_structs.node_v2 import Node
+from gpt_index.data_structs.node_v2 import DocumentRelationship, Node
from gpt_index.embeddings.openai import OpenAIEmbedding
from gpt_index.indices.vector_store.vector_indices import (
GPTFaissIndex,
@@ -17,7 +17,6 @@
from gpt_index.vector_stores.simple import SimpleVectorStore
from tests.mock_utils.mock_decorator import patch_common
from tests.mock_utils.mock_prompts import MOCK_REFINE_PROMPT, MOCK_TEXT_QA_PROMPT
-from gpt_index.data_structs.node_v2 import DocumentRelationship
@pytest.fixture
@@ -397,6 +396,7 @@ def test_simple_delete(
# test delete
index.delete("test_id_0")
assert len(index.index_struct.nodes_dict) == 3
+ assert len(index.index_struct.doc_id_dict) == 3
actual_node_tups = [
("This is a test.", [0, 1, 0, 0, 0], "test_id_1"),
("This is another test.", [0, 0, 1, 0, 0], "test_id_2"),
| One more delete bug!
If you construct a vector index against documents that already live in the client, calling `delete` can never succeed: the `doc_id` is missing from the local `doc_id_dict`, so a `ValueError` is raised. This patch returns quietly instead, and a successful delete now also removes the `doc_id_dict` entry itself, so the mapping doesn't go stale.
```python
index = GPTQdrantIndex([], client=qdrant_client_instance, collection_name=self._collection_name)
index.delete("some_remote_id")
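# Sketch of the semantics after this patch (the index/client names above are
# placeholders, not from the PR): delete() now returns quietly for an unknown
# doc_id instead of raising ValueError, and a successful delete also drops the
# doc_id_dict entry, so repeating the call is safe.
index.delete("some_remote_id")  # no ValueError, even on a repeat call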
``` | https://api.github.com/repos/run-llama/llama_index/pulls/1071 | 2023-04-05T15:21:49Z | 2023-04-05T20:51:31Z | 2023-04-05T20:51:30Z | 2023-04-05T20:51:31Z | 605 | run-llama/llama_index | 6,200 |
Handle duplicate column names in select_dtypes and get_dummies | diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt
index 5a0b4bb20f774..eb6c212731822 100644
--- a/doc/source/whatsnew/v0.23.0.txt
+++ b/doc/source/whatsnew/v0.23.0.txt
@@ -1359,6 +1359,7 @@ Reshaping
- Bug in :meth:`DataFrame.astype` where column metadata is lost when converting to categorical or a dictionary of dtypes (:issue:`19920`)
- Bug in :func:`cut` and :func:`qcut` where timezone information was dropped (:issue:`19872`)
- Bug in :class:`Series` constructor with a ``dtype=str``, previously raised in some cases (:issue:`19853`)
+- Bug in :func:`get_dummies`, and :func:`select_dtypes`, where duplicate column names caused incorrect behavior (:issue:`20848`)
Other
^^^^^
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index ffb124af4f5fc..ffb2ad046158f 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -3076,15 +3076,15 @@ def select_dtypes(self, include=None, exclude=None):
include_these = Series(not bool(include), index=self.columns)
exclude_these = Series(not bool(exclude), index=self.columns)
- def is_dtype_instance_mapper(column, dtype):
- return column, functools.partial(issubclass, dtype.type)
+ def is_dtype_instance_mapper(idx, dtype):
+ return idx, functools.partial(issubclass, dtype.type)
- for column, f in itertools.starmap(is_dtype_instance_mapper,
- self.dtypes.iteritems()):
+ for idx, f in itertools.starmap(is_dtype_instance_mapper,
+ enumerate(self.dtypes)):
if include: # checks for the case of empty include or exclude
- include_these[column] = any(map(f, include))
+ include_these.iloc[idx] = any(map(f, include))
if exclude:
- exclude_these[column] = not any(map(f, exclude))
+ exclude_these.iloc[idx] = not any(map(f, exclude))
dtype_indexer = include_these & exclude_these
return self.loc[com._get_info_slice(self, dtype_indexer)]
diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py
index 389f1af48434a..0829aa8f5a509 100644
--- a/pandas/core/reshape/reshape.py
+++ b/pandas/core/reshape/reshape.py
@@ -821,14 +821,15 @@ def get_dummies(data, prefix=None, prefix_sep='_', dummy_na=False,
from pandas.core.reshape.concat import concat
from itertools import cycle
+ dtypes_to_encode = ['object', 'category']
+
if isinstance(data, DataFrame):
# determine columns being encoded
-
if columns is None:
- columns_to_encode = data.select_dtypes(
- include=['object', 'category']).columns
+ data_to_encode = data.select_dtypes(
+ include=dtypes_to_encode)
else:
- columns_to_encode = columns
+ data_to_encode = data[columns]
# validate prefixes and separator to avoid silently dropping cols
def check_len(item, name):
@@ -836,35 +837,45 @@ def check_len(item, name):
"length of the columns being encoded ({len_enc}).")
if is_list_like(item):
- if not len(item) == len(columns_to_encode):
- len_msg = len_msg.format(name=name, len_item=len(item),
- len_enc=len(columns_to_encode))
+ if not len(item) == data_to_encode.shape[1]:
+ len_msg = \
+ len_msg.format(name=name, len_item=len(item),
+ len_enc=data_to_encode.shape[1])
raise ValueError(len_msg)
check_len(prefix, 'prefix')
check_len(prefix_sep, 'prefix_sep')
+
if isinstance(prefix, compat.string_types):
prefix = cycle([prefix])
if isinstance(prefix, dict):
- prefix = [prefix[col] for col in columns_to_encode]
+ prefix = [prefix[col] for col in data_to_encode.columns]
if prefix is None:
- prefix = columns_to_encode
+ prefix = data_to_encode.columns
# validate separators
if isinstance(prefix_sep, compat.string_types):
prefix_sep = cycle([prefix_sep])
elif isinstance(prefix_sep, dict):
- prefix_sep = [prefix_sep[col] for col in columns_to_encode]
+ prefix_sep = [prefix_sep[col] for col in data_to_encode.columns]
- if set(columns_to_encode) == set(data.columns):
+ if data_to_encode.shape == data.shape:
+ # Encoding the entire df, do not prepend any dropped columns
with_dummies = []
+ elif columns is not None:
+ # Encoding only cols specified in columns. Get all cols not in
+ # columns to prepend to result.
+ with_dummies = [data.drop(columns, axis=1)]
else:
- with_dummies = [data.drop(columns_to_encode, axis=1)]
-
- for (col, pre, sep) in zip(columns_to_encode, prefix, prefix_sep):
-
- dummy = _get_dummies_1d(data[col], prefix=pre, prefix_sep=sep,
+ # Encoding only object and category dtype columns. Get remaining
+ # columns to prepend to result.
+ with_dummies = [data.select_dtypes(exclude=dtypes_to_encode)]
+
+ for (col, pre, sep) in zip(data_to_encode.iteritems(), prefix,
+ prefix_sep):
+ # col is (column_name, column), use just column data here
+ dummy = _get_dummies_1d(col[1], prefix=pre, prefix_sep=sep,
dummy_na=dummy_na, sparse=sparse,
drop_first=drop_first, dtype=dtype)
with_dummies.append(dummy)
diff --git a/pandas/tests/frame/test_dtypes.py b/pandas/tests/frame/test_dtypes.py
index 152159965036d..4c9f8c2ea0980 100644
--- a/pandas/tests/frame/test_dtypes.py
+++ b/pandas/tests/frame/test_dtypes.py
@@ -287,6 +287,23 @@ def test_select_dtypes_include_exclude_mixed_scalars_lists(self):
ei = df[['b', 'c', 'f', 'k']]
assert_frame_equal(ri, ei)
+ def test_select_dtypes_duplicate_columns(self):
+ # GH20839
+ odict = compat.OrderedDict
+ df = DataFrame(odict([('a', list('abc')),
+ ('b', list(range(1, 4))),
+ ('c', np.arange(3, 6).astype('u1')),
+ ('d', np.arange(4.0, 7.0, dtype='float64')),
+ ('e', [True, False, True]),
+ ('f', pd.date_range('now', periods=3).values)]))
+ df.columns = ['a', 'a', 'b', 'b', 'b', 'c']
+
+ expected = DataFrame({'a': list(range(1, 4)),
+ 'b': np.arange(3, 6).astype('u1')})
+
+ result = df.select_dtypes(include=[np.number], exclude=['floating'])
+ assert_frame_equal(result, expected)
+
def test_select_dtypes_not_an_attr_but_still_valid_dtype(self):
df = DataFrame({'a': list('abc'),
'b': list(range(1, 4)),
diff --git a/pandas/tests/reshape/test_reshape.py b/pandas/tests/reshape/test_reshape.py
index c4d925b83585b..295801f3e8def 100644
--- a/pandas/tests/reshape/test_reshape.py
+++ b/pandas/tests/reshape/test_reshape.py
@@ -465,6 +465,21 @@ def test_get_dummies_dont_sparsify_all_columns(self, sparse):
tm.assert_frame_equal(df[['GDP']], df2)
+ def test_get_dummies_duplicate_columns(self, df):
+ # GH20839
+ df.columns = ["A", "A", "A"]
+ result = get_dummies(df).sort_index(axis=1)
+
+ expected = DataFrame([[1, 1, 0, 1, 0],
+ [2, 0, 1, 1, 0],
+ [3, 1, 0, 0, 1]],
+ columns=['A', 'A_a', 'A_b', 'A_b', 'A_c'],
+ dtype=np.uint8).sort_index(axis=1)
+
+ expected = expected.astype({"A": np.int64})
+
+ tm.assert_frame_equal(result, expected)
+
class TestCategoricalReshape(object):
| Functions `select_dtypes` and `get_dummies` previously misbehaved on DataFrames with duplicate column names: label-based lookups hit every column sharing a name, producing the strange results shown below. This PR switches both functions to positional indexing, which fixes the behavior.
Previous behavior:
```python
In [6]: df
Out[6]:
col1 col1
0 1 a
1 2 b
In [7]: df.select_dtypes(include=['int'])
Out[7]:
Empty DataFrame
Columns: []
Index: [0, 1]
In [8]: pd.get_dummies(df)
Out[8]:
col1_('c', 'o', 'l', '1') col1_('c', 'o', 'l', '1')
0 1 1
1 1 1
```
New behavior:
```python
In [6]: df
Out[6]:
col1 col1
0 1 a
1 2 b
In [7]: df.select_dtypes(include=['int'])
Out[7]:
col1
0 1
1 2
In [8]: pd.get_dummies(df)
Out[8]:
col1 col1_a col1_b
0 1 1 0
1 2 0 1
```
- [x] closes #20848
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
| https://api.github.com/repos/pandas-dev/pandas/pulls/20839 | 2018-04-27T07:48:11Z | 2018-05-05T12:59:06Z | 2018-05-05T12:59:06Z | 2018-05-06T06:12:05Z | 2,005 | pandas-dev/pandas | 45,026 |