Dataset columns (length/class statistics as reported by the source dump):

| column | dtype | range |
| --- | --- | --- |
| commit | stringlengths | 40-40 |
| old_file | stringlengths | 4-118 |
| new_file | stringlengths | 4-118 |
| old_contents | stringlengths | 10-2.94k |
| new_contents | stringlengths | 21-3.18k |
| subject | stringlengths | 16-444 |
| message | stringlengths | 17-2.63k |
| lang | stringclasses | 1 value |
| license | stringclasses | 13 values |
| repos | stringlengths | 5-43k |
| ndiff | stringlengths | 51-3.32k |
| instruction | stringlengths | 16-444 |
| content | stringlengths | 133-4.32k |
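The schema above describes a commit-level code-editing dataset: each row pairs a natural-language instruction with the contents of one file before and after a commit. As a minimal sketch of how such a dump could be read, assuming the rows are stored as JSON Lines with exactly these fields; the `commits.jsonl` path is a placeholder assumption, not something this dump specifies:

```python
import json

# Hypothetical path -- adjust to wherever this dump is actually stored.
DATA_PATH = "commits.jsonl"

with open(DATA_PATH, encoding="utf-8") as f:
    for line in f:
        row = json.loads(line)
        # Each record pairs an instruction with a before/after code edit.
        print(row["commit"][:8], row["old_file"], "-", row["subject"])
```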
733404ba2eb7218bb4d253cd74fe88107ff75afc
test/test_live_openid_login.py
test/test_live_openid_login.py
```python
import time

import pytest

from chatexchange.browser import SEChatBrowser, LoginError

import live_testing


if live_testing.enabled:
    def test_openid_login():
        """
        Tests login to the Stack Exchange OpenID provider.
        """
        browser = SEChatBrowser()

        # avoid hitting the SE servers too frequently
        time.sleep(2)

        # This will raise an error if login fails.
        browser.loginSEOpenID(
            live_testing.username,
            live_testing.password)

    def test_openid_login_recognizes_failure():
        """
        Tests that failed SE OpenID logins raise errors.
        """
        browser = SEChatBrowser()

        # avoid hitting the SE servers too frequently
        time.sleep(2)

        with pytest.raises(LoginError):
            invalid_password = 'no' + 't' * len(live_testing.password)

            browser.loginSEOpenID(
                live_testing.username,
                invalid_password)
```
```python
import time

import pytest

from chatexchange.browser import SEChatBrowser, LoginError

import live_testing


if live_testing.enabled:
    def test_openid_login_recognizes_failure():
        """
        Tests that failed SE OpenID logins raise errors.
        """
        browser = SEChatBrowser()

        # avoid hitting the SE servers too frequently
        time.sleep(2)

        with pytest.raises(LoginError):
            invalid_password = 'no' + 't' * len(live_testing.password)

            browser.loginSEOpenID(
                live_testing.username,
                invalid_password)
```
Remove successful OpenID login live test. It's redundant with our message-related live tests.
Remove successful OpenID login live test. It's redundant with our message-related live tests.
Python
apache-2.0
ByteCommander/ChatExchange6,hichris1234/ChatExchange,Charcoal-SE/ChatExchange,hichris1234/ChatExchange,ByteCommander/ChatExchange6,Charcoal-SE/ChatExchange
```diff
 import time
 
 import pytest
 
 from chatexchange.browser import SEChatBrowser, LoginError
 
 import live_testing
 
 
 if live_testing.enabled:
-    def test_openid_login():
-        """
-        Tests login to the Stack Exchange OpenID provider.
-        """
-        browser = SEChatBrowser()
-
-        # avoid hitting the SE servers too frequently
-        time.sleep(2)
-
-        # This will raise an error if login fails.
-        browser.loginSEOpenID(
-            live_testing.username,
-            live_testing.password)
-
     def test_openid_login_recognizes_failure():
         """
         Tests that failed SE OpenID logins raise errors.
         """
         browser = SEChatBrowser()
 
         # avoid hitting the SE servers too frequently
         time.sleep(2)
 
         with pytest.raises(LoginError):
             invalid_password = 'no' + 't' * len(live_testing.password)
 
             browser.loginSEOpenID(
                 live_testing.username,
                 invalid_password)
```
Remove successful OpenID login live test. It's redundant with our message-related live tests.
## Code Before:
```python
import time

import pytest

from chatexchange.browser import SEChatBrowser, LoginError

import live_testing


if live_testing.enabled:
    def test_openid_login():
        """
        Tests login to the Stack Exchange OpenID provider.
        """
        browser = SEChatBrowser()

        # avoid hitting the SE servers too frequently
        time.sleep(2)

        # This will raise an error if login fails.
        browser.loginSEOpenID(
            live_testing.username,
            live_testing.password)

    def test_openid_login_recognizes_failure():
        """
        Tests that failed SE OpenID logins raise errors.
        """
        browser = SEChatBrowser()

        # avoid hitting the SE servers too frequently
        time.sleep(2)

        with pytest.raises(LoginError):
            invalid_password = 'no' + 't' * len(live_testing.password)

            browser.loginSEOpenID(
                live_testing.username,
                invalid_password)
```
## Instruction:
Remove successful OpenID login live test. It's redundant with our message-related live tests.
## Code After:
```python
import time

import pytest

from chatexchange.browser import SEChatBrowser, LoginError

import live_testing


if live_testing.enabled:
    def test_openid_login_recognizes_failure():
        """
        Tests that failed SE OpenID logins raise errors.
        """
        browser = SEChatBrowser()

        # avoid hitting the SE servers too frequently
        time.sleep(2)

        with pytest.raises(LoginError):
            invalid_password = 'no' + 't' * len(live_testing.password)

            browser.loginSEOpenID(
                live_testing.username,
                invalid_password)
```
9c428fbfb69c93ef3da935d0d2ab098fbeb1c317
dsh.py
dsh.py
```python
__author__ = 'Michael Montero <mcmontero@gmail.com>'

# ----- Imports ---------------------------------------------------------------

from tinyAPI.base.data_store.provider import DataStoreProvider

import tinyAPI

__all__ = [ 'dsh' ]

# ----- Private Classes -------------------------------------------------------

class NoOpDSH(object):
    '''
    The use of this object in __DSH is ambiguous.  It's unclear why a call
    to a commit or rollback command would be executed without a connection
    ever being established.
    '''

    def close(self):
        pass

    def commit(self, ignore_exceptions=True):
        pass

    def rollback(self, ignore_exceptions=True):
        pass

# ----- Instructions ----------------------------------------------------------

class __DSH(object):

    def __init__(self):
        self.__provider = None

    def __call__(self):
        return self.__provider if self.__provider is not None else NoOpDSH()

    def select_db(self, connection, db, persistent=True):
        self.__provider = \
            DataStoreProvider() \
                .get_data_store_handle(
                    connection,
                    db,
                    tinyAPI.env_cli() is not True and persistent
                )

        return self

dsh = __DSH()
```
```python
__author__ = 'Michael Montero <mcmontero@gmail.com>'

# ----- Imports ---------------------------------------------------------------

from tinyAPI.base.data_store.provider import DataStoreProvider

import tinyAPI

__all__ = [ 'dsh' ]

# ----- Private Classes -------------------------------------------------------

class UnitTestNullDSH(object):
    '''
    Supports unit test cases that do not perform transactional data store
    operations but attempt to close or rollback transactions.
    '''

    def close(self):
        pass

    def rollback(self, ignore_exceptions=True):
        pass

# ----- Instructions ----------------------------------------------------------

class __DSH(object):

    def __init__(self):
        self.__provider = None
        self.__unit_test_null_dsh = UnitTestNullDSH()

    def __call__(self):
        if self.__provider is None:
            if tinyAPI.env_unit_test() is True:
                return self.__unit_test_null_dsh
            else:
                raise RuntimeError('data store handle has not been selected')

        return self.__provider

    def select_db(self, connection, db, persistent=True):
        self.__provider = \
            DataStoreProvider() \
                .get_data_store_handle(
                    connection,
                    db,
                    tinyAPI.env_cli() is not True and persistent
                )

        return self

dsh = __DSH()
```
Revert "Testing NoOpDSH() when database commands are executed without a connection being opened."
Revert "Testing NoOpDSH() when database commands are executed without a connection being opened." This reverts commit 57dd36da6f558e9bd5c9b7c97e955600c2fa0b8e.
Python
mit
mcmontero/tinyAPI,mcmontero/tinyAPI
```diff
 __author__ = 'Michael Montero <mcmontero@gmail.com>'
 
 # ----- Imports ---------------------------------------------------------------
 
 from tinyAPI.base.data_store.provider import DataStoreProvider
 
 import tinyAPI
 
 __all__ = [ 'dsh' ]
 
 # ----- Private Classes -------------------------------------------------------
 
-class NoOpDSH(object):
+class UnitTestNullDSH(object):
     '''
+    Supports unit test cases that do not perform transactional data store
+    operations but attempt to close or rollback transactions.
-    The use of this object in __DSH is ambiguous.  It's unclear why a call
-    to a commit or rollback command would be executed without a connection
-    ever being established.
     '''
 
     def close(self):
-        pass
-
-    def commit(self, ignore_exceptions=True):
         pass
 
     def rollback(self, ignore_exceptions=True):
         pass
 
 # ----- Instructions ----------------------------------------------------------
 
 class __DSH(object):
 
     def __init__(self):
         self.__provider = None
-
+        self.__unit_test_null_dsh = UnitTestNullDSH()
 
     def __call__(self):
-        return self.__provider if self.__provider is not None else NoOpDSH()
+        if self.__provider is None:
+            if tinyAPI.env_unit_test() is True:
+                return self.__unit_test_null_dsh
+            else:
+                raise RuntimeError('data store handle has not been selected')
+
+        return self.__provider
 
     def select_db(self, connection, db, persistent=True):
         self.__provider = \
             DataStoreProvider() \
                 .get_data_store_handle(
                     connection,
                     db,
                     tinyAPI.env_cli() is not True and persistent
                 )
 
         return self
 
 dsh = __DSH()
```
Revert "Testing NoOpDSH() when database commands are executed without a connection being opened."
## Code Before:
```python
__author__ = 'Michael Montero <mcmontero@gmail.com>'

# ----- Imports ---------------------------------------------------------------

from tinyAPI.base.data_store.provider import DataStoreProvider

import tinyAPI

__all__ = [ 'dsh' ]

# ----- Private Classes -------------------------------------------------------

class NoOpDSH(object):
    '''
    The use of this object in __DSH is ambiguous.  It's unclear why a call
    to a commit or rollback command would be executed without a connection
    ever being established.
    '''

    def close(self):
        pass

    def commit(self, ignore_exceptions=True):
        pass

    def rollback(self, ignore_exceptions=True):
        pass

# ----- Instructions ----------------------------------------------------------

class __DSH(object):

    def __init__(self):
        self.__provider = None

    def __call__(self):
        return self.__provider if self.__provider is not None else NoOpDSH()

    def select_db(self, connection, db, persistent=True):
        self.__provider = \
            DataStoreProvider() \
                .get_data_store_handle(
                    connection,
                    db,
                    tinyAPI.env_cli() is not True and persistent
                )

        return self

dsh = __DSH()
```
## Instruction:
Revert "Testing NoOpDSH() when database commands are executed without a connection being opened."
## Code After:
```python
__author__ = 'Michael Montero <mcmontero@gmail.com>'

# ----- Imports ---------------------------------------------------------------

from tinyAPI.base.data_store.provider import DataStoreProvider

import tinyAPI

__all__ = [ 'dsh' ]

# ----- Private Classes -------------------------------------------------------

class UnitTestNullDSH(object):
    '''
    Supports unit test cases that do not perform transactional data store
    operations but attempt to close or rollback transactions.
    '''

    def close(self):
        pass

    def rollback(self, ignore_exceptions=True):
        pass

# ----- Instructions ----------------------------------------------------------

class __DSH(object):

    def __init__(self):
        self.__provider = None
        self.__unit_test_null_dsh = UnitTestNullDSH()

    def __call__(self):
        if self.__provider is None:
            if tinyAPI.env_unit_test() is True:
                return self.__unit_test_null_dsh
            else:
                raise RuntimeError('data store handle has not been selected')

        return self.__provider

    def select_db(self, connection, db, persistent=True):
        self.__provider = \
            DataStoreProvider() \
                .get_data_store_handle(
                    connection,
                    db,
                    tinyAPI.env_cli() is not True and persistent
                )

        return self

dsh = __DSH()
```
eced06f6f523fa6fd475987ae688b7ca2b6c3415
checks/system/__init__.py
checks/system/__init__.py
```python
import sys

class Platform(object):

    @staticmethod
    def is_darwin(name=None):
        name = name or sys.platform
        return 'darwin' in name

    @staticmethod
    def is_freebsd(name=None):
        name = name or sys.platform
        return name.startswith("freebsd")

    @staticmethod
    def is_linux(name=None):
        name = name or sys.platform
        return 'linux' in name

    @staticmethod
    def is_bsd(name=None):
        """ Return true if this is a BSD like operating system. """
        name = name or sys.platform
        return Platform.is_darwin(name) or Platform.is_freebsd(name)

    @staticmethod
    def is_solaris(name=None):
        name = name or sys.platform
        return name == "sunos5"

    @staticmethod
    def is_unix(name=None):
        """ Return true if the platform is a unix, False otherwise. """
        name = name or sys.platform
        return (Platform.is_darwin()
                or Platform.is_linux()
                or Platform.is_freebsd()
                )
```
```python
import sys

class Platform(object):

    @staticmethod
    def is_darwin(name=None):
        name = name or sys.platform
        return 'darwin' in name

    @staticmethod
    def is_freebsd(name=None):
        name = name or sys.platform
        return name.startswith("freebsd")

    @staticmethod
    def is_linux(name=None):
        name = name or sys.platform
        return 'linux' in name

    @staticmethod
    def is_bsd(name=None):
        """ Return true if this is a BSD like operating system. """
        name = name or sys.platform
        return Platform.is_darwin(name) or Platform.is_freebsd(name)

    @staticmethod
    def is_solaris(name=None):
        name = name or sys.platform
        return name == "sunos5"

    @staticmethod
    def is_unix(name=None):
        """ Return true if the platform is a unix, False otherwise. """
        name = name or sys.platform
        return (Platform.is_darwin()
                or Platform.is_linux()
                or Platform.is_freebsd()
                )

    @staticmethod
    def is_win32(name=None):
        name = name or sys.platform
        return name == "win32"
```
Add win32 to platform information
Add win32 to platform information
Python
bsd-3-clause
jraede/dd-agent,tebriel/dd-agent,JohnLZeller/dd-agent,a20012251/dd-agent,remh/dd-agent,tebriel/dd-agent,AntoCard/powerdns-recursor_check,tebriel/dd-agent,AniruddhaSAtre/dd-agent,urosgruber/dd-agent,polynomial/dd-agent,JohnLZeller/dd-agent,Mashape/dd-agent,JohnLZeller/dd-agent,eeroniemi/dd-agent,c960657/dd-agent,mderomph-coolblue/dd-agent,ess/dd-agent,jraede/dd-agent,Shopify/dd-agent,truthbk/dd-agent,eeroniemi/dd-agent,relateiq/dd-agent,AntoCard/powerdns-recursor_check,oneandoneis2/dd-agent,darron/dd-agent,AniruddhaSAtre/dd-agent,packetloop/dd-agent,joelvanvelden/dd-agent,zendesk/dd-agent,tebriel/dd-agent,pfmooney/dd-agent,lookout/dd-agent,polynomial/dd-agent,yuecong/dd-agent,AniruddhaSAtre/dd-agent,yuecong/dd-agent,jyogi/purvar-agent,GabrielNicolasAvellaneda/dd-agent,GabrielNicolasAvellaneda/dd-agent,jraede/dd-agent,oneandoneis2/dd-agent,urosgruber/dd-agent,packetloop/dd-agent,citrusleaf/dd-agent,manolama/dd-agent,indeedops/dd-agent,zendesk/dd-agent,PagerDuty/dd-agent,gphat/dd-agent,takus/dd-agent,truthbk/dd-agent,guruxu/dd-agent,yuecong/dd-agent,Mashape/dd-agent,cberry777/dd-agent,jshum/dd-agent,jshum/dd-agent,cberry777/dd-agent,PagerDuty/dd-agent,c960657/dd-agent,gphat/dd-agent,ess/dd-agent,GabrielNicolasAvellaneda/dd-agent,benmccann/dd-agent,manolama/dd-agent,benmccann/dd-agent,jvassev/dd-agent,pfmooney/dd-agent,huhongbo/dd-agent,polynomial/dd-agent,jvassev/dd-agent,jraede/dd-agent,jamesandariese/dd-agent,benmccann/dd-agent,brettlangdon/dd-agent,takus/dd-agent,pfmooney/dd-agent,lookout/dd-agent,amalakar/dd-agent,cberry777/dd-agent,joelvanvelden/dd-agent,huhongbo/dd-agent,remh/dd-agent,jyogi/purvar-agent,Mashape/dd-agent,eeroniemi/dd-agent,indeedops/dd-agent,pmav99/praktoras,darron/dd-agent,yuecong/dd-agent,brettlangdon/dd-agent,urosgruber/dd-agent,relateiq/dd-agent,jamesandariese/dd-agent,AntoCard/powerdns-recursor_check,amalakar/dd-agent,ess/dd-agent,pmav99/praktoras,amalakar/dd-agent,citrusleaf/dd-agent,amalakar/dd-agent,c960657/dd-agent,jshum/dd-agent,huhongbo/dd-agent,polynomial/dd-agent,oneandoneis2/dd-agent,Shopify/dd-agent,Wattpad/dd-agent,brettlangdon/dd-agent,JohnLZeller/dd-agent,jshum/dd-agent,citrusleaf/dd-agent,joelvanvelden/dd-agent,tebriel/dd-agent,guruxu/dd-agent,lookout/dd-agent,packetloop/dd-agent,a20012251/dd-agent,manolama/dd-agent,PagerDuty/dd-agent,Wattpad/dd-agent,AntoCard/powerdns-recursor_check,jraede/dd-agent,mderomph-coolblue/dd-agent,takus/dd-agent,indeedops/dd-agent,Wattpad/dd-agent,Wattpad/dd-agent,joelvanvelden/dd-agent,amalakar/dd-agent,yuecong/dd-agent,jamesandariese/dd-agent,packetloop/dd-agent,benmccann/dd-agent,oneandoneis2/dd-agent,Shopify/dd-agent,mderomph-coolblue/dd-agent,jvassev/dd-agent,mderomph-coolblue/dd-agent,darron/dd-agent,AniruddhaSAtre/dd-agent,mderomph-coolblue/dd-agent,a20012251/dd-agent,gphat/dd-agent,c960657/dd-agent,remh/dd-agent,Mashape/dd-agent,remh/dd-agent,relateiq/dd-agent,benmccann/dd-agent,darron/dd-agent,GabrielNicolasAvellaneda/dd-agent,pmav99/praktoras,relateiq/dd-agent,indeedops/dd-agent,jyogi/purvar-agent,brettlangdon/dd-agent,joelvanvelden/dd-agent,zendesk/dd-agent,pfmooney/dd-agent,Shopify/dd-agent,guruxu/dd-agent,lookout/dd-agent,jyogi/purvar-agent,eeroniemi/dd-agent,pmav99/praktoras,manolama/dd-agent,jamesandariese/dd-agent,urosgruber/dd-agent,Mashape/dd-agent,huhongbo/dd-agent,Wattpad/dd-agent,truthbk/dd-agent,AntoCard/powerdns-recursor_check,relateiq/dd-agent,guruxu/dd-agent,JohnLZeller/dd-agent,a20012251/dd-agent,takus/dd-agent,truthbk/dd-agent,cberry777/dd-agent,remh/dd-agent,eeroniemi/dd-agent,urosgruber/dd-agent,
brettlangdon/dd-agent,oneandoneis2/dd-agent,gphat/dd-agent,citrusleaf/dd-agent,pmav99/praktoras,zendesk/dd-agent,Shopify/dd-agent,GabrielNicolasAvellaneda/dd-agent,polynomial/dd-agent,jvassev/dd-agent,jamesandariese/dd-agent,PagerDuty/dd-agent,ess/dd-agent,truthbk/dd-agent,cberry777/dd-agent,PagerDuty/dd-agent,AniruddhaSAtre/dd-agent,a20012251/dd-agent,guruxu/dd-agent,jvassev/dd-agent,indeedops/dd-agent,c960657/dd-agent,packetloop/dd-agent,jshum/dd-agent,zendesk/dd-agent,takus/dd-agent,lookout/dd-agent,jyogi/purvar-agent,ess/dd-agent,manolama/dd-agent,gphat/dd-agent,citrusleaf/dd-agent,pfmooney/dd-agent,huhongbo/dd-agent,darron/dd-agent
```diff
 import sys
 
 class Platform(object):
 
     @staticmethod
     def is_darwin(name=None):
         name = name or sys.platform
         return 'darwin' in name
 
     @staticmethod
     def is_freebsd(name=None):
         name = name or sys.platform
         return name.startswith("freebsd")
 
     @staticmethod
     def is_linux(name=None):
         name = name or sys.platform
         return 'linux' in name
 
     @staticmethod
     def is_bsd(name=None):
         """ Return true if this is a BSD like operating system. """
         name = name or sys.platform
         return Platform.is_darwin(name) or Platform.is_freebsd(name)
 
     @staticmethod
     def is_solaris(name=None):
         name = name or sys.platform
         return name == "sunos5"
 
     @staticmethod
     def is_unix(name=None):
         """ Return true if the platform is a unix, False otherwise. """
         name = name or sys.platform
         return (Platform.is_darwin()
                 or Platform.is_linux()
                 or Platform.is_freebsd()
                 )
+
+    @staticmethod
+    def is_win32(name=None):
+        name = name or sys.platform
+        return name == "win32"
```
Add win32 to platform information
## Code Before:
```python
import sys

class Platform(object):

    @staticmethod
    def is_darwin(name=None):
        name = name or sys.platform
        return 'darwin' in name

    @staticmethod
    def is_freebsd(name=None):
        name = name or sys.platform
        return name.startswith("freebsd")

    @staticmethod
    def is_linux(name=None):
        name = name or sys.platform
        return 'linux' in name

    @staticmethod
    def is_bsd(name=None):
        """ Return true if this is a BSD like operating system. """
        name = name or sys.platform
        return Platform.is_darwin(name) or Platform.is_freebsd(name)

    @staticmethod
    def is_solaris(name=None):
        name = name or sys.platform
        return name == "sunos5"

    @staticmethod
    def is_unix(name=None):
        """ Return true if the platform is a unix, False otherwise. """
        name = name or sys.platform
        return (Platform.is_darwin()
                or Platform.is_linux()
                or Platform.is_freebsd()
                )
```
## Instruction:
Add win32 to platform information
## Code After:
```python
import sys

class Platform(object):

    @staticmethod
    def is_darwin(name=None):
        name = name or sys.platform
        return 'darwin' in name

    @staticmethod
    def is_freebsd(name=None):
        name = name or sys.platform
        return name.startswith("freebsd")

    @staticmethod
    def is_linux(name=None):
        name = name or sys.platform
        return 'linux' in name

    @staticmethod
    def is_bsd(name=None):
        """ Return true if this is a BSD like operating system. """
        name = name or sys.platform
        return Platform.is_darwin(name) or Platform.is_freebsd(name)

    @staticmethod
    def is_solaris(name=None):
        name = name or sys.platform
        return name == "sunos5"

    @staticmethod
    def is_unix(name=None):
        """ Return true if the platform is a unix, False otherwise. """
        name = name or sys.platform
        return (Platform.is_darwin()
                or Platform.is_linux()
                or Platform.is_freebsd()
                )

    @staticmethod
    def is_win32(name=None):
        name = name or sys.platform
        return name == "win32"
```
b974bbcc7e243fca7c3dc63fbbaf530fe9b69e50
runtests.py
runtests.py
```python
import sys

try:
    from django.conf import settings
    from django.test.utils import get_runner

    settings.configure(
        DEBUG=True,
        USE_TZ=True,
        DATABASES={
            "default": {
                "ENGINE": "django.db.backends.sqlite3",
                "NAME": ":memory:",
            }
        },
        INSTALLED_APPS=[
            "django.contrib.auth",
            "django.contrib.contenttypes",
            "django.contrib.staticfiles",
            "django.contrib.sites",
            "chartit",
            "chartit_tests",
        ],
        SITE_ID=1,
        MIDDLEWARE_CLASSES=(),
        STATIC_URL='/static/'
    )

    try:
        import django
        setup = django.setup
    except AttributeError:
        pass
    else:
        setup()

except ImportError:
    import traceback
    traceback.print_exc()
    raise ImportError("To fix this error, run: pip install -r requirements.txt")


def run_tests(*test_args):
    if not test_args:
        test_args = ["chartit_tests"]

    # Run tests
    TestRunner = get_runner(settings)
    test_runner = TestRunner()

    failures = test_runner.run_tests(test_args)

    if failures:
        sys.exit(bool(failures))


if __name__ == "__main__":
    run_tests(*sys.argv[1:])
```
```python
import os
import sys

try:
    sys.path.append('demoproject')
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "demoproject.settings")

    from django.conf import settings
    from django.core.management import call_command

    settings.DATABASES['default']['NAME'] = ':memory:'
    settings.INSTALLED_APPS.append('chartit_tests')

    try:
        import django
        setup = django.setup
    except AttributeError:
        pass
    else:
        setup()

except ImportError:
    import traceback
    traceback.print_exc()
    raise ImportError("To fix this error, run: pip install -r requirements.txt")


def run_tests(*test_args):
    if not test_args:
        test_args = ["chartit_tests"]

    # ./manage.py test takes care of database creation and
    # application of migrations if any
    result = call_command('test', *test_args, verbosity=2, failfast=True)
    sys.exit(result)


if __name__ == "__main__":
    run_tests(*sys.argv[1:])
```
Load DB migrations before testing and use verbose=2 and failfast
Load DB migrations before testing and use verbose=2 and failfast Note that we use `manage.py test` instead of `manage.py migrate` and manually running the tests. This lets Django take care of applying migrations before running tests. This works around https://code.djangoproject.com/ticket/22487 which causes a test failure on Django 1.8.14. In 1.8.14 somehow we end up without any actual data in the test DB and one of the tests fails if we use `manage.py migrate` and run the tests manually via TestRunner.
Python
bsd-2-clause
pgollakota/django-chartit,pgollakota/django-chartit,pgollakota/django-chartit
```diff
+import os
 import sys
 
 try:
+    sys.path.append('demoproject')
+    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "demoproject.settings")
+
     from django.conf import settings
-    from django.test.utils import get_runner
+    from django.core.management import call_command
 
+    settings.DATABASES['default']['NAME'] = ':memory:'
+    settings.INSTALLED_APPS.append('chartit_tests')
-    settings.configure(
-        DEBUG=True,
-        USE_TZ=True,
-        DATABASES={
-            "default": {
-                "ENGINE": "django.db.backends.sqlite3",
-                "NAME": ":memory:",
-            }
-        },
-        INSTALLED_APPS=[
-            "django.contrib.auth",
-            "django.contrib.contenttypes",
-            "django.contrib.staticfiles",
-            "django.contrib.sites",
-            "chartit",
-            "chartit_tests",
-        ],
-        SITE_ID=1,
-        MIDDLEWARE_CLASSES=(),
-        STATIC_URL='/static/'
-    )
 
     try:
         import django
         setup = django.setup
     except AttributeError:
         pass
     else:
         setup()
 
 except ImportError:
     import traceback
     traceback.print_exc()
     raise ImportError("To fix this error, run: pip install -r requirements.txt")
 
 
 def run_tests(*test_args):
     if not test_args:
         test_args = ["chartit_tests"]
 
+    # ./manage.py test takes care of database creation and
+    # application of migrations if any
+    result = call_command('test', *test_args, verbosity=2, failfast=True)
+    sys.exit(result)
-    # Run tests
-    TestRunner = get_runner(settings)
-    test_runner = TestRunner()
-
-    failures = test_runner.run_tests(test_args)
-
-    if failures:
-        sys.exit(bool(failures))
 
 
 if __name__ == "__main__":
     run_tests(*sys.argv[1:])
```
Load DB migrations before testing and use verbose=2 and failfast
## Code Before:
```python
import sys

try:
    from django.conf import settings
    from django.test.utils import get_runner

    settings.configure(
        DEBUG=True,
        USE_TZ=True,
        DATABASES={
            "default": {
                "ENGINE": "django.db.backends.sqlite3",
                "NAME": ":memory:",
            }
        },
        INSTALLED_APPS=[
            "django.contrib.auth",
            "django.contrib.contenttypes",
            "django.contrib.staticfiles",
            "django.contrib.sites",
            "chartit",
            "chartit_tests",
        ],
        SITE_ID=1,
        MIDDLEWARE_CLASSES=(),
        STATIC_URL='/static/'
    )

    try:
        import django
        setup = django.setup
    except AttributeError:
        pass
    else:
        setup()

except ImportError:
    import traceback
    traceback.print_exc()
    raise ImportError("To fix this error, run: pip install -r requirements.txt")


def run_tests(*test_args):
    if not test_args:
        test_args = ["chartit_tests"]

    # Run tests
    TestRunner = get_runner(settings)
    test_runner = TestRunner()

    failures = test_runner.run_tests(test_args)

    if failures:
        sys.exit(bool(failures))


if __name__ == "__main__":
    run_tests(*sys.argv[1:])
```
## Instruction:
Load DB migrations before testing and use verbose=2 and failfast
## Code After:
```python
import os
import sys

try:
    sys.path.append('demoproject')
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "demoproject.settings")

    from django.conf import settings
    from django.core.management import call_command

    settings.DATABASES['default']['NAME'] = ':memory:'
    settings.INSTALLED_APPS.append('chartit_tests')

    try:
        import django
        setup = django.setup
    except AttributeError:
        pass
    else:
        setup()

except ImportError:
    import traceback
    traceback.print_exc()
    raise ImportError("To fix this error, run: pip install -r requirements.txt")


def run_tests(*test_args):
    if not test_args:
        test_args = ["chartit_tests"]

    # ./manage.py test takes care of database creation and
    # application of migrations if any
    result = call_command('test', *test_args, verbosity=2, failfast=True)
    sys.exit(result)


if __name__ == "__main__":
    run_tests(*sys.argv[1:])
```
75289980c658e081fec2d7e34651837c4629d4b7
settings.py
settings.py
```python
# Replace the following lines with client IDs obtained from the APIs
# Console or Cloud Console.
WEB_CLIENT_ID = 'your-app-id'
```
```python
# Replace the following lines with client IDs obtained from the APIs
# Console or Cloud Console.
WEB_CLIENT_ID = 'your-web-client-id'
```
Fix the placeholder for better understanding
fix: Fix the placeholder for better understanding
Python
mit
iraquitan/udacity-fsnd-p4-conference-app,iraquitan/udacity-fsnd-p4-conference-app,iraquitan/udacity-fsnd-p4-conference-app
```diff
 # Replace the following lines with client IDs obtained from the APIs
 # Console or Cloud Console.
-WEB_CLIENT_ID = 'your-app-id'
+WEB_CLIENT_ID = 'your-web-client-id'
```
Fix the placeholder for better understanding
## Code Before:
```python
# Replace the following lines with client IDs obtained from the APIs
# Console or Cloud Console.
WEB_CLIENT_ID = 'your-app-id'
```
## Instruction:
Fix the placeholder for better understanding
## Code After:
```python
# Replace the following lines with client IDs obtained from the APIs
# Console or Cloud Console.
WEB_CLIENT_ID = 'your-web-client-id'
```
68b52fedf5b22891a4fc9cf121417ced38d0ea00
rolepermissions/utils.py
rolepermissions/utils.py
```python
from __future__ import unicode_literals

import re
import collections


def user_is_authenticated(user):
    if isinstance(user.is_authenticated, collections.Callable):
        authenticated = user.is_authenticated()
    else:
        authenticated = user.is_authenticated

    return authenticated


def camelToSnake(s):
    """
    https://gist.github.com/jaytaylor/3660565
    Is it ironic that this function is written in camel case, yet it
    converts to snake case? hmm..
    """
    _underscorer1 = re.compile(r'(.)([A-Z][a-z]+)')
    _underscorer2 = re.compile('([a-z0-9])([A-Z])')

    subbed = _underscorer1.sub(r'\1_\2', s)
    return _underscorer2.sub(r'\1_\2', subbed).lower()


def snake_to_title(s):
    return ' '.join(x.capitalize() for x in s.split('_'))


def camel_or_snake_to_title(s):
    return snake_to_title(camelToSnake(s))
```
```python
from __future__ import unicode_literals

import re

try:
    from collections.abc import Callable
except ImportError:
    from collections import Callable


def user_is_authenticated(user):
    if isinstance(user.is_authenticated, Callable):
        authenticated = user.is_authenticated()
    else:
        authenticated = user.is_authenticated

    return authenticated


def camelToSnake(s):
    """
    https://gist.github.com/jaytaylor/3660565
    Is it ironic that this function is written in camel case, yet it
    converts to snake case? hmm..
    """
    _underscorer1 = re.compile(r'(.)([A-Z][a-z]+)')
    _underscorer2 = re.compile('([a-z0-9])([A-Z])')

    subbed = _underscorer1.sub(r'\1_\2', s)
    return _underscorer2.sub(r'\1_\2', subbed).lower()


def snake_to_title(s):
    return ' '.join(x.capitalize() for x in s.split('_'))


def camel_or_snake_to_title(s):
    return snake_to_title(camelToSnake(s))
```
Fix import of Callable for Python 3.9
Fix import of Callable for Python 3.9 Python 3.3 moved Callable to collections.abc and Python 3.9 removes Callable from collections module
Python
mit
vintasoftware/django-role-permissions
```diff
 from __future__ import unicode_literals

 import re
-import collections
+try:
+    from collections.abc import Callable
+except ImportError:
+    from collections import Callable


 def user_is_authenticated(user):
-    if isinstance(user.is_authenticated, collections.Callable):
+    if isinstance(user.is_authenticated, Callable):
         authenticated = user.is_authenticated()
     else:
         authenticated = user.is_authenticated

     return authenticated


 def camelToSnake(s):
     """
     https://gist.github.com/jaytaylor/3660565
     Is it ironic that this function is written in camel case, yet it
     converts to snake case? hmm..
     """
     _underscorer1 = re.compile(r'(.)([A-Z][a-z]+)')
     _underscorer2 = re.compile('([a-z0-9])([A-Z])')

     subbed = _underscorer1.sub(r'\1_\2', s)
     return _underscorer2.sub(r'\1_\2', subbed).lower()


 def snake_to_title(s):
     return ' '.join(x.capitalize() for x in s.split('_'))


 def camel_or_snake_to_title(s):
     return snake_to_title(camelToSnake(s))
```
Fix import of Callable for Python 3.9
## Code Before:
```python
from __future__ import unicode_literals

import re
import collections


def user_is_authenticated(user):
    if isinstance(user.is_authenticated, collections.Callable):
        authenticated = user.is_authenticated()
    else:
        authenticated = user.is_authenticated

    return authenticated


def camelToSnake(s):
    """
    https://gist.github.com/jaytaylor/3660565
    Is it ironic that this function is written in camel case, yet it
    converts to snake case? hmm..
    """
    _underscorer1 = re.compile(r'(.)([A-Z][a-z]+)')
    _underscorer2 = re.compile('([a-z0-9])([A-Z])')

    subbed = _underscorer1.sub(r'\1_\2', s)
    return _underscorer2.sub(r'\1_\2', subbed).lower()


def snake_to_title(s):
    return ' '.join(x.capitalize() for x in s.split('_'))


def camel_or_snake_to_title(s):
    return snake_to_title(camelToSnake(s))
```
## Instruction:
Fix import of Callable for Python 3.9
## Code After:
```python
from __future__ import unicode_literals

import re

try:
    from collections.abc import Callable
except ImportError:
    from collections import Callable


def user_is_authenticated(user):
    if isinstance(user.is_authenticated, Callable):
        authenticated = user.is_authenticated()
    else:
        authenticated = user.is_authenticated

    return authenticated


def camelToSnake(s):
    """
    https://gist.github.com/jaytaylor/3660565
    Is it ironic that this function is written in camel case, yet it
    converts to snake case? hmm..
    """
    _underscorer1 = re.compile(r'(.)([A-Z][a-z]+)')
    _underscorer2 = re.compile('([a-z0-9])([A-Z])')

    subbed = _underscorer1.sub(r'\1_\2', s)
    return _underscorer2.sub(r'\1_\2', subbed).lower()


def snake_to_title(s):
    return ' '.join(x.capitalize() for x in s.split('_'))


def camel_or_snake_to_title(s):
    return snake_to_title(camelToSnake(s))
```
7f7fd4e7547af3a6d7e3cd4da025c2b0ab24508b
widgy/contrib/widgy_mezzanine/migrations/0001_initial.py
widgy/contrib/widgy_mezzanine/migrations/0001_initial.py
```python
from __future__ import unicode_literals

from django.db import models, migrations
import widgy.db.fields
import django.db.models.deletion
import widgy.contrib.widgy_mezzanine.models


class Migration(migrations.Migration):

    dependencies = [
        ('pages', '__first__'),
        ('review_queue', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='WidgyPage',
            fields=[
                ('page_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='pages.Page')),
                ('root_node', widgy.db.fields.VersionedWidgyField(on_delete=django.db.models.deletion.SET_NULL, verbose_name='widgy content', to='review_queue.ReviewedVersionTracker', null=True)),
            ],
            options={
                'ordering': ('_order',),
                'verbose_name': 'widgy page',
                'verbose_name_plural': 'widgy pages',
            },
            bases=(widgy.contrib.widgy_mezzanine.models.WidgyPageMixin, 'pages.page'),
        ),
        migrations.CreateModel(
            name='UndeletePage',
            fields=[
            ],
            options={
                'ordering': ('_order',),
                'verbose_name': 'restore deleted page',
                'proxy': True,
            },
            bases=('widgy_mezzanine.widgypage',),
        ),
    ]
```
```python
from __future__ import unicode_literals

from django.db import models, migrations
import widgy.db.fields
import django.db.models.deletion
import widgy.contrib.widgy_mezzanine.models


class Migration(migrations.Migration):

    dependencies = [
        ('pages', '__first__'),
        ('widgy', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='WidgyPage',
            fields=[
                ('page_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='pages.Page')),
                ('root_node', widgy.db.fields.VersionedWidgyField(on_delete=django.db.models.deletion.SET_NULL, verbose_name='widgy content', to='widgy.VersionTracker', null=True)),
            ],
            options={
                'ordering': ('_order',),
                'verbose_name': 'widgy page',
                'verbose_name_plural': 'widgy pages',
            },
            bases=(widgy.contrib.widgy_mezzanine.models.WidgyPageMixin, 'pages.page'),
        ),
        migrations.CreateModel(
            name='UndeletePage',
            fields=[
            ],
            options={
                'ordering': ('_order',),
                'verbose_name': 'restore deleted page',
                'proxy': True,
            },
            bases=('widgy_mezzanine.widgypage',),
        ),
    ]
```
Remove dependency for ReviewedVersionTracker in migrations
Remove dependency for ReviewedVersionTracker in migrations The base widgy migrations had references to ReviewedVersionTracker, which is not part of the base widgy install. This commit changes the dependency to VersionTracker instead, which is part of the base widgy install.
Python
apache-2.0
j00bar/django-widgy,j00bar/django-widgy,j00bar/django-widgy
```diff
 from __future__ import unicode_literals

 from django.db import models, migrations
 import widgy.db.fields
 import django.db.models.deletion
 import widgy.contrib.widgy_mezzanine.models


 class Migration(migrations.Migration):

     dependencies = [
         ('pages', '__first__'),
-        ('review_queue', '0001_initial'),
+        ('widgy', '0001_initial'),
     ]

     operations = [
         migrations.CreateModel(
             name='WidgyPage',
             fields=[
                 ('page_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='pages.Page')),
-                ('root_node', widgy.db.fields.VersionedWidgyField(on_delete=django.db.models.deletion.SET_NULL, verbose_name='widgy content', to='review_queue.ReviewedVersionTracker', null=True)),
+                ('root_node', widgy.db.fields.VersionedWidgyField(on_delete=django.db.models.deletion.SET_NULL, verbose_name='widgy content', to='widgy.VersionTracker', null=True)),
             ],
             options={
                 'ordering': ('_order',),
                 'verbose_name': 'widgy page',
                 'verbose_name_plural': 'widgy pages',
             },
             bases=(widgy.contrib.widgy_mezzanine.models.WidgyPageMixin, 'pages.page'),
         ),
         migrations.CreateModel(
             name='UndeletePage',
             fields=[
             ],
             options={
                 'ordering': ('_order',),
                 'verbose_name': 'restore deleted page',
                 'proxy': True,
             },
             bases=('widgy_mezzanine.widgypage',),
         ),
     ]
```
Remove dependency for ReviewedVersionTracker in migrations
## Code Before:
```python
from __future__ import unicode_literals

from django.db import models, migrations
import widgy.db.fields
import django.db.models.deletion
import widgy.contrib.widgy_mezzanine.models


class Migration(migrations.Migration):

    dependencies = [
        ('pages', '__first__'),
        ('review_queue', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='WidgyPage',
            fields=[
                ('page_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='pages.Page')),
                ('root_node', widgy.db.fields.VersionedWidgyField(on_delete=django.db.models.deletion.SET_NULL, verbose_name='widgy content', to='review_queue.ReviewedVersionTracker', null=True)),
            ],
            options={
                'ordering': ('_order',),
                'verbose_name': 'widgy page',
                'verbose_name_plural': 'widgy pages',
            },
            bases=(widgy.contrib.widgy_mezzanine.models.WidgyPageMixin, 'pages.page'),
        ),
        migrations.CreateModel(
            name='UndeletePage',
            fields=[
            ],
            options={
                'ordering': ('_order',),
                'verbose_name': 'restore deleted page',
                'proxy': True,
            },
            bases=('widgy_mezzanine.widgypage',),
        ),
    ]
```
## Instruction:
Remove dependency for ReviewedVersionTracker in migrations
## Code After:
```python
from __future__ import unicode_literals

from django.db import models, migrations
import widgy.db.fields
import django.db.models.deletion
import widgy.contrib.widgy_mezzanine.models


class Migration(migrations.Migration):

    dependencies = [
        ('pages', '__first__'),
        ('widgy', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='WidgyPage',
            fields=[
                ('page_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='pages.Page')),
                ('root_node', widgy.db.fields.VersionedWidgyField(on_delete=django.db.models.deletion.SET_NULL, verbose_name='widgy content', to='widgy.VersionTracker', null=True)),
            ],
            options={
                'ordering': ('_order',),
                'verbose_name': 'widgy page',
                'verbose_name_plural': 'widgy pages',
            },
            bases=(widgy.contrib.widgy_mezzanine.models.WidgyPageMixin, 'pages.page'),
        ),
        migrations.CreateModel(
            name='UndeletePage',
            fields=[
            ],
            options={
                'ordering': ('_order',),
                'verbose_name': 'restore deleted page',
                'proxy': True,
            },
            bases=('widgy_mezzanine.widgypage',),
        ),
    ]
```
16369ed6a11aaa39e94479b06ed78eb75f5b33e1
src/args.py
src/args.py
```python
from argparse import ArgumentParser
from glob import glob
from os import path


def is_valid_file(f, parser):
    if path.isfile(f):
        return f
    else:
        return parser.optparser.error("%s does not exist!" % f)


def parse_args():
    parser = ArgumentParser()

    parser.add_argument("--non-headless", action="store_true",
                        help="do not use a virtual display")
    parser.add_argument("--crx", metavar='CRX_FILE_PATH', action="store",
                        type=lambda x: is_valid_file(x, parser),
                        default=max(glob("*.crx"), key=path.getmtime),
                        help="path to Chrome extension CRX package")

    return parser.parse_args()
```
```python
from glob import glob
from os import path

import argparse


def is_valid_file(f, parser):
    if path.isfile(f):
        return f
    raise argparse.ArgumentTypeError("%s does not exist!" % f)


def parse_args():
    parser = argparse.ArgumentParser()

    parser.add_argument("--non-headless", action="store_true",
                        help="do not use a virtual display")
    parser.add_argument("--crx", metavar='CRX_FILE_PATH', action="store",
                        type=lambda x: is_valid_file(x, parser),
                        default=max(glob("*.crx"), key=path.getmtime),
                        help="path to Chrome extension CRX package")

    return parser.parse_args()
```
Fix --crx arg error reporting.
Fix --crx arg error reporting.
Python
mpl-2.0
ghostwords/chameleon-crawler,ghostwords/chameleon-crawler,ghostwords/chameleon-crawler
```diff
-from argparse import ArgumentParser
 from glob import glob
 from os import path
+
+import argparse


 def is_valid_file(f, parser):
     if path.isfile(f):
         return f
+    raise argparse.ArgumentTypeError("%s does not exist!" % f)
-    else:
-        return parser.optparser.error("%s does not exist!" % f)


 def parse_args():
-    parser = ArgumentParser()
+    parser = argparse.ArgumentParser()

     parser.add_argument("--non-headless", action="store_true",
                         help="do not use a virtual display")
     parser.add_argument("--crx", metavar='CRX_FILE_PATH', action="store",
                         type=lambda x: is_valid_file(x, parser),
                         default=max(glob("*.crx"), key=path.getmtime),
                         help="path to Chrome extension CRX package")

     return parser.parse_args()
```
Fix --crx arg error reporting.
## Code Before:
```python
from argparse import ArgumentParser
from glob import glob
from os import path


def is_valid_file(f, parser):
    if path.isfile(f):
        return f
    else:
        return parser.optparser.error("%s does not exist!" % f)


def parse_args():
    parser = ArgumentParser()

    parser.add_argument("--non-headless", action="store_true",
                        help="do not use a virtual display")
    parser.add_argument("--crx", metavar='CRX_FILE_PATH', action="store",
                        type=lambda x: is_valid_file(x, parser),
                        default=max(glob("*.crx"), key=path.getmtime),
                        help="path to Chrome extension CRX package")

    return parser.parse_args()
```
## Instruction:
Fix --crx arg error reporting.
## Code After:
```python
from glob import glob
from os import path

import argparse


def is_valid_file(f, parser):
    if path.isfile(f):
        return f
    raise argparse.ArgumentTypeError("%s does not exist!" % f)


def parse_args():
    parser = argparse.ArgumentParser()

    parser.add_argument("--non-headless", action="store_true",
                        help="do not use a virtual display")
    parser.add_argument("--crx", metavar='CRX_FILE_PATH', action="store",
                        type=lambda x: is_valid_file(x, parser),
                        default=max(glob("*.crx"), key=path.getmtime),
                        help="path to Chrome extension CRX package")

    return parser.parse_args()
```
a7be90536618ac52c91f599bb167e05f831cddfb
mangopaysdk/entities/transaction.py
mangopaysdk/entities/transaction.py
```python
from mangopaysdk.entities.entitybase import EntityBase
from mangopaysdk.types.money import Money


class Transaction (EntityBase):
    """Transaction entity.
    Base class for: PayIn, PayOut, Transfer.
    """

    def __init__(self, id = None):
        self.AuthorId = None
        self.CreditedUserId = None
        # Money
        self.DebitedFunds = None
        # Money
        self.CreditedFunds = None
        # Money
        self.Fees = None
        # TransactionType {PAYIN, PAYOUT, TRANSFER}
        self.Type = None
        # TransactionNature {REGULAR, REFUND, REPUDIATION}
        self.Nature = None
        # TransactionStatus {CREATED, SUCCEEDED, FAILED}
        self.Status = None
        self.ResultCode = None
        # timestamp
        self.ExecutionDate = None
        return super(Transaction, self).__init__(id)

    def GetSubObjects(self):
        return {
            'DebitedFunds': 'Money' ,
            'CreditedFunds': 'Money' ,
            'Fees': 'Money'
        }

    def GetReadOnlyProperties(self):
        properties = super(Transaction, self).GetReadOnlyProperties()
        properties.append('Status' )
        properties.append('ResultCode' )
        properties.append('ExecutionDate' )
        return properties
```
```python
from mangopaysdk.entities.entitybase import EntityBase
from mangopaysdk.types.money import Money


class Transaction (EntityBase):
    """Transaction entity.
    Base class for: PayIn, PayOut, Transfer.
    """

    def __init__(self, id = None):
        self.AuthorId = None
        self.CreditedUserId = None
        # Money
        self.DebitedFunds = None
        # Money
        self.CreditedFunds = None
        # Money
        self.Fees = None
        # TransactionType {PAYIN, PAYOUT, TRANSFER}
        self.Type = None
        # TransactionNature {REGULAR, REFUND, REPUDIATION}
        self.Nature = None
        # TransactionStatus {CREATED, SUCCEEDED, FAILED}
        self.Status = None
        self.ResultCode = None
        self.ResultMessage = None
        # timestamp
        self.ExecutionDate = None
        return super(Transaction, self).__init__(id)

    def GetSubObjects(self):
        return {
            'DebitedFunds': 'Money' ,
            'CreditedFunds': 'Money' ,
            'Fees': 'Money'
        }

    def GetReadOnlyProperties(self):
        properties = super(Transaction, self).GetReadOnlyProperties()
        properties.append('Status' )
        properties.append('ResultCode' )
        properties.append('ExecutionDate' )
        return properties
```
Add possibility to get ResultMessage
Add possibility to get ResultMessage
Python
mit
chocopoche/mangopay2-python-sdk,Mangopay/mangopay2-python-sdk
```diff
 from mangopaysdk.entities.entitybase import EntityBase
 from mangopaysdk.types.money import Money


 class Transaction (EntityBase):
     """Transaction entity.
     Base class for: PayIn, PayOut, Transfer.
     """

     def __init__(self, id = None):
         self.AuthorId = None
         self.CreditedUserId = None
         # Money
         self.DebitedFunds = None
         # Money
         self.CreditedFunds = None
         # Money
         self.Fees = None
         # TransactionType {PAYIN, PAYOUT, TRANSFER}
         self.Type = None
         # TransactionNature {REGULAR, REFUND, REPUDIATION}
         self.Nature = None
         # TransactionStatus {CREATED, SUCCEEDED, FAILED}
         self.Status = None
         self.ResultCode = None
+        self.ResultMessage = None
         # timestamp
         self.ExecutionDate = None
         return super(Transaction, self).__init__(id)

     def GetSubObjects(self):
         return {
             'DebitedFunds': 'Money' ,
             'CreditedFunds': 'Money' ,
             'Fees': 'Money'
         }

     def GetReadOnlyProperties(self):
         properties = super(Transaction, self).GetReadOnlyProperties()
         properties.append('Status' )
         properties.append('ResultCode' )
         properties.append('ExecutionDate' )
         return properties
+
```
Add possibility to get ResultMessage
## Code Before:
```python
from mangopaysdk.entities.entitybase import EntityBase
from mangopaysdk.types.money import Money


class Transaction (EntityBase):
    """Transaction entity.
    Base class for: PayIn, PayOut, Transfer.
    """

    def __init__(self, id = None):
        self.AuthorId = None
        self.CreditedUserId = None
        # Money
        self.DebitedFunds = None
        # Money
        self.CreditedFunds = None
        # Money
        self.Fees = None
        # TransactionType {PAYIN, PAYOUT, TRANSFER}
        self.Type = None
        # TransactionNature {REGULAR, REFUND, REPUDIATION}
        self.Nature = None
        # TransactionStatus {CREATED, SUCCEEDED, FAILED}
        self.Status = None
        self.ResultCode = None
        # timestamp
        self.ExecutionDate = None
        return super(Transaction, self).__init__(id)

    def GetSubObjects(self):
        return {
            'DebitedFunds': 'Money' ,
            'CreditedFunds': 'Money' ,
            'Fees': 'Money'
        }

    def GetReadOnlyProperties(self):
        properties = super(Transaction, self).GetReadOnlyProperties()
        properties.append('Status' )
        properties.append('ResultCode' )
        properties.append('ExecutionDate' )
        return properties
```
## Instruction:
Add possibility to get ResultMessage
## Code After:
```python
from mangopaysdk.entities.entitybase import EntityBase
from mangopaysdk.types.money import Money


class Transaction (EntityBase):
    """Transaction entity.
    Base class for: PayIn, PayOut, Transfer.
    """

    def __init__(self, id = None):
        self.AuthorId = None
        self.CreditedUserId = None
        # Money
        self.DebitedFunds = None
        # Money
        self.CreditedFunds = None
        # Money
        self.Fees = None
        # TransactionType {PAYIN, PAYOUT, TRANSFER}
        self.Type = None
        # TransactionNature {REGULAR, REFUND, REPUDIATION}
        self.Nature = None
        # TransactionStatus {CREATED, SUCCEEDED, FAILED}
        self.Status = None
        self.ResultCode = None
        self.ResultMessage = None
        # timestamp
        self.ExecutionDate = None
        return super(Transaction, self).__init__(id)

    def GetSubObjects(self):
        return {
            'DebitedFunds': 'Money' ,
            'CreditedFunds': 'Money' ,
            'Fees': 'Money'
        }

    def GetReadOnlyProperties(self):
        properties = super(Transaction, self).GetReadOnlyProperties()
        properties.append('Status' )
        properties.append('ResultCode' )
        properties.append('ExecutionDate' )
        return properties
```
3cacced39d9cb8bd5d6a2b3db8aa4b5aa1b37f58
jaraco/util/meta.py
jaraco/util/meta.py
```python
from __future__ import unicode_literals


class LeafClassesMeta(type):
    """
    A metaclass for classes that keeps track of all of them that
    aren't base classes.
    """

    _leaf_classes = set()

    def __init__(cls, name, bases, attrs):
        if not hasattr(cls, '_leaf_classes'):
            cls._leaf_classes = set()
        leaf_classes = getattr(cls, '_leaf_classes')
        leaf_classes.add(cls)
        # remove any base classes
        leaf_classes -= set(bases)


class TagRegistered(type):
    """
    As classes of this metaclass are created, they keep a registry in the
    base class of all classes by a class attribute, 'tag'.
    """

    def __init__(cls, name, bases, namespace):
        super(TagRegistered, cls).__init__(name, bases, namespace)
        if not hasattr(cls, '_registry'):
            cls._registry = {}
        attr = getattr(cls, 'tag', None)
        if attr:
            cls._registry[attr] = cls
```
```python
from __future__ import unicode_literals


class LeafClassesMeta(type):
    """
    A metaclass for classes that keeps track of all of them that
    aren't base classes.
    """

    _leaf_classes = set()

    def __init__(cls, name, bases, attrs):
        if not hasattr(cls, '_leaf_classes'):
            cls._leaf_classes = set()
        leaf_classes = getattr(cls, '_leaf_classes')
        leaf_classes.add(cls)
        # remove any base classes
        leaf_classes -= set(bases)


class TagRegistered(type):
    """
    As classes of this metaclass are created, they keep a registry in the
    base class of all classes by a class attribute, indicated by attr_name.
    """
    attr_name = 'tag'

    def __init__(cls, name, bases, namespace):
        super(TagRegistered, cls).__init__(name, bases, namespace)
        if not hasattr(cls, '_registry'):
            cls._registry = {}
        meta = cls.__class__
        attr = getattr(cls, meta.attr_name, None)
        if attr:
            cls._registry[attr] = cls
```
Allow attribute to be customized in TagRegistered
Allow attribute to be customized in TagRegistered
Python
mit
jaraco/jaraco.classes
```diff
 from __future__ import unicode_literals


 class LeafClassesMeta(type):
     """
     A metaclass for classes that keeps track of all of them that
     aren't base classes.
     """

     _leaf_classes = set()

     def __init__(cls, name, bases, attrs):
         if not hasattr(cls, '_leaf_classes'):
             cls._leaf_classes = set()
         leaf_classes = getattr(cls, '_leaf_classes')
         leaf_classes.add(cls)
         # remove any base classes
         leaf_classes -= set(bases)


 class TagRegistered(type):
     """
     As classes of this metaclass are created, they keep a registry in the
-    base class of all classes by a class attribute, 'tag'.
+    base class of all classes by a class attribute, indicated by attr_name.
     """
+    attr_name = 'tag'
+
     def __init__(cls, name, bases, namespace):
         super(TagRegistered, cls).__init__(name, bases, namespace)
         if not hasattr(cls, '_registry'):
             cls._registry = {}
+        meta = cls.__class__
-        attr = getattr(cls, 'tag', None)
+        attr = getattr(cls, meta.attr_name, None)
         if attr:
             cls._registry[attr] = cls
```
Allow attribute to be customized in TagRegistered
## Code Before:
```python
from __future__ import unicode_literals


class LeafClassesMeta(type):
    """
    A metaclass for classes that keeps track of all of them that
    aren't base classes.
    """

    _leaf_classes = set()

    def __init__(cls, name, bases, attrs):
        if not hasattr(cls, '_leaf_classes'):
            cls._leaf_classes = set()
        leaf_classes = getattr(cls, '_leaf_classes')
        leaf_classes.add(cls)
        # remove any base classes
        leaf_classes -= set(bases)


class TagRegistered(type):
    """
    As classes of this metaclass are created, they keep a registry in the
    base class of all classes by a class attribute, 'tag'.
    """

    def __init__(cls, name, bases, namespace):
        super(TagRegistered, cls).__init__(name, bases, namespace)
        if not hasattr(cls, '_registry'):
            cls._registry = {}
        attr = getattr(cls, 'tag', None)
        if attr:
            cls._registry[attr] = cls
```
## Instruction:
Allow attribute to be customized in TagRegistered
## Code After:
```python
from __future__ import unicode_literals


class LeafClassesMeta(type):
    """
    A metaclass for classes that keeps track of all of them that
    aren't base classes.
    """

    _leaf_classes = set()

    def __init__(cls, name, bases, attrs):
        if not hasattr(cls, '_leaf_classes'):
            cls._leaf_classes = set()
        leaf_classes = getattr(cls, '_leaf_classes')
        leaf_classes.add(cls)
        # remove any base classes
        leaf_classes -= set(bases)


class TagRegistered(type):
    """
    As classes of this metaclass are created, they keep a registry in the
    base class of all classes by a class attribute, indicated by attr_name.
    """
    attr_name = 'tag'

    def __init__(cls, name, bases, namespace):
        super(TagRegistered, cls).__init__(name, bases, namespace)
        if not hasattr(cls, '_registry'):
            cls._registry = {}
        meta = cls.__class__
        attr = getattr(cls, meta.attr_name, None)
        if attr:
            cls._registry[attr] = cls
```
7b6838ea292e011f96f5212992d00c1009e1f6b2
examples/gitter_example.py
examples/gitter_example.py
```python
from chatterbot import ChatBot
from chatterbot.trainers import ChatterBotCorpusTrainer
from settings import GITTER

# Uncomment the following lines to enable verbose logging
# import logging
# logging.basicConfig(level=logging.INFO)

chatbot = ChatBot(
    'GitterBot',
    gitter_room=GITTER['ROOM'],
    gitter_api_token=GITTER['API_TOKEN'],
    gitter_only_respond_to_mentions=False,
    input_adapter='chatterbot.input.Gitter',
    output_adapter='chatterbot.output.Gitter'
)

trainer = ChatterBotCorpusTrainer(chatbot)
trainer.train('chatterbot.corpus.english')

# The following loop will execute each time the user enters input
while True:
    try:
        response = chatbot.get_response(None)

    # Press ctrl-c or ctrl-d on the keyboard to exit
    except (KeyboardInterrupt, EOFError, SystemExit):
        break
```
```python
from chatterbot import ChatBot
from chatterbot.trainers import ChatterBotCorpusTrainer
from settings import GITTER

# Uncomment the following lines to enable verbose logging
# import logging
# logging.basicConfig(level=logging.INFO)


'''
To use this example, create a new file called settings.py.
In settings.py define the following:

GITTER = {
    "API_TOKEN": "my-api-token",
    "ROOM": "example_project/test_room"
}
'''

chatbot = ChatBot(
    'GitterBot',
    gitter_room=GITTER['ROOM'],
    gitter_api_token=GITTER['API_TOKEN'],
    gitter_only_respond_to_mentions=False,
    input_adapter='chatterbot.input.Gitter',
    output_adapter='chatterbot.output.Gitter'
)

trainer = ChatterBotCorpusTrainer(chatbot)
trainer.train('chatterbot.corpus.english')

# The following loop will execute each time the user enters input
while True:
    try:
        response = chatbot.get_response(None)

    # Press ctrl-c or ctrl-d on the keyboard to exit
    except (KeyboardInterrupt, EOFError, SystemExit):
        break
```
Add better instructions to the Gitter example
Add better instructions to the Gitter example
Python
bsd-3-clause
gunthercox/ChatterBot,vkosuri/ChatterBot
```diff
 from chatterbot import ChatBot
 from chatterbot.trainers import ChatterBotCorpusTrainer
 from settings import GITTER

 # Uncomment the following lines to enable verbose logging
 # import logging
 # logging.basicConfig(level=logging.INFO)
+
+
+'''
+To use this example, create a new file called settings.py.
+In settings.py define the following:
+
+GITTER = {
+    "API_TOKEN": "my-api-token",
+    "ROOM": "example_project/test_room"
+}
+'''

 chatbot = ChatBot(
     'GitterBot',
     gitter_room=GITTER['ROOM'],
     gitter_api_token=GITTER['API_TOKEN'],
     gitter_only_respond_to_mentions=False,
     input_adapter='chatterbot.input.Gitter',
     output_adapter='chatterbot.output.Gitter'
 )

 trainer = ChatterBotCorpusTrainer(chatbot)
 trainer.train('chatterbot.corpus.english')

 # The following loop will execute each time the user enters input
 while True:
     try:
         response = chatbot.get_response(None)

     # Press ctrl-c or ctrl-d on the keyboard to exit
     except (KeyboardInterrupt, EOFError, SystemExit):
         break
```
Add better instructions to the Gitter example
## Code Before:
```python
from chatterbot import ChatBot
from chatterbot.trainers import ChatterBotCorpusTrainer
from settings import GITTER

# Uncomment the following lines to enable verbose logging
# import logging
# logging.basicConfig(level=logging.INFO)

chatbot = ChatBot(
    'GitterBot',
    gitter_room=GITTER['ROOM'],
    gitter_api_token=GITTER['API_TOKEN'],
    gitter_only_respond_to_mentions=False,
    input_adapter='chatterbot.input.Gitter',
    output_adapter='chatterbot.output.Gitter'
)

trainer = ChatterBotCorpusTrainer(chatbot)
trainer.train('chatterbot.corpus.english')

# The following loop will execute each time the user enters input
while True:
    try:
        response = chatbot.get_response(None)

    # Press ctrl-c or ctrl-d on the keyboard to exit
    except (KeyboardInterrupt, EOFError, SystemExit):
        break
```
## Instruction:
Add better instructions to the Gitter example
## Code After:
```python
from chatterbot import ChatBot
from chatterbot.trainers import ChatterBotCorpusTrainer
from settings import GITTER

# Uncomment the following lines to enable verbose logging
# import logging
# logging.basicConfig(level=logging.INFO)


'''
To use this example, create a new file called settings.py.
In settings.py define the following:

GITTER = {
    "API_TOKEN": "my-api-token",
    "ROOM": "example_project/test_room"
}
'''

chatbot = ChatBot(
    'GitterBot',
    gitter_room=GITTER['ROOM'],
    gitter_api_token=GITTER['API_TOKEN'],
    gitter_only_respond_to_mentions=False,
    input_adapter='chatterbot.input.Gitter',
    output_adapter='chatterbot.output.Gitter'
)

trainer = ChatterBotCorpusTrainer(chatbot)
trainer.train('chatterbot.corpus.english')

# The following loop will execute each time the user enters input
while True:
    try:
        response = chatbot.get_response(None)

    # Press ctrl-c or ctrl-d on the keyboard to exit
    except (KeyboardInterrupt, EOFError, SystemExit):
        break
```
260a5601a9b2990374d2f97d92898236e0b9342e
tests/profiling_test_script.py
tests/profiling_test_script.py
```python
from __future__ import (
    print_function, division, unicode_literals, absolute_import)

import subdir.profiling_test_script2 as script2


@profile
def fact(n):
    result = 1
    for i in xrange(2, n + 1):
        result *= i
    return result


@profile
def sum_(n):
    result = 0
    for i in xrange(1, n + 1):
        result += i
    return result


if __name__ == "__main__":
    print(fact(120))
    print(sum_(120))
    print(script2.fact2(120))
    print(script2.sum2(120))
```
```python
from __future__ import (
    print_function, division, unicode_literals, absolute_import)

import subdir.profiling_test_script2 as script2


@profile
def fact(n):
    result = 1
    for i in xrange(2, n // 4):
        result *= i
    result = 1
    for i in xrange(2, n // 16):
        result *= i
    result = 1
    for i in xrange(2, n + 1):
        result *= i
    return result


@profile
def sum_(n):
    result = 0
    for i in xrange(1, n + 1):
        result += i
    return result


if __name__ == "__main__":
    print(fact(120))
    print(sum_(120))
    print(script2.fact2(120))
    print(script2.sum2(120))
```
Add diversity to test script
Add diversity to test script
Python
mit
jitseniesen/spyder-memory-profiler,jitseniesen/spyder-memory-profiler,Nodd/spyder_line_profiler,spyder-ide/spyder.line_profiler,spyder-ide/spyder.memory_profiler,spyder-ide/spyder.line-profiler,Nodd/spyder.line_profiler
```diff
 from __future__ import (
     print_function, division, unicode_literals, absolute_import)

 import subdir.profiling_test_script2 as script2


 @profile
 def fact(n):
+    result = 1
+    for i in xrange(2, n // 4):
+        result *= i
+    result = 1
+    for i in xrange(2, n // 16):
+        result *= i
     result = 1
     for i in xrange(2, n + 1):
         result *= i
     return result


 @profile
 def sum_(n):
     result = 0
     for i in xrange(1, n + 1):
         result += i
     return result


 if __name__ == "__main__":
     print(fact(120))
     print(sum_(120))
     print(script2.fact2(120))
     print(script2.sum2(120))
```
Add diversity to test script
## Code Before: from __future__ import ( print_function, division, unicode_literals, absolute_import) import subdir.profiling_test_script2 as script2 @profile def fact(n): result = 1 for i in xrange(2, n + 1): result *= i return result @profile def sum_(n): result = 0 for i in xrange(1, n + 1): result += i return result if __name__ == "__main__": print(fact(120)) print(sum_(120)) print(script2.fact2(120)) print(script2.sum2(120)) ## Instruction: Add diversity to test script ## Code After: from __future__ import ( print_function, division, unicode_literals, absolute_import) import subdir.profiling_test_script2 as script2 @profile def fact(n): result = 1 for i in xrange(2, n // 4): result *= i result = 1 for i in xrange(2, n // 16): result *= i result = 1 for i in xrange(2, n + 1): result *= i return result @profile def sum_(n): result = 0 for i in xrange(1, n + 1): result += i return result if __name__ == "__main__": print(fact(120)) print(sum_(120)) print(script2.fact2(120)) print(script2.sum2(120))
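One assumption the script above never states: the bare @profile decorator is injected into builtins by the profiling harness (kernprof -l, or the Spyder profiler plugins these repositories belong to), so running the file directly raises NameError. A minimal guard, sketched here in the script's own Python 2; it is not part of the commit:

import __builtin__

if not hasattr(__builtin__, 'profile'):
    __builtin__.profile = lambda func: func  # no-op stand-in when no profiler is attached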
97a67e022d094743e806896386bdbe317cb56fb6
gitcloner.py
gitcloner.py
import sys from gitaccount import GitAccount def main(): if len(sys.argv) < 2: print("""Usage: gitcloner.py [OPTION] [NAME] OPTIONS: -u - for user repositories -o - for organization repositories NAME: Username or Organization Name """) sys.exit(1) args = sys.argv[1:3] repoType, name = args if repoType == '-u': repoType = 'user' elif repoType == '-o': repoType = 'org' else: raise ValueError() account = GitAccount(repoType, name) account.cloneRepos() if __name__ == '__main__': main()
import sys import argparse from gitaccount import GitAccount def main(): parser = argparse.ArgumentParser( prog='gitcloner', description='Clone all the repositories from a github user/org\naccount to the current directory') group = parser.add_mutually_exclusive_group() group.add_argument('-u', '--user', help='For user accounts [DEFAULT]', action='store_true') group.add_argument('-o', '--org', help='For organization accounts', action='store_true') parser.add_argument('name', help='name of the user / organization') args = parser.parse_args() if not(args.user or args.org): args.user = True print('Default account type is user account') if args.user: print('Username: {}'.format(args.name)) accType = 'user' else: print('Organization: {}'.format(args.name)) accType = 'org' account = GitAccount(accType, args.name) account.cloneRepos() if __name__ == '__main__': main()
Use argparse instead of sys.argv
Use argparse instead of sys.argv
Python
mit
shakib609/gitcloner
import sys + import argparse from gitaccount import GitAccount def main(): - if len(sys.argv) < 2: - print("""Usage: - gitcloner.py [OPTION] [NAME] + parser = argparse.ArgumentParser( + prog='gitcloner', + description='Clone all the repositories from a github user/org\naccount to the current directory') + group = parser.add_mutually_exclusive_group() + group.add_argument('-u', '--user', help='For user accounts [DEFAULT]', + action='store_true') + group.add_argument('-o', '--org', help='For organization accounts', + action='store_true') + parser.add_argument('name', help='name of the user / organization') - OPTIONS: - -u - for user repositories - -o - for organization repositories - NAME: - Username or Organization Name - """) - sys.exit(1) - args = sys.argv[1:3] - repoType, name = args - if repoType == '-u': + args = parser.parse_args() + + if not(args.user or args.org): + args.user = True + print('Default account type is user account') + + if args.user: + print('Username: {}'.format(args.name)) - repoType = 'user' + accType = 'user' - elif repoType == '-o': - repoType = 'org' else: - raise ValueError() + print('Organization: {}'.format(args.name)) + accType = 'org' - account = GitAccount(repoType, name) + account = GitAccount(accType, args.name) account.cloneRepos() if __name__ == '__main__': main()
Use argparse instead of sys.argv
## Code Before: import sys from gitaccount import GitAccount def main(): if len(sys.argv) < 2: print("""Usage: gitcloner.py [OPTION] [NAME] OPTIONS: -u - for user repositories -o - for organization repositories NAME: Username or Organization Name """) sys.exit(1) args = sys.argv[1:3] repoType, name = args if repoType == '-u': repoType = 'user' elif repoType == '-o': repoType = 'org' else: raise ValueError() account = GitAccount(repoType, name) account.cloneRepos() if __name__ == '__main__': main() ## Instruction: Use argparse instead of sys.argv ## Code After: import sys import argparse from gitaccount import GitAccount def main(): parser = argparse.ArgumentParser( prog='gitcloner', description='Clone all the repositories from a github user/org\naccount to the current directory') group = parser.add_mutually_exclusive_group() group.add_argument('-u', '--user', help='For user accounts [DEFAULT]', action='store_true') group.add_argument('-o', '--org', help='For organization accounts', action='store_true') parser.add_argument('name', help='name of the user / organization') args = parser.parse_args() if not(args.user or args.org): args.user = True print('Default account type is user account') if args.user: print('Username: {}'.format(args.name)) accType = 'user' else: print('Organization: {}'.format(args.name)) accType = 'org' account = GitAccount(accType, args.name) account.cloneRepos() if __name__ == '__main__': main()
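A quick illustration of what the mutually exclusive group in the rewrite buys; this is an editorial sketch rather than repository code, and the account name is invented:

import argparse

parser = argparse.ArgumentParser(prog='gitcloner')
group = parser.add_mutually_exclusive_group()
group.add_argument('-u', '--user', action='store_true')
group.add_argument('-o', '--org', action='store_true')
parser.add_argument('name')

print(parser.parse_args(['-u', 'torvalds']))  # Namespace(name='torvalds', org=False, user=True)
# parser.parse_args(['-u', '-o', 'x']) exits with:
# error: argument -o/--org: not allowed with argument -u/--user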
d42b9da06d5cde89a6116d711fc6ae216256cabc
shell/view/home/IconLayout.py
shell/view/home/IconLayout.py
import random class IconLayout: def __init__(self, width, height): self._icons = [] self._width = width self._height = height def add_icon(self, icon): self._icons.append(icon) self._layout_icon(icon) def remove_icon(self, icon): self._icons.remove(icon) def _is_valid_position(self, icon, x, y): icon_size = icon.props.size border = 20 if not (border < x < self._width - icon_size - border and \ border < y < self._height - icon_size - border): return False return True def _layout_icon(self, icon): while True: x = random.random() * self._width y = random.random() * self._height if self._is_valid_position(icon, x, y): break icon.props.x = x icon.props.y = y
import random class IconLayout: def __init__(self, width, height): self._icons = [] self._width = width self._height = height def add_icon(self, icon): self._icons.append(icon) self._layout_icon(icon) def remove_icon(self, icon): self._icons.remove(icon) def _is_valid_position(self, icon, x, y): icon_size = icon.get_property('size') border = 20 if not (border < x < self._width - icon_size - border and \ border < y < self._height - icon_size - border): return False return True def _layout_icon(self, icon): while True: x = random.random() * self._width y = random.random() * self._height if self._is_valid_position(icon, x, y): break icon.set_property('x', x) icon.set_property('y', y)
Use get/set_property rather than direct accessors
Use get/set_property rather than direct accessors
Python
lgpl-2.1
Daksh/sugar-toolkit-gtk3,manuq/sugar-toolkit-gtk3,tchx84/sugar-toolkit-gtk3,quozl/sugar-toolkit-gtk3,tchx84/sugar-toolkit-gtk3,i5o/sugar-toolkit-gtk3,puneetgkaur/backup_sugar_sugartoolkit,quozl/sugar-toolkit-gtk3,godiard/sugar-toolkit-gtk3,tchx84/debian-pkg-sugar-toolkit,tchx84/debian-pkg-sugar-toolkit,ceibal-tatu/sugar-toolkit,gusDuarte/sugar-toolkit-gtk3,tchx84/debian-pkg-sugar-toolkit,tchx84/debian-pkg-sugar-toolkit-gtk3,tchx84/debian-pkg-sugar-toolkit-gtk3,ceibal-tatu/sugar-toolkit-gtk3,manuq/sugar-toolkit-gtk3,samdroid-apps/sugar-toolkit-gtk3,puneetgkaur/sugar-toolkit-gtk3,sugarlabs/sugar-toolkit-gtk3,samdroid-apps/sugar-toolkit-gtk3,godiard/sugar-toolkit-gtk3,sugarlabs/sugar-toolkit,i5o/sugar-toolkit-gtk3,Daksh/sugar-toolkit-gtk3,godiard/sugar-toolkit-gtk3,tchx84/debian-pkg-sugar-toolkit-gtk3,ceibal-tatu/sugar-toolkit-gtk3,ceibal-tatu/sugar-toolkit,gusDuarte/sugar-toolkit-gtk3,puneetgkaur/sugar-toolkit-gtk3,quozl/sugar-toolkit-gtk3,i5o/sugar-toolkit-gtk3,i5o/sugar-toolkit-gtk3,Daksh/sugar-toolkit-gtk3,sugarlabs/sugar-toolkit-gtk3,samdroid-apps/sugar-toolkit-gtk3,puneetgkaur/sugar-toolkit-gtk3,samdroid-apps/sugar-toolkit-gtk3,gusDuarte/sugar-toolkit-gtk3,gusDuarte/sugar-toolkit-gtk3,ceibal-tatu/sugar-toolkit-gtk3,sugarlabs/sugar-toolkit,sugarlabs/sugar-toolkit,sugarlabs/sugar-toolkit-gtk3,quozl/sugar-toolkit-gtk3,manuq/sugar-toolkit-gtk3,puneetgkaur/backup_sugar_sugartoolkit,puneetgkaur/backup_sugar_sugartoolkit,sugarlabs/sugar-toolkit,ceibal-tatu/sugar-toolkit,tchx84/sugar-toolkit-gtk3
import random class IconLayout: def __init__(self, width, height): self._icons = [] self._width = width self._height = height def add_icon(self, icon): self._icons.append(icon) self._layout_icon(icon) def remove_icon(self, icon): self._icons.remove(icon) def _is_valid_position(self, icon, x, y): - icon_size = icon.props.size + icon_size = icon.get_property('size') border = 20 if not (border < x < self._width - icon_size - border and \ border < y < self._height - icon_size - border): return False return True def _layout_icon(self, icon): while True: x = random.random() * self._width y = random.random() * self._height if self._is_valid_position(icon, x, y): break - icon.props.x = x - icon.props.y = y + icon.set_property('x', x) + icon.set_property('y', y)
Use get/set_property rather than direct accessors
## Code Before: import random class IconLayout: def __init__(self, width, height): self._icons = [] self._width = width self._height = height def add_icon(self, icon): self._icons.append(icon) self._layout_icon(icon) def remove_icon(self, icon): self._icons.remove(icon) def _is_valid_position(self, icon, x, y): icon_size = icon.props.size border = 20 if not (border < x < self._width - icon_size - border and \ border < y < self._height - icon_size - border): return False return True def _layout_icon(self, icon): while True: x = random.random() * self._width y = random.random() * self._height if self._is_valid_position(icon, x, y): break icon.props.x = x icon.props.y = y ## Instruction: Use get/set_property rather than direct accessors ## Code After: import random class IconLayout: def __init__(self, width, height): self._icons = [] self._width = width self._height = height def add_icon(self, icon): self._icons.append(icon) self._layout_icon(icon) def remove_icon(self, icon): self._icons.remove(icon) def _is_valid_position(self, icon, x, y): icon_size = icon.get_property('size') border = 20 if not (border < x < self._width - icon_size - border and \ border < y < self._height - icon_size - border): return False return True def _layout_icon(self, icon): while True: x = random.random() * self._width y = random.random() * self._height if self._is_valid_position(icon, x, y): break icon.set_property('x', x) icon.set_property('y', y)
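Context for the change above: props attribute access and get_property/set_property reach the same GObject property table, so the commit is purely stylistic. A sketch of the equivalence using plain PyGObject, which is assumed installed here; the sugar canvas icons in the original expose 'size', 'x' and 'y' through the same machinery:

import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk

label = Gtk.Label()
label.set_property('label', 'hello')   # the style this commit adopts
assert label.props.label == 'hello'    # the attribute style it replaces
label.props.label = 'world'
assert label.get_property('label') == 'world'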
5c61d7f125078cb6b3bd0c5700ae9219baab0078
webapp/tests/test_dashboard.py
webapp/tests/test_dashboard.py
from django.core.urlresolvers import reverse from django.test import TestCase class DashboardTest(TestCase): def test_dashboard(self): url = reverse('graphite.dashboard.views.dashboard') response = self.client.get(url) self.assertEqual(response.status_code, 200)
from django.core.urlresolvers import reverse from django.test import TestCase class DashboardTest(TestCase): def test_dashboard(self): url = reverse('dashboard') response = self.client.get(url) self.assertEqual(response.status_code, 200)
Update reverse call to use named URL
Update reverse call to use named URL
Python
apache-2.0
redice/graphite-web,dbn/graphite-web,Skyscanner/graphite-web,penpen/graphite-web,lyft/graphite-web,esnet/graphite-web,bpaquet/graphite-web,atnak/graphite-web,section-io/graphite-web,kkdk5535/graphite-web,cosm0s/graphite-web,cgvarela/graphite-web,gwaldo/graphite-web,redice/graphite-web,edwardmlyte/graphite-web,atnak/graphite-web,criteo-forks/graphite-web,bmhatfield/graphite-web,obfuscurity/graphite-web,AICIDNN/graphite-web,edwardmlyte/graphite-web,gwaldo/graphite-web,penpen/graphite-web,cbowman0/graphite-web,Skyscanner/graphite-web,JeanFred/graphite-web,cgvarela/graphite-web,bruce-lyft/graphite-web,criteo-forks/graphite-web,phreakocious/graphite-web,lyft/graphite-web,mcoolive/graphite-web,piotr1212/graphite-web,esnet/graphite-web,zBMNForks/graphite-web,axibase/graphite-web,blacked/graphite-web,AICIDNN/graphite-web,kkdk5535/graphite-web,atnak/graphite-web,ZelunZhang/graphite-web,graphite-server/graphite-web,cgvarela/graphite-web,graphite-server/graphite-web,zBMNForks/graphite-web,synedge/graphite-web,pu239ppy/graphite-web,axibase/graphite-web,gwaldo/graphite-web,jssjr/graphite-web,esnet/graphite-web,Skyscanner/graphite-web,bmhatfield/graphite-web,DanCech/graphite-web,lfckop/graphite-web,JeanFred/graphite-web,edwardmlyte/graphite-web,brutasse/graphite-web,esnet/graphite-web,criteo-forks/graphite-web,mcoolive/graphite-web,pu239ppy/graphite-web,bmhatfield/graphite-web,edwardmlyte/graphite-web,ZelunZhang/graphite-web,DanCech/graphite-web,section-io/graphite-web,penpen/graphite-web,cbowman0/graphite-web,Invoca/graphite-web,Squarespace/graphite-web,lyft/graphite-web,synedge/graphite-web,bbc/graphite-web,piotr1212/graphite-web,pu239ppy/graphite-web,johnseekins/graphite-web,JeanFred/graphite-web,DanCech/graphite-web,obfuscurity/graphite-web,lyft/graphite-web,cosm0s/graphite-web,Invoca/graphite-web,deniszh/graphite-web,krux/graphite-web,bbc/graphite-web,goir/graphite-web,Squarespace/graphite-web,DanCech/graphite-web,Aloomaio/graphite-web,bpaquet/graphite-web,johnseekins/graphite-web,JeanFred/graphite-web,piotr1212/graphite-web,goir/graphite-web,cbowman0/graphite-web,drax68/graphite-web,criteo-forks/graphite-web,synedge/graphite-web,synedge/graphite-web,nkhuyu/graphite-web,JeanFred/graphite-web,phreakocious/graphite-web,Aloomaio/graphite-web,bbc/graphite-web,phreakocious/graphite-web,cosm0s/graphite-web,phreakocious/graphite-web,brutasse/graphite-web,graphite-server/graphite-web,Squarespace/graphite-web,gwaldo/graphite-web,cosm0s/graphite-web,deniszh/graphite-web,pu239ppy/graphite-web,blacked/graphite-web,Invoca/graphite-web,krux/graphite-web,AICIDNN/graphite-web,redice/graphite-web,piotr1212/graphite-web,cbowman0/graphite-web,section-io/graphite-web,obfuscurity/graphite-web,redice/graphite-web,section-io/graphite-web,deniszh/graphite-web,krux/graphite-web,nkhuyu/graphite-web,gwaldo/graphite-web,nkhuyu/graphite-web,graphite-server/graphite-web,deniszh/graphite-web,goir/graphite-web,Aloomaio/graphite-web,drax68/graphite-web,markolson/graphite-web,krux/graphite-web,obfuscurity/graphite-web,dbn/graphite-web,bruce-lyft/graphite-web,markolson/graphite-web,bmhatfield/graphite-web,brutasse/graphite-web,atnak/graphite-web,Skyscanner/graphite-web,krux/graphite-web,blacked/graphite-web,kkdk5535/graphite-web,jssjr/graphite-web,goir/graphite-web,lfckop/graphite-web,bmhatfield/graphite-web,cosm0s/graphite-web,Squarespace/graphite-web,disqus/graphite-web,ZelunZhang/graphite-web,Aloomaio/graphite-web,axibase/graphite-web,disqus/graphite-web,bbc/graphite-web,ZelunZhang/graphite-web,redice/graphite-web,goir/graphite-web,bruce-lyft/graphite-web,ZelunZhang/graphite-web,AICIDNN/graphite-web,axibase/graphite-web,mcoolive/graphite-web,disqus/graphite-web,johnseekins/graphite-web,drax68/graphite-web,drax68/graphite-web,lyft/graphite-web,graphite-project/graphite-web,lfckop/graphite-web,bmhatfield/graphite-web,blacked/graphite-web,axibase/graphite-web,axibase/graphite-web,krux/graphite-web,disqus/graphite-web,drax68/graphite-web,bbc/graphite-web,criteo-forks/graphite-web,pu239ppy/graphite-web,obfuscurity/graphite-web,nkhuyu/graphite-web,drax68/graphite-web,jssjr/graphite-web,obfuscurity/graphite-web,kkdk5535/graphite-web,jssjr/graphite-web,bruce-lyft/graphite-web,Aloomaio/graphite-web,kkdk5535/graphite-web,zBMNForks/graphite-web,DanCech/graphite-web,dbn/graphite-web,Squarespace/graphite-web,AICIDNN/graphite-web,zBMNForks/graphite-web,section-io/graphite-web,mcoolive/graphite-web,edwardmlyte/graphite-web,synedge/graphite-web,graphite-project/graphite-web,atnak/graphite-web,nkhuyu/graphite-web,brutasse/graphite-web,jssjr/graphite-web,disqus/graphite-web,johnseekins/graphite-web,synedge/graphite-web,Skyscanner/graphite-web,Skyscanner/graphite-web,lfckop/graphite-web,zBMNForks/graphite-web,deniszh/graphite-web,nkhuyu/graphite-web,penpen/graphite-web,dbn/graphite-web,blacked/graphite-web,brutasse/graphite-web,deniszh/graphite-web,graphite-server/graphite-web,bpaquet/graphite-web,gwaldo/graphite-web,goir/graphite-web,phreakocious/graphite-web,piotr1212/graphite-web,dbn/graphite-web
from django.core.urlresolvers import reverse from django.test import TestCase class DashboardTest(TestCase): def test_dashboard(self): - url = reverse('graphite.dashboard.views.dashboard') + url = reverse('dashboard') response = self.client.get(url) self.assertEqual(response.status_code, 200)
Update reverse call to use named URL
## Code Before: from django.core.urlresolvers import reverse from django.test import TestCase class DashboardTest(TestCase): def test_dashboard(self): url = reverse('graphite.dashboard.views.dashboard') response = self.client.get(url) self.assertEqual(response.status_code, 200) ## Instruction: Update reverse call to use named URL ## Code After: from django.core.urlresolvers import reverse from django.test import TestCase class DashboardTest(TestCase): def test_dashboard(self): url = reverse('dashboard') response = self.client.get(url) self.assertEqual(response.status_code, 200)
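The test can only pass if the URLconf names the pattern, so the project presumably pairs this change with something like the sketch below (Django 1.x url() syntax; the regex and import path are inferred from the old dotted string, not copied from the repository):

from django.conf.urls import url
from graphite.dashboard.views import dashboard

urlpatterns = [
    url(r'^dashboard/$', dashboard, name='dashboard'),  # name= is what reverse('dashboard') looks up
]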
4124297475fb7d77bf492e721a74fcfa02547a14
benchmark/bench_logger_level_low.py
benchmark/bench_logger_level_low.py
"""Benchmarks too low logger levels""" from logbook import Logger, ERROR log = Logger('Test logger') log.level = ERROR def run(): for x in xrange(500): log.warning('this is not handled')
"""Benchmarks too low logger levels""" from logbook import Logger, StreamHandler, ERROR from cStringIO import StringIO log = Logger('Test logger') log.level = ERROR def run(): out = StringIO() with StreamHandler(out): for x in xrange(500): log.warning('this is not handled')
Create a stream handler, even though it's not used, to have the same overhead on both logbook and logging
Create a stream handler, even though it's not used, to have the same overhead on both logbook and logging
Python
bsd-3-clause
DasIch/logbook,maykinmedia/logbook,alonho/logbook,fayazkhan/logbook,maykinmedia/logbook,Rafiot/logbook,dvarrazzo/logbook,mbr/logbook,mitsuhiko/logbook,dvarrazzo/logbook,RazerM/logbook,FintanH/logbook,omergertel/logbook,dommert/logbook,DasIch/logbook,alex/logbook,alonho/logbook,pombredanne/logbook,alonho/logbook,alex/logbook,DasIch/logbook,mbr/logbook,fayazkhan/logbook,narfdotpl/logbook,redtoad/logbook,redtoad/logbook,Rafiot/logbook,omergertel/logbook,Rafiot/logbook,omergertel/logbook
"""Benchmarks too low logger levels""" - from logbook import Logger, ERROR + from logbook import Logger, StreamHandler, ERROR + from cStringIO import StringIO log = Logger('Test logger') log.level = ERROR def run(): + out = StringIO() + with StreamHandler(out): - for x in xrange(500): + for x in xrange(500): - log.warning('this is not handled') + log.warning('this is not handled')
Create a stream handler even though it's not used to have the same overhead on both logbook and logging
## Code Before: """Benchmarks too low logger levels""" from logbook import Logger, ERROR log = Logger('Test logger') log.level = ERROR def run(): for x in xrange(500): log.warning('this is not handled') ## Instruction: Create a stream handler even though it's not used to have the same overhead on both logbook and logging ## Code After: """Benchmarks too low logger levels""" from logbook import Logger, StreamHandler, ERROR from cStringIO import StringIO log = Logger('Test logger') log.level = ERROR def run(): out = StringIO() with StreamHandler(out): for x in xrange(500): log.warning('this is not handled')
b71db5eb72fd5529be060d5f90ad744f0ea0870e
library.py
library.py
class Library: """This class represents a simaris target and is initialized with data in JSON format """ def __init__(self, data): self.target = data['CurrentTarget']['EnemyType'] self.scans = data['CurrentTarget']['PersonalScansRequired'] self.progress = data['CurrentTarget']['ProgressPercent'] def __str__(self): """Returns a string with all the information about this alert """ library_string = ('Target: {0}\n' 'Scans needed: {1}\n' 'Progress: {2:.2f}%' ) return library_string.format(self.target, self.scans, self.progress)
class Library: """This class represents a simaris target and is initialized with data in JSON format """ def __init__(self, data): if 'CurrentTarget' in data: self.target = data['CurrentTarget']['EnemyType'] self.scans = data['CurrentTarget']['PersonalScansRequired'] self.progress = data['CurrentTarget']['ProgressPercent'] self.active = True else: self.active = False def __str__(self): """Returns a string with all the information about this alert """ if not self.is_active(): return None library_string = ('Target: {0}\n' 'Scans needed: {1}\n' 'Progress: {2:.2f}%' ) return library_string.format(self.target, self.scans, self.progress) def is_active(self): """ Returns True if there is a currently active target, False otherwise """ return self.active
Add is_active() method to the Library class
Add is_active() method to the Library class
Python
mit
pabletos/Hubot-Warframe,pabletos/Hubot-Warframe
class Library: """This class represents a simaris target and is initialized with data in JSON format """ def __init__(self, data): + if 'CurrentTarget' in data: - self.target = data['CurrentTarget']['EnemyType'] + self.target = data['CurrentTarget']['EnemyType'] - self.scans = data['CurrentTarget']['PersonalScansRequired'] + self.scans = data['CurrentTarget']['PersonalScansRequired'] - self.progress = data['CurrentTarget']['ProgressPercent'] + self.progress = data['CurrentTarget']['ProgressPercent'] + self.active = True + else: + self.active = False def __str__(self): """Returns a string with all the information about this alert """ + if not self.is_active(): + return None library_string = ('Target: {0}\n' 'Scans needed: {1}\n' 'Progress: {2:.2f}%' ) return library_string.format(self.target, self.scans, self.progress) + + def is_active(self): + """ Returns True if there is a currently active target, False otherwise + + """ + return self.active +
Add is_active() method to the Library class
## Code Before: class Library: """This class represents a simaris target and is initialized with data in JSON format """ def __init__(self, data): self.target = data['CurrentTarget']['EnemyType'] self.scans = data['CurrentTarget']['PersonalScansRequired'] self.progress = data['CurrentTarget']['ProgressPercent'] def __str__(self): """Returns a string with all the information about this alert """ library_string = ('Target: {0}\n' 'Scans needed: {1}\n' 'Progress: {2:.2f}%' ) return library_string.format(self.target, self.scans, self.progress) ## Instruction: Add is_active() method to the Library class ## Code After: class Library: """This class represents a simaris target and is initialized with data in JSON format """ def __init__(self, data): if 'CurrentTarget' in data: self.target = data['CurrentTarget']['EnemyType'] self.scans = data['CurrentTarget']['PersonalScansRequired'] self.progress = data['CurrentTarget']['ProgressPercent'] self.active = True else: self.active = False def __str__(self): """Returns a string with all the information about this alert """ if not self.is_active(): return None library_string = ('Target: {0}\n' 'Scans needed: {1}\n' 'Progress: {2:.2f}%' ) return library_string.format(self.target, self.scans, self.progress) def is_active(self): """ Returns True if there is a currently active target, False otherwise """ return self.active
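A usage sketch for the new guard, assuming library.py is importable as library; the payload is invented, shaped only after the keys the class reads. Note that __str__ returning None is itself fragile (print would raise TypeError on an inactive target), which is why callers are expected to check is_active() first:

from library import Library

with_target = {'CurrentTarget': {'EnemyType': 'Elite Crewman',
                                 'PersonalScansRequired': 10,
                                 'ProgressPercent': 42.5}}

lib = Library(with_target)
if lib.is_active():
    print(lib)                # Target / Scans needed / Progress block

idle = Library({})            # no 'CurrentTarget' key
print(idle.is_active())       # False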
5c6f277caf3496da5f10b0150abb2c3b856e6584
nagare/services/prg.py
nagare/services/prg.py
from nagare.services import plugin class PRGService(plugin.Plugin): LOAD_PRIORITY = 120 @staticmethod def handle_request(chain, request, response, session_id, previous_state_id, **params): if (request.method == 'POST') and not request.is_xhr: response = request.create_redirect_response( response=response, _s=session_id, _c='%05d' % previous_state_id ) response.use_same_state = True else: response = chain.next( request=request, response=response, session_id=session_id, previous_state_id=previous_state_id, **params ) return response
from nagare.services import plugin class PRGService(plugin.Plugin): LOAD_PRIORITY = 120 @staticmethod def handle_request(chain, request, response, session_id, state_id, **params): if (request.method == 'POST') and not request.is_xhr: response = request.create_redirect_response( response=response, _s=session_id, _c='%05d' % state_id, ) else: response = chain.next( request=request, response=response, session_id=session_id, state_id=state_id, **params ) return response
Store in the current state, not the previous one
Store in the current state, not the previous one
Python
bsd-3-clause
nagareproject/core,nagareproject/core
from nagare.services import plugin class PRGService(plugin.Plugin): LOAD_PRIORITY = 120 @staticmethod - def handle_request(chain, request, response, session_id, previous_state_id, **params): + def handle_request(chain, request, response, session_id, state_id, **params): if (request.method == 'POST') and not request.is_xhr: response = request.create_redirect_response( response=response, _s=session_id, - _c='%05d' % previous_state_id + _c='%05d' % state_id, ) - response.use_same_state = True else: response = chain.next( request=request, response=response, session_id=session_id, - previous_state_id=previous_state_id, + state_id=state_id, **params ) return response
Store in the current state, not the previous one
## Code Before: from nagare.services import plugin class PRGService(plugin.Plugin): LOAD_PRIORITY = 120 @staticmethod def handle_request(chain, request, response, session_id, previous_state_id, **params): if (request.method == 'POST') and not request.is_xhr: response = request.create_redirect_response( response=response, _s=session_id, _c='%05d' % previous_state_id ) response.use_same_state = True else: response = chain.next( request=request, response=response, session_id=session_id, previous_state_id=previous_state_id, **params ) return response ## Instruction: Store in the current state, not the previous one ## Code After: from nagare.services import plugin class PRGService(plugin.Plugin): LOAD_PRIORITY = 120 @staticmethod def handle_request(chain, request, response, session_id, state_id, **params): if (request.method == 'POST') and not request.is_xhr: response = request.create_redirect_response( response=response, _s=session_id, _c='%05d' % state_id, ) else: response = chain.next( request=request, response=response, session_id=session_id, state_id=state_id, **params ) return response
313aee17c8e2e1c86b96b40017ac4618c66df463
__init__.py
__init__.py
ENTITIES_INDEX = ['men', 'foy'] # Some variables needed by the test case plugins CURRENCY = u"DT" # Some variables needed by the test case graph widget # REVENUES_CATEGORIES XAXIS_PROPERTIES = { 'sali': { 'name' : 'sal', 'typ_tot' : {'salsuperbrut' : 'Salaire super brut', 'salbrut': 'Salaire brut', 'sal': 'Salaire imposable', 'salnet': 'Salaire net'}, 'typ_tot_default' : 'sal'}, } # Some variables used by other plugins
ENTITIES_INDEX = ['men', 'foy'] # Some variables needed by the test case plugins CURRENCY = u"DT" # Some variables needed by the test case graph widget REVENUES_CATEGORIES = {'imposable' : ['sal',]} XAXIS_PROPERTIES = { 'sali': { 'name' : 'sal', 'typ_tot' : {'salsuperbrut' : 'Salaire super brut', 'salbrut': 'Salaire brut', 'sal': 'Salaire imposable', 'salnet': 'Salaire net'}, 'typ_tot_default' : 'sal'}, } # Some variables used by other plugins
Generalize graph and some new example scripts
Generalize graph and some new example scripts
Python
agpl-3.0
openfisca/openfisca-tunisia,openfisca/openfisca-tunisia
ENTITIES_INDEX = ['men', 'foy'] # Some variables needed by the test case plugins CURRENCY = u"DT" # Some variables needed by the test case graph widget - # REVENUES_CATEGORIES + + + REVENUES_CATEGORIES = {'imposable' : ['sal',]} + XAXIS_PROPERTIES = { 'sali': { 'name' : 'sal', 'typ_tot' : {'salsuperbrut' : 'Salaire super brut', 'salbrut': 'Salaire brut', 'sal': 'Salaire imposable', 'salnet': 'Salaire net'}, 'typ_tot_default' : 'sal'}, } # Some variables used by other plugins
Generalize graph and some new example scripts
## Code Before: ENTITIES_INDEX = ['men', 'foy'] # Some variables needed by the test case plugins CURRENCY = u"DT" # Some variables needed by the test case graph widget # REVENUES_CATEGORIES XAXIS_PROPERTIES = { 'sali': { 'name' : 'sal', 'typ_tot' : {'salsuperbrut' : 'Salaire super brut', 'salbrut': 'Salaire brut', 'sal': 'Salaire imposable', 'salnet': 'Salaire net'}, 'typ_tot_default' : 'sal'}, } # Some variables used by other plugins ## Instruction: Generalize graph and some new example scripts ## Code After: ENTITIES_INDEX = ['men', 'foy'] # Some variables needed by the test case plugins CURRENCY = u"DT" # Some variables needed by the test case graph widget REVENUES_CATEGORIES = {'imposable' : ['sal',]} XAXIS_PROPERTIES = { 'sali': { 'name' : 'sal', 'typ_tot' : {'salsuperbrut' : 'Salaire super brut', 'salbrut': 'Salaire brut', 'sal': 'Salaire imposable', 'salnet': 'Salaire net'}, 'typ_tot_default' : 'sal'}, } # Some variables used by other plugins
09fc3d53b2814f940bcaf7d6136ed2ce0595fb2f
hyperion/importers/tests/test_sph.py
hyperion/importers/tests/test_sph.py
import os import h5py import numpy as np from ..sph import construct_octree DATA = os.path.join(os.path.dirname(__file__), 'data') def test_construct_octree(): np.random.seed(0) N = 5000 px = np.random.uniform(-10., 10., N) py = np.random.uniform(-10., 10., N) pz = np.random.uniform(-10., 10., N) mass = np.random.uniform(0., 1., N) sigma = np.random.uniform(0., 0.1, N) o = construct_octree(0.1, 0.2, 0.3, 6., 5., 4., px, py, pz, sigma, mass, n_levels=10) # The following lines can be used to write out the reference file if the # SPH gridding code is updated. # f = h5py.File('reference_octree.hdf5', 'w') # o.write(f) # f.close() from hyperion.grid import OctreeGrid f = h5py.File(os.path.join(DATA, 'reference_octree.hdf5'), 'r') o_ref = OctreeGrid() o_ref.read(f) f.close() assert np.all(o_ref.refined == o.refined) assert np.all(o_ref['density'][0].array == o['density'][0].array)
import os import h5py import numpy as np from numpy.testing import assert_allclose from ..sph import construct_octree DATA = os.path.join(os.path.dirname(__file__), 'data') def test_construct_octree(): np.random.seed(0) N = 5000 px = np.random.uniform(-10., 10., N) py = np.random.uniform(-10., 10., N) pz = np.random.uniform(-10., 10., N) mass = np.random.uniform(0., 1., N) sigma = np.random.uniform(0., 0.1, N) o = construct_octree(0.1, 0.2, 0.3, 6., 5., 4., px, py, pz, sigma, mass, n_levels=10) # The following lines can be used to write out the reference file if the # SPH gridding code is updated. # f = h5py.File('reference_octree.hdf5', 'w') # o.write(f) # f.close() from hyperion.grid import OctreeGrid f = h5py.File(os.path.join(DATA, 'reference_octree.hdf5'), 'r') o_ref = OctreeGrid() o_ref.read(f) f.close() assert np.all(o_ref.refined == o.refined) assert_allclose(o_ref['density'][0].array, o['density'][0].array)
Use assert_allclose for comparison of octree densities
Use assert_allclose for comparison of octree densities
Python
bsd-2-clause
hyperion-rt/hyperion,bluescarni/hyperion,hyperion-rt/hyperion,hyperion-rt/hyperion,bluescarni/hyperion
import os import h5py import numpy as np + from numpy.testing import assert_allclose from ..sph import construct_octree DATA = os.path.join(os.path.dirname(__file__), 'data') def test_construct_octree(): np.random.seed(0) N = 5000 px = np.random.uniform(-10., 10., N) py = np.random.uniform(-10., 10., N) pz = np.random.uniform(-10., 10., N) mass = np.random.uniform(0., 1., N) sigma = np.random.uniform(0., 0.1, N) o = construct_octree(0.1, 0.2, 0.3, 6., 5., 4., px, py, pz, sigma, mass, n_levels=10) # The following lines can be used to write out the reference file if the # SPH gridding code is updated. # f = h5py.File('reference_octree.hdf5', 'w') # o.write(f) # f.close() from hyperion.grid import OctreeGrid f = h5py.File(os.path.join(DATA, 'reference_octree.hdf5'), 'r') o_ref = OctreeGrid() o_ref.read(f) f.close() assert np.all(o_ref.refined == o.refined) - assert np.all(o_ref['density'][0].array == o['density'][0].array) + assert_allclose(o_ref['density'][0].array, o['density'][0].array)
Use assert_allclose for comparison of octree densities
## Code Before: import os import h5py import numpy as np from ..sph import construct_octree DATA = os.path.join(os.path.dirname(__file__), 'data') def test_construct_octree(): np.random.seed(0) N = 5000 px = np.random.uniform(-10., 10., N) py = np.random.uniform(-10., 10., N) pz = np.random.uniform(-10., 10., N) mass = np.random.uniform(0., 1., N) sigma = np.random.uniform(0., 0.1, N) o = construct_octree(0.1, 0.2, 0.3, 6., 5., 4., px, py, pz, sigma, mass, n_levels=10) # The following lines can be used to write out the reference file if the # SPH gridding code is updated. # f = h5py.File('reference_octree.hdf5', 'w') # o.write(f) # f.close() from hyperion.grid import OctreeGrid f = h5py.File(os.path.join(DATA, 'reference_octree.hdf5'), 'r') o_ref = OctreeGrid() o_ref.read(f) f.close() assert np.all(o_ref.refined == o.refined) assert np.all(o_ref['density'][0].array == o['density'][0].array) ## Instruction: Use assert_allclose for comparison of octree densities ## Code After: import os import h5py import numpy as np from numpy.testing import assert_allclose from ..sph import construct_octree DATA = os.path.join(os.path.dirname(__file__), 'data') def test_construct_octree(): np.random.seed(0) N = 5000 px = np.random.uniform(-10., 10., N) py = np.random.uniform(-10., 10., N) pz = np.random.uniform(-10., 10., N) mass = np.random.uniform(0., 1., N) sigma = np.random.uniform(0., 0.1, N) o = construct_octree(0.1, 0.2, 0.3, 6., 5., 4., px, py, pz, sigma, mass, n_levels=10) # The following lines can be used to write out the reference file if the # SPH gridding code is updated. # f = h5py.File('reference_octree.hdf5', 'w') # o.write(f) # f.close() from hyperion.grid import OctreeGrid f = h5py.File(os.path.join(DATA, 'reference_octree.hdf5'), 'r') o_ref = OctreeGrid() o_ref.read(f) f.close() assert np.all(o_ref.refined == o.refined) assert_allclose(o_ref['density'][0].array, o['density'][0].array)
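The motivation in isolation: exact equality on float arrays trips over representation noise, while assert_allclose compares within a tolerance (rtol defaults to 1e-7). A standalone illustration, independent of the octree fixture:

import numpy as np
from numpy.testing import assert_allclose

a = np.array([0.1 + 0.2])
b = np.array([0.3])

print(np.all(a == b))   # False: a[0] is 0.30000000000000004
assert_allclose(a, b)   # passes: the ~5.5e-17 gap is well inside rtol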
fd96170fd15ccbe0b42463fe8d4ac78f511d10c7
example/testtags/admin.py
example/testtags/admin.py
from django.contrib import admin from models import TestName class TestNameAdmin(admin.ModelAdmin): model = TestName alphabet_filter = 'sorted_name' admin.site.register(TestName, TestNameAdmin)
from django.contrib import admin from models import TestName class TestNameAdmin(admin.ModelAdmin): model = TestName alphabet_filter = 'sorted_name' ## Testing a custom Default Alphabet #DEFAULT_ALPHABET = 'ABC' ## Testing a blank alphabet-- only shows the characters in the database #DEFAULT_ALPHABET = '' ## Testing a callable def DEFAULT_ALPHABET(self): return "I D K W" admin.site.register(TestName, TestNameAdmin)
Put in some testing code to test the new overrides
Put in some testing code to test the new overrides
Python
apache-2.0
bltravis/django-alphabetfilter,affan2/django-alphabetfilter,affan2/django-alphabetfilter,affan2/django-alphabetfilter,bltravis/django-alphabetfilter,bltravis/django-alphabetfilter
from django.contrib import admin from models import TestName class TestNameAdmin(admin.ModelAdmin): model = TestName alphabet_filter = 'sorted_name' + + ## Testing a custom Default Alphabet + #DEFAULT_ALPHABET = 'ABC' + + ## Testing a blank alphabet-- only shows the characters in the database + #DEFAULT_ALPHABET = '' + + ## Testing a callable + def DEFAULT_ALPHABET(self): + return "I D K W" admin.site.register(TestName, TestNameAdmin)
Put in some testing code to test the new overrides
## Code Before: from django.contrib import admin from models import TestName class TestNameAdmin(admin.ModelAdmin): model = TestName alphabet_filter = 'sorted_name' admin.site.register(TestName, TestNameAdmin) ## Instruction: Put in some testing code to test the new overrides ## Code After: from django.contrib import admin from models import TestName class TestNameAdmin(admin.ModelAdmin): model = TestName alphabet_filter = 'sorted_name' ## Testing a custom Default Alphabet #DEFAULT_ALPHABET = 'ABC' ## Testing a blank alphabet-- only shows the characters in the database #DEFAULT_ALPHABET = '' ## Testing a callable def DEFAULT_ALPHABET(self): return "I D K W" admin.site.register(TestName, TestNameAdmin)
731e48b1b81e9249fc8bdd0f826c6e009559fcc3
mempoke.py
mempoke.py
import gdb import struct class DeviceMemory: def __init__(self): self.inferior = gdb.selected_inferior() def __del__(self): del self.inferior def read(self, address): return struct.unpack('I', self.inferior.read_memory(address, 4))[0] def write(self, address, value): value_bytes = struct.pack('I', value) self.inferior.write_memory(address, value_bytes)
import gdb import struct class DeviceMemory: def __init__(self): self.inferior = gdb.selected_inferior() def __del__(self): del self.inferior def read(self, address): return struct.unpack('I', self.inferior.read_memory(address, 4))[0] def write(self, address, value): value_bytes = struct.pack('I', value) self.inferior.write_memory(address, value_bytes) def create_memory_reg(offset, name): def reg_getter(self): return self.device_memory.read(self.address + offset) def reg_setter(self, value): self.device_memory.write(self.address + offset, value) return property(reg_getter, reg_setter, None, name) def create_mem_struct(name, registers): structure_fields = {} for register, offset in registers: structure_fields[register] = create_memory_reg(offset, register) def memory_structure_init(self, address, device_memory): self.address = address self.device_memory = device_memory structure_fields['__init__'] = memory_structure_init return type(name, (object,), structure_fields)
Add mechanism for defining MCU control structures
Add mechanism for defining MCU control structures
Python
mit
fmfi-svt-deadlock/hw-testing,fmfi-svt-deadlock/hw-testing
import gdb import struct class DeviceMemory: def __init__(self): self.inferior = gdb.selected_inferior() def __del__(self): del self.inferior def read(self, address): return struct.unpack('I', self.inferior.read_memory(address, 4))[0] def write(self, address, value): value_bytes = struct.pack('I', value) self.inferior.write_memory(address, value_bytes) + + def create_memory_reg(offset, name): + def reg_getter(self): + return self.device_memory.read(self.address + offset) + + def reg_setter(self, value): + self.device_memory.write(self.address + offset, value) + + return property(reg_getter, reg_setter, None, name) + + + def create_mem_struct(name, registers): + structure_fields = {} + + for register, offset in registers: + structure_fields[register] = create_memory_reg(offset, register) + + def memory_structure_init(self, address, device_memory): + self.address = address + self.device_memory = device_memory + + structure_fields['__init__'] = memory_structure_init + + return type(name, (object,), structure_fields) +
Add mechanism for defining MCU control structures
## Code Before: import gdb import struct class DeviceMemory: def __init__(self): self.inferior = gdb.selected_inferior() def __del__(self): del self.inferior def read(self, address): return struct.unpack('I', self.inferior.read_memory(address, 4))[0] def write(self, address, value): value_bytes = struct.pack('I', value) self.inferior.write_memory(address, value_bytes) ## Instruction: Add mechanism for defining MCU control structures ## Code After: import gdb import struct class DeviceMemory: def __init__(self): self.inferior = gdb.selected_inferior() def __del__(self): del self.inferior def read(self, address): return struct.unpack('I', self.inferior.read_memory(address, 4))[0] def write(self, address, value): value_bytes = struct.pack('I', value) self.inferior.write_memory(address, value_bytes) def create_memory_reg(offset, name): def reg_getter(self): return self.device_memory.read(self.address + offset) def reg_setter(self, value): self.device_memory.write(self.address + offset, value) return property(reg_getter, reg_setter, None, name) def create_mem_struct(name, registers): structure_fields = {} for register, offset in registers: structure_fields[register] = create_memory_reg(offset, register) def memory_structure_init(self, address, device_memory): self.address = address self.device_memory = device_memory structure_fields['__init__'] = memory_structure_init return type(name, (object,), structure_fields)
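The message promises a mechanism but ships no example. A hedged sketch of intended use inside a gdb session with mempoke loaded; the register names, offsets and base address are invented, loosely styled on an STM32 GPIO block:

from mempoke import DeviceMemory, create_mem_struct

GPIO = create_mem_struct('GPIO', [('MODER', 0x00),
                                  ('IDR', 0x10),
                                  ('ODR', 0x14)])

mem = DeviceMemory()            # requires a live gdb inferior
gpioa = GPIO(0x48000000, mem)   # illustrative base address
gpioa.ODR = gpioa.IDR | 0x20    # read IDR, set bit 5, write ODR through the generated properties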
afa3cd7c6e4c82f94eef7edd4bc8db609943226c
api/urls.py
api/urls.py
from django.conf.urls import url from django.views.generic import TemplateView from django.contrib.auth.decorators import login_required from . import views urlpatterns = [ url(r'^learningcircles/$', views.LearningCircleListView.as_view(), name='api_learningcircles'), ]
from django.conf.urls import url from django.views.generic import TemplateView from django.contrib.auth.decorators import login_required from . import views urlpatterns = [ url(r'^learningcircles/$', views.LearningCircleListView.as_view(), name='api_learningcircles'), url(r'^signup/$', views.SignupView.as_view(), name='api_learningcircles_signup') ]
Add URL for signup api endpoint
Add URL for signup api endpoint
Python
mit
p2pu/learning-circles,p2pu/learning-circles,p2pu/learning-circles,p2pu/learning-circles
from django.conf.urls import url from django.views.generic import TemplateView from django.contrib.auth.decorators import login_required from . import views urlpatterns = [ url(r'^learningcircles/$', views.LearningCircleListView.as_view(), name='api_learningcircles'), + url(r'^signup/$', views.SignupView.as_view(), name='api_learningcircles_signup') ]
Add URL for signup api endpoint
## Code Before: from django.conf.urls import url from django.views.generic import TemplateView from django.contrib.auth.decorators import login_required from . import views urlpatterns = [ url(r'^learningcircles/$', views.LearningCircleListView.as_view(), name='api_learningcircles'), ] ## Instruction: Add URL for signup api endpoint ## Code After: from django.conf.urls import url from django.views.generic import TemplateView from django.contrib.auth.decorators import login_required from . import views urlpatterns = [ url(r'^learningcircles/$', views.LearningCircleListView.as_view(), name='api_learningcircles'), url(r'^signup/$', views.SignupView.as_view(), name='api_learningcircles_signup') ]
f59ac3cc752698ce1755d8953c8771dc978ae6b7
opendebates/urls.py
opendebates/urls.py
from django.conf.urls import include, url from django.contrib import admin urlpatterns = [ url(r'^admin/', include(admin.site.urls)), url(r'^healthcheck.html$', 'opendebates.views.health_check', name='health_check'), url(r'^(?P<prefix>[-\w]+)/', include('opendebates.prefixed_urls')), ]
from django.conf.urls import include, url from django.contrib import admin from django.views.generic.base import RedirectView urlpatterns = [ url(r'^$', RedirectView.as_view(url='https://opendebatecoalition.com', permanent=False)), url(r'^admin/', include(admin.site.urls)), url(r'^healthcheck.html$', 'opendebates.views.health_check', name='health_check'), url(r'^(?P<prefix>[-\w]+)/', include('opendebates.prefixed_urls')), ]
Add a (temporary) redirect to opendebatecoalition.com from /
Add a (temporary) redirect to opendebatecoalition.com from / Temporary because permanent is really hard to take back, if you decide later that you wanted something else.
Python
apache-2.0
caktus/django-opendebates,caktus/django-opendebates,caktus/django-opendebates,caktus/django-opendebates
from django.conf.urls import include, url from django.contrib import admin + from django.views.generic.base import RedirectView urlpatterns = [ + url(r'^$', RedirectView.as_view(url='https://opendebatecoalition.com', permanent=False)), url(r'^admin/', include(admin.site.urls)), url(r'^healthcheck.html$', 'opendebates.views.health_check', name='health_check'), url(r'^(?P<prefix>[-\w]+)/', include('opendebates.prefixed_urls')), ]
Add a (temporary) redirect to opendebatecoalition.com from /
## Code Before: from django.conf.urls import include, url from django.contrib import admin urlpatterns = [ url(r'^admin/', include(admin.site.urls)), url(r'^healthcheck.html$', 'opendebates.views.health_check', name='health_check'), url(r'^(?P<prefix>[-\w]+)/', include('opendebates.prefixed_urls')), ] ## Instruction: Add a (temporary) redirect to opendebatecoalition.com from / ## Code After: from django.conf.urls import include, url from django.contrib import admin from django.views.generic.base import RedirectView urlpatterns = [ url(r'^$', RedirectView.as_view(url='https://opendebatecoalition.com', permanent=False)), url(r'^admin/', include(admin.site.urls)), url(r'^healthcheck.html$', 'opendebates.views.health_check', name='health_check'), url(r'^(?P<prefix>[-\w]+)/', include('opendebates.prefixed_urls')), ]
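On the reasoning in the message: with permanent=True, RedirectView answers 301 Moved Permanently, which browsers cache and keep honoring even after the server changes its mind, while permanent=False answers 302 and stays revocable. A sketch, assuming Django is importable:

from django.views.generic.base import RedirectView

revocable = RedirectView.as_view(url='https://opendebatecoalition.com', permanent=False)  # 302 Found
sticky = RedirectView.as_view(url='https://opendebatecoalition.com', permanent=True)      # 301, cached by clients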
aa0b61b44e631c3a12a16025e93d7e962de23c2f
fabix/system/crontab.py
fabix/system/crontab.py
import fabric.api as fab import cuisine def install(filename, user="root", append=False): """ Installs crontab from a given cronfile """ new_crontab = fab.run("mktemp fabixcron.XXXX") cuisine.file_upload(new_crontab, filename) if append is True: sorted_crontab = fab.run("mktemp fabixcron.XXXX") # When user have no crontab, then crontab command returns 1 error code with fab.settings(warn_only=True): fab.sudo("crontab -u {} -l >> {} 2> /dev/null".format(user, new_crontab)) fab.sudo("sort -u -o {} {}".format(sorted_crontab, new_crontab)) new_crontab = sorted_crontab fab.sudo("crontab -u {} {}".format(user, new_crontab))
import fabric.api as fab import cuisine def install(filename, user="root", append=False): """ Installs crontab from a given cronfile """ new_crontab = fab.run("mktemp fabixcron.XXXX") cuisine.file_upload(new_crontab, filename) if append is True: # When user have no crontab, then crontab command returns 1 error code with fab.settings(warn_only=True): fab.sudo("crontab -u {} -l 2> /dev/null | awk '!x[$0]++{{print $0}}' >> {}".format(user, new_crontab)) fab.sudo("crontab -u {} {}".format(user, new_crontab))
Remove duplicate lines from cron file without sorting
Remove duplicate lines from cron file without sorting
Python
mit
vmalavolta/fabix
import fabric.api as fab import cuisine def install(filename, user="root", append=False): """ Installs crontab from a given cronfile """ new_crontab = fab.run("mktemp fabixcron.XXXX") cuisine.file_upload(new_crontab, filename) if append is True: - sorted_crontab = fab.run("mktemp fabixcron.XXXX") # When user have no crontab, then crontab command returns 1 error code with fab.settings(warn_only=True): - fab.sudo("crontab -u {} -l >> {} 2> /dev/null".format(user, new_crontab)) + fab.sudo("crontab -u {} -l 2> /dev/null | awk '!x[$0]++{{print $0}}' >> {}".format(user, new_crontab)) - fab.sudo("sort -u -o {} {}".format(sorted_crontab, new_crontab)) - new_crontab = sorted_crontab fab.sudo("crontab -u {} {}".format(user, new_crontab))
Remove duplicate lines from cron file without sorting
## Code Before: import fabric.api as fab import cuisine def install(filename, user="root", append=False): """ Installs crontab from a given cronfile """ new_crontab = fab.run("mktemp fabixcron.XXXX") cuisine.file_upload(new_crontab, filename) if append is True: sorted_crontab = fab.run("mktemp fabixcron.XXXX") # When user have no crontab, then crontab command returns 1 error code with fab.settings(warn_only=True): fab.sudo("crontab -u {} -l >> {} 2> /dev/null".format(user, new_crontab)) fab.sudo("sort -u -o {} {}".format(sorted_crontab, new_crontab)) new_crontab = sorted_crontab fab.sudo("crontab -u {} {}".format(user, new_crontab)) ## Instruction: Remove duplicate lines from cron file without sorting ## Code After: import fabric.api as fab import cuisine def install(filename, user="root", append=False): """ Installs crontab from a given cronfile """ new_crontab = fab.run("mktemp fabixcron.XXXX") cuisine.file_upload(new_crontab, filename) if append is True: # When user have no crontab, then crontab command returns 1 error code with fab.settings(warn_only=True): fab.sudo("crontab -u {} -l 2> /dev/null | awk '!x[$0]++{{print $0}}' >> {}".format(user, new_crontab)) fab.sudo("crontab -u {} {}".format(user, new_crontab))
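The replacement one-liner is dense: in awk, !x[$0]++ is true only the first time a line is seen, so duplicates are dropped while the original cron ordering survives, which is exactly what the old sort -u destroyed. The same idea rendered in Python for clarity (editorial sketch only):

def dedupe_preserving_order(lines):
    seen = set()
    result = []
    for line in lines:
        if line not in seen:    # awk's !x[$0] test
            seen.add(line)      # awk's x[$0]++ side effect
            result.append(line)
    return result

print(dedupe_preserving_order(['0 * * * * a', '5 0 * * * b', '0 * * * * a']))
# ['0 * * * * a', '5 0 * * * b']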
02da417b238256878cfab7c0adef8f86f5532b01
tamper/randomcomments.py
tamper/randomcomments.py
import re from lib.core.common import randomRange from lib.core.data import kb from lib.core.enums import PRIORITY __priority__ = PRIORITY.LOW def tamper(payload, **kwargs): """ Add random comments to SQL keywords >>> import random >>> random.seed(0) >>> tamper('INSERT') 'I/**/N/**/SERT' """ retVal = payload if payload: for match in re.finditer(r"[A-Za-z_]+", payload): word = match.group() if len(word) < 2: continue if word.upper() in kb.keywords: _ = word[0] for i in xrange(1, len(word) - 1): _ += "%s%s" % ("/**/" if randomRange(0, 1) else "", word[i]) _ += word[-1] retVal = retVal.replace(word, _) return retVal
import re from lib.core.common import randomRange from lib.core.data import kb from lib.core.enums import PRIORITY __priority__ = PRIORITY.LOW def tamper(payload, **kwargs): """ Add random comments to SQL keywords >>> import random >>> random.seed(0) >>> tamper('INSERT') 'I/**/N/**/SERT' """ retVal = payload if payload: for match in re.finditer(r"\b[A-Za-z_]+\b", payload): word = match.group() if len(word) < 2: continue if word.upper() in kb.keywords: _ = word[0] for i in xrange(1, len(word) - 1): _ += "%s%s" % ("/**/" if randomRange(0, 1) else "", word[i]) _ += word[-1] if "/**/" not in _: index = randomRange(1, len(word) - 1) _ = word[:index] + "/**/" + word[index:] retVal = retVal.replace(word, _) return retVal
Fix for a tamper script (in some cases comments were not inserted)
Fix for a tamper script (in some cases comments were not inserted)
Python
mit
dtrip/.ubuntu,RexGene/monsu-server,RexGene/monsu-server,dtrip/.ubuntu
import re from lib.core.common import randomRange from lib.core.data import kb from lib.core.enums import PRIORITY __priority__ = PRIORITY.LOW def tamper(payload, **kwargs): """ Add random comments to SQL keywords >>> import random >>> random.seed(0) >>> tamper('INSERT') 'I/**/N/**/SERT' """ retVal = payload if payload: - for match in re.finditer(r"[A-Za-z_]+", payload): + for match in re.finditer(r"\b[A-Za-z_]+\b", payload): word = match.group() if len(word) < 2: continue if word.upper() in kb.keywords: _ = word[0] for i in xrange(1, len(word) - 1): _ += "%s%s" % ("/**/" if randomRange(0, 1) else "", word[i]) _ += word[-1] + + if "/**/" not in _: + index = randomRange(1, len(word) - 1) + _ = word[:index] + "/**/" + word[index:] + retVal = retVal.replace(word, _) return retVal
Fix for a tamper script (in some cases comments were not inserted)
## Code Before: import re from lib.core.common import randomRange from lib.core.data import kb from lib.core.enums import PRIORITY __priority__ = PRIORITY.LOW def tamper(payload, **kwargs): """ Add random comments to SQL keywords >>> import random >>> random.seed(0) >>> tamper('INSERT') 'I/**/N/**/SERT' """ retVal = payload if payload: for match in re.finditer(r"[A-Za-z_]+", payload): word = match.group() if len(word) < 2: continue if word.upper() in kb.keywords: _ = word[0] for i in xrange(1, len(word) - 1): _ += "%s%s" % ("/**/" if randomRange(0, 1) else "", word[i]) _ += word[-1] retVal = retVal.replace(word, _) return retVal ## Instruction: Fix for a tamper script (in some cases comments were not inserted) ## Code After: import re from lib.core.common import randomRange from lib.core.data import kb from lib.core.enums import PRIORITY __priority__ = PRIORITY.LOW def tamper(payload, **kwargs): """ Add random comments to SQL keywords >>> import random >>> random.seed(0) >>> tamper('INSERT') 'I/**/N/**/SERT' """ retVal = payload if payload: for match in re.finditer(r"\b[A-Za-z_]+\b", payload): word = match.group() if len(word) < 2: continue if word.upper() in kb.keywords: _ = word[0] for i in xrange(1, len(word) - 1): _ += "%s%s" % ("/**/" if randomRange(0, 1) else "", word[i]) _ += word[-1] if "/**/" not in _: index = randomRange(1, len(word) - 1) _ = word[:index] + "/**/" + word[index:] retVal = retVal.replace(word, _) return retVal
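Two things change above: the \b word boundaries stop keywords from firing inside longer tokens such as INSERT1, and the new fallback guarantees at least one comment lands even when every random draw declined. A self-contained sketch with plain-Python stand-ins for the sqlmap internals (keywords replaces kb.keywords, random.randint replaces randomRange):

import re
import random

keywords = {'INSERT'}

def comment_once(word):
    index = random.randint(1, len(word) - 1)
    return word[:index] + '/**/' + word[index:]

for payload in ('INSERT INTO t', 'INSERT1=2'):
    for match in re.finditer(r"\b[A-Za-z_]+\b", payload):
        word = match.group()
        if len(word) >= 2 and word.upper() in keywords:
            print(payload.replace(word, comment_once(word)))
# the second payload prints nothing under the new pattern; the old [A-Za-z_]+
# pattern would have matched the INSERT prefix of the identifier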
2b758185e0de0d41c5ecc9a5511308ee36c60c91
Python_Data/smpl4.py
Python_Data/smpl4.py
''' 2.1 - Numpy Array library ''' #!/usr/bin/env import numpy as np def main(): '''' # 1) Create two vectors 'a' and 'b', and sum them. # Standart n = 3 a = [x**2 for x in range(1, n + 1)] b = [x**3 for x in range(1, n + 1)] v = plainVectorAddition(a, b) # Numpy n = 3 a = np.arange(1, n + 1) ** 2 b = np.arange(1, n + 1) ** 3 v = npSum(a, b) ''' print(v) def plainSum(a, b): ''' Sum two vectors using standart python functions ''' return [i + j for i,j in zip(a,b)] def npSum(a, b): return a + b if __name__ == '__main__': main()
''' 2.1 - Numpy Array library - Sum of two vectors - Usage: python smpl4.py n Where 'n' specifies the size of the vector. The program make performance comparisons and print the results. ''' #!/usr/bin/env import numpy as np import sys from datetime import datetime def main(): size = int(sys.argv[1]) start = datetime.now() v = plainSum(size) delta = datetime.now() - start print("plainSum() - elapsed time: ",delta) start = datetime.now() v = npSum(size) delta = datetime.now() - start print("npSum() - elapsed time: ",delta) '''' # 1) Create two vectors 'a' and 'b', and sum them. # Standart size = int(sys.argv[1]) v = plainVectorAddition(a, b) # Numpy size = int(sys.argv[1]) v = npSum(a, b) ''' #print(v) def plainSum(n): ''' Sum two vectors using standart python functions ''' a = [x**2 for x in range(1, n + 1)] b = [x**3 for x in range(1, n + 1)] return [i + j for i,j in zip(a,b)] def npSum(n): ''' Sum two vectors using numpy functions ''' a = np.arange(1, n + 1) ** 2 b = np.arange(1, n + 1) ** 3 return a + b if __name__ == '__main__': main()
Add file with linear algebra numpy operations
Add file with linear algebra numpy operations
Python
unlicense
robotenique/RandomAccessMemory,robotenique/RandomAccessMemory,robotenique/RandomAccessMemory
''' 2.1 - Numpy Array library + - Sum of two vectors - + + Usage: python smpl4.py n + + Where 'n' specifies the size of the vector. + The program make performance comparisons and print the results. + ''' #!/usr/bin/env import numpy as np + import sys + from datetime import datetime def main(): + size = int(sys.argv[1]) + start = datetime.now() + v = plainSum(size) + delta = datetime.now() - start + print("plainSum() - elapsed time: ",delta) + start = datetime.now() + v = npSum(size) + delta = datetime.now() - start + print("npSum() - elapsed time: ",delta) '''' # 1) Create two vectors 'a' and 'b', and sum them. # Standart + size = int(sys.argv[1]) - n = 3 - a = [x**2 for x in range(1, n + 1)] - b = [x**3 for x in range(1, n + 1)] - v = plainVectorAddition(a, b) + v = plainVectorAddition(a, b) # Numpy + size = int(sys.argv[1]) - n = 3 - a = np.arange(1, n + 1) ** 2 - b = np.arange(1, n + 1) ** 3 v = npSum(a, b) ''' - print(v) + #print(v) + - def plainSum(a, b): + def plainSum(n): ''' Sum two vectors using standart python functions ''' + a = [x**2 for x in range(1, n + 1)] + b = [x**3 for x in range(1, n + 1)] return [i + j for i,j in zip(a,b)] - def npSum(a, b): + def npSum(n): + ''' + Sum two vectors using numpy functions + ''' + a = np.arange(1, n + 1) ** 2 + b = np.arange(1, n + 1) ** 3 return a + b - if __name__ == '__main__': main()
Add file with linear algebra numpy operations
## Code Before: ''' 2.1 - Numpy Array library '''
#!/usr/bin/env
import numpy as np

def main():
    ''''
    # 1) Create two vectors 'a' and 'b', and sum them.

    # Standart
    n = 3
    a = [x**2 for x in range(1, n + 1)]
    b = [x**3 for x in range(1, n + 1)]
    v = plainVectorAddition(a, b)

    # Numpy
    n = 3
    a = np.arange(1, n + 1) ** 2
    b = np.arange(1, n + 1) ** 3
    v = npSum(a, b)
    '''
    print(v)

def plainSum(a, b):
    '''
    Sum two vectors using standart python functions
    '''
    return [i + j for i,j in zip(a,b)]

def npSum(a, b):
    return a + b

if __name__ == '__main__':
    main()
## Instruction: Add file with linear algebra numpy operations ## Code After: ''' 2.1 - Numpy Array library
- Sum of two vectors -

Usage: python smpl4.py n

Where 'n' specifies the size of the vector.
The program make performance comparisons and print the results.

'''
#!/usr/bin/env
import numpy as np
import sys
from datetime import datetime

def main():
    size = int(sys.argv[1])
    start = datetime.now()
    v = plainSum(size)
    delta = datetime.now() - start
    print("plainSum() - elapsed time: ",delta)
    start = datetime.now()
    v = npSum(size)
    delta = datetime.now() - start
    print("npSum() - elapsed time: ",delta)
    ''''
    # 1) Create two vectors 'a' and 'b', and sum them.

    # Standart
    size = int(sys.argv[1])
    v = plainVectorAddition(a, b)

    # Numpy
    size = int(sys.argv[1])
    v = npSum(a, b)
    '''
    #print(v)

def plainSum(n):
    '''
    Sum two vectors using standart python functions
    '''
    a = [x**2 for x in range(1, n + 1)]
    b = [x**3 for x in range(1, n + 1)]
    return [i + j for i,j in zip(a,b)]

def npSum(n):
    '''
    Sum two vectors using numpy functions
    '''
    a = np.arange(1, n + 1) ** 2
    b = np.arange(1, n + 1) ** 3
    return a + b

if __name__ == '__main__':
    main()
8b7aef341aadefb859790684f41453f561813083
tmi/views/__init__.py
tmi/views/__init__.py
from flask import g
from flask.ext.login import current_user

from tmi.core import app
from tmi.assets import assets # noqa
from tmi.views.ui import ui # noqa
from tmi.views.auth import login, logout # noqa
from tmi.views.admin import admin # noqa
from tmi.views.cards_api import blueprint as cards_api


@app.before_request
def before_request():
    g.user = current_user


app.register_blueprint(cards_api)
from flask import g, request
from flask.ext.login import current_user
from werkzeug.exceptions import HTTPException

from tmi.core import app
from tmi.forms import Invalid
from tmi.util import jsonify
from tmi.assets import assets # noqa
from tmi.views.ui import ui # noqa
from tmi.views.auth import login, logout # noqa
from tmi.views.admin import admin # noqa
from tmi.views.cards_api import blueprint as cards_api


@app.before_request
def before_request():
    g.user = current_user


app.register_blueprint(cards_api)


@app.errorhandler(401)
@app.errorhandler(403)
@app.errorhandler(404)
@app.errorhandler(410)
@app.errorhandler(500)
def handle_exceptions(exc):
    if isinstance(exc, HTTPException):
        message = exc.get_description(request.environ)
        message = message.replace('<p>', '').replace('</p>', '')
        body = {
            'status': exc.code,
            'name': exc.name,
            'message': message
        }
        headers = exc.get_headers(request.environ)
    else:
        body = {
            'status': 500,
            'name': exc.__class__.__name__,
            'message': unicode(exc)
        }
        headers = {}
    return jsonify(body, status=body.get('status'),
                   headers=headers)


@app.errorhandler(Invalid)
def handle_invalid(exc):
    body = {
        'status': 400,
        'name': 'Invalid Data',
        'message': unicode(exc),
        'errors': exc.asdict()
    }
    return jsonify(body, status=400)
Handle errors with JSON messages.
Handle errors with JSON messages.
Python
mit
pudo/storyweb,pudo/storyweb
- from flask import g
+ from flask import g, request
from flask.ext.login import current_user
+ from werkzeug.exceptions import HTTPException

from tmi.core import app
+ from tmi.forms import Invalid
+ from tmi.util import jsonify
from tmi.assets import assets # noqa
from tmi.views.ui import ui # noqa
from tmi.views.auth import login, logout # noqa
from tmi.views.admin import admin # noqa
from tmi.views.cards_api import blueprint as cards_api


@app.before_request
def before_request():
    g.user = current_user


app.register_blueprint(cards_api)
+
+
+ @app.errorhandler(401)
+ @app.errorhandler(403)
+ @app.errorhandler(404)
+ @app.errorhandler(410)
+ @app.errorhandler(500)
+ def handle_exceptions(exc):
+     if isinstance(exc, HTTPException):
+         message = exc.get_description(request.environ)
+         message = message.replace('<p>', '').replace('</p>', '')
+         body = {
+             'status': exc.code,
+             'name': exc.name,
+             'message': message
+         }
+         headers = exc.get_headers(request.environ)
+     else:
+         body = {
+             'status': 500,
+             'name': exc.__class__.__name__,
+             'message': unicode(exc)
+         }
+         headers = {}
+     return jsonify(body, status=body.get('status'),
+                    headers=headers)
+
+
+ @app.errorhandler(Invalid)
+ def handle_invalid(exc):
+     body = {
+         'status': 400,
+         'name': 'Invalid Data',
+         'message': unicode(exc),
+         'errors': exc.asdict()
+     }
+     return jsonify(body, status=400)
+
Handle errors with JSON messages.
## Code Before: from flask import g
from flask.ext.login import current_user

from tmi.core import app
from tmi.assets import assets # noqa
from tmi.views.ui import ui # noqa
from tmi.views.auth import login, logout # noqa
from tmi.views.admin import admin # noqa
from tmi.views.cards_api import blueprint as cards_api


@app.before_request
def before_request():
    g.user = current_user


app.register_blueprint(cards_api)
## Instruction: Handle errors with JSON messages. ## Code After: from flask import g, request
from flask.ext.login import current_user
from werkzeug.exceptions import HTTPException

from tmi.core import app
from tmi.forms import Invalid
from tmi.util import jsonify
from tmi.assets import assets # noqa
from tmi.views.ui import ui # noqa
from tmi.views.auth import login, logout # noqa
from tmi.views.admin import admin # noqa
from tmi.views.cards_api import blueprint as cards_api


@app.before_request
def before_request():
    g.user = current_user


app.register_blueprint(cards_api)


@app.errorhandler(401)
@app.errorhandler(403)
@app.errorhandler(404)
@app.errorhandler(410)
@app.errorhandler(500)
def handle_exceptions(exc):
    if isinstance(exc, HTTPException):
        message = exc.get_description(request.environ)
        message = message.replace('<p>', '').replace('</p>', '')
        body = {
            'status': exc.code,
            'name': exc.name,
            'message': message
        }
        headers = exc.get_headers(request.environ)
    else:
        body = {
            'status': 500,
            'name': exc.__class__.__name__,
            'message': unicode(exc)
        }
        headers = {}
    return jsonify(body, status=body.get('status'),
                   headers=headers)


@app.errorhandler(Invalid)
def handle_invalid(exc):
    body = {
        'status': 400,
        'name': 'Invalid Data',
        'message': unicode(exc),
        'errors': exc.asdict()
    }
    return jsonify(body, status=400)
f5f728074b257aac371fc59af8de02e440e57819
furikura/desktop/unity.py
furikura/desktop/unity.py
import gi
gi.require_version('Unity', '7.0')
from gi.repository import Unity, Dbusmenu
launcher = Unity.LauncherEntry.get_for_desktop_id("furikura.desktop")


def update_counter(count):
    launcher.set_property("count", count)
    launcher.set_property("count_visible", True)


def add_quicklist_item(item):
    quick_list = Dbusmenu.Menuitem.new()
    list_item = Dbusmenu.Menuitem.new()
    list_item.property_set(Dbusmenu.MENUITEM_PROP_LABEL, item)
    list_item.property_set_bool(Dbusmenu.MENUITEM_PROP_VISIBLE, True)
    quick_list.child_append(list_item)
    launcher.set_property("quicklist", quick_list)
import gi
from threading import Timer
gi.require_version('Unity', '7.0')
from gi.repository import Unity, Dbusmenu

launcher = Unity.LauncherEntry.get_for_desktop_id("furikura.desktop")


def update_counter(count):
    launcher.set_property("count", count)
    launcher.set_property("count_visible", True)

    if count > 0:
        launcher.set_property("urgent", True)
        timer = Timer(3, launcher.set_property, ['urgent', False])
        timer.start()


def add_quicklist_item(item):
    quick_list = Dbusmenu.Menuitem.new()
    list_item = Dbusmenu.Menuitem.new()
    list_item.property_set(Dbusmenu.MENUITEM_PROP_LABEL, item)
    list_item.property_set_bool(Dbusmenu.MENUITEM_PROP_VISIBLE, True)
    quick_list.child_append(list_item)
    launcher.set_property("quicklist", quick_list)
Apply "Urgent" icon animation on new message
Apply "Urgent" icon animation on new message
Python
mit
benjamindean/furi-kura,benjamindean/furi-kura
import gi
+ from threading import Timer
gi.require_version('Unity', '7.0')
from gi.repository import Unity, Dbusmenu
+
launcher = Unity.LauncherEntry.get_for_desktop_id("furikura.desktop")


def update_counter(count):
    launcher.set_property("count", count)
    launcher.set_property("count_visible", True)
+
+     if count > 0:
+         launcher.set_property("urgent", True)
+         timer = Timer(3, launcher.set_property, ['urgent', False])
+         timer.start()


def add_quicklist_item(item):
    quick_list = Dbusmenu.Menuitem.new()
    list_item = Dbusmenu.Menuitem.new()
    list_item.property_set(Dbusmenu.MENUITEM_PROP_LABEL, item)
    list_item.property_set_bool(Dbusmenu.MENUITEM_PROP_VISIBLE, True)
    quick_list.child_append(list_item)
    launcher.set_property("quicklist", quick_list)
Apply "Urgent" icon animation on new message
## Code Before: import gi
gi.require_version('Unity', '7.0')
from gi.repository import Unity, Dbusmenu
launcher = Unity.LauncherEntry.get_for_desktop_id("furikura.desktop")


def update_counter(count):
    launcher.set_property("count", count)
    launcher.set_property("count_visible", True)


def add_quicklist_item(item):
    quick_list = Dbusmenu.Menuitem.new()
    list_item = Dbusmenu.Menuitem.new()
    list_item.property_set(Dbusmenu.MENUITEM_PROP_LABEL, item)
    list_item.property_set_bool(Dbusmenu.MENUITEM_PROP_VISIBLE, True)
    quick_list.child_append(list_item)
    launcher.set_property("quicklist", quick_list)
## Instruction: Apply "Urgent" icon animation on new message ## Code After: import gi
from threading import Timer
gi.require_version('Unity', '7.0')
from gi.repository import Unity, Dbusmenu

launcher = Unity.LauncherEntry.get_for_desktop_id("furikura.desktop")


def update_counter(count):
    launcher.set_property("count", count)
    launcher.set_property("count_visible", True)

    if count > 0:
        launcher.set_property("urgent", True)
        timer = Timer(3, launcher.set_property, ['urgent', False])
        timer.start()


def add_quicklist_item(item):
    quick_list = Dbusmenu.Menuitem.new()
    list_item = Dbusmenu.Menuitem.new()
    list_item.property_set(Dbusmenu.MENUITEM_PROP_LABEL, item)
    list_item.property_set_bool(Dbusmenu.MENUITEM_PROP_VISIBLE, True)
    quick_list.child_append(list_item)
    launcher.set_property("quicklist", quick_list)
3b5ad132f3670d1c1210190ecd7b41a379fbd10e
trakt/core/helpers.py
trakt/core/helpers.py
import arrow


def to_datetime(value):
    if value is None:
        return None

    # Parse ISO8601 datetime
    dt = arrow.get(value)

    # Return python datetime object
    return dt.datetime
import arrow


def to_datetime(value):
    if value is None:
        return None

    # Parse ISO8601 datetime
    dt = arrow.get(value)

    # Convert to UTC
    dt = dt.to('UTC')

    # Return naive datetime object
    return dt.naive
Convert all datetime properties to UTC
Convert all datetime properties to UTC
Python
mit
shad7/trakt.py,fuzeman/trakt.py
import arrow


def to_datetime(value):
    if value is None:
        return None

    # Parse ISO8601 datetime
    dt = arrow.get(value)

-     # Return python datetime object
-     return dt.datetime
+     # Convert to UTC
+     dt = dt.to('UTC')

+     # Return naive datetime object
+     return dt.naive
+
Convert all datetime properties to UTC
## Code Before: import arrow


def to_datetime(value):
    if value is None:
        return None

    # Parse ISO8601 datetime
    dt = arrow.get(value)

    # Return python datetime object
    return dt.datetime
## Instruction: Convert all datetime properties to UTC ## Code After: import arrow


def to_datetime(value):
    if value is None:
        return None

    # Parse ISO8601 datetime
    dt = arrow.get(value)

    # Convert to UTC
    dt = dt.to('UTC')

    # Return naive datetime object
    return dt.naive
5e3b712c4c2eac7227d6e894ce05db6f1ede074a
hgtools/tests/conftest.py
hgtools/tests/conftest.py
import os

import pytest

from hgtools import managers


def _ensure_present(mgr):
    try:
        mgr.version()
    except Exception:
        pytest.skip()


@pytest.fixture
def hg_repo(tmpdir):
    tmpdir.chdir()
    mgr = managers.MercurialManager()
    _ensure_present(mgr)
    mgr._invoke('init', '.')
    os.makedirs('bar')
    touch('bar/baz')
    mgr._invoke('addremove')
    mgr._invoke('ci', '-m', 'committed')
    with open('bar/baz', 'w') as baz:
        baz.write('content')
    mgr._invoke('ci', '-m', 'added content')
    return tmpdir


@pytest.fixture
def git_repo(tmpdir):
    tmpdir.chdir()
    mgr = managers.GitManager()
    _ensure_present(mgr)
    mgr._invoke('init')
    mgr._invoke('config', 'user.email', 'hgtools@example.com')
    mgr._invoke('config', 'user.name', 'HGTools')
    os.makedirs('bar')
    touch('bar/baz')
    mgr._invoke('add', '.')
    mgr._invoke('commit', '-m', 'committed')
    with open('bar/baz', 'w') as baz:
        baz.write('content')
    mgr._invoke('commit', '-am', 'added content')
    return tmpdir


def touch(filename):
    with open(filename, 'a'):
        pass
import os

import pytest

from hgtools import managers


def _ensure_present(mgr):
    try:
        mgr.version()
    except Exception:
        pytest.skip()


@pytest.fixture
def tmpdir_as_cwd(tmpdir):
    with tmpdir.as_cwd():
        yield tmpdir


@pytest.fixture
def hg_repo(tmpdir_as_cwd):
    mgr = managers.MercurialManager()
    _ensure_present(mgr)
    mgr._invoke('init', '.')
    os.makedirs('bar')
    touch('bar/baz')
    mgr._invoke('addremove')
    mgr._invoke('ci', '-m', 'committed')
    with open('bar/baz', 'w') as baz:
        baz.write('content')
    mgr._invoke('ci', '-m', 'added content')
    return tmpdir_as_cwd


@pytest.fixture
def git_repo(tmpdir_as_cwd):
    mgr = managers.GitManager()
    _ensure_present(mgr)
    mgr._invoke('init')
    mgr._invoke('config', 'user.email', 'hgtools@example.com')
    mgr._invoke('config', 'user.name', 'HGTools')
    os.makedirs('bar')
    touch('bar/baz')
    mgr._invoke('add', '.')
    mgr._invoke('commit', '-m', 'committed')
    with open('bar/baz', 'w') as baz:
        baz.write('content')
    mgr._invoke('commit', '-am', 'added content')
    return tmpdir_as_cwd


def touch(filename):
    with open(filename, 'a'):
        pass
Fix test failures by restoring working directory in fixtures.
Fix test failures by restoring working directory in fixtures.
Python
mit
jaraco/hgtools
import os

import pytest

from hgtools import managers


def _ensure_present(mgr):
    try:
        mgr.version()
    except Exception:
        pytest.skip()


@pytest.fixture
+ def tmpdir_as_cwd(tmpdir):
+     with tmpdir.as_cwd():
+         yield tmpdir
+
+
+ @pytest.fixture
- def hg_repo(tmpdir):
+ def hg_repo(tmpdir_as_cwd):
-     tmpdir.chdir()
    mgr = managers.MercurialManager()
    _ensure_present(mgr)
    mgr._invoke('init', '.')
    os.makedirs('bar')
    touch('bar/baz')
    mgr._invoke('addremove')
    mgr._invoke('ci', '-m', 'committed')
    with open('bar/baz', 'w') as baz:
        baz.write('content')
    mgr._invoke('ci', '-m', 'added content')
-     return tmpdir
+     return tmpdir_as_cwd


@pytest.fixture
- def git_repo(tmpdir):
+ def git_repo(tmpdir_as_cwd):
-     tmpdir.chdir()
    mgr = managers.GitManager()
    _ensure_present(mgr)
    mgr._invoke('init')
    mgr._invoke('config', 'user.email', 'hgtools@example.com')
    mgr._invoke('config', 'user.name', 'HGTools')
    os.makedirs('bar')
    touch('bar/baz')
    mgr._invoke('add', '.')
    mgr._invoke('commit', '-m', 'committed')
    with open('bar/baz', 'w') as baz:
        baz.write('content')
    mgr._invoke('commit', '-am', 'added content')
-     return tmpdir
+     return tmpdir_as_cwd


def touch(filename):
    with open(filename, 'a'):
        pass
Fix test failures by restoring working directory in fixtures.
## Code Before: import os

import pytest

from hgtools import managers


def _ensure_present(mgr):
    try:
        mgr.version()
    except Exception:
        pytest.skip()


@pytest.fixture
def hg_repo(tmpdir):
    tmpdir.chdir()
    mgr = managers.MercurialManager()
    _ensure_present(mgr)
    mgr._invoke('init', '.')
    os.makedirs('bar')
    touch('bar/baz')
    mgr._invoke('addremove')
    mgr._invoke('ci', '-m', 'committed')
    with open('bar/baz', 'w') as baz:
        baz.write('content')
    mgr._invoke('ci', '-m', 'added content')
    return tmpdir


@pytest.fixture
def git_repo(tmpdir):
    tmpdir.chdir()
    mgr = managers.GitManager()
    _ensure_present(mgr)
    mgr._invoke('init')
    mgr._invoke('config', 'user.email', 'hgtools@example.com')
    mgr._invoke('config', 'user.name', 'HGTools')
    os.makedirs('bar')
    touch('bar/baz')
    mgr._invoke('add', '.')
    mgr._invoke('commit', '-m', 'committed')
    with open('bar/baz', 'w') as baz:
        baz.write('content')
    mgr._invoke('commit', '-am', 'added content')
    return tmpdir


def touch(filename):
    with open(filename, 'a'):
        pass
## Instruction: Fix test failures by restoring working directory in fixtures. ## Code After: import os

import pytest

from hgtools import managers


def _ensure_present(mgr):
    try:
        mgr.version()
    except Exception:
        pytest.skip()


@pytest.fixture
def tmpdir_as_cwd(tmpdir):
    with tmpdir.as_cwd():
        yield tmpdir


@pytest.fixture
def hg_repo(tmpdir_as_cwd):
    mgr = managers.MercurialManager()
    _ensure_present(mgr)
    mgr._invoke('init', '.')
    os.makedirs('bar')
    touch('bar/baz')
    mgr._invoke('addremove')
    mgr._invoke('ci', '-m', 'committed')
    with open('bar/baz', 'w') as baz:
        baz.write('content')
    mgr._invoke('ci', '-m', 'added content')
    return tmpdir_as_cwd


@pytest.fixture
def git_repo(tmpdir_as_cwd):
    mgr = managers.GitManager()
    _ensure_present(mgr)
    mgr._invoke('init')
    mgr._invoke('config', 'user.email', 'hgtools@example.com')
    mgr._invoke('config', 'user.name', 'HGTools')
    os.makedirs('bar')
    touch('bar/baz')
    mgr._invoke('add', '.')
    mgr._invoke('commit', '-m', 'committed')
    with open('bar/baz', 'w') as baz:
        baz.write('content')
    mgr._invoke('commit', '-am', 'added content')
    return tmpdir_as_cwd


def touch(filename):
    with open(filename, 'a'):
        pass
c2eccb4ce1259830dd641d19624358af83c09549
webcomix/comic_spider.py
webcomix/comic_spider.py
from urllib.parse import urljoin

import scrapy


class ComicSpider(scrapy.Spider):
    name = "My spider"

    def __init__(self, *args, **kwargs):
        self.start_urls = kwargs.get('start_urls') or []
        self.next_page_selector = kwargs.get('next_page_selector', None)
        self.comic_image_selector = kwargs.get('comic_image_selector', None)
        super(ComicSpider, self).__init__(*args, **kwargs)

    def parse(self, response):
        comic_image_url = response.xpath(
            self.comic_image_selector).extract_first()
        page = response.meta.get('page') or 1
        yield {
            "image_element": urljoin(response.url, comic_image_url),
            "page": page
        }
        next_page_url = response.xpath(self.next_page_selector).extract_first()
        if next_page_url is not None and not next_page_url.endswith('#'):
            yield scrapy.Request(
                response.urljoin(next_page_url), meta={'page': page + 1})
from urllib.parse import urljoin

import click
import scrapy


class ComicSpider(scrapy.Spider):
    name = "Comic Spider"

    def __init__(self, *args, **kwargs):
        self.start_urls = kwargs.get('start_urls') or []
        self.next_page_selector = kwargs.get('next_page_selector', None)
        self.comic_image_selector = kwargs.get('comic_image_selector', None)
        super(ComicSpider, self).__init__(*args, **kwargs)

    def parse(self, response):
        click.echo("Downloading page {}".format(response.url))
        comic_image_url = response.xpath(
            self.comic_image_selector).extract_first()
        page = response.meta.get('page') or 1
        if comic_image_url is not None:
            yield {
                "image_element": urljoin(response.url, comic_image_url),
                "page": page
            }
        else:
            click.echo("Could not find comic image.")
        next_page_url = response.xpath(self.next_page_selector).extract_first()
        if next_page_url is not None and not next_page_url.endswith('#'):
            yield scrapy.Request(
                response.urljoin(next_page_url), meta={'page': page + 1})
Copy logging from previous version and only yield item to pipeline if a comic image was found
Copy logging from previous version and only yield item to pipeline if a comic image was found
Python
mit
J-CPelletier/webcomix,J-CPelletier/webcomix,J-CPelletier/WebComicToCBZ
from urllib.parse import urljoin

+ import click
import scrapy


class ComicSpider(scrapy.Spider):
-     name = "My spider"
+     name = "Comic Spider"

    def __init__(self, *args, **kwargs):
        self.start_urls = kwargs.get('start_urls') or []
        self.next_page_selector = kwargs.get('next_page_selector', None)
        self.comic_image_selector = kwargs.get('comic_image_selector', None)
        super(ComicSpider, self).__init__(*args, **kwargs)

    def parse(self, response):
+         click.echo("Downloading page {}".format(response.url))
        comic_image_url = response.xpath(
            self.comic_image_selector).extract_first()
        page = response.meta.get('page') or 1
+         if comic_image_url is not None:
-         yield {
+             yield {
-             "image_element": urljoin(response.url, comic_image_url),
+                 "image_element": urljoin(response.url, comic_image_url),
-             "page": page
+                 "page": page
-         }
+             }
+         else:
+             click.echo("Could not find comic image.")
        next_page_url = response.xpath(self.next_page_selector).extract_first()
        if next_page_url is not None and not next_page_url.endswith('#'):
            yield scrapy.Request(
                response.urljoin(next_page_url), meta={'page': page + 1})
Copy logging from previous version and only yield item to pipeline if a comic image was found
## Code Before: from urllib.parse import urljoin

import scrapy


class ComicSpider(scrapy.Spider):
    name = "My spider"

    def __init__(self, *args, **kwargs):
        self.start_urls = kwargs.get('start_urls') or []
        self.next_page_selector = kwargs.get('next_page_selector', None)
        self.comic_image_selector = kwargs.get('comic_image_selector', None)
        super(ComicSpider, self).__init__(*args, **kwargs)

    def parse(self, response):
        comic_image_url = response.xpath(
            self.comic_image_selector).extract_first()
        page = response.meta.get('page') or 1
        yield {
            "image_element": urljoin(response.url, comic_image_url),
            "page": page
        }
        next_page_url = response.xpath(self.next_page_selector).extract_first()
        if next_page_url is not None and not next_page_url.endswith('#'):
            yield scrapy.Request(
                response.urljoin(next_page_url), meta={'page': page + 1})
## Instruction: Copy logging from previous version and only yield item to pipeline if a comic image was found ## Code After: from urllib.parse import urljoin

import click
import scrapy


class ComicSpider(scrapy.Spider):
    name = "Comic Spider"

    def __init__(self, *args, **kwargs):
        self.start_urls = kwargs.get('start_urls') or []
        self.next_page_selector = kwargs.get('next_page_selector', None)
        self.comic_image_selector = kwargs.get('comic_image_selector', None)
        super(ComicSpider, self).__init__(*args, **kwargs)

    def parse(self, response):
        click.echo("Downloading page {}".format(response.url))
        comic_image_url = response.xpath(
            self.comic_image_selector).extract_first()
        page = response.meta.get('page') or 1
        if comic_image_url is not None:
            yield {
                "image_element": urljoin(response.url, comic_image_url),
                "page": page
            }
        else:
            click.echo("Could not find comic image.")
        next_page_url = response.xpath(self.next_page_selector).extract_first()
        if next_page_url is not None and not next_page_url.endswith('#'):
            yield scrapy.Request(
                response.urljoin(next_page_url), meta={'page': page + 1})
1a6344ea1fac51a8024e1803a0391662d4ab81e0
pyeda/boolalg/vexpr.py
pyeda/boolalg/vexpr.py
from pyeda.boolalg import expr
from pyeda.boolalg import bfarray


def bitvec(name, *dims):
    """Return a new array of given dimensions, filled with Expressions.

    Parameters
    ----------
    name : str
    dims : (int or (int, int))
        An int N means a slice from [0:N]
        A tuple (M, N) means a slice from [M:N]
    """
    if dims:
        return bfarray.exprvars(name, *dims)
    else:
        return expr.exprvar(name)
from warnings import warn

from pyeda.boolalg import expr
from pyeda.boolalg import bfarray


def bitvec(name, *dims):
    """Return a new array of given dimensions, filled with Expressions.

    Parameters
    ----------
    name : str
    dims : (int or (int, int))
        An int N means a slice from [0:N]
        A tuple (M, N) means a slice from [M:N]
    """
    warn("The 'bitvec' function is deprecated. Use 'exprvars' instead.")
    if dims:
        return bfarray.exprvars(name, *dims)
    else:
        return expr.exprvar(name)
Add deprecation warning to bitvec function
Add deprecation warning to bitvec function
Python
bsd-2-clause
pombredanne/pyeda,GtTmy/pyeda,karissa/pyeda,sschnug/pyeda,sschnug/pyeda,cjdrake/pyeda,sschnug/pyeda,cjdrake/pyeda,GtTmy/pyeda,GtTmy/pyeda,karissa/pyeda,pombredanne/pyeda,karissa/pyeda,cjdrake/pyeda,pombredanne/pyeda
+
+ from warnings import warn
from pyeda.boolalg import expr
from pyeda.boolalg import bfarray


def bitvec(name, *dims):
    """Return a new array of given dimensions, filled with Expressions.

    Parameters
    ----------
    name : str
    dims : (int or (int, int))
        An int N means a slice from [0:N]
        A tuple (M, N) means a slice from [M:N]
    """
+     warn("The 'bitvec' function is deprecated. Use 'exprvars' instead.")
    if dims:
        return bfarray.exprvars(name, *dims)
    else:
        return expr.exprvar(name)
Add deprecation warning to bitvec function
## Code Before: from pyeda.boolalg import expr
from pyeda.boolalg import bfarray


def bitvec(name, *dims):
    """Return a new array of given dimensions, filled with Expressions.

    Parameters
    ----------
    name : str
    dims : (int or (int, int))
        An int N means a slice from [0:N]
        A tuple (M, N) means a slice from [M:N]
    """
    if dims:
        return bfarray.exprvars(name, *dims)
    else:
        return expr.exprvar(name)
## Instruction: Add deprecation warning to bitvec function ## Code After: from warnings import warn

from pyeda.boolalg import expr
from pyeda.boolalg import bfarray


def bitvec(name, *dims):
    """Return a new array of given dimensions, filled with Expressions.

    Parameters
    ----------
    name : str
    dims : (int or (int, int))
        An int N means a slice from [0:N]
        A tuple (M, N) means a slice from [M:N]
    """
    warn("The 'bitvec' function is deprecated. Use 'exprvars' instead.")
    if dims:
        return bfarray.exprvars(name, *dims)
    else:
        return expr.exprvar(name)
cd621061773b7eafcea9358c9b762663a070ccc5
cc/license/jurisdiction.py
cc/license/jurisdiction.py
import RDF
import zope.interface

import interfaces
import rdf_helper


class Jurisdiction(object):
    zope.interface.implements(interfaces.IJurisdiction)

    def __init__(self, short_name):
        '''@param short_name can be e.g. mx'''
        model = rdf_helper.init_model(
            rdf_helper.JURI_RDF_PATH)
        self.code = short_name
        self.id = 'http://creativecommons.org/international/%s/' % short_name
        id_uri = RDF.Uri(self.id)
        self.local_url = rdf_helper.query_to_single_value(model,
            id_uri, RDF.Uri(rdf_helper.NS_CC + 'jurisdictionSite'), None)
        self.launched = rdf_helper.query_to_single_value(model,
            id_uri, RDF.Uri(rdf_helper.NS_CC + 'launched'), None)
import RDF
import zope.interface

import interfaces
import rdf_helper


class Jurisdiction(object):
    zope.interface.implements(interfaces.IJurisdiction)

    def __init__(self, short_name):
        """Creates an object representing a jurisdiction.
        short_name is a (usually) two-letter code representing
        the same jurisdiction; for a complete list, see
        cc.license.jurisdiction_codes()"""
        model = rdf_helper.init_model(
            rdf_helper.JURI_RDF_PATH)
        self.code = short_name
        self.id = 'http://creativecommons.org/international/%s/' % short_name
        id_uri = RDF.Uri(self.id)
        try:
            self.local_url = rdf_helper.query_to_single_value(model,
                id_uri, RDF.Uri(rdf_helper.NS_CC + 'jurisdictionSite'), None)
        except rdf_helper.NoValuesFoundException:
            self.local_url = None
        try:
            self.launched = rdf_helper.query_to_single_value(model,
                id_uri, RDF.Uri(rdf_helper.NS_CC + 'launched'), None)
        except rdf_helper.NoValuesFoundException:
            self.launched = None
Add documentation and make Jurisdiction calls not fail when some of the values aren't found.
Add documentation and make Jurisdiction calls not fail when some of the values aren't found.
Python
mit
creativecommons/cc.license,creativecommons/cc.license
import RDF
import zope.interface

import interfaces
import rdf_helper


class Jurisdiction(object):
    zope.interface.implements(interfaces.IJurisdiction)

    def __init__(self, short_name):
-         '''@param short_name can be e.g. mx'''
+         """Creates an object representing a jurisdiction.
+         short_name is a (usually) two-letter code representing
+         the same jurisdiction; for a complete list, see
+         cc.license.jurisdiction_codes()"""
        model = rdf_helper.init_model(
            rdf_helper.JURI_RDF_PATH)
        self.code = short_name
        self.id = 'http://creativecommons.org/international/%s/' % short_name
        id_uri = RDF.Uri(self.id)
+         try:
-         self.local_url = rdf_helper.query_to_single_value(model,
+             self.local_url = rdf_helper.query_to_single_value(model,
-             id_uri, RDF.Uri(rdf_helper.NS_CC + 'jurisdictionSite'), None)
+                 id_uri, RDF.Uri(rdf_helper.NS_CC + 'jurisdictionSite'), None)
+         except rdf_helper.NoValuesFoundException:
+             self.local_url = None
+         try:
-         self.launched = rdf_helper.query_to_single_value(model,
+             self.launched = rdf_helper.query_to_single_value(model,
-             id_uri, RDF.Uri(rdf_helper.NS_CC + 'launched'), None)
+                 id_uri, RDF.Uri(rdf_helper.NS_CC + 'launched'), None)
+         except rdf_helper.NoValuesFoundException:
+             self.launched = None
Add documentation and make Jurisdiction calls not fail when some of the values aren't found.
## Code Before: import RDF
import zope.interface

import interfaces
import rdf_helper


class Jurisdiction(object):
    zope.interface.implements(interfaces.IJurisdiction)

    def __init__(self, short_name):
        '''@param short_name can be e.g. mx'''
        model = rdf_helper.init_model(
            rdf_helper.JURI_RDF_PATH)
        self.code = short_name
        self.id = 'http://creativecommons.org/international/%s/' % short_name
        id_uri = RDF.Uri(self.id)
        self.local_url = rdf_helper.query_to_single_value(model,
            id_uri, RDF.Uri(rdf_helper.NS_CC + 'jurisdictionSite'), None)
        self.launched = rdf_helper.query_to_single_value(model,
            id_uri, RDF.Uri(rdf_helper.NS_CC + 'launched'), None)
## Instruction: Add documentation and make Jurisdiction calls not fail when some of the values aren't found. ## Code After: import RDF
import zope.interface

import interfaces
import rdf_helper


class Jurisdiction(object):
    zope.interface.implements(interfaces.IJurisdiction)

    def __init__(self, short_name):
        """Creates an object representing a jurisdiction.
        short_name is a (usually) two-letter code representing
        the same jurisdiction; for a complete list, see
        cc.license.jurisdiction_codes()"""
        model = rdf_helper.init_model(
            rdf_helper.JURI_RDF_PATH)
        self.code = short_name
        self.id = 'http://creativecommons.org/international/%s/' % short_name
        id_uri = RDF.Uri(self.id)
        try:
            self.local_url = rdf_helper.query_to_single_value(model,
                id_uri, RDF.Uri(rdf_helper.NS_CC + 'jurisdictionSite'), None)
        except rdf_helper.NoValuesFoundException:
            self.local_url = None
        try:
            self.launched = rdf_helper.query_to_single_value(model,
                id_uri, RDF.Uri(rdf_helper.NS_CC + 'launched'), None)
        except rdf_helper.NoValuesFoundException:
            self.launched = None
9c39105f2dcb296590e895aaf35de5d4c3105ddb
ephypype/commands/tests/test_cli.py
ephypype/commands/tests/test_cli.py
"""Test neuropycon command line interface""" # Authors: Dmitrii Altukhov <daltuhov@hse.ru> # # License: BSD (3-clause) import os import os.path as op from ephypype.commands import neuropycon from click.testing import CliRunner def test_input_linear(): """Test input node with Linear plugin (serial workflow execution)""" runner = CliRunner() wf_name = 'test_input_linear' with runner.isolated_filesystem(): with open('temp.fif', 'w') as f: f.write('temp') result = runner.invoke(neuropycon.cli, ['-s', os.getcwd(), '-w', wf_name, 'input', 'temp.fif']) assert result.exit_code == 0 assert os.path.exists(op.join(os.getcwd(), wf_name)) def test_input_multiproc(): """Test input node with MultiProc plugin (parallel workflow execution)""" runner = CliRunner() wf_name = 'test_input_multiproc' with runner.isolated_filesystem(): with open('temp.fif', 'w') as f: f.write('temp') result = runner.invoke(neuropycon.cli, ['-s', os.getcwd(), '-w', wf_name, '-p', 'MultiProc', 'input', 'temp.fif']) assert result.exit_code == 0 assert os.path.exists(op.join(os.getcwd(), wf_name))
"""Test neuropycon command line interface""" # Authors: Dmitrii Altukhov <daltuhov@hse.ru> # # License: BSD (3-clause) import matplotlib # noqa matplotlib.use('Agg') # noqa; for testing don't use X server import os import os.path as op from ephypype.commands import neuropycon from click.testing import CliRunner def test_input_linear(): """Test input node with Linear plugin (serial workflow execution)""" runner = CliRunner() wf_name = 'test_input_linear' with runner.isolated_filesystem(): with open('temp.fif', 'w') as f: f.write('temp') result = runner.invoke(neuropycon.cli, ['-s', os.getcwd(), '-w', wf_name, 'input', 'temp.fif']) assert result.exit_code == 0 assert os.path.exists(op.join(os.getcwd(), wf_name)) def test_input_multiproc(): """Test input node with MultiProc plugin (parallel workflow execution)""" runner = CliRunner() wf_name = 'test_input_multiproc' with runner.isolated_filesystem(): with open('temp.fif', 'w') as f: f.write('temp') result = runner.invoke(neuropycon.cli, ['-s', os.getcwd(), '-w', wf_name, '-p', 'MultiProc', 'input', 'temp.fif']) assert result.exit_code == 0 assert os.path.exists(op.join(os.getcwd(), wf_name))
FIX matplotlib backend problem in cli tests
FIX matplotlib backend problem in cli tests
Python
bsd-3-clause
neuropycon/ephypype
"""Test neuropycon command line interface""" # Authors: Dmitrii Altukhov <daltuhov@hse.ru> # # License: BSD (3-clause) + + import matplotlib # noqa + matplotlib.use('Agg') # noqa; for testing don't use X server import os import os.path as op from ephypype.commands import neuropycon from click.testing import CliRunner def test_input_linear(): """Test input node with Linear plugin (serial workflow execution)""" runner = CliRunner() wf_name = 'test_input_linear' with runner.isolated_filesystem(): with open('temp.fif', 'w') as f: f.write('temp') result = runner.invoke(neuropycon.cli, ['-s', os.getcwd(), '-w', wf_name, 'input', 'temp.fif']) assert result.exit_code == 0 assert os.path.exists(op.join(os.getcwd(), wf_name)) def test_input_multiproc(): """Test input node with MultiProc plugin (parallel workflow execution)""" runner = CliRunner() wf_name = 'test_input_multiproc' with runner.isolated_filesystem(): with open('temp.fif', 'w') as f: f.write('temp') result = runner.invoke(neuropycon.cli, ['-s', os.getcwd(), '-w', wf_name, '-p', 'MultiProc', 'input', 'temp.fif']) assert result.exit_code == 0 assert os.path.exists(op.join(os.getcwd(), wf_name))
FIX matplotlib backend problem in cli tests
## Code Before: """Test neuropycon command line interface""" # Authors: Dmitrii Altukhov <daltuhov@hse.ru> # # License: BSD (3-clause) import os import os.path as op from ephypype.commands import neuropycon from click.testing import CliRunner def test_input_linear(): """Test input node with Linear plugin (serial workflow execution)""" runner = CliRunner() wf_name = 'test_input_linear' with runner.isolated_filesystem(): with open('temp.fif', 'w') as f: f.write('temp') result = runner.invoke(neuropycon.cli, ['-s', os.getcwd(), '-w', wf_name, 'input', 'temp.fif']) assert result.exit_code == 0 assert os.path.exists(op.join(os.getcwd(), wf_name)) def test_input_multiproc(): """Test input node with MultiProc plugin (parallel workflow execution)""" runner = CliRunner() wf_name = 'test_input_multiproc' with runner.isolated_filesystem(): with open('temp.fif', 'w') as f: f.write('temp') result = runner.invoke(neuropycon.cli, ['-s', os.getcwd(), '-w', wf_name, '-p', 'MultiProc', 'input', 'temp.fif']) assert result.exit_code == 0 assert os.path.exists(op.join(os.getcwd(), wf_name)) ## Instruction: FIX matplotlib backend problem in cli tests ## Code After: """Test neuropycon command line interface""" # Authors: Dmitrii Altukhov <daltuhov@hse.ru> # # License: BSD (3-clause) import matplotlib # noqa matplotlib.use('Agg') # noqa; for testing don't use X server import os import os.path as op from ephypype.commands import neuropycon from click.testing import CliRunner def test_input_linear(): """Test input node with Linear plugin (serial workflow execution)""" runner = CliRunner() wf_name = 'test_input_linear' with runner.isolated_filesystem(): with open('temp.fif', 'w') as f: f.write('temp') result = runner.invoke(neuropycon.cli, ['-s', os.getcwd(), '-w', wf_name, 'input', 'temp.fif']) assert result.exit_code == 0 assert os.path.exists(op.join(os.getcwd(), wf_name)) def test_input_multiproc(): """Test input node with MultiProc plugin (parallel workflow execution)""" runner = CliRunner() wf_name = 'test_input_multiproc' with runner.isolated_filesystem(): with open('temp.fif', 'w') as f: f.write('temp') result = runner.invoke(neuropycon.cli, ['-s', os.getcwd(), '-w', wf_name, '-p', 'MultiProc', 'input', 'temp.fif']) assert result.exit_code == 0 assert os.path.exists(op.join(os.getcwd(), wf_name))
92d25e86620fcdc415e94d5867cc22a95f88ca3a
mint/rest/db/awshandler.py
mint/rest/db/awshandler.py
from mint import amiperms


class AWSHandler(object):
    def __init__(self, cfg, db):
        self.db = db
        self.amiPerms = amiperms.AMIPermissionsManager(cfg, db)

    def notify_UserProductRemoved(self, event, userId, projectId,
                                  userlevel = None):
        self.amiPerms.addMemberToProject(userId, projectId)

    def notify_UserProductAdded(self, event, userId, projectId,
                                userlevel = None):
        self.amiPerms.deleteMemberFromProject(userId, projectId)

    def notify_UserProductChanged(self, event, userId, projectId,
                                  oldLevel, newLevel):
        self.amiPerms.setMemberLevel(userId, projectId, oldLevel, newLevel)

    def notify_UserCancelled(self, userId):
        # yuck.
        awsFound, oldAwsAccountNumber = self.db.userData.getDataValue(
            userId, 'awsAccountNumber')
        self.amiPerms.setUserKey(userId, oldAwsAccountNumber, None)

    def notify_ReleasePublished(self, releaseId):
        self.amiPerms.publishRelease(releaseId)

    def notify_ReleaseUnpublished(self, releaseId):
        self.amiPerms.unpublishRelease(releaseId)
from mint import amiperms


class AWSHandler(object):
    def __init__(self, cfg, db):
        self.db = db
        self.amiPerms = amiperms.AMIPermissionsManager(cfg, db)

    def notify_UserProductRemoved(self, event, userId, projectId,
                                  userlevel = None):
        self.amiPerms.deleteMemberFromProject(userId, projectId)

    def notify_UserProductAdded(self, event, userId, projectId,
                                userlevel = None):
        self.amiPerms.addMemberToProject(userId, projectId)

    def notify_UserProductChanged(self, event, userId, projectId,
                                  oldLevel, newLevel):
        self.amiPerms.setMemberLevel(userId, projectId, oldLevel, newLevel)

    def notify_UserCancelled(self, userId):
        # yuck.
        awsFound, oldAwsAccountNumber = self.db.userData.getDataValue(
            userId, 'awsAccountNumber')
        self.amiPerms.setUserKey(userId, oldAwsAccountNumber, None)

    def notify_ReleasePublished(self, releaseId):
        self.amiPerms.publishRelease(releaseId)

    def notify_ReleaseUnpublished(self, releaseId):
        self.amiPerms.unpublishRelease(releaseId)
Fix typo when setting up handler.
Fix typo when setting up handler.
Python
apache-2.0
sassoftware/mint,sassoftware/mint,sassoftware/mint,sassoftware/mint,sassoftware/mint
from mint import amiperms


class AWSHandler(object):
    def __init__(self, cfg, db):
        self.db = db
        self.amiPerms = amiperms.AMIPermissionsManager(cfg, db)

    def notify_UserProductRemoved(self, event, userId, projectId,
                                  userlevel = None):
-         self.amiPerms.addMemberToProject(userId, projectId)
+         self.amiPerms.deleteMemberFromProject(userId, projectId)

    def notify_UserProductAdded(self, event, userId, projectId,
                                userlevel = None):
-         self.amiPerms.deleteMemberFromProject(userId, projectId)
+         self.amiPerms.addMemberToProject(userId, projectId)

    def notify_UserProductChanged(self, event, userId, projectId,
                                  oldLevel, newLevel):
        self.amiPerms.setMemberLevel(userId, projectId, oldLevel, newLevel)

    def notify_UserCancelled(self, userId):
        # yuck.
        awsFound, oldAwsAccountNumber = self.db.userData.getDataValue(
            userId, 'awsAccountNumber')
        self.amiPerms.setUserKey(userId, oldAwsAccountNumber, None)

    def notify_ReleasePublished(self, releaseId):
        self.amiPerms.publishRelease(releaseId)

    def notify_ReleaseUnpublished(self, releaseId):
        self.amiPerms.unpublishRelease(releaseId)
Fix typo when setting up handler.
## Code Before: from mint import amiperms


class AWSHandler(object):
    def __init__(self, cfg, db):
        self.db = db
        self.amiPerms = amiperms.AMIPermissionsManager(cfg, db)

    def notify_UserProductRemoved(self, event, userId, projectId,
                                  userlevel = None):
        self.amiPerms.addMemberToProject(userId, projectId)

    def notify_UserProductAdded(self, event, userId, projectId,
                                userlevel = None):
        self.amiPerms.deleteMemberFromProject(userId, projectId)

    def notify_UserProductChanged(self, event, userId, projectId,
                                  oldLevel, newLevel):
        self.amiPerms.setMemberLevel(userId, projectId, oldLevel, newLevel)

    def notify_UserCancelled(self, userId):
        # yuck.
        awsFound, oldAwsAccountNumber = self.db.userData.getDataValue(
            userId, 'awsAccountNumber')
        self.amiPerms.setUserKey(userId, oldAwsAccountNumber, None)

    def notify_ReleasePublished(self, releaseId):
        self.amiPerms.publishRelease(releaseId)

    def notify_ReleaseUnpublished(self, releaseId):
        self.amiPerms.unpublishRelease(releaseId)
## Instruction: Fix typo when setting up handler. ## Code After: from mint import amiperms


class AWSHandler(object):
    def __init__(self, cfg, db):
        self.db = db
        self.amiPerms = amiperms.AMIPermissionsManager(cfg, db)

    def notify_UserProductRemoved(self, event, userId, projectId,
                                  userlevel = None):
        self.amiPerms.deleteMemberFromProject(userId, projectId)

    def notify_UserProductAdded(self, event, userId, projectId,
                                userlevel = None):
        self.amiPerms.addMemberToProject(userId, projectId)

    def notify_UserProductChanged(self, event, userId, projectId,
                                  oldLevel, newLevel):
        self.amiPerms.setMemberLevel(userId, projectId, oldLevel, newLevel)

    def notify_UserCancelled(self, userId):
        # yuck.
        awsFound, oldAwsAccountNumber = self.db.userData.getDataValue(
            userId, 'awsAccountNumber')
        self.amiPerms.setUserKey(userId, oldAwsAccountNumber, None)

    def notify_ReleasePublished(self, releaseId):
        self.amiPerms.publishRelease(releaseId)

    def notify_ReleaseUnpublished(self, releaseId):
        self.amiPerms.unpublishRelease(releaseId)
09b2fe8b248e70300470fcf71f6df0741376c548
misc/disassemble_linear.py
misc/disassemble_linear.py
import sys
import time

import bracoujl.processor.gb_z80 as proc

dis = proc.CPU_CONF['disassembler']()

def disassemble(lines):
    res = ''
    for line in lines:
        op = proc.CPU_CONF['parse_line'](line)
        if op is None:
            continue
        res += '{:04X}'.format(op['pc']) + ' - ' + dis.disassemble(op) + '\n'
    res += '-' * 30
    return res

try:
    N = int(sys.argv[1])
except (ValueError, IndexError):
    N = -1

uniq, lines, count = set(), [], 0
for line in sys.stdin:
    if line == '--\n':
        tlines = disassemble(lines)
        if tlines not in uniq:
            uniq.add(tlines)
            print(tlines)
        lines = []
        if N == count:
            sys.exit(0)
        count += 1
    lines.append(line[:-1])
import argparse
import sys
import time

import bracoujl.processor.gb_z80 as proc

dis = proc.CPU_CONF['disassembler']()

def disassemble(lines, keep_logs=False):
    res = []
    for line in lines:
        op, gline = proc.CPU_CONF['parse_line'](line), ''
        if keep_logs:
            gline += line + (' | DIS: ' if op is not None else '')
        else:
            gline += '{:04X}'.format(op['pc']) + ' - '
        if op is not None:
            gline += dis.disassemble(op)
        res.append(gline)
    res.append('-' * 20)
    return '\n'.join(res)

uniq = set()
def display_lines(lines, **kwds):
    tlines = disassemble(lines, **kwds)
    if tlines not in uniq:
        uniq.add(tlines)
        print(tlines)
    return []

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Little disassembly helper.')
    parser.add_argument('-N', action='store', default=-1,
                        help='number of uniq blocks displayed')
    parser.add_argument('-k', '--keep-logs', action='store_true', default=False,
                        help='keep log lines')
    args = parser.parse_args(sys.argv[1:])

    lines, count = [], 0
    for line in sys.stdin:
        if line == '--\n':
            lines = display_lines(lines, keep_logs=args.keep_logs)
            if args.N == count:
                sys.exit(0)
            count += 1
        lines.append(line[:-1])

    if lines:
        display_lines(lines, keep_logs=args.keep_logs)
Fix and enhance disassemble miscellaneous script.
Fix and enhance disassemble miscellaneous script.
Python
bsd-3-clause
fmichea/bracoujl
+ import argparse
import sys
import time

import bracoujl.processor.gb_z80 as proc

dis = proc.CPU_CONF['disassembler']()

- def disassemble(lines):
+ def disassemble(lines, keep_logs=False):
-     res = ''
+     res = []
    for line in lines:
-         op = proc.CPU_CONF['parse_line'](line)
+         op, gline = proc.CPU_CONF['parse_line'](line), ''
+         if keep_logs:
+             gline += line + (' | DIS: ' if op is not None else '')
+         else:
+             gline += '{:04X}'.format(op['pc']) + ' - '
-         if op is None:
+         if op is not None:
-             continue
-         res += '{:04X}'.format(op['pc']) + ' - ' + dis.disassemble(op) + '\n'
-     res += '-' * 30
-     return res
+             gline += dis.disassemble(op)
+         res.append(gline)
+     res.append('-' * 20)
+     return '\n'.join(res)

- try:
-     N = int(sys.argv[1])
- except (ValueError, IndexError):
-     N = -1
+ uniq = set()
+ def display_lines(lines, **kwds):
+     tlines = disassemble(lines, **kwds)
+     if tlines not in uniq:
+         uniq.add(tlines)
+         print(tlines)
+     return []

+ if __name__ == '__main__':
+     parser = argparse.ArgumentParser(description='Little disassembly helper.')
+     parser.add_argument('-N', action='store', default=-1,
+                         help='number of uniq blocks displayed')
+     parser.add_argument('-k', '--keep-logs', action='store_true', default=False,
+                         help='keep log lines')
+     args = parser.parse_args(sys.argv[1:])

- uniq, lines, count = set(), [], 0
- for line in sys.stdin:
-     if line == '--\n':
-         tlines = disassemble(lines)
-         if tlines not in uniq:
-             uniq.add(tlines)
-             print(tlines)
-         lines = []
-         if N == count:
-             sys.exit(0)
-         count += 1
-     lines.append(line[:-1])
+     lines, count = [], 0
+     for line in sys.stdin:
+         if line == '--\n':
+             lines = display_lines(lines, keep_logs=args.keep_logs)
+             if args.N == count:
+                 sys.exit(0)
+             count += 1
+         lines.append(line[:-1])
+
+     if lines:
+         display_lines(lines, keep_logs=args.keep_logs)
+
Fix and enhance disassemble miscellaneous script.
## Code Before: import sys
import time

import bracoujl.processor.gb_z80 as proc

dis = proc.CPU_CONF['disassembler']()

def disassemble(lines):
    res = ''
    for line in lines:
        op = proc.CPU_CONF['parse_line'](line)
        if op is None:
            continue
        res += '{:04X}'.format(op['pc']) + ' - ' + dis.disassemble(op) + '\n'
    res += '-' * 30
    return res

try:
    N = int(sys.argv[1])
except (ValueError, IndexError):
    N = -1

uniq, lines, count = set(), [], 0
for line in sys.stdin:
    if line == '--\n':
        tlines = disassemble(lines)
        if tlines not in uniq:
            uniq.add(tlines)
            print(tlines)
        lines = []
        if N == count:
            sys.exit(0)
        count += 1
    lines.append(line[:-1])
## Instruction: Fix and enhance disassemble miscellaneous script. ## Code After: import argparse
import sys
import time

import bracoujl.processor.gb_z80 as proc

dis = proc.CPU_CONF['disassembler']()

def disassemble(lines, keep_logs=False):
    res = []
    for line in lines:
        op, gline = proc.CPU_CONF['parse_line'](line), ''
        if keep_logs:
            gline += line + (' | DIS: ' if op is not None else '')
        else:
            gline += '{:04X}'.format(op['pc']) + ' - '
        if op is not None:
            gline += dis.disassemble(op)
        res.append(gline)
    res.append('-' * 20)
    return '\n'.join(res)

uniq = set()
def display_lines(lines, **kwds):
    tlines = disassemble(lines, **kwds)
    if tlines not in uniq:
        uniq.add(tlines)
        print(tlines)
    return []

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Little disassembly helper.')
    parser.add_argument('-N', action='store', default=-1,
                        help='number of uniq blocks displayed')
    parser.add_argument('-k', '--keep-logs', action='store_true', default=False,
                        help='keep log lines')
    args = parser.parse_args(sys.argv[1:])

    lines, count = [], 0
    for line in sys.stdin:
        if line == '--\n':
            lines = display_lines(lines, keep_logs=args.keep_logs)
            if args.N == count:
                sys.exit(0)
            count += 1
        lines.append(line[:-1])

    if lines:
        display_lines(lines, keep_logs=args.keep_logs)
e0063c0d5604372c1a07a179f5206a0a27570817
package_reviewer/check/repo/check_semver_tags.py
package_reviewer/check/repo/check_semver_tags.py
import re

from . import RepoChecker


class CheckSemverTags(RepoChecker):
    def check(self):
        if not self.semver_tags:
            msg = "No semantic version tags found. See http://semver.org."
            for tag in self.tags:
                if re.search(r"(v|^)\d+\.\d+$", tag.name):
                    msg += " Semantic versions consist of exactly three numeric parts."
                    break
            self.fail(msg)
import re

from . import RepoChecker


class CheckSemverTags(RepoChecker):
    def check(self):
        if not self.semver_tags:
            msg = "No semantic version tags found"
            for tag in self.tags:
                if re.search(r"(v|^)\d+\.\d+$", tag.name):
                    msg += " (semantic versions consist of exactly three numeric parts)"
                    break
            self.fail(msg)
Change message of semver tag check
Change message of semver tag check
Python
mit
packagecontrol/st_package_reviewer,packagecontrol/package_reviewer
import re

from . import RepoChecker


class CheckSemverTags(RepoChecker):
    def check(self):
        if not self.semver_tags:
-             msg = "No semantic version tags found. See http://semver.org."
+             msg = "No semantic version tags found"
            for tag in self.tags:
                if re.search(r"(v|^)\d+\.\d+$", tag.name):
-                     msg += " Semantic versions consist of exactly three numeric parts."
+                     msg += " (semantic versions consist of exactly three numeric parts)"
                    break
            self.fail(msg)
Change message of semver tag check
## Code Before: import re

from . import RepoChecker


class CheckSemverTags(RepoChecker):
    def check(self):
        if not self.semver_tags:
            msg = "No semantic version tags found. See http://semver.org."
            for tag in self.tags:
                if re.search(r"(v|^)\d+\.\d+$", tag.name):
                    msg += " Semantic versions consist of exactly three numeric parts."
                    break
            self.fail(msg)
## Instruction: Change message of semver tag check ## Code After: import re

from . import RepoChecker


class CheckSemverTags(RepoChecker):
    def check(self):
        if not self.semver_tags:
            msg = "No semantic version tags found"
            for tag in self.tags:
                if re.search(r"(v|^)\d+\.\d+$", tag.name):
                    msg += " (semantic versions consist of exactly three numeric parts)"
                    break
            self.fail(msg)
d3163d8a7695da9687f82d9d40c6767322998fc2
python/ql/test/experimental/dataflow/tainttracking/defaultAdditionalTaintStep-py3/test_collections.py
python/ql/test/experimental/dataflow/tainttracking/defaultAdditionalTaintStep-py3/test_collections.py
import sys; import os; sys.path.append(os.path.dirname(os.path.dirname((__file__))))

from taintlib import *

# This has no runtime impact, but allows autocomplete to work
from typing import TYPE_CHECKING
if TYPE_CHECKING:
    from ..taintlib import *


# Actual tests

def test_access():
    tainted_list = TAINTED_LIST

    ensure_tainted(
        tainted_list.copy(), # $ tainted
    )


def list_clear():
    tainted_string = TAINTED_STRING
    tainted_list = [tainted_string]
    ensure_tainted(tainted_list) # $ tainted

    tainted_list.clear()
    ensure_not_tainted(tainted_list) # $ SPURIOUS: tainted


# Make tests runable

test_access()
list_clear()
import sys; import os; sys.path.append(os.path.dirname(os.path.dirname((__file__))))

from taintlib import *

# This has no runtime impact, but allows autocomplete to work
from typing import TYPE_CHECKING
if TYPE_CHECKING:
    from ..taintlib import *


# Actual tests

def test_access():
    tainted_list = TAINTED_LIST

    ensure_tainted(
        tainted_list.copy(), # $ tainted
    )

    for ((x, y, *z), a, b) in tainted_list:
        ensure_tainted(
            x, # $ tainted
            y, # $ tainted
            z, # $ tainted
            a, # $ tainted
            b, # $ tainted
        )


def list_clear():
    tainted_string = TAINTED_STRING
    tainted_list = [tainted_string]
    ensure_tainted(tainted_list) # $ tainted

    tainted_list.clear()
    ensure_not_tainted(tainted_list) # $ SPURIOUS: tainted


# Make tests runable

test_access()
list_clear()
Add iterable-unpacking in for test
Python: Add iterable-unpacking in for test
Python
mit
github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql
import sys; import os; sys.path.append(os.path.dirname(os.path.dirname((__file__))))

from taintlib import *

# This has no runtime impact, but allows autocomplete to work
from typing import TYPE_CHECKING
if TYPE_CHECKING:
    from ..taintlib import *


# Actual tests

def test_access():
    tainted_list = TAINTED_LIST

    ensure_tainted(
        tainted_list.copy(), # $ tainted
    )

+     for ((x, y, *z), a, b) in tainted_list:
+         ensure_tainted(
+             x, # $ tainted
+             y, # $ tainted
+             z, # $ tainted
+             a, # $ tainted
+             b, # $ tainted
+         )
+

def list_clear():
    tainted_string = TAINTED_STRING
    tainted_list = [tainted_string]
    ensure_tainted(tainted_list) # $ tainted

    tainted_list.clear()
    ensure_not_tainted(tainted_list) # $ SPURIOUS: tainted


# Make tests runable

test_access()
list_clear()
Add iterable-unpacking in for test
## Code Before: import sys; import os; sys.path.append(os.path.dirname(os.path.dirname((__file__))))

from taintlib import *

# This has no runtime impact, but allows autocomplete to work
from typing import TYPE_CHECKING
if TYPE_CHECKING:
    from ..taintlib import *


# Actual tests

def test_access():
    tainted_list = TAINTED_LIST

    ensure_tainted(
        tainted_list.copy(), # $ tainted
    )


def list_clear():
    tainted_string = TAINTED_STRING
    tainted_list = [tainted_string]
    ensure_tainted(tainted_list) # $ tainted

    tainted_list.clear()
    ensure_not_tainted(tainted_list) # $ SPURIOUS: tainted


# Make tests runable

test_access()
list_clear()
## Instruction: Add iterable-unpacking in for test ## Code After: import sys; import os; sys.path.append(os.path.dirname(os.path.dirname((__file__))))

from taintlib import *

# This has no runtime impact, but allows autocomplete to work
from typing import TYPE_CHECKING
if TYPE_CHECKING:
    from ..taintlib import *


# Actual tests

def test_access():
    tainted_list = TAINTED_LIST

    ensure_tainted(
        tainted_list.copy(), # $ tainted
    )

    for ((x, y, *z), a, b) in tainted_list:
        ensure_tainted(
            x, # $ tainted
            y, # $ tainted
            z, # $ tainted
            a, # $ tainted
            b, # $ tainted
        )


def list_clear():
    tainted_string = TAINTED_STRING
    tainted_list = [tainted_string]
    ensure_tainted(tainted_list) # $ tainted

    tainted_list.clear()
    ensure_not_tainted(tainted_list) # $ SPURIOUS: tainted


# Make tests runable

test_access()
list_clear()
679abfdd2b6a3c4d18170d93bfd42d73c47ff9c5
phasm/typing.py
phasm/typing.py
from typing import Mapping, Set, Callable, Union, Tuple, Iterable

# Pairwise local alignments
OrientedDNASegment = 'phasm.alignments.OrientedDNASegment'
OrientedRead = 'phasm.alignments.OrientedRead'
LocalAlignment = 'phasm.alignments.LocalAlignment'
AlignmentsT = Mapping[OrientedRead, Set[LocalAlignment]]

# Assembly Graphs
AssemblyGraph = 'phasm.assembly_graph.AssemblyGraph'
Node = OrientedDNASegment
Edge = Tuple[Node, Node]
Path = Iterable[Edge]
Bubble = Tuple[Node, Node]

# Phasing algorithm parameters
PruneParam = Union[float, Callable[[float], float]]
from typing import Mapping, Set, Callable, Union, Tuple, Iterable

# Pairwise local alignments
OrientedDNASegment = 'phasm.alignments.OrientedDNASegment'
OrientedRead = 'phasm.alignments.OrientedRead'
LocalAlignment = 'phasm.alignments.LocalAlignment'
AlignmentsT = Mapping[OrientedRead, Set[LocalAlignment]]

# Assembly Graphs
AssemblyGraph = 'phasm.assembly_graph.AssemblyGraph'
Node = Union[OrientedDNASegment, str]
Edge = Tuple[Node, Node]
Path = Iterable[Edge]
Bubble = Tuple[Node, Node]

# Phasing algorithm parameters
PruneParam = Union[float, Callable[[float], float]]
Change Node type a bit
Change Node type a bit In a reconstructed assembly graph sometimes the nodes can be str
Python
mit
AbeelLab/phasm,AbeelLab/phasm
from typing import Mapping, Set, Callable, Union, Tuple, Iterable

# Pairwise local alignments
OrientedDNASegment = 'phasm.alignments.OrientedDNASegment'
OrientedRead = 'phasm.alignments.OrientedRead'
LocalAlignment = 'phasm.alignments.LocalAlignment'
AlignmentsT = Mapping[OrientedRead, Set[LocalAlignment]]

# Assembly Graphs
AssemblyGraph = 'phasm.assembly_graph.AssemblyGraph'
- Node = OrientedDNASegment
+ Node = Union[OrientedDNASegment, str]
Edge = Tuple[Node, Node]
Path = Iterable[Edge]
Bubble = Tuple[Node, Node]

# Phasing algorithm parameters
PruneParam = Union[float, Callable[[float], float]]
Change Node type a bit
## Code Before: from typing import Mapping, Set, Callable, Union, Tuple, Iterable

# Pairwise local alignments
OrientedDNASegment = 'phasm.alignments.OrientedDNASegment'
OrientedRead = 'phasm.alignments.OrientedRead'
LocalAlignment = 'phasm.alignments.LocalAlignment'
AlignmentsT = Mapping[OrientedRead, Set[LocalAlignment]]

# Assembly Graphs
AssemblyGraph = 'phasm.assembly_graph.AssemblyGraph'
Node = OrientedDNASegment
Edge = Tuple[Node, Node]
Path = Iterable[Edge]
Bubble = Tuple[Node, Node]

# Phasing algorithm parameters
PruneParam = Union[float, Callable[[float], float]]
## Instruction: Change Node type a bit ## Code After: from typing import Mapping, Set, Callable, Union, Tuple, Iterable

# Pairwise local alignments
OrientedDNASegment = 'phasm.alignments.OrientedDNASegment'
OrientedRead = 'phasm.alignments.OrientedRead'
LocalAlignment = 'phasm.alignments.LocalAlignment'
AlignmentsT = Mapping[OrientedRead, Set[LocalAlignment]]

# Assembly Graphs
AssemblyGraph = 'phasm.assembly_graph.AssemblyGraph'
Node = Union[OrientedDNASegment, str]
Edge = Tuple[Node, Node]
Path = Iterable[Edge]
Bubble = Tuple[Node, Node]

# Phasing algorithm parameters
PruneParam = Union[float, Callable[[float], float]]
fea07e0fe53049963a744b52c38a9abdfeb1c09e
commands.py
commands.py
from os import path import shutil import sublime import sublime_plugin SUBLIME_ROOT = path.normpath(path.join(sublime.packages_path(), '..')) COMMANDS_FILEPATH = path.join('Packages', 'User', 'Commands.sublime-commands') COMMANDS_FULL_FILEPATH = path.join(SUBLIME_ROOT, COMMANDS_FILEPATH) COMMANDS_SOURCE_FULL_FILEPATH = path.abspath('default-prompt.json') class CommandsOpenCommand(sublime_plugin.WindowCommand): def run(self, **kwargs): """Open `.sublime-commands` file for custom definitions""" # If no file is provided, default to `COMMANDS_FULL_FILEPATH` dest_filepath = kwargs.get('file', COMMANDS_FULL_FILEPATH) # If the file doesn't exist, provide a prompt if not path.exists(dest_filepath): shutil.copy(COMMANDS_SOURCE_FULL_FILEPATH, dest_filepath) # Open the User commands file view = self.window.open_file(dest_filepath) # If the syntax is plain text, move to JSON if view.settings().get('syntax') == path.join('Packages', 'Text', 'Plain text.tmLanguage'): view.set_syntax_file(path.join('Packages', 'JavaScript', 'JSON.tmLanguage'))
from os import path
import shutil

import sublime
import sublime_plugin

SUBLIME_ROOT = path.normpath(path.join(sublime.packages_path(), '..'))
COMMANDS_FILEPATH = path.join('Packages', 'User', 'Commands.sublime-commands')
COMMANDS_FULL_FILEPATH = path.join(SUBLIME_ROOT, COMMANDS_FILEPATH)
COMMANDS_SOURCE_FULL_FILEPATH = path.abspath('default-prompt.json')


class CommandsOpenCommand(sublime_plugin.WindowCommand):
    def run(self):
        """Open `Packages/User/Commands.sublime-commands` for custom definitions"""
        # If the User commands doesn't exist, provide a prompt
        if not path.exists(COMMANDS_FULL_FILEPATH):
            shutil.copy(COMMANDS_SOURCE_FULL_FILEPATH, COMMANDS_FULL_FILEPATH)

        # Open the User commands file
        view = self.window.open_file(COMMANDS_FULL_FILEPATH)

        # If the syntax is plain text, move to JSON
        if view.settings().get('syntax') == path.join('Packages', 'Text', 'Plain text.tmLanguage'):
            view.set_syntax_file(path.join('Packages', 'JavaScript', 'JSON.tmLanguage'))
Revert "Started exploring using argument but realizing this is a rabbit hole"
Revert "Started exploring using argument but realizing this is a rabbit hole" This reverts commit b899d5613c0f4425aa4cc69bac9561b503ba83d4.
Python
unlicense
twolfson/sublime-edit-command-palette,twolfson/sublime-edit-command-palette,twolfson/sublime-edit-command-palette
from os import path
import shutil

import sublime
import sublime_plugin

SUBLIME_ROOT = path.normpath(path.join(sublime.packages_path(), '..'))
COMMANDS_FILEPATH = path.join('Packages', 'User', 'Commands.sublime-commands')
COMMANDS_FULL_FILEPATH = path.join(SUBLIME_ROOT, COMMANDS_FILEPATH)
COMMANDS_SOURCE_FULL_FILEPATH = path.abspath('default-prompt.json')


class CommandsOpenCommand(sublime_plugin.WindowCommand):
-     def run(self, **kwargs):
+     def run(self):
-         """Open `.sublime-commands` file for custom definitions"""
+         """Open `Packages/User/Commands.sublime-commands` for custom definitions"""
-         # If no file is provided, default to `COMMANDS_FULL_FILEPATH`
-         dest_filepath = kwargs.get('file', COMMANDS_FULL_FILEPATH)
-
-         # If the file doesn't exist, provide a prompt
+         # If the User commands doesn't exist, provide a prompt
-         if not path.exists(dest_filepath):
+         if not path.exists(COMMANDS_FULL_FILEPATH):
-             shutil.copy(COMMANDS_SOURCE_FULL_FILEPATH, dest_filepath)
+             shutil.copy(COMMANDS_SOURCE_FULL_FILEPATH, COMMANDS_FULL_FILEPATH)

        # Open the User commands file
-         view = self.window.open_file(dest_filepath)
+         view = self.window.open_file(COMMANDS_FULL_FILEPATH)

        # If the syntax is plain text, move to JSON
        if view.settings().get('syntax') == path.join('Packages', 'Text', 'Plain text.tmLanguage'):
            view.set_syntax_file(path.join('Packages', 'JavaScript', 'JSON.tmLanguage'))
Revert "Started exploring using argument but realizing this is a rabbit hole"
## Code Before:
from os import path
import shutil

import sublime
import sublime_plugin

SUBLIME_ROOT = path.normpath(path.join(sublime.packages_path(), '..'))
COMMANDS_FILEPATH = path.join('Packages', 'User', 'Commands.sublime-commands')
COMMANDS_FULL_FILEPATH = path.join(SUBLIME_ROOT, COMMANDS_FILEPATH)
COMMANDS_SOURCE_FULL_FILEPATH = path.abspath('default-prompt.json')


class CommandsOpenCommand(sublime_plugin.WindowCommand):
    def run(self, **kwargs):
        """Open `.sublime-commands` file for custom definitions"""
        # If no file is provided, default to `COMMANDS_FULL_FILEPATH`
        dest_filepath = kwargs.get('file', COMMANDS_FULL_FILEPATH)

        # If the file doesn't exist, provide a prompt
        if not path.exists(dest_filepath):
            shutil.copy(COMMANDS_SOURCE_FULL_FILEPATH, dest_filepath)

        # Open the User commands file
        view = self.window.open_file(dest_filepath)

        # If the syntax is plain text, move to JSON
        if view.settings().get('syntax') == path.join('Packages', 'Text', 'Plain text.tmLanguage'):
            view.set_syntax_file(path.join('Packages', 'JavaScript', 'JSON.tmLanguage'))
## Instruction:
Revert "Started exploring using argument but realizing this is a rabbit hole"
## Code After:
from os import path
import shutil

import sublime
import sublime_plugin

SUBLIME_ROOT = path.normpath(path.join(sublime.packages_path(), '..'))
COMMANDS_FILEPATH = path.join('Packages', 'User', 'Commands.sublime-commands')
COMMANDS_FULL_FILEPATH = path.join(SUBLIME_ROOT, COMMANDS_FILEPATH)
COMMANDS_SOURCE_FULL_FILEPATH = path.abspath('default-prompt.json')


class CommandsOpenCommand(sublime_plugin.WindowCommand):
    def run(self):
        """Open `Packages/User/Commands.sublime-commands` for custom definitions"""
        # If the User commands doesn't exist, provide a prompt
        if not path.exists(COMMANDS_FULL_FILEPATH):
            shutil.copy(COMMANDS_SOURCE_FULL_FILEPATH, COMMANDS_FULL_FILEPATH)

        # Open the User commands file
        view = self.window.open_file(COMMANDS_FULL_FILEPATH)

        # If the syntax is plain text, move to JSON
        if view.settings().get('syntax') == path.join('Packages', 'Text', 'Plain text.tmLanguage'):
            view.set_syntax_file(path.join('Packages', 'JavaScript', 'JSON.tmLanguage'))
b21c5a1b0f8d176cdd59c8131a316f142540d9ec
materials.py
materials.py
import color


def parse(mat_node):
    materials = []
    for node in mat_node:
        materials.append(Material(node))


class Material:
    ''' it’s a material '''
    def __init__(self, node):
        for c in node:
            if c.tag == 'ambient':
                self.ambient_color = color.parse(c[0])
                self.ambient_color *= float(c.attrib['factor'].replace(',', '.'))
            elif c.tag == 'diffuse':
                self.diffuse_color = color.parse(c[0])
                self.diffuse_color *= float(c.attrib['factor'].replace(',', '.'))
            elif c.tag == 'specular':
                self.specular_color = color.parse(c[0])
                self.specular_color *= float(c.attrib['factor'].replace(',', '.'))
            elif c.tag == 'reflection':
                self.reflection_color = color.parse(c[0])
                self.reflection_color *= float(c.attrib['factor'].replace(',', '.'))
import color


def parse(mat_node):
    materials = []
    for node in mat_node:
        materials.append(Material(node))


class Material:
    ''' it’s a material '''
    def __init__(self, node):
        for c in node:
            if c.tag == 'ambient':
                self.ambient_color = color.parse(c[0])
                self.ambient_color *= float(c.attrib['factor'])
            elif c.tag == 'diffuse':
                self.diffuse_color = color.parse(c[0])
                self.diffuse_color *= float(c.attrib['factor'])
            elif c.tag == 'specular':
                self.specular_color = color.parse(c[0])
                self.specular_color *= float(c.attrib['factor'])
            elif c.tag == 'reflection':
                self.reflection_color = color.parse(c[0])
                self.reflection_color *= float(c.attrib['factor'])
Remove replacement of commas by points
Remove replacement of commas by points
Python
mit
cocreature/pytracer
import color


def parse(mat_node):
    materials = []
    for node in mat_node:
        materials.append(Material(node))


class Material:
    ''' it’s a material '''
    def __init__(self, node):
        for c in node:
            if c.tag == 'ambient':
                self.ambient_color = color.parse(c[0])
-                 self.ambient_color *= float(c.attrib['factor'].replace(',', '.'))
+                 self.ambient_color *= float(c.attrib['factor'])
            elif c.tag == 'diffuse':
                self.diffuse_color = color.parse(c[0])
-                 self.diffuse_color *= float(c.attrib['factor'].replace(',', '.'))
+                 self.diffuse_color *= float(c.attrib['factor'])
            elif c.tag == 'specular':
                self.specular_color = color.parse(c[0])
-                 self.specular_color *= float(c.attrib['factor'].replace(',', '.'))
+                 self.specular_color *= float(c.attrib['factor'])
            elif c.tag == 'reflection':
                self.reflection_color = color.parse(c[0])
-                 self.reflection_color *= float(c.attrib['factor'].replace(',', '.'))
+                 self.reflection_color *= float(c.attrib['factor'])
Remove replacement of commas by points
## Code Before:
import color


def parse(mat_node):
    materials = []
    for node in mat_node:
        materials.append(Material(node))


class Material:
    ''' it’s a material '''
    def __init__(self, node):
        for c in node:
            if c.tag == 'ambient':
                self.ambient_color = color.parse(c[0])
                self.ambient_color *= float(c.attrib['factor'].replace(',', '.'))
            elif c.tag == 'diffuse':
                self.diffuse_color = color.parse(c[0])
                self.diffuse_color *= float(c.attrib['factor'].replace(',', '.'))
            elif c.tag == 'specular':
                self.specular_color = color.parse(c[0])
                self.specular_color *= float(c.attrib['factor'].replace(',', '.'))
            elif c.tag == 'reflection':
                self.reflection_color = color.parse(c[0])
                self.reflection_color *= float(c.attrib['factor'].replace(',', '.'))
## Instruction:
Remove replacement of commas by points
## Code After:
import color


def parse(mat_node):
    materials = []
    for node in mat_node:
        materials.append(Material(node))


class Material:
    ''' it’s a material '''
    def __init__(self, node):
        for c in node:
            if c.tag == 'ambient':
                self.ambient_color = color.parse(c[0])
                self.ambient_color *= float(c.attrib['factor'])
            elif c.tag == 'diffuse':
                self.diffuse_color = color.parse(c[0])
                self.diffuse_color *= float(c.attrib['factor'])
            elif c.tag == 'specular':
                self.specular_color = color.parse(c[0])
                self.specular_color *= float(c.attrib['factor'])
            elif c.tag == 'reflection':
                self.reflection_color = color.parse(c[0])
                self.reflection_color *= float(c.attrib['factor'])
2fe72f41b1b62cf770869b8d3ccefeef1096ea11
conftest.py
conftest.py
import behave
import pytest


@pytest.fixture(autouse=True)
def _annotate_environment(request):
    """Add project-specific information to test-run environment:

    * behave.version

    NOTE: autouse: Fixture is automatically used when test-module is imported.
    """
    # -- USEFULL FOR: pytest --html=report.html ...
    behave_version = behave.__version__
    request.config._environment.append(("behave", behave_version))
import behave
import pytest


@pytest.fixture(autouse=True)
def _annotate_environment(request):
    """Add project-specific information to test-run environment:

    * behave.version

    NOTE: autouse: Fixture is automatically used when test-module is imported.
    """
    # -- USEFULL FOR: pytest --html=report.html ...
    environment = getattr(request.config, "_environment", None)
    if environment:
        # -- PROVIDED-BY: pytest-html
        behave_version = behave.__version__
        environment.append(("behave", behave_version))
FIX when pytest-html is not installed.
FIX when pytest-html is not installed.
Python
bsd-2-clause
Abdoctor/behave,Abdoctor/behave,jenisys/behave,jenisys/behave
import behave
import pytest


@pytest.fixture(autouse=True)
def _annotate_environment(request):
    """Add project-specific information to test-run environment:

    * behave.version

    NOTE: autouse: Fixture is automatically used when test-module is imported.
    """
    # -- USEFULL FOR: pytest --html=report.html ...
+     environment = getattr(request.config, "_environment", None)
+     if environment:
+         # -- PROVIDED-BY: pytest-html
-     behave_version = behave.__version__
+         behave_version = behave.__version__
-     request.config._environment.append(("behave", behave_version))
+         environment.append(("behave", behave_version))
FIX when pytest-html is not installed.
## Code Before:
import behave
import pytest


@pytest.fixture(autouse=True)
def _annotate_environment(request):
    """Add project-specific information to test-run environment:

    * behave.version

    NOTE: autouse: Fixture is automatically used when test-module is imported.
    """
    # -- USEFULL FOR: pytest --html=report.html ...
    behave_version = behave.__version__
    request.config._environment.append(("behave", behave_version))
## Instruction:
FIX when pytest-html is not installed.
## Code After:
import behave
import pytest


@pytest.fixture(autouse=True)
def _annotate_environment(request):
    """Add project-specific information to test-run environment:

    * behave.version

    NOTE: autouse: Fixture is automatically used when test-module is imported.
    """
    # -- USEFULL FOR: pytest --html=report.html ...
    environment = getattr(request.config, "_environment", None)
    if environment:
        # -- PROVIDED-BY: pytest-html
        behave_version = behave.__version__
        environment.append(("behave", behave_version))
6267fa5d9a3ff2573dc33a23d3456942976b0b7e
cyder/base/models.py
cyder/base/models.py
from django.db import models
from django.utils.safestring import mark_safe

from cyder.base.utils import classproperty


class BaseModel(models.Model):
    """
    Base class for models to abstract some common features.

    * Adds automatic created and modified fields to the model.
    """
    created = models.DateTimeField(auto_now_add=True, null=True)
    modified = models.DateTimeField(auto_now=True, null=True)

    class Meta:
        abstract = True
        get_latest_by = 'created'

    @classproperty
    @classmethod
    def pretty_type(cls):
        return cls.__name__.lower()

    @property
    def pretty_name(self):
        return unicode(self)

    def unique_error_message(self, model_class, unique_check):
        error = super(BaseModel, self).unique_error_message(
            model_class, unique_check)
        kwargs = {}
        for field in unique_check:
            kwargs[field] = getattr(self, field)
        obj = model_class.objects.filter(**kwargs)
        if obj and hasattr(obj.get(), 'get_detail_url'):
            error = error[:-1] + ' at <a href={0}>{1}.</a>'.format(
                obj.get().get_detail_url(), obj.get())
            error = mark_safe(error)
        return error


class ExpirableMixin(models.Model):
    expire = models.DateTimeField(null=True, blank=True)

    class Meta:
        abstract = True
from django.db import models
from django.utils.safestring import mark_safe

from cyder.base.utils import classproperty


class BaseModel(models.Model):
    """
    Base class for models to abstract some common features.

    * Adds automatic created and modified fields to the model.
    """
    created = models.DateTimeField(auto_now_add=True, null=True)
    modified = models.DateTimeField(auto_now=True, null=True)

    class Meta:
        abstract = True
        get_latest_by = 'created'

    @classproperty
    @classmethod
    def pretty_type(cls):
        return cls.__name__.lower()

    @property
    def pretty_name(self):
        return unicode(self)

    def unique_error_message(self, model_class, unique_check):
        error = super(BaseModel, self).unique_error_message(
            model_class, unique_check)
        kwargs = {}
        for field in unique_check:
            kwargs[field] = getattr(self, field)
        obj = model_class.objects.filter(**kwargs)
        if obj and hasattr(obj.get(), 'get_detail_url'):
            error = error[:-1] + ' at <a href={0}>{1}.</a>'.format(
                obj.get().get_detail_url(), obj.get())
            error = mark_safe(error)
        return error


class ExpirableMixin(models.Model):
    expire = models.DateTimeField(null=True, blank=True,
                                  help_text='Format: MM/DD/YYYY')

    class Meta:
        abstract = True
Add help_text to interface 'expire' field
Add help_text to interface 'expire' field
Python
bsd-3-clause
OSU-Net/cyder,zeeman/cyder,drkitty/cyder,zeeman/cyder,OSU-Net/cyder,murrown/cyder,OSU-Net/cyder,murrown/cyder,murrown/cyder,drkitty/cyder,drkitty/cyder,OSU-Net/cyder,akeym/cyder,murrown/cyder,akeym/cyder,akeym/cyder,drkitty/cyder,zeeman/cyder,zeeman/cyder,akeym/cyder
from django.db import models
from django.utils.safestring import mark_safe

from cyder.base.utils import classproperty


class BaseModel(models.Model):
    """
    Base class for models to abstract some common features.

    * Adds automatic created and modified fields to the model.
    """
    created = models.DateTimeField(auto_now_add=True, null=True)
    modified = models.DateTimeField(auto_now=True, null=True)

    class Meta:
        abstract = True
        get_latest_by = 'created'

    @classproperty
    @classmethod
    def pretty_type(cls):
        return cls.__name__.lower()

    @property
    def pretty_name(self):
        return unicode(self)

    def unique_error_message(self, model_class, unique_check):
        error = super(BaseModel, self).unique_error_message(
            model_class, unique_check)
        kwargs = {}
        for field in unique_check:
            kwargs[field] = getattr(self, field)
        obj = model_class.objects.filter(**kwargs)
        if obj and hasattr(obj.get(), 'get_detail_url'):
            error = error[:-1] + ' at <a href={0}>{1}.</a>'.format(
                obj.get().get_detail_url(), obj.get())
            error = mark_safe(error)
        return error


class ExpirableMixin(models.Model):
-     expire = models.DateTimeField(null=True, blank=True)
+     expire = models.DateTimeField(null=True, blank=True,
+                                   help_text='Format: MM/DD/YYYY')

    class Meta:
        abstract = True
Add help_text to interface 'expire' field
## Code Before:
from django.db import models
from django.utils.safestring import mark_safe

from cyder.base.utils import classproperty


class BaseModel(models.Model):
    """
    Base class for models to abstract some common features.

    * Adds automatic created and modified fields to the model.
    """
    created = models.DateTimeField(auto_now_add=True, null=True)
    modified = models.DateTimeField(auto_now=True, null=True)

    class Meta:
        abstract = True
        get_latest_by = 'created'

    @classproperty
    @classmethod
    def pretty_type(cls):
        return cls.__name__.lower()

    @property
    def pretty_name(self):
        return unicode(self)

    def unique_error_message(self, model_class, unique_check):
        error = super(BaseModel, self).unique_error_message(
            model_class, unique_check)
        kwargs = {}
        for field in unique_check:
            kwargs[field] = getattr(self, field)
        obj = model_class.objects.filter(**kwargs)
        if obj and hasattr(obj.get(), 'get_detail_url'):
            error = error[:-1] + ' at <a href={0}>{1}.</a>'.format(
                obj.get().get_detail_url(), obj.get())
            error = mark_safe(error)
        return error


class ExpirableMixin(models.Model):
    expire = models.DateTimeField(null=True, blank=True)

    class Meta:
        abstract = True
## Instruction:
Add help_text to interface 'expire' field
## Code After:
from django.db import models
from django.utils.safestring import mark_safe

from cyder.base.utils import classproperty


class BaseModel(models.Model):
    """
    Base class for models to abstract some common features.

    * Adds automatic created and modified fields to the model.
    """
    created = models.DateTimeField(auto_now_add=True, null=True)
    modified = models.DateTimeField(auto_now=True, null=True)

    class Meta:
        abstract = True
        get_latest_by = 'created'

    @classproperty
    @classmethod
    def pretty_type(cls):
        return cls.__name__.lower()

    @property
    def pretty_name(self):
        return unicode(self)

    def unique_error_message(self, model_class, unique_check):
        error = super(BaseModel, self).unique_error_message(
            model_class, unique_check)
        kwargs = {}
        for field in unique_check:
            kwargs[field] = getattr(self, field)
        obj = model_class.objects.filter(**kwargs)
        if obj and hasattr(obj.get(), 'get_detail_url'):
            error = error[:-1] + ' at <a href={0}>{1}.</a>'.format(
                obj.get().get_detail_url(), obj.get())
            error = mark_safe(error)
        return error


class ExpirableMixin(models.Model):
    expire = models.DateTimeField(null=True, blank=True,
                                  help_text='Format: MM/DD/YYYY')

    class Meta:
        abstract = True
f29a6b205a872d7df63e8c45b5829959c98de227
comics/comics/pcweenies.py
comics/comics/pcweenies.py
from comics.aggregator.crawler import CrawlerBase, CrawlerResult
from comics.meta.base import MetaBase


class Meta(MetaBase):
    name = 'The PC Weenies'
    language = 'en'
    url = 'http://www.pcweenies.com/'
    start_date = '1998-10-21'
    rights = 'Krishna M. Sadasivam'


class Crawler(CrawlerBase):
    history_capable_days = 10
    schedule = 'Mo,We,Fr'
    time_zone = -8

    def crawl(self, pub_date):
        feed = self.parse_feed('http://www.pcweenies.com/feed/')
        for entry in feed.for_date(pub_date):
            if 'Comic' in entry.tags:
                title = entry.title
                url = entry.content0.src(u'img')
                return CrawlerResult(url, title)
from comics.aggregator.crawler import CrawlerBase, CrawlerResult
from comics.meta.base import MetaBase


class Meta(MetaBase):
    name = 'The PC Weenies'
    language = 'en'
    url = 'http://www.pcweenies.com/'
    start_date = '1998-10-21'
    rights = 'Krishna M. Sadasivam'


class Crawler(CrawlerBase):
    history_capable_days = 10
    schedule = 'Mo,We,Fr'
    time_zone = -8

    def crawl(self, pub_date):
        feed = self.parse_feed('http://www.pcweenies.com/feed/')
        for entry in feed.for_date(pub_date):
            if 'Comic' in entry.tags:
                title = entry.title
                url = entry.content0.src(u'img[src*="/comics/"]')
                return CrawlerResult(url, title)
Update CSS selector which matched two img elements
Update CSS selector which matched two img elements
Python
agpl-3.0
klette/comics,jodal/comics,jodal/comics,datagutten/comics,jodal/comics,datagutten/comics,jodal/comics,klette/comics,klette/comics,datagutten/comics,datagutten/comics
from comics.aggregator.crawler import CrawlerBase, CrawlerResult
from comics.meta.base import MetaBase


class Meta(MetaBase):
    name = 'The PC Weenies'
    language = 'en'
    url = 'http://www.pcweenies.com/'
    start_date = '1998-10-21'
    rights = 'Krishna M. Sadasivam'


class Crawler(CrawlerBase):
    history_capable_days = 10
    schedule = 'Mo,We,Fr'
    time_zone = -8

    def crawl(self, pub_date):
        feed = self.parse_feed('http://www.pcweenies.com/feed/')
        for entry in feed.for_date(pub_date):
            if 'Comic' in entry.tags:
                title = entry.title
-                 url = entry.content0.src(u'img')
+                 url = entry.content0.src(u'img[src*="/comics/"]')
                return CrawlerResult(url, title)
Update CSS selector which matched two img elements
## Code Before:
from comics.aggregator.crawler import CrawlerBase, CrawlerResult
from comics.meta.base import MetaBase


class Meta(MetaBase):
    name = 'The PC Weenies'
    language = 'en'
    url = 'http://www.pcweenies.com/'
    start_date = '1998-10-21'
    rights = 'Krishna M. Sadasivam'


class Crawler(CrawlerBase):
    history_capable_days = 10
    schedule = 'Mo,We,Fr'
    time_zone = -8

    def crawl(self, pub_date):
        feed = self.parse_feed('http://www.pcweenies.com/feed/')
        for entry in feed.for_date(pub_date):
            if 'Comic' in entry.tags:
                title = entry.title
                url = entry.content0.src(u'img')
                return CrawlerResult(url, title)
## Instruction:
Update CSS selector which matched two img elements
## Code After:
from comics.aggregator.crawler import CrawlerBase, CrawlerResult
from comics.meta.base import MetaBase


class Meta(MetaBase):
    name = 'The PC Weenies'
    language = 'en'
    url = 'http://www.pcweenies.com/'
    start_date = '1998-10-21'
    rights = 'Krishna M. Sadasivam'


class Crawler(CrawlerBase):
    history_capable_days = 10
    schedule = 'Mo,We,Fr'
    time_zone = -8

    def crawl(self, pub_date):
        feed = self.parse_feed('http://www.pcweenies.com/feed/')
        for entry in feed.for_date(pub_date):
            if 'Comic' in entry.tags:
                title = entry.title
                url = entry.content0.src(u'img[src*="/comics/"]')
                return CrawlerResult(url, title)
6ad647899d044cb46be6172cbea9c93a369ddc78
pymanopt/solvers/theano_functions/comp_diff.py
pymanopt/solvers/theano_functions/comp_diff.py
import theano.tensor as T
import theano


# Compile objective function defined in Theano.
def compile(objective, argument):
    return theano.function([argument], objective)


# Compute the gradient of 'objective' with respect to 'argument' and return
# compiled function.
def gradient(objective, argument):
    g = T.grad(objective, argument)
    return theano.function([argument], g)
import theano.tensor as T
import theano


# Compile objective function defined in Theano.
def compile(objective, argument):
    return theano.function([argument], objective)


# Compute the gradient of 'objective' with respect to 'argument' and return
# compiled function.
def gradient(objective, argument):
    g = T.grad(objective, argument)
    return compile(g, argument)
Use `compile` function for `gradient` function
Use `compile` function for `gradient` function

Signed-off-by: Niklas Koep <342d5290239d9c5264c8f98185afedb99596601a@gmail.com>
Python
bsd-3-clause
j-towns/pymanopt,nkoep/pymanopt,pymanopt/pymanopt,tingelst/pymanopt,nkoep/pymanopt,pymanopt/pymanopt,nkoep/pymanopt
import theano.tensor as T
import theano


# Compile objective function defined in Theano.
def compile(objective, argument):
    return theano.function([argument], objective)


# Compute the gradient of 'objective' with respect to 'argument' and return
# compiled function.
def gradient(objective, argument):
    g = T.grad(objective, argument)
-     return theano.function([argument], g)
+     return compile(g, argument)
+
+
Use `compile` function for `gradient` function
## Code Before:
import theano.tensor as T
import theano


# Compile objective function defined in Theano.
def compile(objective, argument):
    return theano.function([argument], objective)


# Compute the gradient of 'objective' with respect to 'argument' and return
# compiled function.
def gradient(objective, argument):
    g = T.grad(objective, argument)
    return theano.function([argument], g)
## Instruction:
Use `compile` function for `gradient` function
## Code After:
import theano.tensor as T
import theano


# Compile objective function defined in Theano.
def compile(objective, argument):
    return theano.function([argument], objective)


# Compute the gradient of 'objective' with respect to 'argument' and return
# compiled function.
def gradient(objective, argument):
    g = T.grad(objective, argument)
    return compile(g, argument)
48e4203bc87fda407d0e5f804c854b53f7bf54fc
lemon/publications/managers.py
lemon/publications/managers.py
from django.db import models

from lemon.publications.querysets import PublicationQuerySet


class PublicationManager(models.Manager):

    def expired(self):
        return self.get_query_set().expired()

    def future(self):
        return self.get_query_set().future()

    def enabled(self):
        return self.get_query_set().enabled()

    def disabled(self):
        return self.get_query_set().disabled()

    def unpublished(self):
        return self.get_query_set().unpublished()

    def published(self):
        return self.get_query_set().published()

    def get_query_set(self):
        return PublicationQuerySet(self.model)
from django.db import models

from lemon.publications.querysets import PublicationQuerySet


class PublicationManager(models.Manager):

    def expired(self):
        return self.get_query_set().expired()

    def future(self):
        return self.get_query_set().future()

    def enabled(self):
        return self.get_query_set().enabled()

    def disabled(self):
        return self.get_query_set().disabled()

    def unpublished(self):
        return self.get_query_set().unpublished()

    def published(self):
        return self.get_query_set().published()

    def get_query_set(self):
        return PublicationQuerySet(self.model, using=self._db)
Fix handling of the _db attribute on the PublicationManager in get_query_set
Fix handling of the _db attribute on the PublicationManager in get_query_set
Python
bsd-3-clause
trilan/lemon,trilan/lemon,trilan/lemon
from django.db import models

from lemon.publications.querysets import PublicationQuerySet


class PublicationManager(models.Manager):

    def expired(self):
        return self.get_query_set().expired()

    def future(self):
        return self.get_query_set().future()

    def enabled(self):
        return self.get_query_set().enabled()

    def disabled(self):
        return self.get_query_set().disabled()

    def unpublished(self):
        return self.get_query_set().unpublished()

    def published(self):
        return self.get_query_set().published()

    def get_query_set(self):
-         return PublicationQuerySet(self.model)
+         return PublicationQuerySet(self.model, using=self._db)
Fix handling of the _db attribute on the PublicationManager in get_query_set
## Code Before:
from django.db import models

from lemon.publications.querysets import PublicationQuerySet


class PublicationManager(models.Manager):

    def expired(self):
        return self.get_query_set().expired()

    def future(self):
        return self.get_query_set().future()

    def enabled(self):
        return self.get_query_set().enabled()

    def disabled(self):
        return self.get_query_set().disabled()

    def unpublished(self):
        return self.get_query_set().unpublished()

    def published(self):
        return self.get_query_set().published()

    def get_query_set(self):
        return PublicationQuerySet(self.model)
## Instruction:
Fix handling of the _db attribute on the PublicationManager in get_query_set
## Code After:
from django.db import models

from lemon.publications.querysets import PublicationQuerySet


class PublicationManager(models.Manager):

    def expired(self):
        return self.get_query_set().expired()

    def future(self):
        return self.get_query_set().future()

    def enabled(self):
        return self.get_query_set().enabled()

    def disabled(self):
        return self.get_query_set().disabled()

    def unpublished(self):
        return self.get_query_set().unpublished()

    def published(self):
        return self.get_query_set().published()

    def get_query_set(self):
        return PublicationQuerySet(self.model, using=self._db)
3c4565dcf6222af0e3b7cabf5c52f9ab18488be2
tests/test_main.py
tests/test_main.py
from cookiecutter.main import is_repo_url, expand_abbreviations


def test_is_repo_url():
    """Verify is_repo_url works."""
    assert is_repo_url('gitolite@server:team/repo') is True
    assert is_repo_url('git@github.com:audreyr/cookiecutter.git') is True
    assert is_repo_url('https://github.com/audreyr/cookiecutter.git') is True
    assert is_repo_url('https://bitbucket.org/pokoli/cookiecutter.hg') is True

    assert is_repo_url('/audreyr/cookiecutter.git') is False
    assert is_repo_url('/home/audreyr/cookiecutter') is False

    appveyor_temp_dir = (
        'c:\\users\\appveyor\\appdata\\local\\temp\\1\\pytest-0\\'
        'test_default_output_dir0\\template'
    )
    assert is_repo_url(appveyor_temp_dir) is False


def test_expand_abbreviations():
    template = 'gh:audreyr/cookiecutter-pypackage'

    # This is not a valid repo url just yet!
    # First `main.expand_abbreviations` needs to translate it
    assert is_repo_url(template) is False

    expanded_template = expand_abbreviations(template, {})
    assert is_repo_url(expanded_template) is True
import pytest

from cookiecutter.main import is_repo_url, expand_abbreviations


@pytest.fixture(params=[
    'gitolite@server:team/repo',
    'git@github.com:audreyr/cookiecutter.git',
    'https://github.com/audreyr/cookiecutter.git',
    'https://bitbucket.org/pokoli/cookiecutter.hg',
])
def remote_repo_url(request):
    return request.param


def test_is_repo_url_for_remote_urls(remote_repo_url):
    """Verify is_repo_url works."""
    assert is_repo_url(remote_repo_url) is True


@pytest.fixture(params=[
    '/audreyr/cookiecutter.git',
    '/home/audreyr/cookiecutter',
    (
        'c:\\users\\appveyor\\appdata\\local\\temp\\1\\pytest-0\\'
        'test_default_output_dir0\\template'
    ),
])
def local_repo_url(request):
    return request.param


def test_is_repo_url_for_local_urls(local_repo_url):
    """Verify is_repo_url works."""
    assert is_repo_url(local_repo_url) is False


def test_expand_abbreviations():
    template = 'gh:audreyr/cookiecutter-pypackage'

    # This is not a valid repo url just yet!
    # First `main.expand_abbreviations` needs to translate it
    assert is_repo_url(template) is False

    expanded_template = expand_abbreviations(template, {})
    assert is_repo_url(expanded_template) is True
Refactor tests for is_repo_url to be parametrized
Refactor tests for is_repo_url to be parametrized
Python
bsd-3-clause
luzfcb/cookiecutter,terryjbates/cookiecutter,michaeljoseph/cookiecutter,willingc/cookiecutter,pjbull/cookiecutter,stevepiercy/cookiecutter,hackebrot/cookiecutter,dajose/cookiecutter,Springerle/cookiecutter,dajose/cookiecutter,stevepiercy/cookiecutter,terryjbates/cookiecutter,audreyr/cookiecutter,pjbull/cookiecutter,audreyr/cookiecutter,luzfcb/cookiecutter,hackebrot/cookiecutter,willingc/cookiecutter,Springerle/cookiecutter,michaeljoseph/cookiecutter
+
+ import pytest
+
from cookiecutter.main import is_repo_url, expand_abbreviations


- def test_is_repo_url():
+ @pytest.fixture(params=[
+     'gitolite@server:team/repo',
+     'git@github.com:audreyr/cookiecutter.git',
+     'https://github.com/audreyr/cookiecutter.git',
+     'https://bitbucket.org/pokoli/cookiecutter.hg',
+ ])
+ def remote_repo_url(request):
+     return request.param
+
+
+ def test_is_repo_url_for_remote_urls(remote_repo_url):
    """Verify is_repo_url works."""
+     assert is_repo_url(remote_repo_url) is True
-     assert is_repo_url('gitolite@server:team/repo') is True
-     assert is_repo_url('git@github.com:audreyr/cookiecutter.git') is True
-     assert is_repo_url('https://github.com/audreyr/cookiecutter.git') is True
-     assert is_repo_url('https://bitbucket.org/pokoli/cookiecutter.hg') is True
-     assert is_repo_url('/audreyr/cookiecutter.git') is False
-     assert is_repo_url('/home/audreyr/cookiecutter') is False
-     appveyor_temp_dir = (
+ @pytest.fixture(params=[
+     '/audreyr/cookiecutter.git',
+     '/home/audreyr/cookiecutter',
+     (
        'c:\\users\\appveyor\\appdata\\local\\temp\\1\\pytest-0\\'
        'test_default_output_dir0\\template'
-     )
+     ),
+ ])
+ def local_repo_url(request):
+     return request.param
+
+
+ def test_is_repo_url_for_local_urls(local_repo_url):
+     """Verify is_repo_url works."""
-     assert is_repo_url(appveyor_temp_dir) is False
+     assert is_repo_url(local_repo_url) is False


def test_expand_abbreviations():
    template = 'gh:audreyr/cookiecutter-pypackage'

    # This is not a valid repo url just yet!
    # First `main.expand_abbreviations` needs to translate it
    assert is_repo_url(template) is False

    expanded_template = expand_abbreviations(template, {})
    assert is_repo_url(expanded_template) is True
Refactor tests for is_repo_url to be parametrized
## Code Before:
from cookiecutter.main import is_repo_url, expand_abbreviations


def test_is_repo_url():
    """Verify is_repo_url works."""
    assert is_repo_url('gitolite@server:team/repo') is True
    assert is_repo_url('git@github.com:audreyr/cookiecutter.git') is True
    assert is_repo_url('https://github.com/audreyr/cookiecutter.git') is True
    assert is_repo_url('https://bitbucket.org/pokoli/cookiecutter.hg') is True

    assert is_repo_url('/audreyr/cookiecutter.git') is False
    assert is_repo_url('/home/audreyr/cookiecutter') is False

    appveyor_temp_dir = (
        'c:\\users\\appveyor\\appdata\\local\\temp\\1\\pytest-0\\'
        'test_default_output_dir0\\template'
    )
    assert is_repo_url(appveyor_temp_dir) is False


def test_expand_abbreviations():
    template = 'gh:audreyr/cookiecutter-pypackage'

    # This is not a valid repo url just yet!
    # First `main.expand_abbreviations` needs to translate it
    assert is_repo_url(template) is False

    expanded_template = expand_abbreviations(template, {})
    assert is_repo_url(expanded_template) is True
## Instruction:
Refactor tests for is_repo_url to be parametrized
## Code After:
import pytest

from cookiecutter.main import is_repo_url, expand_abbreviations


@pytest.fixture(params=[
    'gitolite@server:team/repo',
    'git@github.com:audreyr/cookiecutter.git',
    'https://github.com/audreyr/cookiecutter.git',
    'https://bitbucket.org/pokoli/cookiecutter.hg',
])
def remote_repo_url(request):
    return request.param


def test_is_repo_url_for_remote_urls(remote_repo_url):
    """Verify is_repo_url works."""
    assert is_repo_url(remote_repo_url) is True


@pytest.fixture(params=[
    '/audreyr/cookiecutter.git',
    '/home/audreyr/cookiecutter',
    (
        'c:\\users\\appveyor\\appdata\\local\\temp\\1\\pytest-0\\'
        'test_default_output_dir0\\template'
    ),
])
def local_repo_url(request):
    return request.param


def test_is_repo_url_for_local_urls(local_repo_url):
    """Verify is_repo_url works."""
    assert is_repo_url(local_repo_url) is False


def test_expand_abbreviations():
    template = 'gh:audreyr/cookiecutter-pypackage'

    # This is not a valid repo url just yet!
    # First `main.expand_abbreviations` needs to translate it
    assert is_repo_url(template) is False

    expanded_template = expand_abbreviations(template, {})
    assert is_repo_url(expanded_template) is True
4b6bb7b7d258a9f130b7d10f390f44dec855cc19
admin/src/gui/NewScoville.py
admin/src/gui/NewScoville.py
import pygtk
pygtk.require("2.0")
import gtk

builder = gtk.Builder()
builder.add_from_file(os.path[0]+"/src/gui/NewScoville.ui")

class NewScovilleWindow(object):
    def __init__(self):
        pass
import pygtk
pygtk.require("2.0")
import gtk

builder = gtk.Builder()
builder.add_from_file(os.path[0]+"/src/gui/NewScoville.ui")

class NewScovilleWindow(object):
    pass
Revert "added constructor (testcommit for new git interface)"
Revert "added constructor (testcommit for new git interface)" This reverts commit d5c0252b75e97103d61c3203e2a8d04a061c8a2f.
Python
agpl-3.0
skarphed/skarphed,skarphed/skarphed
import pygtk
pygtk.require("2.0")
import gtk

builder = gtk.Builder()
builder.add_from_file(os.path[0]+"/src/gui/NewScoville.ui")

class NewScovilleWindow(object):
-     def __init__(self):
-         pass
+     pass
Revert "added constructor (testcommit for new git interface)"
## Code Before:
import pygtk
pygtk.require("2.0")
import gtk

builder = gtk.Builder()
builder.add_from_file(os.path[0]+"/src/gui/NewScoville.ui")

class NewScovilleWindow(object):
    def __init__(self):
        pass
## Instruction:
Revert "added constructor (testcommit for new git interface)"
## Code After:
import pygtk
pygtk.require("2.0")
import gtk

builder = gtk.Builder()
builder.add_from_file(os.path[0]+"/src/gui/NewScoville.ui")

class NewScovilleWindow(object):
    pass
589598a9fc3871fe534e4dde60b61c9a0a56e224
legistar/ext/pupa/orgs.py
legistar/ext/pupa/orgs.py
import pupa.scrape

from legistar.utils.itemgenerator import make_item
from legistar.ext.pupa.base import Adapter, Converter


class OrgsAdapter(Adapter):
    '''Converts legistar data into a pupa.scrape.Person instance.
    Note the make_item methods are popping values out the dict, because
    the associated keys aren't valid pupa.scrape.Person fields.
    '''
    pupa_model = pupa.scrape.Organization
    aliases = []
    extras_keys = ['meeting_location', 'num_members', 'num_vacancies']

    @make_item('classification')
    def get_classn(self):
        legistar_type = self.data.pop('type')
        return self.config.get_org_classification(legistar_type)


class OrgsConverter(Converter):
    '''Invokes the person and membership adapters to output pupa Person
    objects.
    '''
    adapter = OrgsAdapter

    def gen_agenda_items(self):
        yield from self.make_child(AgendaItemConverter, self.agenda)

    def __iter__(self):
        self.agenda = self.data.pop('agenda', [])
        yield self.get_adapter().get_instance()
        yield from self.gen_agenda_items()
import pupa.scrape

from legistar.utils.itemgenerator import make_item
from legistar.ext.pupa.base import Adapter, Converter


class OrgsAdapter(Adapter):
    '''Converts legistar data into a pupa.scrape.Person instance.
    Note the make_item methods are popping values out the dict, because
    the associated keys aren't valid pupa.scrape.Person fields.
    '''
    pupa_model = pupa.scrape.Organization
    aliases = []
    extras_keys = [
        'meeting_location', 'num_members', 'num_vacancies', 'type']

    @make_item('classification')
    def get_classn(self):
        legistar_type = self.data.pop('type')
        return self.config.get_org_classification(legistar_type)

    def get_instance(self, **extra_instance_data):

        instance_data = self.get_instance_data()
        instance_data.update(extra_instance_data)

        extras = instance_data.pop('extras')
        sources = instance_data.pop('sources')
        identifiers = instance_data.pop('identifiers')

        instance = self.pupa_model(**instance_data)
        instance.extras.update(extras)
        for source in sources:
            instance.add_source(**source)
        for identifier in identifiers:
            instance.add_identifier(**identifier)

        return instance


class OrgsConverter(Converter):
    '''Invokes the person and membership adapters to output pupa Person
    objects.
    '''
    adapter = OrgsAdapter
Remove cruft copied from Memberships adapter
Remove cruft copied from Memberships adapter
Python
bsd-3-clause
opencivicdata/python-legistar-scraper,datamade/python-legistar-scraper
import pupa.scrape

from legistar.utils.itemgenerator import make_item
from legistar.ext.pupa.base import Adapter, Converter


class OrgsAdapter(Adapter):
    '''Converts legistar data into a pupa.scrape.Person instance.
    Note the make_item methods are popping values out the dict, because
    the associated keys aren't valid pupa.scrape.Person fields.
    '''
    pupa_model = pupa.scrape.Organization
    aliases = []
+     extras_keys = [
-     extras_keys = ['meeting_location', 'num_members', 'num_vacancies']
+         'meeting_location', 'num_members', 'num_vacancies', 'type']

    @make_item('classification')
    def get_classn(self):
        legistar_type = self.data.pop('type')
        return self.config.get_org_classification(legistar_type)
+
+     def get_instance(self, **extra_instance_data):
+
+         instance_data = self.get_instance_data()
+         instance_data.update(extra_instance_data)
+
+         extras = instance_data.pop('extras')
+         sources = instance_data.pop('sources')
+         identifiers = instance_data.pop('identifiers')
+
+         instance = self.pupa_model(**instance_data)
+         instance.extras.update(extras)
+         for source in sources:
+             instance.add_source(**source)
+         for identifier in identifiers:
+             instance.add_identifier(**identifier)
+
+         return instance


class OrgsConverter(Converter):
    '''Invokes the person and membership adapters to output pupa Person
    objects.
    '''
    adapter = OrgsAdapter

-     def gen_agenda_items(self):
-         yield from self.make_child(AgendaItemConverter, self.agenda)
-
-     def __iter__(self):
-         self.agenda = self.data.pop('agenda', [])
-         yield self.get_adapter().get_instance()
-         yield from self.gen_agenda_items()
-
Remove cruft copied from Memberships adapter
## Code Before:
import pupa.scrape

from legistar.utils.itemgenerator import make_item
from legistar.ext.pupa.base import Adapter, Converter


class OrgsAdapter(Adapter):
    '''Converts legistar data into a pupa.scrape.Person instance.
    Note the make_item methods are popping values out the dict, because
    the associated keys aren't valid pupa.scrape.Person fields.
    '''
    pupa_model = pupa.scrape.Organization
    aliases = []
    extras_keys = ['meeting_location', 'num_members', 'num_vacancies']

    @make_item('classification')
    def get_classn(self):
        legistar_type = self.data.pop('type')
        return self.config.get_org_classification(legistar_type)


class OrgsConverter(Converter):
    '''Invokes the person and membership adapters to output pupa Person
    objects.
    '''
    adapter = OrgsAdapter

    def gen_agenda_items(self):
        yield from self.make_child(AgendaItemConverter, self.agenda)

    def __iter__(self):
        self.agenda = self.data.pop('agenda', [])
        yield self.get_adapter().get_instance()
        yield from self.gen_agenda_items()
## Instruction:
Remove cruft copied from Memberships adapter
## Code After:
import pupa.scrape

from legistar.utils.itemgenerator import make_item
from legistar.ext.pupa.base import Adapter, Converter


class OrgsAdapter(Adapter):
    '''Converts legistar data into a pupa.scrape.Person instance.
    Note the make_item methods are popping values out the dict, because
    the associated keys aren't valid pupa.scrape.Person fields.
    '''
    pupa_model = pupa.scrape.Organization
    aliases = []
    extras_keys = [
        'meeting_location', 'num_members', 'num_vacancies', 'type']

    @make_item('classification')
    def get_classn(self):
        legistar_type = self.data.pop('type')
        return self.config.get_org_classification(legistar_type)

    def get_instance(self, **extra_instance_data):

        instance_data = self.get_instance_data()
        instance_data.update(extra_instance_data)

        extras = instance_data.pop('extras')
        sources = instance_data.pop('sources')
        identifiers = instance_data.pop('identifiers')

        instance = self.pupa_model(**instance_data)
        instance.extras.update(extras)
        for source in sources:
            instance.add_source(**source)
        for identifier in identifiers:
            instance.add_identifier(**identifier)

        return instance


class OrgsConverter(Converter):
    '''Invokes the person and membership adapters to output pupa Person
    objects.
    '''
    adapter = OrgsAdapter
42d06a15dd30e770dfccfccbd20fbf9bba52494d
platforms/m3/programming/goc_gdp_test.py
platforms/m3/programming/goc_gdp_test.py
import code
try:
    import Image
except ImportError:
    from PIL import Image
import gdp

gdp.gdp_init()
gcl_name = gdp.GDP_NAME("edu.umich.eecs.m3.test01")
gcl_handle = gdp.GDP_GCL(gcl_name, gdp.GDP_MODE_RA)

#j = Image.open('/tmp/capture1060.jpeg')
#d = {"data": j.tostring()}
#gcl_handle.append(d)

record = gcl_handle.read(3)
raw = record['data']

image = Image.fromstring('RGB', (640,640), raw)
image.show()

#code.interact(local=locals())
import code
try:
    import Image
except ImportError:
    from PIL import Image
import gdp

gdp.gdp_init()
gcl_name = gdp.GDP_NAME("edu.umich.eecs.m3.test01")
gcl_handle = gdp.GDP_GCL(gcl_name, gdp.GDP_MODE_RA)

#j = Image.open('/tmp/capture1060.jpeg')
#d = {"data": j.tostring()}
#gcl_handle.append(d)

while True:
    try:
        idx = int(raw_input("Image index: "))
        record = gcl_handle.read(idx)
        raw = record['data']

        image = Image.fromstring('RGB', (640,640), raw)
        image.show()
    except:
        print("That image not availabe.")

#code.interact(local=locals())
Update gdp test to open random images
Update gdp test to open random images
Python
apache-2.0
lab11/M-ulator,lab11/M-ulator,lab11/M-ulator,lab11/M-ulator,lab11/M-ulator,lab11/M-ulator,lab11/M-ulator
import code
try:
    import Image
except ImportError:
    from PIL import Image
import gdp

gdp.gdp_init()
gcl_name = gdp.GDP_NAME("edu.umich.eecs.m3.test01")
gcl_handle = gdp.GDP_GCL(gcl_name, gdp.GDP_MODE_RA)

#j = Image.open('/tmp/capture1060.jpeg')
#d = {"data": j.tostring()}
#gcl_handle.append(d)

- record = gcl_handle.read(3)
- raw = record['data']
+ while True:
+     try:
+         idx = int(raw_input("Image index: "))
+         record = gcl_handle.read(idx)
+         raw = record['data']
+
- image = Image.fromstring('RGB', (640,640), raw)
+         image = Image.fromstring('RGB', (640,640), raw)
- image.show()
+         image.show()
+     except:
+         print("That image not availabe.")

#code.interact(local=locals())
Update gdp test to open random images
## Code Before:
import code
try:
    import Image
except ImportError:
    from PIL import Image
import gdp

gdp.gdp_init()
gcl_name = gdp.GDP_NAME("edu.umich.eecs.m3.test01")
gcl_handle = gdp.GDP_GCL(gcl_name, gdp.GDP_MODE_RA)

#j = Image.open('/tmp/capture1060.jpeg')
#d = {"data": j.tostring()}
#gcl_handle.append(d)

record = gcl_handle.read(3)
raw = record['data']

image = Image.fromstring('RGB', (640,640), raw)
image.show()

#code.interact(local=locals())
## Instruction:
Update gdp test to open random images
## Code After:
import code
try:
    import Image
except ImportError:
    from PIL import Image
import gdp

gdp.gdp_init()
gcl_name = gdp.GDP_NAME("edu.umich.eecs.m3.test01")
gcl_handle = gdp.GDP_GCL(gcl_name, gdp.GDP_MODE_RA)

#j = Image.open('/tmp/capture1060.jpeg')
#d = {"data": j.tostring()}
#gcl_handle.append(d)

while True:
    try:
        idx = int(raw_input("Image index: "))
        record = gcl_handle.read(idx)
        raw = record['data']

        image = Image.fromstring('RGB', (640,640), raw)
        image.show()
    except:
        print("That image not availabe.")

#code.interact(local=locals())
e29039cf5b1cd0b40b8227ef73c2d5327450c162
app/core/servicemanager.py
app/core/servicemanager.py
import threading
import logging

logger = logging.getLogger(__name__)


class ServiceManager(threading.Thread):
    """
    Sequentially starts services using service.service_start(). When a new
    service is activated, view_manager is updated with its view.
    """

    def __init__(self, services, socket_manager):
        self.services = services
        self.cur_service = None
        self.socket_manager = socket_manager
        super().__init__()

    def run(self):
        """ Sequentially starts all the services."""
        logger.info("Starting services...")
        for service_cls in self.services:
            self.cur_service = service_cls(self.socket_manager)
            self.cur_service.service_start()
import importlib
import logging
from collections import namedtuple

from app.core.toposort import toposort

logger = logging.getLogger(__name__)
Module = namedtuple('Module', ["name", "deps", "meta"])


def list_modules(module):
    res = []
    # for name in os.listdir(module.__path__):
    for name in ["messaging"]:
        module = importlib.import_module("app.services." + name)
        module_meta = module.__meta__
        deps = module_meta["deps"]
        res.append(Module(name=name, deps=deps, meta=module_meta))
    return res


def topo_sort_modules(modules):
    module_map = {x.name: x for x in modules}
    dep_map = {x.name: x.deps for x in modules}
    res = []
    for item in toposort(dep_map):
        res.append(module_map[item])
    return res


class ServiceManager(threading.Thread):
    """
    Sequentially starts services using service.service_start(). When a new
    service is activated, view_manager is updated with its view.
    """

    def __init__(self, services, socket_manager):
        self.services = services
        self.cur_service = None
        self.socket_manager = socket_manager
        super().__init__()

    def run(self):
        """ Sequentially starts all the services."""
        logger.info("Starting services...")
        for service_cls in self.services:
            self.cur_service = service_cls(self.socket_manager)
            self.cur_service.service_start()
Use toposort for sequencing services.
Use toposort for sequencing services.
Python
mit
supersaiyanmode/HomePiServer,supersaiyanmode/HomePiServer,supersaiyanmode/HomePiServer
- import threading
+ import importlib
import logging
+ from collections import namedtuple
+
+ from app.core.toposort import toposort
+
+ logger = logging.getLogger(__name__)
+ Module = namedtuple('Module', ["name", "deps", "meta"])

- logger = logging.getLogger(__name__)
+ def list_modules(module):
+     res = []
+     # for name in os.listdir(module.__path__):
+     for name in ["messaging"]:
+         module = importlib.import_module("app.services." + name)
+         module_meta = module.__meta__
+         deps = module_meta["deps"]
+         res.append(Module(name=name, deps=deps, meta=module_meta))
+     return res
+
+
+ def topo_sort_modules(modules):
+     module_map = {x.name: x for x in modules}
+     dep_map = {x.name: x.deps for x in modules}
+     res = []
+     for item in toposort(dep_map):
+         res.append(module_map[item])
+     return res


class ServiceManager(threading.Thread):
    """
    Sequentially starts services using service.service_start(). When a new
    service is activated, view_manager is updated with its view.
    """

    def __init__(self, services, socket_manager):
        self.services = services
        self.cur_service = None
        self.socket_manager = socket_manager
        super().__init__()

    def run(self):
        """ Sequentially starts all the services."""
        logger.info("Starting services...")
        for service_cls in self.services:
            self.cur_service = service_cls(self.socket_manager)
            self.cur_service.service_start()
Use toposort for sequencing services.
## Code Before:
import threading
import logging

logger = logging.getLogger(__name__)


class ServiceManager(threading.Thread):
    """
    Sequentially starts services using service.service_start(). When a new
    service is activated, view_manager is updated with its view.
    """

    def __init__(self, services, socket_manager):
        self.services = services
        self.cur_service = None
        self.socket_manager = socket_manager
        super().__init__()

    def run(self):
        """ Sequentially starts all the services."""
        logger.info("Starting services...")
        for service_cls in self.services:
            self.cur_service = service_cls(self.socket_manager)
            self.cur_service.service_start()
## Instruction:
Use toposort for sequencing services.
## Code After:
import importlib
import logging
from collections import namedtuple

from app.core.toposort import toposort

logger = logging.getLogger(__name__)
Module = namedtuple('Module', ["name", "deps", "meta"])


def list_modules(module):
    res = []
    # for name in os.listdir(module.__path__):
    for name in ["messaging"]:
        module = importlib.import_module("app.services." + name)
        module_meta = module.__meta__
        deps = module_meta["deps"]
        res.append(Module(name=name, deps=deps, meta=module_meta))
    return res


def topo_sort_modules(modules):
    module_map = {x.name: x for x in modules}
    dep_map = {x.name: x.deps for x in modules}
    res = []
    for item in toposort(dep_map):
        res.append(module_map[item])
    return res


class ServiceManager(threading.Thread):
    """
    Sequentially starts services using service.service_start(). When a new
    service is activated, view_manager is updated with its view.
    """

    def __init__(self, services, socket_manager):
        self.services = services
        self.cur_service = None
        self.socket_manager = socket_manager
        super().__init__()

    def run(self):
        """ Sequentially starts all the services."""
        logger.info("Starting services...")
        for service_cls in self.services:
            self.cur_service = service_cls(self.socket_manager)
            self.cur_service.service_start()
fc1505865f919764aa066e5e43dcde1bc7e760b2
frappe/patches/v13_0/rename_desk_page_to_workspace.py
frappe/patches/v13_0/rename_desk_page_to_workspace.py
import frappe
from frappe.model.rename_doc import rename_doc


def execute():
    if frappe.db.exists("DocType", "Desk Page"):
        if frappe.db.exists('DocType', 'Workspace'):
            # this patch was not added initially, so this page might still exist
            frappe.delete_doc('DocType', 'Desk Page')
        else:
            rename_doc('DocType', 'Desk Page', 'Workspace')

    rename_doc('DocType', 'Desk Chart', 'Workspace Chart', ignore_if_exists=True)
    rename_doc('DocType', 'Desk Link', 'Workspace Link', ignore_if_exists=True)
    rename_doc('DocType', 'Desk Shortcut', 'Workspace Shortcut', ignore_if_exists=True)

    frappe.reload_doc('desk', 'doctype', 'workspace')
import frappe
from frappe.model.rename_doc import rename_doc


def execute():
    if frappe.db.exists("DocType", "Desk Page"):
        if frappe.db.exists('DocType', 'Workspace'):
            # this patch was not added initially, so this page might still exist
            frappe.delete_doc('DocType', 'Desk Page')
        else:
            rename_doc('DocType', 'Desk Page', 'Workspace')

    rename_doc('DocType', 'Desk Chart', 'Workspace Chart', ignore_if_exists=True)
    rename_doc('DocType', 'Desk Shortcut', 'Workspace Shortcut', ignore_if_exists=True)

    if frappe.db.exist('DocType', 'Desk Link'):
        rename_doc('DocType', 'Desk Link', 'Workspace Link', ignore_if_exists=True)

    frappe.reload_doc('desk', 'doctype', 'workspace')
Rename Desk Link only if it exists
fix(Patch): Rename Desk Link only if it exists
Python
mit
frappe/frappe,StrellaGroup/frappe,StrellaGroup/frappe,saurabh6790/frappe,StrellaGroup/frappe,mhbu50/frappe,almeidapaulopt/frappe,yashodhank/frappe,saurabh6790/frappe,mhbu50/frappe,frappe/frappe,yashodhank/frappe,almeidapaulopt/frappe,mhbu50/frappe,saurabh6790/frappe,almeidapaulopt/frappe,almeidapaulopt/frappe,yashodhank/frappe,saurabh6790/frappe,mhbu50/frappe,frappe/frappe,yashodhank/frappe
import frappe
from frappe.model.rename_doc import rename_doc


def execute():
    if frappe.db.exists("DocType", "Desk Page"):
        if frappe.db.exists('DocType', 'Workspace'):
            # this patch was not added initially, so this page might still exist
            frappe.delete_doc('DocType', 'Desk Page')
        else:
            rename_doc('DocType', 'Desk Page', 'Workspace')

    rename_doc('DocType', 'Desk Chart', 'Workspace Chart', ignore_if_exists=True)
-     rename_doc('DocType', 'Desk Link', 'Workspace Link', ignore_if_exists=True)
    rename_doc('DocType', 'Desk Shortcut', 'Workspace Shortcut', ignore_if_exists=True)
+     if frappe.db.exist('DocType', 'Desk Link'):
+         rename_doc('DocType', 'Desk Link', 'Workspace Link', ignore_if_exists=True)

    frappe.reload_doc('desk', 'doctype', 'workspace')
Rename Desk Link only if it exists
## Code Before:
import frappe
from frappe.model.rename_doc import rename_doc


def execute():
    if frappe.db.exists("DocType", "Desk Page"):
        if frappe.db.exists('DocType', 'Workspace'):
            # this patch was not added initially, so this page might still exist
            frappe.delete_doc('DocType', 'Desk Page')
        else:
            rename_doc('DocType', 'Desk Page', 'Workspace')

    rename_doc('DocType', 'Desk Chart', 'Workspace Chart', ignore_if_exists=True)
    rename_doc('DocType', 'Desk Link', 'Workspace Link', ignore_if_exists=True)
    rename_doc('DocType', 'Desk Shortcut', 'Workspace Shortcut', ignore_if_exists=True)

    frappe.reload_doc('desk', 'doctype', 'workspace')
## Instruction:
Rename Desk Link only if it exists
## Code After:
import frappe
from frappe.model.rename_doc import rename_doc


def execute():
    if frappe.db.exists("DocType", "Desk Page"):
        if frappe.db.exists('DocType', 'Workspace'):
            # this patch was not added initially, so this page might still exist
            frappe.delete_doc('DocType', 'Desk Page')
        else:
            rename_doc('DocType', 'Desk Page', 'Workspace')

    rename_doc('DocType', 'Desk Chart', 'Workspace Chart', ignore_if_exists=True)
    rename_doc('DocType', 'Desk Shortcut', 'Workspace Shortcut', ignore_if_exists=True)

    if frappe.db.exist('DocType', 'Desk Link'):
        rename_doc('DocType', 'Desk Link', 'Workspace Link', ignore_if_exists=True)

    frappe.reload_doc('desk', 'doctype', 'workspace')
828fec30b5b1deddcca79eae8fd1029bd5bd7b54
py/desispec/io/__init__.py
py/desispec/io/__init__.py
# help with 2to3 support
from __future__ import absolute_import, division

from .meta import findfile, get_exposures, get_files, rawdata_root, specprod_root
from .frame import read_frame, write_frame
from .sky import read_sky, write_sky
from .fiberflat import read_fiberflat, write_fiberflat
from .fibermap import read_fibermap, write_fibermap, empty_fibermap
from .brick import Brick
from .qa import read_qa_frame, write_qa_frame
from .zfind import read_zbest, write_zbest
from .image import read_image, write_image
from .util import (header2wave, fitsheader, native_endian, makepath,
    write_bintable, iterfiles)
from .fluxcalibration import (
    read_stdstar_templates, write_stdstar_model,
    read_flux_calibration, write_flux_calibration)
from .filters import read_filter_response
from .download import download, filepath2url
from .crc import memcrc, cksum
from .database import (load_brick, is_night, load_night, is_flavor, load_flavor,
    get_bricks_by_name, get_brickid_by_name, load_data)
# help with 2to3 support
from __future__ import absolute_import, division

from .meta import (findfile, get_exposures, get_files, rawdata_root,
    specprod_root, validate_night)
from .frame import read_frame, write_frame
from .sky import read_sky, write_sky
from .fiberflat import read_fiberflat, write_fiberflat
from .fibermap import read_fibermap, write_fibermap, empty_fibermap
from .brick import Brick
from .qa import read_qa_frame, write_qa_frame
from .zfind import read_zbest, write_zbest
from .image import read_image, write_image
from .util import (header2wave, fitsheader, native_endian, makepath,
    write_bintable, iterfiles)
from .fluxcalibration import (
    read_stdstar_templates, write_stdstar_model,
    read_flux_calibration, write_flux_calibration)
from .filters import read_filter_response
from .download import download, filepath2url
from .crc import memcrc, cksum
from .database import (load_brick, is_night, load_night, is_flavor, load_flavor,
    get_bricks_by_name, get_brickid_by_name, load_data)
Add validate_night to public API
Add validate_night to public API
Python
bsd-3-clause
desihub/desispec,gdhungana/desispec,timahutchinson/desispec,gdhungana/desispec,desihub/desispec,timahutchinson/desispec
# help with 2to3 support
from __future__ import absolute_import, division

- from .meta import findfile, get_exposures, get_files, rawdata_root, specprod_root
+ from .meta import (findfile, get_exposures, get_files, rawdata_root,
+     specprod_root, validate_night)
from .frame import read_frame, write_frame
from .sky import read_sky, write_sky
from .fiberflat import read_fiberflat, write_fiberflat
from .fibermap import read_fibermap, write_fibermap, empty_fibermap
from .brick import Brick
from .qa import read_qa_frame, write_qa_frame
from .zfind import read_zbest, write_zbest
from .image import read_image, write_image
from .util import (header2wave, fitsheader, native_endian, makepath,
    write_bintable, iterfiles)
from .fluxcalibration import (
    read_stdstar_templates, write_stdstar_model,
    read_flux_calibration, write_flux_calibration)
from .filters import read_filter_response
from .download import download, filepath2url
from .crc import memcrc, cksum
from .database import (load_brick, is_night, load_night, is_flavor, load_flavor,
    get_bricks_by_name, get_brickid_by_name, load_data)
Add validate_night to public API
## Code Before:
# help with 2to3 support
from __future__ import absolute_import, division

from .meta import findfile, get_exposures, get_files, rawdata_root, specprod_root
from .frame import read_frame, write_frame
from .sky import read_sky, write_sky
from .fiberflat import read_fiberflat, write_fiberflat
from .fibermap import read_fibermap, write_fibermap, empty_fibermap
from .brick import Brick
from .qa import read_qa_frame, write_qa_frame
from .zfind import read_zbest, write_zbest
from .image import read_image, write_image
from .util import (header2wave, fitsheader, native_endian, makepath, write_bintable, iterfiles)
from .fluxcalibration import (read_stdstar_templates, write_stdstar_model, read_flux_calibration, write_flux_calibration)
from .filters import read_filter_response
from .download import download, filepath2url
from .crc import memcrc, cksum
from .database import (load_brick, is_night, load_night, is_flavor, load_flavor, get_bricks_by_name, get_brickid_by_name, load_data)

## Instruction:
Add validate_night to public API

## Code After:
# help with 2to3 support
from __future__ import absolute_import, division

from .meta import (findfile, get_exposures, get_files, rawdata_root,
    specprod_root, validate_night)
from .frame import read_frame, write_frame
from .sky import read_sky, write_sky
from .fiberflat import read_fiberflat, write_fiberflat
from .fibermap import read_fibermap, write_fibermap, empty_fibermap
from .brick import Brick
from .qa import read_qa_frame, write_qa_frame
from .zfind import read_zbest, write_zbest
from .image import read_image, write_image
from .util import (header2wave, fitsheader, native_endian, makepath, write_bintable, iterfiles)
from .fluxcalibration import (read_stdstar_templates, write_stdstar_model, read_flux_calibration, write_flux_calibration)
from .filters import read_filter_response
from .download import download, filepath2url
from .crc import memcrc, cksum
from .database import (load_brick, is_night, load_night, is_flavor, load_flavor, get_bricks_by_name, get_brickid_by_name, load_data)
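The net effect of this record is simply that validate_night joins the package's public surface; a minimal check of the new export (a sketch, assuming desispec is installed):

from desispec.io import validate_night  # re-exported from desispec.io.meta by this commit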
6d2f9bfbe1011c04e014016171d98fef1d12e840
tests/test_samtools_python.py
tests/test_samtools_python.py
import pysam


def test_idxstats_parse():
    bam_filename = "./pysam_data/ex2.bam"
    lines = pysam.idxstats(bam_filename)
    for line in lines:
        _seqname, _seqlen, nmapped, _nunmapped = line.split()


def test_bedcov():
    bam_filename = "./pysam_data/ex1.bam"
    bed_filename = "./pysam_data/ex1.bed"
    lines = pysam.bedcov(bed_filename, bam_filename)
    for line in lines:
        fields = line.split('\t')
        assert len(fields) in [4, 5], "bedcov should give tab delimited output with 4 or 5 fields. Split line (%s) gives %d fields." % (fields, len(fields))
import pysam


def test_idxstats_parse_old_style_output():
    bam_filename = "./pysam_data/ex2.bam"
    lines = pysam.idxstats(bam_filename, old_style_output=True)
    for line in lines:
        _seqname, _seqlen, nmapped, _nunmapped = line.split()


def test_bedcov_old_style_output():
    bam_filename = "./pysam_data/ex1.bam"
    bed_filename = "./pysam_data/ex1.bed"
    lines = pysam.bedcov(bed_filename, bam_filename, old_style_output=True)
    for line in lines:
        fields = line.split('\t')
        assert len(fields) in [4, 5], "bedcov should give tab delimited output with 4 or 5 fields. Split line (%s) gives %d fields." % (fields, len(fields))


def test_idxstats_parse():
    bam_filename = "./pysam_data/ex2.bam"
    idxstats_string = pysam.idxstats(bam_filename, old_style_output=True)
    lines = idxstats_string.splitlines()
    for line in lines:
        splt = line.split("\t")
        _seqname, _seqlen, nmapped, _nunmapped = splt


def test_bedcov():
    bam_filename = "./pysam_data/ex1.bam"
    bed_filename = "./pysam_data/ex1.bed"
    bedcov_string = pysam.bedcov(bed_filename, bam_filename, old_style_output=True)
    lines = bedcov_string.splitlines()
    for line in lines:
        fields = line.split('\t')
        assert len(fields) in [4, 5], "bedcov should give tab delimited output with 4 or 5 fields. Split line (%s) gives %d fields." % (fields, len(fields))
Add test for both new and old style output
Add test for both new and old style output
Python
mit
pysam-developers/pysam,kyleabeauchamp/pysam,pysam-developers/pysam,TyberiusPrime/pysam,bioinformed/pysam,TyberiusPrime/pysam,bioinformed/pysam,TyberiusPrime/pysam,pysam-developers/pysam,bioinformed/pysam,kyleabeauchamp/pysam,TyberiusPrime/pysam,kyleabeauchamp/pysam,kyleabeauchamp/pysam,kyleabeauchamp/pysam,pysam-developers/pysam,bioinformed/pysam,TyberiusPrime/pysam,bioinformed/pysam
  import pysam
+
+
+ def test_idxstats_parse_old_style_output():
+     bam_filename = "./pysam_data/ex2.bam"
+     lines = pysam.idxstats(bam_filename, old_style_output=True)
+     for line in lines:
+         _seqname, _seqlen, nmapped, _nunmapped = line.split()
+
+
+ def test_bedcov_old_style_output():
+     bam_filename = "./pysam_data/ex1.bam"
+     bed_filename = "./pysam_data/ex1.bed"
+     lines = pysam.bedcov(bed_filename, bam_filename, old_style_output=True)
+     for line in lines:
+         fields = line.split('\t')
+         assert len(fields) in [4, 5], "bedcov should give tab delimited output with 4 or 5 fields. Split line (%s) gives %d fields." % (fields, len(fields))

  def test_idxstats_parse():
      bam_filename = "./pysam_data/ex2.bam"
-     lines = pysam.idxstats(bam_filename)
+     idxstats_string = pysam.idxstats(bam_filename, old_style_output=True)
+     lines = idxstats_string.splitlines()
      for line in lines:
+         splt = line.split("\t")
-         _seqname, _seqlen, nmapped, _nunmapped = line.split()
+         _seqname, _seqlen, nmapped, _nunmapped = splt
+

  def test_bedcov():
      bam_filename = "./pysam_data/ex1.bam"
      bed_filename = "./pysam_data/ex1.bed"
-     lines = pysam.bedcov(bed_filename, bam_filename)
+     bedcov_string = pysam.bedcov(bed_filename, bam_filename, old_style_output=True)
+     lines = bedcov_string.splitlines()
      for line in lines:
          fields = line.split('\t')
          assert len(fields) in [4, 5], "bedcov should give tab delimited output with 4 or 5 fields. Split line (%s) gives %d fields." % (fields, len(fields))
Add test for both new and old style output
## Code Before:
import pysam


def test_idxstats_parse():
    bam_filename = "./pysam_data/ex2.bam"
    lines = pysam.idxstats(bam_filename)
    for line in lines:
        _seqname, _seqlen, nmapped, _nunmapped = line.split()


def test_bedcov():
    bam_filename = "./pysam_data/ex1.bam"
    bed_filename = "./pysam_data/ex1.bed"
    lines = pysam.bedcov(bed_filename, bam_filename)
    for line in lines:
        fields = line.split('\t')
        assert len(fields) in [4, 5], "bedcov should give tab delimited output with 4 or 5 fields. Split line (%s) gives %d fields." % (fields, len(fields))

## Instruction:
Add test for both new and old style output

## Code After:
import pysam


def test_idxstats_parse_old_style_output():
    bam_filename = "./pysam_data/ex2.bam"
    lines = pysam.idxstats(bam_filename, old_style_output=True)
    for line in lines:
        _seqname, _seqlen, nmapped, _nunmapped = line.split()


def test_bedcov_old_style_output():
    bam_filename = "./pysam_data/ex1.bam"
    bed_filename = "./pysam_data/ex1.bed"
    lines = pysam.bedcov(bed_filename, bam_filename, old_style_output=True)
    for line in lines:
        fields = line.split('\t')
        assert len(fields) in [4, 5], "bedcov should give tab delimited output with 4 or 5 fields. Split line (%s) gives %d fields." % (fields, len(fields))


def test_idxstats_parse():
    bam_filename = "./pysam_data/ex2.bam"
    idxstats_string = pysam.idxstats(bam_filename, old_style_output=True)
    lines = idxstats_string.splitlines()
    for line in lines:
        splt = line.split("\t")
        _seqname, _seqlen, nmapped, _nunmapped = splt


def test_bedcov():
    bam_filename = "./pysam_data/ex1.bam"
    bed_filename = "./pysam_data/ex1.bed"
    bedcov_string = pysam.bedcov(bed_filename, bam_filename, old_style_output=True)
    lines = bedcov_string.splitlines()
    for line in lines:
        fields = line.split('\t')
        assert len(fields) in [4, 5], "bedcov should give tab delimited output with 4 or 5 fields. Split line (%s) gives %d fields." % (fields, len(fields))
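These tests pin down the difference between pysam's old-style output (a list of lines) and the newer single-string return. A short sketch of parsing the new-style return, assuming a BAM file exists at the path the tests use:

import pysam

stats = pysam.idxstats("./pysam_data/ex2.bam")  # new style: one string, not a list
for line in stats.splitlines():
    seqname, seqlen, nmapped, nunmapped = line.split("\t")  # tab-separated fields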
1ff19fcd0bcbb396b7cb676c5dddf8d3c8652419
live/components/misc.py
live/components/misc.py
from live.helpers import Timer


def timed(fun, time, next_fun=None):
    """A component that runs another component for a fixed length of time. Can optionally be given a follow-up component for chaining.

    :param callable fun: The component to be run
    :param number time: The amount of time to run the component
    :keyword callable next_fun: A component to run after the timed component is finished
    """
    timer = Timer(time)

    def timed_callback(self, id):
        nonlocal timer
        if timer > 0.0:
            fun(self, id)
        else:
            if next_fun:
                self.logic_components.set(next_fun, id=id)
            else:
                self.logic_components.remove(id)
    return timed_callback


def suspend(time, next_fun):
    """A component that suspends a component currently in the component list for a fixed length of time. Can optionally be given a different component to be run after the suspension is lifted.

    :param number time: The amount of time to suspend the component
    :keyword callable next_fun: A component to run after the suspension is lifted
    """
    def suspend_callback(self, id):
        pass
    return timed(suspend_callback, time, next_fun)
from live.helpers import Timer


def timed(fun, time, next_fun=None):
    """A component that runs another component for a fixed length of time. Can optionally be given a follow-up component for chaining.

    :param callable fun: The component to be run
    :param number time: The amount of time to run the component
    :keyword callable next_fun: A component to run after the timed component is finished
    """
    timer = Timer(time)

    def timed_callback(self, id, *args):
        nonlocal timer
        if timer > 0.0:
            fun(self, id)
        else:
            if len(args) == 0:
                correct_queue = self.logic_components
            else:
                correct_queue = self.collision_components

            if next_fun:
                correct_queue.set(next_fun, id=id)
            else:
                correct_queue.remove(id)
    return timed_callback


def suspend(time, next_fun):
    """A component that suspends a component currently in the component list for a fixed length of time. Can optionally be given a different component to be run after the suspension is lifted.

    :param number time: The amount of time to suspend the component
    :keyword callable next_fun: A component to run after the suspension is lifted
    """
    def suspend_callback(self, id):
        pass
    return timed(suspend_callback, time, next_fun)
Update timed_callback to support collision callbacks.
Update timed_callback to support collision callbacks.
Python
lgpl-2.1
GalanCM/BGELive
  from live.helpers import Timer


  def timed(fun, time, next_fun=None):
      """A component that runs another component for a fixed length of time. Can optionally be given a follow-up component for chaining.

      :param callable fun: The component to be run
      :param number time: The amount of time to run the component
      :keyword callable next_fun: A component to run after the timed component is finished
      """
      timer = Timer(time)

-     def timed_callback(self, id):
+     def timed_callback(self, id, *args):
          nonlocal timer
          if timer > 0.0:
              fun(self, id)
          else:
+             if len(args) == 0:
+                 correct_queue = self.logic_components
+             else:
+                 correct_queue = self.collision_components
+
              if next_fun:
-                 self.logic_components.set(next_fun, id=id)
+                 correct_queue.set(next_fun, id=id)
              else:
-                 self.logic_components.remove(id)
+                 correct_queue.remove(id)
      return timed_callback


  def suspend(time, next_fun):
      """A component that suspends a component currently in the component list for a fixed length of time. Can optionally be given a different component to be run after the suspension is lifted.

      :param number time: The amount of time to suspend the component
      :keyword callable next_fun: A component to run after the suspension is lifted
      """
      def suspend_callback(self, id):
          pass
      return timed(suspend_callback, time, next_fun)
Update timed_callback to support collision callbacks.
## Code Before:
from live.helpers import Timer


def timed(fun, time, next_fun=None):
    """A component that runs another component for a fixed length of time. Can optionally be given a follow-up component for chaining.

    :param callable fun: The component to be run
    :param number time: The amount of time to run the component
    :keyword callable next_fun: A component to run after the timed component is finished
    """
    timer = Timer(time)

    def timed_callback(self, id):
        nonlocal timer
        if timer > 0.0:
            fun(self, id)
        else:
            if next_fun:
                self.logic_components.set(next_fun, id=id)
            else:
                self.logic_components.remove(id)
    return timed_callback


def suspend(time, next_fun):
    """A component that suspends a component currently in the component list for a fixed length of time. Can optionally be given a different component to be run after the suspension is lifted.

    :param number time: The amount of time to suspend the component
    :keyword callable next_fun: A component to run after the suspension is lifted
    """
    def suspend_callback(self, id):
        pass
    return timed(suspend_callback, time, next_fun)

## Instruction:
Update timed_callback to support collision callbacks.

## Code After:
from live.helpers import Timer


def timed(fun, time, next_fun=None):
    """A component that runs another component for a fixed length of time. Can optionally be given a follow-up component for chaining.

    :param callable fun: The component to be run
    :param number time: The amount of time to run the component
    :keyword callable next_fun: A component to run after the timed component is finished
    """
    timer = Timer(time)

    def timed_callback(self, id, *args):
        nonlocal timer
        if timer > 0.0:
            fun(self, id)
        else:
            if len(args) == 0:
                correct_queue = self.logic_components
            else:
                correct_queue = self.collision_components

            if next_fun:
                correct_queue.set(next_fun, id=id)
            else:
                correct_queue.remove(id)
    return timed_callback


def suspend(time, next_fun):
    """A component that suspends a component currently in the component list for a fixed length of time. Can optionally be given a different component to be run after the suspension is lifted.

    :param number time: The amount of time to suspend the component
    :keyword callable next_fun: A component to run after the suspension is lifted
    """
    def suspend_callback(self, id):
        pass
    return timed(suspend_callback, time, next_fun)
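A usage sketch for the timed component; the owner object, its color attribute, and the registration call are extrapolations from the queue methods the record itself uses, not documented BGELive API:

from live.components.misc import timed

def flash(self, id):
    self.color = (1.0, 0.0, 0.0, 1.0)  # hypothetical attribute on the owner

def restore(self, id):
    self.color = (1.0, 1.0, 1.0, 1.0)  # hypothetical attribute on the owner

# Run flash for 2 seconds of game time, then hand over to restore:
# owner.logic_components.set(timed(flash, 2.0, next_fun=restore))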
28ac2b259d89c168f1e822fe087c66f2f618321a
setup.py
setup.py
from distutils.core import setup

scripts = ['sed_fit', 'sed_plot', 'sed_filter_output', 'sed_fitinfo2data', 'sed_fitinfo2ascii']

setup(name='sedfitter',
      version='0.1.1',
      description='SED Fitter in python',
      author='Thomas Robitaille',
      author_email='trobitaille@cfa.harvard.edu',
      packages=['sedfitter', 'sedfitter.convolve', 'sedfitter.filter', 'sedfitter.sed', 'sedfitter.utils'],
      scripts=['scripts/' + x for x in scripts]
      )
from distutils.core import setup

scripts = ['sed_fit', 'sed_plot', 'sed_filter_output', 'sed_fitinfo2data', 'sed_fitinfo2ascii']

setup(name='sedfitter',
      version='0.1.1',
      description='SED Fitter in python',
      author='Thomas Robitaille',
      author_email='trobitaille@cfa.harvard.edu',
      packages=['sedfitter', 'sedfitter.convolve', 'sedfitter.filter', 'sedfitter.sed', 'sedfitter.source', 'sedfitter.utils'],
      scripts=['scripts/' + x for x in scripts]
      )
Add sedfitter.source to list of sub-packages to install (otherwise results in ImportError)
Add sedfitter.source to list of sub-packages to install (otherwise results in ImportError)
Python
bsd-2-clause
astrofrog/sedfitter
  from distutils.core import setup

  scripts = ['sed_fit', 'sed_plot', 'sed_filter_output', 'sed_fitinfo2data', 'sed_fitinfo2ascii']

  setup(name='sedfitter',
        version='0.1.1',
        description='SED Fitter in python',
        author='Thomas Robitaille',
        author_email='trobitaille@cfa.harvard.edu',
-       packages=['sedfitter', 'sedfitter.convolve', 'sedfitter.filter', 'sedfitter.sed', 'sedfitter.utils'],
+       packages=['sedfitter', 'sedfitter.convolve', 'sedfitter.filter', 'sedfitter.sed', 'sedfitter.source', 'sedfitter.utils'],
        scripts=['scripts/' + x for x in scripts]
        )
Add sedfitter.source to list of sub-packages to install (otherwise results in ImportError)
## Code Before:
from distutils.core import setup

scripts = ['sed_fit', 'sed_plot', 'sed_filter_output', 'sed_fitinfo2data', 'sed_fitinfo2ascii']

setup(name='sedfitter',
      version='0.1.1',
      description='SED Fitter in python',
      author='Thomas Robitaille',
      author_email='trobitaille@cfa.harvard.edu',
      packages=['sedfitter', 'sedfitter.convolve', 'sedfitter.filter', 'sedfitter.sed', 'sedfitter.utils'],
      scripts=['scripts/' + x for x in scripts]
      )

## Instruction:
Add sedfitter.source to list of sub-packages to install (otherwise results in ImportError)

## Code After:
from distutils.core import setup

scripts = ['sed_fit', 'sed_plot', 'sed_filter_output', 'sed_fitinfo2data', 'sed_fitinfo2ascii']

setup(name='sedfitter',
      version='0.1.1',
      description='SED Fitter in python',
      author='Thomas Robitaille',
      author_email='trobitaille@cfa.harvard.edu',
      packages=['sedfitter', 'sedfitter.convolve', 'sedfitter.filter', 'sedfitter.sed', 'sedfitter.source', 'sedfitter.utils'],
      scripts=['scripts/' + x for x in scripts]
      )
d2cc077bfce9bef654a8ef742996e5aca8858fc7
setup.py
setup.py
from distutils.core import setup

setup(name='Pyranha',
      description='Elegant IRC client',
      version='0.1',
      author='John Reese',
      author_email='john@noswap.com',
      url='https://github.com/jreese/pyranha',
      classifiers=['License :: OSI Approved :: MIT License',
                   'Topic :: Communications :: Chat :: Internet Relay Chat',
                   'Development Status :: 2 - Pre-Alpha',
                   ],
      license='MIT License',
      packages=['pyranha', 'pyranha.irc'],
      package_data={'pyranha': []},
      scripts=['bin/pyranha'],
      )
from distutils.core import setup

setup(name='Pyranha',
      description='Elegant IRC client',
      version='0.1',
      author='John Reese',
      author_email='john@noswap.com',
      url='https://github.com/jreese/pyranha',
      classifiers=['License :: OSI Approved :: MIT License',
                   'Topic :: Communications :: Chat :: Internet Relay Chat',
                   'Development Status :: 2 - Pre-Alpha',
                   ],
      license='MIT License',
      packages=['pyranha', 'pyranha.irc'],
      package_data={'pyranha': ['dotfiles/*']},
      scripts=['bin/pyranha'],
      )
Install default dotfiles with package
Install default dotfiles with package
Python
mit
jreese/pyranha
  from distutils.core import setup

  setup(name='Pyranha',
        description='Elegant IRC client',
        version='0.1',
        author='John Reese',
        author_email='john@noswap.com',
        url='https://github.com/jreese/pyranha',
        classifiers=['License :: OSI Approved :: MIT License',
                     'Topic :: Communications :: Chat :: Internet Relay Chat',
                     'Development Status :: 2 - Pre-Alpha',
                     ],
        license='MIT License',
        packages=['pyranha', 'pyranha.irc'],
-       package_data={'pyranha': []},
+       package_data={'pyranha': ['dotfiles/*']},
        scripts=['bin/pyranha'],
        )
Install default dotfiles with package
## Code Before:
from distutils.core import setup

setup(name='Pyranha',
      description='Elegant IRC client',
      version='0.1',
      author='John Reese',
      author_email='john@noswap.com',
      url='https://github.com/jreese/pyranha',
      classifiers=['License :: OSI Approved :: MIT License',
                   'Topic :: Communications :: Chat :: Internet Relay Chat',
                   'Development Status :: 2 - Pre-Alpha',
                   ],
      license='MIT License',
      packages=['pyranha', 'pyranha.irc'],
      package_data={'pyranha': []},
      scripts=['bin/pyranha'],
      )

## Instruction:
Install default dotfiles with package

## Code After:
from distutils.core import setup

setup(name='Pyranha',
      description='Elegant IRC client',
      version='0.1',
      author='John Reese',
      author_email='john@noswap.com',
      url='https://github.com/jreese/pyranha',
      classifiers=['License :: OSI Approved :: MIT License',
                   'Topic :: Communications :: Chat :: Internet Relay Chat',
                   'Development Status :: 2 - Pre-Alpha',
                   ],
      license='MIT License',
      packages=['pyranha', 'pyranha.irc'],
      package_data={'pyranha': ['dotfiles/*']},
      scripts=['bin/pyranha'],
      )
f56d8b35aa7d1d2c06d5c98ef49696e829459042
log_request_id/tests.py
log_request_id/tests.py
import logging

from django.test import TestCase, RequestFactory

from log_request_id.middleware import RequestIDMiddleware
from testproject.views import test_view


class RequestIDLoggingTestCase(TestCase):

    def setUp(self):
        self.factory = RequestFactory()
        self.handler = logging.getLogger('testproject').handlers[0]

    def test_id_generation(self):
        request = self.factory.get('/')
        middleware = RequestIDMiddleware()
        middleware.process_request(request)
        self.assertTrue(hasattr(request, 'id'))
        test_view(request)
        self.assertTrue(request.id in self.handler.messages[0])
import logging

from django.test import TestCase, RequestFactory

from log_request_id.middleware import RequestIDMiddleware
from testproject.views import test_view


class RequestIDLoggingTestCase(TestCase):

    def setUp(self):
        self.factory = RequestFactory()
        self.handler = logging.getLogger('testproject').handlers[0]
        self.handler.messages = []

    def test_id_generation(self):
        request = self.factory.get('/')
        middleware = RequestIDMiddleware()
        middleware.process_request(request)
        self.assertTrue(hasattr(request, 'id'))
        test_view(request)
        self.assertTrue(request.id in self.handler.messages[0])

    def test_external_id_in_http_header(self):
        with self.settings(LOG_REQUEST_ID_HEADER='REQUEST_ID_HEADER'):
            request = self.factory.get('/')
            request.META['REQUEST_ID_HEADER'] = 'some_request_id'
            middleware = RequestIDMiddleware()
            middleware.process_request(request)
            self.assertEqual(request.id, 'some_request_id')
            test_view(request)
            self.assertTrue('some_request_id' in self.handler.messages[0])
Add test for externally-generated request IDs
Add test for externally-generated request IDs
Python
bsd-2-clause
dabapps/django-log-request-id
  import logging

  from django.test import TestCase, RequestFactory

  from log_request_id.middleware import RequestIDMiddleware
  from testproject.views import test_view


  class RequestIDLoggingTestCase(TestCase):

      def setUp(self):
          self.factory = RequestFactory()
          self.handler = logging.getLogger('testproject').handlers[0]
+         self.handler.messages = []

      def test_id_generation(self):
          request = self.factory.get('/')
          middleware = RequestIDMiddleware()
          middleware.process_request(request)
          self.assertTrue(hasattr(request, 'id'))
          test_view(request)
          self.assertTrue(request.id in self.handler.messages[0])

+     def test_external_id_in_http_header(self):
+         with self.settings(LOG_REQUEST_ID_HEADER='REQUEST_ID_HEADER'):
+             request = self.factory.get('/')
+             request.META['REQUEST_ID_HEADER'] = 'some_request_id'
+             middleware = RequestIDMiddleware()
+             middleware.process_request(request)
+             self.assertEqual(request.id, 'some_request_id')
+             test_view(request)
+             self.assertTrue('some_request_id' in self.handler.messages[0])
Add test for externally-generated request IDs
## Code Before:
import logging

from django.test import TestCase, RequestFactory

from log_request_id.middleware import RequestIDMiddleware
from testproject.views import test_view


class RequestIDLoggingTestCase(TestCase):

    def setUp(self):
        self.factory = RequestFactory()
        self.handler = logging.getLogger('testproject').handlers[0]

    def test_id_generation(self):
        request = self.factory.get('/')
        middleware = RequestIDMiddleware()
        middleware.process_request(request)
        self.assertTrue(hasattr(request, 'id'))
        test_view(request)
        self.assertTrue(request.id in self.handler.messages[0])

## Instruction:
Add test for externally-generated request IDs

## Code After:
import logging

from django.test import TestCase, RequestFactory

from log_request_id.middleware import RequestIDMiddleware
from testproject.views import test_view


class RequestIDLoggingTestCase(TestCase):

    def setUp(self):
        self.factory = RequestFactory()
        self.handler = logging.getLogger('testproject').handlers[0]
        self.handler.messages = []

    def test_id_generation(self):
        request = self.factory.get('/')
        middleware = RequestIDMiddleware()
        middleware.process_request(request)
        self.assertTrue(hasattr(request, 'id'))
        test_view(request)
        self.assertTrue(request.id in self.handler.messages[0])

    def test_external_id_in_http_header(self):
        with self.settings(LOG_REQUEST_ID_HEADER='REQUEST_ID_HEADER'):
            request = self.factory.get('/')
            request.META['REQUEST_ID_HEADER'] = 'some_request_id'
            middleware = RequestIDMiddleware()
            middleware.process_request(request)
            self.assertEqual(request.id, 'some_request_id')
            test_view(request)
            self.assertTrue('some_request_id' in self.handler.messages[0])
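For context, a hedged sketch of the settings side this new test exercises; the header name is illustrative, and Django exposes an incoming X-Request-Id header to request.META under the HTTP_-prefixed key:

# settings.py (illustrative)
LOG_REQUEST_ID_HEADER = 'HTTP_X_REQUEST_ID'
MIDDLEWARE_CLASSES = (
    'log_request_id.middleware.RequestIDMiddleware',
    # ... the rest of the usual stack
)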
1002f40dc0ca118308144d3a51b696815501519f
account_direct_debit/wizard/payment_order_create.py
account_direct_debit/wizard/payment_order_create.py
from openerp import models, api


class PaymentOrderCreate(models.TransientModel):
    _inherit = 'payment.order.create'

    @api.multi
    def extend_payment_order_domain(self, payment_order, domain):
        super(PaymentOrderCreate, self).extend_payment_order_domain(
            payment_order, domain)
        if payment_order.payment_order_type == 'debit':
            # With the new system with bank.payment.line, we want
            # to be able to have payment lines linked to customer
            # invoices and payment lines linked to customer refunds
            # in order to debit the customer of the total of his
            # invoices minus his refunds
            domain += [('account_id.type', '=', 'receivable')]
        return True
from openerp import models, api


class PaymentOrderCreate(models.TransientModel):
    _inherit = 'payment.order.create'

    @api.multi
    def extend_payment_order_domain(self, payment_order, domain):
        super(PaymentOrderCreate, self).extend_payment_order_domain(
            payment_order, domain)
        if payment_order.payment_order_type == 'debit':
            # For receivables, propose all unreconciled debit lines,
            # including partially reconciled ones.
            # If they are partially reconciled with a customer refund,
            # the residual will be added to the payment order.
            #
            # For payables, normally suppliers will be the initiating party
            # for possible supplier refunds (via a transfer for example),
            # or they keep the amount for decreasing future supplier invoices,
            # so there's not too much sense for adding them to a direct debit
            # order
            domain += [
                ('debit', '>', 0),
                ('account_id.type', '=', 'receivable'),
            ]
        return True
Fix move lines domain for direct debits
[FIX] account_direct_debit: Fix move lines domain for direct debits
Python
agpl-3.0
acsone/bank-payment,diagramsoftware/bank-payment,CompassionCH/bank-payment,CompassionCH/bank-payment,open-synergy/bank-payment,hbrunn/bank-payment
  from openerp import models, api


  class PaymentOrderCreate(models.TransientModel):
      _inherit = 'payment.order.create'

      @api.multi
      def extend_payment_order_domain(self, payment_order, domain):
          super(PaymentOrderCreate, self).extend_payment_order_domain(
              payment_order, domain)
          if payment_order.payment_order_type == 'debit':
-             # With the new system with bank.payment.line, we want
-             # to be able to have payment lines linked to customer
-             # invoices and payment lines linked to customer refunds
-             # in order to debit the customer of the total of his
-             # invoices minus his refunds
+             # For receivables, propose all unreconciled debit lines,
+             # including partially reconciled ones.
+             # If they are partially reconciled with a customer refund,
+             # the residual will be added to the payment order.
+             #
+             # For payables, normally suppliers will be the initiating party
+             # for possible supplier refunds (via a transfer for example),
+             # or they keep the amount for decreasing future supplier invoices,
+             # so there's not too much sense for adding them to a direct debit
+             # order
+             domain += [
+                 ('debit', '>', 0),
-             domain += [('account_id.type', '=', 'receivable')]
+                 ('account_id.type', '=', 'receivable'),
+             ]
          return True
Fix move lines domain for direct debits
## Code Before:
from openerp import models, api


class PaymentOrderCreate(models.TransientModel):
    _inherit = 'payment.order.create'

    @api.multi
    def extend_payment_order_domain(self, payment_order, domain):
        super(PaymentOrderCreate, self).extend_payment_order_domain(
            payment_order, domain)
        if payment_order.payment_order_type == 'debit':
            # With the new system with bank.payment.line, we want
            # to be able to have payment lines linked to customer
            # invoices and payment lines linked to customer refunds
            # in order to debit the customer of the total of his
            # invoices minus his refunds
            domain += [('account_id.type', '=', 'receivable')]
        return True

## Instruction:
Fix move lines domain for direct debits

## Code After:
from openerp import models, api


class PaymentOrderCreate(models.TransientModel):
    _inherit = 'payment.order.create'

    @api.multi
    def extend_payment_order_domain(self, payment_order, domain):
        super(PaymentOrderCreate, self).extend_payment_order_domain(
            payment_order, domain)
        if payment_order.payment_order_type == 'debit':
            # For receivables, propose all unreconciled debit lines,
            # including partially reconciled ones.
            # If they are partially reconciled with a customer refund,
            # the residual will be added to the payment order.
            #
            # For payables, normally suppliers will be the initiating party
            # for possible supplier refunds (via a transfer for example),
            # or they keep the amount for decreasing future supplier invoices,
            # so there's not too much sense for adding them to a direct debit
            # order
            domain += [
                ('debit', '>', 0),
                ('account_id.type', '=', 'receivable'),
            ]
        return True
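Worked out, the debit branch above appends exactly two criteria; whatever the base wizard already put in the domain is left untouched:

extra_criteria = [
    ('debit', '>', 0),                        # lines with a debit balance
    ('account_id.type', '=', 'receivable'),   # customer-side accounts only
]
# domain += extra_criteria  -> only receivable move lines with a debit are proposed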
989ff44354d624906d72f20aac933a9243214cf8
corehq/dbaccessors/couchapps/cases_by_server_date/by_owner_server_modified_on.py
corehq/dbaccessors/couchapps/cases_by_server_date/by_owner_server_modified_on.py
from __future__ import absolute_import
from __future__ import unicode_literals
from casexml.apps.case.models import CommCareCase
from dimagi.utils.parsing import json_format_datetime


def get_case_ids_modified_with_owner_since(domain, owner_id, reference_date):
    """
    Gets all cases with a specified owner ID that have been modified
    since a particular reference_date (using the server's timestamp)
    """
    return [
        row['id'] for row in CommCareCase.get_db().view(
            'cases_by_server_date/by_owner_server_modified_on',
            startkey=[domain, owner_id, json_format_datetime(reference_date)],
            endkey=[domain, owner_id, {}],
            include_docs=False,
            reduce=False
        )
    ]
from __future__ import absolute_import
from __future__ import unicode_literals
from casexml.apps.case.models import CommCareCase
from dimagi.utils.parsing import json_format_datetime


def get_case_ids_modified_with_owner_since(domain, owner_id, reference_date, until_date=None):
    """
    Gets all cases with a specified owner ID that have been modified
    since a particular reference_date (using the server's timestamp)
    """
    return [
        row['id'] for row in CommCareCase.get_db().view(
            'cases_by_server_date/by_owner_server_modified_on',
            startkey=[domain, owner_id, json_format_datetime(reference_date)],
            endkey=[domain, owner_id, {} if not until_date else json_format_datetime(until_date)],
            include_docs=False,
            reduce=False
        )
    ]
Make get_case_ids_modified_with_owner_since accept an end date as well
Make get_case_ids_modified_with_owner_since accept an end date as well
Python
bsd-3-clause
dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq
  from __future__ import absolute_import
  from __future__ import unicode_literals
  from casexml.apps.case.models import CommCareCase
  from dimagi.utils.parsing import json_format_datetime


- def get_case_ids_modified_with_owner_since(domain, owner_id, reference_date):
+ def get_case_ids_modified_with_owner_since(domain, owner_id, reference_date, until_date=None):
      """
      Gets all cases with a specified owner ID that have been modified
      since a particular reference_date (using the server's timestamp)
      """
      return [
          row['id'] for row in CommCareCase.get_db().view(
              'cases_by_server_date/by_owner_server_modified_on',
              startkey=[domain, owner_id, json_format_datetime(reference_date)],
-             endkey=[domain, owner_id, {}],
+             endkey=[domain, owner_id, {} if not until_date else json_format_datetime(until_date)],
              include_docs=False,
              reduce=False
          )
      ]
Make get_case_ids_modified_with_owner_since accept an end date as well
## Code Before:
from __future__ import absolute_import
from __future__ import unicode_literals
from casexml.apps.case.models import CommCareCase
from dimagi.utils.parsing import json_format_datetime


def get_case_ids_modified_with_owner_since(domain, owner_id, reference_date):
    """
    Gets all cases with a specified owner ID that have been modified
    since a particular reference_date (using the server's timestamp)
    """
    return [
        row['id'] for row in CommCareCase.get_db().view(
            'cases_by_server_date/by_owner_server_modified_on',
            startkey=[domain, owner_id, json_format_datetime(reference_date)],
            endkey=[domain, owner_id, {}],
            include_docs=False,
            reduce=False
        )
    ]

## Instruction:
Make get_case_ids_modified_with_owner_since accept an end date as well

## Code After:
from __future__ import absolute_import
from __future__ import unicode_literals
from casexml.apps.case.models import CommCareCase
from dimagi.utils.parsing import json_format_datetime


def get_case_ids_modified_with_owner_since(domain, owner_id, reference_date, until_date=None):
    """
    Gets all cases with a specified owner ID that have been modified
    since a particular reference_date (using the server's timestamp)
    """
    return [
        row['id'] for row in CommCareCase.get_db().view(
            'cases_by_server_date/by_owner_server_modified_on',
            startkey=[domain, owner_id, json_format_datetime(reference_date)],
            endkey=[domain, owner_id, {} if not until_date else json_format_datetime(until_date)],
            include_docs=False,
            reduce=False
        )
    ]
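A usage sketch for the widened signature; the domain and owner values are placeholders:

from datetime import datetime, timedelta

now = datetime.utcnow()
case_ids = get_case_ids_modified_with_owner_since(
    'example-domain', 'owner-id-123',
    reference_date=now - timedelta(days=7),
    until_date=now,  # omit (or pass None) to keep the original open-ended endkey
)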
16d65c211b00871ac7384baa3934d88410e2c977
tests/test_planetary_test_data_2.py
tests/test_planetary_test_data_2.py
from planetary_test_data import PlanetaryTestDataProducts
import os


def test_planetary_test_data_object():
    """Tests simple PlanetaryTestDataProducts attributes."""
    data = PlanetaryTestDataProducts()
    assert data.tags == ['core']
    assert data.all_products is None
    # handle running this test individually versus within a suite
    if os.path.exists('tests'):
        assert data.directory == os.path.join('tests', 'mission_data')
    else:
        assert data.directory == os.path.join('mission_data')
    assert os.path.exists(data.data_path)


def test_planetary_test_core_products():
    """Tests the list of core data products."""
    data = PlanetaryTestDataProducts()
    assert data.tags == ['core']
    assert u'2p129641989eth0361p2600r8m1.img' in data.products
    assert u'1p190678905erp64kcp2600l8c1.img' in data.products


def test_planetary_test_all_products():
    """Tests the list of all data products."""
    data = PlanetaryTestDataProducts(all_products=True)
    assert len(data.products) == 151
    assert data.all_products is True
from planetary_test_data import PlanetaryTestDataProducts
import os


def test_planetary_test_data_object():
    """Tests simple PlanetaryTestDataProducts attributes."""
    data = PlanetaryTestDataProducts()
    assert data.tags == ['core']
    assert data.all_products is None
    # handle running this test individually versus within a suite
    if os.path.exists('tests'):
        assert data.directory == os.path.join('tests', 'mission_data')
    else:
        assert data.directory == os.path.join('mission_data')
    assert os.path.exists(data.data_path)


def test_planetary_test_core_products():
    """Tests the list of core data products."""
    data = PlanetaryTestDataProducts()
    assert data.tags == ['core']
    assert u'2p129641989eth0361p2600r8m1.img' in data.products
    assert u'1p190678905erp64kcp2600l8c1.img' in data.products
    assert u'0047MH0000110010100214C00_DRCL.IMG' in data.products
    assert u'1p134482118erp0902p2600r8m1.img' in data.products
    assert u'h58n3118.img' in data.products
    assert u'r01090al.img' in data.products


def test_planetary_test_all_products():
    """Tests the list of all data products."""
    data = PlanetaryTestDataProducts(all_products=True)
    assert len(data.products) == 151
    assert data.all_products is True
Update test to account for more core products
Update test to account for more core products
Python
bsd-3-clause
pbvarga1/planetary_test_data,planetarypy/planetary_test_data
  from planetary_test_data import PlanetaryTestDataProducts
  import os


  def test_planetary_test_data_object():
      """Tests simple PlanetaryTestDataProducts attributes."""
      data = PlanetaryTestDataProducts()
      assert data.tags == ['core']
      assert data.all_products is None
      # handle running this test individually versus within a suite
      if os.path.exists('tests'):
          assert data.directory == os.path.join('tests', 'mission_data')
      else:
          assert data.directory == os.path.join('mission_data')
      assert os.path.exists(data.data_path)


  def test_planetary_test_core_products():
      """Tests the list of core data products."""
      data = PlanetaryTestDataProducts()
      assert data.tags == ['core']
      assert u'2p129641989eth0361p2600r8m1.img' in data.products
      assert u'1p190678905erp64kcp2600l8c1.img' in data.products
+     assert u'0047MH0000110010100214C00_DRCL.IMG' in data.products
+     assert u'1p134482118erp0902p2600r8m1.img' in data.products
+     assert u'h58n3118.img' in data.products
+     assert u'r01090al.img' in data.products


  def test_planetary_test_all_products():
      """Tests the list of all data products."""
      data = PlanetaryTestDataProducts(all_products=True)
      assert len(data.products) == 151
      assert data.all_products is True
Update test to account for more core products
## Code Before:
from planetary_test_data import PlanetaryTestDataProducts
import os


def test_planetary_test_data_object():
    """Tests simple PlanetaryTestDataProducts attributes."""
    data = PlanetaryTestDataProducts()
    assert data.tags == ['core']
    assert data.all_products is None
    # handle running this test individually versus within a suite
    if os.path.exists('tests'):
        assert data.directory == os.path.join('tests', 'mission_data')
    else:
        assert data.directory == os.path.join('mission_data')
    assert os.path.exists(data.data_path)


def test_planetary_test_core_products():
    """Tests the list of core data products."""
    data = PlanetaryTestDataProducts()
    assert data.tags == ['core']
    assert u'2p129641989eth0361p2600r8m1.img' in data.products
    assert u'1p190678905erp64kcp2600l8c1.img' in data.products


def test_planetary_test_all_products():
    """Tests the list of all data products."""
    data = PlanetaryTestDataProducts(all_products=True)
    assert len(data.products) == 151
    assert data.all_products is True

## Instruction:
Update test to account for more core products

## Code After:
from planetary_test_data import PlanetaryTestDataProducts
import os


def test_planetary_test_data_object():
    """Tests simple PlanetaryTestDataProducts attributes."""
    data = PlanetaryTestDataProducts()
    assert data.tags == ['core']
    assert data.all_products is None
    # handle running this test individually versus within a suite
    if os.path.exists('tests'):
        assert data.directory == os.path.join('tests', 'mission_data')
    else:
        assert data.directory == os.path.join('mission_data')
    assert os.path.exists(data.data_path)


def test_planetary_test_core_products():
    """Tests the list of core data products."""
    data = PlanetaryTestDataProducts()
    assert data.tags == ['core']
    assert u'2p129641989eth0361p2600r8m1.img' in data.products
    assert u'1p190678905erp64kcp2600l8c1.img' in data.products
    assert u'0047MH0000110010100214C00_DRCL.IMG' in data.products
    assert u'1p134482118erp0902p2600r8m1.img' in data.products
    assert u'h58n3118.img' in data.products
    assert u'r01090al.img' in data.products


def test_planetary_test_all_products():
    """Tests the list of all data products."""
    data = PlanetaryTestDataProducts(all_products=True)
    assert len(data.products) == 151
    assert data.all_products is True
1ee414611fa6e01516d545bb284695a62bd69f0a
rtrss/daemon.py
rtrss/daemon.py
import sys
import os
import logging
import atexit
from rtrss.basedaemon import BaseDaemon
_logger = logging.getLogger(__name__)


class WorkerDaemon(BaseDaemon):
    def run(self):
        _logger.info('Daemon started with pid %d', os.getpid())

        from rtrss.worker import app_init, worker_action
        worker_action('import_categories')  # TODO run()

        _logger.info('Daemon is done and exiting')

    def start(self):
        _logger.info('Starting daemon')
        super(WorkerDaemon, self).start()

    def stop(self):
        _logger.info('Stopping daemon')
        super(WorkerDaemon, self).stop()

    def restart(self):
        _logger.info('Restarting daemon')
        super(WorkerDaemon, self).restart()


def make_daemon(config):
    '''Returns WorkerDaemon instance'''
    pidfile = os.path.join(config.DATA_DIR, 'daemon.pid')
    logdir = os.environ.get('OPENSHIFT_LOG_DIR') or config.DATA_DIR
    logfile = os.path.join(logdir, 'daemon.log')
    return WorkerDaemon(pidfile, stdout=logfile, stderr=logfile)
import os
import logging

from rtrss.basedaemon import BaseDaemon

_logger = logging.getLogger(__name__)


class WorkerDaemon(BaseDaemon):
    def run(self):
        _logger.info('Daemon started with pid %d', os.getpid())

        from rtrss.worker import worker_action
        worker_action('run')

        _logger.info('Daemon is done and exiting')

    def start(self):
        _logger.info('Starting daemon')
        super(WorkerDaemon, self).start()

    def stop(self):
        _logger.info('Stopping daemon')
        super(WorkerDaemon, self).stop()

    def restart(self):
        _logger.info('Restarting daemon')
        super(WorkerDaemon, self).restart()


def make_daemon(config):
    '''Returns WorkerDaemon instance'''
    pidfile = os.path.join(config.DATA_DIR, 'daemon.pid')
    logdir = os.environ.get('OPENSHIFT_LOG_DIR') or config.DATA_DIR
    logfile = os.path.join(logdir, 'daemon.log')
    return WorkerDaemon(pidfile, stdout=logfile, stderr=logfile)
Change debug action to production
Change debug action to production
Python
apache-2.0
notapresent/rtrss,notapresent/rtrss,notapresent/rtrss,notapresent/rtrss
- import sys
  import os
  import logging
- import atexit
+
  from rtrss.basedaemon import BaseDaemon
+
  _logger = logging.getLogger(__name__)


  class WorkerDaemon(BaseDaemon):
      def run(self):
          _logger.info('Daemon started with pid %d', os.getpid())
-
+
-         from rtrss.worker import app_init, worker_action
+         from rtrss.worker import worker_action
-         worker_action('import_categories')  # TODO run()
-
+         worker_action('run')
+
          _logger.info('Daemon is done and exiting')

      def start(self):
          _logger.info('Starting daemon')
          super(WorkerDaemon, self).start()

      def stop(self):
          _logger.info('Stopping daemon')
          super(WorkerDaemon, self).stop()

      def restart(self):
          _logger.info('Restarting daemon')
          super(WorkerDaemon, self).restart()
-
+

  def make_daemon(config):
      '''Returns WorkerDaemon instance'''
      pidfile = os.path.join(config.DATA_DIR, 'daemon.pid')
      logdir = os.environ.get('OPENSHIFT_LOG_DIR') or config.DATA_DIR
      logfile = os.path.join(logdir, 'daemon.log')
      return WorkerDaemon(pidfile, stdout=logfile, stderr=logfile)
-
+
Change debug action to production
## Code Before:
import sys
import os
import logging
import atexit
from rtrss.basedaemon import BaseDaemon
_logger = logging.getLogger(__name__)


class WorkerDaemon(BaseDaemon):
    def run(self):
        _logger.info('Daemon started with pid %d', os.getpid())

        from rtrss.worker import app_init, worker_action
        worker_action('import_categories')  # TODO run()

        _logger.info('Daemon is done and exiting')

    def start(self):
        _logger.info('Starting daemon')
        super(WorkerDaemon, self).start()

    def stop(self):
        _logger.info('Stopping daemon')
        super(WorkerDaemon, self).stop()

    def restart(self):
        _logger.info('Restarting daemon')
        super(WorkerDaemon, self).restart()


def make_daemon(config):
    '''Returns WorkerDaemon instance'''
    pidfile = os.path.join(config.DATA_DIR, 'daemon.pid')
    logdir = os.environ.get('OPENSHIFT_LOG_DIR') or config.DATA_DIR
    logfile = os.path.join(logdir, 'daemon.log')
    return WorkerDaemon(pidfile, stdout=logfile, stderr=logfile)

## Instruction:
Change debug action to production

## Code After:
import os
import logging

from rtrss.basedaemon import BaseDaemon

_logger = logging.getLogger(__name__)


class WorkerDaemon(BaseDaemon):
    def run(self):
        _logger.info('Daemon started with pid %d', os.getpid())

        from rtrss.worker import worker_action
        worker_action('run')

        _logger.info('Daemon is done and exiting')

    def start(self):
        _logger.info('Starting daemon')
        super(WorkerDaemon, self).start()

    def stop(self):
        _logger.info('Stopping daemon')
        super(WorkerDaemon, self).stop()

    def restart(self):
        _logger.info('Restarting daemon')
        super(WorkerDaemon, self).restart()


def make_daemon(config):
    '''Returns WorkerDaemon instance'''
    pidfile = os.path.join(config.DATA_DIR, 'daemon.pid')
    logdir = os.environ.get('OPENSHIFT_LOG_DIR') or config.DATA_DIR
    logfile = os.path.join(logdir, 'daemon.log')
    return WorkerDaemon(pidfile, stdout=logfile, stderr=logfile)
acdf380a5463ae8bd9c6dc76ce02069371b6f5fd
backend/restapp/restapp/urls.py
backend/restapp/restapp/urls.py
from django.conf.urls import url, include
from django.contrib import admin

urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
]
from django.conf.urls import url, include
from django.contrib import admin

from books.models import Author, Book
from rest_framework import routers, serializers, viewsets


class AuthorSerializer(serializers.HyperlinkedModelSerializer):
    class Meta:
        model = Author
        fields = ('first_name', 'last_name', 'description')


class AuthorViewSet(viewsets.ModelViewSet):
    queryset = Author.objects.all()
    serializer_class = AuthorSerializer


class BookSerializer(serializers.HyperlinkedModelSerializer):
    class Meta:
        model = Book
        fields = ('name', 'description', 'author')


class BookViewSet(viewsets.ModelViewSet):
    queryset = Book.objects.all()
    serializer_class = BookSerializer


router = routers.DefaultRouter()
router.register(r'authors', AuthorViewSet)
router.register(r'books', BookViewSet)


urlpatterns = [
    url(r'^', include(router.urls)),
    url(r'^admin/', admin.site.urls),
    url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
]
Add simple serializers for books and authors
Add simple serializers for books and authors
Python
mit
TomaszGabrysiak/django-rest-angular-seed,TomaszGabrysiak/django-rest-angular-seed,TomaszGabrysiak/django-rest-angular-seed
  from django.conf.urls import url, include
  from django.contrib import admin

+ from books.models import Author, Book
+ from rest_framework import routers, serializers, viewsets
+
+
+ class AuthorSerializer(serializers.HyperlinkedModelSerializer):
+     class Meta:
+         model = Author
+         fields = ('first_name', 'last_name', 'description')
+
+
+ class AuthorViewSet(viewsets.ModelViewSet):
+     queryset = Author.objects.all()
+     serializer_class = AuthorSerializer
+
+
+ class BookSerializer(serializers.HyperlinkedModelSerializer):
+     class Meta:
+         model = Book
+         fields = ('name', 'description', 'author')
+
+
+ class BookViewSet(viewsets.ModelViewSet):
+     queryset = Book.objects.all()
+     serializer_class = BookSerializer
+
+
+ router = routers.DefaultRouter()
+ router.register(r'authors', AuthorViewSet)
+ router.register(r'books', BookViewSet)
+
+
  urlpatterns = [
+     url(r'^', include(router.urls)),
      url(r'^admin/', admin.site.urls),
      url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
  ]
+
Add simple serializers for books and authors
## Code Before:
from django.conf.urls import url, include
from django.contrib import admin

urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
]

## Instruction:
Add simple serializers for books and authors

## Code After:
from django.conf.urls import url, include
from django.contrib import admin

from books.models import Author, Book
from rest_framework import routers, serializers, viewsets


class AuthorSerializer(serializers.HyperlinkedModelSerializer):
    class Meta:
        model = Author
        fields = ('first_name', 'last_name', 'description')


class AuthorViewSet(viewsets.ModelViewSet):
    queryset = Author.objects.all()
    serializer_class = AuthorSerializer


class BookSerializer(serializers.HyperlinkedModelSerializer):
    class Meta:
        model = Book
        fields = ('name', 'description', 'author')


class BookViewSet(viewsets.ModelViewSet):
    queryset = Book.objects.all()
    serializer_class = BookSerializer


router = routers.DefaultRouter()
router.register(r'authors', AuthorViewSet)
router.register(r'books', BookViewSet)


urlpatterns = [
    url(r'^', include(router.urls)),
    url(r'^admin/', admin.site.urls),
    url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
]
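As a reading aid: DRF's DefaultRouter conventionally expands each registration into list and detail routes plus an API root. A quick way to inspect what the record's router produced (output patterns in the comment are illustrative):

# In a Django shell, with the module above importable as urls:
# >>> for u in urls.router.urls:
# ...     print(u)
# e.g. ^authors/$, ^authors/(?P<pk>[^/.]+)/$, ^books/$, ^books/(?P<pk>[^/.]+)/$, ^$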
a7908d39e24384881c30042e1b4c7e93e85eb38e
test/TestTaskIncludes.py
test/TestTaskIncludes.py
import os
import unittest

from ansiblelint import Runner, RulesCollection


class TestTaskIncludes(unittest.TestCase):

    def setUp(self):
        rulesdir = os.path.join('lib', 'ansiblelint', 'rules')
        self.rules = RulesCollection.create_from_directory(rulesdir)

    def test_block_included_tasks(self):
        filename = 'test/blockincludes.yml'
        runner = Runner(self.rules, filename, [], [], [])
        runner.run()
        self.assertEqual(len(runner.playbooks), 4)

    def test_block_included_tasks_with_rescue_and_always(self):
        filename = 'test/blockincludes2.yml'
        runner = Runner(self.rules, filename, [], [], [])
        runner.run()
        self.assertEqual(len(runner.playbooks), 4)

    def test_included_tasks(self):
        filename = 'test/taskincludes.yml'
        runner = Runner(self.rules, filename, [], [], [])
        runner.run()
        self.assertEqual(len(runner.playbooks), 4)
import os
import unittest

from ansiblelint import Runner, RulesCollection


class TestTaskIncludes(unittest.TestCase):

    def setUp(self):
        rulesdir = os.path.join('lib', 'ansiblelint', 'rules')
        self.rules = RulesCollection.create_from_directory(rulesdir)

    def test_block_included_tasks(self):
        filename = 'test/blockincludes.yml'
        runner = Runner(self.rules, filename, [], [], [])
        runner.run()
        self.assertEqual(len(runner.playbooks), 4)

    def test_block_included_tasks_with_rescue_and_always(self):
        filename = 'test/blockincludes2.yml'
        runner = Runner(self.rules, filename, [], [], [])
        runner.run()
        self.assertEqual(len(runner.playbooks), 4)

    def test_included_tasks(self):
        filename = 'test/taskincludes.yml'
        runner = Runner(self.rules, filename, [], [], [])
        runner.run()
        self.assertEqual(len(runner.playbooks), 4)

    def test_include_tasks_with_block_include(self):
        filename = 'test/include-in-block.yml'
        runner = Runner(self.rules, filename, [], [], [])
        runner.run()
        self.assertEqual(len(runner.playbooks), 3)
Add test that exercises block includes
Add test that exercises block includes
Python
mit
MatrixCrawler/ansible-lint,dataxu/ansible-lint,willthames/ansible-lint
  import os
  import unittest

  from ansiblelint import Runner, RulesCollection


  class TestTaskIncludes(unittest.TestCase):

      def setUp(self):
          rulesdir = os.path.join('lib', 'ansiblelint', 'rules')
          self.rules = RulesCollection.create_from_directory(rulesdir)

      def test_block_included_tasks(self):
          filename = 'test/blockincludes.yml'
          runner = Runner(self.rules, filename, [], [], [])
          runner.run()
          self.assertEqual(len(runner.playbooks), 4)

      def test_block_included_tasks_with_rescue_and_always(self):
          filename = 'test/blockincludes2.yml'
          runner = Runner(self.rules, filename, [], [], [])
          runner.run()
          self.assertEqual(len(runner.playbooks), 4)

      def test_included_tasks(self):
          filename = 'test/taskincludes.yml'
          runner = Runner(self.rules, filename, [], [], [])
          runner.run()
          self.assertEqual(len(runner.playbooks), 4)
+
+     def test_include_tasks_with_block_include(self):
+         filename = 'test/include-in-block.yml'
+         runner = Runner(self.rules, filename, [], [], [])
+         runner.run()
+         self.assertEqual(len(runner.playbooks), 3)
Add test that exercises block includes
## Code Before:
import os
import unittest

from ansiblelint import Runner, RulesCollection


class TestTaskIncludes(unittest.TestCase):

    def setUp(self):
        rulesdir = os.path.join('lib', 'ansiblelint', 'rules')
        self.rules = RulesCollection.create_from_directory(rulesdir)

    def test_block_included_tasks(self):
        filename = 'test/blockincludes.yml'
        runner = Runner(self.rules, filename, [], [], [])
        runner.run()
        self.assertEqual(len(runner.playbooks), 4)

    def test_block_included_tasks_with_rescue_and_always(self):
        filename = 'test/blockincludes2.yml'
        runner = Runner(self.rules, filename, [], [], [])
        runner.run()
        self.assertEqual(len(runner.playbooks), 4)

    def test_included_tasks(self):
        filename = 'test/taskincludes.yml'
        runner = Runner(self.rules, filename, [], [], [])
        runner.run()
        self.assertEqual(len(runner.playbooks), 4)

## Instruction:
Add test that exercises block includes

## Code After:
import os
import unittest

from ansiblelint import Runner, RulesCollection


class TestTaskIncludes(unittest.TestCase):

    def setUp(self):
        rulesdir = os.path.join('lib', 'ansiblelint', 'rules')
        self.rules = RulesCollection.create_from_directory(rulesdir)

    def test_block_included_tasks(self):
        filename = 'test/blockincludes.yml'
        runner = Runner(self.rules, filename, [], [], [])
        runner.run()
        self.assertEqual(len(runner.playbooks), 4)

    def test_block_included_tasks_with_rescue_and_always(self):
        filename = 'test/blockincludes2.yml'
        runner = Runner(self.rules, filename, [], [], [])
        runner.run()
        self.assertEqual(len(runner.playbooks), 4)

    def test_included_tasks(self):
        filename = 'test/taskincludes.yml'
        runner = Runner(self.rules, filename, [], [], [])
        runner.run()
        self.assertEqual(len(runner.playbooks), 4)

    def test_include_tasks_with_block_include(self):
        filename = 'test/include-in-block.yml'
        runner = Runner(self.rules, filename, [], [], [])
        runner.run()
        self.assertEqual(len(runner.playbooks), 3)
0ba063edc4aec690efca5c5ba9faf64042bb7707
demo/demo/urls.py
demo/demo/urls.py
from __future__ import unicode_literals

from django.conf.urls import patterns, url

from .views import HomePageView, FormHorizontalView, FormInlineView, PaginationView, FormWithFilesView, \
    DefaultFormView, MiscView, DefaultFormsetView, DefaultFormByFieldView

urlpatterns = [
    url(r'^$', HomePageView.as_view(), name='home'),
    url(r'^formset$', DefaultFormsetView.as_view(), name='formset_default'),
    url(r'^form$', DefaultFormView.as_view(), name='form_default'),
    url(r'^form_by_field$', DefaultFormByFieldView.as_view(), name='form_by_field'),
    url(r'^form_horizontal$', FormHorizontalView.as_view(), name='form_horizontal'),
    url(r'^form_inline$', FormInlineView.as_view(), name='form_inline'),
    url(r'^form_with_files$', FormWithFilesView.as_view(), name='form_with_files'),
    url(r'^pagination$', PaginationView.as_view(), name='pagination'),
    url(r'^misc$', MiscView.as_view(), name='misc'),
]
from __future__ import unicode_literals

from django.conf.urls import url

from .views import HomePageView, FormHorizontalView, FormInlineView, PaginationView, FormWithFilesView, \
    DefaultFormView, MiscView, DefaultFormsetView, DefaultFormByFieldView

urlpatterns = [
    url(r'^$', HomePageView.as_view(), name='home'),
    url(r'^formset$', DefaultFormsetView.as_view(), name='formset_default'),
    url(r'^form$', DefaultFormView.as_view(), name='form_default'),
    url(r'^form_by_field$', DefaultFormByFieldView.as_view(), name='form_by_field'),
    url(r'^form_horizontal$', FormHorizontalView.as_view(), name='form_horizontal'),
    url(r'^form_inline$', FormInlineView.as_view(), name='form_inline'),
    url(r'^form_with_files$', FormWithFilesView.as_view(), name='form_with_files'),
    url(r'^pagination$', PaginationView.as_view(), name='pagination'),
    url(r'^misc$', MiscView.as_view(), name='misc'),
]
Remove obsolete import (removed in Django 1.10)
Remove obsolete import (removed in Django 1.10)
Python
bsd-3-clause
dyve/django-bootstrap3,dyve/django-bootstrap3,zostera/django-bootstrap4,zostera/django-bootstrap4
  from __future__ import unicode_literals

- from django.conf.urls import patterns, url
+ from django.conf.urls import url

  from .views import HomePageView, FormHorizontalView, FormInlineView, PaginationView, FormWithFilesView, \
      DefaultFormView, MiscView, DefaultFormsetView, DefaultFormByFieldView

  urlpatterns = [
      url(r'^$', HomePageView.as_view(), name='home'),
      url(r'^formset$', DefaultFormsetView.as_view(), name='formset_default'),
      url(r'^form$', DefaultFormView.as_view(), name='form_default'),
      url(r'^form_by_field$', DefaultFormByFieldView.as_view(), name='form_by_field'),
      url(r'^form_horizontal$', FormHorizontalView.as_view(), name='form_horizontal'),
      url(r'^form_inline$', FormInlineView.as_view(), name='form_inline'),
      url(r'^form_with_files$', FormWithFilesView.as_view(), name='form_with_files'),
      url(r'^pagination$', PaginationView.as_view(), name='pagination'),
      url(r'^misc$', MiscView.as_view(), name='misc'),
  ]
+
Remove obsolete import (removed in Django 1.10)
## Code Before:
from __future__ import unicode_literals

from django.conf.urls import patterns, url

from .views import HomePageView, FormHorizontalView, FormInlineView, PaginationView, FormWithFilesView, \
    DefaultFormView, MiscView, DefaultFormsetView, DefaultFormByFieldView

urlpatterns = [
    url(r'^$', HomePageView.as_view(), name='home'),
    url(r'^formset$', DefaultFormsetView.as_view(), name='formset_default'),
    url(r'^form$', DefaultFormView.as_view(), name='form_default'),
    url(r'^form_by_field$', DefaultFormByFieldView.as_view(), name='form_by_field'),
    url(r'^form_horizontal$', FormHorizontalView.as_view(), name='form_horizontal'),
    url(r'^form_inline$', FormInlineView.as_view(), name='form_inline'),
    url(r'^form_with_files$', FormWithFilesView.as_view(), name='form_with_files'),
    url(r'^pagination$', PaginationView.as_view(), name='pagination'),
    url(r'^misc$', MiscView.as_view(), name='misc'),
]

## Instruction:
Remove obsolete import (removed in Django 1.10)

## Code After:
from __future__ import unicode_literals

from django.conf.urls import url

from .views import HomePageView, FormHorizontalView, FormInlineView, PaginationView, FormWithFilesView, \
    DefaultFormView, MiscView, DefaultFormsetView, DefaultFormByFieldView

urlpatterns = [
    url(r'^$', HomePageView.as_view(), name='home'),
    url(r'^formset$', DefaultFormsetView.as_view(), name='formset_default'),
    url(r'^form$', DefaultFormView.as_view(), name='form_default'),
    url(r'^form_by_field$', DefaultFormByFieldView.as_view(), name='form_by_field'),
    url(r'^form_horizontal$', FormHorizontalView.as_view(), name='form_horizontal'),
    url(r'^form_inline$', FormInlineView.as_view(), name='form_inline'),
    url(r'^form_with_files$', FormWithFilesView.as_view(), name='form_with_files'),
    url(r'^pagination$', PaginationView.as_view(), name='pagination'),
    url(r'^misc$', MiscView.as_view(), name='misc'),
]
faa67c81ad2ebb8ba8cb407982cbced72d1fa899
tests/test_config_tree.py
tests/test_config_tree.py
import pytest from pyhocon.config_tree import ConfigTree from pyhocon.exceptions import ConfigMissingException, ConfigWrongTypeException class TestConfigParser(object): def test_config_tree_quoted_string(self): config_tree = ConfigTree() config_tree.put("a.b.c", "value") assert config_tree.get("a.b.c") == "value" with pytest.raises(ConfigMissingException): assert config_tree.get("a.b.d") with pytest.raises(ConfigMissingException): config_tree.get("a.d.e") with pytest.raises(ConfigWrongTypeException): config_tree.get("a.b.c.e") def test_config_tree_number(self): config_tree = ConfigTree() config_tree.put("a.b.c", 5) assert config_tree.get("a.b.c") == 5
import pytest from pyhocon.config_tree import ConfigTree from pyhocon.exceptions import ConfigMissingException, ConfigWrongTypeException class TestConfigParser(object): def test_config_tree_quoted_string(self): config_tree = ConfigTree() config_tree.put("a.b.c", "value") assert config_tree.get("a.b.c") == "value" with pytest.raises(ConfigMissingException): assert config_tree.get("a.b.d") with pytest.raises(ConfigMissingException): config_tree.get("a.d.e") with pytest.raises(ConfigWrongTypeException): config_tree.get("a.b.c.e") def test_config_tree_number(self): config_tree = ConfigTree() config_tree.put("a.b.c", 5) assert config_tree.get("a.b.c") == 5 def test_config_tree_iterator(self): config_tree = ConfigTree() config_tree.put("a.b.c", 5) for k in config_tree: assert k == "a" assert config_tree[k]["b.c"] == 5 def test_config_logging(self): import logging, logging.config config_tree = ConfigTree() config_tree.put('version', 1) config_tree.put('root.level', logging.INFO) assert dict(config_tree)['version'] == 1 logging.config.dictConfig(config_tree)
Add failing tests for iteration and logging config
Add failing tests for iteration and logging config
Python
apache-2.0
acx2015/pyhocon,chimpler/pyhocon,vamega/pyhocon,peoplepattern/pyhocon
import pytest from pyhocon.config_tree import ConfigTree from pyhocon.exceptions import ConfigMissingException, ConfigWrongTypeException class TestConfigParser(object): def test_config_tree_quoted_string(self): config_tree = ConfigTree() config_tree.put("a.b.c", "value") assert config_tree.get("a.b.c") == "value" with pytest.raises(ConfigMissingException): assert config_tree.get("a.b.d") with pytest.raises(ConfigMissingException): config_tree.get("a.d.e") with pytest.raises(ConfigWrongTypeException): config_tree.get("a.b.c.e") def test_config_tree_number(self): config_tree = ConfigTree() config_tree.put("a.b.c", 5) assert config_tree.get("a.b.c") == 5 + def test_config_tree_iterator(self): + config_tree = ConfigTree() + config_tree.put("a.b.c", 5) + for k in config_tree: + assert k == "a" + assert config_tree[k]["b.c"] == 5 + + def test_config_logging(self): + import logging, logging.config + config_tree = ConfigTree() + config_tree.put('version', 1) + config_tree.put('root.level', logging.INFO) + assert dict(config_tree)['version'] == 1 + logging.config.dictConfig(config_tree) +
Add failing tests for iteration and logging config
## Code Before: import pytest from pyhocon.config_tree import ConfigTree from pyhocon.exceptions import ConfigMissingException, ConfigWrongTypeException class TestConfigParser(object): def test_config_tree_quoted_string(self): config_tree = ConfigTree() config_tree.put("a.b.c", "value") assert config_tree.get("a.b.c") == "value" with pytest.raises(ConfigMissingException): assert config_tree.get("a.b.d") with pytest.raises(ConfigMissingException): config_tree.get("a.d.e") with pytest.raises(ConfigWrongTypeException): config_tree.get("a.b.c.e") def test_config_tree_number(self): config_tree = ConfigTree() config_tree.put("a.b.c", 5) assert config_tree.get("a.b.c") == 5 ## Instruction: Add failing tests for iteration and logging config ## Code After: import pytest from pyhocon.config_tree import ConfigTree from pyhocon.exceptions import ConfigMissingException, ConfigWrongTypeException class TestConfigParser(object): def test_config_tree_quoted_string(self): config_tree = ConfigTree() config_tree.put("a.b.c", "value") assert config_tree.get("a.b.c") == "value" with pytest.raises(ConfigMissingException): assert config_tree.get("a.b.d") with pytest.raises(ConfigMissingException): config_tree.get("a.d.e") with pytest.raises(ConfigWrongTypeException): config_tree.get("a.b.c.e") def test_config_tree_number(self): config_tree = ConfigTree() config_tree.put("a.b.c", 5) assert config_tree.get("a.b.c") == 5 def test_config_tree_iterator(self): config_tree = ConfigTree() config_tree.put("a.b.c", 5) for k in config_tree: assert k == "a" assert config_tree[k]["b.c"] == 5 def test_config_logging(self): import logging, logging.config config_tree = ConfigTree() config_tree.put('version', 1) config_tree.put('root.level', logging.INFO) assert dict(config_tree)['version'] == 1 logging.config.dictConfig(config_tree)
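The new `test_config_logging` leans on the fact that `logging.config.dictConfig` accepts any mapping that follows the dictConfig schema, which is exactly what `ConfigTree` must satisfy (or convert to) for the test to pass. A minimal sketch with a plain dict:

import logging
import logging.config

config = {
    'version': 1,                      # required key in the dictConfig schema
    'root': {'level': logging.INFO},   # configure the root logger
}
logging.config.dictConfig(config)
logging.getLogger().info('configured from a plain mapping')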
d07a7ad25f69a18c57c50d6c32df212e1f987bd4
www/tests/test_collections.py
www/tests/test_collections.py
import collections _d=collections.defaultdict(int) _d['a']+=1 _d['a']+=2 _d['b']+=4 assert _d['a'] == 3 assert _d['b'] == 4 s = 'mississippi' for k in s: _d[k] += 1 _values=list(_d.values()) _values.sort() assert _values == [1, 2, 3, 4, 4, 4] _keys=list(_d.keys()) _keys.sort() assert _keys == ['a', 'b', 'i', 'm', 'p', 's'] #now try with default being list (ie, empty list) _listdict=collections.defaultdict(list) for _i in range(10): _listdict['mylist'].append(_i) assert _listdict['not called'] == [] assert _listdict['mylist'] == [0,1,2,3,4,5,6,7,8,9]
import collections _d=collections.defaultdict(int) _d['a']+=1 _d['a']+=2 _d['b']+=4 assert _d['a'] == 3 assert _d['b'] == 4 s = 'mississippi' for k in s: _d[k] += 1 _values=list(_d.values()) _values.sort() assert _values == [1, 2, 3, 4, 4, 4] _keys=list(_d.keys()) _keys.sort() assert _keys == ['a', 'b', 'i', 'm', 'p', 's'] #now try with default being list (ie, empty list) _listdict=collections.defaultdict(list) for _i in range(10): _listdict['mylist'].append(_i) assert _listdict['not called'] == [] assert _listdict['mylist'] == [0,1,2,3,4,5,6,7,8,9] # namedtuple a = collections.namedtuple("foo", "bar bash bing")(1, 2, 3) assert a.bar == 1 assert a.bash == 2 assert repr(a) == 'foo(bar=1, bash=2, bing=3)'
Add a test on namedtuple
Add a test on namedtuple
Python
bsd-3-clause
kikocorreoso/brython,Mozhuowen/brython,Hasimir/brython,Isendir/brython,Isendir/brython,amrdraz/brython,jonathanverner/brython,kevinmel2000/brython,brython-dev/brython,Mozhuowen/brython,jonathanverner/brython,Hasimir/brython,rubyinhell/brython,Hasimir/brython,molebot/brython,Isendir/brython,JohnDenker/brython,olemis/brython,kevinmel2000/brython,molebot/brython,Mozhuowen/brython,kevinmel2000/brython,brython-dev/brython,rubyinhell/brython,kikocorreoso/brython,amrdraz/brython,Lh4cKg/brython,rubyinhell/brython,Lh4cKg/brython,Hasimir/brython,olemis/brython,olemis/brython,jonathanverner/brython,kikocorreoso/brython,amrdraz/brython,molebot/brython,Lh4cKg/brython,molebot/brython,JohnDenker/brython,jonathanverner/brython,Lh4cKg/brython,olemis/brython,kevinmel2000/brython,rubyinhell/brython,Mozhuowen/brython,amrdraz/brython,brython-dev/brython,Isendir/brython,JohnDenker/brython,JohnDenker/brython
import collections _d=collections.defaultdict(int) _d['a']+=1 _d['a']+=2 _d['b']+=4 assert _d['a'] == 3 assert _d['b'] == 4 s = 'mississippi' for k in s: _d[k] += 1 _values=list(_d.values()) _values.sort() assert _values == [1, 2, 3, 4, 4, 4] _keys=list(_d.keys()) _keys.sort() assert _keys == ['a', 'b', 'i', 'm', 'p', 's'] #now try with default being list (ie, empty list) _listdict=collections.defaultdict(list) for _i in range(10): _listdict['mylist'].append(_i) assert _listdict['not called'] == [] assert _listdict['mylist'] == [0,1,2,3,4,5,6,7,8,9] + # namedtuple + a = collections.namedtuple("foo", "bar bash bing")(1, 2, 3) + assert a.bar == 1 + assert a.bash == 2 + assert repr(a) == 'foo(bar=1, bash=2, bing=3)' +
Add a test on namedtuple
## Code Before: import collections _d=collections.defaultdict(int) _d['a']+=1 _d['a']+=2 _d['b']+=4 assert _d['a'] == 3 assert _d['b'] == 4 s = 'mississippi' for k in s: _d[k] += 1 _values=list(_d.values()) _values.sort() assert _values == [1, 2, 3, 4, 4, 4] _keys=list(_d.keys()) _keys.sort() assert _keys == ['a', 'b', 'i', 'm', 'p', 's'] #now try with default being list (ie, empty list) _listdict=collections.defaultdict(list) for _i in range(10): _listdict['mylist'].append(_i) assert _listdict['not called'] == [] assert _listdict['mylist'] == [0,1,2,3,4,5,6,7,8,9] ## Instruction: Add a test on namedtuple ## Code After: import collections _d=collections.defaultdict(int) _d['a']+=1 _d['a']+=2 _d['b']+=4 assert _d['a'] == 3 assert _d['b'] == 4 s = 'mississippi' for k in s: _d[k] += 1 _values=list(_d.values()) _values.sort() assert _values == [1, 2, 3, 4, 4, 4] _keys=list(_d.keys()) _keys.sort() assert _keys == ['a', 'b', 'i', 'm', 'p', 's'] #now try with default being list (ie, empty list) _listdict=collections.defaultdict(list) for _i in range(10): _listdict['mylist'].append(_i) assert _listdict['not called'] == [] assert _listdict['mylist'] == [0,1,2,3,4,5,6,7,8,9] # namedtuple a = collections.namedtuple("foo", "bar bash bing")(1, 2, 3) assert a.bar == 1 assert a.bash == 2 assert repr(a) == 'foo(bar=1, bash=2, bing=3)'
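Beyond the repr asserted above, namedtuple instances also support positional access and the underscore helpers; a short self-contained sketch:

from collections import namedtuple

Point = namedtuple('Point', 'x y')
p = Point(1, 2)
assert p.x == 1 and p[1] == 2             # attribute and index access
assert p._replace(x=5) == Point(5, 2)     # _replace returns a new tuple
assert dict(p._asdict()) == {'x': 1, 'y': 2}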
888584a49e697551c4f680cc8651be2fe80fc65d
configgen/generators/ppsspp/ppssppGenerator.py
configgen/generators/ppsspp/ppssppGenerator.py
import Command #~ import reicastControllers import recalboxFiles from generators.Generator import Generator import ppssppControllers import shutil import os.path import ConfigParser class PPSSPPGenerator(Generator): # Main entry of the module # Configure fba and return a command def generate(self, system, rom, playersControllers): if not system.config['configfile']: # Write emu.cfg to map joysticks, init with the default emu.cfg Config = ConfigParser.ConfigParser() Config.read(recalboxFiles.reicastConfigInit) section = "input" # For each pad detected for index in playersControllers : controller = playersControllers[index] # we only care about player 1 if controller.player != "1": continue ppssppControllers.generateControllerConfig(controller) # the command to run #~ commandArray = [recalboxFiles.ppssppBin, rom, "--escape-exit"] commandArray = [recalboxFiles.recalboxBins[system.config['emulator']], rom] return Command.Command(videomode=system.config['videomode'], array=commandArray, env={"XDG_CONFIG_HOME":recalboxFiles.CONF, "SDL_VIDEO_GL_DRIVER": "/usr/lib/libGLESv2.so"}, delay=1)
import Command #~ import reicastControllers import recalboxFiles from generators.Generator import Generator import ppssppControllers import shutil import os.path import ConfigParser class PPSSPPGenerator(Generator): # Main entry of the module # Configure fba and return a command def generate(self, system, rom, playersControllers): if not system.config['configfile']: for index in playersControllers : controller = playersControllers[index] # we only care about player 1 if controller.player != "1": continue ppssppControllers.generateControllerConfig(controller) break # the command to run commandArray = [recalboxFiles.recalboxBins[system.config['emulator']], rom] # The next line is a reminder on how to quit PPSSPP with just the HK #commandArray = [recalboxFiles.recalboxBins[system.config['emulator']], rom, "--escape-exit"] return Command.Command(videomode=system.config['videomode'], array=commandArray, env={"XDG_CONFIG_HOME":recalboxFiles.CONF, "SDL_VIDEO_GL_DRIVER": "/usr/lib/libGLESv2.so"}, delay=1)
Remove a bad typo from reicast
Remove a bad typo from reicast
Python
mit
nadenislamarre/recalbox-configgen,recalbox/recalbox-configgen,digitalLumberjack/recalbox-configgen
import Command #~ import reicastControllers import recalboxFiles from generators.Generator import Generator import ppssppControllers import shutil import os.path import ConfigParser class PPSSPPGenerator(Generator): # Main entry of the module # Configure fba and return a command def generate(self, system, rom, playersControllers): if not system.config['configfile']: - # Write emu.cfg to map joysticks, init with the default emu.cfg - Config = ConfigParser.ConfigParser() - Config.read(recalboxFiles.reicastConfigInit) - section = "input" - # For each pad detected for index in playersControllers : controller = playersControllers[index] # we only care about player 1 if controller.player != "1": continue ppssppControllers.generateControllerConfig(controller) + break # the command to run - #~ commandArray = [recalboxFiles.ppssppBin, rom, "--escape-exit"] commandArray = [recalboxFiles.recalboxBins[system.config['emulator']], rom] + # The next line is a reminder on how to quit PPSSPP with just the HK + #commandArray = [recalboxFiles.recalboxBins[system.config['emulator']], rom, "--escape-exit"] return Command.Command(videomode=system.config['videomode'], array=commandArray, env={"XDG_CONFIG_HOME":recalboxFiles.CONF, "SDL_VIDEO_GL_DRIVER": "/usr/lib/libGLESv2.so"}, delay=1)
Remove a bad typo from reicast
## Code Before: import Command #~ import reicastControllers import recalboxFiles from generators.Generator import Generator import ppssppControllers import shutil import os.path import ConfigParser class PPSSPPGenerator(Generator): # Main entry of the module # Configure fba and return a command def generate(self, system, rom, playersControllers): if not system.config['configfile']: # Write emu.cfg to map joysticks, init with the default emu.cfg Config = ConfigParser.ConfigParser() Config.read(recalboxFiles.reicastConfigInit) section = "input" # For each pad detected for index in playersControllers : controller = playersControllers[index] # we only care about player 1 if controller.player != "1": continue ppssppControllers.generateControllerConfig(controller) # the command to run #~ commandArray = [recalboxFiles.ppssppBin, rom, "--escape-exit"] commandArray = [recalboxFiles.recalboxBins[system.config['emulator']], rom] return Command.Command(videomode=system.config['videomode'], array=commandArray, env={"XDG_CONFIG_HOME":recalboxFiles.CONF, "SDL_VIDEO_GL_DRIVER": "/usr/lib/libGLESv2.so"}, delay=1) ## Instruction: Remove a bad typo from reicast ## Code After: import Command #~ import reicastControllers import recalboxFiles from generators.Generator import Generator import ppssppControllers import shutil import os.path import ConfigParser class PPSSPPGenerator(Generator): # Main entry of the module # Configure fba and return a command def generate(self, system, rom, playersControllers): if not system.config['configfile']: for index in playersControllers : controller = playersControllers[index] # we only care about player 1 if controller.player != "1": continue ppssppControllers.generateControllerConfig(controller) break # the command to run commandArray = [recalboxFiles.recalboxBins[system.config['emulator']], rom] # The next line is a reminder on how to quit PPSSPP with just the HK #commandArray = [recalboxFiles.recalboxBins[system.config['emulator']], rom, "--escape-exit"] return Command.Command(videomode=system.config['videomode'], array=commandArray, env={"XDG_CONFIG_HOME":recalboxFiles.CONF, "SDL_VIDEO_GL_DRIVER": "/usr/lib/libGLESv2.so"}, delay=1)
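On the design choice in this diff: the loop-plus-`break` is a linear search for the first player-1 pad. An equivalent and arguably more idiomatic form uses a generator with `next()`; the names below are hypothetical stand-ins for the record's objects:

controller = next(
    (c for c in players_controllers.values() if c.player == '1'),
    None,  # default when no player-1 pad is plugged in
)
if controller is not None:
    generate_controller_config(controller)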
cbdfc1b1cb4162256538576cabe2b6832aa83bca
django_mysqlpool/__init__.py
django_mysqlpool/__init__.py
from functools import wraps from django.db import connection def auto_close_db(f): "Ensures the database connection is closed when the function returns." @wraps(f) def wrapper(*args, **kwargs): try: return f(*args, **kwargs) finally: connection.close() return wrapper
from functools import wraps def auto_close_db(f): "Ensures the database connection is closed when the function returns." from django.db import connection @wraps(f) def wrapper(*args, **kwargs): try: return f(*args, **kwargs) finally: connection.close() return wrapper
Fix circular import when used with other add-ons that import django.db
Fix circular import when used with other add-ons that import django.db eg sorl_thumbnail: Traceback (most recent call last): File "/home/rpatterson/src/work/retrans/src/ReTransDjango/bin/manage", line 40, in <module> sys.exit(manage.main()) File "/home/rpatterson/src/work/retrans/src/ReTransDjango/retrans/manage.py", line 15, in main execute_manager(settings) File "/opt/src/eggs/Django-1.3-py2.7.egg/django/core/management/__init__.py", line 438, in execute_manager utility.execute() File "/opt/src/eggs/Django-1.3-py2.7.egg/django/core/management/__init__.py", line 379, in execute self.fetch_command(subcommand).run_from_argv(self.argv) File "/opt/src/eggs/Django-1.3-py2.7.egg/django/core/management/base.py", line 191, in run_from_argv self.execute(*args, **options.__dict__) File "/opt/src/eggs/Django-1.3-py2.7.egg/django/core/management/base.py", line 209, in execute translation.activate('en-us') File "/opt/src/eggs/Django-1.3-py2.7.egg/django/utils/translation/__init__.py", line 100, in activate return _trans.activate(language) File "/opt/src/eggs/Django-1.3-py2.7.egg/django/utils/translation/trans_real.py", line 202, in activate _active.value = translation(language) File "/opt/src/eggs/Django-1.3-py2.7.egg/django/utils/translation/trans_real.py", line 185, in translation default_translation = _fetch(settings.LANGUAGE_CODE) File "/opt/src/eggs/Django-1.3-py2.7.egg/django/utils/translation/trans_real.py", line 162, in _fetch app = import_module(appname) File "/opt/src/eggs/Django-1.3-py2.7.egg/django/utils/importlib.py", line 35, in import_module __import__(name) File "/opt/src/eggs/sorl_thumbnail-11.12-py2.7.egg/sorl/thumbnail/__init__.py", line 1, in <module> from sorl.thumbnail.fields import ImageField File "/opt/src/eggs/sorl_thumbnail-11.12-py2.7.egg/sorl/thumbnail/fields.py", line 2, in <module> from django.db import models File "/opt/src/eggs/Django-1.3-py2.7.egg/django/db/__init__.py", line 78, in <module> connection = connections[DEFAULT_DB_ALIAS] File "/opt/src/eggs/Django-1.3-py2.7.egg/django/db/utils.py", line 94, in __getitem__ backend = load_backend(db['ENGINE']) File "/opt/src/eggs/Django-1.3-py2.7.egg/django/db/utils.py", line 47, in load_backend if backend_name not in available_backends: django.core.exceptions.ImproperlyConfigured: 'django_mysqlpool.backends.mysqlpool' isn't an available database backend. Try using django.db.backends.XXX, where XXX is one of: 'dummy', 'mysql', 'oracle', 'postgresql', 'postgresql_psycopg2', 'sqlite3' Error was: cannot import name connection
Python
mit
smartfile/django-mysqlpool
from functools import wraps - from django.db import connection def auto_close_db(f): "Ensures the database connection is closed when the function returns." + from django.db import connection @wraps(f) def wrapper(*args, **kwargs): try: return f(*args, **kwargs) finally: connection.close() return wrapper
Fix circular import when used with other add-ons that import django.db
## Code Before: from functools import wraps from django.db import connection def auto_close_db(f): "Ensures the database connection is closed when the function returns." @wraps(f) def wrapper(*args, **kwargs): try: return f(*args, **kwargs) finally: connection.close() return wrapper ## Instruction: Fix circular import when used with other add-ons that import django.db ## Code After: from functools import wraps def auto_close_db(f): "Ensures the database connection is closed when the function returns." from django.db import connection @wraps(f) def wrapper(*args, **kwargs): try: return f(*args, **kwargs) finally: connection.close() return wrapper
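The fix is the standard deferred-import pattern: moving `from django.db import connection` out of module scope means `django.db` is resolved when `auto_close_db` is applied rather than when the module is first imported, which is what recursed during backend loading. A variant sketch that defers one scope further, to call time of the wrapped function:

from functools import wraps

def auto_close_db(f):
    "Ensures the database connection is closed when the function returns."
    @wraps(f)
    def wrapper(*args, **kwargs):
        # Deferred import: django.db is resolved only on the first call,
        # long after Django has finished loading its database backends.
        from django.db import connection
        try:
            return f(*args, **kwargs)
        finally:
            connection.close()
    return wrapper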
0f7816676eceb42f13786408f1d1a09527919a1e
Modules/Biophotonics/python/iMC/msi/io/spectrometerreader.py
Modules/Biophotonics/python/iMC/msi/io/spectrometerreader.py
import numpy as np from msi.io.reader import Reader from msi.msi import Msi class SpectrometerReader(Reader): def __init__(self): pass def read(self, file_to_read): # our spectrometer like to follow german standards in files, we need # to switch to english ones transformed="" replacements = {',': '.', '\r\n': ''} with open(file_to_read) as infile: for line in infile: for src, target in replacements.iteritems(): line = line.replace(src, target) transformed = "\n".join([transformed, line]) for num, line in enumerate(transformed.splitlines(), 1): if ">>>>>Begin Spectral Data<<<<<" in line: break string_only_spectrum = "\n".join(transformed.splitlines()[num:]) data_vector = np.fromstring(string_only_spectrum, sep="\t").reshape(-1, 2) msi = Msi(data_vector[:, 1], {'wavelengths': data_vector[:, 0] * 10 ** -9}) return msi
import numpy as np from msi.io.reader import Reader from msi.msi import Msi class SpectrometerReader(Reader): def __init__(self): pass def read(self, file_to_read): # our spectrometer like to follow german standards in files, we need # to switch to english ones transformed="" replacements = {',': '.', '\r\n': ''} with open(file_to_read) as infile: for line in infile: for src, target in replacements.iteritems(): line = line.replace(src, target) transformed = "\n".join([transformed, line]) for num, line in enumerate(transformed.splitlines(), 1): if ">>>>>Begin" in line: break for num_end, line in enumerate(transformed.splitlines(), 1): if ">>>>>End" in line: num_end -= 1 break string_only_spectrum = "\n".join(transformed.splitlines()[num:num_end]) data_vector = np.fromstring(string_only_spectrum, sep="\t").reshape(-1, 2) msi = Msi(data_vector[:, 1], {'wavelengths': data_vector[:, 0] * 10 ** -9}) return msi
Change SpectrometerReader a little so it can handle more data formats.
Change SpectrometerReader a little so it can handle more data formats.
Python
bsd-3-clause
MITK/MITK,iwegner/MITK,RabadanLab/MITKats,RabadanLab/MITKats,iwegner/MITK,fmilano/mitk,fmilano/mitk,RabadanLab/MITKats,RabadanLab/MITKats,fmilano/mitk,fmilano/mitk,MITK/MITK,RabadanLab/MITKats,RabadanLab/MITKats,fmilano/mitk,fmilano/mitk,iwegner/MITK,fmilano/mitk,MITK/MITK,iwegner/MITK,iwegner/MITK,MITK/MITK,MITK/MITK,iwegner/MITK,MITK/MITK
import numpy as np from msi.io.reader import Reader from msi.msi import Msi class SpectrometerReader(Reader): def __init__(self): pass def read(self, file_to_read): # our spectrometer like to follow german standards in files, we need # to switch to english ones transformed="" replacements = {',': '.', '\r\n': ''} with open(file_to_read) as infile: for line in infile: for src, target in replacements.iteritems(): line = line.replace(src, target) transformed = "\n".join([transformed, line]) for num, line in enumerate(transformed.splitlines(), 1): - if ">>>>>Begin Spectral Data<<<<<" in line: + if ">>>>>Begin" in line: break + + for num_end, line in enumerate(transformed.splitlines(), 1): + if ">>>>>End" in line: + num_end -= 1 + break - string_only_spectrum = "\n".join(transformed.splitlines()[num:]) + string_only_spectrum = "\n".join(transformed.splitlines()[num:num_end]) data_vector = np.fromstring(string_only_spectrum, sep="\t").reshape(-1, 2) msi = Msi(data_vector[:, 1], {'wavelengths': data_vector[:, 0] * 10 ** -9}) return msi
Change SpectrometerReader a little so it can handle more data formats.
## Code Before: import numpy as np from msi.io.reader import Reader from msi.msi import Msi class SpectrometerReader(Reader): def __init__(self): pass def read(self, file_to_read): # our spectrometer like to follow german standards in files, we need # to switch to english ones transformed="" replacements = {',': '.', '\r\n': ''} with open(file_to_read) as infile: for line in infile: for src, target in replacements.iteritems(): line = line.replace(src, target) transformed = "\n".join([transformed, line]) for num, line in enumerate(transformed.splitlines(), 1): if ">>>>>Begin Spectral Data<<<<<" in line: break string_only_spectrum = "\n".join(transformed.splitlines()[num:]) data_vector = np.fromstring(string_only_spectrum, sep="\t").reshape(-1, 2) msi = Msi(data_vector[:, 1], {'wavelengths': data_vector[:, 0] * 10 ** -9}) return msi ## Instruction: Change SpectrometerReader a little so it can handle more data formats. ## Code After: import numpy as np from msi.io.reader import Reader from msi.msi import Msi class SpectrometerReader(Reader): def __init__(self): pass def read(self, file_to_read): # our spectrometer like to follow german standards in files, we need # to switch to english ones transformed="" replacements = {',': '.', '\r\n': ''} with open(file_to_read) as infile: for line in infile: for src, target in replacements.iteritems(): line = line.replace(src, target) transformed = "\n".join([transformed, line]) for num, line in enumerate(transformed.splitlines(), 1): if ">>>>>Begin" in line: break for num_end, line in enumerate(transformed.splitlines(), 1): if ">>>>>End" in line: num_end -= 1 break string_only_spectrum = "\n".join(transformed.splitlines()[num:num_end]) data_vector = np.fromstring(string_only_spectrum, sep="\t").reshape(-1, 2) msi = Msi(data_vector[:, 1], {'wavelengths': data_vector[:, 0] * 10 ** -9}) return msi
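The parsing change amounts to slicing the payload between a begin marker and an end marker. A self-contained sketch of the same idea with 0-based indices and hypothetical marker text:

text = (
    'header line\n'
    '>>>>>Begin Spectral Data<<<<<\n'
    '400.0\t0.10\n'
    '401.0\t0.12\n'
    '>>>>>End Spectral Data<<<<<\n'
)
lines = text.splitlines()
begin = next(i for i, line in enumerate(lines) if '>>>>>Begin' in line)
end = next(i for i, line in enumerate(lines) if '>>>>>End' in line)
spectrum = lines[begin + 1:end]
assert spectrum == ['400.0\t0.10', '401.0\t0.12']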
33a3ebacabda376826f0470129e8583e4974fd9d
examples/markdown/build.py
examples/markdown/build.py
import os import jinja2 # Markdown to HTML library # https://pypi.org/project/Markdown/ import markdown from staticjinja import Site markdowner = markdown.Markdown(output_format="html5") def md_context(template): with open(template.filename) as f: markdown_content = f.read() return {'post_content_html': markdowner.convert(markdown_content)} def render_md(site, template, **kwargs): # Given a template such as posts/post1.md # Determine the post's title (post1) and it's directory (posts/) directory, fname = os.path.split(template.name) post_title, _ = fname.split(".") # Determine where the result will be streamed (build/posts/post1.html) out_dir = os.path.join(site.outpath, directory) post_fname = "{}.html".format(post_title) out = os.path.join(out_dir, post_fname) # Render and stream the result if not os.path.exists(out_dir): os.makedirs(out_dir) post_template = site.get_template("_post.html") post_template.stream(**kwargs).dump(out, encoding="utf-8") site = Site.make_site( searchpath='src', outpath='build', contexts=[('.*.md', md_context)], rules = [('.*.md', render_md)], ) site.render()
import os # Markdown to HTML library # https://pypi.org/project/Markdown/ import markdown from staticjinja import Site markdowner = markdown.Markdown(output_format="html5") def md_context(template): with open(template.filename) as f: markdown_content = f.read() return {'post_content_html': markdowner.convert(markdown_content)} def render_md(site, template, **kwargs): # Given a template such as posts/post1.md # Determine the post's title (post1) and it's directory (posts/) directory, fname = os.path.split(template.name) post_title, _ = fname.split(".") # Determine where the result will be streamed (build/posts/post1.html) out_dir = os.path.join(site.outpath, directory) post_fname = "{}.html".format(post_title) out = os.path.join(out_dir, post_fname) # Render and stream the result if not os.path.exists(out_dir): os.makedirs(out_dir) post_template = site.get_template("_post.html") post_template.stream(**kwargs).dump(out, encoding="utf-8") site = Site.make_site( searchpath='src', outpath='build', contexts=[('.*.md', md_context)], rules = [('.*.md', render_md)], ) site.render()
Remove unneeded jinja2 import in markdown example
Remove unneeded jinja2 import in markdown example
Python
mit
Ceasar/staticjinja,Ceasar/staticjinja
import os - import jinja2 # Markdown to HTML library # https://pypi.org/project/Markdown/ import markdown from staticjinja import Site markdowner = markdown.Markdown(output_format="html5") def md_context(template): with open(template.filename) as f: markdown_content = f.read() return {'post_content_html': markdowner.convert(markdown_content)} def render_md(site, template, **kwargs): # Given a template such as posts/post1.md # Determine the post's title (post1) and it's directory (posts/) directory, fname = os.path.split(template.name) post_title, _ = fname.split(".") # Determine where the result will be streamed (build/posts/post1.html) out_dir = os.path.join(site.outpath, directory) post_fname = "{}.html".format(post_title) out = os.path.join(out_dir, post_fname) # Render and stream the result if not os.path.exists(out_dir): os.makedirs(out_dir) post_template = site.get_template("_post.html") post_template.stream(**kwargs).dump(out, encoding="utf-8") site = Site.make_site( searchpath='src', outpath='build', contexts=[('.*.md', md_context)], rules = [('.*.md', render_md)], ) site.render()
Remove unneeded jinja2 import in markdown example
## Code Before: import os import jinja2 # Markdown to HTML library # https://pypi.org/project/Markdown/ import markdown from staticjinja import Site markdowner = markdown.Markdown(output_format="html5") def md_context(template): with open(template.filename) as f: markdown_content = f.read() return {'post_content_html': markdowner.convert(markdown_content)} def render_md(site, template, **kwargs): # Given a template such as posts/post1.md # Determine the post's title (post1) and it's directory (posts/) directory, fname = os.path.split(template.name) post_title, _ = fname.split(".") # Determine where the result will be streamed (build/posts/post1.html) out_dir = os.path.join(site.outpath, directory) post_fname = "{}.html".format(post_title) out = os.path.join(out_dir, post_fname) # Render and stream the result if not os.path.exists(out_dir): os.makedirs(out_dir) post_template = site.get_template("_post.html") post_template.stream(**kwargs).dump(out, encoding="utf-8") site = Site.make_site( searchpath='src', outpath='build', contexts=[('.*.md', md_context)], rules = [('.*.md', render_md)], ) site.render() ## Instruction: Remove unneeded jinja2 import in markdown example ## Code After: import os # Markdown to HTML library # https://pypi.org/project/Markdown/ import markdown from staticjinja import Site markdowner = markdown.Markdown(output_format="html5") def md_context(template): with open(template.filename) as f: markdown_content = f.read() return {'post_content_html': markdowner.convert(markdown_content)} def render_md(site, template, **kwargs): # Given a template such as posts/post1.md # Determine the post's title (post1) and it's directory (posts/) directory, fname = os.path.split(template.name) post_title, _ = fname.split(".") # Determine where the result will be streamed (build/posts/post1.html) out_dir = os.path.join(site.outpath, directory) post_fname = "{}.html".format(post_title) out = os.path.join(out_dir, post_fname) # Render and stream the result if not os.path.exists(out_dir): os.makedirs(out_dir) post_template = site.get_template("_post.html") post_template.stream(**kwargs).dump(out, encoding="utf-8") site = Site.make_site( searchpath='src', outpath='build', contexts=[('.*.md', md_context)], rules = [('.*.md', render_md)], ) site.render()
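For reference, the `markdowner` used in the context hook is the python-markdown converter; a minimal usage sketch:

import markdown

md = markdown.Markdown(output_format='html5')
assert md.convert('# Hello') == '<h1>Hello</h1>'
md.reset()  # recommended when reusing one instance across documents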
c9735ce9ea330737cf47474ef420303c56a32873
apps/demos/admin.py
apps/demos/admin.py
from django.contrib import admin from .models import Submission class SubmissionAdmin(admin.ModelAdmin): list_display = ( 'title', 'creator', 'featured', 'hidden', 'tags', 'modified', ) admin.site.register(Submission, SubmissionAdmin)
from django.contrib import admin from .models import Submission class SubmissionAdmin(admin.ModelAdmin): list_display = ( 'title', 'creator', 'featured', 'hidden', 'tags', 'modified', ) list_editable = ( 'featured', 'hidden' ) admin.site.register(Submission, SubmissionAdmin)
Make featured and hidden flags editable from demo listing
Make featured and hidden flags editable from demo listing
Python
mpl-2.0
bluemini/kuma,openjck/kuma,hoosteeno/kuma,Elchi3/kuma,davehunt/kuma,anaran/kuma,robhudson/kuma,davidyezsetz/kuma,darkwing/kuma,anaran/kuma,chirilo/kuma,jezdez/kuma,yfdyh000/kuma,nhenezi/kuma,Elchi3/kuma,ronakkhunt/kuma,a2sheppy/kuma,groovecoder/kuma,SphinxKnight/kuma,escattone/kuma,jezdez/kuma,tximikel/kuma,a2sheppy/kuma,varunkamra/kuma,ollie314/kuma,ronakkhunt/kuma,biswajitsahu/kuma,hoosteeno/kuma,escattone/kuma,SphinxKnight/kuma,cindyyu/kuma,Elchi3/kuma,groovecoder/kuma,biswajitsahu/kuma,utkbansal/kuma,FrankBian/kuma,nhenezi/kuma,davehunt/kuma,openjck/kuma,jgmize/kuma,nhenezi/kuma,escattone/kuma,bluemini/kuma,jwhitlock/kuma,bluemini/kuma,varunkamra/kuma,RanadeepPolavarapu/kuma,biswajitsahu/kuma,SphinxKnight/kuma,carnell69/kuma,anaran/kuma,cindyyu/kuma,hoosteeno/kuma,ronakkhunt/kuma,cindyyu/kuma,mozilla/kuma,SphinxKnight/kuma,tximikel/kuma,scrollback/kuma,jezdez/kuma,mozilla/kuma,scrollback/kuma,SphinxKnight/kuma,surajssd/kuma,robhudson/kuma,YOTOV-LIMITED/kuma,FrankBian/kuma,tximikel/kuma,jgmize/kuma,robhudson/kuma,varunkamra/kuma,carnell69/kuma,yfdyh000/kuma,scrollback/kuma,jwhitlock/kuma,utkbansal/kuma,scrollback/kuma,varunkamra/kuma,chirilo/kuma,safwanrahman/kuma,Elchi3/kuma,cindyyu/kuma,bluemini/kuma,openjck/kuma,groovecoder/kuma,utkbansal/kuma,jezdez/kuma,surajssd/kuma,ronakkhunt/kuma,RanadeepPolavarapu/kuma,davidyezsetz/kuma,jgmize/kuma,utkbansal/kuma,surajssd/kuma,surajssd/kuma,whip112/Whip112,yfdyh000/kuma,biswajitsahu/kuma,davehunt/kuma,whip112/Whip112,FrankBian/kuma,jgmize/kuma,hoosteeno/kuma,chirilo/kuma,safwanrahman/kuma,mastizada/kuma,jezdez/kuma,robhudson/kuma,tximikel/kuma,mozilla/kuma,mastizada/kuma,surajssd/kuma,a2sheppy/kuma,tximikel/kuma,FrankBian/kuma,openjck/kuma,varunkamra/kuma,YOTOV-LIMITED/kuma,carnell69/kuma,RanadeepPolavarapu/kuma,MenZil/kuma,varunkamra/kuma,darkwing/kuma,tximikel/kuma,RanadeepPolavarapu/kuma,groovecoder/kuma,nhenezi/kuma,biswajitsahu/kuma,hoosteeno/kuma,FrankBian/kuma,carnell69/kuma,robhudson/kuma,MenZil/kuma,anaran/kuma,jgmize/kuma,cindyyu/kuma,mastizada/kuma,ollie314/kuma,safwanrahman/kuma,chirilo/kuma,whip112/Whip112,carnell69/kuma,anaran/kuma,utkbansal/kuma,ollie314/kuma,whip112/Whip112,safwanrahman/kuma,YOTOV-LIMITED/kuma,jwhitlock/kuma,jwhitlock/kuma,openjck/kuma,safwanrahman/kuma,surajssd/kuma,openjck/kuma,cindyyu/kuma,MenZil/kuma,groovecoder/kuma,nhenezi/kuma,a2sheppy/kuma,YOTOV-LIMITED/kuma,ollie314/kuma,ronakkhunt/kuma,darkwing/kuma,chirilo/kuma,ollie314/kuma,groovecoder/kuma,robhudson/kuma,YOTOV-LIMITED/kuma,darkwing/kuma,a2sheppy/kuma,RanadeepPolavarapu/kuma,mastizada/kuma,MenZil/kuma,biswajitsahu/kuma,ollie314/kuma,anaran/kuma,whip112/Whip112,RanadeepPolavarapu/kuma,bluemini/kuma,YOTOV-LIMITED/kuma,yfdyh000/kuma,mozilla/kuma,ronakkhunt/kuma,davidyezsetz/kuma,davidyezsetz/kuma,davehunt/kuma,hoosteeno/kuma,Elchi3/kuma,SphinxKnight/kuma,MenZil/kuma,davidyezsetz/kuma,mozilla/kuma,jwhitlock/kuma,jgmize/kuma,MenZil/kuma,darkwing/kuma,davehunt/kuma,yfdyh000/kuma,jezdez/kuma,utkbansal/kuma,davehunt/kuma,carnell69/kuma,scrollback/kuma,chirilo/kuma,yfdyh000/kuma,safwanrahman/kuma,bluemini/kuma,darkwing/kuma,whip112/Whip112
from django.contrib import admin from .models import Submission class SubmissionAdmin(admin.ModelAdmin): list_display = ( 'title', 'creator', 'featured', 'hidden', 'tags', 'modified', ) + list_editable = ( 'featured', 'hidden' ) admin.site.register(Submission, SubmissionAdmin)
Make featured and hidden flags editable from demo listing
## Code Before: from django.contrib import admin from .models import Submission class SubmissionAdmin(admin.ModelAdmin): list_display = ( 'title', 'creator', 'featured', 'hidden', 'tags', 'modified', ) admin.site.register(Submission, SubmissionAdmin) ## Instruction: Make featured and hidden flags editable from demo listing ## Code After: from django.contrib import admin from .models import Submission class SubmissionAdmin(admin.ModelAdmin): list_display = ( 'title', 'creator', 'featured', 'hidden', 'tags', 'modified', ) list_editable = ( 'featured', 'hidden' ) admin.site.register(Submission, SubmissionAdmin)
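One constraint worth noting with this change: Django's system checks require every `list_editable` name to also appear in `list_display`, and the first (link-rendered) column cannot be editable unless `list_display_links` is set. A minimal valid configuration, mirroring the record:

from django.contrib import admin

class SubmissionAdmin(admin.ModelAdmin):
    # 'title' stays the link column; the editable flags come after it.
    list_display = ('title', 'featured', 'hidden')
    list_editable = ('featured', 'hidden')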
b43e06dd5a80814e15ce20f50d683f0daaa19a93
addons/hr/models/hr_employee_base.py
addons/hr/models/hr_employee_base.py
from odoo import fields, models class HrEmployeeBase(models.AbstractModel): _name = "hr.employee.base" _description = "Basic Employee" _order = 'name' name = fields.Char() active = fields.Boolean("Active") department_id = fields.Many2one('hr.department', 'Department') job_id = fields.Many2one('hr.job', 'Job Position') job_title = fields.Char("Job Title") company_id = fields.Many2one('res.company', 'Company') address_id = fields.Many2one('res.partner', 'Work Address') work_phone = fields.Char('Work Phone') mobile_phone = fields.Char('Work Mobile') work_email = fields.Char('Work Email') work_location = fields.Char('Work Location') user_id = fields.Many2one('res.users') resource_id = fields.Many2one('resource.resource') resource_calendar_id = fields.Many2one('resource.calendar')
from odoo import fields, models class HrEmployeeBase(models.AbstractModel): _name = "hr.employee.base" _description = "Basic Employee" _order = 'name' name = fields.Char() active = fields.Boolean("Active") color = fields.Integer('Color Index', default=0) department_id = fields.Many2one('hr.department', 'Department') job_id = fields.Many2one('hr.job', 'Job Position') job_title = fields.Char("Job Title") company_id = fields.Many2one('res.company', 'Company') address_id = fields.Many2one('res.partner', 'Work Address') work_phone = fields.Char('Work Phone') mobile_phone = fields.Char('Work Mobile') work_email = fields.Char('Work Email') work_location = fields.Char('Work Location') user_id = fields.Many2one('res.users') resource_id = fields.Many2one('resource.resource') resource_calendar_id = fields.Many2one('resource.calendar')
Add the color field to public employee
[FIX] hr: Add the color field to public employee

The color field is necessary to be able to display some fields (many2many_tags) and is used in the kanban views

closes odoo/odoo#35216

Signed-off-by: Yannick Tivisse (yti) <yti@odoo.com>

closes odoo/odoo#35462

Signed-off-by: Romain Libert (rli) <d3a53ea3f0d6ebbbf1eb8431093f132bc4577d93@odoo.com>
Python
agpl-3.0
ygol/odoo,ygol/odoo,ygol/odoo,ygol/odoo,ygol/odoo,ygol/odoo,ygol/odoo
from odoo import fields, models class HrEmployeeBase(models.AbstractModel): _name = "hr.employee.base" _description = "Basic Employee" _order = 'name' name = fields.Char() active = fields.Boolean("Active") + color = fields.Integer('Color Index', default=0) department_id = fields.Many2one('hr.department', 'Department') job_id = fields.Many2one('hr.job', 'Job Position') job_title = fields.Char("Job Title") company_id = fields.Many2one('res.company', 'Company') address_id = fields.Many2one('res.partner', 'Work Address') work_phone = fields.Char('Work Phone') mobile_phone = fields.Char('Work Mobile') work_email = fields.Char('Work Email') work_location = fields.Char('Work Location') user_id = fields.Many2one('res.users') resource_id = fields.Many2one('resource.resource') resource_calendar_id = fields.Many2one('resource.calendar')
Add the color field to public employee
## Code Before: from odoo import fields, models class HrEmployeeBase(models.AbstractModel): _name = "hr.employee.base" _description = "Basic Employee" _order = 'name' name = fields.Char() active = fields.Boolean("Active") department_id = fields.Many2one('hr.department', 'Department') job_id = fields.Many2one('hr.job', 'Job Position') job_title = fields.Char("Job Title") company_id = fields.Many2one('res.company', 'Company') address_id = fields.Many2one('res.partner', 'Work Address') work_phone = fields.Char('Work Phone') mobile_phone = fields.Char('Work Mobile') work_email = fields.Char('Work Email') work_location = fields.Char('Work Location') user_id = fields.Many2one('res.users') resource_id = fields.Many2one('resource.resource') resource_calendar_id = fields.Many2one('resource.calendar') ## Instruction: Add the color field to public employee ## Code After: from odoo import fields, models class HrEmployeeBase(models.AbstractModel): _name = "hr.employee.base" _description = "Basic Employee" _order = 'name' name = fields.Char() active = fields.Boolean("Active") color = fields.Integer('Color Index', default=0) department_id = fields.Many2one('hr.department', 'Department') job_id = fields.Many2one('hr.job', 'Job Position') job_title = fields.Char("Job Title") company_id = fields.Many2one('res.company', 'Company') address_id = fields.Many2one('res.partner', 'Work Address') work_phone = fields.Char('Work Phone') mobile_phone = fields.Char('Work Mobile') work_email = fields.Char('Work Email') work_location = fields.Char('Work Location') user_id = fields.Many2one('res.users') resource_id = fields.Many2one('resource.resource') resource_calendar_id = fields.Many2one('resource.calendar')
81a03d81ece17487f7b7296f524339a052d45a0d
src/gcf.py
src/gcf.py
def gcf(a, b): while b: a, b = b, a % b return a def xgcf(a, b): s1, s2 = 1, 0 t1, t2 = 0, 1 while b: q, r = divmod(a, b) a, b = b, r s2, s1 = s1 - q * s2, s2 t2, t1 = t1 - q * t2, t2 return a, s1, t1
def gcf(a, b): while b: a, b = b, a % b return a def xgcf(a, b): s1, s2 = 1, 0 t1, t2 = 0, 1 while b: q, r = divmod(a, b) a, b = b, r s2, s1 = s1 - q * s2, s2 t2, t1 = t1 - q * t2, t2 # Bézout's identity says: s1 * a + t1 * b == gcd(a, b) return a, s1, t1
Add Bézout's identity into comments
Add Bézout's identity into comments
Python
mit
all3fox/algos-py
def gcf(a, b): while b: a, b = b, a % b return a def xgcf(a, b): s1, s2 = 1, 0 t1, t2 = 0, 1 while b: q, r = divmod(a, b) a, b = b, r s2, s1 = s1 - q * s2, s2 t2, t1 = t1 - q * t2, t2 + # Bézout's identity says: s1 * a + t1 * b == gcd(a, b) return a, s1, t1
Add Bézout's identity into comments
## Code Before: def gcf(a, b): while b: a, b = b, a % b return a def xgcf(a, b): s1, s2 = 1, 0 t1, t2 = 0, 1 while b: q, r = divmod(a, b) a, b = b, r s2, s1 = s1 - q * s2, s2 t2, t1 = t1 - q * t2, t2 return a, s1, t1 ## Instruction: Add Bézout's identity into comments ## Code After: def gcf(a, b): while b: a, b = b, a % b return a def xgcf(a, b): s1, s2 = 1, 0 t1, t2 = 0, 1 while b: q, r = divmod(a, b) a, b = b, r s2, s1 = s1 - q * s2, s2 t2, t1 = t1 - q * t2, t2 # Bézout's identity says: s1 * a + t1 * b == gcd(a, b) return a, s1, t1
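A quick numerical check of the identity the new comment states, reusing the two functions from this record:

def gcf(a, b):
    while b:
        a, b = b, a % b
    return a

def xgcf(a, b):
    s1, s2 = 1, 0
    t1, t2 = 0, 1
    while b:
        q, r = divmod(a, b)
        a, b = b, r
        s2, s1 = s1 - q * s2, s2
        t2, t1 = t1 - q * t2, t2
    return a, s1, t1

g, s, t = xgcf(240, 46)
assert g == gcf(240, 46) == 2
assert s * 240 + t * 46 == g  # Bézout: s*a + t*b == gcd(a, b)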
04df5c189d6d1760c692d1985faf558058e56eb2
flask_pagedown/__init__.py
flask_pagedown/__init__.py
from jinja2 import Markup from flask import current_app, request class _pagedown(object): def include_pagedown(self): if request.is_secure: protocol = 'https' else: protocol = 'http' return Markup(''' <script type="text/javascript" src="{0}://cdnjs.cloudflare.com/ajax/libs/pagedown/1.0/Markdown.Converter.min.js"></script> <script type="text/javascript" src="{0}://cdnjs.cloudflare.com/ajax/libs/pagedown/1.0/Markdown.Sanitizer.min.js"></script> '''.format(protocol)) def html_head(self): return self.include_pagedown() class PageDown(object): def __init__(self, app = None): if app is not None: self.init_app(app) def init_app(self, app): if not hasattr(app, 'extensions'): app.extensions = {} app.extensions['pagedown'] = _pagedown() app.context_processor(self.context_processor) @staticmethod def context_processor(): return { 'pagedown': current_app.extensions['pagedown'] }
from jinja2 import Markup from flask import current_app, request class _pagedown(object): def include_pagedown(self): return Markup(''' <script type="text/javascript" src="//cdnjs.cloudflare.com/ajax/libs/pagedown/1.0/Markdown.Converter.min.js"></script> <script type="text/javascript" src="//cdnjs.cloudflare.com/ajax/libs/pagedown/1.0/Markdown.Sanitizer.min.js"></script> ''') def html_head(self): return self.include_pagedown() class PageDown(object): def __init__(self, app = None): if app is not None: self.init_app(app) def init_app(self, app): if not hasattr(app, 'extensions'): app.extensions = {} app.extensions['pagedown'] = _pagedown() app.context_processor(self.context_processor) @staticmethod def context_processor(): return { 'pagedown': current_app.extensions['pagedown'] }
Fix support for SSL for proxied sites, or otherwise uncertain situations
Fix support for SSL for proxied sites, or otherwise uncertain situations

My particular situation is deployed through ElasticBeanstalk, proxying HTTPS to HTTP on the actual endpoints. This makes Flask think that it is only running over HTTP, not HTTPS.
Python
mit
miguelgrinberg/Flask-PageDown,miguelgrinberg/Flask-PageDown
from jinja2 import Markup from flask import current_app, request class _pagedown(object): def include_pagedown(self): - if request.is_secure: - protocol = 'https' - else: - protocol = 'http' return Markup(''' - <script type="text/javascript" src="{0}://cdnjs.cloudflare.com/ajax/libs/pagedown/1.0/Markdown.Converter.min.js"></script> + <script type="text/javascript" src="//cdnjs.cloudflare.com/ajax/libs/pagedown/1.0/Markdown.Converter.min.js"></script> - <script type="text/javascript" src="{0}://cdnjs.cloudflare.com/ajax/libs/pagedown/1.0/Markdown.Sanitizer.min.js"></script> + <script type="text/javascript" src="//cdnjs.cloudflare.com/ajax/libs/pagedown/1.0/Markdown.Sanitizer.min.js"></script> - '''.format(protocol)) + ''') def html_head(self): return self.include_pagedown() class PageDown(object): def __init__(self, app = None): if app is not None: self.init_app(app) def init_app(self, app): if not hasattr(app, 'extensions'): app.extensions = {} app.extensions['pagedown'] = _pagedown() app.context_processor(self.context_processor) @staticmethod def context_processor(): return { 'pagedown': current_app.extensions['pagedown'] }
Fix support for SSL for proxied sites, or otherwise uncertain situations
## Code Before: from jinja2 import Markup from flask import current_app, request class _pagedown(object): def include_pagedown(self): if request.is_secure: protocol = 'https' else: protocol = 'http' return Markup(''' <script type="text/javascript" src="{0}://cdnjs.cloudflare.com/ajax/libs/pagedown/1.0/Markdown.Converter.min.js"></script> <script type="text/javascript" src="{0}://cdnjs.cloudflare.com/ajax/libs/pagedown/1.0/Markdown.Sanitizer.min.js"></script> '''.format(protocol)) def html_head(self): return self.include_pagedown() class PageDown(object): def __init__(self, app = None): if app is not None: self.init_app(app) def init_app(self, app): if not hasattr(app, 'extensions'): app.extensions = {} app.extensions['pagedown'] = _pagedown() app.context_processor(self.context_processor) @staticmethod def context_processor(): return { 'pagedown': current_app.extensions['pagedown'] } ## Instruction: Fix support for SSL for proxied sites, or otherwise uncertain situations ## Code After: from jinja2 import Markup from flask import current_app, request class _pagedown(object): def include_pagedown(self): return Markup(''' <script type="text/javascript" src="//cdnjs.cloudflare.com/ajax/libs/pagedown/1.0/Markdown.Converter.min.js"></script> <script type="text/javascript" src="//cdnjs.cloudflare.com/ajax/libs/pagedown/1.0/Markdown.Sanitizer.min.js"></script> ''') def html_head(self): return self.include_pagedown() class PageDown(object): def __init__(self, app = None): if app is not None: self.init_app(app) def init_app(self, app): if not hasattr(app, 'extensions'): app.extensions = {} app.extensions['pagedown'] = _pagedown() app.context_processor(self.context_processor) @staticmethod def context_processor(): return { 'pagedown': current_app.extensions['pagedown'] }
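The fix swaps hard-coded schemes for scheme-relative (`//...`) URLs, which browsers resolve against whatever scheme the page itself was served with, so it works even when an HTTPS-terminating proxy makes `request.is_secure` unreliable. The resolution rule, sketched with the standard library:

from urllib.parse import urljoin

cdn = '//cdnjs.cloudflare.com/ajax/libs/pagedown/1.0/Markdown.Converter.min.js'
assert urljoin('https://example.com/page', cdn).startswith('https://')
assert urljoin('http://example.com/page', cdn).startswith('http://')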
fc9fdd2115b46c71c36ba7d86f14395ac4cf1e3e
genome_designer/scripts/generate_coverage_data.py
genome_designer/scripts/generate_coverage_data.py
import os import subprocess from django.conf import settings from main.models import get_dataset_with_type from main.models import AlignmentGroup from main.models import Dataset from utils import generate_safe_filename_prefix_from_label def analyze_coverage(sample_alignment, output_dir): ref_genome_fasta_location = get_dataset_with_type( sample_alignment.alignment_group.reference_genome, Dataset.TYPE.REFERENCE_GENOME_FASTA).get_absolute_location() input_bam_file = sample_alignment.dataset_set.get( type=Dataset.TYPE.BWA_ALIGN).get_absolute_location() output_filename = generate_safe_filename_prefix_from_label( sample_alignment.experiment_sample.label + '_' + sample_alignment.uid) + '.coverage' output_path = os.path.join(output_dir, output_filename) with open(output_path, 'w') as fh: subprocess.check_call([ '%s/samtools/samtools' % settings.TOOLS_DIR, 'mpileup', '-f', ref_genome_fasta_location, input_bam_file ], stdout=fh)
import os import subprocess from django.conf import settings from main.models import get_dataset_with_type from main.models import AlignmentGroup from main.models import Dataset from utils import generate_safe_filename_prefix_from_label def analyze_coverage(sample_alignment, output_dir): ref_genome_fasta_location = get_dataset_with_type( sample_alignment.alignment_group.reference_genome, Dataset.TYPE.REFERENCE_GENOME_FASTA).get_absolute_location() input_bam_file = sample_alignment.dataset_set.get( type=Dataset.TYPE.BWA_ALIGN).get_absolute_location() output_filename = generate_safe_filename_prefix_from_label( sample_alignment.experiment_sample.label + '_' + sample_alignment.uid) + '.coverage' output_path = os.path.join(output_dir, output_filename) with open(output_path, 'w') as fh: p_mpileup = subprocess.Popen([ '%s/samtools/samtools' % settings.TOOLS_DIR, 'mpileup', '-f', ref_genome_fasta_location, input_bam_file ], stdout=subprocess.PIPE) subprocess.check_call([ 'cut', '-f', '-4' ], stdin=p_mpileup.stdout, stdout=fh)
Update coverage script to only output the first 4 cols, which show coverage.
Update coverage script to only output the first 4 cols, which show coverage.
Python
mit
woodymit/millstone,woodymit/millstone_accidental_source,woodymit/millstone_accidental_source,woodymit/millstone,woodymit/millstone_accidental_source,woodymit/millstone_accidental_source,woodymit/millstone,churchlab/millstone,woodymit/millstone,churchlab/millstone,churchlab/millstone,churchlab/millstone
import os import subprocess from django.conf import settings from main.models import get_dataset_with_type from main.models import AlignmentGroup from main.models import Dataset from utils import generate_safe_filename_prefix_from_label def analyze_coverage(sample_alignment, output_dir): ref_genome_fasta_location = get_dataset_with_type( sample_alignment.alignment_group.reference_genome, Dataset.TYPE.REFERENCE_GENOME_FASTA).get_absolute_location() input_bam_file = sample_alignment.dataset_set.get( type=Dataset.TYPE.BWA_ALIGN).get_absolute_location() output_filename = generate_safe_filename_prefix_from_label( sample_alignment.experiment_sample.label + '_' + sample_alignment.uid) + '.coverage' output_path = os.path.join(output_dir, output_filename) with open(output_path, 'w') as fh: - subprocess.check_call([ + p_mpileup = subprocess.Popen([ '%s/samtools/samtools' % settings.TOOLS_DIR, 'mpileup', '-f', ref_genome_fasta_location, input_bam_file - ], stdout=fh) + ], stdout=subprocess.PIPE) + subprocess.check_call([ + 'cut', + '-f', + '-4' + ], stdin=p_mpileup.stdout, stdout=fh) +
Update coverage script to only output the first 4 cols, which show coverage.
## Code Before:
import os

import subprocess

from django.conf import settings

from main.models import get_dataset_with_type
from main.models import AlignmentGroup
from main.models import Dataset
from utils import generate_safe_filename_prefix_from_label


def analyze_coverage(sample_alignment, output_dir):
    ref_genome_fasta_location = get_dataset_with_type(
            sample_alignment.alignment_group.reference_genome,
            Dataset.TYPE.REFERENCE_GENOME_FASTA).get_absolute_location()

    input_bam_file = sample_alignment.dataset_set.get(
            type=Dataset.TYPE.BWA_ALIGN).get_absolute_location()

    output_filename = generate_safe_filename_prefix_from_label(
            sample_alignment.experiment_sample.label + '_' +
            sample_alignment.uid) + '.coverage'
    output_path = os.path.join(output_dir, output_filename)

    with open(output_path, 'w') as fh:
        subprocess.check_call([
            '%s/samtools/samtools' % settings.TOOLS_DIR,
            'mpileup',
            '-f', ref_genome_fasta_location,
            input_bam_file
        ], stdout=fh)

## Instruction:
Update coverage script to only output the first 4 cols, which show coverage.

## Code After:
import os

import subprocess

from django.conf import settings

from main.models import get_dataset_with_type
from main.models import AlignmentGroup
from main.models import Dataset
from utils import generate_safe_filename_prefix_from_label


def analyze_coverage(sample_alignment, output_dir):
    ref_genome_fasta_location = get_dataset_with_type(
            sample_alignment.alignment_group.reference_genome,
            Dataset.TYPE.REFERENCE_GENOME_FASTA).get_absolute_location()

    input_bam_file = sample_alignment.dataset_set.get(
            type=Dataset.TYPE.BWA_ALIGN).get_absolute_location()

    output_filename = generate_safe_filename_prefix_from_label(
            sample_alignment.experiment_sample.label + '_' +
            sample_alignment.uid) + '.coverage'
    output_path = os.path.join(output_dir, output_filename)

    with open(output_path, 'w') as fh:
        p_mpileup = subprocess.Popen([
            '%s/samtools/samtools' % settings.TOOLS_DIR,
            'mpileup',
            '-f', ref_genome_fasta_location,
            input_bam_file
        ], stdout=subprocess.PIPE)

        subprocess.check_call([
            'cut',
            '-f',
            '-4'
        ], stdin=p_mpileup.stdout, stdout=fh)
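The pattern in the new code, a `Popen` with `stdout=PIPE` feeding a second process, streams the data between the two instead of buffering it in Python. A self-contained sketch of the same tab-delimited `cut -f -4` pipeline (POSIX tools assumed):

import subprocess

producer = subprocess.Popen(
    ['printf', 'a\tb\tc\td\te\n'], stdout=subprocess.PIPE)
out = subprocess.check_output(['cut', '-f', '-4'], stdin=producer.stdout)
producer.stdout.close()  # lets the producer see SIGPIPE if cut exits early
producer.wait()
assert out == b'a\tb\tc\td\n'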
e7bda027780da26183f84f7af5c50cd37649c76b
functional_tests/remote.py
functional_tests/remote.py
from unipath import Path import subprocess THIS_FOLDER = Path(__file__).parent def reset_database(host): subprocess.check_call(['fab', 'reset_database', '--host={}'.format(host)], cwd=THIS_FOLDER) def create_user(host, user, email, password): subprocess.check_call(['fab', 'create_user:user={},password={},email={}' \ .format(user, password, email), '--host={}'.format(host)], cwd=THIS_FOLDER) def get_sitename(host): return subprocess.check_output(['fab', 'get_sitename', '--host={}'.format(host), '--hide=everything,status'], cwd=THIS_FOLDER).decode().strip() def create_project(host, user, name, description=''): return subprocess.check_output(['fab', 'create_project:user={},name={},description={}'.format(user, name, description), '--host={}'.format(host)], cwd=THIS_FOLDER) def create_action(host, user, text, project=''): return subprocess.check_output(['fab', 'create_action:user={},text={},project={}'.format(user, text, project), '--host={}'.format(host)], cwd=THIS_FOLDER)
from unipath import Path import subprocess THIS_FOLDER = Path(__file__).parent def reset_database(host): subprocess.check_call(['fab', 'reset_database', '--host={}'.format(host), '--hide=everything,status'], cwd=THIS_FOLDER) def create_user(host, user, email, password): subprocess.check_call(['fab', 'create_user:user={},password={},email={}' \ .format(user, password, email), '--host={}'.format(host), '--hide=everything,status'], cwd=THIS_FOLDER) def get_sitename(host): return subprocess.check_output(['fab', 'get_sitename', '--host={}'.format(host), '--hide=everything,status'], cwd=THIS_FOLDER).decode().strip() def create_project(host, user, name, description=''): return subprocess.check_output(['fab', 'create_project:user={},name={},description={}'.format(user, name, description), '--host={}'.format(host)], cwd=THIS_FOLDER) def create_action(host, user, text, project=''): return subprocess.check_output(['fab', 'create_action:user={},text={},project={}'.format(user, text, project), '--host={}'.format(host)], cwd=THIS_FOLDER)
Make running FTs against staging a bit less verbose
Make running FTs against staging a bit less verbose
Python
mit
XeryusTC/projman,XeryusTC/projman,XeryusTC/projman
from unipath import Path import subprocess THIS_FOLDER = Path(__file__).parent def reset_database(host): - subprocess.check_call(['fab', 'reset_database', '--host={}'.format(host)], + subprocess.check_call(['fab', 'reset_database', '--host={}'.format(host), + '--hide=everything,status'], cwd=THIS_FOLDER) def create_user(host, user, email, password): subprocess.check_call(['fab', 'create_user:user={},password={},email={}' \ - .format(user, password, email), '--host={}'.format(host)], + .format(user, password, email), '--host={}'.format(host), + '--hide=everything,status'], cwd=THIS_FOLDER) def get_sitename(host): return subprocess.check_output(['fab', 'get_sitename', '--host={}'.format(host), '--hide=everything,status'], cwd=THIS_FOLDER).decode().strip() def create_project(host, user, name, description=''): return subprocess.check_output(['fab', 'create_project:user={},name={},description={}'.format(user, name, description), '--host={}'.format(host)], cwd=THIS_FOLDER) def create_action(host, user, text, project=''): return subprocess.check_output(['fab', 'create_action:user={},text={},project={}'.format(user, text, project), '--host={}'.format(host)], cwd=THIS_FOLDER)
Make running FTs against staging a bit less verbose
## Code Before: from unipath import Path import subprocess THIS_FOLDER = Path(__file__).parent def reset_database(host): subprocess.check_call(['fab', 'reset_database', '--host={}'.format(host)], cwd=THIS_FOLDER) def create_user(host, user, email, password): subprocess.check_call(['fab', 'create_user:user={},password={},email={}' \ .format(user, password, email), '--host={}'.format(host)], cwd=THIS_FOLDER) def get_sitename(host): return subprocess.check_output(['fab', 'get_sitename', '--host={}'.format(host), '--hide=everything,status'], cwd=THIS_FOLDER).decode().strip() def create_project(host, user, name, description=''): return subprocess.check_output(['fab', 'create_project:user={},name={},description={}'.format(user, name, description), '--host={}'.format(host)], cwd=THIS_FOLDER) def create_action(host, user, text, project=''): return subprocess.check_output(['fab', 'create_action:user={},text={},project={}'.format(user, text, project), '--host={}'.format(host)], cwd=THIS_FOLDER) ## Instruction: Make running FTs against staging a bit less verbose ## Code After: from unipath import Path import subprocess THIS_FOLDER = Path(__file__).parent def reset_database(host): subprocess.check_call(['fab', 'reset_database', '--host={}'.format(host), '--hide=everything,status'], cwd=THIS_FOLDER) def create_user(host, user, email, password): subprocess.check_call(['fab', 'create_user:user={},password={},email={}' \ .format(user, password, email), '--host={}'.format(host), '--hide=everything,status'], cwd=THIS_FOLDER) def get_sitename(host): return subprocess.check_output(['fab', 'get_sitename', '--host={}'.format(host), '--hide=everything,status'], cwd=THIS_FOLDER).decode().strip() def create_project(host, user, name, description=''): return subprocess.check_output(['fab', 'create_project:user={},name={},description={}'.format(user, name, description), '--host={}'.format(host)], cwd=THIS_FOLDER) def create_action(host, user, text, project=''): return subprocess.check_output(['fab', 'create_action:user={},text={},project={}'.format(user, text, project), '--host={}'.format(host)], cwd=THIS_FOLDER)
4daefdb0a4def961572fc22d0fe01a394b11fad9
tests/test_httpclient.py
tests/test_httpclient.py
try: import unittest2 as unittest except ImportError: import unittest import sys sys.path.append('..') from pyrabbit import http class TestHTTPClient(unittest.TestCase): """ Except for the init test, these are largely functional tests that require a RabbitMQ management API to be available on localhost:55672 """ def setUp(self): self.c = http.HTTPClient('localhost:55672', 'guest', 'guest') def test_client_init(self): c = http.HTTPClient('localhost:55672', 'guest', 'guest') self.assertIsInstance(c, http.HTTPClient) def test_client_init_sets_default_timeout(self): self.assertEqual(self.c.client.timeout, 1) def test_client_init_with_timeout(self): c = http.HTTPClient('localhost:55672', 'guest', 'guest', 5) self.assertEqual(c.client.timeout, 5)
try: import unittest2 as unittest except ImportError: import unittest import sys sys.path.append('..') from pyrabbit import http class TestHTTPClient(unittest.TestCase): """ Except for the init test, these are largely functional tests that require a RabbitMQ management API to be available on localhost:55672 """ def setUp(self): self.c = http.HTTPClient('localhost:55672', 'guest', 'guest') def test_client_init(self): c = http.HTTPClient('localhost:55672', 'guest', 'guest') self.assertIsInstance(c, http.HTTPClient) def test_client_init_sets_credentials(self): domain = '' expected_credentials = [(domain, 'guest', 'guest')] self.assertEqual( self.c.client.credentials.credentials, expected_credentials) def test_client_init_sets_default_timeout(self): self.assertEqual(self.c.client.timeout, 1) def test_client_init_with_timeout(self): c = http.HTTPClient('localhost:55672', 'guest', 'guest', 5) self.assertEqual(c.client.timeout, 5)
Test creation of HTTP credentials
tests.http: Test creation of HTTP credentials
Python
bsd-3-clause
ranjithlav/pyrabbit,bkjones/pyrabbit,NeCTAR-RC/pyrabbit,chaos95/pyrabbit,switchtower/pyrabbit
try: import unittest2 as unittest except ImportError: import unittest import sys sys.path.append('..') from pyrabbit import http class TestHTTPClient(unittest.TestCase): """ Except for the init test, these are largely functional tests that require a RabbitMQ management API to be available on localhost:55672 """ def setUp(self): self.c = http.HTTPClient('localhost:55672', 'guest', 'guest') def test_client_init(self): c = http.HTTPClient('localhost:55672', 'guest', 'guest') self.assertIsInstance(c, http.HTTPClient) + def test_client_init_sets_credentials(self): + domain = '' + expected_credentials = [(domain, 'guest', 'guest')] + self.assertEqual( + self.c.client.credentials.credentials, expected_credentials) + def test_client_init_sets_default_timeout(self): self.assertEqual(self.c.client.timeout, 1) def test_client_init_with_timeout(self): c = http.HTTPClient('localhost:55672', 'guest', 'guest', 5) self.assertEqual(c.client.timeout, 5)
Test creation of HTTP credentials
## Code Before: try: import unittest2 as unittest except ImportError: import unittest import sys sys.path.append('..') from pyrabbit import http class TestHTTPClient(unittest.TestCase): """ Except for the init test, these are largely functional tests that require a RabbitMQ management API to be available on localhost:55672 """ def setUp(self): self.c = http.HTTPClient('localhost:55672', 'guest', 'guest') def test_client_init(self): c = http.HTTPClient('localhost:55672', 'guest', 'guest') self.assertIsInstance(c, http.HTTPClient) def test_client_init_sets_default_timeout(self): self.assertEqual(self.c.client.timeout, 1) def test_client_init_with_timeout(self): c = http.HTTPClient('localhost:55672', 'guest', 'guest', 5) self.assertEqual(c.client.timeout, 5) ## Instruction: Test creation of HTTP credentials ## Code After: try: import unittest2 as unittest except ImportError: import unittest import sys sys.path.append('..') from pyrabbit import http class TestHTTPClient(unittest.TestCase): """ Except for the init test, these are largely functional tests that require a RabbitMQ management API to be available on localhost:55672 """ def setUp(self): self.c = http.HTTPClient('localhost:55672', 'guest', 'guest') def test_client_init(self): c = http.HTTPClient('localhost:55672', 'guest', 'guest') self.assertIsInstance(c, http.HTTPClient) def test_client_init_sets_credentials(self): domain = '' expected_credentials = [(domain, 'guest', 'guest')] self.assertEqual( self.c.client.credentials.credentials, expected_credentials) def test_client_init_sets_default_timeout(self): self.assertEqual(self.c.client.timeout, 1) def test_client_init_with_timeout(self): c = http.HTTPClient('localhost:55672', 'guest', 'guest', 5) self.assertEqual(c.client.timeout, 5)
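The new assertion leans on httplib2 internals: Http.add_credentials() stores a (domain, name, password) tuple, with an empty domain when none is given, which is exactly what the test inspects. A minimal sketch, independent of pyrabbit:

import httplib2

h = httplib2.Http()
h.add_credentials('guest', 'guest')  # no domain restriction
# Credentials live on the client as (domain, name, password) tuples:
assert h.credentials.credentials == [('', 'guest', 'guest')]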
dbbd29a1cdfcd3f11a968c0aeb38bd54ef7014e3
gfusion/tests/test_main.py
gfusion/tests/test_main.py
"""Tests for main.py""" from ..main import _solve_weight_vector import numpy as np from nose.tools import assert_raises, assert_equal, assert_true def test_solve_weight_vector(): # smoke test n_nodes = 4 n_communities = 2 n_similarities = 3 delta = 0.3 similarities = np.random.random((n_similarities, n_nodes * (n_nodes-1)/2)) * 10 grouping_matrix = np.random.random((n_nodes, n_communities)) weight = _solve_weight_vector(similarities, grouping_matrix, delta) assert_equal(weight.ndim, 2) assert_equal(weight.shape[1], n_similarities) assert_true(np.all(weight >= 0)) # check raises assert_raises(ValueError, _solve_weight_vector, similarities, grouping_matrix, -1) similarities_invalid = similarities.copy() similarities_invalid[0, 3] = -4. assert_raises(ValueError, _solve_weight_vector, similarities_invalid, grouping_matrix, delta)
"""Tests for main.py""" from ..main import _solve_weight_vector import numpy as np from numpy.testing import assert_array_almost_equal from nose.tools import assert_raises, assert_equal, assert_true def test_solve_weight_vector(): # smoke test n_nodes = 4 n_communities = 2 n_similarities = 3 delta = 0.3 similarities = np.random.random((n_similarities, n_nodes * (n_nodes-1)/2)) * 10 grouping_matrix = np.random.random((n_nodes, n_communities)) weight = _solve_weight_vector(similarities, grouping_matrix, delta) assert_equal(weight.ndim, 2) assert_equal(weight.shape[1], n_similarities) assert_true(np.all(weight >= 0)) # check raises assert_raises(ValueError, _solve_weight_vector, similarities, grouping_matrix, -1) similarities_invalid = similarities.copy() similarities_invalid[0, 3] = -4. assert_raises(ValueError, _solve_weight_vector, similarities_invalid, grouping_matrix, delta) # if I have two similarities, and one is null + the grouping matrix is all # to all, and delta is 0 (no regularization), then I expect that the weight # vector is [1, 0] similarities = np.vstack((1000*np.ones((1, 6)), np.zeros((1, 6)) )) grouping_matrix = np.ones((4, 4)) delta = 1 assert_array_almost_equal(np.atleast_2d([1., 0.]), _solve_weight_vector(similarities, grouping_matrix, delta))
Add a more semantic test
Add a more semantic test
Python
mit
mvdoc/gfusion
"""Tests for main.py""" from ..main import _solve_weight_vector import numpy as np + from numpy.testing import assert_array_almost_equal from nose.tools import assert_raises, assert_equal, assert_true def test_solve_weight_vector(): # smoke test n_nodes = 4 n_communities = 2 n_similarities = 3 delta = 0.3 similarities = np.random.random((n_similarities, n_nodes * (n_nodes-1)/2)) * 10 grouping_matrix = np.random.random((n_nodes, n_communities)) weight = _solve_weight_vector(similarities, grouping_matrix, delta) assert_equal(weight.ndim, 2) assert_equal(weight.shape[1], n_similarities) assert_true(np.all(weight >= 0)) # check raises assert_raises(ValueError, _solve_weight_vector, similarities, grouping_matrix, -1) similarities_invalid = similarities.copy() similarities_invalid[0, 3] = -4. assert_raises(ValueError, _solve_weight_vector, similarities_invalid, grouping_matrix, delta) + # if I have two similarities, and one is null + the grouping matrix is all + # to all, and delta is 0 (no regularization), then I expect that the weight + # vector is [1, 0] + similarities = np.vstack((1000*np.ones((1, 6)), + np.zeros((1, 6)) + )) + grouping_matrix = np.ones((4, 4)) + delta = 1 + assert_array_almost_equal(np.atleast_2d([1., 0.]), + _solve_weight_vector(similarities, + grouping_matrix, + delta)) +
Add a more semantic test
## Code Before: """Tests for main.py""" from ..main import _solve_weight_vector import numpy as np from nose.tools import assert_raises, assert_equal, assert_true def test_solve_weight_vector(): # smoke test n_nodes = 4 n_communities = 2 n_similarities = 3 delta = 0.3 similarities = np.random.random((n_similarities, n_nodes * (n_nodes-1)/2)) * 10 grouping_matrix = np.random.random((n_nodes, n_communities)) weight = _solve_weight_vector(similarities, grouping_matrix, delta) assert_equal(weight.ndim, 2) assert_equal(weight.shape[1], n_similarities) assert_true(np.all(weight >= 0)) # check raises assert_raises(ValueError, _solve_weight_vector, similarities, grouping_matrix, -1) similarities_invalid = similarities.copy() similarities_invalid[0, 3] = -4. assert_raises(ValueError, _solve_weight_vector, similarities_invalid, grouping_matrix, delta) ## Instruction: Add a more semantic test ## Code After: """Tests for main.py""" from ..main import _solve_weight_vector import numpy as np from numpy.testing import assert_array_almost_equal from nose.tools import assert_raises, assert_equal, assert_true def test_solve_weight_vector(): # smoke test n_nodes = 4 n_communities = 2 n_similarities = 3 delta = 0.3 similarities = np.random.random((n_similarities, n_nodes * (n_nodes-1)/2)) * 10 grouping_matrix = np.random.random((n_nodes, n_communities)) weight = _solve_weight_vector(similarities, grouping_matrix, delta) assert_equal(weight.ndim, 2) assert_equal(weight.shape[1], n_similarities) assert_true(np.all(weight >= 0)) # check raises assert_raises(ValueError, _solve_weight_vector, similarities, grouping_matrix, -1) similarities_invalid = similarities.copy() similarities_invalid[0, 3] = -4. assert_raises(ValueError, _solve_weight_vector, similarities_invalid, grouping_matrix, delta) # if I have two similarities, and one is null + the grouping matrix is all # to all, and delta is 0 (no regularization), then I expect that the weight # vector is [1, 0] similarities = np.vstack((1000*np.ones((1, 6)), np.zeros((1, 6)) )) grouping_matrix = np.ones((4, 4)) delta = 1 assert_array_almost_equal(np.atleast_2d([1., 0.]), _solve_weight_vector(similarities, grouping_matrix, delta))
55b7b07986590c4ab519fcda3c973c87ad23596b
flask_admin/model/typefmt.py
flask_admin/model/typefmt.py
from jinja2 import Markup def null_formatter(value): """ Return `NULL` as the string for `None` value :param value: Value to check """ return Markup('<i>NULL</i>') def empty_formatter(value): """ Return empty string for `None` value :param value: Value to check """ return '' def bool_formatter(value): """ Return check icon if value is `True` or empty string otherwise. :param value: Value to check """ return Markup('<i class="icon-ok"></i>' if value else '') DEFAULT_FORMATTERS = { type(None): empty_formatter, bool: bool_formatter }
from jinja2 import Markup def null_formatter(value): """ Return `NULL` as the string for `None` value :param value: Value to check """ return Markup('<i>NULL</i>') def empty_formatter(value): """ Return empty string for `None` value :param value: Value to check """ return '' def bool_formatter(value): """ Return check icon if value is `True` or empty string otherwise. :param value: Value to check """ return Markup('<i class="icon-ok"></i>' if value else '') def list_formatter(values): """ Return string with comma separated values :param values: Value to check """ return u', '.join(values) DEFAULT_FORMATTERS = { type(None): empty_formatter, bool: bool_formatter, list: list_formatter, }
Add extra type formatter for `list` type
Add extra type formatter for `list` type
Python
bsd-3-clause
mrjoes/flask-admin,janusnic/flask-admin,Kha/flask-admin,wuxiangfeng/flask-admin,litnimax/flask-admin,HermasT/flask-admin,quokkaproject/flask-admin,Kha/flask-admin,flabe81/flask-admin,porduna/flask-admin,Junnplus/flask-admin,ibushong/test-repo,janusnic/flask-admin,jschneier/flask-admin,closeio/flask-admin,chase-seibert/flask-admin,litnimax/flask-admin,ArtemSerga/flask-admin,flask-admin/flask-admin,NickWoodhams/flask-admin,LennartP/flask-admin,late-warrior/flask-admin,likaiguo/flask-admin,iurisilvio/flask-admin,mikelambert/flask-admin,jamesbeebop/flask-admin,quokkaproject/flask-admin,mrjoes/flask-admin,pawl/flask-admin,jschneier/flask-admin,toddetzel/flask-admin,rochacbruno/flask-admin,ArtemSerga/flask-admin,Junnplus/flask-admin,torotil/flask-admin,ondoheer/flask-admin,plaes/flask-admin,AlmogCohen/flask-admin,plaes/flask-admin,wangjun/flask-admin,dxmo/flask-admin,jmagnusson/flask-admin,marrybird/flask-admin,torotil/flask-admin,wuxiangfeng/flask-admin,CoolCloud/flask-admin,toddetzel/flask-admin,lifei/flask-admin,ondoheer/flask-admin,phantomxc/flask-admin,mikelambert/flask-admin,mrjoes/flask-admin,petrus-jvrensburg/flask-admin,CoolCloud/flask-admin,wangjun/flask-admin,iurisilvio/flask-admin,petrus-jvrensburg/flask-admin,lifei/flask-admin,mikelambert/flask-admin,sfermigier/flask-admin,radioprotector/flask-admin,wuxiangfeng/flask-admin,petrus-jvrensburg/flask-admin,iurisilvio/flask-admin,likaiguo/flask-admin,jschneier/flask-admin,litnimax/flask-admin,flask-admin/flask-admin,petrus-jvrensburg/flask-admin,plaes/flask-admin,ibushong/test-repo,flask-admin/flask-admin,torotil/flask-admin,radioprotector/flask-admin,rochacbruno/flask-admin,wuxiangfeng/flask-admin,HermasT/flask-admin,LennartP/flask-admin,marrybird/flask-admin,dxmo/flask-admin,flask-admin/flask-admin,phantomxc/flask-admin,LennartP/flask-admin,chase-seibert/flask-admin,plaes/flask-admin,marrybird/flask-admin,mikelambert/flask-admin,wangjun/flask-admin,ArtemSerga/flask-admin,AlmogCohen/flask-admin,AlmogCohen/flask-admin,ondoheer/flask-admin,closeio/flask-admin,rochacbruno/flask-admin,flabe81/flask-admin,AlmogCohen/flask-admin,lifei/flask-admin,jmagnusson/flask-admin,mrjoes/flask-admin,pawl/flask-admin,torotil/flask-admin,likaiguo/flask-admin,HermasT/flask-admin,flabe81/flask-admin,porduna/flask-admin,iurisilvio/flask-admin,NickWoodhams/flask-admin,late-warrior/flask-admin,porduna/flask-admin,radioprotector/flask-admin,chase-seibert/flask-admin,CoolCloud/flask-admin,toddetzel/flask-admin,betterlife/flask-admin,betterlife/flask-admin,lifei/flask-admin,porduna/flask-admin,quokkaproject/flask-admin,rochacbruno/flask-admin,jschneier/flask-admin,late-warrior/flask-admin,pawl/flask-admin,toddetzel/flask-admin,phantomxc/flask-admin,late-warrior/flask-admin,wangjun/flask-admin,ondoheer/flask-admin,ibushong/test-repo,jmagnusson/flask-admin,CoolCloud/flask-admin,closeio/flask-admin,ArtemSerga/flask-admin,jamesbeebop/flask-admin,janusnic/flask-admin,marrybird/flask-admin,jamesbeebop/flask-admin,LennartP/flask-admin,phantomxc/flask-admin,Kha/flask-admin,radioprotector/flask-admin,flabe81/flask-admin,betterlife/flask-admin,sfermigier/flask-admin,jamesbeebop/flask-admin,closeio/flask-admin,Kha/flask-admin,Junnplus/flask-admin,Junnplus/flask-admin,ibushong/test-repo,dxmo/flask-admin,NickWoodhams/flask-admin,NickWoodhams/flask-admin,quokkaproject/flask-admin,sfermigier/flask-admin,likaiguo/flask-admin,HermasT/flask-admin,litnimax/flask-admin,jmagnusson/flask-admin,dxmo/flask-admin,betterlife/flask-admin,chase-seibert/flask-admin,janusnic/flask-admin
from jinja2 import Markup def null_formatter(value): """ Return `NULL` as the string for `None` value :param value: Value to check """ return Markup('<i>NULL</i>') def empty_formatter(value): """ Return empty string for `None` value :param value: Value to check """ return '' def bool_formatter(value): """ Return check icon if value is `True` or empty string otherwise. :param value: Value to check """ return Markup('<i class="icon-ok"></i>' if value else '') + def list_formatter(values): + """ + Return string with comma separated values + + :param values: + Value to check + """ + return u', '.join(values) + + DEFAULT_FORMATTERS = { type(None): empty_formatter, - bool: bool_formatter + bool: bool_formatter, + list: list_formatter, }
Add extra type formatter for `list` type
## Code Before: from jinja2 import Markup def null_formatter(value): """ Return `NULL` as the string for `None` value :param value: Value to check """ return Markup('<i>NULL</i>') def empty_formatter(value): """ Return empty string for `None` value :param value: Value to check """ return '' def bool_formatter(value): """ Return check icon if value is `True` or empty string otherwise. :param value: Value to check """ return Markup('<i class="icon-ok"></i>' if value else '') DEFAULT_FORMATTERS = { type(None): empty_formatter, bool: bool_formatter } ## Instruction: Add extra type formatter for `list` type ## Code After: from jinja2 import Markup def null_formatter(value): """ Return `NULL` as the string for `None` value :param value: Value to check """ return Markup('<i>NULL</i>') def empty_formatter(value): """ Return empty string for `None` value :param value: Value to check """ return '' def bool_formatter(value): """ Return check icon if value is `True` or empty string otherwise. :param value: Value to check """ return Markup('<i class="icon-ok"></i>' if value else '') def list_formatter(values): """ Return string with comma separated values :param values: Value to check """ return u', '.join(values) DEFAULT_FORMATTERS = { type(None): empty_formatter, bool: bool_formatter, list: list_formatter, }
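The bug being fixed is easy to miss: conf is a plain dict, and getattr() looks up attributes rather than keys, so the default branch always won. A quick illustration (values hypothetical):

conf = {'station': 'KJFK', 'hashtag': 'metar'}

getattr(conf, 'hashtag', False)  # -> False: dicts expose keys, not attributes
conf.get('hashtag', False)       # -> 'metar': the intended lookup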
ad757857b7878904c6d842e115074c4fac24bed7
tweetar.py
tweetar.py
import twitter import urllib2 NOAA_URL = "http://weather.noaa.gov/pub/data/observations/metar/stations/*station_id*.TXT" def retrieve_and_post(conf): post = False pull_url = NOAA_URL.replace('*station_id*', conf['station']) request = urllib2.Request(pull_url, None) response = urllib2.urlopen(request) metar = response.read().split('\n')[1] # NOAA includes a "real" timestamp as the first line of the response if getattr(conf, 'hashtag', False): metar = '%s #%s' % (metar, conf['hashtag']) api = twitter.Api(username=conf['twitter_user'], password=conf['twitter_password']) # get the last posted message and make sure it's different before attempting to post. Twitter isn't supposed to allow dupes through but I'm seeing it happen anyway past_statuses = api.GetUserTimeline(conf['twitter_user']) if past_statuses[-0].text != metar: post = True if post: api.PostUpdate(metar) if __name__ == '__main__': retrieve_and_post({'station': '<station_id>', 'twitter_user': '<twitter_user>', 'twitter_password': '<twitter_pass>'})
import twitter import urllib2 NOAA_URL = "http://weather.noaa.gov/pub/data/observations/metar/stations/*station_id*.TXT" def retrieve_and_post(conf): post = False pull_url = NOAA_URL.replace('*station_id*', conf['station']) request = urllib2.Request(pull_url, None) response = urllib2.urlopen(request) metar = response.read().split('\n')[1] # NOAA includes a "real" timestamp as the first line of the response if conf.get('hashtag', False): metar = '%s #%s' % (metar, conf['hashtag']) api = twitter.Api(username=conf['twitter_user'], password=conf['twitter_password']) # get the last posted message and make sure it's different before attempting to post. Twitter isn't supposed to allow dupes through but I'm seeing it happen anyway past_statuses = api.GetUserTimeline(conf['twitter_user']) if past_statuses[-0].text != metar: post = True if post: api.PostUpdate(metar) if __name__ == '__main__': retrieve_and_post({'station': '<station_id>', 'twitter_user': '<twitter_user>', 'twitter_password': '<twitter_pass>'})
Use .get instead of getattr, dummy.
Use .get instead of getattr, dummy.
Python
bsd-3-clause
adamfast/python-tweetar
import twitter import urllib2 NOAA_URL = "http://weather.noaa.gov/pub/data/observations/metar/stations/*station_id*.TXT" def retrieve_and_post(conf): post = False pull_url = NOAA_URL.replace('*station_id*', conf['station']) request = urllib2.Request(pull_url, None) response = urllib2.urlopen(request) metar = response.read().split('\n')[1] # NOAA includes a "real" timestamp as the first line of the response - if getattr(conf, 'hashtag', False): + if conf.get('hashtag', False): metar = '%s #%s' % (metar, conf['hashtag']) api = twitter.Api(username=conf['twitter_user'], password=conf['twitter_password']) # get the last posted message and make sure it's different before attempting to post. Twitter isn't supposed to allow dupes through but I'm seeing it happen anyway past_statuses = api.GetUserTimeline(conf['twitter_user']) if past_statuses[-0].text != metar: post = True if post: api.PostUpdate(metar) if __name__ == '__main__': retrieve_and_post({'station': '<station_id>', 'twitter_user': '<twitter_user>', 'twitter_password': '<twitter_pass>'})
Use .get instead of getattr, dummy.
## Code Before: import twitter import urllib2 NOAA_URL = "http://weather.noaa.gov/pub/data/observations/metar/stations/*station_id*.TXT" def retrieve_and_post(conf): post = False pull_url = NOAA_URL.replace('*station_id*', conf['station']) request = urllib2.Request(pull_url, None) response = urllib2.urlopen(request) metar = response.read().split('\n')[1] # NOAA includes a "real" timestamp as the first line of the response if getattr(conf, 'hashtag', False): metar = '%s #%s' % (metar, conf['hashtag']) api = twitter.Api(username=conf['twitter_user'], password=conf['twitter_password']) # get the last posted message and make sure it's different before attempting to post. Twitter isn't supposed to allow dupes through but I'm seeing it happen anyway past_statuses = api.GetUserTimeline(conf['twitter_user']) if past_statuses[-0].text != metar: post = True if post: api.PostUpdate(metar) if __name__ == '__main__': retrieve_and_post({'station': '<station_id>', 'twitter_user': '<twitter_user>', 'twitter_password': '<twitter_pass>'}) ## Instruction: Use .get instead of getattr, dummy. ## Code After: import twitter import urllib2 NOAA_URL = "http://weather.noaa.gov/pub/data/observations/metar/stations/*station_id*.TXT" def retrieve_and_post(conf): post = False pull_url = NOAA_URL.replace('*station_id*', conf['station']) request = urllib2.Request(pull_url, None) response = urllib2.urlopen(request) metar = response.read().split('\n')[1] # NOAA includes a "real" timestamp as the first line of the response if conf.get('hashtag', False): metar = '%s #%s' % (metar, conf['hashtag']) api = twitter.Api(username=conf['twitter_user'], password=conf['twitter_password']) # get the last posted message and make sure it's different before attempting to post. Twitter isn't supposed to allow dupes through but I'm seeing it happen anyway past_statuses = api.GetUserTimeline(conf['twitter_user']) if past_statuses[-0].text != metar: post = True if post: api.PostUpdate(metar) if __name__ == '__main__': retrieve_and_post({'station': '<station_id>', 'twitter_user': '<twitter_user>', 'twitter_password': '<twitter_pass>'})
0461fad1a3d81aa2d937a1734f1ebb07b3e81d79
undercloud_heat_plugins/server_update_allowed.py
undercloud_heat_plugins/server_update_allowed.py
from heat.engine.resources.openstack.nova import server class ServerUpdateAllowed(server.Server): '''Prevent any properties changes from replacing an existing server. ''' update_allowed_properties = server.Server.properties_schema.keys() def resource_mapping(): return {'OS::Nova::Server': ServerUpdateAllowed}
from heat.engine.resources.openstack.nova import server class ServerUpdateAllowed(server.Server): '''Prevent any properties changes from replacing an existing server. ''' update_allowed_properties = server.Server.properties_schema.keys() def needs_replace_with_prop_diff(self, changed_properties_set, after_props, before_props): return False def resource_mapping(): return {'OS::Nova::Server': ServerUpdateAllowed}
Fix no-replace-server to accurately preview update
Fix no-replace-server to accurately preview update This override of OS::Nova::Server needs to reflect the fact that it never replaces on update or the update --dry-run output ends up being wrong. Closes-Bug: 1561076 Change-Id: I9256872b877fbe7f91befb52995c62de006210ef
Python
apache-2.0
openstack/tripleo-common,openstack/tripleo-common
from heat.engine.resources.openstack.nova import server class ServerUpdateAllowed(server.Server): '''Prevent any properties changes from replacing an existing server. ''' update_allowed_properties = server.Server.properties_schema.keys() + def needs_replace_with_prop_diff(self, changed_properties_set, + after_props, before_props): + return False + def resource_mapping(): return {'OS::Nova::Server': ServerUpdateAllowed}
Fix no-replace-server to accurately preview update
## Code Before: from heat.engine.resources.openstack.nova import server class ServerUpdateAllowed(server.Server): '''Prevent any properties changes from replacing an existing server. ''' update_allowed_properties = server.Server.properties_schema.keys() def resource_mapping(): return {'OS::Nova::Server': ServerUpdateAllowed} ## Instruction: Fix no-replace-server to accurately preview update ## Code After: from heat.engine.resources.openstack.nova import server class ServerUpdateAllowed(server.Server): '''Prevent any properties changes from replacing an existing server. ''' update_allowed_properties = server.Server.properties_schema.keys() def needs_replace_with_prop_diff(self, changed_properties_set, after_props, before_props): return False def resource_mapping(): return {'OS::Nova::Server': ServerUpdateAllowed}
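For context, E113 ("unexpectedly indented") flags a line indented where no indentation is expected; in the pep8 releases of this era that included trailing comments hanging past the end of a statement, which newer pycodestyle reports as E116 instead. A small sketch of the offending shape:

value = max(1,
            2)  # a comment on the call itself is fine
            # but this follow-on comment is indented past the statement: E113
# the fix keeps continuation comments at statement level, as in the diff above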
84a2aa1187cf7a9ec7593920d9ad0708b7d28f55
sqlobject/tests/test_pickle.py
sqlobject/tests/test_pickle.py
import pickle from sqlobject import * from sqlobject.tests.dbtest import * ######################################## ## Pickle instances ######################################## class TestPickle(SQLObject): question = StringCol() answer = IntCol() test_question = 'The Ulimate Question of Life, the Universe and Everything' test_answer = 42 def test_pickleCol(): setupClass(TestPickle) connection = TestPickle._connection test = TestPickle(question=test_question, answer=test_answer) pickle_data = pickle.dumps(test, pickle.HIGHEST_PROTOCOL) connection.cache.clear() test = pickle.loads(pickle_data) test2 = connection.cache.tryGet(test.id, TestPickle) assert test2 is test assert test.question == test_question assert test.answer == test_answer if (connection.dbName == 'sqlite') and connection._memory: return # The following test requires a different connection test = TestPickle.get(test.id, connection=getConnection(registry='')) # to make a different DB URI # and open another connection raises(pickle.PicklingError, pickle.dumps, test, pickle.HIGHEST_PROTOCOL)
import pickle from sqlobject import * from sqlobject.tests.dbtest import * ######################################## ## Pickle instances ######################################## class TestPickle(SQLObject): question = StringCol() answer = IntCol() test_question = 'The Ulimate Question of Life, the Universe and Everything' test_answer = 42 def test_pickleCol(): setupClass(TestPickle) connection = TestPickle._connection test = TestPickle(question=test_question, answer=test_answer) pickle_data = pickle.dumps(test, pickle.HIGHEST_PROTOCOL) connection.cache.clear() test = pickle.loads(pickle_data) test2 = connection.cache.tryGet(test.id, TestPickle) assert test2 is test assert test.question == test_question assert test.answer == test_answer if (connection.dbName == 'sqlite') and connection._memory: return # The following test requires a different connection test = TestPickle.get( test.id, # make a different DB URI and open another connection connection=getConnection(registry='')) raises(pickle.PicklingError, pickle.dumps, test, pickle.HIGHEST_PROTOCOL)
Fix flake8 E113 unexpected indentation
Fix flake8 E113 unexpected indentation
Python
lgpl-2.1
drnlm/sqlobject,sqlobject/sqlobject,sqlobject/sqlobject,drnlm/sqlobject
import pickle from sqlobject import * from sqlobject.tests.dbtest import * ######################################## ## Pickle instances ######################################## class TestPickle(SQLObject): question = StringCol() answer = IntCol() test_question = 'The Ulimate Question of Life, the Universe and Everything' test_answer = 42 def test_pickleCol(): setupClass(TestPickle) connection = TestPickle._connection test = TestPickle(question=test_question, answer=test_answer) pickle_data = pickle.dumps(test, pickle.HIGHEST_PROTOCOL) connection.cache.clear() test = pickle.loads(pickle_data) test2 = connection.cache.tryGet(test.id, TestPickle) assert test2 is test assert test.question == test_question assert test.answer == test_answer if (connection.dbName == 'sqlite') and connection._memory: return # The following test requires a different connection - test = TestPickle.get(test.id, + test = TestPickle.get( + test.id, + # make a different DB URI and open another connection - connection=getConnection(registry='')) # to make a different DB URI + connection=getConnection(registry='')) - # and open another connection raises(pickle.PicklingError, pickle.dumps, test, pickle.HIGHEST_PROTOCOL)
Fix flake8 E113 unexpected indentation
## Code Before: import pickle from sqlobject import * from sqlobject.tests.dbtest import * ######################################## ## Pickle instances ######################################## class TestPickle(SQLObject): question = StringCol() answer = IntCol() test_question = 'The Ulimate Question of Life, the Universe and Everything' test_answer = 42 def test_pickleCol(): setupClass(TestPickle) connection = TestPickle._connection test = TestPickle(question=test_question, answer=test_answer) pickle_data = pickle.dumps(test, pickle.HIGHEST_PROTOCOL) connection.cache.clear() test = pickle.loads(pickle_data) test2 = connection.cache.tryGet(test.id, TestPickle) assert test2 is test assert test.question == test_question assert test.answer == test_answer if (connection.dbName == 'sqlite') and connection._memory: return # The following test requires a different connection test = TestPickle.get(test.id, connection=getConnection(registry='')) # to make a different DB URI # and open another connection raises(pickle.PicklingError, pickle.dumps, test, pickle.HIGHEST_PROTOCOL) ## Instruction: Fix flake8 E113 unexpected indentation ## Code After: import pickle from sqlobject import * from sqlobject.tests.dbtest import * ######################################## ## Pickle instances ######################################## class TestPickle(SQLObject): question = StringCol() answer = IntCol() test_question = 'The Ulimate Question of Life, the Universe and Everything' test_answer = 42 def test_pickleCol(): setupClass(TestPickle) connection = TestPickle._connection test = TestPickle(question=test_question, answer=test_answer) pickle_data = pickle.dumps(test, pickle.HIGHEST_PROTOCOL) connection.cache.clear() test = pickle.loads(pickle_data) test2 = connection.cache.tryGet(test.id, TestPickle) assert test2 is test assert test.question == test_question assert test.answer == test_answer if (connection.dbName == 'sqlite') and connection._memory: return # The following test requires a different connection test = TestPickle.get( test.id, # make a different DB URI and open another connection connection=getConnection(registry='')) raises(pickle.PicklingError, pickle.dumps, test, pickle.HIGHEST_PROTOCOL)
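The reason prog_name matters: when the package runs as python -m srm, sys.argv[0] points at __main__.py, and Click derives the usage line from it unless told otherwise. A minimal sketch:

import click

@click.group()
def cli() -> None:
    """Main command-line entry method."""

# Without prog_name the help header reads "Usage: __main__.py ...";
# with it, Click prints "Usage: srm [OPTIONS] COMMAND [ARGS]...".
cli(prog_name='srm')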
2e9a6a2babb16f4ed9c3367b21ee28514d1988a8
srm/__main__.py
srm/__main__.py
import click from . import __version__, status @click.group() @click.version_option(__version__) def cli() -> None: """Main command-line entry method.""" cli.add_command(status.cli) if __name__ == '__main__': cli()
import click from . import __version__, status @click.group() @click.version_option(__version__) def cli() -> None: """Main command-line entry method.""" cli.add_command(status.cli) cli(prog_name='srm')
Set correct program name in 'help' output
Set correct program name in 'help' output
Python
mit
cmcginty/simple-rom-manager,cmcginty/simple-rom-manager
import click from . import __version__, status @click.group() @click.version_option(__version__) def cli() -> None: """Main command-line entry method.""" cli.add_command(status.cli) + cli(prog_name='srm') - if __name__ == '__main__': - cli() -
Set correct program name in 'help' output
## Code Before:
"""Provides variables for string and integer conversion."""

NOT_THE_QUESTION = 'The answer to life, the universe, and everything? It\'s '

ANSWER = 42
## Instruction:
Concatenate NOT_THE_QUESTION and ANSWER by converting each with str() and assign the result to a new variable
## Code After:
"""Provides variables for string and integer conversion."""

NOT_THE_QUESTION = 'The answer to life, the universe, and everything? It\'s '

ANSWER = 42

THANKS_FOR_THE_FISH = str(NOT_THE_QUESTION) + str(ANSWER)
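The point of the exercise is that + does not mix types: adding a str to an int raises TypeError, so both operands are passed through str() first. Spelled out:

NOT_THE_QUESTION = 'The answer to life, the universe, and everything? It\'s '
ANSWER = 42
# NOT_THE_QUESTION + ANSWER would raise TypeError (str + int);
# converting both sides first makes the concatenation well-defined:
THANKS_FOR_THE_FISH = str(NOT_THE_QUESTION) + str(ANSWER)
# -> "The answer to life, the universe, and everything? It's 42"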
e54d753a3fb58032936cbf5e137bb5ef67e2813c
task_15.py
task_15.py
"""Provides variables for string and integer conversion.""" NOT_THE_QUESTION = 'The answer to life, the universe, and everything? It\'s ' ANSWER = 42
"""Provides variables for string and integer conversion.""" NOT_THE_QUESTION = 'The answer to life, the universe, and everything? It\'s ' ANSWER = 42 THANKS_FOR_THE_FISH = str(NOT_THE_QUESTION) + str(ANSWER)
Change the string to concatenate it by using str() and then make new variable equal the first str() to the second str()
Change the string to concatenate it by using str() and then make new variable equal the first str() to the second str()
Python
mpl-2.0
gracehyemin/is210-week-03-warmup,gracehyemin/is210-week-03-warmup
"""Provides variables for string and integer conversion.""" NOT_THE_QUESTION = 'The answer to life, the universe, and everything? It\'s ' ANSWER = 42 + THANKS_FOR_THE_FISH = str(NOT_THE_QUESTION) + str(ANSWER)
Change the string to concatenate it by using str() and then make new variable equal the first str() to the second str()
## Code Before: """Provides variables for string and integer conversion.""" NOT_THE_QUESTION = 'The answer to life, the universe, and everything? It\'s ' ANSWER = 42 ## Instruction: Change the string to concatenate it by using str() and then make new variable equal the first str() to the second str() ## Code After: """Provides variables for string and integer conversion.""" NOT_THE_QUESTION = 'The answer to life, the universe, and everything? It\'s ' ANSWER = 42 THANKS_FOR_THE_FISH = str(NOT_THE_QUESTION) + str(ANSWER)
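The mechanism behind this fix: when Django builds the throwaway test database it creates tables only for models whose app appears in INSTALLED_APPS, so an app under test must be listed even if other settings omit it. The general pattern in a test settings module:

# sketch of a test settings module; base settings assumed to define INSTALLED_APPS
from .base import *

INSTALLED_APPS = INSTALLED_APPS + ('pombola.interests_register',)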
6c2dae9bad86bf3f40d892eba50853d704f696b7
pombola/settings/tests.py
pombola/settings/tests.py
from .base import * COUNTRY_APP = None INSTALLED_APPS = INSTALLED_APPS + \ ('pombola.hansard', 'pombola.projects', 'pombola.place_data', 'pombola.votematch', 'speeches', 'pombola.spinner' ) + \ APPS_REQUIRED_BY_SPEECHES # create the ENABLED_FEATURES hash that is used to toggle features on and off. ENABLED_FEATURES = {} for key in ALL_OPTIONAL_APPS: # add in the optional apps ENABLED_FEATURES[key] = ('pombola.' + key in INSTALLED_APPS) or (key in INSTALLED_APPS) BREADCRUMB_URL_NAME_MAPPINGS = { 'organisation' : ('Organisations', '/organisation/all/'), }
from .base import * COUNTRY_APP = None INSTALLED_APPS = INSTALLED_APPS + \ ('pombola.hansard', 'pombola.projects', 'pombola.place_data', 'pombola.votematch', 'speeches', 'pombola.spinner', 'pombola.interests_register') + \ APPS_REQUIRED_BY_SPEECHES # create the ENABLED_FEATURES hash that is used to toggle features on and off. ENABLED_FEATURES = {} for key in ALL_OPTIONAL_APPS: # add in the optional apps ENABLED_FEATURES[key] = ('pombola.' + key in INSTALLED_APPS) or (key in INSTALLED_APPS) BREADCRUMB_URL_NAME_MAPPINGS = { 'organisation' : ('Organisations', '/organisation/all/'), }
Make sure that the interests_register tables are created
Make sure that the interests_register tables are created Nose tries to run the interests_register tests, but they will fail unless the interest_register app is added to INSTALLED_APPS, because its tables won't be created in the test database.
Python
agpl-3.0
patricmutwiri/pombola,geoffkilpin/pombola,mysociety/pombola,hzj123/56th,geoffkilpin/pombola,mysociety/pombola,hzj123/56th,patricmutwiri/pombola,hzj123/56th,ken-muturi/pombola,ken-muturi/pombola,geoffkilpin/pombola,mysociety/pombola,ken-muturi/pombola,mysociety/pombola,hzj123/56th,geoffkilpin/pombola,patricmutwiri/pombola,geoffkilpin/pombola,ken-muturi/pombola,patricmutwiri/pombola,hzj123/56th,ken-muturi/pombola,patricmutwiri/pombola,geoffkilpin/pombola,ken-muturi/pombola,patricmutwiri/pombola,mysociety/pombola,mysociety/pombola,hzj123/56th
from .base import * COUNTRY_APP = None INSTALLED_APPS = INSTALLED_APPS + \ ('pombola.hansard', 'pombola.projects', 'pombola.place_data', 'pombola.votematch', 'speeches', - 'pombola.spinner' ) + \ + 'pombola.spinner', + 'pombola.interests_register') + \ APPS_REQUIRED_BY_SPEECHES # create the ENABLED_FEATURES hash that is used to toggle features on and off. ENABLED_FEATURES = {} for key in ALL_OPTIONAL_APPS: # add in the optional apps ENABLED_FEATURES[key] = ('pombola.' + key in INSTALLED_APPS) or (key in INSTALLED_APPS) BREADCRUMB_URL_NAME_MAPPINGS = { 'organisation' : ('Organisations', '/organisation/all/'), }
Make sure that the interests_register tables are created
## Code Before:
'''
Main command which is meant to be run daily to get the information from
various social networks into the local db.
'''
import traceback

from django.core.management.base import BaseCommand, CommandError
from django.core.exceptions import ImproperlyConfigured

from socializr.base import get_socializr_configs


class Command(BaseCommand):
    help = 'Performs the oauth2 dance and save the creds for future use.'

    def handle(self, *args, **options):
        configs = get_socializr_configs()

        for config_class in configs:
            config_obj = config_class()
            self.stdout.write("Processing {}".format(config_class.__name__))
            try:
                config_obj.collect()
            except Exception:
                self.stderr.write("There was an exception processing {}".format(config_class.__name__))
                traceback.print_exc()
## Instruction:
Remove output except when there is an error.
## Code After:
'''
Main command which is meant to be run daily to get the information from
various social networks into the local db.
'''
import traceback

from django.core.management.base import BaseCommand, CommandError
from django.core.exceptions import ImproperlyConfigured

from socializr.base import get_socializr_configs


class Command(BaseCommand):
    help = 'Performs the oauth2 dance and save the creds for future use.'

    def handle(self, *args, **options):
        configs = get_socializr_configs()

        for config_class in configs:
            config_obj = config_class()
            try:
                config_obj.collect()
            except Exception:
                self.stderr.write("There was an exception processing {}".format(config_class.__name__))
                traceback.print_exc()
a5ce35c44938d37aa9727d37c0cbe0232b8e92d3
socializr/management/commands/socializr_update.py
socializr/management/commands/socializr_update.py
''' Main command which is meant to be run daily to get the information from various social networks into the local db. ''' import traceback from django.core.management.base import BaseCommand, CommandError from django.core.exceptions import ImproperlyConfigured from socializr.base import get_socializr_configs class Command(BaseCommand): help = 'Performs the oauth2 dance and save the creds for future use.' def handle(self, *args, **options): configs = get_socializr_configs() for config_class in configs: config_obj = config_class() self.stdout.write("Processing {}".format(config_class.__name__)) try: config_obj.collect() except Exception: self.stderr.write("There was an exception processing {}".format(config_class.__name__)) traceback.print_exc()
''' Main command which is meant to be run daily to get the information from various social networks into the local db. ''' import traceback from django.core.management.base import BaseCommand, CommandError from django.core.exceptions import ImproperlyConfigured from socializr.base import get_socializr_configs class Command(BaseCommand): help = 'Performs the oauth2 dance and save the creds for future use.' def handle(self, *args, **options): configs = get_socializr_configs() for config_class in configs: config_obj = config_class() try: config_obj.collect() except Exception: self.stderr.write("There was an exception processing {}".format(config_class.__name__)) traceback.print_exc()
Remove output expect when there is an error.
Remove output expect when there is an error.
Python
mit
CIGIHub/django-socializr,albertoconnor/django-socializr
''' Main command which is meant to be run daily to get the information from various social networks into the local db. ''' import traceback from django.core.management.base import BaseCommand, CommandError from django.core.exceptions import ImproperlyConfigured from socializr.base import get_socializr_configs class Command(BaseCommand): help = 'Performs the oauth2 dance and save the creds for future use.' def handle(self, *args, **options): configs = get_socializr_configs() for config_class in configs: config_obj = config_class() - self.stdout.write("Processing {}".format(config_class.__name__)) try: config_obj.collect() except Exception: self.stderr.write("There was an exception processing {}".format(config_class.__name__)) traceback.print_exc()
Remove output expect when there is an error.
## Code Before: ''' Main command which is meant to be run daily to get the information from various social networks into the local db. ''' import traceback from django.core.management.base import BaseCommand, CommandError from django.core.exceptions import ImproperlyConfigured from socializr.base import get_socializr_configs class Command(BaseCommand): help = 'Performs the oauth2 dance and save the creds for future use.' def handle(self, *args, **options): configs = get_socializr_configs() for config_class in configs: config_obj = config_class() self.stdout.write("Processing {}".format(config_class.__name__)) try: config_obj.collect() except Exception: self.stderr.write("There was an exception processing {}".format(config_class.__name__)) traceback.print_exc() ## Instruction: Remove output expect when there is an error. ## Code After: ''' Main command which is meant to be run daily to get the information from various social networks into the local db. ''' import traceback from django.core.management.base import BaseCommand, CommandError from django.core.exceptions import ImproperlyConfigured from socializr.base import get_socializr_configs class Command(BaseCommand): help = 'Performs the oauth2 dance and save the creds for future use.' def handle(self, *args, **options): configs = get_socializr_configs() for config_class in configs: config_obj = config_class() try: config_obj.collect() except Exception: self.stderr.write("There was an exception processing {}".format(config_class.__name__)) traceback.print_exc()
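The subtlety behind "mis-creation": every keyword passed outside defaults becomes part of the get_or_create lookup, so including a mutable field such as version in the lookup inserts a second row as soon as the value changes. A minimal sketch against a hypothetical Plugin model:

# Risky: version participates in the lookup, so a version bump creates a duplicate.
plugin, created = Plugin.objects.get_or_create(name='typesetting', version='1.0')

# Safe: only `name` identifies the row; defaults apply on first creation only.
plugin, created = Plugin.objects.get_or_create(
    name='typesetting',
    defaults={'version': '1.0', 'enabled': True},
)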
c8a41bbf11538dbc17de12e32ba5af5e93fd0b2c
src/utils/plugins.py
src/utils/plugins.py
from utils import models class Plugin: plugin_name = None display_name = None description = None author = None short_name = None stage = None manager_url = None version = None janeway_version = None is_workflow_plugin = False jump_url = None handshake_url = None article_pk_in_handshake_url = False press_wide = False kanban_card = '{plugin_name}/kanban_card.html'.format( plugin_name=plugin_name, ) @classmethod def install(cls): plugin, created = cls.get_or_create_plugin_object() if not created and plugin.version != cls.version: plugin.version = cls.version plugin.save() return plugin, created @classmethod def hook_registry(cls): pass @classmethod def get_or_create_plugin_object(cls): plugin, created = models.Plugin.objects.get_or_create( name=cls.short_name, display_name=cls.display_name, press_wide=cls.press_wide, defaults={'version': cls.version, 'enabled': True}, ) return plugin, created
from utils import models class Plugin: plugin_name = None display_name = None description = None author = None short_name = None stage = None manager_url = None version = None janeway_version = None is_workflow_plugin = False jump_url = None handshake_url = None article_pk_in_handshake_url = False press_wide = False kanban_card = '{plugin_name}/kanban_card.html'.format( plugin_name=plugin_name, ) @classmethod def install(cls): plugin, created = cls.get_or_create_plugin_object() if not created and plugin.version != cls.version: print('Plugin updated: {0} -> {1}'.format(cls.version, plugin.version)) plugin.version = cls.version plugin.save() return plugin, created @classmethod def hook_registry(cls): pass @classmethod def get_or_create_plugin_object(cls): plugin, created = models.Plugin.objects.get_or_create( name=cls.short_name, defaults={ 'display_name': cls.display_name, 'version': cls.version, 'enabled': True, 'press_wide': cls.press_wide, }, ) return plugin, created @classmethod def get_self(cls): try: plugin = models.Plugin.objects.get( name=cls.short_name, ) except models.Plugin.MultipleObjectsReturned: plugin = models.Plugin.objects.filter( name=cls.short_name, ).order_by( '-version' ).first() except models.Plugin.DoesNotExist: return None return plugin
Add get_self and change get_or_create to avoid mis-creation.
Add get_self and change get_or_create to avoid mis-creation.
Python
agpl-3.0
BirkbeckCTP/janeway,BirkbeckCTP/janeway,BirkbeckCTP/janeway,BirkbeckCTP/janeway
from utils import models class Plugin: plugin_name = None display_name = None description = None author = None short_name = None stage = None manager_url = None version = None janeway_version = None is_workflow_plugin = False jump_url = None handshake_url = None article_pk_in_handshake_url = False press_wide = False kanban_card = '{plugin_name}/kanban_card.html'.format( plugin_name=plugin_name, ) @classmethod def install(cls): plugin, created = cls.get_or_create_plugin_object() if not created and plugin.version != cls.version: + print('Plugin updated: {0} -> {1}'.format(cls.version, plugin.version)) plugin.version = cls.version plugin.save() return plugin, created @classmethod def hook_registry(cls): pass @classmethod def get_or_create_plugin_object(cls): plugin, created = models.Plugin.objects.get_or_create( name=cls.short_name, + defaults={ - display_name=cls.display_name, + 'display_name': cls.display_name, + 'version': cls.version, + 'enabled': True, - press_wide=cls.press_wide, + 'press_wide': cls.press_wide, - defaults={'version': cls.version, 'enabled': True}, + }, ) return plugin, created + @classmethod + def get_self(cls): + try: + plugin = models.Plugin.objects.get( + name=cls.short_name, + ) + except models.Plugin.MultipleObjectsReturned: + plugin = models.Plugin.objects.filter( + name=cls.short_name, + ).order_by( + '-version' + ).first() + except models.Plugin.DoesNotExist: + return None + + return plugin +
Add get_self and change get_or_create to avoid mis-creation.
## Code Before: from utils import models class Plugin: plugin_name = None display_name = None description = None author = None short_name = None stage = None manager_url = None version = None janeway_version = None is_workflow_plugin = False jump_url = None handshake_url = None article_pk_in_handshake_url = False press_wide = False kanban_card = '{plugin_name}/kanban_card.html'.format( plugin_name=plugin_name, ) @classmethod def install(cls): plugin, created = cls.get_or_create_plugin_object() if not created and plugin.version != cls.version: plugin.version = cls.version plugin.save() return plugin, created @classmethod def hook_registry(cls): pass @classmethod def get_or_create_plugin_object(cls): plugin, created = models.Plugin.objects.get_or_create( name=cls.short_name, display_name=cls.display_name, press_wide=cls.press_wide, defaults={'version': cls.version, 'enabled': True}, ) return plugin, created ## Instruction: Add get_self and change get_or_create to avoid mis-creation. ## Code After: from utils import models class Plugin: plugin_name = None display_name = None description = None author = None short_name = None stage = None manager_url = None version = None janeway_version = None is_workflow_plugin = False jump_url = None handshake_url = None article_pk_in_handshake_url = False press_wide = False kanban_card = '{plugin_name}/kanban_card.html'.format( plugin_name=plugin_name, ) @classmethod def install(cls): plugin, created = cls.get_or_create_plugin_object() if not created and plugin.version != cls.version: print('Plugin updated: {0} -> {1}'.format(cls.version, plugin.version)) plugin.version = cls.version plugin.save() return plugin, created @classmethod def hook_registry(cls): pass @classmethod def get_or_create_plugin_object(cls): plugin, created = models.Plugin.objects.get_or_create( name=cls.short_name, defaults={ 'display_name': cls.display_name, 'version': cls.version, 'enabled': True, 'press_wide': cls.press_wide, }, ) return plugin, created @classmethod def get_self(cls): try: plugin = models.Plugin.objects.get( name=cls.short_name, ) except models.Plugin.MultipleObjectsReturned: plugin = models.Plugin.objects.filter( name=cls.short_name, ).order_by( '-version' ).first() except models.Plugin.DoesNotExist: return None return plugin
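The one-character fix works because str.split() keeps empty strings around leading and trailing separators, which throws off the length comparison against the registered paths. A quick check (the endpoint path is hypothetical):

'/simulations/42/'.split('/')             # ['', 'simulations', '42', '']
'/simulations/42/'.strip('/').split('/')  # ['simulations', '42']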
c5671ab2e5115ce9c022a97a088300dc408e2aa4
opendc/util/path_parser.py
opendc/util/path_parser.py
import json import sys import re def parse(version, endpoint_path): """Map an HTTP call to an API path""" with open('opendc/api/{}/paths.json'.format(version)) as paths_file: paths = json.load(paths_file) endpoint_path_parts = endpoint_path.split('/') paths_parts = [x.split('/') for x in paths if len(x.split('/')) == len(endpoint_path_parts)] for path_parts in paths_parts: found = True for (endpoint_part, part) in zip(endpoint_path_parts, path_parts): print endpoint_part, part if not part.startswith('{') and endpoint_part != part: found = False break if found: sys.stdout.flush() return '{}/{}'.format(version, '/'.join(path_parts)) return None
import json import sys import re def parse(version, endpoint_path): """Map an HTTP endpoint path to an API path""" with open('opendc/api/{}/paths.json'.format(version)) as paths_file: paths = json.load(paths_file) endpoint_path_parts = endpoint_path.strip('/').split('/') paths_parts = [x.split('/') for x in paths if len(x.split('/')) == len(endpoint_path_parts)] for path_parts in paths_parts: found = True for (endpoint_part, part) in zip(endpoint_path_parts, path_parts): if not part.startswith('{') and endpoint_part != part: found = False break if found: sys.stdout.flush() return '{}/{}'.format(version, '/'.join(path_parts)) return None
Make path parser robust to trailing /
Make path parser robust to trailing /
Python
mit
atlarge-research/opendc-web-server,atlarge-research/opendc-web-server
import json import sys import re def parse(version, endpoint_path): - """Map an HTTP call to an API path""" + """Map an HTTP endpoint path to an API path""" with open('opendc/api/{}/paths.json'.format(version)) as paths_file: paths = json.load(paths_file) - endpoint_path_parts = endpoint_path.split('/') + endpoint_path_parts = endpoint_path.strip('/').split('/') paths_parts = [x.split('/') for x in paths if len(x.split('/')) == len(endpoint_path_parts)] for path_parts in paths_parts: found = True for (endpoint_part, part) in zip(endpoint_path_parts, path_parts): - print endpoint_part, part if not part.startswith('{') and endpoint_part != part: found = False break if found: sys.stdout.flush() return '{}/{}'.format(version, '/'.join(path_parts)) return None
Make path parser robust to trailing /
## Code Before: import json import sys import re def parse(version, endpoint_path): """Map an HTTP call to an API path""" with open('opendc/api/{}/paths.json'.format(version)) as paths_file: paths = json.load(paths_file) endpoint_path_parts = endpoint_path.split('/') paths_parts = [x.split('/') for x in paths if len(x.split('/')) == len(endpoint_path_parts)] for path_parts in paths_parts: found = True for (endpoint_part, part) in zip(endpoint_path_parts, path_parts): print endpoint_part, part if not part.startswith('{') and endpoint_part != part: found = False break if found: sys.stdout.flush() return '{}/{}'.format(version, '/'.join(path_parts)) return None ## Instruction: Make path parser robust to trailing / ## Code After: import json import sys import re def parse(version, endpoint_path): """Map an HTTP endpoint path to an API path""" with open('opendc/api/{}/paths.json'.format(version)) as paths_file: paths = json.load(paths_file) endpoint_path_parts = endpoint_path.strip('/').split('/') paths_parts = [x.split('/') for x in paths if len(x.split('/')) == len(endpoint_path_parts)] for path_parts in paths_parts: found = True for (endpoint_part, part) in zip(endpoint_path_parts, path_parts): if not part.startswith('{') and endpoint_part != part: found = False break if found: sys.stdout.flush() return '{}/{}'.format(version, '/'.join(path_parts)) return None
87844a776c2d409bdf7eaa99da06d07d77d7098e
tests/test_gingerit.py
tests/test_gingerit.py
import pytest from gingerit.gingerit import GingerIt @pytest.mark.parametrize("text,expected", [ ( "The smelt of fliwers bring back memories.", "The smell of flowers brings back memories." ), ( "Edwards will be sck yesterday", "Edwards was sick yesterday" ), ( "Edwards was sick yesterday.", "Edwards was sick yesterday." ), ( "", "" ) ]) def test_gingerit(text, expected): parser = GingerIt() assert parser.parse(text)["result"] == expected
import pytest from gingerit.gingerit import GingerIt @pytest.mark.parametrize("text,expected,corrections", [ ( "The smelt of fliwers bring back memories.", "The smell of flowers brings back memories.", [ {'start': 21, 'definition': None, 'correct': u'brings', 'text': 'bring'}, {'start': 13, 'definition': u'a plant cultivated for its blooms or blossoms', 'correct': u'flowers', 'text': 'fliwers'}, {'start': 4, 'definition': None, 'correct': u'smell', 'text': 'smelt'} ] ), ( "Edwards will be sck yesterday", "Edwards was sick yesterday", [ {'start': 16, 'definition': u'affected by an impairment of normal physical or mental function', 'correct': u'sick', 'text': 'sck'}, {'start': 8, 'definition': None, 'correct': u'was', 'text': 'will be'} ] ), ( "Edwards was sick yesterday.", "Edwards was sick yesterday.", [] ), ( "", "", [] ) ]) def test_gingerit(text, expected, corrections): output = GingerIt().parse(text) assert output["result"] == expected assert output["corrections"] == corrections
Extend test to cover corrections output
Extend test to cover corrections output
Python
mit
Azd325/gingerit
import pytest from gingerit.gingerit import GingerIt - @pytest.mark.parametrize("text,expected", [ + @pytest.mark.parametrize("text,expected,corrections", [ ( "The smelt of fliwers bring back memories.", - "The smell of flowers brings back memories." + "The smell of flowers brings back memories.", + [ + {'start': 21, 'definition': None, 'correct': u'brings', 'text': 'bring'}, + {'start': 13, 'definition': u'a plant cultivated for its blooms or blossoms', 'correct': u'flowers', + 'text': 'fliwers'}, + {'start': 4, 'definition': None, 'correct': u'smell', 'text': 'smelt'} + ] ), ( "Edwards will be sck yesterday", - "Edwards was sick yesterday" + "Edwards was sick yesterday", + [ + {'start': 16, 'definition': u'affected by an impairment of normal physical or mental function', + 'correct': u'sick', 'text': 'sck'}, + {'start': 8, 'definition': None, 'correct': u'was', 'text': 'will be'} + ] ), ( "Edwards was sick yesterday.", - "Edwards was sick yesterday." + "Edwards was sick yesterday.", + [] ), ( "", - "" + "", + [] ) ]) - def test_gingerit(text, expected): + def test_gingerit(text, expected, corrections): + output = GingerIt().parse(text) - parser = GingerIt() - assert parser.parse(text)["result"] == expected + assert output["result"] == expected + assert output["corrections"] == corrections +
Extend test to cover corrections output
## Code Before: import pytest from gingerit.gingerit import GingerIt @pytest.mark.parametrize("text,expected", [ ( "The smelt of fliwers bring back memories.", "The smell of flowers brings back memories." ), ( "Edwards will be sck yesterday", "Edwards was sick yesterday" ), ( "Edwards was sick yesterday.", "Edwards was sick yesterday." ), ( "", "" ) ]) def test_gingerit(text, expected): parser = GingerIt() assert parser.parse(text)["result"] == expected ## Instruction: Extend test to cover corrections output ## Code After: import pytest from gingerit.gingerit import GingerIt @pytest.mark.parametrize("text,expected,corrections", [ ( "The smelt of fliwers bring back memories.", "The smell of flowers brings back memories.", [ {'start': 21, 'definition': None, 'correct': u'brings', 'text': 'bring'}, {'start': 13, 'definition': u'a plant cultivated for its blooms or blossoms', 'correct': u'flowers', 'text': 'fliwers'}, {'start': 4, 'definition': None, 'correct': u'smell', 'text': 'smelt'} ] ), ( "Edwards will be sck yesterday", "Edwards was sick yesterday", [ {'start': 16, 'definition': u'affected by an impairment of normal physical or mental function', 'correct': u'sick', 'text': 'sck'}, {'start': 8, 'definition': None, 'correct': u'was', 'text': 'will be'} ] ), ( "Edwards was sick yesterday.", "Edwards was sick yesterday.", [] ), ( "", "", [] ) ]) def test_gingerit(text, expected, corrections): output = GingerIt().parse(text) assert output["result"] == expected assert output["corrections"] == corrections
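Two footnotes on the constructor: xrange exists only on Python 2 (range is the Python 3 spelling), and the explicit loops matter because multiplying a nested list would alias rows. A usage sketch:

net = NeuronNetwork(2, 3)
len(net.neurons), len(net.neurons[0])  # -> (2, 3)

# Anti-pattern the loops avoid: [[Neuron()] * 3] * 2 reuses one inner list
# and a single Neuron instance for every cell.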
c823a476b265b46d27b221831be952a811fe3468
ANN.py
ANN.py
class Neuron:
    pass


class NeuronNetwork:
    neurons = []
class Neuron:
    pass


class NeuronNetwork:
    neurons = []
    def __init__(self, rows, columns):
        self.neurons = []
        for row in xrange(rows):
            self.neurons.append([])
            for column in xrange(columns):
                self.neurons[row].append(Neuron())
Create 2D list of Neurons in NeuronNetwork's init
Create 2D list of Neurons in NeuronNetwork's init
Python
mit
tysonzero/py-ann
  class Neuron:
      pass


  class NeuronNetwork:
      neurons = []
+     def __init__(self, rows, columns):
+         self.neurons = []
+         for row in xrange(rows):
+             self.neurons.append([])
+             for column in xrange(columns):
+                 self.neurons[row].append(Neuron())
+
Create 2D list of Neurons in NeuronNetwork's init
## Code Before:
class Neuron:
    pass


class NeuronNetwork:
    neurons = []

## Instruction:
Create 2D list of Neurons in NeuronNetwork's init

## Code After:
class Neuron:
    pass


class NeuronNetwork:
    neurons = []
    def __init__(self, rows, columns):
        self.neurons = []
        for row in xrange(rows):
            self.neurons.append([])
            for column in xrange(columns):
                self.neurons[row].append(Neuron())
168937c586b228c05ada2da79a55c9416c3180d3
antifuzz.py
antifuzz.py
'''
File: antifuzz.py
Authors: Kaitlin Keenan and Ryan Frank
'''

import sys
from shutil import copy2
import subprocess
import ssdeep #http://python-ssdeep.readthedocs.io/en/latest/installation.html


def main():
    # Take in file
    ogFile = sys.argv[1]

    # Make copy of file
    newFile = sys.argv[2]

    # Mess with the given file
    cmd(['lame','--quiet', '--scale', '1', ogFile])
    print cmd(['mv', ogFile + ".mp3", newFile])

    # Hash files
    ogHash = ssdeep.hash_from_file(ogFile)
    newHash = ssdeep.hash_from_file(newFile)

    # Compare the hashes
    #print ogHash
    print ssdeep.compare(ogHash, newHash)


def cmd(command):
    #if (arg2 && arg1):
    p = subprocess.Popen(command, stdout = subprocess.PIPE, stderr = subprocess.PIPE)
    out, err = p.communicate()
    return out


if __name__ == "__main__":
    main()
'''
File: antifuzz.py
Authors: Kaitlin Keenan and Ryan Frank
'''

import sys
from shutil import copy2
import subprocess
import ssdeep #http://python-ssdeep.readthedocs.io/en/latest/installation.html
import argparse


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("originalFile", help="File to antifuzz")
    parser.add_argument("newFile", help="Name of the antifuzzed file")
    args = parser.parse_args()

    # Take in file
    ogFile = args.originalFile

    # Make copy of file
    nFile = args.newFile

    # Mess with the given file
    mp3(ogFile, nFile)

    # Hash files
    ogHash = ssdeep.hash_from_file(ogFile)
    newHash = ssdeep.hash_from_file(nFile)

    # Compare the hashes
    #print ogHash
    diff=str(ssdeep.compare(ogHash, newHash))
    print("The files are " + diff + "% different")


def mp3(ogFile, newFile):
    cmd(['lame','--quiet', '--scale', '1', ogFile])
    cmd(['mv', ogFile + ".mp3", newFile])


def cmd(command):
    #if (arg2 && arg1):
    p = subprocess.Popen(command, stdout = subprocess.PIPE, stderr = subprocess.PIPE)
    out, err = p.communicate()
    return out


if __name__ == "__main__":
    main()
Add help, make output more user friendly
Add help, make output more user friendly
Python
mit
ForensicTools/antifuzzyhashing-475-2161_Keenan_Frank
  '''
  File: antifuzz.py
  Authors: Kaitlin Keenan and Ryan Frank
  '''

  import sys
  from shutil import copy2
  import subprocess
  import ssdeep #http://python-ssdeep.readthedocs.io/en/latest/installation.html
+ import argparse


  def main():
+     parser = argparse.ArgumentParser()
+     parser.add_argument("originalFile", help="File to antifuzz")
+     parser.add_argument("newFile", help="Name of the antifuzzed file")
+     args = parser.parse_args()
+
      # Take in file
-     ogFile = sys.argv[1]
+     ogFile = args.originalFile

      # Make copy of file
-     newFile = sys.argv[2]
+     nFile = args.newFile

      # Mess with the given file
+     mp3(ogFile, nFile)
-     cmd(['lame','--quiet', '--scale', '1', ogFile])
-     print cmd(['mv', ogFile + ".mp3", newFile])

      # Hash files
      ogHash = ssdeep.hash_from_file(ogFile)
-     newHash = ssdeep.hash_from_file(newFile)
+     newHash = ssdeep.hash_from_file(nFile)

      # Compare the hashes
      #print ogHash
-     print ssdeep.compare(ogHash, newHash)
+     diff=str(ssdeep.compare(ogHash, newHash))
+     print("The files are " + diff + "% different")
+
+
+ def mp3(ogFile, newFile):
+     cmd(['lame','--quiet', '--scale', '1', ogFile])
+     cmd(['mv', ogFile + ".mp3", newFile])


  def cmd(command):
      #if (arg2 && arg1):
      p = subprocess.Popen(command, stdout = subprocess.PIPE, stderr = subprocess.PIPE)
      out, err = p.communicate()
      return out


  if __name__ == "__main__":
      main()
Add help, make output more user friendly
## Code Before:
'''
File: antifuzz.py
Authors: Kaitlin Keenan and Ryan Frank
'''

import sys
from shutil import copy2
import subprocess
import ssdeep #http://python-ssdeep.readthedocs.io/en/latest/installation.html


def main():
    # Take in file
    ogFile = sys.argv[1]

    # Make copy of file
    newFile = sys.argv[2]

    # Mess with the given file
    cmd(['lame','--quiet', '--scale', '1', ogFile])
    print cmd(['mv', ogFile + ".mp3", newFile])

    # Hash files
    ogHash = ssdeep.hash_from_file(ogFile)
    newHash = ssdeep.hash_from_file(newFile)

    # Compare the hashes
    #print ogHash
    print ssdeep.compare(ogHash, newHash)


def cmd(command):
    #if (arg2 && arg1):
    p = subprocess.Popen(command, stdout = subprocess.PIPE, stderr = subprocess.PIPE)
    out, err = p.communicate()
    return out


if __name__ == "__main__":
    main()

## Instruction:
Add help, make output more user friendly

## Code After:
'''
File: antifuzz.py
Authors: Kaitlin Keenan and Ryan Frank
'''

import sys
from shutil import copy2
import subprocess
import ssdeep #http://python-ssdeep.readthedocs.io/en/latest/installation.html
import argparse


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("originalFile", help="File to antifuzz")
    parser.add_argument("newFile", help="Name of the antifuzzed file")
    args = parser.parse_args()

    # Take in file
    ogFile = args.originalFile

    # Make copy of file
    nFile = args.newFile

    # Mess with the given file
    mp3(ogFile, nFile)

    # Hash files
    ogHash = ssdeep.hash_from_file(ogFile)
    newHash = ssdeep.hash_from_file(nFile)

    # Compare the hashes
    #print ogHash
    diff=str(ssdeep.compare(ogHash, newHash))
    print("The files are " + diff + "% different")


def mp3(ogFile, newFile):
    cmd(['lame','--quiet', '--scale', '1', ogFile])
    cmd(['mv', ogFile + ".mp3", newFile])


def cmd(command):
    #if (arg2 && arg1):
    p = subprocess.Popen(command, stdout = subprocess.PIPE, stderr = subprocess.PIPE)
    out, err = p.communicate()
    return out


if __name__ == "__main__":
    main()
ee81d8966a5ef68edd6bb4459fc015234d6e0814
setup.py
setup.py
"""Open-ovf installer""" import os from distutils.core import setup CODE_BASE_DIR = 'py' SCRIPTS_DIR = 'py/scripts/' def list_scripts(): """List all scripts that should go to /usr/bin""" file_list = os.listdir(SCRIPTS_DIR) return [os.path.join(SCRIPTS_DIR, f) for f in file_list] setup(name='open-ovf', version='0.1', description='OVF implementation', url='http://open-ovf.sourceforge.net', license='EPL', packages=['ovf', 'ovf.commands'], package_dir = {'': CODE_BASE_DIR}, scripts=list_scripts(), )
"""Open-ovf installer""" import os from distutils.core import setup CODE_BASE_DIR = 'py' SCRIPTS_DIR = 'py/scripts/' def list_scripts(): """List all scripts that should go to /usr/bin""" file_list = os.listdir(SCRIPTS_DIR) return [os.path.join(SCRIPTS_DIR, f) for f in file_list] setup(name='open-ovf', version='0.1', description='OVF implementation', url='http://open-ovf.sourceforge.net', license='EPL', packages=['ovf', 'ovf.commands', 'ovf.env'], package_dir = {'': CODE_BASE_DIR}, scripts=list_scripts(), )
Add env subdirectory to package list
Add env subdirectory to package list

Hi,

This patch adds the ovf/env subdirectory to the package list so that
setup.py installs it properly.

Signed-off-by: David L. Leskovec <376f07f909b7d4aee248a1433ee4548cc2bf1d1b@linux.vnet.ibm.com>
Signed-off-by: Scott Moser <f411aed5b71f5ab75e7f202cdde1f0f4410975aa@linux.vnet.ibm.com>
Python
epl-1.0
Awingu/open-ovf,Awingu/open-ovf,Awingu/open-ovf,Awingu/open-ovf
"""Open-ovf installer""" import os from distutils.core import setup CODE_BASE_DIR = 'py' SCRIPTS_DIR = 'py/scripts/' def list_scripts(): """List all scripts that should go to /usr/bin""" file_list = os.listdir(SCRIPTS_DIR) return [os.path.join(SCRIPTS_DIR, f) for f in file_list] setup(name='open-ovf', version='0.1', description='OVF implementation', url='http://open-ovf.sourceforge.net', license='EPL', - packages=['ovf', 'ovf.commands'], + packages=['ovf', 'ovf.commands', 'ovf.env'], package_dir = {'': CODE_BASE_DIR}, scripts=list_scripts(), )
Add env subdirectory to package list
## Code Before:
"""Open-ovf installer"""

import os
from distutils.core import setup

CODE_BASE_DIR = 'py'
SCRIPTS_DIR = 'py/scripts/'

def list_scripts():
    """List all scripts that should go to /usr/bin"""
    file_list = os.listdir(SCRIPTS_DIR)
    return [os.path.join(SCRIPTS_DIR, f) for f in file_list]

setup(name='open-ovf',
      version='0.1',
      description='OVF implementation',
      url='http://open-ovf.sourceforge.net',
      license='EPL',
      packages=['ovf', 'ovf.commands'],
      package_dir = {'': CODE_BASE_DIR},
      scripts=list_scripts(),
     )

## Instruction:
Add env subdirectory to package list

## Code After:
"""Open-ovf installer"""

import os
from distutils.core import setup

CODE_BASE_DIR = 'py'
SCRIPTS_DIR = 'py/scripts/'

def list_scripts():
    """List all scripts that should go to /usr/bin"""
    file_list = os.listdir(SCRIPTS_DIR)
    return [os.path.join(SCRIPTS_DIR, f) for f in file_list]

setup(name='open-ovf',
      version='0.1',
      description='OVF implementation',
      url='http://open-ovf.sourceforge.net',
      license='EPL',
      packages=['ovf', 'ovf.commands', 'ovf.env'],
      package_dir = {'': CODE_BASE_DIR},
      scripts=list_scripts(),
     )
20764887bc338c2cd366ad11fb41d8932c2326a2
bot.py
bot.py
import json

import discord

from handlers.message_handler import MessageHandler


with open("config.json", "r") as f:
    config = json.load(f)

client = discord.Client()
message_handler = MessageHandler(config, client)


@client.event
async def on_ready():
    print("Logged in as", client.user.name)


@client.event
async def on_message(message):
    await message_handler.handle(message)


client.run(config["token"])
import argparse
import json

import discord

from handlers.message_handler import MessageHandler


def main():
    p = argparse.ArgumentParser()
    p.add_argument("--config", required=True, help="Path to configuration file")
    args = p.parse_args()

    with open(args.config, "r") as f:
        config = json.load(f)

    client = discord.Client()
    message_handler = MessageHandler(config, client)

    @client.event
    async def on_ready():
        print("Logged in as", client.user.name)

    @client.event
    async def on_message(message):
        await message_handler.handle(message)

    client.run(config["token"])


if __name__ == "__main__":
    main()
Add --config argument as a path to the config file
Add --config argument as a path to the config file
Python
mit
azeier/hearthbot
+
+ import argparse
  import json

  import discord

  from handlers.message_handler import MessageHandler


+ def main():
+     p = argparse.ArgumentParser()
+     p.add_argument("--config", required=True, help="Path to configuration file")
+     args = p.parse_args()
+
- with open("config.json", "r") as f:
+     with open(args.config, "r") as f:
-     config = json.load(f)
+         config = json.load(f)
+
- client = discord.Client()
+     client = discord.Client()
- message_handler = MessageHandler(config, client)
+     message_handler = MessageHandler(config, client)
+
+     @client.event
+     async def on_ready():
+         print("Logged in as", client.user.name)
+
+     @client.event
+     async def on_message(message):
+         await message_handler.handle(message)
+
+     client.run(config["token"])
+
+ if __name__ == "__main__":
+     main()

- @client.event
- async def on_ready():
-     print("Logged in as", client.user.name)
-
-
- @client.event
- async def on_message(message):
-     await message_handler.handle(message)
-
-
- client.run(config["token"])
-
Add --config argument as a path to the config file
## Code Before:
import json

import discord

from handlers.message_handler import MessageHandler


with open("config.json", "r") as f:
    config = json.load(f)

client = discord.Client()
message_handler = MessageHandler(config, client)


@client.event
async def on_ready():
    print("Logged in as", client.user.name)


@client.event
async def on_message(message):
    await message_handler.handle(message)


client.run(config["token"])

## Instruction:
Add --config argument as a path to the config file

## Code After:
import argparse
import json

import discord

from handlers.message_handler import MessageHandler


def main():
    p = argparse.ArgumentParser()
    p.add_argument("--config", required=True, help="Path to configuration file")
    args = p.parse_args()

    with open(args.config, "r") as f:
        config = json.load(f)

    client = discord.Client()
    message_handler = MessageHandler(config, client)

    @client.event
    async def on_ready():
        print("Logged in as", client.user.name)

    @client.event
    async def on_message(message):
        await message_handler.handle(message)

    client.run(config["token"])


if __name__ == "__main__":
    main()
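Aside: a minimal runnable sketch of the argparse pattern the bot.py commit above adopts. The simulated argv is illustrative only; "config.json" itself comes from the old hard-coded open() call, not from any documented default.

import argparse

# Build the same one-flag parser the commit adds, then feed it a simulated
# command line instead of sys.argv to show what main() ends up reading.
p = argparse.ArgumentParser()
p.add_argument("--config", required=True, help="Path to configuration file")
args = p.parse_args(["--config", "config.json"])  # simulated CLI arguments
print(args.config)  # -> config.json

Making the path a required flag (rather than a positional or a default) means a missing --config fails fast with a usage message instead of a FileNotFoundError later.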
a2fb1efc918e18bb0ecebce4604192b03af662b2
fib.py
fib.py
def fibrepr(n):
    fibs = [1, 2, 3, 5, 8, 13, 21, 34, 55, 89]
    def fib_iter(n, fibs, l):
        for i, f in enumerate(fibs):
            if f == n:
                yield '1' + i*'0' + l
            elif n > f:
                for fib in fib_iter(n - f, fibs[i+1:], '1' + i*'0' + l):
                    yield fib
            else:
                break

    return fib_iter(n, fibs, '')
class Fibonacci(object):
    _cache = {0: 1, 1: 2}

    def __init__(self, n):
        self.n = n

    def get(self, n):
        if not n in Fibonacci._cache:
            Fibonacci._cache[n] = self.get(n-1) + self.get(n-2)
        return Fibonacci._cache[n]

    def next(self):
        return Fibonacci(self.n + 1)

    def __iter__(self):
        while True:
            yield self.get(self.n)
            self.n += 1


def fibrepr(n):
    def fib_iter(n, fib, l):
        for i, f in enumerate(fib):
            if f == n:
                yield '1' + i*'0' + l
            elif n > f:
                for match in fib_iter(n - f, fib.next(), '1' + i*'0' + l):
                    yield match
            else:
                break

    return fib_iter(n, Fibonacci(0), '')
Add Fibonacci class and use it in representation
Add Fibonacci class and use it in representation
Python
mit
kynan/CodeDojo30
+ class Fibonacci(object):
+     _cache = {0: 1, 1: 2}
+
+     def __init__(self, n):
+         self.n = n
+
+     def get(self, n):
+         if not n in Fibonacci._cache:
+             Fibonacci._cache[n] = self.get(n-1) + self.get(n-2)
+         return Fibonacci._cache[n]
+
+     def next(self):
+         return Fibonacci(self.n + 1)
+
+     def __iter__(self):
+         while True:
+             yield self.get(self.n)
+             self.n += 1
+
+
  def fibrepr(n):
-     fibs = [1, 2, 3, 5, 8, 13, 21, 34, 55, 89]
-     def fib_iter(n, fibs, l):
+     def fib_iter(n, fib, l):
-         for i, f in enumerate(fibs):
+         for i, f in enumerate(fib):
              if f == n:
                  yield '1' + i*'0' + l
              elif n > f:
-                 for fib in fib_iter(n - f, fibs[i+1:], '1' + i*'0' + l):
+                 for match in fib_iter(n - f, fib.next(), '1' + i*'0' + l):
-                     yield fib
+                     yield match
              else:
                  break

-     return fib_iter(n, fibs, '')
+     return fib_iter(n, Fibonacci(0), '')
Add Fibonacci class and use it in representation
## Code Before:
def fibrepr(n):
    fibs = [1, 2, 3, 5, 8, 13, 21, 34, 55, 89]
    def fib_iter(n, fibs, l):
        for i, f in enumerate(fibs):
            if f == n:
                yield '1' + i*'0' + l
            elif n > f:
                for fib in fib_iter(n - f, fibs[i+1:], '1' + i*'0' + l):
                    yield fib
            else:
                break

    return fib_iter(n, fibs, '')

## Instruction:
Add Fibonacci class and use it in representation

## Code After:
class Fibonacci(object):
    _cache = {0: 1, 1: 2}

    def __init__(self, n):
        self.n = n

    def get(self, n):
        if not n in Fibonacci._cache:
            Fibonacci._cache[n] = self.get(n-1) + self.get(n-2)
        return Fibonacci._cache[n]

    def next(self):
        return Fibonacci(self.n + 1)

    def __iter__(self):
        while True:
            yield self.get(self.n)
            self.n += 1


def fibrepr(n):
    def fib_iter(n, fib, l):
        for i, f in enumerate(fib):
            if f == n:
                yield '1' + i*'0' + l
            elif n > f:
                for match in fib_iter(n - f, fib.next(), '1' + i*'0' + l):
                    yield match
            else:
                break

    return fib_iter(n, Fibonacci(0), '')
57f3bec127148c80a9304194e5c3c8a3d3f3bae2
tests/scoring_engine/web/views/test_scoreboard.py
tests/scoring_engine/web/views/test_scoreboard.py
from tests.scoring_engine.web.web_test import WebTest


class TestScoreboard(WebTest):

    def test_home(self):
        # todo fix this up!!!!
        # resp = self.client.get('/scoreboard')
        # assert resp.status_code == 200
        # lazy AF
        assert 1 == 1
from tests.scoring_engine.web.web_test import WebTest
from tests.scoring_engine.helpers import populate_sample_data


class TestScoreboard(WebTest):

    def test_scoreboard(self):
        populate_sample_data(self.session)
        resp = self.client.get('/scoreboard')
        assert resp.status_code == 200
        assert self.mock_obj.call_args[0][0] == 'scoreboard.html'
        assert self.mock_obj.call_args[1]['team_labels'] == ['Blue Team 1']
        assert self.mock_obj.call_args[1]['team_scores'] == [100]
        assert self.mock_obj.call_args[1]['round_labels'] == ['Round 0', 'Round 1', 'Round 2']
        assert 'scores_colors' in self.mock_obj.call_args[1]
        assert self.mock_obj.call_args[1]['team_data'][1]['data'] == [0, 100, 100]
        assert self.mock_obj.call_args[1]['team_data'][1]['label'] == 'Blue Team 1'
        assert 'color' in self.mock_obj.call_args[1]['team_data'][1]
Add tests for scoreboard view
Add tests for scoreboard view
Python
mit
pwnbus/scoring_engine,pwnbus/scoring_engine,pwnbus/scoring_engine,pwnbus/scoring_engine
  from tests.scoring_engine.web.web_test import WebTest
+ from tests.scoring_engine.helpers import populate_sample_data


  class TestScoreboard(WebTest):

-     def test_home(self):
+     def test_scoreboard(self):
-         # todo fix this up!!!!
+         populate_sample_data(self.session)
-         # resp = self.client.get('/scoreboard')
+         resp = self.client.get('/scoreboard')
-         # assert resp.status_code == 200
+         assert resp.status_code == 200
-         # lazy AF
-         assert 1 == 1
+         assert self.mock_obj.call_args[0][0] == 'scoreboard.html'
+         assert self.mock_obj.call_args[1]['team_labels'] == ['Blue Team 1']
+         assert self.mock_obj.call_args[1]['team_scores'] == [100]
+         assert self.mock_obj.call_args[1]['round_labels'] == ['Round 0', 'Round 1', 'Round 2']
+         assert 'scores_colors' in self.mock_obj.call_args[1]
+         assert self.mock_obj.call_args[1]['team_data'][1]['data'] == [0, 100, 100]
+         assert self.mock_obj.call_args[1]['team_data'][1]['label'] == 'Blue Team 1'
+         assert 'color' in self.mock_obj.call_args[1]['team_data'][1]
Add tests for scoreboard view
## Code Before:
from tests.scoring_engine.web.web_test import WebTest


class TestScoreboard(WebTest):

    def test_home(self):
        # todo fix this up!!!!
        # resp = self.client.get('/scoreboard')
        # assert resp.status_code == 200
        # lazy AF
        assert 1 == 1

## Instruction:
Add tests for scoreboard view

## Code After:
from tests.scoring_engine.web.web_test import WebTest
from tests.scoring_engine.helpers import populate_sample_data


class TestScoreboard(WebTest):

    def test_scoreboard(self):
        populate_sample_data(self.session)
        resp = self.client.get('/scoreboard')
        assert resp.status_code == 200
        assert self.mock_obj.call_args[0][0] == 'scoreboard.html'
        assert self.mock_obj.call_args[1]['team_labels'] == ['Blue Team 1']
        assert self.mock_obj.call_args[1]['team_scores'] == [100]
        assert self.mock_obj.call_args[1]['round_labels'] == ['Round 0', 'Round 1', 'Round 2']
        assert 'scores_colors' in self.mock_obj.call_args[1]
        assert self.mock_obj.call_args[1]['team_data'][1]['data'] == [0, 100, 100]
        assert self.mock_obj.call_args[1]['team_data'][1]['label'] == 'Blue Team 1'
        assert 'color' in self.mock_obj.call_args[1]['team_data'][1]
ebac72a3753205d3e45041c6db636a378187e3cf
pylua/tests/test_compiled.py
pylua/tests/test_compiled.py
import os
import subprocess

from pylua.tests.helpers import test_file


class TestCompiled(object):
    """
    Tests compiled binary
    """

    def test_addition(self, capsys):
        f = test_file(src="""
            -- short add
            x = 10
            y = 5
            z = y + y + x
            print(z)
            print(z+y)
            --a = 100+y
            lx = 1234567890
            ly = 99999999
            print(lx+ly)
            --print(lx+1234567890)
            """, suffix=".l"
        )
        out = subprocess.check_output(['bin/pylua', f.name])
        assert out == "20.000000\n25.000000\n1334567889.000000\n"
import os
import subprocess

from pylua.tests.helpers import test_file


class TestCompiled(object):
    """
    Tests compiled binary
    """

    PYLUA_BIN = os.path.join(os.path.dirname(os.path.abspath(__file__)), ('../../bin/pylua'))

    def test_addition(self, capsys):
        f = test_file(src="""
            -- short add
            x = 10
            y = 5
            z = y + y + x
            print(z)
            print(z+y)
            --a = 100+y
            lx = 1234567890
            ly = 99999999
            print(lx+ly)
            --print(lx+1234567890)
            """, suffix=".l"
        )
        out = subprocess.check_output([TestCompiled.PYLUA_BIN, f.name])
        assert out == "20.000000\n25.000000\n1334567889.000000\n"
Use absolute path for lua binary in tests
Use absolute path for lua binary in tests
Python
bsd-3-clause
fhahn/luna,fhahn/luna
  import os
  import subprocess

  from pylua.tests.helpers import test_file


  class TestCompiled(object):
      """
      Tests compiled binary
      """
+
+     PYLUA_BIN = os.path.join(os.path.dirname(os.path.abspath(__file__)), ('../../bin/pylua'))

      def test_addition(self, capsys):
          f = test_file(src="""
              -- short add
              x = 10
              y = 5
              z = y + y + x
              print(z)
              print(z+y)
              --a = 100+y
              lx = 1234567890
              ly = 99999999
              print(lx+ly)
              --print(lx+1234567890)
              """, suffix=".l"
          )
-         out = subprocess.check_output(['bin/pylua', f.name])
+         out = subprocess.check_output([TestCompiled.PYLUA_BIN, f.name])
          assert out == "20.000000\n25.000000\n1334567889.000000\n"
Use absolute path for lua binary in tests
## Code Before:
import os
import subprocess

from pylua.tests.helpers import test_file


class TestCompiled(object):
    """
    Tests compiled binary
    """

    def test_addition(self, capsys):
        f = test_file(src="""
            -- short add
            x = 10
            y = 5
            z = y + y + x
            print(z)
            print(z+y)
            --a = 100+y
            lx = 1234567890
            ly = 99999999
            print(lx+ly)
            --print(lx+1234567890)
            """, suffix=".l"
        )
        out = subprocess.check_output(['bin/pylua', f.name])
        assert out == "20.000000\n25.000000\n1334567889.000000\n"

## Instruction:
Use absolute path for lua binary in tests

## Code After:
import os
import subprocess

from pylua.tests.helpers import test_file


class TestCompiled(object):
    """
    Tests compiled binary
    """

    PYLUA_BIN = os.path.join(os.path.dirname(os.path.abspath(__file__)), ('../../bin/pylua'))

    def test_addition(self, capsys):
        f = test_file(src="""
            -- short add
            x = 10
            y = 5
            z = y + y + x
            print(z)
            print(z+y)
            --a = 100+y
            lx = 1234567890
            ly = 99999999
            print(lx+ly)
            --print(lx+1234567890)
            """, suffix=".l"
        )
        out = subprocess.check_output([TestCompiled.PYLUA_BIN, f.name])
        assert out == "20.000000\n25.000000\n1334567889.000000\n"
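Aside: why the pylua fix above works — a relative path like 'bin/pylua' is resolved against the process's current working directory, so the old test only passed when pytest was launched from the repo root. Anchoring on __file__ removes that dependency. A small runnable sketch under the same layout assumptions as the commit (the normpath call is an extra nicety, not part of the commit):

import os

# __file__ is the path of this module; two parent hops mirror the
# '../../bin/pylua' layout used in the commit above.
HERE = os.path.dirname(os.path.abspath(__file__))
PYLUA_BIN = os.path.normpath(os.path.join(HERE, '../../bin/pylua'))
print(PYLUA_BIN)  # same binary regardless of where the test runner starts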
5577b2a20a98aa232f5591a46269e5ee6c88070d
MyMoment.py
MyMoment.py
import datetime

#Humanize time in milliseconds
#Reference: http://stackoverflow.com/questions/26276906/python-convert-seconds-from-epoch-time-into-human-readable-time
def HTM(aa):
    a = int(aa)
    b = int(datetime.datetime.now().strftime("%s"))
    c = b - a
    days = c // 86400
    hours = c // 3600 % 24
    minutes = c // 60 % 60
    seconds = c % 60
    ago = "ago"
    if (days > 0):
        return ( str(days) + " days " + ago)
    elif (hours > 0):
        return (str(hours) + " hours " + ago)
    elif (minutes > 0):
        return ( str(minutes) + " minutes " + ago)
    elif (seconds > 0):
        return (str(seconds) + " seconds " + ago)
    else:
        return (a) #Error

#http://www.epochconverter.com/
#1/6/2015, 8:19:34 AM PST -> 23 hours ago
#print HTM(1420561174000/1000)
import datetime
from time import gmtime, strftime
import pytz

#Humanize time in milliseconds
#Reference: http://stackoverflow.com/questions/26276906/python-convert-seconds-from-epoch-time-into-human-readable-time
#http://www.epochconverter.com/
#1/6/2015, 8:19:34 AM PST -> 23 hours ago
#print HTM(1420561174000/1000)
def HTM(aa):
    a = int(aa)
    b = int(datetime.datetime.now().strftime("%s"))
    c = b - a
    days = c // 86400
    hours = c // 3600 % 24
    minutes = c // 60 % 60
    seconds = c % 60
    ago = "ago"
    if (days > 0):
        return ( str(days) + " days " + ago)
    elif (hours > 0):
        return (str(hours) + " hours " + ago)
    elif (minutes > 0):
        return ( str(minutes) + " minutes " + ago)
    elif (seconds > 0):
        return (str(seconds) + " seconds " + ago)
    else:
        return (a) #Error

#My Timestamp used in logfile
def MT():
    fmt = '%Y-%m-%d %H:%M:%S'
    return (datetime.datetime.now(pytz.timezone("America/Los_Angeles")).strftime(fmt))

#My Timestamp for filename
def FT():
    fmt = '%d%b%Y-%H%M%S'
    return ( datetime.datetime.now(pytz.timezone("America/Los_Angeles")).strftime(fmt) )
Add functions to generate timestamp for logfiles & filenames; use localtimezone
Add functions to generate timestamp for logfiles & filenames; use localtimezone
Python
mit
harishvc/githubanalytics,harishvc/githubanalytics,harishvc/githubanalytics
  import datetime
+ from time import gmtime, strftime
+ import pytz

  #Humanize time in milliseconds
  #Reference: http://stackoverflow.com/questions/26276906/python-convert-seconds-from-epoch-time-into-human-readable-time
+ #http://www.epochconverter.com/
+ #1/6/2015, 8:19:34 AM PST -> 23 hours ago
+ #print HTM(1420561174000/1000)
+
  def HTM(aa):
      a = int(aa)
      b = int(datetime.datetime.now().strftime("%s"))
      c = b - a
      days = c // 86400
      hours = c // 3600 % 24
      minutes = c // 60 % 60
      seconds = c % 60
      ago = "ago"
      if (days > 0):
          return ( str(days) + " days " + ago)
      elif (hours > 0):
          return (str(hours) + " hours " + ago)
      elif (minutes > 0):
          return ( str(minutes) + " minutes " + ago)
      elif (seconds > 0):
          return (str(seconds) + " seconds " + ago)
      else:
          return (a) #Error

- #http://www.epochconverter.com/
- #1/6/2015, 8:19:34 AM PST -> 23 hours ago
- #print HTM(1420561174000/1000)
+ #My Timestamp used in logfile
+ def MT():
+     fmt = '%Y-%m-%d %H:%M:%S'
+     return (datetime.datetime.now(pytz.timezone("America/Los_Angeles")).strftime(fmt))
+
+ #My Timestamp for filename
+ def FT():
+     fmt = '%d%b%Y-%H%M%S'
+     return ( datetime.datetime.now(pytz.timezone("America/Los_Angeles")).strftime(fmt) )
+
Add functions to generate timestamp for logfiles & filenames; use localtimezone
## Code Before:
import datetime

#Humanize time in milliseconds
#Reference: http://stackoverflow.com/questions/26276906/python-convert-seconds-from-epoch-time-into-human-readable-time
def HTM(aa):
    a = int(aa)
    b = int(datetime.datetime.now().strftime("%s"))
    c = b - a
    days = c // 86400
    hours = c // 3600 % 24
    minutes = c // 60 % 60
    seconds = c % 60
    ago = "ago"
    if (days > 0):
        return ( str(days) + " days " + ago)
    elif (hours > 0):
        return (str(hours) + " hours " + ago)
    elif (minutes > 0):
        return ( str(minutes) + " minutes " + ago)
    elif (seconds > 0):
        return (str(seconds) + " seconds " + ago)
    else:
        return (a) #Error

#http://www.epochconverter.com/
#1/6/2015, 8:19:34 AM PST -> 23 hours ago
#print HTM(1420561174000/1000)

## Instruction:
Add functions to generate timestamp for logfiles & filenames; use localtimezone

## Code After:
import datetime
from time import gmtime, strftime
import pytz

#Humanize time in milliseconds
#Reference: http://stackoverflow.com/questions/26276906/python-convert-seconds-from-epoch-time-into-human-readable-time
#http://www.epochconverter.com/
#1/6/2015, 8:19:34 AM PST -> 23 hours ago
#print HTM(1420561174000/1000)
def HTM(aa):
    a = int(aa)
    b = int(datetime.datetime.now().strftime("%s"))
    c = b - a
    days = c // 86400
    hours = c // 3600 % 24
    minutes = c // 60 % 60
    seconds = c % 60
    ago = "ago"
    if (days > 0):
        return ( str(days) + " days " + ago)
    elif (hours > 0):
        return (str(hours) + " hours " + ago)
    elif (minutes > 0):
        return ( str(minutes) + " minutes " + ago)
    elif (seconds > 0):
        return (str(seconds) + " seconds " + ago)
    else:
        return (a) #Error

#My Timestamp used in logfile
def MT():
    fmt = '%Y-%m-%d %H:%M:%S'
    return (datetime.datetime.now(pytz.timezone("America/Los_Angeles")).strftime(fmt))

#My Timestamp for filename
def FT():
    fmt = '%d%b%Y-%H%M%S'
    return ( datetime.datetime.now(pytz.timezone("America/Los_Angeles")).strftime(fmt) )
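Aside: illustrative calls to the three MyMoment helpers above. The printed values are examples only — they depend on the wall clock and on the America/Los_Angeles zone hard-coded in the module, and the module must be importable alongside pytz.

from MyMoment import HTM, MT, FT

print(MT())  # log-style stamp, e.g. '2015-01-06 08:19:34'
print(FT())  # filename-safe stamp (no spaces or colons), e.g. '06Jan2015-081934'
print(HTM(1420561174000 / 1000))  # humanized age of an epoch-ms value, e.g. '23 hours ago'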
5cf17b6a46a3d4bbf4cecb65e4b9ef43066869d9
feincms/templatetags/applicationcontent_tags.py
feincms/templatetags/applicationcontent_tags.py
from django import template

# backwards compatibility import
from feincms.templatetags.fragment_tags import fragment, get_fragment, has_fragment

register = template.Library()
register.tag(fragment)
register.tag(get_fragment)
register.filter(has_fragment)


@register.simple_tag
def feincms_render_region_appcontent(page, region, request):
    """Render only the application content for the region

    This allows template authors to choose whether their page behaves
    differently when displaying embedded application subpages by doing
    something like this::

        {% if not in_appcontent_subpage %}
            {% feincms_render_region feincms_page "main" request %}
        {% else %}
            {% feincms_render_region_appcontent feincms_page "main" request %}
        {% endif %}
    """
    from feincms.content.application.models import ApplicationContent
    from feincms.templatetags.feincms_tags import _render_content

    return u''.join(_render_content(content, request=request) for content in\
        getattr(page.content, region) if isinstance(content, ApplicationContent))
from django import template

# backwards compatibility import
from feincms.templatetags.fragment_tags import fragment, get_fragment, has_fragment

register = template.Library()
register.tag(fragment)
register.tag(get_fragment)
register.filter(has_fragment)


@register.simple_tag
def feincms_render_region_appcontent(page, region, request):
    """Render only the application content for the region

    This allows template authors to choose whether their page behaves
    differently when displaying embedded application subpages by doing
    something like this::

        {% if not in_appcontent_subpage %}
            {% feincms_render_region feincms_page "main" request %}
        {% else %}
            {% feincms_render_region_appcontent feincms_page "main" request %}
        {% endif %}
    """
    from feincms.content.application.models import ApplicationContent
    from feincms.templatetags.feincms_tags import _render_content

    return u''.join(_render_content(content, request=request) for content in\
        page.content.all_of_type(ApplicationContent) if content.region == region)
Use all_of_type instead of isinstance check in feincms_render_region_appcontent
Use all_of_type instead of isinstance check in feincms_render_region_appcontent
Python
bsd-3-clause
feincms/feincms,joshuajonah/feincms,feincms/feincms,matthiask/feincms2-content,matthiask/django-content-editor,michaelkuty/feincms,mjl/feincms,matthiask/feincms2-content,mjl/feincms,matthiask/django-content-editor,matthiask/django-content-editor,michaelkuty/feincms,nickburlett/feincms,matthiask/django-content-editor,joshuajonah/feincms,matthiask/feincms2-content,nickburlett/feincms,pjdelport/feincms,pjdelport/feincms,joshuajonah/feincms,pjdelport/feincms,michaelkuty/feincms,michaelkuty/feincms,nickburlett/feincms,feincms/feincms,nickburlett/feincms,joshuajonah/feincms,mjl/feincms
  from django import template

  # backwards compatibility import
  from feincms.templatetags.fragment_tags import fragment, get_fragment, has_fragment

  register = template.Library()
  register.tag(fragment)
  register.tag(get_fragment)
  register.filter(has_fragment)


  @register.simple_tag
  def feincms_render_region_appcontent(page, region, request):
      """Render only the application content for the region

      This allows template authors to choose whether their page behaves
      differently when displaying embedded application subpages by doing
      something like this::

          {% if not in_appcontent_subpage %}
              {% feincms_render_region feincms_page "main" request %}
          {% else %}
              {% feincms_render_region_appcontent feincms_page "main" request %}
          {% endif %}
      """
      from feincms.content.application.models import ApplicationContent
      from feincms.templatetags.feincms_tags import _render_content

      return u''.join(_render_content(content, request=request) for content in\
-         getattr(page.content, region) if isinstance(content, ApplicationContent))
+         page.content.all_of_type(ApplicationContent) if content.region == region)
Use all_of_type instead of isinstance check in feincms_render_region_appcontent
## Code Before:
from django import template

# backwards compatibility import
from feincms.templatetags.fragment_tags import fragment, get_fragment, has_fragment

register = template.Library()
register.tag(fragment)
register.tag(get_fragment)
register.filter(has_fragment)


@register.simple_tag
def feincms_render_region_appcontent(page, region, request):
    """Render only the application content for the region

    This allows template authors to choose whether their page behaves
    differently when displaying embedded application subpages by doing
    something like this::

        {% if not in_appcontent_subpage %}
            {% feincms_render_region feincms_page "main" request %}
        {% else %}
            {% feincms_render_region_appcontent feincms_page "main" request %}
        {% endif %}
    """
    from feincms.content.application.models import ApplicationContent
    from feincms.templatetags.feincms_tags import _render_content

    return u''.join(_render_content(content, request=request) for content in\
        getattr(page.content, region) if isinstance(content, ApplicationContent))

## Instruction:
Use all_of_type instead of isinstance check in feincms_render_region_appcontent

## Code After:
from django import template

# backwards compatibility import
from feincms.templatetags.fragment_tags import fragment, get_fragment, has_fragment

register = template.Library()
register.tag(fragment)
register.tag(get_fragment)
register.filter(has_fragment)


@register.simple_tag
def feincms_render_region_appcontent(page, region, request):
    """Render only the application content for the region

    This allows template authors to choose whether their page behaves
    differently when displaying embedded application subpages by doing
    something like this::

        {% if not in_appcontent_subpage %}
            {% feincms_render_region feincms_page "main" request %}
        {% else %}
            {% feincms_render_region_appcontent feincms_page "main" request %}
        {% endif %}
    """
    from feincms.content.application.models import ApplicationContent
    from feincms.templatetags.feincms_tags import _render_content

    return u''.join(_render_content(content, request=request) for content in\
        page.content.all_of_type(ApplicationContent) if content.region == region)