Unnamed: 0 | repo | hash | diff | old_path | rewrite | initial_state | final_state |
---|---|---|---|---|---|---|---|
900 | https://:@github.com/chanedwin/pydistinct.git | 2920f6fe09daa3362513fbc516c711035bcb8c6a | @@ -109,7 +109,7 @@ def _get_confidence_interval(bootstrap_dist, stat_val, alpha, is_pivotal):
low = _np.percentile(bootstrap_dist, 100 * ((alpha / 2)))
val = _np.percentile(bootstrap_dist, 50)
high = _np.percentile(bootstrap_dist, 100 * (1 - (alpha / 2)))
- return BootstrapResults(low, stat_val, high)
+ return BootstrapResults(low, val, high)
def _needs_sparse_unification(values_lists):
| pydistinct/bootstrap.py | ReplaceText(target='val' @(112,33)->(112,41)) | def _get_confidence_interval(bootstrap_dist, stat_val, alpha, is_pivotal):
low = _np.percentile(bootstrap_dist, 100 * ((alpha / 2)))
val = _np.percentile(bootstrap_dist, 50)
high = _np.percentile(bootstrap_dist, 100 * (1 - (alpha / 2)))
return BootstrapResults(low, stat_val, high)
def _needs_sparse_unification(values_lists): | def _get_confidence_interval(bootstrap_dist, stat_val, alpha, is_pivotal):
low = _np.percentile(bootstrap_dist, 100 * ((alpha / 2)))
val = _np.percentile(bootstrap_dist, 50)
high = _np.percentile(bootstrap_dist, 100 * (1 - (alpha / 2)))
return BootstrapResults(low, val, high)
def _needs_sparse_unification(values_lists): |
901 | https://:@github.com/coreofscience/python-wostools.git | 2163905ba13f68da61ed6b7a4da0c2f524399039 | @@ -47,7 +47,7 @@ class Article:
self.year: Optional[int] = year
self.journal: Optional[str] = journal
self.volume: Optional[str] = volume
- self.issue: Optional[str] = volume
+ self.issue: Optional[str] = issue
self.page: Optional[str] = page
self.doi: Optional[str] = doi
self.references: List[str] = references or []
| wostools/article.py | ReplaceText(target='issue' @(50,36)->(50,42)) | class Article:
self.year: Optional[int] = year
self.journal: Optional[str] = journal
self.volume: Optional[str] = volume
self.issue: Optional[str] = volume
self.page: Optional[str] = page
self.doi: Optional[str] = doi
self.references: List[str] = references or [] | class Article:
self.year: Optional[int] = year
self.journal: Optional[str] = journal
self.volume: Optional[str] = volume
self.issue: Optional[str] = issue
self.page: Optional[str] = page
self.doi: Optional[str] = doi
self.references: List[str] = references or [] |
902 | https://:@github.com/snipsco/respeaker_python_library.git | 0f5991317aa815356f8225f6088f85cd2f3bc27f | @@ -66,7 +66,7 @@ class WebRTCVAD:
break
elif len(self.history) == self.history.maxlen and sum(self.history) == 0:
sys.stdout.write('Todo: increase capture volume')
- for _ in range(self.history.maxlen / 2):
+ for _ in range(self.history.maxlen // 2):
self.history.popleft()
else:
| respeaker/vad.py | ReplaceText(target='//' @(69,55)->(69,56)) | class WebRTCVAD:
break
elif len(self.history) == self.history.maxlen and sum(self.history) == 0:
sys.stdout.write('Todo: increase capture volume')
for _ in range(self.history.maxlen / 2):
self.history.popleft()
else: | class WebRTCVAD:
break
elif len(self.history) == self.history.maxlen and sum(self.history) == 0:
sys.stdout.write('Todo: increase capture volume')
for _ in range(self.history.maxlen // 2):
self.history.popleft()
else: |
903 | https://:@github.com/luphord/longstaff_schwartz.git | a70ee9438f4a36cfc25498c1091b5f8fc6879151 | @@ -91,7 +91,7 @@ def american_put_exercise_barrier(mdl, strike):
exercises = []
payoff = put_payoff(strike)
for cnt, s, ex, opt in mdl.evaluate_american_exercisable_iter(payoff):
- ex_idx = ex > cnt
+ ex_idx = ex >= cnt
ex_spots = s[ex_idx]
exercises.append(ex_spots.max() if ex_idx.any() else None)
exercises.reverse()
| longstaff_schwartz/binomial.py | ReplaceText(target='>=' @(94,20)->(94,21)) | def american_put_exercise_barrier(mdl, strike):
exercises = []
payoff = put_payoff(strike)
for cnt, s, ex, opt in mdl.evaluate_american_exercisable_iter(payoff):
ex_idx = ex > cnt
ex_spots = s[ex_idx]
exercises.append(ex_spots.max() if ex_idx.any() else None)
exercises.reverse() | def american_put_exercise_barrier(mdl, strike):
exercises = []
payoff = put_payoff(strike)
for cnt, s, ex, opt in mdl.evaluate_american_exercisable_iter(payoff):
ex_idx = ex >= cnt
ex_spots = s[ex_idx]
exercises.append(ex_spots.max() if ex_idx.any() else None)
exercises.reverse() |
904 | https://:@github.com/ietf-tools/RfcEditor.git | 9e509f9cbc2995ab8ad72636fe3a4739c23515bb | @@ -174,7 +174,7 @@ def check(el, depth=0):
if ns is not None and ns not in wp.xmlns_urls:
log.error("Element '{0}' does not allow attributes with namespace '{1}'".
format(element, ns), where=el)
- attribs_to_remove.append(attr)
+ attribs_to_remove.append(nsAttrib)
continue
# look to see if the attribute is either an attribute for a specific
| svgcheck/svgcheck/checksvg.py | ReplaceText(target='nsAttrib' @(177,37)->(177,41)) | def check(el, depth=0):
if ns is not None and ns not in wp.xmlns_urls:
log.error("Element '{0}' does not allow attributes with namespace '{1}'".
format(element, ns), where=el)
attribs_to_remove.append(attr)
continue
# look to see if the attribute is either an attribute for a specific | def check(el, depth=0):
if ns is not None and ns not in wp.xmlns_urls:
log.error("Element '{0}' does not allow attributes with namespace '{1}'".
format(element, ns), where=el)
attribs_to_remove.append(nsAttrib)
continue
# look to see if the attribute is either an attribute for a specific |
905 | https://:@github.com/ietf-tools/RfcEditor.git | 2086136adaeb71096ae8075d0b540ed0391608ad | @@ -242,7 +242,7 @@ def main():
# Validate any embedded ABNF
if not options.no_abnf:
- checker = AbnfChecker(options)
+ checker = AbnfChecker(config)
checker.validate(xmlrfc.tree)
| rfclint/rfclint/run.py | ReplaceText(target='config' @(245,30)->(245,37)) | def main():
# Validate any embedded ABNF
if not options.no_abnf:
checker = AbnfChecker(options)
checker.validate(xmlrfc.tree)
| def main():
# Validate any embedded ABNF
if not options.no_abnf:
checker = AbnfChecker(config)
checker.validate(xmlrfc.tree)
|
906 | https://:@github.com/openlegis-br/sagl.git | 9ea9984b17df551e9b7600c3c9ba7eb6a4f914e3 | @@ -53,7 +53,7 @@ for periodo in context.zsql.periodo_comp_comissao_obter_zsql(data = DateTime(),c
destinatarios=[]
for composicao_comissao in context.zsql.composicao_comissao_obter_zsql(cod_comissao=comissao.cod_comissao,cod_periodo_comp=periodo.cod_periodo_comp):
- if composicao_comissao.dat_desligamento == None or composicao_comissao.dat_desligamento <= DateTime():
+ if composicao_comissao.dat_desligamento == None or composicao_comissao.dat_desligamento >= DateTime():
for destinatario in context.zsql.autor_obter_zsql(cod_parlamentar=composicao_comissao.cod_parlamentar):
dic={}
dic['end_email'] = destinatario.end_email
| branches/3.1_buildout/il/sapl/skins/pysc/envia_despacho_comissao_pysc.py | ReplaceText(target='>=' @(56,89)->(56,91)) | for periodo in context.zsql.periodo_comp_comissao_obter_zsql(data = DateTime(),c
destinatarios=[]
for composicao_comissao in context.zsql.composicao_comissao_obter_zsql(cod_comissao=comissao.cod_comissao,cod_periodo_comp=periodo.cod_periodo_comp):
if composicao_comissao.dat_desligamento == None or composicao_comissao.dat_desligamento <= DateTime():
for destinatario in context.zsql.autor_obter_zsql(cod_parlamentar=composicao_comissao.cod_parlamentar):
dic={}
dic['end_email'] = destinatario.end_email | for periodo in context.zsql.periodo_comp_comissao_obter_zsql(data = DateTime(),c
destinatarios=[]
for composicao_comissao in context.zsql.composicao_comissao_obter_zsql(cod_comissao=comissao.cod_comissao,cod_periodo_comp=periodo.cod_periodo_comp):
if composicao_comissao.dat_desligamento == None or composicao_comissao.dat_desligamento >= DateTime():
for destinatario in context.zsql.autor_obter_zsql(cod_parlamentar=composicao_comissao.cod_parlamentar):
dic={}
dic['end_email'] = destinatario.end_email |
907 | https://:@github.com/Mellcap/MellPlayer.git | 8f5bcc0ee48e58743f595df9f738a6c1c0c158a6 | @@ -98,7 +98,7 @@ class Netease(object):
'song_name': d['name'],
'song_url': d['mp3Url'],
'song_artists': ';'.join(map(lambda a: a['name'], d['artists']))
- } for d in data]
+ } for d in tracks]
elif parse_type == 'lyric_detail':
if 'lrc' in data:
res = {
| MellPlayer/api.py | ReplaceText(target='tracks' @(101,23)->(101,27)) | class Netease(object):
'song_name': d['name'],
'song_url': d['mp3Url'],
'song_artists': ';'.join(map(lambda a: a['name'], d['artists']))
} for d in data]
elif parse_type == 'lyric_detail':
if 'lrc' in data:
res = { | class Netease(object):
'song_name': d['name'],
'song_url': d['mp3Url'],
'song_artists': ';'.join(map(lambda a: a['name'], d['artists']))
} for d in tracks]
elif parse_type == 'lyric_detail':
if 'lrc' in data:
res = { |
908 | https://:@github.com/kobejohn/investigators.git | 678d9eeee6cb90d3ad60bea640f8170ba9cc0473 | @@ -64,7 +64,7 @@ class ImageIdentifier(object):
scale = min(h_scale, w_scale) # min --> most shrinking
scaled_h = int(round(scale * template_h))
scaled_w = int(round(scale * template_w))
- eq_template = cv2.resize(image, (scaled_w, scaled_h),
+ eq_template = cv2.resize(template, (scaled_w, scaled_h),
interpolation=cv2.INTER_AREA)
return eq_template, eq_image
| investigators/visuals.py | ReplaceText(target='template' @(67,37)->(67,42)) | class ImageIdentifier(object):
scale = min(h_scale, w_scale) # min --> most shrinking
scaled_h = int(round(scale * template_h))
scaled_w = int(round(scale * template_w))
eq_template = cv2.resize(image, (scaled_w, scaled_h),
interpolation=cv2.INTER_AREA)
return eq_template, eq_image
| class ImageIdentifier(object):
scale = min(h_scale, w_scale) # min --> most shrinking
scaled_h = int(round(scale * template_h))
scaled_w = int(round(scale * template_w))
eq_template = cv2.resize(template, (scaled_w, scaled_h),
interpolation=cv2.INTER_AREA)
return eq_template, eq_image
|
909 | https://:@github.com/swfiua/karmapi.git | 10471bc3be40e81ed93d809a865e27e4fcf95b08 | @@ -101,7 +101,7 @@ class WeatherHat(pig.Widget):
self.interval = 1
# build a Grid and add to self
- monitor = pig.Grid(meta, self)
+ monitor = pig.Grid(self, meta)
self.monitor = monitor
layout.addWidget(monitor)
| karmapi/sense.py | ArgSwap(idxs=0<->1 @(104,18)->(104,26)) | class WeatherHat(pig.Widget):
self.interval = 1
# build a Grid and add to self
monitor = pig.Grid(meta, self)
self.monitor = monitor
layout.addWidget(monitor)
| class WeatherHat(pig.Widget):
self.interval = 1
# build a Grid and add to self
monitor = pig.Grid(self, meta)
self.monitor = monitor
layout.addWidget(monitor)
|
910 | https://:@github.com/swfiua/karmapi.git | 3b50b753a4309fb9eced9736c77b3bf9afea5227 | @@ -95,7 +95,7 @@ def pick_pixels(image, size=8):
width, height = len(image), len(image[0])
pwidth = int(width / size)
- pheight = int(width / size)
+ pheight = int(height / size)
pickx = random.randint(0, pwidth-1)
picky = random.randint(0, pheight-1)
| karmapi/backends/hatpig.py | ReplaceText(target='height' @(98,18)->(98,23)) | def pick_pixels(image, size=8):
width, height = len(image), len(image[0])
pwidth = int(width / size)
pheight = int(width / size)
pickx = random.randint(0, pwidth-1)
picky = random.randint(0, pheight-1) | def pick_pixels(image, size=8):
width, height = len(image), len(image[0])
pwidth = int(width / size)
pheight = int(height / size)
pickx = random.randint(0, pwidth-1)
picky = random.randint(0, pheight-1) |
911 | https://:@github.com/swfiua/karmapi.git | b0380f746dc68f7fec78d6ab0e425b6259336ce8 | @@ -792,7 +792,7 @@ class JeuxSansFrontieres:
wteam = group.winner()
setattr(kgame, label, wteam)
if group.is_finished():
- wteam.games.append(game)
+ wteam.games.append(kgame)
kgame, label = self.seconds[key]
steam = group.second()
| karmapi/wc.py | ReplaceText(target='kgame' @(795,35)->(795,39)) | class JeuxSansFrontieres:
wteam = group.winner()
setattr(kgame, label, wteam)
if group.is_finished():
wteam.games.append(game)
kgame, label = self.seconds[key]
steam = group.second() | class JeuxSansFrontieres:
wteam = group.winner()
setattr(kgame, label, wteam)
if group.is_finished():
wteam.games.append(kgame)
kgame, label = self.seconds[key]
steam = group.second() |
912 | https://:@github.com/swfiua/karmapi.git | e6231e7cbc0d0b521c361bc5ac2fae3be0e8f194 | @@ -154,7 +154,7 @@ class BeanStalk:
def draw(self, canvas, width, height, colour):
xx = self.xx * width
- yy = self.yy * width
+ yy = self.yy * height
canvas.create_text(
| karmapi/beanstalk.py | ReplaceText(target='height' @(157,23)->(157,28)) | class BeanStalk:
def draw(self, canvas, width, height, colour):
xx = self.xx * width
yy = self.yy * width
canvas.create_text( | class BeanStalk:
def draw(self, canvas, width, height, colour):
xx = self.xx * width
yy = self.yy * height
canvas.create_text( |
913 | https://:@github.com/swfiua/karmapi.git | 16cae396fa2510bbebd47239d9ffb2eb5f70882d | @@ -216,7 +216,7 @@ class Sphere:
for ix, (r, g, b) in enumerate(value):
self.red[ix] = r
self.green[ix] = g
- self.blue[ix] = r
+ self.blue[ix] = b
def quantise(self, value):
| karmapi/cpr.py | ReplaceText(target='b' @(219,28)->(219,29)) | class Sphere:
for ix, (r, g, b) in enumerate(value):
self.red[ix] = r
self.green[ix] = g
self.blue[ix] = r
def quantise(self, value):
| class Sphere:
for ix, (r, g, b) in enumerate(value):
self.red[ix] = r
self.green[ix] = g
self.blue[ix] = b
def quantise(self, value):
|
914 | https://:@github.com/yetone/baidu_tts.git | c9fb8ff550735a0dd776324b8cf10da53bf2fa25 | @@ -32,7 +32,7 @@ class BaiduTTS(object):
url = 'https://openapi.baidu.com/oauth/2.0/token'
key = 'access_token'
res = self.cache.get(key)
- if res and res['expire_time'] < time.time():
+ if res and res['expire_time'] > time.time():
return res['data']
resp = requests.get(
url,
| baidu_tts/__init__.py | ReplaceText(target='>' @(35,38)->(35,39)) | class BaiduTTS(object):
url = 'https://openapi.baidu.com/oauth/2.0/token'
key = 'access_token'
res = self.cache.get(key)
if res and res['expire_time'] < time.time():
return res['data']
resp = requests.get(
url, | class BaiduTTS(object):
url = 'https://openapi.baidu.com/oauth/2.0/token'
key = 'access_token'
res = self.cache.get(key)
if res and res['expire_time'] > time.time():
return res['data']
resp = requests.get(
url, |
915 | https://:@github.com/Btibert3/pypeds.git | 705d6a5d4636e573c20825eff40da5822c2e0d55 | @@ -18,7 +18,7 @@ def zip_parser(url=None, survey=None):
# path = "/tmp/pypeds/" + str(int(time.time())) + "/" # hacky way to make unique path to extract time
_today = datetime.datetime.today().strftime('%Y%m%d')
survey_lower = survey.lower()
- path = "/tmp/" + str(_today) + str(survey) + "/" # hacky way to make unique path to extract date and survey
+ path = "/tmp/" + str(_today) + str(survey_lower) + "/" # hacky way to make unique path to extract date and survey
file = survey + ".zip"
# naive way to do cacheing - if the path for today exists, dont do anything, if it doesnt, get the data
| pypeds/ipeds.py | ReplaceText(target='survey_lower' @(21,39)->(21,45)) | def zip_parser(url=None, survey=None):
# path = "/tmp/pypeds/" + str(int(time.time())) + "/" # hacky way to make unique path to extract time
_today = datetime.datetime.today().strftime('%Y%m%d')
survey_lower = survey.lower()
path = "/tmp/" + str(_today) + str(survey) + "/" # hacky way to make unique path to extract date and survey
file = survey + ".zip"
# naive way to do cacheing - if the path for today exists, dont do anything, if it doesnt, get the data | def zip_parser(url=None, survey=None):
# path = "/tmp/pypeds/" + str(int(time.time())) + "/" # hacky way to make unique path to extract time
_today = datetime.datetime.today().strftime('%Y%m%d')
survey_lower = survey.lower()
path = "/tmp/" + str(_today) + str(survey_lower) + "/" # hacky way to make unique path to extract date and survey
file = survey + ".zip"
# naive way to do cacheing - if the path for today exists, dont do anything, if it doesnt, get the data |
916 | https://:@github.com/FabriceSalvaire/Pyterate.git | b23a194f0f7785ef190d0cec7171b65d4313d455 | @@ -49,7 +49,7 @@ def setup_logging(application_name=APPLICATION_NAME, config_file=None):
formatter_config = logging_config['formatters']['ansi']['format']
logging_config['formatters']['ansi']['format'] = formatter_config.replace('<ESC>', '\033')
- if ConfigInstall.OS.on_windows and ConfigInstall.OS.on_osx:
+ if ConfigInstall.OS.on_windows or ConfigInstall.OS.on_osx:
formatter = 'simple'
else:
formatter = 'ansi'
| Pyterate/Logging/Logging.py | ReplaceText(target='or' @(52,35)->(52,38)) | def setup_logging(application_name=APPLICATION_NAME, config_file=None):
formatter_config = logging_config['formatters']['ansi']['format']
logging_config['formatters']['ansi']['format'] = formatter_config.replace('<ESC>', '\033')
if ConfigInstall.OS.on_windows and ConfigInstall.OS.on_osx:
formatter = 'simple'
else:
formatter = 'ansi' | def setup_logging(application_name=APPLICATION_NAME, config_file=None):
formatter_config = logging_config['formatters']['ansi']['format']
logging_config['formatters']['ansi']['format'] = formatter_config.replace('<ESC>', '\033')
if ConfigInstall.OS.on_windows or ConfigInstall.OS.on_osx:
formatter = 'simple'
else:
formatter = 'ansi' |
917 | https://:@github.com/andrewsanchez/genbank-qc.git | 1f822c8490c0187631a666e420704f28c4d653f7 | @@ -64,7 +64,7 @@ class FilteredSpecies(Species):
"""
self.passed = self.stats[self.stats["N_Count"] <= self.max_n_count]
self._criteria_dict["N_Count"]["failed"] = self.stats.index[
- self.stats["N_Count"] >= self.max_n_count]
+ self.stats["N_Count"] > self.max_n_count]
# self.failed_N_Count = self.stats.index[self.stats["N_Count"] >=
# self.max_n_count]
| genbankfilter/filter.py | ReplaceText(target='>' @(67,34)->(67,36)) | class FilteredSpecies(Species):
"""
self.passed = self.stats[self.stats["N_Count"] <= self.max_n_count]
self._criteria_dict["N_Count"]["failed"] = self.stats.index[
self.stats["N_Count"] >= self.max_n_count]
# self.failed_N_Count = self.stats.index[self.stats["N_Count"] >=
# self.max_n_count]
| class FilteredSpecies(Species):
"""
self.passed = self.stats[self.stats["N_Count"] <= self.max_n_count]
self._criteria_dict["N_Count"]["failed"] = self.stats.index[
self.stats["N_Count"] > self.max_n_count]
# self.failed_N_Count = self.stats.index[self.stats["N_Count"] >=
# self.max_n_count]
|
918 | https://:@github.com/andrewsanchez/genbank-qc.git | 2aa87a49e09fefbbe7fe2cba2e6074bba157322b | @@ -38,7 +38,7 @@ def cli(filter_level, max_unknowns, c_deviations, s_deviations, m_deviations,
print("Completed ", s.species)
print(s)
except Exception:
- print('Failed ', species.species)
+ print('Failed ', s.species)
traceback.print_exc()
else:
from genbankqc import Genbank
| genbankqc/__main__.py | ReplaceText(target='s' @(41,29)->(41,36)) | def cli(filter_level, max_unknowns, c_deviations, s_deviations, m_deviations,
print("Completed ", s.species)
print(s)
except Exception:
print('Failed ', species.species)
traceback.print_exc()
else:
from genbankqc import Genbank | def cli(filter_level, max_unknowns, c_deviations, s_deviations, m_deviations,
print("Completed ", s.species)
print(s)
except Exception:
print('Failed ', s.species)
traceback.print_exc()
else:
from genbankqc import Genbank |
919 | https://:@github.com/andrewsanchez/genbank-qc.git | 807883f82e4ab00bb0231c80f221e9777bf7a6e3 | @@ -22,7 +22,7 @@ def test_download_assembly_summary():
@pytest.fixture()
def biosample():
temp = Path(tempfile.mkdtemp())
- biosample = metadata.BioSample("inbox.asanchez@gmail.com", temp, sample=100)
+ biosample = metadata.BioSample(temp, "inbox.asanchez@gmail.com", sample=100)
yield biosample
shutil.rmtree(temp)
| test/metadata_test.py | ArgSwap(idxs=0<->1 @(25,16)->(25,34)) | def test_download_assembly_summary():
@pytest.fixture()
def biosample():
temp = Path(tempfile.mkdtemp())
biosample = metadata.BioSample("inbox.asanchez@gmail.com", temp, sample=100)
yield biosample
shutil.rmtree(temp)
| def test_download_assembly_summary():
@pytest.fixture()
def biosample():
temp = Path(tempfile.mkdtemp())
biosample = metadata.BioSample(temp, "inbox.asanchez@gmail.com", sample=100)
yield biosample
shutil.rmtree(temp)
|
920 | https://:@github.com/atilaneves/ropemode.git | c37cfc181d56d5b8bb67bcb7501b8ac4eac6a747 | @@ -579,7 +579,7 @@ class _CodeAssist(object):
for assist in import_assists:
p = codeassist.CompletionProposal(' : '.join(assist),
'autoimport')
- import_assists.append(p)
+ proposals.append(p)
return proposals
def _insert_import(self, name, module):
| ropemode/interface.py | ReplaceText(target='proposals' @(582,20)->(582,34)) | class _CodeAssist(object):
for assist in import_assists:
p = codeassist.CompletionProposal(' : '.join(assist),
'autoimport')
import_assists.append(p)
return proposals
def _insert_import(self, name, module): | class _CodeAssist(object):
for assist in import_assists:
p = codeassist.CompletionProposal(' : '.join(assist),
'autoimport')
proposals.append(p)
return proposals
def _insert_import(self, name, module): |
921 | https://:@github.com/pcraster/lue.git | 7b961a25b3c8a55f443705d700924bef21ac918b | @@ -129,7 +129,7 @@ def sort_benchmarks_by_time(
time_points = [item[0] for item in items]
idxs = [item[1] for item in items]
- assert all(t1 < t2 for t1, t2 in zip(time_points, time_points[1:])), time_points
+ assert all(t1 <= t2 for t1, t2 in zip(time_points, time_points[1:])), time_points
epoch = time_points[0]
return idxs, epoch
| benchmark/lue/benchmark/util.py | ReplaceText(target='<=' @(132,18)->(132,19)) | def sort_benchmarks_by_time(
time_points = [item[0] for item in items]
idxs = [item[1] for item in items]
assert all(t1 < t2 for t1, t2 in zip(time_points, time_points[1:])), time_points
epoch = time_points[0]
return idxs, epoch | def sort_benchmarks_by_time(
time_points = [item[0] for item in items]
idxs = [item[1] for item in items]
assert all(t1 <= t2 for t1, t2 in zip(time_points, time_points[1:])), time_points
epoch = time_points[0]
return idxs, epoch |
922 | https://:@github.com/dutradda/falcon-swagger.git | 1c532bf5a816b895a7964e5725c633a540a50b24 | @@ -53,7 +53,7 @@ model.__routes__.add(Route('/test/{id}', 'POST', action))
@pytest.fixture
def app(session):
- return HttpAPI(session.bind, [model])
+ return HttpAPI([model], session.bind)
class TestUsersModelIntegrationWithAuthorizationHook(object):
| tests/integration/domain/users/test_users_model_integration.py | ArgSwap(idxs=0<->1 @(56,11)->(56,18)) | model.__routes__.add(Route('/test/{id}', 'POST', action))
@pytest.fixture
def app(session):
return HttpAPI(session.bind, [model])
class TestUsersModelIntegrationWithAuthorizationHook(object): | model.__routes__.add(Route('/test/{id}', 'POST', action))
@pytest.fixture
def app(session):
return HttpAPI([model], session.bind)
class TestUsersModelIntegrationWithAuthorizationHook(object): |
923 | https://:@github.com/dutradda/falcon-swagger.git | 4fbd5298af47fb91f80a602671e1920751bb6936 | @@ -226,7 +226,7 @@ class ModelJobsMetaMixin(type):
def _set_job(cls, job_hash, status, session):
key = cls._build_jobs_key()
session.redis_bind.hset(key, job_hash, json.dumps(status))
- if session.redis_bind.ttl(key) > 0:
+ if session.redis_bind.ttl(key) < 0:
session.redis_bind.expire(key, 7*24*60*60)
def _build_jobs_key(cls):
| falconswagger/models/orm/http.py | ReplaceText(target='<' @(229,39)->(229,40)) | class ModelJobsMetaMixin(type):
def _set_job(cls, job_hash, status, session):
key = cls._build_jobs_key()
session.redis_bind.hset(key, job_hash, json.dumps(status))
if session.redis_bind.ttl(key) > 0:
session.redis_bind.expire(key, 7*24*60*60)
def _build_jobs_key(cls): | class ModelJobsMetaMixin(type):
def _set_job(cls, job_hash, status, session):
key = cls._build_jobs_key()
session.redis_bind.hset(key, job_hash, json.dumps(status))
if session.redis_bind.ttl(key) < 0:
session.redis_bind.expire(key, 7*24*60*60)
def _build_jobs_key(cls): |
924 | https://:@github.com/peter-schmidbauer/pythovolve.git | 85961b03f0086d97a01d82bcaa520be1a4fde052 | @@ -40,7 +40,7 @@ class Individual:
if not isinstance(other, Individual):
return NotImplemented
try:
- return self.score < other.score
+ return self.score == other.score
except ValueError:
return NotImplemented
| pythovolve/individuals.py | ReplaceText(target='==' @(43,30)->(43,31)) | class Individual:
if not isinstance(other, Individual):
return NotImplemented
try:
return self.score < other.score
except ValueError:
return NotImplemented
| class Individual:
if not isinstance(other, Individual):
return NotImplemented
try:
return self.score == other.score
except ValueError:
return NotImplemented
|
925 | https://:@github.com/Balandat/pyDR.git | d9405eb75fa03ad09891816342e0e210b29d8d7f | @@ -668,7 +668,7 @@ def get_energy_charges(index, tariff, isRT=False, LMP=None,
cidx = chronRates.index
pdpind = ((cidx.hour >= 12) & (cidx.hour < 18) &
(cidx.normalize().isin(pdp_days)))
- chronRates.loc[pdpind, 'EnergyCharge'] = pdpchrg
+ chronRates.loc[pdpind, 'EnergyCharge'] += pdpchrg
chronRates = chronRates.tz_convert('GMT')
if isRT:
chronRates['EnergyCharge'] += LMP.loc[index[0]:index[-1]] / 1000.0
| pyDR/utils.py | ReplaceText(target='+=' @(671,47)->(671,48)) | def get_energy_charges(index, tariff, isRT=False, LMP=None,
cidx = chronRates.index
pdpind = ((cidx.hour >= 12) & (cidx.hour < 18) &
(cidx.normalize().isin(pdp_days)))
chronRates.loc[pdpind, 'EnergyCharge'] = pdpchrg
chronRates = chronRates.tz_convert('GMT')
if isRT:
chronRates['EnergyCharge'] += LMP.loc[index[0]:index[-1]] / 1000.0 | def get_energy_charges(index, tariff, isRT=False, LMP=None,
cidx = chronRates.index
pdpind = ((cidx.hour >= 12) & (cidx.hour < 18) &
(cidx.normalize().isin(pdp_days)))
chronRates.loc[pdpind, 'EnergyCharge'] += pdpchrg
chronRates = chronRates.tz_convert('GMT')
if isRT:
chronRates['EnergyCharge'] += LMP.loc[index[0]:index[-1]] / 1000.0 |
926 | https://:@github.com/Toranktto/CraftProtocol.git | 83edf8e43fcac26d0fb517f3bc4890e5350c93de | @@ -35,7 +35,7 @@ class WindowItemsPacket(BasePacket):
for slot_data in packet.get_slots():
StreamIO.write_short(stream, slot_data.get_id())
- if slot_data.is_empty():
+ if not slot_data.is_empty():
StreamIO.write_byte(stream, slot_data.get_count())
StreamIO.write_short(stream, slot_data.get_damage())
NBTSerializer.write(stream, slot_data.get_tag())
| CraftProtocol/Protocol/v1_10/Packet/Play/WindowItemsPacket.py | ReplaceText(target='not ' @(38,15)->(38,15)) | class WindowItemsPacket(BasePacket):
for slot_data in packet.get_slots():
StreamIO.write_short(stream, slot_data.get_id())
if slot_data.is_empty():
StreamIO.write_byte(stream, slot_data.get_count())
StreamIO.write_short(stream, slot_data.get_damage())
NBTSerializer.write(stream, slot_data.get_tag()) | class WindowItemsPacket(BasePacket):
for slot_data in packet.get_slots():
StreamIO.write_short(stream, slot_data.get_id())
if not slot_data.is_empty():
StreamIO.write_byte(stream, slot_data.get_count())
StreamIO.write_short(stream, slot_data.get_damage())
NBTSerializer.write(stream, slot_data.get_tag()) |
927 | https://:@github.com/Christensen-Lab-Dartmouth/PyMethylProcess.git | 01ff59ea0c1b13bfc3bac07e16851209ac279f21 | @@ -299,7 +299,7 @@ def combine_methylation_arrays(input_pkls, optional_input_pkl_dir, output_pkl, e
input_pkls=glob.glob(os.path.join(optional_input_pkl_dir,'*','methyl_array.pkl'))
if exclude:
input_pkls=(np.array(input_pkls)[~np.isin(np.vectorize(lambda x: x.split('/')[-2])(input_pkls),np.array(exclude))]).tolist()
- if len(input_pkls) > 0:
+ if len(input_pkls) > 1:
base_methyl_array=MethylationArray(*extract_pheno_beta_df_from_pickle_dict(pickle.load(open(input_pkls[0],'rb')), ''))
methyl_arrays_generator = (MethylationArray(*extract_pheno_beta_df_from_pickle_dict(pickle.load(open(input_pkl,'rb')), '')) for input_pkl in input_pkls[1:])
list_methyl_arrays = MethylationArrays([base_methyl_array])
| build/lib/pymethylprocess/preprocess.py | ReplaceText(target='1' @(302,25)->(302,26)) | def combine_methylation_arrays(input_pkls, optional_input_pkl_dir, output_pkl, e
input_pkls=glob.glob(os.path.join(optional_input_pkl_dir,'*','methyl_array.pkl'))
if exclude:
input_pkls=(np.array(input_pkls)[~np.isin(np.vectorize(lambda x: x.split('/')[-2])(input_pkls),np.array(exclude))]).tolist()
if len(input_pkls) > 0:
base_methyl_array=MethylationArray(*extract_pheno_beta_df_from_pickle_dict(pickle.load(open(input_pkls[0],'rb')), ''))
methyl_arrays_generator = (MethylationArray(*extract_pheno_beta_df_from_pickle_dict(pickle.load(open(input_pkl,'rb')), '')) for input_pkl in input_pkls[1:])
list_methyl_arrays = MethylationArrays([base_methyl_array]) | def combine_methylation_arrays(input_pkls, optional_input_pkl_dir, output_pkl, e
input_pkls=glob.glob(os.path.join(optional_input_pkl_dir,'*','methyl_array.pkl'))
if exclude:
input_pkls=(np.array(input_pkls)[~np.isin(np.vectorize(lambda x: x.split('/')[-2])(input_pkls),np.array(exclude))]).tolist()
if len(input_pkls) > 1:
base_methyl_array=MethylationArray(*extract_pheno_beta_df_from_pickle_dict(pickle.load(open(input_pkls[0],'rb')), ''))
methyl_arrays_generator = (MethylationArray(*extract_pheno_beta_df_from_pickle_dict(pickle.load(open(input_pkl,'rb')), '')) for input_pkl in input_pkls[1:])
list_methyl_arrays = MethylationArrays([base_methyl_array]) |
928 | https://:@github.com/AndreasSteiner/python-pptx.git | 3a66082b1326a4263ea6f624a3a263faad65d4c5 | @@ -998,7 +998,7 @@ def then_categories_levels_contains_count_CategoryLevel_objs(context, count):
def then_categories_number_format_is_value(context, value):
expected_value = value
number_format = context.categories.number_format
- assert number_format == expected_value, 'got %s' % expected_value
+ assert number_format == expected_value, 'got %s' % number_format
@then('category.add_sub_category(name) is a Category object')
| features/steps/chart.py | ReplaceText(target='number_format' @(1001,55)->(1001,69)) | def then_categories_levels_contains_count_CategoryLevel_objs(context, count):
def then_categories_number_format_is_value(context, value):
expected_value = value
number_format = context.categories.number_format
assert number_format == expected_value, 'got %s' % expected_value
@then('category.add_sub_category(name) is a Category object') | def then_categories_levels_contains_count_CategoryLevel_objs(context, count):
def then_categories_number_format_is_value(context, value):
expected_value = value
number_format = context.categories.number_format
assert number_format == expected_value, 'got %s' % number_format
@then('category.add_sub_category(name) is a Category object') |
929 | https://:@github.com/dtkav/specific.git | 44fc513eaa8f8746eafce36749b8564a0a03a0e8 | @@ -127,7 +127,7 @@ def validate_array(schema, data):
return error
# Run each sub-item through the list of validators.
for func in VALIDATORS:
- error = func(subschema, subval)
+ error = func(subschema, converted_value)
if error:
return error
| connexion/decorators/validation.py | ReplaceText(target='converted_value' @(130,36)->(130,42)) | def validate_array(schema, data):
return error
# Run each sub-item through the list of validators.
for func in VALIDATORS:
error = func(subschema, subval)
if error:
return error
| def validate_array(schema, data):
return error
# Run each sub-item through the list of validators.
for func in VALIDATORS:
error = func(subschema, converted_value)
if error:
return error
|
930 | https://:@github.com/kreczko/gitlab-migrate.git | 8dd6f993bc443574e0d3e45776726b6f71141037 | @@ -36,7 +36,7 @@ def migration_instructions(conn_src, conn_dst, migrate):
if user:
names = None
if user['projects'] != '--all--':
- names = content['projects']
+ names = user['projects']
user_projects = glc.user_projects(conn_src, names=names, statistics=False)
return instructions, user_projects
| gitlab_migrate/migrate.py | ReplaceText(target='user' @(39,20)->(39,27)) | def migration_instructions(conn_src, conn_dst, migrate):
if user:
names = None
if user['projects'] != '--all--':
names = content['projects']
user_projects = glc.user_projects(conn_src, names=names, statistics=False)
return instructions, user_projects
| def migration_instructions(conn_src, conn_dst, migrate):
if user:
names = None
if user['projects'] != '--all--':
names = user['projects']
user_projects = glc.user_projects(conn_src, names=names, statistics=False)
return instructions, user_projects
|
931 | https://:@github.com/kreczko/gitlab-migrate.git | ec939be20fccea9337bda4fafce7605170bb31be | @@ -57,7 +57,7 @@ def cli(config_file, version, plain, noop):
dst_server = config.servers['destination']
gl_src = glc.connect(src_server.url, src_server.auth_token, ssl_verify=src_server.ssl_verify)
- gl_dst = glc.connect(dst_server.url, dst_server.auth_token, ssl_verify=src_server.ssl_verify)
+ gl_dst = glc.connect(dst_server.url, dst_server.auth_token, ssl_verify=dst_server.ssl_verify)
group_instructions, user_instructions = migration_instructions(gl_src, gl_dst, config.migrate)
| gitlab_migrate/migrate.py | ReplaceText(target='dst_server' @(60,75)->(60,85)) | def cli(config_file, version, plain, noop):
dst_server = config.servers['destination']
gl_src = glc.connect(src_server.url, src_server.auth_token, ssl_verify=src_server.ssl_verify)
gl_dst = glc.connect(dst_server.url, dst_server.auth_token, ssl_verify=src_server.ssl_verify)
group_instructions, user_instructions = migration_instructions(gl_src, gl_dst, config.migrate)
| def cli(config_file, version, plain, noop):
dst_server = config.servers['destination']
gl_src = glc.connect(src_server.url, src_server.auth_token, ssl_verify=src_server.ssl_verify)
gl_dst = glc.connect(dst_server.url, dst_server.auth_token, ssl_verify=dst_server.ssl_verify)
group_instructions, user_instructions = migration_instructions(gl_src, gl_dst, config.migrate)
|
932 | https://:@github.com/igrek51/cliglue.git | 2de2f9599f15422234a4ccdb9d62c08bd88074ce | @@ -116,7 +116,7 @@ def test_default_help_when_no_arguments():
def test_hiding_internal_options():
with MockIO('--help') as mockio:
CliBuilder(hide_internal=True).run()
- assert '--bash-install' in mockio.output()
+ assert '--bash-install' not in mockio.output()
assert '--bash-autocomplete' not in mockio.output()
with MockIO('--help') as mockio:
CliBuilder(hide_internal=False).run()
| tests/help/test_help.py | ReplaceText(target=' not in ' @(119,31)->(119,35)) | def test_default_help_when_no_arguments():
def test_hiding_internal_options():
with MockIO('--help') as mockio:
CliBuilder(hide_internal=True).run()
assert '--bash-install' in mockio.output()
assert '--bash-autocomplete' not in mockio.output()
with MockIO('--help') as mockio:
CliBuilder(hide_internal=False).run() | def test_default_help_when_no_arguments():
def test_hiding_internal_options():
with MockIO('--help') as mockio:
CliBuilder(hide_internal=True).run()
assert '--bash-install' not in mockio.output()
assert '--bash-autocomplete' not in mockio.output()
with MockIO('--help') as mockio:
CliBuilder(hide_internal=False).run() |
933 | https://:@github.com/alex-fe/Graphml-to-SVG-converter.git | 759cb682f91e023a80f64fa4234edf0cded4fd5f | @@ -53,7 +53,7 @@ class Graph(NameMixin):
**label_kwargs
):
if geometry is None:
- geometry = Geometry(height, width, x, y)
+ geometry = Geometry(width, height, x, y)
if fill is None:
fill = Fill(fill_color, transparent)
if border is None:
| app.py | ArgSwap(idxs=0<->1 @(56,23)->(56,31)) | class Graph(NameMixin):
**label_kwargs
):
if geometry is None:
geometry = Geometry(height, width, x, y)
if fill is None:
fill = Fill(fill_color, transparent)
if border is None: | class Graph(NameMixin):
**label_kwargs
):
if geometry is None:
geometry = Geometry(width, height, x, y)
if fill is None:
fill = Fill(fill_color, transparent)
if border is None: |
934 | https://:@bitbucket.org/mbachett/maltpynt.git | f79c6b6ff1fb7e6bb0f3d33a866e220f0b9ff0ed | @@ -14,7 +14,7 @@ def mp_calc_lags(freqs, cpds, pds1, pds2, n_chunks, rebin):
lags = np.angle(cpds) / (2 * np.pi * freqs)
sigcpd = np.absolute(cpds)
- rawcof = (sigcpd) ** 2 / ((pds1) * (pds1))
+ rawcof = (sigcpd) ** 2 / ((pds1) * (pds2))
dum = (1. - rawcof) / (2. * rawcof)
| maltpynt/mp_lags.py | ReplaceText(target='pds2' @(17,40)->(17,44)) | def mp_calc_lags(freqs, cpds, pds1, pds2, n_chunks, rebin):
lags = np.angle(cpds) / (2 * np.pi * freqs)
sigcpd = np.absolute(cpds)
rawcof = (sigcpd) ** 2 / ((pds1) * (pds1))
dum = (1. - rawcof) / (2. * rawcof)
| def mp_calc_lags(freqs, cpds, pds1, pds2, n_chunks, rebin):
lags = np.angle(cpds) / (2 * np.pi * freqs)
sigcpd = np.absolute(cpds)
rawcof = (sigcpd) ** 2 / ((pds1) * (pds2))
dum = (1. - rawcof) / (2. * rawcof)
|
935 | https://:@github.com/Lantero/battlebots.git | 253d8e64895f7b7490e8d79e6b65fb39249f0257 | @@ -41,7 +41,7 @@ class Board(object):
def transposed_index(self, index):
""" Return the transposed index """
- return index % self.row_size * self.row_size + index / self.row_size
+ return index % self.row_size * self.row_size + index // self.row_size
def transposed_board(self):
""" Return the transposed board """
| battlebots/board.py | ReplaceText(target='//' @(44,61)->(44,62)) | class Board(object):
def transposed_index(self, index):
""" Return the transposed index """
return index % self.row_size * self.row_size + index / self.row_size
def transposed_board(self):
""" Return the transposed board """ | class Board(object):
def transposed_index(self, index):
""" Return the transposed index """
return index % self.row_size * self.row_size + index // self.row_size
def transposed_board(self):
""" Return the transposed board """ |
936 | https://:@github.com/narfman0/jeeves.git | a4e0401053c41d7d584d316d05845ec75d79c9a8 | @@ -30,7 +30,7 @@ class Conversation(object):
plugin, text = self.brain.query(input)
if plugin and text:
try:
- plugin.handle(input, self.mic)
+ plugin.handle(text, self.mic)
except Exception:
self._logger.error('Failed to execute module',
exc_info=True)
| client/conversation.py | ReplaceText(target='text' @(33,38)->(33,43)) | class Conversation(object):
plugin, text = self.brain.query(input)
if plugin and text:
try:
plugin.handle(input, self.mic)
except Exception:
self._logger.error('Failed to execute module',
exc_info=True) | class Conversation(object):
plugin, text = self.brain.query(input)
if plugin and text:
try:
plugin.handle(text, self.mic)
except Exception:
self._logger.error('Failed to execute module',
exc_info=True) |
937 | https://:@bitbucket.org/dranew/remixt.git | 6aa375e5e1ba81d06e3b5f9d1739e79b22678d9c | @@ -22,5 +22,5 @@ if __name__ == '__main__':
if args['config'] is not None:
execfile(args['config'], {}, config)
- remixt.ref_data.create_ref_data(ref_data_dir, config)
-
+ remixt.ref_data.create_ref_data(config, ref_data_dir)
+
| remixt/setup/remixt_create_ref_data.py | ArgSwap(idxs=0<->1 @(25,4)->(25,35)) | if __name__ == '__main__':
if args['config'] is not None:
execfile(args['config'], {}, config)
remixt.ref_data.create_ref_data(ref_data_dir, config)
| if __name__ == '__main__':
if args['config'] is not None:
execfile(args['config'], {}, config)
remixt.ref_data.create_ref_data(config, ref_data_dir)
|
938 | https://:@bitbucket.org/dranew/remixt.git | 6aa375e5e1ba81d06e3b5f9d1739e79b22678d9c | @@ -28,7 +28,7 @@ if __name__ == '__main__':
pyp = pypeliner.app.Pypeline(config=config)
- workflow = remixt.mappability.bwa.workflow.create_bwa_mappability_workflow(ref_data_dir, config)
+ workflow = remixt.mappability.bwa.workflow.create_bwa_mappability_workflow(config, ref_data_dir)
pyp.run(workflow)
| remixt/setup/remixt_mappability_bwa.py | ArgSwap(idxs=0<->1 @(31,15)->(31,78)) | if __name__ == '__main__':
pyp = pypeliner.app.Pypeline(config=config)
workflow = remixt.mappability.bwa.workflow.create_bwa_mappability_workflow(ref_data_dir, config)
pyp.run(workflow)
| if __name__ == '__main__':
pyp = pypeliner.app.Pypeline(config=config)
workflow = remixt.mappability.bwa.workflow.create_bwa_mappability_workflow(config, ref_data_dir)
pyp.run(workflow)
|
939 | https://:@github.com/visym/vipy.git | 8ac8b78489487631a161fa7ed71cd700ea700154 | @@ -375,7 +375,7 @@ class Image(object):
def url(self, url=None, username=None, password=None, sha1=None, ignoreUrlErrors=None):
"""Image URL and URL download properties"""
- if url is None:
+ if url is not None:
self._url = url # this does not change anything else, better to use constructor
if username is not None:
self._urluser = username # basic authentication
| vipy/image.py | ReplaceText(target=' is not ' @(378,14)->(378,18)) | class Image(object):
def url(self, url=None, username=None, password=None, sha1=None, ignoreUrlErrors=None):
"""Image URL and URL download properties"""
if url is None:
self._url = url # this does not change anything else, better to use constructor
if username is not None:
self._urluser = username # basic authentication | class Image(object):
def url(self, url=None, username=None, password=None, sha1=None, ignoreUrlErrors=None):
"""Image URL and URL download properties"""
if url is not None:
self._url = url # this does not change anything else, better to use constructor
if username is not None:
self._urluser = username # basic authentication |
940 | https://:@github.com/scubbx/webguitest.git | ad615df8b42c2adadf5759951ed1029e7a186217 | @@ -35,7 +35,7 @@ if pyautoguiAvailable:
numTries += 1
time.sleep(1)
if elemToClick is None:
- print(" (x): could not locate image {}".format(elemToClick))
+ print(" (x): could not locate image {}".format(imagepath))
return False
time.sleep(1)
pyautogui.click(pyautogui.center(elemToClick))
| webguitest/clickGraphic.py | ReplaceText(target='imagepath' @(38,62)->(38,73)) | if pyautoguiAvailable:
numTries += 1
time.sleep(1)
if elemToClick is None:
print(" (x): could not locate image {}".format(elemToClick))
return False
time.sleep(1)
pyautogui.click(pyautogui.center(elemToClick)) | if pyautoguiAvailable:
numTries += 1
time.sleep(1)
if elemToClick is None:
print(" (x): could not locate image {}".format(imagepath))
return False
time.sleep(1)
pyautogui.click(pyautogui.center(elemToClick)) |
941 | https://:@github.com/PragmaticMates/django-inventor.git | 2762f3a34313372dd548e0bda6fd24db622bf703 | @@ -11,5 +11,5 @@ class PlanQuerySet(models.QuerySet):
class UserPlanQuerySet(models.QuerySet):
def expires_in(self, days=7):
- threshold = now() - timedelta(days=days)
+ threshold = now() + timedelta(days=days)
return self.filter(expiration=threshold.date())
| inventor/core/subscriptions/querysets.py | ReplaceText(target='+' @(14,26)->(14,27)) | class PlanQuerySet(models.QuerySet):
class UserPlanQuerySet(models.QuerySet):
def expires_in(self, days=7):
threshold = now() - timedelta(days=days)
return self.filter(expiration=threshold.date()) | class PlanQuerySet(models.QuerySet):
class UserPlanQuerySet(models.QuerySet):
def expires_in(self, days=7):
threshold = now() + timedelta(days=days)
return self.filter(expiration=threshold.date()) |
942 | https://:@github.com/brentp/combined-pvalues.git | 7726e0f8b5b35bb43059ae547ca2297bee54f5ec | @@ -141,7 +141,7 @@ def pipeline(col_num, step, dist, prefix, threshold, seed, bed_files, mlog=False
fregions = prefix + ".regions.bed"
with open(fregions, "w") as fh:
list(peaks.peaks(prefix + ".fdr.bed", -1, threshold, seed,
- step, fh, operator.le))
+ dist, fh, operator.le))
n_regions = sum(1 for _ in open(fregions))
print >>sys.stderr, "wrote: %s (%i regions)" % (fregions, n_regions)
| cpv/pipeline.py | ReplaceText(target='dist' @(144,12)->(144,16)) | def pipeline(col_num, step, dist, prefix, threshold, seed, bed_files, mlog=False
fregions = prefix + ".regions.bed"
with open(fregions, "w") as fh:
list(peaks.peaks(prefix + ".fdr.bed", -1, threshold, seed,
step, fh, operator.le))
n_regions = sum(1 for _ in open(fregions))
print >>sys.stderr, "wrote: %s (%i regions)" % (fregions, n_regions)
| def pipeline(col_num, step, dist, prefix, threshold, seed, bed_files, mlog=False
fregions = prefix + ".regions.bed"
with open(fregions, "w") as fh:
list(peaks.peaks(prefix + ".fdr.bed", -1, threshold, seed,
dist, fh, operator.le))
n_regions = sum(1 for _ in open(fregions))
print >>sys.stderr, "wrote: %s (%i regions)" % (fregions, n_regions)
|
943 | https://:@github.com/axiomabsolute/cs410-information-retrieval.git | 18725a0cb50a579dc3ad2c32b6fc54451bdb23b3 | @@ -24,7 +24,7 @@ by_lookup_match_piece = by(by_lookup_match, by_piece_id)
by_lookup_match_stem = by(by_lookup_match, by_stem)
def bm25_idf(N, df):
- assert(N > df)
+ assert(N >= df)
return log( (N - df + 0.5) / (df + 0.5) )
def bm25_tf(tf, k=1.2):
| firms/graders.py | ReplaceText(target='>=' @(27,13)->(27,14)) | by_lookup_match_piece = by(by_lookup_match, by_piece_id)
by_lookup_match_stem = by(by_lookup_match, by_stem)
def bm25_idf(N, df):
assert(N > df)
return log( (N - df + 0.5) / (df + 0.5) )
def bm25_tf(tf, k=1.2): | by_lookup_match_piece = by(by_lookup_match, by_piece_id)
by_lookup_match_stem = by(by_lookup_match, by_stem)
def bm25_idf(N, df):
assert(N >= df)
return log( (N - df + 0.5) / (df + 0.5) )
def bm25_tf(tf, k=1.2): |
944 | https://:@github.com/JayYip/bert-multitask-service.git | a22de7ec24ba873f699e989533122f36b4f6f693 | @@ -169,7 +169,7 @@ class BertModel(object):
if token_type_ids is None:
token_type_ids = tf.zeros(shape=[batch_size, seq_length], dtype=tf.int32)
- with tf.variable_scope("bert", scope):
+ with tf.variable_scope(scope, "bert"):
with tf.variable_scope("embeddings"):
# Perform embedding lookup on the word ids.
(self.embedding_output, self.embedding_table) = embedding_lookup(
| modeling.py | ArgSwap(idxs=0<->1 @(172,9)->(172,26)) | class BertModel(object):
if token_type_ids is None:
token_type_ids = tf.zeros(shape=[batch_size, seq_length], dtype=tf.int32)
with tf.variable_scope("bert", scope):
with tf.variable_scope("embeddings"):
# Perform embedding lookup on the word ids.
(self.embedding_output, self.embedding_table) = embedding_lookup( | class BertModel(object):
if token_type_ids is None:
token_type_ids = tf.zeros(shape=[batch_size, seq_length], dtype=tf.int32)
with tf.variable_scope(scope, "bert"):
with tf.variable_scope("embeddings"):
# Perform embedding lookup on the word ids.
(self.embedding_output, self.embedding_table) = embedding_lookup( |
945 | https://:@github.com/JayYip/bert-multitask-service.git | 52d354bf04ba003a32b5c175775a574064569dcc | @@ -22,7 +22,7 @@ def get_args():
args = parser.parse_args()
param_str = '\n'.join(['%20s = %s' % (k, v) for k, v in sorted(vars(args).items())])
print('usage:\n{0}\nparameters: \n{1}'.format(' '.join([x for x in sys.argv]), param_str))
- return parser
+ return args
if __name__ == '__main__':
| app.py | ReplaceText(target='args' @(25,11)->(25,17)) | def get_args():
args = parser.parse_args()
param_str = '\n'.join(['%20s = %s' % (k, v) for k, v in sorted(vars(args).items())])
print('usage:\n{0}\nparameters: \n{1}'.format(' '.join([x for x in sys.argv]), param_str))
return parser
if __name__ == '__main__': | def get_args():
args = parser.parse_args()
param_str = '\n'.join(['%20s = %s' % (k, v) for k, v in sorted(vars(args).items())])
print('usage:\n{0}\nparameters: \n{1}'.format(' '.join([x for x in sys.argv]), param_str))
return args
if __name__ == '__main__': |
946 | https://:@github.com/JayYip/bert-multitask-service.git | 4edc2497de0d8ea45d1afdbe636f20e51f2fa044 | @@ -166,7 +166,7 @@ class BertWorker(Process):
def input_fn_builder(self, worker):
def gen():
- while not True:
+ while True:
if self.result:
num_result = len(self.result)
worker.send_multipart([ident, b'', pickle.dumps(self.result)])
| service/server.py | ReplaceText(target='' @(169,18)->(169,22)) | class BertWorker(Process):
def input_fn_builder(self, worker):
def gen():
while not True:
if self.result:
num_result = len(self.result)
worker.send_multipart([ident, b'', pickle.dumps(self.result)]) | class BertWorker(Process):
def input_fn_builder(self, worker):
def gen():
while True:
if self.result:
num_result = len(self.result)
worker.send_multipart([ident, b'', pickle.dumps(self.result)]) |
947 | https://:@github.com/WZBSocialScienceCenter/patternlite.git | cdb9262675a96df4ce7b5d25f53b7dc22f043078 | @@ -129,7 +129,7 @@ m.save(f, final=True)
print("loading model...")
f = os.path.join(os.path.dirname(__file__), "en-model.slp")
-lexicon.model = Model.load(lexicon, f)
+lexicon.model = Model.load(f, lexicon)
# To test the accuracy of the language model,
# we can compare a tagged corpus to the predicted tags.
| examples/05-vector/07-slp.py | ArgSwap(idxs=0<->1 @(132,16)->(132,26)) | m.save(f, final=True)
print("loading model...")
f = os.path.join(os.path.dirname(__file__), "en-model.slp")
lexicon.model = Model.load(lexicon, f)
# To test the accuracy of the language model,
# we can compare a tagged corpus to the predicted tags. | m.save(f, final=True)
print("loading model...")
f = os.path.join(os.path.dirname(__file__), "en-model.slp")
lexicon.model = Model.load(f, lexicon)
# To test the accuracy of the language model,
# we can compare a tagged corpus to the predicted tags. |
948 | https://:@github.com/jsh9/python-plot-utilities.git | bbf48002e85a5c27afa879e256184dfa185943ce | @@ -1174,7 +1174,7 @@ def piechart(target_array, class_names=None, dropna=False, top_n=None,
val = int(round(pct*total/100.0))
return '{p:.1f}% ({v:d})'.format(p=pct, v=val)
return my_autopct
- autopct = make_autopct(x)
+ autopct = make_autopct(counts)
elif display == None:
autopct = ''
else:
| plot_utils.py | ReplaceText(target='counts' @(1177,31)->(1177,32)) | def piechart(target_array, class_names=None, dropna=False, top_n=None,
val = int(round(pct*total/100.0))
return '{p:.1f}% ({v:d})'.format(p=pct, v=val)
return my_autopct
autopct = make_autopct(x)
elif display == None:
autopct = ''
else: | def piechart(target_array, class_names=None, dropna=False, top_n=None,
val = int(round(pct*total/100.0))
return '{p:.1f}% ({v:d})'.format(p=pct, v=val)
return my_autopct
autopct = make_autopct(counts)
elif display == None:
autopct = ''
else: |
949 | https://:@github.com/maurosilber/binlet.git | 0c47b4979709a285c437e1090ca6503e6d3dfccf | @@ -57,7 +57,7 @@ def binlet_level(inputs, threshold, valfun, covfun, bin_args, args, level, axes)
# Calculate current level
inputs_coeffs = tuple(modwt_level_nd(x, level, axes) for x in inputs)
- bin_args = tuple(modwt_level_nd(x, level, axes, approx_only=True) for x in inputs)
+ bin_args = tuple(modwt_level_nd(x, level, axes, approx_only=True) for x in bin_args)
# Threshold current level
for key, mask in threshold_masks.items():
| binlet/binlet.py | ReplaceText(target='bin_args' @(60,79)->(60,85)) | def binlet_level(inputs, threshold, valfun, covfun, bin_args, args, level, axes)
# Calculate current level
inputs_coeffs = tuple(modwt_level_nd(x, level, axes) for x in inputs)
bin_args = tuple(modwt_level_nd(x, level, axes, approx_only=True) for x in inputs)
# Threshold current level
for key, mask in threshold_masks.items(): | def binlet_level(inputs, threshold, valfun, covfun, bin_args, args, level, axes)
# Calculate current level
inputs_coeffs = tuple(modwt_level_nd(x, level, axes) for x in inputs)
bin_args = tuple(modwt_level_nd(x, level, axes, approx_only=True) for x in bin_args)
# Threshold current level
for key, mask in threshold_masks.items(): |
950 | https://:@gitlab.com/betse/betsee.git | 83abaec314e607797890d3f25983c03bc6727ab8 | @@ -120,7 +120,7 @@ def sanitize_classifiers(
# formally classify this version as such.
for python_version_minor in range(
python_version_min_parts[1], python_version_minor_max):
- classifiers.append(
+ classifiers_sane.append(
'Programming Language :: Python :: {}.{}'.format(
PYTHON_VERSION_MAJOR, python_version_minor,))
# print('classifiers: {}'.format(_CLASSIFIERS))
| betsee_setup/beuputil.py | ReplaceText(target='classifiers_sane' @(123,8)->(123,19)) | def sanitize_classifiers(
# formally classify this version as such.
for python_version_minor in range(
python_version_min_parts[1], python_version_minor_max):
classifiers.append(
'Programming Language :: Python :: {}.{}'.format(
PYTHON_VERSION_MAJOR, python_version_minor,))
# print('classifiers: {}'.format(_CLASSIFIERS)) | def sanitize_classifiers(
# formally classify this version as such.
for python_version_minor in range(
python_version_min_parts[1], python_version_minor_max):
classifiers_sane.append(
'Programming Language :: Python :: {}.{}'.format(
PYTHON_VERSION_MAJOR, python_version_minor,))
# print('classifiers: {}'.format(_CLASSIFIERS)) |
951 | https://:@github.com/lensvol/pybetter.git | 625e8fc854c7544df702d6001a57e46838b7df70 | @@ -15,7 +15,7 @@ class EqualsNoneIsNoneTransformer(cst.CSTTransformer):
):
return original_node
- return original_node.with_changes(
+ return updated_node.with_changes(
operator=cst.Is(
whitespace_after=original_node.operator.whitespace_after,
whitespace_before=original_node.operator.whitespace_before,
| pybetter/transformers/equals_none.py | ReplaceText(target='updated_node' @(18,15)->(18,28)) | class EqualsNoneIsNoneTransformer(cst.CSTTransformer):
):
return original_node
return original_node.with_changes(
operator=cst.Is(
whitespace_after=original_node.operator.whitespace_after,
whitespace_before=original_node.operator.whitespace_before, | class EqualsNoneIsNoneTransformer(cst.CSTTransformer):
):
return original_node
return updated_node.with_changes(
operator=cst.Is(
whitespace_after=original_node.operator.whitespace_after,
whitespace_before=original_node.operator.whitespace_before, |
952 | https://:@github.com/lensvol/pybetter.git | 625e8fc854c7544df702d6001a57e46838b7df70 | @@ -14,4 +14,4 @@ class RemoveParenthesesFromReturn(cst.CSTTransformer):
changed_tuple = original_node.value.with_changes(lpar=[], rpar=[])
- return original_node.with_changes(value=changed_tuple)
+ return updated_node.with_changes(value=changed_tuple)
| pybetter/transformers/parenthesized_return.py | ReplaceText(target='updated_node' @(17,15)->(17,28)) | class RemoveParenthesesFromReturn(cst.CSTTransformer):
changed_tuple = original_node.value.with_changes(lpar=[], rpar=[])
return original_node.with_changes(value=changed_tuple) | class RemoveParenthesesFromReturn(cst.CSTTransformer):
changed_tuple = original_node.value.with_changes(lpar=[], rpar=[])
return updated_node.with_changes(value=changed_tuple) |
953 | https://:@github.com/leprikon-cz/leprikon.git | 1c31a9f054d79c1f75ee4bcd82be41d903e4f0f3 | @@ -149,7 +149,7 @@ class EventRegistration(SubjectRegistration):
if discount.accounted.date() <= d and discount.explanation.strip()
)
return PaymentStatus(
- price=self.price if self.approved and self.approved.date() < d else 0,
+ price=self.price if self.approved and self.approved.date() <= d else 0,
discount=self.get_discounted(d),
explanation=explanation,
paid=self.get_paid(d),
| leprikon/models/events.py | ReplaceText(target='<=' @(152,71)->(152,72)) | class EventRegistration(SubjectRegistration):
if discount.accounted.date() <= d and discount.explanation.strip()
)
return PaymentStatus(
price=self.price if self.approved and self.approved.date() < d else 0,
discount=self.get_discounted(d),
explanation=explanation,
paid=self.get_paid(d), | class EventRegistration(SubjectRegistration):
if discount.accounted.date() <= d and discount.explanation.strip()
)
return PaymentStatus(
price=self.price if self.approved and self.approved.date() <= d else 0,
discount=self.get_discounted(d),
explanation=explanation,
paid=self.get_paid(d), |
954 | https://:@github.com/openclimatedata/openscm.git | 1f492d38db2808bf7730b707266d53f35f0a2e09 | @@ -1036,7 +1036,7 @@ class ScmDataFrameBase: # pylint: disable=too-many-public-methods
# Convert from ParameterType to str
parameter_type_str = (
"average"
- if parameter_type == ParameterType.AVERAGE_TIMESERIES
+ if p_type == ParameterType.AVERAGE_TIMESERIES
else "point"
)
res._meta.loc[grp.index] = res._meta.loc[grp.index].assign(
| openscm/scmdataframe/base.py | ReplaceText(target='p_type' @(1039,19)->(1039,33)) | class ScmDataFrameBase: # pylint: disable=too-many-public-methods
# Convert from ParameterType to str
parameter_type_str = (
"average"
if parameter_type == ParameterType.AVERAGE_TIMESERIES
else "point"
)
res._meta.loc[grp.index] = res._meta.loc[grp.index].assign( | class ScmDataFrameBase: # pylint: disable=too-many-public-methods
# Convert from ParameterType to str
parameter_type_str = (
"average"
if p_type == ParameterType.AVERAGE_TIMESERIES
else "point"
)
res._meta.loc[grp.index] = res._meta.loc[grp.index].assign( |
955 | https://:@github.com/jvs/sourcer.git | f8175948d0ed0721ceb555837439b77246cdd0f9 | @@ -341,7 +341,7 @@ class Parser(object):
def _parse_text(self, term, pos):
end = pos + len(term)
part = self.source[pos : end]
- yield ParseResult(part, end) if part == term else ParseFailure
+ yield ParseResult(term, end) if part == term else ParseFailure
def _parse_token(self, term, pos):
if pos >= len(self.source):
| peg.py | ReplaceText(target='term' @(344,26)->(344,30)) | class Parser(object):
def _parse_text(self, term, pos):
end = pos + len(term)
part = self.source[pos : end]
yield ParseResult(part, end) if part == term else ParseFailure
def _parse_token(self, term, pos):
if pos >= len(self.source): | class Parser(object):
def _parse_text(self, term, pos):
end = pos + len(term)
part = self.source[pos : end]
yield ParseResult(term, end) if part == term else ParseFailure
def _parse_token(self, term, pos):
if pos >= len(self.source): |
956 | https://:@github.com/SergSHV/SLRIC.git | 30dccd92ce43affef13bd2731d20c525dcca6a31 | @@ -38,7 +38,7 @@ def indirect_paths(g, path_lim, aggregation, criterion):
if path_lim % 2 == 0:
return indirect_paths(compute_path(g, g, aggregation, criterion), path_lim // 2, type, criterion)
else:
- return compute_path(g, indirect_paths(g, path_lim - 1, aggregation, criterion), type, criterion)
+ return compute_path(indirect_paths(g, path_lim - 1, aggregation, criterion), g, type, criterion)
# Evaluate path strength [criterion: 0 (sum), 1 (min), 2 (multiplication)]
| SLRIC/methods/indirect_influence.py | ArgSwap(idxs=0<->1 @(41,19)->(41,31)) | def indirect_paths(g, path_lim, aggregation, criterion):
if path_lim % 2 == 0:
return indirect_paths(compute_path(g, g, aggregation, criterion), path_lim // 2, type, criterion)
else:
return compute_path(g, indirect_paths(g, path_lim - 1, aggregation, criterion), type, criterion)
# Evaluate path strength [criterion: 0 (sum), 1 (min), 2 (multiplication)] | def indirect_paths(g, path_lim, aggregation, criterion):
if path_lim % 2 == 0:
return indirect_paths(compute_path(g, g, aggregation, criterion), path_lim // 2, type, criterion)
else:
return compute_path(indirect_paths(g, path_lim - 1, aggregation, criterion), g, type, criterion)
# Evaluate path strength [criterion: 0 (sum), 1 (min), 2 (multiplication)] |
957 | https://:@github.com/jianhuupenn/ItClust.git | e09c84bfe42ededd15d95a5f618e83e3ded26271 | @@ -163,7 +163,7 @@ class transfer_learning_clf(object):
adata_test.obs["trajectory_"+str(i)]=trajectory_l[i]
#labels=change_to_continuous(q_pred)
- y_pred=np.asarray(np.argmax(q,axis=1),dtype=int)
+ y_pred=np.asarray(np.argmax(q_pred,axis=1),dtype=int)
labels=y_pred.astype('U')
labels=pd.Categorical(values=labels,categories=natsorted(np.unique(y_pred).astype('U')))
| ItClust_package/ItClust/ItClust.py | ReplaceText(target='q_pred' @(166,36)->(166,37)) | class transfer_learning_clf(object):
adata_test.obs["trajectory_"+str(i)]=trajectory_l[i]
#labels=change_to_continuous(q_pred)
y_pred=np.asarray(np.argmax(q,axis=1),dtype=int)
labels=y_pred.astype('U')
labels=pd.Categorical(values=labels,categories=natsorted(np.unique(y_pred).astype('U')))
| class transfer_learning_clf(object):
adata_test.obs["trajectory_"+str(i)]=trajectory_l[i]
#labels=change_to_continuous(q_pred)
y_pred=np.asarray(np.argmax(q_pred,axis=1),dtype=int)
labels=y_pred.astype('U')
labels=pd.Categorical(values=labels,categories=natsorted(np.unique(y_pred).astype('U')))
|
958 | https://:@github.com/datalad/git-annex-ria-remote.git | 556dd2877b24dd277160c36ebeeb03082d19706f | @@ -117,7 +117,7 @@ class Install(Clone):
path=path,
dataset=dataset,
description=description,
- reckless=ephemeral,
+ reckless=reckless,
alt_sources=alt_sources,
result_filter=None,
result_renderer='disabled',
| ria_remote/install.py | ReplaceText(target='reckless' @(120,25)->(120,34)) | class Install(Clone):
path=path,
dataset=dataset,
description=description,
reckless=ephemeral,
alt_sources=alt_sources,
result_filter=None,
result_renderer='disabled', | class Install(Clone):
path=path,
dataset=dataset,
description=description,
reckless=reckless,
alt_sources=alt_sources,
result_filter=None,
result_renderer='disabled', |
959 | https://:@github.com/dmnfarrell/smallrnaseq.git | e4d03158818e7b2d8a67428ace3c1ba9c0723c14 | @@ -544,7 +544,7 @@ def print_read_stack(reads, refseq=None, outfile=None, cutoff=0, by=None, label=
else:
seqlen = reads.end.max()
f = None
- reads = reads[reads.reads>cutoff]
+ reads = reads[reads.reads>=cutoff]
if by is not None:
reads = reads.sort_values(by, ascending=False)
| smallrnaseq/utils.py | ReplaceText(target='>=' @(547,29)->(547,30)) | def print_read_stack(reads, refseq=None, outfile=None, cutoff=0, by=None, label=
else:
seqlen = reads.end.max()
f = None
reads = reads[reads.reads>cutoff]
if by is not None:
reads = reads.sort_values(by, ascending=False)
| def print_read_stack(reads, refseq=None, outfile=None, cutoff=0, by=None, label=
else:
seqlen = reads.end.max()
f = None
reads = reads[reads.reads>=cutoff]
if by is not None:
reads = reads.sort_values(by, ascending=False)
|
960 | https://:@github.com/dmnfarrell/smallrnaseq.git | eeed9c4b43346501da633ce78ac4d22440df4608 | @@ -70,7 +70,7 @@ def run(opts):
#novel prediction
if ref_genome != '':
print ('predicting novel mirnas..')
- allreads = utils.combine_aligned_reads(temp_path, files, ref_genome)
+ allreads = utils.combine_aligned_reads(path, files, ref_genome)
new,cl = novel.find_mirnas(allreads, cow_fasta)
new.to_csv(os.path.join(out,'novel.csv'), index=False)
novel.create_report(new, cl, species, filename=os.path.join(out, 'novel.html'))
| smallrnaseq/app.py | ReplaceText(target='path' @(73,51)->(73,60)) | def run(opts):
#novel prediction
if ref_genome != '':
print ('predicting novel mirnas..')
allreads = utils.combine_aligned_reads(temp_path, files, ref_genome)
new,cl = novel.find_mirnas(allreads, cow_fasta)
new.to_csv(os.path.join(out,'novel.csv'), index=False)
novel.create_report(new, cl, species, filename=os.path.join(out, 'novel.html')) | def run(opts):
#novel prediction
if ref_genome != '':
print ('predicting novel mirnas..')
allreads = utils.combine_aligned_reads(path, files, ref_genome)
new,cl = novel.find_mirnas(allreads, cow_fasta)
new.to_csv(os.path.join(out,'novel.csv'), index=False)
novel.create_report(new, cl, species, filename=os.path.join(out, 'novel.html')) |
961 | https://:@gitlab.com/danwin/fairways_py.git | 28c15f9a3e5f59d8767bb803ec8024488b0bf4bd | @@ -64,7 +64,7 @@ class HttpQueryTemplate:
rq_kwargs["headers"] = headers
body = encoded_data
if body:
- rq_kwargs["data"] = data
+ rq_kwargs["data"] = encoded_data
return rq_kwargs
| fairways/io/generic/net.py | ReplaceText(target='encoded_data' @(67,32)->(67,36)) | class HttpQueryTemplate:
rq_kwargs["headers"] = headers
body = encoded_data
if body:
rq_kwargs["data"] = data
return rq_kwargs
| class HttpQueryTemplate:
rq_kwargs["headers"] = headers
body = encoded_data
if body:
rq_kwargs["data"] = encoded_data
return rq_kwargs
|
962 | https://:@github.com/clusterking/clusterking.git | 86816095fc6c1383b3088dbd93c72c16df8c9710 | @@ -129,7 +129,7 @@ class BMFOM(FOM):
(data1.df["cluster"] == cluster) & data1.df["bpoint"]
]
bpoints2 = data2.df[
- (data1.df["cluster"] == cluster) & data2.df["bpoint"]
+ (data2.df["cluster"] == cluster) & data2.df["bpoint"]
]
msg = "Found {} bpoints instead of 1 for dataset {}."
if len(bpoints1) != 1:
| clusterking/stability/fom.py | ReplaceText(target='data2' @(132,17)->(132,22)) | class BMFOM(FOM):
(data1.df["cluster"] == cluster) & data1.df["bpoint"]
]
bpoints2 = data2.df[
(data1.df["cluster"] == cluster) & data2.df["bpoint"]
]
msg = "Found {} bpoints instead of 1 for dataset {}."
if len(bpoints1) != 1: | class BMFOM(FOM):
(data1.df["cluster"] == cluster) & data1.df["bpoint"]
]
bpoints2 = data2.df[
(data2.df["cluster"] == cluster) & data2.df["bpoint"]
]
msg = "Found {} bpoints instead of 1 for dataset {}."
if len(bpoints1) != 1: |
963 | https://:@github.com/clusterking/clusterking.git | bdc88d5a931d268d77e5f00119fcca5f16bd562a | @@ -611,7 +611,7 @@ class BundlePlot(object):
plot_histogram(self.ax, self._bins, data, **hist_kw)
hf_kw = dict(color=light_color)
- hf_kw.update(hist_kwargs)
+ hf_kw.update(hist_fill_kwargs)
plot_histogram_fill(
self.ax, self._bins, data - err_low, data + err_high, **hf_kw
| clusterking/plots/plot_bundles.py | ReplaceText(target='hist_fill_kwargs' @(614,21)->(614,32)) | class BundlePlot(object):
plot_histogram(self.ax, self._bins, data, **hist_kw)
hf_kw = dict(color=light_color)
hf_kw.update(hist_kwargs)
plot_histogram_fill(
self.ax, self._bins, data - err_low, data + err_high, **hf_kw | class BundlePlot(object):
plot_histogram(self.ax, self._bins, data, **hist_kw)
hf_kw = dict(color=light_color)
hf_kw.update(hist_fill_kwargs)
plot_histogram_fill(
self.ax, self._bins, data - err_low, data + err_high, **hf_kw |
964 | https://:@github.com/julien6387/supvisors.git | 5163605677362dbe30e3999bc45a44696e2222d9 | @@ -60,7 +60,7 @@ class ProcessRules(object):
a required process that is not in the starting sequence is forced to optional
If addresses are not defined, all addresses are applicable """
# required MUST have start_sequence, so force to optional if no start_sequence
- if self.required and self.start_sequence <= 0:
+ if self.required and self.start_sequence == 0:
self.logger.warn('required forced to False because no start_sequence defined')
self.required = False
# if no addresses, consider all addresses
| supervisors/process.py | ReplaceText(target='==' @(63,49)->(63,51)) | class ProcessRules(object):
a required process that is not in the starting sequence is forced to optional
If addresses are not defined, all addresses are applicable """
# required MUST have start_sequence, so force to optional if no start_sequence
if self.required and self.start_sequence <= 0:
self.logger.warn('required forced to False because no start_sequence defined')
self.required = False
# if no addresses, consider all addresses | class ProcessRules(object):
a required process that is not in the starting sequence is forced to optional
If addresses are not defined, all addresses are applicable """
# required MUST have start_sequence, so force to optional if no start_sequence
if self.required and self.start_sequence == 0:
self.logger.warn('required forced to False because no start_sequence defined')
self.required = False
# if no addresses, consider all addresses |
965 | https://:@github.com/hncuong/topicmodel-lib.git | 6257da7e5c9bf7d70d504a6acb62a538dbf91694 | @@ -344,7 +344,7 @@ def load_mini_batch_term_frequency_from_term_frequency_file(fp, batch_size):
tf = list_word[j].split(":")
doc_terms[j - 1] = int(tf[0])
doc_frequency[j - 1] = int(tf[1])
- mini_batch.append_doc(doc, doc_frequency)
+ mini_batch.append_doc(doc_terms, doc_frequency)
return mini_batch, end_file
except Exception as inst:
logging.error(inst)
| tmlib/datasets/base.py | ReplaceText(target='doc_terms' @(347,34)->(347,37)) | def load_mini_batch_term_frequency_from_term_frequency_file(fp, batch_size):
tf = list_word[j].split(":")
doc_terms[j - 1] = int(tf[0])
doc_frequency[j - 1] = int(tf[1])
mini_batch.append_doc(doc, doc_frequency)
return mini_batch, end_file
except Exception as inst:
logging.error(inst) | def load_mini_batch_term_frequency_from_term_frequency_file(fp, batch_size):
tf = list_word[j].split(":")
doc_terms[j - 1] = int(tf[0])
doc_frequency[j - 1] = int(tf[1])
mini_batch.append_doc(doc_terms, doc_frequency)
return mini_batch, end_file
except Exception as inst:
logging.error(inst) |
966 | https://:@github.com/adaptivescale/lxdui.git | fce2cbadebf5400fa109a7055fcf2059613c463d | @@ -48,7 +48,7 @@ def createContainer():
for container in input:
client = LXCContainer(container)
result.append(client.create())
- return response.reply(result, message='Container {} created successfully.'.format(input.get('name')))
+ return response.reply(result, message='Container {} created successfully.'.format(container.get('name')))
except ValueError as ex:
return response.reply(message=ex.__str__(), status=403)
| app/api/controllers/container.py | ReplaceText(target='container' @(51,90)->(51,95)) | def createContainer():
for container in input:
client = LXCContainer(container)
result.append(client.create())
return response.reply(result, message='Container {} created successfully.'.format(input.get('name')))
except ValueError as ex:
return response.reply(message=ex.__str__(), status=403)
| def createContainer():
for container in input:
client = LXCContainer(container)
result.append(client.create())
return response.reply(result, message='Container {} created successfully.'.format(container.get('name')))
except ValueError as ex:
return response.reply(message=ex.__str__(), status=403)
|
967 | https://:@github.com/desihub/desiutil.git | 51e58e1174bb9bba78a3fbbe8491c2f6f4ae2d91 | @@ -66,7 +66,7 @@ def setdep(header, name, version):
verkey = 'DEPVER{:02d}'.format(i)
if namekey in header:
if header[namekey] == name:
- header[namekey] = version
+ header[verkey] = version
return
else:
continue
| py/desiutil/depend.py | ReplaceText(target='verkey' @(69,23)->(69,30)) | def setdep(header, name, version):
verkey = 'DEPVER{:02d}'.format(i)
if namekey in header:
if header[namekey] == name:
header[namekey] = version
return
else:
continue | def setdep(header, name, version):
verkey = 'DEPVER{:02d}'.format(i)
if namekey in header:
if header[namekey] == name:
header[verkey] = version
return
else:
continue |
968 | https://:@github.com/jmoiron/par2ools.git | 74b8d8885c8ad90f5649a8eb8228c62a840c5f3a | @@ -75,7 +75,7 @@ class Par2File(object):
else:
self.contents = obj_or_path.read()
if getattr(obj_or_path, 'name', None):
- self.path = f.name
+ self.path = obj_or_path.name
self.packets = self.read_packets()
def read_packets(self):
| par2ools/par2.py | ReplaceText(target='obj_or_path' @(78,28)->(78,29)) | class Par2File(object):
else:
self.contents = obj_or_path.read()
if getattr(obj_or_path, 'name', None):
self.path = f.name
self.packets = self.read_packets()
def read_packets(self): | class Par2File(object):
else:
self.contents = obj_or_path.read()
if getattr(obj_or_path, 'name', None):
self.path = obj_or_path.name
self.packets = self.read_packets()
def read_packets(self): |
969 | https://:@github.com/futurecolors/django-cked.git | e61ce34219c69582f131dbd922763d0253c2ba83 | @@ -33,7 +33,7 @@ def elfinder(request):
'dictionary type.')
return render(request, 'cked/elfinder.html', {
- 'options': json_encode(options),
+ 'options': json_encode(user_options),
})
| cked/views.py | ReplaceText(target='user_options' @(36,31)->(36,38)) | def elfinder(request):
'dictionary type.')
return render(request, 'cked/elfinder.html', {
'options': json_encode(options),
})
| def elfinder(request):
'dictionary type.')
return render(request, 'cked/elfinder.html', {
'options': json_encode(user_options),
})
|
970 | https://:@github.com/divi255/bakauditor.git | 4604cdef011cd2a4d5c4307704a69e1b87d0bb2f | @@ -12,7 +12,7 @@ def check(**kwargs):
result.time = t
result.size = size
if 'min-size' in kwargs:
- result.ok = size > kwargs.get('min-size')
+ result.ok = size >= kwargs.get('min-size')
if not result.ok:
result.err = 'Too small'
else:
| bakauditor/plugins/file.py | ReplaceText(target='>=' @(15,25)->(15,26)) | def check(**kwargs):
result.time = t
result.size = size
if 'min-size' in kwargs:
result.ok = size > kwargs.get('min-size')
if not result.ok:
result.err = 'Too small'
else: | def check(**kwargs):
result.time = t
result.size = size
if 'min-size' in kwargs:
result.ok = size >= kwargs.get('min-size')
if not result.ok:
result.err = 'Too small'
else: |
971 | https://:@github.com/cwebber/xudd.git | 69a387685cb87e3e9fd0a68f3161fe21e3eb60fb | @@ -139,7 +139,7 @@ class Hive(Thread):
"""
message_id = id or self.gen_message_id()
message = Message(
- to=to, directive=to, from_id=from_id, body=body,
+ to=to, directive=directive, from_id=from_id, body=body,
in_reply_to=in_reply_to, id=message_id)
self.hive_action_queue.put(
("queue_message", message))
| xudd/hive.py | ReplaceText(target='directive' @(142,29)->(142,31)) | class Hive(Thread):
"""
message_id = id or self.gen_message_id()
message = Message(
to=to, directive=to, from_id=from_id, body=body,
in_reply_to=in_reply_to, id=message_id)
self.hive_action_queue.put(
("queue_message", message)) | class Hive(Thread):
"""
message_id = id or self.gen_message_id()
message = Message(
to=to, directive=directive, from_id=from_id, body=body,
in_reply_to=in_reply_to, id=message_id)
self.hive_action_queue.put(
("queue_message", message)) |
972 | https://:@github.com/NotYetGames/po-excel-translate.git | 5a50131472f50d15354df8a274395d69d8a937b5 | @@ -219,7 +219,7 @@ def ConvertPoXls():
for (i, cat) in enumerate(catalogs):
cat = cat[1]
msg = cat.find(msgid)
- if msgid is not None:
+ if msg is not None:
if 'fuzzy' in msg.flags:
sheet.write(row, column, msg.msgstr, italic_style)
else:
| lingua/xlsconvert.py | ReplaceText(target='msg' @(222,15)->(222,20)) | def ConvertPoXls():
for (i, cat) in enumerate(catalogs):
cat = cat[1]
msg = cat.find(msgid)
if msgid is not None:
if 'fuzzy' in msg.flags:
sheet.write(row, column, msg.msgstr, italic_style)
else: | def ConvertPoXls():
for (i, cat) in enumerate(catalogs):
cat = cat[1]
msg = cat.find(msgid)
if msg is not None:
if 'fuzzy' in msg.flags:
sheet.write(row, column, msg.msgstr, italic_style)
else: |
973 | https://:@github.com/chen0040/pyalgs.git | 52de8860e4a4e4fdde27c055064d1a4de5dd7074 | @@ -13,7 +13,7 @@ class BinarySelection(object):
hi = len(a) - 1
while lo <= hi:
- mid = lo + (hi - lo) / 2
+ mid = lo + (hi - lo) // 2
if less(x, a[mid]):
hi = mid - 1
elif less(a[mid], x):
| pyalgs/algorithms/commons/selecting.py | ReplaceText(target='//' @(16,33)->(16,34)) | class BinarySelection(object):
hi = len(a) - 1
while lo <= hi:
mid = lo + (hi - lo) / 2
if less(x, a[mid]):
hi = mid - 1
elif less(a[mid], x): | class BinarySelection(object):
hi = len(a) - 1
while lo <= hi:
mid = lo + (hi - lo) // 2
if less(x, a[mid]):
hi = mid - 1
elif less(a[mid], x): |
974 | https://:@github.com/jvivian/rnaseq-lib.git | c72b002e39cd0a0646e3a4f228970e5078fecac6 | @@ -75,7 +75,7 @@ class Holoview:
:rtype: hv.Overlay
"""
# Subset dataframe by tissue and gene
- df = self._subset(tissue, gene)
+ df = self._subset(gene, tissue)
# Subset by dataset
tumor, normal, gtex = subset_by_dataset(df)
| src/rnaseq_lib/plot/__init__.py | ArgSwap(idxs=0<->1 @(78,13)->(78,25)) | class Holoview:
:rtype: hv.Overlay
"""
# Subset dataframe by tissue and gene
df = self._subset(tissue, gene)
# Subset by dataset
tumor, normal, gtex = subset_by_dataset(df) | class Holoview:
:rtype: hv.Overlay
"""
# Subset dataframe by tissue and gene
df = self._subset(gene, tissue)
# Subset by dataset
tumor, normal, gtex = subset_by_dataset(df) |
975 | https://:@github.com/tylertrussell/gae-catnado.git | 1c469d607226dec29e79b09d3b9a63fc6b558dee | @@ -33,4 +33,4 @@ class TestImmutableProperty(SimpleAppEngineTestCase):
self.assertEqual(refetched_entity.name, NAME)
with self.assertRaises(ImmutablePropertyException):
- entity.name = 'anything'
+ refetched_entity.name = 'anything'
| catnado/properties/test/test_immutable_property.py | ReplaceText(target='refetched_entity' @(36,6)->(36,12)) | class TestImmutableProperty(SimpleAppEngineTestCase):
self.assertEqual(refetched_entity.name, NAME)
with self.assertRaises(ImmutablePropertyException):
entity.name = 'anything' | class TestImmutableProperty(SimpleAppEngineTestCase):
self.assertEqual(refetched_entity.name, NAME)
with self.assertRaises(ImmutablePropertyException):
refetched_entity.name = 'anything' |
976 | https://:@github.com/oscar-franzen/adobo.git | 9c42ed1fad2799077765cb76d531c4218399ef66 | @@ -183,7 +183,7 @@ https://oscar-franzen.github.io/adobo/adobo.html#adobo.normalize.norm')
comp, contr = svd(data, ncomp)
else:
raise Exception('Unkown PCA method spefified. Valid choices are: irlb and svd')
- obj.index = data.columns
+ comp.index = data.columns
obj.norm_data[k]['dr']['pca'] = {'comp' : comp,
'contr' : contr,
'method' : method}
| adobo/dr.py | ReplaceText(target='comp' @(186,8)->(186,11)) | https://oscar-franzen.github.io/adobo/adobo.html#adobo.normalize.norm')
comp, contr = svd(data, ncomp)
else:
raise Exception('Unkown PCA method spefified. Valid choices are: irlb and svd')
obj.index = data.columns
obj.norm_data[k]['dr']['pca'] = {'comp' : comp,
'contr' : contr,
'method' : method} | https://oscar-franzen.github.io/adobo/adobo.html#adobo.normalize.norm')
comp, contr = svd(data, ncomp)
else:
raise Exception('Unkown PCA method spefified. Valid choices are: irlb and svd')
comp.index = data.columns
obj.norm_data[k]['dr']['pca'] = {'comp' : comp,
'contr' : contr,
'method' : method} |
977 | https://:@github.com/ngmarchant/oasis.git | 3b39116447a4d3e4662790e357c257027d398435 | @@ -62,7 +62,7 @@ def verify_consistency(predictions, scores, proba):
def verify_unit_interval(value):
"""Throw an exception if the value is not on the unit interval [0,1].
"""
- if not (value >= 0 or value <= 1):
+ if not (value >= 0 and value <= 1):
raise ValueError("expected value on the interval [0, 1].")
return value
| oasis/input_verification.py | ReplaceText(target='and' @(65,23)->(65,25)) | def verify_consistency(predictions, scores, proba):
def verify_unit_interval(value):
"""Throw an exception if the value is not on the unit interval [0,1].
"""
if not (value >= 0 or value <= 1):
raise ValueError("expected value on the interval [0, 1].")
return value
| def verify_consistency(predictions, scores, proba):
def verify_unit_interval(value):
"""Throw an exception if the value is not on the unit interval [0,1].
"""
if not (value >= 0 and value <= 1):
raise ValueError("expected value on the interval [0, 1].")
return value
|
978 | https://:@github.com/featurelabs/henchman.git | 283c4812767a4d5b8701e2e7a0077343da8cec57 | @@ -135,7 +135,7 @@ class Dendrogram():
self.graphs = templist
def transform(self, df, n_feats=10):
- assert df.shape[1] <= n_feats
+ assert df.shape[1] >= n_feats
step = self.find_set_of_size(n_feats)
return df[self.features_at_step(step)]
| henchman/selection.py | ReplaceText(target='>=' @(138,27)->(138,29)) | class Dendrogram():
self.graphs = templist
def transform(self, df, n_feats=10):
assert df.shape[1] <= n_feats
step = self.find_set_of_size(n_feats)
return df[self.features_at_step(step)]
| class Dendrogram():
self.graphs = templist
def transform(self, df, n_feats=10):
assert df.shape[1] >= n_feats
step = self.find_set_of_size(n_feats)
return df[self.features_at_step(step)]
|
979 | https://:@github.com/jackuoll/ultima-py.git | de0b3c0964cda0accc65e79e8b6b9c1e260d559f | @@ -53,7 +53,7 @@ class Gumps:
copy = bitmap.copy()
if hue:
hue = (hue & 0x3FFF) - 1
- return Hues.HUES[hue].apply_to(bitmap, only_grey_pixels=partial_hue)
+ return Hues.HUES[hue].apply_to(copy, only_grey_pixels=partial_hue)
return copy
| ultimapy/sdk/gumps.py | ReplaceText(target='copy' @(56,43)->(56,49)) | class Gumps:
copy = bitmap.copy()
if hue:
hue = (hue & 0x3FFF) - 1
return Hues.HUES[hue].apply_to(bitmap, only_grey_pixels=partial_hue)
return copy
| class Gumps:
copy = bitmap.copy()
if hue:
hue = (hue & 0x3FFF) - 1
return Hues.HUES[hue].apply_to(copy, only_grey_pixels=partial_hue)
return copy
|
980 | https://:@github.com/duguyue100/minesweeper.git | 7d48ad6eb8e1d65b8e61f370e181e246443092f2 | @@ -39,7 +39,7 @@ class MSGame(object):
if (board_height <= 0):
raise ValueError("the board height cannot be non-positive!")
else:
- self.board_height = board_width
+ self.board_height = board_height
if (num_mines >= (board_width*board_height)):
raise ValueError("The number of mines cannot be larger than "
| minesweeper/msgame.py | ReplaceText(target='board_height' @(42,32)->(42,43)) | class MSGame(object):
if (board_height <= 0):
raise ValueError("the board height cannot be non-positive!")
else:
self.board_height = board_width
if (num_mines >= (board_width*board_height)):
raise ValueError("The number of mines cannot be larger than " | class MSGame(object):
if (board_height <= 0):
raise ValueError("the board height cannot be non-positive!")
else:
self.board_height = board_height
if (num_mines >= (board_width*board_height)):
raise ValueError("The number of mines cannot be larger than " |
981 | https://:@github.com/duguyue100/minesweeper.git | e65fd9f5f46cb9c409746889d68d1e8cdc32876d | @@ -33,7 +33,7 @@ class MSBoard(object):
if (board_height <= 0):
raise ValueError("the board height cannot be non-positive!")
else:
- self.board_height = board_width
+ self.board_height = board_height
if (num_mines >= (board_width*board_height)):
raise ValueError("The number of mines cannot be larger than "
| minesweeper/msboard.py | ReplaceText(target='board_height' @(36,32)->(36,43)) | class MSBoard(object):
if (board_height <= 0):
raise ValueError("the board height cannot be non-positive!")
else:
self.board_height = board_width
if (num_mines >= (board_width*board_height)):
raise ValueError("The number of mines cannot be larger than " | class MSBoard(object):
if (board_height <= 0):
raise ValueError("the board height cannot be non-positive!")
else:
self.board_height = board_height
if (num_mines >= (board_width*board_height)):
raise ValueError("The number of mines cannot be larger than " |
982 | https://:@github.com/mtgreenway/python-openid.git | 8b6d87a5a95552814265911c9d449932c39db768 | @@ -90,7 +90,7 @@ class DiffieHelmanAssociator(object):
'openid.session_type':'DH-SHA1',
'openid.dh_modulus': to_b64(long2a(p)),
'openid.dh_gen': to_b64(long2a(g)),
- 'openid.dh_consumer_public': to_b64(long2a(pow(p, priv_key, p))),
+ 'openid.dh_consumer_public': to_b64(long2a(pow(g, priv_key, p))),
}
body = urllib.urlencode(args)
| association.py | ReplaceText(target='g' @(93,59)->(93,60)) | class DiffieHelmanAssociator(object):
'openid.session_type':'DH-SHA1',
'openid.dh_modulus': to_b64(long2a(p)),
'openid.dh_gen': to_b64(long2a(g)),
'openid.dh_consumer_public': to_b64(long2a(pow(p, priv_key, p))),
}
body = urllib.urlencode(args) | class DiffieHelmanAssociator(object):
'openid.session_type':'DH-SHA1',
'openid.dh_modulus': to_b64(long2a(p)),
'openid.dh_gen': to_b64(long2a(g)),
'openid.dh_consumer_public': to_b64(long2a(pow(g, priv_key, p))),
}
body = urllib.urlencode(args) |
983 | https://:@github.com/hhuuggoo/kitchensink.git | 752722df9109ca74a4fd321915e710eb7def9cb5 | @@ -33,7 +33,7 @@ def make_app(redis_connection_obj, port, host_url, host_name, datadir):
rpcblueprint.task_queue = TaskQueue(rpcblueprint.r)
server_manager = Servers(rpcblueprint.r)
settings.setup_server(rpcblueprint.r, datadir, host_url, host_name,
- Catalog(rpcblueprint.r, datadir, host_url),
+ Catalog(rpcblueprint.r, datadir, host_name),
server_manager
)
rpcblueprint.heartbeat_thread = HeartbeatThread()
| kitchensink/rpc/server.py | ReplaceText(target='host_name' @(36,59)->(36,67)) | def make_app(redis_connection_obj, port, host_url, host_name, datadir):
rpcblueprint.task_queue = TaskQueue(rpcblueprint.r)
server_manager = Servers(rpcblueprint.r)
settings.setup_server(rpcblueprint.r, datadir, host_url, host_name,
Catalog(rpcblueprint.r, datadir, host_url),
server_manager
)
rpcblueprint.heartbeat_thread = HeartbeatThread() | def make_app(redis_connection_obj, port, host_url, host_name, datadir):
rpcblueprint.task_queue = TaskQueue(rpcblueprint.r)
server_manager = Servers(rpcblueprint.r)
settings.setup_server(rpcblueprint.r, datadir, host_url, host_name,
Catalog(rpcblueprint.r, datadir, host_name),
server_manager
)
rpcblueprint.heartbeat_thread = HeartbeatThread() |
984 | https://:@github.com/hhuuggoo/kitchensink.git | 752722df9109ca74a4fd321915e710eb7def9cb5 | @@ -45,7 +45,7 @@ def run(redis_connection, node_url, node_name, queue, datadir):
db=redis_connection_obj['db'])
server_manager = Servers(r)
settings.setup_server(r, datadir, node_url, node_name,
- Catalog(r, datadir, node_url),
+ Catalog(r, datadir, node_name),
server_manager
)
if queue is None:
| kitchensink/scripts/start_worker.py | ReplaceText(target='node_name' @(48,46)->(48,54)) | def run(redis_connection, node_url, node_name, queue, datadir):
db=redis_connection_obj['db'])
server_manager = Servers(r)
settings.setup_server(r, datadir, node_url, node_name,
Catalog(r, datadir, node_url),
server_manager
)
if queue is None: | def run(redis_connection, node_url, node_name, queue, datadir):
db=redis_connection_obj['db'])
server_manager = Servers(r)
settings.setup_server(r, datadir, node_url, node_name,
Catalog(r, datadir, node_name),
server_manager
)
if queue is None: |
985 | https://:@github.com/MPBA/pyphysio.git | a86854cc97f7c917f35d6cb424b1edc27e3167ee | @@ -317,7 +317,7 @@ class UnevenlySignal(Signal):
if start_time is None:
start_time = x_values[0]
else:
- assert start_time >= x_values[0], "More than one sample at or before start_time"
+ assert start_time <= x_values[0], "More than one sample at or before start_time"
# WARN: limitation to 10 decimals due to workaround to prevent wrong cast flooring
# (e.g. np.floor(0.29 * 100) == 28)
x_values = _np.round((x_values - start_time) * sampling_freq, 10).astype(int)
| pyphysio/Signal.py | ReplaceText(target='<=' @(320,34)->(320,36)) | class UnevenlySignal(Signal):
if start_time is None:
start_time = x_values[0]
else:
assert start_time >= x_values[0], "More than one sample at or before start_time"
# WARN: limitation to 10 decimals due to workaround to prevent wrong cast flooring
# (e.g. np.floor(0.29 * 100) == 28)
x_values = _np.round((x_values - start_time) * sampling_freq, 10).astype(int) | class UnevenlySignal(Signal):
if start_time is None:
start_time = x_values[0]
else:
assert start_time <= x_values[0], "More than one sample at or before start_time"
# WARN: limitation to 10 decimals due to workaround to prevent wrong cast flooring
# (e.g. np.floor(0.29 * 100) == 28)
x_values = _np.round((x_values - start_time) * sampling_freq, 10).astype(int) |
986 | https://:@github.com/rhasspy/rhasspy-hermes.git | aec600af8dfa1112fdd20e0d2187b777e0f1799f | @@ -84,7 +84,7 @@ class NluIntent(Message):
@classmethod
def from_dict(cls, message_dict: typing.Dict[str, typing.Any]):
"""Construct message from dictionary."""
- message_dict = message_dict.only_fields(message_dict)
+ message_dict = cls.only_fields(message_dict)
intent_dict = message_dict.pop("intent", {})
slot_dicts = message_dict.pop("slots", [])
message = NluIntent( # type: ignore
| rhasspyhermes/nlu.py | ReplaceText(target='cls' @(87,23)->(87,35)) | class NluIntent(Message):
@classmethod
def from_dict(cls, message_dict: typing.Dict[str, typing.Any]):
"""Construct message from dictionary."""
message_dict = message_dict.only_fields(message_dict)
intent_dict = message_dict.pop("intent", {})
slot_dicts = message_dict.pop("slots", [])
message = NluIntent( # type: ignore | class NluIntent(Message):
@classmethod
def from_dict(cls, message_dict: typing.Dict[str, typing.Any]):
"""Construct message from dictionary."""
message_dict = cls.only_fields(message_dict)
intent_dict = message_dict.pop("intent", {})
slot_dicts = message_dict.pop("slots", [])
message = NluIntent( # type: ignore |
987 | https://:@github.com/alpacahq/alpaca-backtrader-api.git | f198c13c87b8736d0903498fbdb233310ef6f09c | @@ -469,7 +469,7 @@ class AlpacaStore(with_metaclass(MetaSingleton, object)):
# (https://stackoverflow.com/a/1592837/2739124)
cdl = cdl.loc[
pytz.timezone(NY).localize(dtbegin):
- pytz.timezone(NY).localize(dtbegin)
+ pytz.timezone(NY).localize(dtend)
].dropna(subset=['high'])
records = cdl.reset_index().to_dict('records')
for r in records:
| alpaca_backtrader_api/alpacastore.py | ReplaceText(target='dtend' @(472,41)->(472,48)) | class AlpacaStore(with_metaclass(MetaSingleton, object)):
# (https://stackoverflow.com/a/1592837/2739124)
cdl = cdl.loc[
pytz.timezone(NY).localize(dtbegin):
pytz.timezone(NY).localize(dtbegin)
].dropna(subset=['high'])
records = cdl.reset_index().to_dict('records')
for r in records: | class AlpacaStore(with_metaclass(MetaSingleton, object)):
# (https://stackoverflow.com/a/1592837/2739124)
cdl = cdl.loc[
pytz.timezone(NY).localize(dtbegin):
pytz.timezone(NY).localize(dtend)
].dropna(subset=['high'])
records = cdl.reset_index().to_dict('records')
for r in records: |
988 | https://:@github.com/willforde/urlquick.git | 3023fbbb7646f0e0dfb0393912720137f11165f7 | @@ -854,7 +854,7 @@ class Session(CacheAdapter):
reqParams = UnicodeDict(self._params, params)
# Add cookies to headers
- if reqCookies and not u"Cookie" in headers:
+ if reqCookies and not u"Cookie" in reqHeaders:
header = u"; ".join([u"{}={}".format(key, value) for key, value in reqCookies.items()])
reqHeaders[u"Cookie"] = header
| urlquick.py | ReplaceText(target='reqHeaders' @(857,43)->(857,50)) | class Session(CacheAdapter):
reqParams = UnicodeDict(self._params, params)
# Add cookies to headers
if reqCookies and not u"Cookie" in headers:
header = u"; ".join([u"{}={}".format(key, value) for key, value in reqCookies.items()])
reqHeaders[u"Cookie"] = header
| class Session(CacheAdapter):
reqParams = UnicodeDict(self._params, params)
# Add cookies to headers
if reqCookies and not u"Cookie" in reqHeaders:
header = u"; ".join([u"{}={}".format(key, value) for key, value in reqCookies.items()])
reqHeaders[u"Cookie"] = header
|
989 | https://:@github.com/willforde/urlquick.git | d207cf47f7bf35163849f35f919a024b001a0f33 | @@ -962,7 +962,7 @@ class Session(ConnectionManager):
if req_cookies:
logger.debug("Request cookies: %s", req_cookies)
if json:
- logger.debug("Request json: %s", req_cookies)
+ logger.debug("Request json: %s", json)
if data:
logger.debug("Request data: %s", data)
| urlquick.py | ReplaceText(target='json' @(965,45)->(965,56)) | class Session(ConnectionManager):
if req_cookies:
logger.debug("Request cookies: %s", req_cookies)
if json:
logger.debug("Request json: %s", req_cookies)
if data:
logger.debug("Request data: %s", data)
| class Session(ConnectionManager):
if req_cookies:
logger.debug("Request cookies: %s", req_cookies)
if json:
logger.debug("Request json: %s", json)
if data:
logger.debug("Request data: %s", data)
|
990 | https://:@github.com/dlukes/corpy.git | 4bf35b8dd0ed5971bd05896adda646f4452e47a8 | @@ -23,7 +23,7 @@ def print_position(lines, line_no):
"represent the same corpus?"
)
position.extend(line[1:])
- print("\t".join(line))
+ print("\t".join(position))
@cli.command()
| corpy/scripts/zip_verticals.py | ReplaceText(target='position' @(26,20)->(26,24)) | def print_position(lines, line_no):
"represent the same corpus?"
)
position.extend(line[1:])
print("\t".join(line))
@cli.command() | def print_position(lines, line_no):
"represent the same corpus?"
)
position.extend(line[1:])
print("\t".join(position))
@cli.command() |
991 | https://:@github.com/steinitzu/humblebee.git | d95954f99098481ab17e935f63ca734dc8bdf519 | @@ -50,7 +50,7 @@ def zero_prefix_int(num):
strnum = str(num)
if len(strnum) == 1:
return '0'+strnum
- return num
+ return strnum
def timestamp(dt):
return mktime(dt.timetuple())
| src/tvunfucker/util.py | ReplaceText(target='strnum' @(53,11)->(53,14)) | def zero_prefix_int(num):
strnum = str(num)
if len(strnum) == 1:
return '0'+strnum
return num
def timestamp(dt):
return mktime(dt.timetuple()) | def zero_prefix_int(num):
strnum = str(num)
if len(strnum) == 1:
return '0'+strnum
return strnum
def timestamp(dt):
return mktime(dt.timetuple()) |
992 | https://:@github.com/ramasrirama99/AlgoTradeFramework.git | 011a70532972aa883c66bb9d4f32351bcc24a922 | @@ -6,7 +6,7 @@ def chunks(l, n):
n = max(1, n)
step = int(len(l) / n)
for i in range(0, len(l), step):
- big_list.append(l[i:i+n])
+ big_list.append(l[i:i+step])
return big_list
| algotaf/backend/config.py | ReplaceText(target='step' @(9,30)->(9,31)) | def chunks(l, n):
n = max(1, n)
step = int(len(l) / n)
for i in range(0, len(l), step):
big_list.append(l[i:i+n])
return big_list
| def chunks(l, n):
n = max(1, n)
step = int(len(l) / n)
for i in range(0, len(l), step):
big_list.append(l[i:i+step])
return big_list
|
993 | https://:@github.com/JonnyTran/OpenOmics.git | d49006c61b12a8013c9c1f4a12c5ff390980f9e3 | @@ -454,7 +454,7 @@ class LncRNAExpression(GenomicData):
if gene_name not in lnc_seq:
lnc_seq[gene_name] = str(record.seq)
else:
- if len(lnc_seq[gene_name]) > len(str(record.seq)):
+ if len(lnc_seq[gene_name]) < len(str(record.seq)):
lnc_seq[gene_name] = str(record.seq)
# Multiple transcripts each lncRNA gene
| TCGAMultiOmics/genomic.py | ReplaceText(target='<' @(457,43)->(457,44)) | class LncRNAExpression(GenomicData):
if gene_name not in lnc_seq:
lnc_seq[gene_name] = str(record.seq)
else:
if len(lnc_seq[gene_name]) > len(str(record.seq)):
lnc_seq[gene_name] = str(record.seq)
# Multiple transcripts each lncRNA gene | class LncRNAExpression(GenomicData):
if gene_name not in lnc_seq:
lnc_seq[gene_name] = str(record.seq)
else:
if len(lnc_seq[gene_name]) < len(str(record.seq)):
lnc_seq[gene_name] = str(record.seq)
# Multiple transcripts each lncRNA gene |
994 | https://:@github.com/JonnyTran/OpenOmics.git | b034a1b50639f3d302e6da13280093662394b35d | @@ -95,7 +95,7 @@ class GeneOntology(Dataset):
leaf_terms = self.get_child_terms()
go_terms_parents = annotation.map(
- lambda x: list({term for term in x if term not in leaf_terms}) \
+ lambda x: list({term for term in x if term in leaf_terms}) \
if isinstance(x, list) else None)
return go_terms_parents
| openomics/database/ontology.py | ReplaceText(target=' in ' @(98,54)->(98,62)) | class GeneOntology(Dataset):
leaf_terms = self.get_child_terms()
go_terms_parents = annotation.map(
lambda x: list({term for term in x if term not in leaf_terms}) \
if isinstance(x, list) else None)
return go_terms_parents
| class GeneOntology(Dataset):
leaf_terms = self.get_child_terms()
go_terms_parents = annotation.map(
lambda x: list({term for term in x if term in leaf_terms}) \
if isinstance(x, list) else None)
return go_terms_parents
|
995 | https://:@github.com/JonnyTran/OpenOmics.git | 153599d5ce118fcd4c4fb6c44eba9c28fd762983 | @@ -95,7 +95,7 @@ class GeneOntology(Dataset):
leaf_terms = self.get_child_terms()
go_terms_parents = annotation.map(
- lambda x: list({term for term in x if term not in leaf_terms}) \
+ lambda x: list({term for term in x if term in leaf_terms}) \
if isinstance(x, list) else None)
return go_terms_parents
| openomics/database/ontology.py | ReplaceText(target=' in ' @(98,54)->(98,62)) | class GeneOntology(Dataset):
leaf_terms = self.get_child_terms()
go_terms_parents = annotation.map(
lambda x: list({term for term in x if term not in leaf_terms}) \
if isinstance(x, list) else None)
return go_terms_parents
| class GeneOntology(Dataset):
leaf_terms = self.get_child_terms()
go_terms_parents = annotation.map(
lambda x: list({term for term in x if term in leaf_terms}) \
if isinstance(x, list) else None)
return go_terms_parents
|
996 | https://:@github.com/jqb/django-settings.git | 6b594eca70c7addfc9e1b30d8e91334da3c6254f | @@ -106,7 +106,7 @@ class DataAPI(object):
# XXX: fix this mechanism
def _set_cache_for(self, name, value):
- self.get._cache_set(value, name)
+ self.get._cache_set(name, value)
data = DataAPI()
| django_settings/dataapi.py | ArgSwap(idxs=0<->1 @(109,8)->(109,27)) | class DataAPI(object):
# XXX: fix this mechanism
def _set_cache_for(self, name, value):
self.get._cache_set(value, name)
data = DataAPI() | class DataAPI(object):
# XXX: fix this mechanism
def _set_cache_for(self, name, value):
self.get._cache_set(name, value)
data = DataAPI() |
997 | https://:@github.com/jqb/django-settings.git | 72acf0e93c8b2a4bc8dd0c78fa1c703e63b7a26f | @@ -11,7 +11,7 @@ def initialize_data(sender, **kwargs):
for name, type_name_and_value in DEFAULT_SETTINGS.items():
type_name, value = type_name_and_value
- if not dataapi.data.exists(type_name):
+ if not dataapi.data.exists(name):
dataapi.data.set(type_name, name, value)
signals.post_syncdb.connect(initialize_data, sender=models)
| django_settings/management.py | ReplaceText(target='name' @(14,35)->(14,44)) | def initialize_data(sender, **kwargs):
for name, type_name_and_value in DEFAULT_SETTINGS.items():
type_name, value = type_name_and_value
if not dataapi.data.exists(type_name):
dataapi.data.set(type_name, name, value)
signals.post_syncdb.connect(initialize_data, sender=models) | def initialize_data(sender, **kwargs):
for name, type_name_and_value in DEFAULT_SETTINGS.items():
type_name, value = type_name_and_value
if not dataapi.data.exists(name):
dataapi.data.set(type_name, name, value)
signals.post_syncdb.connect(initialize_data, sender=models) |
998 | https://:@github.com/qutech/qtune.git | a6ca5b76c17a8092c67f2d495c7f422d038008f5 | @@ -45,5 +45,5 @@ def find_lead_transition(data: np.ndarray, center: float, scan_range: float, npo
y_red = np.absolute(y_red)
max_index = int(np.argmax(y_red) + int(round(n / 2)))
- return x_red[max_index]
+ return x[max_index]
| qtune/util.py | ReplaceText(target='x' @(48,11)->(48,16)) | def find_lead_transition(data: np.ndarray, center: float, scan_range: float, npo
y_red = np.absolute(y_red)
max_index = int(np.argmax(y_red) + int(round(n / 2)))
return x_red[max_index]
| def find_lead_transition(data: np.ndarray, center: float, scan_range: float, npo
y_red = np.absolute(y_red)
max_index = int(np.argmax(y_red) + int(round(n / 2)))
return x[max_index]
|
999 | https://:@github.com/qutech/qtune.git | b68e78b9da5d2150450fcda75d7266d388794caf | @@ -42,7 +42,7 @@ class SubsetTunerTest(unittest.TestCase):
# assert that the solver is called with the right arguments
self.assertEqual(solver.update_after_step.call_count, 1)
- pd.testing.assert_series_equal(solver_voltages, solver.update_after_step.call_args[0][0])
+ pd.testing.assert_series_equal(full_voltages, solver.update_after_step.call_args[0][0])
parameter = pd.Series(data=[10 * i for i in range(n_evaluator)],
index=["parameter_" + str(i) for i in range(n_evaluator)])
| tests/test_parameter_tuner.py | ReplaceText(target='full_voltages' @(45,39)->(45,54)) | class SubsetTunerTest(unittest.TestCase):
# assert that the solver is called with the right arguments
self.assertEqual(solver.update_after_step.call_count, 1)
pd.testing.assert_series_equal(solver_voltages, solver.update_after_step.call_args[0][0])
parameter = pd.Series(data=[10 * i for i in range(n_evaluator)],
index=["parameter_" + str(i) for i in range(n_evaluator)]) | class SubsetTunerTest(unittest.TestCase):
# assert that the solver is called with the right arguments
self.assertEqual(solver.update_after_step.call_count, 1)
pd.testing.assert_series_equal(full_voltages, solver.update_after_step.call_args[0][0])
parameter = pd.Series(data=[10 * i for i in range(n_evaluator)],
index=["parameter_" + str(i) for i in range(n_evaluator)]) |
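A note on the `rewrite` column, for readers of the rows above: each descriptor encodes the seeded edit as a source-span operation on the file named in `old_path`. `ReplaceText(target=S @(l1,c1)->(l2,c2))` substitutes the string S for the span from line l1, column c1 to line l2, column c2 of the original file; lines appear to be 1-indexed and columns 0-indexed, judging from record 952, where @(17,15)->(17,28) covers exactly the 13 characters of `original_node` on the `return` line. `ArgSwap(idxs=0<->1 @...)` appears to swap the indexed arguments of the call expression at the given span, as in records 956 and 974. Below is a minimal sketch of applying a ReplaceText edit under that inferred convention; `apply_replace_text` is a hypothetical helper written for illustration, not part of the dataset's tooling, so verify the indexing convention against your copy of the data before relying on it.

    def apply_replace_text(source: str, target: str,
                           start: tuple, end: tuple) -> str:
        """Apply ReplaceText(target=... @(l1,c1)->(l2,c2)) to source text.

        Assumes 1-indexed line numbers and 0-indexed column offsets,
        inferred from the records above (an assumption, not documented).
        """
        lines = source.splitlines(keepends=True)
        (l1, c1), (l2, c2) = start, end
        # Keep the prefix of the first line and the suffix of the last line,
        # replacing everything in between with the target string. For a
        # single-line span (l1 == l2) this degenerates to a plain splice.
        lines[l1 - 1:l2] = [lines[l1 - 1][:c1] + target + lines[l2 - 1][c2:]]
        return "".join(lines)

    # Toy check mirroring record 952 (one line, so the line number is 1 here):
    src = "        return original_node.with_changes(value=changed_tuple)\n"
    assert apply_replace_text(src, "updated_node", (1, 15), (1, 28)) == \
        "        return updated_node.with_changes(value=changed_tuple)\n"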