| Column | Type | Values / lengths |
| --- | --- | --- |
| id | int32 | 0 to 252k |
| repo | string | lengths 7 to 55 |
| path | string | lengths 4 to 127 |
| func_name | string | lengths 1 to 88 |
| original_string | string | lengths 75 to 19.8k |
| language | string | 1 class |
| code | string | lengths 75 to 19.8k |
| code_tokens | sequence | lengths 20 to 707 |
| docstring | string | lengths 3 to 17.3k |
| docstring_tokens | sequence | lengths 3 to 222 |
| sha | string | length 40 |
| url | string | lengths 87 to 242 |
| docstring_summary | string | 1 class |
| parameters | string | 1 class |
| return_statement | string | 1 class |
| argument_list | string | 1 class |
| identifier | string | 1 class |
| nwo | string | 1 class |
| score | float32 | -1 to -1 |
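The columns follow the CodeSearchNet-style layout of one function per record, pairing the raw source (`original_string`, `code`) with tokenized views (`code_tokens`, `docstring_tokens`) and provenance fields (`repo`, `path`, `sha`, `url`). As a rough illustration only, the sketch below iterates over such records from a local JSONL export; the file name `train.jsonl` and the export step itself are assumptions, since the dataset's actual storage location is not shown here.

```python
import json

# Minimal sketch, assuming the split has been exported to a hypothetical
# local file "train.jsonl" with one JSON record per line.
with open("train.jsonl", encoding="utf-8") as fh:
    for line in fh:
        row = json.loads(line)
        # Fields documented as single-valued or fixed-width in the schema.
        assert row["language"] == "python"
        assert len(row["sha"]) == 40  # full git commit hash
        # Provenance plus the paired raw/tokenized views of the function.
        print(row["repo"], row["path"], row["func_name"])
        print(row["docstring"])
        print(len(row["code_tokens"]), "code tokens,",
              len(row["docstring_tokens"]), "docstring tokens")
        break  # only inspect the first record
```

The rows that follow are example records from this split, shown field by field.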
251,400
nickjj/ansigenome
ansigenome/ui.py
usage
def usage(): """ Return the usage for the help command. """ l_bracket = clr.stringc("[", "dark gray") r_bracket = clr.stringc("]", "dark gray") pipe = clr.stringc("|", "dark gray") app_name = clr.stringc("%prog", "bright blue") commands = clr.stringc("{0}".format(pipe).join(c.VALID_ACTIONS), "normal") help = clr.stringc("--help", "green") options = clr.stringc("options", "yellow") guide = "\n\n" for action in c.VALID_ACTIONS: guide += command_name(app_name, action, c.MESSAGES["help_" + action]) # remove the last line break guide = guide[:-1] return "{0} {1}{2}{3} {1}{4}{3} {1}{5}{3}\n{6}".format(app_name, l_bracket, commands, r_bracket, help, options, guide)
python
def usage(): """ Return the usage for the help command. """ l_bracket = clr.stringc("[", "dark gray") r_bracket = clr.stringc("]", "dark gray") pipe = clr.stringc("|", "dark gray") app_name = clr.stringc("%prog", "bright blue") commands = clr.stringc("{0}".format(pipe).join(c.VALID_ACTIONS), "normal") help = clr.stringc("--help", "green") options = clr.stringc("options", "yellow") guide = "\n\n" for action in c.VALID_ACTIONS: guide += command_name(app_name, action, c.MESSAGES["help_" + action]) # remove the last line break guide = guide[:-1] return "{0} {1}{2}{3} {1}{4}{3} {1}{5}{3}\n{6}".format(app_name, l_bracket, commands, r_bracket, help, options, guide)
[ "def", "usage", "(", ")", ":", "l_bracket", "=", "clr", ".", "stringc", "(", "\"[\"", ",", "\"dark gray\"", ")", "r_bracket", "=", "clr", ".", "stringc", "(", "\"]\"", ",", "\"dark gray\"", ")", "pipe", "=", "clr", ".", "stringc", "(", "\"|\"", ",", "\"dark gray\"", ")", "app_name", "=", "clr", ".", "stringc", "(", "\"%prog\"", ",", "\"bright blue\"", ")", "commands", "=", "clr", ".", "stringc", "(", "\"{0}\"", ".", "format", "(", "pipe", ")", ".", "join", "(", "c", ".", "VALID_ACTIONS", ")", ",", "\"normal\"", ")", "help", "=", "clr", ".", "stringc", "(", "\"--help\"", ",", "\"green\"", ")", "options", "=", "clr", ".", "stringc", "(", "\"options\"", ",", "\"yellow\"", ")", "guide", "=", "\"\\n\\n\"", "for", "action", "in", "c", ".", "VALID_ACTIONS", ":", "guide", "+=", "command_name", "(", "app_name", ",", "action", ",", "c", ".", "MESSAGES", "[", "\"help_\"", "+", "action", "]", ")", "# remove the last line break", "guide", "=", "guide", "[", ":", "-", "1", "]", "return", "\"{0} {1}{2}{3} {1}{4}{3} {1}{5}{3}\\n{6}\"", ".", "format", "(", "app_name", ",", "l_bracket", ",", "commands", ",", "r_bracket", ",", "help", ",", "options", ",", "guide", ")" ]
Return the usage for the help command.
[ "Return", "the", "usage", "for", "the", "help", "command", "." ]
70cd98d7a23d36c56f4e713ea820cfb4c485c81c
https://github.com/nickjj/ansigenome/blob/70cd98d7a23d36c56f4e713ea820cfb4c485c81c/ansigenome/ui.py#L39-L66
-1
251,401
nickjj/ansigenome
ansigenome/ui.py
epilogue
def epilogue(app_name): """ Return the epilogue for the help command. """ app_name = clr.stringc(app_name, "bright blue") command = clr.stringc("command", "cyan") help = clr.stringc("--help", "green") return "\n%s %s %s for more info on a command\n" % (app_name, command, help)
python
def epilogue(app_name): """ Return the epilogue for the help command. """ app_name = clr.stringc(app_name, "bright blue") command = clr.stringc("command", "cyan") help = clr.stringc("--help", "green") return "\n%s %s %s for more info on a command\n" % (app_name, command, help)
[ "def", "epilogue", "(", "app_name", ")", ":", "app_name", "=", "clr", ".", "stringc", "(", "app_name", ",", "\"bright blue\"", ")", "command", "=", "clr", ".", "stringc", "(", "\"command\"", ",", "\"cyan\"", ")", "help", "=", "clr", ".", "stringc", "(", "\"--help\"", ",", "\"green\"", ")", "return", "\"\\n%s %s %s for more info on a command\\n\"", "%", "(", "app_name", ",", "command", ",", "help", ")" ]
Return the epilogue for the help command.
[ "Return", "the", "epilogue", "for", "the", "help", "command", "." ]
70cd98d7a23d36c56f4e713ea820cfb4c485c81c
https://github.com/nickjj/ansigenome/blob/70cd98d7a23d36c56f4e713ea820cfb4c485c81c/ansigenome/ui.py#L69-L78
-1
251,402
nickjj/ansigenome
ansigenome/ui.py
command_name
def command_name(app_name, command, help_text): """ Return a snippet of help text for this command. """ command = clr.stringc(command, "cyan") help = clr.stringc("--help", "green") return "{0} {1} {2}\n{3}\n\n".format(app_name, command, help, help_text)
python
def command_name(app_name, command, help_text): """ Return a snippet of help text for this command. """ command = clr.stringc(command, "cyan") help = clr.stringc("--help", "green") return "{0} {1} {2}\n{3}\n\n".format(app_name, command, help, help_text)
[ "def", "command_name", "(", "app_name", ",", "command", ",", "help_text", ")", ":", "command", "=", "clr", ".", "stringc", "(", "command", ",", "\"cyan\"", ")", "help", "=", "clr", ".", "stringc", "(", "\"--help\"", ",", "\"green\"", ")", "return", "\"{0} {1} {2}\\n{3}\\n\\n\"", ".", "format", "(", "app_name", ",", "command", ",", "help", ",", "help_text", ")" ]
Return a snippet of help text for this command.
[ "Return", "a", "snippet", "of", "help", "text", "for", "this", "command", "." ]
70cd98d7a23d36c56f4e713ea820cfb4c485c81c
https://github.com/nickjj/ansigenome/blob/70cd98d7a23d36c56f4e713ea820cfb4c485c81c/ansigenome/ui.py#L81-L89
-1
251,403
nickjj/ansigenome
ansigenome/ui.py
role
def role(name, report, role_name_length): """ Print the role information. """ pad_role_name_by = 11 + role_name_length defaults = field_value(report["total_defaults"], "defaults", "blue", 16) facts = field_value(report["total_facts"], "facts", "purple", 16) files = field_value(report["total_files"], "files", "dark gray", 16) lines = field_value(report["total_lines"], "lines", "normal", 16) print "{0:<{1}} {2} {3} {4} {5}".format( clr.stringc(name, c.LOG_COLOR[report["state"]]), pad_role_name_by, defaults, facts, files, lines)
python
def role(name, report, role_name_length): """ Print the role information. """ pad_role_name_by = 11 + role_name_length defaults = field_value(report["total_defaults"], "defaults", "blue", 16) facts = field_value(report["total_facts"], "facts", "purple", 16) files = field_value(report["total_files"], "files", "dark gray", 16) lines = field_value(report["total_lines"], "lines", "normal", 16) print "{0:<{1}} {2} {3} {4} {5}".format( clr.stringc(name, c.LOG_COLOR[report["state"]]), pad_role_name_by, defaults, facts, files, lines)
[ "def", "role", "(", "name", ",", "report", ",", "role_name_length", ")", ":", "pad_role_name_by", "=", "11", "+", "role_name_length", "defaults", "=", "field_value", "(", "report", "[", "\"total_defaults\"", "]", ",", "\"defaults\"", ",", "\"blue\"", ",", "16", ")", "facts", "=", "field_value", "(", "report", "[", "\"total_facts\"", "]", ",", "\"facts\"", ",", "\"purple\"", ",", "16", ")", "files", "=", "field_value", "(", "report", "[", "\"total_files\"", "]", ",", "\"files\"", ",", "\"dark gray\"", ",", "16", ")", "lines", "=", "field_value", "(", "report", "[", "\"total_lines\"", "]", ",", "\"lines\"", ",", "\"normal\"", ",", "16", ")", "print", "\"{0:<{1}} {2} {3} {4} {5}\"", ".", "format", "(", "clr", ".", "stringc", "(", "name", ",", "c", ".", "LOG_COLOR", "[", "report", "[", "\"state\"", "]", "]", ")", ",", "pad_role_name_by", ",", "defaults", ",", "facts", ",", "files", ",", "lines", ")" ]
Print the role information.
[ "Print", "the", "role", "information", "." ]
70cd98d7a23d36c56f4e713ea820cfb4c485c81c
https://github.com/nickjj/ansigenome/blob/70cd98d7a23d36c56f4e713ea820cfb4c485c81c/ansigenome/ui.py#L92-L105
-1
251,404
nickjj/ansigenome
ansigenome/ui.py
field_value
def field_value(key, label, color, padding): """ Print a specific field's stats. """ if not clr.has_colors and padding > 0: padding = 7 if color == "bright gray" or color == "dark gray": bright_prefix = "" else: bright_prefix = "bright " field = clr.stringc(key, "{0}{1}".format(bright_prefix, color)) field_label = clr.stringc(label, color) return "{0:>{1}} {2}".format(field, padding, field_label)
python
def field_value(key, label, color, padding): """ Print a specific field's stats. """ if not clr.has_colors and padding > 0: padding = 7 if color == "bright gray" or color == "dark gray": bright_prefix = "" else: bright_prefix = "bright " field = clr.stringc(key, "{0}{1}".format(bright_prefix, color)) field_label = clr.stringc(label, color) return "{0:>{1}} {2}".format(field, padding, field_label)
[ "def", "field_value", "(", "key", ",", "label", ",", "color", ",", "padding", ")", ":", "if", "not", "clr", ".", "has_colors", "and", "padding", ">", "0", ":", "padding", "=", "7", "if", "color", "==", "\"bright gray\"", "or", "color", "==", "\"dark gray\"", ":", "bright_prefix", "=", "\"\"", "else", ":", "bright_prefix", "=", "\"bright \"", "field", "=", "clr", ".", "stringc", "(", "key", ",", "\"{0}{1}\"", ".", "format", "(", "bright_prefix", ",", "color", ")", ")", "field_label", "=", "clr", ".", "stringc", "(", "label", ",", "color", ")", "return", "\"{0:>{1}} {2}\"", ".", "format", "(", "field", ",", "padding", ",", "field_label", ")" ]
Print a specific field's stats.
[ "Print", "a", "specific", "field", "s", "stats", "." ]
70cd98d7a23d36c56f4e713ea820cfb4c485c81c
https://github.com/nickjj/ansigenome/blob/70cd98d7a23d36c56f4e713ea820cfb4c485c81c/ansigenome/ui.py#L108-L123
-1
251,405
nickjj/ansigenome
ansigenome/ui.py
totals
def totals(report, total_roles, role_name_length): """ Print the totals for each role's stats. """ roles_len_string = len(str(total_roles)) roles_label_len = 6 # "r" "o" "l" "e" "s" " " if clr.has_colors: roles_count_offset = 22 else: roles_count_offset = 13 roles_count_offset += role_name_length # no idea honestly but it fixes the formatting # it will probably break the formatting if you have 100+ roles if roles_len_string > 1: roles_count_offset -= 2 pad_roles_by = roles_count_offset + roles_len_string + roles_label_len roles = field_value(total_roles, "roles", "normal", 0) defaults = field_value(report["defaults"], "defaults", "blue", 16) facts = field_value(report["facts"], "facts", "purple", 16) files = field_value(report["files"], "files", "dark gray", 16) lines = field_value(report["lines"], "lines", "normal", 16) print "".join(clr.stringc("-", "black") * 79) print "{0} {2:>{1}} {3} {4} {5}".format( roles, pad_roles_by, defaults, facts, files, lines)
python
def totals(report, total_roles, role_name_length): """ Print the totals for each role's stats. """ roles_len_string = len(str(total_roles)) roles_label_len = 6 # "r" "o" "l" "e" "s" " " if clr.has_colors: roles_count_offset = 22 else: roles_count_offset = 13 roles_count_offset += role_name_length # no idea honestly but it fixes the formatting # it will probably break the formatting if you have 100+ roles if roles_len_string > 1: roles_count_offset -= 2 pad_roles_by = roles_count_offset + roles_len_string + roles_label_len roles = field_value(total_roles, "roles", "normal", 0) defaults = field_value(report["defaults"], "defaults", "blue", 16) facts = field_value(report["facts"], "facts", "purple", 16) files = field_value(report["files"], "files", "dark gray", 16) lines = field_value(report["lines"], "lines", "normal", 16) print "".join(clr.stringc("-", "black") * 79) print "{0} {2:>{1}} {3} {4} {5}".format( roles, pad_roles_by, defaults, facts, files, lines)
[ "def", "totals", "(", "report", ",", "total_roles", ",", "role_name_length", ")", ":", "roles_len_string", "=", "len", "(", "str", "(", "total_roles", ")", ")", "roles_label_len", "=", "6", "# \"r\" \"o\" \"l\" \"e\" \"s\" \" \"", "if", "clr", ".", "has_colors", ":", "roles_count_offset", "=", "22", "else", ":", "roles_count_offset", "=", "13", "roles_count_offset", "+=", "role_name_length", "# no idea honestly but it fixes the formatting", "# it will probably break the formatting if you have 100+ roles", "if", "roles_len_string", ">", "1", ":", "roles_count_offset", "-=", "2", "pad_roles_by", "=", "roles_count_offset", "+", "roles_len_string", "+", "roles_label_len", "roles", "=", "field_value", "(", "total_roles", ",", "\"roles\"", ",", "\"normal\"", ",", "0", ")", "defaults", "=", "field_value", "(", "report", "[", "\"defaults\"", "]", ",", "\"defaults\"", ",", "\"blue\"", ",", "16", ")", "facts", "=", "field_value", "(", "report", "[", "\"facts\"", "]", ",", "\"facts\"", ",", "\"purple\"", ",", "16", ")", "files", "=", "field_value", "(", "report", "[", "\"files\"", "]", ",", "\"files\"", ",", "\"dark gray\"", ",", "16", ")", "lines", "=", "field_value", "(", "report", "[", "\"lines\"", "]", ",", "\"lines\"", ",", "\"normal\"", ",", "16", ")", "print", "\"\"", ".", "join", "(", "clr", ".", "stringc", "(", "\"-\"", ",", "\"black\"", ")", "*", "79", ")", "print", "\"{0} {2:>{1}} {3} {4} {5}\"", ".", "format", "(", "roles", ",", "pad_roles_by", ",", "defaults", ",", "facts", ",", "files", ",", "lines", ")" ]
Print the totals for each role's stats.
[ "Print", "the", "totals", "for", "each", "role", "s", "stats", "." ]
70cd98d7a23d36c56f4e713ea820cfb4c485c81c
https://github.com/nickjj/ansigenome/blob/70cd98d7a23d36c56f4e713ea820cfb4c485c81c/ansigenome/ui.py#L126-L155
-1
251,406
nickjj/ansigenome
ansigenome/ui.py
gen_totals
def gen_totals(report, file_type): """ Print the gen totals. """ label = clr.stringc(file_type + " files ", "bright purple") ok = field_value(report["ok_role"], "ok", c.LOG_COLOR["ok"], 0) skipped = field_value(report["skipped_role"], "skipped", c.LOG_COLOR["skipped"], 16) changed = field_value(report["changed_role"], "changed", c.LOG_COLOR["changed"], 16) # missing_meta = field_value(report["missing_meta_role"], # "missing meta(s)", # c.LOG_COLOR["missing_meta"], 16) # print "\n{0} {1} {2} {3}".format(ok, skipped, changed, missing_meta) print "\n{0} {1} {2} {3}".format(label, ok, skipped, changed)
python
def gen_totals(report, file_type): """ Print the gen totals. """ label = clr.stringc(file_type + " files ", "bright purple") ok = field_value(report["ok_role"], "ok", c.LOG_COLOR["ok"], 0) skipped = field_value(report["skipped_role"], "skipped", c.LOG_COLOR["skipped"], 16) changed = field_value(report["changed_role"], "changed", c.LOG_COLOR["changed"], 16) # missing_meta = field_value(report["missing_meta_role"], # "missing meta(s)", # c.LOG_COLOR["missing_meta"], 16) # print "\n{0} {1} {2} {3}".format(ok, skipped, changed, missing_meta) print "\n{0} {1} {2} {3}".format(label, ok, skipped, changed)
[ "def", "gen_totals", "(", "report", ",", "file_type", ")", ":", "label", "=", "clr", ".", "stringc", "(", "file_type", "+", "\" files \"", ",", "\"bright purple\"", ")", "ok", "=", "field_value", "(", "report", "[", "\"ok_role\"", "]", ",", "\"ok\"", ",", "c", ".", "LOG_COLOR", "[", "\"ok\"", "]", ",", "0", ")", "skipped", "=", "field_value", "(", "report", "[", "\"skipped_role\"", "]", ",", "\"skipped\"", ",", "c", ".", "LOG_COLOR", "[", "\"skipped\"", "]", ",", "16", ")", "changed", "=", "field_value", "(", "report", "[", "\"changed_role\"", "]", ",", "\"changed\"", ",", "c", ".", "LOG_COLOR", "[", "\"changed\"", "]", ",", "16", ")", "# missing_meta = field_value(report[\"missing_meta_role\"],", "# \"missing meta(s)\",", "# c.LOG_COLOR[\"missing_meta\"], 16)", "# print \"\\n{0} {1} {2} {3}\".format(ok, skipped, changed, missing_meta)", "print", "\"\\n{0} {1} {2} {3}\"", ".", "format", "(", "label", ",", "ok", ",", "skipped", ",", "changed", ")" ]
Print the gen totals.
[ "Print", "the", "gen", "totals", "." ]
70cd98d7a23d36c56f4e713ea820cfb4c485c81c
https://github.com/nickjj/ansigenome/blob/70cd98d7a23d36c56f4e713ea820cfb4c485c81c/ansigenome/ui.py#L158-L176
-1
251,407
nickjj/ansigenome
ansigenome/ui.py
scan_totals
def scan_totals(report): """ Print the scan totals. """ ok = field_value(report["ok_role"], "ok", c.LOG_COLOR["ok"], 0) missing_readme = field_value(report["missing_readme_role"], "missing readme(s)", c.LOG_COLOR["missing_readme"], 16) missing_meta = field_value(report["missing_meta_role"], "missing meta(s)", c.LOG_COLOR["missing_meta"], 16) print "\n{0} {1} {2}".format(ok, missing_readme, missing_meta)
python
def scan_totals(report): """ Print the scan totals. """ ok = field_value(report["ok_role"], "ok", c.LOG_COLOR["ok"], 0) missing_readme = field_value(report["missing_readme_role"], "missing readme(s)", c.LOG_COLOR["missing_readme"], 16) missing_meta = field_value(report["missing_meta_role"], "missing meta(s)", c.LOG_COLOR["missing_meta"], 16) print "\n{0} {1} {2}".format(ok, missing_readme, missing_meta)
[ "def", "scan_totals", "(", "report", ")", ":", "ok", "=", "field_value", "(", "report", "[", "\"ok_role\"", "]", ",", "\"ok\"", ",", "c", ".", "LOG_COLOR", "[", "\"ok\"", "]", ",", "0", ")", "missing_readme", "=", "field_value", "(", "report", "[", "\"missing_readme_role\"", "]", ",", "\"missing readme(s)\"", ",", "c", ".", "LOG_COLOR", "[", "\"missing_readme\"", "]", ",", "16", ")", "missing_meta", "=", "field_value", "(", "report", "[", "\"missing_meta_role\"", "]", ",", "\"missing meta(s)\"", ",", "c", ".", "LOG_COLOR", "[", "\"missing_meta\"", "]", ",", "16", ")", "print", "\"\\n{0} {1} {2}\"", ".", "format", "(", "ok", ",", "missing_readme", ",", "missing_meta", ")" ]
Print the scan totals.
[ "Print", "the", "scan", "totals", "." ]
70cd98d7a23d36c56f4e713ea820cfb4c485c81c
https://github.com/nickjj/ansigenome/blob/70cd98d7a23d36c56f4e713ea820cfb4c485c81c/ansigenome/ui.py#L179-L192
-1
251,408
nickjj/ansigenome
ansigenome/color.py
has_colors
def has_colors(stream): """ Determine if the terminal supports ansi colors. """ if not hasattr(stream, "isatty"): return False if not stream.isatty(): return False # auto color only on TTYs try: import curses curses.setupterm() return curses.tigetnum("colors") > 2 except: return False
python
def has_colors(stream): """ Determine if the terminal supports ansi colors. """ if not hasattr(stream, "isatty"): return False if not stream.isatty(): return False # auto color only on TTYs try: import curses curses.setupterm() return curses.tigetnum("colors") > 2 except: return False
[ "def", "has_colors", "(", "stream", ")", ":", "if", "not", "hasattr", "(", "stream", ",", "\"isatty\"", ")", ":", "return", "False", "if", "not", "stream", ".", "isatty", "(", ")", ":", "return", "False", "# auto color only on TTYs", "try", ":", "import", "curses", "curses", ".", "setupterm", "(", ")", "return", "curses", ".", "tigetnum", "(", "\"colors\"", ")", ">", "2", "except", ":", "return", "False" ]
Determine if the terminal supports ansi colors.
[ "Determine", "if", "the", "terminal", "supports", "ansi", "colors", "." ]
70cd98d7a23d36c56f4e713ea820cfb4c485c81c
https://github.com/nickjj/ansigenome/blob/70cd98d7a23d36c56f4e713ea820cfb4c485c81c/ansigenome/color.py#L4-L17
-1
251,409
nickjj/ansigenome
ansigenome/color.py
stringc
def stringc(text, color): """ Return a string with terminal colors. """ if has_colors: text = str(text) return "\033["+codeCodes[color]+"m"+text+"\033[0m" else: return text
python
def stringc(text, color): """ Return a string with terminal colors. """ if has_colors: text = str(text) return "\033["+codeCodes[color]+"m"+text+"\033[0m" else: return text
[ "def", "stringc", "(", "text", ",", "color", ")", ":", "if", "has_colors", ":", "text", "=", "str", "(", "text", ")", "return", "\"\\033[\"", "+", "codeCodes", "[", "color", "]", "+", "\"m\"", "+", "text", "+", "\"\\033[0m\"", "else", ":", "return", "text" ]
Return a string with terminal colors.
[ "Return", "a", "string", "with", "terminal", "colors", "." ]
70cd98d7a23d36c56f4e713ea820cfb4c485c81c
https://github.com/nickjj/ansigenome/blob/70cd98d7a23d36c56f4e713ea820cfb4c485c81c/ansigenome/color.py#L47-L56
-1
251,410
nickjj/ansigenome
ansigenome/run.py
Run.execute_command
def execute_command(self): """ Execute the shell command. """ stderr = "" role_count = 0 for role in utils.roles_dict(self.roles_path): self.command = self.command.replace("%role_name", role) (_, err) = utils.capture_shell("cd {0} && {1}". format(os.path.join( self.roles_path, role), self.command)) stderr = err role_count += 1 utils.exit_if_no_roles(role_count, self.roles_path) if len(stderr) > 0: ui.error(c.MESSAGES["run_error"], stderr[:-1]) else: if not self.config["options_quiet"]: ui.ok(c.MESSAGES["run_success"].replace( "%role_count", str(role_count)), self.options.command)
python
def execute_command(self): """ Execute the shell command. """ stderr = "" role_count = 0 for role in utils.roles_dict(self.roles_path): self.command = self.command.replace("%role_name", role) (_, err) = utils.capture_shell("cd {0} && {1}". format(os.path.join( self.roles_path, role), self.command)) stderr = err role_count += 1 utils.exit_if_no_roles(role_count, self.roles_path) if len(stderr) > 0: ui.error(c.MESSAGES["run_error"], stderr[:-1]) else: if not self.config["options_quiet"]: ui.ok(c.MESSAGES["run_success"].replace( "%role_count", str(role_count)), self.options.command)
[ "def", "execute_command", "(", "self", ")", ":", "stderr", "=", "\"\"", "role_count", "=", "0", "for", "role", "in", "utils", ".", "roles_dict", "(", "self", ".", "roles_path", ")", ":", "self", ".", "command", "=", "self", ".", "command", ".", "replace", "(", "\"%role_name\"", ",", "role", ")", "(", "_", ",", "err", ")", "=", "utils", ".", "capture_shell", "(", "\"cd {0} && {1}\"", ".", "format", "(", "os", ".", "path", ".", "join", "(", "self", ".", "roles_path", ",", "role", ")", ",", "self", ".", "command", ")", ")", "stderr", "=", "err", "role_count", "+=", "1", "utils", ".", "exit_if_no_roles", "(", "role_count", ",", "self", ".", "roles_path", ")", "if", "len", "(", "stderr", ")", ">", "0", ":", "ui", ".", "error", "(", "c", ".", "MESSAGES", "[", "\"run_error\"", "]", ",", "stderr", "[", ":", "-", "1", "]", ")", "else", ":", "if", "not", "self", ".", "config", "[", "\"options_quiet\"", "]", ":", "ui", ".", "ok", "(", "c", ".", "MESSAGES", "[", "\"run_success\"", "]", ".", "replace", "(", "\"%role_count\"", ",", "str", "(", "role_count", ")", ")", ",", "self", ".", "options", ".", "command", ")" ]
Execute the shell command.
[ "Execute", "the", "shell", "command", "." ]
70cd98d7a23d36c56f4e713ea820cfb4c485c81c
https://github.com/nickjj/ansigenome/blob/70cd98d7a23d36c56f4e713ea820cfb4c485c81c/ansigenome/run.py#L17-L40
-1
251,411
nickjj/ansigenome
ansigenome/utils.py
mkdir_p
def mkdir_p(path): """ Emulate the behavior of mkdir -p. """ try: os.makedirs(path) except OSError as err: if err.errno == errno.EEXIST and os.path.isdir(path): pass else: ui.error(c.MESSAGES["path_unmakable"], err) sys.exit(1)
python
def mkdir_p(path): """ Emulate the behavior of mkdir -p. """ try: os.makedirs(path) except OSError as err: if err.errno == errno.EEXIST and os.path.isdir(path): pass else: ui.error(c.MESSAGES["path_unmakable"], err) sys.exit(1)
[ "def", "mkdir_p", "(", "path", ")", ":", "try", ":", "os", ".", "makedirs", "(", "path", ")", "except", "OSError", "as", "err", ":", "if", "err", ".", "errno", "==", "errno", ".", "EEXIST", "and", "os", ".", "path", ".", "isdir", "(", "path", ")", ":", "pass", "else", ":", "ui", ".", "error", "(", "c", ".", "MESSAGES", "[", "\"path_unmakable\"", "]", ",", "err", ")", "sys", ".", "exit", "(", "1", ")" ]
Emulate the behavior of mkdir -p.
[ "Emulate", "the", "behavior", "of", "mkdir", "-", "p", "." ]
70cd98d7a23d36c56f4e713ea820cfb4c485c81c
https://github.com/nickjj/ansigenome/blob/70cd98d7a23d36c56f4e713ea820cfb4c485c81c/ansigenome/utils.py#L35-L46
-1
251,412
nickjj/ansigenome
ansigenome/utils.py
string_to_file
def string_to_file(path, input): """ Write a file from a given string. """ mkdir_p(os.path.dirname(path)) with codecs.open(path, "w+", "UTF-8") as file: file.write(input)
python
def string_to_file(path, input): """ Write a file from a given string. """ mkdir_p(os.path.dirname(path)) with codecs.open(path, "w+", "UTF-8") as file: file.write(input)
[ "def", "string_to_file", "(", "path", ",", "input", ")", ":", "mkdir_p", "(", "os", ".", "path", ".", "dirname", "(", "path", ")", ")", "with", "codecs", ".", "open", "(", "path", ",", "\"w+\"", ",", "\"UTF-8\"", ")", "as", "file", ":", "file", ".", "write", "(", "input", ")" ]
Write a file from a given string.
[ "Write", "a", "file", "from", "a", "given", "string", "." ]
70cd98d7a23d36c56f4e713ea820cfb4c485c81c
https://github.com/nickjj/ansigenome/blob/70cd98d7a23d36c56f4e713ea820cfb4c485c81c/ansigenome/utils.py#L49-L56
-1
251,413
nickjj/ansigenome
ansigenome/utils.py
file_to_string
def file_to_string(path): """ Return the contents of a file when given a path. """ if not os.path.exists(path): ui.error(c.MESSAGES["path_missing"], path) sys.exit(1) with codecs.open(path, "r", "UTF-8") as contents: return contents.read()
python
def file_to_string(path): """ Return the contents of a file when given a path. """ if not os.path.exists(path): ui.error(c.MESSAGES["path_missing"], path) sys.exit(1) with codecs.open(path, "r", "UTF-8") as contents: return contents.read()
[ "def", "file_to_string", "(", "path", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "ui", ".", "error", "(", "c", ".", "MESSAGES", "[", "\"path_missing\"", "]", ",", "path", ")", "sys", ".", "exit", "(", "1", ")", "with", "codecs", ".", "open", "(", "path", ",", "\"r\"", ",", "\"UTF-8\"", ")", "as", "contents", ":", "return", "contents", ".", "read", "(", ")" ]
Return the contents of a file when given a path.
[ "Return", "the", "contents", "of", "a", "file", "when", "given", "a", "path", "." ]
70cd98d7a23d36c56f4e713ea820cfb4c485c81c
https://github.com/nickjj/ansigenome/blob/70cd98d7a23d36c56f4e713ea820cfb4c485c81c/ansigenome/utils.py#L59-L68
-1
251,414
nickjj/ansigenome
ansigenome/utils.py
file_to_list
def file_to_list(path): """ Return the contents of a file as a list when given a path. """ if not os.path.exists(path): ui.error(c.MESSAGES["path_missing"], path) sys.exit(1) with codecs.open(path, "r", "UTF-8") as contents: lines = contents.read().splitlines() return lines
python
def file_to_list(path): """ Return the contents of a file as a list when given a path. """ if not os.path.exists(path): ui.error(c.MESSAGES["path_missing"], path) sys.exit(1) with codecs.open(path, "r", "UTF-8") as contents: lines = contents.read().splitlines() return lines
[ "def", "file_to_list", "(", "path", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "ui", ".", "error", "(", "c", ".", "MESSAGES", "[", "\"path_missing\"", "]", ",", "path", ")", "sys", ".", "exit", "(", "1", ")", "with", "codecs", ".", "open", "(", "path", ",", "\"r\"", ",", "\"UTF-8\"", ")", "as", "contents", ":", "lines", "=", "contents", ".", "read", "(", ")", ".", "splitlines", "(", ")", "return", "lines" ]
Return the contents of a file as a list when given a path.
[ "Return", "the", "contents", "of", "a", "file", "as", "a", "list", "when", "given", "a", "path", "." ]
70cd98d7a23d36c56f4e713ea820cfb4c485c81c
https://github.com/nickjj/ansigenome/blob/70cd98d7a23d36c56f4e713ea820cfb4c485c81c/ansigenome/utils.py#L71-L82
-1
251,415
nickjj/ansigenome
ansigenome/utils.py
url_to_string
def url_to_string(url): """ Return the contents of a web site url as a string. """ try: page = urllib2.urlopen(url) except (urllib2.HTTPError, urllib2.URLError) as err: ui.error(c.MESSAGES["url_unreachable"], err) sys.exit(1) return page
python
def url_to_string(url): """ Return the contents of a web site url as a string. """ try: page = urllib2.urlopen(url) except (urllib2.HTTPError, urllib2.URLError) as err: ui.error(c.MESSAGES["url_unreachable"], err) sys.exit(1) return page
[ "def", "url_to_string", "(", "url", ")", ":", "try", ":", "page", "=", "urllib2", ".", "urlopen", "(", "url", ")", "except", "(", "urllib2", ".", "HTTPError", ",", "urllib2", ".", "URLError", ")", "as", "err", ":", "ui", ".", "error", "(", "c", ".", "MESSAGES", "[", "\"url_unreachable\"", "]", ",", "err", ")", "sys", ".", "exit", "(", "1", ")", "return", "page" ]
Return the contents of a web site url as a string.
[ "Return", "the", "contents", "of", "a", "web", "site", "url", "as", "a", "string", "." ]
70cd98d7a23d36c56f4e713ea820cfb4c485c81c
https://github.com/nickjj/ansigenome/blob/70cd98d7a23d36c56f4e713ea820cfb4c485c81c/ansigenome/utils.py#L92-L102
-1
251,416
nickjj/ansigenome
ansigenome/utils.py
template
def template(path, extend_path, out): """ Return a jinja2 template instance with extends support. """ files = [] # add the "extender" template when it exists if len(extend_path) > 0: # determine the base readme path base_path = os.path.dirname(extend_path) new_base_path = os.path.join(base_path, "README.{0}.j2".format(out)) if os.path.exists(new_base_path): path = new_base_path if os.path.exists(extend_path): files = [path, extend_path] else: ui.error(c.MESSAGES["template_extender_missing"]) ui.error(extend_path) sys.exit(1) else: files = [path] try: # Use the subclassed relative environment class env = RelEnvironment(trim_blocks=True) # Add a unique dict filter, by key. # DEPRECATION WARNING: This is only used for backwards compatibility, # please use the unique filter instead. def unique_dict(items, key): return {v[key]: v for v in items}.values() env.filters["unique_dict"] = unique_dict def unique(a): # Don’t use the following commented out optimization which is used # in ansible/lib/ansible/plugins/filter/mathstuff.py in Ansigenome # as it resorts the role dependencies: # if isinstance(a,collections.Hashable): # c = set(a) c = [] for x in a: if x not in c: c.append(x) return c env.filters["unique"] = unique # create a dictionary of templates templates = dict( (name, codecs.open(name, "rb", 'UTF-8').read()) for name in files) env.loader = DictLoader(templates) # return the final result (the last template in the list) return env.get_template(files[len(files) - 1]) except Exception as err: ui.error(c.MESSAGES["template_error"], err) sys.exit(1)
python
def template(path, extend_path, out): """ Return a jinja2 template instance with extends support. """ files = [] # add the "extender" template when it exists if len(extend_path) > 0: # determine the base readme path base_path = os.path.dirname(extend_path) new_base_path = os.path.join(base_path, "README.{0}.j2".format(out)) if os.path.exists(new_base_path): path = new_base_path if os.path.exists(extend_path): files = [path, extend_path] else: ui.error(c.MESSAGES["template_extender_missing"]) ui.error(extend_path) sys.exit(1) else: files = [path] try: # Use the subclassed relative environment class env = RelEnvironment(trim_blocks=True) # Add a unique dict filter, by key. # DEPRECATION WARNING: This is only used for backwards compatibility, # please use the unique filter instead. def unique_dict(items, key): return {v[key]: v for v in items}.values() env.filters["unique_dict"] = unique_dict def unique(a): # Don’t use the following commented out optimization which is used # in ansible/lib/ansible/plugins/filter/mathstuff.py in Ansigenome # as it resorts the role dependencies: # if isinstance(a,collections.Hashable): # c = set(a) c = [] for x in a: if x not in c: c.append(x) return c env.filters["unique"] = unique # create a dictionary of templates templates = dict( (name, codecs.open(name, "rb", 'UTF-8').read()) for name in files) env.loader = DictLoader(templates) # return the final result (the last template in the list) return env.get_template(files[len(files) - 1]) except Exception as err: ui.error(c.MESSAGES["template_error"], err) sys.exit(1)
[ "def", "template", "(", "path", ",", "extend_path", ",", "out", ")", ":", "files", "=", "[", "]", "# add the \"extender\" template when it exists", "if", "len", "(", "extend_path", ")", ">", "0", ":", "# determine the base readme path", "base_path", "=", "os", ".", "path", ".", "dirname", "(", "extend_path", ")", "new_base_path", "=", "os", ".", "path", ".", "join", "(", "base_path", ",", "\"README.{0}.j2\"", ".", "format", "(", "out", ")", ")", "if", "os", ".", "path", ".", "exists", "(", "new_base_path", ")", ":", "path", "=", "new_base_path", "if", "os", ".", "path", ".", "exists", "(", "extend_path", ")", ":", "files", "=", "[", "path", ",", "extend_path", "]", "else", ":", "ui", ".", "error", "(", "c", ".", "MESSAGES", "[", "\"template_extender_missing\"", "]", ")", "ui", ".", "error", "(", "extend_path", ")", "sys", ".", "exit", "(", "1", ")", "else", ":", "files", "=", "[", "path", "]", "try", ":", "# Use the subclassed relative environment class", "env", "=", "RelEnvironment", "(", "trim_blocks", "=", "True", ")", "# Add a unique dict filter, by key.", "# DEPRECATION WARNING: This is only used for backwards compatibility,", "# please use the unique filter instead.", "def", "unique_dict", "(", "items", ",", "key", ")", ":", "return", "{", "v", "[", "key", "]", ":", "v", "for", "v", "in", "items", "}", ".", "values", "(", ")", "env", ".", "filters", "[", "\"unique_dict\"", "]", "=", "unique_dict", "def", "unique", "(", "a", ")", ":", "# Don’t use the following commented out optimization which is used", "# in ansible/lib/ansible/plugins/filter/mathstuff.py in Ansigenome", "# as it resorts the role dependencies:", "# if isinstance(a,collections.Hashable):", "# c = set(a)", "c", "=", "[", "]", "for", "x", "in", "a", ":", "if", "x", "not", "in", "c", ":", "c", ".", "append", "(", "x", ")", "return", "c", "env", ".", "filters", "[", "\"unique\"", "]", "=", "unique", "# create a dictionary of templates", "templates", "=", "dict", "(", "(", "name", ",", "codecs", ".", "open", "(", "name", ",", "\"rb\"", ",", "'UTF-8'", ")", ".", "read", "(", ")", ")", "for", "name", "in", "files", ")", "env", ".", "loader", "=", "DictLoader", "(", "templates", ")", "# return the final result (the last template in the list)", "return", "env", ".", "get_template", "(", "files", "[", "len", "(", "files", ")", "-", "1", "]", ")", "except", "Exception", "as", "err", ":", "ui", ".", "error", "(", "c", ".", "MESSAGES", "[", "\"template_error\"", "]", ",", "err", ")", "sys", ".", "exit", "(", "1", ")" ]
Return a jinja2 template instance with extends support.
[ "Return", "a", "jinja2", "template", "instance", "with", "extends", "support", "." ]
70cd98d7a23d36c56f4e713ea820cfb4c485c81c
https://github.com/nickjj/ansigenome/blob/70cd98d7a23d36c56f4e713ea820cfb4c485c81c/ansigenome/utils.py#L105-L167
-1
251,417
nickjj/ansigenome
ansigenome/utils.py
files_in_path
def files_in_path(path): """ Return a list of all files in a path but exclude git folders. """ aggregated_files = [] for dir_, _, files in os.walk(path): for file in files: relative_dir = os.path.relpath(dir_, path) if ".git" not in relative_dir: relative_file = os.path.join(relative_dir, file) aggregated_files.append(relative_file) return aggregated_files
python
def files_in_path(path): """ Return a list of all files in a path but exclude git folders. """ aggregated_files = [] for dir_, _, files in os.walk(path): for file in files: relative_dir = os.path.relpath(dir_, path) if ".git" not in relative_dir: relative_file = os.path.join(relative_dir, file) aggregated_files.append(relative_file) return aggregated_files
[ "def", "files_in_path", "(", "path", ")", ":", "aggregated_files", "=", "[", "]", "for", "dir_", ",", "_", ",", "files", "in", "os", ".", "walk", "(", "path", ")", ":", "for", "file", "in", "files", ":", "relative_dir", "=", "os", ".", "path", ".", "relpath", "(", "dir_", ",", "path", ")", "if", "\".git\"", "not", "in", "relative_dir", ":", "relative_file", "=", "os", ".", "path", ".", "join", "(", "relative_dir", ",", "file", ")", "aggregated_files", ".", "append", "(", "relative_file", ")", "return", "aggregated_files" ]
Return a list of all files in a path but exclude git folders.
[ "Return", "a", "list", "of", "all", "files", "in", "a", "path", "but", "exclude", "git", "folders", "." ]
70cd98d7a23d36c56f4e713ea820cfb4c485c81c
https://github.com/nickjj/ansigenome/blob/70cd98d7a23d36c56f4e713ea820cfb4c485c81c/ansigenome/utils.py#L170-L184
-1
251,418
nickjj/ansigenome
ansigenome/utils.py
exit_if_path_not_found
def exit_if_path_not_found(path): """ Exit if the path is not found. """ if not os.path.exists(path): ui.error(c.MESSAGES["path_missing"], path) sys.exit(1)
python
def exit_if_path_not_found(path): """ Exit if the path is not found. """ if not os.path.exists(path): ui.error(c.MESSAGES["path_missing"], path) sys.exit(1)
[ "def", "exit_if_path_not_found", "(", "path", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "ui", ".", "error", "(", "c", ".", "MESSAGES", "[", "\"path_missing\"", "]", ",", "path", ")", "sys", ".", "exit", "(", "1", ")" ]
Exit if the path is not found.
[ "Exit", "if", "the", "path", "is", "not", "found", "." ]
70cd98d7a23d36c56f4e713ea820cfb4c485c81c
https://github.com/nickjj/ansigenome/blob/70cd98d7a23d36c56f4e713ea820cfb4c485c81c/ansigenome/utils.py#L187-L193
-1
251,419
nickjj/ansigenome
ansigenome/utils.py
yaml_load
def yaml_load(path, input="", err_quit=False): """ Return a yaml dict from a file or string with error handling. """ try: if len(input) > 0: return yaml.safe_load(input) elif len(path) > 0: return yaml.safe_load(file_to_string(path)) except Exception as err: file = os.path.basename(path) ui.error("", c.MESSAGES["yaml_error"].replace("%file", file), err, "") if err_quit: sys.exit(1) return False
python
def yaml_load(path, input="", err_quit=False): """ Return a yaml dict from a file or string with error handling. """ try: if len(input) > 0: return yaml.safe_load(input) elif len(path) > 0: return yaml.safe_load(file_to_string(path)) except Exception as err: file = os.path.basename(path) ui.error("", c.MESSAGES["yaml_error"].replace("%file", file), err, "") if err_quit: sys.exit(1) return False
[ "def", "yaml_load", "(", "path", ",", "input", "=", "\"\"", ",", "err_quit", "=", "False", ")", ":", "try", ":", "if", "len", "(", "input", ")", ">", "0", ":", "return", "yaml", ".", "safe_load", "(", "input", ")", "elif", "len", "(", "path", ")", ">", "0", ":", "return", "yaml", ".", "safe_load", "(", "file_to_string", "(", "path", ")", ")", "except", "Exception", "as", "err", ":", "file", "=", "os", ".", "path", ".", "basename", "(", "path", ")", "ui", ".", "error", "(", "\"\"", ",", "c", ".", "MESSAGES", "[", "\"yaml_error\"", "]", ".", "replace", "(", "\"%file\"", ",", "file", ")", ",", "err", ",", "\"\"", ")", "if", "err_quit", ":", "sys", ".", "exit", "(", "1", ")", "return", "False" ]
Return a yaml dict from a file or string with error handling.
[ "Return", "a", "yaml", "dict", "from", "a", "file", "or", "string", "with", "error", "handling", "." ]
70cd98d7a23d36c56f4e713ea820cfb4c485c81c
https://github.com/nickjj/ansigenome/blob/70cd98d7a23d36c56f4e713ea820cfb4c485c81c/ansigenome/utils.py#L209-L227
-1
251,420
nickjj/ansigenome
ansigenome/utils.py
to_nice_yaml
def to_nice_yaml(yaml_input, indentation=2): """ Return condensed yaml into human readable yaml. """ return yaml.safe_dump(yaml_input, indent=indentation, allow_unicode=True, default_flow_style=False)
python
def to_nice_yaml(yaml_input, indentation=2): """ Return condensed yaml into human readable yaml. """ return yaml.safe_dump(yaml_input, indent=indentation, allow_unicode=True, default_flow_style=False)
[ "def", "to_nice_yaml", "(", "yaml_input", ",", "indentation", "=", "2", ")", ":", "return", "yaml", ".", "safe_dump", "(", "yaml_input", ",", "indent", "=", "indentation", ",", "allow_unicode", "=", "True", ",", "default_flow_style", "=", "False", ")" ]
Return condensed yaml into human readable yaml.
[ "Return", "condensed", "yaml", "into", "human", "readable", "yaml", "." ]
70cd98d7a23d36c56f4e713ea820cfb4c485c81c
https://github.com/nickjj/ansigenome/blob/70cd98d7a23d36c56f4e713ea820cfb4c485c81c/ansigenome/utils.py#L230-L235
-1
251,421
nickjj/ansigenome
ansigenome/utils.py
keys_in_dict
def keys_in_dict(d, parent_key, keys): """ Create a list of keys from a dict recursively. """ for key, value in d.iteritems(): if isinstance(value, dict): keys_in_dict(value, key, keys) else: if parent_key: prefix = parent_key + "." else: prefix = "" keys.append(prefix + key) return keys
python
def keys_in_dict(d, parent_key, keys): """ Create a list of keys from a dict recursively. """ for key, value in d.iteritems(): if isinstance(value, dict): keys_in_dict(value, key, keys) else: if parent_key: prefix = parent_key + "." else: prefix = "" keys.append(prefix + key) return keys
[ "def", "keys_in_dict", "(", "d", ",", "parent_key", ",", "keys", ")", ":", "for", "key", ",", "value", "in", "d", ".", "iteritems", "(", ")", ":", "if", "isinstance", "(", "value", ",", "dict", ")", ":", "keys_in_dict", "(", "value", ",", "key", ",", "keys", ")", "else", ":", "if", "parent_key", ":", "prefix", "=", "parent_key", "+", "\".\"", "else", ":", "prefix", "=", "\"\"", "keys", ".", "append", "(", "prefix", "+", "key", ")", "return", "keys" ]
Create a list of keys from a dict recursively.
[ "Create", "a", "list", "of", "keys", "from", "a", "dict", "recursively", "." ]
70cd98d7a23d36c56f4e713ea820cfb4c485c81c
https://github.com/nickjj/ansigenome/blob/70cd98d7a23d36c56f4e713ea820cfb4c485c81c/ansigenome/utils.py#L258-L273
-1
251,422
nickjj/ansigenome
ansigenome/utils.py
swap_yaml_string
def swap_yaml_string(file_path, swaps): """ Swap a string in a yaml file without touching the existing formatting. """ original_file = file_to_string(file_path) new_file = original_file changed = False for item in swaps: match = re.compile(r'(?<={0}: )(["\']?)(.*)\1'.format(item[0]), re.MULTILINE) new_file = re.sub(match, item[1], new_file) if new_file != original_file: changed = True string_to_file(file_path, new_file) return (new_file, changed)
python
def swap_yaml_string(file_path, swaps): """ Swap a string in a yaml file without touching the existing formatting. """ original_file = file_to_string(file_path) new_file = original_file changed = False for item in swaps: match = re.compile(r'(?<={0}: )(["\']?)(.*)\1'.format(item[0]), re.MULTILINE) new_file = re.sub(match, item[1], new_file) if new_file != original_file: changed = True string_to_file(file_path, new_file) return (new_file, changed)
[ "def", "swap_yaml_string", "(", "file_path", ",", "swaps", ")", ":", "original_file", "=", "file_to_string", "(", "file_path", ")", "new_file", "=", "original_file", "changed", "=", "False", "for", "item", "in", "swaps", ":", "match", "=", "re", ".", "compile", "(", "r'(?<={0}: )([\"\\']?)(.*)\\1'", ".", "format", "(", "item", "[", "0", "]", ")", ",", "re", ".", "MULTILINE", ")", "new_file", "=", "re", ".", "sub", "(", "match", ",", "item", "[", "1", "]", ",", "new_file", ")", "if", "new_file", "!=", "original_file", ":", "changed", "=", "True", "string_to_file", "(", "file_path", ",", "new_file", ")", "return", "(", "new_file", ",", "changed", ")" ]
Swap a string in a yaml file without touching the existing formatting.
[ "Swap", "a", "string", "in", "a", "yaml", "file", "without", "touching", "the", "existing", "formatting", "." ]
70cd98d7a23d36c56f4e713ea820cfb4c485c81c
https://github.com/nickjj/ansigenome/blob/70cd98d7a23d36c56f4e713ea820cfb4c485c81c/ansigenome/utils.py#L276-L295
-1
251,423
nickjj/ansigenome
ansigenome/utils.py
exit_if_no_roles
def exit_if_no_roles(roles_count, roles_path): """ Exit if there were no roles found. """ if roles_count == 0: ui.warn(c.MESSAGES["empty_roles_path"], roles_path) sys.exit()
python
def exit_if_no_roles(roles_count, roles_path): """ Exit if there were no roles found. """ if roles_count == 0: ui.warn(c.MESSAGES["empty_roles_path"], roles_path) sys.exit()
[ "def", "exit_if_no_roles", "(", "roles_count", ",", "roles_path", ")", ":", "if", "roles_count", "==", "0", ":", "ui", ".", "warn", "(", "c", ".", "MESSAGES", "[", "\"empty_roles_path\"", "]", ",", "roles_path", ")", "sys", ".", "exit", "(", ")" ]
Exit if there were no roles found.
[ "Exit", "if", "there", "were", "no", "roles", "found", "." ]
70cd98d7a23d36c56f4e713ea820cfb4c485c81c
https://github.com/nickjj/ansigenome/blob/70cd98d7a23d36c56f4e713ea820cfb4c485c81c/ansigenome/utils.py#L303-L309
-1
251,424
nickjj/ansigenome
ansigenome/utils.py
roles_dict
def roles_dict(path, repo_prefix="", repo_sub_dir=""): """ Return a dict of role names and repo paths. """ exit_if_path_not_found(path) aggregated_roles = {} roles = os.walk(path).next()[1] # First scan all directories for role in roles: for sub_role in roles_dict(path + "/" + role, repo_prefix="", repo_sub_dir=role + "/"): aggregated_roles[role + "/" + sub_role] = role + "/" + sub_role # Then format them for role in roles: if is_role(os.path.join(path, role)): if isinstance(role, basestring): role_repo = "{0}{1}".format(repo_prefix, role_name(role)) aggregated_roles[role] = role_repo return aggregated_roles
python
def roles_dict(path, repo_prefix="", repo_sub_dir=""): """ Return a dict of role names and repo paths. """ exit_if_path_not_found(path) aggregated_roles = {} roles = os.walk(path).next()[1] # First scan all directories for role in roles: for sub_role in roles_dict(path + "/" + role, repo_prefix="", repo_sub_dir=role + "/"): aggregated_roles[role + "/" + sub_role] = role + "/" + sub_role # Then format them for role in roles: if is_role(os.path.join(path, role)): if isinstance(role, basestring): role_repo = "{0}{1}".format(repo_prefix, role_name(role)) aggregated_roles[role] = role_repo return aggregated_roles
[ "def", "roles_dict", "(", "path", ",", "repo_prefix", "=", "\"\"", ",", "repo_sub_dir", "=", "\"\"", ")", ":", "exit_if_path_not_found", "(", "path", ")", "aggregated_roles", "=", "{", "}", "roles", "=", "os", ".", "walk", "(", "path", ")", ".", "next", "(", ")", "[", "1", "]", "# First scan all directories", "for", "role", "in", "roles", ":", "for", "sub_role", "in", "roles_dict", "(", "path", "+", "\"/\"", "+", "role", ",", "repo_prefix", "=", "\"\"", ",", "repo_sub_dir", "=", "role", "+", "\"/\"", ")", ":", "aggregated_roles", "[", "role", "+", "\"/\"", "+", "sub_role", "]", "=", "role", "+", "\"/\"", "+", "sub_role", "# Then format them", "for", "role", "in", "roles", ":", "if", "is_role", "(", "os", ".", "path", ".", "join", "(", "path", ",", "role", ")", ")", ":", "if", "isinstance", "(", "role", ",", "basestring", ")", ":", "role_repo", "=", "\"{0}{1}\"", ".", "format", "(", "repo_prefix", ",", "role_name", "(", "role", ")", ")", "aggregated_roles", "[", "role", "]", "=", "role_repo", "return", "aggregated_roles" ]
Return a dict of role names and repo paths.
[ "Return", "a", "dict", "of", "role", "names", "and", "repo", "paths", "." ]
70cd98d7a23d36c56f4e713ea820cfb4c485c81c
https://github.com/nickjj/ansigenome/blob/70cd98d7a23d36c56f4e713ea820cfb4c485c81c/ansigenome/utils.py#L312-L336
-1
251,425
nickjj/ansigenome
ansigenome/utils.py
is_role
def is_role(path): """ Determine if a path is an ansible role. """ seems_legit = False for folder in c.ANSIBLE_FOLDERS: if os.path.exists(os.path.join(path, folder)): seems_legit = True return seems_legit
python
def is_role(path): """ Determine if a path is an ansible role. """ seems_legit = False for folder in c.ANSIBLE_FOLDERS: if os.path.exists(os.path.join(path, folder)): seems_legit = True return seems_legit
[ "def", "is_role", "(", "path", ")", ":", "seems_legit", "=", "False", "for", "folder", "in", "c", ".", "ANSIBLE_FOLDERS", ":", "if", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "path", ",", "folder", ")", ")", ":", "seems_legit", "=", "True", "return", "seems_legit" ]
Determine if a path is an ansible role.
[ "Determine", "if", "a", "path", "is", "an", "ansible", "role", "." ]
70cd98d7a23d36c56f4e713ea820cfb4c485c81c
https://github.com/nickjj/ansigenome/blob/70cd98d7a23d36c56f4e713ea820cfb4c485c81c/ansigenome/utils.py#L349-L358
-1
251,426
nickjj/ansigenome
ansigenome/utils.py
stripped_args
def stripped_args(args): """ Return the stripped version of the arguments. """ stripped_args = [] for arg in args: stripped_args.append(arg.strip()) return stripped_args
python
def stripped_args(args): """ Return the stripped version of the arguments. """ stripped_args = [] for arg in args: stripped_args.append(arg.strip()) return stripped_args
[ "def", "stripped_args", "(", "args", ")", ":", "stripped_args", "=", "[", "]", "for", "arg", "in", "args", ":", "stripped_args", ".", "append", "(", "arg", ".", "strip", "(", ")", ")", "return", "stripped_args" ]
Return the stripped version of the arguments.
[ "Return", "the", "stripped", "version", "of", "the", "arguments", "." ]
70cd98d7a23d36c56f4e713ea820cfb4c485c81c
https://github.com/nickjj/ansigenome/blob/70cd98d7a23d36c56f4e713ea820cfb4c485c81c/ansigenome/utils.py#L361-L369
-1
251,427
nickjj/ansigenome
ansigenome/utils.py
normalize_role
def normalize_role(role, config): """ Normalize a role name. """ if role.startswith(config["scm_repo_prefix"]): role_name = role.replace(config["scm_repo_prefix"], "") else: if "." in role: galaxy_prefix = "{0}.".format(config["scm_user"]) role_name = role.replace(galaxy_prefix, "") elif "-" in role: role_name = role.replace("-", "_") else: role_name = role return role_name
python
def normalize_role(role, config): """ Normalize a role name. """ if role.startswith(config["scm_repo_prefix"]): role_name = role.replace(config["scm_repo_prefix"], "") else: if "." in role: galaxy_prefix = "{0}.".format(config["scm_user"]) role_name = role.replace(galaxy_prefix, "") elif "-" in role: role_name = role.replace("-", "_") else: role_name = role return role_name
[ "def", "normalize_role", "(", "role", ",", "config", ")", ":", "if", "role", ".", "startswith", "(", "config", "[", "\"scm_repo_prefix\"", "]", ")", ":", "role_name", "=", "role", ".", "replace", "(", "config", "[", "\"scm_repo_prefix\"", "]", ",", "\"\"", ")", "else", ":", "if", "\".\"", "in", "role", ":", "galaxy_prefix", "=", "\"{0}.\"", ".", "format", "(", "config", "[", "\"scm_user\"", "]", ")", "role_name", "=", "role", ".", "replace", "(", "galaxy_prefix", ",", "\"\"", ")", "elif", "\"-\"", "in", "role", ":", "role_name", "=", "role", ".", "replace", "(", "\"-\"", ",", "\"_\"", ")", "else", ":", "role_name", "=", "role", "return", "role_name" ]
Normalize a role name.
[ "Normalize", "a", "role", "name", "." ]
70cd98d7a23d36c56f4e713ea820cfb4c485c81c
https://github.com/nickjj/ansigenome/blob/70cd98d7a23d36c56f4e713ea820cfb4c485c81c/ansigenome/utils.py#L372-L387
-1
251,428
nickjj/ansigenome
ansigenome/utils.py
create_meta_main
def create_meta_main(create_path, config, role, categories): """ Create a meta template. """ meta_file = c.DEFAULT_META_FILE.replace( "%author_name", config["author_name"]) meta_file = meta_file.replace( "%author_company", config["author_company"]) meta_file = meta_file.replace("%license_type", config["license_type"]) meta_file = meta_file.replace("%role_name", role) # Normalize the category so %categories always gets replaced. if not categories: categories = "" meta_file = meta_file.replace("%categories", categories) string_to_file(create_path, meta_file)
python
def create_meta_main(create_path, config, role, categories): """ Create a meta template. """ meta_file = c.DEFAULT_META_FILE.replace( "%author_name", config["author_name"]) meta_file = meta_file.replace( "%author_company", config["author_company"]) meta_file = meta_file.replace("%license_type", config["license_type"]) meta_file = meta_file.replace("%role_name", role) # Normalize the category so %categories always gets replaced. if not categories: categories = "" meta_file = meta_file.replace("%categories", categories) string_to_file(create_path, meta_file)
[ "def", "create_meta_main", "(", "create_path", ",", "config", ",", "role", ",", "categories", ")", ":", "meta_file", "=", "c", ".", "DEFAULT_META_FILE", ".", "replace", "(", "\"%author_name\"", ",", "config", "[", "\"author_name\"", "]", ")", "meta_file", "=", "meta_file", ".", "replace", "(", "\"%author_company\"", ",", "config", "[", "\"author_company\"", "]", ")", "meta_file", "=", "meta_file", ".", "replace", "(", "\"%license_type\"", ",", "config", "[", "\"license_type\"", "]", ")", "meta_file", "=", "meta_file", ".", "replace", "(", "\"%role_name\"", ",", "role", ")", "# Normalize the category so %categories always gets replaced.", "if", "not", "categories", ":", "categories", "=", "\"\"", "meta_file", "=", "meta_file", ".", "replace", "(", "\"%categories\"", ",", "categories", ")", "string_to_file", "(", "create_path", ",", "meta_file", ")" ]
Create a meta template.
[ "Create", "a", "meta", "template", "." ]
70cd98d7a23d36c56f4e713ea820cfb4c485c81c
https://github.com/nickjj/ansigenome/blob/70cd98d7a23d36c56f4e713ea820cfb4c485c81c/ansigenome/utils.py#L390-L407
-1
251,429
nickjj/ansigenome
ansigenome/utils.py
get_version
def get_version(path, default="master"): """ Return the version from a VERSION file """ version = default if os.path.exists(path): version_contents = file_to_string(path) if version_contents: version = version_contents.strip() return version
python
def get_version(path, default="master"): """ Return the version from a VERSION file """ version = default if os.path.exists(path): version_contents = file_to_string(path) if version_contents: version = version_contents.strip() return version
[ "def", "get_version", "(", "path", ",", "default", "=", "\"master\"", ")", ":", "version", "=", "default", "if", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "version_contents", "=", "file_to_string", "(", "path", ")", "if", "version_contents", ":", "version", "=", "version_contents", ".", "strip", "(", ")", "return", "version" ]
Return the version from a VERSION file
[ "Return", "the", "version", "from", "a", "VERSION", "file" ]
70cd98d7a23d36c56f4e713ea820cfb4c485c81c
https://github.com/nickjj/ansigenome/blob/70cd98d7a23d36c56f4e713ea820cfb4c485c81c/ansigenome/utils.py#L410-L420
-1
251,430
nickjj/ansigenome
ansigenome/utils.py
write_config
def write_config(path, config): """ Write the config with a little post-converting formatting. """ config_as_string = to_nice_yaml(config) config_as_string = "---\n" + config_as_string string_to_file(path, config_as_string)
python
def write_config(path, config): """ Write the config with a little post-converting formatting. """ config_as_string = to_nice_yaml(config) config_as_string = "---\n" + config_as_string string_to_file(path, config_as_string)
[ "def", "write_config", "(", "path", ",", "config", ")", ":", "config_as_string", "=", "to_nice_yaml", "(", "config", ")", "config_as_string", "=", "\"---\\n\"", "+", "config_as_string", "string_to_file", "(", "path", ",", "config_as_string", ")" ]
Write the config with a little post-converting formatting.
[ "Write", "the", "config", "with", "a", "little", "post", "-", "converting", "formatting", "." ]
70cd98d7a23d36c56f4e713ea820cfb4c485c81c
https://github.com/nickjj/ansigenome/blob/70cd98d7a23d36c56f4e713ea820cfb4c485c81c/ansigenome/utils.py#L423-L431
-1
251,431
nickjj/ansigenome
ansigenome/scan.py
Scan.limit_roles
def limit_roles(self): """ Limit the roles being scanned. """ new_roles = {} roles = self.options.limit.split(",") for key, value in self.roles.iteritems(): for role in roles: role = role.strip() if key == role: new_roles[key] = value self.roles = new_roles
python
def limit_roles(self): """ Limit the roles being scanned. """ new_roles = {} roles = self.options.limit.split(",") for key, value in self.roles.iteritems(): for role in roles: role = role.strip() if key == role: new_roles[key] = value self.roles = new_roles
[ "def", "limit_roles", "(", "self", ")", ":", "new_roles", "=", "{", "}", "roles", "=", "self", ".", "options", ".", "limit", ".", "split", "(", "\",\"", ")", "for", "key", ",", "value", "in", "self", ".", "roles", ".", "iteritems", "(", ")", ":", "for", "role", "in", "roles", ":", "role", "=", "role", ".", "strip", "(", ")", "if", "key", "==", "role", ":", "new_roles", "[", "key", "]", "=", "value", "self", ".", "roles", "=", "new_roles" ]
Limit the roles being scanned.
[ "Limit", "the", "roles", "being", "scanned", "." ]
70cd98d7a23d36c56f4e713ea820cfb4c485c81c
https://github.com/nickjj/ansigenome/blob/70cd98d7a23d36c56f4e713ea820cfb4c485c81c/ansigenome/scan.py#L88-L101
-1
251,432
nickjj/ansigenome
ansigenome/scan.py
Scan.scan_roles
def scan_roles(self): """ Iterate over each role and report its stats. """ for key, value in sorted(self.roles.iteritems()): self.paths["role"] = os.path.join(self.roles_path, key) self.paths["meta"] = os.path.join(self.paths["role"], "meta", "main.yml") self.paths["readme"] = os.path.join(self.paths["role"], "README.{0}" .format(self.readme_format)) self.paths["defaults"] = os.path.join(self.paths["role"], "defaults", "main.yml") self.report["roles"][key] = self.report_role(key) # we are writing a readme file which means the state of the role # needs to be updated before it gets output by the ui if self.gendoc: if self.valid_meta(key): self.make_meta_dict_consistent() self.set_readme_template_vars(key, value) self.write_readme(key) # only load the meta file when generating meta files elif self.genmeta: self.make_or_augment_meta(key) if self.valid_meta(key): self.make_meta_dict_consistent() self.write_meta(key) else: self.update_scan_report(key) if not self.config["options_quiet"] and not self.export: ui.role(key, self.report["roles"][key], self.report["stats"]["longest_role_name_length"]) self.tally_role_columns() # below this point is only UI output, so we can return if self.config["options_quiet"] or self.export: return ui.totals(self.report["totals"], len(self.report["roles"].keys()), self.report["stats"]["longest_role_name_length"]) if self.gendoc: ui.gen_totals(self.report["state"], "readme") elif self.genmeta: ui.gen_totals(self.report["state"], "meta") else: ui.scan_totals(self.report["state"])
python
def scan_roles(self): """ Iterate over each role and report its stats. """ for key, value in sorted(self.roles.iteritems()): self.paths["role"] = os.path.join(self.roles_path, key) self.paths["meta"] = os.path.join(self.paths["role"], "meta", "main.yml") self.paths["readme"] = os.path.join(self.paths["role"], "README.{0}" .format(self.readme_format)) self.paths["defaults"] = os.path.join(self.paths["role"], "defaults", "main.yml") self.report["roles"][key] = self.report_role(key) # we are writing a readme file which means the state of the role # needs to be updated before it gets output by the ui if self.gendoc: if self.valid_meta(key): self.make_meta_dict_consistent() self.set_readme_template_vars(key, value) self.write_readme(key) # only load the meta file when generating meta files elif self.genmeta: self.make_or_augment_meta(key) if self.valid_meta(key): self.make_meta_dict_consistent() self.write_meta(key) else: self.update_scan_report(key) if not self.config["options_quiet"] and not self.export: ui.role(key, self.report["roles"][key], self.report["stats"]["longest_role_name_length"]) self.tally_role_columns() # below this point is only UI output, so we can return if self.config["options_quiet"] or self.export: return ui.totals(self.report["totals"], len(self.report["roles"].keys()), self.report["stats"]["longest_role_name_length"]) if self.gendoc: ui.gen_totals(self.report["state"], "readme") elif self.genmeta: ui.gen_totals(self.report["state"], "meta") else: ui.scan_totals(self.report["state"])
[ "def", "scan_roles", "(", "self", ")", ":", "for", "key", ",", "value", "in", "sorted", "(", "self", ".", "roles", ".", "iteritems", "(", ")", ")", ":", "self", ".", "paths", "[", "\"role\"", "]", "=", "os", ".", "path", ".", "join", "(", "self", ".", "roles_path", ",", "key", ")", "self", ".", "paths", "[", "\"meta\"", "]", "=", "os", ".", "path", ".", "join", "(", "self", ".", "paths", "[", "\"role\"", "]", ",", "\"meta\"", ",", "\"main.yml\"", ")", "self", ".", "paths", "[", "\"readme\"", "]", "=", "os", ".", "path", ".", "join", "(", "self", ".", "paths", "[", "\"role\"", "]", ",", "\"README.{0}\"", ".", "format", "(", "self", ".", "readme_format", ")", ")", "self", ".", "paths", "[", "\"defaults\"", "]", "=", "os", ".", "path", ".", "join", "(", "self", ".", "paths", "[", "\"role\"", "]", ",", "\"defaults\"", ",", "\"main.yml\"", ")", "self", ".", "report", "[", "\"roles\"", "]", "[", "key", "]", "=", "self", ".", "report_role", "(", "key", ")", "# we are writing a readme file which means the state of the role", "# needs to be updated before it gets output by the ui", "if", "self", ".", "gendoc", ":", "if", "self", ".", "valid_meta", "(", "key", ")", ":", "self", ".", "make_meta_dict_consistent", "(", ")", "self", ".", "set_readme_template_vars", "(", "key", ",", "value", ")", "self", ".", "write_readme", "(", "key", ")", "# only load the meta file when generating meta files", "elif", "self", ".", "genmeta", ":", "self", ".", "make_or_augment_meta", "(", "key", ")", "if", "self", ".", "valid_meta", "(", "key", ")", ":", "self", ".", "make_meta_dict_consistent", "(", ")", "self", ".", "write_meta", "(", "key", ")", "else", ":", "self", ".", "update_scan_report", "(", "key", ")", "if", "not", "self", ".", "config", "[", "\"options_quiet\"", "]", "and", "not", "self", ".", "export", ":", "ui", ".", "role", "(", "key", ",", "self", ".", "report", "[", "\"roles\"", "]", "[", "key", "]", ",", "self", ".", "report", "[", "\"stats\"", "]", "[", "\"longest_role_name_length\"", "]", ")", "self", ".", "tally_role_columns", "(", ")", "# below this point is only UI output, so we can return", "if", "self", ".", "config", "[", "\"options_quiet\"", "]", "or", "self", ".", "export", ":", "return", "ui", ".", "totals", "(", "self", ".", "report", "[", "\"totals\"", "]", ",", "len", "(", "self", ".", "report", "[", "\"roles\"", "]", ".", "keys", "(", ")", ")", ",", "self", ".", "report", "[", "\"stats\"", "]", "[", "\"longest_role_name_length\"", "]", ")", "if", "self", ".", "gendoc", ":", "ui", ".", "gen_totals", "(", "self", ".", "report", "[", "\"state\"", "]", ",", "\"readme\"", ")", "elif", "self", ".", "genmeta", ":", "ui", ".", "gen_totals", "(", "self", ".", "report", "[", "\"state\"", "]", ",", "\"meta\"", ")", "else", ":", "ui", ".", "scan_totals", "(", "self", ".", "report", "[", "\"state\"", "]", ")" ]
Iterate over each role and report its stats.
[ "Iterate", "over", "each", "role", "and", "report", "its", "stats", "." ]
70cd98d7a23d36c56f4e713ea820cfb4c485c81c
https://github.com/nickjj/ansigenome/blob/70cd98d7a23d36c56f4e713ea820cfb4c485c81c/ansigenome/scan.py#L103-L155
-1
251,433
nickjj/ansigenome
ansigenome/scan.py
Scan.export_roles
def export_roles(self): """ Export the roles to one of the export types. """ # prepare the report by removing unnecessary fields del self.report["state"] del self.report["stats"] for role in self.report["roles"]: del self.report["roles"][role]["state"] defaults_path = os.path.join(self.roles_path, role, "defaults", "main.yml") if os.path.exists(defaults_path): defaults = self.report["roles"][role]["defaults"] self.report["roles"][role]["defaults"] = \ utils.yaml_load("", defaults) Export(self.roles_path, self.report, self.config, self.options)
python
def export_roles(self): """ Export the roles to one of the export types. """ # prepare the report by removing unnecessary fields del self.report["state"] del self.report["stats"] for role in self.report["roles"]: del self.report["roles"][role]["state"] defaults_path = os.path.join(self.roles_path, role, "defaults", "main.yml") if os.path.exists(defaults_path): defaults = self.report["roles"][role]["defaults"] self.report["roles"][role]["defaults"] = \ utils.yaml_load("", defaults) Export(self.roles_path, self.report, self.config, self.options)
[ "def", "export_roles", "(", "self", ")", ":", "# prepare the report by removing unnecessary fields", "del", "self", ".", "report", "[", "\"state\"", "]", "del", "self", ".", "report", "[", "\"stats\"", "]", "for", "role", "in", "self", ".", "report", "[", "\"roles\"", "]", ":", "del", "self", ".", "report", "[", "\"roles\"", "]", "[", "role", "]", "[", "\"state\"", "]", "defaults_path", "=", "os", ".", "path", ".", "join", "(", "self", ".", "roles_path", ",", "role", ",", "\"defaults\"", ",", "\"main.yml\"", ")", "if", "os", ".", "path", ".", "exists", "(", "defaults_path", ")", ":", "defaults", "=", "self", ".", "report", "[", "\"roles\"", "]", "[", "role", "]", "[", "\"defaults\"", "]", "self", ".", "report", "[", "\"roles\"", "]", "[", "role", "]", "[", "\"defaults\"", "]", "=", "utils", ".", "yaml_load", "(", "\"\"", ",", "defaults", ")", "Export", "(", "self", ".", "roles_path", ",", "self", ".", "report", ",", "self", ".", "config", ",", "self", ".", "options", ")" ]
Export the roles to one of the export types.
[ "Export", "the", "roles", "to", "one", "of", "the", "export", "types", "." ]
70cd98d7a23d36c56f4e713ea820cfb4c485c81c
https://github.com/nickjj/ansigenome/blob/70cd98d7a23d36c56f4e713ea820cfb4c485c81c/ansigenome/scan.py#L157-L174
-1
251,434
nickjj/ansigenome
ansigenome/scan.py
Scan.report_role
def report_role(self, role): """ Return the fields gathered. """ self.yaml_files = [] fields = { "state": "skipped", "total_files": self.gather_files(), "total_lines": self.gather_lines(), "total_facts": self.gather_facts(), "total_defaults": self.gather_defaults(), "facts": self.facts, "defaults": self.defaults, "meta": self.gather_meta(), "readme": self.gather_readme(), "dependencies": self.dependencies, "total_dependencies": len(self.dependencies) } return fields
python
def report_role(self, role): """ Return the fields gathered. """ self.yaml_files = [] fields = { "state": "skipped", "total_files": self.gather_files(), "total_lines": self.gather_lines(), "total_facts": self.gather_facts(), "total_defaults": self.gather_defaults(), "facts": self.facts, "defaults": self.defaults, "meta": self.gather_meta(), "readme": self.gather_readme(), "dependencies": self.dependencies, "total_dependencies": len(self.dependencies) } return fields
[ "def", "report_role", "(", "self", ",", "role", ")", ":", "self", ".", "yaml_files", "=", "[", "]", "fields", "=", "{", "\"state\"", ":", "\"skipped\"", ",", "\"total_files\"", ":", "self", ".", "gather_files", "(", ")", ",", "\"total_lines\"", ":", "self", ".", "gather_lines", "(", ")", ",", "\"total_facts\"", ":", "self", ".", "gather_facts", "(", ")", ",", "\"total_defaults\"", ":", "self", ".", "gather_defaults", "(", ")", ",", "\"facts\"", ":", "self", ".", "facts", ",", "\"defaults\"", ":", "self", ".", "defaults", ",", "\"meta\"", ":", "self", ".", "gather_meta", "(", ")", ",", "\"readme\"", ":", "self", ".", "gather_readme", "(", ")", ",", "\"dependencies\"", ":", "self", ".", "dependencies", ",", "\"total_dependencies\"", ":", "len", "(", "self", ".", "dependencies", ")", "}", "return", "fields" ]
Return the fields gathered.
[ "Return", "the", "fields", "gathered", "." ]
70cd98d7a23d36c56f4e713ea820cfb4c485c81c
https://github.com/nickjj/ansigenome/blob/70cd98d7a23d36c56f4e713ea820cfb4c485c81c/ansigenome/scan.py#L176-L196
-1
251,435
nickjj/ansigenome
ansigenome/scan.py
Scan.gather_meta
def gather_meta(self): """ Return the meta file. """ if not os.path.exists(self.paths["meta"]): return "" meta_dict = utils.yaml_load(self.paths["meta"]) # gather the dependencies if meta_dict and "dependencies" in meta_dict: # create a simple list of each role that is a dependency dep_list = [] for dependency in meta_dict["dependencies"]: if type(dependency) is dict: dep_list.append(dependency["role"]) else: dep_list.append(dependency) # unique set of dependencies meta_dict["dependencies"] = list(set(dep_list)) self.dependencies = meta_dict["dependencies"] else: self.dependencies = [] return utils.file_to_string(self.paths["meta"])
python
def gather_meta(self): """ Return the meta file. """ if not os.path.exists(self.paths["meta"]): return "" meta_dict = utils.yaml_load(self.paths["meta"]) # gather the dependencies if meta_dict and "dependencies" in meta_dict: # create a simple list of each role that is a dependency dep_list = [] for dependency in meta_dict["dependencies"]: if type(dependency) is dict: dep_list.append(dependency["role"]) else: dep_list.append(dependency) # unique set of dependencies meta_dict["dependencies"] = list(set(dep_list)) self.dependencies = meta_dict["dependencies"] else: self.dependencies = [] return utils.file_to_string(self.paths["meta"])
[ "def", "gather_meta", "(", "self", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "self", ".", "paths", "[", "\"meta\"", "]", ")", ":", "return", "\"\"", "meta_dict", "=", "utils", ".", "yaml_load", "(", "self", ".", "paths", "[", "\"meta\"", "]", ")", "# gather the dependencies", "if", "meta_dict", "and", "\"dependencies\"", "in", "meta_dict", ":", "# create a simple list of each role that is a dependency", "dep_list", "=", "[", "]", "for", "dependency", "in", "meta_dict", "[", "\"dependencies\"", "]", ":", "if", "type", "(", "dependency", ")", "is", "dict", ":", "dep_list", ".", "append", "(", "dependency", "[", "\"role\"", "]", ")", "else", ":", "dep_list", ".", "append", "(", "dependency", ")", "# unique set of dependencies", "meta_dict", "[", "\"dependencies\"", "]", "=", "list", "(", "set", "(", "dep_list", ")", ")", "self", ".", "dependencies", "=", "meta_dict", "[", "\"dependencies\"", "]", "else", ":", "self", ".", "dependencies", "=", "[", "]", "return", "utils", ".", "file_to_string", "(", "self", ".", "paths", "[", "\"meta\"", "]", ")" ]
Return the meta file.
[ "Return", "the", "meta", "file", "." ]
70cd98d7a23d36c56f4e713ea820cfb4c485c81c
https://github.com/nickjj/ansigenome/blob/70cd98d7a23d36c56f4e713ea820cfb4c485c81c/ansigenome/scan.py#L198-L225
-1
251,436
nickjj/ansigenome
ansigenome/scan.py
Scan.gather_readme
def gather_readme(self): """ Return the readme file. """ if not os.path.exists(self.paths["readme"]): return "" return utils.file_to_string(self.paths["readme"])
python
def gather_readme(self): """ Return the readme file. """ if not os.path.exists(self.paths["readme"]): return "" return utils.file_to_string(self.paths["readme"])
[ "def", "gather_readme", "(", "self", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "self", ".", "paths", "[", "\"readme\"", "]", ")", ":", "return", "\"\"", "return", "utils", ".", "file_to_string", "(", "self", ".", "paths", "[", "\"readme\"", "]", ")" ]
Return the readme file.
[ "Return", "the", "readme", "file", "." ]
70cd98d7a23d36c56f4e713ea820cfb4c485c81c
https://github.com/nickjj/ansigenome/blob/70cd98d7a23d36c56f4e713ea820cfb4c485c81c/ansigenome/scan.py#L227-L234
-1
251,437
nickjj/ansigenome
ansigenome/scan.py
Scan.gather_defaults
def gather_defaults(self): """ Return the number of default variables. """ total_defaults = 0 defaults_lines = [] if not os.path.exists(self.paths["defaults"]): # reset the defaults if no defaults were found self.defaults = "" return 0 file = open(self.paths["defaults"], "r") for line in file: if len(line) > 0: first_char = line[0] else: first_char = "" defaults_lines.append(line) if (first_char != "#" and first_char != "-" and first_char != " " and first_char != "\r" and first_char != "\n" and first_char != "\t"): total_defaults += 1 file.close() self.defaults = "".join(defaults_lines) return total_defaults
python
def gather_defaults(self): """ Return the number of default variables. """ total_defaults = 0 defaults_lines = [] if not os.path.exists(self.paths["defaults"]): # reset the defaults if no defaults were found self.defaults = "" return 0 file = open(self.paths["defaults"], "r") for line in file: if len(line) > 0: first_char = line[0] else: first_char = "" defaults_lines.append(line) if (first_char != "#" and first_char != "-" and first_char != " " and first_char != "\r" and first_char != "\n" and first_char != "\t"): total_defaults += 1 file.close() self.defaults = "".join(defaults_lines) return total_defaults
[ "def", "gather_defaults", "(", "self", ")", ":", "total_defaults", "=", "0", "defaults_lines", "=", "[", "]", "if", "not", "os", ".", "path", ".", "exists", "(", "self", ".", "paths", "[", "\"defaults\"", "]", ")", ":", "# reset the defaults if no defaults were found", "self", ".", "defaults", "=", "\"\"", "return", "0", "file", "=", "open", "(", "self", ".", "paths", "[", "\"defaults\"", "]", ",", "\"r\"", ")", "for", "line", "in", "file", ":", "if", "len", "(", "line", ")", ">", "0", ":", "first_char", "=", "line", "[", "0", "]", "else", ":", "first_char", "=", "\"\"", "defaults_lines", ".", "append", "(", "line", ")", "if", "(", "first_char", "!=", "\"#\"", "and", "first_char", "!=", "\"-\"", "and", "first_char", "!=", "\" \"", "and", "first_char", "!=", "\"\\r\"", "and", "first_char", "!=", "\"\\n\"", "and", "first_char", "!=", "\"\\t\"", ")", ":", "total_defaults", "+=", "1", "file", ".", "close", "(", ")", "self", ".", "defaults", "=", "\"\"", ".", "join", "(", "defaults_lines", ")", "return", "total_defaults" ]
Return the number of default variables.
[ "Return", "the", "number", "of", "default", "variables", "." ]
70cd98d7a23d36c56f4e713ea820cfb4c485c81c
https://github.com/nickjj/ansigenome/blob/70cd98d7a23d36c56f4e713ea820cfb4c485c81c/ansigenome/scan.py#L236-L267
-1
251,438
nickjj/ansigenome
ansigenome/scan.py
Scan.gather_facts
def gather_facts(self): """ Return the number of facts. """ facts = [] for file in self.yaml_files: facts += self.gather_facts_list(file) unique_facts = list(set(facts)) self.facts = unique_facts return len(unique_facts)
python
def gather_facts(self): """ Return the number of facts. """ facts = [] for file in self.yaml_files: facts += self.gather_facts_list(file) unique_facts = list(set(facts)) self.facts = unique_facts return len(unique_facts)
[ "def", "gather_facts", "(", "self", ")", ":", "facts", "=", "[", "]", "for", "file", "in", "self", ".", "yaml_files", ":", "facts", "+=", "self", ".", "gather_facts_list", "(", "file", ")", "unique_facts", "=", "list", "(", "set", "(", "facts", ")", ")", "self", ".", "facts", "=", "unique_facts", "return", "len", "(", "unique_facts", ")" ]
Return the number of facts.
[ "Return", "the", "number", "of", "facts", "." ]
70cd98d7a23d36c56f4e713ea820cfb4c485c81c
https://github.com/nickjj/ansigenome/blob/70cd98d7a23d36c56f4e713ea820cfb4c485c81c/ansigenome/scan.py#L269-L281
-1
251,439
nickjj/ansigenome
ansigenome/scan.py
Scan.gather_facts_list
def gather_facts_list(self, file): """ Return a list of facts. """ facts = [] contents = utils.file_to_string(os.path.join(self.paths["role"], file)) contents = re.sub(r"\s+", "", contents) matches = self.regex_facts.findall(contents) for match in matches: facts.append(match.split(":")[1]) return facts
python
def gather_facts_list(self, file): """ Return a list of facts. """ facts = [] contents = utils.file_to_string(os.path.join(self.paths["role"], file)) contents = re.sub(r"\s+", "", contents) matches = self.regex_facts.findall(contents) for match in matches: facts.append(match.split(":")[1]) return facts
[ "def", "gather_facts_list", "(", "self", ",", "file", ")", ":", "facts", "=", "[", "]", "contents", "=", "utils", ".", "file_to_string", "(", "os", ".", "path", ".", "join", "(", "self", ".", "paths", "[", "\"role\"", "]", ",", "file", ")", ")", "contents", "=", "re", ".", "sub", "(", "r\"\\s+\"", ",", "\"\"", ",", "contents", ")", "matches", "=", "self", ".", "regex_facts", ".", "findall", "(", "contents", ")", "for", "match", "in", "matches", ":", "facts", ".", "append", "(", "match", ".", "split", "(", "\":\"", ")", "[", "1", "]", ")", "return", "facts" ]
Return a list of facts.
[ "Return", "a", "list", "of", "facts", "." ]
70cd98d7a23d36c56f4e713ea820cfb4c485c81c
https://github.com/nickjj/ansigenome/blob/70cd98d7a23d36c56f4e713ea820cfb4c485c81c/ansigenome/scan.py#L283-L297
-1
251,440
nickjj/ansigenome
ansigenome/scan.py
Scan.gather_files
def gather_files(self): """ Return the number of files. """ self.all_files = utils.files_in_path(self.paths["role"]) return len(self.all_files)
python
def gather_files(self): """ Return the number of files. """ self.all_files = utils.files_in_path(self.paths["role"]) return len(self.all_files)
[ "def", "gather_files", "(", "self", ")", ":", "self", ".", "all_files", "=", "utils", ".", "files_in_path", "(", "self", ".", "paths", "[", "\"role\"", "]", ")", "return", "len", "(", "self", ".", "all_files", ")" ]
Return the number of files.
[ "Return", "the", "number", "of", "files", "." ]
70cd98d7a23d36c56f4e713ea820cfb4c485c81c
https://github.com/nickjj/ansigenome/blob/70cd98d7a23d36c56f4e713ea820cfb4c485c81c/ansigenome/scan.py#L299-L305
-1
251,441
nickjj/ansigenome
ansigenome/scan.py
Scan.gather_lines
def gather_lines(self): """ Return the number of lines. """ total_lines = 0 for file in self.all_files: full_path = os.path.join(self.paths["role"], file) with open(full_path, "r") as f: for line in f: total_lines += 1 if full_path.endswith(".yml"): self.yaml_files.append(full_path) return total_lines
python
def gather_lines(self): """ Return the number of lines. """ total_lines = 0 for file in self.all_files: full_path = os.path.join(self.paths["role"], file) with open(full_path, "r") as f: for line in f: total_lines += 1 if full_path.endswith(".yml"): self.yaml_files.append(full_path) return total_lines
[ "def", "gather_lines", "(", "self", ")", ":", "total_lines", "=", "0", "for", "file", "in", "self", ".", "all_files", ":", "full_path", "=", "os", ".", "path", ".", "join", "(", "self", ".", "paths", "[", "\"role\"", "]", ",", "file", ")", "with", "open", "(", "full_path", ",", "\"r\"", ")", "as", "f", ":", "for", "line", "in", "f", ":", "total_lines", "+=", "1", "if", "full_path", ".", "endswith", "(", "\".yml\"", ")", ":", "self", ".", "yaml_files", ".", "append", "(", "full_path", ")", "return", "total_lines" ]
Return the number of lines.
[ "Return", "the", "number", "of", "lines", "." ]
70cd98d7a23d36c56f4e713ea820cfb4c485c81c
https://github.com/nickjj/ansigenome/blob/70cd98d7a23d36c56f4e713ea820cfb4c485c81c/ansigenome/scan.py#L307-L322
-1
251,442
nickjj/ansigenome
ansigenome/scan.py
Scan.tally_role_columns
def tally_role_columns(self): """ Sum up all of the stat columns. """ totals = self.report["totals"] roles = self.report["roles"] totals["dependencies"] = sum(roles[item] ["total_dependencies"] for item in roles) totals["defaults"] = sum(roles[item] ["total_defaults"] for item in roles) totals["facts"] = sum(roles[item]["total_facts"] for item in roles) totals["files"] = sum(roles[item]["total_files"] for item in roles) totals["lines"] = sum(roles[item]["total_lines"] for item in roles)
python
def tally_role_columns(self): """ Sum up all of the stat columns. """ totals = self.report["totals"] roles = self.report["roles"] totals["dependencies"] = sum(roles[item] ["total_dependencies"] for item in roles) totals["defaults"] = sum(roles[item] ["total_defaults"] for item in roles) totals["facts"] = sum(roles[item]["total_facts"] for item in roles) totals["files"] = sum(roles[item]["total_files"] for item in roles) totals["lines"] = sum(roles[item]["total_lines"] for item in roles)
[ "def", "tally_role_columns", "(", "self", ")", ":", "totals", "=", "self", ".", "report", "[", "\"totals\"", "]", "roles", "=", "self", ".", "report", "[", "\"roles\"", "]", "totals", "[", "\"dependencies\"", "]", "=", "sum", "(", "roles", "[", "item", "]", "[", "\"total_dependencies\"", "]", "for", "item", "in", "roles", ")", "totals", "[", "\"defaults\"", "]", "=", "sum", "(", "roles", "[", "item", "]", "[", "\"total_defaults\"", "]", "for", "item", "in", "roles", ")", "totals", "[", "\"facts\"", "]", "=", "sum", "(", "roles", "[", "item", "]", "[", "\"total_facts\"", "]", "for", "item", "in", "roles", ")", "totals", "[", "\"files\"", "]", "=", "sum", "(", "roles", "[", "item", "]", "[", "\"total_files\"", "]", "for", "item", "in", "roles", ")", "totals", "[", "\"lines\"", "]", "=", "sum", "(", "roles", "[", "item", "]", "[", "\"total_lines\"", "]", "for", "item", "in", "roles", ")" ]
Sum up all of the stat columns.
[ "Sum", "up", "all", "of", "the", "stat", "columns", "." ]
70cd98d7a23d36c56f4e713ea820cfb4c485c81c
https://github.com/nickjj/ansigenome/blob/70cd98d7a23d36c56f4e713ea820cfb4c485c81c/ansigenome/scan.py#L324-L337
-1
251,443
nickjj/ansigenome
ansigenome/scan.py
Scan.valid_meta
def valid_meta(self, role): """ Return whether or not the meta file being read is valid. """ if os.path.exists(self.paths["meta"]): self.meta_dict = utils.yaml_load(self.paths["meta"]) else: self.report["state"]["missing_meta_role"] += 1 self.report["roles"][role]["state"] = "missing_meta" return False is_valid = True # utils.yaml_load returns False when the file is invalid if isinstance(self.meta_dict, bool): is_valid = False sys.exit(1) return is_valid
python
def valid_meta(self, role): """ Return whether or not the meta file being read is valid. """ if os.path.exists(self.paths["meta"]): self.meta_dict = utils.yaml_load(self.paths["meta"]) else: self.report["state"]["missing_meta_role"] += 1 self.report["roles"][role]["state"] = "missing_meta" return False is_valid = True # utils.yaml_load returns False when the file is invalid if isinstance(self.meta_dict, bool): is_valid = False sys.exit(1) return is_valid
[ "def", "valid_meta", "(", "self", ",", "role", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "self", ".", "paths", "[", "\"meta\"", "]", ")", ":", "self", ".", "meta_dict", "=", "utils", ".", "yaml_load", "(", "self", ".", "paths", "[", "\"meta\"", "]", ")", "else", ":", "self", ".", "report", "[", "\"state\"", "]", "[", "\"missing_meta_role\"", "]", "+=", "1", "self", ".", "report", "[", "\"roles\"", "]", "[", "role", "]", "[", "\"state\"", "]", "=", "\"missing_meta\"", "return", "False", "is_valid", "=", "True", "# utils.yaml_load returns False when the file is invalid", "if", "isinstance", "(", "self", ".", "meta_dict", ",", "bool", ")", ":", "is_valid", "=", "False", "sys", ".", "exit", "(", "1", ")", "return", "is_valid" ]
Return whether or not the meta file being read is valid.
[ "Return", "whether", "or", "not", "the", "meta", "file", "being", "read", "is", "valid", "." ]
70cd98d7a23d36c56f4e713ea820cfb4c485c81c
https://github.com/nickjj/ansigenome/blob/70cd98d7a23d36c56f4e713ea820cfb4c485c81c/ansigenome/scan.py#L339-L358
-1
251,444
nickjj/ansigenome
ansigenome/scan.py
Scan.make_or_augment_meta
def make_or_augment_meta(self, role): """ Create or augment a meta file. """ if not os.path.exists(self.paths["meta"]): utils.create_meta_main(self.paths["meta"], self.config, role, "") self.report["state"]["ok_role"] += 1 self.report["roles"][role]["state"] = "ok" # swap values in place to use the config values swaps = [ ("author", self.config["author_name"]), ("company", self.config["author_company"]), ("license", self.config["license_type"]), ] (new_meta, _) = utils.swap_yaml_string(self.paths["meta"], swaps) # normalize the --- at the top of the file by removing it first new_meta = new_meta.replace("---", "") new_meta = new_meta.lstrip() # augment missing main keys augments = [ ("ansigenome_info", "{}"), ("galaxy_info", "{}"), ("dependencies", "[]"), ] new_meta = self.augment_main_keys(augments, new_meta) # re-attach the --- new_meta = "---\n\n" + new_meta travis_path = os.path.join(self.paths["role"], ".travis.yml") if os.path.exists(travis_path): new_meta = new_meta.replace("travis: False", "travis: True") utils.string_to_file(self.paths["meta"], new_meta)
python
def make_or_augment_meta(self, role): """ Create or augment a meta file. """ if not os.path.exists(self.paths["meta"]): utils.create_meta_main(self.paths["meta"], self.config, role, "") self.report["state"]["ok_role"] += 1 self.report["roles"][role]["state"] = "ok" # swap values in place to use the config values swaps = [ ("author", self.config["author_name"]), ("company", self.config["author_company"]), ("license", self.config["license_type"]), ] (new_meta, _) = utils.swap_yaml_string(self.paths["meta"], swaps) # normalize the --- at the top of the file by removing it first new_meta = new_meta.replace("---", "") new_meta = new_meta.lstrip() # augment missing main keys augments = [ ("ansigenome_info", "{}"), ("galaxy_info", "{}"), ("dependencies", "[]"), ] new_meta = self.augment_main_keys(augments, new_meta) # re-attach the --- new_meta = "---\n\n" + new_meta travis_path = os.path.join(self.paths["role"], ".travis.yml") if os.path.exists(travis_path): new_meta = new_meta.replace("travis: False", "travis: True") utils.string_to_file(self.paths["meta"], new_meta)
[ "def", "make_or_augment_meta", "(", "self", ",", "role", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "self", ".", "paths", "[", "\"meta\"", "]", ")", ":", "utils", ".", "create_meta_main", "(", "self", ".", "paths", "[", "\"meta\"", "]", ",", "self", ".", "config", ",", "role", ",", "\"\"", ")", "self", ".", "report", "[", "\"state\"", "]", "[", "\"ok_role\"", "]", "+=", "1", "self", ".", "report", "[", "\"roles\"", "]", "[", "role", "]", "[", "\"state\"", "]", "=", "\"ok\"", "# swap values in place to use the config values", "swaps", "=", "[", "(", "\"author\"", ",", "self", ".", "config", "[", "\"author_name\"", "]", ")", ",", "(", "\"company\"", ",", "self", ".", "config", "[", "\"author_company\"", "]", ")", ",", "(", "\"license\"", ",", "self", ".", "config", "[", "\"license_type\"", "]", ")", ",", "]", "(", "new_meta", ",", "_", ")", "=", "utils", ".", "swap_yaml_string", "(", "self", ".", "paths", "[", "\"meta\"", "]", ",", "swaps", ")", "# normalize the --- at the top of the file by removing it first", "new_meta", "=", "new_meta", ".", "replace", "(", "\"---\"", ",", "\"\"", ")", "new_meta", "=", "new_meta", ".", "lstrip", "(", ")", "# augment missing main keys", "augments", "=", "[", "(", "\"ansigenome_info\"", ",", "\"{}\"", ")", ",", "(", "\"galaxy_info\"", ",", "\"{}\"", ")", ",", "(", "\"dependencies\"", ",", "\"[]\"", ")", ",", "]", "new_meta", "=", "self", ".", "augment_main_keys", "(", "augments", ",", "new_meta", ")", "# re-attach the ---", "new_meta", "=", "\"---\\n\\n\"", "+", "new_meta", "travis_path", "=", "os", ".", "path", ".", "join", "(", "self", ".", "paths", "[", "\"role\"", "]", ",", "\".travis.yml\"", ")", "if", "os", ".", "path", ".", "exists", "(", "travis_path", ")", ":", "new_meta", "=", "new_meta", ".", "replace", "(", "\"travis: False\"", ",", "\"travis: True\"", ")", "utils", ".", "string_to_file", "(", "self", ".", "paths", "[", "\"meta\"", "]", ",", "new_meta", ")" ]
Create or augment a meta file.
[ "Create", "or", "augment", "a", "meta", "file", "." ]
70cd98d7a23d36c56f4e713ea820cfb4c485c81c
https://github.com/nickjj/ansigenome/blob/70cd98d7a23d36c56f4e713ea820cfb4c485c81c/ansigenome/scan.py#L360-L398
-1
251,445
nickjj/ansigenome
ansigenome/scan.py
Scan.write_readme
def write_readme(self, role): """ Write out a new readme file. """ j2_out = self.readme_template.render(self.readme_template_vars) self.update_gen_report(role, "readme", j2_out)
python
def write_readme(self, role): """ Write out a new readme file. """ j2_out = self.readme_template.render(self.readme_template_vars) self.update_gen_report(role, "readme", j2_out)
[ "def", "write_readme", "(", "self", ",", "role", ")", ":", "j2_out", "=", "self", ".", "readme_template", ".", "render", "(", "self", ".", "readme_template_vars", ")", "self", ".", "update_gen_report", "(", "role", ",", "\"readme\"", ",", "j2_out", ")" ]
Write out a new readme file.
[ "Write", "out", "a", "new", "readme", "file", "." ]
70cd98d7a23d36c56f4e713ea820cfb4c485c81c
https://github.com/nickjj/ansigenome/blob/70cd98d7a23d36c56f4e713ea820cfb4c485c81c/ansigenome/scan.py#L431-L437
-1
251,446
nickjj/ansigenome
ansigenome/scan.py
Scan.write_meta
def write_meta(self, role): """ Write out a new meta file. """ meta_file = utils.file_to_string(self.paths["meta"]) self.update_gen_report(role, "meta", meta_file)
python
def write_meta(self, role): """ Write out a new meta file. """ meta_file = utils.file_to_string(self.paths["meta"]) self.update_gen_report(role, "meta", meta_file)
[ "def", "write_meta", "(", "self", ",", "role", ")", ":", "meta_file", "=", "utils", ".", "file_to_string", "(", "self", ".", "paths", "[", "\"meta\"", "]", ")", "self", ".", "update_gen_report", "(", "role", ",", "\"meta\"", ",", "meta_file", ")" ]
Write out a new meta file.
[ "Write", "out", "a", "new", "meta", "file", "." ]
70cd98d7a23d36c56f4e713ea820cfb4c485c81c
https://github.com/nickjj/ansigenome/blob/70cd98d7a23d36c56f4e713ea820cfb4c485c81c/ansigenome/scan.py#L439-L445
-1
251,447
nickjj/ansigenome
ansigenome/scan.py
Scan.update_scan_report
def update_scan_report(self, role): """ Update the role state and adjust the scan totals. """ state = self.report["state"] # ensure the missing meta state is colored up and the ok count is good if self.gendoc: if self.report["roles"][role]["state"] == "missing_meta": return if os.path.exists(self.paths["readme"]): state["ok_role"] += 1 self.report["roles"][role]["state"] = "ok" else: state["missing_readme_role"] += 1 self.report["roles"][role]["state"] = "missing_readme"
python
def update_scan_report(self, role): """ Update the role state and adjust the scan totals. """ state = self.report["state"] # ensure the missing meta state is colored up and the ok count is good if self.gendoc: if self.report["roles"][role]["state"] == "missing_meta": return if os.path.exists(self.paths["readme"]): state["ok_role"] += 1 self.report["roles"][role]["state"] = "ok" else: state["missing_readme_role"] += 1 self.report["roles"][role]["state"] = "missing_readme"
[ "def", "update_scan_report", "(", "self", ",", "role", ")", ":", "state", "=", "self", ".", "report", "[", "\"state\"", "]", "# ensure the missing meta state is colored up and the ok count is good", "if", "self", ".", "gendoc", ":", "if", "self", ".", "report", "[", "\"roles\"", "]", "[", "role", "]", "[", "\"state\"", "]", "==", "\"missing_meta\"", ":", "return", "if", "os", ".", "path", ".", "exists", "(", "self", ".", "paths", "[", "\"readme\"", "]", ")", ":", "state", "[", "\"ok_role\"", "]", "+=", "1", "self", ".", "report", "[", "\"roles\"", "]", "[", "role", "]", "[", "\"state\"", "]", "=", "\"ok\"", "else", ":", "state", "[", "\"missing_readme_role\"", "]", "+=", "1", "self", ".", "report", "[", "\"roles\"", "]", "[", "role", "]", "[", "\"state\"", "]", "=", "\"missing_readme\"" ]
Update the role state and adjust the scan totals.
[ "Update", "the", "role", "state", "and", "adjust", "the", "scan", "totals", "." ]
70cd98d7a23d36c56f4e713ea820cfb4c485c81c
https://github.com/nickjj/ansigenome/blob/70cd98d7a23d36c56f4e713ea820cfb4c485c81c/ansigenome/scan.py#L447-L463
-1
251,448
nickjj/ansigenome
ansigenome/scan.py
Scan.update_gen_report
def update_gen_report(self, role, file, original): """ Update the role state and adjust the gen totals. """ state = self.report["state"] if not os.path.exists(self.paths[file]): state["ok_role"] += 1 self.report["roles"][role]["state"] = "ok" elif (self.report["roles"][role][file] != original and self.report["roles"][role]["state"] != "ok"): state["changed_role"] += 1 self.report["roles"][role]["state"] = "changed" elif self.report["roles"][role][file] == original: state["skipped_role"] += 1 self.report["roles"][role]["state"] = "skipped" return utils.string_to_file(self.paths[file], original)
python
def update_gen_report(self, role, file, original): """ Update the role state and adjust the gen totals. """ state = self.report["state"] if not os.path.exists(self.paths[file]): state["ok_role"] += 1 self.report["roles"][role]["state"] = "ok" elif (self.report["roles"][role][file] != original and self.report["roles"][role]["state"] != "ok"): state["changed_role"] += 1 self.report["roles"][role]["state"] = "changed" elif self.report["roles"][role][file] == original: state["skipped_role"] += 1 self.report["roles"][role]["state"] = "skipped" return utils.string_to_file(self.paths[file], original)
[ "def", "update_gen_report", "(", "self", ",", "role", ",", "file", ",", "original", ")", ":", "state", "=", "self", ".", "report", "[", "\"state\"", "]", "if", "not", "os", ".", "path", ".", "exists", "(", "self", ".", "paths", "[", "file", "]", ")", ":", "state", "[", "\"ok_role\"", "]", "+=", "1", "self", ".", "report", "[", "\"roles\"", "]", "[", "role", "]", "[", "\"state\"", "]", "=", "\"ok\"", "elif", "(", "self", ".", "report", "[", "\"roles\"", "]", "[", "role", "]", "[", "file", "]", "!=", "original", "and", "self", ".", "report", "[", "\"roles\"", "]", "[", "role", "]", "[", "\"state\"", "]", "!=", "\"ok\"", ")", ":", "state", "[", "\"changed_role\"", "]", "+=", "1", "self", ".", "report", "[", "\"roles\"", "]", "[", "role", "]", "[", "\"state\"", "]", "=", "\"changed\"", "elif", "self", ".", "report", "[", "\"roles\"", "]", "[", "role", "]", "[", "file", "]", "==", "original", ":", "state", "[", "\"skipped_role\"", "]", "+=", "1", "self", ".", "report", "[", "\"roles\"", "]", "[", "role", "]", "[", "\"state\"", "]", "=", "\"skipped\"", "return", "utils", ".", "string_to_file", "(", "self", ".", "paths", "[", "file", "]", ",", "original", ")" ]
Update the role state and adjust the gen totals.
[ "Update", "the", "role", "state", "and", "adjust", "the", "gen", "totals", "." ]
70cd98d7a23d36c56f4e713ea820cfb4c485c81c
https://github.com/nickjj/ansigenome/blob/70cd98d7a23d36c56f4e713ea820cfb4c485c81c/ansigenome/scan.py#L465-L483
-1
251,449
nickjj/ansigenome
ansigenome/scan.py
Scan.make_meta_dict_consistent
def make_meta_dict_consistent(self): """ Remove the possibility of the main keys being undefined. """ if self.meta_dict is None: self.meta_dict = {} if "galaxy_info" not in self.meta_dict: self.meta_dict["galaxy_info"] = {} if "dependencies" not in self.meta_dict: self.meta_dict["dependencies"] = [] if "ansigenome_info" not in self.meta_dict: self.meta_dict["ansigenome_info"] = {}
python
def make_meta_dict_consistent(self): """ Remove the possibility of the main keys being undefined. """ if self.meta_dict is None: self.meta_dict = {} if "galaxy_info" not in self.meta_dict: self.meta_dict["galaxy_info"] = {} if "dependencies" not in self.meta_dict: self.meta_dict["dependencies"] = [] if "ansigenome_info" not in self.meta_dict: self.meta_dict["ansigenome_info"] = {}
[ "def", "make_meta_dict_consistent", "(", "self", ")", ":", "if", "self", ".", "meta_dict", "is", "None", ":", "self", ".", "meta_dict", "=", "{", "}", "if", "\"galaxy_info\"", "not", "in", "self", ".", "meta_dict", ":", "self", ".", "meta_dict", "[", "\"galaxy_info\"", "]", "=", "{", "}", "if", "\"dependencies\"", "not", "in", "self", ".", "meta_dict", ":", "self", ".", "meta_dict", "[", "\"dependencies\"", "]", "=", "[", "]", "if", "\"ansigenome_info\"", "not", "in", "self", ".", "meta_dict", ":", "self", ".", "meta_dict", "[", "\"ansigenome_info\"", "]", "=", "{", "}" ]
Remove the possibility of the main keys being undefined.
[ "Remove", "the", "possibility", "of", "the", "main", "keys", "being", "undefined", "." ]
70cd98d7a23d36c56f4e713ea820cfb4c485c81c
https://github.com/nickjj/ansigenome/blob/70cd98d7a23d36c56f4e713ea820cfb4c485c81c/ansigenome/scan.py#L485-L499
-1
251,450
nickjj/ansigenome
ansigenome/scan.py
Scan.set_readme_template_vars
def set_readme_template_vars(self, role, repo_name): """ Set the readme template variables. """ # normalize and expose a bunch of fields to the template authors = [] author = { "name": self.config["author_name"], "company": self.config["author_company"], "url": self.config["author_url"], "email": self.config["author_email"], "twitter": self.config["author_twitter"], } scm = { "host": self.config["scm_host"], "repo_prefix": self.config["scm_repo_prefix"], "type": self.config["scm_type"], "user": self.config["scm_user"], } license = { "type": self.config["license_type"], "url": self.config["license_url"], } role_name = utils.normalize_role(role, self.config) normalized_role = { "name": role_name, "galaxy_name": "{0}.{1}".format(self.config["scm_user"], role_name), "slug": "{0}{1}".format(self.config["scm_repo_prefix"], role_name), } if "authors" in self.meta_dict["ansigenome_info"]: authors = self.meta_dict["ansigenome_info"]["authors"] else: authors = [author] if "github" in self.config["scm_host"]: self.config["author_github"] = "{0}/{1}".format( self.config["scm_host"], self.config["scm_user"]) self.readme_template_vars = { "authors": authors, "scm": scm, "role": normalized_role, "license": license, "galaxy_info": self.meta_dict["galaxy_info"], "dependencies": self.meta_dict["dependencies"], "ansigenome_info": self.meta_dict["ansigenome_info"] } # add the defaults and facts r_defaults = self.report["roles"][role]["defaults"] self.readme_template_vars["ansigenome_info"]["defaults"] = r_defaults facts = "\n".join(self.report["roles"][role]["facts"]) self.readme_template_vars["ansigenome_info"]["facts"] = facts
python
def set_readme_template_vars(self, role, repo_name): """ Set the readme template variables. """ # normalize and expose a bunch of fields to the template authors = [] author = { "name": self.config["author_name"], "company": self.config["author_company"], "url": self.config["author_url"], "email": self.config["author_email"], "twitter": self.config["author_twitter"], } scm = { "host": self.config["scm_host"], "repo_prefix": self.config["scm_repo_prefix"], "type": self.config["scm_type"], "user": self.config["scm_user"], } license = { "type": self.config["license_type"], "url": self.config["license_url"], } role_name = utils.normalize_role(role, self.config) normalized_role = { "name": role_name, "galaxy_name": "{0}.{1}".format(self.config["scm_user"], role_name), "slug": "{0}{1}".format(self.config["scm_repo_prefix"], role_name), } if "authors" in self.meta_dict["ansigenome_info"]: authors = self.meta_dict["ansigenome_info"]["authors"] else: authors = [author] if "github" in self.config["scm_host"]: self.config["author_github"] = "{0}/{1}".format( self.config["scm_host"], self.config["scm_user"]) self.readme_template_vars = { "authors": authors, "scm": scm, "role": normalized_role, "license": license, "galaxy_info": self.meta_dict["galaxy_info"], "dependencies": self.meta_dict["dependencies"], "ansigenome_info": self.meta_dict["ansigenome_info"] } # add the defaults and facts r_defaults = self.report["roles"][role]["defaults"] self.readme_template_vars["ansigenome_info"]["defaults"] = r_defaults facts = "\n".join(self.report["roles"][role]["facts"]) self.readme_template_vars["ansigenome_info"]["facts"] = facts
[ "def", "set_readme_template_vars", "(", "self", ",", "role", ",", "repo_name", ")", ":", "# normalize and expose a bunch of fields to the template", "authors", "=", "[", "]", "author", "=", "{", "\"name\"", ":", "self", ".", "config", "[", "\"author_name\"", "]", ",", "\"company\"", ":", "self", ".", "config", "[", "\"author_company\"", "]", ",", "\"url\"", ":", "self", ".", "config", "[", "\"author_url\"", "]", ",", "\"email\"", ":", "self", ".", "config", "[", "\"author_email\"", "]", ",", "\"twitter\"", ":", "self", ".", "config", "[", "\"author_twitter\"", "]", ",", "}", "scm", "=", "{", "\"host\"", ":", "self", ".", "config", "[", "\"scm_host\"", "]", ",", "\"repo_prefix\"", ":", "self", ".", "config", "[", "\"scm_repo_prefix\"", "]", ",", "\"type\"", ":", "self", ".", "config", "[", "\"scm_type\"", "]", ",", "\"user\"", ":", "self", ".", "config", "[", "\"scm_user\"", "]", ",", "}", "license", "=", "{", "\"type\"", ":", "self", ".", "config", "[", "\"license_type\"", "]", ",", "\"url\"", ":", "self", ".", "config", "[", "\"license_url\"", "]", ",", "}", "role_name", "=", "utils", ".", "normalize_role", "(", "role", ",", "self", ".", "config", ")", "normalized_role", "=", "{", "\"name\"", ":", "role_name", ",", "\"galaxy_name\"", ":", "\"{0}.{1}\"", ".", "format", "(", "self", ".", "config", "[", "\"scm_user\"", "]", ",", "role_name", ")", ",", "\"slug\"", ":", "\"{0}{1}\"", ".", "format", "(", "self", ".", "config", "[", "\"scm_repo_prefix\"", "]", ",", "role_name", ")", ",", "}", "if", "\"authors\"", "in", "self", ".", "meta_dict", "[", "\"ansigenome_info\"", "]", ":", "authors", "=", "self", ".", "meta_dict", "[", "\"ansigenome_info\"", "]", "[", "\"authors\"", "]", "else", ":", "authors", "=", "[", "author", "]", "if", "\"github\"", "in", "self", ".", "config", "[", "\"scm_host\"", "]", ":", "self", ".", "config", "[", "\"author_github\"", "]", "=", "\"{0}/{1}\"", ".", "format", "(", "self", ".", "config", "[", "\"scm_host\"", "]", ",", "self", ".", "config", "[", "\"scm_user\"", "]", ")", "self", ".", "readme_template_vars", "=", "{", "\"authors\"", ":", "authors", ",", "\"scm\"", ":", "scm", ",", "\"role\"", ":", "normalized_role", ",", "\"license\"", ":", "license", ",", "\"galaxy_info\"", ":", "self", ".", "meta_dict", "[", "\"galaxy_info\"", "]", ",", "\"dependencies\"", ":", "self", ".", "meta_dict", "[", "\"dependencies\"", "]", ",", "\"ansigenome_info\"", ":", "self", ".", "meta_dict", "[", "\"ansigenome_info\"", "]", "}", "# add the defaults and facts", "r_defaults", "=", "self", ".", "report", "[", "\"roles\"", "]", "[", "role", "]", "[", "\"defaults\"", "]", "self", ".", "readme_template_vars", "[", "\"ansigenome_info\"", "]", "[", "\"defaults\"", "]", "=", "r_defaults", "facts", "=", "\"\\n\"", ".", "join", "(", "self", ".", "report", "[", "\"roles\"", "]", "[", "role", "]", "[", "\"facts\"", "]", ")", "self", ".", "readme_template_vars", "[", "\"ansigenome_info\"", "]", "[", "\"facts\"", "]", "=", "facts" ]
Set the readme template variables.
[ "Set", "the", "readme", "template", "variables", "." ]
70cd98d7a23d36c56f4e713ea820cfb4c485c81c
https://github.com/nickjj/ansigenome/blob/70cd98d7a23d36c56f4e713ea820cfb4c485c81c/ansigenome/scan.py#L501-L561
-1
251,451
nickjj/ansigenome
ansigenome/init.py
Init.exit_if_path_exists
def exit_if_path_exists(self): """ Exit early if the path cannot be found. """ if os.path.exists(self.output_path): ui.error(c.MESSAGES["path_exists"], self.output_path) sys.exit(1)
python
def exit_if_path_exists(self): """ Exit early if the path cannot be found. """ if os.path.exists(self.output_path): ui.error(c.MESSAGES["path_exists"], self.output_path) sys.exit(1)
[ "def", "exit_if_path_exists", "(", "self", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "self", ".", "output_path", ")", ":", "ui", ".", "error", "(", "c", ".", "MESSAGES", "[", "\"path_exists\"", "]", ",", "self", ".", "output_path", ")", "sys", ".", "exit", "(", "1", ")" ]
Exit early if the path cannot be found.
[ "Exit", "early", "if", "the", "path", "cannot", "be", "found", "." ]
70cd98d7a23d36c56f4e713ea820cfb4c485c81c
https://github.com/nickjj/ansigenome/blob/70cd98d7a23d36c56f4e713ea820cfb4c485c81c/ansigenome/init.py#L62-L68
-1
251,452
nickjj/ansigenome
ansigenome/init.py
Init.create_skeleton
def create_skeleton(self): """ Create the role's directory and file structure. """ utils.string_to_file(os.path.join(self.output_path, "VERSION"), "master\n") for folder in c.ANSIBLE_FOLDERS: create_folder_path = os.path.join(self.output_path, folder) utils.mkdir_p(create_folder_path) mainyml_template = default_mainyml_template.replace( "%role_name", self.role_name) mainyml_template = mainyml_template.replace( "%values", folder) out_path = os.path.join(create_folder_path, "main.yml") if folder not in ("templates", "meta", "tests", "files"): utils.string_to_file(out_path, mainyml_template) if folder == "meta": utils.create_meta_main(out_path, self.config, self.role_name, self.options.galaxy_categories)
python
def create_skeleton(self): """ Create the role's directory and file structure. """ utils.string_to_file(os.path.join(self.output_path, "VERSION"), "master\n") for folder in c.ANSIBLE_FOLDERS: create_folder_path = os.path.join(self.output_path, folder) utils.mkdir_p(create_folder_path) mainyml_template = default_mainyml_template.replace( "%role_name", self.role_name) mainyml_template = mainyml_template.replace( "%values", folder) out_path = os.path.join(create_folder_path, "main.yml") if folder not in ("templates", "meta", "tests", "files"): utils.string_to_file(out_path, mainyml_template) if folder == "meta": utils.create_meta_main(out_path, self.config, self.role_name, self.options.galaxy_categories)
[ "def", "create_skeleton", "(", "self", ")", ":", "utils", ".", "string_to_file", "(", "os", ".", "path", ".", "join", "(", "self", ".", "output_path", ",", "\"VERSION\"", ")", ",", "\"master\\n\"", ")", "for", "folder", "in", "c", ".", "ANSIBLE_FOLDERS", ":", "create_folder_path", "=", "os", ".", "path", ".", "join", "(", "self", ".", "output_path", ",", "folder", ")", "utils", ".", "mkdir_p", "(", "create_folder_path", ")", "mainyml_template", "=", "default_mainyml_template", ".", "replace", "(", "\"%role_name\"", ",", "self", ".", "role_name", ")", "mainyml_template", "=", "mainyml_template", ".", "replace", "(", "\"%values\"", ",", "folder", ")", "out_path", "=", "os", ".", "path", ".", "join", "(", "create_folder_path", ",", "\"main.yml\"", ")", "if", "folder", "not", "in", "(", "\"templates\"", ",", "\"meta\"", ",", "\"tests\"", ",", "\"files\"", ")", ":", "utils", ".", "string_to_file", "(", "out_path", ",", "mainyml_template", ")", "if", "folder", "==", "\"meta\"", ":", "utils", ".", "create_meta_main", "(", "out_path", ",", "self", ".", "config", ",", "self", ".", "role_name", ",", "self", ".", "options", ".", "galaxy_categories", ")" ]
Create the role's directory and file structure.
[ "Create", "the", "role", "s", "directory", "and", "file", "structure", "." ]
70cd98d7a23d36c56f4e713ea820cfb4c485c81c
https://github.com/nickjj/ansigenome/blob/70cd98d7a23d36c56f4e713ea820cfb4c485c81c/ansigenome/init.py#L70-L94
-1
251,453
nickjj/ansigenome
ansigenome/init.py
Init.create_travis_config
def create_travis_config(self): """ Create a travis test setup. """ test_runner = self.config["options_test_runner"] role_url = "{0}".format(os.path.join(self.config["scm_host"], self.config["scm_user"], self.config["scm_repo_prefix"] + self.normalized_role)) travisyml_template = default_travisyml_template.replace( "%test_runner", test_runner) travisyml_template = travisyml_template.replace( "%basename", test_runner.split("/")[-1]) travisyml_template = travisyml_template.replace( "%role_url", role_url) utils.string_to_file(os.path.join(self.output_path, ".travis.yml"), travisyml_template)
python
def create_travis_config(self): """ Create a travis test setup. """ test_runner = self.config["options_test_runner"] role_url = "{0}".format(os.path.join(self.config["scm_host"], self.config["scm_user"], self.config["scm_repo_prefix"] + self.normalized_role)) travisyml_template = default_travisyml_template.replace( "%test_runner", test_runner) travisyml_template = travisyml_template.replace( "%basename", test_runner.split("/")[-1]) travisyml_template = travisyml_template.replace( "%role_url", role_url) utils.string_to_file(os.path.join(self.output_path, ".travis.yml"), travisyml_template)
[ "def", "create_travis_config", "(", "self", ")", ":", "test_runner", "=", "self", ".", "config", "[", "\"options_test_runner\"", "]", "role_url", "=", "\"{0}\"", ".", "format", "(", "os", ".", "path", ".", "join", "(", "self", ".", "config", "[", "\"scm_host\"", "]", ",", "self", ".", "config", "[", "\"scm_user\"", "]", ",", "self", ".", "config", "[", "\"scm_repo_prefix\"", "]", "+", "self", ".", "normalized_role", ")", ")", "travisyml_template", "=", "default_travisyml_template", ".", "replace", "(", "\"%test_runner\"", ",", "test_runner", ")", "travisyml_template", "=", "travisyml_template", ".", "replace", "(", "\"%basename\"", ",", "test_runner", ".", "split", "(", "\"/\"", ")", "[", "-", "1", "]", ")", "travisyml_template", "=", "travisyml_template", ".", "replace", "(", "\"%role_url\"", ",", "role_url", ")", "utils", ".", "string_to_file", "(", "os", ".", "path", ".", "join", "(", "self", ".", "output_path", ",", "\".travis.yml\"", ")", ",", "travisyml_template", ")" ]
Create a travis test setup.
[ "Create", "a", "travis", "test", "setup", "." ]
70cd98d7a23d36c56f4e713ea820cfb4c485c81c
https://github.com/nickjj/ansigenome/blob/70cd98d7a23d36c56f4e713ea820cfb4c485c81c/ansigenome/init.py#L96-L115
-1
251,454
nickjj/ansigenome
ansigenome/export.py
Export.set_format
def set_format(self, format): """ Pick the correct default format. """ if self.options.format: self.format = self.options.format else: self.format = \ self.config["default_format_" + format]
python
def set_format(self, format): """ Pick the correct default format. """ if self.options.format: self.format = self.options.format else: self.format = \ self.config["default_format_" + format]
[ "def", "set_format", "(", "self", ",", "format", ")", ":", "if", "self", ".", "options", ".", "format", ":", "self", ".", "format", "=", "self", ".", "options", ".", "format", "else", ":", "self", ".", "format", "=", "self", ".", "config", "[", "\"default_format_\"", "+", "format", "]" ]
Pick the correct default format.
[ "Pick", "the", "correct", "default", "format", "." ]
70cd98d7a23d36c56f4e713ea820cfb4c485c81c
https://github.com/nickjj/ansigenome/blob/70cd98d7a23d36c56f4e713ea820cfb4c485c81c/ansigenome/export.py#L61-L69
-1
251,455
nickjj/ansigenome
ansigenome/export.py
Export.validate_format
def validate_format(self, allowed_formats): """ Validate the allowed formats for a specific type. """ if self.format in allowed_formats: return ui.error("Export type '{0}' does not accept '{1}' format, only: " "{2}".format(self.type, self.format, allowed_formats)) sys.exit(1)
python
def validate_format(self, allowed_formats): """ Validate the allowed formats for a specific type. """ if self.format in allowed_formats: return ui.error("Export type '{0}' does not accept '{1}' format, only: " "{2}".format(self.type, self.format, allowed_formats)) sys.exit(1)
[ "def", "validate_format", "(", "self", ",", "allowed_formats", ")", ":", "if", "self", ".", "format", "in", "allowed_formats", ":", "return", "ui", ".", "error", "(", "\"Export type '{0}' does not accept '{1}' format, only: \"", "\"{2}\"", ".", "format", "(", "self", ".", "type", ",", "self", ".", "format", ",", "allowed_formats", ")", ")", "sys", ".", "exit", "(", "1", ")" ]
Validate the allowed formats for a specific type.
[ "Validate", "the", "allowed", "formats", "for", "a", "specific", "type", "." ]
70cd98d7a23d36c56f4e713ea820cfb4c485c81c
https://github.com/nickjj/ansigenome/blob/70cd98d7a23d36c56f4e713ea820cfb4c485c81c/ansigenome/export.py#L71-L80
-1
251,456
nickjj/ansigenome
ansigenome/export.py
Export.graph_dot
def graph_dot(self): """ Export a graph of the data in dot format. """ default_graphviz_template = """ digraph role_dependencies { size="%size" dpi=%dpi ratio="fill" landscape=false rankdir="BT"; node [shape = "box", style = "rounded,filled", fillcolor = "lightgrey", fontsize = 20]; edge [style = "dashed", dir = "forward", penwidth = 1.5]; %roles_list %dependencies } """ roles_list = "" edges = "" # remove the darkest and brightest colors, still have 100+ colors adjusted_colors = c.X11_COLORS[125:-325] random.shuffle(adjusted_colors) backup_colors = adjusted_colors[:] for role, fields in sorted(self.report["roles"].iteritems()): name = utils.normalize_role(role, self.config) color_length = len(adjusted_colors) - 1 # reset the colors if we run out if color_length == 0: adjusted_colors = backup_colors[:] color_length = len(adjusted_colors) - 1 random_index = random.randint(1, color_length) roles_list += " role_{0} [label = \"{1}\"]\n" \ .format(re.sub(r'[.-/]', '_', name), name) edge = '\n edge [color = "{0}"];\n' \ .format(adjusted_colors[random_index]) del adjusted_colors[random_index] if fields["dependencies"]: dependencies = "" for dependency in sorted(fields["dependencies"]): dependency_name = utils.role_name(dependency) dependencies += " role_{0} -> role_{1}\n".format( re.sub(r'[.-/]', '_', name), re.sub(r'[.-/]', '_', utils.normalize_role(dependency_name, self.config) ) ) edges += "{0}{1}\n".format(edge, dependencies) graphviz_template = default_graphviz_template.replace("%roles_list", roles_list) graphviz_template = graphviz_template.replace("%dependencies", edges) graphviz_template = graphviz_template.replace("%size", self.size) graphviz_template = graphviz_template.replace("%dpi", str(self.dpi)) if self.out_file: utils.string_to_file(self.out_file, graphviz_template) else: print graphviz_template
python
def graph_dot(self): """ Export a graph of the data in dot format. """ default_graphviz_template = """ digraph role_dependencies { size="%size" dpi=%dpi ratio="fill" landscape=false rankdir="BT"; node [shape = "box", style = "rounded,filled", fillcolor = "lightgrey", fontsize = 20]; edge [style = "dashed", dir = "forward", penwidth = 1.5]; %roles_list %dependencies } """ roles_list = "" edges = "" # remove the darkest and brightest colors, still have 100+ colors adjusted_colors = c.X11_COLORS[125:-325] random.shuffle(adjusted_colors) backup_colors = adjusted_colors[:] for role, fields in sorted(self.report["roles"].iteritems()): name = utils.normalize_role(role, self.config) color_length = len(adjusted_colors) - 1 # reset the colors if we run out if color_length == 0: adjusted_colors = backup_colors[:] color_length = len(adjusted_colors) - 1 random_index = random.randint(1, color_length) roles_list += " role_{0} [label = \"{1}\"]\n" \ .format(re.sub(r'[.-/]', '_', name), name) edge = '\n edge [color = "{0}"];\n' \ .format(adjusted_colors[random_index]) del adjusted_colors[random_index] if fields["dependencies"]: dependencies = "" for dependency in sorted(fields["dependencies"]): dependency_name = utils.role_name(dependency) dependencies += " role_{0} -> role_{1}\n".format( re.sub(r'[.-/]', '_', name), re.sub(r'[.-/]', '_', utils.normalize_role(dependency_name, self.config) ) ) edges += "{0}{1}\n".format(edge, dependencies) graphviz_template = default_graphviz_template.replace("%roles_list", roles_list) graphviz_template = graphviz_template.replace("%dependencies", edges) graphviz_template = graphviz_template.replace("%size", self.size) graphviz_template = graphviz_template.replace("%dpi", str(self.dpi)) if self.out_file: utils.string_to_file(self.out_file, graphviz_template) else: print graphviz_template
[ "def", "graph_dot", "(", "self", ")", ":", "default_graphviz_template", "=", "\"\"\"\ndigraph role_dependencies {\n size=\"%size\"\n dpi=%dpi\n ratio=\"fill\"\n landscape=false\n rankdir=\"BT\";\n\n node [shape = \"box\",\n style = \"rounded,filled\",\n fillcolor = \"lightgrey\",\n fontsize = 20];\n\n edge [style = \"dashed\",\n dir = \"forward\",\n penwidth = 1.5];\n\n%roles_list\n\n%dependencies\n}\n\"\"\"", "roles_list", "=", "\"\"", "edges", "=", "\"\"", "# remove the darkest and brightest colors, still have 100+ colors", "adjusted_colors", "=", "c", ".", "X11_COLORS", "[", "125", ":", "-", "325", "]", "random", ".", "shuffle", "(", "adjusted_colors", ")", "backup_colors", "=", "adjusted_colors", "[", ":", "]", "for", "role", ",", "fields", "in", "sorted", "(", "self", ".", "report", "[", "\"roles\"", "]", ".", "iteritems", "(", ")", ")", ":", "name", "=", "utils", ".", "normalize_role", "(", "role", ",", "self", ".", "config", ")", "color_length", "=", "len", "(", "adjusted_colors", ")", "-", "1", "# reset the colors if we run out", "if", "color_length", "==", "0", ":", "adjusted_colors", "=", "backup_colors", "[", ":", "]", "color_length", "=", "len", "(", "adjusted_colors", ")", "-", "1", "random_index", "=", "random", ".", "randint", "(", "1", ",", "color_length", ")", "roles_list", "+=", "\" role_{0} [label = \\\"{1}\\\"]\\n\"", ".", "format", "(", "re", ".", "sub", "(", "r'[.-/]'", ",", "'_'", ",", "name", ")", ",", "name", ")", "edge", "=", "'\\n edge [color = \"{0}\"];\\n'", ".", "format", "(", "adjusted_colors", "[", "random_index", "]", ")", "del", "adjusted_colors", "[", "random_index", "]", "if", "fields", "[", "\"dependencies\"", "]", ":", "dependencies", "=", "\"\"", "for", "dependency", "in", "sorted", "(", "fields", "[", "\"dependencies\"", "]", ")", ":", "dependency_name", "=", "utils", ".", "role_name", "(", "dependency", ")", "dependencies", "+=", "\" role_{0} -> role_{1}\\n\"", ".", "format", "(", "re", ".", "sub", "(", "r'[.-/]'", ",", "'_'", ",", "name", ")", ",", "re", ".", "sub", "(", "r'[.-/]'", ",", "'_'", ",", "utils", ".", "normalize_role", "(", "dependency_name", ",", "self", ".", "config", ")", ")", ")", "edges", "+=", "\"{0}{1}\\n\"", ".", "format", "(", "edge", ",", "dependencies", ")", "graphviz_template", "=", "default_graphviz_template", ".", "replace", "(", "\"%roles_list\"", ",", "roles_list", ")", "graphviz_template", "=", "graphviz_template", ".", "replace", "(", "\"%dependencies\"", ",", "edges", ")", "graphviz_template", "=", "graphviz_template", ".", "replace", "(", "\"%size\"", ",", "self", ".", "size", ")", "graphviz_template", "=", "graphviz_template", ".", "replace", "(", "\"%dpi\"", ",", "str", "(", "self", ".", "dpi", ")", ")", "if", "self", ".", "out_file", ":", "utils", ".", "string_to_file", "(", "self", ".", "out_file", ",", "graphviz_template", ")", "else", ":", "print", "graphviz_template" ]
Export a graph of the data in dot format.
[ "Export", "a", "graph", "of", "the", "data", "in", "dot", "format", "." ]
70cd98d7a23d36c56f4e713ea820cfb4c485c81c
https://github.com/nickjj/ansigenome/blob/70cd98d7a23d36c56f4e713ea820cfb4c485c81c/ansigenome/export.py#L82-L161
-1
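The graph_dot record above emits plain Graphviz source; a minimal sketch (the role names are hypothetical) of writing a comparable digraph and rendering it with the dot CLI:

    # Write a tiny digraph in the same shape graph_dot() builds, then render it
    # with Graphviz on the command line:  dot -Tpng roles.dot -o roles.png
    dot_text = """digraph role_dependencies {
        rankdir="BT";
        role_nginx [label = "nginx"]
        role_security [label = "security"]
        edge [color = "steelblue"];
        role_nginx -> role_security
    }
    """
    with open("roles.dot", "w") as f:
        f.write(dot_text)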
251,457
nickjj/ansigenome
ansigenome/export.py
Export.exit_if_missing_graphviz
def exit_if_missing_graphviz(self): """ Detect the presence of the dot utility to make a png graph. """ (out, err) = utils.capture_shell("which dot") if "dot" not in out: ui.error(c.MESSAGES["dot_missing"])
python
def exit_if_missing_graphviz(self): """ Detect the presence of the dot utility to make a png graph. """ (out, err) = utils.capture_shell("which dot") if "dot" not in out: ui.error(c.MESSAGES["dot_missing"])
[ "def", "exit_if_missing_graphviz", "(", "self", ")", ":", "(", "out", ",", "err", ")", "=", "utils", ".", "capture_shell", "(", "\"which dot\"", ")", "if", "\"dot\"", "not", "in", "out", ":", "ui", ".", "error", "(", "c", ".", "MESSAGES", "[", "\"dot_missing\"", "]", ")" ]
Detect the presence of the dot utility to make a png graph.
[ "Detect", "the", "presence", "of", "the", "dot", "utility", "to", "make", "a", "png", "graph", "." ]
70cd98d7a23d36c56f4e713ea820cfb4c485c81c
https://github.com/nickjj/ansigenome/blob/70cd98d7a23d36c56f4e713ea820cfb4c485c81c/ansigenome/export.py#L182-L189
-1
251,458
nickjj/ansigenome
ansigenome/export.py
Export.reqs_txt
def reqs_txt(self): """ Export a requirements file in txt format. """ role_lines = "" for role in sorted(self.report["roles"]): name = utils.normalize_role(role, self.config) galaxy_name = "{0}.{1}".format(self.config["scm_user"], name) version_path = os.path.join(self.roles_path, role, "VERSION") version = utils.get_version(version_path) role_lines += "{0},{1}\n".format(galaxy_name, version) if self.out_file: utils.string_to_file(self.out_file, role_lines) else: print role_lines
python
def reqs_txt(self): """ Export a requirements file in txt format. """ role_lines = "" for role in sorted(self.report["roles"]): name = utils.normalize_role(role, self.config) galaxy_name = "{0}.{1}".format(self.config["scm_user"], name) version_path = os.path.join(self.roles_path, role, "VERSION") version = utils.get_version(version_path) role_lines += "{0},{1}\n".format(galaxy_name, version) if self.out_file: utils.string_to_file(self.out_file, role_lines) else: print role_lines
[ "def", "reqs_txt", "(", "self", ")", ":", "role_lines", "=", "\"\"", "for", "role", "in", "sorted", "(", "self", ".", "report", "[", "\"roles\"", "]", ")", ":", "name", "=", "utils", ".", "normalize_role", "(", "role", ",", "self", ".", "config", ")", "galaxy_name", "=", "\"{0}.{1}\"", ".", "format", "(", "self", ".", "config", "[", "\"scm_user\"", "]", ",", "name", ")", "version_path", "=", "os", ".", "path", ".", "join", "(", "self", ".", "roles_path", ",", "role", ",", "\"VERSION\"", ")", "version", "=", "utils", ".", "get_version", "(", "version_path", ")", "role_lines", "+=", "\"{0},{1}\\n\"", ".", "format", "(", "galaxy_name", ",", "version", ")", "if", "self", ".", "out_file", ":", "utils", ".", "string_to_file", "(", "self", ".", "out_file", ",", "role_lines", ")", "else", ":", "print", "role_lines" ]
Export a requirements file in txt format.
[ "Export", "a", "requirements", "file", "in", "txt", "format", "." ]
70cd98d7a23d36c56f4e713ea820cfb4c485c81c
https://github.com/nickjj/ansigenome/blob/70cd98d7a23d36c56f4e713ea820cfb4c485c81c/ansigenome/export.py#L191-L208
-1
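reqs_txt above boils down to one comma-separated line per role; a self-contained sketch of the same formatting (the scm user and version strings below are made up):

    scm_user = "nickjj"                                  # hypothetical scm_user from the config
    roles = {"nginx": "v1.0.0", "security": "v2.1.0"}    # role -> contents of its VERSION file
    lines = "".join("{0}.{1},{2}\n".format(scm_user, name, version)
                    for name, version in sorted(roles.items()))
    print(lines)
    # nickjj.nginx,v1.0.0
    # nickjj.security,v2.1.0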
251,459
nickjj/ansigenome
ansigenome/export.py
Export.reqs_yml
def reqs_yml(self): """ Export a requirements file in yml format. """ default_yml_item = """ - src: '%src' name: '%name' scm: '%scm' version: '%version' """ role_lines = "---\n" for role in sorted(self.report["roles"]): name = utils.normalize_role(role, self.config) galaxy_name = "{0}.{1}".format(self.config["scm_user"], name) yml_item = default_yml_item if self.config["scm_host"]: yml_item = yml_item.replace("%name", "{0}".format(galaxy_name)) if self.config["scm_repo_prefix"]: role = self.config["scm_repo_prefix"] + name src = os.path.join(self.config["scm_host"], self.config["scm_user"], role) else: src = galaxy_name yml_item = yml_item.replace(" name: '%name'\n", "") yml_item = yml_item.replace(" scm: '%scm'\n", "") yml_item = yml_item.replace("%src", src) if self.config["scm_type"]: yml_item = yml_item.replace("%scm", self.config["scm_type"]) else: yml_item = yml_item.replace(" scm: '%scm'\n", "") version_path = os.path.join(self.roles_path, role, "VERSION") version = utils.get_version(version_path) yml_item = yml_item.replace("%version", version) role_lines += "{0}".format(yml_item) if self.out_file: utils.string_to_file(self.out_file, role_lines) else: print role_lines
python
def reqs_yml(self): """ Export a requirements file in yml format. """ default_yml_item = """ - src: '%src' name: '%name' scm: '%scm' version: '%version' """ role_lines = "---\n" for role in sorted(self.report["roles"]): name = utils.normalize_role(role, self.config) galaxy_name = "{0}.{1}".format(self.config["scm_user"], name) yml_item = default_yml_item if self.config["scm_host"]: yml_item = yml_item.replace("%name", "{0}".format(galaxy_name)) if self.config["scm_repo_prefix"]: role = self.config["scm_repo_prefix"] + name src = os.path.join(self.config["scm_host"], self.config["scm_user"], role) else: src = galaxy_name yml_item = yml_item.replace(" name: '%name'\n", "") yml_item = yml_item.replace(" scm: '%scm'\n", "") yml_item = yml_item.replace("%src", src) if self.config["scm_type"]: yml_item = yml_item.replace("%scm", self.config["scm_type"]) else: yml_item = yml_item.replace(" scm: '%scm'\n", "") version_path = os.path.join(self.roles_path, role, "VERSION") version = utils.get_version(version_path) yml_item = yml_item.replace("%version", version) role_lines += "{0}".format(yml_item) if self.out_file: utils.string_to_file(self.out_file, role_lines) else: print role_lines
[ "def", "reqs_yml", "(", "self", ")", ":", "default_yml_item", "=", "\"\"\"\n- src: '%src'\n name: '%name'\n scm: '%scm'\n version: '%version'\n\"\"\"", "role_lines", "=", "\"---\\n\"", "for", "role", "in", "sorted", "(", "self", ".", "report", "[", "\"roles\"", "]", ")", ":", "name", "=", "utils", ".", "normalize_role", "(", "role", ",", "self", ".", "config", ")", "galaxy_name", "=", "\"{0}.{1}\"", ".", "format", "(", "self", ".", "config", "[", "\"scm_user\"", "]", ",", "name", ")", "yml_item", "=", "default_yml_item", "if", "self", ".", "config", "[", "\"scm_host\"", "]", ":", "yml_item", "=", "yml_item", ".", "replace", "(", "\"%name\"", ",", "\"{0}\"", ".", "format", "(", "galaxy_name", ")", ")", "if", "self", ".", "config", "[", "\"scm_repo_prefix\"", "]", ":", "role", "=", "self", ".", "config", "[", "\"scm_repo_prefix\"", "]", "+", "name", "src", "=", "os", ".", "path", ".", "join", "(", "self", ".", "config", "[", "\"scm_host\"", "]", ",", "self", ".", "config", "[", "\"scm_user\"", "]", ",", "role", ")", "else", ":", "src", "=", "galaxy_name", "yml_item", "=", "yml_item", ".", "replace", "(", "\" name: '%name'\\n\"", ",", "\"\"", ")", "yml_item", "=", "yml_item", ".", "replace", "(", "\" scm: '%scm'\\n\"", ",", "\"\"", ")", "yml_item", "=", "yml_item", ".", "replace", "(", "\"%src\"", ",", "src", ")", "if", "self", ".", "config", "[", "\"scm_type\"", "]", ":", "yml_item", "=", "yml_item", ".", "replace", "(", "\"%scm\"", ",", "self", ".", "config", "[", "\"scm_type\"", "]", ")", "else", ":", "yml_item", "=", "yml_item", ".", "replace", "(", "\" scm: '%scm'\\n\"", ",", "\"\"", ")", "version_path", "=", "os", ".", "path", ".", "join", "(", "self", ".", "roles_path", ",", "role", ",", "\"VERSION\"", ")", "version", "=", "utils", ".", "get_version", "(", "version_path", ")", "yml_item", "=", "yml_item", ".", "replace", "(", "\"%version\"", ",", "version", ")", "role_lines", "+=", "\"{0}\"", ".", "format", "(", "yml_item", ")", "if", "self", ".", "out_file", ":", "utils", ".", "string_to_file", "(", "self", ".", "out_file", ",", "role_lines", ")", "else", ":", "print", "role_lines" ]
Export a requirements file in yml format.
[ "Export", "a", "requirements", "file", "in", "yml", "format", "." ]
70cd98d7a23d36c56f4e713ea820cfb4c485c81c
https://github.com/nickjj/ansigenome/blob/70cd98d7a23d36c56f4e713ea820cfb4c485c81c/ansigenome/export.py#L210-L256
-1
251,460
nickjj/ansigenome
ansigenome/export.py
Export.dump
def dump(self): """ Dump the output to json. """ report_as_json_string = utils.dict_to_json(self.report) if self.out_file: utils.string_to_file(self.out_file, report_as_json_string) else: print report_as_json_string
python
def dump(self): """ Dump the output to json. """ report_as_json_string = utils.dict_to_json(self.report) if self.out_file: utils.string_to_file(self.out_file, report_as_json_string) else: print report_as_json_string
[ "def", "dump", "(", "self", ")", ":", "report_as_json_string", "=", "utils", ".", "dict_to_json", "(", "self", ".", "report", ")", "if", "self", ".", "out_file", ":", "utils", ".", "string_to_file", "(", "self", ".", "out_file", ",", "report_as_json_string", ")", "else", ":", "print", "report_as_json_string" ]
Dump the output to json.
[ "Dump", "the", "output", "to", "json", "." ]
70cd98d7a23d36c56f4e713ea820cfb4c485c81c
https://github.com/nickjj/ansigenome/blob/70cd98d7a23d36c56f4e713ea820cfb4c485c81c/ansigenome/export.py#L258-L266
-1
251,461
SethMMorton/natsort
natsort/compat/fake_fastnumbers.py
fast_float
def fast_float( x, key=lambda x: x, nan=None, _uni=unicodedata.numeric, _nan_inf=NAN_INF, _first_char=POTENTIAL_FIRST_CHAR, ): """ Convert a string to a float quickly, return input as-is if not possible. We don't need to accept all input that the real fast_int accepts because natsort is controlling what is passed to this function. Parameters ---------- x : str String to attempt to convert to a float. key : callable Single-argument function to apply to *x* if conversion fails. nan : object Value to return instead of NaN if NaN would be returned. Returns ------- *str* or *float* """ if x[0] in _first_char or x.lstrip()[:3] in _nan_inf: try: x = float(x) return nan if nan is not None and x != x else x except ValueError: try: return _uni(x, key(x)) if len(x) == 1 else key(x) except TypeError: # pragma: no cover return key(x) else: try: return _uni(x, key(x)) if len(x) == 1 else key(x) except TypeError: # pragma: no cover return key(x)
python
def fast_float( x, key=lambda x: x, nan=None, _uni=unicodedata.numeric, _nan_inf=NAN_INF, _first_char=POTENTIAL_FIRST_CHAR, ): """ Convert a string to a float quickly, return input as-is if not possible. We don't need to accept all input that the real fast_int accepts because natsort is controlling what is passed to this function. Parameters ---------- x : str String to attempt to convert to a float. key : callable Single-argument function to apply to *x* if conversion fails. nan : object Value to return instead of NaN if NaN would be returned. Returns ------- *str* or *float* """ if x[0] in _first_char or x.lstrip()[:3] in _nan_inf: try: x = float(x) return nan if nan is not None and x != x else x except ValueError: try: return _uni(x, key(x)) if len(x) == 1 else key(x) except TypeError: # pragma: no cover return key(x) else: try: return _uni(x, key(x)) if len(x) == 1 else key(x) except TypeError: # pragma: no cover return key(x)
[ "def", "fast_float", "(", "x", ",", "key", "=", "lambda", "x", ":", "x", ",", "nan", "=", "None", ",", "_uni", "=", "unicodedata", ".", "numeric", ",", "_nan_inf", "=", "NAN_INF", ",", "_first_char", "=", "POTENTIAL_FIRST_CHAR", ",", ")", ":", "if", "x", "[", "0", "]", "in", "_first_char", "or", "x", ".", "lstrip", "(", ")", "[", ":", "3", "]", "in", "_nan_inf", ":", "try", ":", "x", "=", "float", "(", "x", ")", "return", "nan", "if", "nan", "is", "not", "None", "and", "x", "!=", "x", "else", "x", "except", "ValueError", ":", "try", ":", "return", "_uni", "(", "x", ",", "key", "(", "x", ")", ")", "if", "len", "(", "x", ")", "==", "1", "else", "key", "(", "x", ")", "except", "TypeError", ":", "# pragma: no cover", "return", "key", "(", "x", ")", "else", ":", "try", ":", "return", "_uni", "(", "x", ",", "key", "(", "x", ")", ")", "if", "len", "(", "x", ")", "==", "1", "else", "key", "(", "x", ")", "except", "TypeError", ":", "# pragma: no cover", "return", "key", "(", "x", ")" ]
Convert a string to a float quickly, return input as-is if not possible. We don't need to accept all input that the real fast_int accepts because natsort is controlling what is passed to this function. Parameters ---------- x : str String to attempt to convert to a float. key : callable Single-argument function to apply to *x* if conversion fails. nan : object Value to return instead of NaN if NaN would be returned. Returns ------- *str* or *float*
[ "Convert", "a", "string", "to", "a", "float", "quickly", "return", "input", "as", "-", "is", "if", "not", "possible", "." ]
ea0d37ef790b42c424a096e079edd9ea0d5717e3
https://github.com/SethMMorton/natsort/blob/ea0d37ef790b42c424a096e079edd9ea0d5717e3/natsort/compat/fake_fastnumbers.py#L44-L85
-1
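A rough idea of how fast_float behaves for the three input classes it distinguishes, assuming the import path shown in this record (the exact unicode handling depends on POTENTIAL_FIRST_CHAR, which is not shown here):

    from natsort.compat.fake_fastnumbers import fast_float

    fast_float("3.25")                    # 3.25 (parsed as a float)
    fast_float("hello")                   # 'hello' (falls through to key, which defaults to identity)
    fast_float("nan", nan=float("-inf"))  # -inf (NaN swapped for the nan= replacement)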
251,462
SethMMorton/natsort
natsort/compat/fake_fastnumbers.py
fast_int
def fast_int( x, key=lambda x: x, _uni=unicodedata.digit, _first_char=POTENTIAL_FIRST_CHAR, ): """ Convert a string to a int quickly, return input as-is if not possible. We don't need to accept all input that the real fast_int accepts because natsort is controlling what is passed to this function. Parameters ---------- x : str String to attempt to convert to an int. key : callable Single-argument function to apply to *x* if conversion fails. Returns ------- *str* or *int* """ if x[0] in _first_char: try: return long(x) except ValueError: try: return _uni(x, key(x)) if len(x) == 1 else key(x) except TypeError: # pragma: no cover return key(x) else: try: return _uni(x, key(x)) if len(x) == 1 else key(x) except TypeError: # pragma: no cover return key(x)
python
def fast_int( x, key=lambda x: x, _uni=unicodedata.digit, _first_char=POTENTIAL_FIRST_CHAR, ): """ Convert a string to a int quickly, return input as-is if not possible. We don't need to accept all input that the real fast_int accepts because natsort is controlling what is passed to this function. Parameters ---------- x : str String to attempt to convert to an int. key : callable Single-argument function to apply to *x* if conversion fails. Returns ------- *str* or *int* """ if x[0] in _first_char: try: return long(x) except ValueError: try: return _uni(x, key(x)) if len(x) == 1 else key(x) except TypeError: # pragma: no cover return key(x) else: try: return _uni(x, key(x)) if len(x) == 1 else key(x) except TypeError: # pragma: no cover return key(x)
[ "def", "fast_int", "(", "x", ",", "key", "=", "lambda", "x", ":", "x", ",", "_uni", "=", "unicodedata", ".", "digit", ",", "_first_char", "=", "POTENTIAL_FIRST_CHAR", ",", ")", ":", "if", "x", "[", "0", "]", "in", "_first_char", ":", "try", ":", "return", "long", "(", "x", ")", "except", "ValueError", ":", "try", ":", "return", "_uni", "(", "x", ",", "key", "(", "x", ")", ")", "if", "len", "(", "x", ")", "==", "1", "else", "key", "(", "x", ")", "except", "TypeError", ":", "# pragma: no cover", "return", "key", "(", "x", ")", "else", ":", "try", ":", "return", "_uni", "(", "x", ",", "key", "(", "x", ")", ")", "if", "len", "(", "x", ")", "==", "1", "else", "key", "(", "x", ")", "except", "TypeError", ":", "# pragma: no cover", "return", "key", "(", "x", ")" ]
Convert a string to a int quickly, return input as-is if not possible. We don't need to accept all input that the real fast_int accepts because natsort is controlling what is passed to this function. Parameters ---------- x : str String to attempt to convert to an int. key : callable Single-argument function to apply to *x* if conversion fails. Returns ------- *str* or *int*
[ "Convert", "a", "string", "to", "a", "int", "quickly", "return", "input", "as", "-", "is", "if", "not", "possible", "." ]
ea0d37ef790b42c424a096e079edd9ea0d5717e3
https://github.com/SethMMorton/natsort/blob/ea0d37ef790b42c424a096e079edd9ea0d5717e3/natsort/compat/fake_fastnumbers.py#L89-L125
-1
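And the integer counterpart, again only a sketch based on the body shown above; the key callable is consulted only when conversion fails:

    from natsort.compat.fake_fastnumbers import fast_int

    fast_int("42")                  # 42
    fast_int("x7")                  # 'x7' (not a pure integer, returned via the identity key)
    fast_int("x7", key=str.upper)   # 'X7' (key applied because int conversion is impossible)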
251,463
SethMMorton/natsort
natsort/__main__.py
check_filters
def check_filters(filters): """ Execute range_check for every element of an iterable. Parameters ---------- filters : iterable The collection of filters to check. Each element must be a two-element tuple of floats or ints. Returns ------- The input as-is, or None if it evaluates to False. Raises ------ ValueError Low is greater than or equal to high for any element. """ if not filters: return None try: return [range_check(f[0], f[1]) for f in filters] except ValueError as err: raise ValueError("Error in --filter: " + py23_str(err))
python
def check_filters(filters): """ Execute range_check for every element of an iterable. Parameters ---------- filters : iterable The collection of filters to check. Each element must be a two-element tuple of floats or ints. Returns ------- The input as-is, or None if it evaluates to False. Raises ------ ValueError Low is greater than or equal to high for any element. """ if not filters: return None try: return [range_check(f[0], f[1]) for f in filters] except ValueError as err: raise ValueError("Error in --filter: " + py23_str(err))
[ "def", "check_filters", "(", "filters", ")", ":", "if", "not", "filters", ":", "return", "None", "try", ":", "return", "[", "range_check", "(", "f", "[", "0", "]", ",", "f", "[", "1", "]", ")", "for", "f", "in", "filters", "]", "except", "ValueError", "as", "err", ":", "raise", "ValueError", "(", "\"Error in --filter: \"", "+", "py23_str", "(", "err", ")", ")" ]
Execute range_check for every element of an iterable. Parameters ---------- filters : iterable The collection of filters to check. Each element must be a two-element tuple of floats or ints. Returns ------- The input as-is, or None if it evaluates to False. Raises ------ ValueError Low is greater than or equal to high for any element.
[ "Execute", "range_check", "for", "every", "element", "of", "an", "iterable", "." ]
ea0d37ef790b42c424a096e079edd9ea0d5717e3
https://github.com/SethMMorton/natsort/blob/ea0d37ef790b42c424a096e079edd9ea0d5717e3/natsort/__main__.py#L169-L194
-1
251,464
SethMMorton/natsort
natsort/__main__.py
keep_entry_range
def keep_entry_range(entry, lows, highs, converter, regex): """ Check if an entry falls into a desired range. Every number in the entry will be extracted using *regex*, if any are within a given low to high range the entry will be kept. Parameters ---------- entry : str lows : iterable Collection of low values against which to compare the entry. highs : iterable Collection of high values against which to compare the entry. converter : callable Function to convert a string to a number. regex : regex object Regular expression to locate numbers in a string. Returns ------- True if the entry should be kept, False otherwise. """ return any( low <= converter(num) <= high for num in regex.findall(entry) for low, high in zip(lows, highs) )
python
def keep_entry_range(entry, lows, highs, converter, regex): """ Check if an entry falls into a desired range. Every number in the entry will be extracted using *regex*, if any are within a given low to high range the entry will be kept. Parameters ---------- entry : str lows : iterable Collection of low values against which to compare the entry. highs : iterable Collection of high values against which to compare the entry. converter : callable Function to convert a string to a number. regex : regex object Regular expression to locate numbers in a string. Returns ------- True if the entry should be kept, False otherwise. """ return any( low <= converter(num) <= high for num in regex.findall(entry) for low, high in zip(lows, highs) )
[ "def", "keep_entry_range", "(", "entry", ",", "lows", ",", "highs", ",", "converter", ",", "regex", ")", ":", "return", "any", "(", "low", "<=", "converter", "(", "num", ")", "<=", "high", "for", "num", "in", "regex", ".", "findall", "(", "entry", ")", "for", "low", ",", "high", "in", "zip", "(", "lows", ",", "highs", ")", ")" ]
Check if an entry falls into a desired range. Every number in the entry will be extracted using *regex*, if any are within a given low to high range the entry will be kept. Parameters ---------- entry : str lows : iterable Collection of low values against which to compare the entry. highs : iterable Collection of high values against which to compare the entry. converter : callable Function to convert a string to a number. regex : regex object Regular expression to locate numbers in a string. Returns ------- True if the entry should be kept, False otherwise.
[ "Check", "if", "an", "entry", "falls", "into", "a", "desired", "range", "." ]
ea0d37ef790b42c424a096e079edd9ea0d5717e3
https://github.com/SethMMorton/natsort/blob/ea0d37ef790b42c424a096e079edd9ea0d5717e3/natsort/__main__.py#L197-L226
-1
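keep_entry_range is easy to exercise directly; a small sketch using a plain \d+ regex in place of the regex_chooser output:

    import re
    from natsort.__main__ import keep_entry_range

    number = re.compile(r"\d+")
    keep_entry_range("file12.txt", [10], [20], int, number)   # True: 12 falls inside [10, 20]
    keep_entry_range("file99.txt", [10], [20], int, number)   # False: 99 is outside every range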
251,465
SethMMorton/natsort
natsort/__main__.py
keep_entry_value
def keep_entry_value(entry, values, converter, regex): """ Check if an entry does not match a given value. Every number in the entry will be extracted using *regex*, if any match a given value the entry will not be kept. Parameters ---------- entry : str values : iterable Collection of values against which to compare the entry. converter : callable Function to convert a string to a number. regex : regex object Regular expression to locate numbers in a string. Returns ------- True if the entry should be kept, False otherwise. """ return not any(converter(num) in values for num in regex.findall(entry))
python
def keep_entry_value(entry, values, converter, regex): """ Check if an entry does not match a given value. Every number in the entry will be extracted using *regex*, if any match a given value the entry will not be kept. Parameters ---------- entry : str values : iterable Collection of values against which to compare the entry. converter : callable Function to convert a string to a number. regex : regex object Regular expression to locate numbers in a string. Returns ------- True if the entry should be kept, False otherwise. """ return not any(converter(num) in values for num in regex.findall(entry))
[ "def", "keep_entry_value", "(", "entry", ",", "values", ",", "converter", ",", "regex", ")", ":", "return", "not", "any", "(", "converter", "(", "num", ")", "in", "values", "for", "num", "in", "regex", ".", "findall", "(", "entry", ")", ")" ]
Check if an entry does not match a given value. Every number in the entry will be extracted using *regex*, if any match a given value the entry will not be kept. Parameters ---------- entry : str values : iterable Collection of values against which to compare the entry. converter : callable Function to convert a string to a number. regex : regex object Regular expression to locate numbers in a string. Returns ------- True if the entry should be kept, False otherwise.
[ "Check", "if", "an", "entry", "does", "not", "match", "a", "given", "value", "." ]
ea0d37ef790b42c424a096e079edd9ea0d5717e3
https://github.com/SethMMorton/natsort/blob/ea0d37ef790b42c424a096e079edd9ea0d5717e3/natsort/__main__.py#L229-L251
-1
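keep_entry_value is the exclusion counterpart; a matching sketch:

    import re
    from natsort.__main__ import keep_entry_value

    number = re.compile(r"\d+")
    keep_entry_value("file12.txt", {12.0}, float, number)   # False: 12 matches an excluded value
    keep_entry_value("file12.txt", {13.0}, float, number)   # True: nothing in the entry is excluded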
251,466
SethMMorton/natsort
natsort/__main__.py
sort_and_print_entries
def sort_and_print_entries(entries, args): """Sort the entries, applying the filters first if necessary.""" # Extract the proper number type. is_float = args.number_type in ("float", "real", "f", "r") signed = args.signed or args.number_type in ("real", "r") alg = ( natsort.ns.FLOAT * is_float | natsort.ns.SIGNED * signed | natsort.ns.NOEXP * (not args.exp) | natsort.ns.PATH * args.paths | natsort.ns.LOCALE * args.locale ) # Pre-remove entries that don't pass the filtering criteria # Make sure we use the same searching algorithm for filtering # as for sorting. do_filter = args.filter is not None or args.reverse_filter is not None if do_filter or args.exclude: inp_options = ( natsort.ns.FLOAT * is_float | natsort.ns.SIGNED * signed | natsort.ns.NOEXP * (not args.exp) ) regex = regex_chooser(inp_options) if args.filter is not None: lows, highs = ([f[0] for f in args.filter], [f[1] for f in args.filter]) entries = [ entry for entry in entries if keep_entry_range(entry, lows, highs, float, regex) ] if args.reverse_filter is not None: lows, highs = ( [f[0] for f in args.reverse_filter], [f[1] for f in args.reverse_filter], ) entries = [ entry for entry in entries if not keep_entry_range(entry, lows, highs, float, regex) ] if args.exclude: exclude = set(args.exclude) entries = [ entry for entry in entries if keep_entry_value(entry, exclude, float, regex) ] # Print off the sorted results for entry in natsort.natsorted(entries, reverse=args.reverse, alg=alg): print(entry)
python
def sort_and_print_entries(entries, args): """Sort the entries, applying the filters first if necessary.""" # Extract the proper number type. is_float = args.number_type in ("float", "real", "f", "r") signed = args.signed or args.number_type in ("real", "r") alg = ( natsort.ns.FLOAT * is_float | natsort.ns.SIGNED * signed | natsort.ns.NOEXP * (not args.exp) | natsort.ns.PATH * args.paths | natsort.ns.LOCALE * args.locale ) # Pre-remove entries that don't pass the filtering criteria # Make sure we use the same searching algorithm for filtering # as for sorting. do_filter = args.filter is not None or args.reverse_filter is not None if do_filter or args.exclude: inp_options = ( natsort.ns.FLOAT * is_float | natsort.ns.SIGNED * signed | natsort.ns.NOEXP * (not args.exp) ) regex = regex_chooser(inp_options) if args.filter is not None: lows, highs = ([f[0] for f in args.filter], [f[1] for f in args.filter]) entries = [ entry for entry in entries if keep_entry_range(entry, lows, highs, float, regex) ] if args.reverse_filter is not None: lows, highs = ( [f[0] for f in args.reverse_filter], [f[1] for f in args.reverse_filter], ) entries = [ entry for entry in entries if not keep_entry_range(entry, lows, highs, float, regex) ] if args.exclude: exclude = set(args.exclude) entries = [ entry for entry in entries if keep_entry_value(entry, exclude, float, regex) ] # Print off the sorted results for entry in natsort.natsorted(entries, reverse=args.reverse, alg=alg): print(entry)
[ "def", "sort_and_print_entries", "(", "entries", ",", "args", ")", ":", "# Extract the proper number type.", "is_float", "=", "args", ".", "number_type", "in", "(", "\"float\"", ",", "\"real\"", ",", "\"f\"", ",", "\"r\"", ")", "signed", "=", "args", ".", "signed", "or", "args", ".", "number_type", "in", "(", "\"real\"", ",", "\"r\"", ")", "alg", "=", "(", "natsort", ".", "ns", ".", "FLOAT", "*", "is_float", "|", "natsort", ".", "ns", ".", "SIGNED", "*", "signed", "|", "natsort", ".", "ns", ".", "NOEXP", "*", "(", "not", "args", ".", "exp", ")", "|", "natsort", ".", "ns", ".", "PATH", "*", "args", ".", "paths", "|", "natsort", ".", "ns", ".", "LOCALE", "*", "args", ".", "locale", ")", "# Pre-remove entries that don't pass the filtering criteria", "# Make sure we use the same searching algorithm for filtering", "# as for sorting.", "do_filter", "=", "args", ".", "filter", "is", "not", "None", "or", "args", ".", "reverse_filter", "is", "not", "None", "if", "do_filter", "or", "args", ".", "exclude", ":", "inp_options", "=", "(", "natsort", ".", "ns", ".", "FLOAT", "*", "is_float", "|", "natsort", ".", "ns", ".", "SIGNED", "*", "signed", "|", "natsort", ".", "ns", ".", "NOEXP", "*", "(", "not", "args", ".", "exp", ")", ")", "regex", "=", "regex_chooser", "(", "inp_options", ")", "if", "args", ".", "filter", "is", "not", "None", ":", "lows", ",", "highs", "=", "(", "[", "f", "[", "0", "]", "for", "f", "in", "args", ".", "filter", "]", ",", "[", "f", "[", "1", "]", "for", "f", "in", "args", ".", "filter", "]", ")", "entries", "=", "[", "entry", "for", "entry", "in", "entries", "if", "keep_entry_range", "(", "entry", ",", "lows", ",", "highs", ",", "float", ",", "regex", ")", "]", "if", "args", ".", "reverse_filter", "is", "not", "None", ":", "lows", ",", "highs", "=", "(", "[", "f", "[", "0", "]", "for", "f", "in", "args", ".", "reverse_filter", "]", ",", "[", "f", "[", "1", "]", "for", "f", "in", "args", ".", "reverse_filter", "]", ",", ")", "entries", "=", "[", "entry", "for", "entry", "in", "entries", "if", "not", "keep_entry_range", "(", "entry", ",", "lows", ",", "highs", ",", "float", ",", "regex", ")", "]", "if", "args", ".", "exclude", ":", "exclude", "=", "set", "(", "args", ".", "exclude", ")", "entries", "=", "[", "entry", "for", "entry", "in", "entries", "if", "keep_entry_value", "(", "entry", ",", "exclude", ",", "float", ",", "regex", ")", "]", "# Print off the sorted results", "for", "entry", "in", "natsort", ".", "natsorted", "(", "entries", ",", "reverse", "=", "args", ".", "reverse", ",", "alg", "=", "alg", ")", ":", "print", "(", "entry", ")" ]
Sort the entries, applying the filters first if necessary.
[ "Sort", "the", "entries", "applying", "the", "filters", "first", "if", "necessary", "." ]
ea0d37ef790b42c424a096e079edd9ea0d5717e3
https://github.com/SethMMorton/natsort/blob/ea0d37ef790b42c424a096e079edd9ea0d5717e3/natsort/__main__.py#L254-L306
-1
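The flag-to-ns mapping in sort_and_print_entries mirrors what the public API accepts; a sketch of the equivalent library call for the signed-float ("real") case:

    import natsort

    # Roughly what the CLI's real number type selects: signed floats, exponents allowed.
    alg = natsort.ns.FLOAT | natsort.ns.SIGNED
    natsort.natsorted(["a1", "a-1", "a-1.5"], alg=alg)
    # ['a-1.5', 'a-1', 'a1']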
251,467
SethMMorton/natsort
natsort/utils.py
regex_chooser
def regex_chooser(alg): """ Select an appropriate regex for the type of number of interest. Parameters ---------- alg : ns enum Used to indicate the regular expression to select. Returns ------- regex : compiled regex object Regular expression object that matches the desired number type. """ if alg & ns.FLOAT: alg &= ns.FLOAT | ns.SIGNED | ns.NOEXP else: alg &= ns.INT | ns.SIGNED return { ns.INT: NumericalRegularExpressions.int_nosign(), ns.FLOAT: NumericalRegularExpressions.float_nosign_exp(), ns.INT | ns.SIGNED: NumericalRegularExpressions.int_sign(), ns.FLOAT | ns.SIGNED: NumericalRegularExpressions.float_sign_exp(), ns.FLOAT | ns.NOEXP: NumericalRegularExpressions.float_nosign_noexp(), ns.FLOAT | ns.SIGNED | ns.NOEXP: NumericalRegularExpressions.float_sign_noexp(), }[alg]
python
def regex_chooser(alg): """ Select an appropriate regex for the type of number of interest. Parameters ---------- alg : ns enum Used to indicate the regular expression to select. Returns ------- regex : compiled regex object Regular expression object that matches the desired number type. """ if alg & ns.FLOAT: alg &= ns.FLOAT | ns.SIGNED | ns.NOEXP else: alg &= ns.INT | ns.SIGNED return { ns.INT: NumericalRegularExpressions.int_nosign(), ns.FLOAT: NumericalRegularExpressions.float_nosign_exp(), ns.INT | ns.SIGNED: NumericalRegularExpressions.int_sign(), ns.FLOAT | ns.SIGNED: NumericalRegularExpressions.float_sign_exp(), ns.FLOAT | ns.NOEXP: NumericalRegularExpressions.float_nosign_noexp(), ns.FLOAT | ns.SIGNED | ns.NOEXP: NumericalRegularExpressions.float_sign_noexp(), }[alg]
[ "def", "regex_chooser", "(", "alg", ")", ":", "if", "alg", "&", "ns", ".", "FLOAT", ":", "alg", "&=", "ns", ".", "FLOAT", "|", "ns", ".", "SIGNED", "|", "ns", ".", "NOEXP", "else", ":", "alg", "&=", "ns", ".", "INT", "|", "ns", ".", "SIGNED", "return", "{", "ns", ".", "INT", ":", "NumericalRegularExpressions", ".", "int_nosign", "(", ")", ",", "ns", ".", "FLOAT", ":", "NumericalRegularExpressions", ".", "float_nosign_exp", "(", ")", ",", "ns", ".", "INT", "|", "ns", ".", "SIGNED", ":", "NumericalRegularExpressions", ".", "int_sign", "(", ")", ",", "ns", ".", "FLOAT", "|", "ns", ".", "SIGNED", ":", "NumericalRegularExpressions", ".", "float_sign_exp", "(", ")", ",", "ns", ".", "FLOAT", "|", "ns", ".", "NOEXP", ":", "NumericalRegularExpressions", ".", "float_nosign_noexp", "(", ")", ",", "ns", ".", "FLOAT", "|", "ns", ".", "SIGNED", "|", "ns", ".", "NOEXP", ":", "NumericalRegularExpressions", ".", "float_sign_noexp", "(", ")", ",", "}", "[", "alg", "]" ]
Select an appropriate regex for the type of number of interest. Parameters ---------- alg : ns enum Used to indicate the regular expression to select. Returns ------- regex : compiled regex object Regular expression object that matches the desired number type.
[ "Select", "an", "appropriate", "regex", "for", "the", "type", "of", "number", "of", "interest", "." ]
ea0d37ef790b42c424a096e079edd9ea0d5717e3
https://github.com/SethMMorton/natsort/blob/ea0d37ef790b42c424a096e079edd9ea0d5717e3/natsort/utils.py#L127-L154
-1
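The masking at the top of regex_chooser is what guarantees the dict lookup always hits one of its six keys; a tiny sketch of that bit arithmetic:

    from natsort import ns

    alg = ns.FLOAT | ns.SIGNED | ns.LOCALE | ns.PATH
    masked = alg & (ns.FLOAT | ns.SIGNED | ns.NOEXP)   # what regex_chooser keeps when FLOAT is set
    assert masked == ns.FLOAT | ns.SIGNED              # LOCALE and PATH bits are ignored here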
251,468
SethMMorton/natsort
natsort/utils.py
_normalize_input_factory
def _normalize_input_factory(alg): """ Create a function that will normalize unicode input data. Parameters ---------- alg : ns enum Used to indicate how to normalize unicode. Returns ------- func : callable A function that accepts string (unicode) input and returns the the input normalized with the desired normalization scheme. """ normalization_form = "NFKD" if alg & ns.COMPATIBILITYNORMALIZE else "NFD" wrapped = partial(normalize, normalization_form) if NEWPY: return wrapped else: return lambda x, _f=wrapped: _f(x) if isinstance(x, py23_str) else x
python
def _normalize_input_factory(alg): """ Create a function that will normalize unicode input data. Parameters ---------- alg : ns enum Used to indicate how to normalize unicode. Returns ------- func : callable A function that accepts string (unicode) input and returns the the input normalized with the desired normalization scheme. """ normalization_form = "NFKD" if alg & ns.COMPATIBILITYNORMALIZE else "NFD" wrapped = partial(normalize, normalization_form) if NEWPY: return wrapped else: return lambda x, _f=wrapped: _f(x) if isinstance(x, py23_str) else x
[ "def", "_normalize_input_factory", "(", "alg", ")", ":", "normalization_form", "=", "\"NFKD\"", "if", "alg", "&", "ns", ".", "COMPATIBILITYNORMALIZE", "else", "\"NFD\"", "wrapped", "=", "partial", "(", "normalize", ",", "normalization_form", ")", "if", "NEWPY", ":", "return", "wrapped", "else", ":", "return", "lambda", "x", ",", "_f", "=", "wrapped", ":", "_f", "(", "x", ")", "if", "isinstance", "(", "x", ",", "py23_str", ")", "else", "x" ]
Create a function that will normalize unicode input data. Parameters ---------- alg : ns enum Used to indicate how to normalize unicode. Returns ------- func : callable A function that accepts string (unicode) input and returns the the input normalized with the desired normalization scheme.
[ "Create", "a", "function", "that", "will", "normalize", "unicode", "input", "data", "." ]
ea0d37ef790b42c424a096e079edd9ea0d5717e3
https://github.com/SethMMorton/natsort/blob/ea0d37ef790b42c424a096e079edd9ea0d5717e3/natsort/utils.py#L162-L183
-1
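The only moving part in _normalize_input_factory is the choice between NFD and NFKD; the standard-library behaviour it wraps looks like this:

    from unicodedata import normalize

    normalize("NFD", "ﬁle")    # 'ﬁle'  -- canonical decomposition keeps the ligature
    normalize("NFKD", "ﬁle")   # 'file' -- compatibility decomposition expands it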
251,469
SethMMorton/natsort
natsort/utils.py
natsort_key
def natsort_key(val, key, string_func, bytes_func, num_func): """ Key to sort strings and numbers naturally. It works by splitting the string into components of strings and numbers, and then converting the numbers into actual ints or floats. Parameters ---------- val : str | unicode | bytes | int | float | iterable key : callable | None A key to apply to the *val* before any other operations are performed. string_func : callable If *val* (or the output of *key* if given) is of type *str*, this function will be applied to it. The function must return a tuple. bytes_func : callable If *val* (or the output of *key* if given) is of type *bytes*, this function will be applied to it. The function must return a tuple. num_func : callable If *val* (or the output of *key* if given) is not of type *bytes*, *str*, nor is iterable, this function will be applied to it. The function must return a tuple. Returns ------- out : tuple The string split into its string and numeric components. It *always* starts with a string, and then alternates between numbers and strings (unless it was applied recursively, in which case it will return tuples of tuples, but the lowest-level tuples will then *always* start with a string etc.). See Also -------- parse_string_factory parse_bytes_factory parse_number_factory """ # Apply key if needed if key is not None: val = key(val) # Assume the input are strings, which is the most common case try: return string_func(val) except (TypeError, AttributeError): # If bytes type, use the bytes_func if type(val) in (bytes,): return bytes_func(val) # Otherwise, assume it is an iterable that must be parsed recursively. # Do not apply the key recursively. try: return tuple( natsort_key(x, None, string_func, bytes_func, num_func) for x in val ) # If that failed, it must be a number. except TypeError: return num_func(val)
python
def natsort_key(val, key, string_func, bytes_func, num_func): """ Key to sort strings and numbers naturally. It works by splitting the string into components of strings and numbers, and then converting the numbers into actual ints or floats. Parameters ---------- val : str | unicode | bytes | int | float | iterable key : callable | None A key to apply to the *val* before any other operations are performed. string_func : callable If *val* (or the output of *key* if given) is of type *str*, this function will be applied to it. The function must return a tuple. bytes_func : callable If *val* (or the output of *key* if given) is of type *bytes*, this function will be applied to it. The function must return a tuple. num_func : callable If *val* (or the output of *key* if given) is not of type *bytes*, *str*, nor is iterable, this function will be applied to it. The function must return a tuple. Returns ------- out : tuple The string split into its string and numeric components. It *always* starts with a string, and then alternates between numbers and strings (unless it was applied recursively, in which case it will return tuples of tuples, but the lowest-level tuples will then *always* start with a string etc.). See Also -------- parse_string_factory parse_bytes_factory parse_number_factory """ # Apply key if needed if key is not None: val = key(val) # Assume the input are strings, which is the most common case try: return string_func(val) except (TypeError, AttributeError): # If bytes type, use the bytes_func if type(val) in (bytes,): return bytes_func(val) # Otherwise, assume it is an iterable that must be parsed recursively. # Do not apply the key recursively. try: return tuple( natsort_key(x, None, string_func, bytes_func, num_func) for x in val ) # If that failed, it must be a number. except TypeError: return num_func(val)
[ "def", "natsort_key", "(", "val", ",", "key", ",", "string_func", ",", "bytes_func", ",", "num_func", ")", ":", "# Apply key if needed", "if", "key", "is", "not", "None", ":", "val", "=", "key", "(", "val", ")", "# Assume the input are strings, which is the most common case", "try", ":", "return", "string_func", "(", "val", ")", "except", "(", "TypeError", ",", "AttributeError", ")", ":", "# If bytes type, use the bytes_func", "if", "type", "(", "val", ")", "in", "(", "bytes", ",", ")", ":", "return", "bytes_func", "(", "val", ")", "# Otherwise, assume it is an iterable that must be parsed recursively.", "# Do not apply the key recursively.", "try", ":", "return", "tuple", "(", "natsort_key", "(", "x", ",", "None", ",", "string_func", ",", "bytes_func", ",", "num_func", ")", "for", "x", "in", "val", ")", "# If that failed, it must be a number.", "except", "TypeError", ":", "return", "num_func", "(", "val", ")" ]
Key to sort strings and numbers naturally. It works by splitting the string into components of strings and numbers, and then converting the numbers into actual ints or floats. Parameters ---------- val : str | unicode | bytes | int | float | iterable key : callable | None A key to apply to the *val* before any other operations are performed. string_func : callable If *val* (or the output of *key* if given) is of type *str*, this function will be applied to it. The function must return a tuple. bytes_func : callable If *val* (or the output of *key* if given) is of type *bytes*, this function will be applied to it. The function must return a tuple. num_func : callable If *val* (or the output of *key* if given) is not of type *bytes*, *str*, nor is iterable, this function will be applied to it. The function must return a tuple. Returns ------- out : tuple The string split into its string and numeric components. It *always* starts with a string, and then alternates between numbers and strings (unless it was applied recursively, in which case it will return tuples of tuples, but the lowest-level tuples will then *always* start with a string etc.). See Also -------- parse_string_factory parse_bytes_factory parse_number_factory
[ "Key", "to", "sort", "strings", "and", "numbers", "naturally", "." ]
ea0d37ef790b42c424a096e079edd9ea0d5717e3
https://github.com/SethMMorton/natsort/blob/ea0d37ef790b42c424a096e079edd9ea0d5717e3/natsort/utils.py#L186-L251
-1
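natsort_key is what natsort_keygen wires together; through the public API the resulting tuples look roughly like this (exact contents depend on the selected ns flags):

    from natsort import natsort_keygen

    key = natsort_keygen()            # default: unsigned ints, no exponents
    key("version-1.10.txt")           # e.g. ('version-', 1, '.', 10, '.txt')
    sorted(["a10", "a2"], key=key)    # ['a2', 'a10']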
251,470
SethMMorton/natsort
natsort/utils.py
parse_number_factory
def parse_number_factory(alg, sep, pre_sep): """ Create a function that will format a number into a tuple. Parameters ---------- alg : ns enum Indicate how to format the *bytes*. sep : str The string character to be inserted before the number in the returned tuple. pre_sep : str In the event that *alg* contains ``UNGROUPLETTERS``, this string will be placed in a single-element tuple at the front of the returned nested tuple. Returns ------- func : callable A function that accepts numeric input (e.g. *int* or *float*) and returns a tuple containing the number with the leading string *sep*. Intended to be used as the *num_func* argument to *natsort_key*. See Also -------- natsort_key """ nan_replace = float("+inf") if alg & ns.NANLAST else float("-inf") def func(val, _nan_replace=nan_replace, _sep=sep): """Given a number, place it in a tuple with a leading null string.""" return _sep, _nan_replace if val != val else val # Return the function, possibly wrapping in tuple if PATH is selected. if alg & ns.PATH and alg & ns.UNGROUPLETTERS and alg & ns.LOCALEALPHA: return lambda x: (((pre_sep,), func(x)),) elif alg & ns.UNGROUPLETTERS and alg & ns.LOCALEALPHA: return lambda x: ((pre_sep,), func(x)) elif alg & ns.PATH: return lambda x: (func(x),) else: return func
python
def parse_number_factory(alg, sep, pre_sep): """ Create a function that will format a number into a tuple. Parameters ---------- alg : ns enum Indicate how to format the *bytes*. sep : str The string character to be inserted before the number in the returned tuple. pre_sep : str In the event that *alg* contains ``UNGROUPLETTERS``, this string will be placed in a single-element tuple at the front of the returned nested tuple. Returns ------- func : callable A function that accepts numeric input (e.g. *int* or *float*) and returns a tuple containing the number with the leading string *sep*. Intended to be used as the *num_func* argument to *natsort_key*. See Also -------- natsort_key """ nan_replace = float("+inf") if alg & ns.NANLAST else float("-inf") def func(val, _nan_replace=nan_replace, _sep=sep): """Given a number, place it in a tuple with a leading null string.""" return _sep, _nan_replace if val != val else val # Return the function, possibly wrapping in tuple if PATH is selected. if alg & ns.PATH and alg & ns.UNGROUPLETTERS and alg & ns.LOCALEALPHA: return lambda x: (((pre_sep,), func(x)),) elif alg & ns.UNGROUPLETTERS and alg & ns.LOCALEALPHA: return lambda x: ((pre_sep,), func(x)) elif alg & ns.PATH: return lambda x: (func(x),) else: return func
[ "def", "parse_number_factory", "(", "alg", ",", "sep", ",", "pre_sep", ")", ":", "nan_replace", "=", "float", "(", "\"+inf\"", ")", "if", "alg", "&", "ns", ".", "NANLAST", "else", "float", "(", "\"-inf\"", ")", "def", "func", "(", "val", ",", "_nan_replace", "=", "nan_replace", ",", "_sep", "=", "sep", ")", ":", "\"\"\"Given a number, place it in a tuple with a leading null string.\"\"\"", "return", "_sep", ",", "_nan_replace", "if", "val", "!=", "val", "else", "val", "# Return the function, possibly wrapping in tuple if PATH is selected.", "if", "alg", "&", "ns", ".", "PATH", "and", "alg", "&", "ns", ".", "UNGROUPLETTERS", "and", "alg", "&", "ns", ".", "LOCALEALPHA", ":", "return", "lambda", "x", ":", "(", "(", "(", "pre_sep", ",", ")", ",", "func", "(", "x", ")", ")", ",", ")", "elif", "alg", "&", "ns", ".", "UNGROUPLETTERS", "and", "alg", "&", "ns", ".", "LOCALEALPHA", ":", "return", "lambda", "x", ":", "(", "(", "pre_sep", ",", ")", ",", "func", "(", "x", ")", ")", "elif", "alg", "&", "ns", ".", "PATH", ":", "return", "lambda", "x", ":", "(", "func", "(", "x", ")", ",", ")", "else", ":", "return", "func" ]
Create a function that will format a number into a tuple. Parameters ---------- alg : ns enum Indicate how to format the *bytes*. sep : str The string character to be inserted before the number in the returned tuple. pre_sep : str In the event that *alg* contains ``UNGROUPLETTERS``, this string will be placed in a single-element tuple at the front of the returned nested tuple. Returns ------- func : callable A function that accepts numeric input (e.g. *int* or *float*) and returns a tuple containing the number with the leading string *sep*. Intended to be used as the *num_func* argument to *natsort_key*. See Also -------- natsort_key
[ "Create", "a", "function", "that", "will", "format", "a", "number", "into", "a", "tuple", "." ]
ea0d37ef790b42c424a096e079edd9ea0d5717e3
https://github.com/SethMMorton/natsort/blob/ea0d37ef790b42c424a096e079edd9ea0d5717e3/natsort/utils.py#L287-L330
-1
251,471
SethMMorton/natsort
natsort/utils.py
sep_inserter
def sep_inserter(iterable, sep): """ Insert '' between numbers in an iterable. Parameters ---------- iterable sep : str The string character to be inserted between adjacent numeric objects. Yields ------ The values of *iterable* in order, with *sep* inserted where adjacent elements are numeric. If the first element in the input is numeric then *sep* will be the first value yielded. """ try: # Get the first element. A StopIteration indicates an empty iterable. # Since we are controlling the types of the input, 'type' is used # instead of 'isinstance' for the small speed advantage it offers. types = (int, float, long) first = next(iterable) if type(first) in types: yield sep yield first # Now, check if pair of elements are both numbers. If so, add ''. second = next(iterable) if type(first) in types and type(second) in types: yield sep yield second # Now repeat in a loop. for x in iterable: first, second = second, x if type(first) in types and type(second) in types: yield sep yield second except StopIteration: # Catch StopIteration per deprecation in PEP 479: # "Change StopIteration handling inside generators" return
python
def sep_inserter(iterable, sep): """ Insert '' between numbers in an iterable. Parameters ---------- iterable sep : str The string character to be inserted between adjacent numeric objects. Yields ------ The values of *iterable* in order, with *sep* inserted where adjacent elements are numeric. If the first element in the input is numeric then *sep* will be the first value yielded. """ try: # Get the first element. A StopIteration indicates an empty iterable. # Since we are controlling the types of the input, 'type' is used # instead of 'isinstance' for the small speed advantage it offers. types = (int, float, long) first = next(iterable) if type(first) in types: yield sep yield first # Now, check if pair of elements are both numbers. If so, add ''. second = next(iterable) if type(first) in types and type(second) in types: yield sep yield second # Now repeat in a loop. for x in iterable: first, second = second, x if type(first) in types and type(second) in types: yield sep yield second except StopIteration: # Catch StopIteration per deprecation in PEP 479: # "Change StopIteration handling inside generators" return
[ "def", "sep_inserter", "(", "iterable", ",", "sep", ")", ":", "try", ":", "# Get the first element. A StopIteration indicates an empty iterable.", "# Since we are controlling the types of the input, 'type' is used", "# instead of 'isinstance' for the small speed advantage it offers.", "types", "=", "(", "int", ",", "float", ",", "long", ")", "first", "=", "next", "(", "iterable", ")", "if", "type", "(", "first", ")", "in", "types", ":", "yield", "sep", "yield", "first", "# Now, check if pair of elements are both numbers. If so, add ''.", "second", "=", "next", "(", "iterable", ")", "if", "type", "(", "first", ")", "in", "types", "and", "type", "(", "second", ")", "in", "types", ":", "yield", "sep", "yield", "second", "# Now repeat in a loop.", "for", "x", "in", "iterable", ":", "first", ",", "second", "=", "second", ",", "x", "if", "type", "(", "first", ")", "in", "types", "and", "type", "(", "second", ")", "in", "types", ":", "yield", "sep", "yield", "second", "except", "StopIteration", ":", "# Catch StopIteration per deprecation in PEP 479:", "# \"Change StopIteration handling inside generators\"", "return" ]
Insert '' between numbers in an iterable. Parameters ---------- iterable sep : str The string character to be inserted between adjacent numeric objects. Yields ------ The values of *iterable* in order, with *sep* inserted where adjacent elements are numeric. If the first element in the input is numeric then *sep* will be the first value yielded.
[ "Insert", "between", "numbers", "in", "an", "iterable", "." ]
ea0d37ef790b42c424a096e079edd9ea0d5717e3
https://github.com/SethMMorton/natsort/blob/ea0d37ef790b42c424a096e079edd9ea0d5717e3/natsort/utils.py#L428-L470
-1
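sep_inserter consumes an iterator (it calls next() on its argument), so wrap lists in iter(); a sketch of the padding it produces:

    from natsort.utils import sep_inserter

    list(sep_inserter(iter([5, "b", 9, 10]), ""))
    # ['', 5, 'b', 9, '', 10]  -- '' is injected before a leading number and between adjacent numbers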
251,472
SethMMorton/natsort
natsort/utils.py
input_string_transform_factory
def input_string_transform_factory(alg): """ Create a function to transform a string. Parameters ---------- alg : ns enum Indicate how to format the *str*. Returns ------- func : callable A function to be used as the *input_transform* argument to *parse_string_factory*. See Also -------- parse_string_factory """ # Shortcuts. lowfirst = alg & ns.LOWERCASEFIRST dumb = alg & NS_DUMB # Build the chain of functions to execute in order. function_chain = [] if (dumb and not lowfirst) or (lowfirst and not dumb): function_chain.append(methodcaller("swapcase")) if alg & ns.IGNORECASE: if NEWPY: function_chain.append(methodcaller("casefold")) else: function_chain.append(methodcaller("lower")) if alg & ns.LOCALENUM: # Create a regular expression that will remove thousands separators. strip_thousands = r""" (?<=[0-9]{{1}}) # At least 1 number (?<![0-9]{{4}}) # No more than 3 numbers {nodecimal} # Cannot follow decimal {thou} # The thousands separator (?=[0-9]{{3}} # Three numbers must follow ([^0-9]|$) # But a non-number after that ) """ nodecimal = r"" if alg & ns.FLOAT: # Make a regular expression component that will ensure no # separators are removed after a decimal point. d = get_decimal_point() d = r"\." if d == r"." else d nodecimal += r"(?<!" + d + r"[0-9])" nodecimal += r"(?<!" + d + r"[0-9]{2})" nodecimal += r"(?<!" + d + r"[0-9]{3})" strip_thousands = strip_thousands.format( thou=get_thousands_sep(), nodecimal=nodecimal ) strip_thousands = re.compile(strip_thousands, flags=re.VERBOSE) function_chain.append(partial(strip_thousands.sub, "")) # Create a regular expression that will change the decimal point to # a period if not already a period. decimal = get_decimal_point() if alg & ns.FLOAT and decimal != ".": switch_decimal = r"(?<=[0-9]){decimal}|{decimal}(?=[0-9])" switch_decimal = switch_decimal.format(decimal=decimal) switch_decimal = re.compile(switch_decimal) function_chain.append(partial(switch_decimal.sub, ".")) # Return the chained functions. return chain_functions(function_chain)
python
def input_string_transform_factory(alg): """ Create a function to transform a string. Parameters ---------- alg : ns enum Indicate how to format the *str*. Returns ------- func : callable A function to be used as the *input_transform* argument to *parse_string_factory*. See Also -------- parse_string_factory """ # Shortcuts. lowfirst = alg & ns.LOWERCASEFIRST dumb = alg & NS_DUMB # Build the chain of functions to execute in order. function_chain = [] if (dumb and not lowfirst) or (lowfirst and not dumb): function_chain.append(methodcaller("swapcase")) if alg & ns.IGNORECASE: if NEWPY: function_chain.append(methodcaller("casefold")) else: function_chain.append(methodcaller("lower")) if alg & ns.LOCALENUM: # Create a regular expression that will remove thousands separators. strip_thousands = r""" (?<=[0-9]{{1}}) # At least 1 number (?<![0-9]{{4}}) # No more than 3 numbers {nodecimal} # Cannot follow decimal {thou} # The thousands separator (?=[0-9]{{3}} # Three numbers must follow ([^0-9]|$) # But a non-number after that ) """ nodecimal = r"" if alg & ns.FLOAT: # Make a regular expression component that will ensure no # separators are removed after a decimal point. d = get_decimal_point() d = r"\." if d == r"." else d nodecimal += r"(?<!" + d + r"[0-9])" nodecimal += r"(?<!" + d + r"[0-9]{2})" nodecimal += r"(?<!" + d + r"[0-9]{3})" strip_thousands = strip_thousands.format( thou=get_thousands_sep(), nodecimal=nodecimal ) strip_thousands = re.compile(strip_thousands, flags=re.VERBOSE) function_chain.append(partial(strip_thousands.sub, "")) # Create a regular expression that will change the decimal point to # a period if not already a period. decimal = get_decimal_point() if alg & ns.FLOAT and decimal != ".": switch_decimal = r"(?<=[0-9]){decimal}|{decimal}(?=[0-9])" switch_decimal = switch_decimal.format(decimal=decimal) switch_decimal = re.compile(switch_decimal) function_chain.append(partial(switch_decimal.sub, ".")) # Return the chained functions. return chain_functions(function_chain)
[ "def", "input_string_transform_factory", "(", "alg", ")", ":", "# Shortcuts.", "lowfirst", "=", "alg", "&", "ns", ".", "LOWERCASEFIRST", "dumb", "=", "alg", "&", "NS_DUMB", "# Build the chain of functions to execute in order.", "function_chain", "=", "[", "]", "if", "(", "dumb", "and", "not", "lowfirst", ")", "or", "(", "lowfirst", "and", "not", "dumb", ")", ":", "function_chain", ".", "append", "(", "methodcaller", "(", "\"swapcase\"", ")", ")", "if", "alg", "&", "ns", ".", "IGNORECASE", ":", "if", "NEWPY", ":", "function_chain", ".", "append", "(", "methodcaller", "(", "\"casefold\"", ")", ")", "else", ":", "function_chain", ".", "append", "(", "methodcaller", "(", "\"lower\"", ")", ")", "if", "alg", "&", "ns", ".", "LOCALENUM", ":", "# Create a regular expression that will remove thousands separators.", "strip_thousands", "=", "r\"\"\"\n (?<=[0-9]{{1}}) # At least 1 number\n (?<![0-9]{{4}}) # No more than 3 numbers\n {nodecimal} # Cannot follow decimal\n {thou} # The thousands separator\n (?=[0-9]{{3}} # Three numbers must follow\n ([^0-9]|$) # But a non-number after that\n )\n \"\"\"", "nodecimal", "=", "r\"\"", "if", "alg", "&", "ns", ".", "FLOAT", ":", "# Make a regular expression component that will ensure no", "# separators are removed after a decimal point.", "d", "=", "get_decimal_point", "(", ")", "d", "=", "r\"\\.\"", "if", "d", "==", "r\".\"", "else", "d", "nodecimal", "+=", "r\"(?<!\"", "+", "d", "+", "r\"[0-9])\"", "nodecimal", "+=", "r\"(?<!\"", "+", "d", "+", "r\"[0-9]{2})\"", "nodecimal", "+=", "r\"(?<!\"", "+", "d", "+", "r\"[0-9]{3})\"", "strip_thousands", "=", "strip_thousands", ".", "format", "(", "thou", "=", "get_thousands_sep", "(", ")", ",", "nodecimal", "=", "nodecimal", ")", "strip_thousands", "=", "re", ".", "compile", "(", "strip_thousands", ",", "flags", "=", "re", ".", "VERBOSE", ")", "function_chain", ".", "append", "(", "partial", "(", "strip_thousands", ".", "sub", ",", "\"\"", ")", ")", "# Create a regular expression that will change the decimal point to", "# a period if not already a period.", "decimal", "=", "get_decimal_point", "(", ")", "if", "alg", "&", "ns", ".", "FLOAT", "and", "decimal", "!=", "\".\"", ":", "switch_decimal", "=", "r\"(?<=[0-9]){decimal}|{decimal}(?=[0-9])\"", "switch_decimal", "=", "switch_decimal", ".", "format", "(", "decimal", "=", "decimal", ")", "switch_decimal", "=", "re", ".", "compile", "(", "switch_decimal", ")", "function_chain", ".", "append", "(", "partial", "(", "switch_decimal", ".", "sub", ",", "\".\"", ")", ")", "# Return the chained functions.", "return", "chain_functions", "(", "function_chain", ")" ]
Create a function to transform a string. Parameters ---------- alg : ns enum Indicate how to format the *str*. Returns ------- func : callable A function to be used as the *input_transform* argument to *parse_string_factory*. See Also -------- parse_string_factory
[ "Create", "a", "function", "to", "transform", "a", "string", "." ]
ea0d37ef790b42c424a096e079edd9ea0d5717e3
https://github.com/SethMMorton/natsort/blob/ea0d37ef790b42c424a096e079edd9ea0d5717e3/natsort/utils.py#L473-L544
-1
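The case-handling branch of this input transform is the easiest to observe from the outside; the thousands-separator and decimal-point branches depend on the active locale, so only the former is sketched here:

    from natsort import natsorted, ns

    natsorted(["apple", "Banana"])                     # ['Banana', 'apple'] -- default is case-sensitive
    natsorted(["apple", "Banana"], alg=ns.IGNORECASE)  # ['apple', 'Banana'] -- input is casefolded first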
251,473
SethMMorton/natsort
natsort/utils.py
string_component_transform_factory
def string_component_transform_factory(alg): """ Create a function to either transform a string or convert to a number. Parameters ---------- alg : ns enum Indicate how to format the *str*. Returns ------- func : callable A function to be used as the *component_transform* argument to *parse_string_factory*. See Also -------- parse_string_factory """ # Shortcuts. use_locale = alg & ns.LOCALEALPHA dumb = alg & NS_DUMB group_letters = (alg & ns.GROUPLETTERS) or (use_locale and dumb) nan_val = float("+inf") if alg & ns.NANLAST else float("-inf") # Build the chain of functions to execute in order. func_chain = [] if group_letters: func_chain.append(groupletters) if use_locale: func_chain.append(get_strxfrm()) kwargs = {"key": chain_functions(func_chain)} if func_chain else {} # Return the correct chained functions. if alg & ns.FLOAT: # noinspection PyTypeChecker kwargs["nan"] = nan_val return partial(fast_float, **kwargs) else: return partial(fast_int, **kwargs)
python
def string_component_transform_factory(alg): """ Create a function to either transform a string or convert to a number. Parameters ---------- alg : ns enum Indicate how to format the *str*. Returns ------- func : callable A function to be used as the *component_transform* argument to *parse_string_factory*. See Also -------- parse_string_factory """ # Shortcuts. use_locale = alg & ns.LOCALEALPHA dumb = alg & NS_DUMB group_letters = (alg & ns.GROUPLETTERS) or (use_locale and dumb) nan_val = float("+inf") if alg & ns.NANLAST else float("-inf") # Build the chain of functions to execute in order. func_chain = [] if group_letters: func_chain.append(groupletters) if use_locale: func_chain.append(get_strxfrm()) kwargs = {"key": chain_functions(func_chain)} if func_chain else {} # Return the correct chained functions. if alg & ns.FLOAT: # noinspection PyTypeChecker kwargs["nan"] = nan_val return partial(fast_float, **kwargs) else: return partial(fast_int, **kwargs)
[ "def", "string_component_transform_factory", "(", "alg", ")", ":", "# Shortcuts.", "use_locale", "=", "alg", "&", "ns", ".", "LOCALEALPHA", "dumb", "=", "alg", "&", "NS_DUMB", "group_letters", "=", "(", "alg", "&", "ns", ".", "GROUPLETTERS", ")", "or", "(", "use_locale", "and", "dumb", ")", "nan_val", "=", "float", "(", "\"+inf\"", ")", "if", "alg", "&", "ns", ".", "NANLAST", "else", "float", "(", "\"-inf\"", ")", "# Build the chain of functions to execute in order.", "func_chain", "=", "[", "]", "if", "group_letters", ":", "func_chain", ".", "append", "(", "groupletters", ")", "if", "use_locale", ":", "func_chain", ".", "append", "(", "get_strxfrm", "(", ")", ")", "kwargs", "=", "{", "\"key\"", ":", "chain_functions", "(", "func_chain", ")", "}", "if", "func_chain", "else", "{", "}", "# Return the correct chained functions.", "if", "alg", "&", "ns", ".", "FLOAT", ":", "# noinspection PyTypeChecker", "kwargs", "[", "\"nan\"", "]", "=", "nan_val", "return", "partial", "(", "fast_float", ",", "*", "*", "kwargs", ")", "else", ":", "return", "partial", "(", "fast_int", ",", "*", "*", "kwargs", ")" ]
Create a function to either transform a string or convert to a number. Parameters ---------- alg : ns enum Indicate how to format the *str*. Returns ------- func : callable A function to be used as the *component_transform* argument to *parse_string_factory*. See Also -------- parse_string_factory
[ "Create", "a", "function", "to", "either", "transform", "a", "string", "or", "convert", "to", "a", "number", "." ]
ea0d37ef790b42c424a096e079edd9ea0d5717e3
https://github.com/SethMMorton/natsort/blob/ea0d37ef790b42c424a096e079edd9ea0d5717e3/natsort/utils.py#L547-L587
-1
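A small sketch of the component transform, again assuming the internal natsort.utils module; the pass-through of non-numeric strings is an assumption about the fast_float helper it wraps:

from natsort import ns
from natsort.utils import string_component_transform_factory

# With ns.FLOAT the factory wraps fast_float, so numeric components become
# floats while non-numeric components pass through unchanged.
to_component = string_component_transform_factory(ns.FLOAT)
print(to_component("3.5"))   # -> 3.5
print(to_component("abc"))   # -> "abc"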
251,474
SethMMorton/natsort
natsort/utils.py
final_data_transform_factory
def final_data_transform_factory(alg, sep, pre_sep): """ Create a function to transform a tuple. Parameters ---------- alg : ns enum Indicate how to format the *str*. sep : str Separator that was passed to *parse_string_factory*. pre_sep : str String separator to insert at the at the front of the return tuple in the case that the first element is *sep*. Returns ------- func : callable A function to be used as the *final_transform* argument to *parse_string_factory*. See Also -------- parse_string_factory """ if alg & ns.UNGROUPLETTERS and alg & ns.LOCALEALPHA: swap = alg & NS_DUMB and alg & ns.LOWERCASEFIRST transform = methodcaller("swapcase") if swap else _no_op def func(split_val, val, _transform=transform, _sep=sep, _pre_sep=pre_sep): """ Return a tuple with the first character of the first element of the return value as the first element, and the return value as the second element. This will be used to perform gross sorting by the first letter. """ split_val = tuple(split_val) if not split_val: return (), () elif split_val[0] == _sep: return (_pre_sep,), split_val else: return (_transform(val[0]),), split_val return func else: return lambda split_val, val: tuple(split_val)
python
def final_data_transform_factory(alg, sep, pre_sep): """ Create a function to transform a tuple. Parameters ---------- alg : ns enum Indicate how to format the *str*. sep : str Separator that was passed to *parse_string_factory*. pre_sep : str String separator to insert at the at the front of the return tuple in the case that the first element is *sep*. Returns ------- func : callable A function to be used as the *final_transform* argument to *parse_string_factory*. See Also -------- parse_string_factory """ if alg & ns.UNGROUPLETTERS and alg & ns.LOCALEALPHA: swap = alg & NS_DUMB and alg & ns.LOWERCASEFIRST transform = methodcaller("swapcase") if swap else _no_op def func(split_val, val, _transform=transform, _sep=sep, _pre_sep=pre_sep): """ Return a tuple with the first character of the first element of the return value as the first element, and the return value as the second element. This will be used to perform gross sorting by the first letter. """ split_val = tuple(split_val) if not split_val: return (), () elif split_val[0] == _sep: return (_pre_sep,), split_val else: return (_transform(val[0]),), split_val return func else: return lambda split_val, val: tuple(split_val)
[ "def", "final_data_transform_factory", "(", "alg", ",", "sep", ",", "pre_sep", ")", ":", "if", "alg", "&", "ns", ".", "UNGROUPLETTERS", "and", "alg", "&", "ns", ".", "LOCALEALPHA", ":", "swap", "=", "alg", "&", "NS_DUMB", "and", "alg", "&", "ns", ".", "LOWERCASEFIRST", "transform", "=", "methodcaller", "(", "\"swapcase\"", ")", "if", "swap", "else", "_no_op", "def", "func", "(", "split_val", ",", "val", ",", "_transform", "=", "transform", ",", "_sep", "=", "sep", ",", "_pre_sep", "=", "pre_sep", ")", ":", "\"\"\"\n Return a tuple with the first character of the first element\n of the return value as the first element, and the return value\n as the second element. This will be used to perform gross sorting\n by the first letter.\n \"\"\"", "split_val", "=", "tuple", "(", "split_val", ")", "if", "not", "split_val", ":", "return", "(", ")", ",", "(", ")", "elif", "split_val", "[", "0", "]", "==", "_sep", ":", "return", "(", "_pre_sep", ",", ")", ",", "split_val", "else", ":", "return", "(", "_transform", "(", "val", "[", "0", "]", ")", ",", ")", ",", "split_val", "return", "func", "else", ":", "return", "lambda", "split_val", ",", "val", ":", "tuple", "(", "split_val", ")" ]
Create a function to transform a tuple. Parameters ---------- alg : ns enum Indicate how to format the *str*. sep : str Separator that was passed to *parse_string_factory*. pre_sep : str String separator to insert at the front of the return tuple in the case that the first element is *sep*. Returns ------- func : callable A function to be used as the *final_transform* argument to *parse_string_factory*. See Also -------- parse_string_factory
[ "Create", "a", "function", "to", "transform", "a", "tuple", "." ]
ea0d37ef790b42c424a096e079edd9ea0d5717e3
https://github.com/SethMMorton/natsort/blob/ea0d37ef790b42c424a096e079edd9ea0d5717e3/natsort/utils.py#L590-L637
-1
251,475
SethMMorton/natsort
natsort/utils.py
groupletters
def groupletters(x, _low=lower_function): """ Double all characters, making doubled letters lowercase. Parameters ---------- x : str Returns ------- str Examples -------- >>> groupletters("Apple") {u}'aAppppllee' """ return "".join(ichain.from_iterable((_low(y), y) for y in x))
python
def groupletters(x, _low=lower_function): """ Double all characters, making doubled letters lowercase. Parameters ---------- x : str Returns ------- str Examples -------- >>> groupletters("Apple") {u}'aAppppllee' """ return "".join(ichain.from_iterable((_low(y), y) for y in x))
[ "def", "groupletters", "(", "x", ",", "_low", "=", "lower_function", ")", ":", "return", "\"\"", ".", "join", "(", "ichain", ".", "from_iterable", "(", "(", "_low", "(", "y", ")", ",", "y", ")", "for", "y", "in", "x", ")", ")" ]
Double all characters, making doubled letters lowercase. Parameters ---------- x : str Returns ------- str Examples -------- >>> groupletters("Apple") {u}'aAppppllee'
[ "Double", "all", "characters", "making", "doubled", "letters", "lowercase", "." ]
ea0d37ef790b42c424a096e079edd9ea0d5717e3
https://github.com/SethMMorton/natsort/blob/ea0d37ef790b42c424a096e079edd9ea0d5717e3/natsort/utils.py#L645-L664
-1
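A short sketch exercising groupletters, assuming natsort.utils is importable; the expected output comes from the docstring above:

from natsort.utils import groupletters

# Every character is doubled, with the first copy lowercased, so ordering is
# case-insensitive while the original case still breaks ties.
print(groupletters("Apple"))  # -> "aAppppllee"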
251,476
SethMMorton/natsort
natsort/utils.py
chain_functions
def chain_functions(functions): """ Chain a list of single-argument functions together and return. The functions are applied in list order, and the output of the previous functions is passed to the next function. Parameters ---------- functions : list A list of single-argument functions to chain together. Returns ------- func : callable A single argument function. Examples -------- Chain several functions together! >>> funcs = [lambda x: x * 4, len, lambda x: x + 5] >>> func = chain_functions(funcs) >>> func('hey') 17 """ functions = list(functions) if not functions: return _no_op elif len(functions) == 1: return functions[0] else: # See https://stackoverflow.com/a/39123400/1399279 return partial(reduce, lambda res, f: f(res), functions)
python
def chain_functions(functions): """ Chain a list of single-argument functions together and return. The functions are applied in list order, and the output of the previous functions is passed to the next function. Parameters ---------- functions : list A list of single-argument functions to chain together. Returns ------- func : callable A single argument function. Examples -------- Chain several functions together! >>> funcs = [lambda x: x * 4, len, lambda x: x + 5] >>> func = chain_functions(funcs) >>> func('hey') 17 """ functions = list(functions) if not functions: return _no_op elif len(functions) == 1: return functions[0] else: # See https://stackoverflow.com/a/39123400/1399279 return partial(reduce, lambda res, f: f(res), functions)
[ "def", "chain_functions", "(", "functions", ")", ":", "functions", "=", "list", "(", "functions", ")", "if", "not", "functions", ":", "return", "_no_op", "elif", "len", "(", "functions", ")", "==", "1", ":", "return", "functions", "[", "0", "]", "else", ":", "# See https://stackoverflow.com/a/39123400/1399279", "return", "partial", "(", "reduce", ",", "lambda", "res", ",", "f", ":", "f", "(", "res", ")", ",", "functions", ")" ]
Chain a list of single-argument functions together and return. The functions are applied in list order, and the output of the previous functions is passed to the next function. Parameters ---------- functions : list A list of single-argument functions to chain together. Returns ------- func : callable A single argument function. Examples -------- Chain several functions together! >>> funcs = [lambda x: x * 4, len, lambda x: x + 5] >>> func = chain_functions(funcs) >>> func('hey') 17
[ "Chain", "a", "list", "of", "single", "-", "argument", "functions", "together", "and", "return", "." ]
ea0d37ef790b42c424a096e079edd9ea0d5717e3
https://github.com/SethMMorton/natsort/blob/ea0d37ef790b42c424a096e079edd9ea0d5717e3/natsort/utils.py#L667-L701
-1
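A sketch of chain_functions, assuming natsort.utils; the functions are applied left to right, each receiving the previous result:

from natsort.utils import chain_functions

# Multiply the string, take its length, then add 5: ("hey" * 4) has 12
# characters, and 12 + 5 == 17.
func = chain_functions([lambda x: x * 4, len, lambda x: x + 5])
print(func("hey"))  # -> 17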
251,477
SethMMorton/natsort
natsort/utils.py
path_splitter
def path_splitter(s, _d_match=re.compile(r"\.\d").match): """ Split a string into its path components. Assumes a string is a path or is path-like. Parameters ---------- s : str | pathlib.Path Returns ------- split : tuple The path split by directory components and extensions. Examples -------- >>> tuple(path_splitter("this/thing.ext")) ({u}'this', {u}'thing', {u}'.ext') """ if has_pathlib and isinstance(s, PurePath): s = py23_str(s) path_parts = deque() p_appendleft = path_parts.appendleft # Continue splitting the path from the back until we have reached # '..' or '.', or until there is nothing left to split. path_location = s while path_location != os_curdir and path_location != os_pardir: parent_path = path_location path_location, child_path = path_split(parent_path) if path_location == parent_path: break p_appendleft(child_path) # This last append is the base path. # Only append if the string is non-empty. # Make sure the proper path separator for this OS is used # no matter what was actually given. if path_location: p_appendleft(py23_str(os_sep)) # Now, split off the file extensions using a similar method to above. # Continue splitting off file extensions until we reach a decimal number # or there are no more extensions. # We are not using built-in functionality of PathLib here because of # the recursive splitting up to a decimal. base = path_parts.pop() base_parts = deque() b_appendleft = base_parts.appendleft while True: front = base base, ext = path_splitext(front) if _d_match(ext) or not ext: # Reset base to before the split if the split is invalid. base = front break b_appendleft(ext) b_appendleft(base) # Return the split parent paths and then the split basename. return ichain(path_parts, base_parts)
python
def path_splitter(s, _d_match=re.compile(r"\.\d").match): """ Split a string into its path components. Assumes a string is a path or is path-like. Parameters ---------- s : str | pathlib.Path Returns ------- split : tuple The path split by directory components and extensions. Examples -------- >>> tuple(path_splitter("this/thing.ext")) ({u}'this', {u}'thing', {u}'.ext') """ if has_pathlib and isinstance(s, PurePath): s = py23_str(s) path_parts = deque() p_appendleft = path_parts.appendleft # Continue splitting the path from the back until we have reached # '..' or '.', or until there is nothing left to split. path_location = s while path_location != os_curdir and path_location != os_pardir: parent_path = path_location path_location, child_path = path_split(parent_path) if path_location == parent_path: break p_appendleft(child_path) # This last append is the base path. # Only append if the string is non-empty. # Make sure the proper path separator for this OS is used # no matter what was actually given. if path_location: p_appendleft(py23_str(os_sep)) # Now, split off the file extensions using a similar method to above. # Continue splitting off file extensions until we reach a decimal number # or there are no more extensions. # We are not using built-in functionality of PathLib here because of # the recursive splitting up to a decimal. base = path_parts.pop() base_parts = deque() b_appendleft = base_parts.appendleft while True: front = base base, ext = path_splitext(front) if _d_match(ext) or not ext: # Reset base to before the split if the split is invalid. base = front break b_appendleft(ext) b_appendleft(base) # Return the split parent paths and then the split basename. return ichain(path_parts, base_parts)
[ "def", "path_splitter", "(", "s", ",", "_d_match", "=", "re", ".", "compile", "(", "r\"\\.\\d\"", ")", ".", "match", ")", ":", "if", "has_pathlib", "and", "isinstance", "(", "s", ",", "PurePath", ")", ":", "s", "=", "py23_str", "(", "s", ")", "path_parts", "=", "deque", "(", ")", "p_appendleft", "=", "path_parts", ".", "appendleft", "# Continue splitting the path from the back until we have reached", "# '..' or '.', or until there is nothing left to split.", "path_location", "=", "s", "while", "path_location", "!=", "os_curdir", "and", "path_location", "!=", "os_pardir", ":", "parent_path", "=", "path_location", "path_location", ",", "child_path", "=", "path_split", "(", "parent_path", ")", "if", "path_location", "==", "parent_path", ":", "break", "p_appendleft", "(", "child_path", ")", "# This last append is the base path.", "# Only append if the string is non-empty.", "# Make sure the proper path separator for this OS is used", "# no matter what was actually given.", "if", "path_location", ":", "p_appendleft", "(", "py23_str", "(", "os_sep", ")", ")", "# Now, split off the file extensions using a similar method to above.", "# Continue splitting off file extensions until we reach a decimal number", "# or there are no more extensions.", "# We are not using built-in functionality of PathLib here because of", "# the recursive splitting up to a decimal.", "base", "=", "path_parts", ".", "pop", "(", ")", "base_parts", "=", "deque", "(", ")", "b_appendleft", "=", "base_parts", ".", "appendleft", "while", "True", ":", "front", "=", "base", "base", ",", "ext", "=", "path_splitext", "(", "front", ")", "if", "_d_match", "(", "ext", ")", "or", "not", "ext", ":", "# Reset base to before the split if the split is invalid.", "base", "=", "front", "break", "b_appendleft", "(", "ext", ")", "b_appendleft", "(", "base", ")", "# Return the split parent paths and then the split basename.", "return", "ichain", "(", "path_parts", ",", "base_parts", ")" ]
Split a string into its path components. Assumes a string is a path or is path-like. Parameters ---------- s : str | pathlib.Path Returns ------- split : tuple The path split by directory components and extensions. Examples -------- >>> tuple(path_splitter("this/thing.ext")) ({u}'this', {u}'thing', {u}'.ext')
[ "Split", "a", "string", "into", "its", "path", "components", "." ]
ea0d37ef790b42c424a096e079edd9ea0d5717e3
https://github.com/SethMMorton/natsort/blob/ea0d37ef790b42c424a096e079edd9ea0d5717e3/natsort/utils.py#L729-L791
-1
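A sketch of path_splitter, assuming natsort.utils; it returns an iterator, so the result is wrapped in tuple():

from natsort.utils import path_splitter

# The path is split into directory components, then the basename is split
# into its stem and extension(s).
print(tuple(path_splitter("this/thing.ext")))  # -> ('this', 'thing', '.ext')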
251,478
SethMMorton/natsort
natsort/utils.py
NumericalRegularExpressions._construct_regex
def _construct_regex(cls, fmt): """Given a format string, construct the regex with class attributes.""" return re.compile(fmt.format(**vars(cls)), flags=re.U)
python
def _construct_regex(cls, fmt): """Given a format string, construct the regex with class attributes.""" return re.compile(fmt.format(**vars(cls)), flags=re.U)
[ "def", "_construct_regex", "(", "cls", ",", "fmt", ")", ":", "return", "re", ".", "compile", "(", "fmt", ".", "format", "(", "*", "*", "vars", "(", "cls", ")", ")", ",", "flags", "=", "re", ".", "U", ")" ]
Given a format string, construct the regex with class attributes.
[ "Given", "a", "format", "string", "construct", "the", "regex", "with", "class", "attributes", "." ]
ea0d37ef790b42c424a096e079edd9ea0d5717e3
https://github.com/SethMMorton/natsort/blob/ea0d37ef790b42c424a096e079edd9ea0d5717e3/natsort/utils.py#L92-L94
-1
251,479
SethMMorton/natsort
natsort/natsort.py
natsort_keygen
def natsort_keygen(key=None, alg=ns.DEFAULT): """ Generate a key to sort strings and numbers naturally. This key is designed for use as the `key` argument to functions such as the `sorted` builtin. The user may customize the generated function with the arguments to `natsort_keygen`, including an optional `key` function. Parameters ---------- key : callable, optional A key used to manipulate the input value before parsing for numbers. It is **not** applied recursively. It should accept a single argument and return a single value. alg : ns enum, optional This option is used to control which algorithm `natsort` uses when sorting. For details into these options, please see the :class:`ns` class documentation. The default is `ns.INT`. Returns ------- out : function A function that parses input for natural sorting that is suitable for passing as the `key` argument to functions such as `sorted`. See Also -------- natsorted natsort_key Examples -------- `natsort_keygen` is a convenient way to create a custom key to sort lists in-place (for example).:: >>> a = ['num5.10', 'num-3', 'num5.3', 'num2'] >>> a.sort(key=natsort_keygen(alg=ns.REAL)) >>> a [{u}'num-3', {u}'num2', {u}'num5.10', {u}'num5.3'] """ try: ns.DEFAULT | alg except TypeError: msg = "natsort_keygen: 'alg' argument must be from the enum 'ns'" raise ValueError(msg + ", got {}".format(py23_str(alg))) # Add the NS_DUMB option if the locale library is broken. if alg & ns.LOCALEALPHA and natsort.compat.locale.dumb_sort(): alg |= NS_DUMB # Set some variables that will be passed to the factory functions if alg & ns.NUMAFTER: if alg & ns.LOCALEALPHA: sep = natsort.compat.locale.null_string_locale_max else: sep = natsort.compat.locale.null_string_max pre_sep = natsort.compat.locale.null_string_max else: if alg & ns.LOCALEALPHA: sep = natsort.compat.locale.null_string_locale else: sep = natsort.compat.locale.null_string pre_sep = natsort.compat.locale.null_string regex = utils.regex_chooser(alg) # Create the functions that will be used to split strings. input_transform = utils.input_string_transform_factory(alg) component_transform = utils.string_component_transform_factory(alg) final_transform = utils.final_data_transform_factory(alg, sep, pre_sep) # Create the high-level parsing functions for strings, bytes, and numbers. string_func = utils.parse_string_factory( alg, sep, regex.split, input_transform, component_transform, final_transform ) if alg & ns.PATH: string_func = utils.parse_path_factory(string_func) bytes_func = utils.parse_bytes_factory(alg) num_func = utils.parse_number_factory(alg, sep, pre_sep) # Return the natsort key with the parsing path pre-chosen. return partial( utils.natsort_key, key=key, string_func=string_func, bytes_func=bytes_func, num_func=num_func, )
python
def natsort_keygen(key=None, alg=ns.DEFAULT): """ Generate a key to sort strings and numbers naturally. This key is designed for use as the `key` argument to functions such as the `sorted` builtin. The user may customize the generated function with the arguments to `natsort_keygen`, including an optional `key` function. Parameters ---------- key : callable, optional A key used to manipulate the input value before parsing for numbers. It is **not** applied recursively. It should accept a single argument and return a single value. alg : ns enum, optional This option is used to control which algorithm `natsort` uses when sorting. For details into these options, please see the :class:`ns` class documentation. The default is `ns.INT`. Returns ------- out : function A function that parses input for natural sorting that is suitable for passing as the `key` argument to functions such as `sorted`. See Also -------- natsorted natsort_key Examples -------- `natsort_keygen` is a convenient way to create a custom key to sort lists in-place (for example).:: >>> a = ['num5.10', 'num-3', 'num5.3', 'num2'] >>> a.sort(key=natsort_keygen(alg=ns.REAL)) >>> a [{u}'num-3', {u}'num2', {u}'num5.10', {u}'num5.3'] """ try: ns.DEFAULT | alg except TypeError: msg = "natsort_keygen: 'alg' argument must be from the enum 'ns'" raise ValueError(msg + ", got {}".format(py23_str(alg))) # Add the NS_DUMB option if the locale library is broken. if alg & ns.LOCALEALPHA and natsort.compat.locale.dumb_sort(): alg |= NS_DUMB # Set some variables that will be passed to the factory functions if alg & ns.NUMAFTER: if alg & ns.LOCALEALPHA: sep = natsort.compat.locale.null_string_locale_max else: sep = natsort.compat.locale.null_string_max pre_sep = natsort.compat.locale.null_string_max else: if alg & ns.LOCALEALPHA: sep = natsort.compat.locale.null_string_locale else: sep = natsort.compat.locale.null_string pre_sep = natsort.compat.locale.null_string regex = utils.regex_chooser(alg) # Create the functions that will be used to split strings. input_transform = utils.input_string_transform_factory(alg) component_transform = utils.string_component_transform_factory(alg) final_transform = utils.final_data_transform_factory(alg, sep, pre_sep) # Create the high-level parsing functions for strings, bytes, and numbers. string_func = utils.parse_string_factory( alg, sep, regex.split, input_transform, component_transform, final_transform ) if alg & ns.PATH: string_func = utils.parse_path_factory(string_func) bytes_func = utils.parse_bytes_factory(alg) num_func = utils.parse_number_factory(alg, sep, pre_sep) # Return the natsort key with the parsing path pre-chosen. return partial( utils.natsort_key, key=key, string_func=string_func, bytes_func=bytes_func, num_func=num_func, )
[ "def", "natsort_keygen", "(", "key", "=", "None", ",", "alg", "=", "ns", ".", "DEFAULT", ")", ":", "try", ":", "ns", ".", "DEFAULT", "|", "alg", "except", "TypeError", ":", "msg", "=", "\"natsort_keygen: 'alg' argument must be from the enum 'ns'\"", "raise", "ValueError", "(", "msg", "+", "\", got {}\"", ".", "format", "(", "py23_str", "(", "alg", ")", ")", ")", "# Add the NS_DUMB option if the locale library is broken.", "if", "alg", "&", "ns", ".", "LOCALEALPHA", "and", "natsort", ".", "compat", ".", "locale", ".", "dumb_sort", "(", ")", ":", "alg", "|=", "NS_DUMB", "# Set some variables that will be passed to the factory functions", "if", "alg", "&", "ns", ".", "NUMAFTER", ":", "if", "alg", "&", "ns", ".", "LOCALEALPHA", ":", "sep", "=", "natsort", ".", "compat", ".", "locale", ".", "null_string_locale_max", "else", ":", "sep", "=", "natsort", ".", "compat", ".", "locale", ".", "null_string_max", "pre_sep", "=", "natsort", ".", "compat", ".", "locale", ".", "null_string_max", "else", ":", "if", "alg", "&", "ns", ".", "LOCALEALPHA", ":", "sep", "=", "natsort", ".", "compat", ".", "locale", ".", "null_string_locale", "else", ":", "sep", "=", "natsort", ".", "compat", ".", "locale", ".", "null_string", "pre_sep", "=", "natsort", ".", "compat", ".", "locale", ".", "null_string", "regex", "=", "utils", ".", "regex_chooser", "(", "alg", ")", "# Create the functions that will be used to split strings.", "input_transform", "=", "utils", ".", "input_string_transform_factory", "(", "alg", ")", "component_transform", "=", "utils", ".", "string_component_transform_factory", "(", "alg", ")", "final_transform", "=", "utils", ".", "final_data_transform_factory", "(", "alg", ",", "sep", ",", "pre_sep", ")", "# Create the high-level parsing functions for strings, bytes, and numbers.", "string_func", "=", "utils", ".", "parse_string_factory", "(", "alg", ",", "sep", ",", "regex", ".", "split", ",", "input_transform", ",", "component_transform", ",", "final_transform", ")", "if", "alg", "&", "ns", ".", "PATH", ":", "string_func", "=", "utils", ".", "parse_path_factory", "(", "string_func", ")", "bytes_func", "=", "utils", ".", "parse_bytes_factory", "(", "alg", ")", "num_func", "=", "utils", ".", "parse_number_factory", "(", "alg", ",", "sep", ",", "pre_sep", ")", "# Return the natsort key with the parsing path pre-chosen.", "return", "partial", "(", "utils", ".", "natsort_key", ",", "key", "=", "key", ",", "string_func", "=", "string_func", ",", "bytes_func", "=", "bytes_func", ",", "num_func", "=", "num_func", ",", ")" ]
Generate a key to sort strings and numbers naturally. This key is designed for use as the `key` argument to functions such as the `sorted` builtin. The user may customize the generated function with the arguments to `natsort_keygen`, including an optional `key` function. Parameters ---------- key : callable, optional A key used to manipulate the input value before parsing for numbers. It is **not** applied recursively. It should accept a single argument and return a single value. alg : ns enum, optional This option is used to control which algorithm `natsort` uses when sorting. For details into these options, please see the :class:`ns` class documentation. The default is `ns.INT`. Returns ------- out : function A function that parses input for natural sorting that is suitable for passing as the `key` argument to functions such as `sorted`. See Also -------- natsorted natsort_key Examples -------- `natsort_keygen` is a convenient way to create a custom key to sort lists in-place (for example).:: >>> a = ['num5.10', 'num-3', 'num5.3', 'num2'] >>> a.sort(key=natsort_keygen(alg=ns.REAL)) >>> a [{u}'num-3', {u}'num2', {u}'num5.10', {u}'num5.3']
[ "Generate", "a", "key", "to", "sort", "strings", "and", "numbers", "naturally", "." ]
ea0d37ef790b42c424a096e079edd9ea0d5717e3
https://github.com/SethMMorton/natsort/blob/ea0d37ef790b42c424a096e079edd9ea0d5717e3/natsort/natsort.py#L111-L203
-1
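A minimal usage sketch of natsort_keygen as a reusable sort key, assuming the natsort package is installed:

from natsort import natsort_keygen, ns

# Build a key once and reuse it; ns.REAL treats signed floats inside the
# strings as numbers.
natsort_key = natsort_keygen(alg=ns.REAL)
data = ["num5.10", "num-3", "num5.3", "num2"]
data.sort(key=natsort_key)
print(data)  # -> ['num-3', 'num2', 'num5.10', 'num5.3']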
251,480
SethMMorton/natsort
natsort/natsort.py
natsorted
def natsorted(seq, key=None, reverse=False, alg=ns.DEFAULT): """ Sorts an iterable naturally. Parameters ---------- seq : iterable The input to sort. key : callable, optional A key used to determine how to sort each element of the iterable. It is **not** applied recursively. It should accept a single argument and return a single value. reverse : {{True, False}}, optional Return the list in reversed sorted order. The default is `False`. alg : ns enum, optional This option is used to control which algorithm `natsort` uses when sorting. For details into these options, please see the :class:`ns` class documentation. The default is `ns.INT`. Returns ------- out: list The sorted input. See Also -------- natsort_keygen : Generates the key that makes natural sorting possible. realsorted : A wrapper for ``natsorted(seq, alg=ns.REAL)``. humansorted : A wrapper for ``natsorted(seq, alg=ns.LOCALE)``. index_natsorted : Returns the sorted indexes from `natsorted`. Examples -------- Use `natsorted` just like the builtin `sorted`:: >>> a = ['num3', 'num5', 'num2'] >>> natsorted(a) [{u}'num2', {u}'num3', {u}'num5'] """ key = natsort_keygen(key, alg) return sorted(seq, reverse=reverse, key=key)
python
def natsorted(seq, key=None, reverse=False, alg=ns.DEFAULT): """ Sorts an iterable naturally. Parameters ---------- seq : iterable The input to sort. key : callable, optional A key used to determine how to sort each element of the iterable. It is **not** applied recursively. It should accept a single argument and return a single value. reverse : {{True, False}}, optional Return the list in reversed sorted order. The default is `False`. alg : ns enum, optional This option is used to control which algorithm `natsort` uses when sorting. For details into these options, please see the :class:`ns` class documentation. The default is `ns.INT`. Returns ------- out: list The sorted input. See Also -------- natsort_keygen : Generates the key that makes natural sorting possible. realsorted : A wrapper for ``natsorted(seq, alg=ns.REAL)``. humansorted : A wrapper for ``natsorted(seq, alg=ns.LOCALE)``. index_natsorted : Returns the sorted indexes from `natsorted`. Examples -------- Use `natsorted` just like the builtin `sorted`:: >>> a = ['num3', 'num5', 'num2'] >>> natsorted(a) [{u}'num2', {u}'num3', {u}'num5'] """ key = natsort_keygen(key, alg) return sorted(seq, reverse=reverse, key=key)
[ "def", "natsorted", "(", "seq", ",", "key", "=", "None", ",", "reverse", "=", "False", ",", "alg", "=", "ns", ".", "DEFAULT", ")", ":", "key", "=", "natsort_keygen", "(", "key", ",", "alg", ")", "return", "sorted", "(", "seq", ",", "reverse", "=", "reverse", ",", "key", "=", "key", ")" ]
Sorts an iterable naturally. Parameters ---------- seq : iterable The input to sort. key : callable, optional A key used to determine how to sort each element of the iterable. It is **not** applied recursively. It should accept a single argument and return a single value. reverse : {{True, False}}, optional Return the list in reversed sorted order. The default is `False`. alg : ns enum, optional This option is used to control which algorithm `natsort` uses when sorting. For details into these options, please see the :class:`ns` class documentation. The default is `ns.INT`. Returns ------- out: list The sorted input. See Also -------- natsort_keygen : Generates the key that makes natural sorting possible. realsorted : A wrapper for ``natsorted(seq, alg=ns.REAL)``. humansorted : A wrapper for ``natsorted(seq, alg=ns.LOCALE)``. index_natsorted : Returns the sorted indexes from `natsorted`. Examples -------- Use `natsorted` just like the builtin `sorted`:: >>> a = ['num3', 'num5', 'num2'] >>> natsorted(a) [{u}'num2', {u}'num3', {u}'num5']
[ "Sorts", "an", "iterable", "naturally", "." ]
ea0d37ef790b42c424a096e079edd9ea0d5717e3
https://github.com/SethMMorton/natsort/blob/ea0d37ef790b42c424a096e079edd9ea0d5717e3/natsort/natsort.py#L222-L267
-1
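A sketch using natsorted like the builtin sorted, assuming natsort is installed:

from natsort import natsorted

# Embedded integers are compared numerically, so 'num2' precedes 'num3'.
print(natsorted(["num3", "num5", "num2"]))      # -> ['num2', 'num3', 'num5']
print(natsorted(["a10", "a2"], reverse=True))   # -> ['a10', 'a2']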
251,481
SethMMorton/natsort
natsort/natsort.py
humansorted
def humansorted(seq, key=None, reverse=False, alg=ns.DEFAULT): """ Convenience function to properly sort non-numeric characters. This is a wrapper around ``natsorted(seq, alg=ns.LOCALE)``. Parameters ---------- seq : iterable The input to sort. key : callable, optional A key used to determine how to sort each element of the sequence. It is **not** applied recursively. It should accept a single argument and return a single value. reverse : {{True, False}}, optional Return the list in reversed sorted order. The default is `False`. alg : ns enum, optional This option is used to control which algorithm `natsort` uses when sorting. For details into these options, please see the :class:`ns` class documentation. The default is `ns.LOCALE`. Returns ------- out : list The sorted input. See Also -------- index_humansorted : Returns the sorted indexes from `humansorted`. Notes ----- Please read :ref:`locale_issues` before using `humansorted`. Examples -------- Use `humansorted` just like the builtin `sorted`:: >>> a = ['Apple', 'Banana', 'apple', 'banana'] >>> natsorted(a) [{u}'Apple', {u}'Banana', {u}'apple', {u}'banana'] >>> humansorted(a) [{u}'apple', {u}'Apple', {u}'banana', {u}'Banana'] """ return natsorted(seq, key, reverse, alg | ns.LOCALE)
python
def humansorted(seq, key=None, reverse=False, alg=ns.DEFAULT): """ Convenience function to properly sort non-numeric characters. This is a wrapper around ``natsorted(seq, alg=ns.LOCALE)``. Parameters ---------- seq : iterable The input to sort. key : callable, optional A key used to determine how to sort each element of the sequence. It is **not** applied recursively. It should accept a single argument and return a single value. reverse : {{True, False}}, optional Return the list in reversed sorted order. The default is `False`. alg : ns enum, optional This option is used to control which algorithm `natsort` uses when sorting. For details into these options, please see the :class:`ns` class documentation. The default is `ns.LOCALE`. Returns ------- out : list The sorted input. See Also -------- index_humansorted : Returns the sorted indexes from `humansorted`. Notes ----- Please read :ref:`locale_issues` before using `humansorted`. Examples -------- Use `humansorted` just like the builtin `sorted`:: >>> a = ['Apple', 'Banana', 'apple', 'banana'] >>> natsorted(a) [{u}'Apple', {u}'Banana', {u}'apple', {u}'banana'] >>> humansorted(a) [{u}'apple', {u}'Apple', {u}'banana', {u}'Banana'] """ return natsorted(seq, key, reverse, alg | ns.LOCALE)
[ "def", "humansorted", "(", "seq", ",", "key", "=", "None", ",", "reverse", "=", "False", ",", "alg", "=", "ns", ".", "DEFAULT", ")", ":", "return", "natsorted", "(", "seq", ",", "key", ",", "reverse", ",", "alg", "|", "ns", ".", "LOCALE", ")" ]
Convenience function to properly sort non-numeric characters. This is a wrapper around ``natsorted(seq, alg=ns.LOCALE)``. Parameters ---------- seq : iterable The input to sort. key : callable, optional A key used to determine how to sort each element of the sequence. It is **not** applied recursively. It should accept a single argument and return a single value. reverse : {{True, False}}, optional Return the list in reversed sorted order. The default is `False`. alg : ns enum, optional This option is used to control which algorithm `natsort` uses when sorting. For details into these options, please see the :class:`ns` class documentation. The default is `ns.LOCALE`. Returns ------- out : list The sorted input. See Also -------- index_humansorted : Returns the sorted indexes from `humansorted`. Notes ----- Please read :ref:`locale_issues` before using `humansorted`. Examples -------- Use `humansorted` just like the builtin `sorted`:: >>> a = ['Apple', 'Banana', 'apple', 'banana'] >>> natsorted(a) [{u}'Apple', {u}'Banana', {u}'apple', {u}'banana'] >>> humansorted(a) [{u}'apple', {u}'Apple', {u}'banana', {u}'Banana']
[ "Convenience", "function", "to", "properly", "sort", "non", "-", "numeric", "characters", "." ]
ea0d37ef790b42c424a096e079edd9ea0d5717e3
https://github.com/SethMMorton/natsort/blob/ea0d37ef790b42c424a096e079edd9ea0d5717e3/natsort/natsort.py#L271-L320
-1
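A sketch of humansorted; the result depends on the active locale, so the output shown assumes an English locale:

from natsort import humansorted

# Equivalent to natsorted(seq, alg=ns.LOCALE): letters are ordered by the
# current locale's collation rules instead of raw code points.
print(humansorted(["Apple", "Banana", "apple", "banana"]))
# e.g. ['apple', 'Apple', 'banana', 'Banana'] under an English locale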
251,482
SethMMorton/natsort
natsort/natsort.py
realsorted
def realsorted(seq, key=None, reverse=False, alg=ns.DEFAULT): """ Convenience function to properly sort signed floats. A signed float in a string could be "a-5.7". This is a wrapper around ``natsorted(seq, alg=ns.REAL)``. The behavior of :func:`realsorted` for `natsort` version >= 4.0.0 was the default behavior of :func:`natsorted` for `natsort` version < 4.0.0. Parameters ---------- seq : iterable The input to sort. key : callable, optional A key used to determine how to sort each element of the sequence. It is **not** applied recursively. It should accept a single argument and return a single value. reverse : {{True, False}}, optional Return the list in reversed sorted order. The default is `False`. alg : ns enum, optional This option is used to control which algorithm `natsort` uses when sorting. For details into these options, please see the :class:`ns` class documentation. The default is `ns.REAL`. Returns ------- out : list The sorted input. See Also -------- index_realsorted : Returns the sorted indexes from `realsorted`. Examples -------- Use `realsorted` just like the builtin `sorted`:: >>> a = ['num5.10', 'num-3', 'num5.3', 'num2'] >>> natsorted(a) [{u}'num2', {u}'num5.3', {u}'num5.10', {u}'num-3'] >>> realsorted(a) [{u}'num-3', {u}'num2', {u}'num5.10', {u}'num5.3'] """ return natsorted(seq, key, reverse, alg | ns.REAL)
python
def realsorted(seq, key=None, reverse=False, alg=ns.DEFAULT): """ Convenience function to properly sort signed floats. A signed float in a string could be "a-5.7". This is a wrapper around ``natsorted(seq, alg=ns.REAL)``. The behavior of :func:`realsorted` for `natsort` version >= 4.0.0 was the default behavior of :func:`natsorted` for `natsort` version < 4.0.0. Parameters ---------- seq : iterable The input to sort. key : callable, optional A key used to determine how to sort each element of the sequence. It is **not** applied recursively. It should accept a single argument and return a single value. reverse : {{True, False}}, optional Return the list in reversed sorted order. The default is `False`. alg : ns enum, optional This option is used to control which algorithm `natsort` uses when sorting. For details into these options, please see the :class:`ns` class documentation. The default is `ns.REAL`. Returns ------- out : list The sorted input. See Also -------- index_realsorted : Returns the sorted indexes from `realsorted`. Examples -------- Use `realsorted` just like the builtin `sorted`:: >>> a = ['num5.10', 'num-3', 'num5.3', 'num2'] >>> natsorted(a) [{u}'num2', {u}'num5.3', {u}'num5.10', {u}'num-3'] >>> realsorted(a) [{u}'num-3', {u}'num2', {u}'num5.10', {u}'num5.3'] """ return natsorted(seq, key, reverse, alg | ns.REAL)
[ "def", "realsorted", "(", "seq", ",", "key", "=", "None", ",", "reverse", "=", "False", ",", "alg", "=", "ns", ".", "DEFAULT", ")", ":", "return", "natsorted", "(", "seq", ",", "key", ",", "reverse", ",", "alg", "|", "ns", ".", "REAL", ")" ]
Convenience function to properly sort signed floats. A signed float in a string could be "a-5.7". This is a wrapper around ``natsorted(seq, alg=ns.REAL)``. The behavior of :func:`realsorted` for `natsort` version >= 4.0.0 was the default behavior of :func:`natsorted` for `natsort` version < 4.0.0. Parameters ---------- seq : iterable The input to sort. key : callable, optional A key used to determine how to sort each element of the sequence. It is **not** applied recursively. It should accept a single argument and return a single value. reverse : {{True, False}}, optional Return the list in reversed sorted order. The default is `False`. alg : ns enum, optional This option is used to control which algorithm `natsort` uses when sorting. For details into these options, please see the :class:`ns` class documentation. The default is `ns.REAL`. Returns ------- out : list The sorted input. See Also -------- index_realsorted : Returns the sorted indexes from `realsorted`. Examples -------- Use `realsorted` just like the builtin `sorted`:: >>> a = ['num5.10', 'num-3', 'num5.3', 'num2'] >>> natsorted(a) [{u}'num2', {u}'num5.3', {u}'num5.10', {u}'num-3'] >>> realsorted(a) [{u}'num-3', {u}'num2', {u}'num5.10', {u}'num5.3']
[ "Convenience", "function", "to", "properly", "sort", "signed", "floats", "." ]
ea0d37ef790b42c424a096e079edd9ea0d5717e3
https://github.com/SethMMorton/natsort/blob/ea0d37ef790b42c424a096e079edd9ea0d5717e3/natsort/natsort.py#L324-L374
-1
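A sketch of realsorted, which is natsorted with ns.REAL; '5.10' is read as the float 5.1, so it sorts before '5.3':

from natsort import realsorted

print(realsorted(["num5.10", "num-3", "num5.3", "num2"]))
# -> ['num-3', 'num2', 'num5.10', 'num5.3']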
251,483
SethMMorton/natsort
natsort/natsort.py
index_natsorted
def index_natsorted(seq, key=None, reverse=False, alg=ns.DEFAULT): """ Determine the list of the indexes used to sort the input sequence. Sorts a sequence naturally, but returns a list of sorted the indexes and not the sorted list itself. This list of indexes can be used to sort multiple lists by the sorted order of the given sequence. Parameters ---------- seq : iterable The input to sort. key : callable, optional A key used to determine how to sort each element of the sequence. It is **not** applied recursively. It should accept a single argument and return a single value. reverse : {{True, False}}, optional Return the list in reversed sorted order. The default is `False`. alg : ns enum, optional This option is used to control which algorithm `natsort` uses when sorting. For details into these options, please see the :class:`ns` class documentation. The default is `ns.INT`. Returns ------- out : tuple The ordered indexes of the input. See Also -------- natsorted order_by_index Examples -------- Use index_natsorted if you want to sort multiple lists by the sorted order of one list:: >>> a = ['num3', 'num5', 'num2'] >>> b = ['foo', 'bar', 'baz'] >>> index = index_natsorted(a) >>> index [2, 0, 1] >>> # Sort both lists by the sort order of a >>> order_by_index(a, index) [{u}'num2', {u}'num3', {u}'num5'] >>> order_by_index(b, index) [{u}'baz', {u}'foo', {u}'bar'] """ if key is None: newkey = itemgetter(1) else: def newkey(x): return key(itemgetter(1)(x)) # Pair the index and sequence together, then sort by element index_seq_pair = [[x, y] for x, y in enumerate(seq)] index_seq_pair.sort(reverse=reverse, key=natsort_keygen(newkey, alg)) return [x for x, _ in index_seq_pair]
python
def index_natsorted(seq, key=None, reverse=False, alg=ns.DEFAULT): """ Determine the list of the indexes used to sort the input sequence. Sorts a sequence naturally, but returns a list of sorted the indexes and not the sorted list itself. This list of indexes can be used to sort multiple lists by the sorted order of the given sequence. Parameters ---------- seq : iterable The input to sort. key : callable, optional A key used to determine how to sort each element of the sequence. It is **not** applied recursively. It should accept a single argument and return a single value. reverse : {{True, False}}, optional Return the list in reversed sorted order. The default is `False`. alg : ns enum, optional This option is used to control which algorithm `natsort` uses when sorting. For details into these options, please see the :class:`ns` class documentation. The default is `ns.INT`. Returns ------- out : tuple The ordered indexes of the input. See Also -------- natsorted order_by_index Examples -------- Use index_natsorted if you want to sort multiple lists by the sorted order of one list:: >>> a = ['num3', 'num5', 'num2'] >>> b = ['foo', 'bar', 'baz'] >>> index = index_natsorted(a) >>> index [2, 0, 1] >>> # Sort both lists by the sort order of a >>> order_by_index(a, index) [{u}'num2', {u}'num3', {u}'num5'] >>> order_by_index(b, index) [{u}'baz', {u}'foo', {u}'bar'] """ if key is None: newkey = itemgetter(1) else: def newkey(x): return key(itemgetter(1)(x)) # Pair the index and sequence together, then sort by element index_seq_pair = [[x, y] for x, y in enumerate(seq)] index_seq_pair.sort(reverse=reverse, key=natsort_keygen(newkey, alg)) return [x for x, _ in index_seq_pair]
[ "def", "index_natsorted", "(", "seq", ",", "key", "=", "None", ",", "reverse", "=", "False", ",", "alg", "=", "ns", ".", "DEFAULT", ")", ":", "if", "key", "is", "None", ":", "newkey", "=", "itemgetter", "(", "1", ")", "else", ":", "def", "newkey", "(", "x", ")", ":", "return", "key", "(", "itemgetter", "(", "1", ")", "(", "x", ")", ")", "# Pair the index and sequence together, then sort by element", "index_seq_pair", "=", "[", "[", "x", ",", "y", "]", "for", "x", ",", "y", "in", "enumerate", "(", "seq", ")", "]", "index_seq_pair", ".", "sort", "(", "reverse", "=", "reverse", ",", "key", "=", "natsort_keygen", "(", "newkey", ",", "alg", ")", ")", "return", "[", "x", "for", "x", ",", "_", "in", "index_seq_pair", "]" ]
Determine the list of the indexes used to sort the input sequence. Sorts a sequence naturally, but returns a list of the sorted indexes and not the sorted list itself. This list of indexes can be used to sort multiple lists by the sorted order of the given sequence. Parameters ---------- seq : iterable The input to sort. key : callable, optional A key used to determine how to sort each element of the sequence. It is **not** applied recursively. It should accept a single argument and return a single value. reverse : {{True, False}}, optional Return the list in reversed sorted order. The default is `False`. alg : ns enum, optional This option is used to control which algorithm `natsort` uses when sorting. For details into these options, please see the :class:`ns` class documentation. The default is `ns.INT`. Returns ------- out : tuple The ordered indexes of the input. See Also -------- natsorted order_by_index Examples -------- Use index_natsorted if you want to sort multiple lists by the sorted order of one list:: >>> a = ['num3', 'num5', 'num2'] >>> b = ['foo', 'bar', 'baz'] >>> index = index_natsorted(a) >>> index [2, 0, 1] >>> # Sort both lists by the sort order of a >>> order_by_index(a, index) [{u}'num2', {u}'num3', {u}'num5'] >>> order_by_index(b, index) [{u}'baz', {u}'foo', {u}'bar']
[ "Determine", "the", "list", "of", "the", "indexes", "used", "to", "sort", "the", "input", "sequence", "." ]
ea0d37ef790b42c424a096e079edd9ea0d5717e3
https://github.com/SethMMorton/natsort/blob/ea0d37ef790b42c424a096e079edd9ea0d5717e3/natsort/natsort.py#L378-L444
-1
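A sketch of index_natsorted for ordering two parallel lists by the natural sort of one of them:

from natsort import index_natsorted, order_by_index

a = ["num3", "num5", "num2"]
b = ["foo", "bar", "baz"]

# Indexes that would naturally sort `a`, reusable for any parallel list.
index = index_natsorted(a)
print(index)                     # -> [2, 0, 1]
print(order_by_index(a, index))  # -> ['num2', 'num3', 'num5']
print(order_by_index(b, index))  # -> ['baz', 'foo', 'bar']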
251,484
SethMMorton/natsort
natsort/natsort.py
order_by_index
def order_by_index(seq, index, iter=False): """ Order a given sequence by an index sequence. The output of `index_natsorted` is a sequence of integers (index) that correspond to how its input sequence **would** be sorted. The idea is that this index can be used to reorder multiple sequences by the sorted order of the first sequence. This function is a convenient wrapper to apply this ordering to a sequence. Parameters ---------- seq : sequence The sequence to order. index : iterable The iterable that indicates how to order `seq`. It should be the same length as `seq` and consist of integers only. iter : {{True, False}}, optional If `True`, the ordered sequence is returned as a iterator; otherwise it is returned as a list. The default is `False`. Returns ------- out : {{list, iterator}} The sequence ordered by `index`, as a `list` or as an iterator (depending on the value of `iter`). See Also -------- index_natsorted index_humansorted index_realsorted Examples -------- `order_by_index` is a convenience function that helps you apply the result of `index_natsorted`:: >>> a = ['num3', 'num5', 'num2'] >>> b = ['foo', 'bar', 'baz'] >>> index = index_natsorted(a) >>> index [2, 0, 1] >>> # Sort both lists by the sort order of a >>> order_by_index(a, index) [{u}'num2', {u}'num3', {u}'num5'] >>> order_by_index(b, index) [{u}'baz', {u}'foo', {u}'bar'] """ return (seq[i] for i in index) if iter else [seq[i] for i in index]
python
def order_by_index(seq, index, iter=False): """ Order a given sequence by an index sequence. The output of `index_natsorted` is a sequence of integers (index) that correspond to how its input sequence **would** be sorted. The idea is that this index can be used to reorder multiple sequences by the sorted order of the first sequence. This function is a convenient wrapper to apply this ordering to a sequence. Parameters ---------- seq : sequence The sequence to order. index : iterable The iterable that indicates how to order `seq`. It should be the same length as `seq` and consist of integers only. iter : {{True, False}}, optional If `True`, the ordered sequence is returned as a iterator; otherwise it is returned as a list. The default is `False`. Returns ------- out : {{list, iterator}} The sequence ordered by `index`, as a `list` or as an iterator (depending on the value of `iter`). See Also -------- index_natsorted index_humansorted index_realsorted Examples -------- `order_by_index` is a convenience function that helps you apply the result of `index_natsorted`:: >>> a = ['num3', 'num5', 'num2'] >>> b = ['foo', 'bar', 'baz'] >>> index = index_natsorted(a) >>> index [2, 0, 1] >>> # Sort both lists by the sort order of a >>> order_by_index(a, index) [{u}'num2', {u}'num3', {u}'num5'] >>> order_by_index(b, index) [{u}'baz', {u}'foo', {u}'bar'] """ return (seq[i] for i in index) if iter else [seq[i] for i in index]
[ "def", "order_by_index", "(", "seq", ",", "index", ",", "iter", "=", "False", ")", ":", "return", "(", "seq", "[", "i", "]", "for", "i", "in", "index", ")", "if", "iter", "else", "[", "seq", "[", "i", "]", "for", "i", "in", "index", "]" ]
Order a given sequence by an index sequence. The output of `index_natsorted` is a sequence of integers (index) that correspond to how its input sequence **would** be sorted. The idea is that this index can be used to reorder multiple sequences by the sorted order of the first sequence. This function is a convenient wrapper to apply this ordering to a sequence. Parameters ---------- seq : sequence The sequence to order. index : iterable The iterable that indicates how to order `seq`. It should be the same length as `seq` and consist of integers only. iter : {{True, False}}, optional If `True`, the ordered sequence is returned as a iterator; otherwise it is returned as a list. The default is `False`. Returns ------- out : {{list, iterator}} The sequence ordered by `index`, as a `list` or as an iterator (depending on the value of `iter`). See Also -------- index_natsorted index_humansorted index_realsorted Examples -------- `order_by_index` is a convenience function that helps you apply the result of `index_natsorted`:: >>> a = ['num3', 'num5', 'num2'] >>> b = ['foo', 'bar', 'baz'] >>> index = index_natsorted(a) >>> index [2, 0, 1] >>> # Sort both lists by the sort order of a >>> order_by_index(a, index) [{u}'num2', {u}'num3', {u}'num5'] >>> order_by_index(b, index) [{u}'baz', {u}'foo', {u}'bar']
[ "Order", "a", "given", "sequence", "by", "an", "index", "sequence", "." ]
ea0d37ef790b42c424a096e079edd9ea0d5717e3
https://github.com/SethMMorton/natsort/blob/ea0d37ef790b42c424a096e079edd9ea0d5717e3/natsort/natsort.py#L545-L601
-1
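A sketch of order_by_index on its own; with iter=True it yields lazily instead of building a list:

from natsort import order_by_index

seq = ["foo", "bar", "baz"]
index = [2, 0, 1]

print(order_by_index(seq, index))                   # -> ['baz', 'foo', 'bar']
print(list(order_by_index(seq, index, iter=True)))  # same items, produced lazily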
251,485
vpelletier/pprofile
zpprofile.py
ZopeMixIn._iterOutFiles
def _iterOutFiles(self): """ Yields path, data, mimetype for each file involved on or produced by profiling. """ out = StringIO() self.callgrind(out, relative_path=True) yield ( 'cachegrind.out.pprofile', out.getvalue(), 'application/x-kcachegrind', ) for name, lines in self.iterSource(): lines = ''.join(lines) if lines: if isinstance(lines, unicode): lines = lines.encode('utf-8') yield ( os.path.normpath( os.path.splitdrive(name)[1] ).lstrip(_ALLSEP), lines, 'text/x-python', ) sql_name_template = 'query_%%0%ii-%%i_hits_%%6fs.sql' % len( str(len(self.sql_dict)), ) for index, (query, time_list) in enumerate( sorted( self.sql_dict.iteritems(), key=lambda x: (sum(x[1]), len(x[1])), reverse=True, ), ): yield ( sql_name_template % ( index, len(time_list), sum(time_list), ), b'\n'.join(b'-- %10.6fs' % x for x in time_list) + b'\n' + query, 'application/sql', ) if self.zodb_dict: yield ( 'ZODB_setstate.txt', '\n\n'.join( ( '%s (%fs)\n' % ( db_name, sum(sum(x) for x in oid_dict.itervalues()), ) ) + '\n'.join( '%s (%i): %s' % ( oid.encode('hex'), len(time_list), ', '.join('%fs' % x for x in time_list), ) for oid, time_list in oid_dict.iteritems() ) for db_name, oid_dict in self.zodb_dict.iteritems() ), 'text/plain', ) if self.traverse_dict: yield ( 'unrestrictedTraverse_pathlist.txt', tabulate( ('self', 'path', 'hit', 'total duration'), sorted( ( (context, path, len(duration_list), sum(duration_list)) for (context, path), duration_list in self.traverse_dict.iteritems() ), key=lambda x: x[3], reverse=True, ), ), 'text/plain', )
python
def _iterOutFiles(self): """ Yields path, data, mimetype for each file involved on or produced by profiling. """ out = StringIO() self.callgrind(out, relative_path=True) yield ( 'cachegrind.out.pprofile', out.getvalue(), 'application/x-kcachegrind', ) for name, lines in self.iterSource(): lines = ''.join(lines) if lines: if isinstance(lines, unicode): lines = lines.encode('utf-8') yield ( os.path.normpath( os.path.splitdrive(name)[1] ).lstrip(_ALLSEP), lines, 'text/x-python', ) sql_name_template = 'query_%%0%ii-%%i_hits_%%6fs.sql' % len( str(len(self.sql_dict)), ) for index, (query, time_list) in enumerate( sorted( self.sql_dict.iteritems(), key=lambda x: (sum(x[1]), len(x[1])), reverse=True, ), ): yield ( sql_name_template % ( index, len(time_list), sum(time_list), ), b'\n'.join(b'-- %10.6fs' % x for x in time_list) + b'\n' + query, 'application/sql', ) if self.zodb_dict: yield ( 'ZODB_setstate.txt', '\n\n'.join( ( '%s (%fs)\n' % ( db_name, sum(sum(x) for x in oid_dict.itervalues()), ) ) + '\n'.join( '%s (%i): %s' % ( oid.encode('hex'), len(time_list), ', '.join('%fs' % x for x in time_list), ) for oid, time_list in oid_dict.iteritems() ) for db_name, oid_dict in self.zodb_dict.iteritems() ), 'text/plain', ) if self.traverse_dict: yield ( 'unrestrictedTraverse_pathlist.txt', tabulate( ('self', 'path', 'hit', 'total duration'), sorted( ( (context, path, len(duration_list), sum(duration_list)) for (context, path), duration_list in self.traverse_dict.iteritems() ), key=lambda x: x[3], reverse=True, ), ), 'text/plain', )
[ "def", "_iterOutFiles", "(", "self", ")", ":", "out", "=", "StringIO", "(", ")", "self", ".", "callgrind", "(", "out", ",", "relative_path", "=", "True", ")", "yield", "(", "'cachegrind.out.pprofile'", ",", "out", ".", "getvalue", "(", ")", ",", "'application/x-kcachegrind'", ",", ")", "for", "name", ",", "lines", "in", "self", ".", "iterSource", "(", ")", ":", "lines", "=", "''", ".", "join", "(", "lines", ")", "if", "lines", ":", "if", "isinstance", "(", "lines", ",", "unicode", ")", ":", "lines", "=", "lines", ".", "encode", "(", "'utf-8'", ")", "yield", "(", "os", ".", "path", ".", "normpath", "(", "os", ".", "path", ".", "splitdrive", "(", "name", ")", "[", "1", "]", ")", ".", "lstrip", "(", "_ALLSEP", ")", ",", "lines", ",", "'text/x-python'", ",", ")", "sql_name_template", "=", "'query_%%0%ii-%%i_hits_%%6fs.sql'", "%", "len", "(", "str", "(", "len", "(", "self", ".", "sql_dict", ")", ")", ",", ")", "for", "index", ",", "(", "query", ",", "time_list", ")", "in", "enumerate", "(", "sorted", "(", "self", ".", "sql_dict", ".", "iteritems", "(", ")", ",", "key", "=", "lambda", "x", ":", "(", "sum", "(", "x", "[", "1", "]", ")", ",", "len", "(", "x", "[", "1", "]", ")", ")", ",", "reverse", "=", "True", ",", ")", ",", ")", ":", "yield", "(", "sql_name_template", "%", "(", "index", ",", "len", "(", "time_list", ")", ",", "sum", "(", "time_list", ")", ",", ")", ",", "b'\\n'", ".", "join", "(", "b'-- %10.6fs'", "%", "x", "for", "x", "in", "time_list", ")", "+", "b'\\n'", "+", "query", ",", "'application/sql'", ",", ")", "if", "self", ".", "zodb_dict", ":", "yield", "(", "'ZODB_setstate.txt'", ",", "'\\n\\n'", ".", "join", "(", "(", "'%s (%fs)\\n'", "%", "(", "db_name", ",", "sum", "(", "sum", "(", "x", ")", "for", "x", "in", "oid_dict", ".", "itervalues", "(", ")", ")", ",", ")", ")", "+", "'\\n'", ".", "join", "(", "'%s (%i): %s'", "%", "(", "oid", ".", "encode", "(", "'hex'", ")", ",", "len", "(", "time_list", ")", ",", "', '", ".", "join", "(", "'%fs'", "%", "x", "for", "x", "in", "time_list", ")", ",", ")", "for", "oid", ",", "time_list", "in", "oid_dict", ".", "iteritems", "(", ")", ")", "for", "db_name", ",", "oid_dict", "in", "self", ".", "zodb_dict", ".", "iteritems", "(", ")", ")", ",", "'text/plain'", ",", ")", "if", "self", ".", "traverse_dict", ":", "yield", "(", "'unrestrictedTraverse_pathlist.txt'", ",", "tabulate", "(", "(", "'self'", ",", "'path'", ",", "'hit'", ",", "'total duration'", ")", ",", "sorted", "(", "(", "(", "context", ",", "path", ",", "len", "(", "duration_list", ")", ",", "sum", "(", "duration_list", ")", ")", "for", "(", "context", ",", "path", ")", ",", "duration_list", "in", "self", ".", "traverse_dict", ".", "iteritems", "(", ")", ")", ",", "key", "=", "lambda", "x", ":", "x", "[", "3", "]", ",", "reverse", "=", "True", ",", ")", ",", ")", ",", "'text/plain'", ",", ")" ]
Yields path, data, mimetype for each file involved in or produced by profiling.
[ "Yields", "path", "data", "mimetype", "for", "each", "file", "involved", "on", "or", "produced", "by", "profiling", "." ]
51a36896727565faf23e5abccc9204e5f935fe1e
https://github.com/vpelletier/pprofile/blob/51a36896727565faf23e5abccc9204e5f935fe1e/zpprofile.py#L407-L486
-1
251,486
vpelletier/pprofile
pprofile.py
run
def run(cmd, filename=None, threads=True, verbose=False): """Similar to profile.run .""" _run(threads, verbose, 'run', filename, cmd)
python
def run(cmd, filename=None, threads=True, verbose=False): """Similar to profile.run .""" _run(threads, verbose, 'run', filename, cmd)
[ "def", "run", "(", "cmd", ",", "filename", "=", "None", ",", "threads", "=", "True", ",", "verbose", "=", "False", ")", ":", "_run", "(", "threads", ",", "verbose", ",", "'run'", ",", "filename", ",", "cmd", ")" ]
Similar to profile.run .
[ "Similar", "to", "profile", ".", "run", "." ]
51a36896727565faf23e5abccc9204e5f935fe1e
https://github.com/vpelletier/pprofile/blob/51a36896727565faf23e5abccc9204e5f935fe1e/pprofile.py#L1182-L1184
-1
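A minimal usage sketch for the run helper above. The profiled function and the output filename are invented for illustration; like profile.run, the statement is assumed to execute in the __main__ namespace, and a callgrind-style filename is assumed to select the KCachegrind-compatible report.

import pprofile

def slow_function():
    total = 0
    for i in range(10000):
        total += i * i
    return total

# 'slow_function' and the output name are placeholders; without a filename
# the annotated source is assumed to be printed instead, mirroring profile.run.
pprofile.run('slow_function()', filename='cachegrind.out.slow_function')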
251,487
vpelletier/pprofile
pprofile.py
runctx
def runctx(cmd, globals, locals, filename=None, threads=True, verbose=False): """Similar to profile.runctx .""" _run(threads, verbose, 'runctx', filename, cmd, globals, locals)
python
def runctx(cmd, globals, locals, filename=None, threads=True, verbose=False): """Similar to profile.runctx .""" _run(threads, verbose, 'runctx', filename, cmd, globals, locals)
[ "def", "runctx", "(", "cmd", ",", "globals", ",", "locals", ",", "filename", "=", "None", ",", "threads", "=", "True", ",", "verbose", "=", "False", ")", ":", "_run", "(", "threads", ",", "verbose", ",", "'runctx'", ",", "filename", ",", "cmd", ",", "globals", ",", "locals", ")" ]
Similar to profile.runctx .
[ "Similar", "to", "profile", ".", "runctx", "." ]
51a36896727565faf23e5abccc9204e5f935fe1e
https://github.com/vpelletier/pprofile/blob/51a36896727565faf23e5abccc9204e5f935fe1e/pprofile.py#L1186-L1188
-1
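runctx avoids the __main__ namespace question by taking explicit globals and locals; a short sketch with an invented workload:

import pprofile

def fib(n):
    return n if n < 2 else fib(n - 1) + fib(n - 2)

# Explicit namespaces make this usable from inside a function or a test;
# with no filename the report is assumed to be printed rather than saved.
pprofile.runctx('result = fib(20)', globals(), locals())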
251,488
vpelletier/pprofile
pprofile.py
runfile
def runfile(fd, argv, fd_name='<unknown>', compile_flags=0, dont_inherit=1, filename=None, threads=True, verbose=False): """ Run code from given file descriptor with profiling enabled. Closes fd before executing contained code. """ _run(threads, verbose, 'runfile', filename, fd, argv, fd_name, compile_flags, dont_inherit)
python
def runfile(fd, argv, fd_name='<unknown>', compile_flags=0, dont_inherit=1, filename=None, threads=True, verbose=False): """ Run code from given file descriptor with profiling enabled. Closes fd before executing contained code. """ _run(threads, verbose, 'runfile', filename, fd, argv, fd_name, compile_flags, dont_inherit)
[ "def", "runfile", "(", "fd", ",", "argv", ",", "fd_name", "=", "'<unknown>'", ",", "compile_flags", "=", "0", ",", "dont_inherit", "=", "1", ",", "filename", "=", "None", ",", "threads", "=", "True", ",", "verbose", "=", "False", ")", ":", "_run", "(", "threads", ",", "verbose", ",", "'runfile'", ",", "filename", ",", "fd", ",", "argv", ",", "fd_name", ",", "compile_flags", ",", "dont_inherit", ")" ]
Run code from given file descriptor with profiling enabled. Closes fd before executing contained code.
[ "Run", "code", "from", "given", "file", "descriptor", "with", "profiling", "enabled", ".", "Closes", "fd", "before", "executing", "contained", "code", "." ]
51a36896727565faf23e5abccc9204e5f935fe1e
https://github.com/vpelletier/pprofile/blob/51a36896727565faf23e5abccc9204e5f935fe1e/pprofile.py#L1190-L1197
-1
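A sketch of runfile with an already-open file object; the script path and argv values are placeholders. Per the docstring, the descriptor is closed before the contained code runs, so nothing else should rely on it afterwards.

import pprofile

# Hypothetical target script; argv is assumed to stand in for sys.argv
# while the profiled code runs.
with open('benchmark_script.py') as fd:
    pprofile.runfile(fd, ['benchmark_script.py', '--iterations', '10'],
                     fd_name='benchmark_script.py')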
251,489
vpelletier/pprofile
pprofile.py
runpath
def runpath(path, argv, filename=None, threads=True, verbose=False): """ Run code from open-accessible file path with profiling enabled. """ _run(threads, verbose, 'runpath', filename, path, argv)
python
def runpath(path, argv, filename=None, threads=True, verbose=False): """ Run code from open-accessible file path with profiling enabled. """ _run(threads, verbose, 'runpath', filename, path, argv)
[ "def", "runpath", "(", "path", ",", "argv", ",", "filename", "=", "None", ",", "threads", "=", "True", ",", "verbose", "=", "False", ")", ":", "_run", "(", "threads", ",", "verbose", ",", "'runpath'", ",", "filename", ",", "path", ",", "argv", ")" ]
Run code from open-accessible file path with profiling enabled.
[ "Run", "code", "from", "open", "-", "accessible", "file", "path", "with", "profiling", "enabled", "." ]
51a36896727565faf23e5abccc9204e5f935fe1e
https://github.com/vpelletier/pprofile/blob/51a36896727565faf23e5abccc9204e5f935fe1e/pprofile.py#L1199-L1203
-1
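runpath is the path-based variant, presumably also what the pprofile command line ends up using for a script argument (an assumption about the CLI wiring); the names below are illustrative.

import pprofile

# Profile a script by path; a callgrind-style output name is assumed to
# select the KCachegrind-compatible format, as with dump_stats further down.
pprofile.runpath('benchmark_script.py', ['benchmark_script.py'],
                 filename='cachegrind.out.benchmark')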
251,490
vpelletier/pprofile
pprofile.py
pprofile
def pprofile(line, cell=None): """ Profile line execution. """ if cell is None: # TODO: detect and use arguments (statistical profiling, ...) ? return run(line) return _main( ['%%pprofile', '-m', '-'] + shlex.split(line), io.StringIO(cell), )
python
def pprofile(line, cell=None): """ Profile line execution. """ if cell is None: # TODO: detect and use arguments (statistical profiling, ...) ? return run(line) return _main( ['%%pprofile', '-m', '-'] + shlex.split(line), io.StringIO(cell), )
[ "def", "pprofile", "(", "line", ",", "cell", "=", "None", ")", ":", "if", "cell", "is", "None", ":", "# TODO: detect and use arguments (statistical profiling, ...) ?", "return", "run", "(", "line", ")", "return", "_main", "(", "[", "'%%pprofile'", ",", "'-m'", ",", "'-'", "]", "+", "shlex", ".", "split", "(", "line", ")", ",", "io", ".", "StringIO", "(", "cell", ")", ",", ")" ]
Profile line execution.
[ "Profile", "line", "execution", "." ]
51a36896727565faf23e5abccc9204e5f935fe1e
https://github.com/vpelletier/pprofile/blob/51a36896727565faf23e5abccc9204e5f935fe1e/pprofile.py#L1388-L1398
-1
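From the branches above, the line magic delegates to run() and the cell magic feeds the cell body through the command-line entry point; how the magic gets registered in an IPython session (for example via %load_ext pprofile) is an assumption, since the registration code is not part of this record.

# In an IPython session (registration mechanism assumed):
#   %pprofile expensive_call()      # line magic: delegates to run(), per the source above
#
#   %%pprofile                      # cell magic: the cell body is profiled as '-m -'
#   for i in range(1000):
#       expensive_call()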
251,491
vpelletier/pprofile
pprofile.py
_FileTiming.hit
def hit(self, code, line, duration): """ A line has finished executing. code (code) container function's code object line (int) line number of just executed line duration (float) duration of the line, in seconds """ entry = self.line_dict[line][code] entry[0] += 1 entry[1] += duration
python
def hit(self, code, line, duration): """ A line has finished executing. code (code) container function's code object line (int) line number of just executed line duration (float) duration of the line, in seconds """ entry = self.line_dict[line][code] entry[0] += 1 entry[1] += duration
[ "def", "hit", "(", "self", ",", "code", ",", "line", ",", "duration", ")", ":", "entry", "=", "self", ".", "line_dict", "[", "line", "]", "[", "code", "]", "entry", "[", "0", "]", "+=", "1", "entry", "[", "1", "]", "+=", "duration" ]
A line has finished executing. code (code) container function's code object line (int) line number of just executed line duration (float) duration of the line, in seconds
[ "A", "line", "has", "finished", "executing", "." ]
51a36896727565faf23e5abccc9204e5f935fe1e
https://github.com/vpelletier/pprofile/blob/51a36896727565faf23e5abccc9204e5f935fe1e/pprofile.py#L192-L205
-1
251,492
vpelletier/pprofile
pprofile.py
_FileTiming.call
def call(self, code, line, callee_file_timing, callee, duration, frame): """ A call originating from this file returned. code (code) caller's code object line (int) caller's line number callee_file_timing (FileTiming) callee's FileTiming callee (code) callee's code object duration (float) duration of the call, in seconds frame (frame) calle's entire frame as of its return """ try: entry = self.call_dict[(code, line, callee)] except KeyError: self.call_dict[(code, line, callee)] = [callee_file_timing, 1, duration] else: entry[1] += 1 entry[2] += duration
python
def call(self, code, line, callee_file_timing, callee, duration, frame): """ A call originating from this file returned. code (code) caller's code object line (int) caller's line number callee_file_timing (FileTiming) callee's FileTiming callee (code) callee's code object duration (float) duration of the call, in seconds frame (frame) calle's entire frame as of its return """ try: entry = self.call_dict[(code, line, callee)] except KeyError: self.call_dict[(code, line, callee)] = [callee_file_timing, 1, duration] else: entry[1] += 1 entry[2] += duration
[ "def", "call", "(", "self", ",", "code", ",", "line", ",", "callee_file_timing", ",", "callee", ",", "duration", ",", "frame", ")", ":", "try", ":", "entry", "=", "self", ".", "call_dict", "[", "(", "code", ",", "line", ",", "callee", ")", "]", "except", "KeyError", ":", "self", ".", "call_dict", "[", "(", "code", ",", "line", ",", "callee", ")", "]", "=", "[", "callee_file_timing", ",", "1", ",", "duration", "]", "else", ":", "entry", "[", "1", "]", "+=", "1", "entry", "[", "2", "]", "+=", "duration" ]
A call originating from this file returned. code (code) caller's code object line (int) caller's line number callee_file_timing (FileTiming) callee's FileTiming callee (code) callee's code object duration (float) duration of the call, in seconds frame (frame) callee's entire frame as of its return
[ "A", "call", "originating", "from", "this", "file", "returned", "." ]
51a36896727565faf23e5abccc9204e5f935fe1e
https://github.com/vpelletier/pprofile/blob/51a36896727565faf23e5abccc9204e5f935fe1e/pprofile.py#L207-L230
-1
251,493
vpelletier/pprofile
pprofile.py
ProfileBase.dump_stats
def dump_stats(self, filename): """ Similar to profile.Profile.dump_stats - but different output format ! """ if _isCallgrindName(filename): with open(filename, 'w') as out: self.callgrind(out) else: with io.open(filename, 'w', errors='replace') as out: self.annotate(out)
python
def dump_stats(self, filename): """ Similar to profile.Profile.dump_stats - but different output format ! """ if _isCallgrindName(filename): with open(filename, 'w') as out: self.callgrind(out) else: with io.open(filename, 'w', errors='replace') as out: self.annotate(out)
[ "def", "dump_stats", "(", "self", ",", "filename", ")", ":", "if", "_isCallgrindName", "(", "filename", ")", ":", "with", "open", "(", "filename", ",", "'w'", ")", "as", "out", ":", "self", ".", "callgrind", "(", "out", ")", "else", ":", "with", "io", ".", "open", "(", "filename", ",", "'w'", ",", "errors", "=", "'replace'", ")", "as", "out", ":", "self", ".", "annotate", "(", "out", ")" ]
Similar to profile.Profile.dump_stats - but different output format !
[ "Similar", "to", "profile", ".", "Profile", ".", "dump_stats", "-", "but", "different", "output", "format", "!" ]
51a36896727565faf23e5abccc9204e5f935fe1e
https://github.com/vpelletier/pprofile/blob/51a36896727565faf23e5abccc9204e5f935fe1e/pprofile.py#L723-L732
-1
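A sketch of dump_stats; the profiled snippet is invented, and the branch above implies that a cachegrind-style name (whatever _isCallgrindName accepts) yields callgrind output while any other name yields annotated source.

import pprofile

prof = pprofile.Profile()
with prof():
    sum(i * i for i in range(100000))

prof.dump_stats('cachegrind.out.squares')   # assumed to match _isCallgrindName -> callgrind format
prof.dump_stats('squares_profile.txt')      # any other name -> annotated source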
251,494
vpelletier/pprofile
pprofile.py
ProfileRunnerBase.runctx
def runctx(self, cmd, globals, locals): """Similar to profile.Profile.runctx .""" with self(): exec(cmd, globals, locals) return self
python
def runctx(self, cmd, globals, locals): """Similar to profile.Profile.runctx .""" with self(): exec(cmd, globals, locals) return self
[ "def", "runctx", "(", "self", ",", "cmd", ",", "globals", ",", "locals", ")", ":", "with", "self", "(", ")", ":", "exec", "(", "cmd", ",", "globals", ",", "locals", ")", "return", "self" ]
Similar to profile.Profile.runctx .
[ "Similar", "to", "profile", ".", "Profile", ".", "runctx", "." ]
51a36896727565faf23e5abccc9204e5f935fe1e
https://github.com/vpelletier/pprofile/blob/51a36896727565faf23e5abccc9204e5f935fe1e/pprofile.py#L752-L756
-1
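The same runctx is available on a profiler instance, and it returns the profiler itself, so it can feed straight into reporting; the workload below is invented.

import pprofile

prof = pprofile.Profile()
data = list(range(1000))
prof.runctx('ordered = sorted(data, reverse=True)', globals(), locals())
# The instance is returned, so reporting can follow immediately,
# e.g. prof.dump_stats('runctx_profile.txt') as shown further up.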
251,495
vpelletier/pprofile
pprofile.py
Profile.enable
def enable(self): """ Enable profiling. """ if self.enabled_start: warn('Duplicate "enable" call') else: self._enable() sys.settrace(self._global_trace)
python
def enable(self): """ Enable profiling. """ if self.enabled_start: warn('Duplicate "enable" call') else: self._enable() sys.settrace(self._global_trace)
[ "def", "enable", "(", "self", ")", ":", "if", "self", ".", "enabled_start", ":", "warn", "(", "'Duplicate \"enable\" call'", ")", "else", ":", "self", ".", "_enable", "(", ")", "sys", ".", "settrace", "(", "self", ".", "_global_trace", ")" ]
Enable profiling.
[ "Enable", "profiling", "." ]
51a36896727565faf23e5abccc9204e5f935fe1e
https://github.com/vpelletier/pprofile/blob/51a36896727565faf23e5abccc9204e5f935fe1e/pprofile.py#L843-L851
-1
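enable() brackets an arbitrary region by hand when a context manager or runctx is awkward; pairing it with disable() is assumed from the duplicate-enable warning here and the _disable hook in the next record.

import pprofile

prof = pprofile.Profile()
prof.enable()
try:
    total = sum(i ** 2 for i in range(50000))   # region of interest (placeholder)
finally:
    prof.disable()   # assumed public counterpart of enable(); see _disable below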
251,496
vpelletier/pprofile
pprofile.py
Profile._disable
def _disable(self): """ Overload this method when subclassing. Called after actually disabling trace. """ self.total_time += time() - self.enabled_start self.enabled_start = None del self.stack
python
def _disable(self): """ Overload this method when subclassing. Called after actually disabling trace. """ self.total_time += time() - self.enabled_start self.enabled_start = None del self.stack
[ "def", "_disable", "(", "self", ")", ":", "self", ".", "total_time", "+=", "time", "(", ")", "-", "self", ".", "enabled_start", "self", ".", "enabled_start", "=", "None", "del", "self", ".", "stack" ]
Overload this method when subclassing. Called after actually disabling trace.
[ "Overload", "this", "method", "when", "subclassing", ".", "Called", "after", "actually", "disabling", "trace", "." ]
51a36896727565faf23e5abccc9204e5f935fe1e
https://github.com/vpelletier/pprofile/blob/51a36896727565faf23e5abccc9204e5f935fe1e/pprofile.py#L853-L860
-1
251,497
rsinger86/drf-flex-fields
rest_flex_fields/serializers.py
FlexFieldsSerializerMixin._make_expanded_field_serializer
def _make_expanded_field_serializer( self, name, nested_expand, nested_fields, nested_omit ): """ Returns an instance of the dynamically created nested serializer. """ field_options = self.expandable_fields[name] serializer_class = field_options[0] serializer_settings = copy.deepcopy(field_options[1]) if name in nested_expand: serializer_settings["expand"] = nested_expand[name] if name in nested_fields: serializer_settings["fields"] = nested_fields[name] if name in nested_omit: serializer_settings["omit"] = nested_omit[name] if serializer_settings.get("source") == name: del serializer_settings["source"] if type(serializer_class) == str: serializer_class = self._import_serializer_class(serializer_class) return serializer_class(**serializer_settings)
python
def _make_expanded_field_serializer( self, name, nested_expand, nested_fields, nested_omit ): """ Returns an instance of the dynamically created nested serializer. """ field_options = self.expandable_fields[name] serializer_class = field_options[0] serializer_settings = copy.deepcopy(field_options[1]) if name in nested_expand: serializer_settings["expand"] = nested_expand[name] if name in nested_fields: serializer_settings["fields"] = nested_fields[name] if name in nested_omit: serializer_settings["omit"] = nested_omit[name] if serializer_settings.get("source") == name: del serializer_settings["source"] if type(serializer_class) == str: serializer_class = self._import_serializer_class(serializer_class) return serializer_class(**serializer_settings)
[ "def", "_make_expanded_field_serializer", "(", "self", ",", "name", ",", "nested_expand", ",", "nested_fields", ",", "nested_omit", ")", ":", "field_options", "=", "self", ".", "expandable_fields", "[", "name", "]", "serializer_class", "=", "field_options", "[", "0", "]", "serializer_settings", "=", "copy", ".", "deepcopy", "(", "field_options", "[", "1", "]", ")", "if", "name", "in", "nested_expand", ":", "serializer_settings", "[", "\"expand\"", "]", "=", "nested_expand", "[", "name", "]", "if", "name", "in", "nested_fields", ":", "serializer_settings", "[", "\"fields\"", "]", "=", "nested_fields", "[", "name", "]", "if", "name", "in", "nested_omit", ":", "serializer_settings", "[", "\"omit\"", "]", "=", "nested_omit", "[", "name", "]", "if", "serializer_settings", ".", "get", "(", "\"source\"", ")", "==", "name", ":", "del", "serializer_settings", "[", "\"source\"", "]", "if", "type", "(", "serializer_class", ")", "==", "str", ":", "serializer_class", "=", "self", ".", "_import_serializer_class", "(", "serializer_class", ")", "return", "serializer_class", "(", "*", "*", "serializer_settings", ")" ]
Returns an instance of the dynamically created nested serializer.
[ "Returns", "an", "instance", "of", "the", "dynamically", "created", "nested", "serializer", "." ]
56495f15977d76697972acac571792e8fd67003d
https://github.com/rsinger86/drf-flex-fields/blob/56495f15977d76697972acac571792e8fd67003d/rest_flex_fields/serializers.py#L50-L75
-1
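The field_options tuples unpacked above come from an expandable_fields mapping declared on the serializer; a hedged sketch of such a declaration follows (model and serializer names are invented, and whether the mapping lives on the class or on Meta may vary by library version).

from rest_flex_fields import FlexFieldsModelSerializer

class CountrySerializer(FlexFieldsModelSerializer):
    class Meta:
        model = Country          # placeholder model
        fields = ['id', 'name']

class PersonSerializer(FlexFieldsModelSerializer):
    class Meta:
        model = Person           # placeholder model
        fields = ['id', 'name', 'country']

    expandable_fields = {
        # (serializer class or dotted-path string, kwargs forwarded to it)
        'country': (CountrySerializer, {'fields': ['id', 'name']}),
    }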
251,498
rsinger86/drf-flex-fields
rest_flex_fields/serializers.py
FlexFieldsSerializerMixin._clean_fields
def _clean_fields(self, omit_fields, sparse_fields, next_level_omits): """ Remove fields that are found in omit list, and if sparse names are passed, remove any fields not found in that list. """ sparse = len(sparse_fields) > 0 to_remove = [] if not sparse and len(omit_fields) == 0: return for field_name in self.fields: is_present = self._should_field_exist( field_name, omit_fields, sparse_fields, next_level_omits ) if not is_present: to_remove.append(field_name) for remove_field in to_remove: self.fields.pop(remove_field)
python
def _clean_fields(self, omit_fields, sparse_fields, next_level_omits): """ Remove fields that are found in omit list, and if sparse names are passed, remove any fields not found in that list. """ sparse = len(sparse_fields) > 0 to_remove = [] if not sparse and len(omit_fields) == 0: return for field_name in self.fields: is_present = self._should_field_exist( field_name, omit_fields, sparse_fields, next_level_omits ) if not is_present: to_remove.append(field_name) for remove_field in to_remove: self.fields.pop(remove_field)
[ "def", "_clean_fields", "(", "self", ",", "omit_fields", ",", "sparse_fields", ",", "next_level_omits", ")", ":", "sparse", "=", "len", "(", "sparse_fields", ")", ">", "0", "to_remove", "=", "[", "]", "if", "not", "sparse", "and", "len", "(", "omit_fields", ")", "==", "0", ":", "return", "for", "field_name", "in", "self", ".", "fields", ":", "is_present", "=", "self", ".", "_should_field_exist", "(", "field_name", ",", "omit_fields", ",", "sparse_fields", ",", "next_level_omits", ")", "if", "not", "is_present", ":", "to_remove", ".", "append", "(", "field_name", ")", "for", "remove_field", "in", "to_remove", ":", "self", ".", "fields", ".", "pop", "(", "remove_field", ")" ]
Remove fields that are found in omit list, and if sparse names are passed, remove any fields not found in that list.
[ "Remove", "fields", "that", "are", "found", "in", "omit", "list", "and", "if", "sparse", "names", "are", "passed", "remove", "any", "fields", "not", "found", "in", "that", "list", "." ]
56495f15977d76697972acac571792e8fd67003d
https://github.com/rsinger86/drf-flex-fields/blob/56495f15977d76697972acac571792e8fd67003d/rest_flex_fields/serializers.py#L92-L112
-1
251,499
rsinger86/drf-flex-fields
rest_flex_fields/serializers.py
FlexFieldsSerializerMixin._can_access_request
def _can_access_request(self): """ Can access current request object if all are true - The serializer is the root. - A request context was passed in. - The request method is GET. """ if self.parent: return False if not hasattr(self, "context") or not self.context.get("request", None): return False return self.context["request"].method == "GET"
python
def _can_access_request(self): """ Can access current request object if all are true - The serializer is the root. - A request context was passed in. - The request method is GET. """ if self.parent: return False if not hasattr(self, "context") or not self.context.get("request", None): return False return self.context["request"].method == "GET"
[ "def", "_can_access_request", "(", "self", ")", ":", "if", "self", ".", "parent", ":", "return", "False", "if", "not", "hasattr", "(", "self", ",", "\"context\"", ")", "or", "not", "self", ".", "context", ".", "get", "(", "\"request\"", ",", "None", ")", ":", "return", "False", "return", "self", ".", "context", "[", "\"request\"", "]", ".", "method", "==", "\"GET\"" ]
Can access current request object if all are true - The serializer is the root. - A request context was passed in. - The request method is GET.
[ "Can", "access", "current", "request", "object", "if", "all", "are", "true", "-", "The", "serializer", "is", "the", "root", ".", "-", "A", "request", "context", "was", "passed", "in", ".", "-", "The", "request", "method", "is", "GET", "." ]
56495f15977d76697972acac571792e8fd67003d
https://github.com/rsinger86/drf-flex-fields/blob/56495f15977d76697972acac571792e8fd67003d/rest_flex_fields/serializers.py#L158-L171
-1
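Taken together, the three methods above mean the expand/fields/omit behaviour is driven by query parameters on GET requests only; the endpoint and parameter values below are illustrative.

# Hypothetical requests against a viewset using a serializer like the sketch above:
#   GET /people/?expand=country                   -> 'country' rendered by the nested serializer
#   GET /people/?fields=id,name                   -> sparse fields: everything else is removed
#   GET /people/?omit=country                     -> 'country' dropped entirely
#   GET /people/?expand=country&omit=country.name -> nested omit forwarded to the child serializer
# Non-GET requests (and nested serializers) skip this, per _can_access_request above.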