docstring: string (lengths 52–499)
function: string (lengths 67–35.2k)
__index_level_0__: int64 (52.6k–1.16M)
Send HTML document to user. Args: - data: Dict used to render the template, or string with rendered HTML. - template: Name of template to render the HTML document with the passed data.
def html(self, data=None, template=None): if data is None: data = {} if template: return render(self.request, template, data) return HttpResponse(data)
1,145,713
Try to look for meta tag in given `dom`. Args: dom (obj): pyDHTMLParser DOM of HTML elements. default (default "utf-8"): What to use if encoding is not found in `dom`. Returns: str/default: Found encoding, or the `default` parameter if not found.
def _get_encoding(dom, default="utf-8"): encoding = dom.find("meta", {"http-equiv": "Content-Type"}) if not encoding: return default encoding = encoding[0].params.get("content", None) if not encoding: return default return encoding.lower().split("=")[-1]
1,145,939
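A minimal usage sketch for `_get_encoding` (assuming the `dhtmlparser` package and the helper above are importable; the markup is illustrative):

```python
import dhtmlparser

html = (
    '<head>'
    '<meta http-equiv="Content-Type" content="text/html; charset=windows-1250">'
    '</head>'
)
dom = dhtmlparser.parseString(html)

print(_get_encoding(dom))  # -> "windows-1250"

# Without a matching <meta> tag, the default is returned.
print(_get_encoding(dhtmlparser.parseString("<head></head>")))  # -> "utf-8"
```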
Look for encoding in given `html`. Try to convert `html` to UTF-8. Args: html (str): HTML code as string. Returns: str: HTML code encoded in UTF-8.
def handle_encoding(html): encoding = _get_encoding( dhtmlparser.parseString( html.split("</head>")[0] ) ) if encoding == "utf-8": return html return html.decode(encoding).encode("utf-8")
1,145,940
Check whether `element` matches the rest of the parameters. All checks are performed only if the proper attribute is set in the HTMLElement. Args: element (obj): HTMLElement instance. tag_name (str): Tag name. params (dict): Parameters of the tag. content (str): Content of the tag. Returns: bool: True if everything matches, False otherwise.
def is_equal_tag(element, tag_name, params, content): if tag_name and tag_name != element.getTagName(): return False if params and not element.containsParamSubset(params): return False if content is not None and content.strip() != element.getContent().strip(): return False return True
1,145,941
Create a ConsoleWidget. Parameters: ----------- parent : QWidget, optional [default None] The parent for this widget.
def __init__(self, parent=None, **kw): QtGui.QWidget.__init__(self, parent) LoggingConfigurable.__init__(self, **kw) # While scrolling the pager on Mac OS X, it tears badly. The # NativeGesture is platform and perhaps build-specific hence # we take adequate precautions here. self._pager_scroll_events = [QtCore.QEvent.Wheel] if hasattr(QtCore.QEvent, 'NativeGesture'): self._pager_scroll_events.append(QtCore.QEvent.NativeGesture) # Create the layout and underlying text widget. layout = QtGui.QStackedLayout(self) layout.setContentsMargins(0, 0, 0, 0) self._control = self._create_control() if self.paging in ('hsplit', 'vsplit'): self._splitter = QtGui.QSplitter() if self.paging == 'hsplit': self._splitter.setOrientation(QtCore.Qt.Horizontal) else: self._splitter.setOrientation(QtCore.Qt.Vertical) self._splitter.addWidget(self._control) layout.addWidget(self._splitter) else: layout.addWidget(self._control) # Create the paging widget, if necessary. if self.paging in ('inside', 'hsplit', 'vsplit'): self._page_control = self._create_page_control() if self._splitter: self._page_control.hide() self._splitter.addWidget(self._page_control) else: layout.addWidget(self._page_control) # Initialize protected variables. Some variables contain useful state # information for subclasses; they should be considered read-only. self._append_before_prompt_pos = 0 self._ansi_processor = QtAnsiCodeProcessor() if self.gui_completion == 'ncurses': self._completion_widget = CompletionHtml(self) elif self.gui_completion == 'droplist': self._completion_widget = CompletionWidget(self) elif self.gui_completion == 'plain': self._completion_widget = CompletionPlain(self) self._continuation_prompt = '> ' self._continuation_prompt_html = None self._executing = False self._filter_drag = False self._filter_resize = False self._html_exporter = HtmlExporter(self._control) self._input_buffer_executing = '' self._input_buffer_pending = '' self._kill_ring = QtKillRing(self._control) self._prompt = '' self._prompt_html = None self._prompt_pos = 0 self._prompt_sep = '' self._reading = False self._reading_callback = None self._tab_width = 8 # Set a monospaced font. self.reset_font() # Configure actions. action = QtGui.QAction('Print', None) action.setEnabled(True) printkey = QtGui.QKeySequence(QtGui.QKeySequence.Print) if printkey.matches("Ctrl+P") and sys.platform != 'darwin': # Only override the default if there is a collision. # Qt ctrl = cmd on OSX, so the match gets a false positive on OSX. printkey = "Ctrl+Shift+P" action.setShortcut(printkey) action.setShortcutContext(QtCore.Qt.WidgetWithChildrenShortcut) action.triggered.connect(self.print_) self.addAction(action) self.print_action = action action = QtGui.QAction('Save as HTML/XML', None) action.setShortcut(QtGui.QKeySequence.Save) action.setShortcutContext(QtCore.Qt.WidgetWithChildrenShortcut) action.triggered.connect(self.export_html) self.addAction(action) self.export_action = action action = QtGui.QAction('Select All', None) action.setEnabled(True) selectall = QtGui.QKeySequence(QtGui.QKeySequence.SelectAll) if selectall.matches("Ctrl+A") and sys.platform != 'darwin': # Only override the default if there is a collision. # Qt ctrl = cmd on OSX, so the match gets a false positive on OSX. 
selectall = "Ctrl+Shift+A" action.setShortcut(selectall) action.setShortcutContext(QtCore.Qt.WidgetWithChildrenShortcut) action.triggered.connect(self.select_all) self.addAction(action) self.select_all_action = action self.increase_font_size = QtGui.QAction("Bigger Font", self, shortcut=QtGui.QKeySequence.ZoomIn, shortcutContext=QtCore.Qt.WidgetWithChildrenShortcut, statusTip="Increase the font size by one point", triggered=self._increase_font_size) self.addAction(self.increase_font_size) self.decrease_font_size = QtGui.QAction("Smaller Font", self, shortcut=QtGui.QKeySequence.ZoomOut, shortcutContext=QtCore.Qt.WidgetWithChildrenShortcut, statusTip="Decrease the font size by one point", triggered=self._decrease_font_size) self.addAction(self.decrease_font_size) self.reset_font_size = QtGui.QAction("Normal Font", self, shortcut="Ctrl+0", shortcutContext=QtCore.Qt.WidgetWithChildrenShortcut, statusTip="Restore the Normal font size", triggered=self.reset_font) self.addAction(self.reset_font_size)
1,146,080
Clear the console. Parameters: ----------- keep_input : bool, optional (default True) If set, restores the old input buffer if a new prompt is written.
def clear(self, keep_input=True): if self._executing: self._control.clear() else: if keep_input: input_buffer = self.input_buffer self._control.clear() self._show_prompt() if keep_input: self.input_buffer = input_buffer
1,146,085
Paste the contents of the clipboard into the input region. Parameters: ----------- mode : QClipboard::Mode, optional [default QClipboard::Clipboard] Controls which part of the system clipboard is used. This can be used to access the selection clipboard in X11 and the Find buffer in Mac OS. By default, the regular clipboard is used.
def paste(self, mode=QtGui.QClipboard.Clipboard): if self._control.textInteractionFlags() & QtCore.Qt.TextEditable: # Make sure the paste is safe. self._keep_cursor_in_buffer() cursor = self._control.textCursor() # Remove any trailing newline, which confuses the GUI and forces the # user to backspace. text = QtGui.QApplication.clipboard().text(mode).rstrip() self._insert_plain_text_into_buffer(cursor, dedent(text))
1,146,091
Given a KeyboardModifiers flags object, return whether the Control key is down. Parameters: ----------- include_command : bool, optional (default False) Whether to treat the Command key as a (mutually exclusive) synonym for Control on Mac OS.
def _control_key_down(self, modifiers, include_command=False): # Note that on Mac OS, ControlModifier corresponds to the Command key # while MetaModifier corresponds to the Control key. if sys.platform == 'darwin': down = include_command and (modifiers & QtCore.Qt.ControlModifier) return bool(down) ^ bool(modifiers & QtCore.Qt.MetaModifier) else: return bool(modifiers & QtCore.Qt.ControlModifier)
1,146,105
Displays text using the pager if it exceeds the height of the viewport. Parameters: ----------- html : bool, optional (default False) If set, the text will be interpreted as HTML instead of plain text.
def _page(self, text, html=False): line_height = QtGui.QFontMetrics(self.font).height() minlines = self._control.viewport().height() / line_height if self.paging != 'none' and \ re.match("(?:[^\n]*\n){%i}" % minlines, text): if self.paging == 'custom': self.custom_page_requested.emit(text) else: self._page_control.clear() cursor = self._page_control.textCursor() if html: self._insert_html(cursor, text) else: self._insert_plain_text(cursor, text) self._page_control.moveCursor(QtGui.QTextCursor.Start) self._page_control.viewport().resize(self._control.size()) if self._splitter: self._page_control.show() self._page_control.setFocus() else: self.layout().setCurrentWidget(self._page_control) elif html: self._append_html(text) else: self._append_plain_text(text)
1,146,128
Return last <td> found in `el` DOM. Args: el (obj): :class:`dhtmlparser.HTMLElement` instance. Returns: obj: HTMLElement instance if found, or None if there are no <td> tags.
def _get_last_td(el): if not el: return None if type(el) in [list, tuple, set]: el = el[0] last = el.find("td") if not last: return None return last[-1]
1,146,276
Get <tr> tag with given `ID` and return content of the last <td> tag from <tr> root. Args: details (obj): :class:`dhtmlparser.HTMLElement` instance. ID (str): id property of the <tr> tag. Returns: str: Content of the last <td> as string.
def _get_td_or_none(details, ID): content = details.find("tr", {"id": ID}) content = _get_last_td(content) # if content is None, return it if not content: return None content = content.getContent().strip() # if content is blank string, return None if not content: return None return content
1,146,277
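A hedged usage sketch: feed `_get_td_or_none` a details fragment parsed with `dhtmlparser`; the markup below mimics the `<tr id=...>` rows the scraper expects:

```python
import dhtmlparser

details = dhtmlparser.parseString(
    '<tr id="ctl00_ContentPlaceHolder1_tblRowNakladatel">'
    '<td>Nakladatel:</td><td> Host </td>'
    '</tr>'
)

# Content of the last <td> under the matching <tr>, stripped.
print(_get_td_or_none(details, "ctl00_ContentPlaceHolder1_tblRowNakladatel"))
# -> "Host"
```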
Parse title/name of the book. Args: dom (obj): HTMLElement containing whole HTML page. details (obj): HTMLElement containing slice of the page with details. Returns: str: Book's title. Raises: AssertionError: If title not found.
def _parse_title(dom, details): title = details.find("h1") # if the header is missing, try to parse title from the <title> tag if not title: title = dom.find("title") assert title, "Can't find <title> tag!" return title[0].getContent().split("|")[0].strip() return title[0].getContent().strip()
1,146,278
Parse authors of the book. Args: details (obj): HTMLElement containing slice of the page with details. Returns: list: List of :class:`structures.Author` objects. Empty list if no author found.
def _parse_authors(details): authors = details.find( "tr", {"id": "ctl00_ContentPlaceHolder1_tblRowAutor"} ) if not authors: return [] # book with unspecified authors # parse authors from HTML and convert them to Author objects author_list = [] for author in authors[0].find("a"): author_obj = Author(author.getContent()) if "href" in author.params: author_obj.URL = author.params["href"] author_list.append(author_obj) return author_list
1,146,279
Parse publisher of the book. Args: details (obj): HTMLElement containing slice of the page with details. Returns: str/None: Publisher's name as string or None if not found.
def _parse_publisher(details): publisher = _get_td_or_none( details, "ctl00_ContentPlaceHolder1_tblRowNakladatel" ) # publisher is not specified if not publisher: return None publisher = dhtmlparser.removeTags(publisher).strip() # return None instead of blank string if not publisher: return None return publisher
1,146,280
Parse number of pages and binding of the book. Args: details (obj): HTMLElement containing slice of the page with details. Returns: (pages, binding): Tuple of two strings, or two Nones.
def _parse_pages_binding(details): pages = _get_td_or_none( details, "ctl00_ContentPlaceHolder1_tblRowRozsahVazba" ) if not pages: return None, None binding = None # binding info and number of pages is stored in same string if "/" in pages: binding = pages.split("/")[1].strip() pages = pages.split("/")[0].strip() if not pages: pages = None return pages, binding
1,146,281
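The source cell combines both values in one "/"-separated string; a pure-string sketch of the split performed above (the sample value is illustrative):

```python
raw = "320 / paperback"   # illustrative cell content

binding = None
pages = raw
if "/" in raw:
    binding = raw.split("/")[1].strip()   # -> "paperback"
    pages = raw.split("/")[0].strip()     # -> "320"

print((pages, binding))   # ('320', 'paperback')
```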
Parse ISBN and EAN. Args: details (obj): HTMLElement containing slice of the page with details. Returns: (ISBN, EAN): Tuple of two strings, or two Nones.
def _parse_ISBN_EAN(details): isbn_ean = _get_td_or_none( details, "ctl00_ContentPlaceHolder1_tblRowIsbnEan" ) if not isbn_ean: return None, None ean = None isbn = None if "/" in isbn_ean: # ISBN and EAN are stored in same string isbn, ean = isbn_ean.split("/") isbn = isbn.strip() ean = ean.strip() else: isbn = isbn_ean.strip() if not isbn: isbn = None return isbn, ean
1,146,282
Parse description of the book. Args: details (obj): HTMLElement containing slice of the page with details. Returns: str/None: Description as string or None if not found.
def _parse_description(details): description = details.find("div", {"class": "detailPopis"}) # description not found if not description: return None # remove links to ebook version ekniha = description[0].find("div", {"class": "ekniha"}) if ekniha: ekniha[0].replaceWith(dhtmlparser.HTMLElement("")) # remove links to other books from same category detail = description[0].find("p", {"class": "detailKat"}) if detail: detail[0].replaceWith(dhtmlparser.HTMLElement("")) # remove all HTML elements description = dhtmlparser.removeTags(description[0]).strip() # description is blank if not description: return None return description
1,146,283
Parse available information about book from the book details page. Args: book_url (str): Absolute URL of the book. Returns: obj: :class:`structures.Publication` instance with book details.
def _process_book(book_url): data = DOWNER.download(book_url) dom = dhtmlparser.parseString(data) details_tags = dom.find("div", {"id": "contentDetail"}) assert details_tags, "Can't find details of the book." details = details_tags[0] # parse required information title = _parse_title(dom, details) authors = _parse_authors(details) publisher = _parse_publisher(details) price = _parse_price(details) pages, binding = _parse_pages_binding(details) pub = Publication( title, authors, price, publisher ) # parse optional information pub.optionals.URL = book_url pub.optionals.binding = binding pub.optionals.pages = pages pub.optionals.ISBN, pub.optionals.EAN = _parse_ISBN_EAN(details) pub.optionals.edition = _parse_edition(details) pub.optionals.description = _parse_description(details) return pub
1,146,284
Convert page & item to an integer * if page == 1, then return the item number itself, since the item count is the true # of items * if page > 1, then return (page - 1) * page_items + item, since the preceding pages are full. Args: * page: page number * item: item number on that page Returns: * Integer - Which represents the number of items up to (and including) this item.
def pi_to_number(self, page=1, item=1): if page > 1: return ((page - 1) * self.page_items) + item else: return 0 + item
1,146,334
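A worked sketch of the arithmetic, with an assumed `page_items` of 10: item 4 on page 3 sits behind two full pages of 10 items each.

```python
class Pager:
    page_items = 10   # assumed items-per-page for the example

    def pi_to_number(self, page=1, item=1):
        if page > 1:
            return ((page - 1) * self.page_items) + item
        return item

print(Pager().pi_to_number(page=3, item=4))   # -> 24 (= 2 * 10 + 4)
print(Pager().pi_to_number(page=1, item=7))   # -> 7
```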
Convert subpage & subitem to an integer * if subpage == 1, then return the subitem number itself * if subpage > 1, then return (subpage - 1) * subpage_items + subitem, since the preceding subpages are full. Args: * subpage: subpage number (None returns 0) * subitem: subitem number (None is treated as 0) Returns: * Integer - Which represents the number of items up to the subpage.
def sub_pi_to_number(self, subpage=1, subitem=1): if subitem is None: subitem = 0 if subpage is None: return 0 else: if subpage > 1: return ((subpage - 1) * self.subpage_items) + subitem else: return 0 + subitem
1,146,335
Convert the current subpage & subitem slots to an integer by delegating to sub_pi_to_number; a missing subpage counts as zero. Args: * None Returns: * Integer - Which represents the number of items up to the current subpage/subitem.
def current_spi_to_number(self): if self.slots['subpage'] is None: return self.sub_pi_to_number(0, 0) else: return self.sub_pi_to_number(self.slots['subpage'], self.slots['subitem'])
1,146,336
Return the number of items on page. Args: * page = The Page to test for * total_items = the total item count Returns: * Integer - Which represents the calculated number of items on page.
def return_item_count_on_page(self, page=1, total_items=1): up_to_page = ((page - 1) * self.page_items) # Number of items up to the page in question if total_items > up_to_page: # Remove all the items up to the page in question count = total_items - up_to_page else: count = total_items if count >= self.page_items: # The remaining items are greater than the items per page so the answer is a full page return self.page_items else: # There are fewer items than a full page, return count return count
1,146,345
Return the number of items on page. Args: * page = The Page to test for * total_items = the total item count Returns: * Integer - Which represents the calculated number of items on page.
def return_item_count_on_subpage(self, subpage=1, total_items=1): up_to_subpage = ((subpage - 1) * self.subpage_items) # Number of items up to the subpage in question if total_items > up_to_subpage: # Remove all the items up to the subpage in question count = total_items - up_to_subpage else: count = total_items if count >= self.subpage_items: # The remaining items are greater than the items per page so the answer is a full page return self.subpage_items else: # There are fewer items than a full page, return count return count
1,146,346
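A standalone restatement of the counting rule with concrete numbers (10 items per page, 23 items total): pages 1 and 2 are full, page 3 holds the remainder.

```python
def items_on_page(page, total_items, page_items=10):
    up_to_page = (page - 1) * page_items
    count = total_items - up_to_page if total_items > up_to_page else total_items
    return page_items if count >= page_items else count

print(items_on_page(1, 23))   # -> 10 (full page)
print(items_on_page(3, 23))   # -> 3  (remainder)
```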
Set a value by key. Arguments: cache: instance of Cache key: key string, e.g. 'user:342:username' value: the value to store under `key`
def set_value(cache, key, value): with cache as redis_connection: return redis_connection.set(key, value)
1,146,523
Get data from the .dat files args: inputfile (file): Input file argnum (int, optional): Required number of columns on each data line; a ValueError is raised on mismatch close (bool, default False): Closes inputfile if True returns: dictionary: data: array of parsed data variables: dictionary of additional variables parsed from "#f" and "#l" lines
def getdata(inputfile, argnum=None, close=False): # get data and converts them to list # outputtype - list, dict, all output = [] add_data = {} line_num = 0 for line in inputfile: line_num += 1 if ("#" not in line) and (line != ""): linesplit = line.split() if argnum is not None and len(linesplit) != int(argnum): raise ValueError( "Line {:d} has {:d} arguments (need {:d})".format( line_num, len(linesplit), argnum)) output.append(linesplit) # additional float variable if "#f" in line: data = line.split()[1].split("=") add_data[data[0]] = float(data[1]) # additional list float variable if "#l" in line: data = line.split()[1].split("=") add_data[data[0]] = [float(e) for e in data[1].split(",")] if close: inputfile.close() output = cleandata(output) return { "data": np.array(output), "variables": add_data, }
1,146,587
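A sketch of the .dat layout `getdata` expects, fed through `io.StringIO` (assumes `getdata`/`cleandata` above and numpy are available): plain rows of whitespace-separated columns, plus `#f name=value` float lines and `#l name=a,b,c` float-list lines.

```python
import io

sample = io.StringIO(
    "1.0 2.0\n"
    "3.0 4.0\n"
    "#f scale=1.5\n"
    "#l offsets=0.1,0.2,0.3\n"
)

result = getdata(sample, argnum=2)
print(result["data"])        # numpy array [[1. 2.] [3. 4.]]
print(result["variables"])   # {'scale': 1.5, 'offsets': [0.1, 0.2, 0.3]}
```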
Helper function for parse.getdata. Remove empty variables, convert strings to float. args: inputlist (list): List of variables Returns: output: Cleaned list
def cleandata(inputlist): output = [] for e in inputlist: new = [] for f in e: if f == "--": new.append(None) else: new.append(float(f)) output.append(new) return output
1,146,588
Compute the quadratic estimate of the centroid in a 2d-array. Args: data (2darray): two dimensional data array Returns center (tuple): centroid estimate on the row and column directions, respectively
def quadratic_2d(data): arg_data_max = np.argmax(data) i, j = np.unravel_index(arg_data_max, data.shape) z_ = data[i-1:i+2, j-1:j+2] # our quadratic function is defined as # f(x, y | a, b, c, d, e, f) := a + b * x + c * y + d * x^2 + e * xy + f * y^2 # therefore, the best fit coefficients are given as # note that they are unique and the uncertainty in each of them (#TODO) can be # computed following the derivations done by Vakili & Hogg (2016) and # Teague & Foreman-Mackey (2018) try: a = (-z_[0,0] + 2*z_[0,1] - z_[0,2] + 2*z_[1,0] + 5*z_[1,1] + 2*z_[1,2] - z_[2,0] + 2*z_[2,1] - z_[2,2]) / 9 b = (-z_[0,0] - z_[0,1] - z_[0,2] + z_[2,0] + z_[2,1] + z_[2,2]) / 6 c = (-z_[0,0] + z_[0,2] - z_[1,0] + z_[1,2] - z_[2,0] + z_[2,2]) / 6 d = (z_[0,0] + z_[0,1] + z_[0,2] - z_[1,0]*2 - z_[1,1]*2 - z_[1,2]*2 + z_[2,0] + z_[2,1] + z_[2,2])/6 e = (z_[0,0] - z_[0,2] - z_[2,0] + z_[2,2]) * .25 f = (z_[0,0] - 2 * z_[0,1] + z_[0,2] + z_[1,0] - 2 * z_[1,1] + z_[1,2] + z_[2,0] - 2 * z_[2,1] + z_[2,2]) / 6 except IndexError: return (i, j) # see https://en.wikipedia.org/wiki/Quadratic_function det = 4 * d * f - e ** 2 xm = - (2 * f * b - c * e) / det ym = - (2 * d * c - b * e) / det return (i+xm, j+ym)
1,146,675
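A quick usage sketch: for a smooth peak the estimate lands near the true sub-pixel maximum (the peak position is illustrative):

```python
import numpy as np

y, x = np.mgrid[0:9, 0:9]
data = np.exp(-((x - 4.3) ** 2 + (y - 3.7) ** 2) / 2.0)

print(quadratic_2d(data))   # roughly (3.7, 4.3) - (row, column)
```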
Create zone records. Arguments: server: TonicDNS API server token: TonicDNS API authentication token domain: Specify domain name identifier: Template ID dtype: MASTER|SLAVE|NATIVE (default: MASTER) master: master server ip address when dtype is SLAVE (default: None) ContentType: application/json x-authentication-token: token
def create_zone(server, token, domain, identifier, dtype, master=None): method = 'PUT' uri = 'https://' + server + '/zone' obj = JSONConverter(domain) obj.generate_zone(domain, identifier, dtype, master) connect.tonicdns_client(uri, method, token, obj.zone)
1,146,881
Create records of specific domain. Arguments: server: TonicDNS API server token: TonicDNS API authentication token domain: Specify domain name data: Create records ContentType: application/json x-authentication-token: token
def create_records(server, token, domain, data): method = 'PUT' uri = 'https://' + server + '/zone/' + domain for i in data: connect.tonicdns_client(uri, method, token, i)
1,146,882
Delete records of specific domain. Arguments: server: TonicDNS API server token: TonicDNS API authentication token data: Delete records ContentType: application/json x-authentication-token: token
def delete_records(server, token, data): method = 'DELETE' uri = 'https://' + server + '/zone' for i in data: connect.tonicdns_client(uri, method, token, i)
1,146,883
Look for neighbours of the `element`, return proper :class:`PathCall`. Args: element (obj): HTMLElement instance of the object you are looking for. Returns: list: List of :class:`PathCall` instances.
def neighbours_pattern(element): # check if there are any neighbours if not element.parent: return [] parent = element.parent # filter only visible tags/neighbours neighbours = filter( lambda x: x.isTag() and not x.isEndTag() or x.getContent().strip() \ or x is element, parent.childs ) if len(neighbours) <= 1: return [] output = [] element_index = neighbours.index(element) # pick left neighbour if element_index >= 1: output.append( _neighbour_to_path_call( "left", neighbours[element_index - 1], element ) ) # pick right neighbour if element_index + 1 < len(neighbours): output.append( _neighbour_to_path_call( "right", neighbours[element_index + 1], element ) ) return output
1,147,311
Look for `element` by its predecessors. Args: element (obj): HTMLElement instance of the object you are looking for. root (obj): Root of the `DOM`. Returns: list: ``[PathCall()]`` - list with one :class:`PathCall` object (to allow use with ``.extend(predecesors_pattern())``).
def predecesors_pattern(element, root): def is_root_container(el): return el.parent.parent.getTagName() == "" if not element.parent or not element.parent.parent or \ is_root_container(element): return [] trail = [ [ element.parent.parent.getTagName(), _params_or_none(element.parent.parent.params) ], [ element.parent.getTagName(), _params_or_none(element.parent.params) ], [element.getTagName(), _params_or_none(element.params)], ] match = root.match(*trail) if element in match: return [ PathCall("match", match.index(element), trail) ]
1,147,312
Give the server information about this node Arguments: node -- node_name or token for the node this data belongs to key -- identifiable key, that you use later to retrieve that piece of data kwargs -- the data you need to store
def post(node_name, key, **kwargs): node = nago.core.get_node(node_name) if not node: raise ValueError("Node named %s not found" % node_name) token = node.token node_data[token] = node_data[token] or {} node_data[token][key] = kwargs return "thanks!"
1,147,409
Send our information to a remote nago instance Arguments: node -- node_name or token for the node this data belongs to
def send(node_name): my_data = nago.core.get_my_info() if not node_name: node_name = nago.settings.get('server') node = nago.core.get_node(node_name) json_params = {} json_params['node_name'] = node_name json_params['key'] = "node_info" for k, v in my_data.items(): nago.core.log("sending %s to %s" % (k, node['host_name']), level="notice") json_params[k] = v return node.send_command('info', 'post', node_name=node.token, key="node_info", **my_data)
1,147,410
Returns the bestfit y value for a given x args: bestfit_x: scalar, array_like x value return: scalar, array_like bestfit y value
def bestfit_func(self, bestfit_x): if not self.done_bestfit: raise KeyError("Do do_bestfit first") bestfit_y = self.fit_args[1] * (bestfit_x ** self.fit_args[0]) return bestfit_y
1,147,584
Only dispatch if the event does not correspond to an ignored file. Args: event (watchdog.events.FileSystemEvent)
def dispatch(self, event): if event.is_directory: return paths = [] if has_attribute(event, 'dest_path'): paths.append(os.path.realpath( unicode_paths.decode(event.dest_path))) if event.src_path: paths.append(os.path.realpath( unicode_paths.decode(event.src_path))) paths = [p for p in paths if not p.startswith(os.path.realpath(self.vcs.repository_dir())) and not self.vcs.path_is_ignored(p)] if len(paths) > 0: super(VcsEventHandler, self).dispatch(event)
1,147,625
Return content of the first element in `el_list` or `alt`. Also return `alt` if the content string of first element is blank. Args: el_list (list): List of HTMLElement objects. alt (default None): Value returned when the list or content is blank. strip (bool, default True): Call .strip() on the content. Returns: str or alt: String representation of the content of the first element, or `alt` if not found.
def get_first_content(el_list, alt=None, strip=True): if not el_list: return alt content = el_list[0].getContent() if strip: content = content.strip() if not content: return alt return content
1,147,726
Test whether `url` is an absolute url (``http://domain.tld/something``) or relative (``../something``). Args: url (str): Tested string. protocol (str, default "http"): Protocol which will be sought at the beginning of the `url`. Returns: bool: True if url is absolute, False if not.
def is_absolute_url(url, protocol="http"): if ":" not in url: return False proto, rest = url.split(":", 1) if proto.startswith(protocol) and rest.startswith("//"): return True return False
1,147,727
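Quick behavior checks for `is_absolute_url` (pure function, no dependencies):

```python
print(is_absolute_url("http://example.com/page"))   # True
print(is_absolute_url("https://example.com"))       # True ("https" starts with "http")
print(is_absolute_url("../relative/path"))          # False (no ":")
print(is_absolute_url("mailto:user@example.com"))   # False (no "//" after ":")
```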
Normalize the `url` - from relative, create absolute URL. Args: base_url (str): Domain with ``protocol://`` string rel_url (str): Relative or absolute url. Returns: str/None: Normalized URL or None if `url` is blank.
def normalize_url(base_url, rel_url): if not rel_url: return None if not is_absolute_url(rel_url): rel_url = rel_url.replace("../", "/") if (not base_url.endswith("/")) and (not rel_url.startswith("/")): return base_url + "/" + rel_url.replace("../", "/") return base_url + rel_url.replace("../", "/") return rel_url
1,147,728
Generate function, which checks whether the content of the tag matches `tag_content`. Args: tag_content (str): Content of the tag which will be matched through the whole DOM. content_transformer (fn, default None): Function used to transform all tags before matching. This function can be used as a parameter for the .find() method in HTMLElement.
def content_matchs(tag_content, content_transformer=None): def content_matchs_closure(element): if not element.isTag(): return False cont = element.getContent() if content_transformer: cont = content_transformer(cont) return tag_content == cont return content_matchs_closure
1,147,731
Send `post_dict` to the :attr:`.ALEPH_EXPORT_URL`. Args: post_dict (dict): dictionary from :class:`PostData.get_POST_data()` Returns: str: Response from webform.
def _sendPostDict(post_dict): downer = Downloader() downer.headers["Referer"] = settings.EDEPOSIT_EXPORT_REFERER data = downer.download(settings.ALEPH_EXPORT_URL, post=post_dict) rheaders = downer.response_headers error_msg = rheaders.get("aleph-info", "").lower().strip() if "aleph-info" in rheaders and error_msg.startswith("error"): raise ExportRejectedException( "Export request was rejected by import webform: %s" % rheaders["aleph-info"] ) return data
1,147,751
Gets basic information from a binary stream to allow correct processing of the attribute header. This function allows the interpretation of the attribute type, attribute length and whether the attribute is non resident. Args: binary_view (memoryview of bytearray) - A binary stream with the information of the attribute Returns: A tuple with the attribute type, the attribute length, in bytes, and whether the attribute is non resident.
def get_attr_info(binary_view): global _ATTR_BASIC attr_type, attr_len, non_resident = _ATTR_BASIC.unpack(binary_view[:9]) return (AttrTypes(attr_type), attr_len, bool(non_resident))
1,148,032
Changes the time zones of all timestamps. Receives a new timezone and applies to all timestamps, if necessary. Args: timezone (:obj:`tzinfo`): Time zone to be applied Returns: A new ``Timestamps`` object if the time zone changes, otherwise returns ``self``.
def _astimezone_ts(self, timezone): if self.created.tzinfo is timezone: return self else: nw_obj = Timestamps((None,)*4) nw_obj.created = self.created.astimezone(timezone) nw_obj.changed = self.changed.astimezone(timezone) nw_obj.mft_changed = self.mft_changed.astimezone(timezone) nw_obj.accessed = self.accessed.astimezone(timezone) return nw_obj
1,148,035
Checks if a particular index is allocated. Args: entry_number (int): Index to verify Returns: bool: True if it is allocated, False otherwise.
def _entry_allocated_bitmap(self, entry_number): index, offset = divmod(entry_number, 8) return bool(self._bitmap[index] & (1 << offset))
1,148,048
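The `divmod` above maps an entry number to a byte and a bit within the bitmap; a standalone sketch with a hand-built two-byte bitmap:

```python
bitmap = bytes([0b00000000, 0b00001000])    # only entry 11 is allocated

index, offset = divmod(11, 8)               # -> (1, 3): byte 1, bit 3
print(bool(bitmap[index] & (1 << offset)))  # True
print(bool(bitmap[0] & (1 << 0)))           # False (entry 0 not allocated)
```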
Creates a new object DataRuns from a binary stream. The binary stream can be represented by a byte string, bytearray or a memoryview of the bytearray. Args: binary_view (memoryview of bytearray) - A binary stream with the information of the attribute Returns: DataRuns: New object using the binary stream as source
def create_from_binary(cls, binary_view): nw_obj = cls() offset = 0 previous_dr_offset = 0 header_size = cls._INFO.size #"header" of a data run is always a byte while binary_view[offset] != 0: #the runlist ends with a 0 as the "header" header = cls._INFO.unpack(binary_view[offset:offset+header_size])[0] length_len = header & 0x0F length_offset = (header & 0xF0) >> 4 temp_len = offset+header_size+length_len #helper variable just to make things simpler dr_length = int.from_bytes(binary_view[offset+header_size:temp_len], "little", signed=False) if length_offset: #the offset is relative to the previous data run dr_offset = int.from_bytes(binary_view[temp_len:temp_len+length_offset], "little", signed=True) + previous_dr_offset previous_dr_offset = dr_offset else: #if it is sparse, it requires a different approach dr_offset = None offset += header_size + length_len + length_offset nw_obj.data_runs.append((dr_length, dr_offset)) #nw_obj.data_runs.append(DataRun(dr_length, dr_offset)) _MOD_LOGGER.debug("DataRuns object created successfully") return nw_obj
1,148,066
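A self-contained sketch of the same decoding, run on a classic NTFS runlist: header byte 0x21 means a 1-byte length and a 2-byte offset, and the list terminates on a 0 header. This mirrors the method above rather than calling it:

```python
def decode_runs(buf):
    runs, offset, prev = [], 0, 0
    while buf[offset] != 0:                       # runlist ends with a 0 header
        header = buf[offset]
        length_len = header & 0x0F
        offset_len = (header & 0xF0) >> 4
        pos = offset + 1
        length = int.from_bytes(buf[pos:pos + length_len], "little")
        if offset_len:                            # offset is relative to the previous run
            prev += int.from_bytes(
                buf[pos + length_len:pos + length_len + offset_len],
                "little", signed=True)
            runs.append((length, prev))
        else:                                     # sparse run: no physical offset
            runs.append((length, None))
        offset = pos + length_len + offset_len
    return runs

print(decode_runs(b"\x21\x18\x34\x56\x00"))       # [(24, 22068)] (0x18 clusters at 0x5634)
```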
Creates a new object AttributeHeader from a binary stream. The binary stream can be represented by a byte string, bytearray or a memoryview of the bytearray. Args: binary_view (memoryview of bytearray) - A binary stream with the information of the attribute Returns: AttributeHeader: New object using the binary stream as source
def create_from_binary(cls, binary_view): attr_type, attr_len, non_resident, name_len, name_offset, flags, attr_id, \ content_len, content_offset, indexed_flag = cls._REPR.unpack(binary_view[:cls._REPR.size]) if name_len: name = binary_view[name_offset:name_offset+(2*name_len)].tobytes().decode("utf_16_le") else: name = None nw_obj = cls((AttrTypes(attr_type), attr_len, bool(non_resident), AttrFlags(flags), attr_id, name), (content_len, content_offset, indexed_flag)) return nw_obj
1,148,069
Returns the path for the pdf file args: pdffilename: string returns path for the plots folder / pdffilename.pdf
def get_pdffilepath(pdffilename): return FILEPATHSTR.format( root_dir=ROOT_DIR, os_sep=os.sep, os_extsep=os.extsep, name=pdffilename, folder=PURPOSE.get("plots").get("folder", "plots"), ext=PURPOSE.get("plots").get("extension", "pdf") )
1,148,156
Do make_tex_table and pass all arguments args: inputlist: list outputfilename: string fmt: dictionary key: integer column index starting with 0 values: string format string. eg "{:g}" **kwargs: nonestring: string string when objecttype is None
def make_tex_table(inputlist, outputfilename, fmt=None, **kwargs): outputfilepath = FILEPATHSTR.format( root_dir=ROOT_DIR, os_sep=os.sep, os_extsep=os.extsep, name=outputfilename, folder=PURPOSE.get("tables").get("folder", "tables"), ext=PURPOSE.get("tables").get("extension", "tex") ) table.make_tex_table(inputlist, open(outputfilepath, 'wb'), fmt=fmt, close=kwargs.get("close", True), **kwargs)
1,148,157
reads an elasticsearch mapping dictionary and returns a list of fields joined with dot notation args: mapping: the mapping dictionary to parse parent: list of parent key names, used with a recursive call
def mapping_fields(mapping, parent=[]): rtn_obj = {} for key, value in mapping.items(): new_key = parent + [key] new_key = ".".join(new_key) rtn_obj.update({new_key: value.get('type')}) if value.get('properties'): rtn_obj.update(mapping_fields(value['properties'], [new_key])) elif value.get('fields'): rtn_obj.update(mapping_fields(value['fields'], [new_key])) rtn_obj[new_key] = [rtn_obj[new_key]] + \ list(value['fields'].keys()) return rtn_obj
1,148,159
reads a dictionary and returns a list of fields joined with dot notation args: obj: the dictionary to parse parent: list of parent key names, used with a recursive call
def dict_fields(obj, parent=[]): rtn_obj = {} for key, value in obj.items(): new_key = parent + [key] new_key = ".".join(new_key) if isinstance(value, list): if value: value = value[0] if isinstance(value, dict): rtn_obj.update(dict_fields(value, [new_key])) else: rtn_obj.update({new_key: value}) return rtn_obj
1,148,160
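For example, with `dict_fields` above in scope, nested keys collapse into dotted paths:

```python
doc = {"name": "Ada", "address": {"city": "London", "geo": {"lat": 51.5}}}

print(dict_fields(doc))
# -> {'name': 'Ada', 'address.city': 'London', 'address.geo.lat': 51.5}
```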
Returns all the rdfclasses that have an associated elasticsearch mapping Args: None
def list_mapped_classes(): cls_dict = {key: value for key, value in MODULE.rdfclass.__dict__.items() if not isinstance(value, RdfConfigManager) and key not in ['properties'] and hasattr(value, 'es_defs') and value.es_defs.get('kds_esIndex')} new_dict = {} # remove items that appear as a subclass of a main mapping class # the intersection of the set of the cls_dict values and a class's # individual hierarchy will be >1 if the class is a subclass of another # class in the list potential_maps = set([cls_.__name__ for cls_ in cls_dict.values()]) for name, cls_ in cls_dict.items(): parents = set(cls_.hierarchy) if len(parents.intersection(potential_maps)) <= 1: new_dict[name] = cls_ return new_dict
1,148,162
Returns a dictionary with the key as the es_index name and the value as a list of rdfclasses for that index args: None
def list_indexes(cls): cls_list = cls.list_mapped_classes() rtn_obj = {} for key, value in cls_list.items(): idx = value.es_defs.get('kds_esIndex')[0] try: rtn_obj[idx].append(value) except KeyError: rtn_obj[idx] = [value] return rtn_obj
1,148,163
Returns an elasticsearch mapping for the specified index based on the mapping defined by the rdf class definitions args: idx_obj: Dictionary of the index and a list of rdfclasses included in the mapping
def get_rdf_es_idx_map(cls, idx_obj): idx_name = list(idx_obj)[0] es_map = { "index": idx_name, "body" : { "mappings": {}, "settings": { # "read_only_allow_delete": False, "index": { # "blocks" : { # "read_only_allow_delete" : "false" # }, "analysis": { "analyzer": { "keylower": { "tokenizer": "keyword", "type": "custom", "filter": "lowercase", "ignore_above" : 256 } } } } } } } for idx_cls in idx_obj[idx_name]: # pdb.set_trace() es_map['body']['mappings'][idx_cls.es_defs['kds_esDocType'][0]] = \ {'properties': idx_cls.es_mapping(idx_cls)} return es_map
1,148,164
sends the mapping to elasticsearch args: es_map: dictionary of the index mapping kwargs: reset_idx: WARNING! If True the current referenced es index will be deleted destroying all data in that index in elasticsearch. if False an incremented index will be created and data-migration will start from the old to the new index
def send_es_mapping(self, es_map, **kwargs): log.setLevel(kwargs.get('log_level', self.log_level)) def next_es_index_version(curr_alias): try: alias_def = self.es.indices.get_alias(alias) except es_except.NotFoundError: alias_def = {alias + "_v0":{}} old_idx = list(alias_def)[0] parts = old_idx.split("_v") try: parts[1] = str(int(parts[1]) + 1) except IndexError: parts = [old_idx,'1'] return {'old': old_idx, 'new': "_v".join(parts)} reset_idx= kwargs.get('reset_idx', False) alias = es_map.pop('index') idx_names = next_es_index_version(alias) # Delete if the index series if reset_idx was passed if reset_idx: log.warning("DELETING Elasticsearch INDEX => %s ******", alias) self.es.indices.delete(index=alias + "_v*", ignore=[400, 404]) idx_names['new'] = alias + "_v1" # Create the new index and apply the mapping self.es.indices.create(index=idx_names['new'], body=es_map['body'], update_all_types=True) # if the index was not deleted transfer documents from old to the # new index if not reset_idx and self.es.indices.exists(idx_names['old']): url = os.path.join(self.es_url,'_reindex').replace('\\','/') data = {"source":{"index": idx_names['old']}, "dest":{"index": idx_names['new']}} # Python elasticsearch recommends using a direct call to the # es 5+ _reindex URL vice using their helper. result = requests.post(url, headers={'Content-Type':'application/json'}, data = json.dumps(data)) self.es.indices.delete_alias(index=idx_names['old'], name=alias, ignore=[403]) self.es.indices.delete(index=idx_names['old'], ignore=[400, 404]) # add the alias to the new index self.es.indices.put_alias(index=idx_names['new'], name=alias)
1,148,165
Returns a dictionary of mappings and the field names in dot notation args: es_mappings: es mapping definitions to parse
def mapping_ref(self, es_mappings): new_map = {} for key, value in es_mappings.items(): for sub_key, sub_value in value.items(): new_map["/".join([key, sub_key])] = \ mapping_fields(sub_value['properties']) return new_map
1,148,168
Validate value. Args: value: The value to validate. Returns: A validated value. Raises: UnitError
def validate(self, value): for validate in self.validates: value = validate(value) return value
1,148,312
Fetch a ref. Args: profile A profile generated from ``simplygithub.authentication.profile``. Such profiles tell this module (i) the ``repo`` to connect to, and (ii) the ``token`` to connect with. ref The ref to fetch, e.g., ``heads/my-feature-branch``. Returns A dict with data about the ref.
def get_ref(profile, ref): resource = "/refs/" + ref data = api.get_request(profile, resource) return prepare(data)
1,148,350
Create a ref. Args: profile A profile generated from ``simplygithub.authentication.profile``. Such profiles tell this module (i) the ``repo`` to connect to, and (ii) the ``token`` to connect with. ref The ref to create, e.g., ``heads/my-feature-branch``. sha The SHA of the commit to point the ref to. Returns A dict with data about the ref.
def create_ref(profile, ref, sha): resource = "/refs" payload = {"ref": "refs/" + ref, "sha": sha} data = api.post_request(profile, resource, payload) return prepare(data)
1,148,351
Point a ref to a new SHA. Args: profile A profile generated from ``simplygithub.authentication.profile``. Such profiles tell this module (i) the ``repo`` to connect to, and (ii) the ``token`` to connect with. ref The ref to update, e.g., ``heads/my-feature-branch``. sha The SHA of the commit to point the ref to. Returns A dict with data about the ref.
def update_ref(profile, ref, sha): resource = "/refs/" + ref payload = {"sha": sha} data = api.patch_request(profile, resource, payload) return prepare(data)
1,148,352
Filter :class:`.Publication` objects using settings declared in the :mod:`~harvester.settings` submodule. Args: publication (obj): :class:`.Publication` instance. Returns: obj/None: None if the publication was found in Aleph, or `publication` if not.
def filter_publication(publication): if settings.USE_DUP_FILTER: publication = dup_filter.filter_publication(publication) if publication and settings.USE_ALEPH_FILTER: publication = aleph_filter.filter_publication( publication, cmp_authors=settings.ALEPH_FILTER_BY_AUTHOR ) return publication
1,148,430
tests to see if the directory is writable. If the directory does not exist, it can attempt to create it. If unable, returns False args: directory: filepath to the directory kwargs: mkdir[bool]: create the directory if it does not exist returns: True if the directory is writable (or was created), False otherwise
def is_writable_dir(directory, **kwargs): try: testfile = tempfile.TemporaryFile(dir = directory) testfile.close() except OSError as e: if e.errno == errno.EACCES: # 13 return False elif e.errno == errno.ENOENT: # 2 if kwargs.get('mkdir') == True: try: os.makedirs(directory) except OSError as e2: if e2.errno == errno.EACCES: # 13 return False else: return False e.filename = directory return True
1,148,471
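A usage sketch for `is_writable_dir`; the subdirectory name is illustrative, and the `mkdir=True` call assumes the create succeeds:

```python
import os
import tempfile

# Probe an existing, writable directory.
print(is_writable_dir(tempfile.gettempdir()))      # True on most systems

# Probe a missing directory and ask the helper to create it.
target = os.path.join(tempfile.gettempdir(), "demo_subdir")
print(is_writable_dir(target, mkdir=True))         # True once created
```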
Returns a list of files args: file_directory: a string path to the file directory file_extensions: a list of file extensions to filter, example ['xml', 'rdf']. If None, include all files include_subfolders: as implied include_root: whether to include the root in the path root_dir: the root directory to remove if include_root is False returns: (tuple) (file_name, file_path_with_root_mod, modified_time, full_path)
def list_files(file_directory, file_extensions=None, include_subfolders=True, include_root=True, root_dir=None): log = logging.getLogger("%s" % (inspect.stack()[0][3])) log.setLevel(__LOG_LEVEL__) rtn_list = [] if not root_dir: root_dir = file_directory root_dir = root_dir.strip() if root_dir.endswith(os.path.sep): root_dir = root_dir.strip()[:-1] dir_parts_len = len(root_dir.split(os.path.sep)) level = 0 for root, dirnames, filenames in os.walk(file_directory): root_str = root if level > 0 and not include_subfolders: break if not include_root: root_str = os.path.sep.join(root.split(os.path.sep)[dir_parts_len:]) if file_extensions: files = [(x, os.path.join(root_str, x), os.path.getmtime(os.path.join(root, x)), os.path.join(root, x)) for x in filenames \ if "." in x \ and x.split(".")[len(x.split("."))-1] in file_extensions] else: files = [(x, os.path.join(root_str, x), os.path.getmtime(os.path.join(root, x)), os.path.join(root, x)) for x in filenames] rtn_list += files level += 1 rtn_list.sort(key=lambda tup: tup[0], reverse=True) return rtn_list
1,148,472
Formats a namespace and ending value into a python friendly format args: namespace: RdfNamespace or tuple in the format of (prefix, uri,) value: end value to attach to the namespace
def pyuri_formatter(namespace, value): if namespace[0]: return "%s_%s" %(namespace[0], value) else: return "pyuri_%s_%s" % (base64.b64encode(bytes(namespace[1], "utf-8")).decode(), value)
1,148,596
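Two illustrative calls (the namespaces are made up for the example):

```python
# A prefixed namespace joins prefix and value with "_".
print(pyuri_formatter(("schema", "http://schema.org/"), "name"))
# -> "schema_name"

# Without a prefix, the URI itself is base64-encoded into the name.
print(pyuri_formatter((None, "http://example.org/ns#"), "thing"))
# -> "pyuri_<base64 of the URI>_thing"
```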
Create a new tree. Args: profile A profile generated from ``simplygithub.authentication.profile``. Such profiles tell this module (i) the ``repo`` to connect to, and (ii) the ``token`` to connect with. tree A list of blob objects (each with a path, mode, type, and content or sha) to put in the tree. Returns: A dict with data about the tree.
def create_tree(profile, tree): resource = "/trees" payload = {"tree": tree} data = api.post_request(profile, resource, payload) return prepare(data)
1,148,774
Converts datetime isoformat string to datetime (dt) object Args: :dt_str (str): input string in '2017-12-30T18:48:00.353Z' form or similar Returns: TYPE: datetime object
def convert_strtime_datetime(dt_str): dt, _, us = dt_str.partition(".") dt = datetime.datetime.strptime(dt, "%Y-%m-%dT%H:%M:%S") us = int(us.rstrip("Z"), 10) return dt + datetime.timedelta(microseconds=us)
1,148,906
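A worked example; note the fraction after the "." is applied as integer microseconds:

```python
dt = convert_strtime_datetime("2017-12-30T18:48:00.353Z")
print(dt)   # 2017-12-30 18:48:00.000353
```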
Summary: Convert duration into component time units Args: :duration (datetime.timedelta): time duration to convert Returns: days, hours, minutes, seconds | TYPE: tuple (integers)
def convert_timedelta(duration): days, seconds = duration.days, duration.seconds hours = seconds // 3600 minutes = (seconds % 3600) // 60 seconds = (seconds % 60) return days, hours, minutes, seconds
1,148,907
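A worked example: 26 hours 5 minutes is 1 day plus 7,500 remaining seconds, i.e. 2 hours and 5 minutes.

```python
import datetime

d = datetime.timedelta(hours=26, minutes=5)
print(convert_timedelta(d))   # (1, 2, 5, 0) -> days, hours, minutes, seconds
```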
Summary: convert timedelta objects to human readable output Args: :duration (datetime.timedelta): time duration to convert :return_iter (tuple): tuple containing time sequence Returns: days, hours, minutes, seconds | TYPE: tuple (integers), OR human readable, notated units | TYPE: string
def convert_dt_time(duration, return_iter=False): try: days, hours, minutes, seconds = convert_timedelta(duration) if return_iter: return days, hours, minutes, seconds # string format conversions if days > 0: format_string = ( '{} day{}, {} hour{}'.format( days, 's' if days != 1 else '', hours, 's' if hours != 1 else '')) elif hours > 1: format_string = ( '{} hour{}, {} minute{}'.format( hours, 's' if hours != 1 else '', minutes, 's' if minutes != 1 else '')) else: format_string = ( '{} minute{}, {} sec{}'.format( minutes, 's' if minutes != 1 else '', seconds, 's' if seconds != 1 else '')) except AttributeError as e: logger.exception( '%s: Type mismatch when converting timedelta objects (Code: %s)' % (inspect.stack()[0][3], str(e))) except Exception as e: logger.exception( '%s: Unknown error when converting datetime objects (Code: %s)' % (inspect.stack()[0][3], str(e))) return format_string
1,148,908
Summary: Retrieve local operating system environment characteristics Args: :detailed (bool): when True, return extended os information (Linux and Windows only) Returns: TYPE: dict object containing key, value pairs describing os information
def get_os(detailed=False): try: os_type = platform.system() if os_type == 'Linux': os_detail = platform.uname() distribution = platform.linux_distribution() HOME = os.environ['HOME'] username = os.getenv('USER') elif os_type == 'Windows': username = os.getenv('username') HOME = 'C:\\Users\\' + username elif os_type == 'Java': logger.warning('Unsupported OS. No information') except OSError as e: raise e except Exception as e: logger.exception( '%s: problem determining local os environment %s' % (inspect.stack()[0][3], str(e)) ) if detailed and os_type == 'Linux': return { 'os_type': os_type, 'os_detail': os_detail, 'linux_distribution': distribution, 'HOME': HOME } elif detailed and os_type == 'Windows': return { 'os_type': os_type, 'platform': platform, 'HOME': HOME } elif not detailed: return {'os_type': os_type}
1,148,910
Summary: Determine the local awscli credentials and config file locations Args: :os_type (str): operating system name; detected with platform.system() when None Returns: TYPE: dict object containing paths to the awscli credentials and config files
def awscli_defaults(os_type=None): try: if os_type is None: os_type = platform.system() if os_type == 'Linux': HOME = os.environ['HOME'] awscli_credentials = HOME + '/.aws/credentials' awscli_config = HOME + '/.aws/config' elif os_type == 'Windows': username = os.getenv('username') awscli_credentials = 'C:\\Users\\' + username + '\\.aws\\credentials' awscli_config = 'C:\\Users\\' + username + '\\.aws\\config' elif os_type == 'Java': logger.warning('Unsupported OS. No information') HOME = os.environ['HOME'] awscli_credentials = HOME + '/.aws/credentials' awscli_config = HOME + '/.aws/config' alt_credentials = os.getenv('AWS_SHARED_CREDENTIALS_FILE') except OSError as e: logger.exception( '%s: problem determining local os environment %s' % (inspect.stack()[0][3], str(e)) ) raise e return { 'awscli_defaults': { 'awscli_credentials': awscli_credentials, 'awscli_config': awscli_config, 'alt_credentials': alt_credentials } }
1,148,911
Summary: Creates local config from JSON seed template Args: :config_file (str): filesystem object containing json dict of config values :json_config_obj (json): data to be written to config_file :config_dirname (str): dir name containing config_file Returns: TYPE: bool, Success | Failure
def config_init(config_file, json_config_obj, config_dirname=None): HOME = os.environ['HOME'] # client config dir if config_dirname: dir_path = HOME + '/' + config_dirname if not os.path.exists(dir_path): os.mkdir(dir_path) os.chmod(dir_path, 0o755) else: dir_path = HOME # client config file r = export_json_object( dict_obj=json_config_obj, filename=dir_path + '/' + config_file ) return r
1,148,912
Summary: exports object to block filesystem object Args: :dict_obj (dict): dictionary object :filename (str): name of file to be exported (optional) Returns: True | False Boolean export status
def export_json_object(dict_obj, filename=None): try: if filename: try: with open(filename, 'w') as handle: handle.write(json.dumps(dict_obj, indent=4, sort_keys=True)) logger.info( '%s: Wrote %s to local filesystem location' % (inspect.stack()[0][3], filename)) handle.close() except TypeError as e: logger.warning( '%s: object in dict not serializable: %s' % (inspect.stack()[0][3], str(e))) else: json_str = json.dumps(dict_obj, indent=4, sort_keys=True) print(highlight(json_str, lexers.JsonLexer(), formatters.TerminalFormatter())) logger.info('%s: successful export to stdout' % inspect.stack()[0][3]) return True except IOError as e: logger.critical( '%s: export_file_object: error writing to %s to filesystem. Error: %s' % (inspect.stack()[0][3], filename, str(e))) return False else: logger.info('export_file_object: successful export to %s' % filename) return True
1,148,913
Summary: Imports block filesystem object Args: :filename (str): block filesystem object Returns: dictionary obj (valid json file), file data object
def import_file_object(filename): try: handle = open(filename, 'r') file_obj = handle.read() dict_obj = json.loads(file_obj) except IOError as e: logger.critical( 'import_file_object: %s error opening %s' % (str(e), str(filename)) ) raise e except ValueError: logger.info( '%s: import_file_object: %s not json. file object returned' % (inspect.stack()[0][3], str(filename)) ) return file_obj # reg file, not valid json return dict_obj
1,148,914
Summary: Validates baseline dict against suspect dict to ensure the suspect contains the same k,v parameters. Args: baseline (dict): baseline json structure suspect (dict): json object validated against baseline structure Returns: Success (matches baseline) | Failure (no match), TYPE: bool
def json_integrity(baseline, suspect): try: for k,v in baseline.items(): for ks, vs in suspect.items(): keys_baseline = set(v.keys()) keys_suspect = set(vs.keys()) intersect_keys = keys_baseline.intersection(keys_suspect) added = keys_baseline - keys_suspect rm = keys_suspect - keys_baseline logger.info('keys added: %s, keys removed %s' % (str(added), str(rm))) if keys_baseline != keys_suspect: return False except KeyError as e: logger.info( 'KeyError parsing pre-existing config (%s). Replacing config file' % str(e)) return True
1,148,915
Parses local config file for override values Args: :local_file (str): filename of local config file Returns: dict object of values contained in local config file
def read_local_config(cfg): try: if os.path.exists(cfg): config = import_file_object(cfg) return config else: logger.warning( '%s: local config file (%s) not found, cannot be read' % (inspect.stack()[0][3], str(cfg))) except IOError as e: logger.warning( 'import_file_object: %s error opening %s' % (str(e), str(cfg)) ) return {}
1,148,917
Withdraws given number of NPs from the shop till, returns result Parameters: nps (int) -- Number of NPs to withdraw Returns bool - True if successful, False otherwise
def grabTill(self, nps): if not int(nps): return False pg = self.usr.getPage("http://www.neopets.com/market.phtml?type=till") form = pg.form(action="process_market.phtml") form['amount'] = str(nps) form.usePin = True pg = form.submit() # If successful redirects to till page if "You currently have" in pg.content: return True else: logging.getLogger("neolib.shop").exception("Could not grab shop till.", {'pg': pg}) return False
1,148,958
Get the list of committed signatures Args: vcs (easyci.vcs.base.Vcs) Returns: list(basestring) - list of signatures
def get_committed_signatures(vcs): committed_path = _get_committed_history_path(vcs) known_signatures = [] if os.path.exists(committed_path): with open(committed_path, 'r') as f: known_signatures = f.read().split() return known_signatures
1,148,998
Get the list of staged signatures Args: vcs (easyci.vcs.base.Vcs) Returns: list(basestring) - list of signatures
def get_staged_signatures(vcs): staged_path = _get_staged_history_path(vcs) known_signatures = [] if os.path.exists(staged_path): with open(staged_path, 'r') as f: known_signatures = f.read().split() return known_signatures
1,148,999
Add `signature` to the list of committed signatures The signature must already be staged Args: vcs (easyci.vcs.base.Vcs) user_config (dict) signature (basestring) Raises: NotStagedError AlreadyCommittedError
def commit_signature(vcs, user_config, signature): if signature not in get_staged_signatures(vcs): raise NotStagedError evidence_path = _get_committed_history_path(vcs) committed_signatures = get_committed_signatures(vcs) if signature in committed_signatures: raise AlreadyCommittedError committed_signatures.append(signature) string = '\n'.join(committed_signatures[-user_config['history_limit']:]) with open(evidence_path, 'w') as f: f.write(string) unstage_signature(vcs, signature)
1,149,000
Add `signature` to the list of staged signatures Args: vcs (easyci.vcs.base.Vcs) signature (basestring) Raises: AlreadyStagedError
def stage_signature(vcs, signature): evidence_path = _get_staged_history_path(vcs) staged = get_staged_signatures(vcs) if signature in staged: raise AlreadyStagedError staged.append(signature) string = '\n'.join(staged) with open(evidence_path, 'w') as f: f.write(string)
1,149,001
Remove `signature` from the list of staged signatures Args: vcs (easyci.vcs.base.Vcs) signature (basestring) Raises: NotStagedError
def unstage_signature(vcs, signature): evidence_path = _get_staged_history_path(vcs) staged = get_staged_signatures(vcs) if signature not in staged: raise NotStagedError staged.remove(signature) string = '\n'.join(staged) with open(evidence_path, 'w') as f: f.write(string)
1,149,002
Clear (committed) test run history from this project. Args: vcs (easyci.vcs.base.Vcs)
def clear_history(vcs): evidence_path = _get_committed_history_path(vcs) if os.path.exists(evidence_path): os.remove(evidence_path)
1,149,003
Decorator for restricting access to views according to a list of themes. Params: * ``theme`` - string or list of themes in which the decorated view must be available * ``redirect_to`` - url or name of url pattern for redirect if CURRENT_THEME is not in themes * ``raise_error`` - error class for raising Example: .. code:: python # views.py from django_vest import only_for @only_for('black_theme') def my_view(request): ...
def only_for(theme, redirect_to='/', raise_error=None): def check_theme(*args, **kwargs): if isinstance(theme, six.string_types): themes = (theme,) else: themes = theme if settings.CURRENT_THEME is None: return True result = settings.CURRENT_THEME in themes if not result and raise_error is not None: raise raise_error return result return user_passes_test(check_theme, login_url=redirect_to)
1,149,053
Parses a text representation of a protocol message into a message. Args: lines: An iterable of lines of a message's text representation. message: A protocol buffer message to merge into. allow_unknown_extension: if True, skip over missing extensions and keep parsing allow_field_number: if True, both field number and field name are allowed. Returns: The same message passed as argument. Raises: ParseError: On text parsing problems.
def ParseLines(lines, message, allow_unknown_extension=False, allow_field_number=False): parser = _Parser(allow_unknown_extension, allow_field_number) return parser.ParseLines(lines, message)
1,149,056
Skips over a field value. Args: tokenizer: A tokenizer to parse the field name and values. Raises: ParseError: In case an invalid field value is found.
def _SkipFieldValue(tokenizer): # String/bytes tokens can come in multiple adjacent string literals. # If we can consume one, consume as many as we can. if tokenizer.TryConsumeByteString(): while tokenizer.TryConsumeByteString(): pass return if (not tokenizer.TryConsumeIdentifier() and not tokenizer.TryConsumeInt64() and not tokenizer.TryConsumeUint64() and not tokenizer.TryConsumeFloat()): raise ParseError('Invalid field value: ' + tokenizer.token)
1,149,057
Parses an integer. Args: text: The text to parse. is_signed: True if a signed integer must be parsed. is_long: True if a long integer must be parsed. Returns: The integer value. Raises: ValueError: Thrown if the text is not a valid integer.
def ParseInteger(text, is_signed=False, is_long=False):
    # Do the actual parsing. Exception handling is propagated to caller.
    try:
        # We force 32-bit values to int and 64-bit values to long to make
        # alternate implementations where the distinction is more significant
        # (e.g. the C++ implementation) simpler.
        if is_long:
            result = long(text, 0)
        else:
            result = int(text, 0)
    except ValueError:
        raise ValueError('Couldn\'t parse integer: %s' % text)

    # Check if the integer is sane. Exceptions handled by callers.
    checker = _INTEGER_CHECKERS[2 * int(is_long) + int(is_signed)]
    checker.CheckValue(result)
    return result
1,149,058
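Because the parse uses base 0, hex and octal literals are accepted alongside decimal. A small illustration; the range-check behavior is assumed from the `_INTEGER_CHECKERS` table:

ParseInteger('42')                  # -> 42
ParseInteger('0x2a')                # -> 42 (base 0 accepts hex)
ParseInteger('-1', is_signed=True)  # -> -1
ParseInteger('-1')                  # unsigned checker should reject this
                                    # with a ValueError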
Convert protobuf message to text format.

Args:
    message: The protocol buffers message.
def PrintMessage(self, message):
    fields = message.ListFields()
    if self.use_index_order:
        fields.sort(key=lambda x: x[0].index)
    for field, value in fields:
        if _IsMapEntry(field):
            for key in sorted(value):
                # This is slow for maps with submessage entries because it
                # copies the entire tree.  Unfortunately this would take
                # significant refactoring of this file to work around.
                #
                # TODO(haberman): refactor and optimize if this becomes an
                # issue.
                entry_submsg = field.message_type._concrete_class(
                    key=key, value=value[key])
                self.PrintField(field, entry_submsg)
        elif field.label == descriptor.FieldDescriptor.LABEL_REPEATED:
            for element in value:
                self.PrintField(field, element)
        else:
            self.PrintField(field, value)
1,149,060
Print a single field value (not including name).

For repeated fields, the value should be a single element.

Args:
    field: The descriptor of the field to be printed.
    value: The value of the field.
def PrintFieldValue(self, field, value):
    out = self.out
    if self.pointy_brackets:
        openb = '<'
        closeb = '>'
    else:
        openb = '{'
        closeb = '}'

    if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
        if self.as_one_line:
            out.write(' %s ' % openb)
            self.PrintMessage(value)
            out.write(closeb)
        else:
            out.write(' %s\n' % openb)
            self.indent += 2
            self.PrintMessage(value)
            self.indent -= 2
            out.write(' ' * self.indent + closeb)
    elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_ENUM:
        enum_value = field.enum_type.values_by_number.get(value, None)
        if enum_value is not None:
            out.write(enum_value.name)
        else:
            out.write(str(value))
    elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_STRING:
        out.write('\"')
        if isinstance(value, six.text_type):
            out_value = value.encode('utf-8')
        else:
            out_value = value
        if field.type == descriptor.FieldDescriptor.TYPE_BYTES:
            # We need to escape non-UTF8 chars in TYPE_BYTES field.
            out_as_utf8 = False
        else:
            out_as_utf8 = self.as_utf8
        out.write(text_encoding.CEscape(out_value, out_as_utf8))
        out.write('\"')
    elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_BOOL:
        if value:
            out.write('true')
        else:
            out.write('false')
    elif field.cpp_type in _FLOAT_TYPES and self.float_format is not None:
        out.write('{1:{0}}'.format(self.float_format, value))
    else:
        out.write(str(value))
1,149,061
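The `pointy_brackets` and `as_one_line` printer options funnel through this method. A hedged sketch through protobuf's public wrapper, assuming `my_pb2` is a hypothetical generated module whose `Outer` message has a submessage field `child` with a string `name`:

from google.protobuf import text_format
from my_pb2 import Outer  # hypothetical generated module

msg = Outer()
msg.child.name = "x"

print(text_format.MessageToString(msg))
# child {
#   name: "x"
# }
print(text_format.MessageToString(msg, pointy_brackets=True,
                                  as_one_line=True))
# child < name: "x" >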
Converts a text representation of a protocol message into a message.

Args:
    lines: Lines of a message's text representation.
    message: A protocol buffer message to merge into.

Raises:
    ParseError: On text parsing problems.
def _ParseOrMerge(self, lines, message):
    tokenizer = _Tokenizer(lines)
    while not tokenizer.AtEnd():
        self._MergeField(tokenizer, message)
1,149,063
Merges a single message field into a message.

Args:
    tokenizer: A tokenizer to parse the field value.
    message: The message of which field is a member.
    field: The descriptor of the field to be merged.

Raises:
    ParseError: In case of text parsing problems.
def _MergeMessageField(self, tokenizer, message, field):
    is_map_entry = _IsMapEntry(field)

    if tokenizer.TryConsume('<'):
        end_token = '>'
    else:
        tokenizer.Consume('{')
        end_token = '}'

    if field.label == descriptor.FieldDescriptor.LABEL_REPEATED:
        if field.is_extension:
            sub_message = message.Extensions[field].add()
        elif is_map_entry:
            # pylint: disable=protected-access
            sub_message = field.message_type._concrete_class()
        else:
            sub_message = getattr(message, field.name).add()
    else:
        if field.is_extension:
            sub_message = message.Extensions[field]
        else:
            sub_message = getattr(message, field.name)
        sub_message.SetInParent()

    while not tokenizer.TryConsume(end_token):
        if tokenizer.AtEnd():
            raise tokenizer.ParseErrorPreviousToken('Expected "%s".' %
                                                    (end_token,))
        self._MergeField(tokenizer, sub_message)

    if is_map_entry:
        value_cpptype = field.message_type.fields_by_name['value'].cpp_type
        if value_cpptype == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
            value = getattr(message, field.name)[sub_message.key]
            value.MergeFrom(sub_message.value)
        else:
            getattr(message, field.name)[sub_message.key] = sub_message.value
1,149,064
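Both delimiter styles parse to the same result; `<...>` and `{...}` are interchangeable on input. A sketch through the public `Merge` entry point, with a hypothetical `Person` message carrying a repeated `phones` submessage field:

from google.protobuf import text_format
from my_pb2 import Person  # hypothetical generated module

person = Person()
text_format.Merge('phones { number: "555-1234" }', person)
text_format.Merge('phones < number: "555-4321" >', person)
len(person.phones)  # -> 2 (repeated message fields get .add()'d)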
Convert :class:`.MARCXMLRecord` object to :class:`.EPeriodical`
namedtuple.

Args:
    xml (str/MARCXMLRecord): MARC XML which will be converted to
        EPeriodical. In case of str, ``<record>`` tag is required.

Returns:
    structure: :class:`.EPeriodical` namedtuple with data about
        the publication.
def from_xml(xml):
    parsed = xml
    if not isinstance(xml, MARCXMLRecord):
        parsed = MARCXMLRecord(str(xml))

    # check whether the document was deleted
    if "DEL" in parsed.datafields:
        raise DocumentNotFoundException("Document was deleted.")

    # I know this is not PEP8, but you don't want to see it without proper
    # formatting (it looks bad, really bad).
    return EPeriodical(
        url=parsed.get_urls(),
        ISSN=parsed.get_ISSNs(),
        nazev=parsed.get_name(),
        anotace=None,  # TODO: read the annotation
        podnazev=parsed.get_subname(),
        id_number=parsed.controlfields.get("001", None),
        datumVydani=parsed.get_pub_date(),
        mistoVydani=parsed.get_pub_place(),
        internal_url=parsed.get_internal_urls(),
        invalid_ISSNs=parsed.get_invalid_ISSNs(),
        nakladatelVydavatel=parsed.get_publisher(),
        ISSNSouboruPublikaci=parsed.get_linking_ISSNs(),
    )
1,149,244
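A usage sketch, assuming a MARC XML file with a `<record>` element is at hand; the path and the printed fields are illustrative only:

# Hedged sketch; 'periodical.xml' is a placeholder path.
with open('periodical.xml') as f:
    marc_xml = f.read()

try:
    epub = from_xml(marc_xml)
except DocumentNotFoundException:
    epub = None  # record carried a "DEL" datafield

if epub is not None:
    print(epub.ISSN, epub.nazev)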
Export the contents of the ConsoleWidget as XHTML with inline SVGs.

Parameters:
-----------
html : str,
    A utf-8 encoded Python string containing the Qt HTML to export.
filename : str
    The file to be saved.
image_tag : callable, optional (default None)
    Used to convert images. See ``default_image_tag()`` for information.
def export_xhtml(html, filename, image_tag=None):
    if image_tag is None:
        image_tag = default_image_tag
    else:
        image_tag = ensure_utf8(image_tag)

    with open(filename, 'w') as f:
        # Hack to make xhtml header -- note that we are not doing any check
        # for valid XML.
        offset = html.find("<html>")
        assert offset > -1, 'Invalid HTML string: no <html> tag.'
        html = ('<html xmlns="http://www.w3.org/1999/xhtml">\n' +
                html[offset+6:])
        html = fix_html(html)
        f.write(IMG_RE.sub(lambda x: image_tag(x, path=None, format="svg"),
                           html))
1,149,301
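In the ConsoleWidget context, the Qt HTML would come from the underlying text control. A sketch, where `control` stands in for a `QTextEdit`-like widget (an assumption, not part of this function):

# Hedged sketch; `control` is a hypothetical QTextEdit-like widget.
html = control.document().toHtml().encode('utf-8')
export_xhtml(html, 'console.xhtml')  # inline SVGs survive via image_tag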
Transforms a Qt-generated HTML string into a standards-compliant one.

Parameters:
-----------
html : str,
    A utf-8 encoded Python string containing the Qt HTML.
def fix_html(html):
    # A UTF-8 declaration is needed for proper rendering of some characters
    # (e.g., indented commands) when viewing exported HTML on a local system
    # (i.e., without seeing an encoding declaration in an HTTP header).
    # C.f. http://www.w3.org/International/O-charset for details.
    offset = html.find('<head>')
    if offset > -1:
        html = (html[:offset+6] +
                '\n<meta http-equiv="Content-Type" ' +
                'content="text/html; charset=utf-8" />\n' +
                html[offset+6:])

    # Replace empty paragraphs tags with line breaks.
    html = re.sub(EMPTY_P_RE, '<br/>', html)

    return html
1,149,303
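A quick illustration of both fixes, the injected charset declaration and the empty-paragraph replacement; the exact output shape is assumed from the code above and from a typical `EMPTY_P_RE` pattern:

qt_html = '<html><head></head><body><p></p>done</body></html>'
print(fix_html(qt_html))
# <html><head>
# <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
# </head><body><br/>done</body></html>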
Set a rule as an object attribute.

Arguments:
    name (string): Rule name to set as attribute name.
    properties (dict): Dictionary of properties.
def set_rule(self, name, properties):
    self._rule_attrs.append(name)
    setattr(self, name, properties)
1,149,308
Remove a rule from attributes.

Arguments:
    name (string): Rule name to remove.
def remove_rule(self, name):
    self._rule_attrs.remove(name)
    delattr(self, name)
1,149,309
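The pair keeps `_rule_attrs` and the instance attributes in sync. A sketch against a hypothetical host class; the `Stylesheet` name and its `__init__` are assumptions made for illustration:

class Stylesheet(object):
    # Hypothetical host class for set_rule / remove_rule.
    def __init__(self):
        self._rule_attrs = []

    set_rule = set_rule
    remove_rule = remove_rule

sheet = Stylesheet()
sheet.set_rule('body', {'margin': '0'})
sheet.body              # -> {'margin': '0'}
sheet.remove_rule('body')
hasattr(sheet, 'body')  # -> False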
Adds a list of file locations to the current list.

Args:
    file_locations: list of file location tuples
def add_file_locations(self, file_locations=[]):
    if not hasattr(self, '__file_locations__'):
        self.__file_locations__ = copy.copy(file_locations)
    else:
        self.__file_locations__ += copy.copy(file_locations)
1,149,559
Loads the file_locations into the triplestores.

Args:
    file_locations: list of tuples to load
        [('vocabularies', [list of vocabs to load]),
         ('directory', '/directory/path'),
         ('filepath', '/path/to/a/file'),
         ('package_all', 'name.of.a.package.with.defs'),
         ('package_file', 'name.of.package', 'filename')]
    custom: list of custom definitions to load
def load(self, file_locations=[], **kwargs):
    self.set_load_state(**kwargs)
    if file_locations:
        self.__file_locations__ += file_locations
    else:
        file_locations = self.__file_locations__
    conn = self.__get_conn__(**kwargs)
    if file_locations:
        log.info("Uploading files to conn '%s'", conn)
    for item in file_locations:
        log.info("loading '%s'", item)
        if item[0] == 'directory':
            self.load_directory(item[1], **kwargs)
        elif item[0] == 'filepath':
            kwargs['is_file'] = True
            self.load_file(item[1], **kwargs)
        elif item[0].startswith('package'):
            log.info("package: %s\nspec: %s",
                     item[1],
                     importlib.util.find_spec(item[1]))
            try:
                pkg_path = \
                    importlib.util.find_spec(
                        item[1]).submodule_search_locations[0]
            except TypeError:
                pkg_path = importlib.util.find_spec(item[1]).origin
                pkg_path = os.path.split(pkg_path)[0]
            if item[0].endswith('_all'):
                self.load_directory(pkg_path, **kwargs)
            elif item[0].endswith('_file'):
                filepath = os.path.join(pkg_path, item[2])
                self.load_file(filepath, **kwargs)
            else:
                raise NotImplementedError
    self.loaded_files(reset=True)
    self.loaded_times = self.load_times(**kwargs)
1,149,563
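The tuple vocabulary in the docstring maps directly onto the branches above. A hedged sketch of a call mixing the supported location types; `manager`, the paths, and the package names are placeholders:

# Hedged sketch; all names below are illustrative.
manager.load(file_locations=[
    ('directory', '/srv/defs/vocabularies'),
    ('filepath', '/srv/defs/app_schema.ttl'),
    ('package_all', 'myapp.rdf_definitions'),
    ('package_file', 'myapp.rdf_definitions', 'extra.ttl'),
])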
Sets the cache directory by testing write permissions for various
locations.

Args:
    cache_dirs: list of directories to test. The first one with
        read-write permissions is selected.
def __set_cache_dir__(self, cache_dirs=[], **kwargs):
    # add a path for a subfolder 'vocabularies'
    log.setLevel(kwargs.get("log_level", self.log_level))
    log.debug("setting cache_dir")
    test_dirs = cache_dirs
    try:
        test_dirs += [__CFG__.dirs.data]
    except (RuntimeWarning, TypeError):
        pass
    cache_dir = None
    for directory in test_dirs:
        try:
            if is_writable_dir(directory, mkdir=True):
                cache_dir = directory
                break
        except TypeError:
            pass
    self.cache_dir = cache_dir
    log.debug("cache dir set as: '%s'", cache_dir)
    log.setLevel(self.log_level)
1,149,564
Loads a file into the definition triplestore.

Args:
    filepath: the path to the file
def load_file(self, filepath, **kwargs):
    log.setLevel(kwargs.get("log_level", self.log_level))
    filename = os.path.split(filepath)[-1]
    if filename in self.loaded:
        if self.loaded_times.get(filename,
                datetime.datetime(2001, 1, 1)).timestamp() \
                < os.path.getmtime(filepath):
            self.drop_file(filename, **kwargs)
        else:
            return
    conn = self.__get_conn__(**kwargs)
    conn.load_data(graph=getattr(__NSM__.kdr, filename).clean_uri,
                   data=filepath,
                   # log_level=logging.DEBUG,
                   is_file=True)
    self.__update_time__(filename, **kwargs)
    log.warning("\n\tfile: '%s' loaded\n\tconn: '%s'\n\tpath: %s",
                filename, conn, filepath)
    self.loaded.append(filename)
1,149,565
Updates the mod time for a file saved to the definition_store.

Args:
    filename: the name of the file
def __update_time__(self, filename, **kwargs):
    conn = self.__get_conn__(**kwargs)
    load_time = XsdDatetime(datetime.datetime.utcnow())
    conn.update_query(.format(file=filename,
                              ctime=load_time.sparql,
                              graph="kdr:load_times"),
                      **kwargs)
    self.loaded_times[filename] = load_time
1,149,566
Removes the passed in file from the connected triplestore.

Args:
    filename: the filename to remove
def drop_file(self, filename, **kwargs):
    log.setLevel(kwargs.get("log_level", self.log_level))
    conn = self.__get_conn__(**kwargs)
    result = conn.update_query("DROP GRAPH %s" %
                               getattr(__NSM__.kdr, filename).sparql,
                               **kwargs)
    # Remove the load time from the triplestore
    conn.update_query(.format(file=filename, graph="kdr:load_times"),
                      **kwargs)
    self.loaded.remove(filename)
    log.warning("Dropped file '%s' from conn %s", filename, conn)
    return result
1,149,567