The dataset has 19 columns. The dataset viewer reports the following types and value/length ranges:

column              type       values / lengths
id                  int32      0 to 252k
repo                string     7 to 55 characters
path                string     4 to 127 characters
func_name           string     1 to 88 characters
original_string     string     75 to 19.8k characters
language            string     1 distinct value
code                string     75 to 19.8k characters
code_tokens         sequence   20 to 707 tokens
docstring           string     3 to 17.3k characters
docstring_tokens    sequence   3 to 222 tokens
sha                 string     40 characters
url                 string     87 to 242 characters
docstring_summary   string     1 distinct value
parameters          string     1 distinct value
return_statement    string     1 distinct value
argument_list       string     1 distinct value
identifier          string     1 distinct value
nwo                 string     1 distinct value
score               float32    -1 to -1
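These columns follow a CodeSearchNet-style layout of one function per row, pairing source code with its docstring and pre-tokenized forms of both. As a minimal sketch of how a dataset with this schema could be inspected with the Hugging Face `datasets` library (the dataset identifier below is a placeholder, not the actual name of this dataset):

    # Minimal sketch -- "someuser/python-code-docstring-corpus" is a placeholder
    # identifier; substitute the real dataset id or a local path.
    from datasets import load_dataset

    ds = load_dataset("someuser/python-code-docstring-corpus", split="train")

    row = ds[0]
    print(row["repo"], row["path"], row["func_name"])
    print(row["url"])           # link to the exact source lines on GitHub
    print(row["docstring"])     # natural-language description of the function
    print(row["code"][:200])    # raw source; `code_tokens` holds the tokenized form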
Example rows:

id: 251,200
repo: roclark/sportsreference
path: sportsreference/nba/boxscore.py
func_name: Boxscore.home_wins
original_string:
def home_wins(self): """ Returns an ``int`` of the number of games the home team won after the conclusion of the game. """ try: wins, losses = re.findall(r'\d+', self._home_record) return wins except ValueError: return 0
language: python
code:
def home_wins(self): """ Returns an ``int`` of the number of games the home team won after the conclusion of the game. """ try: wins, losses = re.findall(r'\d+', self._home_record) return wins except ValueError: return 0
[ "def", "home_wins", "(", "self", ")", ":", "try", ":", "wins", ",", "losses", "=", "re", ".", "findall", "(", "r'\\d+'", ",", "self", ".", "_home_record", ")", "return", "wins", "except", "ValueError", ":", "return", "0" ]
Returns an ``int`` of the number of games the home team won after the conclusion of the game.
[ "Returns", "an", "int", "of", "the", "number", "of", "games", "the", "home", "team", "won", "after", "the", "conclusion", "of", "the", "game", "." ]
sha: ea0bae432be76450e137671d2998eb38f962dffd
url: https://github.com/roclark/sportsreference/blob/ea0bae432be76450e137671d2998eb38f962dffd/sportsreference/nba/boxscore.py#L1115-L1124
score: -1
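Every example row below follows the same field order as the first one above. As a rough, hypothetical sketch (not part of any library) of how a single record with these fields might be summarized, assuming it is handled as a plain Python dict:

    # Hypothetical helper for one record (a dict with the fields shown above).
    # In the rows displayed here, `original_string` and `code` are identical and
    # `sha` pins the GitHub blob that `url` points into.
    def describe_row(row):
        summary = " ".join(row["docstring_tokens"])
        print(f'{row["func_name"]} in {row["path"]} ({row["repo"]})')
        print(f'docstring: {summary}')
        print(f'source: {row["url"]}')
        print(f'code matches original_string: {row["code"] == row["original_string"]}')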

id: 251,201
repo: roclark/sportsreference
path: sportsreference/nba/boxscore.py
func_name: Boxscore.home_two_point_field_goal_percentage
original_string:
def home_two_point_field_goal_percentage(self): """ Returns a ``float`` of the number of two point field goals made divided by the number of two point field goal attempts by the home team. Percentage ranges from 0-1. """ result = float(self.home_two_point_field_goals) / \ float(self.home_two_point_field_goal_attempts) return round(float(result), 3)
language: python
code:
def home_two_point_field_goal_percentage(self): """ Returns a ``float`` of the number of two point field goals made divided by the number of two point field goal attempts by the home team. Percentage ranges from 0-1. """ result = float(self.home_two_point_field_goals) / \ float(self.home_two_point_field_goal_attempts) return round(float(result), 3)
[ "def", "home_two_point_field_goal_percentage", "(", "self", ")", ":", "result", "=", "float", "(", "self", ".", "home_two_point_field_goals", ")", "/", "float", "(", "self", ".", "home_two_point_field_goal_attempts", ")", "return", "round", "(", "float", "(", "result", ")", ",", "3", ")" ]
Returns a ``float`` of the number of two point field goals made divided by the number of two point field goal attempts by the home team. Percentage ranges from 0-1.
[ "Returns", "a", "float", "of", "the", "number", "of", "two", "point", "field", "goals", "made", "divided", "by", "the", "number", "of", "two", "point", "field", "goal", "attempts", "by", "the", "home", "team", ".", "Percentage", "ranges", "from", "0", "-", "1", "." ]
sha: ea0bae432be76450e137671d2998eb38f962dffd
url: https://github.com/roclark/sportsreference/blob/ea0bae432be76450e137671d2998eb38f962dffd/sportsreference/nba/boxscore.py#L1214-L1222
score: -1

id: 251,202
repo: roclark/sportsreference
path: sportsreference/nfl/teams.py
func_name: Team.dataframe
original_string:
def dataframe(self): """ Returns a pandas DataFrame containing all other class properties and values. The index for the DataFrame is the string abbreviation of the team, such as 'KAN'. """ fields_to_include = { 'abbreviation': self.abbreviation, 'defensive_simple_rating_system': self.defensive_simple_rating_system, 'first_downs': self.first_downs, 'first_downs_from_penalties': self.first_downs_from_penalties, 'fumbles': self.fumbles, 'games_played': self.games_played, 'interceptions': self.interceptions, 'losses': self.losses, 'margin_of_victory': self.margin_of_victory, 'name': self.name, 'offensive_simple_rating_system': self.offensive_simple_rating_system, 'pass_attempts': self.pass_attempts, 'pass_completions': self.pass_completions, 'pass_first_downs': self.pass_first_downs, 'pass_net_yards_per_attempt': self.pass_net_yards_per_attempt, 'pass_touchdowns': self.pass_touchdowns, 'pass_yards': self.pass_yards, 'penalties': self.penalties, 'percent_drives_with_points': self.percent_drives_with_points, 'percent_drives_with_turnovers': self.percent_drives_with_turnovers, 'plays': self.plays, 'points_against': self.points_against, 'points_contributed_by_offense': self.points_contributed_by_offense, 'points_difference': self.points_difference, 'points_for': self.points_for, 'rank': self.rank, 'rush_attempts': self.rush_attempts, 'rush_first_downs': self.rush_first_downs, 'rush_touchdowns': self.rush_touchdowns, 'rush_yards': self.rush_yards, 'rush_yards_per_attempt': self.rush_yards_per_attempt, 'simple_rating_system': self.simple_rating_system, 'strength_of_schedule': self.strength_of_schedule, 'turnovers': self.turnovers, 'win_percentage': self.win_percentage, 'wins': self.wins, 'yards': self.yards, 'yards_from_penalties': self.yards_from_penalties, 'yards_per_play': self.yards_per_play } return pd.DataFrame([fields_to_include], index=[self._abbreviation])
language: python
code:
def dataframe(self): """ Returns a pandas DataFrame containing all other class properties and values. The index for the DataFrame is the string abbreviation of the team, such as 'KAN'. """ fields_to_include = { 'abbreviation': self.abbreviation, 'defensive_simple_rating_system': self.defensive_simple_rating_system, 'first_downs': self.first_downs, 'first_downs_from_penalties': self.first_downs_from_penalties, 'fumbles': self.fumbles, 'games_played': self.games_played, 'interceptions': self.interceptions, 'losses': self.losses, 'margin_of_victory': self.margin_of_victory, 'name': self.name, 'offensive_simple_rating_system': self.offensive_simple_rating_system, 'pass_attempts': self.pass_attempts, 'pass_completions': self.pass_completions, 'pass_first_downs': self.pass_first_downs, 'pass_net_yards_per_attempt': self.pass_net_yards_per_attempt, 'pass_touchdowns': self.pass_touchdowns, 'pass_yards': self.pass_yards, 'penalties': self.penalties, 'percent_drives_with_points': self.percent_drives_with_points, 'percent_drives_with_turnovers': self.percent_drives_with_turnovers, 'plays': self.plays, 'points_against': self.points_against, 'points_contributed_by_offense': self.points_contributed_by_offense, 'points_difference': self.points_difference, 'points_for': self.points_for, 'rank': self.rank, 'rush_attempts': self.rush_attempts, 'rush_first_downs': self.rush_first_downs, 'rush_touchdowns': self.rush_touchdowns, 'rush_yards': self.rush_yards, 'rush_yards_per_attempt': self.rush_yards_per_attempt, 'simple_rating_system': self.simple_rating_system, 'strength_of_schedule': self.strength_of_schedule, 'turnovers': self.turnovers, 'win_percentage': self.win_percentage, 'wins': self.wins, 'yards': self.yards, 'yards_from_penalties': self.yards_from_penalties, 'yards_per_play': self.yards_per_play } return pd.DataFrame([fields_to_include], index=[self._abbreviation])
[ "def", "dataframe", "(", "self", ")", ":", "fields_to_include", "=", "{", "'abbreviation'", ":", "self", ".", "abbreviation", ",", "'defensive_simple_rating_system'", ":", "self", ".", "defensive_simple_rating_system", ",", "'first_downs'", ":", "self", ".", "first_downs", ",", "'first_downs_from_penalties'", ":", "self", ".", "first_downs_from_penalties", ",", "'fumbles'", ":", "self", ".", "fumbles", ",", "'games_played'", ":", "self", ".", "games_played", ",", "'interceptions'", ":", "self", ".", "interceptions", ",", "'losses'", ":", "self", ".", "losses", ",", "'margin_of_victory'", ":", "self", ".", "margin_of_victory", ",", "'name'", ":", "self", ".", "name", ",", "'offensive_simple_rating_system'", ":", "self", ".", "offensive_simple_rating_system", ",", "'pass_attempts'", ":", "self", ".", "pass_attempts", ",", "'pass_completions'", ":", "self", ".", "pass_completions", ",", "'pass_first_downs'", ":", "self", ".", "pass_first_downs", ",", "'pass_net_yards_per_attempt'", ":", "self", ".", "pass_net_yards_per_attempt", ",", "'pass_touchdowns'", ":", "self", ".", "pass_touchdowns", ",", "'pass_yards'", ":", "self", ".", "pass_yards", ",", "'penalties'", ":", "self", ".", "penalties", ",", "'percent_drives_with_points'", ":", "self", ".", "percent_drives_with_points", ",", "'percent_drives_with_turnovers'", ":", "self", ".", "percent_drives_with_turnovers", ",", "'plays'", ":", "self", ".", "plays", ",", "'points_against'", ":", "self", ".", "points_against", ",", "'points_contributed_by_offense'", ":", "self", ".", "points_contributed_by_offense", ",", "'points_difference'", ":", "self", ".", "points_difference", ",", "'points_for'", ":", "self", ".", "points_for", ",", "'rank'", ":", "self", ".", "rank", ",", "'rush_attempts'", ":", "self", ".", "rush_attempts", ",", "'rush_first_downs'", ":", "self", ".", "rush_first_downs", ",", "'rush_touchdowns'", ":", "self", ".", "rush_touchdowns", ",", "'rush_yards'", ":", "self", ".", "rush_yards", ",", "'rush_yards_per_attempt'", ":", "self", ".", "rush_yards_per_attempt", ",", "'simple_rating_system'", ":", "self", ".", "simple_rating_system", ",", "'strength_of_schedule'", ":", "self", ".", "strength_of_schedule", ",", "'turnovers'", ":", "self", ".", "turnovers", ",", "'win_percentage'", ":", "self", ".", "win_percentage", ",", "'wins'", ":", "self", ".", "wins", ",", "'yards'", ":", "self", ".", "yards", ",", "'yards_from_penalties'", ":", "self", ".", "yards_from_penalties", ",", "'yards_per_play'", ":", "self", ".", "yards_per_play", "}", "return", "pd", ".", "DataFrame", "(", "[", "fields_to_include", "]", ",", "index", "=", "[", "self", ".", "_abbreviation", "]", ")" ]
Returns a pandas DataFrame containing all other class properties and values. The index for the DataFrame is the string abbreviation of the team, such as 'KAN'.
[ "Returns", "a", "pandas", "DataFrame", "containing", "all", "other", "class", "properties", "and", "values", ".", "The", "index", "for", "the", "DataFrame", "is", "the", "string", "abbreviation", "of", "the", "team", "such", "as", "KAN", "." ]
sha: ea0bae432be76450e137671d2998eb38f962dffd
url: https://github.com/roclark/sportsreference/blob/ea0bae432be76450e137671d2998eb38f962dffd/sportsreference/nfl/teams.py#L106-L157
score: -1

id: 251,203
repo: roclark/sportsreference
path: sportsreference/mlb/roster.py
func_name: Player._parse_team_name
original_string:
def _parse_team_name(self, team): """ Parse the team name in the contract table. The team names in the contract table contain special encoded characters that are not supported by Python 2.7. These characters should be filtered out to get the proper team name. Parameters ---------- team : string A string representing the team_name tag in a row in the player's contract table. Returns ------- string A string of the team's name, such as 'Houston Astros'. """ team = team.replace(' ', ' ') team = team.replace('\xa0', ' ') team_html = pq(team) return team_html.text()
language: python
code:
def _parse_team_name(self, team): """ Parse the team name in the contract table. The team names in the contract table contain special encoded characters that are not supported by Python 2.7. These characters should be filtered out to get the proper team name. Parameters ---------- team : string A string representing the team_name tag in a row in the player's contract table. Returns ------- string A string of the team's name, such as 'Houston Astros'. """ team = team.replace(' ', ' ') team = team.replace('\xa0', ' ') team_html = pq(team) return team_html.text()
[ "def", "_parse_team_name", "(", "self", ",", "team", ")", ":", "team", "=", "team", ".", "replace", "(", "' '", ",", "' '", ")", "team", "=", "team", ".", "replace", "(", "'\\xa0'", ",", "' '", ")", "team_html", "=", "pq", "(", "team", ")", "return", "team_html", ".", "text", "(", ")" ]
Parse the team name in the contract table. The team names in the contract table contain special encoded characters that are not supported by Python 2.7. These characters should be filtered out to get the proper team name. Parameters ---------- team : string A string representing the team_name tag in a row in the player's contract table. Returns ------- string A string of the team's name, such as 'Houston Astros'.
[ "Parse", "the", "team", "name", "in", "the", "contract", "table", "." ]
sha: ea0bae432be76450e137671d2998eb38f962dffd
url: https://github.com/roclark/sportsreference/blob/ea0bae432be76450e137671d2998eb38f962dffd/sportsreference/mlb/roster.py#L403-L425
score: -1

id: 251,204
repo: roclark/sportsreference
path: sportsreference/mlb/roster.py
func_name: Player._parse_value
original_string:
def _parse_value(self, html_data, field): """ Parse the HTML table to find the requested field's value. All of the values are passed in an HTML table row instead of as individual items. The values need to be parsed by matching the requested attribute with a parsing scheme that sports-reference uses to differentiate stats. This function returns a single value for the given attribute. Parameters ---------- html_data : string A string containing all of the rows of stats for a given team. If multiple tables are being referenced, this will be comprised of multiple rows in a single string. field : string The name of the attribute to match. Field must be a key in the PLAYER_SCHEME dictionary. Returns ------- list A list of all values that match the requested field. If no value could be found, returns None. """ scheme = PLAYER_SCHEME[field] items = [i.text() for i in html_data(scheme).items()] # Stats can be added and removed on a yearly basis. If no stats are # found, return None and have that be the value. if len(items) == 0: return None return items
language: python
code:
def _parse_value(self, html_data, field): """ Parse the HTML table to find the requested field's value. All of the values are passed in an HTML table row instead of as individual items. The values need to be parsed by matching the requested attribute with a parsing scheme that sports-reference uses to differentiate stats. This function returns a single value for the given attribute. Parameters ---------- html_data : string A string containing all of the rows of stats for a given team. If multiple tables are being referenced, this will be comprised of multiple rows in a single string. field : string The name of the attribute to match. Field must be a key in the PLAYER_SCHEME dictionary. Returns ------- list A list of all values that match the requested field. If no value could be found, returns None. """ scheme = PLAYER_SCHEME[field] items = [i.text() for i in html_data(scheme).items()] # Stats can be added and removed on a yearly basis. If no stats are # found, return None and have that be the value. if len(items) == 0: return None return items
[ "def", "_parse_value", "(", "self", ",", "html_data", ",", "field", ")", ":", "scheme", "=", "PLAYER_SCHEME", "[", "field", "]", "items", "=", "[", "i", ".", "text", "(", ")", "for", "i", "in", "html_data", "(", "scheme", ")", ".", "items", "(", ")", "]", "# Stats can be added and removed on a yearly basis. If no stats are", "# found, return None and have that be the value.", "if", "len", "(", "items", ")", "==", "0", ":", "return", "None", "return", "items" ]
Parse the HTML table to find the requested field's value. All of the values are passed in an HTML table row instead of as individual items. The values need to be parsed by matching the requested attribute with a parsing scheme that sports-reference uses to differentiate stats. This function returns a single value for the given attribute. Parameters ---------- html_data : string A string containing all of the rows of stats for a given team. If multiple tables are being referenced, this will be comprised of multiple rows in a single string. field : string The name of the attribute to match. Field must be a key in the PLAYER_SCHEME dictionary. Returns ------- list A list of all values that match the requested field. If no value could be found, returns None.
[ "Parse", "the", "HTML", "table", "to", "find", "the", "requested", "field", "s", "value", "." ]
sha: ea0bae432be76450e137671d2998eb38f962dffd
url: https://github.com/roclark/sportsreference/blob/ea0bae432be76450e137671d2998eb38f962dffd/sportsreference/mlb/roster.py#L459-L491
score: -1

id: 251,205
repo: roclark/sportsreference
path: sportsreference/mlb/roster.py
func_name: Roster._get_id
original_string:
def _get_id(self, player): """ Parse the player ID. Given a PyQuery object representing a single player on the team roster, parse the player ID and return it as a string. Parameters ---------- player : PyQuery object A PyQuery object representing the player information from the roster table. Returns ------- string Returns a string of the player ID. """ name_tag = player('td[data-stat="player"] a') name = re.sub(r'.*/players/./', '', str(name_tag)) return re.sub(r'\.shtml.*', '', name)
language: python
code:
def _get_id(self, player): """ Parse the player ID. Given a PyQuery object representing a single player on the team roster, parse the player ID and return it as a string. Parameters ---------- player : PyQuery object A PyQuery object representing the player information from the roster table. Returns ------- string Returns a string of the player ID. """ name_tag = player('td[data-stat="player"] a') name = re.sub(r'.*/players/./', '', str(name_tag)) return re.sub(r'\.shtml.*', '', name)
[ "def", "_get_id", "(", "self", ",", "player", ")", ":", "name_tag", "=", "player", "(", "'td[data-stat=\"player\"] a'", ")", "name", "=", "re", ".", "sub", "(", "r'.*/players/./'", ",", "''", ",", "str", "(", "name_tag", ")", ")", "return", "re", ".", "sub", "(", "r'\\.shtml.*'", ",", "''", ",", "name", ")" ]
Parse the player ID. Given a PyQuery object representing a single player on the team roster, parse the player ID and return it as a string. Parameters ---------- player : PyQuery object A PyQuery object representing the player information from the roster table. Returns ------- string Returns a string of the player ID.
[ "Parse", "the", "player", "ID", "." ]
sha: ea0bae432be76450e137671d2998eb38f962dffd
url: https://github.com/roclark/sportsreference/blob/ea0bae432be76450e137671d2998eb38f962dffd/sportsreference/mlb/roster.py#L1489-L1509
score: -1

id: 251,206
repo: roclark/sportsreference
path: sportsreference/nhl/boxscore.py
func_name: BoxscorePlayer.dataframe
original_string:
def dataframe(self): """ Returns a ``pandas DataFrame`` containing all other relevant properties and values for the specified game. """ fields_to_include = { 'assists': self.assists, 'blocks_at_even_strength': self.blocks_at_even_strength, 'corsi_for_percentage': self.corsi_for_percentage, 'decision': self.decision, 'defensive_zone_starts': self.defensive_zone_starts, 'defensive_zone_start_percentage': self.defensive_zone_start_percentage, 'even_strength_assists': self.even_strength_assists, 'even_strength_goals': self.even_strength_goals, 'game_winning_goals': self.game_winning_goals, 'goals': self.goals, 'goals_against': self.goals_against, 'hits_at_even_strength': self.hits_at_even_strength, 'invidual_corsi_for_events': self.individual_corsi_for_events, 'name': self.name, 'offensive_zone_start_percentage': self.offensive_zone_start_percentage, 'offensive_zone_starts': self.offensive_zone_starts, 'on_ice_shot_attempts_against': self.on_ice_shot_attempts_against, 'on_ice_shot_attempts_for': self.on_ice_shot_attempts_for, 'penalties_in_minutes': self.penalties_in_minutes, 'player_id': self.player_id, 'plus_minus': self.plus_minus, 'points': self.points, 'power_play_assists': self.power_play_assists, 'power_play_goals': self.power_play_goals, 'relative_corsi_for_percentage': self.relative_corsi_for_percentage, 'save_percentage': self.save_percentage, 'saves': self.saves, 'shifts': self.shifts, 'shooting_percentage': self.shooting_percentage, 'short_handed_assists': self.short_handed_assists, 'short_handed_goals': self.short_handed_goals, 'shots_against': self.shots_against, 'shots_on_goal': self.shots_on_goal, 'shutouts': self.shutouts, 'time_on_ice': self.time_on_ice } return pd.DataFrame([fields_to_include], index=[self._player_id])
language: python
code:
def dataframe(self): """ Returns a ``pandas DataFrame`` containing all other relevant properties and values for the specified game. """ fields_to_include = { 'assists': self.assists, 'blocks_at_even_strength': self.blocks_at_even_strength, 'corsi_for_percentage': self.corsi_for_percentage, 'decision': self.decision, 'defensive_zone_starts': self.defensive_zone_starts, 'defensive_zone_start_percentage': self.defensive_zone_start_percentage, 'even_strength_assists': self.even_strength_assists, 'even_strength_goals': self.even_strength_goals, 'game_winning_goals': self.game_winning_goals, 'goals': self.goals, 'goals_against': self.goals_against, 'hits_at_even_strength': self.hits_at_even_strength, 'invidual_corsi_for_events': self.individual_corsi_for_events, 'name': self.name, 'offensive_zone_start_percentage': self.offensive_zone_start_percentage, 'offensive_zone_starts': self.offensive_zone_starts, 'on_ice_shot_attempts_against': self.on_ice_shot_attempts_against, 'on_ice_shot_attempts_for': self.on_ice_shot_attempts_for, 'penalties_in_minutes': self.penalties_in_minutes, 'player_id': self.player_id, 'plus_minus': self.plus_minus, 'points': self.points, 'power_play_assists': self.power_play_assists, 'power_play_goals': self.power_play_goals, 'relative_corsi_for_percentage': self.relative_corsi_for_percentage, 'save_percentage': self.save_percentage, 'saves': self.saves, 'shifts': self.shifts, 'shooting_percentage': self.shooting_percentage, 'short_handed_assists': self.short_handed_assists, 'short_handed_goals': self.short_handed_goals, 'shots_against': self.shots_against, 'shots_on_goal': self.shots_on_goal, 'shutouts': self.shutouts, 'time_on_ice': self.time_on_ice } return pd.DataFrame([fields_to_include], index=[self._player_id])
[ "def", "dataframe", "(", "self", ")", ":", "fields_to_include", "=", "{", "'assists'", ":", "self", ".", "assists", ",", "'blocks_at_even_strength'", ":", "self", ".", "blocks_at_even_strength", ",", "'corsi_for_percentage'", ":", "self", ".", "corsi_for_percentage", ",", "'decision'", ":", "self", ".", "decision", ",", "'defensive_zone_starts'", ":", "self", ".", "defensive_zone_starts", ",", "'defensive_zone_start_percentage'", ":", "self", ".", "defensive_zone_start_percentage", ",", "'even_strength_assists'", ":", "self", ".", "even_strength_assists", ",", "'even_strength_goals'", ":", "self", ".", "even_strength_goals", ",", "'game_winning_goals'", ":", "self", ".", "game_winning_goals", ",", "'goals'", ":", "self", ".", "goals", ",", "'goals_against'", ":", "self", ".", "goals_against", ",", "'hits_at_even_strength'", ":", "self", ".", "hits_at_even_strength", ",", "'invidual_corsi_for_events'", ":", "self", ".", "individual_corsi_for_events", ",", "'name'", ":", "self", ".", "name", ",", "'offensive_zone_start_percentage'", ":", "self", ".", "offensive_zone_start_percentage", ",", "'offensive_zone_starts'", ":", "self", ".", "offensive_zone_starts", ",", "'on_ice_shot_attempts_against'", ":", "self", ".", "on_ice_shot_attempts_against", ",", "'on_ice_shot_attempts_for'", ":", "self", ".", "on_ice_shot_attempts_for", ",", "'penalties_in_minutes'", ":", "self", ".", "penalties_in_minutes", ",", "'player_id'", ":", "self", ".", "player_id", ",", "'plus_minus'", ":", "self", ".", "plus_minus", ",", "'points'", ":", "self", ".", "points", ",", "'power_play_assists'", ":", "self", ".", "power_play_assists", ",", "'power_play_goals'", ":", "self", ".", "power_play_goals", ",", "'relative_corsi_for_percentage'", ":", "self", ".", "relative_corsi_for_percentage", ",", "'save_percentage'", ":", "self", ".", "save_percentage", ",", "'saves'", ":", "self", ".", "saves", ",", "'shifts'", ":", "self", ".", "shifts", ",", "'shooting_percentage'", ":", "self", ".", "shooting_percentage", ",", "'short_handed_assists'", ":", "self", ".", "short_handed_assists", ",", "'short_handed_goals'", ":", "self", ".", "short_handed_goals", ",", "'shots_against'", ":", "self", ".", "shots_against", ",", "'shots_on_goal'", ":", "self", ".", "shots_on_goal", ",", "'shutouts'", ":", "self", ".", "shutouts", ",", "'time_on_ice'", ":", "self", ".", "time_on_ice", "}", "return", "pd", ".", "DataFrame", "(", "[", "fields_to_include", "]", ",", "index", "=", "[", "self", ".", "_player_id", "]", ")" ]
Returns a ``pandas DataFrame`` containing all other relevant properties and values for the specified game.
[ "Returns", "a", "pandas", "DataFrame", "containing", "all", "other", "relevant", "properties", "and", "values", "for", "the", "specified", "game", "." ]
sha: ea0bae432be76450e137671d2998eb38f962dffd
url: https://github.com/roclark/sportsreference/blob/ea0bae432be76450e137671d2998eb38f962dffd/sportsreference/nhl/boxscore.py#L96-L141
score: -1

id: 251,207
repo: roclark/sportsreference
path: sportsreference/nhl/boxscore.py
func_name: Boxscore._find_player_id
original_string:
def _find_player_id(self, row): """ Find the player's ID. Find the player's ID as embedded in the 'data-append-csv' attribute, such as 'zettehe01' for Henrik Zetterberg. Parameters ---------- row : PyQuery object A PyQuery object representing a single row in a boxscore table for a single player. Returns ------- str Returns a ``string`` of the player's ID, such as 'zettehe01' for Henrik Zetterberg. """ player_id = row('th').attr('data-append-csv') if not player_id: player_id = row('td').attr('data-append-csv') return player_id
language: python
code:
def _find_player_id(self, row): """ Find the player's ID. Find the player's ID as embedded in the 'data-append-csv' attribute, such as 'zettehe01' for Henrik Zetterberg. Parameters ---------- row : PyQuery object A PyQuery object representing a single row in a boxscore table for a single player. Returns ------- str Returns a ``string`` of the player's ID, such as 'zettehe01' for Henrik Zetterberg. """ player_id = row('th').attr('data-append-csv') if not player_id: player_id = row('td').attr('data-append-csv') return player_id
[ "def", "_find_player_id", "(", "self", ",", "row", ")", ":", "player_id", "=", "row", "(", "'th'", ")", ".", "attr", "(", "'data-append-csv'", ")", "if", "not", "player_id", ":", "player_id", "=", "row", "(", "'td'", ")", ".", "attr", "(", "'data-append-csv'", ")", "return", "player_id" ]
Find the player's ID. Find the player's ID as embedded in the 'data-append-csv' attribute, such as 'zettehe01' for Henrik Zetterberg. Parameters ---------- row : PyQuery object A PyQuery object representing a single row in a boxscore table for a single player. Returns ------- str Returns a ``string`` of the player's ID, such as 'zettehe01' for Henrik Zetterberg.
[ "Find", "the", "player", "s", "ID", "." ]
sha: ea0bae432be76450e137671d2998eb38f962dffd
url: https://github.com/roclark/sportsreference/blob/ea0bae432be76450e137671d2998eb38f962dffd/sportsreference/nhl/boxscore.py#L413-L435
score: -1

id: 251,208
repo: roclark/sportsreference
path: sportsreference/nhl/boxscore.py
func_name: Boxscore.dataframe
original_string:
def dataframe(self): """ Returns a pandas DataFrame containing all other class properties and values. The index for the DataFrame is the string URI that is used to instantiate the class, such as '201806070VEG'. """ if self._away_goals is None and self._home_goals is None: return None fields_to_include = { 'arena': self.arena, 'attendance': self.attendance, 'away_assists': self.away_assists, 'away_even_strength_assists': self.away_even_strength_assists, 'away_even_strength_goals': self.away_even_strength_goals, 'away_game_winning_goals': self.away_game_winning_goals, 'away_goals': self.away_goals, 'away_penalties_in_minutes': self.away_penalties_in_minutes, 'away_points': self.away_points, 'away_power_play_assists': self.away_power_play_assists, 'away_power_play_goals': self.away_power_play_goals, 'away_save_percentage': self.away_save_percentage, 'away_saves': self.away_saves, 'away_shooting_percentage': self.away_shooting_percentage, 'away_short_handed_assists': self.away_short_handed_assists, 'away_short_handed_goals': self.away_short_handed_goals, 'away_shots_on_goal': self.away_shots_on_goal, 'away_shutout': self.away_shutout, 'date': self.date, 'duration': self.duration, 'home_assists': self.home_assists, 'home_even_strength_assists': self.home_even_strength_assists, 'home_even_strength_goals': self.home_even_strength_goals, 'home_game_winning_goals': self.home_game_winning_goals, 'home_goals': self.home_goals, 'home_penalties_in_minutes': self.home_penalties_in_minutes, 'home_points': self.home_points, 'home_power_play_assists': self.home_power_play_assists, 'home_power_play_goals': self.home_power_play_goals, 'home_save_percentage': self.home_save_percentage, 'home_saves': self.home_saves, 'home_shooting_percentage': self.home_shooting_percentage, 'home_short_handed_assists': self.home_short_handed_assists, 'home_short_handed_goals': self.home_short_handed_goals, 'home_shots_on_goal': self.home_shots_on_goal, 'home_shutout': self.home_shutout, 'losing_abbr': self.losing_abbr, 'losing_name': self.losing_name, 'time': self.time, 'winner': self.winner, 'winning_abbr': self.winning_abbr, 'winning_name': self.winning_name } return pd.DataFrame([fields_to_include], index=[self._uri])
language: python
code:
def dataframe(self): """ Returns a pandas DataFrame containing all other class properties and values. The index for the DataFrame is the string URI that is used to instantiate the class, such as '201806070VEG'. """ if self._away_goals is None and self._home_goals is None: return None fields_to_include = { 'arena': self.arena, 'attendance': self.attendance, 'away_assists': self.away_assists, 'away_even_strength_assists': self.away_even_strength_assists, 'away_even_strength_goals': self.away_even_strength_goals, 'away_game_winning_goals': self.away_game_winning_goals, 'away_goals': self.away_goals, 'away_penalties_in_minutes': self.away_penalties_in_minutes, 'away_points': self.away_points, 'away_power_play_assists': self.away_power_play_assists, 'away_power_play_goals': self.away_power_play_goals, 'away_save_percentage': self.away_save_percentage, 'away_saves': self.away_saves, 'away_shooting_percentage': self.away_shooting_percentage, 'away_short_handed_assists': self.away_short_handed_assists, 'away_short_handed_goals': self.away_short_handed_goals, 'away_shots_on_goal': self.away_shots_on_goal, 'away_shutout': self.away_shutout, 'date': self.date, 'duration': self.duration, 'home_assists': self.home_assists, 'home_even_strength_assists': self.home_even_strength_assists, 'home_even_strength_goals': self.home_even_strength_goals, 'home_game_winning_goals': self.home_game_winning_goals, 'home_goals': self.home_goals, 'home_penalties_in_minutes': self.home_penalties_in_minutes, 'home_points': self.home_points, 'home_power_play_assists': self.home_power_play_assists, 'home_power_play_goals': self.home_power_play_goals, 'home_save_percentage': self.home_save_percentage, 'home_saves': self.home_saves, 'home_shooting_percentage': self.home_shooting_percentage, 'home_short_handed_assists': self.home_short_handed_assists, 'home_short_handed_goals': self.home_short_handed_goals, 'home_shots_on_goal': self.home_shots_on_goal, 'home_shutout': self.home_shutout, 'losing_abbr': self.losing_abbr, 'losing_name': self.losing_name, 'time': self.time, 'winner': self.winner, 'winning_abbr': self.winning_abbr, 'winning_name': self.winning_name } return pd.DataFrame([fields_to_include], index=[self._uri])
[ "def", "dataframe", "(", "self", ")", ":", "if", "self", ".", "_away_goals", "is", "None", "and", "self", ".", "_home_goals", "is", "None", ":", "return", "None", "fields_to_include", "=", "{", "'arena'", ":", "self", ".", "arena", ",", "'attendance'", ":", "self", ".", "attendance", ",", "'away_assists'", ":", "self", ".", "away_assists", ",", "'away_even_strength_assists'", ":", "self", ".", "away_even_strength_assists", ",", "'away_even_strength_goals'", ":", "self", ".", "away_even_strength_goals", ",", "'away_game_winning_goals'", ":", "self", ".", "away_game_winning_goals", ",", "'away_goals'", ":", "self", ".", "away_goals", ",", "'away_penalties_in_minutes'", ":", "self", ".", "away_penalties_in_minutes", ",", "'away_points'", ":", "self", ".", "away_points", ",", "'away_power_play_assists'", ":", "self", ".", "away_power_play_assists", ",", "'away_power_play_goals'", ":", "self", ".", "away_power_play_goals", ",", "'away_save_percentage'", ":", "self", ".", "away_save_percentage", ",", "'away_saves'", ":", "self", ".", "away_saves", ",", "'away_shooting_percentage'", ":", "self", ".", "away_shooting_percentage", ",", "'away_short_handed_assists'", ":", "self", ".", "away_short_handed_assists", ",", "'away_short_handed_goals'", ":", "self", ".", "away_short_handed_goals", ",", "'away_shots_on_goal'", ":", "self", ".", "away_shots_on_goal", ",", "'away_shutout'", ":", "self", ".", "away_shutout", ",", "'date'", ":", "self", ".", "date", ",", "'duration'", ":", "self", ".", "duration", ",", "'home_assists'", ":", "self", ".", "home_assists", ",", "'home_even_strength_assists'", ":", "self", ".", "home_even_strength_assists", ",", "'home_even_strength_goals'", ":", "self", ".", "home_even_strength_goals", ",", "'home_game_winning_goals'", ":", "self", ".", "home_game_winning_goals", ",", "'home_goals'", ":", "self", ".", "home_goals", ",", "'home_penalties_in_minutes'", ":", "self", ".", "home_penalties_in_minutes", ",", "'home_points'", ":", "self", ".", "home_points", ",", "'home_power_play_assists'", ":", "self", ".", "home_power_play_assists", ",", "'home_power_play_goals'", ":", "self", ".", "home_power_play_goals", ",", "'home_save_percentage'", ":", "self", ".", "home_save_percentage", ",", "'home_saves'", ":", "self", ".", "home_saves", ",", "'home_shooting_percentage'", ":", "self", ".", "home_shooting_percentage", ",", "'home_short_handed_assists'", ":", "self", ".", "home_short_handed_assists", ",", "'home_short_handed_goals'", ":", "self", ".", "home_short_handed_goals", ",", "'home_shots_on_goal'", ":", "self", ".", "home_shots_on_goal", ",", "'home_shutout'", ":", "self", ".", "home_shutout", ",", "'losing_abbr'", ":", "self", ".", "losing_abbr", ",", "'losing_name'", ":", "self", ".", "losing_name", ",", "'time'", ":", "self", ".", "time", ",", "'winner'", ":", "self", ".", "winner", ",", "'winning_abbr'", ":", "self", ".", "winning_abbr", ",", "'winning_name'", ":", "self", ".", "winning_name", "}", "return", "pd", ".", "DataFrame", "(", "[", "fields_to_include", "]", ",", "index", "=", "[", "self", ".", "_uri", "]", ")" ]
Returns a pandas DataFrame containing all other class properties and values. The index for the DataFrame is the string URI that is used to instantiate the class, such as '201806070VEG'.
[ "Returns", "a", "pandas", "DataFrame", "containing", "all", "other", "class", "properties", "and", "values", ".", "The", "index", "for", "the", "DataFrame", "is", "the", "string", "URI", "that", "is", "used", "to", "instantiate", "the", "class", "such", "as", "201806070VEG", "." ]
sha: ea0bae432be76450e137671d2998eb38f962dffd
url: https://github.com/roclark/sportsreference/blob/ea0bae432be76450e137671d2998eb38f962dffd/sportsreference/nhl/boxscore.py#L667-L719
score: -1

id: 251,209
repo: roclark/sportsreference
path: sportsreference/nhl/boxscore.py
func_name: Boxscore.away_save_percentage
original_string:
def away_save_percentage(self): """ Returns a ``float`` of the percentage of shots the away team saved. Percentage ranges from 0-1. """ try: save_pct = float(self.away_saves) / float(self.home_shots_on_goal) return round(save_pct, 3) except ZeroDivisionError: return 0.0
language: python
code:
def away_save_percentage(self): """ Returns a ``float`` of the percentage of shots the away team saved. Percentage ranges from 0-1. """ try: save_pct = float(self.away_saves) / float(self.home_shots_on_goal) return round(save_pct, 3) except ZeroDivisionError: return 0.0
[ "def", "away_save_percentage", "(", "self", ")", ":", "try", ":", "save_pct", "=", "float", "(", "self", ".", "away_saves", ")", "/", "float", "(", "self", ".", "home_shots_on_goal", ")", "return", "round", "(", "save_pct", ",", "3", ")", "except", "ZeroDivisionError", ":", "return", "0.0" ]
Returns a ``float`` of the percentage of shots the away team saved. Percentage ranges from 0-1.
[ "Returns", "a", "float", "of", "the", "percentage", "of", "shots", "the", "away", "team", "saved", ".", "Percentage", "ranges", "from", "0", "-", "1", "." ]
sha: ea0bae432be76450e137671d2998eb38f962dffd
url: https://github.com/roclark/sportsreference/blob/ea0bae432be76450e137671d2998eb38f962dffd/sportsreference/nhl/boxscore.py#L941-L950
score: -1

id: 251,210
repo: roclark/sportsreference
path: sportsreference/nhl/boxscore.py
func_name: Boxscore.home_save_percentage
original_string:
def home_save_percentage(self): """ Returns a ``float`` of the percentage of shots the home team saved. Percentage ranges from 0-1. """ try: save_pct = float(self.home_saves) / float(self.away_shots_on_goal) return round(save_pct, 3) except ZeroDivisionError: return 0.0
language: python
code:
def home_save_percentage(self): """ Returns a ``float`` of the percentage of shots the home team saved. Percentage ranges from 0-1. """ try: save_pct = float(self.home_saves) / float(self.away_shots_on_goal) return round(save_pct, 3) except ZeroDivisionError: return 0.0
[ "def", "home_save_percentage", "(", "self", ")", ":", "try", ":", "save_pct", "=", "float", "(", "self", ".", "home_saves", ")", "/", "float", "(", "self", ".", "away_shots_on_goal", ")", "return", "round", "(", "save_pct", ",", "3", ")", "except", "ZeroDivisionError", ":", "return", "0.0" ]
Returns a ``float`` of the percentage of shots the home team saved. Percentage ranges from 0-1.
[ "Returns", "a", "float", "of", "the", "percentage", "of", "shots", "the", "home", "team", "saved", ".", "Percentage", "ranges", "from", "0", "-", "1", "." ]
sha: ea0bae432be76450e137671d2998eb38f962dffd
url: https://github.com/roclark/sportsreference/blob/ea0bae432be76450e137671d2998eb38f962dffd/sportsreference/nhl/boxscore.py#L1069-L1078
score: -1

id: 251,211
repo: roclark/sportsreference
path: sportsreference/ncaaf/schedule.py
func_name: Game.location
original_string:
def location(self): """ Returns a ``string`` constant to indicate whether the game was played at home, away, or in a neutral location. """ if self._location.lower() == 'n': return NEUTRAL if self._location.lower() == '@': return AWAY return HOME
language: python
code:
def location(self): """ Returns a ``string`` constant to indicate whether the game was played at home, away, or in a neutral location. """ if self._location.lower() == 'n': return NEUTRAL if self._location.lower() == '@': return AWAY return HOME
[ "def", "location", "(", "self", ")", ":", "if", "self", ".", "_location", ".", "lower", "(", ")", "==", "'n'", ":", "return", "NEUTRAL", "if", "self", ".", "_location", ".", "lower", "(", ")", "==", "'@'", ":", "return", "AWAY", "return", "HOME" ]
Returns a ``string`` constant to indicate whether the game was played at home, away, or in a neutral location.
[ "Returns", "a", "string", "constant", "to", "indicate", "whether", "the", "game", "was", "played", "at", "home", "away", "or", "in", "a", "neutral", "location", "." ]
sha: ea0bae432be76450e137671d2998eb38f962dffd
url: https://github.com/roclark/sportsreference/blob/ea0bae432be76450e137671d2998eb38f962dffd/sportsreference/ncaaf/schedule.py#L223-L232
score: -1

id: 251,212
repo: roclark/sportsreference
path: sportsreference/ncaaf/schedule.py
func_name: Game.rank
original_string:
def rank(self): """ Returns an ``int`` of the team's rank at the time the game was played. """ rank = re.findall(r'\d+', self._rank) if len(rank) == 0: return None return rank[0]
language: python
code:
def rank(self): """ Returns an ``int`` of the team's rank at the time the game was played. """ rank = re.findall(r'\d+', self._rank) if len(rank) == 0: return None return rank[0]
[ "def", "rank", "(", "self", ")", ":", "rank", "=", "re", ".", "findall", "(", "r'\\d+'", ",", "self", ".", "_rank", ")", "if", "len", "(", "rank", ")", "==", "0", ":", "return", "None", "return", "rank", "[", "0", "]" ]
Returns an ``int`` of the team's rank at the time the game was played.
[ "Returns", "an", "int", "of", "the", "team", "s", "rank", "at", "the", "time", "the", "game", "was", "played", "." ]
sha: ea0bae432be76450e137671d2998eb38f962dffd
url: https://github.com/roclark/sportsreference/blob/ea0bae432be76450e137671d2998eb38f962dffd/sportsreference/ncaaf/schedule.py#L235-L242
score: -1

id: 251,213
repo: roclark/sportsreference
path: sportsreference/mlb/schedule.py
func_name: Game.datetime
original_string:
def datetime(self): """ Returns a datetime object of the month, day, year, and time the game was played. """ date_string = '%s %s' % (self._date, self._year) date_string = re.sub(r' \(\d+\)', '', date_string) return datetime.strptime(date_string, '%A, %b %d %Y')
language: python
code:
def datetime(self): """ Returns a datetime object of the month, day, year, and time the game was played. """ date_string = '%s %s' % (self._date, self._year) date_string = re.sub(r' \(\d+\)', '', date_string) return datetime.strptime(date_string, '%A, %b %d %Y')
[ "def", "datetime", "(", "self", ")", ":", "date_string", "=", "'%s %s'", "%", "(", "self", ".", "_date", ",", "self", ".", "_year", ")", "date_string", "=", "re", ".", "sub", "(", "r' \\(\\d+\\)'", ",", "''", ",", "date_string", ")", "return", "datetime", ".", "strptime", "(", "date_string", ",", "'%A, %b %d %Y'", ")" ]
Returns a datetime object of the month, day, year, and time the game was played.
[ "Returns", "a", "datetime", "object", "of", "the", "month", "day", "year", "and", "time", "the", "game", "was", "played", "." ]
sha: ea0bae432be76450e137671d2998eb38f962dffd
url: https://github.com/roclark/sportsreference/blob/ea0bae432be76450e137671d2998eb38f962dffd/sportsreference/mlb/schedule.py#L172-L179
score: -1

id: 251,214
repo: roclark/sportsreference
path: sportsreference/mlb/schedule.py
func_name: Game.game_number_for_day
original_string:
def game_number_for_day(self): """ Returns an ``int`` denoting which game is played for the team during the given day. Default value is 1 where a team plays only one game during the day, but can be higher for double headers, etc. For example, if a team has a double header one day, the first game of the day will return 1 while the second game will return 2. """ game_number = re.findall(r'\(\d+\)', self._date) if len(game_number) == 0: return 1 game_number = re.findall(r'\d+', game_number[0]) return int(game_number[0])
language: python
code:
def game_number_for_day(self): """ Returns an ``int`` denoting which game is played for the team during the given day. Default value is 1 where a team plays only one game during the day, but can be higher for double headers, etc. For example, if a team has a double header one day, the first game of the day will return 1 while the second game will return 2. """ game_number = re.findall(r'\(\d+\)', self._date) if len(game_number) == 0: return 1 game_number = re.findall(r'\d+', game_number[0]) return int(game_number[0])
[ "def", "game_number_for_day", "(", "self", ")", ":", "game_number", "=", "re", ".", "findall", "(", "r'\\(\\d+\\)'", ",", "self", ".", "_date", ")", "if", "len", "(", "game_number", ")", "==", "0", ":", "return", "1", "game_number", "=", "re", ".", "findall", "(", "r'\\d+'", ",", "game_number", "[", "0", "]", ")", "return", "int", "(", "game_number", "[", "0", "]", ")" ]
Returns an ``int`` denoting which game is played for the team during the given day. Default value is 1 where a team plays only one game during the day, but can be higher for double headers, etc. For example, if a team has a double header one day, the first game of the day will return 1 while the second game will return 2.
[ "Returns", "an", "int", "denoting", "which", "game", "is", "played", "for", "the", "team", "during", "the", "given", "day", ".", "Default", "value", "is", "1", "where", "a", "team", "plays", "only", "one", "game", "during", "the", "day", "but", "can", "be", "higher", "for", "double", "headers", "etc", ".", "For", "example", "if", "a", "team", "has", "a", "double", "header", "one", "day", "the", "first", "game", "of", "the", "day", "will", "return", "1", "while", "the", "second", "game", "will", "return", "2", "." ]
sha: ea0bae432be76450e137671d2998eb38f962dffd
url: https://github.com/roclark/sportsreference/blob/ea0bae432be76450e137671d2998eb38f962dffd/sportsreference/mlb/schedule.py#L182-L194
score: -1

id: 251,215
repo: roclark/sportsreference
path: sportsreference/mlb/schedule.py
func_name: Game.games_behind
original_string:
def games_behind(self): """ Returns a ``float`` of the number of games behind the leader the team is. 0.0 indicates the team is tied for first. Negative numbers indicate the number of games a team is ahead of the second place team. """ if 'up' in self._games_behind.lower(): games_behind = re.sub('up *', '', self._games_behind.lower()) try: return float(games_behind) * -1.0 except ValueError: return None if 'tied' in self._games_behind.lower(): return 0.0 try: return float(self._games_behind) except ValueError: return None
language: python
code:
def games_behind(self): """ Returns a ``float`` of the number of games behind the leader the team is. 0.0 indicates the team is tied for first. Negative numbers indicate the number of games a team is ahead of the second place team. """ if 'up' in self._games_behind.lower(): games_behind = re.sub('up *', '', self._games_behind.lower()) try: return float(games_behind) * -1.0 except ValueError: return None if 'tied' in self._games_behind.lower(): return 0.0 try: return float(self._games_behind) except ValueError: return None
[ "def", "games_behind", "(", "self", ")", ":", "if", "'up'", "in", "self", ".", "_games_behind", ".", "lower", "(", ")", ":", "games_behind", "=", "re", ".", "sub", "(", "'up *'", ",", "''", ",", "self", ".", "_games_behind", ".", "lower", "(", ")", ")", "try", ":", "return", "float", "(", "games_behind", ")", "*", "-", "1.0", "except", "ValueError", ":", "return", "None", "if", "'tied'", "in", "self", ".", "_games_behind", ".", "lower", "(", ")", ":", "return", "0.0", "try", ":", "return", "float", "(", "self", ".", "_games_behind", ")", "except", "ValueError", ":", "return", "None" ]
Returns a ``float`` of the number of games behind the leader the team is. 0.0 indicates the team is tied for first. Negative numbers indicate the number of games a team is ahead of the second place team.
[ "Returns", "a", "float", "of", "the", "number", "of", "games", "behind", "the", "leader", "the", "team", "is", ".", "0", ".", "0", "indicates", "the", "team", "is", "tied", "for", "first", ".", "Negative", "numbers", "indicate", "the", "number", "of", "games", "a", "team", "is", "ahead", "of", "the", "second", "place", "team", "." ]
sha: ea0bae432be76450e137671d2998eb38f962dffd
url: https://github.com/roclark/sportsreference/blob/ea0bae432be76450e137671d2998eb38f962dffd/sportsreference/mlb/schedule.py#L279-L296
score: -1

id: 251,216
repo: roclark/sportsreference
path: sportsreference/mlb/teams.py
func_name: Team._parse_name
original_string:
def _parse_name(self, team_data): """ Parses the team's name. On the pages being parsed, the team's name doesn't follow the standard parsing algorithm that we use for the fields, and requires a special one-off algorithm. The name is attached in the 'title' attribute from within 'team_ID'. A few simple regex subs captures the team name. The '_name' attribute is applied with the captured team name from this function. Parameters ---------- team_data : string A string containing all of the rows of stats for a given team. If multiple tables are being referenced, this will be comprised of multiple rows in a single string. """ name = team_data('td[data-stat="team_ID"]:first') name = re.sub(r'.*title="', '', str(name)) name = re.sub(r'".*', '', name) setattr(self, '_name', name)
language: python
code:
def _parse_name(self, team_data): """ Parses the team's name. On the pages being parsed, the team's name doesn't follow the standard parsing algorithm that we use for the fields, and requires a special one-off algorithm. The name is attached in the 'title' attribute from within 'team_ID'. A few simple regex subs captures the team name. The '_name' attribute is applied with the captured team name from this function. Parameters ---------- team_data : string A string containing all of the rows of stats for a given team. If multiple tables are being referenced, this will be comprised of multiple rows in a single string. """ name = team_data('td[data-stat="team_ID"]:first') name = re.sub(r'.*title="', '', str(name)) name = re.sub(r'".*', '', name) setattr(self, '_name', name)
[ "def", "_parse_name", "(", "self", ",", "team_data", ")", ":", "name", "=", "team_data", "(", "'td[data-stat=\"team_ID\"]:first'", ")", "name", "=", "re", ".", "sub", "(", "r'.*title=\"'", ",", "''", ",", "str", "(", "name", ")", ")", "name", "=", "re", ".", "sub", "(", "r'\".*'", ",", "''", ",", "name", ")", "setattr", "(", "self", ",", "'_name'", ",", "name", ")" ]
Parses the team's name. On the pages being parsed, the team's name doesn't follow the standard parsing algorithm that we use for the fields, and requires a special one-off algorithm. The name is attached in the 'title' attribute from within 'team_ID'. A few simple regex subs captures the team name. The '_name' attribute is applied with the captured team name from this function. Parameters ---------- team_data : string A string containing all of the rows of stats for a given team. If multiple tables are being referenced, this will be comprised of multiple rows in a single string.
[ "Parses", "the", "team", "s", "name", "." ]
sha: ea0bae432be76450e137671d2998eb38f962dffd
url: https://github.com/roclark/sportsreference/blob/ea0bae432be76450e137671d2998eb38f962dffd/sportsreference/mlb/teams.py#L140-L161
score: -1

id: 251,217
repo: roclark/sportsreference
path: sportsreference/nba/schedule.py
func_name: Game._parse_opponent_abbr
original_string:
def _parse_opponent_abbr(self, game_data): """ Parses the opponent's abbreviation for the game. The opponent's 3-letter abbreviation is embedded within the HTML tag and needs a special parsing scheme in order to be extracted. Parameters ---------- game_data : PyQuery object A PyQuery object containing the information specific to a game. """ opponent = game_data('td[data-stat="opp_name"]:first') opponent = re.sub(r'.*/teams/', '', str(opponent)) opponent = re.sub(r'\/.*.html.*', '', opponent) setattr(self, '_opponent_abbr', opponent)
language: python
code:
def _parse_opponent_abbr(self, game_data): """ Parses the opponent's abbreviation for the game. The opponent's 3-letter abbreviation is embedded within the HTML tag and needs a special parsing scheme in order to be extracted. Parameters ---------- game_data : PyQuery object A PyQuery object containing the information specific to a game. """ opponent = game_data('td[data-stat="opp_name"]:first') opponent = re.sub(r'.*/teams/', '', str(opponent)) opponent = re.sub(r'\/.*.html.*', '', opponent) setattr(self, '_opponent_abbr', opponent)
[ "def", "_parse_opponent_abbr", "(", "self", ",", "game_data", ")", ":", "opponent", "=", "game_data", "(", "'td[data-stat=\"opp_name\"]:first'", ")", "opponent", "=", "re", ".", "sub", "(", "r'.*/teams/'", ",", "''", ",", "str", "(", "opponent", ")", ")", "opponent", "=", "re", ".", "sub", "(", "r'\\/.*.html.*'", ",", "''", ",", "opponent", ")", "setattr", "(", "self", ",", "'_opponent_abbr'", ",", "opponent", ")" ]
Parses the opponent's abbreviation for the game. The opponent's 3-letter abbreviation is embedded within the HTML tag and needs a special parsing scheme in order to be extracted. Parameters ---------- game_data : PyQuery object A PyQuery object containing the information specific to a game.
[ "Parses", "the", "opponent", "s", "abbreviation", "for", "the", "game", "." ]
sha: ea0bae432be76450e137671d2998eb38f962dffd
url: https://github.com/roclark/sportsreference/blob/ea0bae432be76450e137671d2998eb38f962dffd/sportsreference/nba/schedule.py#L66-L81
score: -1

id: 251,218
repo: roclark/sportsreference
path: sportsreference/nba/schedule.py
func_name: Schedule._add_games_to_schedule
original_string:
def _add_games_to_schedule(self, schedule): """ Add game information to list of games. Create a Game instance for the given game in the schedule and add it to the list of games the team has or will play during the season. Parameters ---------- schedule : PyQuery object A PyQuery object pertaining to a team's schedule table. year : string The requested year to pull stats from. """ for item in schedule: if 'class="thead"' in str(item) or \ 'class="over_header thead"' in str(item): continue # pragma: no cover game = Game(item) self._games.append(game)
language: python
code:
def _add_games_to_schedule(self, schedule): """ Add game information to list of games. Create a Game instance for the given game in the schedule and add it to the list of games the team has or will play during the season. Parameters ---------- schedule : PyQuery object A PyQuery object pertaining to a team's schedule table. year : string The requested year to pull stats from. """ for item in schedule: if 'class="thead"' in str(item) or \ 'class="over_header thead"' in str(item): continue # pragma: no cover game = Game(item) self._games.append(game)
[ "def", "_add_games_to_schedule", "(", "self", ",", "schedule", ")", ":", "for", "item", "in", "schedule", ":", "if", "'class=\"thead\"'", "in", "str", "(", "item", ")", "or", "'class=\"over_header thead\"'", "in", "str", "(", "item", ")", ":", "continue", "# pragma: no cover", "game", "=", "Game", "(", "item", ")", "self", ".", "_games", ".", "append", "(", "game", ")" ]
Add game information to list of games. Create a Game instance for the given game in the schedule and add it to the list of games the team has or will play during the season. Parameters ---------- schedule : PyQuery object A PyQuery object pertaining to a team's schedule table. year : string The requested year to pull stats from.
[ "Add", "game", "information", "to", "list", "of", "games", "." ]
sha: ea0bae432be76450e137671d2998eb38f962dffd
url: https://github.com/roclark/sportsreference/blob/ea0bae432be76450e137671d2998eb38f962dffd/sportsreference/nba/schedule.py#L359-L378
score: -1

id: 251,219
repo: roclark/sportsreference
path: sportsreference/ncaaf/boxscore.py
func_name: BoxscorePlayer.dataframe
original_string:
def dataframe(self): """ Returns a ``pandas DataFrame`` containing all other relevant class properties and value for the specified game. """ fields_to_include = { 'completed_passes': self.completed_passes, 'attempted_passes': self.attempted_passes, 'passing_completion': self.passing_completion, 'passing_yards': self.passing_yards, 'pass_yards_per_attempt': self.pass_yards_per_attempt, 'adjusted_yards_per_attempt': self.adjusted_yards_per_attempt, 'passing_touchdowns': self.passing_touchdowns, 'interceptions_thrown': self.interceptions_thrown, 'quarterback_rating': self.quarterback_rating, 'rush_attempts': self.rush_attempts, 'rush_yards': self.rush_yards, 'rush_yards_per_attempt': self.rush_yards_per_attempt, 'rush_touchdowns': self.rush_touchdowns, 'receptions': self.receptions, 'receiving_yards': self.receiving_yards, 'receiving_yards_per_reception': self.receiving_yards_per_reception, 'receiving_touchdowns': self.receiving_touchdowns, 'plays_from_scrimmage': self.plays_from_scrimmage, 'yards_from_scrimmage': self.yards_from_scrimmage, 'yards_from_scrimmage_per_play': self.yards_from_scrimmage_per_play, 'rushing_and_receiving_touchdowns': self.rushing_and_receiving_touchdowns, 'solo_tackles': self.solo_tackles, 'assists_on_tackles': self.assists_on_tackles, 'total_tackles': self.total_tackles, 'tackles_for_loss': self.tackles_for_loss, 'sacks': self.sacks, 'interceptions': self.interceptions, 'yards_returned_from_interceptions': self.yards_returned_from_interceptions, 'yards_returned_per_interception': self.yards_returned_per_interception, 'interceptions_returned_for_touchdown': self.interceptions_returned_for_touchdown, 'passes_defended': self.passes_defended, 'fumbles_recovered': self.fumbles_recovered, 'yards_recovered_from_fumble': self.yards_recovered_from_fumble, 'fumbles_recovered_for_touchdown': self.fumbles_recovered_for_touchdown, 'fumbles_forced': self.fumbles_forced, 'kickoff_returns': self.kickoff_returns, 'kickoff_return_yards': self.kickoff_return_yards, 'average_kickoff_return_yards': self.average_kickoff_return_yards, 'kickoff_return_touchdowns': self.kickoff_return_touchdowns, 'punt_returns': self.punt_returns, 'punt_return_yards': self.punt_return_yards, 'average_punt_return_yards': self.average_punt_return_yards, 'punt_return_touchdowns': self.punt_return_touchdowns, 'extra_points_made': self.extra_points_made, 'extra_points_attempted': self.extra_points_attempted, 'extra_point_percentage': self.extra_point_percentage, 'field_goals_made': self.field_goals_made, 'field_goals_attempted': self.field_goals_attempted, 'field_goal_percentage': self.field_goal_percentage, 'points_kicking': self.points_kicking, 'punts': self.punts, 'punting_yards': self.punting_yards, 'punting_yards_per_punt': self.punting_yards_per_attempt } return pd.DataFrame([fields_to_include], index=[self._player_id])
language: python
code:
def dataframe(self): """ Returns a ``pandas DataFrame`` containing all other relevant class properties and value for the specified game. """ fields_to_include = { 'completed_passes': self.completed_passes, 'attempted_passes': self.attempted_passes, 'passing_completion': self.passing_completion, 'passing_yards': self.passing_yards, 'pass_yards_per_attempt': self.pass_yards_per_attempt, 'adjusted_yards_per_attempt': self.adjusted_yards_per_attempt, 'passing_touchdowns': self.passing_touchdowns, 'interceptions_thrown': self.interceptions_thrown, 'quarterback_rating': self.quarterback_rating, 'rush_attempts': self.rush_attempts, 'rush_yards': self.rush_yards, 'rush_yards_per_attempt': self.rush_yards_per_attempt, 'rush_touchdowns': self.rush_touchdowns, 'receptions': self.receptions, 'receiving_yards': self.receiving_yards, 'receiving_yards_per_reception': self.receiving_yards_per_reception, 'receiving_touchdowns': self.receiving_touchdowns, 'plays_from_scrimmage': self.plays_from_scrimmage, 'yards_from_scrimmage': self.yards_from_scrimmage, 'yards_from_scrimmage_per_play': self.yards_from_scrimmage_per_play, 'rushing_and_receiving_touchdowns': self.rushing_and_receiving_touchdowns, 'solo_tackles': self.solo_tackles, 'assists_on_tackles': self.assists_on_tackles, 'total_tackles': self.total_tackles, 'tackles_for_loss': self.tackles_for_loss, 'sacks': self.sacks, 'interceptions': self.interceptions, 'yards_returned_from_interceptions': self.yards_returned_from_interceptions, 'yards_returned_per_interception': self.yards_returned_per_interception, 'interceptions_returned_for_touchdown': self.interceptions_returned_for_touchdown, 'passes_defended': self.passes_defended, 'fumbles_recovered': self.fumbles_recovered, 'yards_recovered_from_fumble': self.yards_recovered_from_fumble, 'fumbles_recovered_for_touchdown': self.fumbles_recovered_for_touchdown, 'fumbles_forced': self.fumbles_forced, 'kickoff_returns': self.kickoff_returns, 'kickoff_return_yards': self.kickoff_return_yards, 'average_kickoff_return_yards': self.average_kickoff_return_yards, 'kickoff_return_touchdowns': self.kickoff_return_touchdowns, 'punt_returns': self.punt_returns, 'punt_return_yards': self.punt_return_yards, 'average_punt_return_yards': self.average_punt_return_yards, 'punt_return_touchdowns': self.punt_return_touchdowns, 'extra_points_made': self.extra_points_made, 'extra_points_attempted': self.extra_points_attempted, 'extra_point_percentage': self.extra_point_percentage, 'field_goals_made': self.field_goals_made, 'field_goals_attempted': self.field_goals_attempted, 'field_goal_percentage': self.field_goal_percentage, 'points_kicking': self.points_kicking, 'punts': self.punts, 'punting_yards': self.punting_yards, 'punting_yards_per_punt': self.punting_yards_per_attempt } return pd.DataFrame([fields_to_include], index=[self._player_id])
[ "def", "dataframe", "(", "self", ")", ":", "fields_to_include", "=", "{", "'completed_passes'", ":", "self", ".", "completed_passes", ",", "'attempted_passes'", ":", "self", ".", "attempted_passes", ",", "'passing_completion'", ":", "self", ".", "passing_completion", ",", "'passing_yards'", ":", "self", ".", "passing_yards", ",", "'pass_yards_per_attempt'", ":", "self", ".", "pass_yards_per_attempt", ",", "'adjusted_yards_per_attempt'", ":", "self", ".", "adjusted_yards_per_attempt", ",", "'passing_touchdowns'", ":", "self", ".", "passing_touchdowns", ",", "'interceptions_thrown'", ":", "self", ".", "interceptions_thrown", ",", "'quarterback_rating'", ":", "self", ".", "quarterback_rating", ",", "'rush_attempts'", ":", "self", ".", "rush_attempts", ",", "'rush_yards'", ":", "self", ".", "rush_yards", ",", "'rush_yards_per_attempt'", ":", "self", ".", "rush_yards_per_attempt", ",", "'rush_touchdowns'", ":", "self", ".", "rush_touchdowns", ",", "'receptions'", ":", "self", ".", "receptions", ",", "'receiving_yards'", ":", "self", ".", "receiving_yards", ",", "'receiving_yards_per_reception'", ":", "self", ".", "receiving_yards_per_reception", ",", "'receiving_touchdowns'", ":", "self", ".", "receiving_touchdowns", ",", "'plays_from_scrimmage'", ":", "self", ".", "plays_from_scrimmage", ",", "'yards_from_scrimmage'", ":", "self", ".", "yards_from_scrimmage", ",", "'yards_from_scrimmage_per_play'", ":", "self", ".", "yards_from_scrimmage_per_play", ",", "'rushing_and_receiving_touchdowns'", ":", "self", ".", "rushing_and_receiving_touchdowns", ",", "'solo_tackles'", ":", "self", ".", "solo_tackles", ",", "'assists_on_tackles'", ":", "self", ".", "assists_on_tackles", ",", "'total_tackles'", ":", "self", ".", "total_tackles", ",", "'tackles_for_loss'", ":", "self", ".", "tackles_for_loss", ",", "'sacks'", ":", "self", ".", "sacks", ",", "'interceptions'", ":", "self", ".", "interceptions", ",", "'yards_returned_from_interceptions'", ":", "self", ".", "yards_returned_from_interceptions", ",", "'yards_returned_per_interception'", ":", "self", ".", "yards_returned_per_interception", ",", "'interceptions_returned_for_touchdown'", ":", "self", ".", "interceptions_returned_for_touchdown", ",", "'passes_defended'", ":", "self", ".", "passes_defended", ",", "'fumbles_recovered'", ":", "self", ".", "fumbles_recovered", ",", "'yards_recovered_from_fumble'", ":", "self", ".", "yards_recovered_from_fumble", ",", "'fumbles_recovered_for_touchdown'", ":", "self", ".", "fumbles_recovered_for_touchdown", ",", "'fumbles_forced'", ":", "self", ".", "fumbles_forced", ",", "'kickoff_returns'", ":", "self", ".", "kickoff_returns", ",", "'kickoff_return_yards'", ":", "self", ".", "kickoff_return_yards", ",", "'average_kickoff_return_yards'", ":", "self", ".", "average_kickoff_return_yards", ",", "'kickoff_return_touchdowns'", ":", "self", ".", "kickoff_return_touchdowns", ",", "'punt_returns'", ":", "self", ".", "punt_returns", ",", "'punt_return_yards'", ":", "self", ".", "punt_return_yards", ",", "'average_punt_return_yards'", ":", "self", ".", "average_punt_return_yards", ",", "'punt_return_touchdowns'", ":", "self", ".", "punt_return_touchdowns", ",", "'extra_points_made'", ":", "self", ".", "extra_points_made", ",", "'extra_points_attempted'", ":", "self", ".", "extra_points_attempted", ",", "'extra_point_percentage'", ":", "self", ".", "extra_point_percentage", ",", "'field_goals_made'", ":", "self", ".", "field_goals_made", ",", "'field_goals_attempted'", ":", "self", ".", "field_goals_attempted", ",", 
"'field_goal_percentage'", ":", "self", ".", "field_goal_percentage", ",", "'points_kicking'", ":", "self", ".", "points_kicking", ",", "'punts'", ":", "self", ".", "punts", ",", "'punting_yards'", ":", "self", ".", "punting_yards", ",", "'punting_yards_per_punt'", ":", "self", ".", "punting_yards_per_attempt", "}", "return", "pd", ".", "DataFrame", "(", "[", "fields_to_include", "]", ",", "index", "=", "[", "self", ".", "_player_id", "]", ")" ]
Returns a ``pandas DataFrame`` containing all other relevant class properties and value for the specified game.
[ "Returns", "a", "pandas", "DataFrame", "containing", "all", "other", "relevant", "class", "properties", "and", "value", "for", "the", "specified", "game", "." ]
ea0bae432be76450e137671d2998eb38f962dffd
https://github.com/roclark/sportsreference/blob/ea0bae432be76450e137671d2998eb38f962dffd/sportsreference/ncaaf/boxscore.py#L101-L168
-1
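The record above builds a one-row pandas DataFrame per player, indexed by the player's ID. Below is a minimal, self-contained sketch of how such rows are typically stacked into a season-wide table; the column names and player IDs are invented for illustration and are not part of the library.

import pandas as pd

# One single-row frame per player, mirroring
# pd.DataFrame([fields_to_include], index=[player_id]) in the record above.
rows = [
    pd.DataFrame([{'passing_yards': 312, 'passing_touchdowns': 3}],
                 index=['hypothetical-player-1']),
    pd.DataFrame([{'passing_yards': 145, 'passing_touchdowns': 1}],
                 index=['hypothetical-player-2']),
]
combined = pd.concat(rows)   # stack the per-player rows into one table
print(combined)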
251,220
roclark/sportsreference
sportsreference/ncaaf/boxscore.py
Boxscore.dataframe
def dataframe(self):
        """
        Returns a pandas DataFrame containing all other class properties and
        values. The index for the DataFrame is the string URI that is used to
        instantiate the class, such as '2018-01-08-georgia'.
        """
        if self._away_points is None and self._home_points is None:
            return None
        fields_to_include = {
            'away_first_downs': self.away_first_downs,
            'away_fumbles': self.away_fumbles,
            'away_fumbles_lost': self.away_fumbles_lost,
            'away_interceptions': self.away_interceptions,
            'away_pass_attempts': self.away_pass_attempts,
            'away_pass_completions': self.away_pass_completions,
            'away_pass_touchdowns': self.away_pass_touchdowns,
            'away_pass_yards': self.away_pass_yards,
            'away_penalties': self.away_penalties,
            'away_points': self.away_points,
            'away_rush_attempts': self.away_rush_attempts,
            'away_rush_touchdowns': self.away_rush_touchdowns,
            'away_rush_yards': self.away_rush_yards,
            'away_total_yards': self.away_total_yards,
            'away_turnovers': self.away_turnovers,
            'away_yards_from_penalties': self.away_yards_from_penalties,
            'date': self.date,
            'home_first_downs': self.home_first_downs,
            'home_fumbles': self.home_fumbles,
            'home_fumbles_lost': self.home_fumbles_lost,
            'home_interceptions': self.home_interceptions,
            'home_pass_attempts': self.home_pass_attempts,
            'home_pass_completions': self.home_pass_completions,
            'home_pass_touchdowns': self.home_pass_touchdowns,
            'home_pass_yards': self.home_pass_yards,
            'home_penalties': self.home_penalties,
            'home_points': self.home_points,
            'home_rush_attempts': self.home_rush_attempts,
            'home_rush_touchdowns': self.home_rush_touchdowns,
            'home_rush_yards': self.home_rush_yards,
            'home_total_yards': self.home_total_yards,
            'home_turnovers': self.home_turnovers,
            'home_yards_from_penalties': self.home_yards_from_penalties,
            'losing_abbr': self.losing_abbr,
            'losing_name': self.losing_name,
            'stadium': self.stadium,
            'time': self.time,
            'winner': self.winner,
            'winning_abbr': self.winning_abbr,
            'winning_name': self.winning_name
        }
        return pd.DataFrame([fields_to_include], index=[self._uri])
python
def dataframe(self):
        """
        Returns a pandas DataFrame containing all other class properties and
        values. The index for the DataFrame is the string URI that is used to
        instantiate the class, such as '2018-01-08-georgia'.
        """
        if self._away_points is None and self._home_points is None:
            return None
        fields_to_include = {
            'away_first_downs': self.away_first_downs,
            'away_fumbles': self.away_fumbles,
            'away_fumbles_lost': self.away_fumbles_lost,
            'away_interceptions': self.away_interceptions,
            'away_pass_attempts': self.away_pass_attempts,
            'away_pass_completions': self.away_pass_completions,
            'away_pass_touchdowns': self.away_pass_touchdowns,
            'away_pass_yards': self.away_pass_yards,
            'away_penalties': self.away_penalties,
            'away_points': self.away_points,
            'away_rush_attempts': self.away_rush_attempts,
            'away_rush_touchdowns': self.away_rush_touchdowns,
            'away_rush_yards': self.away_rush_yards,
            'away_total_yards': self.away_total_yards,
            'away_turnovers': self.away_turnovers,
            'away_yards_from_penalties': self.away_yards_from_penalties,
            'date': self.date,
            'home_first_downs': self.home_first_downs,
            'home_fumbles': self.home_fumbles,
            'home_fumbles_lost': self.home_fumbles_lost,
            'home_interceptions': self.home_interceptions,
            'home_pass_attempts': self.home_pass_attempts,
            'home_pass_completions': self.home_pass_completions,
            'home_pass_touchdowns': self.home_pass_touchdowns,
            'home_pass_yards': self.home_pass_yards,
            'home_penalties': self.home_penalties,
            'home_points': self.home_points,
            'home_rush_attempts': self.home_rush_attempts,
            'home_rush_touchdowns': self.home_rush_touchdowns,
            'home_rush_yards': self.home_rush_yards,
            'home_total_yards': self.home_total_yards,
            'home_turnovers': self.home_turnovers,
            'home_yards_from_penalties': self.home_yards_from_penalties,
            'losing_abbr': self.losing_abbr,
            'losing_name': self.losing_name,
            'stadium': self.stadium,
            'time': self.time,
            'winner': self.winner,
            'winning_abbr': self.winning_abbr,
            'winning_name': self.winning_name
        }
        return pd.DataFrame([fields_to_include], index=[self._uri])
[ "def", "dataframe", "(", "self", ")", ":", "if", "self", ".", "_away_points", "is", "None", "and", "self", ".", "_home_points", "is", "None", ":", "return", "None", "fields_to_include", "=", "{", "'away_first_downs'", ":", "self", ".", "away_first_downs", ",", "'away_fumbles'", ":", "self", ".", "away_fumbles", ",", "'away_fumbles_lost'", ":", "self", ".", "away_fumbles_lost", ",", "'away_interceptions'", ":", "self", ".", "away_interceptions", ",", "'away_pass_attempts'", ":", "self", ".", "away_pass_attempts", ",", "'away_pass_completions'", ":", "self", ".", "away_pass_completions", ",", "'away_pass_touchdowns'", ":", "self", ".", "away_pass_touchdowns", ",", "'away_pass_yards'", ":", "self", ".", "away_pass_yards", ",", "'away_penalties'", ":", "self", ".", "away_penalties", ",", "'away_points'", ":", "self", ".", "away_points", ",", "'away_rush_attempts'", ":", "self", ".", "away_rush_attempts", ",", "'away_rush_touchdowns'", ":", "self", ".", "away_rush_touchdowns", ",", "'away_rush_yards'", ":", "self", ".", "away_rush_yards", ",", "'away_total_yards'", ":", "self", ".", "away_total_yards", ",", "'away_turnovers'", ":", "self", ".", "away_turnovers", ",", "'away_yards_from_penalties'", ":", "self", ".", "away_yards_from_penalties", ",", "'date'", ":", "self", ".", "date", ",", "'home_first_downs'", ":", "self", ".", "home_first_downs", ",", "'home_fumbles'", ":", "self", ".", "home_fumbles", ",", "'home_fumbles_lost'", ":", "self", ".", "home_fumbles_lost", ",", "'home_interceptions'", ":", "self", ".", "home_interceptions", ",", "'home_pass_attempts'", ":", "self", ".", "home_pass_attempts", ",", "'home_pass_completions'", ":", "self", ".", "home_pass_completions", ",", "'home_pass_touchdowns'", ":", "self", ".", "home_pass_touchdowns", ",", "'home_pass_yards'", ":", "self", ".", "home_pass_yards", ",", "'home_penalties'", ":", "self", ".", "home_penalties", ",", "'home_points'", ":", "self", ".", "home_points", ",", "'home_rush_attempts'", ":", "self", ".", "home_rush_attempts", ",", "'home_rush_touchdowns'", ":", "self", ".", "home_rush_touchdowns", ",", "'home_rush_yards'", ":", "self", ".", "home_rush_yards", ",", "'home_total_yards'", ":", "self", ".", "home_total_yards", ",", "'home_turnovers'", ":", "self", ".", "home_turnovers", ",", "'home_yards_from_penalties'", ":", "self", ".", "home_yards_from_penalties", ",", "'losing_abbr'", ":", "self", ".", "losing_abbr", ",", "'losing_name'", ":", "self", ".", "losing_name", ",", "'stadium'", ":", "self", ".", "stadium", ",", "'time'", ":", "self", ".", "time", ",", "'winner'", ":", "self", ".", "winner", ",", "'winning_abbr'", ":", "self", ".", "winning_abbr", ",", "'winning_name'", ":", "self", ".", "winning_name", "}", "return", "pd", ".", "DataFrame", "(", "[", "fields_to_include", "]", ",", "index", "=", "[", "self", ".", "_uri", "]", ")" ]
Returns a pandas DataFrame containing all other class properties and values. The index for the DataFrame is the string URI that is used to instantiate the class, such as '2018-01-08-georgia'.
[ "Returns", "a", "pandas", "DataFrame", "containing", "all", "other", "class", "properties", "and", "values", ".", "The", "index", "for", "the", "DataFrame", "is", "the", "string", "URI", "that", "is", "used", "to", "instantiate", "the", "class", "such", "as", "2018", "-", "01", "-", "08", "-", "georgia", "." ]
ea0bae432be76450e137671d2998eb38f962dffd
https://github.com/roclark/sportsreference/blob/ea0bae432be76450e137671d2998eb38f962dffd/sportsreference/ncaaf/boxscore.py#L708-L758
-1
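A hedged usage sketch for the boxscore record above, assuming the sportsreference package is installed and the game URI exists on sports-reference.com (network access is required); dataframe is exposed as a property on the class.

from sportsreference.ncaaf.boxscore import Boxscore

# URI format follows the docstring above, e.g. '2018-01-08-georgia'.
game = Boxscore('2018-01-08-georgia')
df = game.dataframe  # one-row DataFrame, or None when no score was parsed
if df is not None:
    print(df[['home_points', 'away_points', 'winning_name']])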
251,221
roclark/sportsreference
sportsreference/ncaaf/boxscore.py
Boxscore.winning_abbr
def winning_abbr(self):
        """
        Returns a ``string`` of the winning team's abbreviation, such as
        'ALABAMA' for the Alabama Crimson Tide.
        """
        if self.winner == HOME:
            if 'cfb/schools' not in str(self._home_name):
                return self._home_name.text()
            return utils._parse_abbreviation(self._home_name)
        if 'cfb/schools' not in str(self._away_name):
            return self._away_name.text()
        return utils._parse_abbreviation(self._away_name)
python
def winning_abbr(self):
        """
        Returns a ``string`` of the winning team's abbreviation, such as
        'ALABAMA' for the Alabama Crimson Tide.
        """
        if self.winner == HOME:
            if 'cfb/schools' not in str(self._home_name):
                return self._home_name.text()
            return utils._parse_abbreviation(self._home_name)
        if 'cfb/schools' not in str(self._away_name):
            return self._away_name.text()
        return utils._parse_abbreviation(self._away_name)
[ "def", "winning_abbr", "(", "self", ")", ":", "if", "self", ".", "winner", "==", "HOME", ":", "if", "'cfb/schools'", "not", "in", "str", "(", "self", ".", "_home_name", ")", ":", "return", "self", ".", "_home_name", ".", "text", "(", ")", "return", "utils", ".", "_parse_abbreviation", "(", "self", ".", "_home_name", ")", "if", "'cfb/schools'", "not", "in", "str", "(", "self", ".", "_away_name", ")", ":", "return", "self", ".", "_away_name", ".", "text", "(", ")", "return", "utils", ".", "_parse_abbreviation", "(", "self", ".", "_away_name", ")" ]
Returns a ``string`` of the winning team's abbreviation, such as 'ALABAMA' for the Alabama Crimson Tide.
[ "Returns", "a", "string", "of", "the", "winning", "team", "s", "abbreviation", "such", "as", "ALABAMA", "for", "the", "Alabama", "Crimson", "Tide", "." ]
ea0bae432be76450e137671d2998eb38f962dffd
https://github.com/roclark/sportsreference/blob/ea0bae432be76450e137671d2998eb38f962dffd/sportsreference/ncaaf/boxscore.py#L818-L830
-1
251,222
roclark/sportsreference
sportsreference/ncaaf/roster.py
Player._combine_all_stats
def _combine_all_stats(self, player_info):
        """
        Pull stats from all tables into a single data structure.

        Pull the stats from all of the requested tables into a dictionary
        that is separated by season to allow easy queries of the player's
        stats for each season.

        Parameters
        ----------
        player_info : PyQuery object
            A PyQuery object containing all of the stats information for the
            requested player.

        Returns
        -------
        dictionary
            Returns a dictionary where all stats from each table are combined
            by season to allow easy queries by year.
        """
        all_stats_dict = {}

        for table_id in ['passing', 'rushing', 'defense', 'scoring']:
            table_items = utils._get_stats_table(
                player_info, 'table#%s' % table_id)
            career_items = utils._get_stats_table(
                player_info, 'table#%s' % table_id, footer=True)
            all_stats_dict = self._combine_season_stats(
                table_items, career_items, all_stats_dict)
        return all_stats_dict
python
def _combine_all_stats(self, player_info):
        """
        Pull stats from all tables into a single data structure.

        Pull the stats from all of the requested tables into a dictionary
        that is separated by season to allow easy queries of the player's
        stats for each season.

        Parameters
        ----------
        player_info : PyQuery object
            A PyQuery object containing all of the stats information for the
            requested player.

        Returns
        -------
        dictionary
            Returns a dictionary where all stats from each table are combined
            by season to allow easy queries by year.
        """
        all_stats_dict = {}

        for table_id in ['passing', 'rushing', 'defense', 'scoring']:
            table_items = utils._get_stats_table(
                player_info, 'table#%s' % table_id)
            career_items = utils._get_stats_table(
                player_info, 'table#%s' % table_id, footer=True)
            all_stats_dict = self._combine_season_stats(
                table_items, career_items, all_stats_dict)
        return all_stats_dict
[ "def", "_combine_all_stats", "(", "self", ",", "player_info", ")", ":", "all_stats_dict", "=", "{", "}", "for", "table_id", "in", "[", "'passing'", ",", "'rushing'", ",", "'defense'", ",", "'scoring'", "]", ":", "table_items", "=", "utils", ".", "_get_stats_table", "(", "player_info", ",", "'table#%s'", "%", "table_id", ")", "career_items", "=", "utils", ".", "_get_stats_table", "(", "player_info", ",", "'table#%s'", "%", "table_id", ",", "footer", "=", "True", ")", "all_stats_dict", "=", "self", ".", "_combine_season_stats", "(", "table_items", ",", "career_items", ",", "all_stats_dict", ")", "return", "all_stats_dict" ]
Pull stats from all tables into a single data structure.

Pull the stats from all of the requested tables into a dictionary that is
separated by season to allow easy queries of the player's stats for each
season.

Parameters
----------
player_info : PyQuery object
    A PyQuery object containing all of the stats information for the
    requested player.

Returns
-------
dictionary
    Returns a dictionary where all stats from each table are combined by
    season to allow easy queries by year.
[ "Pull", "stats", "from", "all", "tables", "into", "a", "single", "data", "structure", "." ]
ea0bae432be76450e137671d2998eb38f962dffd
https://github.com/roclark/sportsreference/blob/ea0bae432be76450e137671d2998eb38f962dffd/sportsreference/ncaaf/roster.py#L233-L264
-1
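The _combine_season_stats helper used above is not included in this record; the snippet below is only a rough, self-contained sketch of the merge shape it implies, with invented stat names.

# Fold stats from several tables into one dict keyed by season.
def merge_season_stats(table_rows, all_stats):
    for season, stats in table_rows:  # e.g. ('2017', {'passing_yards': 3000})
        all_stats.setdefault(season, {}).update(stats)
    return all_stats

combined = {}
combined = merge_season_stats([('2017', {'passing_yards': 3000})], combined)
combined = merge_season_stats([('2017', {'rush_yards': 120})], combined)
print(combined)  # {'2017': {'passing_yards': 3000, 'rush_yards': 120}}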
251,223
roclark/sportsreference
sportsreference/ncaaf/roster.py
Player._pull_player_data
def _pull_player_data(self):
        """
        Pull and aggregate all player information.

        Pull the player's HTML stats page and parse unique properties, such
        as the player's height, weight, and name. Next, combine all stats for
        all seasons plus the player's career stats into a single object which
        can easily be iterated upon.

        Returns
        -------
        dictionary
            Returns a dictionary of the player's combined stats where each
            key is a string of the season and the value is the season's
            associated stats.
        """
        player_info = self._retrieve_html_page()
        if not player_info:
            return
        self._parse_player_information(player_info)
        all_stats = self._combine_all_stats(player_info)
        setattr(self, '_season', list(all_stats.keys()))
        return all_stats
python
def _pull_player_data(self):
        """
        Pull and aggregate all player information.

        Pull the player's HTML stats page and parse unique properties, such
        as the player's height, weight, and name. Next, combine all stats for
        all seasons plus the player's career stats into a single object which
        can easily be iterated upon.

        Returns
        -------
        dictionary
            Returns a dictionary of the player's combined stats where each
            key is a string of the season and the value is the season's
            associated stats.
        """
        player_info = self._retrieve_html_page()
        if not player_info:
            return
        self._parse_player_information(player_info)
        all_stats = self._combine_all_stats(player_info)
        setattr(self, '_season', list(all_stats.keys()))
        return all_stats
[ "def", "_pull_player_data", "(", "self", ")", ":", "player_info", "=", "self", ".", "_retrieve_html_page", "(", ")", "if", "not", "player_info", ":", "return", "self", ".", "_parse_player_information", "(", "player_info", ")", "all_stats", "=", "self", ".", "_combine_all_stats", "(", "player_info", ")", "setattr", "(", "self", ",", "'_season'", ",", "list", "(", "all_stats", ".", "keys", "(", ")", ")", ")", "return", "all_stats" ]
Pull and aggregate all player information.

Pull the player's HTML stats page and parse unique properties, such as the
player's height, weight, and name. Next, combine all stats for all seasons
plus the player's career stats into a single object which can easily be
iterated upon.

Returns
-------
dictionary
    Returns a dictionary of the player's combined stats where each key is a
    string of the season and the value is the season's associated stats.
[ "Pull", "and", "aggregate", "all", "player", "information", "." ]
ea0bae432be76450e137671d2998eb38f962dffd
https://github.com/roclark/sportsreference/blob/ea0bae432be76450e137671d2998eb38f962dffd/sportsreference/ncaaf/roster.py#L284-L306
-1
251,224
roclark/sportsreference
sportsreference/nhl/roster.py
Player.team_abbreviation
def team_abbreviation(self):
        """
        Returns a ``string`` of the team's abbreviation, such as 'DET' for
        the Detroit Red Wings.
        """
        # For career stats, skip the team abbreviation.
        if self._season[self._index].lower() == 'career':
            return None
        return self._team_abbreviation[self._index]
python
def team_abbreviation(self):
        """
        Returns a ``string`` of the team's abbreviation, such as 'DET' for
        the Detroit Red Wings.
        """
        # For career stats, skip the team abbreviation.
        if self._season[self._index].lower() == 'career':
            return None
        return self._team_abbreviation[self._index]
[ "def", "team_abbreviation", "(", "self", ")", ":", "# For career stats, skip the team abbreviation.", "if", "self", ".", "_season", "[", "self", ".", "_index", "]", ".", "lower", "(", ")", "==", "'career'", ":", "return", "None", "return", "self", ".", "_team_abbreviation", "[", "self", ".", "_index", "]" ]
Returns a ``string`` of the team's abbreviation, such as 'DET' for the Detroit Red Wings.
[ "Returns", "a", "string", "of", "the", "team", "s", "abbreviation", "such", "as", "DET", "for", "the", "Detroit", "Red", "Wings", "." ]
ea0bae432be76450e137671d2998eb38f962dffd
https://github.com/roclark/sportsreference/blob/ea0bae432be76450e137671d2998eb38f962dffd/sportsreference/nhl/roster.py#L548-L556
-1
251,225
elehcimd/pynb
pynb/notebook.py
Notebook.add_cell_footer
def add_cell_footer(self):
        """
        Add footer cell
        """

        # check if there's already a cell footer... if true, do not add a second cell footer.
        # this situation happens when exporting to ipynb and then importing from ipynb.

        logging.info('Adding footer cell')

        for cell in self.nb['cells']:
            if cell.cell_type == 'markdown':
                if 'pynb_footer_tag' in cell.source:
                    logging.debug('Footer cell already present')
                    return

        m = """

        ---
        * **Notebook class name**: {class_name}
        * **Notebook cells name**: {cells_name}
        * **Execution time**: {exec_begin}
        * **Execution duration**: {exec_time:.2f}s
        * **Command line**: {argv}
        [//]: # (pynb_footer_tag)
        """

        self.add_cell_markdown(
            m.format(exec_time=self.exec_time,
                     exec_begin=self.exec_begin_dt,
                     class_name=self.__class__.__name__,
                     argv=str(sys.argv),
                     cells_name=self.cells_name))
python
def add_cell_footer(self):
        """
        Add footer cell
        """

        # check if there's already a cell footer... if true, do not add a second cell footer.
        # this situation happens when exporting to ipynb and then importing from ipynb.

        logging.info('Adding footer cell')

        for cell in self.nb['cells']:
            if cell.cell_type == 'markdown':
                if 'pynb_footer_tag' in cell.source:
                    logging.debug('Footer cell already present')
                    return

        m = """

        ---
        * **Notebook class name**: {class_name}
        * **Notebook cells name**: {cells_name}
        * **Execution time**: {exec_begin}
        * **Execution duration**: {exec_time:.2f}s
        * **Command line**: {argv}
        [//]: # (pynb_footer_tag)
        """

        self.add_cell_markdown(
            m.format(exec_time=self.exec_time,
                     exec_begin=self.exec_begin_dt,
                     class_name=self.__class__.__name__,
                     argv=str(sys.argv),
                     cells_name=self.cells_name))
[ "def", "add_cell_footer", "(", "self", ")", ":", "# check if there's already a cell footer... if true, do not add a second cell footer.", "# this situation happens when exporting to ipynb and then importing from ipynb.", "logging", ".", "info", "(", "'Adding footer cell'", ")", "for", "cell", "in", "self", ".", "nb", "[", "'cells'", "]", ":", "if", "cell", ".", "cell_type", "==", "'markdown'", ":", "if", "'pynb_footer_tag'", "in", "cell", ".", "source", ":", "logging", ".", "debug", "(", "'Footer cell already present'", ")", "return", "m", "=", "\"\"\"\n\n ---\n * **Notebook class name**: {class_name}\n * **Notebook cells name**: {cells_name}\n * **Execution time**: {exec_begin}\n * **Execution duration**: {exec_time:.2f}s\n * **Command line**: {argv}\n [//]: # (pynb_footer_tag)\n \"\"\"", "self", ".", "add_cell_markdown", "(", "m", ".", "format", "(", "exec_time", "=", "self", ".", "exec_time", ",", "exec_begin", "=", "self", ".", "exec_begin_dt", ",", "class_name", "=", "self", ".", "__class__", ".", "__name__", ",", "argv", "=", "str", "(", "sys", ".", "argv", ")", ",", "cells_name", "=", "self", ".", "cells_name", ")", ")" ]
Add footer cell
[ "Add", "footer", "cell" ]
a32af1f0e574f880eccda4a46aede6d65151f8c9
https://github.com/elehcimd/pynb/blob/a32af1f0e574f880eccda4a46aede6d65151f8c9/pynb/notebook.py#L278-L306
-1
251,226
elehcimd/pynb
pynb/notebook.py
Notebook.get_kernelspec
def get_kernelspec(self, name):
        """Get a kernel specification dictionary given a kernel name
        """
        ksm = KernelSpecManager()
        kernelspec = ksm.get_kernel_spec(name).to_dict()
        kernelspec['name'] = name
        kernelspec.pop('argv')
        return kernelspec
python
def get_kernelspec(self, name):
        """Get a kernel specification dictionary given a kernel name
        """
        ksm = KernelSpecManager()
        kernelspec = ksm.get_kernel_spec(name).to_dict()
        kernelspec['name'] = name
        kernelspec.pop('argv')
        return kernelspec
[ "def", "get_kernelspec", "(", "self", ",", "name", ")", ":", "ksm", "=", "KernelSpecManager", "(", ")", "kernelspec", "=", "ksm", ".", "get_kernel_spec", "(", "name", ")", ".", "to_dict", "(", ")", "kernelspec", "[", "'name'", "]", "=", "name", "kernelspec", ".", "pop", "(", "'argv'", ")", "return", "kernelspec" ]
Get a kernel specification dictionary given a kernel name
[ "Get", "a", "kernel", "specification", "dictionary", "given", "a", "kernel", "name" ]
a32af1f0e574f880eccda4a46aede6d65151f8c9
https://github.com/elehcimd/pynb/blob/a32af1f0e574f880eccda4a46aede6d65151f8c9/pynb/notebook.py#L558-L565
-1
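The same lookup can be done outside the Notebook class; below is a sketch assuming jupyter_client is installed and a 'python3' kernel is registered on the machine.

from jupyter_client.kernelspec import KernelSpecManager

def get_kernelspec(name):
    ksm = KernelSpecManager()
    spec = ksm.get_kernel_spec(name).to_dict()  # display name, language, argv, ...
    spec['name'] = name
    spec.pop('argv')  # drop the launch command, as the method above does
    return spec

print(get_kernelspec('python3'))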
251,227
elehcimd/pynb
fabfile.py
docker_start
def docker_start(develop=True):
    """
    Start docker container
    """

    curr_dir = os.path.dirname(os.path.realpath(__file__))
    local('docker run --rm --name pynb -d -ti -p 127.0.0.1:8889:8888 -v {}:/code -t pynb'.format(curr_dir))

    if develop:
        # Install package in develop mode: the code in /code is mapped to the installed package.
        docker_exec('python3 setup.py develop')

    print('Jupyter available at http://127.0.0.1:8889')
python
def docker_start(develop=True):
    """
    Start docker container
    """

    curr_dir = os.path.dirname(os.path.realpath(__file__))
    local('docker run --rm --name pynb -d -ti -p 127.0.0.1:8889:8888 -v {}:/code -t pynb'.format(curr_dir))

    if develop:
        # Install package in develop mode: the code in /code is mapped to the installed package.
        docker_exec('python3 setup.py develop')

    print('Jupyter available at http://127.0.0.1:8889')
[ "def", "docker_start", "(", "develop", "=", "True", ")", ":", "curr_dir", "=", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "realpath", "(", "__file__", ")", ")", "local", "(", "'docker run --rm --name pynb -d -ti -p 127.0.0.1:8889:8888 -v {}:/code -t pynb'", ".", "format", "(", "curr_dir", ")", ")", "if", "develop", ":", "# Install package in develop mode: the code in /code is mapped to the installed package.", "docker_exec", "(", "'python3 setup.py develop'", ")", "print", "(", "'Jupyter available at http://127.0.0.1:8889'", ")" ]
Start docker container
[ "Start", "docker", "container" ]
a32af1f0e574f880eccda4a46aede6d65151f8c9
https://github.com/elehcimd/pynb/blob/a32af1f0e574f880eccda4a46aede6d65151f8c9/fabfile.py#L99-L110
-1
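Outside Fabric, the container start amounts to a single docker command; the sketch below uses only the standard library, with the same image, port, and volume choices as the task above.

import os
import subprocess

curr_dir = os.path.dirname(os.path.realpath(__file__))
# Same arguments as the fabfile task: detached container, Jupyter on 8889,
# the repository mounted at /code inside the container.
subprocess.run(
    'docker run --rm --name pynb -d -ti -p 127.0.0.1:8889:8888 '
    '-v {}:/code -t pynb'.format(curr_dir),
    shell=True, check=True)
print('Jupyter available at http://127.0.0.1:8889')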
251,228
glut23/webvtt-py
webvtt/parsers.py
TextBasedParser.read
def read(self, file):
        """Reads the captions file."""
        content = self._read_content(file)
        self._validate(content)
        self._parse(content)

        return self
python
def read(self, file):
        """Reads the captions file."""
        content = self._read_content(file)
        self._validate(content)
        self._parse(content)

        return self
[ "def", "read", "(", "self", ",", "file", ")", ":", "content", "=", "self", ".", "_read_content", "(", "file", ")", "self", ".", "_validate", "(", "content", ")", "self", ".", "_parse", "(", "content", ")", "return", "self" ]
Reads the captions file.
[ "Reads", "the", "captions", "file", "." ]
7b4da0123c2e2afaf31402107528721eb1d3d481
https://github.com/glut23/webvtt-py/blob/7b4da0123c2e2afaf31402107528721eb1d3d481/webvtt/parsers.py#L22-L28
-1
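read() is a template method: subclasses supply _read_content, _validate and _parse. The toy class below is not one of webvtt-py's real parsers, just a sketch of that shape.

class ToyParser:
    def read(self, file):
        content = self._read_content(file)
        self._validate(content)
        self._parse(content)
        return self

    def _read_content(self, file):
        with open(file, encoding='utf-8') as f:
            return f.read().splitlines()

    def _validate(self, lines):
        if not lines or not lines[0].startswith('WEBVTT'):
            raise ValueError('missing WEBVTT header')

    def _parse(self, lines):
        # keep non-empty lines as a stand-in for real caption parsing
        self.captions = [line for line in lines[1:] if line]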
251,229
glut23/webvtt-py
webvtt/parsers.py
TextBasedParser._parse_timeframe_line
def _parse_timeframe_line(self, line):
        """Parse timeframe line and return start and end timestamps."""
        tf = self._validate_timeframe_line(line)
        if not tf:
            raise MalformedCaptionError('Invalid time format')

        return tf.group(1), tf.group(2)
python
def _parse_timeframe_line(self, line):
        """Parse timeframe line and return start and end timestamps."""
        tf = self._validate_timeframe_line(line)
        if not tf:
            raise MalformedCaptionError('Invalid time format')

        return tf.group(1), tf.group(2)
[ "def", "_parse_timeframe_line", "(", "self", ",", "line", ")", ":", "tf", "=", "self", ".", "_validate_timeframe_line", "(", "line", ")", "if", "not", "tf", ":", "raise", "MalformedCaptionError", "(", "'Invalid time format'", ")", "return", "tf", ".", "group", "(", "1", ")", ",", "tf", ".", "group", "(", "2", ")" ]
Parse timeframe line and return start and end timestamps.
[ "Parse", "timeframe", "line", "and", "return", "start", "and", "end", "timestamps", "." ]
7b4da0123c2e2afaf31402107528721eb1d3d481
https://github.com/glut23/webvtt-py/blob/7b4da0123c2e2afaf31402107528721eb1d3d481/webvtt/parsers.py#L49-L55
-1
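The actual timing regex lives in _validate_timeframe_line and is not part of this record; the pattern below is only an approximation of a WebVTT cue timing line.

import re

# Approximate pattern for '00:00:01.000 --> 00:00:04.000' (hours optional).
CUE_TIMING = re.compile(
    r'\s*((?:\d+:)?\d{2}:\d{2}\.\d{3})\s*-->\s*((?:\d+:)?\d{2}:\d{2}\.\d{3})')

match = CUE_TIMING.match('00:00:01.000 --> 00:00:04.000')
if match:
    start, end = match.group(1), match.group(2)
    print(start, end)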
251,230
glut23/webvtt-py
webvtt/cli.py
main
def main():
    """Main entry point for CLI commands."""
    options = docopt(__doc__, version=__version__)
    if options['segment']:
        segment(
            options['<file>'],
            options['--output'],
            options['--target-duration'],
            options['--mpegts'],
        )
python
def main():
    """Main entry point for CLI commands."""
    options = docopt(__doc__, version=__version__)
    if options['segment']:
        segment(
            options['<file>'],
            options['--output'],
            options['--target-duration'],
            options['--mpegts'],
        )
[ "def", "main", "(", ")", ":", "options", "=", "docopt", "(", "__doc__", ",", "version", "=", "__version__", ")", "if", "options", "[", "'segment'", "]", ":", "segment", "(", "options", "[", "'<file>'", "]", ",", "options", "[", "'--output'", "]", ",", "options", "[", "'--target-duration'", "]", ",", "options", "[", "'--mpegts'", "]", ",", ")" ]
Main entry point for CLI commands.
[ "Main", "entry", "point", "for", "CLI", "commands", "." ]
7b4da0123c2e2afaf31402107528721eb1d3d481
https://github.com/glut23/webvtt-py/blob/7b4da0123c2e2afaf31402107528721eb1d3d481/webvtt/cli.py#L23-L32
-1
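docopt derives the options dictionary from the module docstring; the usage text of webvtt/cli.py is not in this record, so the docstring in the sketch below is illustrative only.

"""
Usage:
  webvtt segment <file> [--output=<dir>] [--target-duration=SECONDS] [--mpegts=OFFSET]
"""
from docopt import docopt

# argv is passed explicitly so the sketch runs without command-line arguments
options = docopt(__doc__, argv=['segment', 'captions.vtt'])
print(options['segment'], options['<file>'], options['--output'])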
251,231
glut23/webvtt-py
webvtt/cli.py
segment
def segment(f, output, target_duration, mpegts):
    """Segment command."""
    try:
        target_duration = int(target_duration)
    except ValueError:
        exit('Error: Invalid target duration.')

    try:
        mpegts = int(mpegts)
    except ValueError:
        exit('Error: Invalid MPEGTS value.')

    WebVTTSegmenter().segment(f, output, target_duration, mpegts)
python
def segment(f, output, target_duration, mpegts):
    """Segment command."""
    try:
        target_duration = int(target_duration)
    except ValueError:
        exit('Error: Invalid target duration.')

    try:
        mpegts = int(mpegts)
    except ValueError:
        exit('Error: Invalid MPEGTS value.')

    WebVTTSegmenter().segment(f, output, target_duration, mpegts)
[ "def", "segment", "(", "f", ",", "output", ",", "target_duration", ",", "mpegts", ")", ":", "try", ":", "target_duration", "=", "int", "(", "target_duration", ")", "except", "ValueError", ":", "exit", "(", "'Error: Invalid target duration.'", ")", "try", ":", "mpegts", "=", "int", "(", "mpegts", ")", "except", "ValueError", ":", "exit", "(", "'Error: Invalid MPEGTS value.'", ")", "WebVTTSegmenter", "(", ")", ".", "segment", "(", "f", ",", "output", ",", "target_duration", ",", "mpegts", ")" ]
Segment command.
[ "Segment", "command", "." ]
7b4da0123c2e2afaf31402107528721eb1d3d481
https://github.com/glut23/webvtt-py/blob/7b4da0123c2e2afaf31402107528721eb1d3d481/webvtt/cli.py#L35-L47
-1
251,232
glut23/webvtt-py
webvtt/webvtt.py
WebVTT.from_srt
def from_srt(cls, file):
        """Reads captions from a file in SubRip format."""
        parser = SRTParser().read(file)
        return cls(file=file, captions=parser.captions)
python
def from_srt(cls, file):
        """Reads captions from a file in SubRip format."""
        parser = SRTParser().read(file)
        return cls(file=file, captions=parser.captions)
[ "def", "from_srt", "(", "cls", ",", "file", ")", ":", "parser", "=", "SRTParser", "(", ")", ".", "read", "(", "file", ")", "return", "cls", "(", "file", "=", "file", ",", "captions", "=", "parser", ".", "captions", ")" ]
Reads captions from a file in SubRip format.
[ "Reads", "captions", "from", "a", "file", "in", "SubRip", "format", "." ]
7b4da0123c2e2afaf31402107528721eb1d3d481
https://github.com/glut23/webvtt-py/blob/7b4da0123c2e2afaf31402107528721eb1d3d481/webvtt/webvtt.py#L46-L49
-1
251,233
glut23/webvtt-py
webvtt/webvtt.py
WebVTT.from_sbv
def from_sbv(cls, file):
        """Reads captions from a file in YouTube SBV format."""
        parser = SBVParser().read(file)
        return cls(file=file, captions=parser.captions)
python
def from_sbv(cls, file):
        """Reads captions from a file in YouTube SBV format."""
        parser = SBVParser().read(file)
        return cls(file=file, captions=parser.captions)
[ "def", "from_sbv", "(", "cls", ",", "file", ")", ":", "parser", "=", "SBVParser", "(", ")", ".", "read", "(", "file", ")", "return", "cls", "(", "file", "=", "file", ",", "captions", "=", "parser", ".", "captions", ")" ]
Reads captions from a file in YouTube SBV format.
[ "Reads", "captions", "from", "a", "file", "in", "YouTube", "SBV", "format", "." ]
7b4da0123c2e2afaf31402107528721eb1d3d481
https://github.com/glut23/webvtt-py/blob/7b4da0123c2e2afaf31402107528721eb1d3d481/webvtt/webvtt.py#L52-L55
-1
251,234
glut23/webvtt-py
webvtt/webvtt.py
WebVTT.read
def read(cls, file):
        """Reads a WebVTT captions file."""
        parser = WebVTTParser().read(file)
        return cls(file=file, captions=parser.captions, styles=parser.styles)
python
def read(cls, file):
        """Reads a WebVTT captions file."""
        parser = WebVTTParser().read(file)
        return cls(file=file, captions=parser.captions, styles=parser.styles)
[ "def", "read", "(", "cls", ",", "file", ")", ":", "parser", "=", "WebVTTParser", "(", ")", ".", "read", "(", "file", ")", "return", "cls", "(", "file", "=", "file", ",", "captions", "=", "parser", ".", "captions", ",", "styles", "=", "parser", ".", "styles", ")" ]
Reads a WebVTT captions file.
[ "Reads", "a", "WebVTT", "captions", "file", "." ]
7b4da0123c2e2afaf31402107528721eb1d3d481
https://github.com/glut23/webvtt-py/blob/7b4da0123c2e2afaf31402107528721eb1d3d481/webvtt/webvtt.py#L58-L61
-1
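A usage sketch for the readers above, assuming webvtt-py is installed and the referenced files exist locally.

from webvtt import WebVTT

vtt = WebVTT.read('captions.vtt')            # native WebVTT file
converted = WebVTT.from_srt('captions.srt')  # parsed from SubRip
for caption in vtt.captions:
    print(caption.start, '-->', caption.end)
    print(caption.text)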
251,235
glut23/webvtt-py
webvtt/webvtt.py
WebVTT.save
def save(self, output=''):
        """Save the document.

        If no output is provided the file will be saved in the same location.
        Otherwise output can determine a target directory or file.
        """
        self.file = self._get_output_file(output)
        with open(self.file, 'w', encoding='utf-8') as f:
            self.write(f)
python
def save(self, output=''):
        """Save the document.

        If no output is provided the file will be saved in the same location.
        Otherwise output can determine a target directory or file.
        """
        self.file = self._get_output_file(output)
        with open(self.file, 'w', encoding='utf-8') as f:
            self.write(f)
[ "def", "save", "(", "self", ",", "output", "=", "''", ")", ":", "self", ".", "file", "=", "self", ".", "_get_output_file", "(", "output", ")", "with", "open", "(", "self", ".", "file", ",", "'w'", ",", "encoding", "=", "'utf-8'", ")", "as", "f", ":", "self", ".", "write", "(", "f", ")" ]
Save the document. If no output is provided the file will be saved in the same location. Otherwise output can determine a target directory or file.
[ "Save", "the", "document", ".", "If", "no", "output", "is", "provided", "the", "file", "will", "be", "saved", "in", "the", "same", "location", ".", "Otherwise", "output", "can", "determine", "a", "target", "directory", "or", "file", "." ]
7b4da0123c2e2afaf31402107528721eb1d3d481
https://github.com/glut23/webvtt-py/blob/7b4da0123c2e2afaf31402107528721eb1d3d481/webvtt/webvtt.py#L84-L91
-1
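Combining a reader with save() converts a file into a chosen target; a sketch assuming webvtt-py is installed, 'captions.srt' exists, and the target directory already exists.

from webvtt import WebVTT

# Per the docstring above, output may be a target directory or a file path.
WebVTT.from_srt('captions.srt').save('converted/captions.vtt')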
251,236
glut23/webvtt-py
webvtt/webvtt.py
WebVTT.total_length
def total_length(self):
        """Returns the total length of the captions."""
        if not self._captions:
            return 0
        return int(self._captions[-1].end_in_seconds) - int(self._captions[0].start_in_seconds)
python
def total_length(self):
        """Returns the total length of the captions."""
        if not self._captions:
            return 0
        return int(self._captions[-1].end_in_seconds) - int(self._captions[0].start_in_seconds)
[ "def", "total_length", "(", "self", ")", ":", "if", "not", "self", ".", "_captions", ":", "return", "0", "return", "int", "(", "self", ".", "_captions", "[", "-", "1", "]", ".", "end_in_seconds", ")", "-", "int", "(", "self", ".", "_captions", "[", "0", "]", ".", "start_in_seconds", ")" ]
Returns the total length of the captions.
[ "Returns", "the", "total", "length", "of", "the", "captions", "." ]
7b4da0123c2e2afaf31402107528721eb1d3d481
https://github.com/glut23/webvtt-py/blob/7b4da0123c2e2afaf31402107528721eb1d3d481/webvtt/webvtt.py#L117-L121
-1
251,237
glut23/webvtt-py
webvtt/segmenter.py
WebVTTSegmenter.segment
def segment(self, webvtt, output='', seconds=SECONDS, mpegts=MPEGTS):
        """Segments the captions based on a number of seconds."""
        if isinstance(webvtt, str):
            # if a string is supplied we parse the file
            captions = WebVTT().read(webvtt).captions
        elif not self._validate_webvtt(webvtt):
            raise InvalidCaptionsError('The captions provided are invalid')
        else:
            # we expect to have a webvtt object
            captions = webvtt.captions

        self._total_segments = 0 if not captions else int(
            ceil(captions[-1].end_in_seconds / seconds))
        self._output_folder = output
        self._seconds = seconds
        self._mpegts = mpegts

        output_folder = os.path.join(os.getcwd(), output)
        if not os.path.exists(output_folder):
            os.makedirs(output_folder)

        self._slice_segments(captions)
        self._write_segments()
        self._write_manifest()
python
def segment(self, webvtt, output='', seconds=SECONDS, mpegts=MPEGTS):
        """Segments the captions based on a number of seconds."""
        if isinstance(webvtt, str):
            # if a string is supplied we parse the file
            captions = WebVTT().read(webvtt).captions
        elif not self._validate_webvtt(webvtt):
            raise InvalidCaptionsError('The captions provided are invalid')
        else:
            # we expect to have a webvtt object
            captions = webvtt.captions

        self._total_segments = 0 if not captions else int(
            ceil(captions[-1].end_in_seconds / seconds))
        self._output_folder = output
        self._seconds = seconds
        self._mpegts = mpegts

        output_folder = os.path.join(os.getcwd(), output)
        if not os.path.exists(output_folder):
            os.makedirs(output_folder)

        self._slice_segments(captions)
        self._write_segments()
        self._write_manifest()
[ "def", "segment", "(", "self", ",", "webvtt", ",", "output", "=", "''", ",", "seconds", "=", "SECONDS", ",", "mpegts", "=", "MPEGTS", ")", ":", "if", "isinstance", "(", "webvtt", ",", "str", ")", ":", "# if a string is supplied we parse the file", "captions", "=", "WebVTT", "(", ")", ".", "read", "(", "webvtt", ")", ".", "captions", "elif", "not", "self", ".", "_validate_webvtt", "(", "webvtt", ")", ":", "raise", "InvalidCaptionsError", "(", "'The captions provided are invalid'", ")", "else", ":", "# we expect to have a webvtt object", "captions", "=", "webvtt", ".", "captions", "self", ".", "_total_segments", "=", "0", "if", "not", "captions", "else", "int", "(", "ceil", "(", "captions", "[", "-", "1", "]", ".", "end_in_seconds", "/", "seconds", ")", ")", "self", ".", "_output_folder", "=", "output", "self", ".", "_seconds", "=", "seconds", "self", ".", "_mpegts", "=", "mpegts", "output_folder", "=", "os", ".", "path", ".", "join", "(", "os", ".", "getcwd", "(", ")", ",", "output", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "output_folder", ")", ":", "os", ".", "makedirs", "(", "output_folder", ")", "self", ".", "_slice_segments", "(", "captions", ")", "self", ".", "_write_segments", "(", ")", "self", ".", "_write_manifest", "(", ")" ]
Segments the captions based on a number of seconds.
[ "Segments", "the", "captions", "based", "on", "a", "number", "of", "seconds", "." ]
7b4da0123c2e2afaf31402107528721eb1d3d481
https://github.com/glut23/webvtt-py/blob/7b4da0123c2e2afaf31402107528721eb1d3d481/webvtt/segmenter.py#L73-L95
-1
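A usage sketch for the segmenter, assuming webvtt-py is installed and 'captions.vtt' exists; it writes numbered segment files plus a playlist/manifest into the output folder.

from webvtt.segmenter import WebVTTSegmenter

# 10-second segments written to ./segments (created if missing, per the code above)
WebVTTSegmenter().segment('captions.vtt', output='segments', seconds=10)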
251,238
simonw/csvs-to-sqlite
csvs_to_sqlite/utils.py
best_fts_version
def best_fts_version():
    "Discovers the most advanced supported SQLite FTS version"
    conn = sqlite3.connect(":memory:")
    for fts in ("FTS5", "FTS4", "FTS3"):
        try:
            conn.execute("CREATE VIRTUAL TABLE v USING {} (t TEXT);".format(fts))
            return fts
        except sqlite3.OperationalError:
            continue
    return None
python
def best_fts_version():
    "Discovers the most advanced supported SQLite FTS version"
    conn = sqlite3.connect(":memory:")
    for fts in ("FTS5", "FTS4", "FTS3"):
        try:
            conn.execute("CREATE VIRTUAL TABLE v USING {} (t TEXT);".format(fts))
            return fts
        except sqlite3.OperationalError:
            continue
    return None
[ "def", "best_fts_version", "(", ")", ":", "conn", "=", "sqlite3", ".", "connect", "(", "\":memory:\"", ")", "for", "fts", "in", "(", "\"FTS5\"", ",", "\"FTS4\"", ",", "\"FTS3\"", ")", ":", "try", ":", "conn", ".", "execute", "(", "\"CREATE VIRTUAL TABLE v USING {} (t TEXT);\"", ".", "format", "(", "fts", ")", ")", "return", "fts", "except", "sqlite3", ".", "OperationalError", ":", "continue", "return", "None" ]
Discovers the most advanced supported SQLite FTS version
[ "Discovers", "the", "most", "advanced", "supported", "SQLite", "FTS", "version" ]
0a014284eac75c1b06cbdaca362f2a66648c11d2
https://github.com/simonw/csvs-to-sqlite/blob/0a014284eac75c1b06cbdaca362f2a66648c11d2/csvs_to_sqlite/utils.py#L359-L368
-1
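Using the detected FTS version to build a search table; a sketch that assumes csvs-to-sqlite is installed (for the import) and otherwise relies only on the standard-library sqlite3 module.

import sqlite3

from csvs_to_sqlite.utils import best_fts_version

fts = best_fts_version()  # e.g. 'FTS5' on most modern SQLite builds
conn = sqlite3.connect(':memory:')
if fts:
    conn.execute('CREATE VIRTUAL TABLE docs USING {} (content TEXT)'.format(fts))
    conn.execute("INSERT INTO docs (content) VALUES ('hello full text search')")
    print(conn.execute("SELECT * FROM docs WHERE docs MATCH 'hello'").fetchall())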
251,239
rochacbruno/manage
manage/commands_collector.py
add_click_commands
def add_click_commands(module, cli, command_dict, namespaced):
    """Loads all click commands"""
    module_commands = [
        item for item in getmembers(module)
        if isinstance(item[1], BaseCommand)
    ]
    options = command_dict.get('config', {})
    namespace = command_dict.get('namespace')
    for name, function in module_commands:
        f_options = options.get(name, {})
        command_name = f_options.get('name', getattr(function, 'name', name))
        if namespace:
            command_name = '{}_{}'.format(namespace, command_name)
        elif namespaced:
            module_namespace = module.__name__.split('.')[-1]
            command_name = '{}_{}'.format(module_namespace, command_name)
        function.short_help = f_options.get('help_text', function.short_help)
        cli.add_command(function, name=command_name)
python
def add_click_commands(module, cli, command_dict, namespaced):
    """Loads all click commands"""
    module_commands = [
        item for item in getmembers(module)
        if isinstance(item[1], BaseCommand)
    ]
    options = command_dict.get('config', {})
    namespace = command_dict.get('namespace')
    for name, function in module_commands:
        f_options = options.get(name, {})
        command_name = f_options.get('name', getattr(function, 'name', name))
        if namespace:
            command_name = '{}_{}'.format(namespace, command_name)
        elif namespaced:
            module_namespace = module.__name__.split('.')[-1]
            command_name = '{}_{}'.format(module_namespace, command_name)
        function.short_help = f_options.get('help_text', function.short_help)
        cli.add_command(function, name=command_name)
[ "def", "add_click_commands", "(", "module", ",", "cli", ",", "command_dict", ",", "namespaced", ")", ":", "module_commands", "=", "[", "item", "for", "item", "in", "getmembers", "(", "module", ")", "if", "isinstance", "(", "item", "[", "1", "]", ",", "BaseCommand", ")", "]", "options", "=", "command_dict", ".", "get", "(", "'config'", ",", "{", "}", ")", "namespace", "=", "command_dict", ".", "get", "(", "'namespace'", ")", "for", "name", ",", "function", "in", "module_commands", ":", "f_options", "=", "options", ".", "get", "(", "name", ",", "{", "}", ")", "command_name", "=", "f_options", ".", "get", "(", "'name'", ",", "getattr", "(", "function", ",", "'name'", ",", "name", ")", ")", "if", "namespace", ":", "command_name", "=", "'{}_{}'", ".", "format", "(", "namespace", ",", "command_name", ")", "elif", "namespaced", ":", "module_namespace", "=", "module", ".", "__name__", ".", "split", "(", "'.'", ")", "[", "-", "1", "]", "command_name", "=", "'{}_{}'", ".", "format", "(", "module_namespace", ",", "command_name", ")", "function", ".", "short_help", "=", "f_options", ".", "get", "(", "'help_text'", ",", "function", ".", "short_help", ")", "cli", ".", "add_command", "(", "function", ",", "name", "=", "command_name", ")" ]
Loads all click commands
[ "Loads", "all", "click", "commands" ]
e904c451862f036f4be8723df5704a9844103c74
https://github.com/rochacbruno/manage/blob/e904c451862f036f4be8723df5704a9844103c74/manage/commands_collector.py#L10-L27
-1
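A self-contained sketch of the discovery pattern above using click alone: BaseCommand instances found in a module are re-registered on a group, optionally prefixed with the module's name. The fake module and names are illustrative, not part of the manage package.

import types
from inspect import getmembers

import click

@click.command()
def hello():
    """Say hello"""
    click.echo('hello')

fake_module = types.SimpleNamespace(__name__='commands.greetings', hello=hello)
cli = click.Group()

for name, obj in getmembers(fake_module):
    if isinstance(obj, click.BaseCommand):
        # namespaced=True in the record above would yield 'greetings_hello'
        cli.add_command(obj, name='greetings_' + name)

print(sorted(cli.commands))  # ['greetings_hello']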
251,240
rochacbruno/manage
manage/commands_collector.py
load_commands
def load_commands(cli, manage_dict):
    """Loads the commands defined in manage file"""
    namespaced = manage_dict.get('namespaced')

    # get click commands
    commands = manage_dict.get('click_commands', [])
    for command_dict in commands:
        root_module = import_string(command_dict['module'])
        group = cli.manage_groups.get(command_dict.get('group'), cli)
        if getattr(root_module, '__path__', None):
            # This is a package
            iter_modules = pkgutil.iter_modules(
                root_module.__path__, prefix=root_module.__name__ + '.'
            )
            submodules_names = [item[1] for item in iter_modules]
            submodules = [import_string(name) for name in submodules_names]
            for module in submodules:
                add_click_commands(module, group, command_dict, namespaced)
        else:
            # a single file module
            add_click_commands(root_module, group, command_dict, namespaced)

    # get inline commands
    commands = manage_dict.get('inline_commands', [])
    for command_dict in commands:
        name = command_dict['name']
        help_text = command_dict.get('help_text')
        options = command_dict.get('options', {})
        arguments = command_dict.get('arguments', {})
        context = command_dict.get('context', [])
        code = command_dict['code']
        group = cli.manage_groups.get(command_dict.get('group'), cli)
        group.add_command(
            make_command_from_string(
                code=code,
                cmd_context=get_context(context),
                options=options,
                arguments=arguments,
                help_text=help_text
            ),
            name=name
        )

    # get function commands
    commands = manage_dict.get('function_commands', [])
    for command_dict in commands:
        name = command_dict['name']
        help_text = command_dict.get('help_text')
        options = command_dict.get('options', {})
        arguments = command_dict.get('arguments', {})
        function = import_string(command_dict['function'])
        group = cli.manage_groups.get(command_dict.get('group'), cli)
        group.add_command(
            make_command_from_function(
                function=function,
                options=options,
                arguments=arguments,
                help_text=help_text
            ),
            name=name
        )
python
def load_commands(cli, manage_dict):
    """Loads the commands defined in manage file"""
    namespaced = manage_dict.get('namespaced')

    # get click commands
    commands = manage_dict.get('click_commands', [])
    for command_dict in commands:
        root_module = import_string(command_dict['module'])
        group = cli.manage_groups.get(command_dict.get('group'), cli)
        if getattr(root_module, '__path__', None):
            # This is a package
            iter_modules = pkgutil.iter_modules(
                root_module.__path__, prefix=root_module.__name__ + '.'
            )
            submodules_names = [item[1] for item in iter_modules]
            submodules = [import_string(name) for name in submodules_names]
            for module in submodules:
                add_click_commands(module, group, command_dict, namespaced)
        else:
            # a single file module
            add_click_commands(root_module, group, command_dict, namespaced)

    # get inline commands
    commands = manage_dict.get('inline_commands', [])
    for command_dict in commands:
        name = command_dict['name']
        help_text = command_dict.get('help_text')
        options = command_dict.get('options', {})
        arguments = command_dict.get('arguments', {})
        context = command_dict.get('context', [])
        code = command_dict['code']
        group = cli.manage_groups.get(command_dict.get('group'), cli)
        group.add_command(
            make_command_from_string(
                code=code,
                cmd_context=get_context(context),
                options=options,
                arguments=arguments,
                help_text=help_text
            ),
            name=name
        )

    # get function commands
    commands = manage_dict.get('function_commands', [])
    for command_dict in commands:
        name = command_dict['name']
        help_text = command_dict.get('help_text')
        options = command_dict.get('options', {})
        arguments = command_dict.get('arguments', {})
        function = import_string(command_dict['function'])
        group = cli.manage_groups.get(command_dict.get('group'), cli)
        group.add_command(
            make_command_from_function(
                function=function,
                options=options,
                arguments=arguments,
                help_text=help_text
            ),
            name=name
        )
[ "def", "load_commands", "(", "cli", ",", "manage_dict", ")", ":", "namespaced", "=", "manage_dict", ".", "get", "(", "'namespaced'", ")", "# get click commands", "commands", "=", "manage_dict", ".", "get", "(", "'click_commands'", ",", "[", "]", ")", "for", "command_dict", "in", "commands", ":", "root_module", "=", "import_string", "(", "command_dict", "[", "'module'", "]", ")", "group", "=", "cli", ".", "manage_groups", ".", "get", "(", "command_dict", ".", "get", "(", "'group'", ")", ",", "cli", ")", "if", "getattr", "(", "root_module", ",", "'__path__'", ",", "None", ")", ":", "# This is a package", "iter_modules", "=", "pkgutil", ".", "iter_modules", "(", "root_module", ".", "__path__", ",", "prefix", "=", "root_module", ".", "__name__", "+", "'.'", ")", "submodules_names", "=", "[", "item", "[", "1", "]", "for", "item", "in", "iter_modules", "]", "submodules", "=", "[", "import_string", "(", "name", ")", "for", "name", "in", "submodules_names", "]", "for", "module", "in", "submodules", ":", "add_click_commands", "(", "module", ",", "group", ",", "command_dict", ",", "namespaced", ")", "else", ":", "# a single file module", "add_click_commands", "(", "root_module", ",", "group", ",", "command_dict", ",", "namespaced", ")", "# get inline commands", "commands", "=", "manage_dict", ".", "get", "(", "'inline_commands'", ",", "[", "]", ")", "for", "command_dict", "in", "commands", ":", "name", "=", "command_dict", "[", "'name'", "]", "help_text", "=", "command_dict", ".", "get", "(", "'help_text'", ")", "options", "=", "command_dict", ".", "get", "(", "'options'", ",", "{", "}", ")", "arguments", "=", "command_dict", ".", "get", "(", "'arguments'", ",", "{", "}", ")", "context", "=", "command_dict", ".", "get", "(", "'context'", ",", "[", "]", ")", "code", "=", "command_dict", "[", "'code'", "]", "group", "=", "cli", ".", "manage_groups", ".", "get", "(", "command_dict", ".", "get", "(", "'group'", ")", ",", "cli", ")", "group", ".", "add_command", "(", "make_command_from_string", "(", "code", "=", "code", ",", "cmd_context", "=", "get_context", "(", "context", ")", ",", "options", "=", "options", ",", "arguments", "=", "arguments", ",", "help_text", "=", "help_text", ")", ",", "name", "=", "name", ")", "# get function commands", "commands", "=", "manage_dict", ".", "get", "(", "'function_commands'", ",", "[", "]", ")", "for", "command_dict", "in", "commands", ":", "name", "=", "command_dict", "[", "'name'", "]", "help_text", "=", "command_dict", ".", "get", "(", "'help_text'", ")", "options", "=", "command_dict", ".", "get", "(", "'options'", ",", "{", "}", ")", "arguments", "=", "command_dict", ".", "get", "(", "'arguments'", ",", "{", "}", ")", "function", "=", "import_string", "(", "command_dict", "[", "'function'", "]", ")", "group", "=", "cli", ".", "manage_groups", ".", "get", "(", "command_dict", ".", "get", "(", "'group'", ")", ",", "cli", ")", "group", ".", "add_command", "(", "make_command_from_function", "(", "function", "=", "function", ",", "options", "=", "options", ",", "arguments", "=", "arguments", ",", "help_text", "=", "help_text", ")", ",", "name", "=", "name", ")" ]
Loads the commands defined in manage file
[ "Loads", "the", "commands", "defined", "in", "manage", "file" ]
e904c451862f036f4be8723df5704a9844103c74
https://github.com/rochacbruno/manage/blob/e904c451862f036f4be8723df5704a9844103c74/manage/commands_collector.py#L83-L143
-1
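The dictionary shape load_commands() consumes, reconstructed from the .get() calls above; module paths and values are illustrative, and the snippet only builds the dictionary rather than invoking the manage CLI.

manage_dict = {
    'namespaced': True,
    'click_commands': [
        {'module': 'myproject.commands', 'config': {}},
    ],
    'inline_commands': [
        {'name': 'clean', 'help_text': 'Remove build artifacts',
         'options': {}, 'arguments': {}, 'context': [],
         'code': "print('cleaning...')"},
    ],
    'function_commands': [
        {'name': 'runserver', 'help_text': 'Start the dev server',
         'options': {}, 'arguments': {},
         'function': 'myproject.server.run'},
    ],
}
print(sorted(manage_dict))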
251,241
rochacbruno/manage
manage/cli.py
debug
def debug(version=False):
    """Shows the parsed manage file -V shows version"""
    if version:
        print(__version__)
        return
    print(json.dumps(MANAGE_DICT, indent=2))
python
def debug(version=False):
    """Shows the parsed manage file -V shows version"""
    if version:
        print(__version__)
        return
    print(json.dumps(MANAGE_DICT, indent=2))
[ "def", "debug", "(", "version", "=", "False", ")", ":", "if", "version", ":", "print", "(", "__version__", ")", "return", "print", "(", "json", ".", "dumps", "(", "MANAGE_DICT", ",", "indent", "=", "2", ")", ")" ]
Shows the parsed manage file -V shows version
[ "Shows", "the", "parsed", "manage", "file", "-", "V", "shows", "version" ]
e904c451862f036f4be8723df5704a9844103c74
https://github.com/rochacbruno/manage/blob/e904c451862f036f4be8723df5704a9844103c74/manage/cli.py#L95-L100
-1
251,242
rochacbruno/manage
manage/cli.py
create_shell
def create_shell(console, manage_dict=None, extra_vars=None, exit_hooks=None):
    """Creates the shell"""
    manage_dict = manage_dict or MANAGE_DICT
    _vars = globals()
    _vars.update(locals())
    auto_imported = import_objects(manage_dict)
    if extra_vars:
        auto_imported.update(extra_vars)
    _vars.update(auto_imported)
    msgs = []
    if manage_dict['shell']['banner']['enabled']:
        msgs.append(
            manage_dict['shell']['banner']['message'].format(**manage_dict)
        )
    if auto_imported and manage_dict['shell']['auto_import']['display']:
        auto_imported_names = [
            key for key in auto_imported.keys()
            if key not in ['__builtins__', 'builtins']
        ]
        msgs.append('\tAuto imported: {0}\n'.format(auto_imported_names))

    banner_msg = u'\n'.join(msgs)

    exec_init(manage_dict, _vars)
    exec_init_script(manage_dict, _vars)

    atexit_functions = [
        import_string(func_name)
        for func_name in manage_dict['shell'].get('exit_hooks', [])
    ]
    atexit_functions += exit_hooks or []
    for atexit_function in atexit_functions:
        atexit.register(atexit_function)

    if console == 'ptpython':
        try:
            from ptpython.repl import embed
            embed({}, _vars)
        except ImportError:
            click.echo("ptpython is not installed!")
        return

    if console == 'bpython':
        try:
            from bpython import embed
            embed(locals_=_vars, banner=banner_msg)
        except ImportError:
            click.echo("bpython is not installed!")
        return

    try:
        if console == 'ipython':
            from IPython import start_ipython
            from traitlets.config import Config
            c = Config()
            c.TerminalInteractiveShell.banner2 = banner_msg
            c.InteractiveShellApp.extensions = [
                extension
                for extension in manage_dict['shell'].get('ipython_extensions', [])
            ]
            c.InteractiveShellApp.exec_lines = [
                exec_line
                for exec_line in manage_dict['shell'].get('ipython_exec_lines', [])
            ]
            if manage_dict['shell'].get('ipython_auto_reload', True) is True:
                c.InteractiveShellApp.extensions.append('autoreload')
                c.InteractiveShellApp.exec_lines.append('%autoreload 2')
            start_ipython(argv=[], user_ns=_vars, config=c)
        else:
            raise ImportError
    except ImportError:
        if manage_dict['shell']['readline_enabled']:
            import readline
            import rlcompleter
            readline.set_completer(rlcompleter.Completer(_vars).complete)
            readline.parse_and_bind('tab: complete')
        shell = code.InteractiveConsole(_vars)
        shell.interact(banner=banner_msg)
python
def create_shell(console, manage_dict=None, extra_vars=None, exit_hooks=None):
    """Creates the shell"""
    manage_dict = manage_dict or MANAGE_DICT
    _vars = globals()
    _vars.update(locals())
    auto_imported = import_objects(manage_dict)
    if extra_vars:
        auto_imported.update(extra_vars)
    _vars.update(auto_imported)
    msgs = []
    if manage_dict['shell']['banner']['enabled']:
        msgs.append(
            manage_dict['shell']['banner']['message'].format(**manage_dict)
        )
    if auto_imported and manage_dict['shell']['auto_import']['display']:
        auto_imported_names = [
            key for key in auto_imported.keys()
            if key not in ['__builtins__', 'builtins']
        ]
        msgs.append('\tAuto imported: {0}\n'.format(auto_imported_names))

    banner_msg = u'\n'.join(msgs)

    exec_init(manage_dict, _vars)
    exec_init_script(manage_dict, _vars)

    atexit_functions = [
        import_string(func_name)
        for func_name in manage_dict['shell'].get('exit_hooks', [])
    ]
    atexit_functions += exit_hooks or []
    for atexit_function in atexit_functions:
        atexit.register(atexit_function)

    if console == 'ptpython':
        try:
            from ptpython.repl import embed
            embed({}, _vars)
        except ImportError:
            click.echo("ptpython is not installed!")
        return

    if console == 'bpython':
        try:
            from bpython import embed
            embed(locals_=_vars, banner=banner_msg)
        except ImportError:
            click.echo("bpython is not installed!")
        return

    try:
        if console == 'ipython':
            from IPython import start_ipython
            from traitlets.config import Config
            c = Config()
            c.TerminalInteractiveShell.banner2 = banner_msg
            c.InteractiveShellApp.extensions = [
                extension
                for extension in manage_dict['shell'].get('ipython_extensions', [])
            ]
            c.InteractiveShellApp.exec_lines = [
                exec_line
                for exec_line in manage_dict['shell'].get('ipython_exec_lines', [])
            ]
            if manage_dict['shell'].get('ipython_auto_reload', True) is True:
                c.InteractiveShellApp.extensions.append('autoreload')
                c.InteractiveShellApp.exec_lines.append('%autoreload 2')
            start_ipython(argv=[], user_ns=_vars, config=c)
        else:
            raise ImportError
    except ImportError:
        if manage_dict['shell']['readline_enabled']:
            import readline
            import rlcompleter
            readline.set_completer(rlcompleter.Completer(_vars).complete)
            readline.parse_and_bind('tab: complete')
        shell = code.InteractiveConsole(_vars)
        shell.interact(banner=banner_msg)
[ "def", "create_shell", "(", "console", ",", "manage_dict", "=", "None", ",", "extra_vars", "=", "None", ",", "exit_hooks", "=", "None", ")", ":", "manage_dict", "=", "manage_dict", "or", "MANAGE_DICT", "_vars", "=", "globals", "(", ")", "_vars", ".", "update", "(", "locals", "(", ")", ")", "auto_imported", "=", "import_objects", "(", "manage_dict", ")", "if", "extra_vars", ":", "auto_imported", ".", "update", "(", "extra_vars", ")", "_vars", ".", "update", "(", "auto_imported", ")", "msgs", "=", "[", "]", "if", "manage_dict", "[", "'shell'", "]", "[", "'banner'", "]", "[", "'enabled'", "]", ":", "msgs", ".", "append", "(", "manage_dict", "[", "'shell'", "]", "[", "'banner'", "]", "[", "'message'", "]", ".", "format", "(", "*", "*", "manage_dict", ")", ")", "if", "auto_imported", "and", "manage_dict", "[", "'shell'", "]", "[", "'auto_import'", "]", "[", "'display'", "]", ":", "auto_imported_names", "=", "[", "key", "for", "key", "in", "auto_imported", ".", "keys", "(", ")", "if", "key", "not", "in", "[", "'__builtins__'", ",", "'builtins'", "]", "]", "msgs", ".", "append", "(", "'\\tAuto imported: {0}\\n'", ".", "format", "(", "auto_imported_names", ")", ")", "banner_msg", "=", "u'\\n'", ".", "join", "(", "msgs", ")", "exec_init", "(", "manage_dict", ",", "_vars", ")", "exec_init_script", "(", "manage_dict", ",", "_vars", ")", "atexit_functions", "=", "[", "import_string", "(", "func_name", ")", "for", "func_name", "in", "manage_dict", "[", "'shell'", "]", ".", "get", "(", "'exit_hooks'", ",", "[", "]", ")", "]", "atexit_functions", "+=", "exit_hooks", "or", "[", "]", "for", "atexit_function", "in", "atexit_functions", ":", "atexit", ".", "register", "(", "atexit_function", ")", "if", "console", "==", "'ptpython'", ":", "try", ":", "from", "ptpython", ".", "repl", "import", "embed", "embed", "(", "{", "}", ",", "_vars", ")", "except", "ImportError", ":", "click", ".", "echo", "(", "\"ptpython is not installed!\"", ")", "return", "if", "console", "==", "'bpython'", ":", "try", ":", "from", "bpython", "import", "embed", "embed", "(", "locals_", "=", "_vars", ",", "banner", "=", "banner_msg", ")", "except", "ImportError", ":", "click", ".", "echo", "(", "\"bpython is not installed!\"", ")", "return", "try", ":", "if", "console", "==", "'ipython'", ":", "from", "IPython", "import", "start_ipython", "from", "traitlets", ".", "config", "import", "Config", "c", "=", "Config", "(", ")", "c", ".", "TerminalInteractiveShell", ".", "banner2", "=", "banner_msg", "c", ".", "InteractiveShellApp", ".", "extensions", "=", "[", "extension", "for", "extension", "in", "manage_dict", "[", "'shell'", "]", ".", "get", "(", "'ipython_extensions'", ",", "[", "]", ")", "]", "c", ".", "InteractiveShellApp", ".", "exec_lines", "=", "[", "exec_line", "for", "exec_line", "in", "manage_dict", "[", "'shell'", "]", ".", "get", "(", "'ipython_exec_lines'", ",", "[", "]", ")", "]", "if", "manage_dict", "[", "'shell'", "]", ".", "get", "(", "'ipython_auto_reload'", ",", "True", ")", "is", "True", ":", "c", ".", "InteractiveShellApp", ".", "extensions", ".", "append", "(", "'autoreload'", ")", "c", ".", "InteractiveShellApp", ".", "exec_lines", ".", "append", "(", "'%autoreload 2'", ")", "start_ipython", "(", "argv", "=", "[", "]", ",", "user_ns", "=", "_vars", ",", "config", "=", "c", ")", "else", ":", "raise", "ImportError", "except", "ImportError", ":", "if", "manage_dict", "[", "'shell'", "]", "[", "'readline_enabled'", "]", ":", "import", "readline", "import", "rlcompleter", "readline", ".", "set_completer", "(", 
"rlcompleter", ".", "Completer", "(", "_vars", ")", ".", "complete", ")", "readline", ".", "parse_and_bind", "(", "'tab: complete'", ")", "shell", "=", "code", ".", "InteractiveConsole", "(", "_vars", ")", "shell", ".", "interact", "(", "banner", "=", "banner_msg", ")" ]
Creates the shell
[ "Creates", "the", "shell" ]
e904c451862f036f4be8723df5704a9844103c74
https://github.com/rochacbruno/manage/blob/e904c451862f036f4be8723df5704a9844103c74/manage/cli.py#L103-L180
-1
251,243
sk-/git-lint
gitlint/__init__.py
find_invalid_filenames
def find_invalid_filenames(filenames, repository_root): """Find files that does not exist, are not in the repo or are directories. Args: filenames: list of filenames to check repository_root: the absolute path of the repository's root. Returns: A list of errors. """ errors = [] for filename in filenames: if not os.path.abspath(filename).startswith(repository_root): errors.append((filename, 'Error: File %s does not belong to ' 'repository %s' % (filename, repository_root))) if not os.path.exists(filename): errors.append((filename, 'Error: File %s does not exist' % (filename, ))) if os.path.isdir(filename): errors.append((filename, 'Error: %s is a directory. Directories are' ' not yet supported' % (filename, ))) return errors
python
def find_invalid_filenames(filenames, repository_root): """Find files that does not exist, are not in the repo or are directories. Args: filenames: list of filenames to check repository_root: the absolute path of the repository's root. Returns: A list of errors. """ errors = [] for filename in filenames: if not os.path.abspath(filename).startswith(repository_root): errors.append((filename, 'Error: File %s does not belong to ' 'repository %s' % (filename, repository_root))) if not os.path.exists(filename): errors.append((filename, 'Error: File %s does not exist' % (filename, ))) if os.path.isdir(filename): errors.append((filename, 'Error: %s is a directory. Directories are' ' not yet supported' % (filename, ))) return errors
[ "def", "find_invalid_filenames", "(", "filenames", ",", "repository_root", ")", ":", "errors", "=", "[", "]", "for", "filename", "in", "filenames", ":", "if", "not", "os", ".", "path", ".", "abspath", "(", "filename", ")", ".", "startswith", "(", "repository_root", ")", ":", "errors", ".", "append", "(", "(", "filename", ",", "'Error: File %s does not belong to '", "'repository %s'", "%", "(", "filename", ",", "repository_root", ")", ")", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "filename", ")", ":", "errors", ".", "append", "(", "(", "filename", ",", "'Error: File %s does not exist'", "%", "(", "filename", ",", ")", ")", ")", "if", "os", ".", "path", ".", "isdir", "(", "filename", ")", ":", "errors", ".", "append", "(", "(", "filename", ",", "'Error: %s is a directory. Directories are'", "' not yet supported'", "%", "(", "filename", ",", ")", ")", ")", "return", "errors" ]
Find files that do not exist, are not in the repo or are directories. Args: filenames: list of filenames to check repository_root: the absolute path of the repository's root. Returns: A list of errors.
[ "Find", "files", "that", "does", "not", "exist", "are", "not", "in", "the", "repo", "or", "are", "directories", "." ]
4f19ec88bfa1b6670ff37ccbfc53c6b67251b027
https://github.com/sk-/git-lint/blob/4f19ec88bfa1b6670ff37ccbfc53c6b67251b027/gitlint/__init__.py#L65-L87
-1
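A minimal usage sketch for find_invalid_filenames above (assumes the gitlint package from this repo is importable; the paths are hypothetical):

import os
from gitlint import find_invalid_filenames

repo_root = os.path.abspath('.')  # pretend the current directory is the repository root
candidates = [
    os.path.join(repo_root, 'no_such_file.py'),  # inside the repo but missing -> one error
    '/tmp/outside.py',                           # outside the repo -> reported as not belonging
]

for filename, message in find_invalid_filenames(candidates, repo_root):
    print(message)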
251,244
sk-/git-lint
gitlint/__init__.py
get_config
def get_config(repo_root): """Gets the configuration file either from the repository or the default.""" config = os.path.join(os.path.dirname(__file__), 'configs', 'config.yaml') if repo_root: repo_config = os.path.join(repo_root, '.gitlint.yaml') if os.path.exists(repo_config): config = repo_config with open(config) as f: # We have to read the content first as yaml hangs up when reading from # MockOpen content = f.read() # Yaml.load will return None when the input is empty. if not content: yaml_config = {} else: yaml_config = yaml.load(content) return linters.parse_yaml_config(yaml_config, repo_root)
python
def get_config(repo_root): """Gets the configuration file either from the repository or the default.""" config = os.path.join(os.path.dirname(__file__), 'configs', 'config.yaml') if repo_root: repo_config = os.path.join(repo_root, '.gitlint.yaml') if os.path.exists(repo_config): config = repo_config with open(config) as f: # We have to read the content first as yaml hangs up when reading from # MockOpen content = f.read() # Yaml.load will return None when the input is empty. if not content: yaml_config = {} else: yaml_config = yaml.load(content) return linters.parse_yaml_config(yaml_config, repo_root)
[ "def", "get_config", "(", "repo_root", ")", ":", "config", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ",", "'configs'", ",", "'config.yaml'", ")", "if", "repo_root", ":", "repo_config", "=", "os", ".", "path", ".", "join", "(", "repo_root", ",", "'.gitlint.yaml'", ")", "if", "os", ".", "path", ".", "exists", "(", "repo_config", ")", ":", "config", "=", "repo_config", "with", "open", "(", "config", ")", "as", "f", ":", "# We have to read the content first as yaml hangs up when reading from", "# MockOpen", "content", "=", "f", ".", "read", "(", ")", "# Yaml.load will return None when the input is empty.", "if", "not", "content", ":", "yaml_config", "=", "{", "}", "else", ":", "yaml_config", "=", "yaml", ".", "load", "(", "content", ")", "return", "linters", ".", "parse_yaml_config", "(", "yaml_config", ",", "repo_root", ")" ]
Gets the configuration file either from the repository or the default.
[ "Gets", "the", "configuration", "file", "either", "from", "the", "repository", "or", "the", "default", "." ]
4f19ec88bfa1b6670ff37ccbfc53c6b67251b027
https://github.com/sk-/git-lint/blob/4f19ec88bfa1b6670ff37ccbfc53c6b67251b027/gitlint/__init__.py#L90-L109
-1
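One detail worth spelling out from get_config above: an empty config file makes the YAML parser return None, which is why the empty-content branch substitutes {}. A quick standalone check, assuming PyYAML is available (safe_load is used here, whereas the function calls yaml.load):

import yaml

print(yaml.safe_load(''))                   # None -> get_config falls back to an empty dict
print(yaml.safe_load('python:\n  pylint'))  # a regular mapping comes back as a dict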
251,245
sk-/git-lint
gitlint/__init__.py
format_comment
def format_comment(comment_data): """Formats the data returned by the linters. Given a dictionary with the fields: line, column, severity, message_id, message, will generate a message like: 'line {line}, col {column}: {severity}: [{message_id}]: {message}' Any of the fields may nbe absent. Args: comment_data: dictionary with the linter data. Returns: a string with the formatted message. """ format_pieces = [] # Line and column information if 'line' in comment_data: format_pieces.append('line {line}') if 'column' in comment_data: if format_pieces: format_pieces.append(', ') format_pieces.append('col {column}') if format_pieces: format_pieces.append(': ') # Severity and Id information if 'severity' in comment_data: format_pieces.append('{severity}: ') if 'message_id' in comment_data: format_pieces.append('[{message_id}]: ') # The message if 'message' in comment_data: format_pieces.append('{message}') return ''.join(format_pieces).format(**comment_data)
python
def format_comment(comment_data): """Formats the data returned by the linters. Given a dictionary with the fields: line, column, severity, message_id, message, will generate a message like: 'line {line}, col {column}: {severity}: [{message_id}]: {message}' Any of the fields may nbe absent. Args: comment_data: dictionary with the linter data. Returns: a string with the formatted message. """ format_pieces = [] # Line and column information if 'line' in comment_data: format_pieces.append('line {line}') if 'column' in comment_data: if format_pieces: format_pieces.append(', ') format_pieces.append('col {column}') if format_pieces: format_pieces.append(': ') # Severity and Id information if 'severity' in comment_data: format_pieces.append('{severity}: ') if 'message_id' in comment_data: format_pieces.append('[{message_id}]: ') # The message if 'message' in comment_data: format_pieces.append('{message}') return ''.join(format_pieces).format(**comment_data)
[ "def", "format_comment", "(", "comment_data", ")", ":", "format_pieces", "=", "[", "]", "# Line and column information", "if", "'line'", "in", "comment_data", ":", "format_pieces", ".", "append", "(", "'line {line}'", ")", "if", "'column'", "in", "comment_data", ":", "if", "format_pieces", ":", "format_pieces", ".", "append", "(", "', '", ")", "format_pieces", ".", "append", "(", "'col {column}'", ")", "if", "format_pieces", ":", "format_pieces", ".", "append", "(", "': '", ")", "# Severity and Id information", "if", "'severity'", "in", "comment_data", ":", "format_pieces", ".", "append", "(", "'{severity}: '", ")", "if", "'message_id'", "in", "comment_data", ":", "format_pieces", ".", "append", "(", "'[{message_id}]: '", ")", "# The message", "if", "'message'", "in", "comment_data", ":", "format_pieces", ".", "append", "(", "'{message}'", ")", "return", "''", ".", "join", "(", "format_pieces", ")", ".", "format", "(", "*", "*", "comment_data", ")" ]
Formats the data returned by the linters. Given a dictionary with the fields: line, column, severity, message_id, message, will generate a message like: 'line {line}, col {column}: {severity}: [{message_id}]: {message}' Any of the fields may be absent. Args: comment_data: dictionary with the linter data. Returns: a string with the formatted message.
[ "Formats", "the", "data", "returned", "by", "the", "linters", "." ]
4f19ec88bfa1b6670ff37ccbfc53c6b67251b027
https://github.com/sk-/git-lint/blob/4f19ec88bfa1b6670ff37ccbfc53c6b67251b027/gitlint/__init__.py#L112-L150
-1
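A small, hypothetical input/output pair for format_comment above (assumes the gitlint package is importable):

from gitlint import format_comment

comment = {
    'line': 12,
    'column': 4,
    'severity': 'Warning',
    'message_id': 'W0613',
    'message': 'Unused argument "args"',
}
print(format_comment(comment))
# line 12, col 4: Warning: [W0613]: Unused argument "args"

print(format_comment({'message': 'something went wrong'}))  # absent fields are simply skipped
# something went wrong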
251,246
sk-/git-lint
gitlint/__init__.py
get_vcs_root
def get_vcs_root(): """Returns the vcs module and the root of the repo. Returns: A tuple containing the vcs module to use (git, hg) and the root of the repository. If no repository exisits then (None, None) is returned. """ for vcs in (git, hg): repo_root = vcs.repository_root() if repo_root: return vcs, repo_root return (None, None)
python
def get_vcs_root(): """Returns the vcs module and the root of the repo. Returns: A tuple containing the vcs module to use (git, hg) and the root of the repository. If no repository exisits then (None, None) is returned. """ for vcs in (git, hg): repo_root = vcs.repository_root() if repo_root: return vcs, repo_root return (None, None)
[ "def", "get_vcs_root", "(", ")", ":", "for", "vcs", "in", "(", "git", ",", "hg", ")", ":", "repo_root", "=", "vcs", ".", "repository_root", "(", ")", "if", "repo_root", ":", "return", "vcs", ",", "repo_root", "return", "(", "None", ",", "None", ")" ]
Returns the vcs module and the root of the repo. Returns: A tuple containing the vcs module to use (git, hg) and the root of the repository. If no repository exists then (None, None) is returned.
[ "Returns", "the", "vcs", "module", "and", "the", "root", "of", "the", "repo", "." ]
4f19ec88bfa1b6670ff37ccbfc53c6b67251b027
https://github.com/sk-/git-lint/blob/4f19ec88bfa1b6670ff37ccbfc53c6b67251b027/gitlint/__init__.py#L153-L165
-1
251,247
sk-/git-lint
gitlint/__init__.py
process_file
def process_file(vcs, commit, force, gitlint_config, file_data): """Lint the file Returns: The results from the linter. """ filename, extra_data = file_data if force: modified_lines = None else: modified_lines = vcs.modified_lines( filename, extra_data, commit=commit) result = linters.lint(filename, modified_lines, gitlint_config) result = result[filename] return filename, result
python
def process_file(vcs, commit, force, gitlint_config, file_data): """Lint the file Returns: The results from the linter. """ filename, extra_data = file_data if force: modified_lines = None else: modified_lines = vcs.modified_lines( filename, extra_data, commit=commit) result = linters.lint(filename, modified_lines, gitlint_config) result = result[filename] return filename, result
[ "def", "process_file", "(", "vcs", ",", "commit", ",", "force", ",", "gitlint_config", ",", "file_data", ")", ":", "filename", ",", "extra_data", "=", "file_data", "if", "force", ":", "modified_lines", "=", "None", "else", ":", "modified_lines", "=", "vcs", ".", "modified_lines", "(", "filename", ",", "extra_data", ",", "commit", "=", "commit", ")", "result", "=", "linters", ".", "lint", "(", "filename", ",", "modified_lines", ",", "gitlint_config", ")", "result", "=", "result", "[", "filename", "]", "return", "filename", ",", "result" ]
Lint the file Returns: The results from the linter.
[ "Lint", "the", "file" ]
4f19ec88bfa1b6670ff37ccbfc53c6b67251b027
https://github.com/sk-/git-lint/blob/4f19ec88bfa1b6670ff37ccbfc53c6b67251b027/gitlint/__init__.py#L168-L184
-1
251,248
sk-/git-lint
gitlint/hg.py
last_commit
def last_commit(): """Returns the SHA1 of the last commit.""" try: root = subprocess.check_output( ['hg', 'parent', '--template={node}'], stderr=subprocess.STDOUT).strip() # Convert to unicode first return root.decode('utf-8') except subprocess.CalledProcessError: return None
python
def last_commit(): """Returns the SHA1 of the last commit.""" try: root = subprocess.check_output( ['hg', 'parent', '--template={node}'], stderr=subprocess.STDOUT).strip() # Convert to unicode first return root.decode('utf-8') except subprocess.CalledProcessError: return None
[ "def", "last_commit", "(", ")", ":", "try", ":", "root", "=", "subprocess", ".", "check_output", "(", "[", "'hg'", ",", "'parent'", ",", "'--template={node}'", "]", ",", "stderr", "=", "subprocess", ".", "STDOUT", ")", ".", "strip", "(", ")", "# Convert to unicode first", "return", "root", ".", "decode", "(", "'utf-8'", ")", "except", "subprocess", ".", "CalledProcessError", ":", "return", "None" ]
Returns the SHA1 of the last commit.
[ "Returns", "the", "SHA1", "of", "the", "last", "commit", "." ]
4f19ec88bfa1b6670ff37ccbfc53c6b67251b027
https://github.com/sk-/git-lint/blob/4f19ec88bfa1b6670ff37ccbfc53c6b67251b027/gitlint/hg.py#L33-L42
-1
251,249
sk-/git-lint
gitlint/hg.py
modified_files
def modified_files(root, tracked_only=False, commit=None): """Returns a list of files that has been modified since the last commit. Args: root: the root of the repository, it has to be an absolute path. tracked_only: exclude untracked files when True. commit: SHA1 of the commit. If None, it will get the modified files in the working copy. Returns: a dictionary with the modified files as keys, and additional information as value. In this case it adds the status returned by hg status. """ assert os.path.isabs(root), "Root has to be absolute, got: %s" % root command = ['hg', 'status'] if commit: command.append('--change=%s' % commit) # Convert to unicode and split status_lines = subprocess.check_output(command).decode('utf-8').split( os.linesep) modes = ['M', 'A'] if not tracked_only: modes.append(r'\?') modes_str = '|'.join(modes) modified_file_status = utils.filter_lines( status_lines, r'(?P<mode>%s) (?P<filename>.+)' % modes_str, groups=('filename', 'mode')) return dict((os.path.join(root, filename), mode) for filename, mode in modified_file_status)
python
def modified_files(root, tracked_only=False, commit=None): """Returns a list of files that has been modified since the last commit. Args: root: the root of the repository, it has to be an absolute path. tracked_only: exclude untracked files when True. commit: SHA1 of the commit. If None, it will get the modified files in the working copy. Returns: a dictionary with the modified files as keys, and additional information as value. In this case it adds the status returned by hg status. """ assert os.path.isabs(root), "Root has to be absolute, got: %s" % root command = ['hg', 'status'] if commit: command.append('--change=%s' % commit) # Convert to unicode and split status_lines = subprocess.check_output(command).decode('utf-8').split( os.linesep) modes = ['M', 'A'] if not tracked_only: modes.append(r'\?') modes_str = '|'.join(modes) modified_file_status = utils.filter_lines( status_lines, r'(?P<mode>%s) (?P<filename>.+)' % modes_str, groups=('filename', 'mode')) return dict((os.path.join(root, filename), mode) for filename, mode in modified_file_status)
[ "def", "modified_files", "(", "root", ",", "tracked_only", "=", "False", ",", "commit", "=", "None", ")", ":", "assert", "os", ".", "path", ".", "isabs", "(", "root", ")", ",", "\"Root has to be absolute, got: %s\"", "%", "root", "command", "=", "[", "'hg'", ",", "'status'", "]", "if", "commit", ":", "command", ".", "append", "(", "'--change=%s'", "%", "commit", ")", "# Convert to unicode and split", "status_lines", "=", "subprocess", ".", "check_output", "(", "command", ")", ".", "decode", "(", "'utf-8'", ")", ".", "split", "(", "os", ".", "linesep", ")", "modes", "=", "[", "'M'", ",", "'A'", "]", "if", "not", "tracked_only", ":", "modes", ".", "append", "(", "r'\\?'", ")", "modes_str", "=", "'|'", ".", "join", "(", "modes", ")", "modified_file_status", "=", "utils", ".", "filter_lines", "(", "status_lines", ",", "r'(?P<mode>%s) (?P<filename>.+)'", "%", "modes_str", ",", "groups", "=", "(", "'filename'", ",", "'mode'", ")", ")", "return", "dict", "(", "(", "os", ".", "path", ".", "join", "(", "root", ",", "filename", ")", ",", "mode", ")", "for", "filename", ",", "mode", "in", "modified_file_status", ")" ]
Returns a list of files that have been modified since the last commit. Args: root: the root of the repository, it has to be an absolute path. tracked_only: exclude untracked files when True. commit: SHA1 of the commit. If None, it will get the modified files in the working copy. Returns: a dictionary with the modified files as keys, and additional information as value. In this case it adds the status returned by hg status.
[ "Returns", "a", "list", "of", "files", "that", "has", "been", "modified", "since", "the", "last", "commit", "." ]
4f19ec88bfa1b6670ff37ccbfc53c6b67251b027
https://github.com/sk-/git-lint/blob/4f19ec88bfa1b6670ff37ccbfc53c6b67251b027/gitlint/hg.py#L45-L79
-1
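The heart of modified_files above is a mode/filename regex applied to hg status output; a standalone sketch of that parsing step with made-up status lines (no Mercurial required):

import os
import re

status_lines = ['M gitlint/hg.py', 'A docs/new_page.rst', '? scratch.txt', '']  # hypothetical hg status output
pattern = re.compile(r'(?P<mode>M|A|\?) (?P<filename>.+)')  # modes M, A and ? when untracked files are included
root = '/home/user/repo'

modified = {}
for line in status_lines:
    match = pattern.search(line)
    if match:
        modified[os.path.join(root, match.group('filename'))] = match.group('mode')

print(modified)
# {'/home/user/repo/gitlint/hg.py': 'M', '/home/user/repo/docs/new_page.rst': 'A', '/home/user/repo/scratch.txt': '?'}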
251,250
sk-/git-lint
scripts/custom_linters/ini_linter.py
lint
def lint(filename): """Lints an INI file, returning 0 in case of success.""" config = ConfigParser.ConfigParser() try: config.read(filename) return 0 except ConfigParser.Error as error: print('Error: %s' % error) return 1 except: print('Unexpected Error') return 2
python
def lint(filename): """Lints an INI file, returning 0 in case of success.""" config = ConfigParser.ConfigParser() try: config.read(filename) return 0 except ConfigParser.Error as error: print('Error: %s' % error) return 1 except: print('Unexpected Error') return 2
[ "def", "lint", "(", "filename", ")", ":", "config", "=", "ConfigParser", ".", "ConfigParser", "(", ")", "try", ":", "config", ".", "read", "(", "filename", ")", "return", "0", "except", "ConfigParser", ".", "Error", "as", "error", ":", "print", "(", "'Error: %s'", "%", "error", ")", "return", "1", "except", ":", "print", "(", "'Unexpected Error'", ")", "return", "2" ]
Lints an INI file, returning 0 in case of success.
[ "Lints", "an", "INI", "file", "returning", "0", "in", "case", "of", "success", "." ]
4f19ec88bfa1b6670ff37ccbfc53c6b67251b027
https://github.com/sk-/git-lint/blob/4f19ec88bfa1b6670ff37ccbfc53c6b67251b027/scripts/custom_linters/ini_linter.py#L23-L34
-1
251,251
sk-/git-lint
gitlint/linters.py
missing_requirements_command
def missing_requirements_command(missing_programs, installation_string, filename, unused_lines): """Pseudo-command to be used when requirements are missing.""" verb = 'is' if len(missing_programs) > 1: verb = 'are' return { filename: { 'skipped': [ '%s %s not installed. %s' % (', '.join(missing_programs), verb, installation_string) ] } }
python
def missing_requirements_command(missing_programs, installation_string, filename, unused_lines): """Pseudo-command to be used when requirements are missing.""" verb = 'is' if len(missing_programs) > 1: verb = 'are' return { filename: { 'skipped': [ '%s %s not installed. %s' % (', '.join(missing_programs), verb, installation_string) ] } }
[ "def", "missing_requirements_command", "(", "missing_programs", ",", "installation_string", ",", "filename", ",", "unused_lines", ")", ":", "verb", "=", "'is'", "if", "len", "(", "missing_programs", ")", ">", "1", ":", "verb", "=", "'are'", "return", "{", "filename", ":", "{", "'skipped'", ":", "[", "'%s %s not installed. %s'", "%", "(", "', '", ".", "join", "(", "missing_programs", ")", ",", "verb", ",", "installation_string", ")", "]", "}", "}" ]
Pseudo-command to be used when requirements are missing.
[ "Pseudo", "-", "command", "to", "be", "used", "when", "requirements", "are", "missing", "." ]
4f19ec88bfa1b6670ff37ccbfc53c6b67251b027
https://github.com/sk-/git-lint/blob/4f19ec88bfa1b6670ff37ccbfc53c6b67251b027/gitlint/linters.py#L41-L54
-1
251,252
sk-/git-lint
gitlint/linters.py
lint_command
def lint_command(name, program, arguments, filter_regex, filename, lines): """Executes a lint program and filter the output. Executes the lint tool 'program' with arguments 'arguments' over the file 'filename' returning only those lines matching the regular expression 'filter_regex'. Args: name: string: the name of the linter. program: string: lint program. arguments: list[string]: extra arguments for the program. filter_regex: string: regular expression to filter lines. filename: string: filename to lint. lines: list[int]|None: list of lines that we want to capture. If None, then all lines will be captured. Returns: dict: a dict with the extracted info from the message. """ output = utils.get_output_from_cache(name, filename) if output is None: call_arguments = [program] + arguments + [filename] try: output = subprocess.check_output( call_arguments, stderr=subprocess.STDOUT) except subprocess.CalledProcessError as error: output = error.output except OSError: return { filename: { 'error': [('Could not execute "%s".%sMake sure all ' + 'required programs are installed') % (' '.join(call_arguments), os.linesep)] } } output = output.decode('utf-8') utils.save_output_in_cache(name, filename, output) output_lines = output.split(os.linesep) if lines is None: lines_regex = r'\d+' else: lines_regex = '|'.join(map(str, lines)) lines_regex = '(%s)' % lines_regex groups = ('line', 'column', 'message', 'severity', 'message_id') filtered_lines = utils.filter_lines( output_lines, filter_regex.format(lines=lines_regex, filename=re.escape(filename)), groups=groups) result = [] for data in filtered_lines: comment = dict(p for p in zip(groups, data) if p[1] is not None) if 'line' in comment: comment['line'] = int(comment['line']) if 'column' in comment: comment['column'] = int(comment['column']) if 'severity' in comment: comment['severity'] = comment['severity'].title() result.append(comment) return {filename: {'comments': result}}
python
def lint_command(name, program, arguments, filter_regex, filename, lines): """Executes a lint program and filter the output. Executes the lint tool 'program' with arguments 'arguments' over the file 'filename' returning only those lines matching the regular expression 'filter_regex'. Args: name: string: the name of the linter. program: string: lint program. arguments: list[string]: extra arguments for the program. filter_regex: string: regular expression to filter lines. filename: string: filename to lint. lines: list[int]|None: list of lines that we want to capture. If None, then all lines will be captured. Returns: dict: a dict with the extracted info from the message. """ output = utils.get_output_from_cache(name, filename) if output is None: call_arguments = [program] + arguments + [filename] try: output = subprocess.check_output( call_arguments, stderr=subprocess.STDOUT) except subprocess.CalledProcessError as error: output = error.output except OSError: return { filename: { 'error': [('Could not execute "%s".%sMake sure all ' + 'required programs are installed') % (' '.join(call_arguments), os.linesep)] } } output = output.decode('utf-8') utils.save_output_in_cache(name, filename, output) output_lines = output.split(os.linesep) if lines is None: lines_regex = r'\d+' else: lines_regex = '|'.join(map(str, lines)) lines_regex = '(%s)' % lines_regex groups = ('line', 'column', 'message', 'severity', 'message_id') filtered_lines = utils.filter_lines( output_lines, filter_regex.format(lines=lines_regex, filename=re.escape(filename)), groups=groups) result = [] for data in filtered_lines: comment = dict(p for p in zip(groups, data) if p[1] is not None) if 'line' in comment: comment['line'] = int(comment['line']) if 'column' in comment: comment['column'] = int(comment['column']) if 'severity' in comment: comment['severity'] = comment['severity'].title() result.append(comment) return {filename: {'comments': result}}
[ "def", "lint_command", "(", "name", ",", "program", ",", "arguments", ",", "filter_regex", ",", "filename", ",", "lines", ")", ":", "output", "=", "utils", ".", "get_output_from_cache", "(", "name", ",", "filename", ")", "if", "output", "is", "None", ":", "call_arguments", "=", "[", "program", "]", "+", "arguments", "+", "[", "filename", "]", "try", ":", "output", "=", "subprocess", ".", "check_output", "(", "call_arguments", ",", "stderr", "=", "subprocess", ".", "STDOUT", ")", "except", "subprocess", ".", "CalledProcessError", "as", "error", ":", "output", "=", "error", ".", "output", "except", "OSError", ":", "return", "{", "filename", ":", "{", "'error'", ":", "[", "(", "'Could not execute \"%s\".%sMake sure all '", "+", "'required programs are installed'", ")", "%", "(", "' '", ".", "join", "(", "call_arguments", ")", ",", "os", ".", "linesep", ")", "]", "}", "}", "output", "=", "output", ".", "decode", "(", "'utf-8'", ")", "utils", ".", "save_output_in_cache", "(", "name", ",", "filename", ",", "output", ")", "output_lines", "=", "output", ".", "split", "(", "os", ".", "linesep", ")", "if", "lines", "is", "None", ":", "lines_regex", "=", "r'\\d+'", "else", ":", "lines_regex", "=", "'|'", ".", "join", "(", "map", "(", "str", ",", "lines", ")", ")", "lines_regex", "=", "'(%s)'", "%", "lines_regex", "groups", "=", "(", "'line'", ",", "'column'", ",", "'message'", ",", "'severity'", ",", "'message_id'", ")", "filtered_lines", "=", "utils", ".", "filter_lines", "(", "output_lines", ",", "filter_regex", ".", "format", "(", "lines", "=", "lines_regex", ",", "filename", "=", "re", ".", "escape", "(", "filename", ")", ")", ",", "groups", "=", "groups", ")", "result", "=", "[", "]", "for", "data", "in", "filtered_lines", ":", "comment", "=", "dict", "(", "p", "for", "p", "in", "zip", "(", "groups", ",", "data", ")", "if", "p", "[", "1", "]", "is", "not", "None", ")", "if", "'line'", "in", "comment", ":", "comment", "[", "'line'", "]", "=", "int", "(", "comment", "[", "'line'", "]", ")", "if", "'column'", "in", "comment", ":", "comment", "[", "'column'", "]", "=", "int", "(", "comment", "[", "'column'", "]", ")", "if", "'severity'", "in", "comment", ":", "comment", "[", "'severity'", "]", "=", "comment", "[", "'severity'", "]", ".", "title", "(", ")", "result", ".", "append", "(", "comment", ")", "return", "{", "filename", ":", "{", "'comments'", ":", "result", "}", "}" ]
Executes a lint program and filter the output. Executes the lint tool 'program' with arguments 'arguments' over the file 'filename' returning only those lines matching the regular expression 'filter_regex'. Args: name: string: the name of the linter. program: string: lint program. arguments: list[string]: extra arguments for the program. filter_regex: string: regular expression to filter lines. filename: string: filename to lint. lines: list[int]|None: list of lines that we want to capture. If None, then all lines will be captured. Returns: dict: a dict with the extracted info from the message.
[ "Executes", "a", "lint", "program", "and", "filter", "the", "output", "." ]
4f19ec88bfa1b6670ff37ccbfc53c6b67251b027
https://github.com/sk-/git-lint/blob/4f19ec88bfa1b6670ff37ccbfc53c6b67251b027/gitlint/linters.py#L58-L121
-1
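The pivotal step in lint_command above is formatting filter_regex with the escaped filename and the allowed line numbers, then pulling named groups out of the tool's output. A standalone sketch with an invented, pycodestyle-flavoured output format (the regex and the output lines are illustrative, not the shipped config.yaml values):

import re

filename = 'example.py'
lines = [3, 7]  # only messages on modified lines are kept
lines_regex = '(%s)' % '|'.join(map(str, lines))

filter_regex = r'^{filename}:(?P<line>{lines}):(?P<column>\d+): (?P<message_id>\S+) (?P<message>.+)$'
pattern = re.compile(filter_regex.format(lines=lines_regex, filename=re.escape(filename)))

output_lines = [
    'example.py:3:1: E302 expected 2 blank lines, got 1',
    'example.py:5:80: E501 line too long (88 > 79 characters)',  # line 5 was not modified -> dropped
    'example.py:7:9: F401 imported but unused',
]

for line in output_lines:
    match = pattern.search(line)
    if match:
        print({key: value for key, value in match.groupdict().items() if value is not None})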
251,253
sk-/git-lint
gitlint/linters.py
_replace_variables
def _replace_variables(data, variables): """Replace the format variables in all items of data.""" formatter = string.Formatter() return [formatter.vformat(item, [], variables) for item in data]
python
def _replace_variables(data, variables): """Replace the format variables in all items of data.""" formatter = string.Formatter() return [formatter.vformat(item, [], variables) for item in data]
[ "def", "_replace_variables", "(", "data", ",", "variables", ")", ":", "formatter", "=", "string", ".", "Formatter", "(", ")", "return", "[", "formatter", ".", "vformat", "(", "item", ",", "[", "]", ",", "variables", ")", "for", "item", "in", "data", "]" ]
Replace the format variables in all items of data.
[ "Replace", "the", "format", "variables", "in", "all", "items", "of", "data", "." ]
4f19ec88bfa1b6670ff37ccbfc53c6b67251b027
https://github.com/sk-/git-lint/blob/4f19ec88bfa1b6670ff37ccbfc53c6b67251b027/gitlint/linters.py#L124-L127
-1
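_replace_variables above is just string.Formatter.vformat applied to each item with a mapping of variables; a short sketch (the variable names and paths are made up for illustration):

import string

formatter = string.Formatter()
data = ['--rcfile={REPO_HOME}/.pylintrc', '--config={CONFIG_DIR}/pep8.cfg']
variables = {'REPO_HOME': '/home/user/repo', 'CONFIG_DIR': '/opt/gitlint/configs'}

print([formatter.vformat(item, [], variables) for item in data])
# ['--rcfile=/home/user/repo/.pylintrc', '--config=/opt/gitlint/configs/pep8.cfg']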
251,254
sk-/git-lint
gitlint/linters.py
lint
def lint(filename, lines, config): """Lints a file. Args: filename: string: filename to lint. lines: list[int]|None: list of lines that we want to capture. If None, then all lines will be captured. config: dict[string: linter]: mapping from extension to a linter function. Returns: dict: if there were errors running the command then the field 'error' will have the reasons in a list. if the lint process was skipped, then a field 'skipped' will be set with the reasons. Otherwise, the field 'comments' will have the messages. """ _, ext = os.path.splitext(filename) if ext in config: output = collections.defaultdict(list) for linter in config[ext]: linter_output = linter(filename, lines) for category, values in linter_output[filename].items(): output[category].extend(values) if 'comments' in output: output['comments'] = sorted( output['comments'], key=lambda x: (x.get('line', -1), x.get('column', -1))) return {filename: dict(output)} else: return { filename: { 'skipped': [ 'no linter is defined or enabled for files' ' with extension "%s"' % ext ] } }
python
def lint(filename, lines, config): """Lints a file. Args: filename: string: filename to lint. lines: list[int]|None: list of lines that we want to capture. If None, then all lines will be captured. config: dict[string: linter]: mapping from extension to a linter function. Returns: dict: if there were errors running the command then the field 'error' will have the reasons in a list. if the lint process was skipped, then a field 'skipped' will be set with the reasons. Otherwise, the field 'comments' will have the messages. """ _, ext = os.path.splitext(filename) if ext in config: output = collections.defaultdict(list) for linter in config[ext]: linter_output = linter(filename, lines) for category, values in linter_output[filename].items(): output[category].extend(values) if 'comments' in output: output['comments'] = sorted( output['comments'], key=lambda x: (x.get('line', -1), x.get('column', -1))) return {filename: dict(output)} else: return { filename: { 'skipped': [ 'no linter is defined or enabled for files' ' with extension "%s"' % ext ] } }
[ "def", "lint", "(", "filename", ",", "lines", ",", "config", ")", ":", "_", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "filename", ")", "if", "ext", "in", "config", ":", "output", "=", "collections", ".", "defaultdict", "(", "list", ")", "for", "linter", "in", "config", "[", "ext", "]", ":", "linter_output", "=", "linter", "(", "filename", ",", "lines", ")", "for", "category", ",", "values", "in", "linter_output", "[", "filename", "]", ".", "items", "(", ")", ":", "output", "[", "category", "]", ".", "extend", "(", "values", ")", "if", "'comments'", "in", "output", ":", "output", "[", "'comments'", "]", "=", "sorted", "(", "output", "[", "'comments'", "]", ",", "key", "=", "lambda", "x", ":", "(", "x", ".", "get", "(", "'line'", ",", "-", "1", ")", ",", "x", ".", "get", "(", "'column'", ",", "-", "1", ")", ")", ")", "return", "{", "filename", ":", "dict", "(", "output", ")", "}", "else", ":", "return", "{", "filename", ":", "{", "'skipped'", ":", "[", "'no linter is defined or enabled for files'", "' with extension \"%s\"'", "%", "ext", "]", "}", "}" ]
Lints a file. Args: filename: string: filename to lint. lines: list[int]|None: list of lines that we want to capture. If None, then all lines will be captured. config: dict[string: linter]: mapping from extension to a linter function. Returns: dict: if there were errors running the command then the field 'error' will have the reasons in a list. if the lint process was skipped, then a field 'skipped' will be set with the reasons. Otherwise, the field 'comments' will have the messages.
[ "Lints", "a", "file", "." ]
4f19ec88bfa1b6670ff37ccbfc53c6b67251b027
https://github.com/sk-/git-lint/blob/4f19ec88bfa1b6670ff37ccbfc53c6b67251b027/gitlint/linters.py#L160-L197
-1
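A sketch of the config shape lint above expects: a mapping from file extension to a list of linter callables, each taking (filename, lines) and returning a per-filename dict. The fake linter stands in for the functools.partial(lint_command, ...) objects that parse_yaml_config builds, and the snippet assumes the gitlint package is importable:

from gitlint import linters

def fake_linter(filename, lines):
    # Illustrative only: a real entry would be a partial application of lint_command.
    return {filename: {'comments': [{'line': 2, 'column': 1, 'message': 'fake warning'}]}}

config = {'.py': [fake_linter]}

print(linters.lint('example.py', None, config))
# {'example.py': {'comments': [{'line': 2, 'column': 1, 'message': 'fake warning'}]}}
print(linters.lint('example.txt', None, config))
# {'example.txt': {'skipped': ['no linter is defined or enabled for files with extension ".txt"']}}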
251,255
sk-/git-lint
gitlint/utils.py
filter_lines
def filter_lines(lines, filter_regex, groups=None): """Filters out the lines not matching the pattern. Args: lines: list[string]: lines to filter. pattern: string: regular expression to filter out lines. Returns: list[string]: the list of filtered lines. """ pattern = re.compile(filter_regex) for line in lines: match = pattern.search(line) if match: if groups is None: yield line elif len(groups) == 1: yield match.group(groups[0]) else: matched_groups = match.groupdict() yield tuple(matched_groups.get(group) for group in groups)
python
def filter_lines(lines, filter_regex, groups=None): """Filters out the lines not matching the pattern. Args: lines: list[string]: lines to filter. pattern: string: regular expression to filter out lines. Returns: list[string]: the list of filtered lines. """ pattern = re.compile(filter_regex) for line in lines: match = pattern.search(line) if match: if groups is None: yield line elif len(groups) == 1: yield match.group(groups[0]) else: matched_groups = match.groupdict() yield tuple(matched_groups.get(group) for group in groups)
[ "def", "filter_lines", "(", "lines", ",", "filter_regex", ",", "groups", "=", "None", ")", ":", "pattern", "=", "re", ".", "compile", "(", "filter_regex", ")", "for", "line", "in", "lines", ":", "match", "=", "pattern", ".", "search", "(", "line", ")", "if", "match", ":", "if", "groups", "is", "None", ":", "yield", "line", "elif", "len", "(", "groups", ")", "==", "1", ":", "yield", "match", ".", "group", "(", "groups", "[", "0", "]", ")", "else", ":", "matched_groups", "=", "match", ".", "groupdict", "(", ")", "yield", "tuple", "(", "matched_groups", ".", "get", "(", "group", ")", "for", "group", "in", "groups", ")" ]
Filters out the lines not matching the pattern. Args: lines: list[string]: lines to filter. filter_regex: string: regular expression to filter out lines. Returns: list[string]: the list of filtered lines.
[ "Filters", "out", "the", "lines", "not", "matching", "the", "pattern", "." ]
4f19ec88bfa1b6670ff37ccbfc53c6b67251b027
https://github.com/sk-/git-lint/blob/4f19ec88bfa1b6670ff37ccbfc53c6b67251b027/gitlint/utils.py#L24-L43
-1
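Three hypothetical calls showing the behaviours of filter_lines above (no groups, one group, several groups); assumes the gitlint package is importable:

from gitlint.utils import filter_lines

lines = [
    'foo.py:12:1: E302 expected 2 blank lines',
    'not a lint message',
    'foo.py:40:80: E501 line too long',
]

print(list(filter_lines(lines, r'E\d+')))
# both lint lines, unchanged

print(list(filter_lines(lines, r'foo\.py:(?P<line>\d+):', groups=('line',))))
# ['12', '40']

print(list(filter_lines(lines, r':(?P<line>\d+):(?P<col>\d+): (?P<msg>.+)', groups=('line', 'col', 'msg'))))
# [('12', '1', 'E302 expected 2 blank lines'), ('40', '80', 'E501 line too long')]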
251,256
sk-/git-lint
gitlint/utils.py
which
def which(program): """Returns a list of paths where the program is found.""" if (os.path.isabs(program) and os.path.isfile(program) and os.access(program, os.X_OK)): return [program] candidates = [] locations = os.environ.get("PATH").split(os.pathsep) for location in locations: candidate = os.path.join(location, program) if os.path.isfile(candidate) and os.access(candidate, os.X_OK): candidates.append(candidate) return candidates
python
def which(program): """Returns a list of paths where the program is found.""" if (os.path.isabs(program) and os.path.isfile(program) and os.access(program, os.X_OK)): return [program] candidates = [] locations = os.environ.get("PATH").split(os.pathsep) for location in locations: candidate = os.path.join(location, program) if os.path.isfile(candidate) and os.access(candidate, os.X_OK): candidates.append(candidate) return candidates
[ "def", "which", "(", "program", ")", ":", "if", "(", "os", ".", "path", ".", "isabs", "(", "program", ")", "and", "os", ".", "path", ".", "isfile", "(", "program", ")", "and", "os", ".", "access", "(", "program", ",", "os", ".", "X_OK", ")", ")", ":", "return", "[", "program", "]", "candidates", "=", "[", "]", "locations", "=", "os", ".", "environ", ".", "get", "(", "\"PATH\"", ")", ".", "split", "(", "os", ".", "pathsep", ")", "for", "location", "in", "locations", ":", "candidate", "=", "os", ".", "path", ".", "join", "(", "location", ",", "program", ")", "if", "os", ".", "path", ".", "isfile", "(", "candidate", ")", "and", "os", ".", "access", "(", "candidate", ",", "os", ".", "X_OK", ")", ":", "candidates", ".", "append", "(", "candidate", ")", "return", "candidates" ]
Returns a list of paths where the program is found.
[ "Returns", "a", "list", "of", "paths", "where", "the", "program", "is", "found", "." ]
4f19ec88bfa1b6670ff37ccbfc53c6b67251b027
https://github.com/sk-/git-lint/blob/4f19ec88bfa1b6670ff37ccbfc53c6b67251b027/gitlint/utils.py#L47-L59
-1
251,257
sk-/git-lint
gitlint/utils.py
_open_for_write
def _open_for_write(filename): """Opens filename for writing, creating the directories if needed.""" dirname = os.path.dirname(filename) pathlib.Path(dirname).mkdir(parents=True, exist_ok=True) return io.open(filename, 'w')
python
def _open_for_write(filename): """Opens filename for writing, creating the directories if needed.""" dirname = os.path.dirname(filename) pathlib.Path(dirname).mkdir(parents=True, exist_ok=True) return io.open(filename, 'w')
[ "def", "_open_for_write", "(", "filename", ")", ":", "dirname", "=", "os", ".", "path", ".", "dirname", "(", "filename", ")", "pathlib", ".", "Path", "(", "dirname", ")", ".", "mkdir", "(", "parents", "=", "True", ",", "exist_ok", "=", "True", ")", "return", "io", ".", "open", "(", "filename", ",", "'w'", ")" ]
Opens filename for writing, creating the directories if needed.
[ "Opens", "filename", "for", "writing", "creating", "the", "directories", "if", "needed", "." ]
4f19ec88bfa1b6670ff37ccbfc53c6b67251b027
https://github.com/sk-/git-lint/blob/4f19ec88bfa1b6670ff37ccbfc53c6b67251b027/gitlint/utils.py#L67-L72
-1
251,258
sk-/git-lint
gitlint/utils.py
_get_cache_filename
def _get_cache_filename(name, filename): """Returns the cache location for filename and linter name.""" filename = os.path.abspath(filename)[1:] home_folder = os.path.expanduser('~') base_cache_dir = os.path.join(home_folder, '.git-lint', 'cache') return os.path.join(base_cache_dir, name, filename)
python
def _get_cache_filename(name, filename): """Returns the cache location for filename and linter name.""" filename = os.path.abspath(filename)[1:] home_folder = os.path.expanduser('~') base_cache_dir = os.path.join(home_folder, '.git-lint', 'cache') return os.path.join(base_cache_dir, name, filename)
[ "def", "_get_cache_filename", "(", "name", ",", "filename", ")", ":", "filename", "=", "os", ".", "path", ".", "abspath", "(", "filename", ")", "[", "1", ":", "]", "home_folder", "=", "os", ".", "path", ".", "expanduser", "(", "'~'", ")", "base_cache_dir", "=", "os", ".", "path", ".", "join", "(", "home_folder", ",", "'.git-lint'", ",", "'cache'", ")", "return", "os", ".", "path", ".", "join", "(", "base_cache_dir", ",", "name", ",", "filename", ")" ]
Returns the cache location for filename and linter name.
[ "Returns", "the", "cache", "location", "for", "filename", "and", "linter", "name", "." ]
4f19ec88bfa1b6670ff37ccbfc53c6b67251b027
https://github.com/sk-/git-lint/blob/4f19ec88bfa1b6670ff37ccbfc53c6b67251b027/gitlint/utils.py#L75-L81
-1
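The cache layout produced by _get_cache_filename above nests a file's absolute path (minus its leading separator) under ~/.git-lint/cache/<linter name>/; a small illustration with hypothetical paths:

import os

name = 'pylint'
filename = '/home/user/repo/gitlint/utils.py'
cache_path = os.path.join(os.path.expanduser('~'), '.git-lint', 'cache', name, filename[1:])
print(cache_path)
# e.g. /home/user/.git-lint/cache/pylint/home/user/repo/gitlint/utils.py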
251,259
sk-/git-lint
gitlint/utils.py
get_output_from_cache
def get_output_from_cache(name, filename): """Returns the output from the cache if still valid. It checks that the cache file is defined and that its modification time is after the modification time of the original file. Args: name: string: name of the linter. filename: string: path of the filename for which we are retrieving the output. Returns: a string with the output, if it is still valid, or None otherwise. """ cache_filename = _get_cache_filename(name, filename) if (os.path.exists(cache_filename) and os.path.getmtime(filename) < os.path.getmtime(cache_filename)): with io.open(cache_filename) as f: return f.read() return None
python
def get_output_from_cache(name, filename): """Returns the output from the cache if still valid. It checks that the cache file is defined and that its modification time is after the modification time of the original file. Args: name: string: name of the linter. filename: string: path of the filename for which we are retrieving the output. Returns: a string with the output, if it is still valid, or None otherwise. """ cache_filename = _get_cache_filename(name, filename) if (os.path.exists(cache_filename) and os.path.getmtime(filename) < os.path.getmtime(cache_filename)): with io.open(cache_filename) as f: return f.read() return None
[ "def", "get_output_from_cache", "(", "name", ",", "filename", ")", ":", "cache_filename", "=", "_get_cache_filename", "(", "name", ",", "filename", ")", "if", "(", "os", ".", "path", ".", "exists", "(", "cache_filename", ")", "and", "os", ".", "path", ".", "getmtime", "(", "filename", ")", "<", "os", ".", "path", ".", "getmtime", "(", "cache_filename", ")", ")", ":", "with", "io", ".", "open", "(", "cache_filename", ")", "as", "f", ":", "return", "f", ".", "read", "(", ")", "return", "None" ]
Returns the output from the cache if still valid. It checks that the cache file is defined and that its modification time is after the modification time of the original file. Args: name: string: name of the linter. filename: string: path of the filename for which we are retrieving the output. Returns: a string with the output, if it is still valid, or None otherwise.
[ "Returns", "the", "output", "from", "the", "cache", "if", "still", "valid", "." ]
4f19ec88bfa1b6670ff37ccbfc53c6b67251b027
https://github.com/sk-/git-lint/blob/4f19ec88bfa1b6670ff37ccbfc53c6b67251b027/gitlint/utils.py#L84-L103
-1
251,260
sk-/git-lint
gitlint/utils.py
save_output_in_cache
def save_output_in_cache(name, filename, output): """Saves output in the cache location. Args: name: string: name of the linter. filename: string: path of the filename for which we are saving the output. output: string: full output (not yet filetered) of the lint command. """ cache_filename = _get_cache_filename(name, filename) with _open_for_write(cache_filename) as f: f.write(output)
python
def save_output_in_cache(name, filename, output): """Saves output in the cache location. Args: name: string: name of the linter. filename: string: path of the filename for which we are saving the output. output: string: full output (not yet filetered) of the lint command. """ cache_filename = _get_cache_filename(name, filename) with _open_for_write(cache_filename) as f: f.write(output)
[ "def", "save_output_in_cache", "(", "name", ",", "filename", ",", "output", ")", ":", "cache_filename", "=", "_get_cache_filename", "(", "name", ",", "filename", ")", "with", "_open_for_write", "(", "cache_filename", ")", "as", "f", ":", "f", ".", "write", "(", "output", ")" ]
Saves output in the cache location. Args: name: string: name of the linter. filename: string: path of the filename for which we are saving the output. output: string: full output (not yet filetered) of the lint command.
[ "Saves", "output", "in", "the", "cache", "location", "." ]
4f19ec88bfa1b6670ff37ccbfc53c6b67251b027
https://github.com/sk-/git-lint/blob/4f19ec88bfa1b6670ff37ccbfc53c6b67251b027/gitlint/utils.py#L106-L116
-1
251,261
pylast/pylast
src/pylast/__init__.py
md5
def md5(text): """Returns the md5 hash of a string.""" h = hashlib.md5() h.update(_unicode(text).encode("utf-8")) return h.hexdigest()
python
def md5(text): """Returns the md5 hash of a string.""" h = hashlib.md5() h.update(_unicode(text).encode("utf-8")) return h.hexdigest()
[ "def", "md5", "(", "text", ")", ":", "h", "=", "hashlib", ".", "md5", "(", ")", "h", ".", "update", "(", "_unicode", "(", "text", ")", ".", "encode", "(", "\"utf-8\"", ")", ")", "return", "h", ".", "hexdigest", "(", ")" ]
Returns the md5 hash of a string.
[ "Returns", "the", "md5", "hash", "of", "a", "string", "." ]
a52f66d316797fc819b5f1d186d77f18ba97b4ff
https://github.com/pylast/pylast/blob/a52f66d316797fc819b5f1d186d77f18ba97b4ff/src/pylast/__init__.py#L2766-L2772
-1
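md5 above simply hex-digests the UTF-8 encoding of the text (pylast's README uses it to build a password_hash); the equivalent standalone call:

import hashlib

print(hashlib.md5('test'.encode('utf-8')).hexdigest())
# 098f6bcd4621d373cade4e832627b4f6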
251,262
pylast/pylast
src/pylast/__init__.py
cleanup_nodes
def cleanup_nodes(doc): """ Remove text nodes containing only whitespace """ for node in doc.documentElement.childNodes: if node.nodeType == Node.TEXT_NODE and node.nodeValue.isspace(): doc.documentElement.removeChild(node) return doc
python
def cleanup_nodes(doc): """ Remove text nodes containing only whitespace """ for node in doc.documentElement.childNodes: if node.nodeType == Node.TEXT_NODE and node.nodeValue.isspace(): doc.documentElement.removeChild(node) return doc
[ "def", "cleanup_nodes", "(", "doc", ")", ":", "for", "node", "in", "doc", ".", "documentElement", ".", "childNodes", ":", "if", "node", ".", "nodeType", "==", "Node", ".", "TEXT_NODE", "and", "node", ".", "nodeValue", ".", "isspace", "(", ")", ":", "doc", ".", "documentElement", ".", "removeChild", "(", "node", ")", "return", "doc" ]
Remove text nodes containing only whitespace
[ "Remove", "text", "nodes", "containing", "only", "whitespace" ]
a52f66d316797fc819b5f1d186d77f18ba97b4ff
https://github.com/pylast/pylast/blob/a52f66d316797fc819b5f1d186d77f18ba97b4ff/src/pylast/__init__.py#L2790-L2797
-1
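A quick minidom illustration of what cleanup_nodes above removes (whitespace-only text nodes directly under the document element); the XML snippet is made up:

from xml.dom import Node
from xml.dom.minidom import parseString

doc = parseString('<lfm>\n  <artists total="2"/>\n</lfm>')
print(len(doc.documentElement.childNodes))  # 3: whitespace text, <artists>, whitespace text

for node in list(doc.documentElement.childNodes):  # iterate over a copy before removing
    if node.nodeType == Node.TEXT_NODE and node.nodeValue.isspace():
        doc.documentElement.removeChild(node)

print(len(doc.documentElement.childNodes))  # 1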
251,263
pylast/pylast
src/pylast/__init__.py
_collect_nodes
def _collect_nodes(limit, sender, method_name, cacheable, params=None): """ Returns a sequence of dom.Node objects about as close to limit as possible """ if not params: params = sender._get_params() nodes = [] page = 1 end_of_pages = False while not end_of_pages and (not limit or (limit and len(nodes) < limit)): params["page"] = str(page) tries = 1 while True: try: doc = sender._request(method_name, cacheable, params) break # success except Exception as e: if tries >= 3: raise e # Wait and try again time.sleep(1) tries += 1 doc = cleanup_nodes(doc) # break if there are no child nodes if not doc.documentElement.childNodes: break main = doc.documentElement.childNodes[0] if main.hasAttribute("totalPages"): total_pages = _number(main.getAttribute("totalPages")) elif main.hasAttribute("totalpages"): total_pages = _number(main.getAttribute("totalpages")) else: raise Exception("No total pages attribute") for node in main.childNodes: if not node.nodeType == xml.dom.Node.TEXT_NODE and ( not limit or (len(nodes) < limit) ): nodes.append(node) if page >= total_pages: end_of_pages = True page += 1 return nodes
python
def _collect_nodes(limit, sender, method_name, cacheable, params=None): """ Returns a sequence of dom.Node objects about as close to limit as possible """ if not params: params = sender._get_params() nodes = [] page = 1 end_of_pages = False while not end_of_pages and (not limit or (limit and len(nodes) < limit)): params["page"] = str(page) tries = 1 while True: try: doc = sender._request(method_name, cacheable, params) break # success except Exception as e: if tries >= 3: raise e # Wait and try again time.sleep(1) tries += 1 doc = cleanup_nodes(doc) # break if there are no child nodes if not doc.documentElement.childNodes: break main = doc.documentElement.childNodes[0] if main.hasAttribute("totalPages"): total_pages = _number(main.getAttribute("totalPages")) elif main.hasAttribute("totalpages"): total_pages = _number(main.getAttribute("totalpages")) else: raise Exception("No total pages attribute") for node in main.childNodes: if not node.nodeType == xml.dom.Node.TEXT_NODE and ( not limit or (len(nodes) < limit) ): nodes.append(node) if page >= total_pages: end_of_pages = True page += 1 return nodes
[ "def", "_collect_nodes", "(", "limit", ",", "sender", ",", "method_name", ",", "cacheable", ",", "params", "=", "None", ")", ":", "if", "not", "params", ":", "params", "=", "sender", ".", "_get_params", "(", ")", "nodes", "=", "[", "]", "page", "=", "1", "end_of_pages", "=", "False", "while", "not", "end_of_pages", "and", "(", "not", "limit", "or", "(", "limit", "and", "len", "(", "nodes", ")", "<", "limit", ")", ")", ":", "params", "[", "\"page\"", "]", "=", "str", "(", "page", ")", "tries", "=", "1", "while", "True", ":", "try", ":", "doc", "=", "sender", ".", "_request", "(", "method_name", ",", "cacheable", ",", "params", ")", "break", "# success", "except", "Exception", "as", "e", ":", "if", "tries", ">=", "3", ":", "raise", "e", "# Wait and try again", "time", ".", "sleep", "(", "1", ")", "tries", "+=", "1", "doc", "=", "cleanup_nodes", "(", "doc", ")", "# break if there are no child nodes", "if", "not", "doc", ".", "documentElement", ".", "childNodes", ":", "break", "main", "=", "doc", ".", "documentElement", ".", "childNodes", "[", "0", "]", "if", "main", ".", "hasAttribute", "(", "\"totalPages\"", ")", ":", "total_pages", "=", "_number", "(", "main", ".", "getAttribute", "(", "\"totalPages\"", ")", ")", "elif", "main", ".", "hasAttribute", "(", "\"totalpages\"", ")", ":", "total_pages", "=", "_number", "(", "main", ".", "getAttribute", "(", "\"totalpages\"", ")", ")", "else", ":", "raise", "Exception", "(", "\"No total pages attribute\"", ")", "for", "node", "in", "main", ".", "childNodes", ":", "if", "not", "node", ".", "nodeType", "==", "xml", ".", "dom", ".", "Node", ".", "TEXT_NODE", "and", "(", "not", "limit", "or", "(", "len", "(", "nodes", ")", "<", "limit", ")", ")", ":", "nodes", ".", "append", "(", "node", ")", "if", "page", ">=", "total_pages", ":", "end_of_pages", "=", "True", "page", "+=", "1", "return", "nodes" ]
Returns a sequence of dom.Node objects about as close to limit as possible
[ "Returns", "a", "sequence", "of", "dom", ".", "Node", "objects", "about", "as", "close", "to", "limit", "as", "possible" ]
a52f66d316797fc819b5f1d186d77f18ba97b4ff
https://github.com/pylast/pylast/blob/a52f66d316797fc819b5f1d186d77f18ba97b4ff/src/pylast/__init__.py#L2800-L2852
-1
251,264
pylast/pylast
src/pylast/__init__.py
_extract
def _extract(node, name, index=0): """Extracts a value from the xml string""" nodes = node.getElementsByTagName(name) if len(nodes): if nodes[index].firstChild: return _unescape_htmlentity(nodes[index].firstChild.data.strip()) else: return None
python
def _extract(node, name, index=0): """Extracts a value from the xml string""" nodes = node.getElementsByTagName(name) if len(nodes): if nodes[index].firstChild: return _unescape_htmlentity(nodes[index].firstChild.data.strip()) else: return None
[ "def", "_extract", "(", "node", ",", "name", ",", "index", "=", "0", ")", ":", "nodes", "=", "node", ".", "getElementsByTagName", "(", "name", ")", "if", "len", "(", "nodes", ")", ":", "if", "nodes", "[", "index", "]", ".", "firstChild", ":", "return", "_unescape_htmlentity", "(", "nodes", "[", "index", "]", ".", "firstChild", ".", "data", ".", "strip", "(", ")", ")", "else", ":", "return", "None" ]
Extracts a value from the xml string
[ "Extracts", "a", "value", "from", "the", "xml", "string" ]
a52f66d316797fc819b5f1d186d77f18ba97b4ff
https://github.com/pylast/pylast/blob/a52f66d316797fc819b5f1d186d77f18ba97b4ff/src/pylast/__init__.py#L2855-L2864
-1
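_extract above indexes into getElementsByTagName results in document order, which is how callers such as get_top_tracks further down pick the track name (index 0) versus the artist name (index 1); a standalone minidom illustration with a made-up response fragment:

from xml.dom.minidom import parseString

doc = parseString('<track><name>Blue Monday</name><artist><name>New Order</name></artist></track>')
node = doc.documentElement

names = node.getElementsByTagName('name')
print(names[0].firstChild.data.strip())  # Blue Monday  (what _extract(node, "name") returns)
print(names[1].firstChild.data.strip())  # New Order    (what _extract(node, "name", 1) returns)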
251,265
pylast/pylast
src/pylast/__init__.py
_extract_all
def _extract_all(node, name, limit_count=None): """Extracts all the values from the xml string. returning a list.""" seq = [] for i in range(0, len(node.getElementsByTagName(name))): if len(seq) == limit_count: break seq.append(_extract(node, name, i)) return seq
python
def _extract_all(node, name, limit_count=None): """Extracts all the values from the xml string. returning a list.""" seq = [] for i in range(0, len(node.getElementsByTagName(name))): if len(seq) == limit_count: break seq.append(_extract(node, name, i)) return seq
[ "def", "_extract_all", "(", "node", ",", "name", ",", "limit_count", "=", "None", ")", ":", "seq", "=", "[", "]", "for", "i", "in", "range", "(", "0", ",", "len", "(", "node", ".", "getElementsByTagName", "(", "name", ")", ")", ")", ":", "if", "len", "(", "seq", ")", "==", "limit_count", ":", "break", "seq", ".", "append", "(", "_extract", "(", "node", ",", "name", ",", "i", ")", ")", "return", "seq" ]
Extracts all the values from the xml string, returning a list.
[ "Extracts", "all", "the", "values", "from", "the", "xml", "string", ".", "returning", "a", "list", "." ]
a52f66d316797fc819b5f1d186d77f18ba97b4ff
https://github.com/pylast/pylast/blob/a52f66d316797fc819b5f1d186d77f18ba97b4ff/src/pylast/__init__.py#L2867-L2878
-1
251,266
pylast/pylast
src/pylast/__init__.py
_Network._delay_call
def _delay_call(self): """ Makes sure that web service calls are at least 0.2 seconds apart. """ now = time.time() time_since_last = now - self.last_call_time if time_since_last < DELAY_TIME: time.sleep(DELAY_TIME - time_since_last) self.last_call_time = now
python
def _delay_call(self): """ Makes sure that web service calls are at least 0.2 seconds apart. """ now = time.time() time_since_last = now - self.last_call_time if time_since_last < DELAY_TIME: time.sleep(DELAY_TIME - time_since_last) self.last_call_time = now
[ "def", "_delay_call", "(", "self", ")", ":", "now", "=", "time", ".", "time", "(", ")", "time_since_last", "=", "now", "-", "self", ".", "last_call_time", "if", "time_since_last", "<", "DELAY_TIME", ":", "time", ".", "sleep", "(", "DELAY_TIME", "-", "time_since_last", ")", "self", ".", "last_call_time", "=", "now" ]
Makes sure that web service calls are at least 0.2 seconds apart.
[ "Makes", "sure", "that", "web", "service", "calls", "are", "at", "least", "0", ".", "2", "seconds", "apart", "." ]
a52f66d316797fc819b5f1d186d77f18ba97b4ff
https://github.com/pylast/pylast/blob/a52f66d316797fc819b5f1d186d77f18ba97b4ff/src/pylast/__init__.py#L309-L320
-1
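_delay_call is a simple call-spacing throttle. A self-contained sketch of the same pattern is below; the 0.2-second constant is assumed to match pylast's DELAY_TIME, per the docstring above.

import time

DELAY_TIME = 0.2      # assumed value of pylast's module-level constant (0.2 s per the docstring)
last_call_time = 0.0

def rate_limited(func, *args, **kwargs):
    """Space successive calls at least DELAY_TIME seconds apart, mirroring _delay_call."""
    global last_call_time
    now = time.time()
    if now - last_call_time < DELAY_TIME:
        time.sleep(DELAY_TIME - (now - last_call_time))
    last_call_time = now
    return func(*args, **kwargs)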
251,267
pylast/pylast
src/pylast/__init__.py
_Network.get_top_artists
def get_top_artists(self, limit=None, cacheable=True): """Returns the most played artists as a sequence of TopItem objects.""" params = {} if limit: params["limit"] = limit doc = _Request(self, "chart.getTopArtists", params).execute(cacheable) return _extract_top_artists(doc, self)
python
def get_top_artists(self, limit=None, cacheable=True): """Returns the most played artists as a sequence of TopItem objects.""" params = {} if limit: params["limit"] = limit doc = _Request(self, "chart.getTopArtists", params).execute(cacheable) return _extract_top_artists(doc, self)
[ "def", "get_top_artists", "(", "self", ",", "limit", "=", "None", ",", "cacheable", "=", "True", ")", ":", "params", "=", "{", "}", "if", "limit", ":", "params", "[", "\"limit\"", "]", "=", "limit", "doc", "=", "_Request", "(", "self", ",", "\"chart.getTopArtists\"", ",", "params", ")", ".", "execute", "(", "cacheable", ")", "return", "_extract_top_artists", "(", "doc", ",", "self", ")" ]
Returns the most played artists as a sequence of TopItem objects.
[ "Returns", "the", "most", "played", "artists", "as", "a", "sequence", "of", "TopItem", "objects", "." ]
a52f66d316797fc819b5f1d186d77f18ba97b4ff
https://github.com/pylast/pylast/blob/a52f66d316797fc819b5f1d186d77f18ba97b4ff/src/pylast/__init__.py#L322-L331
-1
251,268
pylast/pylast
src/pylast/__init__.py
_Network.get_top_tracks
def get_top_tracks(self, limit=None, cacheable=True): """Returns the most played tracks as a sequence of TopItem objects.""" params = {} if limit: params["limit"] = limit doc = _Request(self, "chart.getTopTracks", params).execute(cacheable) seq = [] for node in doc.getElementsByTagName("track"): title = _extract(node, "name") artist = _extract(node, "name", 1) track = Track(artist, title, self) weight = _number(_extract(node, "playcount")) seq.append(TopItem(track, weight)) return seq
python
def get_top_tracks(self, limit=None, cacheable=True): """Returns the most played tracks as a sequence of TopItem objects.""" params = {} if limit: params["limit"] = limit doc = _Request(self, "chart.getTopTracks", params).execute(cacheable) seq = [] for node in doc.getElementsByTagName("track"): title = _extract(node, "name") artist = _extract(node, "name", 1) track = Track(artist, title, self) weight = _number(_extract(node, "playcount")) seq.append(TopItem(track, weight)) return seq
[ "def", "get_top_tracks", "(", "self", ",", "limit", "=", "None", ",", "cacheable", "=", "True", ")", ":", "params", "=", "{", "}", "if", "limit", ":", "params", "[", "\"limit\"", "]", "=", "limit", "doc", "=", "_Request", "(", "self", ",", "\"chart.getTopTracks\"", ",", "params", ")", ".", "execute", "(", "cacheable", ")", "seq", "=", "[", "]", "for", "node", "in", "doc", ".", "getElementsByTagName", "(", "\"track\"", ")", ":", "title", "=", "_extract", "(", "node", ",", "\"name\"", ")", "artist", "=", "_extract", "(", "node", ",", "\"name\"", ",", "1", ")", "track", "=", "Track", "(", "artist", ",", "title", ",", "self", ")", "weight", "=", "_number", "(", "_extract", "(", "node", ",", "\"playcount\"", ")", ")", "seq", ".", "append", "(", "TopItem", "(", "track", ",", "weight", ")", ")", "return", "seq" ]
Returns the most played tracks as a sequence of TopItem objects.
[ "Returns", "the", "most", "played", "tracks", "as", "a", "sequence", "of", "TopItem", "objects", "." ]
a52f66d316797fc819b5f1d186d77f18ba97b4ff
https://github.com/pylast/pylast/blob/a52f66d316797fc819b5f1d186d77f18ba97b4ff/src/pylast/__init__.py#L333-L350
-1
251,269
pylast/pylast
src/pylast/__init__.py
_Network.get_top_tags
def get_top_tags(self, limit=None, cacheable=True): """Returns the most used tags as a sequence of TopItem objects.""" # Last.fm has no "limit" parameter for tag.getTopTags # so we need to get all (250) and then limit locally doc = _Request(self, "tag.getTopTags").execute(cacheable) seq = [] for node in doc.getElementsByTagName("tag"): if limit and len(seq) >= limit: break tag = Tag(_extract(node, "name"), self) weight = _number(_extract(node, "count")) seq.append(TopItem(tag, weight)) return seq
python
def get_top_tags(self, limit=None, cacheable=True): """Returns the most used tags as a sequence of TopItem objects.""" # Last.fm has no "limit" parameter for tag.getTopTags # so we need to get all (250) and then limit locally doc = _Request(self, "tag.getTopTags").execute(cacheable) seq = [] for node in doc.getElementsByTagName("tag"): if limit and len(seq) >= limit: break tag = Tag(_extract(node, "name"), self) weight = _number(_extract(node, "count")) seq.append(TopItem(tag, weight)) return seq
[ "def", "get_top_tags", "(", "self", ",", "limit", "=", "None", ",", "cacheable", "=", "True", ")", ":", "# Last.fm has no \"limit\" parameter for tag.getTopTags", "# so we need to get all (250) and then limit locally", "doc", "=", "_Request", "(", "self", ",", "\"tag.getTopTags\"", ")", ".", "execute", "(", "cacheable", ")", "seq", "=", "[", "]", "for", "node", "in", "doc", ".", "getElementsByTagName", "(", "\"tag\"", ")", ":", "if", "limit", "and", "len", "(", "seq", ")", ">=", "limit", ":", "break", "tag", "=", "Tag", "(", "_extract", "(", "node", ",", "\"name\"", ")", ",", "self", ")", "weight", "=", "_number", "(", "_extract", "(", "node", ",", "\"count\"", ")", ")", "seq", ".", "append", "(", "TopItem", "(", "tag", ",", "weight", ")", ")", "return", "seq" ]
Returns the most used tags as a sequence of TopItem objects.
[ "Returns", "the", "most", "used", "tags", "as", "a", "sequence", "of", "TopItem", "objects", "." ]
a52f66d316797fc819b5f1d186d77f18ba97b4ff
https://github.com/pylast/pylast/blob/a52f66d316797fc819b5f1d186d77f18ba97b4ff/src/pylast/__init__.py#L352-L367
-1
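Typical use of the three chart getters above looks roughly like the sketch below. It assumes LastFMNetwork is pylast's public entry point and that TopItem exposes the wrapped object and its weight; the API credentials are placeholders.

import pylast

# Placeholder credentials; a real key/secret pair comes from the Last.fm API account page.
network = pylast.LastFMNetwork(api_key="YOUR_API_KEY", api_secret="YOUR_API_SECRET")

for top in network.get_top_artists(limit=5):
    print(top.item, top.weight)               # Artist and its playcount
for top in network.get_top_tracks(limit=5):
    print(top.item, top.weight)               # Track and its playcount
for top in network.get_top_tags(limit=5):     # limit is applied locally, as noted in the code above
    print(top.item, top.weight)               # Tag and its usage count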
251,270
pylast/pylast
src/pylast/__init__.py
_Network.enable_proxy
def enable_proxy(self, host, port): """Enable a default web proxy""" self.proxy = [host, _number(port)] self.proxy_enabled = True
python
def enable_proxy(self, host, port): """Enable a default web proxy""" self.proxy = [host, _number(port)] self.proxy_enabled = True
[ "def", "enable_proxy", "(", "self", ",", "host", ",", "port", ")", ":", "self", ".", "proxy", "=", "[", "host", ",", "_number", "(", "port", ")", "]", "self", ".", "proxy_enabled", "=", "True" ]
Enable a default web proxy
[ "Enable", "a", "default", "web", "proxy" ]
a52f66d316797fc819b5f1d186d77f18ba97b4ff
https://github.com/pylast/pylast/blob/a52f66d316797fc819b5f1d186d77f18ba97b4ff/src/pylast/__init__.py#L417-L421
-1
251,271
pylast/pylast
src/pylast/__init__.py
_Network.enable_caching
def enable_caching(self, file_path=None): """Enables caching request-wide for all cacheable calls. * file_path: A file path for the backend storage file. If None set, a temp file would probably be created, according the backend. """ if not file_path: file_path = tempfile.mktemp(prefix="pylast_tmp_") self.cache_backend = _ShelfCacheBackend(file_path)
python
def enable_caching(self, file_path=None): """Enables caching request-wide for all cacheable calls. * file_path: A file path for the backend storage file. If None set, a temp file would probably be created, according the backend. """ if not file_path: file_path = tempfile.mktemp(prefix="pylast_tmp_") self.cache_backend = _ShelfCacheBackend(file_path)
[ "def", "enable_caching", "(", "self", ",", "file_path", "=", "None", ")", ":", "if", "not", "file_path", ":", "file_path", "=", "tempfile", ".", "mktemp", "(", "prefix", "=", "\"pylast_tmp_\"", ")", "self", ".", "cache_backend", "=", "_ShelfCacheBackend", "(", "file_path", ")" ]
Enables caching request-wide for all cacheable calls. * file_path: A file path for the backend storage file. If None set, a temp file would probably be created, according the backend.
[ "Enables", "caching", "request", "-", "wide", "for", "all", "cacheable", "calls", "." ]
a52f66d316797fc819b5f1d186d77f18ba97b4ff
https://github.com/pylast/pylast/blob/a52f66d316797fc819b5f1d186d77f18ba97b4ff/src/pylast/__init__.py#L450-L460
-1
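The two toggles above are usually switched on right after constructing the network. A short sketch, assuming `network` is a pylast network object as built earlier; the cache path and proxy address are placeholders.

network.enable_caching("/tmp/pylast_cache")       # cacheable requests now hit _ShelfCacheBackend first
network.enable_proxy("proxy.example.com", 8080)   # placeholder proxy host and port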
251,272
pylast/pylast
src/pylast/__init__.py
_Network.get_track_by_mbid
def get_track_by_mbid(self, mbid): """Looks up a track by its MusicBrainz ID""" params = {"mbid": mbid} doc = _Request(self, "track.getInfo", params).execute(True) return Track(_extract(doc, "name", 1), _extract(doc, "name"), self)
python
def get_track_by_mbid(self, mbid): """Looks up a track by its MusicBrainz ID""" params = {"mbid": mbid} doc = _Request(self, "track.getInfo", params).execute(True) return Track(_extract(doc, "name", 1), _extract(doc, "name"), self)
[ "def", "get_track_by_mbid", "(", "self", ",", "mbid", ")", ":", "params", "=", "{", "\"mbid\"", ":", "mbid", "}", "doc", "=", "_Request", "(", "self", ",", "\"track.getInfo\"", ",", "params", ")", ".", "execute", "(", "True", ")", "return", "Track", "(", "_extract", "(", "doc", ",", "\"name\"", ",", "1", ")", ",", "_extract", "(", "doc", ",", "\"name\"", ")", ",", "self", ")" ]
Looks up a track by its MusicBrainz ID
[ "Looks", "up", "a", "track", "by", "its", "MusicBrainz", "ID" ]
a52f66d316797fc819b5f1d186d77f18ba97b4ff
https://github.com/pylast/pylast/blob/a52f66d316797fc819b5f1d186d77f18ba97b4ff/src/pylast/__init__.py#L496-L503
-1
251,273
pylast/pylast
src/pylast/__init__.py
_Network.get_artist_by_mbid
def get_artist_by_mbid(self, mbid): """Looks up an artist by its MusicBrainz ID""" params = {"mbid": mbid} doc = _Request(self, "artist.getInfo", params).execute(True) return Artist(_extract(doc, "name"), self)
python
def get_artist_by_mbid(self, mbid): """Looks up an artist by its MusicBrainz ID""" params = {"mbid": mbid} doc = _Request(self, "artist.getInfo", params).execute(True) return Artist(_extract(doc, "name"), self)
[ "def", "get_artist_by_mbid", "(", "self", ",", "mbid", ")", ":", "params", "=", "{", "\"mbid\"", ":", "mbid", "}", "doc", "=", "_Request", "(", "self", ",", "\"artist.getInfo\"", ",", "params", ")", ".", "execute", "(", "True", ")", "return", "Artist", "(", "_extract", "(", "doc", ",", "\"name\"", ")", ",", "self", ")" ]
Looks up an artist by its MusicBrainz ID
[ "Looks", "up", "an", "artist", "by", "its", "MusicBrainz", "ID" ]
a52f66d316797fc819b5f1d186d77f18ba97b4ff
https://github.com/pylast/pylast/blob/a52f66d316797fc819b5f1d186d77f18ba97b4ff/src/pylast/__init__.py#L505-L512
-1
251,274
pylast/pylast
src/pylast/__init__.py
_Network.get_album_by_mbid
def get_album_by_mbid(self, mbid): """Looks up an album by its MusicBrainz ID""" params = {"mbid": mbid} doc = _Request(self, "album.getInfo", params).execute(True) return Album(_extract(doc, "artist"), _extract(doc, "name"), self)
python
def get_album_by_mbid(self, mbid): """Looks up an album by its MusicBrainz ID""" params = {"mbid": mbid} doc = _Request(self, "album.getInfo", params).execute(True) return Album(_extract(doc, "artist"), _extract(doc, "name"), self)
[ "def", "get_album_by_mbid", "(", "self", ",", "mbid", ")", ":", "params", "=", "{", "\"mbid\"", ":", "mbid", "}", "doc", "=", "_Request", "(", "self", ",", "\"album.getInfo\"", ",", "params", ")", ".", "execute", "(", "True", ")", "return", "Album", "(", "_extract", "(", "doc", ",", "\"artist\"", ")", ",", "_extract", "(", "doc", ",", "\"name\"", ")", ",", "self", ")" ]
Looks up an album by its MusicBrainz ID
[ "Looks", "up", "an", "album", "by", "its", "MusicBrainz", "ID" ]
a52f66d316797fc819b5f1d186d77f18ba97b4ff
https://github.com/pylast/pylast/blob/a52f66d316797fc819b5f1d186d77f18ba97b4ff/src/pylast/__init__.py#L514-L521
-1
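The three MusicBrainz lookups above all follow the same shape. A usage sketch, assuming a configured `network` object; the MBID is a placeholder.

mbid = "00000000-0000-0000-0000-000000000000"     # placeholder MusicBrainz ID
track = network.get_track_by_mbid(mbid)           # -> Track
artist = network.get_artist_by_mbid(mbid)         # -> Artist
album = network.get_album_by_mbid(mbid)           # -> Album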
251,275
pylast/pylast
src/pylast/__init__.py
_Network.update_now_playing
def update_now_playing( self, artist, title, album=None, album_artist=None, duration=None, track_number=None, mbid=None, context=None, ): """ Used to notify Last.fm that a user has started listening to a track. Parameters: artist (Required) : The artist name title (Required) : The track title album (Optional) : The album name. album_artist (Optional) : The album artist - if this differs from the track artist. duration (Optional) : The length of the track in seconds. track_number (Optional) : The track number of the track on the album. mbid (Optional) : The MusicBrainz Track ID. context (Optional) : Sub-client version (not public, only enabled for certain API keys) """ params = {"track": title, "artist": artist} if album: params["album"] = album if album_artist: params["albumArtist"] = album_artist if context: params["context"] = context if track_number: params["trackNumber"] = track_number if mbid: params["mbid"] = mbid if duration: params["duration"] = duration _Request(self, "track.updateNowPlaying", params).execute()
python
def update_now_playing( self, artist, title, album=None, album_artist=None, duration=None, track_number=None, mbid=None, context=None, ): """ Used to notify Last.fm that a user has started listening to a track. Parameters: artist (Required) : The artist name title (Required) : The track title album (Optional) : The album name. album_artist (Optional) : The album artist - if this differs from the track artist. duration (Optional) : The length of the track in seconds. track_number (Optional) : The track number of the track on the album. mbid (Optional) : The MusicBrainz Track ID. context (Optional) : Sub-client version (not public, only enabled for certain API keys) """ params = {"track": title, "artist": artist} if album: params["album"] = album if album_artist: params["albumArtist"] = album_artist if context: params["context"] = context if track_number: params["trackNumber"] = track_number if mbid: params["mbid"] = mbid if duration: params["duration"] = duration _Request(self, "track.updateNowPlaying", params).execute()
[ "def", "update_now_playing", "(", "self", ",", "artist", ",", "title", ",", "album", "=", "None", ",", "album_artist", "=", "None", ",", "duration", "=", "None", ",", "track_number", "=", "None", ",", "mbid", "=", "None", ",", "context", "=", "None", ",", ")", ":", "params", "=", "{", "\"track\"", ":", "title", ",", "\"artist\"", ":", "artist", "}", "if", "album", ":", "params", "[", "\"album\"", "]", "=", "album", "if", "album_artist", ":", "params", "[", "\"albumArtist\"", "]", "=", "album_artist", "if", "context", ":", "params", "[", "\"context\"", "]", "=", "context", "if", "track_number", ":", "params", "[", "\"trackNumber\"", "]", "=", "track_number", "if", "mbid", ":", "params", "[", "\"mbid\"", "]", "=", "mbid", "if", "duration", ":", "params", "[", "\"duration\"", "]", "=", "duration", "_Request", "(", "self", ",", "\"track.updateNowPlaying\"", ",", "params", ")", ".", "execute", "(", ")" ]
Used to notify Last.fm that a user has started listening to a track. Parameters: artist (Required) : The artist name title (Required) : The track title album (Optional) : The album name. album_artist (Optional) : The album artist - if this differs from the track artist. duration (Optional) : The length of the track in seconds. track_number (Optional) : The track number of the track on the album. mbid (Optional) : The MusicBrainz Track ID. context (Optional) : Sub-client version (not public, only enabled for certain API keys)
[ "Used", "to", "notify", "Last", ".", "fm", "that", "a", "user", "has", "started", "listening", "to", "a", "track", "." ]
a52f66d316797fc819b5f1d186d77f18ba97b4ff
https://github.com/pylast/pylast/blob/a52f66d316797fc819b5f1d186d77f18ba97b4ff/src/pylast/__init__.py#L523-L566
-1
251,276
pylast/pylast
src/pylast/__init__.py
_Network.scrobble
def scrobble( self, artist, title, timestamp, album=None, album_artist=None, track_number=None, duration=None, stream_id=None, context=None, mbid=None, ): """Used to add a track-play to a user's profile. Parameters: artist (Required) : The artist name. title (Required) : The track name. timestamp (Required) : The time the track started playing, in UNIX timestamp format (integer number of seconds since 00:00:00, January 1st 1970 UTC). This must be in the UTC time zone. album (Optional) : The album name. album_artist (Optional) : The album artist - if this differs from the track artist. context (Optional) : Sub-client version (not public, only enabled for certain API keys) stream_id (Optional) : The stream id for this track received from the radio.getPlaylist service. track_number (Optional) : The track number of the track on the album. mbid (Optional) : The MusicBrainz Track ID. duration (Optional) : The length of the track in seconds. """ return self.scrobble_many( ( { "artist": artist, "title": title, "timestamp": timestamp, "album": album, "album_artist": album_artist, "track_number": track_number, "duration": duration, "stream_id": stream_id, "context": context, "mbid": mbid, }, ) )
python
def scrobble( self, artist, title, timestamp, album=None, album_artist=None, track_number=None, duration=None, stream_id=None, context=None, mbid=None, ): """Used to add a track-play to a user's profile. Parameters: artist (Required) : The artist name. title (Required) : The track name. timestamp (Required) : The time the track started playing, in UNIX timestamp format (integer number of seconds since 00:00:00, January 1st 1970 UTC). This must be in the UTC time zone. album (Optional) : The album name. album_artist (Optional) : The album artist - if this differs from the track artist. context (Optional) : Sub-client version (not public, only enabled for certain API keys) stream_id (Optional) : The stream id for this track received from the radio.getPlaylist service. track_number (Optional) : The track number of the track on the album. mbid (Optional) : The MusicBrainz Track ID. duration (Optional) : The length of the track in seconds. """ return self.scrobble_many( ( { "artist": artist, "title": title, "timestamp": timestamp, "album": album, "album_artist": album_artist, "track_number": track_number, "duration": duration, "stream_id": stream_id, "context": context, "mbid": mbid, }, ) )
[ "def", "scrobble", "(", "self", ",", "artist", ",", "title", ",", "timestamp", ",", "album", "=", "None", ",", "album_artist", "=", "None", ",", "track_number", "=", "None", ",", "duration", "=", "None", ",", "stream_id", "=", "None", ",", "context", "=", "None", ",", "mbid", "=", "None", ",", ")", ":", "return", "self", ".", "scrobble_many", "(", "(", "{", "\"artist\"", ":", "artist", ",", "\"title\"", ":", "title", ",", "\"timestamp\"", ":", "timestamp", ",", "\"album\"", ":", "album", ",", "\"album_artist\"", ":", "album_artist", ",", "\"track_number\"", ":", "track_number", ",", "\"duration\"", ":", "duration", ",", "\"stream_id\"", ":", "stream_id", ",", "\"context\"", ":", "context", ",", "\"mbid\"", ":", "mbid", ",", "}", ",", ")", ")" ]
Used to add a track-play to a user's profile. Parameters: artist (Required) : The artist name. title (Required) : The track name. timestamp (Required) : The time the track started playing, in UNIX timestamp format (integer number of seconds since 00:00:00, January 1st 1970 UTC). This must be in the UTC time zone. album (Optional) : The album name. album_artist (Optional) : The album artist - if this differs from the track artist. context (Optional) : Sub-client version (not public, only enabled for certain API keys) stream_id (Optional) : The stream id for this track received from the radio.getPlaylist service. track_number (Optional) : The track number of the track on the album. mbid (Optional) : The MusicBrainz Track ID. duration (Optional) : The length of the track in seconds.
[ "Used", "to", "add", "a", "track", "-", "play", "to", "a", "user", "s", "profile", "." ]
a52f66d316797fc819b5f1d186d77f18ba97b4ff
https://github.com/pylast/pylast/blob/a52f66d316797fc819b5f1d186d77f18ba97b4ff/src/pylast/__init__.py#L568-L618
-1
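update_now_playing and scrobble are the two write calls a client normally makes per track. A minimal sketch, assuming `network` has been authenticated with a session key (both calls are signed writes); the artist/title metadata is a placeholder and the timestamp must be UTC seconds, as the docstring requires.

import time

network.update_now_playing(artist="Some Artist", title="Some Track")    # placeholder metadata
# ...later, once the track has actually been listened to:
network.scrobble(artist="Some Artist", title="Some Track", timestamp=int(time.time()))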
251,277
pylast/pylast
src/pylast/__init__.py
_Request._get_signature
def _get_signature(self): """ Returns a 32-character hexadecimal md5 hash of the signature string. """ keys = list(self.params.keys()) keys.sort() string = "" for name in keys: string += name string += self.params[name] string += self.api_secret return md5(string)
python
def _get_signature(self): """ Returns a 32-character hexadecimal md5 hash of the signature string. """ keys = list(self.params.keys()) keys.sort() string = "" for name in keys: string += name string += self.params[name] string += self.api_secret return md5(string)
[ "def", "_get_signature", "(", "self", ")", ":", "keys", "=", "list", "(", "self", ".", "params", ".", "keys", "(", ")", ")", "keys", ".", "sort", "(", ")", "string", "=", "\"\"", "for", "name", "in", "keys", ":", "string", "+=", "name", "string", "+=", "self", ".", "params", "[", "name", "]", "string", "+=", "self", ".", "api_secret", "return", "md5", "(", "string", ")" ]
Returns a 32-character hexadecimal md5 hash of the signature string.
[ "Returns", "a", "32", "-", "character", "hexadecimal", "md5", "hash", "of", "the", "signature", "string", "." ]
a52f66d316797fc819b5f1d186d77f18ba97b4ff
https://github.com/pylast/pylast/blob/a52f66d316797fc819b5f1d186d77f18ba97b4ff/src/pylast/__init__.py#L872-L889
-1
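The request signature above is just an md5 hex digest over the sorted name+value pairs with the API secret appended. A standalone sketch of that computation, using hashlib directly (pylast routes this through its own md5 helper):

import hashlib

def api_signature(params, api_secret):
    # Concatenate each name immediately followed by its value, in sorted key order,
    # then append the shared secret and hash the whole string.
    string = "".join(name + params[name] for name in sorted(params))
    string += api_secret
    return hashlib.md5(string.encode("utf-8")).hexdigest()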
251,278
pylast/pylast
src/pylast/__init__.py
_Request._get_cache_key
def _get_cache_key(self): """ The cache key is a string of concatenated sorted names and values. """ keys = list(self.params.keys()) keys.sort() cache_key = str() for key in keys: if key != "api_sig" and key != "api_key" and key != "sk": cache_key += key + self.params[key] return hashlib.sha1(cache_key.encode("utf-8")).hexdigest()
python
def _get_cache_key(self): """ The cache key is a string of concatenated sorted names and values. """ keys = list(self.params.keys()) keys.sort() cache_key = str() for key in keys: if key != "api_sig" and key != "api_key" and key != "sk": cache_key += key + self.params[key] return hashlib.sha1(cache_key.encode("utf-8")).hexdigest()
[ "def", "_get_cache_key", "(", "self", ")", ":", "keys", "=", "list", "(", "self", ".", "params", ".", "keys", "(", ")", ")", "keys", ".", "sort", "(", ")", "cache_key", "=", "str", "(", ")", "for", "key", "in", "keys", ":", "if", "key", "!=", "\"api_sig\"", "and", "key", "!=", "\"api_key\"", "and", "key", "!=", "\"sk\"", ":", "cache_key", "+=", "key", "+", "self", ".", "params", "[", "key", "]", "return", "hashlib", ".", "sha1", "(", "cache_key", ".", "encode", "(", "\"utf-8\"", ")", ")", ".", "hexdigest", "(", ")" ]
The cache key is a string of concatenated sorted names and values.
[ "The", "cache", "key", "is", "a", "string", "of", "concatenated", "sorted", "names", "and", "values", "." ]
a52f66d316797fc819b5f1d186d77f18ba97b4ff
https://github.com/pylast/pylast/blob/a52f66d316797fc819b5f1d186d77f18ba97b4ff/src/pylast/__init__.py#L891-L905
-1
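The cache key uses the same concatenation idea but drops the authentication fields and hashes with sha1, so identical requests map to the same key regardless of signing. A standalone sketch:

import hashlib

def cache_key(params):
    skipped = {"api_sig", "api_key", "sk"}     # auth fields are excluded from the key
    key = "".join(k + params[k] for k in sorted(params) if k not in skipped)
    return hashlib.sha1(key.encode("utf-8")).hexdigest()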
251,279
pylast/pylast
src/pylast/__init__.py
_Request._get_cached_response
def _get_cached_response(self): """Returns a file object of the cached response.""" if not self._is_cached(): response = self._download_response() self.cache.set_xml(self._get_cache_key(), response) return self.cache.get_xml(self._get_cache_key())
python
def _get_cached_response(self): """Returns a file object of the cached response.""" if not self._is_cached(): response = self._download_response() self.cache.set_xml(self._get_cache_key(), response) return self.cache.get_xml(self._get_cache_key())
[ "def", "_get_cached_response", "(", "self", ")", ":", "if", "not", "self", ".", "_is_cached", "(", ")", ":", "response", "=", "self", ".", "_download_response", "(", ")", "self", ".", "cache", ".", "set_xml", "(", "self", ".", "_get_cache_key", "(", ")", ",", "response", ")", "return", "self", ".", "cache", ".", "get_xml", "(", "self", ".", "_get_cache_key", "(", ")", ")" ]
Returns a file object of the cached response.
[ "Returns", "a", "file", "object", "of", "the", "cached", "response", "." ]
a52f66d316797fc819b5f1d186d77f18ba97b4ff
https://github.com/pylast/pylast/blob/a52f66d316797fc819b5f1d186d77f18ba97b4ff/src/pylast/__init__.py#L907-L914
-1
251,280
pylast/pylast
src/pylast/__init__.py
_Request._download_response
def _download_response(self): """Returns a response body string from the server.""" if self.network.limit_rate: self.network._delay_call() data = [] for name in self.params.keys(): data.append("=".join((name, url_quote_plus(_string(self.params[name]))))) data = "&".join(data) headers = { "Content-type": "application/x-www-form-urlencoded", "Accept-Charset": "utf-8", "User-Agent": "pylast" + "/" + __version__, } (host_name, host_subdir) = self.network.ws_server if self.network.is_proxy_enabled(): conn = HTTPSConnection( context=SSL_CONTEXT, host=self.network._get_proxy()[0], port=self.network._get_proxy()[1], ) try: conn.request( method="POST", url="https://" + host_name + host_subdir, body=data, headers=headers, ) except Exception as e: raise NetworkError(self.network, e) else: conn = HTTPSConnection(context=SSL_CONTEXT, host=host_name) try: conn.request(method="POST", url=host_subdir, body=data, headers=headers) except Exception as e: raise NetworkError(self.network, e) try: response_text = _unicode(conn.getresponse().read()) except Exception as e: raise MalformedResponseError(self.network, e) try: self._check_response_for_errors(response_text) finally: conn.close() return response_text
python
def _download_response(self): """Returns a response body string from the server.""" if self.network.limit_rate: self.network._delay_call() data = [] for name in self.params.keys(): data.append("=".join((name, url_quote_plus(_string(self.params[name]))))) data = "&".join(data) headers = { "Content-type": "application/x-www-form-urlencoded", "Accept-Charset": "utf-8", "User-Agent": "pylast" + "/" + __version__, } (host_name, host_subdir) = self.network.ws_server if self.network.is_proxy_enabled(): conn = HTTPSConnection( context=SSL_CONTEXT, host=self.network._get_proxy()[0], port=self.network._get_proxy()[1], ) try: conn.request( method="POST", url="https://" + host_name + host_subdir, body=data, headers=headers, ) except Exception as e: raise NetworkError(self.network, e) else: conn = HTTPSConnection(context=SSL_CONTEXT, host=host_name) try: conn.request(method="POST", url=host_subdir, body=data, headers=headers) except Exception as e: raise NetworkError(self.network, e) try: response_text = _unicode(conn.getresponse().read()) except Exception as e: raise MalformedResponseError(self.network, e) try: self._check_response_for_errors(response_text) finally: conn.close() return response_text
[ "def", "_download_response", "(", "self", ")", ":", "if", "self", ".", "network", ".", "limit_rate", ":", "self", ".", "network", ".", "_delay_call", "(", ")", "data", "=", "[", "]", "for", "name", "in", "self", ".", "params", ".", "keys", "(", ")", ":", "data", ".", "append", "(", "\"=\"", ".", "join", "(", "(", "name", ",", "url_quote_plus", "(", "_string", "(", "self", ".", "params", "[", "name", "]", ")", ")", ")", ")", ")", "data", "=", "\"&\"", ".", "join", "(", "data", ")", "headers", "=", "{", "\"Content-type\"", ":", "\"application/x-www-form-urlencoded\"", ",", "\"Accept-Charset\"", ":", "\"utf-8\"", ",", "\"User-Agent\"", ":", "\"pylast\"", "+", "\"/\"", "+", "__version__", ",", "}", "(", "host_name", ",", "host_subdir", ")", "=", "self", ".", "network", ".", "ws_server", "if", "self", ".", "network", ".", "is_proxy_enabled", "(", ")", ":", "conn", "=", "HTTPSConnection", "(", "context", "=", "SSL_CONTEXT", ",", "host", "=", "self", ".", "network", ".", "_get_proxy", "(", ")", "[", "0", "]", ",", "port", "=", "self", ".", "network", ".", "_get_proxy", "(", ")", "[", "1", "]", ",", ")", "try", ":", "conn", ".", "request", "(", "method", "=", "\"POST\"", ",", "url", "=", "\"https://\"", "+", "host_name", "+", "host_subdir", ",", "body", "=", "data", ",", "headers", "=", "headers", ",", ")", "except", "Exception", "as", "e", ":", "raise", "NetworkError", "(", "self", ".", "network", ",", "e", ")", "else", ":", "conn", "=", "HTTPSConnection", "(", "context", "=", "SSL_CONTEXT", ",", "host", "=", "host_name", ")", "try", ":", "conn", ".", "request", "(", "method", "=", "\"POST\"", ",", "url", "=", "host_subdir", ",", "body", "=", "data", ",", "headers", "=", "headers", ")", "except", "Exception", "as", "e", ":", "raise", "NetworkError", "(", "self", ".", "network", ",", "e", ")", "try", ":", "response_text", "=", "_unicode", "(", "conn", ".", "getresponse", "(", ")", ".", "read", "(", ")", ")", "except", "Exception", "as", "e", ":", "raise", "MalformedResponseError", "(", "self", ".", "network", ",", "e", ")", "try", ":", "self", ".", "_check_response_for_errors", "(", "response_text", ")", "finally", ":", "conn", ".", "close", "(", ")", "return", "response_text" ]
Returns a response body string from the server.
[ "Returns", "a", "response", "body", "string", "from", "the", "server", "." ]
a52f66d316797fc819b5f1d186d77f18ba97b4ff
https://github.com/pylast/pylast/blob/a52f66d316797fc819b5f1d186d77f18ba97b4ff/src/pylast/__init__.py#L921-L974
-1
251,281
pylast/pylast
src/pylast/__init__.py
_Request.execute
def execute(self, cacheable=False): """Returns the XML DOM response of the POST Request from the server""" if self.network.is_caching_enabled() and cacheable: response = self._get_cached_response() else: response = self._download_response() return minidom.parseString(_string(response).replace("opensearch:", ""))
python
def execute(self, cacheable=False): """Returns the XML DOM response of the POST Request from the server""" if self.network.is_caching_enabled() and cacheable: response = self._get_cached_response() else: response = self._download_response() return minidom.parseString(_string(response).replace("opensearch:", ""))
[ "def", "execute", "(", "self", ",", "cacheable", "=", "False", ")", ":", "if", "self", ".", "network", ".", "is_caching_enabled", "(", ")", "and", "cacheable", ":", "response", "=", "self", ".", "_get_cached_response", "(", ")", "else", ":", "response", "=", "self", ".", "_download_response", "(", ")", "return", "minidom", ".", "parseString", "(", "_string", "(", "response", ")", ".", "replace", "(", "\"opensearch:\"", ",", "\"\"", ")", ")" ]
Returns the XML DOM response of the POST Request from the server
[ "Returns", "the", "XML", "DOM", "response", "of", "the", "POST", "Request", "from", "the", "server" ]
a52f66d316797fc819b5f1d186d77f18ba97b4ff
https://github.com/pylast/pylast/blob/a52f66d316797fc819b5f1d186d77f18ba97b4ff/src/pylast/__init__.py#L976-L984
-1
251,282
pylast/pylast
src/pylast/__init__.py
_Request._check_response_for_errors
def _check_response_for_errors(self, response): """Checks the response for errors and raises one if any exists.""" try: doc = minidom.parseString(_string(response).replace("opensearch:", "")) except Exception as e: raise MalformedResponseError(self.network, e) e = doc.getElementsByTagName("lfm")[0] # logger.debug(doc.toprettyxml()) if e.getAttribute("status") != "ok": e = doc.getElementsByTagName("error")[0] status = e.getAttribute("code") details = e.firstChild.data.strip() raise WSError(self.network, status, details)
python
def _check_response_for_errors(self, response): """Checks the response for errors and raises one if any exists.""" try: doc = minidom.parseString(_string(response).replace("opensearch:", "")) except Exception as e: raise MalformedResponseError(self.network, e) e = doc.getElementsByTagName("lfm")[0] # logger.debug(doc.toprettyxml()) if e.getAttribute("status") != "ok": e = doc.getElementsByTagName("error")[0] status = e.getAttribute("code") details = e.firstChild.data.strip() raise WSError(self.network, status, details)
[ "def", "_check_response_for_errors", "(", "self", ",", "response", ")", ":", "try", ":", "doc", "=", "minidom", ".", "parseString", "(", "_string", "(", "response", ")", ".", "replace", "(", "\"opensearch:\"", ",", "\"\"", ")", ")", "except", "Exception", "as", "e", ":", "raise", "MalformedResponseError", "(", "self", ".", "network", ",", "e", ")", "e", "=", "doc", ".", "getElementsByTagName", "(", "\"lfm\"", ")", "[", "0", "]", "# logger.debug(doc.toprettyxml())", "if", "e", ".", "getAttribute", "(", "\"status\"", ")", "!=", "\"ok\"", ":", "e", "=", "doc", ".", "getElementsByTagName", "(", "\"error\"", ")", "[", "0", "]", "status", "=", "e", ".", "getAttribute", "(", "\"code\"", ")", "details", "=", "e", ".", "firstChild", ".", "data", ".", "strip", "(", ")", "raise", "WSError", "(", "self", ".", "network", ",", "status", ",", "details", ")" ]
Checks the response for errors and raises one if any exists.
[ "Checks", "the", "response", "for", "errors", "and", "raises", "one", "if", "any", "exists", "." ]
a52f66d316797fc819b5f1d186d77f18ba97b4ff
https://github.com/pylast/pylast/blob/a52f66d316797fc819b5f1d186d77f18ba97b4ff/src/pylast/__init__.py#L986-L1001
-1
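On the calling side, the failure modes raised in the request code above surface as distinct exceptions, so application code can handle them separately. A hedged usage sketch, assuming `network` exists and that these exception classes are exported at the pylast top level:

import pylast

try:
    artist = network.get_artist_by_mbid("00000000-0000-0000-0000-000000000000")  # placeholder MBID
except pylast.WSError as e:
    print("Last.fm returned an error status:", e)
except pylast.MalformedResponseError as e:
    print("Response was not parseable XML:", e)
except pylast.NetworkError as e:
    print("Could not reach the web service:", e)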
251,283
pylast/pylast
src/pylast/__init__.py
SessionKeyGenerator._get_web_auth_token
def _get_web_auth_token(self): """ Retrieves a token from the network for web authentication. The token then has to be authorized from getAuthURL before creating session. """ request = _Request(self.network, "auth.getToken") # default action is that a request is signed only when # a session key is provided. request.sign_it() doc = request.execute() e = doc.getElementsByTagName("token")[0] return e.firstChild.data
python
def _get_web_auth_token(self): """ Retrieves a token from the network for web authentication. The token then has to be authorized from getAuthURL before creating session. """ request = _Request(self.network, "auth.getToken") # default action is that a request is signed only when # a session key is provided. request.sign_it() doc = request.execute() e = doc.getElementsByTagName("token")[0] return e.firstChild.data
[ "def", "_get_web_auth_token", "(", "self", ")", ":", "request", "=", "_Request", "(", "self", ".", "network", ",", "\"auth.getToken\"", ")", "# default action is that a request is signed only when", "# a session key is provided.", "request", ".", "sign_it", "(", ")", "doc", "=", "request", ".", "execute", "(", ")", "e", "=", "doc", ".", "getElementsByTagName", "(", "\"token\"", ")", "[", "0", "]", "return", "e", ".", "firstChild", ".", "data" ]
Retrieves a token from the network for web authentication. The token then has to be authorized from getAuthURL before creating session.
[ "Retrieves", "a", "token", "from", "the", "network", "for", "web", "authentication", ".", "The", "token", "then", "has", "to", "be", "authorized", "from", "getAuthURL", "before", "creating", "session", "." ]
a52f66d316797fc819b5f1d186d77f18ba97b4ff
https://github.com/pylast/pylast/blob/a52f66d316797fc819b5f1d186d77f18ba97b4ff/src/pylast/__init__.py#L1032-L1048
-1
251,284
pylast/pylast
src/pylast/__init__.py
SessionKeyGenerator.get_web_auth_session_key
def get_web_auth_session_key(self, url, token=""): """ Retrieves the session key of a web authorization process by its URL. """ session_key, _username = self.get_web_auth_session_key_username(url, token) return session_key
python
def get_web_auth_session_key(self, url, token=""): """ Retrieves the session key of a web authorization process by its URL. """ session_key, _username = self.get_web_auth_session_key_username(url, token) return session_key
[ "def", "get_web_auth_session_key", "(", "self", ",", "url", ",", "token", "=", "\"\"", ")", ":", "session_key", ",", "_username", "=", "self", ".", "get_web_auth_session_key_username", "(", "url", ",", "token", ")", "return", "session_key" ]
Retrieves the session key of a web authorization process by its URL.
[ "Retrieves", "the", "session", "key", "of", "a", "web", "authorization", "process", "by", "its", "URL", "." ]
a52f66d316797fc819b5f1d186d77f18ba97b4ff
https://github.com/pylast/pylast/blob/a52f66d316797fc819b5f1d186d77f18ba97b4ff/src/pylast/__init__.py#L1089-L1094
-1
251,285
pylast/pylast
src/pylast/__init__.py
SessionKeyGenerator.get_session_key
def get_session_key(self, username, password_hash): """ Retrieve a session key with a username and a md5 hash of the user's password. """ params = {"username": username, "authToken": md5(username + password_hash)} request = _Request(self.network, "auth.getMobileSession", params) # default action is that a request is signed only when # a session key is provided. request.sign_it() doc = request.execute() return _extract(doc, "key")
python
def get_session_key(self, username, password_hash): """ Retrieve a session key with a username and a md5 hash of the user's password. """ params = {"username": username, "authToken": md5(username + password_hash)} request = _Request(self.network, "auth.getMobileSession", params) # default action is that a request is signed only when # a session key is provided. request.sign_it() doc = request.execute() return _extract(doc, "key")
[ "def", "get_session_key", "(", "self", ",", "username", ",", "password_hash", ")", ":", "params", "=", "{", "\"username\"", ":", "username", ",", "\"authToken\"", ":", "md5", "(", "username", "+", "password_hash", ")", "}", "request", "=", "_Request", "(", "self", ".", "network", ",", "\"auth.getMobileSession\"", ",", "params", ")", "# default action is that a request is signed only when", "# a session key is provided.", "request", ".", "sign_it", "(", ")", "doc", "=", "request", ".", "execute", "(", ")", "return", "_extract", "(", "doc", ",", "\"key\"", ")" ]
Retrieve a session key with a username and a md5 hash of the user's password.
[ "Retrieve", "a", "session", "key", "with", "a", "username", "and", "a", "md5", "hash", "of", "the", "user", "s", "password", "." ]
a52f66d316797fc819b5f1d186d77f18ba97b4ff
https://github.com/pylast/pylast/blob/a52f66d316797fc819b5f1d186d77f18ba97b4ff/src/pylast/__init__.py#L1096-L1111
-1
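Both authentication paths above go through SessionKeyGenerator. A sketch of the password-hash path, assuming pylast is imported and `network` exists; the credentials are placeholders, and pylast's md5 helper produces the password_hash this method expects. Assigning the key back onto the network object is the usual follow-up so later write calls are signed with it.

sg = pylast.SessionKeyGenerator(network)
session_key = sg.get_session_key("some_user", pylast.md5("some_password"))   # placeholder credentials
network.session_key = session_key    # subsequent write calls (scrobble, tagging, ...) are now authorised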
251,286
pylast/pylast
src/pylast/__init__.py
_BaseObject._get_things
def _get_things(self, method, thing, thing_type, params=None, cacheable=True): """Returns a list of the most played thing_types by this thing.""" limit = params.get("limit", 1) seq = [] for node in _collect_nodes( limit, self, self.ws_prefix + "." + method, cacheable, params ): title = _extract(node, "name") artist = _extract(node, "name", 1) playcount = _number(_extract(node, "playcount")) seq.append(TopItem(thing_type(artist, title, self.network), playcount)) return seq
python
def _get_things(self, method, thing, thing_type, params=None, cacheable=True): """Returns a list of the most played thing_types by this thing.""" limit = params.get("limit", 1) seq = [] for node in _collect_nodes( limit, self, self.ws_prefix + "." + method, cacheable, params ): title = _extract(node, "name") artist = _extract(node, "name", 1) playcount = _number(_extract(node, "playcount")) seq.append(TopItem(thing_type(artist, title, self.network), playcount)) return seq
[ "def", "_get_things", "(", "self", ",", "method", ",", "thing", ",", "thing_type", ",", "params", "=", "None", ",", "cacheable", "=", "True", ")", ":", "limit", "=", "params", ".", "get", "(", "\"limit\"", ",", "1", ")", "seq", "=", "[", "]", "for", "node", "in", "_collect_nodes", "(", "limit", ",", "self", ",", "self", ".", "ws_prefix", "+", "\".\"", "+", "method", ",", "cacheable", ",", "params", ")", ":", "title", "=", "_extract", "(", "node", ",", "\"name\"", ")", "artist", "=", "_extract", "(", "node", ",", "\"name\"", ",", "1", ")", "playcount", "=", "_number", "(", "_extract", "(", "node", ",", "\"playcount\"", ")", ")", "seq", ".", "append", "(", "TopItem", "(", "thing_type", "(", "artist", ",", "title", ",", "self", ".", "network", ")", ",", "playcount", ")", ")", "return", "seq" ]
Returns a list of the most played thing_types by this thing.
[ "Returns", "a", "list", "of", "the", "most", "played", "thing_types", "by", "this", "thing", "." ]
a52f66d316797fc819b5f1d186d77f18ba97b4ff
https://github.com/pylast/pylast/blob/a52f66d316797fc819b5f1d186d77f18ba97b4ff/src/pylast/__init__.py#L1170-L1184
-1
251,287
pylast/pylast
src/pylast/__init__.py
_Chartable.get_weekly_chart_dates
def get_weekly_chart_dates(self): """Returns a list of From and To tuples for the available charts.""" doc = self._request(self.ws_prefix + ".getWeeklyChartList", True) seq = [] for node in doc.getElementsByTagName("chart"): seq.append((node.getAttribute("from"), node.getAttribute("to"))) return seq
python
def get_weekly_chart_dates(self): """Returns a list of From and To tuples for the available charts.""" doc = self._request(self.ws_prefix + ".getWeeklyChartList", True) seq = [] for node in doc.getElementsByTagName("chart"): seq.append((node.getAttribute("from"), node.getAttribute("to"))) return seq
[ "def", "get_weekly_chart_dates", "(", "self", ")", ":", "doc", "=", "self", ".", "_request", "(", "self", ".", "ws_prefix", "+", "\".getWeeklyChartList\"", ",", "True", ")", "seq", "=", "[", "]", "for", "node", "in", "doc", ".", "getElementsByTagName", "(", "\"chart\"", ")", ":", "seq", ".", "append", "(", "(", "node", ".", "getAttribute", "(", "\"from\"", ")", ",", "node", ".", "getAttribute", "(", "\"to\"", ")", ")", ")", "return", "seq" ]
Returns a list of From and To tuples for the available charts.
[ "Returns", "a", "list", "of", "From", "and", "To", "tuples", "for", "the", "available", "charts", "." ]
a52f66d316797fc819b5f1d186d77f18ba97b4ff
https://github.com/pylast/pylast/blob/a52f66d316797fc819b5f1d186d77f18ba97b4ff/src/pylast/__init__.py#L1231-L1240
-1
251,288
pylast/pylast
src/pylast/__init__.py
_Chartable.get_weekly_charts
def get_weekly_charts(self, chart_kind, from_date=None, to_date=None): """ Returns the weekly charts for the week starting from the from_date value to the to_date value. chart_kind should be one of "album", "artist" or "track" """ method = ".getWeekly" + chart_kind.title() + "Chart" chart_type = eval(chart_kind.title()) # string to type params = self._get_params() if from_date and to_date: params["from"] = from_date params["to"] = to_date doc = self._request(self.ws_prefix + method, True, params) seq = [] for node in doc.getElementsByTagName(chart_kind.lower()): if chart_kind == "artist": item = chart_type(_extract(node, "name"), self.network) else: item = chart_type( _extract(node, "artist"), _extract(node, "name"), self.network ) weight = _number(_extract(node, "playcount")) seq.append(TopItem(item, weight)) return seq
python
def get_weekly_charts(self, chart_kind, from_date=None, to_date=None): """ Returns the weekly charts for the week starting from the from_date value to the to_date value. chart_kind should be one of "album", "artist" or "track" """ method = ".getWeekly" + chart_kind.title() + "Chart" chart_type = eval(chart_kind.title()) # string to type params = self._get_params() if from_date and to_date: params["from"] = from_date params["to"] = to_date doc = self._request(self.ws_prefix + method, True, params) seq = [] for node in doc.getElementsByTagName(chart_kind.lower()): if chart_kind == "artist": item = chart_type(_extract(node, "name"), self.network) else: item = chart_type( _extract(node, "artist"), _extract(node, "name"), self.network ) weight = _number(_extract(node, "playcount")) seq.append(TopItem(item, weight)) return seq
[ "def", "get_weekly_charts", "(", "self", ",", "chart_kind", ",", "from_date", "=", "None", ",", "to_date", "=", "None", ")", ":", "method", "=", "\".getWeekly\"", "+", "chart_kind", ".", "title", "(", ")", "+", "\"Chart\"", "chart_type", "=", "eval", "(", "chart_kind", ".", "title", "(", ")", ")", "# string to type", "params", "=", "self", ".", "_get_params", "(", ")", "if", "from_date", "and", "to_date", ":", "params", "[", "\"from\"", "]", "=", "from_date", "params", "[", "\"to\"", "]", "=", "to_date", "doc", "=", "self", ".", "_request", "(", "self", ".", "ws_prefix", "+", "method", ",", "True", ",", "params", ")", "seq", "=", "[", "]", "for", "node", "in", "doc", ".", "getElementsByTagName", "(", "chart_kind", ".", "lower", "(", ")", ")", ":", "if", "chart_kind", "==", "\"artist\"", ":", "item", "=", "chart_type", "(", "_extract", "(", "node", ",", "\"name\"", ")", ",", "self", ".", "network", ")", "else", ":", "item", "=", "chart_type", "(", "_extract", "(", "node", ",", "\"artist\"", ")", ",", "_extract", "(", "node", ",", "\"name\"", ")", ",", "self", ".", "network", ")", "weight", "=", "_number", "(", "_extract", "(", "node", ",", "\"playcount\"", ")", ")", "seq", ".", "append", "(", "TopItem", "(", "item", ",", "weight", ")", ")", "return", "seq" ]
Returns the weekly charts for the week starting from the from_date value to the to_date value. chart_kind should be one of "album", "artist" or "track"
[ "Returns", "the", "weekly", "charts", "for", "the", "week", "starting", "from", "the", "from_date", "value", "to", "the", "to_date", "value", ".", "chart_kind", "should", "be", "one", "of", "album", "artist", "or", "track" ]
a52f66d316797fc819b5f1d186d77f18ba97b4ff
https://github.com/pylast/pylast/blob/a52f66d316797fc819b5f1d186d77f18ba97b4ff/src/pylast/__init__.py#L1266-L1293
-1
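The chart dates feed directly into get_weekly_charts. A sketch, assuming `user` is an object that mixes in these _Chartable methods (for example a pylast User):

dates = user.get_weekly_chart_dates()            # list of (from, to) timestamp strings
start, end = dates[-1]                           # most recent chart window
for top in user.get_weekly_charts("artist", from_date=start, to_date=end):
    print(top.item, top.weight)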
251,289
pylast/pylast
src/pylast/__init__.py
_Taggable.remove_tag
def remove_tag(self, tag): """Remove a user's tag from this object.""" if isinstance(tag, Tag): tag = tag.get_name() params = self._get_params() params["tag"] = tag self._request(self.ws_prefix + ".removeTag", False, params)
python
def remove_tag(self, tag): """Remove a user's tag from this object.""" if isinstance(tag, Tag): tag = tag.get_name() params = self._get_params() params["tag"] = tag self._request(self.ws_prefix + ".removeTag", False, params)
[ "def", "remove_tag", "(", "self", ",", "tag", ")", ":", "if", "isinstance", "(", "tag", ",", "Tag", ")", ":", "tag", "=", "tag", ".", "get_name", "(", ")", "params", "=", "self", ".", "_get_params", "(", ")", "params", "[", "\"tag\"", "]", "=", "tag", "self", ".", "_request", "(", "self", ".", "ws_prefix", "+", "\".removeTag\"", ",", "False", ",", "params", ")" ]
Remove a user's tag from this object.
[ "Remove", "a", "user", "s", "tag", "from", "this", "object", "." ]
a52f66d316797fc819b5f1d186d77f18ba97b4ff
https://github.com/pylast/pylast/blob/a52f66d316797fc819b5f1d186d77f18ba97b4ff/src/pylast/__init__.py#L1323-L1332
-1
251,290
pylast/pylast
src/pylast/__init__.py
_Taggable.get_tags
def get_tags(self): """Returns a list of the tags set by the user to this object.""" # Uncacheable because it can be dynamically changed by the user. params = self._get_params() doc = self._request(self.ws_prefix + ".getTags", False, params) tag_names = _extract_all(doc, "name") tags = [] for tag in tag_names: tags.append(Tag(tag, self.network)) return tags
python
def get_tags(self): """Returns a list of the tags set by the user to this object.""" # Uncacheable because it can be dynamically changed by the user. params = self._get_params() doc = self._request(self.ws_prefix + ".getTags", False, params) tag_names = _extract_all(doc, "name") tags = [] for tag in tag_names: tags.append(Tag(tag, self.network)) return tags
[ "def", "get_tags", "(", "self", ")", ":", "# Uncacheable because it can be dynamically changed by the user.", "params", "=", "self", ".", "_get_params", "(", ")", "doc", "=", "self", ".", "_request", "(", "self", ".", "ws_prefix", "+", "\".getTags\"", ",", "False", ",", "params", ")", "tag_names", "=", "_extract_all", "(", "doc", ",", "\"name\"", ")", "tags", "=", "[", "]", "for", "tag", "in", "tag_names", ":", "tags", ".", "append", "(", "Tag", "(", "tag", ",", "self", ".", "network", ")", ")", "return", "tags" ]
Returns a list of the tags set by the user to this object.
[ "Returns", "a", "list", "of", "the", "tags", "set", "by", "the", "user", "to", "this", "object", "." ]
a52f66d316797fc819b5f1d186d77f18ba97b4ff
https://github.com/pylast/pylast/blob/a52f66d316797fc819b5f1d186d77f18ba97b4ff/src/pylast/__init__.py#L1334-L1346
-1
251,291
pylast/pylast
src/pylast/__init__.py
_Taggable.get_top_tags
def get_top_tags(self, limit=None): """Returns a list of the most frequently used Tags on this object.""" doc = self._request(self.ws_prefix + ".getTopTags", True) elements = doc.getElementsByTagName("tag") seq = [] for element in elements: tag_name = _extract(element, "name") tagcount = _extract(element, "count") seq.append(TopItem(Tag(tag_name, self.network), tagcount)) if limit: seq = seq[:limit] return seq
python
def get_top_tags(self, limit=None): """Returns a list of the most frequently used Tags on this object.""" doc = self._request(self.ws_prefix + ".getTopTags", True) elements = doc.getElementsByTagName("tag") seq = [] for element in elements: tag_name = _extract(element, "name") tagcount = _extract(element, "count") seq.append(TopItem(Tag(tag_name, self.network), tagcount)) if limit: seq = seq[:limit] return seq
[ "def", "get_top_tags", "(", "self", ",", "limit", "=", "None", ")", ":", "doc", "=", "self", ".", "_request", "(", "self", ".", "ws_prefix", "+", "\".getTopTags\"", ",", "True", ")", "elements", "=", "doc", ".", "getElementsByTagName", "(", "\"tag\"", ")", "seq", "=", "[", "]", "for", "element", "in", "elements", ":", "tag_name", "=", "_extract", "(", "element", ",", "\"name\"", ")", "tagcount", "=", "_extract", "(", "element", ",", "\"count\"", ")", "seq", ".", "append", "(", "TopItem", "(", "Tag", "(", "tag_name", ",", "self", ".", "network", ")", ",", "tagcount", ")", ")", "if", "limit", ":", "seq", "=", "seq", "[", ":", "limit", "]", "return", "seq" ]
Returns a list of the most frequently used Tags on this object.
[ "Returns", "a", "list", "of", "the", "most", "frequently", "used", "Tags", "on", "this", "object", "." ]
a52f66d316797fc819b5f1d186d77f18ba97b4ff
https://github.com/pylast/pylast/blob/a52f66d316797fc819b5f1d186d77f18ba97b4ff/src/pylast/__init__.py#L1395-L1412
-1
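The tag methods above are available on any taggable object (artists, albums, tracks). A short usage sketch, assuming `artist` is such an object and that the session is authenticated; the tag name is a placeholder.

for top in artist.get_top_tags(limit=10):        # most-used tags across all listeners
    print(top.item, top.weight)

print(artist.get_tags())                         # only the authenticated user's own tags
artist.remove_tag("some tag")                    # placeholder tag name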
251,292
pylast/pylast
src/pylast/__init__.py
_Opus.get_title
def get_title(self, properly_capitalized=False): """Returns the artist or track title.""" if properly_capitalized: self.title = _extract( self._request(self.ws_prefix + ".getInfo", True), "name" ) return self.title
python
def get_title(self, properly_capitalized=False): """Returns the artist or track title.""" if properly_capitalized: self.title = _extract( self._request(self.ws_prefix + ".getInfo", True), "name" ) return self.title
[ "def", "get_title", "(", "self", ",", "properly_capitalized", "=", "False", ")", ":", "if", "properly_capitalized", ":", "self", ".", "title", "=", "_extract", "(", "self", ".", "_request", "(", "self", ".", "ws_prefix", "+", "\".getInfo\"", ",", "True", ")", ",", "\"name\"", ")", "return", "self", ".", "title" ]
Returns the artist or track title.
[ "Returns", "the", "artist", "or", "track", "title", "." ]
a52f66d316797fc819b5f1d186d77f18ba97b4ff
https://github.com/pylast/pylast/blob/a52f66d316797fc819b5f1d186d77f18ba97b4ff/src/pylast/__init__.py#L1566-L1573
-1
251,293
pylast/pylast
src/pylast/__init__.py
_Opus.get_playcount
def get_playcount(self): """Returns the number of plays on the network""" return _number( _extract( self._request(self.ws_prefix + ".getInfo", cacheable=True), "playcount" ) )
python
def get_playcount(self): """Returns the number of plays on the network""" return _number( _extract( self._request(self.ws_prefix + ".getInfo", cacheable=True), "playcount" ) )
[ "def", "get_playcount", "(", "self", ")", ":", "return", "_number", "(", "_extract", "(", "self", ".", "_request", "(", "self", ".", "ws_prefix", "+", "\".getInfo\"", ",", "cacheable", "=", "True", ")", ",", "\"playcount\"", ")", ")" ]
Returns the number of plays on the network
[ "Returns", "the", "number", "of", "plays", "on", "the", "network" ]
a52f66d316797fc819b5f1d186d77f18ba97b4ff
https://github.com/pylast/pylast/blob/a52f66d316797fc819b5f1d186d77f18ba97b4ff/src/pylast/__init__.py#L1580-L1587
-1
251,294
pylast/pylast
src/pylast/__init__.py
_Opus.get_userplaycount
def get_userplaycount(self): """Returns the number of plays by a given username""" if not self.username: return params = self._get_params() params["username"] = self.username doc = self._request(self.ws_prefix + ".getInfo", True, params) return _number(_extract(doc, "userplaycount"))
python
def get_userplaycount(self): """Returns the number of plays by a given username""" if not self.username: return params = self._get_params() params["username"] = self.username doc = self._request(self.ws_prefix + ".getInfo", True, params) return _number(_extract(doc, "userplaycount"))
[ "def", "get_userplaycount", "(", "self", ")", ":", "if", "not", "self", ".", "username", ":", "return", "params", "=", "self", ".", "_get_params", "(", ")", "params", "[", "\"username\"", "]", "=", "self", ".", "username", "doc", "=", "self", ".", "_request", "(", "self", ".", "ws_prefix", "+", "\".getInfo\"", ",", "True", ",", "params", ")", "return", "_number", "(", "_extract", "(", "doc", ",", "\"userplaycount\"", ")", ")" ]
Returns the number of plays by a given username
[ "Returns", "the", "number", "of", "plays", "by", "a", "given", "username" ]
a52f66d316797fc819b5f1d186d77f18ba97b4ff
https://github.com/pylast/pylast/blob/a52f66d316797fc819b5f1d186d77f18ba97b4ff/src/pylast/__init__.py#L1589-L1599
-1
251,295
pylast/pylast
src/pylast/__init__.py
_Opus.get_listener_count
def get_listener_count(self): """Returns the number of listeners on the network""" return _number( _extract( self._request(self.ws_prefix + ".getInfo", cacheable=True), "listeners" ) )
python
def get_listener_count(self): """Returns the number of listeners on the network""" return _number( _extract( self._request(self.ws_prefix + ".getInfo", cacheable=True), "listeners" ) )
[ "def", "get_listener_count", "(", "self", ")", ":", "return", "_number", "(", "_extract", "(", "self", ".", "_request", "(", "self", ".", "ws_prefix", "+", "\".getInfo\"", ",", "cacheable", "=", "True", ")", ",", "\"listeners\"", ")", ")" ]
Returns the number of listeners on the network
[ "Returns", "the", "number", "of", "listeners", "on", "the", "network" ]
a52f66d316797fc819b5f1d186d77f18ba97b4ff
https://github.com/pylast/pylast/blob/a52f66d316797fc819b5f1d186d77f18ba97b4ff/src/pylast/__init__.py#L1601-L1608
-1
251,296
pylast/pylast
src/pylast/__init__.py
_Opus.get_mbid
def get_mbid(self): """Returns the MusicBrainz ID of the album or track.""" doc = self._request(self.ws_prefix + ".getInfo", cacheable=True) try: lfm = doc.getElementsByTagName("lfm")[0] opus = next(self._get_children_by_tag_name(lfm, self.ws_prefix)) mbid = next(self._get_children_by_tag_name(opus, "mbid")) return mbid.firstChild.nodeValue except StopIteration: return None
python
def get_mbid(self): """Returns the MusicBrainz ID of the album or track.""" doc = self._request(self.ws_prefix + ".getInfo", cacheable=True) try: lfm = doc.getElementsByTagName("lfm")[0] opus = next(self._get_children_by_tag_name(lfm, self.ws_prefix)) mbid = next(self._get_children_by_tag_name(opus, "mbid")) return mbid.firstChild.nodeValue except StopIteration: return None
[ "def", "get_mbid", "(", "self", ")", ":", "doc", "=", "self", ".", "_request", "(", "self", ".", "ws_prefix", "+", "\".getInfo\"", ",", "cacheable", "=", "True", ")", "try", ":", "lfm", "=", "doc", ".", "getElementsByTagName", "(", "\"lfm\"", ")", "[", "0", "]", "opus", "=", "next", "(", "self", ".", "_get_children_by_tag_name", "(", "lfm", ",", "self", ".", "ws_prefix", ")", ")", "mbid", "=", "next", "(", "self", ".", "_get_children_by_tag_name", "(", "opus", ",", "\"mbid\"", ")", ")", "return", "mbid", ".", "firstChild", ".", "nodeValue", "except", "StopIteration", ":", "return", "None" ]
Returns the MusicBrainz ID of the album or track.
[ "Returns", "the", "MusicBrainz", "ID", "of", "the", "album", "or", "track", "." ]
a52f66d316797fc819b5f1d186d77f18ba97b4ff
https://github.com/pylast/pylast/blob/a52f66d316797fc819b5f1d186d77f18ba97b4ff/src/pylast/__init__.py#L1610-L1621
-1
251,297
pylast/pylast
src/pylast/__init__.py
Album.get_tracks
def get_tracks(self): """Returns the list of Tracks on this album.""" return _extract_tracks( self._request(self.ws_prefix + ".getInfo", cacheable=True), self.network )
python
def get_tracks(self): """Returns the list of Tracks on this album.""" return _extract_tracks( self._request(self.ws_prefix + ".getInfo", cacheable=True), self.network )
[ "def", "get_tracks", "(", "self", ")", ":", "return", "_extract_tracks", "(", "self", ".", "_request", "(", "self", ".", "ws_prefix", "+", "\".getInfo\"", ",", "cacheable", "=", "True", ")", ",", "self", ".", "network", ")" ]
Returns the list of Tracks on this album.
[ "Returns", "the", "list", "of", "Tracks", "on", "this", "album", "." ]
a52f66d316797fc819b5f1d186d77f18ba97b4ff
https://github.com/pylast/pylast/blob/a52f66d316797fc819b5f1d186d77f18ba97b4ff/src/pylast/__init__.py#L1639-L1644
-1
251,298
pylast/pylast
src/pylast/__init__.py
Artist.get_name
def get_name(self, properly_capitalized=False): """Returns the name of the artist. If properly_capitalized was asserted then the name would be downloaded overwriting the given one.""" if properly_capitalized: self.name = _extract( self._request(self.ws_prefix + ".getInfo", True), "name" ) return self.name
python
def get_name(self, properly_capitalized=False): """Returns the name of the artist. If properly_capitalized was asserted then the name would be downloaded overwriting the given one.""" if properly_capitalized: self.name = _extract( self._request(self.ws_prefix + ".getInfo", True), "name" ) return self.name
[ "def", "get_name", "(", "self", ",", "properly_capitalized", "=", "False", ")", ":", "if", "properly_capitalized", ":", "self", ".", "name", "=", "_extract", "(", "self", ".", "_request", "(", "self", ".", "ws_prefix", "+", "\".getInfo\"", ",", "True", ")", ",", "\"name\"", ")", "return", "self", ".", "name" ]
Returns the name of the artist. If properly_capitalized was asserted then the name would be downloaded overwriting the given one.
[ "Returns", "the", "name", "of", "the", "artist", ".", "If", "properly_capitalized", "was", "asserted", "then", "the", "name", "would", "be", "downloaded", "overwriting", "the", "given", "one", "." ]
a52f66d316797fc819b5f1d186d77f18ba97b4ff
https://github.com/pylast/pylast/blob/a52f66d316797fc819b5f1d186d77f18ba97b4ff/src/pylast/__init__.py#L1719-L1729
-1
251,299
pylast/pylast
src/pylast/__init__.py
Artist.get_mbid
def get_mbid(self): """Returns the MusicBrainz ID of this artist.""" doc = self._request(self.ws_prefix + ".getInfo", True) return _extract(doc, "mbid")
python
def get_mbid(self): """Returns the MusicBrainz ID of this artist.""" doc = self._request(self.ws_prefix + ".getInfo", True) return _extract(doc, "mbid")
[ "def", "get_mbid", "(", "self", ")", ":", "doc", "=", "self", ".", "_request", "(", "self", ".", "ws_prefix", "+", "\".getInfo\"", ",", "True", ")", "return", "_extract", "(", "doc", ",", "\"mbid\"", ")" ]
Returns the MusicBrainz ID of this artist.
[ "Returns", "the", "MusicBrainz", "ID", "of", "this", "artist", "." ]
a52f66d316797fc819b5f1d186d77f18ba97b4ff
https://github.com/pylast/pylast/blob/a52f66d316797fc819b5f1d186d77f18ba97b4ff/src/pylast/__init__.py#L1772-L1777
-1