repo_name (string, 7-111) | __id__ (int64, 16.6k-19,705B) | blob_id (string, 40) | directory_id (string, 40) | path (string, 5-151) | content_id (string, 40) | detected_licenses (sequence) | license_type (2 classes) | repo_url (string, 26-130) | snapshot_id (string, 40) | revision_id (string, 40) | branch_name (string, 4-42) | visit_date (unknown) | revision_date (unknown) | committer_date (unknown) | github_id (int64, 14.6k-687M, nullable) | star_events_count (int64, 0-209k) | fork_events_count (int64, 0-110k) | gha_license_id (12 classes) | gha_fork (bool, 2 classes) | gha_event_created_at (unknown) | gha_created_at (unknown) | gha_updated_at (unknown) | gha_pushed_at (unknown) | gha_size (int64, 0-10.2M, nullable) | gha_stargazers_count (int32, 0-178k, nullable) | gha_forks_count (int32, 0-88.9k, nullable) | gha_open_issues_count (int32, 0-2.72k, nullable) | gha_language (string, 1-16, nullable) | gha_archived (bool, 1 class) | gha_disabled (bool, 1 class) | content (string, 10-2.95M) | src_encoding (5 classes) | language (1 class) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 10-2.95M) | extension (19 classes) | num_repo_files (int64, 1-202k) | filename (string, 4-112) | num_lang_files (int64, 1-202k) | alphanum_fraction (float64, 0.26-0.89) | alpha_fraction (float64, 0.2-0.89) | hex_fraction (float64, 0-0.09) | num_lines (int32, 1-93.6k) | avg_line_length (float64, 4.57-103) | max_line_length (int64, 7-931) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
lemanouthe/feliciano | 3,676,492,035,981 | d9f2b6598aed26ba78e30afffa1cae60d5ddaaf0 | 4bb005fc2d550bf35cfcb3732356db2886abfd8e | /configuration/migrations/0001_initial.py | f827c71bdb7ecf6c30629cd50d1a43373a4eaca6 | [] | no_license | https://github.com/lemanouthe/feliciano | 5d0a64cd0adc00c8ddc9a04719144cbb193a06c3 | 20e49309a038263714fca4131eb25ed2702a3e47 | refs/heads/master | "2022-03-27T13:56:23.735511" | "2020-01-30T00:28:14" | "2020-01-30T00:28:14" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Generated by Django 2.2.5 on 2019-09-30 15:23
from django.db import migrations, models
import django.db.models.deletion
import tinymce.models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='AboutConfig',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('titre', models.CharField(max_length=50)),
('content', tinymce.models.HTMLField(verbose_name='content')),
('open_hour', models.CharField(max_length=255)),
('phone', models.CharField(max_length=50)),
('yoe', models.IntegerField()),
('menu', models.IntegerField()),
('satff', models.IntegerField()),
('happy_customer', models.IntegerField()),
('description', models.TextField()),
('status', models.BooleanField(default=True)),
('date_add', models.DateTimeField(auto_now_add=True)),
('date_upd', models.DateTimeField(auto_now=True)),
],
options={
'verbose_name': 'AboutConfig',
'verbose_name_plural': 'AboutConfigs',
},
),
migrations.CreateModel(
name='Day',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('jour', models.CharField(max_length=50)),
('status', models.BooleanField(default=True)),
('date_add', models.DateTimeField(auto_now_add=True)),
('date_upd', models.DateTimeField(auto_now=True)),
],
options={
'verbose_name': 'Day',
'verbose_name_plural': 'Days',
},
),
migrations.CreateModel(
name='Icon',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('status', models.BooleanField(default=True)),
('date_add', models.DateTimeField(auto_now_add=True)),
('date_upd', models.DateTimeField(auto_now=True)),
],
options={
'verbose_name': 'Icon',
'verbose_name_plural': 'Icons',
},
),
migrations.CreateModel(
name='Instagram',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image1', models.ImageField(upload_to='home/insta')),
('image2', models.ImageField(upload_to='home/insta')),
('image3', models.ImageField(upload_to='home/insta')),
('image4', models.ImageField(upload_to='home/insta')),
('image5', models.ImageField(upload_to='home/insta')),
('image6', models.ImageField(upload_to='home/insta')),
('status', models.BooleanField(default=True)),
('date_add', models.DateTimeField(auto_now_add=True)),
('date_upd', models.DateTimeField(auto_now=True)),
],
options={
'verbose_name': 'Instagram',
'verbose_name_plural': 'Instagrams',
},
),
migrations.CreateModel(
name='Social',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(choices=[('FB', 'facebook'), ('TW', 'twitter'), ('INS', 'instagram'), ('GOO', 'google')], max_length=100)),
('lien', models.URLField()),
('status', models.BooleanField(default=True)),
('date_add', models.DateTimeField(auto_now_add=True)),
('date_upd', models.DateTimeField(auto_now=True)),
],
options={
'verbose_name': 'Social',
'verbose_name_plural': 'Socials',
},
),
migrations.CreateModel(
name='Temoin',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('job', models.CharField(max_length=50)),
('comment', models.TextField()),
('image', models.ImageField(upload_to='testimonial')),
('status', models.BooleanField(default=True)),
('date_add', models.DateTimeField(auto_now_add=True)),
('date_upd', models.DateTimeField(auto_now=True)),
],
options={
'verbose_name': 'Temoin',
'verbose_name_plural': 'Temoins',
},
),
migrations.CreateModel(
name='WorkingHour',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('start_hour', models.CharField(max_length=50)),
('end_hour', models.CharField(max_length=50)),
('status', models.BooleanField(default=True)),
('date_add', models.DateTimeField(auto_now_add=True)),
('date_upd', models.DateTimeField(auto_now=True)),
('jour', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='day_working', to='configuration.Day')),
],
options={
'verbose_name': 'WorkingHour',
'verbose_name_plural': 'WorkingHours',
},
),
migrations.CreateModel(
name='ServiceConfig',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('titre', models.CharField(max_length=50)),
('description', models.TextField()),
('status', models.BooleanField(default=True)),
('date_add', models.DateTimeField(auto_now_add=True)),
('date_upd', models.DateTimeField(auto_now=True)),
('icone', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='icon_service', to='configuration.Icon')),
],
options={
'verbose_name': 'ServiceConfig',
'verbose_name_plural': 'ServiceConfigs',
},
),
migrations.CreateModel(
name='MainConfig',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nom', models.CharField(max_length=50)),
('description', models.TextField()),
('text1', models.CharField(max_length=50)),
('text2', models.CharField(max_length=50)),
('text3', models.CharField(max_length=50)),
('image1', models.ImageField(upload_to='home/')),
('image2', models.ImageField(upload_to='home/')),
('image3', models.ImageField(upload_to='home/')),
('scroll_image', models.ImageField(upload_to='home/')),
('tel', models.CharField(max_length=15)),
('email', models.EmailField(max_length=255)),
('status', models.BooleanField(default=True)),
('date_add', models.DateTimeField(auto_now_add=True)),
('date_upd', models.DateTimeField(auto_now=True)),
('instagram', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='insta_config', to='configuration.Instagram')),
('social', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='social_config', to='configuration.Social')),
('working_hour', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='working_config', to='configuration.WorkingHour')),
],
options={
'verbose_name': 'MainConfig',
'verbose_name_plural': 'MainConfigs',
},
),
]
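# A minimal sketch of applying this migration, assuming a standard Django
# project layout with 'configuration' in INSTALLED_APPS:
#
#     python manage.py migrate configuration 0001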
| UTF-8 | Python | false | false | 8,448 | py | 27 | 0001_initial.py | 22 | 0.525805 | 0.518229 | 0 | 176 | 47 | 160 |
oasysokubo/pacAI | 19,086,834,699,285 | 0297384776d9e836d0cc1bebb27a187d8d489a4b | 0ef639c85334c42e7958e40009d47d0a9c5123ae | /src/pacai/student/multiagents.py | 797caecb4f8b0f4323c6a48f5111261f3a50276e | [] | no_license | https://github.com/oasysokubo/pacAI | b3e93b2ca23f341a5cd8b48ce31c17c3e54b0ae6 | 8d2f28be01c7aa388e0f8c8689a50d521cdd27da | refs/heads/master | "2023-08-07T06:16:00.229413" | "2020-10-15T02:33:28" | "2020-10-15T02:33:28" | 215,427,629 | 1 | 1 | null | false | "2023-07-22T18:54:03" | "2019-10-16T01:15:14" | "2022-05-17T03:00:12" | "2023-07-22T18:54:02" | 3,337 | 0 | 0 | 2 | Python | false | false | import random
import sys
from pacai.agents.base import BaseAgent
from pacai.agents.search.multiagent import MultiAgentSearchAgent
from pacai.core import distance
class ReflexAgent(BaseAgent):
"""
A reflex agent chooses an action at each choice point by examining
its alternatives via a state evaluation function.
The code below is provided as a guide.
You are welcome to change it in any way you see fit,
so long as you don't touch the method headers.
"""
def __init__(self, index, **kwargs):
super().__init__(index)
def getAction(self, gameState):
"""
You do not need to change this method, but you're welcome to.
`ReflexAgent.getAction` chooses among the best options according to the evaluation function.
Just like in the previous project, this method takes a
`pacai.core.gamestate.AbstractGameState` and returns some value from
`pacai.core.directions.Directions`.
"""
# Collect legal moves.
legalMoves = gameState.getLegalActions()
# Choose one of the best actions.
scores = [self.evaluationFunction(gameState, action) for action in legalMoves]
bestScore = max(scores)
bestIndices = [index for index in range(len(scores)) if scores[index] == bestScore]
chosenIndex = random.choice(bestIndices) # Pick randomly among the best.
return legalMoves[chosenIndex]
def evaluationFunction(self, currentGameState, action):
"""
Design a better evaluation function here.
The evaluation function takes in the current `pacai.bin.pacman.PacmanGameState`
and an action, and returns a number, where higher numbers are better.
Make sure to understand the range of different values before you combine them
in your evaluation function.
"""
successorGameState = currentGameState.generatePacmanSuccessor(action)
# Useful information you can extract.
newPosition = successorGameState.getPacmanPosition()
# oldPosition = currentGameState.getPacmanPosition()
newFood = successorGameState.getFood()
oldFood = currentGameState.getFood()
# oldScore = currentGameState.getScore()
newScore = successorGameState.getScore()
newGhostStates = successorGameState.getGhostStates()
# newScaredTimes = [ghostState.getScaredTimer() for ghostState in newGhostStates]
# *** Your Code Here ***
all_food = newFood.asList()
# check if adversary is near
for ghost in newGhostStates:
isBrave = ghost.isBraveGhost()
ghostPosition = ghost.getPosition()
if isBrave:
if distance.manhattan(newPosition, ghostPosition) < 2:
# run away
return -999999
# get distance from pacman to all food
food_dist = [(1.0 / distance.euclidean(newPosition, food)) for food in all_food]
food_dist.sort()
        # if pacman isn't moving or food is not being eaten --> penalize
old_food_ct = oldFood.count()
new_food_ct = newFood.count()
if old_food_ct != new_food_ct:
if new_food_ct == 0:
return 0
return newScore + food_dist[0]
else:
return food_dist[-1] - abs(newScore)
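# A small worked example of the reciprocal-distance heuristic above, assuming
# euclidean distances of 2 and 5 to the two remaining pellets: food_dist is
# [1/2, 1/5] = [0.5, 0.2], and after the ascending sort food_dist[0] == 0.2 is
# the farthest pellet's reciprocal while food_dist[-1] == 0.5 is the closest.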
class MinimaxAgent(MultiAgentSearchAgent):
"""
A minimax agent.
Here are some method calls that might be useful when implementing minimax.
`pacai.core.gamestate.AbstractGameState.getNumAgents()`:
Get the total number of agents in the game
`pacai.core.gamestate.AbstractGameState.getLegalActions`:
Returns a list of legal actions for an agent.
Pacman is always at index 0, and ghosts are >= 1.
`pacai.core.gamestate.AbstractGameState.generateSuccessor`:
Get the successor game state after an agent takes an action.
`pacai.core.directions.Directions.STOP`:
The stop direction, which is always legal, but you may not want to include in your search.
Method to Implement:
`pacai.agents.base.BaseAgent.getAction`:
Returns the minimax action from the current gameState using
`pacai.agents.search.multiagent.MultiAgentSearchAgent.getTreeDepth`
and `pacai.agents.search.multiagent.MultiAgentSearchAgent.getEvaluationFunction`.
"""
def __init__(self, index, **kwargs):
super().__init__(index, **kwargs)
self.nodes_expanded = 0
def getAction(self, gameState):
evalfn = self.getEvaluationFunction()
num_agents = gameState.getNumAgents()
tree_depth = self.getTreeDepth()
def max_value(state, agent_idx, depth):
if depth == tree_depth or state.isOver():
return evalfn(state)
v = -(sys.maxsize - 1)
if agent_idx == 0:
actions = state.getLegalActions(agent_idx)
actions.remove('Stop')
for action in actions:
next_state = state.generateSuccessor(agent_idx, action)
v = max(v, min_value(next_state, agent_idx + 1, depth))
return v
def min_value(state, agent_idx, depth):
v = sys.maxsize
if depth == tree_depth or state.isOver():
return evalfn(state)
if agent_idx == (num_agents - 1):
actions = state.getLegalActions(agent_idx)
for action in actions:
next_state = state.generateSuccessor(agent_idx, action)
v = min(v, max_value(next_state, 0, depth + 1))
return v
else:
actions = state.getLegalActions(agent_idx)
for action in actions:
next_state = state.generateSuccessor(agent_idx, action)
v = min(v, min_value(next_state, agent_idx + 1, depth))
return v
pacman_moves = []
actions = gameState.getLegalActions()
actions.remove('Stop')
for action in actions:
next_state = gameState.generateSuccessor(0, action)
agent_idx = next_state.getLastAgentMoved()
item = (action, min_value(next_state, agent_idx + 1, 0))
pacman_moves.append(item)
        action_to_take = max(pacman_moves, key=lambda move: move[1])
return action_to_take[0]
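# Note on depth accounting in getAction above: one unit of tree depth is a full
# ply (Pacman plus every ghost), because min_value only increments depth once
# control wraps back to agent 0. For example, with two ghosts and
# getTreeDepth() == 2, the recursion visits max, min, min, max, min, min.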
class AlphaBetaAgent(MultiAgentSearchAgent):
"""
A minimax agent with alpha-beta pruning.
Method to Implement:
`pacai.agents.base.BaseAgent.getAction`:
Returns the minimax action from the current gameState using
`pacai.agents.search.multiagent.MultiAgentSearchAgent.getTreeDepth`
and `pacai.agents.search.multiagent.MultiAgentSearchAgent.getEvaluationFunction`.
"""
def __init__(self, index, **kwargs):
super().__init__(index, **kwargs)
def getAction(self, gameState):
evalfn = self.getEvaluationFunction()
num_agents = gameState.getNumAgents()
tree_depth = self.getTreeDepth()
def max_value(state, agent_idx, depth, alpha, beta):
if depth == tree_depth or state.isOver():
return evalfn(state)
v = -(sys.maxsize - 1)
if agent_idx == 0:
actions = state.getLegalActions(agent_idx)
actions.remove('Stop')
for action in actions:
next_state = state.generateSuccessor(agent_idx, action)
v = max(v, min_value(next_state, agent_idx + 1, depth, alpha, beta))
if v >= beta:
return v
alpha = max(alpha, v)
return v
def min_value(state, agent_idx, depth, alpha, beta):
v = sys.maxsize
if depth == tree_depth or state.isOver():
return evalfn(state)
if agent_idx == (num_agents - 1):
actions = state.getLegalActions(agent_idx)
for action in actions:
next_state = state.generateSuccessor(agent_idx, action)
v = min(v, max_value(next_state, 0, depth + 1, alpha, beta))
if v <= alpha:
return v
beta = min(beta, v)
return v
else:
actions = state.getLegalActions(agent_idx)
for action in actions:
next_state = state.generateSuccessor(agent_idx, action)
v = min(v, min_value(next_state, agent_idx + 1, depth, alpha, beta))
if v <= alpha:
return v
beta = min(beta, v)
return v
pacman_moves = []
actions = gameState.getLegalActions()
actions.remove('Stop')
ninf = -(sys.maxsize - 1)
pinf = sys.maxsize
for action in actions:
next_state = gameState.generateSuccessor(0, action)
agent_idx = next_state.getLastAgentMoved()
item = (action, min_value(next_state, agent_idx + 1, 0, ninf, pinf))
pacman_moves.append(item)
        action_to_take = max(pacman_moves, key=lambda move: move[1])
return action_to_take[0]
class ExpectimaxAgent(MultiAgentSearchAgent):
"""
An expectimax agent.
All ghosts should be modeled as choosing uniformly at random from their legal moves.
Method to Implement:
`pacai.agents.base.BaseAgent.getAction`:
Returns the expectimax action from the current gameState using
`pacai.agents.search.multiagent.MultiAgentSearchAgent.getTreeDepth`
and `pacai.agents.search.multiagent.MultiAgentSearchAgent.getEvaluationFunction`.
"""
def __init__(self, index, **kwargs):
super().__init__(index, **kwargs)
def getAction(self, gameState):
evalfn = self.getEvaluationFunction()
num_agents = gameState.getNumAgents()
tree_depth = self.getTreeDepth()
def expectiminimax(state, agent_idx, depth, is_chance):
v = 0
if depth == tree_depth or state.isOver():
return evalfn(state)
# pacman's turn
if agent_idx == 0 and not is_chance:
v = -(sys.maxsize - 1)
actions = state.getLegalActions(agent_idx)
actions.remove('Stop')
for action in actions:
next_state = state.generateSuccessor(agent_idx, action)
v = max(v, expectiminimax(next_state, agent_idx + 1, depth, True))
# at a chance node
elif is_chance:
v = 0
# calculate chance node for final adversary agent
if agent_idx == (num_agents - 1):
actions = state.getLegalActions(agent_idx)
for action in actions:
next_state = state.generateSuccessor(agent_idx, action)
v += (expectiminimax(next_state, 0, depth + 1, False))
# chance node for adversary agents
elif agent_idx % num_agents != 0:
actions = state.getLegalActions(agent_idx)
for action in actions:
next_state = state.generateSuccessor(agent_idx, action)
v += (expectiminimax(next_state, agent_idx + 1, depth, True))
            # average over the successors in either chance branch, since the
            # ghosts are modeled as choosing uniformly at random
            v = float(v) / len(actions)
return v
pacman_moves = []
actions = gameState.getLegalActions()
actions.remove('Stop')
for action in actions:
next_state = gameState.generateSuccessor(0, action)
agent_idx = next_state.getLastAgentMoved()
item = (action, expectiminimax(next_state, agent_idx + 1, 0, True))
pacman_moves.append(item)
        action_to_take = max(pacman_moves, key=lambda move: move[1])
return action_to_take[0]
def betterEvaluationFunction(currentGameState):
"""
Your extreme ghost-hunting, pellet-nabbing, food-gobbling, unstoppable evaluation function.
    DESCRIPTION: combines the game score with reciprocal distances to the
    closest food pellet and capsule (see the formula below).
    Note:
    I thought I needed all these factors but apparently just the score, food distance,
    and capsule distance were enough...
if game is not over:
d = dist to closest food pellet
c = dist to closest capsule
t = average scared time for all ghosts
b = ghosts are brave
p = penalty for num_food not changing
g = dist to closest ghost
gs = dist to closest scared ghost
utility = score + (10 * d) + (20 * c) + (t * (100 * gs)) - (100 * g) - (10 * p) - b
else:
utility = (w1 * isWin) - [(w2 * isLose) + (w3 * num_food)] + score
= (10000 * isWin) - [(10000 * isLose) + (10 * num_food)] + score
"""
pac_pos = currentGameState.getAgentPosition(0)
num_agents = currentGameState.getNumAgents()
ghost_position = [currentGameState.getAgentPosition(i) for i in range(1, num_agents)]
capsule_position = currentGameState.getCapsules()
score = currentGameState.getScore()
num_food = currentGameState.getNumFood()
food_list = currentGameState.getFood().asList()
win = 1 if currentGameState.isWin() else 0
lose = 1 if currentGameState.isLose() else 0
over = 1 if currentGameState.isOver() else 0
food_dist = [(1.0 / (distance.euclidean(pac_pos, food))) for food in food_list]
ghost_dist = [(1.0 / (distance.euclidean(pac_pos, ghost)))
for ghost in ghost_position if int(distance.euclidean(pac_pos, ghost)) != 0]
capsule_dist = [(1.0 / distance.euclidean(pac_pos, capsule))
for capsule in capsule_position if int(distance.euclidean(pac_pos, capsule) != 0)]
    food_dist.sort()  # sorted() returns a new list, so sort in place instead
    ghost_dist.sort()
    capsule_dist.sort()
if len(capsule_dist) == 0:
capsule_dist = 0
else:
capsule_dist = capsule_dist[0]
num_brave = 0
time_scared = 0
closest_scared = sys.maxsize
for i in range(1, num_agents):
ghost_state = currentGameState.getAgentState(i)
if ghost_state.isBraveGhost():
num_brave += 1
else:
time_scared += ghost_state.getScaredTimer()
ghost_pos = ghost_state.getPosition()
if distance.euclidean(pac_pos, ghost_pos) < closest_scared:
closest_scared = distance.euclidean(pac_pos, ghost_pos)
closest_scared = 0 if closest_scared == sys.maxsize else (1.0 / (closest_scared))
if num_brave != (num_agents - 1):
time_scared = float(time_scared) / (num_agents - 1 - num_brave)
if over == 0:
utility = score + (10 * food_dist[0]) + (100 * capsule_dist)
# \
# + (time_scared * (400 * closest_scared)) \
# - ((200 * (num_agents - 1 - num_brave))
# - (100 * ghost_dist[0])) - (10 * num_food) - num_brave
# print('Over utility: ', utility)
return utility
else:
utility = (1000 * win) - ((1000 * lose) + (10 * num_food)) + score
# + (100 * (num_agents - 1 - num_brave))
# + (time_scared * (200 * closest_scared)) \
# print('Win/Lose utility: ', utility)
return utility
class ContestAgent(MultiAgentSearchAgent):
"""
Your agent for the mini-contest.
You can use any method you want and search to any depth you want.
Just remember that the mini-contest is timed, so you have to trade off speed and computation.
Ghosts don't behave randomly anymore, but they aren't perfect either -- they'll usually
just make a beeline straight towards Pacman (or away if they're scared!)
Method to Implement:
`pacai.agents.base.BaseAgent.getAction`
"""
def __init__(self, index, **kwargs):
super().__init__(index, **kwargs)
def getAction(self, gameState):
evalfn = betterEvaluationFunction
num_agents = gameState.getNumAgents()
tree_depth = 3 * self.getTreeDepth()
def max_value(state, agent_idx, depth, alpha, beta):
if depth == tree_depth or state.isOver():
return evalfn(state)
v = -(sys.maxsize - 1)
if agent_idx == 0:
actions = state.getLegalActions(agent_idx)
actions.remove('Stop')
for action in actions:
next_state = state.generateSuccessor(agent_idx, action)
v = max(v, min_value(next_state, agent_idx + 1, depth, alpha, beta))
if v >= beta:
return v
alpha = max(alpha, v)
return v
def min_value(state, agent_idx, depth, alpha, beta):
v = sys.maxsize
if depth == tree_depth or state.isOver():
return evalfn(state)
if agent_idx == (num_agents - 1):
actions = state.getLegalActions(agent_idx)
for action in actions:
next_state = state.generateSuccessor(agent_idx, action)
v = min(v, max_value(next_state, 0, depth + 1, alpha, beta))
if v <= alpha:
return v
beta = min(beta, v)
return v
else:
actions = state.getLegalActions(agent_idx)
for action in actions:
next_state = state.generateSuccessor(agent_idx, action)
v = min(v, min_value(next_state, agent_idx + 1, depth, alpha, beta))
if v <= alpha:
return v
beta = min(beta, v)
return v
pacman_moves = []
actions = gameState.getLegalActions()
actions.remove('Stop')
ninf = -(sys.maxsize - 1)
pinf = sys.maxsize
for action in actions:
next_state = gameState.generateSuccessor(0, action)
agent_idx = next_state.getLastAgentMoved()
item = (action, min_value(next_state, agent_idx + 1, 0, ninf, pinf))
pacman_moves.append(item)
        action_to_take = max(pacman_moves, key=lambda move: move[1])
return action_to_take[0]
| UTF-8 | Python | false | false | 18,312 | py | 15 | multiagents.py | 14 | 0.587047 | 0.5782 | 0 | 450 | 39.693333 | 100 |
Seabreg/chiasm-shell | 3,435,973,881,728 | 9b7d86dffc14f29e40bda3c375cf7aa6472caf5b | ddd8940c63a64bc0f1924f28bb3bc0ab9037cda9 | /chiasm_shell/chiasm_shell.py | a5b5a3229579ae71def5197590afb3feae18d9fc | [
"MIT"
] | permissive | https://github.com/Seabreg/chiasm-shell | d08d159c0ad90841b38ce348b2ac5595b57a5dbe | e20ed9fdf3fcb87d9469aa6fd52bf9e3eed92bc7 | refs/heads/master | "2020-05-02T01:20:51.398278" | "2017-01-27T04:58:13" | "2017-01-27T04:58:13" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
"""
Main class and method for chiasm shell.
:author: Ben Cheney
:license: MIT
"""
from __future__ import absolute_import
import logging
import chiasm_shell.config as config
l = logging.getLogger('chiasm_shell.chiasm_shell')
class ChiasmShell(object):
"""
Utility class for kicking off the shell.
"""
def run(self):
"""
Creates the default backend and starts the loop.
"""
backend = config.get_backends()[config.get_default_backend()]
info_str = "Chiasm Shell - {}".format(config.__VERSION__)
l.info(info_str)
while True:
l.debug("outer loop spinning up a new shell")
l.info("Current arch is %s", backend.get_arch())
backend.cmdloop()
if backend.launch_module is not None:
backend = backend.launch_module
else:
break
def main():
"""
Public method for starting Chiasm Shell.
"""
ChiasmShell().run()
if __name__ == '__main__':
main()
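# A hypothetical invocation, assuming the package is importable on PYTHONPATH:
#
#     python -m chiasm_shell.chiasm_shell
#
# The outer loop in run() re-enters cmdloop() whenever a backend sets
# launch_module, which is how switching between backends is implemented.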
| UTF-8 | Python | false | false | 1,044 | py | 6 | chiasm_shell.py | 5 | 0.584291 | 0.584291 | 0 | 43 | 23.27907 | 69 |
davidcarvalho/code-snippets | 12,017,318,524,794 | 13cddd9a7136e01b06b7a80e536b931bcb75c258 | 67e18e48337134e144dadeaa3f619952658af1fa | /code_snippets/print_dict.py | b3e7628c09b7a702b1f7a99a4eddf2ad916883c1 | [] | no_license | https://github.com/davidcarvalho/code-snippets | e687f3fa2176b7c9c8cb87c2dce4e64cfb3aa339 | d0c0904b2ea0668b4c58bb269ac27f3eded0827c | refs/heads/master | "2023-06-15T04:46:40.974754" | "2021-07-05T19:00:47" | "2021-07-05T19:00:47" | 382,922,159 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def dict_print(dict1):
for key in dict1.keys():
if type(dict1[key]) is dict:
dict_print(dict1[key])
else:
print(f'Dictionary key {key} for dictionary value {dict1[key]}')
d = {x: x ** 2 for x in [2, 3, 4, 5, 6, 7]}
print(d)
dict_print(d)
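# Expected output of the two calls above (insertion order, CPython 3.7+):
#
# {2: 4, 3: 9, 4: 16, 5: 25, 6: 36, 7: 49}
# Dictionary key 2 for dictionary value 4
# Dictionary key 3 for dictionary value 9
# Dictionary key 4 for dictionary value 16
# Dictionary key 5 for dictionary value 25
# Dictionary key 6 for dictionary value 36
# Dictionary key 7 for dictionary value 49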
| UTF-8 | Python | false | false | 284 | py | 22 | print_dict.py | 21 | 0.545775 | 0.503521 | 0 | 11 | 24.818182 | 76 |
isabella232/altimeter | 644,245,097,680 | e9bc4554d8a297b95d4552d1de7753dc49c8ca8c | b3610065d63cd2a78a8b3563576ea6597d76ea3b | /altimeter/aws/access/accessor.py | 898c69da644bd7763dbe95d2b2d0e0fdf315b378 | [
"Python-2.0",
"MIT"
] | permissive | https://github.com/isabella232/altimeter | 5d0d7c7efcaef2613b0a11a4249b7322a9aaf8de | 7969fa62de54546e7548aa998bb2f42b78e69851 | refs/heads/master | "2023-02-02T04:46:36.762061" | "2020-12-23T18:37:18" | "2020-12-23T18:37:18" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """Classes defining multi-stage AWS access methods. Provides the ability to
auth to an account via 1+ bridge accounts and to try multiple methods."""
from dataclasses import dataclass, field
from datetime import datetime
import json
import os
from pathlib import Path
from typing import Any, Dict, List, Optional, Type
import boto3
from altimeter.aws.access.exceptions import AccountAuthException
from altimeter.core.log import Logger
from altimeter.core.log_events import LogEvent
@dataclass(frozen=True)
class SessionCacheValue:
"""A SessionCacheValue represents a value in a :class:`.SessionCache`.
Args:
session: boto3 Session object
expiration: expiration time for this cache value.
"""
session: boto3.Session
expiration: datetime
def is_expired(self) -> bool:
"""Determine if this cache value is expired
Returns:
True if this session is value is expired, else False.
"""
return datetime.utcnow().replace(tzinfo=None) >= self.expiration.replace(tzinfo=None)
class SessionCache:
"""A SessionCache is a cache for boto3 Sessions."""
def __init__(self) -> None:
self._cache: Dict[str, SessionCacheValue] = {}
@staticmethod
def _build_key(
account_id: str, role_name: str, role_session_name: str, region: Optional[str]
) -> str:
"""Build a key for a :class:`.SessionCache` representing a unique Session.
Args:
account_id: session account id
role_name: session role name
role_session_name: session role session name
region: session region
Returns:
string cache key
"""
return f"{account_id}:{role_name}:{role_session_name}:{region}"
def put(
self,
session: boto3.Session,
expiration: datetime,
account_id: str,
role_name: str,
role_session_name: str,
region: Optional[str] = None,
) -> None:
"""Put a Session into the cache.
Args:
session: session to cache
expiration: expiration time for this entry
account_id: session account id
role_name: session role name
role_session_name: session role session name
region: session region
"""
cache_key = SessionCache._build_key(account_id, role_name, role_session_name, region)
self._cache[cache_key] = SessionCacheValue(session, expiration)
def get(
self, account_id: str, role_name: str, role_session_name: str, region: Optional[str] = None
) -> Optional[SessionCacheValue]:
"""Get a session from the cache.
Args:
account_id: session account id
role_name: session role name
role_session_name: session role session name
region: session region
Returns:
SessionCacheValue if one is found, else None.
"""
cache_key = SessionCache._build_key(account_id, role_name, role_session_name, region)
cache_val = self._cache.get(cache_key)
if cache_val is not None:
if cache_val.is_expired():
del self._cache[cache_key]
else:
return self._cache[cache_key]
return None
@dataclass(frozen=True)
class AccessStep:
"""Represents a single access step to get to an account.
Args:
role_name: role name for this step
account_id: account_id for this step. If empty this step is assumed to be the last
in a chain of multiple AccessSteps
external_id: external_id to use for access (if needed).
"""
role_name: str
account_id: Optional[str] = field(default=None)
external_id: Optional[str] = field(default=None)
def __str__(self) -> str:
account = self.account_id if self.account_id else "target"
return f"{self.role_name}@{account}"
def to_dict(self) -> Dict[str, Any]:
"""Generate a dict representation of this AccessStep
Returns:
dict representation of this AccessStep
"""
return {
"role_name": self.role_name,
"external_id": self.external_id,
"account_id": self.account_id,
}
@classmethod
def from_dict(cls: Type["AccessStep"], data: Dict[str, Any]) -> "AccessStep":
"""Create an AccessStep from a dict containing AccessStep data.
Args:
data: AccessStep data dict
Returns:
AccessStep object
Raises:
ValueError if data is not valid
"""
role_name = data.get("role_name")
if role_name is None:
raise ValueError(f"{cls.__name__} missing key 'role_name': {data}")
external_id = data.get("external_id")
if not external_id:
external_id_env_var = data.get("external_id_env_var")
if external_id_env_var is not None:
external_id = os.environ.get(external_id_env_var)
if external_id is None:
raise ValueError(
f"Missing env var '{external_id_env_var}' for {cls.__name__} {data}"
)
account_id = data.get("account_id")
return cls(role_name=role_name, external_id=external_id, account_id=account_id)
class MultiHopAccessor:
"""A MultiHopAccessor contains a list of AccessSteps defining how to gain access to an account.
Args:
role_session_name: role session name to use for session creation.
access_steps: list of AccessSteps defining how to access a final
destination account.
"""
def __init__(self, role_session_name: str, access_steps: List[AccessStep]):
self.role_session_name = role_session_name
if not access_steps:
raise ValueError("One or more access steps must be specified")
for access_step in access_steps[:-1]:
if not access_step.account_id:
raise ValueError(
"Non-final AccessStep of a MultiHopAccessor must specify an account_id"
)
if access_steps[-1].account_id:
raise ValueError(
"The last AccessStep of a MultiHopAccessor must not specify account_id"
)
self.access_steps = access_steps
self.session_cache = SessionCache()
def get_session(self, account_id: str, region: Optional[str] = None) -> boto3.Session:
"""Get a session for an account_id by iterating through the :class:`.AccessStep`s
of this :class:`.MultiHopAccessor`.
Args:
account_id: account to access
region: region to use during session creation.
Returns:
boto3 Session for accessing account_id
"""
logger = Logger()
cws = boto3.Session(region_name=region)
for access_step in self.access_steps:
access_account_id = access_step.account_id if access_step.account_id else account_id
role_name = access_step.role_name
external_id = access_step.external_id
session_cache_value = self.session_cache.get(
account_id=access_account_id,
role_name=role_name,
role_session_name=self.role_session_name,
region=region,
)
if session_cache_value is None:
logger.debug(event=LogEvent.AuthToAccountStart)
sts_client = cws.client("sts")
role_arn = f"arn:aws:iam::{access_account_id}:role/{role_name}"
assume_args = {"RoleArn": role_arn, "RoleSessionName": self.role_session_name}
if external_id:
assume_args["ExternalId"] = external_id
assume_resp = sts_client.assume_role(**assume_args)
creds = assume_resp["Credentials"]
expiration = creds["Expiration"]
cws = boto3.Session(
aws_access_key_id=creds["AccessKeyId"],
aws_secret_access_key=creds["SecretAccessKey"],
aws_session_token=creds["SessionToken"],
region_name=region,
)
self.session_cache.put(
session=cws,
expiration=expiration,
account_id=access_account_id,
role_name=role_name,
role_session_name=self.role_session_name,
region=region,
)
logger.debug(event=LogEvent.AuthToAccountEnd)
else:
cws = session_cache_value.session
return cws
def __str__(self) -> str:
return f'accessor:{self.role_session_name}:{",".join([str(access_step) for access_step in self.access_steps])}'
def to_dict(self) -> Dict[str, Any]:
"""Generate a dict representation of this MultiHopAccessor
Returns:
dict representation of this MultiHopAccessor
"""
return {
"role_session_name": self.role_session_name,
"access_steps": [access_step.to_dict() for access_step in self.access_steps],
}
@classmethod
def from_dict(cls: Type["MultiHopAccessor"], data: Dict[str, Any]) -> "MultiHopAccessor":
"""Build a MultiHopAccessor from a dict representation.
Args:
data: dict of data representing a MultiHopAccessor
Returns:
MultiHopAccessor object
"""
access_step_dicts = data.get("access_steps")
if access_step_dicts is None:
raise ValueError(f"{cls.__name__} missing key 'access_steps': {data}")
access_steps = [
AccessStep.from_dict(access_step_dict) for access_step_dict in access_step_dicts
]
role_session_name = data.get("role_session_name")
if role_session_name is None:
raise ValueError(f"{cls.__name__} missing key 'role_session_name': {data}")
return cls(role_session_name, access_steps)
@dataclass(frozen=True)
class Accessor:
"""An Accessor consists of a list of MultiHopAccessors. It provides a method `get_session`
which will iterate through the MultiHopAccessors until a session can be obtained to
a target account. If an Accessor has no MultiHopAccessors it simply uses the local
session to attempt to access the account. If the session does not match the requested
target account id, ValueError is thrown.
Args:
multi_hop_accessors: List of MultiHopAccessors
"""
multi_hop_accessors: List[MultiHopAccessor] = field(default_factory=list)
def get_session(self, account_id: str, region: Optional[str] = None) -> boto3.Session:
"""Get a boto3 session for a given account.
Args:
account_id: target account id
region: session region
Returns:
boto3.Session object
"""
logger = Logger()
with logger.bind(auth_account_id=account_id):
if self.multi_hop_accessors:
for mha in self.multi_hop_accessors: # pylint: disable=not-an-iterable
with logger.bind(auth_accessor=str(mha)):
try:
session = mha.get_session(account_id=account_id, region=region)
return session
except Exception as ex:
logger.debug(event=LogEvent.AuthToAccountFailure, exception=str(ex))
raise AccountAuthException(f"Unable to access {account_id} using {str(self)}")
# local run mode
session = boto3.Session(region_name=region)
sts_client = session.client("sts")
sts_account_id = sts_client.get_caller_identity()["Account"]
if sts_account_id != account_id:
raise ValueError(f"BUG: sts_account_id {sts_account_id} != {account_id}")
return session
def __str__(self) -> str:
return ", ".join(
[str(mha) for mha in self.multi_hop_accessors] # pylint: disable=not-an-iterable
)
def to_dict(self) -> Dict[str, Any]:
"""Generate a dict representation of this Accessor.
Returns:
dict representation of this Accessor.
"""
mha_dicts = [
mha.to_dict() for mha in self.multi_hop_accessors # pylint: disable=not-an-iterable
]
data = {"accessors": mha_dicts}
return data
@classmethod
def from_dict(cls: Type["Accessor"], data: Dict[str, Any]) -> "Accessor":
"""Create an Accessor from a dict representation.
Args:
data: dict representation of an Accessor
Returns:
Accessor object
"""
multi_hop_accessors: List[MultiHopAccessor] = []
accessor_dicts = data.get("accessors", [])
for accessor_dict in accessor_dicts:
multi_hop_accessor = MultiHopAccessor.from_dict(accessor_dict)
multi_hop_accessors.append(multi_hop_accessor)
return cls(multi_hop_accessors=multi_hop_accessors)
@classmethod
def from_file(cls: Type["Accessor"], filepath: Path) -> "Accessor":
"""Create an Accessor from json content in a file
Args:
filepath: Path to json accessor definition
Returns:
Accessor
"""
with filepath.open("r") as fp:
data = json.load(fp)
return cls.from_dict(data)
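# A hypothetical JSON config consumed by Accessor.from_file; the keys follow
# from_dict above, while the role names and account id are made up. Note the
# final access step must omit account_id, per MultiHopAccessor's validation:
#
# {
#     "accessors": [
#         {
#             "role_session_name": "altimeter",
#             "access_steps": [
#                 {"role_name": "BridgeRole", "account_id": "123456789012"},
#                 {"role_name": "ScannerRole"}
#             ]
#         }
#     ]
# }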
| UTF-8 | Python | false | false | 13,614 | py | 45 | accessor.py | 40 | 0.590862 | 0.589687 | 0 | 369 | 35.894309 | 119 |
theMagicalKarp/python-xml-parsing-benchmark | 13,993,003,464,693 | c95b37e324410d534eece2cc55e3e102c7d4bc9b | e7b30e2a05aa8bd440a8b5f5fc0c1e38365dbd9a | /tools/__init__.py | 7c8267e3098ef108200139436aed93e1a5bae75e | [] | no_license | https://github.com/theMagicalKarp/python-xml-parsing-benchmark | 97394cae255ff74c14dc3e052175b44a7288e687 | 5819eedbab0c53bcec1c791f029c1945673c8a01 | refs/heads/master | "2020-12-08T15:38:58.752921" | "2013-03-14T22:33:50" | "2013-03-14T22:33:50" | 8,513,607 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import sys, profilers, pstats
class ProfileManager(object):
""" Profile Manager
    Handles and runs multiple profilers
    to generate reports.
    Attributes:
        profiler_list: list of profiler objects to compare on each run
xml_data: A string representation of our xml file
"""
def __init__(self, profiler_list, xml_file_name):
self.profiler_list = profiler_list
xml_file = open(xml_file_name)
self.xml_data = xml_file.read()
xml_file.close()
def print_stats(self, stats, results, sample_size):
""" Prints stats
This allows for a controlled output format.
        Also, this should only be called in the context
        of several profiles run under the same conditions.
Args:
stats: list of Pstat objects
results: list of result items
            sample_size: number of samples run
"""
num_passed = sum(res.passed for res in results)
print '////////////////////////////////////////////////////////'
print '///// Average number of calls per sample, %s' % (stats.total_calls/(sample_size))
print '///// Average run time per sample, %s seconds' % (stats.total_tt/(sample_size))
print '///// %d out of %d test passed' % (num_passed, sample_size)
print '////////////////////////////////////////////////////////'
stats.sort_stats('time')
stats.print_stats()
def print_current_status(self, to_write):
""" Print current status
        Our standard format for printing and
updating a line on the prompt.
Args:
            to_write: a new string to replace our current
line with
"""
print to_write,
sys.stdout.flush()
print "\r",
def search_tag_by_attribute(self, tag, attribute, attribute_value,
sample_size = 25):
""" Search tag by attribute
This runs all of our profiles stored in profiler list
            and aggregates all of the stats from each run.
            This specifically runs search_tag_by_attribute on each profiler.
Args:
tag: the name of the tag we need to find
                attribute: the attribute we are inspecting
                attribute_value: the value of the attribute we
are trying to find
sample_size: the number of samples we would like to collect
"""
results_by_profile = {}
stats_by_profile = {}
expected_result = {'tag':tag, 'attribute_value':attribute_value}
for profiler in self.profiler_list:
print '----- Running Profiler on %s -----' % (profiler.name)
results_by_profile[profiler] = []
num_passed = 0
for sample_num in xrange(sample_size):
self.print_current_status(' Currently profiling sample %s...' % (sample_num+1))
result = profiler.search_tag_by_attribute(self.xml_data,
tag, attribute, attribute_value,
sample_num)
num_passed = num_passed+1 if result.test(expected_result) else num_passed
results_by_profile[profiler].append(result)
if profiler in stats_by_profile:
stats_by_profile[profiler].add(result.profile_result)
else:
stats_by_profile[profiler] = pstats.Stats(result.profile_result)
self.print_stats(stats_by_profile[profiler], results_by_profile[profiler], sample_size)
return stats_by_profile
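# A minimal usage sketch; the XML filename, tag/attribute values, and the way
# the profiler list is built are hypothetical (each profiler only needs a
# .name attribute and a .search_tag_by_attribute method):
#
# manager = ProfileManager([profilers.LxmlProfiler()], 'books.xml')
# stats = manager.search_tag_by_attribute('book', 'id', 'bk101', sample_size=5)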
| UTF-8 | Python | false | false | 3,680 | py | 4 | __init__.py | 3 | 0.555707 | 0.554348 | 0 | 89 | 40.337079 | 99 |
fryslie/eulerbend | 14,456,859,927,456 | 76e53c3563c11936fb522333bce3298fb680f618 | 291e736b4eca07dd55f62a342ed4000b99aa28b5 | /eulerbend_ipkiss.py | 4b30ea76ce47924d81d58b2084423bfe73cc7ddf | [] | no_license | https://github.com/fryslie/eulerbend | 4970fb3aa8bfbffa5b4fd0a5d66a8b997f9025d8 | 00ae6a7968579dacebe267cff9ac90a3f4a4dfa0 | refs/heads/master | "2023-03-17T03:31:28.324571" | "2020-09-24T06:34:09" | "2020-09-24T06:34:09" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """ Generate an euler bend with IPKISS """
from __future__ import division
import numpy as np
from scipy.special import fresnel
import technologies.silicon_photonics
import ipkiss3.all as i3
def _euler_bend_center_line_shape(R=10, theta=np.pi/2, num_points=1000):
"""
Args:
R (float): minimal bend radius
theta (float): final angle (in radians)
num_points (int): resolution of the shape
"""
L = R * theta # HALF of total length
s = np.linspace(0, L, num_points // 2)
f = np.sqrt(np.pi * R * L) + 1e-18 # for numerical stability
y1, x1 = fresnel(s / f)
# first, rotate by the final angle
x2, y2 = np.dot(
np.array([[np.cos(theta), np.sin(theta)], [-np.sin(theta), np.cos(theta)]]),
np.stack([x1, y1], 0),
)
# then, flip along the x-axis (and reverse direction of the curve):
x2, y2 = -x2[::-1], y2[::-1]
# then translate from (x2[0], y2[0]) to (x1[-1], y1[-1])
x2, y2 = x2 - x2[0] + x1[-1], y2 - y2[0] + y1[-1]
x = f * np.concatenate([x1, x2], 0)
y = f * np.concatenate([y1, y2], 0)
    return list(zip(x, y))  # materialize so callers can index the shape under Python 3 too
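# A quick standalone check of the centerline, independent of IPKISS (assumes
# matplotlib is installed; curvature grows linearly from 0 to 1/R at the
# midpoint and back to 0, which is what makes this an Euler/clothoid bend):
#
# import matplotlib.pyplot as plt
# pts = np.array(_euler_bend_center_line_shape(R=10, theta=np.pi / 2))
# plt.plot(pts[:, 0], pts[:, 1])
# plt.axis('equal')
# plt.show()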
class EulerBend(i3.PCell):
""" An Euler Bend for IPKISS """
_name_prefix = "EULER_BEND"
trace_template = i3.WaveguideTemplateProperty(
doc="trace template for the waveguide"
)
def _default_trace_template(self):
return i3.TECH.PCELLS.WG.DEFAULT
waveguide = i3.ChildCellProperty(doc="waveguide")
def _default_waveguide(self):
return i3.RoundedWaveguide(trace_template=self.trace_template)
class Layout(i3.LayoutView):
""" Layout for the IPKISS Euler Bend """
min_radius = i3.PositiveNumberProperty(doc="minimum Euler bend radius")
def _default_min_radius(self):
if self.end_point is None:
return 10
x, y = self.end_point
# the final angle is twice the angle between start and finish.
theta = 2 * np.arctan2(y, x)
x_final = _euler_bend_center_line_shape(R=1, theta=theta, num_points=4)[-1][0]
return x / x_final
end_angle = i3.PositiveNumberProperty(doc="angle (in degrees) at the end of the Euler bend.")
def _default_end_angle(self):
if self.end_point is None:
return 90
x, y = self.end_point
return 2 * np.arctan2(y, x) * i3.RAD2DEG
num_points = i3.PositiveNumberProperty(
doc="resolution of the euler bend shape.",
default=1000,
)
end_point = i3.Coord2Property(
doc=(
"(optional) coordinates of the end of the Euler bend.\n"
"if given, this will override `min_radius` and `end_angle`."
),
default=None,
)
core_width = i3.PositiveNumberProperty(
doc="default core width of the waveguide",
default=i3.TECH.WG.WIRE_WIDTH,
)
cladding_width = i3.PositiveNumberProperty(
doc="default cladding width of the waveguide",
default=i3.TECH.WG.CLADDING_WIDTH,
)
def _default_trace_template(self):
trace = self.cell.trace_template.get_default_view(i3.LayoutView)
trace.set(
core_width=self.core_width,
cladding_width=self.cladding_width,
)
return trace
def _default_waveguide(self):
waveguide = self.cell.waveguide.get_default_view(i3.LayoutView)
center_line = _euler_bend_center_line_shape(
R=self.min_radius,
theta=self.end_angle * i3.DEG2RAD,
num_points=self.num_points,
)
waveguide.set(
shape=center_line,
trace_template=self.trace_template,
bend_radius=self.min_radius,
)
return waveguide
def _generate_instances(self, insts):
insts += i3.SRef(reference=self.waveguide)
return insts
def _generate_ports(self, ports):
ports += self.waveguide.ports
return ports
if __name__ == "__main__":
cell = EulerBend()
layout = cell.Layout(end_angle=90, core_width=1, min_radius=10)
layout.visualize(annotate=True)
layout.write_gdsii("eulerbend.gds") | UTF-8 | Python | false | false | 4,371 | py | 4 | eulerbend_ipkiss.py | 1 | 0.567376 | 0.545413 | 0 | 130 | 32.630769 | 101 |
simon-pikalov/deep_learning_course_matirial | 8,040,178,800,672 | 2019122ef982ced8b3457f1070649319c7093c85 | 32e5077d41894607daec358998a72c01bc90ce21 | /logistic_regression/ergent_classifier.py | e355cd65608236d3b8da1afa18456a52c4cb3998 | [] | no_license | https://github.com/simon-pikalov/deep_learning_course_matirial | 19889c9496ef464e87ad7054358ebb9cccd4817d | 5b438208745d3e430efb2e389e1ade9ef65811c4 | refs/heads/main | "2023-03-08T09:42:46.647205" | "2021-02-14T06:27:09" | "2021-02-14T06:27:09" | 317,063,942 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import numpy as np
import tensorflow.compat.v1 as tf
import os
import numpy as np
import matplotlib.pyplot as plt
tf.disable_v2_behavior()
tf.disable_eager_execution()
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
vocabulary_size = 0 # can use "global" keyword
word2location = {}
data = ["Where are you? I'm trying to reach you for half an hour already, contact me ASAP I need to leave now!",
"I want to go out for lunch, let me know in the next couple of minutes if you would like to join.",
"I was wondering whether you are planning to attend the party we are having next month.",
"I wanted to share my thoughts with you."]
def prepare_vocabulary(data):
idx = 0
for sentence in data:
for word in sentence.split(): # better use nltk.word_tokenize(sentence) and perform some stemming etc.!!!
if word not in word2location:
word2location[word] = idx
idx += 1
return idx
def convert2vec(sentence):
res_vec = np.zeros(vocabulary_size)
for word in sentence.split(): # also here...
if word in word2location:
res_vec[word2location[word]] += 1
return res_vec
def logistic_fun(z):
return 1 / (1.0 + np.exp(-z))
vocabulary_size = prepare_vocabulary(data)  # must be called first, otherwise the feature dimension stays 0
features = vocabulary_size
eps = 1e-12
x = tf.placeholder(tf.float32, [None, features])
y_ = tf.placeholder(tf.float32, [None, 1])
W = tf.Variable(tf.zeros([features,1]))
b = tf.Variable(tf.zeros([1]))
y = 1 / (1.0 + tf.exp(-(tf.matmul(x,W) + b)))
loss1 = -(y_*tf.log(y+eps) + (1-y_) * tf.log( 1 - y + eps))
loss = tf.reduce_mean(loss1)
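# loss1 above is the per-example binary cross-entropy,
# -(y * log(p) + (1 - y) * log(1 - p)), with eps guarding the logs
# against p == 0 or p == 1.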
update = tf.train.GradientDescentOptimizer(0.00001).minimize(loss)
data_x = np.array([convert2vec(data[0]), convert2vec(data[1]), convert2vec(data[2]), convert2vec(data[3])])
data_y = np.array([[1],[1],[0],[0]])
sess = tf.Session()
sess.run(tf.global_variables_initializer())
print(data_x)
print(data_y)
for i in range(0,10000):
sess.run(update, feed_dict = {x:data_x, y_:data_y}) #BGD
#
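# With the labels above (urgent -> 1), training should push the prediction for
# test1 toward 1 and for test2 toward 0, though with only four sentences and a
# tiny learning rate the margins are expected to stay modest.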
test1 = "I need you now! Please answer ASAP!"
test2 = "I wanted to hear your thoughts about my plans."
# pdb.set_trace()
print('Prediction for: "' + test1 + '"',
logistic_fun(np.matmul(np.array([convert2vec(test1)]), sess.run(W)) + sess.run(b))[0][0])
print('Prediction for: "' + test2 + '"',
logistic_fun(np.matmul(np.array([convert2vec(test2)]), sess.run(W)) + sess.run(b))[0][0]) | UTF-8 | Python | false | false | 2,357 | py | 13 | ergent_classifier.py | 12 | 0.651676 | 0.622401 | 0 | 68 | 33.676471 | 114 |
bipashasen/CTC-Transformer-Spech-Recognition | 10,883,447,153,400 | 071d64e0a36de2cd1650eee221c90c70a6a4ecea | e7a4d237203a32ab61d7e922eee457f039bb535e | /create_features_temp.py | a2225748a4a1240d041862101ad19bec06074307 | [] | no_license | https://github.com/bipashasen/CTC-Transformer-Spech-Recognition | cb3e1d34c51037550b8eb96ed878a77a52d1a999 | ee6dc06aac79e0e582488cd4e337f139e5f3e1f8 | refs/heads/master | "2022-12-14T14:49:12.602420" | "2020-09-16T16:36:18" | "2020-09-16T16:36:18" | 296,089,594 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import soundfile as sf
import os
import numpy as np
from numpy import asarray
from numpy import save
from numpy import savez
import sys
folder_path = sys.argv[1]
features_folder_path = sys.argv[2]
featureAudioMapping = sys.argv[3]
feature_size = 3200
def processFeatures():
featureAudioMap = list()
    maxFrameLength = -1
    maxAudioFile = None  # avoids a NameError in the summary prints when no audio files are found
for root, dirs, files in os.walk(folder_path):
for file in files:
if file.endswith('.flac'):
# read the audio file and store the features
# in features_folder_path with the same name as audio_file
audioFilePath = root + '/' + file
data, samplerate = sf.read(audioFilePath)
                data = np.append(data, [0] * ((feature_size - data.shape[0] % feature_size) % feature_size))  # pad to a multiple of feature_size; no extra silent frame when already aligned
data = data.reshape(int(data.shape[0]/feature_size), feature_size)
if data.shape[0] > maxFrameLength:
maxFrameLength = data.shape[0]
maxAudioFile = audioFilePath
featureFilePath = features_folder_path + file.split('.')[0] + '.npz'
savez(featureFilePath, data)
#featureFilePath = features_folder_path + file.split('.')[0] + '.npy'
#save(featureFilePath, data)
# add the mapping to a file
featureAudioMap.append(audioFilePath + ' ' + featureFilePath)
print('Max sequence length : ' + str(maxFrameLength))
print('Max audio file : ' + maxAudioFile)
# save the mapping into a file
with open(featureAudioMapping, 'w') as f:
for line in featureAudioMap:
f.write(line + '\n')
if __name__ == "__main__":
processFeatures()
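# A hypothetical invocation (argument order follows the sys.argv reads above;
# the paths are made up):
#
#     python create_features_temp.py /data/flac_corpus/ features/ feature_map.txt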
| UTF-8 | Python | false | false | 1,465 | py | 6 | create_features_temp.py | 5 | 0.692833 | 0.682594 | 0 | 45 | 31.555556 | 73 |
Naive96/AdventOfCode | 9,938,554,348,271 | 1bc85a493d186475606b2bfe4b1a5401947b4f46 | 361c629165244dbabcee15571ff4de1a18d0e6b7 | /test/Day2/TestDay2.py | 20fddc9420b789e70685c11afde793dd3f08817e | [] | no_license | https://github.com/Naive96/AdventOfCode | 17a071e1437c632d3b1fd1a036f6c584c5637f53 | a7cdb18ccb7777f120ef673b164d9dd91ed6910c | refs/heads/main | "2023-02-04T07:39:29.320275" | "2020-12-23T07:16:25" | "2020-12-23T07:16:25" | 323,741,368 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import unittest
from src.Day2 import part1
from src.Day2 import part2
class TestDay2(unittest.TestCase):
def test_part1(self):
pwds = ['1-3 a: abcde',
'1-3 b: cdefg',
'2-9 c: ccccccccc']
result = part1.execute(pwds)
self.assertEqual(result, 2)
def test_part2(self):
pwds = ['1-3 a: abcde',
'1-3 b: cdefg',
'2-9 c: ccccccccc']
result = part2.execute(pwds)
self.assertEqual(result, 1)
| UTF-8 | Python | false | false | 507 | py | 6 | TestDay2.py | 6 | 0.534517 | 0.489152 | 0 | 20 | 24.35 | 36 |
prantoran/prac | 11,321,533,825,796 | bc8b40ebb4f95a4bf32c5c2d6cab76de69a13b3b | 2be0db6b3bde8401095b95b2043202d934713265 | /envp/lib/python3.7/tempfile.py | 2201d99252a7a40153702fb0362e7407baf05063 | [] | no_license | https://github.com/prantoran/prac | cd13f3cf2fed196481dd0c7a1d32407ffcad6aa1 | b0925fcd2cb4c8b27673bfd145aaee7537243114 | refs/heads/master | "2020-06-22T18:40:41.872190" | "2019-07-19T13:23:14" | "2019-07-19T13:23:14" | 197,775,442 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | /home/prantoran/miniconda3/lib/python3.7/tempfile.py | UTF-8 | Python | false | false | 52 | py | 46 | tempfile.py | 43 | 0.846154 | 0.788462 | 0 | 1 | 52 | 52 |
Aeroone/YSTR-CVL | 4,861,902,983,854 | d87d0dba2c0273d3a5872e139686e390537bfbb3 | 773211e16baf1b7a8105f1aa20b0d5c7a370c78c | /model_VisualSemanticEmbedding.py | 5b5c8f4694392b4c0a0ff76016c966dc69eadb10 | [] | no_license | https://github.com/Aeroone/YSTR-CVL | b149ae84e24c12806ea10f5b4150e70bb2cfdf7e | be586f279a4634ee006fb2594902fc9e91f1e17a | refs/heads/master | "2021-08-22T11:31:58.674785" | "2017-11-30T04:17:37" | "2017-11-30T04:17:37" | 109,063,591 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import config
import torchvision.models as models
### use the visual semantic text embedding model here ###
class VisualSemanticEmbedding(nn.Module):
def __init__(self, embed_ndim):
super(VisualSemanticEmbedding, self).__init__()
self.embed_ndim = embed_ndim
# image feature --- use vgg16 ---
if config.use_vgg:
self.img_encoder = models.vgg16(pretrained = True)
for param in self.img_encoder.parameters():
param.requires_grad = False
self.feat_extractor = nn.Sequential(*(self.img_encoder.classifier[i] for i in range(6)))
# turn to the same dimension of the text!
self.W = nn.Linear(4096, embed_ndim, False)
# text feature
self.text_encoder = nn.GRU(embed_ndim, embed_ndim, 1) # GRU(input_size, hidden_size, num_layers)
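        # Expected shapes (assumptions, not enforced here): img should be a
        # (batch, 3, 224, 224) tensor for the VGG16 feature extractor, and txt
        # should be (seq_len, batch, embed_ndim) since this GRU is not
        # batch_first; forward() then returns one embed_ndim vector per sample
        # for each modality.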
def forward(self, img, txt):
#####################
### image feature ###
img_feat = self.img_encoder.features(img)
img_feat = img_feat.view(img_feat.size(0), -1)
img_feat = self.feat_extractor(img_feat)
img_feat = self.W(img_feat)
#####################
#####################
### text feature ###
h0 = torch.zeros(1, img.size(0), self.embed_ndim)
h0 = Variable(h0.cuda() if config.is_cuda else h0) # the initial hidden state for each element in the batch
_, txt_feat = self.text_encoder(txt, h0)
txt_feat = txt_feat.squeeze()
#####################
return img_feat, txt_feat | UTF-8 | Python | false | false | 1,649 | py | 24 | model_VisualSemanticEmbedding.py | 15 | 0.582777 | 0.571255 | 0 | 49 | 32.673469 | 115 |
Sunjjjjjj/AAI_simulator_py2 | 15,066,745,319,412 | c6c18fc20a3fb3bf6b5a2c8657f68b54e205b41d | 6adcffdbc4af75c3ccde5bfc638d248241f4bf19 | /MODIS_combine.py | 97dd3d55fdafc7c6c2d8057fb247413a16b2886f | [] | no_license | https://github.com/Sunjjjjjj/AAI_simulator_py2 | fedd9b9050ea8b4c3dc4980ece3e21c08831c1f1 | 4f98c3feae1c27f28397aa8683c8c3af06673cf4 | refs/heads/master | "2021-08-24T02:35:02.428352" | "2017-11-30T14:15:25" | "2017-11-30T14:15:25" | 112,618,641 | 0 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""
Intercomparison among satellite data
@author: sunj
"""
import sys, os
import shutil
import time
import numpy as np
import numpy.ma as ma
import matplotlib.pyplot as plt
from netCDF4 import Dataset
import glob
import tables
from scipy import ndimage
from mpl_toolkits.basemap import Basemap
import seaborn as sns
from matplotlib.colors import ListedColormap
from sklearn.metrics import mean_squared_error
from scipy.optimize import leastsq
from shutil import copyfile
import subprocess
from scipy import optimize
from scipy.interpolate import griddata
from math import sqrt
#from OMAERO import gridOMAERO, OMAERO4cfg
#from MODIS import gridMODIS, MODIS4cfg
from pyhdf.SD import SD, SDC
from scipy.misc import bytescale
#plt.rcParams.update({'xtick.color': 'k','ytick.color':'k','text.color':'k','axes.labelcolor':'k', 'lines.linewidth':2,'font.size':22,'axes.labelsize':22,'axes.titlesize':22,'xtick.labelsize':18,'ytick.labelsize':18,'legend.fontsize':22})
plt.rcParams.update({'xtick.color': 'k','ytick.color':'k','text.color':'k','axes.labelcolor':'k', 'lines.linewidth':2,'font.size':14,'axes.labelsize':14,'axes.titlesize':14,'xtick.labelsize':12,'ytick.labelsize':12,'legend.fontsize':14})
year = 2017
month = 1
days = list(np.arange(26,30,1))
days.remove(28)
day = 26
aerlat = -33.46
aerlon = -70.66
print 'Calculate time zone'
if abs(aerlon)%15 < 7.5:
jetlag = abs(aerlon)//15
else:
jetlag = abs(aerlon)//15+1
if aerlon<0:
jetlag = - jetlag
print jetlag
ROI = [-40,-12.5,-97.5,-70]
crival = 2
res = 0.5
print '**** Reading MODIS %02i-%02i-%4i' % (day, month, year)
moddir = '/nobackup/users/sunj/MODIS/AQUA/MYD021KM/%4i/%02i/%02i/' % (year, month, day)
coordir = '/nobackup/users/sunj/MODIS/AQUA/MYD03/%4i/%02i/%02i/' % (year, month, day)
filelist = glob.glob( moddir + '*.hdf')
coorlist = glob.glob( coordir + '*.hdf')
red = []
modlat = []
modlon = []
plt.figure(figsize=(4.5,4), frameon = False)
for i in range(len(filelist)):
data = SD(filelist[i], SDC.READ)
coor = SD(coorlist[i], SDC.READ)
selected_sds = data.select('EV_250_Aggr1km_RefSB')
selected_sds_attributes = selected_sds.attributes()
for key, value in selected_sds_attributes.iteritems():
if key == 'reflectance_scales':
reflectance_scales_250_Aggr1km_RefSB = np.asarray(value)
if key == 'reflectance_offsets':
reflectance_offsets_250_Aggr1km_RefSB = np.asarray(value)
sds_data_250_Aggr1km_RefSB = selected_sds.get()
selected_sds = data.select('EV_500_Aggr1km_RefSB')
selected_sds_attributes = selected_sds.attributes()
for key, value in selected_sds_attributes.iteritems():
if key == 'reflectance_scales':
reflectance_scales_500_Aggr1km_RefSB = np.asarray(value)
if key == 'reflectance_offsets':
reflectance_offsets_500_Aggr1km_RefSB = np.asarray(value)
sds_data_500_Aggr1km_RefSB = selected_sds.get()
selected_sds = coor.select('Latitude')
myd03_lat = selected_sds.get()
selected_sds = coor.select('Longitude')
myd03_long = selected_sds.get()
data_shape = sds_data_250_Aggr1km_RefSB.shape
along_track = data_shape[1]
cross_trak = data_shape[2]
z = np.zeros((along_track, cross_trak,3))
for i in np.arange(along_track):
for j in np.arange(cross_trak):
z[i,j,0] = ( sds_data_250_Aggr1km_RefSB[0,i,j] - \
reflectance_offsets_250_Aggr1km_RefSB[0] ) * \
reflectance_scales_250_Aggr1km_RefSB[0]
for i in np.arange(along_track):
for j in np.arange(cross_trak):
z[i,j,1] = ( sds_data_500_Aggr1km_RefSB[1,i,j] - \
reflectance_offsets_500_Aggr1km_RefSB[1] ) * \
reflectance_scales_500_Aggr1km_RefSB[1]
for i in np.arange(along_track):
for j in np.arange(cross_trak):
z[i,j,2] = ( sds_data_500_Aggr1km_RefSB[0,i,j] - \
reflectance_offsets_500_Aggr1km_RefSB[0] ) * \
reflectance_scales_500_Aggr1km_RefSB[0]
z[ z > 1 ] = 1.0
z[ z < 0 ] = 0.0
lat_min = myd03_lat[0,0]
lat_max = myd03_lat[along_track-1,cross_trak-1]
lat_0 = lat_min + (lat_max - lat_min) / 2.
long_min = min(myd03_long[0,0],myd03_long[along_track-1,cross_trak-1])
long_max = max(myd03_long[0,0],myd03_long[along_track-1,cross_trak-1])
lon_0 = long_min + (long_max - long_min) / 2.
#----------------------------------------------------------------------------------------#
# Orthographic Map Projection
fig = plt.figure()
ax = fig.add_subplot(111)
ax.patch.set_facecolor((0.75,0.75,0.75))
m1 = Basemap(projection='ortho',lon_0=lon_0,lat_0=lat_0,resolution=None)
xpt0, ypt0 = m1(lon_0,lat_0)
xpt1, ypt1 = m1(myd03_long[0,0],myd03_lat[0,0])
xpt2, ypt2 = m1(myd03_long[0,cross_trak-1],myd03_lat[0,cross_trak-1])
xpt3, ypt3 = m1(myd03_long[along_track-1,cross_trak-1], \
myd03_lat[along_track-1,cross_trak-1])
xpt4, ypt4 = m1(myd03_long[along_track-1,0],myd03_lat[along_track-1,0])
llx = min(xpt1,xpt2,xpt3,xpt4) - xpt0 # lower left
lly = min(ypt1,ypt2,ypt3,ypt4) - ypt0
urx = max(xpt1,xpt2,xpt3,xpt4) - xpt0 # upper right
ury = max(ypt1,ypt2,ypt3,ypt4) - ypt0
m = Basemap(projection='ortho',lon_0=lon_0,lat_0=lat_0,resolution='l',\
llcrnrx=llx,llcrnry=lly,urcrnrx=urx,urcrnry=ury)
x_igrid, y_igrid = m(myd03_long,myd03_lat)
x_igrid = x_igrid - xpt0
y_igrid = y_igrid - ypt0
z_igrid_01 = np.zeros((along_track, cross_trak))
z_igrid_02 = np.zeros((along_track, cross_trak))
z_igrid_03 = np.zeros((along_track, cross_trak))
    for i in np.arange(along_track):      # was hardcoded 2030
        for j in np.arange(cross_trak):   # was hardcoded 1354
z_igrid_01[i,j] = z[i,j,0]
z_igrid_02[i,j] = z[i,j,1]
z_igrid_03[i,j] = z[i,j,2]
x1_igrid = x_igrid.ravel()
y1_igrid = y_igrid.ravel()
z_igrid_01 = z_igrid_01.ravel()
z_igrid_02 = z_igrid_02.ravel()
z_igrid_03 = z_igrid_03.ravel()
xy1_igrid = np.vstack((x1_igrid, y1_igrid)).T
xi, yi = np.mgrid[llx:urx:1000j, lly:ury:1000j]
z_01 = griddata(xy1_igrid, z_igrid_01, (xi, yi), method='cubic')
z_02 = griddata(xy1_igrid, z_igrid_02, (xi, yi), method='cubic')
z_03 = griddata(xy1_igrid, z_igrid_03, (xi, yi), method='cubic')
rgb_projected = np.zeros((1000, 1000,3))
for i in np.arange(1000):
for j in np.arange(1000):
rgb_projected[i,j,0] = z_01[i,j]
rgb_projected[i,j,1] = z_02[i,j]
rgb_projected[i,j,2] = z_03[i,j]
#rgb_projected[ z > 1 ] = 1.0
#rgb_projected[ z < 0 ] = 0.0
    whereAreNaNs = np.isnan(rgb_projected)
    rgb_projected[whereAreNaNs] = 0.75
img = m.imshow(np.rot90(np.fliplr(rgb_projected)), origin='lower')
m.drawcoastlines()
m.drawparallels(np.arange(-90.,120.,5.), color='k', labels=[True,False,False,False])
m.drawmeridians(np.arange(0.,420.,5.), color='k', labels=[False,False,False,True])
ax.set_xlabel("", fontsize=10)
ax.set_ylabel("", fontsize=10)
| UTF-8 | Python | false | false | 7,291 | py | 35 | MODIS_combine.py | 34 | 0.609519 | 0.549993 | 0 | 228 | 30.947368 | 238 |
vfat0/safe-transaction-service | 996,432,442,799 | eb35a4aa12a59cac12efb1a19c8cb039065df0c3 | 063bb17c8a6ee197f0ef5e228ebb7735287d064a | /safe_transaction_service/history/tests/test_tasks.py | 5a4e36efbeeae64e1b122d727cdd69b86fde5401 | [
"MIT"
] | permissive | https://github.com/vfat0/safe-transaction-service | 55790f5ad4d35e39f563aa66017f31057ecc64ba | a8c48343a5d32971828f1d9c9e9fd113aeaef7fd | refs/heads/master | "2022-12-30T23:06:01.465485" | "2020-10-27T12:24:06" | "2020-10-27T12:24:06" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import logging
from unittest.mock import MagicMock, patch
from django.test import TestCase
from eth_account import Account
from ..tasks import BlockchainRunningTask, BlockchainRunningTaskManager
from .factories import EthereumEventFactory, InternalTxFactory, WebHookFactory
logger = logging.getLogger(__name__)
class TestTasks(TestCase):
@patch('requests.post')
def test_send_webhook_task(self, mock_post: MagicMock):
EthereumEventFactory()
        mock_post.assert_not_called()  # clearer than asserting that assert_called raises
to = Account.create().address
WebHookFactory(address='')
WebHookFactory(address=Account.create().address)
WebHookFactory(address=to)
InternalTxFactory(to=to)
self.assertEqual(mock_post.call_count, 2)
def test_blockchain_running_task(self):
# Test context manager
class A:
def __init__(self, id: str):
self.id = id
BlockchainRunningTaskManager().delete_all_tasks()
a = A('custom-task-id')
b = A('another-task_id')
with BlockchainRunningTask(a) as blockchain_running_task:
self.assertEqual(blockchain_running_task.blockchain_running_task_manager.get_running_tasks(),
[a.id])
with BlockchainRunningTask(b):
self.assertEqual(blockchain_running_task.blockchain_running_task_manager.get_running_tasks(),
[b.id, a.id])
self.assertEqual(BlockchainRunningTaskManager().get_running_tasks(), [])
| UTF-8 | Python | false | false | 1,577 | py | 15 | test_tasks.py | 13 | 0.654407 | 0.653773 | 0 | 45 | 34.044444 | 109 |
profjefer/biological-cells-counter | 7,962,869,382,460 | 339cdf95a0bf01ca4fe1f81ea282cc3ee2a725dc | e3674c95e31ccd76b0d5c0d0674326d17c552584 | /project/tests/dip_test.py | 9471369dbccd52fd8842738b47e1db3180045028 | [] | no_license | https://github.com/profjefer/biological-cells-counter | 2780b66f57d8ed4b6407eb91c294630bea0ce5e3 | cfa0fba93371577e0098434343e00fc68234e9b0 | refs/heads/main | "2023-04-19T13:59:41.327867" | "2021-05-10T17:13:01" | "2021-05-10T17:13:01" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
import os
import cv2
from colorama import init
from termcolor import colored
sys.path.append('../')
from dip.segmentation_dip import segmentation
from shared.counter import count_cells, change_cells_to_rgb
def read_file(path_to_file):
f = open(path_to_file, 'r')
files = f.read()
f.close()
return files.split('\n')
def main():
init()
effectiveness = []
number_of_tests = 0
path_to_file = sys.argv[1] if len(sys.argv) > 1 else exit("Missing argument: path_to_file")
if os.path.exists(path_to_file) is False:
exit("File not found: " + path_to_file)
files = read_file(path_to_file)
for file in files:
path_to_image = '../../train_set/' + file + '/images/' + file + '.png'
print(f'Path to file: {path_to_image}')
if os.path.exists(path_to_image) is False:
print(colored(f'File not found', 'red'))
continue
try:
image = cv2.imread(path_to_image, cv2.CV_8UC1)
prepared_image = segmentation(image)
prepared_image = prepared_image.tolist()
image, cells = count_cells(prepared_image)
number_of_cells = len(cells)
path_to_mask = '../../train_set/' + file + '/masks/'
number_of_masks = len(os.listdir(path_to_mask))
indicator = number_of_cells/number_of_masks
            effectiveness.append(indicator if indicator <= 1 else 1)  # cap at 1; the original's dangling "else 1" silently dropped over-counts
print(colored(f'Effectiveness = {indicator}', 'green'))
print(colored('TEST PASS', 'green'))
number_of_tests += 1
except Exception as e:
print(colored(e, 'red'))
print(colored('TEST FAIL', 'red'))
avg = sum(effectiveness)/len(effectiveness) if len(effectiveness) > 0 else 0
print()
print('SUMMARY')
    print(f'Succeeded tests: {number_of_tests}/{len(files)}')
print(f'Algorithm average: {avg}')
if __name__ == "__main__":
main()
| UTF-8 | Python | false | false | 1,960 | py | 12 | dip_test.py | 4 | 0.591837 | 0.585204 | 0 | 61 | 31.131148 | 95 |
xusl/android-wilhelm | 10,943,576,683,610 | fc5a8f09060324dfdf040ff7b1ac63e4db20de9f | ebd585fdafb22d31fcd6d04b209f81e88d1a2083 | /modem_proc/core/products/build/SConscript | b31ea370704e024ae230bab6d5b180c6ed858686 | [
"Apache-2.0"
] | permissive | https://github.com/xusl/android-wilhelm | fd28144253cd9d7de0646f96ff27a1f4a9bec6e3 | 13c59cb5b0913252fe0b5c672d8dc2bf938bb720 | refs/heads/master | "2018-04-03T21:46:10.195170" | "2017-04-20T10:17:49" | "2017-04-20T10:17:49" | 88,847,837 | 11 | 10 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Title: Sconscript
# License: License
# Copyright 2010 Qualcomm Inc
# Description: General Description
# Sconscript for CoreBSP Products CMM scripts
# Team: Functional Area
# CoreBSP Products 8960
# Target: Target
# MSM8960
# Author: Author
# $Author: rvennam $
# Location: Perforce Revision
# $Header: //source/qcom/qct/core/products/labtools/legacy/8960/Modem/core/products/build/SConscript $
# Edits: Edits
# Created for MSM8960 on 3/8/2011
# ------------------------------------------------------------------------------------------
import datetime
import time
# Class for a CMM script to be included in the dialog
class CMMScript:
def __init__(self, scriptname, altname, path, team, numargs):
self.scriptname = scriptname
self.altname = altname
self.path = path
self.team = team
self.numargs = numargs
self.argmapping = {}
# T32 dialog file class
class T32Dialog:
def __init__(self):
self.height = 0
self.width = 0
self.title = "Nothing"
self.textheight = 0
self.textwidth = 0
self.editheight = 0
self.editwidth = 0
self.chooseboxwidth = 0
self.chooseboxheight = 0
# Function that writes the dialogfile
def writedialogfile():
dialogfile = open(T32_Dialog,'w')
# Throw in all the comments for the cmm file
dialogfile.write('//\t Title: std_dialogauto.dlg')
dialogfile.write('\n\n//\t License: License')
dialogfile.write('\n//\t Copyright 2010 Qualcomm Inc ')
dialogfile.write('\n\n//\t Description: General Description')
dialogfile.write('\n//\t This T32 dialog presents user with options to collect logs.')
dialogfile.write('\n\n//\t Input: Inputs')
dialogfile.write('\n//\t None')
dialogfile.write('\n\n//\t Output: Outputs')
dialogfile.write('\n//\t None')
dialogfile.write('\n\n//\t Usage: Usage')
dialogfile.write('\n//\t Not meant for standalone usage')
dialogfile.write('\n\n//\t Team: Functional Area')
dialogfile.write('\n//\t CoreBSP Products 8660')
dialogfile.write('\n\n//\t Target: Target')
dialogfile.write('\n//\t MSM8660')
dialogfile.write('\n\n//\t Author: Author')
dialogfile.write('\n//\t $Author: amcheriy $')
dialogfile.write('\n\n//\t Location: Perforce Revision')
dialogfile.write('\n//\t $Header: //depot/asic/msm8660/temp_dev/AMSS/products/8660/build/ms/std_buildconfig $')
dialogfile.write('\n\n//\t Edits: Edits')
dialogfile.write('\n//\t Autogenerated on '+datetime.datetime.now().strftime("%A %d/%m/%Y %H:%M:%S"))
dialogfile.write('\n// ---------------------------------------------------------------------------------')
dialogfile.write("\n \n \n")
dialogfile.write('HEADER \"%s\"' % (window.title))
dialogfile.write('\n \n')
dialogfile.write('pos 0. 0. %s. %s.' % (window.width, window.height))
dialogfile.write('\n')
dialogfile.write('BOX \"Select Log Type\"')
dialogfile.write('\n \n')
# Scan the list
for i in range(0,len(scripts)):
# For each script, create an edit box corresponding to the number of the arguments
# Position the edit boxes like a 2D matrix
# Labels created for the edit boxes have the format : EDIT(script number)(argument number) - starting from 0
for argcount in range(0,int(scripts[i].numargs)):
dialogfile.write('pos %s. %s %s. %s.' % (str(window.chooseboxwidth + 2 + (window.editwidth + 2) * argcount), i*2 + 2.5 , window.editwidth, window.editheight))
dialogfile.write('\n')
dialogfile.write('EDIT%s%s: EDIT \"\" \"\"' % (str(i), str(argcount)))
dialogfile.write('\n')
dialogfile.write('\n \n')
for i in range(0,len(scripts)):
# For each script, create an edit box corresponding to the number of the arguments
# Position the edit boxes like a 2D matrix
# Labels created for the edit boxes have the format : EDIT(script number)(argument number) - starting from 0
for argcount in range(0,int(scripts[i].numargs)):
dialogfile.write('pos %s. %s %s. %s.' % (str(window.chooseboxwidth + 2 + (window.editwidth + 2) * argcount), i*2 + 1.5 , window.editwidth, window.editheight))
dialogfile.write('\n')
dialogfile.write('TEXT%s%s: TEXT \"%s\"' % (str(i), str(argcount), scripts[i].argmapping[argcount+1]))
dialogfile.write('\n')
dialogfile.write('\n \n')
# Create the choosebox
dialogfile.write('pos 1. 2. %s. %s.' % (window.chooseboxwidth, window.chooseboxheight))
dialogfile.write('\n')
# Scan the list
for i in range(0,len(scripts)):
argstring =""
for argcount in range(0,int(scripts[i].numargs)):
argstring = argstring + " &arg" + str(argcount)
dialogfile.write('LOGTYPE.%s: CHOOSEBOX \"%s\"' %(scripts[i].altname.replace(' ',''), scripts[i].altname))
dialogfile.write('\n')
dialogfile.write('(&')
dialogfile.write('\n \t')
# On selecting an item in the choosebox, you need to pick up the string from the corresponding edit box created above
for argcount in range(0,int(scripts[i].numargs)):
dialogfile.write('&arg%s=DIALOG.STRING(EDIT%s%s)' % ( str(argcount), str(i), str(argcount)))
dialogfile.write('\n \t')
dialogfile.write('do %s/%s %s' % (scripts[i].path, scripts[i].scriptname, argstring))
dialogfile.write('\n')
dialogfile.write(')')
        dialogfile.write('\n')
    # close the generated dialog file so it is flushed to disk (the config writers below do the same)
    dialogfile.close()
def writemodemconfigfile():
# Create the file from scratch every single time
build_config_file = open(CMM_Build_Config,'w')
# Throw in all the comments for the cmm file
build_config_file.write('//\t Title: std_buildconfig')
build_config_file.write('\n\n//\t License: License')
build_config_file.write('\n//\t Copyright 2010 Qualcomm Inc ')
build_config_file.write('\n\n//\t Description: General Description')
build_config_file.write('\n//\t This script contains build information.')
build_config_file.write('\n\n//\t Input: Inputs')
build_config_file.write('\n//\t None')
build_config_file.write('\n\n//\t Output: Outputs')
build_config_file.write('\n//\t None')
build_config_file.write('\n\n//\t Usage: Usage')
build_config_file.write('\n//\t do std_buildconfig')
build_config_file.write('\n\n//\t Team: Functional Area')
build_config_file.write('\n//\t CoreBSP Products 8960')
build_config_file.write('\n\n//\t Target: Target')
build_config_file.write('\n//\t MSM8960')
build_config_file.write('\n\n//\t Author: Author')
build_config_file.write('\n//\t $Author: rvennam $')
build_config_file.write('\n\n//\t Location: Perforce Revision')
build_config_file.write('\n//\t $Header: //depot/asic/msm8960/temp_dev/AMSS/products/8660/build/ms/std_buildconfig $')
build_config_file.write('\n\n//\t Edits: Edits')
build_config_file.write('\n//\t Autogenerated on '+datetime.datetime.now().strftime("%A %d/%m/%Y %H:%M:%S"))
build_config_file.write('\n// ---------------------------------------------------------------------------------')
build_config_file.write('\n \n \n \n')
# Declare your GLOBAL variables here
build_config_file.write('\n GLOBAL &MODEM_BUILDID')
build_config_file.write('\n GLOBAL &MODEM_BUILDIDM')
build_config_file.write('\n GLOBAL &CHIPSET')
build_config_file.write('\n GLOBAL &MODEM_BUILDMSDIR')
build_config_file.write('\n GLOBAL &MODEM_MBNDIR')
build_config_file.write('\n GLOBAL &MODEM_ELFFILE')
build_config_file.write('\n GLOBAL &MODEM_TIMESTAMP')
build_config_file.write('\n GLOBAL &BUILDID')
build_config_file.write('\n GLOBAL &BUILDIDM')
build_config_file.write('\n GLOBAL &CHIPSET')
build_config_file.write('\n GLOBAL &BUILDROOT')
build_config_file.write('\n GLOBAL &COREDIR')
build_config_file.write('\n GLOBAL &PRODUCTSDIR')
build_config_file.write('\n GLOBAL &BUILDMSDIR')
build_config_file.write('\n GLOBAL &MBNDIR')
build_config_file.write('\n GLOBAL &ELFFILE')
build_config_file.write('\n GLOBAL &TIMESTAMP')
build_config_file.write('\n // GLOBAL &BOOTLOADERFLAVOR')
# Format it well
build_config_file.write('\n \n')
build_config_file.write('\n &MODEM_BUILDID='+buildid)
build_config_file.write('\n &MODEM_BUILDIDM='+env.Dump('BUILD_ID').replace('\'','\"'))
build_config_file.write('\n &CHIPSET=\"MSM'+chipset+'\"')
build_config_file.write('\n &MODEM_BUILDMSDIR='+'os.ppd()+'+'\"/../../build/ms\"')
build_config_file.write('\n &MODEM_MBNDIR=\"&BUILDMSDIR\"+'+'\"/bin/&BUILDID\"')
build_config_file.write('\n &MODEM_ELFFILE=\"M'+elfname+'\"')
build_config_file.write('\n &MODEM_TIMESTAMP=\"'+str(int(time.time()+0.5))+'\"')
build_config_file.write('\n &BUILDID='+buildid)
build_config_file.write('\n &BUILDIDM='+env.Dump('BUILD_ID').replace('\'','\"'))
build_config_file.write('\n &CHIPSET=\"MSM'+chipset+'\"')
build_config_file.write('\n &PRODUCTSDIR='+'os.ppd()')
build_config_file.write('\n &BUILDROOT='+'os.ppd()+'+'\"/../..\"')
build_config_file.write('\n &COREDIR='+'os.ppd()+'+'\"/..\"')
build_config_file.write('\n &BUILDMSDIR='+'os.ppd()+'+'\"/../../build/ms\"')
build_config_file.write('\n &MBNDIR=\"&BUILDMSDIR\"+'+'\"/bin/&BUILDID\"')
build_config_file.write('\n &ELFFILE=\"M'+elfname+'\"')
build_config_file.write('\n &TIMESTAMP=\"'+str(int(time.time()+0.5))+'\"')
build_config_file.write('\n // &BOOTLOADERFLAVOR=\"'+ bootloaderflavor + '\"')
# Now be a good sport and end the cmm file
build_config_file.write('\n \n')
build_config_file.write('ENDDO')
build_config_file.close()
def writebootconfigfile():
# Create the file from scratch every single time
build_config_file = open(CMM_Build_Config,'w')
# Throw in all the comments for the cmm file
build_config_file.write('//\t Title: std_buildconfig')
build_config_file.write('\n\n//\t License: License')
build_config_file.write('\n//\t Copyright 2010 Qualcomm Inc ')
build_config_file.write('\n\n//\t Description: General Description')
build_config_file.write('\n//\t This script contains build information.')
build_config_file.write('\n\n//\t Input: Inputs')
build_config_file.write('\n//\t None')
build_config_file.write('\n\n//\t Output: Outputs')
build_config_file.write('\n//\t None')
build_config_file.write('\n\n//\t Usage: Usage')
build_config_file.write('\n//\t do std_buildconfig')
build_config_file.write('\n\n//\t Team: Functional Area')
build_config_file.write('\n//\t CoreBSP Products 8960')
build_config_file.write('\n\n//\t Target: Target')
    build_config_file.write('\n//\t MSM8960')  # was MSM8660, a copy-paste slip; this script targets MSM8960
build_config_file.write('\n\n//\t Author: Author')
build_config_file.write('\n//\t $Author: rvennam $')
build_config_file.write('\n\n//\t Location: Perforce Revision')
build_config_file.write('\n//\t $Header: //depot/asic/msm8960/temp_dev/AMSS/products/8660/build/ms/std_buildconfig $')
build_config_file.write('\n\n//\t Edits: Edits')
build_config_file.write('\n//\t Autogenerated on '+datetime.datetime.now().strftime("%A %d/%m/%Y %H:%M:%S"))
build_config_file.write('\n// ---------------------------------------------------------------------------------')
build_config_file.write('\n \n \n \n')
# Declare your GLOBAL variables here
build_config_file.write('\n GLOBAL &BOOT_BUILDID')
build_config_file.write('\n GLOBAL &BOOT_BUILDIDM')
build_config_file.write('\n GLOBAL &BOOT_MBNDIR')
build_config_file.write('\n GLOBAL &BOOT_TIMESTAMP')
# Format it well
build_config_file.write('\n \n')
build_config_file.write('\n &BOOT_BUILDID='+buildid)
build_config_file.write('\n &BOOT_BUILDIDM='+env.Dump('BUILD_ID').replace('\'','\"'))
build_config_file.write('\n &BOOT_MBNDIR=\"&BUILDMSDIR\"+'+'\"/bin/&BUILDID\"')
build_config_file.write('\n &BOOT_TIMESTAMP=\"'+str(int(time.time()+0.5))+'\"')
# Now be a good sport and end the cmm file
build_config_file.write('\n \n')
build_config_file.write('ENDDO')
build_config_file.close()
# Technically, the sconscript starts here
Import('env')
env = env.Clone()
if env.has_key('QDSP6_PROC'):
# CMM Source Path
print "COREBSP Products SConscript \n"
CMM_source = env.subst('$COREBSP_ROOT')+'/products'
CMM_Build_Config = CMM_source + '/std_buildconfig.cmm'
T32_Dialog = CMM_source + '/std_dialogauto.dlg'
env.VariantDir('${BUILDPATH}',CMM_source, duplicate=0)
# Do all the processing here. Write the file separately
# Build ID has an M appended to indicate modem build
buildidm = env.Dump('BUILD_ID').replace('\'','\"')
# Remove the M
buildid = buildidm.replace('M\"','\"')
# Replace the quotes from the output of env.Dump
chipset = str(env.Dump('MSM_ID')).replace('\'','')
asicid = str(env.Dump('BUILD_ASIC')).replace('\'','')
buildidstr = str(env.Dump('BUILD_ID')).replace('\'','')
buildver = str(env.Dump('BUILD_VER')).replace('\'','')
# ELF name combines the three together
elfname=asicid + buildidstr + buildver
# Bootloader flavor is hardcoded in the build, provided here
bootloaderflavor = 'AAABQNBG'
# Script is the list of scripts to be included in the dialog
scripts = []
# Delcare the scripts here one after the other
# Fields : Scriptname, Alternate Name (appears in the dialog), Location (with respect
# to CORE directory or BUILD\MS directory, Team responsible for it, Number of arguments it
# takes and the mapping of those arguments
script1=CMMScript("ULogDump","ULOG Log","&COREDIR/power/ulog/scripts","Power", "1")
script1.argmapping = { 1 : 'Log Path'}
script2=CMMScript("NPADump","NPA Log","&COREDIR/power/npa/scripts","Power","1")
script2.argmapping = { 1: 'Log Path'}
script3=CMMScript("testclocks_8660","Test Clock ","&BUILDMSDIR","Systemdrivers","1")
script3.argmapping = { 1: 'Clock Name'}
# Add each one to the list
scripts = [script1, script2, script3]
# Set parameters for the Dialog box here
window = T32Dialog()
window.height = len(scripts)*5
window.width = (len(scripts) + 3) * 8
window.title = "Collect Logs"
window.textheight = 1
window.textwidth = 5
window.editheight = 1
window.editwidth = 10
window.chooseboxwidth = 10
window.chooseboxheight = 2
#print scripts[0].argmapping[1]
# Write the config file
writemodemconfigfile()
# Write the dialog file
writedialogfile()
if env.has_key('BUILD_BOOT_CHAIN'):
CMM_source = env.subst('$COREBSP_ROOT')+'/products'
CMM_Build_Config = CMM_source + '/std_buildconfig.cmm'
env.VariantDir('${BUILDPATH}',CMM_source, duplicate=0)
# Do all the processing here. Write the file separately
# Build ID has an M appended to indicate modem build
buildidm = env.Dump('BUILD_ID').replace('\'','\"')
# Remove the M
buildid = buildidm.replace('M\"','\"')
# Write the boot config file
writebootconfigfile()
| UTF-8 | Python | false | false | 14,250 | 13,006 | SConscript | 1,741 | 0.670386 | 0.658246 | 0 | 329 | 42.31003 | 161 |
|
murielsilveira/django13-agenda | 17,093,969,875,472 | f868aa8062846f61e49fe145162a911adc2fbe48 | f31b053d591d5c8c536be873a8627773a0211579 | /sistema/views.py | 0a18c82de7a22088111de55d84259dc5920b171e | [
"Unlicense"
] | permissive | https://github.com/murielsilveira/django13-agenda | 89ebec2eba67ef8886be6cfe38b9fcfa61157929 | 027a5b495cfab6a3fe832e86ac5cf4a90ee5b9fc | refs/heads/master | "2020-04-06T04:48:16.837336" | "2014-09-15T15:04:16" | "2014-09-15T15:04:16" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
from django.template import RequestContext
from django.shortcuts import render_to_response, get_object_or_404, redirect
def sistema(request):
"""
    Decides where the user will be redirected
"""
#logado = True
#if logado:
#return redirect(login)
return redirect(menu)
def menu(request):
"""
    Displays the system menu, the main page
"""
return render_to_response('sistema/logado.html', context_instance=RequestContext(request))
def login(request):
"""
    Displays the system login screen
"""
return render_to_response('sistema/login.html', context_instance=RequestContext(request))
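
# A minimal urls.py wiring for these views (editor's sketch for this
# Django 1.3-era style; the URL names below are assumptions, not project code):
# from django.conf.urls.defaults import patterns, url
# from sistema import views
# urlpatterns = patterns('',
#     url(r'^$', views.sistema, name='sistema'),
#     url(r'^menu/$', views.menu, name='menu'),
#     url(r'^login/$', views.login, name='login'),
# )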
| UTF-8 | Python | false | false | 692 | py | 9 | views.py | 5 | 0.683599 | 0.677794 | 0 | 28 | 23.607143 | 94 |
cms-sw/cmssw | 12,412,455,512,065 | bf8127530a6fe2d0482867209de1ee81743afd79 | a5a99f646e371b45974a6fb6ccc06b0a674818f2 | /DPGAnalysis/HcalNanoAOD/python/hcalRecHitTable_cff.py | 5478e4c8261c6310bc4c8aa2b1b175e15058581f | [
"Apache-2.0"
] | permissive | https://github.com/cms-sw/cmssw | 4ecd2c1105d59c66d385551230542c6615b9ab58 | 19c178740257eb48367778593da55dcad08b7a4f | refs/heads/master | "2023-08-23T21:57:42.491143" | "2023-08-22T20:22:40" | "2023-08-22T20:22:40" | 10,969,551 | 1,006 | 3,696 | Apache-2.0 | false | "2023-09-14T19:14:28" | "2013-06-26T14:09:07" | "2023-09-09T18:47:07" | "2023-09-14T19:14:27" | 1,330,249 | 980 | 4,104 | 807 | C++ | false | false | import FWCore.ParameterSet.Config as cms
from PhysicsTools.NanoAOD.common_cff import Var,P3Vars
hbheRecHitTable = cms.EDProducer("HBHERecHitFlatTableProducer",
src = cms.InputTag("hbhereco"),
cut = cms.string(""),
name = cms.string("RecHitHBHE"),
doc = cms.string("HCAL barrel and endcap rec hits"),
singleton = cms.bool(False), # the number of entries is variable
extension = cms.bool(False), # this is the main table for the object
variables = cms.PSet(
detId = Var('detid().rawId()', 'int', precision=-1, doc='detId'),
energy = Var('energy', 'float', precision=14, doc='energy'),
time = Var('time', 'float', precision=14, doc='hit time'),
ieta = Var('id().ieta()', 'int', precision=-1, doc='ieta'),
iphi = Var('id().iphi()', 'int', precision=-1, doc='iphi'),
depth = Var('id().depth()', 'int', precision=-1, doc='depth')
)
)
hfRecHitTable = cms.EDProducer("HFRecHitFlatTableProducer",
src = cms.InputTag("hfreco"),
cut = cms.string(""),
name = cms.string("RecHitHF"),
doc = cms.string("HCAL forward (HF) rec hits"),
singleton = cms.bool(False), # the number of entries is variable
extension = cms.bool(False), # this is the main table for the object
variables = cms.PSet(
detId = Var('detid().rawId()', 'int', precision=-1, doc='detId'),
energy = Var('energy', 'float', precision=14, doc='energy'),
time = Var('time', 'float', precision=14, doc='hit time'),
ieta = Var('id().ieta()', 'int', precision=-1, doc='ieta'),
iphi = Var('id().iphi()', 'int', precision=-1, doc='iphi'),
depth = Var('id().depth()', 'int', precision=-1, doc='depth')
)
)
hoRecHitTable = cms.EDProducer("HORecHitFlatTableProducer",
src = cms.InputTag("horeco"),
cut = cms.string(""),
name = cms.string("RecHitHO"),
doc = cms.string("HCAL outer (HO) rec hits"),
singleton = cms.bool(False), # the number of entries is variable
extension = cms.bool(False), # this is the main table for the object
variables = cms.PSet(
detId = Var('detid().rawId()', 'int', precision=-1, doc='detId'),
energy = Var('energy', 'float', precision=14, doc='energy'),
time = Var('time', 'float', precision=14, doc='hit time'),
ieta = Var('id().ieta()', 'int', precision=-1, doc='ieta'),
iphi = Var('id().iphi()', 'int', precision=-1, doc='iphi'),
depth = Var('id().depth()', 'int', precision=-1, doc='depth')
)
)
hcalRecHitTableSeq = cms.Sequence(
hbheRecHitTable
+ hfRecHitTable
+ hoRecHitTable
)
hcalRecHitTableTask = cms.Task(
hbheRecHitTable,
hfRecHitTable,
hoRecHitTable,
)
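
# Editor's sketch (not part of the original config): the three producers above
# differ only in plugin type, input tag, table name, and doc string, so a small
# helper could remove the triplication. The helper name is an assumption, not
# an existing CMSSW utility.
def _makeHcalRecHitTable(plugin, src, name, doc):
    return cms.EDProducer(plugin,
        src = cms.InputTag(src),
        cut = cms.string(""),
        name = cms.string(name),
        doc = cms.string(doc),
        singleton = cms.bool(False),
        extension = cms.bool(False),
        variables = cms.PSet(
            detId = Var('detid().rawId()', 'int', precision=-1, doc='detId'),
            energy = Var('energy', 'float', precision=14, doc='energy'),
            time = Var('time', 'float', precision=14, doc='hit time'),
            ieta = Var('id().ieta()', 'int', precision=-1, doc='ieta'),
            iphi = Var('id().iphi()', 'int', precision=-1, doc='iphi'),
            depth = Var('id().depth()', 'int', precision=-1, doc='depth')
        )
    )
# e.g. hbheRecHitTable could then be written as:
# hbheRecHitTable = _makeHcalRecHitTable("HBHERecHitFlatTableProducer",
#     "hbhereco", "RecHitHBHE", "HCAL barrel and endcap rec hits")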
| UTF-8 | Python | false | false | 3,047 | py | 46,375 | hcalRecHitTable_cff.py | 40,422 | 0.538563 | 0.530358 | 0 | 65 | 45.876923 | 86 |
vivekaxl/LexisNexis | 11,304,353,941,326 | a7902bfc34e0b39bc4dbfdfaeac37cd542356e69 | 1a2ca64839723ede3134a0781128b0dc0b5f6ab8 | /ExtractFeatures/Data/rahul/deTuner.py | 34024d1fd48909f828b44ce6c63365dde514dff0 | [] | no_license | https://github.com/vivekaxl/LexisNexis | bc8ee0b92ae95a200c41bd077082212243ee248c | 5fa3a818c3d41bd9c3eb25122e1d376c8910269c | refs/heads/master | "2021-01-13T01:44:41.814348" | "2015-07-08T15:42:35" | "2015-07-08T15:42:35" | 29,705,371 | 9 | 3 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys, os
sys.path.append(os.environ['HOME'] + '/git/axe/axe')
sys.path.insert(0, os.getcwd() + '/_imports');
from demos import *
import sk; # @UnresolvedImport
from dectree import *
from settings import *
from settingsWhere import *
from pdb import set_trace
from abcd import _Abcd
from Prediction import rforest, CART, Bugs
from methods1 import createTbl
from random import uniform as rand, randint as randi, choice as any
tree = treeings()
# set_trace()
def say(l):
sys.stdout.write(str(l))
def settings(**d): return o(
name = "Differention Evolution",
what = "DE tuner. Tune the predictor parameters parameters",
author = "Rahul Krishna",
adaptation = "https://github.com/ai-se/Rahul/blob/master/DEADANT/deadant.py",
copyleft = "(c) 2014, MIT license, http://goo.gl/3UYBp",
seed = 1,
np = 10,
k = 100,
tiny = 0.01,
de = o(np = 5,
iter = 5,
epsilon = 1.01,
N = 20,
f = 0.3,
cf = 0.4,
lives = 100)
).update(**d)
The = settings()
class diffEvol(object):
"""
Differential Evolution
"""
def __init__(self, model, data):
self.frontier = []
self.model = model(data)
def new(self):
# Creates a new random instance
return [randi(d[0], d[1]) for d in self.model.indep()]
def initFront(self, N):
# Initialize frontier
for _ in xrange(N):
self.frontier.append(self.new())
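    # (comment added) standard DE "rand/1" mutation: candidate = l2 + f * (l3 - l4),
    # clamped to each tuning parameter's [lo, hi] bounds and cast to int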
def extrapolate(self, l2, l3, l4):
return [max(d[0],
min(d[1], int(a + The.de.f * (b - c)))) for a,
b, c, d in zip(l2, l3, l4, self.model.indep())]
def one234(self, one, pop, f = lambda x:id(x)):
def oneOther():
x = any(pop)
while f(x) in seen:
x = any(pop)
seen.append(f(x))
return x
seen = [ f(one) ]
return oneOther(), oneOther(), oneOther()
def dominates(self, one, two):
# set_trace()
return self.model.depen(one) > self.model.depen(two)
def DE(self):
self.initFront(The.de.N)
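        # every candidate evaluation spends one "life" and an improvement
        # refunds one, so the search stops after a run of stagnant trials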
lives = The.de.lives
while lives > 0:
better = False
for pos, l1 in enumerate(self.frontier):
lives -= 1
l2, l3, l4 = self.one234(l1, self.frontier)
new = self.extrapolate(l2, l3, l4)
if self.dominates(new, l1):
self.frontier.pop(pos)
self.frontier.insert(pos, new)
better = True
elif self.dominates(l1, new):
better = False
else:
self.frontier.append(new)
better = True
if better:
lives += 1
return self.frontier
class tuneRF(object):
# Tune RF
def __init__(self, data):
self.data = data
self.train = createTbl(data[:-1])
self.test = createTbl([data[-1]])
# set_trace()
def depen(self, rows):
mod = rforest(self.train, self.test
, tunings = rows # n_est, max_feat, mss, msl
, smoteit = True)
g = _Abcd(before = Bugs(self.test), after = mod, show = False)[-1]
return g
def indep(self):
return [(10, 1e3) # n_estimators
, (1, 100) # max_features
, (1, 10) # min_samples_leaf
, (2, 10) # min_samples_split
]
class tuneCART(object):
# Tune CART
def __init__(self, data):
self.data = data
self.train = createTbl(data[:-1])
self.test = createTbl([data[-1]])
def depen(self, rows):
mod = CART(self.train, self.test
, tunings = rows
, smoteit = True)
g = _Abcd(before = Bugs(self.test), after = mod, show = False)[-1]
return g
def indep(self):
return [(1, 50) # max_depth
, (2, 20) # min_samples_split
, (1, 20) # min_samples_leaf
, (1, 100) # max features
, (2, 1e3)] # max_leaf_nodes
def _test(data):
  m = tuneRF(data)
  # draw random candidates from the tuning ranges, then score each with depen()
  # (the original called m.any()/m.score(), which these tuner classes never define)
  vals = [[randi(lo, hi) for (lo, hi) in m.indep()] for _ in range(10)]
  vals1 = [m.depen(v) for v in vals]
  print(vals, vals1)
def _de(model, data):
  "DE"
  DE = diffEvol(model, data)
  # pick the frontier member with the best objective score; the original sorted
  # by F[-1], i.e. by the last tuning parameter, which is not a fitness value
  res = sorted(DE.DE(), key = lambda F: DE.model.depen(F))[-1]
  return res
def tuner(model, data):
if model == rforest:
return _de(tuneRF, data)
elif model == CART:
return _de(tuneCART, data)
if __name__ == '__main__':
from timeit import time
data = explore(dir = '../Data/')[0][-1] # Only training data to tune.
for m in [tuneRF, tuneCART]:
t = time.time()
mdl = m(data)
# _test(data)
tunings = _de(m, data)
print tunings
print mdl.depen(tunings)
print time.time() - t
# print _de()
# print main()
# import sk; xtile = sk.xtile
# print xtile(G)
# main(dir = 'Data/')
| UTF-8 | Python | false | false | 4,580 | py | 936 | deTuner.py | 924 | 0.566376 | 0.543886 | 0 | 179 | 24.586592 | 79 |
mwesterhof/wapsandbox | 13,503,377,211,701 | 953f2b3e58c784aa4d13e29263bcc27d08187cfb | b341d7630b19a8a65eefb985e102cffef6b64c09 | /products/urls.py | 64f80270e00aa50577b41ef85c3b211c4492b2d2 | [] | no_license | https://github.com/mwesterhof/wapsandbox | e8f05ecfcfe9cfe9435653fb2a337b2ce62edb0a | 17e476de45527993fec4b61f92a62558864fe310 | refs/heads/master | "2020-03-24T17:55:04.090229" | "2018-09-07T13:54:49" | "2018-09-07T13:54:49" | 142,876,457 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.urls import path
from .views import OrderProduct, ProductList, ProductDetail
urlpatterns = [
path('', ProductList.as_view(), name='product_list'),
path('<int:pk>/', ProductDetail.as_view(), name='product_detail'),
path('<int:pk>/order/', OrderProduct.as_view(), name='product_order')
]
| UTF-8 | Python | false | false | 313 | py | 49 | urls.py | 34 | 0.686901 | 0.686901 | 0 | 10 | 30.3 | 73 |
kevinholst/politifact_tools | 2,061,584,341,162 | 5bf64a107c94c8582a3b2b1cdd718744abce045a | 06b08b237a34b80d8cfafe07b10c38aa8838983d | /politifact_score.py | 07ef006da6b7ab82e2a98dcafed8451c2c5ad694 | [] | no_license | https://github.com/kevinholst/politifact_tools | a4c8a5ac727803c1490a59fd6e6c8ab1aa922a5c | 0c442f47faadba71460b3ad8e96a0cec564c1105 | refs/heads/master | "2021-01-10T08:22:44.507486" | "2015-12-21T04:57:29" | "2015-12-21T04:57:29" | 48,351,460 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from __future__ import division, print_function
from bs4 import BeautifulSoup
import requests
import numpy as np
BASE = 'http://www.politifact.com/personalities/'
SCORING = np.array([2, 1, 0, -1, -2, -3])
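# (comment added) weights for the six Truth-O-Meter rulings in page order,
# presumably True (+2) through Pants on Fire (-3)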
def get_truthiness_score(name):
page = requests.get(BASE + name)
    soup = BeautifulSoup(page.content, 'html.parser')
values = np.zeros(6, dtype='int')
rulings = soup.find_all('span', {'class': 'chartlist__count'})
for i, ruling in enumerate(rulings):
values[i] = int(ruling.contents[0].split()[0])
number_of_rulings = values.sum()
values *= SCORING
score = values.sum()/number_of_rulings
return score
| UTF-8 | Python | false | false | 666 | py | 2 | politifact_score.py | 1 | 0.636637 | 0.621622 | 0 | 26 | 24.615385 | 66 |
romanitalian/online_school_bread_blog | 15,436,112,480,636 | 65799ca15000a1965cce90985de352d1aff15a3f | f415e4adfaa3a3e243022744e34422b3315f7f37 | /notes/01/dec/main_1.py | 2bee488c99db1b49dfdf9eb1cad376792560cb8b | [] | no_license | https://github.com/romanitalian/online_school_bread_blog | 706d52a96a45ce40d983b9fe66523e7afd07455d | 19590400fafd37d225761e309e709d5004da0fb2 | refs/heads/main | "2023-06-23T18:20:26.318345" | "2021-07-23T09:05:48" | "2021-07-23T09:05:48" | 388,740,479 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from datetime import datetime
def counter_decorator(func):
cnt = 0
def wrapper():
nonlocal cnt
cnt += 1
print(cnt)
res = func()
return res
return wrapper
@counter_decorator
def foo():
print(datetime.now())
foo()
foo()
foo()
| UTF-8 | Python | false | false | 289 | py | 84 | main_1.py | 64 | 0.560554 | 0.553633 | 0 | 25 | 10.56 | 29 |
javier123454321/complexity | 9,345,848,869,091 | 724fd46bf9894c5db93bfdc42bf98f68c9e4aacb | 3726d860eb6b1fe051126179084283b58bf5a121 | /fractals1/fractals2.py | a75e00036a3a6278f80d277b4622712a61eb46ac | [] | no_license | https://github.com/javier123454321/complexity | 538304e6b25d6cd68dab6fead7a92de4d0c1a930 | 3b5fd2e54a845910bbd30eea90c58c4ffc5f7945 | refs/heads/master | "2020-05-05T06:02:25.424420" | "2019-07-23T15:10:18" | "2019-07-23T15:10:18" | 179,772,902 | 0 | 0 | null | false | "2019-04-06T00:49:05" | "2019-04-06T00:43:36" | "2019-04-06T00:45:10" | "2019-04-06T00:49:04" | 0 | 0 | 0 | 0 | Python | false | false | import rhinoscriptsyntax as rs
import ghpythonlib as ghp
def fractalizeCurve(curve, numsteps, output_lines):
numsteps -= 1
#Get endpoints from input curve
curvePoints = rs.AddPoints(rs.CurvePoints(curve))
pt0 = curvePoints[0]
pt1 = curvePoints[1]
#rotate input curve by input angle and form a triangle
new_pt = rs.RotateObject(pt1, pt0, angle, axis=None, copy=True)
new_line1 = rs.AddCurve([pt0, new_pt])
new_line2 = rs.AddCurve([new_pt, pt1])
#Append all the new lines into an input list
output_lines.append(new_line1)
output_lines.append(new_line2)
#iterate the function as many times as requested
if numsteps > 0:
fractalizeCurve(new_line1, (numsteps), output_lines)
fractalizeCurve(new_line2, (numsteps), output_lines)
else:
return output_lines
a = []
fractalizeCurve(inputCrv, recursion_steps, a)
| UTF-8 | Python | false | false | 964 | py | 2 | fractals2.py | 1 | 0.642412 | 0.62578 | 0 | 30 | 29.8 | 67 |
ranjiayu/EntryFormGenerator | 17,093,969,860,969 | 44d8899b8a7266397271a3b79cbcb96245fc83a6 | c7e6c8f8d0f770e4f1b53617affaf52f56c8a097 | /form/models.py | a1f3bddff5581d482d7db305272e0aa1e8c63c54 | [] | no_license | https://github.com/ranjiayu/EntryFormGenerator | 94dca535ef2e6a8cc91ff0f266a1baedaa5ca327 | 3a38b51a1ced173be0753b7d374cbe75a5ea8531 | refs/heads/master | "2020-06-19T23:40:15.990204" | "2016-11-30T03:06:27" | "2016-11-30T03:06:27" | 74,895,490 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from __future__ import unicode_literals
from django.db import models
# Create your models here.
class Form(models.Model):
title = models.CharField(max_length=70)
author = models.CharField(max_length=70)
password = models.CharField(max_length=70, default='')
create_time = models.DateTimeField(auto_now_add=True)
def __unicode__(self):
return self.title
class Meta:
ordering = ['-create_time']
class Key(models.Model):
form = models.ForeignKey('Form', null=True, on_delete=models.CASCADE)
keyLabel = models.CharField(max_length=70, null=False, blank=False)
keyType = models.CharField(max_length=10, null=False, blank=False)
create_time = models.DateTimeField(auto_now_add=True)
def __unicode__(self):
return self.keyLabel
class Meta:
ordering = ['-create_time']
class KeyContent(models.Model):
key = models.ForeignKey('Key', null=True, on_delete=models.CASCADE)
content = models.TextField()
create_time = models.DateTimeField(auto_now_add=True)
def __unicode__(self):
return self.content
class Meta:
ordering = ['-create_time']
| UTF-8 | Python | false | false | 1,158 | py | 9 | models.py | 5 | 0.670984 | 0.662349 | 0 | 44 | 25.318182 | 73 |
vaankiller/leetcode | 498,216,250,486 | 12c913fb991222908b7316f58955be2613342214 | 1c74dee0cf5efcdcebb52455cb8a11de210a02ab | /py2/273. Integer to English Words.py | 9fb4b1bec6048ade2cc86f1f630168d726979089 | [] | no_license | https://github.com/vaankiller/leetcode | f8072abad23ee41bda2a80d1a536120f11ada50f | 88b434bd493e6cb2f70267b40a87c2d881d89cb0 | refs/heads/master | "2020-04-16T02:11:58.057867" | "2018-12-24T08:49:40" | "2018-12-24T08:49:40" | 61,943,789 | 6 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | __author__ = 'vaan'
unit = ["Zero ", "One ", "Two ", "Three ", "Four ", "Five ", "Six ", "Seven ", "Eight ", "Nine "];
decade = ["Zero", "Ten", "Twenty ", "Thirty ", "Forty ", "Fifty ", "Sixty ", "Seventy ", "Eighty ", "Ninety "]
tenth = ["Ten ", "Eleven ", "Twelve ", "Thirteen ", "Fourteen ", "Fifteen ", "Sixteen ", "Seventeen ", "Eighteen ", "Nineteen "]
hundred = "Hundred "
thousand = "Thousand "
million = "Million "
billion = "Billion "
dct = {}
dct["unit"] = unit
dct["decade"] = decade
dct["tenth"] = tenth
dct[hundred] = 100
dct[thousand] = 1000
dct[million] = 1000000
dct[billion] = 1000000000
class Solution(object):
def numberToWords(self, num):
"""
:type num: int
:rtype: str
"""
ret = self.pre(num)
return ret[0:-1]
def pre(self, num):
ret = ""
if num is None:
return ret
if num >= dct[billion]:
ret += self.pre(num/dct[billion]) + billion + (self.pre(num % dct[billion]) if num % dct[billion] else "")
elif num >= dct[million]:
ret += self.pre(num/dct[million]) + million + (self.pre(num % dct[million]) if num % dct[million] else "")
elif num >= dct[thousand]:
ret += self.pre(num/dct[thousand]) + thousand + (self.pre(num % dct[thousand]) if num % dct[thousand] else "")
elif num >= dct[hundred]:
ret += self.pre(num/dct[hundred]) + hundred + (self.pre(num % dct[hundred]) if num % dct[hundred] else "")
elif num >= 20:
ret += dct["decade"][num/10] + (self.pre(num % 10) if num % 10 else "")
elif num >= 10:
ret += tenth[num % 10]
else:
ret += dct["unit"][num]
return ret
s = Solution()
print s.numberToWords(2147891236)
print s.pre(2147891236)
| UTF-8 | Python | false | false | 1,801 | py | 161 | 273. Integer to English Words.py | 80 | 0.540811 | 0.508606 | 0 | 49 | 35.714286 | 128 |
PouceHeure/py_light_mas | 8,272,107,034,995 | c32bb52e4cfaaa5ff50d038970f044e67af6c230 | 61fa9ae05e3fc5d926a56814743fd4dee8c08acd | /lib/py_light_mas/environnemnt.py | a43b431089fb18d0df6eeded6a532701d0bf21c6 | [] | no_license | https://github.com/PouceHeure/py_light_mas | ee2b99a5b671df8db6e6cf60112754d2223e8a10 | 05b01d9e9d0584fca02dd4f45f5eceaae3eeb606 | refs/heads/master | "2022-12-13T03:35:34.793809" | "2020-09-07T09:25:38" | "2020-09-07T09:25:38" | 292,829,428 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
class Environnemnt:
"""abstract class
"""
def on_event_new_tick(self):
"""method trigged when the simulation passed a new tick
"""
def on_event_show(self):
"""method trigged when the simulation ask to show the current state of the env
"""
def event_new_tick(self):
self.on_event_new_tick()
def event_show(self):
self.on_event_show()
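
# A minimal concrete subclass (editor's sketch, not part of the library),
# assuming the simulation drives the two hooks; names are illustrative.
class TickCounterEnvironnemnt(Environnemnt):
    """example environment that counts ticks and prints them on show"""
    def __init__(self):
        self.ticks = 0
    def on_event_new_tick(self):
        self.ticks += 1
    def on_event_show(self):
        print("tick #%d" % self.ticks)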
| UTF-8 | Python | false | false | 410 | py | 13 | environnemnt.py | 11 | 0.592683 | 0.592683 | 0 | 17 | 23.058824 | 86 |
clademorin/Email-Address-Extractor | 627,065,243,295 | d6ed6dd74712c53e0502a1e0847186f2b272f74b | 85d56946b1bc672492b3b3f4782be37751c2d03d | /Email-Address-Extractor.py | bf33898d27b21ec582dc5735f4871d71d5ce89a2 | [] | no_license | https://github.com/clademorin/Email-Address-Extractor | e1dedcf37239e7f4dca3155731a307f087b2dda7 | ad405d2fcc92273bbc3c2fa45f7664c5eaa9b834 | refs/heads/master | "2022-11-26T03:11:02.294315" | "2020-07-25T14:38:01" | "2020-07-25T14:38:01" | 282,456,873 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from tkinter import END, Frame, Tk, Button, Label, Text
import pyperclip
import re
def creaelenco():
indirizzi = ''
test = text_input.get("1.0", END).strip()
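    # grab every non-whitespace run shaped like local@domain.tld; a rough
    # pattern that suits pasted address lists, not full RFC 5322 validation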
elenco = re.findall("([\S]*@[\S]*\.[\w]*)", test)
for i in elenco:
indirizzi += i.replace('\n', '').replace('<', '')+';'
indirizzi = indirizzi[:-1]
text_output.delete("1.0", END)
text_output.insert("1.0", indirizzi)
interfaccia = Tk()
interfaccia.geometry("600x600")
interfaccia.title("Email Address Extractor")
testo = Frame(interfaccia, bd=3, relief="ridge", padx=15, pady=15)
testo.pack()
lb_input = Label(testo, text="Input:")
lb_input.pack()
text_input = Text(testo, bg="white", fg="black", padx=20, pady=20, height=9)
text_input.pack()
bt_input = Button(testo, text="Extract", command=creaelenco)
bt_input.pack(pady=10)
output = Frame(interfaccia, bd=3, relief="ridge", padx=15, pady=15)
output.pack()
lb_output = Label(output, text="Output:")
lb_output.pack()
text_output = Text(output, bg="white", fg="black", padx=20, pady=20, height=9)
text_output.pack()
bt_copy = Button(output, text="Copy to clipboard", command=lambda: pyperclip.copy(text_output.get('1.0', END).strip()))  # strip the trailing newline Tk's Text widget appends
bt_copy.pack(pady=10)
interfaccia.mainloop() | UTF-8 | Python | false | false | 1,262 | py | 3 | Email-Address-Extractor.py | 1 | 0.647385 | 0.616482 | 0 | 40 | 29.6 | 111 |
jaydeep1412/Catalent | 8,057,358,659,862 | 968c32f0ce7c52d73ffb956b7dd1448fece38cbd | e4ca0195b9f29f2622ab29f56ca380cc73d25f94 | /myapp/migrations/0004_auto_20201104_1510.py | 33f49e164f0d5af55075ed0514836e4283a20661 | [] | no_license | https://github.com/jaydeep1412/Catalent | e09838ec5f4cf90097985932a9814f4c23e08dfa | b79eae3e41de3e9bec3fb39ab6664b53d7cc1b49 | refs/heads/master | "2023-02-02T11:04:58.209225" | "2020-12-07T16:35:37" | "2020-12-07T16:35:37" | 313,762,220 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Generated by Django 3.1.2 on 2020-11-04 20:10
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('myapp', '0003_auto_20201104_1441'),
]
operations = [
migrations.AlterField(
model_name='analysis',
name='CorrelationOfStandards',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='analysis',
name='peakTailing',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='analysis',
name='resolution',
field=models.FloatField(blank=True, null=True),
),
]
| UTF-8 | Python | false | false | 748 | py | 18 | 0004_auto_20201104_1510.py | 10 | 0.572193 | 0.530749 | 0 | 28 | 25.714286 | 59 |
thomas-mckay/test-pycoins | 11,854,109,773,748 | 796e5b3167785e90fdf5432cce0d69f1f81e3927 | 607b04038659b4028b0818ba41dc5ba96db6ad94 | /tests/views/test_account.py | c5b5bf8fdbad670e27bd82e39f2baf9f8f8dcc25 | [] | no_license | https://github.com/thomas-mckay/test-pycoins | bf6a812bb270e89a42ab2ba7e23eaf61ffe4a330 | 6ddca95e7bfa5f6352f3af17549aeed7c817c2e6 | refs/heads/master | "2020-04-07T20:38:33.665397" | "2018-11-27T19:16:08" | "2018-11-27T19:16:08" | 158,697,418 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.conf import settings
from django.http import HttpResponse
from django.urls import reverse
from mock import mock
from ..utils import PyCoinsTestCase
class AccountViewsTestCase(PyCoinsTestCase):
def test_user_detail_view__no_auth(self):
url = reverse('user-details')
response = self.client.get(url)
self.assertRedirects(response, '{}?next={}'.format(settings.LOGIN_URL, url),
status_code=302, target_status_code=200,
fetch_redirect_response=True)
def test_user_detail_view__auth_ok(self):
self.login_as_user(self.client)
response = self.client.get(reverse('user-details'))
self.assertEqual(response.status_code, 200)
def test_user_home_view(self):
response = self.client.get(reverse('home'))
self.assertEqual(response.status_code, 200)
def test_user_api_docs_view(self):
response = self.client.get(reverse('api_docs'))
self.assertEqual(response.status_code, 200)
def test_user_signup_view(self):
response = self.client.get(reverse('signup'))
self.assertEqual(response.status_code, 200)
def test_user_login_view(self):
response = self.client.get(reverse('login'))
self.assertEqual(response.status_code, 200)
def test_user_password_reset_view(self):
response = self.client.get(reverse('password-reset'))
self.assertEqual(response.status_code, 200)
def test_user_password_reset_confirm_view(self):
response = self.client.get(reverse('password_reset_confirm', kwargs=dict(uidb64='foo', token='ba-bar')))
self.assertEqual(response.status_code, 200)
def test_user_account_confirm_email_view(self):
with mock.patch('pycoins.views.account.VerifyEmailView.post') as mock_post:
mock_post.return_value = HttpResponse(status=200)
response = self.client.get(reverse('account_confirm_email', kwargs=dict(key='foo')))
self.assertRedirects(response, '{}?info=Your+email+has+been+confirmed.'.format(reverse('home')),
status_code=302, target_status_code=200,
fetch_redirect_response=True)
self.assertEqual(mock_post.call_count, 1)
| UTF-8 | Python | false | false | 2,296 | py | 50 | test_account.py | 32 | 0.652003 | 0.635017 | 0 | 55 | 40.745455 | 112 |
YikeZhou/csp-solution | 16,896,401,379,442 | 88f0cf0882fa4067d660fa2e6eb8f8e67dcf89d7 | aaf19a505caaf6b6fe379ddcecffd4b26e8ec3cb | /201712/3.py | 0710d6f5ac278556739f5829c3006ba0c441c78c | [] | no_license | https://github.com/YikeZhou/csp-solution | cb8a07b925ab6bb50bd0637ebc151d83b31fb7d2 | 9b7fe8be09863c87a54830e5a22a356ba9c7143e | refs/heads/master | "2022-12-22T05:11:45.285684" | "2020-09-13T04:25:32" | "2020-09-13T04:25:32" | 293,463,583 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | '''
File: 3.py
Project: 201712
File Created: Sunday, 6th September 2020 10:10:23 am
Author: zyk
-----
Last Modified: Sunday, 6th September 2020 4:49:46 pm
Modified By: zyk
-----
2020 - HUST
'''
from datetime import datetime, timedelta
class Time:
def __init__(self, year, month, day, hour, minute):
self.dt = datetime(year, month, day, hour, minute)
@classmethod
def parse(cls, num):
return cls(int(num / 100000000), int((num % 100000000) / 1000000), int((num % 1000000) / 10000),
int((num % 10000) / 100), int(num % 100))
def __repr__(self):
return 'Time({0.dt.year!r}, {0.dt.month!r}, {0.dt.day!r}, {0.dt.hour!r}, {0.dt.minute!r})'.format(self)
    def __str__(self):
        return self.dt.strftime('%Y%m%d%H%M')
def inc(self):
one_min = timedelta(minutes=1)
self.dt += one_min
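# e.g. str(Time.parse(201709062359)) == '201709062359', and one inc() rolls
# it over to '201709070000' via datetime arithmetic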
Weekdays = {'sun': 0, 'mon': 1, 'tue': 2, 'wed': 3, 'thu': 4, 'fri': 5, 'sat': 6}
Months = {'jan': 1, 'feb': 2, 'mar': 3, 'apr': 4, 'may': 5, 'jun': 6,
'jul': 7, 'aug': 8, 'sep': 9, 'oct': 10, 'nov': 11, 'dec': 12}
def get_month(s):
if s.lower() in Months.keys():
return Months[s.lower()]
else:
return int(s)
def get_weekday(s):
if s.lower() in Weekdays.keys():
return Weekdays[s.lower()]
else:
return int(s)
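# e.g. get_month('oct') == 10, get_month('7') == 7, get_weekday('Fri') == 5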
def parse_field(spec, lo, hi, conv=int):
    """Parse one crontab field ('*', comma lists, a-b ranges) into (start, end) tuples."""
    if spec == '*':
        return [(lo, hi)]
    ranges = []
    for part in spec.split(','):
        if '-' in part:
            a, b = map(conv, part.split('-'))
        else:
            a = b = conv(part)
        ranges.append((a, b))
    return ranges


class Task:
    def __init__(self, minutes, hours, day_of_month, month, day_of_week, command):
        # save the command string and the allowed (start, end) ranges per field
        self.command = command
        self.minutes = parse_field(minutes, 0, 59)
        self.hours = parse_field(hours, 0, 23)
        self.day_of_month = parse_field(day_of_month, 1, 31)
        self.month = parse_field(month, 1, 12, get_month)
        self.day_of_week = parse_field(day_of_week, 0, 6, get_weekday)

    def match(self, t: Time):
        def in_ranges(val, ranges):
            return any(b <= val <= e for (b, e) in ranges)

        dow = (t.dt.weekday() + 1) % 7  # datetime uses Mon=0; crontab uses Sun=0
        return (in_ranges(t.dt.month, self.month)
                and in_ranges(t.dt.day, self.day_of_month)
                and in_ranges(t.dt.hour, self.hours)
                and in_ranges(t.dt.minute, self.minutes)
                and in_ranges(dow, self.day_of_week))
n, s, t = map(int, input().split())
confs = list()
# read n lines
for i in range(n):
# crontab config info
minutes, hours, day_of_month, month, day_of_week, command = input().split()
# process each field
confs.append(Task(minutes, hours, day_of_month, month, day_of_week, command))
start = Time.parse(s)
end = Time.parse(t)
cur = start
while cur.dt != end.dt:
for conf in confs:
if conf.match(cur):
print(cur, conf.command)
cur.inc()
| UTF-8 | Python | false | false | 5,242 | py | 14 | 3.py | 14 | 0.454216 | 0.428653 | 0 | 173 | 29.300578 | 214 |
a-luna/vigorish | 17,506,286,708,651 | e51a384cb382be35e775bbf7f019f4340e79bf34 | b7500e25551e14fd71694bf7a240eb7c95e38f20 | /src/vigorish/tasks/sync_data_no_prompts.py | 153192904826f3837c75dcf15e5f46b58e87d036 | [
"MIT"
] | permissive | https://github.com/a-luna/vigorish | 5f05af1d44745af91a82f84886dd81f8a2cf7489 | 84bd02311b35e2789d741d8cb10a3e4e584f0255 | refs/heads/master | "2023-08-22T11:51:51.866613" | "2022-07-08T14:55:56" | "2022-07-08T14:55:56" | 174,183,970 | 2 | 2 | MIT | false | "2023-08-16T02:01:16" | "2019-03-06T16:48:22" | "2021-10-08T00:58:37" | "2023-08-16T02:01:15" | 24,880 | 2 | 2 | 33 | Python | false | false | """Task that syncs scraped data between the local folder and S3 without interactive prompts."""
import subprocess
from halo import Halo
from vigorish.cli.components import (
get_random_cli_color,
get_random_dots_spinner,
print_heading,
print_message,
)
from vigorish.enums import DataSet, SyncDirection, VigFile
from vigorish.tasks.base import Task
from vigorish.tasks.sync_scraped_data import SyncScrapedDataTask
from vigorish.util.result import Result
SYNC_STATUS_TEXT_COLOR = {
"out_of_sync": "bright_green",
"in_sync": "blue",
"sync_complete": "bright_green",
"error": "bright_red",
}
class SyncDataNoPromptsTask(Task):
def __init__(self, app):
super().__init__(app)
self.s3_sync = SyncScrapedDataTask(self.app)
self.sync_direction = None
self.year = None
self.file_type = None
self.data_sets_int = 0
self.data_set = None
self.task_number = 0
self.sync_files = {}
self.sync_results = []
self.spinners = {}
self.results = {}
@property
def data_sets(self):
if self.file_type == VigFile.COMBINED_GAME_DATA:
return [DataSet.ALL]
return sorted(ds for ds in DataSet if ds in self.valid_data_sets() and self.data_sets_int & ds == ds)
@property
def total_tasks(self):
return len(self.data_sets)
@property
def all_files_are_in_sync(self):
return all(not out_of_sync for (out_of_sync, _, _) in self.sync_files.values())
def execute(self, sync_direction, year, file_type, data_sets_int):
self.sync_direction = sync_direction
self.year = year
self.file_type = file_type
self.data_sets_int = data_sets_int
self.subscribe_to_events()
for data_set in self.data_sets:
self.task_number += 1
self.data_set = data_set
self.report_sync_results()
self.results[data_set] = self.s3_sync.execute(sync_direction, file_type, data_set, year)
if self.results[data_set].failure:
return self.results
self.spinners[data_set].stop()
self.report_sync_results()
self.unsubscribe_from_events()
return self.results
def valid_data_sets(self):
data_set_file_type_map = {
VigFile.SCRAPED_HTML: list(DataSet),
VigFile.PARSED_JSON: list(DataSet),
VigFile.COMBINED_GAME_DATA: [DataSet.ALL],
VigFile.PATCH_LIST: [
DataSet.BBREF_GAMES_FOR_DATE,
DataSet.BBREF_BOXSCORES,
DataSet.BROOKS_GAMES_FOR_DATE,
DataSet.BROOKS_PITCHFX,
],
}
return data_set_file_type_map[self.file_type]
def report_sync_results(self):
subprocess.run(["clear"])
self.print_header_message()
if not self.sync_results:
return
for task_result in self.sync_results:
print_message(task_result[0], wrap=False, fg=task_result[1])
def print_header_message(self):
src_folder = "S3 bucket" if self.sync_direction == SyncDirection.DOWN_TO_LOCAL else "local folder"
dest_folder = "local folder" if self.sync_direction == SyncDirection.DOWN_TO_LOCAL else "S3 bucket"
heading = f"Syncing data from {src_folder} to {dest_folder}"
print_heading(heading, fg="bright_yellow")
def error_occurred(self, error_message):
self.sync_results.append((error_message, SYNC_STATUS_TEXT_COLOR["error"]))
self.results[self.data_set] = Result.Fail(error_message)
def get_s3_objects_start(self):
subprocess.run(["clear"])
self.spinners["default"] = Halo(
spinner=get_random_dots_spinner(),
color=get_random_cli_color(),
text="Retrieving data for all objects stored in S3...",
)
self.spinners["default"].start()
def get_s3_objects_complete(self):
self.spinners["default"].stop()
def find_out_of_sync_files_start(self):
self.spinners[self.data_set] = Halo(
spinner=get_random_dots_spinner(),
color=get_random_cli_color(),
text=(
f"Analyzing MLB {self.year} {self.file_type} {self.data_set} files "
f"(Task {self.task_number}/{self.total_tasks})..."
),
)
self.spinners[self.data_set].start()
def find_out_of_sync_files_complete(self, sync_results):
(out_of_sync, new_files, update_files) = sync_results
self.sync_files[self.data_set] = (out_of_sync, new_files, update_files)
sync_files = self.sync_files_text(out_of_sync, new_files, update_files)
text_color = SYNC_STATUS_TEXT_COLOR["out_of_sync"] if out_of_sync else SYNC_STATUS_TEXT_COLOR["in_sync"]
self.sync_results.append((sync_files, text_color))
def sync_files_text(self, out_of_sync, new_files, update_files):
sync_count = len(new_files) + len(update_files)
sync_files = f"{sync_count} files out of sync" if out_of_sync else "All files in sync"
return (
f"[{self.year} {self.file_type} {self.data_set}] {sync_files} (Task {self.task_number}/{self.total_tasks})"
)
def sync_files_start(self, name, complete, total):
self.report_sync_results()
self.sync_files_progress(name, complete, total)
def sync_files_progress(self, name, complete, total):
direction = "Down" if self.sync_direction == SyncDirection.DOWN_TO_LOCAL else "Up"
percent = complete / float(total)
progress_message = f"{direction}loading: {name} | {percent:.0%} ({complete}/{total} Files)"
self.spinners[self.data_set].text = progress_message
def sync_files_complete(self):
        dest_folder = "local folder" if self.sync_direction == SyncDirection.DOWN_TO_LOCAL else "S3 bucket"
        src_folder = "S3 bucket" if self.sync_direction == SyncDirection.DOWN_TO_LOCAL else "local folder"
sync_complete = f"[{self.year} {self.file_type} {self.data_set}] {dest_folder} is in sync with {src_folder}!"
self.sync_results.append((sync_complete, SYNC_STATUS_TEXT_COLOR["sync_complete"]))
def subscribe_to_events(self):
self.s3_sync.events.error_occurred += self.error_occurred
self.s3_sync.events.get_s3_objects_start += self.get_s3_objects_start
self.s3_sync.events.get_s3_objects_complete += self.get_s3_objects_complete
self.s3_sync.events.find_out_of_sync_files_start += self.find_out_of_sync_files_start
self.s3_sync.events.find_out_of_sync_files_complete += self.find_out_of_sync_files_complete
self.s3_sync.events.sync_files_start += self.sync_files_start
self.s3_sync.events.sync_files_progress += self.sync_files_progress
self.s3_sync.events.sync_files_complete += self.sync_files_complete
def unsubscribe_from_events(self):
self.s3_sync.events.error_occurred -= self.error_occurred
self.s3_sync.events.get_s3_objects_start -= self.get_s3_objects_start
self.s3_sync.events.get_s3_objects_complete -= self.get_s3_objects_complete
self.s3_sync.events.find_out_of_sync_files_start -= self.find_out_of_sync_files_start
self.s3_sync.events.find_out_of_sync_files_complete -= self.find_out_of_sync_files_complete
self.s3_sync.events.sync_files_start -= self.sync_files_start
self.s3_sync.events.sync_files_progress -= self.sync_files_progress
self.s3_sync.events.sync_files_complete -= self.sync_files_complete
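# Illustrative driver (assuming `task` is an instance of the class above; the
# Result API follows how it is used in execute()):
#     results = task.execute(SyncDirection.DOWN_TO_LOCAL, 2019,
#                            VigFile.PARSED_JSON, int(DataSet.ALL))
#     failed = [ds for ds, result in results.items() if result.failure]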
| UTF-8 | Python | false | false | 7,538 | py | 513 | sync_data_no_prompts.py | 238 | 0.634253 | 0.629079 | 0 | 175 | 42.074286 | 119 |
matiasz8/tasty-broccoli | 14,010,183,327,234 | 97483978c2a7a45ac43b4cd388cc438f1c98a974 | da2437008a3c386413ab10e093253104f01f162e | /lattuga/views.py | 4d2abda2cb9456523be4628fc00b81dfeddcc51d | [] | no_license | https://github.com/matiasz8/tasty-broccoli | 57a9b3e33e70cf866f752ffcc75b63a4c5f80654 | c5ff0f3ee20e5833ac09640d77d02dcc0decc51f | refs/heads/main | "2023-03-20T20:54:44.248467" | "2021-03-16T05:05:45" | "2021-03-16T05:05:45" | 320,475,442 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.shortcuts import render
from django.http import HttpResponse
def index(request):
    return HttpResponse("<html><p>Response Tasty OK</p></html>", status=201)
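# Illustrative wiring (assumed, not shown in this file): the app's urls.py
# would route requests here with something like
#     path('', views.index, name='index')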
| UTF-8 | Python | false | false | 178 | py | 5 | views.py | 3 | 0.764045 | 0.747191 | 0 | 6 | 28.666667 | 77 |
JesusRivera98/reportes-neurociencias | 13,237,089,217,424 | df66811fbafaf5fdce1327bf97e3b81ab00081cf | dc9275f75d3855d591bfed41ee5a82f50fd320b1 | /src/main/python/controladores/LNSController.py | 459646acfb888c4489ae0be79f070ad57cfe576d | [] | no_license | https://github.com/JesusRivera98/reportes-neurociencias | 477a3fbc8d0ac40f289b5f70bee6f193d509243f | da39a4135e6715bab68c87a4efeefc45f11a7297 | refs/heads/master | "2023-01-20T05:07:49.266729" | "2020-12-01T17:18:53" | "2020-12-01T17:18:53" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #Controlador de la vista de LNSWindowWidget
from PyQt5 import QtWidgets, QtCore
from vistas.LNSWindowWidget import *
from MainWindowController import *
from ReporteModel import *
from pruebas.LNSPrueba import *
from PruebaModel import *
from ControllerModel import *
class LNSController(QtWidgets.QWidget, ControllerModel):
    # Attribute used to perform the view switch
switch_window = QtCore.pyqtSignal(object, object)
def __init__(self, mainWindow, reporteModel=None):
QtWidgets.QWidget.__init__(self)
self.lnsView = LNSWindowWidget(mainWindow)
self.lnsView.pbStart.clicked.connect(self.getDatos)
self.reporteModel = reporteModel
self.invalidArgs = list()
def changeView(self):
"""
        Method responsible for notifying which elements will be passed as parameters to the next view
"""
self.switch_window.emit(self.invalidArgs, self.lnsPrueba)
def getDatos(self):
"""
        Method that takes the data entered in the LNS view
"""
view = self.lnsView
span = view.sbSpan.value()
total = view.sbTotal.value()
valores = (span, total)
self.lnsPrueba = LNSPrueba(valores)
        # takes the patient's years of schooling
datos = self.reporteModel.reporte['educacion']
self.lnsPrueba.calcularPERP(datos)
self.changeView()
def emptyInvalidArgs(self):
"""
        Method that empties the list of invalid elements in the view
"""
self.invalidArgs = list()
def addInvalidArg(self, arg):
"""
        Method that appends the specified parameter to the list of invalid elements
        Args:
            arg: String to append to the list of invalid elements
"""
if len(self.invalidArgs) == 0:
self.invalidArgs = [arg]
else:
tempList = self.invalidArgs
tempList.append(arg)
self.invalidArgs = tempList
def getListMenu(self):
"""
        Method that returns the id of the menu in the LNS view
"""
return self.lnsView.lWVistas
def getProgressBar(self):
"""
        Method that returns the value of the progress bar
"""
return self.lnsView.progressBar
def updateButtonText(self, text):
"""
        Method that updates the text of the view's button
"""
self.lnsView.pbStart.setText(text)
# Unit tests
#if __name__ == "__main__":
# import sys
# app = QtWidgets.QApplication(sys.argv)
# fluidezWindow = QtWidgets.QWidget()
# fluidezVerbalController = LNSController(fluidezWindow)
# fluidezWindow.show()
# sys.exit(app.exec_())
| UTF-8 | Python | false | false | 2,649 | py | 29 | LNSController.py | 23 | 0.722919 | 0.722159 | 0 | 101 | 25.049505 | 101 |
Envinorma/back-office | 7,198,365,225,380 | 2ab2fda878fb34adc9c28af78b1af977ecef05cc | 1c8d46b8cb7b680cf809158be9c652e00126592c | /back_office/pages/edit_parameter_element/form_handling.py | f290e5b8f88fd054c73c6119519612b64deba0f4 | [
"MIT"
] | permissive | https://github.com/Envinorma/back-office | c060b8a3da659ec2b539f00bb711ce5a460bcabf | 2d2c082d40b8c3cb58d2bc0295801baf16cb983d | refs/heads/main | "2021-10-05T02:30:34.604942" | "2021-09-24T15:25:43" | "2021-09-24T15:25:43" | 364,665,307 | 0 | 0 | MIT | false | "2021-09-23T14:49:49" | "2021-05-05T18:05:37" | "2021-09-23T12:43:04" | "2021-09-23T14:49:48" | 19,154 | 0 | 0 | 3 | Python | false | false | import json
import traceback
from dataclasses import dataclass
from typing import Dict, List, Optional
from envinorma.models import ArreteMinisteriel, StructuredText
from envinorma.models.condition import load_condition
from envinorma.models.text_elements import EnrichedString, Table
from envinorma.parametrization import AlternativeSection, AMWarning, Condition, InapplicableSection, ParameterElement
from back_office.helpers.parse_table import parse_table
from back_office.pages.edit_parameter_element.target_sections_form import TargetSectionFormValues
from back_office.utils import DATA_FETCHER, AMOperation, ensure_not_none
class FormHandlingError(Exception):
pass
def _build_condition(condition: Optional[str]) -> Condition:
if not condition:
raise FormHandlingError('La condition doit être définie.')
try:
return load_condition(json.loads(condition))
except Exception:
raise FormHandlingError(
f'Erreur inattendue dans la condition :\n{condition}\n' f'Erreur complète :\n{traceback.format_exc()}'
)
@dataclass
class _Modification:
section_id: str
target_alineas: Optional[List[int]]
new_text: Optional[StructuredText]
propagate_in_subsection: Optional[bool]
def _parse_table(element: str) -> EnrichedString:
try:
result = parse_table(element)
except ValueError as exc:
raise FormHandlingError(str(exc))
if isinstance(result, Table):
return EnrichedString('', table=result)
if isinstance(result, str):
return EnrichedString(result)
raise ValueError(f'Impossible de parser {element}')
def _extract_alineas(text: str) -> List[EnrichedString]:
return [_parse_table(line) for line in text.split('\n')]
_MIN_NB_CHARS = 1
def _check_and_build_new_text(title: str, content: str) -> StructuredText:
if len(title or '') < _MIN_NB_CHARS:
raise FormHandlingError(f'Le champ "Titre" doit contenir au moins {_MIN_NB_CHARS} caractères.')
if len(content or '') < _MIN_NB_CHARS:
raise FormHandlingError(f'Le champ "Contenu du paragraphe" doit contenir au moins {_MIN_NB_CHARS} caractères.')
return StructuredText(EnrichedString(title), _extract_alineas(content), [], None)
def _build_new_text(new_text_title: Optional[str], new_text_content: Optional[str]) -> Optional[StructuredText]:
if not new_text_title and not new_text_content:
return None
if new_text_title and not new_text_content:
raise FormHandlingError('Le champ "Contenu du paragraphe" doit être défini.')
if new_text_content and not new_text_title:
raise FormHandlingError('Le champ "Titre" doit être défini.')
return _check_and_build_new_text(new_text_title or '', new_text_content or '')
def _simplify_alineas(section: StructuredText, target_alineas: Optional[List[int]]) -> Optional[List[int]]:
if not target_alineas:
return None
if len(set(target_alineas)) == len(section.outer_alineas):
return None
return target_alineas
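# Example: for a section whose outer_alineas has length 3, target_alineas
# [0, 1, 2] covers every alinea and is simplified to None ("whole section"),
# while [0, 2] is kept as an explicit subset.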
def _build_target_version(
section_id_to_section: Dict[str, StructuredText],
new_text_title: Optional[str],
new_text_content: Optional[str],
section_id: str,
target_alineas: Optional[List[int]],
propagate_in_subsection: Optional[bool],
) -> _Modification:
if not section_id:
raise FormHandlingError('La section visée doit être sélectionnée.')
if section_id not in section_id_to_section:
raise FormHandlingError(f'La section "{section_id}" n\'existe pas.')
section = section_id_to_section[section_id]
simplified_target_alineas = _simplify_alineas(section, target_alineas)
new_text = _build_new_text(new_text_title, new_text_content)
return _Modification(section_id, simplified_target_alineas, new_text, propagate_in_subsection)
def _build_target_versions(am: ArreteMinisteriel, form_values: TargetSectionFormValues) -> List[_Modification]:
new_texts_titles = form_values.new_texts_titles or len(form_values.target_sections) * [None]
new_texts_contents = form_values.new_texts_contents or len(form_values.target_sections) * [None]
target_sections = form_values.target_sections
target_alineas = form_values.target_alineas or len(form_values.target_sections) * [None]
propagate_in_subsection = form_values.propagate_in_subsection or len(form_values.target_sections) * [None]
section_id_to_section = {section.id: section for section in am.descendent_sections()}
return [
_build_target_version(section_id_to_section, title, content, section, alineas, propagate_in_subsection)
for title, content, section, alineas, propagate_in_subsection in zip(
new_texts_titles, new_texts_contents, target_sections, target_alineas, propagate_in_subsection
)
]
def _build_inapplicable_section(condition: Condition, modification: _Modification) -> InapplicableSection:
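    # Default to propagating inapplicability to subsections when the form did
    # not specify propagate_in_subsection (i.e. it is None).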
return InapplicableSection(
modification.section_id,
modification.target_alineas,
condition=condition,
subsections_are_inapplicable=modification.propagate_in_subsection
if modification.propagate_in_subsection is not None
else True,
)
def _build_am_warning(section_id: str, warning_content: str) -> AMWarning:
min_len = 10
if len(warning_content or '') <= min_len:
raise FormHandlingError(f'Le champ "Contenu de l\'avertissement" doit contenir au moins {min_len} caractères.')
return AMWarning(section_id, warning_content)
def _build_parameter_object(
operation: AMOperation,
condition: Optional[Condition],
modification: _Modification,
warning_content: str,
) -> ParameterElement:
if operation == AMOperation.ADD_ALTERNATIVE_SECTION:
return AlternativeSection(
section_id=modification.section_id,
new_text=ensure_not_none(modification.new_text),
condition=ensure_not_none(condition),
)
if operation == AMOperation.ADD_CONDITION:
return _build_inapplicable_section(ensure_not_none(condition), modification)
if operation == AMOperation.ADD_WARNING:
return _build_am_warning(modification.section_id, warning_content)
raise NotImplementedError(f'Not implemented for operation {operation}')
def _extract_new_parameter_objects(
operation: AMOperation,
am: ArreteMinisteriel,
target_section_form_values: TargetSectionFormValues,
condition_str: Optional[str],
warning_content: str,
) -> List[ParameterElement]:
condition = _build_condition(condition_str) if operation != AMOperation.ADD_WARNING else None
target_versions = _build_target_versions(am, target_section_form_values)
return [
_build_parameter_object(operation, condition, target_version, warning_content)
for target_version in target_versions
]
def _check_consistency(operation: AMOperation, parameters: List[ParameterElement]) -> None:
for parameter in parameters:
if operation == AMOperation.ADD_CONDITION:
assert isinstance(parameter, InapplicableSection), f'Expect InapplicableSection, got {type(parameter)}'
elif operation == AMOperation.ADD_ALTERNATIVE_SECTION:
assert isinstance(parameter, AlternativeSection), f'Expect AlternativeSection, got {type(parameter)}'
elif operation == AMOperation.ADD_WARNING:
assert isinstance(parameter, AMWarning), f'Expect AMWarning, got {type(parameter)}'
else:
raise ValueError(f'Unexpected operation {operation}')
def extract_and_upsert_new_parameter(
operation: AMOperation,
am_id: str,
parameter_id: Optional[str],
target_section_form_values: TargetSectionFormValues,
condition: Optional[str],
warning_content: str,
) -> None:
am = DATA_FETCHER.load_am(am_id)
if not am:
raise ValueError(f'AM with id {am_id} not found!')
new_parameters = _extract_new_parameter_objects(
operation, am, target_section_form_values, condition, warning_content
)
_check_consistency(operation, new_parameters)
_upsert_parameters(am_id, new_parameters, parameter_id)
def _upsert_parameters(am_id: str, new_parameters: List[ParameterElement], parameter_id: Optional[str]):
if parameter_id is not None:
if len(new_parameters) != 1:
            raise ValueError('Must have only one parameter when updating a specific parameter.')
DATA_FETCHER.upsert_parameter(am_id, new_parameters[0], parameter_id)
else:
for parameter in new_parameters:
DATA_FETCHER.upsert_parameter(am_id, parameter, None)
| UTF-8 | Python | false | false | 8,625 | py | 86 | form_handling.py | 74 | 0.713738 | 0.713158 | 0 | 206 | 40.800971 | 119 |
RavinduAye/pythonProject | 7,808,250,547,256 | 988a34f26a4857c18b4ed92bad6dd01e7317c00c | c0cf536e04147bff8daf35cf47f7bec245e11951 | /venv/Lib/site-packages/detecting/utils/misc.py | ec6efdbd4291d03c5c47fa09e8118b278571ba83 | [] | no_license | https://github.com/RavinduAye/pythonProject | 8f29c5bedc1f23a4901db2abbaa0e439de526d2a | ff1ecb407f33697b02f2f2061912841e168fd33f | refs/heads/master | "2023-05-02T03:57:21.674224" | "2021-05-17T06:44:51" | "2021-05-17T06:44:51" | 367,957,729 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import tensorflow as tf
# 用于删除数值都为0的候选框
def trim_zeros(boxes, name=None):
'''Often boxes are represented with matrices of shape [N, 4] and
are padded with zeros. This removes zero boxes.
Args
---
boxes: [N, 4] matrix of boxes.
non_zeros: [N] a 1D boolean mask identifying the rows to keep
'''
# 对候选框数值求和
non_zeros = tf.cast(tf.reduce_sum(tf.abs(boxes), axis=1), tf.bool)
# 只留下数值不为0的候选框
boxes = tf.boolean_mask(boxes, non_zeros, name=name)
# 返回数值不为0的候选框
return boxes, non_zeros
# 图片元数据解析
def parse_image_meta(meta):
'''Parses a tensor that contains image attributes to its components.
Args
---
meta: [..., 12]
Returns
---
a dict of the parsed tensors.
'''
meta = meta.numpy()
# 原始图片shape
ori_shape = meta[..., 0:3]
# resize后图片shape
img_shape = meta[..., 3:6]
# 填充后图片shape
pad_shape = meta[..., 6:9]
# 图片缩放因子
scale = meta[..., 9:11]
# 图片是否翻转
flip = meta[..., 11]
return {
'ori_shape': ori_shape,
'img_shape': img_shape,
'pad_shape': pad_shape,
'scale': scale,
'flip': flip
}
# 计算一个批次中填充后的图片的最大高度和宽度
def calc_batch_padded_shape(meta):
'''
Args
---
meta: [batch_size, 12]
Returns
---
nd.ndarray. Tuple of (height, width)
'''
# meta[:, 6:8]填充后的图片shape
# tf.reduce_max计算最大值
return tf.cast(tf.reduce_max(meta[:, 6:8], axis=0), tf.int32).numpy()
# 得到resize后的图片shape
def calc_img_shapes(meta):
'''
Args
---
meta: [..., 12]
Returns
---
nd.ndarray. [..., (height, width)]
'''
# meta[:, 3:5]resize后的图片shape
return tf.cast(meta[..., 3:5], tf.int32).numpy()
# 得到填充后的图片shape
def calc_pad_shapes(meta):
'''
Args
---
meta: [..., 12]
Returns
---
nd.ndarray. [..., (height, width)]
'''
# meta[:, 6:8]填充后的图片shape
return tf.cast(meta[..., 6:8], tf.int32).numpy() | UTF-8 | Python | false | false | 2,268 | py | 42 | misc.py | 35 | 0.541252 | 0.518887 | 0 | 92 | 20.880435 | 73 |
pkolt/django-filefieldtools | 3,109,556,367,671 | 1d18197602e08bfd7d07320381ab026621ed5a93 | ad8790306f06b9d0bbf33f0b43a9f811f328079d | /tests/app/tests/test_tools.py | 96cc58cd4bb4073b1603e7d9252f3b384eff2a24 | [
"BSD-3-Clause"
] | permissive | https://github.com/pkolt/django-filefieldtools | 1064d0ed1b06e016ae0c01cd1495fc525cc2f4c3 | 4e886c9ab173449aa925c107cb9e8f353343e253 | refs/heads/master | "2020-04-10T16:15:36.198093" | "2013-04-28T16:19:57" | "2013-04-28T16:19:57" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
from django.utils import unittest
from filefieldtools.tools import (clean_filename, translate_filename,
control_length, upload_to)
class TestTools(unittest.TestCase):
def test_translate_filename(self):
value = translate_filename(u'Прайс для клиентов')
self.assertEqual(value, 'Prays dlya klientov')
def test_clean_name(self):
value = clean_filename(' Price_-_.for clients.xls ')
self.assertEqual(value, 'Price-for-clients.xls')
value = clean_filename('Price for clients')
self.assertEqual(value, 'Price-for-clients')
value = clean_filename(u'Прайс для клиентов.xls')
self.assertEqual(value, 'Prays-dlya-klientov.xls')
def test_upload_to(self):
value = upload_to()(None, 'picture.jpg')
self.assertEqual(value, 'uploads/picture.jpg')
value = upload_to('books/authors')(None, 'picture.jpg')
self.assertEqual(value, 'uploads/books/authors/picture.jpg')
value = upload_to('books/authors', to_lower=False)(None, 'Picture.JPG')
self.assertEqual(value, 'uploads/books/authors/Picture.JPG')
value = upload_to('books/authors')(None, 'Picture.JPG')
self.assertEqual(value, 'uploads/books/authors/picture.jpg')
value = upload_to('books/authors', to_hash=True)(None, 'Picture.JPG')
self.assertNotEqual(value, 'uploads/books/authors/picture.jpg')
self.assertTrue(value.startswith('uploads/books/authors/'))
self.assertTrue(value.endswith('.jpg'))
value = upload_to('books/%Y/%m/%d')(None, 'Picture.JPG')
year, month, day = value.split('/')[2:5]
self.assertTrue(year.isdigit())
self.assertTrue(month.isdigit())
self.assertTrue(day.isdigit())
def test_control_length(self):
# lenght = 49
value = control_length('uploads/books/authors/client_prices_abcdefghi.xls', 50)
self.assertEqual(value, 'uploads/books/authors/client_prices_abcdefghi.xls')
# lenght = 50
value = control_length('uploads/books/authors/client_prices_abcdefghij.xls', 50)
self.assertEqual(value, 'uploads/books/authors/client_prices_abcdefghij.xls')
# length = 51
value = control_length('uploads/books/authors/client_prices_abcdefghijk.xls', 50)
self.assertEqual(value, 'uploads/books/authors/client_prices_abcdefghij.xls')
# length = 51
value = control_length('uploads/books/authors/client_prices_abcdefghi-k.xls', 50)
self.assertEqual(value, 'uploads/books/authors/client_prices_abcdefghi.xls')
# length = 51
value = control_length('uploads/books/12345678901234567890123/authors/a.xls', 50)
self.assertEqual(value, 'uploads/books/12345678901234567890123/authors/a.xls')
| UTF-8 | Python | false | false | 2,872 | py | 14 | test_tools.py | 9 | 0.658803 | 0.634507 | 0 | 65 | 42.692308 | 89 |
hafiztsalavin/catatanDasarPython | 6,975,026,911,689 | 748c766e32e1b3d154558db6c113c6a7203c87e1 | da82d899f9c057138524923e0ef50b5a50a0abf2 | /stack and queue/stack and queue/stack and queue/queue.py | 8ad667ede6c796344733f1cb94dc73b70cbe2880 | [] | no_license | https://github.com/hafiztsalavin/catatanDasarPython | e0827410ffc7f6ea11b79e02c25a3cd574e4ccba | 32b68eabdb52fc11dff9a50fadd92aeb3d57b945 | refs/heads/master | "2021-06-22T18:03:50.233951" | "2019-07-27T18:19:13" | "2019-07-27T18:19:13" | 199,197,001 | 0 | 0 | null | false | "2021-03-29T20:09:39" | "2019-07-27T17:51:00" | "2020-10-23T08:54:18" | "2021-03-29T20:09:39" | 1,515 | 0 | 0 | 1 | Python | false | false | from collections import deque
# ANTRIAN
Antrian = deque([1,2,3,4,5,6,7,8])
print ('tumpukan sekarang ',Antrian)
Antrian.append(9)
print ('tumpukan sekarang ',Antrian)
Antrian.append(10)
print ('tumpukan sekarang ',Antrian)
keluar = Antrian.popleft()
print ('data yang keluar : ',keluar)
print ('tumpukan sekarang ',Antrian)
keluar = Antrian.popleft()
print ('data yang keluar : ',keluar)
print ('tumpukan sekarang ',Antrian)
keluar = Antrian.popleft()
print ('data yang keluar : ',keluar)
print ('tumpukan sekarang ',Antrian)
| UTF-8 | Python | false | false | 557 | py | 61 | queue.py | 57 | 0.691203 | 0.671454 | 0 | 24 | 21.208333 | 36 |
pycalphad/scheil | 16,887,811,433,090 | 3515049d730a477ebbd9a06aaeacbb42bdef5ca6 | 7c008d1e34080359840f368462106a0336e1928f | /scheil/simulate.py | beba5d183e6ee54c45f6ffe99e2d62dd5cdbd409 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | https://github.com/pycalphad/scheil | b9e1df2ba073bc6767f3f8a0ff0145ab7e349514 | 05a89a914db0f268142a91fa4045f9dca2ce9930 | refs/heads/master | "2023-07-10T06:59:19.620357" | "2023-06-27T16:12:54" | "2023-06-27T16:12:54" | 150,358,281 | 11 | 5 | MIT | false | "2023-06-29T21:11:06" | "2018-09-26T02:38:19" | "2023-06-27T16:13:02" | "2023-06-29T21:11:06" | 143 | 10 | 4 | 5 | Python | false | false | import sys
import numpy as np
from pycalphad import equilibrium, variables as v
from pycalphad.codegen.callables import build_phase_records
from pycalphad.core.calculate import _sample_phase_constitution
from pycalphad.core.utils import instantiate_models, unpack_components, filter_phases, point_sample
from .solidification_result import SolidificationResult
from .utils import local_sample, get_phase_amounts
from .ordering import create_ordering_records, rename_disordered_phases
def is_converged(eq):
"""
Return true if there are phase fractions that are non-NaN
Parameters
----------
eq : pycalphad.LightDataset
"""
if np.any(~np.isnan(eq.NP)):
return True
return False
def _update_points(eq, points_dict, dof_dict, local_pdens=0, verbose=False):
"""
Update the points_dict by appending new points.
Parameters
----------
eq : pycalphad.LightDataset
Point equilibrium result. Incompatible with xarray.Dataset objects.
points_dict : dict[str, np.ndarray]
Map of phase name to array of points
dof_dict : dict[str, list[int]]
Map of phase name to the sublattice degrees of freedom.
local_pdens : Optional[int]
Point density for local sampling. If zero (the default) only the equilibrium site fractions will be added.
verbose : Optional[bool]
"""
# Update the points dictionary with local samples around the equilibrium site fractions
for vtx in eq.vertex.squeeze():
ph = str(eq.Phase.squeeze()[vtx])
pts = points_dict.get(ph)
if pts is not None:
if verbose:
print(f'Adding points to {ph}. ', end='')
dof = dof_dict[ph]
eq_pts = eq.Y.squeeze()[vtx, :sum(dof)].reshape(1, -1)
if local_pdens > 0:
points_dict[ph] = np.concatenate([pts, local_sample(eq_pts, dof, pdens=local_pdens)], axis=0)
else:
points_dict[ph] = np.concatenate([pts, eq_pts], axis=0)
def simulate_scheil_solidification(dbf, comps, phases, composition,
start_temperature, step_temperature=1.0,
liquid_phase_name='LIQUID', eq_kwargs=None,
stop=0.0001, verbose=False, adaptive=True):
"""Perform a Scheil-Gulliver solidification simulation.
Parameters
----------
dbf : pycalphad.Database
Database object.
comps : list
List of components in the system.
phases : list
List of phases in the system.
composition : Dict[v.X, float]
Dictionary of independent `v.X` composition variables.
start_temperature : float
Starting temperature for simulation. Must be single phase liquid.
step_temperature : Optional[float]
Temperature step size. Defaults to 1.0.
liquid_phase_name : Optional[str]
Name of the phase treated as liquid (i.e. the phase with infinitely
fast diffusion). Defaults to 'LIQUID'.
eq_kwargs: Optional[Dict[str, Any]]
Keyword arguments for equilibrium
stop: Optional[float]
Stop when the phase fraction of liquid is below this amount.
adaptive: Optional[bool]
Whether to add additional points near the equilibrium points at each
step. Only takes effect if ``points`` is in the eq_kwargs dict.
Returns
-------
SolidificationResult
"""
eq_kwargs = eq_kwargs or dict()
STEP_SCALE_FACTOR = 1.2 # How much to try to adapt the temperature step by
MAXIMUM_STEP_SIZE_REDUCTION = 5.0
T_STEP_ORIG = step_temperature
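    # When the liquid phase disappears between two steps, the loop below steps
    # back and shrinks the temperature step by STEP_SCALE_FACTOR, giving up
    # once the step has shrunk by more than MAXIMUM_STEP_SIZE_REDUCTION
    # relative to the original step size.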
phases = filter_phases(dbf, unpack_components(dbf, comps), phases)
ordering_records = create_ordering_records(dbf, comps, phases)
models = instantiate_models(dbf, comps, phases)
if verbose:
print('building PhaseRecord objects... ', end='')
phase_records = build_phase_records(dbf, comps, phases, [v.N, v.P, v.T], models)
if verbose:
print('done')
filtered_disordered_phases = {ord_rec.disordered_phase_name for ord_rec in ordering_records}
solid_phases = sorted((set(phases) | filtered_disordered_phases) - {liquid_phase_name})
temp = start_temperature
independent_comps = sorted([str(comp)[2:] for comp in composition.keys()])
x_liquid = {comp: [composition[v.X(comp)]] for comp in independent_comps}
fraction_solid = [0.0]
temperatures = [temp]
phase_amounts = {ph: [0.0] for ph in solid_phases}
if adaptive:
dof_dict = {phase_name: list(map(len, mod.constituents)) for phase_name, mod in models.items()}
eq_kwargs.setdefault('calc_opts', {})
# TODO: handle per-phase/unpackable points and pdens
if 'points' not in eq_kwargs['calc_opts']:
if verbose:
print('generating points... ', end='')
points_dict = {}
for phase_name, mod in models.items():
if verbose:
print(phase_name, end=' ')
pdens = eq_kwargs['calc_opts'].get('pdens', 50)
points_dict[phase_name] = _sample_phase_constitution(mod, point_sample, True, pdens=pdens)
eq_kwargs['calc_opts']['points'] = points_dict
if verbose:
print('done')
converged = False
phases_seen = {liquid_phase_name, ''}
liquid_comp = composition
while fraction_solid[-1] < 1:
conds = {v.T: temp, v.P: 101325.0, v.N: 1.0}
comp_conds = liquid_comp
fmt_comp_conds = ', '.join([f'{c}={val:0.2f}' for c, val in comp_conds.items()])
conds.update(comp_conds)
eq = equilibrium(dbf, comps, phases, conds, model=models, phase_records=phase_records, to_xarray=False, **eq_kwargs)
if adaptive:
_update_points(eq, eq_kwargs['calc_opts']['points'], dof_dict, verbose=verbose)
eq = eq.get_dataset() # convert LightDataset to Dataset for fancy indexing
eq = rename_disordered_phases(eq, ordering_records)
eq_phases = eq.Phase.values.squeeze().tolist()
new_phases_seen = set(eq_phases).difference(phases_seen)
if len(new_phases_seen) > 0:
if verbose:
print(f'New phases seen: {new_phases_seen}. ', end='')
phases_seen |= new_phases_seen
if liquid_phase_name not in eq["Phase"].values.squeeze():
found_ph = set(eq_phases) - {''}
if verbose:
print(f'No liquid phase found at T={temp:0.3f}, {fmt_comp_conds}. (Found {found_ph}) ', end='')
if len(found_ph) == 0:
# No phases found in equilibrium. Just continue on lowering the temperature without changing anything
if verbose:
print(f'(Convergence failure) ', end='')
if T_STEP_ORIG / step_temperature > MAXIMUM_STEP_SIZE_REDUCTION:
# Only found solid phases and the step size has already been reduced. Stop running without converging.
if verbose:
print('Maximum step size reduction exceeded. Stopping.')
converged = False
break
else:
# Only found solid phases. Try reducing the step size to zero-in on the correct phases
if verbose:
print(f'Stepping back and reducing step size.')
temp += step_temperature
step_temperature /= STEP_SCALE_FACTOR
temp -= step_temperature
continue
# TODO: Will break if there is a liquid miscibility gap
liquid_vertex = sorted(np.nonzero(eq["Phase"].values.squeeze().flat == liquid_phase_name))[0]
liquid_comp = {}
for comp in independent_comps:
x = float(eq["X"].isel(vertex=liquid_vertex).squeeze().sel(component=comp).values)
x_liquid[comp].append(x)
liquid_comp[v.X(comp)] = x
np_liq = np.nansum(eq.where(eq["Phase"] == liquid_phase_name).NP.values)
current_fraction_solid = float(fraction_solid[-1])
found_phase_amounts = [(liquid_phase_name, np_liq)] # tuples of phase name, amount
for solid_phase in solid_phases:
if solid_phase not in eq_phases:
phase_amounts[solid_phase].append(0.0)
continue
np_tieline = np.nansum(eq.isel(vertex=eq_phases.index(solid_phase))["NP"].values.squeeze())
found_phase_amounts.append((solid_phase, np_tieline))
delta_fraction_solid = (1 - current_fraction_solid) * np_tieline
current_fraction_solid += delta_fraction_solid
phase_amounts[solid_phase].append(delta_fraction_solid)
fraction_solid.append(current_fraction_solid)
temperatures.append(temp)
NL = 1 - fraction_solid[-1]
if verbose:
phase_amnts = ' '.join([f'NP({ph})={amnt:0.3f}' for ph, amnt in found_phase_amounts])
if NL < 1.0e-3:
print(f'T={temp:0.3f}, {fmt_comp_conds}, ΔT={step_temperature:0.3f}, NL: {NL:.2E}, {phase_amnts} ', end='')
else:
print(f'T={temp:0.3f}, {fmt_comp_conds}, ΔT={step_temperature:0.3f}, NL: {NL:0.3f}, {phase_amnts} ', end='')
if NL < stop:
if verbose:
print(f'Liquid fraction below criterion {stop} . Stopping at {fmt_comp_conds}')
converged = True
break
if verbose:
print() # add line break
temp -= step_temperature
if fraction_solid[-1] < 1:
for comp in independent_comps:
x_liquid[comp].append(np.nan)
fraction_solid.append(1.0)
temperatures.append(temp)
# set the final phase amount to the phase fractions in the eutectic
# this method gives the sum total phase amounts of 1.0 by construction
for solid_phase in solid_phases:
if solid_phase in eq_phases:
amount = np.nansum(eq.isel(vertex=eq_phases.index(solid_phase))["NP"].values.squeeze())
phase_amounts[solid_phase].append(float(amount) * (1 - current_fraction_solid))
else:
phase_amounts[solid_phase].append(0.0)
return SolidificationResult(x_liquid, fraction_solid, temperatures, phase_amounts, converged, "scheil")
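# Illustrative usage (the database file and composition are assumptions, and
# the result attribute names are inferred from the constructor arguments):
#     from pycalphad import Database, variables as v
#     dbf = Database('alzn_mey.tdb')  # hypothetical TDB file
#     sol_res = simulate_scheil_solidification(
#         dbf, ['AL', 'ZN', 'VA'], sorted(dbf.phases.keys()),
#         {v.X('ZN'): 0.3}, start_temperature=850.0, step_temperature=1.0)
#     print(sol_res.temperatures[-1], sol_res.fraction_solid[-1])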
def simulate_equilibrium_solidification(dbf, comps, phases, composition,
start_temperature, step_temperature=1.0,
liquid_phase_name='LIQUID', adaptive=True, eq_kwargs=None,
binary_search_tol=0.1,
verbose=False):
"""
Compute the equilibrium solidification path.
    Decreases temperature until no liquid is found, performing a binary search to get the solidus temperature.
    Parameters
    ----------
dbf : pycalphad.Database
Database object.
comps : list
List of components in the system.
phases : list
List of phases in the system.
composition : Dict[v.X, float]
Dictionary of independent `v.X` composition variables.
start_temperature : float
Starting temperature for simulation. Should be single phase liquid.
step_temperature : Optional[float]
Temperature step size. Defaults to 1.0.
liquid_phase_name : Optional[str]
Name of the phase treated as liquid (i.e. the phase with infinitely
fast diffusion). Defaults to 'LIQUID'.
eq_kwargs: Optional[Dict[str, Any]]
Keyword arguments for equilibrium
binary_search_tol : float
Stop the binary search when the difference between temperatures is less than this amount.
adaptive: Optional[bool]
Whether to add additional points near the equilibrium points at each
step. Only takes effect if ``points`` is in the eq_kwargs dict.
"""
eq_kwargs = eq_kwargs or dict()
phases = filter_phases(dbf, unpack_components(dbf, comps), phases)
ordering_records = create_ordering_records(dbf, comps, phases)
filtered_disordered_phases = {ord_rec.disordered_phase_name for ord_rec in ordering_records}
solid_phases = sorted((set(phases) | filtered_disordered_phases) - {liquid_phase_name})
independent_comps = sorted([str(comp)[2:] for comp in composition.keys()])
models = instantiate_models(dbf, comps, phases)
if verbose:
print('building PhaseRecord objects... ', end='')
phase_records = build_phase_records(dbf, comps, phases, [v.N, v.P, v.T], models)
if verbose:
print('done')
conds = {v.P: 101325, v.N: 1.0}
conds.update(composition)
if adaptive:
dof_dict = {phase_name: list(map(len, mod.constituents)) for phase_name, mod in models.items()}
eq_kwargs.setdefault('calc_opts', {})
# TODO: handle per-phase/unpackable points and pdens
if 'points' not in eq_kwargs['calc_opts']:
# construct a points dict for the user
points_dict = {}
for phase_name, mod in models.items():
pdens = eq_kwargs['calc_opts'].get('pdens', 50)
points_dict[phase_name] = _sample_phase_constitution(mod, point_sample, True, pdens=pdens)
eq_kwargs['calc_opts']['points'] = points_dict
temperatures = []
x_liquid = {comp: [] for comp in independent_comps}
fraction_solid = []
phase_amounts = {ph: [] for ph in solid_phases} # instantaneous phase amounts
cum_phase_amounts = {ph: [] for ph in solid_phases}
converged = False
current_T = start_temperature
if verbose:
print('T=')
while (fraction_solid[-1] < 1 if len(fraction_solid) > 0 else True) and not converged:
sys.stdout.flush()
conds[v.T] = current_T
if verbose:
print(f'{current_T} ', end='')
eq = equilibrium(dbf, comps, phases, conds, model=models, phase_records=phase_records, to_xarray=False, **eq_kwargs)
if not is_converged(eq):
if verbose:
comp_conds = {cond: val for cond, val in conds.items() if isinstance(cond, v.X)}
print(f"Convergence failure at T={conds[v.T]} X={comp_conds} ")
if adaptive:
# Update the points dictionary with local samples around the equilibrium site fractions
_update_points(eq, eq_kwargs['calc_opts']['points'], dof_dict)
if liquid_phase_name in eq.Phase:
# Add the liquid phase composition
# TODO: will break in a liquid miscibility gap
liquid_vertex = np.nonzero(eq.Phase == liquid_phase_name)[-1][0]
for comp in independent_comps:
x_liquid[comp].append(float(eq.X[..., liquid_vertex, eq.component.index(comp)]))
temperatures.append(current_T)
current_T -= step_temperature
else:
# binary search to find the solidus
T_high = current_T + step_temperature # High temperature, liquid
T_low = current_T # Low temperature, solids only
found_ph = set(eq.Phase[eq.Phase != ''].tolist())
if verbose:
print(f'Found phases {found_ph}. Starting binary search between T={(T_low, T_high)} ', end='')
while (T_high - T_low) > binary_search_tol:
bin_search_T = (T_high - T_low) * 0.5 + T_low
conds[v.T] = bin_search_T
eq = equilibrium(dbf, comps, phases, conds, model=models, phase_records=phase_records, to_xarray=False, **eq_kwargs)
if adaptive:
# Update the points dictionary with local samples around the equilibrium site fractions
_update_points(eq, eq_kwargs['calc_opts']['points'], dof_dict)
if not is_converged(eq):
if verbose:
comp_conds = {cond: val for cond, val in conds.items() if isinstance(cond, v.X)}
print(f"Convergence failure at T={conds[v.T]} X={comp_conds} ")
if liquid_phase_name in eq.Phase:
T_high = bin_search_T
else:
T_low = bin_search_T
converged = True
conds[v.T] = T_low
temperatures.append(T_low)
eq = equilibrium(dbf, comps, phases, conds, model=models, phase_records=phase_records, to_xarray=False, **eq_kwargs)
if not is_converged(eq):
if verbose:
comp_conds = {cond: val for cond, val in conds.items() if isinstance(cond, v.X)}
print(f"Convergence failure at T={conds[v.T]} X={comp_conds} ")
if verbose:
found_phases = set(eq.Phase[eq.Phase != ''].tolist())
print(f"Finshed binary search at T={conds[v.T]} with phases={found_phases} and NP={eq.NP.squeeze()[:len(found_phases)]}")
if adaptive:
# Update the points dictionary with local samples around the equilibrium site fractions
_update_points(eq, eq_kwargs['calc_opts']['points'], dof_dict)
# Set the liquid phase composition to NaN
for comp in independent_comps:
x_liquid[comp].append(float(np.nan))
# Calculate fraction of solid and solid phase amounts
current_fraction_solid = 0.0
eq = rename_disordered_phases(eq.get_dataset(), ordering_records)
current_cum_phase_amnts = get_phase_amounts(eq.Phase.values.squeeze(), eq.NP.squeeze(), solid_phases)
for solid_phase, amount in current_cum_phase_amnts.items():
# Since the equilibrium calculations always give the "cumulative" phase amount,
# we need to take the difference to get the instantaneous.
cum_phase_amounts[solid_phase].append(amount)
if len(phase_amounts[solid_phase]) == 0:
phase_amounts[solid_phase].append(amount)
else:
phase_amounts[solid_phase].append(amount - cum_phase_amounts[solid_phase][-2])
current_fraction_solid += amount
fraction_solid.append(current_fraction_solid)
converged = True if np.isclose(fraction_solid[-1], 1.0) else False
return SolidificationResult(x_liquid, fraction_solid, temperatures, phase_amounts, converged, "equilibrium")
| UTF-8 | Python | false | false | 18,321 | py | 13 | simulate.py | 8 | 0.606092 | 0.600306 | 0 | 375 | 47.850667 | 137 |
ADebut/Leetcode | 11,201,274,722,322 | ab5bfe079e43ab1ad3193155b2e40067859bf51a | 792ae5d2a5c17af4f2ccfa582e3aeec569a6809a | /152. Maximum Product Subarray.py | 92c11b929ddaf29f1906129aaf0cb68135999799 | [] | no_license | https://github.com/ADebut/Leetcode | 396b8b95ad5b5e623db2839bbfdec861c4c1731f | 7333d481e00e8c1bc5b827d1d4ccd6e4d291abd7 | refs/heads/master | "2020-07-05T18:48:27.504540" | "2019-10-28T10:51:43" | "2019-10-28T10:51:43" | 202,735,925 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class Solution:
def maxProduct(self, nums: List[int]) -> int:
MAX = nums[0]
dp_min = [0 for x in range(len(nums))]
dp_max = [0 for x in range(len(nums))]
dp_min[0] = dp_max[0] = nums[0]
for i in range(1, len(nums)):
dp_max[i] = max([dp_max[i - 1] * nums[i], dp_min[i - 1] * nums[i], nums[i]])
dp_min[i] = min([dp_max[i - 1] * nums[i], dp_min[i - 1] * nums[i], nums[i]])
MAX = max(MAX, dp_max[i])
return MAX | UTF-8 | Python | false | false | 494 | py | 76 | 152. Maximum Product Subarray.py | 75 | 0.477733 | 0.455466 | 0 | 11 | 44 | 88 |
annahs/atmos_research | 10,368,051,092,408 | a114324ab9b8a364f48d0c174257652b44f7339b | bff706dc4ce7552200e1d515afa9f0c461eb18a5 | /WHI_CO_vs_alt.py | c8fa617483a4e0c8a83293de520f046a08fd05c4 | [
"MIT"
] | permissive | https://github.com/annahs/atmos_research | 1fffc24d43ddacf460a6d16f0b5264e6692023c5 | b5853c9b12e327492f8f8ba5069bca3fd2e981c8 | refs/heads/master | "2020-12-25T16:59:30.261245" | "2016-08-10T20:47:59" | "2016-08-10T20:47:59" | 32,475,832 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import matplotlib.pyplot as plt
import numpy as np
from matplotlib import dates
import os
import pickle
from datetime import datetime
from pprint import pprint
import sys
from datetime import timedelta
import calendar
import mysql.connector
from pyhdf.SD import SD, SDC, SDS
year_to_plot = 2009
#fire times
timezone = timedelta(hours = -8)
fire_time1 = [datetime.strptime('2009/07/27 00:00', '%Y/%m/%d %H:%M'), datetime.strptime('2009/08/08 00:00', '%Y/%m/%d %H:%M')] #row_datetimes following Takahama et al (2011) doi:10.5194/acp-11-6367-2011 #PST
fire_time1_UNIX_UTC_start = float(calendar.timegm((fire_time1[0]-timezone).utctimetuple()))
fire_time1_UNIX_UTC_end = float(calendar.timegm((fire_time1[1]-timezone).utctimetuple()))
fire_time2 = [datetime.strptime('2010/07/26 09:00', '%Y/%m/%d %H:%M'), datetime.strptime('2010/07/28 09:30', '%Y/%m/%d %H:%M')] #jason's BC clear report #PST
fire_time2_UNIX_UTC_start = float(calendar.timegm((fire_time2[0]-timezone).utctimetuple()))
fire_time2_UNIX_UTC_end = float(calendar.timegm((fire_time2[1]-timezone).utctimetuple()))
#database connection
cnx = mysql.connector.connect(user='root', password='Suresh15', host='localhost', database='black_carbon')
cursor = cnx.cursor()
#set up cluster dictionaries
default_data = {}
other_data = {}
GC_LRT = dict.fromkeys(range(1,47))
GC_NPac = dict.fromkeys(range(1,47))
GC_SPac = dict.fromkeys(range(1,47))
GC_Cont = dict.fromkeys(range(1,47))
GC_all = dict.fromkeys(range(1,47))
for key in GC_LRT:
GC_LRT [key] =[]
GC_NPac[key] =[]
GC_SPac[key] =[]
GC_Cont[key] =[]
GC_all[key] =[]
default_data['NPac'] = GC_NPac
default_data['SPac'] = GC_SPac
default_data['Cont'] = GC_Cont
default_data['LRT'] = GC_LRT
default_data['all'] = GC_all
lat = 20 #20 corresponds to 50deg
lon = 7 #7 corresponds to -122.5deg
molar_mass_BC = 12.0107 #in g/mol
ng_per_g = 10**9
R = 8.3144621 # in m3*Pa/(K*mol)
GEOS_Chem_factor = 10**-9
start_hour = 4
end_hour = 16
data_dir = 'C:/Users/Sarah Hanna/Documents/Data/WHI long term record/GOES-Chem/Junwei_runs/default/'
os.chdir(data_dir)
for file in os.listdir(data_dir):
if file.endswith('.hdf'):
file_year = int(file[2:6])
file_month = int(file[6:8])
file_day = int(file[8:10])
file_hour = int(file[11:13])
GC_datetime = datetime(file_year,file_month,file_day,file_hour)
GC_UNIX_UTC_ts = calendar.timegm(GC_datetime.utctimetuple())
if file_year not in [year_to_plot]:
continue
if start_hour <= file_hour < end_hour: #ignore any times not in the 2000-0800 window
#avoid fire times
if (fire_time1_UNIX_UTC_start <= GC_UNIX_UTC_ts <= fire_time1_UNIX_UTC_end) or (fire_time2_UNIX_UTC_start <= GC_UNIX_UTC_ts <= fire_time2_UNIX_UTC_end):
continue
#avoid high RH times
cursor.execute(('SELECT RH from whi_high_rh_times_2009to2012 where high_RH_start_time <= %s and high_RH_end_time > %s'),(GC_UNIX_UTC_ts,GC_UNIX_UTC_ts))
RH_data = cursor.fetchone()
if RH_data != None:
if RH_data[0] > 90:
continue
#get appropriate cluster
cursor.execute(('SELECT cluster_number FROM whi_ft_cluster_times_2009to2012 where cluster_start_time <= %s and cluster_end_time > %s'),(GC_UNIX_UTC_ts,GC_UNIX_UTC_ts))
cluster_number_result = cursor.fetchone()
if cluster_number_result == None:
continue
else:
cluster_number = cluster_number_result[0]
if cluster_number in [1,3,5,10]:
cluster = 'NPac'
if cluster_number in [6,8,9]:
cluster = 'SPac'
if cluster_number in [4]:
cluster = 'Cont'
if cluster_number in [2,7]:
cluster = 'LRT'
hdf_file = SD(file, SDC.READ)
GC_CO = hdf_file.select('IJ-AVG-$::CO') #3d CO data in ppbv (molBC/molAIR)
pressures = hdf_file.select('PEDGE-$::PSURF')
hydrophilic_BC = hdf_file.select('IJ-AVG-$::BCPI') #3d conc data in ppbv (molBC/molAIR)
hydrophobic_BC = hdf_file.select('IJ-AVG-$::BCPO')
i=0
for level in range(1,47):
pressure = pressures[level,lat,lon]
total_BC_ppbv = hydrophilic_BC[level,lat,lon] + hydrophobic_BC[level,lat,lon]
BC_conc_ngm3 = total_BC_ppbv*molar_mass_BC*ng_per_g*GEOS_Chem_factor*(101325/(R*273)) #101325/(R*273) corrects to STP
CO_ppbv = GC_CO[level,lat,lon]
BC_CO = BC_conc_ngm3/CO_ppbv
default_data[cluster][level].append([pressure,CO_ppbv,GC_datetime])
default_data['all'][level].append([pressure,CO_ppbv,GC_datetime])
hdf_file.end()
mean_dict = {'all':[],'NPac':[],'SPac':[],'Cont':[],'LRT':[]}
for cluster in default_data:
for level in default_data[cluster]:
FT_intervals = {}
for row in default_data[cluster][level]:
GC_datetime = row[2]
pressure = row[0]
CO = row[1]
if start_hour <= GC_datetime.hour < (start_hour+6):
period_midtime = datetime(GC_datetime.year,GC_datetime.month,GC_datetime.day,7)
if (start_hour+6) <= GC_datetime.hour < end_hour:
period_midtime = datetime(GC_datetime.year,GC_datetime.month,GC_datetime.day,13)
if period_midtime in FT_intervals:
FT_intervals[period_midtime].append([pressure,CO])
else:
FT_intervals[period_midtime] = [[pressure,CO]]
temp = []
#get 6-hr means - analogous to measurements
for period_midtime in FT_intervals:
mean_conc = np.mean([row[1] for row in FT_intervals[period_midtime]])
mean_pressure = np.mean([row[0] for row in FT_intervals[period_midtime]])
temp.append([mean_pressure,mean_conc])
#get medians for each level
average_p = np.mean([row[0] for row in temp])
med_CO = np.median([row[1] for row in temp])
min_err_CO = med_CO - np.percentile([row[1] for row in temp],25)
max_err_CO = np.percentile([row[1] for row in temp],75) - med_CO
mean_dict[cluster].append([average_p,med_CO,min_err_CO,max_err_CO])
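# the stored error values are interquartile distances (median minus 25th
# percentile, and 75th percentile minus median), matching the asymmetric xerr
# pairs passed to errorbar() in the plotting code below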
cursor.execute(('''SELECT measCO.UNIX_UTC_start_time, mc.cluster_number, measCO.CO_ppbv
FROM whi_gc_and_sp2_6h_mass_concs mc
JOIN whi_co_data measCO on mc.CO_meas_id = measCO.id
WHERE mc.RH_threshold = 90 and measCO.CO_ppbv < 250''')
)
data = cursor.fetchall()
CO_by_cluster = {
'all':[],
'NPac':[],
'SPac':[],
'Cont':[],
'LRT':[]
}
for row in data:
CO_start_time = datetime.utcfromtimestamp(row[0])
cluster_number = row[1]
CO_conc = row[2]
if CO_start_time.year == year_to_plot:
CO_by_cluster['all'].append(CO_conc)
if cluster_number in [6,8,9]:
CO_by_cluster['SPac'].append(CO_conc)
if cluster_number in [4]:
CO_by_cluster['Cont'].append(CO_conc)
if cluster_number in [2,7]:
CO_by_cluster['LRT'].append(CO_conc)
if cluster_number in [1,3,5,10]:
CO_by_cluster['NPac'].append(CO_conc)
cnx.close()
#### plotting
cluster_list = ['all','NPac','SPac','Cont','LRT']
colors = ['k','c','g','m','b']
fig = plt.figure()
ax1 = plt.subplot2grid((1,1), (0,0), colspan=1)
default_concs = [row[1] for row in mean_dict['all']]
default_concs_min_err = [row[2] for row in mean_dict['all']]
default_concs_max_err = [row[3] for row in mean_dict['all']]
default_pressures = [row[0] for row in mean_dict['all']]
#ax1.errorbar(108, 781.5,xerr=[[13],[28]],fmt='o',color = colors[i])
#ax1.errorbar(105.6, 781.5,xerr=[[11.7],[12.6]],fmt='o',color = 'b') #2009,2010 (june-aug)
#ax1.errorbar(106.4, 781.5,xerr=[[7],[35]],fmt='o',color = 'b') #2009 (june-aug)
ax1.errorbar(103, 781.5,xerr=[[13],[13]],fmt='o',color = 'r') #2010 (june-aug)
#ax1.errorbar(141.4, 781.5,xerr=[[10],[7]],fmt='o',color = colors[i]) #2012 (apr-may)
ax1.errorbar(default_concs, default_pressures,xerr=[default_concs_min_err,default_concs_max_err],fmt='s',color = 'b',linestyle='-')
ax1.invert_yaxis()
ax1.axhspan(770,793, facecolor='grey', alpha=0.25) #95% CI for pressure at WHI
ax1.set_ylim(910,510)
ax1.set_xlim(80,240)
#ax1.text(0.25, 0.9,'All Data', transform=ax1.transAxes)
ax1.set_xlabel('CO ppbv')
ax1.set_ylabel('Pressure (hPa)')
plt.show()
fig, axes = plt.subplots(3,2, figsize=(8, 10), facecolor='w', edgecolor='k')
fig.subplots_adjust(hspace = 0., wspace=0.0)
axs = axes.ravel()
axes[-1, -1].axis('off')
i=0
for cluster in cluster_list:
	print(cluster)
default_concs = [row[1] for row in mean_dict[cluster]]
default_concs_min_err = [row[2] for row in mean_dict[cluster]]
default_concs_max_err = [row[3] for row in mean_dict[cluster]]
default_pressures = [row[0] for row in mean_dict[cluster]]
meas_CO = [np.mean(CO_by_cluster[cluster])]
meas_CO_min = [meas_CO - np.percentile(CO_by_cluster[cluster],25)]
meas_CO_max = [np.percentile(CO_by_cluster[cluster],75) - meas_CO]
vert_plot_GC = axs[i].errorbar(default_concs, default_pressures,xerr=[default_concs_min_err,default_concs_max_err],fmt='.',color = colors[i],linestyle='-')
vert_plot_meas = axs[i].errorbar(meas_CO, 781.5,xerr=[meas_CO_min,meas_CO_max],fmt='o',color = colors[i])
axs[i].invert_yaxis()
axs[i].axhspan(770,793, facecolor='grey', alpha=0.25) #95% CI for pressure at WHI
axs[i].set_ylim(910,510)
axs[i].set_xlim(80,240)
if i == 0:
axs[i].text(0.25, 0.9,'All Data', transform=axs[i].transAxes)
if i == 1:
axs[i].text(0.25, 0.9,'N. Pacific', transform=axs[i].transAxes)
if i == 2:
axs[i].text(0.25, 0.9,'S. Pacific', transform=axs[i].transAxes)
axs[i].set_ylabel('Pressure (hPa)')
if i == 3:
axs[i].text(0.25, 0.9,'N. Canada', transform=axs[i].transAxes)
axs[i].set_xlabel('CO ppbv')
if i == 4:
axs[i].text(0.25, 0.9,'W. Pacific/Asia', transform=axs[i].transAxes)
axs[i].set_xlabel('CO ppbv')
if i in [1,3]:
axs[i].yaxis.set_label_position('right')
axs[i].yaxis.tick_right()
if i in [0,1,2]:
axs[i].set_xticklabels([])
i+=1
data_dir = 'C:/Users/Sarah Hanna/Documents/Data/WHI long term record/CO data/'
os.chdir(data_dir)
plt.savefig('GCv10 vertical profile - CO ' + str(year_to_plot) + '.png',bbox_inches='tight')
plt.show()
| UTF-8 | Python | false | false | 9,762 | py | 222 | WHI_CO_vs_alt.py | 222 | 0.658574 | 0.610121 | 0 | 301 | 31.418605 | 207 |
abraker95/ultimate_osu_analyzer | 18,451,179,518,584 | 314cb333f7043433d9937335e701a5d65fa15c36 | 05167f142a1c05fc18612da9d7628ce0ba396ec3 | /osu/local/monitor.py | 7c916a9557a62dd718e0252bc41dd91df8cb529e | [
"MIT"
] | permissive | https://github.com/abraker95/ultimate_osu_analyzer | 73494c6507cc97d4cf627c16184cd87be632579b | 8b211a01c2364d51b8bf08e045e9280ec3a04242 | refs/heads/master | "2022-07-01T20:03:36.343409" | "2022-02-24T06:16:11" | "2022-02-24T06:16:11" | 172,823,599 | 30 | 4 | MIT | false | "2022-06-16T23:44:40" | "2019-02-27T01:58:13" | "2022-04-25T08:53:15" | "2022-06-16T23:44:37" | 9,796 | 24 | 3 | 19 | Python | false | false | import watchdog.observers
import watchdog.events
import os
class Monitor(watchdog.observers.Observer):
def __init__(self, osu_path):
watchdog.observers.Observer.__init__(self)
if not os.path.exists(osu_path):
raise Exception(f'"{osu_path}" does not exist!')
self.osu_path = osu_path
self.monitors = {}
self.start()
def __del__(self):
self.stop()
def create_replay_monitor(self, name, callback):
replay_path = f'{self.osu_path}/data/r'
if not os.path.exists(replay_path):
raise Exception(f'"{replay_path}" does not exist!')
class EventHandler(watchdog.events.FileSystemEventHandler):
def on_created(self, event): callback(event.src_path)
print(f'Created file creation monitor for {self.osu_path}/data/r')
self.monitors[name] = self.schedule(EventHandler(), replay_path, recursive=False)
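    # Illustrative usage (path and callback are assumptions):
    #     monitor = Monitor('C:/Games/osu!')  # hypothetical install path
    #     monitor.create_replay_monitor('replays', lambda p: print('new replay:', p))
    # watchdog then invokes the callback from a background thread whenever a
    # file is created under <osu_path>/data/r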
    def create_map_monitor(self, name, callback, beatmap_path):
# TODO
pass | UTF-8 | Python | false | false | 1,029 | py | 160 | monitor.py | 139 | 0.626822 | 0.626822 | 0 | 38 | 26.105263 | 89 |
ricardoarisequeira/AutomaticDocumentSummarization | 12,189,117,226,486 | f1e4212f637b40f6595e677971f5f80a088b3b71 | 1bd3d4e3c037ad35e6a780105946f12d1b1d7d6a | /project_part1/exercise-4.py | 86acff04ef8886252da02335a7097f12a0521b78 | [] | no_license | https://github.com/ricardoarisequeira/AutomaticDocumentSummarization | 351704dd62b3484343d780eb3359634ecefcbe52 | ec057f1a516bd7c75e164995bfe98ad5fb1f003a | refs/heads/master | "2020-09-21T00:59:52.675132" | "2019-11-28T11:17:08" | "2019-11-28T11:17:08" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from nltk import sent_tokenize
from os import listdir
from customvectorizer import CustomVectorizer
from customvectorizer import similarity
import re
# gets a list of Portuguese stopwords from file
stopwords = open('stopwords.txt').read().splitlines()
# function to pre process sentences
def preprocessor(document):
document = re.sub(
r'([a-zA-ZáàâãéêíóõôúçÁÀÂÃÉÊÍÓÕÔÚÇ, ])\n', r'\1.\n', document)
document = re.sub(r'[0-9"-,()ºª;$€&]+', '', document)
document = document.replace('\n', ' ')
return document
def summary(vectorizer, file):
# gets file sentences
doc = open(file, 'r', encoding='iso-8859-1').read()
sentences = sent_tokenize(preprocessor(doc))
# calculate tfidf vector for document sentences and all document
vectors = vectorizer.transform_tfidf(sentences)
docVector = vectorizer.transform_tfidf([doc])
# calculate similarity for each sentence
sim = []
for vector in vectors:
sim.append(similarity(vector, docVector[0]))
# calculate MMR value for each sentence
selected = []
var = 0.05
while len(selected) < 5:
mmr = []
for s in range(len(sim)):
mmr_value = (1 - var) * sim[s]
for sentence in selected:
mmr_value -= var * similarity(vectors[s], vectors[sentence])
mmr.append(mmr_value)
indexOfMax = max(enumerate(mmr), key=lambda x: x[1])[0]
selected.append(indexOfMax)
sim[indexOfMax] = 0
# returns the list of selected sentences
res = []
for i in selected:
res.append(sentences[i])
return res
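# The loop above is a variant of Maximal Marginal Relevance (MMR): relevance
# to the whole document is weighted by (1 - var) and a redundancy penalty of
# var times the similarity to each already-selected sentence is subtracted
# before taking the arg-max.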
def calculateStats(file, summary):
# gets file sentences
doc = open(file, 'r', encoding='iso-8859-1').read()
sentences = sent_tokenize(preprocessor(doc))[1:6]
# calculates number of true positives
true_positives = 0
for sentence in sentences:
for s in summary:
if sentence == s:
true_positives += 1
# calculates precision, recall and F1 score
precision = true_positives / len(summary)
recall = true_positives / 5
if true_positives == 0:
F1_score = format(0, '.6f')
else:
F1_score = format(2 * (precision * recall) /
(precision + recall), '.6f')
return [format(precision, '.6f'), format(recall, '.6f'), F1_score]
def main():
# initialize custom vectorizer with all documents collection
vectorizer = CustomVectorizer(
input='fromfiles', stopwords=stopwords, encoding='iso-8859-1')
documents = ['textos-fonte/' + d for d in listdir('textos-fonte')]
vectorizer.fit(documents)
vectorizer._input = 'content'
# print all statistics
MAP = 0
print('File\t\t\tPrecision\tRecall\t\tF1 Score')
for doc in listdir('textos-fonte'):
path = 'textos-fonte/' + doc
stats = calculateStats(path, summary(vectorizer, path))
MAP += float(stats[0])
print (doc + '\t\t' + stats[0] + '\t' + stats[1] + '\t' + stats[2])
print('\nMAP Score: ' + str(MAP / 100))
main()
| UTF-8 | Python | false | false | 3,107 | py | 14 | exercise-4.py | 10 | 0.62228 | 0.605391 | 0 | 102 | 29.186275 | 76 |
alema9/Fashion-Store | 111,669,160,248 | 294dd230c33578c89e3be39656e80f87662d3516 | 99ff850308eee8fa7fc3a9642cc908c85b1588b2 | /ecommerce/errors/handlers.py | 7875e56e567dd83eaf6e1588f56b5eb40cda2b49 | [] | no_license | https://github.com/alema9/Fashion-Store | a93340f82d0d966fc0cfc3f14bd573a4a3b3ebf3 | 0ea56586a0e4c83011dbd683e0ffad8540616fdd | refs/heads/master | "2023-08-29T00:42:26.305552" | "2020-02-13T16:14:51" | "2020-02-13T16:14:51" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from flask import Blueprint, render_template
errors = Blueprint('errors', __name__)
@errors.app_errorhandler(404)
def error_404(error):
return render_template('errors/404.html'), 404
# In Flask the returned status code defaults to 200, so the other routes don't need to return one explicitly
@errors.app_errorhandler(403)
def error_403(error):
return render_template('errors/403.html'), 403
@errors.app_errorhandler(500)
def error_500(error):
return render_template('errors/500.html'), 500
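# Illustrative registration (the `app` object is an assumption):
#     from ecommerce.errors.handlers import errors
#     app.register_blueprint(errors)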
| UTF-8 | Python | false | false | 514 | py | 17 | handlers.py | 6 | 0.731518 | 0.655642 | 0 | 16 | 30.125 | 109 |
ibrahim713/MyUmassFiles | 3,341,484,586,866 | 31a23b49ba97ffb1a340518e11350b4750189325 | 865d450ed619968d07dbc40551f5a9e911e4a278 | /it110/it116/midterm.py | 10ce4925ae252d524bdae493f1c15156a879224c | [] | no_license | https://github.com/ibrahim713/MyUmassFiles | 5b9fc63585f3852f495c236185ad767d1938cc81 | e288215277d86bad3a7d27e5d22c0abe7d5fb176 | refs/heads/master | "2021-01-25T08:13:33.943934" | "2017-07-18T17:00:00" | "2017-07-18T17:00:00" | 93,752,137 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
n=int(sys.argv[1])
ruler=''
for i in range(1,n+1):
ruler=ruler +str(i)+ruler
print(ruler)
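# Each pass wraps the previous ruler around the new digit, so n = 3 prints
# "1213121" (the classic ruler sequence)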
| UTF-8 | Python | false | false | 105 | py | 58 | midterm.py | 49 | 0.657143 | 0.628571 | 0 | 6 | 16.5 | 29 |
coolsnake/JupyterNotebook | 12,764,642,842,449 | edc9151e1713381918aed6b96af6003dab6dbef5 | da29f1f5b4459fbfec968bb694bedb9586f87b14 | /new_algs/Sequence+algorithms/Selection+algorithm/AgentFeatureSelection.py | b2091f1fc04a3861bfa896f23796d2c297035bdf | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | https://github.com/coolsnake/JupyterNotebook | 547806a45a663f090f313dc3e70f779ad9b213c0 | 20d8df6172906337f81583dabb841d66b8f31857 | refs/heads/master | "2023-01-13T18:55:38.615312" | "2020-11-17T22:55:12" | "2020-11-17T22:55:12" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import PhishingDetector as PD
import SpamDetector as SD
import numpy as np
import random
import datetime
feature_size_phishing = 30
feature_size_spam = 141
model_phishing = 1
model_spam = 0
classifier_neural_network = 1
classifier_svm = 0
class Agent():
# Init an Agent
def __init__(self, features_size):
"""
        Create a random list of 0s and 1s of length features_size,
        e.g. if features_size = 5 the chromosome = [1, 1, 1, 0, 1].
        features_size represents the number of features.
"""
self.chromosome = []
for x in range(features_size):
self.chromosome.append(random.randint(0,1))
self.chromosome = np.array(self.chromosome)
self.fitness = -1
def __str__(self):
return "Chromosome: " + str(self.chromosome) + ", with fitness " + str(self.fitness)
population = 20
generations = 100
selection_size = int(0.3 * population)
def ga(model = model_phishing, classifier = classifier_svm, features_size = feature_size_phishing):
agents = Agent.init_agents(Agent.population, features_size)
# the agent with the best fitness
best_agent = Agent(features_size)
# The generation the best agent was created
generation_best_agent = -1
for generation in range(Agent.generations):
print("Generation: "+str(generation))
agents = Agent.fitness(agents, model, classifier)
agents = Agent.selection(agents, features_size)
# check if the best new agent is better than the best_agent
if agents[0].fitness > best_agent.fitness:
# a new agent created have better fitness
best_agent.chromosome = agents[0].chromosome
best_agent.fitness = agents[0].fitness
generation_best_agent = generation
agents = Agent.crossover(agents, features_size)
agents = Agent.mutation(agents, features_size)
print('----------------------------------------Best Agent So Far in '+ str(generation_best_agent)+'----------------------------------')
print(best_agent)
print('----------------------------------------Best Agent So Far in '+ str(generation_best_agent)+'----------------------------------')
if any(agent.fitness >= 0.9 for agent in agents):
print("Found an agent")
print('\n'.join(map(str, agents)))
#get the best agent with minimum value of 0.9
best_agent = max(agents, key = lambda agent: agent.fitness)
Agent.print_best_agent(best_agent, generation_best_agent, model, classifier)
#break
exit(0)
# get the best agent at the end of the generation
Agent.print_best_agent(best_agent, generation_best_agent, model, classifier)
# This function creates initial population using the Agent class, the return is a list
# size population and each agent in the population must be size features_size
def init_agents(population, features_size):
return [Agent(features_size) for _ in range(population)]
# This function will calculate the fitness in each memeber of the population
def fitness(agents, model, classifier):
print("---------------------------------fitness-------------------------------")
if model is model_phishing and classifier is classifier_svm:
# Generate a phishing_detector for each agent with SVM
for agent in agents:
if agent.fitness is -1:
pd = PD.phishing_detector(agent.chromosome)
agent.fitness = float(pd.test_features_svm())
#agent.fitness = random.random()
print(agent)
elif model is model_phishing and classifier is classifier_neural_network:
# Generate a phishing_detector for each agent with ANN
for agent in agents:
pd = PD.phishing_detector(agent.chromosome)
agent.fitness = float(pd.test_features_neural_network())
print(agent)
elif model is model_spam and classifier is classifier_svm:
# Generate a spam detector for each agent with SVM
for agent in agents:
if agent.fitness is -1:
sd = SD.spam_detector(agent.chromosome)
agent.fitness = float(sd.test_features_svm())
print(agent)
elif model is model_spam and classifier is classifier_neural_network:
# Generate a spam detector for each agent with ANN
for agent in agents:
sd = SD.spam_detector(agent.chromosome)
agent.fitness = float(sd.test_features_neural_network())
print(agent)
return agents
# The selection will select the population to be go for the next generation,
# the population will be decide by the highest fitness function higher the
# probability to be selected
def selection(agents, features_size):
print("---------------------------------selection-------------------------------")
agents = sorted(agents, key = lambda agent: agent.fitness, reverse = True)
agents = agents[:Agent.selection_size]
print('\n'.join(map(str, agents)))
return agents
# The crossover will combine the agents that were selected in the selection function
def crossover(agents, features_size):
print("---------------------------------crossover-------------------------------")
# Method 1: Add new population and keep part of the old population
new_blood = []
for _ in range(int((Agent.population - len(agents))/ 2)):
parent1 = random.choice(agents)
parent2 = random.choice(agents)
child1 = Agent(features_size)
child2 = Agent(features_size)
split_point = random.randint(0, features_size)
child1.chromosome = np.concatenate((parent1.chromosome[0:split_point], parent2.chromosome[split_point:features_size]))
child2.chromosome = np.concatenate((parent2.chromosome[0:split_point], parent1.chromosome[split_point:features_size]))
new_blood.append(child1)
new_blood.append(child2)
agents.extend(new_blood)
return agents
"""
# Another method, create a totally new population
new_blood = []
for _ in range(int((Agent.population) / 2)):
parent1 = random.choice(agents)
parent2 = random.choice(agents)
child1 = Agent(features_size)
child2 = Agent(features_size)
split_point = random.randint(0, features_size)
child1.chromosome = np.concatenate((parent1.chromosome[0:split_point], parent2.chromosome[split_point:features_size]))
child2.chromosome = np.concatenate((parent2.chromosome[0:split_point], parent1.chromosome[split_point:features_size]))
new_blood.append(child1)
new_blood.append(child2)
# keep the best agent
new_blood[0].chromosome = agents[0].chromosome
new_blood[0].fitness = agents[0].fitness
return new_blood
"""
# The mutation will do random modification of the agents
def mutation(agents, features_size):
print("---------------------------------mutation-------------------------------")
for agent in agents:
for idx, param in enumerate(agent.chromosome):
if agent.fitness is -1 and random.uniform(0.0, 1.0) <= 0.05:
if agent.chromosome[idx] == 1:
new_value = np.array([0])
else:
new_value = np.array([1])
agent.chromosome = np.concatenate((agent.chromosome[0:idx], new_value, agent.chromosome[idx+1:features_size]))
return agents
def print_best_agent(agent, generation, model, classifier):
# this function will print the value of the agent in a file and appendint at the end
# get the file name
file_name = '_error_'
if model is model_phishing and classifier is classifier_svm:
# file for phishing_detector for each agent with SVM
file_name = 'result\\phishing_detector_svm.txt'
elif model is model_phishing and classifier is classifier_neural_network:
# file for phishing_detector for each agent with ANN
file_name = 'result\\phishing_detector_ann.txt'
elif model is model_spam and classifier is classifier_svm:
# file for spam detector for each agent with SVM
file_name = 'result\\spam_detector_svn.txt'
elif model is model_spam and classifier is classifier_neural_network:
# file for spam detector for each agent with ANN
file_name = 'result\\spam_detector_ann.txt'
# open file
f = open(file_name, 'a')
# get the current time
now = datetime.datetime.now()
# get the current time and date
current_time_date = now.isoformat()
f.write('By '+ current_time_date +' in the generation '+ str(generation) +' The best agent was: '+ str(agent)+'\n')
f.close()
if __name__ == '__main__':
"""
agent1 = Agent(30)
agent2 = Agent(30)
agent3 = Agent(30)
agent4 = Agent(30)
agent5 = Agent(30)
agent6 = Agent(30)
agent1.chromosome = np.array([1., 1., 0., 1., 1., 1., 1., 1., 1., 1., 0., 0., 1., 1., 1., 0., 1., 0., 1., 1., 0., 1., 1., 1., 1., 0., 1., 1., 1., 0.])
agent2.chromosome = np.array([1., 1., 1., 1., 1., 0., 1., 1., 0., 1., 1., 1., 0., 1., 1., 1., 0., 1., 1., 1., 1., 1., 1., 0., 1., 1., 1., 1., 0., 1.])
agent3.chromosome = np.array([1., 1., 0., 0., 1., 0., 1., 1., 1., 0., 0., 0., 0., 1., 1., 1., 1., 1., 0., 0., 1., 0., 1., 0., 0., 1., 1., 0., 1., 0.])
agent4.chromosome = np.array([1., 1., 1., 1., 1., 0., 1., 0., 1., 0., 1., 0., 0., 1., 1., 0., 0., 1., 1., 1., 1., 0., 1., 1., 1., 1., 1., 1., 1., 1.])
agent5.chromosome = np.array([1., 1., 0., 1., 1., 0., 0., 1., 1., 1., 1., 1., 1., 1., 0., 1., 1., 1., 1., 0., 1., 0., 1., 0., 0., 0., 1., 1., 0., 0.])
agent6.chromosome = np.array([0., 1., 1., 1., 1., 1., 1., 0., 1., 1., 1., 0., 1., 1., 0., 0., 0., 1., 1., 1., 1., 0., 0., 0., 0., 1., 1., 1., 0., 1.])
agents = [agent1, agent2, agent3, agent4, agent5, agent6]
Agent.fitness(agents, model_phishing, classifier_neural_network)
print('-----------------------------Fitness-------------------------------')
print('\n'.join(map(str, agents)))
Agent.selection(agents)
print('-----------------------------Selection-------------------------------')
print('\n'.join(map(str, agents)))
Agent.crossover(agents)
print('-----------------------------Crossover-------------------------------')
print('\n'.join(map(str, agents)))
Agent.mutation(agents)
print('-----------------------------Mutation-------------------------------')
print('\n'.join(map(str, agents)))
"""
# Phishing and Neural Network
#Agent.ga(model_phishing, classifier_neural_network, feature_size_phishing)
# Phishing and SVM
# Agent.ga(model_phishing, classifier_svm, feature_size_phishing)
# Spam and SVM
Agent.ga(model_spam, classifier_svm, feature_size_spam)
# Spam and Neural Networks
#Agent.ga(model_spam, classifier_neural_network, feature_size_spam)
## Run the code for the selection
| UTF-8 | Python | false | false | 10,243 | py | 1,523 | AgentFeatureSelection.py | 1,515 | 0.632432 | 0.603339 | 0 | 291 | 34.182131 | 151 |
jvmkit/downloader-ui | 11,510,512,391,702 | 18526a4f1786f5761b551e0c5c94abbc72b4dab7 | 681a1a3df34cd2c5dd6d2ea87f29c12b8193c01c | /src/resource.py | 81a68d357dc9a5a5a0df57c361465d582a061cd0 | [] | no_license | https://github.com/jvmkit/downloader-ui | b278d9d5b12a8cc3dbd29163aca5eb1ce4592b48 | c52d24756393403acc26eef4c36498e1c8ed7efe | refs/heads/master | "2023-05-01T14:31:59.739129" | "2021-02-28T02:26:09" | "2021-02-28T02:26:09" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
import json
import psutil
class Resource:
def list_downloaded(self):
download_path = os.path.dirname(os.path.abspath(__file__)) + '/dl'
def get_file_list(path:str):
ret_list = []
item_list = os.listdir(path)
for item in item_list:
my_path = path + '/' + item
if os.path.isdir(my_path) == True:
sub_file_list = get_file_list(my_path)
ret_list.append({
'type': 'directory',
'name': item,
'files': sub_file_list
})
else:
ret_list.append({
'type': 'file',
'name': item
})
return ret_list
file_list = get_file_list(download_path)
system_info = self.query_system_info()
ret_obj = {
'file_list' : file_list,
'system_info' : system_info
}
ret_str = json.dumps(ret_obj)
return ret_str
def query_system_info(self):
ret_obj = {}
cur_path = os.path.dirname(os.path.abspath(__file__)) + '/dl'
disk_usage_raw = psutil.disk_usage(cur_path)
ret_obj = {
'disk_usage' : disk_usage_raw.percent
}
return ret_obj
def delete_file(self,path: str):
file_path = os.path.dirname(os.path.abspath(__file__)) + '/dl/' + path
if not os.path.exists(file_path):
print("ignored")
return "IGNORED"
os.remove(file_path)
print("deleted")
return "DONE"
resource = Resource()
if __name__ == '__main__':
print(resource.list_downloaded()) | UTF-8 | Python | false | false | 1,766 | py | 19 | resource.py | 14 | 0.467724 | 0.467724 | 0 | 61 | 27.967213 | 78 |
zzy-program/00_script | 11,656,541,243,686 | e63faf3b56d7b2965ea7c325cf7aa8e3d52d7f5c | 9a8a72efcd39525eff48d77c895b5276440d383c | /python/kernel_sym/sort.py | 20e0eea5fcaef0e0de16f6d19ebbd82edbcb3530 | [] | no_license | https://github.com/zzy-program/00_script | 0d380052fca30ae6b8c5d73d07b4091bf5e28940 | 90870c0dd83c812c1b462202896240fb587c6c74 | refs/heads/master | "2020-03-26T18:54:51.302048" | "2018-08-18T17:12:50" | "2018-08-18T17:12:50" | 145,238,368 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/python
f = open('vmlinux_fun_filename.txt', 'r')
i = 0
my_dict = {}
for line in f:
line = line.strip('\n')
if i % 2 == 0:
fun_name = line
else:
dir_name = line
my_dict[fun_name] = dir_name
i = i+1
f.closed
sorted(my_dict.keys())
for k, v in my_dict.items():
print k, v
| UTF-8 | Python | false | false | 296 | py | 16 | sort.py | 7 | 0.584459 | 0.570946 | 0 | 20 | 13.75 | 41 |
javierramon23/Python-Crash-Course | 12,850,542,191,580 | 985529be32a75082f241cc150e04d4a53cafb2df | 24e53f24cbc457cb62555de1f319d279001d8539 | /8.- Functions/cities.py | ae061edd2da40199fcc7dfae868df111fdedd8dc | [] | no_license | https://github.com/javierramon23/Python-Crash-Course | c93d4726377ffa96310a0b10490c7f308fb0a7aa | 07fe05f149437d9fdfa9de9dbb1633835a5b5f92 | refs/heads/master | "2021-06-19T15:43:00.232933" | "2019-11-21T20:04:33" | "2019-11-21T20:04:33" | 196,716,791 | 0 | 0 | null | false | "2021-06-10T21:51:44" | "2019-07-13T12:02:41" | "2019-11-21T20:04:47" | "2021-06-10T21:51:42" | 22,168 | 0 | 0 | 3 | Python | false | false | def describe_city(city, country = 'España'):
print('{} es una ciudad de {}.'.format(city, country))
describe_city('Teruel')
describe_city('Valencia')
describe_city('New York', country = 'EEUU') | UTF-8 | Python | false | false | 199 | py | 131 | cities.py | 126 | 0.681818 | 0.681818 | 0 | 6 | 32.166667 | 58 |
Ykuee/eclipse-workSpace | 14,078,902,800,865 | ad59d1006eb5223d41dd52cc00ab35b848f11b9e | 54496feb7dbb5315c3649813c468d2eaeab494e2 | /pecan_test/paste/master_valve.py | 82c90a82f161f58a3128934b2c751943219b36e1 | [] | no_license | https://github.com/Ykuee/eclipse-workSpace | 17833c7abd81ed820c85e2e874ef70273be2e9d3 | 118cfe74380b94a68dd4ae0b4829b0989e625587 | refs/heads/master | "2021-06-06T09:54:17.404097" | "2017-06-16T01:35:23" | "2017-06-16T01:35:23" | 94,492,812 | 0 | 1 | null | false | "2020-07-22T21:08:22" | "2017-06-16T01:28:13" | "2017-06-16T01:44:30" | "2017-06-23T07:54:01" | 130,138 | 0 | 1 | 1 | Python | false | false | #!/usr/bin/python
# -*- coding: UTF-8 -*-
#master_valve.py
from wsgiref.simple_server import make_server
from paste import httpserver
from paste.deploy import loadapp
import os
if __name__ == '__main__':
configfile = 'config.ini'
appname = 'main'
wsgi_app = loadapp('config:%s' % os.path.abspath(configfile), appname)
print wsgi_app
print os.path.abspath(configfile)
#httpserver.serve(loadapp('config:configure.ini', relative_to = '.'), host = '127.0.0.1', port=8000)
server = make_server('localhost', 8000, wsgi_app)
print "已启动,在127.0.0.1:8000"
server.serve_forever()
| UTF-8 | Python | false | false | 645 | py | 182 | master_valve.py | 61 | 0.640945 | 0.601575 | 0 | 19 | 32.210526 | 106 |
13555785106/PythonPPT-01 | 16,484,084,504,767 | 728f8e4e005df1fe0f91f81bb84c33a2a4f203d3 | b96ed10d6247e22d4fa1d28bc3314bc319d3109c | /LessonSample/chapter51/dbtest.py | 6f6d5e4792091611fc312bc0d8b96ba945793136 | [] | no_license | https://github.com/13555785106/PythonPPT-01 | ac1b22b9b1851f2b3ea6e4ab0a100e5f6896ee8c | 40e5883f248cb342f3a7fc7ad12ba02ebde4c619 | refs/heads/master | "2020-04-26T16:49:59.675964" | "2019-03-04T07:16:21" | "2019-03-04T07:16:21" | 157,095,747 | 4 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
from __future__ import unicode_literals
import os
import random
from datetime import datetime, timedelta
# api文档 https://docs.djangoproject.com/en/1.7/ref/models/querysets/
import django
# from django.db.models import Sum,Avg,Count,Q
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "chapter19.settings")
django.setup()
from sample.models import Author, Tag, Article, Hobby
Hobby.objects.all().delete()
Hobby(name='篮球').save()
Hobby(name='足球').save()
Hobby(name='排球').save()
# 清除已有Tag对象,新创建以下Tag对象
Tag.objects.all().delete()
for tagName in (u'武侠', u'推理', u'历史', u'言情', u'穿越', u'科幻', u'奇幻', u'玄幻', u'探险', u'恐怖', u'香艳', u'讽刺', u'神魔'):
# 创建模型对象的第一种方法
Tag.objects.create(name=tagName)
# 清除已有Author对象,新创建以下Author对象
Author.objects.all().delete()
for i in xrange(10):
name = ('Smith' if i % 2 == 0 else 'Jones') + ('%02d' % i)
# 创建模型对象的第二种方法
author = Author(account=name.lower(), name=name, sex='男' if i % 2 == 0 else '女',
birthday=datetime.now() + timedelta(-i),
email=(name + '@telecom.com').upper(),
mobile='131%08d' % i)
author.save()
# 清除已有Article对象,新创建以下Article对象
Article.objects.all().delete()
authors = Author.objects.all()
tags = Tag.objects.all()
titles = ['Apple', 'apple', 'APPLE', 'b123ee', 'kill345',
'老人与海', '红与黑', '双城记', '雾都孤儿', '忏悔录',
'巴黎圣母院', '红楼梦', '西游记', '荒凉山庄', '悲惨世界',
'飘', '失乐园', '日瓦戈医生', '麦田里的守望者', '伊利亚特',
'唐吉坷德', '百年孤独', '变形记', '基督山伯爵', '简爱']
for i in xrange(len(titles)):
author = random.choice(authors)
# 创建模型对象的第三种方法
article = Article()
article.title = titles[i]
article.content = 'content%02d' % i
article.score = i
article.author = author
article.release_time = datetime.now() - timedelta(days=random.randint(1, 365), hours=random.randint(1, 24),
minutes=random.randint(1, 59))
article.save()
s = set()
for m in xrange(3):
s.add(random.choice(tags))
for n in s:
article.tags.add(n)
article.save()
# 查看模型框架生成的SQL
# print Article.objects.all().query
# 获取所有文章
# print Article.objects.all()
# 结果必须是一条,否则出错
# print Article.objects.get(title='红楼梦')
# 查找title是Apple的文章,区分大小写
# print Article.objects.filter(title='Apple')
# 查找title是Apple的文章,不区分大小写
# print Article.objects.filter(title__iexact='Apple')
# print Article.objects.filter(Q(title__iexact='Apple') & Q(score__gte=1))
# 查找title包含红的文章,区分大小写
# print Article.objects.filter(title__contains='红')
# 查找title包含pp的文章,不区分大小写
# print Article.objects.filter(title__icontains='pp')
# 查找title包含至少一位连续数字的文章
# print Article.objects.filter(title__regex=r"\d+")
# 查找title以字符a开头文章,不区分大小写
# print Article.objects.filter(title__iregex=r"^a.*")
# 查找title以字符a开头文章,不区分大小写,并排除以大写字符A开头的文章
# print Article.objects.filter(title__iregex=r"^a.*").exclude(title__regex=r"^a.*")
# 查找2018年发布的文章
# print Article.objects.filter(release_time__year=2018)
# 删除title以字符a开头文章,不区分大小写
# print Article.objects.filter(title__iregex=r"^a.*").delete()
# 更新title以字符a开头文章,不区分大小写,把title设置成'AAAA'
# Article.objects.filter(title__iregex=r"^a.*").update(title='AAAA')
# 排序
# print Article.objects.all().order_by('score')
# print Article.objects.all().order_by('-score')
# 通过values_list形成元组
# print Author.objects.values_list('name', 'mobile')
# print Author.objects.filter(name__regex=r'^Smith.*').values_list('name', 'mobile')
# print Author.objects.filter(name__regex=r'^Smith.*').values_list('name', flat=True)
# print list(Author.objects.filter(name__regex=r'^Smith.*').values_list('name', flat=True))
# 通过values 获取字典形式的结果
# print Author.objects.values('name', 'mobile')
# print list(Author.objects.values('name', 'mobile'))
# print [item for item in Author.objects.filter(name__contains='Smith').values('name', 'mobile')]
# extra 实现 别名,条件,排序等
# print Author.objects.filter(name__contains='Smith').extra(
# select={'author_name': 'name', 'author_mobile': 'mobile'}).query
# print Author.objects.filter(name__contains='Smith').extra(
# select={'author_name': 'name', 'author_mobile': 'mobile'}).defer('name').query
# print Article.objects.all().extra(select={'is_pass': "score > 10"}).query
# for article in Article.objects.all().extra(select={'is_pass': "score > 10"}):
# print article.is_pass
# for article in Article.objects.all().extra(where=["score > 15 AND score <20"],order_by=['-score']):
# print article.score
# annotate 计数,求和,平均数
# print Article.objects.all().values('author_id').annotate(count=Count('author_id')).values('author_id', 'count')
# print Article.objects.all().values('author_id').annotate(score_sum=Sum('score')).values('author_id', 'score_sum')
# print Article.objects.all().values('author_id').annotate(score_avg=Avg('score')).values('author_id', 'score_avg')
# defer 排除不需要的字段
# print Author.objects.all().query
# print Author.objects.all().defer('name','sex','mobile').query
# 排除后,就不应再去获取,会导致二次查询!反而降低了效率
# for v in Author.objects.all().defer('name','sex','mobile'):
# print v.name
# only 仅选择需要的字段
# print Author.objects.all().only('name','sex','mobile').query
# for v in Author.objects.all().only('name','sex','mobile'):
# print v.birthday
# print Article.objects.dates('release_time', 'year')
# print Article.objects.first()
# print Article.objects.last()
# print Article.objects.latest('release_time')
# print Article.objects.earliest('release_time')
# tag0 = Tag.objects.all()[0]
# print tag0
# for a in tag0.article_set.all():
# print a
| UTF-8 | Python | false | false | 6,451 | py | 547 | dbtest.py | 434 | 0.667028 | 0.65656 | 0 | 161 | 33.416149 | 115 |
smuhit/2020-advent-code | 1,683,627,187,602 | 6d5093ff142c5809e05e8a49435bb702ee999030 | 744940636f2e1ddfe5d967ae53eaddc29922e463 | /day-19/solver.py | 247e2981b293f21f2f7bff2eac56f9bb4f0cc458 | [] | no_license | https://github.com/smuhit/2020-advent-code | a3945ebaaf7462d87b20912c6bbf958e92a17a0f | f1ee527bbfa66edc87af98551c32a924878a32c1 | refs/heads/main | "2023-02-01T16:11:47.161988" | "2020-12-19T21:01:27" | "2020-12-19T21:01:27" | 317,636,818 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import re
data = open('input.txt').read().split('\n')
rules = {}
for r_idx, datum in enumerate(data):
if datum == '':
break
key, value = datum.split(': ')
if '"' in value:
value = value.split('"')[1]
rules[int(key)] = value
else:
key_rules = []
for key_rule in value.split(' | '):
key_rules.append([int(x) for x in key_rule.split()])
rules[int(key)] = key_rules
messages = [data[m_idx] for m_idx in range(r_idx, len(data)) if data[m_idx] != '']
def construct(idx):
if isinstance(rules[idx], str):
return rules[idx]
constructor = '(' if len(rules[idx]) > 1 else ''
for key_rule_part in rules[idx]:
constructor += '|' if len(rules[idx]) > 1 and constructor != '(' else ''
for key_rule_unit in key_rule_part:
constructor += construct(key_rule_unit)
constructor += ')' if len(rules[idx]) > 1 else ''
return constructor
# Part 1
matcher = '^'
matcher += construct(0)
matcher += '$'
matching = []
for message in messages:
if re.match(matcher, message):
matching.append(message)
print('Count of 0:', len(matching))
# Part 2
def construct(idx):
# Handling special cases 8 and 11 separetly
if idx == 8: # 42 | 42 8
return f'({construct(42)})+'
if idx == 11: # 42 31 | 42 11 31
max_length = len(max(messages, key = len)) // 2 # Can't repeat more than this...
constructor_42 = construct(42)
constructor_31 = construct(31)
constructors = []
for i in range(1, max_length):
constructors.append(f'{constructor_42}{{{i}}}{constructor_31}{{{i}}}')
constructor = f'({"|".join(constructors)})'
return constructor
if isinstance(rules[idx], str):
return rules[idx]
constructor = '(' if len(rules[idx]) > 1 else ''
for key_rule_part in rules[idx]:
constructor += '|' if len(rules[idx]) > 1 and constructor != '(' else ''
for key_rule_unit in key_rule_part:
constructor += construct(key_rule_unit)
constructor += ')' if len(rules[idx]) > 1 else ''
return constructor
matcher = '^'
matcher += construct(0)
matcher += '$'
matching = []
for message in messages:
if re.match(matcher, message):
matching.append(message)
print('Count of 0:', len(matching))
| UTF-8 | Python | false | false | 2,349 | py | 21 | solver.py | 20 | 0.575138 | 0.553853 | 0 | 85 | 26.635294 | 88 |
anton1k/python_crash_course | 12,171,937,364,419 | 3213d394911eea6df943d4fd04fbab857ffab956 | e36472948f74fd5ed35fc64801a59db4efa27070 | /part_1/09_4_test.py | 8cd9aaa2757eaf779309743cfc40fdece0ccf5c1 | [] | no_license | https://github.com/anton1k/python_crash_course | 051aad7c5a043830d8cc9e5fd314f568bf0f4a53 | 80f302074e5fef48fb40e72f7d79ab4b8658b38a | refs/heads/master | "2020-07-18T23:28:00.871466" | "2019-09-04T14:06:12" | "2019-09-04T14:06:12" | 206,333,934 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | class Restaurant():
def __init__(self, restaurant_name, cuisine_type):
self.restaurant_name = restaurant_name
self.cuisine_type = cuisine_type
self.number_served = 0
def describe_restaurant(self):
print('ресторан - {0}, повар - {1}'.format(self.restaurant_name.title(), self.cuisine_type.title()))
def open_restaurant(self):
print('Ресторан {0} открыт'.format(self.restaurant_name.title()))
def set_number_served(self, new_current):
self.number_served = new_current
print('число посетителей изменено')
def increment_number_served(self, next_current):
self.number_served += next_current
print('число посетителей изменено на ' + str(next_current))
restaurant = Restaurant('оазис', 'вася')
print(restaurant.number_served)
restaurant.number_served = 2
print(restaurant.number_served)
restaurant.set_number_served(5)
print(restaurant.number_served)
restaurant.increment_number_served(10)
print(restaurant.number_served) | UTF-8 | Python | false | false | 1,106 | py | 100 | 09_4_test.py | 95 | 0.693137 | 0.685294 | 0 | 28 | 35.464286 | 108 |
wilpoole/Machine-Learning-A-Z | 1,640,677,549,445 | 03990c8bd44ca66adc94c2db59bb270fbb53e8db | 59dfdf2219f6c84baceea62d702eb86617a19137 | /Part 1 - Data Preprocessing/Section 2 - Data Preprocessing/wil_data_preprocessing.py | a4e7a75a6bc83ddea640b3253c53cdd62b21d5ac | [] | no_license | https://github.com/wilpoole/Machine-Learning-A-Z | ca115deca04e40b69828e34a193c802c491cce0f | 0aa31f2cc5c3d6bd6ffe5192dfaa00d02b0cc39a | refs/heads/master | "2021-09-11T16:38:57.242856" | "2018-04-09T19:43:08" | "2018-04-09T19:43:08" | 111,995,951 | 0 | 0 | null | false | "2018-04-09T19:43:09" | "2017-11-25T10:43:40" | "2018-04-08T12:29:27" | "2018-04-09T19:43:08" | 210 | 0 | 0 | 0 | Python | false | null | #!/usr/bin/env python
# File information
"""
wil_data_preprocessing.py: First look at preprocessing data.
"""
__author__ = "Wil Poole"
__copyright__ = "2017"
__credits__ = "Wil Poole"
__license__ = "None"
__version__ = "0.1"
__maintainer__ = "Wil Poole"
__email__ = "@"
__status__ = "Development"
# Import libraries
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from os import path
from sklearn.preprocessing import Imputer, LabelEncoder, OneHotEncoder, StandardScaler
from sklearn.model_selection import train_test_split
# Variables
data_directory = "data"
data_file = "data.csv"
# Models
imputer = Imputer(missing_values = "NaN", strategy = "mean", axis = 0)
# Data import
data_set = pd.read_csv(path.join(data_directory, data_file))
# Data format
data_set_dependent = data_set.iloc[ : , :-1 ].values
data_set_independent = data_set.iloc[ : , -1 ].values
data_set_dependent = imputer.fit_transform(data_set_dependent[ : , 1:3 ])
data_set_dependent[:, 0] = LabelEncoder().fit_transform(
data_set_dependent[:, 0]
)
data_set_dependent = OneHotEncoder(
categorical_features = [0]
).fit_transform(data_set_dependent).toarray()
len(data_set_dependent)
data_set_independent = LabelEncoder().fit_transform(data_set_independent)
print(data_set_independent)
data_set_dependent_train, data_set_dependent_test, data_set_independent_train, data_set_independent_test = train_test_split(
data_set_dependent, data_set_independent, test_size = 0.2, random_state = 0
)
dependent_scaler = StandardScaler()
data_set_dependent_train = dependent_scaler.fit_transform(data_set_dependent_train)
data_set_dependent_test = dependent_scaler.transform(data_set_dependent_test)
| UTF-8 | Python | false | false | 1,703 | py | 16 | wil_data_preprocessing.py | 16 | 0.725778 | 0.715796 | 0 | 61 | 26.918033 | 124 |
Reigningsun/CSC394_Capstone_Project | 4,509,715,668,740 | b8933485b4dd83bcb562a224d4968bafaba65c87 | db9ca49041febe7ef0d7faa5f402b027c5c16e1a | /app/models/csc394_courses.py | f9508fbf8af6fdfa8a9e1e5a98e7403c175ced1c | [] | no_license | https://github.com/Reigningsun/CSC394_Capstone_Project | 64ef75d4c60b85c9388e1129d513ab3147b63784 | b5f1cba84ee3a8b3613e338ad6504fa0f2cfd972 | refs/heads/master | "2022-12-14T07:04:05.358995" | "2018-06-08T22:44:15" | "2018-06-08T22:44:15" | 136,528,726 | 0 | 0 | null | false | "2022-12-08T00:01:15" | "2018-06-07T20:39:40" | "2018-06-08T22:44:17" | "2022-12-08T00:01:14" | 150 | 0 | 0 | 4 | Python | false | false | from app import db
class Csc394Courses(db.Model):
__tablename__ = "csc394_courses"
id = db.Column(db.String, primary_key=True)
title = db.Column(db.String)
subject = db.Column(db.String)
course_nbr = db.Column(db.Integer)
description = db.Column(db.String)
prereqs = db.Column(db.String)
score = db.Column(db.Integer)
unlock_score = db.Column(db.Integer)
rarity_score = db.Column(db.Integer)
@property
def serialize(self):
"""Return object data in easily serializeable format"""
return {'title': self.title,
'subject': self.subject,
'course_nbr': self.course_nbr,
'description': self.description,
'prereqs': self.prereqs,
'score': self.score,
'unlock_score': self.unlock_score,
'rarity_score': self.rarity_score}
| UTF-8 | Python | false | false | 894 | py | 28 | csc394_courses.py | 25 | 0.591723 | 0.585011 | 0 | 28 | 30.928571 | 63 |
WoKRamon/Secao5 | 14,568,529,102,968 | 92a334bfac22d70f258a77c6bdcd23efb975c89e | f07a3cedc941d7e50b52f9b322b13842570a194b | /exercicio18.py | 6ffc9f33955af07201e7924734caced6e0b1fa08 | [] | no_license | https://github.com/WoKRamon/Secao5 | f91e9d66462e7df16a7a533f19ba47c6312c0a3c | 2a2dc421c5e2951bf8d3e9b64df1cbcdcb76c09a | refs/heads/main | "2023-03-19T01:49:44.275326" | "2021-03-08T06:11:15" | "2021-03-08T06:11:15" | 345,546,442 | 6 | 6 | null | null | null | null | null | null | null | null | null | null | null | null | null | print("Escolha a operação matemática abaixo")
opc = 0
while 1 > opc or opc > 4:
opc = int(input("1. Para soma digite 1 \n"
"2. Para Subtração digite 2 \n"
"3. Para multiplicação digite 3 \n"
"4. Para divisão digite 4 \n"))
val1 = int(input("Digite o primeiro valor da operação \n"))
val2 = int(input("Digite o segundo valor da operação \n"))
if opc == 1:
result = val1 + val2
print(f"O resultado é: {result}")
elif opc == 2:
result = val1 - val2
print(f"O resultado é: {result}")
elif opc == 3:
result = val1 * val2
print(f"O resultado é: {result}")
elif opc == 4:
result = val1 / val2
print(f"O resultado é: {result}")
else:
print("O Numero digitado é invalido")
| UTF-8 | Python | false | false | 784 | py | 40 | exercicio18.py | 40 | 0.584094 | 0.551499 | 0 | 26 | 28.5 | 59 |
TianleiSun/shakespeare_bot | 16,827,681,869,375 | 0b141f696e0ca0f8cd2d1229f6088dac09276eb3 | 7ff552aa952e7cf6e83ba83adf051b1f8208e744 | /RNN/RNN_ver2.py | 53fc278a4787f411f0720389a5ce821773d359f0 | [] | no_license | https://github.com/TianleiSun/shakespeare_bot | c816f08ab79ac14dadc231810107986b09309bc1 | 24d7851a665fa2fbe0d92cb77f6650b1b1dd0b5c | refs/heads/master | "2021-01-21T10:04:50.674980" | "2017-02-28T01:37:43" | "2017-02-28T01:37:43" | 83,373,646 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# coding: utf-8
# In[1]:
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout
from keras.layers import LSTM
from keras.optimizers import RMSprop
from keras.utils.data_utils import get_file
import numpy as np
import random
import sys
import matplotlib.pyplot as plt
import preprocess
# In[2]:
words, lastwords, word_map, last_map = preprocess.preprocess_word_to_num("shakespeare.txt")
index_map = {}
for (w,i) in word_map.items():
index_map[i] = w
# In[3]:
wordlist = []
for i in words:
for j in reversed(i):
wordlist.append(j)
# In[4]:
maxlen = 7
step = 1
sentences = []
next_word = []
# In[5]:
for i in range(0, len(wordlist) - maxlen, step):
sentences.append(wordlist[i: i + maxlen])
next_word.append(wordlist[i + maxlen])
print('nb sequences:', len(sentences))
# In[21]:
print('Vectorization...')
X = np.zeros((len(sentences), maxlen, len(index_map)), dtype=np.bool)
y = np.zeros((len(sentences), len(index_map)), dtype=np.bool)
for i, sentence in enumerate(sentences):
for t in range(len(sentence)):
w = sentence[t]
X[i, t, w] = 1
y[i, next_word[i]] = 1
# In[22]:
# build the model: a single LSTM
print('Build model...')
model = Sequential()
model.add(LSTM(128, input_shape=(maxlen, len(index_map))))
model.add(Dropout(0.2))
model.add(Dense(len(index_map)))
model.add(Activation('softmax'))
optimizer = RMSprop(lr=0.01)
model.compile(loss='categorical_crossentropy', optimizer=optimizer)
# In[23]:
def sample(preds, temperature=1.0):
# helper function to sample an index from a probability array
preds = np.asarray(preds).astype('float64')
preds = np.log(preds) / temperature
exp_preds = np.exp(preds)
preds = exp_preds / np.sum(exp_preds)
probas = np.random.multinomial(1, preds, 1)
return np.argmax(probas)
# In[37]:
for iteration in range(5):
print()
print('-' * 50)
print('Iteration', iteration)
model.fit(X, y, batch_size=128, nb_epoch=10)
start_index = random.randint(0, len(wordlist) - maxlen - 1)
for diversity in [0.5, 1.0, 1.5]:
print()
print('----- diversity:', diversity)
generated = []
sentence = wordlist[start_index: start_index + maxlen]
generated.append(sentence)
print('----- Generating with seed:')
for i in sentence:
sys.stdout.write(index_map[i] + " ")
for i in range(130):
x = np.zeros((1, maxlen, len(index_map)))
for t in range(len(sentence)):
w = sentence[t]
X[i, t, w] = 1
preds = model.predict(x, verbose=0)[0]
next_i = sample(preds, diversity)
next_w = index_map[next_i]
generated.append(next_w)
sentence = sentence[1:]
sentence.append(word_map[next_w])
sys.stdout.write(next_w + " ")
sys.stdout.flush()
print()
# In[ ]:
| UTF-8 | Python | false | false | 2,965 | py | 5 | RNN_ver2.py | 4 | 0.611804 | 0.59258 | 0 | 127 | 22.314961 | 91 |
altermarkive/Scheduled-Tasks-with-Cron-Python-Ubuntu-Docker | 17,532,056,514,089 | f86422885a6895798f56b1added7b722195be7a0 | 98b92546ecb15c709b153ccb4503c107500ad5a0 | /aiohttp-connexion-container/app/app.py | 6920765f7a76603357bb4a80144ce908d9484623 | [
"MIT"
] | permissive | https://github.com/altermarkive/Scheduled-Tasks-with-Cron-Python-Ubuntu-Docker | 888135a30a7b83742a747715d943c3aa4560eb55 | 1d9959e33cac5d9d2c2089964089bc416dc9276d | refs/heads/master | "2021-01-10T14:24:08.115025" | "2020-10-05T15:35:16" | "2020-10-05T15:35:16" | 54,128,099 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This is the script with the main code of the application
"""
import json
import logging
import aiohttp.web
import connexion
async def index(request):
"""
Redirecting root to index.html
"""
raise aiohttp.web.HTTPFound('/index.html')
def go():
"""
Runs the web server
"""
app = connexion.AioHttpApp(
__name__, port=80, specification_dir='/app/web')
context_name = 'request'
api = app.add_api( # nosec
'api.yaml',
base_path='/api',
options={'swagger_ui': False},
pass_context_arg_name=context_name)
api.subapp['LOGGER'] = logging.getLogger('app')
app.app.router.add_route('*', '/', index)
app.app.router.add_static(prefix='/', path='/app/web/')
app.run()
if __name__ == '__main__':
pattern = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(format=pattern, level=logging.INFO)
go()
| UTF-8 | Python | false | false | 976 | py | 11 | app.py | 3 | 0.596311 | 0.592213 | 0 | 45 | 20.688889 | 68 |
ajgupta93/enhanced-view-synthesis | 11,132,555,236,698 | 335c63a74d75d3f7974bfdca03379ca1ee64bc33 | 4a661cb2b7dfcc7dac371c5de608cb93570795fa | /code/utility.py | 1e054205726d41c9227c467b998a82b5278ff8e4 | [] | no_license | https://github.com/ajgupta93/enhanced-view-synthesis | 5196794a4982a0bdf71d75aa5a412f2501848ab6 | 881f6cb243baae4aa07857af759dcc9d53f8e248 | refs/heads/master | "2020-03-14T21:30:34.597571" | "2017-03-14T10:13:43" | "2017-03-14T10:13:43" | 131,797,823 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from scipy.misc import imsave
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import Image
import os
import pdb
import shutil
import random
def save_as_image(images):
for i in range(0, len(images)):
filename = filepath+str(i)+".png"
imsave(filename, images[i])
def show_image(image):
plt.imshow(np.squeeze(image))
plt.show()
def img_mask_gen(imgpath):
im = Image.open(imgpath).convert('L').point(lambda x: 0 if x<=0 or x>=250 else 255,'1')
return im
def generate_autoencoder_data_from_list(dataArr):
while 1:
r = random.sample(range(len(dataArr)), 1)
fp = dataArr[r]
currImgPath = fp[0]
img = np.asarray(Image.open(currImgPath).convert('RGB'), dtype=np.uint8)
#msk = imgMaskGen(currImgPath)
img4 = []
img4.append(img)
img4 = np.asarray(img4)
yield ({'convolution2d_input_1': img4}, {'reshape_3': img4})
#yield (img,img)
def generate_data_array_for_autoencoder(dataPath='../data/chairs/'):
dataArr = []
for path,dirs,files in os.walk(dataPath):
#print path
for dr in dirs:
#print dr
if dr!='model_views' and dr != '':
drPath = path+'/'+dr
if '//' not in drPath:
#print drPath
shutil.rmtree(drPath)
#pruning complete
elif dr =='model_views':
inpath = os.path.join(dataPath,path[len(dataPath):]) + '/'+dr
for files in os.walk(inpath):
for fList in files:
for f in fList:
if '.png' in f:
readLoc = inpath + '/'+f
#print readLoc
dataArr.append(readLoc)
dataArr = np.asarray(dataArr)
#pdb.set_trace()
np.random.shuffle(dataArr)
return dataArr
| UTF-8 | Python | false | false | 1,608 | py | 5 | utility.py | 4 | 0.659826 | 0.645522 | 0 | 62 | 24.919355 | 88 |
Ascend/ModelZoo-PyTorch | 18,528,488,915,949 | 7d8c07579797e43423ba8709956c1cfdce1cc2bf | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /ACL_PyTorch/built-in/cv/Flownet2_for_Pytorch/Flownet2_pth2onnx.py | 7fb8bcd89c0ece3f49e510de496549880d3c5e91 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later"
] | permissive | https://github.com/Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | "2023-07-19T12:40:00.512853" | "2023-07-17T02:48:18" | "2023-07-17T02:48:18" | 483,502,469 | 23 | 6 | Apache-2.0 | false | "2022-10-15T09:29:12" | "2022-04-20T04:11:18" | "2022-10-10T08:03:54" | "2022-10-15T04:01:18" | 53,470 | 7 | 5 | 2 | Python | false | false | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import argparse
import torch
sys.path.append('./flownet2-pytorch')
import models
from utils import tools
def parser_func():
parser = argparse.ArgumentParser()
parser.add_argument('--fp16', action='store_true')
parser.add_argument('--input_path', type=str, default='./FlowNet2_checkpoint.pth.tar')
parser.add_argument('--out_path', type=str, default='./models/flownet2_bs1.onnx')
parser.add_argument('--model', type=str, default='FlowNet2')
parser.add_argument('--batch_size', type=int, default=1)
args = parser.parse_args()
os.makedirs(os.path.dirname(args.out_path), exist_ok=True)
return args
def export_onnx():
model_class = tools.module_to_dict(models)[args.model]
model = model_class(args)
checkpoint = torch.load(args.input_path, map_location='cpu')
model.load_state_dict(checkpoint['state_dict'])
model.eval()
input_names = ['x1', 'x2']
output_names = ['flow']
dummy_input_x1 = torch.randn(args.batch_size, 3, 448, 1024)
dummy_input_x2 = torch.randn(args.batch_size, 3, 448, 1024)
torch.onnx.export(model, (dummy_input_x1, dummy_input_x2), args.out_path,
input_names=input_names, output_names=output_names, opset_version=11,
verbose=True)
if __name__ == '__main__':
args = parser_func()
export_onnx()
| UTF-8 | Python | false | false | 1,959 | py | 11,303 | Flownet2_pth2onnx.py | 8,028 | 0.689127 | 0.668709 | 0 | 56 | 33.982143 | 91 |
GGGGGGGG/s2wrapper | 11,579,231,836,414 | f6188052e4b3e62c2bda662acf27e510be9a7a15 | 8626d8f997dc6f16bceedf552a262c036622901c | /plugins/limit.py | ae4c40631d503c94d4a8a1c89ed8c7170fc07caf | [] | no_license | https://github.com/GGGGGGGG/s2wrapper | c4f9509e8d44cc5d0c5900181cb36e66cd306eb9 | 2064e85cae07d6c1b78f42e7fb296be2da6a646e | refs/heads/master | "2021-01-21T07:21:19.483107" | "2018-01-25T22:21:03" | "2018-01-25T22:21:03" | 18,301,597 | 2 | 1 | null | true | "2015-02-04T23:11:29" | "2014-03-31T17:24:36" | "2015-01-26T19:21:20" | "2015-02-04T23:11:29" | 887 | 1 | 1 | 0 | Python | null | null | # -*- coding: utf-8 -*-
import os
import re
import ConfigParser
from MasterServer import MasterServer
from PluginsManager import ConsolePlugin
from S2Wrapper import Savage2DaemonHandler
# TODO: 20101014 winex: improve forcespec'ing player by watching inside onSetTeam
class limit(ConsolePlugin):
CONFIG_DEFAULT = {
'reason': "This server has restrictions on",
'level_min': 10,
'level_max': 100,
'sf_min': 50,
'sf_max': 500,
'forcespec': False,
}
def onPluginLoad(self, config):
self.config = self.CONFIG_DEFAULT.copy()
self.ms = MasterServer()
print(self.config)
ini = ConfigParser.ConfigParser()
ini.read(config)
for name in self.config.keys():
try:
self.config[name] = ini.get('limit', name)
except:
raise
print(self.config)
pass
def onAccountId(self, *args, **kwargs):
config = self.config
reason = config['reason']
clnum = int(args[0])
id = int(args[1])
stats = self.ms.getStatistics("%d" % (id)).get('all_stats').get(id)
lv = int(stats['level'])
sf = int(stats['sf'])
act = False
if (lv < config['level_min']) or (lv > config['level_max']):
act = True
reason += ". Level %d - %d only" % (config['level_min'], config['level_max'])
if (sf < config['sf_min']) or (sf > config['sf_max']):
act = True
reason += ". SF %d - %d only" % (config['sf_min'], config['sf_max'])
if not act:
return
if not config['forcespec']:
kwargs['Broadcast'].put("kick %d \"%s\"" % (clnum, reason))
else:
kwargs['Broadcast'].put("SendMessage %d \"%s\"" % (clnum, reason))
kwargs['Broadcast'].put("SetTeam #GetIndexFromClientNum(%d)# 0" % (clnum))
kwargs['Broadcast'].broadcast()
return
| UTF-8 | Python | false | false | 1,683 | py | 32 | limit.py | 21 | 0.639335 | 0.625074 | 0 | 70 | 23.042857 | 81 |
Saknowman/django-s-store-api | 12,266,426,600,731 | f5cbc109de62b2e0f8b371d147044f9118897888 | 00609a376392df8f1d8ce7e0259e76548479948c | /s_store_api/settings.py | 8585fa9691286b560a45f44f943f3b8c5a56fa6b | [
"MIT"
] | permissive | https://github.com/Saknowman/django-s-store-api | d0271c1de4079d8e233bf6ebada80dc57380e4ab | a14e000ea32cc527ad2c822f09f812194a5c8a47 | refs/heads/master | "2021-09-29T00:36:46.396908" | "2019-12-24T03:30:04" | "2019-12-24T03:30:04" | 228,795,250 | 1 | 0 | MIT | false | "2021-09-22T18:18:14" | "2019-12-18T08:38:36" | "2019-12-24T03:50:04" | "2021-09-22T18:18:12" | 65 | 1 | 0 | 2 | Python | false | false | from django.conf import settings
APP_SETTING_ROUTE_NAME = 'S_STORE_API'
DEFAULTS = {
'STORE_MODEL': {
'MAX_LENGTH': 20,
},
'ITEM_MODEL': {
'MAX_LENGTH': 20,
},
'COIN_MODEL': {
'MAX_LENGTH': 5,
},
'STORE_PERMISSION_CLASSES': [
's_store_api.permissions.store_permissions.DefaultStorePermissions',
],
'ITEM_PERMISSION_CLASSES': [
's_store_api.permissions.item_permissions.DefaultItemPermissions',
],
}
class APISettings:
"""
A settings object, that allows API settings to be accessed as properties.
Set default settings in your app settings.py like this:
from app_utils.setting import APISettings
api_settings = APISettings('TODO_API', DEFAULTS)
For example:
from todo_api.settings import api_settings
print(api_settings.TASK_STATUS_CHOICES)
"""
def __init__(self, setting_root_name, defaults):
self._setting_root_name = setting_root_name
self._defaults = defaults
self._user_settings = getattr(settings, self._setting_root_name, {})
def __getattr__(self, item):
if item not in self._defaults:
raise AttributeError("Invalid {} setting: {}".format(self._setting_root_name, item))
try:
return self._user_settings[item]
except KeyError:
return self._defaults[item]
api_settings = APISettings(APP_SETTING_ROUTE_NAME, DEFAULTS)
| UTF-8 | Python | false | false | 1,503 | py | 44 | settings.py | 41 | 0.606121 | 0.602794 | 0 | 50 | 28.06 | 96 |
pije76/scportal | 14,551,349,200,275 | f7f7dbbcd3acc2f74ebff9c1b9fac4555d781adf | 3c6562047fece137b66665eb78406944e6b0999a | /gridplatform/reports/urls.py | 887890d78f628565054b6ab4c9deb50a28255abd | [] | no_license | https://github.com/pije76/scportal | 401e38d4dd9debb5bccd004362521cbfea0a3f4a | 2c13c94504e07c673d333db4212c5c03ba80c20d | refs/heads/master | "2023-03-06T20:29:52.461816" | "2020-01-02T07:42:13" | "2020-01-02T07:42:13" | 227,796,927 | 1 | 0 | null | false | "2019-12-13T09:04:07" | "2019-12-13T08:54:19" | "2019-12-13T09:03:35" | "2019-12-13T09:04:07" | 0 | 0 | 0 | 6 | Python | false | false | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
from django.conf.urls import patterns, url
urlpatterns = patterns(
'gridplatform.reports.views',
url(r'^status/$',
'status',
name='reports-status'),
url(r'^(?P<id>\d+)/(?P<title>.*)$',
'serve',
name='reports-serve'),
)
| UTF-8 | Python | false | false | 369 | py | 807 | urls.py | 457 | 0.588076 | 0.585366 | 0 | 16 | 22.0625 | 42 |
JeanMarc-Moly/mugimugi_client_api_entity | 7,490,423,000,254 | 3504b91b913bc661a9ed8826eb98392dc3ba5725 | faba9787b3e1f610d6622a2ce0539c3ea9b14476 | /test/resource/xml/sample/item/character/tosaka_rin.py | 8b44178e34d7f5a4d08d55a040b49dd30f3c2b42 | [] | no_license | https://github.com/JeanMarc-Moly/mugimugi_client_api_entity | 3f8bd12164a4b35390ea60531f5c7caa91905e55 | 57a10613921361b13bdeec6143f0fe5fd969916b | refs/heads/main | "2023-06-19T04:45:23.719121" | "2021-07-21T21:21:43" | "2021-07-21T21:21:43" | 376,458,493 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from mugimugi_client_api_entity.enum import Ratio, Sex
from mugimugi_client_api_entity.main.book import Character
from ......configuration import SAMPLE
from ...abstract import Sample
class BookCharacterTosakaRin(Sample[Character]):
file_path = SAMPLE / "book/item/character/tosaka_rin.xml"
object = Character(
english_name="Tōsaka Rin",
japanese_name="遠坂凛",
katakana_name="トオサカリン",
other_names=["Tosaka Rin", "Tousaka Rin", "Tohsaka Rin"],
_id="H211",
version=19,
objects_count=979,
sex=Sex.FEMALE,
age="",
ratio=Ratio.NOT_SET,
_type_validator=Character.Type.TYPE,
)
| UTF-8 | Python | false | false | 712 | py | 173 | tosaka_rin.py | 137 | 0.621934 | 0.61039 | 0 | 22 | 29.5 | 65 |
oway13/Schoolwork | 7,310,034,338,173 | 0d8effac346725e46eb653c1691fdbdbce286513 | cd649144fcba3a701b4ac3a01f58af749a55bffa | /15Fall/1133 Intro to Programming Concepts/Python Labs/Lab 5/l5 wo1.py | 48d2faaa396ea633a030eebabce6d172e65e335e | [
"MIT"
] | permissive | https://github.com/oway13/Schoolwork | d3aca4c566a1b1a152b2e40418d8229f91403d3f | 294f407c288ef532f8f187a6ee0bd9fd0e7559ab | refs/heads/master | "2020-03-23T04:24:44.147826" | "2019-02-11T19:53:01" | "2019-02-11T19:53:01" | 141,081,829 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import random
length = int(input('Enter Length of # List: '))
numlist = []
for n in range(1, length+1):
numlist += [random.randint(1,9)]
print(numlist)
for n in range(1, length+1):
swapvar = numlist[n-1]
for i in range(n, length+1):
if numlist[i-1] == min(numlist[n-1:]):
numlist[n-1] = numlist[i-1]
numlist[i-1] = swapvar
print(numlist)
| UTF-8 | Python | false | false | 383 | py | 349 | l5 wo1.py | 193 | 0.590078 | 0.556136 | 0 | 13 | 28.384615 | 47 |
liucongcas/TSD | 3,453,153,742,784 | 53de585fe92abb126e03419a3ebe3c38ecc208e6 | 4823d61a7978d2f855ad118ff6c6d5acacd20b25 | /pipeline/get_readthrough_longsite.py | b41f094104b250ebf8cbb04c99292e5d5c3b8f00 | [
"Apache-2.0"
] | permissive | https://github.com/liucongcas/TSD | b968a9f59b19bccbd167cb5d3abb51be15a0fa28 | 34f6961e02f627964d3430f3bc8d93c7a0ff5a6d | refs/heads/master | "2020-05-22T06:15:05.880808" | "2019-10-19T08:57:30" | "2019-10-19T08:57:30" | 167,324,245 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys,getopt,time,os,commands
def get_time_mem():
fr_r = [r1.strip().split("\t") for r1 in open(readthroughfile).readlines()]
fr_g = [r1.strip().split("\t") for r1 in open(genefile).readlines()]
list_total=[];dict_g={};list_la=[]
for x in fr_g:
par=x[7]
dict_g[par]=[x[0],x[3],x[4],x[6]]
for xr in fr_r:
x1=xr[0].split("-")[0]
x2=xr[0].split("-")[-1]
if x1 in dict_g and x2 in dict_g:
list_total.append([xr[0],dict_g[x1][0],dict_g[x1][1],dict_g[x1][3],dict_g[x2][0],dict_g[x2][2],dict_g[x2][3]])
else:
list_la.append(xr)
dict_gene = splitfile(fr_g)
list_total2=[]
for yr in list_total:
chr_r=yr[1]
g1 = yr[0].split("-")[0]
g2 = yr[0].split("-")[-1]
s_r=float(yr[2])
e_r=float(yr[5])
tmp=[]
list_gene=dict_gene[chr_r]
for y2 in list_gene:
s_g=float(y2[3])
e_g=float(y2[4])
if min(e_r,e_g) > max(s_r,s_g) and y2[7] !=g1 and y2[7]!=g2:
tmp.append(y2)
if tmp !=[]:
print tmp
q=''
for p in tmp:
q=q+"_".join(p)
list_total2.append(yr+[q])
else:
list_total2.append(yr + ["nointermedianGene"])
list_total3=[]
for j in list_total2:
list_total3.append("\t".join(map(str,j)))
lw=open(readthroughfile+"_readthroughsites","w+")
lw.writelines("\n".join(list_total3)+"\n")
list_la2 = []
for j in list_la:
print j
list_la2.append(j[0])
lw = open(readthroughfile + "_notdefined", "w+")
lw.writelines("\n".join(list_la2) + "\n")
def splitfile(fr_g):
dict_s={}
for s in fr_g:
par=s[0]
if par in dict_s:
dict_s[par].append(s)
else:
dict_s[par]=[s]
return dict_s
def main(argv):
t1 = time.time()
global readthroughfile
global genefile
genefile = ''
readthroughfile=''
try:
opts, args = getopt.getopt(argv, "ha:o:", ["afile=","odir="])
except getopt.GetoptError:
print'python DNAlevel_NCL_filter.py -a <genefile> -o <readthroughfile>'
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print'python DNAlevel_NCL_filter.py -a <genefile> -o <readthroughfile>'
sys.exit()
elif opt in ("-a", "--afile"):
genefile = arg
elif opt in ("-o", "--odir"):
readthroughfile = arg
get_time_mem()
t2 = time.time()
print"The time needed in DNAlevel_NCL_filter for TSEs is:", t2 - t1
if __name__ == "__main__":
main(sys.argv[1:]) | UTF-8 | Python | false | false | 2,757 | py | 36 | get_readthrough_longsite.py | 23 | 0.4893 | 0.464273 | 0 | 85 | 30.458824 | 122 |
kimdohui/Python_Study | 1,486,058,716,698 | c8ae68e9b339c11cb7db960ca2d834daa2c8134c | 1a65481701a7ec2ba17e051cf50d131ded1516e1 | /unit41/코루틴 받아오기.py | 1e529163bc9c8e13ae3dce69d4f66cbbb370b0f8 | [] | no_license | https://github.com/kimdohui/Python_Study | bec9b24bab79048d3b7169609898c213912f1d99 | 92dcec38cfa1043e004a72fdb2be2a3f56311444 | refs/heads/master | "2021-05-17T13:58:59.729705" | "2020-04-22T10:58:12" | "2020-04-22T10:58:12" | 250,809,291 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | def add(a, b):
c = a + b # add 함수가 끝나면 변수와 계산식은 사라짐
print(c)
print('add 함수')
def calc(): # 메인루틴
add(1, 2) # add 함수가 끝나면 다시 calc 함수로 돌아옴 (서브루틴)
print('calc 함수')
calc()
# 코루틴이란 서로 협력하는 루틴이란 뜻으로 메인루틴과 서브루틴이 대등한 관계이며
# 특정 시점에 상대방의 코드를 실행함
# 일반 함수 호출 시 코드를 한번만 실행할 수 있지만 코루틴은 여러번 실행 가능함
def number_coroutine():
while True: # 코루틴을 계속 유지하기 위해 무한 루프 사용
x = (yield) # 코루틴 바깥에서 값을 받아옴, yield를 괄호로 묶어야 함
print(x)
co = number_coroutine()
next(co) # 코루틴 안의 yield까지 코드 실행(최초 실행)
co.send(1) # 코루틴에 숫자 1을 보냄
co.send(2) # 코루틴에 숫자 2을 보냄
co.send(3) # 코루틴에 숫자 3을 보냄
| UTF-8 | Python | false | false | 1,040 | py | 83 | 코루틴 받아오기.py | 82 | 0.56962 | 0.556962 | 0 | 31 | 19.387097 | 58 |
Aerex/weesms | 9,010,841,402,316 | 2634d52f9797a14b08f9c527b3709ac113a8f5e0 | 14f6e9ada799a4d184c17d107e3d610d2b928703 | /_pytest/conftest.py | 160a2ba6c6390ef20c7b9dce5f824a91a487b3f8 | [] | no_license | https://github.com/Aerex/weesms | 047245efa76071d82348500221f1aaa1aaf07e63 | 9685db128af489d459c34790190490c78accab71 | refs/heads/master | "2020-03-10T14:36:25.035603" | "2018-06-04T05:32:11" | "2018-06-04T05:32:11" | 129,430,771 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import sys
import pytest
sys.path.append(".")
import weesms
class fake_weechat():
WEECHAT_RC_ERROR = 0
WEECHAT_RC_OK = 1
WEECHAT_RC_OK_EAT = 2
def __init__(self):
pass
def prnt(message):
print message
@pytest.fixture
def fake_weechat_instance():
weesms.w = fake_weechat()
weesms.w.WEECHAT_RC_OK = 0
pass
| UTF-8 | Python | false | false | 356 | py | 6 | conftest.py | 4 | 0.620787 | 0.609551 | 0 | 21 | 15.952381 | 30 |
adroffner/tastypie-optimizers | 5,523,327,943,627 | 978a6fd5cf7186cf4a696dbe7295bd9a32688c4f | 1d17ca09b45c79d024e564fc1f204d0e7792b651 | /tastypie_optimizers/authentication.py | 286fce0e644201a3ba3eb7bc60639370eff7ed4e | [
"MIT"
] | permissive | https://github.com/adroffner/tastypie-optimizers | 669a5ae56a2fe99727f5a30e46b859d0b42ed847 | ed59724c0d92383e053c06e22af10c25202bcf66 | refs/heads/master | "2016-09-05T10:28:59.148609" | "2014-03-23T01:13:09" | "2014-03-23T01:13:09" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from tastypie.authentication import ApiKeyAuthentication
from tastypie.models import ApiKey
from tastypie.compat import User, username_field
from tastypie.http import HttpUnauthorized
from django.core.cache import get_cache
import logging
class CachedApiKeyAuthentication(ApiKeyAuthentication):
"""
Handles API key auth, in which a user provides a username & API key.
Caches the ApiKey & goes back to the database to refresh.
Uses the ``ApiKey`` model that ships with tastypie in the DB.
Overrides the ``get_key`` method to perform the key check
to ask cache first.
"""
def __init__(self, cache_name='default', ttl_seconds=60):
super(CachedApiKeyAuthentication, self).__init__()
try:
self._cache = get_cache(cache_name)
self._ttl_seconds = ttl_seconds
except InvalidCacheBackendError:
raise # TODO: handle this better.
def is_authenticated(self, request, **kwargs):
"""
Finds the user and checks their API key.
Should return either ``True`` if allowed, ``False`` if not or an
``HttpResponse`` if you need something custom.
"""
try:
username, api_key = self.extract_credentials(request)
except ValueError:
return self._unauthorized()
if not username or not api_key:
return self._unauthorized()
# cache key=api_key, value=User-object
user = self._cache.get(api_key)
logging.error("cached ApiKey: %r value: %r==%r" % (api_key, user and user.username, user))
if not user:
try:
lookup_kwargs = {username_field: username}
user = User.objects.get(**lookup_kwargs)
ApiKey.objects.get(user=user, key=api_key)
self._cache.set(api_key, user, self._ttl_seconds)
except (User.DoesNotExist, User.MultipleObjectsReturned,
ApiKey.DoesNotExist, ApiKey.MultipleObjectsReturned):
return self._unauthorized()
if user.username != username:
return self._unauthorized()
if not self.check_active(user):
return False
key_auth_check = self.get_key(user, api_key)
if key_auth_check and not isinstance(key_auth_check, HttpUnauthorized):
request.user = user
return key_auth_check
def get_key(self, user, api_key):
"""
no-op; everything happens in is_authenticated.
"""
return True
| UTF-8 | Python | false | false | 2,532 | py | 9 | authentication.py | 8 | 0.620458 | 0.619668 | 0 | 73 | 33.671233 | 98 |
santiago3223/CodigoFlutter | 3,599,182,606,734 | d33d108291b253b7128caacf53e32148a5f8c63c | f885d505e5955b66d4da2a960315be62ccbe2410 | /miloficios/API/migrations/0006_solicitud_estado.py | aa6bc173dfca0b49653bab88c05df4c4de5af0d2 | [] | no_license | https://github.com/santiago3223/CodigoFlutter | f3e1c424f68deaad8c0f6a6f4bfaa9bb7d89da81 | c5d6b2560f7dc7d30f78c2bcbbb7fa49e496e2c2 | refs/heads/master | "2023-01-19T18:56:23.916217" | "2020-11-17T02:45:06" | "2020-11-17T02:45:06" | 281,797,681 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Generated by Django 2.0.2 on 2020-10-07 00:09
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('API', '0005_auto_20201005_1948'),
]
operations = [
migrations.AddField(
model_name='solicitud',
name='estado',
field=models.IntegerField(default=1),
),
]
| UTF-8 | Python | false | false | 382 | py | 103 | 0006_solicitud_estado.py | 4 | 0.58377 | 0.5 | 0 | 18 | 20.222222 | 49 |
presencelearning/django_template | 10,264,971,876,389 | f843322016d34f529a0eac3098cb650af76a83bb | f5df0ec6cbfb90dacb574274ffab11c992890b33 | /config/settings/test.py | c669ddda5fd7419c83308d22d4e6ea38db947b02 | [] | no_license | https://github.com/presencelearning/django_template | 44a87a3621daaa549837ac31395a6d9397fd6134 | 34b47fd8796f577b367c7772be248d36ec3a01e9 | refs/heads/master | "2021-01-18T02:37:17.530663" | "2016-04-21T20:40:42" | "2016-04-21T20:40:42" | 44,154,871 | 0 | 0 | null | false | "2017-02-09T20:03:27" | "2015-10-13T05:49:00" | "2016-02-24T18:16:01" | "2017-02-09T20:02:06" | 48 | 0 | 0 | 1 | Python | null | null | # -*- coding: utf-8 -*-
'''
Production Configurations
- Use djangosecure
'''
from __future__ import absolute_import, unicode_literals
from .common import * # noqa
# django-secure
# ------------------------------------------------------------------------------
INSTALLED_APPS += ("djangosecure", )
MIDDLEWARE_CLASSES = (
# Make sure djangosecure.middleware.SecurityMiddleware is listed first
'djangosecure.middleware.SecurityMiddleware',
) + MIDDLEWARE_CLASSES
# set this to 60 seconds and then to 518400 when you can prove it works
SECURE_HSTS_SECONDS = 60
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool("DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS", default=True)
SECURE_FRAME_DENY = env.bool("DJANGO_SECURE_FRAME_DENY", default=True)
SECURE_CONTENT_TYPE_NOSNIFF = env.bool("DJANGO_SECURE_CONTENT_TYPE_NOSNIFF", default=True)
SECURE_BROWSER_XSS_FILTER = True
SESSION_COOKIE_SECURE = False
SESSION_COOKIE_HTTPONLY = True
SECURE_SSL_REDIRECT = env.bool("DJANGO_SECURE_SSL_REDIRECT", default=True)
# SITE CONFIGURATION
# ------------------------------------------------------------------------------
# Hosts/domain names that are valid for this site
# See https://docs.djangoproject.com/en/1.6/ref/settings/#allowed-hosts
ALLOWED_HOSTS = [".presencetest.com"]
# END SITE CONFIGURATION
INSTALLED_APPS += ("gunicorn", )
CELERY_ALWAYS_EAGER = False
RAVEN_CONFIG = {
'dsn': 'https://43d81b91dc29489fbd2ded5844a14509:4dee1a4c284b4cb3bb45ca4f841d2805@app.getsentry.com/13216', # DSN for test
'release': env('DEPLOYED_SHA', default='')
}
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'root': {
'level': 'WARNING',
'handlers': ['sentry'],
},
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
},
'sentry': {
'level': 'ERROR',
'class': 'raven.contrib.django.raven_compat.handlers.SentryHandler',
},
},
'loggers': {
'celery': {
'handlers' : ['console'],
'propagate' : True,
},
'': {
'handlers': ['sentry'],
'level': 'WARNING',
'propagate': False,
},
}
}
# Your production stuff: Below this line define 3rd party library settings
| UTF-8 | Python | false | false | 2,304 | py | 26 | test.py | 20 | 0.59592 | 0.570313 | 0 | 78 | 28.538462 | 126 |
vivekaxl/Bellwether-Config | 17,617,955,848,350 | 6694fff6d737bb7aa24ea6edab795fc89a975c00 | 376c291b587ff6d0addf9f528a7691e3310a32dd | /RQ3/Models.py | a63e132ad64d1dec9e59bfa9d009907cbca7e56b | [] | no_license | https://github.com/vivekaxl/Bellwether-Config | f5163eba3976584cec5dbd5be117179c7d3415b2 | a69a50cb7525d86b5ab621f77093026c37e5e8bd | refs/heads/master | "2021-05-08T20:15:48.272789" | "2018-12-27T22:20:44" | "2018-12-27T22:20:44" | 119,597,569 | 1 | 1 | null | false | "2018-02-24T21:27:14" | "2018-01-30T21:37:45" | "2018-02-13T00:08:47" | "2018-02-24T21:27:14" | 22,763 | 1 | 1 | 5 | OpenEdge ABL | false | null | import scipy as sp
import numpy as np
from pdb import set_trace
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.gaussian_process import GaussianProcessRegressor
from kernel import SEAMS_Kernel
class Model:
@classmethod
def train_prediction_model(cls, data_source, T=5):
"""
Train a prediction model using Regression Tree
:param data_source: A pandas dataframe of the source dataset
:type data_source: Pandas DataFrame
:param T: Training coefficient
:type T: int
:return: Decision Tree Model
"""
clf = DecisionTreeRegressor()
N_f = len(data_source.columns)
n_samples = T * N_f
sampled = data_source.sample(n=n_samples)
indep_vars = sampled[sampled.columns[:-1]]
depend_var = sampled[sampled.columns[-1]]
return clf.fit(X=indep_vars, y=depend_var)
@classmethod
def train_transfer_model(cls, p_src, p_tgt):
"""
Train a transfer model to transfer predictions to target from source
:param p_src: performance value of configs C on source
:type p_src: list
:param p_tgt: performance value of configs C on target
:type p_tgt: list
:return: Linear Regression Model
"""
clf = LinearRegression()
return clf.fit(X=p_src.reshape(-1, 1), y=p_tgt.reshape(-1, 1))
@classmethod
def train_gaussproc_model(cls, src_config, tgt_config):
"""
Train a Gaussian processes model to transfer
predictions to target from source
:param src_config: Source configurations
:type src_config: pandas.core.frame.DataFrame
:param tgt_config: Target configurations
:type tgt_config: pandas.core.frame.DataFrame
:rtype: sklearn gaussian processes model
"""
src_indep_vars = src_config[src_config.columns[:-1]]
src_depend_var = src_config[src_config.columns[-1]]
tgt_depend_var = tgt_config[tgt_config.columns[-1]]
corr = np.mean(np.correlate(src_depend_var, tgt_depend_var))
kernel = SEAMS_Kernel(corr)
clf = GaussianProcessRegressor(kernel=kernel)
return clf.fit(X=src_indep_vars, y=src_depend_var)
@classmethod
def train_baseline_model(cls, src_config):
"""
Train a Gaussian processes model to transfer
predictions to target from source
:param src_config: Source configurations
:type src_config: pandas.core.frame.DataFrame
:param tgt_config: Target configurations
:type tgt_config: pandas.core.frame.DataFrame
:rtype: sklearn decisiontree model
"""
src_indep_vars = src_config[src_config.columns[:-1]]
src_depend_var = src_config[src_config.columns[-1]]
clf = DecisionTreeRegressor()
return clf.fit(src_indep_vars, src_depend_var)
| UTF-8 | Python | false | false | 3,005 | py | 1,613 | Models.py | 35 | 0.649251 | 0.645258 | 0 | 84 | 34.77381 | 76 |
pombredanne/trinity | 7,035,156,453,278 | 490a35e80c5c9fe29de00765a7a73d1bd909bc43 | 5cf6034c353ffd4f30c468df22a31c0176ce7507 | /tests/test_stat.py | 52b98c5f5445e5310f3bccfbd32e7b20a930ab9a | [
"MIT"
] | permissive | https://github.com/pombredanne/trinity | 5300aa507dd0f1c44102a9582274b5b659dd90bc | cd419e565e79d76e29da085fb33dd34bea683fd0 | refs/heads/master | "2021-01-14T14:07:09.008250" | "2011-05-13T15:50:39" | "2011-05-13T15:50:39" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from nose.tools import ok_, eq_
from tornado.httpclient import HTTPRequest
import tornado.testing
import json
from tests.test_node import NODE_DATA
from tests.base import BaseTrinityTest
class StatHandlerTest(BaseTrinityTest):
def setUp(self):
super(StatHandlerTest, self).setUp()
self.http_client.fetch(HTTPRequest(
self.get_url('/node'),
'POST',
body=json.dumps(NODE_DATA)), self.stop)
self.wait()
def test_topics_stat(self):
self.http_client.fetch(self.get_url(
'/node/%s/stats?stat=%s' % (NODE_DATA['id'], 'topics')),
self.stop)
response = self.wait()
eq_(response.code, 200)
| UTF-8 | Python | false | false | 719 | py | 20 | test_stat.py | 17 | 0.61057 | 0.606398 | 0 | 23 | 30.26087 | 72 |
jaredstewartcase/project-3 | 1,632,087,606,163 | 21c228b09d2cc7a2d68e4614c18f0a2f7f8ca5b9 | 3b28c64adfecdbe49eb93f123a2d96303d4878f1 | /final-master/final/app.py | cb7f1c63350746a27f96c1c7fc6e1e7a8e1c6dbe | [] | no_license | https://github.com/jaredstewartcase/project-3 | 5c30e2323fe3fcac4882f47356c293d3748b03f8 | 22bd53ec434863264adbc453cb4b150c3c7b1870 | refs/heads/master | "2020-04-20T23:44:47.104756" | "2019-02-16T15:53:29" | "2019-02-16T15:53:29" | 169,176,447 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Example adapted from http://flask.pocoo.org/docs/0.12/patterns/fileuploads/
# @NOTE: The code below is for educational purposes only.
# Consider using the more secure version at the link above for production apps
import matplotlib.pyplot as plt
from sklearn import tree
from sklearn.linear_model import LinearRegression
import pandas as pd
import numpy as np
import pickle
import os
from keras.models import load_model
from flask import Flask, request, render_template,json
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = 'uploads'
def train_model():
arr = []
df = pd.read_csv("stats1.csv")
target_names = df["TD"]
data = df.drop("TD", axis=1)
feature_names = data.columns
data = df.values
X = data[:, 0:11]
y = data[:, 11]
y = y.astype('int')
X = X.astype('int')
model = LinearRegression()
model.fit(X, y)
score = model.score(X, y)
# print(f"R2 Score: {score}")
arr.append(X)
arr.append(y)
return arr
@app.route('/data')
def landing_page():
df = pd.read_csv("stats2.csv")
# a=[]
# a.append(df.to_json(orient='records', lines=True))
# a
response = app.response_class(
# response=json.dumps(df.to_json(orient='index')),
response=df.to_json(orient='index'),
status=200,
mimetype='application/json'
)
return response
@app.route('/', methods=['GET', 'POST'])
def webprint():
arr = train_model()
# load model
filename = 'finalized_model.sav'
loaded_model = pickle.load(open(filename, 'rb'))
result = loaded_model.score(arr[0], arr[1])
print(result)
# make prediction
li = []
atemp = request.form.get('att')
comp = request.form.get('cmp')
perc = request.form.get('pct')
yarsd = request.form.get('yds')
yardsatt = request.form.get('ypa')
inter = request.form.get('inter')
interper = request.form.get('intpct')
longth = request.form.get('lg')
sack = request.form.get('sack')
loss = request.form.get('loss')
rate = request.form.get('rate')
tchdper = request.form.get('tprc')
li.append(atemp)
li.append(comp)
li.append(perc)
li.append(yarsd)
li.append(yardsatt)
li.append(tchdper)
li.append(inter)
li.append(interper)
li.append(longth)
li.append(sack)
li.append(loss)
li.append(rate)
mat = np.array(li)
x = np.array(['1.1', '2.2', '3.3'])
y = x.astype(np.float)
mat_con = mat.astype(np.float)
# print(loaded_model.predict([mat_con]))
if request.method =='POST':
# print(loaded_model.predict([y]))
print(mat_con)
model = load_model("td_predict.h5")
val = model.predict([[mat_con]])
data = {"Predicted Touchdowns":str(val), "Model Type": "Sequential","Loaded Model":"td_predict.h5", "Epochs": "500"}
print(data)
response = app.response_class(
response=json.dumps(data),
status=200,
mimetype='application/json'
)
return response
return render_template('index.html')
if __name__ == "__main__":
app.run(debug=True) | UTF-8 | Python | false | false | 3,113 | py | 8 | app.py | 2 | 0.61452 | 0.604562 | 0 | 130 | 22.953846 | 124 |
mrcartoonster/automatingtheboringstuffwithpython | 2,860,448,235,805 | d8386daeadda3c10cde3f7fc921cd0fe8bec62df | e5b90a8e6ab3773ef263f012e774275727036720 | /Pictgrid.py | 3485ec27269e79cfdebe41bd318262f59ea416ad | [] | no_license | https://github.com/mrcartoonster/automatingtheboringstuffwithpython | 606272f8b3134d9fd38b76c2c7a3aa6036fbe9bb | 5d99dab6f2492a616ab94f1e74feb01eee5f9616 | HEAD | "2016-09-16T16:59:33.583299" | "2016-09-03T08:36:19" | "2016-09-03T08:36:19" | 65,518,623 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #adding stuff again
#Second practice project in list chapter.
'''Say you have a list of lists where each value in the inner lists is a one-character string like this:'''
grid = [['.','.','.','.','.','.'],
['.','O','O','.','.','.'],
['O','O','O','O','.','.'],
['O','O','O','O','O','.'],
['.','O','O','O','O','O'],
['O','O','O','O','O','.'],
['O','O','O','O','.','.'],
['.','O','O','.','.','.'],
['.','.','.','.','.','.']]
'''Output should be:
..OO.OO..
.OOOOOOO.
.OOOOOOO.
..OOOOO..
...OOO...
....O....'''
x = 0
while x != 6:
for i in range(9):
print(grid[i][x],end='')
print()
x += 1
| UTF-8 | Python | false | false | 693 | py | 6 | Pictgrid.py | 4 | 0.352092 | 0.34632 | 0 | 29 | 22.862069 | 107 |
WeiHsinChen/Leetcode-Practice | 9,818,295,251,942 | 781691e34bcc9358f04a5e4c0daf4962f0ef467a | 9bae3f425ac259fe4eb79c51751e487836dd617f | /Python/118_Pascal's Triangle.py | fa25b4708e7794f79a38b37fa7d65bc1756bb3fd | [] | no_license | https://github.com/WeiHsinChen/Leetcode-Practice | bc5403347f2a80fe42bfb23b2566aef00e155b55 | 96c9dc236afa6da8b9a57f6dfced915f9a424ec3 | refs/heads/master | "2015-08-23T03:26:51.839596" | "2015-07-22T16:52:24" | "2015-07-22T16:52:24" | 33,717,180 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # Problem: https://leetcode.com/problems/pascals-triangle/
# Complexity: O(n^2)
class Solution:
# @param {integer} numRows
# @return {integer[][]}
def generate(self, numRows):
res = []
for i in xrange(numRows):
temp = []
for j in xrange(i+1):
if j in (0, i):
temp.append(1)
else:
temp.append(res[i-1][j-1]+res[i-1][j])
res.append(temp)
return res
temp = Solution()
print temp.generate(5) | UTF-8 | Python | false | false | 431 | py | 179 | 118_Pascal's Triangle.py | 178 | 0.610209 | 0.591647 | 0 | 24 | 17 | 58 |
Alex-Carter01/Assignment-Planner | 14,568,529,099,986 | e0b008318f0030b555a498b6047a704d56dc7da0 | e27c3e5c6a4fdc48c27449a4db0316269e4197f2 | /assignment-planner.py | b9f1089176aeb05e34ab331356a03300e0c17c6a | [] | no_license | https://github.com/Alex-Carter01/Assignment-Planner | fa2c50d1f11216e9c3020d18633281c0eb498337 | dcfe21f9dab13e0b69408504ea53c6124aa09c09 | refs/heads/master | "2021-01-13T16:31:29.164771" | "2017-01-18T20:06:40" | "2017-01-18T20:06:40" | 79,379,128 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import webapp2
import logging
import re
import cgi
import jinja2
import os
import random
import time
import string
import hashlib
import hmac
import Cookie
from google.appengine.ext import db
import sys
import urllib2
import socket
import select
import json
from xml.dom import minidom
toolbar = """
<a class="tool-link" href="/">home </a>|
<a class="tool-link" href="/agenda">agenda </a>|
<a class="tool-link" href="/logout">logout</a>
"""
toolbar2 = """
<a class="tool-link" href="/">home </a>|
<a class="tool-link" href="/login">login </a>|
<a class="tool-link" href="/signup">signup</a>
"""
## see http://jinja.pocoo.org/docs/api/#autoescaping
def guess_autoescape(template_name):
if template_name is None or '.' not in template_name:
return False
ext = template_name.rsplit('.', 1)[1]
return ext in ('xml', 'html', 'htm')
JINJA_ENVIRONMENT = jinja2.Environment(
autoescape=guess_autoescape, ## see http://jinja.pocoo.org/docs/api/#autoxscaping
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions=['jinja2.ext.autoescape'])
USER_RE = re.compile(r"^[a-zA-Z0-9_-]{3,20}$") # 3-20 characters (A-Za-z0-9_-)
def valid_username(username):
return USER_RE.match(username)
PASSWORD_RE = re.compile(r"^.{4,20}$") # 4-20 characters (any)
def valid_password(username):
return PASSWORD_RE.match(username)
EMAIL_RE = re.compile(r"^[\S]+@[\S]+\.[\S]+$")
def valid_email(username):
return EMAIL_RE.match(username)
class Handler(webapp2.RequestHandler):
def write(self, *items):
self.response.write(" : ".join(items))
def render_str(self, template, **params):
tplt = JINJA_ENVIRONMENT.get_template('templates/'+template)
return tplt.render(params)
def render(self, template, **kw):
self.write(self.render_str(template, **kw))
def make_salt():
return ''.join(random.choice(string.hexdigits) for _ in range(25))
def make_pw_hash(name, pw, salt=None):
if not salt:
salt = make_salt()
return hashlib.sha256(name+pw+salt).hexdigest()+'|'+salt
def valid_pw(name, pw, h):
salt = h.split('|')[1]
return h == make_pw_hash(name, pw, salt)
def hash_str(s):
return hmac.new(str(s)).hexdigest()
def make_secure_val(s):
return s+'|'+hash_str(s)
def check_secure_val(h):
val = h.split('|')[0]
if (h == make_secure_val(val)):
return val
WEBSITE_REGEX = re.compile(r"^(http|https)://www[.]")
def valid_url(url):
logging.info("*** regex match: "+str(bool(WEBSITE_REGEX.match(url))))
return bool(WEBSITE_REGEX.match(url))
def check_login_handle(self):
cook = self.request.cookies.get('user_id','0')
if check_secure_val(cook):
#user is logged in
us_id = cook.split('|')[0]
user = MyUsers.get_by_id(int(us_id))
return user
else:
#user is not logged in
self.redirect('/')
time.sleep(0.2)
return False
class MyUsers(db.Model):
username = db.StringProperty()
pwhashsalt = db.StringProperty()
email = db.StringProperty()
created = db.DateTimeProperty(auto_now_add = True)
class MainPage(Handler):
def get(self):
logging.info("********** MainPage GET **********")
cook = self.request.cookies.get('user_id','0')
if check_secure_val(cook):
#user is logged in1
us_id = cook.split('|')[0]
username = MyUsers.get_by_id(int(us_id))
else:
#user is not logged in
username = False
self.render("home.html", username=username, toolbar = toolbar, toolbar2 = toolbar2)
class Agenda(Handler):
def get(self):
logging.info("enter assignment/calendar get req")
username = check_login_handle(self)
def post(self):
logging.info("enter agenda post handler")
class SignUp(Handler):
def write_signup(self, username_error_msg="", password_error_msg="", verify_error_msg="", \
email_error_msg="", user_username="", user_email=""):
cook = self.request.cookies.get('user_id','0')
if check_secure_val(cook):
#user is logged in
us_id = cook.split('|')[0]
user = MyUsers.get_by_id(int(us_id))
self.redirect('|')#they are already logged in
else:
template_values = {'error_username': username_error_msg,
'error_password': password_error_msg,
'error_verify' : verify_error_msg,
'error_email' : email_error_msg,
'username_value': user_username,
'email_value' : user_email}
self.render("signup.html", toolbar = toolbar, toolbar2 = toolbar2)
def get(self):
logging.info("********** SignUp Page GET **********")
self.write_signup()
def post(self):
logging.info("********** SignUp Page POST **********")
user_username = self.request.get('username')
user_password = self.request.get('password')
user_verify = self.request.get('verify')
user_email = self.request.get('email')
user_username_v = valid_username(user_username)
user_password_v = valid_password(user_password)
user_verify_v = valid_password(user_verify)
user_email_v = valid_email(user_email)
username_error_msg = password_error_msg = verify_error_msg = email_error_msg = ""
if not(user_username_v):
username_error_msg = "That's not a valid username."
if (user_password != user_verify):
password_error_msg = "Passwords do not match."
elif not(user_password_v):
password_error_msg = "That's not a valid password."
if (user_email != "") and not(user_email_v):
email_error_msg = "That's not a valid email."
## this should also work userQuery = db.GqlQuery("SELECT * FROM MyUsers WHERE username = :1", user_username)
userQuery = db.GqlQuery("SELECT * FROM MyUsers WHERE username = '%s'" % user_username)
if not(userQuery.count() == 0 or userQuery.count() == 1):
logging.info("***DBerr(signup) username = " + user_username + " (count = " + str(userQuery.count()) + ")" )
user = userQuery.get() ## .get() returns Null if no results are found for the database query
if user and user.username == user_username: ## not really necessay to see if usernames are equal, since query would only have returned if there was a match
user_username_v = False
username_error_msg = "That user already exists."
if not(user_username_v and user_password_v and user_verify_v and ((user_email == "") or user_email_v) and (user_password == user_verify)):
self.write_signup(username_error_msg, password_error_msg, verify_error_msg, \
email_error_msg, user_username, user_email)
else:
pw_hash = make_pw_hash(user_username, user_password)
u = MyUsers(username=user_username, pwhashsalt=pw_hash, email=user_email)
u.put()
id = u.key().id()
self.response.headers.add_header('Set-Cookie', 'user_id=%s; Max-Age=604800; Path=/' % make_secure_val(str(id)))
self.redirect("/")
class LogIn(Handler):
def write_login(self, error=""):
self.render("login.html", toolbar = toolbar, toolbar2 = toolbar2)
def get(self):
logging.info("********** LogIn Page GET **********")
cook = self.request.cookies.get('user_id','0')
if check_secure_val(cook):
#user is logged in
us_id = cook.split('|')[0]
user = MyUsers.get_by_id(int(us_id))
self.redirect('|')#they are already logged in
else:
self.write_login()
def post(self):
logging.info("***DBG: LogIn Page POST")
user_username = self.request.get('username')
user_password = self.request.get('password')
userQuery = db.GqlQuery("SELECT * FROM MyUsers WHERE username = '%s'" % user_username)
if not(userQuery.count() == 0 or userQuery.count() == 1):
logging.info("***DBerr (login) username = " + user_username + " (count = " + str(userQuery.count()) + ")" )
user = userQuery.get() ## .get() returns Null if no results are found for the database query
logging.info(">>> username=" + str(user_username) + " type=" + str(type(user_username)))
if user and user.username == user_username and valid_pw(user_username,user_password,user.pwhashsalt): ## not really necessay to see if usernames are equal, since query would only have returned if there was a match
id = user.key().id()
self.response.headers.add_header('Set-Cookie', 'user_id=%s; Max-Age=604800;Path=/' % make_secure_val(str(id)))
self.redirect("/")
else:
self.write_login("Invalid login")
class LogoutPage(Handler):
def get(self):
self.response.headers.add_header('Set-Cookie', 'user_id=; Path=/')
self.redirect("/")
application = webapp2.WSGIApplication([
('/', MainPage),
(r'/agenda/?', Agenda),
(r'/signup/?', SignUp),
(r'/login/?', LogIn),
(r'/logout/?', LogoutPage),
], debug=True)
| UTF-8 | Python | false | false | 9,348 | py | 7 | assignment-planner.py | 1 | 0.597133 | 0.589859 | 0 | 246 | 36 | 220 |
c-okelly/Programming-1-Python | 15,616,501,113,218 | 810142350e7183b496ff852a7d2b085ea58f8cbc | e9797b2fdf560f2a9f685ac7ff4841304a914b81 | /Lab 13/P13P1.py | f6cdca8b599734e5dc018dc7b6778c11898432da | [] | no_license | https://github.com/c-okelly/Programming-1-Python | 38d825e2fe5fb16ae3fc67d55581fb0ebb5478ae | 12b848f2a58acf5e9c12a585318c54886acff90e | refs/heads/master | "2021-01-10T12:55:46.039218" | "2015-11-28T21:20:03" | "2015-11-28T21:20:03" | 44,961,728 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | __author__ = 'conor'
"""
Program to print out the largest of two numbers entered by the user
Uses a function max
implemented using code from class
"""
def max(a, b): #take to arguemtns and returns the largest
if a > b:
return a
else:
return b
# Prompt the user for two numbers
def get_input(): # Function to get input number
number = float(raw_input("Please enter a number (float) \n"))
return number
# Prompt the user for two numbers
number1 = get_input()
number2 = get_input()
largest_number = max(number1, number2)
print("The largest of %0.2f and %0.2f is %0.2f" % (number1, number2, largest_number))
print("Finished")
| UTF-8 | Python | false | false | 670 | py | 65 | P13P1.py | 60 | 0.670149 | 0.652239 | 0 | 31 | 20.580645 | 85 |
Feynman1999/Personal-Blog-Website | 3,169,685,888,844 | 862f4117be95557d985b44b99d9dee06655941d6 | 0f6dcad05d35c000c15875e7f989e9dd3b11ec10 | /mysite/likes/views.py | e934f86d734509daccdfd65b8aaf0bc22247bb2a | [] | no_license | https://github.com/Feynman1999/Personal-Blog-Website | 4b1e331daf801fae94345c6bd10134f95f7b7caf | 80830affb1e029542083c5ee94c2cdf7e17786a2 | refs/heads/master | "2021-07-24T23:50:56.570289" | "2020-05-13T03:47:11" | "2020-05-13T03:47:11" | 168,927,040 | 3 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.shortcuts import render
from .models import LikeCount, LikeRecord
from django.contrib.contenttypes.models import ContentType
from django.db.models import ObjectDoesNotExist
from django.http import JsonResponse
def SuccessResponse(liked_num):
data = {}
data['status'] = 'SUCCESS'
data['liked_num'] = liked_num
return JsonResponse(data)
def ErrorResponse(code, message):
data = {}
data['status'] = 'ERROR'
data['code'] = code
data['message'] = message
return JsonResponse(data)
def like_change(request):
# 获取数据
user = request.user
if not user.is_authenticated:
return ErrorResponse(400, 'You have not logged in yet!')
content_type = request.GET.get('content_type')
object_id = int(request.GET.get('object_id'))
try:
content_type = ContentType.objects.get(model = content_type)
model_class = content_type.model_class()
model_obj = model_class.objects.get(pk=object_id)
except ObjectDoesNotExist:
return ErrorResponse(401, 'Object Does Not Exist')
if request.GET.get('is_like') == 'true': # 要点赞
like_record, is_created = LikeRecord.objects.get_or_create(content_type=content_type , object_id=object_id, user=user)
if is_created: # 正常情况 未点赞过 进行点赞
like_count, is_created = LikeCount.objects.get_or_create(content_type=content_type , object_id=object_id)
like_count.liked_num += 1
like_count.save()
return SuccessResponse(like_count.liked_num)
else: # 非正常情况 已点赞过, 不能重复点赞
return ErrorResponse(402, 'You have already liked it.')
else:
#要取消点赞
if LikeRecord.objects.filter(content_type=content_type, object_id=object_id, user=user).exists():
# 正常情况 有点赞过 取消点赞
like_record_obj = LikeRecord.objects.get(content_type=content_type , object_id=object_id, user=user)
like_record_obj.delete()
# 点赞总数-1
like_count, is_created = LikeCount.objects.get_or_create(content_type=content_type , object_id=object_id)
if not is_created: # 正常情况
like_count.liked_num -= 1
like_count.save()
return SuccessResponse(like_count.liked_num)
else: # 非正常情况 新创建的
return ErrorResponse(404, 'data error')
else: # 非正常情况 本来就没点赞 不能取消点赞
return ErrorResponse(403, 'You have not liked it.') | UTF-8 | Python | false | false | 2,619 | py | 61 | views.py | 29 | 0.641193 | 0.633837 | 0 | 61 | 39.131148 | 126 |
itsolutionscorp/AutoStyle-Clustering | 2,851,858,327,713 | 5bc8d800ed82ec49c15edc49b0bb721067f25627 | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/allergies/cf9d8f3a0d444ed4aa1459490aed6ebd.py | e6fe0c0279d46be856c9290c1677f0e536737728 | [] | no_license | https://github.com/itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | "2020-12-11T07:27:19.291038" | "2016-03-16T03:18:00" | "2016-03-16T03:18:42" | 59,454,921 | 4 | 0 | null | true | "2016-05-23T05:40:56" | "2016-05-23T05:40:56" | "2016-05-19T22:14:37" | "2016-05-19T22:35:40" | 133,854 | 0 | 0 | 0 | null | null | null | class Allergies():
def __init__(self, allergies_val):
_all_allergies = ['eggs', 'peanuts', 'shellfish', 'strawberries', 'tomatoes', 'chocolate', 'pollen', 'cats']
self.allergies_as_bin = bin(allergies_val).lstrip('0b')
self.list = []
a = list(self.allergies_as_bin)
a.reverse() # Lowest significant bit first
for index, allergy in enumerate(a[0:8]):
if allergy == '1':
self.list.append(_all_allergies[index])
def is_allergic_to(self, allergy_str):
for client_allergy in self.list:
if client_allergy == allergy_str:
return True
return False
| UTF-8 | Python | false | false | 674 | py | 54,209 | cf9d8f3a0d444ed4aa1459490aed6ebd.py | 21,653 | 0.569733 | 0.563798 | 0 | 20 | 32.7 | 116 |
pohanchi/2018_DSP_FINAL_Mosaic | 11,278,584,133,706 | 4e258a087dbf26cc918b83c61351f80a34aa53be | d806163ac2da877b78eec9b4005a300de1d2006f | /DSP/DSP/crop.py | 10d1024c223e071444de1e08ee33a3077aaee08b | [] | no_license | https://github.com/pohanchi/2018_DSP_FINAL_Mosaic | 319cac2cb4569fc280bdb251b82e03a5c1848c35 | 63f40d8cb31809d5081b50feef60a1771c3c8684 | refs/heads/master | "2020-05-09T09:39:29.655391" | "2019-04-12T16:19:00" | "2019-04-12T16:19:00" | 181,010,147 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
from PIL import Image, ImageFont, ImageDraw, ImageEnhance
import time
img_names=os.listdir('COMMON_image')
img_names_1 = os.listdir('METHOD1_image')
img_names_2 = os.listdir('METHOD2_image')
img_names_3 = os.listdir('METHOD3_image')
img_names_4 = os.listdir('METHOD4_image')
img_names_5 = os.listdir('origin_image')
for i in img_names_5:
image=Image.open('origin_image/'+i)
(w,h)=image.size
Ratio = 4
new_w = int(w/Ratio)
new_h = int(h/Ratio)
small = image.resize( (new_w, new_h), Image.BILINEAR )
small_=ImageDraw.Draw(small)
box = (0,h/2,w/2,h)
small_.rectangle(((0, new_h/2), (new_w/2, new_h),), fill=None,outline='red',width=10)
time.sleep(1.5)
cropped = image.crop(box)
box2 = (0,h/8,w/8,h/4)
cropped_=ImageDraw.Draw(cropped)
cropped_.rectangle(((0, h/8), (w/8, h/4)),fill=None,outline='red',width=10)
time.sleep(1.5)
cropped_2 = cropped.crop(box2)
box3 = (0,h/16,w/16,h/8)
cropped_2_=ImageDraw.Draw(cropped_2)
cropped_2_.rectangle(((0, h/16), (w/16, h/8)),fill=None,outline='red',width=10)
time.sleep(1.5)
cropped_3 = cropped_2.crop(box3)
small.save('origin_image/'+'small_'+i)
cropped.save('origin_image/'+'small_crop_'+i)
cropped_2.save('origin_image/'+'small_crop2_'+i)
cropped_3.save('origin_image/'+'small_crop3_'+i)
| UTF-8 | Python | false | false | 1,354 | py | 5 | crop.py | 5 | 0.630724 | 0.584195 | 0 | 39 | 33.564103 | 89 |
LUCASLORD/IntroCCPytonCoursera | 1,511,828,525,516 | 2907b22a3c81a24c233f65958a123daba8e635fa | 735b88282610a255a9c49f91df5677f4cb669003 | /Parte1/inverte.py | fb50227a13f3bb66c8336be4459753e2c1629387 | [] | no_license | https://github.com/LUCASLORD/IntroCCPytonCoursera | 8150b8eb81c5a74e02372f2c915ce042dd7a9249 | 702543bd3781a70797dba9e8d84e5ed9929401b6 | refs/heads/master | "2020-03-23T15:39:37.342748" | "2018-08-17T00:57:28" | "2018-08-17T00:57:28" | 141,765,059 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | lista = []
cond = True
while cond:
num = int(input("Digite um número: "))
if num != 0:
lista.append(num)
else:
cond = False
for i in range (len(lista), 0, -1):
print(lista[i-1]) | UTF-8 | Python | false | false | 212 | py | 28 | inverte.py | 27 | 0.535545 | 0.516588 | 0 | 11 | 18.272727 | 43 |
lighthou/uqcs-mobile-server | 19,009,525,271,390 | 4d2f67b33a12e86434348e7c70f9a8c0590aeb5f | aa656d08f2abc60cdd16c7444213f2081bf72256 | /app.py | 251be60001975ce45b4d5a6b5839c038f6e946f2 | [] | no_license | https://github.com/lighthou/uqcs-mobile-server | e3ead9c5a920678d0513433d396d73d3b1b05907 | 86d11ac9681d6c3c2fa38cfc0bf802f1f685f2eb | refs/heads/master | "2021-06-06T12:12:55.099529" | "2019-01-02T08:56:54" | "2019-01-02T08:56:54" | 157,636,059 | 0 | 0 | null | false | "2021-06-01T22:59:57" | "2018-11-15T01:46:11" | "2019-02-10T04:37:21" | "2021-06-01T22:59:57" | 27 | 0 | 0 | 5 | Python | false | false | from flask import Flask, jsonify, request, Response
import os
from googleapiclient.discovery import build
from httplib2 import Http
from oauth2client import file, client, tools
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import requests
from requests.auth import HTTPBasicAuth
from functools import wraps
import git
app = Flask(__name__)
SCOPES = 'https://www.googleapis.com/auth/calendar.readonly'
def check_auth(username, password):
"""This function is called to check if a username /
password combination is valid.
"""
response = requests.get('https://api.github.com/teams/1825316/members', auth=HTTPBasicAuth(username, password))
if response.status_code != 200:
return False
for user in response.json():
if str(user['login']) == username:
os.environ["GIT_USERNAME"] = username
os.environ["GIT_PASSWORD"] = password
return True
return False
def authenticate():
"""Sends a 401 response that enables basic auth"""
return Response(
'Could not verify your access level for that URL.\n'
'You have to login with proper credentials', 401,
{'WWW-Authenticate': 'Basic realm="Login Required"'})
def requires_auth(f):
@wraps(f)
def decorated(*args, **kwargs):
auth = request.authorization
if not auth or not check_auth(auth.username, auth.password):
return authenticate()
return f(*args, **kwargs)
return decorated
@app.route('/')
def hello_world():
return 'Hello World!'
@app.route('/sign_in', methods=['GET'])
@requires_auth
def sign_in():
reset_creds()
return jsonify([])
@app.route('/events', methods=['GET'])
@requires_auth
def get_events():
reset_creds()
store = file.Storage('token.json')
creds = store.get()
if not creds or creds.invalid:
flags = tools.argparser.parse_args(['--noauth_local_webserver'])
flow = client.flow_from_clientsecrets(os.environ['GOOGLE_APPLICATION_CREDENTIALS'], SCOPES)
creds = tools.run_flow(flow, store, flags)
service = build('calendar', 'v3', http=creds.authorize(Http()))
# Call the Calendar API
events_result = service.events().list(calendarId='q3n3pce86072n9knt3pt65fhio@group.calendar.google.com',
timeMin='2017-01-01T10:00:00Z',
singleEvents=True,
orderBy='startTime').execute()
events = events_result.get('items', [])
print(events_result)
return_events = []
return_keys = ['start', 'summary', 'description', 'location', 'id']
for event in events:
return_events.append({key: event[key] for key in return_keys if key in event})
return jsonify(return_events)
@app.route('/members', methods=['GET'])
@requires_auth
def get_members():
reset_creds()
admin_list_url = "https://join.uqcs.org.au/admin/list"
options = Options()
options.add_argument("--headless")
options.add_argument('--no-sandbox') # required when running as root user. otherwise you would get no sandbox
driver = webdriver.Chrome(chrome_options=options)
driver.implicitly_wait(30)
driver.get(admin_list_url)
while driver.current_url != "https://join.uqcs.org.au/admin/list":
username = driver.find_element_by_name('username')
username.send_keys(os.environ['UQCS_USER'])
password = driver.find_element_by_name('password')
password.send_keys(os.environ['UQCS_PASS'])
submit_button = driver.find_element_by_name('submit')
submit_button.click()
driver.implicitly_wait(30)
driver.get(admin_list_url)
driver.implicitly_wait(30)
members = []
for row in driver.find_elements_by_tag_name('tr'):
cells = row.find_elements_by_tag_name('td')
members.append({'first_name': cells[0].text,
'last_name': cells[1].text,
'email': cells[2].text,
'paid': False if cells[3].text == 'None' else True})
return jsonify(members[1:]) # cut off the titles of the table
@app.route('/docs', methods=['GET', 'POST'])
@requires_auth
def get_docs():
# update the repo
repo = git.Repo('../committee')
repo.remotes.origin.pull()
if request.method == 'POST':
data = request.get_json(force=True)
if 'file_name' in data and 'file_data' in data and 'commit_message' in data:
for root, dirs, files in os.walk('../committee'):
for file in files:
if str(file) == data['file_name']:
path = os.path.join(root, file)
open(path, "w").close()
f = open(path, "w")
f.write(str(data['file_data']))
f.close()
repo.git.add(os.path.abspath(path))
user_data = requests.get('https://api.github.com/users/' + os.environ['GIT_USERNAME'],
auth=HTTPBasicAuth(os.environ['GIT_USERNAME'],
os.environ['GIT_PASSWORD']))
author = git.Actor(os.environ['GIT_USERNAME'], user_data.json()['email'])
repo.index.commit(data['commit_message'], author=author, committer=author)
repo.remotes.origin.push()
reset_creds()
return Response()
else:
reset_creds()
return Response(400)
else:
# else request is get
directory_dict = {}
read_files("../committee", directory_dict)
reset_creds()
return jsonify(directory_dict)
def get_immediate_subdirectories(a_dir):
return [name for name in os.listdir(a_dir)]
def read_files(path, json):
directory_name = path.split('/')[-1]
if directory_name not in json or json[directory_name] is None:
json[directory_name] = {}
for filename in get_immediate_subdirectories(path):
if os.path.isfile(path + '/' + filename) and filename[-3:] == '.md':
with open(path + "/" + filename, 'r') as my_file:
data = my_file.read()
json[directory_name][filename] = data
continue
if os.path.isdir(path + '/' + filename):
if filename == ".git":
continue
json[directory_name][filename] = {}
read_files(path + '/' + filename, json[directory_name])
def reset_creds():
os.environ['GIT_USERNAME'] = ''
os.environ['GIT_PASSWORD'] = ''
if __name__ == '__main__':
app.run()
| UTF-8 | Python | false | false | 6,803 | py | 4 | app.py | 1 | 0.582537 | 0.573717 | 0 | 202 | 32.678218 | 115 |
CodecoolGlobal/lightweight-erp-python-flip_table | 4,784,593,596,386 | 0a0c9e7e4fd82ab6f911af39c91bc5eb5c8052bf | 15166810cf0718ab91f8d955f36c1ef973140532 | /crm/crm.py | daaeddbfc7c2d7e31825cc3ff58c2c22ff7173a7 | [] | no_license | https://github.com/CodecoolGlobal/lightweight-erp-python-flip_table | ec4ea897af2e1f27e701d9cfffc2e7d7d572241d | 33bbb7124c70c8cf818c0f1b6eefda0789fde7c2 | refs/heads/master | "2020-05-06T13:38:52.107784" | "2019-04-25T12:13:17" | "2019-04-25T12:13:17" | 180,148,549 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | """ Customer Relationship Management (CRM) module
Data table structure:
* id (string): Unique and random generated identifier
at least 2 special characters (except: ';'), 2 number, 2 lower and 2 upper case letters)
* name (string)
* email (string)
* subscribed (int): Is she/he subscribed to the newsletter? 1/0 = yes/no
"""
import ui
import data_manager
import common
def start_module():
"""
Starts this module and displays its menu.
* User can access default special features from here.
* User can go back to main menu from here.
Returns:
None
"""
table = data_manager.get_table_from_file("crm/customers.csv")
options = [
"Show table",
"Add item",
"Remove item",
"Update item",
"ID of the longest name",
"Customers subscribed to newsletter"
]
while True:
ui.print_menu("- CRM manager -", options, "Back to Main menu")
option = ui.get_inputs(["Please enter a number: "], "")
try:
if option == "1":
show_table(table)
common.go_back_in_menu()
elif option == "2":
table = add(table)
data_manager.write_table_to_file("crm/customer.csv", table)
elif option == "3":
id_to_remove = ui.get_inputs(
["Please enter the ID of the person you wish to remove: "],
""
)
if common.check_id_in_table(table, id_to_remove):
table = remove(table, id_to_remove)
data_manager.write_table_to_file("crm/customers.csv", table)
elif option == "4":
id_to_update = ui.get_inputs(
["Please enter the ID of the person you wish to update: "],
""
)
if common.check_id_in_table(table, id_to_update):
update(table, id_to_update)
data_manager.write_table_to_file("crm/customers.csv", table)
elif option == "5":
ui.print_result(get_longest_name_id(table), "\nThe ID of the longest name is: ")
common.go_back_in_menu()
elif option == "6":
ui.print_result(get_subscribed_emails(table), "")
common.go_back_in_menu()
elif option == "0":
break
else:
raise KeyError("There is no such option")
except KeyError as err:
ui.print_error_message(str(err))
def show_table(table):
"""
Display a table
Args:
table (list): list of lists to be displayed.
Returns:
None
"""
ui.print_table(
table,
["ID", "NAME", "E-MAIL", "SUBSCRIBED"]
)
def add(table):
"""
Asks user for input and adds it into the table.
Args:
table (list): table to add new record to
Returns:
list: Table with a new record
"""
input_for_new_row = ui.get_inputs(
["Name", "E-mail", "Subscribed"],
"Please enter the persons details"
)
input_for_new_row.insert(0, common.generate_random(table))
if common.confirm_option():
table.append(input_for_new_row)
return table
def remove(table, id_):
"""
Remove a record with a given id from the table.
Args:
table (list): table to remove a record from
id_ (str): id of a record to be removed
Returns:
list: Table without specified record.
"""
if common.confirm_option():
ID = 0
for person in table:
if person[ID] == id_:
table.remove(person)
return table
def update(table, id_):
"""
Updates specified record in the table. Ask users for new data.
Args:
table (list): list in which record should be updated
id_ (str): id of a record to update
Returns:
list: table with updated record
"""
new_data = ui.get_inputs(
["NAME", "E-MAIL", "SUBSCRIBED"],
"Please enter the new data to update"
)
if common.confirm_option():
ID = 0
for person in table:
if person[ID] == id_:
for person_data_index in range(len(new_data)):
person[person_data_index + 1] = new_data[person_data_index]
return table
def get_longest_name_id(table):
"""
Question: What is the id of the customer with the longest name?
Args:
table (list): data table to work on
Returns:
string: id of the longest name (if there are more than one, return
the last by alphabetical order of the names)
"""
FIRST_ELEMENT = 0
ID = 0
NAME = 1
lenght_of_names = [len(row[NAME]) for row in table]
max_len = max(lenght_of_names)
ppl_data_with_max_len_name = []
for index, longest_name in enumerate(lenght_of_names):
if longest_name == max_len:
ppl_data_with_max_len_name.append(table[index])
if len(ppl_data_with_max_len_name) == 1:
return ppl_data_with_max_len_name[ID]
else:
return common.alph_sorted_names_reversed(ppl_data_with_max_len_name)[FIRST_ELEMENT][ID]
def get_subscribed_emails(table):
"""
Question: Which customers has subscribed to the newsletter?
Args:
table (list): data table to work on
Returns:
list: list of strings (where a string is like "email;name")
"""
NAME = 1
EMAIL = 2
SUBSCRIBTION = 3
subscribed_people = []
for people in table:
if people[SUBSCRIBTION] == "1":
subscribed_people.append((people[NAME] + ";" + people[EMAIL]))
return subscribed_people
# functions supports data analyser
# --------------------------------
def get_name_by_id(id):
"""
Reads the table with the help of the data_manager module.
Returns the name (str) of the customer with the given id (str) on None om case of non-existing id.
Args:
id (str): the id of the customer
Returns:
str: the name of the customer
"""
# your code
def get_name_by_id_from_table(table, id):
"""
Returns the name (str) of the customer with the given id (str) on None om case of non-existing id.
Args:
table (list of lists): the customer table
id (str): the id of the customer
Returns:
str: the name of the customer
"""
# your code
| UTF-8 | Python | false | false | 6,598 | py | 8 | crm.py | 8 | 0.550015 | 0.546226 | 0 | 259 | 24.474903 | 102 |
tblxio/sinfo-rpi-truck | 5,463,198,420,863 | 2e0495ea3c8bd202add5305e4d5539285d83123a | f5dabc9983f46af7b387887f12d13a88acb7137a | /imuClass.py | e0e4e264e8e989810f4791f3172c0fdc1ae6f8ae | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | https://github.com/tblxio/sinfo-rpi-truck | d7fd0b9665deb88b0d8734a2a75ffed728bde2c3 | 5e024464f064ba7618a7e53b88a413d23ba20dce | refs/heads/master | "2023-05-25T16:35:58.995290" | "2021-06-18T08:19:56" | "2021-06-18T08:19:56" | 171,266,875 | 3 | 0 | MIT | false | "2023-05-22T22:30:39" | "2019-02-18T10:53:01" | "2021-11-28T16:34:07" | "2023-05-22T22:30:39" | 1,299 | 5 | 1 | 1 | Python | false | false | from componentClass import Component
import time
import sys
import RTIMU
import json
sys.path.append('.')
class Imu(Component):
"""
An implementation of the component class to acquire and publish the
data from the Inertial Measurement unit, namely the data from the
accelerometer and the gyroscope.
"""
# Setup method for this specific device
def setup(self,samplingInterval):
# This is mostly the legacy code used in the SINFO Workshop
# Load configuration file: sensor settings + calibration
SETTINGS_FILE = "RTIMULib"
self.s = RTIMU.Settings(SETTINGS_FILE)
self.imu = RTIMU.RTIMU(self.s)
self.counter = 0
self.timer = time.time()
print("IMU Name: " + self.imu.IMUName())
t_shutdown = 0
if (not self.imu.IMUInit()):
print ("IMU Init Failed, try #:{} ".format(str(t_shutdown)))
t_shutdown += 1
if t_shutdown > 9:
sys.exit(1)
else:
print "IMU Init Succeeded"
self.imu.setSlerpPower(0.02)
self.imu.setGyroEnable(True)
self.imu.setAccelEnable(True)
self.imu.setCompassEnable(True)
# Used to set up the polling interval of the sensor
# Converted from mS to seconds
# (400/self.imu.IMUGetPollInterval()) gives the sampling rate in Hz.
# We multiply by 2 in order to be slower than the sampling rate and
# guarantee a hit everytime we try to read the sensor
# In this case the sampling rate is 100Hz and we are sampling every
# 2/100Hz= 20ms
self.sampInterval = samplingInterval
self.set_topic("imu")
print "{} setup finished".format(self.name)
# Data Handling for this specific device, from collection to
# publishing to the correct MQTT Topics.
def handleData(self, timestamp):
if self.imu.IMURead():
data = self.imu.getIMUData()
(ret, _) = self.mqttHandler.publish(self.my_topic, json.dumps(
self.gen_payload_message(data, timestamp)), retain=True, qos=1)
if(ret != 0):
print "error sending {}".format(ret)
self.counter = 0
self.counter += 1
elapsed = time.time() - self.timer
self.timer = time.time()
print "{} : code {} time elapsed {}".format(
self.counter, ret, elapsed)
else:
print "ops"
self.counter = 0
# Generates the payload specific to the IMU
def gen_payload_message(self, data, timestamp):
try:
payload = {
'accel': {
'x': data.get('accel')[0],
'y': data.get('accel')[1],
'z': data.get('accel')[2]
},
'gyro': {
'x': data.get('gyro')[0],
'y': data.get('gyro')[1],
'z': data.get('gyro')[2]
},
# Note: This is a UNIX timestamp in microseconds
# 'timestamp': data.get('timestamp')
'timestamp': timestamp
}
except BaseException:
print("Received wrong data structure")
return payload
| UTF-8 | Python | false | false | 3,291 | py | 21 | imuClass.py | 17 | 0.552112 | 0.542388 | 0 | 94 | 34.010638 | 79 |
jpmh1309/mp6118_v-v_sw | 4,020,089,428,060 | f0ad51ba2dff4fa49e550641a099f79d906d52c2 | 8c6666083aa762c17d51e7e7f9990b4870092859 | /project_4/alarm.py | cb6daf46ab515020ddb75f41fa17dc5d673abec7 | [] | no_license | https://github.com/jpmh1309/mp6118_v-v_sw | f14c9faa07ec7511bab0898293f10e551c410979 | c6f4d163b793e154357515473e2bc098f39d4dcb | refs/heads/main | "2023-04-16T08:57:43.732271" | "2021-04-30T06:18:23" | "2021-04-30T06:18:23" | 347,750,557 | 1 | 0 | null | false | "2021-04-19T03:43:22" | "2021-03-14T20:48:25" | "2021-04-19T03:42:44" | "2021-04-19T03:43:22" | 92 | 1 | 0 | 0 | Python | false | false | # Costa Rica Institute of Technology
# MP-6118 Validation and Verification in Software Engineering
# Students:
# - David Martínez
# - Jose Martínez
# Project: Smart Embedded Systems Security Alarm
from repeatedtimer import RepeatedTimer
from registers import Registers
from keyboard import Keyboard
import threading
import logging
logger = logging.getLogger(__name__)
class Alarm(object):
def __init__(self, view):
self.view = view
self.keyboard = Keyboard(view)
self.state = "INITIAL_STATE"
self.check_battery()
self.view.lcd_error.setHidden(True)
self.rt = {}
# Evaluate the ALARM_STATE.
if(Registers.ALARM_STATE == "UNARMED"):
self.start_state_unarmed()
elif(Registers.ALARM_STATE == "ARMED"):
# Evaluate the operation mode.
if(Registers.OP_MODE == 0):
self.start_state_mode_0()
elif(Registers.OP_MODE == 1):
self.start_state_mode_1()
# Clean the screen
self.view.lcd_screen.display('*')
logger.info("ALARM in {} state".format(self.state))
def key_pressed(self,value):
if(self.state == "UNARMED" or self.state == "MODE_0" or self.state == "MODE_1" or self.state == "MAIN_ENTRANCE" or self.state == "ALARM"):
self.keyboard.key_pressed(value)
else:
pass
logger.info("ALARM in {} state".format(self.state))
def sensor_activated(self, sensor):
logger.info("Sensor {} activated".format(sensor))
logger.info("ALARM in {} state".format(self.state))
logger.info("Active sensors {}".format(Registers.ACTIVE_SENSOR))
if(sensor in Registers.ACTIVE_SENSOR):
if(self.state == "MODE_0"):
self.start_alarm(sensor)
elif(self.state == "MODE_1"):
self.keyboard.abort()
if(sensor == Registers.MAIN_ENTRANCE):
self.state = "MAIN_ENTRANCE"
# Start the timer and wait for the interrumption.
self.timer = threading.Timer(30.0, self.start_alarm)
logger.info("Starting 30s timer")
self.timer.start()
else:
self.start_alarm(sensor)
else:
pass
logger.info("ALARM in {} state".format(self.state))
# LLR.085, LLR-086
def start_alarm(self,sensor = 1):
self.state = "ALARM"
self.keyboard.abort()
self.view.sound_activated.setHidden(False)
self.view.sound_deactivated.setHidden(True)
logger.info("Llamando al {} : centro de supervision... Numero de usuario: {}, Numero de Sensor:{}"\
.format(Registers.CALL_CENTER_NUMBER,
Registers.USER_NUMBER,
sensor
)
)
logger.info("ALARM in {} state".format(self.state))
# LLR-067, LLR-068, LLR-069, LLR-070, LLR-071, LLR-072
def start_state_mode_0(self):
# ACTIVE_SENSORS are all the sensors.
Registers.ACTIVE_SENSOR = Registers.ZONE_0
logger.info("Registers.ACTIVE_SENSOR: {}".format(Registers.ACTIVE_SENSOR))
self.state = "MODE_0"
self.view.lcd_mode_0.setHidden(False)
self.view.lcd_mode_1.setHidden(True)
self.view.led_armed.setHidden(False)
self.view.sound_activated.setHidden(True)
self.view.sound_deactivated.setHidden(False)
# LLR-073, LLR-074, LLR-075, LLR-076, LLR-077, LLR-078, LLR-079,
# LLR-080, LLR-081, LLR-082, LLR-083
def start_state_mode_1(self):
Registers.ACTIVE_SENSOR = Registers.ZONE_1
logger.info("Registers.ACTIVE_SENSOR: {}".format(Registers.ACTIVE_SENSOR))
self.state = "MODE_1"
self.view.lcd_mode_0.setHidden(True)
self.view.lcd_mode_1.setHidden(False)
self.view.led_armed.setHidden(False)
self.view.sound_activated.setHidden(True)
self.view.sound_deactivated.setHidden(False)
def start_state_unarmed(self):
self.state = "UNARMED"
self.view.led_armed.setHidden(True)
self.view.lcd_mode_0.setHidden(True)
self.view.lcd_mode_1.setHidden(True)
self.view.sound_activated.setHidden(True)
self.view.sound_deactivated.setHidden(False)
# LLR-015
def check_battery(self):
if(self.view.battery_percentage.value() > 50):
self.view.led_battery.setHidden(True)
else:
self.stop_blink_led(self.view.led_battery)
self.view.led_battery.setHidden(False)
def refresh_alarm(self):
self.__init__(self.view)
def toggle_led(led_label):
led_label.setHidden(not led_label.isHidden())
def blink_led(self,led_label, period):
logger.info("Starting {}s timer for led {}".format(period,led_label.objectName()))
self.rt[led_label.objectName()] = RepeatedTimer(period, Alarm.toggle_led, led_label)
def stop_blink_led(self,led_label):
if self.rt:
self.rt[led_label.objectName()].stop()
if led_label:
led_label.setHidden(True)
def display_lcd(self,message,period):
self.view.lcd_screen.display(message)
if(period != 0):
self.lcd_timer = threading.Timer(period, self.clear_display)
logger.info("Starting {}s timer for LCD".format(period))
self.lcd_timer.start()
def clear_display(self):
self.view.lcd_screen.display('')
| UTF-8 | Python | false | false | 5,571 | py | 8 | alarm.py | 6 | 0.595978 | 0.579278 | 0 | 142 | 37.929577 | 146 |
abal09/countries-for-django | 9,826,885,215,449 | d70e48d4bf2a2a161753eab2b9def63fca91b134 | 1a97a606bdc5bfcac80defe29640ba0f1c494308 | /views.py | 0a9c2a1fd4dd6f4b34cffe4c83217073be960de4 | [] | no_license | https://github.com/abal09/countries-for-django | 0cf85d25c3785f96fd9626fa7a578ee65933b084 | 4b005707f2c48ec4c5fc521be9fb4c3061820a50 | refs/heads/master | "2021-01-15T11:23:21.489245" | "2013-05-15T14:27:30" | "2013-05-15T14:27:30" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django import http
import logging
# Create your views here.
def set_country(request):
"""
Just remember country in session or cookie.
Redirect to a given url while setting the chosen country in the
session or cookie. The url and the language code need to be
specified in the request parameters.
"""
next = request.REQUEST.get('next', None)
if not next:
next = request.META.get('HTTP_REFERER', None)
if not next:
next = '/'
response = http.HttpResponseRedirect(next)
if request.method == 'POST':
country_code = request.POST.get('country', None)
if country_code:
if hasattr(request, 'session'):
request.session['django_country'] = country_code
else:
response.set_cookie(settings.COUNTRY_COOKIE_NAME, country_code)
#return http.HttpResponse(request.POST)
return response
| UTF-8 | Python | false | false | 917 | py | 7 | views.py | 6 | 0.644493 | 0.644493 | 0 | 28 | 31.714286 | 79 |
Jeffrey2971/Jeffrey | 5,317,169,545,812 | db152978b034354dea515e840cf5ed9e89cd159e | 8388e810c32528a56e6601bb84695ba32c14163e | /Scattered/python/other/列表.py | 48ff296bffd34252aec92d827211b2df8c40b675 | [] | no_license | https://github.com/Jeffrey2971/Jeffrey | f3c99487a9027601c89c40466aff26b3b43ef2ae | 6966ace38330e614c83f2903c36ac6d4e214ef88 | refs/heads/master | "2021-07-15T05:29:01.583117" | "2021-03-03T13:22:47" | "2021-03-03T13:22:47" | 241,108,429 | 3 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null | my_num = 1, 3, 5, 6, 9
friends = ['Jeffrey', 'Mable', 'Tom', 'Karen', 'Jeffrey', 'mable']
# 0 1 2 3 ->索引顺序默认从左至右从0开始
# -4 -3 -2 -1
print(friends) # 打印完整列表
print(friends[0]) # 打印列表内第四个元素
print(friends[-3]) # 打印倒数第三个元素
print(friends[1:]) # 表示打印第二个元素和后面的所有元素
print(friends[1:3]) # 表示打印第二个和第三个元素但不包括第四个元素
friends[1] = 'jm' # 修改第二个元素为jm
# append()函数
l1 = [1, 2, 3, 4, 5, ]
l1.append([6, 7, 8, 9, ])
# l1.append(*[6, 7, 8, 9, ]) #会报错
print(l1)
l1.extend([6, 7, 8, 9])
print(l1)
# extend()函数
l1 = [1, 2, 3, 4, 5, ]
l1.extend([6, 7, 8, 9])
print(l1)
l1.extend('abc')
print(l1)
l1.extend('a') # 也是可迭代对象
print(l1)
# l1.extend(1) # 报错,不可迭代
print(l1)
# 输出
'''
[1, 2, 3, 4, 5, 6, 7, 8, 9]
[1, 2, 3, 4, 5, 6, 7, 8, 9, 'a', 'b', 'c']
[1, 2, 3, 4, 5, 6, 7, 8, 9, 'a', 'b', 'c', 'a']
[1, 2, 3, 4, 5, 6, 7, 8, 9, 'a', 'b', 'c', 'a']
'''
friends.insert(2, 'None') # .insert()函数可在指定为的后面添加元素
friends.remove('None') # .remove()函数可在移除指定为的元素
#friends.clear() # .clear()方法可重置列表
friends.pop() # .pop()函数默认移除最后一个元素,可在()内输入需倒数移除的元素
print(friends.index('Jeffrey')) # 使用.index()获取元素在第几元素
print(friends.count('Mable')) # .count()函数可计算有几个元素
print(friends.sort()) # .sort()函数可对元素进行a-z0-9顺序排序
print(friends.reverse()) # .reverse()函数可对元素进行颠倒操作
friends2 = friends.copy() # .copy()函数可继承对象
print(friends) | UTF-8 | Python | false | false | 1,809 | py | 347 | 列表.py | 239 | 0.555799 | 0.4814 | 0 | 70 | 18.6 | 66 |
ColCarroll/autobencher | 712,964,596,753 | ad303cc32679aed8434c3ca03a94a600365ed37d | 24dd0dc7f039b08ef1c09128ec5cbb1bb1d91b9b | /autobencher/server.py | a0f7ec9282b9acd31abd93f9c9c746bc258e9b01 | [] | no_license | https://github.com/ColCarroll/autobencher | f7f8ee4fc5c8c94799d615ecf96aaac75bb2e1fe | 93cdb345c1f8d1018e359b1febee9ac0c4a21b26 | refs/heads/master | "2021-01-22T21:49:32.116499" | "2016-08-16T21:19:48" | "2016-08-16T21:19:48" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import json
import os
from tornado.web import RequestHandler, StaticFileHandler, Application, url
from autobencher.util import Authorization
from autobencher.factory import BenchmarkerFactory
def process_post(factory, request):
event = json.loads(request.body.decode('utf-8'))
log_event(event, os.getcwd())
parser = factory.makeEventParser(event)
event_data = parser.get_event_data()
if event_data.valid:
publish_uri = os.environ['PUBLISH_URI']
publisher = factory.make_publisher(publish_uri)
if event_data.is_master_update:
runner = factory.make_master_runner(os.getcwd(),
event_data.runner_data,
publisher)
run_location = runner.get_run_location()
log_event(event, run_location)
runner.run()
else:
report_username = os.environ['REPORT_USERNAME']
report_password = os.environ['REPORT_PASSWORD']
report_auth = Authorization(report_username, report_password)
reporter = factory.makeReporter(event_data.reporter_data,
report_auth, publish_uri)
runner = factory.makeRunner(os.getcwd(), event_data.runner_data,
reporter, publisher)
run_location = runner.get_run_location()
log_event(event, run_location)
runner.run()
class EventHandler(RequestHandler):
def initialize(self):
self._factory = BenchmarkerFactory.makeFactory()
def post(self):
process_post(self._factory, self.request)
def log_event(event, directory):
log_path = os.path.join(directory, 'request.json')
with open(log_path, 'w') as request_fp:
json.dump(event, request_fp, indent=4,
sort_keys=True)
app = Application([
url(r"/webhooks", EventHandler),
url(r"/runs/(.*)", StaticFileHandler, {'path': 'runs'}),
])
| UTF-8 | Python | false | false | 2,035 | py | 13 | server.py | 11 | 0.594595 | 0.593612 | 0 | 64 | 30.796875 | 76 |
maximelhoustau/Prim-Project | 7,335,804,190,298 | 06208b8ef31a16a4b666900329dde715c98a0250 | 44f8501f135a23924bf9f17c3875dbbb72ae15c0 | /archives/Dense-Optical-Flow.py | e378a7d76348db185bb17ac79f262764cfbd5a8d | [] | no_license | https://github.com/maximelhoustau/Prim-Project | a2b644f7afd72d113da4c2e68e4b86bead79abe2 | cfb190f91c7f1f5cd73c6d43dc840efa5a631373 | refs/heads/master | "2020-09-22T09:12:28.421618" | "2020-02-24T18:44:33" | "2020-02-24T18:44:33" | 225,133,322 | 6 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import cv2 as cv
import numpy as np
import argparse
from utils.Apply_mask import apply_mask
import time as T
import matplotlib.pyplot as plt
start_time = T.time()
ap=argparse.ArgumentParser()
ap.add_argument("-v", "--video", help="path to the video")
ap.add_argument("-d", "--display",type=int, default=1, help="display or not of the real time video")
args = vars(ap.parse_args())
video_folder = "./videos/"
timeline = []
motion = []
time_t = 0
# The video feed is read in as a VideoCapture object
cap = cv.VideoCapture(video_folder+args["video"])
# ret = a boolean return value from getting the frame, first_frame = the first frame in the entire video sequence
ret, first_frame = cap.read()
# Converts frame to grayscale because we only need the luminance channel for detecting edges - less computationally expensive
#first_frame = apply_mask(first_frame)
prev_gray = cv.cvtColor(first_frame, cv.COLOR_BGR2GRAY)
# Creates an image filled with zero intensities with the same dimensions as the frame
mask = np.zeros_like(first_frame)
# Sets image saturation to maximum
mask[..., 1] = 255
fps = cap.get(cv.CAP_PROP_FPS)
count = 0
while(cap.isOpened()):
time = count/fps
grabbed = cap.grab()
if(grabbed):
count+=1
if((time - time_t) >= 0.07):
frame = cap.retrieve()[1]
time_t = time
else:
continue
else:
break
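    # cap.grab() advances the stream without decoding; a frame is only
    # decoded via retrieve() once ~70 ms have passed, subsampling the
    # video to roughly 14 fps to cut computation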
#frame = apply_mask(frame)
# Converts each frame to grayscale - we previously only converted the first frame to grayscale
gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
# Calculates dense optical flow by Farneback method
# https://docs.opencv.org/3.0-beta/modules/video/doc/motion_analysis_and_object_tracking.html#calcopticalflowfarneback
flow = cv.calcOpticalFlowFarneback(prev_gray, gray, None, 0.5, 3, 15, 3, 5, 1.2, 0)
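    # Farneback parameters: pyramid scale 0.5, 3 pyramid levels, window
    # size 15, 3 iterations, poly_n 5, poly_sigma 1.2, no extra flags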
# Computes the magnitude and angle of the 2D vectors
magnitude, angle = cv.cartToPolar(flow[..., 0], flow[..., 1])
# Sets image hue according to the optical flow direction
mask[..., 0] = angle * 180 / np.pi / 2
# Sets image value according to the optical flow magnitude (normalized)
mask[..., 2] = cv.normalize(magnitude, None, 0, 255, cv.NORM_MINMAX)
# Converts HSV to RGB (BGR) color representation
rgb = cv.cvtColor(mask, cv.COLOR_HSV2BGR)
# Updates previous frame
prev_gray = gray
# Frames are read by intervals of 1 millisecond. The programs breaks out of the while loop when the user presses the 'q' key
if cv.waitKey(1) & 0xFF == ord('q'):
break
if(args["display"]):
# Opens a new window and displays the output frame
cv.imshow("dense optical flow", rgb)
# Opens a new window and displays the input frame
cv.imshow("input", frame)
timeline.append(count/fps)
motion.append(np.count_nonzero(rgb))
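    # the number of non-zero pixels in the flow visualisation serves as a
    # crude proxy for how much of the frame is moving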
# The following frees up resources and closes all windows
cap.release()
cv.destroyAllWindows()
print("Execution time :"+str(T.time() - start_time))
fig, ax = plt.subplots()
ax.plot(timeline, motion)
ax.set(xlabel='Time (ms)', ylabel='Global Motion Estimation (pixel)', title='Global Motion Estimation with Dense Optical Flow')
ax.grid()
plt.show()
| UTF-8 | Python | false | false | 3,213 | py | 13 | Dense-Optical-Flow.py | 9 | 0.683473 | 0.669779 | 0 | 90 | 34.7 | 128 |
ctrl-alt-del/devtools | 13,718,125,579,077 | d36a898bfe219aec192732dc64b9e20b02e69020 | 5e76ad3c4dab528d1e450af4c84335c3ed2229e4 | /python/android-import-traverse/main.py | c644c8c5010b2901d2bcbfec58ddc5f25d74248b | [] | no_license | https://github.com/ctrl-alt-del/devtools | 505bd4a64d28b9e97b07be0bdbe3f03915c5c5db | cb5b7faf28121a1cf1b07d94d41860d65f0c9f4c | refs/heads/master | "2020-05-19T13:26:16.469628" | "2015-09-23T07:55:06" | "2015-09-23T07:55:06" | 27,367,333 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | #! /usr/bin/python
import os
import JavaClass

def get_package(path):
    with open(path) as infile:
        for line in infile:
            if "package" in line:
                # slice off the ".java" extension by length; rstrip(".java;")
                # removes matching *characters* (e.g. "Data.java" -> "Dat")
                class_name = os.path.basename(path)[:-len(".java")]
                return line.replace("package", "", 1).strip().rstrip(";") + "." + class_name


def get_java_files(path):
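    """Recursively collect .java file paths, descending only into src/ trees."""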
resultList = []
for name in os.listdir(path):
if name != "src" and "src" not in path:
continue
if os.path.isdir(path + "/" + name):
for each in get_java_files(path + "/" + name):
resultList.append(each)
if ".java" in name:
resultList.append(path + "/" + name)
return resultList
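

# root directory of the Android project to scan (set before running)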
path = ""
meaningful_classes = []
class_statistic = {}
java_file_paths = get_java_files(path)
for line in java_file_paths:
pLine = get_package(line)
meaningful_classes.append(pLine)
class_statistic[pLine] = 0
print "\n=== Statistics Details ===\n"
for line in java_file_paths:
jc = JavaClass.JavaClass(line, meaningful_classes)
jc.print_class()
for import_file in jc.import_files:
        class_statistic[import_file] += 1
mostly_called_class_name = ''
mostly_called_class_count = 0
max_length = len(str(max(class_statistic.values())))
for key in meaningful_classes:
stat = class_statistic[key]
if stat > 0:
extra_spaces = " " * (max_length - len(str(stat)))
print "[" + extra_spaces + str(stat) + "]: " + key
if class_statistic[key] > mostly_called_class_count:
mostly_called_class_count = class_statistic[key]
mostly_called_class_name = key
print "\n=== Statistics Summary ===\n"
print "mostly_called_class_name: " + mostly_called_class_name
print "mostly_called_class_count: " + str(mostly_called_class_count)
| UTF-8 | Python | false | false | 1,797 | py | 4 | main.py | 3 | 0.621035 | 0.618809 | 0 | 65 | 26.646154 | 113 |
joshcherian42/Activity-Transition-Detection | 14,671,608,315,204 | 8c71ea6feb92d2e0f9939de161efe874472e8a92 | b6fa027796633eb761857ea8729b835c40bbc5b9 | /Scripts/clean_public_data.py | e38e35c287467b7d47939f727a868ae6afd62868 | [] | no_license | https://github.com/joshcherian42/Activity-Transition-Detection | f5a372052032135933e15d3bf65a1d0cd131d367 | 978f4e3f6d0feeaffc31e3b55876f17d1ad6b778 | refs/heads/master | "2023-06-08T03:19:21.949790" | "2019-10-29T20:29:54" | "2019-10-29T20:29:54" | 158,879,657 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import os
import settings
import csv


def fog_dataset():
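    """Convert the dataset_fog_release .txt files to CSV, mapping the numeric
    annotations (0/1/2) to Inactive/Activity/Freeze labels."""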
fog_header = ['Time_ms',
'ank_accx', 'ank_accy', 'ank_accz',
'thigh_accx', 'thigh_accy', 'thigh_accz',
'trunk_accx', 'trunk_accy', 'trunk_accz',
'gesture']
for subdir, dirs, files in os.walk(os.path.join(settings.phase_1_raw, "dataset_fog_release")):
for cur_file in sorted(files, key=settings.natural_keys):
if cur_file.endswith('.txt'):
with open(os.path.join(subdir, cur_file)) as dat_file, open(os.path.join(subdir, cur_file[:-4]) + '.csv', 'w') as csv_file:
csv_writer = csv.writer(csv_file)
csv_writer.writerow(fog_header)
for line in dat_file:
row = [field.strip() for field in line.split(' ')]
if row[10] == '0':
row[10] = 'Inactive'
elif row[10] == '1':
row[10] = 'Activity'
elif row[10] == '2':
row[10] = 'Freeze'
csv_writer.writerow(row)


def pamap_dat_to_csv():
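    """Convert PAMAP2 .dat files to CSV: name the activities, keep a subset of
    the columns, and interpolate the sparsely sampled heart-rate channel."""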
for subdir, dirs, files in os.walk(settings.phase_1_raw):
for cur_file in sorted(files, key=settings.natural_keys):
if cur_file.endswith('.dat'):
print cur_file
with open(os.path.join(subdir, cur_file)) as dat_file, open(os.path.join(settings.phase_1_processed, subdir.split('/')[-1], cur_file[:-4]) + '.csv', 'w') as csv_file:
csv_writer = csv.writer(csv_file)
csv_writer.writerow(settings.raw_data_cols)
cur_heart_rate = 0.0
cur_rows = []
for line in dat_file:
row = [field.strip() for field in line.split(' ')]
                    # For one user, the data was spliced together because collection
                    # was aborted, so timestamps should be checked for consecutiveness
                    # Activity label: map PAMAP2 activity IDs to names
                    # (IDs not listed are left unchanged)
                    activity_names = {
                        '0': 'Inactive', '1': 'lying', '2': 'sitting',
                        '3': 'standing', '4': 'walking', '5': 'running',
                        '6': 'cycling', '7': 'Nordic walking',
                        '9': 'watching TV', '10': 'computer work',
                        '11': 'car driving', '12': 'ascending stairs',
                        '13': 'descending stairs', '16': 'vacuum cleaning',
                        '17': 'ironing', '18': 'folding laundry',
                        '19': 'house cleaning', '20': 'playing soccer',
                        '24': 'rope jumping',
                    }
                    row[1] = activity_names.get(row[1], row[1])
data_indices = [0, 1, 2, 5, 6, 7, 22, 23, 24, 39, 40, 41]
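                    # keep timestamp, activity label, heart rate and three
                    # acceleration columns from each of the three IMUs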
row = [row[i] for i in data_indices]
if 'NaN' not in row:
if cur_heart_rate != 0:
                        for i, new_row in enumerate(cur_rows):
                            # linear interpolation: step each buffered reading
                            # towards the newly observed heart rate
                            new_row[2] = round(cur_heart_rate + i * (float(row[2]) - cur_heart_rate) / len(cur_rows))
                            if 'NaN' not in new_row:
                                csv_writer.writerow(new_row)
cur_rows = []
cur_heart_rate = float(row[2])
cur_rows.append(row)
else:
cur_rows.append(row)


def opportunity_dat_to_csv():
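    """Convert Opportunity challenge .dat files to CSV, translating the numeric
    locomotion and gesture codes to readable labels."""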
opportunity_header = ['Time_ms',
'back_accx', 'back_accy', 'back_accz',
'back_gyrox', 'back_gyroy', 'back_gyroz',
'back_magx', 'back_magy', 'back_magz',
'rua_accx', 'rua_accy', 'rua_accz',
'rua_gyrox', 'rua_gyroy', 'rua_gyroz',
'rua_magx', 'rua_magy', 'rua_magz',
'rla_accx', 'rla_accy', 'rla_accz',
'rla_gyrox', 'rla_gyroy', 'rla_gyroz',
'rla_magx', 'rla_magy', 'rla_magz',
'lua_accx', 'lua_accy', 'lua_accz',
'lua_gyrox', 'lua_gyroy', 'lua_gyroz',
'lua_magx', 'lua_magy', 'lua_magz',
'lla_accx', 'lla_accy', 'lla_accz',
'lla_gyrox', 'lla_gyroy', 'lla_gyroz',
'lla_magx', 'lla_magy', 'lla_magz',
'locomotion', 'gesture']
for subdir, dirs, files in os.walk(os.path.join(settings.phase_1_raw, "OpportunityChallengeDatasetTaskC")): # also works for OpportunityChallengeDatasetTasksAB_2011_08_12 and OpportunityChallengeLabeled
for cur_file in sorted(files, key=settings.natural_keys):
if cur_file.endswith('.dat'):
with open(os.path.join(subdir, cur_file)) as dat_file, open(os.path.join(subdir, cur_file[:-4]) + '.csv', 'w') as csv_file:
csv_writer = csv.writer(csv_file)
csv_writer.writerow(opportunity_header)
for line in dat_file:
row = [field.strip() for field in line.split(' ')]
                        # Locomotion: map track codes to labels (unknown -> Inactive)
                        locomotion_names = {'101': 'Stand', '102': 'Walk',
                                            '104': 'Sit', '105': 'Lie'}
                        row[-2] = locomotion_names.get(row[-2], 'Inactive')
                        # Gesture: map gesture codes to labels (unknown -> Inactive)
                        gesture_names = {
                            '506616': 'Open_Door1', '506617': 'Open_Door2',
                            '504616': 'Close_Door1', '504617': 'Close_Door2',
                            '506620': 'Open_Fridge', '504620': 'Close_Fridge',
                            '506605': 'Open_Dishwasher', '504605': 'Close_Dishwasher',
                            '506619': 'Open_Drawer1', '504619': 'Close_Drawer1',
                            '506611': 'Open_Drawer2', '504611': 'Close_Drawer2',
                            '506608': 'Open_Drawer3', '504608': 'Close_Drawer3',
                            '508612': 'Clean_Table', '507621': 'Drink_Cup',
                            '505606': 'Toggle_Switch',
                        }
                        row[-1] = gesture_names.get(row[-1], 'Inactive')
csv_writer.writerow(row)


def clean_data():
if settings.dataset == 'PAMAP2':
pamap_dat_to_csv()
elif settings.dataset == 'Opportunity':
opportunity_dat_to_csv()
elif settings.dataset == 'FOG':
fog_dataset()
| UTF-8 | Python | false | false | 9,119 | py | 13 | clean_public_data.py | 12 | 0.366049 | 0.334138 | 0 | 196 | 45.52551 | 207 |
lex/kiitos-2018-backend | 14,121,852,503,220 | 20f0480529ddf278391ef7c3708840ad005573f5 | 99f174408b3e70f7ff20135e75429a97720b9737 | observations/test_serializers.py | 83fe7790446570837cea9f902225849051f707b6 | ["BSD-2-Clause"] | permissive | https://github.com/lex/kiitos-2018-backend | ac7386316d23d4de188de19e8553b616ab147f25 | a20f9942192eee98700b60918aaf8dcc2c028768 | refs/heads/master | "2021-04-29T14:30:26.102248" | "2018-02-28T15:30:09" | "2018-02-28T15:30:09" | 121,776,018 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.test import TestCase
from observations.models import ObservationPoint, Observation
from observations.serializers import ObservationPointSerializer, ObservationPointDetailsSerializer, ObservationSerializer
import datetime


class ObservationPointSerializerTests(TestCase):
def setUp(self):
point = ObservationPoint.objects.create(name='Tokyo',
latitude=10.0,
longitude=11.0)
Observation.objects.create(observation_point=point, temperature=273.15)
Observation.objects.create(observation_point=point, temperature=274.15)
Observation.objects.create(observation_point=point, temperature=275.15)
def test_serializer_data_has_all_fields(self):
point = ObservationPoint.objects.get(name='Tokyo')
s = ObservationPointSerializer(point)
fields = ['id', 'name', 'latitude', 'longitude', 'latest_observation']
for key, v in s.data.items():
self.assertEqual(key in fields, True)
def test_serializer_data_has_correct_latest_observation(self):
point = ObservationPoint.objects.get(name='Tokyo')
observation = Observation.objects.create(observation_point=point,
temperature=276.15)
s = ObservationPointSerializer(point)
        expected = observation.temperature
        temperature = float(s.data['latest_observation']['temperature'])
        self.assertEqual(temperature, expected)


class ObservationPointDetailsSerializerTests(TestCase):
def setUp(self):
point = ObservationPoint.objects.create(name='Tokyo',
latitude=10.0,
longitude=11.0)
Observation.objects.create(observation_point=point, temperature=273.15)
Observation.objects.create(observation_point=point, temperature=274.15)
Observation.objects.create(observation_point=point, temperature=275.15)
def test_serializer_data_has_all_observations_for_last_24_hours(self):
point = ObservationPoint.objects.get(name='Tokyo')
s = ObservationPointDetailsSerializer(point)
self.assertEqual(len(s.data['observations']), 3)
def test_serializer_data_doesnt_include_observations_older_than_24_hours(
self):
point = ObservationPoint.objects.get(name='Tokyo')
o = Observation.objects.create(observation_point=point,
temperature=275.15)
o.timestamp = o.timestamp - datetime.timedelta(days=1)
o.save()
s = ObservationPointDetailsSerializer(point)
self.assertEqual(len(s.data['observations']), 3)
| UTF-8 | Python | false | false | 2,782 | py | 14 | test_serializers.py | 11 | 0.652768 | 0.63156 | 0 | 61 | 44.606557 | 121 |
HITX/backend-django | 4,183,298,172,783 | c515a8749ffe8256caeaf6ee4a21d011106e201c | 03179a3cd18e73ddded97744b1a69f7b488629a5 | /submissions/views.py | 644b0ebbd53735c3b828cf72d708bee3d5af0e5f | [] | no_license | https://github.com/HITX/backend-django | 77645b1a63648f80f86828c85142a257005f81ce | 984b83a58c56437ea0dba2470201a1b190b49fdc | refs/heads/master | "2016-09-14T07:37:59.497370" | "2015-12-30T14:20:23" | "2015-12-30T14:20:23" | 56,883,830 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from django.conf import settings
from rest_framework.decorators import detail_route
from rest_framework.response import Response

from dry_rest_permissions.generics import DRYPermissions

from submissions.models import Submission
from submissions.serializers import SubmissionSerializer
from common.constants import SubmissionStatus
from common.views import DynamicModelViewSet


class SubmissionViewSet(DynamicModelViewSet):
    queryset = Submission.objects.all()
    serializer_class = SubmissionSerializer
    permission_classes = [DRYPermissions]

    def list(self, request):
submissions = request.user.submissions
        return Response(SubmissionSerializer(submissions, many=True).data)

    def _update_status(self, new_status):
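        """Set the submission's status, save it and return the updated data."""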
submission = self.get_object()
submission.status = new_status
submission.save()
return Response(SubmissionSerializer(submission).data)

    @detail_route(methods=['post'])
    def submit(self, request, pk=None):
        return self._update_status(SubmissionStatus.SUBMITTED)

    @detail_route(methods=['post'])
    def accept(self, request, pk=None):
        return self._update_status(SubmissionStatus.ACCEPTED)

    @detail_route(methods=['post'])
    def reject(self, request, pk=None):
        return self._update_status(SubmissionStatus.REJECTED)
| UTF-8 | Python | false | false | 1,509 | py | 46 | views.py | 40 | 0.761431 | 0.760769 | 0 | 42 | 34.928571 | 75 |
nenduru1/TFLearn | 12,163,347,411,423 | 58709c88b14a87b29a127b93a0063976d0c9d38f | 6ce178a6b4f8f7410b6977d0f15e637af4abac81 | /debug.py | 07bac0d616debade659f08ae4c212f2ba1da1e07 | [] | no_license | https://github.com/nenduru1/TFLearn | 8c157ac1a1dbfe952e20ab3a8d73cf0c6cd6f565 | dcd3c2504a7abcd7db50ad603ccc3c79d0863f5d | refs/heads/master | "2021-01-01T17:06:38.996810" | "2017-07-22T01:22:07" | "2017-07-22T01:22:07" | 97,997,267 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Fri Jul 21 10:10:50 2017
@author: O222069
"""
import numpy as np
import tflearn

# download the Titanic dataset (fetches the CSV into the working directory)
from tflearn.datasets import titanic
titanic.download_dataset('titanic_dataset.csv')

# load the csv file
from tflearn.data_utils import load_csv, samplewise_std_normalization
data, labels = load_csv('titanic_dataset.csv', target_column=0,
                        categorical_labels=True, n_classes=2)

# preprocessing function
def preprocess(data, columns_to_ignore):
    # drop the ignored columns, descending so earlier indices stay valid
    for column_id in sorted(columns_to_ignore, reverse=True):
        [r.pop(column_id) for r in data]
    for i in range(len(data)):
        # encode the 'sex' field as a float: female -> 1., male -> 0.
        data[i][1] = 1. if data[i][1] == 'female' else 0.
    return np.array(data, dtype=np.float32)

# ignore the 'name' and 'ticket' columns
to_ignore = [1, 6]
data = preprocess(data, to_ignore)

# scale the features to [0, 1]
# data = samplewise_std_normalization(data)
from sklearn.preprocessing import MinMaxScaler
sc = MinMaxScaler(feature_range=(0, 1))
data = sc.fit_transform(data)

# build the neural network
net = tflearn.input_data(shape=[None, 6])
net = tflearn.fully_connected(net, 32)
net = tflearn.fully_connected(net, 32)
net = tflearn.fully_connected(net, 2, activation='softmax')
net = tflearn.regression(net)

# define the model
model = tflearn.DNN(net)

# start training
model.fit(data, labels, n_epoch=100, batch_size=16, show_metric=True)
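
# two 32-unit hidden layers feed a 2-way softmax over the survival outcome;
# tflearn.regression attaches the optimizer and loss (categorical
# cross-entropy by default) to the output layer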

# try the model on two hypothetical passengers
dicaprio = [3, 'Jack', 'male', 19, 0, 0, 'N/A', 5.000]
winslet = [1, 'Rose', 'female', 17, 1, 2, 'N/A', 100.000]

# preprocess the samples the same way as the training data
dicaprio, winslet = preprocess([dicaprio, winslet], to_ignore)
test = np.array([dicaprio, winslet])
# transform (not fit) the test rows with the scaler fitted on the training data
test = sc.transform(test)
pred = model.predict(test)
print("Dicaprio Surviving Rate:", pred[0][1])
print("Winslet Surviving Rate:", pred[1][1])

# linear regression
X = [3.3, 4.4, 5.5, 6.71, 6.93, 4.168, 9.779, 6.182, 7.59, 2.167, 7.042, 10.791, 5.313, 7.997, 5.654, 9.27, 3.1]
Y = [1.7, 2.76, 2.09, 3.19, 1.694, 1.573, 3.366, 2.596, 2.53, 1.221, 2.827, 3.465, 1.65, 2.904, 2.42, 2.94, 1.3]
inputs = tflearn.input_data(shape=[None])
linear = tflearn.single_unit(inputs)
reg = tflearn.regression(linear, optimizer='sgd', loss='mean_square', learning_rate=0.01)
m = tflearn.DNN(reg)
m.fit(X, Y, n_epoch=100, show_metric=True, snapshot_epoch=False)
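# single_unit fits y = W*x + b directly, so the learned weight and bias can
# be read back with m.get_weights, as printed below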
print("\nRegression Results:")
print("Y="+ str(m.get_weights(linear.W))+"*X+"+ str(m.get_weights(linear.b)))
print("\nTest Prediction for x=3.2,3.3,3.4")
print(m.predict([3.2,3.3,3.4]))
| UTF-8 | Python | false | false | 2,362 | py | 1 | debug.py | 1 | 0.704911 | 0.621084 | 0 | 77 | 28.675325 | 96 |
IMEsec-USP/vumos-common | 19,018,115,229,550 | 3010f3c19ac09e50cc495ba1cc48e6539df08f8b | d7564cc00bdf50a39f8bd6982d1e8eef1f158500 | /messaging/vumos/scheduled/__init__.py | 3334e91be26e1e49585537461b312132911be5ff | [] | no_license | https://github.com/IMEsec-USP/vumos-common | 50330433b0e97c34619abfd1daec0d869371a339 | ab355690cc677c923d0d93507938ed9e24557129 | refs/heads/master | "2023-07-06T18:46:29.857061" | "2021-08-06T00:31:54" | "2021-08-06T00:31:54" | 395,433,412 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from .periodic import ScheduledVumosService
| UTF-8 | Python | false | false | 44 | py | 15 | __init__.py | 7 | 0.886364 | 0.886364 | 0 | 1 | 43 | 43 |
artem7902/easy-outline | 8,160,437,911,445 | 547179ddf288a5925a09746ed8b94519ecfddeb1 | 8c876f3a87308a2070a1e48127afaa9fa56cddd6 | /aws/lambda/index.py | 776fbaf1be5ca134b2c01f4877f78af59026a07e | [] | no_license | https://github.com/artem7902/easy-outline | 8dcb3d704b78214aee1f733ac3589d7de21f2386 | def4122e12ed09e7ee6414e2e34da6ce07ce32e9 | refs/heads/develop | "2023-08-04T00:32:01.059047" | "2023-04-25T15:25:31" | "2023-04-25T15:25:31" | 216,583,033 | 0 | 1 | null | false | "2023-07-20T09:05:58" | "2019-10-21T14:03:12" | "2023-04-11T11:22:37" | "2023-07-20T09:05:58" | 5,141 | 0 | 1 | 7 | TypeScript | false | false | import os
import time
import shortuuid
import re
from decimal import Decimal
import boto3
import html
from newspaper import Article


def add_article_to_dynamo(article):
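    """Store a scraped article in the DynamoDB table named by ARTICLES_TABLE."""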
dynamo_client = boto3.resource('dynamodb')
articles_table = dynamo_client.Table(os.environ.get("ARTICLES_TABLE"))
articles_table.put_item(Item=article)


def clean_html_tags(html_code):
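    # decode HTML entities, strip tag attributes, drop now-empty tags and
    # collapse runs of whitespace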
html_with_decoded_characters = html.unescape(html_code)
    html_with_removed_attributes = re.sub(r"(<[a-z]*)(\s*[a-z]*=\".*?\")*(>)", r"\g<1>\g<3>", html_with_decoded_characters)
    html_with_removed_empty_tags = re.sub(r"<[^/>]+>[ \n\r\t]*</[^>]+>", " ", html_with_removed_attributes)
    html_with_removed_spaces = re.sub(r"\s\s+", " ", html_with_removed_empty_tags)
return html_with_removed_spaces


def main(event, context):
print(event)
article_url = event["arguments"]["url"]
article = Article(article_url, keep_article_html=True)
article.download()
article.parse()
item_to_put = {
"id": str(shortuuid.uuid()),
"article": {
"title": article.title,
'originalText': article.text,
'originalHtml': article.article_html,
'html': clean_html_tags(article.article_html),
"authors": article.authors,
"publishDate": article.publish_date if not article.publish_date else Decimal(article.publish_date.timestamp()),
"sourceUrl": article_url,
"lang": article.extractor.language
},
"createdAt": Decimal(time.time()),
"secretId": str(shortuuid.uuid())
}
add_article_to_dynamo(item_to_put)
return item_to_put | UTF-8 | Python | false | false | 1,647 | py | 66 | index.py | 35 | 0.632665 | 0.630237 | 0 | 46 | 34.826087 | 123 |
kevinlu310/GeThem | 3,186,865,769,115 | d6beaced9a261165bb2b2a9b92e8b8e6434af56d | 0f6656bec7a5f3c9f16b67de96b00bbd8633a2ce | /app/gethem/__init__.py | 93f9b3cfd4871443b872ece0238c987ec6af81b9 | [] | no_license | https://github.com/kevinlu310/GeThem | a2a4079e067a54dcc9935b535813ade3f4d7df53 | fd74257819e78280b2621fdacd61ea5323d58c5c | refs/heads/master | "2020-04-18T00:03:30.783596" | "2013-01-22T15:15:19" | "2013-01-22T15:15:19" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | from flask import Flask
from redis import StrictRedis
import config
app = Flask(__name__)

UPLOAD_FOLDER = 'uploads'
ALLOWED_EXTENSIONS = set(['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'])

app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.secret_key = config.SECRET_KEY
app.config['DEBUG'] = True

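# shared Redis connection; StrictRedis() defaults to localhost:6379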
red = StrictRedis()
from gethem import views
| UTF-8 | Python | false | false | 342 | py | 26 | __init__.py | 13 | 0.704678 | 0.704678 | 0 | 14 | 23.428571 | 69 |
EHSchutzman/WBIQP | 6,107,443,515,831 | 5a12b80a9c6e5137629a357bcc764bf51717d33f | f902c24822f8ebe3f08a59e654e6aab60bc1d97e | /chooseafile.py | 4ef30d64229e4c8ead7c19e79274cf6ec5efe3fd | [] | no_license | https://github.com/EHSchutzman/WBIQP | e91612caf4b5d655bcfa52e7e54e0e3e781e3a52 | 5ff1d2aecf1b971a55f39c0d11f5786650757c99 | refs/heads/master | "2021-04-06T10:54:19.091655" | "2018-04-30T17:24:36" | "2018-04-30T17:24:36" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import pandas
from matplotlib import pyplot as plt
import os
import Reader as read
# The following code takes one user-chosen day of data and analyzes it.
def chooseafile():
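    """Interactively prompt for an HMO name, year, month and day and return
    the path of the matching CSV file."""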
# If more HMOS are recorded from, add their name below.
HMOlist = ["25_McIntyre", "2_Himbleton", "37_Woodstock", "50_Bleinheim", "8_Bozward"]
# If more data is recorded in the future, add the year, month number below.
yearlist = ["2017", "2018", "2019"]
monthlist = ["01", "02", "03", "04", "05", "06", "07", "08", "09", "10", "11", "12"]
hmocheck = 1
while hmocheck == 1:
print("HMO Names: 25_McIntyre, 2_Himbleton, 37_Woodstock, 50_Bleinheim, 8_Bozward")
print("")
HMO = input("Type name of the HMO you wish to analyze: ")
if HMO in HMOlist:
print("{} is a correct HMO name".format(HMO))
hmocheck = 0
else:
print("The entered HMO name does not exist")
hmocheck = 1
yearcheck = 1
while yearcheck == 1:
year = input("Type the year you wish to analyze in Ex: 2018 format: ")
if year in yearlist:
yearcheck = 0
else:
print("The entered year name is formatted wrong or not a valid year")
yearcheck = 1
monthcheck = 1
while monthcheck == 1:
month = input("Enter the month of the data in number format, EX: 11 = November, 01 = January")
if month in monthlist:
monthcheck = 0
else:
print("The month entered is not a month number or not entered correctly")
monthcheck = 1
    daycheck = 1
    while daycheck == 1:
        day = input("Enter the day of the data in number format")
        intday = int(day)
        # range() excludes its stop value, so a 31-day month needs range(1, 32)
        if month == "01" or month == "03" or month == "05" or month == "07" or month == "08" or month == "10" or month == "12":
            if intday in range(1, 32):
                daycheck = 0
            else:
                print("Enter a day in the month chosen")
                daycheck = 1
        if month == "04" or month == "06" or month == "09" or month == "11":
            if intday in range(1, 31):
                daycheck = 0
            else:
                print("Enter a day in the month chosen")
                daycheck = 1
        # doubtful that data will be taken in 2020 for this application / project so no need to check for leap year
        if month == "02":
            if intday in range(1, 29):
                daycheck = 0
            else:
                print("Enter a day in the month chosen")
                daycheck = 1
if HMO == "25_McIntyre":
stringstart = "720200236_Data_"
elif HMO == "2_Himbleton":
stringstart = "720200260_Data_"
elif HMO == "37_Woodstock":
stringstart = "720200288_Data_"
elif HMO == "50_Bleinheim":
stringstart = "720200295_Data_"
elif HMO == "8_Bozward":
stringstart = "720200262_Data_"
else:
print("error in code where likely the start of the string part does not have a check for the new HMO added")
finalstring = "./RawWBData/{}/{}{} {} {}.csv".format(HMO, stringstart, year, month, day)
# print(finalstring)
return finalstring


def doesitexist(finalstring, filelist):
    if finalstring in filelist:
        print("Attempting to plot desired day")
        return "y"
    print("No file found")
    return "n"


def makelist():
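    """Walk ./RawWBData/ and return the path of every file found."""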
rootDir = './RawWBData/'
filenamelist = [] # directories is a 3d array containing all of the days in the collected data
for dirName, subdirList, fileList in os.walk(rootDir):
for fname in sorted(fileList):
# print("{}/{}".format(dirName, fname))
filenamelist.append("{}/{}".format(dirName, fname))
return filenamelist


def onedayplot(day, path):
changeday = day[:8300]
print(len(changeday))
times = []
for time in pandas.date_range('00:00', None, periods=8300, freq='10S'):
times.append(str(time).split(' ')[-1])
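    # one label per sample: readings arrive every 10 s, so 8300 samples
    # cover roughly 23 hours of the day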
if len(day) >= 8300:
plotchosen = "wrong"
while plotchosen == "wrong":
print("actPow | hwTSet | primT | chActive | primTSet | hWActive | hWTOutlet")
            plotchoose = input("Which variable do you want to plot?")
            plotchosen = appender(plotchoose, changeday)
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.plot(times, plotchosen)
        # hide all tick labels and marks, then re-enable every 700th
        # (700 samples x 10 s, i.e. roughly one label every two hours)
        plt.setp(ax1.get_xticklabels(), visible=False)
        plt.setp(ax1.get_xticklabels()[::700], visible=True)
        plt.xticks(fontsize=10, rotation=90)
        for i, tic in enumerate(ax1.xaxis.get_major_ticks()):
            if i % 700 != 0:
                tic.tick1On = tic.tick2On = False
                tic.label1On = tic.label2On = False
plt.gcf().subplots_adjust(bottom=0.23)
plt.xlabel("Time of Day", labelpad=10)
plt.title(plotchoose + " " + path[39:49])
plt.show()
else:
print("This day exists, but does not have enough data points to be considered for plotting")
#put more plots of interest here!
return


def chooserun():
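    """Prompt for a day, verify the matching file exists and plot it."""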
allfiles = makelist()
requestedpath = chooseafile()
answer = doesitexist(requestedpath, allfiles)
if answer == "y":
onedayplot(read.openFile(requestedpath), requestedpath)
else:
print("file does not exist!")
return


def appender(plotchoose, changeday):
    # column index of each plottable variable within a data row
    column_index = {'actPow': 1, 'hwTSet': 2, 'primT': 3, 'chActive': 4,
                    'primTSet': 5, 'hWActive': 6, 'hWTOutlet': 7}
    if plotchoose not in column_index:
        print("that is not a variable!")
        return "wrong"
    idx = column_index[plotchoose]
    return [item[idx] for item in changeday]


if __name__ == '__main__':
chooserun() | UTF-8 | Python | false | false | 7,266 | py | 11 | chooseafile.py | 10 | 0.565373 | 0.531792 | 0 | 236 | 29.792373 | 127 |
ExplodingKonjac/ECH-EasyClassHelper | 2,972,117,410,975 | f760f4b57c54e20efbda51a9d083ab92e034f771 | bcaf854a102d5a1b1f1299447ab02f0d49c56b6f | /echSourceCode/modules/MainWindow.py | 6839a1d0234745984c0a0956bbe45bc7c9af68c4 | [] | no_license | https://github.com/ExplodingKonjac/ECH-EasyClassHelper | 949c8393284cddd140bb14bcb9020d389363a00d | 88841fb4873c9302ae82bc4f1fb1f107880ed145 | refs/heads/master | "2022-04-27T11:35:31.014242" | "2020-04-27T11:55:15" | "2020-04-27T11:55:15" | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'MainWindow.ui'
#
# Created by: PyQt5 UI code generator 5.14.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(1300, 800)
MainWindow.setAutoFillBackground(True)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.scroll_area = QtWidgets.QScrollArea(self.centralwidget)
self.scroll_area.setEnabled(True)
self.scroll_area.setGeometry(QtCore.QRect(280, 120, 1015, 620))
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.scroll_area.sizePolicy().hasHeightForWidth())
self.scroll_area.setSizePolicy(sizePolicy)
self.scroll_area.setMinimumSize(QtCore.QSize(0, 0))
self.scroll_area.setSizeIncrement(QtCore.QSize(0, 0))
font = QtGui.QFont()
font.setPointSize(10)
self.scroll_area.setFont(font)
self.scroll_area.setFrameShape(QtWidgets.QFrame.Box)
self.scroll_area.setFrameShadow(QtWidgets.QFrame.Plain)
self.scroll_area.setLineWidth(3)
self.scroll_area.setMidLineWidth(0)
self.scroll_area.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)
self.scroll_area.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.scroll_area.setSizeAdjustPolicy(QtWidgets.QAbstractScrollArea.AdjustToContents)
self.scroll_area.setWidgetResizable(False)
self.scroll_area.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
self.scroll_area.setObjectName("scroll_area")
self.scrollAreaWidgetContents = QtWidgets.QWidget()
self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 966, 609))
self.scrollAreaWidgetContents.setObjectName("scrollAreaWidgetContents")
self.add_test = QtWidgets.QPushButton(self.scrollAreaWidgetContents)
self.add_test.setGeometry(QtCore.QRect(10, 10, 970, 50))
self.add_test.setMinimumSize(QtCore.QSize(970, 50))
font = QtGui.QFont()
font.setPointSize(15)
self.add_test.setFont(font)
self.add_test.setObjectName("add_test")
self.add_stu = QtWidgets.QPushButton(self.scrollAreaWidgetContents)
self.add_stu.setGeometry(QtCore.QRect(10, 10, 970, 50))
self.add_stu.setMinimumSize(QtCore.QSize(970, 50))
font = QtGui.QFont()
font.setPointSize(15)
self.add_stu.setFont(font)
self.add_stu.setObjectName("add_stu")
self.sort_btn = QtWidgets.QPushButton(self.scrollAreaWidgetContents)
self.sort_btn.setGeometry(QtCore.QRect(10, 10, 970, 50))
font = QtGui.QFont()
font.setPointSize(15)
self.sort_btn.setFont(font)
self.sort_btn.setObjectName("sort_btn")
self.score = QtWidgets.QLabel(self.scrollAreaWidgetContents)
self.score.setGeometry(QtCore.QRect(10, 70, 970, 50))
font = QtGui.QFont()
font.setPointSize(15)
self.score.setFont(font)
self.score.setLineWidth(1)
self.score.setObjectName("score")
self.scroll_area.setWidget(self.scrollAreaWidgetContents)
self.choose_class = QtWidgets.QComboBox(self.centralwidget)
self.choose_class.setGeometry(QtCore.QRect(280, 30, 810, 40))
font = QtGui.QFont()
font.setPointSize(15)
self.choose_class.setFont(font)
self.choose_class.setObjectName("choose_class")
self.main = QtWidgets.QPushButton(self.centralwidget)
self.main.setGeometry(QtCore.QRect(30, 30, 100, 40))
font = QtGui.QFont()
font.setPointSize(15)
self.main.setFont(font)
self.main.setObjectName("main")
self.add_class = QtWidgets.QPushButton(self.centralwidget)
self.add_class.setGeometry(QtCore.QRect(1230, 30, 40, 40))
self.add_class.setText("")
self.add_class.setObjectName("add_class")
self.label = QtWidgets.QLabel(self.centralwidget)
self.label.setGeometry(QtCore.QRect(30, 80, 231, 31))
font = QtGui.QFont()
font.setPointSize(15)
self.label.setFont(font)
self.label.setObjectName("label")
self.plainTextEdit = QtWidgets.QPlainTextEdit(self.centralwidget)
self.plainTextEdit.setGeometry(QtCore.QRect(30, 120, 230, 620))
self.plainTextEdit.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.plainTextEdit.setFrameShadow(QtWidgets.QFrame.Sunken)
self.plainTextEdit.setLineWidth(1)
self.plainTextEdit.setObjectName("plainTextEdit")
self.label_2 = QtWidgets.QLabel(self.centralwidget)
self.label_2.setGeometry(QtCore.QRect(280, 80, 991, 31))
font = QtGui.QFont()
font.setPointSize(15)
self.label_2.setFont(font)
self.label_2.setLineWidth(1)
self.label_2.setObjectName("label_2")
self.info = QtWidgets.QPushButton(self.centralwidget)
self.info.setGeometry(QtCore.QRect(1110, 30, 40, 40))
self.info.setText("")
self.info.setObjectName("info")
self.del_class = QtWidgets.QPushButton(self.centralwidget)
self.del_class.setGeometry(QtCore.QRect(1170, 30, 40, 40))
self.del_class.setText("")
self.del_class.setObjectName("del_class")
self.exit = QtWidgets.QPushButton(self.centralwidget)
self.exit.setGeometry(QtCore.QRect(160, 30, 100, 40))
font = QtGui.QFont()
font.setPointSize(15)
self.exit.setFont(font)
self.exit.setObjectName("exit")
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 1300, 18))
self.menubar.setObjectName("menubar")
self.menu = QtWidgets.QMenu(self.menubar)
self.menu.setObjectName("menu")
self.menu_2 = QtWidgets.QMenu(self.menubar)
self.menu_2.setObjectName("menu_2")
self.menu_3 = QtWidgets.QMenu(self.menubar)
self.menu_3.setObjectName("menu_3")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self._save = QtWidgets.QAction(MainWindow)
self._save.setObjectName("_save")
self._main = QtWidgets.QAction(MainWindow)
self._main.setObjectName("_main")
self.set_font = QtWidgets.QAction(MainWindow)
self.set_font.setObjectName("set_font")
self.make_svg = QtWidgets.QAction(MainWindow)
self.make_svg.setObjectName("make_svg")
self.write_Python = QtWidgets.QAction(MainWindow)
self.write_Python.setObjectName("write_Python")
self.rand_stu = QtWidgets.QAction(MainWindow)
self.rand_stu.setEnabled(True)
self.rand_stu.setObjectName("rand_stu")
self.write_html = QtWidgets.QAction(MainWindow)
self.write_html.setObjectName("write_html")
self.set_bg = QtWidgets.QAction(MainWindow)
self.set_bg.setObjectName("set_bg")
self.set_btn = QtWidgets.QAction(MainWindow)
self.set_btn.setObjectName("set_btn")
self.menu.addAction(self.rand_stu)
self.menu.addAction(self.write_html)
self.menu_2.addAction(self.write_Python)
self.menu_3.addAction(self.set_font)
self.menu_3.addAction(self.set_bg)
self.menu_3.addAction(self.set_btn)
self.menubar.addAction(self.menu.menuAction())
self.menubar.addAction(self.menu_2.menuAction())
self.menubar.addAction(self.menu_3.menuAction())
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "ECH 课堂助手"))
self.add_test.setText(_translate("MainWindow", "添加测试"))
self.add_stu.setText(_translate("MainWindow", "添加学生"))
self.sort_btn.setText(_translate("MainWindow", "按成绩排序"))
self.score.setText(_translate("MainWindow", "平均分:"))
self.main.setText(_translate("MainWindow", "主菜单"))
self.add_class.setToolTip(_translate("MainWindow", "添加班级"))
self.label.setText(_translate("MainWindow", "记事本"))
self.label_2.setText(_translate("MainWindow", "该班级的 测试/学生"))
self.info.setToolTip(_translate("MainWindow", "班级信息"))
self.del_class.setToolTip(_translate("MainWindow", "删除班级"))
self.exit.setText(_translate("MainWindow", "退出"))
self.menu.setTitle(_translate("MainWindow", "基础 (&B)"))
self.menu_2.setTitle(_translate("MainWindow", "高级 (&A)"))
self.menu_3.setTitle(_translate("MainWindow", "设置 (&S)"))
self._save.setText(_translate("MainWindow", "保存"))
self._main.setText(_translate("MainWindow", "返回主菜单"))
self.set_font.setText(_translate("MainWindow", "设置字体 (&F)"))
self.set_font.setShortcut(_translate("MainWindow", "Ctrl+Alt+F"))
self.make_svg.setText(_translate("MainWindow", "绘制SVG图像 (&I)"))
self.make_svg.setShortcut(_translate("MainWindow", "Ctrl+Alt+I"))
self.write_Python.setText(_translate("MainWindow", "编写Python代码 (&P)"))
self.write_Python.setShortcut(_translate("MainWindow", "Ctrl+Alt+P"))
self.rand_stu.setText(_translate("MainWindow", "随机点名 (&R)"))
self.rand_stu.setShortcut(_translate("MainWindow", "Ctrl+Alt+R"))
self.write_html.setText(_translate("MainWindow", "编写文档 (&H)"))
self.write_html.setShortcut(_translate("MainWindow", "Ctrl+Alt+H"))
self.set_bg.setText(_translate("MainWindow", "设置背景 (&G)"))
self.set_bg.setShortcut(_translate("MainWindow", "Ctrl+Alt+G"))
self.set_btn.setText(_translate("MainWindow", "设置按钮颜色 (&C)"))
self.set_btn.setShortcut(_translate("MainWindow", "Ctrl+Alt+B"))
| UTF-8 | Python | false | false | 10,662 | py | 8 | MainWindow.py | 5 | 0.660336 | 0.638385 | 0 | 201 | 50.129353 | 102 |
Shalva-A-Kohen/COMS_W4735_Project | 5,179,730,580,156 | 8d2956e346e942c3919130fca23b3c14f6ea9640 | dbf6a3e5baaedcbaeb3dd581c1e18a29df9cbd32 | /analysis.py | af60f303356c7b7b73d80afd1aa3c3bc845b7326 | [] | no_license | https://github.com/Shalva-A-Kohen/COMS_W4735_Project | e976b57a635615842e211bd531e472905c853565 | 68d1aa92333dd6ff2ddb36f666c077b560249722 | refs/heads/master | "2020-03-14T21:38:36.669438" | "2018-05-10T03:57:59" | "2018-05-10T03:57:59" | 131,801,383 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null | import cv2
import pandas as pd
import numpy as np
import sys
import statsmodels.api as sm


def reg_m(y, x):
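    """Fit an OLS regression of y on the predictor columns in x, appending a
    constant (intercept) column."""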
ones = np.ones(len(x[0]))
X = sm.add_constant(np.column_stack((x[0], ones)))
for ele in x[1:]:
X = sm.add_constant(np.column_stack((ele, X)))
results = sm.OLS(y, X).fit()
return results


df = pd.read_csv("subject_data.csv")
df = df[(df.color_1 != 0) | (df.color_2 != 0) | (df.color_3 != 0)]
target = pd.read_csv(sys.argv[1])
target = target.sort_values(["img_file"], ascending=[1])
print(target.dtypes, df.shape)
# reg_m indexes its predictors positionally (x[0], x[1:]), so pass a list of
# columns rather than the DataFrame itself
X = [df[col] for col in df.columns]
y = target.iloc[:, 1]
print(reg_m(y, X).summary())
| UTF-8 | Python | false | false | 784 | py | 10 | analysis.py | 3 | 0.66199 | 0.644133 | 0 | 34 | 22.058824 | 66 |