Dataset columns: markdown (string, 0–37k chars), code (string, 1–33.3k chars), path (string, 8–215 chars), repo_name (string, 6–77 chars), license (string, 15 classes), hash (string, 32 chars).
Therefore, the function to extract the comments is:
def parse_comments(comments):
    '''
    comment = {
        "bloggerId": "author",
        "sentences": [],  # all sentences in a comment
        "parents": []     # the order depends on how beautifulsoup gives me the parents
    }
    '''
    parsed_comments = {}
    for c in comments:
        comment = {}
        comment['bloggerId'] = c['bloggerid']
        comment['sentences_ids'] = [s['id'] for s in c.findAll('s', recursive=False)]
        comment['parents'] = [p['id'] for p in c.findParents("comment")]
        parsed_comments[c['id']] = comment
    return parsed_comments

import json
import pprint

def parse_article(html):
    soup = BeautifulSoup(html, "lxml")
    sentences = soup.findAll('s')
    parsed_sentences = {}
    for s in sentences:
        parsed_sentences[s['id']] = s.get_text()
    parsed_comments = parse_comments(soup.findAll('comment'))
    article = {
        'sentences': parsed_sentences,
        'comments': parsed_comments
    }
    return article

article = parse_article(article_text)
pprint.pprint(article)
json_article = json.dumps(article, indent=4)

print len(article['comments'].values()), " comments parsed."
print len(article['sentences'].values()), " sentences parsed."
testdataextractor/TestDataExtractor.ipynb
betoesquivel/onforums-application
mit
0c29941ad49de124c90df1b630bc73a5
Clustering just the sentences. Vectorizing the sentences (TF-IDF)
from sklearn.feature_extraction.text import TfidfVectorizer
import nltk.stem

english_stemmer = nltk.stem.SnowballStemmer('english')

class StemmedTfidfVectorizer(TfidfVectorizer):
    def build_analyzer(self):
        analyzer = super(StemmedTfidfVectorizer, self).build_analyzer()
        return lambda doc: (english_stemmer.stem(w) for w in analyzer(doc))

vectorizer = StemmedTfidfVectorizer(min_df=1, stop_words='english')
sentences_vectors = vectorizer.fit_transform(article['sentences'].values())

sorted_feature_indices = np.argsort(vectorizer.idf_)[::-1]
features = vectorizer.get_feature_names()

top_n_features = 20
top_features = [features[i] for i in sorted_feature_indices[:top_n_features]]
print "%d features found" % (len(features))
print "Top %d features:" % (top_n_features)
print top_features
testdataextractor/TestDataExtractor.ipynb
betoesquivel/onforums-application
mit
7e1fd783a71005f5bafb00e68ca460fd
Dimensionality reduction and Normalization
import gensim

# Dimensionality reduction using LSI. Go from 6D to 2D.
X = sentences_vectors.todense()
dct = gensim.corpora.Dictionary(X)
lsi_docs = {}
num_topics = 500
lsi_model = gensim.models.LsiModel(dct, num_topics=500)
print lsi_model.shape
print lsi_model[:50]
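As written, the cell above will not run: gensim.corpora.Dictionary expects tokenized documents rather than a dense matrix, LsiModel expects a bag-of-words corpus (optionally with an id2word mapping), and an LsiModel object has no shape attribute. A minimal sketch of LSI over the TF-IDF vectors from the earlier cell, assuming sentences_vectors and vectorizer are still in scope; the variable names are illustrative only:

```python
import gensim

# Rows of the scipy sparse TF-IDF matrix are documents, so documents_columns=False.
corpus = gensim.matutils.Sparse2Corpus(sentences_vectors, documents_columns=False)
id2word = dict(enumerate(vectorizer.get_feature_names()))

# Reduce to a 2-dimensional topic space and project every sentence into it.
lsi_model = gensim.models.LsiModel(corpus, id2word=id2word, num_topics=2)
lsi_vectors = [lsi_model[doc] for doc in corpus]  # list of (topic_id, weight) pairs per sentence
print lsi_vectors[:5]
```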
testdataextractor/TestDataExtractor.ipynb
betoesquivel/onforums-application
mit
5cd76b1102001450a28e5795a1cd92e1
Clustering with MeanShift. Why are all the vectors valued at 0?!
import numpy as np
from sklearn.cluster import MeanShift, estimate_bandwidth

bandwidth = estimate_bandwidth(X, quantile=0.3)
ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)
ms.fit(X)
labels = ms.labels_
cluster_centers = ms.cluster_centers_

labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
print "Number of estimated clusters : %d" % n_clusters_

# Plot result
import matplotlib.pyplot as plt
from itertools import cycle

plt.figure(1)
plt.clf()
colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
    my_members = labels == k
    cluster_center = cluster_centers[k]
    plt.plot(X[my_members, 0], X[my_members, 1], col + '.')
    plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
             markeredgecolor='k', markersize=14)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
testdataextractor/TestDataExtractor.ipynb
betoesquivel/onforums-application
mit
b14dac8480e31824a0beea1bf538886e
Using the same approach as a movie clusterer (http://brandonrose.org/clustering). Imports
import numpy as np
import pandas as pd
import nltk
import re
import os
import codecs
from sklearn import feature_extraction
import mpld3
testdataextractor/TestDataExtractor.ipynb
betoesquivel/onforums-application
mit
51559e69d8348cc85584172899ea9bce
Stopwords, stemming, and tokenizing
stopwords = nltk.corpus.stopwords.words('english')

from nltk.stem.snowball import SnowballStemmer
stemmer = SnowballStemmer("english")
print 'Done'

def tokenize_and_stem(sentences):
    tokens = [word for sent in sentences for word in nltk.word_tokenize(sent)]
    filtered_tokens = []
    for token in tokens:
        if re.search('[a-zA-Z]', token):
            filtered_tokens.append(token)
    stems = [stemmer.stem(t) for t in filtered_tokens]
    return stems

def tokenize_only(sentences):
    tokens = [word.lower() for sent in sentences for word in nltk.word_tokenize(sent)]
    filtered_tokens = []
    for token in tokens:
        if re.search('[a-zA-Z]', token):
            filtered_tokens.append(token)
    return filtered_tokens
testdataextractor/TestDataExtractor.ipynb
betoesquivel/onforums-application
mit
a3311e9adecc6623975b71c62bef65c6
Make the vocabulary, stemmed and not stemmed
totalvocab_stemmed = []
totalvocab_tokenized = []

allwords_stemmed = tokenize_and_stem(article['sentences'].values())
totalvocab_stemmed.extend(allwords_stemmed)

allwords_tokenized = tokenize_only(article['sentences'].values())
totalvocab_tokenized.extend(allwords_tokenized)
testdataextractor/TestDataExtractor.ipynb
betoesquivel/onforums-application
mit
5d27e3bac8a420d96bf1db4837e3c6e7
Pandas data frame to visualize the vocabulary
vocab_frame = pd.DataFrame({'words': totalvocab_tokenized}, index=totalvocab_stemmed)
print 'there are ' + str(vocab_frame.shape[0]) + ' items in vocab_frame'
print 'here are the first words in the vocabulary'
vocab_frame.head()
testdataextractor/TestDataExtractor.ipynb
betoesquivel/onforums-application
mit
fef0ff242f74a622f00fa4f138d35001
TF-IDF and document similarity
from sklearn.feature_extraction.text import TfidfVectorizer

tfidf_vectorizer = TfidfVectorizer(max_df=0.8, max_features=20000,
                                   min_df=0.2, stop_words='english',
                                   use_idf=True, tokenizer=tokenize_and_stem,
                                   ngram_range=(1, 3))

%time tfidf_matrix = tfidf_vectorizer.fit_transform(article['sentences'].values())
print tfidf_matrix.shape
terms = tfidf_vectorizer.get_feature_names()
testdataextractor/TestDataExtractor.ipynb
betoesquivel/onforums-application
mit
7bcb061e367fab63dfa276d97a1d1402
Cosine Similarity
from sklearn.metrics.pairwise import cosine_similarity

dist = 1 - cosine_similarity(tfidf_matrix)
dist_frame = pd.DataFrame(dist)
print dist
testdataextractor/TestDataExtractor.ipynb
betoesquivel/onforums-application
mit
ef894a301d271bfd74a3770b8af5d352
K-means clustering
from sklearn.cluster import KMeans

num_clusters = 5
km = KMeans(n_clusters=num_clusters)
%time km.fit(tfidf_matrix)
clusters = km.labels_.tolist()
clusters
testdataextractor/TestDataExtractor.ipynb
betoesquivel/onforums-application
mit
6272c7301a227678ddd71c6820048448
Multidimensional scaling to plot?
import os
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn.manifold import MDS

MDS()
mds = MDS(n_components=2, dissimilarity="precomputed", random_state=1)
pos = mds.fit_transform(dist)
xs, ys = pos[:, 0], pos[:, 1]
testdataextractor/TestDataExtractor.ipynb
betoesquivel/onforums-application
mit
3f9b5130916b06a66311629e5f3fcf0b
Plot
cluster_colors = {0: '#1b9e77', 1: '#d95f02', 2: '#7570b3', 3: '#e7298a', 4: '#66a61e'}
cluster_names = {0: 'C0', 1: 'C1', 2: 'C2', 3: 'C3', 4: 'C4'}

# iPython now will show matplotlib plots inline
%matplotlib inline

df = pd.DataFrame(dict(x=xs, y=ys, label=clusters,
                       title=["s{0}".format(x) for x in range(190)]))
groups = df.groupby('label')

### set up the plot
fig, ax = plt.subplots(figsize=(17, 9))
ax.margins(0.05)

for name, group in groups:
    ax.plot(group.x, group.y, marker='o', linestyle='', ms=12,
            label=cluster_names[name], color=cluster_colors[name], mec='none')
    ax.set_aspect('auto')
    ax.tick_params(axis='x', which='both', bottom='off', top='off', labelbottom='off')
    ax.tick_params(axis='y', which='both', left='off', top='off', labelleft='off')

ax.legend(numpoints=1)

for i in range(len(df)):
    ax.text(df.ix[i]['x'], df.ix[i]['y'], df.ix[i]['title'], size=8)

plt.show()

print article['sentences']['s151']
print article['sentences']['s170']
print article['sentences']['s171']
print article['sentences']['s108']
print article['sentences']['s93']
print article['sentences']['s150']
print article['sentences']['s114']
print article['sentences']['s110']
testdataextractor/TestDataExtractor.ipynb
betoesquivel/onforums-application
mit
36cedc196ffc1119d56a935fd03d858b
Hierarchical document clustering. The Ward clustering algorithm
from scipy.cluster.hierarchy import ward, dendrogram

linkage_matrix = ward(dist)  # define the linkage_matrix using ward clustering on pre-computed distances

fig, ax = plt.subplots(figsize=(15, 20))  # set size
ax = dendrogram(linkage_matrix, orientation="right",
                labels=["s{0}".format(x) for x in range(190)])

plt.tick_params(axis='x', which='both', bottom='off', top='off', labelbottom='off')

plt.tight_layout()
plt.savefig('ward_clusters.png', dpi=200)

frame = pd.DataFrame(linkage_matrix)
frame.sort_values(2, axis=0, ascending=False)
testdataextractor/TestDataExtractor.ipynb
betoesquivel/onforums-application
mit
33b279f9d48f829c9f0e3579b5a67d4c
Extracting the links
soup = BeautifulSoup(article_text, "lxml")

def is_valid_link(tag):
    if tag.name != 'link':
        return False
    link = tag
    l_conf = link['link_confidence']
    l_val = link['validation']
    arg = link.find_next_sibling('argument')
    sent = link.find_next_sibling('sentiment')
    a_val = arg['validation']
    s_val = sent['validation']
    a_conf = arg['val_confidence']
    s_conf = sent['val_confidence']
    args = [l_val, a_val, s_val, l_conf, a_conf, s_conf]
    return all(el == '1' or el == 'yes' for el in args)

linksHTML = soup.findAll(lambda tag: is_valid_link(tag))
print len(linksHTML), "valid links found!"

parsed_links = []
for link_html in linksHTML:
    arg_html = link_html.find_next_sibling('argument')
    sent_html = link_html.find_next_sibling('sentiment')

    link = {}
    link['id'] = link_html['id']
    link['art_sentence'] = link_html['art_sentence']
    link['com_sentence'] = link_html['com_sentence']
    link['confidence'] = link_html['link_confidence']
    link['validation'] = link_html['validation']

    arg = {}
    arg['label'] = arg_html['label']
    arg['confidence'] = arg_html['val_confidence']
    arg['validation'] = arg_html['validation']

    sent = {}
    sent['label'] = sent_html['label']
    sent['confidence'] = sent_html['val_confidence']
    sent['validation'] = sent_html['validation']

    link['argument'] = arg
    link['sentiment'] = sent
    parsed_links.append(link)

# pprint.pprint(parsed_links, indent=4)
print len(parsed_links), "links parsed!"
testdataextractor/TestDataExtractor.ipynb
betoesquivel/onforums-application
mit
8004abb5e5aff27c46251a412f862872
Exercise 6.1 Impute the missing values of the Age and Embarked columns
titanic.Age.fillna(titanic.Age.median(), inplace=True)
titanic.isnull().sum()
titanic.Embarked.mode()
titanic.Embarked.fillna('S', inplace=True)
titanic.isnull().sum()
exercises/06-Titanic_cross_validation.ipynb
MonicaGutierrez/PracticalMachineLearningClass
mit
37e3d6745563b39e28748865ae1314ff
Exercise 6.2 Convert the Sex and Embarked columns to categorical features
titanic['Sex_Female'] = titanic.Sex.map({'male': 0, 'female': 1})
titanic.head()

embarkedummy = pd.get_dummies(titanic.Embarked, prefix='Embarked')
embarkedummy.drop(embarkedummy.columns[0], axis=1, inplace=True)
titanic = pd.concat([titanic, embarkedummy], axis=1)
titanic.head()
exercises/06-Titanic_cross_validation.ipynb
MonicaGutierrez/PracticalMachineLearningClass
mit
8728ff46536fac6c2c366415e6ac792c
Exercise 6.3 (2 points) From the set of features ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked'] (note: use the created categorical features for Sex and Embarked), select the subset of features that maximizes the accuracy of the model using K-fold cross-validation.
y = titanic['Survived']
features = ['Pclass', 'Age', 'SibSp', 'Parch', 'Fare', 'Sex_Female', 'Embarked_Q', 'Embarked_S']

import numpy as np

def comb(n, k):
    return np.math.factorial(n) / (np.math.factorial(n - k) * np.math.factorial(k))

np.sum([comb(8, i) for i in range(0, 8)])

import itertools

possible_models = []
for i in range(1, len(features) + 1):
    possible_models.extend(list(itertools.combinations(features, i)))
possible_models

from sklearn.linear_model import LogisticRegression
from sklearn.cross_validation import cross_val_score

Y = titanic.Survived
resultado = pd.DataFrame(index=possible_models, columns=['presicion'])
for i in range(len(possible_models)):
    X = titanic[list(possible_models[i])]
    reglogistica = LogisticRegression(C=1e9)
    resultado.iloc[i] = cross_val_score(reglogistica, X, Y, cv=10, scoring='accuracy').mean()

resultado.head()
resultado.sort_values('presicion', ascending=False).head(1)
exercises/06-Titanic_cross_validation.ipynb
MonicaGutierrez/PracticalMachineLearningClass
mit
73d6b798718870f6d317df9ab88e69f1
Looking at the data The training data contains a row per comment, with an id, the text of the comment, and 6 different labels that we'll try to predict.
train.head()
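The cell that loads the data is not included in this excerpt; presumably it looks something like the sketch below. The file names are assumptions based on the Kaggle toxic-comment data this notebook works with:

```python
import pandas as pd

# Hypothetical paths; the actual notebook reads these in an earlier cell.
train = pd.read_csv('train.csv')
test = pd.read_csv('test.csv')
subm = pd.read_csv('sample_submission.csv')
```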
DEEP LEARNING/NLP/text analyses/NB-SVM strong linear baseline - classif.ipynb
Diyago/Machine-Learning-scripts
apache-2.0
2e0dcbf3b10ec01039d789ca24cedee3
Here are a couple of examples of comments: one toxic, and one with no labels.
train['comment_text'][0]
train['comment_text'][2]
DEEP LEARNING/NLP/text analyses/NB-SVM strong linear baseline - classif.ipynb
Diyago/Machine-Learning-scripts
apache-2.0
b34bc4904fb6cff8a7264d79939353f6
The length of the comments varies a lot.
lens = train.comment_text.str.len()
lens.mean(), lens.std(), lens.max()
lens.hist();
DEEP LEARNING/NLP/text analyses/NB-SVM strong linear baseline - classif.ipynb
Diyago/Machine-Learning-scripts
apache-2.0
bd99d8dd69067b330b7d71f8a56d8f61
We'll create a list of all the labels to predict, and we'll also create a 'none' label so we can see how many comments have no labels. We can then summarize the dataset.
label_cols = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']
train['none'] = 1 - train[label_cols].max(axis=1)
train.describe()
len(train), len(test)
DEEP LEARNING/NLP/text analyses/NB-SVM strong linear baseline - classif.ipynb
Diyago/Machine-Learning-scripts
apache-2.0
bb56540bca42e73eb0eadf741dab432d
There are a few empty comments that we need to get rid of, otherwise sklearn will complain.
COMMENT = 'comment_text'
train[COMMENT].fillna("unknown", inplace=True)
test[COMMENT].fillna("unknown", inplace=True)
DEEP LEARNING/NLP/text analyses/NB-SVM strong linear baseline - classif.ipynb
Diyago/Machine-Learning-scripts
apache-2.0
45dc0e9d783529bfb189e1395495ebe6
Building the model We'll start by creating a bag of words representation, as a term document matrix. We'll use ngrams, as suggested in the NBSVM paper.
import re, string

re_tok = re.compile(f'([{string.punctuation}“”¨«»®´·º½¾¿¡§£₤‘’])')

def tokenize(s):
    return re_tok.sub(r' \1 ', s).split()
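A quick check of what this tokenizer does: it pads each punctuation character with spaces and then splits on whitespace, so punctuation marks become their own tokens.

```python
print(tokenize("Don't be toxic, please!"))
# ['Don', "'", 't', 'be', 'toxic', ',', 'please', '!']
```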
DEEP LEARNING/NLP/text analyses/NB-SVM strong linear baseline - classif.ipynb
Diyago/Machine-Learning-scripts
apache-2.0
a1744ddb571daa30acf30d9fb0e3a184
It turns out that using TF-IDF gives even better priors than the binarized features used in the paper. I don't think this has been mentioned in any paper before, but it improves leaderboard score from 0.59 to 0.55.
n = train.shape[0]
vec = TfidfVectorizer(ngram_range=(1, 2), tokenizer=tokenize,
                      min_df=3, max_df=0.9, strip_accents='unicode', use_idf=1,
                      smooth_idf=1, sublinear_tf=1)
trn_term_doc = vec.fit_transform(train[COMMENT])
test_term_doc = vec.transform(test[COMMENT])
DEEP LEARNING/NLP/text analyses/NB-SVM strong linear baseline - classif.ipynb
Diyago/Machine-Learning-scripts
apache-2.0
23531cc5151e8e12da38c9ba2950d3cd
This creates a sparse matrix with only a small number of non-zero elements (stored elements in the representation below).
trn_term_doc, test_term_doc
DEEP LEARNING/NLP/text analyses/NB-SVM strong linear baseline - classif.ipynb
Diyago/Machine-Learning-scripts
apache-2.0
6cc5a5c6a6de23a833fc49eba20fc251
Here's the basic naive bayes feature equation:
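The equation the next cell implements is the smoothed log-count ratio used as the naive Bayes feature weighting: for each token $f$,

$$ r = \log\frac{p(f \mid y=1)}{p(f \mid y=0)}, \qquad p(f \mid y=c) = \frac{\sum_{i:\,y_i=c} f_i + 1}{|\{i : y_i = c\}| + 1}, $$

where $f_i$ is the (TF-IDF) value of the token in document $i$ and the $+1$ terms are the smoothing visible in the pr function below.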
def pr(y_i, y):
    p = x[y == y_i].sum(0)
    return (p + 1) / ((y == y_i).sum() + 1)

x = trn_term_doc
test_x = test_term_doc
DEEP LEARNING/NLP/text analyses/NB-SVM strong linear baseline - classif.ipynb
Diyago/Machine-Learning-scripts
apache-2.0
e445798f6aa75efbef2af0fdc60264e2
Fit a model for one dependent at a time:
def get_mdl(y):
    y = y.values
    r = np.log(pr(1, y) / pr(0, y))
    m = LogisticRegression(C=4, dual=True)
    x_nb = x.multiply(r)
    return m.fit(x_nb, y), r

preds = np.zeros((len(test), len(label_cols)))

for i, j in enumerate(label_cols):
    print('fit', j)
    m, r = get_mdl(train[j])
    preds[:, i] = m.predict_proba(test_x.multiply(r))[:, 1]
DEEP LEARNING/NLP/text analyses/NB-SVM strong linear baseline - classif.ipynb
Diyago/Machine-Learning-scripts
apache-2.0
6bd6ee5b0299f7013b4588b579da841d
And finally, create the submission file.
submid = pd.DataFrame({'id': subm["id"]})
submission = pd.concat([submid, pd.DataFrame(preds, columns=label_cols)], axis=1)
submission.to_csv('submission.csv', index=False)
DEEP LEARNING/NLP/text analyses/NB-SVM strong linear baseline - classif.ipynb
Diyago/Machine-Learning-scripts
apache-2.0
e8f57b5399222e4123a796850959c31d
[4-2] Define a function derivative that draws the tangent line at x = 0.5 and computes its slope.
def derivative(f, filename):
    fig = plt.figure(figsize=(4, 4))
    images = []
    x0, d = 0.5, 0.5
    for _ in range(10):
        subplot = fig.add_subplot(1, 1, 1)
        subplot.set_xlim(0, 1)
        subplot.set_ylim(0, 1)
        slope = (f(x0 + d) - f(x0)) / d
        linex = np.linspace(0, 1, 100)
        image0 = subplot.text(0.5, 8, ('slope = %f' % slope))
        image1, = subplot.plot(linex, f(linex), color='blue')
        image2 = subplot.scatter([x0, x0 + d], [f(x0), f(x0 + d)])
        def g(x):
            return f(x0) + slope * (x - x0)
        image3, = subplot.plot([0, 1], [g(0), g(1)], linewidth=1, color='red')
        image4 = subplot.text(0.3, 1.05, ('slope = %f' % slope))
        images.append([image0, image1, image2, image3, image4])
        d *= 0.5
    ani = animation.ArtistAnimation(fig, images, interval=1000)
    ani.save(filename, writer='imagemagick', fps=1)
    return ani
No5/Figure11 - derivative_animation.ipynb
enakai00/jupyter_NikkeiLinux
apache-2.0
09222503d255ee6577fb36f958111c71
[4-3] Define the quadratic function y = x*x and call the derivative function. This creates the animated GIF file derivative01.gif.
def f(x):
    y = x * x
    return y

derivative(f, 'derivative01.gif')
No5/Figure11 - derivative_animation.ipynb
enakai00/jupyter_NikkeiLinux
apache-2.0
839046bc4768fa55b26b326a2eac3f03
Bayesian optimization, or sequential model-based optimization, uses a surrogate model to model the expensive-to-evaluate function func. There are several choices for what kind of surrogate model to use. This example compares the performance of gaussian processes, extra trees, and random forests as surrogate models. A purely random optimization strategy is used as a baseline. Toy model We will use the branin function as a toy model for the expensive function. In a real-world application this function would be unknown and expensive to evaluate.
from skopt.benchmarks import branin as _branin

def branin(x, noise_level=0.):
    return _branin(x) + noise_level * np.random.randn()

from matplotlib.colors import LogNorm

def plot_branin():
    fig, ax = plt.subplots()

    x1_values = np.linspace(-5, 10, 100)
    x2_values = np.linspace(0, 15, 100)
    x_ax, y_ax = np.meshgrid(x1_values, x2_values)
    vals = np.c_[x_ax.ravel(), y_ax.ravel()]
    fx = np.reshape([branin(val) for val in vals], (100, 100))

    cm = ax.pcolormesh(x_ax, y_ax, fx,
                       norm=LogNorm(vmin=fx.min(), vmax=fx.max()))

    minima = np.array([[-np.pi, 12.275], [+np.pi, 2.275], [9.42478, 2.475]])
    ax.plot(minima[:, 0], minima[:, 1], "r.", markersize=14, lw=0, label="Minima")

    cb = fig.colorbar(cm)
    cb.set_label("f(x)")

    ax.legend(loc="best", numpoints=1)

    ax.set_xlabel("X1")
    ax.set_xlim([-5, 10])
    ax.set_ylabel("X2")
    ax.set_ylim([0, 15])

plot_branin()
examples/strategy-comparison.ipynb
glouppe/scikit-optimize
bsd-3-clause
78024db7d508c34c8bf502c585c27d90
This shows the value of the two-dimensional branin function and the three minima. Objective The objective of this example is to find one of these minima in as few iterations as possible. One iteration is defined as one call to the branin function. We will evaluate each model several times using a different seed for the random number generator. Then compare the average performance of these models. This makes the comparison more robust against models that get "lucky".
from functools import partial
from skopt import gp_minimize, forest_minimize, dummy_minimize

func = partial(branin, noise_level=2.0)
bounds = [(-5.0, 10.0), (0.0, 15.0)]
x0 = [2.5, 7.5]
n_calls = 80

def run(minimizer, n_iter=20):
    return [minimizer(func, bounds, x0=x0, n_calls=n_calls, random_state=n)
            for n in range(n_iter)]

# Random search
dummy_res = run(dummy_minimize)

# Gaussian processes
gp_res = run(gp_minimize)

# Random forest
rf_res = run(partial(forest_minimize, base_estimator="rf"))

# Extra trees
et_res = run(partial(forest_minimize, base_estimator="et"))
examples/strategy-comparison.ipynb
glouppe/scikit-optimize
bsd-3-clause
9a3986c259901d5071e1cb37619c66b2
Note that this can take a few minutes.
from skopt.plots import plot_convergence

plot_convergence(("dummy_minimize", dummy_res),
                 ("gp_minimize", gp_res),
                 ("forest_minimize('rf')", rf_res),
                 ("forest_minimize('et')", et_res),
                 true_minimum=0.397887, yscale="log")
examples/strategy-comparison.ipynb
glouppe/scikit-optimize
bsd-3-clause
5e02a7726db23350322fdd99a6d0cd9e
Labor Force Status The notion that women make up to 23 cents less on the dollar than men has been challenged numerous times. Many, including Karen Agness, Resident Fellow at the Harvard Institute of Politics, claim that this statistic is manipulated and misrepresented by popular media and the government. The extent of systemic discrimination against women in the U.S. suggested by this statistic is far from conclusive, as it does not take into account the many factors producing the number. Figure 1 illustrates the difference in labor force placement between men and women. It is worth noting that there were 20% more female respondents in this survey, so the female count is inflated compared to that of males. Even after adjusting for the greater number of female respondents, about 25% more females than males are not in the labor force. Naturally, this kind of discrepancy in labor force status is likely to contribute to the overall gender pay gap we are witnessing in the U.S. Moreover, the numbers of men and women who are unemployed and looking are nearly the same. Although it may not debunk the notion of systemic hiring discrimination, this insight discredits it, considering that more women are not working yet no more women are looking for a job. If there were systemic hiring discrimination against women, there would presumably be a greater share of women than men looking for a job.
fig, ax = plt.subplots()
fig.set_size_inches(11.7, 8.27)
ax.set_title('Figure 2. Income Per Week From Main Job', weight='bold', fontsize=17)
sns.set_style("whitegrid")
sns.violinplot(x='Sex', y='Main Job Income/Wk', data=atus)
plt.xlabel('Sex', weight='bold', fontsize=13)
plt.ylabel('Main Job Income/Wk ($)', weight='bold', fontsize=13)
UG_S16/Jerry_Allen_Gender_Pay_Gap.ipynb
NYUDataBootcamp/Projects
mit
b1ecb528c527152147f90d561969bd76
Differences in Main Stream of Income Figure 2 clearly illustrates men earning more income than women. There is a sizable share of women earning less than $500/week, while very few make more than $1500/week. On the other hand, men's income is more evenly distributed, as opposed to being as bottom-heavy as women's. The interquartile range for men is about $1000, compared to about $600 for women. Furthermore, the figure clearly portrays men having a lot more income upside, as the upper quartile for women is about $1000, while the upper quartile for men is about $1500 (displayed in the black lines within the axes objects). This difference in income is just as stark when observing the top earners: the top earner among men (about $2900) makes about 30% more than his female counterpart. If nothing else, this figure reinforces the fact that men make more money than women, and that their income is more widely distributed. The figures below suggest potential drivers of this inequality as it pertains to differences in time use between men and women.
fig, ax = plt.subplots()
fig.set_size_inches(11.7, 8.27)
ax.set_title('Figure 3. Hours Worked Per Week', weight='bold', fontsize=17)
sns.set_style('whitegrid')
sns.boxplot(x='Sex', y='Hours Worked/Wk', data=atus)
plt.xlabel('Sex', weight='bold', fontsize=13)
plt.ylabel('Hours Worked/Wk', weight='bold', fontsize=13)
UG_S16/Jerry_Allen_Gender_Pay_Gap.ipynb
NYUDataBootcamp/Projects
mit
fb7ae70762c2995a0b40474d932bb9fa
Differences in Hours Worked One obvious factor to investigate is the number of hours worked by both men and women, since this will surely affect the earnings of each sex. Figure 3 shows that males work considerably more hours than females. A clear indicator of this is that the upper quartile for women, 40 hours/week, is virtually equal to the lower quartile for men. It does not require statistical analysis to presume that the more hours one works, the more income one tends to earn. This perhaps explains, at least to some degree, the stark difference in incomes between men and women shown in Figure 2. However, the question remains what women are spending their time on more than men, if they are not working as many hours. The implication is that women are taking on certain responsibilities (more so than men) that take up their time, and this in turn has a negative impact on their income.
fig, ax = plt.subplots()
fig.set_size_inches(11.7, 8.27)
ax.set(xlim=(0, 1400))
ax.set_title('Figure 4. Mins/Day Providing Secondary Child Care (<13y/o)', weight='bold', fontsize=17)
sns.violinplot(data=atus, x='Secondary Child Care (mins)', y='Sex')
plt.xlabel('Secondary Child Care (Mins/Day)', weight='bold', fontsize=13)
plt.ylabel('Sex', weight='bold', fontsize=13)
UG_S16/Jerry_Allen_Gender_Pay_Gap.ipynb
NYUDataBootcamp/Projects
mit
965df739f6d7506a7f74f1b4b6482172
The Differences in the Time Spent Providing Child Care Secondary child care refers to time spent looking after children while taking on something else as a primary activity. In short, it is keeping a watchful eye over children without providing one's full and undivided attention. Harvard Economics Professor Claudia Goldin postulated that women providing more family care is a potential reason for the pay gap. Moreover, she touched on research that plausibly suggests that women value temporal flexibility more than men, while men value income more than women. Figure 4 shows that women provide more secondary child care than men, as over 25% provide more than 200 minutes/day of such care. The fat tail on the blue object depicts that there are a great many women providing hundreds of minutes of child care each day. As a result, the women who have these responsibilities are presumably earning less income than the men and women who do not.
fig, ax = plt.subplots()
fig.set_size_inches(11.27, 5.5)
ax.set(ylim=(0, 1400))
ax.set_title("Figure 5. Mins/Day Providing Elderly Care", weight='bold', fontsize=17)
sns.set_style("whitegrid")
sns.swarmplot(x='Sex', y='Elderly Care (mins)', data=atus)
plt.xlabel('Sex', weight='bold', fontsize=13)
plt.ylabel('Elderly Care (Mins/Day)', weight='bold', fontsize=13)
UG_S16/Jerry_Allen_Gender_Pay_Gap.ipynb
NYUDataBootcamp/Projects
mit
00f32dad2b8cce17df0b556b926897b9
Load the text8 dataset, a file of cleaned up Wikipedia articles from Matt Mahoney. The next cell will download the data set to the data folder. Then you can extract it and delete the archive file to save storage space.
from urllib.request import urlretrieve
from os.path import isfile, isdir
from tqdm import tqdm
import zipfile

dataset_folder_path = 'data'
dataset_filename = 'text8.zip'
dataset_name = 'Text8 Dataset'

class DLProgress(tqdm):
    last_block = 0

    def hook(self, block_num=1, block_size=1, total_size=None):
        self.total = total_size
        self.update((block_num - self.last_block) * block_size)
        self.last_block = block_num

if not isfile(dataset_filename):
    with DLProgress(unit='B', unit_scale=True, miniters=1, desc=dataset_name) as pbar:
        urlretrieve(
            'http://mattmahoney.net/dc/text8.zip',
            dataset_filename,
            pbar.hook)

if not isdir(dataset_folder_path):
    with zipfile.ZipFile(dataset_filename) as zip_ref:
        zip_ref.extractall(dataset_folder_path)

with open('data/text8') as f:
    text = f.read()
embeddings/.ipynb_checkpoints/Skip-Grams-Solution-checkpoint.ipynb
zhuanxuhit/deep-learning
mit
69b1c36bf53ab3bd34b26914fc84b27f
Preprocessing Here I'm fixing up the text to make training easier. This comes from the utils module I wrote. The preprocess function converts any punctuation into tokens, so a period is changed to <PERIOD>. In this data set, there aren't any periods, but it will help in other NLP problems. I'm also removing all words that show up five or fewer times in the dataset. This will greatly reduce issues due to noise in the data and improve the quality of the vector representations. If you want to write your own functions for this stuff, go for it.
words = utils.preprocess(text)
print(words[:30])
print("Total words: {}".format(len(words)))
print("Unique words: {}".format(len(set(words))))
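The utils module itself is not included in this excerpt. A minimal sketch of what its preprocess helper might look like, based only on the description above (the real implementation may differ):

```python
from collections import Counter

def preprocess(text):
    # Replace punctuation with named tokens, e.g. '.' -> ' <PERIOD> '
    text = text.lower()
    text = text.replace('.', ' <PERIOD> ').replace(',', ' <COMMA> ')
    text = text.replace('"', ' <QUOTATION_MARK> ').replace('?', ' <QUESTION_MARK> ')
    words = text.split()

    # Drop rare words (five or fewer occurrences) to reduce noise
    word_counts = Counter(words)
    return [word for word in words if word_counts[word] > 5]
```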
embeddings/.ipynb_checkpoints/Skip-Grams-Solution-checkpoint.ipynb
zhuanxuhit/deep-learning
mit
5f562ba42108862d751eda73ed385d36
And here I'm creating dictionaries to convert words to integers and back again, integers to words. The integers are assigned in descending frequency order, so the most frequent word ("the") is given the integer 0, the next most frequent is 1, and so on. The words are converted to integers and stored in the list int_words.
vocab_to_int, int_to_vocab = utils.create_lookup_tables(words)
int_words = [vocab_to_int[word] for word in words]
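create_lookup_tables also lives in the utils module; a sketch consistent with the description above (most frequent word gets id 0), offered as an assumption rather than the actual source:

```python
from collections import Counter

def create_lookup_tables(words):
    # Sort the vocabulary by descending frequency and assign integer ids in that order.
    word_counts = Counter(words)
    sorted_vocab = sorted(word_counts, key=word_counts.get, reverse=True)
    int_to_vocab = {ii: word for ii, word in enumerate(sorted_vocab)}
    vocab_to_int = {word: ii for ii, word in int_to_vocab.items()}
    return vocab_to_int, int_to_vocab
```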
embeddings/.ipynb_checkpoints/Skip-Grams-Solution-checkpoint.ipynb
zhuanxuhit/deep-learning
mit
39985723b9feccf3315bfb38b823390d
Subsampling Words that show up often such as "the", "of", and "for" don't provide much context to the nearby words. If we discard some of them, we can remove some of the noise from our data and in return get faster training and better representations. This process is called subsampling by Mikolov. For each word $w_i$ in the training set, we'll discard it with probability given by $$ P(w_i) = 1 - \sqrt{\frac{t}{f(w_i)}} $$ where $t$ is a threshold parameter and $f(w_i)$ is the frequency of word $w_i$ in the total dataset. I'm going to leave this up to you as an exercise. Check out my solution to see how I did it. Exercise: Implement subsampling for the words in int_words. That is, go through int_words and discard each word given the probability $P(w_i)$ shown above. Note that $P(w_i)$ is the probability that a word is discarded. Assign the subsampled data to train_words.
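To make the formula concrete: with the threshold $t = 10^{-5}$ used below, a word that makes up 5% of the corpus ($f(w_i) = 0.05$) is dropped with probability $P(w_i) = 1 - \sqrt{10^{-5}/0.05} \approx 0.986$, while a word with frequency at or below $10^{-5}$ is never dropped.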
from collections import Counter
import random

threshold = 1e-5
word_counts = Counter(int_words)
total_count = len(int_words)
freqs = {word: count / total_count for word, count in word_counts.items()}
p_drop = {word: 1 - np.sqrt(threshold / freqs[word]) for word in word_counts}
train_words = [word for word in int_words if p_drop[word] < random.random()]
embeddings/.ipynb_checkpoints/Skip-Grams-Solution-checkpoint.ipynb
zhuanxuhit/deep-learning
mit
605cb297d7e946781b44fb513944a88f
Making batches Now that our data is in good shape, we need to get it into the proper form to pass it into our network. With the skip-gram architecture, for each word in the text, we want to grab all the words in a window around that word, with size $C$. From Mikolov et al.: "Since the more distant words are usually less related to the current word than those close to it, we give less weight to the distant words by sampling less from those words in our training examples... If we choose $C = 5$, for each training word we will select randomly a number $R$ in range $< 1; C >$, and then use $R$ words from history and $R$ words from the future of the current word as correct labels." Exercise: Implement a function get_target that receives a list of words, an index, and a window size, then returns a list of words in the window around the index. Make sure to use the algorithm described above, where you choose a random number $R$ of words from the window.
def get_target(words, idx, window_size=5):
    ''' Get a list of words in a window around an index. '''
    R = np.random.randint(1, window_size + 1)
    start = idx - R if (idx - R) > 0 else 0
    stop = idx + R
    target_words = set(words[start:idx] + words[idx + 1:stop + 1])
    return list(target_words)
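A quick usage check: the function returns the $R$ words before and the $R$ words after the index, for a randomly drawn $R$ between 1 and window_size, so the output varies from call to call.

```python
words = list(range(10))
print(get_target(words, idx=5, window_size=5))
# e.g. [2, 3, 4, 6, 7, 8] (in some order) on a call where R happens to be 3
```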
embeddings/.ipynb_checkpoints/Skip-Grams-Solution-checkpoint.ipynb
zhuanxuhit/deep-learning
mit
fa71da49e9f8068377ef4829579deb12
Here's a function that returns batches for our network. The idea is that it grabs batch_size words from a words list. Then for each of those words, it gets the target words in the window. I haven't found a way to pass in a random number of target words and get it to work with the architecture, so I make one row per input-target pair. This is a generator function, by the way, which helps save memory.
def get_batches(words, batch_size, window_size=5):
    ''' Create a generator of word batches as a tuple (inputs, targets) '''
    n_batches = len(words) // batch_size

    # only full batches
    words = words[:n_batches * batch_size]

    for idx in range(0, len(words), batch_size):
        x, y = [], []
        batch = words[idx:idx + batch_size]
        for ii in range(len(batch)):
            batch_x = batch[ii]
            batch_y = get_target(batch, ii, window_size)
            y.extend(batch_y)
            x.extend([batch_x] * len(batch_y))
        yield x, y
embeddings/.ipynb_checkpoints/Skip-Grams-Solution-checkpoint.ipynb
zhuanxuhit/deep-learning
mit
8cb7235d4cd2eeb19ec88bd05040c4ca
Building the graph From Chris McCormick's blog, we can see the general structure of our network. The input words are passed in as one-hot encoded vectors. This will go into a hidden layer of linear units, then into a softmax layer. We'll use the softmax layer to make a prediction like normal. The idea here is to train the hidden layer weight matrix to find efficient representations for our words. This weight matrix is usually called the embedding matrix or embedding look-up table. We can discard the softmax layer because we don't really care about making predictions with this network. We just want the embedding matrix so we can use it in other networks we build from the dataset. I'm going to have you build the graph in stages now. First off, creating the inputs and labels placeholders like normal. Exercise: Assign inputs and labels using tf.placeholder. We're going to be passing in integers, so set the data types to tf.int32. The batches we're passing in will have varying sizes, so set the batch sizes to [None]. To make things work later, you'll need to set the second dimension of labels to None or 1.
train_graph = tf.Graph()
with train_graph.as_default():
    inputs = tf.placeholder(tf.int32, [None], name='inputs')
    labels = tf.placeholder(tf.int32, [None, None], name='labels')
embeddings/.ipynb_checkpoints/Skip-Grams-Solution-checkpoint.ipynb
zhuanxuhit/deep-learning
mit
a3ad185aa77d19c0b83a6228fa11f3ec
Embedding The embedding matrix has a size of the number of words by the number of neurons in the hidden layer. So, if you have 10,000 words and 300 hidden units, the matrix will have size $10,000 \times 300$. Remember that we're using one-hot encoded vectors for our inputs. When you do the matrix multiplication of the one-hot vector with the embedding matrix, you end up selecting only one row out of the entire matrix: You don't actually need to do the matrix multiplication, you just need to select the row in the embedding matrix that corresponds to the input word. Then, the embedding matrix becomes a lookup table: you're looking up a vector the size of the hidden layer that represents the input word. <img src="assets/word2vec_weight_matrix_lookup_table.png" width=500> Exercise: Tensorflow provides a convenient function tf.nn.embedding_lookup that does this lookup for us. You pass in the embedding matrix and a tensor of integers, then it returns rows in the matrix corresponding to those integers. Below, set the number of embedding features you'll use (200 is a good start), create the embedding matrix variable, and use tf.nn.embedding_lookup to get the embedding tensors. For the embedding matrix, I suggest you initialize it with uniform random numbers between -1 and 1 using tf.random_uniform.
n_vocab = len(int_to_vocab)
n_embedding = 200  # Number of embedding features
with train_graph.as_default():
    embedding = tf.Variable(tf.random_uniform((n_vocab, n_embedding), -1, 1))
    embed = tf.nn.embedding_lookup(embedding, inputs)
embeddings/.ipynb_checkpoints/Skip-Grams-Solution-checkpoint.ipynb
zhuanxuhit/deep-learning
mit
2e17e3746197af736aa967ea2ca44d7c
Negative sampling For every example we give the network, we train it using the output from the softmax layer. That means for each input, we're making very small changes to millions of weights even though we only have one true example. This makes training the network very inefficient. We can approximate the loss from the softmax layer by only updating a small subset of all the weights at once. We'll update the weights for the correct label, but only a small number of incorrect labels. This is called "negative sampling". Tensorflow has a convenient function to do this, tf.nn.sampled_softmax_loss. Exercise: Below, create weights and biases for the softmax layer. Then, use tf.nn.sampled_softmax_loss to calculate the loss. Be sure to read the documentation to figure out how it works.
# Number of negative labels to sample
n_sampled = 100
with train_graph.as_default():
    softmax_w = tf.Variable(tf.truncated_normal((n_vocab, n_embedding), stddev=0.1))
    softmax_b = tf.Variable(tf.zeros(n_vocab))

    # Calculate the loss using negative sampling
    loss = tf.nn.sampled_softmax_loss(softmax_w, softmax_b,
                                      labels, embed,
                                      n_sampled, n_vocab)

    cost = tf.reduce_mean(loss)
    optimizer = tf.train.AdamOptimizer().minimize(cost)
embeddings/.ipynb_checkpoints/Skip-Grams-Solution-checkpoint.ipynb
zhuanxuhit/deep-learning
mit
f50306305dca800b39c39ce894f4f1f8
Validation This code is from Thushan Ganegedara's implementation. Here we're going to choose a few common words and few uncommon words. Then, we'll print out the closest words to them. It's a nice way to check that our embedding table is grouping together words with similar semantic meanings.
with train_graph.as_default():
    ## From Thushan Ganegedara's implementation
    valid_size = 16      # Random set of words to evaluate similarity on.
    valid_window = 100
    # pick 8 samples from (0,100) and (1000,1100) each ranges. lower id implies more frequent
    valid_examples = np.array(random.sample(range(valid_window), valid_size // 2))
    valid_examples = np.append(valid_examples,
                               random.sample(range(1000, 1000 + valid_window), valid_size // 2))

    valid_dataset = tf.constant(valid_examples, dtype=tf.int32)

    # We use the cosine distance:
    norm = tf.sqrt(tf.reduce_sum(tf.square(embedding), 1, keep_dims=True))
    normalized_embedding = embedding / norm
    valid_embedding = tf.nn.embedding_lookup(normalized_embedding, valid_dataset)
    similarity = tf.matmul(valid_embedding, tf.transpose(normalized_embedding))

# If the checkpoints directory doesn't exist:
!mkdir checkpoints

epochs = 10
batch_size = 1000
window_size = 10

with train_graph.as_default():
    saver = tf.train.Saver()

with tf.Session(graph=train_graph) as sess:
    iteration = 1
    loss = 0
    sess.run(tf.global_variables_initializer())

    for e in range(1, epochs + 1):
        batches = get_batches(train_words, batch_size, window_size)
        start = time.time()
        for x, y in batches:
            feed = {inputs: x,
                    labels: np.array(y)[:, None]}
            train_loss, _ = sess.run([cost, optimizer], feed_dict=feed)

            loss += train_loss

            if iteration % 100 == 0:
                end = time.time()
                print("Epoch {}/{}".format(e, epochs),
                      "Iteration: {}".format(iteration),
                      "Avg. Training loss: {:.4f}".format(loss / 100),
                      "{:.4f} sec/batch".format((end - start) / 100))
                loss = 0
                start = time.time()

            if iteration % 1000 == 0:
                # note that this is expensive (~20% slowdown if computed every 500 steps)
                sim = similarity.eval()
                for i in range(valid_size):
                    valid_word = int_to_vocab[valid_examples[i]]
                    top_k = 8  # number of nearest neighbors
                    nearest = (-sim[i, :]).argsort()[1:top_k + 1]
                    log = 'Nearest to %s:' % valid_word
                    for k in range(top_k):
                        close_word = int_to_vocab[nearest[k]]
                        log = '%s %s,' % (log, close_word)
                    print(log)

            iteration += 1

    save_path = saver.save(sess, "checkpoints/text8.ckpt")
    embed_mat = sess.run(normalized_embedding)
embeddings/.ipynb_checkpoints/Skip-Grams-Solution-checkpoint.ipynb
zhuanxuhit/deep-learning
mit
d8699f6d3f999894989982076339104c
Restore the trained network if you need to:
with train_graph.as_default():
    saver = tf.train.Saver()

with tf.Session(graph=train_graph) as sess:
    saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
    embed_mat = sess.run(embedding)
embeddings/.ipynb_checkpoints/Skip-Grams-Solution-checkpoint.ipynb
zhuanxuhit/deep-learning
mit
669c228f0249b9ff1422ba501406c568
Visualizing the word vectors Below we'll use T-SNE to visualize how our high-dimensional word vectors cluster together. T-SNE is used to project these vectors into two dimensions while preserving local structure. Check out this post from Christopher Olah to learn more about T-SNE and other ways to visualize high-dimensional data.
%matplotlib inline
%config InlineBackend.figure_format = 'retina'

import matplotlib.pyplot as plt
from sklearn.manifold import TSNE

viz_words = 500
tsne = TSNE()
embed_tsne = tsne.fit_transform(embed_mat[:viz_words, :])

fig, ax = plt.subplots(figsize=(14, 14))
for idx in range(viz_words):
    plt.scatter(*embed_tsne[idx, :], color='steelblue')
    plt.annotate(int_to_vocab[idx], (embed_tsne[idx, 0], embed_tsne[idx, 1]), alpha=0.7)
embeddings/.ipynb_checkpoints/Skip-Grams-Solution-checkpoint.ipynb
zhuanxuhit/deep-learning
mit
31da5a017e32e7f1b93cd734a2c37846
2. Indexing and slicing At the interactive prompt, define a list named L that contains four strings or numbers (e.g., L=[0,1,2,3] ). Then, experiment with the following boundary cases. You may never see these cases in real programs (especially not in the bizarre ways they appear here!), but they are intended to make you think about the underlying model, and some may be useful in less artificial forms—slicing out of bounds can help, for example, if a sequence is as long as you expect: - What happens when you try to index out of bounds (e.g., L[4] )? - What about slicing out of bounds (e.g., L[−1000:100] )? - Finally, how does Python handle it if you try to extract a sequence in reverse, with the lower bound greater than the higher bound (e.g., L[3:1] )? Hint: try assigning to this slice ( L[3:1]=['?'] ), and see where the value is put. Do you think this may be the same phenomenon you saw when slicing out of bounds? 3. Indexing, slicing and del Define another list L with four items, and assign an empty list to one of its offsets (e.g., L[2]=[] ). What happens? Then, assign an empty list to a slice ( L[2:3]=[] ). What happens now? Recall that slice assignment deletes the slice and inserts the new value where it used to be. The del statement deletes offsets, keys, attributes, and names. Use it on your list to delete an item (e.g., del L[0] ). What happens if you delete an entire slice ( del L[1:] )? What happens when you assign a nonsequence to a slice ( L[1:2]=1 )? 4. Tuple assignment What do you think is happening to X and Y when you run following sequence?
X = 'spam'
Y = 'eggs'
X, Y = Y, X
home/python/learningPython5thED/Learning python 5th ed..ipynb
frainfreeze/studying
mit
f14f04ab8a32ff6204b199b297341bc3
5. Dictionary keys. You’ve learned that dictionaries aren’t accessed by offsets, so what’s going on here?
D = {}
D[1] = 'a'
D[2] = 'b'
D
home/python/learningPython5thED/Learning python 5th ed..ipynb
frainfreeze/studying
mit
45da1bf9eea90b5201bfb2bf24de1cf4
Does the following shed any light on the subject? (Hint: strings, integers, and tuples share which type category?)
D[(1, 2, 3)] = 'c'
D
home/python/learningPython5thED/Learning python 5th ed..ipynb
frainfreeze/studying
mit
c516cabc085e207764a28885c2f7478a
6. Dictionary indexing. Create a dictionary named D with three entries, for keys 'a' , 'b' , and 'c' . What happens if you try to index a nonexistent key ( D['d'] )? What does Python do if you try to assign to a nonexistent key 'd' (e.g., D['d']='spam' )? How does this compare to out-of-bounds assignments and references for lists? Does this sound like the rule for variable names? 7. Generic operations. Run interactive tests to answer the following questions: - What happens when you try to use the + operator on different/mixed types (e.g., string + list, list + tuple)? - Does + work when one of the operands is a dictionary? - Does the append method work for both lists and strings? How about using the keys method on lists? (Hint: what does append assume about its subject object?) - Finally, what type of object do you get back when you slice or concatenate two lists or two strings? 8. String indexing. Define a string S of four characters: S = "spam" . Then type the following expression: S[0][0][0][0][0] . Any clue as to what’s happening this time? (Hint: recall that a string is a collection of characters, but Python characters are one-character strings.) Does this indexing expression still work if you apply it to a list such as ['s', 'p', 'a', 'm'] ? Why? 9. Immutable types. Define a string S of four characters again: S = "spam" . Write an assignment that changes the string to "slam" , using only slicing and concatenation. Could you perform the same operation using just indexing and concatenation? How about index assignment? 10. Nesting. Write a data structure that represents your personal information: name (first, middle, last), age, job, address, email address, and phone number. You may build the data structure with any combination of built-in object types you like (lists, tuples, dictionaries, strings, numbers). Then, access the individual components of your data structures by indexing. Do some structures make more sense than others for this object? 11. Files Write a script that creates a new output file called myfile.txt and writes the string "Hello file world!" into it. Then write another script that opens my- file.txt and reads and prints its contents. Does the new file show up in the directory where you ran your scripts? What if you add a different directory path to the filename passed to open ? Note: file write methods do not add newline characters to your strings; add an explicit \n at the end of the string if you want to fully terminate the line in the file.
!ls
home/python/learningPython5thED/Learning python 5th ed..ipynb
frainfreeze/studying
mit
dc0dded522a1142a605d8502899ab284
Test your knowledge: Part III exercises 1. Coding basic loops Write a for loop that prints the ASCII code of each character in a string named S. Use the built-in function ord(character) to convert each character to an ASCII integer. This function technically returns a Unicode code point in Python 3.X, but if you restrict its content to ASCII characters, you’ll get back ASCII codes. (Test it interactively to see how it works.) Next, change your loop to compute the sum of the ASCII codes of all the characters in a string. Finally, modify your code again to return a new list that contains the ASCII codes of each character in the string. Does the expression map(ord, S) have a similar effect? How about [ord(c) for c in S] ? Why? (Hint: see Chapter 14.) 2. Backslash characters What happens on your machine when you type the following code interactively?
for i in range(5):
    print('hello %d\n\a' % i, end="")
home/python/learningPython5thED/Learning python 5th ed..ipynb
frainfreeze/studying
mit
44104d17da5c1f0a045283b129c8b747
3. Sorting dictionaries. In Chapter 8, we saw that dictionaries are unordered collections. Write a for loop that prints a dictionary’s items in sorted (ascending) order. (Hint: use the dictionary keys and list sort methods, or the newer sorted built-in function.) 4. Program logic alternatives. Consider the following code, which uses a while loop and found flag to search a list of powers of 2 for the value of 2 raised to the fifth power (32). ```python L = [1, 2, 4, 8, 16, 32, 64] X = 5 found = False i = 0 while not found and i < len(L): if 2 ** X == L[i]: found = True else: i = i+1 if found: print('at index', i) else: print(X, 'not found') ``` As is, the example doesn’t follow normal Python coding techniques. Follow the steps outlined here to improve it: - First, rewrite this code with a while loop else clause to eliminate the found flag and final if statement. - Next, rewrite the example to use a for loop with an else clause, to eliminate the explicit list-indexing logic. (Hint: to get the index of an item, use the list index method— L.index(X) returns the offset of the first X in list L .) - Next, remove the loop completely by rewriting the example with a simple in operator membership expression. (See Chapter 8 for more details, or type this to test: 2 in [1,2,3] .) - Finally, use a for loop and the list append method to generate the powers-of-2 list ( L ) instead of hardcoding a list literal. Deeper thoughts: - Do you think it would improve performance to move the 2 ** X expression outside the loops? How would you code that? - As we saw in exercise 1, Python includes a map(function, list) tool that can generate a powers-of-2 list, too: map(lambda x: 2 ** x, range(7)). Try typing this code interactively; we’ll meet lambda more formally in the next part of this book, especially in Chapter 19. Would a list comprehension help here (see Chapter 14)? 5. Code maintenance. If you haven’t already done so, experiment with making the code changes suggested in this chapter’s sidebar “Changing PyDoc’s Colors” on page 456. Much of the work of real software development is in changing existing code, so the sooner you begin doing so, the better. For reference, my edited copy of PyDoc is in the book’s examples package, named mypydoc.py; to see how it differs, you can run a file compare (fc on Windows) with the original pydoc.py in 3.3 (also included, lest it change radically in 3.4 as the sidebar describes). If PyDoc is more easily customized by the time you read these words, customize colors per its current convention instead; if this involves changing a CSS file, let’s hope the procedure will be well documented in Python’s manuals. Test Your Knowledge: Part IV Exercises 1. The basics. At the Python interactive prompt, write a function that prints its single argument to the screen and call it interactively, passing a variety of object types: string, integer, list, dictionary. Then, try calling it without passing any argument. What happens? What happens when you pass two arguments? 2. Arguments. Write a function called adder in a Python module file. The function should accept two arguments and return the sum (or concatenation) of the two. Then, add code at the bottom of the file to call the adder function with a variety of object types (two strings, two lists, two floating points), and run this file as a script from the system command line. Do you have to print the call statement results to see results on your screen? 3. varargs. 
Generalize the adder function you wrote in the last exercise to compute the sum of an arbitrary number of arguments, and change the calls to pass more or fewer than two arguments. What type is the return value sum? (Hints: a slice such as S[:0] returns an empty sequence of the same type as S , and the type built- in function can test types; but see the manually coded min examples in Chapter 18 for a simpler approach.) What happens if you pass in arguments of different types? What about passing in dictionaries? 4. Keywords. Change the adder function from exercise 2 to accept and sum/concatenate three arguments: def adder(good, bad, ugly). Now, provide default values for each argument, and experiment with calling the function interactively. Try passing one, two, three, and four arguments. Then, try passing keyword arguments. Does the call adder(ugly=1, good=2) work? Why? Finally, generalize the new adder to accept and sum/concatenate an arbitrary number of keyword arguments. This is similar to what you did in exercise 3, but you’ll need to iterate over a dictionary, not a tuple. (Hint: the dict.keys method returns a list you can step through with a for or while , but be sure to wrap it in a list call to index it in 3.X; dict.values may help here too.) 5. Dictionary tools. Write a function called copyDict(dict) that copies its dictionary argument. It should return a new dictionary containing all the items in its argument. Use the dictionary keys method to iterate (or, in Python 2.2 and later, step over a dictionary’s keys without calling keys ). Copying sequences is easy ( X[:] makes a top-level copy); does this work for dictionaries, too? As explained in this exercise’s solution, because dictionaries now come with similar tools, this and the next exercise are just coding exercises but still serve as representative function examples. 6. Dictionary tools. Write a function called addDict(dict1, dict2) that computes the union of two dictionaries. It should return a new dictionary containing all the items in both its arguments (which are assumed to be dictionaries). If the same key appears in both arguments, feel free to pick a value from either. Test your function by writing it in a file and running the file as a script. What happens if you pass lists instead of dictionaries? How could you generalize your function to handle this case, too? (Hint: see the type built-in function used earlier.) Does the order of the arguments passed in matter? 7. More argument-matching examples.
def f1(a, b): print(a, b)            # Normal args
def f2(a, *b): print(a, b)           # Positional varargs
def f3(a, **b): print(a, b)          # Keyword varargs
def f4(a, *b, **c): print(a, b, c)   # Mixed modes
def f5(a, b=2, c=3): print(a, b, c)  # Defaults
def f6(a, b=2, *c): print(a, b, c)   # Defaults and positional varargs
home/python/learningPython5thED/Learning python 5th ed..ipynb
frainfreeze/studying
mit
d1c8073257b5b13133c0c3dbbef56cfb
Test the following calls interactively, and try to explain each result; in some cases, you’ll probably need to fall back on the matching algorithm shown in Chapter 18. Do you think mixing matching modes is a good idea in general? Can you think of cases where it would be useful?
f1(1, 2)
f1(b=2, a=1)
f2(1, 2, 3)
f3(1, x=2, y=3)
f4(1, 2, 3, x=2, y=3)
f5(1)
f5(1, 4)
f6(1)
f6(1, 3, 4)
home/python/learningPython5thED/Learning python 5th ed..ipynb
frainfreeze/studying
mit
8fb94a177cd8a936f607e11423cb8c17
After each question, we check on a small example that it works as expected. Exercise 1. This first exercise deals with the problem of a non-recursive graph traversal. Q1
def adjacence(N):
    # create an empty matrix
    mat = [[0 for j in range(N)] for i in range(N)]
    for i in range(0, N - 1):
        mat[i][i + 1] = 1
    return mat

mat = adjacence(7)
mat
_doc/notebooks/exams/td_note_2015.ipynb
sdpython/ensae_teaching_cs
mit
59787f927fdd8aa096bdbc24a3fc2763
Q2 We need to add 5 edges at random, while avoiding adding the same one twice.
import random

def ajoute_points(mat, nb=5):
    ajout = {}
    while len(ajout) < nb:
        i, j = random.randint(0, len(mat) - 1), random.randint(0, len(mat) - 1)
        if i < j and (i, j) not in ajout:
            mat[i][j] = 1
            ajout[i, j] = 1

ajoute_points(mat)
mat
_doc/notebooks/exams/td_note_2015.ipynb
sdpython/ensae_teaching_cs
mit
da247192b712e8371553a1d9a8d347e4
Q3
def successeurs(adj, i):
    ligne = adj[i]
    # in the following expression,
    # s is the value in the matrix (0 or 1)
    # i is the index
    return [i for i, s in enumerate(ligne) if s == 1]

successeurs(mat, 1)
_doc/notebooks/exams/td_note_2015.ipynb
sdpython/ensae_teaching_cs
mit
f2afce2488c46b8b75f1045534f86ca0
Q4
def successeurs_dico(adj):
    return {i: successeurs(adj, i) for i in range(len(adj))}

dico = successeurs_dico(mat)
dico
_doc/notebooks/exams/td_note_2015.ipynb
sdpython/ensae_teaching_cs
mit
5a75402d53fc5d33214969624561c106
Q5
def suites_chemin(chemin, dico):
    dernier = chemin[-1]
    res = []
    for s in dico[dernier]:
        res.append(chemin + [s])
    return res

suites_chemin([0, 1], dico)
_doc/notebooks/exams/td_note_2015.ipynb
sdpython/ensae_teaching_cs
mit
ca61c3183d485ea000f51633b0a17132
Q6
def parcours(adj):
    dico = successeurs_dico(adj)
    chemins = [[0]]
    resultat = []
    while len(chemins) > 0:
        chemins2 = []
        for chemin in chemins:
            res = suites_chemin(chemin, dico)
            if len(res) == 0:
                # chemin is a path that cannot be extended any further
                resultat.append(chemin)
            else:
                chemins2.extend(res)
        chemins = chemins2
    return resultat

parcours(mat)
_doc/notebooks/exams/td_note_2015.ipynb
sdpython/ensae_teaching_cs
mit
85ab12f0df7c7d2f56b4d09e7587dca8
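The parcours function above grows every partial path level by level, which is a breadth-first strategy. For comparison, here is a sketch of a depth-first variant that uses an explicit stack; it reuses successeurs_dico and suites_chemin defined above, and the name parcours_profondeur is mine:

def parcours_profondeur(adj):
    dico = successeurs_dico(adj)
    resultat = []
    pile = [[0]]                  # stack of partial paths starting at node 0
    while len(pile) > 0:
        chemin = pile.pop()       # extend the most recently pushed path first
        res = suites_chemin(chemin, dico)
        if len(res) == 0:
            resultat.append(chemin)   # the path cannot be extended any further
        else:
            pile.extend(res)
    return resultat

parcours_profondeur(mat)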
Q7 The difference between a depth-first and a breadth-first traversal is that, in a depth-first traversal, we prefer to explore the direct successor first, then the successor of that successor, rather than the other neighbours of the direct successor. In the first case we reach a finished path very quickly; in the second case the finished paths only appear towards the end of the algorithm. The version implemented in parcours is a breadth-first traversal; the variant sketched just above explores depth-first instead. Q8 The matrix in question is the following (for $N=7$):
def adjacence8(N):
    # create an empty N x N matrix
    mat = [[0 for j in range(N)] for i in range(N)]
    for i in range(0, N-1):
        for j in range(i+1, N):
            mat[i][j] = 1
    return mat

adj = adjacence8(7)
adj

che = parcours(adj)
print("nombre", len(che))
che
_doc/notebooks/exams/td_note_2015.ipynb
sdpython/ensae_teaching_cs
mit
3764dac8677aba27e0b57884eace29b0
We run a small loop to get a feel for the result:
for i in range(5, 11):
    adj = adjacence8(i)
    che = parcours(adj)
    print(i, "-->", len(che))
_doc/notebooks/exams/td_note_2015.ipynb
sdpython/ensae_teaching_cs
mit
d5b7bf63a118e0552de0242a8e17745a
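The counts printed above look like powers of two; a quick sanity check of that pattern, reusing adjacence8 and parcours (just a sketch):

for i in range(2, 11):
    nb = len(parcours(adjacence8(i)))
    assert nb == 2**(i - 2), (i, nb)
print("the pattern 2**(n-2) holds up to n = 10")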
This looks very much like powers of two (the quick check above confirms the pattern), which suggests a proof by induction. Each node $i$ is connected to all the following nodes $i+1$, $i+2$, ... Note that every path ends at the last node $n$. When node $n+1$ is added to the graph, it becomes a successor of all the other nodes. For any given path, we can either append the new node at the end or replace the last node $n$ by $n+1$. That is how the number of paths doubles. With $n$ nodes we obtain $2^{n-2}$ paths. Exercise 2 Suppose we have an unsorted array of numbers. Write a function that returns the three smallest elements. The first option is to use the sort function, which costs $O(n \ln n)$; the program is very simple.
l = [-1, 4, 6, 4, 1, 9, 5]
l.sort()
l[:3]
_doc/notebooks/exams/td_note_2015.ipynb
sdpython/ensae_teaching_cs
mit
02cf5108a992464f4cc9f8c3b7db2545
The problem we actually want to solve is simpler, since only the three smallest elements have to be kept; there is no need to sort the rest of the list. The idea is to walk through the array and keep only the three smallest elements seen so far. If an element is larger than the current third element, we simply ignore it.
def garde_3_element(tab):
    meilleur = []
    for t in tab:
        if len(meilleur) < 3:
            meilleur.append(t)
            meilleur.sort()
        elif t < meilleur[2]:
            meilleur[2] = t
            meilleur.sort()
    return meilleur

garde_3_element(l)
_doc/notebooks/exams/td_note_2015.ipynb
sdpython/ensae_teaching_cs
mit
cbf21daf6fdf5f3b4efb565d52eecbfe
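For completeness, the standard library solves the same problem: heapq.nsmallest scans the data once while maintaining a small heap, which runs in O(n log k), i.e. effectively linear time when k is a small constant such as 3 (a sketch):

import heapq

heapq.nsmallest(3, [-1, 4, 6, 4, 1, 9, 5])   # [-1, 1, 4]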
Even though garde_3_element uses a sort internally, its cost is $O(n)$ because that sort operates on at most three elements. Exercise 3 Q1
def word2dict(mot):
    return {i: mot[:i] for i in range(len(mot)+1)}

word2dict("mot"), word2dict("python")
_doc/notebooks/exams/td_note_2015.ipynb
sdpython/ensae_teaching_cs
mit
06fb1be367de83d7a0b23809587f85c9
Q2
def two_words2dict(d1, d2):
    return {(i, j): (d1[i], d2[j]) for i in d1 for j in d2}

mot1 = "python"
mot2 = "piton"
d1 = word2dict(mot1)
d2 = word2dict(mot2)
vertices = two_words2dict(d1, d2)
vertices
_doc/notebooks/exams/td_note_2015.ipynb
sdpython/ensae_teaching_cs
mit
04c555e36276ceddf34d4622987d112a
Q3 There are as many elements as $(len(mot1)+1)*(len(mot2)+1)$, since we run a double loop over all positions plus one extra for position 0. So $(p+1)(q+1)$, where $p$ and $q$ are the lengths of the two words.
len(vertices),(len(mot1)+1)*(len(mot2)+1)
_doc/notebooks/exams/td_note_2015.ipynb
sdpython/ensae_teaching_cs
mit
30d21ed305a82aaa41f0f07a4912c126
Q4
def add_edge_hv(vertices):
    edges = {}
    for edge1 in vertices:
        i1, j1 = edge1
        for edge2 in vertices:
            i2, j2 = edge2
            if (i2 - i1 == 1 and j1 == j2) or (j2 - j1 == 1 and i1 == i2):
                edges[edge1, edge2] = 1
    return edges

edges = add_edge_hv(vertices)
edges
_doc/notebooks/exams/td_note_2015.ipynb
sdpython/ensae_teaching_cs
mit
ec1275c04c67694e3aa850ff9f5d6c76
Q5 For each vertex we add two edges, except for the vertices that correspond to the ends of the words. Hence $2(p+1)(q+1)-(p+1)-(q+1)=2pq+p+q$.
len(edges), 2*len(mot1)*len(mot2)+len(mot1)+len(mot2)
_doc/notebooks/exams/td_note_2015.ipynb
sdpython/ensae_teaching_cs
mit
19a1fde5235cad0b1d583af4da3d705d
Q6 We take our inspiration from the previous function. It would be more efficient to merge the two passes (a merged sketch is given after the cell below).
def cout(m1, m2):
    c1 = m1[-1]
    c2 = m2[-1]
    if c1 == c2:
        return 0
    else:
        return 1

def ajoute_diagonale(edges, vertices):
    # edges = { }
    # do NOT add the line above: it would wipe out everything that
    # edges already contains
    for edge1 in vertices:
        i1, j1 = edge1
        for edge2 in vertices:
            i2, j2 = edge2
            if i2 - i1 == 1 and j2 - j1 == 1:
                edges[edge1, edge2] = cout(vertices[edge2][0], vertices[edge2][1])

ajoute_diagonale(edges, vertices)
edges
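As mentioned above, the two passes can be merged into a single function. A sketch (the name ajoute_aretes is mine; it builds the same edge dictionary in one double loop):

def ajoute_aretes(vertices):
    edges = {}
    for edge1 in vertices:
        i1, j1 = edge1
        for edge2 in vertices:
            i2, j2 = edge2
            if (i2 - i1 == 1 and j1 == j2) or (j2 - j1 == 1 and i1 == i2):
                edges[edge1, edge2] = 1   # horizontal or vertical move: insertion/deletion
            elif i2 - i1 == 1 and j2 - j1 == 1:
                # diagonal move: substitution cost
                edges[edge1, edge2] = cout(vertices[edge2][0], vertices[edge2][1])
    return edges

ajoute_aretes(vertices) == edges   # should be True: same graph as the two-step construction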
_doc/notebooks/exams/td_note_2015.ipynb
sdpython/ensae_teaching_cs
mit
46539717ebc352a48846b92b0b475a3d
Q7 The shortest-path algorithm.
def loop_on_edges(distance, edges):
    for edge, cout in edges.items():
        v1, v2 = edge
        if v1 in distance and (v2 not in distance or distance[v2] > distance[v1] + cout):
            distance[v2] = distance[v1] + cout
_doc/notebooks/exams/td_note_2015.ipynb
sdpython/ensae_teaching_cs
mit
6990e0ec04d681f3e48b856c928ac1b7
Q8 The question was probably a little poorly worded, because it is much easier for the function loop_on_edges itself to know whether the dictionary distance has been modified or not. We modify it so that it returns the number of updates.
def loop_on_edges(distance, edges):
    misejour = 0
    for edge, cout in edges.items():
        v1, v2 = edge
        if v1 in distance and (v2 not in distance or distance[v2] > distance[v1] + cout):
            distance[v2] = distance[v1] + cout
            misejour += 1
    return misejour
_doc/notebooks/exams/td_note_2015.ipynb
sdpython/ensae_teaching_cs
mit
b66426f9e284571a072b10228b294384
Then the final algorithm:
def plus_court_chemin(edges):
    distance = {(0, 0): 0}
    m = 1
    while m > 0:
        m = loop_on_edges(distance, edges)
    return distance

resultat = plus_court_chemin(edges)
resultat
_doc/notebooks/exams/td_note_2015.ipynb
sdpython/ensae_teaching_cs
mit
47c63d3d8fc6f21eed9f7c65fe656ed1
Q9 Since everything has already been computed for these two words, it is enough to read the right value from the distance table (an independent check is sketched after the cell below):
print(mot1, mot2)
resultat[len(mot1), len(mot2)]
_doc/notebooks/exams/td_note_2015.ipynb
sdpython/ensae_teaching_cs
mit
e1d872f5d26c183dc2e6d6e8b31d4a1a
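As an independent sanity check, a classic dynamic-programming edit distance should give the same value as the graph-based computation (the name distance_edition is mine):

def distance_edition(m1, m2):
    d = {(0, 0): 0}
    for i in range(len(m1) + 1):
        for j in range(len(m2) + 1):
            if (i, j) == (0, 0):
                continue
            best = None
            if i > 0:
                best = d[i - 1, j] + 1                       # deletion
            if j > 0:
                c = d[i, j - 1] + 1                          # insertion
                best = c if best is None else min(best, c)
            if i > 0 and j > 0:
                c = d[i - 1, j - 1] + (0 if m1[i - 1] == m2[j - 1] else 1)   # substitution
                best = min(best, c)
            d[i, j] = best
    return d[len(m1), len(m2)]

distance_edition(mot1, mot2)   # should match resultat[len(mot1), len(mot2)], i.e. 2 for python/piton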
Exercise 4 We have an array of integers l = [1, 8, 5, 7, 3, 6, 9]. We want to place the even integers first and the odd integers last: 8, 6, 1, 5, 7, 3, 9. Write a function that does this. The cost of a sort is $O(n \ln n)$. We first build the pair (parity, element) for each element and then sort that table. This is the simplest solution.
l = [1, 8, 5, 7, 3, 6, 9]
l2 = [(i % 2, i) for i in l]
l2.sort()
res = [b for a, b in l2]
res
_doc/notebooks/exams/td_note_2015.ipynb
sdpython/ensae_teaching_cs
mit
e30b34596327487a8ab5d87e70cd6ee7
In this specific case, we do not want to sort on the numbers themselves but on their parity. In a sense, we do not care in which order two even numbers end up. This reduces the number of operations to perform. One idea is to walk through the array from both ends and to swap two numbers as soon as their parities are in the wrong order.
def trie_parite(l):
    i = 0
    j = len(l) - 1
    while i < j:
        while i < j and l[i] % 2 == 0:
            i += 1
        while i < j and l[j] % 2 == 1:
            j -= 1
        if i < j:
            ech = l[i]
            l[i] = l[j]
            l[j] = ech
            i += 1
            j -= 1

l = l.copy()
trie_parite(l)
l
_doc/notebooks/exams/td_note_2015.ipynb
sdpython/ensae_teaching_cs
mit
82ca3a5e3547d94db3cf824419c39f32
Github https://github.com/jbwhit/OSCON-2015/commit/6750b962606db27f69162b802b5de4f84ac916d5 A few Python Basics
# Create a [list]
days = ['Monday',      # multiple lines
        'Tuesday',     # acceptable
        'Wednesday',
        'Thursday',
        'Friday',
        'Saturday',
        'Sunday',
        ]              # trailing comma is fine!
days

# Simple for-loop
for day in days:
    print(day)

# Double for-loop
for day in days:
    for letter in day:
        print(letter)

print(days)
print(*days)

# Double for-loop
for day in days:
    for letter in day:
        print(letter)
    print()

for day in days:
    for letter in day:
        print(letter.lower())
notebooks/07-Some_basics.ipynb
jbwhit/WSP-312-Tips-and-Tricks
mit
35380f54f4c8a8e86f48ad8a4f86eb33
List Comprehensions
length_of_days = [len(day) for day in days]
length_of_days

letters = [letter for day in days for letter in day]
print(letters)

letters = [letter for day in days for letter in day]
print(letters)

[num for num in xrange(10) if num % 2]

[num for num in xrange(10) if num % 2 else "doesn't work"]

[num if num % 2 else "works" for num in xrange(10)]

[num for num in xrange(10)]

sorted_letters = sorted([x.lower() for x in letters])
print(sorted_letters)

unique_sorted_letters = sorted(set(sorted_letters))

print("There are", len(unique_sorted_letters), "unique letters in the days of the week.")
print("They are:", ''.join(unique_sorted_letters))

print("They are:", '; '.join(unique_sorted_letters))

def first_three(input_string):
    """Takes an input string and returns the first 3 characters."""
    return input_string[:3]

import numpy as np

# tab
np.linspace()

[first_three(day) for day in days]

def last_N(input_string, number=2):
    """Takes an input string and returns the last N characters."""
    return input_string[-number:]

[last_N(day, 4) for day in days if len(day) > 6]

from math import pi

print([str(round(pi, i)) for i in xrange(2, 9)])

list_of_lists = [[i, round(pi, i)] for i in xrange(2, 9)]
print(list_of_lists)

for sublist in list_of_lists:
    print(sublist)

# Let this be a warning to you!
# If you see python code like the following in your work:
for x in range(len(list_of_lists)):
    print("Decimals:", list_of_lists[x][0], "expression:", list_of_lists[x][1])

print(list_of_lists)

# Change it to look more like this:
for decimal, rounded_pi in list_of_lists:
    print("Decimals:", decimal, "expression:", rounded_pi)

# enumerate if you really need the index
for index, day in enumerate(days):
    print(index, day)
notebooks/07-Some_basics.ipynb
jbwhit/WSP-312-Tips-and-Tricks
mit
e4216cc54a56497c194fe78babb1976f
Dictionaries Python dictionaries are awesome. They are hash tables and have a lot of neat CS properties. Learn and use them well.
from IPython.display import IFrame, HTML
HTML('<iframe src=https://en.wikipedia.org/wiki/Hash_table width=100% height=550></iframe>')

fellows = ["Jonathan", "Alice", "Bob"]
universities = ["UCSD", "UCSD", "Vanderbilt"]

for x, y in zip(fellows, universities):
    print(x, y)

# Don't do this
{x: y for x, y in zip(fellows, universities)}

# Doesn't work like you might expect
{zip(fellows, universities)}

dict(zip(fellows, universities))

fellows

fellow_dict = {fellow.lower(): university for fellow, university in zip(fellows, universities)}
fellow_dict

fellow_dict['bob']

rounded_pi = {i: round(pi, i) for i in xrange(2, 9)}
rounded_pi[5]

sum([i ** 2 for i in range(10)])

sum(i ** 2 for i in range(10))

huh = (i ** 2 for i in range(10))
huh.next()
notebooks/07-Some_basics.ipynb
jbwhit/WSP-312-Tips-and-Tricks
mit
c81c8f04e99a9bd0996969f3844111ba
Beampy Positioning system Beampy has a positioning system that allows automatic, fixed, or relative positioning. The default behavior is set by the theme used in the presentation. The default theme sets the coordinates to: x='center', which means that the element is centered in the horizontal direction; the x anchor of the element is set to left, so the horizontal distance is measured between the left side of the slide and the left border of the element's bounding box. y='auto', which means that elements are equally spaced in the vertical direction; the y anchor of the element is set to top, so the vertical distance is measured between the top of the slide and the top border of the element's bounding box. The reference for coordinates given as a percentage is the page or group width, for both x and y. Slide coordinate system The origin of the coordinate system is the upper-left corner of the slide or the current group, and coordinates increase when moving toward the bottom-right corner.
from beampy import *
from beampy.utils import bounding_box, draw_axes

doc = document(quiet=True)

with slide():
    draw_axes(show_ticks=True)
    t1 = text('This is the default theme behaviour')
    t2 = text('x are centered and y equally spaced')
    for t in [t1, t2]:
        t.add_border()

display_matplotlib(gcs())
doc-src/auto_tutorials/positioning_system.ipynb
hchauvet/beampy
gpl-3.0
9b360ee59259a95b73d41c2c3b3b1f93
Automatic positioning Beampy has some simple automatic positioning: centering a Beampy module with center, and equally spaced distribution of the Beampy modules whose coordinates are set to auto. Centering +++++++++
with slide():
    draw_axes()
    rectangle(x='center', y='center', width=400, height=200,
              color='lightgreen', edgecolor=None)
    text('x and y are centered for the text and the rectangle modules',
         x='center', y='center', width=350)

display_matplotlib(gcs())
doc-src/auto_tutorials/positioning_system.ipynb
hchauvet/beampy
gpl-3.0
3f36297eb649a31ca8332aa7d0cd8eac
Auto ++++ Equally spaced vertically ~~~~~~~~~~~~~~~~~~~~~~~~~
with slide():
    draw_axes()
    for c in ['gold', 'crimson', 'orangered']:
        rectangle(x='center', y='auto', width=100, height=100,
                  color=c, edgecolor=None)

display_matplotlib(gcs())
doc-src/auto_tutorials/positioning_system.ipynb
hchauvet/beampy
gpl-3.0
242344fe4f09fb08d4b28c1dca95a0dd
Equally spaced horizontally ~~~~~~~~~~~~~~~~~~~~~~~~~~~
with slide():
    draw_axes()
    for c in ['gold', 'crimson', 'orangered']:
        rectangle(x='auto', y='center', width=100, height=100,
                  color=c, edgecolor=None)

display_matplotlib(gcs())
doc-src/auto_tutorials/positioning_system.ipynb
hchauvet/beampy
gpl-3.0
df68ccd0140e19ba25925235028730ed
Equally spaced in xy directions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
with slide():
    draw_axes()
    for c in ['gold', 'crimson', 'orangered']:
        rectangle(x='auto', y='auto', width=100, height=100,
                  color=c, edgecolor=None)

display_matplotlib(gcs())
doc-src/auto_tutorials/positioning_system.ipynb
hchauvet/beampy
gpl-3.0
466329344e4c931d374c149b8c2adbee
Absolute positioning units +++++ Absolute coordinates can be given as follows: (int or float) <= 1.0: the position is a percentage of the slide or group width, for both x and y (by default, but this can be changed). (int or float) > 1.0: the position is in pixels. Given as a string: the position is in pixels, or in the unit written just after the number, like '2cm'. <div class="alert alert-info"><h4>Note</h4><p>For `y` < 1.0, the default will be changed in a future version to be a percentage of the height. To adopt this behaviour in your slides now, add the following just after importing Beampy: >>> DEFAULT_Y['unit'] = 'height'</p></div>
with slide():
    draw_axes()
    text('x and y relative to width', x=0.5, y=0.5)
    text('x and y relative to width, with aspect ratio for y', x=0.5, y=0.5*(3/4.), width=300)
    text('x and y given in pixels', x=100, y=100)
    text('x and y given in centimetres', x='2cm', y='5cm')

display_matplotlib(gcs())
doc-src/auto_tutorials/positioning_system.ipynb
hchauvet/beampy
gpl-3.0
38e31076f7462ce588ccf0c55a290753
Anchors +++++++ We can also change the anchor of a Beampy module by using the center, right, and bottom functions on its coordinates.
with slide():
    draw_axes()
    t1 = text('Top-left absolute positioning $$x=x^2$$', x=400, y=100)
    t2 = text('Top-right absolute positioning $$x=x^2$$', x=right(400), y=200)
    t3 = text('Middle-middle absolute positioning $$x=x^2$$', x=center(400), y=center(300))
    t4 = text('Bottom-right absolute positioning $$x=x^2$$', x=right(0.5), y=bottom(0.6))
    for t in [t1, t2, t3, t4]:
        bounding_box(t)

display_matplotlib(gcs())
doc-src/auto_tutorials/positioning_system.ipynb
hchauvet/beampy
gpl-3.0
4b1003b7353ecb33754442949cdc30b8
Relative positioning When a Beampy module has been placed on a slide, we can position another element relative to it. To do so, Beampy modules have methods to refer to their anchors (module.left, module.right, module.top, module.bottom, module.center).
with slide():
    draw_axes()
    texts_width = 200
    r = rectangle(x='center', y='center', width=100, height=100,
                  color='crimson', edgecolor=None)
    t1 = text('Centered 10 px below the rectangle', x=r.center+center(0), y=r.bottom+10,
              width=texts_width, align='center')
    t2 = text('Centered 10 px above the rectangle', x=r.center+center(0), y=r.top-bottom(10),
              width=texts_width, align='center')
    t3 = text('10 px left of the rectangle', x=r.left-right(10), y=r.center+center(10),
              width=texts_width, align='center')
    t4 = text('10 px right of the rectangle', x=r.right+10, y=r.center+center(10),
              width=texts_width, align='center')
    for t in [t1, t2, t3, t4]:
        bounding_box(t)

display_matplotlib(gcs())
doc-src/auto_tutorials/positioning_system.ipynb
hchauvet/beampy
gpl-3.0
fbf419b2f83f9b69d442844ce20c5796
Another way to do relative positioning is to use a string as the coordinate, with '+' or '-' before the shift and the unit after it. This places the new Beampy module relative to the previous one.
with slide():
    draw_axes()
    text('text x=20, y=0.5cm', x='20', y='0.5cm')
    for i in range(2):
        text('text x=-0, y=+0.5cm', x='-0', y='+0.5cm')
    text('text x=25, y=0.3', x='25', y=0.3)
    for i in range(2):
        text('text x=+0, y=+0.5cm', x='+0', y='+0.5cm')
    text('text x=25, y=0.5', x='25', y=0.5)
    text('text x=+10, y=+0', x='+10', y='+0')
    text('text x=+10, y=-0', x='+10', y='-0')

display_matplotlib(gcs())
doc-src/auto_tutorials/positioning_system.ipynb
hchauvet/beampy
gpl-3.0
208d1ef06c95f3d2c942a5e034d7a3bc
Coordinate as dictionary Coordinates can also be given as a dictionary. The dictionary keys are the following: unit: ('px', 'pt', 'cm', 'width', 'height'), the unit of the shift value. shift: float, the amount of shift. reference: ('slide' or 'relative'); 'relative' is used for relative positioning. anchor: (top, bottom, left, right, middle), defines the anchor position on the module's bounding box. align: (left, right, or center for x) and (top, bottom, or center for y), sets the origin of the slide axes.
with slide():
    draw_axes()
    t = text('centered text',
             x={'anchor': 'middle', 'shift': 0.5},
             y={'anchor': 'middle', 'shift': 0.5, 'unit': 'height'})
    bounding_box(t)

    t = text('bottom right shift',
             x={'anchor': 'right', 'shift': 30, 'align': 'right'},
             y={'anchor': 'bottom', 'shift': 30, 'align': 'bottom'})
    bounding_box(t)

display_matplotlib(gcs())
doc-src/auto_tutorials/positioning_system.ipynb
hchauvet/beampy
gpl-3.0
166fc75c1db28eee174b53704cb43ee1
Step 2: Define a function that prints the information in JSON format
# Assumed imports for this snippet (probably defined in an earlier cell not shown here):
# getQuotes comes from the googlefinance package, clear_output from IPython.
from googlefinance import getQuotes
from IPython.display import clear_output
import json
import os

def buscar_accion(nombre_accion):
    clear_output()
    os.system('cls' if os.name == 'nt' else 'clear')
    print(json.dumps(getQuotes(nombre_accion), indent=2))
Leer Precio Acciones Python 3.ipynb
Ric01/Uso-Google-Finance-Python3
gpl-3.0
0f63833ff864bec90058ad4a8645976c
Step 3: Look up information for a stock, for example Apple (AAPL)
buscar_accion("AAPL")
Leer Precio Acciones Python 3.ipynb
Ric01/Uso-Google-Finance-Python3
gpl-3.0
67d91ad0fcdccd0d556d6c78f82a77b9
We can make this a little bit more explicit. In the line k = make_statement('name: '), make_statement() has returned the inner function key, and the inner function has been given the name k. Now, when we call k(), the inner function returns the desired tuple. The reason this works is that in addition to the environment in which a user-defined function is running, that function has access to a second environment: the environment in which the function was defined. Here, key has access to the environment of make_statement. In this sense the environment of make_statement is the parent of the environment of key. This enables two things: Names inside the inner functions (or the outer ones for that matter) do not interfere with names in the global scope. Inside the outer and inner functions, the "most lexically local" names are the ones that matter. An inner function can access the environment of its enclosing (outer) function. Closures Since the inner functions can "capture" information from an outer function's environment, the inner function is sometimes called a closure. Notice that s, once captured by the inner function, cannot now be changed: we have lost direct access to its manipulation. This process is called encapsulation, and is a cornerstone of object-oriented programming. Augmenting Functions Since functions are first class, we might want to augment them to put out, for example, call information, time information, etc. Example 1 In the following, timer() accepts a function f as its argument and returns an inner function called inner. inner accepts a variable argument list and wraps the function f with timers to time how long it takes f to execute. Note that f is passed a variable argument list (try to recall what Python does with that).
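For reference, here is a minimal reconstruction of the make_statement/key pair discussed above. The original cell is not shown in this excerpt, so treat the exact body as an assumption; the point is only that the inner function captures s from the enclosing call:

def make_statement(s):
    def key(name):
        return (s, name)      # s is read from the environment in which key was defined
    return key

k = make_statement('name: ')
print(k('Albert'))            # ('name: ', 'Albert')

The timer example below augments a function in the same spirit.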
# First we write our timer function
import time

def timer(f):
    def inner(*args):
        t0 = time.time()
        output = f(*args)
        elapsed = time.time() - t0
        print("Time Elapsed", elapsed)
        return output
    return inner

# Now we prepare to use our timer function

import numpy as np  # Import numpy

# User-defined functions
def allocate1(x, N):
    return [x]*N

def allocate2(x, N):
    ones = np.ones(N)
    return np.multiply(x, ones)

x = 1.0

# Time allocation with lists
my_alloc = timer(allocate1)
l1 = my_alloc(x, 10000000)

# Time allocation with numpy array
my_alloc2 = timer(allocate2)
l2 = my_alloc2(x, 10000000)
lectures/L6/L6.ipynb
crystalzhaizhai/cs207_yi_zhai
mit
28bc217272474c974c97e78c3c47c202
That seemed pretty useful. We might want to do such things a lot (and not just for timing purposes). Let's recap the pattern that was so useful. Basically, we wrote a nice function to "decorate" our function of interest. In this case, we wrote a timer function whose closure wrapped up any function we gave to it in a timing construct. In order to invoke our nice decorations, we had to pass a function to the timer function and get a new, decorated function back. Then we called the decorated function. So the idea is as follows. We have a decorator (here called timer) that sweetens up some function (call it target).

```python
def target():
    pass

decorated_target = decorator(target)
```

But Python provides what's called syntactic sugar. Instead of writing all of that, we can just write:

```python
@decorator
def target():
    pass
```

Now target is decorated. Let's see how this all works.
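A minimal illustration of that equivalence, using a throwaway decorator (all names here are just for the demo):

def announce(f):
    def wrapper(*args):
        print("Calling", f.__name__)
        return f(*args)
    return wrapper

def target():
    return 42

decorated_target = announce(target)   # manual decoration
decorated_target()

@announce                             # the same thing with syntactic sugar
def target2():
    return 42

target2()

Now the same pattern applied to timer: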
@timer
def allocate1(x, N):
    return [x]*N

x = 2.0
allocate1(x, 10000000)
lectures/L6/L6.ipynb
crystalzhaizhai/cs207_yi_zhai
mit
bcfb427caeb7bfdb633d99550b3fed39
Example 2 We'll just create a demo decorator here.
def decorate(f):
    print("Let's decorate!")
    d = 1.0
    def wrapper(*args):
        print("Entering function.")
        output = f(*args)
        print("Exited function.")
        if output > d:
            print("My d is bigger than yours.")
        elif output < d:
            print("Your d is bigger than mine.")
        else:
            print("Our ds are the same size.")
        return output  # pass the wrapped function's result through so callers still receive it
    return wrapper

@decorate
def useful_f(a, b, c):
    d1 = np.sqrt(a * a + b * b + c * c)
    return d1

d = useful_f(1.0, 2.0, 3.0)
lectures/L6/L6.ipynb
crystalzhaizhai/cs207_yi_zhai
mit
e1442b405bcff1007ebdb28fe963397e
Vertex SDK: AutoML training image classification model for batch prediction <table align="left"> <td> <a href="https://colab.research.google.com/github/GoogleCloudPlatform/vertex-ai-samples/tree/master/notebooks/official/automl/sdk_automl_image_classification_batch.ipynb"> <img src="https://cloud.google.com/ml-engine/images/colab-logo-32px.png" alt="Colab logo"> Run in Colab </a> </td> <td> <a href="https://github.com/GoogleCloudPlatform/vertex-ai-samples/tree/master/notebooks/official/automl/sdk_automl_image_classification_batch.ipynb"> <img src="https://cloud.google.com/ml-engine/images/github-logo-32px.png" alt="GitHub logo"> View on GitHub </a> </td> <td> <a href="https://console.cloud.google.com/ai/platform/notebooks/deploy-notebook?download_url=https://github.com/GoogleCloudPlatform/vertex-ai-samples/tree/master/notebooks/official/automl/sdk_automl_image_classification_batch.ipynb"> Open in Google Cloud Notebooks </a> </td> </table> <br/><br/><br/> Overview This tutorial demonstrates how to use the Vertex SDK to create image classification models and do batch prediction using a Google Cloud AutoML model. Dataset The dataset used for this tutorial is the Flowers dataset from TensorFlow Datasets. The version of the dataset you will use in this tutorial is stored in a public Cloud Storage bucket. The trained model predicts the type of flower an image is from a class of five flowers: daisy, dandelion, rose, sunflower, or tulip. Objective In this tutorial, you create an AutoML image classification model from a Python script, and then do a batch prediction using the Vertex SDK. You can alternatively create and deploy models using the gcloud command-line tool or online using the Cloud Console. The steps performed include: Create a Vertex Dataset resource. Train the model. View the model evaluation. Make a batch prediction. There is one key difference between using batch prediction and using online prediction: Prediction Service: Does an on-demand prediction for the entire set of instances (i.e., one or more data items) and returns the results in real-time. Batch Prediction Service: Does a queued (batch) prediction for the entire set of instances in the background and stores the results in a Cloud Storage bucket when ready. Costs This tutorial uses billable components of Google Cloud: Vertex AI Cloud Storage Learn about Vertex AI pricing and Cloud Storage pricing, and use the Pricing Calculator to generate a cost estimate based on your projected usage. Set up your local development environment If you are using Colab or Google Cloud Notebooks, your environment already meets all the requirements to run this notebook. You can skip this step. Otherwise, make sure your environment meets this notebook's requirements. You need the following: The Cloud Storage SDK Git Python 3 virtualenv Jupyter notebook running in a virtual environment with Python 3 The Cloud Storage guide to Setting up a Python development environment and the Jupyter installation guide provide detailed instructions for meeting these requirements. The following steps provide a condensed set of instructions: Install and initialize the SDK. Install Python 3. Install virtualenv and create a virtual environment that uses Python 3. Activate the virtual environment. To install Jupyter, run pip3 install jupyter on the command-line in a terminal shell. To launch Jupyter, run jupyter notebook on the command-line in a terminal shell. Open this notebook in the Jupyter Notebook Dashboard. 
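For orientation, the end-to-end flow outlined above looks roughly like the following with the Vertex SDK. This is only a sketch: the bucket paths and display names are placeholders, and argument names can differ slightly between SDK versions, so rely on the cells that follow for the exact, tested code.

from google.cloud import aiplatform

aiplatform.init(project="your-project-id", location="us-central1",
                staging_bucket="gs://your-bucket")

# 1. Create an image dataset from an import file stored in Cloud Storage.
dataset = aiplatform.ImageDataset.create(
    display_name="flowers",
    gcs_source="gs://your-bucket/flowers_import.csv",
    import_schema_uri=aiplatform.schema.dataset.ioformat.image.single_label_classification,
)

# 2. Train an AutoML image classification model.
job = aiplatform.AutoMLImageTrainingJob(display_name="flowers_automl",
                                        prediction_type="classification")
model = job.run(dataset=dataset, model_display_name="flowers_model",
                budget_milli_node_hours=8000)

# 3. Run a batch prediction job; results are written to Cloud Storage.
batch_job = model.batch_predict(
    job_display_name="flowers_batch",
    gcs_source="gs://your-bucket/batch_request.jsonl",
    gcs_destination_prefix="gs://your-bucket/batch_output/",
    sync=True,
)

The cells below set this up step by step, starting with installation of the SDK.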
Installation Install the latest version of Vertex SDK for Python.
import os

# Google Cloud Notebook
if os.path.exists("/opt/deeplearning/metadata/env_version"):
    USER_FLAG = "--user"
else:
    USER_FLAG = ""

! pip3 install --upgrade google-cloud-aiplatform $USER_FLAG
notebooks/community/sdk/sdk_automl_image_classification_batch.ipynb
GoogleCloudPlatform/vertex-ai-samples
apache-2.0
0836d13490088d45b161d53a185da074
Install the latest GA version of google-cloud-storage library as well.
! pip3 install -U google-cloud-storage $USER_FLAG

if os.getenv("IS_TESTING"):  # use getenv so an unset variable does not raise a KeyError
    ! pip3 install --upgrade tensorflow $USER_FLAG
notebooks/community/sdk/sdk_automl_image_classification_batch.ipynb
GoogleCloudPlatform/vertex-ai-samples
apache-2.0
c541dd3bc05ce6eb745102299a9eb1a8
Restart the kernel Once you've installed the additional packages, you need to restart the notebook kernel so it can find the packages.
import os

if not os.getenv("IS_TESTING"):
    # Automatically restart kernel after installs
    import IPython

    app = IPython.Application.instance()
    app.kernel.do_shutdown(True)
notebooks/community/sdk/sdk_automl_image_classification_batch.ipynb
GoogleCloudPlatform/vertex-ai-samples
apache-2.0
f5713200423cb69a9ac25cf527b794c0