Dataset schema (one row per repository):
- repo_name: string, length 5–114
- repo_url: string, length 24–133
- snapshot_id: string, length 40
- revision_id: string, length 40
- directory_id: string, length 40
- branch_name: string, 209 distinct values
- visit_date: timestamp[ns]
- revision_date: timestamp[ns]
- committer_date: timestamp[ns]
- github_id: int64, 9.83k–683M
- star_events_count: int64, 0–22.6k
- fork_events_count: int64, 0–4.15k
- gha_license_id: string, 17 distinct values
- gha_created_at: timestamp[ns]
- gha_updated_at: timestamp[ns]
- gha_pushed_at: timestamp[ns]
- gha_language: string, 115 distinct values
- files: list, length 1–13.2k
- num_files: int64, 1–13.2k
joshharper64/frost
https://github.com/joshharper64/frost
6793b5f3d1e282a0f136212c4e81032fd758b416
b698aa9efe2f3dab3205193ddfd088aa63bec177
11a4fdba69935b953126149173e2187b62b828d5
refs/heads/master
2020-12-30T14:01:07.573679
2017-05-18T18:54:11
2017-05-18T18:54:11
91,279,045
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7830188870429993, "alphanum_fraction": 0.7830188870429993, "avg_line_length": 20.200000762939453, "blob_id": "7645825e03700faa5832aa0e3ed808922e230e8b", "content_id": "bdf3e83678f7e7976ad00ecad2139fc9082e3fe7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 106, "license_type": "no_license", "max_line_length": 39, "num_lines": 5, "path": "/resident_reports/apps.py", "repo_name": "joshharper64/frost", "src_encoding": "UTF-8", "text": "from django.apps import AppConfig\n\n\nclass ResidentReportsConfig(AppConfig):\n name = 'resident_reports'\n" }, { "alpha_fraction": 0.5429769158363342, "alphanum_fraction": 0.5429769158363342, "avg_line_length": 33.07143020629883, "blob_id": "3c1798db12f35c393b4409e995693e723ac2dd69", "content_id": "13c7b06ce046dcb31e38cb1e24dd571258d0f552", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 477, "license_type": "no_license", "max_line_length": 74, "num_lines": 14, "path": "/homepage/templates/homepage/base.html", "repo_name": "joshharper64/frost", "src_encoding": "UTF-8", "text": "<p>\n <a href=\"{% url 'homepage:index' %}\">Frost</a> -\n <a href=\"{% url 'resident_reports:allreports' %}\">Resident Reports</a> -\n <a href=\"{% url 'homepage:about' %}\">About</a> -\n {% if user.is_authenticated %}\n Hello, {{ user.username }}.\n <a href=\"{% url 'users:logout' %}\">log out</a>\n {% else %}\n <a href=\"{% url 'users:register' %}\">register</a> -\n <a href=\"{% url 'users:login' %}\">log in</a>\n {% endif %}\n</p>\n\n{% block content %}{% endblock content %}\n" }, { "alpha_fraction": 0.5196506381034851, "alphanum_fraction": 0.5655021667480469, "avg_line_length": 19.81818199157715, "blob_id": "8de592326f25743d0e505b4fa0baedf3801e7b1e", "content_id": "e22d72013441057da4a24cb4f2b56f877bbd7a07", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 458, "license_type": "no_license", "max_line_length": 48, "num_lines": 22, "path": "/resident_reports/migrations/0003_auto_20170517_0033.py", "repo_name": "joshharper64/frost", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11.1 on 2017-05-17 00:33\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('resident_reports', '0002_report'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='report',\n name='topic',\n ),\n migrations.DeleteModel(\n name='Topic',\n ),\n ]\n" }, { "alpha_fraction": 0.6608391404151917, "alphanum_fraction": 0.6608391404151917, "avg_line_length": 34.75, "blob_id": "0c1393a160fdeaccfdcbd2a9a70663cfd5fee3a5", "content_id": "d60df3623bed9ff59a394a4664cb2cfbc4ce00ef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 286, "license_type": "no_license", "max_line_length": 85, "num_lines": 8, "path": "/resident_reports/urls.py", "repo_name": "joshharper64/frost", "src_encoding": "UTF-8", "text": "from django.conf.urls import url\nfrom . 
import views\n\nurlpatterns = [\n url(r'^allreports/$', views.allreports, name='allreports'),\n url(r'^new_report/$', views.new_report, name='new_report'),\n url(r'^edit_report/(?P<report_id>\\d+)/$', views.edit_report, name='edit_report'),\n]\n" }, { "alpha_fraction": 0.6822209358215332, "alphanum_fraction": 0.6822209358215332, "avg_line_length": 35.021278381347656, "blob_id": "ad789e47ed6f9dc9fd6ecca9a44bf028f6c8456e", "content_id": "a3187df8675e1a0bc78a1056884a80a264293a4d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1693, "license_type": "no_license", "max_line_length": 79, "num_lines": 47, "path": "/resident_reports/views.py", "repo_name": "joshharper64/frost", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nfrom django.http import HttpResponseRedirect\nfrom django.core.urlresolvers import reverse\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth import get_user_model\n\nfrom .models import Report\nfrom .forms import ReportForm\n\ndef allreports(request):\n \"\"\" Show list of all reports, regardless of topic \"\"\"\n reports = Report.objects.order_by('-date_added')\n context = {'reports': reports}\n return render(request, 'resident_reports/allreports.html', context)\n\n@login_required\ndef new_report(request):\n \"\"\" Add new report \"\"\"\n if request.method != 'POST':\n form = ReportForm()\n else:\n form = ReportForm(data=request.POST)\n if form.is_valid():\n new_entry = form.save(commit=False)\n new_entry.user_name = request.user\n form.save()\n return HttpResponseRedirect(reverse('resident_reports:allreports'))\n\n context = {'form': form}\n return render(request, 'resident_reports/new_report.html', context)\n\n@login_required\ndef edit_report(request, entry_id):\n \"\"\" Edit an existing report \"\"\"\n report = Report.objects.get(id=entry_id)\n if report.owner != request.owner:\n return HttpResponseRedirect(reverse('resident_reports:allreports'))\n if request.method != 'POST':\n form = ReportForm(instance=report)\n else:\n form = ReportForm(instance=entry, data=request.POST)\n if form.is_valid:\n form.save()\n return HttpResponseRedirect(reverse('resident_reports:allreports'))\n\n context = {'report': report, 'form': form}\n return render(request, 'resident_reports/edit_report.html', context)\n" }, { "alpha_fraction": 0.644385039806366, "alphanum_fraction": 0.6497326493263245, "avg_line_length": 27.769229888916016, "blob_id": "a8b01630af276a6e911944a3a428093bcfdfcba0", "content_id": "98c52c175b11a264e6cdfece72473a7ac0db51a5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 374, "license_type": "no_license", "max_line_length": 56, "num_lines": 13, "path": "/resident_reports/models.py", "repo_name": "joshharper64/frost", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.contrib.auth.models import User\n\nclass Report(models.Model):\n \"\"\" Report by User \"\"\"\n text = models.TextField()\n date_added = models.DateTimeField(auto_now_add=True)\n user_name = models.ForeignKey(User)\n class Meta:\n verbose_name_plural = 'reports'\n\n def __str__(self):\n return self.text[:50] + \"...\"\n" }, { "alpha_fraction": 0.8301886916160583, "alphanum_fraction": 0.8301886916160583, "avg_line_length": 20.200000762939453, "blob_id": "b7391fa988d66ece344d070366d85e4b50ce51fc", "content_id": "07acbdeacdc93b65ee4a638dee826fae6a3e96ba", "detected_licenses": [], "is_generated": false, "is_vendor": 
false, "language": "Python", "length_bytes": 106, "license_type": "no_license", "max_line_length": 42, "num_lines": 5, "path": "/resident_reports/admin.py", "repo_name": "joshharper64/frost", "src_encoding": "UTF-8", "text": "from django.contrib import admin\n\nfrom resident_reports.models import Report\n\nadmin.site.register(Report)\n" }, { "alpha_fraction": 0.692307710647583, "alphanum_fraction": 0.692307710647583, "avg_line_length": 23.55555534362793, "blob_id": "424f7ab30bc9272273f708758fb75bfdcc8f2127", "content_id": "eefa99d3fc855c8f3bb271391f23d1a0b592d102", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 221, "license_type": "no_license", "max_line_length": 49, "num_lines": 9, "path": "/homepage/views.py", "repo_name": "joshharper64/frost", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\n\ndef index(request):\n \"\"\"Homepage\"\"\"\n return render(request, 'homepage/index.html')\n\ndef about(request):\n \"\"\"About Section\"\"\"\n return render(request, 'homepage/about.html')\n" } ]
8
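The `edit_report` view in the frost row above contains several bugs: the URLconf passes `report_id` while the view signature takes `entry_id`; the ownership check reads `report.owner` and `request.owner`, neither of which exists (the `Report` model stores its author in `user_name`); the POST branch binds the form to an undefined `entry`; and `form.is_valid` is referenced without being called. A corrected sketch, keeping the repo's Django 1.11-era imports (field and URL names are taken from the row above; everything else is an assumption):

```python
# Hedged sketch: a corrected edit_report for the frost repo above.
# Assumes Django 1.11 (matching the repo's migrations), the Report model's
# user_name author field, and the URLconf's report_id keyword argument.
from django.shortcuts import render
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse  # Django 1.11 location
from django.contrib.auth.decorators import login_required

from .models import Report
from .forms import ReportForm

@login_required
def edit_report(request, report_id):          # matches (?P<report_id>\d+) in urls.py
    """Edit an existing report."""
    report = Report.objects.get(id=report_id)
    if report.user_name != request.user:      # the model has user_name, not owner
        return HttpResponseRedirect(reverse('resident_reports:allreports'))
    if request.method != 'POST':
        form = ReportForm(instance=report)
    else:
        form = ReportForm(instance=report, data=request.POST)  # not the undefined `entry`
        if form.is_valid():                   # is_valid is a method; it must be called
            form.save()
            return HttpResponseRedirect(reverse('resident_reports:allreports'))
    context = {'report': report, 'form': form}
    return render(request, 'resident_reports/edit_report.html', context)
```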
mehulsbhatt/investopedia-trading-api
https://github.com/mehulsbhatt/investopedia-trading-api
6f743470fca99cf7570079b67e03055135d6b825
cf8e821807b7fb2254b007e95f1e11b35d5fd306
2f162f3070754ed2015b1220f914a3e2b4353e76
refs/heads/master
2017-12-04T20:18:26.669546
2016-03-14T11:34:41
2016-03-14T11:34:41
54,290,375
1
0
null
2016-03-19T22:41:40
2016-02-17T09:14:00
2016-03-14T11:34:45
null
[ { "alpha_fraction": 0.6852589845657349, "alphanum_fraction": 0.6932271122932434, "avg_line_length": 34.85714340209961, "blob_id": "a49575cfbffca6a017a60eb6087b9c5c25f4df21", "content_id": "e69a72dd21191477e093a660f597c4e5543bac9b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 502, "license_type": "permissive", "max_line_length": 86, "num_lines": 14, "path": "/setup.py", "repo_name": "mehulsbhatt/investopedia-trading-api", "src_encoding": "UTF-8", "text": "from distutils.core import setup\n\nsetup(\n name = 'investopedia-trading-api',\n packages = ['investopedia-trading-api'],\n version = '1.0',\n description = 'An API for Investopedia\\'s paper trading simulator',\n author = 'Kirk Thaker',\n author_email = 'kirkthaker66@gmail.com',\n url = 'https://github.com/kirkthaker/investopedia-trading-api',\n download_url = 'https://github.com/kirkthaker/investopedia-trading-api/tarball/0.1',\n keywords = ['trading', 'finance', 'investopedia', 'algorithmic'],\n classifiers = [],\n)\n" } ]
1
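The `setup.py` in the investopedia-trading-api row above lists `investopedia-trading-api` in `packages`, but hyphens are not valid in Python import names, and the declared version (`1.0`) does not match the tarball referenced in `download_url` (`0.1`). A sketch of a more conventional layout, assuming (this is not in the row) that the package directory is renamed to an underscore form:

```python
# Hedged sketch: setuptools-based setup.py, assuming the package directory is
# renamed from investopedia-trading-api to investopedia_trading_api so it can
# actually be imported. The PyPI distribution name may keep the hyphen.
from setuptools import setup

setup(
    name='investopedia-trading-api',           # distribution name (hyphens allowed)
    packages=['investopedia_trading_api'],     # import package (no hyphens allowed)
    version='1.0',
    description="An API for Investopedia's paper trading simulator",
    author='Kirk Thaker',
    author_email='kirkthaker66@gmail.com',
    url='https://github.com/kirkthaker/investopedia-trading-api',
    download_url='https://github.com/kirkthaker/investopedia-trading-api/tarball/1.0',  # match version
    keywords=['trading', 'finance', 'investopedia', 'algorithmic'],
    classifiers=[],
)
```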
abndre/TensaoResidual
https://github.com/abndre/TensaoResidual
d63077e846f57651c63d7027a717a46835afd5cb
2b101b9ddf0f74d9a6a0b3e3c4f826bbf6c2b84b
cea37a25e756421ea9bf13eb8c15bf617b19c725
refs/heads/master
2020-03-24T01:58:11.025120
2018-07-29T22:19:09
2018-07-29T22:19:09
142,357,862
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.3454987704753876, "alphanum_fraction": 0.37956205010414124, "avg_line_length": 23.235294342041016, "blob_id": "4fcbdec250daec3fe8d5a87d00115b32a4d3d443", "content_id": "f7a83386894bde160592ccb47f77c5b18452dbe9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 411, "license_type": "no_license", "max_line_length": 80, "num_lines": 17, "path": "/P_L_/P_L_PB_1_/read_raw.py", "repo_name": "abndre/TensaoResidual", "src_encoding": "UTF-8", "text": "#-------------------------------------------------------------------------------\n# Name: module1\n# Purpose:\n#\n# Author: Andrezio\n#\n# Created: 23/07/2017\n# Copyright: (c) Andrezio 2017\n# Licence: <your licence>\n#-------------------------------------------------------------------------------\n\nfile_name='P_L_PB_1_.raw'\ndatafile = file(file_name)\n\nimport fabio\n\nimage = fabio.open(file_name)" }, { "alpha_fraction": 0.6868250370025635, "alphanum_fraction": 0.6976242065429688, "avg_line_length": 45.29999923706055, "blob_id": "6a8dad06b520d70b9851a7314b245b4fe1109a24", "content_id": "80efe03ec5befa92ae48f95f07012eea21816425", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 463, "license_type": "no_license", "max_line_length": 184, "num_lines": 10, "path": "/calc_stress.py", "repo_name": "abndre/TensaoResidual", "src_encoding": "UTF-8", "text": "#import matplotlib.pyplot as plt\n#from commands import multi, removerbackground,removekalpha, normalizar, removerzero, background,processing_of_data, lenar_calc, read_file,center_psi, red_file_rigaku,red_files_chimazu\nfrom commands import red_file_rigaku,red_files_chimazu\nif __name__ == \"__main__\":\n\n\n print('Start')\n #red_files_chimazu('P_L_','P_L_PB_3_')\n red_files_chimazu('popb','Po_PB_7,1_')\n #red_file_rigaku ('P_L_1/P_PB_L_{}.ASC'.format(7))\n" }, { "alpha_fraction": 0.6166912913322449, "alphanum_fraction": 0.6418020725250244, "avg_line_length": 23.618181228637695, "blob_id": "96696ec4ffab8f1582dbd3147721ce5279b7c510", "content_id": "5d8cbd79d0c3e8e4d835786991a15bcf0a177ba7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1354, "license_type": "no_license", "max_line_length": 80, "num_lines": 55, "path": "/window.py", "repo_name": "abndre/TensaoResidual", "src_encoding": "UTF-8", "text": "#-------------------------------------------------------------------------------\n# Purpose:\n#\n# Author: Andre Santos Barros da Silva\n#\n# Created: 27/07/2018\n# Copyright: \n# Licence: \n#-------------------------------------------------------------------------------\nfrom tkinter import *\n\n\n\nroot = Tk()\nroot.title('Notebook')\n\n\n\ntexto = Label(root,text='SHOW').place(x=10,y=5)\n\nhorizontal=0\nvertical=40\n\nbtnPlotar = Button(root, text=\"SAMPLE\").place(x=horizontal,y=vertical)\nvertical+=30\nbtnPlotar = Button(root, text=\"PLOT\").place(x=horizontal,y=vertical)\nvertical+=30\nbtnResetar = Button(root, text=\"RESET\").place(x=horizontal,y=vertical)\nvertical+=30\nbtnPlotar = Button(root, text=\"CLOSE\").place(x=horizontal,y=vertical)\nvertical+=30\nbtnPlotar = Button(root, text=\"BACK\").place(x=horizontal,y=vertical)\nvertical+=30\nbtnPlotar = Button(root, text=\"DOWNLOAD\").place(x=horizontal,y=vertical)\n\n\n\n\n#menu\nmenubar = Menu(root)\nfilemenu= Menu(menubar)\nfilemenu.add_command(label=\"Open 
File\")\nfilemenu.add_command(label=\"Close\")\nfilemenu.add_separator()\n\nmenubar.add_cascade(label=\"File\",menu=filemenu)\nhelpmenu = Menu(menubar)\nhelpmenu.add_command(label=\"Help Index\")\nhelpmenu.add_command(label=\"About\")\nmenubar.add_cascade(label=\"Help\",menu=helpmenu)\nroot.config(menu=menubar)\n\nroot.title(\"Cristal Mat - Xtress - IPEN\")\nroot.geometry(\"650x380+10+10\")\nroot.mainloop()\n" }, { "alpha_fraction": 0.5235596299171448, "alphanum_fraction": 0.5387336015701294, "avg_line_length": 24.479650497436523, "blob_id": "f60d39138092a323563c481e24469239e1e78f1b", "content_id": "556546ea1740f7bf0992fff1953f9de43ed59e69", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8765, "license_type": "no_license", "max_line_length": 115, "num_lines": 344, "path": "/commands.py", "repo_name": "abndre/TensaoResidual", "src_encoding": "UTF-8", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.signal import savgol_filter\nfrom lmfit.models import VoigtModel,PseudoVoigtModel, LinearModel\nfrom scipy import stats\n\ndef LPM(theta,psi):\n radians = np.radians(theta)\n radiansby2 = np.radians(theta/2)\n radianpsi = np.radians(psi)\n\n cima = 1 + np.cos(radians)**2\n baixo = np.sin(radiansby2)**2\n lado = 1 - np.tan(radianpsi)/np.tan(radiansby2)\n\n LPM_value = (cima/baixo)*lado\n\n return LPM_value\n\ndef Lorentz_polarization_modified(psi,x,y):\n new_list =[]\n for key, value in enumerate(x):\n new = LPM(value,psi)\n new_list.append(y[key]/new)\n #import pdb;pdb.set_trace()\n return (new_list)\n\ndef plotar_intensity_position():\n plt.grid()\n plt.legend(loc=0)\n plt.xlabel('Position (2/Theta)')\n plt.ylabel('Intensity(u.a.)')\n plt.show()\n\n#return K const, based in sample\ndef multi(E=210000,v=0.3,theta2=156):\n theta2/=2\n V=2.0*(1.0+v)\n theta = np.radians(theta2)\n theta = np.tan(theta)\n theta = 1.0/theta\n theta *= (np.pi/180.0)\n theta *=E\n theta /=-1.0*V\n ## return theta/9.8#kg\n return theta#Mpa\n\n##################################\n#Cleand Data\n#return novot\ndef removekalpha(x,y):\n lambida2=1.541220\n lambida1=1.537400\n deltaL = lambida2 - lambida1\n deltaL = deltaL/lambida1\n diferenca=x[1]-x[0]\n minimo=min(y)\n novoy=[]\n for i in range(len(y)):\n deltasoma = x[1]-x[0]\n ase= np.tan(np.radians(x[i]/2))*2*deltaL/(diferenca)\n n=1;\n\n while(ase>deltasoma):\n deltasoma=deltasoma+diferenca\n n+=1\n try:\n yy=y[i]-0.5*y[i-n]\n\n if yy<0:yy=(yy+y[i])/8\n\n if yy<0:yy=minimo\n novoy.append(yy)\n except:\n novoy.append(y[i])\n\n return novoy\n\n\n#return y\ndef background(y):\n minimo=min(y)\n for i in range(len(y)):\n y[i]-=minimo\n return y\n\n#return y\ndef normalizar(y):\n minimo=max(y)\n for i in range(len(y)):\n y[i]/=minimo\n return y\n\ndef removerzero(vetor):\n for key, value in enumerate(vetor):\n if value <0:\n vetor[key]=0\n\n for key,value in enumerate(vetor):\n try:\n if vetor[key+1]==0 and value >0:\n vetor[key]=0\n except:\n pass\n return vetor\n\ndef removerbackground(x,y,m=5):\n\n minimo= np.mean( np.sort(y)[:10])\n for i in range(len(y)):\n y[i]=y[i]-minimo\n slope, intercept, r_value, p_value, std_err = stats.linregress(np.append(x[:m],x[-m:]),np.append(y[:m],y[-m:]))\n abline_values = [slope * i + intercept for i in x]\n abline_values=np.asarray(abline_values)\n return removerzero(y-abline_values)\n#Cleand Data\n\n\ndef processing_of_data(psi,x,y):\n #y = normalizar(y)\n\n y = background(y)\n\n y = removerbackground(x,y)\n\n\n #import pdb;pdb.set_trace()\n #plt.plot(y)\n y 
= Lorentz_polarization_modified(psi,x,y)\n #plt.plot(y);plt.show();import pdb;pdb.set_trace()\n y = removekalpha(x,y)\n\n y = savgol_filter(y, 5, 2)\n\n y = normalizar(y)\n\n return y\n\n\ndef lenar_calc(x,y):\n mod = LinearModel()\n pars = mod.guess(y, x=x)\n out = mod.fit(y, pars, x=x)\n calc= out.best_values['slope']\n stress=calc*multi()\n stress=round(stress,3)\n #plt.plot(x,out.bes_fit)\n return stress, x , out.best_fit,out\n #print(out.best_values)\n\n\ndef read_file(file_name):\n psi=0\n try:\n r = open(file_name,'r',encoding = \"ISO-8859-1\")\n except:\n return False\n printar = False\n vx = []\n vy = []\n for i in r:\n if printar:\n value = i.split(' ')\n x=value[3]\n x = float(x)\n vx.append(x)\n y=value[-1].split('\\n')[0]\n y =float(y)\n vy.append(y)\n if not printar and '<2Theta> < I >' in i:\n printar = True\n if not printar and 'psi angle' in i:\n value = i.split(' ')\n psi=float(value[-3])\n psi=np.sin(np.radians(psi))**2\n\n vx = np.asarray(vx)\n vy = np.asarray(vy)\n return psi, vx, vy\n\n\ndef calc_center_pseudoVoigt(vx,vy):\n mod = PseudoVoigtModel()\n y = vy\n pars = mod.guess(y, x=vx)\n out = mod.fit(y, pars, x=vx)\n center = out.best_values['center']\n return center\n\ndef parabol(x):\n import pdb; pdb.set_trace()\n# for key, value in enumerate(x):\n\n\ndef center_psi(file_name):\n #print(file_name)\n psi, vx, vy = read_file(file_name)\n vy = processing_of_data(psi,vx,vy)\n legenda = file_name.split('/')[-1]\n #plt.grid()\n #plt.legend(loc=0)\n #import pdb; pdb.set_trace()\n plt.plot(vx,vy,label=legenda)\n mod = PseudoVoigtModel()\n y=vy\n pars = mod.guess(y, x=vx)\n out = mod.fit(y, pars, x=vx)\n center =out.best_values['center']\n print('center: {} <--> psi: {}'.format(center,psi))\n return psi, center\n\n\n\n#Medidas Rigaku\ndef get_value(i):\n return float(i.split(' ')[-1].split('\\n')[0])\n\n\n#list_keys = list(dicio.keys())\n\n\ndef red_file_rigaku(folder_name):\n dicio={\n '*START':0.0,\n '*STOP' :0.0,\n '*STEP' :0.0,\n '*ST_PSI_ANGLE':0.0\n }\n\n dados={}\n\n file ='P_L_1/P_PB_L_1.ASC'\n file = folder_name\n r = open(file,'r')\n find_intensity=False\n x=[]\n y=[]\n for i in r:\n #print(i)\n if '*END' in i:\n find_intensity=False\n vx = np.asarray(x)\n vy = np.asarray(y)\n vy = processing_of_data(dicio['*ST_PSI_ANGLE'],vx,vy)\n #import pdb; pdb.set_trace()\n plt.plot(vx,vy,label=dicio['*ST_PSI_ANGLE'])\n\n #plt.plot(vy)\n dados[dicio['*ST_PSI_ANGLE']]={}\n dados[dicio['*ST_PSI_ANGLE']]['x']=vx\n dados[dicio['*ST_PSI_ANGLE']]['y']=vy\n x=[]\n y=[]\n elif find_intensity:\n value = i.split(',')\n for i in value:\n if len(x)==0:\n x.append(dicio['*START'])\n y.append(float(i))\n dicio['*NEW_DICIO']=(dicio['*START']+dicio['*STEP'])\n else:\n x.append(dicio['*NEW_DICIO'])\n dicio['*NEW_DICIO']=(dicio['*NEW_DICIO']+dicio['*STEP'])\n y.append(float(i))\n elif '*START' in i:\n dicio['*START']=get_value(i)\n elif '*STOP' in i:\n dicio['*STOP']=get_value(i)\n elif '*STEP' in i:\n dicio['*STEP']=get_value(i)\n elif '*ST_PSI_ANGLE' in i:\n dicio['*ST_PSI_ANGLE']=get_value(i)\n elif '*COUNT' in i and not '*COUNTER' in i:\n find_intensity=True\n\n plotar_intensity_position()\n\n center_list =[]\n psi_list =[]\n for key, value in dados.items():\n psi_list.append(np.sin(np.radians(key))**2)\n center = calc_center_pseudoVoigt(value['x'],value['y'])\n center_list.append(center)\n print('center: {} <--> psi: {}'.format(center,np.sin(np.radians(key))**2))\n\n legenda ,x,bestY, out= lenar_calc(psi_list,center_list)\n\n plt.plot(psi_list,center_list,'o',label='Values')\n 
plt.plot(x,bestY,label='Best')\n miny=int(min(center_list))-2\n maxy=int(max(center_list))+2\n maxx=round(max(psi_list),3)+round(max(psi_list),3)/2\n plt.axis([0,maxx,miny,maxy])\n\n plt.grid()\n #plt.title(dados)\n plt.legend()\n plt.xlabel('$\\sin ^{2}\\omega (Mpa)$')\n plt.ylabel('$2\\Theta (Degre)$')\n #import pdb;pdb.set_trace()\n plt.title('{}'.format(legenda))\n plt.show()\n\n#Chimazu\ndef red_files_chimazu(folderbefore,folder_name):\n #dados='P_L_PB_3_'\n center_list =[]\n psi_list =[]\n\n dados = folder_name\n first_file='{}/{}/{}.txt'.format(folderbefore,dados,dados)\n file_names=[]\n file_names.append(first_file)\n\n for i in range(1,10):\n file_name='{}/{}{}/{}{}.txt'.format(folderbefore,dados,str(i),dados,str(i))\n file_names.append(file_name)\n\n\n\n for file_name in file_names:\n psi, center = center_psi(file_name)\n psi_list.append(psi)\n center_list.append(center)\n plotar_intensity_position()\n #print(psi_list)\n #print(center_list)\n\n\n miny=int(min(center_list))-2\n maxy=int(max(center_list))+2\n maxx=round(max(psi_list),3)+round(max(psi_list),3)/2\n plt.axis([0,maxx,miny,maxy])\n\n plt.grid()\n plt.title(dados)\n plt.xlabel('$\\sin ^{2}\\omega (Mpa)$')\n plt.ylabel('$2\\Theta (Degre)$')\n legenda ,x,bestY,out= lenar_calc(psi_list,center_list)\n #plt.legend(legenda)\n plt.plot(psi_list,center_list,'o',label=('{}'.format(legenda)))\n plt.plot(x,bestY)\n plt.legend(loc=0)\n plt.show()\n" } ]
4
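The `commands.py` in the TensaoResidual row above implements the sin²ψ residual-stress method: `multi()` builds the elastic constant K = −E · cot θ · (π/180) / (2(1+ν)), and `lenar_calc()` multiplies K by the slope of a linear fit of 2θ against sin²ψ. A condensed sketch of that calculation (the defaults E = 210000 MPa, ν = 0.3, 2θ = 156° come from the repo; the example peak positions are invented):

```python
# Hedged sketch of the stress calculation performed by multi() and
# lenar_calc() in the commands.py above.
import numpy as np

def k_constant(E=210000.0, v=0.3, two_theta=156.0):
    """Elastic constant K in MPa per degree of 2theta (defaults from the repo)."""
    theta = np.radians(two_theta / 2.0)
    return -E / (2.0 * (1.0 + v)) / np.tan(theta) * (np.pi / 180.0)

def stress_mpa(sin2psi, two_theta_deg):
    """Residual stress = slope of the 2theta vs sin^2(psi) line, scaled by K."""
    slope, _intercept = np.polyfit(sin2psi, two_theta_deg, 1)
    return slope * k_constant()

# Invented peak positions, for illustration only:
print(stress_mpa([0.0, 0.1, 0.2, 0.3], [156.10, 156.08, 156.06, 156.04]))
```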
deharahawa/batida-ponto
https://github.com/deharahawa/batida-ponto
5cced258c77a614ccd934b0178bda6f816d373cc
243669c73fe38c13f367b493e87f2ec680c319f8
8edf6f9b926c256408398491e9ff48d0bf0e868e
refs/heads/master
2022-11-24T20:20:19.324322
2020-07-28T20:22:16
2020-07-28T20:22:16
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7354781031608582, "alphanum_fraction": 0.7354781031608582, "avg_line_length": 26.317073822021484, "blob_id": "4165db41536be08f54b806d3631e78f086825bc5", "content_id": "e9ede069dfa3e54dea83aec91f8478ea793cbf50", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1120, "license_type": "no_license", "max_line_length": 71, "num_lines": 41, "path": "/app/serializer.py", "repo_name": "deharahawa/batida-ponto", "src_encoding": "UTF-8", "text": "# from marshmallow_jsonapi.flask import Schema\nfrom marshmallow_jsonapi import fields\nfrom marshmallow import ValidationError\nfrom flask_marshmallow import Marshmallow\n\nma = Marshmallow()\n\ndef configure(app):\n \"\"\"\n Factory para poder configurar\n \"\"\"\n ma.init_app(app)\n\n\ndef must_not_be_blank(data):\n \"\"\"\n Valida que os dados nao estao em branco\n \"\"\"\n if not data:\n raise ValidationError('Dado não informado')\n\n# class UserSchema(Schema):\nclass UserSchema(ma.SQLAlchemyAutoSchema):\n \"\"\"\n Define o Schema do User\n \"\"\"\n id = fields.Integer()\n nome_completo = fields.Str(required=True, validate=must_not_be_blank)\n cpf = fields.Str(required=True,validate=must_not_be_blank)\n email = fields.Str(required=True, validate=must_not_be_blank)\n data_cadastro = fields.DateTime(dump_only=True)\n # pontos = ma.Nested(PontoSchema, many=True)\n\n\n# class PontoSchema(Schema):\nclass PontoSchema(ma.SQLAlchemyAutoSchema):\n id = fields.Integer()\n user = fields.Nested(UserSchema, validate=must_not_be_blank)\n user_id = fields.Integer()\n data_batida = fields.DateTime(dump_only=True)\n tipo_batida = fields.Integer()" }, { "alpha_fraction": 0.6660507321357727, "alphanum_fraction": 0.6776299476623535, "avg_line_length": 35.08444595336914, "blob_id": "04e531434c55591919ca85ec0fd41a46e046c66d", "content_id": "a6d8ebaabf685263466b0efdead0d21372171606", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8123, "license_type": "no_license", "max_line_length": 167, "num_lines": 225, "path": "/app/checks.py", "repo_name": "deharahawa/batida-ponto", "src_encoding": "UTF-8", "text": "from flask import Blueprint, request, jsonify, current_app\nfrom .serializer import PontoSchema\nfrom .models import Ponto, User\nfrom datetime import datetime\nfrom marshmallow import ValidationError\nimport re\n\n\nponto_blueprint = Blueprint('checks', __name__)\n\ndef get_horas(dado):\n \"\"\"\n Usa regex para pegar as horas no formato UTC\n \"\"\"\n horas = re.findall('[0-9]{2}:[0-9]{2}:[0-9]{2}', dado)\n return horas\n\n\ndef get_date(dado):\n \"\"\"\n Usa regex para pegar a data no formato UTC\n \"\"\"\n date = re.findall('[0-9]{4}-[0-9]{2}-[0-9]{2}', dado)\n return date\n\n\ndef get_ano_mes_dia(dado):\n \"\"\"\n Usa regex para separar ano, mes e dia\n \"\"\"\n ano, mes, dia = re.split('[^0-9]+', dado)\n return int(ano), int(mes), int(dia)\n\n\ndef get_hora_minutos_segs(dado):\n \"\"\"\n Usa regex para separar hora, minutos e segundos\n \"\"\"\n hora, minutos, segs = re.split('[^0-9]+', dado)\n return int(hora), int(minutos), int(segs)\n\n\n@ponto_blueprint.route('/ponto', methods=['POST'])\ndef cadastrar():\n # Instancia PontoSchema\n ponto_schema = PontoSchema()\n\n json_data = request.json\n # Checa se existem dados vindo na request\n if not json_data:\n return {\"message\": \"Sem dados informados\"}, 400\n # Verificar se ha error ao realizar o load\n try:\n data, errors = ponto_schema.load(json_data)\n except ValidationError as 
err:\n return err.messages, 422\n # Pega o user que esta batendo o ponto\n user = User.query.filter_by(id = data['user_id']).first()\n\n # Puxa todos os pontos batidos do usuario\n ponto_anterior = Ponto.query.filter(Ponto.user_id == data['user_id'])\n # Pega o ponto anterior para verificar se o usuario nao esta batendo o mesmo tipo de ponto 2 vezes\n ponto_anterior = PontoSchema(many=True).jsonify(ponto_anterior)\n # Converte para json\n ponto_anterior_json = ponto_anterior.json\n # Verifica se ja ha um ponto batido, senao nao ha anterior\n if len(ponto_anterior_json) > 0:\n # Guarda o tipo de batida de ponto anterior\n tipo_batida_memory = 0\n # Salva realmente o tipo de batida anterior\n tipo_batida_memory = ponto_anterior_json[len(ponto_anterior_json)-1]['tipo_batida'] \n # Confere se o usuário nao esta batendo ponto em duplicata\n if tipo_batida_memory != data['tipo_batida']:\n tipo_batida_memory = data['tipo_batida']\n else:\n return {\"message\":\"Ponto já batido\"}\n # Pega o horario atual\n now = datetime.now()\n\n if user is None:\n # Cadastra um usuario para o ponto caso nao exista na base\n user = User(nome_completo=\"Nao identificado\", cpf=\"0\", email='nao@identificado.com', data_cadastro=now)\n # Cria o ponto\n ponto = Ponto(user=user, user_id=data['user_id'], tipo_batida=data['tipo_batida'], data_batida=now)\n # Salva as alteracoes no banco\n current_app.db.session.add(ponto)\n current_app.db.session.commit()\n\n return ponto_schema.jsonify(ponto), 201\n\n@ponto_blueprint.route('/pontos', methods=['GET'])\ndef mostrar():\n \"\"\"\n Seleciona todos os pontos batidos por todos os usuarios\n \"\"\"\n result = Ponto.query.all()\n return PontoSchema(many=True).jsonify(result), 200\n\n\n@ponto_blueprint.route('/pontos/<identificador>', methods=['GET'])\ndef mostrar_usuario(identificador):\n \"\"\"\n Mostra todos os pontos de um usuario especifico \n \"\"\"\n # Faz a query usando o user_id\n result = Ponto.query.filter_by(user_id = identificador)\n # Chama a funcao que calcula o total de horas para determinado usuario\n horas_trabalhadas = calcula_horas(identificador)\n # Pega o result da query feita anteriormente\n result = PontoSchema(many=True).jsonify(result)\n\n # Faz o append das horas trabalhadas no último ponto retornado\n result.json[len(result.json)-1]['horas_trabalhadas'] = horas_trabalhadas.get('horas trabalhadas')\n\n return jsonify(result.json), 200\n\n\n@ponto_blueprint.route('/pontos-user/<identificador>', methods=['GET'])\ndef calcula_horas(identificador):\n \"\"\"\n Calcula as horas trabalhadas \n \"\"\"\n # Calcula as horas trabalhadas pelo user\n data = Ponto.query.filter(Ponto.user_id == identificador)\n # Pega o result da query\n result_json = PontoSchema(many=True).jsonify(data)\n # Cria listas para guardar entradas e saidas\n entrada = []\n saida = []\n # Varre os campos do result da query para separar o que sao batidas de ponto de entrada e saida\n for field in result_json.json:\n if field['tipo_batida'] == 1:\n entrada.append(field['data_batida'])\n else:\n saida.append(field['data_batida'])\n \n # Precisa pegar o total de horas trabalhadas\n horas_total = []\n\n\n for i in range(len(saida)):\n # Nao deve dar problemas porque contamos as saidas, se o funcionario deu entrada e ainda nao saiu o vetor de saidas vai ser automaticamente menor que o de entradas\n # Pega a data de entrada\n date_entrada = get_date(entrada[i])\n ano_entrada, mes_entrada, dia_entrada = get_ano_mes_dia(date_entrada[0])\n # Pega a data de saida\n date_saida = get_date(saida[i])\n 
ano_saida, mes_saida, dia_saida = get_ano_mes_dia(date_saida[0])\n # Faz algumas verificacoes para nao realizar comparacoes que nao fazem sentido\n if ano_saida != ano_entrada:\n continue\n if mes_saida != mes_entrada:\n continue\n if dia_entrada > dia_saida:\n continue\n\n # Pega hora de entrada e saida\n time_entrada = get_horas(entrada[i])\n time_saida = get_horas(saida[i])\n\n hora_entrada, mins_entrada, segs_entrada = get_hora_minutos_segs(time_entrada[0])\n hora_saida, mins_saida, segs_saida = get_hora_minutos_segs(time_saida[0])\n\n if (dia_saida - dia_entrada) == 1:\n # Caso de um turno noturno\n if hora_entrada > hora_saida:\n if mins_entrada > mins_saida:\n # Caso a diferenca entre os minutos nao complete uma hora e reinicie a contagem por ter virado a hora\n # Por exemplo de 23:59 ate 09:05, temos 6 minutos e aqui eh possivel realizar esse calculo\n minutos_trabalhados = 60 - mins_entrada\n minutos_trabalhados += mins_saida\n # desconta porque a hora nao é completa\n hora_saida -= 1\n elif mins_entrada <= mins_saida:\n # Calcula normalmente os minutos trabalhados\n minutos_trabalhados = mins_saida - mins_entrada\n # Calcula o tempo ate a meia noite\n hora_entrada_mins = (24*60) - ((hora_entrada*60) + mins_entrada)\n if (hora_entrada_mins + minutos_trabalhados) >= 60:\n while((hora_entrada_mins + minutos_trabalhados) >= 60):\n # Faz a conversao das horas ate a meia noite e sobram os minutos trabalhados que serao calculados como fracao de hora\n minutos_trabalhados -= 60\n hora_saida += 1\n # Computa as horas trabalhadas + a fracao de hora\n horas_trabalhadas = hora_saida + (minutos_trabalhados/60)\n\n horas_total.append(horas_trabalhadas)\n else:\n # Entao foi no mesmo dia e o for vai tratar ainda\n continue\n if dia_saida == dia_entrada:\n # Caso de entrada e saida no mesmo dia\n if mins_entrada > mins_saida:\n # Caso a diferenca entre os minutos nao complete uma hora e reinicie a contagem por ter virado a hora\n # Por exemplo de 10:45 ate 11:10, temos 25 minutos e aqui eh possivel realizar esse calculo\n minutos_trabalhados = 60 - mins_entrada\n minutos_trabalhados += mins_saida\n # desconta porque a hora nao é completa\n hora_saida -= 1\n elif mins_entrada <= mins_saida:\n minutos_trabalhados = mins_saida - mins_entrada\n \n # Computa as horas trabalhadas subtraindo a hora de saida da hora de entrada + fracoes de minutos\n horas_trabalhadas = (hora_saida-hora_entrada) + (minutos_trabalhados/60)\n\n horas_total.append(horas_trabalhadas)\n\n soma_horas = 0.0\n for horas in horas_total:\n # Faz o somatorio das horas totais de todos os dias ou periodos\n soma_horas += horas\n\n return {\"horas trabalhadas\": (\"%.2f horas\" % soma_horas)}\n\n\n@ponto_blueprint.route('/limpar/', methods=['GET'])\ndef deletar():\n \"\"\"\n Limpa todos os pontos \n \"\"\"\n # Pega todas as batidas de ponto e deleta\n Ponto.query.filter().delete()\n # Salva as alteracoes no banco\n current_app.db.session.commit()\n return jsonify('Limpa a base')" }, { "alpha_fraction": 0.7205638289451599, "alphanum_fraction": 0.7520729899406433, "avg_line_length": 16.492753982543945, "blob_id": "4a8154f4836ecbc57511d9622ad2a08c827c1762", "content_id": "600c0ca9acd5484a45eb5d7a6d26b4b415dac4e7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1211, "license_type": "no_license", "max_line_length": 79, "num_lines": 69, "path": "/README.md", "repo_name": "deharahawa/batida-ponto", "src_encoding": "UTF-8", "text": "# Registro de Ponto\n\nCriação de um sistema 
simples para controle de entrada e saída de uma empresa. \nO sistema deve permitir o cadastro de usuários e o registro de ponto dos\nmesmos.\n\n## Requisitos\n- flask-rest-jsonapi\n- flask-sqlalchemy\n- flask-migrate\n\nPara instalar basta rodar \n```\npip3 install -r requirements.txt\n```\n\n## Configurar ambiente virtual\n```sh\npython3 -m venv .venv\nsource .venv/bin/activate\n```\n\n## Como rodar o app\n```sh\nexport FLASK_APP=app\nexport FLASK_DEBUG=True\n\nflask run\n```\n## Como fazer as migrations\n```sh\nflask db init\nflask db migrate\nflask db upgrade\n```\n\n## Endpoints Usuario\n```sh\nCREATE\nGET http://localhost:5000/mostrar\n\nREAD\nPOST http://localhost:5000/cadastrar\n\nUPDATE\nPOST http://localhost:5000/modificar/<id do usuario>\n\nDELETE\nGET http://localhost:5000/deletar/<id do usuario>\n```\n\n## Endpoints Ponto\n```sh\nDELETE\nGET http://localhost:5000/limpar\n\nMostrar somente horas trabalhadas por usuário\nGET http://localhost:5000/pontos-user/<id do usuario>\n\nREAD todos os pontos\nGET http://localhost:5000/pontos\n\nMostrar todas as batidas de pontos do usuario \nmais horas trabalhadas\nGET http://localhost:5000/pontos/<id do usuario>\n\nCREATE\nPOST http://localhost:5000/ponto\n```" }, { "alpha_fraction": 0.7382645606994629, "alphanum_fraction": 0.7382645606994629, "avg_line_length": 23.275861740112305, "blob_id": "6efe4e35c3a94630eb85ab6855111f60e32f35b4", "content_id": "d7e35ef48ec8e2c279ec8d701d74d139f26a5cfd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 703, "license_type": "no_license", "max_line_length": 67, "num_lines": 29, "path": "/app/__init__.py", "repo_name": "deharahawa/batida-ponto", "src_encoding": "UTF-8", "text": "from flask import Flask\nfrom flask_migrate import Migrate\nfrom .models import configure as config_db\nfrom .serializer import configure as config_ma\n\ndef create_app():\n app = Flask(__name__)\n\n # sqlite db uri configuration\n app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:////tmp/users.db'\n\n # remove error from track mod\n app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n\n # Configura DB\n config_db(app)\n # Configura Marshmallow\n config_ma(app)\n # Realiza migration\n Migrate(app, app.db)\n \n # Import dos blueprints\n from .users import user_blueprint\n app.register_blueprint(user_blueprint)\n\n from .checks import ponto_blueprint\n app.register_blueprint(ponto_blueprint)\n\n return app" }, { "alpha_fraction": 0.7004830837249756, "alphanum_fraction": 0.7053139805793762, "avg_line_length": 28.125, "blob_id": "9b9e384e087cfb077c9ca74d653d961f3244cae6", "content_id": "768c5f3b9b302b63180e0568950ae494a3193af8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1863, "license_type": "no_license", "max_line_length": 107, "num_lines": 64, "path": "/app/users.py", "repo_name": "deharahawa/batida-ponto", "src_encoding": "UTF-8", "text": "from flask import Blueprint, request, jsonify, current_app\nfrom .serializer import UserSchema\nfrom .models import User\nfrom datetime import datetime\n\nuser_blueprint = Blueprint('usuarios', __name__)\n\n@user_blueprint.route('/cadastrar', methods=['POST'])\ndef cadastrar():\n \"\"\"\n Cadastra um user na base \n \"\"\"\n # Instancia o Schema\n user_schema = UserSchema()\n # Faz o load dos dados da request\n user, error = user_schema.load(request.json)\n # Verifica se houve erro no load\n if error:\n return jsonify(error), 401\n # Pega a data atual no formato UTC\n now = 
datetime.now()\n # Cria o user\n user = User(nome_completo=user['nome_completo'], cpf=user['cpf'], email=user['email'], data_cadastro=now)\n # Salva as alteracoes no banco\n current_app.db.session.add(user)\n current_app.db.session.commit()\n\n return user_schema.jsonify(user), 201\n\n@user_blueprint.route('/mostrar', methods=['GET'])\ndef mostrar():\n \"\"\"\n Mostra todos os usuarios \n \"\"\"\n # Realiza a query de todos os usuarios\n result = User.query.all()\n return UserSchema(many=True).jsonify(result), 200\n\n\n@user_blueprint.route('/modificar/<identificador>', methods=['POST'])\ndef modificar(identificador):\n \"\"\"\n Possibilita modificar um usuario sem mexer no id e na data \n \"\"\"\n # Instancia o Schema\n user_schema = UserSchema()\n # Faz a query para o user especifico\n query = User.query.filter(User.id == identificador)\n # Faz o update\n query.update(request.json)\n # Salva a alteracao\n current_app.db.session.commit()\n return user_schema.jsonify(query.first())\n\n@user_blueprint.route('/deletar/<identificador>', methods=['GET'])\ndef deletar(identificador):\n \"\"\"\n Deleta um usuario \n \"\"\"\n # Faz a query em busca de um user especifico\n User.query.filter(User.id == identificador).delete()\n # Salva as alteracoes\n current_app.db.session.commit()\n return jsonify('Deletado')" }, { "alpha_fraction": 0.6961789131164551, "alphanum_fraction": 0.7045666575431824, "avg_line_length": 26.538461685180664, "blob_id": "477a4757734841640dd61334b25c9eddd3c2c599", "content_id": "979c7f18da048332c62d7842a735e3cad6a0520c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1073, "license_type": "no_license", "max_line_length": 62, "num_lines": 39, "path": "/app/models.py", "repo_name": "deharahawa/batida-ponto", "src_encoding": "UTF-8", "text": "from flask_sqlalchemy import SQLAlchemy\nfrom flask_rest_jsonapi import ResourceDetail, ResourceList\nfrom .serializer import UserSchema\n\ndb = SQLAlchemy()\n\ndef configure(app):\n \"\"\"\n Factory para poder configurar\n \"\"\"\n # Inicializa o app\n db.init_app(app)\n with app.app_context():\n # Cria as alteracoes usando o contexto\n db.create_all()\n app.db = db\n\nclass User(db.Model):\n \"\"\"\n Define a class que reprenta o model do User\n \"\"\"\n id = db.Column(db.Integer, primary_key=True)\n nome_completo = db.Column(db.String(255))\n cpf = db.Column(db.String(11))\n email = db.Column(db.String(255))\n data_cadastro = db.Column(db.DateTime)\n\n\nclass Ponto(db.Model):\n \"\"\"\n Define a class que reprenta o model do Ponto\n \"\"\"\n id = db.Column(db.Integer, primary_key=True)\n # Define a chave estrangeira do relacionamento 1 para muitos\n user_id = db.Column(db.Integer, db.ForeignKey('user.id'))\n # Define o relacionamento entre user e pontos\n user = db.relationship('User', backref='checks')\n data_batida = db.Column(db.DateTime)\n tipo_batida = db.Column(db.Integer)" } ]
6
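The `calcula_horas` view in the batida-ponto row above reconstructs worked hours by regex-splitting ISO timestamps and special-casing overnight shifts. The same total falls out of plain `datetime` arithmetic; a minimal sketch, assuming the stored `data_batida` strings are ISO-8601 as produced by `datetime.now()`, with the repo's pair-entradas-with-saidas-by-index strategy:

```python
# Hedged sketch: the hour-counting logic of checks.py above, redone with
# datetime arithmetic (Python 3.7+ for fromisoformat) instead of regex
# splitting. Overnight shifts need no special case here.
from datetime import datetime

def horas_trabalhadas(entradas, saidas):
    """Sum worked hours by pairing each clock-in with the matching clock-out."""
    total = 0.0
    for ent, sai in zip(entradas, saidas):
        delta = datetime.fromisoformat(sai) - datetime.fromisoformat(ent)
        if delta.total_seconds() > 0:  # skip inconsistent pairs
            total += delta.total_seconds() / 3600.0
    return "%.2f horas" % total

print(horas_trabalhadas(["2020-07-28T22:00:00"], ["2020-07-29T06:30:00"]))  # 8.50 horas
```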
Fujiki-Nakamura/ConvLSTM
https://github.com/Fujiki-Nakamura/ConvLSTM
eb8f29a989234c467add737c9c4a7556008f2cf1
541612b2cc17bd91179281878bf0caea3716f0fe
eba2ab2ef09b90234cab1d5211d464d0426af994
refs/heads/master
2022-04-15T06:10:39.800766
2020-04-07T00:56:13
2020-04-07T00:56:13
206,240,604
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.6501457691192627, "alphanum_fraction": 0.6518950462341309, "avg_line_length": 28.568965911865234, "blob_id": "32516037709083f59fbb50f86cd7af5a8c5d1626", "content_id": "070c27382acdf3e9f96bff6c676a33f280cabac1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1715, "license_type": "no_license", "max_line_length": 102, "num_lines": 58, "path": "/utils.py", "repo_name": "Fujiki-Nakamura/ConvLSTM", "src_encoding": "UTF-8", "text": "import os\nimport shutil\nimport torch\nfrom torch import nn, optim\n\n\ndef get_loss_fn(args):\n if args.loss.lower().startswith('bce'):\n loss_fn = nn.BCEWithLogitsLoss(reduction=args.reduction)\n elif args.loss.lower().startswith('mse'):\n loss_fn = nn.MSELoss(reduction=args.reduction)\n else:\n raise NotImplementedError\n return loss_fn\n\n\ndef get_optimizer(model, args):\n if args.optim.lower() == 'adam':\n optimizer = optim.Adam(\n model.parameters(),\n lr=args.lr, betas=args.betas, weight_decay=args.weight_decay)\n elif args.optim.lower() == 'rmsprop':\n optimizer = optim.RMSprop(\n model.parameters(),\n lr=args.lr, alpha=0.99, weight_decay=args.weight_decay)\n return optimizer\n\n\ndef get_scheduler(optimizer, args):\n if args.scheduler.lower() == 'multisteplr':\n scheduler = optim.lr_scheduler.MultiStepLR(\n optimizer, args.milestones, args.gamma)\n else:\n return None\n return scheduler\n\n\ndef get_logger(log_file):\n from logging import getLogger, FileHandler, StreamHandler, Formatter, DEBUG, INFO, WARNING # noqa\n fh = FileHandler(log_file)\n fh.setLevel(DEBUG)\n sh = StreamHandler()\n sh.setLevel(INFO)\n for handler in [fh, sh]:\n formatter = Formatter('%(asctime)s - %(message)s')\n handler.setFormatter(formatter)\n logger = getLogger('log')\n logger.setLevel(INFO)\n logger.addHandler(fh)\n logger.addHandler(sh)\n return logger\n\n\ndef save_checkpoint(state, is_best, log_dir):\n filename = os.path.join(log_dir, 'checkpoint.pt')\n torch.save(state, filename)\n if is_best:\n shutil.copyfile(filename, os.path.join(log_dir, 'best.pt'))\n" }, { "alpha_fraction": 0.6250814199447632, "alphanum_fraction": 0.6423452496528625, "avg_line_length": 39.394737243652344, "blob_id": "564d78546e64543db293c193094392e612aec41d", "content_id": "d9db95318deabfc7e04611080432d648987408b3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3070, "license_type": "no_license", "max_line_length": 86, "num_lines": 76, "path": "/visualize.py", "repo_name": "Fujiki-Nakamura/ConvLSTM", "src_encoding": "UTF-8", "text": "import argparse\nfrom collections import OrderedDict\nimport os\n\nimport numpy as np\n\nimport torch\nfrom torch.utils.data import DataLoader\n\nfrom MovingMNIST import MovingMNIST\nimport models\n\n\ndef main(args):\n os.chmod(args.log_dir, 0o0777)\n\n test_set = MovingMNIST(root='./data/test', train=False, download=True)\n test_loader = DataLoader(test_set, batch_size=args.batch_size, shuffle=False)\n\n checkpoint = torch.load(args.checkpoint)\n new_state_dict = OrderedDict()\n for k, v in iter(checkpoint['state_dict'].items()):\n new_k = k.replace('module.', '')\n new_state_dict[new_k] = v\n model = models.__dict__[args.model](args)\n model.load_state_dict(new_state_dict)\n model.to(args.device)\n model.eval()\n\n inpts = np.zeros((len(test_set), 10, args.height, args.width))\n preds = np.zeros((len(test_set), 10, args.height, args.width))\n trues = np.zeros((len(test_set), 10, args.height, args.width))\n for batch_i, (inputs, 
targets) in enumerate(test_loader):\n inputs, targets = inputs.unsqueeze(2), targets.unsqueeze(2)\n inputs, targets = inputs.float() / 255., targets.float() / 255.\n inputs, targets = inputs.to(args.device), targets.to(args.device)\n with torch.no_grad():\n outputs = model(inputs)\n outputs = outputs.squeeze(2).cpu().numpy()\n targets = targets.squeeze(2).cpu().numpy()\n inputs = inputs.squeeze(2).cpu().numpy()\n inpts[batch_i * args.batch_size:(batch_i + 1) * args.batch_size] = inputs\n preds[batch_i * args.batch_size:(batch_i + 1) * args.batch_size] = outputs\n trues[batch_i * args.batch_size:(batch_i + 1) * args.batch_size] = targets\n\n inpts.dump(os.path.join(args.log_dir, 'inpts.npy'))\n preds.dump(os.path.join(args.log_dir, 'preds.npy'))\n trues.dump(os.path.join(args.log_dir, 'trues.npy'))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n # network\n parser.add_argument('--model', type=str, default='convlstm_3_layers')\n parser.add_argument('--height', type=int, default=64)\n parser.add_argument('--width', type=int, default=64)\n parser.add_argument('--channels', type=int, default=1)\n # training\n parser.add_argument('--epochs', type=int, default=100)\n parser.add_argument('--batch_size', type=int, default=128)\n parser.add_argument('--device', type=int, default=0)\n # optim\n parser.add_argument('--optim', type=str, default='adam')\n parser.add_argument('--lr', type=float, default=0.001)\n parser.add_argument('--betas', nargs='+', type=float, default=(0.9, 0.999))\n parser.add_argument('--weight_decay', type=float, default=0.)\n parser.add_argument('--scheduler', type=str, default='')\n parser.add_argument('--milestones', nargs='+', type=int, default=[30, ])\n parser.add_argument('--gamma', nargs='+', type=float, default=0.9)\n # misc\n parser.add_argument('--log_dir', type=str, default='./log')\n\n parser.add_argument('--checkpoint', type=str)\n\n args, _ = parser.parse_known_args()\n main(args)\n" }, { "alpha_fraction": 0.5675768256187439, "alphanum_fraction": 0.580628514289856, "avg_line_length": 38.3445930480957, "blob_id": "1b821ad391e6c67c30e0611b2c0f706050326173", "content_id": "6856e5dd9cf5f63eafc1d52799e0996f84707f41", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5823, "license_type": "no_license", "max_line_length": 82, "num_lines": 148, "path": "/main.py", "repo_name": "Fujiki-Nakamura/ConvLSTM", "src_encoding": "UTF-8", "text": "import argparse\nimport os\n\nimport torch\nfrom torch import nn\nfrom torch.utils.data import DataLoader\nfrom tensorboardX import SummaryWriter\nfrom tqdm import tqdm # noqa\n\nfrom MovingMNIST import MovingMNIST\nimport models\nfrom utils import get_logger, get_optimizer, save_checkpoint\nfrom utils import get_scheduler, get_loss_fn\n\n\ndef main(args):\n start_epoch = 1\n best_loss = 1e+6\n\n if not os.path.exists(args.log_dir):\n os.makedirs(args.log_dir)\n os.chmod(args.log_dir, 0o0777)\n logger = get_logger(os.path.join(args.log_dir, 'main.log'))\n logger.info(args)\n\n writer = SummaryWriter(args.log_dir)\n\n # data\n train_set = MovingMNIST(root='./data/train', train=True, download=True)\n test_set = MovingMNIST(root='./data/test', train=False, download=True)\n train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True)\n test_loader = DataLoader(test_set, batch_size=args.batch_size, shuffle=False)\n\n # network\n model = models.__dict__[args.model](args=args)\n model = nn.DataParallel(model)\n device = torch.device(\"cuda:0\" if 
torch.cuda.is_available() else \"cpu\")\n model = model.to(device)\n # training\n criterion = get_loss_fn(args)\n optimizer = get_optimizer(model, args)\n scheduler = get_scheduler(optimizer, args)\n\n if args.resume:\n if os.path.isfile(args.resume):\n checkpoint = torch.load(args.resume)\n start_epoch = checkpoint['epoch'] + 1\n best_loss = checkpoint['best_loss']\n model.load_state_dict(checkpoint['state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n logger.info('Loaded checkpoint {} (epoch {})'.format(\n args.resume, start_epoch - 1))\n else:\n raise IOError('No such file {}'.format(args.resume))\n\n for epoch_i in range(start_epoch, args.epochs + 1):\n model.train()\n losses = 0.\n for i, (inputs, targets) in enumerate(train_loader):\n bs, ts, h, w = targets.size()\n inputs = inputs.unsqueeze(2)\n inputs, targets = inputs.float() / 255., targets.float() / 255.\n inputs, targets = inputs.to(args.device), targets.to(args.device)\n outputs = model(inputs)\n\n # (bs ,ts, c, h, w) -> (bs, ts, h, w) -> (ts, bs, h, w)\n outputs = outputs.squeeze(2).permute(1, 0, 2, 3)\n # (bs, ts, h, w) -> (ts, bs, h, w)\n targets = targets.permute(1, 0, 2, 3)\n loss = 0.\n for t_i in range(ts):\n loss += criterion(outputs[t_i], targets[t_i]) / bs\n\n losses += loss.item() * bs\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n logger.debug('Train/Batch {}/{}'.format(i + 1, len(train_loader)))\n\n model.eval()\n test_losses = 0.\n for i, (inputs, targets) in enumerate(test_loader):\n bs, ts, h, w = targets.size()\n inputs = inputs.unsqueeze(2)\n inputs, targets = inputs.float() / 255., targets.float() / 255.\n inputs, targets = inputs.to(args.device), targets.to(args.device)\n with torch.no_grad():\n outputs = model(inputs)\n # (bs ,ts, c, h, w) -> (bs, ts, h, w) -> (ts, bs, h, w)\n outputs = outputs.squeeze(2).permute(1, 0, 2, 3)\n # (bs, ts, h, w) -> (ts, bs, h, w)\n targets = targets.permute(1, 0, 2, 3)\n loss = 0.\n for t_i in range(ts):\n loss += criterion(outputs[t_i], targets[t_i]) / bs\n test_losses += loss.item() * bs\n logger.debug('Test/Batch {}/{}'.format(i + 1, len(test_loader)))\n\n train_loss = losses / len(train_set)\n test_loss = test_losses / len(test_set)\n writer.add_scalar('Train/{}'.format(args.loss), train_loss, epoch_i)\n writer.add_scalar('Test/{}'.format(args.loss), test_loss, epoch_i)\n logger.info('Epoch {} Train/Loss {:.4f} Test/Loss {:.4f}'.format(\n epoch_i, train_loss, test_loss))\n\n is_best = test_loss < best_loss\n if test_loss < best_loss:\n best_loss = test_loss\n save_checkpoint({\n 'epoch': epoch_i,\n 'state_dict': model.state_dict(),\n 'test_loss': test_loss,\n 'best_loss': best_loss,\n 'optimizer': optimizer.state_dict(),\n }, is_best, args.log_dir)\n\n if scheduler is not None:\n scheduler.step()\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n # network\n parser.add_argument('--model', type=str, default='convlstm_3_layers')\n parser.add_argument('--height', type=int, default=64)\n parser.add_argument('--width', type=int, default=64)\n parser.add_argument('--channels', type=int, default=1)\n # training\n parser.add_argument('--epochs', type=int, default=100)\n parser.add_argument('--batch_size', type=int, default=128)\n parser.add_argument('--device', type=int, default=0)\n parser.add_argument('--loss', type=str, default='mse')\n parser.add_argument('--reduction', type=str, default='mean')\n # optim\n parser.add_argument('--optim', type=str, default='adam')\n parser.add_argument('--lr', type=float, default=0.001)\n 
parser.add_argument('--betas', nargs='+', type=float, default=(0.9, 0.999))\n parser.add_argument('--weight_decay', type=float, default=0.)\n parser.add_argument('--scheduler', type=str, default='')\n parser.add_argument('--milestones', nargs='+', type=int)\n parser.add_argument('--gamma', nargs='+', type=float)\n # misc\n parser.add_argument('--log_dir', type=str, default='./log')\n parser.add_argument('--resume', type=str, default=None)\n\n args, _ = parser.parse_known_args()\n main(args)\n" }, { "alpha_fraction": 0.7642276287078857, "alphanum_fraction": 0.8130081295967102, "avg_line_length": 40, "blob_id": "6596e5dcd8dd4b193dab71b7edcc2f1eae049c6a", "content_id": "cbd30d1f7f6e727ad127a154116bff60410b5995", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 123, "license_type": "no_license", "max_line_length": 48, "num_lines": 3, "path": "/models/__init__.py", "repo_name": "Fujiki-Nakamura/ConvLSTM", "src_encoding": "UTF-8", "text": "from .convlstm_1_layer import convlstm_1_layer\nfrom .model3 import model3\nfrom .convlstm_3_layers import convlstm_3_layers\n" }, { "alpha_fraction": 0.5371198654174805, "alphanum_fraction": 0.5818426012992859, "avg_line_length": 33.9375, "blob_id": "011107f04728aa119c5f2023eb690cfcdc13f02d", "content_id": "3fb1116f31b4033e7ee2b69343962f60bde1cbc8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2236, "license_type": "no_license", "max_line_length": 75, "num_lines": 64, "path": "/models/model3.py", "repo_name": "Fujiki-Nakamura/ConvLSTM", "src_encoding": "UTF-8", "text": "import torch\nfrom torch import nn\nfrom .convlstm import ConvLSTM\n\n\nclass Encoder(nn.Module):\n def __init__(self, args):\n super(Encoder, self).__init__()\n\n self.convlstm1 = ConvLSTM(\n input_size=(args.height, args.width), input_dim=args.channels,\n hidden_dim=[128, 64, 64], kernel_size=(5, 5), num_layers=3,\n batch_first=True, bias=True, return_all_layers=True)\n self.convlstm2 = ConvLSTM(\n input_size=(args.height, args.width), input_dim=64,\n hidden_dim=[128, 64, 64], kernel_size=(5, 5), num_layers=3,\n batch_first=True, bias=True, return_all_layers=True)\n\n def forward(self, x):\n out1, hidden1 = self.convlstm1(x)\n out2, hidden2 = self.convlstm2(out1[-1])\n return out2[-1], [hidden1, hidden2]\n\n\nclass Decoder(nn.Module):\n def __init__(self, args):\n super(Decoder, self).__init__()\n\n self.convlstm1 = ConvLSTM(\n input_size=(args.height, args.width), input_dim=64,\n hidden_dim=[128, 64, 64], kernel_size=(5, 5), num_layers=3,\n batch_first=True, bias=True, return_all_layers=False)\n self.convlstm2 = ConvLSTM(\n input_size=(args.height, args.width), input_dim=64,\n hidden_dim=[128, 64, 64], kernel_size=(5, 5), num_layers=3,\n batch_first=True, bias=True, return_all_layers=False)\n\n def forward(self, x, hidden_list=None):\n out1, hidden1 = self.convlstm1(x, hidden_list[0])\n out2, hidden2 = self.convlstm2(out1[0], hidden_list[1])\n return out1[-1], out2[-1]\n\n\nclass Model3(nn.Module):\n def __init__(self, args):\n super(Model3, self).__init__()\n\n self.encoder = Encoder(args)\n self.decoder = Decoder(args)\n self.conv1x1 = nn.Conv2d(\n 2 * 64, args.channels, kernel_size=(1, 1), stride=1, padding=0)\n\n def forward(self, x):\n out_e, hidden_list = self.encoder(x)\n out1, out2 = self.decoder(out_e, hidden_list)\n out_d = torch.cat([out1, out2], dim=2)\n bs, t, c, h, w = out_d.size()\n out = self.conv1x1(out_d.view(bs * t, c, h, w))\n return out.view(bs, t, -1, 
h, w)\n\n\ndef model3(args):\n model3 = Model3(args)\n return model3\n" }, { "alpha_fraction": 0.553164541721344, "alphanum_fraction": 0.5791139006614685, "avg_line_length": 28.79245376586914, "blob_id": "e56fc00919ada4fd009ff2f3d525f71c0618ef61", "content_id": "3ef2159b1d6455c5168a31e7818d1d5f03d4af01", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1580, "license_type": "no_license", "max_line_length": 74, "num_lines": 53, "path": "/models/convlstm_1_layer.py", "repo_name": "Fujiki-Nakamura/ConvLSTM", "src_encoding": "UTF-8", "text": "import torch\nfrom torch import nn\nfrom .convlstm import ConvLSTM\n\n\nclass Encoder(nn.Module):\n def __init__(self, args):\n super(Encoder, self).__init__()\n\n self.convlstm1 = ConvLSTM(\n input_size=(args.height, args.width), input_dim=args.channels,\n hidden_dim=[64,], kernel_size=(5, 5), num_layers=1,\n batch_first=True, bias=True, return_all_layers=True)\n\n def forward(self, x):\n out1, hidden1 = self.convlstm1(x)\n return out1[-1], [hidden1]\n\n\nclass Decoder(nn.Module):\n def __init__(self, args):\n super(Decoder, self).__init__()\n\n self.convlstm1 = ConvLSTM(\n input_size=(args.height, args.width), input_dim=64,\n hidden_dim=[64,], kernel_size=(5, 5), num_layers=1,\n batch_first=True, bias=True, return_all_layers=True)\n\n def forward(self, x, hidden_list=None):\n out1, hidden1 = self.convlstm1(x, hidden_list[0])\n return out1\n\n\nclass Model(nn.Module):\n def __init__(self, args):\n super(Model, self).__init__()\n\n self.encoder = Encoder(args)\n self.decoder = Decoder(args)\n self.conv1x1 = nn.Conv2d(\n 64, args.channels, kernel_size=(1, 1), stride=1, padding=0)\n\n def forward(self, x):\n out_e, hidden_list = self.encoder(x)\n out1 = self.decoder(out_e, hidden_list)\n out_d = torch.cat(out1, dim=2)\n bs, t, c, h, w = out_d.size()\n out = self.conv1x1(out_d.view(bs * t, c, h, w))\n return out.view(bs, t, -1, h, w)\n\n\ndef convlstm_1_layer(args):\n return Model(args) \n" } ]
6
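In `main.py` of the ConvLSTM row above, the loss is accumulated one timestep at a time over time-major tensors, each term divided by the batch size. Since the per-timestep calls are independent, the loop collapses into a single criterion call; a sketch with random tensors (the 10×4×64×64 shapes are placeholders, not the repo's training configuration):

```python
# Hedged sketch: the per-timestep loss accumulation from main.py, collapsed
# into one criterion call. With reduction='mean' the loop
#   for t_i in range(ts): loss += criterion(outputs[t_i], targets[t_i]) / bs
# equals the whole-tensor mean rescaled by ts / bs.
import torch
from torch import nn

criterion = nn.MSELoss(reduction='mean')

ts, bs, h, w = 10, 4, 64, 64
outputs = torch.rand(ts, bs, h, w)
targets = torch.rand(ts, bs, h, w)

# Loop form, as in the repo:
loss_loop = sum(criterion(outputs[t], targets[t]) / bs for t in range(ts))

# Single-call form: mean over all ts*bs*h*w elements, rescaled to match.
loss_once = criterion(outputs, targets) * ts / bs

print(torch.allclose(loss_loop, loss_once))  # True
```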
Kelechukwu/kelechukwu_nwosu_test
https://github.com/Kelechukwu/kelechukwu_nwosu_test
dc4f7df6670534b3f4ac769c534086b0ecfab162
8c850129bc03c2bc28af150b46da74b05a592061
121149514c3abdf2777b9f4d974d73ae485bb482
refs/heads/master
2020-05-21T13:15:28.173817
2019-05-13T11:42:46
2019-05-13T11:42:46
186,066,792
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6576576828956604, "alphanum_fraction": 0.6756756901741028, "avg_line_length": 21.399999618530273, "blob_id": "fb1b561fa9aeb7fc4d8335cdb50d4a0aed22ea6d", "content_id": "aca1d0a9098225426f2efbc6fc25f28cce79c3b8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 111, "license_type": "permissive", "max_line_length": 32, "num_lines": 5, "path": "/2.versioning/setup.py", "repo_name": "Kelechukwu/kelechukwu_nwosu_test", "src_encoding": "UTF-8", "text": "from setuptools import setup\n\nsetup(name=\"Versioning library\",\n version=\"0.0\",\n packages=[\"version\"])" }, { "alpha_fraction": 0.8787878751754761, "alphanum_fraction": 0.8787878751754761, "avg_line_length": 33, "blob_id": "38c3386190b7e3b9488596c1a6ce87d891d042c6", "content_id": "bf28d7b3502c03ba7600da1f31d704e77845a46d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 33, "license_type": "permissive", "max_line_length": 33, "num_lines": 1, "path": "/3.GeoLRU/lru/__init__.py", "repo_name": "Kelechukwu/kelechukwu_nwosu_test", "src_encoding": "UTF-8", "text": "from lru.LRUCache import LRUCache" }, { "alpha_fraction": 0.7223950028419495, "alphanum_fraction": 0.7317262887954712, "avg_line_length": 25.244897842407227, "blob_id": "4c0f07d8ec7c2db6bea1dae4b8826ec69a2fea44", "content_id": "12b1ba7555b0e5cfcc0f1dc8962326cd156d7939", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1286, "license_type": "permissive", "max_line_length": 204, "num_lines": 49, "path": "/3.GeoLRU/README.md", "repo_name": "Kelechukwu/kelechukwu_nwosu_test", "src_encoding": "UTF-8", "text": "# How to install Geo LRU library\n**Version**: Python3.X \n```bash\n$ cd 3.GeoLRU\n$ sudo python setup.py install\n```\n\n## How to Use\nAfter the library has been installed. You can import it and start using in your projects as a decorator. First you have to start the websocket server to allow for communication between distributed caches.\nExample:\n1. start websocket server\n```bash\n$ python lru/lru_socket.py\n```\n**NOTE** : There should be only one websocket server for a single cache.\n\n2. 
Import LRUCache and use as decorator in your code\nExample:\n```python \nimport time\nfrom lru.LRUCache import LRUCache\n\n@LRUCache(cache_size=9, validity_in_minutes=0.5,is_master_node=True)\ndef expensive_function(num):\n\n print(\"computing...\")\n time.sleep(2)\n result = num * num\n return result\n\n\n@LRUCache(master_node_hostname=\"localhost\")\ndef not_so_expensive_function(num):\n\n print(\"computing...\")\n time.sleep(2)\n result = num * num\n return result\n\nwhile True:\n print(expensive_function(2))\n print(not_so_expensive_function(4))\n print(expensive_function(5))\n```\n\n## TODO\n- Write exhaustive test cases for the LRUCache library\n- Make listener re-establish connection to network if connection breaks\n- Make publisher re-establish connection to network if connection breaks\n" }, { "alpha_fraction": 0.6329987645149231, "alphanum_fraction": 0.6355081796646118, "avg_line_length": 29.673076629638672, "blob_id": "95e4ae106ba4c3a57fe5cae102b5e77918cc062d", "content_id": "7ffde5368196180c8961742f0db7b8d36b183e68", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1594, "license_type": "permissive", "max_line_length": 85, "num_lines": 52, "path": "/3.GeoLRU/lru/lru_socket.py", "repo_name": "Kelechukwu/kelechukwu_nwosu_test", "src_encoding": "UTF-8", "text": "import asyncio\nimport websockets\nfrom threading import Thread\n\nclass SocketServer:\n # clients/minions list \n CLIENTS = set()\n\n @staticmethod\n def serve(port=6789, loop=None):\n if loop is None:\n asyncio.get_event_loop().run_until_complete(\n websockets.serve(SocketServer.server_function, 'localhost', port))\n asyncio.get_event_loop().run_forever()\n else:\n asyncio.set_event_loop(loop)\n loop.run_until_complete(\n websockets.serve(SocketServer.server_function, 'localhost', port))\n print(\"Cache Server started\")\n loop.run_forever()\n\n\n @staticmethod\n async def server_function(websocket, path):\n # register(websocket) sends user_event() to websocket\n await SocketServer.register(websocket)\n try:\n async for message in websocket:\n\n await SocketServer.broadcast_update(message)\n\n finally:\n await SocketServer.unregister(websocket)\n \n @staticmethod\n async def register(websocket):\n SocketServer.CLIENTS.add(websocket)\n \n @staticmethod\n async def unregister(websocket):\n SocketServer.CLIENTS.remove(websocket)\n \n @staticmethod\n async def broadcast_update(message):\n # asyncio.wait doesn't accept an empty list\n if SocketServer.CLIENTS:\n await asyncio.wait([user.send(message) for user in SocketServer.CLIENTS])\n\nif __name__ == \"__main__\":\n event_loop = asyncio.new_event_loop()\n print(\"Starting Cache server...\")\n SocketServer.serve()" }, { "alpha_fraction": 0.6210405826568604, "alphanum_fraction": 0.6264870166778564, "avg_line_length": 37.12022018432617, "blob_id": "fe9703d0cde287bc054794a8006ab8471f5586b7", "content_id": "437080d91294d5cd5efb4083dc82dff0f60a2ce7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6977, "license_type": "permissive", "max_line_length": 170, "num_lines": 183, "path": "/3.GeoLRU/lru/LRUCache.py", "repo_name": "Kelechukwu/kelechukwu_nwosu_test", "src_encoding": "UTF-8", "text": "import asyncio\nimport time\nfrom datetime import datetime, timedelta\nimport json\nimport pickle\nfrom threading import Thread, Lock, Event\nimport websockets\n\nfrom lru.lru_socket import SocketServer\n\n\ndef LRUCache(function=None, cache_size=100, 
validity_in_minutes=60, is_master_node=False, master_node_hostname=None):\n \"\"\"Wrapper function to the main decorator class _LRUCache. _LRUCache stores records in\n key,value pairs. Records expire after validity_in_minutes(default=60 minutes) is reached.\n The cache is Geo distributed and broadcasts changes and updates to replica nodes via\n websocket on port 6789.\n\n Keyword arguments:\n cache_size : the maximum size of the cache. (default = 100) \n validity_in_minutes : cache validity in minutes. This can be a decimal value. (default = 60)\n is_master_node : specifies if the current instance is the master node in its cluster. (default = False)\n master_node_hostname: specify the hostname of the master node if this cache is not the master ( default = None)\n \"\"\"\n\n # Start a websocket server if this cache is the master\n # TODO: verify that there isn't an already existing master node before starting the webserver\n # if is_master_node:\n # event_loop = asyncio.new_event_loop()\n # t = Thread(target=SocketServer.serve, kwargs={'loop': event_loop},daemon=True)\n # t.start()\n \n\n if function:\n return _LRUCache(function)\n else:\n def wrapper(function):\n return _LRUCache(function, cache_size, validity_in_minutes, master_node_hostname, is_master_node)\n\n return wrapper\n\n\nclass _LRUCache:\n \"\"\"Main decorator for the LRU(Least Recently Used) Cache. This stores records in\n key,value pairs. Records expire after validity_in_minutes(default=60 minutes) is reached.\n The cache is Geo distributed and broadcasts changes and updates to replica nodes via\n websocket on port 6789.\n\n Keyword arguments:\n cache_size : the maximum size of the cache. If this limit is reached the least recently accessed record is deleted to make room for new records. (default = 100) \n validity_in_minutes : cache validity in minutes. This can be a decimal value. (default = 60)\n is_master_node : specifies if the current instance is the master node in its cluster. 
(default = False)\n \"\"\"\n\n MASTER_HOST_NAME = None\n VALIDITY = None\n WEB_SOCKET_PORT = 6789\n\n\n def __init__(self, func, cache_size=100, validity_in_minutes=60, master_node_hostname=None, is_master_node=False):\n self.func = func\n self.cache = {\n \"creation_time\":datetime.now().timestamp(),\n \"data\":{},\n }\n # access list of the keys - the most recently accessed keys are at the end, the least recently used key is at the front\n self.access_list = []\n self.limit = cache_size\n\n \n\n # set validity_in_minutes \n _LRUCache.VALIDITY = validity_in_minutes * 60\n\n # set SocketServer hostname \n _LRUCache.MASTER_HOST_NAME = master_node_hostname\n\n # if it is not the master then start a listener to \n # listen for updates\n if not is_master_node:\n event_loop = asyncio.new_event_loop()\n t = Thread(target=self._listen_for_updates, kwargs={'loop': event_loop})\n t.start()\n else:\n _LRUCache.MASTER_HOST_NAME = \"localhost\"\n\n\n def __call__(self, *args, **kwargs):\n\n # First try to expire the cache\n self._attempt_to_expire(self.cache)\n\n\n # if the args already exist in the cache\n if args in self.cache.get(\"data\"):\n self._move_to_end(args)\n \n self.publish_update()\n return self.cache[\"data\"][args]\n \n # if the cache limit is reached - remove the oldest record\n if len(self.cache[\"data\"]) == self.limit:\n oldest_key = self.access_list.pop(0)\n\n # delete record from self.cache\n del self.cache[\"data\"][oldest_key]\n\n \n # compute the result and cache it - if not in cache\n result = self.func(*args, **kwargs)\n self.cache[\"data\"][args] = result\n\n # append new keys to the end of the access_list register\n self.access_list.append(args)\n\n self.publish_update()\n\n return result\n\n\n def _move_to_end(self, args):\n \"\"\"This function moves the most recently accessed key to the end\n of the access_list\n\n Params:\n args: the key of the Key-Value pair to be read.\n \"\"\"\n\n for index in range(len(self.access_list)):\n if self.access_list[index] == args:\n self.access_list += [self.access_list.pop(index)]\n break\n\n def _attempt_to_expire(self, cache):\n \"\"\"This method expires _LRUCache if the validity_in_minutes time has elapsed.\n It will return True if it expired the Cache and False if it did not\n because the time has not elapsed.\n \"\"\"\n diff = datetime.now().timestamp() - self.cache[\"creation_time\"]\n if diff >= _LRUCache.VALIDITY:\n self.cache[\"creation_time\"] = datetime.now().timestamp()\n self.cache[\"data\"] = dict()\n\n self.access_list = []\n\n return True\n else:\n return False\n\n def publish_update(self):\n asyncio.get_event_loop().run_until_complete(self._publisher())\n\n\n # TODO: Make publisher re-establish connection to network if connection breaks\n async def _publisher(self):\n \"\"\"This method publishes updates to the LRUCache so that other distributed caches can receive them \"\"\"\n async with websockets.connect(f\"ws://{_LRUCache.MASTER_HOST_NAME}:{_LRUCache.WEB_SOCKET_PORT}\") as socket:\n message = {\n \"cache\": self.cache,\n \"access_list\": self.access_list\n }\n message_bytes = pickle.dumps(message)\n await socket.send(message_bytes)\n\n def _listen_for_updates(self, loop):\n \"\"\"This method makes _listener run perpetually in the background \"\"\"\n asyncio.set_event_loop(loop)\n loop.run_until_complete(self._listener())\n loop.run_forever()\n\n # TODO: Make listener re-establish connection to network if connection breaks\n async def _listener(self):\n \"\"\"This method listens for updates to the cache state and replicates updates locally\"\"\"\n if 
_LRUCache.MASTER_HOST_NAME:\n async with websockets.connect(f\"ws://{_LRUCache.MASTER_HOST_NAME}:{_LRUCache.WEB_SOCKET_PORT}\") as socket:\n while True:\n raw_response = await socket.recv()\n response = pickle.loads(raw_response)\n with Lock():\n self.cache = response.get(\"cache\")\n self.access_list = response.get(\"access_list\")\n else:\n raise Exception(\"[FATAL] master_node_hostname value is required for non-master LRUCache\")\n\n" }, { "alpha_fraction": 0.636215329170227, "alphanum_fraction": 0.6370310187339783, "avg_line_length": 32.16216278076172, "blob_id": "4fe8f0e9867ea34136b84f932469f5cedd8ace62", "content_id": "c04d0088289003f08fbac62467298fe9217a98e3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1226, "license_type": "permissive", "max_line_length": 83, "num_lines": 37, "path": "/2.versioning/version/utils.py", "repo_name": "Kelechukwu/kelechukwu_nwosu_test", "src_encoding": "UTF-8", "text": "# Question 2: Version Comparison\nfrom version.helpers import tokenize_version_string, add_filler\n\ndef compare(version_a, version_b):\n \"\"\"This function receives two version strings and returns a string\n indicating the inequality/equality between version_a and version_b\n Args:\n version_a (string): the first version string \n version_b (string): the second version string\n\n Returns:\n string: this is a string indicating the relationship between\n version_a and version_b\n Example:\n version_a is greater than version_b\n \"\"\"\n\n sign = \"==\"\n\n # first case if both strings are the same then they are equal\n if version_a == version_b:\n return f\"{version_a} {sign} {version_b}\"\n \n version_a_chars = tokenize_version_string(version_a)\n version_b_chars = tokenize_version_string(version_b)\n\n version_a_chars, version_b_chars = add_filler(version_a_chars, version_b_chars)\n\n for x in range(len(version_a_chars)):\n # compare numerically when both tokens are digits (so e.g. \"10\" > \"9\"),\n # otherwise fall back to plain string comparison\n token_a, token_b = version_a_chars[x], version_b_chars[x]\n if token_a.isdigit() and token_b.isdigit():\n token_a, token_b = int(token_a), int(token_b)\n if token_a > token_b:\n sign = \">\"\n break\n elif token_a < token_b:\n sign = \"<\"\n break\n\n return f\"{version_a} {sign} {version_b}\"" }, { "alpha_fraction": 0.613043487071991, "alphanum_fraction": 0.6315217614173889, "avg_line_length": 30.758621215820312, "blob_id": "92b567e3a1b858cf4771447acac9c5e1e3d3a6c6", "content_id": "72311b2b49dd8836344e675d37291ad50f179505", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 920, "license_type": "permissive", "max_line_length": 76, "num_lines": 29, "path": "/1.overlap/overlap.py", "repo_name": "Kelechukwu/kelechukwu_nwosu_test", "src_encoding": "UTF-8", "text": "# Question 1: Overlapping Lines\n\ndef overlap(line_one, line_two):\n \"\"\"This function receives two lines and determines if they overlap\n or not.\n Args:\n line_one (tuple): The (x1,x2) co-ordinates of the first line.\n line_two (tuple): The (x1,x2) co-ordinates of the second line.\n\n Returns:\n bool: The return value. 
True for when line_one and line_two overlap,\n False otherwise.\n \"\"\"\n\n # Ensure that line_one starts before line_two\n if line_one[0] > line_two[0]:\n line_one, line_two = line_two, line_one\n \n # if line_two x1 is not between line_one x1 and x2\n # then there isn't an overlap\n if line_two[0] not in range(line_one[0], line_one[1]):\n return False\n \n # Following from the if statement above, if line_two x2 is\n # after line_one x2 then there is an overlap\n if line_two[1] > line_one[1]:\n return True\n\n return False" }, { "alpha_fraction": 0.5982081294059753, "alphanum_fraction": 0.601654052734375, "avg_line_length": 27.47058868408203, "blob_id": "e443730133afb588537e7afe65cf6b4096f70b41", "content_id": "f48eae56bf4702f9f1aa3e8674b779f1b221078b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1451, "license_type": "permissive", "max_line_length": 75, "num_lines": 51, "path": "/2.versioning/version/helpers.py", "repo_name": "Kelechukwu/kelechukwu_nwosu_test", "src_encoding": "UTF-8", "text": "def tokenize_version_string(version_string, delimiter=\".\"):\n \"\"\"This function breaks a version string down into individual tokens\n Returns:\n List(string)\n \"\"\"\n \n # remove prefixes if any\n version_string = version_string.split(\" \")[-1]\n\n tokens = []\n\n for value in version_string.split(delimiter):\n \n if len(value) > 1:\n # check if the value is a pure number or a combination of \n # digits and letters like 2b\n if not value.isdigit():\n for char in value:\n tokens.append(char)\n else:\n tokens.append(value)\n else:\n tokens.append(value)\n\n return tokens\n\ndef add_filler(version_string_token_a, version_string_token_b, filler=\"0\"):\n \"\"\"Say you want to compare two version strings of different lengths;\n this function will make both have equal length by adding fillers to the\n shorter one. 
Default filler is 0\n\n Returns:\n Tuple(string, string)\n \"\"\"\n\n\n a_length = len(version_string_token_a)\n b_length = len(version_string_token_b)\n\n if a_length == b_length:\n return version_string_token_a, version_string_token_b\n\n if a_length < b_length:\n diff = b_length - a_length\n version_string_token_a += [filler] * diff\n\n else:\n diff = a_length - b_length\n version_string_token_b += [filler] * diff\n\n return version_string_token_a, version_string_token_b" }, { "alpha_fraction": 0.5204248428344727, "alphanum_fraction": 0.5596405267715454, "avg_line_length": 36.67692184448242, "blob_id": "0f1a5eb2aa16741e8e01aa1dc00c6275e2b5b07c", "content_id": "6c84542aa8049595d30517207ef1d311c8366fc5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2448, "license_type": "permissive", "max_line_length": 95, "num_lines": 65, "path": "/2.versioning/tests/test_utils.py", "repo_name": "Kelechukwu/kelechukwu_nwosu_test", "src_encoding": "UTF-8", "text": "import os\nimport sys\nimport unittest\n\n# Add grandparent directory to sys.path \n# this is so that version module can be imported\n# when running tests directly \nsys.path.append(os.path.abspath(__file__ + \"/../../\"))\n\nfrom version.utils import compare\n\nclass TestVersionComparison(unittest.TestCase):\n def setUp(self):\n # load test examples\n\n self.greater_than = [\n (\"1.2\", \"1.1\"), # greater\n (\"2.2.1\", \"2.1.1\"), # greater from second digit\n (\"2.2.3\", \"2.2.1\"), # greater at the final digit\n (\"1.2\", \"1.1.3\"), # greater (combination of 2 and 3 digit version strings)\n (\"1.2b\", \"1.2a\"), # greater ( alphanumeric version strings)\n (\"1.2.b\", \"1.2.a\"), # greater ( alphanumeric version strings all separated by dots)\n ]\n\n self.less_than = [\n (\"0.1\", \"0.5\"), # less\n (\"0.1.1\", \"0.1.5\"), # less at final digit\n (\"0.1.1\", \"0.2.1\"), # less at second digit\n (\"0.1\", \"0.5.1\"), # less (combination of 2 and 3 digit version strings)\n (\"0.1a\", \"0.1b\"), # less ( alphanumeric version strings)\n (\"0.1.a\", \"0.1.b\"), # less ( alphanumeric version strings all separated by dots)\n ]\n\n self.equal = [\n (\"3.1\", \"3.1\"), # equal\n (\"3.8.1\", \"3.8.1\"), # equal\n (\"0.1a\", \"0.1a\"), # equal( alphanumeric)\n (\"0.1.a\", \"0.1.a\"), # equal ( alphanumeric version strings all separated by dots)\n (\"NT 1.1\", \"NT 1.1\"), # equal ( with prefix)\n ]\n\n \n def test_greater_than(self):\n for versions_tuple in self.greater_than:\n returned_string = compare(versions_tuple[0], versions_tuple[1])\n expectation = f\"{versions_tuple[0]} > {versions_tuple[1]}\"\n self.assertEqual(returned_string,expectation)\n\n\n def test_less_than(self):\n for versions_tuple in self.less_than:\n returned_string = compare(versions_tuple[0], versions_tuple[1])\n expectation = f\"{versions_tuple[0]} < {versions_tuple[1]}\"\n self.assertEqual(returned_string,expectation)\n\n\n def test_equal_to(self):\n for versions_tuple in self.equal:\n returned_string = compare(versions_tuple[0], versions_tuple[1])\n expectation = f\"{versions_tuple[0]} == {versions_tuple[1]}\"\n self.assertEqual(returned_string,expectation)\n\n\nif __name__ == '__main__':\n unittest.main()" }, { "alpha_fraction": 0.8787878751754761, "alphanum_fraction": 0.8787878751754761, "avg_line_length": 33, "blob_id": "dd94a495fd6f07682b621aa66433020fc3273881", "content_id": "74b305698a2b1e6a0facbb4a754fa0e30e51dd94", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": 
"Python", "length_bytes": 33, "license_type": "permissive", "max_line_length": 33, "num_lines": 1, "path": "/2.versioning/version/__init__.py", "repo_name": "Kelechukwu/kelechukwu_nwosu_test", "src_encoding": "UTF-8", "text": "from version.utils import compare" }, { "alpha_fraction": 0.6516128778457642, "alphanum_fraction": 0.6645161509513855, "avg_line_length": 25, "blob_id": "908fe521048bf6b8772f78157508e2ff32480ee8", "content_id": "de4e790c99af50ea8604545e6f227c0e3cdeaade", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 155, "license_type": "permissive", "max_line_length": 41, "num_lines": 6, "path": "/3.GeoLRU/setup.py", "repo_name": "Kelechukwu/kelechukwu_nwosu_test", "src_encoding": "UTF-8", "text": "from setuptools import setup\n\nsetup(name=\"Geo Distributed LRU library\",\n version=\"0.1\",\n install_requires=['websockets'],\n packages=[\"lru\"])" }, { "alpha_fraction": 0.6322417855262756, "alphanum_fraction": 0.6582703590393066, "avg_line_length": 31.189189910888672, "blob_id": "3ef0f00c41aa600d4d873d618f2cf9ab8a3f62c9", "content_id": "a2f4c7e2eaba4f826868fd649eb0f1f21a85a749", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1199, "license_type": "permissive", "max_line_length": 307, "num_lines": 37, "path": "/2.versioning/README.md", "repo_name": "Kelechukwu/kelechukwu_nwosu_test", "src_encoding": "UTF-8", "text": "# Background\nThis is a software library that accepts 2 version string as input and returns whether one is greater than, equal, or less than the other. As an example: “1.2” is greater than “1.1”. Please note that **this library uses the matematical symbols for greater than (>), equal(==), and less than(<) for brevity.**\n\n## How to install versioning library\n**Version**: Python3.X \n```bash\n$ cd 2.versioning\n$ python setup.py install\n```\n\n## Usage\nAfter the library has been installed. You can import it and start using in your projects\nExample:\n```python\n>>> from version import compare\n>>> compare(\"1.3\",\"4.5\")\n'1.3 < 4.5'\n>>> compare(\"1.3a\",\"1.3b\")\n'1.3a < 1.3b'\n>>> compare(\"1.3a\",\"1.3\")\n'1.3a > 1.3'\n```\n## Contributing\n### Testing\nThe versioning library has a `tests` directory in which all testcases reside. 
Below is a summary\n\n| Test file | Usage |\n| ----------------------|:--------------------------------------:|\n| tests/test_helpers.py | Tests for helper functions reside here |\n| tests/test_utils.py | This is where all the main library function tests reside |\n\n### How to run tests\n```bash\n$ cd versioning\n$ python tests/test_helpers.py\n$ python tests/test_utils.py\n```\n" }, { "alpha_fraction": 0.5665873885154724, "alphanum_fraction": 0.5933412313461304, "avg_line_length": 32.65999984741211, "blob_id": "f2bfa50e8290ab28e50990f7c19f57fafcd076dc", "content_id": "95fd2556eca120265900d0cad0a3b5b7a1f57dad", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1682, "license_type": "permissive", "max_line_length": 67, "num_lines": 50, "path": "/2.versioning/tests/test_helpers.py", "repo_name": "Kelechukwu/kelechukwu_nwosu_test", "src_encoding": "UTF-8", "text": "import os\nimport sys\nimport unittest\n\n# Add grandparent directory to sys.path \n# this is so that helpers module can be imported\n# when running tests directly \nsys.path.append(os.path.abspath(__file__ + \"/../../\"))\n\nfrom version.helpers import tokenize_version_string, add_filler\n\nclass TestHelperFunctions(unittest.TestCase):\n \n def test_tokenize_version_string(self):\n return_value = tokenize_version_string(\"1.2\")\n expectation = [\"1\", \"2\"]\n self.assertEqual(return_value, expectation)\n\n return_value = tokenize_version_string(\"2.2.1\")\n expectation = [\"2\", \"2\", \"1\"]\n self.assertEqual(return_value, expectation)\n\n return_value = tokenize_version_string(\"1.2b\")\n expectation = [\"1\", \"2\", \"b\"]\n self.assertEqual(return_value, expectation)\n\n return_value = tokenize_version_string(\"1.2bc\")\n expectation = [\"1\", \"2\", \"b\",\"c\"]\n self.assertEqual(return_value, expectation)\n\n return_value = tokenize_version_string(\"1.2.a\")\n expectation = [\"1\", \"2\", \"a\"]\n self.assertEqual(return_value, expectation)\n\n return_value = tokenize_version_string(\"NT 1.1\")\n expectation = [\"1\", \"1\"]\n self.assertEqual(return_value, expectation)\n \n def test_add_filler(self):\n return_value = add_filler([\"1\", \"2\"], [\"1\", \"2\", \"3\"])\n expectation = ([\"1\", \"2\", \"0\"], [\"1\", \"2\", \"3\"])\n self.assertEqual(return_value, expectation)\n\n return_value = add_filler([\"1\", \"2\", \"a\"], [\"1\", \"2\", \"a\"])\n expectation = ([\"1\", \"2\", \"a\"], [\"1\", \"2\", \"a\"])\n self.assertEqual(return_value, expectation)\n\n\nif __name__ == '__main__':\n unittest.main()" }, { "alpha_fraction": 0.6813725233078003, "alphanum_fraction": 0.6948529481887817, "avg_line_length": 23.02941131591797, "blob_id": "9a5774607f561c491e49c1b9b4abd3c38e10511b", "content_id": "03e93fd5415ce2227ed19d0a682ab067403cc556", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 816, "license_type": "permissive", "max_line_length": 68, "num_lines": 34, "path": "/3.GeoLRU/tests/test_lru.py", "repo_name": "Kelechukwu/kelechukwu_nwosu_test", "src_encoding": "UTF-8", "text": "import time\nimport sys\nimport os\n\n# Add grandparent directory to sys.path \n# this is so that lru module can be imported\n# when running tests directly \nsys.path.append(os.path.abspath(__file__ + \"/../../\"))\nfrom lru.LRUCache import LRUCache\n\n@LRUCache(cache_size=9, validity_in_minutes=0.5,is_master_node=True)\ndef expensive_function(num):\n\n print(\"computing...\")\n time.sleep(2)\n result = num * num\n 
return result\n\n\n@LRUCache(master_node_hostname=\"localhost\")\ndef not_so_expensive_function(num):\n\n print(\"computing...\")\n time.sleep(2)\n result = num * num\n return result\n\nwhile True:\n print(expensive_function(2))\n # print(not_so_expensive_function(4))\n print(expensive_function(5))\n print(expensive_function(6))\n print(expensive_function(5))\n print(expensive_function(4))" }, { "alpha_fraction": 0.46547314524650574, "alphanum_fraction": 0.5063938498497009, "avg_line_length": 23.46875, "blob_id": "441656fc12cfe3451b8913966ba4b5213571bafd", "content_id": "4e66cbb6d4fcd5288e7f6e3c0409b389dd1cd63c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 782, "license_type": "permissive", "max_line_length": 47, "num_lines": 32, "path": "/1.overlap/test_overlap.py", "repo_name": "Kelechukwu/kelechukwu_nwosu_test", "src_encoding": "UTF-8", "text": "import unittest\nfrom overlap import overlap\n\nclass TestOverlap(unittest.TestCase):\n def setUp(self):\n # load test inputs\n self.non_overlaping_lines = [\n [(1,5),(6,8)],\n [(1,4),(4,12)],\n [(2,12),(9,12)],\n ]\n self.overlaping_lines = [\n [(1,5),(2,6)],\n [(2,6),(1,5)],\n [(2,4),(3,12)],\n ]\n\n\n def test_overlapping_lines(self):\n for lines in self.overlaping_lines:\n result = overlap(lines[0],lines[1])\n self.assertTrue(result)\n\n\n def test_non_overlapping_lines(self):\n for lines in self.non_overlaping_lines:\n result = overlap(lines[0],lines[1])\n self.assertFalse(result)\n\n\nif __name__ == '__main__':\n unittest.main()" }, { "alpha_fraction": 0.5433628559112549, "alphanum_fraction": 0.5752212405204773, "avg_line_length": 20.925926208496094, "blob_id": "4cf386bfdfeca6cf7effd93c589dde3b6b795e8a", "content_id": "905c975515d61f82e911fa27cf2532a6f690895a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 565, "license_type": "permissive", "max_line_length": 99, "num_lines": 27, "path": "/1.overlap/README.md", "repo_name": "Kelechukwu/kelechukwu_nwosu_test", "src_encoding": "UTF-8", "text": "# Overlap Function\n**Version**: Python3.X\n\nThis function accepts two lines (x1,x2) and (x3,x4) on the x-axis and returns whether they overlap.\nAs an example, (1,5) and (2,6) overlap, but (1,5) and (6,8) do not.\n\n## Usage\n\n```bash\n$ python3\n```\n```python\n>>> from overlap import overlap\n>>> overlap((1,5), (6,8))\nFalse\n```\n## Contributing\n### Testing\n\n| Test file | Usage |\n| ----------------------|:--------------------------------------:|\n| test_overlap.py | Tests for the overlap function reside here |\n\n### How to run tests\n```bash\n$ python test_overlap.py\n```\n" } ]
16
sonalidurga/helloWorldRepo
https://github.com/sonalidurga/helloWorldRepo
8e41ccaca67041a4776a6d67148e9d7c6c58c6b2
bf2e67fde96a79c80ab85ff0ba3f2bc7a9962fc5
de119d3ca8dd86a9ae791535b8c8066c81bb11de
refs/heads/master
2021-01-10T03:36:39.824649
2015-11-01T17:39:06
2015-11-01T17:39:06
45,350,206
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7307692170143127, "alphanum_fraction": 0.7307692170143127, "avg_line_length": 25, "blob_id": "50afc3fe34268b25927a5da33674e4d25bbb040b", "content_id": "79c8231c0412a3940148c5da1ac765f65e2f960c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 26, "license_type": "permissive", "max_line_length": 25, "num_lines": 1, "path": "/Sonali.py", "repo_name": "sonalidurga/helloWorldRepo", "src_encoding": "UTF-8", "text": "print \"my name is sonali\"\n" }, { "alpha_fraction": 0.7101449370384216, "alphanum_fraction": 0.7210144996643066, "avg_line_length": 14.277777671813965, "blob_id": "cfca9969f2024b3e3c5021c1728911b561ff3e96", "content_id": "549c5cbed0638418ccef86b9623ca9291f6b8179", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 276, "license_type": "permissive", "max_line_length": 58, "num_lines": 18, "path": "/README.md", "repo_name": "sonalidurga/helloWorldRepo", "src_encoding": "UTF-8", "text": "# helloWorldRepo\nfor learning only\n\n\nHello World! I know this is written in markdown language. \n\nThis will become a list now : \n* entry 1\n* entry 2\n* entry 3\n\nFollowing would look like code.\n```python\nprint \"hello world\"\n```\n\n\nThis is simple only. Nothing too cool about it. \n" } ]
2
mnhampl/alma-slipsomat
https://github.com/mnhampl/alma-slipsomat
1ae6120257f4e4800c3339197461d83bc998f1a6
0387bd433b8fe687f30e6d09ace25aa727e050d0
5cc58dac7eaac7bef837415dcaf238d68ac0ea01
refs/heads/master
2020-09-13T04:20:07.608791
2019-11-26T11:28:23
2019-11-26T11:28:23
222,652,981
0
0
MIT
2019-11-19T09:02:55
2019-08-04T19:43:32
2019-08-04T19:43:30
null
[ { "alpha_fraction": 0.6710526347160339, "alphanum_fraction": 0.7105262875556946, "avg_line_length": 75, "blob_id": "2dfc6aee49b96c3bdfedbecb13473c626377d685", "content_id": "171bc0adccbf2f32dc3492381d010e628f5e2d6c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 76, "license_type": "permissive", "max_line_length": 75, "num_lines": 1, "path": "/slipsomat/__init__.py", "repo_name": "mnhampl/alma-slipsomat", "src_encoding": "UTF-8", "text": "__version__ = '0.3.1-new_letter_configuration' # Use bumpversion to update\n" }, { "alpha_fraction": 0.5775240063667297, "alphanum_fraction": 0.5819095373153687, "avg_line_length": 38.07143020629883, "blob_id": "08fc3f4da096d0ca018b25d36ec5ab8791b93246", "content_id": "6dd84521c66a17d46be3de6caa4a869cdf890be9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10945, "license_type": "permissive", "max_line_length": 116, "num_lines": 280, "path": "/slipsomat/configuration_table.py", "repo_name": "mnhampl/alma-slipsomat", "src_encoding": "UTF-8", "text": "from __future__ import print_function\n\nimport os\nimport os.path\nimport re\nimport time\nimport sys\n\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.remote.errorhandler import NoSuchElementException\nfrom colorama import Fore, Back, Style\n\nfrom .slipsomat import LetterContent\nfrom .letter_info import LetterInfo\n\nclass ConfigurationTable(object):\n \"\"\"Interface to \"Customize letters\" in Alma.\"\"\"\n\n def __init__(self, pagename, worker):\n self.letter_infos = [] # array of LetterInfo objects \n self.update_dates = []\n self.worker = worker\n self.pagename = pagename \n \n self.css_selector_table_row = '.jsRecordContainer'\n self.css_selector_button_template = '#cnew_letter_labeltemplate_span'\n\n if pagename == 'Components Configuration':\n self.css_selector_table = '#filesAndLabels'\n self.css_selector_col_name = '#SELENIUM_ID_filesAndLabels_ROW_%d_COL_letterXslcfgFilefilename'\n self.css_selector_col_customized = '#SELENIUM_ID_filesAndLabels_ROW_%d_COL_customized' \n elif pagename == 'Letters Configuration':\n self.css_selector_table = '#lettersOnPage' \n self.css_selector_col_name = '#SELENIUM_ID_lettersOnPage_ROW_%d_COL_letterNameForUI'\n self.css_selector_col_channel = '#SELENIUM_ID_lettersOnPage_ROW_%d_COL_channel'\n self.css_selector_col_customized = '#SELENIUM_ID_lettersOnPage_ROW_%d_COL_customized' \n\n else:\n raise Exception()\n \n \n\n def open(self):\n \"\"\"Go from Alma start page to general configuration and open subpage\"\"\"\n \n try:\n # at page that lists letters?\n self.worker.first(By.CSS_SELECTOR, self.css_selector_table)\n except NoSuchElementException:\n # not at page that lists letters?\n self.print_letter_status('Opening table...', '')\n\n # Goto Alma start page\n self.worker.goto_alma_start_page()\n \n # Open Alma configuration\n self.worker.wait_for_and_click(By.CSS_SELECTOR, '#ALMA_MENU_TOP_NAV_configuration')\n \n # Open configuration \"General\" \n self.worker.click(By.XPATH, '//*[@href=\"#CONF_MENU6\"]')\n \n # Open Subpage\n self.worker.click(By.XPATH, '//*[text() = \"' + self.pagename + '\"]')\n self.worker.wait_for(By.CSS_SELECTOR, self.css_selector_table)\n\n return self\n\n def modified(self, name):\n# idx = self.names.index(name)\n# return self.update_dates[idx]\n return \"\"\n\n 
def set_modified(self, name, date):\n # Allow updating a single date instead of having to re-read the whole table\n idx = self.letter_infos.index(name)\n self.update_dates[idx] = date\n\n def print_letter_status(self, string, msg, progress=None, newline=False):\n sys.stdout.write('\\r{:100}'.format('')) # We clear the line first\n if progress is not None:\n sys.stdout.write('\\r[{}] {:60} {}'.format(\n progress,\n string.split('/')[-1],\n msg\n ))\n else:\n sys.stdout.write('\\r{:60} {}'.format(\n string.split('/')[-1],\n msg\n ))\n if newline:\n sys.stdout.write('\\n')\n sys.stdout.flush()\n\n\n def read(self):\n self.letter_infos = []\n\n # number of letters on page \n elems_rows = self.worker.all(By.CSS_SELECTOR, self.css_selector_table_row)\n \n # first try: only read the first page\n for i in range(0, len(elems_rows)):\n name = self.worker.all(By.CSS_SELECTOR, self.css_selector_col_name % i)[0].text\n \n if self.pagename == 'Letters Configuration':\n channel = self.worker.all(By.CSS_SELECTOR, self.css_selector_col_channel % i)[0].text\n else:\n channel = None\n\n letter_info = LetterInfo(name, i, channel)\n \n self.letter_infos.append(letter_info)\n print(str(i+1) + ': ' + letter_info.unique_name)\n \n\n# # Read the modification date column\n# elems = self.worker.all(By.CSS_SELECTOR,\n# '#lettersOnPage tr > td:nth-child(%d) > span' % updatedate_col)\n# self.update_dates = [el.text for el in elems]\n# \n# # return [{x[0]:2 {'modified': x[1], 'index': n}} for n, x in enumerate(zip(names, update_dates))]\n\n\n def is_customized(self, name):\n index = self.letter_infos.index(name)\n css_selector_element = self.css_selector_col_customized % index\n \n self.worker.wait_for(By.CSS_SELECTOR, css_selector_element)\n updated_by = self.worker.first(By.CSS_SELECTOR, css_selector_element)\n\n return updated_by.text not in ('-', 'Network')\n\n def assert_page_title(self, page_title):\n \"\"\" Assert that we are at the right letter \"\"\"\n # on subpage??\n self.worker.wait_for(By.CSS_SELECTOR, self.css_selector_button_template)\n \n element = self.worker.wait.until(\n EC.presence_of_element_located((By.CSS_SELECTOR, '.pageTitle'))\n )\n \n elt = element.text\n assert elt == page_title, \"%r != %r\" % (elt, page_title)\n\n\n def open_letter(self, letter_info):\n self.open()\n\n # Open a letter and return its contents as a LetterContent object.\n index = self.letter_infos.index(letter_info)\n self.worker.wait.until(EC.presence_of_element_located(\n (By.CSS_SELECTOR, self.css_selector_col_name % index))\n )\n\n time.sleep(0.2)\n\n # Open Letter configuration\n self.worker.scroll_into_view_and_click((self.css_selector_col_name + ' a') % index, By.CSS_SELECTOR)\n time.sleep(0.2)\n\n # We should now be at the letter edit form. 
Assert that page title is correct\n self.assert_page_title(letter_info.name)\n\n\n # goto tab \"Template\"\n # Click tab \"Template\" menu item\n css_selector_link = self.css_selector_button_template + ' a'\n self.worker.wait_for(By.CSS_SELECTOR, css_selector_link)\n self.worker.scroll_into_view_and_click(css_selector_link, By.CSS_SELECTOR)\n\n css_selector_template_textarea = 'pageBeanfileContent'\n self.worker.wait_for(By.ID, css_selector_template_textarea)\n txtarea = self.worker.first(By.ID, css_selector_template_textarea)\n return LetterContent(txtarea.text)\n\n\n def close_letter(self):\n # If we are at specific letter, press the \"Cancel\" button.\n elems = self.worker.all(By.CSS_SELECTOR, '.pageTitle')\n if len(elems) != 0:\n btn_selector = '#PAGE_BUTTONS_cbuttonnavigationcancel'\n self.worker.scroll_into_view_and_click(btn_selector, By.CSS_SELECTOR)\n\n \n def put_contents(self, letter_info, content):\n \"\"\"\n Save letter contents to Alma.\n\n This method assumes the letter has already been opened.\n \"\"\"\n self.assert_page_title(letter_info.name)\n\n # The \"normal\" way to set the value of a textarea with Selenium is to use\n # send_keys(), but it took > 30 seconds for some of the larger letters.\n # So here's a much faster way:\n txtarea = self.worker.first(By.ID, 'pageBeanfileContent')\n txtarea_id = txtarea.get_attribute('id')\n\n value = content.text.replace('\"', '\\\\\"').replace('\\n', '\\\\n')\n script = 'document.getElementById(\"%s\").value = \"%s\";' % (txtarea_id, value)\n self.worker.driver.execute_script(script)\n\n # Submit the form\n try:\n btn = self.worker.first(By.ID, 'PAGE_BUTTONS_cbuttonsave')\n except NoSuchElementException:\n btn = self.worker.first(By.ID, 'PAGE_BUTTONS_cbuttoncustomize')\n btn.click()\n\n # Wait for the table view.\n # Longer timeout per https://github.com/scriptotek/alma-slipsomat/issues/33\n self.worker.wait_for(By.CSS_SELECTOR, '.typeD table', timeout=40)\n\n return True\n\n\n def pull(self, local_storage, status_file):\n\n count_new = 0\n count_changed = 0\n\n self.open()\n self.read()\n\n for idx, letter_info in enumerate(self.letter_infos):\n progress = '%3d/%3d' % ((idx + 1), len(self.letter_infos))\n \n self.print_letter_status(letter_info.unique_name, '', progress)\n \n self.print_letter_status(letter_info.unique_name, 'checking...', progress)\n\n # --- Bug, skip webhook letters \n if letter_info.unique_name.endswith('-WEBHOOK'):\n self.print_letter_status(\n letter_info.unique_name, Fore.RED + 'skipped WEBHOOK' + Style.RESET_ALL, progress, True)\n continue\n # --- End Bug, Letter \n \n \n \n try:\n content = self.open_letter(letter_info)\n # if self.is_customized(letter_info):\n # content = self.open_letter(letter_info)\n # else:\n # content = self.open_default_letter(letter_info)\n except TimeoutException:\n # Retry once\n self.print_letter_status(letter_info.unique_name, 'retrying...', progress)\n# if self.is_customized(letter_info):\n content = self.open_letter(letter_info)\n# else:\n# content = self.open_default_letter(letter_info)\n \n self.close_letter()\n \n old_sha1 = status_file.checksum(letter_info.get_filename())\n if content.sha1 == old_sha1:\n self.print_letter_status(letter_info.unique_name, 'no changes', progress, True)\n continue\n \n if not local_storage.store(letter_info, content, self.modified(letter_info)):\n self.print_letter_status(\n letter_info.unique_name, Fore.RED + 'skipped due to conflict' + Style.RESET_ALL, progress, True)\n continue\n \n if old_sha1 is None:\n count_new += 1\n 
self.print_letter_status(letter_info.unique_name, Fore.GREEN + 'fetched new letter @ {}'.format(\n content.sha1[0:7]) + Style.RESET_ALL, progress, True)\n else:\n count_changed += 1\n self.print_letter_status(letter_info.unique_name, Fore.GREEN + 'updated from {} to {}'.format(\n old_sha1[0:7], content.sha1[0:7]) + Style.RESET_ALL, progress, True)\n \n sys.stdout.write(Fore.GREEN + 'Fetched {} new, {} changed letters\\n'.format(\n count_new, count_changed) + Style.RESET_ALL)\n \n" }, { "alpha_fraction": 0.5077160596847534, "alphanum_fraction": 0.5077160596847534, "avg_line_length": 27, "blob_id": "9dc7dadc7cd529c158d19e2f3daa66dc1477381a", "content_id": "5f9697f7f435d3e5a3bea215d6067a58c8c69fa4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 648, "license_type": "permissive", "max_line_length": 68, "num_lines": 23, "path": "/slipsomat/letter_info.py", "repo_name": "mnhampl/alma-slipsomat", "src_encoding": "UTF-8", "text": "class LetterInfo(object):\n \"\"\"Interface to \"Customize letters\" in Alma.\"\"\"\n\n def __init__(self, name, index, channel):\n self.name = name\n self.index = index\n self.channel = channel\n\n self.unique_name = name + '-' + channel if channel else name\n\n# if channel:\n# self.unique_name = name + '-' + channel \n# else:\n# self.unique_name = name \n \n def get_filename(self):\n filename = './' + self.unique_name.replace(' ', '_')\n\n # file ending\n if not(filename.endswith('.xsl')): \n filename += '.xsl'\n \n return filename\n " } ]
3
Gatszow/CarsScrapper
https://github.com/Gatszow/CarsScrapper
0bef5b1975302e1973d8b014907fa16674dce3e6
b2060f0aedc0e7f43059f0609941f9ea5f56dd8f
2d9761e5c59fb8c1101809603dc6bf6d3bacee32
refs/heads/master
2022-11-20T11:54:03.031880
2020-07-20T17:59:53
2020-07-20T17:59:53
279,598,015
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5326981544494629, "alphanum_fraction": 0.5439584255218506, "avg_line_length": 31.521127700805664, "blob_id": "405c37a869fb16484b747a677de458e232c60492", "content_id": "2cd750faf6bffd25a75355022c3a877ebd8e1da1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2310, "license_type": "no_license", "max_line_length": 101, "num_lines": 71, "path": "/database.py", "repo_name": "Gatszow/CarsScrapper", "src_encoding": "UTF-8", "text": "import mysql.connector\nfrom secret import password\nfrom scrapper import CarsScrapper\n\n\ndef difference(list1, list2):\n list_dif = [i for i in list1 + list2 if i not in list1 or i not in list2]\n return list_dif\n\n\nclass DatabaseUpdater(object):\n def __init__(self):\n self.mydb = mysql.connector.connect(\n host='localhost',\n user='root',\n password=password,\n database='test'\n )\n\n self.mycursor = self.mydb.cursor()\n\n # Database creation\n # mycursor.execute('CREATE DATABASE test')\n\n # Table creation\n self.mycursor.execute(\n 'CREATE TABLE IF NOT EXISTS Cars ('\n 'CarID INT PRIMARY KEY AUTO_INCREMENT, '\n 'Make VARCHAR(30), '\n 'Model VARCHAR(30), '\n 'Mileage_km MEDIUMINT UNSIGNED, '\n 'ProductionYear YEAR, '\n 'FuelType ENUM(\"Benzyna\", \"Benzyna+LPG\", \"Benzyna+CNG\", '\n '\"Diesel\", \"Elektryczny\", \"Etanol\", \"Hybryda\", \"Wodór\", \"Failed to get\"), '\n 'EngineSize_cm3 SMALLINT UNSIGNED, '\n 'URL VARCHAR(500), '\n 'Price MEDIUMINT UNSIGNED, '\n 'Currency VARCHAR(10), '\n 'Negotiable ENUM(\"True\", \"False\", \"Failed to get\") NOT NULL)'\n )\n self.values = CarsScrapper.search\n self.without = []\n\n def check(self):\n self.values = list(set(self.values))\n self.mycursor.execute('SELECT * FROM Cars')\n for record in self.mycursor:\n for row in range(len(self.values)):\n if record[1] == self.values[row][0] and record[2] == self.values[row][1] \\\n and record[3] == self.values[row][2] and record[8] == self.values[row][7] \\\n and record[9] == self.values[row][8]:\n\n self.without.append(self.values[row])\n\n values = difference(self.without, self.values)\n\n return values\n\n def add(self):\n data = self.check()\n self.mycursor.executemany('INSERT INTO Cars Values (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)',\n data)\n self.mydb.commit()\n\n def show(self):\n self.mycursor.execute('SELECT * FROM Cars')\n for x in self.mycursor:\n print(x)\n\n\nDatabaseUpdater().show()\n" }, { "alpha_fraction": 0.5048869848251343, "alphanum_fraction": 0.5208715200424194, "avg_line_length": 37.51764678955078, "blob_id": "ded2ad4b26ec2e60d4ea1f9ea47999035125b194", "content_id": "859ad40d438de969ad7d204995b02ca00ee91d8f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9822, "license_type": "no_license", "max_line_length": 119, "num_lines": 255, "path": "/scrapper.py", "repo_name": "Gatszow/CarsScrapper", "src_encoding": "UTF-8", "text": "from selenium import webdriver\nfrom exceptions import WrongThingToGetError\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.common.exceptions import NoSuchElementException as NSEE, ElementNotInteractableException as ENIE\n\n\ndef change_to_int(string: str) -> int:\n string = string.replace(' ', '')\n while True:\n try:\n string = int(string)\n break\n except ValueError:\n string = string[:-1]\n return string\n\n\ndef is_negotiable(string: str) -> str:\n if 
string == 'Do negocjacji':\n string = 'True'\n else:\n string = 'False'\n return string\n\n\ndef get_price_and_currency(price_with_currency: str):\n price_with_currency = price_with_currency.replace(' ', '')\n name_of_currency = []\n for z in range(len(price_with_currency)):\n try:\n int(price_with_currency)\n break\n\n except ValueError:\n name_of_currency.append(price_with_currency[len(price_with_currency) - 1:])\n price_with_currency = price_with_currency[:-1]\n\n name_of_currency.reverse()\n name_of_currency = ''.join(name_of_currency)\n\n return int(price_with_currency), name_of_currency\n\n\nclass CarsScrapper(object):\n def __init__(self):\n self.url = 'https://www.otomoto.pl/osobowe/' \\\n '?search%5Bfilter_float_price%3Ato%5D=20000&search' \\\n '%5Bfilter_float_mileage%3Ato%5D=150000&search' \\\n '%5Bfilter_enum_fuel_type%5D%5B0%5D=petrol&search' \\\n '%5Bfilter_enum_fuel_type%5D%5B1%5D=petrol-lpg&search' \\\n '%5Bfilter_enum_damaged%5D=0&search' \\\n '%5Bfilter_enum_no_accident%5D=1&search' \\\n '%5Border%5D=created_at%3Adesc&search%5Bbrand_program_id%5D' \\\n '%5B0%5D=&search%5Bcountry%5D=&view=list&page=209'\n\n self.driver = webdriver.Firefox()\n self.driver.get(self.url)\n self.isclosed = False\n self.list_of_tuples = []\n self.count = 1\n\n self.makes = []\n self.excluded_makes = ['Alfa Romeo', 'Aston Martin', 'De Lorean', 'Land Rover', 'DS Automobiles']\n\n self.models = []\n self.mileages = []\n self.years = []\n self.fuels = []\n self.engine_sizes = []\n self.urls = []\n self.prices = []\n self.currencies = []\n self.negotiable = []\n\n def get_products_make_and_model(self, title_class_name: str):\n titles = self.driver.find_elements_by_class_name(title_class_name)\n\n for title in titles:\n if self.excluded_makes[0] in title.text or self.excluded_makes[1] in title.text or self.excluded_makes[2] \\\n in title.text or self.excluded_makes[3] in title.text or self.excluded_makes[4] in title.text:\n\n self.models.append(' '.join((title.text.split()[2:])))\n temp_makes = title.text.split()[:2]\n make = ' '.join(temp_makes)\n self.makes.append(make)\n temp_makes.clear()\n\n else:\n self.models.append(' '.join((title.text.split()[1:])))\n self.makes.append(title.text.split()[0])\n\n return self.makes, self.models\n\n def get_products(self, thing_to_get, counter):\n try:\n if thing_to_get == 'mileage':\n for i in range(1, counter + 1):\n try:\n mileage = self.driver.find_element_by_xpath(\n f'/html/body/div[4]/div[2]/section/div[2]/div[1]/div/div[1]/div[5]/article[{i}'\n f']/div[2]/ul/li[2]/span')\n self.mileages.append(change_to_int(mileage.text))\n\n except NSEE:\n mileage = 0000\n self.mileages.append(mileage)\n\n return self.mileages\n\n elif thing_to_get == 'year':\n for i in range(1, counter + 1):\n try:\n year = self.driver.find_element_by_xpath(\n f'/html/body/div[4]/div[2]/section/div[2]/div[1]/div/div[1]/div[5]/article[{i}'\n f']/div[2]/ul/li[1]/span')\n self.years.append(int(year.text))\n\n except NSEE:\n year = 0000\n self.years.append(year)\n\n return self.years\n\n elif thing_to_get == 'fuel':\n for i in range(1, counter + 1):\n try:\n fuel = self.driver.find_element_by_xpath(\n f'/html/body/div[4]/div[2]/section/div[2]/div[1]/div/div[1]/div[5]/article[{i}'\n f']/div[2]/ul/li[4]/span')\n self.fuels.append(fuel.text)\n\n except NSEE:\n fuel = 'Failed to get'\n self.fuels.append(fuel)\n\n return self.fuels\n\n elif thing_to_get == 'engine_size':\n for i in range(1, counter + 1):\n try:\n engine_size = self.driver.find_element_by_xpath(\n 
f'/html/body/div[4]/div[2]/section/div[2]/div[1]/div/div[1]/div[5]/article[{i}'\n f']/div[2]/ul/li[3]/span')\n self.engine_sizes.append(change_to_int(engine_size.text))\n\n except NSEE:\n engine_size = 0000\n self.engine_sizes.append(engine_size)\n\n return self.engine_sizes\n\n elif thing_to_get == 'url':\n self.urls = [url.get_attribute('href') for url in\n self.driver.find_elements_by_class_name('offer-title__link')]\n\n return self.urls\n\n else:\n raise WrongThingToGetError\n\n except WrongThingToGetError:\n print('Wrong thing to get')\n\n def get_products_price_and_currency(self, counter):\n for i in range(1, counter + 1):\n try:\n price = self.driver.find_element_by_xpath(\n f'/html/body/div[4]/div[2]/section/div[2]/div[1]/div/div[1]/div[5]/article[{i}'\n f']/div[2]/div[2]/div/div[1]/span')\n value, currency = get_price_and_currency(price.text)\n self.prices.append(value)\n self.currencies.append(currency)\n\n except NSEE:\n value = 0000\n currency = 'Failed'\n self.prices.append(value)\n self.currencies.append(currency)\n\n try:\n negotiable = self.driver.find_element_by_xpath(\n f'/html/body/div[4]/div[2]/section/div[2]/div[1]/div/d'\n f'iv[1]/div[5]/article[{i}]/div[2]/div[2]/div/span').text\n self.negotiable.append(is_negotiable(negotiable))\n\n except NSEE:\n negotiable = 'Failed to get'\n self.negotiable.append(negotiable)\n\n return self.prices, self.currencies, self.negotiable\n\n def search(self):\n while True:\n if self.isclosed:\n break\n\n else:\n number_of_articles = len(self.driver.find_elements_by_tag_name('article'))\n\n makes, models = self.get_products_make_and_model('offer-title__link')\n mileages = self.get_products('mileage', number_of_articles)\n years = self.get_products('year', number_of_articles)\n fuels = self.get_products('fuel', number_of_articles)\n engine_sizes = self.get_products('engine_size', number_of_articles)\n urls = self.get_products('url', number_of_articles)\n prices, currencies, negotiable = self.get_products_price_and_currency(number_of_articles)\n\n for i in range(number_of_articles):\n temporary_list = (makes[i], models[i], mileages[i], years[i], fuels[i],\n engine_sizes[i], urls[i], prices[i], currencies[i], negotiable[i])\n self.list_of_tuples.append(temporary_list)\n print(temporary_list)\n del temporary_list\n makes.clear(), models.clear(), mileages.clear(), years.clear(), fuels.clear(), engine_sizes.clear()\n urls.clear(), prices.clear(), currencies.clear(), negotiable.clear()\n\n self.next_page()\n\n return self.list_of_tuples\n\n def next_page(self):\n try:\n interupting_element = self.driver.find_element_by_xpath('/html/body/div[4]/div[15]/div/div/a')\n interupting_element.click()\n\n except ENIE:\n pass\n\n li_index = len(self.driver.find_element_by_xpath('/html/body/div[4]/div[2]/section/div[2]/div[2]/ul')\n .find_elements_by_tag_name('li'))\n\n if li_index == 7 and self.count == 2:\n self.isclosed = True\n self.driver.close()\n\n elif self.count == 1:\n nexts = WebDriverWait(self.driver, 10).until(\n EC.presence_of_element_located(\n (By.XPATH, f\"/html/body/div[4]/div[2]/section/div[2]/div[2]/ul/li[{li_index}]/a\"))\n )\n nexts.click()\n self.count = 2\n\n else:\n nexts = WebDriverWait(self.driver, 20).until(EC.presence_of_element_located(\n (By.XPATH, f'/html/body/div[4]/div[2]/section/div[2]/div[2]/ul/li[{li_index}]/a'))\n )\n nexts.click()\n\n\nif __name__ == '__main__':\n temp = CarsScrapper().search()\n print(temp)\n" }, { "alpha_fraction": 0.7843137383460999, "alphanum_fraction": 0.7843137383460999, "avg_line_length": 16, 
"blob_id": "54c5dda5bd8716f771f720129e71b8105cc7185a", "content_id": "c41481477681b97bda798a1a6756f053f0275d87", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 102, "license_type": "no_license", "max_line_length": 42, "num_lines": 6, "path": "/exceptions.py", "repo_name": "Gatszow/CarsScrapper", "src_encoding": "UTF-8", "text": "class TooSmallNumberOfRowError(Exception):\n pass\n\n\nclass WrongThingToGetError(Exception):\n pass\n" }, { "alpha_fraction": 0.7128713130950928, "alphanum_fraction": 0.7128713130950928, "avg_line_length": 24.5, "blob_id": "c5103826f21fddb5b29956553126a3dd5921dfc8", "content_id": "3da5514029585bd181efa2ecf3b3d15540470dfc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 101, "license_type": "no_license", "max_line_length": 36, "num_lines": 4, "path": "/main.py", "repo_name": "Gatszow/CarsScrapper", "src_encoding": "UTF-8", "text": "from database import DatabaseUpdater\n\nif __name__ == '__main__':\n DatabaseUpdate = DatabaseUpdater" } ]
4
idelaigue/tarea-bd
https://github.com/idelaigue/tarea-bd
30a45431c727d5fa188776d176dbcfbc8c29de8d
c854d9c2ed83b8dccacd3f549f7c86d639b6d2f5
623a62ad780c75f71fcafde0473c6b952b9b4323
refs/heads/main
2023-06-25T18:06:12.841407
2021-07-30T22:57:37
2021-07-30T22:57:37
390,530,285
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5850178599357605, "alphanum_fraction": 0.587395966053009, "avg_line_length": 44.83333206176758, "blob_id": "86a6c00ee312c33dcc702d5a4c10da36621e1217", "content_id": "be2cf68a002088fb033c3780fedd3fb25ca468f4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 848, "license_type": "no_license", "max_line_length": 139, "num_lines": 18, "path": "/admin/users/CRUD/update.php", "repo_name": "idelaigue/tarea-bd", "src_encoding": "UTF-8", "text": "<?php\r\n/* Este archivo debe manejar la lógica de borrar un usuario (y los registros relacionados) como admin */\r\ninclude $_SERVER['DOCUMENT_ROOT'].'/db_config.php';\r\n /* Este archivo debe manejar la lógica de obtener los datos de todos los usuarios */\r\n $sql = \"SELECT * FROM usuario \";\r\n $result= pg_query($dbconn,$sql);\r\n if ($_SERVER[\"REQUEST_METHOD\"] == \"POST\") {\r\n $id=$_POST[\"id\"];\r\n $nombre = $_POST[\"name\"];\r\n $apellido = $_POST[\"surname\"];\r\n $email = $_POST[\"email\"];\r\n $contraseña = $_POST[\"pwd\"];\r\n $contraseña_hasheada = password_hash($contraseña, PASSWORD_BCRYPT, $opciones);\r\n $pais = $_POST[\"country\"];\r\n $sql1= \"UPDATE usuarios SET nombre = $nombre ,apellido = $apellido, correo= $email, constraseña = $contraseña_hasheada , pais = $pais\";\r\n \r\n }\r\n?>" }, { "alpha_fraction": 0.5430847406387329, "alphanum_fraction": 0.545257031917572, "avg_line_length": 41.21875, "blob_id": "8d75ba0c757486d12153ced717af633c3f975f99", "content_id": "94536797ac051beaea13fd4445a64010365bdd5d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 1390, "license_type": "no_license", "max_line_length": 172, "num_lines": 32, "path": "/admin/users/CRUD/create.php", "repo_name": "idelaigue/tarea-bd", "src_encoding": "UTF-8", "text": "<?php\r\n/* Este archivo debe manejar la lógica para crear un usuario como admin */\r\n\r\n include $_SERVER['DOCUMENT_ROOT'].'/db_config.php';\r\n /* Este archivo debe manejar la lógica de obtener los datos de todos los usuarios */\r\n $sql = \"SELECT max(id) FROM usuario order by max(id)\";\r\n $result= pg_query($dbconn,$sql);\r\n $opciones = array('cost'=>12);\r\n\r\n if ($_SERVER[\"REQUEST_METHOD\"] == \"POST\") {\r\n $id=$result+1;\r\n $nombre = $_POST[\"name\"];\r\n $apellido = $_POST[\"surname\"];\r\n $email = $_POST[\"email\"];\r\n $contraseña = $_POST[\"pwd\"];\r\n $contraseña_hasheada = password_hash($contraseña, PASSWORD_BCRYPT, $opciones);\r\n $pais = $_POST[\"country\"];\r\n $sql = \"INSERT INTO usuario (id,nombre, apellido, correo, contraseña, pais)VALUES (\".$id.\",'\".$nombre.\"','\".$apellido.\"','\".$email.\"','\".$contraseña.\"',\".$pais.\")\";\r\n if( pg_query_params($dbconn, $sql, array($id,$nombre,$apellido,$email,$contraseña_hasheada,$pais)) !== FALSE ) {\r\n echo \"Dato ingresado correctamente <br>\";\r\n echo '<a href=\"usuarios.html\"> lista de datos </a> <br>';\r\n echo '<a href=\"index.php\"> Ingresar más datos </a> <br>';\r\n pg_close($dbconn);\r\n } else {\r\n echo \"Hubo un error al ingresar el dato\";\r\n pg_close($dbconn);\r\n }\r\n }\r\n \r\n\r\n \r\n?>" }, { "alpha_fraction": 0.6467652320861816, "alphanum_fraction": 0.6537338495254517, "avg_line_length": 28.959182739257812, "blob_id": "e4783956547112d69b23ee097cbd2158cd9ad76a", "content_id": "eabcb6fb0426871451cdb257c61dc72ff063487a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10628, "license_type": "no_license", 
"max_line_length": 126, "num_lines": 343, "path": "/Tarea3/main.py", "repo_name": "idelaigue/tarea-bd", "src_encoding": "UTF-8", "text": "from flask import Flask\r\nfrom flask import jsonify\r\nfrom config import config\r\nfrom models import db\r\nfrom models import Usuario\r\nfrom models import Pais\r\nfrom models import Cuenta_bancaria\r\nfrom models import Moneda\r\nfrom models import Precio_moneda\r\nfrom models import Usuario_tiene_moneda\r\nimport datatime\r\nfrom flask import request\r\n\r\n\r\ndef create_app(enviroment):\r\n\tapp = Flask(__name__)\r\n\tapp.config.from_object(enviroment)\r\n\twith app.app_context():\r\n\t\tdb.init_app(app)\r\n\t\tdb.create_all()\r\n\treturn app\r\n\r\n# Accedemos a la clase Config del archivo config.py entregado\r\nenviroment = config['development']\r\napp = create_app(enviroment)\r\n\r\n\r\n\r\n\r\n#___TABLA___USUARIO___#\r\n#Metodo para obtener algo de la tabla usuario#\r\n@app.route('/api/usuario', methods=['GET'])\r\ndef get_usuario():\r\n\tusers = [ usuario.json() for usuario in Usuario.query.all() ] \r\n\treturn jsonify({'users': users })\r\n\r\n#Metodo para almacenar tal cosa en la tabla usuario#\r\n@app.route('/api/usuario/', methods=['POST'])\r\ndef create_usuario():\r\n\tjson = request.get_json(force=True)\r\n\tif json.get('nombre') is None:\r\n\t\treturn jsonify({'message': 'El formato está mal'}), 400\r\n\tuser = Usuario.create(json['nombre'],json['apellido'],json['correo'],json['contraseña'],json['pais'])\r\n\r\n\treturn jsonify({'user': user.json() })\r\n\r\n#Metodo para reemplazar o crear un nuevo elemento#\r\n@app.route('/api/usuario/<id>', methods=['PUT'])\r\ndef update_usuario(id):\r\n\tusuario = Usuario.query.filter_by(id=id).first()\r\n\tif user is None:\r\n\t\treturn jsonify({'message': 'Usuario no existe'}), 404\r\n\r\n\tjson = request.get_json(force=True)\r\n\tif json.get('nombre') is None:\r\n\t\treturn jsonify({'message': 'Bad request'}), 400\r\n#Es indispensable pedir en este orden los datos\r\n\tuser.nombre = json['nombre']\r\n\tuser.apellido = json['apellido']\r\n\tuser.correo = json['correo']\r\n\tuser.contraseña = json['contraseña']\r\n\tuser.pais = json['pais']\r\n\r\n\tuser.update()\r\n\r\n\treturn jsonify({'user': user.json() })\r\n\r\n#Metodo para eliminar un usuario de la tabla\"\r\n@app.route('/api/usuario/<id>', methods=['DELETE'])\r\ndef delete_usuario(id):\r\n\tuser = Usuario.query.filter_by(id=id).first()\r\n\tif user is None:\r\n\t\treturn jsonify({'message': 'El usuario no existe'}), 404\r\n\r\n\tuser.delete()\r\n\r\n\treturn jsonify({'user': user.json() })\r\n\r\n\r\n\r\n\r\n\r\n#___TABLA___PAIS___#\r\n#Metodo para obtener algo de la tabla pais#\r\n@app.route('/api/pais', methods=['GET'])\r\ndef get_pais():\r\n\tpaises = [ pais.json() for pais in Pais.query.all() ] \r\n\treturn jsonify({'paises': paises })\r\n\r\n#Metodo para almacenar tal cosa en la tabla Pais#\r\n@app.route('/api/v1/pais/', methods=['POST'])\r\ndef create_pais():\r\n\tjson = request.get_json(force=True)\r\n\r\n\tif json.get('nombre') is None:\r\n\t\treturn jsonify({'message': 'El formato está mal'}), 400\r\n\r\n\tpaises = Pais.create(json['nombre'])\r\n\r\n\treturn jsonify({'paises': paises.json() })\r\n\r\n#Metodo para reemplazar o crear un nuevo elemento#\r\n@app.route('/api/pais/<id>', methods=['PUT'])\r\ndef update_pais(id):\r\n\tpaises = Pais.query.get(id)\r\n\tif paises is None:\r\n\t\treturn jsonify({'message': 'User does not exists'}), 404\r\n\r\n\tjson = request.get_json(force=True)\r\n\tif json.get('nombre') is 
None:\r\n\t\treturn jsonify({'message': 'Bad request'}), 400\r\n\r\n\tpaises.nombre = json['nombre']\r\n\tpaises.update()\r\n\r\n\treturn jsonify({'paises': paises.json() })\r\n\r\n@app.route('/api/pais/<id>', methods=['DELETE'])\r\ndef delete_pais(id):\r\n\tpaises = Pais.query.get(id)\r\n\tif paises is None:\r\n\t\treturn jsonify({'message': 'El usuario no existe'}), 404\r\n\r\n\tpaises.delete()\r\n\r\n\treturn jsonify({'paises': paises.json() })\r\n\r\n\r\n\r\n\r\n\r\n\r\n#___TABLA___CUENTA_BANCARIA___#\r\n#Metodo para obtener algo de la tabla usuario#\r\n@app.route('/api/cuenta_bancaria', methods=['GET'])\r\ndef get_cuentas():\r\n\tcuentas = [ cuenta_bancaria.json() for cuenta_bancaria in Cuenta_bancaria.query.all() ] \r\n\treturn jsonify({'cuentas': cuentas })\r\n\r\n#Metodo para almacenar tal cosa en la tabla usuario#\r\n@app.route('/api/cuenta_bancaria/', methods=['POST'])\r\ndef create_cuentas():\r\n\tjson = request.get_json(force=True)\r\n\r\n\tif json.get('id_usuario') is None:\r\n\t\treturn jsonify({'message': 'El formato está mal'}), 400\r\n\r\n\tcuentas = Cuenta_bancaria.create(json['id_usuario'],json['balance'])\r\n\r\n\treturn jsonify({'cuentas': cuentas.json() })\r\n\r\n#Metodo para reemplazar o crear un nuevo elemento#\r\n@app.route('/api/cuenta_bancaria/<id>', methods=['PUT'])\r\ndef update_cuentas(id):\r\n\tcuentas = Cuenta_bancaria.query.get(id)\r\n\tif cuentas is None:\r\n\t\treturn jsonify({'message': 'User does not exists'}), 404\r\n\r\n\tjson = request.get_json(force=True)\r\n\tif json.get('balance') is None:\r\n\t\treturn jsonify({'message': 'Bad request'}), 400\r\n\r\n\tcuentas.balance = json['balance']\r\n\tcuentas.update()\r\n\r\n\treturn jsonify({'cuentas': cuentas.json() })\r\n\r\n@app.route('/api/cuenta_bancaria/<id>', methods=['DELETE'])\r\ndef delete_cuentas(id):\r\n\tcuentas = Cuenta_bancaria.query.get(id)\r\n\tif cuentas is None:\r\n\t\treturn jsonify({'message': 'El usuario no existe'}), 404\r\n\r\n\tcuentas.delete()\r\n\r\n\treturn jsonify({'cuentas': cuentas.json() })\r\n\r\n\r\n\r\n\r\n\r\n\r\n#___TABLA___Moneda___#\r\n#Metodo para obtener algo de la tabla usuario#\r\n@app.route ('/api/moneda', methods=['GET'])\r\ndef get_moneda():\r\n currency = [ moneda.json() for moneda in Moneda.query.all() ] \r\n return jsonify({'currency': currency })\r\n\r\n#Metodo para almacenar tal cosa en la tabla usuario#\r\n@app.route('/api/moneda/', methods=['POST'])\r\ndef create_moneda():\r\n json = request.get_json(force=True)\r\n\r\n if json.get('nombre') is None:\r\n return jsonify({'message': 'El formato está mal'}), 400\r\n\r\n currency = Moneda.create(json['nombre'],json['sigla'])\r\n\r\n return jsonify({'currency': currency.json() })\r\n\r\n#Metodo para reemplazar o crear un nuevo elemento#\r\n@app.route('/api/moneda/<id>', methods=['PUT'])\r\ndef update_moneda(id):\r\n currency = Moneda.query.get(id)\r\n if currency is None:\r\n return jsonify({'message': 'currency does not exists'}), 404\r\n\r\n json = request.get_json(force=True)\r\n if json.get('nombre') is None:\r\n return jsonify({'message': 'Bad request'}), 400\r\n\r\n currency.nombre = json['nombre']\r\n currency.sigla = json['sigla']\r\n\r\n currency.update()\r\n return jsonify({'currency': currency.json() })\r\n\r\n@app.route('/api/moneda/<id>', methods=['DELETE'])\r\ndef delete_moneda(id):\r\n currency = Moneda.query.filter_by(id=id).first()\r\n if currency is None:\r\n return jsonify({'message': 'currency no existe'}), 404\r\n\r\n currency.delete()\r\n\r\n return jsonify({'currency': currency.json() 
})\r\n\r\n\r\n\r\n#___TABLA___PRECIO_MONEDA___#\r\n#Metodo para obtener algo de la tabla precio_moneda#\r\n@app.route ('/api/precio_moneda', methods=['GET'])\r\ndef get_precio_moneda():\r\n    precio = [ precio_moneda.json() for precio_moneda in Precio_moneda.query.all() ] \r\n    return jsonify({'precio': precio })\r\n\r\n#Metodo para almacenar tal cosa en la tabla usuario#\r\n@app.route ('/api/precio_moneda', methods=['POST'])\r\ndef create_precio_moneda():\r\n    json = request.get_json(force=True)\r\n    if json.get('id') is None:\r\n        return jsonify({'message': 'El formato está mal'}), 400\r\n\r\n    currency = Precio_moneda.create(json['id'],json['valor'])\r\n\r\n    return jsonify({'currency': currency.json() })\r\n\r\n#Metodo para reemplazar o crear un nuevo elemento#\r\n@app.route('/api/precio_moneda/<id>', methods=['PUT'])\r\ndef update_precio_monedas(id):\r\n    json = request.get_json(force=True)\r\n    precio = Precio_moneda.query.filter_by(fecha=json['fecha'],id=id).first()\r\n    if precio is None:\r\n        return jsonify({'message': 'currency does not exists'}), 404\r\n\r\n    precio.valormon = json['valor']\r\n\r\n    precio.update()\r\n\r\n    return jsonify({'precio': precio.json() })\r\n@app.route('/api/precio_moneda/<id>', methods=['DELETE'])\r\ndef delete_precio_usuario(id):\r\n    precio = Precio_moneda.query.filter_by(id=id).first()\r\n    if precio is None:\r\n        return jsonify({'message': 'moneda no existe'}), 404\r\n\r\n    precio.delete()\r\n\r\n    return jsonify({'precio': precio.json() })\r\n\r\n\r\n\r\n\r\n\r\n#___TABLA___USUARIO_TIENE_MONEDA___#\r\n#Metodo para obtener algo de la tabla usuario_tiene_moneda#\r\n@app.route('/api/usuario_tiene_moneda', methods=['GET'])\r\ndef get_usuario_tiene_moneda():\r\n    tener = [ moneda.json() for moneda in Usuario_tiene_moneda.query.all() ] \r\n    return jsonify({'tener': tener})\r\n\r\n#Metodo para almacenar tal cosa en la tabla usuario_tiene_moneda#\r\n@app.route('/api/usuario_tiene_moneda', methods=['POST'])\r\ndef create_usuario_tiene_moneda():\r\n    json = request.get_json(force=True)\r\n    if json.get('id_usuario') is None:\r\n        return jsonify({'message': 'El formato está mal'}), 400\r\n\r\n    currency = Usuario_tiene_moneda.create(json['id_usuario'],json['id_moneda'],json['balance'])\r\n\r\n    return jsonify({'currency': currency.json() })\r\n\r\n#Metodo para reemplazar o crear un nuevo elemento#\r\n@app.route('/api/usuario_tiene_moneda/<id_usuario>/<id_moneda>', methods=['PUT'])\r\ndef update_usuario_tiene_moneda(id_usuario,id_moneda):\r\n\tuser = Usuario_tiene_moneda.query.filter_by(id_usuario=id_usuario,id_moneda=id_moneda).first()\r\n\tif user is None:\r\n\t\treturn jsonify({'message': 'User does not exists'}), 404\r\n\tjson = request.get_json(force=True)\r\n\tif json.get('balance') is None:\r\n\t\treturn jsonify({'message': 'Bad request'}), 400\r\n\tuser.balance = json['balance']\r\n\tuser.update()\r\n\treturn jsonify({'user': user.json() })\r\n\r\n@app.route('/api/usuario_tiene_moneda/<id_usuario>/<id_moneda>', methods=['DELETE'])\r\ndef delete_usuario_tiene_moneda(id_usuario,id_moneda):\r\n    tener= Usuario_tiene_moneda.query.filter_by(id_usuario=id_usuario,id_moneda=id_moneda).first()\r\n\t\r\n    if tener is None:\r\n        return jsonify({'message': 'No existe'}), 404\r\n\r\n    tener.delete()\r\n\r\n    return jsonify({'tener': tener.json() })\r\n\r\n\r\n\r\n\r\n##############################################################################################################################\r\n#Consultas, solo se me pidieron 3#\r\n@app.route('/api/consulta/2/<max_id>', 
methods=['GET'])\r\ndef get_custom(max_id):\r\n\ttasks = [dict(cuenta_bancaria) for cuenta_bancaria in Cuenta_bancaria.custom(max_id=max_id).fetchall()]\r\n\treturn jsonify({'tasks': tasks })\r\n\r\n@app.route('/api/consulta/4/<monedas>', methods=['GET'])\r\ndef get_precio_monedass(monedas):\r\n\ttasks = [dict(moneda) for moneda in Moneda.maximo_historico(monedas=monedas).fetchall()]\r\n\treturn jsonify({'tasks': tasks })\r\n\r\n@app.route('/api/consulta/5/<monedas>', methods=['GET'])\r\ndef get_precio_circulacion(monedas):\r\n\ttasks = [dict(moneda) for moneda in Moneda.circulacion(monedas=monedas).fetchall()]\r\n\treturn jsonify({'tasks': tasks })\r\n\r\n\r\nif __name__ == '__main__':\r\n\tapp.run(debug=True)\r\n" }, { "alpha_fraction": 0.6096256971359253, "alphanum_fraction": 0.6245989203453064, "avg_line_length": 32.66666793823242, "blob_id": "d2f007de346edb2b04cbcbc2de9b8076014d0d94", "content_id": "2d9ef9d22d921644ea7395cecdf1274058fc2d5b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 936, "license_type": "no_license", "max_line_length": 100, "num_lines": 27, "path": "/lista.php", "repo_name": "idelaigue/tarea-bd", "src_encoding": "UTF-8", "text": "<?php include 'db_config.php';?>\r\n<!DOCTYPE html>\r\n<html>\r\n<head>\r\n<title>Informe COVID</title>\r\n<link rel=\"stylesheet\" href=\"https://maxcdn.bootstrapcdn.com/bootstrap/4.4.1/css/bootstrap.min.css\">\r\n<script src=\"https://ajax.googleapis.com/ajax/libs/jquery/3.4.1/jquery.min.js\"></script>\r\n<script src=\"https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.16.0/umd/popper.min.js\"></script>\r\n<script src=\"https://maxcdn.bootstrapcdn.com/bootstrap/4.4.1/js/bootstrap.min.js\"></script>\r\n</head>\r\n<body>\r\n<?php\r\n$sql = \"SELECT * FROM Informe\";\r\n$result = pg_query_params($dbconn, $sql, array());\r\nif( pg_num_rows($result) > 0 ) {\r\n while($row = pg_fetch_assoc($result)) {\r\n echo '<br>' . $row[\"pais\"] . \" = \" . $row[\"nro_cont\"] . 
'<br>';\r\n    }\r\n    pg_close($dbconn);\r\n} else {\r\n    echo \"Hubo un error al solicitar los datos\";\r\n    pg_close($dbconn);\r\n}\r\necho '<a href=\"index.php\"> Ingresar más datos </a>';\r\n?>\r\n</body>\r\n</html> " }, { "alpha_fraction": 0.46384039521217346, "alphanum_fraction": 0.4679966866970062, "avg_line_length": 41.03571319580078, "blob_id": "17fa23366b10a174298b06dad65787182b8a70b8", "content_id": "7dd542fddd490fcb379ad944a6034980a7837238", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 1206, "license_type": "no_license", "max_line_length": 120, "num_lines": 28, "path": "/sesion/log-in.php", "repo_name": "idelaigue/tarea-bd", "src_encoding": "UTF-8", "text": "<?php include '../include/header.html'; ?>\r\n<body>\r\n    <div class='container-fluid'>\r\n        <div class='row justify-content-center mt-5'>\r\n            <div class='container-6 shadow-lg rounded m-auto p-5'>\r\n                <h1>Bienvenido</h1>\r\n                <p>Ingrese sus datos para iniciar sesión.</p>\r\n                <form action=\"/sesion/valida_login.php\" method=\"POST\">\r\n                    <div class=\"form-group\">\r\n                        <label for=\"email\">Correo Electrónico</label>\r\n                        <input type=\"email\" class=\"form-control\" placeholder=\"correo@electronico.com\" id=\"email\" name = \"email\">\r\n                    </div>\r\n                    <div class=\"form-group\">\r\n                        <label for=\"pwd\">Contraseña</label>\r\n                        <input type=\"password\" class=\"form-control\" placeholder=\"Contraseña\" id=\"pwd\" name = \"pwd\">\r\n                    </div>\r\n                    <div class='d-flex justify-content-end'>\r\n                        <button type=\"submit\" class=\"btn btn-primary\">Enviar <i class=\"fas fa-sign-in-alt\"></i></button>\r\n                    </div>\r\n                </form>\r\n            </div>\r\n        </div>\r\n    </div>\r\n\r\n\r\n</body>\r\n\r\n</html>" }, { "alpha_fraction": 0.6041335463523865, "alphanum_fraction": 0.6041335463523865, "avg_line_length": 19.758621215820312, "blob_id": "c4382e7d3adb9cde8bf8f3dfdc0164eb76543826", "content_id": "c7a0b9cebaa7b9ee1447c1c09575c338c1bc147c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 631, "license_type": "no_license", "max_line_length": 88, "num_lines": 29, "path": "/admin/users/CRUD/read.php", "repo_name": "idelaigue/tarea-bd", "src_encoding": "UTF-8", "text": "<?php\r\ninclude $_SERVER['DOCUMENT_ROOT'].'/db_config.php';\r\n\r\n/* Este archivo debe manejar la lógica de obtener los datos de un determinado usuario */\r\n$id=$_REQUEST[\"id\"];\r\n/* Consulta parametrizada sobre el id recibido */\r\n$sql= \"SELECT * FROM usuario WHERE id = $1\";\r\n$result= pg_query_params($dbconn, $sql, array($id));\r\n$row = pg_fetch_assoc($result);\r\nfunction idos($row){\r\n    echo $row[\"id\"];\r\n}\r\nfunction nombre($row){\r\n    echo $row[\"nombre\"];\r\n}\r\nfunction ap($row){\r\n    echo $row[\"apellido\"];\r\n}\r\nfunction corr($row){\r\n    echo $row[\"correo\"];\r\n}\r\nfunction cntr($row){\r\n    echo $row[\"contraseña\"];\r\n}\r\nfunction pais($row){\r\n    echo $row[\"pais\"];\r\n}\r\n\r\n\r\n?>" }, { "alpha_fraction": 0.50629723072052, "alphanum_fraction": 0.516372799873352, "avg_line_length": 37.70000076293945, "blob_id": "a05d64a74ec9b71f26212b47880a373613a88dd3", "content_id": "74049153bdcd3680d3304cb40a6941976e0a0a4e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 1192, "license_type": "no_license", "max_line_length": 153, "num_lines": 30, "path": "/sesion/valida_signup.php", "repo_name": "idelaigue/tarea-bd", "src_encoding": "UTF-8", "text": "<?php\r\ninclude \"../db_config.php\";\r\nif ($_SERVER[\"REQUEST_METHOD\"] == \"POST\"){\r\n    $name = 
$_POST[\"name\"];\r\n    $lastname = $_POST[\"surname\"];\r\n    $email = $_POST[\"email\"];\r\n    $pass = $_POST[\"pwd\"];\r\n    $pass2 = $_POST[\"pwd2\"];\r\n    $country = $_POST[\"country\"];\r\n    if ($pass == $pass2){\r\n        $sqlemail = \"SELECT * FROM usuario WHERE correo = $1 \";\r\n        $resultemail = pg_query_params($dbconn, $sqlemail, array($email));\r\n        $row = pg_fetch_row($resultemail);\r\n        if(!$row) { \r\n            $sqlregistro = \"INSERT INTO usuario (nombre, apellido, correo, contraseña, pais, fecha_registro, tipo) VALUES ($1, $2, $3, $4, $5, $6, $7)\";\r\n            $opciones = array(\"cost\" => 12);\r\n            $hashpass = password_hash($pass, PASSWORD_BCRYPT, $opciones);\r\n            $fecha = date(\"Y-m-d\");\r\n            $resultusuario = pg_query_params($dbconn, $sqlregistro, array($name, $lastname, $email, $hashpass, $country, $fecha, \"usuario\" ));\r\n        }\r\n        pg_close($dbconn);\r\n        header('Location: log-in.php');\r\n    }\r\n    else {\r\n        echo \"Hubo un error al solicitar los datos\";\r\n        pg_close($dbconn);\r\n    }\r\n}\r\nheader('Location: log-in.php');\r\n?>\r\n" }, { "alpha_fraction": 0.5879541039466858, "alphanum_fraction": 0.6185468435287476, "avg_line_length": 47.80952453613281, "blob_id": "06ef79e9d4365e41d95c410bad36f0f8a80f6ba", "content_id": "ace248e64d42a9fbc3f53edcf41b49fe9129111c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 1052, "license_type": "no_license", "max_line_length": 486, "num_lines": 21, "path": "/index.php", "repo_name": "idelaigue/tarea-bd", "src_encoding": "UTF-8", "text": "<?php\r\nsession_start();\r\ninclude 'include/navbar.html';\r\n?>\r\n\r\n<div class='container-fluid'>\r\n    <div class=\"row p-3\">\r\n        <h1>Página Principal</h1>\r\n    </div>\r\n    <div class=\"row p-3\">\r\n        <div class=\"col\">\r\n            <div class=\"container shadow-lg rounded m-auto p-5\">\r\n                <p>¡Bienvenido/a a tu página de criptomonedas! 
</p> \r\n                <p>Enterate de todo lo último en cuanto a tus inversiones, compras, ventas y más de tus criptomonedas.</p>\r\n                <div><div><link href='http://fonts.googleapis.com/css?family=Raleway|Open+Sans' rel='stylesheet' type='text/css'><center><div style=\"border-left: solid 7px #866491; border-bottom: solid 7px #9947B3; background:url(https://i.imgur.com/Izco9m9.jpg); height: 300px; width: 600px\"><div style=\"font-family: Anger Styles; font-size: 30px; color: #fff; text-align: center; padding-top: 240px; text-transform: uppercase; letter-spacing: 8px;\">Administración</center></div></div>\r\n            </div>\r\n        </div>\r\n    </div>\r\n</div>\r\n</body>\r\n\r\n</html>" }, { "alpha_fraction": 0.5098543167114258, "alphanum_fraction": 0.5175663828849792, "avg_line_length": 33.42424392700195, "blob_id": "133561b31de52b719fc7fa5f269f0b419a3e6388", "content_id": "89457886ba83bf2c4e2598b73e5df7cf12950659", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 1169, "license_type": "no_license", "max_line_length": 87, "num_lines": 33, "path": "/sesion/valida_login.php", "repo_name": "idelaigue/tarea-bd", "src_encoding": "UTF-8", "text": "<?php\r\ninclude \"../db_config.php\";\r\nsession_start();\r\nif ($_SERVER[\"REQUEST_METHOD\"] == \"POST\"){\r\n    $user = $_POST[\"email\"];\r\n    $pass = $_POST[\"pwd\"];\r\n    $sqlusuario = \"SELECT * FROM usuario WHERE correo = $1 \";\r\n    $resultusuario = pg_query_params($dbconn, $sqlusuario, array($user));\r\n    $row = pg_fetch_row($resultusuario);\r\n    if($row) { \r\n        $contrahash = $row[4];\r\n        //revisar si la contraseña corresponde con la de la base de datos\r\n        if (password_verify ($pass, $contrahash)){\r\n            //guardas los datos relevantes del usuario\r\n            $_SESSION[\"nombre\"] = $row[1];\r\n            $_SESSION[\"apellido\"] = $row[2];\r\n            $_SESSION[\"correo\"] = $row[3];\r\n            $_SESSION[\"pais\"] = $row[5];\r\n            $_SESSION[\"tipo\"] = $row[7];\r\n            $_SESSION[\"fecha\"] = $row[6];\r\n            $_SESSION[\"id_usuario\"] = $row[0];\r\n            header('Location: ../index.php');\r\n        }\r\n        pg_close($dbconn);\r\n\r\n    }\r\n    else {\r\n        echo \"Hubo un error al solicitar los datos\";\r\n        pg_close($dbconn);\r\n    }\r\n}\r\nheader('Location: log-in.php'); //No está registrado, por lo que se redirecciona a log-in.php \r\n?>" }, { "alpha_fraction": 0.5255354046821594, "alphanum_fraction": 0.5288302898406982, "avg_line_length": 31.83333396911621, "blob_id": "7b3c30637c517d04a21f219d7ba66a9d92f04c86", "content_id": "9ebfc5637c9da2bd39ea3f9d90f817c78964b2ae", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 608, "license_type": "no_license", "max_line_length": 76, "num_lines": 18, "path": "/respaldo/form.php", "repo_name": "idelaigue/tarea-bd", "src_encoding": "UTF-8", "text": "<?php include 'db_config.php';\r\n\r\n\r\nif ($_SERVER[\"REQUEST_METHOD\"] == \"POST\") {\r\n    $pais = $_POST[\"pais\"];\r\n    $nro_cont = $_POST[\"nro_cont\"];\r\n    $sql = 'INSERT INTO Informe (pais, nro_cont) VALUES ($1, $2)';\r\n    if( pg_query_params($dbconn, $sql, array($pais,$nro_cont)) !== FALSE ) {\r\n        echo \"Dato ingresado correctamente <br>\";\r\n        echo '<a href=\"lista.php\"> lista de datos </a> <br>';\r\n        echo '<a href=\"index.php\"> Ingresar más datos </a> <br>';\r\n        pg_close($dbconn);\r\n    } else {\r\n        echo \"Hubo un error al ingresar el dato\";\r\n        pg_close($dbconn);\r\n    }\r\n}\r\n?>" }, { "alpha_fraction": 0.5, "alphanum_fraction": 0.5254237055778503, "avg_line_length": 19.545454025268555, "blob_id": "c9148d808a9c297027a3b965090a15d0cac90b85a", 
"content_id": "6c4fd808a9c297027a3b965090a15d0cac90b85a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 472, "license_type": "no_license", "max_line_length": 80, "num_lines": 22, "path": "/Tarea3/precio_moneda.php", "repo_name": "idelaigue/tarea-bd", "src_encoding": "UTF-8", "text": "<?php \r\ninclude '../Curl.php';\r\n\r\n$get_data = callAPI('GET', 'http://127.0.0.1:5000/api/precio_moneda', false);\r\n$response = json_decode($get_data,true);\r\nprint_r ($response);\r\n\r\nforeach ($response as $clave=>$product) {\r\n    \r\n    \r\n    foreach($product as $matriz){\r\n        \r\n        print_r($matriz[\"id\"] );\r\n        print_r($matriz[\"valor\"] );\r\n        \r\n    }\r\n    \r\n}\r\n#$errors = $response['response']['errors'];\r\n#$data = $response['response']['data'][0];\r\n\r\n?>" }, { "alpha_fraction": 0.650779128074646, "alphanum_fraction": 0.6663611531257629, "avg_line_length": 38.44444274902344, "blob_id": "7d9f585cd38085b8379cdb51d8ac532e1c4b6e2f", "content_id": "70a2f6d76c678d2af107e285cb72bd7a678b6407", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 1098, "license_type": "no_license", "max_line_length": 125, "num_lines": 27, "path": "/respaldo/index.php", "repo_name": "idelaigue/tarea-bd", "src_encoding": "UTF-8", "text": "<?php include 'db_config.php';?>\r\n<!DOCTYPE html>\r\n<html>\r\n<head>\r\n<title>Informe COVID</title>\r\n<link rel=\"stylesheet\" href=\"https://maxcdn.bootstrapcdn.com/bootstrap/4.4.1/css/bootstrap.min.css\">\r\n<script src=\"https://ajax.googleapis.com/ajax/libs/jquery/3.4.1/jquery.min.js\"></script>\r\n<script src=\"https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.16.0/umd/popper.min.js\"></script>\r\n<script src=\"https://maxcdn.bootstrapcdn.com/bootstrap/4.4.1/js/bootstrap.min.js\"></script>\r\n</head>\r\n<body>\r\n\r\n<h1 style=\"color:blue\">Informe COVID-19</h1>\r\n<form action=\"form.php\" method=\"POST\">\r\n  <div class=\"form-group\">\r\n    <label for=\"país\">País</label>\r\n    <input type=\"país\" class=\"form-control\" name=\"pais\" placeholder=\"Ingresa tu país\" id=\"país\">\r\n  </div>\r\n  <div class=\"form-group\">\r\n    <label for=\"nro_cont\">Número de contagiados:</label>\r\n    <input type=\"number\" class=\"form-control\" name=\"nro_cont\" placeholder=\"Ingresa el Número de contagiados\" id=\"nro_cont\">\r\n  </div>\r\n  <button type=\"submit\" class=\"btn btn-primary\">Enviar</button>\r\n</form> \r\n\r\n</body>\r\n</html> " }, { "alpha_fraction": 0.6614173054695129, "alphanum_fraction": 0.6614173054695129, "avg_line_length": 26.44444465637207, "blob_id": "b4392d049e187a861e3f4d845ddaf48d48a017eb", "content_id": "7105d568ea471051d2f86ac0eb16339140a5ac72", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 255, "license_type": "no_license", "max_line_length": 104, "num_lines": 9, "path": "/admin/users/CRUD/delete.php", "repo_name": "idelaigue/tarea-bd", "src_encoding": "UTF-8", "text": "<?php\r\n/* Este archivo debe manejar la lógica de borrar un usuario (y los registros relacionados) como admin */\r\ninclude $_SERVER['DOCUMENT_ROOT'].'/db_config.php';\r\n\r\n$id=$_REQUEST[\"id\"];\r\n/* Consulta parametrizada que sí se ejecuta sobre la tabla usuario */\r\n$sql = \"DELETE FROM usuario WHERE id = $1\";\r\npg_query_params($dbconn, $sql, array($id));\r\n?>" }, { "alpha_fraction": 0.671324610710144, "alphanum_fraction": 0.6794759631156921, "avg_line_length": 36.40909194946289, "blob_id": "9bbb981ba6b9a310ebde09315594bb3d5cd72355", "content_id": "6617f55be7a6e600092a096dc7ce650c63843ba2", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 3460, "license_type": "no_license", "max_line_length": 248, "num_lines": 88, "path": "/README.txt", "repo_name": "idelaigue/tarea-bd", "src_encoding": "UTF-8", "text": " Informe Grupo 33\r\n\t\t Tarea 3 \r\nParticipantes:\r\n-Ignacia Delaigue - 201704160-9\r\n\r\n\r\nResumen de lo hecho en la TAREA 3:\r\nEn la carpeta Tarea3 se encuentran varios archivos, explicamos a continuación:\r\n\r\n-config.py se encuentra la configuración predeterminada con la respectiva contraseña, base de datos (su nombre), etc, solo fue reemplazado según lo explicado en ayudantia. \r\n\r\n-main.py vendría siendo nuestro main echo en py apoyado por Flask, aquí es donde se encuentran las api con sus links de consultas, teniendo los metodos GET POST PUT y DELETE por cada tabla. Tambien se encuentran las 3/8 consultas que se me pidieron\r\n\r\n-models.py tenemos el modelo de las tablas de nuestra nueva base de datos, con las respectivas tablas del modelo establecido en la tarea 2 \r\n\r\n-archivos_nombres.php vendria siendo la vista de la tabla en su backend\r\n\r\n-archivos_nombres.html vendria siendo la vista de la tabla en su frontend\r\n\r\n-Se probó en postman y al menos mostraba los get y post\r\n\r\n-los archivos html y php deben unirse al curl, ya que es el vinculo que une la api con nueva base de datos (deberian ser los mismo de la tarea 2)\r\n\r\n\r\n_________________________________________\r\nSupuestos y Consideraciones a lo largo del trabajo de la Tarea 2 para tener en consideración: \r\n\r\n- La tabla \"usuario\" fue reecha al igual que la base de datos entera (por ello se llama posgress y no posgres), esto debido a que \"id\" de \"usuario\" tuviera caracter serial, para que fuera atomatico su uso. \r\n\r\n- Se cambió la cantidad de caracteres en \"contraseña\" debido a que al hacer hashpass, tiraba 60 caracteres. \r\n\r\n-Se agregó una imagen con los datos de los alumnos (nosotros) en la página principal, guardada en la carpeta img. (imagen propia).\r\n\r\n-Se hizo uso de html para darle cierta estetica a la imagen implementada. (codigo propio)\r\n\r\n-Se eliminó un include (por ejemplo, que se encontraba en el navbar) debido a que generaba un bucle de redireccionamiento infinito, este include estaba incluido de primera en los archivos entregados en la tarea.\r\n\r\n-Se modifico la imagen del comienzo por otra. \r\n\r\n-Se modificó varias extensiones de archivo de html a php.\r\n\r\n-Se asume que todos los días se va a ingresar los valores de las monedas, aunque estos no cambien. \r\n_________________________________________\r\n DIFICULTADES TAREA 3\r\n\r\nSe me presentó dificultades en lo que es unir todas las partes de la tarea\r\n\r\n__________________________________________\r\n\t\tEXPLICACIÓN DE LOS ARCHIVOS\r\n\r\nCarpeta CRUD: \r\nSe tienen archivos para la administración de usuarios, con sus roles, monedas y asignaciones. Su función es crear usuarios, eliminar, modificar entre otros. \r\n\r\nCSS: \r\nFuentes\r\n\r\nIMG:\r\nImagenes (se agregó imagen propia)\r\n\r\nINCLUDE: \r\nHeader: Boostrap (diseño)\r\nnavbar: barra de navegacion de la pagina. \r\n\r\nSESION:\r\nComo iniciar sesión\r\nComo registrarse\r\nComo validar iniciar sesión\r\nComo validar desconectarse de la sesión\r\n\r\nUSER:\r\nPerfil del usuario\r\nBilletera del Usuario. 
\r\n\r\n\r\n\r\n\r\n__________________________________________\r\n TIEMPO EN LA TAREA\r\n\r\nEstimadores (todos medidos en horas[h]):\r\n\r\nIgnacia Delaigue: \r\n\r\nAnalisis del Enunciado: 1 [h]\r\nModifiaciones y Ajustes al Modelo: 5 [h]\r\nDiseño de Plataforma: 5[h]\r\nDesarrollo de Plataforma: 10[h]\r\nPruebas Finales: - [h]\r\n\r\n\r\n\r\n" }, { "alpha_fraction": 0.6437246799468994, "alphanum_fraction": 0.647773265838623, "avg_line_length": 33.57143020629883, "blob_id": "9665da6f648a4b0cb5f62105b208a8f958c567f4", "content_id": "a6561b6206733168ecebeaccc176c494f939958c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 247, "license_type": "no_license", "max_line_length": 72, "num_lines": 7, "path": "/user/profile.php", "repo_name": "idelaigue/tarea-bd", "src_encoding": "UTF-8", "text": "<?php\r\ninclude \"../db_config.php\";\r\ninclude \"../sesion/valida_sesion.php\";\r\n$pais = \"SELECT nombre FROM pais WHERE cod_pais = $1\"; \r\n$resultpais = pg_query_params($dbconn, $pais, array($_SESSION[\"pais\"]));\r\n$paiss = pg_fetch_row($resultpais);\r\n?>" }, { "alpha_fraction": 0.6326795816421509, "alphanum_fraction": 0.6349809765815735, "avg_line_length": 30.003204345703125, "blob_id": "75b8834f9d37173dd4f6d99507994360fbd8a96e", "content_id": "f9b9016049b084d98640cb9489b96195c71dee63", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10006, "license_type": "no_license", "max_line_length": 532, "num_lines": 312, "path": "/Tarea3/models.py", "repo_name": "idelaigue/tarea-bd", "src_encoding": "UTF-8", "text": "from flask_sqlalchemy import SQLAlchemy\r\nfrom sqlalchemy import text\r\nfrom datetime import datetime\r\n\r\n\r\n#Libreria SQL, por eso la inclusión del db. 
en la columna de los atributos#\r\ndb = SQLAlchemy()\r\n\r\n# Creación de la entidad Usuario (user) #\r\nclass Usuario(db.Model):\r\n\t__tablename__ = 'usuario'\r\n\tid = db.Column(db.Integer, primary_key=True)\r\n\tnombre = db.Column(db.String(50), nullable=False)\r\n\tapellido = db.Column(db.String(50), nullable=False)\r\n\tcorreo = db.Column(db.String(50), nullable=False)\r\n\tcontraseña = db.Column(db.String(50), nullable=False)\r\n\tpais = db.Column(db.Integer,db.ForeignKey('pais.cod_pais'))\r\n\tpais_id = db.relationship(\"Pais\")\r\n\tfecha_registro = db.Column(db.DateTime, default=db.func.current_timestamp())\r\n\tcuentas = db.relationship('Cuenta_bancaria',cascade=\"all,delete\", lazy='dynamic')\r\n\tmonedas_usuario = db.relationship('Usuario_tiene_moneda', cascade=\"all,delete\", lazy='dynamic')\r\n\t\r\n\t@classmethod\r\n\tdef create(cls, nombre, apellido, correo, contraseña, pais):\r\n\t\tusuario = Usuario(nombre=nombre, apellido=apellido, correo=correo, contraseña=contraseña, pais=pais)\r\n        #Creamos un nuevo usuario y lo guardamos en la bd, con sus respectivos atributos en un orden especifico#\r\n\t\treturn usuario.save()\r\n\r\n\tdef save(self):\r\n\t\ttry:\r\n\t\t\tdb.session.add(self)\r\n\t\t\tdb.session.commit()\r\n\r\n\t\t\treturn self\r\n\t\texcept:\r\n\t\t\treturn False\r\n\r\n\tdef json(self):\r\n\t\treturn {\r\n\t\t\t'id': self.id,\r\n\t\t\t'nombre': self.nombre,\r\n\t\t\t'apellido':self.apellido,\r\n\t\t\t'correo':self.correo,\r\n\t\t\t'contraseña':self.contraseña,\r\n\t\t\t'pais':self.pais,\r\n\t\t\t'fecha_registro': self.fecha_registro.strftime('%Y-%m-%d %H:%M:%S.%f') #Al usar Datatime este es el formato de la hora día, año, etc#\r\n\t\t}\r\n\tdef update(self):\r\n\t\tself.save()\r\n\r\n\tdef delete(self):\r\n\t\ttry:\r\n\t\t\tdb.session.delete(self)\r\n\t\t\tdb.session.commit()\r\n\r\n\t\t\treturn True\r\n\t\texcept:\r\n\t\t\treturn False\r\n\tdef fechas(fechaini, fechafinal):\r\n\t\ttry:\r\n\t\t\tresultado = db.session.execute('SELECT * FROM usuario WHERE fecha_registro >= :ini and fecha_registro <= :fin', {'ini': fechaini,'fin': fechafinal})\r\n\t\t\treturn resultado\r\n\t\texcept:\r\n\t\t\treturn False\r\n    \r\n\tdef Mayor(usuarios):\r\n\t\ttry:\r\n\t\t\tresultado = db.session.execute('SELECT usuario.nombre as \"Nombre\" , usuario.apellido as \"Apellido\", muv2.name_mon as \"Nombre Moneda\", muv2.bal as \"Cantidad\" FROM usuario INNER JOIN (SELECT muvs.nombre as name_mon, usuario_tiene_moneda.balance as bal, usuario_tiene_moneda.id_usuario as usu FROM usuario_tiene_moneda INNER join (SELECT nombre, id FROM moneda) as muvs on muvs.id=usuario_tiene_moneda.id_moneda)as muv2 on muv2.usu=usuario.id WHERE usuario.nombre= :usuarios ORDER BY muv2.bal desc LIMIT 1', {'usuarios': usuarios})\r\n\t\t\treturn resultado\r\n\t\texcept:\r\n\t\t\treturn False\r\n\r\n\r\nclass Pais(db.Model):\r\n\t__tablename__ = 'pais'\r\n\tcod_pais = db.Column(db.Integer, primary_key=True)\r\n\tusuarios = db.relationship('Usuario', cascade=\"all,delete\", lazy='dynamic')\r\n\tnombre = db.Column(db.String(50), nullable=False)\r\n\t\r\n\t\r\n\t@classmethod\r\n\tdef create(cls, nombre):\r\n\t\t# Instanciamos un nuevo usuario y lo guardamos en la bd\r\n\t\tpaises = Pais(nombre=nombre)\r\n\t\treturn paises.save()\r\n\r\n\tdef save(self):\r\n\t\ttry:\r\n\t\t\tdb.session.add(self)\r\n\t\t\tdb.session.commit()\r\n\t\t\treturn self\r\n\t\texcept:\r\n\t\t\treturn False\r\n\r\n\tdef json(self):\r\n\t\treturn {\r\n\t\t\t'cod_pais': self.cod_pais,\r\n\t\t\t'nombre': self.nombre\r\n\t\t}\r\n\tdef 
update(self):\r\n\t\tself.save()\r\n\r\n\tdef delete(self):\r\n\t\ttry:\r\n\t\t\tdb.session.delete(self)\r\n\t\t\tdb.session.commit()\r\n\r\n\t\t\treturn True\r\n\t\texcept:\r\n\t\t\treturn False\r\n\r\n\r\nclass Cuenta_bancaria(db.Model):\r\n\t__tablename__ = 'cuenta_bancaria'\r\n\tnumerocuenta = db.Column(db.Integer, primary_key=True)\r\n\tusuarios_id = db.relationship(\"Usuario\")\r\n\tid_usuario = db.Column(db.Integer,db.ForeignKey('usuario.id'))\r\n\tbalance = db.Column(db.Float, nullable=False)\r\n\t\r\n\t\r\n\t@classmethod\r\n\tdef create(cls, id_usuario, balance):\r\n\t\t# Instanciamos una nueva cuenta y la guardamos en la bd\r\n\t\tcuenta = Cuenta_bancaria(id_usuario=id_usuario, balance=balance)\r\n\t\treturn cuenta.save()\r\n\t\r\n\tdef custom(max_id):\r\n\t\ttry:\r\n\t\t\tresultado = db.session.execute('SELECT * FROM cuenta_bancaria WHERE balance >= :max', {'max': max_id})\r\n\t\t\treturn resultado\r\n\t\texcept:\r\n\t\t\treturn False\r\n\r\n\tdef save(self):\r\n\t\ttry:\r\n\t\t\tdb.session.add(self)\r\n\t\t\tdb.session.commit()\r\n\t\t\treturn self\r\n\t\texcept:\r\n\t\t\treturn False\r\n\r\n\tdef json(self):\r\n\t\treturn {\r\n\t\t\t'numero_cuenta': self.numerocuenta,\r\n\t\t\t'id_usuario': self.id_usuario,\r\n\t\t\t'balance': self.balance\r\n\t\t}\r\n\tdef update(self):\r\n\t\tself.save()\r\n\r\n\tdef delete(self):\r\n\t\ttry:\r\n\t\t\tdb.session.delete(self)\r\n\t\t\tdb.session.commit()\r\n\r\n\t\t\treturn True\r\n\t\texcept:\r\n\t\t\treturn False\r\n\r\n\r\n#Se crea la clase Moneda\r\nclass Moneda(db.Model):\r\n    __tablename__ = \"moneda\"\r\n    id = db.Column(db.Integer, primary_key=True)\r\n    sigla= db.Column(db.String(10),nullable=False)\r\n    nombre= db.Column(db.String(80),nullable=False)\r\n    precio_monedas= db.relationship('Precio_moneda',cascade=\"all,delete\",backref=\"parent\",lazy='dynamic')\r\n    usuario_tiene = db.relationship('Usuario_tiene_moneda',cascade=\"all,delete\",backref=\"parent\",lazy='dynamic')\r\n\r\n    @classmethod\r\n    def create(cls,nombre,sigla):\r\n        #añadimos una nueva moneda y la guardamos en la bd (mismo orden de argumentos que usa main.py)\r\n        moneda=Moneda(nombre=nombre,sigla=sigla)\r\n        return moneda.save()\r\n\r\n    def save(self):\r\n        try:\r\n            db.session.add(self)\r\n            db.session.commit()\r\n            return self\r\n        except:\r\n            return False\r\n\r\n    def json(self):\r\n        return {\r\n            'id': self.id,\r\n            'sigla': self.sigla,\r\n            'nombre': self.nombre\r\n        }\r\n\r\n    def update(self):\r\n        self.save()\r\n\r\n    def delete(self):\r\n        try:\r\n            db.session.delete(self)\r\n            db.session.commit()\r\n            return True\r\n        except:\r\n            return False\r\n\r\n    def cambio_valor(fechaini,fechafinal):\r\n        try:\r\n            resultado= db.session.execute('SELECT agrup_id.\"Moneda\", agrup_id.\"Veces que cambio el valor\" FROM (SELECT moneda.nombre as \"Moneda\",COUNT(precio_moneda.valor) as \"Veces que cambio el valor\" FROM moneda INNER JOIN precio_moneda on moneda.id=precio_moneda.id WHERE precio_moneda.fecha > :fechaini and precio_moneda.fecha < :fechafinal GROUP BY moneda.id)AS agrup_id ORDER BY agrup_id.\"Veces que cambio el valor\" desc LIMIT 1',{'fechaini':fechaini,'fechafinal':fechafinal})\r\n            return resultado\r\n        except:\r\n            return False\r\n\r\n    def circulacion(monedas):\r\n        try:\r\n            resultado = db.session.execute('SELECT nombre,sum(balance)as Cantidad_Total FROM moneda inner join usuario_tiene_moneda on moneda.id=usuario_tiene_moneda.id_moneda WHERE nombre= :moneda GROUP BY nombre',{'moneda':monedas})\r\n            return resultado\r\n        except:\r\n            return False\r\n\t\r\n    def maximo3(monedas):\r\n        try:\r\n            resultado = db.session.execute('SELECT 
nombre,COUNT(id) as cantidad FROM moneda inner join usuario_tiene_moneda on moneda.id=usuario_tiene_moneda.id_moneda GROUP BY nombre ORDER BY cantidad desc LIMIT 3')\r\n            return resultado\r\n        except:\r\n            return False\r\n\r\n    def maximo_historico(monedas):\r\n        try:\r\n            resultado= db.session.execute('SELECT nombre,max(precio_moneda.valor) as Valor_Maximo FROM moneda inner join precio_moneda on moneda.id=precio_moneda.id WHERE nombre= :moneda GROUP BY nombre',{'moneda':monedas})\r\n            return resultado\r\n        except:\r\n            return False\r\n\r\n\r\n\r\n\r\n#Se crea la clase precio_moneda\r\nclass Precio_moneda(db.Model):\r\n    __tablename__=\"precio_moneda\"\r\n    id = db.Column(db.Integer, db.ForeignKey('moneda.id'),primary_key=True )\r\n    moneda = db.relationship(\"Moneda\")\r\n    fecha= db.Column(db.DateTime, default=db.func.current_timestamp(),primary_key=True)\r\n    valormon= db.Column(db.Float,nullable=False)\r\n\r\n    @classmethod\r\n    def create(cls,id,valormon):\r\n        #obtenemos un nuevo valor de la moneda y lo guardamos en la bd\r\n        precio_moneda=Precio_moneda(id=id,valormon=valormon)\r\n        return precio_moneda.save()\r\n\r\n    def save(self):\r\n        try:\r\n            db.session.add(self)\r\n            db.session.commit()\r\n            return self\r\n        except:\r\n            return False\r\n\r\n    def json(self):\r\n        return {\r\n            'id': self.id,\r\n            'fecha': self.fecha.strftime('%Y-%m-%d %H:%M:%S.%f'),\r\n            'valor': self.valormon\r\n        }\r\n    def update(self):\r\n        self.save()\r\n\r\n    def delete(self):\r\n        try:\r\n            db.session.delete(self)\r\n            db.session.commit()\r\n            return True\r\n        except:\r\n            return False\r\n\t\r\n\r\n#Se crea la clase usuario_tiene_moneda (el nombre de la clase debe coincidir con el import de main.py)\r\nclass Usuario_tiene_moneda(db.Model):\r\n    __tablename__ = \"usuario_tiene_moneda\"\r\n    id_usuario = db.Column(db.Integer,db.ForeignKey(\"usuario.id\"),primary_key=True)\r\n    user = db.relationship(\"Usuario\",foreign_keys=[id_usuario])\r\n    id_moneda = db.Column(db.Integer,db.ForeignKey(\"moneda.id\"),primary_key=True)\r\n    moneda = db.relationship(\"Moneda\",foreign_keys=[id_moneda])\r\n    balance = db.Column(db.Float, nullable=False)\r\n\r\n    @classmethod\r\n    def create(cls, id_usuario, id_moneda, balance):\r\n        usuario_tiene_moneda = Usuario_tiene_moneda(id_usuario=id_usuario,id_moneda=id_moneda,balance=balance)\r\n        #Nuevo valor de la moneda que guardaremos en la bd#\r\n        return usuario_tiene_moneda.save()\r\n    def save(self):\r\n        try:\r\n            db.session.add(self)\r\n            db.session.commit()\r\n\r\n            return self\r\n        except:\r\n            return False\r\n\r\n    def json(self):\r\n        return {\r\n            'id_usuario': self.id_usuario,\r\n            'id_moneda': self.id_moneda,\r\n            'balance': self.balance\r\n        }\r\n    def update(self):\r\n        self.save()\r\n\r\n    def delete(self):\r\n        try:\r\n            db.session.delete(self)\r\n            db.session.commit()\r\n\r\n            return True\r\n        except:\r\n            return False\r\n\r\n\r\n\r\n\t\t\t" }, { "alpha_fraction": 0.5894632339477539, "alphanum_fraction": 0.596421480178833, "avg_line_length": 46, "blob_id": "967cf8197206300bb8eecbb4faf6f19f732471e4", "content_id": "4276ce8cf1fcd9922488e7a79cddd7b8acc861be", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 1006, "license_type": "no_license", "max_line_length": 276, "num_lines": 21, "path": "/user/wallet.php", "repo_name": "idelaigue/tarea-bd", "src_encoding": "UTF-8", "text": "<?php\r\ninclude \"../db_config.php\";\r\ninclude \"../sesion/valida_sesion.php\";\r\nif ( $_SESSION[\"tipo\"] == \"usuario\"){\r\n    $coin = array();\r\n    //query para obtener todas las monedas de un usuario con el precio y la fecha de hoy\r\n    $monedas = \"SELECT 
precio_moneda.id_moneda, balance, sigla, valor, nombre FROM usuario_tiene_moneda LEFT JOIN moneda ON id_moneda = id LEFT JOIN precio_moneda ON usuario_tiene_moneda.id_moneda = precio_moneda.id_moneda WHERE id_usuario = $1 AND precio_moneda.fecha = $2\";\r\n    $resultmoneda = pg_query_params($dbconn, $monedas, array($_SESSION[\"id_usuario\"], date(\"Y-m-d\")));\r\n    if(pg_num_rows($resultmoneda) > 0 ) { \r\n        while($row = pg_fetch_assoc($resultmoneda)) {\r\n            //pg_fetch_assoc entrega un arreglo asociativo: se accede por nombre de columna\r\n            $valor = $row[\"valor\"];\r\n            $nombremoneda = $row[\"nombre\"];\r\n            $cantidad = $row[\"balance\"];\r\n            $codigo = $row[\"sigla\"];\r\n            $valortotal = $cantidad * $valor;\r\n\r\n            $coin[] = array($codigo, $nombremoneda, $cantidad, $valor, $valortotal);\r\n        }\r\n    }\r\n}\r\n?>" }, { "alpha_fraction": 0.38836103677749634, "alphanum_fraction": 0.39073634147644043, "avg_line_length": 24.3125, "blob_id": "f3eed470e40df180b489a3af068a453b5134323d", "content_id": "53ad17cc59c8e1c69245d1e43bf01d44e3dac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 1684, "license_type": "no_license", "max_line_length": 56, "num_lines": 64, "path": "/admin/users/CRUD/all.php", "repo_name": "idelaigue/tarea-bd", "src_encoding": "UTF-8", "text": "\r\n<?php\r\n    include $_SERVER['DOCUMENT_ROOT'].'/db_config.php';\r\n    $sql = \"SELECT * FROM usuario\";\r\n    $result = pg_query_params($dbconn, $sql, array( ));\r\n    $otro=pg_query($dbconn, $sql);\r\n    \r\n    function id($result,$dbconn){\r\n        \r\n        if( pg_num_rows($result) > 0 ) {\r\n            while($row = pg_fetch_assoc($result)) {\r\n                echo '<br>' .$row[\"id\"]. '<br>';\r\n                \r\n            }\r\n            \r\n        }else {\r\n            echo \"Hubo un error al solicitar los datos\";\r\n            pg_close($dbconn);\r\n        }\r\n    }\r\n    function name($result,$dbconn){\r\n        \r\n        if( pg_num_rows($result) > 0 ) {\r\n            while($row = pg_fetch_assoc($result)) {\r\n                echo '<br>' .$row[\"nombre\"]. '<br>';\r\n                \r\n            }\r\n            \r\n        }else {\r\n            echo \"Hubo un error al solicitar los datos\";\r\n            pg_close($dbconn);\r\n        }\r\n    }\r\n    function apellido($result,$dbconn){\r\n        \r\n        if( pg_num_rows($result) > 0 ) {\r\n            while($row = pg_fetch_assoc($result)) {\r\n                echo '<br>' .$row[\"apellido\"]. '<br>';\r\n                \r\n            }\r\n            \r\n        }else {\r\n            echo \"Hubo un error al solicitar los datos\";\r\n            pg_close($dbconn);\r\n        }\r\n    }\r\n    function correo($result,$dbconn){\r\n        \r\n        if( pg_num_rows($result) > 0 ) {\r\n            while($row = pg_fetch_assoc($result)) {\r\n                echo '<br>' .$row[\"correo\"]. '<br>';\r\n                \r\n            }\r\n            \r\n        }else {\r\n            echo \"Hubo un error al solicitar los datos\";\r\n            pg_close($dbconn);\r\n        }\r\n    }\r\n    \r\n\r\n\r\n    \r\n\r\n?>" } ]
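Each file entry in the list that closes above follows one fixed shape: derived statistics (alpha_fraction, alphanum_fraction, avg_line_length, length_bytes, max_line_length, num_lines) alongside identifiers (blob_id, content_id, path, repo_name, language, src_encoding) and the complete escaped file body in text. A minimal sketch, assuming one such file list has been parsed from JSON into a Python list of dicts (the input file name files.json is an assumption, not part of the dump, and how the list is keyed inside a full repository record is not shown here):

    import json

    def iter_files(file_entries):
        # Each element is one file entry with the fields shown above.
        for entry in file_entries:
            yield entry["path"], entry.get("language"), entry["text"]

    with open("files.json", encoding="utf-8") as fh:  # hypothetical input holding one such list
        file_entries = json.load(fh)
    for path, language, text in iter_files(file_entries):
        print(path, language, len(text))  # len(text) counts characters of the decoded body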
18
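The bare 18 above closes the idelaigue/tarea-bd record and matches the count of file entries listed; the unlabeled values that follow are the next record's scalar metadata in order (repository name and URL, commit and tree hashes, branch, visit/revision/committer timestamps, GitHub id, star and fork counters, then a run of nulls) before its own file list begins. A minimal sketch, reusing the parsed entries from the previous example (the snapshot output directory name is an assumption), for materializing the embedded files on disk:

    import os

    def materialize(file_entries, out_dir="snapshot"):  # out_dir name is an assumption
        for entry in file_entries:
            # Paths are rooted at the repository ("/Tarea3/main.py"), so strip the leading slash.
            dest = os.path.join(out_dir, entry["path"].lstrip("/"))
            os.makedirs(os.path.dirname(dest), exist_ok=True)
            # newline="" preserves the \r\n endings already embedded in the text field.
            with open(dest, "w", encoding=entry.get("src_encoding", "UTF-8"), newline="") as fh:
                fh.write(entry["text"])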
capensis/canopsis-doc
https://github.com/capensis/canopsis-doc
d40d92df3b2e113e4c3cd10f759533c86f31dfa6
a3dd24879a966c3ed445c5af667b1b490858c211
27348c1c390d54ef3298e7641e43bf01d44e3dac
refs/heads/master
2021-01-17T09:16:46.089175
2015-03-19T11:22:39
2015-03-19T11:22:39
6,651,856
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6553704738616943, "alphanum_fraction": 0.6703044176101685, "avg_line_length": 37.68888854980469, "blob_id": "b7acce7ddb83c2a904f6245f567d8e5dce267ac3", "content_id": "ba0b2a71085e4e059ed0c148a4e292305d871f2f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1766, "license_type": "no_license", "max_line_length": 190, "num_lines": 45, "path": "/sphinx_index/connectors/connectors/interconnexionBDD.rst", "repo_name": "capensis/canopsis-doc", "src_encoding": "UTF-8", "text": "Interconnexion BDD\n==================\n\nCet onglet permet de définir techniquement la méthode d'interconnexion via des mécanismes de sélection de données en base\t\t\n\t\t\nDescription du type de connecteur\tCe type d'interconnexion permet de publier un événement dans le bus AMQP par l'intermédiaire d'un connecteur qui effectue des sélections en base de données.\n\n|run_manager|\n\nRenseignements sur les sources : Fichier Excel / CSV\n-----------------------------------------------------\n.. csv-table::\n :header: \"Item\", \"Commentaires\", \"Valeurs\"\n :widths: 15, 20, 15\n\n\t\"Nom du fichier\",\"Nom du fichier + emplacement\",\n\t\"Format du fichier\",\"Décrire le découpage du fichier\",\n\t\"Mise à disposition\",\"Quelles sont les méthodes de mise à disposition du fichier ? Copie, Partage réseau, FTP, Autre\",\n\n\nRenseignements sur les sources : Base de données\n------------------------------------------------\n.. csv-table::\n :header: \"Item\", \"Commentaires\", \"Valeurs\"\n :widths: 15, 20, 15\n\n\t\"Technologie\",\"S'agit-il d'une base de données de type : Oracle, SQL Server, MySQL\",\n\t\"Version\",,\n\t\"Nom de la base de données\",\"Nom de la base de données/instance\",\n\t\"Port du listener\",\"Fonction du type de base de données\",\n\t\"Compte d'authentification\",,\n\t\"Tables concernées\",,\n\t\"Exemples de requêtes\",\"L'ensemble des requêtes finales est à positionner dans l'onglet 'mapping' pour chaque attribut\",\n\t\"Commentaires\",,\n\nMatrice des flux\n----------------\n.. csv-table::\n :header: \"Source\", \"Destination\", \"Protocole\",\"Ports\",\"Remarques\"\n :widths: 15, 20, 15,15,15\n\n\t\"Connecteur EDC\",\"Application\",\"Listener de BDD\",\"Fonction de la BDD\",\n\t\"Connecteur EDC\",\"Bus Canopsis\",\"AMQP\",\"5672\",\n\n.. |run_manager| image:: ../_static/images/connectors/InterconnecionBDD.png\n" }, { "alpha_fraction": 0.6365384459495544, "alphanum_fraction": 0.6596153974533081, "avg_line_length": 25.66666603088379, "blob_id": "3abdee2396238648cf16a5343ac66ffa48e1769a", "content_id": "2a3b674c57f96fe53b0f7cc37337adef790575a0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1055, "license_type": "no_license", "max_line_length": 98, "num_lines": 39, "path": "/sphinx_index/connectors/connectors/description.rst", "repo_name": "capensis/canopsis-doc", "src_encoding": "UTF-8", "text": "Spécifications connecteur Canopsis\n==================================\n \n\nCe document permet de spécifier techniquement les différentes\npossibilités d'interconnexion entre une application et l'outil Canopsis.\n\nPour ce faire, il vous est demandé de saisir certaines informations de\nla manière suivante :\n\n \n\n- Onglet Description\n\n- Onglet Interconnexion pour le type sélectionné\n\n- Onglet Mapping\n\n- Onglet Conclusion\n\n.. csv-table::\n :header: \"Application ciblée\", \"Version\"\n :widths: 15, 15\n\n\t\"\", \"\"\n\n.. 
csv-table::\n :header: \"Type d'interconnexion possible\", \"Flux\" , \" \", \"Fichiers\", \"\", \"BDD\", \"\",\"\",\"\",\"CLI\"\n :widths: 15, 15, 15, 15, 15, 15, 15, 15, 15, 15\n\n\t\"\", \"AMQP\", \"API\",\"Trap SNMP\", \"Log\", \"Excel/CSV\",\"Oracle\",\"SQL Server\", \"MySql\", \"Send_Event\"\n\t\"Cochez pour selectionner\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\"\n\n\nSi le type d'interconnexion possible n'est pas listé, vous devez le\nspécifier.\n\n Une fois ce type renseigné, RDV sur l'onglet correspondant au type\nd'interconnexion pour saisir de plus amples informations\n" }, { "alpha_fraction": 0.7164835333824158, "alphanum_fraction": 0.7296703457832336, "avg_line_length": 31.571428298950195, "blob_id": "7a6cd8d738340b2fe5f9251357d54d48600392ce", "content_id": "668e0e27681ce81142f37559fa0d3876f5753bd5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 464, "license_type": "no_license", "max_line_length": 124, "num_lines": 14, "path": "/sphinx_index/connectors/connectors/conclusion.rst", "repo_name": "capensis/canopsis-doc", "src_encoding": "UTF-8", "text": "Conclusion\n==========\n\nL'idée de cet onglet est de conclure quant à la faisabilité de l'interconnexion entre l'applicatif ciblé et l'outil Canopsis\n\n.. csv-table::\n :header: \"Questions\", \"Réponses\", \"Commentaires / Justifications\"\n :widths: 15, 20, 15\n\n\t\"Application / Version\",,\n\t\"Interconnectable par un mécanisme standard\",,\n\t\"Interconnectable par un mécanisme via un développement complémentaire\",,\n\t\"Mapping complet ?\",,\n\t\"Non interconnectable\",," }, { "alpha_fraction": 0.6567977666854858, "alphanum_fraction": 0.6635490655899048, "avg_line_length": 41.81962966918945, "blob_id": "f51836cbc5e9108264118654c22d7992914d3bef", "content_id": "59dac8e41f75efb0ca0b334d137e5c64ef8c8e34", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 16152, "license_type": "no_license", "max_line_length": 357, "num_lines": 377, "path": "/sphinx_index/howto-rst.html", "repo_name": "capensis/canopsis-doc", "src_encoding": "UTF-8", "text": "\n\n<!DOCTYPE html>\n<!--[if IE 8]><html class=\"no-js lt-ie9\" lang=\"en\" > <![endif]-->\n<!--[if gt IE 8]><!--> <html class=\"no-js\" lang=\"en\" > <!--<![endif]-->\n<head>\n <meta charset=\"utf-8\">\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1.0\">\n \n <title>Autogenerated documentation &mdash; howto write docstrings 1.0 documentation</title>\n \n\n \n \n\n \n <link href='https://fonts.googleapis.com/css?family=Lato:400,700|Roboto+Slab:400,700|Inconsolata:400,700' rel='stylesheet' type='text/css'>\n\n \n \n\n <script type=\"text/javascript\">\n var DOCUMENTATION_OPTIONS = {\n URL_ROOT:'./',\n VERSION:'1.0',\n COLLAPSE_INDEX:false,\n FILE_SUFFIX:'.html',\n HAS_SOURCE: true\n };\n </script>\n <script type=\"text/javascript\" src=\"_static/jquery.js\"></script>\n <script type=\"text/javascript\" src=\"_static/underscore.js\"></script>\n <script type=\"text/javascript\" src=\"_static/doctools.js\"></script>\n\n \n\n \n\n \n \n <link rel=\"stylesheet\" href=\"theme.css\" type=\"text/css\" />\n <script type=\"text/javascript\" src=\"_static/js/theme.js\"></script>\n \n\n \n <link rel=\"top\" title=\"howto write docstrings 1.0 documentation\" href=\"#\"/> \n\n <script src=\"//cdnjs.cloudflare.com/ajax/libs/modernizr/2.6.2/modernizr.min.js\"></script>\n\n</head>\n\n<body class=\"wy-body-for-nav\">\n\n <div 
class=\"wy-grid-for-nav\">\n\n \n <nav data-toggle=\"wy-nav-shift\" class=\"wy-nav-side\">\n <div class=\"wy-side-nav-search\">\n <a href=\"#\" class=\"icon icon-home\"> howto write docstrings</a>\n <form id =\"rtd-search-form\" class=\"wy-form\" action=\"search.html\" method=\"get\">\n <input type=\"text\" name=\"q\" placeholder=\"Search docs\" />\n <input type=\"hidden\" name=\"check_keywords\" value=\"yes\" />\n <input type=\"hidden\" name=\"area\" value=\"default\" />\n</form>\n </div>\n\n <div class=\"wy-menu wy-menu-vertical\" data-spy=\"affix\">\n \n \n <!-- Local TOC -->\n <div class=\"local-toc\"><ul>\n<li><a class=\"reference internal\" href=\"#\">Autogenerated documentation</a><ul>\n<li><a class=\"reference internal\" href=\"#sphinx-apidoc\">Sphinx-apidoc</a></li>\n<li><a class=\"reference internal\" href=\"#plaintext-docstrings\">Plaintext docstrings</a></li>\n<li><a class=\"reference internal\" href=\"#useful-rst-directives\">Useful rst directives</a></li>\n<li><a class=\"reference internal\" href=\"#useful-sphinx-directives\">Useful sphinx directives</a></li>\n<li><a class=\"reference internal\" href=\"#template\">Template</a></li>\n</ul>\n</li>\n</ul>\n</div>\n \n </div>\n &nbsp;\n </nav>\n\n <section data-toggle=\"wy-nav-shift\" class=\"wy-nav-content-wrap\">\n\n \n <nav class=\"wy-nav-top\">\n <i data-toggle=\"wy-nav-top\" class=\"icon icon-reorder\"></i>\n <a href=\"#\">howto write docstrings</a>\n </nav>\n\n\n \n <div class=\"wy-nav-content\">\n <div class=\"rst-content\">\n <ul class=\"wy-breadcrumbs\">\n <li><a href=\"#\">Docs</a> &raquo;</li>\n <li><a href=\"\">Autogenerated documentation</a></li>\n <li class=\"wy-breadcrumbs-aside\">\n \n <a href=\"_sources/index.txt\" rel=\"nofollow\"> View page source</a>\n \n </li>\n</ul>\n<hr/>\n\n \n <div class=\"section\" id=\"autogenerated-documentation\">\n<h1>Autogenerated documentation<a class=\"headerlink\" href=\"#autogenerated-documentation\" title=\"Permalink to this headline\">¶</a></h1>\n<p>Source : <a class=\"reference external\" href=\"http://sphinx-doc.org/\">http://sphinx-doc.org/</a></p>\n<div class=\"section\" id=\"sphinx-apidoc\">\n<h2>Sphinx-apidoc<a class=\"headerlink\" href=\"#sphinx-apidoc\" title=\"Permalink to this headline\">¶</a></h2>\n<p>Sphinx allows to autodocument code using docstrings. 
When this docstring\ndocumentation is build, a script (sphinx-apidoc) will check each package\nand each module to write those lines in rst files (a kind of hook) :</p>\n<div class=\"highlight-rest\"><div class=\"highlight\"><pre><span class=\"p\">..</span> <span class=\"ow\">automodule</span><span class=\"p\">::</span> canopsis.common.init\n <span class=\"nc\">:members:</span> <span class=\"nf\"># documents members (classes, methods, attributes)</span>\n <span class=\"nc\">:undoc-members:</span> <span class=\"nf\"># lists names of undocumented members</span>\n <span class=\"nc\">:show-inheritance:</span>\n</pre></div>\n</div>\n<p>which means you don&#8217;t have to manually write this :</p>\n<div class=\"highlight-rest\"><div class=\"highlight\"><pre><span class=\"p\">..</span> <span class=\"ow\">py:class</span><span class=\"p\">::</span> canopsis.common.init\n\n copy/paste docstring\n\n<span class=\"p\"> ..</span> <span class=\"ow\">py:method</span><span class=\"p\">::</span> a_good_method\n\n copy/paste docstring\n\n<span class=\"cp\">...</span>\n</pre></div>\n</div>\n</div>\n<div class=\"section\" id=\"plaintext-docstrings\">\n<h2>Plaintext docstrings<a class=\"headerlink\" href=\"#plaintext-docstrings\" title=\"Permalink to this headline\">¶</a></h2>\n<p>Every single docstring will be part of your documentation. Even if attributes\ndon&#8217;t have docstrings in python, sphinx allows to document them with the\ndirective <tt class=\"docutils literal\"><span class=\"pre\">#:</span></tt>. By the way, there are several choices for attributes :</p>\n<div class=\"highlight-python\"><div class=\"highlight\"><pre><span class=\"sd\">&quot;&quot;&quot;</span>\n<span class=\"sd\">Module docstring</span>\n<span class=\"sd\">&quot;&quot;&quot;</span>\n\n<span class=\"k\">class</span> <span class=\"nc\">Foo</span><span class=\"p\">:</span>\n <span class=\"sd\">&quot;&quot;&quot;Docstring for class Foo.&quot;&quot;&quot;</span>\n\n <span class=\"c\">#: Doc comment for class attribute Foo.bar.</span>\n <span class=\"c\">#: It can have multiple</span>\n <span class=\"n\">lines</span><span class=\"o\">.</span> <span class=\"n\">bar</span> <span class=\"o\">=</span> <span class=\"mi\">1</span>\n\n <span class=\"n\">flox</span> <span class=\"o\">=</span> <span class=\"mf\">1.5</span> <span class=\"c\">#: Doc comment for Foo.flox. 
One line only.</span>\n\n <span class=\"n\">baz</span> <span class=\"o\">=</span> <span class=\"mi\">2</span>\n <span class=\"sd\">&quot;&quot;&quot;Docstring for class attribute Foo.baz.&quot;&quot;&quot;</span>\n\n <span class=\"k\">def</span> <span class=\"nf\">__init__</span><span class=\"p\">(</span><span class=\"bp\">self</span><span class=\"p\">):</span>\n <span class=\"c\">#: Doc comment for instance attribute qux.</span>\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">qux</span> <span class=\"o\">=</span> <span class=\"mi\">3</span>\n\n <span class=\"bp\">self</span><span class=\"o\">.</span><span class=\"n\">spam</span> <span class=\"o\">=</span> <span class=\"mi\">4</span> <span class=\"s\">&quot;&quot;&quot;Docstring for instance attribute spam.&quot;&quot;&quot;</span>\n</pre></div>\n</div>\n<div class=\"admonition note\">\n<p class=\"first admonition-title\">Note</p>\n<p class=\"last\">A quote from sphinx documentation : &#8220;If you document decorated\nfunctions or methods, keep in mind that autodoc retrieves its docstrings by\nimporting the module and inspecting the <tt class=\"docutils literal\"><span class=\"pre\">__doc__</span></tt> attribute of the given\nfunction or method. That means that if a decorator replaces the decorated\nfunction with another, it must copy the original <tt class=\"docutils literal\"><span class=\"pre\">__doc__</span></tt> to the new\nfunction.&#8221;</p>\n</div>\n</div>\n<div class=\"section\" id=\"useful-rst-directives\">\n<h2>Useful rst directives<a class=\"headerlink\" href=\"#useful-rst-directives\" title=\"Permalink to this headline\">¶</a></h2>\n<p>Docstrings can be written in plain text or be formatted in rst format. Many\nthings can be done in rst.</p>\n<div class=\"highlight-rst\"><div class=\"highlight\"><pre><span class=\"p\">..</span> <span class=\"ow\">tip</span><span class=\"p\">::</span> You can hightlight parts\n</pre></div>\n</div>\n<div class=\"admonition tip\">\n<p class=\"first admonition-title\">Tip</p>\n<p class=\"last\">You can highlight parts</p>\n</div>\n<p>Plenty other highlighting boxes are available. However, semantics and colors\nare identic in some cases. 
Only the following should be used :</p>\n<blockquote>\n<div><ul>\n<li><div class=\"first admonition note\">\n<p class=\"first admonition-title\">Note</p>\n<p class=\"last\">note</p>\n</div>\n</li>\n<li><div class=\"first admonition important\">\n<p class=\"first admonition-title\">Important</p>\n<p class=\"last\">important</p>\n</div>\n</li>\n<li><div class=\"first admonition warning\">\n<p class=\"first admonition-title\">Warning</p>\n<p class=\"last\">warning</p>\n</div>\n</li>\n<li><div class=\"first admonition danger\">\n<p class=\"first admonition-title\">Danger</p>\n<p class=\"last\">danger</p>\n</div>\n</li>\n<li><div class=\"first admonition error\">\n<p class=\"first admonition-title\">Error</p>\n<p class=\"last\">error</p>\n</div>\n</li>\n</ul>\n</div></blockquote>\n<p>An exhaustive list of rst directives can be found at :\n<a class=\"reference external\" href=\"http://docutils.sourceforge.net/docs/ref/rst/directives.html\">http://docutils.sourceforge.net/docs/ref/rst/directives.html</a>.</p>\n</div>\n<div class=\"section\" id=\"useful-sphinx-directives\">\n<h2>Useful sphinx directives<a class=\"headerlink\" href=\"#useful-sphinx-directives\" title=\"Permalink to this headline\">¶</a></h2>\n<p>Sphinx enables to use others tags (<tt class=\"docutils literal\"><span class=\"pre\">..</span> <span class=\"pre\">toctree::</span></tt>, glossary support,\netc.). An exhaustive list of the new directives can be found here :\n<a class=\"reference external\" href=\"http://sphinx-doc.org/extdev/nodes.html\">http://sphinx-doc.org/extdev/nodes.html</a>. However, the following seem\ninteressant to use :</p>\n<div class=\"highlight-rst\"><div class=\"highlight\"><pre><span class=\"p\">..</span> <span class=\"ow\">codeauthor</span><span class=\"p\">::</span> jbb jbbraun@capensis.org\n</pre></div>\n</div>\n<p>codeauthor should only be used in module docstrings. Add your name to the\ncodeauthor list if you contributed to the module.</p>\n<div class=\"highlight-rst\"><div class=\"highlight\"><pre><span class=\"p\">..</span> <span class=\"ow\">versionadded</span><span class=\"p\">::</span> 2.0 Doesn&#39;t bug anymore\n</pre></div>\n</div>\n<div class=\"versionadded\">\n<p><span class=\"versionmodified\">New in version 2.0: </span>Doesn&#8217;t bug anymore</p>\n</div>\n<div class=\"highlight-rst\"><div class=\"highlight\"><pre><span class=\"p\">..</span> <span class=\"ow\">data</span><span class=\"p\">::</span> some data\n</pre></div>\n</div>\n<dl class=\"data\">\n<dt>\n<tt class=\"descname\">some data</tt></dt>\n<dd></dd></dl>\n\n<div class=\"highlight-rst\"><div class=\"highlight\"><pre><span class=\"p\">..</span> <span class=\"ow\">seealso</span><span class=\"p\">::</span> See ?\n</pre></div>\n</div>\n<div class=\"admonition seealso\">\n<p class=\"first admonition-title\">See also</p>\n<p class=\"last\">See ?</p>\n</div>\n<p>Documenting methods or functions should be done with the tags bellow. Note\nthat your plaintext comments and those tags _must_ be separated with a blank\nline. Just like the colored boxes, there are equivalences. 
Only some of them\nshould be used:</p>\n<blockquote>\n<div><ul>\n<li><p class=\"first\"><tt class=\"docutils literal\"><span class=\"pre\">:param</span> <span class=\"pre\">[type]</span> <span class=\"pre\">&lt;name&gt;:</span></tt> : parameter</p>\n</li>\n<li><p class=\"first\"><tt class=\"docutils literal\"><span class=\"pre\">:type</span> <span class=\"pre\">&lt;param_name&gt;:</span></tt> : parameter type, if you need more than just a word</p>\n</li>\n<li><dl class=\"first docutils\">\n<dt><tt class=\"docutils literal\"><span class=\"pre\">:var</span> <span class=\"pre\">&lt;name&gt;:</span></tt> <span class=\"classifier-delimiter\">:</span> <span class=\"classifier\">variable in the function (alternative for commenting</span></dt>\n<dd><p class=\"first last\">attributes)</p>\n</dd>\n</dl>\n</li>\n<li><p class=\"first\"><tt class=\"docutils literal\"><span class=\"pre\">:return:</span></tt> : returned value</p>\n</li>\n<li><p class=\"first\"><tt class=\"docutils literal\"><span class=\"pre\">:rtype:</span></tt> : returned type</p>\n</li>\n<li><p class=\"first\"><tt class=\"docutils literal\"><span class=\"pre\">:raises</span> <span class=\"pre\">&lt;error&gt;:</span></tt> : condition to raise the error</p>\n</li>\n</ul>\n</div></blockquote>\n</div>\n<div class=\"section\" id=\"template\">\n<h2>Template<a class=\"headerlink\" href=\"#template\" title=\"Permalink to this headline\">¶</a></h2>\n<p>This part recaps everything, providing an example:</p>\n<div class=\"highlight-python\"><div class=\"highlight\"><pre><span class=\"k\">def</span> <span class=\"nf\">prepare_coffee</span><span class=\"p\">(</span><span class=\"n\">cup</span><span class=\"p\">,</span> <span class=\"n\">volume</span><span class=\"p\">,</span> <span class=\"n\">sugar</span><span class=\"o\">=</span><span class=\"bp\">True</span><span class=\"p\">):</span>\n    <span class=\"sd\">&quot;&quot;&quot;</span>\n<span class=\"sd\">    Records a coffee command and returns a filled cup</span>\n\n<span class=\"sd\">    .. data:: Run this routine at least once a day</span>\n\n<span class=\"sd\">    :param cup: name of the cup, or its ID</span>\n<span class=\"sd\">    :type cup: str or int</span>\n<span class=\"sd\">    :param int volume: volume of coffee in cL</span>\n<span class=\"sd\">    :param bool sugar: sweet coffee or not</span>\n<span class=\"sd\">    :var turns: number of times the teaspoon stirred in</span>\n<span class=\"sd\">    :return: the filled cup ID</span>\n<span class=\"sd\">    :rtype: int</span>\n<span class=\"sd\">    :raises TypeError: if the volume is not an int</span>\n<span class=\"sd\">    :raises ValueError: if the volume exceeds the cup volume</span>\n\n<span class=\"sd\">    .. warning:: Cup IDs are not necessarily unique</span>\n<span class=\"sd\">    &quot;&quot;&quot;</span>\n</pre></div>\n</div>\n<p>By convention, parameters are given in this order:</p>\n<blockquote>\n<div><ul class=\"simple\">\n<li>plaintext docstring (overall explanations)</li>\n<li>params</li>\n<li>return</li>\n<li>raises</li>\n</ul>\n</div></blockquote>\n<p>Note that the warning is written after the parameters because it relates to\nthem. 
Otherwise, any text should be written before the parameters.</p>\n<dl class=\"function\">\n<dt id=\"prepare_coffee\">\n<tt class=\"descname\">prepare_coffee</tt><big>(</big><em>cup</em>, <em>volume</em>, <em>sugar=True</em><big>)</big><a class=\"headerlink\" href=\"#prepare_coffee\" title=\"Permalink to this definition\">¶</a></dt>\n<dd><p>Records a coffee command and returns a filled cup</p>\n<dl class=\"data\">\n<dt>\n<tt class=\"descname\">Run this routine at least once a day</tt></dt>\n<dd></dd></dl>\n\n<table class=\"docutils field-list\" frame=\"void\" rules=\"none\">\n<col class=\"field-name\" />\n<col class=\"field-body\" />\n<tbody valign=\"top\">\n<tr class=\"field-odd field\"><th class=\"field-name\">Parameters:</th><td class=\"field-body\"><ul class=\"first simple\">\n<li><strong>cup</strong> (<em>str or int</em>) &#8211; name of the cup, or its ID</li>\n<li><strong>volume</strong> (<em>int</em>) &#8211; volume of coffee in cL</li>\n<li><strong>sugar</strong> (<em>bool</em>) &#8211; sweet coffee or not</li>\n</ul>\n</td>\n</tr>\n<tr class=\"field-even field\"><th class=\"field-name\">Variables:</th><td class=\"field-body\"><p class=\"first\"><strong>turns</strong> &#8211; number of times the teaspoon stirred in</p>\n</td>\n</tr>\n<tr class=\"field-odd field\"><th class=\"field-name\">Returns:</th><td class=\"field-body\"><p class=\"first\">the filled cup ID</p>\n</td>\n</tr>\n<tr class=\"field-even field\"><th class=\"field-name\">Return type:</th><td class=\"field-body\"><p class=\"first\">int</p>\n</td>\n</tr>\n<tr class=\"field-odd field\"><th class=\"field-name\">Raises:</th><td class=\"field-body\"><ul class=\"first last simple\">\n<li><strong>TypeError</strong> &#8211; if the volume is not an int</li>\n<li><strong>ValueError</strong> &#8211; if the volume exceeds the cup volume</li>\n</ul>\n</td>\n</tr>\n</tbody>\n</table>\n<div class=\"admonition warning\">\n<p class=\"first admonition-title\">Warning</p>\n<p class=\"last\">Cup IDs are not necessarily unique</p>\n</div>\n</dd></dl>\n\n</div>\n</div>\n\n\n          <footer>\n  \n\n  <hr/>\n\n  <p>\n      &copy; Copyright 2014, Jean-Baptiste Braun.\n  </p>\n\n  <a href=\"https://www.github.com/snide/sphinx_rtd_theme\">Sphinx theme</a> provided by <a href=\"http://readthedocs.org\">Read the Docs</a>\n</footer>\n        </div>\n      </div>\n\n    </section>\n\n  </div>\n  \n\n</body>\n</html>\n" }, { "alpha_fraction": 0.5400089621543884, "alphanum_fraction": 0.5592311024665833, "avg_line_length": 38.24561309814453, "blob_id": "42a44043df9d09b6fd428874eab1600c3afa944e", "content_id": "c4601938849291f9339c812c51ba9592974b89a6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 2252, "license_type": "no_license", "max_line_length": 203, "num_lines": 57, "path": "/sphinx_index/connectors/connectors/interconnexionFichiers.rst", "repo_name": "capensis/canopsis-doc", "src_encoding": "UTF-8", "text": "File interconnection\n=======================\n\nThis tab is used to technically define the interconnection method based on logs and SNMP traps\nConnector type description\tThis type of interconnection publishes an event on the AMQP bus through a connector that receives log files as well as SNMP traps\n\nhttp://www.logstash.net\n\nApplication type\tChoose an application type among those shown on the diagram\n\n|run_manager|\n\nInformation about the sources: Logs\n--------------------------------------\n.. 
csv-table::\n   :header: \"Item\", \"Comments\", \"Values\"\n   :widths: 15, 20, 15\n\n\t\"Log file names\",\"List of the logs + filesystem location\",\n\t\"Log file format\",\"Describe the format of the log files to use\",\n\t\"File transmission agent\",\"Which agent can be used? rsyslog, nxlog, snare, other\",\n\n\nInformation about the sources: SNMP traps\n-------------------------------------------\n\n.. |I1| replace:: MIB file names\n.. |C1| replace:: MIB containing the SNMP traps to intercept\n\n.. |I2| replace:: Names of the NOTIFICATION-TYPE or TRAP-TYPE objects\n.. |C2| replace:: In the MIB, which objects should be intercepted?\n\n.. |I3| replace:: SNMP version / SNMP community / SNMP auth\n.. |C3| replace:: SNMP administrative information\n\n+-----------------------+--------------+---------+\n| Item                  | Comments     | Values  |\n+=======================+==============+=========+\n| |I1|                  | |C1|         |         |\n+-----------------------+--------------+---------+\n| |I2|                  | |C2|         |         |\n+-----------------------+--------------+---------+\n| |I3|                  | |C3|         |         |\n+-----------------------+--------------+---------+\n\nFlow matrix\n----------------\n.. csv-table::\n   :header: \"Source\", \"Destination\", \"Protocol\",\"Ports\",\"Remarks\"\n   :widths: 15, 20, 15,15,15\n\n\t\"Logstash connector\",\"Canopsis bus\",\"AMQP\",\"5672\",\n\t\"Logstash connector\",\"Canopsis bus\",\"HTTP(s)\",\"5672\",\n\t\"Application\",\"Logstash connector\",\"TCP\",\"5140\",\n\t\"Application\",\"Logstash connector\",\"SNMP trap (UDP)\",\"162\",\n\n.. |run_manager| image:: ../_static/images/connectors/InterconnecionFlux.png\n" }, { "alpha_fraction": 0.5311535000801086, "alphanum_fraction": 0.5332425236701965, "avg_line_length": 32.76993942260742, "blob_id": "d9c07607f9d76a67f276e2fe9241b0c8bccfd034", "content_id": "e873c4023ea0a003c9e3826113f70844f0befd78", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11010, "license_type": "no_license", "max_line_length": 78, "num_lines": 326, "path": "/sphinx_index/doc_builder.py", "repo_name": "capensis/canopsis-doc", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# --------------------------------\n# Copyright (c) 2014 \"Capensis\" [http://www.capensis.com]\n#\n# This file is part of Canopsis.\n#\n# Canopsis is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Canopsis is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with Canopsis. 
If not, see <http://www.gnu.org/licenses/>.\n# ---------------------------------\n\nfrom os import chdir, makedirs, remove, rename\nfrom os.path import isdir, isfile, exists, basename\nfrom json import load\nfrom glob import glob\nfrom subprocess import Popen, PIPE\nfrom shutil import rmtree, move, copy\n\nCONF_FILE = 'doc_builder.json'\n\n# constants are json-parsed from CONF_FILE\nSPHINX_INDEX = {}\nAUTO_LIBS = {}\nHTML_INDEX_TITLE = ''\nVERSIONS = {}\nAUTODOC_DIRECTORY = ''\nCONNECTORS = {}\nLOG_FILE = ''\n\n\nclass DocBuilder:\n \"\"\"Contains imperative style methods to build documentation\"\"\"\n\n def __init__(self):\n # Loads conf\n JsonConfigurationLoader()\n\n # Remove previous log in case of rebuilding\n if isfile(LOG_FILE):\n remove(LOG_FILE)\n\n # Make sure we do not overwrite something\n if exists('doc'):\n self.log(\n 'A directory named \\'doc\\' exists : aborting procedure')\n quit()\n else:\n self.log('Creating directory \\'doc\\'')\n makedirs('doc')\n chdir('doc')\n\n def sphinx_index(self):\n \"\"\"Get core documentation and remove parts we don't need\"\"\"\n\n self.git_sc(\n SPHINX_INDEX['dir'],\n SPHINX_INDEX['branch'],\n SPHINX_INDEX['repo']\n )\n self.mvrm(SPHINX_INDEX['dir'], '.')\n remove('doc_builder.py')\n remove('doc_builder.json')\n\n def generate_html_index(self):\n \"\"\"Insert category titles in the pre-existing html index page\"\"\"\n\n with open('index.html', 'r+') as root_index:\n with open('new_index.html', 'w') as new_index:\n for line in root_index:\n # We write index in new_index...\n new_index.write(line)\n if 'toctree-l1 current' in line: # ...and insert titles\n for version in VERSIONS:\n version_name = version['dir'].replace('doc/', '')\n new_index.write(HTML_INDEX_TITLE.format(\n version_name,\n version_name.capitalize()\n ))\n if CONNECTORS:\n new_index.write(HTML_INDEX_TITLE.format(\n 'connectors',\n 'Connectors'\n ))\n rename('new_index.html', 'index.html')\n\n def autodoc(self):\n \"\"\"Generate rst `hooks` for docstrings in *_AUTODOC dirs\"\"\"\n\n for auto_lib in AUTO_LIBS:\n # Autodocumentation is stored in a temp directory\n # /version/_AUTODOC. Since it is possible to have multiple\n # versions of canopsis to autodocument, we check if the\n # version already has its directory.\n if not isdir(auto_lib['version'] + '_AUTODOC'):\n makedirs(auto_lib['version'] + '_AUTODOC')\n\n self.git_sc(\n auto_lib['sources'],\n auto_lib['branch'],\n auto_lib['repo']\n )\n\n for project in glob(auto_lib['packages']):\n if isdir(project): # ignore isolated files\n self.sphinx_apidoc(\n project,\n '{0}_AUTODOC'.format(auto_lib['version'])\n )\n # we don't need sources anymore\n rmtree(auto_lib['sources'].split('/')[-1])\n\n def autodoc_index(self):\n \"\"\"Generate indexes paths for each _AUTODOC dir\"\"\"\n\n for autolib_dir in [d for d in glob('*') if d[-8:] == '_AUTODOC']:\n chdir(autolib_dir)\n auto_index = ('API\\n'\n '===\\n\\n'\n '.. 
toctree::\\n'\n ' :maxdepth: 1\\n'\n ' :titlesonly:\\n\\n')\n\n for rst in glob('*.rst'): # rst files are in subdirectories\n # modules.rst and canopsis.rst are heavy to read\n if basename(rst) in ['modules.rst', 'canopsis.rst']:\n remove(rst)\n else:\n auto_index += ' {0}\\n'.format(rst.replace('.rst', ''))\n with open('index.rst', 'w') as index_rst:\n index_rst.write(auto_index)\n chdir('..')\n\n def make_versions(self):\n \"\"\"Download, insert autodoc, sphinx-build and rm sources\"\"\"\n\n for version in VERSIONS:\n self.git_sc(\n version['dir'],\n version['branch'],\n version['repo']\n )\n\n final_directory = version['dir'].split('/')[-1]\n autodoc_tmp_dir = final_directory + '_AUTODOC'\n autodoc_dest_dir = final_directory + AUTODOC_DIRECTORY\n if isdir(autodoc_tmp_dir):\n copy(autodoc_tmp_dir + '/index.rst', autodoc_dest_dir)\n remove(autodoc_tmp_dir + '/index.rst')\n self.mvrm(\n autodoc_tmp_dir,\n autodoc_dest_dir\n )\n\n self.sphinx_build(final_directory)\n # Just extract output from _build and rm sources\n self.mvrmbuild(final_directory)\n\n def connectors(self):\n \"\"\"\n Download each connector, format files out of it and build.\n All connectors are one single sphinx-project.\n \"\"\"\n\n chdir('connectors')\n connector_index_list = '' # string to append to the index\n for connector in CONNECTORS:\n self.git_sc(\n connector['dir'],\n connector['branch'],\n connector['repo']\n )\n\n for rst in glob('doc/*.rst'):\n index_entry = rst[4:-4] # doc/connector.rst --> connector\n connector_index_list += ' {0}\\n'.format(index_entry)\n move(rst, '.') # mv doc/connector.rst connector.rst\n\n # images must be in that dir (or will be ignored)\n if exists('doc/img'):\n self.mvrm('doc/img', '_static/images')\n rmtree('doc') # cleaning\n\n with open('index.rst', 'a') as connector_index:\n connector_index.write(connector_index_list)\n\n self.sphinx_build('.')\n self.mvrmbuild('.') # extract build output and rm sources\n\n chdir('..')\n\n def finish(self):\n \"\"\"Just to say bye\"\"\"\n\n self.log('Process completed')\n self.log('Documentation is available in \\'doc\\'')\n\n def log(self, event):\n \"\"\"Prints an event and records a string in a logfile\"\"\"\n\n with open(LOG_FILE, 'a') as log_file:\n log_file.write(event)\n print(event)\n\n def cmd(self, *args):\n \"\"\"Execute a shell command\"\"\"\n\n p = Popen(args, stdout=PIPE)\n return p.communicate()[0]\n\n def mvrm(self, source, destination):\n \"\"\"\n Recurent operation in the process. Equivalent to :\n mv sources/* destination && rm -r sources\n \"\"\"\n\n for path in glob(source + '/*'):\n move(path, destination)\n rmtree(source)\n\n def mvrmbuild(self, source):\n \"\"\"\n Similar to mvrm to extract built doc to the parent dir.\n mv source/_build/* . && rm -r source\n \"\"\"\n\n for element in glob(source + '/*'):\n if element != source + '/_build':\n if isdir(element):\n rmtree(element)\n else:\n remove(element)\n self.mvrm(source + '/_build', source)\n\n def git_sc(self, directory, branch, repo):\n \"\"\"Download a git subdirectory with sparse checkout option\"\"\"\n\n message_str = 'Downloading directory {0} from branch {1} (repo : {2})'\n message = message_str.format(\n directory,\n branch,\n repo\n )\n self.log(message)\n\n # Sparse checkout `directory` will checkout `directory`/subtree\n # and tree/`directory`/subtree. 
We create a tmp dir so that what\n        # we don't want can be removed with a single rmtree later.\n        makedirs('git_tmp')\n        chdir('git_tmp')\n\n        self.cmd('git', 'init')\n        self.cmd('git', 'config', 'core.sparsecheckout', 'true')\n        with open('.git/info/sparse-checkout', 'w') as sparse_checkout:\n            sparse_checkout.write(directory)\n        self.cmd('git', 'remote', 'add', '-f', 'origin',\n                 'https://github.com/capensis/{0}.git'.format(repo))\n        self.cmd('git', 'pull', 'origin', branch)\n\n        # mv my/dir/lastdir lastdir\n        final_directory = '../' + directory.split('/')[-1]\n        move(directory, final_directory)\n\n        chdir('..')\n        rmtree('git_tmp')\n\n    def sphinx_build(self, source_path):\n        \"\"\"sphinx-build -b html source_path source_path/_build\"\"\"\n\n        build_path = '{0}/_build'.format(source_path)\n        self.log(\n            self.cmd('sphinx-build', '-b', 'html', source_path, build_path))\n\n    def sphinx_apidoc(self, source_path, doc_path):\n        \"\"\"sphinx-apidoc command\"\"\"\n\n        if not isdir(source_path):\n            makedirs(doc_path)\n        return self.cmd('sphinx-apidoc', '-o', doc_path, source_path)\n\n\nclass JsonConfigurationLoader():\n    \"\"\"Loads configuration from CONF_FILE (json)\"\"\"\n\n    def __init__(self):\n        with open(CONF_FILE) as conf_file:\n            conf = load(conf_file)\n\n        global SPHINX_INDEX\n        SPHINX_INDEX = conf['sphinx_index']\n        global HTML_INDEX_TITLE\n        HTML_INDEX_TITLE = conf['html_index_titles']\n        global VERSIONS\n        VERSIONS = conf['versions']\n        global AUTO_LIBS\n        AUTO_LIBS = conf['auto_libs']\n        global AUTODOC_DIRECTORY\n        AUTODOC_DIRECTORY = conf['autodoc_directory']\n        global CONNECTORS\n        CONNECTORS = conf['connectors']\n        global LOG_FILE\n        LOG_FILE = conf['log_file']\n\n\nif __name__ == '__main__':\n    doc_builder = DocBuilder()\n\n    doc_builder.sphinx_index()\n    doc_builder.generate_html_index()\n    if AUTO_LIBS:\n        doc_builder.autodoc()\n        doc_builder.autodoc_index()\n    doc_builder.make_versions()\n    if CONNECTORS:\n        doc_builder.connectors()\n    else:\n        rmtree('connectors')\n    doc_builder.finish()\n\n" }, { "alpha_fraction": 0.6530612111091614, "alphanum_fraction": 0.6530612111091614, "avg_line_length": 11.5, "blob_id": "b3062358c4ac981cff5fabc6ed1a2e37ca79e380", "content_id": "fab0d367851e027fa11838bf8efdb052d30315cb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 49, "license_type": "no_license", "max_line_length": 22, "num_lines": 4, "path": "/README.md", "repo_name": "capensis/canopsis-doc", "src_encoding": "UTF-8", "text": "canopsis-doc\n============\n\nCanopsis Documentation" }, { "alpha_fraction": 0.6516548991203308, "alphanum_fraction": 0.6646771430969238, "avg_line_length": 51.68571472167969, "blob_id": "17355cb8521dc76fdd583032d870f210a4c752d4", "content_id": "d8d6aa930a422479d99b0d8472eea8a60cd765c6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1863, "license_type": "no_license", "max_line_length": 184, "num_lines": 35, "path": "/sphinx_index/connectors/connectors/mappingdesattributs.rst", "repo_name": "capensis/canopsis-doc", "src_encoding": "UTF-8", "text": "Attribute mapping\n=====================\n\nThe idea of this tab is to map the attributes of the application you want to interconnect to the Canopsis attributes\n\nFlow matrix\n----------------\n.. 
csv-table::\n   :header: \"Canopsis attribute\",\"Explanations\",\"Mandatory?\",\"Corresponding application attribute\",\"Concrete example: log file, SQL query, web service call, log format, etc.\"\n   :widths: 330, 20, 15, 15, 15\n\n\t\"_id\",\"Reserved\",,,\n\t\"event_id\",\"Reserved\",,,\n\t\"connector : Connector type (gelf, nagios, snmp, ...)\",\"Connector type: gelf, nagios, snmp, etc.\",\"X\",,\n\t\"connector_name : Connector name (nagios1, nagios2 ...)\",\"Connector name, free value\",\"X\",,\n\t\"event_type: Event type (check, log, trap, ...)\",\"Event type: check, log, trap\",\"X\",,\n\t\"source_type : Source type ('component' or 'resource')\",\"Source type: 'component' or 'resource'\",\"X\",,\n\t\"component : Component name\",\"Component name\",\"X\",,\n\t\"resource : Ressource name\",\"Resource name\",\"X if source_type = 'resource'\",,\n\t\"timestamp : UNIX seconds timestamp (UTC)\",\"Timestamp in UNIX epoch format\",,,\n\t\"state : State (0 (Ok), 1 (Warning), 2 (Critical), 3 (Unknown))\",\"State\n\t0 -> OK\n\t1 -> WARNING\n\t2 -> CRITICAL\n\t3 -> UNKNOWN\",\"X if event_type = 'check'\",,\n\t\"state_type : State type (0 (Soft), 1 (Hard))\",\"State type\n\t0 -> Soft\n\t1 -> Hard\",,,\n\t\"scheduled : (optional) True if this is a scheduled event\",\"Reserved\",,,\n\t\"last_state_change : (reserved) Last timestamp after state change\",\"Reserved\",,,\n\t\"previous_state : (reserved) Previous state (after change)\",\"Reserved\",,,\n\t\"output : Event message\",\"Event message\",,,\n\t\"long_output : Event long message\",\"Long event message\",,,\n\t\"tags : Event Tags (default: [])\",\"Tags\",,,\n\t\"display_name : The name to display (customization purpose)\",\"Friendly name\",,," }, { "alpha_fraction": 0.6605316996574402, "alphanum_fraction": 0.6779140830039978, "avg_line_length": 36.61538314819336, "blob_id": "fb573338c7f696bc9f329192b59627a6a686f2cf", "content_id": "c544d3bc4cbf3c2e5154e713c9196cadbd48df00", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 988, "license_type": "no_license", "max_line_length": 154, "num_lines": 26, "path": "/sphinx_index/connectors/connectors/interconnexionCLI.rst", "repo_name": "capensis/canopsis-doc", "src_encoding": "UTF-8", "text": "CLI interconnection\n===================\nThis tab is used to technically define the interconnection method via the command line\n\nConnector type description\tThis type of interconnection publishes an event on the AMQP bus through the Canopsis REST API\n\n|run_manager|\n\nInformation about the sources: Excel / CSV file\n----------------------------------------------------\n.. csv-table::\n   :header: \"Item\", \"Comments\", \"Values\"\n   :widths: 15, 20, 15\n\n\t\"Canopsis scripts\",\"Canopsis provides 2 event-emission scripts compatible with Windows and Unix\",\"https://github.com/capensis/canopsis/wiki\"\n\n\nFlow matrix\n----------------\n.. csv-table::\n   :header: \"Source\", \"Destination\", \"Protocol\",\"Ports\",\"Remarks\"\n   :widths: 15, 20, 15,15,15\n\n\t\"Application\",\"Canopsis webserver\",\"HTTP(s)\",,\"Plan one flow opening per Nagios poller\"\n\n.. 
|run_manager| image:: ../_static/images/connectors/InterconnexionCLI.png\n" }, { "alpha_fraction": 0.6872037649154663, "alphanum_fraction": 0.6919431090354919, "avg_line_length": 15.230769157409668, "blob_id": "52c46884b66d36c00391618679af33bd90f9b0e3", "content_id": "683e63292032bb3caaf91f0b226890175e965b33", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 211, "license_type": "no_license", "max_line_length": 25, "num_lines": 13, "path": "/sphinx_index/connectors/connectors/index.rst", "repo_name": "capensis/canopsis-doc", "src_encoding": "UTF-8", "text": "Connectors' guide\n******************\n\n.. toctree::\n   :maxdepth: 2\n\n   description\n   interconnexionFLUX\n   interconnexionFichiers\n   interconnexionBDD\n   interconnexionCLI\n   mappingdesattributs\n   conclusion\n" }, { "alpha_fraction": 0.6289212107658386, "alphanum_fraction": 0.6511093974113464, "avg_line_length": 35.81690216064453, "blob_id": "c3c49b1310fe9e736d47f3d3d5e45e482421a483", "content_id": "4e3089e0a335ebfb1c1668b039ba3878f274f94e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 2641, "license_type": "no_license", "max_line_length": 171, "num_lines": 71, "path": "/sphinx_index/connectors/connectors/interconnexionFLUX.rst", "repo_name": "capensis/canopsis-doc", "src_encoding": "UTF-8", "text": "Flow interconnection\n====================\nThis tab is used to technically define the interconnection method via flow mechanisms\n\nConnector type description\tThis type of interconnection publishes an event on the AMQP bus through a direct link or via an application API\n\nApplication type\tChoose an application type among the 4 shown on the diagram\n\n|run_manager|\n\n*Information about the sources: Application that calls a WS*\n----------------------------------------------------------------\n\n.. csv-table::\n   :header: \"Item\", \"Comments\", \"Values\"\n   :widths: 15, 20, 15\n\n\t\"Canopsis API\", \"Canopsis API documentation - Authentication - Publishing an event\", \"https://github.com/capensis/canopsis/wiki/API-Web\"\n\n*Information about the sources: Application that provides a WS*\n----------------------------------------------------------------\n\n.. csv-table::\n   :header: \"Item\", \"Comments\", \"Values\"\n   :widths: 15, 20, 15\n\n\t\"Application API\",\"Documentation of the required API\",\"\"\n\t\"Authentication method\",\"Describe the process as well as the authentication requests\",\"\"\n\t\"Event selection method\",\"Describe the process as well as the event-selection requests on the API\", \"\"\n\n\n*Information about the sources: Custom application*\n-----------------------------------------------------\n.. csv-table::\n   :header: \"Item\", \"Comments\", \"Values\"\n   :widths: 15, 20, 15\n\n\t\"Bindings in different languages\",\"Canopsis provides code samples to publish events into Canopsis in the following languages:\n\tPHP\n\tPerl\n\tPython\n\t\",\"https://github.com/capensis/canopsis/wiki/Send-Event-with-PHP \n\t\thttps://github.com/capensis/canopsis/wiki/Send-Event-with-Perl \n\t\thttps://github.com/capensis/canopsis/wiki/Send-Event-with-Python\"\n\n*Information about the sources: Nagios/Shinken supervisors*\n--------------------------------------------------------------\n.. 
csv-table::\n   :header: \"Item\", \"Comments\", \"Values\"\n   :widths: 15, 20, 15\n\n\t\"Supervisor\",\"Is it Nagios or Shinken?\",\n\t\"Supervisor version\",,\n\n\nFlow matrix\n----------------\n.. csv-table::\n   :header: \"Source\", \"Destination\", \"Protocol\",\"Ports\",\"Remarks\"\n   :widths: 15, 20, 15,15,15\n\n\t\"Application\",\"Canopsis webserver\",\"HTTP(s)\",\"80,443\",\n\t\"Application\",\"Canopsis bus\",\"AMQP\",\"5672\",\n\t\"Connector\",\"Application\",\"HTTP(s)\",\"80,443\",\n\t\"Connector\",\"Canopsis webserver\",\"HTTP(s)\",\"80,443\",\n\t\"Connector\",\"Canopsis bus\",\"AMQP\",\"5672\",\n\n.. |run_manager| image:: ../_static/images/connectors/InterconnecionFlux.png\n" }, { "alpha_fraction": 0.5479452013969421, "alphanum_fraction": 0.5616438388824463, "avg_line_length": 9.428571701049805, "blob_id": "eaa0a29e3bec21a8732c5443df1b87eb8934e6de", "content_id": "cf2ac82ddbeca79784b5d40770d9815cb19a94d4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 73, "license_type": "no_license", "max_line_length": 19, "num_lines": 7, "path": "/sphinx_index/connectors/index.rst", "repo_name": "capensis/canopsis-doc", "src_encoding": "UTF-8", "text": "Connectors\n**********\n\n.. toctree::\n   :maxdepth: 2\n\n   connectors/index\n" } ]
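Editor's note: the HTML guide in the record above describes how to document a function with Sphinx info fields (:param:, :return:, :raises:). As a quick, self-contained companion, here is a runnable sketch of those conventions. The function make_tea and its values are invented for illustration and are not taken from the Canopsis sources.

# A minimal, runnable illustration of the docstring conventions described in
# the HTML guide above. The function itself is hypothetical.
def make_tea(cup, volume, sugar=False):
    """
    Records a tea order and returns the filled cup ID

    :param cup: name of the cup, or its ID
    :type cup: str or int
    :param int volume: volume of tea in cL
    :param bool sugar: sweet tea or not
    :return: the filled cup ID
    :rtype: int
    :raises TypeError: if the volume is not an int

    .. warning:: Cup IDs are not necessarily unique
    """
    if not isinstance(volume, int):
        raise TypeError("volume must be an int")
    return hash((cup, volume, sugar)) % 1000  # illustrative ID only

print(make_tea("mug", 25, sugar=True))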
12
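Editor's note: the attribute-mapping table in the record above lists the fields a Canopsis event must carry before it is published on the AMQP bus. The sketch below assembles such an event as a plain Python dict; the helper name make_event and its error messages are assumptions for illustration, not part of the Canopsis API.

# Build a Canopsis-style event dict following the mapping table above.
# The mandatory/optional split mirrors the "Mandatory?" column.
def make_event(connector, connector_name, event_type, source_type,
               component, resource=None, state=None, **extra):
    event = {
        "connector": connector,            # connector type (gelf, nagios, snmp, ...)
        "connector_name": connector_name,  # free-form connector name
        "event_type": event_type,          # check, log, trap, ...
        "source_type": source_type,        # 'component' or 'resource'
        "component": component,
    }
    # 'resource' is mandatory only when the source type is 'resource'.
    if source_type == "resource":
        if resource is None:
            raise ValueError("resource is required when source_type == 'resource'")
        event["resource"] = resource
    # 'state' (0 Ok, 1 Warning, 2 Critical, 3 Unknown) is mandatory for checks.
    if event_type == "check":
        if state not in (0, 1, 2, 3):
            raise ValueError("check events need a state in 0..3")
        event["state"] = state
    event.update(extra)  # output, long_output, tags, timestamp, ...
    return event

print(make_event("nagios", "nagios1", "check", "resource",
                 "server01", resource="cpu", state=2, output="CPU critical"))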
TJRobson/MIT6.0001
https://github.com/TJRobson/MIT6.0001
305dfd2c2c2dad07c38f6e0e570cf5fb2be42704
fc816c79b81c5ec28cfe9cb5cc66b876a1b2e1f0
91ccdab285b1f4ff8f1a6f279d68c3406639186b
refs/heads/master
2020-12-30T13:08:19.270009
2017-06-21T06:40:44
2017-06-21T06:40:44
91,328,101
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6553966403007507, "alphanum_fraction": 0.672301709651947, "avg_line_length": 33.95454406738281, "blob_id": "99b9baf7a101cee6757321be37efdae6e43e0d30", "content_id": "367eb41b6cf0e32754d245efaa2e14be00190c0f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 769, "license_type": "no_license", "max_line_length": 87, "num_lines": 22, "path": "/problem_set_1/ps1a.py", "repo_name": "TJRobson/MIT6.0001", "src_encoding": "UTF-8", "text": "\nannual_salary = int(input('Enter your annual salary:' ))\nportion_saved = float(input('Enter the percent of your salary to save, as a decimal:'))\ntotal_cost = int(input('Enter the cost of your dream home:'))\n\ndef monthCalc(annual, portion, total) :\n current_savings = 0\n monthly_portion = (annual/12)*portion\n portion_down_payment = total*0.25\n number_of_months = 0\n \n while current_savings < portion_down_payment:\n savings_return = (current_savings*0.04)/12\n this_month = savings_return + monthly_portion\n current_savings += int(this_month)\n number_of_months += 1\n \n return number_of_months\n \n\ntotal_months = monthCalc(annual_salary, portion_saved, total_cost)\n\nprint('Number of months: ' + str(total_months))" }, { "alpha_fraction": 0.5529910326004028, "alphanum_fraction": 0.5549980401992798, "avg_line_length": 34.80239486694336, "blob_id": "dd537d02b136e035fc27f837042380d4d98add53", "content_id": "6230fc333ba3f39c5b74a799d6f8e8c4a94df6ce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 17937, "license_type": "no_license", "max_line_length": 100, "num_lines": 501, "path": "/problem_set_2/hangman.py", "repo_name": "TJRobson/MIT6.0001", "src_encoding": "UTF-8", "text": "# Problem Set 2, hangman.py\n# Name: \n# Collaborators:\n# Time spent:\n\n# Hangman Game\n# -----------------------------------\n# Helper code\n# You don't need to understand this helper code,\n# but you will have to know how to use the functions\n# (so be sure to read the docstrings!)\nimport random\nimport string\nimport re\n\nWORDLIST_FILENAME = \"words.txt\"\n\n\ndef load_words():\n \"\"\"\n Returns a list of valid words. 
Words are strings of lowercase letters.\n \n Depending on the size of the word list, this function may\n take a while to finish.\n \"\"\"\n print(\"Loading word list from file...\")\n # inFile: file\n inFile = open(WORDLIST_FILENAME, 'r')\n # line: string\n line = inFile.readline()\n # wordlist: list of strings\n wordlist = line.split()\n print(\" \", len(wordlist), \"words loaded.\")\n return wordlist\n\n\n\ndef choose_word(wordlist):\n \"\"\"\n wordlist (list): list of words (strings)\n \n Returns a word from wordlist at random\n \"\"\"\n return random.choice(wordlist)\n\n# end of helper code\n\n# -----------------------------------\n\n# Load the list of words into the variable wordlist\n# so that it can be accessed from anywhere in the program\nwordlist = load_words()\n\ndef get_unique_letters(secret_word) :\n sw = list(secret_word) \n seen = {}\n sw[:] = [seen.setdefault(l, l) for l in sw if l not in seen] \n return sw\n\n\ndef is_word_guessed(secret_word, letters_guessed):\n '''\n secret_word: string, the word the user is guessing; assumes all letters are\n lowercase\n letters_guessed: list (of letters), which letters have been guessed so far;\n assumes that all letters are lowercase\n returns: boolean, True if all the letters of secret_word are in letters_guessed;\n False otherwise\n '''\n sw = get_unique_letters(secret_word)\n all_letters_guessed = False\n \n for letter in sw:\n if len(letters_guessed) < len(sw):\n break\n for i, guess in enumerate(letters_guessed):\n if guess == letter:\n all_letters_guessed = True\n break\n elif (len(letters_guessed)-1) == i:\n return False\n else:\n continue\n\n return all_letters_guessed \n\n\ndef get_guessed_word(secret_word, letters_guessed):\n '''\n secret_word: string, the word the user is guessing\n letters_guessed: list (of letters), which letters have been guessed so far\n returns: string, comprised of letters, underscores (_), and spaces that represents\n which letters in secret_word have been guessed so far.\n '''\n secret_word_list = list(secret_word)\n guessed_string = ''\n \n for letter in secret_word_list:\n for i, guess in enumerate(letters_guessed):\n if letter == guess:\n guessed_string += guess\n break\n elif (len(letters_guessed)-1) == i:\n guessed_string += '_ '\n else:\n continue\n return guessed_string\n\n\ndef get_available_letters(letters_guessed):\n '''\n letters_guessed: list (of letters), which letters have been guessed so far\n returns: string (of letters), comprised of letters that represents which letters have not\n yet been guessed.\n '''\n available_letters = ''\n alphabet = string.ascii_lowercase\n alphabet_list = list(alphabet)\n \n for letter in letters_guessed:\n if letter in alphabet_list:\n alphabet_list.remove(letter)\n else:\n continue\n available_letters = ''.join(alphabet_list)\n return available_letters\n \n\ndef hangman(secret_word):\n '''\n secret_word: string, the secret word to guess.\n \n Starts up an interactive game of Hangman.\n \n * At the start of the game, let the user know how many \n letters the secret_word contains and how many guesses s/he starts with.\n \n * The user should start with 6 guesses\n\n * Before each round, you should display to the user how many guesses\n s/he has left and the letters that the user has not yet guessed.\n \n * Ask the user to supply one guess per round. 
Remember to make\n sure that the user puts in a letter!\n \n * The user should receive feedback immediately after each guess \n about whether their guess appears in the computer's word.\n\n * After each guess, you should display to the user the \n partially guessed word so far.\n \n Follows the other limitations detailed in the problem write-up.\n ''' \n def is_game_over(is_word_guessed, guesses) :\n is_over = False\n if any([is_word_guessed, guesses <= 1]):\n is_over = True\n return is_over\n \n def is_vowel(guess) :\n vowels= ['a','e','i','o','u']\n return guess in vowels\n \n def has_been_guessed(guess, guess_list) :\n guess_set = set(guess_list)\n return guess in guess_set\n \n def is_guess_in_word(guess, secret_word) :\n return guess in secret_word\n \n def guesses_cap(guesses, num) :\n if guesses + num >= 6:\n guesses = 6\n else:\n guesses += num\n return guesses\n \n def warning_check(warnings, guesses) :\n if warnings > 0:\n warnings -= 1\n else:\n warnings -= 1\n guesses -= 1\n return warnings, guesses\n \n def get_score(gueses, secret_word) :\n unique_letters = get_unique_letters(secret_word)\n score = len(unique_letters)*guesses\n return score\n \n def end_of_game(secret_word, guesses, word_comp) :\n if word_comp:\n total_score = get_score(guesses, secret_word)\n print('Congratulations, you won!\\n' + \\\n 'our total score for this game is: %d'%(total_score))\n else:\n print('Sorry, you ran out of guesses. The word was %s'%(secret_word))\n \n guesses, warnings, guessed_letters = 6, 3, []\n length = len(secret_word)\n break_line = '\\n-------------'\n \n print('Welcome to the game Hangman!\\n' \\\n 'I am thinking of a word that is %d letters long.\\n' \\\n 'You have %d warnings left.'%(length,warnings)+break_line)\n \n word_comp = is_word_guessed(secret_word, guessed_letters)\n game_is_over = is_game_over(word_comp, guesses)\n \n while not game_is_over:\n \n available_letters = get_available_letters(guessed_letters)\n word_string = get_guessed_word(secret_word, guessed_letters)\n print('You have %d guesses left.\\nAvailable letters: %s'%(guesses, available_letters))\n guess = str.lower(input('Please guess a letter: '))\n \n if str.isalpha(guess):\n if has_been_guessed(guess, guessed_letters):\n warnings, guesses = warning_check(warnings, guesses)\n game_is_over = is_game_over(word_comp, guesses)\n if game_is_over:\n break\n else:\n if warnings >= 0:\n print(\"Oops! You've already guessed that letter.\" \\\n \"You have %d warnings left\"%(warnings)+'\\n'+word_string+break_line)\n else:\n print(\"Oops! You've already guessed that letter.\" \\\n \"You have no warnings left\\nso you lose one guess: \"+word_string+break_line)\n else:\n guessed_letters.append(guess)\n word_comp = is_word_guessed(secret_word, guessed_letters)\n word_string = get_guessed_word(secret_word, guessed_letters)\n game_is_over = is_game_over(word_comp, guesses)\n \n if is_vowel(guess): \n if is_guess_in_word(guess, secret_word):\n guesses = guesses_cap(guesses, 2)\n if game_is_over:\n break\n else:\n print('Good guess: '+ word_string + break_line)\n else:\n guesses -= 2\n print('Oops! That letter is not in my word. '+ word_string + break_line)\n else:\n if is_guess_in_word(guess, secret_word):\n if game_is_over:\n break\n else:\n print('Good guess: '+ word_string + break_line)\n else:\n guesses -= 1\n print('Oops! That letter is not in my word. 
'+ word_string + break_line)\n else:\n warnings, guesses = warning_check(warnings, guesses)\n game_is_over = is_game_over(word_comp, guesses)\n if game_is_over:\n break\n else:\n if warnings >= 0:\n print(\"Oops! That is not a valid letter.\" \\\n 'You have %d warnings left'%(warnings)+'\\n'+word_string+break_line)\n else:\n print(\"Oops! That is not a valid letter.\" \\\n \"You have no warnings left\\nso you lose one guess: \"+word_string+break_line)\n \n end_of_game(secret_word,guesses,word_comp)\n \n\n# When you've completed your hangman function, scroll down to the bottom\n# of the file and uncomment the first two lines to test\n#(hint: you might want to pick your own\n# secret_word while you're doing your own testing)\n\n\n# -----------------------------------\n\n\n\ndef match_with_gaps(my_word, other_word):\n '''\n my_word: string with _ characters, current guess of secret word\n other_word: string, regular English word\n returns: boolean, True if all the actual letters of my_word match the \n corresponding letters of other_word, or the letter is the special symbol\n _ , and my_word and other_word are of the same length;\n False otherwise: \n '''\n my_word = re.sub(' ', '', my_word)\n mw_list, ow_list = list(my_word), list(other_word)\n word_match = False\n for i, letter in enumerate(mw_list):\n other_letter = ow_list[i]\n if len(mw_list) != len(ow_list):\n break\n else:\n if letter == '_':\n if other_letter in mw_list:\n word_match = False\n break\n else:\n word_match = True\n elif letter == other_letter:\n word_match = True\n else:\n word_match = False\n break\n return word_match \n \n\ndef show_possible_matches(my_word):\n '''\n my_word: string with _ characters, current guess of secret word\n returns: nothing, but should print out every word in wordlist that matches my_word\n Keep in mind that in hangman when a letter is guessed, all the positions\n at which that letter occurs in the secret word are revealed.\n Therefore, the hidden letter(_ ) cannot be one of the letters in the word\n that has already been revealed.\n\n '''\n word_list = []\n for word in wordlist:\n if match_with_gaps(my_word, word):\n word_list.append(word)\n length = len(word_list)\n if length == 0:\n print('No matches found')\n else:\n word_string = ' '.join(word_list)\n print(word_string)\n\n\ndef hangman_with_hints(secret_word):\n '''\n secret_word: string, the secret word to guess.\n \n Starts up an interactive game of Hangman.\n \n * At the start of the game, let the user know how many \n letters the secret_word contains and how many guesses s/he starts with.\n \n * The user should start with 6 guesses\n \n * Before each round, you should display to the user how many guesses\n s/he has left and the letters that the user has not yet guessed.\n \n * Ask the user to supply one guess per round. Make sure to check that the user guesses a letter\n \n * The user should receive feedback immediately after each guess \n about whether their guess appears in the computer's word.\n\n * After each guess, you should display to the user the \n partially guessed word so far.\n \n * If the guess is the symbol *, print out all words in wordlist that\n matches the current guessed word. 
\n \n Follows the other limitations detailed in the problem write-up.\n '''\n def is_game_over(is_word_guessed, guesses) :\n is_over = False\n if any([is_word_guessed, guesses <= 1]):\n is_over = True\n return is_over\n \n def is_vowel(guess) :\n vowels= ['a','e','i','o','u']\n return guess in vowels\n \n def has_been_guessed(guess, guess_list) :\n guess_set = set(guess_list)\n return guess in guess_set\n \n def is_guess_in_word(guess, secret_word) :\n return guess in secret_word\n \n def guesses_cap(guesses, num) :\n if guesses + num >= 6:\n guesses = 6\n else:\n guesses += num\n return guesses\n \n def warning_check(warnings, guesses) :\n if warnings > 0:\n warnings -= 1\n else:\n warnings -= 1\n guesses -= 1\n return warnings, guesses\n \n def get_score(gueses, secret_word) :\n unique_letters = get_unique_letters(secret_word)\n score = len(unique_letters)*guesses\n return score\n \n def end_of_game(secret_word, guesses, word_comp) :\n if word_comp:\n total_score = get_score(guesses, secret_word)\n print('Congratulations, you won!\\n' + \\\n 'our total score for this game is: %d'%(total_score))\n else:\n print('Sorry, you ran out of guesses. The word was %s'%(secret_word))\n \n guesses, warnings, guessed_letters = 6, 3, []\n length = len(secret_word)\n break_line = '\\n-------------'\n \n print('Welcome to the game Hangman!\\n' \\\n 'I am thinking of a word that is %d letters long.\\n' \\\n 'You have %d warnings left.'%(length,warnings)+break_line)\n \n word_comp = is_word_guessed(secret_word, guessed_letters)\n game_is_over = is_game_over(word_comp, guesses)\n \n while not game_is_over:\n \n available_letters = get_available_letters(guessed_letters)\n word_string = get_guessed_word(secret_word, guessed_letters)\n print('You have %d guesses left.\\nAvailable letters: %s'%(guesses, available_letters))\n guess = str.lower(input('Please guess a letter: '))\n \n if str.isalpha(guess):\n if has_been_guessed(guess, guessed_letters):\n warnings, guesses = warning_check(warnings, guesses)\n game_is_over = is_game_over(word_comp, guesses)\n if game_is_over:\n break\n else:\n if warnings >= 0:\n print(\"Oops! You've already guessed that letter.\" \\\n \"You have %d warnings left\"%(warnings)+'\\n'+word_string+break_line)\n else:\n print(\"Oops! You've already guessed that letter.\" \\\n \"You have no warnings left\\nso you lose one guess: \"+word_string+break_line)\n else:\n guessed_letters.append(guess)\n word_comp = is_word_guessed(secret_word, guessed_letters)\n word_string = get_guessed_word(secret_word, guessed_letters)\n game_is_over = is_game_over(word_comp, guesses)\n \n if is_vowel(guess): \n if is_guess_in_word(guess, secret_word):\n guesses = guesses_cap(guesses, 2)\n if game_is_over:\n break\n else:\n print('Good guess: '+ word_string + break_line)\n else:\n guesses -= 2\n print('Oops! That letter is not in my word. '+ word_string + break_line)\n else:\n if is_guess_in_word(guess, secret_word):\n if game_is_over:\n break\n else:\n print('Good guess: '+ word_string + break_line)\n else:\n guesses -= 1\n print('Oops! That letter is not in my word. '+ word_string + break_line)\n else:\n if guess == '*':\n show_possible_matches(word_string) \n \n else:\n warnings, guesses = warning_check(warnings, guesses)\n game_is_over = is_game_over(word_comp, guesses)\n if game_is_over:\n break\n else:\n if warnings >= 0:\n print(\"Oops! That is not a valid letter.\" \\\n 'You have %d warnings left'%(warnings)+'\\n'+word_string+break_line)\n else:\n print(\"Oops! 
That is not a valid letter.\" \\\n \"You have no warnings left\\nso you lose one guess: \"+word_string+break_line)\n \n end_of_game(secret_word,guesses,word_comp)\n\n# When you've completed your hangman_with_hint function, comment the two similar\n# lines above that were used to run the hangman function, and then uncomment\n# these two lines and run this file to test!\n# Hint: You might want to pick your own secret_word while you're testing.\n\n\nif __name__ == \"__main__\":\n # pass\n\n # To test part 2, comment out the pass line above and\n # uncomment the following two lines.\n secret_word = choose_word(wordlist)\n #hangman(secret_word)\n\n###############\n \n # To test part 3 re-comment out the above lines and \n # uncomment the following two lines. \n \n #secret_word = choose_word(wordlist)\n hangman_with_hints(secret_word)\n" }, { "alpha_fraction": 0.564350426197052, "alphanum_fraction": 0.5963746309280396, "avg_line_length": 27.98245620727539, "blob_id": "8fed42eed806076afa08b012dde4fb016916b8c0", "content_id": "e4207988f21d1a4c5a45f1cf36d095cf27d9c578", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1655, "license_type": "no_license", "max_line_length": 87, "num_lines": 57, "path": "/problem_set_1/ps1c.py", "repo_name": "TJRobson/MIT6.0001", "src_encoding": "UTF-8", "text": "\nannual_salary = int(input('Enter your annual salary:' ))\ntotal_cost = 1000000*0.25\n\ndef getHigh(sal, tot) :\n hi = (tot/36)/(sal/12)\n return hi\n\ndef floatRound(ans) :\n ans = int(ans*10000)\n ans = float(ans)/10000\n return ans\n\ndef testRate(ans, sal) :\n savings = 0\n increse = 1.07\n changingSal = sal\n for i in range(36):\n month = (changingSal/12)*ans\n savingsReturn = (savings*0.04)/12\n monthPortion = month + savingsReturn\n savings += monthPortion\n if i >= 6 and i % 6 == 0:\n changingSal *= increse\n return savings\n\ndef salPercent(total, salary) :\n steps = 0\n low, high = 0.0, getHigh(salary, total_cost)\n ans = (high + low)/2.0\n current_savings = testRate(ans, salary)\n epsilon = 100\n lowEps, hiEps = total - epsilon, total + epsilon\n\n while lowEps >= current_savings or current_savings >= hiEps:\n if current_savings < total:\n low = ans\n if ans > 1:\n break\n else:\n high = ans\n ans = floatRound((high + low)/2.0)\n current_savings = testRate(ans, salary)\n steps += 1\n return steps, ans\n \ndef printFunc(total, salary, func) :\n steps, rate = func(total, salary) \n if rate > 1:\n print('It is not possible to pay the down payment in three years.')\n else:\n print('Best savings rate: %g \\nSteps in bisection search: %d' % (rate, steps) )\n \nprintFunc(total_cost, annual_salary, salPercent)\n\n#noSteps, bestRate = salPercent(total_cost, annual_salary)\n#print('Best savings rate: ' + str(bestRate))\n#print('Steps in bisection search: ' + str(noSteps))\n\n\n" }, { "alpha_fraction": 0.6298157572746277, "alphanum_fraction": 0.6448911428451538, "avg_line_length": 35.15151596069336, "blob_id": "ae588c290f2397e54b4f4eca086df78a64449d24", "content_id": "33ff817bf2a25a3626cc6c27a18ed051971c4bf4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1194, "license_type": "no_license", "max_line_length": 87, "num_lines": 33, "path": "/problem_set_1/ps1b.py", "repo_name": "TJRobson/MIT6.0001", "src_encoding": "UTF-8", "text": "\nannual_salary = int(input('Enter your annual salary:' ))\nportion_saved = float(input('Enter the percent of your salary to save, as a decimal:'))\ntotal_cost = 
int(input('Enter the cost of your dream home:'))\nsemi_annual_raise = float(input('Enter the semiannual raise, as a decimal:'))\n\ndef monthCalc(annual, portion, total, raises) :\n \n current_savings = 0\n portion_down_payment = total*0.25\n number_of_months = 0\n \n changing_annual_salary = annual\n salary_increase = raises + 1.0\n\n while current_savings < portion_down_payment:\n \n monthly_portion = (changing_annual_salary/12)*portion \n savings_return = (current_savings*0.04)/12\n this_month = savings_return + monthly_portion\n current_savings += this_month\n \n if number_of_months >= 6 and number_of_months % 6 == 0:\n changing_annual_salary = changing_annual_salary*salary_increase\n \n #print(changing_annual_salary)\n number_of_months += 1\n \n return number_of_months\n \n\ntotal_months = monthCalc(annual_salary, portion_saved, total_cost, semi_annual_raise)\n\nprint('Number of months: ' + str(total_months))\n" }, { "alpha_fraction": 0.5567010045051575, "alphanum_fraction": 0.5704467296600342, "avg_line_length": 21.384614944458008, "blob_id": "a227c6dbef4dcc3e219a41fe2715c6fc4184097e", "content_id": "2a61d1295746f3ec137ebb06c7b01d73cfebbed0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 291, "license_type": "no_license", "max_line_length": 64, "num_lines": 13, "path": "/problem_set_1/ps0.py", "repo_name": "TJRobson/MIT6.0001", "src_encoding": "UTF-8", "text": "import numpy\n\nx = int(input('Enter a number x:'))\ny = int(input('Enter a number y:'))\n\ndef powerOf(x, y) :\n return x**y\n \npower = powerOf(x, y)\nlog2 = numpy.log2(power)\n\nprint(str(x) + ' to the power of ' + str(y) + ': ' + str(power))\nprint('log2 of ' + str(power) + ': ' + str(log2))\n" } ]
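Editor's note: hangman.py earlier in this record implements match_with_gaps(), which decides whether a partially revealed word is compatible with a candidate. The compact restatement below, written only for illustration, makes the rule easier to see: an underscore may only stand for a letter that has not already been revealed, and lengths must agree.

# Compact restatement of the match_with_gaps() rule from hangman.py above.
def matches(pattern, word):
    pattern = pattern.replace(" ", "")   # the game renders gaps as "_ "
    if len(pattern) != len(word):
        return False
    revealed = set(pattern) - {"_"}
    return all(p == w if p != "_" else w not in revealed
               for p, w in zip(pattern, word))

print(matches("e_ e", "eve"))   # True  - 'v' has not been revealed
print(matches("e_ e", "eee"))   # False - a gap cannot hide a revealed 'e'
print(matches("e_ e", "even"))  # False - lengths differ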
5
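Editor's note: ps1c.py in the record above finds the lowest savings rate by bisection search, halving an interval around a monotonic function until the result falls within a tolerance. The same idea in isolation, with a stand-in target function rather than the problem-set formula:

# Generic bisection search over a monotonically increasing function f,
# mirroring the structure of salPercent() in ps1c.py above. The function
# and target below are stand-ins, not values from the problem set.
def bisect(f, target, low, high, epsilon=1e-4):
    steps = 0
    guess = (low + high) / 2.0
    while abs(f(guess) - target) > epsilon:
        if f(guess) < target:
            low = guess    # answer lies in the upper half
        else:
            high = guess   # answer lies in the lower half
        guess = (low + high) / 2.0
        steps += 1
    return guess, steps

root, steps = bisect(lambda x: x * x, 2.0, 0.0, 2.0)
print("sqrt(2) ~= %.4f found in %d steps" % (root, steps))

Note that, as in ps1c.py, the search assumes the target is bracketed by the initial interval; otherwise the loop would not terminate.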
SanderA/OCM_Examples
https://github.com/SanderA/OCM_Examples
f0b09265a17ddc68acb4588e4e329dadb493aa25
556b92a65bcb9221ad10ee691821833d888669ad
e5e39f9903a83be77bbf2a0656559f1e5b536e95
refs/heads/master
2020-05-17T14:51:20.706558
2014-02-10T16:01:03
2014-02-10T16:01:03
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7802378535270691, "alphanum_fraction": 0.7913083434104919, "avg_line_length": 37.04878234863281, "blob_id": "d3467b4a99b46a5678316c7b574ee2351c975a23", "content_id": "0983fb2397ea54349adfb0ad59117ae534b0f9b5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10930, "license_type": "no_license", "max_line_length": 148, "num_lines": 287, "path": "/DiffusionNeumann/Python/DiffusionNeumannExample.py", "repo_name": "SanderA/OCM_Examples", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\n#> \\file\n#> \\author Sander Arens\n#> \\brief This is an example script to solve a Laplace problem using openCMISS calls in python.\n#>\n#> \\section LICENSE\n#>\n#> Version: MPL 1.1/GPL 2.0/LGPL 2.1\n#>\n#> The contents of this file are subject to the Mozilla Public License\n#> Version 1.1 (the \"License\"); you may not use this file except in\n#> compliance with the License. You may obtain a copy of the License at\n#> http://www.mozilla.org/MPL/\n#>\n#> Software distributed under the License is distributed on an \"AS IS\"\n#> basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the\n#> License for the specific language governing rights and limitations\n#> under the License.\n#>\n#> The Original Code is openCMISS\n#>\n#> The Initial Developer of the Original Code is University of Auckland,\n#> Auckland, New Zealand and University of Oxford, Oxford, United\n#> Kingdom. Portions created by the University of Auckland and University\n#> of Oxford are Copyright (C) 2007 by the University of Auckland and\n#> the University of Oxford. All Rights Reserved.\n#>\n#>\n#> Alternatively, the contents of this file may be used under the terms of\n#> either the GNU General Public License Version 2 or later (the \"GPL\"), or\n#> the GNU Lesser General Public License Version 2.1 or later (the \"LGPL\"),\n#> in which case the provisions of the GPL or the LGPL are applicable instead\n#> of those above. if you wish to allow use of your version of this file only\n#> under the terms of either the GPL or the LGPL, and not to allow others to\n#> use your version of this file under the terms of the MPL, indicate your\n#> decision by deleting the provisions above and replace them with the notice\n#> and other provisions required by the GPL or the LGPL. 
if you do not delete\n#> the provisions above, a recipient may use your version of this file under\n#> the terms of any one of the MPL, the GPL or the LGPL.\n#>\n\n#> \\example ClassicalField/Diffusion/Diffusion/Python/DiffusionExample.py\n## Example script to solve a Diffusion problem using openCMISS calls in python.\n#<\n\n\n# Add Python bindings directory to PATH\nimport sys, os\nsys.path.append(os.path.join((os.environ['OPENCMISS_ROOT'], 'cm', 'bindings', 'python')))\n\n# Intialise OpenCMISS\nfrom opencmiss import CMISS\n\n# Set problem parameters\nheight = 1.0\nwidth = 1.0\nlength = 1.0\n\nnumberGlobalXElements = 10\nnumberGlobalYElements = 10\nnumberGlobalZElements = 0\n\nnumberOfXi = 2\n\n# Set time integration parameters\nstartTime = 0.0\nstopTime = 0.101\ntimeIncrement = 0.001\n\n# Set point stimulus\nstimulusNodeNr = 50 \nstimulusValue = 1.0 \n\n# Set diffusion parameters\nDx = 1.0\nDy = 3.0\n\n#Set user numbers\ncoordinateSystemUserNumber = 1\nregionUserNumber = 1\nbasisUserNumber = 1\ngeneratedMeshUserNumber = 1\nmeshUserNumber = 1\ndecompositionUserNumber = 1\ngeometricFieldUserNumber = 1\nequationsSetFieldUserNumber = 2\nequationsSetUserNumber = 1\ndependentFieldUserNumber = 3\nmaterialsFieldUserNumber = 4\nproblemUserNumber = 1\n\n#Set output frequency for nodes\noutputFrequency = 10\n\nCMISS.DiagnosticsSetOn(CMISS.DiagnosticTypes.IN, [1,2,3,4,5], \"Diagnostics\", [\"DOMAIN_MAPPINGS_LOCAL_FROM_GLOBAL_CALCULATE\"])\n\n# Get the computational nodes information\nnumberOfComputationalNodes = CMISS.ComputationalNumberOfNodesGet()\ncomputationalNodeNumber = CMISS.ComputationalNodeNumberGet()\n\n# Creation a RC coordinate system\ncoordinateSystem = CMISS.CoordinateSystem()\ncoordinateSystem.CreateStart(coordinateSystemUserNumber)\nif numberGlobalZElements == 0:\n\tcoordinateSystem.DimensionSet(2)\nelse: \n\tcoordinateSystem.DimensionSet(3)\ncoordinateSystem.CreateFinish()\n\n# Create a region\nregion = CMISS.Region()\nregion.CreateStart(regionUserNumber, CMISS.WorldRegion)\nregion.LabelSet(\"DiffusionRegion\")\nregion.CoordinateSystemSet(coordinateSystem)\nregion.CreateFinish()\n\n# Create a tri-linear lagrange basis\nbasis = CMISS.Basis()\nbasis.CreateStart(basisUserNumber)\nbasis.TypeSet(CMISS.BasisTypes.LAGRANGE_HERMITE_TP) #TP stands for Tensor Product\nif numberGlobalZElements == 0:\n\tbasis.NumberOfXiSet(2)\n\tbasis.InterpolationXiSet([CMISS.BasisInterpolationSpecifications.LINEAR_LAGRANGE]*2)\n\tbasis.QuadratureNumberOfGaussXiSet([2]*2)\nelse: \n\tbasis.NumberOfXiSet(3)\n\tbasis.InterpolationXiSet([CMISS.BasisInterpolationSpecifications.LINEAR_LAGRANGE]*3)\n\tbasis.QuadratureNumberOfGaussXiSet([2]*3)\nbasis.CreateFinish()\n\n# Create a generated mesh\ngeneratedMesh = CMISS.GeneratedMesh()\ngeneratedMesh.CreateStart(generatedMeshUserNumber, region)\ngeneratedMesh.TypeSet(CMISS.GeneratedMeshTypes.REGULAR)\ngeneratedMesh.BasisSet([basis])\nif numberGlobalZElements == 0:\n\tgeneratedMesh.ExtentSet([width,height])\n\tgeneratedMesh.NumberOfElementsSet([numberGlobalXElements, numberGlobalYElements])\nelse: \n\tgeneratedMesh.ExtentSet([width,height,length])\n\tgeneratedMesh.NumberOfElementsSet([numberGlobalXElements, numberGlobalYElements, numberGlobalZElements])\nmesh = CMISS.Mesh()\ngeneratedMesh.CreateFinish(meshUserNumber, mesh)\n\n# Create a decomposition for the mesh\ndecomposition = CMISS.Decomposition()\ndecomposition.CreateStart(decompositionUserNumber, 
mesh)\ndecomposition.TypeSet(CMISS.DecompositionTypes.CALCULATED)\ndecomposition.NumberOfDomainsSet(numberOfComputationalNodes)\ndecomposition.CreateFinish()\n\n# Create a field for the geometry\ngeometricField = CMISS.Field()\ngeometricField.CreateStart(geometricFieldUserNumber, region)\ngeometricField.MeshDecompositionSet(decomposition)\ngeometricField.ComponentMeshComponentSet(CMISS.FieldVariableTypes.U, 1, 1)\ngeometricField.ComponentMeshComponentSet(CMISS.FieldVariableTypes.U, 2, 1)\nif numberGlobalZElements > 0:\n\tgeometricField.ComponentMeshComponentSet(CMISS.FieldVariableTypes.U, 3, 1)\ngeometricField.CreateFinish()\n\n# Set geometry from the generated mesh\ngeneratedMesh.GeometricParametersCalculate(geometricField)\n\n# Create Diffusion equations set\nequationsSetField = CMISS.Field()\nequationsSet = CMISS.EquationsSet()\nequationsSet.CreateStart(equationsSetUserNumber, region, geometricField,\n CMISS.EquationsSetClasses.CLASSICAL_FIELD,\n CMISS.EquationsSetTypes.DIFFUSION_EQUATION,\n CMISS.EquationsSetSubtypes.NO_SOURCE_DIFFUSION,\n equationsSetFieldUserNumber, equationsSetField)\nequationsSet.CreateFinish()\n\n# Create dependent field\ndependentField = CMISS.Field()\nequationsSet.DependentCreateStart(dependentFieldUserNumber, dependentField)\nequationsSet.DependentCreateFinish()\nnodeDomain = decomposition.NodeDomainGet(stimulusNodeNr, 1)\nif nodeDomain == computationalNodeNumber:\n dependentField.ParameterSetUpdateNodeDP(CMISS.FieldVariableTypes.U, CMISS.FieldParameterSetTypes.VALUES, 1, 1, stimulusNodeNr, 1, stimulusValue)\n\n# Create the equations set material field variables\nmaterialsField = CMISS.Field()\nequationsSet.MaterialsCreateStart(materialsFieldUserNumber, materialsField)\nequationsSet.MaterialsCreateFinish()\nmaterialsField.ComponentValuesInitialiseDP(CMISS.FieldVariableTypes.U, CMISS.FieldParameterSetTypes.VALUES, 1, Dx) \nmaterialsField.ComponentValuesInitialiseDP(CMISS.FieldVariableTypes.U, CMISS.FieldParameterSetTypes.VALUES, 2, Dy) \n\n# Create equations\nequations = CMISS.Equations()\nequationsSet.EquationsCreateStart(equations)\nequations.SparsityTypeSet(CMISS.EquationsSparsityTypes.SPARSE)\nequations.OutputTypeSet(CMISS.EquationsOutputTypes.NONE)\nequationsSet.EquationsCreateFinish()\n\n# Create Diffusion problem\nproblem = CMISS.Problem()\nproblem.CreateStart(problemUserNumber)\nproblem.SpecificationSet(CMISS.ProblemClasses.CLASSICAL_FIELD,\n CMISS.ProblemTypes.DIFFUSION_EQUATION,\n CMISS.ProblemSubTypes.NO_SOURCE_DIFFUSION)\nproblem.CreateFinish()\n\n# Create control loops\ncontrolLoop = CMISS.ControlLoop()\nproblem.ControlLoopCreateStart()\nproblem.ControlLoopGet([CMISS.ControlLoopIdentifiers.NODE], controlLoop)\ncontrolLoop.OutputTypeSet(CMISS.ControlLoopOutputTypes.PROGRESS)\ncontrolLoop.TimesSet(startTime, stopTime, timeIncrement)\ncontrolLoop.TimeOutputSet(outputFrequency)\nproblem.ControlLoopCreateFinish()\n\n# Create problem solver\nsolver = CMISS.Solver()\nlinearSolver = CMISS.Solver()\nproblem.SolversCreateStart()\nproblem.SolverGet([CMISS.ControlLoopIdentifiers.NODE], 1, solver)\nsolver.OutputTypeSet(CMISS.SolverOutputTypes.NONE)\nsolver.DynamicLinearSolverGet(linearSolver)\nlinearSolver.LinearIterativeMaximumIterationsSet(1000)\nproblem.SolversCreateFinish()\n\n# Create solver equations and add equations set to solver equations\nsolver = CMISS.Solver()\nsolverEquations = CMISS.SolverEquations()\nproblem.SolverEquationsCreateStart()\nproblem.SolverGet([CMISS.ControlLoopIdentifiers.NODE], 1, 
solver)\nsolver.SolverEquationsGet(solverEquations)\nsolverEquations.sparsityType = CMISS.SolverEquationsSparsityTypes.SPARSE\nequationsSetIndex = solverEquations.EquationsSetAdd(equationsSet)\nproblem.SolverEquationsCreateFinish()\n\n# Create boundary conditions\nboundaryConditions = CMISS.BoundaryConditions()\nsolverEquations.BoundaryConditionsCreateStart(boundaryConditions)\nboundaryConditions.NeumannSparsityTypeSet(CMISS.BoundaryConditionSparsityTypes.SPARSE) \nnodes = CMISS.Nodes()\nregion.NodesGet(nodes)\ntol = 1.0e-12\nfor node in range(1, nodes.numberOfNodes + 1):\n nodeDomain = decomposition.NodeDomainGet(node, 1)\n if nodeDomain != computationalNodeNumber:\n continue\n # get x, y and z positions at node\n position = [\n geometricField.ParameterSetGetNodeDP(\n CMISS.FieldVariableTypes.U, CMISS.FieldParameterSetTypes.VALUES,\n 1, 1, node, component + 1)\n for component in range(numberOfXi)]\n if (abs(position[0] - width) < tol or abs(position[0]) < tol):\n boundaryConditions.SetNode(dependentField,\n CMISS.FieldVariableTypes.DELUDELN, 1, 1, node, 1,\n CMISS.BoundaryConditionsTypes.NEUMANN_INTEGRATED_ONLY, 0.0)\n\n elif (abs(position[1] - height) < tol or abs(position[1]) < tol):\n # Set Neumann condition of 0 at boundaries\n boundaryConditions.SetNode(dependentField,\n CMISS.FieldVariableTypes.DELUDELN, 1, 1, node, 1,\n CMISS.BoundaryConditionsTypes.NEUMANN_INTEGRATED_ONLY, 0.0)\nsolverEquations.BoundaryConditionsCreateFinish()\n# Solve the problem\nproblem.Solve()\n\n# Export results\n#baseName = \"diffusion\"\n#dataFormat = \"PLAIN_TEXT\"\n#fml = CMISS.FieldMLIO()\n#fml.OutputCreate(mesh, \"\", baseName, dataFormat)\n#fml.OutputAddFieldNoType(baseName+\".geometric\", dataFormat, geometricField,\n# CMISS.FieldVariableTypes.U, CMISS.FieldParameterSetTypes.VALUES)\n#fml.OutputAddFieldNoType(baseName+\".u\", dataFormat, dependentField,\n# CMISS.FieldVariableTypes.U, CMISS.FieldParameterSetTypes.VALUES)\n#fml.OutputWrite(\"DiffusionExample.xml\")\n\nfields = CMISS.Fields()\nfields.CreateRegion(region)\n#fields.NodesExport(\"Diffusion\",\"FORTRAN\")\nfields.ElementsExport(\"Diffusion\",\"FORTRAN\")\nfields.Finalise()\nCMISS.Finalise()\n\n############################################################################################\n\n# Show the result in cmgui\n#os.system(\"cmgui-wx visualise.com\")\n\n\n\n\n\n\n\n\n\n\n" } ]
1
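
The record above is a complete OpenCMISS driver in which the library owns the mesh, fields, and time stepping. For intuition about what it computes, here is a minimal pure-NumPy sketch of the same kind of anisotropic 2D diffusion (explicit forward-Euler on a regular grid; the grid size, Dx/Dy, time step, and point stimulus echo the script's parameters, but the periodic boundaries and everything else are simplifications of mine, not the OpenCMISS algorithm):

```python
import numpy as np

# Parameters echoing the script above (illustrative assumptions, not OpenCMISS)
nx, ny = 11, 11          # 10 elements per side -> 11 nodes
Dx, Dy = 1.0, 3.0        # anisotropic diffusion coefficients
dt, steps = 0.001, 100   # timeIncrement, and roughly (stopTime - startTime) / dt
h = 1.0 / 10             # node spacing on a unit square

u = np.zeros((nx, ny))
u[5, 5] = 1.0            # point stimulus, like stimulusValue at stimulusNodeNr

for _ in range(steps):
    # Second differences in each direction. np.roll makes the boundaries
    # periodic, unlike the zero-flux Neumann conditions the script imposes.
    lap_x = (np.roll(u, 1, axis=0) - 2 * u + np.roll(u, -1, axis=0)) / h**2
    lap_y = (np.roll(u, 1, axis=1) - 2 * u + np.roll(u, -1, axis=1)) / h**2
    u = u + dt * (Dx * lap_x + Dy * lap_y)

print(u.sum())  # total mass is conserved under periodic boundaries
```

The explicit scheme is stable here because 2 * dt * (Dx + Dy) / h**2 = 0.8 <= 1; the OpenCMISS example instead delegates time integration to its dynamic solver and so carries no such hand-rolled restriction.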
beniza/learningPython
https://github.com/beniza/learningPython
dc1105c7771f9e9a53d5a2c24d074fdd72e51ad1
9da39d42c708c9123045a1357ec49a8c8c72930b
13fd30fa40d365300ba760417256293724cbf9bd
refs/heads/master
2021-05-15T01:42:27.402289
2020-04-25T18:33:01
2020-04-25T18:33:01
22,953,255
0
2
null
2014-08-14T12:41:43
2017-03-16T06:32:18
2017-03-17T11:08:46
Python
[ { "alpha_fraction": 0.5227272510528564, "alphanum_fraction": 0.5227272510528564, "avg_line_length": 12.666666984558105, "blob_id": "300678b40ef3558545058a9581356d40a8a660cc", "content_id": "bdefb1797f761c8237484ec4167bdc3f00937a77", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 44, "license_type": "permissive", "max_line_length": 14, "num_lines": 3, "path": "/README.md", "repo_name": "beniza/learningPython", "src_encoding": "UTF-8", "text": "learningPython\r\n==============\r\nNew Lesson\r\n" }, { "alpha_fraction": 0.3740389049053192, "alphanum_fraction": 0.4427860677242279, "avg_line_length": 40.519229888916016, "blob_id": "e120d6c10c92b2e00fff532b9999baf5dc051a1e", "content_id": "95b010c8199934c213d57e7940c47fb1d708b8c3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2211, "license_type": "permissive", "max_line_length": 121, "num_lines": 52, "path": "/usfm/change-filename-from-id.py", "repo_name": "beniza/learningPython", "src_encoding": "UTF-8", "text": "'''\r\nBulk Rename USFM files according to the ID of the book\r\n'''\r\nimport os\r\nimport codecs\r\nimport re\r\n\r\nprojects = [\"HIN\", \"GUJ\", \"MAR\", \"KHN\"]\r\n\r\nfor prj in projects:\r\n EXT = [\"usfm\", \"sfm\", \"ptx\"]\r\n SOURCEDIR = \"C:\\\\path\\\\to\\\\usfm\"import os\r\n PROJECT = \"-OA-\" + prj\r\n bk = {\"GEN\":1, \"EXO\":2, \"LEV\":3, \"NUM\":4, \"DEU\":5, \"JOS\":6, \"JDG\":7, \"RUT\":8, \"1SA\":9, \"2SA\":10, \"1KI\":11, \"2KI\":12,\\\r\n \"1CH\":13, \"2CH\":14, \"EZR\":15, \"NEH\":16, \"EST\":17, \"JOB\":18, \"PSA\":19, \"PRO\":20, \"ECC\":21, \"SNG\":22, \"ISA\":23,\\\r\n \"JER\":24, \"LAM\":25, \"EZK\":26, \"DAN\":27, \"HOS\":28, \"JOL\":29, \"AMO\":30, \"OBA\":31, \"JON\":32, \"MIC\":33, \"NAM\":34,\\\r\n \"HAB\":35, \"ZEP\":36, \"HAG\":37, \"ZEC\":38, \"MAL\":39,\\\r\n # NT Starts here\r\n \"MAT\":40, \"MRK\":41, \"LUK\":42, \"JHN\":43, \"ACT\":44, \"ROM\":45, \"1CO\":46, \"2CO\":47, \"GAL\":48, \"EPH\":49, \"PHP\":50,\\\r\n \"COL\":51, \"1TH\":52, \"2TH\":53, \"1TI\":54, \"2TI\":55, \"TIT\":56, \"PHM\":57, \"HEB\":58, \"JAS\":59, \"1PE\":60, \"2PE\":61,\\\r\n \"1JN\":62, \"2JN\":63, \"3JN\":64, \"JUD\":65, \"REV\": 66}\r\n\r\n fileList = os.listdir(SOURCEDIR)\r\n try:\r\n os.mkdir(SOURCEDIR + \"\\\\_out\")\r\n except:\r\n shutil.rmtree(SOURCEDIR + \"\\\\_out\")\r\n os.mkdir(SOURCEDIR + \"\\\\_out\")\r\n \r\n for fil in fileList:\r\n b = fil.split(\".\")\r\n try:\r\n if(b[1].lower() in EXT):\r\n f = codecs.open(os.path.join(SOURCEDIR, fil), mode = 'r', encoding = 'utf-8')\r\n fc = f.read()\r\n f.close()\r\n\r\n try:\r\n bkID = re.findall(\"\\\\id ([A-Z0-9]{3})\", fc)[0]\r\n bookNumber = bk[bkID]\r\n if(bookNumber >= 40):\r\n bookNumber += 1\r\n fName = str(bookNumber).zfill(2) + \"_\" + bkID + PROJECT + \".\" + EXT[0]\r\n except:\r\n fName=fil\r\n o = codecs.open(os.path.join(SOURCEDIR + \"\\\\_out\", fName), mode='w', encoding='utf-8')\r\n o.write(\"\\\\rem Original File Name: \" + fil)\r\n o.write(\"\\n\\\\rem Processed using USFM_Bulk_Rename_Utility by beniza\\n\")\r\n o.write(fc)\r\n o.close()\r\n except:\r\n pass\r\n" }, { "alpha_fraction": 0.5260831117630005, "alphanum_fraction": 0.5340406894683838, "avg_line_length": 24.928571701049805, "blob_id": "c548500ef5d4499a2e0d63825cc133adb79dff83", "content_id": "82dcdd25b1b3149aa48f58b2947b4bb91fd1d235", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1131, 
"license_type": "permissive", "max_line_length": 80, "num_lines": 42, "path": "/amt/merge-tokens.py", "repo_name": "beniza/learningPython", "src_encoding": "UTF-8", "text": "'''\r\nModule to merge a set of individual token files stored in the current directory \r\nto one single Spreadsheet (and a text file)\r\n'''\r\nimport openpyxl\r\nimport glob\r\n\r\nfiles = glob.glob(\"*.xlsx\")\r\ntok = {}\r\n\r\nfor f in files:\r\n wb = openpyxl.load_workbook(f)\r\n ws = wb.active\r\n for r in range(ws.max_row):\r\n try:\r\n tok[ws[\"A\" + str(r+1)].value].append(ws[\"B\" + str(r+1)].value)\r\n except KeyError:\r\n tok[ws[\"A\" + str(r+1)].value] = ws[\"B\" + str(r+1)].value\r\n except AttributeError:\r\n pass\r\n\r\no = open(\"tokens.txt\", mode='w', encoding='utf-8')\r\ntry:\r\n for k, v in tok.items():\r\n o.write(str(k) + \"\\t\" + str(v) + \"\\n\")\r\nexcept:\r\n pass\r\no.close()\r\n\r\nout_wb = openpyxl.Workbook()\r\nout_ws = out_wb.active\r\ntry:\r\n for row, (k, v) in enumerate(tok.items()):\r\n out_ws[\"A\" + str(row + 1)].value = k\r\n out_ws[\"B\" + str(row + 1)].value = v\r\nexcept:\r\n out_ws[\"A\" + str(row + 1)].value = \"###\"\r\n out_wb[\"B\" + str(row + 1)].value = \"***\"\r\n pass\r\n\r\nout_wb.save(\"Bodo_Parjo.xlsx\")\r\nprint(\"\\n\" + str(len(tok)) + \" tokens are saved to the file.\")\r\n" }, { "alpha_fraction": 0.567251443862915, "alphanum_fraction": 0.5719298124313354, "avg_line_length": 29.44444465637207, "blob_id": "f8de4a5b37f8b8c6c59841456881d0f99b66f54a", "content_id": "012df93783e9ccd159bbc169079891e0577388fc", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 855, "license_type": "permissive", "max_line_length": 84, "num_lines": 27, "path": "/experiments/bank-class.py", "repo_name": "beniza/learningPython", "src_encoding": "UTF-8", "text": "class Account:\r\n def __init__(self, name, balance, min_balance):\r\n self.name = name\r\n self.balance = balance\r\n self.min_balance = min_balance\r\n def __del__(self):\r\n print(\"Account deactivated\")\r\n\r\n def deposit(self, amount):\r\n self.balance += amount\r\n\r\n def withdraw(self, amount):\r\n if(self.balance - amount >= self.min_balance):\r\n self.balance -= amount\r\n else:\r\n print(\"Sorry, you don't have enough balance for this transaction!\")\r\n\r\n def statement(self):\r\n print(\"Account Balance: Rs.{}\".format(self.balance))\r\n\r\n\r\nclass Current(Account):\r\n def __init__(self, name, balance):\r\n super().__init__(name, balance, min_balance=-1000)\r\n\r\n def __str__(self):\r\n return \"{}'s Current Account: Balance Rs.{}\".format(self.name, self.balance)\r\n \r\n" }, { "alpha_fraction": 0.6342412233352661, "alphanum_fraction": 0.6342412233352661, "avg_line_length": 50.599998474121094, "blob_id": "00d4c2058007db797de5c191808956e5bb1b0770", "content_id": "17b4f45eb4f6009e6af047048b127c40da5bc47d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 257, "license_type": "permissive", "max_line_length": 166, "num_lines": 5, "path": "/feedback-app/test.py", "repo_name": "beniza/learningPython", "src_encoding": "UTF-8", "text": "gibwords = ['Egbindinatl', 'Evtlesnioi', 'Miet', 'Boilme', 'Ishtr', 'Agdrne', 'Solhoc', 'sjdmai', 'Tsere', 'Yclecs', 'Neplci', 'Odosretp', 'Thgli', 'Sglas', 'Owilpl']\nfor gibword in gibwords:\n gibword = gibword.lower()\n l = [l for l in gibword]\n print(l)" }, { "alpha_fraction": 0.6123188138008118, "alphanum_fraction": 0.6159420013427734, "avg_line_length": 
32.5, "blob_id": "de202441abbfbcf683a0ddb6bfb2770ebc435a9c", "content_id": "754571d919a4980dd78b73cf10062bab56cb25b7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1380, "license_type": "permissive", "max_line_length": 103, "num_lines": 40, "path": "/amt/testCompletenessOfTokensAgainstUSFM.py", "repo_name": "beniza/learningPython", "src_encoding": "UTF-8", "text": "'''\r\nThis program tests the completeness of the token list against the usfm\r\nfiles from the tokens created. This will help us in testing if any words or\r\nphrases that are not correctly represented in the token list.\r\n\r\n Input: usfm files (under the usfm folder)\r\n token file (in this case ROM-REV_Full.csv)\r\n Output: usfm files after replacing the words in token file with \"\". This\r\n will get stored in the 'out' folder\r\n'''\r\n\r\nimport codecs\r\nimport os\r\n\r\nfileList = os.listdir(\"usfm\\\\\")\r\n\r\ntokenFile = codecs.open(\"ROM-REV_Full.csv\", mode='r', encoding=\"utf-8\")\r\ntokens = tokenFile.readlines()\r\n\r\n# Sorting the tokens in the reverse order to prevent smaller words get replaced before the longer ones.\r\ntokens = sorted(tokens, key=len, reverse=True) \r\n\r\nfor fil in fileList:\r\n b=fil.split(\".\")\r\n bk = b[0]\r\n\r\n if b[1]==\"usfm\":\r\n f = codecs.open(\"usfm\\\\\" + fil, mode = \"r\", encoding = \"utf-8\")\r\n fc = f.read()\r\n f.close()\r\n\r\n #Replacing the matching tokens with \"\"\r\n #We can modify this step and can get a usfm file translated\r\n #into a new language if the translation of the tokens available.\r\n for token in tokens:\r\n fc = fc.replace(token.strip(\"\\n\"), \"\")\r\n \r\n o = codecs.open(\"out\\\\\" + fil, mode=\"w\", encoding=\"utf-8\")\r\n o.write(fc)\r\n o.close()\r\n" }, { "alpha_fraction": 0.7251995205879211, "alphanum_fraction": 0.73204106092453, "avg_line_length": 32.730770111083984, "blob_id": "46dbbfd0c2cc18f8571e87ddb47fa7052e4e0cd6", "content_id": "072b8aa65132de71b02e30da2c4b6018fb63d0a6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 877, "license_type": "permissive", "max_line_length": 178, "num_lines": 26, "path": "/CS-50/html-css.md", "repo_name": "beniza/learningPython", "src_encoding": "UTF-8", "text": "# Module 2: HTML and CSS\n\nCSS Selectors (56:03) \n![##CSS Selectors](https://i.imgur.com/7rwDGnt.png)\n\n## Making a Responsive Design\nStrategies to make a website behave differently on a user's device based on the type of devices he/she uses. 
Below are a few such strategies.\n\n### Viewport\n### Media Queries\nA media query is a specific way of assigning CSS not to a particular HTML element, but to a particular HTML\nelement on a particular type of media.\nSo that type of media might be particularly-sized computers, or particularly-sized phones, or even the difference between how\nyou want a website to look when you are writing the code for it to appear on a screen versus how you want the website to look if someone prints it out, for example, on a printer.\n\n```html\n<style>\n @media print {\n .screen-only {\n display: none;\n }\n }\n</style>\n```\n### Flexbox\n### Grids\n" }, { "alpha_fraction": 0.5156591534614563, "alphanum_fraction": 0.5229424834251404, "avg_line_length": 30.690475463867188, "blob_id": "fe99e11952c357448ba65a76461ce7cced5f197c", "content_id": "9b12c4e261635e638ee541173c0e659c7db122d5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1373, "license_type": "permissive", "max_line_length": 82, "num_lines": 42, "path": "/amt/merge-tokens2.py", "repo_name": "beniza/learningPython", "src_encoding": "UTF-8", "text": "'''\r\nGiven an order of the books, this script will create a new list of tokens\r\nwhich will not repeate the already found tokens in a previous book. Useful when\r\nuser wanted to translate the books in a specific order.\r\n\r\n INPUT: Order of the books (Currently alphabetical order. To be implemented)\r\n OUTPUT: tokenList.txt (A file with all the tokens with a unique number and\r\n the list of books where the tokens in found)\r\n out\\*.csv (Tokens of each file according to the order. \r\n'''\r\n\r\nimport codecs\r\nimport os\r\n\r\nfileList = os.listdir(\".\\_in\")\r\n\r\ntok={}\r\ni=1\r\n\r\nfor fil in fileList:\r\n b = fil.split(\".\")\r\n if(b[1]==\"CSV\"):\r\n bk = b[0]\r\n f=codecs.open(\"_in\\\\\" + fil, mode=\"r\", encoding=\"utf-8\")\r\n fc = f.readlines()\r\n f.close() \r\n fo = codecs.open(\"_out\\\\\" + fil, mode=\"w\", encoding=\"utf-8\")\r\n for t in fc:\r\n try:\r\n tok[t.strip(\"\\n\")].append(bk)\r\n #fo.write(tok[t.strip(\"\\n\")[0]] + \"\\t\" + t)\r\n except:\r\n tok[t.strip(\"\\n\")]= [\"HIN\" + str(i).zfill(5), bk]\r\n fo.write(\"Hin\" + str(i).zfill(5) + \"\\t\" + t)\r\n i += 1\r\n\r\nfo.close()\r\no = codecs.open(\"tokenList.txt\", mode=\"w\", encoding=\"utf-8\")\r\nfor k,v in tok.items():\r\n o.write(str(k) + \"\\t\" + str(v) + \"\\n\")\r\n\r\no.close()\r\n" }, { "alpha_fraction": 0.7295132279396057, "alphanum_fraction": 0.7319778203964233, "avg_line_length": 44.371429443359375, "blob_id": "1730016dec9a976b0350458819f0c8bc9b55dbd8", "content_id": "cb047efedafc8367c7f0d388332f4b7b1915d9a8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1623, "license_type": "permissive", "max_line_length": 318, "num_lines": 35, "path": "/repo-structure/README.md", "repo_name": "beniza/learningPython", "src_encoding": "UTF-8", "text": "# Template folder\r\nThis repo contains a template directory structure, which is made based on the instructions found in the [Hitchhicker's Guide](https://docs.python-guide.org/writing/structure/). 
In his article, he refers to [another guide by Kenneth Reitz](https://www.kennethreitz.org/essays/repository-structure-and-python) found online.\r\n\r\n```shell\r\nREADME.rst\r\nLICENSE\r\nsetup.py\r\nrequirements.txt\r\nsample/__init__.py\r\nsample/core.py\r\nsample/helpers.py\r\ndocs/conf.py\r\ndocs/index.rst\r\ntests/test_basic.py\r\ntests/test_advanced.py\r\n```\r\n## Note\r\nAlthough the Hitchhiker's guide suggests `.rst` as a format for formatted documents, I use `.md` because I'm more familiar with it, and it is sufficient for my needs.\r\n## Recommendations\r\n1. The actual module\r\n    1. If your module consists of more than one file, place them in a directory and name the directory as the `module` name\r\n    ```\r\n    ./sample/__init__.py\r\n    ./sample/core.py\r\n    ./sample/helpers.py\r\n    ```\r\n    1. If the module is just a single file, you can alternatively keep it as a file directly under the root folder.\r\n2. LICENSE\r\n\r\n    Keep your copyright claims and the details of the license (permissions) under which you share the files with others. To learn more about licenses, visit [www.choosealicense.com](https://choosealicense.com).\r\n\r\n    If you fail to include a license in your repo, other users may not be able to benefit from your contribution, and may not be able to join you in further expanding your work.\r\n\r\n    I personally choose `MIT` as the default license for my work unless I have a specific reason to choose another license.\r\n" }, { "alpha_fraction": 0.596287727355957, "alphanum_fraction": 0.5986078977584839, "avg_line_length": 14.5, "blob_id": "6ce8c509617b92f20dcdb438a4237997678ab995", "content_id": "336012cb778ad14ccd274e28376edeeaec4f3e2f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 431, "license_type": "permissive", "max_line_length": 44, "num_lines": 26, "path": "/experiments/my-calc.py", "repo_name": "beniza/learningPython", "src_encoding": "UTF-8", "text": "# Calculator project for collaboration\r\na = raw_input(\"Enter the first number :\")\r\nb = raw_input(\"\\nEnter the second number :\")\r\n\r\ndef addition(x, y):\r\n\treturn x + y\r\n\r\ndef sum(a, b):\r\n\treturn (a + b)\r\n\r\ndef division(x, y):\r\n\tif(y != 0):\r\n\t\treturn(x / y)\r\n\telse:\r\n\t\treturn(\"Division by Zero not defined\")\r\n\r\ndef mul(a, b):\r\n\treturn(a * b)\r\n\r\ndef subtraction(a, b):\r\n\treturn(a - b)\r\n\r\ndef mod(a, b):\r\n\treturn (a % b)\r\n\r\nprint \"Calculator\"\r\n" } ]
10
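
Several scripts in the record above (merge-tokens.py and merge-tokens2.py) accumulate token-to-values mappings with try/except around dictionary access. Here is a small sketch of the same aggregation with collections.defaultdict, which sidesteps the KeyError/AttributeError juggling; the (key, value) pairs below are stand-ins for the spreadsheet cells the scripts actually read with openpyxl, so this is illustrative only:

```python
from collections import defaultdict

# Stand-ins for the A/B cell pairs the scripts read from each worksheet row
rows = [("token1", "GEN"), ("token2", "EXO"), ("token1", "MAT"), (None, "skip")]

tok = defaultdict(list)        # every key maps to a list from the start
for key, value in rows:
    if key is None:            # skip empty A-cells explicitly,
        continue               # instead of catching AttributeError
    tok[key].append(value)     # append always works; no KeyError possible

for key, values in tok.items():
    print(key + "\t" + ", ".join(values))
```

With this shape there is nothing for a bare except to swallow, which also removes the silent data loss the original try/except pattern could cause when a key was seen more than once.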
aten2001/CV_assignment_2
https://github.com/aten2001/CV_assignment_2
4e74ee65f627aa35c8aad5c07905b6047029249b
73f85b918a92aa2b237d47f3b1f920e84ab98d94
4abda7e2ee914866c89ea9f858cd8b2cff0a8080
refs/heads/master
2022-02-18T14:16:44.538628
2019-09-25T23:27:18
2019-09-25T23:27:18
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5558217763900757, "alphanum_fraction": 0.5687589645385742, "avg_line_length": 31.10769271850586, "blob_id": "d103a90406a797af5e66a2a18cc4c0cb75be439d", "content_id": "2df161812351e383b382e7a78c49aba48f3543db", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2087, "license_type": "no_license", "max_line_length": 92, "num_lines": 65, "path": "/smaller_hough.py", "repo_name": "aten2001/CV_assignment_2", "src_encoding": "UTF-8", "text": "import numpy as np\nimport skimage.feature\nimport skimage.color\nimport matplotlib.pyplot as plt\nimport scipy.misc\n\ntheta_pace_detect_offset = 80\nthreshold_no_gradient = 0.8\nsmall_factor = 5\nmin_distance_between_centers = 10 / small_factor\n\n\n# Hough with smaller vote space\n# Does not include use gradient option, be care\ndef detectCircles(im, radius):\n edge = skimage.feature.canny(skimage.color.rgb2gray(im), sigma=3)\n plt.imshow(edge)\n plt.show()\n h, w, _ = im.shape\n acc = dict()\n acc_mat = np.zeros((h // small_factor, w // small_factor))\n pace = int(radius * 0.5) + theta_pace_detect_offset\n for i in range(h):\n for j in range(w):\n if edge[i, j]:\n for div in range(pace):\n theta = 2 * np.pi * div / pace\n a = int((-radius * np.cos(theta) + i) / small_factor)\n b = int((radius * np.sin(theta) + j) / small_factor)\n if isValid(h, w, a, b):\n acc[(a, b)] = acc.get((a, b), 0) + 1\n acc_mat[a, b] += 1\n\n # Getting centers of the circle + post-processing\n threshold = np.max(acc_mat) * threshold_no_gradient\n print(np.max(acc_mat))\n plt.imshow(acc_mat)\n plt.title('Smaller vote space accumulator - Radius = ' + str(radius))\n plt.show()\n acc_sorted = sorted(acc.items(), key=lambda kv: kv[1], reverse=True)\n qualified_center = []\n for k, v in acc_sorted:\n if v < threshold:\n break\n else:\n if not_close_center(k, qualified_center):\n qualified_center.append((k[0] * small_factor, k[1] * small_factor))\n\n # For constructing binary image with circle on it\n return qualified_center\n\n\ndef not_close_center(pos, set):\n for s in set:\n if (pos[0] - s[0]) ** 2 + (pos[1] - s[1]) ** 2 <= min_distance_between_centers ** 2:\n return False\n return True\n\n\ndef isValid(h, w, a, b):\n if a < 0 or a >= h // small_factor:\n return False\n if b < 0 or b >= w // small_factor:\n return False\n return True\n" }, { "alpha_fraction": 0.6708595156669617, "alphanum_fraction": 0.6792452931404114, "avg_line_length": 30.733333587646484, "blob_id": "e1b15487ef72edb967683f5b464657c053e2a9bd", "content_id": "378ba0179f64928239bae614a48288c850aabe94", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 477, "license_type": "no_license", "max_line_length": 61, "num_lines": 15, "path": "/quantizeRGB.py", "repo_name": "aten2001/CV_assignment_2", "src_encoding": "UTF-8", "text": "import scipy.cluster.vq\nimport scipy.misc\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef quantizeRGB(origImg, k):\n h,w,d = origImg.shape\n processed = np.reshape(origImg, (w*h, d))\n processed = np.array(processed, dtype=np.float64)\n centroid, labels = scipy.cluster.vq.kmeans2(processed, k)\n for i in range(h*w):\n processed[i] = centroid[labels[i]]\n res = np.reshape(processed, (h,w,d))\n res = res.astype(np.uint8)\n return res, centroid\n\n" }, { "alpha_fraction": 0.4658322036266327, "alphanum_fraction": 0.48646819591522217, "avg_line_length": 31.47252655029297, "blob_id": "d60cde8cda63a1ea9ee917fb898b786f0de00fda", "content_id": 
"78834ee78105785c295c24b15d78e5db02606a6d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2956, "license_type": "no_license", "max_line_length": 92, "num_lines": 91, "path": "/detectCircles.py", "repo_name": "aten2001/CV_assignment_2", "src_encoding": "UTF-8", "text": "import numpy as np\nimport skimage.feature\nimport skimage.color\nimport matplotlib.pyplot as plt\nimport scipy.misc\n\nmin_distance_between_centers = 10\ntheta_pace_detect_offset = 80\nthreshold_no_gradient = 25\nthreshold_gradient = 8\ntheta_pace_draw = 100\n\n\ndef detectCircles(im, radius, useGradient):\n edge = skimage.feature.canny(skimage.color.rgb2gray(im), sigma=3)\n plt.imshow(edge)\n plt.show()\n h, w, _ = im.shape\n acc = dict()\n acc_mat = np.zeros((h, w))\n pace = int(radius * 0.5) + theta_pace_detect_offset\n if useGradient == 0:\n for i in range(h):\n for j in range(w):\n if edge[i, j]:\n for div in range(pace):\n theta = 2 * np.pi * div / pace\n a = int(-radius * np.cos(theta) + i)\n b = int(radius * np.sin(theta) + j)\n if isValid(h, w, a, b):\n acc[(a, b)] = acc.get((a, b), 0) + 1\n acc_mat[a, b] += 1\n if useGradient == 1:\n gradient_map = np.gradient(skimage.color.rgb2gray(im))\n theta_map = np.arctan(-gradient_map[1]/gradient_map[0])\n for i in range(h):\n for j in range(w):\n if edge[i, j]:\n theta = theta_map[i,j]\n if not theta == theta:\n theta = np.pi/2\n a = int(-radius * np.cos(theta) + i)\n b = int(radius * np.sin(theta) + j)\n for augmented_a_b in augment_a_b(a,b):\n a_aug = augmented_a_b[0]\n b_aug = augmented_a_b[1]\n if isValid(h, w, a_aug, b_aug):\n acc[(a_aug, b_aug)] = acc.get((a_aug, b_aug), 0) + 1\n acc_mat[a_aug, b_aug] += 1\n\n\n # Getting centers of the circle + post-processing\n threshold = np.max(acc_mat) * 0.9\n print(np.max(acc_mat))\n plt.imshow(acc_mat)\n plt.title('Accumulator - Use gradient = '+str(useGradient)+' Radius = '+str(radius))\n plt.show()\n acc_sorted = sorted(acc.items(), key=lambda kv: kv[1], reverse=True)\n qualified_center = []\n for k, v in acc_sorted:\n if v < threshold:\n break\n else:\n if not_close_center(k, qualified_center):\n qualified_center.append(k)\n\n return qualified_center\n\n\ndef not_close_center(pos, set):\n for s in set:\n if (pos[0] - s[0]) ** 2 + (pos[1] - s[1]) ** 2 <= min_distance_between_centers ** 2:\n return False\n return True\n\n\ndef isValid(h, w, a, b):\n if a < 0 or a >= h:\n return False\n if b < 0 or b >= w:\n return False\n return True\n\ndef augment_a_b(a,b):\n res = []\n augment = [[-1,-1],[-1,0],[-1,1],\n [0,-1],[0,0],[0,1],\n [1,-1],[1,0],[1,1]]\n for aug in augment:\n res.append((a+aug[0], b+aug[1]))\n return res\n\n" }, { "alpha_fraction": 0.6799091696739197, "alphanum_fraction": 0.692395031452179, "avg_line_length": 24.941177368164062, "blob_id": "6e9321c0c758832892eb9c59ef1c55dfb753fcc9", "content_id": "62bd9b8bf6a4e99989af00648b98c3bea38ad91d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 881, "license_type": "no_license", "max_line_length": 100, "num_lines": 34, "path": "/hough_test.py", "repo_name": "aten2001/CV_assignment_2", "src_encoding": "UTF-8", "text": "import scipy.misc\nimport detectCircles\nimport matplotlib.pyplot as plt\nimport smaller_hough\n\nim = scipy.misc.imread('egg.jpg')\nradius = 15\nuse_gradient = 1\ncenters = detectCircles.detectCircles(im, radius, use_gradient)\nprint('detect' + str(len(centers)) + ' centers')\nxs = []\nys = []\nfor center in centers:\n xs.append(center[0])\n 
ys.append(center[1])\nplt.imshow(im)\nplt.scatter(ys, xs, s=radius**2,c='r')\nplt.title('Image with detected circle - use gradient = '+str(use_gradient)+\" radius = \"+str(radius))\nplt.show()\n\n\n\nim = scipy.misc.imread('jupiter.jpg')\nradius = 50\ncenters = smaller_hough.detectCircles(im, radius)\nxs = []\nys = []\nfor center in centers:\n xs.append(center[0])\n ys.append(center[1])\nplt.imshow(im)\nplt.scatter(ys, xs, s=radius**2,c='r')\nplt.title('Image with detected circle - use gradient = '+str(use_gradient)+\" radius = \"+str(radius))\nplt.show()" }, { "alpha_fraction": 0.4680365324020386, "alphanum_fraction": 0.4954337775707245, "avg_line_length": 30.285715103149414, "blob_id": "1c893991a0a92d939bad108edae842e06568cec5", "content_id": "51218a2c7aaf0d13e3447ffdc1d69065bb86841d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 438, "license_type": "no_license", "max_line_length": 71, "num_lines": 14, "path": "/computeQuantizationError.py", "repo_name": "aten2001/CV_assignment_2", "src_encoding": "UTF-8", "text": "import numpy as np\n\n\ndef computeQuantizationError(origImg, quantizedImg):\n h, w, d = origImg.shape\n sum = 0\n sum = np.int64(sum)\n for i in range(h):\n for j in range(w):\n error = (origImg[i, j, 0] - quantizedImg[i, j, 0]) ** 2 + \\\n (origImg[i, j, 1] - quantizedImg[i, j, 1]) ** 2 + \\\n (origImg[i, j, 2] - quantizedImg[i, j, 2]) ** 2\n sum+=error\n return sum\n" }, { "alpha_fraction": 0.6965116262435913, "alphanum_fraction": 0.7023255825042725, "avg_line_length": 36.39130401611328, "blob_id": "62470d71697d95aa0283b8bc0423fb33417bf2b6", "content_id": "9f5669f7ecf97afa17f5da15ce5b8d2ea1f072b0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 860, "license_type": "no_license", "max_line_length": 89, "num_lines": 23, "path": "/colorQuantizeMain.py", "repo_name": "aten2001/CV_assignment_2", "src_encoding": "UTF-8", "text": "import scipy\nimport quantizeRGB\nimport quantizeHSV\nimport matplotlib.pyplot as plt\nimport computeQuantizationError\n\nimg = scipy.misc.imread('fish.jpg')\n\n# Begin test k=3\nfor k in [3, 6, 15]:\n rgb_quantized_img, rgb_centroids = quantizeRGB.quantizeRGB(img, k)\n hsv_quantized_img, hsv_centroids = quantizeHSV.quantizeHSV(img, k)\n plt.imshow(rgb_quantized_img)\n plt.title('RGB quantized image with k = ' + str(k))\n plt.show()\n plt.imshow(hsv_quantized_img)\n plt.title('HSV quantized image with k = ' + str(k))\n plt.show()\n\n rgb_error = computeQuantizationError.computeQuantizationError(img, rgb_quantized_img)\n hsv_error = computeQuantizationError.computeQuantizationError(img, hsv_quantized_img)\n print('RGB SSD error with k = ', str(k), ' : ', str(rgb_error))\n print('HSV SSD error with k = ', str(k), ' : ', str(hsv_error))\n" }, { "alpha_fraction": 0.6039215922355652, "alphanum_fraction": 0.6183006763458252, "avg_line_length": 32.30434799194336, "blob_id": "6fa4776b894f21fbe4b3c9f94eb6c0ef914e3b94", "content_id": "f7c3ac46520278f6a88797975c000b1ac60c1972", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 765, "license_type": "no_license", "max_line_length": 61, "num_lines": 23, "path": "/quantizeHSV.py", "repo_name": "aten2001/CV_assignment_2", "src_encoding": "UTF-8", "text": "import scipy.cluster.vq\nimport scipy.misc\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport skimage.color\ndef quantizeHSV(origImg, k):\n origImg = skimage.color.rgb2hsv(origImg)\n 
h,w,d = origImg.shape\n processed = np.reshape(origImg, (w*h, d))\n processed = np.array(processed[:,0], dtype=np.float64)\n centroid, labels = scipy.cluster.vq.kmeans2(processed, k)\n for i in range(h*w):\n processed[i] = centroid[labels[i]]\n processed = np.reshape(processed, (h,w))\n res = np.zeros((h,w,d))\n for i in range(h):\n for j in range(w):\n res[i][j][0] = processed[i][j]\n res[i][j][1] = origImg[i][j][1]\n res[i][j][2] = origImg[i][j][2]\n\n res = skimage.color.hsv2rgb(res)\n return res, centroid" } ]
7
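
The CV_assignment_2 record above builds its circular Hough accumulator by hand in detectCircles.py and smaller_hough.py. For comparison, here is a sketch of the same detection using scikit-image's built-in transform; the hough_circle / hough_circle_peaks calls and the 'egg.jpg' file name (taken from the repo's test script) are assumptions about the environment, not code from the repo:

```python
import numpy as np
import skimage.io
import skimage.color
import skimage.feature
from skimage.transform import hough_circle, hough_circle_peaks

image = skimage.io.imread('egg.jpg')
edges = skimage.feature.canny(skimage.color.rgb2gray(image), sigma=3)

radii = np.arange(10, 21)             # candidate radii around the repo's radius=15
hspaces = hough_circle(edges, radii)  # one vote accumulator per candidate radius
accums, cx, cy, found = hough_circle_peaks(hspaces, radii, total_num_peaks=5)
for x, y, r in zip(cx, cy, found):
    print('circle centre ({}, {}), radius {}'.format(x, y, r))
```

This mirrors the repo's approach (Canny edges, then voting in center space), but lets the library handle peak picking and the minimum-distance suppression that detectCircles.py implements by hand with not_close_center.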
MTG/pymtg
https://github.com/MTG/pymtg
c654de60331d2c2059cb80f1f946ed017088a527
f3f13480a3f8fc5bfafad7611f20940cfc12dbdf
c8f1d8c92ec66db8cc53fd0bfbe81b212e18fe1d
refs/heads/master
2021-01-17T22:39:22.703851
2019-05-23T10:33:13
2019-05-23T10:33:13
84,200,695
7
3
MIT
2017-03-07T13:12:52
2019-01-29T11:07:40
2019-02-20T15:49:07
Python
[ { "alpha_fraction": 0.6483033895492554, "alphanum_fraction": 0.6522954106330872, "avg_line_length": 27.804597854614258, "blob_id": "42afde5c258089b4a119587ee484c172fd017fc4", "content_id": "c8c14661d9712c7aadf2f04af21a2549875d5bb6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 2505, "license_type": "permissive", "max_line_length": 134, "num_lines": 87, "path": "/docs/_sources/tips/tip9.rst.txt", "repo_name": "MTG/pymtg", "src_encoding": "UTF-8", "text": "Python tip of the week 9, CSV\n============================================\n\nLots of people store data in CSV format. Unfortunately there's no one standard for CSV, \nwhich means that it's really easy to generate badly formatted data, or encounter files \nthat are constructed badly. Be a good citizen and use a library to help you read and write CSV.\n\nNever try and read or write csv files manually. You will get it wrong.\n\nDon't do this:\n\n.. code-block:: python\n\n data = open('myfile.csv').read()\n lines = data.split('\\n')\n rows = lines[0].split(',')\n\nor this\n\n.. code-block:: python\n\n fp = open('myfile.csv', 'w')\n fp.write('%s,%s' % (myid, myvalue))\n\nor this\n\n.. code-block:: python\n\n fp.write('%s\\n' % ','.join([some, data, here]))\n\nInstead, you should use the csv module to read and write files: https://docs.python.org/3/library/csv.html\n\n.. code-block:: python\n\n with open('myfile.csv') as fp:\n csvreader = csv.reader(fp)\n for line in csvreader:\n print(line[2])\n\n with open('myfile.csv', 'w') as fp:\n writer = csv.writer(fp)\n writer.writerow(['data', 'here'])\n writer.writerows( a list of lists, writing each one on a new line )\n\nIf you want to use a file separated with tabs instead of spaces, you can set the delimiter\n\n.. code-block:: python\n\n reader = csv.reader(fp, delimiter='\\t')\n\nor specify a dialect, which also encodes rules about delimiters, quoting and escaping:\n\n.. code-block:: python\n\n reader = csv.reader(fp, dialect=csv.excel_tab)\n\nIf your csv file has a header line, you can use this to generate dictionaries from your file::\n\n # myfile.csv\n id,filename,license\n 1,test.mp3,cc0\n 2,foo.wav,ccy-by\n\n.. code-block:: python\n\n with open('myfile.csv') as fp:\n reader = csv.DictReader(fp)\n for line in reader:\n print(line['filename']) #-> prints 'test.mp3', 'foo.wav'\n\nTo write dictionaries, specify the columns in the DictWriter constructor:\n\n.. code-block:: python\n\n with open('myfile.csv', 'w') as fp:\n writer = csv.DictWriter(fp, fieldnames=['id', 'filename'])\n writer.writeheader()\n writer.writerow({'id': 1, 'filename': 'test.wav'})\n\nPandas also has methods for reading and writing csv files if you are already using it:\n\n.. code-block:: python\n\n import pandas as pd\n df = pd.read_csv('myfile.csv')\n\nNote that by default pandas requires all rows in your csv file to have the same number of columns, which might not always be the case." 
}, { "alpha_fraction": 0.6315789222717285, "alphanum_fraction": 0.6394736766815186, "avg_line_length": 33.54545593261719, "blob_id": "08301eace2327d49556085f57cd074562c91d0c4", "content_id": "c3102bf15de3dfded8801d078eeb820ab0716167", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 380, "license_type": "permissive", "max_line_length": 109, "num_lines": 11, "path": "/setup.py", "repo_name": "MTG/pymtg", "src_encoding": "UTF-8", "text": "from setuptools import setup, find_packages\n\nsetup(name='pymtg',\n version='0.2.1',\n description='Python research utils that some of us use at the MTG and eventually everyone will use :)',\n url='https://github.com/MTG/pymtg',\n author='Music Technology Group',\n author_email='mtg-info@upf.edu',\n license='MIT',\n install_requires=['numpy'],\n packages=find_packages())\n" }, { "alpha_fraction": 0.6620625853538513, "alphanum_fraction": 0.6740331649780273, "avg_line_length": 42.400001525878906, "blob_id": "b8742d4da52913fa6e1ad686521aa18cf6d15947", "content_id": "50f29f3eac149c42a7af002eae13352210c54cf1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1086, "license_type": "permissive", "max_line_length": 181, "num_lines": 25, "path": "/docs/_sources/tips/tip5.rst.txt", "repo_name": "MTG/pymtg", "src_encoding": "UTF-8", "text": "Python tip of the week 5, Sets\n===============================\n\nThe last major datatype of python (after lists and dictionaries the last few weeks)\n\nSets are \"unordered collections of unique elements\" - https://docs.python.org/3.7/library/stdtypes.html#set-types-set-frozenset\n\nWe mentioned sets for speed of membership tests in tip #2, but they are also useful for quickly calculating mathematical set operations.\n\nThis is especially useful when you want to calculate the intersection or difference of two collections of items. You can quickly convert a list into a set to perform this operation:\n\n.. 
code-block:: python\n\n\t>>> a = ['a', 'b', 'c', 1, 2, 3]\n\t>>> b = [1, 2, 3, 'x', 'y', 'z']\n\t>>> aset = set(a)\n\t>>> bset = set(b)\n\t>>> aset & bset # intersection, or aset.intersection(bset)\n\tset([1, 2, 3])\n\t>>> aset - bset # difference, or aset.difference(bset)\n\tset(['a', 'c', 'b'])\n\t>>> bset - aset\n\tset(['y', 'x', 'z'])\n\nNote that sets are unordered, you can't guarantee that converting a list to a set and back to a list again will result in the items remaining in the same order.\n\n" }, { "alpha_fraction": 0.8888888955116272, "alphanum_fraction": 0.8888888955116272, "avg_line_length": 36, "blob_id": "ffb1070b5453f48b66604cd1dce4c2fc3d2b133f", "content_id": "2697df03a4df781ee61c89287a2b873e1ebd4c37", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 36, "license_type": "permissive", "max_line_length": 36, "num_lines": 1, "path": "/pymtg/processing/__init__.py", "repo_name": "MTG/pymtg", "src_encoding": "UTF-8", "text": "from .worker import WorkParallelizer" }, { "alpha_fraction": 0.761904776096344, "alphanum_fraction": 0.762876570224762, "avg_line_length": 41.875, "blob_id": "fd93d1668178ee225e5c0959bef51e1180dc6721", "content_id": "3b27fa0047b22514f68da293a7f8c33b9ddc8cf8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2058, "license_type": "permissive", "max_line_length": 227, "num_lines": 48, "path": "/README.md", "repo_name": "MTG/pymtg", "src_encoding": "UTF-8", "text": "# pymtg\nPython research utils that some of us use at the MTG and eventually\neveryone will use :)\n\nYou can install this package using `pip` using the following line:\n```\npip install git+https://github.com/MTG/pymtg\n```\n\n## Documentation\n\nDocs are hosted using Github Pages and can be found here\n[https://mtg.github.io/pymtg/](https://mtg.github.io/pymtg/).\n\nDocs are built using [Sphinx](http://www.sphinx-doc.org/en/stable/).\nUse the following commands:\n```\npip install -r requirements_docs.txt\nmake html\n```\n\nTo update the documentation hosted in [https://mtg.github.io/pymtg/](https://mtg.github.io/pymtg/), docs need to be rebuild locally and\nnewly generated files (under `docs/`) pushed to the `master` branch.\n\nDocumentation is mainly built by reading docstrings. To have better\nconsistency we recommend to follow the [Google Style Python Docstrings](http://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html),\nwhich read well on console and also render well using Sphinx.\n\nWe also encourage you to use the Examples section of the docstring to\nprovide usage examples that can also be used as unit tests (using\n[doctest](https://docs.python.org/2/library/doctest.html)).\nHave a look at this nice post about [Testing through documentation](https://pymotw.com/2/doctest/) to learn how to use doctest.\n\nAll tests embedded in docstrings can be run at once with the command:\n```\npython test.py\n```\n\n## Adding Tips\n\nFirst of all you need to have [Sphinx](http://www.sphinx-doc.org/en/master/) installed.\n\nThen you need to go to `docs_src/tips` and make two different things:\n\n- Create a new file for your tip of the week. These files are written in [ReStructured Text](http://docutils.sourceforge.net/docs/user/rst/quickref.html#hyperlink-targets), so you just need to format your information into rst. 
\n- Add your tip to the list of tips on the toctree of `tips.rst`\n\nOnce that is done you just need to move to the pymtg folder on your terminal and type the following command `make html`. This will create the html pages from your rst files. " }, { "alpha_fraction": 0.7653958797454834, "alphanum_fraction": 0.7683284282684326, "avg_line_length": 30, "blob_id": "4354779d6285ee41c0372d9f1d4319966dbfb5cd", "content_id": "002bf62784e70896037ca5478f29db140f8b550b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 341, "license_type": "permissive", "max_line_length": 75, "num_lines": 11, "path": "/test.py", "repo_name": "MTG/pymtg", "src_encoding": "UTF-8", "text": "import doctest\nimport unittest\nimport pkgutil\nimport pymtg\n\n# Create a unit test suite and add all pymtg modules' doctests\nsuite = unittest.TestSuite()\nfor _, modname, _ in pkgutil.iter_modules(pymtg.__path__, prefix='pymtg.'):\n suite.addTest(doctest.DocTestSuite(modname))\nrunner = unittest.TextTestRunner(verbosity=1)\nrunner.run(suite)\n" }, { "alpha_fraction": 0.5605255961418152, "alphanum_fraction": 0.5663964152336121, "avg_line_length": 29.31355857849121, "blob_id": "b3fac482b2db5448dc9aa8e87c4c55e86ed13834", "content_id": "ddb9817a93d3c1c3a7f7e9bad37c5f6610a5f3a6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3577, "license_type": "permissive", "max_line_length": 141, "num_lines": 118, "path": "/pymtg/io/__init__.py", "repo_name": "MTG/pymtg", "src_encoding": "UTF-8", "text": "import os\nimport errno\nimport json\nimport fnmatch\n\n\ndef json_dump(path, data, indent=4, verbose=False):\n \"\"\"Save python dictionary ``data`` to JSON file at ``path``.\n\n Args:\n path (str): Path to the file\n verbose (bool): Verbosity flag\n \"\"\"\n with open(path, 'w') as f:\n if verbose:\n print('Saving data to {0}'.format(path))\n json.dump(data, f, indent=indent)\n\n\ndef json_load(path, verbose=False):\n \"\"\"Load python dictionary stored in JSON file at ``path``.\n\n Args:\n path (str): Path to the file\n verbose (bool): Verbosity flag\n\n Returns:\n (dict): Loaded JSON contents\n \"\"\"\n with open(path, 'r') as f:\n if verbose:\n print('Loading data from {0}'.format(path))\n return json.load(f)\n\n\ndef save_to_file(path, data, verbose=False):\n \"\"\" Save arbitrary data to file at ``path``.\n\n Args:\n path (str): Path to the file\n verbose (bool): Verbosity flag\n \"\"\"\n with open(path, 'w') as f:\n if verbose:\n print('Saving data to {0}'.format(path))\n f.write(data)\n\n\ndef mkdir_p(path):\n \"\"\"\n TODO: document this function\n \"\"\"\n try:\n os.makedirs(path)\n except OSError as exc:\n if exc.errno == errno.EEXIST and os.path.isdir(path):\n pass\n else:\n raise\n\n\ndef get_filenames_in_dir(dir_name, keyword='*', skip_foldername='', match_case=True, verbose=False):\n \"\"\"TODO: better document this function\n TODO: does a python 3 version of this function exist?\n\n Args:\n dir_name (str): The foldername.\n keyword (str): The keyword to search (defaults to '*').\n skip_foldername (str): An optional foldername to skip searching\n match_case (bool): Flag for case matching\n verbose (bool): Verbosity flag\n\n Returns:\n (tuple): Tuple containing:\n - fullnames (list): List of the fullpaths of the files found\n - folder (list): List of the folders of the files\n - names (list): List of the filenames without the foldername\n\n Examples:\n >>> get_filenames_in_dir('/path/to/dir/', '*.mp3') 
#doctest: +SKIP\n        (['/path/to/dir/file1.mp3', '/path/to/dir/folder1/file2.mp3'], ['/path/to/dir/', '/path/to/dir/folder1'], ['file1.mp3', 'file2.mp3'])\n    \"\"\"\n    names = []\n    folders = []\n    fullnames = []\n\n    if verbose:\n        print(dir_name)\n\n    # check if the folder exists\n    if not os.path.isdir(dir_name):\n        if verbose:\n            print(\"Directory doesn't exist!\")\n        return [], [], []\n\n    # if the dir_name finishes with the file separator,\n    # remove it so os.walk works properly\n    dir_name = dir_name[:-1] if dir_name[-1] == os.sep else dir_name\n\n    # walk all the subdirectories\n    for (path, dirs, files) in os.walk(dir_name):\n        for f in files:\n            hasKey = (fnmatch.fnmatch(f, keyword) if match_case else\n                      fnmatch.fnmatch(f.lower(), keyword.lower()))\n            if hasKey and skip_foldername not in path.split(os.sep)[1:]:\n                try:\n                    folders.append(unicode(path, 'utf-8'))\n                except TypeError:  # already unicode\n                    folders.append(path)\n                try:\n                    names.append(unicode(f, 'utf-8'))\n                except TypeError:  # already unicode\n                    names.append(f)\n                fullnames.append(os.path.join(path, f))\n\n    if verbose:\n        print(\"> Found \" + str(len(names)) + \" files.\")\n    return fullnames, folders, names\n" }, { "alpha_fraction": 0.6737385392189026, "alphanum_fraction": 0.6760321259498596, "avg_line_length": 35.726314544677734, "blob_id": "59c37b0b0d528a2cf2bf64e1b009a318f663972c", "content_id": "9d8fcad6bb56207a91b6b4003a326baeb7aff7a6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 3488, "license_type": "permissive", "max_line_length": 280, "num_lines": 95, "path": "/docs/_sources/tips/tip7.rst.txt", "repo_name": "MTG/pymtg", "src_encoding": "UTF-8", "text": "Python tip of the week 7, Paths and Files\n=========================================\n\nYou may wish to load files in your python code. Sometimes you might want to pass a directory for input or output as a program argument or function parameter. Imagine that you have an output directory and want to write some files to it. You may be tempted to do something like this:\n\n.. code-block:: python\n\n    >>> filename = output_dir + \"data.json\"\n\nbut this requires that ``output_dir`` ends in a ``/``, otherwise you'll just write a file \"resultsdata.json\" instead of \"results/data.json\". You could ask people to ensure that it ends with a /, or check it yourself, but this can get messy.\nInstead, use ``os.path.join``:\n\n.. code-block:: python\n\n    >>> filename = os.path.join(output_dir, \"data.json\")\n\nThis will automatically join arguments with a directory separator if needed (\\\\ in windows!), and can take any number of arguments, which it will join.\n\nIn python 3, you can also use the ``pathlib`` module which is pretty cool: https://docs.python.org/3/library/pathlib.html\n\n.. code-block:: python\n\n    >>> output_dir = pathlib.Path(dirname)\n    >>> filename = output_dir / \"data.json\"\n\nThis overrides the division operator to allow you to join paths together. As long as one of the items is a Path object, you can perform this operation.\n\nThe os.path and os modules have many other useful methods:\n\n - https://docs.python.org/3/library/os.path.html\n - https://docs.python.org/3/library/os.html\n\nTake a look at the documentation, but here are a few methods which I use often:\n\nSplitting a filename into parts\n\n.. code-block:: python\n\n    # Takes care of multiple . 
in the filename, some extensions are longer than 3 characters\n    >>> os.path.splitext(\"myfile.data.json\")\n    ('myfile.data', '.json')\n\n    # Get the filename from a full path\n    >>> os.path.basename(\"/path/to/myfile.json\")\n    myfile.json\n\n    # Get the directory name from a full path\n    >>> os.path.dirname(\"/path/to/myfile.json\")\n    /path/to\n\nMaking directories\n\n.. code-block:: python\n\n    # os.mkdir can only make one directory at a time. If you want to make a tree, use:\n    >>> os.makedirs(\"all/of/my/directories\")\n    # This will raise an exception if the final directory already exists. In python 3 use\n    >>> os.makedirs(\"all/of/my/directories\", exist_ok=True)\n    # or\n    import errno, os\n    try:\n        os.makedirs(\"all/of/my/directories\")\n    except OSError as e:\n        if e.errno != errno.EEXIST:\n            raise\n\nWhy shouldn't you use ``os.path.exists()`` instead of catching ``OSError``? Because it's theoretically possible that between the time that you check if a directory exists, and you make it, some other process could create this directory. It's better to check the exception instead.\n\nGetting file lists from directories\n-----------------------------------\n\nOften you might want to scan a directory and get files in it:\n\nSingle directory:\n\n.. code-block:: python\n\n    >>> os.listdir(\"/my/directory\")\n    >>> glob.glob(\"/my/directory/*.txt\")\n\nRecursive:\n\n.. code-block:: python\n\n    >>> all_files = []\n    >>> for root, dirs, files in os.walk(\"/my/directory\"):\n    ...     for f in files:\n    ...         if f.endswith(\".txt\"):\n    ...             all_files.append(os.path.join(root, f))\n\n    >>> glob.glob(\"/my/directory/**/*.txt\", recursive=True)\n\nOther file operations\n---------------------\n\nAlso check out the shutil module for functions to copy and move files: https://docs.python.org/3/library/shutil.html" }, { "alpha_fraction": 0.5635808706283569, "alphanum_fraction": 0.5635808706283569, "avg_line_length": 13.04285717010498, "blob_id": "8fcf0deddfef3bed9868124d7a0d8e8c268907b9", "content_id": "3fcf9e6a0ed20bda2da5d74b67ad01ec6010b702", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 983, "license_type": "permissive", "max_line_length": 47, "num_lines": 70, "path": "/docs_src/index.rst", "repo_name": "MTG/pymtg", "src_encoding": "UTF-8", "text": "pymtg documentation\n===================\n\n:doc:`Tip of the week <tips/tips>`\n----------------------------------\n\nInstallation\n------------\n\nYou can install ``pymtg`` using ``pip``:\n\n.. code-block:: none\n\n    pip install git+https://github.com/MTG/pymtg\n\n\npymtg.io\n--------\n\n.. automodule:: pymtg.io\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\n\npymtg.iterables\n---------------\n\n.. automodule:: pymtg.iterables\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\n\npymtg.plotting\n--------------\n\n.. automodule:: pymtg.plotting\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\npymtg.processing\n----------------\n\n.. automodule:: pymtg.processing\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\n\n.. autoclass:: WorkParallelizer\n\n\npymtg.signal\n------------\n\n.. automodule:: pymtg.signal\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\n\npymtg.time\n----------\n\n.. 
automodule:: pymtg.time\n :members:\n :undoc-members:\n :show-inheritance:\n" }, { "alpha_fraction": 0.6454545259475708, "alphanum_fraction": 0.6454545259475708, "avg_line_length": 26.5, "blob_id": "820d99677a0afc194bb3af12d9cba865bf4cb5d6", "content_id": "c5d6c807afbd01ed9b8a9a47a9712ed780a8197b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 110, "license_type": "permissive", "max_line_length": 69, "num_lines": 4, "path": "/pymtg/__init__.py", "repo_name": "MTG/pymtg", "src_encoding": "UTF-8", "text": "import pkgutil\n\nfor _, modname, _ in pkgutil.iter_modules(__path__, prefix='pymtg.'):\n __import__(modname)\n" }, { "alpha_fraction": 0.5909090638160706, "alphanum_fraction": 0.7272727489471436, "avg_line_length": 21, "blob_id": "a0752e70e529f5a951c931f4a0789d0647b18fb0", "content_id": "a0af1faf7f612a932f2eb2099466849c85b460f7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 44, "license_type": "permissive", "max_line_length": 29, "num_lines": 2, "path": "/requirements_doc.txt", "repo_name": "MTG/pymtg", "src_encoding": "UTF-8", "text": "Sphinx==1.8.3\nsphinx-bootstrap-theme==0.6.5\n" }, { "alpha_fraction": 0.6476210355758667, "alphanum_fraction": 0.654086709022522, "avg_line_length": 61.964603424072266, "blob_id": "215c0cb21e0f41bf659fce1d39176f96cc2b6b8c", "content_id": "a68a74ef7c43946a81205e79fb7808563373a4e3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 14244, "license_type": "permissive", "max_line_length": 643, "num_lines": 226, "path": "/docs/tips/tip3.html", "repo_name": "MTG/pymtg", "src_encoding": "UTF-8", "text": "<!DOCTYPE html>\n\n<html xmlns=\"http://www.w3.org/1999/xhtml\">\n <head>\n <meta http-equiv=\"X-UA-Compatible\" content=\"IE=Edge\" />\n <meta http-equiv=\"Content-Type\" content=\"text/html; charset=utf-8\" />\n <title>Python tip of the week 3, Python Lists &#8212; pymtg 0.1 documentation</title>\n <link rel=\"stylesheet\" href=\"../_static/bootstrap-sphinx.css\" type=\"text/css\" />\n <link rel=\"stylesheet\" href=\"../_static/pygments.css\" type=\"text/css\" />\n <script type=\"text/javascript\" id=\"documentation_options\" data-url_root=\"../\" src=\"../_static/documentation_options.js\"></script>\n <script type=\"text/javascript\" src=\"../_static/jquery.js\"></script>\n <script type=\"text/javascript\" src=\"../_static/underscore.js\"></script>\n <script type=\"text/javascript\" src=\"../_static/doctools.js\"></script>\n <script type=\"text/javascript\" src=\"../_static/language_data.js\"></script>\n <script type=\"text/javascript\" src=\"../_static/js/jquery-1.11.0.min.js\"></script>\n <script type=\"text/javascript\" src=\"../_static/js/jquery-fix.js\"></script>\n <script type=\"text/javascript\" src=\"../_static/bootstrap-3.3.7/js/bootstrap.min.js\"></script>\n <script type=\"text/javascript\" src=\"../_static/bootstrap-sphinx.js\"></script>\n <link rel=\"index\" title=\"Index\" href=\"../genindex.html\" />\n <link rel=\"search\" title=\"Search\" href=\"../search.html\" />\n<meta charset='utf-8'>\n<meta http-equiv='X-UA-Compatible' content='IE=edge,chrome=1'>\n<meta name='viewport' content='width=device-width, initial-scale=1.0, maximum-scale=1'>\n<meta name=\"apple-mobile-web-app-capable\" content=\"yes\">\n\n </head><body>\n\n <div id=\"navbar\" class=\"navbar navbar-default navbar-fixed-top\">\n <div class=\"container\">\n <div class=\"navbar-header\">\n <!-- 
.btn-navbar is used as the toggle for collapsed navbar content -->\n <button type=\"button\" class=\"navbar-toggle\" data-toggle=\"collapse\" data-target=\".nav-collapse\">\n <span class=\"icon-bar\"></span>\n <span class=\"icon-bar\"></span>\n <span class=\"icon-bar\"></span>\n </button>\n <a class=\"navbar-brand\" href=\"../index.html\">\n pymtg</a>\n <span class=\"navbar-text navbar-version pull-left\"><b>0.1</b></span>\n </div>\n\n <div class=\"collapse navbar-collapse nav-collapse\">\n <ul class=\"nav navbar-nav\">\n \n \n <li class=\"dropdown globaltoc-container\">\n <a role=\"button\"\n id=\"dLabelGlobalToc\"\n data-toggle=\"dropdown\"\n data-target=\"#\"\n href=\"../index.html\">Site <b class=\"caret\"></b></a>\n <ul class=\"dropdown-menu globaltoc\"\n role=\"menu\"\n aria-labelledby=\"dLabelGlobalToc\"></ul>\n</li>\n \n <li class=\"dropdown\">\n <a role=\"button\"\n id=\"dLabelLocalToc\"\n data-toggle=\"dropdown\"\n data-target=\"#\"\n href=\"#\">Page <b class=\"caret\"></b></a>\n <ul class=\"dropdown-menu localtoc\"\n role=\"menu\"\n aria-labelledby=\"dLabelLocalToc\"><ul>\n<li><a class=\"reference internal\" href=\"#\">Python tip of the week 3, Python Lists</a><ul>\n<li><a class=\"reference internal\" href=\"#processing-elements-in-lists\">Processing elements in lists</a></li>\n<li><a class=\"reference internal\" href=\"#filtering-elements-in-lists\">Filtering elements in lists</a></li>\n<li><a class=\"reference internal\" href=\"#flatten-a-nested-list\">Flatten a nested list</a></li>\n<li><a class=\"reference internal\" href=\"#combining-multiple-lists\">Combining multiple lists</a></li>\n</ul>\n</li>\n</ul>\n</ul>\n</li>\n \n \n \n \n \n \n \n \n \n \n <li class=\"hidden-sm\">\n<div id=\"sourcelink\">\n <a href=\"../_sources/tips/tip3.rst.txt\"\n rel=\"nofollow\">Source</a>\n</div></li>\n \n </ul>\n\n \n \n<form class=\"navbar-form navbar-right\" action=\"../search.html\" method=\"get\">\n <div class=\"form-group\">\n <input type=\"text\" name=\"q\" class=\"form-control\" placeholder=\"Search\" />\n </div>\n <input type=\"hidden\" name=\"check_keywords\" value=\"yes\" />\n <input type=\"hidden\" name=\"area\" value=\"default\" />\n</form>\n \n </div>\n </div>\n </div>\n\n<div class=\"container\">\n <div class=\"row\">\n <div class=\"col-md-12 content\">\n \n <div class=\"section\" id=\"python-tip-of-the-week-3-python-lists\">\n<h1>Python tip of the week 3, Python Lists<a class=\"headerlink\" href=\"#python-tip-of-the-week-3-python-lists\" title=\"Permalink to this headline\">¶</a></h1>\n<p>One of the Python data structures is List.\nIn Python, a list:</p>\n<blockquote>\n<div>is ordered\nis mutable (elements inside can change)\ncontains elements that can be accessed by index\nallows for duplicate elements\ncan contain any arbitrary objects (integers, strings, lists, …)\ncan be nested (can contain lists with more lists inside)\nis dynamic (can grow as needed)</div></blockquote>\n<div class=\"section\" id=\"processing-elements-in-lists\">\n<h2>Processing elements in lists<a class=\"headerlink\" href=\"#processing-elements-in-lists\" title=\"Permalink to this headline\">¶</a></h2>\n<p>We often have to store elements in a list and apply some processing to the values.\nConsider if we have a list of positive float values that we would like to scale between 0 and 1.\nWhen you want to do such processing, it is always a good idea to check the time-complexity (aka “Big O”) of list operations.\nAs getting and appending elements operations in lists are in O(1), we can expect to solve the 
problem in O(n) by iterating through all the elements, and create a new list with the processed elements.\nYou could do something like this:</p>\n<div class=\"highlight-python notranslate\"><div class=\"highlight\"><pre><span></span>\n</pre></div>\n</div>\n<p>import random</p>\n<blockquote>\n<div><p>random_floats = [random.uniform(0,100) for _ in range(100)]</p>\n<dl class=\"docutils\">\n<dt>def scale(values):</dt>\n<dd><p class=\"first\">max_value = max(values)\nscaled_values = []\nfor value in values:</p>\n<blockquote>\n<div>scaled_values.append(value/max_value)</div></blockquote>\n<p class=\"last\">return scaled_values</p>\n</dd>\n</dl>\n</div></blockquote>\n<p>However, we can do better in terms of code readability.\nList comprehensions provide a concise way to create lists.\nWe could even say that they are one of the most important tools in a*Pythonista*’s toolbox.\nMoreover, using them will often lead to better performances.\nThis is how we can rewrite our function:</p>\n<div class=\"highlight-python notranslate\"><div class=\"highlight\"><pre><span></span><span class=\"k\">def</span> <span class=\"nf\">scale</span><span class=\"p\">(</span><span class=\"n\">values</span><span class=\"p\">):</span>\n <span class=\"n\">max_value</span> <span class=\"o\">=</span> <span class=\"nb\">max</span><span class=\"p\">(</span><span class=\"n\">values</span><span class=\"p\">)</span>\n <span class=\"k\">return</span> <span class=\"p\">[</span><span class=\"n\">value</span><span class=\"o\">/</span><span class=\"n\">max_value</span> <span class=\"k\">for</span> <span class=\"n\">value</span> <span class=\"ow\">in</span> <span class=\"n\">values</span><span class=\"p\">]</span>\n</pre></div>\n</div>\n<p>Furthermore, for the ones that love functional style, Python offers some functions which facilitate a functional approach to programming.\nHere is how we solve our problem with the <em>map()</em> and <em>lambda</em> functions:</p>\n<div class=\"highlight-python notranslate\"><div class=\"highlight\"><pre><span></span><span class=\"k\">def</span> <span class=\"nf\">scale</span><span class=\"p\">(</span><span class=\"n\">values</span><span class=\"p\">):</span>\n <span class=\"n\">max_value</span> <span class=\"o\">=</span> <span class=\"nb\">max</span><span class=\"p\">(</span><span class=\"n\">values</span><span class=\"p\">)</span>\n <span class=\"k\">return</span> <span class=\"nb\">map</span><span class=\"p\">(</span><span class=\"k\">lambda</span> <span class=\"n\">x</span><span class=\"p\">:</span> <span class=\"n\">x</span><span class=\"o\">/</span><span class=\"n\">max_value</span><span class=\"p\">,</span> <span class=\"n\">values</span><span class=\"p\">)</span>\n</pre></div>\n</div>\n<p>Be aware that this last approach will be different in Python 2 and Python 3.\nIn Python 2, map() returns a list, whereas in Python 3, it returns an <em>iterator</em> that applies the function given as first argument, to every items in <em>values</em>.</p>\n</div>\n<div class=\"section\" id=\"filtering-elements-in-lists\">\n<h2>Filtering elements in lists<a class=\"headerlink\" href=\"#filtering-elements-in-lists\" title=\"Permalink to this headline\">¶</a></h2>\n<p>Let’s now consider another example to be sure things are understood.\nWe have now a list of float numbers, and we would like to get only the values that are higher than a threshold value. 
With a list comprehension, here is a function that does the trick:</p>\n<div class=\"highlight-python notranslate\"><div class=\"highlight\"><pre><span></span><span class=\"k\">def</span> <span class=\"nf\">filter_threshold</span><span class=\"p\">(</span><span class=\"n\">values</span><span class=\"p\">,</span> <span class=\"n\">threshold</span><span class=\"p\">):</span>\n <span class=\"k\">return</span> <span class=\"p\">[</span><span class=\"n\">value</span> <span class=\"k\">for</span> <span class=\"n\">value</span> <span class=\"ow\">in</span> <span class=\"n\">values</span> <span class=\"k\">if</span> <span class=\"n\">value</span> <span class=\"o\">&gt;</span> <span class=\"n\">threshold</span><span class=\"p\">]</span>\n</pre></div>\n</div>\n<p>And the functional approach:</p>\n<div class=\"highlight-python notranslate\"><div class=\"highlight\"><pre><span></span><span class=\"k\">def</span> <span class=\"nf\">filter_threshold</span><span class=\"p\">(</span><span class=\"n\">values</span><span class=\"p\">,</span> <span class=\"n\">threshold</span><span class=\"p\">):</span>\n <span class=\"k\">return</span> <span class=\"nb\">filter</span><span class=\"p\">(</span><span class=\"k\">lambda</span> <span class=\"n\">x</span><span class=\"p\">:</span> <span class=\"n\">x</span> <span class=\"o\">&gt;</span> <span class=\"n\">threshold</span><span class=\"p\">,</span> <span class=\"n\">values</span><span class=\"p\">)</span>\n</pre></div>\n</div>\n</div>\n<div class=\"section\" id=\"flatten-a-nested-list\">\n<h2>Flatten a nested list<a class=\"headerlink\" href=\"#flatten-a-nested-list\" title=\"Permalink to this headline\">¶</a></h2>\n<p>List comprehensions can be very useful, but sometimes they can be hard to understand. If you practice, you will get better at it, and it will simplify your <em>Pythonic</em> life. 
Here is a more complicated example:</p>\n<div class=\"highlight-python notranslate\"><div class=\"highlight\"><pre><span></span><span class=\"n\">nested_list</span> <span class=\"o\">=</span> <span class=\"p\">[[</span><span class=\"mi\">1</span><span class=\"p\">,</span> <span class=\"mi\">2</span><span class=\"p\">,</span> <span class=\"mi\">3</span><span class=\"p\">],</span> <span class=\"p\">[</span><span class=\"mi\">1</span><span class=\"p\">,</span> <span class=\"mi\">2</span><span class=\"p\">],</span> <span class=\"p\">[</span><span class=\"mi\">1</span><span class=\"p\">],</span> <span class=\"p\">[</span><span class=\"mi\">5</span><span class=\"p\">,</span> <span class=\"mi\">4</span><span class=\"p\">]]</span>\n<span class=\"n\">flat_list</span> <span class=\"o\">=</span> <span class=\"p\">[</span><span class=\"n\">item</span> <span class=\"k\">for</span> <span class=\"n\">sublist</span> <span class=\"ow\">in</span> <span class=\"n\">nested_list</span> <span class=\"k\">for</span> <span class=\"n\">item</span> <span class=\"ow\">in</span> <span class=\"n\">sublist</span><span class=\"p\">]</span>\n</pre></div>\n</div>\n<p>Pay attention to the order in which the iteration variables are declared in the list comprehension; it is not very intuitive at first!</p>\n</div>\n<div class=\"section\" id=\"combining-multiple-lists\">\n<h2>Combining multiple lists<a class=\"headerlink\" href=\"#combining-multiple-lists\" title=\"Permalink to this headline\">¶</a></h2>\n<p>Finally, another common thing is to have several lists that we would like to combine and then iterate through them.\nInstead of doing two for loops, we could use the zip() function to do something like this:</p>\n<div class=\"highlight-python notranslate\"><div class=\"highlight\"><pre><span></span><span class=\"k\">for</span> <span class=\"p\">(</span><span class=\"n\">value1</span><span class=\"p\">,</span> <span class=\"n\">value2</span><span class=\"p\">)</span> <span class=\"ow\">in</span> <span class=\"nb\">zip</span><span class=\"p\">(</span><span class=\"n\">list1</span><span class=\"p\">,</span> <span class=\"n\">list2</span><span class=\"p\">):</span>\n    <span class=\"k\">print</span><span class=\"p\">(</span><span class=\"n\">value1</span><span class=\"p\">,</span> <span class=\"n\">value2</span><span class=\"p\">)</span>\n</pre></div>\n</div>\n<p>Conversely, if you have a list of tuples, you can unzip them by doing:</p>\n<div class=\"highlight-python notranslate\"><div class=\"highlight\"><pre><span></span><span class=\"n\">zipped_list</span> <span class=\"o\">=</span> <span class=\"p\">[(</span><span class=\"mi\">1</span><span class=\"p\">,</span> <span class=\"mi\">2</span><span class=\"p\">),</span> <span class=\"p\">(</span><span class=\"mi\">3</span><span class=\"p\">,</span> <span class=\"mi\">4</span><span class=\"p\">),</span> <span class=\"p\">(</span><span class=\"mi\">5</span><span class=\"p\">,</span> <span class=\"mi\">6</span><span class=\"p\">)]</span>\n<span class=\"n\">unzipped_lists</span> <span class=\"o\">=</span> <span class=\"nb\">list</span><span class=\"p\">(</span><span class=\"nb\">zip</span><span class=\"p\">(</span><span class=\"o\">*</span><span class=\"n\">zipped_list</span><span class=\"p\">))</span> <span class=\"c1\"># -&gt; [(1, 3, 5), (2, 4, 6)]</span>\n</pre></div>\n</div>\n<p>Be careful again, the output of <em>zip()</em> is an <em>iterator</em>. You will need to cast it as a list or iterate through it. 
No need to understand much about <em>iterators</em>; we will leave that for another tip ;)</p>\n</div>\n</div>\n\n\n           </div>\n     \n    </div>\n</div>\n<footer class=\"footer\">\n  <div class=\"container\">\n    <p class=\"pull-right\">\n      <a href=\"#\">Back to top</a>\n      \n    </p>\n    <p>\n        &copy; Copyright 2019, Music Technology Group, UPF.<br/>\n      Created using <a href=\"http://sphinx-doc.org/\">Sphinx</a> 1.8.3.<br/>\n    </p>\n  </div>\n</footer>\n  </body>\n</html>" }, { "alpha_fraction": 0.4068323075771332, "alphanum_fraction": 0.5652173757553101, "avg_line_length": 41.93333435058594, "blob_id": "2aab20358f6c602449fed14c2cc3a0e3ba8ebb2e", "content_id": "3baa656c324e87ad721b149b08800b0659cbd75b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 644, "license_type": "permissive", "max_line_length": 119, "num_lines": 15, "path": "/pymtg/plotting/__init__.py", "repo_name": "MTG/pymtg", "src_encoding": "UTF-8", "text": "COLORS = ['#FF4500', '#FFA500', '#6B8E23', '#32CD32', '#FFD700', '#008B8B', '#00008B', '#B22222', '#1E90FF', '#FF1493',\n          '#008000', '#DAA520', '#2F4F4F', '#8B0000', '#FF8C00', '#8B008B', '#A9A9A9', '#B8860B', '#00FFFF', '#6495ED',\n          '#FF7F50', '#D2691E', '#7FFF00', '#DEB887', '#8A2BE2', '#0000FF', '#000000']\n\n\ndef color_at_index(index):\n    \"\"\"Return hexadecimal color at given ``index`` from ``COLORS``.\n\n    Args:\n        index (int): Index of color to return (wraps if larger than the length of ``COLORS``)\n\n    Returns:\n        (str): Hexadecimal color code (starts with #)
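\n\n    Examples:\n        >>> color_at_index(0)\n        '#FF4500'\n        >>> color_at_index(len(COLORS))  # wraps back around to the first color\n        '#FF4500'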
}, { "alpha_fraction": 0.3969697058200836, "alphanum_fraction": 0.4545454680919647, "avg_line_length": 29, "blob_id": "2302d43c1698c829320fe67bcf3b4ec0c3402ce3", "content_id": "114ede991b0cbf29d01f876644d39dc8e5d0a047", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 330, "license_type": "permissive", "max_line_length": 58, "num_lines": 11, "path": "/pymtg/iterables/__init__.py", "repo_name": "MTG/pymtg", "src_encoding": "UTF-8", "text": "def chunks(l, n):\n \"\"\"Yield successive ``n``-sized chunks from ``l``.\n\n Examples:\n >>> chunks([1, 2, 3, 4, 5], 2) #doctest: +ELLIPSIS\n <generator object chunks at 0x...>\n >>> list(chunks([1, 2, 3, 4, 5], 2))\n [[1, 2], [3, 4], [5]]\n \"\"\"\n for i in range(0, len(l), n):\n yield l[i:i+n]\n" }, { "alpha_fraction": 0.6883720755577087, "alphanum_fraction": 0.6979069709777832, "avg_line_length": 37.044246673583984, "blob_id": "f2d4340050d9472dbe2a1c0f860b6c7baacfe7d7", "content_id": "83edebeafe4565537fba25e57350598188ac4bd3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 4302, "license_type": "permissive", "max_line_length": 208, "num_lines": 113, "path": "/docs/_sources/tips/tip3.rst.txt", "repo_name": "MTG/pymtg", "src_encoding": "UTF-8", "text": "Python tip of the week 3, Python Lists\n=======================================\n\nOne of the Python data structures is List.\nIn Python, a list:\n\n is ordered\n is mutable (elements inside can change)\n contains elements that can be accessed by index\n allows for duplicate elements\n can contain any arbitrary objects (integers, strings, lists, ...)\n can be nested (can contain lists with more lists inside)\n is dynamic (can grow as needed)\n\n\nProcessing elements in lists\n-----------------------------\n\nWe often have to store elements in a list and apply some processing to the values.\nConsider if we have a list of positive float values that we would like to scale between 0 and 1.\nWhen you want to do such processing, it is always a good idea to check the time-complexity (aka \"Big O\") of list operations.\nAs getting and appending elements operations in lists are in O(1), we can expect to solve the problem in O(n) by iterating through all the elements, and create a new list with the processed elements.\nYou could do something like this:\n\n.. code-block:: python\n\nimport random\n\n random_floats = [random.uniform(0,100) for _ in range(100)]\n\n def scale(values):\n max_value = max(values)\n scaled_values = []\n for value in values:\n scaled_values.append(value/max_value)\n return scaled_values\n\nHowever, we can do better in terms of code readability.\nList comprehensions provide a concise way to create lists.\nWe could even say that they are one of the most important tools in a*Pythonista*’s toolbox.\nMoreover, using them will often lead to better performances.\nThis is how we can rewrite our function:\n\n.. code-block:: python\n\n def scale(values):\n max_value = max(values)\n return [value/max_value for value in values]\n\nFurthermore, for the ones that love functional style, Python offers some functions which facilitate a functional approach to programming.\nHere is how we solve our problem with the *map()* and *lambda* functions:\n\n.. 
.. code-block:: python\n\n    def scale(values):\n        max_value = max(values)\n        return map(lambda x: x/max_value, values)\n\nBe aware that this last approach will be different in Python 2 and Python 3.\nIn Python 2, map() returns a list, whereas in Python 3, it returns an *iterator* that applies the function given as its first argument to every item in *values*.\n\n\nFiltering elements in lists\n----------------------------\n\nLet's now consider another example to make sure things are understood.\nWe have now a list of float numbers, and we would like to get only the values that are higher than a threshold value. With a list comprehension, here is a function that does the trick:\n\n.. code-block:: python\n\n    def filter_threshold(values, threshold):\n        return [value for value in values if value > threshold]\n\nAnd the functional approach:\n\n.. code-block:: python\n\n    def filter_threshold(values, threshold):\n        return filter(lambda x: x > threshold, values)\n\n\nFlatten a nested list\n----------------------\n\nList comprehensions can be very useful, but sometimes they can be hard to understand. If you practice, you will get better at it, and it will simplify your *Pythonic* life. Here is a more complicated example:\n\n.. code-block:: python\n\n    nested_list = [[1, 2, 3], [1, 2], [1], [5, 4]]\n    flat_list = [item for sublist in nested_list for item in sublist]\n\nPay attention to the order in which the iteration variables are declared in the list comprehension; it is not very intuitive at first!\n\n\nCombining multiple lists\n-------------------------\n\nFinally, another common thing is to have several lists that we would like to combine and then iterate through them.\nInstead of doing two for loops, we could use the zip() function to do something like this:\n\n.. code-block:: python\n\n    for (value1, value2) in zip(list1, list2):\n        print(value1, value2)\n\nConversely, if you have a list of tuples, you can unzip them by doing:\n\n.. code-block:: python\n\n    zipped_list = [(1, 2), (3, 4), (5, 6)]\n    unzipped_lists = list(zip(*zipped_list)) # -> [(1, 3, 5), (2, 4, 6)]\n\nBe careful again, the output of *zip()* is an *iterator*. You will need to cast it as a list or iterate through it. No need to understand much about *iterators*; we will leave that for another tip ;)\n\n" }, { "alpha_fraction": 0.7154072523117065, "alphanum_fraction": 0.7193326950073242, "avg_line_length": 67, "blob_id": "1ea197007b63b266daaa8b201722bd50f8ca1f45", "content_id": "f56e001457957a695d47a5e436da0c4e6decd960", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1019, "license_type": "permissive", "max_line_length": 364, "num_lines": 15, "path": "/docs_src/tips/tip1.rst", "repo_name": "MTG/pymtg", "src_encoding": "UTF-8", "text": "Python tip of the week 1, timing\n=================================\nSometimes you might want to measure how long an operation takes to complete. Surround the call to the function with calls to ``time.monotonic()``: https://docs.python.org/3/library/time.html#time.monotonic\n\n.. code-block:: python\n\n    import time\n\n    start = time.monotonic()\n    result = my_long_running_function()\n    end = time.monotonic()\n    total = end - start\n    print('This operation took {:.2f} seconds'.format(total))\n\n``monotonic()`` is guaranteed to always increase. 
Other tips suggest ``time.time()``, which will work most of the time, but this value could go backwards or skip forwards in some cases (daylight saving time switchover, if you suspend your machine, if your operating system syncs the clock and makes a change...). Make the right choice and learn to use ``monotonic()``.\n\nIf you want to do a micro-benchmark to see how long a function takes to complete (for example if you're optimising it), use the timeit module: https://docs.python.org/3/library/timeit.html
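\n\nFor example, a minimal sketch (``my_function`` is a placeholder for whatever you want to benchmark):\n\n.. code-block:: python\n\n    import timeit\n\n    # run the call 1000 times and return the total elapsed seconds\n    timeit.timeit('my_function()', number=1000, globals=globals())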
" }, { "alpha_fraction": 0.6552491188049316, "alphanum_fraction": 0.7208629846572876, "avg_line_length": 50.102272033691406, "blob_id": "86b4378605ccf4173e662224fb1d45ea85aebfc1", "content_id": "b706e9ab538e7145708adf19415a8b89763dddaa", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 4496, "license_type": "permissive", "max_line_length": 386, "num_lines": 88, "path": "/docs/_sources/tips/tip2.rst.txt", "repo_name": "MTG/pymtg", "src_encoding": "UTF-8", "text": "Python tip of the week 2, access speed of data structures\n==========================================================\n\nPython has 3 main types of data structures for storing collections of items - lists, sets, and dictionaries. It's important to know that the access speed of a list is O(n); that is, it takes longer and longer to check something as the list gets longer. Sets and dictionaries have O(1) access, which means that the access speed is the same regardless of the size of the collection.\n\nConsider if we have a list of ground-truth items and a list of candidates and we want to check if each of the candidates is in the ground truth:\n\n.. code-block:: python\n\n    import random\n    import timeit\n\n    # This is a list of 10k random items as an example\n    small_groundtruth = [random.randint(1, 1000000) for x in range(10000)]\n\n    def check_candidates(groundtruth, n_candidates):\n        \"\"\"Check which of the candidates in the range [0-n_candidates]\n        are present in the collection `groundtruth`\"\"\"\n        yes = [] \n        no = [] \n        for candidate in range(n_candidates): \n            if candidate in groundtruth:\n                # This is just an example about what to do with the result\n                yes.append(candidate) \n            else: \n                no.append(candidate) \n\nWe can use the timeit module (from tip 1!) to see how long our method takes:\n\n.. code-block:: python\n\n    timeit.timeit('check_candidates(small_groundtruth, 100)', number=10, globals=globals())\n    0.13829218316823244\n\nOK, so checking 100 candidates in a list of 10k items 10 times (the `number` parameter) takes 0.14 seconds (0.01 seconds per iteration); this seems pretty fast.\nWhat happens if we want to check 1000 items?\n\n.. code-block:: python\n\n    timeit.timeit('check_candidates(small_groundtruth, 1000)', number=10, globals=globals())\n    1.3406621366739273\n\nTime increases linearly. However, what happens if our groundtruth grows by some orders of magnitude?\n\n.. code-block:: python\n\n    # 100k items\n    groundtruth = [random.randint(1, 1000000000) for x in range(100000)]\n\n    timeit.timeit('check_candidates(groundtruth, 100)', number=10, globals=globals())\n    1.36269876267761\n\n    timeit.timeit('check_candidates(groundtruth, 1000)', number=10, globals=globals())\n    13.427033469080925\n\n    # 1m items?\n    groundtruth = [random.randint(1, 1000000000) for x in range(1000000)]\n\n    timeit.timeit('check_candidates(groundtruth, 1000)', number=10, globals=globals())\n    122.46592588163912\n\nThis is getting longer and longer. Why is this the case? When testing for membership in a list, we have to check every item in the list in order to see if a candidate exists. If the item doesn't exist in the list, we have to check all items in the list before we can say that it doesn't exist. As this list gets longer and longer, this operation takes longer.\nWe can speed up the process by checking if the item is in a set instead of a list:\n\n.. code-block:: python\n\n    groundtruth_set = set(groundtruth)\n\n    timeit.timeit('check_candidates(groundtruth_set, 1000)', number=10, globals=globals())\n    0.0022530462592840195\n\nThe same operation which took 122 seconds above (12 seconds per repetition) only took 0.002 seconds! (60,000 times faster!)\nWhat happens if we make the groundtruth 100x larger?\n\n.. code-block:: python\n\n    # These two lines take a while to run, but it's just one-time setup for the demo\n    groundtruth100m = [random.randint(1, 1000000000000) for x in range(100000000)]\n    groundtruth100m_set = set(groundtruth100m)\n\n    timeit.timeit('check_candidates(groundtruth100m_set, 1000)', number=10, globals=globals())\n    0.0023570358753204346\n\nIt takes the same amount of time! No increase as the size of our groundtruth grows. This is because the check uses the hash of each value to quickly see if the item exists in the set (https://docs.python.org/3.7/library/stdtypes.html#set-types-set-frozenset)\n\nDictionary keys are also hashed, and so your groundtruth in this example could also be a dictionary mapping {value: class} and the lookups would still be fast.
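\n\nFor example, a sketch of the same membership test with a dict (``'positive'`` is an arbitrary stand-in for the class label):\n\n.. code-block:: python\n\n    groundtruth_dict = {value: 'positive' for value in groundtruth}\n    42 in groundtruth_dict  # O(1) membership check, just like the set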
\n\nNote that the process of changing a list to a set may take some time, but the tradeoff is worth it to get fast lookups multiple times. Make sure that you only do it once! It will cause you to use more memory. Remember that a set is not ordered, and can only contain a value once. To save time and memory, consider generating the set initially instead of making a list and converting it." }, { "alpha_fraction": 0.666593074798584, "alphanum_fraction": 0.6696842312812805, "avg_line_length": 39.81081008911133, "blob_id": "d0403c42b4afaf277ed0b37bfa78d18714585cba", "content_id": "15771c8d2dea8383192cd5cd9b9d3ed449075a5c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 4535, "license_type": "permissive", "max_line_length": 502, "num_lines": 111, "path": "/docs/_sources/tips/tip8.rst.txt", "repo_name": "MTG/pymtg", "src_encoding": "UTF-8", "text": "Python tip of the week 8, Generators\n====================================\n\nThis tip is about the use of generators for optimizing your code. To understand generators, \nwe will also quickly talk about iterators, iterables and iterations.\n\nIterables, iterators and iterations\n-----------------------------------\n\nAn iterable is just an object that has a ``__iter__`` method which returns an iterator, or \na ``__getitem__`` method that can take sequential indexes starting from zero.\n\nAn iterator is an object that implements ``__next__``, which is expected to return the next \nelement of the iterable object that returned it, and raise a StopIteration exception when \nno more elements are available.\n\nIteration is the process of taking items from an iterable one at a time; when we loop \nover something, we are iterating over it.\n\nYou can define your own custom iterables in Python like this:\n\n.. code-block:: python\n\n    class MusicCollection:\n        def __init__(self, tracks=None):\n            # avoid a mutable default argument for tracks\n            self.tracks = tracks or []\n            # keep track of the current index of the iteration\n            self.current = 0\n\n        def __iter__(self): # iterable\n            return self\n\n        def __next__(self): # iterator\n            if self.current < len(self.tracks):\n                track = self.tracks[self.current]\n                self.current += 1\n                return track\n            else:\n                self.current = 0\n                raise StopIteration()\n\n    music_collection = MusicCollection(['track1.wav', 'track2.wav', 'track3.wav'])\n\n    for track in music_collection:\n        print(track)\n\nGenerators\n----------\nIn the example above, we can iterate several times on the tracks of a music collection. The names of the tracks are just kept in memory (stored in a list). But what if we have a million tracks or feature vectors, but we still want to iterate through them and input them to our processing pipeline? That is where generators become interesting. Generators are iterators, but you can iterate over them only once: they do not store all the values in memory, they generate them on the fly. \n\n\nThe way to declare a generator is quite different from that of iterables. Basically you can create a function and use yield instead of the return statement. Let’s consider another example that might be more relevant for you. When we train artificial neural networks, we often have a lot of pre-computed features stored in a file that allows fast I/O processing, such as hdf5 files. Let’s see how we can create batches of shuffled training examples for each epoch, without requiring a huge amount of memory:\n\n.. code-block:: python\n\n    import math\n    from random import shuffle\n\n    def load_batches(hdf5_file, batch_size):\n        num_data, num_frames, num_bands = hdf5_file[\"train_X\"].shape\n        nb_batches = int(math.ceil(float(num_data) / batch_size))\n        ids = list(range(num_data))\n        shuffle(ids)\n        for i in range(nb_batches):\n            i_start = i * batch_size\n            i_end = min([(i + 1) * batch_size, num_data])\n            batch_indexes = ids[i_start:i_end]\n            batch_indexes.sort()\n            examples = hdf5_file[\"train_X\"][batch_indexes]\n            yield examples\n
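\nConsuming the generator is then just a loop. A sketch, assuming ``hdf5_file`` is an open ``h5py.File`` containing a ``train_X`` dataset as above (``'features.hdf5'`` is a hypothetical file name):\n\n.. code-block:: python\n\n    import h5py\n\n    with h5py.File('features.hdf5', 'r') as hdf5_file:\n        for examples in load_batches(hdf5_file, batch_size=128):\n            process(examples)  # process() is a placeholder for your training step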
\n\nGenerator expressions\n---------------------\n\nGenerator expressions are just a shortcut to declare generators. It might remind you\nabout a previous tip where we talked about list comprehensions...\n\nThe code:\n\n.. code-block:: python\n\n    def repeater(value, max_repeats):\n        for i in range(max_repeats):\n            yield value\n\n    iterator = repeater('Hello', 3)\n\nIs equivalent to:\n\n.. code-block:: python\n\n    iterator = ('Hello' for i in range(3))\n\n\nGenerator expressions provide a very concise way of supporting the iterator protocol while avoiding \nthe verbosity of defining functions with the yield operator. The nice thing is that you can \ncreate processing chains in a very succinct way while keeping good code readability.\nLet’s see a simplified example of a natural language processing pipeline where we want to \nextract processable terms from a list of sentences:\n\n.. code-block:: python\n\n    import re\n\n    stop_words = ['a', 'is', 'in', 'he', 'also']\n    text = ['A dog is barking in a street.', 'He also growls!']\n\n    lemmas = (lemma for phrase in text for lemma in re.findall(r'\\w+', phrase))\n    lower_case = (term.lower() for term in lemmas)\n    terms = (term for term in lower_case if term not in stop_words)\n\n    list(terms)\n    -> ['dog', 'barking', 'street', 'growls']" }, { "alpha_fraction": 0.63150954246521, "alphanum_fraction": 0.6445286870002747, "avg_line_length": 28.89215660095215, "blob_id": "e57f8778a7c15dea64c30f8bed78dff75ae68c03", "content_id": "950b1934ff7eb5cf5e665f5a822cb2c355e188de", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 6068, "license_type": "permissive", "max_line_length": 313, "num_lines": 206, "path": "/docs/_sources/tips/tip4.rst.txt", "repo_name": "MTG/pymtg", "src_encoding": "UTF-8", "text": "Python tip of the week 4, Dictionaries\n=======================================\n\nDictionaries in Python are cool. They provide a mapping from some key (which can be any hashable object) to some value (which can be any object)\n\nhttps://docs.python.org/3.7/tutorial/datastructures.html#dictionaries\n\nhttps://docs.python.org/3.7/library/stdtypes.html#typesmapping\n\nYou can make a dictionary with the dict literal syntax:\n\n.. code-block:: python\n\n\t>>> d = {'key': 'value', 'second': 1}\n\t>>> d\n\t{'key': 'value', 'second': 1}\n\nAnd add to it by assigning to a new key:\n\n.. code-block:: python\n\n\t>>> d['another'] = ['one', 'two']\n\t>>> d\n\t{'key': 'value', 'second': 1, 'another': ['one', 'two']}\n\nYou can also make one from a list of 2-tuples:\n\n.. code-block:: python\n\n\t>>> items = [('key1', 1), ('key2', 2)]\n\t>>> dict(items)\n\t{'key1': 1, 'key2': 2}\n\nOr with a dictionary comprehension:\n\n.. code-block:: python\n\n\t>>> {k: 'a'*v for k, v in items}\n\t{'key1': 'a', 'key2': 'aa'}\n\nYou can combine two dictionaries with ``.update()``\n\n.. code-block:: python\n\n\t>>> d = {'key': 'value', 'second': 1}\n\t>>> e = {'upf': 'cool', 'year': 2019}\n\t>>> d.update(e)\n\t>>> d\n\t{'key': 'value', 'second': 1, 'upf': 'cool', 'year': 2019}\n\nYou can see if a key exists in a dictionary by using the in keyword:\n\n.. code-block:: python\n\n\t>>> 'upf' in d\n\tTrue\n\t>>> 'test' in d\n\tFalse\n\nNote that this is fast (like a set in Python)\n\nGet a value from a dictionary by accessing it with ``[]``\n\n.. code-block:: python\n\n\t>>> d['upf']\n\t'cool'\n\nIf the item doesn't exist, an exception will be raised. If you don't catch this exception, your program will exit. Keep this in mind especially if you're reading data from an external file or webservice and you don't know if it will exist or not. You don't want the script to quit 80% through a 3 hour execution...\n\n.. code-block:: python\n\n\t>>> d['not_here']\n\tTraceback (most recent call last):\n\t  File \"<stdin>\", line 1, in <module>\n\tKeyError: 'not_here'\n\nYou can check if the key exists first (if 'not_here' in d), but you can also use ``.get()``. By default, this will return ``None`` if the key doesn't exist, but you can set it to something else if you want.\n\n.. code-block:: python\n\n\t>>> print(d.get('not_here'))\n\tNone\n\n\t>>> d.get('not_here', 'default_value')\n\t'default_value'\n\nDelete an item from a dictionary by using ``del``\n\n
.. code-block:: python\n\n\t>>> del d['upf']\n\t>>> 'upf' in d\n\tFalse\n\nYou can get all of the keys of a dictionary with ``.keys()``, and the values with ``.values()``\n\n.. code-block:: python\n\n\t>>> d.keys()\n\tdict_keys(['key', 'second', 'year'])\n\n\t>>> d.values()\n\tdict_values(['value', 1, 2019])\n\nIn Python 3, ``.keys()`` and ``.values()`` return view objects. This means that you can use them in a for loop, but if you want to get the first or second element, for example, you need to cast it to a list:\n\n.. code-block:: python\n\n\t>>> d.keys()[0]\n\tTraceback (most recent call last):\n\t  File \"<stdin>\", line 1, in <module>\n\tTypeError: 'dict_keys' object does not support indexing\n\n\t>>> list(d.keys())[0]\n\t'key'\n\nThese values are also views into the dictionary, which means that if you change the dictionary, the view also changes:\n\n.. code-block:: python\n\n\t>>> keys = d.keys()\n\t>>> keys\n\tdict_keys(['key', 'second', 'year'])\n\t>>> del d['year']\n\t>>> keys\n\tdict_keys(['key', 'second'])\n\nBe careful when combining views and loops that modify a dictionary:\n\n.. code-block:: python\n\n\t>>> for k in d.keys():\n\t...     if k == 'second':\n\t...         del d[k]\n\t...\n\tTraceback (most recent call last):\n\t  File \"<stdin>\", line 1, in <module>\n\tRuntimeError: dictionary changed size during iteration\n\nIf you want to iterate through all of the keys and values in the dictionary at the same time, use ``.items()``:\n\n.. code-block:: python\n\n\t>>> d = {'key': 'value', 'second': 1, 'upf': 'cool', 'year': 2019}\n\t>>> for k, v in d.items():\n\t...     print('> %s: %s' % (k, v))\n\t...\n\t> key: value\n\t> second: 1\n\t> upf: cool\n\t> year: 2019\n\nIn Python 2 and in Python 3 up to 3.5, there is no guarantee that a dictionary keeps its keys in any particular order. That is, ``.keys()`` might not yield the same order twice in a row, and the order might not match the one in which you added items to the dictionary.\n\nYou can use ``collections.OrderedDict`` to create a dictionary which is guaranteed to keep its keys and values in the same order that they were added.\nFrom Python 3.7 this behaviour is part of the language definition and you don't have to use OrderedDict.\n\nhttps://docs.python.org/3/library/collections.html#collections.OrderedDict\n\nOne common pattern that you might have to do with a dictionary is to add a default value the first time that you encounter a key, and then modify it the next time that you see it, e.g.\n\n.. code-block:: python\n\n\t>>> data = ['one', 'one', 'two', 'three', 'three', 'three']\n\t>>> d = {}\n\t>>> for item in data:\n\t...     if item not in d:\n\t...         d[item] = 1\n\t...     else:\n\t...         d[item] += 1\n\t...\n\t>>> d\n\t{'one': 2, 'two': 1, 'three': 3}\n\nYou can use collections.defaultdict to set a default value if the key doesn't exist. This means that you don't need to include a check for the key in each loop.\n\nhttps://docs.python.org/3/library/collections.html#collections.defaultdict\n\n.. code-block:: python\n\n\t>>> import collections\n\t>>> d = collections.defaultdict(int)\n\t>>> for item in data:\n\t...     d[item] += 1\n\t...\n\t>>> d\n\tdefaultdict(<class 'int'>, {'one': 2, 'two': 1, 'three': 3})\n\nCommon types to use with defaultdict could be int (for counters), list (for a dictionary of lists), or dict (for a nested dictionary)\n\nIf you simply want a counter, consider using ``collections.Counter``:\n\nhttps://docs.python.org/3/library/collections.html#collections.Counter\n\n
.. code-block:: python\n\n\t>>> d = collections.Counter(data)\n\t>>> d\n\tCounter({'three': 3, 'one': 2, 'two': 1})\n\t>>> d.most_common()\n\t[('three', 3), ('one', 2), ('two', 1)]\n\t>>> d.most_common(1)\n\t[('three', 3)]\n\nThe ``.most_common()`` method will order items by their count, or you can choose to select only the top n most common items.\n\n" }, { "alpha_fraction": 0.5831899046897888, "alphanum_fraction": 0.6179001927375793, "avg_line_length": 40.97590255737305, "blob_id": "e891072d02e76dc93046ad150f3504f65de865cd", "content_id": "c65fb10e76c1b3e19269f2f8badabc1ebc3b97d4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3486, "license_type": "permissive", "max_line_length": 98, "num_lines": 83, "path": "/pymtg/signal/__init__.py", "repo_name": "MTG/pymtg", "src_encoding": "UTF-8", "text": "import numpy as np\n\n\ndef smooth(x, window_len=11, window='hanning', preserve_length=True):\n    \"\"\"Smooth the data using a window with requested size.\n\n    This method is based on the convolution of a scaled window with the signal.\n    The signal is prepared by introducing reflected copies of the signal \n    (with the window size) at both ends so that transient parts are minimized\n    in the beginning and end part of the output signal.\n\n    The code here is an adaptation of the smoothing code from Scipy Cookbook:\n    http://scipy-cookbook.readthedocs.io/items/SignalSmooth.html\n    \n    Args:\n        x (array): The input signal \n        window_len (int): The dimension of the smoothing window. Should be an odd integer.\n        window (string): The type of window from 'flat', 'hanning', 'hamming', 'bartlett', \n            'blackman'. A flat window will produce a moving average smoothing.\n        preserve_length (bool): Whether the length of the output signal should be the same\n            as the length of the input signal (default=True).\n\n    Returns:\n        (array): The smoothed signal\n    \n    Examples:\n        >>> smooth([0, 1, 0, 1, 0, 1], 4)\n        array([ 0.5,  0.5,  0.5,  0.5,  0.5,  0.5])\n    \"\"\"\n    if not isinstance(x, np.ndarray):\n        x = np.array(x)\n    if x.ndim != 1:\n        raise ValueError(\"smooth only accepts 1-dimensional arrays.\")\n    if x.size < window_len:\n        raise ValueError(\"Input vector needs to be bigger than window size.\")\n    if window_len < 3:\n        return x\n    if window not in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:\n        raise ValueError(\"Window must be one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'\")\n\n    s = np.r_[x[window_len-1:0:-1], x, x[-2:-window_len-1:-1]]\n    if window == 'flat':  # moving average\n        w = np.ones(window_len, 'd')\n    else:\n        w = getattr(np, window)(window_len)  # e.g. np.hanning(window_len)\n    y = np.convolve(w/w.sum(), s, mode='valid')\n    if preserve_length:\n        return y[(window_len//2-1):-(window_len//2)][:x.size]\n    return y\n\n\ndef linear_approximation(x, include_coeffs=False):\n    \"\"\"Compute the first degree least squares polynomial fit of x (linear approximation).\n\n    This function returns the linear approximation as a signal of the same length as x.\n    If requested, the function can also return the linear approximation coefficients as\n    returned by Numpy's 'polyfit' function. 
For more details on the method used for the linear\n    approximation, see https://docs.scipy.org/doc/numpy/reference/generated/numpy.polyfit.html.\n\n    Args:\n        x (array): The input signal \n        include_coeffs (bool): Whether to return the computed linear approximation coefficients \n            along with the approximated signal (default=False).\n\n    Returns:\n        (array): The linear approximation of the input signal\n    \n    Examples:\n        >>> linear_approximation([1, 1, 1])\n        array([ 1.,  1.,  1.])\n        >>> linear_approximation([0, 1, 2, 3, 4, 5])\n        array([ 0.,  1.,  2.,  3.,  4.,  5.])\n        >>> linear_approximation([1, 2, 4, 8, 16])\n        array([ -1. ,   2.6,   6.2,   9.8,  13.4])\n        >>> linear_approximation([1, 2, 4, 8, 16], include_coeffs=True)\n        (array([ -1. ,   2.6,   6.2,   9.8,  13.4]), (3.6000000000000001, -0.99999999999999778))\n    \"\"\"\n    a, b = np.polyfit(range(0, len(x)), x, 1)\n    x_fit = np.array([a*i + b for i in range(0, len(x))])\n    if not include_coeffs:\n        return x_fit\n    else:\n        return x_fit, (a, b)\n\n\n" }, { "alpha_fraction": 0.5951229333877563, "alphanum_fraction": 0.6440951228141785, "avg_line_length": 49.121212005615234, "blob_id": "7e7310f0f0c04d328b4db4994b1035152b60deff", "content_id": "670267e9fefd93cc9ddf050e3de59d710f95bd7c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4962, "license_type": "permissive", "max_line_length": 160, "num_lines": 99, "path": "/pymtg/time/__init__.py", "repo_name": "MTG/pymtg", "src_encoding": "UTF-8", "text": "import time\nimport datetime\n\n\ndef time_stats(done, total, starttime):\n    \"\"\"Count how far through a repeated operation you are.\n\n    Use this method if you are performing a repeated operation over\n    a list of items and you want to check progress and time remaining\n    after each iteration.\n\n    Args:\n        done (int): how many items have been processed\n        total (int): the total number of items that are to be processed\n        starttime: the result of an initial call to time.monotonic()\n\n    Returns:\n        A tuple of (time elapsed, time remaining), as string representations\n        of timedeltas\n    \"\"\"\n    nowtime = time.monotonic()\n    position = done*1.0 / total\n    duration = round(nowtime - starttime)\n    durdelta = datetime.timedelta(seconds=duration)\n    remaining = round((duration / position) - duration)\n    remdelta = datetime.timedelta(seconds=remaining)\n\n    return str(durdelta), str(remdelta)\n\n\ndef datetime_range(start_datetime, end_datetime=None, step_interval=None, n_steps=1, snap_to_date=False,\n                   return_pairs=False):\n    \"\"\"Return a list of dates inside the date range between ``start_datetime`` and ``end_datetime``,\n    equally spaced in ``step_interval`` time intervals.\n\n    Args:\n        start_datetime (datetime): Starting time of the range\n        end_datetime (datetime): End of the time range (included if the range is a multiple of the step). Defaults to today\n        step_interval (timedelta,str): time interval between list elements. Can be a ``datetime.timedelta``
Can be a ``datetime.timedelta``\n object or a string from ['day', 'second', 'microsecond', 'millisecond', 'minute', 'hour', 'week'].\n Defaults to 1 day.\n n_steps (int): number of steps to be applied between list elements (default=1)\n snap_to_date (bool): Whether to disregard hour, minutes and seconds information (as a date object,\n default=False)\n return_pairs (bool): Whether to return a simple list or a list of pairs with edge dates for each\n interval (default=False)\n\n Returns:\n (list): List of ``datetime.datetime`` objects (or tuples of two ``datetime.datetime`` if ``return_pairs=True``)\n\n Examples:\n >>> datetime_range(datetime.datetime(2017,1,1), datetime.datetime(2017,1,3))\n [datetime.datetime(2017, 1, 1, 0, 0), datetime.datetime(2017, 1, 2, 0, 0), datetime.datetime(2017, 1, 3, 0, 0)]\n >>> datetime_range(datetime.datetime(2017,1,1,10,21,45), datetime.datetime(2017,1,3,10,30,54), snap_to_date=True)\n [datetime.datetime(2017, 1, 1, 0, 0), datetime.datetime(2017, 1, 2, 0, 0), datetime.datetime(2017, 1, 3, 0, 0)]\n >>> datetime_range(datetime.datetime(2017,1,1,11,0,0), datetime.datetime(2017,1,1,11,2,0), step_interval='minute')\n [datetime.datetime(2017, 1, 1, 11, 0), datetime.datetime(2017, 1, 1, 11, 1), datetime.datetime(2017, 1, 1, 11, 2)]\n >>> datetime_range(datetime.datetime(2017,1,1,11,0,0), datetime.datetime(2017,1,1,11,20,0), step_interval='minute', n_steps=10)\n [datetime.datetime(2017, 1, 1, 11, 0), datetime.datetime(2017, 1, 1, 11, 10), datetime.datetime(2017, 1, 1, 11, 20)]\n >>> datetime_range(datetime.datetime(2017,1,1), datetime.datetime(2017,1,3), return_pairs=True)\n [(datetime.datetime(2017, 1, 1, 0, 0), datetime.datetime(2017, 1, 2, 0, 0)), (datetime.datetime(2017, 1, 2, 0, 0), datetime.datetime(2017, 1, 3, 0, 0))]\n \"\"\"\n\n if end_datetime is None:\n end_datetime = datetime.datetime.today()\n\n if step_interval is None:\n step_interval = datetime.timedelta(days=1)\n else:\n if not isinstance(step_interval, datetime.timedelta):\n if step_interval.lower() == 'day':\n step_interval = datetime.timedelta(days=n_steps)\n elif step_interval.lower() == 'second':\n step_interval = datetime.timedelta(seconds=n_steps)\n elif step_interval.lower() == 'microsecond':\n step_interval = datetime.timedelta(microseconds=n_steps)\n elif step_interval.lower() == 'millisecond':\n step_interval = datetime.timedelta(milliseconds=n_steps)\n elif step_interval.lower() == 'minute':\n step_interval = datetime.timedelta(minutes=n_steps)\n elif step_interval.lower() == 'hour':\n step_interval = datetime.timedelta(hours=n_steps)\n elif step_interval.lower() == 'week':\n step_interval = datetime.timedelta(weeks=n_steps)\n\n if snap_to_date:\n start_datetime = start_datetime.replace(hour=0, minute=0, second=0, microsecond=0)\n end_datetime = end_datetime.replace(hour=0, minute=0, second=0, microsecond=0)\n\n dates = []\n current_datetime = start_datetime\n while current_datetime <= end_datetime:\n dates.append(current_datetime)\n current_datetime += step_interval\n\n if return_pairs:\n return list(zip(dates[:-1], dates[1:]))\n else:\n return dates\n" }, { "alpha_fraction": 0.3888888955116272, "alphanum_fraction": 0.4682539701461792, "avg_line_length": 7.4666666984558105, "blob_id": "94c6ac68b77db98313487c3233bcb0b69b016fe7", "content_id": "483bffaf6df6cbacd53ab5fb732c9c0242a9b1fc", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 126, "license_type": "permissive", "max_line_length": 15, "num_lines": 15, "path": 
"/docs/_sources/tips/tips.rst.txt", "repo_name": "MTG/pymtg", "src_encoding": "UTF-8", "text": "Tip of the week\n---------------\n\n.. toctree::\n\n tip0\n tip1\n tip2\n tip3\n tip4\n tip5\n tip6\n tip7\n tip8\n tip9" }, { "alpha_fraction": 0.6357979774475098, "alphanum_fraction": 0.6522694230079651, "avg_line_length": 35.192054748535156, "blob_id": "b8b20c181eec191e8304eee3b97947dc2f56df9a", "content_id": "bf82016d2198c1545e5abc0e8a51ec03cd1e1b2f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 5464, "license_type": "permissive", "max_line_length": 368, "num_lines": 151, "path": "/docs/_sources/tips/tip6.rst.txt", "repo_name": "MTG/pymtg", "src_encoding": "UTF-8", "text": "Python tip of the week 6, String formatting\n============================================\n\nThis week's tip is about ways to format strings in Python.\n\nAn 'intuitive' way for string formatting can be using '+' operator for concatenating strings. To use this formatting style, we have to cast the integers or other data types into strings manually. An example can be as following:\n\n.. code-block:: python\n\n\t>>> num_of_items = 42\n\t>>> type_of_items = 'songs'\n\t>>> print('Here, we have ' + str(num_of_items) + ' ' + type_of_items)\n\n\tOutput: Here, we have 42 songs\n\nWhile the '+' operator seems easy to use, it comes with a bunch of problems (e.g. manual type casting, difficulty in reading code). Instead, you should consider using one of these recommended methods:\n\n- Old Style: (https://docs.python.org/3/library/stdtypes.html#old-string-formatting) This way of string formatting uses format specifiers to indicate what to substitute. \n- New Style: (https://docs.python.org/3/library/stdtypes.html#str.format) This method is used with calling the \".format()\" function.\n- F-strings: (https://www.python.org/dev/peps/pep-0498/) The F-strings style is supported in Python 3.6+, and it is a simple yet powerful way for string formatting. Unlike in 'New Style', you don't have to call a specific function such as '.format()' or use format specifiers as in 'Old Style'.\n- Template Strings: (https://docs.python.org/3/library/string.html#template-strings) This simpler and less powerful method of formatting strings can be useful in terms of privacy of variables. For this method, Template class for Python string module should be imported.\n\n\nBelow, you can find some examples of use:\n\nBasic formatting\n----------------\n\n.. code-block:: python\n\n\t>>> num_of_items = 42\n\t>>> type_of_items = 'songs'\n\n\t# Old style\n\t>>> print('Here, we have %d %s' % (num_of_items, type_of_items))\n\n\t# New style\n\t>>> print('Here, we have {} {}'.format(num_of_items, type_of_items))\n\n\t# F-strings\n\t>>> print(f'Here, we have {num_of_items} {type_of_items}')\n\n\tOutput: Here, we have 42 songs\n\nNamed placeholders\n------------------\n\n.. code-block:: python\n\n\t>>> params = {'hop_size': 512, 'frame_size': 1024}\n\n\t# Old style\n\t>>> print('The parameters for hop size is %(hop_size)d, and frame size is %(frame_size)s.' % params)\n\n\t# New style\n\t>>> print('The parameters for hop size is {hop_size}, and frame size is {frame_size}.'.format(**params))\n\n\t# F-strings\n\t>>> print(f'The parameters for hop size is {params[\"hop_size\"]}, and frame size is {params[\"frame_size\"]}.')\n\n\tOutput: The parameters for hop size is 512, and frame size is 1024.\n\nNumber formatting\n-----------------\n\n.. 
.. code-block:: python\n\n\t>>> def get_duration():\n\t>>>     return 331.1932148\n\n\t# Old Style\n\t>>> print('Duration of this track is %.2f seconds.' % get_duration())\n\n\t# New Style\n\t>>> print('Duration of this track is {:.2f} seconds'.format(get_duration()))\n\n\t# F-strings\n\t>>> print(f'Duration of this track is {get_duration():.2f} seconds.')\n\n\tOutput: Duration of this track is 331.19 seconds.\n\nPadding and alignment\n---------------------\n\n.. code-block:: python\n\n\t>>> songs = ['Yesterday', 'All You Need Is Love', 'Hey Jude']\n\t>>> albums = ['Help!', 'Magical Mystery Tour', 'The Beatles']\n\t>>> song_ids = [483, 65448, 98]\n\n\t# Old Style\n\t>>> for i in range(3):\n\t>>>     print('%20s - %-20s - %6d ' % (songs[i], albums[i], song_ids[i]))\n\n\t# New Style\n\t>>> for i in range(3):\n\t>>>     print('{:>20} - {:<20} - {:>6}'.format(songs[i], albums[i], song_ids[i]))\n\t   \n\t# F-strings\n\t>>> for i in range(3):\n\t>>>     print(f'{songs[i]:>20} - {albums[i]:<20} - {song_ids[i]:>6}')\n\n\tOutput:            Yesterday - Help!                -    483 \n\t        All You Need Is Love - Magical Mystery Tour -  65448 \n\t                    Hey Jude - The Beatles          -     98 \n\nExample for Template strings\n-----------------------------\n\nIn most cases, one of the methods shown above would be appropriate. However, these methods might introduce security vulnerabilities to your programs. For instance, a user of a web application could retrieve some variables with a crafted input. Let's have a look at a simple example where a hypothetical attacker would be able to access some global variables:\n\n.. code-block:: python\n\n\t# This is a variable we don't want to show\n\t>>> SECRET_VARIABLE = \"don't tell anyone\"\n\n\t>>> class Music:\n\t>>>     def __init__(self):\n\t>>>         pass\n\t   \n\t>>> song = Music()\n\n\t# New Style\n\t>>> user_provided_string = '{track.__init__.__globals__[SECRET_VARIABLE]}'\n\t>>> print(user_provided_string.format(track=song))\n\n\tOutput: don't tell anyone\n\nHere is where the Template strings method becomes useful:\n\n.. code-block:: python\n\n\t>>> from string import Template\n\n\t>>> user_provided_string = '${track.__init__.__globals__[SECRET_VARIABLE]}'\n\t>>> print(Template(user_provided_string).substitute(track=song))\n\n\tRaises: ValueError: Invalid placeholder in string: line 1, col 1\n\nBonus tip: join function\n------------------------\nFor joining all the items in a tuple or a list into a single string, you might be tempted to use a for loop and concatenate elements one by one. However, the '.join()' function provides a better way to do it:\n\n.. code-block:: python\n\n\t>>> songs = ['Yesterday', 'Come Together', 'Hey Jude', 'Blackbird', '...']\n\t>>> message = 'Processing the songs: '\n\t>>> message += ', '.join(songs)\n\t>>> print(message)\n\n\tOutput: Processing the songs: Yesterday, Come Together, Hey Jude, Blackbird, ..." } ]
24
ZKDeep/Hand-Written-Urdu-Character-Recognition-using-DenseNet121
https://github.com/ZKDeep/Hand-Written-Urdu-Character-Recognition-using-DenseNet121
8f4000b14a93c86cb777973d36b94db8191ce784
db8cfa2a8e7a4d659f260457aa1085ff71535130
0efdc4cff996cb46b340a7a11ffa818b566a8185
refs/heads/master
2020-09-29T02:29:26.313714
2019-12-09T17:34:34
2019-12-09T17:34:34
226,927,301
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.4950980246067047, "alphanum_fraction": 0.563725471496582, "avg_line_length": 13.538461685180664, "blob_id": "f12adb7746c11ef1822684720f8c279e6a4900bb", "content_id": "502778a78c6d5397cafc3d6861d24599c6293eaa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 204, "license_type": "no_license", "max_line_length": 35, "num_lines": 13, "path": "/arguments.py", "repo_name": "ZKDeep/Hand-Written-Urdu-Character-Recognition-using-DenseNet121", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Dec 9 20:41:45 2019\r\n\r\n@author: zubair\r\n\"\"\"\r\n\r\nbatch_size = 2\r\nno_epochs = 1\r\n\r\ntraining_path = \"train/\"\r\nvalidation_path = \"valid/\"\r\ntest_path = \"test/\"\r\n\r\n" }, { "alpha_fraction": 0.6582304835319519, "alphanum_fraction": 0.6697343587875366, "avg_line_length": 29.649005889892578, "blob_id": "b94050d1e3c735bc2869a9ebe66d5ebc4b0392a0", "content_id": "dbc7add5cd480095fae6dec6f616d49e370efa13", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4781, "license_type": "no_license", "max_line_length": 163, "num_lines": 151, "path": "/densenet121.py", "repo_name": "ZKDeep/Hand-Written-Urdu-Character-Recognition-using-DenseNet121", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Sep 7 01:46:43 2019\r\n\r\n@author: zubair\r\n\"\"\"\r\n\r\n\r\n\r\n\r\nimport numpy as np\r\nimport keras\r\nfrom matplotlib import pyplot as plt\r\nfrom keras import Model\r\nfrom keras import applications\r\nfrom keras.preprocessing.image import ImageDataGenerator\r\nimport os\r\nfrom keras.layers import Dense\r\nfrom keras.layers import Activation, Flatten, GlobalAveragePooling2D\r\nfrom keras.models import Sequential\r\nfrom sklearn.metrics import classification_report, confusion_matrix\r\nimport numpy\r\nimport arguments\r\nfrom sklearn.metrics import classification_report, confusion_matrix\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\nimport pandas as pd\r\n\r\nprint(\"type 'train' or 'test' for training or testing\")\r\ncheck = input()\r\n\r\n\r\n\r\ntrain_path = arguments.training_path\r\nvalid_path = arguments.validation_path\r\ntest_path = arguments.test_path\r\n\r\n\r\nlabels_reading = arguments.training_path # This will generate labels as per folders name\r\nclass_lables = os.listdir(labels_reading)\r\n\r\n\r\ntrain_batches = ImageDataGenerator().flow_from_directory(train_path, target_size=(224,224), classes=class_lables, batch_size= arguments.batch_size, shuffle = True)\r\nvalid_batches = ImageDataGenerator().flow_from_directory(valid_path, target_size=(224,224), classes=class_lables, batch_size= arguments.batch_size, shuffle = True)\r\ntest_batches = ImageDataGenerator().flow_from_directory(test_path, target_size=(224,224), classes=class_lables, batch_size= arguments.batch_size, shuffle = False)\r\n\r\nclasses = len(np.unique(train_batches.classes))\r\n\r\n\r\n\r\ndense121 = keras.applications.DenseNet121(include_top=False, weights='imagenet')\r\n\r\nnew_model=dense121.output\r\nnew_model=GlobalAveragePooling2D()(new_model)\r\n\r\nnew_model=Dense(512,activation='relu')(new_model) #dense layer 3\r\npreds=Dense(classes,activation='softmax')(new_model) #final layer with softmax activation\r\n\r\nmodel=Model(inputs=dense121.input,outputs=preds)\r\n\r\nfor i,layer in enumerate(model.layers):\r\n print(i,layer.name)\r\n\r\nfor layer in model.layers:\r\n 
    layer.trainable = True\r\nmodel.compile(optimizer='SGD', loss='categorical_crossentropy', metrics=['accuracy'])\r\n\r\n\r\ndef training():\r\n    print(\"training the model\")\r\n    try:\r\n        model.load_weights(\"results/weights.h5\")\r\n    except:\r\n        print(\"No weights found, training from scratch...\")\r\n    \r\n    step_size_train = train_batches.n//train_batches.batch_size\r\n    hist = model.fit_generator(generator=train_batches, validation_data=valid_batches, \r\n                    validation_steps=valid_batches.n//valid_batches.batch_size,\r\n                    steps_per_epoch=step_size_train,\r\n                    epochs=arguments.no_epochs)\r\n    model.save_weights(\"results/weights.h5\")\r\n    \r\n    \r\n    print(\"Plotting training results...\")\r\n    plt.plot(hist.history['acc'])\r\n    plt.plot(hist.history['val_acc'])\r\n    plt.title('model accuracy')\r\n    plt.ylabel('accuracy')\r\n    plt.xlabel('epoch')\r\n    plt.legend(['train', 'val'], loc='upper left')\r\n    plt.savefig('results/Acc.png')\r\n    plt.show()\r\n    \r\n    \r\n    \r\n    plt.plot(hist.history['loss'])\r\n    plt.plot(hist.history['val_loss'])\r\n    plt.title('model loss')\r\n    plt.ylabel('loss')\r\n    plt.xlabel('epoch')\r\n    plt.legend(['train', 'val'], loc='upper left')\r\n    plt.savefig('results/loss.png')\r\n    plt.show()\r\n    \r\n    testing()\r\n\r\n\r\ndef testing():\r\n    print(\"testing the model\")\r\n    try:\r\n        model.load_weights(\"results/weights.h5\")\r\n    except:\r\n        print(\"No weights found, testing on randomly initialized weights\")\r\n    step_size_test = test_batches.n//test_batches.batch_size  \r\n    evl = model.evaluate_generator(generator=test_batches, steps=step_size_test, verbose=1)\r\n    Y_pred = model.predict_generator(test_batches, steps=step_size_test, verbose=1)\r\n    \r\n    y_pred = np.argmax(Y_pred, axis=1)\r\n    \r\n    dif = abs(len(y_pred) - len(test_batches.classes))\r\n    if dif > 0:\r\n        y_true = test_batches.classes[:-dif]\r\n    else:\r\n        y_true = test_batches.classes\r\n    \r\n    print('Confusion Matrix')\r\n    print(confusion_matrix(y_true, y_pred))\r\n    \r\n    matrix = confusion_matrix(y_true, y_pred)\r\n    sns.heatmap(matrix, annot=True, cbar=False)\r\n    \r\n    \r\n    y_true = pd.Series(y_true, name=\"Actual\")\r\n    y_pred = pd.Series(y_pred, name=\"Predicted\")\r\n    df_confusion = pd.crosstab(y_true, y_pred)\r\n    \r\n    df_confusion.to_csv('results/confusion_matrix.csv')\r\n\r\n    print('Classification Report')\r\n    target_names = list(np.unique(y_true))\r\n    for i in range(len(target_names)):\r\n        target_names[i] = str(target_names[i])\r\n    print(classification_report(y_true, y_pred, target_names=target_names))\r\n    return evl\r\n\r\n\r\nif check == \"train\":\r\n    training()\r\nelif check == \"test\":\r\n    testing()\r\n\r\n" }, { "alpha_fraction": 0.7395659685134888, "alphanum_fraction": 0.7629382014274597, "avg_line_length": 38.93333435058594, "blob_id": "8aeeec3316b080f11272112e4114ff213a4d42cb", "content_id": "6f22483b2118b7191b763789456f69aa3ded948d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 599, "license_type": "no_license", "max_line_length": 130, "num_lines": 15, "path": "/README.md", "repo_name": "ZKDeep/Hand-Written-Urdu-Character-Recognition-using-DenseNet121", "src_encoding": "UTF-8", "text": "# Hand-Written-Urdu-Character-Recognition-using-DenseNet121\n\nDownload the dataset from Google Drive. 
( https://drive.google.com/file/d/1_ETFKY_vACIgQWDw_Km6Ggq9lVKFO92a/view?usp=sharing )\n\nPlace all the folders in the same directory as densenet121.py and arguments.py.\n\nRun densenet121.py from the command line or from the Spyder IDE, for example:
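\n\n```\npython densenet121.py\n```\n\n(The exact interpreter command, e.g. `python` or `python3`, depends on your environment.)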
\n\nThe script requires user input \"train\" or \"test\": \"train\" will start training and \"test\" will start testing the given model.\n\nYou can change the different hyperparameters in arguments.py.\n\nYou are now ready to train.\n\n########## Good Luck and thank you ###########\n" } ]
3
natelee-sb/TableauAPI
https://github.com/natelee-sb/TableauAPI
30b855f2250be271b7dd15ad103aa587156ff6c1
60bc033713c65256a5693f488b4ed57a9e9bce55
04b30c27ccafd07432273f75876db9fb1940f0ac
refs/heads/master
2021-06-10T22:43:12.636561
2020-01-21T07:37:41
2020-01-21T07:37:41
194,160,048
0
0
null
2019-06-27T20:38:17
2020-01-21T07:38:12
2021-06-02T00:56:33
Python
[ { "alpha_fraction": 0.7248104214668274, "alphanum_fraction": 0.7248104214668274, "avg_line_length": 32.8301887512207, "blob_id": "307673626c81d1e1da9f84a06f7e71f1d70bd711", "content_id": "53d8ace8c4991114e9ecdd0a63c7955f61009e08", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1846, "license_type": "no_license", "max_line_length": 105, "num_lines": 53, "path": "/TableauConnector/EnterCredentials.py", "repo_name": "natelee-sb/TableauAPI", "src_encoding": "UTF-8", "text": "\"\"\"Enter Credentials\r\n\r\nThis script allows the user to either enter credentials automatically or via command line prompt\r\nNote that the input requires a space between the first and last name, which reflects\r\nBCG Tableau username specifications.\r\n\r\nThis script requires that `getpass` be installed within the Python\r\nenvironment you are running this script in.\r\n\r\nThis file can also be imported as a module and contains the following\r\nfunctions:\r\n\r\n * enterCredentials - user-entered credentials\r\n * enterCredentailsAutomatic - pre-filled credentials for faster testing\r\n\"\"\"\r\n\r\n\r\nfrom getpass import getpass\r\n\r\ndef enterCredentials():\r\n\r\n\twhile True:\r\n\r\n\t\tusername = input(\"Enter Username (Last First): \")\r\n\r\n\t\tif (' ' in username):\r\n\t\t\tprint (\"Example URL: https://tableau.bcg.com/#/site/GeoApps/views/Sales/SalesPotential\")\r\n\t\t\tpasswd = getpass(\"Enter Password (BCG Login_Password) : \")\r\n\t\t\tserverInstance = input(\"Enter Server Instance Name (i.e. https://tableau.BCG.com): \")\r\n\t\t\tsiteName = input (\"Enter Site Name (GeoApps) : \")\r\n\t\t\tworkbookName = input(\"Enter Workbook Name (Sales): \")\r\n\t\t\tdashboardName = input(\"Enter Dashboard Name (Sales Potential - check if DashboardName has space!) 
: \")\r\n\t\t\tuserEmail = input(\"Enter your email, will be the sent from address: \")\r\n\r\n\t\t\tbreak\r\n\t\telse:\r\n\t\t\tcontinue\r\n\r\n\treturn username, passwd, serverInstance, siteName, workbookName, dashboardName, userEmail\r\n\r\n\r\ndef enterCredentailsAutomatic():\r\n\t# Example of how creds get entered - recommend calling from connector store, especially password\r\n\t\r\n\tusername = \"Lee Nate\"\r\n\tpasswd = \"helloworld1!\"\r\n\tserverInstance = \"https://tableau.BCG.com\"\r\n\tsiteName = \"GeoApps\"\r\n\tworkbookName = \"Sales\"\r\n\tdashboardName = \"Sales Potential\"\r\n\tuserEmail = \"Lee.Nate@bcg.com\"\r\n\r\n\treturn username, passwd, serverInstance, siteName, workbookName, dashboardName, userEmail\r\n" }, { "alpha_fraction": 0.6974157691001892, "alphanum_fraction": 0.7010140419006348, "avg_line_length": 27.89215660095215, "blob_id": "c7d3abcfe1a86f9c1b2ad635f6b640cc55af9328", "content_id": "bf005a4f277d9640b07ec8208ee9f7f262795243", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3057, "license_type": "no_license", "max_line_length": 114, "num_lines": 102, "path": "/TableauConnector/EmailScript.py", "repo_name": "natelee-sb/TableauAPI", "src_encoding": "UTF-8", "text": "\"\"\"EmailScript\r\n\r\nThis script allows for the option of either emailing a zipped folder of images created by other scripts\r\nto an email address, or for individual PDF images to be sent to individual email addresses.\r\n\r\nThis script requires that `sendgrid` be installed within the Python\r\nenvironment you are running this script in.\r\nNote that the sendgrid_api_key needs to be set in the CLI for sendgrid to work correctly - in this example, it was\r\nset as SENDGRID_API_KEY, hence why SendGridAPIClient(os.environ.get('***')) is so.\r\n\r\nThis file can also be imported as a module and contains the following\r\nfunctions:\r\n\r\n * email_zip_whole - returns the column headers of the file\r\n * email_singlePDF - the main function of the script\r\n\"\"\"\r\n\r\nimport base64\r\nimport os\r\n\r\nfrom sendgrid import SendGridAPIClient\r\nfrom sendgrid.helpers.mail import *\r\n\r\ndef emailList():\r\n\r\n\temail_data = [\r\n\t\t\"Lee.Nate@bcg.com\",\r\n\t\t\"natelee.sb@gmail.com\",\r\n\t]\r\n\r\n\treturn email_data\r\n\r\n\r\n\r\ndef email_zip_whole(zipdirectory ):\r\n\t# Make this readable from a CSV - easy\r\n\t# problems with BCG email - not automatically let through\r\n\r\n\tmessage = Mail(\r\n\t\tfrom_email=\"Lee.Nate@bcg.com\",\r\n\t\tto_emails= emailList(),\r\n\t\tsubject='SendGrid Python Test - BCG email only',\r\n\t\thtml_content=' Nate - python script test - can see all recipients. 
import base64\r\nimport os\r\n\r\nfrom sendgrid import SendGridAPIClient\r\nfrom sendgrid.helpers.mail import *\r\n\r\ndef emailList():\r\n\r\n\temail_data = [\r\n\t\t\"Lee.Nate@bcg.com\",\r\n\t\t\"natelee.sb@gmail.com\",\r\n\t]\r\n\r\n\treturn email_data\r\n\r\n\r\n\r\ndef email_zip_whole(zipdirectory ):\r\n\t# Make this readable from a CSV - easy\r\n\t# problems with BCG email - not automatically let through\r\n\r\n\tmessage = Mail(\r\n\t\tfrom_email=\"Lee.Nate@bcg.com\",\r\n\t\tto_emails= emailList(),\r\n\t\tsubject='SendGrid Python Test - BCG email only',\r\n\t\thtml_content=' Nate - python script test - can see all recipients. BCG email'\r\n\t)\r\n\r\n\tzip_filePath = zipdirectory + \".zip\"\r\n\r\n\twith open(zip_filePath, 'rb') as file_:\r\n\t\tdata = file_.read()\r\n\t\tfile_.close()\r\n\r\n\tencoded = base64.b64encode(data).decode()\r\n\tattachment = Attachment()\r\n\tattachment.file_content = FileContent(encoded)\r\n\tattachment.file_type = FileType('application/zip')\r\n\tattachment.file_name = FileName('test_1.zip')\r\n\tattachment.disposition = Disposition('attachment')\r\n\tattachment.content_id = ContentId('Example Content')\r\n\tmessage.attachment = attachment\r\n\r\n\ttry:\r\n\t\tsg = SendGridAPIClient(os.environ.get('SENDGRID_API_KEY'))\r\n\t\tresponse = sg.send(message)\r\n\t\tprint(response.status_code)\r\n\t\tprint(response.body)\r\n\t\tprint(response.headers)\r\n\texcept Exception as e:\r\n\t\tprint(e)\r\n\r\n\r\ndef email_singlePDF( email_address, PDF_filepath, PDF_Name, fromEmailAddress ):\r\n\r\n\r\n\tif email_address not in (None, \"\"):\r\n\t\tmessage = Mail(\r\n\t\t\tfrom_email = fromEmailAddress,\r\n\t\t\tto_emails = email_address,\r\n\t\t\tsubject='Python Script - Tableau Dashboard Output - PDF',\r\n\t\t\thtml_content=' Individual PDF Test'\r\n\t\t)\r\n\r\n\t\twith open(PDF_filepath, 'rb') as file_:\r\n\t\t\tdata = file_.read()\r\n\t\t\tfile_.close()\r\n\r\n\t\tencoded = base64.b64encode(data).decode()\r\n\t\tattachment = Attachment()\r\n\t\tattachment.file_content = FileContent(encoded)\r\n\t\tattachment.file_type = FileType('application/pdf')\r\n\t\tattachment.file_name = FileName(PDF_Name + '.pdf')\r\n\t\tattachment.disposition = Disposition('attachment')\r\n\t\tattachment.content_id = ContentId('PDF Document File')\r\n\t\tmessage.attachment = attachment\r\n\r\n\t\ttry:\r\n\t\t\tsg = SendGridAPIClient(os.environ.get('SENDGRID_API_KEY'))\r\n\t\t\tresponse = sg.send(message)\r\n\t\t\t# print(response.status_code)\r\n\t\t\t# print(response.body)\r\n\t\t\t# print(response.headers)\r\n\t\texcept Exception as e:\r\n\t\t\tprint(e)\r\n\r\n\r\n\r\n\r\n" }, { "alpha_fraction": 0.6363636255264282, "alphanum_fraction": 0.6455667614936829, "avg_line_length": 37.096492767333984, "blob_id": "4a1b5a28710207bb31f102a97d4331735bf2bf36", "content_id": "3a15c0f9e6876482bf56733275043a435d781332", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4455, "license_type": "no_license", "max_line_length": 123, "num_lines": 114, "path": "/TableauConnector/ReadCSV_CreateImage.py", "repo_name": "natelee-sb/TableauAPI", "src_encoding": "UTF-8", "text": "\"\"\"ReadCSV_CreateImage\r\n\r\nThis script takes in the fields that the user has specified in the main function (TableauConnector.py). It signs\r\nin to Tableau Server Online using the user-entered credentials. It goes through the CSV file the user has entered,\r\nwhere column[0] is the name of the file to be saved, column[1] is the image type, and column[2] is the email where we\r\nsend the images to. Columns[3] to Column[N] are the different parameters within the Tableau Dashboard that this function\r\nuses to get the different filtered views.
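For example (hypothetical values), a row such as \"Report1,PDF,user@example.com,North\" saves <DashboardName>Report1.PDF with the first parameter column filtered to North, while the special value \"All\" in a parameter column clears the view filters instead.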
It then emails the different views based on the email addresses provided.\r\n\r\nThis script requires that `tableauserverclient` be installed within the Python\r\nenvironment you are running this script in.\r\n\r\nThis file can also be imported as a module and contains the following\r\nfunctions:\r\n * CSV_ImageRequest - the main function of the script\r\n\"\"\"\r\n\r\nimport tableauserverclient as TSC\r\nimport csv\r\n\r\nimport FolderViews as folderExplore\r\nimport EmailScript as emailScript\r\n\r\ndef CSV_ImageRequest(server, tableau_auth, csv_filename, end_file_directory, WorkbookName, DashboardName, UserEmail ):\r\n\r\n\twith server.auth.sign_in(tableau_auth):\r\n\r\n\t\tfolderExplore.TableauFolderViews(server)\r\n\r\n\r\n\t\treq_option1 = TSC.RequestOptions()\r\n\t\treq_option1.filter.add(TSC.Filter(TSC.RequestOptions.Field.Name,\r\n\t\t                                  TSC.RequestOptions.Operator.Equals,\r\n\t\t                                  DashboardName))\r\n\r\n\t\treq_option2 = TSC.RequestOptions()\r\n\t\treq_option2.filter.add(TSC.Filter(TSC.RequestOptions.Field.Name,\r\n\t\t                                  TSC.RequestOptions.Operator.Equals,\r\n\t\t                                  WorkbookName))\r\n\r\n\t\tall_views_filtered, views_items1 = server.views.get(req_option1)\r\n\t\tall_workbooks_filtered, workbooks_items1 = server.workbooks.get(req_option2)\r\n\r\n\t\tview_i, workbook_i = folderExplore.checkMatch(all_views_filtered, views_items1, all_workbooks_filtered, workbooks_items1)\r\n\r\n\t\twith open(csv_filename) as csv_file:\r\n\t\t\tcsv_reader = csv.reader(csv_file, delimiter=',')\r\n\t\t\tline_count = 0\r\n\r\n\t\t\tfor row in csv_reader:\r\n\t\t\t\tif line_count == 0:\r\n\t\t\t\t\tnum_col = len(row)\r\n\t\t\t\t\tcolumn_header = row\r\n\t\t\t\t\tprint (\"\\n {}\".format(column_header))\r\n\t\t\t\t\tline_count += 1\r\n\t\t\t\telse:\r\n\t\t\t\t\tif (row[1] == \"PNG\" or row[1] == \"JPG\"):\r\n\r\n\t\t\t\t\t\t# view item is the dashboard view.\r\n\t\t\t\t\t\tview_item = all_views_filtered[view_i]\r\n\r\n\t\t\t\t\t\timage_req_option = TSC.ImageRequestOptions(imageresolution=TSC.ImageRequestOptions.Resolution.High)\r\n\r\n\t\t\t\t\t\t# Assumes col 0,1,2 are imageName, FileType, and Email respectively\r\n\r\n\t\t\t\t\t\tfor iter_ in range(3, num_col):\r\n\t\t\t\t\t\t\t# column_header[iter_] takes the nth item across in the column header,\r\n\t\t\t\t\t\t\t# row[iter_] takes the same nth item across in the row,\r\n\t\t\t\t\t\t\t# and passes it like this:\r\n\t\t\t\t\t\t\t# image_req_option.vf( Color By , 2)\r\n\t\t\t\t\t\t\t# image_req_option.vf( Bubble Color, 1)\r\n\t\t\t\t\t\t\t# image_req_option.vf( Sales Channel, Channel1)\r\n\t\t\t\t\t\t\tif (row[iter_] == \"All\"):\r\n\t\t\t\t\t\t\t\timage_req_option.view_filters.clear()\r\n\t\t\t\t\t\t\t\tbreak\r\n\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\timage_req_option.vf(column_header[iter_], row[iter_])\r\n\r\n\r\n\t\t\t\t\t\tserver.views.populate_image(view_item, image_req_option)\r\n\t\t\t\t\t\timage_filepath = view_item.name\r\n\r\n\t\t\t\t\t\twith open(end_file_directory + \"/\" + image_filepath + row[0] + \".\" + row[1], \"wb\") as image_file:\r\n\t\t\t\t\t\t\timage_file.write(view_item.image)\r\n\r\n\t\t\t\t\telif (row[1] == \"PDF\"):\r\n\r\n\t\t\t\t\t\tview_item = all_views_filtered[view_i]\r\n\r\n\t\t\t\t\t\tpdf_req_option = TSC.PDFRequestOptions(page_type=TSC.PDFRequestOptions.PageType.A4,\r\n\t\t\t\t\t\t                                       orientation=TSC.PDFRequestOptions.Orientation.Portrait)\r\n\r\n\t\t\t\t\t\t# Assumes col 0,1,2 are imageName, FileType, and Email respectively\r\n\t\t\t\t\t\tfor iter_ in range(3, num_col):\r\n\t\t\t\t\t\t\tif (row[iter_] == 
\"All\"):\r\n\t\t\t\t\t\t\t\tpdf_req_option.view_filters.clear()\r\n\t\t\t\t\t\t\t\tbreak\r\n\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\tpdf_req_option.vf(column_header[iter_], row[iter_])\r\n\r\n\t\t\t\t\t\tserver.views.populate_pdf(view_item, pdf_req_option)\r\n\t\t\t\t\t\tpdf_name = view_item.name\r\n\r\n\t\t\t\t\t\twith open(end_file_directory + \"/\" + pdf_name + row[0] + \".\" + row[1], \"wb\") as pdf_file:\r\n\t\t\t\t\t\t\tpdf_file.write(view_item.pdf)\r\n\r\n\t\t\t\t\t\t\t# full_path = end_file_directory + \"/\" + pdf_name + row[0] + \".\" + row[1]\r\n\t\t\t\t\t\t\t# emailScript.email_singlePDF(row[2], full_path, row[0], UserEmail)\r\n\r\n\t\t\t\t\tprint(row)\r\n\t\t\t\t\tline_count += 1\r\n\r\n\t\t\tprint(f'Processed {line_count} lines.')\r\n\r\n\tserver.auth.sign_out()" }, { "alpha_fraction": 0.5189873576164246, "alphanum_fraction": 0.6835442781448364, "avg_line_length": 17.25, "blob_id": "ff62f0162ef50910ac000713941d0675cad37613", "content_id": "4aa4e4ee6f46a23f5e88df3671ee4ea745dabcab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 79, "license_type": "no_license", "max_line_length": 24, "num_lines": 4, "path": "/requirements.txt", "repo_name": "natelee-sb/TableauAPI", "src_encoding": "UTF-8", "text": "tableauserverclient==0.8\r\nschedule==0.6.0\r\nsendgrid==6.0.4\r\nurllib3==1.25.1\r\n\r\n" }, { "alpha_fraction": 0.7360838055610657, "alphanum_fraction": 0.7367386817932129, "avg_line_length": 38.23684310913086, "blob_id": "9d62ba76689800a64633306a26c8994003c59ab1", "content_id": "499902d0ef590ce48dcd66b5ee677c14719e7470", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 1611, "license_type": "no_license", "max_line_length": 338, "num_lines": 38, "path": "/ReadMe.txt", "repo_name": "natelee-sb/TableauAPI", "src_encoding": "UTF-8", "text": "DashboardEmailer/\r\n│\r\n├── TableauConnector/\r\n│ ├── TableauConnector.py (main file)\r\n│ ├── FolderViews.py\r\n│ ├── EnterCredentials.py\r\n│ ├── EmailScript.py\r\n│ └── Scheduler.py\r\n├── tests/\r\n│ ├── test_connection.py\r\n│ └── helper_test.py\r\n│\r\n├── ReadMe.txt\r\n└── requirements.txt\r\n\r\nREADME:\r\nDashboard Emailer is a script that queries a Tableau Dashboard using the tableauserverclient API for Python. It takes individual images (user defined either as a JPG / PNG or PDF) of different filters / views of the dashboard. It then emails those images either zipped up to a single email or as individual files to individual emails. \r\n\r\nIndividual explanations for the functions can be found in each file. \r\n\r\nNote: the sendgrid python library package requires you to create a SENDGRID_API_KEY thru their website, and to set that variable locally in the CLI ( set SENDGRID_API_KEY = \"...\") after installation.\r\n\r\nCONTACT:\r\n\r\nExample Run:\r\n\tIn ..../DashboardEmailer_FileLocation> python TableauConnector.py\r\n\tOr to run the script as designated times:\r\n\t\tIn ..../DashboardEmailer_FileLocation> python Scheduler.py\r\n\r\nPython Library Installation - \r\nIf pip install -r requirements.txt doesn't install all the libraries - install the following manually in IDE:\r\n\r\ntableauserverclient\r\nschedule\r\nsendgrid*\r\n\r\nUpdated urllib3\r\nNote: the sendgrid python library package requires you to create a SENDGRID_API_KEY thru their website, and to set that variable locally in the CLI ( set SENDGRID_API_KEY = \"...\") after installation." 
}, { "alpha_fraction": 0.7327536344528198, "alphanum_fraction": 0.7327536344528198, "avg_line_length": 31.86274528503418, "blob_id": "cd21af9e23374edbb740c1ae4bac864eded141e1", "content_id": "2e2804a33db0e5db9b3b73d7739f89bfaf557bf1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1725, "license_type": "no_license", "max_line_length": 155, "num_lines": 51, "path": "/TableauConnector/TableauConnector.py", "repo_name": "natelee-sb/TableauAPI", "src_encoding": "UTF-8", "text": "\"\"\"Tableau Connector\r\n\r\nThis is the main function for the Dashboard Emailer. It prints to the console all columns in the\r\nspreadsheet. It is assumed that the first row of the spreadsheet is the\r\nlocation of the columns.\r\n\r\nThis tool accepts comma separated value files (.csv) only, and can only export files as PNG, JPGs, and PDFs\r\n\r\nThis script requires that all libraries be installed as stated in the requirements.txt file.\r\n\r\nThis file can also be imported as a module and contains the following\r\nfunctions:\r\n\r\n * main - the main function of the script\r\n\"\"\"\r\n\r\n\r\nimport tableauserverclient as TSC\r\nimport tkinter as tk\r\nfrom tkinter import filedialog\r\nimport time\r\nimport os\r\n\r\nimport EnterCredentials as entCred\r\nimport ReadCSV_CreateImage as dashboardViewer\r\n\r\n\r\ndef main():\r\n\r\n\tprint(\"Select CSV File\")\r\n\r\n\tCSV_filename = tk.filedialog.askopenfilename(initialdir = os.getcwd(), title = \"Select CSV file\", filetypes = ((\"csv files\",\"*.csv\"),(\"all files\",\"*.*\")))\r\n\r\n\tprint(\"Select End File Location\")\r\n\tSave_location = tk.filedialog.askdirectory(initialdir = os.getcwd(), title = \"Choose Directory\")\r\n\r\n\tuserName, password, serverInstance, siteName, workbook_Name, dashboard_Name, user_Email = entCred.enterCredentailsAutomatic()\r\n\tprint (userName, siteName, workbook_Name, dashboard_Name )\r\n\r\n\tTableauAuthentication = TSC.TableauAuth(userName, password, site_id=siteName)\r\n\r\n\tTSC_server = TSC.Server(serverInstance, use_server_version=True)\r\n\r\n\tstart_time = time.time()\r\n\r\n\tdashboardViewer.CSV_ImageRequest(TSC_server, TableauAuthentication, CSV_filename, Save_location, workbook_Name, dashboard_Name, user_Email )\r\n\r\n\tprint(\"--- %s seconds ---\" % (time.time() - start_time))\r\n\r\nif __name__ == '__main__':\r\n\tmain()" }, { "alpha_fraction": 0.6521450877189636, "alphanum_fraction": 0.6658558249473572, "avg_line_length": 29.845069885253906, "blob_id": "8d473167c8dbf76dc4aa1c61a21f424086bc1583", "content_id": "be9558396d3108cf493bd69271d40e4bf693b8e5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4526, "license_type": "no_license", "max_line_length": 141, "num_lines": 142, "path": "/tests/helper_test.py", "repo_name": "natelee-sb/TableauAPI", "src_encoding": "UTF-8", "text": "# This is testing that python urllib works: swap out the different test URL in requests.get\r\n\r\n# import urllib.request\r\n#\r\n# URL = \"http://tableau.bcg.com/views/SalesPotential/SalesPotential?Color By=1&Bubble Color=1&:embed=y&:toolbar=no&:format=png\"\r\n# urlhk = \"https://upload.wikimedia.org/wikipedia/commons/thumb/7/75/Hong_Kong_at_night.jpg/2400px-Hong_Kong_at_night.jpg\"\r\n# URL_tab = \"http://tableau.bcg.com/views/SalesPotential/SalesPotential.png\"\r\n#\r\n#\r\n# urllib.request.urlretrieve(urlhk, 
\"wikiTest_1.jpg\")\r\n\r\n\r\n##############################################################################\r\n\r\n# This is testing that python requests works: swap out the different test URL in requests.get\r\n\r\nurlhk = \"https://upload.wikimedia.org/wikipedia/commons/thumb/7/75/Hong_Kong_at_night.jpg/2400px-Hong_Kong_at_night.jpg\"\r\n\r\nURL_tab = \"http://tableau.bcg.com/views/SalesPotential/SalesPotential.png\"\r\nURL_Tableau2 = \"http://tableau.bcg.com/views/SalesPotential/SalesPotential.png?Color%20By=2&Bubble%20Color=1&:embed=y&:toolbar=no&\"\r\nURL_Tableau_Public = \"https://public.tableau.com/en-us/s/gallery/fortune-magazines-stock-picks-2019.png?gallery=votd\"\r\n\r\nimport requests\r\n\r\n\r\ndef load_requests(source_url, sink_path):\r\n\t\"\"\"\r\n\tLoad a file from an URL (e.g. http).\r\n\r\n\tParameters\r\n\t----------\r\n\tsource_url : str\r\n\t\tWhere to load the file from.\r\n\tsink_path : str\r\n\t\tWhere the loaded file is stored.\r\n\t\"\"\"\r\n\r\n\tr = requests.get(source_url, stream=True)\r\n\r\n\tif r.status_code == 200:\r\n\t\twith open(sink_path, 'wb') as f:\r\n\t\t\tfor chunk in r:\r\n\t\t\t\tf.write(chunk)\r\n\telse:\r\n\t\tprint(r.status_code)\r\n\r\n\r\nload_requests(URL_Tableau_Public, \"test1.png\")\r\n\r\n\r\n##############################################################################\r\n\r\n# This is testing that a webrowser can open the link\r\n\r\n# import webbrowser\r\n#\r\n# # a_website = \"https://www.google.com\"\r\n# a_website = \"http://tableau.bcg.com/views/SalesPotential/SalesPotential?Color By=1&Bubble Color=1&:embed=y&:toolbar=no&:format=png\"\r\n#\r\n# # Open url in a new window of the default browser, if possible\r\n# webbrowser.open_new(a_website)\r\n#\r\n# # Open url in a new page (“tab”) of the default browser, if possible\r\n# # webbrowser.open_new_tab(a_website)\r\n\r\n# from selenium import webdriver\r\n# from webdriver_manager.chrome import ChromeDriverManager\r\n# from bs4 import BeautifulSoup\r\n#\r\n# import time\r\n#\r\n#\r\n# urlhk = \"https://upload.wikimedia.org/wikipedia/commons/thumb/7/75/Hong_Kong_at_night.jpg/2400px-Hong_Kong_at_night.jpg\"\r\n# URL_tab = \"https://tableau.bcg.com/views/SalesPotential/SalesPotential.png\"\r\n#\r\n# URL_Tableau = \"https://tableau.bcg.com/views/SalesPotential/SalesPotential?Color%20By=1&Bubble%20Color=1&:embed=y&:toolbar=no&:format=.png\"\r\n# URL_Tableau2 = \"https://tableau.bcg.com/views/SalesPotential/SalesPotential.png?Color%20By=2&Bubble%20Color=1&:embed=y&:toolbar=no&\"\r\n#\r\n# # driver = webdriver.Chrome(ChromeDriverManager().install())\r\n#\r\n# # options = we\r\n# # options.add_argument('--ignore-certificate-errors')\r\n# # options.add_argument(\"--test-type\")\r\n# # options.binary_location = \"/usr/bin/chromium\"\r\n#\r\n# # driver = webdriver.Chrome(chrome_options=options)\r\n#\r\n# driver = webdriver.Chrome(\"C://Users//lee nate//Downloads//chromedriver_win32//chromedriver.exe\")\r\n#\r\n# driver.get(urlhk)\r\n# # time.sleep(2)\r\n# driver.save_screenshot(\"screenshot1.png\")\r\n#\r\n# driver.close()\r\n\r\n\r\n\r\n# import os\r\n# import shutil\r\n# import tkinter as tk\r\n# from tkinter.filedialog import askopenfilename\r\n#\r\n# print(\"Select zip file location\")\r\n# zip_directory = tk.filedialog.askdirectory()\r\n# zip_directory = zip_directory + \"/Tableau_ZipOutput\"\r\n#\r\n# shutil.make_archive(zip_directory, 'zip', \"C://Users//lee nate//Documents//TableauTest\")\r\n\r\n\r\n# import csv\r\n# import tableauserverclient as TSC\r\n#\r\n# def 
csv_header(csv_reader, ):\r\n#\r\n# \tline_count = 0\r\n#\r\n# \tfor row in csv_reader:\r\n# \t\tif line_count == 0:\r\n# \t\t\tnum_col = len(row)\r\n# \t\t\tcolumn_header = row\r\n# \t\t\tprint(\"\\n {}\".format(column_header))\r\n# \t\t\tline_count += 1\r\n\r\n\r\n\r\n# import os\r\n# from sendgrid import SendGridAPIClient\r\n# from sendgrid.helpers.mail import Mail\r\n#\r\n# message = Mail(\r\n#     from_email=\"natelee.sb@gmail.com\",\r\n#     to_emails='lee.nate@bcg.com',\r\n#     subject='BCG Email Test!',\r\n#     html_content='<strong>and easy to do anywhere, even with Python</strong>')\r\n# try:\r\n#     sg = SendGridAPIClient(os.environ.get('SENDGRID_API_KEY'))\r\n#     response = sg.send(message)\r\n#     print(response.status_code)\r\n#     print(response.body)\r\n#     print(response.headers)\r\n# except Exception as e:\r\n#     print(e)\r\n" }, { "alpha_fraction": 0.73416668176651, "alphanum_fraction": 0.7350000143051147, "avg_line_length": 40.105262756347656, "blob_id": "e91590a803adab54b868419fbed5c92440438e42", "content_id": "a914af7e14ebf5a592b467751dd71b565d73f033", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2400, "license_type": "no_license", "max_line_length": 120, "num_lines": 57, "path": "/TableauConnector/FolderViews.py", "repo_name": "natelee-sb/TableauAPI", "src_encoding": "UTF-8", "text": "\"\"\"Folder Views\r\n\r\nThis script has two purposes - TableauFolderViews shows each of the files within the four common Tableau server\r\nfolders - data sources, projects, views, and workbooks. The user has provided the name of the workbook and the view,\r\nand the workbooks and views are filtered to match the name. checkMatch first looks at the filtered views to see if\r\nthe user-provided views and workbooks exist, and if so, whether the views' and workbooks' IDs match each other, for cases\r\nwhere there exist multiple views of the same name.\r\n\r\nThis file can also be imported as a module and contains the following\r\nfunctions:\r\n\r\n * checkMatch - returns the indices of the matching view and workbook\r\n * TableauFolderViews - the main function of the script\r\n\"\"\"\r\n\r\nimport sys\r\n\r\n\r\ndef checkMatch(filter_DB, views_item, filter_WB, workbooks_item):\r\n\r\n\tif len(filter_DB) == 0:\r\n\t\tsys.exit(\" No Dashboard View Matching that name\")\r\n\telif len(filter_WB) == 0:\r\n\t\tsys.exit(\" No Workbook Matching that name \")\r\n\r\n\t# Case Multiple Dashboards Same Name - return the correct one belonging to the Workbook\r\n\ttry:\r\n\t\tfor view_i in range(views_item.total_available):\r\n\t\t\tfor workbook_i in range(workbooks_item.total_available):\r\n\t\t\t\tif (filter_DB[view_i].workbook_id == filter_WB[workbook_i].id):\r\n\t\t\t\t\treturn view_i, workbook_i\r\n\r\n\t\treturn TypeError\r\n\texcept TypeError:\r\n\t\tsys.exit(\" No Dashboard ID and Workbook ID match\")\r\n\r\n\r\ndef TableauFolderViews(Tableau_server):\r\n\r\n\t# The below four segments explore the four different folders in GeoApps - YMMV in the names / types of folders\r\n\t# in the site_id folder\r\n\r\n\tall_datasources, datasource_items = Tableau_server.datasources.get()\r\n\tprint(\"\\nThere are {} datasources on site: \".format(datasource_items.total_available))\r\n\tprint([datasource.name for datasource in all_datasources])\r\n\r\n\tall_projects, projects_item = Tableau_server.projects.get()\r\n\tprint(\"\\nThere are {} projects on site: \".format(projects_item.total_available))\r\n\tprint([projects.name for projects in all_projects])\r\n\r\n\tall_views, views_item 
= Tableau_server.views.get()\r\n\tprint(\"\\nThere are {} views on site: \".format(views_item.total_available))\r\n\tprint([views.name for views in all_views])\r\n\r\n\tall_workbooks, workbooks_item = Tableau_server.workbooks.get()\r\n\tprint(\"\\nThere are {} workbooks on site: \".format(workbooks_item.total_available))\r\n\tprint([workbooks.name for workbooks in all_workbooks])\r\n" }, { "alpha_fraction": 0.7235592007637024, "alphanum_fraction": 0.723875880241394, "avg_line_length": 31.97849464416504, "blob_id": "ec1673c05f79bfed2110438df57e6b3e7d6169bc", "content_id": "1413468edb5d8a49814743432a0b00d57f0405d3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3158, "license_type": "no_license", "max_line_length": 119, "num_lines": 93, "path": "/tests/test_connection.py", "repo_name": "natelee-sb/TableauAPI", "src_encoding": "UTF-8", "text": "# import tableauserverclient as TSC\r\n#\r\n# # Basic Test to check that you can access the tableau.bcg.com server\r\n# username = \"Lee Nate\"\r\n# password = \"******\"\r\n# site_id_test = \"GeoApps\"\r\n# server_BCG = \"https://tableau.bcg.com\"\r\n#\r\n# tableau_auth = TSC.TableauAuth(username, password, site_id=site_id_test)\r\n#\r\n# # create an instance for your server\r\n# server = TSC.Server(server_BCG, use_server_version=True)\r\n#\r\n# # call the sign-in method with the auth object\r\n# server.auth.sign_in(tableau_auth)\r\n# server.auth.sign_out()\r\n\r\n\r\nimport tableauserverclient as TSC\r\n\r\n# create an auth object\r\n\r\nusername = \"LastName FirstName\" # Fill this in\r\npassword = \"LoginPassword\" # Fill this in\r\n\r\nsite_id_test = \"GeoApps\" # Leave for this demo\r\n\r\nserver_BCG = \"https://tableau.bcg.com\"\r\n\r\ntableau_auth = TSC.TableauAuth(username, password, site_id=site_id_test)\r\n\r\n# create an instance for your server\r\n\r\nserver = TSC.Server(server_BCG, use_server_version=True)\r\n\r\n# # call the sign-in method with the auth object\r\n# server.auth.sign_in(tableau_auth)\r\n#\r\n# server.auth.sign_out()\r\n\r\nwith server.auth.sign_in(tableau_auth):\r\n\r\n\t# The below four segments explore the four different folders in GeoApps - YMMV in the names / types of folders\r\n\t# in the site_id folder\r\n\r\n\tall_datasources, datasource_items = server.datasources.get()\r\n\tprint(\"\\nThere are {} datasources on site: \".format(datasource_items.total_available))\r\n\tprint([datasource.name for datasource in all_datasources])\r\n\r\n\tall_projects, projects_item = server.projects.get()\r\n\tprint(\"\\nThere are {} projects on site: \".format(projects_item.total_available))\r\n\tprint([projects.name for projects in all_projects])\r\n\r\n\tall_views, views_item = server.views.get()\r\n\tprint(\"\\nThere are {} views on site: \".format(views_item.total_available))\r\n\tprint([views.name for views in all_views])\r\n\r\n\tall_workbooks, workbooks_item = server.workbooks.get()\r\n\tprint(\"\\nThere are {} workbooks on site: \".format(workbooks_item.total_available))\r\n\tprint([workbooks.name for workbooks in all_workbooks])\r\n\r\n\r\n\t# https://tableau.bcg.com/#/site/GeoApps/views/Sales/SalesPotential\r\n\t# Want to access Sales Potential - Have to look at naming convention inside Sales folder\r\n\t# To change dashboard type to access, change \"Sales Potential\" to whatever name\r\n\r\n\treq_option = TSC.RequestOptions()\r\n\treq_option.filter.add(TSC.Filter(TSC.RequestOptions.Field.Name, TSC.RequestOptions.Operator.Equals,'Sales Potential'))\r\n\r\n\t# Also 
note the server.views - this references a specific folder that I knew the dashboard was in (see printout for\r\n\t# more clarification)\r\n\tall_views, pagination_item = server.views.get(req_option)\r\n\r\n\t# This is unecessary, but sometimes useful\r\n\tprint(all_views)\r\n\tprint(pagination_item)\r\n\r\n\tview_item = all_views[0]\r\n\tprint(view_item)\r\n\r\n\timage_req_option = TSC.ImageRequestOptions(imageresolution=TSC.ImageRequestOptions.Resolution.High)\r\n\r\n\tprint('Populating image')\r\n\tserver.views.populate_image(view_item, image_req_option)\r\n\r\n\tprint('Got image')\r\n\r\n\timage_filepath = 'TableauDashboardImage.png'\r\n\r\n\twith open(image_filepath, \"wb\") as image_file:\r\n\t\timage_file.write(view_item.image)\r\n\r\nserver.auth.sign_out()" } ]
9
michawei/2015-MachineLearning-hw
https://github.com/michawei/2015-MachineLearning-hw
b8f5facf92da3297077b0e87bee4736cef44984a
dda32ec3a34664c773d18b00a4041e29d526ca80
4b17890b765c93cbcbe04b796fd17da0153ab705
refs/heads/master
2020-12-30T23:21:06.599111
2016-01-04T13:37:41
2016-01-04T13:37:41
43,697,390
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4548174738883972, "alphanum_fraction": 0.5320167541503906, "avg_line_length": 17.786516189575195, "blob_id": "f0187e98fc5549f039cb5c673f4852921fca06f5", "content_id": "d2e95cbb07b445b4574f45510e39710f9f5092bd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1671, "license_type": "no_license", "max_line_length": 130, "num_lines": 89, "path": "/homework2/r04922080_hw2/17.py", "repo_name": "michawei/2015-MachineLearning-hw", "src_encoding": "UTF-8", "text": "import numpy as np\nimport random\nimport matplotlib.pyplot as plt\n\ndef sign(a):\n\tif a >= 0:\n\t\treturn 1\n\telse:\n\t\treturn -1\n\ndef main():\n\n\tEin = []\n\n\tfor times in range(5000):\n\t\tdata = []\n\t\tans = []\n\t\t#data.append(-1.0)\n\t\tfor i in range(20):\n\t\t\tx = np.random.uniform(-1, 1)\n\t\t\tdata.append(x)\n\t\tdata.sort()\n\t\t#print data\n\n\t\tfor x in data:\n\t\t\tif sign(x) == 1:\n\t\t\t\tr = random.random()\n\t\t\t\tif r <= 0.2:\n\t\t\t\t\tans.append(-1)\n\t\t\t\telse:\n\t\t\t\t\tans.append(1)\n\t\t\telse:\n\t\t\t\tr = random.random()\n\t\t\t\tif r <= 0.2:\n\t\t\t\t\tans.append(1)\n\t\t\t\telse:\n\t\t\t\t\tans.append(-1)\n\n\t\tthetas = []\n\t\tthetas.append((-1.0 + data[0]) / 2)\n\t\tfor i in range(0, 19):\n\t\t\tthetas.append((data[i]+data[i+1])/2)\n\t\tthetas.append((data[19] + 1.0) / 2)\n\t\t#print thetas\n\n\t\tEinmin = 2\n\t\tfor theta in thetas:\n\t\t\ts = 1\n\t\t\terror = 0.0\n\t\t\tfor i in range(len(data)):\n\t\t\t\tif s * sign( data[i] - theta ) != ans[i]:\n\t\t\t\t\terror += 1\n\t\t\te = error / 20.0\n\t\t\t#print e\n\t\t\tif e < Einmin:\n\t\t\t\tEinmin = e\n\n\t\t\ts = -1\n\t\t\terror = 0.0\n\t\t\tfor i in range(len(data)):\n\t\t\t\tif s * sign( data[i] - theta ) != ans[i]:\n\t\t\t\t\terror += 1\n\t\t\te = error / 20.0\n\t\t\t#print e\n\t\t\tif e < Einmin:\n\t\t\t\tEinmin = e\n\t\t#print Einmin\n\t\tEin.append(Einmin)\n\tprint \"average Ein = \" + str(sum(Ein) / float(len(Ein)))\n\n\tx = [0.00, 0.05, 0.10, 0.15, 0.20, 0.25, 0.30, 0.35, 0.40, 0.45, 0.50, 0.55, 0.60, 0.65, 0.70, 0.75, 0.80, 0.85, 0.90, 0.95, 1.0]\n\ty = [0 for i in range(20)]\n\n\tfor num in Ein:\n\t\tfor i in range(20):\n\t\t\tif num >= x[i] and num < x[i+1]:\n\t\t\t\ty[i] += 1\n\t\t\t\tbreak\n\n\tplt.bar(x[:-1], y, width = 0.05 ,color = \"#fad981\")\n\tplt.title(\"17. 
Ein distribution\")\n\tplt.xlabel(\"Error Rate\")\n\tplt.ylabel(\"times\")\n\tplt.show()\n\n\tplt.plot(Ein)\n\nif __name__ == '__main__':\n\tmain()" }, { "alpha_fraction": 0.4612688422203064, "alphanum_fraction": 0.521205723285675, "avg_line_length": 20.1407413482666, "blob_id": "3ff5cd99e5b9e00d95cbe99da90d47ae9ebe4b69", "content_id": "a630ac407aa71a75c50b43d86a463c5c8ed7b10c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2853, "license_type": "no_license", "max_line_length": 75, "num_lines": 135, "path": "/homework6/r04922080_hw6/15.py", "repo_name": "michawei/2015-MachineLearning-hw", "src_encoding": "UTF-8", "text": "import math\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef sign(a):\n\tif ( a >= 0 ):\n\t\treturn 1\n\telse:\n\t\treturn -1\n\ndef decisionStump(s, x, theta):\n\treturn s * sign(x - theta)\n\ndef main():\n\n\tf = open(\"hw2_adaboost_train.dat\", \"r\")\n\tx_1 = [[0, 0]]\n\tx_2 = [[0, 0]]\n\tw = []\n\tG = []\n\tindex = 0\n\tfor line in f:\n\t\tline = line.strip().split()\n\t\tif line == '':\n\t\t\tbreak\n\t\tx_1.append([float(line[0]), float(line[2]), index])\n\t\tx_2.append([float(line[1]), float(line[2]), index])\n\t\tindex += 1\n\tx_1.append([1, 0])\n\tx_2.append([1, 0])\n\n\tlength = len(x_1)\n\tmother = float(length - 2)\n\tweight = 1.0 / mother\n\tfor i in range(length - 2):\n\t\tw.append(weight)\n\t\tG.append(0.0)\n\tf.close()\n\n\n\tf = open(\"hw2_adaboost_test.dat\", \"r\")\n\tx_1_test = []\n\tx_2_test = []\n\tfor line in f:\n\t\tline = line.strip().split()\n\t\tif line == '':\n\t\t\tbreak\n\t\tx_1_test.append([float(line[0]), float(line[2])])\n\t\tx_2_test.append([float(line[1]), float(line[2])])\n\tf.close()\n\n\tx_1.sort()\n\tx_2.sort()\n\n\tEin = []\n\tEin_01 = []\n\tU = []\n\tfor time in range(300):\n\t\tEin_tmp = []\n\t\tfor i in range(length - 1):\n\t\t\t# 1\n\t\t\ttheta = (x_1[i][0] + x_1[i+1][0]) / 2.0\n\t\t\ts = 1.0\n\t\t\terror = 0.0\n\t\t\tfor j in range(1, length - 1):\n\t\t\t\tif decisionStump(s, x_1[j][0], theta) != x_1[j][1]:\n\t\t\t\t\terror += w[x_1[j][2]]\n\t\t\tEin_tmp.append([error, s, theta, 1])\n\n\t\t\ts = -1.0\n\t\t\terror = 0.0\n\t\t\tfor j in range(1, length - 1):\n\t\t\t\tif decisionStump(s, x_1[j][0], theta) != x_1[j][1]:\n\t\t\t\t\terror += w[x_1[j][2]]\n\t\t\tEin_tmp.append([error, s, theta, 1])\n\n\t\t\t# 2\n\t\t\ttheta = (x_2[i][0] + x_2[i+1][0]) / 2.0\n\t\t\ts = 1.0\n\t\t\terror = 0.0\n\t\t\tfor j in range(1, length - 1):\n\t\t\t\tif decisionStump(s, x_2[j][0], theta) != x_2[j][1]:\n\t\t\t\t\terror += w[x_2[j][2]]\n\t\t\tEin_tmp.append([error, s, theta, 2])\n\n\t\t\ts = -1.0\n\t\t\terror = 0.0\n\t\t\tfor j in range(1, length - 1):\n\t\t\t\tif decisionStump(s, x_2[j][0], theta) != x_2[j][1]:\n\t\t\t\t\terror += w[x_2[j][2]]\n\t\t\tEin_tmp.append([error, s, theta, 2])\n\n\t\tEin_tmp.sort()\n\t\tepsilon = Ein_tmp[0][0] / sum(w)\n\t\tdiamond_t = math.sqrt(((1.0 - epsilon) / epsilon))\n\t\terror = 0.0\n\t\tif ( Ein_tmp[0][3] == 1 ):\n\t\t\tfor j in range(1, length - 1):\n\t\t\t\tif decisionStump(Ein_tmp[0][1], x_1[j][0], Ein_tmp[0][2]) != x_1[j][1]:\n\t\t\t\t\tw[x_1[j][2]] *= diamond_t\n\t\t\t\t\terror += 1.0\n\t\t\t\telse:\n\t\t\t\t\tw[x_1[j][2]] /= diamond_t\n\t\telse:\n\t\t\tfor j in range(1, length - 1):\n\t\t\t\tif decisionStump(Ein_tmp[0][1], x_2[j][0], Ein_tmp[0][2]) != x_2[j][1]:\n\t\t\t\t\tw[x_2[j][2]] *= diamond_t\n\t\t\t\t\terror += 1.0\n\t\t\t\telse:\n\t\t\t\t\tw[x_2[j][2]] /= diamond_t\n\t\ttmp = 
Ein_tmp[0]\n\t\ttmp.append(math.log(diamond_t))\n\t\tEin.append(tmp)\n\t\tEin_01.append(error/mother)\n\n\t\tU_tmp = 0.0\n\t\tfor j in range(length - 2):\n\t\t\tU_tmp += w[j];\n\t\tU.append(U_tmp)\n\n\tprint \"U2 = \", U[2]\n\tprint \"UT = \", U[len(U)-1]\n\n\tx = []\n\tfor i in range(300):\n\t\tx.append(i)\n\n\tplt.plot(x, U)\n\tplt.title(\"15.\")\n\tplt.xlabel(\"t\")\n\tplt.ylabel(\"Ut\")\n\tplt.show()\n\nif __name__ == '__main__':\n\tmain()" }, { "alpha_fraction": 0.4045853018760681, "alphanum_fraction": 0.4558327794075012, "avg_line_length": 22.935483932495117, "blob_id": "a81be3c59a7fbca268eea528b32a0f33cb3b508a", "content_id": "9f1448515d823297e75187c6456e260d3d8a0ec0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1483, "license_type": "no_license", "max_line_length": 70, "num_lines": 62, "path": "/homework2/a.py", "repo_name": "michawei/2015-MachineLearning-hw", "src_encoding": "UTF-8", "text": "import math\nimport matplotlib.pyplot as plt\n\nN = 10000.0\nd = 50.0\ng = 0.05\n\n# a = float(2 * N - 4.0)\n# b = float(-4.0)\n# c = float(-(math.log(4.0) + 2 * d * math.log(N) - math.log(g) ))\n\n# a = float(N)\n# b = float(-2.0)\n# c = float(-math.log(6.0 * math.pow(2*N, d) / g))\nx = []\ny = []\nfor i in range(1, 10001):\n N = i\n x.append(N)\n # a = float(2 * N - 4.0)\n # b = float(-4.0)\n # c = float(-(math.log(4.0) + 2 * d * math.log(N) - math.log(g) ))\n a = float(N)\n b = float(-2.0)\n c = float(-math.log(6.0 * math.pow(2*N, d) / g))\n if a != 0:\n delta = b ** 2 - 4 * a * c;\n if delta < 0:\n print 'No solution', N\n y.append(0)\n elif delta == 0:\n s = -b / (2 * a);\n print 'Solution is', s\n y.append(s)\n else:\n root = math.sqrt(delta)\n s1 = (-b + root) / (2 * a)\n s2 = (-b - root) / (2 * a)\n print 'Solutions are', s1, s2\n y.append(s1)\n else:\n print c/b\n y.append(c/b)\n\nplt.title(\"Parrondo and Van den Broek\")\nplt.xlabel(\"N\")\nplt.ylabel(\"epsilon\")\nplt.plot(x, y, linewidth = 3)\nplt.show()\n\n# i = (2.0 / N) * math.log(2.0 * N * math.pow(N, d))\n# ans = math.sqrt(i)\n# ans += math.sqrt((2.0 / N) * math.log(1 / g)) + (1 / N)\n# print ans\n\n# i = (16.0 / N) * math.log(2.0 * math.pow(N, d) / math.sqrt(g))\n# ans = math.sqrt(i)\n# print ans\n\n# i = (8 / N) * math.log(4.0 * math.pow(2*N, d) / g)\n# ans = math.sqrt(i)\n# print ans" }, { "alpha_fraction": 0.4592744708061218, "alphanum_fraction": 0.550992488861084, "avg_line_length": 19.591548919677734, "blob_id": "9107bdb6c2ffc9564b5491b5c1f251daf1013a18", "content_id": "43459b4bbbd0ea1a4b09dfaedc55aae4f4ec6e50", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1461, "license_type": "no_license", "max_line_length": 110, "num_lines": 71, "path": "/homework3/r04922080_hw3/14.py", "repo_name": "michawei/2015-MachineLearning-hw", "src_encoding": "UTF-8", "text": "import numpy as np\nimport random\nimport matplotlib.pyplot as plt\nimport math\nfrom numpy.linalg import inv\n\ndef sign(a):\n\tif a >= 0:\n\t\treturn 1\n\telse:\n\t\treturn -1\n\ndef main():\n\n\tEin = []\n\tW_LIN_ALL = []\n\tfor times in range(1000):\n\t\t# data = []\n\t\tX = np.zeros(shape=(1000, 6))\n\t\ty = np.zeros(shape=(1000, 1))\n\t\tX = np.asmatrix(X)\n\t\ty = np.asmatrix(y)\n\t\tfor i in range(1000):\n\t\t\tx1 = np.random.uniform(-1, 1)\n\t\t\tx2 = np.random.uniform(-1, 1)\n\t\t\tans = sign(math.pow(x1, 2) + math.pow(x2, 2) - 0.6)\n\n\t\t\tr = random.random()\n\t\t\tif r <= 0.1:\n\t\t\t\tans *= -1\n\t\t\t\n\t\t\tX[i] = [1, x1, x2, x1*x2, 
math.pow(x1, 2), math.pow(x2, 2)]\r\n\t\t\ty[i] = [ans]\r\n\t\t\t# data.append([x1, x2, ans])\r\n\r\n\t\tW_LIN = inv(X.transpose()*X)*X.transpose()*y\r\n\t\t# print W_LIN\r\n\t\tW_LIN_ALL.append(W_LIN)\r\n\r\n\r\n\tW_hat = np.zeros(shape=(6, 1))\r\n\tW_hat = np.asmatrix(W_hat)\r\n\tW3 = []\r\n\tfor i in range(1000):\r\n\t\tW_hat += W_LIN_ALL[i]\r\n\t\tW3.append(W_LIN_ALL[i].item(3, 0))\r\n\tW_hat /= 1000.0\r\n\t\r\n\t# for i in range(6):\r\n\t# \tprint float('%.6f' % W_hat.item(i))\r\n\tprint float('%.6f' % W_hat.item(3))\r\n\r\n\tx = [-0.05, -0.04, -0.03 ,-0.02 ,-0.01, 0.0, 0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.1, 0.11]\r\n\ty = [0 for i in range(16)]\r\n\r\n\tfor num in W3:\r\n\t\tfor i in range(16):\r\n\t\t\tif num >= x[i] and num < x[i+1]:\r\n\t\t\t\ty[i] += 1\r\n\t\t\t\tbreak\r\n\r\n\tplt.bar(x[:-1], y, width = 0.01 ,color = \"#fad981\")\r\n\tplt.title(\"14. w3 distribution\")\r\n\tplt.xlabel(\"w3 value\")\r\n\tplt.ylabel(\"times\")\r\n\tplt.show()\r\n\r\n\tplt.plot(Ein)\r\n\r\nif __name__ == '__main__':\r\n\tmain()" }, { "alpha_fraction": 0.5147541165351868, "alphanum_fraction": 0.5530054569244385, "avg_line_length": 16.283018112182617, "blob_id": "4f20feef5c4b121d0ce0e8e8f8ce20e90645865b", "content_id": "2969eb6e0712cc24bb93ba744291df72466b8099", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 915, "license_type": "no_license", "max_line_length": 56, "num_lines": 53, "path": "/homework5/15.py", "repo_name": "michawei/2015-MachineLearning-hw", "src_encoding": "UTF-8", "text": "from svmutil import *\r\nimport math\r\n\r\nvalue = 0\r\nC = [0.000001, 0.0001, 0.001, 1, 100]\r\n\r\ndef main():\r\n\r\n\tf = open(\"features.train\", \"r\")\r\n\ty = []\r\n\tx = []\r\n\tfor line in f:\r\n\t\tline = line.strip().split()\r\n\t\t# the digit label is read as a string, so cast before comparing\r\n\t\tif float(line[0]) == value:\r\n\t\t\tans = 1\r\n\t\telse:\r\n\t\t\tans = -1\r\n\t\ttmp = []\r\n\t\tprint line[1]\r\n\t\ttmp.append(float(line[1]))\r\n\t\ttmp.append(float(line[2]))\r\n\t\ty.append(ans)\r\n\t\tx.append(tmp)\r\n\tf.close()\r\n\r\n\tf = open(\"features.test\", \"r\")\r\n\ty_test = []\r\n\tx_test = []\r\n\tfor line in f:\r\n\t\tline = line.strip().split()\r\n\t\tif float(line[0]) == value:\r\n\t\t\tans = 1\r\n\t\telse:\r\n\t\t\tans = -1\r\n\t\ttmp = []\r\n\t\ttmp.append(float(line[1]))\r\n\t\ttmp.append(float(line[2]))\r\n\t\ty_test.append(ans)\r\n\t\tx_test.append(tmp)\r\n\tf.close()\r\n\r\n\tfor c in C:\r\n\t\tprob = svm_problem(y, x)\r\n\t\tparam = svm_parameter('-t 0 -c ' + str(c))\r\n\t\tm = svm_train(prob, param)\r\n\t\tp_label, p_acc, p_val = svm_predict(y_test, x_test, m)\r\n\t\tprint p_label\r\n\t\tprint p_acc\r\n\t\tprint p_val\r\n\t\tbreak\r\n\r\nif __name__ == '__main__':\r\n\tmain()" }, { "alpha_fraction": 0.5518501400947571, "alphanum_fraction": 0.5876168012619019, "avg_line_length": 21.683544158935547, "blob_id": "f73f75edf81f6a542bfdee16e6207e0eca75ea4a", "content_id": "0abeaf14a7b3a8123e5ac9d5262497355de98345", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1712, "license_type": "no_license", "max_line_length": 83, "num_lines": 79, "path": "/homework4/r04922080_hw4/20.py", "repo_name": "michawei/2015-MachineLearning-hw", "src_encoding": "UTF-8", "text": "import numpy as np\r\nimport random\r\nimport matplotlib.pyplot as plt\r\nimport math\r\nfrom numpy.linalg import inv\r\n\r\nLambda = 11.26\r\nsize = 200\r\nDimension_Add_1 = 3\r\ntest_size = 1000\r\npower = -8\r\n\r\ndef sign(a):\r\n\tif a >= 0:\r\n\t\treturn 1\r\n\telse:\r\n\t\treturn -1\r\n\r\ndef main():\r\n\r\n\tf = open(\"hw4_train.dat\", \"r\")\r\n\tX = np.zeros(shape=(size, Dimension_Add_1))\r\n\ty = np.zeros(shape=(size, 1))\r\n\tX = np.asmatrix(X)\r\n\ty = np.asmatrix(y)\r\n\tnum = 0\r\n\tfor 
line in f:\n\t\tline = line[:-1].split(' ')\n\t\tx = np.zeros(shape=(1, Dimension_Add_1))\n\t\tx = np.asmatrix(x)\n\t\tx.itemset((0, 0), 1)\n\t\tfor i in range(2):\n\t\t\tx.itemset((0, i+1), float(line[i]))\n\t\tX[num] = x\n\t\ty[num] = [float(line[2])]\n\t\tnum += 1\n\tf.close()\n\n\tf = open(\"hw4_test.dat\", \"r\")\n\tX_test = np.zeros(shape=(test_size, Dimension_Add_1))\n\ty_test = np.zeros(shape=(test_size, 1))\n\tX_test = np.asmatrix(X_test)\n\ty_test = np.asmatrix(y_test)\n\tnum = 0\n\tfor line in f:\n\t\tline = line[:-1].split(' ')\n\t\tx = np.zeros(shape=(1, Dimension_Add_1))\n\t\tx = np.asmatrix(x)\n\t\tx.itemset((0, 0), 1)\n\t\tfor i in range(2):\n\t\t\tx.itemset((0, i+1), float(line[i]))\n\t\tX_test[num] = x\n\t\ty_test[num] = [float(line[2])]\n\t\tnum += 1\n\tf.close()\n\n\n\tLambda = math.pow(10, power)\n\tI = np.identity(Dimension_Add_1)\n\twReg = inv(Lambda * I + X.transpose() * X) * X.transpose() * y\n\n\terror = 0.0\n\tfor j in range(size):\n\t\tif sign((wReg.transpose() * X[j].transpose()).item(0)) != y[j].item(0):\n\t\t\terror += 1\n\tEin = error / float(size)\n\n\tprint \"When log10(lambda) = \" + str(power) + \",\"\n\tprint \"Ein = \" + str(Ein)\n\n\terror = 0.0\n\tfor i in range(test_size):\n\t\tif sign((wReg.transpose() * X_test[i].transpose()).item(0)) != y_test[i].item(0):\n\t\t\terror += 1\n\n\tprint \"Eout = \" + str(error / float(test_size))\n\nif __name__ == '__main__':\n\tmain()" }, { "alpha_fraction": 0.5831157565116882, "alphanum_fraction": 0.607484757900238, "avg_line_length": 18.491525650024414, "blob_id": "3cd2c6f0d5d09c57acd3947d17fdadf137e3abdb", "content_id": "eaf42ea48253e0d9a21efe71f476488eca235f07", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1149, "license_type": "no_license", "max_line_length": 77, "num_lines": 59, "path": "/homework1/r04922080_hw1/15.py", "repo_name": "michawei/2015-MachineLearning-hw", "src_encoding": "UTF-8", "text": "def normalVectorCompute(a, b, s):\n\tans = []\n\tfor i in range(5):\n\t\tans.append(a[i] + s*b[i])\n\treturn ans\n\ndef innerProduct(a, b):\n\tans = 0\n\tfor i in range(5):\n\t\tans += a[i]*b[i]\n\treturn ans\n\ndef plusOrMinus(yn):\n\tif yn > 0 :\n\t\treturn 1\n\telse :\n\t\treturn -1\n\ndef main():\n\tf = open(\"hw1_15_train.dat\", \"r\")\n\tdata = []\n\tfor line in f:\n\t\tline = line[:-1].split('\\t');\n\t\ttmp = line[0].split(' ');\n\t\tmember = []\n\t\tfor i in tmp:\n\t\t\tmember.append(float(i))\n\t\tmember.append(float(1))\n\t\tmember.append(float(line[1]))\n\t\tdata.append(member)\n\t#print data\n\tf.close()\n\n\tw0 = [0, 0, 0, 0, 0]\n\tnormalVector = data[0][:-1]\n\n\t# first normalVector update\n\tupdateCount = 1\n\tlastErrorIndex = -1\n\tnotYet = True\n\twhile notYet == True :\n\t\tnotYet = False\n\t\tfor i in range(len(data)):\n\t\t\tvector = data[i]\n\t\t\tgn = innerProduct(normalVector, vector[:-1])\n\t\t\tgn = plusOrMinus(gn);\n\n\t\t\tif gn != vector[-1]:\n\t\t\t\tnotYet = True\n\t\t\t\tupdateCount += 1\n\t\t\t\tlastErrorIndex = i\n\t\t\t\tnormalVector = normalVectorCompute(normalVector, vector[:-1], vector[-1])\n\n\tprint \"updates number : \" + str(updateCount)\n\tprint \"last error index : \" + str(lastErrorIndex)\n\n\nif __name__ == '__main__':\n\tmain();" }, { "alpha_fraction": 0.44256120920181274, "alphanum_fraction": 0.4896421730518341, "avg_line_length": 24.926828384399414, "blob_id": "6223a0dbd92660839ab9ddd81167cc6d0b72ae1b", "content_id": "f2ef6779b6d726fab0e52948ba6bf7d28657d4ce", "detected_licenses": [], "is_generated": 
false, "is_vendor": false, "language": "Python", "length_bytes": 1062, "license_type": "no_license", "max_line_length": 189, "num_lines": 41, "path": "/homework3/a.py", "repo_name": "michawei/2015-MachineLearning-hw", "src_encoding": "UTF-8", "text": "import math\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom numpy.linalg import inv\n\n\nu = 0.0\nv = 0.0\n\nfor _ in range(5):\n H = np.matrix([[np.exp(u) + math.pow(v, 2) * np.exp(u*v) + 2, np.exp(u*v) + u*v*np.exp(u*v) - 2], [np.exp(u*v) + u*v*np.exp(u*v) - 2, 4*np.exp(2*v) + math.pow(u, 2) * np.exp(u*v) + 4]])\n gradient_E = np.matrix([[np.exp(u) + v*np.exp(u*v) + 2*u - 2*v - 3],[2*np.exp(2*v) + u*np.exp(u*v) - 2*u + 4*v - 2]])\n\n print H\n print gradient_E\n print inv(H)\n gradient_ans = (-1) * inv(H) * gradient_E\n print gradient_ans\n u += gradient_ans.item(0, 0)\n v += gradient_ans.item(1, 0)\n print u\n print v\n print \"-------------------------------\"\n# u = 0\n# v = 0\n\n# for _ in range(5):\n# U = np.exp(u) + v*np.exp(u*v) + 2*u - 2*v - 3\n# V = 2*np.exp(2*v) + u*np.exp(u*v) - 2*u + 4*v - 2\n\n# print U\n# print V\n# u = u - 0.01 * U\n# v = v - 0.01 * V\n# print u, v\n\nprint u\nprint v\n\nE = np.exp(u) + np.exp(2*v) + np.exp(u*v) + math.pow(u, 2) - 2*u*v + 2*math.pow(v, 2) - 3*u - 2*v\nprint \"Ans: \", E" }, { "alpha_fraction": 0.45269840955734253, "alphanum_fraction": 0.5492063760757446, "avg_line_length": 20.589040756225586, "blob_id": "c729144abaa17204717eb1e95835115110120343", "content_id": "e6b909fd83db47f5b995b64969cf73e881400f58", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1575, "license_type": "no_license", "max_line_length": 107, "num_lines": 73, "path": "/homework3/r04922080_hw3/15.py", "repo_name": "michawei/2015-MachineLearning-hw", "src_encoding": "UTF-8", "text": "import numpy as np\nimport random\nimport matplotlib.pyplot as plt\nimport math\nfrom numpy.linalg import inv\n\ndef sign(a):\n\tif a >= 0:\n\t\treturn 1\n\telse:\n\t\treturn -1\n\ndef main():\n\n\tEout = []\n\tfor times in range(1000):\n\t\t# data = []\n\t\tX = np.zeros(shape=(1000, 6))\n\t\ty = np.zeros(shape=(1000, 1))\n\t\tX = np.asmatrix(X)\n\t\ty = np.asmatrix(y)\n\t\tfor i in range(1000):\n\t\t\tx1 = np.random.uniform(-1, 1)\n\t\t\tx2 = np.random.uniform(-1, 1)\n\t\t\tans = sign(math.pow(x1, 2) + math.pow(x2, 2) - 0.6)\n\n\t\t\tr = random.random()\n\t\t\tif r <= 0.1:\n\t\t\t\tans *= -1\n\t\t\t\n\t\t\tX[i] = [1, x1, x2, x1*x2, math.pow(x1, 2), math.pow(x2, 2)]\n\t\t\ty[i] = [ans]\n\t\t\t# data.append([x1, x2, ans])\n\n\t\tW_LIN = inv(X.transpose()*X)*X.transpose()*y\n\n\t\terror = 0.0\n\t\tfor i in range(1000):\n\t\t\tx = np.zeros(shape=(1, 6))\n\t\t\tx = np.asmatrix(x)\n\t\t\tx1 = np.random.uniform(-1, 1)\n\t\t\tx2 = np.random.uniform(-1, 1)\n\t\t\tans = sign(math.pow(x1, 2) + math.pow(x2, 2) - 0.6)\n\n\t\t\tr = random.random()\n\t\t\tif r <= 0.1:\n\t\t\t\tans *= -1\n\t\t\t\n\t\t\tx = [1, x1, x2, x1*x2, math.pow(x1, 2), math.pow(x2, 2)]\n\t\t\tif ans != sign(x * W_LIN):\n\t\t\t\terror += 1\n\t\tEout.append(error/1000.0)\n\n\t#print Eout\n\tprint sum(Eout) / float(len(Eout))\n\n\tx = [0.05, 0.06, 0.07, 0.08, 0.09, 0.10, 0.11, 0.12, 0.13, 0.14, 0.15, 0.16, 0.17, 0.18, 0.19, 0.20, 0.21]\n\ty = [0 for i in range(16)]\n\n\tfor num in Eout:\n\t\tfor i in range(16):\n\t\t\tif num >= x[i] and num < x[i+1]:\n\t\t\t\ty[i] += 1\n\t\t\t\tbreak\n\n\tplt.bar(x[:-1], y, width = 0.01 ,color = \"#fad981\")\n\tplt.title(\"15. 
Eout distribution\")\n\tplt.xlabel(\"Error out rate\")\n\tplt.ylabel(\"times\")\n\tplt.show()\n\nif __name__ == '__main__':\n\tmain()" }, { "alpha_fraction": 0.5386554598808289, "alphanum_fraction": 0.5663865804672241, "avg_line_length": 16.776119232177734, "blob_id": "5e30f05fc69a2ea4faba1efa58af520d042320f4", "content_id": "950a36fa6d470fdb7ec2c28c48684a919994abd6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1190, "license_type": "no_license", "max_line_length": 76, "num_lines": 67, "path": "/homework5/r04922080_hw5/19.py", "repo_name": "michawei/2015-MachineLearning-hw", "src_encoding": "UTF-8", "text": "from svmutil import *\nimport math\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nvalue = 0\nC = 0.1\ngamma = [0,1,2,3,4]\n\ndef main():\n\n\tf = open(\"features.train\", \"r\")\n\ty = []\n\tx = []\n\tfor line in f:\n\t\tline = line.strip().split()\n\t\tif line == '':\n\t\t\tbreak\n\t\tif float(line[0]) == value:\n\t\t\tans = 1\n\t\telse:\n\t\t\tans = -1\n\t\ttmp = []\n\t\ttmp.append(float(line[1]))\n\t\ttmp.append(float(line[2]))\n\t\ty.append(ans)\n\t\tx.append(tmp)\n\tf.close()\n\n\tf = open(\"features.test\", \"r\")\n\ty_test = []\n\tx_test = []\n\tfor line in f:\n\t\tline = line.strip().split()\n\t\tif line == '':\n\t\t\tbreak\n\t\tif float(line[0]) == value:\n\t\t\tans = 1\n\t\telse:\n\t\t\tans = -1\n\t\ttmp = []\n\t\ttmp.append(float(line[1]))\n\t\ttmp.append(float(line[2]))\n\t\ty_test.append(ans)\n\t\tx_test.append(tmp)\n\tf.close()\n\n\tEout = []\n\tfor g in gamma:\n\t\tprob = svm_problem(y, x)\n\t\tparam = svm_parameter('-t 2 -g ' + str(math.pow(10, g)) + ' -c ' + str(C))\n\t\tm = svm_train(prob, param)\n\t\tp_label, p_acc, p_val = svm_predict(y_test, x_test, m)\n\t\t\n\t\tacc = p_acc[0]\n\t\tEout_tmp = (100 - acc)/100.0\n\t\tEout.append(Eout_tmp)\n\n\tprint Eout\n\tplt.plot(gamma, Eout)\n\tplt.title(\"19.\")\n\tplt.xlabel(\"log(10)gamma\")\n\tplt.ylabel(\"Eout\")\n\tplt.show()\n\nif __name__ == '__main__':\n\tmain()" }, { "alpha_fraction": 0.43151694536209106, "alphanum_fraction": 0.5522827506065369, "avg_line_length": 20.919355392456055, "blob_id": "90a34d04ca8a84990dd9f3b65d42dd0574de20f7", "content_id": "d237562eebc5057a60273c968d9cdae99a3a7ed0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1358, "license_type": "no_license", "max_line_length": 197, "num_lines": 62, "path": "/homework3/r04922080_hw3/13.py", "repo_name": "michawei/2015-MachineLearning-hw", "src_encoding": "UTF-8", "text": "import numpy as np\nimport random\nimport matplotlib.pyplot as plt\nimport math\nfrom numpy.linalg import inv\n\ndef sign(a):\n\tif a >= 0:\n\t\treturn 1\n\telse:\n\t\treturn -1\n\ndef main():\n\n\tEin = []\n\tfor times in range(1000):\n\t\t# data = []\n\t\tX = np.zeros(shape=(1000, 3))\n\t\ty = np.zeros(shape=(1000, 1))\n\t\tX = np.asmatrix(X)\n\t\ty = np.asmatrix(y)\n\t\tfor i in range(1000):\n\t\t\tx1 = np.random.uniform(-1, 1)\n\t\t\tx2 = np.random.uniform(-1, 1)\n\t\t\tans = sign(math.pow(x1, 2) + math.pow(x2, 2) - 0.6)\n\t\t\tr = random.random()\n\t\t\tif r <= 0.1:\n\t\t\t\tans *= -1\n\t\t\tX[i] = [1, x1, x2]\n\t\t\ty[i] = [ans]\n\t\t\t# data.append([x1, x2, ans])\n\n\t\tW_LIN = inv(X.transpose()*X)*X.transpose()*y\n\t\ty_hat = X*W_LIN\n\n\t\t#print y_hat\n\t\terror = 0\n\t\tfor i in range(1000):\n\t\t\tif sign(y_hat.item(i)) != y[i]:\n\t\t\t\terror += 1\n\t\tEin.append(error / 1000.0)\n\tprint sum(Ein) / float(len(Ein))\n\n\tx = [0.35, 0.36, 0.37, 
0.38, 0.39, 0.40, 0.41, 0.42, 0.43, 0.44, 0.45, 0.46, 0.47, 0.48, 0.49, 0.50, 0.51, 0.52, 0.53, 0.54, 0.55, 0.56, 0.57, 0.58, 0.59, 0.60, 0.61, 0.62, 0.63, 0.64, 0.65, 0.66]\n\ty = [0 for i in range(31)]\n\n\tfor num in Ein:\n\t\tfor i in range(31):\n\t\t\tif num >= x[i] and num < x[i+1]:\n\t\t\t\ty[i] += 1\n\t\t\t\tbreak\n\n\tplt.bar(x[:-1], y, width = 0.01 ,color = \"#fad981\")\n\tplt.title(\"13. Ein distribution\")\n\tplt.xlabel(\"Error in\")\n\tplt.ylabel(\"times\")\n\tplt.show()\n\n\t# plt.plot(Ein)\n\nif __name__ == '__main__':\n\tmain()" }, { "alpha_fraction": 0.5059288740158081, "alphanum_fraction": 0.5447958111763, "avg_line_length": 18.727272033691406, "blob_id": "141e68db56af7e9ecc902027f727a3c22c57e6d9", "content_id": "c2bbaed820f333332538344f3db65209b56ad299", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1518, "license_type": "no_license", "max_line_length": 77, "num_lines": 77, "path": "/homework5/r04922080_hw5/20.py", "repo_name": "michawei/2015-MachineLearning-hw", "src_encoding": "UTF-8", "text": "from svmutil import *\nimport math\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport random\n\nvalue = 0\nC = 0.1\ngamma = [0, 1, 2, 3, 4]\n\ndef main():\n\n\tf = open(\"features.train\", \"r\")\n\ty = []\n\tx = []\n\tfor line in f:\n\t\tline = line.strip().split()\n\t\tif line == '':\n\t\t\tbreak\n\t\tif float(line[0]) == value:\n\t\t\tans = 1\n\t\telse:\n\t\t\tans = -1\n\t\ttmp = []\n\t\ttmp.append(float(line[1]))\n\t\ttmp.append(float(line[2]))\n\t\ty.append(ans)\n\t\tx.append(tmp)\n\tf.close()\n\tlength = len(y)\n\n\tgamma_num = [0, 0, 0, 0, 0]\n\n\tfor time in range(100):\n\t\tprint \"--------------------\" + str(time) + \"--------------------------\"\n\t\tindex = random.sample(range(length), 1000)\n\n\t\ty_train = []\n\t\tx_train = []\n\t\ty_val = []\n\t\tx_val = []\n\t\tfor i in range(length):\n\t\t\tif i in index:\n\t\t\t\ty_val.append(y[i])\n\t\t\t\tx_val.append(x[i])\n\t\t\telse:\n\t\t\t\ty_train.append(y[i])\n\t\t\t\tx_train.append(x[i])\n\n\t\tEval = []\n\t\tfor g in gamma:\n\t\t\tprob = svm_problem(y_train, x_train)\n\t\t\tparam = svm_parameter('-t 2 -g ' + str(math.pow(10, g)) + ' -c ' + str(C))\n\t\t\tm = svm_train(prob, param)\n\t\t\tp_label, p_acc, p_val = svm_predict(y_val, x_val, m)\n\t\t\t\n\t\t\tacc = p_acc[0]\n\t\t\tEval_tmp = (100 - acc)/100.0\n\t\t\tEval.append([Eval_tmp, g])\n\n\t\tmin_gamma = -1\n\t\tmin_Eval = 2147483647\n\t\tfor mem in Eval:\n\t\t\tif mem[0] < min_Eval:\n\t\t\t\tmin_Eval = mem[0]\n\t\t\t\tmin_gamma = mem[1]\n\t\tgamma_num[min_gamma] += 1\n\t\t\n\tprint gamma_num\n\tplt.bar(gamma, gamma_num, width = 1 ,color = \"#fad981\")\n\tplt.title(\"20.\")\n\tplt.xlabel(\"log(10)gamma\")\n\tplt.ylabel(\"times\")\n\tplt.show()\n\nif __name__ == '__main__':\n\tmain()" }, { "alpha_fraction": 0.5316846966743469, "alphanum_fraction": 0.5556414127349854, "avg_line_length": 16.98611068725586, "blob_id": "1f9b9518e5718ef707dd22ebc4c3b4c2567dfc01", "content_id": "e9bae76d2661da02dcc32ca66a17f2c984ef41d4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1294, "license_type": "no_license", "max_line_length": 65, "num_lines": 72, "path": "/homework5/r04922080_hw5/15.py", "repo_name": "michawei/2015-MachineLearning-hw", "src_encoding": "UTF-8", "text": "from svmutil import *\nimport math\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nvalue = 0\nC = [-6, -4, -2, 0, 2]\n\ndef main():\n\n\tf = open(\"features.train\", 
\"r\")\n\ty = []\n\tx = []\n\tfor line in f:\n\t\tline = line.strip().split()\n\t\tif line == '':\n\t\t\tbreak\n\t\tif float(line[0]) == value:\n\t\t\tans = 1\n\t\telse:\n\t\t\tans = -1\n\t\ttmp = []\n\t\ttmp.append(float(line[1]))\n\t\ttmp.append(float(line[2]))\n\t\ty.append(ans)\n\t\tx.append(tmp)\n\tf.close()\n\n\tf = open(\"features.test\", \"r\")\n\ty_test = []\n\tx_test = []\n\tfor line in f:\n\t\tline = line.strip().split()\n\t\tif line == '':\n\t\t\tbreak\n\t\tif float(line[0]) == value:\n\t\t\tans = 1\n\t\telse:\n\t\t\tans = -1\n\t\ttmp = []\n\t\ttmp.append(float(line[1]))\n\t\ttmp.append(float(line[2]))\n\t\ty_test.append(ans)\n\t\tx_test.append(tmp)\n\tf.close()\n\n\tW = []\n\tfor c in C:\n\t\tprob = svm_problem(y, x)\n\t\tparam = svm_parameter('-t 0 -c ' + str(math.pow(10, c)))\n\t\tm = svm_train(prob, param)\n\t\ty_alpha = np.array(m.get_sv_coef());\n\t\tsvs = m.get_SV();\n\n\t\tsv_matrix = []\n\t\tfor sv in svs:\n\t\t\tsv_matrix.append([sv[1], sv[2]])\n\n\t\tw = np.dot(np.transpose(y_alpha), sv_matrix)\n\t\tnorm_w = math.sqrt(math.pow(w[0][0], 2) + math.pow(w[0][1], 2))\n\t\tprint norm_w\n\t\tW.append(norm_w)\n\t\n\tprint W\n\tplt.plot(C, W)\n\tplt.title(\"15.\")\n\tplt.xlabel(\"log(10)C\")\n\tplt.ylabel(\"||w||\")\n\tplt.show()\n\nif __name__ == '__main__':\n\tmain()" }, { "alpha_fraction": 0.5071482062339783, "alphanum_fraction": 0.5364935994148254, "avg_line_length": 19.15151596069336, "blob_id": "d079f703445a57e73e4d225c8edd1fe4204a5044", "content_id": "6016c8fcbdf28c6a6d8e5c6af13eaee4a6d793ef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1329, "license_type": "no_license", "max_line_length": 90, "num_lines": 66, "path": "/homework2/r04922080_hw2/19.py", "repo_name": "michawei/2015-MachineLearning-hw", "src_encoding": "UTF-8", "text": "def sign(a):\n\tif a >= 0:\n\t\treturn 1\n\telse:\n\t\treturn -1\n\ndef main():\n\tf = open(\"hw2_train.dat\", \"r\")\n\ttrain_data = []\n\tfor i in range(9):\n\t\ttrain_data.append([])\n\n\tfor line in f:\n\t\tline = line[1:-1].split(' ')\n\t\t#print line\n\t\tfor i in range(9):\n\t\t\ttrain_data[i].append([float(line[i]), int(line[9])])\n\t\n\n\tEin = []\n\tfor i in range(9):\n\t\ttrain_data[i].sort()\n\t\tthetas = []\n\t\tdata_size = len(train_data[i])\n\t\tfor j in range(data_size-1):\n\t\t\tthetas.append( ( train_data[i][j][0] + train_data[i][j+1][0])/2 )\n\n\t\tEinmin = 2\n\t\tchoose_theta = 0\n\t\tchoose_s = 0\n\t\td = -1\n\t\tfor theta in thetas:\n\t\t\ts = 1\n\t\t\terror = 0.0\n\t\t\tfor x in train_data[i]:\n\t\t\t\tif s * sign( x[0] - theta ) != x[1]:\n\t\t\t\t\terror += 1\n\t\t\te = error / float(data_size)\n\t\t\t#print e\n\t\t\tif e < Einmin:\n\t\t\t\tEinmin = e\n\t\t\t\tchoose_theta = theta\n\t\t\t\tchoose_s = s\n\t\t\t\td = i\n\n\t\t\ts = -1\n\t\t\terror = 0.0\n\t\t\tfor x in train_data[i]:\n\t\t\t\tif s * sign( x[0] - theta ) != x[1]:\n\t\t\t\t\terror += 1\n\t\t\te = error / float(data_size)\n\t\t\t#print e\n\t\t\tif e < Einmin:\n\t\t\t\tEinmin = e\n\t\t\t\tchoose_theta = theta\n\t\t\t\tchoose_s = s\n\t\t\t\td = i\n\t\tEin.append([Einmin, choose_theta, choose_s, d])\n\n\tEin.sort()\n\t#print Ein[0][3]\n\tprint \"Ein of the optimal decision stump = \" + str(Ein[0][0])\n\tprint \"the optimal decision stump, theta = \" + str(Ein[0][1]) + \", s = \" + str(Ein[0][2])\n\nif __name__ == '__main__':\n\tmain()" }, { "alpha_fraction": 0.5, "alphanum_fraction": 0.75, "avg_line_length": 15, "blob_id": "b330e0e4d3eb7cb0e9a4a877b95302ac00f32a4a", "content_id": 
"5a07eb3c19a4fef0cd1ca1f64b5f152e3d450a56", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 16, "license_type": "no_license", "max_line_length": 15, "num_lines": 1, "path": "/README.md", "repo_name": "michawei/2015-MachineLearning-hw", "src_encoding": "UTF-8", "text": "# 2015 homework\n" }, { "alpha_fraction": 0.5314270853996277, "alphanum_fraction": 0.5638846158981323, "avg_line_length": 22.251497268676758, "blob_id": "7738d562dc9069fb1ba8497746b9d41720ca95b7", "content_id": "853b440f39d49e29309a9f2045aa3f9ee36d8c17", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3882, "license_type": "no_license", "max_line_length": 89, "num_lines": 167, "path": "/homework4/r04922080_hw4/19.py", "repo_name": "michawei/2015-MachineLearning-hw", "src_encoding": "UTF-8", "text": "import numpy as np\nimport random\nimport matplotlib.pyplot as plt\nimport math\nfrom numpy.linalg import inv\n\nLambda = 11.26\ntrain_size = 160\nval_size = 40\nDimension_Add_1 = 3\ntest_size = 1000\n\ndef sign(a):\n\tif a >= 0:\n\t\treturn 1\n\telse:\n\t\treturn -1\n\ndef main():\n\n\tX = []\n\ty = []\n\tX_fold = []\n\ty_fold = []\n\tfor fold in range(5):\n\t\tX_train = np.zeros(shape=(train_size, Dimension_Add_1))\n\t\ty_train = np.zeros(shape=(train_size, 1))\n\t\tX_train = np.asmatrix(X_train)\n\t\ty_train = np.asmatrix(y_train)\n\n\t\tX_val = np.zeros(shape=(val_size, Dimension_Add_1))\n\t\ty_val = np.zeros(shape=(val_size, 1))\n\t\tX_val = np.asmatrix(X_val)\n\t\ty_val = np.asmatrix(y_val)\n\t\tf = open(\"hw4_train.dat\", \"r\")\n\t\tcut_first = fold * 40\n\t\tcut_second = (fold+1) * 40\n\n\t\tnum = 0\n\t\tif fold != 0:\n\t\t\tfor line in f:\n\t\t\t\tline = line[:-1].split(' ')\n\t\t\t\tx = np.zeros(shape=(1, Dimension_Add_1))\n\t\t\t\tx = np.asmatrix(x)\n\t\t\t\tx.itemset((0, 0), 1)\n\t\t\t\tfor i in range(2):\n\t\t\t\t\tx.itemset((0, i+1), float(line[i]))\n\t\t\t\tX_train[num] = x\n\t\t\t\ty_train[num] = [float(line[2])]\n\t\t\t\tnum += 1\n\t\t\t\tif num == cut_first:\n\t\t\t\t\tbreak\n\n\t\tnum = 0\n\t\tfor line in f:\n\t\t\tline = line[:-1].split(' ')\n\t\t\tx = np.zeros(shape=(1, Dimension_Add_1))\n\t\t\tx = np.asmatrix(x)\n\t\t\tx.itemset((0, 0), 1)\n\t\t\tfor i in range(2):\n\t\t\t\tx.itemset((0, i+1), float(line[i]))\n\t\t\tX_val[num] = x\n\t\t\ty_val[num] = [float(line[2])]\n\t\t\tnum += 1\n\t\t\tif num == 40:\n\t\t\t\tbreak\n\n\t\tnum = 0\n\t\tif fold != 4:\n\t\t\tfor line in f:\n\t\t\t\tline = line[:-1].split(' ')\n\t\t\t\tx = np.zeros(shape=(1, Dimension_Add_1))\n\t\t\t\tx = np.asmatrix(x)\n\t\t\t\tx.itemset((0, 0), 1)\n\t\t\t\tfor i in range(2):\n\t\t\t\t\tx.itemset((0, i+1), float(line[i]))\n\t\t\t\tX_train[num+cut_first] = x\n\t\t\t\ty_train[num+cut_first] = [float(line[2])]\n\t\t\t\tnum += 1\n\t\tf.close()\n\t\tX.append(X_train)\n\t\ty.append(y_train)\n\t\tX_fold.append(X_val)\n\t\ty_fold.append(y_val)\n\t\t# print X_train\n\t\t# print X_val\n\n\t# f = open(\"hw4_test.dat\", \"r\")\n\t# X_test = np.zeros(shape=(test_size, Dimension_Add_1))\n\t# y_test = np.zeros(shape=(test_size, 1))\n\t# X_test = np.asmatrix(X_test)\n\t# y_test = np.asmatrix(y_test)\n\t# num = 0\n\t# for line in f:\n\t# \tline = line[:-1].split(' ')\n\t# \tx = np.zeros(shape=(1, Dimension_Add_1))\n\t# \tx = np.asmatrix(x)\n\t# \tx.itemset((0, 0), 1)\n\t# \tfor i in range(2):\n\t# \t\tx.itemset((0, i+1), float(line[i]))\n\t# \tX_test[num] = x\n\t# \ty_test[num] = [float(line[2])]\n\t# \tnum += 1\n\t# f.close()\n\n\tEcv = 
[]\n\tplot_x = []\n\tplot_y = []\n\tfor i in range(-10, 3):\n\t\tpower = - 8 - i\n\t\tLambda = math.pow(10, power)\n\t\tI = np.identity(Dimension_Add_1)\n\n\t\tE = []\n\t\tfor fold in range(5):\n\t\t\tX_train = X[fold]\n\t\t\ty_train = y[fold]\n\t\t\tX_val = X_fold[fold]\n\t\t\ty_val = y_fold[fold]\n\t\t\t# print X_train\n\t\t\t# print X_val\n\t\t\twReg = inv(Lambda * I + X_train.transpose() * X_train) * X_train.transpose() * y_train\n\n\t\t\terror = 0.0\n\t\t\tfor j in range(val_size):\n\t\t\t\tif sign((wReg.transpose() * X_val[j].transpose()).item(0)) != y_val[j].item(0):\n\t\t\t\t\terror += 1\n\t\t\te = error / float(val_size)\n\t\t\tE.append(e)\n\t\t# print E\n\t\tmean = sum(E) / float(len(E))\n\t\tEcv.append([mean, power])\n\t\tplot_x.append(power)\n\t\tplot_y.append(mean)\n\n\tminimum = Ecv[0][0]\n\tfor member in Ecv:\n\t\tif minimum > member[0]:\n\t\t\tminimum = member[0]\n\t\t\tans = member\n\n\tprint \"When log10(lambda) = \" + str(ans[1]) + \", we have minimum Ecv.\"\n\tprint \"Ecv = \" + str(ans[0])\n\n\t# error = 0.0\n\t# for i in range(train_size):\n\t# \tif sign((wReg.transpose() * X_train[i].transpose()).item(0)) != y_train[i].item(0):\n\t# \t\terror += 1\n\n\t# print \"Etrain = \" + str(error / float(train_size))\n\t# print \"Eval = \" + str(ans[0])\n\n\t# error = 0.0\n\t# for i in range(test_size):\n\t# \tif sign((wReg.transpose() * X_test[i].transpose()).item(0)) != y_test[i].item(0):\n\t# \t\terror += 1\n\n\t# print \"Eout = \" + str(error / float(test_size))\n\n\tplt.bar(plot_x, plot_y, width = 1 ,color = \"#fad981\")\n\tplt.title(\"19. Ecv with respect to log10(lambda)\")\n\tplt.xlabel(\"log10(lambda)\")\n\tplt.ylabel(\"Ecv\")\n\tplt.show()\n\nif __name__ == '__main__':\n\tmain()" }, { "alpha_fraction": 0.5518501400947571, "alphanum_fraction": 0.5893101692199707, "avg_line_length": 21.121212005615234, "blob_id": "b30b4195de1e59f3e87cd1f2c506f1e8a93c10d3", "content_id": "2973b3f4f22d7f533fe28bfe0b34d7cd367935ce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2189, "license_type": "no_license", "max_line_length": 83, "num_lines": 99, "path": "/homework4/r04922080_hw4/14.py", "repo_name": "michawei/2015-MachineLearning-hw", "src_encoding": "UTF-8", "text": "import numpy as np\nimport random\nimport matplotlib.pyplot as plt\nimport math\nfrom numpy.linalg import inv\n\nLambda = 11.26\nsize = 200\nDimension_Add_1 = 3\ntest_size = 1000\n\ndef sign(a):\n\tif a >= 0:\n\t\treturn 1\n\telse:\n\t\treturn -1\n\ndef main():\n\n\tf = open(\"hw4_train.dat\", \"r\")\n\tX = np.zeros(shape=(size, Dimension_Add_1))\n\ty = np.zeros(shape=(size, 1))\n\tX = np.asmatrix(X)\n\ty = np.asmatrix(y)\n\tnum = 0\n\tfor line in f:\n\t\tline = line[:-1].split(' ')\n\t\tx = np.zeros(shape=(1, Dimension_Add_1))\n\t\tx = np.asmatrix(x)\n\t\tx.itemset((0, 0), 1)\n\t\tfor i in range(2):\n\t\t\tx.itemset((0, i+1), float(line[i]))\n\t\tX[num] = x\n\t\ty[num] = [float(line[2])]\n\t\tnum += 1\n\tf.close()\n\n\tf = open(\"hw4_test.dat\", \"r\")\n\tX_test = np.zeros(shape=(test_size, Dimension_Add_1))\n\ty_test = np.zeros(shape=(test_size, 1))\n\tX_test = np.asmatrix(X_test)\n\ty_test = np.asmatrix(y_test)\n\tnum = 0\n\tfor line in f:\n\t\tline = line[:-1].split(' ')\n\t\tx = np.zeros(shape=(1, Dimension_Add_1))\n\t\tx = np.asmatrix(x)\n\t\tx.itemset((0, 0), 1)\n\t\tfor i in range(2):\n\t\t\tx.itemset((0, i+1), float(line[i]))\n\t\tX_test[num] = x\n\t\ty_test[num] = [float(line[2])]\n\t\tnum += 1\n\tf.close()\n\n\tEin = []\n\tplot_x = []\n\tplot_y = 
[]\n\tfor i in range(-10, 3):\n\t\tpower = - 8 - i\n\t\tLambda = math.pow(10, power)\n\t\tI = np.identity(Dimension_Add_1)\n\t\twReg = inv(Lambda * I + X.transpose() * X) * X.transpose() * y\n\n\t\terror = 0.0\n\t\tfor j in range(size):\n\t\t\tif sign((wReg.transpose() * X[j].transpose()).item(0)) != y[j].item(0):\n\t\t\t\terror += 1\n\t\tein = error / float(size)\n\t\tEin.append([ein, power, wReg])\n\t\tplot_x.append(power)\n\t\tplot_y.append(ein)\n\n\tminimum = Ein[0][0]\n\tfor member in Ein:\n\t\tif minimum > member[0]:\n\t\t\tminimum = member[0]\n\t\t\tans = member\n\n\tprint \"When log10(lambda) = \" + str(ans[1]) + \", we have minimum Ein.\"\n\tprint \"Ein = \" + str(ans[0])\n\twReg = ans[2]\n\n\terror = 0.0\n\tfor i in range(test_size):\n\t\tif sign((wReg.transpose() * X_test[i].transpose()).item(0)) != y_test[i].item(0):\n\t\t\terror += 1\n\n\tprint \"Eout = \" + str(error / float(test_size))\n\n\n\tplt.bar(plot_x, plot_y, width = 1 ,color = \"#fad981\")\n\tplt.title(\"14. Curve of Ein with respect to log10(lambda)\")\n\tplt.xlabel(\"log10(lambda)\")\n\tplt.ylabel(\"Ein\")\n\tplt.show()\n\nif __name__ == '__main__':\n\tmain()" }, { "alpha_fraction": 0.5397163033485413, "alphanum_fraction": 0.5631205439567566, "avg_line_length": 17.324674606323242, "blob_id": "2920f5581469ce5c16f22646b87e795195f254a9", "content_id": "db1eab35356e8c3c0d122a3916a4cd29cab20344", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1410, "license_type": "no_license", "max_line_length": 67, "num_lines": 77, "path": "/homework5/r04922080_hw5/17.py", "repo_name": "michawei/2015-MachineLearning-hw", "src_encoding": "UTF-8", "text": "from svmutil import *\nimport math\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nvalue = 8\nC = [-6, -4, -2, 0, 2]\n\ndef main():\n\n\tf = open(\"features.train\", \"r\")\n\ty = []\n\tx = []\n\tfor line in f:\n\t\tline = line.strip().split()\n\t\tif line == '':\n\t\t\tbreak\n\t\tif float(line[0]) == value:\n\t\t\tans = 1\n\t\telse:\n\t\t\tans = -1\n\t\ttmp = []\n\t\ttmp.append(float(line[1]))\n\t\ttmp.append(float(line[2]))\n\t\ty.append(ans)\n\t\tx.append(tmp)\n\tf.close()\n\n\tf = open(\"features.test\", \"r\")\n\ty_test = []\n\tx_test = []\n\tfor line in f:\n\t\tline = line.strip().split()\n\t\tif line == '':\n\t\t\tbreak\n\t\tif float(line[0]) == value:\n\t\t\tans = 1\n\t\telse:\n\t\t\tans = -1\n\t\ttmp = []\n\t\ttmp.append(float(line[1]))\n\t\ttmp.append(float(line[2]))\n\t\ty_test.append(ans)\n\t\tx_test.append(tmp)\n\tf.close()\n\n\talphas = []\n\tfor c in C:\n\t\tprob = svm_problem(y, x)\n\t\tparam = svm_parameter('-t 1 -d 2 -c ' + str(math.pow(10, c)))\n\t\tm = svm_train(prob, param)\n\n\t\ty_alpha = np.array(m.get_sv_coef());\n\t\t\n\t\talpha = 0\n\t\tfor mem in y_alpha:\n\t\t\talpha += abs(mem)\n\t\tprint alpha\n\t\talphas.append(alpha)\n\t\t# sv_matrix = []\n\t\t# for sv in svs:\n\t\t# \tsv_matrix.append([sv[1], sv[2]])\n\n\t\t# w = np.dot(np.transpose(y_alpha), sv_matrix)\n\t\t# norm_w = math.sqrt(math.pow(w[0][0], 2) + math.pow(w[0][1], 2))\n\t\t# print norm_w\n\t\t# W.append(norm_w)\n\t\n\tprint alphas\n\tplt.plot(C, alphas)\n\tplt.title(\"17.\")\n\tplt.xlabel(\"log(10)C\")\n\tplt.ylabel(\"sigma(alpha)\")\n\tplt.show()\n\nif __name__ == '__main__':\n\tmain()" }, { "alpha_fraction": 0.5155038833618164, "alphanum_fraction": 0.5523256063461304, "avg_line_length": 15.125, "blob_id": "33c7cfcf6d6618c3e157341d546213629ec99ec3", "content_id": "2e5275728e92a04304573d60b55b7e8ca08b0a1a", "detected_licenses": 
[], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 516, "license_type": "no_license", "max_line_length": 50, "num_lines": 32, "path": "/homework2/r04922080_hw2/20.py", "repo_name": "michawei/2015-MachineLearning-hw", "src_encoding": "UTF-8", "text": "\ntheta = 1.6175\ns = -1\n\ndef sign(a):\n\tif a >= 0:\n\t\treturn 1\n\telse:\n\t\treturn -1\n\ndef main():\n\tf = open(\"hw2_test.dat\", \"r\")\n\ttest_data = []\n\n\tfor line in f:\n\t\tline = line[1:-1].split(' ')\n\t\t#print line\n\t\ttest_data.append([float(line[3]), int(line[9])])\n\t\n\terror = 0.0\n\tlength = len(test_data)\n\tfor j in range(length):\n\t\tx = test_data[j]\n\t\tif s * sign( x[0] - theta ) != x[1]:\n\t\t\terror += 1\n\n\tEout = error / float(length)\n\tprint \"Eout = \", Eout\n\t#print sum(Etest)/float(len(Etest))\n\n\nif __name__ == '__main__':\n\tmain()" }, { "alpha_fraction": 0.5268456339836121, "alphanum_fraction": 0.5536912679672241, "avg_line_length": 17.873416900634766, "blob_id": "29ebd317779be14a2d916c54526994889c6f360a", "content_id": "a68b7d39cd3678acf9ebae21040227c39fa21fd9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1490, "license_type": "no_license", "max_line_length": 67, "num_lines": 79, "path": "/homework5/r04922080_hw5/16.py", "repo_name": "michawei/2015-MachineLearning-hw", "src_encoding": "UTF-8", "text": "from svmutil import *\nimport math\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nvalue = 8\nC = [-6, -4, -2, 0, 2]\n\ndef main():\n\n\tf = open(\"features.train\", \"r\")\n\ty = []\n\tx = []\n\tfor line in f:\n\t\tline = line.strip().split()\n\t\tif line == '':\n\t\t\tbreak\n\t\tif float(line[0]) == value:\n\t\t\tans = 1\n\t\telse:\n\t\t\tans = -1\n\t\ttmp = []\n\t\ttmp.append(float(line[1]))\n\t\ttmp.append(float(line[2]))\n\t\ty.append(ans)\n\t\tx.append(tmp)\n\tf.close()\n\n\tf = open(\"features.test\", \"r\")\n\ty_test = []\n\tx_test = []\n\tfor line in f:\n\t\tline = line.strip().split()\n\t\tif line == '':\n\t\t\tbreak\n\t\tif float(line[0]) == value:\n\t\t\tans = 1\n\t\telse:\n\t\t\tans = -1\n\t\ttmp = []\n\t\ttmp.append(float(line[1]))\n\t\ttmp.append(float(line[2]))\n\t\ty_test.append(ans)\n\t\tx_test.append(tmp)\n\tf.close()\n\n\tEin = []\n\tfor c in C:\n\t\tprob = svm_problem(y, x)\n\t\tparam = svm_parameter('-t 1 -d 2 -c ' + str(math.pow(10, c)))\n\t\tm = svm_train(prob, param)\n\t\tp_label, p_acc, p_val = svm_predict(y, x, m)\n\n\t\tacc = p_acc[0]\n\t\tEin_tmp = (100 - acc)/100.0\n\t\tEin.append(Ein_tmp)\n\t\t# length = len(p_val)\n\t\t# for i in range(length):\n\n\t\t# y_alpha = np.array(m.get_sv_coef());\n\t\t# svs = m.get_SV();\n\t\t# sv_matrix = []\n\t\t# for sv in svs:\n\t\t# \tsv_matrix.append([sv[1], sv[2]])\n\n\t\t# w = np.dot(np.transpose(y_alpha), sv_matrix)\n\t\t# norm_w = math.sqrt(math.pow(w[0][0], 2) + math.pow(w[0][1], 2))\n\t\t# print norm_w\n\t\t# W.append(norm_w)\n\t\n\tprint Ein\n\tplt.plot(C, Ein)\n\tplt.title(\"16.\")\n\tplt.xlabel(\"log(10)C\")\n\tplt.ylabel(\"Ein\")\n\tplt.show()\n\nif __name__ == '__main__':\n\tmain()" }, { "alpha_fraction": 0.4601227045059204, "alphanum_fraction": 0.5099693536758423, "avg_line_length": 17.913043975830078, "blob_id": "14cf5c6c9970f6600887532ac7b7e65c019826db", "content_id": "f8dad6f359c70b80a89d9c98a1039c978bd8ef4f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1304, "license_type": "no_license", "max_line_length": 75, "num_lines": 69, "path": 
"/homework5/r04922080_hw5/18.py", "repo_name": "michawei/2015-MachineLearning-hw", "src_encoding": "UTF-8", "text": "from svmutil import *\nimport math\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nvalue = 0\nC = [-3, -2, -1, 0, 1]\n\ndef main():\n\n\tf = open(\"features.train\", \"r\")\n\ty = []\n\tx = []\n\tfor line in f:\n\t\tline = line.strip().split()\n\t\tif line == '':\n\t\t\tbreak\n\t\tif float(line[0]) == value:\n\t\t\tans = 1\n\t\telse:\n\t\t\tans = -1\n\t\ttmp = []\n\t\ttmp.append(float(line[1]))\n\t\ttmp.append(float(line[2]))\n\t\ty.append(ans)\n\t\tx.append(tmp)\n\tf.close()\n\n\tf = open(\"features.test\", \"r\")\n\ty_test = []\n\tx_test = []\n\tfor line in f:\n\t\tline = line.strip().split()\n\t\tif line == '':\n\t\t\tbreak\n\t\tif float(line[0]) == value:\n\t\t\tans = 1\n\t\telse:\n\t\t\tans = -1\n\t\ttmp = []\n\t\ttmp.append(float(line[1]))\n\t\ttmp.append(float(line[2]))\n\t\ty_test.append(ans)\n\t\tx_test.append(tmp)\n\tf.close()\n\n\tdistance = []\n\tfor c in C:\n\t\tprint \"---------------------\" + str(c) + \"------------------------\"\n\t\tprob = svm_problem(y, x)\n\t\tparam = svm_parameter('-t 2 -g 100 -c ' + str(math.pow(10, c)))\n\t\tm = svm_train(prob, param)\n\n\n\t####################### This ||w|| is from traning #######################\n\t# w = [ 0.121384, 1.212441, 7.761528, 11.882027, 24.329622 ]\n\n\t# for i in range(len(w)):\n\t# \tw[i] = 1.0 / w[i]\n\t# print w\n\n\t# plt.plot(C, w)\n\t# plt.title(\"18.\")\n\t# plt.xlabel(\"log(10)C\")\n\t# plt.ylabel(\"distance\")\n\t# plt.show()\n\nif __name__ == '__main__':\n\tmain()" }, { "alpha_fraction": 0.5760368704795837, "alphanum_fraction": 0.6320453882217407, "avg_line_length": 21.94308853149414, "blob_id": "746487c8c84b929d43182e6fa636bb1d8e22f003", "content_id": "dc641102ccc11eb4a49a900f34eeb88288b315c8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2821, "license_type": "no_license", "max_line_length": 191, "num_lines": 123, "path": "/homework1/r04922080_hw1/20.py", "repo_name": "michawei/2015-MachineLearning-hw", "src_encoding": "UTF-8", "text": "import random\nimport matplotlib.pyplot as plt\n\ndef normalVectorCompute(a, b, s):\n\tans = []\n\tfor i in range(5):\n\t\tans.append(a[i] + s*b[i])\n\treturn ans\n\ndef innerProduct(a, b):\n\tans = 0\n\tfor i in range(5):\n\t\tans += a[i]*b[i]\n\treturn ans\n\ndef plusOrMinus(yn):\n\tif yn > 0 :\n\t\treturn 1\n\telse :\n\t\treturn -1\n\ndef checkErrorRate(w, testData, size):\n\terror = 0\n\tfor member in testData:\n\t\tgn = innerProduct(w, member)\n\t\tgn = plusOrMinus(gn)\n\t\tif gn != member[-1]:\n\t\t\terror += 1\n\treturn float(error)/size\n\ndef main():\n\tf = open(\"hw1_18_train.dat\", \"r\")\n\tdata = []\n\tfor line in f:\n\t\tline = line[:-1].split('\\t');\n\t\ttmp = line[0].split(' ');\n\t\tmember = []\n\t\tfor i in tmp:\n\t\t\tmember.append(float(i))\n\t\tmember.append(float(1))\n\t\tmember.append(float(line[1]))\n\t\tdata.append(member)\n\t#print data\n\tf.close()\n\n\tf = open(\"hw1_18_test.dat\", \"r\")\n\ttestData = []\n\tfor line in f:\n\t\tline = line[:-1].split('\\t');\n\t\ttmp = line[0].split(' ');\n\t\tmember = []\n\t\tfor i in tmp:\n\t\t\tmember.append(float(i))\n\t\tmember.append(float(1))\n\t\tmember.append(float(line[1]))\n\t\ttestData.append(member)\n\t#print testData\n\tf.close()\n\tsizeOfTestData = float(len(testData))\n\n\ttwoThousantErrorRate = []\n\tfor count in range(2000):\n\n\t\t#print count\n\t\t\n\t\tindexRandom = 
range(len(data))\n\t\trandom.shuffle(indexRandom)\n\n\t\tnormalVector = data[indexRandom[0]][:-1]\n\n\t\t# first normalVector update\n\t\tupdateCount = 1\n\t\tpocket = []\n\t\tpocketErrorRate = float(1)\n\t\t#notYet = True\n\t\twhile updateCount != 100 :\n\t\t\t#notYet = False\n\t\t\tfor i in indexRandom:\n\t\t\t\tvector = data[i]\n\t\t\t\tgn = innerProduct(normalVector, vector[:-1])\n\t\t\t\tgn = plusOrMinus(gn);\n\n\t\t\t\tif gn != vector[-1]:\n\t\t\t\t\t#notYet = True\n\t\t\t\t\tupdateCount += 1\n\t\t\t\t\tnormalVector = normalVectorCompute(normalVector, vector[:-1], vector[-1])\n\t\t\t\t\terrorRate = checkErrorRate(normalVector, data, sizeOfTestData)\n\n\t\t\t\t\tif errorRate < pocketErrorRate:\n\t\t\t\t\t\tpocketErrorRate = errorRate\n\t\t\t\t\t\tpocket = normalVector\n\n\t\t\t\t\tif updateCount == 100:\n\t\t\t\t\t\tbreak\n\t\tpocketErrorRate = checkErrorRate(pocket, testData, sizeOfTestData)\n\t\ttwoThousantErrorRate.append(pocketErrorRate)\n\t\t\n\t#print twoThousantErrorRate\n\tx = [0.00, 0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.10, 0.11, 0.12, 0.13, 0.14, 0.15, 0.16, 0.17, 0.18, 0.19, 0.20, 0.21, 0.22, 0.23, 0.24, 0.25, 0.26, 0.27, 0.28, 0.29, 0.30]\n\ty = [0 for i in range(30)]\n\n\tans = float(0)\n\tfor num in twoThousantErrorRate:\n\t\tans += num\n\t\tfor i in range(30):\n\t\t\tif num > x[i] and num < x[i+1]:\n\t\t\t\ty[i] += 1\n\t\t\t\tbreak\n\t#print pocket\n\t#print x\n\t#print y\n\n\tprint \"average Error Rate = \" + str(ans/float(len(twoThousantErrorRate)))\n\n\t#plt.hist(twoThousantErrorRate)\n\tplt.bar(x[:-1], y, width = 0.01 ,color = \"#660066\")\n\tplt.title(\"18. Error Rate versus Frequency ( pocket, 100 updates ) \")\n\tplt.xlabel(\"Error Rate\")\n\tplt.ylabel(\"Frequency\")\n\tplt.show()\n\nif __name__ == '__main__':\n\tmain();" }, { "alpha_fraction": 0.56080162525177, "alphanum_fraction": 0.596807062625885, "avg_line_length": 22.559999465942383, "blob_id": "8195f240f7abd5d2b0f050598976a9067089bb33", "content_id": "44b1605ecab66fd55bc0fa434e6b4fa185b76307", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2944, "license_type": "no_license", "max_line_length": 88, "num_lines": 125, "path": "/homework4/r04922080_hw4/17.py", "repo_name": "michawei/2015-MachineLearning-hw", "src_encoding": "UTF-8", "text": "import numpy as np\nimport random\nimport matplotlib.pyplot as plt\nimport math\nfrom numpy.linalg import inv\n\nLambda = 11.26\ntrain_size = 120\nval_size = 80\nDimension_Add_1 = 3\ntest_size = 1000\n\ndef sign(a):\n\tif a >= 0:\n\t\treturn 1\n\telse:\n\t\treturn -1\n\ndef main():\n\n\tf = open(\"hw4_train.dat\", \"r\")\n\tX_train = np.zeros(shape=(train_size, Dimension_Add_1))\n\ty_train = np.zeros(shape=(train_size, 1))\n\tX_train = np.asmatrix(X_train)\n\ty_train = np.asmatrix(y_train)\n\n\tX_val = np.zeros(shape=(val_size, Dimension_Add_1))\n\ty_val = np.zeros(shape=(val_size, 1))\n\tX_val = np.asmatrix(X_val)\n\ty_val = np.asmatrix(y_val)\n\tnum = 0\n\tfor line in f:\n\t\tline = line[:-1].split(' ')\n\t\tx = np.zeros(shape=(1, Dimension_Add_1))\n\t\tx = np.asmatrix(x)\n\t\tx.itemset((0, 0), 1)\n\t\tfor i in range(2):\n\t\t\tx.itemset((0, i+1), float(line[i]))\n\t\tX_train[num] = x\n\t\ty_train[num] = [float(line[2])]\n\t\tnum += 1\n\t\tif num == 120:\n\t\t\tbreak\n\n\tnum = 0\n\tfor line in f:\n\t\tline = line[:-1].split(' ')\n\t\tx = np.zeros(shape=(1, Dimension_Add_1))\n\t\tx = np.asmatrix(x)\n\t\tx.itemset((0, 0), 1)\n\t\tfor i in range(2):\n\t\t\tx.itemset((0, i+1), 
float(line[i]))\n\t\tX_val[num] = x\n\t\ty_val[num] = [float(line[2])]\n\t\tnum += 1\n\tf.close()\n\n\tf = open(\"hw4_test.dat\", \"r\")\n\tX_test = np.zeros(shape=(test_size, Dimension_Add_1))\n\ty_test = np.zeros(shape=(test_size, 1))\n\tX_test = np.asmatrix(X_test)\n\ty_test = np.asmatrix(y_test)\n\tnum = 0\n\tfor line in f:\n\t\tline = line[:-1].split(' ')\n\t\tx = np.zeros(shape=(1, Dimension_Add_1))\n\t\tx = np.asmatrix(x)\n\t\tx.itemset((0, 0), 1)\n\t\tfor i in range(2):\n\t\t\tx.itemset((0, i+1), float(line[i]))\n\t\tX_test[num] = x\n\t\ty_test[num] = [float(line[2])]\n\t\tnum += 1\n\tf.close()\n\n\tEval = []\n\tplot_x = []\n\tplot_y = []\n\tfor i in range(-10, 3):\n\t\tpower = - 8 - i\n\t\tLambda = math.pow(10, power)\n\t\tI = np.identity(Dimension_Add_1)\n\t\twReg = inv(Lambda * I + X_train.transpose() * X_train) * X_train.transpose() * y_train\n\n\t\terror = 0.0\n\t\tfor j in range(val_size):\n\t\t\tif sign((wReg.transpose() * X_val[j].transpose()).item(0)) != y_val[j].item(0):\n\t\t\t\terror += 1\n\t\teVal = error / float(val_size)\n\t\tEval.append([eVal, power, wReg])\n\t\tplot_x.append(power)\n\t\tplot_y.append(eVal)\n\n\tminimum = Eval[0][0]\n\tfor member in Eval:\n\t\tif minimum > member[0]:\n\t\t\tminimum = member[0]\n\t\t\tans = member\n\n\tprint \"When log10(lambda) = \" + str(ans[1]) + \", we have minimum Eval.\"\n\twReg = ans[2]\n\n\terror = 0.0\n\tfor i in range(train_size):\n\t\tif sign((wReg.transpose() * X_train[i].transpose()).item(0)) != y_train[i].item(0):\n\t\t\terror += 1\n\n\tprint \"Etrain = \" + str(error / float(train_size))\n\tprint \"Eval = \" + str(ans[0])\n\n\terror = 0.0\n\tfor i in range(test_size):\n\t\tif sign((wReg.transpose() * X_test[i].transpose()).item(0)) != y_test[i].item(0):\n\t\t\terror += 1\n\n\tprint \"Eout = \" + str(error / float(test_size))\n\n\tplt.bar(plot_x, plot_y, width = 1 ,color = \"#fad981\")\n\tplt.title(\"17. 
Eval with respect to log10(lambda)\")\n\tplt.xlabel(\"log10(lambda)\")\n\tplt.ylabel(\"Eval\")\n\tplt.show()\n\nif __name__ == '__main__':\n\tmain()" }, { "alpha_fraction": 0.5249999761581421, "alphanum_fraction": 0.5526315569877625, "avg_line_length": 18.253164291381836, "blob_id": "5f051f0289a35c2ab4d7de8b10bd239ef088b5b0", "content_id": "9cc859989ebc555a9c0a54439cf7a0b104f654ed", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1520, "license_type": "no_license", "max_line_length": 74, "num_lines": 79, "path": "/homework6/r04922080_hw6/19.py", "repo_name": "michawei/2015-MachineLearning-hw", "src_encoding": "UTF-8", "text": "import math\nimport numpy as np\nfrom numpy.linalg import inv\nimport matplotlib.pyplot as plt\n\nGamma = [32, 2, 0.125]\nLambda = [0.001, 1, 1000]\nsize = 400\n\ndef sign(a):\n\tif ( a >= 0 ):\n\t\treturn 1\n\telse:\n\t\treturn -1\n\ndef main():\n\n\tf = open(\"hw2_lssvm_all.dat\", \"r\")\n\tx = []\n\ty = []\n\tx_test = []\n\ty_test = []\n\tnum = 0\n\tfor line in f:\n\t\tline = line.strip().split()\n\t\tif line == '':\n\t\t\tbreak\n\t\ttmp = []\n\t\tfor string in line[:-1]:\n\t\t\ttmp.append(float(string))\n\t\tx.append(tmp)\n\t\ty.append(float(line[-1]))\n\t\tnum += 1\n\t\tif num == size:\n\t\t\tbreak\n\n\tx = np.asmatrix(x)\n\ty = np.asmatrix(y)\n\n\tfor line in f:\n\t\tline = line.strip().split()\n\t\tif line == '':\n\t\t\tbreak\n\t\ttmp = []\n\t\tfor string in line[:-1]:\n\t\t\ttmp.append(float(string))\n\t\tx_test.append(tmp)\n\t\ty_test.append(float(line[-1]))\n\n\tlength = len(x)\n\tEin = []\n\tfor gamma in Gamma:\n\t\t# K\n\t\tK = np.zeros(shape=(size, size))\n\t\tK = np.asmatrix(K)\n\t\tfor i in range(size):\n\t\t\tfor j in range(size):\n\t\t\t\ttmp = x[i] - x[j]\n\t\t\t\tL = math.exp((tmp*tmp.transpose()).item(0) * (-1) * gamma)\n\t\t\t\tK.itemset((i, j), L)\n\n\t\tfor lambdaa in Lambda:\n\t\t\tBeta = inv(lambdaa * np.identity(size) + K) * y.transpose()\n\t\t\terror = 0.0\n\t\t\tfor i in range(length):\n\t\t\t\tg = 0.0\n\t\t\t\tfor j in range(length):\n\t\t\t\t\tg += Beta.item(j) * K.item((j, i))\n\t\t\t\tif y.item(i) != sign(g):\n\t\t\t\t\terror += 1.0\n\t\t\terror /= float(length)\n\t\t\tEin.append([error, gamma, lambdaa])\n\tEin.sort()\n\tfor mem in Ein:\n\t\tif mem[0] == 0:\n\t\t\tprint \"min Ein = \", mem[0], \", gamma = \", mem[1], \", lambda = \", mem[2]\n\nif __name__ == '__main__':\n\tmain()" }, { "alpha_fraction": 0.4549402892589569, "alphanum_fraction": 0.5401737093925476, "avg_line_length": 18.606382369995117, "blob_id": "d70c4c434b712729c5006e07f8040608ecbf6197", "content_id": "f5142f99fad6594a69a7008b4129c58475ccab9f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1842, "license_type": "no_license", "max_line_length": 150, "num_lines": 94, "path": "/homework2/r04922080_hw2/18.py", "repo_name": "michawei/2015-MachineLearning-hw", "src_encoding": "UTF-8", "text": "import numpy as np\nimport random\nimport matplotlib.pyplot as plt\n\ndef sign(a):\n\tif a >= 0:\n\t\treturn 1\n\telse:\n\t\treturn -1\n\ndef main():\n\n\tEout = []\n\tfor times in range(5000):\n\t\tdata = []\n\t\tans = []\n\t\t#data.append(-1.0)\n\t\tfor i in range(20):\n\t\t\tx = np.random.uniform(-1, 1)\n\t\t\tdata.append(x)\n\t\tdata.sort()\n\t\t#print data\n\n\t\tfor x in data:\n\t\t\tif sign(x) == 1:\n\t\t\t\tr = random.random()\n\t\t\t\tif r <= 0.2:\n\t\t\t\t\tans.append(-1)\n\t\t\t\telse:\n\t\t\t\t\tans.append(1)\n\t\t\telse:\n\t\t\t\tr = 
random.random()\n\t\t\t\tif r <= 0.2:\n\t\t\t\t\tans.append(1)\n\t\t\t\telse:\n\t\t\t\t\tans.append(-1)\n\n\t\tthetas = []\n\t\tthetas.append((-1.0 + data[0]) / 2)\n\t\tfor i in range(0, 19):\n\t\t\tthetas.append((data[i]+data[i+1])/2)\n\t\tthetas.append((data[19] + 1.0) / 2)\n\t\t#print thetas\n\n\t\tEinmin = 2\n\t\tchoose_theta = 0\n\t\tchoose_s = 0\n\t\tfor theta in thetas:\n\t\t\ts = 1\n\t\t\terror = 0.0\n\t\t\tfor i in range(len(data)):\n\t\t\t\tif s * sign( data[i] - theta ) != ans[i]:\n\t\t\t\t\terror += 1\n\t\t\te = error / 20.0\n\t\t\t#print e\n\t\t\tif e < Einmin:\n\t\t\t\tEinmin = e\n\t\t\t\tchoose_theta = theta\n\t\t\t\tchoose_s = s\n\n\t\t\ts = -1\n\t\t\terror = 0.0\n\t\t\tfor i in range(len(data)):\n\t\t\t\tif s * sign( data[i] - theta ) != ans[i]:\n\t\t\t\t\terror += 1\n\t\t\te = error / 20.0\n\t\t\t#print e\n\t\t\tif e < Einmin:\n\t\t\t\tEinmin = e\n\t\t\t\tchoose_theta = theta\n\t\t\t\tchoose_s = s\n\t\tEout.append( 0.5 + 0.3 * choose_s * (abs(choose_theta) - 1) )\n\n\tprint \"average Eout = \" + str(sum(Eout)/float(len(Eout)))\n\n\tx = [0.100, 0.125, 0.150, 0.175, 0.200, 0.225, 0.250, 0.275, 0.300, 0.325, 0.350, 0.375, 0.400, 0.425, 0.450, 0.475, 0.500, 0.525, 0.550, 0.575, 1.0]\n\ty = [0 for i in range(20)]\n\n\tfor num in Eout:\n\t\tfor i in range(20):\n\t\t\tif num >= x[i] and num < x[i+1]:\n\t\t\t\ty[i] += 1\n\t\t\t\tbreak\n\n\tplt.bar(x[:-1], y, width = 0.025, color = \"#fad981\")\n\tplt.title(\"18. Eout distribution\")\n\tplt.xlabel(\"Error Rate\")\n\tplt.ylabel(\"times\")\n\tplt.show()\n\n\tplt.plot(Eout)\n\nif __name__ == '__main__':\n\tmain()" }, { "alpha_fraction": 0.6117647290229797, "alphanum_fraction": 0.6359133124351501, "avg_line_length": 19.98701286315918, "blob_id": "a22d242ae025c12a16aebfa648462e39eebf3097", "content_id": "b6f30feb5f026093d04d30bf589043121500c15c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1615, "license_type": "no_license", "max_line_length": 78, "num_lines": 77, "path": "/homework1/r04922080_hw1/17.py", "repo_name": "michawei/2015-MachineLearning-hw", "src_encoding": "UTF-8", "text": "import random\nimport matplotlib.pyplot as plt\n\ndef normalVectorCompute(a, b, s):\n\tans = []\n\tfor i in range(5):\n\t\tans.append(a[i] + float(0.5)*s*b[i])\n\treturn ans\n\ndef innerProduct(a, b):\n\tans = 0\n\tfor i in range(5):\n\t\tans += a[i]*b[i]\n\treturn ans\n\ndef plusOrMinus(yn):\n\tif yn > 0 :\n\t\treturn 1\n\telse :\n\t\treturn -1\n\ndef main():\n\tf = open(\"hw1_15_train.dat\", \"r\")\n\tdata = []\n\tfor line in f:\n\t\tline = line[:-1].split('\\t');\n\t\ttmp = line[0].split(' ');\n\t\tmember = []\n\t\tfor i in tmp:\n\t\t\tmember.append(float(i))\n\t\tmember.append(float(1))\n\t\tmember.append(float(line[1]))\n\t\tdata.append(member)\n\t#print data\n\tf.close()\n\n\ttwoThousantUpdate = []\n\tfor count in range(2000):\n\n\t\tindexRandom = range(len(data))\n\t\trandom.shuffle(indexRandom)\n\n\t\tnormalVector = data[indexRandom[0]][:-1]\n\n\t\t# first normalVector update\n\t\tupdateCount = 1\n\t\tnotYet = True\n\t\twhile notYet == True :\n\t\t\tnotYet = False\n\t\t\tfor i in indexRandom:\n\t\t\t\tvector = data[i]\n\t\t\t\tgn = innerProduct(normalVector, vector[:-1])\n\t\t\t\tgn = plusOrMinus(gn);\n\n\t\t\t\tif gn != vector[-1]:\n\t\t\t\t\tnotYet = True\n\t\t\t\t\tupdateCount += 1\n\t\t\t\t\tnormalVector = normalVectorCompute(normalVector, vector[:-1], vector[-1])\n\t\ttwoThousantUpdate.append(updateCount);\n\n\tx = range(max(twoThousantUpdate)+1)\n\ty = [0 for i in 
range(max(twoThousantUpdate)+1)]\n\n\tans = float(0)\n\tfor num in twoThousantUpdate:\n\t\tans += num\n\t\ty[num] += 1\n\t\n\tprint \"average updates = \" + str(ans/float(len(twoThousantUpdate)))\n\tplt.bar(x, y, color = \"#20b2aa\")\n\tplt.title(\"17. Number of Updates versus Frequency ( Eta = 0.5 )\")\n\tplt.xlabel(\"Updates\")\n\tplt.ylabel(\"Frequency\")\n\tplt.show()\n\nif __name__ == '__main__':\n\tmain()" }, { "alpha_fraction": 0.47313690185546875, "alphanum_fraction": 0.537839412689209, "avg_line_length": 21.205127716064453, "blob_id": "dfcc08daa242e8dee4c71d322eae0d21af5cfdb4", "content_id": "9fb938fb952e34d357642f1fe47728e32a1dc343", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1731, "license_type": "no_license", "max_line_length": 112, "num_lines": 78, "path": "/homework3/r04922080_hw3/20.py", "repo_name": "michawei/2015-MachineLearning-hw", "src_encoding": "UTF-8", "text": "import numpy as np\nimport random\nimport matplotlib.pyplot as plt\nimport math\nfrom numpy.linalg import inv\n\ndef sign(a):\n\tif a >= 0:\n\t\treturn 1\n\telse:\n\t\treturn -1\n\ndef main():\n\n\tf = open(\"hw3_train.dat\", \"r\")\n\t#train_data = []\n\tX = np.zeros(shape=(1000, 21))\n\ty = np.zeros(shape=(1000, 1))\n\tX = np.asmatrix(X)\n\ty = np.asmatrix(y)\n\tnum = 0\n\tfor line in f:\n\t\tline = line[1:-1].split(' ')\n\t\t#print line\n\t\tx = np.zeros(shape=(1, 21))\n\t\tx = np.asmatrix(x)\n\t\tx.itemset((0, 0), 1)\n\t\tfor i in range(20):\n\t\t\tx.itemset((0, i+1), float(line[i]))\n\t\tX[num] = x\n\t\ty[num] = [float(line[20])]\n\t\t#train_data.append(line)\n\t\tnum += 1\n\tf.close()\n\n\t# print X\n\tW_t = np.zeros(shape=(21, 1))\n\tW_t = np.asmatrix(W_t)\n\tEta = 0.001\n\ti = 0\n\tfor _ in range(2000):\n\t\t# gradient_E = np.zeros(shape=(21, 1))\n\t\t# gradient_E = np.asmatrix(gradient_E)\n\t\tgradient_E = (1.0 / (1 + np.exp( y.item(i) * (X[i]*W_t).item(0) ) )) * (-1) * y.item(i) * X[i].transpose()\n\t\ti+=1\n\t\ti%=1000\n\t\t# for i in range(1000):\n\t\t# \tgradient_E += (1.0 / (1 + np.exp( y.item(i) * (X[i]*W_t).item(0) ) )) * (-1) * y.item(i) * X[i].transpose()\n\t\t# gradient_E /= 1000.0\n\t\tW_t -= Eta * gradient_E\n\tprint W_t\n\n\tf = open(\"hw3_test.dat\", \"r\")\n\tEout = 0.0\n\tfor line in f:\n\t\tline = line[1:-1].split(' ')\n\t\t#print line\n\t\tx = np.zeros(shape=(1, 21))\n\t\tx = np.asmatrix(x)\n\t\tx.itemset((0, 0), 1)\n\t\tfor i in range(20):\n\t\t\tx.itemset((0, i+1), float(line[i]))\n\t\tout = 1.0/(1+np.exp((-1)*(x*W_t).item(0)))\n\t\tif out <= 0.5:\n\t\t\tout = -1\n\t\telse:\n\t\t\tout = 1\n\t\tif out != int(line[20]):\n\t\t\tEout += 1\n\t\t# Eout += math.log(1 + np.exp( (-1) * float(line[20]) * (x*W_t).item(0) ))\n\t\t# if sign((x * W_t).item(0)) != float(line[20]):\n\t\t# \terror += 1.0\n\tf.close()\n\tprint Eout / 3000.0\n\n\nif __name__ == '__main__':\n\tmain()" } ]
27
shadowSQ/Python-lenrning
https://github.com/shadowSQ/Python-lenrning
a385327fee93ded417b659b58caeac0c08177a9f
4afd62013e64fbc501bac8dc571ae3aa7c9dc72d
b7f59f28d205b79d02848d83cb0f04e6ed062874
refs/heads/master
2020-03-14T09:37:09.545039
2018-05-02T03:11:24
2018-05-02T03:11:24
131,548,371
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5825027823448181, "alphanum_fraction": 0.5957918167114258, "avg_line_length": 17.079999923706055, "blob_id": "7f15c84d67a875c62b6d99c1baf10b818f200b6d", "content_id": "2c49118a14c36260c3871789e4df88a5b99b30c7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1193, "license_type": "no_license", "max_line_length": 49, "num_lines": 50, "path": "/__iter__迭代对象创建练习.py", "repo_name": "shadowSQ/Python-lenrning", "src_encoding": "UTF-8", "text": "#__iter__练习\n'''\n如果一个类想被用于for ... in循环,\n类似list或tuple那样,\n就必须实现一个__iter__()方法,\n该方法返回一个迭代对象,\n然后,Python的for循环就会不断调用该迭代对象的__next__()方法拿到循环的下一个值,\n直到遇到StopIteration错误时退出循环。\n'''\n\nclass Fib(object):\n\tdef __init__(self):\n\t\tself.a,self.b = 0,1\t#初始化计数器\t\n\tdef __iter__(self):\n\t\treturn self\t\t\t#迭代对象为实例本身,返回自己\n\tdef __next__(self):\n\t\t#这个式子注意理解,是同时进行的\n\t\t#下一次相加的数,第一个是self.b \n\t\t#第二个是self.a+self.b\n\t\t#将他们分别给self.a \n\t\t#self.b\n\t\tself.a,self.b = self.b,self.a+self.b\n\t\tif self.a > 1000:\n\t\t\traise StopIteration()\n\t\treturn self.a\n\t#获取元素\t\n\tdef __getitem__(self,n):\n\t\tif isinstance(n,int):\n\t\t\ta,b = 1,1\n\t\t\tfor x in range(n):\n\t\t\t\ta,b = b,a+b\n\t\t\treturn a\t\n\t\tif isinstance(n,slice):\n\t\t\tstart = n.start\n\t\t\tstop = n.stop\n\t\t\tif start is None:\n\t\t\t\tstart = 0\n\t\t#\tif stop is None:\n\t\t#\t\tstop = 0\t\n\t\t\ta,b = 1,1\n\t\t\tL = []\n\t\t\tfor x in range(stop):\n\t\t\t\tif x>=start:\n\t\t\t\t\tL.append(a)\n\t\t\t\ta,b = b,a+b\n\t\t\treturn L\n\t\t\t\n\t\t\t\nfor n in Fib():\n\tprint(n)" }, { "alpha_fraction": 0.662162184715271, "alphanum_fraction": 0.7432432174682617, "avg_line_length": 17.5, "blob_id": "e03982af45a03598dbb8a14831d2fe48e4f72775", "content_id": "d7d0f306a5b201bfa6a3b1f96a2b103fe6e95a3e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 74, "license_type": "no_license", "max_line_length": 36, "num_lines": 4, "path": "/functoolS练习.py", "repo_name": "shadowSQ/Python-lenrning", "src_encoding": "UTF-8", "text": "import functools\nint2 = functools.partial(int,base=3)\n\nprint(int2('121'))\n" }, { "alpha_fraction": 0.5059171319007874, "alphanum_fraction": 0.5384615659713745, "avg_line_length": 21.53333282470703, "blob_id": "600e48cf5e8d896dd87280e73575f1381ebbb44b", "content_id": "7515248bc7b86817f59b074ed9fdad44b80ee1ca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 338, "license_type": "no_license", "max_line_length": 50, "num_lines": 15, "path": "/Slice and 列表生成式.py", "repo_name": "shadowSQ/Python-lenrning", "src_encoding": "UTF-8", "text": "d = {'a': 1, 'b': 2, 'c': 3}\nfor key in d.values():\n print(key)\n\nprint([i*i for i in range(1,100) if i%2==0])\nprint([m+n for m in 'ABC' for n in 'abc'])\n\nimport os\nprint([d for d in os.listdir('.')])\n\nd = {'x':'a','b':'c'}\nprint([k+'='+v for k,v in d.items()])\n\nL=['HEllo',18,'GADA']\nprint([s.lower() for s in L if isinstance(s,str)])\n" }, { "alpha_fraction": 0.2647058963775635, "alphanum_fraction": 0.5882353186607361, "avg_line_length": 21.66666603088379, "blob_id": "469b40539d0e278eef0ad982b93ffb1402c1889e", "content_id": "a9b43fa0052e45a1f1a314edf58ba6865a62e8bc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 68, "license_type": "no_license", "max_line_length": 23, "num_lines": 3, "path": "/字符串格式化.py", "repo_name": "shadowSQ/Python-lenrning", "src_encoding": "UTF-8", "text": 
"print('%.2f' %32.12345)\nprint('%s' %32.12345)\nprint('%d' %32.12345)\n" }, { "alpha_fraction": 0.4150943458080292, "alphanum_fraction": 0.46037736535072327, "avg_line_length": 19.384614944458008, "blob_id": "588ea6ecd815fc5a66e695a1c41bddf8409545a7", "content_id": "fb538366c5ea863cdcea521a395d7f6f77dc43c5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 265, "license_type": "no_license", "max_line_length": 58, "num_lines": 13, "path": "/杨辉三角-generator生成器.py", "repo_name": "shadowSQ/Python-lenrning", "src_encoding": "UTF-8", "text": "def triangles(max):\n n = 0\n L = [1]\n while n<max:\n yield L\n L = [1]+[L[i]+L[i+1] for i in range(len(L)-1)]+[1]\n n = n+1\n return 'done'\nfor n in triangles(5):\n print(n)\n \nL = [1,1]\nprint([L[i]+L[i+1] for i in range(len(L)-1)])\n" }, { "alpha_fraction": 0.5996240377426147, "alphanum_fraction": 0.6221804618835449, "avg_line_length": 25.350000381469727, "blob_id": "9cf5eb8733e67184b6ea3c158c074f294307aca1", "content_id": "750626b6d7ce8fda4a13672b79bf6f7b44c7187b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 542, "license_type": "no_license", "max_line_length": 56, "num_lines": 20, "path": "/关键字参数.py", "repo_name": "shadowSQ/Python-lenrning", "src_encoding": "UTF-8", "text": "def person(name,age,**kw):\n print('name:',name,'age',age,'other',kw)\n\nperson('li',45)\nperson('li',24,city='beijing',dizhi = 'quanhzou')\n\nextra = {'sex':'nan','interest':'basketball'}\n\nperson('li',22,**extra)\n\n#命令关键字\ndef person(name, age, *, city, job):\n print(name, age, city, job)\n\nperson('Jack', 24, city='Beijing', job='Engineer')\nperson('li',22,city = 'beijing',job = 'engineer')\n\ndef person(name, age, *args, city, job):\n print(name, age, args, city, job)\nperson('li',25,'args',city = 'beijing',job = 'engineer')\n \n" }, { "alpha_fraction": 0.5670102834701538, "alphanum_fraction": 0.6082473993301392, "avg_line_length": 19.85714340209961, "blob_id": "9305cbe404a93b54d96fbc0f5c53f2661ad450bd", "content_id": "ba05ec4533624a01bd50d9dfa8f0902a98c40682", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 341, "license_type": "no_license", "max_line_length": 59, "num_lines": 14, "path": "/排序与key关键字的使用.py", "repo_name": "shadowSQ/Python-lenrning", "src_encoding": "UTF-8", "text": "L = [('Bob', 75), ('Adam', 92), ('Bart', 66), ('Lisa', 88)]\n\ndef byname(t):\n return t[0]\n\ndef byscore(t):\n return -t[1]\n#函数的作用是将L中的值一个一个传进来,\n#然后得到返回值\nprint(sorted(L,key = byname))\nprint(sorted(L,key = byscore))\n\nprint(sorted(L,key = lambda t:t[0]))\nprint(sorted(L,key = lambda t:t[1]))" }, { "alpha_fraction": 0.32710281014442444, "alphanum_fraction": 0.4672897160053253, "avg_line_length": 12.375, "blob_id": "3bcf7f4c651fd1d0bc6fecb7fc4c832fe11df2c9", "content_id": "bcb259b50daff4a70ad60b27ace6610c7e29d7e0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 107, "license_type": "no_license", "max_line_length": 25, "num_lines": 8, "path": "/dictionary和Set练习.py", "repo_name": "shadowSQ/Python-lenrning", "src_encoding": "UTF-8", "text": "dic = {'li':23,'wang':25}\ndic\ns1 = set([1,2,3,2])\ns2 = set(['a','b','c'])\n\nprint(s1|s2)\n\nlist1 = [2,2,'a']\n" }, { "alpha_fraction": 0.5299145579338074, "alphanum_fraction": 0.5811966061592102, "avg_line_length": 18, "blob_id": "a8ff4ed79b9275e9b7ad3ac0fd19b990a3c87605", "content_id": 
"f9375fa176f7d16ec093f59a9f21ce7380b02161", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 117, "license_type": "no_license", "max_line_length": 31, "num_lines": 6, "path": "/定义函数求解一元二次方程组.py", "repo_name": "shadowSQ/Python-lenrning", "src_encoding": "UTF-8", "text": "import math\n\ndef eryuanyici(a,b,c):\n\tx1 = -b + math.sqrt(b*b-4*a*c)\n\tx2 = -b - math.sqrt(b*b-4*a*c)\n\treturn x1,x2\n\t\n\t" }, { "alpha_fraction": 0.6676923036575317, "alphanum_fraction": 0.6800000071525574, "avg_line_length": 14.428571701049805, "blob_id": "f6ffad951d8653c0d92594b5472394b64767ae6b", "content_id": "7c5e1a26599a3f386be0e4ca76dad7ef0db0a2e3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 367, "license_type": "no_license", "max_line_length": 33, "num_lines": 21, "path": "/类的创建.py", "repo_name": "shadowSQ/Python-lenrning", "src_encoding": "UTF-8", "text": "#类的创建\n\n#_init_方法第一个参数永远是self,表示实例本身\nclass Student(object):\n\tdef __init__(self,name,score):\n\t\tself.name = name\n\t\tself.score = score\n\nbart = Student('Li',59)\nprint(bart.name)\n\t\t\n\ndef set_age(self,age):\n\tself.age = age\n\ns = Student()\t\n\t\nfrom types import MethodType\ns.set_age = MethodType(set_age,s)\ns.set_age(25)\nprint(s.age)\t\n" }, { "alpha_fraction": 0.6470588445663452, "alphanum_fraction": 0.6470588445663452, "avg_line_length": 19.399999618530273, "blob_id": "800069892256fcb735bf32fbc0e8264cd7d1601b", "content_id": "20988eeab0314418f5c6bc6641591fb56dbe0f33", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 112, "license_type": "no_license", "max_line_length": 55, "num_lines": 5, "path": "/文件IO.py", "repo_name": "shadowSQ/Python-lenrning", "src_encoding": "UTF-8", "text": "with open('c://Users/ASUS/Desktop/头文件模板.txt','r') as f:\n print(f.read())\n\nimport os\nprint(os.name)\n" }, { "alpha_fraction": 0.5281385183334351, "alphanum_fraction": 0.588744580745697, "avg_line_length": 13.375, "blob_id": "c9e9468e31cd48fb16f60538ac71706bdc4da3ff", "content_id": "2cbf6af64e8245a25651f48e29bdc805401366d6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 231, "license_type": "no_license", "max_line_length": 25, "num_lines": 16, "path": "/可变参数函数定义.py", "repo_name": "shadowSQ/Python-lenrning", "src_encoding": "UTF-8", "text": "def cute(numbers):\n\ts = 0\n\tfor num in numbers:\t\n\t\ts = s+num*num\n\treturn s\n\t\ndef cute2(*numbers):\n\ts = 0\n\tfor num in numbers:\t\n\t\ts = s+num*num\n\treturn s\n\t\nlist1 = [2,3,4,5,2,1,2]\n\nprint(\"%d\"% cute(list1))\nprint(\"%s\"%cute2(*list1))\n\n" }, { "alpha_fraction": 0.3275362253189087, "alphanum_fraction": 0.40289855003356934, "avg_line_length": 23.64285659790039, "blob_id": "440bcbe96f3a7e2bfe8f74da7cfe41b198b1d932", "content_id": "a20e54738cec6c4f541caa585e31166fbee7d247", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 353, "license_type": "no_license", "max_line_length": 67, "num_lines": 14, "path": "/函数参数综合练习.py", "repo_name": "shadowSQ/Python-lenrning", "src_encoding": "UTF-8", "text": "#综合练习\ndef f1(a, b, c=0, *args,city, **kw):\n print('a =', a, 'b =', b, 'c =', c, 'args =', args, 'kw =', kw)\n if 'est1' in kw:\n print('est')\n\ndef f2(a, b, c=0, *, d, **kw):\n print('a =', a, 'b =', b, 'c =', c, 'd =', d, 'kw =', kw)\n\t\n#f1(1,2)\n#f1(1,2,c = 
3)\n#f1(1,2,3,'a','b')\n#f1(1,2,3,'a','b',v=23)\nf1(1,2,city='beijing',est=2)\n" }, { "alpha_fraction": 0.42086753249168396, "alphanum_fraction": 0.44548651576042175, "avg_line_length": 14.60377311706543, "blob_id": "b58f01277d3695e310089410b0bfdadc7af56090", "content_id": "af01483dac177ac45dee855deddf132565b0f70b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 943, "license_type": "no_license", "max_line_length": 75, "num_lines": 94, "path": "/素数.py", "repo_name": "shadowSQ/Python-lenrning", "src_encoding": "UTF-8", "text": "\n#filter function practice\n\ndef _odd_iter():\n    n = 1\n    while True:\n        n = n + 2\n        yield n\n\n\ndef _not_divisible(n):\n    return lambda x: x % n != 0\n\t\ndef primes():\n    yield 2\n    it = _odd_iter() # initial sequence\n    while True:\n        n = next(it) # return the first number of the sequence\n        yield n\n        it = filter(_not_divisible(n), it) # build a new sequence\t\n\n# print the primes below 1000:\n#for n in primes():\n# if n < 1000:\n# print(n)\n# else:\n# break\t\t\n\n# palindromic numbers\n\ndef _odd_iter1():\n    n = 0\n    while True:\n        n = n + 1\n        yield n\n\ndef is_palindrome(n):\n    n = str(n)\n    if n == n[::-1]:\n        return True\n    \ndef pri():\n    yield 1\n    it = _odd_iter1()\n    while True:\n        n = next(it)\n        yield n\n        it = filter(is_palindrome,it)# do not append the (n) call here!! huge pitfall\n    \nfor n in pri():\n    if n < 100:\n        print(n)\n    else:\n        break\t\t\n    \n    \n    \n" } ]
14
plannapus/MfN-Code-Clinic
https://github.com/plannapus/MfN-Code-Clinic
cd2015c674700a0eca85ec05c1ae9d059668b177
cd9580be3d72688a7738393b373b8f6424e404d1
a7ff10104002b3bfdab2080c63362cf535f56eb2
refs/heads/master
2021-01-20T20:42:22.842689
2018-09-04T13:33:23
2018-09-04T13:33:23
60,703,632
0
2
null
2016-06-08T14:21:15
2016-11-02T14:24:34
2016-12-16T10:08:20
R
[ { "alpha_fraction": 0.7193763852119446, "alphanum_fraction": 0.7204899787902832, "avg_line_length": 26.212121963500977, "blob_id": "39f2e20ed747b2f6c50eb70c8b1e9a7e824e807b", "content_id": "187877650db6b6515a4eb6111c56ef9ba41aeb19", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 898, "license_type": "no_license", "max_line_length": 139, "num_lines": 33, "path": "/Scripts/2017/Code 2017-11-22 - Makefile/Makefile", "repo_name": "plannapus/MfN-Code-Clinic", "src_encoding": "UTF-8", "text": "LOC_DIR = ./Data/ADP/LOC/Confirmed/\nLOCS = $(shell find $(LOC_DIR) -type f -name '*')\nOCC_FILE = ./Data/flipped\\ occurrences.csv\nCOUNT_FILE = ./Data/counts.csv\n\nall: paleocene.pdf\n\npaleocene.pdf: paleocene.bib paleocene.tex Figures/map1.pdf Figures/Cohort_grey.pdf Figures/count_data.eps Figures/new_div.pdf Plates/*.eps\n\tpdflatex paleocene\n\tbibtex paleocene\n\tpdflatex paleocene\n\tpdflatex paleocene\n\npaleocene.tex: Scripts/latexify_rangechart.r $(OCC_FILE)\n\tRscript $<\n\nFigures/Cohort_grey.pdf: Scripts/cohorts.R Data/cohorts.csv $(OCC_FILE)\n\tRscript $<\n\nFigures/count_data.eps: Scripts/plot_paleocene.R $(COUNT_FILE)\n\tRscript $<\n\nFigures/new_div.pdf: Scripts/paldiatdiv.R $(OCC_FILE)\n\tRscript $<\n\n$(OCC_FILE): Scripts/applyNewAges.R $(LOC_DIR) $(LOCS)\n\tRscript $<\n\n$(COUNT_FILE): Scripts/applyNewAges.R $(LOC_DIR) $(LOCS)\n\tRscript $<\n\nclean:\n\trm *.aux *.bbl *.blg *.log *.spl */*-converted-to.pdf\n" }, { "alpha_fraction": 0.6625766754150391, "alphanum_fraction": 0.6742331385612488, "avg_line_length": 43.08108139038086, "blob_id": "c0c8f68432791ecdc104087b4132303ffd8c7ee9", "content_id": "8c07b92c5c57745172952169eb8646db57d68366", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 1630, "license_type": "no_license", "max_line_length": 131, "num_lines": 37, "path": "/Scripts/2016/Code 2016-06-09 Simulations/max_likelihood.R", "repo_name": "plannapus/MfN-Code-Clinic", "src_encoding": "UTF-8", "text": "max_likelihood <- function(res){\n\tlibrary(ape)\n\tn <- nrow(res$Params)\n\tall_trials <- res$Trials\n\tresult <- matrix(nrow=length(all_trials),ncol=n)\n\tfor(i in seq_along(all_trials)){\n\t\t#Making a phylogenetic tree out of each simulation\n\t\tA <- all_trials[[i]]\n\t\ttips <- A$taxa[!A$taxa%in%A$ancestor]\n\t\tnodes <- unique(A$ancestor[A$ancestor!=0])\n\t\tedges <- cbind(A$ancestor,A$taxa)[-1,]\n\t\tbrlen <- A$ext_time - A$orig_time\n\t\teco_tips <- A$eco_type[tips]\n\t\text_tips <- A$ext_time[tips]\n\t\torig_tips <- A$orig_time[tips]\n\t\teco_living <- eco_tips\n\t\teco_living[ext_tips!=max(ext_tips)] <- NA\n\t\teco_living <- eco_living[!is.na(eco_living)]\n\t\tTIPS <- seq_along(tips)\n\t\tNODES <- seq_along(nodes) + length(tips)\n\t\tEDGES <- cbind(NODES[match(edges[,1],nodes)],ifelse(edges[,2]%in%tips,TIPS[match(edges[,2],tips)],NODES[match(edges[,2],nodes)]))\n\t\ttree <- structure(list(edge=EDGES, tip.label=paste0(\"taxa_\",tips), Nnode=length(nodes)),class=\"phylo\")\n\t\ttree <- compute.brlen(tree,brlen[-1])\n\t\t#Ancestral Character Trait reconstruction based on living taxa\n\t\tif(all(eco_living==eco_living[1])){ #If all living taxa shares their ecology, no need to compute\n\t\t\tresult[i,eco_living[1]] <- 1\n\t\t\tresult[i,-eco_living[1]] <- 0\n\t\t}else{if(length(eco_living)>2){\n\t\t\t\tliving_tree <- drop.tip(tree,tree$tip.label[ext_tips!=max(ext_tips)]) #Chop dead branches\n\t\t\t\ta <- ace(factor(eco_living,1:n),living_tree,\"discrete\") #Maximum 
likelihood ancestral character reconstruction\n\t\t\t\tresult[i,] <- a$lik.anc[1,] #Keep only the score attributed to ecology 2 for the ancestral taxon\n\t\t\t}else{result[i,unique(eco_living)]<-0.5}}\n\t\tcat(i,\"\\r\")\n\t}\n\tcat(\"\\n\")\n\tresult\n}" }, { "alpha_fraction": 0.2781114876270294, "alphanum_fraction": 0.4996996521949768, "avg_line_length": 24.110105514526367, "blob_id": "eebfcac1b65e8a3e0a7c1f744dadb2cd4dec8983", "content_id": "28bc9669daec4b1ac4b385ba7c67d9dd6dc57666", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 16648, "license_type": "no_license", "max_line_length": 121, "num_lines": 663, "path": "/Scripts/2016/Code 2016-08-03.R", "repo_name": "plannapus/MfN-Code-Clinic", "src_encoding": "UTF-8", "text": "#Basics on writing functions\n\n> add1 <- function(x){\n+ x+1\n+ }\n> add1(10)\n[1] 11\n> addN <- function(x, n){x+n}\n> addN(10, 5)\n[1] 15\n> addN <- function(x, ...){sum(x, ...)}\n> addN(10, 2, 3, 4, 5)\n[1] 24\n> addN(10, 2, 3, 4, NA, 5)\n[1] NA\n> addN(10, 2, 3, 4, NA, 5, na.rm=TRUE)\n[1] 24\n> addN <- function(x, ...)sum(x, ...)\n\n#Looking for a function definition:\n#See also: http://stackoverflow.com/questions/19226816/how-can-i-view-the-source-code-for-a-function for a complete guide\n\n> sum\nfunction (..., na.rm = FALSE) .Primitive(\"sum\")\n> mean\nfunction (x, ...) \nUseMethod(\"mean\")\n<bytecode: 0x7fb6f95853c8>\n<environment: namespace:base>\n> methods(mean)\n[1] mean.Date mean.default mean.difftime mean.POSIXct mean.POSIXlt \nsee '?methods' for accessing help and source code\n> mean.default\nfunction (x, trim = 0, na.rm = FALSE, ...) \n{\n if (!is.numeric(x) && !is.complex(x) && !is.logical(x)) {\n warning(\"argument is not numeric or logical: returning NA\")\n return(NA_real_)\n }\n if (na.rm) \n x <- x[!is.na(x)]\n if (!is.numeric(trim) || length(trim) != 1L) \n stop(\"'trim' must be numeric of length one\")\n n <- length(x)\n if (trim > 0 && n) {\n if (is.complex(x)) \n stop(\"trimmed means are not defined for complex data\")\n if (anyNA(x)) \n return(NA_real_)\n if (trim >= 0.5) \n return(stats::median(x, na.rm = FALSE))\n lo <- floor(n * trim) + 1\n hi <- n + 1 - lo\n x <- sort.int(x, partial = unique(c(lo, hi)))[lo:hi]\n }\n .Internal(mean(x))\n}\n<bytecode: 0x7fb6f95add50>\n<environment: namespace:base>\n> library(scales)\n> alpha\nfunction (colour, alpha = NA) \n{\n col <- grDevices::col2rgb(colour, TRUE)/255\n if (length(colour) != length(alpha)) {\n if (length(colour) > 1 && length(alpha) > 1) {\n stop(\"Only one of colour and alpha can be vectorised\")\n }\n if (length(colour) > 1) {\n alpha <- rep(alpha, length.out = length(colour))\n }\n else if (length(alpha) > 1) {\n col <- col[, rep(1, length(alpha)), drop = FALSE]\n }\n }\n alpha[is.na(alpha)] <- col[4, ][is.na(alpha)]\n new_col <- grDevices::rgb(col[1, ], col[2, ], col[3, ], alpha)\n new_col[is.na(colour)] <- NA\n new_col\n}\n<environment: namespace:scales>\n\n#Creating a new function based on an existing function (aka \"The Kobayashi-Maru\", dixit Olszewski)\n\n> alpha2 <- function (colour, alpha = NA, blue) \n+ {\n+ col <- grDevices::col2rgb(colour, TRUE)/255\n+ if (length(colour) != length(alpha)) {\n+ if (length(colour) > 1 && length(alpha) > 1) {\n+ stop(\"Only one of colour and alpha can be vectorised\")\n+ }\n+ if (length(colour) > 1) {\n+ alpha <- rep(alpha, length.out = length(colour))\n+ }\n+ else if (length(alpha) > 1) {\n+ col <- col[, rep(1, length(alpha)), drop = FALSE]\n+ }\n+ }\n+ alpha[is.na(alpha)] <- col[4, 
][is.na(alpha)]\n+ new_col <- grDevices::rgb(col[1, ], col[2, ], blue, alpha)\n+ new_col[is.na(colour)] <- NA\n+ new_col\n+ }\n> alpha(\"red\", 0.3)\n[1] \"#FF00004D\"\n> alpha2(\"red\", 0.3, blue=0.5)\n[1] \"#FF00804D\"\n> plot(1:3, col=c(\"red\", alpha(\"red\",0.5), alpha2(\"red\", 0.5, 0.5)), pch=19, cex=3)\n> alpha2 <- function (colour, alpha = NA, blue = 0.5) #Adding a default value for an argument\n+ {\n+ col <- grDevices::col2rgb(colour, TRUE)/255\n+ if (length(colour) != length(alpha)) {\n+ if (length(colour) > 1 && length(alpha) > 1) {\n+ stop(\"Only one of colour and alpha can be vectorised\")\n+ }\n+ if (length(colour) > 1) {\n+ alpha <- rep(alpha, length.out = length(colour))\n+ }\n+ else if (length(alpha) > 1) {\n+ col <- col[, rep(1, length(alpha)), drop = FALSE]\n+ }\n+ }\n+ alpha[is.na(alpha)] <- col[4, ][is.na(alpha)]\n+ new_col <- grDevices::rgb(col[1, ], col[2, ], blue, alpha)\n+ new_col[is.na(colour)] <- NA\n+ new_col\n+ }\n> plot(1:3, col=c(\"red\", alpha(\"red\",0.5), alpha2(\"red\", 0.5)), pch=19, cex=3)\n> plot(1:3, col=c(\"red\", alpha(\"red\",0.5), alpha2(\"red\", 0.5, 0.8)), pch=19, cex=3)\n\n#Tapply, lapply etc.\n> tapply(1:10, cut(1:10, c(0,5,10)), mean)\n (0,5] (5,10] \n 3 8 \n> tapply(1:10, cut(1:10, c(0,5,10)), sum)\n (0,5] (5,10] \n 15 40 \n> cut(1:10, c(0,5,10))\n [1] (0,5] (0,5] (0,5] (0,5] (0,5] (5,10] (5,10] (5,10] (5,10] (5,10]\nLevels: (0,5] (5,10]\n> tapply(c(1:10,NA), cut(c(1:10,NA), c(0,5,10)), function(x){sum(x+3)})\n (0,5] (5,10] \n 30 55 \n> a=1:10\n> b=5:1\n> tapply(c(1:10,NA), cut(c(1:10,NA), c(0,5,10)), function(x){cor(x,b)})\n (0,5] (5,10] \n -1 -1 \n\n> outer(matrix(1:4,nrow=2),matrix(5:8, nrow=2) , \"*\")\n, , 1, 1\n\n [,1] [,2]\n[1,] 5 15\n[2,] 10 20\n\n, , 2, 1\n\n [,1] [,2]\n[1,] 6 18\n[2,] 12 24\n\n, , 1, 2\n\n [,1] [,2]\n[1,] 7 21\n[2,] 14 28\n\n, , 2, 2\n\n [,1] [,2]\n[1,] 8 24\n[2,] 16 32\n\n> sapply(1:10, function(x)sapply(1:4, function(y) x*y))\n [,1] [,2] [,3] [,4] [,5] [,6] [,7] [,8] [,9] [,10]\n[1,] 1 2 3 4 5 6 7 8 9 10\n[2,] 2 4 6 8 10 12 14 16 18 20\n[3,] 3 6 9 12 15 18 21 24 27 30\n[4,] 4 8 12 16 20 24 28 32 36 40\n> lapply(1:10, function(x)sapply(1:4, function(y) x*y))\n[[1]]\n[1] 1 2 3 4\n\n[[2]]\n[1] 2 4 6 8\n\n[[3]]\n[1] 3 6 9 12\n\n[[4]]\n[1] 4 8 12 16\n\n[[5]]\n[1] 5 10 15 20\n\n[[6]]\n[1] 6 12 18 24\n\n[[7]]\n[1] 7 14 21 28\n\n[[8]]\n[1] 8 16 24 32\n\n[[9]]\n[1] 9 18 27 36\n\n[[10]]\n[1] 10 20 30 40\n\n> lapply(1:10, function(x)lapply(1:4, function(y) x*y))\n[[1]]\n[[1]][[1]]\n[1] 1\n\n[[1]][[2]]\n[1] 2\n\n[[1]][[3]]\n[1] 3\n\n[[1]][[4]]\n[1] 4\n\n\n[[2]]\n[[2]][[1]]\n[1] 2\n\n[[2]][[2]]\n[1] 4\n\n[[2]][[3]]\n[1] 6\n\n[[2]][[4]]\n[1] 8\n\n\n[[3]]\n[[3]][[1]]\n[1] 3\n\n[[3]][[2]]\n[1] 6\n\n[[3]][[3]]\n[1] 9\n\n[[3]][[4]]\n[1] 12\n\n\n[[4]]\n[[4]][[1]]\n[1] 4\n\n[[4]][[2]]\n[1] 8\n\n[[4]][[3]]\n[1] 12\n\n[[4]][[4]]\n[1] 16\n\n\n[[5]]\n[[5]][[1]]\n[1] 5\n\n[[5]][[2]]\n[1] 10\n\n[[5]][[3]]\n[1] 15\n\n[[5]][[4]]\n[1] 20\n\n\n[[6]]\n[[6]][[1]]\n[1] 6\n\n[[6]][[2]]\n[1] 12\n\n[[6]][[3]]\n[1] 18\n\n[[6]][[4]]\n[1] 24\n\n\n[[7]]\n[[7]][[1]]\n[1] 7\n\n[[7]][[2]]\n[1] 14\n\n[[7]][[3]]\n[1] 21\n\n[[7]][[4]]\n[1] 28\n\n\n[[8]]\n[[8]][[1]]\n[1] 8\n\n[[8]][[2]]\n[1] 16\n\n[[8]][[3]]\n[1] 24\n\n[[8]][[4]]\n[1] 32\n\n\n[[9]]\n[[9]][[1]]\n[1] 9\n\n[[9]][[2]]\n[1] 18\n\n[[9]][[3]]\n[1] 27\n\n[[9]][[4]]\n[1] 36\n\n\n[[10]]\n[[10]][[1]]\n[1] 10\n\n[[10]][[2]]\n[1] 20\n\n[[10]][[3]]\n[1] 30\n\n[[10]][[4]]\n[1] 40\n\n\n> array(1:4, c(2,2))\n [,1] [,2]\n[1,] 1 3\n[2,] 2 4\n> apply(array(1:4, c(2,2)), 1, sum)\n[1] 4 6\n> apply(array(1:4, 
c(2,2)), 2, sum)\n[1] 3 7\n\n> apply(array(1:8, c(2,2,2)), 3, sum)\n[1] 10 26\n> apply(array(1:8, c(2,2,2)), c(1,2), sum)\n [,1] [,2]\n[1,] 6 10\n[2,] 8 12\n\n> data(mtcars)\n> head(mtcars)\n mpg cyl disp hp drat wt qsec vs am gear carb\nMazda RX4 21.0 6 160 110 3.90 2.620 16.46 0 1 4 4\nMazda RX4 Wag 21.0 6 160 110 3.90 2.875 17.02 0 1 4 4\nDatsun 710 22.8 4 108 93 3.85 2.320 18.61 1 1 4 1\nHornet 4 Drive 21.4 6 258 110 3.08 3.215 19.44 1 0 3 1\nHornet Sportabout 18.7 8 360 175 3.15 3.440 17.02 0 0 3 2\nValiant 18.1 6 225 105 2.76 3.460 20.22 1 0 3 1\n> split(mtcars, mtcars$gear)\n$`3`\n mpg cyl disp hp drat wt qsec vs am gear carb\nHornet 4 Drive 21.4 6 258.0 110 3.08 3.215 19.44 1 0 3 1\nHornet Sportabout 18.7 8 360.0 175 3.15 3.440 17.02 0 0 3 2\nValiant 18.1 6 225.0 105 2.76 3.460 20.22 1 0 3 1\nDuster 360 14.3 8 360.0 245 3.21 3.570 15.84 0 0 3 4\nMerc 450SE 16.4 8 275.8 180 3.07 4.070 17.40 0 0 3 3\nMerc 450SL 17.3 8 275.8 180 3.07 3.730 17.60 0 0 3 3\nMerc 450SLC 15.2 8 275.8 180 3.07 3.780 18.00 0 0 3 3\nCadillac Fleetwood 10.4 8 472.0 205 2.93 5.250 17.98 0 0 3 4\nLincoln Continental 10.4 8 460.0 215 3.00 5.424 17.82 0 0 3 4\nChrysler Imperial 14.7 8 440.0 230 3.23 5.345 17.42 0 0 3 4\nToyota Corona 21.5 4 120.1 97 3.70 2.465 20.01 1 0 3 1\nDodge Challenger 15.5 8 318.0 150 2.76 3.520 16.87 0 0 3 2\nAMC Javelin 15.2 8 304.0 150 3.15 3.435 17.30 0 0 3 2\nCamaro Z28 13.3 8 350.0 245 3.73 3.840 15.41 0 0 3 4\nPontiac Firebird 19.2 8 400.0 175 3.08 3.845 17.05 0 0 3 2\n\n$`4`\n mpg cyl disp hp drat wt qsec vs am gear carb\nMazda RX4 21.0 6 160.0 110 3.90 2.620 16.46 0 1 4 4\nMazda RX4 Wag 21.0 6 160.0 110 3.90 2.875 17.02 0 1 4 4\nDatsun 710 22.8 4 108.0 93 3.85 2.320 18.61 1 1 4 1\nMerc 240D 24.4 4 146.7 62 3.69 3.190 20.00 1 0 4 2\nMerc 230 22.8 4 140.8 95 3.92 3.150 22.90 1 0 4 2\nMerc 280 19.2 6 167.6 123 3.92 3.440 18.30 1 0 4 4\nMerc 280C 17.8 6 167.6 123 3.92 3.440 18.90 1 0 4 4\nFiat 128 32.4 4 78.7 66 4.08 2.200 19.47 1 1 4 1\nHonda Civic 30.4 4 75.7 52 4.93 1.615 18.52 1 1 4 2\nToyota Corolla 33.9 4 71.1 65 4.22 1.835 19.90 1 1 4 1\nFiat X1-9 27.3 4 79.0 66 4.08 1.935 18.90 1 1 4 1\nVolvo 142E 21.4 4 121.0 109 4.11 2.780 18.60 1 1 4 2\n\n$`5`\n mpg cyl disp hp drat wt qsec vs am gear carb\nPorsche 914-2 26.0 4 120.3 91 4.43 2.140 16.7 0 1 5 2\nLotus Europa 30.4 4 95.1 113 3.77 1.513 16.9 1 1 5 2\nFord Pantera L 15.8 8 351.0 264 4.22 3.170 14.5 0 1 5 4\nFerrari Dino 19.7 6 145.0 175 3.62 2.770 15.5 0 1 5 6\nMaserati Bora 15.0 8 301.0 335 3.54 3.570 14.6 0 1 5 8\n\n> lapply(split(mtcars, mtcars$gear), function(x) cor(x$mpg, x$disp))\n$`3`\n[1] -0.7249926\n\n$`4`\n[1] -0.9011593\n\n$`5`\n[1] -0.8806232\n\n\n> lapply(split(mtcars, mtcars$gear), function(x) split(x, x$carb))\n$`3`\n$`3`$`1`\n mpg cyl disp hp drat wt qsec vs am gear carb\nHornet 4 Drive 21.4 6 258.0 110 3.08 3.215 19.44 1 0 3 1\nValiant 18.1 6 225.0 105 2.76 3.460 20.22 1 0 3 1\nToyota Corona 21.5 4 120.1 97 3.70 2.465 20.01 1 0 3 1\n\n$`3`$`2`\n mpg cyl disp hp drat wt qsec vs am gear carb\nHornet Sportabout 18.7 8 360 175 3.15 3.440 17.02 0 0 3 2\nDodge Challenger 15.5 8 318 150 2.76 3.520 16.87 0 0 3 2\nAMC Javelin 15.2 8 304 150 3.15 3.435 17.30 0 0 3 2\nPontiac Firebird 19.2 8 400 175 3.08 3.845 17.05 0 0 3 2\n\n$`3`$`3`\n mpg cyl disp hp drat wt qsec vs am gear carb\nMerc 450SE 16.4 8 275.8 180 3.07 4.07 17.4 0 0 3 3\nMerc 450SL 17.3 8 275.8 180 3.07 3.73 17.6 0 0 3 3\nMerc 450SLC 15.2 8 275.8 180 3.07 3.78 18.0 0 0 3 3\n\n$`3`$`4`\n mpg cyl disp hp drat wt qsec vs am gear carb\nDuster 360 14.3 8 360 245 
3.21 3.570 15.84 0 0 3 4\nCadillac Fleetwood 10.4 8 472 205 2.93 5.250 17.98 0 0 3 4\nLincoln Continental 10.4 8 460 215 3.00 5.424 17.82 0 0 3 4\nChrysler Imperial 14.7 8 440 230 3.23 5.345 17.42 0 0 3 4\nCamaro Z28 13.3 8 350 245 3.73 3.840 15.41 0 0 3 4\n\n\n$`4`\n$`4`$`1`\n mpg cyl disp hp drat wt qsec vs am gear carb\nDatsun 710 22.8 4 108.0 93 3.85 2.320 18.61 1 1 4 1\nFiat 128 32.4 4 78.7 66 4.08 2.200 19.47 1 1 4 1\nToyota Corolla 33.9 4 71.1 65 4.22 1.835 19.90 1 1 4 1\nFiat X1-9 27.3 4 79.0 66 4.08 1.935 18.90 1 1 4 1\n\n$`4`$`2`\n mpg cyl disp hp drat wt qsec vs am gear carb\nMerc 240D 24.4 4 146.7 62 3.69 3.190 20.00 1 0 4 2\nMerc 230 22.8 4 140.8 95 3.92 3.150 22.90 1 0 4 2\nHonda Civic 30.4 4 75.7 52 4.93 1.615 18.52 1 1 4 2\nVolvo 142E 21.4 4 121.0 109 4.11 2.780 18.60 1 1 4 2\n\n$`4`$`4`\n mpg cyl disp hp drat wt qsec vs am gear carb\nMazda RX4 21.0 6 160.0 110 3.90 2.620 16.46 0 1 4 4\nMazda RX4 Wag 21.0 6 160.0 110 3.90 2.875 17.02 0 1 4 4\nMerc 280 19.2 6 167.6 123 3.92 3.440 18.30 1 0 4 4\nMerc 280C 17.8 6 167.6 123 3.92 3.440 18.90 1 0 4 4\n\n\n$`5`\n$`5`$`2`\n mpg cyl disp hp drat wt qsec vs am gear carb\nPorsche 914-2 26.0 4 120.3 91 4.43 2.140 16.7 0 1 5 2\nLotus Europa 30.4 4 95.1 113 3.77 1.513 16.9 1 1 5 2\n\n$`5`$`4`\n mpg cyl disp hp drat wt qsec vs am gear carb\nFord Pantera L 15.8 8 351 264 4.22 3.17 14.5 0 1 5 4\n\n$`5`$`6`\n mpg cyl disp hp drat wt qsec vs am gear carb\nFerrari Dino 19.7 6 145 175 3.62 2.77 15.5 0 1 5 6\n\n$`5`$`8`\n mpg cyl disp hp drat wt qsec vs am gear carb\nMaserati Bora 15 8 301 335 3.54 3.57 14.6 0 1 5 8\n\n\n> L <-lapply(split(mtcars, mtcars$gear), function(x) split(x, x$carb))\n> M <-list(); for(i in seq_along(L)) M <- c(M, L[[i]])\n> M\n$`1`\n mpg cyl disp hp drat wt qsec vs am gear carb\nHornet 4 Drive 21.4 6 258.0 110 3.08 3.215 19.44 1 0 3 1\nValiant 18.1 6 225.0 105 2.76 3.460 20.22 1 0 3 1\nToyota Corona 21.5 4 120.1 97 3.70 2.465 20.01 1 0 3 1\n\n$`2`\n mpg cyl disp hp drat wt qsec vs am gear carb\nHornet Sportabout 18.7 8 360 175 3.15 3.440 17.02 0 0 3 2\nDodge Challenger 15.5 8 318 150 2.76 3.520 16.87 0 0 3 2\nAMC Javelin 15.2 8 304 150 3.15 3.435 17.30 0 0 3 2\nPontiac Firebird 19.2 8 400 175 3.08 3.845 17.05 0 0 3 2\n\n$`3`\n mpg cyl disp hp drat wt qsec vs am gear carb\nMerc 450SE 16.4 8 275.8 180 3.07 4.07 17.4 0 0 3 3\nMerc 450SL 17.3 8 275.8 180 3.07 3.73 17.6 0 0 3 3\nMerc 450SLC 15.2 8 275.8 180 3.07 3.78 18.0 0 0 3 3\n\n$`4`\n mpg cyl disp hp drat wt qsec vs am gear carb\nDuster 360 14.3 8 360 245 3.21 3.570 15.84 0 0 3 4\nCadillac Fleetwood 10.4 8 472 205 2.93 5.250 17.98 0 0 3 4\nLincoln Continental 10.4 8 460 215 3.00 5.424 17.82 0 0 3 4\nChrysler Imperial 14.7 8 440 230 3.23 5.345 17.42 0 0 3 4\nCamaro Z28 13.3 8 350 245 3.73 3.840 15.41 0 0 3 4\n\n$`1`\n mpg cyl disp hp drat wt qsec vs am gear carb\nDatsun 710 22.8 4 108.0 93 3.85 2.320 18.61 1 1 4 1\nFiat 128 32.4 4 78.7 66 4.08 2.200 19.47 1 1 4 1\nToyota Corolla 33.9 4 71.1 65 4.22 1.835 19.90 1 1 4 1\nFiat X1-9 27.3 4 79.0 66 4.08 1.935 18.90 1 1 4 1\n\n$`2`\n mpg cyl disp hp drat wt qsec vs am gear carb\nMerc 240D 24.4 4 146.7 62 3.69 3.190 20.00 1 0 4 2\nMerc 230 22.8 4 140.8 95 3.92 3.150 22.90 1 0 4 2\nHonda Civic 30.4 4 75.7 52 4.93 1.615 18.52 1 1 4 2\nVolvo 142E 21.4 4 121.0 109 4.11 2.780 18.60 1 1 4 2\n\n$`4`\n mpg cyl disp hp drat wt qsec vs am gear carb\nMazda RX4 21.0 6 160.0 110 3.90 2.620 16.46 0 1 4 4\nMazda RX4 Wag 21.0 6 160.0 110 3.90 2.875 17.02 0 1 4 4\nMerc 280 19.2 6 167.6 123 3.92 3.440 18.30 1 0 4 4\nMerc 280C 17.8 6 167.6 123 
3.92 3.440 18.90 1 0 4 4\n\n$`2`\n mpg cyl disp hp drat wt qsec vs am gear carb\nPorsche 914-2 26.0 4 120.3 91 4.43 2.140 16.7 0 1 5 2\nLotus Europa 30.4 4 95.1 113 3.77 1.513 16.9 1 1 5 2\n\n$`4`\n mpg cyl disp hp drat wt qsec vs am gear carb\nFord Pantera L 15.8 8 351 264 4.22 3.17 14.5 0 1 5 4\n\n$`6`\n mpg cyl disp hp drat wt qsec vs am gear carb\nFerrari Dino 19.7 6 145 175 3.62 2.77 15.5 0 1 5 6\n\n$`8`\n mpg cyl disp hp drat wt qsec vs am gear carb\nMaserati Bora 15 8 301 335 3.54 3.57 14.6 0 1 5 8\n\n> lapply(M, function(x)cor(x$mpg, x$disp))\n$`1`\n[1] -0.3128988\n\n$`2`\n[1] 0.9556646\n\n$`3`\n[1] NA\n\n$`4`\n[1] -0.6492474\n\n$`1`\n[1] -0.8963193\n\n$`2`\n[1] -0.7940624\n\n$`4`\n[1] -0.92976\n\n$`2`\n[1] -1\n\n$`4`\n[1] NA\n\n$`6`\n[1] NA\n\n$`8`\n[1] NA\n\nWarning message:\nIn cor(x$mpg, x$disp) : the standard deviation is zero\n\n#####Edit 5th of August:\n#Or more straight-forward:\n> lapply(split(mtcars, interaction(mtcars$carb, mtcars$gear)),function(x)cor(x$disp,x$mpg))\n$`1.3`\n[1] -0.3128988\n\n$`2.3`\n[1] 0.9556646\n\n$`3.3`\n[1] NA\n\n$`4.3`\n[1] -0.6492474\n\n$`6.3`\n[1] NA\n\n$`8.3`\n[1] NA\n\n$`1.4`\n[1] -0.8963193\n\n$`2.4`\n[1] -0.7940624\n\n$`3.4`\n[1] NA\n\n$`4.4`\n[1] -0.92976\n\n$`6.4`\n[1] NA\n\n$`8.4`\n[1] NA\n\n$`1.5`\n[1] NA\n\n$`2.5`\n[1] -1\n\n$`3.5`\n[1] NA\n\n$`4.5`\n[1] NA\n\n$`6.5`\n[1] NA\n\n$`8.5`\n[1] NA\n" }, { "alpha_fraction": 0.7904360294342041, "alphanum_fraction": 0.8045006990432739, "avg_line_length": 78.11111450195312, "blob_id": "bb3a6d2521d14faa4a4f425999becaa8536c45b2", "content_id": "c2a4f94fd4edded8ab80d0025d0c8629bb1c4174", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 711, "license_type": "no_license", "max_line_length": 497, "num_lines": 9, "path": "/Protokolle/2016/Protokoll 2016-05-04.md", "repo_name": "plannapus/MfN-Code-Clinic", "src_encoding": "UTF-8", "text": "10th meeting - 4th May 2016\n----\n\nAttendance: 7\n\nSebastian explained his issue which was that he need to perform a specific operation in various files scattered across various folders. I showed how to use function \"dir\" i. e. dir(pattern=\"pattern.txt\", recursive=TRUE) to search recursively throughout the folders which files followed a specific pattern. Then with the vector of file names, extract a specific pattern (here a species name) from them using functions strsplit and gsub. The occasion to introduce the concept of regular expressions.\nWe also naturally discuss about the alternative of dealing with all that information using a database instead of an arborescence of folders.\n\nNext meeting the 19th." }, { "alpha_fraction": 0.7621738910675049, "alphanum_fraction": 0.7765217423439026, "avg_line_length": 73.19355010986328, "blob_id": "4557b1a5d28c82e69b65d1d3cf5e50cd620e9894", "content_id": "c2a8e7790ca7bfdd2b0b4308ef9762c3cecd99b9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2300, "license_type": "no_license", "max_line_length": 423, "num_lines": 31, "path": "/Protokolle/2015/Protokoll 2015-11-24.md", "repo_name": "plannapus/MfN-Code-Clinic", "src_encoding": "UTF-8", "text": "1st meeting - 24 November 2015\n----\nAttendance: 11\n\n1. Introductory remark by two of the initiators of the club, stating its primary purposes: providing help with programming (debugging, but also help with basic implementation, tips on specific aspect of programming) and brainstorming algorithms and more generally methodologies.\n\n2. 
Presentation of attendants. 11 attendants with the following breakdowns:\n\t- Programming language:\n\t\t* R \t\t10 (inc. 5 advanced users)\n\t\t* SQL\t\t3\n\t\t* Python\t2\n\t\t* LaTeX\t2\n\t\t* Matlab\t1\n\t\t* VBA\t\t1\n\t- Forschungsbereich:\n\t\t* FB1\t\t8\n\t\t* FB3\t\t3\n\t- 5 out of 11 attendants were doctoral students.\nThe interests of the group covered a wide variety of subjects: geometric morphometrics, phylogenetic analyses, data management, data mining, spatial analysis, ...\n\n3. Various attendants mentioned: the possibility for the club to help create collaborations between FB1 and FB3; and the possibility for the club to provide help for people taking online programming courses (such as Coursera).\n\n4. Several attendants came with programming issues they needed help to resolve. The group then split into several smaller groups trying to resolve said issues:\n\t- how to control the graphical properties (width, height, aspect ratio, resolution, etc.) of a pdf generated using R's ggplot;\n\t- how to achieve finer control over / how to interpret the result of a PCA resulting from semi-landmark-based geometric morphometrics;\n\t- how to transform the result of a spatial interpolation using R package akima into a shapefile.\n\n5. The structure of future meetings was then discussed:\n\t- the club will be held twice a month (the next meeting's date is as yet undetermined, but probably in the first or second week of December).\n\t- in a way similar to the Evolutionary Biology Journal Club, at every meeting one of us could propose a subject to be discussed at the start of the meeting (i. e. with subjects ranging from giving \"tips on how to finely control graphical output using R\" to \"version control of collaborative programming projects using git\", ...). A projector will be brought along at future meetings to allow such \"crash courses\" to be held.\n\t- debugging sensu stricto will be kept for the end of the session, and dealt with as a group, so that everyone can weigh in and learn from the original mistake.\n" }, { "alpha_fraction": 0.7571428418159485, "alphanum_fraction": 0.7839285731315613, "avg_line_length": 55, "blob_id": "27e2403e02013d89085c871597dd5fafaccf81e9", "content_id": "7d0eec9d30080c7c23d7ee2afb31683aa9fb273a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 560, "license_type": "no_license", "max_line_length": 360, "num_lines": 10, "path": "/Protokolle/2017/Protokoll 2017-08-30.md", "repo_name": "plannapus/MfN-Code-Clinic", "src_encoding": "UTF-8", "text": "30th Meeting - 30th of August 2017\n----\nAttendance: 6\n\nWe discussed methods to estimate missing values in order to be able to input the maximum number of specimens/variables in a PCA (for Antoine's mesosaurs). Inferring values based on single-bone models, or iterative imputation based on the PCA run (as in PaST; Ilin & Raiko 2010), was discussed. 
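\n\nFor the archive, a minimal R sketch of the iterative-imputation idea (my own toy illustration, not the code we ran; `dat` is a hypothetical specimens-by-measurements matrix with NAs):\n\n\tX <- as.matrix(dat)\n\tmiss <- is.na(X)\n\tX[miss] <- colMeans(X, na.rm=TRUE)[col(X)[miss]] # seed the gaps with column means\n\tfor(it in 1:100){ # in practice iterate until the imputed values stabilise, not a fixed count\n\t\tp <- prcomp(X, scale.=TRUE)\n\t\trec <- p$x[,1:2] %*% t(p$rotation[,1:2]) # low-rank reconstruction on the first 2 axes\n\t\trec <- sweep(sweep(rec, 2, p$scale, \"*\"), 2, p$center, \"+\") # undo scaling and centering\n\t\tX[miss] <- rec[miss] # update only the missing cells\n\t}\n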
Error propagation in the PCA as a result was also discussed.\n\nLink to data imputation article https://www.r-bloggers.com/imputing-missing-data-with-r-mice-package/\n\n\nNext meeting on the 13th of September.\n" }, { "alpha_fraction": 0.6381179094314575, "alphanum_fraction": 0.6817340850830078, "avg_line_length": 33.090091705322266, "blob_id": "3cad8738f398feee4c202640b60ead2db495c563", "content_id": "9d7246a17c75923d2128039cdbf166e6fa6153a9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 3783, "license_type": "no_license", "max_line_length": 140, "num_lines": 111, "path": "/Scripts/2016/Code 2016-03-03 R GIS flash course.r", "repo_name": "plannapus/MfN-Code-Clinic", "src_encoding": "UTF-8", "text": "# Barry 'Spacedman' Rowlingson CheatSheet:\n# http://www.maths.lancs.ac.uk/~rowlings/Teaching/UseR2012/cheatsheet.html\n#\n# GIS in R: overview of many fancy tools\n# http://pakillo.github.io/R-GIS-tutorial/\n#\n# Natural Earth: depository of free geographic maps (physical + human geography) as shapefiles\n# http://www.naturalearthdata.com/\n#\n# GADM: spatial data for precise human geography (all administrative levels)\n# http://www.gadm.org/\n#\n# GPlates: software for paleogeographic reconstructions\n# http://www.gplates.org/\n#\n\n\n################### Shapefiles ###########################\nlibrary(rgdal)\n\ndownload.file(\"http://www.naturalearthdata.com/http//www.naturalearthdata.com/download/110m/physical/ne_110m_coastline.zip\",\"coastline.zip\")\nunzip(\"coastline.zip\")\ncoast <- readOGR(dsn=\".\",layer=\"ne_110m_coastline\")\n#coast <- readShapeSpatial(\"ne_110m_coastline.shp\")\nplot(coast)\ncoast@data\ncoast@bbox\ncoast@proj4string\nplot(coast, col=1:10)\nplot(coast[50,],lwd=4,add=TRUE, col=\"red\")\n\ndownload.file(\"http://www.naturalearthdata.com/http//www.naturalearthdata.com/download/110m/physical/ne_110m_land.zip\",\"land.zip\")\nunzip(\"land.zip\")\nland <- readOGR(dsn=\".\",layer=\"ne_110m_land\")\nplot(land, col=1:10)\n\n# Creating a simple shapefile\nhome <- data.frame(Longitude=1.1447, Latitude=44.8411, Value=\"My Hometown\", Another_Value=24)\nhome <- SpatialPointsDataFrame(home[,1:2], data=home)\nplot(coast)\nplot(home, add=TRUE, pch=19, col=\"red\")\n\nwriteOGR(home, dsn=\".\", layer=\"home\", driver=\"ESRI Shapefile\")\n\nproj4string(home) <- CRS(\"+proj=longlat\")\nwriteOGR(home, dsn=\"home.kml\", layer=\"Me\", driver=\"KML\")\nreadOGR(dsn=\"home.kml\",layer=\"Me\")\n\n# Checking which formats are available\nogrDrivers()\ngdalDrivers()\n\n# Creating a more complex shapefile\nlon <- seq(-180,180,by=1)\nlat <- rep(-30,length=length(lon))\nab <- cbind(lon,lat)\nAB <- Line(ab)\nlAB <- Lines(list(AB), ID=\"AB\")\nlAB <- SpatialLines(list(lAB), proj4string=CRS(\"+proj=longlat\"))\nplot(coast)\nplot(lAB, add=TRUE, col=\"red\", lwd=2)\n\nplot(coast)\nlines(ab, col=\"red\", lwd=2)\n\nlAB_laea <- spTransform(lAB,CRS(\"+proj=laea +lat_0=-90 +lon_0=0\"))\ncoast_laea <- spTransform(coast,CRS(\"+proj=laea +lat_0=-90 +lon_0=0\"))\nplot(coast_laea)\nplot(lAB_laea,add=TRUE,col=\"red\",lwd=2)\n\nquartz(height=4, width=4)\npar(mar=c(0,0,0,0))\nplot(lAB_laea, col=\"red\", lwd=2,xaxs=\"i\",yaxs=\"i\")\nplot(coast_laea,add=TRUE)\n\n################### Rasters ################################\nlibrary(raster)\ndownload.file(\"http://www.naturalearthdata.com/http//www.naturalearthdata.com/download/50m/raster/NE1_50M_SR.zip\",\"raster.zip\")\nunzip(\"raster.zip\")\nworld <- 
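\n# (note added) 'raster' reads the GeoTIFF header only here; cell values are pulled from disk on demand.\n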
raster(\"NE1_50M_SR/NE1_50M_SR.tif\")\nplot(world)\nwriteRaster(world, \"world.grd\")\n\n################### NetCDF ################################\nlibrary(ncdf4)\n\nyest <- Sys.Date()-1\nurl <- \"ftp://podaac-ftp.jpl.nasa.gov/allData/ghrsst/data/L4/GLOB/UKMO/OSTIA/2016/062/20160302-UKMO-L4HRfnd-GLOB-v01-fv02-OSTIA.nc.bz2\"\ndownload.file(url,destfile=\"sst.bz2\")\n#system(sprintf(\"bunzip2 %s/sst.bz2\", getwd()))\n\nsst <- nc_open(\"sst\")\nan_sst <- ncvar_get(sst, \"analysed_sst\")\nlon <- ncvar_get(sst, \"lon\")\nlat <- ncvar_get(sst, \"lat\")\nnc_close(sst)\n\nlower_res <- an_sst[seq(1,nrow(an_sst),by=10),seq(1,ncol(an_sst),by=10)]\nlon <- lon[seq(1,nrow(an_sst),by=10)]\nlat <- lat[seq(1,ncol(an_sst),by=10)]\ncelsius <- lower_res - 273.15\n\npar(mar=c(0,0,0,0))\nbreaks <- c(-100, seq(-20,40,by=2), 100)\nimage(x=lon, y=lat, z=celsius, col=rev(heat.colors(length(breaks)-1)), breaks=breaks, xlim=c(-180,180), ylim=c(-90,90))\n\nsst_list <- list(x=lon, y=lat, z=celsius) #As a list\nsst_grd <- image2Grid(sst_list) #As a SpatialGridDataFrame\nwriteGDAL(sst_grd, \"sst.tif\", drivername=\"GTiff\")\nsst_shp <- as(sst_grd,\"SpatialPolygonsDataFrame\") #As a SpatialPolygonsDataFrame\nwriteOGR(sst_shp, dsn=\".\", layer=\"sst\", driver=\"ESRI Shapefile\")" }, { "alpha_fraction": 0.5876288414001465, "alphanum_fraction": 0.7525773048400879, "avg_line_length": 96, "blob_id": "7bdc620220d4d41844e929007a26adec625e05ee", "content_id": "2be703bb78d2f76adbfbcb8e3173a2d94f458132", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 97, "license_type": "no_license", "max_line_length": 96, "num_lines": 1, "path": "/Scripts/2017/Code 2017-07-12/README.md", "repo_name": "plannapus/MfN-Code-Clinic", "src_encoding": "UTF-8", "text": "This code uses the supplementary files of Rojas et al 2017 (http://dx.doi.org/10.1130/G38944.1).\n" }, { "alpha_fraction": 0.6897810101509094, "alphanum_fraction": 0.7810218930244446, "avg_line_length": 44.5, "blob_id": "f37de849f7ce983cfd343aa65dd2fd50eecbb3fd", "content_id": "8c3861a0f54fdf5f8863ba1f4ce04921d69d5eab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 274, "license_type": "no_license", "max_line_length": 121, "num_lines": 6, "path": "/Protokolle/2017/Protokoll 2017-03-15.md", "repo_name": "plannapus/MfN-Code-Clinic", "src_encoding": "UTF-8", "text": "22nd Meeting - 15th of March 2017\n----\nAttendance: 5\n\nWe talked about the Hedman algorithm (Hedman 2010; Lloyd et al 2016b) as another method for time\ncalibrating phylogenies and compare this method to the methods Neil discussed previously at the Code Clinik (2016-05-18). \n" }, { "alpha_fraction": 0.7647058963775635, "alphanum_fraction": 0.7809954881668091, "avg_line_length": 91.08333587646484, "blob_id": "197645644f72867cb6c90c62a04fd356ca451b61", "content_id": "6f74d08626c39622dad057ae27dfffedaa3e463a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1105, "license_type": "no_license", "max_line_length": 343, "num_lines": 12, "path": "/Protokolle/2016/Protokoll 2016-12-07.md", "repo_name": "plannapus/MfN-Code-Clinic", "src_encoding": "UTF-8", "text": "17th meeting - 7th of December 2016\n----\nAttendance: 6\n\nWe continued discussing about git. Sascha & Falk explained to us how branching, forking & merging works, i. e. the collaborative aspect of git. 
We then looked at some scripts in R and Python connecting and transacting with SQL databases. Falk brought up the subject of object-oriented programming (in Python naturally, but also in R using R6).\n\nAs it was the 1st birthday of the Code Clinic, I wanted to brought up a few questions regarding the club and the organization of the meetings but as few of the regular attendants were here we didn't actually discuss them. Here are some of them, for future references:\n- We need to organize a system so that Code Clinic can happen even if I'm not able to participate (which is not the case right now and explain why we had only 17 meetings in a year).\n- Is 14:30 on Wednesday the best timeslot for the meeting?\n- Is a fortnightly meeting a good rhythm? (we could have weekly, shorter meetings for instance; or monthly longer meetings)\n\nNext meeting will be the 11th of January after journal club (again).\n" }, { "alpha_fraction": 0.755667507648468, "alphanum_fraction": 0.7808564305305481, "avg_line_length": 55.71428680419922, "blob_id": "d76e0e4e0a1742f28c71b15f7731905a31a9f9f3", "content_id": "ade9eda2cff83c76e781ce8c1939b0df599bbfe6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 397, "license_type": "no_license", "max_line_length": 305, "num_lines": 7, "path": "/Protokolle/2017/Protokoll 2017-08-09.md", "repo_name": "plannapus/MfN-Code-Clinic", "src_encoding": "UTF-8", "text": "29th Meeting - 9th of August 2017\n----\nAttendance: 5\n\nAs promised I gave a crash course on how PCA and other classic ordination methods are implemented in R. I also showed an example of the use of CCA by showing the paleobotanical/climatological concept of CLAMP analysis (and my new version of it). We also tried to apply all that to Antoine's mesosaur data.\n\nNext meeting on the 23rd of August.\n" }, { "alpha_fraction": 0.614513635635376, "alphanum_fraction": 0.6531137228012085, "avg_line_length": 30.852458953857422, "blob_id": "a7b912291fb8536c12fba0b4095d89e79e3ff011", "content_id": "541d7341b3439ec6fa95c35f8ec886c693d2647b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 1943, "license_type": "no_license", "max_line_length": 135, "num_lines": 61, "path": "/Scripts/2017/Code 2017-07-12/attempt.R", "repo_name": "plannapus/MfN-Code-Clinic", "src_encoding": "UTF-8", "text": "library(gdata)\nlibrary(reshape)\nlibrary(igraph)\nlibrary(rgdal)\n\n#setwd(\"Rojas et al 2017\")\nalbian <- read.xls(\"2017213_Table DR1.xlsx\",check.names=FALSE,row.names=1,skip=1,sheet=1)\n\nlongitude <- albian[1,]\nlatitude <- albian[2,]\nn_samples <- unlist(albian[4,])\nn_formations <- albian[5,]\n\nB <- albian[-(1:5),]\nS <- colnames(B)\nP <- rownames(B)\n\nG <- graph_from_incidence_matrix(B)\n\nplot(G, vertex.size=(1:2)[1+V(G)$type], vertex.label=NA, vertex.color=c(\"blue\",\"red\")[1+V(G)$type])\n\nb <- B\nb$taxon <- rownames(B)\nadj <- melt(b,id.vars=\"taxon\")\nadj <- adj[adj$value!=0,]\n\nspecies_per_sites <- sapply(S,function(x)adj$taxon[adj[,2]==x])\nCS <- matrix(nr=length(S),nc=length(S))\nfor(i in 1:nrow(CS)){\n\tfor(j in 1:ncol(CS)){\n\t\tif(i!=j){\n\t\t\tCS[i,j] <- length(intersect(species_per_sites[[i]],species_per_sites[[j]]))/(n_samples[i]+n_samples[j])\n\t\t\t}\n\t\t}\n\t}\n# CS <- matrix(nr=length(S),nc=length(S))\n# n_species <- sapply(S, function(x)sum(adj[,2]==x))\n# for(i in 1:nrow(CS)){\n\t# for(j in 1:ncol(CS)){\n\t\t# if(i!=j){\n\t\t\t# CS[i,j] <- 
length(intersect(species_per_sites[[i]],species_per_sites[[j]]))/(n_species[i]+n_species[j])\n\t\t\t# }\n\t\t# }\n\t# }\n\n# Gp <- graph_from_adjacency_matrix(CS,diag=FALSE,weighted=TRUE)\n# cluster_infomap(Gp)\n\nGp2 <- graph_from_adjacency_matrix(CS[rowSums(CS,na.rm=TRUE)!=0,colSums(CS,na.rm=TRUE)!=0],diag=FALSE,weighted=TRUE, mode=\"undirected\")\nci <- cluster_infomap(Gp2, nb.trials=100)\n\nlonlat <- cbind(unlist(longitude),unlist(latitude))\nalbian_map <- readOGR(\".\",\"reconstructed_100.00Ma\",)\npar(mar=c(0,0,0,0))\nplot(albian_map,col=\"grey80\",border=\"grey80\", xaxs=\"i\",yaxs=\"i\",xlim=c(-180,180),ylim=c(-90,90))\npalette(c(\"red\",\"green\",\"cornflowerblue\",\"yellow\",\"darkorange\",\"darkblue\",\"grey50\",\"white\"))\npoints(lonlat[rowSums(CS,na.rm=TRUE)!=0,],bg=membership(ci),pch=22,cex=2)\npoints(lonlat[rowSums(CS,na.rm=TRUE)==0,],bg=\"black\",pch=22,cex=1.5)\n\n\n#plot(Gp2,layout=lonlat[rowSums(CS,na.rm=TRUE)!=0,], rescaled=FALSE, add=TRUE)\n" }, { "alpha_fraction": 0.764011800289154, "alphanum_fraction": 0.7964601516723633, "avg_line_length": 41.375, "blob_id": "3b34ee51a4bfecc3a0e3f9c473049b4b4d38616a", "content_id": "33d5b0710fd9298a7fdeba321ec2b2dd0771ea60", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 339, "license_type": "no_license", "max_line_length": 155, "num_lines": 8, "path": "/Protokolle/2017/Protokoll 2017-02-15.md", "repo_name": "plannapus/MfN-Code-Clinic", "src_encoding": "UTF-8", "text": "20th Meeting - 15th of February 2017\n----\nAttendance: 12\n\nFalko presented us with tools and products proposed by the MfN and the DFN, in particular some substitutes for non-German law compliant tools and services.\nIn particular we discussed the MfN GitLab, GFBio and possibilities for cluster computing.\n\nNext meeting on the 1st of March.\n" }, { "alpha_fraction": 0.754448413848877, "alphanum_fraction": 0.7864768505096436, "avg_line_length": 34.125, "blob_id": "d092db1d5cc3bb0ada1f37405c2279dbcdb7df66", "content_id": "733314a112dfd64d9f80f06021a156abddaf7bfd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 281, "license_type": "no_license", "max_line_length": 180, "num_lines": 8, "path": "/Protokolle/2017/Protokoll 2017-07-05.md", "repo_name": "plannapus/MfN-Code-Clinic", "src_encoding": "UTF-8", "text": "26th Meeting - 5th of July 2017\n----\nAttendance: 5\n\nThis was an impromptu meeting to help out Carolin with understanding the results spewed out by package lme4, and how to enter nested variables and random effects in model formulas.\nWe were unsuccessful.\n\nNext meeting, next week.\n" }, { "alpha_fraction": 0.6865426898002625, "alphanum_fraction": 0.7026805281639099, "avg_line_length": 59.93333435058594, "blob_id": "8659e755a17f70994bd44757bdaf4077db2e2ede", "content_id": "d87638343c4ed9399489f8150d65520d49e31dd9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3656, "license_type": "no_license", "max_line_length": 543, "num_lines": 60, "path": "/Protokolle/2018/Protokoll 2018-02-13.md", "repo_name": "plannapus/MfN-Code-Clinic", "src_encoding": "UTF-8", "text": "40th Meeting - 13th of February 2018\n----\nAttendance: 5\n\nFirst we tried to save a ggplot as an object inside a loop (Don't do that), then we tried to do missing data imputation based on a PCA (can't do that - no vcov possible for prcomp) and finally we tried to see what was wrong with a CSL file read in RMarkdown 
(No idea). All in all, a productive meeting...\n\n\n-----\n**Update 16-02-2018:**\n\nAn explanation of what went wrong in Antoine's code with the ggplot for-loop.\n\nAt the last code clinic, Antoine had the following piece of code, which was intended to save all his ggplots into a list; but when trying to plot them afterwards, the data were wrong:\n\n\tlogplot <- c()\n\tfor(i in 1:30){\n\t  d <- cbind(log(meso_sub[,c(3, i+3)]), species=meso_sub$species)\n\t  mod <- lm(d[,2]~ d[,1])\n\t  logplot$lm[[i]] <- mod\n\t  logplot$plot[[i]] <- ggplot(data=d, aes(d[,1], d[,2]), na.rm=T) +\n\t    geom_point(aes(colour=species), size=2) +\n\t    labs(x=\"log(femur length)\",\n\t      y=paste0(\"log(\", names(d[,2]), \")\")) +\n\t    scale_colour_manual(name=\"species\", values=colSp) +\n\t    theme_bw() +\n\t    geom_abline(intercept=mod[[1]][1], slope=mod[[2]][1], col=\"firebrick\") +\n\t    geom_abline(intercept=mod[[1]][1], slope=1, col=\"grey35\")\n\t}\n\nAntoine just wrote a piece of code that ended up working correctly:\n\n\tlogplot <- mapply(function(x, n){\n\t  mod <- lm(log(x) ~ log(df$femur_lgth))\n\t  p <- qplot(log(df$femur_lgth),log(x), colour=df$species, size=I(2), na.rm=T) +\n\t    labs(x=\"log(femur length)\", y=paste0(\"log(\", n, \")\")) +\n\t    #+ geom_text(aes(label=rownames(df), hjust=-.05))\n\t    scale_colour_manual(name=\"species\", values=colSp) +\n\t    theme_bw() +\n\t    geom_abline(intercept=mod$coef[1], slope=mod$coef[2], col=\"red\")\n\t  message(n)\n\t  return(p)\n\t}, x=df[,-c(1:3)], n=names(df)[-c(1:3)], SIMPLIFY = F)\n\nWhy does the last one work and the first didn't?\nqplot is just a wrapper around ggplot, so it's not that.\nThe difference lies in three key aspects:\n1) the argument `data` is not explicitly called in qplot.\n2) the aesthetic called in the first attempt refers to the columns as d[,1] and d[,2] while their actual names are femur_length and something else similar for the second one.\n3) the second attempt used mapply instead of a for-loop.\n\nSo here's what happened:\n\nThe saved ggplot object has three elements of interest here:\n1) One called `data` that saves the object passed to argument `data`.\n2) One called `plot_env` that saves the name of the environment in which the call was made.\n3) One called `mapping` that saves the aesthetic mapping \"as it was called in the function call\".\n\nSo in the first attempt, the mapping refers to columns \"d[,1]\" and \"d[,2]\" while data is a nameless data frame which has columns called femur_length etc. The function therefore searches for a column called \"d[,1]\", doesn't find anything, and so looks up \"plot_env\", which in this case, because the call was made in a for-loop, just says \"R_Global_Env\", i. e. the environment in which we're working; it then looks for a variable called d there and does the plot using it... except we changed d at each iteration, so it uses the last version of d, which is wrong.\nIn the second attempt however, the aesthetic mapping says that we're plotting df$femur_lgth vs x. It looks at `data`, which contains nothing since there were no explicit calls to `data`, so it then looks at plot_env. 
And here because the call was in `mapply`, it finds a unique environment that only existed for that particular iteration of the loop (true for every `*apply` functions), goes into that environment, finds `x` which is the correct one as it only existed in that small environment and thus plots the correct plot.\n" }, { "alpha_fraction": 0.7936305999755859, "alphanum_fraction": 0.8050955533981323, "avg_line_length": 111.28571319580078, "blob_id": "fadeea721d8cec6877c85fdf30f36bafb8be3747", "content_id": "77864b36cd75511573ec67a6cded9a7a133a86ab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 785, "license_type": "no_license", "max_line_length": 664, "num_lines": 7, "path": "/Protokolle/2016/Protokoll 2016-03-03.md", "repo_name": "plannapus/MfN-Code-Clinic", "src_encoding": "UTF-8", "text": "7th meeting - 3rd march 2016\n----\n\nAttendance: 8\n\nAs promised I presented a short introduction to importing, creating, handling and exporting GIS files in R (with an emphasis on shapefiles, rasters and netCDF files). Script can be found at the end of this protokoll. Sebastian also showed an introduction to higher-level packages performing similar tasks, in particular package dismo that has the added advantage to be able to extract data directly from GBIF as well as maps directly from Google. This, incidentally, triggered a discussion on the legal implications of using such a package (as the GBIF site doesn't allow extracting data without agreeing to terms and conditions this package doesn't even mention).\nNext meeting: 24th of March, with a discussion on knitr and Rmarkdown." }, { "alpha_fraction": 0.5786212086677551, "alphanum_fraction": 0.5910147428512573, "avg_line_length": 31.947368621826172, "blob_id": "629eb4675fb4c711398c23392151cfe34cabde52", "content_id": "9afd0594f84d798fc7564577b1c51f0cb3c96246", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 2582, "license_type": "no_license", "max_line_length": 135, "num_lines": 76, "path": "/Scripts/2017/Code 2017-01-03 foreach_loops_soul_friedman/Soul&Friedman code.R", "repo_name": "plannapus/MfN-Code-Clinic", "src_encoding": "UTF-8", "text": "library(phytools)\r\nlibrary(caper)\r\nlibrary(foreach)\r\n\r\n######Function to perform the method\r\nFandP<-function(timetree, starttime, endtime){\r\n \r\n ttsltree<-NA\r\n try(ttsltree<-timeSliceTree(timetree, sliceTime=endtime, drop.extinct=F, plot=FALSE),silent=TRUE)\r\n if (is.na(ttsltree[1])){\r\n ans<-c(NA,NA,NA,NA,NA)\r\n } else {\r\n ttsltree$node.label<-NA\r\n which_extinct<-extincttable(tree=ttsltree, starttime, endtime)\r\n data<-as.data.frame(cbind(which_extinct$ext.tab,rownames(which_extinct$ext.tab)))\r\n inputdata<-comparative.data(data, phy=ttsltree, V4, vcv=T)\r\n res<-NA\r\n try(res <- phylo.d(data=inputdata, binvar=extinct, permut=1000), silent=FALSE)\r\n if (is.na(res[2])==FALSE){\r\n #stores D, #extinct, #survivors, Brownian p value and random p value\r\n ans<-as.numeric(c(res$DEstimate, res$StatesTable[1], res$StatesTable[2], res$Pval1, res$Pval0))\r\n } else {\r\n ans<-c(NA,NA,NA,NA,NA)\r\n }\r\n }\r\n}\r\n\r\n\r\n######Function to put results in a table\r\n\r\nextincttable<-function(tree, starttime, endtime){\r\n \r\n ext.tab<-matrix(ncol=3, nrow=Ntip(tree))\r\n edgetotip<-match(c(1:Ntip(tree)), tree$edge[,2])\r\n tiplengths<-tree$edge.length[edgetotip]\r\n endrange<-tree$root.time-diag(vcv(tree))\r\n startrange<-endrange+tiplengths\r\n\t\r\n 
rownames(ext.tab)<-tree$tip.label\r\n ext.tab[,1]<-startrange\r\n ext.tab[,2]<-endrange\r\n\r\n for (i in 1:Ntip(tree)){\r\n keytime<-round(ext.tab[i,2],4)\r\n if (keytime<=endtime){\r\n ext.tab[i,3]<-0\r\n }\r\n if (keytime>endtime & keytime<=starttime){\r\n ext.tab[i,3]<-1\r\n }\r\n if (keytime>starttime){\r\n ext.tab[i,3]<-2\r\n }\r\n }\r\n namettd<-names(which(ext.tab[,3]==2))\r\n ttd<-match(namettd, rownames(ext.tab))\r\n if (length(ttd)>0){\r\n ext.tab<-ext.tab[-ttd,]\r\n }\r\n colnames(ext.tab)<- c(\"FAD\", \"LAD\", \"extinct\")\r\n result<-list(ext.tab, tree)\r\n names(result)<-c(\"ext.tab\", \"tree\")\r\n return(result)\r\n}\r\n\r\n\r\n\r\n######Script to carry out function on multiple trees and time bins\r\ntimeslicebins<- #####matrix of begining and end times of time bins\r\nttrees<- #####object containing all the trees you want\r\n\r\n\r\nresults <- foreach (m = 1:length(ttrees), .packages=c('foreach','paleotree','caper','geiger','phangorn')) %do% {\r\n timetree<-ttrees[[m]]\r\n foreach(f=1:(nrow(timeslicebins)), .combine='rbind') %do% FandP(timetree, starttime=timeslicebins[f,1], endtime=timeslicebins[f,2])\r\n}\r\n\r\n" }, { "alpha_fraction": 0.7407407164573669, "alphanum_fraction": 0.770370364189148, "avg_line_length": 53, "blob_id": "f868aed5e7309d9f175e5c893318dd981b0b6691", "content_id": "b5a5cc09145efb07faecb42e8b6cf07e7b429891", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 270, "license_type": "no_license", "max_line_length": 216, "num_lines": 5, "path": "/Protokolle/2018/Protokoll 2018-03-06.md", "repo_name": "plannapus/MfN-Code-Clinic", "src_encoding": "UTF-8", "text": "41th Meeting - 6th of March 2018\n----\nAttendance: 3\n\nWe went back to that CSL bibliographic files in RMarkdown to see a solution (using a JSON file as dictionary for journal abbreviations). 
And I gave a short crash course on how to find R function sources (see Script).\n" }, { "alpha_fraction": 0.7077922224998474, "alphanum_fraction": 0.7662337422370911, "avg_line_length": 29.799999237060547, "blob_id": "e550c3674d9bd01cd9305b40e8d6319d1099f737", "content_id": "721217a3611787be71959aecb37fa1d0a0acc941", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 154, "license_type": "no_license", "max_line_length": 99, "num_lines": 5, "path": "/Protokolle/2018/Protokoll 2018-04-17.md", "repo_name": "plannapus/MfN-Code-Clinic", "src_encoding": "UTF-8", "text": "42th Meeting - 17th of April 2018\n----\nAttendance: 3\n\nWe discussed vector graphics production in R, and the fine details needed to make them paper-ready.\n" }, { "alpha_fraction": 0.7377777695655823, "alphanum_fraction": 0.7733333110809326, "avg_line_length": 44, "blob_id": "03bea36e9db4dd1bec35d2f13d8a81aabbe273d3", "content_id": "db3930f179bc221ca8b0390361972fbd84b783b9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 225, "license_type": "no_license", "max_line_length": 169, "num_lines": 5, "path": "/Protokolle/2018/Protokoll 2018-01-03.md", "repo_name": "plannapus/MfN-Code-Clinic", "src_encoding": "UTF-8", "text": "37th Meeting - 3rd of January 2018\n----\nAttendance: 4\n\nThomas Thomas Pfuhl introduced the \"Generic Data Module\" which is been developed at the MfN as a service for storing research data safely and provide a DOI for those data containers.\n" }, { "alpha_fraction": 0.7928118109703064, "alphanum_fraction": 0.8033826351165771, "avg_line_length": 104.11111450195312, "blob_id": "524fb811e45d34d8f69b6e6825dba2e4293719d4", "content_id": "a8f00cc995c55af2b80cc2c5932174749f8907db", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 946, "license_type": "no_license", "max_line_length": 582, "num_lines": 9, "path": "/Protokolle/2016/Protokoll 2016-06-09.md", "repo_name": "plannapus/MfN-Code-Clinic", "src_encoding": "UTF-8", "text": "12th meeting - 9th June 2016\n----\nAttendance: 8\n\nBrandon introduced package 'bayou' which fits Ornstein-Uhlenbeck models to phylogenetic data using Bayesian inference. He had several issues with it, including the fact that the branches numbers did not correspond to the order of elements of `edge` from the original 'ape'-style tree.\n\nAfter this, I showed a work-in-progress of mine, a code written based on an idea of Clement, simulating phylogenies based on given speciation/extinction rates, to test if the fact that some ecologies (here specifically, habitats) having intrinsically different speciation rates, caused a bias on the reconstruction of the ancestral ecology. (Code attached). This was a pure review, with valuable feedbacks by the other attendants on the algorithm and on the biology behind the code. 
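\n\nFor those reading along, the heart of the attached code is a per-lineage, per-time-step Poisson draw, roughly:\n\n\tORG <- rpois(1, speciation_rate) # does the lineage speciate this step?\n\tEXT <- rpois(1, extinction_rate) # does it go extinct?\n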
Package 'diversitree' was mentioned as a package allowing similar simulations and tests to be done.\n\nNext meeting 23th of June.\n" }, { "alpha_fraction": 0.7064676880836487, "alphanum_fraction": 0.7512437701225281, "avg_line_length": 39.20000076293945, "blob_id": "9dc626d739c53e3421b36e5a0af8f659ae4ae616", "content_id": "1fc017c6ebac948320b6a4954c0b421fa1535a7d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 201, "license_type": "no_license", "max_line_length": 143, "num_lines": 5, "path": "/Protokolle/2017/Protokoll 2017-11-22.md", "repo_name": "plannapus/MfN-Code-Clinic", "src_encoding": "UTF-8", "text": "35th Meeting - 22th of November 2017\n----\nAttendance: 5\n\nI showed some examples of makefile i used to compile latex files and their R-based dependencies (i. e. plots, tables etc.). See scripts folder.\n" }, { "alpha_fraction": 0.7535545229911804, "alphanum_fraction": 0.7914692163467407, "avg_line_length": 41.20000076293945, "blob_id": "254b7e268ec1f6776b1241f5110d18396f3b6fc2", "content_id": "3d91d243b1810a0a13dc95e48a713a204887db99", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 211, "license_type": "no_license", "max_line_length": 154, "num_lines": 5, "path": "/Protokolle/2016/Protokoll 2016-11-02.md", "repo_name": "plannapus/MfN-Code-Clinic", "src_encoding": "UTF-8", "text": "16th meeting - 2nd of November 2016\n----\nAttendance: 5\n\nFirst hands-on experience with Git and GitHub. Managing version control, uploading RMarkdown files and csv data. GUI GitKraken. Structure of repositories.\n" }, { "alpha_fraction": 0.7399103045463562, "alphanum_fraction": 0.7892376780509949, "avg_line_length": 30.85714340209961, "blob_id": "7d615dc6126f79f6360e48f1aefa70b7e5c71e4d", "content_id": "1446061db0fa42ae20168810ec27f3273702f221", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 223, "license_type": "no_license", "max_line_length": 139, "num_lines": 7, "path": "/Protokolle/2017/Protokoll 2017-05-10.md", "repo_name": "plannapus/MfN-Code-Clinic", "src_encoding": "UTF-8", "text": "24th Meeting - 10th of May 2017\n----\nAttendance: 4\n\nCarolin showed us the R package gstudio (spatial analysis of population genetics) and we discussed about suitable classification algorithm.\n\nNext meeting the 24th of May.\n" }, { "alpha_fraction": 0.6633663177490234, "alphanum_fraction": 0.7458745837211609, "avg_line_length": 42.28571319580078, "blob_id": "388fb6b5f3c85155b293dc83dc0927dabfee8b47", "content_id": "3c8a35c8d4230589dbb4df1d9a5fd7606cc17f89", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 303, "license_type": "no_license", "max_line_length": 220, "num_lines": 7, "path": "/Protokolle/2017/Protokoll 2017-03-01.md", "repo_name": "plannapus/MfN-Code-Clinic", "src_encoding": "UTF-8", "text": "21st Meeting - 1st of March 2017\n----\nAttendance: 6\n\nWe walked through a script from [Soul & Friedman 2017](http://dx.doi.org/10.1111/pala.12274), dealing with stats on phylogenetic trees, that was using a `foreach` construct. 
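\n\nFor anyone unfamiliar with the construct, its general shape is as follows (a toy example, not taken from the paper):\n\n\tlibrary(foreach)\n\tres <- foreach(f = 1:4, .combine = 'rbind') %do% data.frame(f = f, sq = f^2)\n\nSwapping `%do%` for `%dopar%` (with a registered parallel backend) is what makes the construct attractive.\n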
We discussed package `foreach`, as well as phylo-stats.\n\nNext meeting 15th of March.\n" }, { "alpha_fraction": 0.741830050945282, "alphanum_fraction": 0.7777777910232544, "avg_line_length": 50, "blob_id": "3eeb266a805798b962798c39641f949c0896bf8b", "content_id": "c9505773eaf32ba6492a132739ef5be8458f82fa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 306, "license_type": "no_license", "max_line_length": 164, "num_lines": 6, "path": "/Protokolle/2017/Protokoll 2017-04-26.md", "repo_name": "plannapus/MfN-Code-Clinic", "src_encoding": "UTF-8", "text": "23rd Meeting - 26th of April 2017\n----\nAttendance: 4\n\nWe discussed about graphical parameters, and advanced interactive graphs, for Python (Matplotlib) and for R (ggplot, base R, shiny was also mentioned by not shown).\nNext meeting will be on the 10th of May and we will be discussing population genetics.\n" }, { "alpha_fraction": 0.7575757503509521, "alphanum_fraction": 0.7575757503509521, "avg_line_length": 27.600000381469727, "blob_id": "ad2ad16789a0fda09d2519c45f7bb7a3ac2baa46", "content_id": "501cf3a7e8f8f22d07a98540347690e44e36d884", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 430, "license_type": "no_license", "max_line_length": 91, "num_lines": 15, "path": "/README.md", "repo_name": "plannapus/MfN-Code-Clinic", "src_encoding": "UTF-8", "text": "## Museum für Naturkunde's 'Code Clinic' Programming Club\n\nRepository to store meeting's reports, code, etc. \n\nContact me internally if you wish to be added as collaborator, so you can add/modify files.\n\nWhen adding new files, please remember this is a public repository.\n\n### Order of the Day.md\n\nFeel free to add a theme you want to see discussed during a future meeting in that file.\n\n### Next meeting\n\n*on momentary hiatus*\n" }, { "alpha_fraction": 0.7473683953285217, "alphanum_fraction": 0.7894737124443054, "avg_line_length": 37, "blob_id": "26b79e2eb251165a740ee8dd74e4697f5e6173ca", "content_id": "6defa2e46c1f84e8360d4de4f1535ade0221856c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 190, "license_type": "no_license", "max_line_length": 133, "num_lines": 5, "path": "/Protokolle/2018/Protokoll 2018-02-06.md", "repo_name": "plannapus/MfN-Code-Clinic", "src_encoding": "UTF-8", "text": "39th Meeting - 6th of February 2018\n----\nAttendance: 3\n\nPackage \"mice\": trying to make sense of errors when performing a missing data imputation using bayesian linear regression imputation.\n" }, { "alpha_fraction": 0.7571884989738464, "alphanum_fraction": 0.7923322916030884, "avg_line_length": 61.599998474121094, "blob_id": "0216c292c4da4dd1081aec5f7cc3f898e97fc83d", "content_id": "908c0b4352c09c18995a945b6ac2364ff295b69b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 313, "license_type": "no_license", "max_line_length": 256, "num_lines": 5, "path": "/Protokolle/2016/Protokoll 2016-10-12.md", "repo_name": "plannapus/MfN-Code-Clinic", "src_encoding": "UTF-8", "text": "15th meeting - 12th of October 2016\n----\nAttendance: 6\n\nShort introduction to python: importing modules; differences between lists, tuples, dictionaries; list comprehension; basic control flow; explicit type casting; difference between R and python, between IDE and script environment, between python 2 and 3,...\n" }, { "alpha_fraction": 0.5792291164398193, 
"alphanum_fraction": 0.5942184329032898, "avg_line_length": 39.60869598388672, "blob_id": "b296f231755f10387e42152662b8379bb2913b24", "content_id": "1c5b8d153c44c3fe0f5adf143be5fbdf05eb5859", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 1868, "license_type": "no_license", "max_line_length": 118, "num_lines": 46, "path": "/Scripts/2017/Code 2017-10-18 - clamp.R", "repo_name": "plannapus/MfN-Code-Clinic", "src_encoding": "UTF-8", "text": "clamp <- function(physio, meteo, fossils, method=\"new\",smooth_method=\"gp\"){\n require(vegan)\n require(mgcv)\n if(any(colnames(fossils)!=colnames(physio))) stop(\"colnames not identical in physio and fossils datasets.\")\n if(!method%in%c(\"new\",\"classic\")) stop(\"method not implemented.\")\n A <- cca(physio ~ as.matrix(meteo))\n n <- A$CCA$rank\n if(length(A$CCA$alias)){\n alias <- gsub(\"as.matrix\\\\(meteo\\\\)\",\"\",A$CCA$alias)\n meteo <- meteo[,!colnames(meteo)%in%alias]\n cat(sprintf(\"Some meteorological parameters were ignored:\\n%s\\n\",paste(alias,collapse=\", \")))\n }\n f_cca <- predict(A, fossils, \"wa\")\n \n clamp <- matrix(0, ncol=n, nrow=nrow(fossils), dimnames=list(rownames(fossils),colnames(meteo)))\n stdev <- double(n)\n names(stdev) <- colnames(meteo)\n fitted <- list()\n \n if(method==\"classic\"){\n env <- A$CCA$biplot[,1:4]\n site <- A$CCA$wa[,1:4]\n scores <- t(apply(site,1,function(x)apply(env,1,function(y)sum(x*y)/sqrt(sum(y^2)))))\n score_fossil <- t(apply(f_cca[,1:4,drop=FALSE],1,function(x)apply(env,1,function(y)sum(x*y)/sqrt(sum(y^2)))))\n for(i in 1:nrow(env)){\n s <- scores[,i]\n s2 <- s^2\n fitted[[i]] <- lm(meteo[,i]~s+s2)\n X <- fitted[[i]]$coef\n predicted <- X[1]+X[2]*s+X[3]*s2\n stdev[i] <- sd(sqrt((predicted - meteo[,i])^2))\n clamp[,i] <- X[1]+X[2]*score_fossil[,i]+X[3]*score_fossil[,i]^2\n }\n }else if(method==\"new\"){\n for(i in 1:n){\n w <- as.data.frame(A$CCA$wa)\n f <- paste(sprintf(\"meteo[,%i] ~ \",i),paste(sprintf(\"s(%s, bs='%s')\",colnames(w),smooth_method),collapse=\" + \"))\n fitted[[i]] <- gam(as.formula(f),data=w)\n clamp[,i] <- predict.gam(fitted[[i]],as.data.frame(f_cca))\n stdev[i] <- sd(sqrt((fitted[[i]]$fitted.values - meteo[,i])^2))\n }\n }\n \n names(fitted) <- colnames(meteo)\n list(estimates=clamp, stdev=stdev, cca=A, fit=fitted)\n}\n" }, { "alpha_fraction": 0.5608240962028503, "alphanum_fraction": 0.5712884068489075, "avg_line_length": 36.76543045043945, "blob_id": "3eed2090bcf6fc4421295f0bed4617ade32eb289", "content_id": "73e4e5c1e3e559b0d68f26743e9dd0eaf2017dbf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 3058, "license_type": "no_license", "max_line_length": 139, "num_lines": 81, "path": "/Scripts/2017/Code 2017-01-03 foreach_loops_soul_friedman/Additional infos/Script to measure D.R", "repo_name": "plannapus/MfN-Code-Clinic", "src_encoding": "UTF-8", "text": "#Scripts to calculate Fritz and Purvis' D for a clade\n#Phylogenies that are input must have an associated root age in Ma and be scaled to time\n#timebins must be non-overlapping and consecutive\n\nrequire(paleotree)\nrequire(caper)\nrequire(foreach)\n\n##########################################\n##########################################\nextincttable<-function(tree, starttime, endtime){\n \n ext.tab<-matrix(ncol=3, nrow=Ntip(tree))\n edgetotip<-match(c(1:Ntip(tree)), tree$edge[,2])\n tiplengths<-tree$edge.length[edgetotip]\n endrange<-tree$root.time-diag(vcv(tree))\n startrange<-endrange+tiplengths\n\t\n 
rownames(ext.tab)<-tree$tip.label\n ext.tab[,1]<-startrange\n ext.tab[,2]<-endrange\n\n for (i in 1:Ntip(tree)){\n keytime<-round(ext.tab[i,2],4)\n if (keytime<=endtime){\n ext.tab[i,3]<-0\n }\n if (keytime>endtime & keytime<=starttime){\n ext.tab[i,3]<-1\n }\n if (keytime>starttime){\n ext.tab[i,3]<-2\n }\n }\n namettd<-names(which(ext.tab[,3]==2))\n ttd<-match(namettd, rownames(ext.tab))\n if (length(ttd)>0){\n ext.tab<-ext.tab[-ttd,]\n }\n colnames(ext.tab)<- c(\"FAD\", \"LAD\", \"extinct\")\n result<-list(ext.tab, tree)\n names(result)<-c(\"ext.tab\", \"tree\")\n return(result)\n}\n\nFandP<-function(timetree, starttime, endtime){\n \n ttsltree<-NA\n try(ttsltree<-timeSliceTree(timetree, sliceTime=endtime, drop.extinct=F, plot=FALSE),silent=TRUE)\n if (is.na(ttsltree[1])){\n ans<-c(NA,NA,NA,NA,NA)\n } else {\n ttsltree$node.label<-NA\n which_extinct<-extincttable(tree=ttsltree, starttime, endtime)\n data<-as.data.frame(cbind(which_extinct$ext.tab,rownames(which_extinct$ext.tab)))\n inputdata<-comparative.data(data, phy=ttsltree, V4, vcv=T)\n res<-NA\n try(res <- phylo.d(data=inputdata, binvar=extinct, permut=1000), silent=FALSE)\n if (is.na(res[2])==FALSE){\n #stores D, #extinct, #survivors, Brownian p value and random p value\n ans<-as.numeric(c(res$DEstimate, res$StatesTable[1], res$StatesTable[2], res$Pval1, res$Pval0))\n } else {\n ans<-c(NA,NA,NA,NA,NA)\n }\n }\n}\n\n################################################################\n################################################################\n#uploading the data into R\n\ntimeslicebins<-... #table of age in Ma of the start and end of timeslices from earliest to most recent\nttrees<-... #Set of timescaled phylogenies to be analysed\n\n############################################\n############################################\n#calculating D, produces a list of matrices, rows in matrices correspond to timeslices, each matix corresponds to one timescaled phylogeny \nresults <- foreach (m = 1:length(ttrees), .packages=c('foreach','paleotree','caper','geiger','phangorn')) %do% {\n timetree<-ttrees[[m]]\n foreach(f=1:(nrow(timeslicebins)), .combine='rbind') %do% FandP(timetree, starttime=timeslicebins[f,1], endtime=timeslicebins[f,2])\n}" }, { "alpha_fraction": 0.540962815284729, "alphanum_fraction": 0.5793918967247009, "avg_line_length": 26.546510696411133, "blob_id": "133d2dbee8958e1db9c593193f4ee7062f585c9a", "content_id": "d83503391a6e111c47b6c3907966ca2a9c21c343", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 2368, "license_type": "no_license", "max_line_length": 128, "num_lines": 86, "path": "/Scripts/2016/Code 2016-06-09 Simulations/simulation7.R", "repo_name": "plannapus/MfN-Code-Clinic", "src_encoding": "UTF-8", "text": "simulation <- function(speciation_rate1, speciation_rate2, \n\t\t\t\t\t\textinction_rate1, extinction_rate2, \n\t\t\t\t\t\tmutation_rate1, mutation_rate2,\n\t\t\t\t\t\tage_max, occupancy1, occupancy2, \n\t\t\t\t\t\tN_TRIALS, RNG_SEED, starting_with=2){\n\t\t\t\t\t\t\t\n\tecologies <- data.frame(type=1:2,\n\t\t\t\t\t\t\tspeciation_rate=c(speciation_rate1,speciation_rate2),\n\t\t\t\t\t\t\textinction_rate=c(extinction_rate1,extinction_rate2),\n\t\t\t\t\t\t\tmutation_rate=c(mutation_rate1,mutation_rate2),\n\t\t\t\t\t\t\toccupancy=c(occupancy1,occupancy2))\n\tall_trials <- list()\n\tset.seed(RNG_SEED)\n\tx <- 0\n\t\t\n\t#Simulated evolution\n\twhile(x<N_TRIALS){\n\t\tx <- x+1\n\t\tt <- 0\n\t\ttaxa <- 1\n\t\torig_time <- 0\n\t\text_time <- NA\n\t\teco_type <- 
starting_with\n\t\tancestor <- 0\n\t\twhile(t<age_max){\n\t\t\tt <- t+1\n\t\t\ti <- 1\n\t\t\twhile(i<=length(taxa)){\n\t\t\t\tif(is.na(ext_time[i])){\n\t\t\t\t\teco <- eco_type[i]\n\t\t\t\t\tmr <- ecologies$mutation_rate[eco]\n\t\t\t\t\tMUT <- rpois(1,mr)\n\t\t\t\t\tif(MUT & taxa[i]!=1){\n\t\t\t\t\t\teco_type[i] <- sample(ecologies$type[-eco],1)\n\t\t\t\t\t\teco <- eco_type[i]\n\t\t\t\t\t\t}\n\t\t\t\t\tif(orig_time[i]!=t){\n\t\t\t\t\t\tsr <- ecologies$speciation_rate[eco]\n\t\t\t\t\t\ter <- ecologies$extinction_rate[eco]\n\t\t\t\t\t\tN <- sum(eco_type[is.na(ext_time)]==eco)\n\t\t\t\t\t\ter <- er + (sr-er)*N/ecologies$occupancy[eco]\n\t\t\t\t\t\tEXT <- rpois(1,er)\n\t\t\t\t\t\tORG <- rpois(1,sr)\n\t\t\t\t\t\tif(ORG){\n\t\t\t\t\t\t\text_time[i] <- t\n\t\t\t\t\t\t\ttaxa <- c(taxa, max(taxa)+1, max(taxa)+2)\n\t\t\t\t\t\t\torig_time <- c(orig_time, t, t)\n\t\t\t\t\t\t\text_time <- c(ext_time, ifelse(EXT,t,NA), NA)\n\t\t\t\t\t\t\teco_type <- c(eco_type, eco, eco)\n\t\t\t\t\t\t\tancestor <- c(ancestor,taxa[i], taxa[i])\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\tif(EXT){ext_time[i] <- t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\ti <- i+1\n\t\t\t}\n\t\t\tif(sum(is.na(ext_time))==0){break}\n\t\t\t}\n\t\tif(sum(is.na(ext_time))==0){\n\t\t\tx <- x-1\n\t\t}else{\n\t\t\text_time[is.na(ext_time)] <- t+1\n\t\t\tall_trials[[x]] <- data.frame(taxa, orig_time, ext_time, eco_type, ancestor)\n\t\t\tcat(\"Trial\",x,\"done.\\n\")\n\t\t}\n\t}\n\tlist(Params=ecologies, Trials=all_trials)\n}\n\n\nif(!interactive()){\n\tRNG_SEED <- 20060707 #Random seed\n\tN_TRIALS <- 1000 #Number of trials\n\targs <- as.numeric(commandArgs(TRUE))\n\tsp1 <- args[1]\n\tsp2 <- args[2]\n\tex1 <- args[3]\n\tex2 <- args[4]\n\tmu1 <- args[5]\n\tmu2 <- args[6]\n\tage <- args[7]\n\tocc1 <- args[8]\n\tocc2 <- args[9]\n\tres <- simulation(sp1,sp2,ex1,ex2,mu1,mu2,age,occ1,occ2,N_TRIALS,RNG_SEED)\n\tsave(res,file=sprintf(\"sim7 - Sp%.2f-%.2f,Ext%.2f-%.2f,Mut%.2f-%.2f,Age%i,K%i-%i.Rdata\",sp1,sp2,ex1,ex2,mu1,mu2,age,occ1,occ2))\n\t}" }, { "alpha_fraction": 0.730088472366333, "alphanum_fraction": 0.769911527633667, "avg_line_length": 36.66666793823242, "blob_id": "561800cf943f4eb7c337d41e856340ca69242c51", "content_id": "a707dd8e44216a839aa54c4fe7af9d0546431055", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 226, "license_type": "no_license", "max_line_length": 88, "num_lines": 6, "path": "/Protokolle/2017/Protokoll 2017-11-15.md", "repo_name": "plannapus/MfN-Code-Clinic", "src_encoding": "UTF-8", "text": "34th Meeting - 15th of November 2017\n----\nAttendance: 5\n\nMelanie introduced to us the R package [sampbias](https://github.com/azizka/sampbias). 
\nWe also debugged an RMarkdown file which refused to produce correctly the date.\n" }, { "alpha_fraction": 0.6627771258354187, "alphanum_fraction": 0.6732788681983948, "avg_line_length": 26.645160675048828, "blob_id": "14e4d6c04a25c8fd9beb04cd2c09e9a224b41686", "content_id": "d77324dc48742e2c2f1c35a9224aabbd4268630b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 857, "license_type": "no_license", "max_line_length": 85, "num_lines": 31, "path": "/Scripts/2017/Code 2017-04-26 falk notes on matplotlib parameters.py", "repo_name": "plannapus/MfN-Code-Clinic", "src_encoding": "UTF-8", "text": "\"\"\"\nin order of execution priority:\n(i) matplotlibrc file\n(ii) custom styles\n(iii) manual RC parameter configuration\n\n\"\"\"\n\n#######################\n\n### (i) matplotlibrc\n# find the default local matplotlibrc file:\n# (root)# find / -name \"*matplotlibrc*\" 2> /dev/null\n# for example here: /usr/lib/python3.6/site-packages/matplotlib/mpl-data/matplotlibrc\n# copy it here:\n# ~/.config/matplotlib/matplotlibrc\n#\n# open it in text editor and manipulate the default configuration.\n\n### (ii) Styles!!\n# custom styles can go to ~/.config/matplotlib/stylelib\n# originals in /usr/lib/python3.6/site-packages/matplotlib/mpl-data/stylelib\n# in python:\n# print(MPP.style.available)\n# MPP.style.use('seaborn-paper')\n\n### (iii) Manual rc configuration\n# in python:\n# MP.rcParams['font.size'] = 10\n## or:\n# MPP.rc('font',**{'family': 'Iwona', 'size': 10})\n" }, { "alpha_fraction": 0.7483221292495728, "alphanum_fraction": 0.7852349281311035, "avg_line_length": 36.375, "blob_id": "6faaf8c9fc7271cb79216289fd9fff2b5d2f324e", "content_id": "2e93f8fd8412a9e2080f20ea21c05e5ce495f3ca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 298, "license_type": "no_license", "max_line_length": 144, "num_lines": 8, "path": "/Protokolle/2016/Protokoll 2016-05-18.md", "repo_name": "plannapus/MfN-Code-Clinic", "src_encoding": "UTF-8", "text": "11th meeting - 18th May 2016\n----\n\nAttendance: 6\n\nNeil gave a short course of time calibration and ancestral character estimation using R and packages ape, phytools and paleotree. Code attached.\n\nNext meeting the 2nd of June. Edit: postponed to the 9th as most usual attendants were away that week." }, { "alpha_fraction": 0.7511961460113525, "alphanum_fraction": 0.7910685539245605, "avg_line_length": 88.57142639160156, "blob_id": "3ad804820f06a0a2f6b2ad1b8815de4924f99aca", "content_id": "b5e1fbc3a313019d3cab6e726f392f0a01382eb4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 627, "license_type": "no_license", "max_line_length": 553, "num_lines": 7, "path": "/Protokolle/2017/Protokoll 2017-07-12.md", "repo_name": "plannapus/MfN-Code-Clinic", "src_encoding": "UTF-8", "text": "27th Meeting - 12th of July 2017\n----\nAttendance: 4\n\nWe reviewed a script I wrote, trying to reproduce the results of Rojas et al 2017 (http://dx.doi.org/10.1130/G38944.1). In this study the authors identify biogeographic provinces in albian ammonites using network analysis, and Infomap clustering. The data was given in supplementary material but not the code. We thus tried to reproduce the missing code based on the Method explanation of the paper. 
The results are (to a large extent) reproducible but we were not able to find out the reason behind some minor differences between their output and ours.\n\nNext meeting, TBD.\n" }, { "alpha_fraction": 0.7719298005104065, "alphanum_fraction": 0.7944862246513367, "avg_line_length": 56.14285659790039, "blob_id": "e054e443a369eeecb3f376facd4d01c7cb9c01d0", "content_id": "9891af58e2435830624c22c6248d6a1d2de226e8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 399, "license_type": "no_license", "max_line_length": 296, "num_lines": 7, "path": "/Protokolle/2016/Protokoll 2016-04-21.md", "repo_name": "plannapus/MfN-Code-Clinic", "src_encoding": "UTF-8", "text": "9th meeting - 21st April 2016\n----\n\nAttendance: 8\n\nI presented a short introduction to package ape based on two scripts I helped Maren and Christie writing in the past year: one is a script to visually map characters on a phylogenetic tree, and the other one to do some basic stats on phylogenetic distances, using randomized trees. Code attached.\nNext meeting: Wednesday the 4th after Journal Club." }, { "alpha_fraction": 0.7020968794822693, "alphanum_fraction": 0.7180043458938599, "avg_line_length": 42.21875, "blob_id": "ab0bcc705a058b4ee8edd56995875e165dbe161f", "content_id": "d60ee3ddb65241b03e18d52aa89894cd406b9ceb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 1383, "license_type": "no_license", "max_line_length": 127, "num_lines": 32, "path": "/Scripts/2017/Code 2017-06-01/resolveSynonymy_pre.R", "repo_name": "plannapus/MfN-Code-Clinic", "src_encoding": "UTF-8", "text": "resolveSynonymy <-\nfunction(dataset, username=\"\", password=\"\"){\n\trequire(RPostgreSQL)\n\tcon <- dbConnect(dbDriver(\"PostgreSQL\"), user=username, password=password,host=\"192.168.101.133\", dbname=\"nsb\", port=\"5432\")\n\ttaxonomy <- dbReadTable(con, \"neptune_taxonomy\")\n\tdbDisconnect(con)\n\ttaxonomy <- taxonomy[,colnames(taxonomy)%in%c(\"taxon_id\",\"taxon_synon_to\",\"species\",\"genus\",\"subspecies\")]\n\tsyndiat <- data.frame(taxon_id=as.character(taxonomy$taxon_id), \n\t\t\t\t\t \t taxon_synon=as.character(taxonomy$taxon_synon_to), stringsAsFactors=FALSE)\n\tsyndiat<-syndiat[!is.na(syndiat$taxon_synon),]\n\tdataset$resolved_taxon_id <- dataset$taxon_id\n\tresolved_species <- dataset$species\n\tresolved_genus <- dataset$genus\n\tresolved_subspecies <- dataset$subspecies\n\twhile(sum(dataset$resolved_taxon_id%in%syndiat$taxon_id)!=0){\n\t\tfor(i in 1:nrow(syndiat)){dataset$resolved_taxon_id[dataset$resolved_taxon_id==syndiat[i,1]]<-syndiat[i,2]}\n\t\t}\n\tcat(\"0 row done\")\n\tfor(i in seq_along(dataset$resolved_taxon_id)){\n\t\tr <- taxonomy[taxonomy$taxon_id==dataset$resolved_taxon_id[i],]\n\t\tif(nrow(r)==1){\n\t\t\tresolved_species[i] <- r$species\n\t\t\tresolved_genus[i] <- r$genus\n\t\t\tresolved_subspecies[i] <- r$subspecies\n\t\t\t}\n\t\tcat(\"\\r\",i,\"rows done\",sep=\" \")\n\t\t}\n\tdataset$resolved_species <- resolved_species\n\tdataset$resolved_genus <- resolved_genus\n\tdataset$resolved_subspecies <- resolved_subspecies\n\tdataset\n\t}\n" }, { "alpha_fraction": 0.7821393609046936, "alphanum_fraction": 0.7958782911300659, "avg_line_length": 101, "blob_id": "5fb264547bf130a4ddf3ea81db3430527faa48ae", "content_id": "2f46d0538526c78e8c35171b2811d5ab29622bb6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1019, "license_type": "no_license", "max_line_length": 
508, "num_lines": 10, "path": "/Protokolle/2016/Protokoll 2016-01-21.md", "repo_name": "plannapus/MfN-Code-Clinic", "src_encoding": "UTF-8", "text": "4th meeting - 21 january 2016\n-----\n\nAttendance: 6\n\nThis meeting was spent debugging Melanie's code. First issue was with a new package doing bayesian analysis and using a just-in-time compiler, but throwing an error doing compiling, primarily because RStudio didn't find the path to Rtools and its compilers. Prior to this, on other machines, the package itself refused to compile. This sparked a conversation on package management and installation, but also on the need to limit the use of dependencies in packages to keep the code manageable over the years.\n\nSecond issue was with HTML parsing a species page from the IUCN red list. This case study needed a combination of xml parsing as seen during the 2nd meeting of the club and REGEX.\n\nIt was decided that, since the room was taken on February the 4th and then every two weeks after, the next meeting will be in 3 weeks instead of 2 in order to be able to book the room in alternance with the other occupants. Next meeting will therefore be on February the 11th." }, { "alpha_fraction": 0.7749999761581421, "alphanum_fraction": 0.7857142686843872, "avg_line_length": 83.0999984741211, "blob_id": "09de9348ada3c65b70e34cf0875c0a45ae4e0118", "content_id": "ea10ba51809f8b1ebf780cd4783ca7cf3c6732a4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 840, "license_type": "no_license", "max_line_length": 469, "num_lines": 10, "path": "/Protokolle/2016/Protokoll 2016-02-25.md", "repo_name": "plannapus/MfN-Code-Clinic", "src_encoding": "UTF-8", "text": "6th meeting - 25 february 2016\n----\n\nAttendance: 8\n\nSascha introduced Jupyter notebook to us, which is an interactive \"notebook\" environment that allow you to insert code (R, Python, Matlab, ...) and insert the output of the code in the middle of markdown-formatted text in order to produce quickly fully-reproducible reports. This project evoked us with somewhat similar projects such as knitr and RMarkdown, that we thus planned to discuss during next meeting (as nobody brought the necessary material to discuss them).\n\nWe also opened the discussion on dealing with GIS in R, and working with R output in GIS softwares (ArcGis, DIVAGis, GRASS, etc.). As, again, nobody brought material to discuss it we decided to postpone it to next meeting and call it a day.\n\nIt was thus decided that next meeting will be on Thursday the 3rd of March." }, { "alpha_fraction": 0.7545219659805298, "alphanum_fraction": 0.7803617715835571, "avg_line_length": 47.375, "blob_id": "3ae8d7fcda2fa3f20905158f5f6fdf70b6cb9a40", "content_id": "ce37f0b1526984516352b086381434d079f0a957", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 387, "license_type": "no_license", "max_line_length": 183, "num_lines": 8, "path": "/Protokolle/2017/Protokoll 2017-06-01.md", "repo_name": "plannapus/MfN-Code-Clinic", "src_encoding": "UTF-8", "text": "25th Meeting - 1st of June 2017\n----\nAttendance: 5\n\nBased on recent queries, I put together a collection of tips and tricks to write faster code / code using less memory.\nSome of the scripts discussed can be found in the Scripts folders. 
The pdf I also showed was the following: http://research.computing.yale.edu/sites/default/files/files/efficientR.pdf\n\nNext meeting the 21st of June.\n" }, { "alpha_fraction": 0.38938194513320923, "alphanum_fraction": 0.39128369092941284, "avg_line_length": 30.71356773376465, "blob_id": "596393f3f388f6e2107383db7faa063dc877f292", "content_id": "54ca1a7dc479387cf98db1ea75dbb00dc6128c3d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 6310, "license_type": "no_license", "max_line_length": 102, "num_lines": 199, "path": "/Scripts/2016/Code 2016-12-07 database_connection/SimplerSQLQuery.r", "repo_name": "plannapus/MfN-Code-Clinic", "src_encoding": "UTF-8", "text": "#_______________________________________________________________________________\n# This package creates a simplified SQL interface for complex data structures.\n# Tested are\n# data.frame, data.table and tbl (dplyr)\n# SQLite, PostgreSQL, MySQL\n# usage examples can be found below.\n# Requirements (most are optional):\n# tcltk, R6, DBI, dplyr, data.table\n# \n# Questions? Falk.Mielke@hu-berlin.de\n\n\n#_______________________________________________________________________________\n# Password Prompt\nPasswordPrompt <- function( title = \"Password\"\n , label = \"please enter password:\"\n ){\n require(tcltk)\n prompt <- tktoplevel()\n tktitle(prompt) <- title\n\n pwd <- tclVar(\"\")\n\n tkgrid(tklabel(prompt, text = label))\n tkgrid(pwdbox <- tkentry(prompt, textvariable=pwd, show=\"*\"))\n\n tkbind(pwdbox,\"<Return>\",function() tkdestroy(prompt))\n tkgrid(tkbutton(prompt,text=\"OK\",command=function() tkdestroy(prompt)))\n\n tkwait.window(prompt)\n\n return(tclvalue(pwd)) \n} \n\n#_______________________________________________________________________________\n# a more convenient SQL query\n\nrequire(R6)\nrequire(DBI)\nSQL_DF <- R6Class(\n \"SQL_DF\"\n , public = list(\n # the constructor\n initialize = function( drv\n , dbname\n , user = NULL\n , host = NULL\n , port = NULL\n ) {\n # drivers:\n # RSQLite::SQLite()\n # RMySQL::MySQL()\n # RPostgreSQL::PostgreSQL()\n \n if (identical(drv, RSQLite::SQLite())) {\n private$db <- dbConnect(drv, dbname)\n } else {\n private$db <- dbConnect( \n drv\n , user = user\n , dbname = dbname\n , host = host\n , port = port\n , password = PasswordPrompt(\n title = \"Database Query\"\n , label = sprintf('please enter password for \\n %s @ %s'\n , user, host)\n )\n )\n }\n private$alive <- T\n } # end: initialize\n\n # run a query and return converted result\n , Run = function(query_text) {\n if (!private$alive) {\n return()\n }\n rs <- dbSendQuery(private$db, query_text)\n if (dbGetRowsAffected(rs) == 0) {\n dbClearResult(rs)\n return (NULL)\n } else {\n result <- fetch(\n rs\n , n = dbGetRowsAffected(rs)\n )\n dbClearResult(rs)\n return ( private$Convert(result) )\n }\n }\n # upload data to a table\n , Upload = function(dat, table_name, ...) {\n if (!private$alive) {\n return()\n }\n dbWriteTable(private$db, table_name, as.data.frame(dat), ...) \n return()\n }\n # download a whole table\n , LoadTable = function(table_name, ...) {\n if (!private$alive) {\n return()\n }\n result <- dbReadTable(private$db, table_name, ...) 
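# dbReadTable returns a plain data.frame; private$Convert then recasts it to the subclass's container (data.frame, tbl or data.table)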
\n return( private$Convert(result) )\n }\n \n # upload data to a table\n , Close = function() {\n if (!private$alive) {\n return()\n }\n dbDisconnect(private$db)\n private$alive <- F\n # rm(list=ls()[!grepl('^QQQ$',ls())]) \n }\n )\n , active = list(\n tables = function(){\n return(dbListTables(private$db))\n }\n , connected = function(){\n return(private$alive)\n }\n )\n , private = list(\n db = NULL\n , alive = F\n , Convert = function(result) {\n # require(data.table)\n return (as.data.frame(result))\n }\n )\n )\n\n# analogous, returning a TBL\nrequire(dplyr)\nSQL_TBL <- R6Class(\n \"SQL_TBL\"\n , inherit = SQL_DF\n , private = list(\n Convert = function(result) {\n return (as.tbl(result))\n }\n )\n )\n\n# analogous, returning a data.table\nrequire(data.table)\nSQL_DT <- R6Class(\n \"SQL_DT\"\n , inherit = SQL_DF\n , private = list(\n Convert = function(result) {\n return (as.data.table(result))\n }\n )\n )\n\n\n### Usage example 1:\n# sql_connection <- SQL_DF$new(drv = RPostgreSQL::PostgreSQL()\n# , user = \"monkey\"\n# , dbname = \"sandbox\"\n# , host = \"localhost\"\n# , port = 5432\n# )\n# \n# a <- sql_connection$Run(\"SELECT * FROM hflights\")\n# b <- sql_connection$LoadTable(\"hflights\")\n# \n\n### Usage example 2:\nsql_to_dt <- SQL_DT$new(drv = RSQLite::SQLite(), dbname = \":memory:\")\nprint (sql_to_dt$connected)\nsql_to_dt$Upload(as.data.table(mtcars), 'MTCars', overwrite = T, append = F)\ndt <- sql_to_dt$Run('SELECT * FROM MTCars')\nsummary(dt)\nsql_to_dt$Run('DROP TABLE MTCars')\nsql_to_dt$Close()\nprint (sql_to_dt$connected)\n# \n\n\n\n#_______________________________________________________________________________\n# Optionally, pack a library.\n# if (F) {\n# package.skeleton( \n# name = 'SimplerSQLQuery'\n# , list = c('PasswordPrompt', 'SQL_DF', 'SQL_DT', 'SQL_TBL')\n# , path = '/grove/R/CEB'\n# , force = T\n# )\n# }\n\n#_______________________________________________________________________________\n# eof. thanks for reading!" }, { "alpha_fraction": 0.75052410364151, "alphanum_fraction": 0.7624039053916931, "avg_line_length": 70.5999984741211, "blob_id": "bfae87a5dd9e0bf71e4de62d00c57ece33876768", "content_id": "42937f76ebaec4db7701764c54b2b60cd65aafd1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1431, "license_type": "no_license", "max_line_length": 362, "num_lines": 20, "path": "/Protokolle/2016/Protokoll 2016-01-07.md", "repo_name": "plannapus/MfN-Code-Clinic", "src_encoding": "UTF-8", "text": "3rd meeting - 7 january 2016\n----\n\nAttendance: 7\n\nThis meeting was spent reviewing a bit of Simon's code. The code dugged from a database a 400k row dataframe of the form:\n\n event | station_id | timestamp\n ------+------------+----------\n\nand the task was to measure the time lag between each serie of events at each station.\nThe code thus needed to loop through each station, and then proceeded to loop through each row of the subsetted dataframe to measure the lag between each row timestamp.\n\nMost of the discussion thus revolved around the question: when is it necessary to loop and how to do so?\nThe consensus reached was that: a loop is always preferrable to repeating code (repeats leads to higher chance of introducing errors) but at the same time alternative solutions (such as vectorization) are preferable to loop when possible, and if loop needs be, prefer loops in compiled code to loops in interpreted code (i. e. 
`*apply` and `foreach` over `for`).\nIn the case at hand, the loop measuring the lag between events could be vectorized, while the stations actually needed to be looped through.\n\nAdditionally, some advice was given on how to connect to a database from R (specifically on the usage of dbGetQuery vs dbSendQuery), on the relevance of some packages used in Simon's code, etc.\n\nAt the end of the meeting, we decided on Thursday the 21st of January at 4pm in room 3330 (Nordbau) for the next meeting." }, { "alpha_fraction": 0.6973684430122375, "alphanum_fraction": 0.6973684430122375, "avg_line_length": 18, "blob_id": "a5ecced38c43405c1ec67a42d3d4801e7a03f0c5", "content_id": "6105c9eaed17cbb9f2ad4fd82617e15307213dc9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 76, "license_type": "no_license", "max_line_length": 53, "num_lines": 4, "path": "/Order of the Day.md", "repo_name": "plannapus/MfN-Code-Clinic", "src_encoding": "UTF-8", "text": "Order of the Day\n---\n\n* Machine Learning: TensorFlow for Python and R (JR).\n" }, { "alpha_fraction": 0.7700534462928772, "alphanum_fraction": 0.7878788113594055, "avg_line_length": 61.33333206176758, "blob_id": "dadc23c70ac9dafbad87594acbc81ae5a3749bc5", "content_id": "3c9a2945f8b502de83300f3d8906403214f13588", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 561, "license_type": "no_license", "max_line_length": 270, "num_lines": 9, "path": "/Protokolle/2016/Protokoll 2016-08-03.md", "repo_name": "plannapus/MfN-Code-Clinic", "src_encoding": "UTF-8", "text": "13th meeting - 3rd August 2016\n----\nAttendance: 6\n\nWe went through the basics of how to write a function, how to see the source code of a function, and how to use this knowledge to optimally use functions of the apply family (with a special focus on how to perform a correlation between two variables split by categories).\nWe briefly discussed at the end the issue of the GitHub account (public versus private).\nWe ended up keeping the status quo of having a public repository while trying not to put any sensitive data in it.\n\nNext meeting the 17th of this month.\n" }, { "alpha_fraction": 0.7411764860153198, "alphanum_fraction": 0.7882353067398071, "avg_line_length": 33, "blob_id": "42574d389a5d181e0a63b7f3eb154298cfc692d9", "content_id": "4a22b495fa3287edef7b61c7c3531f96346b253b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 170, "license_type": "no_license", "max_line_length": 113, "num_lines": 5, "path": "/Protokolle/2017/Protokoll 2017-12-06.md", "repo_name": "plannapus/MfN-Code-Clinic", "src_encoding": "UTF-8", "text": "36th Meeting - 6th of December 2017\n----\nAttendance: 6\n\nWe continued discussing makefiles. 
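In a typical setup — a hedged sketch with invented file names, not the exact files discussed — a Makefile rule calls Rscript, and the R script retrieves the arguments passed to it with `commandArgs`:

    ## A hypothetical Makefile rule driving an R script:
    ##   figure.pdf: analysis.R data.csv
    ##           Rscript analysis.R data.csv figure.pdf
    ## and the corresponding analysis.R:
    args <- commandArgs(trailingOnly=TRUE)   # everything after the script name
    dat <- read.csv(args[1])
    pdf(args[2])
    plot(dat[[1]], dat[[2]])
    dev.off()
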
We also discussed how to use command-line arguments with Rscript, as in the sketch above.\n" }, { "alpha_fraction": 0.7071428298950195, "alphanum_fraction": 0.7714285850524902, "avg_line_length": 28, "blob_id": "2818b0098a3970788e75b86bfe101140606fe758", "content_id": "562ea55b2bed823ca994c60f1995583b309f9fdf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 140, "license_type": "no_license", "max_line_length": 83, "num_lines": 5, "path": "/Protokolle/2018/Protokoll 2018-01-17.md", "repo_name": "plannapus/MfN-Code-Clinic", "src_encoding": "UTF-8", "text": "38th Meeting - 17th of January 2018\n----\nAttendance: 3\n\nWe helped Sami with issues with exporting ggplot-generated figures to high-resolution PDF.\n" }, { "alpha_fraction": 0.7572383284568787, "alphanum_fraction": 0.7795100212097168, "avg_line_length": 63.14285659790039, "blob_id": "7d9cd109b53531fd145ab6483f85ba1ee5942cf9", "content_id": "baf61113efe73293fd4817cf66f546c95942879e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 449, "license_type": "no_license", "max_line_length": 264, "num_lines": 7, "path": "/Protokolle/2017/Protokoll 2017-02-01.md", "repo_name": "plannapus/MfN-Code-Clinic", "src_encoding": "UTF-8", "text": "19th meeting - 1st of February 2017\n----\nAttendance: 6\n\nFollowing Falko's email, we discussed what we needed him to explain to us during the next meeting.\nWe also discussed various Club-related issues: how to organize the club when I can't; what to do during meetings where people do not have issues to discuss (=> we'll review code, either ours or from the literature; occasionally we might even write some code together).\nNext meeting on February the 15th.\n" }, { "alpha_fraction": 0.7788698077201843, "alphanum_fraction": 0.7948402762413025, "avg_line_length": 80.5, "blob_id": "856bcc85d27c0b5257bdc4bca9e22f6bfec36814", "content_id": "115db599a27ee02df139bd10db00a035a657138f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 814, "license_type": "no_license", "max_line_length": 232, "num_lines": 10, "path": "/Protokolle/2016/Protokoll 2016-02-11.md", "repo_name": "plannapus/MfN-Code-Clinic", "src_encoding": "UTF-8", "text": "5th meeting - 11 february 2016\n-----\n\nAttendance: 5\n\nMartin came with an issue dealing with ggplot2. The particular issue was with having two plots side by side sharing a y-axis (one of them showing a stratigraphic log and the other geochemical data from the corresponding section). \nAs nobody present had enough experience with ggplot2, nobody was really able to help unfortunately: we navigated aimlessly inside the created ggplot object to find out how to modify it, without result.\nI briefly showed how to use function `layout` combined with `par` and basic `plot` arguments to produce the desired effect in base plot (a minimal sketch of it follows below). Though, since the whole workflow was already written in ggplot2, it was of little help.\n\nNext meeting was scheduled for the 25th of February, with the hope that more people will attend it.
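
A minimal sketch of that `layout`/`par` approach, with invented data in place of Martin's:

    layout(matrix(1:2, nrow=1), widths=c(1, 2)) # two panels side by side sharing the y-axis
    par(mar=c(4, 4, 1, 0))
    plot(runif(20), 20:1, type="s", xlab="lithology (dummy)", ylab="depth (m)", ylim=c(20, 0))
    par(mar=c(4, 0, 1, 1))
    plot(rnorm(20), 20:1, type="o", xlab="proxy (dummy)", ylab="", yaxt="n", ylim=c(20, 0))
" 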
}, { "alpha_fraction": 0.6683818101882935, "alphanum_fraction": 0.6899923086166382, "avg_line_length": 37.117645263671875, "blob_id": "cc78982521f013b01615b534edb542d6610437f2", "content_id": "b3b37039601e3b023140f4080ba0ba509974dc6a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 3887, "license_type": "no_license", "max_line_length": 128, "num_lines": 102, "path": "/Scripts/2016/Code 2016-04-21 phylogeny 101.R", "repo_name": "plannapus/MfN-Code-Clinic", "src_encoding": "UTF-8", "text": "# R CRAN Task View on Phylogenetics: https://cran.r-project.org/web/views/Phylogenetics.html\n# Introduction to phylogenies in R: http://www.phytools.org/eqg/Exercise_3.2/\n# \"ape\" basic package to manipulate trees. \"phangorn\" can do phylogenetic analyses. \"rmesquite\" can call Mesquite from R.\n# Blog of the guy who created package phytools: http://blog.phytools.org/\n\nlibrary(ape)\nnewick_text <- \"(A,((B,C),(D,(E,F))));\"\ntree <- read.tree(text=newick_text)\n\nstr(tree)\n\nplot(tree)\nplot(tree,type=\"cladogram\", direction=\"upwards\")\nplot(tree,type=\"fan\")\n\n?plot.phylo\n\n#\n# Following script was written to compare morphological distance vs phylogenetic distance\n# and test the significance of that relationship\n#\n\nnexus_file <- \"tree_therapsids.txt\"\ntree <- read.nexus(nexus_file)\ntree <- compute.brlen(tree, 1)\n#tree <- compute.brlen(tree, method=\"Grafen\") #If needs be ultrametric\n\nname_list <- tree$tip.label\nname_comb <- t(combn(name_list,2)) #Find all unique pairs of taxa on the tree\nall <- as.data.frame(name_comb, stringsAsFactors=FALSE)\n\nphylo_dist <- dist.nodes(tree)[1:length(name_list),1:length(name_list)]\n\nPCA <- read.table(\"PCA.csv\", sep=\"\\t\", header=TRUE, stringsAsFactors=FALSE)\nPCA\n\nname.color <- rep(\"black\", length(name_list))\nname.color[name_list%in%PCA$Name] <- \"red\"\nplot(tree, tip.color= name.color, font=4)\n\nall$MorphoDist <- apply(name_comb,1,function(x)ifelse(x[1]%in%PCA$Name & x[2]%in%PCA$Name, \n abs(PCA$Coef[PCA$Name==x[1]]-PCA$Coef[PCA$Name==x[2]]), \n NA))\n\nall$TruePhyloDist <- apply(name_comb,1,function(x)phylo_dist[name_list==x[1],name_list==x[2]])\n\nobslm <- lm(all$TruePhyloDist~all$MorphoDist)\nobsCoef <- obslm$coef[2]\nobsR <- summary(obslm)$r.squared\nprint(summary(obslm))\ncor.test(all$TruePhyloDist,all$MorphoDist)\n\n#####Permuting the tree##############\nn <- 9999 #Number of randomizations\nset.seed(1983)\nrandCoef <- rep(0, n)\nfor(i in 1:n){\n\trandom_tree <- rtree(length(name_list), tip.label=name_list)\n\trand_phylo_dist <- dist.nodes(random_tree)[1:length(name_list),1:length(name_list)]\n\trandom_dist <- apply(name_comb,1,function(x)rand_phylo_dist[name_list==x[1],name_list==x[2]])\n\trand_lm <- lm(random_dist~all$MorphoDist)\n\trandCoef[i]<-rand_lm$coef[2]\n\tcat(i,\"/\",n,\"\\r\",sep=\"\")\n\t}\nobs_signif <- sum(abs(randCoef)>abs(obsCoef)) / n\n\npar(mfcol=c(2,1))\npar(mar=c(4,4,1,1))\nplot(all$MorphoDist, all$TruePhyloDist, xlab=\"Morphological Distance\", ylab=\"Phylogenetic Distance\", pch=19, xaxs=\"i\", yaxs=\"i\")\nabline(obslm, col=\"red\")\ntext(25,5, sprintf(\"Coefficients = %.3f\\nR-squared = %.3f\",obsCoef,obsR), col=\"red\", pos=3)\n\npar(mar=c(4,4,4,1))\nhist(abs(randCoef), 256, main=\"Linear regression coefficient\\nof 9999 random trees\")\nabline(v=abs(obsCoef), col=\"red\", lwd=2)\ntext(abs(obsCoef), 80, sprintf(\"Observed = %.3f\",obsCoef), col=\"red\", cex=0.5, pos=4)\n\n#\n# Another snippet of code from Christie: Mapping character visually on 
a phylogeny\n#\n\nnexus_christie <- \"Christie/Adulttree.nex\"\nparams_christie <- \"Christie/Adult92spp2.txt\"\n\ntree <- read.nexus(nexus_christie) # read in tree\ndata <- read.table(params_christie, header=TRUE, row.names=1, sep=\"\\t\", stringsAsFactors=FALSE) # read in data file\ndata[1:10,1:10]\nphylo.data <- data[tree$tip.label,] #Because row names of data are == to the tree's tip labels\nBiome <- phylo.data$Biome\n\n#Mapping the characters on the tree tips\ncolor.scheme <- c(\"red\",\"blue\",\"green\",\"yellow\",\"orange\",\"black\")\nplot(tree, tip.color=color.scheme[as.factor(Biome)], type=\"fan\", edge.width=2)\n\n#Mapping the characters on the tree edges\nbiome.edge <- ifelse(tree$edge[ ,2] %in% 1:nrow(phylo.data), Biome[tree$edge[ ,2]], \"unknown\")\n\nbiome.edge2 <- Biome[tree$edge[ ,2]]\nbiome.edge2[is.na(biome.edge2)] <- \"unknown\"\nall(biome.edge==biome.edge2)\n\nplot(tree, edge.color=color.scheme[as.factor(biome.edge)], type=\"fan\", edge.width=2)" }, { "alpha_fraction": 0.753045916557312, "alphanum_fraction": 0.7610121965408325, "avg_line_length": 124.47058868408203, "blob_id": "8af27048cee946519bdacbf87a779041851ffd3f", "content_id": "4318e35717ac48443f29fd4358fd90c8041cafba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2134, "license_type": "no_license", "max_line_length": 483, "num_lines": 17, "path": "/Protokolle/2015/Protokoll 2015-12-09.md", "repo_name": "plannapus/MfN-Code-Clinic", "src_encoding": "UTF-8", "text": "2nd meeting - 9 december 2015\n----\n\nAttendance: 9\n\nWhile I was unsuccessfully trying to connect my computer to the projector of the seminar room, Maren gave a bunch of tips on \"how to find and get help in R\". Here is a short version of what she said:\n\n> From inside R: if the function name is known, here function x, use ?x or help(x) [ex: ?mean]; if unknown, search for it with ?? (or help.search), which works with fuzzy matching and can be used for expressions [ex: ??\"pattern matching\"]. Finally, library sos has a set of functions (findFn) to search in a similar manner for functions from packages that are not installed on your computer. \n> On the web: the R mailing list (https://stat.ethz.ch/mailman/listinfo/r-help), Stackoverflow (http://stackoverflow.com/) or any other more specific site of the stackexchange network [ex: gis.stackexchange.com for GIS]. With one major caveat: learn to ask correctly. Questions should explain the problem clearly, show minimal code for the error to be reproducible, and give a sample of data (using dput(your_df)). \n> To change the language in which errors are displayed to English, use: Sys.setenv(LANG=\"en\") \n> Googling the error usually works (as long as the error is in English), but if not, rseek.org and duckduckgo.com usually work better in edge cases. \n> Syntax highlighting can help avoid basic errors. On Mac, it is native to the R GUI. On PC, one can use Notepad++ (it can be used inside the GUI using options(editor=\"path/to/notepad++.exe\")) \n\nThen I tried to explain some basics of web scraping for data mining with a case study: pangaea.de. In this simple case, the difficulty resides in two points: being able to reproduce the API the website uses to connect to their database (in this case, in a way similar to google, using a URL of the form search?q=query&count=500&offset=0&...) and being able to read in HTML pages, in particular HTML tables. 
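For instance, once a page is parsed, all its HTML tables can be grabbed in one call (a minimal sketch with package XML; the URL here is a placeholder, not a real dataset):

    library(XML)
    page <- htmlParse(readLines("http://doi.pangaea.de/some-doi?format=html"), encoding="UTF-8")
    tables <- readHTMLTable(page) # a list of data.frames, one per <table> element
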
Basic knowledge of HTML is necessary to be able to perform this kind of tasks.\nCode attached.\n\nAt the end of the meeting, we gathered ideas for future meeting themes, and decided on the Thurday 7th of January at 4pm in room 3330 (Nordbau) for the next meeting.\n\n" }, { "alpha_fraction": 0.42736586928367615, "alphanum_fraction": 0.43670886754989624, "avg_line_length": 30.245283126831055, "blob_id": "80a9eefbcf51120a22c631cedb470727615dcd3a", "content_id": "0458039d6d0bc404d701ec65a8e8c8845a9eb9a6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3318, "license_type": "no_license", "max_line_length": 125, "num_lines": 106, "path": "/Scripts/2016/Code 2016-12-07 database_connection/DatabaseBasics.py", "repo_name": "plannapus/MfN-Code-Clinic", "src_encoding": "UTF-8", "text": "\nimport tkinter as TK\nimport sqlalchemy as SQL\n\nimport pandas as PD\n\n\n\"\"\"\n#######################################################################\n### Database Interaction ###\n#######################################################################\n\"\"\"\n\nsql_credentials = { \\\n 'user': 'sloth' \\\n , 'database': 'Bones' \\\n }\n\n\n#______________________________________________________________________\n### Password Prompt ###\n\"\"\"\nhttp://stackoverflow.com/questions/15724658/simplest-method-of-asking-user-for-password-using-graphical-dialog-in-python\n\"\"\"\ndef PasswordPrompt(label = 'please enter password:', header = 'Password'):\n prompt = TK.Tk()\n prompt.wm_title(header)\n TK.Label(prompt, text = label).pack(side = 'top')\n\n pwd = TK.StringVar()\n pwdbox = TK.Entry(master = prompt, textvariable=pwd, show = '*')\n\n pwdbox.pack(side = 'top')\n pwdbox.bind('<Return>', lambda _: prompt.destroy())\n TK.Button(prompt, command=lambda: prompt.destroy(), text = 'OK').pack(side = 'top')\n\n prompt.mainloop()\n\n return pwd.get()\n\n#______________________________________________________________________\n### SQL connection ###\nclass SQLConnect(dict):\n \"\"\"\n Encapsulate some common information required to run the experiment\n allow instantiation with a default set.\n from http://stackoverflow.com/a/9550596\n Data storage by subclassing dict\n\n \"\"\"\n \n def __init__(self, credentials=None): \n # first initialize all mandatory values.\n self.SetDefaults()\n # then overwrite everything the user provides.\n if credentials is not None:\n for d in credentials.keys():\n self[d] = credentials[d]\n\n if self.get('password') is None:\n self['password'] = PasswordPrompt( \\\n label = 'please enter password for \\n %s @ %s' % (self['user'], self['hostname']) \\\n , header = 'database password')\n\n # here the connection string is formed from all the information\n self.engine = SQL.create_engine( '%s://%s:%s@%s:%i/%s' \\\n % ( self['dialect'] \\\n , self['user'] \\\n , self['password'] \\\n , self['hostname'] \\\n , self['port'] \\\n , self['database'] \\\n ) )\n\n def __getattr__(self, param):\n return self[param]\n\n def __setattr__(self, param, value):\n self[param] = value\n\n def SetDefaults(self):\n self['hostname'] = 'localhost' # '141.20.60.187' # \n self['port' ] = 5432\n self['dialect' ] = 'postgresql+psycopg2'\n\n def Close(self):\n self.engine.dispose()\n\n\n\n\n\"\"\"\n#######################################################################\n### data query ###\n#######################################################################\n\"\"\"\ndef DataQuery(query):\n \n db = SQLConnect(sql_credentials)\n\n data = PD.read_sql_query( \\\n 
query \\\n , db.engine \\\n )\n\n db.Close()\n return data\n\n " }, { "alpha_fraction": 0.7665505409240723, "alphanum_fraction": 0.8013937473297119, "avg_line_length": 34.875, "blob_id": "1b6a5bf4bbb5b2b14f8e26723041d260280a3e66", "content_id": "e45cb75b73011fd0487c1d5a52117ad0285c56ec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 287, "license_type": "no_license", "max_line_length": 107, "num_lines": 8, "path": "/Protokolle/2017/Protokoll 2017-07-26.md", "repo_name": "plannapus/MfN-Code-Clinic", "src_encoding": "UTF-8", "text": "28th Meeting - 26th of July 2017\n----\nAttendance: 6\n\nMelanie needed help mastering geom_smooth, specifically how to specify model parameter inside the function.\nCarolin needed help looping through data categories to compute a metric and its uncertainty.\n\nNext meeting the 9th of August.\n" }, { "alpha_fraction": 0.7657657861709595, "alphanum_fraction": 0.7747747898101807, "avg_line_length": 21.200000762939453, "blob_id": "23cb76029584fb3355ef60b5285ed8c4c76d3328", "content_id": "5c6a7d87db8724f3e84409c76378418f5dc94725", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 111, "license_type": "no_license", "max_line_length": 36, "num_lines": 5, "path": "/Scripts/2017/Code 2017-11-22 - Makefile/compile.sh", "repo_name": "plannapus/MfN-Code-Clinic", "src_encoding": "UTF-8", "text": "cd \"$( dirname \"${BASH_SOURCE[0]}\")\"\npdflatex paleocene\nbibtex paleocene\npdflatex paleocene\npdflatex paleocene\n" }, { "alpha_fraction": 0.7454545497894287, "alphanum_fraction": 0.774545431137085, "avg_line_length": 44.83333206176758, "blob_id": "68b8671eeb7f7b79320b31afc5ef6f495a307ba8", "content_id": "6c19936fb0f9208acba6ac5ab6e2ef98332d14a4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 275, "license_type": "no_license", "max_line_length": 114, "num_lines": 6, "path": "/Protokolle/2017/Protokoll 2017-10-04.md", "repo_name": "plannapus/MfN-Code-Clinic", "src_encoding": "UTF-8", "text": "32nd Meeting - 4th of October 2017\n----\nAttendance: 4\n\nWe reviewed a bit of code i worked on a few months ago, trying to come up with a new algorithm for CLAMP analysis.\nIt uses, on top of the classic CCA, surface fitting to computes the predicted meteorological parameters.\n" }, { "alpha_fraction": 0.691530168056488, "alphanum_fraction": 0.7242941856384277, "avg_line_length": 25.546297073364258, "blob_id": "9c92c1d43137717a89858d926fbb8bf0c6f6b76e", "content_id": "84e1d94859f3f13f9e9e951c69511544dfe7b876", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 2869, "license_type": "no_license", "max_line_length": 126, "num_lines": 108, "path": "/Scripts/2017/Code 2017-08-09 - ordinations.R", "repo_name": "plannapus/MfN-Code-Clinic", "src_encoding": "UTF-8", "text": "#Principal component analysis in base R\npca1 <- prcomp(USArrests)\npca2 <- princomp(USArrests)\n\npca1$rotation\nunclass(pca2$loadings)\n\n#These objects contains the actual coordinates of the data on the PC axes\npca1$x\npca2$scores\n\n#Both are identical\nabs(abs(pca1$x)-abs(pca2$scores)) < 1e-10\n\npca1$sdev #Only differences between both functions\npca2$sdev\n#Beware that those are not eigenvalues but standard deviations\n(pca1$sdev^2/sum(pca1$sdev^2)) # <- Proportion of variance explained\n\n#In practice though you want to scale your data (i. e. 
scale each column to a variance of 1):\n\npca3 <- prcomp(USArrests, scale=TRUE)\npca4 <- princomp(USArrests, cor=TRUE)\n\nabs(abs(pca3$x)-abs(pca4$scores)) < 1e-10 #Not TRUE anymore as variance computed differently in both functions\n\n#Native plots for pca\nplot(pca3)\nbiplot(pca3)\n\nplot(pca4)\nbiplot(pca4)\n\n#Plotting on your own\nplot(pca4$scores[,1:2])\nplot(pca4$scores[,c(1,3)])\n\n#In vegan\nlibrary(vegan)\n\n#Function cmdscale does Principal COORDINATE analysis, which, when used with an euclidean distance metric, is the same as PCA.\npca5 <- cmdscale(dist(USArrests, method=\"euclidean\"), k=4) #Gives same as pca2$scores or pca1$x\n\n#Other means of ordinations:\n#CCA\n\nca1 <- cca(USArrests)\n\ncca1 <- cca(USArrests[,-2], USArrests[,2])\n\nplot(cca1)\nplot(cca1,choices=c(1,3))\n\nstr(cca1)\n\n#This contains the scores on the constrained axes\ncca1$CCA$u\n\n#NMDS (non-metric multidimensional scaling)\nlibrary(MASS)\nnm1 <- isoMDS(dist(USArrests, method=\"euclidean\")) #A bit pointless as whole point of nmds is to be non-euclidean\nnm2 <- isoMDS(vegdist(USArrests, method=\"bray\"))\nnm3 <- isoMDS(vegdist(USArrests, method=\"bray\"), k=4)\n\n#Check quality of the ordination\nstressplot(nm2, vegdist(USArrests, method=\"bray\"))\nstressplot(nm3, vegdist(USArrests, method=\"bray\"))\n\nplot(nm2$points)\nplot(nm3$points[,c(1,3)])\n\nlibrary(vegan)\nnm4 <- metaMDS(USArrests, \"bray\", k=4)\n\nstressplot(nm4)\nplot(nm4)\nplot(nm4, choices=c(1,3))\nstr(nm4)\n\n#Separating by clusters\ndata(iris)\na <- prcomp(iris[,1:4], scale=TRUE)\nplot(a$x[,1:2]) # == to biplot(a)\nplot(a$x[,1:2],col=iris$Species)\n\nsetosa <- a$x[iris$Species==\"setosa\",1:2]\nS <- chull(setosa) #Convex Hull\npolygon(setosa[S,])\n\nversi <- a$x[iris$Species==\"versicolor\",1:2]\npolygon(versi[chull(versi),], border=\"red\")\n\nvirgi <- a$x[iris$Species==\"virginica\",1:2]\npolygon(virgi[chull(virgi),], border=\"green\")\n\nk <- kmeans(a$x,2)\nplot(a$x[,1:2],col=k$cluster)\n\nk1 <- a$x[k$cluster==1,1:2]\nk2 <- a$x[k$cluster==2,1:2]\npolygon(k1[chull(k1),])\npolygon(k2[chull(k2),], border=\"red\")\n\n#You can also fit a surface to an ordination space using \"ordisurf\",\n#predict the ordination score of an additional sample using \"predict\", etc.\n\n# In an NMDS remember to not use a standard anova or manova (implemented in R as \"aov\"), \n# but a permanova (as NMDS non-metric, centroids are meaningless): implemented in R as \"adonis\".\n\n\n" }, { "alpha_fraction": 0.642914354801178, "alphanum_fraction": 0.6557245850563049, "avg_line_length": 33.66666793823242, "blob_id": "6851301a3159c355eea5b44d9c811caaff27ba55", "content_id": "05443e5674f189bbc74de57ff9f7ef0c3f17a9fb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 1249, "license_type": "no_license", "max_line_length": 95, "num_lines": 36, "path": "/Scripts/2015/Code 2015-12-09 web scraping 101.R", "repo_name": "plannapus/MfN-Code-Clinic", "src_encoding": "UTF-8", "text": "searchPangaea<-function(query){\n\trequire(XML)\n\tquery <- gsub(\" \",\"+\",query)\n\tq <- TRUE\n\toffset <- 0\n\twhile(q){\n\t\turl_query <- sprintf(\"https://www.pangaea.de/search?q=%s&count=500&offset=%i\", query, offset)\n\t\turl_query <- URLencode(url_query)\n\t\tpage <- htmlParse(readLines(url_query), encoding=\"UTF-8\")\n\t\tdataset_name <- xpathSApply(page,\"//li/p[@class='citation']/a\",xmlValue)\n\t\tdoi <- xpathSApply(page,\"//li/p[@class='citation']/a\",xmlAttrs)['href',]\n\t\tn <- length(doi)\n\t\tif(n<500){q <- 
FALSE}\n\t\tif(!n){stop(\"No result for that query\")}\n\t\tif(!offset){\n\t\t\toutput <- data.frame(Name=dataset_name, DOI=doi, stringsAsFactors=FALSE)\n\t\t\t}else{\n\t\t\t\toutput <- rbind(output, data.frame(Name=dataset_name, DOI=doi, stringsAsFactors=FALSE))\n\t\t\t\t}\n\t\toffset <- offset + 500\n\t\t}\n\toutput\n\t}\n\t\ngetPangaeaData<-function(DOI){\t\t\n\trequire(XML)\n\tDOI <- as.character(DOI)\n\tif(grepl(\"^[0-9]\",DOI)) DOI <- paste(\"http://doi.pangaea.de/\",DOI,sep=\"\")\n\tpage <- paste(DOI,\"?format=html\",sep=\"\")\n\thtml <- htmlParse(page, encoding=\"UTF-8\")\n\ttitle <- xpathSApply(html, \"//div[@class='MetaHeaderItem']\",xmlValue)[1]\n\ttables <- readHTMLTable(html)\n\tparams <- tables[[length(tables)-1]]\n\tdata <- tables[[length(tables)]]\n\tlist(Citation=title, Parameters=params, Dataset=data)\n\t}\n\t" }, { "alpha_fraction": 0.7123595476150513, "alphanum_fraction": 0.7730336785316467, "avg_line_length": 62.57143020629883, "blob_id": "bdb34404b3c5cd6a33268cd4e975716b6d4e423d", "content_id": "13833131d503d324094396e63ac8b19143b874d9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 445, "license_type": "no_license", "max_line_length": 226, "num_lines": 7, "path": "/Protokolle/2017/Protokoll 2017-10-18.md", "repo_name": "plannapus/MfN-Code-Clinic", "src_encoding": "UTF-8", "text": "33rd Meeting - 18th of October 2017\n----\nAttendance: 4\n\nMelanie introduced to us the R package [speciesgeocodeR](https://github.com/azizka/speciesgeocodeR). \nWe discussed with Antoine how to compare various PCAs. \nI updated people on [my efforts](https://github.com/plannapus/MfN-Code-Clinic/blob/master/Scripts/2017/Code%202017-10-18%20-%20clamp.R) to come up with a new model for CLAMP. 
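A hedged sketch of the general idea (dummy data; the actual clamp.R differs in the details): ordinate the modern sites, fit a smooth surface of a climate parameter over the ordination scores with vegan's `ordisurf`, then predict that parameter for a new site from its scores:

    library(vegan)
    veg <- matrix(rpois(200, 5), nrow=20) # 20 modern sites x 10 leaf characters (dummy)
    mat <- rnorm(20, 15, 5)               # e.g. mean annual temperature (dummy)
    ord <- cca(veg, mat)                  # CCA constrained by the climate parameter
    sc <- scores(ord, display="sites", choices=1:2)
    surf <- ordisurf(sc, mat, plot=FALSE) # gam surface of MAT over the ordination plane
    predict(surf, newdata=data.frame(x1=sc[1, 1], x2=sc[1, 2]), type="response")
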
We discussed cross-validation a little as a result.\n" }, { "alpha_fraction": 0.5263041853904724, "alphanum_fraction": 0.5516602993011475, "avg_line_length": 34.159690856933594, "blob_id": "4a4ac06cf2b95d2b561d75c231a27c3aaf598adc", "content_id": "3fba9291bd7fb3bbbabdd602b8f02ad3370af942", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 22677, "license_type": "no_license", "max_line_length": 147, "num_lines": 645, "path": "/Scripts/2017/Code 2017-01-03 foreach_loops_soul_friedman/Additional infos/Simulation scripts.R", "repo_name": "plannapus/MfN-Code-Clinic", "src_encoding": "UTF-8", "text": "source(\"http://www.graemetlloyd.com/pubdata/functions_5.r\")\n\nlibrary(caper)\nlibrary(geiger)\nlibrary(phangorn)\nlibrary(OUwie)\nlibrary(paleotree)\nlibrary(foreach)\n\nlibrary(doParallel)\nregisterDoParallel(cores=detectCores()-1)\n\n#######################################################################################\n\nextincttable<-function(ttsltree, starttime, endtime){\n\n ext.tab <-matrix(ncol=4, nrow=Ntip(ttsltree))\n edgetotip <-match(c(1:Ntip(ttsltree)), ttsltree$edge[,2])\n tiplengths <-ttsltree$edge.length[edgetotip]\n endrange <-ttsltree$root.time-diag(vcv(ttsltree))\n startrange <-endrange+tiplengths\n\t\n ext.tab[,1] <-ttsltree$tip.label\n ext.tab[,2] <-startrange\n ext.tab[,3] <-endrange\n\n for (i in 1:Ntip(ttsltree)){\n keytime <-round(as.numeric(as.character(ext.tab[i,3])),4)\n if (keytime<=endtime){\n \t ext.tab[i,4] <-0\n \t}\n \tif (keytime>endtime & keytime<=starttime){\n \t ext.tab[i,4] <-1\n \t}\n \tif (keytime>starttime){\n \t ext.tab[i,4] <-2\n \t}\n\t}\n namettd <-ext.tab[ext.tab[,4]==2,1]\n ttd <-match(namettd, ext.tab[,1])\n if (length(ttd)>0){\n ext.tab <-ext.tab[-ttd,]\n }\n colnames(ext.tab) <-c(\"species\", \"FAD\", \"LAD\", \"extinct\")\n ext.tab <-as.data.frame(ext.tab)\n result <-list(ext.tab, ttsltree)\n names(result) <-c(\"ext.tab\", \"ttsltree\")\n return(result)\n}\n\n############################################################################################\n############################################################################################\n\nsampling<- function(run, r) {\n \n cladogram<-taxa2cladogram(run)\n ranges<-sampleRanges(taxad=run, r=r, modern.samp.prob=r)\n\n sampledtree <- drop.tip(cladogram, cladogram$tip.label[is.na(match(cladogram$tip.label, names(which(!is.na(ranges[, 1])))))])\n sampledtree <- basemulti2di(sampledtree)\n sampledtree$root.edge<-0\n sampledtree$root.time<-max(ranges)\n\n sampledages<-ranges[complete.cases(ranges),]\n result<-list(sampledages, sampledtree)\n names(result)<-c(\"sampledages\", \"sampledtree\")\n return(result)\n}\n\n####################################################################################################\n####################################################################################################\n#random extinction during the simulation under a budding model of evolution\n\ncount<-1\nbaseruns<-list()\nDValues<-list()\n\nwhile (count < howmany) {\n\n baserun <-simFossilTaxa(p=0.1, q=0.1, mintaxa=200, maxtime=50, min.cond=FALSE)\n\n for (i in 1:nrow(baserun)){\n rownames(baserun)[i]<-paste(\"b\", i, sep=\"\")\n }\n\n for (k in 1:4){\n\n basetree <-taxa2phylo(baserun)\n\n edgetotip <-match(c(1:Ntip(basetree)), basetree$edge[,2])\n tiplengths <-basetree$edge.length[edgetotip]\n endrange <-basetree$root.time-diag(vcv(basetree))\n startrange <-endrange+tiplengths\n\t\t\n realages <-matrix(ncol=2, nrow=Ntip(basetree))\n 
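    # realages: observed stratigraphic range of each tip (first appearance in column 1, last appearance in column 2)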
rownames(realages) <-basetree$tip.label\n realages[,1] <-startrange\n realages[,2] <-endrange\n\n data1 <-matrix(nrow=Ntip(basetree), ncol=2)\n data1[,2] <-1\n data1[,1] <-basetree$tip.label\n data1 <-as.data.frame(data1)\n basetree$node.label <-rep(\"1\", Nnode(basetree))\n\n Trait1 <-OUwie.sim(basetree, data1, alpha=1e-10, sigma.sq=0.5, theta0=1, theta=0)\n threshold <-quantile(Trait1[,3], 0.8)\n extant <-which(baserun[,5]==1)\n extant_traits <-Trait1[extant,]\n\n risk <-extant_traits[which(extant_traits[,3]<=threshold,3),1]\n safe <-extant_traits[which(extant_traits[,3]>threshold,3),1]\n alive <-character()\n dead <-character()\n\n for (j in 1:length(risk)){\n if(runif(1,0,1)<=0.05){\n alive <-c(alive, risk[j])\n } else {\n dead <-c(dead, risk[j])\n }\n }\n\n for (j in 1:length(safe)){\n if(runif(1,0,1)>=0.1){\n alive <-c(alive, safe[j])\n } else {\n dead <-c(dead, safe[j])\n }\n }\n\n alive<-sort(alive)\n if(alive[1]==\"b1\"){\n alive<-alive[-1]\n dead<-c(\"b1\", dead)\n }\n\n if(alive[1]==\"b2\"){\n alive<-alive[-1]\n dead<-c(\"b2\", dead)\n }\n\n no_newtrees <-length(alive)\n\n baserun[,3:4]<-baserun[,3:4]+20\n baserun[,5]<-0\n\n for (m in 1:no_newtrees){\n\n newrun <-simFossilTaxaSilent(p=0.1, q=0.1, mintaxa=5, maxtime=20, min.cond=FALSE, nruns=1)\n attachment_tip <-which(rownames(baserun)==alive[m])\n first_table <-baserun[1:attachment_tip,]\n if(attachment_tip!=nrow(baserun) & attachment_tip!=(nrow(baserun)-1)){\n last_table <-baserun[(attachment_tip+1):nrow(baserun),]\n first_table[nrow(first_table),4]<-newrun[1,4]\n first_table[nrow(first_table),5]<-newrun[1,5]\n newrun<-newrun[-1,]\n newrun[,c(1,6)] <-(nrow(baserun)+1):(nrow(baserun)+nrow(newrun))\n newrun[newrun[,2]!=1,2] <-(newrun[newrun[,2]!=1,2])+nrow(baserun)-1\n newrun[newrun[,2]==1,2]<- baserun[attachment_tip,1]\n baserun <-rbind(first_table, newrun, last_table)\n }\n }\n\n for (l in 1:nrow(baserun)){\n rownames(baserun)[l]<-paste(\"b\", l, sep=\"\")\n }\n cat(\"level up \", k, \"\\n\")\n }\n\n baserun<-baserun[order(baserun[,1]),]\n for (l in 1:nrow(baserun)){\n rownames(baserun)[l]<-paste(\"b\", l, sep=\"\")\n }\n\n realtree<-taxa2phylo(baserun, obs_time=baserun[,3])\n realtree<-di2multi(realtree)\n\n edgetotip<-match(c(1:Ntip(realtree)), realtree$edge[,2])\n extratiplengths<-baserun[,3]-baserun[,4]\n realtree$edge.length[edgetotip]<-(realtree$edge.length[edgetotip])+extratiplengths\n tiplengths<-realtree$edge.length[edgetotip]\n\n slicetimeend<- 19.99\n slicetimestart<- 30\n\n ttsltree<-timeSliceTree(realtree, sliceTime=slicetimeend, drop.extinct=F, plot=F)\n\n extincttab<-extincttable(ttsltree, slicetimestart, slicetimeend)\n inputdata<-comparative.data(extincttab$ext.tab, phy=ttsltree, species, vcv=T)\n res <- phylo.d(data=inputdata, binvar=extinct, permut=1000)\n\n if (res$Pval0>=0.05){\n baseruns[[count]]<-baserun\n DValues[[count]]<- res\n count<-count+1\n }\n}\n\n\n\n#########################################################################################\n#########################################################################################\n#No random extinction during the simulation under \n#a budding model of evolution\n\ncount<-1\nbaseruns<-list()\nDValues<-list()\n\nwhile (count < 6) {\n\n baserun <-simFossilTaxa(p=0.1, q=0, mintaxa=20, maxtime=50, min.cond=FALSE)\n\n for (i in 1:nrow(baserun)){\n rownames(baserun)[i]<-paste(\"b\", i, sep=\"\")\n }\n\n for (k in 1:4){\n\n basetree <-taxa2phylo(baserun)\n\n edgetotip <-match(c(1:Ntip(basetree)), basetree$edge[,2])\n tiplengths 
<-basetree$edge.length[edgetotip]\n endrange <-basetree$root.time-diag(vcv(basetree))\n startrange <-endrange+tiplength\n\n data1 <-matrix(nrow=Ntip(basetree), ncol=2)\n data1[,2] <-1\n data1[,1] <-basetree$tip.label\n data1 <-as.data.frame(data1)\n basetree$node.label <-rep(\"1\", Nnode(basetree))\n\n Trait1 <-OUwie.sim(basetree, data1, alpha=1e-10, sigma.sq=0.3, theta0=1, theta=0)\n threshold <-quantile(Trait1[,3], 0.8)\n extant_traits <-Trait1\n\n risk <-extant_traits[which(extant_traits[,3]<=threshold,3),1]\n safe <-extant_traits[which(extant_traits[,3]>threshold,3),1]\n alive <-character()\n dead <-character()\n\n for (j in 1:length(risk)){\n if(runif(1,0,1)<=0.05){\n alive <-c(alive, risk[j])\n } else {\n dead <-c(dead, risk[j])\n }\n }\n\n for (j in 1:length(safe)){\n if(runif(1,0,1)>=0.1){\n alive <-c(alive, safe[j])\n } else {\n dead <-c(dead, safe[j])\n }\n }\n\n alive<-sort(alive)\n if(alive[1]==\"b1\"){\n alive<-alive[-1]\n dead<-c(\"b1\", dead)\n }\n\n if(alive[1]==\"b2\"){\n alive<-alive[-1]\n dead<-c(\"b2\", dead)\n }\n\n no_newtrees <-length(alive)\n\n baserun[,3:4]<-baserun[,3:4]+20\n baserun[,5]<-0\n\n for (m in 1:no_newtrees){\n\n newrun <-simFossilTaxaSilent(p=0.1, q=0, mintaxa=5, maxtime=20, min.cond=FALSE, nruns=1)\n attachment_tip <-which(rownames(baserun)==alive[m])\n first_table <-baserun[1:attachment_tip,]\n if(attachment_tip!=nrow(baserun) & attachment_tip!=(nrow(baserun)-1)){\n last_table <-baserun[(attachment_tip+1):nrow(baserun),]\n first_table[nrow(first_table),4]<-newrun[1,4]\n \t first_table[nrow(first_table),5]<-newrun[1,5]\n \tnewrun<-newrun[-1,]\n \t newrun[,c(1,6)] <-(nrow(baserun)+1):(nrow(baserun)+nrow(newrun))\n \t newrun[newrun[,2]!=1,2] <-(newrun[newrun[,2]!=1,2])+nrow(baserun)-1\n \t newrun[newrun[,2]==1,2]<- baserun[attachment_tip,1]\n \t baserun <-rbind(first_table, newrun, last_table)\n }\n }\n \n for (l in 1:nrow(baserun)){\n rownames(baserun)[l]<-paste(\"b\", l, sep=\"\")\n }\n cat(\"level up \", k, \"\\n\")\n }\n\n baserun<-baserun[order(baserun[,1]),]\n for (l in 1:nrow(baserun)){\n rownames(baserun)[l]<-paste(\"b\", l, sep=\"\")\n }\n\n realtree<-taxa2phylo(baserun,obs_time=baserun[,3])\n realtree<-di2multi(realtree)\n\n edgetotip<-match(c(1:Ntip(realtree)), realtree$edge[,2])\n extratiplengths<-baserun[,3]-baserun[,4]\n realtree$edge.length[edgetotip]<-(realtree$edge.length[edgetotip])+extratiplengths\n tiplengths<-realtree$edge.length[edgetotip]\n\n slicetimeend<- 19.99\n slicetimestart<- 30\n\n ttsltree<-timeSliceTree(realtree, sliceTime=slicetimeend, drop.extinct=F, plot=F)\n\n extincttab<-extincttable(ttsltree, slicetimestart, slicetimeend)\n inputdata<-comparative.data(extincttab$ext.tab, phy=ttsltree, species, vcv=T)\n res <- phylo.d(data=inputdata, binvar=extinct, permut=1000)\n\n if (res$DEstimate<0.1){\n baseruns[[count]]<-baserun\n DValues[[count]]<- res\n count<-count+1\n }\n}\n\n\n\n#########################################################################################\n#########################################################################################\n# a BIFURCATING model of evolution\n\ncount<-1\nbaseruns<-list()\nDValues<-list()\n\nwhile (count < 6) {\n\n baserun <-simFossilTaxa(p=0.1, q=0, mintaxa=20, maxtime=50, prop.bifurc=1, min.cond=FALSE)\n\n for (i in 1:nrow(baserun)){\n rownames(baserun)[i]<-paste(\"b\", i, sep=\"\")\n }\n\n for (k in 1:4){\n\n basetree <-taxa2phylo(baserun)\n\n edgetotip <-match(c(1:Ntip(basetree)), basetree$edge[,2])\n tiplengths <-basetree$edge.length[edgetotip]\n endrange 
<-basetree$root.time-diag(vcv(basetree))\n startrange <-endrange+tiplengths\n\t\t\n realages <-matrix(ncol=2, nrow=Ntip(basetree))\n rownames(realages) <-basetree$tip.label\n realages[,1] <-startrange\n realages[,2] <-endrange\n\n data1 <-matrix(nrow=Ntip(basetree), ncol=2)\n data1[,2] <-1\n data1[,1] <-basetree$tip.label\n data1 <-as.data.frame(data1)\n basetree$node.label <-rep(\"1\", Nnode(basetree))\n\n Trait1 <-OUwie.sim(basetree, data1, alpha=1e-10, sigma.sq=0.3, theta0=1, theta=0)\n threshold <-quantile(Trait1[,3], 0.8)\n extant <-which(baserun[,5]==1)\n extant_traits <-Trait1[extant,]\n\n risk <-extant_traits[which(extant_traits[,3]<=threshold,3),1]\n safe <-extant_traits[which(extant_traits[,3]>threshold,3),1]\n alive <-character()\n dead <-character()\n\n for (j in 1:length(risk)){\n if(runif(1,0,1)<=0.05){\n alive <-c(alive, risk[j])\n } else {\n dead <-c(dead, risk[j])\n }\n }\n\n for (j in 1:length(safe)){\n if(runif(1,0,1)>=0.1){\n alive <-c(alive, safe[j])\n } else {\n dead <-c(dead, safe[j])\n }\n }\n\n alive<-sort(alive)\n if(alive[1]==\"b1\"){\n alive<-alive[-1]\n dead<-c(\"b1\", dead)\n }\n\n if(alive[1]==\"b2\"){\n alive<-alive[-1]\n dead<-c(\"b2\", dead)\n }\n\n no_newtrees <-length(alive)\n\n baserun[,3:4]<-baserun[,3:4]+20\n baserun[,5]<-0\n\n for (m in 1:no_newtrees){\n\n newrun <-simFossilTaxaSilent(p=0.1, q=0, mintaxa=3, maxtime=20, prop.bifurc=1, min.cond=FALSE, nruns=1)\n attachment_tip <-which(rownames(baserun)==alive[m])\n first_table <-baserun[1:attachment_tip,]\n if(attachment_tip!=nrow(baserun) & attachment_tip!=(nrow(baserun)-1)){\n last_table <-baserun[(attachment_tip+1):nrow(baserun),]\n first_table[nrow(first_table),4]<-newrun[1,4]\n \t first_table[nrow(first_table),5]<-newrun[1,5]\n \tnewrun<-newrun[-1,]\n \t newrun[,c(1,6)] <-(nrow(baserun)+1):(nrow(baserun)+nrow(newrun))\n \t newrun[newrun[,2]!=1,2] <-(newrun[newrun[,2]!=1,2])+nrow(baserun)-1\n \t newrun[newrun[,2]==1,2]<- baserun[attachment_tip,1]\n \t baserun <-rbind(first_table, newrun, last_table)\n }\n }\n \n for (l in 1:nrow(baserun)){\n rownames(baserun)[l]<-paste(\"b\", l, sep=\"\")\n }\n cat(\"level up \", k, \"\\n\")\n }\n\n baserun<-baserun[order(baserun[,1]),]\n for (l in 1:nrow(baserun)){\n rownames(baserun)[l]<-paste(\"b\", l, sep=\"\")\n }\n\n realtree<-taxa2phylo(baserun,obs_time=baserun[,3])\n realtree<-di2multi(realtree)\n \n ttd<-vector(\"numeric\")\n for (i in 1:length(baserun[,1])) {\n if ((length(which(baserun[,2]==i))>=2) & (baserun[i,5]==0)) {\n ttd<-c(ttd,i)\n }\n }\n \n edgetotip<-match(c(1:Ntip(realtree)), realtree$edge[,2])\n extratiplengths<-baserun[,3]-baserun[,4]\n realtree$edge.length[edgetotip]<-(realtree$edge.length[edgetotip])+extratiplengths\n tiplengths<-realtree$edge.length[edgetotip]\n \n realtree<-drop.tip(realtree,ttd)\n\n realtree[which(realtree$edge.length==0)]<-0.00001 \n \n slicetimeend<- 19.99\n slicetimestart<- 30\n\n ttsltree<-timeSliceTree(realtree, sliceTime=slicetimeend, drop.extinct=F, plot=F)\n\n extincttab<-extincttable(ttsltree, slicetimestart, slicetimeend)\n inputdata<-comparative.data(extincttab$ext.tab, phy=ttsltree, species, vcv=T)\n res <- phylo.d(data=inputdata, binvar=extinct, permut=1000)\n\n if (res$DEstimate<0.1){\n baseruns[[count]]<-baserun\n DValues[[count]]<- res\n count<-count+1\n }\n }\n\n\n##############################################################################################\n##############################################################################################\n\nsample.scale.mbl<- function(paltree){\n\n 
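  # time-scale the sampled cladogram with paleotree's "minimum branch length" (mbl) method, then recompute the tip ranges on the scaled tree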
ages<-paltree$sampledages\n tree<-paltree$sampledtree\n\n ttree<-timePaleoPhy(tree, ages, type=\"mbl\", vartime=2, add.term=TRUE ,ntrees=1)\n\n orderedages<-ages[match(ttree$tip.label, rownames(ages)),1:2]\n orderedages<-orderedages[complete.cases(orderedages),]\n edgetotip<-match(c(1:Ntip(ttree)), ttree$edge[,2])\n tiplengths<-ttree$edge.length[edgetotip]\n endrange<-ttree$root.time-diag(vcv(ttree))\n startrange<-endrange+tiplengths\n orderedages[,1]<-startrange\n\n result<-list(ttree,orderedages)\n names(result)<-c(\"STtree\", \"SAges\")\n return(result)\n\n }\n\nsample.scale.hedman<- function(paltree){\n outgroup.ages<-c(153.8,145,139.8,139.8,132.9,132.9)\n t<-163.5\n ages<-paltree$sampledages\n tree<-paltree$sampledtree\n \n orderedages<-ages[match(tree$tip.label, rownames(ages)),1:2]\n tree <- compute.brlen(tree,runif)\n ttreeall<-Hedman.tree.dates(tree, tip.ages=orderedages[,1], outgroup.ages, resolution=1000, conservative=TRUE, t0=t)\n ttree<-ttreeall$tree\n ext.time<-orderedages[,2]\n \n obs_ranges <- orderedages[, 1] - orderedages[, 2]\n term_id <- ttree$tip.label[ttree$edge[ttree$edge[,2] <= Ntip(ttree), 2]]\n term_add <- sapply(term_id, function(x) obs_ranges[x])\n ttree$edge.length[ttree$edge[, 2] <= Ntip(ttree)] <- ttree$edge.length[ttree$edge[,2] <= Ntip(ttree)] + term_add\n ttree$root.time <- max(orderedages[ttree$tip.label,2]) + min(dist.nodes(ttree)[1:Ntip(ttree), Ntip(ttree) + 1])\n\n edgetotip<-match(c(1:Ntip(ttree)), ttree$edge[,2])\n tiplengths<-ttree$edge.length[edgetotip]\n endrange<-ttree$root.time-diag(vcv(ttree))\n startrange<-endrange+tiplengths\n orderedages[,1]<-startrange\n\n result<-list(ttree, orderedages)\n names(result)<-c(\"STtree\", \"SAges\")\n return(result)\n }\n\nsample.scale.cal3<- function(paltree, anc.wt){\n ages<-paltree$sampledages\n tree<-paltree$sampledtree\n SRres<-getSampRateCont(ages)\n sampRate <- SRres[[2]][2]\n brRate <- extRate <- SRres[[2]][1]\n ttree<-cal3TimePaleoPhy(tree, ages, brRate, extRate, sampRate, anc.wt, ntrees=1)\n \n edgetotip<-match(c(1:Ntip(ttree)), ttree$edge[,2])\n tiplengths<-ttree$edge.length[edgetotip]\n endrange<-ttree$root.time-diag(vcv(ttree))\n startrange<-endrange+tiplengths\n orderedages<-cbind(startrange, endrange)\n\n result<-list(ttree, orderedages)\n names(result)<-c(\"STtree\", \"SAges\")\n return(result)\n }\n\n##############################################################################################\n#Some example sampling runs\n##############################################################################################\n\nBud.Ext.hed001.Clust<- foreach (p=1:length(baseruns), .packages=c('foreach','doParallel','paleotree','geiger','caper','phangorn')) %do% {\n \n baserun<-baseruns[[p]]\n\n slicetimeend<- 19.99\n slicetimestart<- 30\n\n r<-0.01\n\n\n foreach (i=1:50, .combine='rbind', .packages=c('foreach','doParallel','paleotree','geiger','caper','phangorn')) %dopar% {\n \n sampled.res<-sampled.sim<-sampled.sim.scaled<-sampled.tree<-sampled.ages<-sampled.ttsltree<-sampled.extincttab<-sampled.inputdata<-NA\n sampled.sim<-sampling(baserun, r)\n try(sampled.sim.scaled<-sample.scale.hedman(sampled.sim))\n try(sampled.tree<-sampled.sim.scaled$STtree)\n try(sampled.ages<-sampled.sim.scaled$SAges)\n \n try(sampled.ttsltree<-timeSliceTree(sampled.tree, sliceTime=slicetimeend, drop.extinct=F, plot=F))\n\n try(sampled.extincttab<-extincttable(sampled.ttsltree, slicetimestart, slicetimeend),silent=T)\n try(sampled.inputdata<-comparative.data(sampled.extincttab$ext.tab, phy=sampled.ttsltree, species, 
vcv=T),silent=T)\n try(sampled.res <- phylo.d(data=sampled.inputdata, binvar=extinct, permut=1000),silent=T)\n\n if (is.na(sampled.res)) {\n ans<-c(NA,NA,NA,NA,NA)\n } else {\n ans<-as.numeric(c(sampled.res$DEstimate, sampled.res$StatesTable[1], sampled.res$StatesTable[2], sampled.res$Pval1, sampled.res$Pval0))\n }\n }\n }\n\n#########################################################################################\n#########################################################################################\nBud.Ext.hed1.Clust<- foreach (p=1:length(baseruns), .packages=c('foreach','doParallel','paleotree','geiger','caper','phangorn')) %do% {\n \n baserun<-baseruns[[p]]\n\n slicetimeend<- 19.99\n slicetimestart<- 30\n\n r<-0.1\n\n foreach (i=1:50, .combine='rbind', .packages=c('foreach','doParallel','paleotree','geiger','caper','phangorn')) %dopar% {\n \n sampled.res<-sampled.sim<-sampled.sim.scaled<-sampled.tree<-sampled.ages<-sampled.ttsltree<-sampled.extincttab<-sampled.inputdata<-NA\n sampled.sim<-sampling(baserun, r)\n try(sampled.sim.scaled<-sample.scale.hedman(sampled.sim))\n try(sampled.tree<-sampled.sim.scaled$STtree)\n try(sampled.ages<-sampled.sim.scaled$SAges)\n \n try(sampled.ttsltree<-timeSliceTree(sampled.tree, sliceTime=slicetimeend, drop.extinct=F, plot=F))\n\n try(sampled.extincttab<-extincttable(sampled.ttsltree, slicetimestart, slicetimeend),silent=T)\n try(sampled.inputdata<-comparative.data(sampled.extincttab$ext.tab, phy=sampled.ttsltree, species, vcv=T),silent=T)\n try(sampled.res <- phylo.d(data=sampled.inputdata, binvar=extinct, permut=1000),silent=T)\n\n if (is.na(sampled.res)) {\n ans<-c(NA,NA,NA,NA,NA)\n } else {\n ans<-as.numeric(c(sampled.res$DEstimate, sampled.res$StatesTable[1], sampled.res$StatesTable[2], sampled.res$Pval1, sampled.res$Pval0))\n }\n }\n }\n\n#########################################################################################\n#########################################################################################\nBud.Ext.hed5.Clust<- foreach (p=1:length(baseruns), .packages=c('foreach','doParallel','paleotree','geiger','caper','phangorn')) %do% {\n \n baserun<-baseruns[[p]]\n\n slicetimeend<- 19.99\n slicetimestart<- 30\n\n r<-0.5\n\n foreach (i=1:50, .combine='rbind', .packages=c('foreach','doParallel','paleotree','geiger','caper','phangorn')) %dopar% {\n \n sampled.res<-sampled.sim<-sampled.sim.scaled<-sampled.tree<-sampled.ages<-sampled.ttsltree<-sampled.extincttab<-sampled.inputdata<-NA\n sampled.sim<-sampling(baserun, r)\n try(sampled.sim.scaled<-sample.scale.hedman(sampled.sim))\n try(sampled.tree<-sampled.sim.scaled$STtree)\n try(sampled.ages<-sampled.sim.scaled$SAges)\n \n try(sampled.ttsltree<-timeSliceTree(sampled.tree, sliceTime=slicetimeend, drop.extinct=F, plot=F))\n\n try(sampled.extincttab<-extincttable(sampled.ttsltree, slicetimestart, slicetimeend),silent=T)\n try(sampled.inputdata<-comparative.data(sampled.extincttab$ext.tab, phy=sampled.ttsltree, species, vcv=T),silent=T)\n try(sampled.res <- phylo.d(data=sampled.inputdata, binvar=extinct, permut=1000),silent=T)\n\n if (is.na(sampled.res)) {\n ans<-c(NA,NA,NA,NA,NA)\n } else {\n ans<-as.numeric(c(sampled.res$DEstimate, sampled.res$StatesTable[1], sampled.res$StatesTable[2], sampled.res$Pval1, sampled.res$Pval0))\n }\n }\n }\n\n######################################################################################" }, { "alpha_fraction": 0.5650184154510498, "alphanum_fraction": 0.5826542973518372, "avg_line_length": 27.993528366088867, "blob_id": 
"12f5dc0cbda7061a55db18a76896cf79e0bcead3", "content_id": "b2663913d22a71e7895b5336f332e5a26c95492a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 8967, "license_type": "no_license", "max_line_length": 125, "num_lines": 309, "path": "/Scripts/2018/2018-03-06 - crash course.R", "repo_name": "plannapus/MfN-Code-Clinic", "src_encoding": "UTF-8", "text": "# Modified after StackOverflow QA: https://stackoverflow.com/questions/19226816/how-can-i-view-the-source-code-for-a-function\n\n#####################S3 methods\nt\n# function (x) \n# UseMethod(\"t\")\n# <bytecode: 0x10a520668>\n# <environment: namespace:base>\n\nmethods(t)\n# [1] t.data.frame t.default t.ts* \n# see '?methods' for accessing help and source code\n\nt.default\n# function (x) \n# .Internal(t.default(x))\n# <bytecode: 0x10a520f10>\n# <environment: namespace:base>\n\nt.ts\n# Error: object 't.ts' not found\n\ngetAnywhere(t.ts)\n# A single object matching ‘t.ts’ was found\n# It was found in the following places\n# registered S3 method for t from namespace stats\n# namespace:stats\n# with value\n# \n# function (x) \n# {\n# cl <- oldClass(x)\n# other <- !(cl %in% c(\"ts\", \"mts\"))\n# class(x) <- if (any(other)) \n# cl[other]\n# attr(x, \"tsp\") <- NULL\n# t(x)\n# }\n# <bytecode: 0x10f727e30>\n# <environment: namespace:stats>\n\n#####################S4 methods\n\nlibrary(Matrix)\nchol2inv\n# standardGeneric for \"chol2inv\" defined from package \"base\"\n# \n# function (x, ...) \n# standardGeneric(\"chol2inv\")\n# <bytecode: 0x1035fe8b0>\n# <environment: 0x1035f9008>\n# Methods may be defined for arguments: x\n# Use showMethods(\"chol2inv\") for currently available ones.\n\nshowMethods(\"chol2inv\")\n# Function: chol2inv (package base)\n# x=\"ANY\"\n# x=\"CHMfactor\"\n# x=\"denseMatrix\"\n# x=\"diagonalMatrix\"\n# x=\"dtrMatrix\"\n# x=\"sparseMatrix\"\n\ngetMethod(\"chol2inv\",\"sparseMatrix\")\n# Method Definition:\n# \n# function (x, ...) \n# {\n# chk.s(..., which.call = -2)\n# tcrossprod(solve(as(x, \"triangularMatrix\")))\n# }\n# <environment: namespace:Matrix>\n# \n# Signatures:\n# x \n# target \"sparseMatrix\"\n# defined \"sparseMatrix\"\n\nall.equal\n# standardGeneric for \"all.equal\" defined from package \"base\"\n# \n# function (target, current, ...) \n# standardGeneric(\"all.equal\")\n# <environment: 0x1296b8640>\n# Methods may be defined for arguments: target, current\n# Use showMethods(\"all.equal\") for currently available ones.\n\nshowMethods(all.equal)\n# Function: all.equal (package base)\n# target=\"abIndex\", current=\"abIndex\"\n# target=\"abIndex\", current=\"numLike\"\n# target=\"ANY\", current=\"ANY\"\n# target=\"ANY\", current=\"Matrix\"\n# target=\"ANY\", current=\"sparseMatrix\"\n# target=\"ANY\", current=\"sparseVector\"\n# target=\"Matrix\", current=\"ANY\"\n# target=\"Matrix\", current=\"Matrix\"\n# target=\"numLike\", current=\"abIndex\"\n# target=\"sparseMatrix\", current=\"ANY\"\n# target=\"sparseMatrix\", current=\"sparseMatrix\"\n# target=\"sparseMatrix\", current=\"sparseVector\"\n# target=\"sparseVector\", current=\"ANY\"\n# target=\"sparseVector\", current=\"sparseMatrix\"\n# target=\"sparseVector\", current=\"sparseVector\"\n\ngetMethod(\"all.equal\",list(target=\"sparseMatrix\",current=\"ANY\"))\n# Method Definition:\n# \n# function (target, current, ...) \n# {\n# .local <- function (target, current, check.attributes = TRUE, \n# ...) 
\n# {\n# msg <- attr.all_Mat(target, current, check.attributes = check.attributes, \n# ...)\n# if (is.list(msg)) \n# msg[[1]]\n# else .a.e.comb(msg, all.equal(as(target, \"sparseVector\"), \n# current, check.attributes = check.attributes, ...))\n# }\n# .local(target, current, ...)\n# }\n# <environment: namespace:Matrix>\n# \n# Signatures:\n# target current\n# target \"sparseMatrix\" \"ANY\" \n# defined \"sparseMatrix\" \"ANY\"\n\n############################Unexported functions\nstats:::.makeNamesTs\n# function (...) \n# {\n# l <- as.list(substitute(list(...)))[-1L]\n# nm <- names(l)\n# fixup <- if (is.null(nm)) \n# seq_along(l)\n# else nm == \"\"\n# dep <- sapply(l[fixup], function(x) deparse(x)[1L])\n# if (is.null(nm)) \n# return(dep)\n# if (any(fixup)) \n# nm[fixup] <- dep\n# nm\n# }\n# <bytecode: 0x10e180d18>\n# <environment: namespace:stats>\n\n############################Functions calling compiled code\nt.default\n# function (x) \n# .Internal(t.default(x))\n# <bytecode: 0x10a520f10>\n# <environment: namespace:base>\n\n#####Here we need to access the R source code.\n#####There are plenty of copies around (including on CRAN)\n#####but for convenience one can use a GitHub mirror such as\n#####https://github.com/wch/r-source\n\n#####Folder src/main contains a file called names.c that maps Internal functions to their corresponding source file\n#####In this case line 723\n{\"t.default\",\tdo_transpose,\t0,\t11,\t1,\t{PP_FUNCALL, PREC_FN,\t0}},\n#####Then one needs to search the C source code for the function \"do_transpose\", which is in this case in file array.c\nSEXP attribute_hidden do_transpose(SEXP call, SEXP op, SEXP args, SEXP rho)\n{\n    SEXP a, r, dims, dimnames, dimnamesnames = R_NilValue,\n    ndimnamesnames, rnames, cnames;\n    int ldim, ncol = 0, nrow = 0;\n    R_xlen_t len = 0;\n    \n    checkArity(op, args);\n    a = CAR(args);\n    \n    if (isVector(a)) {\n        dims = getAttrib(a, R_DimSymbol);\n        ldim = length(dims);\n        rnames = R_NilValue;\n        cnames = R_NilValue;\n        switch(ldim) {\n            case 0:\n            len = nrow = LENGTH(a);\n            ncol = 1;\n            rnames = getAttrib(a, R_NamesSymbol);\n            dimnames = rnames;/* for isNull() below*/\n            break;\n            case 1:\n            len = nrow = LENGTH(a);\n            ncol = 1;\n            dimnames = getAttrib(a, R_DimNamesSymbol);\n            if (dimnames != R_NilValue) {\n                rnames = VECTOR_ELT(dimnames, 0);\n                dimnamesnames = getAttrib(dimnames, R_NamesSymbol);\n            }\n            break;\n            case 2:\n            ncol = ncols(a);\n            nrow = nrows(a);\n            len = XLENGTH(a);\n            dimnames = getAttrib(a, R_DimNamesSymbol);\n            if (dimnames != R_NilValue) {\n                rnames = VECTOR_ELT(dimnames, 0);\n                cnames = VECTOR_ELT(dimnames, 1);\n                dimnamesnames = getAttrib(dimnames, R_NamesSymbol);\n            }\n            break;\n            default:\n            goto not_matrix;\n        }\n    }\n    else\n    goto not_matrix;\n    PROTECT(dimnamesnames);\n    PROTECT(r = allocVector(TYPEOF(a), len));\n    R_xlen_t i, j, l_1 = len-1;\n    switch (TYPEOF(a)) {\n        case LGLSXP:\n        case INTSXP:\n        // filling in columnwise, \"accessing row-wise\":\n        for (i = 0, j = 0; i < len; i++, j += nrow) {\n            if (j > l_1) j -= l_1;\n            INTEGER(r)[i] = INTEGER(a)[j];\n        }\n        break;\n        case REALSXP:\n        for (i = 0, j = 0; i < len; i++, j += nrow) {\n            if (j > l_1) j -= l_1;\n            REAL(r)[i] = REAL(a)[j];\n        }\n        break;\n        case CPLXSXP:\n        for (i = 0, j = 0; i < len; i++, j += nrow) {\n            if (j > l_1) j -= l_1;\n            COMPLEX(r)[i] = COMPLEX(a)[j];\n        }\n        break;\n        case STRSXP:\n        for (i = 0, j = 0; i < len; i++, j += nrow) {\n            if (j > l_1) j -= l_1;\n            SET_STRING_ELT(r, i, STRING_ELT(a,j));\n        }\n        break;\n        case VECSXP:\n        for (i = 0, j = 0; i < len; i++, j += nrow) {\n            if (j > l_1) j -= l_1;\n            SET_VECTOR_ELT(r, i, 
VECTOR_ELT(a,j));\n        }\n        break;\n        case RAWSXP:\n        for (i = 0, j = 0; i < len; i++, j += nrow) {\n            if (j > l_1) j -= l_1;\n            RAW(r)[i] = RAW(a)[j];\n        }\n        break;\n        default:\n        UNPROTECT(2); /* r, dimnamesnames */\n        goto not_matrix;\n    }\n    PROTECT(dims = allocVector(INTSXP, 2));\n    INTEGER(dims)[0] = ncol;\n    INTEGER(dims)[1] = nrow;\n    setAttrib(r, R_DimSymbol, dims);\n    UNPROTECT(1); /* dims */\n    /* R <= 2.2.0: dropped list(NULL,NULL) dimnames :\n    * if(rnames != R_NilValue || cnames != R_NilValue) */\n    if(!isNull(dimnames)) {\n        PROTECT(dimnames = allocVector(VECSXP, 2));\n        SET_VECTOR_ELT(dimnames, 0, cnames);\n        SET_VECTOR_ELT(dimnames, 1, rnames);\n        if(!isNull(dimnamesnames)) {\n            PROTECT(ndimnamesnames = allocVector(VECSXP, 2));\n            SET_VECTOR_ELT(ndimnamesnames, 1, STRING_ELT(dimnamesnames, 0));\n            SET_VECTOR_ELT(ndimnamesnames, 0,\n            (ldim == 2) ? STRING_ELT(dimnamesnames, 1):\n            R_BlankString);\n            setAttrib(dimnames, R_NamesSymbol, ndimnamesnames);\n            UNPROTECT(1); /* ndimnamesnames */\n        }\n        setAttrib(r, R_DimNamesSymbol, dimnames);\n        UNPROTECT(1); /* dimnames */\n    }\n    copyMostAttrib(a, r);\n    UNPROTECT(2); /* r, dimnamesnames */\n    return r;\n    not_matrix:\n    error(_(\"argument is not a matrix\"));\n    return call;/* never used; just for -Wall */\n}\n\n######Functions called with .Primitive work the same way.\n######Functions called with .C, .Fortran or .Call are the equivalent in non-base packages.\n\n\n#######Alternatively package pryr has a function pryr::show_c_source that can help as well\npryr::show_c_source(.Internal(t.default(x)))\n\n#######As a side note, operators are more difficult to fetch:\n%%\n #  Error: unexpected SPECIAL in \"%%\"\n`%%`\n# function (e1, e2)  .Primitive(\"%%\")\ngetAnywhere(\"%%\")\n# A single object matching ‘%%’ was found\n# It was found in the following places\n#   package:base\n#   namespace:base\n# with value\n# \n# function (e1, e2)  .Primitive(\"%%\")\n" }, { "alpha_fraction": 0.7683741450309753, "alphanum_fraction": 0.788418710231781, "avg_line_length": 88.80000305175781, "blob_id": "809b4ea56e932cf458468e170eb9173af5b52a7b", "content_id": "c3d78d7e219fcc3abe3b1aacfa7086d037f5dfa0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 449, "license_type": "no_license", "max_line_length": 390, "num_lines": 5, "path": "/Protokolle/2016/Protokoll 2016-09-28.md", "repo_name": "plannapus/MfN-Code-Clinic", "src_encoding": "UTF-8", "text": "14th meeting - 28th of September 2016\n----\nAttendance: 5\n\nSebastian came up with a dataset of lizard weight measured every hour for several specimens and we wrote a function giving the relative per-hour weight loss of each specimen. 
In the process we discussed how to make a simple piece of code like this a bit more universal, how to catch user input errors, how to discard anomalous specimens, how to add a default value for an argument and why, etc.\n" }, { "alpha_fraction": 0.752136766910553, "alphanum_fraction": 0.7863247990608215, "avg_line_length": 57.5, "blob_id": "1b0019311137c87526c3cd6fc0fef187964208dd", "content_id": "e8abe16164c220e825332c86da2f6d4d3d8d1807", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 351, "license_type": "no_license", "max_line_length": 260, "num_lines": 6, "path": "/Protokolle/2017/Protokoll 2017-01-11.md", "repo_name": "plannapus/MfN-Code-Clinic", "src_encoding": "UTF-8", "text": "18th meeting - 11th of January 2017\n----\nAttendance: 5\n\nFalk gave a \"chalk talk\" on the concept of Object Oriented Programming and showed how it is implemented in Python and how it is used. I completed this by showing the R-native concepts of S3 and S4 methods, which implement the concept (though quite differently).\nNext meeting the 1st of February.\n" }, { "alpha_fraction": 0.7364130616188049, "alphanum_fraction": 0.792792797088623, "avg_line_length": 43.400001525878906, "blob_id": "f3f082c583bc5f4abd97a547ee183a2522f87c1a", "content_id": "f9e84e9aeb6c20a160881119662d075abeac8725", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 222, "license_type": "no_license", "max_line_length": 163, "num_lines": 5, "path": "/Protokolle/2017/Protokoll 2017-09-13.md", "repo_name": "plannapus/MfN-Code-Clinic", "src_encoding": "UTF-8", "text": "31st Meeting - 13th of September 2017\n----\nAttendance: 5\n\nWe briefly discussed the usefulness of the Python module Django to create a database-connected webpage (in the context of the creation of a geochemistry lab database).\n" }, { "alpha_fraction": 0.7055214643478394, "alphanum_fraction": 0.7607361674308777, "avg_line_length": 26.33333396911621, "blob_id": "d503754aed9903b48a0ac1c84181becd02c75c68", "content_id": "baec7ad161ec4cf312de09f3e45bde576b6fe211", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 163, "license_type": "no_license", "max_line_length": 112, "num_lines": 6, "path": "/Protokolle/2016/Protokoll 2016-03-31.md", "repo_name": "plannapus/MfN-Code-Clinic", "src_encoding": "UTF-8", "text": "8th meeting - 31st of March 2016\n----\n\nAttendance: 4\n\nMelanie gave an introduction to knitr + Rmarkdown using RStudio, as a follow-up to the 6th meeting's discussion." } ]
64
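The crash-course script in the record above walks from S3/S4 dispatch all the way down to the interpreter's C sources. For comparison only — none of this is from the repo — the analogous introspection exercise in Python is a minimal sketch using just the standard-library `inspect` module:

```python
import inspect
import json

# Pure-Python callables expose their source directly, much like printing
# an R closure at the prompt prints its body.
print(inspect.getsource(json.dumps))

# Builtins implemented in C carry no Python-level source -- the analogue
# of R's .Internal/.Primitive functions, whose bodies live in src/main/*.c.
try:
    inspect.getsource(len)
except TypeError as err:
    print("no Python-level source:", err)
```

As in R, the dividing line is whether the callable is defined in the host language or compiled into the runtime itself.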
arianasatryan/testing
https://github.com/arianasatryan/testing
82e5f896237cb5532e3dac7256655b5bbc181865
b2c8c41d6fbae7c540198433f70341948fc16dfc
0d0ea02abc29cb9102b323f6760f035252a2befe
refs/heads/master
2023-02-24T23:10:33.381882
2021-01-27T16:49:00
2021-01-27T16:49:00
333,488,045
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6339461207389832, "alphanum_fraction": 0.6409066319465637, "avg_line_length": 52.875, "blob_id": "21b581e998d71e23bdc02af0b4b60c481c0f6c0f", "content_id": "635d77b00f95c20ad13742c071c897ba468dead9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11206, "license_type": "no_license", "max_line_length": 134, "num_lines": 208, "path": "/prakash.py", "repo_name": "arianasatryan/testing", "src_encoding": "UTF-8", "text": "from typing import List, Tuple, Dict\nfrom collections import Counter\n\nfrom landing import nlp_udpipe, nlp_stanza\nfrom utils.preprocessing import remove_stopwords\nfrom search.query_searching import search_for_query\n\n\ndef get_sources_using_prakash(fragments: List[str], web_search_params) -> List[str, str]:\n keywords_by_chunks, chunks_lvl_imp_words, chunk_of_fragment = extract_key_phrases(fragments)\n chunks_url_snippet_pairs = get_url_snippet_pairs(keywords_by_chunks, chunks_lvl_imp_words, web_search_params)\n # do counting for some kind of ranking and select top_n (url,snippet) pairs\n url_snippet_pair_count = Counter(pair for chunk_url_snippet_pairs in chunks_url_snippet_pairs for pair in chunk_url_snippet_pairs)\n top_n_pairs = [pair for pair, count in url_snippet_pair_count.most_common(n=web_search_params[\"top_n\"])]\n # selecting only top n (url,snippet) pairs for all chunks\n for i in range(len(chunks_url_snippet_pairs)):\n chunks_url_snippet_pairs[i] = [pair for pair in chunks_url_snippet_pairs[i] if pair in top_n_pairs]\n # getting fragment (url,snippet) pairs from related chunk (url,snippet) pairs\n fragments_url_snippet_pairs = []\n for i in range(len(fragments)):\n if chunk_of_fragment[i] is not None:\n fragments_url_snippet_pairs.append(chunks_url_snippet_pairs[chunk_of_fragment[i]])\n else:\n fragments_url_snippet_pairs.append([])\n return fragments_url_snippet_pairs\n\n\ndef extract_key_phrases(fragments: List[str]) -> Tuple[List[List[List[str]]], List[List[str]], Dict[int, int]]:\n chunks, chunk_of_fragment = get_chunks(fragments)\n chunks_lvl_imp_words, document_lvl_imp_words = get_important_words(chunks)\n keywords_by_chunks = []\n for i in range(len(chunks)):\n chunk = chunks[i]\n chunk_lvl_imp_words = chunks_lvl_imp_words[i]\n chunk_lvl_imp_word = chunk_lvl_imp_words[0]\n first_subgroup_of_sents, second_subgroup_of_sents, whole_chunk_words = get_subgroups(chunk,\n chunk_lvl_imp_word,\n document_lvl_imp_words)\n chunk_keywords = get_keywords(first_subgroup_of_sents, second_subgroup_of_sents, whole_chunk_words)\n keywords_by_chunks.append(chunk_keywords)\n return keywords_by_chunks, chunks_lvl_imp_words, chunk_of_fragment\n\n\ndef get_url_snippet_pairs(keywords_by_chunks: List[List[List[str]]], chunks_lvl_imp_words: List[List[str]], params) \\\n -> List[List[str, str]]:\n chunks_relevant_url_snippet_pairs = []\n for i in range(len(keywords_by_chunks)):\n queries = get_queries(chunk_keywords=keywords_by_chunks[i], chunk_lvl_imp_words=chunks_lvl_imp_words[i])\n resulted_url_snippet_pairs = conditional_search(queries, params)\n chunks_relevant_url_snippet_pairs.append(resulted_url_snippet_pairs)\n return chunks_relevant_url_snippet_pairs\n\n\ndef get_chunks(fragments: List[str]) -> Tuple[List[str], Dict[int, int]]:\n # cases when a new chunk is created:\n # 1. current fragment is a title\n # 2. previous fragment was not a title and the current is a title or has length >100 words\n # 3. after previous merge the chunk exceed 200 words size\n # +. 
if fragment is the very first one\n    # stopwords are filtered out from resulted chunks\n    chunks = []\n    previous_fragment_is_a_title = False\n    previous_chunk_is_over = False\n    chunk_of_fragment = {}\n    i = 0\n    for fragment in fragments:\n        fragment_is_empty = not fragment\n        fragment_words_count = len(fragment.split())\n        current_fragment_is_a_title = fragment_words_count < 9\n        if not fragment_is_empty:\n            if current_fragment_is_a_title and not previous_fragment_is_a_title or \\\n                    not previous_fragment_is_a_title and fragment_words_count > 100 or previous_chunk_is_over or \\\n                    not chunks:\n                chunks.append(fragment)\n            else:\n                chunks[-1] += ' ' + fragment\n            previous_chunk_is_over = len(chunks[-1].split()) > 200\n            previous_fragment_is_a_title = current_fragment_is_a_title\n            chunk_of_fragment[i] = len(chunks) - 1\n        else:\n            chunk_of_fragment[i] = None\n            previous_fragment_is_a_title = False\n        i += 1\n    chunks = [remove_stopwords(chunk) for chunk in chunks]\n    return chunks, chunk_of_fragment\n\n\ndef get_important_words(chunks: List[str]) -> Tuple[List[List[str]], List[str]]:\n    # term frequencies (chunk level and document level) are counted without stopwords and short words (with length <3)\n    chunk_lvl_tfs = Counter()\n    doc_lvl_tfs = Counter()\n    chunks_lvl_imp_words = []\n    for i in range(len(chunks)):\n        chunk_lvl_tfs.clear()\n        chunk_words = [word.text for word in nlp_udpipe(chunks[i])]\n        for word in chunk_words:\n            if len(word) > 3:\n                chunk_lvl_tfs[word] += 1\n                doc_lvl_tfs[word] += 1\n        chunks_lvl_imp_words.append([word for word, _ in chunk_lvl_tfs.most_common()])\n    document_lvl_imp_words = [word for word, _ in doc_lvl_tfs.most_common(n=5)]\n    return chunks_lvl_imp_words, document_lvl_imp_words\n\n\ndef get_subgroups(chunk: str, chunk_lvl_imp_word: str, doc_lvl_imp_words: List[str]) -> \\\n        Tuple[List[Tuple[str, str]], List[Tuple[str, str]], List[Tuple[str, str]]]:\n    # sub-grouping sentences within the i-th chunk into 2 groups\n    # 1. first group contains sentences that:\n    #    a) have >2 document level important words, if the chunk contains >5 sentences\n    #    b) have any document level important word, if the chunk contains <5 sentences\n    # 2. 
second group contains sentences that:\n    #    a) have any document level and any chunk level important words\n    #    b) have any chunk level important word, if the chunk contains <5 sentences\n    nlp_chunk = nlp_stanza(chunk)\n    sents_words = [[(w.text.lower(), w.pos) for w in sent.words] for sent in nlp_chunk.sentences]\n    whole_chunk_words = [item for sublist in sents_words for item in sublist]\n    sents_count_in_chunk = len(nlp_chunk.sentences)\n    first_subgroup_of_sents = []\n    second_subgroup_of_sents = []\n    for sent_words in sents_words:\n        doc_lvl_imp_words_occ = sum(sent_word[0] in doc_lvl_imp_words for sent_word in sent_words)\n        chunk_lvl_imp_word_occ = any(sent_word[0] == chunk_lvl_imp_word for sent_word in sent_words)\n        if sents_count_in_chunk > 5 and doc_lvl_imp_words_occ > 2 or \\\n                sents_count_in_chunk < 5 and doc_lvl_imp_words_occ > 0:\n            first_subgroup_of_sents.extend(sent_words)\n\n        elif chunk_lvl_imp_word_occ and doc_lvl_imp_words_occ > 0 or \\\n                chunk_lvl_imp_word_occ and sents_count_in_chunk < 5:\n            second_subgroup_of_sents.extend(sent_words)\n    return first_subgroup_of_sents, second_subgroup_of_sents, whole_chunk_words\n\n\ndef get_keywords(first_subgroup: List[Tuple[str, str]], second_subgroup: List[Tuple[str, str]], whole_chunk_group: List[Tuple[str, str]]) -> \\\n        List[List[str]]:\n    # extracting keywords from three groups (first_subgroup, second_subgroup, whole_chunk_group)\n    chunk_keywords = []\n    word_count = Counter()\n    for group in [first_subgroup, second_subgroup, whole_chunk_group]:\n        if group:\n            for word, pos in group:\n                if pos == 'NOUN':\n                    word_count[word] += 1\n        chunk_keywords.append([word for word, count in word_count.most_common()])\n        word_count.clear()\n    return chunk_keywords\n\n\ndef get_queries(chunk_keywords: List[List[str]], chunk_lvl_imp_words: List[str]) -> List[List[str]]:\n    # for received chunk keywords 4 queries are created:\n    # 1. the first contains keywords from the first subgroup\n    # 2. the second contains keywords from the second subgroup\n    #    if the number of them is less than 6, NOUNs from the chunk itself are added to form a query of length 10\n    # 3. contains keywords from the whole chunk\n    # 4. 
contains chunk level important words\n    keywords_count = 10\n    queries = []\n    j = 0\n    for i in range(0, 2):\n        if len(chunk_keywords[i]) >= keywords_count:\n            queries.append(chunk_keywords[i][:keywords_count])\n        else:\n            queries.append(chunk_keywords[i])\n            if queries[-1] and len(queries[i]) < 6:\n                while len(queries[-1]) < keywords_count and j < len(chunk_keywords[2]):\n                    if chunk_keywords[2][j] not in queries[i]:\n                        queries[-1].append(chunk_keywords[2][j])\n                    j += 1\n    queries.append(chunk_keywords[2][:keywords_count])\n    queries.append(chunk_lvl_imp_words[:keywords_count])\n    return queries\n\n\ndef conditional_search(queries: List[List[str]], kwargs) -> List[Tuple[str, str]]:\n    max_requests_per_chunk = kwargs['max_requests_per_chunk']\n    required_results_per_chunk = kwargs['required_results_per_chunk'] \\\n        if 'required_results_per_chunk' in kwargs.keys() else None\n    # queries are submitted conditionally:\n    # each query must have >=60% difference from the previous ones, otherwise it will be dropped\n    # the third query is submitted if the first one is empty or returns no result\n    # the fourth query is submitted if the second one is empty or dropped\n    # all resulted links for chunks are counted and the top n=required_results_per_chunk is returned\n    resulted_link_snippet_pairs = {}\n    query_is_empty_or_dropped = [len(query) == 0 for query in queries]\n    counter = Counter()\n    for i in range(4):\n        if not resulted_link_snippet_pairs.keys() or not max_requests_per_chunk or \\\n                len(resulted_link_snippet_pairs.keys()) < max_requests_per_chunk:\n            if not query_is_empty_or_dropped[i]:\n                for j in range(i):\n                    dif = [word for word in queries[i] + queries[j] if word not in queries[i] or word not in queries[j]]\n                    if len(dif) / (len(queries[i]) + len(queries[j])) < 0.6:\n                        query_is_empty_or_dropped[i] = True\n                        break\n            if not query_is_empty_or_dropped[i]:\n                if i == 0:\n                    resulted_link_snippet_pairs[i] = search_for_query(queries[i], kwargs)\n                    query_is_empty_or_dropped[i] = not resulted_link_snippet_pairs[i]\n                elif i == 1:\n                    resulted_link_snippet_pairs[i] = search_for_query(queries[i], kwargs)\n                elif i == 2 and query_is_empty_or_dropped[0]:\n                    resulted_link_snippet_pairs[i] = search_for_query(queries[i], kwargs)\n                elif i == 3 and query_is_empty_or_dropped[1]:\n                    resulted_link_snippet_pairs[i] = search_for_query(queries[i], kwargs)\n                if i in resulted_link_snippet_pairs.keys():\n                    counter.update(resulted_link_snippet_pairs[i])\n    required_results_per_chunk = len(counter) if not required_results_per_chunk else required_results_per_chunk\n    merged_resulted_link_snippet_pairs_list = [pair for pair, count in counter.most_common(n=required_results_per_chunk)]\n    return merged_resulted_link_snippet_pairs_list\n" }, { "alpha_fraction": 0.6770270466804504, "alphanum_fraction": 0.6783784031867981, "avg_line_length": 36, "blob_id": "794b95574df21f69bf44745266569b26110947c3", "content_id": "8beeb8d15465a26fe50e21f569295d44f6513c92", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 740, "license_type": "no_license", "max_line_length": 101, "num_lines": 20, "path": "/jsonAPI.py", "repo_name": "arianasatryan/testing", "src_encoding": "UTF-8", "text": "from googleapiclient.discovery import build\nfrom googleapiclient.errors import HttpError\nfrom landing import app\n\nweb_search_config = app.config[\"REPORT_CONFIG\"][\"WEB_SEARCH\"]\napi_key = web_search_config[\"api_key\"]\ncse_id = web_search_config[\"cse_id\"]\n\n\ndef google_search(search_term, **kwargs):\n    try:\n        service = build(\"customsearch\", 
\"v1\", developerKey=api_key, cache_discovery=False)\n response = service.cse().list(q=search_term, cx=cse_id, **kwargs).execute()\n except HttpError as err:\n print(err)\n return []\n if 'items' in response.keys():\n resulted_link_snippet_pairs = [(item['link'], item['snippet']) for item in response['items']]\n return resulted_link_snippet_pairs\n return []\n" }, { "alpha_fraction": 0.6623376607894897, "alphanum_fraction": 0.6632333397865295, "avg_line_length": 37.5, "blob_id": "7cb501f9a74c2ffa93f951bfcc8a16d01b77ec25", "content_id": "11f1737134d02f9860377d61719fd3c3bb707e1f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2233, "license_type": "no_license", "max_line_length": 112, "num_lines": 58, "path": "/web.py", "repo_name": "arianasatryan/testing", "src_encoding": "UTF-8", "text": "import itertools\nfrom typing import List\nimport datetime\n\nfrom account.manager import get_current_user\nfrom database.documents import Documents\nfrom report.report import FragmentReport\nfrom document.document import Fragment, Document\nfrom search.prakash import get_sources_using_prakash\n\n\ndef upload_similar_docs_from_web(fragments: List[Fragment], **params):\n fragments_url_snippet_pairs = get_url_snippet_pairs(fragments, params)\n\n for url, snippet in set(itertools.chain.from_iterable(fragments_url_snippet_pairs)):\n doc = Document(\n id=None,\n uri=url,\n snippet=snippet,\n date_added=datetime.datetime.now(),\n user_login=params[\"user_login\"])\n Documents.add_document(doc)\n\n\ndef get_candidate_fragments_from_web(fragments: List[Fragment], **params) -> List[FragmentReport]:\n fragments_url_snippet_pairs = get_url_snippet_pairs(fragments, params)\n # create reports\n reports = []\n url_doc = {}\n user_login, _ = get_current_user()\n for i in range(len(fragments)):\n similar_fragments = []\n for url, snippet in fragments_url_snippet_pairs[i]:\n if url not in url_doc.keys():\n doc = Document(\n id=None,\n uri=url,\n snippet=snippet,\n date_added=datetime.datetime.now(),\n user_login=user_login)\n url_doc[url] = doc\n web_doc_fragments = url_doc[url].get_fragments()\n similar_fragments.extend([(wb_fragment, 1.0) for page in web_doc_fragments for wb_fragment in page])\n reports.append(FragmentReport(checked_fragment=fragments[i], most_similar=similar_fragments))\n return reports\n\n\ndef get_url_snippet_pairs(fragments: List[Fragment], params) -> List[List[str,str]]:\n # get fragment related urls by specified key phrase extraction algorithm\n fragments = [fragment.text for fragment in fragments]\n web_search_params = params[\"WEB_SEARCH\"]\n algorithm = web_search_params[\"source_extraction_algorithm\"]\n return source_retrieval_algorithms[algorithm](fragments, web_search_params)\n\n\nsource_retrieval_algorithms = {\n \"prakash\": get_sources_using_prakash\n}\n" } ]
3
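The `conditional_search` routine in prakash.py above drops a query when it overlaps too heavily with an earlier one. A minimal standalone sketch of that >=60%-difference rule, written with set symmetric difference — the function name and the threshold default are illustrative, not taken from the repo:

```python
def queries_differ_enough(q1, q2, threshold=0.6):
    """Return True when the words appearing in exactly one of the two
    keyword queries make up at least `threshold` of their combined length."""
    diff = set(q1) ^ set(q2)  # symmetric difference of the two queries
    return len(diff) / (len(q1) + len(q2)) >= threshold

# 4 differing words out of 8 total -> ratio 0.5, so the query is dropped
assert not queries_differ_enough(["a", "b", "c", "d"], ["a", "b", "e", "f"])
```

Computing the overlap on sets side-steps the double membership test that the original list comprehension had to get right.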
GitGude/NBA-webscrape
https://github.com/GitGude/NBA-webscrape
109b9d5d58e5672c7af7d5d51e7deef4ffd99385
171d5121bda12fd5778645fec0c5079fe6320da2
987f4ee6505fff392e13f91e611cb48eff6fa9d9
refs/heads/master
2020-04-11T21:43:10.006020
2018-12-17T10:36:28
2018-12-17T10:36:28
162,114,162
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4725956618785858, "alphanum_fraction": 0.47673216462135315, "avg_line_length": 30.19354820251465, "blob_id": "cb61389c8565007a70ac657c9feb6cdc1981ac38", "content_id": "74611426badeedc0ef439ffcb5bc2d91701fef9e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1934, "license_type": "no_license", "max_line_length": 62, "num_lines": 62, "path": "/[sample] basketballref - example.py", "repo_name": "GitGude/NBA-webscrape", "src_encoding": "UTF-8", "text": "import requests\nfrom bs4 import BeautifulSoup\nimport numpy as np\nimport pandas as pd\nimport string\nimport datetime\nimport time\n\ndef player_info():\n\n players = []\n base_url = 'http://www.basketball-reference.com/players/'\n\n for letter in string.ascii_lowercase:\n page_request = requests.get(base_url + letter)\n soup = BeautifulSoup(page_request.text, 'html.parser')\n\n table = soup.find('table')\n\n # Testing if data is coming through..\n # print(table)\n\n if table:\n table_body = table.find('tbody')\n\n for row in table_body.findAll('tr'):\n\n # print(row)\n player_url = row.find('a')\n player_names = player_url.text\n player_pages = player_url['href']\n\n print(player_url)\n print(player_names)\n print(player_pages)\n\n# cells = row.findAll('td')\n# active_from = int(cells[0].text)\n# active_to = int(cells[1].text)\n# position = cells[2].text\n# height = cells[3].text\n# weight = cells[4].text\n# birth_date = cells[5].text\n# college = cells[6].text\n#\n# player_entry = {'url': player_pages,\n# 'name': player_names,\n# 'active_from': active_from,\n# 'active_to': active_to,\n# 'position': position,\n# 'college': college,\n# 'height': height,\n# 'weight': weight,\n# 'birth_date': birth_date}\n#\n# players.append(player_entry)\n#\n# return pd.DataFrame(players)\n#\n# players_general_info = player_info()\nprint(player_info())\n# print(players_general_info.head())\n" }, { "alpha_fraction": 0.6723313927650452, "alphanum_fraction": 0.6765799522399902, "avg_line_length": 23.75, "blob_id": "d18c569ac99f9f8c60ed9b0c53840289a5d245b1", "content_id": "5d9ee7398e78352339911c02d5dc92f1dbd80327", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1883, "license_type": "no_license", "max_line_length": 104, "num_lines": 76, "path": "/NBA-Stats-Reader.py", "repo_name": "GitGude/NBA-webscrape", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport csv\nimport numpy as np\n\n# to find the path of csv files\n# import os\n# print(os.getcwd())\n# out: ../Users/Docs/etc\n# print(os.listdir(os.getcwd())\n# out: ['Names of csv files]\n\n# Reading content from the csv\n\n# Reading a csv file\ndf = pd.read_csv('../PythonProjects/NBA_TeamSchedule.csv')\n# print(df.dtypes)\n# print(df.columns)\n# print(df.describe())\n\n\n# List of NBA teams\n\nATL = 'Atlanta Hawks'\nBOS = 'Boston Celtics'\nBKN = 'Brooklyn Nets'\nCHA = 'Charlotte Hornets'\nCHI = 'Chicago Bulls'\nCLE = 'Cleveland Cavaliers'\nDAL = 'Dallas Mavericks'\nDEN = 'Denver Nuggets'\nDET = 'Detroit Pistons'\nGSW = 'Golden State Warriors'\nHOU = 'Houston Rockets'\nIND = 'Indiana Pacers'\nLAC = 'Los Angeles Clippers'\nLAL = 'Los Angeles Lakers'\nMEM = 'Memphis Grizzlies'\nMIA = 'Miami Heat'\nMIL = 'Milwaukee Bucks'\nMIN = 'Minnesota Timberwolves'\nNOR = 'New Orleans Pelicans'\nOKC = 'Oklahoma City Thunder'\nORL = 'Orlando Magic'\nPHI = 'Philadelphia 76ers'\nPHO = 'Phoenix Suns'\nPOR = 'Portland Trail Blazers'\nSAC = 'Sacramento Kings'\nSAS = 'San Antonio 
Spurs'\nTOR = 'Toronto Raptors'\nUTA = 'Utah Jazz'\nWAS = 'Washington Wizards'\n\n\n# print(df)\nx = df['Difference'] = df['Away Pts'] - df['Home Pts']\n\n# print(x)\n# Creating a new column 'Difference'\np = df[df['Difference'] > 200][['Home Team', 'Away Team', 'Difference']]\n\nprint(p)\n# if df['Difference'] > 200:\n# print(['Away Team' + 'vs' + 'Home Team'])\n\n# Things to find:\n# - Matchups with the largest differentials\n# - Which team wins at home more\n# - Which team wins away more\n\n# Writing the contents out of the csv\n# with open('../PythonProjects/NBA_TeamSchedule.csv', newline='') as csvfile:\n# b_reader = csv.reader(csvfile, delimiter=' ', quotechar='|')\n# for row in b_reader:\n# print(','.join(row))\n\n# df = pd.read_csv('../PythonProjects/[working] basketballref_teams.py', sep=',', keep_default_na=False)\n\n\n" }, { "alpha_fraction": 0.44421008229255676, "alphanum_fraction": 0.4650917649269104, "avg_line_length": 31.689655303955078, "blob_id": "b55ad469a2ba1285d272702f77f602b834349b9e", "content_id": "8e36bd1e95dbe1391da8c2ab7c3e888a38c1bf80", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4741, "license_type": "no_license", "max_line_length": 96, "num_lines": 145, "path": "/Basketball-Ref - Seasons.Schedule&Resuls - scraper.py", "repo_name": "GitGude/NBA-webscrape", "src_encoding": "UTF-8", "text": "import requests\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nimport string\nimport month_loop\n\n# # 2018 Stat urls\n# list = {'NBA_2018_games-october.html',\n# 'NBA_2018_games-november.html',\n# 'NBA_2018_games-december.html',\n# 'NBA_2018_games-january.html''\n# 'NBA_2018_games-february.html',\n# 'NBA_2018_games-march.html'\n# g = 'NBA_2018_games-april.html'\n# h = 'NBA_2018_games-may.html'\n# i = 'NBA_2018_games-june.html'\n#\n# # 2019 Stat urls\n# j = 'NBA_2019_games-october.html'\n# k = 'NBA_2019_games-november.html'\n# l = 'NBA_2019_games-december.html'\n# m = 'NBA_2019_games-january.html'\n# n = 'NBA_2019_games-february.html'\n# o = 'NBA_2019_games-march.html'\n# p = 'NBA_2019_games-april.html'\n\n\ndef get_team_info():\n\n # for letter in list:\n\n teams = []\n base_url = 'https://www.basketball-reference.com/leagues/'\n\n for u in month_loop.get_url():\n\n page_request = requests.get(base_url + u)\n\n # print(page_request)\n\n soup = BeautifulSoup(page_request.text, 'html.parser')\n\n table = soup.find('table')\n\n # print(soup)\n\n # Looks at the table element..\n\n if table:\n\n table_body = table.find('tbody')\n\n # Loops on all 'tr' elements on the table (rows)..\n\n for row in table_body.findAll('tr'):\n\n pl = row.findAll('th')\n # print(pl)\n\n data1 = row.findAll('td')\n\n if not len(data1) <= 0:\n\n home_pts = data1[4].text\n # print(home_pts)\n if not len(home_pts) <= 0:\n\n cells = row.findAll('a')\n # url = row.findAll['href']\n\n date = cells[0].text\n away_team = cells[1].text\n home_team = cells[2].text\n home_pts = int(data1[4].text)\n away_pts = int(data1[2].text)\n # boxscore_url = cells[3].text\n\n # Testing the data that is pulled through from the above..\n\n # print(date)\n # print(away_team)\n # print(home_team)\n # print(home_pts)\n # print(away_pts)\n\n team_entry = {\"Away Team\": away_team,\n \"Away Pts\": away_pts,\n \"Home Team\": home_team,\n \"Home Pts\": home_pts,\n \"xDate\": date}\n # \"Boxscore URL\": boxscre_url}\n teams.append(team_entry)\n\n # return pd.DataFrame(teams)\n # Need to set the Date, Team Names and URL on another loop as it\n # they are all under 
element 'a'...\n\n                # team_url = row.findAll('a')\n                # team_names = team_url.text\n                # team_pages = team_url['href']\n\n                # print(team_names)\n                # print(team_pages)\n\n                # This code aligns each iteration/index with a column header in a DataFrame..\n                # Currently works.. although we need to convert pts to integers.. currently all strings\n\n                # cells = row.findAll('td')\n                # date = cells[0].text\n                # start_time = cells[1].text\n                # away_team = cells[2].text\n                # away_pts = int(cells[3].text)\n                # home_team = cells[4].text\n                # home_pts = int(cells[5].text)\n                # boxscore = cells[6].text\n                # overtime = cells[7].text\n                # # attendance = cells[8].text\n                # # notes = cells[9].text\n                #\n                # team_entry = {\"Date\": date,\n                #               \"Start Time\": start_time,\n                #               \"Away\": away_team,\n                #               \"Away Pts\": away_pts,\n                #               \"Home Team\": home_team,\n                #               \"Home Pts\": home_pts,\n                #               \"Boxscore url\": boxscore,\n                #               \"Overtime\": overtime}\n                # #               \"Attendance\": attendance}\n                # #               \"Notes\": notes}\n                #\n                # teams.append(team_entry)\n                #\n    return pd.DataFrame(teams)\n\n\nteams_general_info = get_team_info()\n\n# print(get_team_info())\n# print(teams_general_info.head())\nprint(teams_general_info.to_csv(\"NBA_TeamSchedule.csv\", sep=',', encoding='utf-8', index=False))\n\n# Writing to CSV\n# def import_to_csv():\n#     wks = teams_general_info.to_csv(\"NBA_TeamSchedule.csv\", sep='\\t', encoding='utf-8')\n#     return wks\n\n" }, { "alpha_fraction": 0.5989847779273987, "alphanum_fraction": 0.7436548471450806, "avg_line_length": 31.75, "blob_id": "06cf48265097dd4a87357173e538046af174f7c7", "content_id": "505285d32eabb9f30befa9a833717509c1bf64fe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 394, "license_type": "no_license", "max_line_length": 58, "num_lines": 12, "path": "/Basketball-ref_TeamScore.py", "repo_name": "GitGude/NBA-webscrape", "src_encoding": "UTF-8", "text": "from bs4 import BeautifulSoup\nimport requests\n\nbase_url = 'https://www.basketball-reference.com/leagues/'\n\nOct_2019 = 'NBA_2019_games-october.html'\nNov_2019 = 'NBA_2019_games-november.html'\nDec_2019 = 'NBA_2019_games-december.html'\nJan_2019 = 'NBA_2019_games-january.html'\nFeb_2019 = 'NBA_2019_games-february.html'\nMar_2019 = 'NBA_2019_games-march.html'\nApr_2019 = 'NBA_2019_games-april.html'\n\n" }, { "alpha_fraction": 0.6531365513801575, "alphanum_fraction": 0.6974169611930847, "avg_line_length": 18.285715103149414, "blob_id": "a01fae50bc2dcbda37148afe2404b36ebb50aa0d", "content_id": "55d5df1b9e03cd1dc7c6109f2dc0ad3160ffa1ce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 271, "license_type": "no_license", "max_line_length": 101, "num_lines": 14, "path": "/NBAProjekt2.py", "repo_name": "GitGude/NBA-webscrape", "src_encoding": "UTF-8", "text": "import pygsheets\nimport pandas as pd\n\ngc = pygsheets.authorize(service_account_file=r'\\PythonProjects\\venv\\NBA Project 1-a1b8594c93d2.json')\n\ndf = pd.DataFrame()\n\ndf['name'] = ['Kyle', 'Mel', 'Moochie']\n\nsh = gc.open('NBAPython')\n\nwks = sh[0]\n\nwks.set_dataframe(df, (1, 1))\n\n" }, { "alpha_fraction": 0.7765362858772278, "alphanum_fraction": 0.7765362858772278, "avg_line_length": 14.916666984558105, "blob_id": "4b1e923882aa903ab59ee348561dc69022493d3b", "content_id": "b6c0664d47ab683d77735afc1a922238750e16cf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 179, "license_type": "no_license", "max_line_length": 68, "num_lines": 12, "path": 
"/README.md", "repo_name": "GitGude/NBA-webscrape", "src_encoding": "UTF-8", "text": "# NBA-webscrape\n\nHi there,\n\nWelcome to my project on scraping data from basketball-reference.com\nThis is a currently ongoing project.\n\nFeel free to have a play around.\n\nThanks\n\nK\n" }, { "alpha_fraction": 0.503636360168457, "alphanum_fraction": 0.5254545211791992, "avg_line_length": 13.289473533630371, "blob_id": "0e2d4d2620502d2dfbf76f96c9d8e06a0d84c5a4", "content_id": "23d6640aed59136ff9b85917221438417d45d10b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 550, "license_type": "no_license", "max_line_length": 70, "num_lines": 38, "path": "/month_loop.py", "repo_name": "GitGude/NBA-webscrape", "src_encoding": "UTF-8", "text": "import calendar\n\n# Working...\ndef get_month():\n\n m = [] #Creating a list to store each month\n\n for month in range(1, 13):\n m.append(calendar.month_name[month].lower())\n\n return m\n\n\ndef get_year():\n\n year = []\n base_year = 2016\n\n while base_year < 2019:\n base_year += 1\n year.append(base_year)\n\n return year\n\n\ndef get_url():\n\n url = []\n\n for i in get_year():\n\n for m in get_month():\n url.append('NBA_' + str(i) + '_games-' + str(m) + '.html')\n\n return url\n\n\nprint(get_url())\n\n\n\n\n\n\n\n" } ]
7
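`month_loop.get_url()` in the record above builds one basketball-reference schedule page name per (season, month) pair with two nested loops. The same list can be produced in a flat sketch with `itertools.product`; the function name and year bounds below are illustrative assumptions, not repo code:

```python
import calendar
from itertools import product

def schedule_pages(first_year=2017, last_year=2019):
    """Yield page names like 'NBA_2017_games-january.html', one per
    (year, month) pair, mirroring month_loop.get_url()."""
    months = [calendar.month_name[m].lower() for m in range(1, 13)]
    for year, month in product(range(first_year, last_year + 1), months):
        yield 'NBA_{}_games-{}.html'.format(year, month)

print(next(schedule_pages()))  # NBA_2017_games-january.html
```

The generator form also avoids materialising the full URL list when only a few pages end up being scraped.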
nforsch/SSCP19-mechanics-project7
https://github.com/nforsch/SSCP19-mechanics-project7
45065785913de2eaf52c1bd97c625353bbea804a
6bfc1a56e18ac704eeddb2a10a39e5ec439d1b81
2ed9d667f5cb92d48f17352cce1a276a6fcb804b
refs/heads/master
2020-06-04T11:02:18.105943
2019-06-27T12:55:41
2019-06-27T12:55:41
191,994,600
0
1
null
null
null
null
null
[ { "alpha_fraction": 0.6554809808731079, "alphanum_fraction": 0.6935123205184937, "avg_line_length": 30.928571701049805, "blob_id": "707026c87f33bdc6e9832f59032e9d81e8092f85", "content_id": "c3dfc31ed2bb9750cde7f705eb86426e800f7b0f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 447, "license_type": "no_license", "max_line_length": 78, "num_lines": 14, "path": "/lhs.py", "repo_name": "nforsch/SSCP19-mechanics-project7", "src_encoding": "UTF-8", "text": "from pyDOE import *\nfrom scipy.stats.distributions import norm\n\n# Latin Hypercube Sampling\n# see: https://pythonhosted.org/pyDOE/randomized.html\n\n# Run LHS for n factors\nX = lhs(4, samples=100) # lhs(n, [samples, criterion, iterations])\n\n# Transform factors to normal distributions with means and standard deviations\nmeans = [1, 2, 3, 4]\nstdvs = [0.1, 0.5, 1, 0.25]\nfor i in range(4):\n X[:, i] = norm(loc=means[i], scale=stdvs[i]).ppf(X[:, i])\n" }, { "alpha_fraction": 0.5505653023719788, "alphanum_fraction": 0.5680485963821411, "avg_line_length": 29.912620544433594, "blob_id": "0de5bc1da251e2ad3f7cda4d66e75c202a06a571", "content_id": "1e3c5958514395275fde81858b9d7e8cb4bad475", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9552, "license_type": "no_license", "max_line_length": 78, "num_lines": 309, "path": "/demo.py", "repo_name": "nforsch/SSCP19-mechanics-project7", "src_encoding": "UTF-8", "text": "import os\nimport numpy as np\nimport dolfin as df\nimport pulse\nimport ldrb\nimport matplotlib.pyplot as plt\n\n\ndef create_geometry(h5name):\n \"\"\"\n Create an lv-ellipsoidal mesh and fiber fields using LDRB algorithm\n\n An ellipsoid is given by the equation\n\n .. math::\n\n \\frac{x^2}{a} + \\frac{y^2}{b} + \\frac{z^2}{c} = 1\n\n We create two ellipsoids, one for the endocardium and one\n for the epicardium and subtract them and then cut the base.\n For simplicity we assume that the longitudinal axis is in\n in :math:`x`-direction and as default the base is located\n at the :math:`x=0` plane.\n \"\"\"\n\n # Number of subdivision (higher -> finer mesh)\n N = 13\n\n # Parameter for the endo ellipsoid\n a_endo = 1.5\n b_endo = 0.5\n c_endo = 0.5\n # Parameter for the epi ellipsoid\n a_epi = 2.0\n b_epi = 1.0\n c_epi = 1.0\n # Center of the ellipsoid (same of endo and epi)\n center = (0.0, 0.0, 0.0)\n # Location of the base\n base_x = 0.0\n\n # Create a lv ellipsoid mesh with longitudinal axis along the x-axis\n geometry = ldrb.create_lv_mesh(\n N=N,\n a_endo=a_endo,\n b_endo=b_endo,\n c_endo=c_endo,\n a_epi=a_epi,\n b_epi=b_epi,\n c_epi=c_epi,\n center=center,\n base_x=base_x\n )\n\n\n # Select fiber angles for rule based algorithm\n angles = dict(alpha_endo_lv=60, # Fiber angle on the endocardium\n alpha_epi_lv=-60, # Fiber angle on the epicardium\n beta_endo_lv=0, # Sheet angle on the endocardium\n beta_epi_lv=0) # Sheet angle on the epicardium\n\n fiber_space = 'Lagrange_1'\n\n # Compte the microstructure\n fiber, sheet, sheet_normal = ldrb.dolfin_ldrb(mesh=geometry.mesh,\n fiber_space=fiber_space,\n ffun=geometry.ffun,\n markers=geometry.markers,\n **angles)\n\n # Compute focal point\n focal = np.sqrt(a_endo**2 - (0.5 * (b_endo + c_endo))**2)\n # Make mesh according to AHA-zons\n # pulse.geometry_utils.mark_strain_regions(mesh=geometry.mesh, foc=focal)\n pulse.geometry_utils.mark_strain_regions(mesh=geometry.mesh,\n foc=focal,\n nsectors=(15, 15, 15, 5))\n\n mapper = {'lv': 'ENDO', 'epi': 'EPI', 'rv': 
'ENDO_RV', 'base': 'BASE'}\n    m = {mapper[k]: (v, 2) for k, v in geometry.markers.items()}\n\n    pulse.geometry_utils.save_geometry_to_h5(\n        geometry.mesh, h5name, markers=m,\n        fields=[fiber, sheet, sheet_normal],\n        overwrite_file=True\n    )\n\n\ndef load_geometry(h5name='ellipsoid.h5', recreate=False):\n\n    if not os.path.exists(h5name) or recreate:\n        create_geometry(h5name)\n\n    geo = pulse.HeartGeometry.from_file(h5name)\n    # Scale mesh to a realistic size\n    geo.mesh.coordinates()[:] *= 4.5\n    return geo\n\n\ndef save_geometry_vis(geometry, folder='geometry'):\n    \"\"\"\n    Save the geometry as well as markers and fibers to files\n    that can be visualized in ParaView\n    \"\"\"\n    if not os.path.isdir(folder):\n        os.makedirs(folder)\n\n    for attr in ['mesh', 'ffun', 'cfun']:\n        print('Save {}'.format(attr))\n        df.File('{}/{}.pvd'.format(folder, attr)) << getattr(geometry, attr)\n\n    for attr in ['f0', 's0', 'n0']:\n        ldrb.fiber_to_xdmf(getattr(geometry, attr),\n                           '{}/{}'.format(folder, attr))\n\n\ndef get_strains(u, v, dx):\n\n    F = pulse.kinematics.DeformationGradient(u)\n    E = pulse.kinematics.GreenLagrangeStrain(F, isochoric=False)\n\n    return df.assemble(df.inner(E*v, v) * dx) \\\n        / df.assemble(df.Constant(1.0) * dx)\n\n\ndef get_nodal_coordinates(u):\n\n    mesh = df.Mesh(u.function_space().mesh())\n    V = df.VectorFunctionSpace(mesh, \"CG\", 1)\n    df.ALE.move(mesh, df.interpolate(u, V))\n    return mesh.coordinates()\n\n\ndef postprocess(geometry):\n    \"\"\"\n    Compute regional fiber strains at ED and ES and report cavity volumes\n\n    Arguments\n    ---------\n    geometry : pulse.HeartGeometry\n        Geometry whose saved ED/ES displacement fields are post-processed\n    \"\"\"\n\n    coords = [geometry.mesh.coordinates()]\n    V = df.VectorFunctionSpace(geometry.mesh, \"CG\", 2)\n    Ef = np.zeros((3, 17))\n\n    u_ED = df.Function(V, \"ED_displacement.xml\")\n    coords.append(get_nodal_coordinates(u_ED))\n    for i in range(17):\n        Ef[1, i] = get_strains(u_ED, geometry.f0, geometry.dx(i+1))\n    EDV = geometry.cavity_volume(u=u_ED)\n\n    u_ES = df.Function(V, \"ES_displacement.xml\")\n    coords.append(get_nodal_coordinates(u_ES))\n    for i in range(17):\n        Ef[2, i] = get_strains(u_ES, geometry.f0, geometry.dx(i+1))\n    ESV = geometry.cavity_volume(u=u_ES)\n    # Stroke volume\n    SV = EDV - ESV\n    # Ejection fraction\n    EF = SV / EDV\n    print((\"EDV: {EDV:.2f} ml\\nESV: {ESV:.2f} ml\\nSV: {SV:.2f}\"\n           \" ml\\nEF: {EF:.2f}\").format(EDV=EDV, ESV=ESV, SV=SV, EF=EF))\n\n    # Save nodes as txt at ED and ES\n    np.savetxt('coords_ED.txt',coords[1],fmt='%.4f',delimiter=',')\n    np.savetxt('coords_ES.txt',coords[2],fmt='%.4f',delimiter=',')\n\n    fig, ax = plt.subplots(1, 3, sharex=True, sharey=True)\n    for i in range(17):\n        j = i // 6\n        # from IPython import embed; embed()\n        # exit()\n        ax[j].plot(Ef[:, i], label=\"region {}\".format(i+1))\n\n    ax[0].set_title(\"Basal\")\n    ax[1].set_title(\"Mid\")\n    ax[2].set_title(\"Apical\")\n\n    ax[0].set_ylabel(\"Fiber strain\")\n    for axi in ax:\n        axi.set_xticks(range(3))\n        axi.set_xticklabels([\"\", \"ED\", \"ES\"])\n        axi.legend()\n\n    plt.show()\n\n\ndef solve(\n    geometry,\n    EDP=1.0,\n    ESP=15.0,\n    Ta=60,\n    material_parameters=None,\n):\n    \"\"\"\n\n    Arguments\n    ---------\n    EDP : float\n        End diastolic pressure\n    ESP : float\n        End systolic pressure\n    Ta : float\n        Peak active tension (at ES)\n    material_parameters : dict\n        A dictionary with parameters for the Guccione model.\n        Default: {'C': 2.0, 'bf': 8.0, 'bt': 2.0, 'bfs': 4.0}\n\n    \"\"\"\n    # Create model\n    activation = df.Function(df.FunctionSpace(geometry.mesh, \"R\", 0))\n    matparams = pulse.Guccione.default_parameters()\n    if 
material_parameters is not None:\n        matparams.update(material_parameters)\n    material = pulse.Guccione(activation=activation,\n                              parameters=matparams,\n                              active_model=\"active_stress\",\n                              f0=geometry.f0,\n                              s0=geometry.s0,\n                              n0=geometry.n0)\n\n    lvp = df.Constant(0.0)\n    lv_marker = geometry.markers['ENDO'][0]\n    lv_pressure = pulse.NeumannBC(traction=lvp,\n                                  marker=lv_marker, name='lv')\n    neumann_bc = [lv_pressure]\n\n    # Add spring term at the base with stiffness 1.0 kPa/cm^2\n    base_spring = 1.0\n    robin_bc = [pulse.RobinBC(value=df.Constant(base_spring),\n                              marker=geometry.markers[\"BASE\"][0])]\n\n    # Fix the basal plane in the longitudinal direction\n    # 0 in V.sub(0) refers to x-direction, which is the longitudinal direction\n    def fix_basal_plane(W):\n        V = W if W.sub(0).num_sub_spaces() == 0 else W.sub(0)\n        bc = df.DirichletBC(V.sub(0),\n                            df.Constant(0.0),\n                            geometry.ffun, geometry.markers[\"BASE\"][0])\n        return bc\n\n    dirichlet_bc = [fix_basal_plane]\n\n    # Collect boundary conditions\n    bcs = pulse.BoundaryConditions(dirichlet=dirichlet_bc,\n                                   neumann=neumann_bc,\n                                   robin=robin_bc)\n\n    # Create the problem\n    problem = pulse.MechanicsProblem(geometry, material, bcs)\n\n    xdmf = df.XDMFFile(df.mpi_comm_world(), 'output.xdmf')\n\n    # Solve the problem\n    print((\"Do an initial solve with pressure = 0 kPa \"\n           \"and active tension = 0 kPa\"))\n    problem.solve()\n    u, p = problem.state.split()\n    xdmf.write(u, 0.0)\n    print(\"LV cavity volume = {} ml\".format(geometry.cavity_volume(u=u)))\n\n    # Solve for ED\n    print((\"Solving for ED with pressure = {} kPa and active tension = 0 kPa\"\n           \"\".format(EDP)))\n    pulse.iterate.iterate(problem, lvp, EDP, initial_number_of_steps=20)\n\n    u, p = problem.state.split(deepcopy=True)\n    xdmf.write(u, 1.0)\n    df.File(\"ED_displacement.xml\") << u\n    print(\"LV cavity volume = {} ml\".format(geometry.cavity_volume(u=u)))\n\n    # Solve for ES\n    print((\"Solving for ES with pressure = {} kPa and active tension = {} kPa\"\n           \"\".format(ESP, Ta)))\n    pulse.iterate.iterate(problem, lvp, ESP,\n                          initial_number_of_steps=50)\n    pulse.iterate.iterate(problem, activation, Ta,\n                          adapt_step=False, max_iters=100,\n                          initial_number_of_steps=40)\n\n    u, p = problem.state.split(deepcopy=True)\n    xdmf.write(u, 2.0)\n    df.File(\"ES_displacement.xml\") << u\n    print(\"LV cavity volume = {} ml\".format(geometry.cavity_volume(u=u)))\n\n\ndef main():\n\n    geometry = load_geometry(h5name='ellipsoid.h5', recreate=True)\n    save_geometry_vis(geometry, folder='geometry')\n    import time\n    t0 = time.time()\n    solve(geometry,\n          EDP=1.0,\n          ESP=15.0,\n          Ta=60,\n          material_parameters=None)\n    t1 = time.time()\n    print('Elapsed time = {:.2f} seconds'.format(t1 - t0))\n    postprocess(geometry)\n\n\n\nif __name__ == \"__main__\":\n    main()\n" }, { "alpha_fraction": 0.6501182317733765, "alphanum_fraction": 0.6647754311561584, "avg_line_length": 34.84745788574219, "blob_id": "5ef833bd9353ffb1ee6adb51decc645afdade70c", "content_id": "bd3e6fbd1d5f4e0c2251b75cfdecd5e100e3ffd2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4230, "license_type": "no_license", "max_line_length": 88, "num_lines": 118, "path": "/compute_displacement_subset.py", "repo_name": "nforsch/SSCP19-mechanics-project7", "src_encoding": "UTF-8", "text": "import os\nimport numpy as np\nimport dolfin as df\nimport pulse\nimport ldrb\nimport matplotlib.pyplot as plt\nfrom scipy import spatial\n\nfrom demo import load_geometry\npi = np.pi\n\ndef cart2prolate( focalLength, XYZ ):\n    # Convert Cartesian XYZ to Prolate 
TML\n # TML[0] = theta, TML[1] = mu, TML[2] = lambda\n\n X = XYZ.T[0]\n Y = XYZ.T[1]\n Z = XYZ.T[2]\n\n r1 = np.sqrt( Y**2 + Z**2 + (X+focalLength)**2 )\n r2 = np.sqrt( Y**2 + Z**2 + (X-focalLength)**2 )\n\n lmbda = np.real( np.arccosh((r1+r2)/(2*focalLength)) )\n mu = np.real( np.arccos((r1-r2)/(2*focalLength)) )\n theta = np.arctan2(Z,Y)\n\n idx = theta<0\n theta[idx] = theta[idx] + 2*np.pi\n\n TML = np.concatenate(([theta], [mu], [lmbda]))\n return TML\n\ndef prolate2cart( focalLength, TML ):\n # Convert Prolate TML to Cartesian XYZ\n # XYZ[0] = X, XYZ[1] = Y, XYZ[2] = Z\n\n theta = TML[0]\n mu = TML[1]\n lmbda = TML[2]\n\n X = focalLength * np.cosh(lmbda) * np.cos(mu)\n Y = focalLength * np.sinh(lmbda) * np.sin(mu) * np.cos(theta)\n Z = focalLength * np.sinh(lmbda) * np.sin(mu) * np.sin(theta)\n\n XYZ = np.concatenate(([X],[Y],[Z]))\n return XYZ\n\ndef focal( a, b, c ):\n focalLength = np.sqrt( a**2 - (0.5*(b+c))**2 )\n return focalLength\n\ndef get_surface_points(marker):\n coordinates = []\n idxs = []\n # Loop over the facets\n for facet in df.facets(geometry.mesh):\n # If the facet markers matched that of ENDO\n if geometry.ffun[facet] == marker:\n # Loop over the vertices of that facets\n for vertex in df.vertices(facet):\n idxs.append(vertex.global_index())\n # coordinates.append(tuple(vertex.midpoint().array()))\n # Remove duplicates\n idxs = np.array(list(set(idxs)))\n coordinates = geometry.mesh.coordinates()[idxs]\n return coordinates, idxs\n\ndef fit_prolate( P ):\n # Sample nodes of mesh using prolate coordinates to get displacements for\n # same number of points, similar regions across meshes\n # input P = TML from mesh endo/epi\n\n mu_max = np.amax(P[1]) # find max mu coordinate from mesh\n tree = spatial.KDTree(P[0:2].T) # setup tree for finding nearest point\n\n idx_match = []\n sample_points = []\n for theta in np.linspace(pi/2,2*pi,4): # theta range\n for mu in np.linspace(0,mu_max,5): # mu ranges from 0 to mu_max based on mesh\n sample_points.append([theta,mu]) # list of sampled [theta,mu] combinations\n distance, index = tree.query([theta,mu]) # find closest point\n idx_match.append(index) # store index of point in endo or epi\n\n return idx_match\n\n# Define coordinates of ED mesh for endo and epi\ngeometry = load_geometry('ellipsoid.h5')\n# Get nodes ENDO\nmarker_endo = geometry.markers['ENDO'][0]\nendo_coordinates, endo_idxs = get_surface_points(marker_endo)\n# Get nodes EPI\nmarker_epi = geometry.markers['EPI'][0]\nepi_coordinates, epi_idxs = get_surface_points(marker_epi)\n\n# convert Cartesian coordinates to Prolate, find maximum mu value\nfocalLength_endo = focal(4.1,1.6,1.6) # same parameters [a,b,c] used for mesh\nfocalLength_epi = focal(5,2.9,2.9) # same parameters [a,b,c] used for mesh\nTML_endo = cart2prolate(focalLength_endo, endo_coordinates)\nTML_epi = cart2prolate(focalLength_epi, epi_coordinates)\n# XYZ_endo = prolate2cart(focalLength_endo,TML_endo) # check return XYZ from TML\n\n# Find fit to closest node by varying theta, mu and fitting lambda (store index of node)\nidx_match_endo = fit_prolate(TML_endo)\nidx_match_epi = fit_prolate(TML_epi)\nidx_node_endo = endo_idxs[idx_match_endo].tolist()\nidx_node_epi = epi_idxs[idx_match_epi].tolist()\nidx_nodes = idx_node_endo + idx_node_epi\n\n# Get displacement between ES and ED using idx_nodes\nprint('Loading ED and ES mesh coordinates...')\ned_coordinates = np.loadtxt('coords_ED.txt',delimiter=',')\nes_coordinates = np.loadtxt('coords_ES.txt',delimiter=',')\ndisplacement = es_coordinates-ed_coordinates # 
calculate displacement between ED and ES\ndisp_out = displacement[idx_nodes] # get displacement for nodes in list idx_nodes\nprint('Saving displacements for %d points' %(len(idx_nodes)))\nnp.savetxt('displacement.txt',disp_out,fmt='%.8f',delimiter=',')\n\n# from IPython import embed; embed()\n" }, { "alpha_fraction": 0.7030651569366455, "alphanum_fraction": 0.7049808502197266, "avg_line_length": 26.473684310913086, "blob_id": "a62a92db1c6f9a422e6c16fcb4d0269e91574a14", "content_id": "08665ea80e4c18d80119b447198d9b2651621f5a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 522, "license_type": "no_license", "max_line_length": 69, "num_lines": 19, "path": "/compute_surface_nodes.py", "repo_name": "nforsch/SSCP19-mechanics-project7", "src_encoding": "UTF-8", "text": "import dolfin as df\n\nfrom demo import load_geometry\n\n\ngeometry = load_geometry()\n\nendo_coordinates = []\nendo_marker = geometry.markers['ENDO'][0]\n# Loop over the facets\nfor facet in df.facets(geometry.mesh):\n    # If the facet marker matches that of ENDO\n    if geometry.ffun[facet] == endo_marker:\n        # Loop over the vertices of that facet\n        for vertex in df.vertices(facet):\n            endo_coordinates.append(tuple(vertex.midpoint().array()))\n\n# Remove duplicates\nendo_coordinates = set(endo_coordinates)\n" }, { "alpha_fraction": 0.7361963391304016, "alphanum_fraction": 0.754601240158081, "avg_line_length": 21.482759475708008, "blob_id": "9847af82650fbb7696b8d18ce795167459fdaf8b", "content_id": "18aefa14976af63a0163e278bae1310ecb9336f6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 652, "license_type": "no_license", "max_line_length": 148, "num_lines": 29, "path": "/README.md", "repo_name": "nforsch/SSCP19-mechanics-project7", "src_encoding": "UTF-8", "text": "# SSCP19 Project 7 - LV Mechanics\n\n## Docker\nWe have made a prebuilt image with all the necessary requirements\ninstalled. 
You can get it by typing\n\n\nPull the docker image\n```\ndocker pull finsberg/sscp19_project7\n```\nRun container (one level up `cd ..`)\n```\ndocker run -ti --name summer-school-container -e \"TERM=xterm-256color\" -w /home/fenics/shared -v $(pwd):/home/fenics/shared finsberg/sscp19_project7\n```\n\nCheck out [docker_workflows](https://github.com/ComputationalPhysiology/docker_workflows/) for more examples of how to use Docker in\nyour workflow.\n\n\n### Requirements for LHS\npyDOE\nscipy\n\n### Requirements for PCA\nmatplotlib\nsklearn\nnumpy\nseaborn\n" }, { "alpha_fraction": 0.8034188151359558, "alphanum_fraction": 0.8461538553237915, "avg_line_length": 38, "blob_id": "0159c49225e323c3fb919774e88feca7abbe7638", "content_id": "003829dc73966d304d0be5cb1651e50c71dae4e38", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 117, "license_type": "no_license", "max_line_length": 85, "num_lines": 3, "path": "/Dockerfile", "repo_name": "nforsch/SSCP19-mechanics-project7", "src_encoding": "UTF-8", "text": "FROM finsberg/fenics2017_gmsh\n\nRUN pip3 install fenics-pulse ldrb pyDOE scipy matplotlib sklearn numpy seaborn\n" }, { "alpha_fraction": 0.5482977032661438, "alphanum_fraction": 0.5653206706047058, "avg_line_length": 29.433734893798828, "blob_id": "f90f585b43451c507dc88b99e0c2656f5bd88079", "content_id": "8ed7cba7d7bbc2bfc5145c4452c0b850924fc441", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2526, "license_type": "no_license", "max_line_length": 75, "num_lines": 83, "path": "/create_ellipsoid.py", "repo_name": "nforsch/SSCP19-mechanics-project7", "src_encoding": "UTF-8", "text": "import os\nimport numpy as np\nimport dolfin as df\nimport pulse\nimport ldrb\n\n\ndef create_geometry(h5name):\n    \"\"\"\n    Create an lv-ellipsoidal mesh and fiber fields using LDRB algorithm\n\n    An ellipsoid is given by the equation\n\n    .. 
math::\n\n        \\frac{x^2}{a^2} + \\frac{y^2}{b^2} + \\frac{z^2}{c^2} = 1\n\n    We create two ellipsoids, one for the endocardium and one\n    for the epicardium and subtract them and then cut the base.\n    For simplicity we assume that the longitudinal axis is\n    in :math:`x`-direction and as default the base is located\n    at the :math:`x=0` plane.\n    \"\"\"\n\n    # Number of subdivisions (higher -> finer mesh)\n    N = 13\n\n    # Parameters for the endo ellipsoid\n    a_endo = 1.5\n    b_endo = 0.5\n    c_endo = 0.5\n    # Parameters for the epi ellipsoid\n    a_epi = 2.0\n    b_epi = 1.0\n    c_epi = 1.0\n    # Center of the ellipsoid (same for endo and epi)\n    center = (0.0, 0.0, 0.0)\n    # Location of the base\n    base_x = 0.0\n\n    # Create a lv ellipsoid mesh with longitudinal axis along the x-axis\n    geometry = ldrb.create_lv_mesh(\n        N=N,\n        a_endo=a_endo,\n        b_endo=b_endo,\n        c_endo=c_endo,\n        a_epi=a_epi,\n        b_epi=b_epi,\n        c_epi=c_epi,\n        center=center,\n        base_x=base_x\n    )\n\n\n    # Select fiber angles for rule based algorithm\n    angles = dict(alpha_endo_lv=60,  # Fiber angle on the endocardium\n                  alpha_epi_lv=-60,  # Fiber angle on the epicardium\n                  beta_endo_lv=0,  # Sheet angle on the endocardium\n                  beta_epi_lv=0)  # Sheet angle on the epicardium\n\n    fiber_space = 'Lagrange_1'\n\n    # Compute the microstructure\n    fiber, sheet, sheet_normal = ldrb.dolfin_ldrb(mesh=geometry.mesh,\n                                                  fiber_space=fiber_space,\n                                                  ffun=geometry.ffun,\n                                                  markers=geometry.markers,\n                                                  **angles)\n\n    # Compute focal point\n    focal = np.sqrt(a_endo**2 - (0.5 * (b_endo + c_endo))**2)\n    # Make mesh according to AHA-zones\n    pulse.geometry_utils.mark_strain_regions(mesh=geometry.mesh, foc=focal)\n\n    mapper = {'lv': 'ENDO', 'epi': 'EPI', 'rv': 'ENDO_RV', 'base': 'BASE'}\n    m = {mapper[k]: (v, 2) for k, v in geometry.markers.items()}\n\n    pulse.geometry_utils.save_geometry_to_h5(\n        geometry.mesh, h5name, markers=m,\n        fields=[fiber, sheet, sheet_normal]\n    )\n\ncreate_geometry('ellipsoid.h5')\n" }, { "alpha_fraction": 0.7364130616188049, "alphanum_fraction": 0.7463768124580383, "avg_line_length": 28.052631378173828, "blob_id": "28e09f8a87dcbf9c37408d5b4fb7bac819d986a3", "content_id": "ca1e6dc0646c3d8461e037b7f4c551f68c598442", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1104, "license_type": "no_license", "max_line_length": 124, "num_lines": 38, "path": "/pca.py", "repo_name": "nforsch/SSCP19-mechanics-project7", "src_encoding": "UTF-8", "text": "# PCA demo\n# Uses PCA from sklearn.decomposition: http://scikit-learn.org/stable/modules/generated/sklearn.decomposition.PCA.html\n\n\nget_ipython().run_line_magic('matplotlib', 'inline')\n\nimport matplotlib.pyplot as plt\nfrom sklearn.decomposition import PCA\nimport numpy as np\nimport seaborn as sns; sns.set()\n\n# Data\nX_train = []\nX_sample = []\n\n# PCA\npca = PCA(n_components=2)\npca.fit(X_train)\n# pca.explained_variance_\n# pca.explained_variance_ratio_\n# pca.components_\n# pca.mean_\n# pca.singular_values_\n\n# Transform sample data\nsample_weights = pca.transform(X_sample)\n\n# Recreate from component weights\nX_recreate = pca.mean_ + sample_weights.dot(pca.components_)\n# OR\n# X_recreate = pca.inverse_transform(sample_weights)\n\n# Plot explained variance per PC and cumulative\nvar_ratio = pca.explained_variance_ratio_\ncumsum_var = np.cumsum(var_ratio)\nplt.figure(figsize=(8, 6))\nplt.bar(range(1, len(var_ratio) + 1), var_ratio, color='r',alpha=0.5, align='center', label='individual explained variance')\nplt.step(range(1, len(cumsum_var) + 1), cumsum_var, where='mid', 
label='cumulative explained variance')\n" } ]
8
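A note on the pca.py demo that closes this repo: since its X_train/X_sample placeholders are empty, the script is not runnable as stored. Below is a minimal self-contained sketch of the same fit → transform → reconstruct cycle with random stand-in data; the array shapes and variable names are illustrative assumptions, not values from the project.

```python
# Minimal PCA round-trip sketch (illustrative data, not from the repo).
import numpy as np
from sklearn.decomposition import PCA

rng = np.random.RandomState(0)
X_train = rng.randn(100, 5)   # stand-in for the project's training matrix
X_sample = rng.randn(10, 5)   # stand-in for held-out samples

pca = PCA(n_components=2)
pca.fit(X_train)

# Project into PC space, then reconstruct from the component weights.
weights = pca.transform(X_sample)
X_recreated = pca.mean_ + weights.dot(pca.components_)

# pca.inverse_transform does the same computation (with whiten=False).
assert np.allclose(X_recreated, pca.inverse_transform(weights))
print(pca.explained_variance_ratio_, pca.explained_variance_ratio_.sum())
```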
HubertRonald/Seattle_Track_2
https://github.com/HubertRonald/Seattle_Track_2
268ea3dca105a3f0ece600f01fa4f3f42213d613
49875df048879de8ca6d98be3a86fb445a26883f
df8e16369f9e24dce76b740dbf879c4a77a3c78c
refs/heads/master
2021-02-13T19:13:34.093528
2018-09-26T04:19:41
2018-09-26T04:19:41
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5710111260414124, "alphanum_fraction": 0.5847457647323608, "avg_line_length": 48.59420394897461, "blob_id": "a93516f144a86d420a319df99a0223b1137d2fb8", "content_id": "f59906e3a36851a68b249890b066408571d2338d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3422, "license_type": "no_license", "max_line_length": 252, "num_lines": 69, "path": "/Submission/TeamBlueDots/Preprocessing_filter1/main.py", "repo_name": "HubertRonald/Seattle_Track_2", "src_encoding": "UTF-8", "text": "import pandas as pd\nfrom geopy.distance import great_circle\nimport datetime\nfrom itertools import combinations\nfrom Submission.functions import haversine, filter_df, box_intervals, time_overlap\n\ndf_file = '../Data/AIS_2017_12_Zone11.csv'\n\n# Rebuilds the dataframe from the chunks\ndf = pd.read_csv(df_file)\nprint('File read.', df.shape)\n\ndf = filter_df(df)\n\ndate_min = df.Date.min()  # start date\ndate_max = df.Date.max()  # end date\n\ndelta = date_max - date_min  # timedelta\n\ndates = []\nfor i in range(delta.days + 1):\n    dates.append(date_min + datetime.timedelta(i))\n\nfor date in dates:\n    date_start_time = datetime.datetime.now()\n    print(date)\n    df_date = df[(df.Date >= date) & (df.Date < date + datetime.timedelta(1))]\n\n    intervals = box_intervals(df_date)\n\n    num_boxes = len(intervals['lat']) * len(intervals['lon'])\n    print('Number of boxes:', num_boxes)\n    boxes_checked = 0\n\n    ship_combos_checked = set()\n    ships_interactions = 0\n    # Loop through each sub-box\n    for lat_i, temp_box_lat in enumerate(intervals['lat']):\n        for lon_i, temp_box_lon in enumerate(intervals['lon']):\n            boxes_checked = boxes_checked + 1\n            if boxes_checked % 1000 == 0:\n                print('Boxes checked:', boxes_checked)\n                print('Total ship comparisons for this day:', len(ship_combos_checked))\n                print('Total ship interactions for this day:', ships_interactions)\n            # Don't index past the end of the box grid (each box spans two intervals)\n            if (lat_i <= len(intervals['lat']) - 3) and (lon_i <= len(intervals['lon']) - 3):\n                # print('Starting box', temp_box_lat, temp_box_lon)\n                # Get all data within the box\n                df_box = df_date[(df_date.LAT >= temp_box_lat) & (df_date.LAT <= intervals['lat'][lat_i+2]) & (df_date.LON >= temp_box_lon) & (df_date.LON <= intervals['lon'][lon_i+2])]\n                # Get ids of all ships that existed in the box\n                ships = list(set(df_box.MMSI.tolist()))\n                # If more than 1 ship\n                if ships is not None and len(ships) > 1:\n                    ships.sort()\n                    # Create all combinations of ships that haven't been checked yet\n                    ship_combinations = set(list(combinations(ships, 2)))  # 2 for pairs, 3 for triplets, etc\n                    ship_combinations = ship_combinations - ship_combos_checked\n                    for combo in ship_combinations:\n                        # Check for time overlap\n                        if time_overlap((df_box.BaseDateTime[df_box.MMSI == combo[0]].min(), df_box.BaseDateTime[df_box.MMSI == combo[0]].max()), (df_box.BaseDateTime[df_box.MMSI == combo[1]].min(), df_box.BaseDateTime[df_box.MMSI == combo[1]].max())):\n                            # Check distance between ships (great_circle gives feet; divide by 3 for yards)\n                            ship_combos_checked.add(combo)\n                            distance = great_circle((df_box.LAT[df_box.MMSI == combo[0]].iloc[0], df_box.LON[df_box.MMSI == combo[0]].iloc[0]), (df_box.LAT[df_box.MMSI == combo[1]].iloc[0], df_box.LON[df_box.MMSI == combo[1]].iloc[0])).feet / 3\n                            if distance <= 8000:\n                                ships_interactions = ships_interactions + 1\n    date_end_time = datetime.datetime.now()\n    time_delta = date_end_time - date_start_time\n    print('Time to process:', time_delta.seconds / 60, 'minutes')\n" }, { "alpha_fraction": 0.5784586668014526, "alphanum_fraction": 0.6142061352729797, "avg_line_length": 26.97402572631836, "blob_id": "a66101447e26aa731f94a0b407fff3b42e224077", "content_id": "1a47804b414fc1263770afbfebffd9382d410124", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2154, "license_type": "no_license", "max_line_length": 84, "num_lines": 77, "path": "/Submission/TeamBlueDots/Preprocessing_filter1/functions.py", "repo_name": "HubertRonald/Seattle_Track_2", "src_encoding": "UTF-8", "text": "import pandas as pd\nfrom numpy import radians as rad\nfrom numpy import sin, cos, sqrt, arcsin\nimport numpy as np\nfrom geopy.distance import great_circle\n\n\ndef filter_df(df):\n    tug_tows = [21, 22, 31, 32, 52, 1023, 1025]\n    df = df[~df.VesselType.isin(tug_tows)]\n    print('Filtered out tugs.', df.shape)\n\n    df = df[df.Status != 'moored']\n    print('Filtered out moored.', df.shape)\n\n    df = df.sort_values(by='BaseDateTime', ascending=True)\n    print('Data sorted by time.')\n\n    df.BaseDateTime = pd.to_datetime(df.BaseDateTime, errors='raise')\n    df['Date'] = df.BaseDateTime.apply(lambda x: x.date())\n    print('Date/Time values converted to Date/Time objects.')\n\n    return df\n\n\ndef box_intervals(df):\n    lat_max = df['LAT'].max()\n    lat_min = df['LAT'].min()\n    lon_max = df['LON'].max()\n    lon_min = df['LON'].min()\n\n    border_min_gps = (lat_min, lon_min)\n    border_max_lat = (lat_max, lon_min)\n    border_max_lon = (lat_min, lon_max)\n\n    feet_to_yards = 3\n\n    lat_distance = great_circle(border_min_gps, border_max_lat).feet / feet_to_yards\n    lon_distance = great_circle(border_min_gps, border_max_lon).feet / feet_to_yards\n\n    box_half_size_yards = 4000\n\n    # number of grid points along each axis (lat count from the lat extent, lon from the lon extent)\n    lat_distance_num_intervals = lat_distance / box_half_size_yards\n    lon_distance_num_intervals = lon_distance / box_half_size_yards\n\n    intervals = {\n        'lat': list(np.linspace(lat_min, lat_max, int(lat_distance_num_intervals))),\n        'lon': list(np.linspace(lon_min, lon_max, int(lon_distance_num_intervals)))\n    }\n\n    # print(intervals['lat'], len(intervals['lat']))\n    # print(intervals['lon'], len(intervals['lon']))\n\n    return intervals\n\n\ndef haversine(coord1, coord2):\n    dLat = rad(coord2[0] - coord1[0])\n    dLon = rad(coord2[1] - coord1[1])\n    lat1 = rad(coord1[0])\n    lat2 = rad(coord2[0])\n    a = sin(dLat/2)**2 + cos(lat1)*cos(lat2)*sin(dLon/2)**2\n    c = 2*arcsin(sqrt(a))\n    R = 6372.8\n    return R*c\n\n\ndef time_overlap(r1, r2):\n    if r2[0] <= r1[0] <= r2[1]:\n        return True\n    if r1[0] <= r2[0] <= r1[1]:\n        return True\n    if r2[0] <= r1[1] <= r2[1]:\n        return True\n    if r1[0] <= r2[1] <= r1[1]:\n        return True\n    return False\n" }, { "alpha_fraction": 0.7540983557701111, "alphanum_fraction": 0.7540983557701111, "avg_line_length": 19.33333396911621, "blob_id": "ef306fe7b1ddf9f87256e3e0de1d0045af3ddd24", "content_id": "4035cbbf1c43bd72a71f223c0bb4e370673d0610", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 61, "license_type": "no_license", "max_line_length": 42, "num_lines": 3, "path": "/Submission/TeamBlueDots/README.md", "repo_name": "HubertRonald/Seattle_Track_2", "src_encoding": "UTF-8", "text": "# Blue Dots Team\n\nThis submission is for the Team Blue Dots.\n" }, { "alpha_fraction": 0.6091954112052917, "alphanum_fraction": 0.6469622254371643, "avg_line_length": 26.68181800842285, "blob_id": "3dea6f3140dec8ac1306cefb20ce77918b2be273", "content_id": "4288c42c7c202885aa3d9e5464bed71422ca3830", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 609, "license_type": "no_license", "max_line_length": 83, "num_lines": 22, "path": "/Data/create_sample.py", "repo_name": "HubertRonald/Seattle_Track_2", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Sep 21 18:57:27 2018\n\n@author: saramelvin\n\"\"\"\nimport pandas as pd\nimport pickle as pkl\n\ndef read_in_sample():\n    data_all = pd.read_csv(\"AIS_LA_SD_Jan_1_to_15_2016_Filtered_by_Proximity.csv\")\n    data_true_pos = pd.read_csv(\"Example_COLREGs_Interactions_UTM11.csv\")\n\n    data_sample = data_all.head()  # first 5 rows as a sample\n\n    return data_sample, data_true_pos\n\nif __name__ == \"__main__\":\n    sample_data, pos_sample = read_in_sample()\n    pkl.dump(sample_data, open(\"sample_data.p\", \"wb\"))\n    pkl.dump(pos_sample, open(\"pos_sample.p\", \"wb\"))\n" } ]
4
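The time_overlap helper in the Seattle_Track_2 functions.py above tests four containment cases separately. For well-formed closed intervals this reduces to a single condition: two ranges overlap exactly when each one starts before the other ends. A standalone sketch (not part of the repo) that checks the equivalence empirically:

```python
# Two closed intervals [a0, a1] and [b0, b1] overlap iff a0 <= b1 and b0 <= a1.
def time_overlap_simple(r1, r2):
    return r1[0] <= r2[1] and r2[0] <= r1[1]

def time_overlap(r1, r2):  # the four-branch version from functions.py
    if r2[0] <= r1[0] <= r2[1]: return True
    if r1[0] <= r2[0] <= r1[1]: return True
    if r2[0] <= r1[1] <= r2[1]: return True
    if r1[0] <= r2[1] <= r1[1]: return True
    return False

import random
random.seed(0)
for _ in range(10000):
    a = sorted(random.randint(0, 20) for _ in range(2))
    b = sorted(random.randint(0, 20) for _ in range(2))
    assert time_overlap(a, b) == time_overlap_simple(a, b)
```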
Eugene71/routeService
https://github.com/Eugene71/routeService
9077a70c4e1f34cd917b904312a4a3e22a440108
8d73d5a4763df88bd5390d2009880102c18a0a22
0b23349cd127ed0d56cc4a2a768a83da56cb6a15
refs/heads/master
2020-02-07T00:07:28.301170
2015-05-22T21:12:16
2015-05-22T21:12:16
31,121,993
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.501975953578949, "alphanum_fraction": 0.5139963626861572, "avg_line_length": 31.217506408691406, "blob_id": "e11b48242b2451c240d51f2d1ca4c29bc0999e14", "content_id": "ddf8f013c6f02ceb19e58a365a6d9c1457b3265c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12530, "license_type": "no_license", "max_line_length": 151, "num_lines": 377, "path": "/main.py", "repo_name": "Eugene71/routeService", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nimport MySQLdb, dateutil.parser\nfrom flask import Flask, request, jsonify, abort\n\napp = Flask(__name__)\n\ndef connectToDb():\n    return MySQLdb.connect(\n        host = \"mysql.server\",\n        db = \"mobappdev$default\",\n        user = \"mobappdev\",\n        passwd = \"w6E!CkXuP9d#\",\n        charset = \"utf8\"\n    )\n\n@app.errorhandler(400)\n@app.errorhandler(403)\n@app.errorhandler(404)\ndef badRequest(error):\n    if \"message\" in error.description:\n        response = jsonify({\n            \"message\": error.description[\"message\"]\n        })\n\n        response.status_code = error.code\n        return response\n    else:\n        return error.description, error.code\n\ndef checkExistFields(jsonObject, listFields):\n    correct = True\n\n    for field in listFields:\n        if (not field in jsonObject or jsonObject[field] == None):\n            correct = False\n\n    return correct\n\ndef getCorrectString(string):\n    return string.replace('\"', '\\\\\"').encode('utf8')\n\n@app.route('/getJobs')\ndef getJobs():\n    db = connectToDb()\n    cursor = db.cursor()\n\n    cursor.execute(\"SELECT * FROM job\")\n    data = cursor.fetchall()\n\n    jobs = []\n    for row in data:\n        jobs.append({\n            \"id\": row[0],\n            \"name\": row[1],\n        })\n\n    db.close()\n    return jsonify(jobs = jobs)\n\n@app.route('/getWorkers')\ndef getWorkers():\n    db = connectToDb()\n    cursor = db.cursor()\n\n    cursor.execute(\"SELECT * FROM worker\")\n    data = cursor.fetchall()\n\n    workers = []\n    for row in data:\n        workers.append({\n            \"id\": row[0],\n            \"jobId\": row[1],\n            \"firstName\": row[2],\n            \"secondName\": row[3],\n            \"lastName\": row[4],\n            \"sex\": row[5],\n            \"dateOfBirth\": row[6].isoformat(),\n            \"phone\": row[7],\n            \"mail\": row[8]\n        })\n\n    db.close()\n    return jsonify(workers = workers)\n\n@app.route('/saveWorkerData', methods=['POST'])\ndef saveWorkerData():\n    try:\n        jsonObject = request.json\n    except Exception:\n        abort(400, {\n            \"message\": u\"The current payload is not valid JSON.\"\n        })\n\n    if (jsonObject == None):\n        abort(400, {\n            \"message\": u\"Your request contains an incorrect Content-Type.\"\n        })\n    elif (not checkExistFields(jsonObject, [\"jobId\", \"firstName\", \"lastName\", \"sex\", \"dateOfBirth\", \"phone\", \"mail\"])):\n        abort(400, {\n            \"message\": u\"Not enough data to save the worker to the DB.\"\n        })\n    else:\n        db = connectToDb()\n        cursor = db.cursor()\n\n        secondName = None\n        signForSecondName = None\n        if \"secondName\" in jsonObject:\n            signForSecondName = \"=\"\n            secondName = '\"' + getCorrectString(jsonObject[\"secondName\"]) + '\"'\n        else:\n            signForSecondName = \"IS\"\n            secondName = \"NULL\"\n\n        cursor.execute(('SELECT * FROM worker WHERE job_id = {} AND first_name = \"{}\" AND second_name {} AND last_name = \"{}\"'\n            ' AND sex = \"{}\" AND date_of_birth = \"{}\" AND phone = \"{}\" AND mail = \"{}\"'.format(str(jsonObject[\"jobId\"]),\n            getCorrectString(jsonObject[\"firstName\"]), (signForSecondName + ' ' + secondName), getCorrectString(jsonObject[\"lastName\"]),\n            getCorrectString(jsonObject[\"sex\"]), dateutil.parser.parse(jsonObject[\"dateOfBirth\"]).date().isoformat(),\n            getCorrectString(jsonObject[\"phone\"]), getCorrectString(jsonObject[\"mail\"]))))\n\n        data = cursor.fetchall()\n\n        if (len(data) >= 1):\n            worker = {\n                \"id\": data[0][0],\n                \"jobId\": data[0][1],\n                \"firstName\": data[0][2],\n                \"secondName\": data[0][3],\n                \"lastName\": data[0][4],\n                \"sex\": data[0][5],\n                \"dateOfBirth\": data[0][6].isoformat(),\n                \"phone\": data[0][7],\n                \"mail\": data[0][8]\n            }\n\n            db.close()\n            return jsonify(worker = worker)\n        else:\n            cursor.execute(('INSERT INTO worker (job_id, first_name, second_name, last_name, sex, date_of_birth, phone, mail)'\n                ' VALUES ({}, \"{}\", {}, \"{}\", \"{}\", \"{}\", \"{}\", \"{}\")'.format(str(jsonObject[\"jobId\"]),\n                getCorrectString(jsonObject[\"firstName\"]), secondName, getCorrectString(jsonObject[\"lastName\"]),\n                getCorrectString(jsonObject[\"sex\"]), dateutil.parser.parse(jsonObject[\"dateOfBirth\"]).date().isoformat(),\n                getCorrectString(jsonObject[\"phone\"]), getCorrectString(jsonObject[\"mail\"]))))\n\n            db.commit()\n\n            cursor.execute(('SELECT * FROM worker WHERE job_id = {} AND first_name = \"{}\" AND second_name {} AND last_name = \"{}\"'\n                ' AND sex = \"{}\" AND date_of_birth = \"{}\" AND phone = \"{}\" AND mail = \"{}\"'.format(str(jsonObject[\"jobId\"]),\n                getCorrectString(jsonObject[\"firstName\"]), (signForSecondName + ' ' + secondName), getCorrectString(jsonObject[\"lastName\"]),\n                getCorrectString(jsonObject[\"sex\"]), dateutil.parser.parse(jsonObject[\"dateOfBirth\"]).date().isoformat(),\n                getCorrectString(jsonObject[\"phone\"]), getCorrectString(jsonObject[\"mail\"]))))\n\n            data = cursor.fetchall()\n\n            worker = {\n                \"id\": data[0][0],\n                \"jobId\": data[0][1],\n                \"firstName\": data[0][2],\n                \"secondName\": data[0][3],\n                \"lastName\": data[0][4],\n                \"sex\": data[0][5],\n                \"dateOfBirth\": data[0][6].isoformat(),\n                \"phone\": data[0][7],\n                \"mail\": data[0][8]\n            }\n\n            db.close()\n            return jsonify(worker = worker)\n\n@app.route('/saveWorkerRoute', methods=['POST'])\ndef saveWorkerRoute():\n    try:\n        jsonObject = request.json\n    except Exception:\n        abort(400, {\n            \"message\": u\"The current payload is not valid JSON.\"\n        })\n\n    if (jsonObject == None):\n        abort(400, {\n            \"message\": u\"Your request contains an incorrect Content-Type.\"\n        })\n    elif (not checkExistFields(jsonObject, [\"workerId\", \"name\", \"date\", \"length\", \"startPoint\", \"endPoint\"])):\n        abort(400, {\n            \"message\": u\"Not enough data to save the route to the DB.\"\n        })\n    else:\n        db = connectToDb()\n        cursor = db.cursor()\n\n        mediumPoints = None\n        signForMediumPoints = None\n        if \"mediumPoints\" in jsonObject:\n            signForMediumPoints = \"=\"\n            mediumPoints = '\"' + getCorrectString(jsonObject[\"mediumPoints\"]) + '\"'\n        else:\n            signForMediumPoints = \"IS\"\n            mediumPoints = \"NULL\"\n\n        cursor.execute('SELECT id FROM route WHERE name = \"' + getCorrectString(jsonObject[\"name\"]) + '\"')\n        data = cursor.fetchall()\n\n        if (len(data) >= 1):\n            db.close()\n\n            abort(403, {\n                \"message\": u\"This route already exists in the DB.\"\n            })\n        else:\n            cursor.execute(('INSERT INTO route (worker_id, name, date, length, start_point, end_point, medium_points)'\n                ' VALUES ({}, \"{}\", \"{}\", {}, \"{}\", \"{}\", {})'.format(str(jsonObject[\"workerId\"]),\n                getCorrectString(jsonObject[\"name\"]), dateutil.parser.parse(jsonObject[\"date\"]).isoformat(),\n                str(jsonObject[\"length\"]), getCorrectString(jsonObject[\"startPoint\"]), getCorrectString(jsonObject[\"endPoint\"]), mediumPoints)))\n\n            db.commit()\n\n            cursor.execute(('SELECT * FROM route WHERE worker_id = {} AND name = \"{}\" AND date = \"{}\" AND length = {} AND start_point = \"{}\"'\n                ' AND end_point = \"{}\" AND medium_points {}'.format(str(jsonObject[\"workerId\"]),\n                getCorrectString(jsonObject[\"name\"]), dateutil.parser.parse(jsonObject[\"date\"]).isoformat(), str(jsonObject[\"length\"]),\n                getCorrectString(jsonObject[\"startPoint\"]), getCorrectString(jsonObject[\"endPoint\"]), (signForMediumPoints + ' ' + mediumPoints))))\n\n            data = cursor.fetchall()\n\n            route = {\n                \"id\": data[0][0],\n                \"workerId\": data[0][1],\n                \"name\": data[0][2],\n                \"date\": data[0][3].isoformat(),\n                \"length\": data[0][4],\n                \"startPoint\": data[0][5],\n                \"endPoint\": data[0][6],\n                \"mediumPoints\": data[0][7]\n            }\n\n            db.close()\n            return jsonify(route = route)\n\n@app.route('/getRoutes/worker/<id>')\ndef getRoutesByWorkerId(id):\n    try:\n        int(id)\n    except ValueError:\n        abort(400, {\n            \"message\": u\"You passed an invalid worker id.\"\n        })\n\n    db = connectToDb()\n    cursor = db.cursor()\n\n    cursor.execute('SELECT * FROM route WHERE worker_id = {}'.format(id))\n    data = cursor.fetchall()\n\n    if (len(data) >= 1):\n        routes = []\n        for row in data:\n            routes.append({\n                \"id\": row[0],\n                \"workerId\": row[1],\n                \"name\": row[2],\n                \"date\": row[3].isoformat(),\n                \"length\": row[4],\n                \"startPoint\": row[5],\n                \"endPoint\": row[6],\n                \"mediumPoints\": row[7]\n            })\n\n        db.close()\n        return jsonify(routes = routes)\n    else:\n        db.close()\n\n        abort(404, {\n            \"message\": u\"No such route exists in the DB.\"\n        })\n\n@app.route('/getRoutes/worker/<id>/name/<name>')\ndef getRoutesByName(id, name):\n    try:\n        int(id)\n    except ValueError:\n        abort(400, {\n            \"message\": u\"You passed an invalid worker id.\"\n        })\n\n    db = connectToDb()\n    cursor = db.cursor()\n\n    cursor.execute('SELECT * FROM route WHERE name LIKE \"%{}%\" AND worker_id = {}'.format(\n        getCorrectString(name), id\n    ))\n\n    data = cursor.fetchall()\n    if (len(data) >= 1):\n        routes = []\n        for row in data:\n            routes.append({\n                \"id\": row[0],\n                \"workerId\": row[1],\n                \"name\": row[2],\n                \"date\": row[3].isoformat(),\n                \"length\": row[4],\n                \"startPoint\": row[5],\n                \"endPoint\": row[6],\n                \"mediumPoints\": row[7]\n            })\n\n        db.close()\n        return jsonify(routes = routes)\n    else:\n        db.close()\n\n        abort(404, {\n            \"message\": u\"No such route exists in the DB.\"\n        })\n\n@app.route('/getRoutes/worker/<id>/date')\ndef getRoutesByDate(id):\n    try:\n        int(id)\n    except ValueError:\n        abort(400, {\n            \"message\": u\"You passed an invalid worker id.\"\n        })\n\n    startDate = request.args.get('start_date', None)\n    endDate = request.args.get('end_date', None)\n\n    whereClause = \"\"\n    if (startDate != None and endDate != None):\n        whereClause = 'WHERE date BETWEEN \"{}\" AND \"{}\"'.format(\n            dateutil.parser.parse(startDate).isoformat(),\n            dateutil.parser.parse(endDate).isoformat()\n        )\n    elif (startDate != None):\n        whereClause = 'WHERE date >= \"{}\"'.format(\n            dateutil.parser.parse(startDate).isoformat()\n        )\n    elif (endDate != None):\n        whereClause = 'WHERE date <= \"{}\"'.format(\n            dateutil.parser.parse(endDate).isoformat()\n        )\n    else:\n        abort(400, {\n            \"message\": u\"You did not pass any date.\"\n        })\n\n    whereClause += ' AND worker_id = {}'.format(id)\n\n    db = connectToDb()\n    cursor = db.cursor()\n\n    cursor.execute('SELECT * FROM route ' + whereClause)\n    data = cursor.fetchall()\n\n    if (len(data) >= 1):\n        routes = []\n        for row in data:\n            routes.append({\n                \"id\": row[0],\n                \"workerId\": row[1],\n                \"name\": row[2],\n                \"date\": row[3].isoformat(),\n                \"length\": row[4],\n                \"startPoint\": row[5],\n                \"endPoint\": row[6],\n                \"mediumPoints\": row[7]\n            })\n\n        db.close()\n        return jsonify(routes = routes)\n    else:\n        db.close()\n\n        abort(404, {\n            \"message\": u\"No such route exists in the DB.\"\n        })\n" } ]
1
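The routeService application above assembles every SQL statement with str.format on request data, which getCorrectString only partially mitigates and which leaves the service open to SQL injection. Below is a hedged sketch of the same kind of lookup using MySQLdb's parameter substitution instead; the table and column names mirror the service, but the helper itself is illustrative, not code from the repo.

```python
# Parameterized version of a lookup from the service above: the driver
# escapes the values itself, so user input cannot alter the SQL structure.
import MySQLdb

def get_routes_by_name(db, worker_id, name):
    cursor = db.cursor()
    cursor.execute(
        "SELECT * FROM route WHERE name LIKE %s AND worker_id = %s",
        ("%" + name + "%", worker_id),  # values passed separately, never interpolated
    )
    return cursor.fetchall()
```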
akshaybalwally/ICPC
https://github.com/akshaybalwally/ICPC
c42818ec221edb9b95f155dd0394c666158687b4
52cab02197c3eeab49b93d57f36cfc374e027293
1f899ee84d7cfecadcd9672610850d2eaa9cebc3
refs/heads/master
2021-01-01T16:21:01.999346
2015-07-14T17:54:15
2015-07-14T17:54:15
39,091,276
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5968660712242126, "alphanum_fraction": 0.6282051205635071, "avg_line_length": 17.945945739746094, "blob_id": "b51c275a5125426428647d2fe8972bc6bc808695", "content_id": "380262b8d9aaf32c76635bf2afaf88b715239ec6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 702, "license_type": "no_license", "max_line_length": 40, "num_lines": 37, "path": "/palindromes/palindromes.py", "repo_name": "akshaybalwally/ICPC", "src_encoding": "UTF-8", "text": "def isPalindrome(word):\n\tfor i in range(0,len(word)/2 + 1):\n\t\tif word[i] != word[len(word) - (i+1)]:\n\t\t\treturn False\n\treturn True\n\ndef run(words):\n\tfor word1 in words:\n\t\tfor word2 in words:\n\t\t\tif word1 != word2:\n\t\t\t\tif isPalindrome(word1+word2):\n\t\t\t\t\treturn word1+word2\n\t\t\t\t\t\n\t\t\t\telif isPalindrome(word2+word1):\n\t\t\t\t\treturn word2+word1\n\treturn '0'\n\t\t\t\t\t\n\n\n\nf = open('input.txt', 'r')\ncases = int(f.readline())\n#print cases\nfor case in range(0,cases):\n\t\n\twordNumber = int(f.readline())\n\twords = []\n\tfor wordnum in range(0,wordNumber):\n\t\tstring = f.readline()\n\t\t#print string\n\t\tx = len(string)\n\t\tif string[x-1] == '\\n':\n\t\t\tstring = string[0:x-1]\n\t\twords.append(string)\n\tprint run(words)\n\t\n\t#print words\t\n" } ]
1
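The ICPC solution above scans every ordered pair of words, which is fine for small contest inputs. Below is a Python 3 rendering of the same brute-force idea, using slicing for the palindrome test; it is a standalone sketch, and the file-based input handling of the original is omitted.

```python
# Find a pair of distinct words whose concatenation is a palindrome.
def is_palindrome(word):
    return word == word[::-1]

def palindrome_pair(words):
    for w1 in words:
        for w2 in words:
            if w1 != w2 and is_palindrome(w1 + w2):
                return w1 + w2
    return '0'  # same sentinel the contest solution prints when no pair exists

assert palindrome_pair(["ab", "ba", "xy"]) == "abba"
assert palindrome_pair(["ab", "cd"]) == "0"
```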
rootoor-dev/simec
https://github.com/rootoor-dev/simec
b5648bd93294c2f4e5b537c09030e60101e6f6e3
45392475debc37883a79eddb0e90f2669ce0a44e
1f9b217ef0d9ea634e7ec5c025a4dbfe5c4a091a
refs/heads/master
2023-03-18T17:28:00.072676
2020-06-28T16:30:54
2020-06-28T16:30:54
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7691360116004944, "alphanum_fraction": 0.7883874773979187, "avg_line_length": 134.2708282470703, "blob_id": "1429c6a6014a8a701df2697d4725b40d635c581e", "content_id": "54a5bdf41522ff645d19aa8f9ef7d3ce9bde1bda", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 6494, "license_type": "permissive", "max_line_length": 612, "num_lines": 48, "path": "/README.md", "repo_name": "rootoor-dev/simec", "src_encoding": "UTF-8", "text": "# Similarity Encoders (SimEc)\n\nThis repository contains the code for the Similarity Encoder (SimEc) neural network architecture based on the `keras` library. Several Jupyter notebooks with examples should give you an idea of how to use this code. A basic setup of the model is also implemented using the `torch` NN library.\nFor further details on the model and experiments please refer to the [paper](https://arxiv.org/abs/1702.01824) or [my PhD thesis](http://dx.doi.org/10.14279/depositonce-9956) - and of course if any of this code was helpful for your research, please consider citing it:\n```\n@article{horn2018simec,\n title={Predicting Pairwise Relations with Neural Similarity Encoders},\n author={Horn, Franziska and Müller, Klaus-Robert},\n journal={Bulletin of the Polish Academy of Sciences: Technical Sciences},\n volume={66},\n number={6},\n pages={821--830},\n year={2018},\n publisher={Polish Academy of Sciences}\n}\n```\n\nThis code is still work in progress and intended for research purposes. It was programmed for Python 3 but should also work in Python 2.7.\n\n#### dependencies\n- *main simec code:* `numpy`, `keras` (version 2.2.4 with `tensorflow` backend (version 1.x.x)) (or `torch`)\n- *examples:* `scipy`, `sklearn`, `matplotlib`, [`nlputils`](https://github.com/cod3licious/nlputils), `innvestigate`\n\n### Getting your hands dirty\n\nFirst check out the Jupyter notebook [`basic_examples_simec_with_keras.ipynb`](https://github.com/cod3licious/simec/blob/master/basic_examples_simec_with_keras.ipynb), to get an idea of how Similarity Encoders can be implemented with keras. Then have a look at [`basic_examples_compact.ipynb`](https://github.com/cod3licious/simec/blob/master/basic_examples_compact.ipynb), which uses the `SimilarityEncoder` class from `simec.py` to setup a basic SimEc model with less lines of code.\n\nThe other Jupyter notebooks contain further examples and experiments reported in the paper (see below).\n\nIf you're interested in the PyTorch implementation of SimEc, checkout the [`examples_torch.ipynb`](https://github.com/cod3licious/simec/blob/master/examples_torch.ipynb) notebook, which gives some examples of how to use the SimEc model implemented in `simec_torch.py`.\n\nIf you have any questions please don't hesitate to send me an [email](mailto:cod3licious@gmail.com) and of course if you should find any bugs or want to contribute other improvements, pull requests are very welcome!\n\n\n#### Examples\n- [`experiments_paper.ipynb`](https://github.com/cod3licious/simec/blob/master/experiments_paper.ipynb): All experimental results reported in the original paper.\n- [`00_general_transfer_learning.ipynb`](https://github.com/cod3licious/simec/blob/master/00_general_transfer_learning.ipynb): Basic transfer learning example with torch on the CIFAR10 dataset, where a classifier based on a FFNN of varying complexity is pre-trained on different tasks. 
This has nothing to do with SimEc but serves as a general reference for the transfer learning with similarities notebook.\n- [`00_matrix_factorization.ipynb`](https://github.com/cod3licious/simec/blob/master/00_matrix_factorization.ipynb): Classical SVD and eigendecomposition of a random matrix R (m x n) and a square symmetric matrix S (m x m) with SimEc to show that NN can be used for this kind of computation as first described in 1992 by A. Cichocki.\n- [`00_flowerpots.ipynb`](https://github.com/cod3licious/simec/blob/master/00_flowerpots.ipynb): A simple illustrative example to show that SimEc can learn the connection between feature vectors and an arbitrary similarity matrix, thereby being able to map new test samples into a similarity preserving embedding space, which kernel PCA is unable to do.\n- [`01_embed_linear_nonlinear.ipynb`](https://github.com/cod3licious/simec/blob/master/01_embed_linear_nonlinear.ipynb): Show on the MNIST (image) and 20 newsgroups (text) datasets, that SimEc can achieve the same solution as kernel PCA for linear and non-linear similarity matrices.\n- [`02_embed_nonmetric.ipynb`](https://github.com/cod3licious/simec/blob/master/02_embed_nonmetric.ipynb): Experiments to demonstrate that SimEc can predict non-metric similarities and multiple similarities at once.\n- [`03_embed_classlabels.ipynb`](https://github.com/cod3licious/simec/blob/master/03_embed_classlabels.ipynb): Experiments to demonstrate that SimEc can learn embeddings based on human similarity judgments.\n- [`04_noisy_data.ipynb`](https://github.com/cod3licious/simec/blob/master/04_noisy_data.ipynb): Show how SimEc deals with noise in the input data (random/correlated, either added to the data or as additional dimensions). While kPCA can only handle moderate amounts of noise, SimEc is capable of filtering out noise even if it is several times the standard deviation of the underlying data.\n- [`05_manifold_s-curve.ipynb`](https://github.com/cod3licious/simec/blob/master/05_manifold_s-curve.ipynb): Experiments on classical manifold learning datasets like the S-curve. With the right target similarities and parameters, SimEc can get both a \"global\" solution like PCA or a \"local\" solution (i.e. \"unrolling\" the manifold) like isomap.\n- [`06_link_prediction.ipynb`](https://github.com/cod3licious/simec/blob/master/06_link_prediction.ipynb): Shows how SimEc can be used to predict relations between two entities on popular link prediction datasets.\n- [`07_recommender_systems.ipynb`](https://github.com/cod3licious/simec/blob/master/07_recommender_systems.ipynb): Gives an example how SimEc can be used in practice for improving recommender systems. In particular, we show that better recommendations can be generated for new items, which did not receive any user ratings yet, by learning the mapping between the items' feature vectors and the user preferences. 
Furthermore, we show how the predicted ratings can be interpreted (i.e., why a certain user prefers an item) and how SimEc embeddings can improve suggestions based on content based similarity scores.\n- [`08_transfer_learning_similarities.ipynb`](https://github.com/cod3licious/simec/blob/master/08_transfer_learning_similarities.ipynb): Pre-training a CNN on the CIFAR10 dataset using a SimEc with different target similarities.\n- [`09_interpret_similarities_zappos50k.ipynb`](https://github.com/cod3licious/simec/blob/master/09_interpret_similarities_zappos50k.ipynb): Interpret the similarities predicted by SimEc for images of shoes (zappos50k dataset) based on two different similarity measures (heel height and closure mechanism of the shoes).\n" }, { "alpha_fraction": 0.594990074634552, "alphanum_fraction": 0.5999204516410828, "avg_line_length": 49.29999923706055, "blob_id": "3c6c650b37d0e4c48c88dd9b355e9a5426de940e", "content_id": "6da0537622c3b8d655abbe3302ba51e3d5a01188", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12575, "license_type": "permissive", "max_line_length": 136, "num_lines": 250, "path": "/simec_torch.py", "repo_name": "rootoor-dev/simec", "src_encoding": "UTF-8", "text": "from __future__ import unicode_literals, division, print_function, absolute_import\nfrom builtins import range, object\nfrom copy import deepcopy\nimport random\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.utils.data as tdata\nrandom.seed(28)\nnp.random.seed(28)\ntorch.manual_seed(28)\ntorch.cuda.manual_seed(28)\ntorch.backends.cudnn.deterministic = True\n\n\nclass Dense(nn.Linear):\n \"\"\"\n Shorthand for a nn.Linear layer with an activation function\n\n Args:\n in_dim (int): number of input feature\n out_dim (int): number of output features\n bias (bool): If set to False, the layer will not adapt the bias. (default: True)\n activation (callable): activation function or string (default: None)\n \"\"\"\n def __init__(self, in_dim, out_dim, bias=True, activation=None):\n activation_map = {\"tanh\": torch.tanh,\n \"sigmoid\": torch.sigmoid,\n \"relu\": torch.relu}\n if activation in activation_map:\n activation = activation_map[activation]\n self.activation = activation\n super(Dense, self).__init__(in_dim, out_dim, bias)\n\n def forward(self, inputs):\n y = super(Dense, self).forward(inputs)\n if self.activation:\n y = self.activation(y)\n return y\n\n\nclass FFNet(nn.Module):\n\n def __init__(self, in_dim, out_dim, hidden_layers=[]):\n \"\"\"\n Neural network PyTorch model; shortcut for creating a feed forward NN that can be used as an in_net for a SimEcModel\n\n Input:\n - in_dim: input dimensionality\n - out_dim: output dimensionality\n - hidden_layers: list with tuples of (number of hidden units [int], activation function [string or function])\n \"\"\"\n super(FFNet, self).__init__()\n # get a list of layer dimensions: in_dim --> hl --> out_dim\n dimensions = [in_dim]\n dimensions.extend([h[0] for h in hidden_layers])\n dimensions.append(out_dim)\n # get list of activation functions (output (i.e. 
embedding) layer has no activation)\n activations = [h[1] for h in hidden_layers]\n activations.append(None)\n # initialize dense layers\n layers = [Dense(dimensions[i], dimensions[i+1], activation=activations[i]) for i in range(len(activations))]\n # construct feed forward network\n self.net = nn.Sequential(*layers)\n\n def forward(self, inputs):\n return self.net(inputs)\n\n\nclass SimEcModel(nn.Module):\n\n def __init__(self, in_net, embedding_dim, out_dim, ll_activation=None, W_ll=None, wll_frozen=False):\n \"\"\"\n Similarity Encoder (SimEc) neural network PyTorch model\n\n Input:\n - in_net: input network mapping from whatever original input to the embedding (e.g. a FFNet)\n - embedding_dim: dimensionality of the embedding layer\n - out_dim: dimensionality of the output / number of targets\n - ll_activation: activation function on the last layer.\n - W_ll: matrix that should be used as the (frozen) weights of the last layer; this should be used if you factorize\n an (m x n) matrix R and want to get the mapping for both some (m x D) features as well as some (n x P) features.\n To do this, first train a SimEc to approximate R using the (m x D) feature matrix as input. After training,\n use simec.transform(X) to get the (m x embedding_dim) embedding Y. Then train another SimEc using the\n (n x P) feature matrix as input to approximate R.T and this time set W_ll=Y.T. Then, with both SimEcs you\n can project the (m x D) as well as the (n x P) feature vectors into the same embedding space where their\n scalar product approximates R.\n W_ll could also be initialized by the kPCA embedding of the similarity matrix.\n - wll_frozen: if W_ll is initialized manually (W_ll given), whether the parameters should be frozen (bool, default: False)\n \"\"\"\n super(SimEcModel, self).__init__()\n # the simec model is the in_net, which creates the embedding,\n self.embedding_net = in_net\n # plus a last layer to compute the similarity approximation\n self.W_ll = Dense(embedding_dim, out_dim, bias=False, activation=ll_activation)\n # possibly initialize W_ll, e.g., to KPCA embedding\n if W_ll is not None:\n assert W_ll.shape == (embedding_dim, out_dim), \"W_ll shape mismatch; should be (%i, %i)\" % (embedding_dim, out_dim)\n self.W_ll.weight.data.copy_(torch.from_numpy(W_ll.T))\n if wll_frozen:\n self.W_ll.weight.requires_grad = False\n\n def forward(self, inputs):\n x = self.embedding_net(inputs)\n x = self.W_ll(x)\n return x\n\n\nclass SimilarityEncoder(object):\n\n def __init__(self, in_net, embedding_dim, out_dim, ll_activation=None, W_ll=None, wll_frozen=False, **kwargs):\n \"\"\"\n Similarity Encoder (SimEc) neural network model wrapper\n\n Input:\n - in_net: either a NN model that maps from the input to the embedding OR the dimensionality of\n the input feature vector (int), in which case a FFNet will be created by supplying\n the kwargs, which should probably contain a \"hidden_layers\" argument\n - embedding_dim: dimensionality of the embedding layer\n - out_dim: dimensionality of the output / number of targets\n - ll_activation: activation function on the last layer. If a different loss than mse is used,\n this should probably be changed as well (default: None, i.e. 
linear activation).\n - W_ll: matrix that should be used as the (frozen) weights of the last layer; this should be used if you factorize\n an (m x n) matrix R and want to get the mapping for both some (m x D) features as well as some (n x P) features.\n To do this, first train a SimEc to approximate R using the (m x D) feature matrix as input. After training,\n use simec.transform(X) to get the (m x embedding_dim) embedding Y. Then train another SimEc using the\n (n x P) feature matrix as input to approximate R.T and this time set W_ll=Y.T. Then, with both SimEcs you\n can project the (m x D) as well as the (n x P) feature vectors into the same embedding space where their\n scalar product approximates R.\n W_ll could also be initialized by the kPCA embedding of the similarity matrix.\n - wll_frozen: if W_ll is initialized manually (W_ll given), whether the parameters should be frozen (bool, default: False)\n \"\"\"\n if isinstance(in_net, int):\n in_net = FFNet(in_net, embedding_dim, **kwargs)\n self.model = SimEcModel(in_net, embedding_dim, out_dim, ll_activation, W_ll, wll_frozen)\n self.device = \"cpu\" # by default, before training, the model is on the cpu\n\n def fit(self, X, S, epochs=25, batch_size=32, lr=0.0005, weight_decay=0., s_ll_reg=0., S_ll=None, orth_reg=0., warn=True):\n \"\"\"\n Train the SimEc model\n\n Input:\n - X: n x in_dim feature matrix\n - S: n x out_dim target similarity matrix\n - epochs: int, number of epochs to train (default: 25)\n - batch_size: int, number of samples per batch (default: 32)\n - lr: float used as the learning rate for the Adam optimizer (default: lr=0.0005)\n - weight_decay: l2 regularization, given as a parameter to the optimizer\n - s_ll_reg: float, regularization strength for (S - W_ll^T W_ll), i.e. how much the dot product of the\n last layer weights should approximate the target similarities; useful when factoring a square symmetric\n similarity matrix. (default: 0.; if > 0. need to give S_ll): try 100.\n - S_ll: matrix that the dot product of the last layer should approximate (see above), needs to be (out_dim x out_dim)\n - orth_reg: float, regularization strength for (lambda*I - W_ll W_ll^T), i.e. to encourage orthogonal rows in the last layer\n usually only helpful when using many embedding dimensions (> 100): try <= 1.\n \"\"\"\n if np.max(np.abs(S)) > 5. 
and warn:\n print(\"Warning: For best results, S (and X) should be normalized (try S /= np.max(np.abs(S))).\")\n self.device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n self.model.to(self.device)\n self.model.train()\n\n if s_ll_reg > 0:\n if S_ll is None:\n print(\"Warning: need to give S_ll if s_ll_reg > 0.\")\n s_ll_reg = 0.\n else:\n S_ll = torch.from_numpy(S_ll).float()\n S_ll = S_ll.to(self.device)\n if orth_reg > 0:\n edim = self.model.W_ll.weight.size()[1]\n Ones = torch.from_numpy((np.ones((edim, edim)) - np.eye(edim))).float()\n Ones = Ones.to(self.device)\n\n criterion = nn.MSELoss()\n optimizer = optim.Adam(self.model.parameters(), lr=lr, weight_decay=weight_decay)\n lr_scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=5, eps=0., verbose=True)\n\n kwargs = {'num_workers': 1, 'pin_memory': True} if not self.device == \"cpu\" else {}\n trainloader = tdata.DataLoader(tdata.TensorDataset(torch.from_numpy(X).float(), torch.from_numpy(S).float()),\n batch_size=batch_size, shuffle=True, **kwargs)\n # loop over the dataset multiple times\n best_loss = np.inf\n best_model = None\n for epoch in range(epochs):\n\n running_loss = 0.0\n for i, data in enumerate(trainloader):\n # get the inputs\n x_batch, s_batch = data\n x_batch, s_batch = x_batch.to(self.device), s_batch.to(self.device)\n\n # zero the parameter gradients\n optimizer.zero_grad()\n\n # forward + backward + optimize\n outputs = self.model(x_batch)\n loss = criterion(outputs, s_batch)\n running_loss += loss.item()\n # possibly some regularization stuff based on the last layer\n # CAREFUL! linear layer weights are stored with dimensions (out_dim, in_dim) instead of\n # how you initialized them with (in_dim, out_dim), therefore the transpose is mixed up here!\n if s_ll_reg > 0:\n loss += s_ll_reg*criterion(torch.mm(self.model.W_ll.weight, self.model.W_ll.weight.t()), S_ll)\n # since orth_reg can slow down convergence, don't use it right from the start\n if orth_reg > 0 and epoch > epochs/3:\n loss += orth_reg * torch.mean((Ones * torch.mm(self.model.W_ll.weight.t(), self.model.W_ll.weight))**2)\n loss.backward()\n optimizer.step()\n print('[epoch %d] loss: %.7f' % (epoch + 1, running_loss / (i + 1)))\n if epoch >= 4:\n lr_scheduler.step(running_loss)\n # in case the learning rate was too high or something we keep track\n # of the model with the lowest error and use that in the end\n if running_loss <= best_loss:\n best_loss = running_loss\n best_model = deepcopy(self.model.state_dict())\n self.model.load_state_dict(best_model)\n\n def transform(self, X):\n \"\"\"\n Project the input feature vectors to the embedding space\n\n Input:\n - X: m x in_dim feature matrix\n\n Returns:\n - Y: m x embedding_dim embedding matrix\n \"\"\"\n self.model.eval()\n X = torch.from_numpy(X).float().to(self.device)\n with torch.no_grad():\n Y = self.model.embedding_net(X).cpu()\n return Y.numpy()\n\n def predict(self, X):\n \"\"\"\n Generate the output of the network, i.e. 
the predicted similarities\n\n Input:\n - X: m x in_dim feature matrix\n\n Returns:\n - S': m x out_dim output matrix with approximated similarities to the out_dim targets\n \"\"\"\n self.model.eval()\n X = torch.from_numpy(X).float().to(self.device)\n with torch.no_grad():\n S = self.model(X).cpu()\n return S.numpy()\n" }, { "alpha_fraction": 0.6937738060951233, "alphanum_fraction": 0.714104175567627, "avg_line_length": 48.1875, "blob_id": "87a0b62283b4a6d2759037f6b8a0565df1635c51", "content_id": "15c69f13a02d7bc02ba80bfdf97ea243aef0933f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 787, "license_type": "permissive", "max_line_length": 157, "num_lines": 16, "path": "/data/nonmetric/get_nasa.py", "repo_name": "rootoor-dev/simec", "src_encoding": "UTF-8", "text": "import os\nimport random\nimport urllib2\nfrom bs4 import BeautifulSoup as bsoup\n\n# download html with links to all articles\nlink = \"https://www.nasa.gov/audience/formedia/archives/MP_Archive_02.html\"\nhtml = urllib2.urlopen(urllib2.Request(link, headers={'User-Agent': 'Magic Browser%i'%random.randint(0,100)})).read()\n# make a beautiful soup object out of the html\nsoup = bsoup(html)\n# extract the links to all articles\ntxt_urls = soup.find_all('a', {'class':'featureLnk'}, href=True)\n# save all articles (beware: folder has to exist!)\nfor u in txt_urls:\n with open(os.path.join('nasa', os.path.basename(u['href'])), 'w') as f:\n f.write(urllib2.urlopen(urllib2.Request(\"https://www.nasa.gov\" + u['href'], headers={'User-Agent': 'Magic Browser%i'%random.randint(0,100)})).read())\n" }, { "alpha_fraction": 0.5859574675559998, "alphanum_fraction": 0.5982978940010071, "avg_line_length": 35.71875, "blob_id": "4199b9a7ffc42ad0b1b1a6ca74578d32ed787a88", "content_id": "36294ccf4a0727af61919771c5238433b6eca03b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2350, "license_type": "permissive", "max_line_length": 101, "num_lines": 64, "path": "/utils_torch.py", "repo_name": "rootoor-dev/simec", "src_encoding": "UTF-8", "text": "from __future__ import unicode_literals, division, print_function, absolute_import\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport torch\n\n\ndef compare_state_dicts(sd1, sd2):\n # check if the state dicts of two models are the same\n is_equal = True\n for p in sd1:\n if not torch.all(torch.eq(sd1[p], sd2[p])):\n is_equal = False\n break\n return is_equal\n\n\ndef examine_param_space(model, sd1, sd2, train_loader, test_loader, criterion, plot=None):\n \"\"\"\n Interpolate between the parameters of sd1 and sd2 and evaluate the loss function of the model\n at each point. The resulting plot shows the flatness or sharpness of the minima at each solution.\n\n See: Goodfellow et al. 
\"Qualitatively Characterizing Neural Network Optimization Problems\" (2015)\n \"\"\"\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n loss_train = []\n loss_test = []\n alphas = np.arange(-1, 2.1, 0.1)\n for a in alphas:\n print(\"alpha: %.1f \" % a, end=\"\\r\")\n # get model with interpolated weights\n sd_tmp = {p: (1-a)*sd1[p] + a*sd2[p] for p in sd1}\n model.load_state_dict(sd_tmp)\n model = model.to(device)\n # evaluate the model with these parameters on the given data\n model.eval()\n\n def compute_loss(data_loader):\n tmp_loss = 0\n with torch.no_grad():\n for data, target in data_loader:\n data, target = data.to(device), target.to(device)\n output = model(data)\n # sum up batch loss (criterion should have reduction=\"sum\")\n tmp_loss += criterion(output, target).item()\n return tmp_loss / len(data_loader.sampler)\n\n # train\n loss_train.append(compute_loss(train_loader))\n # test\n loss_test.append(compute_loss(test_loader))\n print(\"alpha: %.1f...done.\" % a)\n\n # possibly plot the results\n if plot is not None:\n plt.figure()\n plt.plot(alphas, loss_train, label=\"$J(\\\\theta)$ train\")\n plt.plot(alphas, loss_test, label=\"$J(\\\\theta)$ test\")\n plt.xlabel(\"$\\\\alpha$\")\n plt.ylabel(\"$J((1-\\\\alpha)\\\\cdot\\\\theta_0 + \\\\alpha\\\\cdot\\\\theta_1)$\")\n plt.legend(loc=0)\n plt.title(plot)\n else:\n return alphas, loss_train, loss_test\n" }, { "alpha_fraction": 0.5131208300590515, "alphanum_fraction": 0.539065957069397, "avg_line_length": 38.44444274902344, "blob_id": "f33d22025e9d51716e152ab598d32e0376cfe6dd", "content_id": "68e269c354742f6c06fb3899bb1006d2efabffe5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6745, "license_type": "permissive", "max_line_length": 119, "num_lines": 171, "path": "/utils_plotting.py", "repo_name": "rootoor-dev/simec", "src_encoding": "UTF-8", "text": "from __future__ import unicode_literals, division, print_function, absolute_import\nfrom builtins import str, range\nimport colorsys\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import offsetbox\n\n\ndef get_colors(N=100):\n HSV_tuples = [(x * 1. 
/ (N+1), 1., 0.8) for x in range(N)]\n return [colorsys.hsv_to_rgb(*x) for x in HSV_tuples]\n\n\ndef plot2d(X, Y_plot, X_test=None, Y_plot_test=None, title='original'):\n plt.figure()\n if (X_test is not None) and (Y_plot_test is not None):\n plt.scatter(X[:, 0], X[:, 1], c=Y_plot.flatten(), alpha=1)\n plt.scatter(X_test[:, 0], X_test[:, 1], c=Y_plot_test.flatten(), alpha=0.3)\n else:\n plt.scatter(X[:, 0], X[:, 1], c=Y_plot.flatten(), alpha=1)\n plt.title(title)\n\n\ndef plot3d(X, Y_plot, X_test=None, Y_plot_test=None, title='original'):\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n if (X_test is not None) and (Y_plot_test is not None):\n ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=Y_plot.flatten(), alpha=1)\n ax.scatter(X_test[:, 0], X_test[:, 1], X_test[:, 2], c=Y_plot_test.flatten(), alpha=0.3)\n else:\n ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=Y_plot.flatten(), alpha=1)\n plt.title(title)\n\n\ndef plot_digits(X, digits, title=None, plot_box=True):\n colorlist = get_colors(10)\n # Scale and visualize the embedding vectors\n x_min, x_max = np.min(X, 0), np.max(X, 0)\n X = (X - x_min) / (x_max - x_min)\n\n plt.figure()\n ax = plt.subplot(111)\n for i in range(X.shape[0]):\n plt.text(X[i, 0], X[i, 1], str(digits.target[i]),\n color=colorlist[digits.target[i]],\n fontdict={'weight': 'medium', 'size': 'smaller'})\n\n if plot_box and hasattr(offsetbox, 'AnnotationBbox'):\n # only print thumbnails with matplotlib > 1.0\n shown_images = np.array([[1., 1.]]) # just something big\n for i in range(digits.data.shape[0]):\n dist = np.sum((X[i] - shown_images) ** 2, 1)\n if np.min(dist) < 4e-2:\n # don't show points that are too close\n continue\n shown_images = np.r_[shown_images, [X[i]]]\n imagebox = offsetbox.AnnotationBbox(\n offsetbox.OffsetImage(digits.images[i], cmap=plt.cm.gray_r),\n X[i])\n ax.add_artist(imagebox)\n plt.xticks([]), plt.yticks([])\n plt.xlim(-0.05, 1.05)\n plt.ylim(-0.05, 1.05)\n if title is not None:\n plt.title(title)\n\n\ndef plot_mnist(X, y, X_test=None, y_test=None, title=None):\n plt.figure()\n colorlist = get_colors(10)\n # Scale and visualize the embedding vectors\n x_min, x_max = np.min(X, 0), np.max(X, 0)\n if (X_test is not None) and (y_test is not None):\n x_min, x_max = np.min(np.array([x_min, np.min(X_test, 0)]), 0), np.max(np.array([x_max, np.max(X_test, 0)]), 0)\n X_test = (X_test - x_min) / (x_max - x_min)\n X = (X - x_min) / (x_max - x_min)\n if (X_test is not None) and (y_test is not None):\n for i in range(X_test.shape[0]):\n plt.text(X_test[i, 0], X_test[i, 1], str(y_test[i]),\n color=colorlist[y_test[i]],\n fontdict={'weight': 'medium', 'size': 'smaller'},\n alpha=0.4)\n for i in range(X.shape[0]):\n plt.text(X[i, 0], X[i, 1], str(y[i]),\n color=colorlist[y[i]],\n fontdict={'weight': 'medium', 'size': 'smaller'},\n alpha=1.)\n plt.xticks([]), plt.yticks([])\n plt.xlim(-0.05, 1.05)\n plt.ylim(-0.05, 1.05)\n if title is not None:\n plt.title(title)\n\n\ndef plot_mnist2(X, y, X_test=None, y_test=None, X_original=None, title=None):\n plt.figure()\n ax = plt.subplot(111)\n colorlist = get_colors(10)\n # Scale and visualize the embedding vectors\n x_min, x_max = np.min(X, 0), np.max(X, 0)\n if (X_test is not None) and (y_test is not None):\n x_min, x_max = np.min(np.array([x_min, np.min(X_test, 0)]), 0), np.max(np.array([x_max, np.max(X_test, 0)]), 0)\n X_test = (X_test - x_min) / (x_max - x_min)\n X = (X - x_min) / (x_max - x_min)\n if (X_test is not None) and (y_test is not None):\n for i in range(X_test.shape[0]):\n plt.text(X_test[i, 0], 
X_test[i, 1], str(y_test[i]),\n color=colorlist[y_test[i]],\n fontdict={'weight': 'medium', 'size': 'smaller'},\n alpha=0.4)\n for i in range(X.shape[0]):\n plt.text(X[i, 0], X[i, 1], str(y[i]),\n color=colorlist[y[i]],\n fontdict={'weight': 'medium', 'size': 'smaller'},\n alpha=1.)\n # plot some images on top\n if X_original is not None:\n shown_images = np.array([[1., 1.]]) # just something big\n for i in range(X.shape[0]):\n dist = np.sum((X[i] - shown_images) ** 2, 1)\n if np.min(dist) < 4e-3:\n # don't show points that are too close\n continue\n shown_images = np.r_[shown_images, [X[i]]]\n imagebox = offsetbox.AnnotationBbox(\n offsetbox.OffsetImage(X_original[i].reshape(28, 28), cmap=plt.cm.gray_r),\n X[i])\n ax.add_artist(imagebox)\n plt.xticks([]), plt.yticks([])\n plt.xlim(-0.05, 1.05)\n plt.ylim(-0.05, 1.05)\n if title is not None:\n plt.title(title, fontdict={'weight': 'medium', 'size': 25})\n\n\ndef plot_20news(X, y, target_names, X_test=None, y_test=None, title=None, legend=False):\n colorlist = get_colors(len(target_names))\n\n def plot_scatter(X, y, alpha=1):\n y = np.array(y)\n for i, l in enumerate(target_names):\n plt.scatter(X[y == i, 0], X[y == i, 1], c=colorlist[i], alpha=alpha,\n edgecolors='none', label=l if alpha >= 0.5 else None) # , rasterized=True)\n # plot scatter plot\n plt.figure()\n if (X_test is not None) and (y_test is not None):\n plot_scatter(X_test, y_test, 0.4)\n plot_scatter(X, y, 1.)\n else:\n plot_scatter(X, y, 0.6)\n if legend:\n plt.legend(loc='center left', bbox_to_anchor=(1, 0.5), scatterpoints=1)\n plt.xticks([]), plt.yticks([])\n if title is not None:\n plt.title(title)\n\n\ndef plot_words(X, word_list, title=None):\n # Scale and visualize the embedding vectors\n x_min, x_max = np.min(X, 0), np.max(X, 0)\n X = (X - x_min) / (x_max - x_min)\n plt.figure()\n for i in range(X.shape[0]):\n plt.text(X[i, 0], X[i, 1], word_list[i],\n color='k',\n fontdict={'weight': 'medium', 'size': 'smaller'})\n plt.xticks([]), plt.yticks([])\n plt.xlim(-0.05, 1.2)\n plt.ylim(-0.05, 1.05)\n if title is not None:\n plt.title(title)\n" }, { "alpha_fraction": 0.8168317079544067, "alphanum_fraction": 0.8168317079544067, "avg_line_length": 66.33333587646484, "blob_id": "70fb66551357139323da0eed233bcbb552659952", "content_id": "4b32f224d6c6a2586e2548c03aff5fc282f800a8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 202, "license_type": "permissive", "max_line_length": 90, "num_lines": 3, "path": "/__init__.py", "repo_name": "rootoor-dev/simec", "src_encoding": "UTF-8", "text": "from __future__ import unicode_literals, division, print_function, absolute_import\nfrom .simec import SimilarityEncoder, masked_mse, masked_binary_crossentropy, LastLayerReg\nfrom .utils import center_K\n" }, { "alpha_fraction": 0.5983415842056274, "alphanum_fraction": 0.6092584729194641, "avg_line_length": 55.441078186035156, "blob_id": "9b6e9a2d2d4441be98cc136b40847474e82b48ac", "content_id": "2186024c5fa17c1fab399b328ed8c010fccc6ae1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 16763, "license_type": "permissive", "max_line_length": 209, "num_lines": 297, "path": "/simec.py", "repo_name": "rootoor-dev/simec", "src_encoding": "UTF-8", "text": "from __future__ import unicode_literals, division, print_function, absolute_import\nfrom builtins import range, object\nimport numpy as np\nimport scipy.sparse as sp\nimport tensorflow as tf\nimport keras\nimport 
keras.backend as K\nfrom keras.models import Sequential, Model\nfrom keras.layers import Input, Dense, Reshape\nfrom keras.regularizers import Regularizer\nfrom keras.losses import mean_squared_error, mean_absolute_error, binary_crossentropy\n\n\ndef generate_from_sparse_targets(X, S, mask_value, batch_size=32, shuffle=True):\n # save the shape here in case S is a tensor\n S_out_shape = list(S.shape)\n while True:\n # get index for every epoch\n if shuffle:\n idx = np.array(np.random.permutation(X.shape[0]), dtype=np.int16)\n else:\n idx = np.arange(X.shape[0], dtype=np.int16)\n # generate data for each batch\n for i in range(int(np.ceil(X.shape[0]/batch_size))):\n b_idx = idx[i*batch_size:(i+1)*batch_size]\n S_out_shape[0] = len(b_idx)\n # missing values will be mask_value\n S_batch = mask_value*np.ones(S_out_shape, dtype=np.float16)\n # other entries are filled with entries from corresponding rows of S\n S_batch[S[b_idx].nonzero()] = S[b_idx][S[b_idx].nonzero()]\n yield X[b_idx], S_batch\n\n\ndef masked_mse(mask_value):\n \"\"\"\n https://github.com/fchollet/keras/issues/7065\n compute mean squared error using only those values not equal to mask_value,\n e.g. to deal with missing values in the target similarity matrix\n \"\"\"\n def f(y_true, y_pred):\n mask_true = K.cast(K.not_equal(y_true, mask_value), K.floatx())\n masked_squared_error = K.square(mask_true * (y_true - y_pred))\n # in case mask_true is 0 everywhere, the error would be nan, therefore divide by at least 1\n # this doesn't change anything as where sum(mask_true)==0, sum(masked_squared_error)==0 as well\n masked_mse = K.sum(masked_squared_error, axis=-1) / K.maximum(K.sum(mask_true, axis=-1), 1)\n return masked_mse\n f.__name__ = str('Masked MSE (mask_value={})'.format(mask_value))\n return f\n\n\ndef masked_mae(mask_value):\n \"\"\"\n compute mean absolute error using only those values not equal to mask_value,\n e.g. to deal with missing values in the target similarity matrix\n \"\"\"\n def f(y_true, y_pred):\n mask_true = K.cast(K.not_equal(y_true, mask_value), K.floatx())\n masked_absolute_error = K.abs(mask_true * (y_true - y_pred))\n # in case mask_true is 0 everywhere, the error would be nan, therefore divide by at least 1\n # this doesn't change anything as where sum(mask_true)==0, sum(masked_absolute_error)==0 as well\n masked_mae = K.sum(masked_absolute_error, axis=-1) / K.maximum(K.sum(mask_true, axis=-1), 1)\n return masked_mae\n f.__name__ = str('Masked MAE (mask_value={})'.format(mask_value))\n return f\n\n\ndef masked_binary_crossentropy(mask_value):\n \"\"\"\n compute binary cross-entropy using only those values not equal to mask_value,\n e.g. 
to deal with missing values in the target similarity matrix\n \"\"\"\n def f(y_true, y_pred):\n mask_true = K.cast(K.not_equal(y_true, mask_value), K.floatx())\n masked_bce = mask_true * binary_crossentropy(y_true, y_pred)\n # in case mask_true is 0 everywhere, the error would be nan, therefore divide by at least 1\n # this doesn't change anything as where sum(mask_true)==0, sum(masked_bce)==0 as well\n masked_bce = K.sum(masked_bce, axis=-1) / K.maximum(K.sum(mask_true, axis=-1), 1)\n return masked_bce\n f.__name__ = str('Masked Binary Cross-Entropy (mask_value={})'.format(mask_value))\n return f\n\n\nclass LastLayerReg(Regularizer):\n\n def __init__(self, l2_reg=0., s_ll_reg=0., S_ll=None, orth_reg=0., embedding_dim=0, reshape=None, mask_value=None):\n \"\"\"\n Custom regularizer used for the last layer of a SimEc\n s_ll_reg enforces that W^TW approximates S,\n orth_reg enforces that WW^T approximates lambda*I, i.e. that the vectors are orthogonal (but not necessarily length 1)\n \"\"\"\n self.l2_reg = K.cast_to_floatx(l2_reg)\n self.s_ll_reg = K.cast_to_floatx(s_ll_reg)\n if s_ll_reg > 0.:\n assert (S_ll is not None), \"need to give S_ll\"\n self.S_ll = S_ll\n else:\n self.S_ll = None\n self.orth_reg = K.cast_to_floatx(orth_reg)\n if orth_reg > 0.:\n assert (embedding_dim > 0), \"need to give shape of embedding layer, i.e. x.shape[0]\"\n self.embedding_dim = embedding_dim\n self.reshape = reshape\n if mask_value is None:\n self.errfun = mean_squared_error\n else:\n self.errfun = masked_mse(mask_value)\n\n def __call__(self, x):\n regularization = 0.\n if self.l2_reg > 0.:\n regularization += K.sum(self.l2_reg * K.square(x))\n if self.reshape is None:\n if self.s_ll_reg > 0.:\n regularization += self.s_ll_reg * K.mean(self.errfun(self.S_ll, K.dot(K.transpose(x), x)))\n if self.orth_reg > 0.:\n regularization += self.orth_reg * K.mean(K.square((K.ones((self.embedding_dim, self.embedding_dim)) - K.eye(self.embedding_dim)) * K.dot(x, K.transpose(x))))\n else:\n x_reshaped = K.reshape(x, self.reshape)\n for i in range(self.reshape[2]):\n if self.s_ll_reg > 0.:\n regularization += self.s_ll_reg * K.mean(self.errfun(self.S_ll[:,:,i], K.dot(K.transpose(x_reshaped[:,:,i]), x_reshaped[:,:,i])))\n if self.orth_reg > 0.:\n regularization += self.orth_reg * K.mean(K.square((K.ones((self.embedding_dim, self.embedding_dim)) - K.eye(self.embedding_dim)) * K.dot(x_reshaped[:,:,i], K.transpose(x_reshaped[:,:,i]))))\n return regularization\n\n def get_config(self):\n return {'l2_reg': float(self.l2_reg), 's_ll_reg': float(self.s_ll_reg), 'orth_reg': float(self.orth_reg)}\n\n\nclass SimilarityEncoder(object):\n\n def __init__(self, in_dim, embedding_dim, out_dim, hidden_layers=[], sparse_inputs=False, mask_value=None,\n l2_reg=0.00000001, l2_reg_emb=0.00001, l2_reg_out=0., s_ll_reg=0., S_ll=None, orth_reg=0., W_ll=None,\n wll_frozen=False, opt=0.0005, loss='mse', ll_activation='linear'):\n \"\"\"\n Similarity Encoder (SimEc) neural network model\n\n Input:\n - in_dim: dimensionality of the input feature vector\n - embedding_dim: dimensionality of the embedding layer\n - out_dim: dimensionality of the output / number of targets; if out_dim is a tuple, e.g. 
(n_targets, n_similarities)\n then s_ll_reg and orth_reg are ignored\n - hidden_layers: list with tuples of (number of hidden units [int], activation function [string or keras function])\n - sparse_inputs: boolean, whether the input matrix is a scipy sparse matrix (default: False)\n - mask_value: if some entries of the target matrix are missing, set them e.g. to -100 and then set\n mask_value=-100 such that these entries are ignored when the backprop error is computed\n - l2_reg: float, l2 regularization strength of the hidden layers (default: 0.00000001)\n - l2_reg_emb: float, l2 regularization strength of the embedding (i.e. second to last) layer (default: 0.00001)\n - l2_reg_out: float, l2 regularization strength of the output (i.e. last) layer (default: 0.)\n - s_ll_reg: float, regularization strength for (S - W_ll^T W_ll), i.e. how much the dot product of the\n last layer weights should approximate the target similarities; useful when factoring a square symmetric\n similarity matrix. (default: 0.; if > 0. need to give S_ll): try 100.\n - S_ll: matrix that the dot product of the last layer should approximate (see above), needs to be (out_dim x out_dim)\n - orth_reg: float, regularization strength for (lambda*I - W_ll W_ll^T), i.e. to encourage orthogonal rows in the last layer\n usually only helpful when using many embedding dimensions (> 100): try <= 1.\n - W_ll: matrix that should be used as the (frozen) weights of the last layer; this should be used if you factorize\n an (m x n) matrix R and want to get the mapping for both some (m x D) features as well as some (n x P) features.\n To do this, first train a SimEc to approximate R using the (m x D) feature matrix as input. After training,\n use simec.transform(X) to get the (m x embedding_dim) embedding Y. Then train another SimEc using the\n (n x P) feature matrix as input to approximate R.T and this time set W_ll=Y.T. Then, with both SimEcs you\n can project the (m x D) as well as the (n x P) feature vectors into the same embedding space where their\n scalar product approximates R.\n W_ll could also be initialized by the kPCA embedding of the similarity matrix.\n - wll_frozen: if W_ll is initialized manually (W_ll given), whether the parameters should be frozen (bool, default: False)\n - opt: either a float used as the learning rate for keras.optimizers.Adamax (default: lr=0.0005),\n or a keras optimizers instance that should be used for training the model\n - loss: which loss function to use (if mask_value != None, only 'mse', 'mae', or 'binary_crossentropy'; default: loss='mse').\n - ll_activation: activation function on the last layer. 
If a different loss than mse is used,\n this should probably be changed as well (default: 'linear').\n \"\"\"\n # save some parameters we might need for later checks\n self.in_dim = in_dim\n self.out_dim = out_dim\n self.reshape_output = None\n self.mask_value = mask_value\n ll_reshape = None\n if isinstance(out_dim, tuple):\n ll_reshape = (embedding_dim, out_dim[0], out_dim[1])\n out_dim, self.reshape_output = np.prod(out_dim), out_dim\n # checks for s_ll_regularization\n if s_ll_reg > 0.:\n assert S_ll is not None, \"need S_ll\"\n if self.reshape_output is None:\n assert S_ll.shape == (out_dim, out_dim), \"S_ll needs to be of shape (out_dim x out_dim)\"\n else:\n assert S_ll.shape == (self.reshape_output[0], self.reshape_output[0], self.reshape_output[1]), \"S_ll needs to be of shape (out_dim x out_dim x n_similarities)\"\n # inputs - might be sparse\n inputs = Input(shape=(in_dim,), sparse=sparse_inputs)\n # linear simec only gets the linear layer that maps to the embedding\n if not hidden_layers:\n embedding = Dense(embedding_dim, activation='linear',\n kernel_regularizer=keras.regularizers.l2(l2_reg_emb))(inputs)\n else:\n # add additional hidden layers (first one acts on the input)\n # hidden_layers should be a list with (h_layer_dim, activation)\n for i, h in enumerate(hidden_layers):\n if i == 0:\n x = Dense(h[0], activation=h[1],\n kernel_regularizer=keras.regularizers.l2(l2_reg))(inputs)\n else:\n x = Dense(h[0], activation=h[1],\n kernel_regularizer=keras.regularizers.l2(l2_reg))(x)\n # after the hidden layers, add the embedding layer\n embedding = Dense(embedding_dim, activation='linear',\n kernel_regularizer=keras.regularizers.l2(l2_reg_emb))(x)\n # add another linear layer to get the linear approximation of the target similarities\n if W_ll is None:\n outputs = Dense(out_dim, activation=ll_activation, use_bias=False,\n kernel_regularizer=LastLayerReg(l2_reg_out, s_ll_reg, S_ll, orth_reg, embedding_dim, ll_reshape, mask_value))(embedding)\n else:\n assert W_ll.shape[0] == embedding_dim, \"W_ll.shape[0] doesn't match embedding_dim (%i != %i)\" % (W_ll.shape[0], embedding_dim)\n assert W_ll.shape[1] == out_dim, \"W_ll.shape[1] doesn't match out_dim (%i != %i)\" % (W_ll.shape[1], out_dim)\n outputs = Dense(out_dim, activation=ll_activation, use_bias=False, trainable=not wll_frozen, weights=[W_ll])(embedding)\n # possibly reshape the output if multiple similarities are used as targets\n if self.reshape_output is not None:\n outputs = Reshape(self.reshape_output)(outputs)\n # put it all into a model\n self.model = Model(inputs=inputs, outputs=outputs)\n # compile the model to minimize the MSE\n if isinstance(opt, float):\n opt = keras.optimizers.Adamax(lr=opt)\n if mask_value is None:\n self.model.compile(optimizer=opt, loss=loss)\n else:\n assert loss in (\"mse\", \"mae\", \"binary_crossentropy\"), \"Loss %s not implemented for target matrices with missing values. 
Use 'mse', 'mae', or 'binary_crossentropy'.\" % loss\n if loss == \"binary_crossentropy\":\n self.model.compile(optimizer=opt, loss=masked_binary_crossentropy(mask_value))\n elif loss == \"mae\":\n self.model.compile(optimizer=opt, loss=masked_mae(mask_value))\n else:\n self.model.compile(optimizer=opt, loss=masked_mse(mask_value))\n\n # placeholder for embedding model\n self.model_embed = None\n\n def fit(self, X, S, epochs=25, batch_size=32, verbose=1):\n \"\"\"\n Train the SimEc model\n\n Input:\n - X: n x in_dim feature matrix\n - S: n x out_dim target similarity matrix\n - epochs: int, number of epochs to train (default: 25)\n - batch_size: int, number of samples per batch (default: 32)\n - verbose: given to the keras fit function, default: 1\n\n After training is complete, the SimEc object has another attribute \"model_embed\",\n which can be used to project the input feature vectors to the embedding space\n \"\"\"\n if np.max(np.abs(S)) > 5.:\n print(\"Warning: For best results, S (and X) should be normalized (try S /= np.max(np.abs(S))).\")\n assert X.shape[1] == self.in_dim, \"input dim of data doesn't match (%i != %i)\" % (X.shape[1], self.in_dim)\n assert X.shape[0] == S.shape[0], \"number of samples for inputs and targets doesn't match (%i != %i)\" % (X.shape[0], S.shape[0])\n if self.reshape_output is None:\n assert S.shape[1] == self.out_dim, \"output dim of targets doesn't match (%i != %i)\" % (S.shape[1], self.out_dim)\n else:\n assert S.shape[1:] == self.reshape_output, \"output dims of targets don't match (%r != %r)\" % (S.shape[1:], self.reshape_output)\n if self.mask_value is not None and sp.issparse(S):\n self.model.fit_generator(generate_from_sparse_targets(X, S, self.mask_value, batch_size),\n epochs=epochs, verbose=verbose, steps_per_epoch=int(np.ceil(X.shape[0]/batch_size)))\n else:\n self.model.fit(X, S, epochs=epochs, batch_size=batch_size, verbose=verbose)\n # store the model we need for the prediction\n if self.reshape_output is None:\n self.model_embed = Sequential(self.model.layers[:-1])\n else:\n self.model_embed = Sequential(self.model.layers[:-2])\n\n def transform(self, X, warn=True):\n \"\"\"\n Project the input feature vectors to the embedding space\n\n Input:\n - X: m x in_dim feature matrix\n\n Returns:\n - Y: m x embedding_dim embedding matrix\n \"\"\"\n assert X.shape[1] == self.in_dim, \"input dim of data doesn't match (%i != %i)\" % (X.shape[1], self.in_dim)\n if self.model_embed is None and warn:\n print(\"WARNING: model is not fitted yet!\")\n return self.model_embed.predict(X)\n\n def predict(self, X, warn=True):\n \"\"\"\n Generate the output of the network, i.e. 
the predicted similarities\n\n Input:\n - X: m x in_dim feature matrix\n\n Returns:\n - S': m x out_dim output matrix with approximated similarities to the out_dim targets\n \"\"\"\n assert X.shape[1] == self.in_dim, \"input dim of data doesn't match (%i != %i)\" % (X.shape[1], self.in_dim)\n if self.model_embed is None and warn:\n print(\"WARNING: model is not fitted yet!\")\n return self.model.predict(X)\n" }, { "alpha_fraction": 0.5853450894355774, "alphanum_fraction": 0.6109060049057007, "avg_line_length": 40.91666793823242, "blob_id": "e07b2ef35208dfebd3367bc2be0e19fabfa82c1d", "content_id": "087873962b996fad24da7208c6270a525e7b742d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3521, "license_type": "permissive", "max_line_length": 115, "num_lines": 84, "path": "/utils_datasets.py", "repo_name": "rootoor-dev/simec", "src_encoding": "UTF-8", "text": "from __future__ import unicode_literals, division, print_function, absolute_import\nfrom builtins import range\nimport numpy as np\nfrom sklearn.datasets import make_circles, make_blobs, make_swiss_roll, make_s_curve\nfrom sklearn.utils import check_random_state\n\n\ndef make_3_circles(n_samples, random_state=1):\n random_state = check_random_state(random_state)\n X = np.ones((3 * n_samples, 3))\n Y_plot = np.ones((3 * n_samples, 1))\n X[:n_samples, :2], _ = make_circles(n_samples=n_samples, noise=0.05, factor=.01, random_state=random_state)\n X[:n_samples, 2] *= -1\n Y_plot[:n_samples, 0] = 1\n X[n_samples:2 * n_samples, :2], _ = make_circles(n_samples=n_samples,\n noise=0.05, factor=.01, random_state=random_state)\n X[n_samples:2 * n_samples, 2] = 0\n Y_plot[n_samples:2 * n_samples, 0] = 2\n X[2 * n_samples:, :2], _ = make_circles(n_samples=n_samples, noise=0.05, factor=.01, random_state=random_state)\n Y_plot[2 * n_samples:, 0] = 3\n # shuffle examples\n idx = random_state.permutation(list(range(3 * n_samples)))\n X, Y_plot = X[idx, :], Y_plot[idx, :]\n # cut to actual size\n X, Y_plot = X[:n_samples, :], Y_plot[:n_samples, :]\n return X, Y_plot\n\n\ndef make_sphere(n_samples, random_state=1):\n # Create our sphere.\n random_state = check_random_state(random_state)\n p = random_state.rand(int(n_samples * 1.5)) * (2 * np.pi - 0.5)\n t = random_state.rand(int(n_samples * 1.5)) * np.pi\n\n # Sever the poles from the sphere.\n indices = ((t < (np.pi - (np.pi / 10))) & (t > ((np.pi / 10))))\n colors = p[indices]\n x, y, z = np.sin(t[indices]) * np.cos(p[indices]), \\\n np.sin(t[indices]) * np.sin(p[indices]), \\\n np.cos(t[indices])\n sphere_data = np.array([x, y, z]).T\n return sphere_data[:n_samples, :], colors[:n_samples]\n\n\ndef make_broken_swiss_roll(n_samples, random_state=1):\n # get original swiss roll\n X, Y_plot = make_swiss_roll(2 * n_samples, random_state=random_state)\n # cut off a part\n X, Y_plot = X[X[:, 0] > -5, :], Y_plot[X[:, 0] > -5]\n # get desired number of samples\n X, Y_plot = X[:n_samples, :], Y_plot[:n_samples]\n return X, Y_plot\n\n\ndef make_peaks(n_samples, random_state=1):\n # get randomly sampled 2d grid\n random_state = check_random_state(random_state)\n X = 10. 
* random_state.rand(n_samples, 3)\n # have as 3rd dimension some peaks\n X[X[:, 0] <= 5, 2] = np.cos(0.9 * (X[X[:, 0] <= 5, 1] - 2))\n X[X[:, 0] > 5, 2] = np.cos(0.5 * (X[X[:, 0] > 5, 1] - 5))\n # 3rd dim is also the color\n Y_plot = X[:, 2]\n return X, Y_plot\n\n\ndef load_dataset(dataset, n_samples, random_state=1, n_features=3):\n # wrapper function to load one of the 3d datasets\n if dataset == 's_curve':\n return make_s_curve(n_samples, random_state=random_state)\n elif dataset == 'swiss_roll':\n return make_swiss_roll(n_samples, random_state=random_state)\n elif dataset == 'broken_swiss_roll':\n return make_broken_swiss_roll(n_samples, random_state=random_state)\n elif dataset == 'sphere':\n return make_sphere(n_samples, random_state=random_state)\n elif dataset == '3_circles':\n return make_3_circles(n_samples, random_state=random_state)\n elif dataset == 'peaks':\n return make_peaks(n_samples, random_state=random_state)\n elif dataset == 'blobs':\n return make_blobs(n_samples, n_features=n_features, centers=3, random_state=random_state)\n else:\n print(\"unknown dataset\")\n" }, { "alpha_fraction": 0.6512924432754517, "alphanum_fraction": 0.6604155898094177, "avg_line_length": 40.10416793823242, "blob_id": "99f86333ca569a18947a2c4d9d58b12719a4707a", "content_id": "6440a0737feda7ccd7d0c4a31b257177f8c63707", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3946, "license_type": "permissive", "max_line_length": 122, "num_lines": 96, "path": "/utils.py", "repo_name": "rootoor-dev/simec", "src_encoding": "UTF-8", "text": "from __future__ import unicode_literals, division, print_function, absolute_import\nimport numpy as np\nfrom scipy.spatial.distance import pdist\nfrom scipy.stats import spearmanr, pearsonr\n\n\ndef compute_W_ll(S_ll, embedding_dim):\n \"\"\"\n given the square and symmetric similarity matrix used for the last layer regularization,\n compute the optimal weights W_ll via eigendecomposition\n\n Inputs:\n - S_ll: square (and symmetric) similarity matrix\n - embedding_dim: embedding dimensionality of the SimEc network\n Returns:\n - W_ll: embedding_dim x S_ll.shape[1] optimal weight matrix\n \"\"\"\n D, V = np.linalg.eig(S_ll)\n D, V = D[np.argsort(D)[::-1]], V[:, np.argsort(D)[::-1]]\n W_ll = np.dot(V.real[:, :embedding_dim], np.diag(np.sqrt(np.abs(D.real[:embedding_dim]))))\n return W_ll.T\n\n\ndef center_K(K):\n \"\"\"\n Center the given square (and symmetric) kernel matrix\n\n Input:\n - K: square (and symmetric) kernel (similarity) matrix\n Returns:\n - centered kernel matrix (like if you had subtracted the mean from the input data)\n \"\"\"\n n, m = K.shape\n assert n == m, \"Kernel matrix needs to be square\"\n H = np.eye(n) - np.tile(1. 
/ n, (n, n))\n B = np.dot(np.dot(H, K), H)\n return (B + B.T) / 2\n\n\ndef check_embed_match(X_embed1, X_embed2):\n \"\"\"\n Check whether the two embeddings are almost the same by computing their normalized euclidean distances\n in the embedding space and checking the correlation.\n\n Inputs:\n - X_embed1, X_embed2: two Nxd matrices with coordinates in the embedding space\n Returns:\n - msq, r^2, rho: mean squared error, R^2, and Spearman correlation coefficient between the distance matrices of\n both embeddings (mean squared error is more exact, corrcoef a more relaxed error measure)\n \"\"\"\n D_emb1 = pdist(X_embed1, 'euclidean')\n D_emb2 = pdist(X_embed2, 'euclidean')\n D_emb1 /= D_emb1.max()\n D_emb2 /= D_emb2.max()\n # compute mean squared error\n msqe = np.mean((D_emb1 - D_emb2) ** 2)\n # compute Spearman correlation coefficient\n rho = spearmanr(D_emb1.flatten(), D_emb2.flatten())[0]\n # compute Pearson correlation coefficient\n r = pearsonr(D_emb1.flatten(), D_emb2.flatten())[0]\n return msqe, r**2, rho\n\n\ndef check_similarity_match(X_embed, S, X_embed_is_S_approx=False, norm=False):\n \"\"\"\n Since SimEcs are supposed to project the data into an embedding space where the target similarities\n can be linearly approximated; check if X_embed*X_embed^T = S\n (check mean squared error, R^2, and Spearman correlation coefficient)\n\n Inputs:\n - X_embed: Nxd matrix with coordinates in the embedding space\n - S: NxN matrix with target similarities (do whatever transformations were done before using this\n as input to the SimEc, e.g. centering, etc.)\n Returns:\n - msq, r^2, rho: mean squared error, R^2, and Spearman correlation coefficient between linear kernel of embedding\n and target similarities (mean squared error is more exact, corrcoef a more relaxed error measure)\n \"\"\"\n if X_embed_is_S_approx:\n S_approx = X_embed\n else:\n # compute linear kernel as approximated similarities\n S_approx = X_embed.dot(X_embed.T).real\n # to get results that are comparable across similarity measures, we have to normalize them somehow,\n # in this case by dividing by the absolute max value of the target similarity matrix\n if norm:\n S_norm = S / np.max(np.abs(S))\n S_approx /= np.max(np.abs(S_approx))\n else:\n S_norm = S\n # compute mean squared error\n msqe = np.mean((S_norm - S_approx) ** 2)\n # compute Spearman correlation coefficient\n rho = spearmanr(S_norm.flatten(), S_approx.flatten())[0]\n # compute Pearson correlation coefficient\n r = pearsonr(S_norm.flatten(), S_approx.flatten())[0]\n return msqe, r**2, rho\n" } ]
9
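The SimilarityEncoder in the record above is the core API of that repo: construct it with input, embedding, and output dimensions, fit it on a (possibly masked) target similarity matrix, then call transform for embeddings and predict for approximated similarities. A minimal usage sketch follows; the synthetic data, sizes, and epoch count are assumptions of this example, while the constructor, fit(), transform(), and predict() calls match the class definition above (assumed to be in scope).

```python
# Minimal SimEc usage sketch -- illustrative only: the data is synthetic and
# the sizes/epochs are assumed; the API calls come from the class above.
import numpy as np

n, in_dim, embedding_dim = 500, 50, 2
X = np.random.randn(n, in_dim)
S = X.dot(X.T)               # target similarities (here: a linear kernel)
S /= np.max(np.abs(S))       # normalize, as fit() itself recommends

simec = SimilarityEncoder(in_dim, embedding_dim, out_dim=n)
simec.fit(X, S, epochs=10)
Y = simec.transform(X)       # n x embedding_dim embedding
S_approx = simec.predict(X)  # n x n approximated similarities

# With missing targets: mark them with a sentinel (e.g. -100) and pass
# mask_value=-100 so the masked_* losses above ignore those entries.
simec_masked = SimilarityEncoder(in_dim, embedding_dim, out_dim=n, mask_value=-100)
```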
kalpajpise/Email
https://github.com/kalpajpise/Email
33659c08ce775bd0da0981d515728e2f3c2a5296
01ec0826d15276a9e37d09b9457c1decf0a18089
e59ef5e3c6430f119ab1033bc70b953293dcb371
refs/heads/master
2020-05-31T02:28:56.284433
2019-06-08T03:39:19
2019-06-08T03:39:19
190,066,611
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6963906288146973, "alphanum_fraction": 0.7197452187538147, "avg_line_length": 38.25, "blob_id": "d34d16560f03c701c0361fa6af1c5d6502d32631", "content_id": "d11d9a4226e6516e28962d93e93587e9b563a44e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 471, "license_type": "no_license", "max_line_length": 89, "num_lines": 12, "path": "/README.md", "repo_name": "kalpajpise/Email", "src_encoding": "UTF-8", "text": "# ========= Email - Autogenerate =========\n\n\n## Set of instruction to function\n\n<br>1.System must have python software & internet connection should be active.\n<br>2.Install Flask package, for installing open command prompt type \"pip install flask\".\n<br>3.On Command Prompt open WebPage folder.\n<br>3.Then type \"python main.py\" on command prompt.\n<br>4.Copy the url from cmd or type \"http://localhost:5000\" in web browser.\n<br>5.Enjoy the code.\n<br>6.Thanks for using it.\n" }, { "alpha_fraction": 0.6400356292724609, "alphanum_fraction": 0.6430056691169739, "avg_line_length": 34.08333206176758, "blob_id": "435e614060c7884a0efa43fca0f574244cc5d620", "content_id": "1764cf90e419e9543ebfb6d8191298274535ba99", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3367, "license_type": "no_license", "max_line_length": 76, "num_lines": 96, "path": "/Code/Email_csv.py", "repo_name": "kalpajpise/Email", "src_encoding": "UTF-8", "text": "import smtplib\nimport ssl\nimport csv\nfrom email import encoders\nfrom email.mime.base import MIMEBase\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\n\n\ndef process(receiver_email, attachment_file):\n # Create a multipart message and set headers\n\n message = MIMEMultipart()\n message[\"From\"] = sender_email\n message[\"To\"] = receiver_email\n message[\"Subject\"] = subject\n # message[\"Bcc\"] = receiver_email # Recommended for mass emails\n\n # Add body to email\n message.attach(MIMEText(body, \"plain\"))\n\n filename = f\"{attachment_file}\" # In same directory as script\n\n # Open PDF file in binary mode\n with open(filename, \"rb\") as attachment:\n # Add file as application/octet-stream\n # Email client can usually download this automatically as attachment\n part = MIMEBase(\"application\", \"octet-stream\")\n part.set_payload(attachment.read())\n\n # Encode file in ASCII characters to send by email\n encoders.encode_base64(part)\n\n # Add header as key/value pair to attachment part\n part.add_header(\n \"Content-Disposition\",\n f\"attachment; filename= {filename}\",\n )\n\n # Add attachment to message and convert message to string\n message.attach(part)\n return message.as_string()\n # text1: str = message.as_string()\n # return text1\n\n\n# def body_compute(name):\n# body_template = \"\"\"Dear {name},\n# {body_input}\"\"\"\n# body_input = \" This is input from the web interface \"\n# return body_template.format(body_input,name)\n\ndef makemail(email, password, body, url):\n port = 465 # Initalize the port for the ssl\n smtp_server = \"smtp.gmail.com\" # Initalize the smtp server\n sender_email = email # sender mail\n subject = \"An email with attachment from Python\"\n context = ssl.create_default_context()\n\n # Log in to server using secure context and send email\n\n with smtplib.SMTP_SSL(smtp_server, port, context=context) as server:\n server.login(sender_email, password)\n #csv_file = \"D:\\Projects\\Pycharm\\email.csv\"\n csv_file = url\n with open(csv_file) as 
file:\n reader = csv.reader(file)\n next(reader) # Skip header row\n for name, usn, email, attach_file in reader:\n text = process(email, attach_file)\n server.sendmail(sender_email, email, text)\n server.close()\n''' \nif __name__ == '__main__':\n\n port = 465 # Initalize the port for the ssl\n smtp_server = \"smtp.gmail.com\" # Initalize the smtp server\n sender_email = \"dojobalways@gmail.com\" # sender mail\n password = input(\"Type your password and press enter:\") # password\n subject = \"An email with attachment from Python\"\n body = \"This the body\"\n context = ssl.create_default_context()\n\n # Log in to server using secure context and send email\n\n with smtplib.SMTP_SSL(smtp_server, port, context=context) as server:\n server.login(sender_email, password)\n csv_file = \"D:\\Projects\\Pycharm\\email.csv\"\n with open(csv_file) as file:\n reader = csv.reader(file)\n next(reader) # Skip header row\n for name, usn, email, attach_file in reader:\n text = process(email, attach_file)\n server.sendmail(sender_email, email, text)\n server.close()\n'''" }, { "alpha_fraction": 0.7395017743110657, "alphanum_fraction": 0.744484007358551, "avg_line_length": 27.67346954345703, "blob_id": "006c76de158b32540167f2770277ca83e1edc9cf", "content_id": "4874f3dcf85101ce387b998258d3156087c61f8d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1405, "license_type": "no_license", "max_line_length": 77, "num_lines": 49, "path": "/Code/Email_attachment.py", "repo_name": "kalpajpise/Email", "src_encoding": "UTF-8", "text": "import email, smtplib, ssl\n\nfrom email import encoders\nfrom email.mime.base import MIMEBase\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\n\nsubject = \"Email with the attachment\"\nbody = \"Hi ma Friend Amey PICKLE\"\nsender_email = \"dojobalways@gmail.com\"\nreceiver_email = \"kalpaj12@gmail.com\"\nreceiver_email1 = \"ameyaditya.j@gmail.com\"\npassword = input(\"Type your password and press enter:\")\n\n# Create a multipart message and set headers\nmessage = MIMEMultipart()\nmessage[\"From\"] = sender_email\nmessage[\"To\"] = receiver_email\nmessage[\"Subject\"] = subject\n\n# Add body to email\nmessage.attach(MIMEText(body, \"plain\"))\n\nfilename = \"D:\\Projects\\College-Work\\Ityukta\\Cerificates\\Winner\\Pdf\\Amey.pdf\"\n\n# Open PDF file in binary mode\nwith open(filename, \"rb\") as attachment:\n part = MIMEBase(\"application\", \"octet-stream\")\n part.set_payload(attachment.read())\n\nencoders.encode_base64(part)\n\npart.add_header(\n \"Content-Disposition\",\n f\"attachment; filename= {filename}\",\n)\n\nmessage.attach(part)\ntext = message.as_string()\n\n# Add attachment to message and convert message to string\nmessage.attach(part)\ntext = message.as_string()\n\n# Log in to server using secure context and send email\ncontext = ssl.create_default_context()\nwith smtplib.SMTP_SSL(\"smtp.gmail.com\", 465, context=context) as server:\n server.login(sender_email, password)\n server.sendmail(sender_email, [receiver_email, receiver_email1], text)\n" }, { "alpha_fraction": 0.6795421838760376, "alphanum_fraction": 0.6838340759277344, "avg_line_length": 29.34782600402832, "blob_id": "f435fc2c64a43da2dfedd89a305711c7eef7c84d", "content_id": "d09248622e605657cf5cdbe22da31f31f51382df", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 699, "license_type": "no_license", "max_line_length": 77, "num_lines": 23, "path": 
"/Code/Email.py", "repo_name": "kalpajpise/Email", "src_encoding": "UTF-8", "text": "import email, smtplib, ssl\n\nport = 465\nsmtp_server = \"smtp.gmail.com\" # this is the server smtp for the gmail\nsender_email = \"dojobalways@gmail.com\" # Sender Email\nreceiver_email = \"hdwork247@gmail.com\" # Receiver email\nprint(sender_email)\npassword = input(\"Enter the password\\n\") # Password Of Sender Email\nsubject = \"Achievement on ITYUKTA fest\"\nmessage = \"\"\"\n\n\n This is the code where it sends the email to the other person !!!!!!!\n u can go through with it \n\n\n \"\"\" # Body of the Email.\n\ncontext = ssl.create_default_context()\n\nwith smtplib.SMTP_SSL(smtp_server, port, context=context) as server:\n server.login(\"dojobalways@gmail.com\", password)\n server.sendmail(sender_email, receiver_email, message)\n\n" } ]
4
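makemail() in Email_csv.py above expects its CSV to have a header row followed by name, usn, email, attach_file columns, and it sends one attachment per row over SMTP_SSL. A hedged sketch of the expected file and call follows; every address, password, and path below is a placeholder for illustration, not taken from the repo.

```python
# Illustrative only -- placeholder addresses, password, and paths; the
# makemail(email, password, body, url) signature comes from Email_csv.py above.
#
# contacts.csv (header row is skipped by makemail's next(reader)):
#   name,usn,email,attach_file
#   Amey,1XX15CS001,friend@example.com,certificates/Amey.pdf

makemail("sender@example.com", "app-password",
         "Hi, your certificate is attached.", "contacts.csv")
```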
YukiSora/bluebell-adventures
https://github.com/YukiSora/bluebell-adventures
44e018df3b45a8ba29e765de57e358bc2ede24f6
b71d452dcec36746d5e7ead001fd23ad7bea82a2
c7ffe40b385bc75b3d04abdedcca0f7b94805a81
refs/heads/master
2021-01-18T04:13:49.899145
2016-06-29T10:47:01
2016-06-29T10:47:01
56,126,913
0
0
null
2016-04-13T06:30:01
2016-05-26T00:19:35
2016-06-01T04:34:11
null
[ { "alpha_fraction": 0.6042065024375916, "alphanum_fraction": 0.6089866161346436, "avg_line_length": 28.885713577270508, "blob_id": "c3a7d9408a99726eade99b78cf48793ee8be2a72", "content_id": "222a3fb9a84010a4f8b61442aa658981acae346e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1046, "license_type": "permissive", "max_line_length": 121, "num_lines": 35, "path": "/src/BluebellAdventures/DatabaseDriver.java", "repo_name": "YukiSora/bluebell-adventures", "src_encoding": "UTF-8", "text": "package BluebellAdventures;\n\nimport java.sql.ResultSet;\nimport java.sql.SQLException;\n\nimport java.util.Date;\nimport java.text.SimpleDateFormat;\n\nimport Megumin.Database.Database;\n\npublic class DatabaseDriver {\n \n\n public static void main(String[] args) {\n Date dt = new java.util.Date();\n SimpleDateFormat sdf = new SimpleDateFormat(\"yyyy-MM-dd HH:mm:ss\");\n\n String currentTime = sdf.format(dt);\n try {\n //Init database\n Database.createDatabase(\"jdbc:mysql://localhost:3306/BluebellAdventuresRecord\", \"root\", \"root\");\n\n //INSERT UPDATE DELETE\n Database.getInstance().update(\"INSERT INTO Records (Score, Date_Time) VALUE('\"+ 1 +\"','\"+ currentTime +\"')\");\n\n //SELECT\n ResultSet result = Database.getInstance().query(\"SELECT * FROM Records\");\n while (result.next()) {\n System.out.println(result.getString(\"Date_Time\"));\n }\n } catch (SQLException e) {\n System.out.println(e);\n }\n }\n}\n" }, { "alpha_fraction": 0.6539895534515381, "alphanum_fraction": 0.6674124002456665, "avg_line_length": 33.38461685180664, "blob_id": "fbeb34604a8f49ec9e5d2365aa7d3e9cba26da4d", "content_id": "d753bc0d8906fb45d8c98fc916bb49ef7383681e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1341, "license_type": "permissive", "max_line_length": 93, "num_lines": 39, "path": "/src/BluebellAdventures/CreateScenes/CreateMenuScene.java", "repo_name": "YukiSora/bluebell-adventures", "src_encoding": "UTF-8", "text": "package BluebellAdventures.CreateScenes;\n\nimport java.io.IOException;\n\nimport Megumin.Nodes.Director;\nimport Megumin.Nodes.Layer;\nimport Megumin.Nodes.Scene;\nimport Megumin.Nodes.Sprite;\nimport Megumin.Point;\n\npublic class CreateMenuScene {\n public static Scene createMenuScene() throws IOException {\n //init sprite\n Sprite singlePlayer = new Sprite(\"resource/image/tag_single.png\", new Point(0, 100));\n Sprite multiPlayer = new Sprite(\"resource/image/tag_multi.png\", new Point(0, 250));\n Sprite setting = new Sprite(\"resource/image/tag_setting.png\", new Point(00, 400));\n Sprite exit = new Sprite(\"resource/image/tag_quit.png\", new Point(0, 550));\n Sprite background = new Sprite(\"resource/image/character_design.png\");\n singlePlayer.setName(\"single player\");\n exit.setName(\"exit\");\n\n //init layer\n Layer tabLayer = new Layer();\n tabLayer.addSprite(singlePlayer);\n tabLayer.addSprite(multiPlayer);\n tabLayer.addSprite(setting);\n tabLayer.addSprite(exit);\n Layer backgroundLayer = new Layer();\n backgroundLayer.addSprite(background);\n\n //init scene\n Scene menu = new Scene();\n menu.setName(\"menu\");\n menu.addLayer(tabLayer);\n menu.addLayer(backgroundLayer, 0);\n\n return menu;\n }\n}\n" }, { "alpha_fraction": 0.6311110854148865, "alphanum_fraction": 0.648888885974884, "avg_line_length": 31, "blob_id": "864518277661d8f67815a654b345899d54205eee", "content_id": "304a211fadc6136e837651965b7c917fc21cad78", "detected_licenses": [ "MIT" ], 
"is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 225, "license_type": "permissive", "max_line_length": 83, "num_lines": 7, "path": "/game.sh", "repo_name": "YukiSora/bluebell-adventures", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nif [[ $1 == 'make' ]]; then\n javac -d bin -classpath :src:resource/mysql.jar: src/BluebellAdventures/$2.java\nelif [[ $1 == 'run' ]]; then\n java -classpath :bin:resource/mysql.jar: BluebellAdventures/$2\nfi\n\n" }, { "alpha_fraction": 0.4317548871040344, "alphanum_fraction": 0.4336118996143341, "avg_line_length": 14.838234901428223, "blob_id": "9814f5adb46ae940908cb7dabaf9e4b812f53d22", "content_id": "8c49a9c51035cc954731c976676a2e1180267ef2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1077, "license_type": "permissive", "max_line_length": 68, "num_lines": 68, "path": "/src/Megumin/Point.java", "repo_name": "YukiSora/bluebell-adventures", "src_encoding": "UTF-8", "text": "package Megumin;\n\npublic class Point {\n private int x;\n private int y;\n\n public Point() {\n x = 0;\n y = 0;\n }\n\n public Point(Point p) {\n x = p.getX();\n y = p.getY();\n }\n\n public Point(int x, int y) {\n this.x = x;\n this.y = y;\n }\n\n public int getX() {\n return x;\n }\n\n public void setX(int x) {\n this.x = x;\n }\n\n public int getY() {\n return y;\n }\n\n public void setY(int y) {\n this.y = y;\n }\n\n public Point getPoint() {\n return this;\n }\n\n public void setPoint(Point p) {\n x = p.getX();\n y = p.getY();\n }\n\n public void setPoint(int x, int y) {\n this.x = x;\n this.y = y;\n }\n\n public Point offset(int dx, int dy) {\n x += dx;\n y += dy;\n\n return this;\n }\n\n @Override\n public boolean equals(Object obj) {\n return ((Point)obj).getX() == x && ((Point)obj).getY() == y;\n }\n\n @Override\n public String toString() {\n return \"{x = \" + x + \", y = \" + y + \"}\";\n }\n}\n" }, { "alpha_fraction": 0.47289156913757324, "alphanum_fraction": 0.47921687364578247, "avg_line_length": 26.213115692138672, "blob_id": "2ce6a9e31ed985f03017b88ef8f4b89df8ad6730", "content_id": "85dcf3d47b2960e265f7e27ce41b6205c59c6533", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 3320, "license_type": "permissive", "max_line_length": 97, "num_lines": 122, "path": "/src/BluebellAdventures/Characters/GameMap.java", "repo_name": "YukiSora/bluebell-adventures", "src_encoding": "UTF-8", "text": "package BluebellAdventures.Characters;\n\nimport java.awt.image.BufferedImage;\nimport java.io.File;\nimport java.io.FileInputStream;\nimport java.io.IOException;\nimport javax.imageio.ImageIO;\n\nimport Megumin.Nodes.Sprite;\nimport Megumin.Point;\n\npublic class GameMap extends Sprite {\n private static GameMap map;\n private byte[][] path;\n\n public GameMap() {\n }\n\n public GameMap(String filename) throws IOException {\n super(filename, new Point(0, 0));\n }\n\n public static GameMap getInstance(String filename) throws IOException {\n if (map == null) {\n map = new GameMap(filename);\n }\n else {\n map.setImage(ImageIO.read(new File(filename)));\n }\n\n return map;\n }\n\n public static GameMap getInstance() {\n if (map == null) {\n map = new GameMap();\n }\n\n return map;\n }\n\n public static boolean characterCollision(Sprite sprite, int moveX, int moveY) {\n byte[][] path = map.getPath();\n\n //map position\n //this is negative\n int mapX = map.getPosition().getX();\n int mapY = map.getPosition().getY();\n\n //character 
postion\n int x = sprite.getPosition().getX() + moveX;\n int y = sprite.getPosition().getY() + moveY;\n int w = sprite.getSize().getX();\n int h = sprite.getSize().getY();\n\n //check 4 sides of sprite whether any point in path is 0\n for (int i = 0; i < w; i++) {\n if (path[-mapY + y][-mapX + x + i] == 0 || path[-mapY + y + h][-mapX + x + i] == 0) {\n return true;\n }\n }\n for (int i = 0; i < h; i++) {\n if (path[-mapY + y + i][-mapX + x] == 0 || path[-mapY + y + i][-mapX + x + w] == 0) {\n return true;\n }\n }\n\n return false;\n }\n\n public static boolean enemyCollision(Sprite sprite, int moveX, int moveY) {\n byte[][] path = map.getPath();\n\n //enemy and map in same coordinated system\n //so don't need mapX and mapY\n\n int x = sprite.getPosition().getX() + moveX;\n int y = sprite.getPosition().getY() + moveY;\n int w = sprite.getSize().getX();\n int h = sprite.getSize().getY();\n\n for (int i = 0; i < w; i++) {\n if (path[y][x + i] == 0 || path[y + h][x + i] == 0) {\n return true;\n }\n }\n for (int i = 0; i < h; i++) {\n if (path[y + i][x] == 0 || path[y + i][x + w] == 0) {\n return true;\n }\n }\n\n return false;\n }\n\n public GameMap setPath(String filename) throws IOException {\n //read map path file and save into byte array\n path = new byte[getSize().getY()][getSize().getX()];\n int i = 0, j = 0;\n try (FileInputStream in = new FileInputStream(filename)) {\n int c;\n while ((c = in.read()) != -1) {\n //skip \\r for windows\n if (c == '\\r') {\n }\n else if (c == '\\n') {\n i++;\n j = 0;\n }\n else {\n path[i][j++] = (byte)(c - '0');\n }\n }\n }\n\n return this;\n }\n\n public byte[][] getPath() {\n return path;\n }\n}\n" }, { "alpha_fraction": 0.6570363640785217, "alphanum_fraction": 0.6663560271263123, "avg_line_length": 26.512821197509766, "blob_id": "d2debff142abe31e57e30c03e16ea0338f3b30d6", "content_id": "3ec5b5ad9a07b79906728ea1954ac06f6eca0797", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1073, "license_type": "permissive", "max_line_length": 55, "num_lines": 39, "path": "/src/BluebellAdventures/Actions/CharacterCollision.java", "repo_name": "YukiSora/bluebell-adventures", "src_encoding": "UTF-8", "text": "package BluebellAdventures.Actions;\n\nimport BluebellAdventures.Characters.Character;\nimport BluebellAdventures.Characters.GameMap;\n\nimport Megumin.Actions.Action;\nimport Megumin.Actions.Effect;\nimport Megumin.Audio.AudioEngine;\nimport Megumin.Nodes.Sprite;\n\npublic class CharacterCollision extends Effect {\n private Action action;\n\n CharacterCollision(Action action) {\n this.action = action;\n }\n\n @Override\n public void update(Sprite sprite) {\n AudioEngine.getInstance().stop(\"nervous\");\n AudioEngine.getInstance().play(\"attacking\");\n AudioEngine.getInstance().loop(\"main\");\n\n Character player = (Character) getSprite();\n // Minus Player's Health\n player.setHp(player.getHp() - 1);\n\n // Set Enemy to Patrol\n ((EnemyMove)action).setMode(0);\n ((EnemyMove)action).setCharacterSprite(null);\n\n // Respawn Player's Position\n GameMap.getInstance().setPosition(-705, -2600);\n\n if (player.getHp() == 0) {\n new GameOver(false).update(getSprite());\n }\n }\n}\n" }, { "alpha_fraction": 0.5036035776138306, "alphanum_fraction": 0.5378378629684448, "avg_line_length": 32.66666793823242, "blob_id": "01351902acf4a8558b3269a06123b20092c20b36", "content_id": "3859c840d7ab6abb45cde76f758558d005f0c87c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": 
"Python", "length_bytes": 1110, "license_type": "permissive", "max_line_length": 61, "num_lines": 33, "path": "/tools/ImageConverter.py", "repo_name": "YukiSora/bluebell-adventures", "src_encoding": "UTF-8", "text": "import sys\nfrom PIL import Image\nfrom PIL import ImageDraw\n\nif len(sys.argv) < 2:\n print('python ImageConverter.py convert sourceImage.png')\n print('python ImageConverter.py test sourceImage.png')\n sys.exit()\n\nsourceImage = Image.open(sys.argv[2])\nif sys.argv[1] == 'convert':\n with open('path', mode = 'w') as f:\n for h in range(0, sourceImage.size[1]):\n for w in range(0, sourceImage.size[0]):\n pixel = sourceImage.getpixel((w, h))\n if pixel == (255, 255, 255, 0):\n f.write('0')\n else:\n f.write('1')\n f.write('\\n')\nelif sys.argv[1] == 'test':\n testImage = Image.new('RGB', sourceImage.size)\n draw = ImageDraw.Draw(testImage)\n\n for h in range(0, sourceImage.size[1]):\n for w in range(0, sourceImage.size[0]):\n pixel = sourceImage.getpixel((w, h))\n if not pixel == (255, 255, 255, 0):\n draw.point((w, h), fill = (0, 0, 0, 0))\n else:\n draw.point((w, h), fill = pixel)\n\n testImage.save('test.jpg', 'jpeg');" }, { "alpha_fraction": 0.6085714101791382, "alphanum_fraction": 0.6085714101791382, "avg_line_length": 21.340425491333008, "blob_id": "b3783f41d24f790c6002c8acda8b39752a67865c", "content_id": "f006b856860583ed38291a90ea5ddc5bc774965a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1050, "license_type": "permissive", "max_line_length": 59, "num_lines": 47, "path": "/src/Megumin/Actions/Infinite.java", "repo_name": "YukiSora/bluebell-adventures", "src_encoding": "UTF-8", "text": "package Megumin.Actions;\n\nimport java.util.Iterator;\nimport java.util.concurrent.CopyOnWriteArrayList;\n\nimport Megumin.Nodes.Sprite;\n\npublic class Infinite {\n private static Infinite infinite;\n private CopyOnWriteArrayList<Event> events;\n\n public Infinite() {\n events = new CopyOnWriteArrayList<>();\n }\n\n public static Infinite getInstance() {\n if (infinite == null) {\n infinite = new Infinite();\n }\n\n return infinite;\n }\n\n public void addEvent(Event event) {\n events.add(event);\n }\n\n public void addEvent(Sprite sprite, Action action) {\n events.add(new Event(sprite, action));\n }\n\n public void removeEvent(Event event) {\n events.remove(event);\n }\n\n public void removeAll() {\n events = new CopyOnWriteArrayList<>();\n }\n\n public void update() {\n Iterator it = events.iterator();\n while (it.hasNext()) {\n Event event = (Event)it.next();\n event.getSprite().runAction(event.getAction());\n }\n }\n}\n" }, { "alpha_fraction": 0.5267857313156128, "alphanum_fraction": 0.5267857313156128, "avg_line_length": 15, "blob_id": "5412ac17410bfffafa2644521d79a3e090489519", "content_id": "1a7a14f65bfcffd1ceba1711047f8ba831a87070", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 560, "license_type": "permissive", "max_line_length": 42, "num_lines": 35, "path": "/src/Megumin/Actions/MoveTo.java", "repo_name": "YukiSora/bluebell-adventures", "src_encoding": "UTF-8", "text": "package Megumin.Actions;\n\nimport Megumin.Nodes.Sprite;\n\npublic class MoveTo extends Action {\n private int x;\n private int y;\n\n public MoveTo(int x, int y) {\n this.x = x;\n this.y = y;\n }\n\n @Override\n public void update(Sprite sprite) {\n sprite.getPosition().offset(x, y);\n super.update(sprite);\n }\n\n public int getX() {\n return x;\n }\n\n public void setX(int 
x) {\n this.x = x;\n }\n\n public int getY() {\n return y;\n }\n\n public void setY(int y) {\n this.y = y;\n }\n}\n" }, { "alpha_fraction": 0.48514851927757263, "alphanum_fraction": 0.49970880150794983, "avg_line_length": 40.878047943115234, "blob_id": "a4f65797a4e5974c68661aa351ec016d28f2ebe0", "content_id": "3c4b73cc6ab15610b99db5356b75bd019f7fcf01", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1717, "license_type": "permissive", "max_line_length": 80, "num_lines": 41, "path": "/src/BluebellAdventures/CharacterDriver.java", "repo_name": "YukiSora/bluebell-adventures", "src_encoding": "UTF-8", "text": "package BluebellAdventures;\n\nimport java.io.File;\nimport java.io.IOException;\n\nimport BluebellAdventures.Characters.Character;\nimport BluebellAdventures.Characters.Snack;\nimport BluebellAdventures.Characters.Enemy;\n\npublic class CharacterDriver {\n public static void main(String[] args) {\n try {\n String filename = \"resource/image/machi1.png\";\n\n Snack snack = new Snack(filename).setScore(110);\n System.out.println(snack.getScore());\n\n Enemy enemy = new Enemy(filename).setAttack(10)\n .setSpeed(100);\n System.out.println(enemy.getAttack());\n System.out.println(enemy.getSpeed());\n\n Character character = new Character(filename).setHp(100)\n .setMp(100)\n .setChargeBar(10)\n .setSpeed(100)\n .setUnlockSpeed(100)\n .setAttackScore(0)\n .setSnackScore(0);\n System.out.println(character.getHp());\n System.out.println(character.getMp());\n System.out.println(character.getChargeBar());\n System.out.println(character.getSpeed());\n System.out.println(character.getUnlockSpeed());\n System.out.println(character.getAttackScore());\n System.out.println(character.getSnackScore());\n } catch (IOException e) {\n System.out.println(e);\n }\n }\n}\n" }, { "alpha_fraction": 0.6111587882041931, "alphanum_fraction": 0.6191702485084534, "avg_line_length": 36.180850982666016, "blob_id": "39cd04890bee878cfc604058518770e4614b0abf", "content_id": "92eb2e22d6a49cd2f83c073a96ba9fb8a54c0090", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 3495, "license_type": "permissive", "max_line_length": 159, "num_lines": 94, "path": "/src/BluebellAdventures/CreateScenes/CreateGameOverScene.java", "repo_name": "YukiSora/bluebell-adventures", "src_encoding": "UTF-8", "text": "package BluebellAdventures.CreateScenes;\n\nimport java.io.IOException;\nimport java.awt.Color;\nimport java.awt.event.MouseEvent;\nimport java.awt.Font;\nimport java.awt.Graphics2D;\nimport java.sql.SQLException;\nimport java.text.SimpleDateFormat;\nimport java.util.Date;\n\nimport BluebellAdventures.Characters.Character;\nimport BluebellAdventures.Actions.ChangeScene;\n\nimport Megumin.Actions.Action;\nimport Megumin.Actions.Infinite;\nimport Megumin.Actions.Interact;\nimport Megumin.Actions.MouseCrash;\nimport Megumin.Database.Database;\nimport Megumin.Nodes.Layer;\nimport Megumin.Nodes.Scene;\nimport Megumin.Nodes.Sprite;\nimport Megumin.Point;\n\npublic class CreateGameOverScene {\n public static Scene createGameOverScene(String backgroundImage, Sprite sprite, boolean victory) throws IOException {\n Interact interact = Interact.getInstance();\n Infinite infinite = Infinite.getInstance();\n interact.removeAllKeyPress();\n infinite.removeAll();\n Character player = (Character)sprite;\n\n //init sprite\n Sprite background = new Sprite(backgroundImage);\n\n Sprite printStats = new Sprite() {\n @Override\n public 
void render(Graphics2D g) {\n g.setFont(new Font(\"TimesRoman\", Font.BOLD, 35));\n g.setColor(Color.white);\n g.drawString(\"Score: \" + player.getSnackScore(), 560, 430);\n }\n };\n\n // [1] Submit - Highscore\n Sprite submitHighScore = null;\n if (victory) {\n submitHighScore = new Sprite(\"resource/image/tag_highscore.png\", new Point(600, 480));\n Action saveHighScore = new MouseCrash(new Action() {\n @Override\n public void update(Sprite sprite) {\n SimpleDateFormat sdf = new SimpleDateFormat(\"yyyy-MM-dd HH:mm:ss\");\n String currentTime = sdf.format(new java.util.Date());\n\n try {\n Database.getInstance().update(\"INSERT INTO Records (Score, Date_Time) VALUE('\" + player.getSnackScore() + \"', '\" + currentTime + \"')\");\n new ChangeScene(CreateMenuScene.createMenuScene(), \"menu\").update(null);\n } catch (SQLException e) {\n System.out.println(e);\n System.exit(1);\n } catch (IOException e){\n System.exit(1);\n }\n }\n });\n interact.addEvent(MouseEvent.BUTTON1, Interact.ON_MOUSE_CLICK, submitHighScore, saveHighScore, \"game over\");\n }\n\n // [2] Back to Main Menu\n Sprite mainMenu = new Sprite(\"resource/image/tag_winback.png\", new Point(650, 610));\n Scene menu = CreateMenuScene.createMenuScene();\n Action backToMenu = new MouseCrash(new ChangeScene(menu, \"menu\"));\n interact.addEvent(MouseEvent.BUTTON1, Interact.ON_MOUSE_CLICK, mainMenu, backToMenu, \"game over\");\n\n //init layer\n Layer backgroundLayer = new Layer();\n backgroundLayer.addSprite(background);\n\n Layer tabLayer = new Layer();\n tabLayer.addSprite(printStats);\n if (victory) {\n tabLayer.addSprite(submitHighScore);\n }\n tabLayer.addSprite(mainMenu);\n\n //init scene\n Scene gameover = new Scene();\n gameover.setName(\"game over\");\n gameover.addLayer(backgroundLayer);\n gameover.addLayer(tabLayer);\n\n return gameover;\n }\n}\n" }, { "alpha_fraction": 0.6988636255264282, "alphanum_fraction": 0.6998106241226196, "avg_line_length": 25.399999618530273, "blob_id": "916eefa7eb66ae5e3d50e722aacae5150dc0c5dc", "content_id": "8ff85b2fb74c5f7ad1881c04a65206f118d6dd8b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1056, "license_type": "permissive", "max_line_length": 101, "num_lines": 40, "path": "/src/Megumin/Database/Database.java", "repo_name": "YukiSora/bluebell-adventures", "src_encoding": "UTF-8", "text": "package Megumin.Database;\n\nimport com.mysql.jdbc.Driver;\nimport java.sql.Connection;\nimport java.sql.DriverManager;\nimport java.sql.ResultSet;\nimport java.sql.SQLException;\n\npublic class Database {\n private static Database database;\n private Connection connection;\n\n private Database(String url, String user, String password) throws SQLException {\n connection = (Connection)DriverManager.getConnection(url, user, password);\n }\n\n public static void createDatabase(String url, String user, String password) throws SQLException {\n database = new Database(url, user, password);\n }\n\n public static Database getInstance() {\n return database;\n }\n\n public ResultSet query(String sql) throws SQLException {\n ResultSet result = null;\n\n result = connection.createStatement().executeQuery(sql);\n\n return result;\n }\n\n public int update(String sql) throws SQLException {\n int result = -1;\n\n result = connection.createStatement().executeUpdate(sql);\n\n return result;\n }\n}\n" }, { "alpha_fraction": 0.6012095212936401, "alphanum_fraction": 0.604411244392395, "avg_line_length": 21.85365867614746, "blob_id": 
"52ce92262fb0f086f35f3ffc3f800cad86d86142", "content_id": "43e009562178a59eb2b5adc0fa6b368b675ff58e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 2811, "license_type": "permissive", "max_line_length": 106, "num_lines": 123, "path": "/src/Megumin/Nodes/Director.java", "repo_name": "YukiSora/bluebell-adventures", "src_encoding": "UTF-8", "text": "package Megumin.Nodes;\n\nimport java.awt.Frame;\nimport java.awt.Graphics;\nimport java.awt.Graphics2D;\nimport java.awt.Image;\nimport java.awt.event.KeyEvent;\nimport java.awt.event.KeyListener;\nimport java.awt.event.MouseEvent;\nimport java.awt.event.MouseListener;\n\nimport Megumin.Actions.Infinite;\nimport Megumin.Actions.Interact;\n\npublic class Director extends Frame implements KeyListener, MouseListener, Runnable {\n private static Director director;\n private Infinite infinite;\n private Interact interact;\n private Scene scene;\n private Thread thread;\n\n private Director() {\n infinite = Infinite.getInstance();\n interact = Interact.getInstance();\n thread = new Thread(this);\n addKeyListener(this);\n addMouseListener(this);\n }\n\n public static Director getInstance() {\n if (director == null) {\n director = new Director();\n }\n\n return director;\n }\n\n public void start() {\n setVisible(true);\n thread.start();\n }\n\n //render\n @Override\n public void paint(Graphics g) {\n Graphics2D g2 = (Graphics2D)g;\n scene.render(g2);\n }\n\n @Override\n public void update(Graphics g) {\n //double buffer\n Image imageBuffer = createImage(this.getWidth(), this.getHeight());\n Graphics imageBufferGraphics = imageBuffer.getGraphics();\n paint(imageBufferGraphics);\n imageBufferGraphics.dispose();\n g.drawImage(imageBuffer, 0, 0, this);\n }\n\n //main loop\n @Override\n public void run () {\n while(true) {\n infinite.update();\n interact.update();\n repaint();\n try{\n Thread.sleep(33);\n }catch(InterruptedException e){\n System.out.println(e);\n }\n }\n }\n\n //key listener\n @Override\n public void keyTyped(KeyEvent e) {\n }\n\n @Override\n public void keyPressed(KeyEvent e) {\n interact.keyPressed(e.getKeyCode());\n }\n\n @Override\n public void keyReleased(KeyEvent e) {\n interact.keyReleased(e.getKeyCode());\n }\n\n @Override\n public void mouseClicked(MouseEvent e) {\n System.out.println(\"Debug message: Mouse position: {x = \" + e.getX() + \", y = \" + e.getY() + \"}\");\n interact.mouseClicked(e.getX(), e.getY());\n }\n\n @Override\n public void mouseEntered(MouseEvent e) {\n }\n\n @Override\n public void mouseExited(MouseEvent e) {\n }\n\n @Override\n public void mousePressed(MouseEvent e) {\n }\n\n @Override\n public void mouseReleased(MouseEvent e) {\n }\n\n public Scene getScene() {\n return scene;\n }\n\n public void setScene(Scene scene) {\n this.scene = scene;\n }\n\n public Thread getThread() {\n return thread;\n }\n}\n" }, { "alpha_fraction": 0.6441717743873596, "alphanum_fraction": 0.6441717743873596, "avg_line_length": 7.199999809265137, "blob_id": "3e3e42b422e7e77162059e860bf1faf66db7b74d", "content_id": "18c4c0b1d397c71cdbed43d4d5568b132739f7d2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 163, "license_type": "permissive", "max_line_length": 27, "num_lines": 20, "path": "/README.md", "repo_name": "YukiSora/bluebell-adventures", "src_encoding": "UTF-8", "text": "# snack-and-steal\nA stealth action video game\n\n## How to use\n\n### Shell\n\ncompile\n\n`./game.sh make 
Main`\n\nrun\n\n`./game.sh run Main`\n\n### Ant\n\ncompile and run\n\n`ant`" }, { "alpha_fraction": 0.5907729268074036, "alphanum_fraction": 0.5907729268074036, "avg_line_length": 22.18055534362793, "blob_id": "5c737dda61c90420456b045347fef483f1d59a97", "content_id": "035a418e2f00ce8543effd318a18593ea4e0dd44", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1669, "license_type": "permissive", "max_line_length": 61, "num_lines": 72, "path": "/src/Megumin/Audio/AudioEngine.java", "repo_name": "YukiSora/bluebell-adventures", "src_encoding": "UTF-8", "text": "package Megumin.Audio;\n\nimport java.util.HashMap;\nimport java.util.Map.Entry;\nimport javax.sound.sampled.Clip;\n\npublic class AudioEngine {\n // Variables\n private static AudioEngine audioEngine;\n HashMap<String, Audio> audios;\n HashMap<String, Boolean> status;\n\n // Constructor\n private AudioEngine() {\n audios = new HashMap<>();\n status = new HashMap<>();\n }\n\n public static AudioEngine getInstance() {\n if (audioEngine == null) {\n audioEngine = new AudioEngine();\n }\n\n return audioEngine;\n }\n\n // Methods\n public void addAudio(String name, Audio audio) {\n audios.put(name, audio);\n status.put(name, false);\n }\n\n public void removeAudio(String name) {\n audios.remove(name);\n status.remove(name);\n }\n\n public void play(String name) {\n audios.get(name).play();\n status.put(name, true);\n }\n\n public boolean isPlayed(String name) {\n return status.get(name);\n }\n\n public void loop(String name) {\n audios.get(name).loop(Clip.LOOP_CONTINUOUSLY);\n status.put(name, true);\n }\n\n public void loop(String name, int times) {\n audios.get(name).loop(times);\n status.put(name, true);\n }\n\n public void setVolume(String name, float percent){\n audios.get(name).setVolume(percent);\n }\n\n public void stop(String name) {\n audios.get(name).stop();\n status.put(name, false);\n }\n\n public void stopAll() {\n for(Entry<String, Audio> entry : audios.entrySet()) {\n entry.getValue().stop();\n status.put(entry.getKey(), false);\n }\n }\n}\n" }, { "alpha_fraction": 0.5968722701072693, "alphanum_fraction": 0.5968722701072693, "avg_line_length": 19.210525512695312, "blob_id": "b628312aa6fdbe27d4a03de3e0adc4374a1b4375", "content_id": "aa6808d6f72b38df39af52ec3968217b0a54a52a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1151, "license_type": "permissive", "max_line_length": 94, "num_lines": 57, "path": "/src/Megumin/Actions/Event.java", "repo_name": "YukiSora/bluebell-adventures", "src_encoding": "UTF-8", "text": "package Megumin.Actions;\n\nimport Megumin.Nodes.Sprite;\n\nclass Event {\n private Sprite sprite;\n private Action action;\n private String sceneName;\n\n Event() {\n }\n\n Event(Sprite sprite, Action action) {\n this(sprite, action, \"\");\n }\n\n Event(Sprite sprite, Action action, String sceneName) {\n this.sprite = sprite;\n this.action = action;\n this.sceneName = sceneName;\n }\n\n Event(Event event) {\n sprite = event.sprite;\n action = event.action;\n sceneName = event.sceneName;\n }\n\n public Sprite getSprite() {\n return sprite;\n }\n\n public void setSprite(Sprite sprite) {\n this.sprite = sprite;\n }\n\n public Action getAction() {\n return action;\n }\n\n public void setAction(Action action) {\n this.action = action;\n }\n\n public String getSceneName() {\n return sceneName;\n }\n\n public void setSceneName(String sceneName) {\n this.sceneName = sceneName;\n }\n\n @Override\n 
public boolean equals(Object o) {\n return ((Event)o).getSprite().equals(sprite) && ((Event)o).getAction().equals(action);\n }\n}" }, { "alpha_fraction": 0.5138484239578247, "alphanum_fraction": 0.5160349607467651, "avg_line_length": 30.204545974731445, "blob_id": "be7ce413c373a9bfcb08174b82a21fe9d035a43e", "content_id": "fa45679ccb9149fcc25d80c0a1883f50f10d5213", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Ant Build System", "length_bytes": 1372, "license_type": "permissive", "max_line_length": 75, "num_lines": 44, "path": "/build.xml", "repo_name": "YukiSora/bluebell-adventures", "src_encoding": "UTF-8", "text": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<project name=\"bluebell-adventures\" default=\"run\" basedir=\".\">\n <property name=\"src\" value=\"src\" />\n <property name=\"mysql\" value=\"resource/mysql.jar\" />\n <property name=\"dest\" value=\"bin\" />\n <property name=\"game\" value=\"BluebellAdventures\" />\n\n <target name=\"init\">\n <mkdir dir=\"${dest}\" />\n <echo message=\"Create bin/\"/>\n </target>\n\n <target name=\"compile\" depends=\"init\">\n <javac srcdir=\"${src}\" destdir=\"${dest}\" includeantruntime=\"false\">\n <classpath>\n <pathelement path=\"${mysql}\"/>\n </classpath>\n </javac>\n <echo message=\"Compile\"/>\n </target>\n\n <target name=\"run\" depends=\"compile\">\n <java classname=\"${game}.Main\" classpath=\"${dest}\">\n <classpath>\n <pathelement path=\"${mysql}\"/>\n </classpath>\n </java>\n <echo message=\"Run\"/>\n </target>\n\n <target name=\"jar\" depends=\"compile\">\n <jar jarfile=\"${game}.jar\" basedir=\"${dest}\"> \n <manifest> \n <attribute name=\"Main-Class\" value=\"${game}.Main\" />\n <attribute name=\"Class-Path\" value=\"${mysql}\"> \n </attribute>\n </manifest>\n </jar>\n </target>\n\n <target name=\"clean\"> \n <delete dir=\"${dest}\" />\n </target> \n</project>" }, { "alpha_fraction": 0.5599048137664795, "alphanum_fraction": 0.569955050945282, "avg_line_length": 23.875, "blob_id": "aaf17a23444e06bb19fc81f5a04c7d7ba83f56a0", "content_id": "fcc6f9dabf79b9fba9feecc5b5b896cb3d96f99e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 3781, "license_type": "permissive", "max_line_length": 88, "num_lines": 152, "path": "/src/Megumin/Nodes/Sprite.java", "repo_name": "YukiSora/bluebell-adventures", "src_encoding": "UTF-8", "text": "package Megumin.Nodes;\n\nimport java.awt.Graphics2D;\nimport java.awt.image.BufferedImage;\nimport java.io.File;\nimport java.io.IOException;\nimport java.util.Iterator;\nimport java.util.concurrent.CopyOnWriteArrayList;\nimport javax.imageio.ImageIO;\n\nimport Megumin.Actions.Action;\nimport Megumin.Actions.Effect;\nimport Megumin.Point;\n\npublic class Sprite {\n private BufferedImage image;\n private Point position;\n private Point size;\n private boolean visible;\n private String name;\n private int[] direction;\n\n public Sprite() {\n image = null;\n position = null;\n size = null;\n visible = false;\n name = \"\";\n direction = new int[]{1, 1};\n }\n\n public Sprite(String filename) throws IOException {\n this(filename, new Point(0, 0));\n }\n\n public Sprite(String filename, Point position) throws IOException {\n this(ImageIO.read(new File(filename)), position);\n }\n\n public Sprite(BufferedImage image) {\n this(image, new Point(0, 0));\n }\n\n public Sprite(BufferedImage image, Point position) {\n this.image = image;\n this.position = position;\n size = new Point(image.getWidth(), 
image.getHeight());\n visible = true;\n name = \"\";\n direction = new int[]{1, 1};\n }\n\n public void render(Graphics2D g) {\n if (visible) {\n g.drawImage(image, position.getX(), position.getY(), null);\n }\n }\n\n public Action runAction(Action action) {\n action.update(this);\n\n return action;\n }\n\n public boolean checkCollision(CopyOnWriteArrayList<Sprite> sprites, Action action) {\n boolean collision = false;\n int x1 = position.getX();\n int y1 = position.getY();\n int w1 = size.getX();\n int h1 = size.getY();\n Iterator it = sprites.iterator();\n while (it.hasNext()) {\n Sprite sprite = (Sprite)it.next();\n\n int x2 = sprite.getPosition().getX();\n int y2 = sprite.getPosition().getY();\n int w2 = sprite.getSize().getX();\n int h2 = sprite.getSize().getY();\n\n //check whether in collision area exist\n if (w2 == 0 || h2 == 0) {\n continue;\n }\n //check whether two rectangle intersect\n if (Math.max(Math.abs(x2 - (x1 + w1)), Math.abs(x2 + w2 - x1)) < w1 + w2 &&\n Math.max(Math.abs(y2 - (y1 + h1)), Math.abs(y2 + h2 - y1)) < h1 + h2) {\n //set sprite which be effected\n ((Effect)action).setSprite(sprite);\n runAction(action);\n collision = true;\n }\n }\n\n return collision;\n }\n\n public Point getPosition() {\n return position;\n }\n\n public void setPosition(Point position) {\n this.position = position;\n }\n\n public void setPosition(int x, int y) {\n this.position = new Point(x, y);\n }\n\n public Point getSize() {\n return size;\n }\n\n public void setSize(Point size) {\n this.size = size;\n }\n\n public void setSize(int x, int y) {\n this.size = new Point(x, y);\n }\n\n public void setVisible(boolean visible) {\n this.visible = visible;\n }\n\n public boolean getVisible() {\n return visible;\n }\n\n public void setImage(BufferedImage image) {\n this.image = image;\n }\n\n public void setImage(String filename) throws IOException {\n this.image = ImageIO.read(new File(filename));\n }\n\n public BufferedImage getImage() {\n return image;\n }\n\n public String getName() {\n return name;\n }\n\n public void setName(String name) {\n this.name = name;\n }\n\n public int[] getDirection() {\n return direction;\n }\n}\n" }, { "alpha_fraction": 0.7419354915618896, "alphanum_fraction": 0.7439516186714172, "avg_line_length": 26.55555534362793, "blob_id": "d92233ce19bf1f7c8c43e7ff282c4336830b31bd", "content_id": "93de9e4c4332029eb24687cfb24d9a9f49fc0d31", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 496, "license_type": "permissive", "max_line_length": 91, "num_lines": 18, "path": "/src/BluebellAdventures/Actions/KeyCollision.java", "repo_name": "YukiSora/bluebell-adventures", "src_encoding": "UTF-8", "text": "package BluebellAdventures.Actions;\n\nimport BluebellAdventures.Characters.Character;\n\nimport Megumin.Actions.Effect;\nimport Megumin.Audio.AudioEngine;\nimport Megumin.Nodes.Director;\nimport Megumin.Nodes.Sprite;\n\npublic class KeyCollision extends Effect {\n @Override\n public void update(Sprite sprite) {\n AudioEngine.getInstance().play(\"key\");\n\n Director.getInstance().getScene().getLayerByName(\"keys\").removeSprite(getSprite());\n ((Character)sprite).addKey(1);\n }\n}\n" }, { "alpha_fraction": 0.7651515007019043, "alphanum_fraction": 0.7651515007019043, "avg_line_length": 23, "blob_id": "6882a6e539574871495265533e338ec5f3784679", "content_id": "44bc087f5986eed90b32a9d9c5b9c9f96ac99c1f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 264, 
"license_type": "permissive", "max_line_length": 49, "num_lines": 11, "path": "/resource/database.sql", "repo_name": "YukiSora/bluebell-adventures", "src_encoding": "UTF-8", "text": "DROP DATABASE IF EXISTS BluebellAdventuresRecord;\nCREATE DATABASE BluebellAdventuresRecord;\nUSE BluebellAdventuresRecord;\n\nCREATE TABLE Records (\n Id INT NOT NULL AUTO_INCREMENT,\n Score INT NOT NULL,\n Date_Time DATETIME NOT NULL,\n\n PRIMARY KEY (Id)\n);\n" }, { "alpha_fraction": 0.7013322114944458, "alphanum_fraction": 0.7049849629402161, "avg_line_length": 40.55356979370117, "blob_id": "8b270dde2a7ab6291a49b2f4d6f5b1985133176e", "content_id": "4ad2b40c0cd00ebcf63e0bbf7a8d964c46817416", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 4654, "license_type": "permissive", "max_line_length": 148, "num_lines": 112, "path": "/src/BluebellAdventures/Main.java", "repo_name": "YukiSora/bluebell-adventures", "src_encoding": "UTF-8", "text": "package BluebellAdventures;\n\nimport java.awt.Color;\nimport java.awt.event.MouseEvent;\nimport java.awt.event.KeyEvent;\nimport java.io.File;\nimport java.io.FileReader;\nimport java.io.IOException;\nimport java.sql.SQLException;\nimport java.util.Scanner;\nimport javax.imageio.ImageIO;\nimport javax.sound.sampled.Clip;\n\nimport BluebellAdventures.Actions.ChangeScene;\nimport BluebellAdventures.Actions.SelectCharacter;\nimport BluebellAdventures.Actions.Quit;\nimport BluebellAdventures.CreateScenes.CreateCharacterSelectionScene;\n\nimport BluebellAdventures.CreateScenes.CreateLoadingScene;\nimport BluebellAdventures.CreateScenes.CreateMenuScene;\n\nimport Megumin.Actions.Action;\nimport Megumin.Actions.MouseCrash;\nimport Megumin.Actions.Infinite;\nimport Megumin.Actions.Interact;\nimport Megumin.Audio.Audio;\nimport Megumin.Audio.AudioEngine;\nimport Megumin.Database.Database;\nimport Megumin.Nodes.Director;\nimport Megumin.Nodes.Layer;\nimport Megumin.Nodes.Scene;\nimport Megumin.Nodes.Sprite;\nimport Megumin.Point;\n\npublic class Main {\n private static Director director;\n private static Infinite infinite;\n private static Interact interact;\n private static AudioEngine audioEngine;\n\n public static void main(String[] args) throws IOException {\n //init instances\n director = Director.getInstance();\n infinite = Infinite.getInstance();\n interact = Interact.getInstance();\n audioEngine = AudioEngine.getInstance();\n try (Scanner in = new Scanner(new FileReader(\"resource/mysql.txt\"))) {\n Database.createDatabase(\"jdbc:mysql://localhost:3306/BluebellAdventuresRecord\", in.nextLine(), in.nextLine());\n } catch (SQLException e) {\n System.out.println(e);\n System.exit(1);\n }\n\n //init window property\n director.setTitle(\"Bluebell's Adventures\");\n director.setResizable(false);\n director.setSize(1280, 720);\n director.setBackground(Color.black);\n director.setUndecorated(true);\n director.setIconImage(ImageIO.read(new File(\"resource/image/logo.png\")));\n\n //start loading page\n director.setScene(CreateLoadingScene.createLoadingScene(\"resource/image/splash_screen.png\"));\n director.start();\n\n //init audio\n audioEngine.addAudio(\"menu\", new Audio(\"resource/audio/menu.wav\"));\n audioEngine.addAudio(\"main\", new Audio(\"resource/audio/main.wav\"));\n audioEngine.addAudio(\"nervous\", new Audio(\"resource/audio/nervous.wav\"));\n audioEngine.addAudio(\"victory\", new Audio(\"resource/audio/victory.wav\"));\n audioEngine.addAudio(\"lose\", new Audio(\"resource/audio/lose.wav\"));\n 
audioEngine.addAudio(\"eating\", new Audio(\"resource/audio/eating.wav\"));\n audioEngine.addAudio(\"attacking\", new Audio(\"resource/audio/attacking.wav\"));\n audioEngine.addAudio(\"slurping\", new Audio(\"resource/audio/slurping.wav\"));\n audioEngine.addAudio(\"walking\", new Audio(\"resource/audio/walking.wav\"));\n audioEngine.addAudio(\"door\", new Audio(\"resource/audio/door.wav\"));\n audioEngine.addAudio(\"fridge\", new Audio(\"resource/audio/fridge.wav\"));\n audioEngine.addAudio(\"unlock\", new Audio(\"resource/audio/unlock.wav\"));\n audioEngine.addAudio(\"key\", new Audio(\"resource/audio/key.wav\"));\n\n audioEngine.setVolume(\"main\", 0.8f);\n\n //system action\n Sprite system = new Sprite();\n interact.addEvent(KeyEvent.VK_ESCAPE, Interact.ON_KEY_CLICK, system, new Quit(), \"\");\n\n //create scene\n Scene menu = CreateMenuScene.createMenuScene();\n Scene characterSelection = CreateCharacterSelectionScene.createCharacterSelectionScene();\n\n //menu action\n Sprite single = menu.getSpriteByName(\"single player\");\n interact.addEvent(MouseEvent.BUTTON1, Interact.ON_MOUSE_CLICK, single, new MouseCrash(new ChangeScene(characterSelection, \"main\")), \"menu\");\n Sprite exit = menu.getSpriteByName(\"exit\");\n interact.addEvent(MouseEvent.BUTTON1, Interact.ON_MOUSE_CLICK, exit, new MouseCrash(new Quit()), \"menu\");\n\n //character selection action\n Sprite back = characterSelection.getSpriteByName(\"back\");\n Action backToMenu = new MouseCrash(new ChangeScene(menu, \"menu\"));\n interact.addEvent(MouseEvent.BUTTON1, Interact.ON_MOUSE_CLICK, back, backToMenu, \"character selection\");\n\n\n //after loading start game\n audioEngine.loop(\"menu\", Clip.LOOP_CONTINUOUSLY);\n director.setScene(menu);\n try {\n director.getThread().join();\n } catch (InterruptedException e) {\n System.out.println(e);\n }\n }\n}\n" }, { "alpha_fraction": 0.4782428443431854, "alphanum_fraction": 0.492954820394516, "avg_line_length": 28.607362747192383, "blob_id": "723497b0c1fc94c9d3c545a70524f010ebe2b5e7", "content_id": "bd2634edd1edc5b6b3e00325728204810a262c65", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 4826, "license_type": "permissive", "max_line_length": 94, "num_lines": 163, "path": "/src/BluebellAdventures/Characters/Enemy.java", "repo_name": "YukiSora/bluebell-adventures", "src_encoding": "UTF-8", "text": "package BluebellAdventures.Characters;\n\nimport java.awt.Graphics2D;\nimport java.awt.image.BufferedImage;\nimport java.io.File;\nimport java.io.IOException;\nimport java.util.Iterator;\nimport java.util.concurrent.CopyOnWriteArrayList;\nimport javax.imageio.ImageIO;\n\nimport BluebellAdventures.Characters.GameMap;\n\nimport Megumin.Actions.Action;\nimport Megumin.Actions.Effect;\nimport Megumin.Nodes.Sprite;\nimport Megumin.Point;\n\npublic class Enemy extends Sprite {\n private int attack;\n private int detectionRange;\n private int speed;\n private int rotate;\n\n // Constructors //\n public Enemy() {\n super();\n }\n\n public Enemy(String filename) throws IOException {\n super(filename, new Point(0, 0));\n }\n\n public Enemy(String filename, Point position) throws IOException {\n super(ImageIO.read(new File(filename)), position);\n }\n\n public Enemy(BufferedImage image) {\n super(image, new Point(0, 0));\n }\n\n public Enemy(BufferedImage image, Point position) {\n super(image, position);\n }\n\n @Override\n public void render(Graphics2D g) {\n if (getVisible()) {\n GameMap map = GameMap.getInstance();\n int[] 
direction = getDirection();\n int x = map.getPosition().getX() + getPosition().getX();\n int y = map.getPosition().getY() + getPosition().getY();\n int w = getImage().getWidth();\n int h = getImage().getHeight();\n\n //TODO: rotate area corrected\n if (direction[0] == -1) {\n if (direction[1] == -1) {\n rotate = 7;\n setSize(h, h);\n }\n else if (direction[1] == 0) {\n rotate = 6;\n setSize(h, w);\n }\n else if (direction[1] == 1) {\n rotate = 5;\n setSize(h, h);\n }\n }\n else if (direction[0] == 0) {\n if (direction[1] == -1) {\n rotate = 0;\n setSize(w, h);\n }\n else if (direction[1] == 0) {\n }\n else if (direction[1] == 1) {\n rotate = 4;\n setSize(w, h);\n }\n }\n else if (direction[0] == 1) {\n if (direction[1] == -1) {\n setSize(h, h);\n rotate = 1;\n }\n else if (direction[1] == 0) {\n setSize(h, w);\n rotate = 2;\n }\n else if (direction[1] == 1) {\n setSize(h, h);\n rotate = 3;\n }\n }\n double theta = Math.PI / 4 * rotate;\n\n g.rotate(theta, x + getImage().getWidth() / 2, y + getImage().getHeight() / 2);\n g.drawImage(getImage(), x, y, null);\n g.rotate(-theta, x + getImage().getWidth() / 2, y + getImage().getHeight() / 2);\n }\n }\n\n @Override\n public boolean checkCollision(CopyOnWriteArrayList<Sprite> sprites, Action action) {\n boolean collision = false;\n int x1 = getPosition().getX();\n int y1 = getPosition().getY();\n int w1 = getSize().getX();\n int h1 = getSize().getY();\n Iterator it = sprites.iterator();\n while (it.hasNext()) {\n Sprite sprite = (Sprite)it.next();\n\n int x2 = sprite.getPosition().getX() - GameMap.getInstance().getPosition().getX();\n int y2 = sprite.getPosition().getY() - GameMap.getInstance().getPosition().getY();\n int w2 = sprite.getSize().getX();\n int h2 = sprite.getSize().getY();\n\n //check whether collision area exist\n if (w2 == 0 || h2 == 0) {\n continue;\n }\n //check whether two rectangle intersect\n if (Math.max(Math.abs(x2 - (x1 + w1)), Math.abs(x2 + w2 - x1)) < w1 + w2 &&\n Math.max(Math.abs(y2 - (y1 + h1)), Math.abs(y2 + h2 - y1)) < h1 + h2) {\n ((Effect)action).setSprite(sprite);\n runAction(action);\n collision = true;\n }\n }\n\n return collision;\n }\n\n // Get and Sets //\n public Enemy setAttack(int attack) {\n this.attack = attack;\n return this;\n }\n\n public int getAttack() {\n return attack;\n }\n\n public Enemy setDetectionRange(int detectionRange) {\n this.detectionRange = detectionRange;\n return this;\n }\n\n public int getDetectionRange() {\n return detectionRange;\n }\n\n public Enemy setSpeed(int speed) {\n this.speed = speed;\n return this;\n }\n\n public int getSpeed() {\n return speed;\n }\n}\n" } ]
22
UCSD-RHC-Lab/Group-Motion-Forecasting
https://github.com/UCSD-RHC-Lab/Group-Motion-Forecasting
35081b8cf1cc6ca75849a393edcd6fe29ca8dd40
46ee756a3838e592652b306bf7c559799c61f7cd
4776ba720389e4f19251b8bb9fdfdb3a7b768f5d
refs/heads/master
2020-07-09T21:17:07.142486
2019-10-02T04:10:01
2019-10-02T04:10:01
204,085,954
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7828842997550964, "alphanum_fraction": 0.7950343489646912, "avg_line_length": 93.6500015258789, "blob_id": "b4c9494054e73d3defa82a89649eb6db3a486172", "content_id": "d7237c8f1f1fd045fc6788ca5715e8ff0b5c80e8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1895, "license_type": "no_license", "max_line_length": 373, "num_lines": 20, "path": "/README.md", "repo_name": "UCSD-RHC-Lab/Group-Motion-Forecasting", "src_encoding": "UTF-8", "text": "# Group-Motion-Forecasting\n\nFuture Trajectory Prediction of Human Groups for Mobile Robots\n\n### Motivation:\n\nThe goal of this project is to design an algorithm that enables robots to predict the future motion trajectory of groups of people. This algorithm will enable robots to employ safer motion planning algorithms while also providing a high-level understanding of the social context (e.g., groups) within an environment. \n\nThis will enable robots to work seamlessly in the real-world to improve the safety of pedestrians around them and improve a robot’s context understanding among groups of people. \n\nAs robots enter human-centered environments, they need to work side-by-side with groups of people. As such, robots need a high-level of understanding of social dynamics in order to effectively work alongside people. As a result, we design robots that predict the future trajectory of groups of people, which we refer to as group motion forecasting [1-3].\n\nPrior research on group motion forecasting relies on exo-centric (e.g., third-person perspective) sensing, and focuses on predicting the motion intentions of individual pedestrians. To our knowledge, no prior work addresses how to predict the future motion trajectory of groups of people from an ego-centric perspective, which is important for mobile robotic applications. \n\n\n[1] Taylor, A. and Riek, L.D. (2019) Group Perception Methods to Support Human-Robot Teaming. Southern California Robotics Symposium (SCR).\n\n[2] Taylor, A. and Riek, L.D. (2018). Robot-Centric Human Group Detection. In Proc. of Social Robots in the Wild, Workshop at the 13th Annual ACM/IEEE International Conference on Human-Robot Interaction (HRI), 2018.\n\n[3] Taylor, A. and Riek, L.D. (2016). 
Robot Perception of Human Groups in the Real World: State of the Art. In Proceedings of the AAAI Fall Symposium on Artificial Intelligence in Human-Robot Interaction (AI-HRI).\n" }, { "alpha_fraction": 0.5587324500083923, "alphanum_fraction": 0.5656481981277466, "avg_line_length": 39.7100830078125, "blob_id": "d8c3fccff6fd8b1dfde0d725c07e6b57a24e1e93", "content_id": "078f5e47cd6b2d83f574ae7cb6c8f0bc467c5bf6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9688, "license_type": "no_license", "max_line_length": 143, "num_lines": 238, "path": "/cross_validation.py", "repo_name": "UCSD-RHC-Lab/Group-Motion-Forecasting", "src_encoding": "UTF-8", "text": "import csv\nimport numpy as np\nfrom numpy import concatenate\nfrom pandas import DataFrame, concat, read_csv\nimport pandas as pd\nfrom math import sqrt\nfrom matplotlib import pyplot\nfrom sklearn.preprocessing import MinMaxScaler, LabelEncoder, StandardScaler\nfrom sklearn.metrics import mean_squared_error\nfrom keras.models import Sequential, model_from_json\nfrom keras.layers import Dense, LSTM, GRU, Activation, Flatten\nfrom os import path\nfrom utils import *\nimport argparse\n\n\n# Modify for Experiments\n'''\nn_neurons = 1\nn_epochs = 1\nn_batch = 1\nloss='mae'\noptimizer='adam'\n\n# Set DEBUG mode ON=1, OFF=0\nDEBUG = 0\n'''\ndef parse_args():\n    \"\"\" Parse command line arguments.\n    \"\"\"\n    parser = argparse.ArgumentParser(description=\"Group-Motion-Forecasting\")\n    parser.add_argument(\n        \"--method_name\", help=\"'--method_name=lstm' or '--method_name=gru'\", default='None', type=str,\n        required=True)\n    parser.add_argument(\n        \"--n_epoch\", help=\"--n_epoch=1\", default=1, type=int,\n        required=False)\n    parser.add_argument(\n        \"--n_batch\", help=\"--n_batch=1\", default=1, type=int,\n        required=False)\n    parser.add_argument(\n        \"--n_neurons\", help=\"--n_neurons=1\", default=1, type=int,\n        required=False)\n    parser.add_argument(\n        \"--model_name\", help=\"--model_name = models/model.json, store in models folder\", default=\"models/default_<method_name>.json\", type=str,\n        required=False)\n    parser.add_argument(\n        \"--debug\", help=\"--debug=1, ON=1, OFF=0\", default=1, type=int,\n        required=False)\n    parser.add_argument(\n        \"--loss\", help=\"--loss=mae\", default=\"mae\", type=str,\n        required=False)\n    parser.add_argument(\n        \"--optimizer\", help=\"--optimizer=adam\", default=\"adam\", type=str,\n        required=False)\n    parser.add_argument(\n        \"--weights_name\", help=\"--weights_name=model_weights/lstm_model.h5\", default=\"default_model.h5\", type=str,\n        required=False)\n    parser.add_argument(\n        \"--t_observe\", help=\"--t_observe=1\", default=1, type=int,\n        required=False)\n    parser.add_argument(\n        \"--t_predict\", help=\"--t_predict=1\", default=1, type=int,\n        required=False)\n    parser.add_argument(\n        \"--n_folds\", help=\"--n_folds=3\", default=3, type=int,\n        required=False)\n    return parser.parse_args()\n\ndef main():\n    # Parse input arguments\n    args = parse_args()\n\n    # Set Input Variables\n    n_neurons = args.n_neurons\n    n_epochs = args.n_epoch\n    n_batch = args.n_batch\n    loss = args.loss\n    optimizer = args.optimizer\n    model_name = args.model_name\n    weights_name = args.weights_name\n    T_OBSERVE = args.t_observe\n    T_PREDICT = args.t_predict\n    n_folds = args.n_folds\n    DEBUG = args.debug\n    # Format train, valid, and test data\n    trainX, trainY, valX, valY, testX, testY, scaler, maxlength = FormatInputDataCrossValidation(train_group_1,\n                                                                train_group_4,\n                                                                val_group_1,\n                                                                test_group_3,\n                                                                
test_group_5,\n T_OBSERVE, \n T_PREDICT,\n n_folds,\n DEBUG)\n '''\n if(DEBUG):\n print('trainY shape: {}'.format(trainY.shape))\n print('valY shape: {}'.format(valY.shape))\n print('testY shape: {}'.format(testY.shape))\n\n # Plot 200 samples of trainX data from 0->199\n #plot_data(trainX, 0, 200, maxlength, mode='train',T_OBSERVE, T_PREDICT)\n\n # Plot 200 samples of trainX data from 199->399\n #plot_data(trainX, 199, 400, maxlength, mode='train',T_OBSERVE, T_PREDICT)\n\n if(args.method_name == 'lstm' or args.method_name == 'LSTM'):\n print('Training LSTM model with epoch={}, n_batch={}, n_neurons={}, loss={}, optimizer={}, t_observe={}, t_predict={}'.format(\n n_epochs, n_batch, n_neurons, loss, optimizer, T_OBSERVE, T_PREDICT\n ))\n ################## Train LSTM Model ##################\n # https://machinelearningmastery.com/save-load-keras-deep-learning-models/\n model = RunLSTM(model_name, trainX, trainY, \n valX, valY, \n testX, testY, T_PREDICT,\n n_neurons=n_neurons,\n n_epochs=n_epochs, \n n_batch=n_batch, \n outputModel=model_name,\n loss=loss, \n optimizer=optimizer,\n weights_name=weights_name)\n\n ################## Evaluate LSTM Model ##################\n print('Evaluating LSTM model with epoch={}, n_batch={}, n_neurons={}, loss={}, optimizer={}, t_observe={}, t_predict={}'.format(\n n_epochs, n_batch, n_neurons, loss, optimizer, T_OBSERVE, T_PREDICT\n ))\n # Make LSTM prediction\n yhat = model.predict(testX)\n xhat = model.predict(trainX)\n \n if(DEBUG):\n pd.DataFrame(xhat).to_csv(\"DEBUG/xhat_lstm.csv\")\n pd.DataFrame(yhat).to_csv(\"DEBUG/yhat_lstm.csv\")\n\n # Invert predictions\n yhat_rescaled = scaler.inverse_transform(yhat)\n xhat_rescaled = scaler.inverse_transform(xhat)\n\n # Reshape data to its previous shape\n trainX = trainX.reshape((trainX.shape[0], T_OBSERVE*n_features))\n trainY = trainY.reshape((trainY.shape[0], T_PREDICT*n_features))\n\n valX = valX.reshape((valX.shape[0], T_OBSERVE*n_features))\n valY = valY.reshape((valY.shape[0], T_PREDICT*n_features))\n\n testX = testX.reshape((testX.shape[0], T_OBSERVE*n_features))\n testY = testY.reshape((testY.shape[0], T_PREDICT*n_features))\n\n if(DEBUG):\n pd.DataFrame(trainX).to_csv(\"DEBUG/trainX.csv\")\n pd.DataFrame(trainY).to_csv(\"DEBUG/trainY.csv\")\n\n pd.DataFrame(valX).to_csv(\"DEBUG/valX.csv\")\n pd.DataFrame(valY).to_csv(\"DEBUG/valY.csv\")\n\n pd.DataFrame(testX).to_csv(\"DEBUG/testX.csv\")\n pd.DataFrame(testY).to_csv(\"DEBUG/testY.csv\")\n\n # Invert data to original scale\n trainX_rescaled = scaler.inverse_transform(trainX)\n trainY_rescaled = scaler.inverse_transform(trainY)\n testX_rescaled = scaler.inverse_transform(testX)\n testY_rescaled = scaler.inverse_transform(testY)\n\n # Evaluate LSTM Model on Test Data\n lstm_rmse = evaluate_forecasts_MSE(testX_rescaled, yhat_rescaled, T_OBSERVE, T_PREDICT)\n\n elif(args.method_name == 'gru' or args.method_name == 'GRU'):\n print('Training GRU model with epoch={}, n_batch={}, n_neurons={}, loss={}, optimizer={}, t_observe={}, t_predict={}'.format(\n n_epochs, n_batch, n_neurons, loss, optimizer, T_OBSERVE, T_PREDICT\n ))\n ################## Train GRU Model ##################\n model = RunGRU(model_name, trainX, trainY, \n valX, valY, \n testX, testY, T_PREDICT,\n n_neurons=n_neurons,\n n_epochs=n_epochs, \n n_batch=n_batch, \n outputModel=model_name,\n loss=loss, \n optimizer=optimizer,\n weights_name=weights_name)\n\n ################## Evaluate GRU Model ##################\n print('Evaluating GRU model with epoch={}, n_batch={}, n_neurons={}, 
loss={}, optimizer={}, t_observe={}, t_predict={}'.format(\n n_epochs, n_batch, n_neurons, loss, optimizer, T_OBSERVE, T_PREDICT\n ))\n\n # Make GRU prediction\n yhat = model.predict(testX)\n xhat = model.predict(trainX)\n\n if(DEBUG):\n pd.DataFrame(xhat).to_csv(\"DEBUG/xhat_gru.csv\")\n pd.DataFrame(yhat).to_csv(\"DEBUG/yhat_gru.csv\")\n\n # Invert predictions\n yhat_rescaled = scaler.inverse_transform(yhat)\n xhat_rescaled = scaler.inverse_transform(xhat)\n\n # Save results\n if(DEBUG):\n pd.DataFrame(yhat_rescaled).to_csv(\"DEBUG/yhat_rescaled_gru.csv\")\n pd.DataFrame(xhat_rescaled).to_csv(\"DEBUG/xhat_rescaled_gru.csv\")\n\n # Reshape data to its previous shape\n trainX = trainX.reshape((trainX.shape[0], T_OBSERVE*n_features))\n trainY = trainY.reshape((trainY.shape[0], T_PREDICT*n_features))\n\n valX = valX.reshape((valX.shape[0], T_OBSERVE*n_features))\n valY = valY.reshape((valY.shape[0], T_PREDICT*n_features))\n\n testX = testX.reshape((testX.shape[0], T_OBSERVE*n_features))\n testY = testY.reshape((testY.shape[0], T_PREDICT*n_features))\n\n if(DEBUG):\n pd.DataFrame(trainX).to_csv(\"DEBUG/trainX.csv\")\n pd.DataFrame(trainY).to_csv(\"DEBUG/trainY.csv\")\n\n pd.DataFrame(valX).to_csv(\"DEBUG/valX.csv\")\n pd.DataFrame(valY).to_csv(\"DEBUG/valY.csv\")\n\n pd.DataFrame(testX).to_csv(\"DEBUG/testX.csv\")\n pd.DataFrame(testY).to_csv(\"DEBUG/testY.csv\")\n\n # Invert data to original scale\n trainX_rescaled = scaler.inverse_transform(trainX)\n trainY_rescaled = scaler.inverse_transform(trainY)\n testX_rescaled = scaler.inverse_transform(testX)\n testY_rescaled = scaler.inverse_transform(testY)\n\n # Evaluate GRU Model on Test Data\n gru_rmse = evaluate_forecasts_MSE(testX_rescaled, yhat_rescaled, T_OBSERVE, T_PREDICT)\n '''\nif __name__ == '__main__':\n main()" }, { "alpha_fraction": 0.6149531006813049, "alphanum_fraction": 0.6277086138725281, "avg_line_length": 38.98310470581055, "blob_id": "a1d92347abc1b07143fc226c1ae83ef0b08004b2", "content_id": "7d85e32360dead2092aceefb1ba50a0e246ccccf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 26028, "license_type": "no_license", "max_line_length": 207, "num_lines": 651, "path": "/utils.py", "repo_name": "UCSD-RHC-Lab/Group-Motion-Forecasting", "src_encoding": "UTF-8", "text": "import csv\nimport numpy as np\nfrom numpy import concatenate\nfrom pandas import DataFrame, concat, read_csv\nimport pandas as pd\nfrom math import sqrt\nfrom matplotlib import pyplot\nfrom sklearn.preprocessing import MinMaxScaler, LabelEncoder, StandardScaler\nfrom sklearn.metrics import mean_squared_error\nfrom keras.models import Sequential, model_from_json\nfrom keras.layers import Dense, LSTM, GRU, Activation, Flatten\nfrom os import path\nfrom sklearn.model_selection import TimeSeriesSplit\n\ngroups_per_obs = 1\nn_features = 2 # x and y\n\ntrain_group_1 = 'input_data/train-group-01-gt.txt'\ntest_group_3 = 'input_data/test-group-03-gt.txt'\ntrain_group_4 = 'input_data/train-group-04-gt.txt'\ntest_group_5 = 'input_data/test-group-05-gt.txt'\nval_group_1 = 'input_data/val-group-01-gt.txt'\n\ndef FormatInputData(train_group_1,train_group_4,val_group_1,test_group_3,test_group_5, \n T_OBSERVE, T_PREDICT, DEBUG=0):\n '''\n TODO: Fill in. 
Say Addressed and remove this TODO when finished.\n    Input:\n    Output:\n    Method Description:\n    '''\n\n    #read all data and transform \n    train1, endGroupID = ReadFileandTransform(train_group_1)\n    train4, endGroupID = ReadFileandTransform(train_group_4, endGroupID)\n    val1, endGroupID = ReadFileandTransform(val_group_1, endGroupID)\n    test3, endGroupID = ReadFileandTransform(test_group_3, endGroupID)\n    test5, endGroupID = ReadFileandTransform(test_group_5, endGroupID)\n\n    # Get length of all training, val, and all test\n    train_length = np.shape(train1)[1] + np.shape(train4)[1]\n    val_length = np.shape(val1)[1]\n    test_length = np.shape(test3)[1] + np.shape(test5)[1]\n\n    # Normalize x and y values for all data \n    all_data, scaler = Normalize(train1,train4,val1,test3,test5)\n\n    # Split data into train, val, and test data\n    train, val, test = Breakupdata(all_data, train_length, val_length, test_length)\n\n    # Group data by group ID so that data is sequential --> \n    # [ maxlength*number_of_group, groups_per_image*n_features]\n    train, train_maxlength = RearrangebyGroupID(train, n_features, T_OBSERVE, T_PREDICT,groups_per_obs)\n    val, val_maxlength = RearrangebyGroupID(val, n_features, T_OBSERVE, T_PREDICT,groups_per_obs)\n    test, test_maxlength = RearrangebyGroupID(test, n_features, T_OBSERVE, T_PREDICT,groups_per_obs)\n    maxlength = max(train_maxlength,val_maxlength,test_maxlength)\n    #\n    # Write Input to text file\n    if(DEBUG):\n        print('train_maxlength: {}, val_maxlength: {}, test_maxlength: {}'.format(train_maxlength,val_maxlength,test_maxlength))\n        train.to_csv('DEBUG/train.csv', index=True)\n        val.to_csv('DEBUG/val.csv', index=True)\n        test.to_csv('DEBUG/test.csv', index=True)\n\n    # Get DataFrame data\n    train = train.values\n    val = val.values\n    test = test.values\n\n    if(DEBUG):\n        print('train.shape: {}'.format(np.shape(train)))\n        print('val.shape: {}'.format(np.shape(val)))\n        print('test.shape: {}'.format(np.shape(test)))\n    \n    n_samples = groups_per_obs*n_features*T_PREDICT\n    trainX, trainY = train[:,:-n_samples], train[:,-n_samples:]\n    valX, valY = val[:,:-n_samples], val[:,-n_samples:]\n    testX, testY = test[:,:-n_samples], test[:,-n_samples:]\n    \n    if(DEBUG):\n        print('size of trainX: {}, trainY: {}'.format(trainX.shape,trainY.shape))\n        print('size of valX: {}, valY: {}'.format(valX.shape,valY.shape))\n        print('size of testX: {}, testY: {}'.format(testX.shape,testY.shape))\n    \n    # Reshape input to be 3D [samples, timesteps, features]\n    trainX = trainX.reshape((trainX.shape[0], T_OBSERVE, n_features))\n    valX = valX.reshape((valX.shape[0], T_OBSERVE, n_features))\n    testX = testX.reshape((testX.shape[0], T_OBSERVE, n_features))\n\n    if(DEBUG):\n        print(trainX.shape, trainY.shape, valX.shape, valY.shape, testX.shape, testY.shape)\n    \n    return trainX, trainY, valX, valY, testX, testY, scaler, maxlength\n\ndef FormatInputDataCrossValidation(train_group_1,train_group_4,val_group_1,test_group_3,test_group_5, \n                                   T_OBSERVE, T_PREDICT, n_folds=3, DEBUG=0):\n    '''\n    TODO: Fill in. 
Say Addressed and remove this TODO when finished.\n    Input:\n    Output:\n    Method Description:\n    '''\n\n    #read all data and transform \n    train1, endGroupID = ReadFileandTransform(train_group_1)\n    train4, endGroupID = ReadFileandTransform(train_group_4, endGroupID)\n    val1, endGroupID = ReadFileandTransform(val_group_1, endGroupID)\n    test3, endGroupID = ReadFileandTransform(test_group_3, endGroupID)\n    test5, endGroupID = ReadFileandTransform(test_group_5, endGroupID)\n\n    # Get length of all training, val, and all test\n    train_length = np.shape(train1)[1] + np.shape(train4)[1]\n    val_length = np.shape(val1)[1]\n    test_length = np.shape(test3)[1] + np.shape(test5)[1]\n\n    # Normalize x and y values for all data \n    all_data, scaler = Normalize(train1,train4,val1,test3,test5)\n\n    # Split data into train, val, and test data\n    train, val, test = Breakupdata(all_data, train_length, val_length, test_length)\n\n    # Group data by group ID so that data is sequential --> \n    # [ maxlength*number_of_group, groups_per_image*n_features]\n    train, train_maxlength = RearrangebyGroupID(train, n_features, T_OBSERVE, T_PREDICT,groups_per_obs)\n    val, val_maxlength = RearrangebyGroupID(val, n_features, T_OBSERVE, T_PREDICT,groups_per_obs)\n    test, test_maxlength = RearrangebyGroupID(test, n_features, T_OBSERVE, T_PREDICT,groups_per_obs)\n    maxlength = max(train_maxlength,val_maxlength,test_maxlength)\n    #\n    # Write Input to text file\n    if(DEBUG):\n        print('train_maxlength: {}, val_maxlength: {}, test_maxlength: {}'.format(train_maxlength,val_maxlength,test_maxlength))\n        train.to_csv('DEBUG/train.csv', index=True)\n        val.to_csv('DEBUG/val.csv', index=True)\n        test.to_csv('DEBUG/test.csv', index=True)\n\n    # Get DataFrame data\n    train = train.values\n    val = val.values\n    test = test.values\n\n    if(DEBUG):\n        print('before train shape: {}'.format(train.shape))\n        print('before val shape: {}'.format(val.shape))\n\n    # Combine train and validation for cross validation\n    train = np.concatenate((train,val), axis=0)\n    if(DEBUG):\n        print('after train shape: {}'.format(np.shape(train)))\n\n    # Format data for cross validation (cv)\n    train_cvX, train_cvY = GenerateTimesSeriesFolds(train, train_maxlength, n_folds, DEBUG)\n    val_cvX, val_cvY = GenerateTimesSeriesFolds(val, val_maxlength, n_folds, DEBUG)\n    #test_cv_X, test_cvY = GenerateTimesSeriesFolds(test, test_maxlength, n_folds, DEBUG)\n\n    # Done in GenerateTimesSeriesFolds\n    '''\n    if(DEBUG):\n        print('train.shape: {}'.format(np.shape(train)))\n        print('val.shape: {}'.format(np.shape(val)))\n        print('test.shape: {}'.format(np.shape(test)))\n    '''\n    n_samples = groups_per_obs*n_features*T_PREDICT\n    trainX, trainY = train[:,:-n_samples], train[:,-n_samples:]\n    valX, valY = val[:,:-n_samples], val[:,-n_samples:]\n    testX, testY = test[:,:-n_samples], test[:,-n_samples:]\n    \n    if(DEBUG):\n        print('size of trainX: {}, trainY: {}'.format(trainX.shape,trainY.shape))\n        print('size of valX: {}, valY: {}'.format(valX.shape,valY.shape))\n        print('size of testX: {}, testY: {}'.format(testX.shape,testY.shape))\n    \n    # Reshape input to be 3D [samples, timesteps, features]\n    trainX = trainX.reshape((trainX.shape[0], T_OBSERVE, n_features))\n    valX = valX.reshape((valX.shape[0], T_OBSERVE, n_features))\n    testX = testX.reshape((testX.shape[0], T_OBSERVE, n_features))\n\n    if(DEBUG):\n        print(trainX.shape, trainY.shape, valX.shape, valY.shape, testX.shape, testY.shape)\n    \n    return trainX, trainY, valX, valY, testX, testY, scaler, maxlength\n'''\ndef FormatForTimesSeriesCrossValidation(X, n_folds=3):\n    splits = 
TimeSeriesSplit(n_splits=3)\n index = 1\n for train_index, test_index in splits.split(X):\n print('train_index: {}'.format(train_index))\n print('test_index: {}'.format(test_index))\n train = X[train_index]\n test = X[test_index]\n print('Observations: %d' % (len(train) + len(test)))\n print('Training Observations: %d' % (len(train)))\n print('Testing Observations: %d' % (len(test)))\n'''\ndef GenerateTimesSeriesFolds(X, maxlength, n_splits=3, DEBUG=0):\n '''\n TODO: Fill in. Say Addressed and remove this TODO when finished.\n Input:\n Output:\n Method Description:\n '''\n splits = TimeSeriesSplit(n_splits=3)\n n_samples = int(len(X)/maxlength)\n if(DEBUG):\n print('n_samples: {}'.format(n_samples))\n trainX = []\n trainY = []\n index = 1\n for i in range(1, n_splits+1):\n train_index = int(i * n_samples / (n_splits + 1) + n_samples % (n_splits + 1))\n test_index = int(n_samples / (n_splits + 1))\n train_index = train_index*maxlength\n test_index = test_index*maxlength\n train = X[:train_index]\n test = X[train_index:train_index+test_index]\n trainX.append(train)\n trainY.append(test)\n if(DEBUG):\n print('Observations: %d' % (len(train) + len(test)))\n print('Training Observations: %d' % (len(train)))\n print('Testing Observations: %d' % (len(test)))\n if(DEBUG):\n print('len(trainX): {}'.format(len(trainX)))\n print('len(trainY): {}'.format(len(trainY)))\n\n # trainX should increase in size after each iteration\n for i in range(0, len(trainX)):\n print('trainX[{}].shape: {}'.format(i, trainX[i].shape))\n print('trainY[{}].shape: {}'.format(i, trainY[i].shape))\n return trainX, trainY\n\ndef ReadFileandTransform(filename, startGroupID=None):\n '''\n Addressed\n Input: file to read and startGroupID\n Output: [frameid_values, x_values, y_values, groupid_values] and endGroupID\n Method Description: takes in file, reads and splits it into frameid, x, y, groupid. To keep files in \n order, initializes startGroupId and appends it to groupID in file, and returns result as endGroupID\n '''\n file = open(filename, \"r\")\n x_values = []\n y_values = []\n frameid_values = []\n groupid_values = []\n endGroupID = 0\n if(startGroupID is None):\n startGroupID = 0\n else:\n startGroupID+=1\n for line in file: \n # Read files by comma demilimiter\n fields = line.split(\",\") \n frameid = int(fields[0])\n x = int(fields[2]) \n y = int(fields[3]) # transform data\n w = int(fields[4])\n h = int(fields[5])\n groupID = int(fields[1]) + startGroupID\n endGroupID = groupID\n Xt = int(x + w/2)\n Yt = int(y - h)\n x_values.append(Xt)\n y_values.append(Yt)\n frameid_values.append(frameid) \n groupid_values.append(groupID) \n\n output = [frameid_values, x_values, y_values, groupid_values]\n return output, endGroupID\n\ndef Normalize(train1,train4,val1,test3,test5):\n '''\n Addressed\n TODO: Fill in. 
Say Addressed and remove this TODO when finished.\n Input: train1, train4, val1, test3, test5\n Output: [frameid_values, x_scaled, y_scaled, groupid_values], scaler\n Method Description: appends x values of all files to array x_values, \n appends y_values of all files to array of y_values\n appends frameid of all files to array of frameid_values\n appends groupid of all files to array of groupid_values\n normzalizes x_values, y_values, frameid_values, groupid_values using scaler\n\n '''\n train1_xval = train1[:][1]\n train4_xval = train4[:][1]\n val1_xval = val1[:][1]\n test3_xval = test3[:][1]\n test5_xval = test5[:][1]\n x_values = train1_xval + train4_xval + val1_xval + test3_xval + test5_xval\n\n train1_yval = train1[:][2]\n train4_yval = train4[:][2]\n val1_yval = val1[:][2]\n test3_yval = test3[:][2]\n test5_yval = test5[:][2]\n y_values = train1_yval + train4_yval + val1_yval + test3_yval + test5_yval\n\n train1_frameid = train1[:][0]\n train4_frameid = train4[:][0]\n val1_frameid = val1[:][0]\n test3_frameid = test3[:][0]\n test5_frameid = test5[:][0]\n frameid_values = train1_frameid + train4_frameid + val1_frameid + test3_frameid + test5_frameid\n\n train1_groupid = train1[:][3]\n train4_groupid = train4[:][3]\n val1_groupid = val1[:][3]\n test3_groupid = test3[:][3]\n test5_groupid = test5[:][3]\n groupid_values = train1_groupid + train4_groupid + val1_groupid + test3_groupid + test5_groupid\n\n #create arrays of x and y values\n x_values= np.array(x_values) \n y_values= np.array(y_values)\n\n scaler = MinMaxScaler(feature_range=(0, 1))\n x_scaled = scaler.fit_transform(x_values.reshape(-1, 1))\n y_scaled = scaler.fit_transform(y_values.reshape(-1, 1))\n frameid_values = np.array(frameid_values).reshape(-1, 1)\n groupid_values = np.array(groupid_values).reshape(-1, 1)\n\n output = [frameid_values, x_scaled, y_scaled, groupid_values] #result is [frameid, normalized x values, normalized y values, groupid]\n return output, scaler \n\ndef Breakupdata(data, train_length, val_length, test_length):\n '''\n TODO: Fill in. 
Say Addressed and remove this TODO when finished.\n Input: data, train_length, val_length, test_length\n Output: train, val, test\n Method Description: Breaks up the train, val, and test data using the lengths \n each data set has [frameid, x, y]\n '''\n train = [data[:][0][0:train_length],data[:][1][0:train_length],data[:][2][0:train_length],data[:][3][0:train_length]]\n val = [data[:][0][train_length:train_length+val_length],data[:][1][train_length:train_length+val_length],data[:][2][train_length:train_length+val_length],data[:][3][train_length:train_length+val_length]]\n test = [data[:][0][train_length+val_length:],data[:][1][train_length+val_length:],data[:][2][train_length+val_length:],data[:][3][train_length+val_length:] ]\n return train, val, test\n \ndef RearrangebyGroupID(data, n_features, T_OBSERVE, T_PREDICT,groups_per_obs=None):\n '''\n Addressed\n Input: \n data: Sequence of observations as a list or NumPy array\n n_features: how many features model will observe\n T_OBSERVE:how many to observe\n T_PREDICT: how many to predict ,\n groups_per_obs: How many groups of observation in data\n Output: data: all groups made same length and run series_to_supervised on it\n maxlength: length of the longest group\n\n Method Description: Make all data the same length to be able to frame a time series as a supervised learning dataset\n '''\n maxlength = 0\n output = []\n temp = []\n all_groups = []\n if groups_per_obs is None:\n groups_per_obs = 1\n\n uniqueGID = np.unique(data[:][3])\n for i in range(0, len(uniqueGID)):\n u_value = uniqueGID[i]\n idx = np.where(data[:][3]==u_value)[0]\n group_i = []\n \n for j in range(0,len(idx)):\n row = [data[0][idx[j]][0],data[1][idx[j]][0],data[2][idx[j]][0],data[3][idx[j]][0]]\n group_i.append(row)\n temp.append(group_i)\n # Get the length of the longest group\n if(len(group_i)>maxlength): \n maxlength = len(group_i)\n \n # Pad the data of the other groups so that all have same length\n for i in range(0,len(temp)): \n group = np.zeros((maxlength,n_features))\n curr_group = temp[i]\n for j in range(0, len(curr_group)):\n group[j,0] = curr_group[j][1] # x\n group[j,1] = curr_group[j][2] # y\n if(i ==0):\n all_groups = group\n else:\n all_groups = np.append(all_groups,group,axis=1)\n\n # Pad remaining groups with zeros\n remaining_groups = int(((len(uniqueGID) * n_features) % groups_per_obs)/2)\n for i in range(0, remaining_groups):\n group = np.zeros((maxlength,n_features))\n all_groups = np.append(all_groups,group,axis=1)\n\n x = int(maxlength * (remaining_groups+len(uniqueGID))/(groups_per_obs))\n all_groups = np.reshape(all_groups,(x, groups_per_obs*n_features))\n\n # Convert data for supervised learning\n data = series_to_supervised(all_groups, T_OBSERVE, T_PREDICT)\n\n return data, maxlength\n\n\ndef series_to_supervised(data, n_in=1, n_out=1, dropnan=True):\n '''\n TODO: Fill in. Say Addressed and remove this TODO when finished.\n Input:\n data: Sequence of observations as a list or NumPy array.\n\t\tn_in: Number of lag observations as input (X).\n\t\tn_out: Number of observations as output (y).\n\t\tdropnan: Boolean whether or not to drop rows with NaN values.\n Output: Pandas DataFrame of series framed for supervised learning.\n Method Description: Frame a time series as a supervised learning dataset.\n\n '''\n n_vars = 1 if type(data) is list else data.shape[1]\n df = DataFrame(data)\n cols, names = list(), list()\n # input sequence (t-n, ... 
t-1)\n for i in range(n_in, 0, -1):\n cols.append(df.shift(i))\n names += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)]\n # forecast sequence (t, t+1, ... t+n)\n for i in range(0, n_out):\n cols.append(df.shift(-i))\n if i == 0:\n names += [('var%d(t)' % (j+1)) for j in range(n_vars)]\n else:\n names += [('var%d(t+%d)' % (j+1, i)) for j in range(n_vars)]\n # put it all together\n agg = concat(cols, axis=1)\n agg.columns = names\n # drop rows with NaN values\n if dropnan:\n agg.dropna(inplace=True)\n return agg\n\ndef evaluate_model(trainX, trainY, valX, valY, testX, testY, model, T_OBSERVE, T_PREDICT, n_features, scaler):\n # Make prediction\n yhat = model.predict(testX)\n xhat = model.predict(trainX)\n \n if(DEBUG):\n pd.DataFrame(xhat).to_csv(\"DEBUG/xhat.csv\")\n pd.DataFrame(yhat).to_csv(\"DEBUG/yhat.csv\")\n\n # Invert predictions\n yhat_rescaled = scaler.inverse_transform(yhat)\n xhat_rescaled = scaler.inverse_transform(xhat)\n\n # Reshape data to its previous shape\n trainX = trainX.reshape((trainX.shape[0], T_OBSERVE*n_features))\n trainY = trainY.reshape((trainY.shape[0], T_PREDICT*n_features))\n\n valX = valX.reshape((valX.shape[0], T_OBSERVE*n_features))\n valY = valY.reshape((valY.shape[0], T_PREDICT*n_features))\n\n testX = testX.reshape((testX.shape[0], T_OBSERVE*n_features))\n testY = testY.reshape((testY.shape[0], T_PREDICT*n_features))\n\n if(DEBUG):\n pd.DataFrame(trainX).to_csv(\"DEBUG/trainX.csv\")\n pd.DataFrame(trainY).to_csv(\"DEBUG/trainY.csv\")\n\n pd.DataFrame(valX).to_csv(\"DEBUG/valX.csv\")\n pd.DataFrame(valY).to_csv(\"DEBUG/valY.csv\")\n\n pd.DataFrame(testX).to_csv(\"DEBUG/testX.csv\")\n pd.DataFrame(testY).to_csv(\"DEBUG/testY.csv\")\n\n # Invert data to original scale\n trainX_rescaled = scaler.inverse_transform(trainX)\n trainY_rescaled = scaler.inverse_transform(trainY)\n testX_rescaled = scaler.inverse_transform(testX)\n testY_rescaled = scaler.inverse_transform(testY)\n\n # Evaluate Model on Test Data\n rmse = evaluate_forecasts_MSE(testX_rescaled, yhat_rescaled, T_OBSERVE, T_PREDICT)\n fde = evaluate_forecasts_FED(testX_rescaled, yhat_rescaled, T_OBSERVE, T_PREDICT)\n return rmse, fde\n\n# evaluate the MSE for each forecast time step\ndef evaluate_forecasts_MSE(test, forecasts, n_lag, n_seq):\n mse = 0\n result = []\n for i in range(0, n_seq):\n actual = test[:,(n_lag+i)]\n predicted = [forecast[i] for forecast in forecasts]\n mse = mean_squared_error(actual, predicted)\n result.append(mse)\n print('t+%d MSE: %f' % ((i+1), mse))\n print('result: {}'.format(result))\n return mse\n\n# evaluate the FED for each forecast time step\ndef evaluate_forecasts_FED(test, forecasts, n_lag, n_seq):\n fed = 0\n for i in range(0, n_seq):\n actual = test[:,(n_lag+i)]\n predicted = [forecast[i] for forecast in forecasts]\n fed = predicted-actual # TODO Fix this\n print('fed: {}'.format(fed))\n return fed\n\ndef RunLSTM(model_name, trainX, trainY, valX, valY, testX, testY, T_PREDICT,\n n_neurons=1, n_epochs=1, n_batch=1, outputModel='models/default_lstm_model.json',\n loss='mae', optimizer='adam',weights_name='model_weights/lstm_model.h5'):\n '''\n TODO: Fill in. 
Say Addressed and remove this TODO when finished.\n Input:\n Output:\n Method Description:\n '''\n if(not path.exists(model_name)):\n # Design the Network\n model = Sequential()\n model.add(LSTM(n_neurons, return_sequences=False, input_shape=(trainX.shape[1], trainX.shape[2])))\n \n model.add(Dense(T_PREDICT*n_features))\n model.compile(loss=loss, optimizer=optimizer)\n \n # Fit Network\n history = model.fit(trainX, trainY, epochs=n_epochs, batch_size=n_batch, validation_data=(valX, valY), verbose=2, shuffle=False)\n\n #print('history: {}'.format(history))\n\n # Evaluate the model\n scores = model.evaluate(testX, testY, verbose=0)\n #print('len(model.metrics_names): {}, model.metrics_names: {}, scores: {}'.format(len(model.metrics_names),model.metrics_names,scores))\n print(\"Testing Set %s: %.2f%%\" % (model.metrics_names[0], scores*100)) # ERROR\n \n # Serialize model to JSON\n model_json = model.to_json()\n with open(outputModel, \"w\") as json_file:\n json_file.write(model_json)\n \n # Serialize weights to HDF5\n model.save_weights(weights_name)\n print(\"Saved model to disk\")\n else:\n # Load json and create model\n json_file = open(outputModel, 'r')\n loaded_model_json = json_file.read()\n json_file.close()\n model = model_from_json(loaded_model_json)\n\n # Load weights into new model\n model.load_weights(weights_name)\n print(\"Loaded model from disk\")\n\n # compile model\n model.compile(loss=loss, optimizer=optimizer)\n score = model.evaluate(testX, testY, verbose=0)\n print(\"%s: %.2f%%\" % (model.metrics_names[0], score*100))\n return model\n\ndef RunGRU(model_name, trainX, trainY, valX, valY, testX, testY, T_PREDICT,\n n_neurons=10, n_epochs=1, n_batch=1, outputModel='models/default_gru_model.json', \n loss='mae', optimizer='adam', weights_name='model_weights/gru_model.h5'):\n '''\n TODO: Fill in. 
Say Addressed and remove this TODO when finished.\n Input:\n Output:\n Method Description:\n '''\n if(not path.exists(model_name)):\n # Design the Network\n model = Sequential()\n #model.add(GRU(n_neurons, return_sequences=False, input_shape=(trainX.shape[1], trainX.shape[2])))\n \n model.add(GRU(n_neurons, input_shape = (trainX.shape[1], trainX.shape[2]), return_sequences = True))\n model.add(GRU(1, return_sequences = False))\n model.add(Activation('sigmoid'))\n \n #model.compile(loss = 'binary_crossentropy', optimizer = 'adam', metrics = ['accuracy'])\n\n model.add(Dense(T_PREDICT*n_features))\n model.compile(loss=loss, optimizer=optimizer)\n \n # Fit Network\n history = model.fit(trainX, trainY, epochs=n_epochs, batch_size=n_batch, validation_data=(valX, valY), verbose=2, shuffle=False)\n \n # Plot History\n #print('history: {}'.format(history))\n\n # Evaluate the model\n scores = model.evaluate(testX, testY, verbose=0)\n #print('len(model.metrics_names): {}, model.metrics_names: {}, scores: {}'.format(len(model.metrics_names),model.metrics_names,scores))\n print(\"Testing Set %s: %.2f%%\" % (model.metrics_names[0], scores*100)) # ERROR\n \n # Serialize model to JSON\n model_json = model.to_json()\n with open(outputModel, \"w\") as json_file:\n json_file.write(model_json)\n \n # Serialize weights to HDF5\n model.save_weights(weights_name)\n print(\"Saved model to disk\")\n else:\n # Load json and create model\n json_file = open(outputModel, 'r')\n loaded_model_json = json_file.read()\n json_file.close()\n model = model_from_json(loaded_model_json)\n\n # Load weights into new model\n model.load_weights(weights_name)\n print(\"Loaded model from disk\")\n\n # compile model\n model.compile(loss='mae', optimizer='adam')\n score = model.evaluate(testX, testY, verbose=0)\n print(\"%s: %.2f%%\" % (model.metrics_names[0], score*100))\n return model\n\n# In progress\ndef plot_data(data, startIdx, endIdx, maxlength, T_OBSERVE, T_PREDICT,mode=None):\n '''\n TODO: Fill in. Say Addressed and remove this TODO when finished.\n Input:\n Output:\n Method Description:\n '''\n if(mode is not None):\n groups = []\n if(mode == 'train'):\n for i in range(0, T_OBSERVE):\n groups.append(i)\n elif(mode == 'test'):\n for i in range(0, T_PREDICT):\n groups.append(i)\n else:\n print(\"Invalid model provide. Options include: 'train' or 'test'\")\n return\n print('groups: {}'.format(groups))\n titlename = ['x','y']\n i = 1\n # plot each column\n pyplot.figure()\n length = endIdx\n for group in groups:\n print('group: {}'.format(group))\n pyplot.subplot(len(groups), 1, i)\n pyplot.plot(data[startIdx:length, 0])\n length = length + endIdx - startIdx\n startIdx = startIdx + endIdx - startIdx\n #print('data[startIdx:endIdx, group]: {}'.format(data[startIdx:endIdx, group]))\n #print('size(data[startIdx:endIdx, group]): {}'.format(np.shape(np.array(data[startIdx:endIdx, group]))))\n #print('len(data[group,:]): {}'.format(len(data[:,group])))\n pyplot.title(titlename[group%2], y=1.0, loc='right')\n i += 1\n pyplot.show()" } ]
3
anuagarwal1409/Video-Summariser
https://github.com/anuagarwal1409/Video-Summariser
a9aa599ca0a8f3934874ba5f3b374f2be3d5e92a
15da6be56c990f2eae58e51abec25c65acb6d435
05e9c010f58869b710e4b3aee2ffacfe43418ca4
refs/heads/main
2023-05-06T19:50:38.840084
2021-05-27T08:45:39
2021-05-27T08:45:39
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6228892803192139, "alphanum_fraction": 0.6463414430618286, "avg_line_length": 24.04878044128418, "blob_id": "26ade9ca1402b5cab63b96c8e34a831c9aa85b49", "content_id": "d4ef5386185ebf124a42ff62b2faf663bff8118d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1066, "license_type": "no_license", "max_line_length": 74, "num_lines": 41, "path": "/ui.py", "repo_name": "anuagarwal1409/Video-Summariser", "src_encoding": "UTF-8", "text": "import streamlit as st\r\nimport os\r\nimport subprocess \r\nimport time\r\nimport random\r\nimport plotly.express as px\r\nimport pandas as pd\r\n\r\ndef radar_chart(): \r\n df = pd.DataFrame(dict(\r\n r=[random.randint(0,22),\r\n random.randint(0,22),\r\n random.randint(0,22),\r\n random.randint(0,22),\r\n random.randint(0,22)],\r\n theta=['processing cost','mechanical properties','chemical stability',\r\n 'thermal stability', 'device integration']))\r\n fig = px.line_polar(df, r='r', theta='theta', line_close=True)\r\n placeholder.write(fig)\r\n\r\n\r\nplaceholder = st.empty()\r\nstart_button = st.empty()\r\n\r\nst.title('Video Summarizer App')\r\ninput_url = st.text_input(\"Youtube Video Link\")\r\nos.system(\"python sum.py -u \"+input_url)\r\n\r\n\r\nlatest_iteration = st.empty()\r\nbar = st.progress(0)\r\nfor i in range(100):\r\n# Update the progress bar with each iteration.\r\n latest_iteration.text(f'## Finishing..{i+1}')\r\n bar.progress(i + 1)\r\n time.sleep(1)\r\n\r\n\r\nvideo_file = open('1_1.mp4', 'rb')\r\nvideo_bytes = video_file.read()\r\nst.video(video_bytes)" }, { "alpha_fraction": 0.42818427085876465, "alphanum_fraction": 0.6720867156982422, "avg_line_length": 14.375, "blob_id": "c580ad5c583792ca3e90d88aa5f83ececbfd05c6", "content_id": "f965c49d1fe883222ac8b186878bbe0486323df7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 369, "license_type": "no_license", "max_line_length": 21, "num_lines": 24, "path": "/requirements.txt", "repo_name": "anuagarwal1409/Video-Summariser", "src_encoding": "UTF-8", "text": "breadability==0.1.20\ncertifi==2017.7.27.1\nchardet==3.0.4\ndecorator==4.0.11\ndocopt==0.6.2\nidna==2.6\nimageio==2.1.2\nlxml==4.6.2\nmoviepy==0.2.3.2\nnltk==3.5\nnumpy==1.19.5\nolefile==0.46\nPillow==8.1.0\npysrt==1.1.2\npytube==10.4.1\npandas==1.1.5\nplotly-express==0.4.1\nrequests==2.25.1\nsix==1.15.0\nsumy==0.8.1\ntqdm==4.11.2\nurllib3==1.26.3\nyoutube-dl==2021.2.10\nstreamlit==0.76.0\n" } ]
2
Pulkit12dhingra/Clicking-Game
https://github.com/Pulkit12dhingra/Clicking-Game
135792e502ebf8c734403a4138e30a3232c6518d
f5f62a131b1d1a9b9a9f541b86fa3bd7d8f45775
ae76b303b63eea45f0ec21add20c70978344c6de
refs/heads/master
2022-11-30T07:58:48.329132
2020-08-15T12:09:27
2020-08-15T12:09:27
287,739,244
2
0
null
null
null
null
null
[ { "alpha_fraction": 0.7671464681625366, "alphanum_fraction": 0.773073673248291, "avg_line_length": 57.900001525878906, "blob_id": "9f9bce5a3e520d0bcf606a6aa1e4d538981d9b34", "content_id": "42263adbba58183ac51ccc1e788bc7fd71c1d16e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1181, "license_type": "no_license", "max_line_length": 181, "num_lines": 20, "path": "/README.md", "repo_name": "Pulkit12dhingra/Clicking-Game", "src_encoding": "UTF-8", "text": " \n# Clicker\nA cool Clicking game using python\n\nI bought my first laptop last month. It was really difficult to set my hands on the touchpad as wellas keyboard. In order to solve this problem I came up with a creative solution.\n\nPresenting clicker.....\nA cool clicking game that checks you control over your touchpad. Click the correct block to get the points. each wrong click will result in points deduction.\n\nAgain I thank the vast library support of python that enabled me to complete this project. Please make sure you have these libraries installed in you device before running the code.\n\n1. Functools:- Documentation:-https://docs.python.org/3/library/functools.html\n2. Random:- Documentation:-https://docs.python.org/3/library/random.html\n3. Tkinter:- Documentation:-https://docs.python.org/3/library/tk.html\nPlay and practice as much as you can and Make sure that you are running this project on Python 3.\n\nI didn't use any sort of database to store the scores but it may be useful to do so in order to incerase the practicality and effeciency of the project.\n\nI learned a lot while making this project and I am sure it will teach you the same.\n\n## Enjoyyy............ :)\n" }, { "alpha_fraction": 0.5742686986923218, "alphanum_fraction": 0.6288464069366455, "avg_line_length": 37.67336654663086, "blob_id": "1b5247701c972dea7e9bf80d7db0721f0e8f3041", "content_id": "7f5dd9942e405ade3019208fbee2c24e68f235f3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7897, "license_type": "no_license", "max_line_length": 114, "num_lines": 199, "path": "/project2.py", "repo_name": "Pulkit12dhingra/Clicking-Game", "src_encoding": "UTF-8", "text": "from tkinter import *\r\nimport random\r\nimport time\r\n\r\n\r\nscr=Tk()\r\nscr.geometry(\"1600x800+0+0\")\r\nscr.title(\"Resaurant Management System\")\r\n\r\ntext_Input=StringVar()\r\noperator=\"\"\r\n\r\nTops=Frame(scr,width=1600,bg=\"green\",relief=SUNKEN)\r\nTops.pack(side=TOP)\r\n\r\nf1=Frame(scr,width=800,height=700,relief=SUNKEN)\r\nf1.pack(side=LEFT)\r\n\r\nf2=Frame(scr,width=300,height=700,bg=\"green\",relief=SUNKEN)\r\nf2.pack(side=RIGHT)\r\n#--------------------------time\r\nlocaltime=time.asctime(time.localtime(time.time()))\r\n#---------------------info\r\n\r\nlblInfo=Label(Tops,font=(\"default\",50),text=\"Restaurant Management System\",fg=\"blue\",bd=10,anchor='w')\r\nlblInfo.grid(row=0,column=0)\r\nlblInfo=Label(Tops,font=(\"default\",20),text=localtime,fg=\"blue\",bd=10,anchor='w')\r\nlblInfo.grid(row=1,column=0)\r\n#----------------------calculator\r\ndef btnClick(numbers):\r\n global operator\r\n operator=operator + str(numbers)\r\n text_Input.set(operator)\r\n\r\ndef btnClearDisplay():\r\n global operator\r\n operator=\"\"\r\n text_Input.set(\"\")\r\ndef btnEqualsInput():\r\n global operator\r\n sumup=str(eval(operator))\r\n text_Input.set(sumup)\r\n operator=\"\"\r\n\r\n\r\ndef Ref():\r\n x=random.randint(100,500)\r\n randomRef=str(x)\r\n 
rand.set(randomRef)\r\n\r\ndef qExit():\r\n scr.destroy()\r\n\r\ndef Reset():\r\n rand.set(\"\")\r\n Fries.set(\"\")\r\n Burger.set(\"\")\r\n Momos.set(\"\")\r\n Pancake.set(\"\")\r\n Drinks.set(\"\")\r\n Noodles.set(\"\")\r\n Cakes.set(\"\")\r\n Chips.set(\"\")\r\n Sweets.set(\"\")\r\n\r\n \r\ntxtDisplay=Entry(f2,font=(\"arial\",20),textvariable=text_Input,bd=30,insertwidth=4,bg=\"steel blue\",justify='right')\r\ntxtDisplay.grid(columnspan=4)\r\n\r\nbtn7=Button(f2,padx=16,pady=16,bd=8,fg=\"black\",font=(\"default\",20),text=\"7\",\r\n bg=\"powderblue\",command=lambda:btnClick(7)).grid(row=2,column=0)\r\nbtn8=Button(f2,padx=16,pady=16,bd=8,fg=\"black\",font=(\"default\",20),text=\"8\",\r\n bg=\"powderblue\",command=lambda:btnClick(8)).grid(row=2,column=1)\r\nbtn9=Button(f2,padx=16,pady=16,bd=8,fg=\"black\",font=(\"default\",20),text=\"9\",\r\n bg=\"powderblue\",command=lambda:btnClick(9)).grid(row=2,column=2)\r\n\r\nAddition=Button(f2,padx=16,pady=16,bd=8,fg=\"black\",font=(\"default\",20),text=\"+\",\r\n bg=\"powderblue\",command=lambda:btnClick(\"+\")).grid(row=2,column=3)\r\n\r\n#------------------------------------\r\n\r\nbtn4=Button(f2,padx=16,pady=16,bd=8,fg=\"black\",font=(\"default\",20),text=\"4\",\r\n bg=\"powderblue\",command=lambda:btnClick(4)).grid(row=3,column=0)\r\nbtn5=Button(f2,padx=16,pady=16,bd=8,fg=\"black\",font=(\"default\",20),text=\"5\",\r\n bg=\"powderblue\",command=lambda:btnClick(5)).grid(row=3,column=1)\r\nbtn6=Button(f2,padx=16,pady=16,bd=8,fg=\"black\",font=(\"default\",20),text=\"6\",\r\n bg=\"powderblue\",command=lambda:btnClick(6)).grid(row=3,column=2)\r\n\r\nSubraction=Button(f2,padx=16,pady=16,bd=8,fg=\"black\",font=(\"default\",20),text=\"-\",\r\n bg=\"powderblue\",command=lambda:btnClick(\"-\")).grid(row=3,column=3)\r\n#--------------------------------------------------\r\n\r\n\r\nbtn1=Button(f2,padx=16,pady=16,bd=8,fg=\"black\",font=(\"default\",20),text=\"1\",\r\n bg=\"powderblue\",command=lambda:btnClick(1)).grid(row=4,column=0)\r\nbtn2=Button(f2,padx=16,pady=16,bd=8,fg=\"black\",font=(\"default\",20),text=\"2\",\r\n bg=\"powderblue\",command=lambda:btnClick(2)).grid(row=4,column=1)\r\nbtn3=Button(f2,padx=16,pady=16,bd=8,fg=\"black\",font=(\"default\",20),text=\"3\",\r\n bg=\"powderblue\",command=lambda:btnClick(3)).grid(row=4,column=2)\r\n\r\nMultiply=Button(f2,padx=16,pady=16,bd=8,fg=\"black\",font=(\"default\",20),text=\"*\",\r\n bg=\"powderblue\",command=lambda:btnClick(\"*\")).grid(row=4,column=3)\r\n\r\n#------------------------------------------------------\r\n\r\nbtn0=Button(f2,padx=16,pady=16,bd=8,fg=\"black\",font=(\"default\",20),text=\"0\",\r\n bg=\"powderblue\",command=lambda:btnClick(0)).grid(row=5,column=0)\r\nbtnClear=Button(f2,padx=16,pady=16,bd=8,fg=\"black\",font=(\"default\",20),text=\"C\",\r\n bg=\"powderblue\",command=btnClearDisplay).grid(row=5,column=1)\r\nbtnEquals=Button(f2,padx=16,pady=16,bd=8,fg=\"black\",font=(\"default\",20),text=\"=\",\r\n bg=\"powderblue\",command=btnEqualsInput).grid(row=5,column=2)\r\n\r\nDivision=Button(f2,padx=16,pady=16,bd=8,fg=\"black\",font=(\"default\",20),text=\"/\",\r\n bg=\"powderblue\",command=lambda:btnClick(\"/\")).grid(row=5,column=3)\r\n\r\n#----------------------------------restaurant 
-------------\r\nrand=StringVar()\r\nFries=StringVar()\r\nBurger=StringVar()\r\nMomos=StringVar()\r\nPancake=StringVar()\r\nDrinks=StringVar()\r\nNoodles=StringVar()\r\nCakes=StringVar()\r\nChips=StringVar()\r\nSweets=StringVar()\r\n\r\nlblReference=Label(f1,font=(\"default\",16),text=\"Reference\",bd=16,anchor='w')\r\nlblReference.grid(row=0,column=0)\r\ntxtReference=Entry(f1,font=(\"default\",16),textvariable=rand,bd=10,insertwidth=4,\r\n bg=\"powder blue\",justify='right')\r\ntxtReference.grid(row=0,column=1)\r\n\r\nlblFries=Label(f1,font=(\"default\",16),text=\"Fries\",bd=16,anchor='w')\r\nlblFries.grid(row=1,column=0)\r\ntxtFries=Entry(f1,font=(\"default\",16),textvariable=Fries,bd=10,insertwidth=4,\r\n bg=\"powder blue\",justify='right')\r\ntxtFries.grid(row=1,column=1)\r\n\r\nlblBurger=Label(f1,font=(\"default\",16),text=\"Burger\",bd=16,anchor='w')\r\nlblBurger.grid(row=2,column=0)\r\ntxtBurger=Entry(f1,font=(\"default\",16),textvariable=Burger,bd=10,insertwidth=4,\r\n bg=\"powder blue\",justify='right')\r\ntxtBurger.grid(row=2,column=1)\r\n\r\nlblMomos=Label(f1,font=(\"default\",16),text=\"Momos\",bd=16,anchor='w')\r\nlblMomos.grid(row=3,column=0)\r\ntxtMomos=Entry(f1,font=(\"default\",16),textvariable=Momos,bd=10,insertwidth=4,\r\n bg=\"powder blue\",justify='right')\r\ntxtMomos.grid(row=3,column=1)\r\n\r\nlblPancake=Label(f1,font=(\"default\",16),text=\"Pancake\",bd=16,anchor='w')\r\nlblPancake.grid(row=4,column=0)\r\ntxtPancake=Entry(f1,font=(\"default\",16),textvariable=Pancake,bd=10,insertwidth=4,\r\n bg=\"powder blue\",justify='right')\r\ntxtPancake.grid(row=4,column=1)\r\n\r\n\r\n#------------------------------2--------\r\n\r\n\r\nlblDrinks=Label(f1,font=(\"default\",16),text=\"Drinks\",bd=16,anchor='w')\r\nlblDrinks.grid(row=0,column=2)\r\ntxtDrinks=Entry(f1,font=(\"default\",16),textvariable=Drinks,bd=10,insertwidth=4,\r\n bg=\"powder blue\",justify='left')\r\ntxtDrinks.grid(row=0,column=3)\r\n\r\n\r\nlblNoodles=Label(f1,font=(\"default\",16),text=\"Noodles\",bd=16,anchor='w')\r\nlblNoodles.grid(row=1,column=2)\r\ntxtNoodles=Entry(f1,font=(\"default\",16),textvariable=Noodles,bd=10,insertwidth=4,\r\n bg=\"powder blue\",justify='right')\r\ntxtNoodles.grid(row=1,column=3)\r\n\r\nlblCakes=Label(f1,font=(\"default\",16),text=\"Cakes\",bd=16,anchor='w')\r\nlblCakes.grid(row=2,column=2)\r\ntxtCakes=Entry(f1,font=(\"default\",16),textvariable=Cakes,bd=10,insertwidth=4,\r\n bg=\"powder blue\",justify='right')\r\ntxtCakes.grid(row=2,column=3)\r\n\r\nlblChips=Label(f1,font=(\"default\",16),text=\"Chips\",bd=16,anchor='w')\r\nlblChips.grid(row=3,column=2)\r\ntxtChips=Entry(f1,font=(\"default\",16),textvariable=Chips,bd=10,insertwidth=4,\r\n bg=\"powder blue\",justify='right')\r\ntxtChips.grid(row=3,column=3)\r\n\r\nlblSweets=Label(f1,font=(\"default\",16),text=\"Sweets\",bd=16,anchor='w')\r\nlblSweets.grid(row=4,column=2)\r\ntxtSweets=Entry(f1,font=(\"default\",16),textvariable=Sweets,bd=10,insertwidth=4,\r\n bg=\"powder blue\",justify='right')\r\ntxtSweets.grid(row=4,column=3)\r\n\r\n#----------------------------------------------------------\r\nbtnTotal=Button(f1,padx=16,pady=8,bd=16,fg=\"black\",font=(\"default\",16),width=10,\r\n text=\"Total\",bg=\"powder blue\",command=Ref).grid(row=7,column=1)\r\nbtnReset=Button(f1,padx=16,pady=8,bd=16,fg=\"black\",font=(\"default\",16),width=10,\r\n text=\"Reset\",bg=\"powder blue\",command=Reset).grid(row=7,column=2)\r\nbtnExit=Button(f1,padx=16,pady=8,bd=16,fg=\"black\",font=(\"default\",16),width=10,\r\n text=\"Exit\",bg=\"powder 
blue\",command=qExit).grid(row=7,column=3)\r\n\r\n" } ]
2
ChenluLiu/PythonLearnS2
https://github.com/ChenluLiu/PythonLearnS2
cafeac6169f265a198197b961a96c3f4f8ffe006
3eaf3484b0e2d921d75a41697037313babaeb2db
2e82bddc538f51b78d1492605b5f91ab45132dab
refs/heads/master
2023-02-21T23:34:43.366037
2021-01-19T08:17:47
2021-01-19T08:17:47
327,845,300
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6115942001342773, "alphanum_fraction": 0.6289855241775513, "avg_line_length": 17.210525512695312, "blob_id": "a052179b2c873dff0bcd846fd4d9a741d39c4513", "content_id": "8103e2898189a1892e6743c67e129e70017204fd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 351, "license_type": "no_license", "max_line_length": 37, "num_lines": 19, "path": "/chapter04_Functions.py", "repo_name": "ChenluLiu/PythonLearnS2", "src_encoding": "UTF-8", "text": "import turtle\n\ndef make_window(color, title):\n w = turtle.Screen()\n w.bgcolor(color)\n w.title(title)\n return w\n\ndef make_turtle(color, size):\n t = turtle.Turtle()\n t.color(color)\n t.pensize(size)\n return t\n\nwn = make_window(\"pink\",\"chapter_04\")\na_1 = make_turtle(\"orange\", 5)\na_2 = make_turtle(\"white\", 2)\n\n# Exercise 待补充" }, { "alpha_fraction": 0.6020408272743225, "alphanum_fraction": 0.647230327129364, "avg_line_length": 18.898550033569336, "blob_id": "4ebb9755eee0ecad459051ae71850848bbc13553", "content_id": "f92013e30e22902667a8767980c13c4335eb80ec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1480, "license_type": "no_license", "max_line_length": 91, "num_lines": 69, "path": "/chapter12_Modules.py", "repo_name": "ChenluLiu/PythonLearnS2", "src_encoding": "UTF-8", "text": "# module random\nimport random\nrng = random.Random()\ndice_throw = rng.randrange(1,7)\ndelay_in_seconds = rng.random()\nr_odd = rng.randrange(1,100,2) # 1-99中的奇数(间隔2)\ncards = list(range(52)) # int from 0 - 51\nrng.shuffle(cards) # 随机打乱顺序\nprint(cards)\n\ndef make_random_ints(num, lower, upper):\n rng = random.Random()\n result = []\n for i in range(num):\n result.append(rng.randrange(lower, upper))\n return result\nlist_a = make_random_ints(5,200,300)\nprint(list_a)\n\n# module time\nimport time\n\ndef do_my_sum(xs):\n sum = 0\n for v in xs:\n sum += v\n return sum\n\nsz = 10000000\ntestdata = range(sz)\nt0 = time.clock()\nmy_result = do_my_sum(testdata)\nt1 = time.clock()\nprint('my_result = {0} (time taken = {1:.4f} seconds)'.format(my_result, t1-t0))\n\nt2 = time.clock()\ntheir_result = sum(testdata)\nt3 = time.clock()\nprint('their_result = {0} (time taken = {1:.4f} seconds)'.format(their_result, t3-t2))\n\n# module math\nimport math\npi = math.pi\nprint(pi)\ne = math.e\nprint(e)\nsquare = math.sqrt(2.0)\nprint(square)\nradians = math.radians(90) # 最好用radian来表示角度,而不是degree\nprint(radians)\n\n# 变量的定义位置\nn = 10\nm = 3\ndef f(n):\n m = 7 #在函数内使用m=7\n return 2*n+m\nprint(f(5),n,m) #在整体使用m=3\n\n# module calendar\nimport calendar\ncal = calendar.TextCalendar()\nprint(cal)\ncal.pryear(2012)\nprint(cal)\n\n# module copy\nimport copy\n# 都有什么?能做什么?" 
}, { "alpha_fraction": 0.6402188539505005, "alphanum_fraction": 0.6429548859596252, "avg_line_length": 22.580644607543945, "blob_id": "e06adea3e7bcb291c75573c502f23de75cd305aa", "content_id": "83c1c6b06de2341e92d0bf1d2072bbc3af5ffecf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 761, "license_type": "no_license", "max_line_length": 89, "num_lines": 31, "path": "/chapter13_Files.py", "repo_name": "ChenluLiu/PythonLearnS2", "src_encoding": "UTF-8", "text": "myfile = open('test.txt','w')\nmyfile.write('my first file written from python\\n')\nmyfile.write('---------------------------------\\n')\nmyfile.write('Hello, world!\\n')\nmyfile.close()\n\nmynewhandle = open('test.txt','r')\nwhile True:\n theline = mynewhandle.readline()\n if len(theline) == 0:\n break\n\n print(theline, end='')\n\nmynewhandle.close()\n\nf = open('test.txt') # 绝对地址:\"C:\\\\temp\\\\somefile.txt\" 相对地址:\"/home/jimmy/somefile.txt\"\ncontent = f.read()\nf.close()\n\nwords = content.split()\nprint(f\"there are {len(words)} words in the file.\") # 为啥是9个?\n\n\n# copy files from URL\nimport urllib.request\n\nurl = 'http://www.qingfan.com'\ndestination_filename = 'baidu.txt'\n\nurllib.request.urlretrieve(url, destination_filename)\n" }, { "alpha_fraction": 0.560606062412262, "alphanum_fraction": 0.5735930800437927, "avg_line_length": 13.935483932495117, "blob_id": "27bd51be4f8ac71b61c02cca2a02e659f0ee8bd1", "content_id": "06baeb25304564eebaa2656f503d6f4e8da846b5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 462, "license_type": "no_license", "max_line_length": 29, "num_lines": 31, "path": "/chapter08_Strings.py", "repo_name": "ChenluLiu/PythonLearnS2", "src_encoding": "UTF-8", "text": "fruit = 'banana'\nprint(list(enumerate(fruit)))\nprint(len(fruit))\nlength = len(fruit)\nlast = fruit[length-1]\nprint(last)\n\nfor i in range(6):\n print(fruit[i])\n\na = 0\nwhile a < length:\n print(fruit[a])\n a += 1\n\nfor x in fruit:\n print(x)\n\n# in, not in, find\ndef count_a(text):\n count = 0\n for c in text:\n if c == 'a':\n count += 1\n return(count)\nprint(count_a('banana'))\n\n# split\na = \"well I never did it\"\nb = a.split()\nprint(b)" }, { "alpha_fraction": 0.5421585440635681, "alphanum_fraction": 0.5640809535980225, "avg_line_length": 16.188405990600586, "blob_id": "4c593a7f23121f03e662d6701e3059d5347eefac", "content_id": "e8aedebbcbddfb583a92b9717adfc89f61db0e4e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1186, "license_type": "no_license", "max_line_length": 60, "num_lines": 69, "path": "/chapter10_Ex_notSolved.py", "repo_name": "ChenluLiu/PythonLearnS2", "src_encoding": "UTF-8", "text": "import turtle\n\nwn = turtle.Screen()\nwn.bgcolor(\"pink\")\n\ntess =turtle.Turtle()\ntess.pencolor('black')\ntess.pensize(5)\ntess.shape(\"circle\")\n\ndef keyr():\n tess.pencolor(\"red\")\n\ndef keyg():\n tess.pencolor('green')\n\ndef keyb():\n tess.pencolor('blue')\n\ndef keyplus():\n x = int(tess.pensize)\n if x < 20:\n tess.pensize(x+1)\n else:\n tess.pensize(20)\n\ndef keyminus():\n x = int(tess.pensize)\n if x > 1:\n tess.pensize(x-1)\n else:\n tess.pensize(1)\n\nwn.onkey(keyr,'r')\nwn.onkey(keyg,'g')\nwn.onkey(keyb,'b')\nwn.onkey(keyplus, 'u')\nwn.onkey(keyminus, 'd')\n\nwn.listen()\nwn.mainloop()\n\n# 6\nglobal scorea\nglobal scoreb\nglobal statenum\nscorea = 0\nsocreb = 0\n\ndef score():\n # a wins\n if statenum == -1:\n scorea = scorea + 15\n 
statenum = 0\n # b wins\n elif statenum == 1:\n scoreb = scoreb + 15\n statenum = 0\n\nwhile scorea < 65 or scoreb < 65:\n x = input('who wins this turn? ')\n if x == \"a\":\n statenum = -1\n score()\n print(f'the current score is: {scorea} vs {scoreb}')\n elif x == 'b':\n statenum = 1\n score()\n print(f'the current score is: {scorea} vs {scoreb}')\n" }, { "alpha_fraction": 0.5218750238418579, "alphanum_fraction": 0.5609375238418579, "avg_line_length": 23.615385055541992, "blob_id": "a4277fe682437e9fb4113d96190228944a763c21", "content_id": "62b353fd586699c33d95e692fc523528e3758197", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1324, "license_type": "no_license", "max_line_length": 77, "num_lines": 52, "path": "/chapter16_ClassesAndObjects.py", "repo_name": "ChenluLiu/PythonLearnS2", "src_encoding": "UTF-8", "text": "class Point:\n 'Point class represents and manipulates x,y coords.' # the docstring shows up in the class's help text\n def __init__(self, x=0, y=0):\n 'Create a new point at x, y' # the docstring appears at the definition\n self.x = x\n self.y = y\n\n def distance_from_origin(self):\n 'Compute my distance from the origin'\n return((self.x ** 2) + (self.y ** 2)) ** 0.5\n\n def __str__(self):\n return'({0}, {1})'.format(self.x, self.y)\n\n def halfway(self, target):\n mx = (self.x + target.x)/2\n my = (self.y + target.y)/2\n return Point(mx, my)\n\nclass Rectangle:\n 'A class to manufacture rectangle objects'\n def __init__(self, posn, w, h):\n self.corner = posn\n self.width = w\n self.height = h\n\n def __str__(self):\n return '({0}, {1}, {2})'.format(self.corner, self.width, self.height)\n\n def grow(self, delta_width, delta_height):\n self.width += delta_width\n self.height += delta_height\n \n def move(self, dx, dy):\n self.corner.x += dx\n self.corner.y += dy\n\n\nbox = Rectangle(Point(0,0), 100, 200)\nbomb = Rectangle(Point(100,80),5,10)\nprint(\"box: \", box)\nprint(\"bomb: \", bomb)\n\nbox.width += 50\nbox.height += 100\n\nr = Rectangle(Point(10,5), 100, 50)\nprint(r)\nr.grow(25,-10)\nprint(r)\nr.move(-10,10)\nprint(r)\n" }, { "alpha_fraction": 0.5152714848518372, "alphanum_fraction": 0.5593891143798828, "avg_line_length": 16.188405990600586, "blob_id": "270512b9bf6890a0f011f2a3fb3ab7cb09ff04ab", "content_id": "1fc61de36a8021013746d2c5af522fea5d8ac562", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1768, "license_type": "no_license", "max_line_length": 66, "num_lines": 89, "path": "/chapter05_Conditionals.py", "repo_name": "ChenluLiu/PythonLearnS2", "src_encoding": "UTF-8", "text": "# type \"bool\" --> True and False\na = (5 == (3 + 2))\nprint(a)\nb = type(5 == (3 + 2))\nprint(b)\n\n# and, or, not\n\n# if elif else\n\n# int() float() str()\nprint(int(2.6))\nprint(str(178))\nprint(float(1))\n\n# turtle bar chart\nimport turtle\n\ndef draw_bar(t, height):\n t.begin_fill()\n t.left(90)\n t.forward(height)\n t.write(\" \" + str(height))\n t.right(90)\n t.forward(40)\n t.right(90)\n t.forward(height)\n t.left(90)\n t.end_fill()\n t.forward(10)\n\nwn = turtle.Screen()\nwn.bgcolor(\"lightgreen\")\n\ntess = turtle.Turtle()\ntess.color(\"blue\", \"red\")\ntess.pensize(5)\n\n#scale = [30, 50, 40, 80, 90]\n#for v in scale:\n# draw_bar(tess, v)\n\n#wn.mainloop()\n\n# Exercise\n# 1\ninput_num = input(\"please input a number: \")\n\ndef E1(a):\n if a == 0:\n print(\"Sunday\")\n elif a == 1:\n print(\"Monday\")\n elif a == 2:\n print(\"Tuesday\")\n elif a == 3:\n print(\"Wednesday\")\n elif a == 4:\n print(\"Thursday\")\n elif a 
== 5:\n print(\"Friday\")\n elif a == 6:\n print(\"Saturday\")\n else:\n print(\"please input a number from 0 to 6\")\n\nstart_num = int(input(\"please tell me your starting day number: \"))\nstay_num = int(input(\"please tell me your stay days: \"))\ntotal = start_num + stay_num\n#day = total % 7\n#E1(total)\n\n# 6\ndef grade(score):\n if score >= 75:\n print(score, \"scores makes you a First grade!\")\n elif score >= 70:\n print(score, \"scores makes you an Upper Second grade!\") \n elif score >= 60:\n print(score, \"scores makes you a Second grade!\") \n elif score >= 50:\n print(score, \"scores makes you a Third grade!\") \n else:\n print(score, \"scores means you failed this time!\")\n\nxs = [83,75,74.9,70,69.9,65,55,45,40,2]\n\nfor x in xs:\n grade(x)\n\n " }, { "alpha_fraction": 0.4437499940395355, "alphanum_fraction": 0.5062500238418579, "avg_line_length": 16.035715103149414, "blob_id": "7876f723f6775715bbd3d26045b960709f64784d", "content_id": "984959e252c581f51cd607c1a6f11e75d7f4ccb5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 480, "license_type": "no_license", "max_line_length": 43, "num_lines": 28, "path": "/chapter06_Fruitful Functions.py", "repo_name": "ChenluLiu/PythonLearnS2", "src_encoding": "UTF-8", "text": "# calculate the distance between two points\ndef distance(x1, y1, x2, y2):\n a = x2 - x1\n b = y2 - y1\n c = a**2 + b**2\n result = c ** 0.5\n return result\n\nprint(distance(2,2,1,1))\n\n# Exercise 11\ndef compare(a,b):\n if a > b:\n return 1\n if a == b:\n return 0\n if a < b:\n return -1\n\ndef test(x):\n if x:\n print(\"Correct\")\n else:\n print(\"Failed\")\n\ntest(compare(5,4) == 1)\ntest(compare(7,7) == 0)\ntest(compare(2,3) == -1)\n\n\n\n" }, { "alpha_fraction": 0.5064540505409241, "alphanum_fraction": 0.5497342348098755, "avg_line_length": 20.25806427001953, "blob_id": "b3e08b59ceb202cd364745d76ab09df823a69ad9", "content_id": "4169df24bc845859a9aa862764eb9ded7026b2ca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1317, "license_type": "no_license", "max_line_length": 71, "num_lines": 62, "path": "/chapter07_Iteration.py", "repo_name": "ChenluLiu/PythonLearnS2", "src_encoding": "UTF-8", "text": "# while\nfor i in range(1,7):\n print(2*i,end = \" \")\nprint()\n\n# page101\nimport random\n\nrng = random.Random()\nnumber = rng.randrange(1,1000)\n\nguesses = 0\nmsg = ''\n\n#while True:\n# guess = int(input(msg + '\\nGuess my number between 1 and 1000: '))\n# guesses += 1\n# if guess > number:\n# msg += str(guess) + ' is too high.\\n'\n# elif guess < number:\n# msg += str(guess) + ' is too low.\\n'\n# else:\n# break\n\n#input('\\n\\nYou got it in {0} guesses!\\n\\n'.format(guesses))\n\n# continue \nfor i in [12,16,17,24,29,30]:\n if i % 2 == 1:\n continue\n print(i)\nprint(\"done\")\n\n# 7.21\ncelebs = [(\"Brad Pitt\", 1963), (\"Jack Nicholson\", 1937), (\"Justin Bieber\", 1994)]\nprint(celebs)\nprint(len(celebs))\n\nfor (name,year) in celebs:\n if year < 1980:\n print(name)\n\n# 7.22\nstudents = [\n (\"John\", ['CompSci', 'Physics']),\n (\"Vusi\", ['Maths', 'CompSci', 'Stats']),\n (\"Jess\", ['CompSci', 'Accounting', 'Economics', 'Management']),\n (\"Zuki\", ['Maths', 'Economics', 'Management']),\n]\n\nfor (name, subs) in students:\n print(name, ' takes ', len(subs), ' courses.')\n\ncounter = 0\nnamelist = ''\nfor (name, subs) in students:\n for s in subs:\n if s == 'CompSci':\n counter += 1\n namelist += str(name) + ', '\n\nprint(namelist, 'take 
computer science, in total ', counter)" }, { "alpha_fraction": 0.4821428656578064, "alphanum_fraction": 0.5416666865348816, "avg_line_length": 11.066666603088379, "blob_id": "8ed5f49d1e787e337b0bdee0967e62104134eabc", "content_id": "31a6e972366bc6624ceb5a79bd220e152c1525d2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 240, "license_type": "no_license", "max_line_length": 31, "num_lines": 15, "path": "/chapter11_Lists.py", "repo_name": "ChenluLiu/PythonLearnS2", "src_encoding": "UTF-8", "text": "# Exercise\n\n# In a list, if you assign b=a, changing an element of b also changes a\na = [3, 2, 1]\nb = a\nb[0] = 1\nprint(a)\nprint(b)\n\n# Use b=a[:] instead, so that modifying b does not affect a\na = [1, 2, 3]\nb = a[:]\nb[0] = 5\nprint(a)\nprint(b)\n\n\n" }, { "alpha_fraction": 0.5388127565383911, "alphanum_fraction": 0.5910705327987671, "avg_line_length": 18.77142906188965, "blob_id": "0bccc7e987767c07a2204d64a0027c9a0b65baeb", "content_id": "25f1159320fbd4a6f38d7d24a317db6a12144a47", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1999, "license_type": "no_license", "max_line_length": 68, "num_lines": 105, "path": "/PythonLearnS2.py", "repo_name": "ChenluLiu/PythonLearnS2", "src_encoding": "UTF-8", "text": "\n# compute the area of a circle from the input radius\nr = float(input(\"What is your radius? \"))\narea = 3.1415926 * r * r\nprint(f\"The area is {area}.\")\n\n# 2.14 Exercises\n# 1\na = 'All'\nb = 'work'\nc = 'and'\nd = 'no'\ne = 'play'\nf = 'makes'\ng = 'Jack'\nh = 'a'\ni = 'dull'\nj = 'boy.'\nprint(f\"{a} {b} {c} {d} {e} {f} {g} {h} {i} {j}\")\n\n# 2\nx_1 = 6 * 1 - 2\nx_2 = 6 *( 1 - 2 )\n\n# 3\n\n# 4\nbruce = 6\nbruce = bruce + 4\nprint(bruce)\n\n# 5\np = 10000\nn = 12\nr = 0.08\nt = int(input(\"please enter your number of years: \"))\namount = (1 + r/n) ** (n * t) * p\nprint(f\"your final amount after {t} years is {amount}\")\n\n# 6\ny_1 = 5 % 2\ny_2 = 9 % 5\ny_3 = 15 % 12\ny_4 = 12 % 15\ny_5 = 6 % 6\ny_6 = 0 % 7\n# y_7 = 7 % 0\n\n# 7\ngo_off = (14 + 51) % 24\nprint(go_off)\n\n# 8\ncurrent_time = int(input(\"What time is it now?(24h format) \"))\nwait_time = int(input(\"How many hours would you like to wait? \"))\noff_time = (current_time + wait_time) % 24\nprint(f\"Your alarm will go off on {off_time} clock.\")\n\n# 3.1 Our first turtle program\n#import turtle #load \"turtle\" module\n#wn = turtle.Screen() #create and open a window named wn\n#alex = turtle.Turtle() #create a turtle named alex\n\n#alex.forward(50) #make alex move\n#alex.left(90)\n#alex.forward(30)\n\n#wn.mainloop() #keep a state where it waits for events\n\n# 3.1 turtle attributes\n#import turtle\n#wn = turtle.Screen()\n#wn.bgcolor(\"lightgreen\")\n#wn.title(\"Hi, Liu!\")\n\n#liu = turtle.Turtle()\n#liu.color(\"yellow\")\n#liu.pensize(3)\n\n#liu.forward(50)\n\n#wn.mainloop()\n\n# Practice 1/2/3\nimport turtle\nwn = turtle.Screen()\np1 = turtle.Turtle()\nbg_color = input(\"which background color you prefer to? \")\nwn.bgcolor(f\"{bg_color}\")\n\np1_color = input(\"which tess's color you prefer to? \")\np1.color(f\"{p1_color}\")\n\np1_size = int(input(\"what is your tess's size? 
\"))\np1.pensize(p1_size)\n\nwn.mainloop()\n\n# 3.8 Exercise\n# 1\nfor i in range(5):\n print(\"I love U!\")\n\n# 2\nfor month in [\"Jan.\", \"Feb.\", \"Mar.\"]:\n print(f\"one of the months of the year is {month}.\")" }, { "alpha_fraction": 0.5690000057220459, "alphanum_fraction": 0.6320000290870667, "avg_line_length": 15.129032135009766, "blob_id": "451e7e040d0e0406e6171bc0514205fb786bb919", "content_id": "2624ba22af197e15ff062a0ae22bba7c1d28c92d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1000, "license_type": "no_license", "max_line_length": 57, "num_lines": 62, "path": "/chapter10_EventDrivenProgramming.py", "repo_name": "ChenluLiu/PythonLearnS2", "src_encoding": "UTF-8", "text": "#import turtle\n\n#turtle.setup(400,500)\n\n#wn = turtle.Screen()\n#wn.title('mouse click')\n#wn.bgcolor('pink')\n\n#tess = turtle.Turtle()\n#tess.color('orange')\n#tess.pensize(3)\n#tess.shape('circle')\n\n#alex = turtle.Turtle()\n#alex.color('blue')\n#alex.forward(100)\n\n# goes only once\n#def h1():\n# tess.forward(100)\n# tess.left(56)\n\n# wn.ontimer(h1,2000)\n\n# goes\n#def h1():\n# tess.forward(10)\n# tess.left(20)\n# wn.ontimer(h1,200)\n\n#h1()\n#wn.mainloop()\n\n\n#def h2(x,y):\n# wn.title('got click at coords {0}, {1}'.format(x,y))\n# alex.right(84)\n# alex.forward(50)\n\n#wn.onclick(h2)\n\n# traffic light\nimport turtle\nturtle.setup(400,500)\nwn = turtle.Screen()\nwn.title('Now Traffic Light!')\nwn.bgcolor('pink')\ntess = turtle.Turtle()\n\ndef draw_housing():\n tess.pensize(3)\n tess.color('darkgray','lightgrey')\n tess.begin_fill()\n tess.forward(80)\n tess.left(90)\n tess.forward(200)\n tess.circle(40,180)\n tess.forward(200)\n tess.left(90)\n tess.end_fill()\n\ndraw_housing()\n" }, { "alpha_fraction": 0.5617461204528809, "alphanum_fraction": 0.5663411617279053, "avg_line_length": 23.885713577270508, "blob_id": "2e0819a56edad21ef5e9a8e1bf591265d2a670e3", "content_id": "7fc82a0af6750b1b0b996637347b3c9910c1a68c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1829, "license_type": "no_license", "max_line_length": 66, "num_lines": 70, "path": "/chapter14_ListAlgorithms.py", "repo_name": "ChenluLiu/PythonLearnS2", "src_encoding": "UTF-8", "text": "# find unknown words\n# known_words = ['apple', 'tree', 'fish', 'river', 'boy', 'girl']\n# book_words = \"the apple fell from the tree to the river\".split()\n\ndef find_unknown(known, exist):\n result = []\n for word in exist:\n if word not in known:\n result.append(word)\n return result\n\n# print(find_unknown(known_words, book_words))\n\ndef load_words_from_file(filename):\n f = open(filename, 'r')\n file_content = f.read()\n f.close()\n lower = file_content.lower()\n nokomma = lower.replace(',',' ') # 去掉文本中的各类符号\n nodoc = nokomma.replace('.', ' ')\n nodoubledoc = nodoc.replace('''\"''',' ')\n noquestion = nodoubledoc.replace('?', ' ')\n nosurprise = noquestion.replace('!', ' ')\n wds = nosurprise.split() # 将文本分割\n return wds\n\n# 导入小学的英文单词\nprimary_voc = load_words_from_file('words.txt')\nprint(len(primary_voc))\nknown_words = primary_voc[:]\n\n# 导入故事的英文单词\nstory_voc = load_words_from_file('story.txt')\nprint(len(story_voc))\nstory_words = story_voc[:]\n\n# 对比词汇\nunknown_words = find_unknown(known_words,story_words)\nprint(unknown_words)\n\n# 去掉重复的词汇\ndef clean(words):\n words.sort()\n t = words[-1]\n for i in range(len(words)-2,-1,-1):\n if t == words[i]:\n words.remove(words[i])\n else:\n t = words[i]\n return 
words\n\nprint(clean(unknown_words))\n\n# binary search\ndef search_binary(xs,target):\n lb = 0\n ub = len(xs)\n while True:\n if lb == ub:\n return -1\n\n mid_index = (lb+ub) // 2\n item_at_mid = xs[mid_index]\n \n if item_at_mid == target:\n return mid_index\n if item_at_mid < target:\n lb = mid_index + 1\n else:\n ub = mid_index" }, { "alpha_fraction": 0.5412541031837463, "alphanum_fraction": 0.5610560774803162, "avg_line_length": 24.91428565979004, "blob_id": "a9cfe04eed2738af029f01f7fa295e94682b7733", "content_id": "4de9d1863312a9066cc8025c09234dd7824838a8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 953, "license_type": "no_license", "max_line_length": 80, "num_lines": 35, "path": "/chapter15_Object_orientedProgramming.py", "repo_name": "ChenluLiu/PythonLearnS2", "src_encoding": "UTF-8", "text": "class Point:\n 'Point class represents and manipulates x,y coords.' # the docstring shows up in the class's help text\n def __init__(self, x=0, y=0):\n 'Create a new point at x, y' # the docstring appears at the definition\n self.x = x\n self.y = y\n\n def distance_from_origin(self):\n 'Compute my distance from the origin'\n return((self.x ** 2) + (self.y ** 2)) ** 0.5\n\n def to_string(self):\n return'({0}, {1})'.format(self.x, self.y)\n\n def halfway(self, target):\n mx = (self.x + target.x)/2\n my = (self.y + target.y)/2\n return Point(mx, my)\n\np = Point(4,2)\nq = Point(6,3)\n\nprint(p.x, q.y)\nprint(q.distance_from_origin())\n\nr = p.halfway(q)\nprint(r.to_string())\n\n# exercise 6\nclass SMS_store:\n def __init__(self, has_been_viewed, from_number, time_arrived, text_of_SMS):\n self.has_been_viewed = 0\n self.from_number = '0'\n self.time_arrived = '0'\n self.text_of_SMS = ''\n\n\n" } ]
14
birdmanmandbir/classify-your-leetcode
https://github.com/birdmanmandbir/classify-your-leetcode
9bc863a6d88ee33a9d87c861ed73001273ec1b86
7e288f917c216ea58d2eaa62e7b2292a2f6fb89f
bdc36b0c9742e16611cbab1eec7d4c957f8a33d1
refs/heads/master
2022-06-15T21:31:54.174983
2020-05-09T02:28:12
2020-05-09T02:28:12
262,331,906
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7229219079017639, "alphanum_fraction": 0.732997477054596, "avg_line_length": 19.947368621826172, "blob_id": "0ef4c933de2b3ebe677a3e2a087202243683ca3d", "content_id": "3fba2e6ec7dd0a06c539d1e72dfd45f54a0f966a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 693, "license_type": "no_license", "max_line_length": 65, "num_lines": 19, "path": "/Readme.md", "repo_name": "birdmanmandbir/classify-your-leetcode", "src_encoding": "UTF-8", "text": "# code-classify\n用来整理leetcode刷题代码的小脚本\n\n注释中定义代码所属的标签后,本程序会自动提取标签并将文件放入相应文件夹\n\n如:\n\n在`496.下一个更大元素-i.cpp`中定义标签`// @单调栈`, 程序会将此文件复制到`output_dir`中的`单调栈`\n## 依赖\npython3\n## 配置参数:\n```python\n# 标签格式:// @\nannotation = \"// @\"\nprograming_language = \".cpp\"\n# 输出目录,如果不存在会自动创建; 输出目录可以是输入目录的子目录,在输出目录下的文件会在处理过程中被忽略\noutput_dir = \"/home/neil/Codes/classify-python/题型总结\"\ninput_dir = \"/home/neil/Codes/classify-python\"\n```" }, { "alpha_fraction": 0.5431472063064575, "alphanum_fraction": 0.5459672808647156, "avg_line_length": 32.47169876098633, "blob_id": "519d5441baba94a136a90f41508577f757ce7edb", "content_id": "ccda1dc5b2c5907f5ea61ee40af6d451f6606131", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1803, "license_type": "no_license", "max_line_length": 95, "num_lines": 53, "path": "/归类.py", "repo_name": "birdmanmandbir/classify-your-leetcode", "src_encoding": "UTF-8", "text": "import os\nfrom shutil import copyfile\n# 标签格式:// @\nannotation = \"// @\"\nprograming_language = \".cpp\"\n# output_dir = \"/home/neil/Codes/leetcode/题型总结/\"\noutput_dir = \"/home/neil/Codes/leetcode/题型总结\"\ninput_dir = \"/home/neil/Codes/leetcode\" # also work dir\n# TODO log\n# cur_dir is abs path\ndef recur(cur_dir):\n dirs = os.listdir(cur_dir)\n if len(dirs) == 0:\n return\n for dir in dirs:\n abs_dir = os.path.join(cur_dir, dir)\n if os.path.isdir(abs_dir) and abs_dir != output_dir:\n recur(abs_dir)\n elif os.path.isfile(abs_dir):\n # start processing\n dir_to_create_or_move = get_annotation(abs_dir)\n if dir_to_create_or_move == \"\":\n continue\n abs_dir_to_create_or_move = os.path.join(output_dir, dir_to_create_or_move)\n if (not os.path.exists(abs_dir_to_create_or_move)):\n os.mkdir(abs_dir_to_create_or_move)\n cp_src = abs_dir\n cp_tgt = os.path.join(abs_dir_to_create_or_move, cur_dir.split(\"/\")[-1]+ \".\" + dir)\n copyfile(cp_src, cp_tgt)\n\ndef get_annotation(abs_filename):\n extend_name = os.path.splitext(abs_filename)[-1];\n res = \"\"\n if extend_name != programing_language:\n return res\n with open(abs_filename, \"rt\") as file:\n line = file.readline()\n while(line):\n if annotation in line:\n res = line.split(\" \")[1].split(\"@\")[1].replace(\"\\n\",\"\")\n # 过滤lc\n if res == \"lc\":\n return \"\"\n return res\n line = file.readline()\n return res\n\nif __name__ == '__main__':\n if not os.path.exists(output_dir):\n os.mkdir(output_dir)\n else:\n os.rmdir(output_dir)\n recur(input_dir)" } ]
2
aaronn/django-rest-framework-passwordless
https://github.com/aaronn/django-rest-framework-passwordless
a7535670017b05f4d268d582758dd8330577fb0a
36592eae58f616e3068bd393d06a33e3179b96c8
edc6ae341cb650649ca0d96159f781fac905fcbe
refs/heads/master
2023-08-22T06:43:47.065797
2022-04-11T07:39:47
2022-04-11T07:39:47
86,131,730
661
159
MIT
2017-03-25T04:13:14
2023-02-10T07:28:51
2023-02-11T04:40:02
Python
[ { "alpha_fraction": 0.6877018809318542, "alphanum_fraction": 0.6972670555114746, "avg_line_length": 48.69135665893555, "blob_id": "4a2f31874e4a0cbed433d968004cecca1b4f01b1", "content_id": "16a046499822e44fd955911dec5e25b76f0587be", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8050, "license_type": "permissive", "max_line_length": 124, "num_lines": 162, "path": "/tests/test_verification.py", "repo_name": "aaronn/django-rest-framework-passwordless", "src_encoding": "UTF-8", "text": "from rest_framework import status\nfrom rest_framework.authtoken.models import Token\nfrom django.utils.translation import gettext_lazy as _\nfrom rest_framework.test import APITestCase\nfrom django.contrib.auth import get_user_model\nfrom django.urls import reverse\nfrom drfpasswordless.settings import api_settings, DEFAULTS\nfrom drfpasswordless.utils import CallbackToken\n\nUser = get_user_model()\n\n\nclass AliasEmailVerificationTests(APITestCase):\n\n def setUp(self):\n api_settings.PASSWORDLESS_AUTH_TYPES = ['EMAIL']\n api_settings.PASSWORDLESS_EMAIL_NOREPLY_ADDRESS = 'noreply@example.com'\n api_settings.PASSWORDLESS_USER_MARK_EMAIL_VERIFIED = True\n\n self.url = reverse('drfpasswordless:auth_email')\n self.callback_url = reverse('drfpasswordless:auth_token')\n self.verify_url = reverse('drfpasswordless:verify_email')\n self.callback_verify = reverse('drfpasswordless:verify_token')\n self.email_field_name = api_settings.PASSWORDLESS_USER_EMAIL_FIELD_NAME\n self.email_verified_field_name = api_settings.PASSWORDLESS_USER_EMAIL_VERIFIED_FIELD_NAME\n\n def test_email_unverified_to_verified_and_back(self):\n email = 'aaron@example.com'\n email2 = 'aaron2@example.com'\n data = {'email': email}\n\n # create a new user\n response = self.client.post(self.url, data)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n user = User.objects.get(**{self.email_field_name: email})\n self.assertNotEqual(user, None)\n self.assertEqual(getattr(user, self.email_verified_field_name), False)\n\n # Verify a token exists for the user, sign in and check verified again\n callback = CallbackToken.objects.filter(user=user, type=CallbackToken.TOKEN_TYPE_AUTH, is_active=True).first()\n callback_data = {'email': email, 'token': callback}\n callback_response = self.client.post(self.callback_url, callback_data)\n self.assertEqual(callback_response.status_code, status.HTTP_200_OK)\n\n # Verify we got the token, then check and see that email_verified is now verified\n token = callback_response.data['token']\n self.assertEqual(token, Token.objects.get(user=user).key)\n\n # Refresh and see that the endpoint is now verified as True\n user.refresh_from_db()\n self.assertEqual(getattr(user, self.email_verified_field_name), True)\n\n # Change email, should result in flag changing to false\n setattr(user, self.email_field_name, email2)\n user.save()\n user.refresh_from_db()\n self.assertEqual(getattr(user, self.email_verified_field_name), False)\n\n # Verify\n self.client.force_authenticate(user)\n verify_response = self.client.post(self.verify_url)\n self.assertEqual(verify_response.status_code, status.HTTP_200_OK)\n\n # Refresh User\n user = User.objects.get(**{self.email_field_name: email2})\n self.assertNotEqual(user, None)\n self.assertNotEqual(getattr(user, self.email_field_name), None)\n self.assertEqual(getattr(user, self.email_verified_field_name), False)\n\n # Post callback token back.\n verify_token = CallbackToken.objects.filter(user=user, 
type=CallbackToken.TOKEN_TYPE_VERIFY, is_active=True).first()\n self.assertNotEqual(verify_token, None)\n verify_callback_response = self.client.post(self.callback_verify, {'email': email2, 'token': verify_token.key})\n self.assertEqual(verify_callback_response.status_code, status.HTTP_200_OK)\n\n # Refresh User\n user = User.objects.get(**{self.email_field_name: email2})\n self.assertNotEqual(user, None)\n self.assertNotEqual(getattr(user, self.email_field_name), None)\n self.assertEqual(getattr(user, self.email_verified_field_name), True)\n\n def tearDown(self):\n api_settings.PASSWORDLESS_AUTH_TYPES = DEFAULTS['PASSWORDLESS_AUTH_TYPES']\n api_settings.PASSWORDLESS_EMAIL_NOREPLY_ADDRESS = DEFAULTS['PASSWORDLESS_EMAIL_NOREPLY_ADDRESS']\n api_settings.PASSWORDLESS_USER_MARK_EMAIL_VERIFIED = DEFAULTS['PASSWORDLESS_USER_MARK_MOBILE_VERIFIED']\n\n\nclass AliasMobileVerificationTests(APITestCase):\n\n def setUp(self):\n api_settings.PASSWORDLESS_TEST_SUPPRESSION = True\n api_settings.PASSWORDLESS_AUTH_TYPES = ['MOBILE']\n api_settings.PASSWORDLESS_MOBILE_NOREPLY_NUMBER = '+15550000000'\n api_settings.PASSWORDLESS_USER_MARK_MOBILE_VERIFIED = True\n\n self.url = reverse('drfpasswordless:auth_mobile')\n self.callback_url = reverse('drfpasswordless:auth_token')\n self.verify_url = reverse('drfpasswordless:verify_mobile')\n self.callback_verify = reverse('drfpasswordless:verify_token')\n self.mobile_field_name = api_settings.PASSWORDLESS_USER_MOBILE_FIELD_NAME\n self.mobile_verified_field_name = api_settings.PASSWORDLESS_USER_MOBILE_VERIFIED_FIELD_NAME\n\n def test_mobile_unverified_to_verified_and_back(self):\n mobile = '+15551234567'\n mobile2 = '+15557654321'\n data = {'mobile': mobile}\n\n # create a new user\n response = self.client.post(self.url, data)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n user = User.objects.get(**{self.mobile_field_name: mobile})\n self.assertNotEqual(user, None)\n self.assertEqual(getattr(user, self.mobile_verified_field_name), False)\n\n # Verify a token exists for the user, sign in and check verified again\n callback = CallbackToken.objects.filter(user=user, type=CallbackToken.TOKEN_TYPE_AUTH, is_active=True).first()\n callback_data = {'mobile': mobile, 'token': callback}\n callback_response = self.client.post(self.callback_url, callback_data)\n self.assertEqual(callback_response.status_code, status.HTTP_200_OK)\n\n # Verify we got the token, then check and see that email_verified is now verified\n token = callback_response.data['token']\n self.assertEqual(token, Token.objects.get(user=user).key)\n\n # Refresh and see that the endpoint is now verified as True\n user.refresh_from_db()\n self.assertEqual(getattr(user, self.mobile_verified_field_name), True)\n\n # Change mobile, should result in flag changing to false\n setattr(user, self.mobile_field_name, '+15557654321')\n user.save()\n user.refresh_from_db()\n self.assertEqual(getattr(user, self.mobile_verified_field_name), False)\n\n # Verify\n self.client.force_authenticate(user)\n verify_response = self.client.post(self.verify_url)\n self.assertEqual(verify_response.status_code, status.HTTP_200_OK)\n\n # Refresh User\n user = User.objects.get(**{self.mobile_field_name: mobile2})\n self.assertNotEqual(user, None)\n self.assertNotEqual(getattr(user, self.mobile_field_name), None)\n self.assertEqual(getattr(user, self.mobile_verified_field_name), False)\n\n # Post callback token back.\n verify_token = CallbackToken.objects.filter(user=user, type=CallbackToken.TOKEN_TYPE_VERIFY, 
is_active=True).first()\n self.assertNotEqual(verify_token, None)\n verify_callback_response = self.client.post(self.callback_verify, {'mobile': mobile2, 'token': verify_token.key})\n self.assertEqual(verify_callback_response.status_code, status.HTTP_200_OK)\n\n # Refresh User\n user = User.objects.get(**{self.mobile_field_name: mobile2})\n self.assertNotEqual(user, None)\n self.assertNotEqual(getattr(user, self.mobile_field_name), None)\n self.assertEqual(getattr(user, self.mobile_verified_field_name), True)\n\n def tearDown(self):\n api_settings.PASSWORDLESS_TEST_SUPPRESSION = DEFAULTS['PASSWORDLESS_TEST_SUPPRESSION']\n api_settings.PASSWORDLESS_AUTH_TYPES = DEFAULTS['PASSWORDLESS_AUTH_TYPES']\n api_settings.PASSWORDLESS_MOBILE_NOREPLY_ADDRESS = DEFAULTS['PASSWORDLESS_MOBILE_NOREPLY_NUMBER']\n api_settings.PASSWORDLESS_USER_MARK_MOBILE_VERIFIED = DEFAULTS['PASSWORDLESS_USER_MARK_MOBILE_VERIFIED']\n" }, { "alpha_fraction": 0.6620209217071533, "alphanum_fraction": 0.6864111423492432, "avg_line_length": 36.434783935546875, "blob_id": "707a9a4a4880b5bcfd72b9e6c647510728f1b0be", "content_id": "8bf37954cd220b0706c8faac79c52ff137d80aa3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 861, "license_type": "permissive", "max_line_length": 106, "num_lines": 23, "path": "/tests/models.py", "repo_name": "aaronn/django-rest-framework-passwordless", "src_encoding": "UTF-8", "text": "from django.contrib.auth.models import AbstractBaseUser\nfrom django.contrib.auth.models import BaseUserManager\nfrom django.core.validators import RegexValidator\nfrom django.db import models\n\nphone_regex = RegexValidator(regex=r'^\\+[1-9]\\d{1,14}$',\n message=\"Mobile number must be entered in the format:\"\n \" '+999999999'. 
Up to 15 digits allowed.\")\n\n\nclass CustomUser(AbstractBaseUser):\n email = models.EmailField(max_length=255, unique=True, blank=True, null=True)\n email_verified = models.BooleanField(default=False)\n\n mobile = models.CharField(validators=[phone_regex], max_length=17, unique=True, blank=True, null=True)\n mobile_verified = models.BooleanField(default=False)\n\n objects = BaseUserManager()\n\n USERNAME_FIELD = 'email'\n\n class Meta:\n app_label = 'tests'\n" }, { "alpha_fraction": 0.5741207003593445, "alphanum_fraction": 0.5776658058166504, "avg_line_length": 34.604652404785156, "blob_id": "f991696de254d7b3973cb63179563c525b9ff669", "content_id": "597f4d436e8c04903b1f6e3fc4c96a1e02ba0c1f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10719, "license_type": "permissive", "max_line_length": 97, "num_lines": 301, "path": "/drfpasswordless/serializers.py", "repo_name": "aaronn/django-rest-framework-passwordless", "src_encoding": "UTF-8", "text": "import logging\nfrom django.utils.translation import gettext_lazy as _\nfrom django.contrib.auth import get_user_model\nfrom django.core.exceptions import PermissionDenied\nfrom django.core.validators import RegexValidator\nfrom rest_framework import serializers\nfrom rest_framework.exceptions import ValidationError\nfrom drfpasswordless.models import CallbackToken\nfrom drfpasswordless.settings import api_settings\nfrom drfpasswordless.utils import verify_user_alias, validate_token_age\n\nlogger = logging.getLogger(__name__)\nUser = get_user_model()\n\n\nclass TokenField(serializers.CharField):\n default_error_messages = {\n 'required': _('Invalid Token'),\n 'invalid': _('Invalid Token'),\n 'blank': _('Invalid Token'),\n 'max_length': _('Tokens are {max_length} digits long.'),\n 'min_length': _('Tokens are {min_length} digits long.')\n }\n\n\nclass AbstractBaseAliasAuthenticationSerializer(serializers.Serializer):\n \"\"\"\n Abstract class that returns a callback token based on the field given\n Returns a token if valid, None or a message if not.\n \"\"\"\n\n @property\n def alias_type(self):\n # The alias type, either email or mobile\n raise NotImplementedError\n\n def validate(self, attrs):\n alias = attrs.get(self.alias_type)\n\n if alias:\n # Create or authenticate a user\n # Return THem\n\n if api_settings.PASSWORDLESS_REGISTER_NEW_USERS is True:\n # If new aliases should register new users.\n try:\n user = User.objects.get(**{self.alias_type+'__iexact': alias})\n except User.DoesNotExist:\n user = User.objects.create(**{self.alias_type: alias})\n user.set_unusable_password()\n user.save()\n else:\n # If new aliases should not register new users.\n try:\n user = User.objects.get(**{self.alias_type+'__iexact': alias})\n except User.DoesNotExist:\n user = None\n\n if user:\n if not user.is_active:\n # If valid, return attrs so we can create a token in our logic controller\n msg = _('User account is disabled.')\n raise serializers.ValidationError(msg)\n else:\n msg = _('No account is associated with this alias.')\n raise serializers.ValidationError(msg)\n else:\n msg = _('Missing %s.') % self.alias_type\n raise serializers.ValidationError(msg)\n\n attrs['user'] = user\n return attrs\n\n\nclass EmailAuthSerializer(AbstractBaseAliasAuthenticationSerializer):\n @property\n def alias_type(self):\n return 'email'\n\n email = serializers.EmailField()\n\n\nclass MobileAuthSerializer(AbstractBaseAliasAuthenticationSerializer):\n @property\n def alias_type(self):\n return 'mobile'\n\n 
phone_regex = RegexValidator(regex=r'^\\+[1-9]\\d{1,14}$',\n message=\"Mobile number must be entered in the format:\"\n \" '+999999999'. Up to 15 digits allowed.\")\n mobile = serializers.CharField(validators=[phone_regex], max_length=17)\n\n\n\"\"\"\nVerification\n\"\"\"\n\n\nclass AbstractBaseAliasVerificationSerializer(serializers.Serializer):\n \"\"\"\n Abstract class that returns a callback token based on the field given\n Returns a token if valid, None or a message if not.\n \"\"\"\n @property\n def alias_type(self):\n # The alias type, either email or mobile\n raise NotImplementedError\n\n def validate(self, attrs):\n\n msg = _('There was a problem with your request.')\n\n if self.alias_type:\n # Get request.user\n # Get their specified valid endpoint\n # Validate\n\n request = self.context[\"request\"]\n if request and hasattr(request, \"user\"):\n user = request.user\n if user:\n if not user.is_active:\n # If valid, return attrs so we can create a token in our logic controller\n msg = _('User account is disabled.')\n\n else:\n if hasattr(user, self.alias_type):\n # Has the appropriate alias type\n attrs['user'] = user\n return attrs\n else:\n msg = _('This user doesn\\'t have an %s.' % self.alias_type)\n raise serializers.ValidationError(msg)\n else:\n msg = _('Missing %s.') % self.alias_type\n raise serializers.ValidationError(msg)\n\n\nclass EmailVerificationSerializer(AbstractBaseAliasVerificationSerializer):\n @property\n def alias_type(self):\n return 'email'\n\n\nclass MobileVerificationSerializer(AbstractBaseAliasVerificationSerializer):\n @property\n def alias_type(self):\n return 'mobile'\n\n\n\"\"\"\nCallback Token\n\"\"\"\n\n\ndef token_age_validator(value):\n \"\"\"\n Check token age\n Makes sure a token is within the proper expiration datetime window.\n \"\"\"\n valid_token = validate_token_age(value)\n if not valid_token:\n raise serializers.ValidationError(\"The token you entered isn't valid.\")\n return value\n\n\nclass AbstractBaseCallbackTokenSerializer(serializers.Serializer):\n \"\"\"\n Abstract class inspired by DRF's own token serializer.\n Returns a user if valid, None or a message if not.\n \"\"\"\n phone_regex = RegexValidator(regex=r'^\\+[1-9]\\d{1,14}$',\n message=\"Mobile number must be entered in the format:\"\n \" '+999999999'. 
Up to 15 digits allowed.\")\n\n email = serializers.EmailField(required=False) # Needs to be required=false to require both.\n mobile = serializers.CharField(required=False, validators=[phone_regex], max_length=17)\n token = TokenField(min_length=6, max_length=6, validators=[token_age_validator])\n\n def validate_alias(self, attrs):\n email = attrs.get('email', None)\n mobile = attrs.get('mobile', None)\n\n if email and mobile:\n raise serializers.ValidationError()\n\n if not email and not mobile:\n raise serializers.ValidationError()\n\n if email:\n return 'email', email\n elif mobile:\n return 'mobile', mobile\n\n return None\n\n\nclass CallbackTokenAuthSerializer(AbstractBaseCallbackTokenSerializer):\n\n def validate(self, attrs):\n # Check Aliases\n try:\n alias_type, alias = self.validate_alias(attrs)\n callback_token = attrs.get('token', None)\n user = User.objects.get(**{alias_type+'__iexact': alias})\n token = CallbackToken.objects.get(**{'user': user,\n 'key': callback_token,\n 'type': CallbackToken.TOKEN_TYPE_AUTH,\n 'is_active': True})\n\n if token.user == user:\n # Check the token type for our uni-auth method.\n # authenticates and checks the expiry of the callback token.\n if not user.is_active:\n msg = _('User account is disabled.')\n raise serializers.ValidationError(msg)\n\n if api_settings.PASSWORDLESS_USER_MARK_EMAIL_VERIFIED \\\n or api_settings.PASSWORDLESS_USER_MARK_MOBILE_VERIFIED:\n # Mark this alias as verified\n user = User.objects.get(pk=token.user.pk)\n success = verify_user_alias(user, token)\n\n if success is False:\n msg = _('Error validating user alias.')\n raise serializers.ValidationError(msg)\n\n attrs['user'] = user\n return attrs\n\n else:\n msg = _('Invalid Token')\n raise serializers.ValidationError(msg)\n except CallbackToken.DoesNotExist:\n msg = _('Invalid alias parameters provided.')\n raise serializers.ValidationError(msg)\n except User.DoesNotExist:\n msg = _('Invalid user alias parameters provided.')\n raise serializers.ValidationError(msg)\n except ValidationError:\n msg = _('Invalid alias parameters provided.')\n raise serializers.ValidationError(msg)\n\n\nclass CallbackTokenVerificationSerializer(AbstractBaseCallbackTokenSerializer):\n \"\"\"\n Takes a user and a token, verifies the token belongs to the user and\n validates the alias that the token was sent from.\n \"\"\"\n\n def validate(self, attrs):\n try:\n alias_type, alias = self.validate_alias(attrs)\n user_id = self.context.get(\"user_id\")\n user = User.objects.get(**{'id': user_id, alias_type+'__iexact': alias})\n callback_token = attrs.get('token', None)\n\n token = CallbackToken.objects.get(**{'user': user,\n 'key': callback_token,\n 'type': CallbackToken.TOKEN_TYPE_VERIFY,\n 'is_active': True})\n\n if token.user == user:\n # Mark this alias as verified\n success = verify_user_alias(user, token)\n if success is False:\n logger.debug(\"drfpasswordless: Error verifying alias.\")\n\n attrs['user'] = user\n return attrs\n else:\n msg = _('This token is invalid. 
Try again later.')\n logger.debug(\"drfpasswordless: User token mismatch when verifying alias.\")\n\n except CallbackToken.DoesNotExist:\n msg = _('We could not verify this alias.')\n logger.debug(\"drfpasswordless: Tried to validate alias with bad token.\")\n pass\n except User.DoesNotExist:\n msg = _('We could not verify this alias.')\n logger.debug(\"drfpasswordless: Tried to validate alias with bad user.\")\n pass\n except PermissionDenied:\n msg = _('Insufficient permissions.')\n logger.debug(\"drfpasswordless: Permission denied while validating alias.\")\n pass\n\n raise serializers.ValidationError(msg)\n\n\n\"\"\"\nResponses\n\"\"\"\n\n\nclass TokenResponseSerializer(serializers.Serializer):\n \"\"\"\n Our default response serializer.\n \"\"\"\n token = serializers.CharField(source='key')\n key = serializers.CharField(write_only=True)\n\n\n" }, { "alpha_fraction": 0.5541871786117554, "alphanum_fraction": 0.5837438702583313, "avg_line_length": 14.615385055541992, "blob_id": "4f14d31e5b6e7142c5fb38e5c2e7fded1bdc62aa", "content_id": "a3df5a7bed83536a097f24b401b031c3bbdb8a60", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TOML", "length_bytes": 406, "license_type": "permissive", "max_line_length": 38, "num_lines": 26, "path": "/Pipfile", "repo_name": "aaronn/django-rest-framework-passwordless", "src_encoding": "UTF-8", "text": "[[source]]\nurl = \"https://pypi.python.org/simple\"\nverify_ssl = true\nname = \"pypi\"\n\n[packages]\ndjango = \"*\"\ndjangorestframework = \"*\"\ntwilio = \"*\"\n\n[dev-packages]\nflake8 = \"*\"\npandoc = \"*\"\npep8 = \"*\"\npytest= \"*\"\npytest-cov= \"*\"\npytest-django= \"*\"\nrequests = {version = \">=2.20.0\"}\nsetuptools-markdown = \"*\"\ntwine = \"*\"\nurllib3 = {version = \">=1.23\"}\nwheel = \"*\"\ntox = \"*\"\n\n[requires]\npython_version = \"3.7\"\n" }, { "alpha_fraction": 0.6663726568222046, "alphanum_fraction": 0.6784701943397522, "avg_line_length": 42.24523162841797, "blob_id": "c83fc801b21ef28f95e79dd90ea11672e4e4f0b0", "content_id": "ca3d44a54132287878ebfa85652aa8cda1b5236e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15871, "license_type": "permissive", "max_line_length": 106, "num_lines": 367, "path": "/tests/test_authentication.py", "repo_name": "aaronn/django-rest-framework-passwordless", "src_encoding": "UTF-8", "text": "from rest_framework import status\nfrom rest_framework.authtoken.models import Token\nfrom rest_framework.test import APITestCase\n\nfrom django.contrib.auth import get_user_model\nfrom django.urls import reverse\nfrom drfpasswordless.settings import api_settings, DEFAULTS\nfrom drfpasswordless.utils import CallbackToken\n\nUser = get_user_model()\n\n\nclass EmailSignUpCallbackTokenTests(APITestCase):\n\n def setUp(self):\n api_settings.PASSWORDLESS_EMAIL_NOREPLY_ADDRESS = 'noreply@example.com'\n self.email_field_name = api_settings.PASSWORDLESS_USER_EMAIL_FIELD_NAME\n\n self.url = reverse('drfpasswordless:auth_email')\n\n def test_email_signup_failed(self):\n email = 'failedemail182+'\n data = {'email': email}\n\n response = self.client.post(self.url, data)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n def test_email_signup_success(self):\n email = 'aaron@example.com'\n data = {'email': email}\n\n # Verify user doesn't exist yet\n user = User.objects.filter(**{self.email_field_name: 'aaron@example.com'}).first()\n # Make sure our user isn't None, meaning the user was created.\n 
self.assertEqual(user, None)\n\n # verify a new user was created with serializer\n response = self.client.post(self.url, data)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n user = User.objects.get(**{self.email_field_name: 'aaron@example.com'})\n self.assertNotEqual(user, None)\n\n # Verify a token exists for the user\n self.assertEqual(CallbackToken.objects.filter(user=user, is_active=True).exists(), 1)\n\n def test_email_signup_disabled(self):\n api_settings.PASSWORDLESS_REGISTER_NEW_USERS = False\n\n # Verify user doesn't exist yet\n user = User.objects.filter(**{self.email_field_name: 'aaron@example.com'}).first()\n # Make sure our user isn't None, meaning the user was created.\n self.assertEqual(user, None)\n\n email = 'aaron@example.com'\n data = {'email': email}\n\n # verify a new user was not created\n response = self.client.post(self.url, data)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n user = User.objects.filter(**{self.email_field_name: 'aaron@example.com'}).first()\n self.assertEqual(user, None)\n\n # Verify no token was created for the user\n self.assertEqual(CallbackToken.objects.filter(user=user, is_active=True).exists(), 0)\n\n def tearDown(self):\n api_settings.PASSWORDLESS_EMAIL_NOREPLY_ADDRESS = DEFAULTS['PASSWORDLESS_EMAIL_NOREPLY_ADDRESS']\n api_settings.PASSWORDLESS_REGISTER_NEW_USERS = DEFAULTS['PASSWORDLESS_REGISTER_NEW_USERS']\n\n\nclass EmailLoginCallbackTokenTests(APITestCase):\n\n def setUp(self):\n api_settings.PASSWORDLESS_AUTH_TYPES = ['EMAIL']\n api_settings.PASSWORDLESS_EMAIL_NOREPLY_ADDRESS = 'noreply@example.com'\n\n self.email = 'aaron@example.com'\n self.url = reverse('drfpasswordless:auth_email')\n self.challenge_url = reverse('drfpasswordless:auth_token')\n\n self.email_field_name = api_settings.PASSWORDLESS_USER_EMAIL_FIELD_NAME\n self.user = User.objects.create(**{self.email_field_name: self.email})\n\n def test_email_auth_failed(self):\n data = {'email': self.email}\n response = self.client.post(self.url, data)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n # Token sent to alias\n challenge_data = {'email': self.email, 'token': '123456'} # Send an arbitrary token instead\n\n # Try to auth with the callback token\n challenge_response = self.client.post(self.challenge_url, challenge_data)\n self.assertEqual(challenge_response.status_code, status.HTTP_400_BAD_REQUEST)\n\n def test_email_auth_missing_alias(self):\n data = {'email': self.email}\n response = self.client.post(self.url, data)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n # Token sent to alias\n callback_token = CallbackToken.objects.filter(user=self.user, is_active=True).first()\n challenge_data = {'token': callback_token} # Missing Alias\n\n # Try to auth with the callback token\n challenge_response = self.client.post(self.challenge_url, challenge_data)\n self.assertEqual(challenge_response.status_code, status.HTTP_400_BAD_REQUEST)\n\n def test_email_auth_bad_alias(self):\n data = {'email': self.email}\n response = self.client.post(self.url, data)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n # Token sent to alias\n callback_token = CallbackToken.objects.filter(user=self.user, is_active=True).first()\n challenge_data = {'email': 'abcde@example.com', 'token': callback_token} # Bad Alias\n\n # Try to auth with the callback token\n challenge_response = self.client.post(self.challenge_url, challenge_data)\n self.assertEqual(challenge_response.status_code, status.HTTP_400_BAD_REQUEST)\n\n 
def test_email_auth_expired(self):\n data = {'email': self.email}\n response = self.client.post(self.url, data)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n # Token sent to alias\n callback_token = CallbackToken.objects.filter(user=self.user, is_active=True).first()\n challenge_data = {'email': self.email, 'token': callback_token}\n\n data = {'email': self.email}\n response = self.client.post(self.url, data)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n # Second token sent to alias\n second_callback_token = CallbackToken.objects.filter(user=self.user, is_active=True).first()\n second_challenge_data = {'email': self.email, 'token': second_callback_token}\n\n # Try to auth with the old callback token\n challenge_response = self.client.post(self.challenge_url, challenge_data)\n self.assertEqual(challenge_response.status_code, status.HTTP_400_BAD_REQUEST)\n\n # Try to auth with the new callback token\n second_challenge_response = self.client.post(self.challenge_url, second_challenge_data)\n self.assertEqual(second_challenge_response.status_code, status.HTTP_200_OK)\n\n # Verify Auth Token\n auth_token = second_challenge_response.data['token']\n self.assertEqual(auth_token, Token.objects.filter(key=auth_token).first().key)\n\n def test_email_auth_success(self):\n data = {'email': self.email}\n response = self.client.post(self.url, data)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n # Token sent to alias\n callback_token = CallbackToken.objects.filter(user=self.user, is_active=True).first()\n challenge_data = {'email': self.email, 'token': callback_token}\n\n # Try to auth with the callback token\n challenge_response = self.client.post(self.challenge_url, challenge_data)\n self.assertEqual(challenge_response.status_code, status.HTTP_200_OK)\n\n # Verify Auth Token\n auth_token = challenge_response.data['token']\n self.assertEqual(auth_token, Token.objects.filter(key=auth_token).first().key)\n\n def tearDown(self):\n api_settings.PASSWORDLESS_AUTH_TYPES = DEFAULTS['PASSWORDLESS_AUTH_TYPES']\n api_settings.PASSWORDLESS_EMAIL_NOREPLY_ADDRESS = DEFAULTS['PASSWORDLESS_EMAIL_NOREPLY_ADDRESS']\n self.user.delete()\n\n\n\"\"\"\nMobile Tests\n\"\"\"\n\n\nclass MobileSignUpCallbackTokenTests(APITestCase):\n\n def setUp(self):\n api_settings.PASSWORDLESS_TEST_SUPPRESSION = True\n api_settings.PASSWORDLESS_AUTH_TYPES = ['MOBILE']\n api_settings.PASSWORDLESS_MOBILE_NOREPLY_NUMBER = '+15550000000'\n self.url = reverse('drfpasswordless:auth_mobile')\n\n self.mobile_field_name = api_settings.PASSWORDLESS_USER_MOBILE_FIELD_NAME\n\n def test_mobile_signup_failed(self):\n mobile = 'sidfj98zfd'\n data = {'mobile': mobile}\n\n response = self.client.post(self.url, data)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n def test_mobile_signup_success(self):\n mobile = '+15551234567'\n data = {'mobile': mobile}\n\n # Verify user doesn't exist yet\n user = User.objects.filter(**{self.mobile_field_name: '+15551234567'}).first()\n # Make sure our user isn't None, meaning the user was created.\n self.assertEqual(user, None)\n\n # verify a new user was created with serializer\n response = self.client.post(self.url, data)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n user = User.objects.get(**{self.mobile_field_name: '+15551234567'})\n self.assertNotEqual(user, None)\n\n # Verify a token exists for the user\n self.assertEqual(CallbackToken.objects.filter(user=user, is_active=True).exists(), 1)\n\n def 
test_mobile_signup_disabled(self):\n api_settings.PASSWORDLESS_REGISTER_NEW_USERS = False\n\n # Verify user doesn't exist yet\n user = User.objects.filter(**{self.mobile_field_name: '+15557654321'}).first()\n # Make sure the user is None, meaning no user has been created yet.\n self.assertEqual(user, None)\n\n mobile = '+15557654321'\n data = {'mobile': mobile}\n\n # verify a new user was not created\n response = self.client.post(self.url, data)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n user = User.objects.filter(**{self.mobile_field_name: '+15557654321'}).first()\n self.assertEqual(user, None)\n\n # Verify no token was created for the user\n self.assertFalse(CallbackToken.objects.filter(user=user, is_active=True).exists())\n\n def tearDown(self):\n api_settings.PASSWORDLESS_TEST_SUPPRESSION = DEFAULTS['PASSWORDLESS_TEST_SUPPRESSION']\n api_settings.PASSWORDLESS_AUTH_TYPES = DEFAULTS['PASSWORDLESS_AUTH_TYPES']\n api_settings.PASSWORDLESS_REGISTER_NEW_USERS = DEFAULTS['PASSWORDLESS_REGISTER_NEW_USERS']\n api_settings.PASSWORDLESS_MOBILE_NOREPLY_NUMBER = DEFAULTS['PASSWORDLESS_MOBILE_NOREPLY_NUMBER']\n\n\ndef dummy_token_creator(user):\n token = Token.objects.create(key=\"dummy\", user=user)\n return (token, True)\n\n\nclass OverrideTokenCreationTests(APITestCase):\n def setUp(self):\n super().setUp()\n\n api_settings.PASSWORDLESS_AUTH_TOKEN_CREATOR = 'tests.test_authentication.dummy_token_creator'\n api_settings.PASSWORDLESS_AUTH_TYPES = ['EMAIL']\n api_settings.PASSWORDLESS_EMAIL_NOREPLY_ADDRESS = 'noreply@example.com'\n\n self.email = 'aaron@example.com'\n self.url = reverse('drfpasswordless:auth_email')\n self.challenge_url = reverse('drfpasswordless:auth_token')\n\n self.email_field_name = api_settings.PASSWORDLESS_USER_EMAIL_FIELD_NAME\n self.user = User.objects.create(**{self.email_field_name: self.email})\n\n def test_token_creation_gets_overridden(self):\n \"\"\"Ensure that if we change the token creation function, the overridden one gets called\"\"\"\n data = {'email': self.email}\n response = self.client.post(self.url, data)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n # Token sent to alias\n callback_token = CallbackToken.objects.filter(user=self.user, is_active=True).first()\n challenge_data = {'email': self.email, 'token': callback_token}\n\n # Try to auth with the callback token\n challenge_response = self.client.post(self.challenge_url, challenge_data)\n self.assertEqual(challenge_response.status_code, status.HTTP_200_OK)\n\n # Verify Auth Token\n auth_token = challenge_response.data['token']\n self.assertEqual(auth_token, Token.objects.filter(key=auth_token).first().key)\n self.assertEqual('dummy', Token.objects.filter(key=auth_token).first().key)\n\n def tearDown(self):\n api_settings.PASSWORDLESS_AUTH_TOKEN_CREATOR = DEFAULTS['PASSWORDLESS_AUTH_TOKEN_CREATOR']\n api_settings.PASSWORDLESS_AUTH_TYPES = DEFAULTS['PASSWORDLESS_AUTH_TYPES']\n api_settings.PASSWORDLESS_EMAIL_NOREPLY_ADDRESS = DEFAULTS['PASSWORDLESS_EMAIL_NOREPLY_ADDRESS']\n self.user.delete()\n super().tearDown()\n\n\nclass MobileLoginCallbackTokenTests(APITestCase):\n\n def setUp(self):\n api_settings.PASSWORDLESS_TEST_SUPPRESSION = True\n api_settings.PASSWORDLESS_AUTH_TYPES = ['MOBILE']\n api_settings.PASSWORDLESS_MOBILE_NOREPLY_NUMBER = '+15550000000'\n\n self.mobile = '+15551234567'\n self.url = reverse('drfpasswordless:auth_mobile')\n self.challenge_url = reverse('drfpasswordless:auth_token')\n\n self.mobile_field_name = 
api_settings.PASSWORDLESS_USER_MOBILE_FIELD_NAME\n\n self.user = User.objects.create(**{self.mobile_field_name: self.mobile})\n\n def test_mobile_auth_failed(self):\n data = {'mobile': self.mobile}\n response = self.client.post(self.url, data)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n # Token sent to alias\n challenge_data = {'mobile': self.mobile, 'token': '123456'} # Send an arbitrary token instead\n\n # Try to auth with the callback token\n challenge_response = self.client.post(self.challenge_url, challenge_data)\n self.assertEqual(challenge_response.status_code, status.HTTP_400_BAD_REQUEST)\n\n def test_mobile_auth_expired(self):\n data = {'mobile': self.mobile}\n first_response = self.client.post(self.url, data)\n self.assertEqual(first_response.status_code, status.HTTP_200_OK)\n\n # Token sent to alias\n first_callback_token = CallbackToken.objects.filter(user=self.user, is_active=True).first()\n first_challenge_data = {'mobile': self.mobile, 'token': first_callback_token}\n\n data = {'mobile': self.mobile}\n second_response = self.client.post(self.url, data)\n self.assertEqual(second_response.status_code, status.HTTP_200_OK)\n\n # Second token sent to alias\n second_callback_token = CallbackToken.objects.filter(user=self.user, is_active=True).first()\n second_challenge_data = {'mobile': self.mobile, 'token': second_callback_token}\n\n # Try to auth with the old callback token\n challenge_response = self.client.post(self.challenge_url, first_challenge_data)\n self.assertEqual(challenge_response.status_code, status.HTTP_400_BAD_REQUEST)\n\n # Try to auth with the new callback token\n second_challenge_response = self.client.post(self.challenge_url, second_challenge_data)\n self.assertEqual(second_challenge_response.status_code, status.HTTP_200_OK)\n\n # Verify Auth Token\n auth_token = second_challenge_response.data['token']\n self.assertEqual(auth_token, Token.objects.filter(key=auth_token).first().key)\n\n def test_mobile_auth_success(self):\n data = {'mobile': self.mobile}\n response = self.client.post(self.url, data)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n # Token sent to alias\n callback_token = CallbackToken.objects.filter(user=self.user, is_active=True).first()\n challenge_data = {'mobile': self.mobile, 'token': callback_token}\n\n # Try to auth with the callback token\n challenge_response = self.client.post(self.challenge_url, challenge_data)\n self.assertEqual(challenge_response.status_code, status.HTTP_200_OK)\n\n # Verify Auth Token\n auth_token = challenge_response.data['token']\n self.assertEqual(auth_token, Token.objects.filter(key=auth_token).first().key)\n\n def tearDown(self):\n api_settings.PASSWORDLESS_TEST_SUPPRESSION = DEFAULTS['PASSWORDLESS_TEST_SUPPRESSION']\n api_settings.PASSWORDLESS_AUTH_TYPES = DEFAULTS['PASSWORDLESS_AUTH_TYPES']\n api_settings.PASSWORDLESS_MOBILE_NOREPLY_NUMBER = DEFAULTS['PASSWORDLESS_MOBILE_NOREPLY_NUMBER']\n self.user.delete()\n" }, { "alpha_fraction": 0.6313498020172119, "alphanum_fraction": 0.6313498020172119, "avg_line_length": 39.52941131591797, "blob_id": "2a87020058892eccfcc25e46cc2b3975c6cf29e1", "content_id": "120fea03504b86301204480c1bf47165bd56b7b5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 689, "license_type": "permissive", "max_line_length": 75, "num_lines": 17, "path": "/tests/urls.py", "repo_name": "aaronn/django-rest-framework-passwordless", "src_encoding": "UTF-8", "text": "from django.urls import path, 
include\nfrom rest_framework.urlpatterns import format_suffix_patterns\nfrom drfpasswordless.settings import api_settings\nfrom drfpasswordless.views import (ObtainEmailCallbackToken,\n ObtainMobileCallbackToken,\n ObtainAuthTokenFromCallbackToken,\n VerifyAliasFromCallbackToken,\n ObtainEmailVerificationCallbackToken,\n ObtainMobileVerificationCallbackToken, )\n\napp_name = 'drfpasswordless'\n\nurlpatterns = [\n path('', include('drfpasswordless.urls')),\n]\n\nformat_suffix_patterns(urlpatterns)\n" }, { "alpha_fraction": 0.6067415475845337, "alphanum_fraction": 0.63670414686203, "avg_line_length": 21.25, "blob_id": "14bee5a1e8cbe01a97f22df736f13305cbb6b5e9", "content_id": "8beef42f578763527902b4ce5e0966c61157b8ff", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 267, "license_type": "permissive", "max_line_length": 65, "num_lines": 12, "path": "/drfpasswordless/__init__.py", "repo_name": "aaronn/django-rest-framework-passwordless", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n__title__ = 'drfpasswordless'\n__version__ = '1.5.8'\n__author__ = 'Aaron Ng'\n__license__ = 'MIT'\n__copyright__ = 'Copyright 2022 Aaron Ng'\n\n# Version synonym\nVERSION = __version__\n\ndefault_app_config = 'drfpasswordless.apps.DrfpasswordlessConfig'\n" }, { "alpha_fraction": 0.6782712936401367, "alphanum_fraction": 0.6782712936401367, "avg_line_length": 36.8636360168457, "blob_id": "ce9a8f7e8210b884f6d85376f6ad17490a3d69b1", "content_id": "efa161c83587e028555f43afa4b426d5ee6e471b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 833, "license_type": "permissive", "max_line_length": 81, "num_lines": 22, "path": "/drfpasswordless/services.py", "repo_name": "aaronn/django-rest-framework-passwordless", "src_encoding": "UTF-8", "text": "from django.utils.module_loading import import_string\nfrom drfpasswordless.settings import api_settings\nfrom drfpasswordless.utils import (\n create_callback_token_for_user,\n)\n\n\nclass TokenService(object):\n @staticmethod\n def send_token(user, alias_type, token_type, **message_payload):\n token = create_callback_token_for_user(user, alias_type, token_type)\n send_action = None\n\n if user.pk in api_settings.PASSWORDLESS_DEMO_USERS.keys():\n return True\n if alias_type == 'email':\n send_action = import_string(api_settings.PASSWORDLESS_EMAIL_CALLBACK)\n elif alias_type == 'mobile':\n send_action = import_string(api_settings.PASSWORDLESS_SMS_CALLBACK)\n # Send to alias\n success = send_action(user, token, **message_payload)\n return success\n" }, { "alpha_fraction": 0.5822083950042725, "alphanum_fraction": 0.5826134085655212, "avg_line_length": 49.054054260253906, "blob_id": "280cfc60636ddf99fd2b2a1c7a7893452f13fc20", "content_id": "33adf6d5bf1427ccfa2b34808fd0f171c2712854", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7408, "license_type": "permissive", "max_line_length": 133, "num_lines": 148, "path": "/drfpasswordless/signals.py", "repo_name": "aaronn/django-rest-framework-passwordless", "src_encoding": "UTF-8", "text": "import logging\nfrom django.contrib.auth import get_user_model\nfrom django.core.exceptions import ValidationError\nfrom django.dispatch import receiver\nfrom django.db.models import signals\nfrom drfpasswordless.models import CallbackToken\nfrom drfpasswordless.models import generate_numeric_token\nfrom drfpasswordless.settings import 
api_settings\nfrom drfpasswordless.services import TokenService\n\nlogger = logging.getLogger(__name__)\n\n\n@receiver(signals.post_save, sender=CallbackToken)\ndef invalidate_previous_tokens(sender, instance, created, **kwargs):\n \"\"\"\n Invalidates all previously issued tokens of that type whenever a new one is created or saved.\n \"\"\"\n\n if instance.user.pk in api_settings.PASSWORDLESS_DEMO_USERS.keys():\n return\n\n if isinstance(instance, CallbackToken):\n CallbackToken.objects.active().filter(user=instance.user, type=instance.type).exclude(id=instance.id).update(is_active=False)\n\n\n@receiver(signals.pre_save, sender=CallbackToken)\ndef check_unique_tokens(sender, instance, **kwargs):\n \"\"\"\n Ensures that mobile and email tokens are unique, retrying generation when a key collides.\n Note that here we've decided keys are unique even across auth and validation.\n We could consider relaxing this in the future as well by filtering on the instance.type.\n \"\"\"\n if instance._state.adding:\n # save is called on a token to create it in the db\n # before creating check whether a token with the same key exists\n if isinstance(instance, CallbackToken):\n unique = False\n tries = 0\n \n if CallbackToken.objects.filter(key=instance.key, is_active=True).exists():\n # Try N (default=3) times before giving up.\n while tries < api_settings.PASSWORDLESS_TOKEN_GENERATION_ATTEMPTS:\n tries = tries + 1\n new_key = generate_numeric_token()\n instance.key = new_key\n \n if not CallbackToken.objects.filter(key=instance.key, is_active=True).exists():\n # Leave the loop if we found a valid token that doesn't exist yet.\n unique = True\n break\n\n if not unique:\n # A unique value wasn't found after the allowed number of attempts\n raise ValidationError(\"Couldn't create a unique token even after retrying.\")\n else:\n # A unique value was found immediately.\n pass\n\n \n else:\n # save is called on an already existing token to update it, such as when invalidating it.\n # In that case there is no need to check for the key. 
This way we both avoid an unnecessary db hit\n # and avoid changing the key field of used tokens.\n pass\n\n\n\nUser = get_user_model()\n\n\n@receiver(signals.pre_save, sender=User)\ndef update_alias_verification(sender, instance, **kwargs):\n \"\"\"\n Flags a user's email as unverified if they change it.\n Optionally sends a verification token to the new endpoint.\n \"\"\"\n if isinstance(instance, User):\n\n if instance.id:\n\n if api_settings.PASSWORDLESS_USER_MARK_EMAIL_VERIFIED is True:\n \"\"\"\n For marking email aliases as not verified when a user changes it.\n \"\"\"\n email_field = api_settings.PASSWORDLESS_USER_EMAIL_FIELD_NAME\n email_verified_field = api_settings.PASSWORDLESS_USER_EMAIL_VERIFIED_FIELD_NAME\n\n # Verify that this is an existing instance and not a new one.\n try:\n user_old = User.objects.get(id=instance.id) # Pre-save object\n instance_email = getattr(instance, email_field) # Incoming Email\n old_email = getattr(user_old, email_field) # Pre-save object email\n\n if instance_email != old_email and instance_email != \"\" and instance_email is not None:\n # Email changed, verification should be flagged\n setattr(instance, email_verified_field, False)\n if api_settings.PASSWORDLESS_AUTO_SEND_VERIFICATION_TOKEN is True:\n email_subject = api_settings.PASSWORDLESS_EMAIL_VERIFICATION_SUBJECT\n email_plaintext = api_settings.PASSWORDLESS_EMAIL_VERIFICATION_PLAINTEXT_MESSAGE\n email_html = api_settings.PASSWORDLESS_EMAIL_VERIFICATION_TOKEN_HTML_TEMPLATE_NAME\n message_payload = {'email_subject': email_subject,\n 'email_plaintext': email_plaintext,\n 'email_html': email_html}\n success = TokenService.send_token(instance, 'email', CallbackToken.TOKEN_TYPE_VERIFY, **message_payload)\n\n if success:\n logger.info('drfpasswordless: Successfully sent email on updated address: %s'\n % instance_email)\n else:\n logger.info('drfpasswordless: Failed to send email to updated address: %s'\n % instance_email)\n\n except User.DoesNotExist:\n # User probably is just initially being created\n return\n\n if api_settings.PASSWORDLESS_USER_MARK_MOBILE_VERIFIED is True:\n \"\"\"\n For marking mobile aliases as not verified when a user changes it.\n \"\"\"\n mobile_field = api_settings.PASSWORDLESS_USER_MOBILE_FIELD_NAME\n mobile_verified_field = api_settings.PASSWORDLESS_USER_MOBILE_VERIFIED_FIELD_NAME\n\n # Verify that this is an existing instance and not a new one.\n try:\n user_old = User.objects.get(id=instance.id) # Pre-save object\n instance_mobile = getattr(instance, mobile_field) # Incoming mobile\n old_mobile = getattr(user_old, mobile_field) # Pre-save object mobile\n\n if instance_mobile != old_mobile and instance_mobile != \"\" and instance_mobile is not None:\n # Mobile changed, verification should be flagged\n setattr(instance, mobile_verified_field, False)\n if api_settings.PASSWORDLESS_AUTO_SEND_VERIFICATION_TOKEN is True:\n mobile_message = api_settings.PASSWORDLESS_MOBILE_MESSAGE\n message_payload = {'mobile_message': mobile_message}\n success = TokenService.send_token(instance, 'mobile', CallbackToken.TOKEN_TYPE_VERIFY, **message_payload)\n\n if success:\n logger.info('drfpasswordless: Successfully sent SMS on updated mobile: %s'\n % instance_mobile)\n else:\n logger.info('drfpasswordless: Failed to send SMS to updated mobile: %s'\n % instance_mobile)\n\n except User.DoesNotExist:\n # User probably is just initially being created\n pass\n" }, { "alpha_fraction": 0.7161753177642822, "alphanum_fraction": 0.7275006771087646, "avg_line_length": 29.43947410583496, "blob_id": 
"b2cf852ec93e8bc7520beed26bbcb3f777c03188", "content_id": "d6ceaba5367601e1e53a1b6a547f2704b7e32a4b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 11611, "license_type": "permissive", "max_line_length": 117, "num_lines": 380, "path": "/README.md", "repo_name": "aaronn/django-rest-framework-passwordless", "src_encoding": "UTF-8", "text": "![splash-image]\n![ci-image]\n\ndrfpasswordless is a quick way to integrate ‘passwordless’ auth into\nyour Django Rest Framework project using a user’s email address or\nmobile number only (herein referred to as an alias).\n\nBuilt to work with DRF’s own TokenAuthentication system, it sends the\nuser a 6-digit callback token to a given email address or a mobile\nnumber. The user sends it back correctly and they’re given an\nauthentication token (again, provided by Django Rest Framework’s\nTokenAuthentication system).\n\nCallback tokens by default expire after 15 minutes.\n\nExample Usage:\n==============\n\n```bash\ncurl -X POST -d “email=aaron@email.com” localhost:8000/auth/email/\n```\n\nEmail to aaron@email.com:\n\n```\n...\n<h1>Your login token is 815381.</h1>\n...\n```\n\nReturn Stage\n\n```bash\ncurl -X POST -d \"email=aaron@example.com&token=815381\" localhost:8000/auth/token/\n\n> HTTP/1.0 200 OK\n> {\"token\":\"76be2d9ecfaf5fa4226d722bzdd8a4fff207ed0e”}\n```\n\nRequirements\n============\n\n- Python (3.7+)\n- Django (2.2+)\n- Django Rest Framework + AuthToken (3.10+)\n- Python-Twilio (Optional, for mobile.)\n\n\nInstall\n=======\n\n1. Install drfpasswordless\n\n ```\n pipenv install drfpasswordless\n ```\n\n2. Add Django Rest Framework’s Token Authentication to your Django Rest\n Framework project.\n\n```python\n REST_FRAMEWORK = {\n 'DEFAULT_AUTHENTICATION_CLASSES':\n ('rest_framework.authentication.TokenAuthentication',\n )}\n\n INSTALLED_APPS = [\n ...\n 'rest_framework',\n 'rest_framework.authtoken',\n 'drfpasswordless',\n ...\n ]\n```\n\nAnd run\n```bash\npython manage.py migrate\n```\n\n3. Set which types of contact points are allowed for auth in your\n Settings.py. The available options are ``EMAIL`` and ``MOBILE``.\n\n```python\nPASSWORDLESS_AUTH = {\n ..\n 'PASSWORDLESS_AUTH_TYPES': ['EMAIL', 'MOBILE'],\n ..\n}\n```\n\n By default drfpasswordless looks for fields named ``email`` or ``mobile``\n on the User model. If an alias provided doesn’t belong to any given user,\n a new user is created.\n\n 3a. If you’re using ``email``, see the Configuring Email section\n below.\n\n 3b. If you’re using ``mobile``, see the Configuring Mobile section\n below.\n\n4. Add ``drfpasswordless.urls`` to your urls.py\n\n```python\n from django.urls import path, include\n\n urlpatterns = [\n ..\n path('', include('drfpasswordless.urls')),\n ..\n ]\n```\n\n5. You can now POST to either of the endpoints:\n\n```bash\ncurl -X POST -d \"email=aaron@email.com\" localhost:8000/auth/email/\n\n// OR\n\ncurl -X POST -d \"mobile=+15552143912\" localhost:8000/auth/mobile/\n```\n A 6 digit callback token will be sent to the contact point.\n\n6. The client has 15 minutes to use the 6 digit callback token\n correctly. 
If successful, they get an authorization token in exchange\n which the client can then use with Django Rest Framework’s\n TokenAuthentication scheme.\n\n```bash\ncurl -X POST -d \"email=aaron@email.com&token=815381\" localhost:8000/auth/token/\n\n> HTTP/1.0 200 OK\n> {\"token\":\"76be2d9ecfaf5fa4226d722bzdd8a4fff207ed0e\"}\n```\n\nConfiguring Emails\n------------------\n\nSpecify the email address you’d like to send the callback token from\nwith the ``PASSWORDLESS_EMAIL_NOREPLY_ADDRESS`` setting.\n```python\nPASSWORDLESS_AUTH = {\n ..\n 'PASSWORDLESS_AUTH_TYPES': ['EMAIL',],\n 'PASSWORDLESS_EMAIL_NOREPLY_ADDRESS': 'noreply@example.com',\n ..\n}\n```\n\nYou’ll also need to set up an SMTP server to send emails but for\ndevelopment you can set up a dummy development SMTP server to test\nemails. Sent emails will print to the console. [Read more here](https://docs.djangoproject.com/en/3.0/topics/email/#console-backend).\n\n```python\n# Settings.py\nEMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'\n```\n\nConfiguring Mobile\n------------------\n\nYou’ll need to have the Python twilio module installed\n\n```bash\npipenv install twilio\n```\n\nand set the ``TWILIO_ACCOUNT_SID`` and ``TWILIO_AUTH_TOKEN`` environment\nvariables. These are read from `os.environ`, so make sure you don't put\nthem in your settings file accidentally.\n\nYou’ll also need to specify the number you send the token from with the\n``PASSWORDLESS_MOBILE_NOREPLY_NUMBER`` setting.\n\nTemplates\n=========\n\nIf you’d like to use a custom email template for your email callback\ntoken, specify your template name with this setting:\n\n```python\nPASSWORDLESS_AUTH = {\n ...\n 'PASSWORDLESS_EMAIL_TOKEN_HTML_TEMPLATE_NAME': \"mytemplate.html\"\n}\n```\n\nThe template renders a single variable ``{{ callback_token }}`` which is\nthe 6-digit callback token being sent.\n\nContact Point Validation\n========================\n\nEndpoints can automatically mark themselves as validated when a user\nlogs in with a token sent to a specific endpoint. They can also\nautomatically mark themselves as unverified when a user changes a contact\npoint.\n\nThis is off by default but can be turned on with\n``PASSWORDLESS_USER_MARK_EMAIL_VERIFIED`` or\n``PASSWORDLESS_USER_MARK_MOBILE_VERIFIED``. By default when these are\nenabled they look for the User model fields ``email_verified`` or\n``mobile_verified``.\n\nYou can also use ``/auth/verify/email/`` or ``/auth/verify/mobile/`` which will\nautomatically send a token to the endpoint attached to the current\n``request.user``'s email or mobile if available.\n\nYou can then send that token to ``/auth/verify/`` which will double-check\nthat the endpoint belongs to the request.user and mark the alias as verified.\n\nRegistration\n============\n\nAll unrecognized emails and mobile numbers create new accounts by\ndefault. 
New accounts are automatically set with\n``set_unusable_password()`` but it’s recommended that admins have some\ntype of password.\n\nThis can be turned off with the ``PASSWORDLESS_REGISTER_NEW_USERS``\nsetting.\n\nOther Settings\n==============\n\nHere’s a full list of the configurable defaults.\n\n```python\nDEFAULTS = {\n\n # Allowed auth types, can be EMAIL, MOBILE, or both.\n 'PASSWORDLESS_AUTH_TYPES': ['EMAIL'],\n\n # URL Prefix for Authentication Endpoints\n 'PASSWORDLESS_AUTH_PREFIX': 'auth/',\n \n # URL Prefix for Verification Endpoints\n 'PASSWORDLESS_VERIFY_PREFIX': 'auth/verify/',\n\n # Amount of time that tokens last, in seconds\n 'PASSWORDLESS_TOKEN_EXPIRE_TIME': 15 * 60,\n\n # The user's email field name\n 'PASSWORDLESS_USER_EMAIL_FIELD_NAME': 'email',\n\n # The user's mobile field name\n 'PASSWORDLESS_USER_MOBILE_FIELD_NAME': 'mobile',\n\n # Marks itself as verified the first time a user completes auth via token.\n # Automatically unmarks itself if email is changed.\n 'PASSWORDLESS_USER_MARK_EMAIL_VERIFIED': False,\n 'PASSWORDLESS_USER_EMAIL_VERIFIED_FIELD_NAME': 'email_verified',\n\n # Marks itself as verified the first time a user completes auth via token.\n # Automatically unmarks itself if mobile number is changed.\n 'PASSWORDLESS_USER_MARK_MOBILE_VERIFIED': False,\n 'PASSWORDLESS_USER_MOBILE_VERIFIED_FIELD_NAME': 'mobile_verified',\n\n # The email the callback token is sent from\n 'PASSWORDLESS_EMAIL_NOREPLY_ADDRESS': None,\n\n # The email subject\n 'PASSWORDLESS_EMAIL_SUBJECT': \"Your Login Token\",\n\n # A plaintext email message overridden by the html message. Takes one string.\n 'PASSWORDLESS_EMAIL_PLAINTEXT_MESSAGE': \"Enter this token to sign in: %s\",\n\n # The email template name.\n 'PASSWORDLESS_EMAIL_TOKEN_HTML_TEMPLATE_NAME': \"passwordless_default_token_email.html\",\n\n # Your twilio number that sends the callback tokens.\n 'PASSWORDLESS_MOBILE_NOREPLY_NUMBER': None,\n\n # The message sent to mobile users logging in. Takes one string.\n 'PASSWORDLESS_MOBILE_MESSAGE': \"Use this code to log in: %s\",\n\n # Registers previously unseen aliases as new users.\n 'PASSWORDLESS_REGISTER_NEW_USERS': True,\n\n # Suppresses actual SMS for testing\n 'PASSWORDLESS_TEST_SUPPRESSION': False,\n\n # Context Processors for Email Template\n 'PASSWORDLESS_CONTEXT_PROCESSORS': [],\n\n # The verification email subject\n 'PASSWORDLESS_EMAIL_VERIFICATION_SUBJECT': \"Your Verification Token\",\n\n # A plaintext verification email message overridden by the html message. Takes one string.\n 'PASSWORDLESS_EMAIL_VERIFICATION_PLAINTEXT_MESSAGE': \"Enter this verification code: %s\",\n\n # The verification email template name.\n 'PASSWORDLESS_EMAIL_VERIFICATION_TOKEN_HTML_TEMPLATE_NAME': \"passwordless_default_verification_token_email.html\",\n\n # The verification message sent to mobile users. Takes one string.\n 'PASSWORDLESS_MOBILE_VERIFICATION_MESSAGE': \"Enter this verification code: %s\",\n\n # Automatically send verification email or sms when a user changes their alias.\n 'PASSWORDLESS_AUTO_SEND_VERIFICATION_TOKEN': False,\n\n # What function is called to construct an authentication token when\n # exchanging a passwordless token for a real user auth token. This function\n # should take a user and return a tuple of two values. 
The first value is\n # the token itself, the second is a boolean value representing whether\n # the token was newly created.\n 'PASSWORDLESS_AUTH_TOKEN_CREATOR': 'drfpasswordless.utils.create_authentication_token',\n \n # What function is called to construct a serializer for drf tokens when\n # exchanging a passwordless token for a real user auth token.\n 'PASSWORDLESS_AUTH_TOKEN_SERIALIZER': 'drfpasswordless.serializers.TokenResponseSerializer',\n\n # A dictionary of demo users' primary keys mapped to their static pins\n 'PASSWORDLESS_DEMO_USERS': {},\n\n # configurable function for sending email\n 'PASSWORDLESS_EMAIL_CALLBACK': 'drfpasswordless.utils.send_email_with_callback_token',\n \n # configurable function for sending sms\n 'PASSWORDLESS_SMS_CALLBACK': 'drfpasswordless.utils.send_sms_with_callback_token',\n\n # Token Generation Retry Count\n 'PASSWORDLESS_TOKEN_GENERATION_ATTEMPTS': 3\n\n\n}\n```\n\nTo Do\n----\n\n- github.io project page\n- Add MkDocs - http://www.mkdocs.org/\n- Support non-US mobile numbers\n- Custom URLs\n- Change bad settings to 500's\n\nPull requests are encouraged!\n\nDonations & Support\n----\nIf you found drfpasswordless useful, consider giving me a follow\n[@localghost](https://www.twitter.com/aaronykng) on Twitter and\n[@hi.aaron](https://www.instagram.com/hi.aaron) on Instagram.\n\nIf you'd like to go a step further and are using drfpasswordless in your startup\nor business, consider a donation:\n\n- BTC: `3FzSFeKVABL5Adh9Egoxh77gHbtg2kcTPk`\n- ETH: `0x13412a79F06A83B107A8833dB209BECbcb700f24`\n- Square Cash: `$aaron`\n\nLicense\n-------\n\nThe MIT License (MIT)\n\nCopyright (c) 2020 Aaron Ng\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n[ci-image]: https://travis-ci.org/aaronn/django-rest-framework-passwordless.svg?branch=master\n[splash-image]: https://i.imgur.com/OdDHAIf.png\n" }, { "alpha_fraction": 0.5641592741012573, "alphanum_fraction": 0.6637167930603027, "avg_line_length": 18.079999923706055, "blob_id": "7b7a611834fa087aeaff74697361d1a364f9fc68", "content_id": "f4eb57b034934f4a521c9c1df0c2c75198491e48", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 452, "license_type": "permissive", "max_line_length": 39, "num_lines": 25, "path": "/tox.ini", "repo_name": "aaronn/django-rest-framework-passwordless", "src_encoding": "UTF-8", "text": "[tox]\nenvlist =\n flake8\n py37-django22-drf{310,311}\n py37-django30-drf{310,311}\n\n[testenv]\ncommands =\n pytest tests\ndeps =\n pytest\n pytest-cov\n pytest-django\n django22: Django==2.2.*\n django30: Django==3.0.*\n drf310: djangorestframework==3.10.*\n drf311: djangorestframework==3.11.*\nsetenv =\n\tPYTHONPATH = {toxinidir}\n\n[testenv:flake8]\ndeps = flake8\ncommands =\n flake8 --version\n flake8 setup.py docs project test\n" }, { "alpha_fraction": 0.7421875, "alphanum_fraction": 0.7421875, "avg_line_length": 27.44444465637207, "blob_id": "7503273bcb4f1dc50a4661a485752977d528ff3e", "content_id": "3f9b86bffb3a5baa3024e090e35abbb0250b995a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 256, "license_type": "permissive", "max_line_length": 54, "num_lines": 9, "path": "/drfpasswordless/apps.py", "repo_name": "aaronn/django-rest-framework-passwordless", "src_encoding": "UTF-8", "text": "from django.apps import AppConfig\nfrom django.utils.translation import gettext_lazy as _\n\n\nclass DrfpasswordlessConfig(AppConfig):\n name = 'drfpasswordless'\n verbose_name = _(\"DRF Passwordless\")\n\n def ready(self):\n import drfpasswordless.signals\n" }, { "alpha_fraction": 0.5869218707084656, "alphanum_fraction": 0.6188197731971741, "avg_line_length": 26.2608699798584, "blob_id": "4500b704bd2db7e4a21c616cf1ea558aaeb71a3b", "content_id": "003d7e16d9f870287979208a2b5a2c73b7925bf6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 627, "license_type": "permissive", "max_line_length": 104, "num_lines": 23, "path": "/drfpasswordless/migrations/0004_auto_20200125_0853.py", "repo_name": "aaronn/django-rest-framework-passwordless", "src_encoding": "UTF-8", "text": "# Generated by Django 3.0.2 on 2020-01-25 08:53\n\nfrom django.db import migrations, models\nimport drfpasswordless.models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('drfpasswordless', '0003_callbacktoken_type'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='callbacktoken',\n name='key',\n field=models.CharField(default=drfpasswordless.models.generate_numeric_token, max_length=6),\n ),\n migrations.AlterUniqueTogether(\n name='callbacktoken',\n unique_together={('is_active', 'key', 'type')},\n ),\n ]\n" }, { "alpha_fraction": 0.6288776993751526, "alphanum_fraction": 0.6288776993751526, "avg_line_length": 36.230045318603516, "blob_id": "37c31996b9375aa4aaca36a8bc97f2155edfc808", "content_id": "ba02deb155f4a412fef3d406aaa7420b4609d264", 
"detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7932, "license_type": "permissive", "max_line_length": 123, "num_lines": 213, "path": "/drfpasswordless/utils.py", "repo_name": "aaronn/django-rest-framework-passwordless", "src_encoding": "UTF-8", "text": "import logging\nimport os\nfrom django.contrib.auth import get_user_model\nfrom django.core.exceptions import PermissionDenied\nfrom django.core.mail import send_mail\nfrom django.template import loader\nfrom django.utils import timezone\nfrom rest_framework.authtoken.models import Token\nfrom drfpasswordless.models import CallbackToken\nfrom drfpasswordless.settings import api_settings\n\n\nlogger = logging.getLogger(__name__)\nUser = get_user_model()\n\n\ndef authenticate_by_token(callback_token):\n try:\n token = CallbackToken.objects.get(key=callback_token, is_active=True, type=CallbackToken.TOKEN_TYPE_AUTH)\n\n # Returning a user designates a successful authentication.\n token.user = User.objects.get(pk=token.user.pk)\n token.is_active = False # Mark this token as used.\n token.save()\n\n return token.user\n\n except CallbackToken.DoesNotExist:\n logger.debug(\"drfpasswordless: Challenged with a callback token that doesn't exist.\")\n except User.DoesNotExist:\n logger.debug(\"drfpasswordless: Authenticated user somehow doesn't exist.\")\n except PermissionDenied:\n logger.debug(\"drfpasswordless: Permission denied while authenticating.\")\n\n return None\n\n\ndef create_callback_token_for_user(user, alias_type, token_type):\n token = None\n alias_type_u = alias_type.upper()\n to_alias_field = getattr(api_settings, f'PASSWORDLESS_USER_{alias_type_u}_FIELD_NAME')\n if user.pk in api_settings.PASSWORDLESS_DEMO_USERS.keys():\n token = CallbackToken.objects.filter(user=user).first()\n if token:\n return token\n else:\n return CallbackToken.objects.create(\n user=user,\n key=api_settings.PASSWORDLESS_DEMO_USERS[user.pk],\n to_alias_type=alias_type_u,\n to_alias=getattr(user, to_alias_field),\n type=token_type\n )\n \n token = CallbackToken.objects.create(user=user,\n to_alias_type=alias_type_u,\n to_alias=getattr(user, to_alias_field),\n type=token_type)\n\n\n\n if token is not None:\n return token\n\n return None\n\n\ndef validate_token_age(callback_token):\n \"\"\"\n Returns True if a given token is within the age expiration limit.\n \"\"\"\n\n try:\n token = CallbackToken.objects.get(key=callback_token, is_active=True)\n seconds = (timezone.now() - token.created_at).total_seconds()\n token_expiry_time = api_settings.PASSWORDLESS_TOKEN_EXPIRE_TIME\n if token.user.pk in api_settings.PASSWORDLESS_DEMO_USERS.keys():\n return True\n if seconds <= token_expiry_time:\n return True\n else:\n # Invalidate our token.\n token.is_active = False\n token.save()\n return False\n\n except CallbackToken.DoesNotExist:\n # No valid token.\n return False\n\n\ndef verify_user_alias(user, token):\n \"\"\"\n Marks a user's contact point as verified depending on accepted token type.\n \"\"\"\n if token.to_alias_type == 'EMAIL':\n if token.to_alias == getattr(user, api_settings.PASSWORDLESS_USER_EMAIL_FIELD_NAME):\n setattr(user, api_settings.PASSWORDLESS_USER_EMAIL_VERIFIED_FIELD_NAME, True)\n elif token.to_alias_type == 'MOBILE':\n if token.to_alias == getattr(user, api_settings.PASSWORDLESS_USER_MOBILE_FIELD_NAME):\n setattr(user, api_settings.PASSWORDLESS_USER_MOBILE_VERIFIED_FIELD_NAME, True)\n else:\n return False\n user.save()\n return True\n\n\ndef inject_template_context(context):\n 
\"\"\"\n Injects additional context into email template.\n \"\"\"\n for processor in api_settings.PASSWORDLESS_CONTEXT_PROCESSORS:\n context.update(processor())\n return context\n\n\ndef send_email_with_callback_token(user, email_token, **kwargs):\n \"\"\"\n Sends a Email to user.email.\n\n Passes silently without sending in test environment\n \"\"\"\n\n try:\n if api_settings.PASSWORDLESS_EMAIL_NOREPLY_ADDRESS:\n # Make sure we have a sending address before sending.\n\n # Get email subject and message\n email_subject = kwargs.get('email_subject',\n api_settings.PASSWORDLESS_EMAIL_SUBJECT)\n email_plaintext = kwargs.get('email_plaintext',\n api_settings.PASSWORDLESS_EMAIL_PLAINTEXT_MESSAGE)\n email_html = kwargs.get('email_html',\n api_settings.PASSWORDLESS_EMAIL_TOKEN_HTML_TEMPLATE_NAME)\n\n # Inject context if user specifies.\n context = inject_template_context({'callback_token': email_token.key, })\n html_message = loader.render_to_string(email_html, context,)\n send_mail(\n email_subject,\n email_plaintext % email_token.key,\n api_settings.PASSWORDLESS_EMAIL_NOREPLY_ADDRESS,\n [getattr(user, api_settings.PASSWORDLESS_USER_EMAIL_FIELD_NAME)],\n fail_silently=False,\n html_message=html_message,)\n\n else:\n logger.debug(\"Failed to send token email. Missing PASSWORDLESS_EMAIL_NOREPLY_ADDRESS.\")\n return False\n return True\n\n except Exception as e:\n logger.debug(\"Failed to send token email to user: %d.\"\n \"Possibly no email on user object. Email entered was %s\" %\n (user.id, getattr(user, api_settings.PASSWORDLESS_USER_EMAIL_FIELD_NAME)))\n logger.debug(e)\n return False\n\n\ndef send_sms_with_callback_token(user, mobile_token, **kwargs):\n \"\"\"\n Sends a SMS to user.mobile via Twilio.\n\n Passes silently without sending in test environment.\n \"\"\"\n if api_settings.PASSWORDLESS_TEST_SUPPRESSION is True:\n # we assume success to prevent spamming SMS during testing.\n\n # even if you have suppression on– you must provide a number if you have mobile selected.\n if api_settings.PASSWORDLESS_MOBILE_NOREPLY_NUMBER is None:\n return False\n \n return True\n \n base_string = kwargs.get('mobile_message', api_settings.PASSWORDLESS_MOBILE_MESSAGE)\n\n try:\n if api_settings.PASSWORDLESS_MOBILE_NOREPLY_NUMBER:\n # We need a sending number to send properly\n\n from twilio.rest import Client\n twilio_client = Client(os.environ['TWILIO_ACCOUNT_SID'], os.environ['TWILIO_AUTH_TOKEN'])\n\n to_number = getattr(user, api_settings.PASSWORDLESS_USER_MOBILE_FIELD_NAME)\n if to_number.__class__.__name__ == 'PhoneNumber':\n to_number = to_number.__str__()\n\n twilio_client.messages.create(\n body=base_string % mobile_token.key,\n to=to_number,\n from_=api_settings.PASSWORDLESS_MOBILE_NOREPLY_NUMBER\n )\n return True\n else:\n logger.debug(\"Failed to send token sms. Missing PASSWORDLESS_MOBILE_NOREPLY_NUMBER.\")\n return False\n except ImportError:\n logger.debug(\"Couldn't import Twilio client. Is twilio installed?\")\n return False\n except KeyError:\n logger.debug(\"Couldn't send SMS.\"\n \"Did you set your Twilio account tokens and specify a PASSWORDLESS_MOBILE_NOREPLY_NUMBER?\")\n except Exception as e:\n logger.debug(\"Failed to send token SMS to user: {}. \"\n \"Possibly no mobile number on user object or the twilio package isn't set up yet. 
\"\n \"Number entered was {}\".format(user.id, getattr(user, api_settings.PASSWORDLESS_USER_MOBILE_FIELD_NAME)))\n logger.debug(e)\n return False\n\n\ndef create_authentication_token(user):\n \"\"\" Default way to create an authentication token\"\"\"\n return Token.objects.get_or_create(user=user)\n" }, { "alpha_fraction": 0.6637856364250183, "alphanum_fraction": 0.6663785576820374, "avg_line_length": 33.02941131591797, "blob_id": "ae3a9eccbcf7e04d23e0e99876a662739cd734e3", "content_id": "7d0bfde4e7633c02c165f8937e892ebdd80d6ce6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1157, "license_type": "permissive", "max_line_length": 112, "num_lines": 34, "path": "/drfpasswordless/admin.py", "repo_name": "aaronn/django-rest-framework-passwordless", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom django.urls import reverse\nfrom drfpasswordless.models import CallbackToken\n\n\nclass UserLinkMixin(object):\n \"\"\"\n A mixin to add a linkable list_display user field.\n \"\"\"\n LINK_TO_USER_FIELD = 'link_to_user'\n\n def link_to_user(self, obj):\n link = reverse('admin:users_user_change', args=[obj.user.id])\n return u'<a href={}>{}</a>'.format(link, obj.user.username)\n link_to_user.allow_tags = True\n link_to_user.short_description = 'User'\n\n\nclass AbstractCallbackTokenInline(admin.StackedInline):\n max_num = 0\n extra = 0\n readonly_fields = ('created_at', 'key', 'type', 'is_active')\n fields = ('created_at', 'user', 'key', 'type', 'is_active')\n\n\nclass CallbackInline(AbstractCallbackTokenInline):\n model = CallbackToken\n\n\nclass AbstractCallbackTokenAdmin(UserLinkMixin, admin.ModelAdmin):\n readonly_fields = ('created_at', 'user', 'key', 'type', 'to_alias_type')\n list_display = ('created_at', UserLinkMixin.LINK_TO_USER_FIELD, 'key', 'type', 'is_active', 'to_alias_type')\n fields = ('created_at', 'user', 'key', 'type', 'is_active', 'to_alias_type')\n extra = 0\n" }, { "alpha_fraction": 0.7723649144172668, "alphanum_fraction": 0.7723649144172668, "avg_line_length": 52.761905670166016, "blob_id": "3b4c373be957c353fd4a6b41a7739ed5e8d4020a", "content_id": "08189d74f58ca5249109f4e7380e17acfafd95bd", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1129, "license_type": "permissive", "max_line_length": 134, "num_lines": 21, "path": "/drfpasswordless/urls.py", "repo_name": "aaronn/django-rest-framework-passwordless", "src_encoding": "UTF-8", "text": "from drfpasswordless.settings import api_settings\nfrom django.urls import path\nfrom drfpasswordless.views import (\n ObtainEmailCallbackToken,\n ObtainMobileCallbackToken,\n ObtainAuthTokenFromCallbackToken,\n VerifyAliasFromCallbackToken,\n ObtainEmailVerificationCallbackToken,\n ObtainMobileVerificationCallbackToken,\n)\n\napp_name = 'drfpasswordless'\n\nurlpatterns = [\n path(api_settings.PASSWORDLESS_AUTH_PREFIX + 'email/', ObtainEmailCallbackToken.as_view(), name='auth_email'),\n path(api_settings.PASSWORDLESS_AUTH_PREFIX + 'mobile/', ObtainMobileCallbackToken.as_view(), name='auth_mobile'),\n path(api_settings.PASSWORDLESS_AUTH_PREFIX + 'token/', ObtainAuthTokenFromCallbackToken.as_view(), name='auth_token'),\n path(api_settings.PASSWORDLESS_VERIFY_PREFIX + 'email/', ObtainEmailVerificationCallbackToken.as_view(), name='verify_email'),\n path(api_settings.PASSWORDLESS_VERIFY_PREFIX + 'mobile/', ObtainMobileVerificationCallbackToken.as_view(), name='verify_mobile'),\n 
path(api_settings.PASSWORDLESS_VERIFY_PREFIX, VerifyAliasFromCallbackToken.as_view(), name='verify_token'),\n]\n" }, { "alpha_fraction": 0.698033332824707, "alphanum_fraction": 0.7015842795372009, "avg_line_length": 39.67777633666992, "blob_id": "e07a52ce97368f34977488023576c17d27b19eff", "content_id": "6692ac804878106f28450a1e3c078ea7cd2ce9ba", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7322, "license_type": "permissive", "max_line_length": 116, "num_lines": 180, "path": "/drfpasswordless/views.py", "repo_name": "aaronn/django-rest-framework-passwordless", "src_encoding": "UTF-8", "text": "import logging\nfrom django.utils.module_loading import import_string\nfrom rest_framework import parsers, renderers, status\nfrom rest_framework.response import Response\nfrom rest_framework.permissions import AllowAny, IsAuthenticated \nfrom rest_framework.views import APIView\nfrom drfpasswordless.models import CallbackToken\nfrom drfpasswordless.settings import api_settings\nfrom drfpasswordless.serializers import (\n EmailAuthSerializer,\n MobileAuthSerializer,\n CallbackTokenAuthSerializer,\n CallbackTokenVerificationSerializer,\n EmailVerificationSerializer,\n MobileVerificationSerializer,\n)\nfrom drfpasswordless.services import TokenService\n\nlogger = logging.getLogger(__name__)\n\n\nclass AbstractBaseObtainCallbackToken(APIView):\n \"\"\"\n This returns a 6-digit callback token we can trade for a user's Auth Token.\n \"\"\"\n success_response = \"A login token has been sent to you.\"\n failure_response = \"Unable to send you a login code. Try again later.\"\n\n message_payload = {}\n\n @property\n def serializer_class(self):\n # Our serializer depending on type\n raise NotImplementedError\n\n @property\n def alias_type(self):\n # Alias Type\n raise NotImplementedError\n\n @property\n def token_type(self):\n # Token Type\n raise NotImplementedError\n\n def post(self, request, *args, **kwargs):\n if self.alias_type.upper() not in api_settings.PASSWORDLESS_AUTH_TYPES:\n # Only allow auth types allowed in settings.\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n serializer = self.serializer_class(data=request.data, context={'request': request})\n if serializer.is_valid(raise_exception=True):\n # Validate -\n user = serializer.validated_data['user']\n # Create and send callback token\n success = TokenService.send_token(user, self.alias_type, self.token_type, **self.message_payload)\n\n # Respond With Success Or Failure of Sent\n if success:\n status_code = status.HTTP_200_OK\n response_detail = self.success_response\n else:\n status_code = status.HTTP_400_BAD_REQUEST\n response_detail = self.failure_response\n return Response({'detail': response_detail}, status=status_code)\n else:\n return Response(serializer.error_messages, status=status.HTTP_400_BAD_REQUEST)\n\n\nclass ObtainEmailCallbackToken(AbstractBaseObtainCallbackToken):\n permission_classes = (AllowAny,)\n serializer_class = EmailAuthSerializer\n success_response = \"A login token has been sent to your email.\"\n failure_response = \"Unable to email you a login code. 
Try again later.\"\n\n alias_type = 'email'\n token_type = CallbackToken.TOKEN_TYPE_AUTH\n\n email_subject = api_settings.PASSWORDLESS_EMAIL_SUBJECT\n email_plaintext = api_settings.PASSWORDLESS_EMAIL_PLAINTEXT_MESSAGE\n email_html = api_settings.PASSWORDLESS_EMAIL_TOKEN_HTML_TEMPLATE_NAME\n message_payload = {'email_subject': email_subject,\n 'email_plaintext': email_plaintext,\n 'email_html': email_html}\n\n\nclass ObtainMobileCallbackToken(AbstractBaseObtainCallbackToken):\n permission_classes = (AllowAny,)\n serializer_class = MobileAuthSerializer\n success_response = \"We texted you a login code.\"\n failure_response = \"Unable to send you a login code. Try again later.\"\n\n alias_type = 'mobile'\n token_type = CallbackToken.TOKEN_TYPE_AUTH\n\n mobile_message = api_settings.PASSWORDLESS_MOBILE_MESSAGE\n message_payload = {'mobile_message': mobile_message}\n\n\nclass ObtainEmailVerificationCallbackToken(AbstractBaseObtainCallbackToken):\n permission_classes = (IsAuthenticated,)\n serializer_class = EmailVerificationSerializer\n success_response = \"A verification token has been sent to your email.\"\n failure_response = \"Unable to email you a verification code. Try again later.\"\n\n alias_type = 'email'\n token_type = CallbackToken.TOKEN_TYPE_VERIFY\n\n email_subject = api_settings.PASSWORDLESS_EMAIL_VERIFICATION_SUBJECT\n email_plaintext = api_settings.PASSWORDLESS_EMAIL_VERIFICATION_PLAINTEXT_MESSAGE\n email_html = api_settings.PASSWORDLESS_EMAIL_VERIFICATION_TOKEN_HTML_TEMPLATE_NAME\n message_payload = {\n 'email_subject': email_subject,\n 'email_plaintext': email_plaintext,\n 'email_html': email_html\n }\n\n\nclass ObtainMobileVerificationCallbackToken(AbstractBaseObtainCallbackToken):\n permission_classes = (IsAuthenticated,)\n serializer_class = MobileVerificationSerializer\n success_response = \"We texted you a verification code.\"\n failure_response = \"Unable to send you a verification code. Try again later.\"\n\n alias_type = 'mobile'\n token_type = CallbackToken.TOKEN_TYPE_VERIFY\n\n mobile_message = api_settings.PASSWORDLESS_MOBILE_MESSAGE\n message_payload = {'mobile_message': mobile_message}\n\n\nclass AbstractBaseObtainAuthToken(APIView):\n \"\"\"\n This is a duplicate of rest_framework's own ObtainAuthToken method.\n Instead, this returns an Auth Token based on our 6 digit callback token and source.\n \"\"\"\n serializer_class = None\n\n def post(self, request, *args, **kwargs):\n serializer = self.serializer_class(data=request.data)\n if serializer.is_valid(raise_exception=True):\n user = serializer.validated_data['user']\n token_creator = import_string(api_settings.PASSWORDLESS_AUTH_TOKEN_CREATOR)\n (token, _) = token_creator(user)\n\n if token:\n TokenSerializer = import_string(api_settings.PASSWORDLESS_AUTH_TOKEN_SERIALIZER)\n token_serializer = TokenSerializer(data=token.__dict__, partial=True)\n if token_serializer.is_valid():\n # Return our key for consumption.\n return Response(token_serializer.data, status=status.HTTP_200_OK)\n else:\n logger.error(\"Couldn't log in unknown user. Errors on serializer: {}\".format(serializer.error_messages))\n return Response({'detail': 'Couldn\\'t log you in. 
Try again later.'}, status=status.HTTP_400_BAD_REQUEST)\n\n\nclass ObtainAuthTokenFromCallbackToken(AbstractBaseObtainAuthToken):\n \"\"\"\n This is a duplicate of rest_framework's own ObtainAuthToken method.\n Instead, this returns an Auth Token based on our callback token and source.\n \"\"\"\n permission_classes = (AllowAny,)\n serializer_class = CallbackTokenAuthSerializer\n\n\nclass VerifyAliasFromCallbackToken(APIView):\n \"\"\"\n This verifies an alias on correct callback token entry using the same logic as auth.\n Should be refactored at some point.\n \"\"\"\n serializer_class = CallbackTokenVerificationSerializer\n\n def post(self, request, *args, **kwargs):\n serializer = self.serializer_class(data=request.data, context={'user_id': self.request.user.id})\n if serializer.is_valid(raise_exception=True):\n return Response({'detail': 'Alias verified.'}, status=status.HTTP_200_OK)\n else:\n logger.error(\"Couldn't verify unknown user. Errors on serializer: {}\".format(serializer.error_messages))\n\n return Response({'detail': 'We couldn\\'t verify this alias. Try again later.'}, status.HTTP_400_BAD_REQUEST)\n" }, { "alpha_fraction": 0.5269461274147034, "alphanum_fraction": 0.5928143858909607, "avg_line_length": 25.36842155456543, "blob_id": "45c90cf1fc2e6869723fb00206100b42cde389a4", "content_id": "2160ae2ece198a58ce8554be709f970c5bb85b6c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 501, "license_type": "permissive", "max_line_length": 118, "num_lines": 19, "path": "/drfpasswordless/migrations/0003_callbacktoken_type.py", "repo_name": "aaronn/django-rest-framework-passwordless", "src_encoding": "UTF-8", "text": "# Generated by Django 3.0.2 on 2020-01-22 08:34\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('drfpasswordless', '0002_auto_20200122_0424'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='callbacktoken',\n name='type',\n field=models.CharField(choices=[('AUTH', 'Auth'), ('VERIFY', 'Verify')], default='VERIFY', max_length=20),\n preserve_default=False,\n ),\n ]\n" }, { "alpha_fraction": 0.7337278127670288, "alphanum_fraction": 0.7349605560302734, "avg_line_length": 39.158416748046875, "blob_id": "641bdc4af85ce4178cf7428ad064aeedc39a6add", "content_id": "5b93197fbdd5582878d5207163a979d18a26a3c9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4056, "license_type": "permissive", "max_line_length": 117, "num_lines": 101, "path": "/drfpasswordless/settings.py", "repo_name": "aaronn/django-rest-framework-passwordless", "src_encoding": "UTF-8", "text": "from django.conf import settings\nfrom rest_framework.settings import APISettings\n\nUSER_SETTINGS = getattr(settings, 'PASSWORDLESS_AUTH', None)\n\nDEFAULTS = {\n\n # Allowed auth types, can be EMAIL, MOBILE, or both.\n 'PASSWORDLESS_AUTH_TYPES': ['EMAIL'],\n\n # URL Prefix for Authentication Endpoints\n 'PASSWORDLESS_AUTH_PREFIX': 'auth/',\n\n # URL Prefix for Verification Endpoints\n 'PASSWORDLESS_VERIFY_PREFIX': 'auth/verify/',\n\n # Amount of time that tokens last, in seconds\n 'PASSWORDLESS_TOKEN_EXPIRE_TIME': 15 * 60,\n\n # The user's email field name\n 'PASSWORDLESS_USER_EMAIL_FIELD_NAME': 'email',\n\n # The user's mobile field name\n 'PASSWORDLESS_USER_MOBILE_FIELD_NAME': 'mobile',\n\n # Marks itself as verified the first time a user completes auth via token.\n # Automatically unmarks itself if email is changed.\n 
'PASSWORDLESS_USER_MARK_EMAIL_VERIFIED': False,\n 'PASSWORDLESS_USER_EMAIL_VERIFIED_FIELD_NAME': 'email_verified',\n\n # Marks itself as verified the first time a user completes auth via token.\n # Automatically unmarks itself if mobile number is changed.\n 'PASSWORDLESS_USER_MARK_MOBILE_VERIFIED': False,\n 'PASSWORDLESS_USER_MOBILE_VERIFIED_FIELD_NAME': 'mobile_verified',\n\n # The email the callback token is sent from\n 'PASSWORDLESS_EMAIL_NOREPLY_ADDRESS': None,\n\n # The email subject\n 'PASSWORDLESS_EMAIL_SUBJECT': \"Your Login Token\",\n\n # A plaintext email message overridden by the html message. Takes one string.\n 'PASSWORDLESS_EMAIL_PLAINTEXT_MESSAGE': \"Enter this token to sign in: %s\",\n\n # The email template name.\n 'PASSWORDLESS_EMAIL_TOKEN_HTML_TEMPLATE_NAME': \"passwordless_default_token_email.html\",\n\n # Your twilio number that sends the callback tokens.\n 'PASSWORDLESS_MOBILE_NOREPLY_NUMBER': None,\n\n # The message sent to mobile users logging in. Takes one string.\n 'PASSWORDLESS_MOBILE_MESSAGE': \"Use this code to log in: %s\",\n\n # Registers previously unseen aliases as new users.\n 'PASSWORDLESS_REGISTER_NEW_USERS': True,\n\n # Suppresses actual SMS for testing\n 'PASSWORDLESS_TEST_SUPPRESSION': False,\n\n # Context Processors for Email Template\n 'PASSWORDLESS_CONTEXT_PROCESSORS': [],\n\n # The verification email subject\n 'PASSWORDLESS_EMAIL_VERIFICATION_SUBJECT': \"Your Verification Token\",\n\n # A plaintext verification email message overridden by the html message. Takes one string.\n 'PASSWORDLESS_EMAIL_VERIFICATION_PLAINTEXT_MESSAGE': \"Enter this verification code: %s\",\n\n # The verification email template name.\n 'PASSWORDLESS_EMAIL_VERIFICATION_TOKEN_HTML_TEMPLATE_NAME': \"passwordless_default_verification_token_email.html\",\n\n # The verification message sent to mobile users. Takes one string.\n 'PASSWORDLESS_MOBILE_VERIFICATION_MESSAGE': \"Enter this verification code: %s\",\n\n # Automatically send verification email or sms when a user changes their alias.\n 'PASSWORDLESS_AUTO_SEND_VERIFICATION_TOKEN': False,\n\n # What function is called to construct an authentication token when\n # exchanging a passwordless token for a real user auth token.\n 'PASSWORDLESS_AUTH_TOKEN_CREATOR': 'drfpasswordless.utils.create_authentication_token',\n\n # What function is called to construct a serializer for drf tokens when\n # exchanging a passwordless token for a real user auth token.\n 'PASSWORDLESS_AUTH_TOKEN_SERIALIZER': 'drfpasswordless.serializers.TokenResponseSerializer',\n\n # A dictionary of demo users' primary keys mapped to their static pins\n 'PASSWORDLESS_DEMO_USERS': {},\n 'PASSWORDLESS_EMAIL_CALLBACK': 'drfpasswordless.utils.send_email_with_callback_token',\n 'PASSWORDLESS_SMS_CALLBACK': 'drfpasswordless.utils.send_sms_with_callback_token',\n\n # Token Generation Retry Count\n 'PASSWORDLESS_TOKEN_GENERATION_ATTEMPTS': 3\n}\n\n# List of settings that may be in string import notation.\nIMPORT_STRINGS = (\n 'PASSWORDLESS_EMAIL_TOKEN_HTML_TEMPLATE',\n 'PASSWORDLESS_CONTEXT_PROCESSORS',\n)\n\napi_settings = APISettings(USER_SETTINGS, DEFAULTS, IMPORT_STRINGS)\n" }, { "alpha_fraction": 0.694337785243988, "alphanum_fraction": 0.7005758285522461, "avg_line_length": 30.104476928710938, "blob_id": "e1eb6c52803c1e7ea910271afca203ad38f764fb", "content_id": "d09f3fcf0a2187ff103061407af6b9cf93cd8b6b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2084, "license_type": "permissive", "max_line_length": 99, "num_lines": 67, "path": "/drfpasswordless/models.py", "repo_name": "aaronn/django-rest-framework-passwordless", "src_encoding": "UTF-8", "text": "import uuid\nfrom django.db import models\nfrom django.conf import settings\nimport string\nfrom django.utils.crypto import get_random_string\n\ndef generate_hex_token():\n return uuid.uuid1().hex\n\n\ndef generate_numeric_token():\n \"\"\"\n Generate a random 6-digit string of numbers.\n We use this formatting to allow leading 0s.\n \"\"\"\n return get_random_string(length=6, allowed_chars=string.digits)\n\n\nclass CallbackTokenManager(models.Manager):\n def active(self):\n return self.get_queryset().filter(is_active=True)\n\n def inactive(self):\n return self.get_queryset().filter(is_active=False)\n\n\nclass AbstractBaseCallbackToken(models.Model):\n \"\"\"\n Callback Authentication Tokens\n These tokens present a client with their authorization token\n on successful exchange of a random token (email) or token (for mobile)\n\n When a new token is created, older ones of the same type are invalidated\n via the post_save signal in signals.py.\n \"\"\"\n\n id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False, unique=True)\n created_at = models.DateTimeField(auto_now_add=True)\n user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name=None, on_delete=models.CASCADE)\n is_active = models.BooleanField(default=True)\n to_alias = models.CharField(blank=True, max_length=254)\n to_alias_type = models.CharField(blank=True, max_length=20)\n\n objects = CallbackTokenManager()\n\n class Meta:\n abstract = True\n get_latest_by = 'created_at'\n ordering = ['-id']\n\n def __str__(self):\n return str(self.key)\n\n\nclass CallbackToken(AbstractBaseCallbackToken):\n \"\"\"\n Generates a random six digit number to be returned.\n \"\"\"\n TOKEN_TYPE_AUTH = 
'AUTH'\n TOKEN_TYPE_VERIFY = 'VERIFY'\n TOKEN_TYPES = ((TOKEN_TYPE_AUTH, 'Auth'), (TOKEN_TYPE_VERIFY, 'Verify'))\n\n key = models.CharField(default=generate_numeric_token, max_length=6)\n type = models.CharField(max_length=20, choices=TOKEN_TYPES)\n\n class Meta(AbstractBaseCallbackToken.Meta):\n verbose_name = 'Callback Token'\n" } ]
20
ivyana/PR
https://github.com/ivyana/PR
a2a95bf6825fcd40302624344ea6f91cdc6d61c6
d5debfb49cb3374b0fac3e75f46ba8e38e58a7a9
b6edcde35f39a0a78123f8028a7e460a500c5799
refs/heads/main
2023-01-24T00:40:43.901746
2020-12-03T11:58:34
2020-12-03T11:58:34
304,326,405
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5983253121376038, "alphanum_fraction": 0.6036539077758789, "avg_line_length": 61.564517974853516, "blob_id": "0d1246fc06c8fe4d5fd70c9da91c32f0ae224160", "content_id": "abe355c40893039ac33a54b596d4d3047343553b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3941, "license_type": "no_license", "max_line_length": 114, "num_lines": 62, "path": "/lab2/rdt_receive.py", "repo_name": "ivyana/PR", "src_encoding": "UTF-8", "text": "import utils\r\n\r\n\r\n'''This function is the the Server's RDT(Reliable Data Transfer) function to receive the file.'''\r\n\r\n\r\ndef rdt_receive_packet(sock, buffer_size, rx_seq_number, e_prob=0, p_drop=0, loop=0, window_size=0):\r\n receive_successful = 0 # Receive_successful is set to '0' initially\r\n\r\n while not receive_successful: # Loop goes on until condition becomes 'false'\r\n\r\n data, address = sock.recvfrom(buffer_size) # Packet is received from client\r\n\r\n # If Data_bit_error is True, it starts to Drop packets intentionally by coming out of while-loop\r\n # The received packets are not utilised/used. Also, packet is not dropped for the last window\r\n if (utils.is_error_condition(p_drop)) and (rx_seq_number < loop - window_size):\r\n print(\"############################ DATA PACKET DROPPED ################################\\n\")\r\n receive_successful = 0 # Comes out of current loop and starts again since condition will be while(1).\r\n\r\n else:\r\n # If data_bit_error is False,then it refers to No-packet dropping\r\n # It goes to else loop and utilises the received packet\r\n\r\n # Extracts the sequence number, checksum value, data from a packet\r\n seq_num, checksum, img_packet = utils.extract_data(data)\r\n\r\n # If data_bit_error is True, it starts to corrupt packet intentionally\r\n # Also, ack packet is not corrupted for the last window\r\n if (utils.is_error_condition(e_prob)) and (rx_seq_number < loop - window_size):\r\n img_packet = utils.corrupt_data(img_packet) # Function to corrupt data\r\n print(\"############################ DATA CORRUPTED ################################\\n\")\r\n\r\n rx_checksum = utils.calc_checksum(img_packet) # Receiver Checksum in integer\r\n\r\n # If packet is not corrupted and has expected sequence number,\r\n # sends Acknowledgement with sequence number *updates sequence number for next loop\r\n if ((rx_checksum == checksum) and (\r\n seq_num == rx_seq_number)):\r\n ack = rx_seq_number # Sends sequence number as ACK\r\n # Converting (ack) from int to string and then encoding to bytes\r\n ack = b'ACK' + str(ack).encode(\"UTF-8\")\r\n # Server sends ack with expected seq_number (Next Sequence Number), checksum, ack\r\n sender_ack = utils.make_packet(seq_num + 1, utils.calc_checksum(ack), ack)\r\n print(\"Sequence Number: {0}, Receiver Sequence Number: {1}, Checksum from Client: {2}, \"\r\n \"Checksum for Received File: {3}\\n\".format(seq_num, rx_seq_number, checksum, rx_checksum))\r\n rx_seq_number = 1 + seq_num # Update sequence number to the next expected seq_number\r\n receive_successful = 1 # Comes out of while loop\r\n\r\n # If packet is corrupted or has unexpected sequence number,\r\n # sends Acknowledgement with previous Acknowledged sequence number\r\n # Requests client to resend the data\r\n elif (rx_checksum != checksum) or (seq_num != rx_seq_number):\r\n ack = rx_seq_number - 1 # last acknowledged sequence number\r\n # Converting (ack) from int to string and then encoding to bytes\r\n ack = b'ACK' + 
str(ack).encode(\"UTF-8\")\r\n # Server sends ack with Seq_num, checksum, ack\r\n sender_ack = utils.make_packet(rx_seq_number, utils.calc_checksum(ack), ack)\r\n # print(\"Sequence Number: {0},Receiver_sequence: {1}\\n\".format(seq_num,Rx_seq_num))\r\n receive_successful = 0 # Loop continues until satisfies condition\r\n sock.sendto(sender_ack, address) # sending the Acknowledgement packet to the client\r\n\r\n return img_packet, address, rx_seq_number # Returns data,address,updated sequence number\r\n" }, { "alpha_fraction": 0.6525054574012756, "alphanum_fraction": 0.6745642423629761, "avg_line_length": 39.35165023803711, "blob_id": "b102efe756c4f203a52474467aba05590dd594c0", "content_id": "9667de3309109cdc4e7ea92a140a0fcaba03f176", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3672, "license_type": "no_license", "max_line_length": 120, "num_lines": 91, "path": "/lab2/utils.py", "repo_name": "ivyana/PR", "src_encoding": "UTF-8", "text": "import math\nimport random\nimport struct\n\n\n'''Finds the file size with the help of seek function'''\n\n\ndef calc_file_size(file):\n file.seek(0, 2) # Moves the file pointer to the EOF\n file_size = file.tell() # Gets file size\n file.seek(0, 0) # Moves the file pointer the the beginning of the file\n return file_size # Returns the file size in integer\n\n\n'''Finds how many loops the program has to run to transfer the file'''\n\n\ndef calc_loop_times(file_size, buffer_size):\n # File size is divided by buffer_size - 3 to find the loop_times, because 3 bytes will be the headers for the packet\n loop_times = (file_size / (buffer_size - 3))\n loop = math.ceil(loop_times) # Changing loop_times to next integer\n return loop # Returns the loop value in integer\n\n\n'''Updates the sequence number'''\n\n\ndef update_seq_number(seq_number):\n return 1 - seq_number # Returns 1-seq_number in integer\n\n\n'''Finds the Checksum for the data'''\n\n\ndef calc_checksum(data):\n checksum_addition = 0 # Initial checksum value is zero\n for i in range(0, len(data), 2): # Loop starts from 0 to len(data)-1, iterated +2 times.\n first_2bits = data[i: (i + 2)] # taking 16 bits (2 bytes) value from 1024 bytes\n if len(first_2bits) == 1:\n two_byte_integer = struct.unpack(\"!B\", first_2bits)[\n 0] # If len(data)=1 it has to be unpacked with standard size 1\n elif len(first_2bits) == 2:\n two_byte_integer = struct.unpack(\"!H\", first_2bits)[\n 0] # If len(data)=2 it has to be unpacked with standard size 2\n checksum_addition = checksum_addition + two_byte_integer # Checksum addition\n while (checksum_addition >> 16) == 1: # Loop goes on until condition becomes 'false'\n checksum_addition = (checksum_addition & 0xffff) + (checksum_addition >> 16) # Wrap up function\n return checksum_addition # Returns checksum for the data in integer\n\n\n'''Finds if the bit_error has to happen or not'''\n\n\ndef is_error_condition(e_prob=0):\n data_bit_error = False # data_bit_error has been initialised as 'False'\n random_number = random.random() # This generates a random probability value (0.00 to 1.00)\n # Convert percentage(e_prob) to probability [(0 to 100) into (0.00 to 1.00)] in order to compare with random_number\n if random_number < (e_prob / 100):\n data_bit_error = True # If condition is 'True' it corrupts data\n return data_bit_error # Returns data_bit_error as 'True' or 'False'\n\n\n'''Corrupts the data'''\n\n\ndef corrupt_data(data):\n # Replacing the first two bytes of data with alphabet character 'X' in order to 
corrupt, returns in byte\n return b'XX' + data[2:]\n\n\n'''Extracts data (sequence number, checksum, data) from packet'''\n\n\ndef extract_data(packet): # Extracts the packet\n # Find the length of the data, (length of the sequence number (2byte) and checksum(2bytes) are fixed)\n data_len = len(packet) - struct.calcsize('HH')\n # This is the packet format. example if data length is 1020 bytes then it should be \"!HH1020s\"\n packet_format = \"!HH\" + str(data_len) + \"s\"\n return struct.unpack(packet_format, packet) # Returns the unpacked values of packet.\n\n\n'''Makes packets (sequence number + checksum + data -> together forms a packet)'''\n\n\ndef make_packet(seq_numbers, checksums, data):\n # This is the packet format. example if data length is 1020 bytes then it should be \"!HH1021s\"\n packet_format = \"!HH\" + str(len(data)) + \"s\"\n # Packs sequence number, checksum, data and forms a packet\n packet = struct.pack(packet_format, seq_numbers, checksums, data)\n return packet # Returns packet in bytes\n" }, { "alpha_fraction": 0.5836487412452698, "alphanum_fraction": 0.5889477729797363, "avg_line_length": 58.04545593261719, "blob_id": "e888e6cd344392b2ad18b7c6aaae331e194b36c7", "content_id": "270ab0d7ff39380bd4bb6bc12d32e414abfa40ef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5284, "license_type": "no_license", "max_line_length": 120, "num_lines": 88, "path": "/lab2/rdt_send.py", "repo_name": "ivyana/PR", "src_encoding": "UTF-8", "text": "from globals import *\r\nimport utils\r\nimport time\r\nimport socket\r\n\r\n'''This function is the client's RDT(Reliable Data Transfer) function to send the file.'''\r\n\r\n\r\ndef rdt_send_packet(f, udp_sock, address, seq_number, data, e_prob=0, p_drop=0, window_size=0, base=0, loop=0,\r\n image_buffer=None, time_buffer=None):\r\n if image_buffer is None:\r\n image_buffer = []\r\n if time_buffer is None:\r\n time_buffer = []\r\n if seq_number < (base + window_size): # Check for empty slots in the windows\r\n while (seq_number < base + window_size) and (seq_number <= loop): # Condition for GBN protocol (Sliding window)\r\n if seq_number > 0: # Initially file size is sent through sequence number 0\r\n data = f.read(buffer_size - 4)\r\n packet = utils.make_packet(seq_number, utils.calc_checksum(data),\r\n data) # Packet is created with the sequence number, checksum, data\r\n # Buffer size of window size is created and data is added to the buffer\r\n image_buffer[seq_number % window_size] = packet\r\n udp_sock.sendto(packet, address) # Sends the data\r\n print(\"Packet Number_Sliding Window: \", seq_number)\r\n time_buffer[seq_number % window_size] = time.time() # Time buffer stores the start time for each packet\r\n seq_number += 1 # Sequence number is updated by 1\r\n print(\"Start timer...\")\r\n try: # This is used for timer -> If timed-out, it comes out of try loop and goes to exception\r\n # UDP Socket timer is added here\r\n # In this case 30 milliseconds is set as timer\r\n # If timed-out before operation, it goes to the timer exception\r\n udp_sock.settimeout(0.03)\r\n ack_packet, address = udp_sock.recvfrom(buffer_size) # Client receiving the acknowledgement packet\r\n # It is equivalent to sock.setblocking(0).\r\n # Timer is activated only for receive function which takes care of entire operation according to the FSM\r\n udp_sock.settimeout(None)\r\n\r\n # If Data_bit_error is true, it starts to drop packets intentionally\r\n # The received packets are not utilised/used. 
Also, ack packet is not dropped for the last window\r\n if (utils.is_error_condition(p_drop)) and (seq_number < loop - window_size):\r\n # As per the FSM, we need to time-out\r\n # Here we are using while loop\r\n # If current-time is less than the timer-time, it runs infinite loop with no operations\r\n # After timer-time, condition fails and loop comes out\r\n while time.time() < (time_buffer[base % window_size] + 0.03):\r\n pass\r\n print(\"############################ ACK PACKET DROPPED ################################\\n\")\r\n # Raise OSError\r\n else:\r\n # If data_bit_error is False, then it refers to No-packet dropping\r\n # It goes to else loop and utilises the received packet\r\n\r\n # Extracts the sequence number, checksum value, data from a packet\r\n packet_seq_number, sender_checksum, ack_data = utils.extract_data(ack_packet)\r\n\r\n # If data_bit_error is True, it starts to corrupt data intentionally\r\n # Also last window packets are not corrupted\r\n if (utils.is_error_condition(e_prob)) and (seq_number < loop - window_size):\r\n ack_data = utils.corrupt_data(ack_data) # Function to corrupt data\r\n print(\"############################ ACK CORRUPTED ################################\")\r\n\r\n ack_checksum = utils.calc_checksum(ack_data) # Finds the checksum for received acknowledgement\r\n ack_data = ack_data.decode(\"UTF-8\") # Decodes from byte to integer for the comparison\r\n # Gets the integer value alone from the ACK\r\n # For example, if string 'ACK500' is the input then the output will be integer of 500\r\n ack_data_int = int(ack_data[3:len(ack_data)])\r\n print(\"ACK from Server: \", ack_data_int)\r\n\r\n '''Comparing Acknowledgement'''\r\n # If packet is not corrupted and has expected sequence number\r\n if (ack_data_int >= base) and (ack_checksum == sender_checksum):\r\n base = ack_data_int + 1 # Base value is the next value to the ack value\r\n print(\"ACK is OKAY: \", ack_data)\r\n print(\"Updated Base: \", base)\r\n print(\"Stop timer...\\n\")\r\n\r\n elif ack_checksum != sender_checksum: # If packet is corrupted, it resends the packet\r\n print(\"ACK is NOT OKAY:{} \\n\".format(ack_data)) # Do Nothing\r\n\r\n except (socket.timeout, OSError):\r\n print(\"############################ SOCKET TIMED OUT ################################\")\r\n print(\"Base: \", base)\r\n for i in range(base, seq_number): # Resends the entire packet\r\n time_buffer[i % window_size] = time.time() # Restarting the timer, updating start time for the packet\r\n udp_sock.sendto(image_buffer[i % window_size], address) # Sending the data\r\n print(\"Sending the packet: \", i)\r\n print(\"\\n\")\r\n return seq_number, base # Returns updated sequence number, base value\r\n" }, { "alpha_fraction": 0.7267497777938843, "alphanum_fraction": 0.7478427886962891, "avg_line_length": 33.766666412353516, "blob_id": "0a32bc7b54c416ba13e2c541950939223ddae09a", "content_id": "c9aeac6a098f80f1ac7e0d92caca485064912f30", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1043, "license_type": "no_license", "max_line_length": 145, "num_lines": 30, "path": "/lab1/README.md", "repo_name": "ivyana/PR", "src_encoding": "UTF-8", "text": "## Network Programming - Lab1 \n\n### Implementation:\n\n1. Pull a docker container (alexburlacu/pr-server) from the registry and run it (by forwarding the port 5000 to a port on local machine);\n2. Access the root route of the server and find the way to /register;\n3. Get the access token from /register;\n4. 
Put the access token in an HTTP header of subsequent requests under the X-Access-Token key;\n4. Extract data from data key and get next links from link key;\n5. Use only one token(register) per program run;\n6. Convert the fetched data to a common representation(json in my case);\n10. Make a concurrent TCP server, serving the fetched content, that will respond to (mandatory) a \ncolumn selector message, like `SelectColumn column_name`, and (optional) `SelectFromColumn column_name glob_pattern` (only SelectColumn for now);\n\n### Instructions:\n**Get docker container:**\n```\ndocker pull alexburlacu/pr-server\ndocker run -p5000:5000 alexburlacu/pr-server\n```\n\n**Run server:**\n```\npython server.py\n```\n\n**Run client:**\n```\npython client.py\n```\n" }, { "alpha_fraction": 0.6907692551612854, "alphanum_fraction": 0.7015384435653687, "avg_line_length": 53.16666793823242, "blob_id": "4d9026adf03fbeafd50b88640ca716447dc340bf", "content_id": "954cd04a6cac7ccb928e5f2d1a7988cf954d9453", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1950, "license_type": "no_license", "max_line_length": 119, "num_lines": 36, "path": "/lab2/client.py", "repo_name": "ivyana/PR", "src_encoding": "UTF-8", "text": "from globals import * # Common variables\nimport utils # Functions like checksum, file size, bit error, etc.\nimport rdt_send # Reliable Data Transfer Send function\nimport socket\nimport struct\n\n'''To corrupt data packet, Set value from 0 - 99 in e_prob'''\ne_prob = 0 # e_prob is the error probability and can be set from 0-99\np_drop = 0 # p_drop is the packet dropping probability and can be set from 0-99\n\ntime_buffer = [None] * window_size # Time_Buffer stores the start time for the packets\nprint(\"Window size: \", window_size) # Prints the window size\nimage_buffer = [None] * window_size # Stores the data in the buffer\nsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # Socket with IPV4, UDP\nf = open('sent_image.jpg', 'rb') # Opening the file which will be transferred to the server\n\nfile_size = utils.calc_file_size(f) # File size is calculated\n\nloop = utils.calc_loop_times(file_size, buffer_size) # Finding the loop value\nloop_bytes = struct.pack(\"!I\", loop) # Change loop from integer to byte inorder to send data from client to server\nprint(\"File has been Extracted \\nFile size: {0} \\nNo. of Loops to send the entire file: {1}\".format(file_size, loop))\nseq_number = 0 # Sequence Number is set to 0 initially\nbase = 0 # Here base is set to 0\nprint('Client file transfer starts...')\n\nwhile base <= loop: # Loop runs until sequence number is equal to loop value. 
Sequence number starts from 1.\n # calls the function rdt_send to send the packet\n seq_number, base = rdt_send.rdt_send_packet(f, sock, addr, seq_number, loop_bytes, e_prob, p_drop,\n window_size, base, loop, image_buffer, time_buffer)\n\nf.close() # File closed\nsock.close() # Socket Closed\n\nend = time.time() # Gets the End time\nelapsed_time = end - start # Gets the elapsed time\nprint(\"Client: File Sent\\nFile size sent to server: {0}\\nTime taken in Seconds:{1}s\\n\".format(file_size, elapsed_time))\n" }, { "alpha_fraction": 0.7581900954246521, "alphanum_fraction": 0.7656503319740295, "avg_line_length": 57.150943756103516, "blob_id": "56d2eac242f69b7e9755fe10476e202a5ff10962", "content_id": "f51bce18c451490de179b9b65f6013ba828ce3ff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 6167, "license_type": "no_license", "max_line_length": 266, "num_lines": 106, "path": "/lab2/README.md", "repo_name": "ivyana/PR", "src_encoding": "UTF-8", "text": "## Network Programming - Lab2\n\n## Tasks:\n1. Implement a protocol atop UDP, with error checking and retransmissions. \nLimit the number of retries for retransmission.\n\n2. Make the connection secure, using either a CA to get the public key of the receiver and encrypt data with it, \nor using Diffie-Helman to get a shared connection key between client and server, ensure that the traffic is encrypted.\n\n3. Regarding the application-level protocol, there are 3 options:\n - make an FTP-like protocol for data transfer, thus you will need to ensure data splitting and in-order delivery and reassembly at the destination. The protocol must support URIs, file creation and update (PUT), file fetching (GET) and metadata retrieval (OPTIONS)\n - make a protocol based on the workings (state machine) of an ATM\n - make a protocol based on the workings (state machine) of a stationary telephone \n\n### Implementation:\n\nMy project has a server-client architecture which runs on a protocol atop UDP with features of TCP. \n\nThe TCP like attributes are the following:\n- **checksums** (values used to verify the integrity of a file or a data transfer \nwhich are typically used to compare two sets of data to make sure they are the same)\n- **sequence numbers** (they are used to coordinate which data has been transmitted \nand received and they will help to arrange for retransmission if data has been lost)\n- **timer** (they are here to ensure that excessive delays are not encountered during communication)\n\nBonus: *Go-back-N protocol* implementation (this protocol uses a sliding window method for reliable and sequential delivery of \ndata frames; it provides for sending multiple frames before receiving the acknowledgment for the first frame; this fact \nmeans that is a more efficient to use such method because during the time that would otherwise be spent waiting, more packets \nare being sent)\n\n**Using all from above, I have focused on creating something like a FTP protocol for data transfer which ensures data \nsplitting, in-order delivery and reassembly at the destination.**\n\n### Implementation Details:\n\nHere I will explain every module that I have in my project.\n*If more info is needed, see the multiple comments in every particular module.*\n\nSo, I will start with the smallest ones: *1. globals.py* and *2. utils.py*\n\n1. 
In the *globals.py* module, I have the IP address (localhost is the IP address of my machine), the port number (I used port 5005), \nand buffer_size (set to 1024 -> the packet size is 1024 bytes: a 2-byte sequence number, a 2-byte checksum and 1020 bytes of data) \nwhich will be used later. Also, here I set the window size for the Go-Back-N protocol.\n\n2. The module *utils.py* stores various helper functions: calc_file_size (*finds the file size with the \nhelp of the seek function*), calc_loop_times (*finds how many loops the program has to run to transfer the file*), \nupdate_seq_number (*updates the sequence number*), calc_checksum (*computes the checksum for the data*), \nis_error_condition (*decides whether a bit error has to happen or not*), corrupt_data (*corrupts the data*), \nextract_data (*extracts the sequence number, checksum and data from a packet*), \nmake_packet (*builds packets: sequence number + checksum + data together form a packet*).\n\nNext are *3. rdt_send.py* and *4. rdt_receive.py*, which are the Reliable Data Transfer functions:\n\n3. Here the packet is created from the sequence number, checksum and data, and a buffer of window size is created with the \ndata added to it. This module also implements the timers and sequence numbers for \nevery packet. For testing purposes, I have used functions here which drop packets or corrupt data intentionally (the \nprobability can be set in *client.py*).\n\n4. Here it is checked whether the data was received successfully. The function in this module extracts the sequence number, \nchecksum value and data from a packet. If the packet is not corrupted and has the expected sequence number, it sends an acknowledgement \nwith that sequence number (*and updates the sequence number for the next loop*). If the packet is corrupted or has an unexpected sequence number, \nit sends an acknowledgement with the previously acknowledged sequence number and requests the client to resend the data. (Here, we can \nalso introduce error or packet-drop probability from *server.py*).\n\nLast but not least, I have *5. client.py* and *6. server.py* (*in these 2 modules you can choose the probability of \ngetting errors; this is done for testing purposes*):\n\n5. In the client we work with time_buffer and image_buffer, open the UDP socket and the file which will be transferred \nto the server. The size of the image and how many loops we will need to send it are also calculated here. A loop runs until the \nsequence number is equal to the loop value. Inside it, the function rdt_send is called to send the packet. When the sending \nprocess is done, the file and socket are closed and we get the end time and the elapsed time for the client.\n\n6. The server will bind the socket and start to wait for clients. It will receive the size of the file from the client and \nwill create a new file to store the received data. The server will call the function rdt_receive_packet to receive \npackets from the client and will write the data to the mentioned new file. 
As in the client, at the end the new file and the socket are closed, and we get the end time and the elapsed time for the server.\n\n(*Note: For now, I didn't implement the security part.*)\n\n### Instructions:\n**Choose an image, change its name to *sent_image.jpg* and put it in the same folder as the project.**\n\n**Run server:**\n```\npython server.py\n```\n\n**Run client:**\n```\npython client.py\n```\n\n**Voilà, the image has been sent from client to server:)**\n\n### Output Examples:\n\n**Server without errors:**\n![alt text](https://github.com/ivyana/PR/blob/main/lab2/output/1.PNG)\n![alt text](https://github.com/ivyana/PR/blob/main/lab2/output/2.PNG)\n\n**Client without errors:**\n![alt text](https://github.com/ivyana/PR/blob/main/lab2/output/3.PNG)\n![alt text](https://github.com/ivyana/PR/blob/main/lab2/output/4.PNG)\n\n**Here, error and packet-dropping probabilities were introduced:**\n![alt text](https://github.com/ivyana/PR/blob/main/lab2/output/5.PNG)\n\n\n" }, { "alpha_fraction": 0.7134292721748352, "alphanum_fraction": 0.7470024228096008, "avg_line_length": 45.33333206176758, "blob_id": "dee0e00337343f34cc418272d8d55a3ff02d1487", "content_id": "01b0dbcb3877ad5071eb2ecf90548125300c7c62", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 834, "license_type": "no_license", "max_line_length": 119, "num_lines": 18, "path": "/lab2/globals.py", "repo_name": "ivyana/PR", "src_encoding": "UTF-8", "text": "import time\n\n# Find the start time of the program, elapsed time can be found by end time - start time\nstart = time.time()\n\n'''Importing IP address, port number, buffer_size'''\nUDP_IP = \"localhost\"  # localhost is the IP address of this machine\nUDP_PORT = 5005  # Port number is assigned to 5005\n# buffer_size is set to 1024 -> packet size is 1024 bytes: sequence number 2 bytes, checksum 2 bytes, data 1020 bytes\nbuffer_size = 1024\naddr = (UDP_IP, UDP_PORT)\n\n'''For the GBN(Go back N Protocol) sliding window, setting the window_size value'''\n# Set the window size for the Go-Back-N protocol.\n# This window_size is only for the client program for the sliding window.\n# Server side window_size is always 1.\n# Client window_size also included in Server program ONLY to avoid intentional packet corrupt/drop for the last window.\nwindow_size = 5\n" }, { "alpha_fraction": 0.6714497804641724, "alphanum_fraction": 0.6793666481971741, "avg_line_length": 50.82051467895508, "blob_id": "2bcd76e0080795a4d04cbacb09b2ac5cfc907747", "content_id": "5f0db9945b7adbb7cd29228a0460da65111a9487", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2021, "license_type": "no_license", "max_line_length": 115, "num_lines": 39, "path": "/lab2/server.py", "repo_name": "ivyana/PR", "src_encoding": "UTF-8", "text": "from globals import *  # Common variables\nimport rdt_receive  # Reliable Data Transfer Receive function\nimport utils  # Functions like checksum, filesize, bit error, etc.\nimport socket\nimport struct\n\n'''To corrupt ack packet, set value from 0 - 99 in e_prob'''\ne_prob = 0  # e_prob is the error probability and can be set from 0-99\np_drop = 0  # p_drop is the packet dropping probability and can be set from 0-99\n\nsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)  # Socket with IPV4, UDP\nsock.bind(addr)  # Binding the socket\nprint(\"Server started...\\nWaiting for clients...\")\n\np = open('received_image.jpg', 'wb')  # Opening a new file to copy the transferred image\n\nreceiver_sequence = 0 
# Server side sequence number is initialised to zero\n# Receiving the file size from client\nloopTimes, address, receiver_sequence = rdt_receive.rdt_receive_packet(sock, buffer_size, receiver_sequence)\nloop = struct.unpack(\"!I\", loopTimes)[0] # Changing loop from byte to integer\nprint(\"Number of loops to send the entire file: \", loop)\nprint(\"Writing/Receiving process starting soon...\\n\") # Receiving file from Client\n\nwhile receiver_sequence <= loop:\n # Calls the function rdt_receive_packet to receive the packet\n image_packet, address, receiver_sequence = rdt_receive.rdt_receive_packet(sock, buffer_size, receiver_sequence,\n e_prob, p_drop, loop, window_size)\n p.write(image_packet) # Writes/Stores the received data to a file\n\n# File Received from Client at the end of Loop\nReceived_File_Size = utils.calc_file_size(p) # Calculating Received Image file size\n\np.close() # Closing the file\nsock.close() # Closing the socket\n\nend = time.time() # Finding the end-time\nElapsed_time = end - start # Elapsed time\nprint(\"Server: File Received\\nReceived File size: {0}\\nTime taken in Seconds: {1}s\".format(Received_File_Size,\n Elapsed_time))\n" } ]
8
zxw4332/Content-based-movie-recommendation-system
https://github.com/zxw4332/Content-based-movie-recommendation-system
2bdd463e4bfa6f577be59cd349a9e9c018ed0ca9
a82ade22373bf03ffba2319af872b4b92313bb31
f82bfa3a7b7bb3f02da949dd2945cb4513a365a2
refs/heads/main
2023-02-14T14:30:01.688890
2020-12-19T05:01:08
2020-12-19T05:01:08
327,728,266
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.75, "alphanum_fraction": 0.75, "avg_line_length": 27, "blob_id": "5236d7772c9afed0fc6e35d2ee56795f8c4f049a", "content_id": "0b3e2067096e7a19d84bce6b338173e90ea839dd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 28, "license_type": "no_license", "max_line_length": 27, "num_lines": 1, "path": "/xlearn/mf_country/Readme.md", "repo_name": "zxw4332/Content-based-movie-recommendation-system", "src_encoding": "UTF-8", "text": "### Add country information\n" }, { "alpha_fraction": 0.6153846383094788, "alphanum_fraction": 0.6153846383094788, "avg_line_length": 12, "blob_id": "8e99da705d01718452d931edf97e9bb458b19baf", "content_id": "d0b289ebbcda9e43f0804f7c0aceac4bc50ecace", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 13, "license_type": "no_license", "max_line_length": 12, "num_lines": 1, "path": "/xlearn/mf_ensemble/Readme.md", "repo_name": "zxw4332/Content-based-movie-recommendation-system", "src_encoding": "UTF-8", "text": "### ensemble\n" }, { "alpha_fraction": 0.6153846383094788, "alphanum_fraction": 0.6153846383094788, "avg_line_length": 12, "blob_id": "c402807bce4d3605d16871793553256e674e04e1", "content_id": "6c58f393f5f7b8504f58e15934e3d67e9c256782", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 13, "license_type": "no_license", "max_line_length": 12, "num_lines": 1, "path": "/xlearn/mf/Readme.md", "repo_name": "zxw4332/Content-based-movie-recommendation-system", "src_encoding": "UTF-8", "text": "### baseline\n" }, { "alpha_fraction": 0.6261682510375977, "alphanum_fraction": 0.6443925499916077, "avg_line_length": 31.938461303710938, "blob_id": "1d773c3a489a6d581604e14d871b64655ef8bda2", "content_id": "b11873aa39710afc3ceb748a3e470993018b1148", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2140, "license_type": "no_license", "max_line_length": 142, "num_lines": 65, "path": "/lda/LDA_model.py", "repo_name": "zxw4332/Content-based-movie-recommendation-system", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport gensim\nfrom gensim.utils import simple_preprocess\nfrom gensim.parsing.preprocessing import STOPWORDS\nfrom nltk.stem import WordNetLemmatizer, SnowballStemmer\nfrom nltk.stem.porter import *\nimport numpy as np\nnp.random.seed(40)\nimport nltk\nfrom gensim import corpora, models\n\ndata = pd.read_pickle('movie_tags.pkl')\ndata['index'] = [x for x in range(data.shape[0])]\n\ndic = {}\nfor x in data['tags']:\n for y in x:\n if y in dic:\n dic[y] = 1 + dic[y]\n else:\n dic[y] = 1\ndf = pd.DataFrame(dic, index=[0])\ndf = df.T.reset_index()\ndf.columns = ['tags', 'count']\ndic_bigger_2 = {}\nfor x in df['tags']:\n dic_bigger_2[x] = 1\npreprocessed = []\nfor line in data['tags']:\n new_line = []\n for word in line:\n if word in dic_bigger_2:\n new_line.append(word)\n preprocessed.append(new_line)\ndata['preprocessed'] = preprocessed\ndictionary = gensim.corpora.Dictionary(data['preprocessed'])\ncount = 0\nfor k, v in dictionary.iteritems():\n count += 1\n if count > 10:\n break\ndictionary.filter_extremes(no_below=15, no_above=0.5, keep_n=100000)\nbow_corpus = [dictionary.doc2bow(doc) for doc in data['preprocessed']]\ntfidf = models.TfidfModel(bow_corpus)\ncorpus_tfidf = tfidf[bow_corpus]\n\nif __name__ == '__main__':\n lda_model_tfidf = gensim.models.LdaMulticore(corpus_tfidf, num_topics=50, 
id2word=dictionary, passes=2, workers=2,minimum_probability = 0)\n    for idx, topic in lda_model_tfidf.print_topics(-1):\n        print('Topic: {} Word: {}'.format(idx, topic))\n    result_df = pd.DataFrame()\n    result_ls = []\n    for x in bow_corpus:\n        y = lda_model_tfidf[x]\n        new_result = []\n        for n in y:\n            new_result.append(n[1])\n        result_ls.append(new_result)\n    result_df['imdbId'] = data['movie_id']\n    result_df['result'] = result_ls\n    result_df.to_pickle('movie_lda_50.pkl')\n    link_data = pd.read_csv('links.csv')\n    sample_movie = pd.read_pickle('movie_lda_50.pkl')\n    sample_data = link_data.merge(sample_movie, on = 'imdbId', how = 'right')[['movieId', 'result']]\n    sample_data.to_pickle('movie_lda_50.pkl')" }, { "alpha_fraction": 0.6507384181022644, "alphanum_fraction": 0.6700979471206665, "avg_line_length": 47.36492156982422, "blob_id": "05e9f50d3d9309157e49a133e05df22e1e16b0d8", "content_id": "dc465040757b1583b9992ecd578d74257b082166", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 34197, "license_type": "no_license", "max_line_length": 859, "num_lines": 707, "path": "/Readme.md", "repo_name": "zxw4332/Content-based-movie-recommendation-system", "src_encoding": "UTF-8", "text": "# Movie Recommendation Using Multi-Field FFM \n\n\n## Abstract \n\n\nIn this project our team focused on generating movie recommendations for existing users of a movie rating system, based on rating information and movie information. We used a bias-term-only model and matrix factorization to build our baseline models. To enhance the power of our model, we used FFM to incorporate several types of movie information such as movie genres, movie years and movie countries. MSE and NDCG were used to evaluate model accuracy, and 3-fold cross validation was used for hyper parameter tuning (the hidden dimension k of the FFM model). Finally, we ensembled the fields that improve model accuracy into FFM and built our final model (voting is best, giving MSE 0.6514) for movie recommendation. \n\n## Github Repo Structure & Requirements\n\nFolders:\n- xlearn: stores the training and testing sets, models and results; empty here, files will be generated after running the code.\n- lda: topic modeling for adding genre information.\n\nFiles:\n- *.png and *.jpeg files are used in the markdown.\n- **xlearn.ipynb: python source code** for this project.\n- movie_info.csv, genres_encode.pkl, movie_lda_50.pkl: files used as model input. Data from the movielens dataset is not included.\n\nRequirements:\n- python 3.8\n- pandas numpy xlearn matplotlib seaborn\n\n\n## Dataset & Framework\n\n\nThe dataset we used for this project is movielens, together with web-crawled information from imdb. However, in this dataset all the information we have about users is their rating behaviour, so the new-user cold start problem cannot be solved from it. Therefore, within the scope of this project we focus on generating high quality recommendations for existing users in our system, and we also propose how to select good movies as cold-start movies to recommend to new users.\n\n\nThe model we used is the FFM model. FFM is appropriate when movie information is incorporated with the rating information, and it can be viewed as a generalization of matrix factorization. In this project, we tested several kinds of movie information that may be helpful for user preference prediction, including movie genres (provided), movie tags (generated by LDA), recently rated movies, movie years and movie countries. A minimal example of the input encoding these fields are mapped to is sketched below. 
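\n\nTo make the encoding concrete, here is a minimal sketch of the libffm-style text format that xlearn consumes: each line is a label followed by `field:feature:value` triples. The field ids (0 = user, 1 = movie, 2+ = side information) follow the convention of the appendix code; the concrete indices below are made-up illustrative values, not taken from the real data.\n\n```Python \n# A 4.0 rating by the user at index 12 on the movie at index 3410, whose genre\n# one-hot feature sits at index 9321, becomes this one line of the training file:\nsample_line = \"4.0 0:12:1 1:3410:1 2:9321:1\"\n```\n\n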
The evaluation metrics we used are MSE and a ranking metric (NDCG). After these models were built, we evaluated their performance on users with different activity levels. Finally, we ensembled these results to produce the final movie recommendation model. The upper part of the figure shown below is our main work; in order to be self contained, we also give some suggestions on how to recommend movies to new users at the end of this report.\n\n![avatar](./framework.png)\n\n\n\n## Model and Results \n\nThe provided movielens dataset contains movie information like movie genres and movie years, which we think is not enough to represent a movie. So we crawled additional movie information, like movie tags and movie countries, from imdb to make the movie information more complete. Since we run our model on a single machine, we chose to use a sample containing 3357 movies for the recommendation task, and we selected users having at least 15 rating records on these 3357 movies. The reason to choose 15 is that, since we do 3-fold cross validation to tune the hyper parameter, we want each user to have at least 5 records in the test set; otherwise the ranking metric would not be that useful.\n\nThe main package we used for this project is xlearn. xLearn is a high-performance, easy-to-use, and scalable machine learning package, which can be used to solve large-scale machine learning problems, especially problems on large-scale sparse data, which is very common in scenes like CTR prediction and recommender systems. xlearn is written in C++ and a Python wrapper is provided. xlearn is much faster than libffm and libfm, so it's worth trying.\n\n![avatar](./xlearn_intro.png)\n\n\nResults:\n||bias|mf|ffm+genre|ffm+tag|ffm+time|ffm+year|ffm+country|ffm+ensemble|voting_ensemble|\n|-|-|-|-|-|-|-|-|-|-|\n|MSE|0.7120|0.6800|0.6673|0.6785|0.7850|0.6561|0.6569|0.6879|**0.6514**|\n|NDCG|-|0.9058|0.9088|0.9057|0.8808|0.9111|0.9108|0.9079|-|\n|optimal_k|-|5|40|5|30|110|45|25|-|\n\n### Group users by rating activity level\n\n<img src=\"./active_hist.jpeg\" width=\"50%\" height=\"50%\" />\n\nWe drew the histogram of per-user rating counts shown above to see whether users differ in activity level, since we want to see the effect of our model on each level. We grouped all users into three groups according to their number of rating records, and we wanted each group to cover enough users. In the end we let users with more than 60 records be high-activity users and users with fewer than 20 rating records be low-activity users; everyone in between is a medium-activity user. The resulting group sizes are shown below.\n\n\n\nResults:\n||low_active_user|medium_active_user|high_active_user|\n|-|-|-|-|\n|Count number|2470|3559|1031|\n\n\n### Baseline \n\nWe used 2 models as our baseline models. The first one considers only the bias terms of users and movies, and the second one is an FFM model that incorporates only rating information.\n\n#### User Bias + Movie Bias Baseline \n\nWe implemented this baseline using surprise and did 5-fold cross validation. 
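\n\nFor reference, surprise's BaselineOnly predicts a rating as the global mean plus a learned user bias and a learned movie bias. A tiny illustrative sketch follows; the bias values in it are made up, not fitted:\n\n```Python \ndef predict_baseline(mu, b_user, b_movie):\n    # BaselineOnly's prediction rule: r_hat = mu + b_u + b_i\n    return mu + b_user + b_movie\n\npredict_baseline(3.5, 0.2, -0.1)  # -> 3.6\n```\n\n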
The MSE for this baseline is 0.7120. \n\n```Python \nfrom surprise import NormalPredictor\nfrom surprise import Dataset\nfrom surprise import Reader\nfrom surprise import SVD\nfrom surprise import BaselineOnly\nfrom surprise.model_selection import cross_validate\n\nreader = Reader(rating_scale=(1, 5))\ndata = Dataset.load_from_df(ratings_high[['user_id', 'movie_id', 'rating']], reader)\nalgo = BaselineOnly()\ncross_validate(algo, data, measures=['RMSE', 'MAE'], cv=5, verbose=True) \n\n```\n\n![avatar](./bias_baseline.png)\n\n\n\n\n#### FFM + Rating Information\n\nIn the FFM framework, if we do not add any other information, then FFM is just the same as matrix factorization. Running the 3 lines of code below gives the result (the implementation of these functions can be seen in the Appendix). The best MSE and NDCG for this baseline are 0.680 and 0.906, with k = 4.\n\n\n```Python \nffm_CV(ratings_high,[\"rating\",\"user_index\",\"movie_index\"],\"mf\")\nk_lst = [1,3,5,10,15,20,25,30,35,40,45,50]\nbase_res = ffm_cv_eval(k_lst, \"mf\")\n```\n<img src=\"./ffm_baseline.png\" width=\"50%\" height=\"50%\" />\n\nWe record the base MSE for users at each activity level, to compare against the results when we add extra information.\n\nResults:\n||low_active_user|medium_active_user|high_active_user|\n|-|-|-|-|\n|MSE|0.7343|0.711|0.6629|\n\n\n### Add Genres \n\nMovie genres carry information about which category a movie belongs to, and users often have preferences over genres; we show a case of this below. We crawled movie tags from IMDB and ran LDA to give movies topics, and we also used the movie genres provided by movielens. The results showed that the movie genres provided by movielens are the better choice.\n\n\n\n#### Add IMDB Tags \n\nWe use LDA to embed the tag information for each movie; we systematically tried a wide range of topic numbers and chose 50 hidden topics as it gives the best performance. \n\nWe visualize the topic-word distribution as well as the document-topic distribution. There is a clear pattern in the tag distribution of topics 3 and 6, which are about violence and illness. Some topics don't reveal much information since the data is not complete; we believe adding more data would result in a better topic-word distribution.\n\n<img src=\"./topic_word.jpeg\" width=\"50%\" height=\"50%\" />\n\n\nAs for the document-topic distribution, there are many documents to explore, so we selected two documents to see the difference in their distributions; there is indeed great variety in the topic distributions of these two movies.\n\n<img src=\"./docu_topic.jpeg\" width=\"50%\" height=\"50%\" />\n\nIn the plot below, the orange line is the accuracy when adding tags while the blue line is the baseline. Adding this information beats the baseline model, and k = 5 is the best choice. 
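\n\nUnlike the one-hot user and movie fields, the tag field carries real-valued features: each movie's LDA topic weights become `field:feature:weight` entries, mirroring the Add_tag function in the appendix, which keeps only topics with weight >= 0.01. The indices in this small example are made up for illustration:\n\n```Python \n# Topic weights [0.62, 0.003, 0.31] for one movie would contribute the entries\n# \"2:9500:0.62 2:9502:0.31\" (the middle topic is dropped because 0.003 < 0.01):\ntag_features = \"2:9500:0.62 2:9502:0.31\"\n```\n\n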
Specific numbers can be seen in our notebook.\n\n```Python \nratings_high = Add_tag(ratings_high)\nffm_CV(ratings_high,[\"rating\",\"user_index\",\"movie_index\",\"tag_index\"],\"mf_tag\")\ntag_res = ffm_cv_eval(k_lst, \"mf_tag\")\n\nsns.lineplot(x=k_lst, y=base_res[0]);\nsns.lineplot(x=k_lst, y=tag_res[0]);\nplt.title(\"FFM add tag vs baseline RMSE\")\nplt.show()\nsns.lineplot(x=k_lst, y=base_res[1]);\nsns.lineplot(x=k_lst, y=tag_res[1]);\nplt.title(\"FFM add tag vs baseline NDCG\")\nplt.show()\n```\n\n<img src=\"./ffm_tag.png\" width=\"50%\" height=\"50%\" />\n\nComparing the results across user activity levels, we conclude that the extra tag information increases the accuracy of our model, but only by a little, and there is no significant difference in improvement between active and inactive users. \n\nResults:\n||low_active_user|medium_active_user|high_active_user|\n|-|-|-|-|\n|MSE|0.7236|0.6938|0.6454|\n|Improve|0.0107|0.0172|0.0175|\n\n\n\n#### Add Genres\n\nIn the plot below, the orange line is the accuracy when adding genres while the blue line is the baseline. Adding this information beats the baseline model, and k = 40 is the best choice. Specific numbers can be seen in our notebook.\n\n\n\n```Python \nratings_high = Add_genre(ratings_high)\nffm_CV(ratings_high,[\"rating\",\"user_index\",\"movie_index\",\"genre_index\"],\"mf_genre\")\ngenre_res = ffm_cv_eval(k_lst, \"mf_genre\")\n\nsns.lineplot(x=k_lst, y=base_res[0]);\nsns.lineplot(x=k_lst, y=genre_res[0]);\nplt.title(\"FFM add genre vs baseline RMSE\")\nplt.show()\nsns.lineplot(x=k_lst, y=base_res[1]);\nsns.lineplot(x=k_lst, y=genre_res[1]);\nplt.title(\"FFM add genre vs baseline NDCG\")\nplt.show()\n```\n\n<img src=\"./ffm_genre.png\" width=\"50%\" height=\"50%\" />\n\n\nMystery is the most popular genre, with an overall score near 4.0, which means the mystery topic is more acceptable to the general population. By contrast, horror is the hardest genre for capturing an audience. We believe the different acceptance thresholds of different genres contain information that is useful for later prediction.\n\n<img src=\"./movie_genre_rating.jpeg\" width=\"50%\" height=\"50%\" />\n\nTake user_id = 34415 as an example. The user rated 35 movies in our dataset, and we plot the genre distribution of the 30% of movies with the highest scores (blue) and the 30% with the lowest scores (orange). This user is likely to give higher scores to drama, comedy and romance movies, and lower scores to action, thriller and adventure movies. Users of this kind usually prefer several particular genres of movies but dislike some others. Thus we think genre is good side information for learning user tastes.\n\n<img src=\"./user_genre.jpeg\" width=\"70%\" height=\"70%\" />\n\nWe test our model on users at each activity level. We conclude that the higher the activity level, the lower the MSE score, which is reasonable: the more movies a user has watched, the more information is available, and the more accurate the prediction. 
What we most want to know is which activity level improved the most; it turns out the high-activity users improve the most, which means we gain more accuracy for them as we add more side information.\n\nResults:\n||low_active_user|medium_active_user|high_active_user|\n|-|-|-|-|\n|MSE|0.719|0.689|0.629|\n|Improve|0.015|0.022|0.0339|\n\n\n### Add Time \n\nWe assumed that users' preferences change over time, so it might be a good idea to let the machine learn this. We added a field called recently rated movies to record the 3 movies a user rated most recently before rating the current movie. The result is shown below: the orange line is the accuracy when adding this information while the blue line is the baseline. However, the result is quite bad.\n\n```Python \nratings_high = Add_recent(ratings_high)\nffm_CV(ratings_high,[\"rating\",\"user_index\",\"movie_index\",\"recent_index\"],\"mf_recent\")\nrecent_res = ffm_cv_eval(k_lst, \"mf_recent\")\n\n\nsns.lineplot(x=k_lst, y=base_res[0]);\nsns.lineplot(x=k_lst, y=recent_res[0]);\nplt.title(\"FFM add recent rated movie vs baseline RMSE\")\nplt.show()\nsns.lineplot(x=k_lst, y=base_res[1]);\nsns.lineplot(x=k_lst, y=recent_res[1]);\nplt.title(\"FFM add recent rated movie vs baseline NDCG\")\nplt.show()\n```\n\n<img src=\"./ffm_recent.png\" width=\"50%\" height=\"50%\" />\n\n\n### Add Year\n\nIn the plot below, the orange line is the accuracy when adding the movie year while the blue line is the baseline. Adding this information beats the baseline model, and k = 90 is the best choice. Specific numbers can be seen in our notebook.\n\n\n```Python \nratings_high = Add_year(ratings_high)\nffm_CV(ratings_high,[\"rating\",\"user_index\",\"movie_index\",\"year_index\"],\"mf_year\")\nyear_res = ffm_cv_eval(k_lst+[70,90,110], \"mf_year\")\n\nsns.lineplot(x=k_lst, y=base_res[0]);\nsns.lineplot(x=k_lst+[70,90,110], y=year_res[0]);\nplt.title(\"FFM add movie year vs baseline RMSE\")\nplt.show()\nsns.lineplot(x=k_lst, y=base_res[1]);\nsns.lineplot(x=k_lst+[70,90,110], y=year_res[1]);\nplt.title(\"FFM add movie year vs baseline NDCG\")\nplt.show()\n```\n\n<img src=\"./ffm_year.png\" width=\"50%\" height=\"50%\" />\n\n\nWe see that movies from the 80s receive the highest mean score, and we think the return to studio-driven pictures in the 80s plays an important role in the high popularity of those films. The idea of Hollywood film making changed in the 80s from plain story telling to highly marketable and understandable cinematic plots that could be summarized in one or two sentences. \n2010 ranks second, probably because of the massive use of 3D technology following the success of Avatar.\n\n<img src=\"./movie_year.jpeg\" width=\"50%\" height=\"50%\" />\n\nComparing the results at each activity level against the base model, we see that the more active the user is, the more accurate the model's predictions. \n\nResults:\n||low_active_user|medium_active_user|high_active_user|\n|-|-|-|-|\n|MSE|0.7050|0.6691|0.6166|\n|Improve|0.029|0.0419|0.0469|\n\n### Add Country\n\nIn the plot below, the orange line is the accuracy when adding the movie country while the blue line is the baseline. Adding this information beats the baseline model, and k = 45 is the best choice. 
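\n\nThe appendix below stops short of the Add_year and Add_country implementations, so purely as an illustration, here is a hypothetical sketch of how a one-hot side-information field such as year or country can be attached. The function name, field id and offset scheme are assumptions, not the notebook's actual code:\n\n```Python \ndef Add_onehot_field(ratings_high, column, field_id=2):\n    # Offset the new features past the user and movie index ranges.\n    index_start = ratings_high.user_id.unique().shape[0] + ratings_high.movie_id.unique().shape[0]\n    values = sorted(ratings_high[column].unique())\n    pos = {v: i for i, v in enumerate(values)}\n    # Emit one \"field:feature:1\" entry per rating row.\n    ratings_high[column + \"_index\"] = ratings_high[column].apply(\n        lambda v: str(field_id) + \":\" + str(index_start + pos[v]) + \":1\")\n    return ratings_high\n\n# e.g. Add_onehot_field(ratings_high, \"movie_year\") would produce a year_index column.\n```\n\n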
Specific numbers can be seen in our notebook.\n\n\n```Python \nratings_high = Add_country(ratings_high)\nffm_CV(ratings_high,[\"rating\",\"user_index\",\"movie_index\",\"country_index\"],\"mf_country\")\ncountry_res = ffm_cv_eval(k_lst, \"mf_country\")\n\nsns.lineplot(x=k_lst, y=base_res[0]);\nsns.lineplot(x=k_lst, y=country_res[0]);\nplt.title(\"FFM add movie country vs baseline RMSE\")\nplt.show()\nsns.lineplot(x=k_lst, y=base_res[1]);\nsns.lineplot(x=k_lst, y=country_res[1]);\nplt.title(\"FFM add movie country vs baseline NDCG\")\nplt.show()\n\n```\n\n<img src=\"./ffm_country.png\" width=\"50%\" height=\"50%\" />\n\n\nThe results show that country information is helpful in determining the rating of a movie; we classify the movies according to their region, and we saw an improvement in the overall score once the country information was added to the model.\n\n<img src=\"./movie_country.jpeg\" width=\"100%\" height=\"100%\" />\n\nTake user_id = 81924 as an example. The user rated 89 movies in our dataset, and we take the 30% of movies with the highest scores and the 30% with the lowest scores; the picture shows which countries they were produced in. We can see that the user watched lots of USA and UK movies but gave low scores only to USA movies, while giving high scores to movies from other countries. Users of this kind usually watch movies from one specific country (like the USA in this example), but sometimes they also watch really good movies from other countries, and to those 'really good movies' they tend to give higher scores.\nAlso, some other users may simply prefer movies from a particular country and tend to give them higher scores. In this way, we can really learn some useful information from the 'country' embedding.\n\n<img src=\"./user_country.jpeg\" width=\"70%\" height=\"70%\" />\n\nComparing the results at each activity level against the base model, we see that the medium-activity users improved the most. We believe this is because low-activity users do not have enough information, while high-activity users have so many rating records, which are direct evidence, that the side information matters less for their predictions.\n\nResults:\n||low_active_user|medium_active_user|high_active_user|\n|-|-|-|-|\n|MSE|0.7052|0.6726|0.6166|\n|Improve|0.015|0.0384|0.0339|\n\n### Ensemble \n\nIn the plot below, the orange line is the accuracy of the ensembled model while the blue line is the baseline. The ensemble beats the baseline model, and k = 25 is the best choice. Specific numbers can be seen in our notebook. From the plot we can see that although the raw accuracy is not as high as before the ensemble, the variation of the result becomes smaller. 
So the ensemble gives us a more stable result.\n\n```Python \nratings_high = data_sampling()\nratings_high = Add_country(ratings_high)\nratings_high = Add_year(ratings_high)\nratings_high = Add_genre(ratings_high)\n\nratings_high.country_index = ratings_high.country_index.apply(\n    lambda x: \"3:\"+str(int(x.split(\":\")[1])+20)+\":1\")\nratings_high.year_index = ratings_high.year_index.apply(\n    lambda x: \"4:\"+str(int(x.split(\":\")[1])+20+89)+\":1\")\n\nffm_CV(ratings_high,[\"rating\",\"user_index\",\"movie_index\",\"genre_index\",\"country_index\",\"year_index\"],\"mf_ensemble\")\nk_lst = [1,3,5,10,15,20,25,30,35,40,45,50]\nensem_res = ffm_cv_eval(k_lst, \"mf_ensemble\")\n\n```\n\n<img src=\"./ffm_ensembel.png\" width=\"50%\" height=\"50%\" />\n\nWe test our model on users at each activity level and see that the improvement over the baseline model is not greater than that of the models above, where we added one field at a time. We think this is because the attributes are not independent, and their correlation may affect the accuracy of the model.\n\nResults:\n||low_active_user|medium_active_user|high_active_user|\n|-|-|-|-|\n|MSE|0.728|0.692|0.640|\n|Improve|0.0063|0.019|0.0229|\n\n\nWe also tried a voting ensemble of the 3 models on year, genre and country, and this is our best result: the MSE is 0.6514.\n\n```Python \ndef NDCG(t):\n    \"\"\"\n    compute NDCG for a user\n    t: pandas dataframe \n    \"\"\"\n    rank = np.array(sorted(t['ranking'].values)[::-1])\n    DCG = t.sort_values(by=[\"ranking\"],ascending=False).rating.values\n    IDCG = np.array(sorted(DCG))\n    NDCG = (np.sum((2**(DCG)-1)/np.log2(rank+1))) / (np.sum((2**(IDCG)-1)/np.log2(rank+1)))\n    return NDCG\n\ndf_lst = [pd.DataFrame(),pd.DataFrame(),pd.DataFrame()]\n\nfor index,file_name in enumerate([\"mf_genre\",\"mf_year\",\"mf_country\"]):\n    for i in [\"1\",\"2\",\"3\"]:\n        # update mse\n        pred_ratings = pd.read_csv(\"./xlearn/\"+file_name+\"/ratings_high_output\"+i+\".txt\",header=None)\n        pred_ratings = pred_ratings[0]\n\n\n        label_ratings = pd.read_csv(\"./xlearn/\"+file_name+\"/ratings_high_test\"+i+\".txt\",header=None)\n        label_ratings[0] = label_ratings[0].apply(lambda x: x.split(\" \")[0:3])\n        label_ratings[\"rating\"] = label_ratings[0].apply(lambda x: x[0])\n        label_ratings[\"rating\"] = label_ratings[\"rating\"].astype(float)\n        label_ratings[\"user_id\"] = label_ratings[0].apply(lambda x: x[1])\n        label_ratings[\"movie_id\"] = label_ratings[0].apply(lambda x: x[2])\n        label_ratings = label_ratings[['rating','user_id','movie_id']]\n        label_ratings[\"rating_pred\"] = pred_ratings\n        \n        df_lst[index] = df_lst[index].append(label_ratings)\n        \nall_df = pd.merge(df_lst[0],df_lst[1],on=[\"user_id\",\"movie_id\"],how=\"inner\")\nall_df = pd.merge(all_df,df_lst[2],on=[\"user_id\",\"movie_id\"],how=\"inner\")\nall_df[\"rating_mean\"] = (all_df.rating_pred + all_df.rating_pred_x + all_df.rating_pred_y)/3\n\nprint(np.sum((all_df.rating_mean-all_df.rating)**2)/all_df.shape[0])\n```\n\n\n\n## Generate Recommendation \n\nAfter our model has been built, we can generate recommendations for existing users. For new users our model cannot work, since they have no rating records in the system. One solution to this problem is to recommend the movies with the highest CTR and rating scores; new users will then click such a link with high probability, giving us activity information about them, after which we can make personalized recommendations.\n\n\n## Conclusion \n\nIn this project we used FFM to combine rating information with multiple fields to build our recommendation model. 
One reason to choose this kind of linear model instead of a deep learning model is that linear models have better interpretability. Our results showed that adding movie information truly helps improve model accuracy and yields better recommendations. This is also meaningful in business practice: the model we proposed can be used to solve the existing-user recommendation problem, and another advantage of using our model in practice is that FFM can be used for online learning; once the model is trained, making predictions is really fast.\n\n\n\n\n## Appendix \n\n\n\n```Python \nimport pandas as pd \nimport xlearn as xl \nimport numpy as np \nimport re \n\n\ndef data_sampling():\n\n    \"\"\"\n    sample rating data, and change data format as ffm input format \n\n    \"\"\"\n    # read data \n    movie_ids = pd.read_csv(\"./movie_info.csv\")[[\"movie_id\",\"movie_rating\",\"movie_year\",\"movie_detail\"]]\n    links = pd.read_csv(\"./links.csv\")\n    ratings = pd.read_csv(\"ratings.csv\")\n\n    # merge imdb movieid and movielens movieid\n    links.columns = [\"id\",\"movie_id\",\"tmdbId\"]\n    movie_ids = pd.merge(links,movie_ids,on=\"movie_id\",how=\"inner\")\n    ratings.columns = ['user_id','id','rating','timestamp']\n    ratings = pd.merge(ratings,movie_ids,on=\"id\",how=\"inner\")\n\n    # sample users with more than 15 ratings\n    user = ratings[\"user_id\"].value_counts().reset_index()\n    user = user[user.user_id>=15][[\"index\"]]\n    user.columns = [\"user_id\"]\n    ratings_high = pd.merge(ratings,user,on=\"user_id\",how=\"inner\")\n    ratings_high = ratings_high[['user_id','id','rating','timestamp','movie_rating','movie_year','movie_detail']]\n    ratings_high.columns = ['user_id','movie_id','rating','timestamp','movie_rating','movie_year','movie_detail']\n\n\n    # generate user index and movie index\n    ratings_high_user = ratings_high.drop_duplicates(subset=\"user_id\") \\\n        .sort_values(by=\"user_id\").reset_index()['user_id'].reset_index()\n    ratings_high_movie = ratings_high.drop_duplicates(subset=\"movie_id\") \\\n        .sort_values(by=\"movie_id\").reset_index()['movie_id'].reset_index()\n\n    # merge dataframe \n    ratings_high = pd.merge(ratings_high,ratings_high_user,on=\"user_id\",how=\"inner\")\n    ratings_high = pd.merge(ratings_high,ratings_high_movie,on=\"movie_id\",how=\"inner\")\n\n    # change format to ffm format \n    ratings_high.columns = ['user_id','movie_id','rating','timestamp','movie_rating','movie_year','movie_detail','user_index','movie_index']\n    ratings_high['movie_index'] = ratings_high['user_index'].max() + 1 + ratings_high['movie_index'] \n    ratings_high['user_index'] = ratings_high['user_index'].apply(lambda x: \"0:\"+str(x)+\":1\")\n    ratings_high['movie_index'] = ratings_high['movie_index'].apply(lambda x: \"1:\"+str(x)+\":1\")\n\n    # create 3 fold cv for hyper parameter tuning \n    ratings_high['rand'] = np.random.random(ratings_high.shape[0])\n    ratings_high['rank'] = ratings_high.groupby(\"user_id\")['rand'].rank(ascending=True,method=\"first\")\n    user_rating_num = ratings_high.groupby(\"user_id\")[\"rank\"].max().reset_index()\n    ratings_high = pd.merge(ratings_high,user_rating_num,on=\"user_id\",how=\"inner\")\n    ratings_high[\"group\"] = ratings_high[\"rank_x\"]/ratings_high[\"rank_y\"]\n    \n    return ratings_high\n\ndef ffm_CV(ratings_high,columns,file_name):\n\n    \"\"\"\n    create 3 - fold cross validation\n\n    \"\"\"\n\n    # Generate 3 fold cv\n\n    ratings_high_train = ratings_high[columns][ratings_high.group>=0.33]\n    ratings_high_test = ratings_high[columns][ratings_high.group<0.33]\n\n    
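# Note: pandas' to_csv quotes any field that contains spaces (e.g. a multi-entry\n    # side-information field like \"2:100:0.5 2:101:0.5\"); xlearn cannot parse those\n    # quotes, so each fold file below is written, re-read, and the quotes stripped.\n    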
# generate the 3 train/test folds as (train mask, test mask) pairs\n    folds = [\n        (ratings_high.group >= 0.33, ratings_high.group < 0.33),\n        ((ratings_high.group < 0.33) | (ratings_high.group > 0.66),\n         (ratings_high.group >= 0.33) & (ratings_high.group <= 0.66)),\n        (ratings_high.group <= 0.66, ratings_high.group > 0.66),\n    ]\n    for fold, (train_mask, test_mask) in enumerate(folds, start=1):\n        _write_ffm(ratings_high[columns][train_mask],\n                   \"./xlearn/\"+file_name+\"/ratings_high_train\"+str(fold)+\".txt\")\n        _write_ffm(ratings_high[columns][test_mask],\n                   \"./xlearn/\"+file_name+\"/ratings_high_test\"+str(fold)+\".txt\")\n\n\ndef ffm_cv_eval(k_lst, file_name):\n    \"\"\"\n    k_lst: hidden dimension k, hyperparameter\n    file_name: indicates which data set is used to build ffm model\n    return: mse and ndcg\n    \"\"\"\n\n    def NDCG(t):\n        \"\"\"\n        compute NDCG for a user\n        t: pandas dataframe \n        \"\"\"\n        rank = np.array(sorted(t['ranking'].values)[::-1])\n        DCG = t.sort_values(by=[\"ranking\"],ascending=False).rating.values\n        IDCG = np.array(sorted(DCG))\n        NDCG = (np.sum((2**(DCG)-1)/np.log2(rank+1))) / (np.sum((2**(IDCG)-1)/np.log2(rank+1)))\n        return NDCG\n\n    \n    mse_lst = []\n    NDCG_lst = []\n    for k in k_lst:\n\n        param = {'task':'reg', 'lr':0.2, 'lambda':0.02, 'metric':'mae', 'k':k}\n        mse = []\n        ndcg = []\n        for i in ['1','2','3']:\n\n            ffm_model = xl.create_ffm() \n            ffm_model.setTrain(\"./xlearn/\"+file_name+\"/ratings_high_train\"+i+\".txt\")\n\n            #ffm_model.setTXTModel(\"./xlearn/model.txt\")\n            ffm_model.fit(param, \"./xlearn/\"+file_name+\"/ratings_high_model\"+i+\".out\")\n\n            # Prediction task\n            ffm_model.setTest(\"./xlearn/\"+file_name+\"/ratings_high_test\"+i+\".txt\") # Set the path of test dataset\n            # Start to predict\n            # The output result will be stored in output.txt\n            
ffm_model.predict(\"./xlearn/\"+file_name+\"/ratings_high_model\"+i+\".out\",\"./xlearn/\"+file_name+\"/ratings_high_output\"+i+\".txt\")\n\n            # update mse\n            pred_ratings = pd.read_csv(\"./xlearn/\"+file_name+\"/ratings_high_output\"+i+\".txt\",header=None)\n            pred_ratings = pred_ratings[0]\n\n\n            label_ratings = pd.read_csv(\"./xlearn/\"+file_name+\"/ratings_high_test\"+i+\".txt\",header=None)\n            label_ratings[0] = label_ratings[0].apply(lambda x: x.split(\" \")[0:3])\n            label_ratings[\"rating\"] = label_ratings[0].apply(lambda x: x[0])\n            label_ratings[\"rating\"] = label_ratings[\"rating\"].astype(float)\n            label_ratings[\"user_id\"] = label_ratings[0].apply(lambda x: x[1])\n            label_ratings[\"movie_id\"] = label_ratings[0].apply(lambda x: x[2])\n            label_ratings = label_ratings[['rating','user_id','movie_id']]\n\n\n            # pred_ratings is already the prediction Series, so subtract it element-wise\n            mse.append((np.sum((label_ratings[\"rating\"] - pred_ratings)**2)/label_ratings.shape[0]))\n\n            # update ndcg\n            label_ratings[\"pred_rating\"] = pred_ratings\n            label_ratings['ranking'] = label_ratings.groupby(\"user_id\")[\"pred_rating\"].rank(ascending=False,method=\"first\")\n            ndcg.append(label_ratings.groupby(\"user_id\").apply(NDCG).mean())\n\n        \n        mse_lst.append(np.mean(mse)) \n        NDCG_lst.append(np.mean(ndcg))\n        \n    return mse_lst,NDCG_lst\n\n\n\ndef Add_genre(ratings_high):\n    \"\"\"\n    add genre and change it to ffm input format\n    \"\"\"\n\n    movie_lda = pd.read_pickle(\"./genres_encode.pkl\")\n    movie_lda = movie_lda[[\"movieId\",\"genres_vec\"]]\n    movie_lda.columns = ['movie_id','genre_index']\n    index_start = ratings_high.user_id.unique().shape[0] + ratings_high.movie_id.unique().shape[0]\n    movie_lda['genre_index'] = movie_lda['genre_index'].apply(lambda x: \n                                            \" \".join([\"2:\"+str(index+index_start)+\":\"+str(i) \n                                             for index,i in enumerate(x) if i != 0]))\n    ratings_high = pd.merge(ratings_high,movie_lda,on=\"movie_id\",how=\"inner\")\n\n    return ratings_high\n\n\ndef Add_tag(ratings_high):\n    \"\"\"\n    add tag and change it to ffm input format\n    \"\"\"\n    movie_lda = pd.read_pickle(\"./movie_lda_50.pkl\")\n    movie_lda = movie_lda[[\"movieId\",\"result\"]]\n    movie_lda.columns = ['movie_id','tag_index']\n    index_start = ratings_high.user_id.unique().shape[0] + ratings_high.movie_id.unique().shape[0]\n    movie_lda['tag_index'] = movie_lda['tag_index'].apply(lambda x: \n                                            (\" \".join([\"2:\"+str(index+index_start)+\":\"+str(i) \n                                             for index,i in enumerate(x) if i >= 0.01])).strip('\"'))\n    ratings_high = pd.merge(ratings_high,movie_lda,on=\"movie_id\",how=\"inner\")\n\n    return ratings_high\n\ndef Add_recent(ratings_high):\n    \"\"\"\n    add recently rated movies for a user rating on a movie and change it to ffm input format\n    \"\"\"\n    def list_convert(l):\n        if l != \"\":\n            return \" \".join(l)\n        else:\n            return \"\"\n\n    ratings_high['time_rank'] = ratings_high.groupby(\"user_id\")[\"timestamp\"].rank(ascending=True,method=\"first\")\n    ratings_high_time = ratings_high[['rating','user_index','movie_index','time_rank']]\n\n    ratings_high_time = pd.merge(ratings_high_time,ratings_high_time[['user_index','movie_index','time_rank']],on=\"user_index\",how=\"inner\")\n    ratings_high_time[\"time_diff\"] = ratings_high_time.time_rank_x - ratings_high_time.time_rank_y\n    ratings_high_time = ratings_high_time[(ratings_high_time.time_diff>0)&(ratings_high_time.time_diff<4)]\n    rating_recent = ratings_high_time.groupby([\"user_index\",\"movie_index_x\"])[\"movie_index_y\"].apply(lambda x: list(x)).reset_index()\n    rating_recent.columns = ['user_index','movie_index','recent_index']\n    ratings_high = 
pd.merge(ratings_high,rating_recent,on=[\"user_index\",\"movie_index\"],how=\"left\")\n    ratings_high.fillna(\"\",inplace=True)\n    ratings_high.recent_index = ratings_high.recent_index.apply(list_convert)\n\n    return ratings_high\n\n\ndef Add_year(ratings_high):\n    \"\"\"\n    add year and change it to ffm input format\n    \"\"\"\n    def convert_year(year):\n        if year == 0:\n            return 0\n        elif year >= 2010:\n            return 1\n        elif year >= 2000:\n            return 2\n        elif year >= 1990:\n            return 3\n        else:\n            return 4\n    \n    ratings_high.movie_year = ratings_high.movie_year.apply(convert_year)\n    index_start = ratings_high.user_id.unique().shape[0] + ratings_high.movie_id.unique().shape[0]\n    ratings_high.movie_year = ratings_high.movie_year.apply(lambda x: \"2:\"+str(index_start+x)+\":1\")\n\n    return ratings_high\n\n\n\n\ndef Add_country(ratings_high):\n    \"\"\"\n    add country and change it to ffm input format\n    \"\"\"\n\n    def convert_country(country):\n        if re.findall(r\"'Country:(.*?)'\",country) == []:\n            return 0\n        else:\n            return re.findall(r\"'Country:(.*?)'\",country)[0]\n    \n    ratings_high[\"country\"] = ratings_high['movie_detail'].apply(convert_country)\n\n    movie_country = ratings_high.country.value_counts().reset_index()\n    movie_country['country_index'] = [i for i in range(movie_country.shape[0])]\n    movie_country.columns = [\"country\",\"num\",\"country_index\"]\n    ratings_high = pd.merge(ratings_high,movie_country[[\"country\",\"country_index\"]],on=\"country\")\n    index_start = ratings_high.user_id.unique().shape[0] + ratings_high.movie_id.unique().shape[0]\n    ratings_high.country_index = ratings_high.country_index.apply(lambda x: \"2:\"+str(index_start+x)+\":1\")\n\n    return ratings_high\n\n```\n\n" }, { "alpha_fraction": 0.5714285969734192, "alphanum_fraction": 0.5714285969734192, "avg_line_length": 13, "blob_id": "d41d429f3ea75def05625df4ff17c371d7b92649", "content_id": "d5be6d96d6c26ea968eda73d074ca0308d906f6d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 14, "license_type": "no_license", "max_line_length": 13, "num_lines": 1, "path": "/xlearn/mf_genre/Readme.md", "repo_name": "zxw4332/Content-based-movie-recommendation-system", "src_encoding": "UTF-8", "text": "### Add genre\n" }, { "alpha_fraction": 0.7556270360946655, "alphanum_fraction": 0.7652733325958252, "avg_line_length": 27.090909957885742, "blob_id": "712b6c1c52fa00e21e071f280ca60cc9174d8932", "content_id": "1f4f35b176eae862f5193ee3bb6240afbc3cd86c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 311, "license_type": "no_license", "max_line_length": 125, "num_lines": 11, "path": "/lda/Readme.md", "repo_name": "zxw4332/Content-based-movie-recommendation-system", "src_encoding": "UTF-8", "text": "# LDA Model\n\n## Packages:\n\ngensim/numpy/pandas/nltk\n\n## Procedure: \n\n1. Load the sampled data. \n2. Preprocess the tags and filter out infrequent words (the tags are already preprocessed words, so we don't think they need further processing).\n3. Use the built-in gensim package to produce the movie-topic and topic-tag distributions. \n\n" } ]
7
natyrix/SimpleSOAPTest
https://github.com/natyrix/SimpleSOAPTest
ba2efa31b9aa218f2655460ce7ba997dbde2a128
a826022eee4471a500f99b47cdc2166ebabf9b7a
7a81cc9382900568da15c911dbc37a0a062eb956
refs/heads/main
2023-04-25T00:16:55.866805
2021-05-09T11:49:31
2021-05-09T11:49:31
365,670,720
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4787878692150116, "alphanum_fraction": 0.6939393877983093, "avg_line_length": 15.550000190734863, "blob_id": "57308fe9fa0841996d04e20a44d1df8e4e74f0c2", "content_id": "ca7997ce33fe9be97db7a84295ad6e6d2ba91b95", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 330, "license_type": "no_license", "max_line_length": 27, "num_lines": 20, "path": "/requirements.txt", "repo_name": "natyrix/SimpleSOAPTest", "src_encoding": "UTF-8", "text": "appdirs==1.4.4\nasgiref==3.3.4\nattrs==21.2.0\ncached-property==1.5.2\ncertifi==2020.12.5\nchardet==4.0.0\ndefusedxml==0.7.1\nDjango==3.2.2\ndjango-restframework==0.0.1\nidna==2.10\nisodate==0.6.0\nlxml==4.6.3\npytz==2021.1\nrequests==2.25.1\nrequests-file==1.5.1\nrequests-toolbelt==0.9.1\nsix==1.16.0\nsqlparse==0.4.1\nurllib3==1.26.4\nzeep==4.0.0" }, { "alpha_fraction": 0.6356208920478821, "alphanum_fraction": 0.6437908411026001, "avg_line_length": 41.13793182373047, "blob_id": "f25b87017d816e4705032fcf7076999e3d57e99c", "content_id": "2eca3266c7f32166afe2fce79e1b04aaf0f3d3a9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1224, "license_type": "no_license", "max_line_length": 122, "num_lines": 29, "path": "/API/views.py", "repo_name": "natyrix/SimpleSOAPTest", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nfrom rest_framework import viewsets, views, status\nfrom rest_framework.authtoken.models import Token\nfrom rest_framework.response import Response\nfrom django.contrib.auth.models import User\nfrom rest_framework.authentication import TokenAuthentication\nfrom rest_framework.permissions import AllowAny, IsAuthenticated\nfrom zeep import Client\n\nclass Home(views.APIView):\n permission_classes = (AllowAny,)\n def get(self, request):\n client = Client(wsdl=\"http://www.dneonline.com/calculator.asmx?wsdl\")\n resp = client.service.Add(12,13)\n user = User.objects.get(username='hp')\n tok = Token.objects.get(user=user)\n\n return Response({\n \"request\":{\n \"Requested URL\":\"http://www.dneonline.com/calculator.asmx?wsdl\",\n \"Requested Method\": \"Requested Method: Add(intA: xsd:int, intB: xsd:int) -> AddResult: xsd:int\",\n \"Requested Method With Passed Arguments\": \"Requested Method: Add(12: xsd:int, 13: xsd:int) -> 25: xsd:int\"\n },\n \"response\": {\n \"Response\": resp \n },\n }, headers={\n \"authentication\": \"Token {}\".format(tok)\n })\n\n\n" }, { "alpha_fraction": 0.6647058725357056, "alphanum_fraction": 0.6647058725357056, "avg_line_length": 18, "blob_id": "cc12e707c11e8b2d56eceafb51baaa8c18fb35b8", "content_id": "4cc8f3df71b9081eb67a760db023ce18d141ce9f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 170, "license_type": "no_license", "max_line_length": 62, "num_lines": 9, "path": "/Home/urls.py", "repo_name": "natyrix/SimpleSOAPTest", "src_encoding": "UTF-8", "text": "from django.urls import path\nfrom . 
import views\n\n\nurlpatterns = [\n path('',views.home, name='Home'),\n path('make_req/',views.make_request, name='make_request'),\n\n]" }, { "alpha_fraction": 0.6728280782699585, "alphanum_fraction": 0.6839186549186707, "avg_line_length": 26.049999237060547, "blob_id": "dd1785732d97d893d4f4c82072fe1d8335d0c5c2", "content_id": "3745ba20361c8c12717aaa9833e35d0ca682488c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 541, "license_type": "no_license", "max_line_length": 73, "num_lines": 20, "path": "/Home/views.py", "repo_name": "natyrix/SimpleSOAPTest", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nimport secrets\nimport json\nfrom django.http import JsonResponse\nfrom zeep import Client\n\ndef home(request):\n return render(request, 'index.html')\n\n\ndef make_request(request):\n client = Client(wsdl=\"http://www.dneonline.com/calculator.asmx?wsdl\")\n resp = client.service.Add(12,13)\n \n context = {\n 'request_to': \"http://www.dneonline.com/calculator.asmx?wsdl\",\n 'resp': resp,\n 'token': secrets.token_hex(nbytes=16)\n }\n return render(request, 'index.html', context)\n" } ]
4
Ksionszek/DES
https://github.com/Ksionszek/DES
1246de38c108252c6527cf6f298969ef62398f9d
11b8c08af91b931cbf3e0657c89c592d371e30aa
17d798b732e6c77cf449f88d831390990821528d
refs/heads/master
2023-08-17T04:07:11.111323
2020-08-25T20:35:48
2020-08-25T20:35:48
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.33672696352005005, "alphanum_fraction": 0.48207515478134155, "avg_line_length": 31.975000381469727, "blob_id": "70ae1a12b7ddcb62b75c42ad607b400f5c3688ef", "content_id": "45081ccea01c11126684a2091f1e7617a4eebdad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9233, "license_type": "no_license", "max_line_length": 104, "num_lines": 280, "path": "/des_karol.py", "repo_name": "Ksionszek/DES", "src_encoding": "UTF-8", "text": "import tkinter as tk\nimport string\nimport random\nfrom struct import unpack\n\nCP_1 = [\n 57, 49, 41, 33, 25, 17, 9,\n 1, 58, 50, 42, 34, 26, 18,\n 10, 2, 59, 51, 43, 35, 27,\n 19, 11, 3, 60, 52, 44, 36,\n 63, 55, 47, 39, 31, 23, 15,\n 7, 62, 54, 46, 38, 30, 22,\n 14, 6, 61, 53, 45, 37, 29,\n 21, 13, 5, 28, 20, 12, 4\n]\n\nCP_2 = [\n 14, 17, 11, 24, 1, 5, 3, 28,\n 15, 6, 21, 10, 23, 19, 12, 4,\n 26, 8, 16, 7, 27, 20, 13, 2,\n 41, 52, 31, 37, 47, 55, 30, 40,\n 51, 45, 33, 48, 44, 49, 39, 56,\n 34, 53, 46, 42, 50, 36, 29, 32\n]\n\n# Expand matrix to get a 48bits matrix of datas to apply the xor with Ki\nE = [\n 32, 1, 2, 3, 4, 5,\n 4, 5, 6, 7, 8, 9,\n 8, 9, 10, 11, 12, 13,\n 12, 13, 14, 15, 16, 17,\n 16, 17, 18, 19, 20, 21,\n 20, 21, 22, 23, 24, 25,\n 24, 25, 26, 27, 28, 29,\n 28, 29, 30, 31, 32, 1\n]\n\n# SBOX\nS_BOX = [\n\n [[14, 4, 13, 1, 2, 15, 11, 8, 3, 10, 6, 12, 5, 9, 0, 7],\n [0, 15, 7, 4, 14, 2, 13, 1, 10, 6, 12, 11, 9, 5, 3, 8],\n [4, 1, 14, 8, 13, 6, 2, 11, 15, 12, 9, 7, 3, 10, 5, 0],\n [15, 12, 8, 2, 4, 9, 1, 7, 5, 11, 3, 14, 10, 0, 6, 13],\n ],\n\n [[15, 1, 8, 14, 6, 11, 3, 4, 9, 7, 2, 13, 12, 0, 5, 10],\n [3, 13, 4, 7, 15, 2, 8, 14, 12, 0, 1, 10, 6, 9, 11, 5],\n [0, 14, 7, 11, 10, 4, 13, 1, 5, 8, 12, 6, 9, 3, 2, 15],\n [13, 8, 10, 1, 3, 15, 4, 2, 11, 6, 7, 12, 0, 5, 14, 9],\n ],\n\n [[10, 0, 9, 14, 6, 3, 15, 5, 1, 13, 12, 7, 11, 4, 2, 8],\n [13, 7, 0, 9, 3, 4, 6, 10, 2, 8, 5, 14, 12, 11, 15, 1],\n [13, 6, 4, 9, 8, 15, 3, 0, 11, 1, 2, 12, 5, 10, 14, 7],\n [1, 10, 13, 0, 6, 9, 8, 7, 4, 15, 14, 3, 11, 5, 2, 12],\n ],\n\n [[7, 13, 14, 3, 0, 6, 9, 10, 1, 2, 8, 5, 11, 12, 4, 15],\n [13, 8, 11, 5, 6, 15, 0, 3, 4, 7, 2, 12, 1, 10, 14, 9],\n [10, 6, 9, 0, 12, 11, 7, 13, 15, 1, 3, 14, 5, 2, 8, 4],\n [3, 15, 0, 6, 10, 1, 13, 8, 9, 4, 5, 11, 12, 7, 2, 14],\n ],\n\n [[2, 12, 4, 1, 7, 10, 11, 6, 8, 5, 3, 15, 13, 0, 14, 9],\n [14, 11, 2, 12, 4, 7, 13, 1, 5, 0, 15, 10, 3, 9, 8, 6],\n [4, 2, 1, 11, 10, 13, 7, 8, 15, 9, 12, 5, 6, 3, 0, 14],\n [11, 8, 12, 7, 1, 14, 2, 13, 6, 15, 0, 9, 10, 4, 5, 3],\n ],\n\n [[12, 1, 10, 15, 9, 2, 6, 8, 0, 13, 3, 4, 14, 7, 5, 11],\n [10, 15, 4, 2, 7, 12, 9, 5, 6, 1, 13, 14, 0, 11, 3, 8],\n [9, 14, 15, 5, 2, 8, 12, 3, 7, 0, 4, 10, 1, 13, 11, 6],\n [4, 3, 2, 12, 9, 5, 15, 10, 11, 14, 1, 7, 6, 0, 8, 13],\n ],\n\n [[4, 11, 2, 14, 15, 0, 8, 13, 3, 12, 9, 7, 5, 10, 6, 1],\n [13, 0, 11, 7, 4, 9, 1, 10, 14, 3, 5, 12, 2, 15, 8, 6],\n [1, 4, 11, 13, 12, 3, 7, 14, 10, 15, 6, 8, 0, 5, 9, 2],\n [6, 11, 13, 8, 1, 4, 10, 7, 9, 5, 0, 15, 14, 2, 3, 12],\n ],\n\n [[13, 2, 8, 4, 6, 15, 11, 1, 10, 9, 3, 14, 5, 0, 12, 7],\n [1, 15, 13, 8, 10, 3, 7, 4, 12, 5, 6, 11, 0, 14, 9, 2],\n [7, 11, 4, 1, 9, 12, 14, 2, 0, 6, 10, 13, 15, 3, 5, 8],\n [2, 1, 14, 7, 4, 10, 8, 13, 15, 12, 9, 0, 3, 5, 6, 11],\n ]\n]\n\n# Permut made after each SBox substitution for each round\nP = [16, 7, 20, 21, 29, 12, 28, 17,\n 1, 15, 23, 26, 5, 18, 31, 10,\n 2, 8, 24, 14, 32, 27, 3, 9,\n 19, 13, 30, 6, 22, 11, 4, 25]\n\nPI = [\n 58, 50, 42, 34, 26, 18, 10, 2,\n 60, 52, 44, 36, 28, 20, 12, 4,\n 
62,\t54,\t46,\t38,\t30,\t22,\t14,\t6,\n 64, 56, 48, 40, 32, 24, 16, 8,\n 57,\t49,\t41,\t33,\t25,\t17,\t 9, 1,\n 59, 51, 43, 35, 27, 19, 11, 3,\n 61,\t53,\t45,\t37,\t29,\t21,\t13,\t5,\n 63, 55, 47, 39, 31, 23, 15, 7\n]\n\n# Final permut for datas after the 16 rounds\nPI_1 = [40, 8, 48, 16, 56, 24, 64, 32,\n 39, 7, 47, 15, 55, 23, 63, 31,\n 38, 6, 46, 14, 54, 22, 62, 30,\n 37, 5, 45, 13, 53, 21, 61, 29,\n 36, 4, 44, 12, 52, 20, 60, 28,\n 35, 3, 43, 11, 51, 19, 59, 27,\n 34, 2, 42, 10, 50, 18, 58, 26,\n 33, 1, 41, 9, 49, 17, 57, 25]\n\n# Matrix that determine the shift for each round of keys\nSHIFT = [1, 1, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 1]\n\n\n\ndef keyGenerator():\n #no randomowy w chuj XD\n randomKey = []\n randomKey = string_to_bit_array(\"secret_k\")\n #randomLetters = ''.join(random.choice(string.ascii_letters)\n # for i in range(8))\n print(\"RandomKey: \")\n print(randomKey)\n #res = ''.join(bin(ord(c)) for c in randomLetters).replace('b', '')\n # print(randomLetters)\n # print(res)\n # print(len(res))\n permutKey = permut(randomKey, CP_1)\n print(\"PermutKey: \")\n print(permutKey)\n # ================================ generateAndPermutKey()\n # print(len(keyGenerator))\n splitResL, splitResR = nsplit(permutKey, 28)\n # print(splitResL)\n # print('\\n')\n # print(splitResR)\n \n for i in range(16):\n splitResL, splitResR = shift(splitResL, splitResR, SHIFT[i])\n temp = splitResL + splitResR\n subKeys.append(permut(temp, CP_2))\n print(\"SubKeys: \")\n print(subKeys)\n \n # for i in range(16):\n # print('\\n', len(subKeys[i]))\n # ================================ generateAndPermutKey()\n\n\ndef nsplit(s, n): # Split a list into sublists of size \"n\"\n return [s[k:k+n] for k in range(0, len(s), n)]\n\n\ndef permut(block, table): # Permut the given block using the given table (so generic method)\n return [block[x-1] for x in table]\n\n\ndef shift(g, d, n): # Shift a list of the given value\n return g[n:] + g[:n], d[n:] + d[:n]\n\ndef string_to_bit_array(text):\n array = list()\n for char in text:\n binval = binValue(char, 8)\n array.extend([int(x) for x in list(binval)])\n return array\n\n\ndef binValue(val, bitsize):\n binval = bin(val)[2:] if isinstance(val, int) else bin(ord(val))[2:]\n if len(binval) > bitsize:\n raise \"binary value larger than the expected size\"\n while len(binval) < bitsize:\n binval = \"0\"+binval # Add as many 0 as needed to get the wanted size\n return binval\n\n\ndef bit_array_to_string(array): # Recreate the string from the bit array\n res = ''.join([chr(int(y, 2)) for y in [''.join([str(x)\n for x in _bytes]) for _bytes in nsplit(array, 8)]])\n return res\n\n\ndef addPadding(text): # Add padding to the datas using PKCS5 spec.\n pad_len = 8 - (len(text) % 8)\n text += pad_len * chr(pad_len)\n return text\n\n\ndef xor(t1, t2): # Apply a xor and return the resulting list\n return [x ^ y for x, y in zip(t1, t2)]\n\n\ndef substitute(d_e): # Substitute bytes using SBOX\n subblocks = nsplit(d_e, 6) # Split bit array into sublist of 6 bits\n result = list()\n for i in range(len(subblocks)): # For all the sublists\n block = subblocks[i]\n # Get the row with the first and last bit\n row = int(str(block[0])+str(block[5]), 2)\n # Column is the 2,3,4,5th bits\n column = int(''.join([str(x) for x in block[1:][:-1]]), 2)\n # Take the value in the SBOX appropriated for the round (i)\n val = S_BOX[i][row][column]\n bin = binValue(val, 4) # Convert the value to binary\n result += [int(x) for x in bin] # And append it to the resulting list\n return 
result\n\n\nsubKeys = []\nclass Application(tk.Frame):\n def __init__(self, master=None):\n super().__init__(master)\n self.master = master\n self.pack()\n self.create_widgets()\n\n def create_widgets(self):\n tk.Label(\n root, text=\"Podaj slowo do zaszyfrowania:\").pack()\n name = tk.Entry(root, width=40)\n name.pack()\n name.focus()\n\n def encrypt():\n keyGenerator()\n message = name.get()\n if(len(message) % 8 != 0):\n message = addPadding(message)\n\n result = list()\n allText = nsplit(message, 8)\n for block in allText:\n block = string_to_bit_array(block)\n block = permut(block, PI)\n blockLeft, blockRight = nsplit(block, 32)\n print(\"BlockLeft: \")\n print(blockLeft)\n print(\"\\n\")\n for i in range(16):\n blockRightAftPermE = permut(blockRight, E)\n print( \"{}. BlockRightAfterPermutE:\".format(i))\n print(blockRightAftPermE)\n print(\"\\n\")\n temp = xor(subKeys[i], blockRightAftPermE)\n print(\"{}. Temp: \".format(i))\n print(temp)\n print(\"\\n\")\n temp = substitute(temp)\n temp = permut(temp, P)\n temp = xor(blockLeft, temp)\n blockLeft = blockRight\n blockRight = temp\n result += permut(blockRight + blockLeft, PI_1)\n final_res = bit_array_to_string(result)\n resEncryp[\"text\"] = final_res\n \n\n self.hi_there = tk.Button(self)\n self.hi_there[\"text\"] = \"Tajne kodowanie\"\n self.hi_there[\"command\"] = encrypt\n self.hi_there.pack(side=\"top\")\n\n self.quit = tk.Button(self, text=\"QUIT\", fg=\"red\",\n command=self.master.destroy)\n self.quit.pack(side=\"bottom\")\n\n resEncryp = tk.Label(master=root, text=\"zaszyfrowana wiadomosc\")\n resEncryp.pack()\n\n\nroot = tk.Tk()\napp = Application(master=root)\napp.mainloop()\n" }, { "alpha_fraction": 0.34947410225868225, "alphanum_fraction": 0.49672552943229675, "avg_line_length": 30.276397705078125, "blob_id": "9745349644cde01d47b82bbd481505468b0d36fb", "content_id": "ceff1d2c3a01ec7d775a0e2deada9ce55e55ad3d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10084, "license_type": "no_license", "max_line_length": 114, "num_lines": 322, "path": "/des.py", "repo_name": "Ksionszek/DES", "src_encoding": "UTF-8", "text": "from tkinter import *\nfrom array import *\nfrom tkinter import messagebox\nimport random\nimport string\n\nCP_1 = [\n 57, 49, 41, 33, 25, 17, 9,\n 1, 58, 50, 42, 34, 26, 18,\n 10, 2, 59, 51, 43, 35, 27,\n 19, 11, 3, 60, 52, 44, 36,\n 63, 55, 47, 39, 31, 23, 15,\n 7, 62, 54, 46, 38, 30, 22,\n 14, 6, 61, 53, 45, 37, 29,\n 21, 13, 5, 28, 20, 12, 4\n]\n\nCP_2 = [\n 14, 17, 11, 24, 1, 5, 3, 28,\n 15, 6, 21, 10, 23, 19, 12, 4,\n 26, 8, 16, 7, 27, 20, 13, 2,\n 41, 52, 31, 37, 47, 55, 30, 40,\n 51, 45, 33, 48, 44, 49, 39, 56,\n 34, 53, 46, 42, 50, 36, 29, 32\n]\n\nE = [\n 32, 1, 2, 3, 4, 5,\n 4, 5, 6, 7, 8, 9,\n 8, 9, 10, 11, 12, 13,\n 12, 13, 14, 15, 16, 17,\n 16, 17, 18, 19, 20, 21,\n 20, 21, 22, 23, 24, 25,\n 24, 25, 26, 27, 28, 29,\n 28, 29, 30, 31, 32, 1\n]\n\nS_BOX = [\n\n [[14, 4, 13, 1, 2, 15, 11, 8, 3, 10, 6, 12, 5, 9, 0, 7],\n [0, 15, 7, 4, 14, 2, 13, 1, 10, 6, 12, 11, 9, 5, 3, 8],\n [4, 1, 14, 8, 13, 6, 2, 11, 15, 12, 9, 7, 3, 10, 5, 0],\n [15, 12, 8, 2, 4, 9, 1, 7, 5, 11, 3, 14, 10, 0, 6, 13],\n ],\n\n [[15, 1, 8, 14, 6, 11, 3, 4, 9, 7, 2, 13, 12, 0, 5, 10],\n [3, 13, 4, 7, 15, 2, 8, 14, 12, 0, 1, 10, 6, 9, 11, 5],\n [0, 14, 7, 11, 10, 4, 13, 1, 5, 8, 12, 6, 9, 3, 2, 15],\n [13, 8, 10, 1, 3, 15, 4, 2, 11, 6, 7, 12, 0, 5, 14, 9],\n ],\n\n [[10, 0, 9, 14, 6, 3, 15, 5, 1, 13, 12, 7, 11, 4, 2, 8],\n [13, 7, 0, 9, 3, 4, 6, 10, 2, 8, 5, 14, 12, 11, 15, 
1],\n [13, 6, 4, 9, 8, 15, 3, 0, 11, 1, 2, 12, 5, 10, 14, 7],\n [1, 10, 13, 0, 6, 9, 8, 7, 4, 15, 14, 3, 11, 5, 2, 12],\n ],\n\n [[7, 13, 14, 3, 0, 6, 9, 10, 1, 2, 8, 5, 11, 12, 4, 15],\n [13, 8, 11, 5, 6, 15, 0, 3, 4, 7, 2, 12, 1, 10, 14, 9],\n [10, 6, 9, 0, 12, 11, 7, 13, 15, 1, 3, 14, 5, 2, 8, 4],\n [3, 15, 0, 6, 10, 1, 13, 8, 9, 4, 5, 11, 12, 7, 2, 14],\n ],\n\n [[2, 12, 4, 1, 7, 10, 11, 6, 8, 5, 3, 15, 13, 0, 14, 9],\n [14, 11, 2, 12, 4, 7, 13, 1, 5, 0, 15, 10, 3, 9, 8, 6],\n [4, 2, 1, 11, 10, 13, 7, 8, 15, 9, 12, 5, 6, 3, 0, 14],\n [11, 8, 12, 7, 1, 14, 2, 13, 6, 15, 0, 9, 10, 4, 5, 3],\n ],\n\n [[12, 1, 10, 15, 9, 2, 6, 8, 0, 13, 3, 4, 14, 7, 5, 11],\n [10, 15, 4, 2, 7, 12, 9, 5, 6, 1, 13, 14, 0, 11, 3, 8],\n [9, 14, 15, 5, 2, 8, 12, 3, 7, 0, 4, 10, 1, 13, 11, 6],\n [4, 3, 2, 12, 9, 5, 15, 10, 11, 14, 1, 7, 6, 0, 8, 13],\n ],\n\n [[4, 11, 2, 14, 15, 0, 8, 13, 3, 12, 9, 7, 5, 10, 6, 1],\n [13, 0, 11, 7, 4, 9, 1, 10, 14, 3, 5, 12, 2, 15, 8, 6],\n [1, 4, 11, 13, 12, 3, 7, 14, 10, 15, 6, 8, 0, 5, 9, 2],\n [6, 11, 13, 8, 1, 4, 10, 7, 9, 5, 0, 15, 14, 2, 3, 12],\n ],\n\n [[13, 2, 8, 4, 6, 15, 11, 1, 10, 9, 3, 14, 5, 0, 12, 7],\n [1, 15, 13, 8, 10, 3, 7, 4, 12, 5, 6, 11, 0, 14, 9, 2],\n [7, 11, 4, 1, 9, 12, 14, 2, 0, 6, 10, 13, 15, 3, 5, 8],\n [2, 1, 14, 7, 4, 10, 8, 13, 15, 12, 9, 0, 3, 5, 6, 11],\n ]\n]\n\nP = [16, 7, 20, 21, 29, 12, 28, 17,\n 1, 15, 23, 26, 5, 18, 31, 10,\n 2, 8, 24, 14, 32, 27, 3, 9,\n 19, 13, 30, 6, 22, 11, 4, 25]\n\nPI = [\n 58, 50, 42, 34, 26, 18, 10, 2,\n 60, 52, 44, 36, 28, 20, 12, 4,\n 62,\t54,\t46,\t38,\t30,\t22,\t14,\t6,\n 64, 56, 48, 40, 32, 24, 16, 8,\n 57,\t49,\t41,\t33,\t25,\t17,\t 9, 1,\n 59, 51, 43, 35, 27, 19, 11, 3,\n 61,\t53,\t45,\t37,\t29,\t21,\t13,\t5,\n 63, 55, 47, 39, 31, 23, 15, 7\n]\n\nPI_1 = [40, 8, 48, 16, 56, 24, 64, 32,\n 39, 7, 47, 15, 55, 23, 63, 31,\n 38, 6, 46, 14, 54, 22, 62, 30,\n 37, 5, 45, 13, 53, 21, 61, 29,\n 36, 4, 44, 12, 52, 20, 60, 28,\n 35, 3, 43, 11, 51, 19, 59, 27,\n 34, 2, 42, 10, 50, 18, 58, 26,\n 33, 1, 41, 9, 49, 17, 57, 25]\n\nSHIFT = [1, 1, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 1]\n\n\n\ndef keyGenerator():\n \n randomKey = []\n randomKey = string_to_bit_array(label0[\"text\"])\n if(label0[\"text\"] == \"\"):\n messagebox.showerror(\"ERROR\", \"You have to choose a key!\")\n return\n print(label0[\"text\"])\n permutKey = permut(randomKey, CP_1)\n splitResL, splitResR = nsplit(permutKey, 28)\n for i in range(16):\n splitResL, splitResR = shift(splitResL, splitResR, SHIFT[i])\n temp = splitResL + splitResR\n subKeys.append(permut(temp, CP_2))\n\n\n\n# dzielenie listy na podlisty o rozmiarze n\ndef nsplit(s, n):\n return [s[k:k+n] for k in range(0, len(s), n)]\n\n\n# permutacja tabeli uzywajac innej tabeli\ndef permut(block, table):\n return [block[x-1] for x in table]\n\n\ndef shift(g, d, n):\n return g[n:] + g[:n], d[n:] + d[:n]\n\ndef string_to_bit_array(text):\n array = list()\n for char in text:\n binval = binValue(char, 8)\n array.extend([int(x) for x in list(binval)])\n return array\n\n\ndef binValue(val, bitsize):\n binval = bin(val)[2:] if isinstance(val, int) else bin(ord(val))[2:]\n if len(binval) > bitsize:\n raise \"binary value larger than the expected size\"\n while len(binval) < bitsize:\n binval = \"0\"+binval\n return binval\n\n\ndef bit_array_to_string(array):\n res = ''.join([chr(int(y, 2)) for y in [''.join([str(x)\n for x in _bytes]) for _bytes in nsplit(array, 8)]])\n return res\n\n#nadmiarowe bity dodawane dla wyrazów, których dl. 
nie jest wielokrotnoscia 8\ndef addPadding(text):\n pad_len = 8 - (len(text) % 8)\n text += pad_len * chr(pad_len)\n return text\n\n#usuwa nadmiarowe bity(padding) przy załozeniu, ze ten wystąpił\ndef removePadding(data):\n pad_len = ord(data[-1])\n return data[:-pad_len]\n\n#wykonuje xor i zwraca nową liste\ndef xor(t1, t2):\n return [x ^ y for x, y in zip(t1, t2)]\n\n\ndef substitute(d_e):\n subblocks = nsplit(d_e, 6)\n result = list()\n for i in range(len(subblocks)):\n block = subblocks[i]\n row = int(str(block[0])+str(block[5]), 2)\n column = int(''.join([str(x) for x in block[1:][:-1]]), 2)\n val = S_BOX[i][row][column]\n bin = binValue(val, 4)\n result += [int(x) for x in bin]\n return result\n \n\ndef clicked(value):\n label0[\"text\"] = value\n \n\nsubKeys = []\nwindow = Tk()\nwindow.title(\"DES\")\nwindow.configure(bg='#90c4f5')\n\nframe0 = LabelFrame(window, text=\"ChooseKey\", padx=100, pady=25, bg=\"#bad7f5\")\nframe0.pack(padx=10, pady=10)\n\nMODES = [\n #wygenerowane klucze\n (\"Key1\",\"testtest\"),\n (\"Key2\",\"p0oiss13\"),\n (\"Key3\",\"98sstxId\"),\n (\"Key4\",\"aaV7fb13\"),\n (\"Key5\",\"Vd3o1c9Z\"),\n]\n\nklucz = StringVar()\n\nfor textt, mode in MODES:\n Radiobutton(frame0,text=textt, variable = klucz, value=mode ,command=lambda:clicked(klucz.get())).pack(pady=2)\n\nlabel0 = Label(frame0, text=klucz.get())\nlabel0.pack()\n\n\nframe1 = LabelFrame(window, text=\"Encryption\", padx=100, pady=50,bg=\"#bad7f5\")\nframe1.pack(padx=10, pady=10) \n\nlabel1 = Label(frame1,text=\"This is the message to encrypt:\", bg=\"#529ae3\")\nlabel1.grid(column = 0, row = 1, padx=20)\n\ndef encrypt():\n \n keyGenerator()\n message = txt.get()\n if(len(message) % 8 != 0):\n message = addPadding(message)\n\n result = list()\n Text = nsplit(message, 8)\n for block in Text:\n block = string_to_bit_array(block)\n block = permut(block, PI)\n blockLeft, blockRight = nsplit(block, 32)\n for i in range(16):\n blockRightAftPermE = permut(blockRight, E)\n temp = xor(subKeys[i], blockRightAftPermE)\n temp = substitute(temp)\n temp = permut(temp, P)\n temp = xor(blockLeft, temp)\n blockLeft = blockRight\n blockRight = temp\n result += permut(blockRight + blockLeft, PI_1)\n final_res = bit_array_to_string(result)\n print(final_res)\n resEncryp[\"text\"] = final_res\n return final_res\n\nbtn = Button(frame1, text=\"Encrypt\", command=encrypt,bg=\"blue\")\nbtn.grid(column=2, row=1,padx=5, pady=5)\n\n\ntxt = Entry(frame1, width=30, borderwidth=5, bg=\"#6aaceb\", fg=\"white\" )\ntxt.grid(column = 1, row = 1,padx=5, pady=5)\ntxt.focus()\n\nEncryp = Label(frame1, text =\"Encryption result: \", bg=\"#529ae3\") \nEncryp.grid(column = 0, row = 2,padx=5, pady=5)\nresEncryp = Label(frame1, text =\"******\", bg=\"#2154a1\") \nresEncryp.grid(column = 1, row = 2)\n\ndef decrypt():\n \n \n message = encrypt()\n if(message == \"\"):\n resDecryp[\"text\"] = \"there is no message\"\n return\n if(len(message) % 8 != 0):\n message = addPadding(message)\n\n textPad = 8 - len(message) % 8\n result = list()\n Text = nsplit(message, 8)\n for block in Text:\n block = string_to_bit_array(block)\n block = permut(block, PI)\n blockLeft, blockRight = nsplit(block, 32)\n for i in range(16):\n blockRightAftPermE = permut(blockRight, E)\n temp = xor(subKeys[15-i], blockRightAftPermE)\n temp = substitute(temp)\n temp = permut(temp, P)\n temp = xor(blockLeft, temp)\n blockLeft = blockRight\n blockRight = temp\n result += permut(blockRight + blockLeft, PI_1)\n final_res = bit_array_to_string(result)\n subKeys.clear()\n if textPad == 
8:\n resDecryp[\"text\"] = final_res\n else:\n resDecryp[\"text\"] = removePadding(final_res)\n\nframe2 = LabelFrame(window, text=\"Decryption\", padx=100, pady=50, bg=\"#bad7f5\")\nframe2.pack(padx=10, pady=10) \nbtn2 = Button(frame2, text=\"Decrypt\", command=decrypt, bg=\"blue\")\nbtn2.grid(column=1, row=4,padx=5, pady=5)\n\nDecryp = Label(frame2, text =\"Decryption result\", bg=\"#529ae3\") \nDecryp.grid(column = 0, row = 5,padx=5, pady=5)\n\nresDecryp = Label(frame2, text =\"-----\", bg=\"#2154a1\") \nresDecryp.grid(column = 1, row = 5,padx=5, pady=5)\n\nbutton_quit = Button(window, text=\"Exit\", command=window.quit, bg=\"#ff5252\", borderwidth=2)\nbutton_quit.pack(pady=10 )\n\nwindow.mainloop()\n\n\n \n" } ]
2
alvarofpp/atividade_seguranca_em_redes
https://github.com/alvarofpp/atividade_seguranca_em_redes
cfc7379b6f21ade6f5a2ee4575db50b05a13dd30
f036b425f838c2597199574ace30df2a98c1f062
5940e2e5c7aad0f63c914e840846a5bc9f6daf49
refs/heads/master
2021-05-04T09:20:11.012101
2018-08-09T20:41:01
2018-08-09T20:41:01
69,712,449
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.4559686779975891, "alphanum_fraction": 0.4940639138221741, "avg_line_length": 34.165138244628906, "blob_id": "2a64f90451f5df81e8e04851992882cc860612d7", "content_id": "e781639f42d25e0d6dcdb0ae2022a51f01150e4e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 7737, "license_type": "no_license", "max_line_length": 109, "num_lines": 218, "path": "/assets/php/encrypt/s_des_script.php", "repo_name": "alvarofpp/atividade_seguranca_em_redes", "src_encoding": "UTF-8", "text": "<?php\n\n/**\n * Created by PhpStorm.\n * User: alvarofpp\n * Date: 09/09/16\n * Time: 21:05\n */\nclass SDES\n{\n var $P10, $P8; // Permutações\n var $PI, $PI_1; // Permutações Inicial e Final\n var $EP, $P4; // Permutações da função ep\n var $S0, $S1; // Matrizes\n var $bits_file_array; // Array com os bits\n var $subkeys; // Array com as chaves\n var $key; // Chave\n\n function __construct()\n {\n // Inicializar as variaveis\n // Permutações para geração de keys\n $this->P10 = array(1 => 3, 2 => 5, 3 => 2, 4 => 7, 5 => 4, 6 => 10, 7 => 1, 8 => 9, 9 => 8, 10 => 6);\n $this->P8 = array(1 => 6, 2 => 3, 3 => 7, 4 => 4, 5 => 8, 6 => 5, 7 => 10, 8 => 9);\n\n // Permutações para cifragem\n $this->PI = array(1 => 2, 2 => 6, 3 => 3, 4 => 1, 5 => 4, 6 => 8, 7 => 5, 8 => 7);\n $this->PI_1 = array(1 => 4, 2 => 1, 3 => 3, 4 => 5, 5 => 7, 6 => 2, 7 => 8, 8 => 6);\n\n // Permutações da função ep\n $this->EP = array(1 => 4, 2 => 1, 3 => 2, 4 => 3, 5 => 2, 6 => 3, 7 => 4, 8 => 1);\n $this->P4 = array(1 => 2, 2 => 4, 3 => 3, 4 => 1);\n\n // Matrizes\n $this->S0 = array(\n 0 => array(1, 0, 3, 2),\n 1 => array(3, 2, 1, 0),\n 2 => array(0, 2, 1, 3),\n 3 => array(3, 1, 3, 2));\n $this->S1 = array(\n 0 => array(0, 1, 2, 3),\n 1 => array(2, 0, 1, 3),\n 2 => array(3, 0, 1, 0),\n 3 => array(2, 1, 0, 3));\n\n // Keys\n $this->subkeys = array();\n }\n\n // Realiza a criptação do arquivo\n public function s_des()\n {\n $texto_saida = '';\n foreach ($this->bits_file_array as $byte) {\n $new_byte = $this->permutacao($byte, $this->PI); // Permutação Inicial\n $byte_div = str_split($new_byte, 4); // Divide em 2 blocos com 4 bits\n\n // Primeira sequência\n $byte_right = $this->f($byte_div[1], $this->subkeys[0]);\n $byte_div[0] = implode('', $this->op_xor(str_split($byte_right), str_split($byte_div[0])));\n list($byte_div[1], $byte_div[0]) = array($byte_div[0], $byte_div[1]); // SW\n\n // Segunda sequência\n $byte_right = $this->f($byte_div[1], $this->subkeys[1]);\n $byte_div[0] = implode('', $this->op_xor(str_split($byte_right), str_split($byte_div[0])));\n $final_byte = $this->permutacao(implode(\"\", $byte_div), $this->PI_1); // Permutação Final\n $texto_saida = $texto_saida . 
chr(bindec($final_byte));\n }\n return $texto_saida;\n }\n\n // Realiza a decriptação do arquivo\n public function s_des_1()\n {\n $texto_saida = '';\n foreach ($this->bits_file_array as $byte) {\n $new_byte = $this->permutacao($byte, $this->PI); // Permutação Inicial\n $byte_div = str_split($new_byte, 4); // Divide em 2 blocos com 4 bits\n\n // Primeira sequência\n $byte_right = $this->f($byte_div[1], $this->subkeys[1]);\n $byte_div[0] = implode('', $this->op_xor(str_split($byte_right), str_split($byte_div[0])));\n list($byte_div[1], $byte_div[0]) = array($byte_div[0], $byte_div[1]); // SW\n\n // Segunda sequência\n $byte_right = $this->f($byte_div[1], $this->subkeys[0]);\n $byte_div[0] = implode('', $this->op_xor(str_split($byte_right), str_split($byte_div[0])));\n $final_byte = $this->permutacao(implode(\"\", $byte_div), $this->PI_1); // Permutação Final\n $texto_saida = $texto_saida . chr(bindec($final_byte));\n }\n return $texto_saida;\n }\n\n /*\n * $bits_right: função F com o lado direito do byte\n * $subkey: subkey que será usado para as operações\n * Função f do S-DES (vide slide)\n */\n public function f($bits_right, $subkey)\n {\n $bits_right = $this->permutacao($bits_right, $this->EP);\n $bits_right_xor = implode('', $this->op_xor(str_split($bits_right), $subkey));\n $s = str_split($bits_right_xor, 4);\n $ss = $this->s($s[0], 0) . $this->s($s[1], 1);\n $ss = $this->permutacao($ss, $this->P4);\n return $ss;\n }\n\n /*\n * $array0, $array1: arrays que serão comparaddos\n * Serve para realizar a operação XOR entre arrays, retorna o array de bits comparados\n */\n public function op_xor($array0, $array1)\n {\n $array = array();\n for ($i = 0; $i < sizeof($array0); $i++) {\n $array[$i] = ($array0[$i] xor $array1[$i]) ? '1' : '0';\n }\n return $array;\n }\n\n /*\n * $s: 4 bits\n * $matriz: matriz que será consultada\n * Recebe 4 bits e encontra o valor na matriz, retorna 2 bits\n */\n public function s($s, $matriz)\n {\n $s = str_split($s);\n // Posições na matriz\n $l = bindec($s[0] . $s[3]); // Linha\n $c = bindec($s[1] . $s[2]); // Coluna\n // Valores na matriz\n $valor = 0;\n switch ($matriz) {\n case 0:\n $valor = $this->S0[$l][$c];\n break;\n case 1:\n $valor = $this->S1[$l][$c];\n break;\n }\n $valor = str_pad(decbin($valor), 2, 0, STR_PAD_LEFT); // 2 bits\n return $valor;\n }\n\n /*\n * $bloco: É o bloco de bits\n * $p_valor: É o valor da permutação (10 ou 8)\n * Essa função irá realizar a permutação do bloco de acordo com o $p_valor definido\n */\n public function permutacao($bloco, $array_permutacao)\n {\n $bits = str_split($bloco);\n $novo_bloco = array();\n // Muda as posições\n foreach ($array_permutacao as $p) {\n array_push($novo_bloco, $bits[--$p]);\n }\n return implode($novo_bloco);\n }\n\n /*\n * $bloco: É o bloco de bits\n * $num: É o número de casas que será deslocado para a esquerda\n * Essa função irá realizar a locomoção de casas do bloco enviado para a esquerda\n */\n public function ls($bloco, $num)\n {\n $len = strlen($bloco);\n $new_bloco = array();\n foreach (str_split($bloco) as $key => $value) {\n $posicao = ($key - $num < 0) ? 
$len + ($key - $num) : ($key - $num);\n $new_bloco[$posicao] = $value;\n }\n\n ksort($new_bloco);\n return implode($new_bloco);\n }\n\n // Pega os bytes do arquivo escolhido\n public function getFileBits($texto)\n {\n $bits = '';\n $caracteres = str_split($texto);\n foreach ($caracteres as $caracter) {\n $ascii = ord($caracter); // Decimal\n $binary = decbin($ascii); // Binário\n $byte = str_pad($binary, 8, 0, STR_PAD_LEFT); // Byte\n $bits = $bits . $byte;\n }\n $this->bits_file_array = str_split($bits, 8);\n }\n\n // Cria as subkeys de acordo com a key definida\n public function key_generation()\n {\n $key_10 = $this->permutacao($this->key, $this->P10);\n $key_10_div = str_split($key_10, 5); // Divide em 2 blocos com 5 bits\n\n // Realiza a locomoção\n $key_10_div[0] = $this->ls($key_10_div[0], 1);\n $key_10_div[1] = $this->ls($key_10_div[1], 1);\n $key_10 = implode($key_10_div); // Junta tudo\n $key_8 = $this->permutacao($key_10, $this->P8);\n array_push($this->subkeys, $key_8); // Salva a key\n\n $key_10_div = str_split($key_10, 5);\n // Realiza a locomoção\n $key_10_div[0] = $this->ls($key_10_div[0], 2);\n $key_10_div[1] = $this->ls($key_10_div[1], 2);\n $key_10 = implode($key_10_div); // Junta tudo\n $key_8 = $this->permutacao($key_10, $this->P8);\n array_push($this->subkeys, $key_8); // Salva a key\n }\n}\n\n?>" }, { "alpha_fraction": 0.6460905075073242, "alphanum_fraction": 0.6460905075073242, "avg_line_length": 36.46154022216797, "blob_id": "4e489bad551fb2feb4b76a5cd2c8061dbe2f7d1a", "content_id": "07af1e8432568e212466562de6b8c7624846881a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 492, "license_type": "no_license", "max_line_length": 92, "num_lines": 13, "path": "/assets/php/encrypt/dh_change_num.php", "repo_name": "alvarofpp/atividade_seguranca_em_redes", "src_encoding": "UTF-8", "text": "<?php\n$num = $_GET['num']; // Número novo\n\n$dh_file = file_get_contents('dh.json'); // Ler JSON\n$dh = json_decode($dh_file, true); // Decodifica JSON\nsession_start(); // Inicia sessão\n$dh['users'][$_SESSION['username']] = (int)$num; // Atribui novo número ao do usuário logado\n\n$json = fopen('dh.json', 'w+'); // Abre arquivo JSON para escrita\nfwrite($json, json_encode($dh)); // Escreve novas configurações\nfclose($json); // Fecha\n\nheader(\"Location: ../../../chat.php\"); // Redireciona" }, { "alpha_fraction": 0.6132858991622925, "alphanum_fraction": 0.6204033493995667, "avg_line_length": 35.65217208862305, "blob_id": "e620d02bc8d5bb1826fba478675c7568e116c7a2", "content_id": "3240f0d9adaf3315d8a5ae43e4200eb68ae42bb3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 1697, "license_type": "no_license", "max_line_length": 103, "num_lines": 46, "path": "/assets/php/encrypt/dh_finish.php", "repo_name": "alvarofpp/atividade_seguranca_em_redes", "src_encoding": "UTF-8", "text": "<?php\n// Pega arquivo JSON com as configurações\n$json_file = file_get_contents('../database/config.json');\n$json = json_decode($json_file, true);\n\n// Cria objeto da classe chatCriptografia, declarando o tipo de criptografia e a chave usada\ninclude('../../php/database/chat_criptografia.php');\n$cript = new chatCriptografia($json['encryption'], $json['key']);\n$cript->import('../encrypt/s_des_script.php');\n$cript->import('../encrypt/rc4_script.php');\n$cript->encryption = $json['encryption'];\n\n$texto_final = '';\n$chat = fopen('../database/chat.txt', 'r+'); // Pega o chat\n// Ler o arquivo até o 
final\nwhile (!feof($chat)) {\n $linha = fgets($chat, 4096); // Ler uma linha do arquivo e avança o ponteiro\n $final = (feof($chat)) ? 1 : 0; // Verifica se é o final do arquivo\n if ($final == 0) {\n $linha = substr($linha, 0, strlen($linha) - 1); // Se não, retira o último caractere que é \"\\n\"\n }\n\n // Esse IF serve para evitar imprimir algo caso não tenha nada no arquivo\n if (strlen($linha) > 1) {\n $cript->key = $json['key'];\n $texto = $cript->action('d', $linha);\n\n $cript->key = $_GET['psk'];\n $texto_final .= \"\\n\" . $cript->action('c', $texto);\n }\n}\nfclose($chat); // Fecha\n\n$texto_final = substr($texto_final, 1); // Retira o \"\\n\" inicial\n$chat = fopen('../database/chat.txt', 'w+'); // Abre chat para escrita\nfwrite($chat, $texto_final); // Sobrescreve\nfclose($chat); // Fecha\n\n// Salva as configurações no JSON (criptografia e chave)\nsession_start();\n$json['key'] = $_GET['psk'];\n$fp = fopen('../database/config.json', 'w');\nfwrite($fp, json_encode($json));\nfclose($fp);\n\nheader(\"Location: ../../../chat.php\"); // Redireciona\n" }, { "alpha_fraction": 0.4840686321258545, "alphanum_fraction": 0.49203431606292725, "avg_line_length": 32.32653045654297, "blob_id": "871c01f17bb6ff138cd68b42837bddec0b791605", "content_id": "2c001c99a15a020ce71b95d8b2b25d2391336354", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 1634, "license_type": "no_license", "max_line_length": 100, "num_lines": 49, "path": "/assets/php/encrypt/cifra_data.php", "repo_name": "alvarofpp/atividade_seguranca_em_redes", "src_encoding": "UTF-8", "text": "<?php\n\nclass CifraData\n{\n var $data; // Array com a data (dia, mês, ano)\n var $texto;\n var $contador_auxiliar;\n\n function __construct($data, $texto)\n {\n $data = str_replace('/', '', $data); // Retira os /\n $this->data = str_split($data); // Transforma em um array com cada indice sendo um caractere\n $this->texto = $texto; // Atribui valor\n $this->contador_auxiliar = 0; // Atribui valor\n }\n\n // Realiza a cifragem pela data\n public function cifradata($escolha)\n {\n $texto = '';\n $letras = str_split($this->texto); // Transforma em um array com as letras\n // Percorre o array anteriormente criado\n foreach ($letras as $key => $value) {\n $ascii = ord($value); // ASCII\n // Se for cifragem ou se for decifragem\n if ($escolha == 'c') {\n $ascii = $ascii + $this->data[$this->contador_auxiliar];\n if ($ascii > 255) {\n $ascii = $ascii - 255;\n }\n } else {\n $ascii = $ascii - $this->data[$this->contador_auxiliar];\n if ($ascii < 0) {\n $ascii = 255 + $ascii;\n }\n }\n // Verificia contador, se volta ao inicio do indice do array ou se avança\n if ($this->contador_auxiliar >= count($this->data) - 1) {\n $this->contador_auxiliar = 0;\n } else {\n $this->contador_auxiliar++;\n }\n $texto .= chr($ascii); // Transforma em caracter e coloca no texto final\n }\n return $texto;\n }\n}\n\n?>" }, { "alpha_fraction": 0.5044554471969604, "alphanum_fraction": 0.5079208016395569, "avg_line_length": 31.564516067504883, "blob_id": "490d8562927ed9ca582aee249abe2bf827d9664d", "content_id": "7aec337be413be9b6f944d47956d7968461618c2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 2025, "license_type": "no_license", "max_line_length": 110, "num_lines": 62, "path": "/assets/php/encrypt/diffie_hellman.php", "repo_name": "alvarofpp/atividade_seguranca_em_redes", "src_encoding": "UTF-8", "text": "<?php\nsession_start(); // Inicia sessão\n$num = 
explode('-', $_SESSION['num_dh']); // Recolhe em formato de array os números selecionados\n\n$dh_file = file_get_contents('dh.json'); // Ler JSON\n$dh = json_decode($dh_file, true); // Decodifica JSON\n\n// Valores usados\n$Xa = $dh['users'][$_SESSION['username']];\n$Xb = (int)$_GET['user2'];\n$q = (int)$num[0];\n$a = (int)$num[1];\n\n// Primeiros cálculos\n$Ya = intval(fmod(pow($a, $Xa), $q));\n$Yb = intval(fmod(pow($a, $Xb), $q));\n\necho '<b>q</b>:' . $q;\necho '<br/><b>a</b>:' . $a . '<br/>';\necho '<b>Xa</b>:' . $Xa . '<br/>';\necho '<b>Xb</b>:' . $Xb . '<br/>';\necho '<p>(Ya) a<sup>Xa</sup> mod q = ' . $a . '<sup>' . $Xa . '</sup>%' . $q . ' = ' . $Ya . '</p>';\necho '<p>(Yb) a<sup>Xb</sup> mod q = ' . $a . '<sup>' . $Xb . '</sup>%' . $q . ' = ' . $Yb . '</p>';\necho '<br/>';\n\n// Segundos cálculos\n$psk_alice = intval(fmod(pow($Yb, $Xa), $q));\n$psk_bob = intval(fmod(pow($Ya, $Xb), $q));\necho '<p>(PSK) Yb<sup>Xa</sup> mod q = ' . $Yb . '<sup>' . $Xa . '</sup>%' . $q . ' = ' . $psk_alice . '</p>';\necho '<p>(PSK) Ya<sup>Xb</sup> mod q = ' . $Ya . '<sup>' . $Xb . '</sup>%' . $q . ' = ' . $psk_bob . '</p>';\n\n// Salva os dados usados\n$dh['config']['q'] = $q;\n$dh['config']['a'] = $a;\n$dh['config']['Xa'] = $Xa;\n$dh['config']['Xb'] = $Xb;\n$dh['config']['last_update'] = $_SESSION['username'] . '-' . date(\"d/m/Y H:i:s\");\n$json = fopen('dh.json', 'w+');\nfwrite($json, json_encode($dh));\nfclose($json);\n\nif ($psk_alice == $psk_bob) {\n ?>\n <form method=\"get\" action=\"dh_finish.php\">\n <input type=\"hidden\" name=\"psk\" value=\"<?php echo $psk_alice; ?>\"/>\n <input type=\"submit\" value=\"CONTINUAR\"/>\n </form>\n <?php\n} else {\n ?>\n <h1>Error.</h1>\n <h3>SUGESTAO: Tente com outros valores</h3>\n <p>Normalmente isso ocorre devido ao uso de valores muito grande para os cálculos.\n Recomendamos o uso de valores menores.</p>\n\n <form method=\"get\" action=\"../../../chat.php\">\n <input type=\"submit\" value=\"OK\"/>\n </form>\n <?php\n}\n\n?>\n\n" }, { "alpha_fraction": 0.4090301990509033, "alphanum_fraction": 0.41508594155311584, "avg_line_length": 47.405174255371094, "blob_id": "93b8cc1b48b2c4d5ad5c8ead4c4f9b1fb1dae3e7", "content_id": "a85c06d1457b437c330a37c224b445f36fc50970", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 11252, "license_type": "no_license", "max_line_length": 164, "num_lines": 232, "path": "/chat.php", "repo_name": "alvarofpp/atividade_seguranca_em_redes", "src_encoding": "UTF-8", "text": "<html>\n<head>\n <meta charset=\"UTF-8\">\n <title>Chat</title>\n <link rel=\"stylesheet\" href=\"assets/css/chat.css\">\n <link rel=\"stylesheet\" href=\"assets/css/bootstrap.min.css\">\n <script src=\"assets/js/jquery-2.2.0.min.js\"></script>\n <script src=\"assets/js/bootstrap.min.js\"></script>\n</head>\n<body>\n<?php\ninclude('assets/php/database/chat_criptografia.php');\n\n// Arquivo JSON com as configurações (tipo de criptogafia e chave)\n$json_file = file_get_contents('assets/php/database/config.json');\n$json = json_decode($json_file, true);\n\n// Cria objeto da classe chatCriptografia, salvando o tipo de criptografia e a chave usada\n$cript = new chatCriptografia($json['encryption'], $json['key']);\n$cript->import('assets/php/encrypt/s_des_script.php');\n$cript->import('assets/php/encrypt/rc4_script.php');\n?>\n<div class=\"container\">\n <div class=\"row\">\n <div class=\"col-md-4\">\n <b>Criptografia:</b> <?php echo $cript->encryption; ?><br/>\n <b>Key:</b> <?php echo $cript->key; ?>\n </div>\n 
<fieldset>\n <legend>Mudança de senha por esteganografia e cifra de data</legend>\n <div class=\"col-md-4\">\n <form method=\"get\" action=\"assets/py/esteganografia_py/trocar_senha_processo.php\">\n <label for=\"c\">Criptografia:</label><br/>\n <input type=\"radio\" name=\"c\"\n value=\"s_des\" <?php echo ($cript->encryption == 's_des') ? 'checked' : ''; ?>/> S-DES<br/>\n <input type=\"radio\" name=\"c\"\n value=\"rc4\" <?php echo ($cript->encryption == 'rc4') ? 'checked' : ''; ?>/> RC4<br/>\n <input type=\"submit\" value=\"Trocar Senha\"/>\n </form>\n </div>\n <div class=\"col-md-4\">\n <form method=\"post\" action=\"assets/py/esteganografia_py/trocar_senha_validar.php\"\n enctype=\"multipart/form-data\">\n <label for=\"image\">Imagem para validar nova senha:</label><br/>\n <input type=\"file\" name=\"image\" required/><br/>\n <label for=\"data\">Data:</label><br/>\n <input type=\"text\" name=\"data\" placeholder=\"07/07/1822\" required/><br/>\n <input type=\"submit\" value=\"Trocar Senha\"/>\n </form>\n </div>\n </fieldset>\n </div>\n</div>\n<div class=\"container\">\n <div class=\"row\">\n <div class=\"col-md-12\">\n <div class=\"panel panel-primary\">\n <div class=\"panel-heading\">\n <span class=\"glyphicon glyphicon-comment\"></span> Chat\n </div>\n <div class=\"panel-body\">\n <ul class=\"chat\" id=\"chat\">\n <?php\n session_start(); // Inicia sessão\n if (isset($_POST)) $_SESSION['username'] = $_POST['username']; // Salva o nick do usuário se estiver vindo pelo formulário\n if (!isset($_SESSION['username'])) header(\"Location: index.html\"); // Verifica se alguém está tentando acessar a página sem ter logado antes\n\n $contador = 0;\n $ponteiro = fopen('assets/php/database/chat.txt', \"r\"); // Abre o arquivo\n\n // Ler o arquivo até chegar no fim\n while (!feof($ponteiro)) {\n $linha = fgets($ponteiro, 4096); // Ler uma linha do arquivo e avanço o ponteiro\n $final = (feof($ponteiro)) ? 
1 : 0; // Verifica se é o final do arquivo\n if ($final == 0) {\n $linha = substr($linha, 0, strlen($linha) - 1); // Se não, retira o último caractere que é \"\\n\"\n }\n\n // Esse IF serve para evitar imprimir algo caso não tenha nada no arquivo\n if (strlen($linha) > 1) {\n $contador++; // Se tiver, aumenta o contador\n\n $texto = $cript->action('d', $linha); // Decripta\n $chat = explode(\";;;\", $texto); // Divide o texto decriptado\n\n // Imprime o texto de duas formas, caso seja o usuário proprietário ou não\n if ($chat[0] == $_SESSION['username']) {\n ?>\n <li class=\"right clearfix\"><span class=\"chat-img pull-right\">\n <img src=\"http://placehold.it/50/FA6F57/fff&text=ME\" alt=\"User Avatar\"\n class=\"img-circle\"/>\n </span>\n <div class=\"chat-body clearfix\">\n <div class=\"header\">\n <small class=\" text-muted\"><span\n class=\"glyphicon glyphicon-time\"></span><?php echo $chat[1]; ?>\n </small>\n <strong class=\"pull-right primary-font\"><?php echo $chat[0]; ?></strong>\n </div>\n <p><b><?php echo $chat[2]; ?></b></p>\n </div>\n </li>\n <?php\n } else {\n ?>\n <li class=\"left clearfix\"><span class=\"chat-img pull-left\">\n <img src=\"http://placehold.it/50/55C1E7/fff&text=U\" alt=\"User Avatar\"\n class=\"img-circle\"/>\n </span>\n <div class=\"chat-body clearfix\">\n <div class=\"header\">\n <strong class=\"primary-font\"><?php echo $chat[0]; ?></strong>\n <small class=\"pull-right text-muted\">\n <span\n class=\"glyphicon glyphicon-time\"></span><?php echo $chat[1]; ?>\n </small>\n </div>\n <p><b><?php echo $chat[2]; ?></b></p>\n </div>\n </li>\n <?php\n }\n }\n }\n fclose($ponteiro); // Fecha o ponteiro do arquivo\n ?>\n </ul>\n </div>\n <div class=\"panel-footer\">\n <div class=\"input-group\">\n <input id=\"btn-input\" type=\"text\" class=\"form-control input-sm\"\n placeholder=\"Type your message here...\"/>\n <span class=\"input-group-btn\">\n <button class=\"btn btn-warning btn-sm\" id=\"btn-chat\">\n Send</button>\n </span>\n </div>\n <input type=\"hidden\" id=\"username\" value=\"<?php echo $_POST['username']; ?>\"/>\n </div>\n </div>\n </div>\n </div>\n</div>\n\n\n<div class=\"container\">\n <div class=\"row\">\n <fieldset>\n <legend>Diffie-Hellman</legend>\n <?php\n $dh_file = file_get_contents('assets/php/encrypt/dh.json');\n $dh = json_decode($dh_file, true);\n\n foreach ($dh as $key => $value) {\n switch ($key) {\n case 'config':\n break;\n case 'users':\n // Se não tiver um número definido para o Diffie-Hellman\n if (!isset($dh[$key][$_SESSION['username']])) {\n $dh[$key][$_SESSION['username']] = 1;\n }\n break;\n }\n }\n ?>\n <div class=\"col-md-4\">\n <form method=\"get\" action=\"assets/php/encrypt/dh_change_num.php\">\n <label for=\"num\">Número:</label><br/>\n <input type=\"input\" name=\"num\" value=\"<?php echo $dh['users']['alvarofpp']; ?>\" required/><br/>\n <input type=\"submit\" value=\"Trocar número\"/>\n </form>\n </div>\n <div class=\"col-md-4\">\n <form method=\"post\" action=\"assets/php/encrypt/dh_init.php\"\n enctype=\"multipart/form-data\">\n <label for=\"image\">q:</label><br/>\n <input type=\"input\" name=\"q\" value=\"<?php echo $dh['config']['q']; ?>\" required/><br/>\n <label for=\"image\">a:</label><br/>\n <input type=\"input\" name=\"a\" value=\"<?php echo $dh['config']['a']; ?>\" required/><br/>\n <input type=\"submit\" value=\"Realizar algoritmo Diffie-Hellman\"/>\n </form>\n </div>\n </fieldset>\n </div>\n</div>\n\n\n<script>\n var contador = <?php echo $contador; ?>;\n var json = <?php echo json_encode($json); ?>;\n\n // A 
// Every second, check whether a new message arrived\n    setInterval(function () {\n        $.post(\"assets/php/database/chat_verificar.php\", {\n            'contador': contador,\n            'json': json\n        }, function (data) {\n        }).success(function (data) {\n            if (data.length > 0) {\n                // If there is a new message, clean up the returned text and parse it as JSON\n                var json = JSON.parse(data);\n                json = json.replace(\"\\n\", '');\n                json = JSON.parse(json);\n\n                $('#chat').append('<li class=\"left clearfix\"><span class=\"chat-img pull-left\">'\n                    + '<img src=\"http://placehold.it/50/55C1E7/fff&text=U\" alt=\"User Avatar\" class=\"img-circle\"/>'\n                    + '</span>'\n                    + '<div class=\"chat-body clearfix\">'\n                    + '<div class=\"header\">'\n                    + '<strong class=\"primary-font\">' + json[0].username + '</strong>'\n                    + '<small class=\"pull-right text-muted\">'\n                    + '<span class=\"glyphicon glyphicon-time\"></span>' + json[0].data_hora\n                    + '</small>'\n                    + '</div>'\n                    + '<p><b>'\n                    + json[0].mensagem\n                    + '</b></p>'\n                    + '</div>'\n                    + '</li>');\n                contador++;\n                console.log(\"Message received!\");\n            }\n        }).error(function () {\n            alert(\"error while fetching messages\");\n        });\n        return true;\n    }, 1000);\n</script>\n<script src=\"assets/js/chat.js\"></script>\n</body>\n</html>" }, { "alpha_fraction": 0.4703989624977112, "alphanum_fraction": 0.4723294675350189, "avg_line_length": 27.796297073364258, "blob_id": "8370aa3aa4a97a50972ebf432c78a48b9da68611", "content_id": "ebe1106abc17f297f2cd95ab1a7eadda54e80afa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 1562, "license_type": "no_license", "max_line_length": 86, "num_lines": 54, "path": "/assets/php/database/chat_criptografia.php", "repo_name": "alvarofpp/atividade_seguranca_em_redes", "src_encoding": "UTF-8", "text": "<?php\n\nclass chatCriptografia\n{\n    var $encryption; // Cipher type\n    var $key; // Key in use\n\n    // The constructor starts by initializing the variables with the cipher type\n    // and the key to use\n    function __construct($encryption, $key)\n    {\n        $this->encryption = $encryption;\n        $this->key = $key;\n    }\n\n    // Imports the files this class needs in order to run correctly\n    public function import($include)\n    {\n        include($include);\n    }\n\n    // Performs the encrypt or decrypt action\n    public function action($action, $texto)\n    {\n        $return = ''; // Variable that will hold the returned text\n        switch ($this->encryption) {\n            case 's_des':\n                $sdes = new SDES();\n                $sdes->key = $this->key; // CONVERSATION KEY\n                $sdes->key_generation(); // SUBKEYS\n                $sdes->getFileBits($texto); // GET THE INPUT BITS\n\n                switch ($action) {\n                    case 'c':\n                        $return = $sdes->s_des(); // OUTPUT\n                        break;\n                    case 'd':\n                        $return = $sdes->s_des_1(); // OUTPUT\n                        break;\n                }\n                break;\n            case 'rc4':\n                $rc = new RC4();\n                $rc->getFileBits($texto); // GET THE INPUT BITS\n                $rc->key = $this->key; // CONVERSATION KEY\n\n                $return = $rc->rc();\n                break;\n        }\n        return $return;\n    }\n}\n\n?>" }, { "alpha_fraction": 0.5527156591415405, "alphanum_fraction": 0.584664523601532, "avg_line_length": 27.647058486938477, "blob_id": "2fd3c85c5d525aa3876a2c58864858081b8e3693", "content_id": "ef697e2f6ca930c8824407f84282274a39a97727", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 939, "license_type": "no_license", "max_line_length": 83, "num_lines": 34, "path": "/assets/py/esteganografia_py/trocar_senha_processo.php", "repo_name": "alvarofpp/atividade_seguranca_em_redes", "src_encoding": "UTF-8", "text": 
"<?php\n\n$nova_senha = '';\nswitch ($_GET['c']) {\n    case 's_des':\n        for ($i = 0; $i < 10; $i++) {\n            $nova_senha .= rand(0, 1);\n        }\n        break;\n    case 'rc4':\n        $nova_senha = generateRandomString(rand(4, 10));\n        break;\n}\n$texto_final = $_GET['c'] . '-' . $nova_senha;\n\ninclude('../../php/encrypt/cifra_data.php');\n$cifradata = new CifraData('07/07/1822', $texto_final);\n$texto_final = $cifradata->cifradata('c');\n\n$command = escapeshellcmd(\"python esconder.py '\" . $texto_final . \"'\");\n$output = shell_exec($command);\n\nheader(\"Location: ../../../trocar_senha.php\");\n\nfunction generateRandomString($length)\n{\n    $characters = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ';\n    $charactersLength = strlen($characters);\n    $randomString = '';\n    for ($i = 0; $i < $length; $i++) {\n        $randomString .= $characters[rand(0, $charactersLength - 1)];\n    }\n    return $randomString;\n}" }, { "alpha_fraction": 0.6003689765930176, "alphanum_fraction": 0.6092250943183899, "avg_line_length": 32.04878234863281, "blob_id": "9018ce52b88c122c4edb0cdf579c9f16e5f70d29", "content_id": "4535b01bcc48ce12a9bf31df4212c946dd9cce10", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 2722, "license_type": "no_license", "max_line_length": 103, "num_lines": 82, "path": "/assets/py/esteganografia_py/trocar_senha_validar.php", "repo_name": "alvarofpp/atividade_seguranca_em_redes", "src_encoding": "UTF-8", "text": "<html>\n<head>\n    <title>New password</title>\n    <meta charset=\"UTF-8\"/>\n</head>\n<body>\n<?php\n$target_dir = \"\";\n$target_file = $target_dir . basename($_FILES[\"image\"][\"name\"]);\n\n// Save the uploaded image\nif (move_uploaded_file($_FILES[\"image\"][\"tmp_name\"], $target_file)) {\n    echo \"The file \" . basename($_FILES[\"image\"][\"name\"]) . \" has been uploaded.\";\n} else {\n    echo \"Sorry, there was an error uploading your file.\";\n}\n\n$command = escapeshellcmd('python mostrar.py');\n$output = shell_exec($command); // Receives the hidden message\n\ninclude('../../php/encrypt/cifra_data.php');\n$cifradata = new CifraData($_POST['data'], $output);\n$texto_final = $cifradata->cifradata('d');\n\n$config = explode('-', $texto_final); // Split into an array\n$config[1] = substr($config[1], 0, strlen($config[1]) - 1); // Strip the \"\\n\"\necho '<br/>The new password is: <b>' . $config[1] . '</b>';\n\n// Load the JSON file with the settings\n$json_file = file_get_contents('../../php/database/config.json');\n$json = json_decode($json_file, true);\n\n// Create a chatCriptografia object, storing the cipher type and the key in use\ninclude('../../php/database/chat_criptografia.php');\n$cript = new chatCriptografia($json['encryption'], $json['key']);\n$cript->import('../../php/encrypt/s_des_script.php');\n$cript->import('../../php/encrypt/rc4_script.php');\n\n$texto_final = '';\n// Open the chat log\n$chat = fopen('../../php/database/chat.txt', 'r+');\n// Read the file to the end\nwhile (!feof($chat)) {\n    $linha = fgets($chat, 4096); // Read one line from the file and advance the pointer\n    $final = (feof($chat)) ? 
1 : 0; // Check whether this is the end of the file\n    if ($final == 0) {\n        $linha = substr($linha, 0, strlen($linha) - 1); // If not, strip the trailing \"\\n\" character\n    }\n\n    // This IF avoids processing anything when the file is empty\n    if (strlen($linha) > 1) {\n        $cript->encryption = $json['encryption'];\n        $cript->key = $json['key'];\n        $texto = $cript->action('d', $linha);\n\n        $cript->encryption = $config[0];\n        $cript->key = $config[1];\n        $texto_final .= \"\\n\" . $cript->action('c', $texto);\n    }\n}\nfclose($chat);\n$texto_final = substr($texto_final, 1); // Strip the leading \"\\n\"\n$chat = fopen('../../php/database/chat.txt', 'w+');\nfwrite($chat, $texto_final);\nfclose($chat);\n\n// Save the settings (cipher and key)\nsession_start();\n$json['encryption'] = $config[0];\n$json['key'] = $config[1];\n$fp = fopen('../../php/database/config.json', 'w');\nfwrite($fp, json_encode($json));\nfclose($fp);\n\necho '<br/>Cipher: <b>' . $config[0] . '</b>';\n?>\n\n<form method=\"GET\" action=\"../../../chat.php\">\n    <input type=\"submit\" value=\"OKAY\"/>\n</form>\n</body>\n</html>\n" }, { "alpha_fraction": 0.5832214951515198, "alphanum_fraction": 0.5899328589439392, "avg_line_length": 42.85293960571289, "blob_id": "c497a71d6c6195dd3ecdd64f202f238f6075ef83", "content_id": "d77d7a220f16355fd452a7f8c7048d84a4eff75a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 1499, "license_type": "no_license", "max_line_length": 124, "num_lines": 34, "path": "/assets/php/database/chat_verificar.php", "repo_name": "alvarofpp/atividade_seguranca_em_redes", "src_encoding": "UTF-8", "text": "<?php\ninclude('chat_criptografia.php');\n$contador = $_POST['contador']; // The counter holds how many lines were read so far\n\n$linhas = count(file('chat.txt')); // Count the number of lines in the chat file\n$texto_final = '';\n\n// Check whether there are lines the client has not seen yet\nif ($contador < $linhas) {\n    // Create a chatCriptografia object, storing the cipher type and the key in use\n    $cript = new chatCriptografia($_POST['json']['encryption'], $_POST['json']['key']);\n    $cript->import('../encrypt/s_des_script.php');\n    $cript->import('../encrypt/rc4_script.php');\n\n    $lendo = fopen('chat.txt', \"r+\");\n    $i = 1;\n    // Read the file until the end\n    while (!feof($lendo)) {\n        $linha = fgets($lendo, 4096); // Read one line from the file and advance the pointer\n        // When this IF triggers, the line is unread:\n        // it comes after the lines the user already received\n        if ($i > $contador) {\n            $texto = $cript->action('d', $linha); // Decrypt\n            $info = explode(\";;;\", $texto); // Split into an array\n            // Format as JSON\n            $texto_final .= ',{\"username\":\"' . $info[0] . '\",\"data_hora\":\"' . $info[1] . '\",\"mensagem\":\"' . $info[2] . '\"}';\n        }\n        $i++;\n    }\n    fclose($lendo); // Close the handle\n    $texto_final = substr($texto_final, 1); // Drop the leading \",\"\n    echo json_encode('[' . $texto_final . 
']'); // Encode the JSON text as a JSON string (the client parses it twice)\n}\nreturn true;" }, { "alpha_fraction": 0.4399031102657318, "alphanum_fraction": 0.4586739242076874, "avg_line_length": 27.482759475708008, "blob_id": "a340c022d1e51c61015b3d79c9c5e0beeafcc894", "content_id": "f812f82018763d14b0545c82233f58cc4dab4133", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 3317, "license_type": "no_license", "max_line_length": 90, "num_lines": 116, "path": "/assets/php/encrypt/rc4_script.php", "repo_name": "alvarofpp/atividade_seguranca_em_redes", "src_encoding": "UTF-8", "text": "<?php\n\n/**\n * Created by PhpStorm.\n * User: alvarofpp\n * Date: 09/09/16\n * Time: 21:05\n */\nclass RC4\n{\n    var $S, $S2, $T, $C; // State vectors (S, T) and output bytes (C)\n    var $bits_file_array; // Input split into 8-bit chunks\n    var $key; // Key\n    var 
$pseudo_random_key; // Generated keystream bytes\n\n    function __construct()\n    {\n        $this->S = array();\n        $this->S2 = array();\n        $this->T = array();\n        $this->C = array();\n        $this->pseudo_random_key = array();\n    }\n\n    public function rc(){\n        $this->inicializacao_vetores(); // INITIALIZE THE VECTORS\n        $this->permutacao(); // INITIAL PERMUTATION (KSA)\n        $this->geracao_fluxo(); // KEYSTREAM GENERATION (PRGA)\n        $this->realizar_xor();\n        $texto = '';\n        foreach ($this->C as $c){\n            $texto = $texto . chr($c);\n        }\n        return $texto;\n    }\n\n    // Vector initialization\n    public function inicializacao_vetores()\n    {\n        $key = str_split($this->key);\n        $length_key = sizeof($key);\n        $c = 0;\n        for ($i = 0; $i < 256; $i++) {\n            $this->S[$i] = $i; // initialize vector S\n            $this->T[$i] = $key[$c]; // initialize vector T with the key, repeated as needed\n            $c = ($c + 1) == $length_key ? 0 : $c + 1;\n        }\n    }\n\n    // Initial permutation\n    public function permutacao()\n    {\n        $j = 0;\n        for ($i = 0; $i < 256; $i++) {\n            $j = ($j + $this->S[$i] + ord($this->T[$i])) % 256; // MOD\n            list($this->S[$i], $this->S[$j]) = array($this->S[$j], $this->S[$i]); // swap\n        }\n    }\n\n    // Keystream generation\n    public function geracao_fluxo()\n    {\n        $i = 0;\n        $j = 0;\n        $size = sizeof($this->bits_file_array);\n        while ($size > 0) {\n            $i = ($i + 1) % 256;\n            $j = ($j + $this->S[$i]) % 256;\n            list($this->S[$i], $this->S[$j]) = array($this->S[$j], $this->S[$i]); // swap\n            $k = $this->S[($this->S[$i] + $this->S[$j]) % 256];\n            $this->pseudo_random_key[] = $k;\n            $size--;\n        }\n    }\n\n    public function realizar_xor()\n    {\n        $size = sizeof($this->bits_file_array);\n        for ($i = 0; $i < $size; $i++) {\n            $byte = str_pad(decbin($this->pseudo_random_key[$i]), 8, 0, STR_PAD_LEFT);\n            $array0 = str_split($byte);\n            $array1 = str_split($this->bits_file_array[$i]);\n            $num = bindec(implode('', $this->op_xor($array0, $array1)));\n            $this->C[] = $num;\n        }\n    }\n\n    /*\n     * $array0, $array1: the arrays to be combined\n     * Performs the XOR operation between both arrays and returns the resulting bit array\n     */\n    public function op_xor($array0, $array1)\n    {\n        $array = array();\n        for ($i = 0; $i < sizeof($array0); $i++) {\n            $array[$i] = ($array0[$i] xor $array1[$i]) ? '1' : '0';\n        }\n        return $array;\n    }\n\n    // Get the bits of the given input\n    public function getFileBits($texto)\n    {\n        $bits = '';\n        $caracteres = str_split($texto);\n        foreach ($caracteres as $caracter) {\n            $ascii = ord($caracter); // Decimal\n            $binary = decbin($ascii); // Binary\n            $byte = str_pad($binary, 8, 0, STR_PAD_LEFT); // Byte\n            $bits = $bits . 
$byte;\n        }\n        $this->bits_file_array = str_split($bits, 8);\n    }\n}\n\n?>" }, { "alpha_fraction": 0.5977198481559753, "alphanum_fraction": 0.6026058793067932, "avg_line_length": 33.16666793823242, "blob_id": "108b434d98c3857101813a8af2613bd76a261d33", "content_id": "18f2983ac07ccf29ed0874bd1b82d2047766e95b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 615, "license_type": "no_license", "max_line_length": 92, "num_lines": 18, "path": "/assets/php/encrypt/dh_init.php", "repo_name": "alvarofpp/atividade_seguranca_em_redes", "src_encoding": "UTF-8", "text": "<?php\n$dh_file = file_get_contents('dh.json'); // Read JSON\n$dh = json_decode($dh_file, true); // Decode JSON\n\nsession_start(); // Start the session\n$_SESSION['num_dh'] = $_POST['q'].'-'.$_POST['a']; // Store the values\n\necho '<h2>Choose the other user to exchange keys with</h2>';\necho '<form method=\"get\" action=\"diffie_hellman.php\">';\nforeach ($dh['users'] as $key => $value) {\n    if(!($key == $_SESSION['username'])){\n        echo '<input type=\"radio\" name=\"user2\" value=\"'.$value.'\" required />'.$key.'<br/>';\n    }\n}\necho '<input type=\"submit\" value=\"Continue\"/>';\necho '</form>';\n\nexit();" }, { "alpha_fraction": 0.44242802262306213, "alphanum_fraction": 0.4461827278137207, "avg_line_length": 42.216217041015625, "blob_id": "6b9753f4a8134ea410ee2a3bd5a8bed3f9a73483", "content_id": "9b8c59ec1b40a0002095127abd8e1475a7e0ac05", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1599, "license_type": "permissive", "max_line_length": 111, "num_lines": 37, "path": "/assets/js/chat.js", "repo_name": "alvarofpp/atividade_seguranca_em_redes", "src_encoding": "UTF-8", "text": "$(document).ready(function () {\n    $(\"#btn-chat\").click(function () {\n        // Build a string with the username, date/time and the message being sent\n        var d = new Date();\n        var dformat = [d.getDate(), (d.getMonth() + 1), d.getFullYear()].join('/') +\n            ' ' +\n            [d.getHours(), d.getMinutes(), d.getSeconds()].join(':');\n        var mensagem = $('#username').val() + ';;;' + dformat + ';;;' + $('#btn-input').val();\n\n        // Call the PHP script that stores the encrypted message\n        $.post(\"assets/php/database/chat.php\", {\n            'mensagem': mensagem,\n            'json': json\n        }, function (data) {\n        }).success(function () {\n            console.log(\"Message sent successfully!\");\n            $('#chat').append('<li class=\"right clearfix\"><span class=\"chat-img pull-right\">'\n                + '<img src=\"http://placehold.it/50/FA6F57/fff&text=ME\" alt=\"User Avatar\" class=\"img-circle\"/>'\n                + '</span>'\n                + '<div class=\"chat-body clearfix\">'\n                + '<div class=\"header\">'\n                + '<small class=\" text-muted\"><span '\n                + 'class=\"glyphicon glyphicon-time\"></span>' + dformat\n                + '</small>'\n                + '<strong class=\"pull-right primary-font\">' + $('#username').val() + '</strong>'\n                + '</div>'\n                + '<p><b>'\n                + $('#btn-input').val()\n                + '</b></p>'\n                + '</div>'\n                + '</li>');\n            contador++;\n        }).error(function () {\n            alert(\"error\");\n        });\n    });\n});" }, { "alpha_fraction": 0.6112957000732422, "alphanum_fraction": 0.6259136199951172, "avg_line_length": 29.73469352722168, "blob_id": "70b11513e481602416d8166335df153b7e607c20", "content_id": "75288f69ce4b73c9cb11eeaab03e47bf36dd7c13", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1519, "license_type": "no_license", "max_line_length": 83, "num_lines": 49, "path": 
"/assets/py/esteganografia_py/mostrar.py", "repo_name": "alvarofpp/atividade_seguranca_em_redes", "src_encoding": "UTF-8", "text": "# coding: utf-8\n\nimport re\nimport Image\n\n# Number of pixels reserved to store the length of the hidden message\nPIXELS_RESERVADOS = 10\n\ndef pixels(tam):\n    '''Helper to iterate over the image pixels'''\n    for y in xrange(tam[1]):\n        for x in xrange(tam[0]):\n            yield (x, y)\n\ndef recuperar(img_esteg):\n    # Open the image, read its attributes and load the pixels into memory\n    img = Image.open(img_esteg)\n    tam = img.size\n    pix = img.load()\n\n    # Read the first pixels, which encode the length of the embedded data\n    info_tam = ''\n    for p in pixels(tam):\n        info_tam += ''.join('1' if cor % 2 else '0' for cor in pix[p][:3])\n        if len(info_tam) >= PIXELS_RESERVADOS * 3:\n            info_tam = int(info_tam, 2)\n            break\n\n    # Extract the binary payload from the image\n    info_bin = ''\n    for p in pixels(tam):\n        info_bin += ''.join('1' if cor % 2 else '0' for cor in pix[p][:3])\n\n    return info_bin[PIXELS_RESERVADOS * 3:info_tam + PIXELS_RESERVADOS * 3]\n\n\ndef gera_bin(msg):\n    '''For each character, get the binary value of its ASCII code'''\n    return ''.join(bin(ord(caractere))[2:].zfill(8) for caractere in msg)\n\n\ndef recupera_str(str_bin):\n    '''Convert each group of 8 bits back into its character'''\n    return ''.join(chr(int(bin, 2)) for bin in re.findall(r'.{8}', str_bin))\n\nif __name__ == '__main__':\n    # Recover the message\n    msg_bin = recuperar('esteganografia.png')\n    print(recupera_str(msg_bin))" }, { "alpha_fraction": 0.6867167949676514, "alphanum_fraction": 0.7393483519554138, "avg_line_length": 56, "blob_id": "0d90461c06370b0a0326b7e1f8e66ee2170fe8f5", "content_id": "a8e7cd528c776a5391c823f0015b79725ba3d907", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 402, "license_type": "no_license", "max_line_length": 258, "num_lines": 7, "path": "/README.md", "repo_name": "alvarofpp/atividade_seguranca_em_redes", "src_encoding": "UTF-8", "text": "# Chat using cryptography\nThis repository contains an assignment for the **Segurança de Redes** (Network Security) course (IMD0703, T01, 2016.2), taught by professor [Silvio Costa Sampaio](https://github.com/imdcode). The assignment consists of building a chat application that implements some cryptographic algorithms.\n\n**Student**:\n- <a href=\"https://github.com/alvarofpp\">Álvaro Ferreira Pires de Paiva</a>\n  - Student ID: 2016039162\n  - E-mail: alvarofepipa@gmail.com\n" }, { "alpha_fraction": 0.6594454050064087, "alphanum_fraction": 0.6655112504959106, "avg_line_length": 37.46666717529297, "blob_id": "9b51b71a0fbbf91f4978b44e83a88af463c9c3da", "content_id": "7f4e0b57eb5d977ca51a2c308e7386d0de95f8b1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 1157, "license_type": "no_license", "max_line_length": 90, "num_lines": 30, "path": "/assets/php/database/chat.php", "repo_name": "alvarofpp/atividade_seguranca_em_redes", "src_encoding": "UTF-8", "text": "<?php\ninclude('chat_criptografia.php');\n\n$conteudo = '';\n$lendo = fopen('chat.txt', \"r+\"); // Open the file for reading\n// Read the file until the end\nwhile (!feof($lendo)) {\n    $linha = fgets($lendo, 4096); // Read one line from the file\n    $conteudo = $conteudo . 
$linha; // Append it to the $conteudo variable\n}\nfclose($lendo); // Close the file handle\n\n// Create a chatCriptografia object, storing the cipher type and the key in use\n$cript = new chatCriptografia($_POST['json']['encryption'], $_POST['json']['key']);\n$cript->import('../encrypt/s_des_script.php');\n$cript->import('../encrypt/rc4_script.php');\n\n$texto = $cript->action('c', $_POST['mensagem']); // Encrypt the submitted text\n\n$escrevendo = fopen('chat.txt', \"w+\"); // Open the file for writing\n// Check whether the file already had content\nif (strlen($conteudo) > 0) {\n    $dados = $conteudo . \"\\n\" . $texto; // If so, add a line break first\n    $escreve = fwrite($escrevendo, $dados); // Write to the file\n} else {\n    $escreve = fwrite($escrevendo, $texto); // No line break needed\n}\nfclose($escrevendo); // Close the file handle\nreturn true;\n" } ]
17
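
The rc4_script.php entry in the record above implements RC4 by expanding every input byte into a string of '0'/'1' characters before XOR-ing, which obscures the underlying algorithm. As a point of comparison, here is a minimal Python sketch of the same KSA/PRGA keystream logic operating directly on bytes; the function name `rc4` and the sample key and message are illustrative assumptions, not taken from the repository.

```python
def rc4(key: bytes, data: bytes) -> bytes:
    """Minimal RC4 sketch: the same call encrypts and decrypts (XOR keystream)."""
    # Key-scheduling algorithm (KSA): initialize S and permute it with the key.
    S = list(range(256))
    j = 0
    for i in range(256):
        j = (j + S[i] + key[i % len(key)]) % 256
        S[i], S[j] = S[j], S[i]
    # Pseudo-random generation algorithm (PRGA): one keystream byte per data byte.
    out = bytearray()
    i = j = 0
    for byte in data:
        i = (i + 1) % 256
        j = (j + S[i]) % 256
        S[i], S[j] = S[j], S[i]
        out.append(byte ^ S[(S[i] + S[j]) % 256])
    return bytes(out)

# Round trip: applying the same keystream twice restores the plaintext.
msg = b"user;;;01/01/2017 12:00:00;;;hello"  # sample message, assumed format
assert rc4(b"conversation-key", rc4(b"conversation-key", msg)) == msg
```
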
Elreniel/WebScraping
https://github.com/Elreniel/WebScraping
a74a9ba39fac0bdbb44c3936406dbb9c2f314238
4e09730ba4ee2c87546684e29a9e724af6133018
c441702fe4a2d5510c47ea787781451bb5b8bf18
refs/heads/master
2023-08-02T06:18:09.291835
2021-09-17T13:14:55
2021-09-17T13:14:55
407,546,076
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6633928418159485, "alphanum_fraction": 0.6732142567634583, "avg_line_length": 32.93939208984375, "blob_id": "1e6aef154aa3e619022a83c58534d50659f03d35", "content_id": "5183534309ba44d59300fe1e935a38bc597c93c4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1120, "license_type": "no_license", "max_line_length": 195, "num_lines": 33, "path": "/seleniumTutorial.py", "repo_name": "Elreniel/WebScraping", "src_encoding": "UTF-8", "text": "from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nimport time\n\ndriver = webdriver.Chrome(\"chromedriver.exe\")\n\ndriver.get('https://www.google.com/imghp')\n\nbox = driver.find_element_by_xpath('//*[@id=\"sbtc\"]/div/div[2]/input')\n\nbox.send_keys(\"medical head cap\")\nbox.send_keys(Keys.ENTER)\n\n# Keep scrolling down the page until it cannot scroll any further\nlast_height = driver.execute_script('return document.body.scrollHeight')\nwhile True:\n    driver.execute_script('window.scrollTo(0,document.body.scrollHeight)')\n    time.sleep(2)\n    new_height = driver.execute_script('return document.body.scrollHeight')\n    try:\n        driver.find_element_by_xpath('//*[@id=\"islmp\"]/div/div/div/div/div[5]/input').click()\n        time.sleep(2)\n    except:\n        pass  # the \"Show more results\" button is not always present\n    if new_height == last_height:\n        break\n    last_height = new_height\n\nfor i in range(1, 10):\n    try:\n        driver.find_element_by_xpath('//*[@id=\"islrg\"]/div[1]/div[' + str(i) + ']/a[1]/div[1]/img').screenshot('C:\\\\Users\\\\bcosk\\\\Desktop\\\\WebScraping\\\\seleniumImages\\\\bonnet_' + str(i) + '.png')\n    except:\n        pass  # skip thumbnails that cannot be captured\n" }, { "alpha_fraction": 0.7096773982048035, "alphanum_fraction": 0.7235022783279419, "avg_line_length": 26.25, "blob_id": "f1ec178f121abf1df0c2eacf5eae96c871c06eba", "content_id": "610b578c97122ec01ad25fdfaee152547b66a544", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 217, "license_type": "no_license", "max_line_length": 63, "num_lines": 8, "path": "/main.py", "repo_name": "Elreniel/WebScraping", "src_encoding": "UTF-8", "text": "from simple_image_download import simple_image_download as simp\n\nresponse = simp()\n\nkeywords = \"medical bonnet\"\nlimit = 100\n# extensions={'.jpg', '.png', '.jpeg'}\nresponse.download(keywords, limit, extensions={'.png'})" } ]
2
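
seleniumTutorial.py in the record above paces the browser with fixed time.sleep() calls and silences failures with bare except blocks. Below is a sketch of the same lookup using Selenium's explicit-wait API, which blocks only as long as the element actually takes to appear; the ten-second timeout and the name="q" locator for the search box are assumptions, not taken from the script.

```python
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

driver = webdriver.Chrome("chromedriver.exe")
driver.get("https://www.google.com/imghp")

# Block until the search box exists (up to 10 s) instead of sleeping a fixed interval.
box = WebDriverWait(driver, 10).until(
    EC.presence_of_element_located((By.NAME, "q"))  # assumed locator for the search box
)
box.send_keys("medical head cap")
```
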
odysseyhack/boldchain
https://github.com/odysseyhack/boldchain
5561bacafb7c4438bf3787dcab31d837b59496e5
4067a42984e91e6a72f87d8f18bd18f802846584
84b7637e7fc7c1fa56b7203d65e8d4dd27940a87
refs/heads/master
2020-05-05T03:13:25.301039
2019-04-13T19:25:02
2019-04-13T19:25:02
179,664,645
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8333333134651184, "alphanum_fraction": 0.8333333134651184, "avg_line_length": 23, "blob_id": "d6cd8501a6da26268f902b424e2d41f397561840", "content_id": "ea797cac5eb29bd4cd8b1c5186d6e66daa319f59", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 144, "license_type": "no_license", "max_line_length": 43, "num_lines": 6, "path": "/backend/boldapi/giftcards/admin.py", "repo_name": "odysseyhack/boldchain", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom .models import PensionEntity, Giftcard\n\n\nadmin.site.register(PensionEntity)\nadmin.site.register(Giftcard)\n" }, { "alpha_fraction": 0.6774193644523621, "alphanum_fraction": 0.6774193644523621, "avg_line_length": 21.14285659790039, "blob_id": "784c1d505014979b2b1e36d7f522959ac8c272a5", "content_id": "110c344f2dd75fa7a2a684ec77b59c4e05f14b2f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 465, "license_type": "no_license", "max_line_length": 51, "num_lines": 21, "path": "/frontend-vue/src/store/main/index.ts", "repo_name": "odysseyhack/boldchain", "src_encoding": "UTF-8", "text": "import Vue from 'vue';\nimport { Module } from 'vuex';\nimport state from './state';\nimport * as getters from './getters';\nimport { actions } from './actions';\nimport * as mutations from './mutations';\nimport Vuex from 'vuex';\nimport { RootState } from '../types';\nimport { MainState } from './types';\n\nVue.use(Vuex);\n\nconst namespaced: boolean = true;\n\nexport const main: Module<MainState, RootState> = {\n namespaced,\n state,\n getters,\n actions,\n mutations,\n};\n" }, { "alpha_fraction": 0.6315789222717285, "alphanum_fraction": 0.6315789222717285, "avg_line_length": 36, "blob_id": "734c6bd7c45b87eb458eb0ac89bfbce3f782be23", "content_id": "c9db6a11fd2bdf0d86f111bc7e8d77bca7feaf21", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 38, "license_type": "no_license", "max_line_length": 36, "num_lines": 1, "path": "/frontend-vue/src/store/main/getters.ts", "repo_name": "odysseyhack/boldchain", "src_encoding": "UTF-8", "text": "import { MainState } from './types';\n\n" }, { "alpha_fraction": 0.5730689167976379, "alphanum_fraction": 0.5892484188079834, "avg_line_length": 44.619049072265625, "blob_id": "974b5fd3a0775324087fbc83e0aed0fbf65e4cd5", "content_id": "62c1f157208dfac88b153dbc0bf1b9ebf672bbbc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1916, "license_type": "no_license", "max_line_length": 173, "num_lines": 42, "path": "/backend/boldapi/mockdigid/migrations/0001_initial.py", "repo_name": "odysseyhack/boldchain", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1.7 on 2019-04-13 10:35\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Participant',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('birth_date', models.DateField(blank=True, null=True)),\n ('bio', models.TextField(blank=True, max_length=500)),\n ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),\n ],\n ),\n 
migrations.CreateModel(\n            name='PensionFund',\n            fields=[\n                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n                ('bsn', models.CharField(max_length=100)),\n                ('fund_name', models.CharField(choices=[('abp', 'ABP fund'), ('pfzw', 'PFZW Fund'), ('sf', 'Some Fund'), ('gf', 'Gold Fund')], default='abp', max_length=5)),\n                ('active', models.BooleanField(default=False)),\n                ('ascription', models.CharField(max_length=100, null=True)),\n                ('eligible', models.BooleanField(default=True)),\n                ('start_date', models.DateField(blank=True, null=True)),\n                ('end_date', models.DateField(blank=True, null=True)),\n                ('fulltime_salary', models.FloatField(default=0.0)),\n                ('entitlements', models.CharField(max_length=1000)),\n                ('participant', models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, to='mockdigid.Participant')),\n            ],\n        ),\n    ]\n" }, { "alpha_fraction": 0.6119257211685181, "alphanum_fraction": 0.6231671571731567, "avg_line_length": 30.96875, "blob_id": "1ae117bde6a232c53a5dc5a1fa18007593dac931", "content_id": "92a35bcd1e8f510644fa42c109f9a3676e5bd239", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2046, "license_type": "no_license", "max_line_length": 85, "num_lines": 64, "path": "/backend/boldapi/mockdigid/models.py", "repo_name": "odysseyhack/boldchain", "src_encoding": "UTF-8", "text": "import json\n\nfrom django.contrib.auth.models import User\nfrom django.db import models\n\n\nclass Participant(models.Model):\n    '''\n    Participant is the customer. They can link their Digid to the system, view\n    projections, etc.\n    '''\n    user = models.OneToOneField(User, on_delete=models.CASCADE)\n    birth_date = models.DateField(null=True, blank=True)\n    bio = models.TextField(max_length=500, blank=True)\n\n    def __str__(self):\n        return self.user.get_username()\n\n\nclass PensionFund(models.Model):\n    '''\n    Details of the pension fund\n    '''\n    ABP = 'abp'\n    PFZW = 'pfzw'\n    SOMEFUND = 'sf'\n    GOLDFUND = 'gf'\n    FUND_CHOICES = (\n        (ABP, 'ABP fund'),\n        (PFZW, 'PFZW Fund'),\n        (SOMEFUND, 'Some Fund'),\n        (GOLDFUND, 'Gold Fund'),\n    )\n\n    session_id = models.CharField(max_length=100, primary_key=True)\n    amount = models.FloatField(default=0.0)\n    bsn = models.CharField(max_length=100)\n    participant = models.ForeignKey(Participant, on_delete=models.CASCADE, null=True)\n    fund_name = models.CharField(max_length=5, choices=FUND_CHOICES, default=ABP)\n    active = models.BooleanField(default=False)\n    ascription = models.CharField(max_length=100, null=True)\n    eligible = models.BooleanField(default=True)\n    start_date = models.DateField(null=True, blank=True)\n    end_date = models.DateField(null=True, blank=True)\n\n    fulltime_salary = models.FloatField(default=0.0)\n    entitlements = models.CharField(max_length=1000)\n\n    def set_entitlements(self, x):\n        self.entitlements = json.dumps(x)\n\n    def get_entitlements(self):\n        return json.loads(self.entitlements)\n\n    def get_web_link(self):\n        if self.fund_name == 'abp':\n            return \"https://www.abp.nl\"\n        elif self.fund_name == 'pfzw':\n            return \"https://www.pfzw.nl\"\n        else:\n            return \"https://www.abp.nl\"\n\n    def __str__(self):\n        return '{} - {} - {}'.format(self.session_id, self.fund_name, self.bsn[0:6])\n" }, { "alpha_fraction": 0.7142857313156128, "alphanum_fraction": 0.7142857313156128, "avg_line_length": 13, "blob_id": "4cb2b302423d1820a53bb2c9be14e5d1283ab6b3", "content_id": "dd9103bf780fa46bfd8252b95d2055dc697969d0", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Markdown", "length_bytes": 14, "license_type": "no_license", "max_line_length": 13, "num_lines": 1, "path": "/data/README.md", "repo_name": "odysseyhack/boldchain", "src_encoding": "UTF-8", "text": "`Data` Folder\n" }, { "alpha_fraction": 0.7138263583183289, "alphanum_fraction": 0.7138263583183289, "avg_line_length": 22.923076629638672, "blob_id": "23cf6640d0e28786526a31082833941e8e18ccbd", "content_id": "8dc38f2f5ab289824cc521e1802858914023404e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 622, "license_type": "no_license", "max_line_length": 48, "num_lines": 26, "path": "/frontend-vue/src/main.ts", "repo_name": "odysseyhack/boldchain", "src_encoding": "UTF-8", "text": "import Vue, { PluginObject } from 'vue';\n\nimport VueMaterial from 'vue-material';\nimport 'vue-material/dist/vue-material.min.css';\nimport BootstrapVue from 'bootstrap-vue';\nimport Trend from 'vuetrend';\n\nimport App from './App.vue';\nimport router from './router';\nimport store from './store/index';\n\n// bootstrap-vue css\nimport 'bootstrap/dist/css/bootstrap.css';\nimport 'bootstrap-vue/dist/bootstrap-vue.css';\n\nVue.config.productionTip = false;\nVue.use(VueMaterial as PluginObject<{}>);\nVue.use(BootstrapVue);\nVue.use(Trend as PluginObject<{}>);\n\n\nnew Vue({\n router,\n store,\n render: (h) => h(App),\n}).$mount('#app');\n" }, { "alpha_fraction": 0.6383857727050781, "alphanum_fraction": 0.6482363939285278, "avg_line_length": 32.478721618652344, "blob_id": "f51ba5396a4be53aeeda2dc3910cc53242b02822", "content_id": "945d9aea29f98e754507f161298fad27e3a4f335", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3147, "license_type": "no_license", "max_line_length": 102, "num_lines": 94, "path": "/backend/boldapi/mockdigid/views.py", "repo_name": "odysseyhack/boldchain", "src_encoding": "UTF-8", "text": "import json\n\nfrom django.contrib.auth import authenticate\nfrom django.contrib.auth.models import User\n\nfrom rest_framework import status\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\n\nfrom mockdigid.models import Participant, PensionFund\nfrom giftcards.models import Giftcard\n\n\ndef _get_participant_pension_funds(participant):\n '''\n Return list of all pension funds of a participant\n '''\n response = []\n pensions = list(PensionFund.objects.filter(participant=participant))\n\n for pension in pensions:\n response.append({\n 'fund_name': pension.fund_name,\n 'id': pension.session_id,\n 'active': pension.active,\n 'start_date': pension.start_date,\n 'ascription': pension.ascription,\n 'eligible': pension.eligible,\n 'fulltime_salary': pension.fulltime_salary,\n 'entitlements': pension.entitlements\n })\n\n return response\n\n\n@api_view(['POST'])\ndef authenticate_digid(request):\n '''\n Check if user is valid (Will be replaced with BasicAuth in the future)\n\n Example call: 127.0.0.1:8000/mockdigid/authenticate?username=TEST&password=TEST\n '''\n user = authenticate(username=request.query_params['username'],\n password=request.query_params['password'])\n\n try:\n participant = Participant.objects.get(user=user)\n except Participant.DoesNotExist:\n return Response({'msg': 'Username or password is wrong'}, status=status.HTTP_401_UNAUTHORIZED)\n\n return Response({\n 'first_name' : participant.user.first_name,\n 'last_name' : participant.user.last_name,\n 'bio' : participant.bio,\n 'pension_funds': 
_get_participant_pension_funds(participant)\n }, status=status.HTTP_200_OK)\n\n\n@api_view(['PUT'])\ndef add_to_fund(request):\n '''\n Add giftcard amount to a fund\n '''\n try:\n giftcard = Giftcard.objects.get(barcode=request.query_params['barcode'])\n pension_fund = PensionFund.objects.get(session_id=request.query_params['id'])\n except Giftcard.DoesNotExist:\n return Response({'msg': 'Invalid barcode'}, status=status.HTTP_400_BAD_REQUEST)\n except PensionFund.DoesNotExist:\n return Response({'msg': 'Invalid Pension Fund'}, status=status.HTTP_400_BAD_REQUEST)\n\n pension_fund.amount += giftcard.amount\n pension_fund.save()\n\n return Response({\n 'amount': pension_fund.amount,\n 'msg': 'Amount added to pension fund',\n 'link': pension_fund.get_web_link()\n }, status=status.HTTP_200_OK)\n\n\n@api_view(['POST'])\ndef create_participant(request):\n '''\n Create a new user\n '''\n try:\n user = User.objects.create_user(username=request.query_params['username'],\n password=request.query_params['password'])\n Participant.objects.create(user=user)\n except:\n return Response({'msg': 'Unable to create user'}, status=status.HTTP_400_BAD_REQUEST)\n\n return Response({'msg': 'Created new user'}, status=status.HTTP_200_OK)\n" }, { "alpha_fraction": 0.835616409778595, "alphanum_fraction": 0.835616409778595, "avg_line_length": 23.33333396911621, "blob_id": "8879d4e4f7d4e3e5a123ff4ebec7b60551001f81", "content_id": "e39cebe626bcb8729a89721948e34b8623bef340", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 146, "license_type": "no_license", "max_line_length": 44, "num_lines": 6, "path": "/backend/boldapi/mockdigid/admin.py", "repo_name": "odysseyhack/boldchain", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom .models import Participant, PensionFund\n\n\nadmin.site.register(Participant)\nadmin.site.register(PensionFund)\n" }, { "alpha_fraction": 0.5733333230018616, "alphanum_fraction": 0.6251282095909119, "avg_line_length": 17.22429847717285, "blob_id": "6ecba52c362e0402ebaaded9f09c58cd63b80965", "content_id": "cc939052bcbd24b9cbfcaf6833903873b6bbcef3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1950, "license_type": "no_license", "max_line_length": 86, "num_lines": 107, "path": "/backend/README.md", "repo_name": "odysseyhack/boldchain", "src_encoding": "UTF-8", "text": "### Run\n\n### Initialize database\n\n```\ncd boldapi\npython manage.py create_users 2\npython manage.py create_pension_funds 5\npython manage.py create_entities 2\npython manage.py create_giftcards 10\n```\n\n### To run admin site\n\n```\ncd boldapi\npython manage.py migrate\npython manage.py runserver\n```\nIn browser, open `http://localhost:8000`\n\n\n### API\n\n1. [GET] /giftcards/valid\n - query_params: barcode\n - example: 127.0.0.1:8000/giftcards/valid?barcode=xyz\n - response:\n ```\n {\n \"barcode\": \"xyz\",\n \"amount\": 150\n }\n ```\n\n - example: 127.0.0.1:8000/giftcards/valid?barcode=INVALID_BARCODE\n - response:\n ```\n {\n \"msg\": \"Invalid barcode\"\n }\n ```\n\n2. 
[POST] /mockdigid/authenticate\n    - query_params: username, password\n    - example: 127.0.0.1:8000/mockdigid/authenticate?username=test&password=root\n    - response:\n    ```\n    {\n        \"first_name\": \"first1\",\n        \"last_name\": \"last1\",\n        \"bio\": \"Some bio\"\n    }\n    ```\n\n    - example: 127.0.0.1:8000/mockdigid/authenticate?username=INVALID&password=INVALID\n    - response:\n    ```\n    {\n        \"msg\": \"Username or password is wrong\"\n    }\n    ```\n\n3. [PUT] /mockdigid/addtofund\n    - query_params: barcode, id\n    - example: 127.0.0.1:8000/mockdigid/addtofund?barcode=valid_barcode&id=valid_id\n    - response:\n    ```\n    {\n        \"amount\": 225,\n        \"msg\": \"Amount added to pension fund\"\n    }\n    ```\n\n    - example: 127.0.0.1:8000/mockdigid/addtofund?barcode=INVALID&id=valid_id\n    - response:\n    ```\n    {\n        \"msg\": \"Invalid barcode\"\n    }\n    ```\n\n    - example: 127.0.0.1:8000/mockdigid/addtofund?barcode=valid_barcode&id=INVALID\n    - response:\n    ```\n    {\n        \"msg\": \"Invalid Pension Fund\"\n    }\n    ```\n\n    - example: used giftcard\n    - response:\n    ```\n    {\n        \"msg\": \"Giftcard already used\"\n    }\n    ```\n\n4. [POST] /mockdigid/createuser\n    - query_params: username, password\n    - example: 127.0.0.1:8000/mockdigid/createuser?username=new_user&password=test\n    - response:\n    ```\n    {\n        'msg': 'Created new user'\n    }\n    ```\n" }, { "alpha_fraction": 0.6100478172302246, "alphanum_fraction": 0.6100478172302246, "avg_line_length": 31.153846740722656, "blob_id": "8fbe0c32f20a1a79723a093b62e7c36cdc11bd68", "content_id": "fe6539b7e79a8c794f44d80d77236f5f6fde7852", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 836, "license_type": "no_license", "max_line_length": 108, "num_lines": 26, "path": "/backend/boldapi/giftcards/management/commands/create_entities.py", "repo_name": "odysseyhack/boldchain", "src_encoding": "UTF-8", "text": "from django.contrib.auth.models import User\nfrom django.core.management.base import BaseCommand\nfrom django.utils.crypto import get_random_string\n\nfrom giftcards.models import PensionEntity\n\n\nclass Command(BaseCommand):\n    help = 'Create test PensionEntity'\n\n    def add_arguments(self, parser):\n        parser.add_argument('total', type=int, help='Indicates the number of PensionEntities to be created')\n\n    def handle(self, *args, **kwargs):\n        total = kwargs['total']\n\n        for i in range(total):\n            kwargs = {\n                'username': 'PE{}'.format(i),\n                'password': 'test',\n                'first_name': 'PE_{}'.format(i),\n                'email': 'test{}@entity.com'.format(i)\n            }\n\n            user = User.objects.create_user(**kwargs)\n            PensionEntity.objects.create(user=user)\n" }, { "alpha_fraction": 0.7089201807975769, "alphanum_fraction": 0.7089201807975769, "avg_line_length": 22.66666603088379, "blob_id": "2b5dca3f9de6dc828dbfb33c4e405623f2dd4e9d", "content_id": "4d1502faa3df005e95f2cde9d5269b83aa5866c4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 213, "license_type": "no_license", "max_line_length": 51, "num_lines": 9, "path": "/backend/boldapi/mockdigid/urls.py", "repo_name": "odysseyhack/boldchain", "src_encoding": "UTF-8", "text": "from django.urls import path\n\nfrom . 
import views\n\nurlpatterns = [\n path('authenticate', views.authenticate_digid),\n path('addtofund', views.add_to_fund),\n path('createuser', views.create_participant),\n]\n" }, { "alpha_fraction": 0.4901960790157318, "alphanum_fraction": 0.5661764740943909, "avg_line_length": 19.399999618530273, "blob_id": "6b4cf5086fc816b0b9babcd7d2e47cea9e1716f9", "content_id": "72192f4dde6ee38b840c775c6e0e462c29fdf2e8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 408, "license_type": "no_license", "max_line_length": 49, "num_lines": 20, "path": "/backend/boldapi/giftcards/migrations/0006_auto_20190412_2205.py", "repo_name": "odysseyhack/boldchain", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1.7 on 2019-04-12 20:05\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('giftcards', '0005_auto_20190412_2131'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='participant',\n name='user',\n ),\n migrations.DeleteModel(\n name='Participant',\n ),\n ]\n" }, { "alpha_fraction": 0.562601625919342, "alphanum_fraction": 0.5934959053993225, "avg_line_length": 25.7391300201416, "blob_id": "0049bf9a0388ee980e63f68a8498690b34b11d68", "content_id": "7ff7b9f34e84f0a0250cd0594ce803abee8eb812", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 615, "license_type": "no_license", "max_line_length": 83, "num_lines": 23, "path": "/backend/boldapi/giftcards/migrations/0005_auto_20190412_2131.py", "repo_name": "odysseyhack/boldchain", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1.7 on 2019-04-12 19:31\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('giftcards', '0004_remove_participant_image'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='giftcard',\n name='created',\n field=models.DateTimeField(null=True, verbose_name='Date of creation'),\n ),\n migrations.AlterField(\n model_name='giftcard',\n name='validity',\n field=models.DateTimeField(null=True, verbose_name='Valid until'),\n ),\n ]\n" }, { "alpha_fraction": 0.6043010950088501, "alphanum_fraction": 0.6136200428009033, "avg_line_length": 30, "blob_id": "30cd07d26843869659af713b4c614c2afba6fd76", "content_id": "b4b097915a03cce714c60e1758b93d955a582f2e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 1395, "license_type": "no_license", "max_line_length": 106, "num_lines": 45, "path": "/frontend-vue/src/store/main/actions.ts", "repo_name": "odysseyhack/boldchain", "src_encoding": "UTF-8", "text": "import { RootState } from '../types';\nimport { MainState } from './types';\nimport { ActionTree, ActionContext } from 'vuex';\nimport axios from 'axios';\nimport { AxiosResponse, AxiosError } from 'axios';\n\nconst baseUrl: string = 'http://172.16.165.8:8000';\n\nexport const actions: ActionTree<MainState, RootState> = {\n\n redeemCode(store: ActionContext<MainState, RootState>, code: string): Promise<{}> {\n return new Promise((resolve, reject) => {\n axios.get(\n baseUrl + `/giftcards/valid?barcode=${ code }`,\n ).then((response: AxiosResponse) => {\n resolve(response.data);\n }).catch((error: AxiosError) => {\n reject(error.message);\n });\n });\n },\n\n authenticate(\n store: ActionContext<MainState, RootState>,\n payload: {username: string, password: string },\n ): Promise<{}> {\n return new Promise((resolve, reject) => {\n 
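// Demo-only: credentials travel in the query string here, matching the mock backend's request parsing.\n            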
axios.post(\n baseUrl + `/mockdigid/authenticate?username=${ payload.username }&password=${ payload.password }`,\n ).then((response: AxiosResponse) => {\n resolve(response.data);\n }).catch((error: AxiosError) => {\n reject(error.message);\n });\n });\n },\n\n contribute(\n store: ActionContext<MainState, RootState>,\n payload: { username: string, password: string, code: string }): Promise<{}> {\n return new Promise((resolve, reject) => {\n resolve({});\n });\n },\n};\n" }, { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.6753812432289124, "avg_line_length": 30.65517234802246, "blob_id": "6728558fb03f87b623b43d7c9d40aa95ff42da6a", "content_id": "52ef10cad87ce64babdcdb9a36f0a94fdde16042", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 918, "license_type": "no_license", "max_line_length": 87, "num_lines": 29, "path": "/backend/boldapi/giftcards/models.py", "repo_name": "odysseyhack/boldchain", "src_encoding": "UTF-8", "text": "import uuid\n\nfrom django.contrib.auth.models import User\nfrom django.db import models\n\n\nclass Giftcard(models.Model):\n '''\n Model describes the Giftcard\n '''\n barcode = models.CharField(max_length=100, primary_key=True)\n issued_by = models.ForeignKey('PensionEntity', on_delete=models.CASCADE, null=True)\n amount = models.FloatField(default=0.0)\n used = models.BooleanField(default=False)\n created = models.DateTimeField('Date of creation', null=True)\n validity = models.DateTimeField('Valid until', null=True)\n\n def __str__(self):\n return \"{} - {}\".format(self.barcode, self.amount)\n\nclass PensionEntity(models.Model):\n '''\n PensionEntity can be something like \"APG\"\n '''\n user = models.OneToOneField(User, on_delete=models.CASCADE)\n bio = models.TextField(max_length=500, blank=True)\n\n def __str__(self):\n return self.user.get_username()\n" }, { "alpha_fraction": 0.6703296899795532, "alphanum_fraction": 0.6703296899795532, "avg_line_length": 14.166666984558105, "blob_id": "ad7995222872c153fe24fe51df86b2e8648ee90e", "content_id": "18e0eb977599d0126016ab405b9023e5a69b89e1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 91, "license_type": "no_license", "max_line_length": 36, "num_lines": 6, "path": "/frontend-vue/src/store/main/state.ts", "repo_name": "odysseyhack/boldchain", "src_encoding": "UTF-8", "text": "import { MainState } from './types';\n\nconst state: MainState = {\n};\n\nexport default state;\n" }, { "alpha_fraction": 0.7101865410804749, "alphanum_fraction": 0.7331420183181763, "avg_line_length": 32.19047546386719, "blob_id": "5502dc5e25ab777ce0d4a38fd61591b432a1c8b8", "content_id": "a0e028ed184a65f8a5d99127295b8b3198d58bfe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 697, "license_type": "no_license", "max_line_length": 104, "num_lines": 21, "path": "/backend/boldapi/giftcards/views.py", "repo_name": "odysseyhack/boldchain", "src_encoding": "UTF-8", "text": "from rest_framework import status\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\n\nfrom .models import Giftcard\nfrom .serializers import GiftcardsSerializer\n\n\n@api_view(['GET'])\ndef giftcard_valid(request):\n '''\n Check if gift card is valid\n\n Example call: http://127.0.0.1:8000/giftcards/valid?barcode=INVALIDBARCODE\n '''\n try:\n giftcard = Giftcard.objects.get(barcode=request.query_params['barcode'])\n except 
Giftcard.DoesNotExist:\n return Response({'msg': 'Invalid barcode'}, status=status.HTTP_400_BAD_REQUEST)\n\n return Response({'barcode': giftcard.barcode, 'amount': giftcard.amount}, status=status.HTTP_200_OK)\n" }, { "alpha_fraction": 0.4898419976234436, "alphanum_fraction": 0.5598194003105164, "avg_line_length": 20.095237731933594, "blob_id": "afe0e443e1e3b9458a198383a15282c4fae7a8d3", "content_id": "544b497646eec47d7630d248a9bdabce6b782633", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 443, "license_type": "no_license", "max_line_length": 49, "num_lines": 21, "path": "/backend/boldapi/giftcards/migrations/0003_auto_20190412_1959.py", "repo_name": "odysseyhack/boldchain", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1.7 on 2019-04-12 17:59\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('giftcards', '0002_auto_20190412_1956'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='participant',\n name='email',\n ),\n migrations.RemoveField(\n model_name='pensionentity',\n name='email',\n ),\n ]\n" }, { "alpha_fraction": 0.5828343033790588, "alphanum_fraction": 0.6447106003761292, "avg_line_length": 25.36842155456543, "blob_id": "2bae59a2f88d7d2de0e8150e8d7181246af7b702", "content_id": "672390571ba59a0bcec9be3d4e8c1ff17283d318", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 501, "license_type": "no_license", "max_line_length": 120, "num_lines": 19, "path": "/backend/boldapi/mockdigid/migrations/0003_auto_20190413_1356.py", "repo_name": "odysseyhack/boldchain", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1.7 on 2019-04-13 11:56\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('mockdigid', '0002_auto_20190413_1238'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='pensionfund',\n name='participant',\n field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='mockdigid.Participant'),\n ),\n ]\n" }, { "alpha_fraction": 0.5657051205635071, "alphanum_fraction": 0.6009615659713745, "avg_line_length": 25, "blob_id": "916d6943f96ebb2365408e7579275c0806e27972", "content_id": "e7bdbfb504555e8786df722211eb50d1e04116ef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 624, "license_type": "no_license", "max_line_length": 121, "num_lines": 24, "path": "/backend/boldapi/mockdigid/migrations/0002_auto_20190413_1238.py", "repo_name": "odysseyhack/boldchain", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1.7 on 2019-04-13 10:38\n\nfrom django.db import migrations, models\nimport django.utils.timezone\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('mockdigid', '0001_initial'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='pensionfund',\n name='id',\n ),\n migrations.AddField(\n model_name='pensionfund',\n name='session_id',\n field=models.CharField(default=django.utils.timezone.now, max_length=100, primary_key=True, serialize=False),\n preserve_default=False,\n ),\n ]\n" }, { "alpha_fraction": 0.5504807829856873, "alphanum_fraction": 0.6033653616905212, "avg_line_length": 22.11111068725586, "blob_id": "f3d01482952094630c5aba7939da441e93322329", "content_id": "4b7d5e9d40ec5d127ea2bdefa78ae23daf0c573d", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 416, "license_type": "no_license", "max_line_length": 86, "num_lines": 18, "path": "/backend/boldapi/giftcards/migrations/0002_auto_20190412_1956.py", "repo_name": "odysseyhack/boldchain", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1.7 on 2019-04-12 17:56\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('giftcards', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='giftcard',\n name='barcode',\n field=models.CharField(max_length=100, primary_key=True, serialize=False),\n ),\n ]\n" }, { "alpha_fraction": 0.6436781883239746, "alphanum_fraction": 0.6954023241996765, "avg_line_length": 12.384614944458008, "blob_id": "edc6fda730d4dc5023252c253c791d3093ddea38", "content_id": "2061a244ff90c752771763785dbacef318a199a1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 174, "license_type": "no_license", "max_line_length": 35, "num_lines": 13, "path": "/README.md", "repo_name": "odysseyhack/boldchain", "src_encoding": "UTF-8", "text": "# Pension Giftcards\n\n### Setup\n#### Requirements\n1. python v3.7.x\n2. django v2.1.7\n3. miniconda\n4. Pillow\n\n```\nconda env create -f environment.yml\nconda activate odyssey\n```\n" }, { "alpha_fraction": 0.7098445892333984, "alphanum_fraction": 0.7098445892333984, "avg_line_length": 28.69230842590332, "blob_id": "528fd18b8d5dc2e2532a073f96698d17dcf86024", "content_id": "445c0eefddd73cce0b3aa92002ea7b563b0a2199", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 386, "license_type": "no_license", "max_line_length": 66, "num_lines": 13, "path": "/backend/boldapi/mockdigid/serializers.py", "repo_name": "odysseyhack/boldchain", "src_encoding": "UTF-8", "text": "from rest_framework import serializers\nfrom .models import Participant, PensionFund\n\n\nclass ParticipantsSerializer(serializers.ModelSerializer):\n class Meta:\n model = Participant\n fields = (\"user\")\n\nclass PensionFundsSerializer(serializers.ModelSerializer):\n class Meta:\n model = PensionFund\n fields = (\"fund_name\", \"active\", \"start_date\", \"end_date\")\n" }, { "alpha_fraction": 0.6390284895896912, "alphanum_fraction": 0.6633166074752808, "avg_line_length": 30.421052932739258, "blob_id": "a552efb8cc31ba1fcf14c81532c3de917199b72a", "content_id": "4899a2a8fdeccee8453f87f49c51340cdbe2d4c3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1194, "license_type": "no_license", "max_line_length": 98, "num_lines": 38, "path": "/backend/boldapi/giftcards/tests.py", "repo_name": "odysseyhack/boldchain", "src_encoding": "UTF-8", "text": "import datetime\n\nfrom django.test import TestCase\nfrom django.utils import timezone\nfrom rest_framework.test import APIRequestFactory\n\nfrom .models import Giftcard\nfrom .views import giftcard_valid\n\n\nclass GiftcardModelTests(TestCase):\n\n def test_giftcard_is_invalid(self):\n '''\n Gift card is not valid\n '''\n factory = APIRequestFactory()\n request = factory.get('http://127.0.0.1:8000/giftcards/valid?barcode=INVALIDBARCODE')\n response = giftcard_valid(request)\n\n self.assertEqual(response.status_code, 400)\n self.assertEqual(response.data['msg'], 'Invalid barcode')\n\n def test_giftcard_is_valid(self):\n '''\n Valid giftcard, return amount\n '''\n barcode = 'abc'\n amount = 10.0\n Giftcard.objects.create(barcode=barcode, 
amount=amount)\n\n factory = APIRequestFactory()\n request = factory.get('http://127.0.0.1:8000/giftcards/valid?barcode={}'.format(barcode))\n response = giftcard_valid(request)\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.data['barcode'], barcode)\n self.assertEqual(response.data['amount'], amount)\n" }, { "alpha_fraction": 0.6289381384849548, "alphanum_fraction": 0.63710618019104, "avg_line_length": 29.60714340209961, "blob_id": "7db8a160ed0ef32352b276690db5096f6dd663d1", "content_id": "65cbb21fc57f3fd5a0808483755ee8ab5008a91f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 857, "license_type": "no_license", "max_line_length": 98, "num_lines": 28, "path": "/backend/boldapi/giftcards/management/commands/create_giftcards.py", "repo_name": "odysseyhack/boldchain", "src_encoding": "UTF-8", "text": "import random\n\nfrom django.contrib.auth.models import User\nfrom django.core.management.base import BaseCommand\nfrom django.utils import timezone\nfrom django.utils.crypto import get_random_string\n\nfrom giftcards.models import Giftcard, PensionEntity\n\n\nclass Command(BaseCommand):\n help = 'Create test giftcards'\n\n def add_arguments(self, parser):\n parser.add_argument('total', type=int, help='Indicates the number of users to be created')\n\n def handle(self, *args, **kwargs):\n total = kwargs['total']\n\n for i in range(total):\n kwargs = {\n 'barcode': get_random_string(length=10),\n 'issued_by': PensionEntity.objects.order_by('?')[0],\n 'amount': random.randint(10, 50),\n 'created': timezone.now()\n }\n\n Giftcard.objects.create(**kwargs)\n" }, { "alpha_fraction": 0.572705864906311, "alphanum_fraction": 0.5868235230445862, "avg_line_length": 39.86538314819336, "blob_id": "7143d82997b94e0946aca9f121491dc1712ab1cc", "content_id": "25d38a304a3365e5559051e802a41e9afc75b361", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2125, "license_type": "no_license", "max_line_length": 122, "num_lines": 52, "path": "/backend/boldapi/giftcards/migrations/0001_initial.py", "repo_name": "odysseyhack/boldchain", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1.7 on 2019-04-12 17:32\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\nimport uuid\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Giftcard',\n fields=[\n ('barcode', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),\n ('amount', models.FloatField(default=0.0)),\n ('created', models.DateTimeField(verbose_name='Date of creation')),\n ('validity', models.DateTimeField(verbose_name='Valid until')),\n ],\n ),\n migrations.CreateModel(\n name='Participant',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('birth_date', models.DateField(blank=True, null=True)),\n ('email', models.EmailField(max_length=100)),\n ('image', models.ImageField(upload_to='')),\n ('bio', models.TextField(blank=True, max_length=500)),\n ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.CreateModel(\n name='PensionEntity',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, 
verbose_name='ID')),\n ('email', models.EmailField(max_length=100)),\n ('bio', models.TextField(blank=True, max_length=500)),\n ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.AddField(\n model_name='giftcard',\n name='issued_by',\n field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='giftcards.PensionEntity'),\n ),\n ]\n" }, { "alpha_fraction": 0.7233009934425354, "alphanum_fraction": 0.7233009934425354, "avg_line_length": 24.75, "blob_id": "b719870b8fb42fa82a40d1e409b48df21e28c335", "content_id": "93a6dfc10d7b523bf237ae5e1a126b054e416faf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 206, "license_type": "no_license", "max_line_length": 55, "num_lines": 8, "path": "/backend/boldapi/giftcards/serializers.py", "repo_name": "odysseyhack/boldchain", "src_encoding": "UTF-8", "text": "from rest_framework import serializers\nfrom .models import Giftcard\n\n\nclass GiftcardsSerializer(serializers.ModelSerializer):\n class Meta:\n model = Giftcard\n fields = (\"barcode\", \"amount\")\n" }, { "alpha_fraction": 0.7368420958518982, "alphanum_fraction": 0.7368420958518982, "avg_line_length": 24.33333396911621, "blob_id": "88b8d7a49525cc99cacc015cc00a627fe091c79b", "content_id": "85ac3b3be68f2674a23697c0e440c963635b6128", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 76, "license_type": "no_license", "max_line_length": 41, "num_lines": 3, "path": "/frontend-vue/src/vue-material.ts", "repo_name": "odysseyhack/boldchain", "src_encoding": "UTF-8", "text": "declare module 'vue-material' {\n import VueMaterial from 'vue-material';\n}\n" }, { "alpha_fraction": 0.699999988079071, "alphanum_fraction": 0.699999988079071, "avg_line_length": 14.714285850524902, "blob_id": "2935347407c747cf8739b8f29cf99844a16c988d", "content_id": "b529d98fe6836551396aa1058ef17d5cf4c33791", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 110, "license_type": "no_license", "max_line_length": 40, "num_lines": 7, "path": "/backend/boldapi/giftcards/urls.py", "repo_name": "odysseyhack/boldchain", "src_encoding": "UTF-8", "text": "from django.urls import path\n\nfrom . 
import views\n\nurlpatterns = [\n path('valid', views.giftcard_valid),\n]\n" }, { "alpha_fraction": 0.7634408473968506, "alphanum_fraction": 0.7634408473968506, "avg_line_length": 17.600000381469727, "blob_id": "7ce99ff5eb5459178278e42176b3b30bfada9796", "content_id": "757bccfe27530b8312fc484a2750ea8b60caf4d9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 93, "license_type": "no_license", "max_line_length": 33, "num_lines": 5, "path": "/backend/boldapi/mockdigid/apps.py", "repo_name": "odysseyhack/boldchain", "src_encoding": "UTF-8", "text": "from django.apps import AppConfig\n\n\nclass MockdigidConfig(AppConfig):\n name = 'mockdigid'\n" }, { "alpha_fraction": 0.5896487832069397, "alphanum_fraction": 0.5970425009727478, "avg_line_length": 35.06666564941406, "blob_id": "783d32d070d71877befb8811b59698f414119859", "content_id": "3e925012c61c247031282bb2e57bde2ababb180d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1082, "license_type": "no_license", "max_line_length": 98, "num_lines": 30, "path": "/backend/boldapi/mockdigid/management/commands/create_pension_funds.py", "repo_name": "odysseyhack/boldchain", "src_encoding": "UTF-8", "text": "import random\n\nfrom django.contrib.auth.models import User\nfrom django.core.management.base import BaseCommand\nfrom django.utils.crypto import get_random_string\n\nfrom mockdigid.models import Participant, PensionFund\n\n\nclass Command(BaseCommand):\n help = 'Create test pension funds'\n\n def add_arguments(self, parser):\n parser.add_argument('total', type=int, help='Indicates the number of users to be created')\n\n def handle(self, *args, **kwargs):\n total = kwargs['total']\n\n for i in range(total):\n kwargs = {\n 'session_id': 'sess_id_{}'.format(get_random_string(length=5)),\n 'bsn': 'BSN_{}'.format(i),\n 'participant': Participant.objects.order_by('?')[0],\n 'fund_name': random.choice(['abp', 'pfzw', 'sf', 'gf']),\n 'active': random.choice([True, False]),\n 'ascription': random.choice(['ABP', 'PFZW', 'Some Fund', 'Gold Fund']),\n 'fulltime_salary': random.randint(100, 200)\n }\n\n PensionFund.objects.create(**kwargs)\n" }, { "alpha_fraction": 0.5905420780181885, "alphanum_fraction": 0.5905420780181885, "avg_line_length": 31.11111068725586, "blob_id": "ba02f1937b92852af2dda6cf88746d08f6a40c8e", "content_id": "2de754ffc53e8967f5cc2e68694c0e4d8b45528f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 867, "license_type": "no_license", "max_line_length": 98, "num_lines": 27, "path": "/backend/boldapi/mockdigid/management/commands/create_users.py", "repo_name": "odysseyhack/boldchain", "src_encoding": "UTF-8", "text": "from django.contrib.auth.models import User\nfrom django.core.management.base import BaseCommand\nfrom django.utils.crypto import get_random_string\n\nfrom mockdigid.models import Participant\n\n\nclass Command(BaseCommand):\n help = 'Create test users'\n\n def add_arguments(self, parser):\n parser.add_argument('total', type=int, help='Indicates the number of users to be created')\n\n def handle(self, *args, **kwargs):\n total = kwargs['total']\n\n for i in range(total):\n kwargs = {\n 'username': 'test{}'.format(i),\n 'password': 'test',\n 'first_name': 'first{}'.format(i),\n 'last_name': 'last{}'.format(i),\n 'email': 'test{}@admin.com'.format(i)\n }\n\n user = User.objects.create_user(**kwargs)\n Participant.objects.create(user=user)\n" }, { 
"alpha_fraction": 0.6442244052886963, "alphanum_fraction": 0.6613861322402954, "avg_line_length": 35.95121765136719, "blob_id": "7f5fcfb83afe5c690cd8d393ffdbb60b7de8d632", "content_id": "8fdf7fe8bbb3857019e57ae8fe98fff3fb0cc980", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1515, "license_type": "no_license", "max_line_length": 130, "num_lines": 41, "path": "/backend/boldapi/mockdigid/tests.py", "repo_name": "odysseyhack/boldchain", "src_encoding": "UTF-8", "text": "import datetime\n\nfrom django.contrib.auth.models import User\nfrom django.test import TestCase\nfrom django.utils import timezone\nfrom rest_framework.test import APIRequestFactory\n\nfrom .models import Participant\nfrom .views import authenticate_digid\n\n\nclass ParticipantModelTests(TestCase):\n\n def test_participant_is_invalid(self):\n '''\n Participant does not exist in the system\n '''\n factory = APIRequestFactory()\n request = factory.post('http://127.0.0.1:8000/mockdigid/authenticate?username=TEST&password=TEST')\n response = authenticate_digid(request)\n\n self.assertEqual(response.status_code, 401)\n self.assertEqual(response.data['msg'], 'Username or password is wrong')\n\n def test_participant_is_valid(self):\n '''\n Valid Participant, return details\n '''\n username = 'abc'\n user = User.objects.create_user(username=username, password=username,\n first_name='a', last_name='a')\n p = Participant.objects.create(user=user)\n\n factory = APIRequestFactory()\n request = factory.post('http://127.0.0.1:8000/mockdigid/authenticate?username={}&password={}'.format(username, username))\n response = authenticate_digid(request)\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.data['first_name'], 'a')\n self.assertEqual(response.data['last_name'], 'a')\n self.assertEqual(response.data['bio'], '')\n" }, { "alpha_fraction": 0.5051282048225403, "alphanum_fraction": 0.5897436141967773, "avg_line_length": 20.66666603088379, "blob_id": "fff488ae8df6031a68913426e72d94c10c8f963e", "content_id": "28f74bdbc517d3416b46364d390538f78530979e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 390, "license_type": "no_license", "max_line_length": 49, "num_lines": 18, "path": "/backend/boldapi/mockdigid/migrations/0004_pensionfund_amount.py", "repo_name": "odysseyhack/boldchain", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1.7 on 2019-04-13 13:39\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('mockdigid', '0003_auto_20190413_1356'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='pensionfund',\n name='amount',\n field=models.FloatField(default=0.0),\n ),\n ]\n" }, { "alpha_fraction": 0.7083333134651184, "alphanum_fraction": 0.7083333134651184, "avg_line_length": 38, "blob_id": "30624f586be039be078156b815eed2a5fb0626cd", "content_id": "60868a83396251bde384cf1f7ed92b88a9bff00a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 312, "license_type": "no_license", "max_line_length": 110, "num_lines": 8, "path": "/frontend-vue/src/store/main/types.ts", "repo_name": "odysseyhack/boldchain", "src_encoding": "UTF-8", "text": "export interface MainState {\n}\n\nexport type RedeemCodeAction = (redeemCode: string) => Promise<{}>;\n\nexport type ContributeAction = (payload: { username: string, password: string, code: string }) => Promise<{}>;\n\nexport type 
AuthenticateAction = (payload: { username: string, password: string }) => Promise<{}>;\n" } ]
36
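The `giftcards` tests in the record above exercise a `giftcard_valid` view that does not appear in this excerpt. Below is a minimal sketch of a view satisfying the tested contract (400 with `msg: 'Invalid barcode'` for unknown codes; 200 with `barcode` and `amount` otherwise), assuming Django REST Framework's function-view decorator. This is a hypothetical reconstruction, not the repository's recorded code.

```python
# Hypothetical giftcards/views.py consistent with tests.py above.
from rest_framework.decorators import api_view
from rest_framework.response import Response

from .models import Giftcard
from .serializers import GiftcardsSerializer


@api_view(['GET'])
def giftcard_valid(request):
    barcode = request.query_params.get('barcode')
    if not barcode:
        return Response({'msg': 'Invalid barcode'}, status=400)
    try:
        giftcard = Giftcard.objects.get(barcode=barcode)
    except Giftcard.DoesNotExist:
        # test_giftcard_is_invalid expects this branch.
        return Response({'msg': 'Invalid barcode'}, status=400)
    # test_giftcard_is_valid expects barcode and amount, which is exactly
    # what GiftcardsSerializer exposes.
    return Response(GiftcardsSerializer(giftcard).data)
```

Because `@api_view` wraps a plain function, the tests can call `giftcard_valid(request)` directly on a request built with `APIRequestFactory`, without URL routing.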
MarcusviniciusLsantos/ml-flask-react-app
https://github.com/MarcusviniciusLsantos/ml-flask-react-app
95b684c51808ff3d451924d87edb0864a31d243a
3416c4176d9af725c4efb6ec51e2ea49c46a230c
48441b86213b6c3c4db4e858834fbddf5fe728d2
refs/heads/master
2022-11-03T21:18:54.978796
2021-02-01T04:12:37
2021-02-01T04:12:37
229,195,179
0
0
null
2019-12-20T05:19:41
2021-02-01T04:12:39
2022-09-30T18:45:45
JavaScript
[ { "alpha_fraction": 0.7556406855583191, "alphanum_fraction": 0.7743721008300781, "avg_line_length": 32.57143020629883, "blob_id": "da5151e0997769260457659fb465c48d599b68c4", "content_id": "169b56f971769881210abbca1212697d1fe603e9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2349, "license_type": "no_license", "max_line_length": 169, "num_lines": 70, "path": "/back-app/model_generator.py", "repo_name": "MarcusviniciusLsantos/ml-flask-react-app", "src_encoding": "UTF-8", "text": "# Import libraries\nimport pandas as pd\nfrom sklearn.preprocessing import Imputer\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import confusion_matrix, accuracy_score\nimport numpy as np\nfrom sklearn.externals import joblib\nfrom sklearn.svm import SVC\nfrom sklearn.preprocessing import StandardScaler\n\n\n#%%\n# Get the dataset\nbase = pd.read_csv('/home/vinicius/Documents/IA/Python/credit_data1.csv')\nbase.loc[base.idade < 0, 'idade'] = 40.92\n \nprevisores = base.iloc[:, 1:4].values\nclasse = base.iloc[:, 4].values\n\nimputer = Imputer(missing_values = 'NaN', strategy = 'mean', axis = 0)\nimputer = imputer.fit(previsores[:, 1:4])\nprevisores[:, 1:4] = imputer.transform(previsores[:, 1:4])\n\n# Split the dataset into training (75%) and testing (25%) data\nprevisores_treinamento, previsores_teste, classe_treinamento, classe_teste = train_test_split(\nprevisores, classe, test_size=0.25, random_state=0)\n\n# Build the classifier and make prediction\nclassificador = RandomForestClassifier(n_estimators=40, criterion='entropy', random_state=0)\nclassificador.fit(previsores_treinamento, classe_treinamento)\nprevisoes = classificador.predict(previsores_teste)\n\n\n# Confusion matrix\nprecisao = accuracy_score(classe_teste, previsoes)\nmatriz = confusion_matrix(classe_teste, previsoes)\n\n\n# Save the model to disk\njoblib.dump(classificador, 'classifier.joblib')\n\n\n#%%%\n\n#data 02\nbase = pd.read_csv('/home/vinicius/Documents/mestrado-UFAL/Topico-especial-em-mineracao-e-exploracao-de-dados/project-spotify/base02_genero_no_artist.csv')\n\n#provisores\natributos = base.iloc[:,1:5].values\n\n#classes\nY = base.iloc[:,0].values\n\n\n# Split the dataset into training (75%) and testing (25%) data\nprevisores_treinamento, previsores_teste, classe_treinamento, classe_teste = train_test_split(\natributos, Y, test_size=0.25, random_state=0)\n\nclassificador = SVC(kernel='linear')\n\nclassificador.fit(previsores_treinamento, classe_treinamento)\nprevisoes = classificador.predict(previsores_teste)\n\n\n# Confusion matrix\nprecisao = accuracy_score(classe_teste, previsoes)\nmatriz = confusion_matrix(classe_teste, previsoes)\n\njoblib.dump(classificador, '/home/vinicius/Documents/mestrado-UFAL/Topico-especial-em-mineracao-e-exploracao-de-dados/app_ml_basic/back-app/classifier_spotify01.joblib')" }, { "alpha_fraction": 0.4794113039970398, "alphanum_fraction": 0.4916882812976837, "avg_line_length": 36.47142791748047, "blob_id": "dfa4e8f7219cef836d8fb1b0163a734865733fee", "content_id": "21489d3d88ba1c246d7cffa83c260daa801d2ff2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 13114, "license_type": "no_license", "max_line_length": 254, "num_lines": 350, "path": "/front-app/src/App.js", "repo_name": "MarcusviniciusLsantos/ml-flask-react-app", "src_encoding": "UTF-8", "text": "import React, { Component } from 'react';\nimport 
'./App.css';\nimport Form from 'react-bootstrap/Form';\nimport Col from 'react-bootstrap/Col';\nimport Container from 'react-bootstrap/Container';\nimport Row from 'react-bootstrap/Row';\nimport Button from 'react-bootstrap/Button';\nimport 'bootstrap/dist/css/bootstrap.css';\n\nclass App extends Component {\n\n constructor(props) {\n super(props);\n\n this.state = {\n ButtonSpotify: true,\n ButtonCredit: true,\n isLoading: false,\n formData: {\n salary: 4500,\n age: 25,\n loan: 10000\n },\n result: \"\",\n formData2: {\n energy: 44,\n danceability: 20,\n liveness: 5,\n acousticness: 4\n },\n result2: \"\",\n genders: {\n 'dance pop': \"https://www.youtube.com/embed/-zYz0OhqzXM\",\n \"neo mellow\": \"https://www.youtube.com/embed/Yk9G7OyKwLM\",\n \"detroit hip hop\": \"https://www.youtube.com/embed/pq-zgi_RnY4\",\n \"pop\": \"https://www.youtube.com/embed/SlPhMPnQ58k\",\n \"canadian pop\": \"https://www.youtube.com/embed/q0hyYWKXF0Q\",\n \"hip pop\": \"https://www.youtube.com/embed/aIHF7u9Wwiw\",\n \"barbadian pop\": \"https://www.youtube.com/embed/9qaVcyYkDg4\",\n \"atl hip hop\": \"https://www.youtube.com/embed/5YGSVLhWo6A\",\n \"australian pop\": \"https://www.youtube.com/embed/q0hyYWKXF0Q\",\n \"indie pop\": \"https://www.youtube.com/embed/bpOSxM0rNPM\",\n \"art pop\": \"https://www.youtube.com/embed/JNJv-Ebi67I\",\n \"colombian pop\": \"https://www.youtube.com/embed/tbneQDc2H3I\",\n \"big room\": \"https://www.youtube.com/embed/nFjTcJT2dTw\",\n \"british soul\": \"https://www.youtube.com/embed/-nwdjQmc_N8\",\n \"chicago rap\": \"https://www.youtube.com/embed/YWyHZNBz6FE\",\n \"acoustic pop\": \"https://www.youtube.com/embed/uzgp65UnPxA\",\n \"permanent wave\": \"https://www.youtube.com/embed/7vQEucBgxGQ\",\n \"boy band\": \"https://www.youtube.com/embed/4fndeDfaWCg\",\n \"baroque pop\": \"https://www.youtube.com/embed/kZj-o42szuk\",\n \"celtic rock\": \"https://www.youtube.com/embed/Vj41xZHA5Eg\",\n \"electro\": \"https://www.youtube.com/embed/HhjHYkPQ8F0\",\n \"complextro\": \"https://www.youtube.com/embed/jp_pClmkqUo\",\n \"canadian hip hop\": \"https://www.youtube.com/embed/SN6jcMruHfA\",\n \"alaska indie\": \"https://www.youtube.com/embed/hIE2eNDmV5s\",\n \"folk-pop\": \"https://www.youtube.com/embed/OruY8u7Rhx8\",\n \"metropopolis\": \"https://www.youtube.com/embed/VJJhexFlJB0\",\n \"australian hip hop\": \"https://www.youtube.com/embed/aB16fJpoj-I\",\n \"electropop\": \"https://www.youtube.com/embed/qrO4YZeyl0I\",\n \"australian dance\": \"https://www.youtube.com/embed/m2vi6sfE8Ik\",\n \"candy pop\": \"https://www.youtube.com/embed/vjI1QTjfyYE\",\n \"hollywood\": \"https://www.youtube.com/embed/wpfqHdRoGPA\",\n \"canadian contemporary r&b\": \"https://www.youtube.com/embed/bnVUHWCynig\",\n \"irish singer-songwriter\": \"https://www.youtube.com/embed/eHQG6-DojVw\",\n \"tropical house\": \"https://www.youtube.com/embed/yFLtfaJwyfw\",\n \"belgian edm\": \"https://www.youtube.com/embed/bWPOLL_Rr8U\",\n \"french indie pop\": \"https://www.youtube.com/embed/eyk-Sliy8RU\",\n \"latin\": \"https://www.youtube.com/embed/OSUxrSe5GbI\",\n \"canadian latin\": \"https://www.youtube.com/embed/L86gQQBYSc4\",\n \"edm\": \"https://www.youtube.com/embed/gCYcHz2k5x0\",\n \"downtempo\": \"https://www.youtube.com/embed/iVTqxdEXFkA\",\n \"brostep\": \"https://www.youtube.com/embed/OF6fSgOFhvQ\",\n \"moroccan pop\": \"https://www.youtube.com/embed/ZvvcQrmbggw\",\n \"escape room\": \"https://www.youtube.com/embed/aW3-E3My-kc\",\n \"alternative r&b\": \"https://www.youtube.com/embed/X0t8zeEiqEY\",\n 
\"electronic trap\": \"https://www.youtube.com/embed/rpJtWTtxbCA\",\n \"danish pop\": \"https://www.youtube.com/embed/_xAjt64AoD0\"\n },\n youtubeUrl: \"\"\n };\n }\n\n handleChange = (event) => {\n const value = event.target.value;\n const name = event.target.name;\n var formData = this.state.formData;\n formData[name] = value;\n this.setState({\n formData\n });\n }\n\n handlePredictClick = (event) => {\n const formData = this.state.formData;\n this.setState({ isLoading: true });\n fetch('http://localhost:5000/prediction/',\n {\n headers: {\n 'Accept': 'application/json',\n 'Content-Type': 'application/json'\n },\n method: 'POST',\n body: JSON.stringify(formData)\n })\n .then(response => response.json())\n .then(response => {\n this.setState({\n result: response.result,\n isLoading: false\n });\n }).catch((err) => {\n this.setState({\n result: \"Api error\",\n isLoading: false\n })\n })\n }\n\n handleCancelClick = (event) => {\n this.setState({ result: \"\" });\n }\n\n handleChangeSpotify = (event) => {\n const value = event.target.value;\n const name = event.target.name;\n var formData2 = this.state.formData2;\n formData2[name] = value;\n this.setState({\n formData2\n });\n }\n\n handlePredictSpotifyClick = (event) => {\n const formData2 = this.state.formData2;\n this.setState({ isLoading: true });\n fetch('http://localhost:5000/prediction/spotify/',\n {\n headers: {\n 'Accept': 'application/json',\n 'Content-Type': 'application/json'\n },\n method: 'POST',\n body: JSON.stringify(formData2)\n })\n .then(response => response.json())\n .then(response => {\n console.log('testing', this.state.genders[\"dance pop\"][0])\n Object.keys(this.state.genders).map((item, index) => {\n // console.log('item ->', item)\n // console.log('resp ->', response.result)\n let item2 = `'${item}'`\n if (response.result === item2) {\n console.log('resp ->', response.result, \"item ->\", item)\n this.setState({\n youtubeUrl: this.state.genders[item]\n })\n console.log('url', this.state.genders[item])\n }\n })\n this.setState({\n result2: `Gender: ${response.result}`,\n isLoading: false\n });\n\n\n }).catch((err) => {\n console.log('err ->', err)\n this.setState({\n result2: \"Api error\",\n isLoading: false\n })\n })\n }\n\n handleCancelSpotifyClick = (event) => {\n this.setState({ result2: \"\" });\n }\n\n handleButtonSpotify = () => {\n this.setState({ ButtonSpotify: !this.state.ButtonSpotify, ButtonCredit: true })\n }\n\n handleButtonCredit = () => {\n this.setState({ ButtonCredit: !this.state.ButtonCredit, ButtonSpotify: true })\n }\n\n\n render() {\n const isLoading = this.state.isLoading;\n const formData = this.state.formData;\n const result = this.state.result;\n const formData2 = this.state.formData2;\n const result2 = this.state.result2;\n\n return (\n <Container>\n <div>\n <h2 className=\"title\">APP MACHINE LEARNING BASIC</h2>\n <Form>\n <Form.Row>\n <Form.Group as={Col}>\n <Button\n block\n variant=\"success\"\n onClick={this.handleButtonSpotify}>\n Gender ML APP\n </Button>\n </Form.Group>\n <Form.Group as={Col}>\n <Button\n block\n variant=\"info\"\n onClick={this.handleButtonCredit}>\n Credit ML APP\n </Button>\n </Form.Group>\n </Form.Row>\n </Form>\n <Form.Row>\n {!this.state.ButtonSpotify ?\n <Container>\n <div className=\"content\">\n <h4>Spotify Gender predict</h4>\n <Form>\n <Form.Row>\n <Form.Group as={Col}>\n <Form.Label>Energy</Form.Label>\n <Form.Control placeholder=\"Ex: 0-99\" value={formData2.energy} onChange={this.handleChangeSpotify} name=\"energy\" />\n </Form.Group>\n <Form.Group 
as={Col}>\n <Form.Label>Danceability</Form.Label>\n <Form.Control\n placeholder=\"Ex: 0-99\"\n value={formData2.danceability}\n name=\"danceability\"\n onChange={this.handleChangeSpotify} />\n </Form.Group>\n </Form.Row>\n <Form.Row>\n <Form.Group as={Col}>\n <Form.Label>Liveness</Form.Label>\n <Form.Control placeholder=\"Ex: 0-99\" value={formData2.liveness} onChange={this.handleChangeSpotify} name=\"liveness\" />\n </Form.Group>\n <Form.Group as={Col}>\n <Form.Label>Acousticness</Form.Label>\n <Form.Control placeholder=\"Ex: 0-99\" value={formData2.acousticness} onChange={this.handleChangeSpotify} name=\"acousticness\" />\n </Form.Group>\n </Form.Row>\n <Row>\n <Col>\n <Button\n block\n variant=\"success\"\n disabled={isLoading}\n onClick={!isLoading ? this.handlePredictSpotifyClick : null}>\n {isLoading ? 'Making prediction' : 'Predict'}\n </Button>\n </Col>\n <Col>\n <Button\n block\n variant=\"danger\"\n disabled={isLoading}\n onClick={this.handleCancelSpotifyClick}>\n Reset prediction\n </Button>\n </Col>\n </Row>\n </Form>\n {result2 === \"\" ? null :\n (\n <div>\n <Row>\n <Col className=\"result-container\">\n <h5 id=\"result\">{result2}</h5>\n </Col>\n </Row>\n <div style={{ alignItems: 'center' }}>\n <iframe style={{ marginTop: '2%', marginLeft: '8%' }} width=\"600\" height=\"200\" src={this.state.youtubeUrl} frameBorder=\"0\" allow=\"accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture\" allowFullScreen></iframe>\n </div>\n </div>)\n }\n\n </div>\n </Container>\n : null}\n\n\n {!this.state.ButtonCredit ?\n <div className=\"content\">\n <h4>Credit Data Predict (YES OR NO)</h4>\n <Form>\n <Form.Row>\n <Form.Group as={Col}>\n <Form.Label>Salary</Form.Label>\n <Form.Control placeholder=\"Ex: 4300\" value={formData.salary} onChange={this.handleChange} name=\"salary\" />\n </Form.Group>\n <Form.Group as={Col}>\n <Form.Label>Age</Form.Label>\n <Form.Control\n placeholder=\"Ex: 4300\"\n value={formData.age}\n name=\"age\"\n onChange={this.handleChange} />\n </Form.Group>\n </Form.Row>\n <Form.Row>\n <Form.Group as={Col}>\n <Form.Label>Loan</Form.Label>\n <Form.Control placeholder=\"Ex: 4300\" value={formData.loan} onChange={this.handleChange} name=\"loan\" />\n </Form.Group>\n </Form.Row>\n <Row>\n <Col>\n <Button\n block\n variant=\"success\"\n disabled={isLoading}\n onClick={!isLoading ? this.handlePredictClick : null}>\n {isLoading ? 'Making prediction' : 'Predict'}\n </Button>\n </Col>\n <Col>\n <Button\n block\n variant=\"danger\"\n disabled={isLoading}\n onClick={this.handleCancelClick}>\n Reset prediction\n </Button>\n </Col>\n </Row>\n </Form>\n {result === \"\" ? 
null :\n (<Row>\n <Col className=\"result-container\">\n <h5 id=\"result\">{result}</h5>\n </Col>\n </Row>)\n }\n </div> : null}\n </Form.Row>\n </div>\n </Container>\n );\n }\n}\n\nexport default App;" }, { "alpha_fraction": 0.4589235186576843, "alphanum_fraction": 0.6883852481842041, "avg_line_length": 15.045454978942871, "blob_id": "f2959e0667fe2094b0e58374069e9c324e8e646c", "content_id": "549540ed9c95322b72bec7c662bae0c61a3b6e02", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 353, "license_type": "no_license", "max_line_length": 22, "num_lines": 22, "path": "/back-app/requirements.txt", "repo_name": "MarcusviniciusLsantos/ml-flask-react-app", "src_encoding": "UTF-8", "text": "aniso8601==6.0.0\njoblib==0.14.1\nattrs==19.1.0\nClick==7.0\nFlask==1.0.2\nFlask-Cors==3.0.7\nflask-restplus==0.12.1\nitsdangerous==1.1.0\nJinja2==2.10.1\njsonschema==3.0.1\nMarkupSafe==1.1.1\nnumpy==1.16.2\npandas==0.24.2\npyrsistent==0.14.11\npython-dateutil==2.8.0\npytz==2019.1\nscikit-learn==0.21.2\nscipy==1.2.1\nsix==1.12.0\nsklearn==0.0\nWerkzeug>=0.15.3\nwget==3.2\n" }, { "alpha_fraction": 0.7142857313156128, "alphanum_fraction": 0.7142857313156128, "avg_line_length": 20, "blob_id": "4fb940b40e62019e782a9a31678ce4178a30e02d", "content_id": "d14d0ae5847f894743a96258cc1bd80c2c08779f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 20, "num_lines": 1, "path": "/README.md", "repo_name": "MarcusviniciusLsantos/ml-flask-react-app", "src_encoding": "UTF-8", "text": "# ml-flask-react-app\n" } ]
4
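`model_generator.py` above trains and serializes the classifiers that the React frontend queries, but the Flask server itself is not part of this excerpt. Below is a sketch of a minimal endpoint matching the `fetch('http://localhost:5000/prediction/')` call in `App.js`; the endpoint shape and label mapping are assumptions, not the repository's recorded code. `Flask-Cors` is pinned in the repo's `requirements.txt`, which is why CORS is enabled here.

```python
# Hypothetical Flask backend matching App.js; not the repository's recorded code.
import joblib
from flask import Flask, jsonify, request
from flask_cors import CORS

app = Flask(__name__)
CORS(app)  # App.js is served from another origin (the React dev server)

model = joblib.load('classifier.joblib')  # written by model_generator.py


@app.route('/prediction/', methods=['POST'])
def prediction():
    data = request.get_json()
    features = [[float(data['salary']), float(data['age']), float(data['loan'])]]
    label = int(model.predict(features)[0])
    # App.js renders response.result verbatim.
    return jsonify({'result': 'YES' if label == 1 else 'NO'})


if __name__ == '__main__':
    app.run(port=5000)
```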
gracetikaa/Test-Case-Speech-Classification
https://github.com/gracetikaa/Test-Case-Speech-Classification
8a5adfa3928d9a19789dcd7fbd4129eec627a1c9
9cb18f05e0f153e3974887787563699964f5452d
a9649cb012f1d2e1a8c676893ad4460ac021ee16
refs/heads/master
2020-06-04T21:36:17.147803
2019-08-14T01:32:49
2019-08-14T01:32:49
192,199,240
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6241743564605713, "alphanum_fraction": 0.6373844146728516, "avg_line_length": 31.212766647338867, "blob_id": "f6d4e98878deec91e364bb01abd3742b820b98c3", "content_id": "9dda16fe3224019f6b40362bd6414d3efc465531", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1514, "license_type": "no_license", "max_line_length": 93, "num_lines": 47, "path": "/main.py", "repo_name": "gracetikaa/Test-Case-Speech-Classification", "src_encoding": "UTF-8", "text": "from sklearn.model_selection import train_test_split\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Conv2D, Flatten\nfrom keras.utils import to_categorical\nimport os\nimport numpy as np\nimport pandas as pd\nimport soundfile as sf\n\n\ndef readfile(path):\n dataset = []\n label = []\n k = 1\n for folder in os.listdir(path):\n dir = os.path.join(path)\n directory = dir+\"/\"+folder\n for root,dirs,files in os.walk(directory):\n for i in files:\n if i.endswith(\".flac\"):\n file = os.path.abspath(directory+\"/\"+i)\n data, samplerate = sf.read(file)\n dataset.append(data)\n label.append(k)\n k+=1\n # print(dataset[0].shape)\n df = pd.DataFrame({'data':dataset,'label':label})\n return df\n\ndata = readfile('dataset')\na = data.loc[:,'data']\nb = data.loc[:,'label']\n\n(x_train, x_test, y_train, y_test) = train_test_split(a, b, test_size = 0.25, shuffle = True)\nx_train = [x_train,x_train]\nx_test = [x_test,x_test]\ny_train = to_categorical(y_train)\ny_test = to_categorical(y_test)\n\nmodel = Sequential()\nmodel.add(Conv2D(64, kernel_size=3, activation='relu', input_shape=x_train[0].shape))\nmodel.add(Conv2D(32, kernel_size=3, activation='relu'))\nmodel.add(Flatten())\nmodel.add(Dense(10, activation='softmax'))\n\nmodel.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])\nmodel.fit(x_train, y_train, validation_data=(x_test, y_test), epochs=10)\n" } ]
1
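As recorded, `main.py` above cannot train: the clips read by `soundfile` have different lengths, so they cannot be stacked into one tensor; `x_train = [x_train, x_train]` duplicates the split instead of reshaping it (Keras then sees two inputs for a one-input model); `Conv2D` with `input_shape=x_train[0].shape` expects image-shaped input rather than 1-D audio; and the labels start at 1, leaving a dead class after `to_categorical`. A sketch of one way to make the shapes consistent, assuming mono clips; `FIXED_LEN` and the layer sizes are illustrative choices, not values from the repository:

```python
# Pad/truncate every clip to one length, then use 1-D convolutions.
import numpy as np
from keras.models import Sequential
from keras.layers import Conv1D, Flatten, Dense
from keras.utils import to_categorical

FIXED_LEN = 16000  # e.g. one second at 16 kHz (illustrative)


def to_fixed_length(clip, length=FIXED_LEN):
    clip = np.asarray(clip, dtype=np.float32)[:length]   # truncate long clips
    return np.pad(clip, (0, length - len(clip)))         # zero-pad short ones


# X: (samples, time, channels), the shape Conv1D consumes.
X = np.stack([to_fixed_length(clip) for clip in dataset])[..., np.newaxis]
y = to_categorical([l - 1 for l in label])  # shift labels to start at 0

model = Sequential([
    Conv1D(64, kernel_size=9, activation='relu', input_shape=(FIXED_LEN, 1)),
    Conv1D(32, kernel_size=9, activation='relu'),
    Flatten(),
    Dense(y.shape[1], activation='softmax'),
])
model.compile(optimizer='adam', loss='categorical_crossentropy',
              metrics=['accuracy'])
```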
VijayMuraliG/SHOWING-USER-DEATILS
https://github.com/VijayMuraliG/SHOWING-USER-DEATILS
ae79b8ea376f6b41aa47781e028ed6287f4ab5e3
b90878b5337f71daed091a56a46a47565c274b1e
f1e972790f27ac6819f011e7d80a96f872c7353a
refs/heads/master
2021-01-01T03:46:10.118298
2016-05-10T09:35:23
2016-05-10T09:35:23
58,447,794
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7380457520484924, "alphanum_fraction": 0.7692307829856873, "avg_line_length": 27.117647171020508, "blob_id": "42f6f7abb3a0032539681c500ce912c52eed7d57", "content_id": "2006ad79de9ff6ec7efdc0d2cee5ea04992fc0e8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 481, "license_type": "no_license", "max_line_length": 63, "num_lines": 17, "path": "/models.py", "repo_name": "VijayMuraliG/SHOWING-USER-DEATILS", "src_encoding": "UTF-8", "text": "from django.db import models\n\nclass Signin(models.Model):\n\tuname=models.CharField(max_length=200)\n\tmnumber=models.CharField(max_length=200)\n\tmailid=models.CharField(max_length=200)\n\taddress=models.CharField(max_length=200)\n\tpassword=models.IntegerField()\n\tconfirmpassword=models.IntegerField()\n\n\tdef vijay(self):\n\t\treturn \"%s - %s -%s\" %(self.uname,self.address,self.password)\n\n\nclass Login(models.Model):\n\tuname=models.CharField(max_length=200)\n\tpassword=models.IntegerField()\n\t\n\n" }, { "alpha_fraction": 0.6558098793029785, "alphanum_fraction": 0.6575704216957092, "avg_line_length": 31.705883026123047, "blob_id": "d397ed568fcdadb27509d436d78f0c120804dde1", "content_id": "d2493a283573b3475288820079fd83b73d54c5cd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1136, "license_type": "no_license", "max_line_length": 147, "num_lines": 34, "path": "/views.py", "repo_name": "VijayMuraliG/SHOWING-USER-DEATILS", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nfrom .models import Login,Signin\n\n\ndef index(request):\n\treturn render(request,'index.html')\n\ndef form(request):\n\treturn render(request,'form.html')\n\ndef form1(request):\n\tif request.method == 'POST':\n\t\tsuname = request.POST.get('uname')\n smnumber = request.POST.get('mnumber')\n smailid = request.POST.get('mailid')\n saddress = request.POST.get('address')\n spassword = request.POST.get('password')\n sconfirmpassword= request.POST.get('confirmpassword')\n q = Signin(uname=suname,mnumber=smnumber,mailid=smailid,address=saddress,password=spassword,confirmpassword=sconfirmpassword)\n q.save()\n \n return render(request,'loginform.html') \n\ndef form2(request):\n\tsuname = request.POST.get('uname')\n\tspassword = request.POST.get('password')\n\ti=Signin.objects.get(uname=suname)\n\ti.id\n\ti.uname\n\ti.mnumber\n\ti.address\n\ti.mailid\n\ti.password\n\treturn render(request,'details.html',{'uname':suname,'umnumber':i.mnumber,'uid':i.id,'mailid':i.mailid,'address':i.address,'password':i.password})\n \n\t\t\n\t\t\n\n\n\n\n\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.5397923588752747, "alphanum_fraction": 0.5605536103248596, "avg_line_length": 27.899999618530273, "blob_id": "275fb6af61e0c4c61e43342dfb45017ca179ea50", "content_id": "091c98e1980e09eea857a4a83bf00e6cae116c70", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 289, "license_type": "no_license", "max_line_length": 57, "num_lines": 10, "path": "/urls.py", "repo_name": "VijayMuraliG/SHOWING-USER-DEATILS", "src_encoding": "UTF-8", "text": "from django.conf.urls import url\nfrom . 
import views\n\nurlpatterns=[url(r'^$',views.index,name='index'),\n url(r'dhaba/form/$',views.form,name='form'),\n\n\n url(r'form1/$',views.form1,name='form1'),\n url(r'form2/$',views.form2,name='form2'),\n ]\n" }, { "alpha_fraction": 0.7385321259498596, "alphanum_fraction": 0.7385321259498596, "avg_line_length": 26.25, "blob_id": "583bfa279a9a477d00e869bfb50f2eb4aa53d263", "content_id": "46d1cb50625f2a4fadc3eead464b8fb9b435b65c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 218, "license_type": "no_license", "max_line_length": 116, "num_lines": 8, "path": "/README.md", "repo_name": "VijayMuraliG/SHOWING-USER-DEATILS", "src_encoding": "UTF-8", "text": "# SHOWING-USER-DEATILS\n\nTHIS IS THE WEBSITE:\n\n\n\n this website gets the user input and store it into database and diplay the user detail which the user entered \n this is all done by the Django and python code\n" } ]
4
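Two problems in the `views.py` above are worth flagging: `form1` mixes tabs and spaces for indentation (Python 3 rejects this with a `TabError`), and `form2` authenticates by username alone; the submitted password is read but never compared, and an unknown username makes `Signin.objects.get(...)` raise an unhandled `DoesNotExist`. A sketch of a safer `form2` follows (a hypothetical rewrite; it keeps the app's plain-text password scheme only for illustration, since real code should use Django's auth framework with hashed passwords):

```python
# Hypothetical replacement for form2: consistent indentation, real credential check.
from django.shortcuts import render

from .models import Signin


def form2(request):
    uname = request.POST.get('uname')
    password = request.POST.get('password')
    user = Signin.objects.filter(uname=uname).first()
    if user is None or str(user.password) != str(password):
        # The recorded view would return a 500 here (unhandled DoesNotExist).
        return render(request, 'loginform.html', {'error': 'invalid credentials'})
    return render(request, 'details.html', {
        'uname': user.uname, 'umnumber': user.mnumber, 'uid': user.id,
        'mailid': user.mailid, 'address': user.address, 'password': user.password,
    })
```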
NatalieSkornyakova/AbityIRNITUBot
https://github.com/NatalieSkornyakova/AbityIRNITUBot
1fa0e62c0c60f0db1f92c069359f4412a1bd2c51
01d78bef1a279ae82a9bd2fde32b93758dc9414e
77ce49251cc73911e695ab86d09d11d9721cf2a9
refs/heads/master
2020-08-18T05:26:34.068852
2019-10-17T09:16:50
2019-10-17T09:16:50
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7294924259185791, "alphanum_fraction": 0.7379972338676453, "avg_line_length": 51.60293960571289, "blob_id": "2a77c027ef64d0fb0e9a7e74cb598f12899e1160", "content_id": "9ca58060f9cef5f95200ce1f64262008364e351f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4208, "license_type": "no_license", "max_line_length": 195, "num_lines": 68, "path": "/AbityIrnituBot.py", "repo_name": "NatalieSkornyakova/AbityIRNITUBot", "src_encoding": "UTF-8", "text": "#Настройки\r\nimport apiai, json\r\nimport telebot\r\nfrom telegram.ext import Updater, CommandHandler, MessageHandler, Filters\r\nfrom telebot import types\r\nfrom aiogram.types import ReplyKeyboardRemove, ReplyKeyboardMarkup, KeyboardButton, InlineKeyboardMarkup, InlineKeyboardButton\r\nbot = telebot.TeleBot('981156430:AAFLH8w6tIWGkaXC5iPkU7CMXvYI2R4uS8M')\r\nupdater = Updater(token='981156430:AAFLH8w6tIWGkaXC5iPkU7CMXvYI2R4uS8M') \r\ndispatcher = updater.dispatcher\r\n\r\n\r\n\r\n@bot.message_handler(commands=[\"start\"])\r\ndef start(m):\r\n\tmsg = bot.send_message(m.chat.id, \"Вас приветствует AbityBot помощь /help\")\r\n\tkeyboard = types.ReplyKeyboardMarkup(resize_keyboard=True)\r\n\tkeyboard.add(*[types.KeyboardButton(name) for name in ['СПО']])\r\n\tkeyboard.add(*[types.KeyboardButton(name) for name in ['Бакалавриат, специалитет']])\r\n\tkeyboard.add(*[types.KeyboardButton(name) for name in ['Магистратура']])\r\n\tkeyboard.add(*[types.KeyboardButton(name) for name in ['Аспирантура']])\r\n\tbot.send_message(m.chat.id, 'На какой уровень образования собираешься поступать?',\r\n\t\treply_markup=keyboard)\r\n\tbot.register_next_step_handler(msg, name)\r\ndef name(m):\r\n\tif m.text == 'СПО':\r\n\t\tkeyboard = types.ReplyKeyboardMarkup(resize_keyboard=True)\r\n\t\tbot.send_message(m.chat.id, 'Специальности на сайте: https://www.istu.edu/abiturientu/napravleniya/spo ',\r\n\t\t\treply_markup=keyboard)\r\n\telif m.text == 'Бакалавриат, специалитет':\r\n\t\tkeyboard = types.ReplyKeyboardMarkup(resize_keyboard=True)\r\n\t\tbot.send_message(m.chat.id, 'Специальности: Для очной-https://www.istu.edu/abiturientu/napravleniya/bakalavriat. Для заочной-https://www.istu.edu/abiturientu/napravleniya/bakalavriat_zaoch ',\r\n\t\t\treply_markup=keyboard)\r\n\telif m.text == 'Магистратура':\r\n\t\tkeyboard = types.ReplyKeyboardMarkup(resize_keyboard=True)\r\n\t\tbot.send_message(m.chat.id, 'Специальности: Для очной-https://www.istu.edu/abiturientu/napravleniya/magistratura. Для заочной-https://www.istu.edu/abiturientu/napravleniya/magistratura_zaoch ',\r\n\t\t\treply_markup=keyboard)\r\n\telif m.text == 'Аспирантура':\r\n\t\tkeyboard = types.ReplyKeyboardMarkup(resize_keyboard=True)\r\n\t\tbot.send_message(m.chat.id, 'Специальности: Для очной-https://www.istu.edu/abiturientu/napravleniya/aspirantura. 
Для заочной-https://www.istu.edu/abiturientu/napravleniya/aspirantura_zaoch ',\r\n\t\t\treply_markup=keyboard)\r\n\r\n@bot.message_handler(commands = ['url'])\r\ndef url(message):\r\n\tmarkup = types.InlineKeyboardMarkup()\r\n\tbtn_my_site= types.InlineKeyboardButton(text='Сайт Ирниту', url='https://www.istu.edu/')\r\n\tmarkup.add(btn_my_site)\r\n\tbot.send_message(message.chat.id, \"Нажми на кнопку и перейди на наш сайт.\", reply_markup = markup)\r\n@bot.message_handler(commands = ['help'])\r\ndef text(message):\r\n\tmarkup = types.InlineKeyboardMarkup()\r\n\tbot.send_message(message.chat.id, \"/start - начать работу , /v + вопрос - общие вопросы , /help - помощь\", reply_markup = markup)\r\n@bot.message_handler(commands = ['v'])\r\ndef text_message(message):\r\n request = apiai.ApiAI('af519e66270f48249a973bb8ddb5317d').text_request() # Токен API к Dialogflow\r\n request.lang = 'ru' # На каком языке будет послан запрос\r\n request.session_id = 'AbityTest_bot' # ID Сессии диалога (нужно, чтобы потом учить бота)\r\n request.query = message.text # Посылаем запрос к ИИ с сообщением от юзера\r\n responseJson = json.loads(request.getresponse().read().decode('utf-8'))\r\n response = responseJson['result']['fulfillment']['speech'] # Разбираем JSON и вытаскиваем ответ\r\n # Если есть ответ от бота - присылаем юзеру, если нет - бот его не понял\r\n if response:\r\n bot.send_message(message.chat.id, text=response)\r\n else:\r\n bot.send_message(message.chat.id, text='Я Вас не совсем понял!')\r\n\r\n\r\n\r\nbot.polling()\r\n" } ]
1
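In the bot above (its menus and replies are in Russian: the `/start` keyboard asks which level of study the applicant is entering, vocational, bachelor/specialist, master's, or postgraduate, and answers with admissions URLs), the `/v` handler forwards `message.text` to Dialogflow unmodified, so the literal `/v` prefix reaches the agent, and the Dialogflow and Telegram tokens are hard-coded in source. A sketch of the handler with the prefix stripped and the token taken from the environment, reusing the `bot` and `apiai` objects defined above (`DIALOGFLOW_TOKEN` is a placeholder name):

```python
import os


@bot.message_handler(commands=['v'])
def text_message(message):
    query = message.text.partition(' ')[2].strip()  # text after the '/v' command
    if not query:
        bot.send_message(message.chat.id, text='Add a question after /v.')
        return
    request = apiai.ApiAI(os.environ['DIALOGFLOW_TOKEN']).text_request()
    request.lang = 'ru'                   # language of the outgoing query
    request.session_id = 'AbityTest_bot'  # dialog session id, used for training
    request.query = query                 # only the question, not '/v' itself
```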
cxmSevenlaps/CrossinMovieSite
https://github.com/cxmSevenlaps/CrossinMovieSite
39a42b62843b05fad3adeb6b9b1272d9590d5467
e4416c44017067c3a3f366b330eb9088a1827931
4e07bdef285950a00658860d5206383e9fb080da
refs/heads/master
2020-03-22T08:05:47.110989
2018-07-04T16:27:02
2018-07-04T16:27:02
139,744,398
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7362637519836426, "alphanum_fraction": 0.7362637519836426, "avg_line_length": 90, "blob_id": "e4f3e1238efe395be9b90f5e6cddcecd660dd205", "content_id": "5c712be2808ad110e5c79af91bebca6116c7a922", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 91, "license_type": "no_license", "max_line_length": 90, "num_lines": 1, "path": "/tables.sql", "repo_name": "cxmSevenlaps/CrossinMovieSite", "src_encoding": "UTF-8", "text": "create table movie (id, title, origin,url, rating, image, directors, casts, year, genres);\n" }, { "alpha_fraction": 0.6836283206939697, "alphanum_fraction": 0.6924778819084167, "avg_line_length": 19.590909957885742, "blob_id": "430827045a6288d18217506ee8c9d4323b31a131", "content_id": "2ac47909bc4b81d3823e37bd4c728f19f2646df2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 458, "license_type": "no_license", "max_line_length": 58, "num_lines": 22, "path": "/getposter.py", "repo_name": "cxmSevenlaps/CrossinMovieSite", "src_encoding": "UTF-8", "text": "import urllib.request\nimport json\nimport time\n\nimport web\n\ndef get_poster(id, url):\n\tpic = urllib.request.urlopen(url).read()\n\tfile_name = 'static/poster/%d.jpg' % id\n\tf = open(file_name, \"wb\")#python2里面用file(file_name, \"wb\")\n\tf.write(pic)\n\tf.close()\n\t\ndb = web.database(dbn='sqlite', db='MovieSite.db')\nmovies = db.select('movie')\n\ncount = 0\nfor movie in movies:\n\tget_poster(movie.id, movie.image)\n\tcount += 1\n\tprint(count, movie.title)\n\ttime.sleep(2)" } ]
2
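`getposter.py` above never closes the URL response or the output file (its inline Chinese comment notes that Python 2 spelled the open call as `file(file_name, "wb")`), and a single failed download aborts the whole loop. A sketch with context-managed I/O and per-poster error handling; behaviour is otherwise the same:

```python
import urllib.error
import urllib.request


def get_poster(movie_id, url):
    file_name = 'static/poster/%d.jpg' % movie_id
    try:
        # Both handles are closed even if the read or write fails.
        with urllib.request.urlopen(url) as response, open(file_name, 'wb') as f:
            f.write(response.read())
    except (urllib.error.URLError, OSError) as e:
        print('skipping poster %d: %s' % (movie_id, e))
```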
marbibu/myCad
https://github.com/marbibu/myCad
ea2c6ac3740e2e3a9673aa5d6475978e95360121
e945f33b55c0dd7a74677493d3eddfb147d3fca7
5dba3b17fd9a61140ed855ceb8d1c1cc7c030261
refs/heads/master
2021-01-13T02:07:33.165076
2014-06-17T18:05:41
2014-06-17T18:05:41
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5024420022964478, "alphanum_fraction": 0.5079365372657776, "avg_line_length": 30.519229888916016, "blob_id": "e3ebfbac5f47744f0c3296b5c53af109bb30d139", "content_id": "ee0aecaf9e3548d895bea07ec1005f41828e9cc7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1638, "license_type": "no_license", "max_line_length": 57, "num_lines": 52, "path": "/Point.py", "repo_name": "marbibu/myCad", "src_encoding": "UTF-8", "text": "from Sender import Sender\nclass Point(Sender):\n def __init__(s,x,y):\n #Dane:\n Sender.__init__(s)\n s.__x,s.__y=x,y\n s.__X,s.__Y=x,y\n s.__visible=1\n s.__exist=1\n s.__selected=0\n #Definicje:\n s.create()\n s.show()\n s.select()\n def getXY(s):#Zwraca wspolrzedne globalne\n return s.__X,s.__Y\n def getX(s):#Zwraca wspolrzedna globalna X\n return s.__X\n def getY(s):#Zwraca wspolrzedna globalna Y\n return s.__Y\n def getxy(s):#Zwraca wspolrzedne lokalne\n return s.__x,s.__y\n def getx(s):#Zwraca wspolrzedna lokalna x\n return s.__x\n def gety(s):#Zwraca wspolrzedna lokalna y\n return s.__x\n def getVisible(s):#Zwraca parametr widocznosci\n return s.__visible\n def getExist(s):#Zwraca parametr istnienia\n return s.__exist\n def getSelected(s):#Zwraca parametr zaznaczenia\n return s.__selected\n \n #Definicje ktore beda wysylaly sygnaly do sluchaczy\n def create(s):#Tworzy punkt\n s.__exist=1\n s.sendSignal()\n def destroy(s):#Niszczy punkt\n s.__exist=0\n s.sendSignal()\n def show(s):#Wyswietla punkt\n s.__visible=1\n s.sendSignal()\n def hide(s):#Ukrywa punkt\n s.__visible=0\n s.sendSignal()\n def select(s):#Zaznacza punkt\n s.__selected=1\n s.sendSignal()\n def deselect(s):#Odznacza punkt\n s.__selected=0\n s.sendSignal()" }, { "alpha_fraction": 0.4118316173553467, "alphanum_fraction": 0.46188852190971375, "avg_line_length": 22.157894134521484, "blob_id": "5dba9de25e4a95a95ec31e7ac4c83894523461ff", "content_id": "3aec8dab515046abf89ae55984cdba380d0f8586", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 879, "license_type": "no_license", "max_line_length": 49, "num_lines": 38, "path": "/Main.py", "repo_name": "marbibu/myCad", "src_encoding": "UTF-8", "text": "from Window import Window\nfrom Desk import Desk\n\nfrom Point import Point\nfrom Path import Path\n\nclass Main:\n def __init__(s):\n #Dane:\n win=Window(\"Point\",0,0,600,600)\n master=win.getMaster()\n desk=Desk(master)\n C=desk.getC()\n \n \n p1=Point(100,100)\n p2=Point(200,100)\n p3=Point(200,200)\n p4=Point(100,200)\n \n desk.addPoint(p1)\n desk.addPoint(p2)\n desk.addPoint(p3)\n desk.addPoint(p4)\n \n path=Path()\n path.addPoint(p1)\n path.addPoint(p2)\n path.addPoint(p3)\n path.addPoint(p4)\n \n desk.addPath(path)\n \n \n win.loop()\nMain()\n#Pytania\n#Gdzie bedziemy tworzyc obiekty GUI? moze w Desk?" 
}, { "alpha_fraction": 0.4134897291660309, "alphanum_fraction": 0.4164222776889801, "avg_line_length": 30.090909957885742, "blob_id": "e01825fd221dc5a9c9e06618f4d90dbe515d3471", "content_id": "4e2403a4e14edeaaacc64e9711edbd49ad069727", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 341, "license_type": "no_license", "max_line_length": 99, "num_lines": 11, "path": "/PointGUI.py", "repo_name": "marbibu/myCad", "src_encoding": "UTF-8", "text": "class PointGUI:\n __r=6\n def __init__(s,C,point):\n #Dane:\n s.__C=C\n s.__point=point\n #Definicje:\n s.__draw()\n def __draw(s):#Rysuje punkt:\n x,y=s.__point.getXY()\n s.__tag=s.__C.create_oval(x-s.__r,y-s.__r,x+s.__r,y+s.__r,fill=\"gold\",outline=\"orange\")" }, { "alpha_fraction": 0.5514469742774963, "alphanum_fraction": 0.5578778386116028, "avg_line_length": 31.421052932739258, "blob_id": "a222b1ce5519220c4603e7c9c76a40ad496cc0dd", "content_id": "e17b0b05b054ae5761a6c0c13971ee8384ea008d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 622, "license_type": "no_license", "max_line_length": 69, "num_lines": 19, "path": "/Desk.py", "repo_name": "marbibu/myCad", "src_encoding": "UTF-8", "text": "from PointGUI import PointGUI\nfrom PathGUI import PathGUI\n\nfrom Tkinter import Canvas\nclass Desk:\n def __init__(s,master):\n #Dane:\n s.__master=master\n #Definicje:\n s.__draw()\n def __draw(s):#Rysuje kontrolke\n s.__C=Canvas(s.__master,highlightthickness=0,bg=\"gray80\")\n s.__C.pack(side=\"top\",expand=1,fill=\"both\")\n def getC(s):#Zwraca id Canvasu\n return s.__C\n def addPoint(s,point):#Dodaje punkt do Desk\n PointGUI(s.__C,point)\n def addPath(s,path):#Dodaje sciezke do Desk\n PathGUI(s.__C,path)\n " }, { "alpha_fraction": 0.6331360936164856, "alphanum_fraction": 0.6360946893692017, "avg_line_length": 36.66666793823242, "blob_id": "282f4a1d795b9ed74dc2deaa8e2d776dfe22aeec", "content_id": "d709a6d0113eadbeb9e96693c2e784fdae46c494", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 338, "license_type": "no_license", "max_line_length": 72, "num_lines": 9, "path": "/Listener.py", "repo_name": "marbibu/myCad", "src_encoding": "UTF-8", "text": "class Listener:\n #Klasa, ktora umozliwia odbieranie sygnalow od nadawcy\n def __init__(s):\n #Dane:\n pass\n def receiveSignal(s,sender):\n print \"Nie zaimplementowano odbierania sygnalow\"\n def listen2(s,sender):#Rozpoczyna nasluchiwanie wskazanego nadawcy\n sender.addListener(s)" }, { "alpha_fraction": 0.5642023086547852, "alphanum_fraction": 0.5642023086547852, "avg_line_length": 35.78571319580078, "blob_id": "b526e01a663a13d55dc70e3e5c9adaba7f327fbc", "content_id": "673898e4b7d7e9599e4aefd4ed3e18e6d4073cdb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 514, "license_type": "no_license", "max_line_length": 51, "num_lines": 14, "path": "/Sender.py", "repo_name": "marbibu/myCad", "src_encoding": "UTF-8", "text": "class Sender:\n #Klasa, ktora rozsyla info do odbiorcow\n def __init__(s):\n #Dane:\n s.__listeners=[]\n def addListener(s,listener):#Dodaje sluchacza\n s.__listeners.append(listener)\n def delListener(s,listener):#Usuwa sluchacza\n s.__listeners.remove(listener)\n def sendSignal(s):#Wysyla sygnal\n for i in s.__listeners:\n i.receiveSignal(s)\n def getName(s):#Zwraca nazwe nadawcy\n return s.__class__.__name__" }, { "alpha_fraction": 0.4463667869567871, 
"alphanum_fraction": 0.4463667869567871, "avg_line_length": 28, "blob_id": "ac7a6604f882e235156e42a1596793bcf22d5f57", "content_id": "c76558c16353f1a3a0e4e6a1e15b94f2f4f3e1eb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 289, "license_type": "no_license", "max_line_length": 56, "num_lines": 10, "path": "/PathGUI.py", "repo_name": "marbibu/myCad", "src_encoding": "UTF-8", "text": "class PathGUI:\n def __init__(s,C,path):\n #Dane:\n s.__C=C\n s.__path=path\n #Definicje:\n s.__draw()\n def __draw(s):#Rysuje sciezke\n coords=s.__path.getCoords()\n s.__C.create_line(*coords)#zalezy od warstwy" }, { "alpha_fraction": 0.4853932559490204, "alphanum_fraction": 0.48707863688468933, "avg_line_length": 35.34693908691406, "blob_id": "7069f2b25c5019d2e18d459d6feb0bdd81585bf0", "content_id": "dbccf22b5c4e0ca914f9643f2cf233d9f61f2596", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1780, "license_type": "no_license", "max_line_length": 113, "num_lines": 49, "path": "/Path.py", "repo_name": "marbibu/myCad", "src_encoding": "UTF-8", "text": "from Sender import Sender\nclass Path(Sender):\n def __init__(s):\n #Dane:\n Sender.__init__(s)\n s.__points=[]\n s.__current=None\n def selectPoint(s,point):#Zaznacza punkt\n if s.__current==None:\n pass\n else:\n s.__current.select()\n s.__current=point\n s.__current.deselect()\n def getCurrentPointIndex(s):#Zwraca indeks biezacego punktu\n return s.__points.index(s.__current)\n def hasPointWithXY(s,point):#Sprawdza czy punkt z podanymi wspolrzednymi istnieje i jezeli tak to go zwraca\n result=0\n X,Y=point.getXY()\n for i in s.__points:\n x,y=i.getXY()\n if x==X and y==Y:\n return i\n return None\n def __addPoint(s,point):#Dodaje punkt do listy\n if s.__current==None:\n s.__points.append(point)\n else:\n index=s.getCurrentPointIndex()\n if index==len(s.__points)-1:\n s.__points.append(point)\n else:\n s.__points.insert(index+1,point)\n def addPoint(s,point):#Dodaje punkt\n new=s.hasPointWithXY(point)\n if new==None:\n pass\n else:\n point=new\n s.__addPoint(point)\n s.selectPoint(point)\n def getCoords(s):#Zwraca liste wspolrzednych\n result=[]\n for i in s.__points:\n result.extend(list(i.getXY()))\n return result\n def getPoints(s):#Zwraca liste punktow\n return s.__points\n #najpierw sprawdzamy czy punkt o podanych wspolrzednych istnieje" }, { "alpha_fraction": 0.43906810879707336, "alphanum_fraction": 0.43906810879707336, "avg_line_length": 31.117647171020508, "blob_id": "d2f2982fb99415e0a6ae14685bfb4cbbbde6562a", "content_id": "b90f7613280c0b5182449fb461ce501f17edd637", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 558, "license_type": "no_license", "max_line_length": 72, "num_lines": 17, "path": "/Window.py", "repo_name": "marbibu/myCad", "src_encoding": "UTF-8", "text": "from Tkinter import Tk\nclass Window:\n def __init__(s,title,x,y,w,h):\n #Dane:\n s.__title=title\n s.__x,s.__y=x,y\n s.__w,s.__h=w,h\n #Definicje:\n s.__draw()\n def __draw(s):#Rysuje okno\n s.__master=Tk()\n s.__master.geometry(\"%sx%s+%s+%s\"%(s.__w,s.__h,s.__x,s.__y))\n s.__master.title(s.__title)\n def getMaster(s):#Zwraca id okna\n return s.__master\n def loop(s):#Zapetla wyswietlanie okna\n s.__master.mainloop()\n " } ]
9
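`Sender` and `Listener` above implement a plain observer pattern, but `Listener.receiveSignal` only prints a Polish placeholder ("Nie zaimplementowano odbierania sygnalow", i.e. "signal receiving not implemented"), and `Point.gety` returns `s.__x` even though its comment promises the local y coordinate; it should return `s.__y`. A short sketch of how a concrete observer is meant to be wired to a `Point` (class and variable names here are illustrative):

```python
from Listener import Listener
from Point import Point


class PointLogger(Listener):
    # Concrete observer: Sender.sendSignal() calls this on every state change.
    def receiveSignal(self, sender):
        print('%s changed: selected=%s, visible=%s'
              % (sender.getName(), sender.getSelected(), sender.getVisible()))


p = Point(100, 100)   # __init__ already emits create/show/select signals
logger = PointLogger()
logger.listen2(p)     # registers the logger via Sender.addListener
p.deselect()          # now triggers PointLogger.receiveSignal
```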
ashkonf/QuantitativeHedging
https://github.com/ashkonf/QuantitativeHedging
2653134f2282e5c97ec64b0f94bd42b2b64c1ad2
b16a9f6b7a0de9528a9a6bf641c0b605eb084c99
04cbc80e3dc0c9a3591b1eacc892aa571bda10dc
refs/heads/master
2023-03-02T14:56:06.821388
2021-02-12T03:21:22
2021-02-12T03:21:22
337,302,953
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5396121740341187, "alphanum_fraction": 0.5587257742881775, "avg_line_length": 43.125, "blob_id": "4eefb0a7de2cfdfad2d4417d0f82b60b0cc2dbc0", "content_id": "fe3da489a45d053f1e0a28856311b0cf0e9c2888", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3610, "license_type": "permissive", "max_line_length": 629, "num_lines": 80, "path": "/readme.md", "repo_name": "ashkonf/QuantitativeHedging", "src_encoding": "UTF-8", "text": "# Quantitative Hedging\r\n\r\n## Contents\r\n\r\n- [Overview](#Overview)\r\n- [Dependencies](#Dependencies)\r\n- [Usage](#Usage)\r\n- [Example](#Example)\r\n- [License](#License)\r\n- [Links](#Links)\r\n\r\n## Overview\r\n\r\nThe Quantitative Hedging repository provides an easy way to hedge a stock using a basket of other stocks which collectively behave as a hedge against the desired stock. The repo is intended for two types of users: (1) market makers who need to offset the risk derived from undesired inventory and (2) quantitative researchers who need to identify factors or replicate studies involving the performance of a security, portfolio, or hedge fund. \r\n\r\n## Dependencies\r\n\r\nTrading Baskets requires the following libraries:\r\n\r\n- [`pandas`](https://pandas.pydata.org/)\r\n- [`cvxopt`](https://cvxopt.org/)\r\n- [`numpy`](https://numpy.org/)\r\n\r\nInstall these libraries using `pip` with requirements.txt:\r\n\r\n```bash\r\npip install -r requirements.txt\r\n```\r\n\r\n## Usage\r\n\r\nThis repo exports one public functions (in hedge.py) `build_basket` which builds the basket of stocks intended to hedge a desired stock. \r\n\r\n### build_basket()\r\n\r\nUse `build_basket()` to compose a hedging basket from (1) the stock to be hedged (`hedged_ticker_symbol`) and (2) the ticker symbols to consider including in the hedging basket (`basket_ticker_symbols`):\r\n\r\n basket(build_basket, basket_ticker_symbols)\r\n\r\nThe `basket()` arguments are as follows:\r\n\r\n| Name | Type | Description | Optional? | Sample Value |\r\n|-------------------|---------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------|------|\r\n| `hedged_ticker_symbol` | `str` | The stock-ticker name of the stock to be hedged. | No | `\"AAPL\"` |\r\n| `basket_ticker_symbols` | `list` | A list of ticker symbols the library will consider including in the hedging basket. | No | `[\"GOOG\", \"MSFT\", \"NFLX\", \"AMZN\", \"FB\"]` |\r\n\r\n## Example\r\n\r\nThe code below shows how to hedge a stock. 
The code defines APPL (Apple) as the stock to hedge, a list of stocks to consider using as part of the hedge (GOOG, MSFT, NFLX, AMZN, and FB), and composes a corresponding hedge basket for AAPL.\r\n\r\n```bash\r\nfrom hedge import build_basket\r\n\r\nhedged_ticker_symbol = \"AAPL\"\r\nbasket_ticker_symbols = [\"GOOG\", \"MSFT\", \"NFLX\", \"AMZN\", \"FB\"]\r\nprint(\"Hedge for %s:\" % hedged_ticker_symbol)\r\nprint(build_basket(hedged_ticker_symbol, basket_ticker_symbols))\r\n```\r\n\r\nThis will produce the following hedging basket:\r\n\r\n```\r\n{'AAPL': 0.2614353523521262, 'FB': 0.1921680128468791, 'AMZN': 0.5463966348009947}\r\n```\r\n\r\ni.e. AAPL with weight 27%, FB with weight 19%, and AMZN with weight 54%.\r\n\r\nA snippet like this can be incorporated in any Python application.\r\n\r\n## License\r\n\r\nTrading Baskets is licensed under the\r\n[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0).\r\n\r\n## Links\r\n\r\n- [Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0)\r\n- [CVXOPT](https://cvxopt.org/)\r\n- [NumPy](https://numpy.org/)\r\n- [pandas](https://pandas.pydata.org/)\r\n" }, { "alpha_fraction": 0.3863636255264282, "alphanum_fraction": 0.6363636255264282, "avg_line_length": 13.333333015441895, "blob_id": "56f592f99f526456e80857bc0a9ca60492e09d5f", "content_id": "80ae6ab8bcc187b1175e168fa1d5cc51bc74b168", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 44, "license_type": "permissive", "max_line_length": 14, "num_lines": 3, "path": "/requirements.txt", "repo_name": "ashkonf/QuantitativeHedging", "src_encoding": "UTF-8", "text": "numpy==1.17.4\r\ncvxopt==1.2.5\r\npandas==0.25.3" }, { "alpha_fraction": 0.5930609107017517, "alphanum_fraction": 0.6077101230621338, "avg_line_length": 32.595855712890625, "blob_id": "7af1a0e13066687c4f9af1aad5a05cc35808e48e", "content_id": "75a9f6152eb93b7bc07d1d0eb6960ac5e4497223", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6485, "license_type": "permissive", "max_line_length": 146, "num_lines": 193, "path": "/hedge.py", "repo_name": "ashkonf/QuantitativeHedging", "src_encoding": "UTF-8", "text": "import os\nimport sys\nimport re\nfrom datetime import datetime, timedelta\nimport time\nimport math\nimport requests\nfrom io import StringIO\n\nimport numpy as np\nimport pandas as pd\nimport cvxopt\nimport cvxopt.blas\nimport cvxopt.solvers\nfrom statistics import variance\n\n## Loading historical prices ############################################################################################\n\ndef csvstr2df(string):\n file_ = StringIO(string)\n return pd.read_csv(file_, sep=\",\")\n\ndef datetime_to_timestamp(dt):\n return time.mktime(dt.timetuple()) + dt.microsecond / 1000000.0\n\ndef historical_prices(ticker_symbol):\n url = \"https://query1.finance.yahoo.com/v7/finance/download/%s?period1=%s&period2=%s&interval=1d&events=history&includeAdjustedClose=true\" % (\n ticker_symbol,\n int(datetime_to_timestamp(datetime.now() - timedelta(days=365))),\n int(datetime_to_timestamp(datetime.now()))\n )\n df = csvstr2df(requests.get(url).text)\n return df[\"Adj Close\"]\n\n## Helper functions #####################################################################################################\n\nMARKET_DAYS_IN_YEAR = 252\n\ndef _truncate_quotes(quotes):\n truncated_quotes = {}\n for ticker in quotes:\n truncated_quotes[ticker] = quotes[ticker][-min(MARKET_DAYS_IN_YEAR, 
len(quotes[ticker])):]\n    return truncated_quotes\n\ndef _remove_row(matrix, row):\n    return np.vstack((matrix[:row], matrix[row + 1:]))\n\ndef _filter_negative_prices(price_matrix, ticker_map):\n    # Remove stocks with any negative prices:\n    index = 0\n    while index < len(price_matrix):\n        if any(value < 0.0 for value in price_matrix[index]):\n            price_matrix = _remove_row(price_matrix, index)\n            del ticker_map[index]\n        else:\n            index += 1\n\n    return (price_matrix, ticker_map)\n\ndef _filter_duplicate_rows(price_matrix, ticker_map):\n    # Remove duplicate rows, keeping the first occurrence:\n    rows_equal = lambda row1, row2: all(item == row2[index] for index, item in enumerate(row1))\n    index1 = 0\n    while index1 < len(price_matrix):\n        index2 = index1 + 1\n        while index2 < len(price_matrix):\n            if rows_equal(price_matrix[index1], price_matrix[index2]):\n                # Drop the later row so the matrix stays aligned with ticker_map:\n                price_matrix = _remove_row(price_matrix, index2)\n                del ticker_map[index2]\n            else:\n                index2 += 1\n        index1 += 1\n\n    return (price_matrix, ticker_map)\n\ndef _filter_no_variance_rows(price_matrix, ticker_map):\n    # Remove stocks with no variance:\n    index = 0\n    while index < len(price_matrix):\n        if len(set(price_matrix[index])) == 1:\n            price_matrix = _remove_row(price_matrix, index)\n            del ticker_map[index]\n        else:\n            index += 1\n\n    return (price_matrix, ticker_map)\n\ndef _filter_low_variance_rows(price_matrix, ticker_map):\n    # Remove stocks with low variance:\n    VARIANCE_THRESHOLD = 0.1\n    index = 0\n    while index < len(price_matrix):\n        if variance(price_matrix[index]) < VARIANCE_THRESHOLD:\n            price_matrix = _remove_row(price_matrix, index)\n            del ticker_map[index]\n        else:\n            index += 1\n\n    return (price_matrix, ticker_map)\n\ndef _build_price_matrix(quotes, ticker):\n    price_matrix = quotes[ticker]\n    ticker_map = [ticker]\n\n    for index, other_ticker in enumerate(quotes):\n        if other_ticker != ticker:\n            price_matrix = np.vstack((price_matrix, quotes[other_ticker]))\n            ticker_map.append(other_ticker)\n\n    price_matrix, ticker_map = _filter_negative_prices(price_matrix, ticker_map)\n    price_matrix, ticker_map = _filter_duplicate_rows(price_matrix, ticker_map)\n    price_matrix, ticker_map = _filter_no_variance_rows(price_matrix, ticker_map)\n    price_matrix, ticker_map = _filter_low_variance_rows(price_matrix, ticker_map)\n\n    return (price_matrix, ticker_map)\n\ndef _build_returns_matrix(price_matrix):\n    returns_matrix = []\n\n    for row in price_matrix:\n        returns = []\n        for index in range(len(row) - 1):\n            returns.append((row[index + 1] - row[index]) / row[index])\n        returns_matrix.append(returns)\n\n    return returns_matrix\n\ndef _minimize_portfolio_variance(returns_matrix):\n    # Compose QP parameters:\n    S = np.cov(returns_matrix) # Sigma\n    n = len(S) - 1\n    P = np.vstack((np.hstack((2.0 * S[1:, 1:], np.zeros((n, n)))),\n                   np.hstack((np.zeros((n, n)), 2.0 * S[1:, 1:])))) # No negative sign here because (-1) ^ 2 = 1\n    q = np.vstack((2.0 * S[1:, 0:1],\n                   -2.0 * S[1:, 0:1])) # But this term is linear, so we do need the -1\n    G = -np.eye(2 * n)\n    h = np.zeros((2 * n, 1))\n    A = np.ones((1, 2 * n))\n    b = 1.0\n\n    # Make QP parameters into CVXOPT matrices:\n    P = cvxopt.matrix(P)\n    q = cvxopt.matrix(q)\n    G = cvxopt.matrix(G)\n    h = cvxopt.matrix(h)\n    A = cvxopt.matrix(A)\n    b = cvxopt.matrix(b)\n\n    # Solve the QP:\n    cvxopt.solvers.options[\"show_progress\"] = False\n    result = cvxopt.solvers.qp(P, q, G, h, A, b)\n    weights = result[\"x\"]\n\n    return weights\n\ndef _filter_small_weights(weights):\n    WEIGHT_THRESHOLD = 0.01\n    for index, weight in enumerate(weights):\n        if abs(weight) < WEIGHT_THRESHOLD:\n            weights[index] = 0\n\n    # We have to normalize weights after\n    # discarding small ones above so that\n    # they still sum to 1.\n    weights = weights / sum(weights)\n\n    return weights\n\ndef _compose_basket(weights, ticker_map, hedged_ticker_symbol):\n    basket = {}\n\n    # weights[index] corresponds to ticker_map[index + 1]: row 0 of the price\n    # matrix (and so entry 0 of ticker_map) is the hedged ticker itself, which\n    # the QP excludes from the basket.\n    for index in range(int(len(weights) / 2)):\n        pweight = weights[index]\n        nweight = weights[int(len(weights) / 2) + index]\n        weight = pweight - nweight\n        ticker = ticker_map[index + 1]\n        if weight != 0 and ticker != hedged_ticker_symbol:\n            basket[ticker] = float(weight) * -1.0\n\n    return basket\n\n## Public functions #####################################################################################################\n\ndef build_basket(hedged_ticker_symbol, basket_ticker_symbols):\n    quotes = {ticker: historical_prices(ticker) for ticker in set(basket_ticker_symbols + [hedged_ticker_symbol])}\n    quotes = _truncate_quotes(quotes)\n\n    price_matrix, ticker_map = _build_price_matrix(quotes, hedged_ticker_symbol)\n    returns_matrix = _build_returns_matrix(price_matrix)\n    weights = _minimize_portfolio_variance(returns_matrix)\n    weights = np.array(weights)\n    weights = _filter_small_weights(weights)\n\n    return _compose_basket(weights, ticker_map, hedged_ticker_symbol)\n\n" } ]
3
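The quadratic program in `_minimize_portfolio_variance` above is the core of this tool: each basket weight is split into a non-negative long part and a non-negative short part, which is why the quadratic term needs no sign flip ((-1)^2 = 1) while the linear term does. Below is a minimal, self-contained sketch of the same construction; numpy and cvxopt are assumed installed, and the return series are synthetic, so this is an illustration rather than the repository's code.

```python
# Sketch of the minimum-variance hedge QP on synthetic data.
# Assumes numpy and cvxopt are installed; the return series are made up.
import cvxopt
import numpy as np

np.random.seed(0)
returns = np.random.randn(4, 250) * 0.01  # row 0: hedged asset, rows 1-3: candidates

S = np.cov(returns)                       # Sigma
n = len(S) - 1
# Variables are [positive parts; negative parts] of the n basket weights.
P = np.vstack((np.hstack((2.0 * S[1:, 1:], np.zeros((n, n)))),
               np.hstack((np.zeros((n, n)), 2.0 * S[1:, 1:]))))
q = np.vstack((2.0 * S[1:, 0:1], -2.0 * S[1:, 0:1]))
G = -np.eye(2 * n)                        # both halves stay non-negative
h = np.zeros((2 * n, 1))
A = np.ones((1, 2 * n))                   # the parts sum to one
b = 1.0

cvxopt.solvers.options["show_progress"] = False
solution = cvxopt.solvers.qp(*(cvxopt.matrix(m) for m in (P, q, G, h, A, b)))
w = np.array(solution["x"])
net = w[:n] - w[n:]                       # net weight per candidate ticker
print(net.ravel(), float(net.sum()))
```

Recombining the two halves as positive minus negative reproduces what `_compose_basket` does with the solver output.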
wdlsvnit/ProjectX
https://github.com/wdlsvnit/ProjectX
b2e49727d06865bf64ab99d8d17024181c324ab6
f2c0834e28170eb47f00bd6b9f48c60a86014792
6928345b728262574985f2d4c9bb6d04a6eb5d22
refs/heads/master
2021-05-27T16:40:19.670527
2014-03-27T08:51:48
2014-03-27T08:51:48
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7333333492279053, "alphanum_fraction": 0.7333333492279053, "avg_line_length": 19.33333396911621, "blob_id": "1cc11f255a79c3ed59bee39040ef04bf43212f29", "content_id": "576c6ef1bec6a838448845185fbae7bb31e69f9c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 60, "license_type": "permissive", "max_line_length": 42, "num_lines": 3, "path": "/README.md", "repo_name": "wdlsvnit/ProjectX", "src_encoding": "UTF-8", "text": "projectx\n========\nSVNIT web portal for various applications." }, { "alpha_fraction": 0.7045130729675293, "alphanum_fraction": 0.7140142321586609, "avg_line_length": 28.64788818359375, "blob_id": "2a2039eb7def69581425d667fc8b86b1a95ee4ce", "content_id": "e9d1d1a89841f557512a83210973977b51668b61", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2105, "license_type": "permissive", "max_line_length": 145, "num_lines": 71, "path": "/projectx/organisation/models.py", "repo_name": "wdlsvnit/ProjectX", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\n\n\n# Create your models here.\nfrom django.contrib.auth.models import User,Group,Permission\n\n#Faculties=Group.objects.get_or_create(name='Faculties')\n#Students=Group.objects.get_or_create(name='Students')\np=Group.objects.all()\nFaculties=p[0]\nStudents=p[1]\n\n\nclass Branch(models.Model):\n COURSES=(('B.TECH','B.TECH'),('M.SC','M.SC'))\n YEARS=(('1','FIRST'),('2','SECOND'),('3','THIRD'),('4','FOURTH'),('5','FIFTH'))\n name=models.CharField('Branch Name',max_length=50)\n year=models.CharField('year',max_length=1,choices=YEARS)\n division=models.CharField(max_length=1,default='A')\n course=models.CharField('Course',max_length=8,choices=COURSES)\n \n def __unicode__(self):\n return self.name + \" \" + self.year + \" \" + self.division \n \nclass Subject(models.Model):\n code=models.CharField(max_length=10)\n name=models.CharField(max_length=50)\n \n def __unicode__(self):\n return self.name\n\nclass StudentUserProfile(models.Model):\n user=models.OneToOneField(User,related_name='sprofile')\n branch=models.ForeignKey(Branch)\n \n \n def __unicode__(self):\n return self.user.username\n\nUser.sprofile = property(lambda u: StudentUserProfile.objects.get_or_create(user=u)[0] if u.groups.filter(name='Faculties').count()==0 else None)\n\n\nclass FacultyUserProfile(models.Model):\n \n user=models.OneToOneField(User,related_name='tprofile')\n subjects=models.ManyToManyField(Subject,related_name='Faculties') \n \n def __unicode__(self):\n return self.user.username\n \nUser.fprofile = property(lambda u: FacultyUserProfile.objects.get_or_create(user=u)[0] if u.groups.filter(name='Students').count()==0 else None )\n\n\n@receiver(post_save,sender=StudentUserProfile)\ndef my_handler_one(sender, **kwargs):\n if kwargs['created']:\n obj=kwargs['instance']\n u=obj.user\n u.groups.add(Students)\n \n \n \n\n@receiver(post_save,sender=FacultyUserProfile)\ndef my_handler_two(sender, **kwargs):\n\tif kwargs['created']:\n\t\tobj=kwargs['instance']\n\t\tobj.user.groups.add(Faculties)\n\t\tobj.save()\n" }, { "alpha_fraction": 0.5239999890327454, "alphanum_fraction": 0.6959999799728394, "avg_line_length": 16.85714340209961, "blob_id": "56982733a1eb832542f9d9e92aba73f20ca1fb1d", "content_id": "5672f60dedd07cfef295b0617c6dee70a3780e31", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, 
"language": "Text", "length_bytes": 250, "license_type": "permissive", "max_line_length": 28, "num_lines": 14, "path": "/projectx/requirements.txt", "repo_name": "wdlsvnit/ProjectX", "src_encoding": "UTF-8", "text": "Django==1.6.2\nPillow==2.3.1\nSouth==0.8.4\nWhoosh==2.6.0\nargparse==1.2.1\ndjango-allauth==0.16.1\ndjango-haystack==2.1.0\ndjango-simple-captcha==0.4.2\noauthlib==0.6.1\npython-openid==2.2.5\nrequests==2.2.1\nrequests-oauthlib==0.4.0\nsix==1.6.1\nwsgiref==0.1.2\n" }, { "alpha_fraction": 0.8621907830238342, "alphanum_fraction": 0.8621907830238342, "avg_line_length": 39.57143020629883, "blob_id": "56d35bc184b16acb93556f205726b7f0a3dbcbd3", "content_id": "5e4eb8edb336ce18a5ecbb43e53b2acc0a158bfb", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 283, "license_type": "permissive", "max_line_length": 84, "num_lines": 7, "path": "/projectx/organisation/admin.py", "repo_name": "wdlsvnit/ProjectX", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom organisation.models import Branch,Subject,StudentUserProfile,FacultyUserProfile\n# Register your models here.\nadmin.site.register(Branch)\nadmin.site.register(Subject)\nadmin.site.register(StudentUserProfile)\nadmin.site.register(FacultyUserProfile)" } ]
4
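The `post_save` receivers in `organisation/models.py` above are what place a newly created profile's user into the matching auth group. Below is a hypothetical Django shell session illustrating the effect; it assumes migrations have been run and the `Students` group row exists.

```python
# Hypothetical Django shell session; field values follow the models above.
from django.contrib.auth.models import User
from organisation.models import Branch, StudentUserProfile

user = User.objects.create_user('alice', 'alice@example.com', 'secret')
branch = Branch.objects.create(name='CS', year='1', course='B.TECH')

# Saving the profile fires my_handler_one, which adds the user to Students:
StudentUserProfile.objects.create(user=user, branch=branch)
print(user.groups.filter(name='Students').exists())  # True
```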
uhapppy/bubble-sort-visualisation
https://github.com/uhapppy/bubble-sort-visualisation
3d356b2ea5b24382cf50bd1415c3bc86c2cbd463
7084acc6d680ffbd67de29eb972bb3ad020bf281
02feecc7134a69fbbb3f9e9c2a5c8652575a17f2
refs/heads/main
2023-01-24T03:52:40.451284
2020-12-12T15:39:20
2020-12-12T15:39:20
320,863,742
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.48421356081962585, "alphanum_fraction": 0.5045924186706543, "avg_line_length": 18.023256301879883, "blob_id": "1dafcdd84adddc98cebfd02d18de01508b1ce768", "content_id": "a9b03734696078a59517befad1feaea9b0640923", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3484, "license_type": "no_license", "max_line_length": 73, "num_lines": 172, "path": "/visualisation_tri.py", "repo_name": "uhapppy/bubble-sort-visualisation", "src_encoding": "UTF-8", "text": "import tkinter\r\nimport random\r\nimport pygame\r\n\r\npygame.init()\r\n\r\nwhite=(255,255,255)\r\nblack=(0,0,0)\r\nbasse=0\r\nhauteur=600\r\ncount=0\r\nfin=-1\r\n#liste de depart\r\nLISTE=[]\r\nfor i in range(basse,hauteur+1):\r\n LISTE.append(i)\r\n\r\nrandom.shuffle(LISTE)\r\n\r\n\r\n\r\n\r\ndef update1():\r\n global LISTE\r\n random.shuffle(LISTE)\r\n\r\nprint(LISTE)\r\n\r\ndef update2():\r\n global LISTE\r\n global count\r\n newlist=[]\r\n if count>=hauteur:\r\n #print(LISTE)\r\n return True\r\n\r\n if count<hauteur:\r\n for i in range(0,count+1):\r\n if LISTE[count]==LISTE[i]:\r\n continue\r\n if LISTE[i]<LISTE[count]:\r\n newlist.append(LISTE[i])\r\n print(count)\r\n if LISTE[i]>LISTE[count]:\r\n newlist.append(LISTE[count])\r\n print(count)\r\n for j in range(i,hauteur+1):\r\n newlist.append(LISTE[j])\r\n count+=1\r\n LISTE=newlist\r\n \r\n return LISTE\r\n\r\ndef update3():\r\n global LISTE\r\n global count\r\n newlist=[]\r\n\r\n if count==hauteur:\r\n for i in range(0,hauteur+1):\r\n newlist.append(LISTE[i])\r\n count=0\r\n LISTE=newlist\r\n return LISTE\r\n\r\n\r\n \r\n\r\n if LISTE[count]>LISTE[count+1]:\r\n for i in range(0,count):\r\n newlist.append(LISTE[i])\r\n newlist.append(LISTE[count+1])\r\n newlist.append(LISTE[count])\r\n for j in range(count+2,hauteur+1):\r\n newlist.append(LISTE[j])\r\n \r\n \r\n if LISTE[count]<LISTE[count+1]:\r\n for k in range(0,hauteur+1):\r\n newlist.append(LISTE[k])\r\n \r\n \r\n count+=1\r\n \r\n LISTE=newlist\r\n return LISTE\r\n\r\n\r\ndef update4():\r\n global LISTE\r\n global count\r\n global fin\r\n newlist=[]\r\n\r\n if count==hauteur:\r\n for i in range(0,hauteur+1):\r\n newlist.append(LISTE[i])\r\n count=0\r\n \r\n LISTE=newlist\r\n return LISTE\r\n\r\n\r\n print(count)\r\n if LISTE[count]==LISTE[fin]-1 :\r\n print(\"saluttttt\")\r\n for i in range(0,hauteur+1):\r\n if count==i or -fin==i+1:\r\n continue\r\n newlist.append(LISTE[i])\r\n newlist.append(LISTE[count])\r\n newlist.append(LISTE[fin])\r\n fin-=1\r\n count+=1\r\n LISTE=newlist\r\n return LISTE\r\n\r\n if LISTE[count]>LISTE[count+1]:\r\n print(\"2\")\r\n for i in range(0,count):\r\n newlist.append(LISTE[i])\r\n newlist.append(LISTE[count+1])\r\n newlist.append(LISTE[count])\r\n for j in range(count+2,hauteur+1):\r\n newlist.append(LISTE[j])\r\n count+=1\r\n LISTE=newlist\r\n return LISTE\r\n\r\n \r\n if LISTE[count]<LISTE[count+1]:\r\n print(\"3\")\r\n for k in range(0,hauteur+1):\r\n newlist.append(LISTE[k])\r\n\r\n count+=1\r\n LISTE=newlist\r\n return LISTE\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef draw():\r\n global LISTE\r\n for i in range(0,hauteur+1):\r\n pygame.draw.line(screen,white,(i,hauteur),(i,hauteur-LISTE[i]),2)\r\n\r\n\r\n\r\nscreen = pygame.display.set_mode((hauteur,hauteur))\r\n\r\nrunning=True\r\n\r\nwhile running:\r\n #pygame.time.delay(20)\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n running=False\r\n \r\n screen.fill(black)\r\n while count%100!=0:\r\n update3()\r\n update3()\r\n draw()\r\n 
pygame.display.flip()\r\n\r\n\r\n \r\n\r\n \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n" } ]
1
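In `visualisation_tri.py`, `update3` advances a bubble sort by exactly one comparison per call, rebuilding the whole list each time so the pygame loop can draw intermediate states. The same one-step-at-a-time idea can be written with an in-place swap; below is a sketch with hypothetical names (`data` and `cursor` stand in for `LISTE` and `count`).

```python
# One bubble-sort step per call, swapping in place instead of rebuilding
# the whole list; 'data' and 'cursor' are hypothetical names.
def bubble_step(data, cursor):
    """Perform one comparison/swap and return the next cursor position."""
    if cursor >= len(data) - 1:
        return 0  # end of a pass; start over from the left
    if data[cursor] > data[cursor + 1]:
        data[cursor], data[cursor + 1] = data[cursor + 1], data[cursor]
    return cursor + 1
```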
NasroGalili/hexol
https://github.com/NasroGalili/hexol
ebb9c5e7a1f50bf2f2d2af244650184bc228e72e
a0ffdac64e456f85ef125d74970845518e8eec59
469cb0bc08625689cf8b9bf6fdd72277dcc45690
refs/heads/master
2022-07-18T01:49:05.879776
2020-05-14T14:59:04
2020-05-14T14:59:04
263,922,673
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6979166865348816, "alphanum_fraction": 0.6979166865348816, "avg_line_length": 20.33333396911621, "blob_id": "6b131bce57717aa6bbc232024386dea5e3f14f7d", "content_id": "23bd9b20997c118399d4fc118175643427d4e765", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 192, "license_type": "no_license", "max_line_length": 42, "num_lines": 9, "path": "/bot.py", "repo_name": "NasroGalili/hexol", "src_encoding": "UTF-8", "text": "import discord\nfrom discord.ext import commands\nclient = commands.Bot(command_prefix ='.')\n\n@client.event\nasync def on_ready():\n print('I am ready!')\n\n client.run(os.environ['BOT_KEY'])\n" }, { "alpha_fraction": 0.8260869383811951, "alphanum_fraction": 0.8260869383811951, "avg_line_length": 6.666666507720947, "blob_id": "91b487b7ba100bdfbe8499afb468d3068df3ca00", "content_id": "13eefc95e3c0a6c152299d1bc0a1271bc993e4bc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 23, "license_type": "no_license", "max_line_length": 10, "num_lines": 3, "path": "/requirements.txt", "repo_name": "NasroGalili/hexol", "src_encoding": "UTF-8", "text": "discord.py\nbash\nbundle\n" } ]
2
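The fix applied to `bot.py` above moves `client.run()` to module level, where it belongs: nested inside `on_ready` it could only run after the bot had already started. Below is a slightly more defensive variant of the same startup, assuming the discord.py 1.x API this 2020 repository targets and the same `BOT_KEY` environment variable.

```python
# Sketch: fail fast with a readable error when the token is missing.
import os
import sys

from discord.ext import commands

client = commands.Bot(command_prefix='.')


@client.event
async def on_ready():
    print('I am ready!')


token = os.environ.get('BOT_KEY')
if token is None:
    sys.exit('BOT_KEY environment variable is not set')
client.run(token)
```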
jfenton/iprowlergox
https://github.com/jfenton/iprowlergox
1828657bb0cfb1d4366dc60eaf9168e75072e94d
33eea7a2be9421f5b2eae1a43cf62f236be6b2e7
8bad42eaa10468325c00000cb078b7b23fd54066
refs/heads/master
2021-03-12T23:42:28.778876
2013-04-15T17:08:54
2013-04-15T17:08:54
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7352070808410645, "alphanum_fraction": 0.7603550553321838, "avg_line_length": 33.64102554321289, "blob_id": "8cd20c134204505b0ddda2d327a6bfe442a67b91", "content_id": "cf0945c60e1ffd77aaed89db3299a2d694f2912a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1354, "license_type": "permissive", "max_line_length": 358, "num_lines": 39, "path": "/README.md", "repo_name": "jfenton/iprowlergox", "src_encoding": "UTF-8", "text": "iProwlerGox\n===========\n\nThis is a Python script which monitors the current ฿TC ticker price via the Mt.Gox 'ticker_fast' API, and sends notifications to your iPhone/iPad device using the Prowler service.\n\nInstallation\n------------\n\n1) Install Python\n\n2) Install the 'prowler' and 'simplejson' Python modules with pip or easy_install\n\n```easy_install prowler simplejson```\n\tor\n```pip install prowler simplejson```\n\n3) Go to http://www.prowler.com/ and register an account\n\n4) Login to your account there, go to the API Keys tab and click Current API Keys > Generate Key\n\n5) Paste the API Key given into the file iprowlergox.conf\n\nAll done!\n\nUsage\n-----\n\n```python iprowlergox.py 45 55```\n\nWhere 45 and 55 are the low and high watermarks respectively that you want to use. The script checks the ticker price at Gox every 5 seconds at present.\n\nYou will receive iOS notifications in your Prowler client when the price breaches the thresholds set, and a further notification when it crosses that threshold in the opposite direction i.e. if the price were 54 and then rose to 55.2, you would receive your first iOS notification. If it subsequently dropped below 55, you would receive another notification.\n\nLicense\n-------\n\nCopyright (c) 2013 Jay Fenton, released under the MIT license.\n\nBTC Donations (entirely voluntary): 1HTcfFx4j6cf49Xg8H6nxz5C8TYRasHmkw\n\n" }, { "alpha_fraction": 0.6543408632278442, "alphanum_fraction": 0.6709539294242859, "avg_line_length": 21.7439022064209, "blob_id": "74957c950ff42ab2f7654b5a159d718f71c387cb", "content_id": "be3b02345e9cb8b77fb757119509d8254b2d77ce", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1866, "license_type": "permissive", "max_line_length": 135, "num_lines": 82, "path": "/iprowlergox.py", "repo_name": "jfenton/iprowlergox", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n#\n# iprowlergox.py (c) Jay Fenton 2013 <na.nu@na.nu>\n#\n\nimport prowler\nimport urllib2\nimport simplejson as json\nimport sys\nimport os\nimport signal\nimport time\n\nGOX_URL = 'http://data.mtgox.com/api/2/BTCUSD/money/ticker_fast'\n\nsys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0) # Disable stdout buffering\n\nkey = None\n\ntry:\n\tkey = open('iprowlergox.conf').read().rstrip('\\r\\n')\nexcept: pass\n\nif not key:\n\tprint 'Please put your Prowler API key in iprowlergox.conf'\n\tsys.exit(-1)\n\nif len(sys.argv) < 2:\n\tprint 'Syntax: %s 95 105' % (sys.argv[0])\n\tsys.exit(-1)\n\nlow = float(sys.argv[1])\nhigh = float(sys.argv[2])\n\np = prowler.Prowl(key)\n\nbreach = False\n\nprint '[ iWatchGox.py (c) Jay Fenton 2013 <na.nu@na.nu> ]'\nsys.stdout.write('Monitoring for breaches of price range %s-%s.' 
% (str(low), str(high)))\n\np.post('Now monitoring for breaches of range %s-%s' % (str(low), str(high)), priority=2, app='iprowlergox', event='Monitoring started')\n\ndef signal_handler(signal, frame):\n\tprint 'Exiting...'\n\tsys.exit(0)\n\nsignal.signal(signal.SIGINT, signal_handler)\n\nwhile True:\n\ttry:\n\t\tsys.stdout.write('.')\n\n\t\tdata = json.loads(urllib2.urlopen(GOX_URL).read())\n\t\tlast = float(data['data']['last']['value'])\n\n\t\tsubject = None\n\t\tbody = 'Mt.Gox USD is @ ' + str(last)\n\n\t\tif breach:\n\t\t\t# Only clear a breach once the price is strictly back inside the\n\t\t\t# range; testing a single bound here would clear a high breach\n\t\t\t# immediately (any breached-high price is also above the low\n\t\t\t# watermark) and re-alert on every poll.\n\t\t\tif low < last < high:\n\t\t\t\tsubject = 'Price Has Returned To Within Watermarks'\n\t\t\t\tbreach = False\n\t\telif last <= low:\n\t\t\tsubject = 'Price Has Breached Low Watermark'\n\t\t\tbreach = True\n\t\telif last >= high:\n\t\t\tsubject = 'Price Has Breached High Watermark'\n\t\t\tbreach = True\n\n\t\tif subject and body:\n\t\t\tp.post(body, priority=2, app='iprowlergox', event=subject)\n\t\t\tsys.stdout.write('+')\n\t\t\ttime.sleep(25) # Only send notifications at most every 30 seconds\n\n\texcept Exception:\n\t\t# A bare except here would also swallow the SystemExit raised by the\n\t\t# SIGINT handler, making Ctrl-C unable to stop the loop.\n\t\tsys.stdout.write('!')\n\n\ttime.sleep(5)\n\n" } ]
2
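The core of `iprowlergox.py` is a small state machine: alert when the price crosses a watermark, then stay silent until it is strictly back inside the range. Factored into a pure function the logic becomes unit-testable; this is an illustrative sketch mirroring the loop above, not code from the repository.

```python
# Pure-function mirror of the watermark logic in the polling loop (sketch).
def check_watermarks(last, low, high, breach):
    """Return (event_subject_or_None, new_breach_state)."""
    if breach:
        if low < last < high:
            return 'Price Has Returned To Within Watermarks', False
        return None, True
    if last <= low:
        return 'Price Has Breached Low Watermark', True
    if last >= high:
        return 'Price Has Breached High Watermark', True
    return None, False


assert check_watermarks(55.2, 45, 55, False) == ('Price Has Breached High Watermark', True)
assert check_watermarks(54.0, 45, 55, True) == ('Price Has Returned To Within Watermarks', False)
```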
ronbeltran/four-pics-one-word
https://github.com/ronbeltran/four-pics-one-word
59aa97a1ea83c49d77896f1509b10aa293f7955a
e777d28c89a864742a72403cd8979f48b6b59778
ec36dacd674fa25f3b11bdcd681d4b5b925bfa88
refs/heads/master
2021-01-10T04:23:01.337165
2015-10-10T12:57:17
2015-10-10T12:57:17
44,010,533
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.5976190567016602, "alphanum_fraction": 0.6000000238418579, "avg_line_length": 22.33333396911621, "blob_id": "4e45c6ed1cb976c5c20a5d9e9560b6acf9b611f6", "content_id": "6c61065548abec4321bbe10b4b2d02544bb54bd6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 420, "license_type": "no_license", "max_line_length": 63, "num_lines": 18, "path": "/game/models.py", "repo_name": "ronbeltran/four-pics-one-word", "src_encoding": "UTF-8", "text": "from google.appengine.ext import ndb\n\n\nclass Letters(ndb.Model):\n match_count = ndb.IntegerProperty(required=True, default=0)\n\n @classmethod\n def _build_key(cls, letters):\n return ndb.Key(cls, letters)\n\n @classmethod\n def new(cls, letters, match_count):\n key = cls._build_key(letters)\n new = cls(\n key=key,\n match_count=match_count,\n )\n return new\n" }, { "alpha_fraction": 0.5603032112121582, "alphanum_fraction": 0.5685734152793884, "avg_line_length": 32.74418640136719, "blob_id": "2ce7cce21ae95535dfef11bf18d3111d0b0d999c", "content_id": "349fd3303ea5d048b9f50fb4d8a1dcdc55d7c67a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1451, "license_type": "no_license", "max_line_length": 94, "num_lines": 43, "path": "/game/views.py", "repo_name": "ronbeltran/four-pics-one-word", "src_encoding": "UTF-8", "text": "import logging\nimport operator\n\nfrom flask import render_template, request\nfrom google.appengine.api import memcache\n\nfrom game import app\nfrom game import utils\nfrom game import models\n\nEXPIRE_TIME = 60 * 60 * 24 # 24 hours\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef home():\n context = {}\n if request.method == \"POST\":\n length = request.form['length'] or 1\n letters = request.form['letters'] or None\n if letters is None:\n context.update({\n 'length': length,\n 'letters': '',\n })\n return render_template('index.html', **context)\n letters = letters.upper().strip().replace(' ', '').replace('\\n', '').replace('\\t', '')\n key = '{0}_{1}'.format(str(length), ''.join(sorted(letters)))\n cached_data = memcache.get(key)\n if cached_data is None:\n logging.info('{} not found in memcache'.format(key))\n words = utils.get_words_dict(length, letters)\n sorted_words = sorted(words.items(), key=operator.itemgetter(1))\n sorted_words.reverse()\n memcache.add(key, sorted_words, EXPIRE_TIME)\n else:\n logging.info('{} found in memcache'.format(key))\n sorted_words = cached_data\n context.update({\n 'length': length,\n 'letters': letters,\n 'words': sorted_words,\n })\n return render_template('index.html', **context)\n" }, { "alpha_fraction": 0.6028309464454651, "alphanum_fraction": 0.607826828956604, "avg_line_length": 26.295454025268555, "blob_id": "24b7f42a6aeaef25ca41c114af6f79eb060dfddd", "content_id": "ece040104249f9451a47c12dd219d35440924307", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1201, "license_type": "no_license", "max_line_length": 79, "num_lines": 44, "path": "/game/utils.py", "repo_name": "ronbeltran/four-pics-one-word", "src_encoding": "UTF-8", "text": "import pickle\nimport logging\n\n\nWORDS = pickle.load(open('./static/google-books-common-words.bin', 'r'))\n\n\ndef is_subset(word, choices):\n _choices = list(choices)\n for c in word:\n if c not in _choices:\n return False\n _choices.remove(c)\n return True\n\n\ndef get_words(length, letters):\n length = int(length)\n candidates = []\n selected = []\n for key, value 
in WORDS.iteritems():\n if len(key) == length:\n candidates.append(key)\n for word in candidates:\n if is_subset(word, letters):\n selected.append(word)\n logging.info('Got {0} matches with length of {1} where choices {2}'.format(\n len(selected), length, letters))\n return selected\n\n\ndef get_words_dict(length, letters):\n length = int(length)\n candidates = []\n selected = {}\n for key, value in WORDS.iteritems():\n if len(key) == length:\n candidates.append(key)\n for word in candidates:\n if is_subset(word, letters):\n selected.update({word: WORDS.get(word)})\n logging.info('Got {0} matches with length of {1} where choices {2}'.format(\n len(selected.keys()), length, letters))\n return selected\n" }, { "alpha_fraction": 0.6842857003211975, "alphanum_fraction": 0.7035714387893677, "avg_line_length": 30.11111068725586, "blob_id": "b093b5a843415ef9ce28122096d70f8b5f86f8ac", "content_id": "4e0059b326dc6a09676534bd4aba27acd48270cf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1400, "license_type": "no_license", "max_line_length": 100, "num_lines": 45, "path": "/api.py", "repo_name": "ronbeltran/four-pics-one-word", "src_encoding": "UTF-8", "text": "import endpoints\nfrom protorpc import messages\nfrom protorpc import message_types\nfrom protorpc import remote\n\npackage = 'Wordsapi'\n\nfrom game import utils\n\n\nclass Word(messages.Message):\n word = messages.StringField(1)\n frequency = messages.StringField(2)\n\n\nclass Words(messages.Message):\n words = messages.MessageField(Word, 1, repeated=True)\n\n\nWORDS_CRITERIA_RESOURCE = endpoints.ResourceContainer(\n message_types.VoidMessage,\n length=messages.IntegerField(1, variant=messages.Variant.INT32, required=True), # noqa\n choices=messages.StringField(2, required=True)\n)\n\nWEB_CLIENT_ID = '471311115005-4bd8aqpnmrnro61ntdgstb2bsbvhma90.apps.googleusercontent.com' # noqa\nANDROID_CLIENT_ID = ''\nIOS_CLIENT_ID = ''\nANDROID_AUDIENCE = WEB_CLIENT_ID\n\n\n@endpoints.api(name='wordsapi', version='v1',\n allowed_client_ids=[WEB_CLIENT_ID, endpoints.API_EXPLORER_CLIENT_ID], # noqa\n audiences=[ANDROID_AUDIENCE],)\nclass WordsApi(remote.Service):\n\n @endpoints.method(WORDS_CRITERIA_RESOURCE, Words,\n path='words/{length}/{choices}', http_method='POST',\n name='words.get')\n def get_words(self, request):\n words = utils.get_words(request.length, request.choices.upper())\n return Words(words=[Word(word=w, frequency=str(utils.WORDS.get(w))) for w in words]) # noqa\n\n\napp = endpoints.api_server([WordsApi])\n" }, { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.7692307829856873, "avg_line_length": 77, "blob_id": "a6197b8b212448d40366b743f02b9f5208483204", "content_id": "c042af14f169b48f26fe69965195357c46c1fd65", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 78, "license_type": "no_license", "max_line_length": 77, "num_lines": 1, "path": "/README.md", "repo_name": "ronbeltran/four-pics-one-word", "src_encoding": "UTF-8", "text": "Read more [here](http://ronbeltran.github.io/2015/07/4pics-1word-solver.html)\n" }, { "alpha_fraction": 0.6056337952613831, "alphanum_fraction": 0.6056337952613831, "avg_line_length": 23.850000381469727, "blob_id": "23ec291f92b8fc420dd3e688bb5ef53dbb9edd69", "content_id": "1f2df0cb32798847973563d086749dadeddaecfe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 994, "license_type": "no_license", "max_line_length": 63, 
"num_lines": 40, "path": "/static/serialize.py", "repo_name": "ronbeltran/four-pics-one-word", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nimport os\nimport pickle\n\nFILEPATH = os.path.abspath('google-books-common-words.txt')\n\nWORD_LENGTHS = []\n\n\ndef convert_to_dict(filename):\n data = {}\n with open(filename, 'r') as f:\n for line in f:\n if line.startswith('#'):\n continue\n key, value = line.split()\n data.update({key: int(value)})\n return data\n\n\ndef pickle_data(data, filename):\n if not isinstance(data, dict):\n raise TypeError('data should be a dict')\n with open(filename, 'wb') as f:\n pickle.dump(data, f, pickle.HIGHEST_PROTOCOL)\n print 'Data saved as: {}'.format(os.path.abspath(filename))\n\n\ndef unpickle_data(filename):\n data = None\n with open(filename, 'rb') as f:\n data = pickle.load(f)\n return data\n\n\nif __name__ == \"__main__\":\n data = convert_to_dict(FILEPATH)\n pickle_data(data, 'google-books-common-words.bin')\n# _data = unpickle_data('google-books-common-words.bin')\n# print _data\n" }, { "alpha_fraction": 0.6962025165557861, "alphanum_fraction": 0.6962025165557861, "avg_line_length": 14.800000190734863, "blob_id": "834de8a110fb87de0878f777d558236aa1d86fb9", "content_id": "e3a50770d57b8e34ef116eefe5609bef21624a6e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 79, "license_type": "no_license", "max_line_length": 30, "num_lines": 5, "path": "/game/__init__.py", "repo_name": "ronbeltran/four-pics-one-word", "src_encoding": "UTF-8", "text": "from flask import Flask\napp = Flask(__name__)\n\n\nfrom game import views # noqa\n" } ]
7
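`utils.is_subset` answers a multiset question (can `word` be spelled from the letters in `choices`?) by scanning a list and calling `remove`, which is quadratic. `collections.Counter` states the same check directly; below is a sketch in Python 3 spelling, whereas the repository itself is Python 2 on App Engine.

```python
# Counter-based variant of utils.is_subset (Python 3 spelling).
from collections import Counter

def is_subset(word, choices):
    need = Counter(word)
    have = Counter(choices)
    return all(have[letter] >= count for letter, count in need.items())

assert is_subset('NOON', 'NNOOX')    # two Ns and two Os are available
assert not is_subset('NOON', 'NOX')  # only one N on offer
```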
rellisapk/Tic-Tac-Toe-Dekstop-Game-With-Python
https://github.com/rellisapk/Tic-Tac-Toe-Dekstop-Game-With-Python
e36ab7e2b6311cf799f88a008673655181401123
f6335f3a73a9fa583530019444094b4a6669eaa7
0df771d1cd8925462c092c4f72b9be734f118a33
refs/heads/main
2023-06-16T12:18:59.158387
2021-07-16T05:55:37
2021-07-16T05:55:37
386,525,249
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6448230743408203, "alphanum_fraction": 0.6461336612701416, "avg_line_length": 49.86666488647461, "blob_id": "9a7bacbe6acebe9a43693d927bb4922e0092c247", "content_id": "f7eae91dc0e5e8415b43b7ed1b972ed9154832d2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 763, "license_type": "no_license", "max_line_length": 129, "num_lines": 15, "path": "/README.md", "repo_name": "rellisapk/Tic-Tac-Toe-Dekstop-Game-With-Python", "src_encoding": "UTF-8", "text": "# Tic-Tac-Toe-Dekstop-Game-With-Python\nWelcome to the TicTacToe Game, a simple desktop application that allows users to interact with our Game Engine. \nThe purpose of this project is to allow users to play easily besides that users can also try to make this game themselves. \nThis game is made in the Python programming language using tkinter.\n\n---------------------------------------------------------------------------------------------------------------------------------\n## Running The App\n- Open the folder where the tictactoe program files are stored\n- Open another cmd / terminal\n- Run & Compile the Program with the command \"python3 TictTacToe.py\"\n- Then a message box will appear, click OK\n- Tic Tac Toe is now playable\n\n## Using GUI\n- Already Using GUI\n" }, { "alpha_fraction": 0.5515414476394653, "alphanum_fraction": 0.5785163640975952, "avg_line_length": 34.186439514160156, "blob_id": "88be1d3810f81bdb2f1208ae42054375c8ff3a07", "content_id": "1a08f161bd2ad6f4af631ef27f11c9c22fb25231", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8304, "license_type": "no_license", "max_line_length": 452, "num_lines": 236, "path": "/TicTacToe.py", "repo_name": "rellisapk/Tic-Tac-Toe-Dekstop-Game-With-Python", "src_encoding": "UTF-8", "text": "from tkinter import*\nfrom tkinter import messagebox\n\nroot = Tk()\n#root.geometry(\"650x570\")\nroot.title(\"Tic Tac Toe Game\")\nroot.configure(background = 'White')\nmessagebox.showinfo(\"Tic Tac Toe\", \"Hello wecome to Tic Tac Toe Game\\nGood Luck! and Playing Fun^^\")\n\nclicked = True\ncount = 0\ndef about():\n messagebox.showinfo(\"RULES FOR TIC-TAC-TOE\", \"1. The game is played on a grid that's 3 squares by 3 squares.\\n\\n2. You are X, your friend (or the computer in this case) is O. Players take turns putting their marks in empty squares.\\n\\n3. The first player to get 3 of her marks in a row (up, down, across, or diagonally) is the winner.\\n\\n4. When all 9 squares are full, the game is over. 
If no player has 3 marks in a row, the game ends in a tie.\")\ndef disable_all_buttons():\n b1.config(state=DISABLED)\n b2.config(state=DISABLED)\n b3.config(state=DISABLED)\n b4.config(state=DISABLED)\n b5.config(state=DISABLED)\n b6.config(state=DISABLED)\n b7.config(state=DISABLED)\n b8.config(state=DISABLED)\n b9.config(state=DISABLED)\n\n#Check The Winner\ndef wonthegame():\n global winner\n winner = False\n\n #For X\n if b1[\"text\"] == \"X\" and b2[\"text\"] == \"X\" and b3[\"text\"] == \"X\" :\n b1.config(bg=\"white\")\n b2.config(bg=\"white\")\n b3.config(bg=\"white\")\n\n winner = True\n messagebox.showinfo(\"Congratulations\", \"X Winning The Games\\nO is a Loser!\")\n disable_all_buttons()\n\n elif b4[\"text\"] == \"X\" and b5[\"text\"] == \"X\" and b6[\"text\"] == \"X\" :\n b4.config(bg=\"white\")\n b5.config(bg=\"white\")\n b6.config(bg=\"white\")\n\n winner = True\n messagebox.showinfo(\"Congratulations\", \"X Winning The Games\\nO is a Loser!\")\n disable_all_buttons()\n \n elif b7[\"text\"] == \"X\" and b8[\"text\"] == \"X\" and b9[\"text\"] == \"X\" :\n b7.config(bg=\"white\")\n b8.config(bg=\"white\")\n b9.config(bg=\"white\")\n winner = True\n messagebox.showinfo(\"Congratulations\", \"X Winning The Games\\nO is a Loser!\")\n disable_all_buttons()\n\n elif b1[\"text\"] == \"X\" and b4[\"text\"] == \"X\" and b7[\"text\"] == \"X\" :\n b1.config(bg=\"white\")\n b4.config(bg=\"white\")\n b7.config(bg=\"white\")\n\n winner = True\n messagebox.showinfo(\"Congratulations\", \"X Winning The Games\\nO is a Loser!\")\n disable_all_buttons()\n\n elif b2[\"text\"] == \"X\" and b5[\"text\"] == \"X\" and b8[\"text\"] == \"X\" :\n b2.config(bg=\"white\")\n b5.config(bg=\"white\")\n b8.config(bg=\"white\")\n\n winner = True\n messagebox.showinfo(\"Congratulations\", \"X Winning The Games\\nO is a Loser!\")\n disable_all_buttons()\n \n elif b3[\"text\"] == \"X\" and b6[\"text\"] == \"X\" and b9[\"text\"] == \"X\" :\n b3.config(bg=\"white\")\n b6.config(bg=\"white\")\n b9.config(bg=\"white\")\n\n winner = True\n messagebox.showinfo(\"Congratulations\", \"X Winning The Games\\nO is a Loser!\")\n disable_all_buttons()\n \n elif b1[\"text\"] == \"X\" and b5[\"text\"] == \"X\" and b9[\"text\"] == \"X\" :\n b1.config(bg=\"white\")\n b5.config(bg=\"white\")\n b9.config(bg=\"white\")\n\n winner = True\n messagebox.showinfo(\"Congratulations\", \"X Winning The Games\\nO is a Loser!\")\n disable_all_buttons()\n\n elif b3[\"text\"] == \"X\" and b5[\"text\"] == \"X\" and b7[\"text\"] == \"X\" :\n b3.config(bg=\"white\")\n b5.config(bg=\"white\")\n b7.config(bg=\"white\")\n\n winner = True\n messagebox.showinfo(\"Congratulations\", \"X Winning The Games\\nO is a Loser!\")\n disable_all_buttons()\n\n #For O\n if b1[\"text\"] == \"O\" and b2[\"text\"] == \"O\" and b3[\"text\"] == \"O\" :\n b1.config(bg=\"white\")\n b2.config(bg=\"white\")\n b3.config(bg=\"white\")\n\n winner = True\n messagebox.showinfo(\"Congratulations\", \"O Winning The Games\\nX is a Loser!\")\n disable_all_buttons()\n\n elif b4[\"text\"] == \"O\" and b5[\"text\"] == \"O\" and b6[\"text\"] == \"O\" :\n b4.config(bg=\"white\")\n b5.config(bg=\"white\")\n b6.config(bg=\"white\")\n\n winner = True\n messagebox.showinfo(\"Congratulations\", \"O Winning The Games\\nX is a Loser!\")\n disable_all_buttons()\n \n elif b7[\"text\"] == \"O\" and b8[\"text\"] == \"O\" and b9[\"text\"] == \"O\" :\n b7.config(bg=\"white\")\n b8.config(bg=\"white\")\n b9.config(bg=\"white\")\n\n winner = True\n messagebox.showinfo(\"Congratulations\", \"O 
Winning The Games\\nX is a Loser!\")\n disable_all_buttons()\n\n elif b1[\"text\"] == \"O\" and b4[\"text\"] == \"O\" and b7[\"text\"] == \"O\" :\n b1.config(bg=\"white\")\n b4.config(bg=\"white\")\n b7.config(bg=\"white\")\n\n winner = True\n messagebox.showinfo(\"Congratulations\", \"O Winning The Games\\nX is a Loser!\")\n disable_all_buttons()\n\n elif b2[\"text\"] == \"O\" and b5[\"text\"] == \"O\" and b8[\"text\"] == \"O\" :\n b2.config(bg=\"white\")\n b5.config(bg=\"white\")\n b8.config(bg=\"white\")\n\n winner = True\n messagebox.showinfo(\"Congratulations\", \"O Winning The Games\\nX is a Loser!\")\n disable_all_buttons()\n \n elif b3[\"text\"] == \"O\" and b6[\"text\"] == \"O\" and b9[\"text\"] == \"O\" :\n b3.config(bg=\"white\")\n b6.config(bg=\"white\")\n b9.config(bg=\"white\")\n\n winner = True\n messagebox.showinfo(\"Congratulations\", \"O Winning The Games\\nX is a Loser!\")\n disable_all_buttons()\n \n elif b1[\"text\"] == \"O\" and b5[\"text\"] == \"O\" and b9[\"text\"] == \"O\" :\n b1.config(bg=\"white\")\n b5.config(bg=\"white\")\n b9.config(bg=\"white\")\n\n winner = True\n messagebox.showinfo(\"Congratulations\", \"O Winning The Games\\nX is a Loser!\")\n disable_all_buttons()\n elif b3[\"text\"] == \"O\" and b5[\"text\"] == \"O\" and b7[\"text\"] == \"O\" :\n b3.config(bg=\"white\")\n b5.config(bg=\"white\")\n b7.config(bg=\"white\")\n\n winner = True\n messagebox.showinfo(\"Congratulations\", \"O Winning The Games\\nX is a Loser!\")\n disable_all_buttons()\n \n if count == 9 and winner == False:\n messagebox.showinfo(\"Tic Tac Toe\", \"Its a Tie\\nNo one wins this game! Try again\")\n\n#Button Clicked Function\ndef b_click(b):\n global clicked, count\n\n if b[\"text\"] == \" \" and clicked == True :\n b[\"text\"] = \"X\"\n clicked = False\n count += 1\n wonthegame()\n elif b[\"text\"] == \" \" and clicked == False :\n b[\"text\"] = \"O\"\n clicked = True\n count += 1\n wonthegame()\n else :\n messagebox.showerror(\"Error\", \"Its already selected\\nPick Another Column\")\n\ndef reset():\n global b1,b2,b3,b4,b5,b6,b7,b8,b9\n global clicked, count\n clicked = True\n count = 0\n\n #Build Button\n b1 = Button(root, text=\" \", font=(\"Tw Cen MT\", 22), bg=\"white\", width=10, height=5, command=lambda:b_click(b1))\n b2 = Button(root, text=\" \", font=(\"Tw Cen MT\", 22), bg='white', width=10, height=5, command=lambda:b_click(b2))\n b3 = Button(root, text=\" \", font=(\"Tw Cen MT\", 22), bg='white', width=10, height=5, command=lambda:b_click(b3))\n b4 = Button(root, text=\" \", font=(\"Tw Cen MT\", 22), bg='white', width=10, height=5, command=lambda:b_click(b4))\n b5 = Button(root, text=\" \", font=(\"Tw Cen MT\", 22), bg='white', width=10, height=5, command=lambda:b_click(b5))\n b6 = Button(root, text=\" \", font=(\"Tw Cen MT\", 22), bg='white', width=10, height=5, command=lambda:b_click(b6))\n b7 = Button(root, text=\" \", font=(\"Tw Cen MT\", 22), bg='white', width=10, height=5, command=lambda:b_click(b7))\n b8 = Button(root, text=\" \", font=(\"Tw Cen MT\", 22), bg='white', width=10, height=5, command=lambda:b_click(b8))\n b9 = Button(root, text=\" \", font=(\"Tw Cen MT\", 22), bg='white', width=10, height=5, command=lambda:b_click(b9))\n\n #Grid Button\n b1.grid(row=0, column=0)\n b2.grid(row=0, column=1)\n b3.grid(row=0, column=2)\n\n b4.grid(row=1, column=0)\n b5.grid(row=1, column=1)\n b6.grid(row=1, column=2)\n\n b7.grid(row=2, column=0)\n b8.grid(row=2, column=1)\n b9.grid(row=2, column=2)\n\n#Menu\nmymenu = Menu(root)\nroot.config(menu=mymenu)\n\noption_menu = 
Menu(mymenu, tearoff=False)\nmymenu.add_cascade(label=\"Menu\", menu=option_menu)\noption_menu.add_command(label=\"Reset\", command=reset)\noption_menu.add_command(label=\"About\", command=about)\n\nreset()\nabout()\n\nroot.mainloop()\n" } ]
2
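`wonthegame` above spells out every winning line by hand, once per mark, which is why it runs to sixteen near-identical blocks. The usual compression is a table of index triples; below is a sketch where a hypothetical `cells` list stands in for buttons `b1`..`b9` in reading order.

```python
# Data-driven win check for a 3x3 board (sketch).
WIN_LINES = [(0, 1, 2), (3, 4, 5), (6, 7, 8),  # rows
             (0, 3, 6), (1, 4, 7), (2, 5, 8),  # columns
             (0, 4, 8), (2, 4, 6)]             # diagonals

def winner(cells):
    """Return 'X', 'O', or None for a list of nine button texts."""
    for a, b, c in WIN_LINES:
        if cells[a] == cells[b] == cells[c] != ' ':
            return cells[a]
    return None

assert winner(['X', 'X', 'X'] + [' '] * 6) == 'X'
assert winner([' '] * 9) is None
```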
mazelife/figgy
https://github.com/mazelife/figgy
944f3e0c6ebadd8be6b64e006c059f66e0b1c36b
41c619fc71e2d2e7d42c308dc27cf4efd5d1696a
0c48061e9c6add061bfec8ae8c36b22847026783
refs/heads/master
2020-04-03T16:45:58.661413
2014-06-02T18:49:16
2014-06-02T18:49:16
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.707602322101593, "alphanum_fraction": 0.7115010023117065, "avg_line_length": 18.653846740722656, "blob_id": "721c0343a57269139f383f59b01cd1caf7d19ddf", "content_id": "dc93c8350fbd1fc5d4e01aeb792ec5ef73239c45", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 513, "license_type": "no_license", "max_line_length": 56, "num_lines": 26, "path": "/storage/admin.py", "repo_name": "mazelife/figgy", "src_encoding": "UTF-8", "text": "from django.contrib import admin\n\nfrom storage.models import Book, Alias, Edition\n\n\nclass InlineAliasAdmin(admin.StackedInline):\n\n model = Alias\n extra = 0\n\n\nclass InlineEditionAdmin(admin.StackedInline):\n\n model = Edition\n extra = 0\n\n\nclass BookAdmin(admin.ModelAdmin):\n\n inlines = [InlineEditionAdmin, InlineAliasAdmin]\n list_display = ['id', 'title', 'number_of_editions']\n\n def number_of_editions(self, obj):\n return obj.edition_set.count()\n\nadmin.site.register(Book, BookAdmin)\n\n\n" }, { "alpha_fraction": 0.6621848940849304, "alphanum_fraction": 0.6756302714347839, "avg_line_length": 28.649999618530273, "blob_id": "fc0eeac9bbc2cc4958cda39cf8f8fa0e55c25533", "content_id": "c940fc8a5a4bd5de8e83558beaf24a677a79ee65", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 595, "license_type": "no_license", "max_line_length": 102, "num_lines": 20, "path": "/storage/tests/test_models.py", "repo_name": "mazelife/figgy", "src_encoding": "UTF-8", "text": "# encoding: utf-8\n'''\nCopyright (c) 2013 Safari Books Online. All rights reserved.\n'''\n\nimport uuid\n\nfrom django.test import TestCase\n\nfrom storage import models\n\nclass TestModels(TestCase):\n def setUp(self):\n self.book = models.Book.objects.create(pk=str(uuid.uuid4()))\n self.edition = models.Edition.objects.create(book=self.book, title=\"The Title\", version=\"1.0\")\n\n def test_book_have_unicode_method(self):\n '''The Book should have a __unicode__ method.'''\n expected = 'Book {}'.format(self.book.pk)\n self.assertEquals(expected, unicode(self.book))\n\n\n" }, { "alpha_fraction": 0.6610169410705566, "alphanum_fraction": 0.6610169410705566, "avg_line_length": 28.5, "blob_id": "a7149d99dd212732ed558d8921d1afb3a5f4c0f3", "content_id": "b79675361d8ad91f1fb189d4694b009a351d317e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 118, "license_type": "no_license", "max_line_length": 71, "num_lines": 4, "path": "/storage/exceptions.py", "repo_name": "mazelife/figgy", "src_encoding": "UTF-8", "text": "class BadDataFile(Exception):\n \"\"\"\n This exception is raised when a bad data file (XML) is encountered.\n \"\"\"\n" }, { "alpha_fraction": 0.6709097623825073, "alphanum_fraction": 0.6776488423347473, "avg_line_length": 43.516666412353516, "blob_id": "32179f05bf5f46190efdad93acb7d86bbb781807", "content_id": "c00ef5ddb31091feb1b2a0791f026562a9cfcf20", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2671, "license_type": "no_license", "max_line_length": 120, "num_lines": 60, "path": "/storage/tools.py", "repo_name": "mazelife/figgy", "src_encoding": "UTF-8", "text": "# encoding: utf-8\n# Created by David Rideout <drideout@safaribooksonline.com> on 2/7/14 4:58 PM\n# Copyright (c) 2013 Safari Books Online, LLC. 
All rights reserved.\n\nfrom decimal import Decimal, InvalidOperation\n\nfrom storage.models import Alias, Book, Edition\nfrom storage.exceptions import BadDataFile\n\n\ndef process_book_element(book_element):\n    \"\"\"\n    Process a book element into the database. Operates on the following assumptions:\n\n    1. A book ID may have a bad value, but if any of its aliases match a single existing book then an update\n    operation on that book can be done safely using the data in the <book> element.\n    2. For any given <book> element, if the aliases match more than one book, then one or more of them are incorrect and\n    an exception should be raised.\n    3. If the book is missing a <version> or if it is not a number, an exception should be raised.\n\n\n    :param book_element: the <book> XML element\n    :returns:\n    :raises: BadDataFile\n    \"\"\"\n    book_id = book_element.get('id')\n    aliases = [(a.get('scheme'), a.get('value')) for a in book_element.xpath('aliases/alias')]\n    edition_version = book_element.findtext('version')\n    try:\n        edition_version = Decimal(edition_version)\n    except InvalidOperation:\n        raise BadDataFile(\"Invalid version data: {} is not a decimal number.\".format(edition_version))\n    except TypeError: # Raised when there is no <version> element.\n        raise BadDataFile(\"The version number is missing from this file.\")\n    try:\n        book = Book.objects.get(pk=book_id)\n    except Book.DoesNotExist:\n        book = None\n    # Try to match on aliases, all of which must agree.\n    books_matched = {}\n    for scheme, value in aliases:\n        for alias in Alias.objects.filter(scheme=scheme, value=value):\n            if alias.book_id not in books_matched:\n                books_matched[alias.book_id] = alias.book\n    if len(books_matched) > 1:\n        raise BadDataFile(\"The aliases in this file match more than one book.\")\n    # If the book did not match by ID, use the alias match if there was one, or create a new book.\n    if book is None:\n        if len(books_matched) == 1:\n            book = books_matched.values()[0]\n        else:\n            book = Book.objects.create(pk=book_id)\n    # Handle create/update of the book's edition.\n    edition, created = Edition.objects.get_or_create(book_id=book.pk, version=edition_version)\n    edition.title = book_element.findtext('title')\n    edition.description = book_element.findtext('description')\n    edition.save()\n    # Handle create/update of the book's aliases.\n    for scheme, value in aliases:\n        book.aliases.get_or_create(scheme=scheme, value=value)\n" } ]
4
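`process_book_element` consumes an lxml `<book>` element carrying an id, a title, a version, a description, and a list of aliases. Below is a hypothetical round-trip showing that shape; it assumes a configured Django environment and lxml, and the alias value is a placeholder rather than a real ISBN.

```python
# Hypothetical input for storage.tools.process_book_element; the alias
# value below is a placeholder, not a real ISBN.
from lxml import etree

from storage.tools import process_book_element

xml = '''
<book id="book-001">
  <title>Example Title</title>
  <version>1.0</version>
  <description>First edition.</description>
  <aliases>
    <alias scheme="ISBN-10" value="0000000000"/>
  </aliases>
</book>
'''
process_book_element(etree.fromstring(xml))
```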
yjad/Academy
https://github.com/yjad/Academy
b605ad342287c58358873be70df2269b8de32169
af32442744f21483aaa8b2cadff1dc26731163b0
feb55bd0190c0511c060d7f29f074e61b5055f4e
refs/heads/master
2023-01-07T17:05:57.954731
2020-10-15T10:20:49
2020-10-15T10:20:49
293,545,953
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6340541243553162, "alphanum_fraction": 0.6371569633483887, "avg_line_length": 38.51340866088867, "blob_id": "48757125f9dc98ed860e77612065c38a489116f4", "content_id": "e6e61b5695df86ef7293e98f3554023227687657", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10313, "license_type": "no_license", "max_line_length": 109, "num_lines": 261, "path": "/flaskblog/users/routes.py", "repo_name": "yjad/Academy", "src_encoding": "UTF-8", "text": "from flask import render_template, url_for, flash, redirect, request, Blueprint\nfrom flask_login import login_user, current_user, logout_user, login_required\nfrom flaskblog import db, bcrypt\nfrom flaskblog.models import User, Post, Role\nfrom flaskblog.users.forms import (RegistrationForm, LoginForm, UpdateAccountForm,\n RequestResetForm, ResetPasswordForm, AddUserForm, EditUserForm, RoleForm)\nfrom flaskblog.users.utils import save_picture, send_reset_email\n\nusers = Blueprint('users', __name__)\n\n\n@users.route(\"/register\", methods=['GET', 'POST'])\ndef register():\n if current_user.is_authenticated:\n return redirect(url_for('main.home'))\n form = RegistrationForm()\n if form.validate_on_submit():\n hashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')\n user = User(username=form.username.data, email=form.email.data, password=hashed_password)\n db.session.add(user)\n db.session.commit()\n flash('Your account has been created! You are now able to log in', 'success')\n return redirect(url_for('users.login'))\n return render_template('register.html', title='Register', form=form)\n\n\n@users.route('/users/adduser', methods=['GET', 'POST'])\ndef add_user():\n # if current_user.is_authenticated:\n # return redirect(url_for('home'))\n\n roles = db.session.query(Role).all()\n roles_list = [(i.id, i.name) for i in roles]\n form = AddUserForm()\n form.role_id.choices = roles_list\n if form.validate_on_submit():\n hashed_password = bcrypt.generate_password_hash(form.password.data)\n user = User(\n login_name=form.login_name.data,\n username=form.username.data,\n email=form.email.data,\n password=hashed_password,\n role_id=form.role_id.data)\n db.session.add(user)\n db.session.commit()\n\n flash(f'Account has been created, you can login!', 'success')\n return redirect(url_for('users.add_user'))\n else:\n return render_template('add_user.html', title='Add User', form=form)\n\n\n@users.route('/users/user_list', methods=['GET', 'POST'])\n@login_required\ndef user_list():\n page = request.args.get('page', default=1, type=int)\n user_lst = User.query.order_by(User.login_name.asc()).paginate(page=page, per_page=5)\n return render_template('user_list.html', user_list=user_lst)\n\n\n@users.route(\"/users/users/<int:user_id>\", methods=['GET', 'POST'])\n@login_required\ndef edit_user(user_id):\n user = User.query.get_or_404(user_id)\n roles = db.session.query(Role).all()\n roles_list = [(i.id, i.name) for i in roles]\n form = EditUserForm()\n form.role_id.choices = roles_list\n\n if form.validate_on_submit():\n if user.email != form.email.data: # email changed. 
check if new is unique\n            if User.query.filter_by(email=form.email.data).first():\n                flash('Email already exists, select another one!', 'danger')\n                return redirect('#') # reload\n        user.username = form.username.data\n        user.email = form.email.data\n        user.role_id = form.role_id.data\n        db.session.commit()\n        flash('Your updates have been saved!', 'success')\n        return redirect(url_for('users.user_list'))\n    elif request.method == 'GET':\n        # print(\"inside GET\", form.email.data, form.email.object_data, form.email.raw_data)\n        form.username.data = user.username\n        form.login_name.data = user.login_name\n        form.email.data = user.email\n        form.role_id.data = user.role_id\n\n    return render_template('edit_user.html', title='Update User', form=form, user=user, legend=\"Update User\")\n\n\n#@users.route(\"/\")\n@users.route('/login', methods=['GET', 'POST'])\ndef login():\n    if current_user.is_authenticated:\n        return redirect(url_for('main.home'))\n    form = LoginForm()\n    if form.validate_on_submit():\n        user = User.query.filter_by(login_name=form.login_name.data).first()\n        if user and bcrypt.check_password_hash(user.password, form.password.data):\n            next_page = request.args.get('next')\n            login_user(user, remember=form.remember.data)\n            return redirect(next_page) if next_page else redirect(url_for('main.home'))\n        else:\n            flash('Login Unsuccessful. Please check username and password', 'danger')\n    return render_template('login.html', title='Login', form=form)\n\n@users.route(\"/logout\")\ndef logout():\n    logout_user()\n    return redirect(url_for('main.home'))\n\n\n@users.route(\"/account\", methods=['GET', 'POST'])\n@login_required\ndef account():\n    form = UpdateAccountForm()\n    if form.validate_on_submit():\n        if form.picture.data:\n            picture_file = save_picture(form.picture.data)\n            current_user.image_file = picture_file\n        current_user.username = form.username.data\n        current_user.email = form.email.data\n        db.session.commit()\n        flash('Your account has been updated!', 'success')\n        return redirect(url_for('users.account'))\n    elif request.method == 'GET':\n        form.username.data = current_user.username\n        form.email.data = current_user.email\n    image_file = url_for('static', filename='profile_pics/' + current_user.image_file)\n    return render_template('account.html', title='Account',\n                           image_file=image_file, form=form)\n\n\n@users.route(\"/user/<string:username>\")\ndef user_posts(username):\n    page = request.args.get('page', 1, type=int)\n    user = User.query.filter_by(username=username).first_or_404()\n    posts = Post.query.filter_by(author=user)\\\n        .order_by(Post.date_posted.desc())\\\n        .paginate(page=page, per_page=5)\n    return render_template('user_posts.html', posts=posts, user=user)\n\n\n@users.route(\"/reset_password\", methods=['GET', 'POST'])\ndef reset_request():\n    if current_user.is_authenticated:\n        return redirect(url_for('main.home'))\n    form = RequestResetForm()\n    if form.validate_on_submit():\n        user = User.query.filter_by(email=form.email.data).first()\n        send_reset_email(user)\n        flash('An email has been sent with instructions to reset your password.', 'info')\n        return redirect(url_for('users.login'))\n    return render_template('reset_request.html', title='Reset Password', form=form)\n\n\n@users.route(\"/reset_password/<token>\", methods=['GET', 'POST'])\ndef reset_token(token):\n    if current_user.is_authenticated:\n        return redirect(url_for('main.home'))\n    user = User.verify_reset_token(token)\n    if user is None:\n        flash('That is an invalid or expired token', 'warning')\n        return redirect(url_for('users.reset_request'))\n    form = 
ResetPasswordForm()\n if form.validate_on_submit():\n hashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')\n user.password = hashed_password\n db.session.commit()\n flash('Your password has been updated! You are now able to log in', 'success')\n return redirect(url_for('users.login'))\n return render_template('reset_token.html', title='Reset Password', form=form)\n# ----------------------------------------------------------------------\n\n\n@users.route('/users/add_role', methods=['GET', 'POST'])\n# @login_required\ndef add_role():\n # if current_user.is_authenticated:\n # return redirect(url_for('home'))\n form = RoleForm()\n if form.validate_on_submit():\n role = Role(\n name=form.name.data,\n description=form.description.data,\n is_admin=form.is_admin.data) # 1 or 0\n db.session.add(role)\n db.session.commit()\n flash(f'Role Saved!', 'success')\n return redirect(url_for('users.role_list'))\n else:\n return render_template('add_role.html', title='Add Role', form=form, legend=\"Add Role\")\n\n\n# @users.route(\"/test\", methods=['GET', 'POST'])\n# def test():\n# form = TestRoleForm()\n# return render_template(\"test.html\", form=form)\n\n\n@users.route('/users/role_list', methods=['GET', 'POST'])\n@login_required\ndef role_list():\n page = request.args.get('page', default=1, type=int)\n # role_list = Role.query.order_by(Role.name.desc()).paginate(page=page, per_page=5)\n role_list = Role.query.all()\n # for r in role_list:\n # r.admin_role = (r.admin_role == 1)\n return render_template('role_list.html', role_list=role_list)\n\n\n# @users.route(\"/users/roles/<int:role_id>/update\", methods=['GET', 'POST'])\n# @login_required\n# def edit_role(role_id):\n# role = Role.query.get_or_404(role_id)\n# # if post.author != current_user:\n# # abort(403)\n# form = RoleForm()\n# if form.validate_on_submit():\n# role.name = form.name.data\n# role.description = form.description.data\n# role.is_admin = form.is_admin.data\n# db.session.commit()\n# flash('Your updates have been saved!', 'success')\n# return redirect(url_for('role_list'))\n# elif request.method == 'GET':\n# form.name.data = role.name\n# form.description.data = role.description\n# form.is_admin.data = role.is_admin\n# return render_template('add_role.html', title='Update Role', form=form, legend=\"Update Role\")\n\n\n@users.route(\"/users/roles/<int:role_id>/delete\", methods=['GET', 'POST'])\n@login_required\ndef delete_role(role_id):\n role = Role.query.get_or_404(role_id)\n db.session.delete(role)\n db.session.commit()\n flash('Record deleted!', 'success')\n return redirect(url_for('users.role_list'))\n\n\n@users.route(\"/users/roles/<int:role_id>\", methods=['GET', 'POST'])\n@login_required\ndef edit_role(role_id):\n role = Role.query.get_or_404(role_id)\n # if post.author != current_user:\n # abort(403)\n form = RoleForm()\n if form.validate_on_submit():\n role.name = form.name.data\n role.description = form.description.data\n role.is_admin = form.is_admin.data\n db.session.commit()\n flash('Your updates have been saved!', 'success')\n return redirect(url_for('users.role_list'))\n elif request.method == 'GET':\n form.name.data = role.name\n form.description.data = role.description\n form.is_admin.data = role.is_admin\n return render_template('edit_role.html', title='Update Role', form=form, role=role, legend=\"Update Role\")\n" }, { "alpha_fraction": 0.6488736271858215, "alphanum_fraction": 0.6616062521934509, "avg_line_length": 34.8070182800293, "blob_id": "8ed8431dfccc9c99071ee613ed45d3cba65b7bed", 
"content_id": "47d5a80633923425d3bb5c809e0975c16de82bef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2042, "license_type": "no_license", "max_line_length": 103, "num_lines": 57, "path": "/flaskblog/reports/routes.py", "repo_name": "yjad/Academy", "src_encoding": "UTF-8", "text": "from flask import render_template, request, Blueprint, url_for, Flask\nfrom flask_login import login_required\nfrom flaskblog.models import Post\nfrom flaskblog.reports.zoom_reports import attendees_last_2_month, update_meetings, attendees_per_month\nimport os\n\n\n\nreports = Blueprint('reports', __name__)\n\n\n@reports.route(\"/reports/attendance_last_2_month\")\n@login_required\ndef attendance_last_2_month():\n\n #IMAGE_DIR= r\"C:\\Yahia\\Home\\Yahia-Dev\\Python\\Academy\\flaskblog\\static\\out\"\n IMAGE_DIR= os.path.join(os.path.dirname(__file__)[:-8], 'static','out')\n image_name = 'attendess_2.png'\n filename = os.path.join(IMAGE_DIR, image_name)\n #attendees_last_2_month(filename)\n return render_template('attendance_graph.html', image=image_name, title=\"Attendance last 2 Month\")\n\n # legend = 'Monthly Data'\n # labels = [\"January\", \"February\", \"March\", \"April\", \"May\", \"June\", \"July\", \"August\"]\n # values = [10, 9, 8, 7, 6, 4, 7, 8]\n # return render_template('chart.html', values=values, labels=labels, legend=legend)\n\n # def plot_png():\n # fig = create_figure()\n # output = io.BytesIO()\n # FigureCanvas(fig).print_png(output)\n # return Response(output.getvalue(), mimetype='image/png')\n #\n # def create_figure():\n # fig = Figure()\n # axis = fig.add_subplot(1, 1, 1)\n # xs = range(100)\n # ys = [random.randint(1, 50) for x in xs]\n # axis.plot(xs, ys)\n # return fig\n\n@reports.route(\"/reports/attendance_per_month\")\n@login_required\ndef attendance_per_month():\n\n IMAGE_DIR= os.path.join(os.path.dirname(__file__)[:-8], 'static','out')\n image_name = 'attendess_month.png'\n filename = os.path.join(IMAGE_DIR, image_name)\n attendees_per_month(filename)\n return render_template('attendance_graph.html', image=image_name, title=\"Attendance per Month\")\n\n@reports.route(\"/reports/load_meetings_data\")\n@login_required\ndef load_meetings_data():\n render_template('loading.html')\n update_meetings()\n return render_template('home.html')\n\n" }, { "alpha_fraction": 0.6578043103218079, "alphanum_fraction": 0.6621450781822205, "avg_line_length": 45.47058868408203, "blob_id": "7f9b8515c4ec5fde7782623e5a42bade4cf7e5af", "content_id": "87682fd95e15cef37294bd02de0c3d198b939de0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5529, "license_type": "no_license", "max_line_length": 144, "num_lines": 119, "path": "/flaskblog/users/forms.py", "repo_name": "yjad/Academy", "src_encoding": "UTF-8", "text": "from flask_wtf import FlaskForm\nfrom flask_wtf.file import FileField, FileAllowed\nfrom wtforms import StringField, PasswordField, SubmitField, BooleanField, SelectField\nfrom wtforms.validators import DataRequired, Length, Email, EqualTo, ValidationError\nfrom flask_login import current_user\nfrom flaskblog.models import User, Role\n\n\nclass RegistrationForm(FlaskForm):\n username = StringField('Username',\n validators=[DataRequired(), Length(min=2, max=20)])\n email = StringField('Email',\n validators=[DataRequired(), Email()])\n password = PasswordField('Password', validators=[DataRequired()])\n confirm_password = PasswordField('Confirm Password',\n 
validators=[DataRequired(), EqualTo('password')])\n submit = SubmitField('Sign Up')\n\n def validate_username(self, username):\n user = User.query.filter_by(username=username.data).first()\n if user:\n raise ValidationError('That username is taken. Please choose a different one.')\n\n def validate_email(self, email):\n user = User.query.filter_by(email=email.data).first()\n if user:\n raise ValidationError('That email is taken. Please choose a different one.')\n\n\nclass AddUserForm(FlaskForm):\n login_name = StringField('Login Name', validators=[DataRequired(), Length(min=4, max=25)])\n username = StringField('Username', validators=[DataRequired(), Length(max=100)])\n email = StringField('Email Address', validators=[DataRequired(), Email()])\n password = PasswordField('New Password', validators=[DataRequired()])\n confirm_password = PasswordField('Confirm Password', validators=[DataRequired(), EqualTo('password', message='Passwords must match')])\n role_id = SelectField(\"Role\", coerce=int, validators=[DataRequired()])\n submit = SubmitField('Save')\n\n def validate_login_name(self, login_name):\n user = User.query.filter_by(login_name=login_name.data).first()\n if user:\n raise ValidationError('That login name is taken, please select another one')\n\n def validate_email(self, email):\n user = User.query.filter_by(email=email.data).first()\n if user:\n raise ValidationError('That email already exists')\n\nclass EditUserForm(FlaskForm):\n #login_name = StringField('Login Name', validators=[DataRequired(), Length(min=4, max=25)])\n login_name = StringField('Login Name', render_kw={'readonly': True})\n username = StringField('Username', validators=[DataRequired(), Length(max=100)])\n email = StringField('Email Address', validators=[DataRequired(), Email()])\n role_id = SelectField(\"Role\", coerce=int, validators=[DataRequired()])\n submit = SubmitField('Save')\n\n # def validate_email(self, email):\n # print (f\"from Validate_email data: {email.data}, raw: {email.object_data}, self: {self.email.data} selfobj: {self.email.object_data}\")\n # #if email.data != email.object_data: # field changed\n # user = User.query.filter_by(email=email.data).first()\n # if user:\n # raise ValidationError('That email already exists')\n\n\nclass LoginForm(FlaskForm):\n login_name = StringField('Login Name', validators=[DataRequired()])\n password = PasswordField('Password', validators=[DataRequired()])\n remember = BooleanField('Remember Me', validators = [])\n submit = SubmitField('Login')\n\nclass UpdateAccountForm(FlaskForm):\n username = StringField('Username',\n validators=[DataRequired(), Length(min=2, max=20)])\n email = StringField('Email',\n validators=[DataRequired(), Email()])\n picture = FileField('Update Profile Picture', validators=[FileAllowed(['jpg', 'png'])])\n submit = SubmitField('Update')\n\n def validate_username(self, username):\n if username.data != current_user.username:\n user = User.query.filter_by(username=username.data).first()\n if user:\n raise ValidationError('That username is taken. Please choose a different one.')\n\n def validate_email(self, email):\n if email.data != current_user.email:\n user = User.query.filter_by(email=email.data).first()\n if user:\n raise ValidationError('That email is taken. 
Please choose a different one.')\n\n\nclass RequestResetForm(FlaskForm):\n email = StringField('Email',\n validators=[DataRequired(), Email()])\n submit = SubmitField('Request Password Reset')\n\n def validate_email(self, email):\n user = User.query.filter_by(email=email.data).first()\n if user is None:\n raise ValidationError('There is no account with that email. You must register first.')\n\n\nclass ResetPasswordForm(FlaskForm):\n password = PasswordField('Password', validators=[DataRequired()])\n confirm_password = PasswordField('Confirm Password',\n validators=[DataRequired(), EqualTo('password')])\n submit = SubmitField('Reset Password')\n\nclass RoleForm(FlaskForm):\n name = StringField('Name', validators=[DataRequired(), Length(min=4, max=20)])\n description = StringField('Description', validators=[DataRequired(), Length(max=200)])\n is_admin = BooleanField('Admin Role?', default= False)\n submit = SubmitField('Save')\n\n def validate_name(self, name):\n\n role = Role.query.filter_by(name=name.data).first()\n if role:\n raise ValidationError('This role already exists, please select another one')" }, { "alpha_fraction": 0.6246684193611145, "alphanum_fraction": 0.6989389657974243, "avg_line_length": 43.35293960571289, "blob_id": "f857383c1fbbc5b6556df9390c75a0f1229a301c", "content_id": "c335bb6d2957eb021e465817302231189bf739ed", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 754, "license_type": "no_license", "max_line_length": 107, "num_lines": 17, "path": "/flaskblog/reports/run.py", "repo_name": "yjad/Academy", "src_encoding": "UTF-8", "text": "\nfrom zoom import load_zoom_meetings, get_zoom_report_daily, load_zoom_telephone_report, get_meeting_details\nfrom zoom_reports import stats_attendees, attendance_sheet, list_unmatched_attendees, update_meetings, \\\n stats_attendees_graph\n\nif __name__ == \"__main__\":\n #load_zoom_meetings(\"2020-06-15\", \"2020-07-14\")\n #load_zoom_meetings(\"2020-07-01\")\n #load_zoom_meetings(\"2020-07-26\")\n #attendance_sheet(\"2020-08-11\")\n #attendance_sheet(\"\")\n #stats_attendees()\n #list_unmatched_attendees()\n #update_meetings() # update meetings starts after the last loaded meeting\n #get_zoom_report_daily(2020,8)\n #load_zoom_telephone_report(\"2020-07-26\")\n #get_meeting_details(\"Hce5zSsbRPmL+1l0VKGmVQ==\")\n stats_attendees_graph()" }, { "alpha_fraction": 0.8686131238937378, "alphanum_fraction": 0.8686131238937378, "avg_line_length": 9.615385055541992, "blob_id": "86196723794885f6c8bfe5a69c6e831867498d97", "content_id": "cbe35d0b738b280ee9c0d7bf2fb6b3fcf93c29f3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 137, "license_type": "no_license", "max_line_length": 16, "num_lines": 13, "path": "/requirements.txt", "repo_name": "yjad/Academy", "src_encoding": "UTF-8", "text": "Flask\nFlask-SQLAlchemy\nflask-bcrypt\nflask-login\nflask-mail\nitsdangerous\nflask-wtf\nemail-validator\nPillow\nnumpy\nmatplotlib\npandas\nrequests" }, { "alpha_fraction": 0.6680412292480469, "alphanum_fraction": 0.6742268204689026, "avg_line_length": 33.64285659790039, "blob_id": "52041a7b860f577166a46a4353a75b2035f254f2", "content_id": "21c783c47430c57970316c0ccf2218e8b9201b8d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 485, "license_type": "no_license", "max_line_length": 72, "num_lines": 14, "path": "/flaskblog/config.py", "repo_name": "yjad/Academy", "src_encoding": "UTF-8", 
"text": "import os\n\n\nclass Config:\n #SECRET_KEY = os.environ.get('SECRET_KEY')\n SECRET_KEY = '793c5f9c78d57a71bbc1e21eed2aa6b3'\n #SQLALCHEMY_DATABASE_URI = os.environ.get('SQLALCHEMY_DATABASE_URI')\n SQLALCHEMY_DATABASE_URI = 'sqlite:///data/site.sqlite'\n MAIL_SERVER = 'smtp.googlemail.com'\n MAIL_PORT = 587\n MAIL_USE_TLS = True\n MAIL_USERNAME = os.environ.get('EMAIL_USER')\n MAIL_PASSWORD = os.environ.get('EMAIL_PASS')\n IMAGE_DIR = r\"E:\\Yahia-Home\\Python\\src\\Academy\\flaskblog\\static\\out\"\n" }, { "alpha_fraction": 0.5537749528884888, "alphanum_fraction": 0.559116780757904, "avg_line_length": 27.363636016845703, "blob_id": "53a949a9ad833d42ae98d2709a4b8536d691ffd8", "content_id": "df360fd7cdc9ee2879b5563697a7a505f3202d29", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2808, "license_type": "no_license", "max_line_length": 114, "num_lines": 99, "path": "/flaskblog/reports/DB.py", "repo_name": "yjad/Academy", "src_encoding": "UTF-8", "text": "import sqlite3\nfrom sqlalchemy.sql import text\nfrom flaskblog.reports.config import config\nfrom flaskblog import db\n\nMEETING_TABLE = \"meetings\"\nATTENDEES_TABLE = \"attendees\"\n\n\n# def open_db():\n# connection = sqlite3.Connection(config.get(\"DB_FILE_NAME\"))\n# cursor = connection.cursor()\n# return connection, cursor\n\n\ndef create_tables():\n cmd = f'CREATE TABLE IF NOT EXISTS {MEETING_TABLE}' \\\n f'(uuid TEXT NOT NULL PRIMARY KEY,' \\\n f'id INTEGER ,' \\\n f'host_id\tTEXT,' \\\n f'type\tINTEGER,' \\\n f'topic\tTEXT,' \\\n f'user_name\tTEXT,' \\\n f'user_email TEXT,' \\\n f'start_time TEXT,' \\\n f'end_time\tTEXT,' \\\n f'duration\tINTEGER,' \\\n f'total_minutes\tINTEGER,' \\\n f'participants_count INTEGER)'\n\n db.engine.execute(cmd)\n\n db.engine.execute(f'CREATE TABLE IF NOT EXISTS {ATTENDEES_TABLE}'\n '(meeting_uuid TEXT,'\n 'id TEXT ,'\n 'user_id TEXT,'\n 'name TEXT,'\n 'user_email TEXT,'\n 'join_time TEXT,'\n 'leave_time TEXT,'\n 'duration INTEGER,'\n 'attentiveness_score TEXT)')\n return\n\n\n# def close_db(cursor):\n# cursor.close()\n\n\ndef insert_row(table_name, rec):\n keys = ','.join(rec.keys())\n question_marks = ','.join(list('?' * len(rec)))\n values = tuple(rec.values())\n try:\n db.engine.execute('INSERT INTO ' + table_name + ' (' + keys + ') VALUES (' + question_marks + ')', values)\n #db.engine.commit()\n return 0\n #except sqlite3.Error as er:\n except : # already exist\n #print('SQLite error: %s' % (' '.join(er.args)))\n #print(\"Exception class is: \", er.__class__)\n #print('SQLite traceback: ')\n #exc_type, exc_value, exc_tb = sys.exc_info()\n #print(traceback.format_exception(exc_type, exc_value, exc_tb))\n return -1\n\n\ndef insert_row_meeting(rec):\n return insert_row(MEETING_TABLE, rec)\n\n\ndef insert_row_attendees(rec):\n return insert_row(ATTENDEES_TABLE, rec)\n\n\ndef exec_query(cmd):\n rows = db.engine.execute(text(cmd))\n #rows = db.engine.fetchall()\n return rows\n\n\ndef get_last_meeting_date():\n #conn, cursor = open_db()\n rows = db.engine.execute('SELECT start_time from meetings order by start_time DESC LIMIT 1')\n #rows = db.engine.fetchall()\n #close_db(cursor)\n # for r in rows:\n # print (r)\n # print (r[0])\n # print (r[0][:10])\n # return r[0][:10]\n\n\ndef get_col_names(sql):\n get_column_names = db.engine.execute(sql + \" limit 1\")\n print(get_column_names)\n col_name = [i[0] for i in get_column_names.description]\n print (\"----------------->\", col_name)\n return col_name\n" } ]
7
mszczesniak89/PYTHON_PODSTAWY_WARSZTAT_ZAD4
https://github.com/mszczesniak89/PYTHON_PODSTAWY_WARSZTAT_ZAD4
61347ab99ce1f7a8c95511265f79d322133deb7d
64f25cadf0f63bdf2f5ba21516c7411baff65f6f
6c59b06d3b60a517b0198bc23a366eeb633a2dd2
refs/heads/master
2023-07-13T11:09:28.597488
2021-08-27T10:09:27
2021-08-27T10:09:27
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5038106441497803, "alphanum_fraction": 0.511833131313324, "avg_line_length": 26.395605087280273, "blob_id": "95726702cbee6b296f82e999edc849c24fdb8124", "content_id": "b57e976915d7a2c173b9a6d2a2af7abf60e3b055", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2493, "license_type": "no_license", "max_line_length": 63, "num_lines": 91, "path": "/main.py", "repo_name": "mszczesniak89/PYTHON_PODSTAWY_WARSZTAT_ZAD4", "src_encoding": "UTF-8", "text": "from flask import Flask, request\n\napp = Flask(\"Gra w zgadywanie liczb 3\")\n\nmin = 0\nmax = 1000\nguess = 0\n# hint = \"\"\n\ndef intro():\n intro = \"\"\"\n <header>\n <h1>Guess The Number - Game</h1>\n <p>Think of a number from 0 to 1000.</p>\n <p>I will guess that number in max 10 attempts.</p>\n <p>Just give me hints using the buttons below.</p>\n </header>\n \"\"\"\n return intro\n\ndef hints_menu():\n global min\n global max\n global guess\n global hint\n hints_form = f\"\"\"\n <form method='POST'>\n <input type='submit' value='Too small' name ='small'>\n <input type='submit' value='Too big' name ='big'>\n <input type='submit' value='You win!' name ='win'>\n <input type='hidden' name='min' value={min}>\n <input type='hidden' name='max' value={max}>\n </form>\n \"\"\"\n guess = int(((max - min) / 2) + min)\n message = f\"My guess is: {guess}\"\n # if request.method == 'POST':\n # if request.form.get('small'):\n # hint = 'small'\n # elif request.form.get('big'):\n # hint = 'big'\n # elif request.form.get('win'):\n # hint = 'win'\n # return hints_menu\n if request.method == 'POST':\n if request.form.get('small'):\n min = guess\n guess = int(((max - min) / 2) + min)\n message = f\"My guess is: {guess}\"\n elif request.form.get('big'):\n max = guess\n guess = int(((max - min) / 2) + min)\n message = f\"My guess is: {guess}\"\n elif request.form.get('win'):\n final_message = \"I win!\"\n return final_message\n return hints_form + message\n\n# def belly_of_the_beast(hint):\n# global min\n# global max\n# global guess\n# if hint ==\"small\":\n# min = guess\n# elif hint == \"big\":\n# max = guess\n# elif hint == \"win\":\n# return False\n\n\n\n\n@app.route(\"/\", methods=['GET', 'POST'])\ndef game():\n while True:\n global guess\n global max\n global min\n global hint\n return intro() + hints_menu()\n # while True:\n # if belly_of_the_beast(hint) is False:\n # final_message = \"I win! Thanks for playing!\"\n # return intro() + hints_menu() + final_message\n # else:\n # guess = int(((max - min) / 2) + min)\n # message = f\"My guess is: {guess}\"\n # belly_of_the_beast(hint)\n # return intro() + hints_menu() + message\n\napp.run()\n" } ]
1
DiamondLightSource/islatu
https://github.com/DiamondLightSource/islatu
c7e535b7949ecadfd7d2206f5d53bbba4f217cca
8765668e4d8dd528844040988e6b31a382c46641
3c337085c0aeb8825352147a532e659e9f1e2f4c
refs/heads/master
2023-08-19T07:22:20.765414
2023-08-07T13:57:58
2023-08-07T13:57:58
238,451,339
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6250553131103516, "alphanum_fraction": 0.6316954493522644, "avg_line_length": 33.75384521484375, "blob_id": "434bb6021c4cc1659905dc8de97896483bde768f", "content_id": "47fa02660aeeade5b604ea3f2b841de4f0056218", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2259, "license_type": "permissive", "max_line_length": 77, "num_lines": 65, "path": "/src/islatu/corrections.py", "repo_name": "DiamondLightSource/islatu", "src_encoding": "UTF-8", "text": "\"\"\"\nReflectometry data must be corrected as a part of reduction.\nThese functions facilitate this, including the footprint and\nDCD q-variance corrections.\n\"\"\"\n\n\nimport numpy as np\nfrom scipy.stats import norm\nfrom scipy.interpolate import splrep\n\n\ndef footprint_correction(beam_width, sample_size, theta):\n \"\"\"\n The factor by which the intensity should be multiplied to account for the\n scattering geometry, where the beam is Gaussian in shape.\n\n Args:\n beam_width (:py:attr:`float`):\n Width of incident beam, in metres.\n sample_size (:py:attr:`float`):\n Width of sample in the dimension of the beam, in metres.\n theta (:py:attr:`float`):\n Incident angle, in degrees.\n\n Returns:\n Array of correction factors.\n \"\"\"\n # Deal with the [trivial] theta=0 case.\n theta = np.array([10**(-3) if t == 0 else t for t in theta])\n\n beam_sd = beam_width / 2 / np.sqrt(2 * np.log(2))\n projected_beam_sd = beam_sd / np.sin(np.radians(theta))\n frac_of_beam_sampled = (\n norm.cdf(sample_size/2, 0, projected_beam_sd) -\n norm.cdf(-sample_size/2, 0, projected_beam_sd)\n )\n return frac_of_beam_sampled\n\n\ndef get_interpolator(\n file_path, parser, q_axis_name=\"qdcd_\", intensity_axis_name=\"adc2\"):\n \"\"\"\n Get an interpolator object from scipy, this is useful for the DCD\n q-normalisation step.\n\n Args:\n file_path (:py:attr:`str`): File path to the normalisation file.\n parser (:py:attr:`callable`): Parser function for the normalisation\n file.\n q_axis_name (:py:attr:`str`, optional): Label for the q-value in the\n normalisation file. Defaults to ``'qdcd_'``.\n intensity_axis_name (:py:attr:`str`, optional): Label for the\n intensity in the normalisation file. 
Defaults to ``'adc2'``.\n\n    Returns:\n        :py:attr:`tuple`: Containing:\n            - :py:attr:`array_like`: Interpolation knots.\n            - :py:attr:`array_like`: B-spline coefficients.\n            - :py:attr:`int`: Degree of spline.\n    \"\"\"\n    normalisation_data = parser(file_path)[1].sort_values(by='qdcd_')\n    return splrep(\n        normalisation_data[q_axis_name],\n        normalisation_data[intensity_axis_name])\n" }, { "alpha_fraction": 0.640609622001648, "alphanum_fraction": 0.685594916343689, "avg_line_length": 35.106510162353516, "blob_id": "6c482985cd330c9604ca27bbe76769688aee0e55", "content_id": "ce91a9e96db915bafc1e50d3f2fa2de88265286c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12208, "license_type": "permissive", "max_line_length": 80, "num_lines": 338, "path": "/tests/unit/test_scan.py", "repo_name": "DiamondLightSource/islatu", "src_encoding": "UTF-8", "text": "\"\"\"\nThis module tests the central islatu.scan module's Scan and Scan2D classes.\n\"\"\"\n\n\nimport pytest\nfrom pytest_lazyfixture import lazy_fixture as lazy\nimport numpy as np\nfrom scipy.interpolate import interp1d\n\n\nfrom islatu.background import roi_subtraction, fit_gaussian_1d\nfrom islatu.cropping import crop_to_region\nfrom islatu.scan import Scan2D\nfrom islatu.region import Region\n\n\ndef test_subsample_q_01(scan2d_from_nxs_01: Scan2D):\n    \"\"\"\n    Make sure subsample_q deletes the appropriate things. Because it just calls\n    remove_data_points, which has already been tested extensively in test_data,\n    we only need to check a couple of values to make sure the right qs have been\n    deleted, and we know that all the other attributes will have been handled\n    correctly.\n    \"\"\"\n    original_len = len(scan2d_from_nxs_01.theta)\n    # Defaults shouldn't change anything.\n    scan2d_from_nxs_01.subsample_q()\n    assert len(scan2d_from_nxs_01.theta) == original_len\n\n\ndef test_subsample_q_02(scan2d_from_nxs_01: Scan2D):\n    \"\"\"\n    Make sure that we can set just an upper bound. Note that this dataset goes\n    from 0.025Å to 0.06Å.\n    \"\"\"\n    q_max = 0.04\n\n    assert max(scan2d_from_nxs_01.q_vectors) > q_max\n    scan2d_from_nxs_01.subsample_q(q_max=q_max)\n    assert max(scan2d_from_nxs_01.q_vectors) <= q_max\n\n\ndef test_subsample_q_03(scan2d_from_nxs_01: Scan2D):\n    \"\"\"\n    Make sure that we can set a lower bound. Note that this dataset goes from\n    0.025Å to 0.06Å.\n    \"\"\"\n    q_min = 0.04\n\n    assert min(scan2d_from_nxs_01.q_vectors) < q_min\n    scan2d_from_nxs_01.subsample_q(q_min=q_min)\n    assert min(scan2d_from_nxs_01.q_vectors) >= q_min\n\n\ndef test_subsample_q_04(scan2d_from_nxs_01: Scan2D):\n    \"\"\"\n    Test that we can set both lower and upper bounds.\n    \"\"\"\n    q_min = 0.032\n    q_max = 0.051\n\n    scan2d_from_nxs_01.subsample_q(q_min, q_max)\n\n    assert min(scan2d_from_nxs_01.q_vectors) >= q_min\n    assert max(scan2d_from_nxs_01.q_vectors) <= q_max\n\n\n@pytest.mark.parametrize(\n    'scan, transmission',\n    [\n        (lazy('scan2d_from_nxs_01'), 0.000448426658633058)\n    ]\n)\ndef test_transmission_normalisation_intensities(scan: Scan2D, transmission):\n    \"\"\"\n    Make sure that we can correct for the attenuation of the beam. 
The\n    transmission values have been manually read from the .nxs file using a GUI.\n    \"\"\"\n    intensity_0 = np.copy(scan.intensity)\n    scan.transmission_normalisation()\n\n    for i, intensity in enumerate(scan.intensity):\n        assert intensity == intensity_0[i]/transmission\n\n\n@pytest.mark.parametrize(\n    'scan, transmission',\n    [\n        (lazy('scan2d_from_nxs_01'), 0.000448426658633058)\n    ]\n)\ndef test_transmission_normalisation_errors(scan: Scan2D, transmission):\n    \"\"\"\n    Make sure that we can correct for the attenuation of the beam. The\n    transmission values have been manually read from the .nxs file using a GUI.\n    This function checks the intensity_e values have been dealt with properly.\n    \"\"\"\n    intensity_e_0 = np.copy(scan.intensity_e)\n    scan.transmission_normalisation()\n\n    for i, intensity_e in enumerate(scan.intensity_e):\n        assert intensity_e == intensity_e_0[i]/transmission\n\n\ndef test_qdcd_name_assumes(parsed_dcd_normalisation_01):\n    \"\"\"\n    Takes a parsed DCD normalisation pandas dataframe and makes sure that\n    we can find the qdcd data, which is [in]conveniently called qdcd_.\n    \"\"\"\n    _, dataframe = parsed_dcd_normalisation_01\n    assert \"qdcd_\" in dataframe\n    assert \"adc2\" in dataframe\n\n\ndef test_qdcd_normalisation_01(scan2d_from_nxs_01: Scan2D, dcd_norm_01_splev):\n    \"\"\"\n    Make sure that our qdcd normalisation is doing something, and isn't failing\n    silently. (This is a dumb test, but it's really quite hard to test that\n    this is working without just rewriting a division by splev).\n    \"\"\"\n    intensities_0 = np.copy(scan2d_from_nxs_01.intensity)\n    intensities_e_0 = np.copy(scan2d_from_nxs_01.intensity_e)\n\n    scan2d_from_nxs_01.qdcd_normalisation(dcd_norm_01_splev)\n\n    assert (intensities_0 != scan2d_from_nxs_01.intensity).all()\n    assert (intensities_e_0 != scan2d_from_nxs_01.intensity_e).all()\n\n\ndef test_qdcd_normalisation_02(scan2d_from_nxs_01: Scan2D, dcd_norm_01_splev,\n                               parsed_dcd_normalisation_01):\n    \"\"\"\n    Make sure that our nice splev normalisation does something similar to what\n    would be achieved using a simple cubic scipy.interpolate.interp1d.\n    \"\"\"\n\n    # First, generate some test intensities by dividing by an interp1d function.\n    intensities_0 = np.copy(scan2d_from_nxs_01.intensity)\n    intensities_e_0 = np.copy(scan2d_from_nxs_01.intensity_e)\n\n    _, dataframe = parsed_dcd_normalisation_01\n\n    interp = interp1d(dataframe[\"qdcd_\"], dataframe['adc2'], kind='cubic')\n\n    test_intensities = intensities_0 / interp(scan2d_from_nxs_01.q_vectors)\n    test_intensities_e = intensities_e_0 / interp(scan2d_from_nxs_01.q_vectors)\n\n    # Now, carry out the qdcd normalisation as normal.\n    scan2d_from_nxs_01.qdcd_normalisation(dcd_norm_01_splev)\n\n    # These interpolation methods could be decently different, but let's enforce\n    # that our values are the same to within 1%.\n    for i, test_intensity in enumerate(test_intensities):\n        assert test_intensity == pytest.approx(scan2d_from_nxs_01.intensity[i],\n                                               rel=0.01)\n\n    for i, test_inten_e in enumerate(test_intensities_e):\n        assert test_inten_e == pytest.approx(scan2d_from_nxs_01.intensity_e[i],\n                                             rel=0.01)\n\n\ndef test_footprint_correction_01(scan2d_from_nxs_01: Scan2D):\n    \"\"\"\n    Makes sure that the footprint correction actually does something for a\n    reasonable beam FWHM and a small (1mm) sample.\n    \"\"\"\n    # 100 micron beam.\n    beam_width = 100e-6\n    # 1 mm sample.\n    sample_size = 1e-3\n    intensities_0 = np.copy(scan2d_from_nxs_01.intensity)\n    intensities_e_0 = np.copy(scan2d_from_nxs_01.intensity_e)\n    
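# The correction acts in place on the scan's intensity and error arrays.\n    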
scan2d_from_nxs_01.footprint_correction(beam_width, sample_size)\n\n assert (intensities_0 != scan2d_from_nxs_01.intensity).all()\n assert (intensities_e_0 != scan2d_from_nxs_01.intensity_e).all()\n\n\ndef test_footprint_correction_02(scan2d_from_nxs_01: Scan2D):\n \"\"\"\n Do a really naive footprint correction assuming a step function beam.\n Enforce that this is the same as our fancy correction, to within 10%.\n (Note: they are actually about 10% out from each other).\n \"\"\"\n\n # 100 micron beam.\n beam_width = 100e-6\n # 1 mm sample.\n sample_size = 1e-3\n\n intensities_0 = np.copy(scan2d_from_nxs_01.intensity)\n intensities_e_0 = np.copy(scan2d_from_nxs_01.intensity_e)\n\n beam_size_on_sample = beam_width / \\\n np.sin(np.radians(scan2d_from_nxs_01.theta))\n\n incident_beam_fraction = sample_size / beam_size_on_sample\n\n test_intensities = intensities_0/incident_beam_fraction\n test_intensities_e = intensities_e_0/incident_beam_fraction\n\n scan2d_from_nxs_01.footprint_correction(beam_width, sample_size)\n for i, test_intensity in enumerate(test_intensities):\n assert test_intensity == pytest.approx(\n scan2d_from_nxs_01.intensity[i], 0.1)\n\n for i, test_intensity_e in enumerate(test_intensities_e):\n assert test_intensity_e == pytest.approx(\n scan2d_from_nxs_01.intensity_e[i], 0.1)\n\n\ndef test_crop_01(scan2d_from_nxs_01: Scan2D, region_01):\n \"\"\"\n Check that crop is decreasing the size of the image.\n \"\"\"\n initial_shape = scan2d_from_nxs_01.images[0].shape\n scan2d_from_nxs_01.crop(crop_to_region, region=region_01)\n\n assert scan2d_from_nxs_01.images[0].shape[0] < initial_shape[0]\n assert scan2d_from_nxs_01.images[0].shape[1] < initial_shape[1]\n\n\ndef test_crop_02(scan2d_from_nxs_01: Scan2D, region_01: Region):\n \"\"\"\n Make sure that our cropped region has the correct size.\n \"\"\"\n scan2d_from_nxs_01.crop(crop_to_region, region=region_01)\n assert (scan2d_from_nxs_01.images[0].shape[0]\n * scan2d_from_nxs_01.images[0].shape[1]) == region_01.num_pixels\n\n\ndef test_crop_03(scan2d_from_nxs_01: Scan2D, region_01: Region):\n \"\"\"\n Make sure that the region we've cropped to has the specified shape.\n \"\"\"\n scan2d_from_nxs_01.crop(crop_to_region, region=region_01)\n assert scan2d_from_nxs_01.images[0].shape[0] == region_01.x_length\n assert scan2d_from_nxs_01.images[0].shape[1] == region_01.y_length\n\n\ndef test_bkg_sub_01(scan2d_from_nxs_01: Scan2D):\n \"\"\"\n Make sure that we start out with no background.\n \"\"\"\n assert scan2d_from_nxs_01.images[0].bkg == 0\n assert scan2d_from_nxs_01.images[0].bkg_e == 0\n\n\ndef test_bkg_sub_02(scan2d_from_nxs_01: Scan2D):\n \"\"\"\n Make sure that the background subtraction function is doing something.\n \"\"\"\n region_list = scan2d_from_nxs_01.metadata.background_regions\n scan2d_from_nxs_01.bkg_sub(roi_subtraction, list_of_regions=region_list)\n\n assert scan2d_from_nxs_01.images[0].bkg != 0\n assert scan2d_from_nxs_01.images[0].bkg_e != 0\n\n\ndef test_bkg_sub_03(scan2d_from_nxs_01: Scan2D):\n \"\"\"\n Make sure that the background subtraction decreases our intensity.\n \"\"\"\n vals, stdevs = (np.zeros(\n len(scan2d_from_nxs_01.intensity)),\n np.zeros(len(scan2d_from_nxs_01.intensity)))\n\n # Also update the image intensities & errors.\n for i, image in enumerate(scan2d_from_nxs_01.images):\n vals[i], stdevs[i] = image.sum()\n\n # Store the intensity(Q) to the new value.\n scan2d_from_nxs_01.intensity = np.array(vals)\n scan2d_from_nxs_01.intensity_e = np.array(stdevs)\n\n region_list = 
scan2d_from_nxs_01.metadata.background_regions\n scan2d_from_nxs_01.bkg_sub(roi_subtraction, list_of_regions=region_list)\n\n assert (vals > scan2d_from_nxs_01.intensity).all()\n\n\ndef test_bkg_sub_04(scan2d_from_nxs_01: Scan2D, scan2d_from_nxs_01_copy,\n custom_bkg_region_01):\n \"\"\"\n Make sure that using two background regions yields a lower uncertainty\n measurement of the background than using just one background region.\n \"\"\"\n regions_1 = [scan2d_from_nxs_01.metadata.background_regions[0]]\n regions_2 = [scan2d_from_nxs_01.metadata.background_regions[0]] + [\n custom_bkg_region_01]\n scan2d_from_nxs_01.bkg_sub(roi_subtraction, list_of_regions=regions_1)\n scan2d_from_nxs_01_copy.bkg_sub(\n roi_subtraction, list_of_regions=regions_2)\n\n for i, image_1 in enumerate(scan2d_from_nxs_01.images):\n image_2 = scan2d_from_nxs_01_copy.images[i]\n assert image_1.bkg_e > image_2.bkg_e\n\n\ndef test_gauss_bkg_01(scan2d_from_nxs_01: Scan2D):\n \"\"\"\n Make sure that our Gaussian fit background subtraction function is doing\n something.\n\n Note that this function is not being tested for sensible results because\n this doesn't generally seem to be a sensible technique to use on I07. As\n more instruments are supported, if this technique becomes useful, its\n tests will need to be extended. For now, only the minimum is being done\n to ensure that it is roughly functional.\n \"\"\"\n scan2d_from_nxs_01.bkg_sub(fit_gaussian_1d)\n\n assert scan2d_from_nxs_01.images[0].bkg != 0\n assert scan2d_from_nxs_01.images[0].bkg_e != 0\n\n\ndef test_gauss_bkg_02(scan2d_from_nxs_01: Scan2D):\n \"\"\"\n Make sure that carrying out this subtraction decreases our intensity.\n\n Note that this function is not being tested for sensible results because\n this doesn't generally seem to be a sensible technique to use on I07. As\n more instruments are supported, if this technique becomes useful, its\n tests will need to be extended. For now, only the minimum is being done\n to ensure that it is roughly functional.\n \"\"\"\n vals = np.zeros(len(scan2d_from_nxs_01.intensity))\n # Also update the image intensities & errors.\n for i, image in enumerate(scan2d_from_nxs_01.images):\n vals[i], _ = image.sum()\n # Store the intensity(Q) to the new value.\n scan2d_from_nxs_01.intensity = np.array(vals)\n\n intensity_0 = np.copy(scan2d_from_nxs_01.intensity)\n scan2d_from_nxs_01.bkg_sub(fit_gaussian_1d)\n\n assert (scan2d_from_nxs_01.intensity < intensity_0).all()\n" }, { "alpha_fraction": 0.5853658318519592, "alphanum_fraction": 0.5853658318519592, "avg_line_length": 16.714284896850586, "blob_id": "a892f77273c73c6a03482abfc0062c600f5e26c9", "content_id": "bd360c8762d21a3691d1c53e8793803c83ceec94", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 123, "license_type": "permissive", "max_line_length": 31, "num_lines": 7, "path": "/docs/source/metadata.rst", "repo_name": "DiamondLightSource/islatu", "src_encoding": "UTF-8", "text": "islatu\\.metadata\n================\n\n.. 
automodule:: islatu.metadata\n    :members:\n    :undoc-members:\n    :show-inheritance:" }, { "alpha_fraction": 0.5849114060401917, "alphanum_fraction": 0.593835711479187, "avg_line_length": 34.8361930847168, "blob_id": "897765475cf99975ab781532a5c83eeafce78e59", "content_id": "2a2592c1da1ff1a7fd9e610378f1399a767ace3e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 22971, "license_type": "permissive", "max_line_length": 103, "num_lines": 641, "path": "/src/islatu/io.py", "repo_name": "DiamondLightSource/islatu", "src_encoding": "UTF-8", "text": "\"\"\"\nThis module contains:\n\nParsing functions used to extract information from experimental files.\n\nClasses used to help make parsing more modular. These include the NexusBase\nclass and its children.\n\"\"\"\n\n# We've gotta access the _value attribute on some NXobjects.\n# pylint: disable=protected-access\n\n\nimport json\nimport os\nfrom typing import List\nfrom abc import abstractmethod\n\n\nimport nexusformat.nexus.tree as nx\nfrom nexusformat.nexus import nxload\nimport pandas as pd\nimport numpy as np\nimport h5py\n\n\nfrom .scan import Scan2D\nfrom .image import Image\nfrom .data import Data\nfrom .region import Region\nfrom .debug import debug\nfrom .metadata import Metadata\n\n\nclass NexusBase(Metadata):\n    \"\"\"\n    This class contains *mostly* beamline-agnostic nexus parsing convenience\n    stuff. It's worth noting that this class still makes a series of assumptions\n    about how data is laid out in a nexus file that can be broken. Instead of\n    striving for some impossible perfection, this class is practical in its\n    assumptions of how data is laid out in a .nxs file, and will raise if an\n    assumption is violated. All instrument-specific assumptions that one must\n    inevitably make to extract truly meaningful information from a nexus file\n    are made in children of this class.\n\n    Attrs:\n        file_path:\n            The local path to the file on the local filesystem.\n        nxfile:\n            The object produced by loading the file at file_path with nxload.\n    \"\"\"\n\n    def __init__(self, local_path: str):\n        super().__init__(local_path)\n        self.nxfile = nxload(local_path)\n\n    @property\n    def src_path(self):\n        \"\"\"\n        The name of this nexus file, as it was recorded when the nexus file was\n        written.\n        \"\"\"\n        return self.nxfile.file_name\n\n    @property\n    def detector(self):\n        \"\"\"\n        Returns the NXdetector instance stored in this NexusFile.\n\n        Raises:\n            ValueError if more than one NXdetector is found.\n        \"\"\"\n        det, = self.instrument.NXdetector\n        return det\n\n    @property\n    def instrument(self):\n        \"\"\"\n        Returns the NXinstrument instance stored in this NexusFile.\n\n        Raises:\n            ValueError if more than one NXinstrument is found.\n        \"\"\"\n        instrument, = self.entry.NXinstrument\n        return instrument\n\n    @property\n    def entry(self) -> nx.NXentry:\n        \"\"\"\n        Returns this nexusfile's entry.\n\n        Raises:\n            ValueError if more than one entry is found.\n        \"\"\"\n        entry, = self.nxfile.NXentry\n        return entry\n\n    @property\n    def default_signal(self) -> np.ndarray:\n        \"\"\"\n        The numpy array of intensities pointed to by the signal attribute in the\n        nexus file.\n        \"\"\"\n        return self.default_nxdata[self.default_signal_name].nxdata\n\n    @property\n    def default_axis(self) -> np.ndarray:\n        \"\"\"\n        Returns the nxdata associated with the default axis.\n        \"\"\"\n        return self.default_nxdata[self.default_axis_name].nxdata\n\n    @property\n    def default_signal_name(self):\n        \"\"\"\n        Returns the name of the default signal.\n        
\"\"\"\n return self.default_nxdata.signal\n\n @property\n def default_axis_name(self) -> str:\n \"\"\"\n Returns the name of the default axis.\n \"\"\"\n return self.entry[self.entry.default].axes\n\n @property\n def default_nxdata_name(self):\n \"\"\"\n Returns the name of the default nxdata.\n \"\"\"\n return self.entry.default\n\n @property\n def default_nxdata(self) -> np.ndarray:\n \"\"\"\n Returns the default NXdata.\n \"\"\"\n return self.entry[self.default_nxdata_name]\n\n # A hack to tell pylint that this class is still meant to be abstract.\n @property\n @abstractmethod\n def default_axis_type(self) -> str:\n return super().default_axis_type()\n\n\nclass I07Nexus(NexusBase):\n \"\"\"\n This class extends NexusBase with methods useful for scraping information\n from nexus files produced at the I07 beamline at Diamond.\n \"\"\"\n excalibur_detector_2021 = \"excroi\"\n excalibur_04_2022 = \"exr\"\n\n @property\n def local_data_path(self) -> str:\n \"\"\"\n The local path to the data (.h5) file. Note that this isn't in the\n NexusBase class because it need not be reasonably expected to point at a\n .h5 file.\n\n Raises:\n FileNotFoundError if the data file cant be found.\n \"\"\"\n file = _try_to_find_files(\n [self._src_data_path], [self.local_path])[0]\n return file\n\n @property\n def detector_name(self) -> str:\n \"\"\"\n Returns the name of the detector that we're using. Because life sucks,\n this is a function of time.\n \"\"\"\n if \"excroi\" in self.entry:\n return I07Nexus.excalibur_detector_2021\n if \"exr\" in self.entry:\n return I07Nexus.excalibur_04_2022\n # Couldn't recognise the detector.\n raise NotImplementedError()\n\n @property\n def default_axis_name(self) -> str:\n \"\"\"\n Returns the name of the default axis.\n \"\"\"\n return self.entry[self.entry.default].axes\n\n @property\n def default_axis_type(self) -> str:\n \"\"\"\n Returns the type of our default axis, either being 'q', 'th' or 'tth'.\n \"\"\"\n if self.default_axis_name == 'qdcd':\n return 'q'\n if self.default_axis_name == 'diff1chi':\n return 'th' \n if self.default_axis_name == 'diff1delta':\n return 'tth'\n # It's also possible that self.default_axis_name isn't recorded in some\n # nexus files. Just in case, let's check the length of diff1delta.\n if isinstance(self.instrument[\"diff1delta\"].value.nxdata, np.ndarray):\n return 'tth'\n\n def _get_ith_region(self, i: int):\n \"\"\"\n Returns the ith region of interest found in the .nxs file.\n\n Args:\n i:\n The region of interest number to return. 
This number should\n                match the ROI name as found in the .nxs file (generally not 0\n                indexed).\n\n        Returns:\n            The ith region of interest found in the .nxs file.\n        \"\"\"\n        x_1 = self.detector[self._get_region_bounds_key(i, 'x_1')][0]\n        x_2 = self.detector[self._get_region_bounds_key(i, 'Width')][0] + x_1\n        y_1 = self.detector[self._get_region_bounds_key(i, 'y_1')][0]\n        y_2 = self.detector[self._get_region_bounds_key(i, 'Height')][0] + y_1\n        return Region(x_1, x_2, y_1, y_2)\n\n    @property\n    def signal_regions(self) -> List[Region]:\n        \"\"\"\n        Returns a list of region objects that define the location of the signal.\n        Currently there is nothing better to do than assume that this is a list\n        of length 1.\n        \"\"\"\n        if self.detector_name == I07Nexus.excalibur_detector_2021:\n            return [self._get_ith_region(i=1)]\n        if self.detector_name == I07Nexus.excalibur_04_2022:\n            # Make sure our code executes for bytes and strings.\n            try:\n                json_str = self.instrument[\n                    \"ex_rois/excalibur_ROIs\"]._value.decode(\"utf-8\")\n            except AttributeError:\n                json_str = self.instrument[\n                    \"ex_rois/excalibur_ROIs\"]._value\n\n            # This is badly formatted and can't be loaded by the json lib. We\n            # need to make a series of modifications.\n            json_str = json_str.replace('u', '')\n            json_str = json_str.replace(\"'\", '\"')\n\n            roi_dict = json.loads(json_str)\n            return [Region.from_dict(roi_dict['Region_1'])]\n\n        raise NotImplementedError()\n\n    @property\n    def background_regions(self) -> List[Region]:\n        \"\"\"\n        Returns a list of region objects that define the location of background.\n        Currently we just ignore the zeroth region and call the rest of them\n        background regions.\n        \"\"\"\n        if self.detector_name == I07Nexus.excalibur_detector_2021:\n            return [self._get_ith_region(i)\n                    for i in range(2, self._number_of_regions+1)]\n        if self.detector_name == I07Nexus.excalibur_04_2022:\n            # Make sure our code executes for bytes and strings.\n            try:\n                json_str = self.instrument[\n                    \"ex_rois/excalibur_ROIs\"]._value.decode(\"utf-8\")\n            except AttributeError:\n                json_str = self.instrument[\n                    \"ex_rois/excalibur_ROIs\"]._value\n            # This is badly formatted and can't be loaded by the json lib. We\n            # need to make a series of modifications.\n            json_str = json_str.replace('u', '')\n            json_str = json_str.replace(\"'\", '\"')\n\n            roi_dict = json.loads(json_str)\n            bkg_roi_list = list(roi_dict.values())[1:2]\n            return [Region.from_dict(x) for x in bkg_roi_list]\n\n        raise NotImplementedError()\n\n    @property\n    def probe_energy(self):\n        \"\"\"\n        Returns the energy of the probe particle parsed from this NexusFile.\n        \"\"\"\n        return float(self.instrument.dcm1energy.value)\n\n    @property\n    def transmission(self):\n        \"\"\"\n        Proportional to the fraction of probe particles allowed by an attenuator\n        to strike the sample.\n        \"\"\"\n        if 'filterset' in self.instrument:\n            return float(self.instrument.filterset.transmission)\n        elif 'fatt' in self.instrument:\n            return np.array(self.instrument.fatt.transmission)\n        else:\n            debug.log(f\"\\n No transmission value found in expected location, set transmission to 1 \\n\")\n            return float(1)\n\n    @property\n    def detector_distance(self):\n        \"\"\"\n        Returns the distance between sample and detector.\n        \"\"\"\n        return float(self.instrument.diff1detdist.value)\n\n    @property\n    def _src_data_path(self):\n        \"\"\"\n        Returns the raw path to the data file. 
This is useless if you aren't on\n        site, but used by islatu to guess where you've stored the data file\n        locally.\n        \"\"\"\n        # This is far from ideal; there currently seems to be no standard way\n        # to point at information stored outside of the nexus file.\n        # If you're a human, it's easy enough to find, but with code this is\n        # a pretty rubbish task. Here I just grab the first .h5 file I find\n        # and run with it.\n        found_h5_files = []\n\n        def recurse_over_nxgroups(nx_object, found_h5_files):\n            \"\"\"\n            Recursively looks for nxgroups in nx_object that, when cast to a\n            string, end in .h5.\n            \"\"\"\n            for key in nx_object:\n                new_obj = nx_object[key]\n                if key == \"data\":\n                    if new_obj.tree[8:-9].endswith(\".h5\"):\n                        found_h5_files.append(new_obj.tree[8:-9])\n                if str(new_obj).endswith(\".h5\"):\n                    found_h5_files.append(str(new_obj))\n                if str(new_obj).endswith(\".h5['/data']\"):\n                    found_h5_files.append(str(new_obj)[:-9])\n                if isinstance(new_obj, nx.NXgroup):\n                    recurse_over_nxgroups(new_obj, found_h5_files)\n\n        recurse_over_nxgroups(self.nxfile, found_h5_files)\n\n        return found_h5_files[0]\n\n    @property\n    def _region_keys(self) -> List[str]:\n        \"\"\"\n        Parses all of the detector's dictionary keys and returns all keys\n        relating to regions of interest.\n        \"\"\"\n        return [key for key in self.detector.keys() if key.startswith(\"Region\")]\n\n    @property\n    def _number_of_regions(self) -> int:\n        \"\"\"\n        Returns the number of regions of interest described by this nexus file.\n        This *assumes* that the region keys take the form f'region_{an_int}'.\n        \"\"\"\n        split_keys = [key.split('_') for key in self._region_keys]\n\n        return max([int(split_key[1]) for split_key in split_keys])\n\n    def _get_region_bounds_key(self, region_no: int, kind: str) -> List[str]:\n        \"\"\"\n        Returns the detector key relating to the bounds of the region of\n        interest corresponding to region_no.\n\n        Args:\n            region_no:\n                An integer corresponding to the particular region of interest\n                we're interested in generating a key for.\n            kind:\n                The kind of region bounds keys we're interested in. 
This can\n                take the values:\n                    'x_1', 'width', 'y_1', 'height'\n                where '1' can be replaced with 'start' and with/without caps on\n                first letter of width/height.\n\n        Raises:\n            ValueError if 'kind' argument is not one of the above.\n\n        Returns:\n            A list of region bounds keys that is ordered by region number.\n        \"\"\"\n        # Note that the x, y swapping is a quirk of the nexus standard, and is\n        # related to which axis on the detector varies most rapidly in memory.\n        if kind in ('x_1', 'x_start'):\n            insert = 'X'\n        elif kind in ('width', 'Width'):\n            insert = 'Width'\n        elif kind in ('y_1', 'y_start'):\n            insert = 'Y'\n        elif kind in ('height', 'Height'):\n            insert = 'Height'\n        else:\n            raise ValueError(\n                \"Didn't recognise 'kind' argument.\")\n\n        return f\"Region_{region_no}_{insert}\"\n\n\ndef i07_dat_to_dict_dataframe(file_path):\n    \"\"\"\n    Parses a .dat file recorded by I07, returning a [now mostly obsolete] tuple\n    containing a metadata dictionary and a pandas dataframe of the data.\n\n    Though outdated, this is still a handy way to parse the DCD normalization\n    .dat file.\n\n    Args:\n        (:py:attr:`str`): The ``.dat`` file to be read.\n\n    Returns:\n        :py:attr:`tuple`: Containing:\n            - :py:attr:`dict`: The metadata from the ``.dat`` file.\n            - :py:class:`pandas.DataFrame`: The data from the ``.dat`` file.\n    \"\"\"\n    f_open = open(file_path, \"r\", encoding='utf-8')\n    # Neither the data nor the metadata are being read yet.\n    data_reading = False\n    metadata_reading = False\n\n    # Create the dictionaries to be populated.\n    data_dict = {}\n    metadata_dict = {}\n    # Create the list to be filled with lists for each line\n    data_lines = []\n\n    for line in f_open:\n        # This string indicates the start of the metadata.\n        if \"<MetaDataAtStart>\" in line:\n            metadata_reading = True\n        # This string indicates the end of the metadata.\n        if \"</MetaDataAtStart>\" in line:\n            metadata_reading = False\n        # This string indicates the start of the data.\n        if \" &END\" in line:\n            data_reading = True\n            # Set counter to minus two, such that when it\n            # reaches the data it is 0.\n            count = -2\n        # When the metadata section is being read, populate the metadata_dict\n        if metadata_reading:\n            if \"=\" in line:\n                metadata_in_line = []\n                for i in line.split(\"=\")[1:]:\n                    try:\n                        j = float(i)\n                    except ValueError:\n                        j = i\n                    metadata_in_line.append(j)\n                metadata_dict[line.split(\"=\")[0]] = metadata_in_line\n        # When the data section is being read, make the list of the zeroth line\n        # the titles and everything after is the data_lines list of lists.\n        if data_reading:\n            count += 1\n            if count == 0:\n                titles = line.split()\n            if count > 0:\n                data_lines.append(line.split())\n    f_open.close()\n    # Transpose the data_lines list of lists and make it into a dict where\n    # the keys are the titles.\n    for j, _ in enumerate(data_lines[0]):\n        list_to_add = []\n        for i, _ in enumerate(data_lines):\n            try:\n                list_to_add.append(float(data_lines[i][j]))\n            except ValueError:\n                list_to_add.append(data_lines[i][j])\n        count = 0\n        if j >= len(titles):\n            data_dict[str(count)] = list_to_add\n            count += 1\n        else:\n            data_dict[titles[j]] = list_to_add\n    return metadata_dict, pd.DataFrame(data_dict)\n\n\ndef load_images_from_h5(h5_file_path, transpose=False):\n    \"\"\"\n    Loads images from a .h5 file.\n\n    Args:\n        h5_file_path:\n            Path to the h5 file from which we're loading images.\n        transpose:\n            Should we take the transpose of these images? 
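For I07's Excalibur detector the parser requests a transpose, since\n            the detector's fast and slow axes are swapped in memory. 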
Defaults to False.\n    \"\"\"\n    internal_data_path = 'data'\n    images = []\n    debug.log(\"Loading images from file \" + h5_file_path, unimportance=0)\n    with h5py.File(h5_file_path, \"r\") as file_handle:\n        dataset = file_handle[internal_data_path][()]\n\n        num_images = dataset.shape[0]\n        # Prepare to show a progress bar for image loading.\n        debug.log(f\"Loading {num_images} images.\", unimportance=2)\n        for i in range(num_images):\n            debug.log(\"Currently loaded \" + str(i+1) + \" images.\", end=\"\\r\")\n            images.append(Image(dataset[i], transpose=transpose))\n        # This line is necessary to prevent overwriting due to end=\"\\r\".\n        debug.log(\"\")\n        debug.log(f\"Loaded all {num_images} images.\", unimportance=2)\n\n    return images\n\n\ndef i07_nxs_parser(file_path: str):\n    \"\"\"\n    Parses a .nxs file acquired from the I07 beamline at Diamond, returning an\n    instance of Scan2D. This process involves loading the images contained in\n    the .h5 file pointed at by the .nxs file, as well as retrieving the metadata\n    from the .nxs file that is relevant for XRR reduction.\n\n    Args:\n        file_path:\n            Path to the .nxs file.\n\n    Returns:\n        An initialized Scan2D object containing all loaded detector frames, as\n        well as the relevant metadata from the .nxs file.\n    \"\"\"\n    # Use the magical parser class that does everything for us.\n    i07_nxs = I07Nexus(file_path)\n\n    # Load the images, taking a transpose if necessary (because which axis is\n    # x and which is y is determined by fast vs slow detector axes in memory).\n    if i07_nxs.detector_name in [\n            I07Nexus.excalibur_detector_2021,\n            I07Nexus.excalibur_04_2022]:\n        images = load_images_from_h5(i07_nxs.local_data_path, transpose=True)\n\n    # The dependent variable.\n    rough_intensity = i07_nxs.default_signal\n    rough_intensity_e = np.sqrt(rough_intensity)\n\n    # The independent variable.\n    axis = i07_nxs.default_axis\n\n    # We have to load the Data according to what our independent variable is.\n    if i07_nxs.default_axis_type == 'q':\n        data = Data(rough_intensity, rough_intensity_e, i07_nxs.probe_energy,\n                    q_vectors=axis)\n    elif i07_nxs.default_axis_type == 'th':\n        data = Data(rough_intensity, rough_intensity_e, i07_nxs.probe_energy,\n                    theta=axis)\n    elif i07_nxs.default_axis_type == 'tth':\n        data = Data(rough_intensity, rough_intensity_e, i07_nxs.probe_energy,\n                    theta=axis/2)\n    else:\n        raise NotImplementedError(\n            f\"{i07_nxs.default_axis_type} is not a supported axis type.\")\n\n    # Returns the Scan2D object\n    return Scan2D(data, i07_nxs, images)\n\n\ndef _try_to_find_files(filenames: List[str],\n                       additional_search_paths: List[str]):\n    \"\"\"\n    Check that data files exist if the file parsed by parser pointed to a\n    separate file containing intensity information. If the intensity data\n    file could not be found in its original location, check a series of\n    probable locations for the data file. If the data file is found in one\n    
If the data file is found in one\n of these locations, update file's entry in self.data.\n\n Returns:\n :py:attr:`list` of :py:attr:`str`:\n List of the corrected, actual paths to the files.\n \"\"\"\n found_files = []\n\n # If we had only one file, make a list out of it.\n if not hasattr(filenames, \"__iter__\"):\n filenames = [filenames]\n\n cwd = os.getcwd()\n start_dirs = [\n cwd, # maybe file is stored near the current working dir\n # To search additional directories, add them in here manually.\n ]\n start_dirs.extend(additional_search_paths)\n\n local_start_directories = [x.replace('\\\\', '/') for x in start_dirs]\n num_start_directories = len(local_start_directories)\n\n # Now extend the additional search paths.\n for i in range(num_start_directories):\n search_path = local_start_directories[i]\n split_srch_path = search_path.split('/')\n for j in range(len(split_srch_path)):\n extra_path_list = split_srch_path[:-(j+1)]\n extra_path = '/'.join(extra_path_list)\n local_start_directories.append(extra_path)\n\n # This line allows for a loading bar to show as we check the file.\n for i, _ in enumerate(filenames):\n # Better to be safe... Note: windows is happy with / even though it\n # defaults to \\\n filenames[i] = str(filenames[i]).replace('\\\\', '/')\n\n # Maybe we can see the file in its original storage location?\n if os.path.isfile(filenames[i]):\n found_files.append(filenames[i])\n continue\n\n # If not, maybe it's stored locally? If the file was stored at\n # location /a1/a2/.../aN/file originally, for a local directory LD,\n # check locations LD/aj/aj+1/.../aN for all j<N and all LD's of\n # interest. This algorithm is a generalization of Andrew McCluskey's\n # original approach.\n\n # now generate a list of all directories that we'd like to check\n candidate_paths = []\n split_file_path = str(filenames[i]).split('/')\n for j in range(len(split_file_path)):\n local_guess = '/'.join(split_file_path[j:])\n for start_dir in local_start_directories:\n candidate_paths.append(\n os.path.join(start_dir, local_guess))\n\n # Iterate over each of the candidate paths to see if any of them contain\n # the data file we're looking for.\n found_file = False\n for candidate_path in candidate_paths:\n if os.path.isfile(candidate_path):\n # File found - add the correct file location to found_files\n found_files.append(candidate_path)\n found_file = not found_file\n debug.log(\"Data file found at \" + candidate_path + \".\")\n break\n\n # If we didn't find the file, tell the user.\n if not found_file:\n raise FileNotFoundError(\n \"The data file with the name \" + filenames[i] + \" could \"\n \"not be found. The following paths were searched:\\n\" +\n \"\\n\".join(candidate_paths)\n )\n return found_files\n" }, { "alpha_fraction": 0.6235294342041016, "alphanum_fraction": 0.6411764621734619, "avg_line_length": 17.88888931274414, "blob_id": "4c935f0da386bdc24e9d342fcbbd7d09d668671e", "content_id": "4016458ab65f26bb52e7866719c2c2a862c06ed8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 170, "license_type": "permissive", "max_line_length": 77, "num_lines": 9, "path": "/docs/source/workflows.rst", "repo_name": "DiamondLightSource/islatu", "src_encoding": "UTF-8", "text": "Reduction workflows\n===================\n\nA typical data reduction workflow handled by :py:mod:`islatu` is shown here: \n\n.. 
toctree::\n :maxdepth: 1\n\n i07_reflectivity\n" }, { "alpha_fraction": 0.5063291192054749, "alphanum_fraction": 0.5443037748336792, "avg_line_length": 18.75, "blob_id": "4a156b4cc4a1040d273a2ca43ab60883117320d2", "content_id": "0a4bf8b1cc5f7ddf59998c6bc23d62d84a47f117", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 79, "license_type": "permissive", "max_line_length": 48, "num_lines": 4, "path": "/src/islatu/__init__.py", "repo_name": "DiamondLightSource/islatu", "src_encoding": "UTF-8", "text": "MAJOR = 1\nMINOR = 0\nMICRO = 7\n__version__ = '%d.%d.%d' % (MAJOR, MINOR, MICRO)\n" }, { "alpha_fraction": 0.5704917907714844, "alphanum_fraction": 0.5737704634666443, "avg_line_length": 23.7297306060791, "blob_id": "1bfcf80b76ee784568a742d293d39f8d6a9c943d", "content_id": "44d8308772f72eb52ad3e2b4ace199884b621ff7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 915, "license_type": "permissive", "max_line_length": 78, "num_lines": 37, "path": "/src/islatu/debug.py", "repo_name": "DiamondLightSource/islatu", "src_encoding": "UTF-8", "text": "\"\"\"\nIslatu's simple Debug class.\n\"\"\"\n\nDEFAULT_LOG_LEVEL = 1\n\n\nclass Debug:\n \"\"\"\n A simple logger.\n\n Attrs:\n logging_level:\n Current logging level. Higher means more unimportant messages will\n be shown.\n \"\"\"\n\n def __init__(self, logging_level):\n self.logging_level = logging_level\n\n def log(self, log_string, unimportance: int = 1, **kwargs):\n \"\"\"\n Prints to stdout if self.logging_level >= unimportance.\n\n Args:\n log_string:\n The string to be printed.\n unimportance:\n A measure of unimportance assigned to the printing of this\n string. Very unimportant messages require a larger logging\n level to be printed. Defaults to 1.\n \"\"\"\n if self.logging_level >= unimportance:\n print(log_string, **kwargs)\n\n\ndebug = Debug(DEFAULT_LOG_LEVEL)\n" }, { "alpha_fraction": 0.5941176414489746, "alphanum_fraction": 0.6000000238418579, "avg_line_length": 8.5, "blob_id": "2a6001759f169b6634e7fb8b3373078638cdcd66", "content_id": "602ea640b3b32117a039885785f317c964cb2b35", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 170, "license_type": "permissive", "max_line_length": 15, "num_lines": 18, "path": "/docs/source/modules.rst", "repo_name": "DiamondLightSource/islatu", "src_encoding": "UTF-8", "text": "API\n===\n\n.. toctree::\n :maxdepth: 4\n\n background\n corrections\n cropping\n data\n debug\n image\n io\n metadata\n refl_profile\n region\n scan\n stitching" }, { "alpha_fraction": 0.603635311126709, "alphanum_fraction": 0.6051011681556702, "avg_line_length": 36.483516693115234, "blob_id": "bf3ed38010d9c2cf327e4b0eb8aae4cc967b5e5e", "content_id": "abe0a4097687af3aefdf208c415b3517456fdcff", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6826, "license_type": "permissive", "max_line_length": 80, "num_lines": 182, "path": "/src/islatu/scan.py", "repo_name": "DiamondLightSource/islatu", "src_encoding": "UTF-8", "text": "\"\"\"\nThis module contains the Scan and Scan2D classes. A Scan is a measurement and so\ninherits from MeasurementBase. 
An instance of Scan contains scan metadata, as\nwell as a suite of methods useful for data correction, uncertainty calculations\nand the like.\n\nA Scan2D is a Scan whose Data object's intensity values are computed from an\nimage captured by an area detector. Many of Scan's methods are overloaded to\nmake use of the additional information provided by the area detector, and extra\nimage manipulation methods are included in Scan2D.\n\"\"\"\n\nfrom typing import List\n\nimport numpy as np\nfrom scipy.interpolate import splev\n\nfrom islatu import corrections\nfrom islatu.metadata import Metadata\nfrom islatu.data import Data, MeasurementBase\nfrom islatu.image import Image\n\n\nclass Scan(MeasurementBase):\n \"\"\"\n A class used to store reflectometry scans taken with a point detector.\n \"\"\"\n\n def __init__(self, data: Data, metadata: Metadata) -> None:\n # Initialize the MeasurementBase from Data. This is much simpler than\n # passing a million arguments directly to the scan.\n super().__init__(data.intensity, data.intensity_e, data.energy,\n metadata, data.theta)\n\n def subsample_q(self, q_min=0, q_max=float('inf')):\n \"\"\"\n Delete data points less than q_min and more than q_max.\n\n Args:\n q_min:\n The minimum q to be included in this scan. Defaults to 0 Å.\n q_max:\n The maximum q to be included in this scan. Defaults to inf Å.\n \"\"\"\n # A place to store all the indices violating our condition on q.\n illegal_q_indices = np.where(\n (self.q_vectors <= q_min) | (self.q_vectors >= q_max)\n )[0]\n # [0] necessary because np.where returns a tuple of arrays of length 1.\n # This is a quirk of np.where – I don't think it's actually designed to\n # be used like this, and they encourage np.asarray(condition).nonzero()\n\n # Now remove all data points at these qs.\n self.remove_data_points(illegal_q_indices)\n\n def transmission_normalisation(self):\n \"\"\"\n Perform the transmission correction.\n \"\"\"\n if len(self.metadata.transmission)==1:\n self.intensity /= float(self.metadata.transmission)\n self.intensity_e /= float(self.metadata.transmission)\n else:\n self.intensity /= self.metadata.transmission\n self.intensity_e /= self.metadata.transmission\n\n def qdcd_normalisation(self, itp):\n \"\"\"\n Perform normalisation by DCD variance.\n\n Args:\n itp (:py:attr:`tuple`): Containing interpolation knots\n (:py:attr:`array_like`), B-spline coefficients\n (:py:attr:`array_like`), and degree of spline (:py:attr:`int`).\n \"\"\"\n self.intensity /= splev(self.q_vectors, itp)\n self.intensity_e /= splev(self.q_vectors, itp)\n\n def footprint_correction(self, beam_width, sample_size):\n \"\"\"\n Class method for :func:`islatu.corrections.footprint_correction`.\n\n Args:\n beam_width (:py:attr:`float`): Width of incident beam, in metres.\n sample_size (:py:class:`uncertainties.core.Variable`): Width of\n sample in the dimension of the beam, in metres.\n theta (:py:attr:`float`): Incident angle, in degrees.\n \"\"\"\n frac_of_beam_sampled = corrections.footprint_correction(\n beam_width, sample_size, self.theta)\n self.intensity /= frac_of_beam_sampled\n self.intensity_e /= frac_of_beam_sampled\n\n\nclass Scan2D(Scan):\n \"\"\"\n Attributes:\n data (:py:attr:`islatu.data.Data`):\n The intensity as a function of Q data for this scan.\n metadata (:py:attr:`islatu.metadata.Metadata`):\n This scan's metadata.\n images (:py:attr:`list` of :py:class:`islatu.image.Image`):\n The detector images in the given scan.\n \"\"\"\n\n def __init__(self, data: Data, metadata: Metadata, images: 
List[Image]) \\\n            -> None:\n        super().__init__(data, metadata)\n        self.images = images\n\n    def crop(self, crop_function, **kwargs):\n        \"\"\"\n        Crop every image in images according to crop_function.\n\n        Args:\n            crop_function (:py:attr:`callable`):\n                Cropping function to be used.\n            kwargs (:py:attr:`dict`, optional):\n                Keyword arguments for the cropping function. Defaults to\n                :py:attr:`None`.\n            progress (:py:attr:`bool`, optional):\n                Show a progress bar. Requires the :py:mod:`tqdm` package.\n                Defaults to :py:attr:`True`.\n        \"\"\"\n\n        (vals, stdevs) = (np.zeros(len(self.intensity)),\n                          np.zeros(len(self.intensity)))\n        for i, image in enumerate(self.images):\n            image.crop(crop_function, **kwargs)\n            vals[i], stdevs[i] = self.images[i].sum()\n\n        self.intensity = np.array(vals)\n        self.intensity_e = np.array(stdevs)\n\n    def bkg_sub(self, bkg_sub_function, **kwargs):\n        \"\"\"\n        Perform background subtraction for each image in a Scan.\n\n        Args:\n            bkg_sub_function (:py:attr:`callable`): Background subtraction\n                function to be used.\n            kwargs (:py:attr:`dict`, optional): Keyword arguments for\n                the background subtraction function. Defaults\n                to :py:attr:`None`.\n            progress (:py:attr:`bool`, optional): Show a progress bar.\n                Requires the :py:mod:`tqdm` package. Defaults\n                to :py:attr:`True`.\n        \"\"\"\n        vals, stdevs = np.zeros(\n            len(self.intensity)), np.zeros(len(self.intensity))\n\n        # We keep track of the bkg_sub_infos for meta-analyses.\n        bkg_sub_info = [\n            image.background_subtraction(bkg_sub_function, **kwargs)\n            for image in self.images\n        ]\n\n        # Also update the image intensities & errors.\n        for i, image in enumerate(self.images):\n            vals[i], stdevs[i] = image.sum()\n\n        # Store the intensity(Q) to the new value.\n        self.intensity = np.array(vals)\n        self.intensity_e = np.array(stdevs)\n\n        # Expose the information relating to the background subtraction.\n        return bkg_sub_info\n\n    def remove_data_points(self, indices):\n        \"\"\"\n        Convenience method for the removal of specific data points by their\n        indices.\n\n        Args:\n            indices:\n                The indices to be removed.\n        \"\"\"\n        super().remove_data_points(indices)\n\n        # Delete images in reverse order so deletions don't shift the indices\n        # of images that are yet to be removed.\n        for idx in sorted(indices, reverse=True):\n            del self.images[idx]\n" }, { "alpha_fraction": 0.7252918481826782, "alphanum_fraction": 0.7311283946037292, "avg_line_length": 44.08771896362305, "blob_id": "55114995140ad16034dbe31f317d5067bc2e13bc", "content_id": "40ed0a1f081cc0668251365a9fa068fc456f8b32", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 2572, "license_type": "permissive", "max_line_length": 457, "num_lines": 57, "path": "/docs/source/index.rst", "repo_name": "DiamondLightSource/islatu", "src_encoding": "UTF-8", "text": ".. islatu documentation master file, created by\n   sphinx-quickstart on Fri Jan 17 18:56:48 2020.\n   You can adapt this file completely to your liking, but it should at least\n   contain the root `toctree` directive.\n\nX-ray reflectometry reduction in Python\n=======================================\n\n:py:mod:`islatu` is an open-source package for the reduction of x-ray reflectometry datasets.\nCurrently, :py:mod:`islatu` is developed at and supports data from `Diamond Light Source`_; however, we are happy to work with others to enable data from other sources (including neutron sources).\n\nThese webpages include `API-level documentation`_ and information about some `workflows`_ that can be used for data reduction. 
There is also documentation on a `command line interface`_ that can be used to process reflectivity data without any Python programming.\n\nContributing\n------------\nAs with any coding project, there are many ways to contribute. To report a bug or suggest a feature, `open an issue on the github repository`_. If you would like to contribute code, we would recommend that you first `raise an issue`_ before diving into writing code, so we can let you know if we are working on something similar already. For minor changes, such as fixing typos in the documentation or in the code, feel free to make pull requests directly.\n\nContact us\n----------\nIf you need to contact the developers about anything, please either `raise an issue on the github repository`_ if appropriate, or send an email to richard.brearton@diamond.ac.uk.\n\nContributors\n------------\n\n- `Richard Brearton`_\n- `Andrew R. McCluskey`_\n\nAcknowledgements\n----------------\n\nWe acknowledge the support of the Ada Lovelace Centre, a joint initiative between the Science and Technology Facilities Council (as part of UK Research and Innovation), Diamond Light Source, and the UK Atomic Energy Authority, in the development of this software.\n\n.. _Diamond Light Source: https://www.diamond.ac.uk\n.. _open an issue on the github repository: https://github.com/RBrearton/islatu/issues\n.. _raise an issue: https://github.com/RBrearton/islatu/issues\n.. _raise an issue on the github repository: https://github.com/RBrearton/islatu/issues\n.. _API-level documentation: ./modules.html\n.. _workflows: ./workflows.html\n.. _command line interface: ./process_xrr.ipynb\n.. _Andrew R. McCluskey: https://www.armccluskey.com\n.. _Richard Brearton: https://scholar.google.com/citations?user=fD9zp0YAAAAJ&hl=en\n\n.. 
toctree::\n :hidden:\n :maxdepth: 2\n\n installation\n workflows\n process_xrr\n modules\n\nSearching\n=========\n\n* :ref:`genindex`\n* :ref:`modindex`\n* :ref:`search`\n" }, { "alpha_fraction": 0.6445714235305786, "alphanum_fraction": 0.6946938633918762, "avg_line_length": 32.46994400024414, "blob_id": "e1b801619a0c11ae3f647633f3c50eda59ddd170", "content_id": "1d77e198bdb362c7d39b2b707042488e72c49ca0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6129, "license_type": "permissive", "max_line_length": 80, "num_lines": 183, "path": "/tests/unit/test_refl_profile.py", "repo_name": "DiamondLightSource/islatu", "src_encoding": "UTF-8", "text": "\"\"\"\nThis module tests the islatu.refl_profile module's Profile class.\n\"\"\"\n\n\nimport numpy as np\nfrom numpy.testing import assert_allclose\n\nfrom islatu.refl_profile import Profile\nfrom islatu.cropping import crop_to_region\nfrom islatu.background import roi_subtraction\nfrom islatu.scan import Scan2D\n\n\ndef test_profile_data(profile_01: Profile, scan2d_from_nxs_01: Scan2D):\n \"\"\"\n Make sure that our profile has exactly the same q_vectors as its scan,\n intensities, intensity_e's, etc.\n \"\"\"\n assert profile_01.energy == scan2d_from_nxs_01.energy\n assert (profile_01.intensity == scan2d_from_nxs_01.intensity).all()\n assert (profile_01.intensity_e == scan2d_from_nxs_01.intensity_e).all()\n\n assert_allclose(profile_01.q_vectors, scan2d_from_nxs_01.q_vectors, 1e-5)\n assert_allclose(profile_01.theta, scan2d_from_nxs_01.theta, 1e-5)\n\n\ndef test_profile_crop(profile_01: Profile):\n \"\"\"\n Make sure that the profile's crop method crops its constituent scans'\n images.\n \"\"\"\n region = profile_01.scans[0].metadata.signal_regions[0]\n profile_01.crop(crop_to_region, region=region)\n\n\ndef test_profile_bkg_sub(profile_01: Profile, scan2d_from_nxs_01: Scan2D):\n \"\"\"\n Make sure that bkg_sub from the profile is the same as bkg_sub from the\n scan.\n \"\"\"\n bkg_region = scan2d_from_nxs_01.metadata.background_regions[0]\n profile_01.bkg_sub(roi_subtraction, list_of_regions=[bkg_region])\n scan2d_from_nxs_01.bkg_sub(roi_subtraction, list_of_regions=[bkg_region])\n\n assert_allclose(profile_01.intensity_e,\n scan2d_from_nxs_01.intensity_e, 1e-4)\n assert_allclose(profile_01.intensity, scan2d_from_nxs_01.intensity, 1e-4)\n\n\ndef test_profile_subsample_q_01(profile_01: Profile):\n \"\"\"\n Make sure subsample_q deletes the appropriate things. Because it just calls\n remove_data_points, which has already been tested extensively in test_data,\n we only need to check a couple of values to make sure the right qs have been\n deleted an we know that all the other attributes will have been handled\n correctly.\n \"\"\"\n original_len = len(profile_01.scans[0].theta)\n # Defaults shouldn't change anything.\n profile_01.subsample_q(\"404876\")\n assert len(profile_01.scans[0].theta) == original_len\n assert len(profile_01.theta) == original_len\n\n\ndef test_subsample_q_02(profile_01: Profile):\n \"\"\"\n Make sure that we can set just an upper bound. Note that this dataset goes\n from 0.025Å to 0.06Å\n \"\"\"\n q_max = 0.04\n\n assert max(profile_01.q_vectors) > q_max\n assert max(profile_01.scans[0].q_vectors) > q_max\n profile_01.subsample_q(\"404876\", q_max=q_max)\n assert max(profile_01.q_vectors) <= q_max\n assert max(profile_01.scans[0].q_vectors) <= q_max\n\n\ndef test_subsample_q_03(profile_01: Profile):\n \"\"\"\n Make sure that we can set a lower bound. 
Note that this dataset goes from\n 0.025Å to 0.06Å.\n \"\"\"\n q_min = 0.04\n\n assert min(profile_01.q_vectors) < q_min\n assert min(profile_01.scans[0].q_vectors) < q_min\n profile_01.subsample_q(\"404876\", q_min=q_min)\n assert min(profile_01.q_vectors) >= q_min\n assert min(profile_01.scans[0].q_vectors) >= q_min\n\n\ndef test_subsample_q_04(profile_01: Profile):\n \"\"\"\n Test that we can set both lower and upper bounds.\n \"\"\"\n q_min = 0.032\n q_max = 0.051\n\n profile_01.subsample_q(\"404876\", q_min, q_max)\n\n assert min(profile_01.q_vectors) >= q_min\n assert max(profile_01.q_vectors) <= q_max\n\n\ndef test_profile_footprint_correction(profile_01: Profile, scan2d_from_nxs_01):\n \"\"\"\n Assert that calling the footprint_correction method in an instance of\n Profile is the same thing as calling it in all of its constituent Scans.\n Then, if the Scan footprint correction tests pass, then this must also\n work.\n \"\"\"\n beam_width = 100e-6\n sample_size = 1e-3\n\n profile_01.footprint_correction(beam_width, sample_size)\n scan2d_from_nxs_01.footprint_correction(beam_width, sample_size)\n\n assert_allclose(profile_01.intensity, scan2d_from_nxs_01.intensity)\n assert_allclose(profile_01.intensity_e, profile_01.intensity_e)\n\n\ndef test_profile_transmission_normalisation(\n profile_01: Profile, scan2d_from_nxs_01: Scan2D):\n \"\"\"\n Assert that carrying out a transmission normalisation on an instance of\n Profile is the same thing as doing it on each of its constituent scans.\n \"\"\"\n profile_01.transmission_normalisation()\n scan2d_from_nxs_01.transmission_normalisation()\n\n assert_allclose(profile_01.intensity, scan2d_from_nxs_01.intensity)\n assert_allclose(profile_01.intensity_e, profile_01.intensity_e)\n\n\ndef test_profile_qdcd_normalisation(\n profile_01: Profile, scan2d_from_nxs_01: Scan2D, dcd_norm_01_splev):\n \"\"\"\n Assert that carrying out the qdcd correction on an instance of Profile is\n the same thing as doing it on each of its constituent scans.\n \"\"\"\n profile_01.qdcd_normalisation(dcd_norm_01_splev)\n scan2d_from_nxs_01.qdcd_normalisation(dcd_norm_01_splev)\n\n assert_allclose(profile_01.intensity, scan2d_from_nxs_01.intensity)\n assert_allclose(profile_01.intensity_e, profile_01.intensity_e)\n\n\ndef test_concatenate(profile_01: Profile):\n \"\"\"\n Explicit simple check that concatenate is working. 
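The check zeroes the\n    first scan's intensity and asserts that the zero propagates through to the\n    concatenated profile. 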
Note that, if it isn't\n working, many other tests would also raise.\n \"\"\"\n profile_01.scans[0].intensity = 0\n profile_01.concatenate()\n\n assert profile_01.intensity == 0\n\n\ndef test_rebin_01(profile_0102: Profile):\n \"\"\"\n Make sure that we can rebin the data using default parameters.\n \"\"\"\n initial_length = len(profile_0102.q_vectors)\n profile_0102.rebin()\n assert initial_length > len(profile_0102.q_vectors)\n\n\ndef test_rebin_02(profile_0102: Profile):\n \"\"\"\n Now that we know that rebin is doing something, lets make sure that it is\n doing sane things.\n \"\"\"\n\n init = np.copy(profile_0102.intensity)\n\n profile_0102.rebin()\n\n new = profile_0102.intensity\n\n big, small = (init[3], init[8]) if init[3] > init[8] else init[8], init[3]\n assert small < new[3] and big > new[3]\n" }, { "alpha_fraction": 0.5532928705215454, "alphanum_fraction": 0.5576256513595581, "avg_line_length": 29.36842155456543, "blob_id": "570e17269de2c52521e9ed9a3b9e0fecc8b01f0e", "content_id": "b142724aa3ffef26503c9bebc7ef5d8f3526ed38", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2308, "license_type": "permissive", "max_line_length": 76, "num_lines": 76, "path": "/src/islatu/region.py", "repo_name": "DiamondLightSource/islatu", "src_encoding": "UTF-8", "text": "\"\"\"\nThis module defines the Region object, whose instances define regions of\ninterest in images.\n\"\"\"\n\n\nclass Region:\n \"\"\"\n Instances of this class define regions of interest.\n \"\"\"\n\n def __init__(self, x_start, x_end, y_start, y_end):\n\n # Make sure that x_end > x_start, etc.\n if x_end < x_start:\n x_start, x_end = x_end, x_start\n if y_end < y_start:\n y_start, y_end = y_end, y_start\n\n # These may be recorded as types other than int, but we really want\n # these to be integers so they can be used to index objects.\n self.x_start = int(x_start)\n self.x_end = int(x_end)\n self.y_start = int(y_start)\n self.y_end = int(y_end)\n\n @property\n def x_length(self):\n \"\"\"\n Returns the length of the region in the x-direction.\n \"\"\"\n return self.x_end - self.x_start\n\n @property\n def y_length(self):\n \"\"\"\n Returns the length of the region in the y-direction.\n \"\"\"\n return self.y_end - self.y_start\n\n @property\n def num_pixels(self):\n \"\"\"\n returns the number of pixels in the region.\n \"\"\"\n return self.x_length * self.y_length\n\n @classmethod\n def from_dict(cls, region_dict: dict):\n \"\"\"\n Instantiates a Region from a dictionary with keys in:\n ['x', 'y', 'width', 'height'].\n\n This is to help loading dictionarys that are generated by calling\n json.loads on the NXcollections found in I07 nexus files as of\n 27/04/2022.\n \"\"\"\n x_start = int(region_dict['x'])\n y_start = int(region_dict['y'])\n x_end = x_start + int(region_dict['width'])\n y_end = y_start + int(region_dict['height'])\n return cls(x_start, x_end, y_start, y_end)\n\n def __eq__(self, other):\n \"\"\"\n Allows for equality checks to be made between instances of Region.\n \"\"\"\n if not isinstance(other, Region):\n return False\n\n return self.x_start == other.x_start and self.x_end == other.x_end \\\n and self.y_start == other.y_start and self.y_end == other.y_end\n\n def __str__(self):\n return f\"x_start: {self.x_start}, x_end: {self.x_end}, \" + \\\n f\"y_start: {self.y_start}, y_end: {self.y_end}.\"\n" }, { "alpha_fraction": 0.6863468885421753, "alphanum_fraction": 0.6937269568443298, "avg_line_length": 21.66666603088379, "blob_id": 
"028a0e963aafaff8dec2f8d9fd08e7ec5b568e70", "content_id": "6c90aa38258a6f292e21cd12ce34e04fd6b68381", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 271, "license_type": "permissive", "max_line_length": 83, "num_lines": 12, "path": "/docs/source/installation.rst", "repo_name": "DiamondLightSource/islatu", "src_encoding": "UTF-8", "text": "Installation\n============\n\n:py:mod:`islatu` can be installed from the PyPI package manager with :py:mod:`pip`:\n\n.. code-block:: bash \n\n pip install islatu\n\nAlternatively, the latest development build can be found `Github`_. \n\n.. _Github: https://github.com/arm61/islatu" }, { "alpha_fraction": 0.5909090638160706, "alphanum_fraction": 0.5909090638160706, "avg_line_length": 18, "blob_id": "49c270f7e54f19979f6d03f127e69d5638b2ef9c", "content_id": "cdff7e5b2e2d06d2af1e1238ac360da37f430c01", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 132, "license_type": "permissive", "max_line_length": 34, "num_lines": 7, "path": "/docs/source/corrections.rst", "repo_name": "DiamondLightSource/islatu", "src_encoding": "UTF-8", "text": "islatu\\.corrections\n===================\n\n.. automodule:: islatu.corrections\n :members:\n :undoc-members:\n :show-inheritance:" }, { "alpha_fraction": 0.6225139498710632, "alphanum_fraction": 0.638424813747406, "avg_line_length": 32.52000045776367, "blob_id": "9f9fced4a593b61e29ab3e2c0e7fa2ddf1fc32a0", "content_id": "1bfa26af2cfcc674d03a61f16be2caf70eecb6e7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2514, "license_type": "permissive", "max_line_length": 78, "num_lines": 75, "path": "/tests/system/_test_cli.py", "repo_name": "DiamondLightSource/islatu", "src_encoding": "UTF-8", "text": "\"\"\"\nThis module tests the command line interface to islatu.\n\"\"\"\n\nimport os\nimport subprocess\n\nimport numpy as np\n\n\ndef test_process_xrr_01(process_xrr_path):\n \"\"\"\n Make sure that we raise a FileNotFoundError when there's no file to be\n processed by the process_xrr script.\n \"\"\"\n proc = subprocess.run(\n [process_xrr_path], capture_output=True, text=True\n )\n\n error_type = proc.stderr.split('\\n')[3].split(':')[0].strip()\n assert error_type == \"FileNotFoundError\"\n\n\ndef test_process_xrr_02(process_xrr_path, path_to_resources, tmp_path,\n old_dcd_data):\n \"\"\"\n Make sure that the processing is running, and that it is producing\n acceptable results.\n \"\"\"\n yaml_path = path_to_resources + os.sep + \"dcd.yaml\"\n proc = subprocess.run(\n [process_xrr_path, '-d', path_to_resources, '-y', yaml_path,\n '-o', tmp_path],\n capture_output=True, text=True\n )\n\n # Make sure no errors were thrown during reduction.\n # This will only print if the assertion fails.\n print(proc.stdout)\n print(proc.stderr)\n assert proc.stdout.split('\\n')[204].strip().startswith(\n \"Reduced data stored at \"\n )\n\n # Make sure that the saved data is correct.\n reduced_data = np.loadtxt(os.path.join(tmp_path, os.listdir(tmp_path)[0]))\n assert np.allclose(reduced_data[0], old_dcd_data[0], 1e-3)\n assert np.allclose(reduced_data[1], old_dcd_data[1], 1e-3)\n assert np.allclose(reduced_data[2], old_dcd_data[2], 1e-3)\n\n\ndef test_process_xrr_03(process_xrr_path, path_to_resources, tmp_path,\n old_dcd_data):\n \"\"\"\n Make sure that we can subsample q, and that we can select only specific\n scan numbers.\n \"\"\"\n 
yaml_path = path_to_resources + os.sep + \"dcd.yaml\"\n proc = subprocess.run(\n [process_xrr_path, '-d', path_to_resources, '-y', yaml_path,\n '-o', tmp_path], capture_output=True, text=True)\n\n # Make sure no errors were thrown during reduction.\n # This will only print if the assertion fails.\n print(proc.stdout)\n print(proc.stderr)\n assert proc.stdout.split('\\n')[204].strip().startswith(\n \"Reduced data stored at \"\n )\n\n # Make sure that the saved data is correct.\n reduced_data = np.loadtxt(os.path.join(tmp_path, os.listdir(tmp_path)[0]))\n assert np.allclose(reduced_data[0], old_dcd_data[0], 1e-3)\n assert np.allclose(reduced_data[1], old_dcd_data[1], 1e-3)\n assert np.allclose(reduced_data[2], old_dcd_data[2], 1e-3)\n" }, { "alpha_fraction": 0.647995114326477, "alphanum_fraction": 0.6554943323135376, "avg_line_length": 35.915252685546875, "blob_id": "02e5d0145f7780479e54f1a3e944fb3634b998a9", "content_id": "ed0f6b1869b32321ec4952acd7840b2ff2e4bc0e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6536, "license_type": "permissive", "max_line_length": 80, "num_lines": 177, "path": "/src/islatu/background.py", "repo_name": "DiamondLightSource/islatu", "src_encoding": "UTF-8", "text": "\"\"\"\nBackground substraction is a necessary component of reflectometry reduction,\nwhere the background scattering is removed from the reflected intensity.\n\nHerein are some functions to enable that for a two-dimensional detector image,\nas well as simple dataclasses in which we can store some information relating to\nthe background subtraction, and any fitting that we might have carried out.\n\"\"\"\n\nfrom dataclasses import dataclass\nfrom typing import Callable, List\n\nimport numpy as np\nfrom scipy.stats import norm\nfrom scipy.optimize import curve_fit\n\nfrom .region import Region\nfrom .image import Image\n\n\n@dataclass\nclass FitInfo:\n \"\"\"\n A simple dataclass in which we can store data relating to the quality of a\n fit.\n \"\"\"\n popt: np.ndarray\n pcov: np.ndarray\n fit_function: Callable\n\n\n@dataclass\nclass BkgSubInfo:\n \"\"\"\n A simple data class in which we can store information relating to a\n background subtraction.\n \"\"\"\n bkg: float\n bkg_e: float\n bkg_sub_function: Callable\n fit_info: FitInfo = None\n\n\ndef roi_subtraction(image, list_of_regions: List[Region]):\n \"\"\"\n Carry out background subtraction by taking a series of rectangular regions\n of interested (ROIs) as being fair Poissonian measurements of the\n background.\n\n Args:\n image:\n The islatu.image.Image object from which we should subtract\n background from.\n list_of_regions:\n A list of instances of the Regions class corresponding to background\n regions.\n \"\"\"\n # We're going to need to count all intensity in all the background, as well\n # as the number of pixels used in our measurement of the background.\n sum_of_bkg_areas = 0\n total_num_pixels = 0\n\n # Make sure we've been given multiple regions. 
If not, wrap it in a list.\n    if isinstance(list_of_regions, Region):\n        list_of_regions = [list_of_regions]\n\n    # Add up all the intensity in all the pixels.\n    for region in list_of_regions:\n        # Now add the total intensity in this particular background region to\n        # the intensity measured in all the background regions so far.\n        sum_of_bkg_areas += np.sum(\n            image.array_original[\n                int(region.x_start):int(region.x_end),\n                int(region.y_start):int(region.y_end)\n            ]\n        )\n        # Add the number of pixels in this background ROI to the total number\n        # of pixels used to compute the background measurement overall.\n        total_num_pixels += region.num_pixels\n\n    # Poisson statistics mean that a single sqrt of the total counts gives\n    # the uncertainty.\n    err_of_bkg_areas = np.sqrt(sum_of_bkg_areas)\n    if err_of_bkg_areas == 0:\n        err_of_bkg_areas = 1\n\n    # Get the per pixel background mean and stddev.\n    bkg_per_pixel = sum_of_bkg_areas / total_num_pixels\n    bkg_error_per_pixel = err_of_bkg_areas / total_num_pixels\n\n    # Expose the calculated background and background_error per pixel.\n    return BkgSubInfo(bkg_per_pixel, bkg_error_per_pixel, roi_subtraction)\n\n\ndef univariate_normal(data, mean, sigma, offset, factor):\n    \"\"\"\n    Produce a univariate normal distribution.\n\n    Args:\n        data (:py:attr:`array_like`): Abscissa data.\n        mean (:py:attr:`float`): Mean (horizontal).\n        sigma (:py:attr:`float`): Standard deviation (horizontal).\n        offset (:py:attr:`float`): Offset from 0 for the ordinate; this is\n            the background level.\n        factor (:py:attr:`float`): Multiplicative factor for area of normal\n            distribution.\n\n    Returns:\n        :py:attr:`array_like`: Ordinate data for univariate normal distribution.\n    \"\"\"\n    # Creation of the univariate normal distribution.\n    normal = norm(loc=mean, scale=sigma)\n    return offset + normal.pdf(data).flatten() * factor\n\n\ndef fit_gaussian_1d(image: Image, params_0=None, bounds=None, axis=0):\n    \"\"\"\n    Fit a one-dimensional Gaussian function with some ordinate offset to an\n    image with uncertainty. This is achieved by averaging in a given ``axis``\n    before performing the fit. Return the results and the index of the offset.\n\n    Args:\n        image:\n            The islatu image object to fit.\n        params_0 (:py:attr:`list`, optional):\n            An initial guess at the parameters. Defaults to values based on the\n            image.\n        bounds (:py:attr:`list` of :py:attr:`tuple`, optional):\n            Bounds for the fitting. 
Defaults to values based on the image.\n axis (:py:attr:`int`):\n The dimension along which the averaging will be performed.\n\n Returns:\n :py:attr:`tuple`: Containing:\n - :py:attr:`array_like`: The results (with uncertainties) for each\n of the 6 parameters fit.\n - :py:attr:`int`: The index of the offset.\n - :py:attr:`None`: As it is not possible to describe the reflected\n peak width.\n \"\"\"\n arr, arr_e = image.array, image.array_e\n ordinate = arr.mean(axis=axis)\n\n # Now we can generate an array of errors.\n ordinate_e = np.sqrt(np.mean(arr_e**2, axis=axis))\n\n # Setting default values.\n if params_0 is None:\n # Now we generate the initial values for our Gaussian fit.\n # These values are crucial – as this is a high dimensional fitting\n # problem, it is likely that we'll get stuck in a local minimum if these\n # aren't good.\n # Guess that the Gaussian mean is at the most intense mean pixel value.\n mean0 = np.argmax(ordinate)\n # Guess that the standard deviation is a single pixel.\n sdev0 = 1\n # Guess that the background (offset) is the median pixel value.\n offset0 = np.median(ordinate)\n # Guess that the scale is equal to the largest recorded value.\n scale0 = arr.max()\n params_0 = [mean0, sdev0, offset0, scale0]\n if bounds is None:\n bounds = ([0, 0, 0, 0],\n [ordinate.shape[0], ordinate.shape[0], scale0, scale0 * 10])\n\n # Perform the fitting.\n fit_popt_pcov = curve_fit(\n univariate_normal,\n np.arange(0, ordinate.shape[0], 1), ordinate, bounds=bounds,\n sigma=ordinate_e, p0=params_0, maxfev=2000 * (len(params_0) + 1))\n\n fit_info = FitInfo(fit_popt_pcov[0], fit_popt_pcov[1], univariate_normal)\n\n # Determine uncertainty from covarience matrix.\n # Note: the stddev of the fit Gaussian can be accessed via popt[1].\n p_sigma = np.sqrt(np.diag(fit_info.pcov))\n\n return BkgSubInfo(fit_info.popt[2], p_sigma[2], fit_gaussian_1d, fit_info)\n" }, { "alpha_fraction": 0.6124803423881531, "alphanum_fraction": 0.6219192743301392, "avg_line_length": 28.796875, "blob_id": "0b86fb8af69ba99c64de3bb161ae418623da5bfa", "content_id": "8adcfc63a627962295b025b27a7cfcf9da75c7a3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1907, "license_type": "permissive", "max_line_length": 74, "num_lines": 64, "path": "/setup.py", "repo_name": "DiamondLightSource/islatu", "src_encoding": "UTF-8", "text": "import io\nfrom glob import glob\nfrom os.path import basename, dirname, join, splitext, abspath\n\nfrom setuptools import find_packages\nfrom setuptools import setup\n\n\nTHIS_DIRECTORY = abspath(dirname(__file__))\nwith io.open(join(THIS_DIRECTORY, 'README.md')) as f:\n LONG_DESCRIPTION = f.read()\n\nREQUIREMENTS = [\n \"wheel\",\n \"numpy\",\n \"scipy\",\n \"coverage\",\n \"pandas\",\n \"pyyaml\",\n \"nexusformat\",\n \"pytest\",\n \"pytest-lazy-fixture\",\n \"nbsphinx\",\n \"jupyter-sphinx\",\n \"jupyterlab\",\n \"ipywidgets\",\n \"pytest-cov\",\n]\n\nsetup(\n name='islatu',\n version='1.0.7',\n license='MIT',\n description='A package for the reduction of reflectometry data.',\n author='Richard Brearton',\n author_email='richardbrearton@gmail.com',\n long_description=LONG_DESCRIPTION,\n long_decription_content_type='text/markdown',\n packages=find_packages('src'),\n package_dir={'': 'src'},\n py_modules=[splitext(basename(path))[0] for path in glob('src/*.py')],\n include_package_data=True,\n zip_safe=False,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Science/Research',\n 
'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Programming Language :: Python :: 3.10',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Chemistry',\n 'Topic :: Scientific/Engineering :: Physics'\n ],\n setup_requires=REQUIREMENTS,\n install_requires=REQUIREMENTS\n)\n" }, { "alpha_fraction": 0.6336933374404907, "alphanum_fraction": 0.662057101726532, "avg_line_length": 37.69921875, "blob_id": "34f88626eebc64ffa30395a5d24cc8a8546320fe", "content_id": "965f39e8ceff2ef208c44486cf300c88dc8cee72", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9907, "license_type": "permissive", "max_line_length": 80, "num_lines": 256, "path": "/tests/unit/test_data.py", "repo_name": "DiamondLightSource/islatu", "src_encoding": "UTF-8", "text": "\"\"\"\nModule for testing the Data class, and the MeasurementBase class.\n\"\"\"\n\nimport pytest\nfrom pytest_lazyfixture import lazy_fixture as lazy\nimport numpy as np\n\nfrom islatu.data import Data, MeasurementBase\nfrom islatu.io import I07Nexus\nfrom islatu.scan import Scan2D\n\n# Fairly obvious disable for testing: we also need to test protected attrs.\n# pylint: disable=protected-access\n\n\n@pytest.mark.parametrize(\n 'data',\n [lazy('generic_data_01'), lazy('generic_data_02')]\n)\nclass TestDataSimple:\n \"\"\"\n Simple tests for the Data class that don't require any additional fixtures.\n \"\"\"\n\n def test_reflectivity_max(self, data: Data):\n \"\"\"\n Make sure that max(reflectivity) is 1.\n \"\"\"\n assert max(data.reflectivity) == 1\n\n\n@pytest.mark.parametrize(\n 'data, correct_intensity',\n [(lazy('generic_data_01'), np.arange(1100, 300, -45)[:10]),\n (lazy('generic_data_02'), (np.arange(11100012, 0, -12938)[:6]))]\n)\ndef test_intensity_access(data, correct_intensity):\n \"\"\"\n Make sure we can access data.intensity\n \"\"\"\n assert (data.intensity == correct_intensity).all()\n\n\n@pytest.mark.parametrize(\n 'data, correct_intensity_e',\n [(lazy('generic_data_01'), np.sqrt(np.arange(1100, 300, -45)[:10])),\n (lazy('generic_data_02'), np.sqrt(np.arange(11100012, 0, -12938)[:6]))]\n)\ndef test_intensity_e_access(data, correct_intensity_e):\n \"\"\"\n Make sure we can access the I_e attribute.\n \"\"\"\n assert(data.intensity_e == correct_intensity_e).all()\n\n\n@pytest.mark.parametrize(\n 'data,correct_energy',\n [(lazy('generic_data_01'), 12.5), (lazy('generic_data_02'), 8.04)])\ndef test_energy_access(data: Data, correct_energy):\n \"\"\"\n Make sure we can access the data.energy attribute, and that it has the\n correct value.\n \"\"\"\n assert data.energy == correct_energy\n\n\n@pytest.mark.parametrize(\n 'data, correct__theta',\n [(lazy('generic_data_01'), None), (lazy('generic_data_02'), np.arange(6))]\n)\ndef test__theta_access(data: Data, correct__theta):\n \"\"\"\n Make sure that we can access the _theta attribute, and that it has the\n correct values.\n \"\"\"\n if correct__theta is not None:\n assert (data._theta == correct__theta).all()\n else:\n assert data._theta is correct__theta\n\n\n@pytest.mark.parametrize(\n 'data, correct__q',\n [\n (lazy('generic_data_01'), 
np.arange(10)/10),\n (lazy('generic_data_02'), None)\n ]\n)\ndef test__q_access(data: Data, correct__q):\n \"\"\"\n Make sure that we can access the data._q attribute, and that it has the\n correct value.\n \"\"\"\n if correct__q is not None:\n assert (data._q == correct__q).all()\n else:\n assert correct__q is data._q\n\n\ndef test_conversion_to_q(generic_data_02: Data):\n \"\"\"\n Check that we can correctly convert from theta to q. Basically any decent\n programmatic way of checking this would be completely circular: I would\n just re-implement the function I'm trying to test. So, I used a random\n online calculator to check the value against my function.\n \"\"\"\n assert generic_data_02.q_vectors[1] == pytest.approx(0.142217, rel=1e-5)\n\n\ndef test_conversion_to_th(generic_data_01: Data):\n \"\"\"\n Check that we can correctly convert from q to theta. As above, this number\n was calculated using online calculators. Don't hate the tester, hate the\n tests.\n \"\"\"\n # Online calculator derped for these numbers so rel is small. These things\n # are dumb and throw away significant figures just for kicks.\n assert generic_data_01.theta[1] == pytest.approx(0.4525, rel=1e-3)\n\n\n@pytest.mark.parametrize(\n 'data',\n [lazy('generic_data_01'), lazy('generic_data_02'),\n lazy('scan2d_from_nxs_01')]\n)\ndef test_remove_data_points_01(data: Data):\n \"\"\"\n First data point removal test.\n \"\"\"\n # Make a deep copy of data. Worth noting that this copy won't quite be\n # precise if our generic_data was defined using q values, hence the need for\n # pytest.approx later.\n data_copy = Data(np.copy(data.intensity),\n np.copy(data.intensity_e),\n data.energy, np.copy(data.theta))\n\n # If our data is a Scan2D, we need to construct it slightly differently.\n if isinstance(data, Scan2D):\n data_copy = Scan2D(data_copy, data.metadata,\n list(np.copy(data.images)))\n data.remove_data_points([1])\n\n assert len(data.intensity) + 1 == len(data_copy.intensity)\n assert len(data.intensity_e) + 1 == len(data_copy.intensity_e)\n assert len(data.theta) + 1 == len(data_copy.theta)\n assert len(data.q_vectors) + 1 == len(data_copy.q_vectors)\n assert len(data.reflectivity) + 1 == len(data_copy.reflectivity)\n assert len(data.reflectivity_e) + 1 == len(data_copy.reflectivity_e)\n assert data.intensity[1] == data_copy.intensity[2]\n assert data.intensity_e[1] == data_copy.intensity_e[2]\n assert data.theta[1] == pytest.approx(data_copy.theta[2], rel=1e-3)\n assert data.q_vectors[1] == pytest.approx(data_copy.q_vectors[2], rel=1e-3)\n assert data.reflectivity[1] == data_copy.reflectivity[2]\n assert data.reflectivity_e[1] == data_copy.reflectivity_e[2]\n\n if isinstance(data, Scan2D):\n assert len(data.images) + 1 == len(data_copy.images)\n assert data.images[1] == data_copy.images[2]\n\n\n@pytest.mark.parametrize(\n 'data',\n [lazy('generic_data_01'), lazy('generic_data_02'),\n lazy('scan2d_from_nxs_01')]\n)\ndef test_remove_data_points_02(data: Data):\n \"\"\"\n Second data point removal test. Most of these tests are fairly trivial, but\n the point is more to make sure that we're indeed remembering to remove\n a data point from every single array. Sure, it would be great to split\n these into their own tests, but... cba. These could also have been wrapped\n into fancy tests where I calculate with code which indices in the new\n data object correspond to which indices in the original data_copy. 
But, that\n leaves room for error, which defeats the point of testing.\n \"\"\"\n # Make a deep copy of data.\n data_copy = Data(np.copy(data.intensity),\n np.copy(data.intensity_e),\n data.energy, np.copy(data.theta))\n # If our data is a Scan2D, we need to construct it slightly differently.\n if isinstance(data, Scan2D):\n data_copy = Scan2D(data_copy, data.metadata,\n list(np.copy(data.images)))\n data.remove_data_points([1, 2, 4])\n\n assert len(data.intensity) + 3 == len(data_copy.intensity)\n assert len(data.intensity_e) + 3 == len(data_copy.intensity_e)\n assert len(data.theta) + 3 == len(data_copy.theta)\n assert len(data.q_vectors) + 3 == len(data_copy.q_vectors)\n assert len(data.reflectivity) + 3 == len(data_copy.reflectivity)\n assert len(data.reflectivity_e) + 3 == len(data_copy.reflectivity_e)\n assert data.intensity[1] == data_copy.intensity[3]\n assert data.intensity_e[1] == data_copy.intensity_e[3]\n assert data.theta[1] == pytest.approx(data_copy.theta[3], rel=1e-3)\n assert data.q_vectors[1] == pytest.approx(data_copy.q_vectors[3], rel=1e-3)\n assert data.reflectivity[1] == data_copy.reflectivity[3]\n assert data.reflectivity_e[1] == data_copy.reflectivity_e[3]\n assert data.intensity[2] == data_copy.intensity[5]\n assert data.intensity_e[2] == data_copy.intensity_e[5]\n assert data.theta[2] == pytest.approx(data_copy.theta[5], rel=1e-3)\n assert data.q_vectors[2] == pytest.approx(data_copy.q_vectors[5], rel=1e-3)\n assert data.reflectivity[2] == data_copy.reflectivity[5]\n assert data.reflectivity_e[2] == data_copy.reflectivity_e[5]\n if isinstance(data, Scan2D):\n assert len(data.images) + 3 == len(data_copy.images)\n assert data.images[1] == data_copy.images[3]\n assert data.images[2] == data_copy.images[5]\n\n\ndef test_measurement_base_metadata_type(measurement_base_01):\n \"\"\"\n Make sure that our measurement base type is indeed I07Nexus. If it is, then\n the following tests just quickly make sure that its values have remained\n intact.\n \"\"\"\n assert isinstance(measurement_base_01.metadata, I07Nexus)\n\n\ndef test_measurement_base_metadata_path(measurement_base_01,\n path_to_i07_nxs_01):\n \"\"\"\n Make sure that we can access the metadata, and that its local_path is good.\n \"\"\"\n assert measurement_base_01.metadata.local_path == path_to_i07_nxs_01\n\n\ndef test_measurement_base_metadata_energy(measurement_base_01):\n \"\"\"\n Check that the metadata has the correct energy. The I07Nexus class\n \"\"\"\n assert measurement_base_01.metadata.probe_energy == 12.5\n\n\ndef test_measurement_base_underlying_data(measurement_base_01: MeasurementBase,\n generic_data_01: Data):\n \"\"\"\n Make sure that the instance of MeasurementBase has the same values of\n q, theta, intensity etc. 
as the instance of Data from which it was\n constructed.\n \"\"\"\n # Note that, while there are multiple assertions here, they're really all\n # testing the same thing: pretty trivial attribute access, and equivalence\n # of parent and child for the subset of child that should be the same as\n # parent.\n assert (measurement_base_01._q == generic_data_01._q).all()\n assert measurement_base_01._theta == generic_data_01._theta\n assert (measurement_base_01.q_vectors == generic_data_01.q_vectors).all()\n assert (measurement_base_01.intensity == generic_data_01.intensity).all()\n assert (measurement_base_01.intensity_e ==\n generic_data_01.intensity_e).all()\n assert measurement_base_01.energy == generic_data_01.energy\n assert (measurement_base_01.reflectivity ==\n generic_data_01.reflectivity).all()\n assert (measurement_base_01.reflectivity_e ==\n generic_data_01.reflectivity_e).all()\n" }, { "alpha_fraction": 0.5927578210830688, "alphanum_fraction": 0.5975334644317627, "avg_line_length": 36.144248962402344, "blob_id": "089f1b7af46bc3b191ac8c8abc6a5fe6959ba08d", "content_id": "552ab21399bd9a31984d00ae0acea8f08f6bb45c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 19058, "license_type": "permissive", "max_line_length": 80, "num_lines": 513, "path": "/src/islatu/runner.py", "repo_name": "DiamondLightSource/islatu", "src_encoding": "UTF-8", "text": "\"\"\"\nThis module contains functions whose purpose is simply to use the islatu\nlibrary to process data acquired from a specific instrument.\n\"\"\"\n\nfrom dataclasses import dataclass\nfrom typing import List\nfrom os import path\nimport os\nfrom datetime import datetime\nfrom ast import literal_eval as make_tuple\n\n\ntry:\n from yaml import CLoader as Loader\nexcept ImportError:\n from yaml import Loader\nfrom yaml import load, dump\nimport numpy as np\n\nimport islatu\nfrom islatu import background\nfrom islatu import corrections\nfrom islatu import cropping\nfrom islatu import io\nfrom islatu.region import Region\nfrom islatu.io import i07_dat_to_dict_dataframe\nfrom islatu.refl_profile import Profile\nfrom islatu.debug import debug\n\n\n# This could be done by reflection, but it feels slightly less arcane to use\n# this kind of function map. 
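Each string that can\n# appear in the yaml recipe maps to the callable it names, so a new crop or\n# background method can be exposed by adding an entry here. 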
It also gives these scripts a little more\n# flexibility.\nfunction_map = {\n 'roi_subtraction': background.roi_subtraction,\n 'None': None,\n 'i07': io.i07_nxs_parser,\n 'crop': cropping.crop_to_region\n}\n\n\n@dataclass\nclass Creator:\n \"\"\"\n Simple dataclass to store information relating to the person that created\n this dataset.\n \"\"\"\n name: str = 'Unknown'\n affiliation: str = 'Unknown'\n time: datetime = datetime.now()\n\n\n@dataclass\nclass Origin:\n \"\"\"\n Simple dataclass to store information relating to the experiment.\n \"\"\"\n contact: str = 'My local contact'\n facility: str = 'Diamond Light Source'\n id: str = None\n title: str = None\n directory_path: str = None\n date: str = str(datetime.now())\n year: str = None\n\n\n@dataclass\nclass Measurement:\n \"\"\"\n This dataclass stores measurement-specific metadata.\n \"\"\"\n scheme: str = 'q-dispersive'\n q_range: List[str] = (str(-np.inf), str(np.inf))\n theta_axis_name: str = 'dcdtheta'\n q_axis_name: str = 'qdcd'\n transpose: bool = False\n qz_dimension: int = 1\n qxy_dimension: int = 0\n\n\n@dataclass\nclass Experiment:\n \"\"\"\n This dataclass stores more instrument-specific metadata.\n \"\"\"\n instrument: str = 'i07'\n probe: str = 'x-ray'\n energy: float = 12.5\n measurement: Measurement = Measurement()\n sample: str = None\n\n\nclass DataSource:\n \"\"\"\n This class stores information relating both to the experiment, and to the\n data processor.\n \"\"\"\n\n def __init__(self, title, origin=Origin(), experiment=Experiment(),\n links=None):\n self.origin = origin\n self.origin.title = title\n self.experiment = experiment\n self.links = links\n\n\n@dataclass\nclass Software:\n \"\"\"\n This dataclass stores information relating to the software used to carry\n out the any reduction/processing steps (in this case, islatu of course).\n \"\"\"\n name: str = 'islatu'\n link: str = 'https://islatu.readthedocs.io'\n version: str = islatu.__version__\n\n\n@dataclass\nclass DataState:\n \"\"\"\n This class stores more reduction specific parameters.\n \"\"\"\n\n background = None\n resolution = None\n dcd = None\n transmission = None\n intensity = None\n rebinned = None\n\n\nclass Reduction:\n \"\"\"\n This class contains all of the information pertaining to data reduction\n carried out on this reflectometry data.\n \"\"\"\n\n def __init__(self, software=Software(), input_files=None,\n data_state=DataState(), parser=io.i07_nxs_parser,\n crop_function=cropping.crop_to_region, crop_kwargs=None,\n bkg_function=background.fit_gaussian_1d, bkg_kwargs=None,\n dcd_normalisation=None, sample_size=None, beam_width=None):\n if input_files is None:\n input_files = []\n self.software = software\n self.input_files = input_files\n self.data_state = data_state\n self.parser = parser\n self.crop_function = crop_function\n self.crop_kwargs = crop_kwargs\n self.bkg_function = bkg_function\n self.bkg_kwargs = bkg_kwargs\n self.dcd_normalisation = dcd_normalisation\n self.sample_size = sample_size\n self.beam_width = beam_width\n\n\nclass Data:\n \"\"\"\n This class stores information pertaining to the data collected in the\n experiment.\n \"\"\"\n\n def __init__(self, columns=None, n_qvectors=50, q_min=None, q_max=None,\n q_step=None, q_shape='linear'):\n if columns is None:\n columns = ['Qz / Aa^-1', 'RQz', 'sigma RQz, standard deviation',\n 'sigma Qz / Aa^-1, standard deviation']\n self.column_1 = columns[0]\n self.column_2 = columns[1]\n self.column_3 = columns[2]\n if len(columns) == 4:\n self.column_4 = columns[3]\n if columns == 
'both':\n self.both = True\n self.column_4 = columns[3]\n self.rebin = True\n self.n_qvectors = n_qvectors\n self.q_min = q_min\n self.q_max = q_max\n self.q_step = q_step\n self.q_shape = q_shape\n\n\nclass Foreperson:\n \"\"\"\n This class brings together all of the above classes and dataclasses into\n one big ball of yaml-able information.\n \"\"\"\n\n def __init__(self, run_numbers, yaml_file, directory, title):\n self.creator = Creator()\n self.data_source = DataSource(title)\n self.reduction = Reduction()\n self.data = Data()\n self.yaml_file = yaml_file\n y_file = open(yaml_file, 'r', encoding='utf-8')\n recipe = load(y_file, Loader=Loader)\n y_file.close()\n\n self.setup(recipe)\n\n directory_path = directory.format(\n self.data_source.experiment.instrument,\n self.data_source.origin.year,\n self.data_source.origin.id)\n if path.isdir(directory_path):\n self.directory_path = directory_path\n else:\n raise FileNotFoundError(\n \"The experiment directory <\" + directory_path +\n \"> cannot be found.\")\n\n self.reduction.input_files = [\n self.directory_path + 'i07-' + str(r) + '.nxs' for r in run_numbers]\n\n def setup(self, recipe):\n \"\"\"\n This is a McClusky special. I inherited it, and it works.\n Don't ask questions.\n \"\"\"\n keys = recipe.keys()\n # Populate information from the visit section\n if 'visit' in keys:\n self.data_source.origin.id = recipe['visit']['visit id']\n if 'date' in recipe['visit'].keys():\n self.data_source.origin.date = datetime.strptime(\n str(recipe['visit']['date']), '%Y-%m-%d')\n self.data_source.origin.year = self.data_source.origin.date.year\n if 'local contact' in recipe['visit'].keys():\n self.data_source.origin.contact = recipe[\n 'visit']['local contact']\n if 'user' in recipe['visit'].keys():\n self.creator.name = recipe['visit']['user']\n if 'affiliation' in recipe['visit'].keys():\n self.creator.affiliation = recipe['visit']['user affiliation']\n else:\n raise ValueError(\n f\"No visit given in {self.yaml_file}. 
\" +\n \"You must at least give a visit id\")\n # Populate informatio from the information section\n if 'instrument' in keys:\n self.data_source.experiment.instrument = recipe['instrument']\n self.reduction.parser = function_map[recipe['instrument']]\n # Populate cropping information\n if 'crop' in keys:\n self.reduction.crop_function = function_map[\n recipe['crop']['method']]\n if 'kwargs' in recipe['crop']:\n self.reduction.crop_kwargs = recipe['crop']['kwargs']\n # Populate background subtraction method\n if 'background' in keys:\n self.reduction.bkg_function = function_map[\n recipe['background']['method']]\n if 'kwargs' in recipe['background']:\n self.reduction.bkg_kwargs = recipe['background']['kwargs']\n\n # Populate the setup information\n if 'setup' in keys:\n if 'dcd normalisation' in recipe['setup'].keys():\n self.reduction.dcd_normalisation = recipe[\n 'setup']['dcd normalisation']\n self.data_source.links = {\n 'instrument reference': 'doi:10.1107/S0909049512009272'}\n if 'sample size' in recipe['setup'].keys():\n self.reduction.sample_size = make_tuple(recipe[\n 'setup']['sample size'])\n try:\n _ = len(self.reduction.sample_size)\n self.reduction.sample_size = self.reduction.sample_size[0]\n except TypeError:\n pass\n else:\n raise ValueError(\"No sample size given in setup of {}.\".format(\n self.yaml_file))\n if 'beam width' in recipe['setup'].keys():\n self.reduction.beam_width = make_tuple(recipe[\n 'setup']['beam width'])\n try:\n _ = len(self.reduction.beam_width)\n self.reduction.beam_width = self.reduction.beam_width[0]\n except TypeError:\n pass\n else:\n raise ValueError(\n f\"No beam width given in setup of {self.yaml_file}\"\n )\n if 'theta axis' in recipe['setup'].keys():\n self.data_source.experiment.measurement.theta_axis_name = (\n recipe['setup']['theta axis'])\n if 'q axis' in recipe['setup'].keys():\n self.data_source.experiment.measurement.q_axis_name = (\n recipe['setup']['q axis'])\n if 'transpose' in recipe['setup'].keys():\n self.data_source.experiment.measurement.transpose = (\n recipe['setup']['transpose'])\n if self.data_source.experiment.measurement.transpose:\n self.data_source.experiment.measurement.qz_dimension = 0\n self.data_source.experiment.measurement.qxy_dimension = 1\n if 'pixel max' in recipe['setup'].keys():\n self.data_source.experiment.measurement.pixel_max = recipe[\n 'setup']['pixel max']\n if 'hot pixel max' in recipe['setup'].keys():\n self.data_source.experiment.measurement.hot_pixel_max = recipe[\n 'setup']['hot pixel max']\n else:\n raise ValueError(f\"No setup given in {self.yaml_file}.\")\n if 'output_columns' in keys:\n if recipe['output columns'] == 3:\n self.data = Data(\n columns=[\n 'Qz / Aa^-1', 'RQz', 'sigma RQz, standard deviation'])\n if recipe['output columns'] == 34:\n self.data = Data(columns='both')\n if 'rebin' in keys:\n if 'n qvectors' in recipe['rebin'].keys():\n self.data.n_qvectors = recipe['rebin']['n qvectors']\n elif 'min' in recipe['rebin'].keys() and 'max' in recipe[\n 'rebin'].keys() and 'step' in recipe['rebin'].keys():\n self.data.q_step = recipe['rebin']['step']\n if 'shape' in recipe['rebin'].keys():\n self.data.q_shape = recipe['rebin']['shape']\n else:\n raise ValueError(\"Please define parameters of \" +\n f\"rebin in {self.yaml_file}.\")\n else:\n self.data.rebin = False\n\n\ndef log_processing_stage(processing_stage):\n \"\"\"\n Simple function to make logging slightly neater.\n \"\"\"\n debug.log(\"-\" * 10)\n debug.log(processing_stage, unimportance=0)\n debug.log(\"-\" * 10)\n\n\ndef 
i07reduce(run_numbers, yaml_file, directory='/dls/{}/data/{}/{}/',\n title='Unknown', filename=None,\n q_subsample_dicts=None):\n \"\"\"\n The runner that parses the yaml file and performs the data reduction.\n\n run_numbers (:py:attr:`list` of :py:attr:`int`):\n Reflectometry scans that make up the profile.\n yaml_file (:py:attr:`str`):\n File path to yaml config file\n directory (:py:attr:`str`):\n Outline for directory path.\n title (:py:attr:`str`):\n A title for the experiment.\n filename:\n Either a full path to the .dat file that will be produced by this\n function, or a directory. If a directory is given, then the filename\n will be automatically generated and the file will be placed in the\n specified directory.\n q_subsample_dicts:\n A list of dictionaries, which takes the form:\n [{'scan_ID': ID, 'q_min': q_min, 'q_max': q_max},...]\n where type(ID) = str, type(q_min)=float, type(q_max)=float.\n \"\"\"\n\n # Make sure the directory is properly formatted.\n if not str(directory).endswith(os.sep):\n directory = directory + os.sep\n the_boss = Foreperson(run_numbers, yaml_file, directory, title)\n\n # Necessary to distnguish the same data processed by different pipelines.\n yaml_pipeline_name = yaml_file.split(os.sep)[-1][:-5]\n\n files_to_reduce = the_boss.reduction.input_files\n\n log_processing_stage(\"File parsing\")\n refl = Profile.fromfilenames(files_to_reduce, the_boss.reduction.parser)\n\n # Set the energy correctly.\n the_boss.data_source.experiment.energy = refl.energy\n\n log_processing_stage(\"Cropping\")\n # Currently, only crop_to_region is implemented.\n if the_boss.reduction.crop_function is not cropping.crop_to_region and \\\n the_boss.reduction.crop_function is not None:\n raise NotImplementedError(\n \"The only implemented cropping function is crop_to_region.\")\n\n # Check to see if we were given an explicit cropping region. If not, use\n # the first (and likely only) signal region.\n if (the_boss.reduction.crop_function is cropping.crop_to_region and\n the_boss.reduction.crop_kwargs is None):\n roi = refl.scans[0].metadata.signal_regions[0]\n the_boss.reduction.crop_kwargs = {'region': roi}\n debug.log(f\"Crop ROI '{str(roi)}' generated from the .nxs file.\")\n else:\n the_boss.reduction.crop_kwargs = {\n 'region': Region(**the_boss.reduction.crop_kwargs)\n }\n refl.crop(the_boss.reduction.crop_function,\n **the_boss.reduction.crop_kwargs)\n\n\n\n log_processing_stage(\"Subtracting background\")\n # Before subtracting background, make sure that, by default, we're at least\n # trying to subtract background from roi_2.\n if the_boss.reduction.bkg_function is background.roi_subtraction:\n # Make sure we have the desired background regions.\n if the_boss.reduction.bkg_kwargs is None:\n the_boss.reduction.bkg_kwargs = {\n 'list_of_regions': refl.scans[0].metadata.background_regions}\n else:\n the_boss.reduction.bkg_kwargs = {\n 'list_of_regions': Region(**the_boss.reduction.bkg_kwargs)\n }\n else:\n print(\"COULD NOT SUBTRACT BACKGROUND. 
SKIPPING...\")\n if the_boss.reduction.bkg_function is not None:\n refl.bkg_sub(the_boss.reduction.bkg_function,\n **the_boss.reduction.bkg_kwargs)\n the_boss.reduction.data_state.background = 'corrected'\n\n\n\n log_processing_stage(\"Performing data corrections...\")\n if the_boss.reduction.dcd_normalisation is not None:\n log_processing_stage(\"DCD normalisation\")\n itp = corrections.get_interpolator(\n the_boss.reduction.dcd_normalisation, i07_dat_to_dict_dataframe)\n refl.qdcd_normalisation(itp)\n the_boss.reduction.data_state.dcd = 'normalised'\n \n\n\n log_processing_stage(\"Footprint correction.\")\n refl.footprint_correction(\n the_boss.reduction.beam_width, the_boss.reduction.sample_size)\n \n\n\n\n log_processing_stage(\"Transmission normalisation.\")\n refl.transmission_normalisation()\n the_boss.reduction.data_state.transmission = 'normalised'\n refl.concatenate()\n\n\n if q_subsample_dicts is not None:\n log_processing_stage(\n \"Doctoring data.\\nSorry, I mean: Bounding q-vectors.\")\n # We'll need to subsample a subset of our scans.\n for q_subsample_dict in q_subsample_dicts:\n refl.subsample_q(**q_subsample_dict)\n debug.log(\"Limited q-range on specified scans.\")\n\n # Rebin the data, if the user requested this.\n if the_boss.data.rebin:\n log_processing_stage(\"Rebinning the data.\")\n if the_boss.data.q_min is None:\n debug.log(\"Linearly rebinning data into \" +\n str(the_boss.data.n_qvectors) + \" uniformly spaced \" +\n \"points in q-space.\", unimportance=2)\n refl.rebin(number_of_q_vectors=the_boss.data.n_qvectors)\n else:\n if the_boss.data.q_shape == 'linear':\n debug.log(\"Rebinning data linearly.\", unimportance=2)\n spacing = np.linspace\n elif the_boss.data.q_shape == 'log':\n debug.log(\"Rebinning data logarithmically\", unimportance=2)\n spacing = np.logspace\n debug.log(\n f\"Spacing generated from {refl.q_vectors.min()}Å to \" +\n f\"{refl.q_vectors.max()}Å.\", unimportance=2\n )\n refl.rebin(new_q=spacing(refl.q_vectors.min(), refl.q_vectors.max(),\n the_boss.data.q_step))\n the_boss.reduction.data_state.rebinned = the_boss.data.q_shape\n\n the_boss.data_source.experiment.measurement.q_range = [\n str(refl.q_vectors.min()), str(refl.q_vectors.max())]\n the_boss.data.n_qvectors = str(len(refl.reflectivity))\n\n\n\n # Prepare the data array.\n data = np.array([refl.q_vectors, refl.reflectivity, refl.reflectivity_e]).T\n debug.log(\"XRR reduction completed.\", unimportance=2)\n\n # Work out where to save the file.\n datetime_str = datetime.now().strftime(\"%Y-%m-%d_%Hh%Mm%Ss\")\n dat_filename = 'XRR_{}_'.format(\n run_numbers[0]) + yaml_pipeline_name + datetime_str + \".dat\"\n if filename is None:\n # Make sure that the processing directory exists.\n processing_path = path.join(the_boss.directory_path, 'processing')\n if not os.path.exists(processing_path):\n os.makedirs(processing_path)\n # Now prepare the full path to the file\n filename = (processing_path + dat_filename)\n elif os.path.isdir(filename):\n # It's possible we were given a directory in which to save the created\n # file. 
In this case, use the filename variable as a directory and add\n # our auto generated filename to it.\n filename = os.path.join(filename, dat_filename)\n\n # Write the data.\n np.savetxt(\n filename, data, header=f\"{dump(vars(the_boss))}\\n Q(1/Å) R R_error\"\n )\n\n debug.log(\"-\" * 10)\n debug.log(f\"Reduced data stored at {filename}\", unimportance=0)\n debug.log(\"-\" * 10)\n" }, { "alpha_fraction": 0.6384810209274292, "alphanum_fraction": 0.6384810209274292, "avg_line_length": 26.054794311523438, "blob_id": "2fe699bcc68adc2642d59042afcc8a08389a489e", "content_id": "964dc99e5fc140d6ffdcaa384e8864a1b768742b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1975, "license_type": "permissive", "max_line_length": 80, "num_lines": 73, "path": "/src/islatu/metadata.py", "repo_name": "DiamondLightSource/islatu", "src_encoding": "UTF-8", "text": "\"\"\"\nThis module contains the Metadata class, returned by parser methods in the\nislatu.io module. This class provides a consistent way to refer to metadata\nreturned by different detectors/instruments, and also contains a dictionary\nof all of the metadata as scraped from the parsed file.\n\"\"\"\n\nfrom abc import abstractmethod\n\nimport numpy as np\n\n\nclass Metadata:\n \"\"\"\n An ABC for classes that store metadata parsed from data files. This defines\n the properties that must be implemented by parsing classes.\n \"\"\"\n\n def __init__(self, local_path):\n self.local_path = local_path\n\n @property\n @abstractmethod\n def probe_energy(self):\n \"\"\"\n This must be overridden.\n \"\"\"\n raise NotImplementedError()\n\n @property\n @abstractmethod\n def default_axis(self) -> np.ndarray:\n \"\"\"\n Returns a numpy array of data associated with the default axis, where\n \"default axis\" should be understood in the NeXus sense to mean the\n experiment's dependent variable.\n \"\"\"\n raise NotImplementedError()\n\n @property\n @abstractmethod\n def default_axis_name(self) -> str:\n \"\"\"\n Returns the name of the default axis, as it was recorded in the data\n file stored at local_path.\n \"\"\"\n raise NotImplementedError()\n\n @property\n @abstractmethod\n def default_axis_type(self) -> str:\n \"\"\"\n Returns what type of default axis we have. 
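That is, whether the\n        scan's independent variable was recorded as a momentum transfer, a\n        sample angle, or a detector (two-theta) angle. 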
Options are 'q', 'th' or\n 'tth'.\n \"\"\"\n raise NotImplementedError()\n\n @property\n @abstractmethod\n def transmission(self):\n \"\"\"\n Proportional to the fraction of probe particles allowed by an attenuator\n to strike the sample.\n \"\"\"\n raise NotImplementedError()\n\n @property\n @abstractmethod\n def detector_distance(self):\n \"\"\"\n Returns the distance between sample and detector.\n \"\"\"\n raise NotImplementedError()\n" }, { "alpha_fraction": 0.6052311658859253, "alphanum_fraction": 0.6491281390190125, "avg_line_length": 29.728971481323242, "blob_id": "8cd8ad9ab4942e2b465b1829c9c70d7a2045213b", "content_id": "f6d41f11739b4faab35b8992f41b2d79b775cb3f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9864, "license_type": "permissive", "max_line_length": 80, "num_lines": 321, "path": "/tests/unit/test_io.py", "repo_name": "DiamondLightSource/islatu", "src_encoding": "UTF-8", "text": "\"\"\"\nThis file contains a suite of tests for the islatu.io module.\n\"\"\"\n\n# The following is necessary to use classes to share parameters using\n# mark.parametrize.\n# pylint: disable=no-self-use\n\n# The following is necessary because of the dynamic nature of the nexusformat\n# package's type generation.\n# pylint: disable=no-member\n\n# The following is to stop pylint from complaining about protected member tests.\n# pylint: disable=protected-access\n\nimport pytest\nimport numpy as np\nimport nexusformat.nexus.tree as nx\nfrom pytest_lazyfixture import lazy_fixture as lazy\n\nfrom islatu.io import I07Nexus\nfrom islatu.region import Region\n\n\n@pytest.mark.parametrize(\n 'nexus_base',\n [lazy('nexus_base_object_01'), lazy('i07_nexus_object_01')])\nclass TestNexusBaseAttrTypes:\n \"\"\"\n This class checks that the types of each of the fixtures that inherits from\n NexusBase have attributes whose types are correct, and that can be accessed\n without raising e.g. a ValueError (as would happen if assumptions relating\n to the structure of the nexus file are broken).\n \"\"\"\n\n def test_local_path(self, nexus_base):\n \"\"\"\n Make sure that we can access the local_path.\n \"\"\"\n assert isinstance(nexus_base.local_path, str)\n\n def test_nxfile(self, nexus_base):\n \"\"\"\n Makes sure that our nxfile has the correct type.\n \"\"\"\n assert isinstance(nexus_base.nxfile, nx.NXroot)\n\n def test_src_path(self, nexus_base):\n \"\"\"\n Makes sure that our src_path can be acquired. Also make sure that\n it isn't an empty string.\n \"\"\"\n assert isinstance(nexus_base.src_path, str)\n assert len(nexus_base.src_path) != 0\n\n def test_entry(self, nexus_base):\n \"\"\"\n Makes sure that there is only one entry in the nexus_base. Otherwise, a\n ValueError will be thrown. 
This also tests that the entry has the\n correct type.\n \"\"\"\n assert isinstance(nexus_base.entry, nx.NXentry)\n\n def test_instrument(self, nexus_base):\n \"\"\"\n Makes sure that we can access the instrument property without throwing,\n and that our instrument has the correct type.\n \"\"\"\n assert isinstance(nexus_base.instrument, nx.NXinstrument)\n\n def test_detector(self, nexus_base):\n \"\"\"\n Makes sure that we can access the detector property of our nexus_base\n without throwing anything, and that it has the correct type.\n \"\"\"\n assert isinstance(nexus_base.detector, nx.NXdetector)\n\n def test_default_axis_nxdata(self, nexus_base):\n \"\"\"\n Makes sure that our default axis is provided as a numpy array.\n \"\"\"\n assert isinstance(nexus_base.default_axis, np.ndarray)\n\n def test_default_signal_nxdata(self, nexus_base):\n \"\"\"\n Make sure that we can access our default signal, and that its type is\n np.ndarray.\n \"\"\"\n assert isinstance(nexus_base.default_signal, np.ndarray)\n\n\n@pytest.mark.parametrize(\n 'nexus_base, path',\n [\n (lazy('nexus_base_object_01'), lazy('path_to_i07_nxs_01')),\n (lazy('i07_nexus_object_01'), lazy('path_to_i07_nxs_01'))\n ]\n)\ndef test_local_path(nexus_base, path):\n \"\"\"\n Make sure that the local_paths of our nexus_base objects are\n correct.\n \"\"\"\n assert nexus_base.local_path == path\n\n\n@pytest.mark.parametrize(\n 'nexus_base, path',\n [\n (lazy('nexus_base_object_01'),\n '/dls/i07/data/2021/si28707-1/i07-404876.nxs'),\n (lazy('i07_nexus_object_01'),\n '/dls/i07/data/2021/si28707-1/i07-404876.nxs')\n ]\n)\ndef test_src_path(nexus_base, path):\n \"\"\"\n Checks that the parsed nxs path is correct. Worth noting that, when\n extending this test for more .nxs files, it's important to manually\n scrape the src_path by parsing nxfile.tree, unless you already know\n what value this will take (because, e.g., you just downloaded the file).\n \"\"\"\n assert nexus_base.src_path == path\n\n\n@pytest.mark.parametrize(\n 'i07_nexus, path',\n [\n (lazy('i07_nexus_object_01'),\n '/dls/i07/data/2021/si28707-1/excaliburScan404876_000001.h5')\n ]\n)\ndef test_src_data_path(i07_nexus: I07Nexus, path):\n \"\"\"\n Make sure we can properly find the path to where the data was originally\n stored, as referenced in the .nxs file. This is used to guess where the\n .h5 file is stored locally.\n \"\"\"\n assert i07_nexus._src_data_path == path\n\n\n@pytest.mark.parametrize(\n 'i07_nexus, path',\n [\n (lazy('i07_nexus_object_01'), lazy('path_to_i07_h5_01'))\n ]\n)\ndef test_local_data_path(i07_nexus: I07Nexus, path):\n \"\"\"\n Tests our class' ability to find .h5 files stored locally. This test\n only makes sure that our class can find .h5 files that are stored in the\n same directory as the .nxs file. 
More directories are searched, but\n these are not tested (a test generating .h5 files throughout the\n directory structure would not be portable, and would merit tests of its\n own).\n \"\"\"\n assert i07_nexus.local_data_path == path\n\n\n@pytest.mark.parametrize(\n 'i07_nexus, correct_num',\n [(lazy('i07_nexus_object_01'), 3)]\n)\ndef test_number_of_regions(i07_nexus: I07Nexus, correct_num):\n \"\"\"\n Makes sure that we can correctly determine the number of regions of\n interest in the nexus file.\n \"\"\"\n assert i07_nexus._number_of_regions == correct_num\n\n\n@pytest.mark.parametrize(\n 'i07_nexus, region_number, kind, result',\n [\n (lazy('i07_nexus_object_01'), 1, 'x_1', 'Region_1_X'),\n (lazy('i07_nexus_object_01'), 1, 'x_start', 'Region_1_X'),\n (lazy('i07_nexus_object_01'), 17, 'Height', 'Region_17_Height'),\n (lazy('i07_nexus_object_01'), 9917, 'y_1', 'Region_9917_Y'),\n (lazy('i07_nexus_object_01'), 6, 'Width', 'Region_6_Width'),\n (lazy('i07_nexus_object_01'), 4, 'y_start', 'Region_4_Y')\n ]\n)\ndef test_region_bounds_keys(i07_nexus: I07Nexus,\n region_number, kind, result):\n \"\"\"\n Makes sure that region bounds keys are being generated correctly.\n \"\"\"\n assert i07_nexus._get_region_bounds_key(region_number, kind) == result\n\n\n@pytest.mark.parametrize(\n 'i07_nexus, regions',\n [\n (lazy('i07_nexus_object_01'), lazy('signal_regions_01'))\n ]\n)\ndef test_signal_regions_len(i07_nexus, regions):\n \"\"\"\n Make sure our signal regions has the correct length.\n \"\"\"\n assert len(i07_nexus.signal_regions) == len(regions)\n\n\n@pytest.mark.parametrize(\n 'i07_nexus, regions',\n [\n (lazy('i07_nexus_object_01'), lazy('signal_regions_01'))\n ]\n)\ndef test_signal_regions(i07_nexus: I07Nexus, regions):\n \"\"\"\n Tests the I07Nexus class' ability to parse signal regions of interest.\n \"\"\"\n # Note: this should probably always be a for loop with just 1 iteration.\n for i, _ in enumerate(regions):\n assert i07_nexus.signal_regions[i] == regions[i]\n\n\n@pytest.mark.parametrize(\n 'i07_nexus, regions',\n [\n (lazy('i07_nexus_object_01'), lazy('bkg_regions_01'))\n ]\n)\ndef test_bkg_regions_len(i07_nexus: I07Nexus, regions):\n \"\"\"\n Makes sure that we can extract background regions from an I07 nexus\n file.\n \"\"\"\n assert len(i07_nexus.background_regions) == len(regions)\n\n\n@pytest.mark.parametrize(\n 'i07_nexus, regions',\n [\n (lazy('i07_nexus_object_01'), lazy('bkg_regions_01'))\n ]\n)\ndef test_bkg_regions(i07_nexus: I07Nexus, regions):\n \"\"\"\n Makes sure that we can extract background regions from an I07 nexus\n file.\n \"\"\"\n for i, _ in enumerate(regions):\n assert i07_nexus.background_regions[i] == regions[i]\n\n\n@pytest.mark.parametrize(\n 'i07_nexus, transmission',\n [(lazy('i07_nexus_object_01'), 0.000448426658633058)])\ndef test_transmission(i07_nexus: I07Nexus, transmission):\n \"\"\"\n Make sure we can correctly parse the transmission coefficient.\n \"\"\"\n assert i07_nexus.transmission == transmission\n\n\n@pytest.mark.parametrize(\n 'i07_nexus, probe_energy',\n [(lazy('i07_nexus_object_01'), 12.5)]\n)\ndef test_probe_energy(i07_nexus: I07Nexus, probe_energy):\n \"\"\"\n Make sure we can extract the energy of the probe particle from the .nxs\n file.\n \"\"\"\n assert i07_nexus.probe_energy == probe_energy\n\n\n@pytest.mark.parametrize(\n 'i07_nexus, detector_distance',\n [(lazy('i07_nexus_object_01'), 1.1155)]\n)\ndef test_detector_distance(i07_nexus: I07Nexus, detector_distance):\n \"\"\"\n Make sure that we can extract the detector distance 
from the .nxs file.\n \"\"\"\n assert i07_nexus.detector_distance == detector_distance\n\n\n@pytest.mark.parametrize(\n 'i07_nexus, description',\n [(lazy('i07_nexus_object_01'), 'q')]\n)\ndef test_default_axis_type(i07_nexus: I07Nexus, description):\n \"\"\"\n Make sure that we are correctly identifying the kind of axis data\n stored in the nexus file.\n \"\"\"\n assert i07_nexus.default_axis_type == description\n\n\n@pytest.mark.parametrize(\n 'i, ith_region',\n [\n (1, Region(1208, 1208+50, 206, 206+18)),\n (2, Region(1258, 1258+50, 206, 206+18)),\n (3, Region(1208, 1208+50, 188, 188+18))\n ]\n)\ndef test_ith_region_nxs_01(i07_nexus_object_01: I07Nexus,\n i, ith_region):\n \"\"\"\n Make sure that we can extract the ith region from i07_nexus_object_01.\n \"\"\"\n assert i07_nexus_object_01._get_ith_region(i) == ith_region\n\n\ndef test_detector_name(i07_nexus_object_01: I07Nexus):\n \"\"\"\n Make sure that we can properly extract the name of the detector.\n \"\"\"\n assert i07_nexus_object_01.detector_name == I07Nexus.excalibur_detector_2021\n\n\ndef test_excalibur_name():\n \"\"\"\n Make sure that we're spelling the detector name properly!\n \"\"\"\n assert I07Nexus.excalibur_detector_2021 == \"excroi\"\n assert I07Nexus.excalibur_04_2022 == \"exr\"\n" }, { "alpha_fraction": 0.7730690240859985, "alphanum_fraction": 0.7758031487464905, "avg_line_length": 31.511110305786133, "blob_id": "f1c106eed39c3f5f58038e0f70cc40dab54ebd89", "content_id": "e0f886930e84a742b3ee363a9a0f6a2f2e0e1731", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1463, "license_type": "permissive", "max_line_length": 143, "num_lines": 45, "path": "/README.md", "repo_name": "DiamondLightSource/islatu", "src_encoding": "UTF-8", "text": "# islatu\n\nX-ray reflectometry reduction from Diamond Light Source\n\n[![codecov](https://codecov.io/gh/RBrearton/islatu/branch/master/graph/badge.svg?token=FGIV0MVHS8)](https://codecov.io/gh/RBrearton/islatu)\n[![Actions Status](https://github.com/RBrearton/islatu/workflows/pytest/badge.svg)](https://github.com/pytest/islatu/actions)\n[![Documentation Status](https://readthedocs.org/projects/islatu/badge/?version=latest)](https://islatu.readthedocs.io/en/latest/?badge=latest)\n\n### Install\n\nThis package can be easily installed using `pip install islatu`.\n\n### Installation from source\n\nTo install in a fresh environment, first ensure that pip is available. For\nexample, using conda:\n\n```\nconda create --name islatu\nconda activate islatu\nconda install pip\n```\n\nAssuming that pip is available, installation of the library from source can be\ndone by cloning this repository. Navigate to its directory and use pip to\ninstall this package and its dependencies as follows:\n\n```\npython -m pip install . 
-r requirements.txt\n```\n\nMake sure that your installation is functioning by running `pytest`.\n\n### Documentation\n\nThe documentation for this package can be found at https://islatu.readthedocs.io/en/latest/\n\nAn example of a typical data reduction workflow can be found at https://islatu.readthedocs.io/en/latest/i07_reflectivity.html\n\nTo build the documentation, make sure you have sphinx installed on your system.\nGo to the docs directory and run\n\n```\nmake html\n```\n" }, { "alpha_fraction": 0.6502947211265564, "alphanum_fraction": 0.6620824933052063, "avg_line_length": 22.136363983154297, "blob_id": "d0505b1ff00d3053439ffe8eb43abb6d43775ce3", "content_id": "4b974ccf9740ad39a2c01f1eccd6bf0a9c7a4675", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 509, "license_type": "permissive", "max_line_length": 68, "num_lines": 22, "path": "/tests/unit/test_debug.py", "repo_name": "DiamondLightSource/islatu", "src_encoding": "UTF-8", "text": "\"\"\"\nThis module contains a couple of simple tests for Islatu's debugger.\n\"\"\"\n\nfrom islatu.debug import debug\n\n\ndef test_debug_default_log_lvl():\n \"\"\"\n Make sure that the debugger starts out with a logging_lvl of 1.\n \"\"\"\n assert debug.logging_level == 1\n\n\ndef test_debug_log_lvl_change():\n \"\"\"\n Make sure that we can change the logging level, if required.\n \"\"\"\n debug.logging_level = 2\n assert debug.logging_level == 2\n debug.logging_level = 1\n assert debug.logging_level == 1\n" }, { "alpha_fraction": 0.614797055721283, "alphanum_fraction": 0.6723079085350037, "avg_line_length": 26.477365493774414, "blob_id": "8a51433f9788daac5ca077a16f72bc28e7555547", "content_id": "bc9e1befae7b398c874a0867f2662fa9b269f5a5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6677, "license_type": "permissive", "max_line_length": 80, "num_lines": 243, "path": "/tests/conftest.py", "repo_name": "DiamondLightSource/islatu", "src_encoding": "UTF-8", "text": "\"\"\"\nThis module contains fixture definitions used when testing the islatu module.\n\"\"\"\n\n# The following pylint rule is, unfortunately, necessary due to how pytest works\n# with fixtures. Consequently, all fixtures are defined in this file so that\n# redefined-outer-name only needs to be disabled once.\n# pylint: disable=redefined-outer-name\n\n# We need to test protected members too.\n# pylint: disable=protected-access\n\nimport os\nimport pytest\nimport numpy as np\n\nfrom islatu.io import I07Nexus, i07_nxs_parser, i07_dat_to_dict_dataframe\nfrom islatu.corrections import get_interpolator\nfrom islatu.data import Data, MeasurementBase\nfrom islatu.region import Region\nfrom islatu.refl_profile import Profile\n\n\n@pytest.fixture\ndef path_to_resources():\n \"\"\"\n Returns the path to the resources folder.\n \"\"\"\n if os.path.isdir(\"resources\"):\n return \"resources\" + os.sep\n if os.path.isdir(\"tests\") and os.path.isdir(\"src\"):\n return \"tests\" + os.sep + \"resources\" + os.sep\n raise FileNotFoundError(\n \"Couldn't locate the tests/resources directory. Make sure that \" +\n \"the pytest command is run from within the base islatu directory\" +\n \", or from within the tests directory.\"\n )\n\n\n@pytest.fixture\ndef path_to_i07_nxs_01(path_to_resources):\n \"\"\"\n Returns the path to an i07 nexus file. 
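(This is i07-404876.nxs, kept in the test resources directory.) 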
If it can't be found, raises.\n    \"\"\"\n    return os.path.join(path_to_resources, \"i07-404876.nxs\")\n\n\n@pytest.fixture\ndef path_to_i07_nxs_02(path_to_resources):\n    \"\"\"\n    Returns the path to a second i07 nexus file. If it can't be found, raises.\n    \"\"\"\n    return os.path.join(path_to_resources, \"i07-404877.nxs\")\n\n\n@pytest.fixture\ndef path_to_dcd_normalisation_01(path_to_resources):\n    \"\"\"\n    Returns the path to the qdcd normalisation file corresponding to i07_nxs_01.\n    \"\"\"\n    return os.path.join(path_to_resources, \"404863.dat\")\n\n\n@pytest.fixture\ndef parsed_dcd_normalisation_01(path_to_dcd_normalisation_01):\n    \"\"\"\n    Returns the ([metadata] dict, [data] dataframe) relating to the first\n    dcd normalisation file.\n    \"\"\"\n    return i07_dat_to_dict_dataframe(path_to_dcd_normalisation_01)\n\n\n@pytest.fixture\ndef dcd_norm_01_splev(path_to_dcd_normalisation_01):\n    \"\"\"\n    Returns the scipy splev corresponding to the first dcd normalisation file.\n    \"\"\"\n    return get_interpolator(path_to_dcd_normalisation_01,\n                            i07_dat_to_dict_dataframe)\n\n\n@pytest.fixture\ndef path_to_i07_h5_01(path_to_resources):\n    \"\"\"\n    Returns the path to an i07 h5 file. If it can't be found, raises.\n    \"\"\"\n    return os.path.join(path_to_resources, \"excaliburScan404876_000001.h5\")\n\n\n@pytest.fixture\ndef nexus_base_object_01(path_to_i07_nxs_01):\n    \"\"\"\n    Returns the path's corresponding i07 nexus object.\n    \"\"\"\n    return I07Nexus(path_to_i07_nxs_01)\n\n\n@pytest.fixture\ndef i07_nexus_object_01(path_to_i07_nxs_01):\n    \"\"\"\n    Returns the path's corresponding i07 nexus object.\n    \"\"\"\n    return I07Nexus(path_to_i07_nxs_01)\n\n\n@pytest.fixture\ndef signal_regions_01():\n    \"\"\"\n    Returns the list of signal regions recorded in i07_nexus_object_01.\n    \"\"\"\n    return [Region(1208, 1208+50, 206, 206+18)]\n\n\n@pytest.fixture\ndef bkg_regions_01():\n    \"\"\"\n    Returns the list of background regions recorded in i07_nexus_object_01.\n    \"\"\"\n    return [Region(1258, 1258+50, 206, 206+18),\n            Region(1208, 1208+50, 188, 188+18)]\n\n\n@pytest.fixture\ndef custom_bkg_region_01():\n    \"\"\"\n    Returns a decent background region, specifically chosen for scan_01.\n    \"\"\"\n    return Region(1340, 1420, 220, 300)\n\n\n@pytest.fixture\ndef scan2d_from_nxs_01(path_to_i07_nxs_01):\n    \"\"\"\n    Uses the i07_nxs_parser to produce an instance of Scan2D from the given\n    path.\n    \"\"\"\n    return i07_nxs_parser(path_to_i07_nxs_01)\n\n\n@pytest.fixture\ndef scan2d_from_nxs_01_copy(path_to_i07_nxs_01):\n    \"\"\"\n    An exact copy of the above Scan2D instance. 
Useful to have in some tests.\n    \"\"\"\n    return i07_nxs_parser(path_to_i07_nxs_01)\n\n\n@pytest.fixture\ndef scan_02(path_to_i07_nxs_02):\n    \"\"\"\n    Returns another scan at higher q.\n    \"\"\"\n    return i07_nxs_parser(path_to_i07_nxs_02)\n\n\n@pytest.fixture\ndef generic_data_01():\n    \"\"\"\n    Constructs a generic, valid, Data instance.\n    \"\"\"\n    # Some meaningless values.\n    q_vecs = np.arange(10)/10\n    intensities = np.arange(1100, 300, -45)[:10]\n\n    # A realistic value (in keV)\n    energy = 12.5\n\n    return Data(intensities, np.sqrt(intensities), energy, q_vectors=q_vecs)\n\n\n@pytest.fixture\ndef generic_data_02():\n    \"\"\"\n    Constructs another random Data instance, this time initializing with theta\n    rather than q.\n    \"\"\"\n    # More meaningless values.\n    theta = np.arange(6)\n    intensities = np.arange(11100012, 0, -12938)[:6]\n\n    # Cu k-alpha\n    energy = 8.04\n\n    return Data(intensities, np.sqrt(intensities), energy, theta)\n\n\n@pytest.fixture\ndef measurement_base_01(path_to_i07_nxs_01, generic_data_01: Data):\n    \"\"\"\n    Constructs a fairly meaningless instance of MeasurementBase to test against.\n    This uses generic_data_01 to populate its data, and gets metadata by\n    parsing a nxs file.\n    \"\"\"\n    i07_nxs_metadata = I07Nexus(path_to_i07_nxs_01)\n    return MeasurementBase(generic_data_01.intensity,\n                           generic_data_01.intensity_e, generic_data_01.energy,\n                           i07_nxs_metadata, q=generic_data_01._q)\n\n\n@pytest.fixture\ndef region_01():\n    \"\"\"\n    Returns a fairly generic instance of islatu.region's Region class.\n    \"\"\"\n    return Region(x_start=1056, x_end=1124, y_start=150, y_end=250)\n\n\n@pytest.fixture\ndef profile_01(path_to_i07_nxs_01):\n    \"\"\"\n    Returns an instance of the Profile class that contains just scan_01.\n    \"\"\"\n    return Profile.fromfilenames([path_to_i07_nxs_01], i07_nxs_parser)\n\n\n@pytest.fixture\ndef profile_0102(path_to_i07_nxs_01, path_to_i07_nxs_02):\n    \"\"\"\n    Returns an instance of the Profile class that contains scan_01 and scan_02.\n    \"\"\"\n    return Profile.fromfilenames([path_to_i07_nxs_01, path_to_i07_nxs_02],\n                                 i07_nxs_parser)\n\n\n@pytest.fixture\ndef old_dcd_data(path_to_resources):\n    \"\"\"\n    Returns a np.ndarray of the data as processed by islatu prior to a\n    substantial refactor. This old DCD data was confirmed to be correctly\n    reduced by beamline staff.\n    \"\"\"\n    return np.loadtxt(os.path.join(\n        path_to_resources, \"XRR_404875_dcd_template2021-11-01_15h35m02s.dat\"))\n\n\n@pytest.fixture\ndef process_xrr_path(path_to_resources):\n    \"\"\"\n    Uses relative pathfinding to return a valid path to process_xrr.py.\n    \"\"\"\n    return os.path.join(\n        path_to_resources, '../../CLI/process_xrr.py'\n    )\n" }, { "alpha_fraction": 0.5909090638160706, "alphanum_fraction": 0.5909090638160706, "avg_line_length": 18, "blob_id": "bb14004bd74353f8aea3d791cddab061ee4635d7", "content_id": "80d37c0e23c96c1b3d4e8c94289b660bda540518", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 132, "license_type": "permissive", "max_line_length": 35, "num_lines": 7, "path": "/docs/source/refl_profile.rst", "repo_name": "DiamondLightSource/islatu", "src_encoding": "UTF-8", "text": "islatu\\.refl_profile\n====================\n\n.. 
automodule:: islatu.refl_profile\n    :members:\n    :undoc-members:\n    :show-inheritance:" }, { "alpha_fraction": 0.5813295841217041, "alphanum_fraction": 0.5846298933029175, "avg_line_length": 32.84574508666992, "blob_id": "c9a6a6b3292e2c60d83c51a68b7616289888859f", "content_id": "7397c0db40cabd8612ed86f0e59cf4f7fe02d1bc", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6363, "license_type": "permissive", "max_line_length": 80, "num_lines": 188, "path": "/src/islatu/data.py", "repo_name": "DiamondLightSource/islatu", "src_encoding": "UTF-8", "text": "\"\"\"\nThis module contains both the Data class and the MeasurementBase class.\nIn a reflectometry measurement, the experimental data corresponds to the\nreflected intensity as a function of scattering vector Q. In a typical\ndiffractometer, Q is a virtual axis, calculated geometrically from various motor\npositions. The Data class takes care of these conversions, exposing q, theta,\nintensity, reflectivity, and energy.\n\nThe MeasurementBase class defines a simple class that is Data, but that also has\nmetadata.\n\"\"\"\n\nimport numpy as np\nfrom scipy.constants import physical_constants\n\n\nclass Data:\n    \"\"\"\n    The base class of all Islatu objects that contain data.\n\n    Attributes:\n        intensity:\n            A numpy array containing intensities in this dataset.\n        intensity_e:\n            A numpy array containing the corresponding errors in intensity.\n        theta:\n            A numpy array containing the probe particle's angle of\n            incidence at each intensity.\n        q_vectors:\n            A numpy array containing the magnitude of the probe particle's\n            scattering vector for each intensity value.\n        energy:\n            The energy of the probe particle used to acquire this data. This\n            is necessary to swap between theta and q.\n\n    Args:\n        intensity:\n            A numpy array of the intensities in this dataset.\n        intensity_e:\n            The errors on the intensities.\n        energy:\n            The energy of the probe particle used to acquire this data.\n        theta:\n            A numpy array containing the probe particle's angle of\n            incidence at each intensity. NOTE: only one of theta/q needs to\n            be provided.\n        q_vectors:\n            A numpy array containing the magnitude of the probe particle's\n            scattering vector for each intensity value. NOTE: only one of\n            theta/q needs to be provided.\n    \"\"\"\n\n    def __init__(self, intensity, intensity_e, energy, theta=None,\n                 q_vectors=None):\n\n        self.intensity = intensity\n        self.intensity_e = intensity_e\n        self.energy = energy\n\n        if (theta is None) and (q_vectors is None):\n            raise ValueError(\n                \"Either theta or q must be provided to create a Data instance\"\n            )\n\n        # When using properties, it won't matter which of these ends up as None.\n        self._theta = theta\n        self._q = q_vectors\n\n    @property\n    def reflectivity(self) -> np.array:\n        \"\"\"\n        Returns the intensity, normalized such that the maximum value of the\n        intensity is equal to 1. 
To acquire the\n        unnormalised values, use the intensity attribute directly.\n        \"\"\"\n        return self.intensity/np.amax(self.intensity)\n\n    @property\n    def reflectivity_e(self) -> np.array:\n        \"\"\"\n        Returns the errors on the intensity, divided by the maximum value of the\n        intensity array.\n        \"\"\"\n        return self.intensity_e/np.amax(self.intensity)\n\n    @property\n    def q_vectors(self) -> np.array:\n        \"\"\"\n        Returns self._q if this instance of Data was generated from q-data.\n        Otherwise, converts from self._theta to q.\n        \"\"\"\n        if (self._q is None) and (self._theta is not None):\n            return self._theta_to_q(self._theta, self.energy)\n        else:\n            return self._q\n\n    @q_vectors.setter\n    def q_vectors(self, value) -> None:\n        \"\"\"\n        Sets self._q.\n        \"\"\"\n        self._q = value\n\n    @property\n    def theta(self) -> np.array:\n        \"\"\"\n        Returns self._theta if this instance of Data was generated from th-data.\n        Otherwise, converts from scattered q to theta.\n        \"\"\"\n        if (self._theta is None) and (self._q is not None):\n            return self._q_to_theta(self._q, self.energy)\n        else:\n            return self._theta\n\n    @theta.setter\n    def theta(self, value) -> None:\n        self._theta = value\n\n    def _theta_to_q(self, theta, energy) -> np.array:\n        \"\"\"\n        Calculates the scattering vector Q from diffractometer theta.\n\n        Args:\n            theta (:py:attr:`array_like`):\n                Array of theta values to be converted.\n            energy (:py:attr:`float`):\n                Energy of the incident probe particle.\n        \"\"\"\n        planck = physical_constants[\"Planck constant in eV s\"][0] * 1e-3\n        speed_of_light = physical_constants[\n            \"speed of light in vacuum\"][0] * 1e10\n        q_values = np.sin(np.radians(theta)) / (planck * speed_of_light)\n\n        q_values *= energy * 4.0 * np.pi\n        return q_values\n\n    def _q_to_theta(self, q_values, energy) -> np.array:\n        \"\"\"\n        Calculates the diffractometer theta from scattering vector Q.\n\n        Args:\n            q_values (:py:attr:`array_like`):\n                Array of q values to be converted.\n            energy (:py:attr:`float`):\n                Energy of the incident probe particle.\n        \"\"\"\n        planck = physical_constants[\"Planck constant in eV s\"][0] * 1e-3\n        speed_of_light = physical_constants[\n            \"speed of light in vacuum\"][0] * 1e10\n        theta_values = planck * speed_of_light * \\\n            np.arcsin(q_values / (energy * 4 * np.pi))\n\n        theta_values = theta_values*180/np.pi\n\n        return theta_values\n\n    def remove_data_points(self, indices):\n        \"\"\"\n        Convenience method for the removal of a specific data point by its\n        index.\n\n        Args:\n            indices:\n                The indices to be removed.\n        \"\"\"\n        if self._q is not None:\n            self._q = np.delete(self._q, indices)\n        if self._theta is not None:\n            self._theta = np.delete(self._theta, indices)\n\n        self.intensity = np.delete(self.intensity, indices)\n        self.intensity_e = np.delete(self.intensity_e, indices)\n\n\nclass MeasurementBase(Data):\n    \"\"\"\n    All measurements derive from this class.\n\n    Attrs:\n        metadata:\n            The metadata relevant to this measurement.\n    \"\"\"\n\n    def __init__(self, intensity, intensity_e, energy, metadata, theta=None,\n                 q=None) -> None:\n        # Initialize the Data.\n        super().__init__(intensity, intensity_e, energy, theta, q)\n        # Store the metadata.\n        self.metadata = metadata\n" }, { "alpha_fraction": 0.5723890662193298, "alphanum_fraction": 0.574406087398529, "avg_line_length": 29.353740692138672, "blob_id": "01e039bc1cd44423ec5f2a535912dab76fa65780", "content_id": "02263948f4eb251427c23dfb677a1db44e8e36fc", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4462, "license_type": "permissive", "max_line_length": 79, "num_lines": 147, "path": "/src/islatu/image.py", 
"repo_name": "DiamondLightSource/islatu", "src_encoding": "UTF-8", "text": "\"\"\"\nThe two-dimension detector generates images of the reflected intensity.\nThe purpose of the Image class stored in this module is the investigation\nand manipulation of these images.\n\"\"\"\n\n\nimport numpy as np\n\n\nclass Image:\n \"\"\"\n This class stores information about the detector images.\n\n Attributes:\n file_path (:py:attr:`str`):\n File path for the image.\n array (:py:attr:`array_like`):\n The image described as an array.\n array_original (:py:attr:`array_like`):\n The original value of the image array when it was loaded from disk.\n array_e (:py:attr:`array_like`):\n The errors on each pixel of the array.\n bkg (:py:attr:`float`):\n The background that was subtracted from the image.\n bkg_e (:py:attr:`float`):\n The uncertainty on the background.\n\n Args:\n file_path (:py:attr:`str`): The file path for the image.\n data (:py:class:`pandas.DataFrame`, optional): Experimental data about\n the measurement. Defaults to :py:attr:`None`.\n transpose (:py:attr:`bool`, optional): Should the data be rotated by\n 90 degrees? Defaults to :py:attr:`False`.\n \"\"\"\n\n def __init__(self, array: np.ndarray, transpose: bool = False):\n \"\"\"\n Initialisation of the :py:class:`islatu.image.Image` class, includes\n assigning uncertainties.\n \"\"\"\n if transpose:\n array = array.T\n self.array = array\n self.array_original = np.copy(array)\n self.array_e = self.initial_std_devs\n self.bkg = 0\n self.bkg_e = 0\n\n @property\n def nominal_values(self):\n \"\"\"\n Get the nominal values of the image array.\n\n Returns:\n :py:attr:`array_like`: Nominal values of image.\n \"\"\"\n return self.array\n\n @property\n def initial_std_devs(self):\n \"\"\"\n Get the standard deviation values of the original raw image array.\n\n Returns:\n :py:attr:`array_like`: Standard deviation values of image.\n \"\"\"\n array_error = np.sqrt(self.array_original)\n array_error[np.where(self.array_original == 0)] = 1\n return array_error\n\n @property\n def shape(self):\n \"\"\"\n Array shape\n\n Returns:\n :py:attr:`tuple` of :py:attr:`int`: The shape of the image.\n \"\"\"\n return self.array.shape\n\n def __repr__(self):\n \"\"\"\n Custom representation.\n\n Returns:\n :py:attr:`array_like`: Image array.\n \"\"\"\n return self.array\n\n def __str__(self):\n \"\"\"\n Custom string.\n\n Returns:\n :py:attr:`array_like`: Image array.\n \"\"\"\n return self.array\n\n def crop(self, crop_function, **kwargs):\n \"\"\"\n Perform an image crop based on some function.\n\n Args:\n crop_function (:py:attr:`callable`): The function to crop the data.\n **kwargs (:py:attr:`dict`): The crop function keyword arguments.\n \"\"\"\n self.array = crop_function(self.array, **kwargs)\n self.array_e = crop_function(self.array_e, **kwargs)\n\n def background_subtraction(self, background_subtraction_function,\n **kwargs):\n \"\"\"\n Perform a background subtraction based on some function.\n\n Args:\n background_subtraction_function (:py:attr:`callable`): The\n function to model the data and therefore remove the background.\n **kwargs (:py:attr:`dict`): The background substraction function\n keyword arguments.\n \"\"\"\n\n bkg_sub_info = background_subtraction_function(\n self, **kwargs\n )\n # Store the calculated background, and its error.\n self.bkg, self.bkg_e = bkg_sub_info.bkg, bkg_sub_info.bkg_e\n\n # Do the subtraction.\n self.array = self.array - self.bkg\n self.array_e = np.sqrt(self.bkg_e**2 + self.array_e**2)\n\n # Expose information 
relating to the background subtraction for\n        # meta-analyses.\n        return bkg_sub_info\n\n    def sum(self):\n        \"\"\"\n        Perform a summation on the image's array.\n\n        Returns:\n            A tuple taking the form (summed_intensity, summed_intensity_e).\n        \"\"\"\n        intensity = np.sum(self.array)\n        intensity_e = np.sqrt(np.sum(self.array_e**2))\n\n        return intensity, intensity_e\n" }, { "alpha_fraction": 0.5745019912719727, "alphanum_fraction": 0.6446214914321899, "avg_line_length": 24.100000381469727, "blob_id": "424fcae384dab223b8c593bd74f573171da7ffff", "content_id": "471df5b304498099f719217b17255c4bec3dfb72", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1255, "license_type": "permissive", "max_line_length": 75, "num_lines": 50, "path": "/tests/unit/test_region.py", "repo_name": "DiamondLightSource/islatu", "src_encoding": "UTF-8", "text": "\"\"\"\nThis module tests the simple islatu.region module's Region class.\n\"\"\"\n\nfrom islatu.region import Region\n\n\ndef test_region_attr_access(region_01: Region):\n    \"\"\"\n    Make sure that we can access a region's start and end attributes.\n    \"\"\"\n    assert region_01.x_start == 1056\n    assert region_01.x_end == 1124\n    assert region_01.y_start == 150\n    assert region_01.y_end == 250\n\n\ndef test_region_instantiation():\n    \"\"\"\n    Make sure that regions correctly set their end to be after their start.\n    \"\"\"\n    region = Region(2, 1, 4, 3)\n\n    assert region.x_start == 1\n    assert region.x_end == 2\n    assert region.y_start == 3\n    assert region.y_end == 4\n\n\ndef test_region_length(region_01: Region):\n    \"\"\"\n    Make sure that regions have the correct length.\n    \"\"\"\n    assert region_01.x_length == 1124 - 1056\n    assert region_01.y_length == 250 - 150\n\n\ndef test_region_num_pixels(region_01: Region):\n    \"\"\"\n    Make sure that regions are correctly calculating the number of pixels\n    contained in them.\n    \"\"\"\n    assert region_01.num_pixels == (1124 - 1056)*(250 - 150)\n\n\ndef test_region_equality(region_01: Region):\n    \"\"\"\n    Make sure that our __eq__ method is working.\n    \"\"\"\n    assert Region(1056, 1124, 150, 250) == region_01\n" }, { "alpha_fraction": 0.7650602459907532, "alphanum_fraction": 0.7831325531005859, "avg_line_length": 9.199999809265137, "blob_id": "7b7225add7fb7b0d5f3f2506c599ccfb113ce59a", "content_id": "705d27f9755d674393b3be108e90fe44b27ff447", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 166, "license_type": "permissive", "max_line_length": 19, "num_lines": 15, "path": "/requirements.txt", "repo_name": "DiamondLightSource/islatu", "src_encoding": "UTF-8", "text": "wheel\r\nnumpy\r\nscipy\r\npandas\r\npyyaml\r\nnexusformat\r\npytest\r\npytest-lazy-fixture\r\nnbsphinx\r\njupyter-sphinx\r\njupyterlab\r\nipywidgets\r\ncoverage==6.3.2\r\npytest-cov" }, { "alpha_fraction": 0.6499372720718384, "alphanum_fraction": 0.6913425326347351, "avg_line_length": 29.653846740722656, "blob_id": "a951e2b9654c9b86cb901bdcd00aa184f566a8a9", "content_id": "9da7a3762d22461ad59c06f39cd2c4c37a2c56dd", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 797, "license_type": "permissive", "max_line_length": 78, "num_lines": 26, "path": "/tests/system/_test_runner.py", "repo_name": "DiamondLightSource/islatu", "src_encoding": "UTF-8", "text": "\"\"\"\nThis module tests the islatu.runner module's processing capabilities.\n\"\"\"\n\nimport os\n\nimport numpy as np\n\nfrom 
islatu.runner import i07reduce\n\n\ndef test_i07reduce_dcd(tmp_path, path_to_resources, old_dcd_data):\n \"\"\"\n Tests the i07reduce function with DCD data.\n \"\"\"\n # Do the reduction.\n run_numbers = range(404875, 404883)\n yaml_file = os.path.join(path_to_resources, \"dcd.yaml\")\n i07reduce(run_numbers, yaml_file, path_to_resources, filename=tmp_path)\n\n # Make sure that the saved data is correct.\n reduced_data = np.loadtxt(os.path.join(tmp_path, os.listdir(tmp_path)[0]))\n\n assert np.allclose(reduced_data[0], old_dcd_data[0], 1e-3)\n assert np.allclose(reduced_data[1], old_dcd_data[1], 1e-3)\n assert np.allclose(reduced_data[2], old_dcd_data[2], 1e-3)\n" }, { "alpha_fraction": 0.5855739116668701, "alphanum_fraction": 0.5974664092063904, "avg_line_length": 42.29850769042969, "blob_id": "ae5d90fc564b4bec69d6edacc579abb4cf8232fc", "content_id": "c0f6c8bfd1201054b3ffeb1887501c6e0d890c92", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11608, "license_type": "permissive", "max_line_length": 80, "num_lines": 268, "path": "/CLI/process_xrr.py", "repo_name": "DiamondLightSource/islatu", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\n\n\"Command line interface for the Islatu library.\"\n\nimport argparse\nimport os\n\n\nif __name__ == \"__main__\":\n # First deal with the parsing of the command line arguments using the\n # argparse library.\n HELP_STR = (\n \"Command line interface to the Islatu library's autoprocessing \" +\n \"functionality.\"\n )\n parser = argparse.ArgumentParser(description=HELP_STR)\n\n # The most important argument is the path to the data. If this is not\n # provided, we'll assume that we're in the data directory. Note that the\n # default argument is never passed to add_argument because the default\n # behaviour implemented here is too complex in some cases to be replaced by\n # simple hardcoded values. Instead, default values are calculated after\n # parse_args is called.\n HELP_STR = (\n \"Path to the directory in which the data is stored. If this \" +\n \"is not specified, your current directory will be used.\"\n )\n parser.add_argument(\"-d\", \"--data_path\", help=HELP_STR)\n\n HELP_STR = (\n \"Path to the .yaml recipe file. \" +\n \"If this is not specified, this module will search your data \" +\n \"directory, and data_path/processing/, for a .yaml file.\"\n )\n parser.add_argument(\"-y\", \"--yaml_path\", help=HELP_STR)\n\n HELP_STR = (\n \"Use this flag if you are on site in diamond and would like your \" +\n \"data to be processed on a cluster. (19/10/2021) Note: this is \" +\n \"currently finicky; if you *need* to get this to work email \" +\n \"richard.brearton@diamond.ac.uk\"\n )\n parser.add_argument(\"-c\", \"--cluster\", help=HELP_STR, action=\"store_true\")\n\n HELP_STR = (\n \"Specify the first scan number to process. If this is not specified, \" +\n \"no lower bound on scan number will be placed on scans found in the \" +\n \"data directory. If neither lower nor upper bounds are placed, all \" +\n \"scans found in the data directory will be used to construct a profile.\"\n )\n parser.add_argument(\"-l\", \"--lower_bound\", help=HELP_STR, type=int)\n\n HELP_STR = (\n \"Specify the final scan number to process. 
If this is not specified, \" +\n        \"no upper bound will be placed on scan number for scans found in the \" +\n        \"data directory.\"\n    )\n    parser.add_argument(\"-u\", \"--upper_bound\", help=HELP_STR, type=int)\n\n    HELP_STR = (\n        \"Directly specify the scan numbers to be used to construct the \" +\n        \"profile. Simply sequentially list the scan numbers. Example usage: \" +\n        \"python3 process_xrr.py --scan_numbers 401320 401321 401324 401326 \" +\n        \"-d data/ -o processed_curves/. This argument overwrites -l and -u.\"\n    )\n    parser.add_argument(\"-N\", \"--scan_numbers\",\n                        help=HELP_STR, nargs='*', type=int)\n\n    HELP_STR = (\n        \"Specify the directory in which you would like your processed \" +\n        \"reflectivity curve to be stored. Defaults to data_path/processing/\"\n    )\n    parser.add_argument(\"-o\", \"--output\", help=HELP_STR)\n\n    HELP_STR = (\n        \"\"\"\n        Specify a list of scans whose q values should be limited, as well as the\n        corresponding acceptable minimum and maximum q-values. For example:\n        -Q 413243 0 0.4 413244 0.3 0.6 413248 0.8 inf\n        Would ignore any q-values higher than 0.4 in scan 413243, would\n        ignore any q-values smaller than 0.3 or larger than 0.6 in scan number\n        413244, and would ignore any q-values lower than 0.8 present in scan\n        number 413248. As implied in the example, a value of 0 indicates\n        \"no lower limit\" and a value of inf indicates \"no upper limit\". In\n        general, the numbers \"413243\" etc. given above must be unique to the\n        name of the file from which the scan was parsed.\n        \"\"\"\n    )\n    parser.add_argument(\"-Q\", \"--limit_q\",\n                        help=HELP_STR, nargs='*', type=str)\n\n    # A switch to allow verbosity toggle.\n    HELP_STR = \"Increase output verbosity. -v = verbose, -vv = very verbose!\"\n    parser.add_argument(\"-v\", \"--verbose\", help=HELP_STR, action=\"count\")\n\n    # Extract the arguments from the parser.\n    args = parser.parse_args()\n\n    # Now we can import islatu. We need to do this after parsing args so that\n    # the -h/--help option doesn't get slowed down by bad load times in hdf5/\n    # nexusformat libs.\n    from islatu.runner import i07reduce\n    from islatu.debug import debug\n\n    # Now we need to generate default values of inputs, where required.\n    # Default to local dir.\n    if args.data_path is None:\n        args.data_path = os.getcwd()\n\n    # Default to data_path/processing/.\n    args.processing_path = os.path.join(args.data_path, \"processing\")\n\n    # Default to smallest possible scan number (0).\n    if args.lower_bound is None:\n        args.lower_bound = 0\n\n    # Make a number that will always be bigger than all other numbers.\n    if args.upper_bound is None:\n        args.upper_bound = float('inf')\n\n    # Output should be stored in the processing directory by default.\n    if args.output is None:\n        args.output = args.processing_path\n\n    if args.verbose is None:\n        args.verbose = 0\n\n    # Set islatu's logger to requested verbosity.\n    debug.logging_level = args.verbose\n\n    # Now it's time to prepare to do some XRR reduction. If the user is in\n    # diamond and wants to use a cluster, then we should go ahead and do that.\n    if args.cluster:\n        raise NotImplementedError(\n            \"Islatu currently only runs locally. If cluster submission is \" +\n            \"necessary, please contact richard.brearton@diamond.ac.uk\"\n        )\n\n    # If execution reaches here, we're processing the scan locally. 
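All we need now is the .yaml recipe and the scan numbers. 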
First look\n    # for the .yaml file if we weren't explicitly told where it is.\n    if args.yaml_path is None:\n        debug.log(\"Searching for .yaml files in '\" + args.data_path +\n                  \"' and '\" + args.processing_path + \"'.\")\n\n        # Search in both the processing directory and the data directory.\n        files = []\n\n        # Only check in the processing directory if it actually exists.\n        if os.path.exists(args.processing_path):\n            files.extend([args.processing_path + x\n                          for x in os.listdir(args.processing_path)])\n\n        # The data_path should definitely exist. If it doesn't, we shouldn't be\n        # unhappy about an error being raised at this point.\n        files.extend(os.listdir(args.data_path))\n\n        # Work out which of these files are .yaml files.\n        yaml_files = [x for x in files if x.endswith(\".yaml\")]\n        debug.log(\".yaml files found: \" + str(yaml_files))\n\n        # If we didn't find exactly one .yaml file, complain.\n        if len(yaml_files) != 1:\n            generic_err_str = (\n                \"Could not uniquely determine location of .yaml file.\\n\" +\n                \"Searched directories \" + args.processing_path + \" and \" +\n                args.data_path + \".\\n\" + \"Hoped to find exactly one file, \" +\n                \"but found \" + str(len(yaml_files)) + \". \"\n            )\n            if len(yaml_files) > 1:\n                generic_err_str += \"Names of found files are: \" + \\\n                    str(yaml_files) + \".\"\n            raise FileNotFoundError(generic_err_str)\n        else:\n            # We only found one .yaml, so that's our guy.\n            args.yaml_path = yaml_files[0]\n\n    # If execution reaches here, we've successfully found the .yaml file.\n    # Next let's try to work out what scan numbers are in the data directory if\n    # we weren't told explicitly which scan numbers we should be looking at.\n    if args.scan_numbers is None:\n        debug.log(\n            \"Scan numbers not explicitly given. Searching for scans \" +\n            \"in directory \" + args.data_path + \".\"\n        )\n        # Grab every valid looking nexus file in the directory.\n        nexus_files = [x for x in os.listdir(\n            args.data_path) if x.endswith(\".nxs\")]\n\n        # Make noise if we didn't find any .nxs files.\n        generic_cant_find_nxs = (\n            \"Couldn't find any nexus (.nxs) files in the data directory '\" +\n            args.data_path\n        )\n        if len(nexus_files) == 0:\n            raise FileNotFoundError(\n                generic_cant_find_nxs + \"'.\"\n            )\n\n        # So, we found some .nxs files. Now let's grab the scan numbers from\n        # these files.\n        debug.log(\"Scans located: \" + str(nexus_files))\n        nexus_files = [int(x.replace(\".nxs\", '').replace(\"i07-\", ''))\n                       for x in nexus_files]\n\n        # Now select the subset of these scan numbers that lies within the\n        # closed interval [args.lower_bound, args.upper_bound].\n        args.scan_numbers = [x for x in nexus_files if\n                             x >= args.lower_bound and x <= args.upper_bound]\n        debug.log(\"Scan numbers found: \" + str(args.scan_numbers) + \".\", 2)\n\n        # Make sure we found some scans.\n        if len(args.scan_numbers) == 0:\n            raise FileNotFoundError(\n                generic_cant_find_nxs +\n                \" whose scan numbers were greater than or equal to \" +\n                str(args.lower_bound) +\n                \" and less than or equal to \" + str(args.upper_bound) + \".\"\n            )\n\n    if args.limit_q is not None:\n        if len(args.limit_q) % 3 != 0:\n            raise ValueError(\n                f\"\"\"\n                --limit_q must have a number of arguments passed to it that is\n                a multiple of three. Instead, {len(args.limit_q)} arguments were\n                found. Please use the pattern:\n                -Q N1 qmin1 qmax1 N2 qmin2 qmax2 ...\n                where N1 is a scan number, qmin1 is the minimum q for the\n                scan with scan number N1, and qmax1 is the maximum acceptable q\n                for the scan with scan number N1, etc. 
Please refer to the\n                --help for more information.\n                \"\"\"\n            )\n        # Okay, this is presumably properly formatted. Let's turn this into a\n        # list of dictionaries that we can pass directly to the\n        # profile.subsample_q method.\n        q_subsample_dicts = []\n        for i, _ in enumerate(args.limit_q):\n            if i % 3 == 0:\n                # We're on a new scan, so we'll need a new subsample dict.\n                q_subsample_dicts.append({})\n\n                # Now grab that dict we just created and give it our new scan\n                # index. Note that if i%3 != 0, then we can skip the creation\n                # of a new dictionary.\n                q_subsample_dicts[-1]['scan_ID'] = args.limit_q[i]\n            elif i % 3 == 1:\n                # Convert every 2nd and 3rd value to a float - these will be\n                # our q limits.\n                args.limit_q[i] = float(args.limit_q[i])\n                q_subsample_dicts[-1]['q_min'] = args.limit_q[i]\n            elif i % 3 == 2:\n                # Convert every 2nd and 3rd value to a float - these will be\n                # our q limits.\n                args.limit_q[i] = float(args.limit_q[i])\n                q_subsample_dicts[-1]['q_max'] = args.limit_q[i]\n        args.limit_q = q_subsample_dicts\n\n    # If execution reaches here, we found the .yaml file and we have the scan\n    # numbers we'll construct the XRR curve from. This is all that we need: a\n    # recipe and some data; let's go ahead and process the data on this machine.\n    i07reduce(args.scan_numbers, args.yaml_path, args.data_path,\n              filename=args.output, q_subsample_dicts=args.limit_q)\n" }, { "alpha_fraction": 0.5948594212532043, "alphanum_fraction": 0.5985515713691711, "avg_line_length": 37.90607833862305, "blob_id": "2bda9861410494d7b8ae90ff71a29670308bf34a", "content_id": "93386e00f75966d2ec361d97986cb0c7581dddee", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7044, "license_type": "permissive", "max_line_length": 80, "num_lines": 181, "path": "/src/islatu/refl_profile.py", "repo_name": "DiamondLightSource/islatu", "src_encoding": "UTF-8", "text": "\"\"\"\nA profile is a measurement resulting from a scan, or a series of scans. Profiles\nare the central objects in the islatu library, containing the total reflected\nintensity as a function of scattering vector data.\n\"\"\"\n\nfrom typing import List\n\nfrom .scan import Scan\nfrom .stitching import concatenate, rebin\nfrom .data import Data\n\n\nclass Profile(Data):\n    \"\"\"\n    The object that is used to store all information relating to a reflectivity\n    profile.\n    \"\"\"\n\n    def __init__(self, data: Data, scans: List[Scan]) -> None:\n        super().__init__(data.intensity, data.intensity_e, data.energy,\n                         data.theta)\n        self.scans = scans\n\n    @classmethod\n    def fromfilenames(cls, filenames, parser):\n        \"\"\"\n        Instantiate a profile from a list of scan filenames.\n\n        Args:\n            filenames (:py:attr:`list`):\n                List of files, one for each reflectometry scan. Can have length\n                one.\n            parser (:py:attr:`callable`):\n                Parser function for the reflectometry scan files.\n        \"\"\"\n\n        # Load the scans, specifying the scan axis name if necessary.\n        scans = [parser(filename) for filename in filenames]\n\n        # Now that the individual scans have been loaded, data needs to be\n        # constructed. 
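(Profile subclasses Data, so the concatenated arrays live on the profile itself.) 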
The simplest way to do this is by concatenating the\n        # data from each of the constituent scans.\n        q_vectors, intensity, intensity_e = concatenate(scans)\n\n        # Note: we are making the implicit assumption that energy is independent\n        # of scan number at this point.\n        energy = scans[0].metadata.probe_energy\n\n        data = Data(intensity, intensity_e, energy, q_vectors=q_vectors)\n\n        return cls(data, scans)\n\n    def crop(self, crop_function, **kwargs):\n        \"\"\"\n        Calls the :func:`~islatu.scan.Scan2D.crop`\n        method for each :py:class:`~Scan2D` in :py:attr:`self.scans`.\n\n        Args:\n            crop_function (:py:attr:`callable`): Cropping function to be used.\n            kwargs (:py:attr:`dict`, optional): Keyword arguments for the\n                cropping function. Defaults to :py:attr:`None`.\n        \"\"\"\n        for scan in self.scans:\n            scan.crop(crop_function, **kwargs)\n        self.concatenate()\n\n    def bkg_sub(self, bkg_sub_function, **kwargs):\n        \"\"\"\n        Calls the :func:`~islatu.scan.Scan.bkg_sub` method for\n        each :py:class:`~Scan` in the list.\n\n        Args:\n            bkg_sub_function (:py:attr:`callable`): Background subtraction\n                function to be used.\n            kwargs (:py:attr:`dict`, optional): Keyword arguments for\n                the background subtraction function. Defaults to\n                :py:attr:`None`.\n        \"\"\"\n        # When a scan subtracts background from each of its images, its\n        # background subtraction function may expose information relating to the\n        # subtraction process. This information will be stored in bkg_sub_info.\n        bkg_sub_info = []\n        # Now just iterate over all of the scans in the profile and subtract the\n        # background, storing the return values in bkg_sub_info.\n        for scan in self.scans:\n            bkg_sub_info.append(scan.bkg_sub(\n                bkg_sub_function, **kwargs))\n\n        self.concatenate()\n\n        # Expose the optimized fit parameters for meta-analysis.\n        return bkg_sub_info\n\n    def subsample_q(self, scan_identifier, q_min=0, q_max=float('inf')):\n        \"\"\"\n        For the scan with identifier scan_identifier, delete all data points for\n        which q < q_min or q > q_max.\n\n        Args:\n            scan_identifier:\n                The scan ID of the scan to be subsampled. This must be a unique\n                substring of the filename from which the scan was taken. For\n                example, if a scan's nexus filename is i07-413244.nxs, then\n                a valid scan_ID would be \"413244\", as this string will uniquely\n                identify the correct scan from within the profile.\n            q_min:\n                The smallest acceptable value of q. Defaults to 0 Å.\n            q_max:\n                The largest acceptable value of q. 
Defaults to inf Å.\n        \"\"\"\n        for scan in self.scans:\n            if scan_identifier in scan.metadata.src_path:\n                scan.subsample_q(q_min, q_max)\n        self.concatenate()\n\n    def footprint_correction(self, beam_width, sample_size):\n        \"\"\"\n        Calls :func:`~islatu.scan.Scan.footprint_correction`\n        for each :py:class:`~Scan` in the list.\n\n        Args:\n            beam_width (:py:attr:`float`): Width of incident beam, in metres.\n            sample_size (:py:class:`uncertainties.core.Variable`): Width of\n                sample in the dimension of the beam, in metres.\n        \"\"\"\n        for scan in self.scans:\n            scan.footprint_correction(beam_width, sample_size)\n        self.concatenate()\n\n    def transmission_normalisation(self):\n        \"\"\"\n        Perform the transmission correction.\n        \"\"\"\n        for scan in self.scans:\n            scan.transmission_normalisation()\n\n        self.concatenate()\n\n    def qdcd_normalisation(self, itp):\n        \"\"\"\n        Calls :func:`~islatu.scan.Scan.qdcd_normalisation` for\n        each :py:class:`~Scan` in the list.\n\n        Args:\n            itp (:py:attr:`tuple`): The spline interpolator to use, as\n                returned by :func:`~islatu.corrections.get_interpolator`.\n        \"\"\"\n        for scan in self.scans:\n            scan.qdcd_normalisation(itp)\n        self.concatenate()\n\n    def concatenate(self):\n        \"\"\"\n        Class method for :func:`~islatu.stitching.concatenate`.\n        \"\"\"\n        self.q_vectors, self.intensity, self.intensity_e = \\\n            concatenate(self.scans)\n\n    def rebin(self, new_q=None, rebin_as=\"linear\", number_of_q_vectors=5000):\n        \"\"\"\n        Class method for :func:`islatu.stitching.rebin`.\n\n        Args:\n            new_q (:py:attr:`array_like`):\n                Array of potential q-values. Defaults to :py:attr:`None`. If\n                this argument is not specified, then the new q, R values are\n                binned according to rebin_as and number_of_q_vectors.\n            rebin_as (:py:attr:`str`):\n                String specifying how the data should be rebinned. Options are\n                \"linear\" and \"log\". This is only used if the new_q are\n                unspecified.\n            number_of_q_vectors (:py:attr:`int`, optional):\n                The max number of q-vectors to use initially in the\n                rebinning of the data. 
Defaults to :py:attr:`5000`.\n        \"\"\"\n        self.q_vectors, self.intensity, self.intensity_e = rebin(\n            self.q_vectors, (self.intensity, self.intensity_e), new_q,\n            rebin_as=rebin_as, number_of_q_vectors=number_of_q_vectors)\n" }, { "alpha_fraction": 0.649899423122406, "alphanum_fraction": 0.649899423122406, "avg_line_length": 21.590909957885742, "blob_id": "26d27af8472111eb461f25653c636fbc955f7d71", "content_id": "d09e343241170f093bfe3616fe677abc1fef3e4b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 497, "license_type": "permissive", "max_line_length": 75, "num_lines": 22, "path": "/src/islatu/cropping.py", "repo_name": "DiamondLightSource/islatu", "src_encoding": "UTF-8", "text": "\"\"\"\nOften the detector is a lot larger than the reflected intensity peak, so it\nmakes sense to crop the image to the peak.\n\"\"\"\n\n\nimport numpy as np\n\nfrom islatu.region import Region\n\n\ndef crop_to_region(array: np.ndarray, region: Region):\n    \"\"\"\n    Crops the input array to the input region.\n\n    Args:\n        array:\n            The array to crop.\n        region:\n            The instance of Region to crop to.\n    \"\"\"\n    return array[region.x_start:region.x_end, region.y_start:region.y_end]\n" }, { "alpha_fraction": 0.5818663835525513, "alphanum_fraction": 0.588618278503418, "avg_line_length": 32.99180221557617, "blob_id": "9dc86bee2159a8d9b2771e9da646b66253a11715", "content_id": "166b3c4c39fcc7fd0b130a8dfa91d18def62c7ba", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4149, "license_type": "permissive", "max_line_length": 80, "num_lines": 122, "path": "/src/islatu/stitching.py", "repo_name": "DiamondLightSource/islatu", "src_encoding": "UTF-8", "text": "\"\"\"\nAs reflectometry measurements typically consist of multiple scans at different\nattenuation, we must stitch these together.\n\"\"\"\n\nfrom typing import List\n\nimport numpy as np\n\nfrom .scan import Scan\n\n\ndef concatenate(scan_list: List[Scan]):\n    \"\"\"\n    Concatenate each of the datasets together.\n\n    Args:\n        scan_list:\n            List of reflectometry scans.\n\n    Returns:\n        :py:attr:`tuple`: Containing:\n            - q-values.\n            - Reflected intensities.\n            - Errors on reflected intensities.\n    \"\"\"\n\n    q_vectors = np.array([])\n    intensity = np.array([])\n    intensity_e = np.array([])\n    for scan in scan_list:\n        q_vectors = np.append(q_vectors, scan.q_vectors)\n        intensity = np.append(intensity, scan.intensity)\n        intensity_e = np.append(intensity_e, scan.intensity_e)\n    return q_vectors, intensity, intensity_e\n\n\ndef rebin(q_vectors, reflected_intensity, new_q=None, rebin_as=\"linear\",\n          number_of_q_vectors=5000):\n    \"\"\"\n    Rebin the data on a linear or logarithmic q-scale.\n\n    Args:\n        q_vectors:\n            q - the current q vectors.\n        reflected_intensity (:py:attr:`tuple`):\n            (I, I_e) - The current reflected intensities, and their errors.\n        new_q (:py:attr:`array_like`):\n            Array of potential q-values. Defaults to :py:attr:`None`. If this\n            argument is not specified, then the new q, R values are binned\n            according to rebin_as and number_of_q_vectors.\n        rebin_as (:py:attr:`str`):\n            String specifying how the data should be rebinned. Options are\n            \"linear\" and \"log\". This is only used if the new_q are unspecified.\n        number_of_q_vectors (:py:attr:`int`, optional):\n            The max number of q-vectors to use initially in the rebinning\n            of the data. 
Defaults to :py:attr:`5000`.\n\n    Returns:\n        :py:attr:`tuple`: Containing:\n            - q: rebinned q-values.\n            - intensity: rebinned intensities.\n            - intensity_e: rebinned intensity errors.\n    \"\"\"\n\n    # Unpack the arguments.\n    q = q_vectors\n    R, R_e = reflected_intensity\n\n    # Required so that logspace/linspace encapsulates the whole data.\n    epsilon = 0.001\n\n    if new_q is None:\n        # Our new q vectors have not been specified, so we should generate some.\n        if rebin_as == \"log\":\n            new_q = np.logspace(\n                np.log10(q[0]),\n                np.log10(q[-1] + epsilon), number_of_q_vectors)\n        elif rebin_as == \"linear\":\n            new_q = np.linspace(q.min(), q.max() + epsilon,\n                                number_of_q_vectors)\n\n    binned_q = np.zeros_like(new_q)\n    binned_R = np.zeros_like(new_q)\n    binned_R_e = np.zeros_like(new_q)\n\n    for i in range(len(new_q)-1):\n        indices = []\n        inverse_var = []\n        for j in range(len(q)):\n            if new_q[i] <= q[j] < new_q[i + 1]:\n                indices.append(j)\n                inverse_var.append(1/float(R_e[j]**2))\n\n        # Don't bother doing maths if there were no recorded q-values between\n        # the two bin points we were looking at.\n        if len(indices) == 0:\n            continue\n\n        # We will be using inverse-variance weighting to minimize the variance\n        # of the weighted mean.\n        sum_of_inverse_var = np.sum(inverse_var)\n\n        # If we measured multiple qs between these bin locations, then average\n        # the data, weighting by inverse variance.\n        for j in indices:\n            binned_R[i] += R[j]/(R_e[j]**2)\n            binned_q[i] += q[j]/(R_e[j]**2)\n\n        # Divide by the sum of the weights.\n        binned_R[i] /= sum_of_inverse_var\n        binned_q[i] /= sum_of_inverse_var\n\n        # The stddev of an inverse variance weighted mean is always:\n        binned_R_e[i] = np.sqrt(1/sum_of_inverse_var)\n\n    # Get rid of any empty, unused elements of the array.\n    cleaned_q = np.delete(binned_q, np.argwhere(binned_R == 0))\n    cleaned_R = np.delete(binned_R, np.argwhere(binned_R == 0))\n    cleaned_R_e = np.delete(binned_R_e, np.argwhere(binned_R == 0))\n\n    return cleaned_q, cleaned_R, cleaned_R_e\n" } ]
34
Subzeero/Arduino-Vehicle-Project
https://github.com/Subzeero/Arduino-Vehicle-Project
6cb8b7c0f740d8e71364481e69e1bd4669fbf5e2
73e992f9bd991dcdcabed171c0731e4e196bc98a
0285b4b56de3c9efe7e1de6a7ed0e5c13531a511
refs/heads/master
2023-06-03T00:55:10.858320
2021-06-24T06:30:41
2021-06-24T06:30:41
368,036,441
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7082833051681519, "alphanum_fraction": 0.7262905240058899, "avg_line_length": 32.79166793823242, "blob_id": "5bdc884cfb751c8aa5ced8b517a7ce6bd2545bd8", "content_id": "a9e9ce70bf58f70920f76f4cc96c7ee62aa3d1ed", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 833, "license_type": "no_license", "max_line_length": 130, "num_lines": 24, "path": "/cogs/purge.py", "repo_name": "Subzeero/Arduino-Vehicle-Project", "src_encoding": "UTF-8", "text": "import discord\r\nfrom discord import channel\r\nfrom discord.ext import commands\r\n\r\nclass purge(commands.Cog):\r\n\tdef __init__(self, bot):\r\n\t\tself.bot = bot\r\n\t\r\n\t@commands.command()\r\n\t@commands.has_permissions(manage_messages=True)\r\n\tasync def purge(self, ctx, clean: int):\r\n\t\tawait ctx.channel.purge(limit=clean+1)\r\n\t\tembed=discord.Embed(title=\"Successfully Purged!\", description=f'{ctx.author.mention} Cleared {clean} messages!', color=0x80ff00)\r\n\t\tawait ctx.send(embed=embed, delete_after=5.0)\r\n\t@purge.error\r\n\tasync def clear_error(self, ctx, error):\r\n\t\tif isinstance(error, commands.MissingPermissions):\r\n\t\t\tembed=discord.Embed(title=\"Error Purging!\", description=f'You do not have required permissions to run this!', color=0xff0006)\r\n\t\t\tawait ctx.send(embed=embed, delete_after=5.0)\r\n\r\n\r\n\r\ndef setup(bot):\r\n\tbot.add_cog(purge(bot))" }, { "alpha_fraction": 0.6901408433914185, "alphanum_fraction": 0.6901408433914185, "avg_line_length": 15.70588207244873, "blob_id": "4b5bc6f46cb464bf0c30fc1107718bb6523c7c72", "content_id": "af1fa21a3162f26b1f2ff675b02044bb4fa840ec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 284, "license_type": "no_license", "max_line_length": 56, "num_lines": 17, "path": "/cogs/info.py", "repo_name": "Subzeero/Arduino-Vehicle-Project", "src_encoding": "UTF-8", "text": "import discord\nfrom discord.ext import commands\nimport math\n\nclass info(commands.Cog):\n\tdef __init__(self, bot):\n\t\tself.bot = bot\n\n\t\n\t@commands.command()\n\tasync def info(self, ctx):\n\t\tawait ctx.send(f\"Currently in {len(self.bot.guilds)}\")\n\t\t\n\n\ndef setup(bot):\n\tbot.add_cog(info(bot))\n" }, { "alpha_fraction": 0.5486111044883728, "alphanum_fraction": 0.550595223903656, "avg_line_length": 28.08955192565918, "blob_id": "f133266c340543c6fdf00e7c674adaf6dc1e9670", "content_id": "bc4d11b7f61e28cfe55fb3575a10a92ce397a8cc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2016, "license_type": "no_license", "max_line_length": 163, "num_lines": 67, "path": "/main.py", "repo_name": "Subzeero/Arduino-Vehicle-Project", "src_encoding": "UTF-8", "text": "import discord\r\nfrom discord.ext import commands\r\nimport os, json\r\n\r\ndef get_prefix(bot, message): \r\n\twith open('prefixes.json', 'r') as f:\r\n\t\tprefixes = json.load(f) \r\n\t\treturn prefixes[str(message.guild.id)]\r\n###############################################\r\nbot = commands.AutoShardedBot(command_prefix = get_prefix, activity=discord.Activity(type=discord.ActivityType.watching, name=\"Watching _ servers\"), shard_count=1)\r\n\r\nfrom dotenv import load_dotenv\r\nload_dotenv()\r\nTOKEN = os.getenv(\"DISCORD_TOKEN\")\r\n####################################################\r\n\r\n@bot.event\r\nasync def on_guild_join(guild):\r\n with open('prefixes.json', 'r') as f: \r\n prefixes = json.load(f) \r\n\r\n prefixes[str(guild.id)] = 
'!' #default prefix\r\n\r\n    with open('prefixes.json', 'w') as f: \r\n        json.dump(prefixes, f, indent=4)\r\n####################################################\r\n@bot.event\r\nasync def on_guild_remove(guild): \r\n    with open('prefixes.json', 'r') as f:\r\n        prefixes = json.load(f)\r\n\r\n        prefixes.pop(str(guild.id)) \r\n\r\n    with open('prefixes.json', 'w') as f: \r\n        json.dump(prefixes, f, indent=4)\r\n####################################################\r\n@bot.command()\r\n@commands.has_permissions(administrator=True)\r\nasync def changeprefix(ctx, prefix): \r\n\twith open('prefixes.json', 'r') as f:\r\n\t\tprefixes = json.load(f)\r\n\r\n\t\tprefixes[str(ctx.guild.id)] = prefix\r\n\r\n\t\twith open('prefixes.json', 'w') as f: \r\n\t\t\tjson.dump(prefixes, f, indent=4)\r\n\t\t\tawait ctx.send(f'Prefix changed to: {prefix}') \r\n############################STATUS###############################\r\n\r\n@bot.command(aliases=['lo'])\r\nasync def load(ctx, extension):\r\n\tbot.load_extension(f'cogs.{extension}') \r\n\tawait ctx.send(f'Loaded \"{extension}\"')\r\n\tprint(f'Loaded \"{extension}\"')\r\n\r\n\r\n\r\n@bot.command(aliases=['un'])\r\nasync def unload(ctx, extension):\r\n\tbot.unload_extension(f'cogs.{extension}') \r\n\tawait ctx.send(f'Unloaded \"{extension}\"')\r\n\tprint(f'Unloaded \"{extension}\"')\r\n\r\n \r\n##########################################\r\n\r\n\r\nbot.run(TOKEN)\r\n" }, { "alpha_fraction": 0.6008333563804626, "alphanum_fraction": 0.6058333516120911, "avg_line_length": 28.769229888916016, "blob_id": "ef4c9b3d91acacb4c699238d075f2e4f2168a75e", "content_id": "a83f05dbd116ff381a59f5197d2169c0a9019258", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2400, "license_type": "no_license", "max_line_length": 89, "num_lines": 78, "path": "/cogs/warn.py", "repo_name": "Subzeero/Arduino-Vehicle-Project", "src_encoding": "UTF-8", "text": "\r\nimport discord\r\nfrom discord.ext import commands\r\nimport json\r\n\r\n\r\nclass warning(commands.Cog):\r\n\tdef __init__(self, bot):\r\n\t\tself.bot = bot\r\n\r\n\t@commands.command(aliases=['w'])\r\n\t@commands.has_permissions(administrator=True)\r\n\tasync def warn(self, ctx, user:discord.User, *, reason): \r\n\t\twith open('./warns.json', 'r') as f:\r\n\t\t\twarns = json.load(f)\r\n\r\n\t\t\tif not str(ctx.guild.id) in warns:\r\n\t\t\t\twarns[str(ctx.guild.id)] = {}\r\n\r\n\t\t\tif not str(user.id) in warns[str(ctx.guild.id)].keys():\r\n\t\t\t\twarns[str(ctx.guild.id)][str(user.id)] = []\r\n\r\n\t\t\twarns[str(ctx.guild.id)][str(user.id)].append(reason)\r\n\r\n\t\t\twith open('./warns.json', 'w') as f: \r\n\t\t\t\tjson.dump(warns, f, indent=4)\r\n\t\t\t\tawait ctx.send(f'{user} Warned for: {reason}') \r\n\r\n\t@commands.command(aliases=['dw'])\r\n\t@commands.has_permissions(administrator=True)\r\n\tasync def delwarn(self, ctx, user:discord.User, warnId:int): \r\n\t\twith open('./warns.json', 'r') as f:\r\n\t\t\twarns = json.load(f)\r\n\r\n\t\t\tif not str(ctx.guild.id) in warns:\r\n\t\t\t\twarns[str(ctx.guild.id)] = {}\r\n\r\n\t\t\tif not str(user.id) in warns[str(ctx.guild.id)].keys():\r\n\t\t\t\twarns[str(ctx.guild.id)][str(user.id)] = []\r\n\t\t\t\treturn await ctx.send(\"This user has no warnings!\")\r\n\r\n\t\t\ttry:\r\n\t\t\t\tdel warns[str(ctx.guild.id)][str(user.id)][warnId - 1]\r\n\t\t\texcept IndexError:\r\n\t\t\t\treturn await ctx.send(\"Invalid warnId\")\r\n\t\t\telse:\r\n\t\t\t\tawait ctx.send(f\"Warn `{warnId}` has been removed.\")\r\n\t\t\t\twith open('./warns.json', 'w') as f: \r\n\t\t\t\t\tjson.dump(warns, f, 
indent=4)\r\n\r\n###################################################################\r\n\t\r\n\t@commands.command(Administrator=True, aliases=['warnings'])\r\n\tasync def warns(self, ctx, user:discord.User): \r\n\t\twith open('./warns.json', 'r') as f:\r\n\t\t\twarns = json.load(f)\r\n\r\n\t\t\tif not str(ctx.guild.id) in warns:\r\n\t\t\t\tawait ctx.send(\"There are no warnings for this user.\")\r\n\t\t\t\treturn\r\n\r\n\t\t\tif not str(user.id) in warns[str(ctx.guild.id)]:\r\n\t\t\t\tawait ctx.send(\"This user has no warnings.\")\r\n\t\t\t\treturn\r\n\r\n\t\t\tuserwarnlist = warns[str(ctx.guild.id)][str(user.id)]\r\n\t\t\tnumWarns = 0\r\n\t\t\t\r\n\t\t\tembed=discord.Embed(title=f\"Warns\", description=\"Someones bad\", color=0xea0006)\r\n\t\t\tembed.set_author(name=user.name +'#'+ user.discriminator, icon_url=user.avatar_url)\r\n\r\n\t\t\tfor userwarn in userwarnlist:\r\n\t\t\t\tif numWarns != 25:\r\n\t\t\t\t\tnumWarns += 1 \r\n\t\t\t\t\tembed.add_field(name=\"\\uFEFF\", value=str(numWarns) + '. ' + userwarn , inline=False)\r\n\r\n\t\t\tawait ctx.send(embed=embed)\r\n\r\n\r\ndef setup(bot):\r\n\tbot.add_cog(warning(bot))" }, { "alpha_fraction": 0.6997166872024536, "alphanum_fraction": 0.6997166872024536, "avg_line_length": 18.61111068725586, "blob_id": "098bf38f1748ad5e4115b7c55540941b5e60516c", "content_id": "d8f7f5613a01bc6d54e5d9578432e307aaacf67e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 353, "license_type": "no_license", "max_line_length": 55, "num_lines": 18, "path": "/cogs/bane.py", "repo_name": "Subzeero/Arduino-Vehicle-Project", "src_encoding": "UTF-8", "text": "import discord\nfrom discord.ext import commands\n\nclass bane(commands.Cog):\n\tdef __init__(self, bot):\n\t\tself.bot = bot\n\n\t\n\t@commands.command()\n\tasync def bane(self, ctx, user=None):\n\t\tif not user:\n\t\t\tuser = ctx.author.mention\n\t\tembed=discord.Embed(description=f\"{user} was banned\")\n\t\tawait ctx.send(embed=embed)\n\n\ndef setup(bot):\n\tbot.add_cog(bane(bot))\n" }, { "alpha_fraction": 0.7037037014961243, "alphanum_fraction": 0.7037037014961243, "avg_line_length": 20.9375, "blob_id": "26493fb9c7c94416fbbc34bee6b8d0528e4998e0", "content_id": "6f9520986a9d7d2626d6d4164890dc25f5617521", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 351, "license_type": "no_license", "max_line_length": 79, "num_lines": 16, "path": "/cogs/mute.py", "repo_name": "Subzeero/Arduino-Vehicle-Project", "src_encoding": "UTF-8", "text": "import discord\nfrom discord.ext import commands\n\nclass mute(commands.Cog):\n\tdef __init__(self, bot):\n\t\tself.bot = bot\n\n\t\n\t@commands.command()\n\tasync def mute(self, ctx, user:discord.Member, *, reason=None, duration=None):\n\t\trole = discord.utils.get(ctx.guild.roles, name=\"Muted\")\n\n\t\tawait user.add_roles(role)\n\ndef setup(bot):\n\tbot.add_cog(mute(bot))\n" }, { "alpha_fraction": 0.650130569934845, "alphanum_fraction": 0.6684073209762573, "avg_line_length": 21.5, "blob_id": "81105289419b6d8788b198a669e4f0b034bca171", "content_id": "a27c68414515f6b27059b07e8de5012afa1d1d8b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 383, "license_type": "no_license", "max_line_length": 106, "num_lines": 16, "path": "/cogs/ping.py", "repo_name": "Subzeero/Arduino-Vehicle-Project", "src_encoding": "UTF-8", "text": "import discord\r\nfrom discord.ext import commands\r\nimport math\r\nclass 
ping(commands.Cog):\r\n\tdef __init__(self, bot):\r\n\t\tself.bot = bot\r\n\r\n\t\r\n\t@commands.command()\r\n\tasync def ping(self, ctx):\r\n\t\tembed=discord.Embed(title=\"Ping!\", description=f'in {round(self.bot.latency * 1000)}ms', color=0xafff24)\r\n\t\tawait ctx.send(embed=embed)\r\n\r\n\r\ndef setup(bot):\r\n\tbot.add_cog(ping(bot))\r\n\r\n\t\r\n\r\n" } ]
7
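A note on the main.py above: get_prefix re-reads prefixes.json on every message and indexes it directly with prefixes[str(message.guild.id)], so it raises in DMs (where message.guild is None) and for any guild missing from the file. A minimal defensive variant is sketched below; the '!' default mirrors the value the on_guild_join handler writes, but the function body itself is an illustrative rewrite, not the repository's code.

```python
import json

DEFAULT_PREFIX = "!"  # same default that on_guild_join writes to prefixes.json

def get_prefix(bot, message):
    # Direct messages have no guild; fall back to the default prefix.
    if message.guild is None:
        return DEFAULT_PREFIX
    try:
        with open("prefixes.json", "r") as f:
            prefixes = json.load(f)
    except (OSError, json.JSONDecodeError):
        return DEFAULT_PREFIX
    # Guilds joined before the file existed may have no entry yet.
    return prefixes.get(str(message.guild.id), DEFAULT_PREFIX)
```

Reading the file on every message keeps the sketch faithful to the original structure; caching the dict in memory and rewriting the file only inside changeprefix would avoid a disk read per message.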
MouseHu/zcj-vin
https://github.com/MouseHu/zcj-vin
06905aeda5c940b7368ff8ff1167f649f402dda3
62cfe0628a8b84accd9c331cc08d9c7b9249ab1a
a4ffdf88ec443c09d8c32526a0ceda9167c297cc
refs/heads/master
2020-03-19T13:37:41.700256
2018-06-11T13:42:50
2018-06-11T13:42:50
136,588,062
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5939765572547913, "alphanum_fraction": 0.6313441395759583, "avg_line_length": 25, "blob_id": "bcbce8010bab5db6b5951e572b3bd6cdf8dbc2c5", "content_id": "aef514de480ad572bfca654bb3c55314e2913e1b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1793, "license_type": "no_license", "max_line_length": 87, "num_lines": 69, "path": "/code-vin/test-cnn.py", "repo_name": "MouseHu/zcj-vin", "src_encoding": "UTF-8", "text": "# %load traincuda-td-online2.py\nimport gridworld2 as gw\nimport cnn\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport torch.optim as optim\nimport copy\n\nimport time\nimport sys\nimport itertools\ndef randomWalk(status,place):\n return np.random.randint(8)\n\ndef cnnPolicy(status,place):\n if np.random.random()<e:\n action=np.random.randint(8)\n return action\n S1=torch.Tensor([place[0]])#.expand(1,1)\n S2=torch.Tensor([place[1]])#.expand(1,1)\n #print(torch.Tensor(status).shape)\n X=torch.Tensor(status).expand(1, len(status),status[0].shape[0],status[0].shape[1])\n config=cnn.Config()\n q1,q2=CNN(X,S1,S2,cnn.Config())\n q1=q1\n q2=q2\n #print(q1)\n #print(q2.shape)\n _,action=torch.max(q2,dim=1)\n action=int(action) \n #print(action)\n assert 0<=action and action<9\n return action\n\ndef evaluate(env,policy,iters=5000,show=False):\n total_reward=0\n success=0.0\n time2=time.time()\n for i in range(iters):\n status,place,reward,over=env.reset()\n t=0\n Tmax=100\n while over==False and t<Tmax:\n action=policy(status,place)\n if iters%100==0 and show:\n print(action)\n env.plot()\n \n status,place,reward,over=env.step(action)\n \n t+=1\n total_reward+=env.total_reward+0.0\n if env.total_reward>Tmax*env.step_reward:\n success+=1\n if i%100==0:\n print(i)\n return total_reward/iters,success/iters,time.time()-time2\n\n\nmodel=sys.argv[1]\nCNN=cnn.CNN(cnn.Config())\nCNN.load_state_dict(torch.load(model,map_location='cpu'))\ngrid=gw.GridWorld2_8dir(8,8,nobstacle=4,moving=True)\ne=0\nprint(CNN)\nprint(evaluate(grid,cnnPolicy,iters=1000))" }, { "alpha_fraction": 0.5045372247695923, "alphanum_fraction": 0.5491833090782166, "avg_line_length": 31.046510696411133, "blob_id": "daccaedf53543e671a0d7e58d43875e2d745310c", "content_id": "e9961679b625696bc0aea27dca0b8944dc5225e2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2755, "license_type": "no_license", "max_line_length": 99, "num_lines": 86, "path": "/code-vin/cnn.py", "repo_name": "MouseHu/zcj-vin", "src_encoding": "UTF-8", "text": "import numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.autograd import Variable\nfrom torch.nn.parameter import Parameter\n#from visualize import make_dot\n\nclass CNN(nn.Module):\n def __init__(self, config):\n super(CNN, self).__init__()\n self.config = config\n self.conv_1 = nn.Conv2d(\n in_channels=config.l_i,\n out_channels=config.l1,\n kernel_size=(3, 3),\n stride=1,\n padding=1,\n bias=True)\n self.pool1 = nn.MaxPool2d(kernel_size=2, ceil_mode=False)\n self.conv_2 = nn.Conv2d(\n in_channels=config.l1,\n out_channels=config.l1,\n kernel_size=(3, 3),\n stride=1,\n padding=1,\n bias=True)\n self.pool_2=nn.MaxPool2d(kernel_size=1, ceil_mode=False)\n self.conv_3 = nn.Conv2d(\n in_channels=config.l1,\n out_channels=config.l2,\n kernel_size=(3, 3),\n stride=1,\n padding=1,\n bias=True)\n self.pool_3=nn.MaxPool2d(kernel_size=2, 
ceil_mode=False)\n self.conv_4 = nn.Conv2d(\n in_channels=config.l2,\n out_channels=config.l2,\n kernel_size=(3, 3),\n stride=1,\n padding=1,\n bias=True)\n self.pool_4=nn.MaxPool2d(kernel_size=1, ceil_mode=False)\n self.conv_5 = nn.Conv2d(\n in_channels=config.l2,\n out_channels=config.l2,\n kernel_size=(3, 3),\n stride=1,\n padding=1,\n bias=True)\n self.pool_5=nn.MaxPool2d(kernel_size=1, ceil_mode=False)\n self.fc = nn.Linear(in_features=config.l2*(config.imsize/4)**2, out_features=8, bias=False)\n \n self.sm = nn.Softmax(dim=1)\n\n def forward(self, X, S1,S2, config):\n place=torch.zeros(X.shape[0],1,self.config.imsize,self.config.imsize)\n for i in range(S1.shape[0]):\n place[i,0,S1[i].long(),S2[i].long()]=1\n \n inputs=torch.cat((X,place),1)\n conv1=self.conv_1(inputs)\n pool1=self.pool1(conv1)\n conv2=self.conv_2(pool1)\n pool2=self.pool_2(conv2)\n conv3=self.conv_3(pool2)\n pool3=self.pool_3(conv3)\n conv4=self.conv_4(pool3)\n pool4=self.pool_4(conv4)\n conv5=self.conv_5(pool4)\n pool5=self.pool_5(conv5)\n out=self.fc(pool5.reshape(X.shape[0],pool5.shape[1]*pool5.shape[2]*pool5.shape[3]))\n return out, self.sm(out)\nclass Config(object):\n def __init__(self):\n self.l_i=3\n self.l2=100\n self.l1=50\n self.l_q=10\n self.batch_size=32\n self.k=3\n self.imsize=8\n self.lr=5e-4" }, { "alpha_fraction": 0.5938065648078918, "alphanum_fraction": 0.6372411251068115, "avg_line_length": 25.174863815307617, "blob_id": "90ec39c2fd8d3c6addc6e0f3665b52ca3777a8e2", "content_id": "501e481ff527dea269bde84b9bf116e078c0d7d5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4973, "license_type": "no_license", "max_line_length": 94, "num_lines": 183, "path": "/traincuda-td-online2.py", "repo_name": "MouseHu/zcj-vin", "src_encoding": "UTF-8", "text": "import gridworld3 as gw\r\nimport myvin\r\n\r\nimport numpy as np\r\nimport torch\r\nimport torch.nn as nn\r\nfrom torch.autograd import Variable\r\nimport torch.optim as optim\r\nimport copy\r\n\r\nimport time\r\nimport sys\r\nimport itertools\r\nimport expBuffer\r\ndef randomWalk(status,place):\r\n return np.random.randint(9)\r\n\r\ndef vinPolicy(status,place):\r\n if np.random.random()<e:\r\n action=np.random.randint(9)\r\n return action\r\n S1=torch.Tensor([place[0]]).cuda()\r\n S2=torch.Tensor([place[1]]).cuda()\r\n X=status.expand(1, len(status),status[0].shape[0],status[0].shape[1])#.cuda()\r\n X=X.cuda()\r\n config=myvin.Config()\r\n q1,q2=VIN(X,S1,S2,myvin.Config())\r\n q1=q1.cuda()\r\n q2=q2.cuda()\r\n #print(q1)\r\n #print(q2.shape)\r\n _,action=torch.max(q1,dim=1)\r\n action=int(action) \r\n #print(action)\r\n assert 0<=action and action<9\r\n return action\r\ndef vinPredict(status,place,vin):\r\n if np.random.random()<e:\r\n action=np.random.randint(9)\r\n return action\r\n S1=torch.Tensor([place[0]]).cuda()\r\n S2=torch.Tensor([place[1]]).cuda()\r\n X=torch.Tensor(status).expand(1, len(status),status[0].shape[0],status[0].shape[1]).cuda()\r\n config=myvin.Config()\r\n q1,q2=vin(X,S1,S2,myvin.Config())\r\n q1=q1.cuda()\r\n \r\n return q1\r\ndef update(expbuffer,vin,oldvin,p=False):\r\n\t#(action,state,place,next_state,next_place,reward,over)\r\n\taction,X,S1,S2,oldX,oldS1,oldS2,Y=expbuffer.sample()\r\n\t#Qmax=torch.Tensor([replay[x[0]][x[1]][4] for x in index]).float() .cuda()\r\n\r\n\r\n\toldoutputs, _ = oldvin(oldX,oldS1,oldS2,myvin.Config())\r\n\toldouputs=oldoutputs.detach()\r\n\tQmax=(torch.max(oldoutputs,dim=1)[0]).squeeze().cuda()\r\n\r\n\toutputs, _ = vin(X,S1,S2 , 
myvin.Config())\r\n\tprint(outputs.shape,action.unsqueeze(1).shape)\r\n\tQvalue=outputs.gather(index=action.unsqueeze(1).long(),dim=1).squeeze().cuda()\r\n\t#print(Qvalue.shape)\r\n\t#print(Y.shape)\r\n\r\n\tTDtarget=(Y+gamma*Qmax).cuda()\r\n\r\n\tcriterion = torch.nn.MSELoss(size_average=False)\r\n\tloss=criterion(Qvalue,Y).cuda()\r\n\toptimizer = optim.RMSprop(VIN.parameters(), lr=myvin.Config().lr, eps=1e-6)\r\n\toptimizer.zero_grad() \r\n\tloss.backward()\r\n\toptimizer.step()\r\n\r\n\tif p:\r\n\t\tprint(outputs[0],Qvalue[0],TDtarget[0],Y[0].cpu().numpy())\r\n\t\tgrid.plot2(X[0].cpu().numpy(),int(S1[0].item()),int(S2[0].item()))\t\r\n\treturn loss\r\ndef evaluate(env,policy,iters=5000):\r\n\ttotal_reward=0\r\n\ttime2=time.time()\r\n\tfor i in range(iters):\r\n\t\tstatus,place,reward,over=env.reset()\r\n\t\tt=0\r\n\t\twhile over==False and t<100:\r\n\t\t\taction=policy(status,place)\r\n\t\t\tstatus,place,reward,over=env.step(action)\r\n\t\t\tt+=1\r\n\t\ttotal_reward+=env.total_reward+0.0\r\n\t\tif i%100==0:\r\n\t\t\tprint(i)\r\n\treturn total_reward/iters,time.time()-time2\r\ndevice=0\r\nif len(sys.argv)>1:\r\n device=int(sys.argv[1])\r\nwith torch.cuda.device(device):\r\n\t\r\n\tVIN=myvin.VIN(myvin.Config())\r\n\tVIN=VIN.cuda()\r\n\t#VIN.load_state_dict(torch.load(\"model2/moving-model-9-3920.pkl\"))\r\n\tprint(VIN)\r\n\toldVIN=myvin.VIN(myvin.Config()).cuda()\r\n\toldVIN.load_state_dict(VIN.state_dict())\r\n\tgrid=gw.GridWorld3_8dir(8,8,nobstacle=4,moving=True)\r\n\te=0\r\n\t#print(evaluate(grid,vinPolicy,1000))\r\n\t#print(evaluate(grid,randomWalk))\r\n\tmaxStep=5000000\r\n\tepisodes=20000\r\n\tgamma=0.99\r\n\tTmax=1000\r\n\treplay=[]\r\n\tmax_exp=10000\r\n\tlearning_begin=1000\r\n\tlearning_freq=10\r\n\tupdate_freq=1000\r\n\te=0.1\r\n\texperience=expBuffer.ExperienceBuffer(myvin.Config(),max_exp)\r\n\tprint(\"here\")\r\n\t#print(evaluate(grid,randomWalk))\r\n\t#print(evaluate(grid,vinPolicy,iters=500))\r\n\t#time1=ti09, -10me.time()\r\n\t#experience\r\n\tcount=0\r\n\tl=0\r\n\ts=0\r\n\tfor k in range(3920,episodes): \r\n\t #step\t\r\n\t #rewards=[]\r\n\t e=100/(k+100)\r\n\t\r\n\t state,place,reward,over=grid.reset()\r\n\t #print(\"begin\")\r\n\t # time1=time.time()\r\n\t #if k%10==0:\r\n\t #\tgrid.plot()\r\n\t time1=time.time()\t\r\n\t for i in range(Tmax):\r\n\t count+=1\r\n\t action=vinPolicy(state,place)\r\n\t next_state,next_place,reward,over=grid.step(action)\r\n\t\texperience.add((action,state,place,next_state,next_place,reward,over))\r\n\t\tstate=next_state\r\n\t\tplace=next_place\r\n\t\t#if i%1000==0:\r\n\t\t # print(i)\t\t\r\n\t\tif count<learning_begin:\r\n\t\t continue\r\n\t\tif count%learning_freq==0 :\r\n\t\t loss=0\r\n\t\t #for x in range(3):\t\t\r\n\t\t loss+=update(experience,VIN,oldVIN)#,True)\r\n\t\t #if count%1000==0:\r\n\t\t\t#update(experience,VIN,oldVIN,True)\r\n\t\t #s+=loss\r\n\t\t #l+=1\r\n\t\t #if l%100==0:\r\n\t\t\t#print(\"loss\",s/100)\r\n\t\t\t#s=0\r\n\t\t\t#l=0\r\n\t\t#if count%100==0:\r\n\t\t # print(\"loss\",loss/100)\r\n\t\t\r\n\t\tif count%update_freq==0:\r\n\t\t\toldVIN.load_state_dict(VIN.state_dict())\r\n\t\t\toldVIN.cuda()\r\n\t\tif over:\r\n\t\t\t#print state\r\n\t\t\tbreak\r\n\t\t\t\r\n\t if k%50==0:\r\n\t\tprint(\"episode\",k,time.time()-time1,i,grid.total_reward)\r\n\t\t\r\n\t if count>maxStep:\r\n\t\tbreak\r\n\t#evaluate(grid,randomWalk)\r\n\t#evaluate(grid,vinPolicy)\r\n\t \r\n\t if k%200==20: \r\n\t\ttorch.save(VIN.state_dict(),\"model/moving-model-9-\"+str(k)+\".pkl\") \r\n\t\tprint(\"begin eval\") 
\r\n\t\tprint(evaluate(grid,vinPolicy,iters=200))\r\n\t\t\r\n\t\t#print total_reward/iters,time.time()-time2\r\n" }, { "alpha_fraction": 0.5914804339408875, "alphanum_fraction": 0.6284916400909424, "avg_line_length": 28.253520965576172, "blob_id": "47ae0a26c21798ef1bb49d50fcb47a68e59b743c", "content_id": "c13f917ba9c1a1924db7a06689f34b4d961f92a8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4296, "license_type": "no_license", "max_line_length": 95, "num_lines": 142, "path": "/traincuda-td.py", "repo_name": "MouseHu/zcj-vin", "src_encoding": "UTF-8", "text": "import gridworld as gw\r\nimport myvin\r\n\r\nimport numpy as np\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nfrom torch.autograd import Variable\r\nimport torch.optim as optim\r\nimport copy\r\nimport time\r\nimport sys\r\n#import torchvision.transforms as transforms\r\ngrid=gw.GridWorld_8dir(nobstacle=5,moving=False)\r\n#grid.show()\r\n#grid.plot()\r\n#for _ in range(100):\r\n# grid.step(grid.sample())\r\n#grid.plot()\r\ndef randomWalk(status,place):\r\n return np.random.randint(9)\r\ndef evaluate(env,policy,iters=5000):\r\n\t total_reward=0\r\n\t for i in range(iters):\r\n\t\tstatus,place,reward,over,action=env.reset()\r\n\t\twhile over==False:\r\n\t\t status,place,reward,over,action=env.step(policy(status,place))\r\n\t\ttotal_reward+=env.total_reward+0.0\r\n\t\tif i%100==0:\r\n\t\t print(i)\r\n\t return total_reward/iters\r\ndevice=0\r\nif len(sys.argv)>1:\r\n device=int(sys.argv[1])\r\nwith torch.cuda.device(device):\r\n\tVIN=myvin.VIN(myvin.Config())\r\n\tVIN.cuda()\r\n\tdef vinPolicy(status,place):\r\n\t if np.random.random()<e:\r\n\t\taction=np.random.randint(9)\r\n\t\treturn action\r\n\t S1=torch.Tensor([place[0]]).cuda()\r\n\t S2=torch.Tensor([place[1]]).cuda()\r\n\t X=torch.Tensor(status).expand(1, len(status),status[0].shape[0],status[0].shape[1]).cuda()\r\n\t config=myvin.Config()\r\n\t q1,q2=VIN(X,S1,S2,myvin.Config())\r\n\t q1=q1.cuda()\r\n\t q2=q2.cuda()\r\n\t #print(q1)\r\n\t #print(q2.shape)\r\n\t _,action=torch.max(q1,dim=1)\r\n\t action=int(action) \r\n\t #print(action)\r\n\t assert 0<=action and action<9\r\n\t return action\r\n\tepoches=2000\r\n\tepisodes=200\r\n\tprint(\"here\")\r\n\toldVIN=copy.deepcopy(VIN).cuda()\r\n\t#experience\r\n\tfor k in range(epoches):\r\n\t gamma=0.99\r\n\t #step\r\n\t replay=[]\r\n\t #rewards=[]\r\n\t print(\"begin\")\r\n\t time1=time.time()\r\n\t for i in range(episodes):\r\n\t\t#reward=[]\r\n\t\te=40.0/(40+k)#8.0/(10+k)# is not ok why?\r\n\t\tif i==0:\r\n\t\t experience=grid.run_episode(vinPolicy)#,show=True)\r\n\t\telse:\r\n\t\t experience=grid.run_episode(vinPolicy)\r\n\t\t#make discounted reward\r\n\t\tif i%100==0:\r\n\t\t print(i)\r\n\t\treplay.append(experience)\r\n\t #replay & update\r\n\t #print(rewards)\r\n\t print(\"experience\",k,time.time()-time1)\r\n\t time1=time.time()\t\r\n\t for i in range(500):\r\n\t\tX=[]\r\n\t\tS1=[]\r\n\t\tS2=[]\r\n\t\toldS1=[]#next action\r\n\t\toldS2=[]#next action\r\n\t\toldX=[]\r\n\t\taction=[]\r\n\t\tY=[]#torch.Tensor(reward[::-1])\r\n\t\tindex=[]\r\n\t\tfor j in range(myvin.Config().batch_size):# sample experience from replay\r\n\t\t x1=np.random.randint(episodes)\r\n\t\t x2=np.random.randint(len(replay[x1])-1)\r\n\t\t #status,place,reward,over,action\r\n\t\t \r\n\t\t Y.append(replay[x1][x2][2])\r\n\t\t action.append(replay[x1][x2][4])\r\n\t\t X.append(replay[x1][x2][0])\r\n\t\t oldX.append(replay[x1][x2+1][0])\r\n\t\t S1.append(replay[x1][x2][1][0])\r\n\t\t 
S2.append(replay[x1][x2][1][1])\r\n\t\t oldS1.append(replay[x1][x2+1][1][0])\r\n\t\t oldS2.append(replay[x1][x2+1][1][1])\r\n\t\t index.append((x1,x2+1))\r\n\r\n\t\tX=torch.from_numpy(np.array(X)).float().cuda()#do not change it to torch.Tensor(X).float()\r\n\t\tS1=torch.from_numpy(np.array(S1)).float().cuda()\r\n\t\tS2=torch.from_numpy(np.array(S2)).float().cuda()\r\n\t\toldS1=torch.from_numpy(np.array(oldS1)).float().cuda()\r\n\t\toldS2=torch.from_numpy(np.array(oldS2)).float().cuda()\r\n\t\toldX=torch.from_numpy(np.array(oldX)).float().cuda()\r\n\t\taction=torch.from_numpy(np.array(action)).unsqueeze(dim=1).long().cuda()\r\n\t\tY=torch.from_numpy(np.array(Y)).float().cuda()\r\n\t\t#Qmax=torch.Tensor([replay[x[0]][x[1]][4] for x in index]).float() .cuda()\r\n\r\n\r\n\t\toldoutputs, _ = oldVIN(oldX,oldS1,oldS2 , myvin.Config())\r\n\t\tQmax=(torch.max(oldoutputs,dim=1)[0]).squeeze().cuda()\r\n\t\toutputs, _ = VIN(X,S1,S2 , myvin.Config())\r\n\t\tQvalue=outputs.gather(index=action,dim=1).squeeze().cuda()\r\n\t\t#print(Qvalue.shape)\r\n\t\t#print(Y.shape)\r\n\t\tTDtarget=(Y+gamma*Qmax).cuda()\r\n\t\tcriterion = torch.nn.MSELoss(size_average=False)\r\n\t\tloss=criterion(Qvalue,Y).cuda()\r\n\t\toptimizer = optim.RMSprop(VIN.parameters(), lr=2e-4, eps=1e-6) \r\n\t\tloss.backward()\r\n\t\t# Update params\r\n\t\toptimizer.step()\r\n\t\tif i%100==0:\r\n\t\t print(i)\r\n\t print(\"update\",k,time.time()-time1)\r\n\t if k%100==99:\t \r\n\t\tprint(evaluate(grid,vinPolicy))#its a bad policy :(\r\n\t#evaluate(grid,randomWalk)\r\n\t#evaluate(grid,vinPolicy)\r\n\t#e=8.0/28.0\r\n\t\r\n\tprint(evaluate(grid,randomWalk))\r\n\tprint(evaluate(grid,vinPolicy))#its a bad policy :(\r\n" }, { "alpha_fraction": 0.625321090221405, "alphanum_fraction": 0.6618348360061646, "avg_line_length": 25.827587127685547, "blob_id": "bfd14b706ee85780dab8a54ac12351989b6edb7b", "content_id": "44c2d95bed4270ca6826044e01da5c8e06eb1eab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5450, "license_type": "no_license", "max_line_length": 94, "num_lines": 203, "path": "/code-vin/old/traincuda-td-online.py", "repo_name": "MouseHu/zcj-vin", "src_encoding": "UTF-8", "text": "import gridworld as gw\nimport myvin\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport torch.optim as optim\nimport copy\n\nimport time\nimport sys\nimport itertools\ndef randomWalk(status,place):\n return np.random.randint(9)\n\ndef vinPolicy(status,place):\n if np.random.random()<e:\n action=np.random.randint(9)\n return action\n S1=torch.Tensor([place[0]]).cuda()\n S2=torch.Tensor([place[1]]).cuda()\n X=torch.Tensor(status).expand(1, len(status),status[0].shape[0],status[0].shape[1]).cuda()\n config=myvin.Config()\n q1,q2=VIN(X,S1,S2,myvin.Config())\n q1=q1.cuda()\n q2=q2.cuda()\n #print(q1)\n #print(q2.shape)\n _,action=torch.max(q1,dim=1)\n action=int(action) \n #print(action)\n assert 0<=action and action<9\n return action\ndef vinPredict(status,place,vin):\n if np.random.random()<e:\n action=np.random.randint(9)\n return action\n S1=torch.Tensor([place[0]]).cuda()\n S2=torch.Tensor([place[1]]).cuda()\n X=torch.Tensor(status).expand(1, len(status),status[0].shape[0],status[0].shape[1]).cuda()\n config=myvin.Config()\n q1,q2=vin(X,S1,S2,myvin.Config())\n q1=q1.cuda()\n \n return q1\ndef update(experience,vin,oldvin):\n\t#(action,state,place,next_state,next_place,reward,over)\n\tX=[]\n\tS1=[]\n\tS2=[]\n\toldS1=[]#next action\n\toldS2=[]#next 
action\n\toldX=[]\n\taction=[]\n\tY=[]#torch.Tensor(reward[::-1])\n\tindex=[]\n\tfor j in range(myvin.Config().batch_size):# sample experience from replay\n\t x=np.random.randint(len(experience))\n\t #status,place,reward,over,action\n \n\t Y.append(experience[x][5])\n\t action.append(experience[x][0])\n\t X.append(experience[x][1])\n\t oldX.append(experience[x][3])\n\t S1.append(experience[x][2][0])\n\t S2.append(experience[x][2][1])\n\t oldS1.append(experience[x][4][0])\n\t oldS2.append(experience[x][4][1])\n\t #index.append((x1,x2+1))\n\n\tX=torch.from_numpy(np.array(X)).float().cuda()#do not change it to torch.Tensor(X).float()\n\tS1=torch.from_numpy(np.array(S1)).float().cuda()\n\tS2=torch.from_numpy(np.array(S2)).float().cuda()\n\toldS1=torch.from_numpy(np.array(oldS1)).float().cuda()\n\toldS2=torch.from_numpy(np.array(oldS2)).float().cuda()\n\toldX=torch.from_numpy(np.array(oldX)).float().cuda()\n\taction=torch.from_numpy(np.array(action)).unsqueeze(dim=1).long().cuda()\n\tY=torch.from_numpy(np.array(Y)).float().cuda()\n\t#Qmax=torch.Tensor([replay[x[0]][x[1]][4] for x in index]).float() .cuda()\n\n\n\toldoutputs, _ = oldvin(oldX,oldS1,oldS2 , myvin.Config())\n\tQmax=(torch.max(oldoutputs,dim=1)[0]).squeeze().cuda()\n\n\toutputs, _ = vin(X,S1,S2 , myvin.Config())\n\tQvalue=outputs.gather(index=action,dim=1).squeeze().cuda()\n\t#print(Qvalue.shape)\n\t#print(Y.shape)\n\tTDtarget=(Y+gamma*Qmax).cuda()\n\tcriterion = torch.nn.MSELoss(size_average=False)\n\tloss=criterion(Qvalue,Y).cuda()\n\toptimizer = optim.RMSprop(VIN.parameters(), lr=myvin.Config().lr, eps=1e-6) \n\toptimizer.zero_grad() \n\tloss.backward()\n\t# Update params\n\toptimizer.step()\ndef evaluate(env,policy,iters=5000):\n\ttotal_reward=0\n\ttime2=time.time()\n\tfor i in range(iters):\n\t\tstatus,place,reward,over=env.reset()\n\t\tt=0\n\t\twhile over==False and t<2000:\n\t\t\taction=policy(status,place)\n\t\t\tstatus,place,reward,over=env.step(action)\n\t\t\tt+=1\n\t\ttotal_reward+=env.total_reward+0.0\n\t\tif i%100==0:\n\t\t\tprint(i)\n\treturn total_reward/iters,time.time()-time2\ndevice=0\nif len(sys.argv)>1:\n device=int(sys.argv[1])\nwith torch.cuda.device(device):\n\t\n\tVIN=myvin.VIN(myvin.Config()).cuda()\n\tprint(VIN)\n\toldVIN=copy.deepcopy(VIN).cuda()\n\tgrid=gw.GridWorld_8dir(8,8,nobstacle=4,moving=False)\n\n\tmaxStep=1000000\n\tepisodes=10000\n\tgamma=0.99\n\tTmax=5000\n\treplay=[]\n\tmax_exp=50000\n\tlearning_begin=20000\n\tlearning_freq=4\n\tupdate_freq=5000\n\te=0.1\n\texperience=[]\n\tprint(\"here\")\n\t#print(evaluate(grid,randomWalk))\n\t#print(evaluate(grid,vinPolicy,iters=500))\n\t#time1=ti09, -10me.time()\n\t#experience\n\tcount=0\n\tfor k in range(episodes): \n\t #step\t\n\t #rewards=[]\n\t e=50.0/(k+50)\n\n\t state,place,reward,over=grid.reset()\n\t #print(\"begin\")\n\t # time1=time.time()\n\t \n\t time1=time.time()\t\n\t for i in range(Tmax):\n\t count+=1\n\t action=vinPolicy(state,place)\n\t next_state,next_place,reward,over=grid.step(action)\n\t\texperience.append((action,state,place,next_state,next_place,reward,over))\n\t\tif len(experience)>max_exp:\n\t\t\texperience.pop(0)\n\t\tstate=next_state\n\t\tplace=next_place\n\t\t#if i%1000==0:\n\t\t # print(i)\t\t\n\t\tif count<learning_begin:\n\t\t continue\n\t\tif count%learning_freq==0 :\t\t\n\t\t update(experience,VIN,oldVIN)\n\t\t #print(\"update\")\n\t\t\n\t\tif count%update_freq==0:\n\t\t\toldVIN.load_state_dict(VIN.state_dict())\n\t\t\toldVIN.cuda()\n\t\tif over:\n\t\t\t#print state\n\t\t\tbreak\n\t\t\t\n\t if 
k%10==0:\n\t\tprint(\"episode\",k,time.time()-time1,i,grid.total_reward)\n\t if count>maxStep:\n\t\tbreak\n\t#evaluate(grid,randomWalk)\n\t#evaluate(grid,vinPolicy)\n\t \n\t if k%1000==20: \n\t\tprint(\"begin eval\") \n\t\titers=10\n\t\t#print(evaluate(grid,vinPolicy,iters=100))#its a bad policy :(\n\t\ttotal_reward=0\n\t\ttime2=time.time()\n\t\tfor x in range(iters):\n\t\t\tstate,place,reward,over=grid.reset()\n\t\t\t#t=0\n\t\t\te=0\n\t\t\tfor y in range(Tmax):\n\t\t\t\taction=vinPolicy(state,place)\n\t\t\t\tnext_state,next_place,reward,over=grid.step(action)\n\t\t\t\tstate=next_state\n\t\t\t\tplace=next_place\n\t\t\t\t#t+=1\n\t\t\t\tif over:\n\t\t\t\t\tbreak\n\t\t\ttotal_reward+=grid.total_reward+0.0\n\t\t\tprint(grid.total_reward)\n\t\t\tif x%5==0:\n\t\t\t\tprint(x)\n\t\t\n\t\tprint total_reward/iters,time.time()-time2\n\t\n\t\n" }, { "alpha_fraction": 0.6000000238418579, "alphanum_fraction": 0.6700000166893005, "avg_line_length": 15.666666984558105, "blob_id": "b48c84f4b79f817bb8549e9afecc0c49edbce105", "content_id": "dc6936a6bb846068df94f7daf3642f5260226e14", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 100, "license_type": "no_license", "max_line_length": 26, "num_lines": 6, "path": "/testcuda.py", "repo_name": "MouseHu/zcj-vin", "src_encoding": "UTF-8", "text": "import torch\nwith torch.cuda.device(1):\n\ta=torch.randn(1,2,3)\n\tb=torch.rand(1,2,3)\n\tc=a+b\n\tprint(c)\n" }, { "alpha_fraction": 0.6787980198860168, "alphanum_fraction": 0.7021703124046326, "avg_line_length": 30.82978630065918, "blob_id": "2d2c2d7d108c536906c0365b1e772991a15c084f", "content_id": "3499ab6958db54f3e8a66c6b980017c1ff87da26", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2995, "license_type": "no_license", "max_line_length": 117, "num_lines": 94, "path": "/expBuffer.py", "repo_name": "MouseHu/zcj-vin", "src_encoding": "UTF-8", "text": "import torch \nimport torch.nn as nn\nimport numpy as np\nclass ExperienceBuffer(object):\n\tdef __init__(self,config,maxexp=10000,cuda=True):\n\t\tself.pointer=0\n\t\tself.cuda=cuda\n\t\tself.maxexp=maxexp\n\t\tself.batch_size=config.batch_size\n\t\tself.config=config\n\t\tself.action=torch.Tensor(maxexp)\n\t\tself.state=torch.Tensor(maxexp,2,config.imsize,config.imsize)\n\t\tself.S1=torch.Tensor(maxexp)\n\t\tself.S2=torch.Tensor(maxexp)\n\t\tself.next_state=torch.Tensor(maxexp,2,config.imsize,config.imsize)\n\t\tself.next_S1=torch.Tensor(maxexp)\n\t\tself.next_S2=torch.Tensor(maxexp)\n\t\tself.reward=torch.Tensor(maxexp)\n\t\tself.over=torch.Tensor(maxexp)\n\t\tself.full=False\n\t\tif cuda:\n\t\t\tself.S1=self.S1.cuda()\n\t\t\tself.S2=self.S2.cuda()\n\t\t\tself.state=self.state.cuda()\n\t\t\tself.next_S1=self.next_S1.cuda()\n\t\t\tself.next_S2=self.next_S2.cuda()\n\t\t\tself.next_state=self.next_state.cuda()\n\t\t\tself.action=self.action.cuda()\n\t#(action,state,place,next_state,next_place,reward,over)\n\tdef add(self,exp):\n\t\tself.pointer=(self.pointer+1)%self.maxexp\n\t\tself.action[self.pointer]=exp[0]\n\t\tself.state[self.pointer]=exp[1]\n\t\tself.next_state[self.pointer]=exp[3]\n\t\tself.S1[self.pointer]=exp[2][0]\n\t\tself.S2[self.pointer]=exp[2][1]\n\t\tself.next_S1[self.pointer]=exp[4][0]\n\t\tself.next_S2[self.pointer]=exp[4][1]\n\t\tself.reward[self.pointer]=exp[5]\n\t\tif exp[6]==True:\n\t\t\tself.over[self.pointer]=0\n\t\telse:\n\t\t\tself.over[self.pointer]=1\n\t\tif self.pointer==0:\n\t\t\tself.full=True\n\tdef 
can_sample(self,length):\n\t\tif length>self.maxexp:\n\t\t\treturn False\n\t\tif self.full:\n\t\t\treturn True\n\t\tif self.pointer>length:\n\t\t\treturn True\n\t\treturn False\n\tdef sample(self,length=0):\n\t\tif length==0:\n\t\t\tlength=self.batch_size\n\t\tif self.can_sample==False:\n\t\t\tprint(\"no!\")\n\t\t\n\t\tself.s_action=torch.Tensor(length)\n\t\tself.s_state=torch.Tensor(length,2,self.config.imsize,self.config.imsize)\n\t\tself.s_S1=torch.Tensor(length)\n\t\tself.s_S2=torch.Tensor(length)\n\t\tself.s_next_state=torch.Tensor(length,2,self.config.imsize,self.config.imsize)\n\t\tself.s_next_S1=torch.Tensor(length)\n\t\tself.s_next_S2=torch.Tensor(length)\n\t\tself.s_reward=torch.Tensor(length)\n\t\tif self.cuda:\n\t\t\tself.s_action=self.s_action.cuda()\n\t\t\tself.s_state=self.s_state.cuda()\n\t\t\tself.s_S1=self.s_S1.cuda()\n\t\t\tself.s_S2=self.s_S2.cuda()\n\t\t\tself.s_next_state=self.s_next_state.cuda()\n\t\t\tself.s_next_S1=self.s_next_S1.cuda()\n\t\t\tself.s_next_S2=self.s_next_S2.cuda()\n\t\t\tself.s_reward=self.s_reward.cuda()\n\t\t\t#self.s_over=torch.Tensor(length)\n\t\tif self.full:\n\t\t\tmaxexp=self.maxexp\n\t\telse:\n\t\t\tmaxexp=self.pointer\n\t\tfor i in range(length):\n\t\t\tx=np.random.randint(maxexp)\n\t\t\twhile self.over[x]==1:\n\t\t\t\tx=np.random.randint(maxexp)\n\t\t\tself.s_action[i]=self.action[x]\n\t\t\tself.s_state[i]=self.state[x]\n\t\t\tself.s_S1[i]=self.S1[x]\n\t\t\tself.s_S2[i]=self.S2[x]\n\t\t\tself.s_next_state[i]=self.next_state[x]\n\t\t\tself.s_next_S1[i]=self.next_S1[x]\n\t\t\tself.s_next_S2[i]=self.next_S2[x]\n\t\t\tself.s_reward[i]=self.reward[x]\n\t\treturn self.s_action,self.s_state,self.s_S1,self.s_S2,self.s_next_state,self.s_next_S1,self.s_next_S2,self.s_reward\n\t\t\n" }, { "alpha_fraction": 0.6537795662879944, "alphanum_fraction": 0.6964800953865051, "avg_line_length": 25.25757598876953, "blob_id": "b0c3b08028df57ce7584dffd6d335bb9fb298c05", "content_id": "ab435202202bf4b90092377305037e851a3aa2cc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1733, "license_type": "no_license", "max_line_length": 94, "num_lines": 66, "path": "/test.py", "repo_name": "MouseHu/zcj-vin", "src_encoding": "UTF-8", "text": "import gridworld2 as gw\nimport myvin\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport torch.optim as optim\nimport copy\n\nimport time\nimport sys\nimport itertools\ndef randomWalk(status,place):\n return np.random.randint(9)\n\ndef vinPolicy(status,place):\n if np.random.random()<e:\n action=np.random.randint(9)\n return action\n S1=torch.Tensor([place[0]]).cuda()\n S2=torch.Tensor([place[1]]).cuda()\n X=torch.Tensor(status).expand(1, len(status),status[0].shape[0],status[0].shape[1]).cuda()\n config=myvin.Config()\n q1,q2=VIN(X,S1,S2,myvin.Config())\n q1=q1.cuda()\n q2=q2.cuda()\n #print(q1)\n #print(q2.shape)\n _,action=torch.max(q1,dim=1)\n action=int(action) \n #print(action)\n assert 0<=action and action<9\n return action\ndef evaluate(env,policy,iters=5000):\n\ttotal_reward=0\n\ttime2=time.time()\n\tfor i in range(iters):\n\t\tstatus,place,reward,over=env.reset()\n\t\tt=0\n\t\twhile over==False and t<100:\n\t\t\taction=policy(status,place)\n\t\t\tstatus,place,reward,over=env.step(action)\n\t\t\tt+=1\n\t\ttotal_reward+=env.total_reward+0.0\n\t\tif i%100==0:\n\t\t\tprint(i)\n\treturn total_reward/iters,time.time()-time2\ndevice=0\nif len(sys.argv)>1:\n device=int(sys.argv[1])\nwith 
torch.cuda.device(device):\n\t\n\tVIN=myvin.VIN(myvin.Config()).cuda()\n\tVIN.load_state_dict(torch.load(\"model2/moving-model-9-3920.pkl\"))#3920\n\tprint(VIN)\n\toldVIN=myvin.VIN(myvin.Config()).cuda()\n\toldVIN.load_state_dict(VIN.state_dict())\n\tgrid=gw.GridWorld2_8dir(8,8,nobstacle=4,moving=True)\n\te=0\n\t#print(evaluate(grid,vinPolicy,1000))\n\tprint(evaluate(grid,randomWalk))\n\tfor i in range(10):\n\t\tprint(evaluate(grid,vinPolicy,iters=1000))\n\t\t\n\t\t#print total_reward/iters,time.time()-time2\n" }, { "alpha_fraction": 0.5277517437934875, "alphanum_fraction": 0.5570134520530701, "avg_line_length": 37.195919036865234, "blob_id": "c5be7af5d28d2f5c0516098ed83b2b8ca2fb0d00", "content_id": "2199f1b52aeaafd4aeb991701cf13509f9239eeb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9603, "license_type": "no_license", "max_line_length": 155, "num_lines": 245, "path": "/gridworld3.py", "repo_name": "MouseHu/zcj-vin", "src_encoding": "UTF-8", "text": "import numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport math\r\nimport torch\r\nclass Obstacle3(object):\r\n def __init__(self,width,height,nobstacle,avoid,moving=True,boundary=True):\r\n self.width=width\r\n self.height=height\r\n self.num=0# number of obstacles\r\n self.obstacles=[]\r\n self.dir_list=[[-1,1],[0,1],[1,1],[-1,0],[0,0],[1,0],[-1,-1],[0,-1],[1,-1]]\r\n\t#self.dir_list=[[-1,0], [1,0], [0,1], [0,-1] ,[-1,1], [-1,-1], [1,1], [1,-1]]\r\n\tself.moving=moving\r\n\tself.boundary=boundary\r\n for i in range(nobstacle):\r\n self.genObstacle(avoid)\r\n \r\n def checkCollision(self,pos,avoid):#check Collision\r\n for i in avoid:\r\n if i==pos:\r\n return True\r\n return False\r\n def genObstacle(self,avoid):\r\n if self.num>=self.width*self.height-len(avoid):\r\n print(\"error:no place to hold obstacles\")\r\n return\r\n flag=True\r\n while flag:\r\n\t if self.boundary:\r\n \ts1,s2=(np.random.randint(1,self.width-1),np.random.randint(1,self.height-1))\r\n\t else:\r\n\t\ts1,s2=(np.random.randint(self.width),np.random.randint(self.height))\r\n flag=self.checkCollision((s1,s2),self.obstacles+avoid)\r\n self.obstacles.append((s1,s2))\r\n self.num+=1\r\n return\r\n def move(self,avoid):\r\n if self.moving==False:\r\n return\r\n for index,ob in enumerate(self.obstacles):\r\n #print(self.dir_list,np.random.randint(9))\r\n move=self.dir_list[np.random.randint(9)]\r\n new_pos=(max(min(ob[0]+move[0],self.width-1),0),max(min(ob[1]+move[1],self.height-1),0))\r\n if self.checkCollision(new_pos,avoid)==False:\r\n self.obstacles[index]=new_pos\r\n return\r\nclass GridWorld3_8dir(object):#8dir\r\n def __init__(self,width=10,height=10,nobstacle=3,moving=True,boundary=True,cuda=True):\r\n self.dir_list=[[-1,1],[0,1],[1,1],[-1,0],[0,0],[1,0],[-1,-1],[0,-1],[1,-1]]#0 up 1 down 2 left 3 right\r\n #self.dir_list=[[-1,1],[0,1],[1,1],[-1,0],[1,0],[-1,-1],[0,-1],[1,-1]]\r\n\t#self.dir_list=[[-1,0], [1,0], [0,1], [0,-1] ,[-1,1], [-1,-1], [1,1], [1,-1]]\r\n self.width=width\r\n self.height=height\r\n self.moving=moving\r\n self.nobstacle=nobstacle\r\n self.boundary=boundary\r\n self.step_reward=-1;\r\n self.goal_reward=100;\r\n self.collision_reward=-1\r\n \tself.train=True\r\n\tself.cuda=cuda \r\n self.reset()\r\n\t \r\n def reset(self):\r\n while True:\r\n\t if self.boundary:\r\n\t\tc=1\r\n\t else:\r\n\t\tc=0\r\n self.init_place=(np.random.randint(c,self.width-c),np.random.randint(c,self.height-c))\r\n self.goal_place=(np.random.randint(c,self.width-c),np.random.randint(c,self.height-c))\r\n 
while self.goal_place==self.init_place:\r\n self.goal_place=(np.random.randint(self.width),np.random.randint(self.height))\r\n\t if self.train:\r\n\t\tbreak\r\n if (abs(self.init_place[0]-self.goal_place[0])+abs(self.init_place[1]-self.goal_place[1]))>math.sqrt((self.width-2*c)**2+(self.height-2*c)**2):\r\n break\r\n #print(abs(self.init_place[0]-self.goal_place[0])+abs(self.init_place[1]-self.goal_place[1]))\r\n self.obstacles=Obstacle3(self.width,self.height,self.nobstacle,[self.init_place,self.goal_place],self.moving,self.boundary)\r\n self.place=self.init_place\r\n self.total_reward=0;\r\n self.over=False\r\n \r\n return self.status(),self.place,self.total_reward,self.over\r\n \r\n def status(self):#inbulid\r\n reward_map=torch.zeros(self.width,self.height)\r\n obstacle_map=torch.ones(self.width,self.height)\r\n #array[self.place]=1\r\n #print(self.init_place, array[self.init_place])\r\n reward_map[self.goal_place]=self.goal_reward#prior knewledge\r\n for ob in self.obstacles.obstacles:\r\n #print(ob)\r\n obstacle_map[ob]=0\r\n #print(array)\r\n\tif self.boundary:\t\r\n\t\tfor i in range(self.height):\r\n\t\t\tobstacle_map[0,i]=0\r\n\t\t\tobstacle_map[self.width-1,i]=0\r\n\t\tfor i in range(self.width):\r\n\t\t\tobstacle_map[i,0]=0\r\n\t\t\tobstacle_map[i,self.height-1]=0\r\n\tif self.cuda:\r\n\t\treturn torch.cat((reward_map.unsqueeze(0),obstacle_map.unsqueeze(0)),0).cuda()\r\n\telse:\r\n\t\treturn torch.cat((reward_map.unsqueeze(0),obstacle_map.unsqueeze(0)),0)\r\n \r\n def show(self):\r\n array=np.full((self.width,self.height),0)\r\n array[self.place]=1\r\n #print(self.init_place, array[self.init_place])\r\n array[self.goal_place]=2\r\n for ob in self.obstacles.obstacles:\r\n #print(ob)\r\n array[ob]=-1\r\n #print(array)\r\n\tif self.boundary:\t\r\n\r\n\t\tfor i in range(self.height):\r\n\t\t\tarray[0,i]=-1\r\n\t\t\tarray[self.width-1,i]=-1\r\n\t\tfor i in range(self.width):\r\n\t\t\tarray[i,0]=-1\r\n\t\t\tarray[i,self.height-1]=-1\r\n return array\r\n \r\n def step(self,direction):\r\n current_reward=self.total_reward\r\n if self.over:\r\n return self.status(),self.goal_place,0,True\r\n #if meet obstacle, stop at origin place\r\n \r\n self.obstacles.move([self.place,self.goal_place])\r\n flag=0\r\n assert (0<=direction and direction<9)\r\n\tif self.boundary is not True:\r\n \tnew_place=(max(min(self.place[0]+self.dir_list[direction][0],self.width-1),0),max(min(self.place[1]+self.dir_list[direction][1],self.height-1),0))\r\n\telse:\r\n\t\tnew_place=(max(min(self.place[0]+self.dir_list[direction][0],self.width-2),1),max(min(self.place[1]+self.dir_list[direction][1],self.height-2),1))\r\n\tif (self.place[0]+self.dir_list[direction][0]!=new_place[0]) or (self.place[1]+self.dir_list[direction][1]!=new_place[1]):# zhaung qiang le\r\n\t\tself.total_reward+=self.collision_reward\r\n\t\tflag=1\r\n if self.obstacles.checkCollision(new_place,self.obstacles.obstacles)==False:\r\n #print(\"here!\")\r\n self.place=new_place\r\n else:\r\n\t if flag==0:\r\n\t\tself.total_reward+=self.collision_reward \r\n self.total_reward+=self.step_reward\r\n \r\n if self.place==self.goal_place:\r\n self.over=True\r\n self.total_reward+=self.goal_reward\r\n return self.status(),self.place,self.total_reward-current_reward,self.over\r\n \r\n def run_episode(self,policy,show=False,Tmax=2000):\r\n status,place,reward,over=self.reset()\r\n experience=[]\r\n count=0\r\n while over==False:\r\n count+=1\r\n\t action=policy(status,place)\r\n\t experience.append((status,place,reward,over,action))\r\n 
status,place,reward,over=self.step(action)\r\n \r\n if show==True and count%100==0:\r\n self.plot()\r\n\t if count>=Tmax:\r\n\t\tbreak\r\n\texperience.append((status,place,reward,over,-1))\r\n #print(place,reward,over)\r\n #self.clean()\r\n return experience\r\n def run_episode2(self,policy,show=False):\r\n status,place,reward,over=self.reset()\r\n experience=[]\r\n count=0\r\n while over==False:\r\n count+=1\r\n\t print(policy(status,place))\r\n\t action,q=policy(status,place)\r\n status,place,reward,over=self.step(action)\r\n\t Q=int(torch.max(q,dim=1)[1])\r\n experience.append((status,place,reward,over,action,Q))\r\n if show==True and count%100==0:\r\n self.plot()\r\n #print(place,reward,over)\r\n #self.clean()\r\n return experience\r\n def sample(self):\r\n return np.random.randint(9)\r\n \r\n def plot(self):\r\n plt.figure(figsize=(5, 5))\r\n ax=plt.gca() \r\n ax.set_xticks(np.linspace(0,self.width,self.width+1)) \r\n ax.set_yticks(np.linspace(0,self.height,self.height+1)) \r\n \r\n plt.grid(True) \r\n plt.xlim((0, self.width))\r\n plt.ylim((0, self.height))\r\n matrix=self.show()\r\n x=np.arange(0, self.width, 0.01)\r\n for j in range(self.height):\r\n y1=np.array([j]*len(x))\r\n y2=np.array([j+1]*len(x))\r\n for i in range(self.width):\r\n if matrix[i,j]==0:\r\n continue\r\n if matrix[i,j]==-1:\r\n plt.fill_between(x,y1,y2,where=(i<=x) & (x<=i+1),facecolor='black')\r\n elif matrix[i,j]==1:\r\n plt.fill_between(x,y1,y2,where=(i<=x) & (x<=i+1),facecolor='blue')\r\n else:\r\n plt.fill_between(x,y1,y2,where=(i<=x) & (x<i+1),facecolor='red')\r\n plt.show()\r\n return\r\n def plot2(self,status,s1,s2):\r\n plt.figure(figsize=(5, 5))\r\n ax=plt.gca() \r\n ax.set_xticks(np.linspace(0,self.width,self.width+1)) \r\n ax.set_yticks(np.linspace(0,self.height,self.height+1)) \r\n \r\n plt.grid(True) \r\n plt.xlim((0, self.width))\r\n\r\n plt.ylim((0, self.height))\r\n reward=status[0]\r\n\treward[s1][s2]=1\r\n\tobstacle=status[1]\r\n x=np.arange(0, self.width, 0.01)\r\n for j in range(self.height):\r\n y1=np.array([j]*len(x))\r\n y2=np.array([j+1]*len(x))\r\n for i in range(self.width):\r\n if obstacle[i][j]==1 and reward[i,j]==0:\r\n continue\r\n if obstacle[i][j]==0:\r\n plt.fill_between(x,y1,y2,where=(i<=x) & (x<=i+1),facecolor='black')\r\n elif reward[i][j] == self.goal_reward:\r\n plt.fill_between(x,y1,y2,where=(i<=x) & (x<=i+1),facecolor='red')\r\n else:\r\n plt.fill_between(x,y1,y2,where=(i<=x) & (x<i+1),facecolor='blue')\r\n plt.show()\r\n return\r\n" }, { "alpha_fraction": 0.800000011920929, "alphanum_fraction": 0.800000011920929, "avg_line_length": 19.733333587646484, "blob_id": "50ff06efa76e3ed2b638e60214b25eff0ebc4fac", "content_id": "20daf1b6a85df13945452efbb9a41a5e3fd380b8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 310, "license_type": "no_license", "max_line_length": 68, "num_lines": 15, "path": "/code-vin/ReadMe.txt", "repo_name": "MouseHu/zcj-vin", "src_encoding": "UTF-8", "text": "train:\n\npython train-vin.py\npython train-vindyn.py\npython train-cnn.py\n\ntest:\n\npython test-vin.py model/moving-model-vin.pkl\npython test-vindyn.py model/moving-model-vindyn.pkl\npython test-cnn.py model/moving-model-cnn.pkl\n\n\nacknowledgement:\nmyvin.py is modified from https://github.com/transedward/pytorch-dqn" }, { "alpha_fraction": 0.8245614171028137, "alphanum_fraction": 0.8245614171028137, "avg_line_length": 57, "blob_id": "0c6cc1cbcf633f2dadb6d4c2595ffe41d362ffeb", "content_id": "9093b8bf22c8b74fd3ca0498c7a3af40fcdd44ef", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 57, "license_type": "no_license", "max_line_length": 57, "num_lines": 1, "path": "/code-vin/old/ReadMe.txt", "repo_name": "MouseHu/zcj-vin", "src_encoding": "UTF-8", "text": "This dir contains my failed trail code and no useful code" } ]
11
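One detail worth flagging in the training scripts above (traincuda-td.py, traincuda-td-online.py, traincuda-td-online2.py): the update step builds TDtarget = Y + gamma * Qmax but then computes loss = criterion(Qvalue, Y), so the network regresses toward the raw reward and the bootstrap term never reaches the gradient; in traincuda-td-online2.py the detached target-network output is additionally assigned to a misspelled name (oldouputs), leaving oldoutputs itself attached to the graph, and no script masks Qmax at terminal transitions. A generic one-step Q-learning update with those fixes is sketched below — the q_net(state) call signature is a simplification for illustration and does not match the VIN's (X, S1, S2, config) interface.

```python
import torch
import torch.nn.functional as F

def td_update(q_net, target_net, optimizer, batch, gamma=0.99):
    # batch: (state, action, reward, next_state, done) tensors, done in {0, 1}
    state, action, reward, next_state, done = batch
    # Q(s, a) for the actions actually taken.
    q_value = q_net(state).gather(1, action.long().unsqueeze(1)).squeeze(1)
    # Bootstrap from the frozen target network; no gradient flows through it.
    with torch.no_grad():
        q_max = target_net(next_state).max(dim=1)[0]
    # Terminal transitions contribute only the reward: mask q_max with (1 - done).
    td_target = reward + gamma * q_max * (1.0 - done)
    # Regress toward the TD target, not toward the raw reward.
    loss = F.mse_loss(q_value, td_target)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    return loss.item()
```

Relatedly, traincuda-td.py constructs a fresh RMSprop optimizer inside its loop and never calls zero_grad(), so gradients accumulate across iterations; creating the optimizer once and zeroing it before each backward pass, as above, avoids that.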
renjuzac/Basic
https://github.com/renjuzac/Basic
83f1e7bd7459aab523eaeb6f6bed8b977616e2b8
6b3417a70bb54e8760294eafd75d3c2c17a3e327
6eebe61314b8821d36e1336d16cad2ad3ea78059
refs/heads/master
2016-08-06T20:27:34.680730
2012-08-17T21:40:44
2012-08-17T21:40:44
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6824885010719299, "alphanum_fraction": 0.6866359710693359, "avg_line_length": 24.244186401367188, "blob_id": "6a3ee5361ac350390e96aad11a5960f182436d9f", "content_id": "03b6274c5db447ecea2e6bc0a1778cdbc5388a94", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2170, "license_type": "no_license", "max_line_length": 88, "num_lines": 86, "path": "/BasicTool.py", "repo_name": "renjuzac/Basic", "src_encoding": "UTF-8", "text": "#!/usr/local/bin/python\t\nimport math\n\ndef equity_size():\n\tglobal equi\n\tprint \"Please enter the value of your portfolio.\"\n\tequi = float(raw_input(\"> \"))\n\treturn float(equi)\n\ndef max_risk():\n\tglobal ma_ri\n\tprint \"Please enter your risk threshold.\"\n\tprint \"For daytrades the risk threshold is capped at 3%.\"\n\tma_ri = (float(raw_input(\"> \")))\n\t\n\tif ma_ri > 3.0:\n\t\tprint \"The max risk threshold is too high.\"\n\t\tprint \"Adjust this risk to 3% or lower.\"\n\t\treturn max_risk()\n\t\t\n\telse:\n\t\treturn float(ma_ri)\n\t\t\ndef price_entry():\n\tglobal buy_price\n\tprint \"Please enter your price entry.\"\n\tbuy_price = float(raw_input(\"> \"))\n\treturn float(buy_price)\n\t\ndef pre_determined_stop():\n\tglobal pre_stop\n\tprint \"Please enter your stop: \"\n\tpre_stop = float(raw_input(\"> \"))\n\treturn pre_stop\n\t\ndef trade_amount():\n\tglobal scalp_dollar_risk\n\tprint \"Please enter the amount you wish to allocate to this trade.\"\n\tscalp_dollar_risk = float(raw_input(\"> \"))\n\t\n\tif scalp_dollar_risk > (equi * 4):\n\t\tprint \"You can not use more than 4x's leverage.\"\n\t\tprint \"Please enter a valid amount.\"\n\t\treturn trade_amount()\n\t\n\telif scalp_dollar_risk <= 0:\n\t\tprint \"Please enter a valid amount.\"\n\t\n\t\treturn trade_amount()\n\telse:\n\t\treturn float(scalp_dollar_risk)\n\t\ndef shares_to_buy(): #This calculation is used when there is no pre-determined stop.\n\tshares_calc = (equi * ma_ri)/(buy_price - generated_stop)\n\tprint int(shares_calc)\n\t\ndef shares_buying(): #This calculation is used when there is a pre-determined stop.\n\tshare_calcu = (equi * ma_ri)/(buy_price - pre_stop)\n\tprint int(share_calcu)\n\t\ndef stop_to_use():\n\tglobal generated_stop\n\tprint \"This is your stop.\"\n\tgenerated_stop = (buy_price * (scalp_dollar_risk - (equi * ma_ri))) / scalp_dollar_risk\n\tprint round(generated_stop, 2)\n\ndef start():\n\tprint \"We need some information before we can return the number of shares to purchase.\"\n\tequity_size()\n\tmax_risk()\n\tprice_entry()\n\ttrade_amount()\n\t\n\tprint \"Do you have a stop in mind? If not, we can provide one for you.\"\n\tprint \"Please answer yes or no.\"\n\tnext = raw_input()\n\t\n\tif next[:1] == 'y':\n\t\tpre_determined_stop()\n\t\treturn shares_buying()\n\n\telse:\n\t\tstop_to_use()\n\t\treturn shares_to_buy()\n\t\t\nstart()" }, { "alpha_fraction": 0.7783767580986023, "alphanum_fraction": 0.7885594367980957, "avg_line_length": 114.17241668701172, "blob_id": "6e1845ca559ec80422dff112b6f5bfda692eb3c0", "content_id": "2c90401632fb65ea8afc535d08c4fc028cd14b76", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3339, "license_type": "no_license", "max_line_length": 477, "num_lines": 29, "path": "/README.md", "repo_name": "renjuzac/Basic", "src_encoding": "UTF-8", "text": "PosSize version 0.1.2.\n\nTHIS REMAINS A WORK IN PROGRESS.\n\nThis is a position sizing tool for that allows traders to effectively size their position. 
Position sizing is a fundamental aspect of risk management (one that is rarely discussed). This tool is meant to demonstrate how someone can adequately size (in lay terms, how many shares or contracts to buy) a position.\n\nBasic Tool deals with daytrades. This tool will limit how much you can buy of a single position (4x's the equity in a portfolio) and will give an accurate number of shares/contracts to be purchased for a scalp. The output can be utilized for those who scale into trades by simply dividing the final output by the number of tranches that the trader prefers to trade in. Note: there is no leverage function currently available in the Basic Tool to account for options & futures.\n\nPositional deals with positional trades and will take into account volatility by using the 20-day ATR (average true range, http://www.investopedia.com/terms/a/atr.asp#axzz21pednSRY). This tool is BASED on the method reportedly used by Richard Dennis and William Eckhardt in their famous Turtle Trading experiment. The sources used are listed below (with emphasis on Curtis Faith's pdf since he was an actual Turtle trader):\n\nThe program has been modified to take into account leveraged and volatility based instruments. There is also a function that limits what stocks can be called. Volatility based instruments are expressly restricted from this program at this time. Whatever position size is generated for a volatility based instrument IS NOT CORRECT; the program will OVERSTATE the size.\n\nFaith, Curtis. 2003. \"The Original Turtle Trading Rules\", pgs. 12-17.\n\nCovel, Michael. 2007. \"Complete Turtle Trader\", pgs. 67-98.\n\nTharp, Van K. 1998. \"Trade Your Way to Financial Freedom\", pgs. 280-313.\n\nThis program also uses Corey Goldberg's ystockquote Python API; http://goldb.org/ystockquote.html. Ystockquote is used for getting historical prices from Yahoo Finance.\n\nAs mentioned, historical prices come from Yahoo Finance. Currently there exist some minor consistency issues with SOME of Yahoo's price data. This is due to a sync issue with some of Yahoo's servers. It is not clear if, or when, Yahoo will be able to fix this issue. The resulting issue is minor as it does not (at least has not been seen to) affect multiple prices within one series of price data.\n\nThis program currently does not take into account stock splits and volatility based instruments (e.g. VXX, XIV, VIX options, etc.) At some point these issues will be resolved.\n\nThis program does not currently support futures. The plan is to support futures sometime, well, in the future. This may or may not be accomplished in this project (if futures are not supported in this project then they might be supported in a django project that is temporarily on hold.)\n\nThis file will be amended in the future as the project evolves.\n\nStandard disclaimers here. This tool should in NO WAY be seen as giving financial advice. This tool should be seen as an educational device and in no way should be taken as explicit advice to buy, sell, or short any financial instrument. Always do your own due diligence and consult a financial advisor when dealing with financial instruments and making financial decisions. THIS IS NOT FINANCIAL ADVICE."
}, { "alpha_fraction": 0.5451723337173462, "alphanum_fraction": 0.5579012632369995, "avg_line_length": 30.58823585510254, "blob_id": "0d1d92542ba875cae5b128fa74fa02d0e86a8ba3", "content_id": "4b7dad7d1aa80f447710166b6d77149776dfb750", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3221, "license_type": "no_license", "max_line_length": 75, "num_lines": 102, "path": "/positional.py", "repo_name": "renjuzac/Basic", "src_encoding": "UTF-8", "text": "import dateutil\nimport urllib\nimport n_calculator\n\nfrom datetime import date\n\ndef x_2_product(n):\n\tlevered2 = ['FSU', 'FSE', 'FSG', 'FSA', 'FOL', 'DAG', 'AGA', 'BDD', 'BOM',\n\t\t\t\t'DYY', 'DEE', 'OLO', 'SZO', 'DGP', 'DZZ', 'TBZ', 'UST', 'PST',\n\t\t\t\t'UBT', 'TBT', 'GDAY', 'CROC', 'UYM', 'SMN', 'UGE', 'SZK', 'UCC',\n\t\t\t\t'SCC', 'UCD', 'CMD', 'UCO', 'SCO', 'BOIL', 'KOLD', 'DDM', 'DXD',\n\t\t\t\t'ULE', 'EUO', 'UYG', 'SKF', 'XPP', 'FXP', 'UGL', 'GLL', 'RXL',\n\t\t\t\t'RXD', 'UJB', 'UXI', 'SIJ', 'IGU', 'KRU', 'KRS', 'MVV', 'MZZ',\n\t\t\t\t'UBR', 'BZQ', 'EFO', 'EFU', 'EET', 'EEV', 'UPV', 'EPV', 'EZJ',\n\t\t\t\t'EWV', 'UMX', 'SMK', 'BIB', 'BIS', 'DIG', 'DUG', 'UXJ', 'JPX',\n\t\t\t\t'QLD', 'QID', 'URE', 'SRS', 'UKW', 'SDK', 'UVU', 'SJL', 'UKF',\n\t\t\t\t'SFK', 'UVG', 'SJF', 'UWM', 'TWM', 'UKK', 'SKK', 'UVT', 'SJH',\n\t\t\t\t'UWC', 'TWQ', 'SSO', 'SDS', 'USD', 'SSG', 'AGQ', 'ZSL', 'SAA',\n\t\t\t\t'SDD', 'ROM', 'REW', 'LTL', 'TLL', 'TPS', 'UPW', 'SDP', 'UVXY',\n\t\t\t\t'YCL', 'YCS', 'RSU', 'RSW', 'DVYL', 'SDYL', 'EIPL', 'BDCL',\n\t\t\t\t'LCPR', 'SCPR', 'TVIX', 'TVIZ', 'LPLT', 'IPLT', 'LPAL', 'IPAL']\n\n\tif n in levered2:\n\t\tvap_2 = (vap / 2)\n\t\tprint round(vap_2, 0)\n\telse:\n\t\tprint round(vap, 0)\n\ndef x_3_product(n):\n\tlevered3 = ['COWL', 'COWS', 'FAS', 'FAZ', 'MIDU', 'MIDZ', 'TNA', 'TZA',\n\t\t\t\t'ERY', 'SPXL', 'SPXS', 'TECL', 'TECS', 'EDC', 'EDZ', 'DZK',\n\t\t\t\t'DRN', 'DRV', 'MATL', 'MATS', 'BRIL', 'BRIS', 'YINN', 'YANG',\n\t\t\t\t'NUGT', 'DUST', 'CURE', 'SICK', 'INDL', 'INDZ', 'LBJ', 'LHB',\n\t\t\t\t'GASL', 'GASX', 'RETL', 'RETS', 'RUSL', 'RUSS', 'SOXL', 'SOXS',\n\t\t\t\t'ERX', 'DPK', 'TYD', 'TYO', 'TMF', 'TMV', 'LBND', 'SBND', 'BUNT',\n\t\t\t\t'JGBD', 'ITLT', 'JGBT', 'UDNT', 'UUPT', 'TTT', 'UPRO', 'SPXU',\n\t\t\t\t'FINU', 'FINZ', 'TQQQ', 'SQQQ', 'UDOW', 'SDOW', 'UMDD', 'SMDD',\n\t\t\t\t'URTY', 'SRTY', 'UOIL', 'DOIL', 'UWTI', 'DWTI', 'UGLD', 'DGLD',\n\t\t\t\t'UGAZ', 'DGAZ', 'USLV', 'DSLV']\n\n\tif n in levered3:\n\t\tvap_3 = (vap / 3)\n\t\tprint round(vap_3, 0)\n\telse:\n\t\tx_2_product(n_calculator.symbol)\n\ndef vap_unit():\n\tglobal vap\n\tvap = ((ma_ri * equi)/vol) * .16667\n\tx_3_product(n_calculator.symbol)\n\treturn round(vap, 0)\n\ndef volatility():\n\tglobal vol\n\tvol = val_n * leverage\n\treturn vol\n\ndef equity_size():\n\tglobal equi\n\tprint \"Please enter the value of your portfolio.\"\n\tequi = float(raw_input(\"> \"))\n\treturn float(equi)\n\ndef max_risk():\n\tglobal ma_ri\n\tprint \"Please enter your risk threshold.\"\n\tprint \"For positional trades, risk threshold is capped at 2%.\"\n\tprint \"Please enter in decimal, .01, format.\"\n\tma_ri = (float(raw_input(\"> \")))\n\t\n\tif ma_ri > .02:\n\t\tprint \"The max risk threshold is too high.\"\n\t\tprint \"Adjust this risk to 2% or lower.\"\n\t\treturn max_risk()\n\t\t\n\telse:\n\t\treturn float(ma_ri)\n\ndef dollar_unit():\n\tglobal leverage\n\tprint \"Please enter the leverage of your instrument.\"\n\tprint \"Ex.: Stock = 1, Options = 100, etc.\"\n\tprint \"Note: Futures are not yet 
supported.\"\n\tleverage = float(raw_input(\"> \"))\n\n\t# if leverage == 1 or 100:\n\treturn float(leverage)\n\t# else:\n\t# \tprint \"Please enter either 1 or 100.\"\n\t# \tprint \"Currently only stocks and stock options are supported.\"\n\ndef start():\n\tglobal val_n\n\tprint \"We need some information in order to generate your position size.\"\n\tval_n = n_calculator.average_atr()\n\tequity_size()\n\tmax_risk()\n\tdollar_unit()\n\tvolatility()\n\tvap_unit()\n\nstart()" }, { "alpha_fraction": 0.5706264972686768, "alphanum_fraction": 0.5868794322013855, "avg_line_length": 36.19780349731445, "blob_id": "4aafb4bc1a88938a6f821981a06a1da79c995635", "content_id": "fff9bc4e0217cb9ae15f5e42863ce454c82a0935", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3384, "license_type": "no_license", "max_line_length": 85, "num_lines": 91, "path": "/n_calculator.py", "repo_name": "renjuzac/Basic", "src_encoding": "UTF-8", "text": "import math\nimport datetime\nimport ystockquote\n\ndef vol_check(n):\n\tvol_based = ['VXBB', 'IVOP', 'CVOL', 'VXAA', 'VZZB', 'VXX', 'XVZ', 'UVXY',\n\t\t\t\t'XIV', 'TVIX', 'VXZ', 'VIXY', 'VIXM', 'VIIX', 'XVIX', 'SVXY',\n\t\t\t\t'AAVX', 'BBVX', 'CCVX', 'DDVX', 'EEVX', 'FFVX', 'XXV', 'ZIV',\n\t\t\t\t'VXFF', 'VIIZ', 'VXEE', 'VXDD', 'VXCC', 'TVIZ']\n\n\tif n in vol_based:\n\t\tprint \"Volatility based instruments are currently not supported.\"\n\t\treturn average_atr()\n\telse:\n\t\tpass\n\ndef split_check(n):\n\t# if initial_li[-1] != initial_li[4]:\n\t# \tprint \"There was a recent split and that calculation is not currently supported.\"\n\t# else:\n\tpass\t\n\ndef symbol_check(n):\n\tsupported = ['AA', 'AXP', 'BA', 'BAC', 'CAT', 'CSCO', 'CVX', 'DD', 'SIRI',\n\t\t\t\t'DIS', 'GE', 'HD', 'HPQ', 'IBM', 'INTC', 'JNJ', 'JPM', 'SIAL',\n\t\t\t\t'KFT', 'KO', 'MCD', 'MMM', 'MRK', 'MSFT', 'PFE', 'PG', 'SHLD',\n\t\t\t\t'T', 'TRV', 'UTX', 'VZ', 'WMT', 'XOM', 'ATVI', 'ADBE', 'STX',\n\t\t\t\t'AKAM', 'ALXN', 'ALTR', 'AMZN', 'AMGN', 'APOL', 'AAPL', 'SNDK',\n\t\t\t\t'AMAT', 'ADSK', 'ADP', 'AVGO', 'BIDU', 'BBBY', 'BIIB', 'ROST',\n\t\t\t\t'BMC', 'BRCM', 'CHRW', 'CA', 'CELG', 'CERN', 'CHKP', 'RIMM',\n\t\t\t\t'CSCO', 'CTXS', 'CTSH', 'CMCSA', 'COST', 'DELL', 'XRAY',\n\t\t\t\t'DTV', 'DLTR', 'EBAY', 'EA', 'EXPE', 'EXPD', 'ESRX', 'FFIV',\n\t\t\t\t'FAST', 'FISV', 'FLEX', 'FOSL', 'GRMN', 'GILD', 'GOOG', 'GMCR',\n\t\t\t\t'HSIC', 'INFY', 'INTC', 'INTU', 'ISRG', 'KLAC', 'KFT', 'LRCX',\n\t\t\t\t'LINTA', 'LIFE', 'LLTC', 'MRVL', 'MAT', 'MXIM', 'MCHP', 'MU',\n\t\t\t\t'MSFT', 'MNST', 'MYL', 'NTAP', 'NFLX', 'NWSA', 'NUAN', 'NVDA',\n\t\t\t\t'ORLY', 'ORCL', 'PCAR', 'PAYX', 'PRGO', 'PCLN', 'QCOM', 'GOLD',\n\t\t\t\t'SPLS', 'SBUX', 'SRCL', 'SYMC', 'TXN', 'VRSN', 'VRTX', 'VIAB',\n\t\t\t\t'VMED', 'VOD', 'WCRX', 'WFM', 'WYNN', 'XLNX', 'YHOO']\n\n\tif n in supported:\n\t\tpass\n\telse:\n\t\tprint \"Only NASDAQ 100 & DOW 30 stocks are supported in the free version.\"\n\t\treturn average_atr()\n\t\ndef new_time(n):\n\tfmt = \"%Y%m%d\"\n\tend_date1 \t= datetime.datetime.strptime(n, fmt)\n\tsixty_day \t= datetime.timedelta(days=32)\n\tstart_date \t= end_date1 - sixty_day\n\tstart_date1 = str(start_date)\n\tstart_date2 = start_date1[:4] + start_date1[5:7] + start_date1[8:10]\n\treturn start_date2\n\ndef average_atr():\n\tglobal symbol\n\tprint \"Enter your ticker symbol: \"\n\tsymbol \t\t\t= str.upper(raw_input(\" \"))\n\n\tvol_check(symbol)\n\tsymbol_check(symbol)\n\n\tprint \"Enter the end date in (YYYYMMDD) format: \"\n\tend_date \t\t= raw_input(\" \")\n\tstart_date 
\t\t= new_time(end_date)\n\tinitial_li \t= ystockquote.get_historical_prices(symbol, start_date, end_date)\n\n\t# split_check(initial_list)\n\n\tinitial_adj \t= initial_li[1:]\n\ttable_adj\t\t= [[r[0], float(r[1]), float(r[2]), float(r[3]), \n\t\t\t\t\t\tfloat(r[4]), r[5], float(r[6])] for r in initial_adj]\n\t#Used specifically for calculating the Previous Day Close (PDC).\n\tpdc_li\t\t\t= table_adj[1:]\n\n\t#High - Low.\n\th_l = [r[2] - r[3] for r in table_adj]\n\t#High - PDC\n\th_pdc\t= [r[2] - j[6] for r, j in zip(table_adj, pdc_li)]\n\t#PDC - Low\n\tpdc_l\t= [j[6] - r[3] for j, r in zip(pdc_li, table_adj)]\n\t#As of 2012-08-10 Yahoo Finance is NOT generating consistent data. The\n\t#issue stems from a sync problem between different servers. There is no\n\t#ETA on a resolution of the problem. This will have some affect on the\n\t#output generated by the sizing algorithm.\n\n\ttrue_range \t= map(lambda x,y,z:max(x, y, z), h_l, h_pdc, pdc_l)\n\ttwenty_ave\t= sum(true_range[1:-1])/len(true_range[1:-1])\n\tn_unit\t\t= (19*twenty_ave+true_range[0])/20\n\treturn n_unit" } ]
4
raulezama/bookstore
https://github.com/raulezama/bookstore
26d67b1628796fa19fa7797c683cb4cd19c28184
18491b4e4474541b73d5a3e6c0f28f3798890aa7
c5cf3d36cea0b0c72a9bc508d547743f0107bc28
refs/heads/master
2021-06-23T20:40:57.365166
2017-07-18T21:26:23
2017-07-18T21:26:23
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6659402251243591, "alphanum_fraction": 0.701743483543396, "avg_line_length": 31.1200008392334, "blob_id": "7aa607417bed5259144a16d526b9895c5c2b07d9", "content_id": "325480bc8eff38d6c1a7f2790a490be1669aeb9b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3215, "license_type": "no_license", "max_line_length": 147, "num_lines": 100, "path": "/frontend.py", "repo_name": "raulezama/bookstore", "src_encoding": "UTF-8", "text": "from tkinter import *\nimport backend\n\ndef view_command():\n list1.delete(0,END) #se pone antes de que el for entre a la lista para que no se repita la operacion, si se pone despues, la lista se elimina.\n for row in backend.view():\n list1.insert(END, row) #\"\"\"pyinstaller --onefile --windowed frontend.py / INSTALAR PYINSTALLER\"\"\"\n\ndef search_command():\n list1.delete(0,END)\n for row in backend.search(entry_title.get(),entry_author.get(), entry_year.get(), entry_id.get()):\n list1.insert(END, row)\n\ndef add_command():\n backend.insert(entry_title.get(),entry_author.get(), entry_year.get(), entry_id.get())\n list1.delete(0,END) #limpia la lista\n list1.insert(END,entry_title.get(),entry_author.get(), entry_year.get(), entry_id.get())\n\ndef get_selected_row(event): #funcion para enlazar la accion de seleccionar lista con el boton delete\n global selected_tuple #Se declara global para poder usarse en la funcion delete\n index=list1.curselection()[0] #se ubica el cursor en la lista seleccionada , ID index de 0\n selected_tuple=list1.get(index) #Se extrae toda la informacion por el id\n e1.delete(0, END)\n e1.insert(END, selected_tuple[1]) #title index 1\n e2.delete(0, END)\n e2.insert(END, selected_tuple[2])\n e3.delete(0, END)\n e3.insert(END, selected_tuple[3])\n e4.delete(0, END)\n e4.insert(END, selected_tuple[4])\n\ndef delete_command():\n backend.delete(selected_tuple[0])\n\ndef update_command():\n backend.update(selected_tuple[0], entry_title.get(),entry_author.get(), entry_year.get(), entry_id.get())\n\n\nwindow= Tk()\n\nwindow.wm_title(\"BookStore\")\n\nla1= Label(window, text=\"Title\")\nla1.grid(row=0, column=0)\n\nla2= Label(window, text=\"Year\")\nla2.grid(row=1, column=0)\n\nla3= Label(window, text=\"Author\")\nla3.grid(row=0, column=2)\n\nla4= Label(window, text=\"ISBN\")\nla4.grid(row=1, column=2)\n\nentry_title=StringVar()\ne1= Entry(window, textvariable=entry_title)\ne1.grid(row=0, column=1)\n\nentry_author=StringVar()\ne2= Entry(window, textvariable=entry_author)\ne2.grid(row=0, column=3)\n\nentry_year=StringVar()\ne3= Entry(window, textvariable=entry_year)\ne3.grid(row=1, column=1)\n\nentry_id=StringVar()\ne4= Entry(window, textvariable=entry_id)\ne4.grid(row=1, column=3)\n\nlist1=Listbox(window, height=6, width=35)\nlist1.grid(row=2, column=0, rowspan=6, columnspan=2)\n\nscbar=Scrollbar(window)\nscbar.grid(row=2, column=2, rowspan=6)\n\nlist1.configure(yscrollcommand=scbar.set)\nscbar.configure(command=list1.yview)\n\nlist1.bind('<<ListboxSelect>>', get_selected_row) #Enlazar el scroll con la lista\n\nb1=Button(window, text=\"View all\", width=12, command=view_command)\nb1.grid(row=2, column=3)\n\nb2=Button(window, text=\"Search entry\", width=12, command=search_command)\nb2.grid(row=3, column=3)\n\nb3=Button(window, text=\"Add entry\", width=12, command=add_command)\nb3.grid(row=4, column=3)\n\nb4=Button(window, text=\"Update selected\", width=12, command=update_command)\nb4.grid(row=5, column=3)\n\nb5=Button(window, text=\"Delete selected\", width=12, 
command=delete_command)\nb5.grid(row=6, column=3)\n\nb6=Button(window, text=\"Close\", width=12, command=window.destroy)\nb6.grid(row=7, column=3)\n\nwindow.mainloop()\n" }, { "alpha_fraction": 0.6328893899917603, "alphanum_fraction": 0.6496815085411072, "avg_line_length": 33.21154022216797, "blob_id": "ceb7a7d6487a9e88a4af41c536377e70062339b3", "content_id": "8d8ac672747c719e30e390d0bc1156ecdeddb85d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1727, "license_type": "no_license", "max_line_length": 150, "num_lines": 52, "path": "/backend.py", "repo_name": "raulezama/bookstore", "src_encoding": "UTF-8", "text": "import sqlite3\n\ndef connect_db():\n    conn= sqlite3.connect(\"books.db\")\n    cur= conn.cursor()\n    cur.execute(\"CREATE TABLE IF NOT EXISTS book (id INTEGER PRIMARY KEY, title TEXT, author TEXT, year INTEGER, isbn INTEGER)\")\n    conn.commit()\n    conn.close()\n\ndef insert(title, author, year, isbn):\n    conn= sqlite3.connect(\"books.db\")\n    cur= conn.cursor()\n    cur.execute(\"INSERT INTO book VALUES (NULL, ?, ?, ?, ?) \", (title, author, year, isbn))\n    conn.commit()\n    conn.close()\n\ndef view():\n    conn= sqlite3.connect(\"books.db\")\n    cur= conn.cursor()\n    cur.execute(\"SELECT * FROM book\")\n    rows=cur.fetchall()\n    conn.close()\n    return rows\n\ndef search(title=\"\", author=\"\", year=\"\", isbn=\"\"): #search parameters are passed for the filter, with empty strings as defaults so no error is returned\n    conn=sqlite3.connect(\"books.db\")\n    cur=conn.cursor()\n    cur.execute(\"SELECT * FROM book WHERE title=? OR author=? OR year=? OR isbn=?\", (title, author, year, isbn))\n    rows=cur.fetchall()\n    conn.close()\n    return rows\n\ndef delete(id): #the id parameter is passed since the record will be deleted by that argument\n    conn= sqlite3.connect(\"books.db\")\n    cur= conn.cursor()\n    cur.execute(\"DELETE FROM book WHERE id=?\",(id,))\n    conn.commit()\n    conn.close()\n\ndef update(id, title, author, year, isbn):\n    conn= sqlite3.connect(\"books.db\")\n    cur= conn.cursor()\n    cur.execute(\"UPDATE book SET title=?, author=?, year=?, isbn=? WHERE id=?\",(title, author, year, isbn, id))\n    conn.commit()\n    conn.close()\n\nconnect_db()\n#insert(\"The Lord of the Rings\", \"J.R.R Tolkien\", 1942, 3344348)\n#delete(3)\n#update(2, \"The lord of the rings\", \"JRR Tolkien\", 1956, 64646)\n#print(view())\n#print(search(\"The Vampire Diaries\"))" } ]
2
bates-hu/soda
https://github.com/bates-hu/soda
d41b83396519adefd747c60591d41bb23021bf35
07d895c28aca68a85f5d300ba2906515fa687979
0b4f0e490410d8c562050e64da98b8ceb52513ec
refs/heads/master
2021-05-31T04:59:13.939454
2015-09-13T09:10:29
2015-09-13T09:10:29
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6105407476425171, "alphanum_fraction": 0.6266255974769592, "avg_line_length": 31.422222137451172, "blob_id": "754db785b309f0937143c5e2b4b4338229af6f17", "content_id": "ba0651f8e9170a06530730ac1102bf8aa27522aa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3488, "license_type": "no_license", "max_line_length": 102, "num_lines": 90, "path": "/soda_accident.py", "repo_name": "bates-hu/soda", "src_encoding": "UTF-8", "text": "# coding=utf-8\nimport time\nimport re\nimport urllib2\nimport os\nimport json\n\n\nkey = 'ak=XoGwGhTaucOT9P95tnFXVOXm'\ncity = 'city=上海'\npath = '/home/arnold-hu/project/soda/accident.txt'\nresult_path = '/home/arnold-hu/project/soda/location_accident.txt' # 用于存放增加经纬度信息并且剔除经纬度无效的数据,保存的信息最完整\nnumber_data_path = '/home/arnold-hu/project/soda/number_data_accident.txt' # 用于存放全数字化信息\nmap_data_path = '/home/arnold-hu/project/soda/map_data.txt' #用于存放将要提交给百度地图api的json信息\ncount = 0\npoints = [] \n\n#输入精度系数,地图将会按index的数量把地图均等分割,然后把每一块的事故集中到中点\nindex = raw_input('请输入地图分割精度(数字越大地图分割越细): ')\nindex = int(index)\n#确定上海市经纬度范围并求出各个grid中点坐标\nlllat =30.6 \nurlat=32 \nlllon=120.9\nurlon=122\nlats = [lllat + (urlat - lllat)*(x+0.5)/index for x in range(index)]\nlons = [lllon + (urlon - lllon)*(x+0.5)/index for x in range(index)]\ncenters = [(x,y) for x in lons for y in lats]\nlon_all = [x[0] for x in centers]\nlat_all = [x[1] for x in centers]\nsize = [0] * (index * index)\n\n#创建或覆盖原有的number_data_accdent.txt用来储存全数字化数据,并给其增加头部索引以便后来分析\nwith open(number_data_path, 'w+') as number:\n\thead = ','.join(['id', 'accident_id', 'flag', 'date', 'time', 'lng', 'lat'])\n\th=number.write(head + '\\n')\n\n#主要操作\ntry:\n\tos.remove(result_path) # 删除原文件\nfinally:\n\twith open(path, 'r') as f:\n\t\tfor line in f.readlines()[1:]:\n\t\t\tline = line.strip('\\n')\n\t\t\taddr = line.split(',')[3]\n\t\t\taddress = addr.split('约')[0]\n\t\t\tadd = 'address=' + address\n\t\t\treq = 'http://api.map.baidu.com/geocoder/v2/?' 
+ '&'.join([key, add, city]) \n\t\t\tweb = urllib2.urlopen(req)\n\t\t\twebdata = web.readlines()\n\t\t\ttry:\n\t\t\t\tlat = re.search('\\d+\\.\\d+', webdata[6]).group()\n\t\t\t\tlng = re.search('\\d+\\.\\d+', webdata[7]).group()\n\t\t\t\tlines = line.split(',')\n\t\t\t\tdate = lines[4].split(' ')[0]\n\t\t\t\ttime = lines[4].split(' ')[1]\n\t\t\t\tcount = count + 1\n\t\t\t\t#向location_accident.txt填充数据\n\t\t\t\twith open(result_path, 'a+') as result:\n\t\t\t\t\tr = line + ',' + lng + ',' + lat \n\t\t\t\t\tresult.write(str(count) + ',' +r + '\\n')\n\t\t\t\t#向number_data_accident.txt填充数据\n\t\t\t\twith open(number_data_path, 'a+') as number:\n\t\t\t\t\t#用flg表示事故类型,1为死亡,2为伤人,3为财产损失或其他\n\t\t\t\t\tif lines[2] == '死亡事故': \n\t\t\t\t\t\tflag =1\n\t\t\t\t\telif lines[2] == '伤人事故': \n\t\t\t\t\t\tflag =2\n\t\t\t\t\telse:\n\t\t\t\t\t\tflag = 3\n\t\t\t\t\tnumber_line = ','.join([str(count), lines[1], str(flag), date, time, str(lng), str(lat)])\n\t\t\t\t\tnumber.write(number_line + '\\n')\n\t\t\t\t# 如果事故点在上海市范围,则所属grid的计数加1\n\t\t\t\tif (float(lng) <= urlon and float(lng) >= lllon and float(lat) <= urlat and float(lat) >= lllat):\n\t\t\t\t\tlat_shift = int( (float(lng) - lllon) / ((urlon - lllon) / index)) \n\t\t\t\t\tlon_shift = int( (float(lat) - lllat) / ((urlat - lllat) / index )) \n\t\t\t\t\tsize[index * lat_shift + lon_shift] += 1\n\n\t\t\texcept AttributeError: # 若生成经纬度的api出现错误,或者时间信息不全,则pass此记录\n\t\t\t\tpass\n\t\t\texcept IndexError:\n\t\t\t\tpass\n\n\n#生成points变量,转化为json格式,并写入文件\nfor i in range(len(lon_all)):\n\tpoints.append({\"lng\":lon_all[i], \"lat\":lat_all[i], \"count\":size[i]*5})\npoints = json.dumps(points, indent=4)\nwith open(map_data_path, 'w+') as md:\n\tmd.write(points)\n\n\n\n\n" }, { "alpha_fraction": 0.6080267429351807, "alphanum_fraction": 0.6428093910217285, "avg_line_length": 24.758621215820312, "blob_id": "1eca62e34314336b8772e7e4077920cd56a1b903", "content_id": "1006adfb4c8a1a688b72c5097b72ea6147336c87", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1495, "license_type": "no_license", "max_line_length": 115, "num_lines": 58, "path": "/map.py", "repo_name": "bates-hu/soda", "src_encoding": "UTF-8", "text": "#coding:utf-8\n\n\nfrom mpl_toolkits.basemap import Basemap,cm\nimport matplotlib.pyplot as plt\nimport random\nimport json\npath = '/home/arnold-hu/project/soda/number_data_accident.txt'\ndef basic_shanghai_map(ax=None, lllat = 31, urlat=32, lllon=121, urlon=122,width=None,height=None):\n m = Basemap(ax=ax, projection='cyl',\n lon_0=(urlon + lllon)/2,\n lat_0=(urlat + lllat)/2,\n llcrnrlat=lllat, urcrnrlat=urlat,\n llcrnrlon=lllon, urcrnrlon=urlon,\n resolution='f', width=None,\n height=None)\n m.drawcoastlines()\n m.drawstates()\n m.drawcountries()\n m.fillcontinents(color='0.95')\n return m\n# data = []\nlllat =30.6 \nurlat=32 \nlllon=120.9\nurlon=122\nwidth = (urlat - lllat) * 300\nheight = (urlon - lllon) * 300\n\nindex = 25\nlons=[]\nlats=[]\n\n\nmaptry = basic_shanghai_map(ax=None, lllat=lllat, urlat=urlat, lllon=lllon, urlon=urlon,width=width,height=height)\nwith open(path) as d:\n\tfor line in d.readlines()[1:]:\n\t\tline = line.strip('\\n')\n\t\tline = line.split(',')\n\t\tlon = float(line[5])\n\t\tlat = float(line[6])\n\t\tlons.append(lon)\n\t\tlats.append(lat)\n\t\t\n\n\t\t# data.append(random.randint(1,10))\nx,y = maptry(lons, lats)\n#fix_size = [i * 200 / index for i in size]\n\n\n\nmaptry.scatter(x, y , 5, zorder=10)\n\nroad_path = '/home/arnold-hu/map/CHN_roads'\n# boundary_path = 
'/home/arnold-hu/map/bou2_4p'\nmaptry.readshapefile(road_path, 'roads')\n# maptry.readshapefile(boundary_path, 'boundary')\nplt.show()\n\n" }, { "alpha_fraction": 0.5646551847457886, "alphanum_fraction": 0.5804597735404968, "avg_line_length": 28.04166603088379, "blob_id": "202abfed67d7d6cc09e43f9076a099e", "content_id": "662cb006769a7d621af3aceecf73fad4b717eea5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 712, "license_type": "no_license", "max_line_length": 79, "num_lines": 24, "path": "/squez_number.py", "repo_name": "bates-hu/soda", "src_encoding": "UTF-8", "text": "# coding=utf-8\nimport os\n\npath = '/home/arnold-hu/project/soda/location_accident.txt'\nnew_path = '/home/arnold-hu/project/soda/number_data_accident.txt'\ntry:\n\tos.remove(new_path)\nfinally:\n\twith open(new_path, 'a+') as number:\n\t\thead = ','.join(['id', 'accident_id', 'flag', 'time', 'lat', 'lng'])\n\t\th=number.write(head + '\\n')\n\twith open(path, 'r') as f:\n\t\tfor line in f.readlines():\n\t\t\tline = line.strip('\\n')\n\t\t\twith open(new_path, 'a+') as number:\n\t\t\t\tline = line.split(',')\n\t\t\t\tif line[3] == '死亡事故': \n\t\t\t\t\tflag =1\n\t\t\t\telif line[3] == '伤人事故': \n\t\t\t\t\tflag =2\n\t\t\t\telse:\n\t\t\t\t\tflag = 3\n\t\t\t\trecord = ','.join([line[0], line[2], str(flag), line[5], line[6], line[7]])\n\t\t\t\tnumber.write(record + '\\n')" } ]
3
mpHarm88/cs-module-project-algorithms
https://github.com/mpHarm88/cs-module-project-algorithms
741b1052347e5ada50a5408feced70e43bba832d
752366cfabdea7b2ddffa87e9b6adb5411659522
d2a3e20c777e20e52dfd6bc10d1e3702032dc779
refs/heads/master
2022-11-30T17:53:53.266664
2020-08-13T03:26:44
2020-08-13T03:26:44
286,894,520
0
0
null
2020-08-12T02:18:02
2020-07-17T18:21:41
2020-08-11T04:15:33
null
[ { "alpha_fraction": 0.535315990447998, "alphanum_fraction": 0.5576208233833313, "avg_line_length": 25.950000762939453, "blob_id": "a1829dd23d749e277c1f9a1bb267ca15bbe3d441", "content_id": "a2312064d4882a8c44099c08a83691b54f6fc091", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 538, "license_type": "no_license", "max_line_length": 67, "num_lines": 20, "path": "/single_number/single_number.py", "repo_name": "mpHarm88/cs-module-project-algorithms", "src_encoding": "UTF-8", "text": "'''\nInput: a List of integers where every int except one shows up twice\nReturns: an integer\n'''\ndef single_number(arr):\n \n # If x is in the list then remove both items, else return True\n for x in range(len(arr)):\n x = arr.pop(0)\n if x in arr:\n y = arr.index(x)\n arr.pop(y)\n else:\n return x\n\nif __name__ == '__main__':\n # Use the main function to test your implementation\n arr = [1, 1, 4, 4, 5, 5, 3, 3, 9, 0, 0]\n\n print(f\"The odd-number-out is {single_number(arr)}\")" }, { "alpha_fraction": 0.5401606559753418, "alphanum_fraction": 0.5542168617248535, "avg_line_length": 21.68181800842285, "blob_id": "8412267955a278be12253c86c0dd833beb636e30", "content_id": "237235d3775954e7fcca676b64a92ad624edb291", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 498, "license_type": "no_license", "max_line_length": 69, "num_lines": 22, "path": "/moving_zeroes/moving_zeroes.py", "repo_name": "mpHarm88/cs-module-project-algorithms", "src_encoding": "UTF-8", "text": "'''\nInput: a List of integers\nReturns: a List of integers\n'''\ndef moving_zeroes(arr):\n \n ls = []\n \n # append to ls if zero and isnert at start if not\n for x in range(len(arr)):\n if arr[x] == 0:\n ls.append(arr[x])\n else:\n ls.insert(0, arr[x])\n\n return ls\n\nif __name__ == '__main__':\n # Use the main function here to test out your implementation\n arr = [0, 3, 1, 0, -2]\n\n print(f\"The resulting of moving_zeroes is: {moving_zeroes(arr)}\")" } ]
2
rmolines/2d-sim
https://github.com/rmolines/2d-sim
b09b83d65ebddb9d2d4f4b996c850aaae26b237d
5cfa23a355a209244cd3ef0c8ed342aca8c7d825
3b5dc53329a04f9fac4c4ea3f89e907c45df0ea8
refs/heads/master
2020-03-27T04:04:14.868477
2018-10-22T17:50:55
2018-10-22T17:50:55
145,909,844
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6253348588943481, "alphanum_fraction": 0.646766185760498, "avg_line_length": 26.21875, "blob_id": "85e736c7aceecc669fbb4aacbc99e83464318d2c", "content_id": "cd1d2d406ed6e4908abe74a4f12fa6b73cfbdfcf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2613, "license_type": "no_license", "max_line_length": 68, "num_lines": 96, "path": "/visualizador/main.cpp", "repo_name": "rmolines/2d-sim", "src_encoding": "UTF-8", "text": "/*\n * Copyright (c) 2018 Igor Montagner igordsm@gmail.com\n *\n * Permission is hereby granted, free of charge, to any person\n * obtaining a copy of this software and associated documentation\n * files (the \"Software\"), to deal in the Software without\n * restriction, including without limitation the rights to use,\n * copy, modify, merge, publish, distribute, sublicense, and/or sell\n * copies of the Software, and to permit persons to whom the\n * Software is furnished to do so, subject to the following\n * conditions:\n *\n * The above copyright notice and this permission notice shall be\n * included in all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES\n * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\n * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR\n * OTHER DEALINGS IN THE SOFTWARE.\n */\n\n#include \"visualizador.h\"\n\n#include <vector>\n#include <random>\n#include <iostream>\n#include <string>\n\n#define XB 2\n#define YB 1\n#define FW 10000\n#define FH 10000\n#define RADIUS 140\n\n\nvoid read_global(simul *sim){\n double w, h, n, mu, alpha_w, alpha_b;\n if (!(std::cin >> w >> h >> n)) { throw 1; } // error\n if (!(std::cin >> mu >> alpha_w >> alpha_b)) { throw 1; } // error\n *sim = { w, h, mu, n, alpha_w, alpha_b };\n}\n\nvoid read_balls(double n, std::vector<ball> &balls){ \n\n for (int i=0; i<int(n); i++){\n double id, raio, massa, x0, y0, vx0, vy0;\n std::cin >> id >> raio >> massa >> x0 >> y0 >> vx0 >> vy0;\n ball ball = { id, raio, massa, x0, y0, vx0, vy0};\n balls[int(i)] = ball;\n } \n}\n\nint main(int argc, char ** argv) {\n\n std::string arg0 = argv[1];\n std::string arg1 = argv[2];\n std::string arg2 = argv[3];\n std::string arg3 = argv[4];\n\n int gui = std::stoi(arg1);\n std::string s = arg2;\n double delta_t = std::stod(arg0);\n int model = std::stoi(arg3);\n\n std::default_random_engine gen;\n std::uniform_real_distribution<double> dist(-1.0,1.0);\n std::uniform_real_distribution<double> mass(1, 10); \n \n\n simul sim;\n read_global(&sim);\n sim.s = s;\n sim.gui = gui;\n sim.model = model;\n std::vector<ball> balls(sim.n);\n\n read_balls(sim.n, balls);\n\n\n Visualizador v(balls, sim.w, sim.h, delta_t, sim);\n \n if (gui == 0){\n v.gui_run();\n \n } else if (gui == 1) {\n v.run();\n } else {\n v.results();\n }\n\n return 0; \n}\n" }, { "alpha_fraction": 0.6893865466117859, "alphanum_fraction": 0.7059396505355835, "avg_line_length": 31.0625, "blob_id": "5203a466cbeef1d2708eae3500b45f482a815516", "content_id": "4b9d962d8b1e2ba85d5cb0acc4b8b75902408694", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "CMake", "length_bytes": 1027, "license_type": "no_license", "max_line_length": 83, 
"num_lines": 32, "path": "/visualizador/CMakeLists.txt", "repo_name": "rmolines/2d-sim", "src_encoding": "UTF-8", "text": "project(visualizador)\ncmake_minimum_required(VERSION 2.8)\n\n# Point to our own cmake modules\nlist(APPEND CMAKE_MODULE_PATH ${CMAKE_SOURCE_DIR}/CMakeModules)\n\nfind_package(PkgConfig REQUIRED)\n# Find SDL2\n\npkg_search_module(SDL2 REQUIRED sdl2)\npkg_search_module(SDL2_gfx REQUIRED SDL2_gfx)\n\n# Add global definitions\nadd_definitions(\"-Wall\")\ninclude_directories(${SDL2_INCLUDE_DIR} ${SDL2_gfx_INCLUDE_DIR}) \n\nadd_executable(visualizador main.cpp visualizador.cpp)\ntarget_link_libraries(visualizador ${SDL2_LIBRARIES} ${SDL2_gfx_LIBRARIES})\n\nif(\"${STRATEGY} \" STREQUAL \"omp \")\n SET(CMAKE_CXX_FLAGS \" -fopenmp -O2 -Wall\")\n message(\"OMP\")\nelseif(\"${STRATEGY} \" STREQUAL \"seq \")\n message(\"SEQ\")\n SET(CMAKE_CXX_FLAGS \" -Wall -O2\")\nelseif(\"${STRATEGY} \" STREQUAL \"best \")\n message(\"BEST\")\n SET(CMAKE_CXX_FLAGS \" -ffast-math -ftree-vectorize -mavx2 -O2 -fopenmp -Wall\")\nelseif(\"${STRATEGY} \" STREQUAL \"simd \")\n message(\"SIMD\")\n SET(CMAKE_CXX_FLAGS \" -ffast-math -ftree-vectorize -mavx2 -O2 -Wall\")\nENDIF()\n\n" }, { "alpha_fraction": 0.7223219871520996, "alphanum_fraction": 0.7247157096862793, "avg_line_length": 30.528301239013672, "blob_id": "fb1c68ec60fdebde87abfe9ccf5f425da935f7ec", "content_id": "9e5c1d45e8ec48d6562ebfc125d3240cf9497637", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1676, "license_type": "no_license", "max_line_length": 77, "num_lines": 53, "path": "/visualizador/body.h", "repo_name": "rmolines/2d-sim", "src_encoding": "UTF-8", "text": "/*\n * Copyright (c) 2018 Igor Montagner igordsm@gmail.com\n *\n * Permission is hereby granted, free of charge, to any person\n * obtaining a copy of this software and associated documentation\n * files (the \"Software\"), to deal in the Software without\n * restriction, including without limitation the rights to use,\n * copy, modify, merge, publish, distribute, sublicense, and/or sell\n * copies of the Software, and to permit persons to whom the\n * Software is furnished to do so, subject to the following\n * conditions:\n *\n * The above copyright notice and this permission notice shall be\n * included in all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES\n * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\n * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR\n * OTHER DEALINGS IN THE SOFTWARE.\n */\n\n\n/* Este arquivo deve conter a definição da sua estrutura/classe que\n * representa um corpo na simulação. A estrutura abaixo é somente um exemplo \n * que deve ser alterado para que seu programa seja integrado com o \n * visualizador. 
\n */\n\n#include <string>\n\ntypedef struct _ball ball;\n\nstruct _ball {\n int id;\n double radius, mass;\n double x, y;\n double vx, vy;\n double dist;\n ball *ball_col;\n};\n\n\n\ntypedef struct _sim simul;\nstruct _sim {\n double w, h, mu, n, alpha_w, alpha_b;\n int gui, model;\n std::string s, argv;\n};\n" }, { "alpha_fraction": 0.5134478211402893, "alphanum_fraction": 0.5343486666679382, "avg_line_length": 28.18203353881836, "blob_id": "ce20e76bb2566a87fb8d45bc0aa9f76d906eae03", "content_id": "9b6b9e0df5a5694d9ff45cbcc1f020d901a979f3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 12344, "license_type": "no_license", "max_line_length": 134, "num_lines": 423, "path": "/visualizador/visualizador.cpp", "repo_name": "rmolines/2d-sim", "src_encoding": "UTF-8", "text": "/*\n * Copyright (c) 2018 Igor Montagner igordsm@gmail.com\n *\n * Permission is hereby granted, free of charge, to any person\n * obtaining a copy of this software and associated documentation\n * files (the \"Software\"), to deal in the Software without\n * restriction, including without limitation the rights to use,\n * copy, modify, merge, publish, distribute, sublicense, and/or sell\n * copies of the Software, and to permit persons to whom the\n * Software is furnished to do so, subject to the following\n * conditions:\n *\n * The above copyright notice and this permission notice shall be\n * included in all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES\n * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\n * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR\n * OTHER DEALINGS IN THE SOFTWARE.\n */\n\n#include \"visualizador.h\"\n#include \"SDL2/SDL.h\"\n#include \"SDL2/SDL2_gfxPrimitives.h\"\n\n#include <stdio.h>\n#include <future>\n#include <chrono>\n#include <unistd.h>\n#include \"math.h\"\n#include <omp.h>\n#include <signal.h> \n#include <time.h> \n#include <iostream>\n#include <fstream>\n#include <ctime>\n#include <ratio>\n#include <string>\n\ntypedef std::chrono::high_resolution_clock Time;\n\nint NUM_SAMPLES = 100;\n\nVisualizador::Visualizador(std::vector<ball> &bodies, int field_width, int field_height, double delta_t, simul &sim) :\n delta_t(delta_t),\n field_width(field_width),\n field_height(field_height),\n bodies(bodies),\n sim(sim) {\n \n if(sim.gui == 1) {\n SDL_Init(SDL_INIT_VIDEO | SDL_INIT_EVENTS);\n double ratio = (double) field_width / field_height;\n if (ratio > 1) {\n win_width = max_dimension;\n win_height = max_dimension / ratio;\n } else {\n win_width = max_dimension * ratio;\n win_height = max_dimension;\n }\n win = SDL_CreateWindow(\"Visualizador SUPERCOMP\", SDL_WINDOWPOS_CENTERED,\n SDL_WINDOWPOS_CENTERED, win_width, win_height, 0);\n renderer = SDL_CreateRenderer(win, -1, SDL_RENDERER_ACCELERATED);\n }\n\n\n std::vector<ball> collided(bodies.size());\n\n\n iter = 0;\n}\n\nVisualizador::~Visualizador() {\n if (sim.gui == 1){\n SDL_DestroyRenderer(renderer);\n SDL_DestroyWindow(win);\n SDL_Quit();\n\n }\n}\n\n\nvoid Visualizador::draw() {\n SDL_SetRenderDrawColor(renderer, 117, 117, 117, 255);\n SDL_RenderClear(renderer);\n for (auto i = bodies.begin(); i != bodies.end(); i++) {\n 
filledCircleRGBA(renderer, i->x / field_width * win_width,\n i->y / field_height* win_height,\n i->radius / field_width * win_width,\n 57, 73, 171, 255);\n }\n SDL_RenderPresent(renderer);\n}\n\n\nvoid Visualizador::run() {\n SDL_ShowWindow(win);\n draw();\n while (!SDL_QuitRequested()) {\n do_iteration(); \n draw();\n }\n}\n\nvoid handler(int sig)\n{\n printf(\"singal %d\\n\", sig);\n exit(1);\n}\n\n\n\nvoid Visualizador::gui_run() {\n struct sigaction act;\n act.sa_handler = handler;\n sigemptyset(&act.sa_mask);\n act.sa_flags = 0;\n sigaction(SIGINT, &act, 0);\n \n \n std::chrono::duration<double> timer;\n double sec = 0;\n Time::time_point t2 = Time::now(); \n Time::time_point t1 = t2;\n\n while (1) {\n Time::time_point t2 = Time::now();\n timer = std::chrono::duration_cast<std::chrono::duration<double>> (t2-t1);\n\n if (timer.count() > 1){\n sec+=1;\n t1 = t2;\n for (int id=0; id<(int) bodies.size(); id++){\n printf(\"id=%d raio=%f massa=%f x=%d y=%d vx=%f vy=%f sec=%fs \\n\", bodies[id].id, bodies[id].radius, bodies[id].mass, \\\n int(bodies[id].x), int(bodies[id].y), bodies[id].vx, \\\n bodies[id].vy, sec);\n }\n }\n do_iteration();\n }\n}\n\n\nvoid Visualizador::results(){\n std::chrono::duration<double> sum;\n int counter = 0;\n std::string filename = \"./results/-s=\" + sim.s + \" -gui=\" + std::to_string(sim.gui) + \" -model=\" + std::to_string(sim.model) + \\ \n + \" -n_balls=\" + std::to_string(bodies.size()) + \".csv\";\n std::ofstream myfile;\n myfile.open (filename);\n\n\n for (int i=0; i<NUM_SAMPLES; i++){\n auto start = Time::now();\n do_iteration();\n auto finish = Time::now();\n counter++;\n sum = (finish-start);\n myfile << sum.count() << \"\\n\";\n printf(\"%fs - iteration=%d\\n\", sum.count(), i);\n }\n myfile.close();\n}\n\n\n\nvoid Visualizador::update_pos(std::vector<ball> &balls){\n #pragma omp parallel for\n for (int i=0; i<int(bodies.size()); i++){\n balls[i].x+=balls[i].vx*delta_t;\n balls[i].y+=balls[i].vy*delta_t;\n }\n}\n\nvoid Visualizador::update_v(std::vector<ball> &balls){\n #pragma omp parallel for\n\n for (int i=0; i<int(bodies.size()); i++){\n double mod = calc_mod(balls[i].vx, balls[i].vy);\n double theta = calc_theta(balls[i].vx, balls[i].vy, mod);\n\n double ax = (sim.mu*10*delta_t) * cos(theta);\n double ay = -(sim.mu*10*delta_t) * sin(theta); \n \n double vx = (mod) * cos(theta);\n double vy = (mod) * sin(theta); \n\n\n if (abs(balls[i].vx - ax) < 0) {\n balls[i].vx = 0;\n } else {\n balls[i].vx -= ax;\n }\n\n if (abs(balls[i].vy - ay) < 0) {\n balls[i].vy = 0;\n } else {\n balls[i].vy -= ay;\n }\n }\n}\n\nvoid Visualizador::check_boundaries(){\n #pragma omp parallel for\n\n for (int i=0; i<int(bodies.size()); i++){\n if((bodies[i].x-bodies[i].radius)<0 || (bodies[i].x+bodies[i].radius)>field_width){\n if ((bodies[i].x-bodies[i].radius)<0){\n bodies[i].x=bodies[i].radius; \n } else {\n bodies[i].x=field_width-bodies[i].radius;\n }\n \n bodies[i].vx*=-1;\n }\n if((bodies[i].y-bodies[i].radius)<0 || (bodies[i].y+bodies[i].radius)>field_height){ \n if ((bodies[i].y-bodies[i].radius)<0){\n bodies[i].y=bodies[i].radius; \n } else {\n bodies[i].y=field_height-bodies[i].radius;\n }\n bodies[i].vy*=-1;\n }\n }\n}\n\ndouble Visualizador::calc_dist(ball ball1, ball ball2){\n \n double x1 = ball1.x+ball1.vx*delta_t;\n double y1 = ball1.y+ball1.vy*delta_t;\n double x2 = ball2.x+ball2.vx*delta_t;\n double y2 = ball2.y+ball2.vy*delta_t;\n\n\n double dist = sqrt((pow((x2-x1),2) + pow((y2-y1), 2)));\n \n return dist;\n}\n\nvoid 
Visualizador::check_collision(){\n #pragma omp parallel for\n\n for (int i=0; i<int(bodies.size()); i++){\n ball *ball1 = &bodies[i];\n ball1->ball_col = NULL;\n for (int j=0; j<int(bodies.size()); j++){\n if (j != i) {\n ball ball2 = bodies[j];\n\n double dist = calc_dist(*ball1, ball2);\n double min_dist = ball1->radius + ball2.radius;\n if (abs(dist) < abs(min_dist)){\n if (!ball1->ball_col){\n ball *ball_col = &ball2;\n ball1->ball_col = &bodies[j];\n ball1->dist = dist;\n } else {\n if (ball1->dist > dist){\n ball *ball_col = &ball2;\n ball1->ball_col = &bodies[j];\n ball1->dist = dist;\n }\n }\n }\n }\n }\n }\n}\n\ndouble Visualizador::calc_mod(double vx, double vy){\n double mod = sqrt(pow(vx,2) + pow(vy, 2));\n return mod;\n}\n\ndouble Visualizador::calc_theta(double vx, double vy, double mod){\n double theta;\n double x = vx/mod;\n if (x<=-1){\n x = -1;\n } else if (x>=1){\n x = 1;\n }\n if (vy < 0){\n theta = acos (x);\n } else {\n theta = 2*M_PI - acos (x);\n }\n return theta;\n}\n\ndouble Visualizador::calc_delta_theta(ball &ball1, ball &ball2){\n double delta_x = (ball2.x) - (ball1.x);\n double delta_y = (ball2.y) - (ball1.y);\n\n double mod1 = calc_mod(ball1.vx, ball1.vy);\n double mod2 = calc_mod(delta_x, delta_y);\n double theta1 = calc_theta(ball1.vx, ball1.vy, mod1);\n double theta2 = calc_theta(delta_x, delta_y, mod2);\n\n double delta_theta = (theta2-theta1);\n\n return delta_theta;\n}\n\n\nvoid Visualizador::death_func(ball *ballp1, ball *ballp2){\n ball *ball1 = ballp1;\n ball *ball2 = ballp1->ball_col;\n\n double delta_x = (ball2->x) - (ball1->x);\n double delta_y = (ball2->y) - (ball1->y);\n\n double mod1 = calc_mod(ball1->vx, ball1->vy);\n double mod2 = calc_mod(delta_x, delta_y);\n double theta2 = calc_theta(delta_x, delta_y, mod2);\n\n double delta_theta = calc_delta_theta(*ball1, *ball2);\n\n\n double new_vx, new_vy, new_theta;\n\n if (delta_theta >= 0) {\n double new_theta = theta2 + (M_PI - delta_theta);\n new_vx = mod1 * cos (new_theta);\n new_vy = -mod1 * sin (new_theta);\n } else {\n new_theta = theta2 + (M_PI + delta_theta);\n new_vx = mod1 * cos (new_theta);\n new_vy = -mod1 * sin (new_theta);\n }\n\n\n ballp1->vx = new_vx;\n ballp1->vy = new_vy;\n}\n\n\nvoid Visualizador::modelo2(ball *ballp1, ball *ballp2){\n ball *ball1 = ballp1;\n ball *ball2 = ballp1->ball_col;\n\n double delta_x = ((ball2->x) - (ball1->x));\n double delta_y = ((ball2->y) - (ball1->y));\n double phi = atan2(delta_y, delta_x);\n double m1 = ball1->mass;\n double m2 = ball2->mass;\n double v1 = calc_mod(ball1->vx, ball1->vy);\n double v2 = calc_mod(ball2->vx, ball2->vy);\n double theta1 = calc_theta(ball1->vx, ball1->vy, v1);\n double theta2 = calc_theta(ball2->vx, ball2->vy, v2);\n\n double vx_linha1 = ((v1*cos(theta1-phi)*(m1-m2)+(2*m2*v2*cos(theta2-phi)))/(m1+m2))*cos(phi)+v1*-sin(theta1-phi)*-sin(phi); \n double vy_linha1 = ((v1*cos(theta1-phi)*(m1-m2)+2*m2*v2*cos(theta2-phi))/(m1+m2))*-sin(phi)+v1*-sin(theta1-phi)*cos(phi);\n\n\n ballp1->vx = vx_linha1;\n ballp1->vy = vy_linha1;\n\n}\n\nvoid Visualizador::elastic_collision(ball *ballp1, ball *ballp2){\n ball *ball1 = ballp1;\n ball *ball2 = ballp1->ball_col;\n\n double delta_x = ((ball1->x) - (ball2->x));\n double delta_y = ((ball1->y) - (ball2->y));\n\n double delta_vx = ball1->vx - ball2->vx;\n double delta_vy = ball1->vy - ball2->vy;\n\n double modx = calc_mod(delta_x, delta_y);\n\n double m1 = ball1->mass;\n double m2 = ball2->mass;\n\n double new_vx = ball1->vx - (2*m2/(m1+m2)) * \\\n ((delta_vx*delta_x + delta_y*delta_vy) 
\\\n / modx*modx) * delta_x;\n\n double new_vy = ball1->vy - (2*m2/(m1+m2)) * \\\n ((delta_vx*delta_x + delta_y*delta_vy) \\\n / modx*modx) * delta_y;\n\n ballp1->vx = new_vx;\n ballp1->vy = new_vy;\n}\n\nvoid Visualizador::do_collision(){\n #pragma omp parallel for\n\n for (int i=0; i<int(bodies.size()); i++){\n if (bodies[i].ball_col != NULL){\n if (sim.gui == 0){\n printf(\"CHOQUE ID%d > ID%d\");\n }\n if(sim.model == 0){\n death_func(&bodies[i], bodies[i].ball_col);\n } else if (sim.model == 1){\n modelo2(&bodies[i], bodies[i].ball_col);\n }\n }\n }\n}\n\nvoid Visualizador::print_ball(int id){\n printf(\"id=%d raio=%f massa=%f x=%d y=%d vx=%f vy=%f\\n\", bodies[id].id, bodies[id].radius, bodies[id].mass, \\\n int(bodies[id].x), int(bodies[id].y), bodies[id].vx, \\\n bodies[id].vy);\n}\n\nvoid Visualizador::do_iteration() {\n /* TODO: me implemente! */\n update_v(bodies);\n check_collision();\n do_collision();\n check_boundaries();\n update_pos(bodies);\n iter++;\n}\n" }, { "alpha_fraction": 0.5764890909194946, "alphanum_fraction": 0.5936762094497681, "avg_line_length": 42.81034469604492, "blob_id": "368d2ae283d1718ad8a097a4ca49afb64e2f7ec5", "content_id": "9885d46236fcb57232eb4071082fbd50fd7ee644", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7635, "license_type": "no_license", "max_line_length": 128, "num_lines": 174, "path": "/visualizador/RODATUDO.PY", "repo_name": "rmolines/2d-sim", "src_encoding": "UTF-8", "text": "import argparse\nimport os\nimport matplotlib.pyplot as plt\nimport csv\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-s\", \"-strategy\", action=\"store\", dest=\"s\", help=\"seleciona tipo de paralelizacao:\\\n sequencial(seq), simd(simd, openmp(omp) ou best(best). results para rodar os testes \\\n e gerar os gráficos ao final. \\\n default is best\", default=\"best\")\n\nparser.add_argument(\"-gui\", help=\"1, mostra o visualizador, 0 não. deafult é 1 \", \\\n action=\"store\", dest=\"gui\", default=1)\n\nparser.add_argument(\"-deltat\", help=\"define um deltat para a simulacao. 0.01 se nao definido\",\\\n action=\"store\", dest=\"delta_t\", default=0.01)\n\nparser.add_argument(\"-model\", help=\"escolhe o modelo simulacional a rodar. 0 para geometrico, 1 para elastico. padrao - 0\",\\\n action=\"store\", dest=\"model\", default=0)\n\nparser.add_argument(\"-nballs\", help=\"escolhe o numero de bolinhas.\",\\\n action=\"store\", dest=\"n_balls\", default=10)\n \n\nresults = parser.parse_args()\n\nprint(results)\n\nos.system(\"python create_input.py -n %d\" % int(results.n_balls))\n\nif(results.s == \"simd\"):\n os.system('cmake -DSTRATEGY=simd . && make \\\n && ./visualizador %f %f %s %f<input' % (float(results.delta_t), float(results.gui), results.s, float(results.model)))\n \nelif (results.s == \"omp\"):\n os.system('cmake -DSTRATEGY=omp . && make \\\n && ./visualizador %f %f %s %f <input' % (float(results.delta_t), float(results.gui), results.s, float(results.model)))\n\nelif (results.s == \"seq\"):\n os.system('cmake -DSTRATEGY=seq . && make \\\n && ./visualizador %f %f %s %f <input' % (float(results.delta_t), float(results.gui), results.s, float(results.model)))\n\nelif (results.s == \"best\"):\n os.system(\"cmake -DSTRATEGY=best . && make \\\n && ./visualizador %f %f %s %f <input\" % (float(results.delta_t), float(results.gui), results.s, float(results.model)))\n\nelif (results.s == \"results\"):\n command = 'cmake -DSTRATEGY=%s . 
&& make \\\n && mprof run ./visualizador %f %f %s %f <input && mprof plot -o %s'\n\n\n os.system(\"python create_input.py -n %d\" % (50))\n filename = './results-img/s=%s-n_balls=2500.jpg'\n\n os.system(command % (\"best\", float(results.delta_t), float(results.gui), \"best\", float(results.model), filename % (\"best\")))\n os.system(command % (\"omp\", float(results.delta_t), float(results.gui), \"omp\", float(results.model), filename % (\"omp\")))\n os.system(command % (\"simd\", float(results.delta_t), float(results.gui), \"simd\", float(results.model), filename % (\"simd\")))\n os.system(command % (\"seq\", float(results.delta_t), float(results.gui), \"seq\", float(results.model), filename % (\"seq\")))\n \n os.system(\"python create_input.py -n %d\" % (70))\n filename = './results-img/-s=%s-n_balls=4900.jpg'\n\n os.system(command % (\"best\", float(results.delta_t), float(results.gui), \"best\", float(results.model), filename % (\"best\")))\n os.system(command % (\"omp\", float(results.delta_t), float(results.gui), \"omp\", float(results.model), filename % (\"omp\")))\n os.system(command % (\"simd\", float(results.delta_t), float(results.gui), \"simd\", float(results.model), filename % (\"simd\")))\n os.system(command % (\"seq\", float(results.delta_t), float(results.gui), \"seq\", float(results.model), filename % (\"seq\")))\n\n os.system(\"python create_input.py -n %d\" % (90))\n filename = './results-img/-s=%s-n_balls=8100.jpg'\n os.system(command % (\"best\", float(results.delta_t), float(results.gui), \"best\", float(results.model), filename % (\"best\")))\n os.system(command % (\"omp\", float(results.delta_t), float(results.gui), \"omp\", float(results.model), filename % (\"omp\")))\n os.system(command % (\"simd\", float(results.delta_t), float(results.gui), \"simd\", float(results.model), filename % (\"simd\")))\n os.system(command % (\"seq\", float(results.delta_t), float(results.gui), \"seq\", float(results.model), filename % (\"seq\")))\n\n os.system(\"python create_input.py -n %d\" % (110))\n filename = './results-img/-s=%s-n_balls=12100.jpg'\n os.system(command % (\"best\", float(results.delta_t), float(results.gui), \"best\", float(results.model), filename % (\"best\")))\n os.system(command % (\"omp\", float(results.delta_t), float(results.gui), \"omp\", float(results.model), filename % (\"omp\")))\n os.system(command % (\"simd\", float(results.delta_t), float(results.gui), \"simd\", float(results.model), filename % (\"simd\")))\n os.system(command % (\"seq\", float(results.delta_t), float(results.gui), \"seq\", float(results.model), filename % (\"seq\")))\n\n n_balls = [50**2, 70**2, 90**2, 110**2] \n types = [\"best\", \"omp\", \"simd\", \"seq\"]\n\n avg = [[0]*4]*4\n\n for d in range(len(n_balls)):\n ind = -1\n for i in types:\n ind += 1\n counter = 0\n sum = 0.0\n with open('./results/-s=%s -gui=2 -model=0 -n_balls=%d.csv'%(i, n_balls[d])) as csvfile:\n spamreader = csv.reader(csvfile)\n for row in spamreader:\n counter += 1\n sum += float(row[0])\n avg [d][ind] = sum/counter\n\n fig, ax = plt.subplots()\n ind = range(len(avg[0]))\n b, o, si, se = plt.bar(ind, avg[d])\n\n b.set_facecolor('r')\n o.set_facecolor('g')\n si.set_facecolor('b')\n se.set_facecolor('y')\n ax.set_xticks(ind)\n ax.set_xticklabels(['Best', 'OMP', 'SIMD', 'Seq'])\n ax.set_ylabel('Tempo(s)')\n ax.set_title('Tipo de compilação com %d bolinhas'%(n_balls[d])) \n fig.savefig('./results-time/%d.jpg'%(n_balls[d])) \n plt.close(fig)\n\n\n\n\n percent = [[0]*3]*4\n for i in range(len(avg)):\n percent 
[i][0] = (avg[i][0]/avg[i][3]) * 100\n percent [i][1] = (avg[i][1]/avg[i][3]) * 100\n percent [i][2] = (avg[i][2]/avg[i][3]) * 100\n\n fig, ax = plt.subplots()\n b, o, si = plt.bar(range(len(percent[0])),percent[0])\n\n b.set_facecolor('r')\n o.set_facecolor('g')\n si.set_facecolor('b')\n ax.set_xticks(ind)\n ax.set_xticklabels(['Best/Seq', 'OMP/Seq', 'SIMD/Seq'])\n ax.set_ylabel('Ganho de Desempenho (%)')\n ax.set_title('Tipo de compilação com 2500 bolinhas') \n fig.savefig('./results-time/2500-percent.jpg') \n plt.close(fig)\n\n fig, ax = plt.subplots()\n b, o, si = plt.bar(range(len(percent[1])),percent[1])\n\n b.set_facecolor('r')\n o.set_facecolor('g')\n si.set_facecolor('b')\n ax.set_xticks(ind)\n ax.set_xticklabels(['Best/Seq', 'OMP/Seq', 'SIMD/Seq'])\n ax.set_ylabel('Ganho de Desempenho (%)')\n ax.set_title('Tipo de compilação com 4900 bolinhas') \n fig.savefig('./results-time/4900-percent.jpg') \n plt.close(fig)\n\n fig, ax = plt.subplots()\n b, o, si= plt.bar(range(len(percent[2])),percent[2])\n\n b.set_facecolor('r')\n o.set_facecolor('g')\n si.set_facecolor('b')\n ax.set_xticks(ind)\n ax.set_xticklabels(['Best/Seq', 'OMP/Seq', 'SIMD/Seq'])\n ax.set_ylabel('Ganho de Desempenho (%)')\n ax.set_title('Tipo de compilação com 8100 bolinhas') \n fig.savefig('./results-time/8100-percent.jpg') \n plt.close(fig)\n\n fig, ax = plt.subplots()\n b, o, si= plt.bar(range(len(percent[3])),percent[3])\n\n b.set_facecolor('r')\n o.set_facecolor('g')\n si.set_facecolor('b')\n ax.set_xticks(ind)\n ax.set_xticklabels(['Best/Seq', 'OMP/Seq', 'SIMD/Seq'])\n ax.set_ylabel('Ganho de Desempenho (%)')\n ax.set_title('Tipo de compilação com 12100 bolinhas') \n fig.savefig('./results-time/12100-percent.jpg') \n plt.close(fig)" }, { "alpha_fraction": 0.40869906544685364, "alphanum_fraction": 0.45493730902671814, "avg_line_length": 21.79464340209961, "blob_id": "56af1fc52a00e6845b544c9d61ce8fb1a1a495e1", "content_id": "c088ce223f6dc9ddbeb18bc61d8c55d1dc61941c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2552, "license_type": "no_license", "max_line_length": 85, "num_lines": 112, "path": "/visualizador/create_input.py", "repo_name": "rmolines/2d-sim", "src_encoding": "UTF-8", "text": "import random\nimport os\nimport argparse\nimport sys\n\nXB = 2\nYB = 1\nFW = 10000\nFH = 10000\nRADIUS = 130\nballs = []\nfield = [[False]*FH]*FW\n\n\n\ndef rand_balls():\n XB = int(input())\n\n f = open(\"input\", \"w+\")\n\n f.write(\"%d %d %d\\n\" % (FW, FH, XB*YB))\n\n x_pos = []\n y_pos = []\n\n mu = random.randint(200, 500)\n f.write(\"%f 1 1\\n\" % (50))\n\n for i in range(XB):\n radius = random.randint(100, 500)\n placed = False\n while (not placed):\n collides = False\n x = random.randint(radius, FW-radius)\n y = random.randint(radius, FW-radius)\n for i in range(x-radius, x+radius):\n for j in range(y-radius, y+radius):\n if (field[i][j]):\n collides = True\n \n if(not collides):\n for i in range(x-radius, x+radius):\n for j in range(y-radius, y+radius):\n field[i][j] = True\n placed = True \n \n vx = float(random.randint(-5000,5000))\n vy = float(random.randint(-5000,5000))\n mass = random.randint(1, 100)/10\n f.write(\"%f %f %f %f %f %f %f\\n\" % (i, radius, mass, x, y, vx, vy))\n\ndef go_for_numbers():\n XB = int((sys.argv)[2])\n\n f = open(\"input\", \"w+\")\n\n f.write(\"%d %d %d\\n\" % (FW, FH, XB*XB))\n\n\n mu = random.randint(200, 500)\n f.write(\"%f 1 1\\n\" % (50))\n\n counter = 0;\n for i in range(XB):\n for j in range(XB):\n radius = 
(FW/XB)*0.2;\n x = i*(FW/XB)+radius;\n y = j*(FH/XB)+radius;\n vx = float(random.randint(-4000,4000))\n vy = float(random.randint(-4000,4000))\n mass = random.randint(1, 100)/10\n f.write(\"%f %f %f %f %f %f %f\\n\" % (counter, radius, mass, x, y, vx, vy))\n counter+=1\n\n\ndef test_balls():\n f = open(\"input\", \"w+\")\n radius = 200\n f.write(\"%d %d %d\\n\" % (FW, FH, XB*YB))\n\n f.write(\"%f 1 1\\n\" % (0))\n x = FW/3;\n y = FH/2;\n vx = 1000;\n vy = 0;\n mass = 1;\n\n f.write(\"%f %f %f %f %f %f %f\\n\" % (0, radius, mass, x, y, vx, vy))\n\n x = FW/3*2;\n y = FH/2;\n vx = -1000;\n vy = 0;\n mass = 1;\n\n f.write(\"%f %f %f %f %f %f %f\\n\" % (1, radius, mass, x, y, vx, vy))\n\n\ndef main():\n if ((sys.argv)[1] == \"-test\"):\n test_balls()\n elif ((sys.argv)[1] == \"-n\"):\n go_for_numbers()\n else:\n rand_balls()\n\nif __name__ == \"__main__\":\n main()\n print(\"Concluido\")\n\n# os.system(\"make\")\n# os.system(\"./visualizador < 2\")" } ]
6
choudharysonal/aws-autoscale-using-jmeter
https://github.com/choudharysonal/aws-autoscale-using-jmeter
d706ceb235bba89aa1f82aa2c5d32b396410d606
b14e1ea48e54519ed24c7feba2d7c220df310eb0
da4caa34eed53c895940f10d4f703ed67754cdd9
refs/heads/master
2020-03-22T10:29:25.619197
2018-07-05T22:25:36
2018-07-05T22:25:36
139,906,424
0
2
null
null
null
null
null
[ { "alpha_fraction": 0.8149350881576538, "alphanum_fraction": 0.8181818127632141, "avg_line_length": 153.5, "blob_id": "8b2542328fed593601117b58f86ac8f63207a6a7", "content_id": "f0dc441f7683b82a470f441936923624932fe4ac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 308, "license_type": "no_license", "max_line_length": 293, "num_lines": 2, "path": "/README.md", "repo_name": "choudharysonal/aws-autoscale-using-jmeter", "src_encoding": "UTF-8", "text": "# awsautoscale\nPython flask app deployed in AWS EC2 instance which stays behind the load balancer and auto scaling group. This application has been created to stress/load test with JMeter. The aim of the project is to analyze how AWS scales out or scale in automatically based on the average CPU utilization." }, { "alpha_fraction": 0.6749241948127747, "alphanum_fraction": 0.6890798807144165, "avg_line_length": 27.66666603088379, "blob_id": "dd8140e66c6cf9bc0d5e02f70f8ffac03aa66fd5", "content_id": "f870037cdd9433923962efeb68a37389f9ddadb7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1978, "license_type": "no_license", "max_line_length": 113, "num_lines": 69, "path": "/main.py", "repo_name": "choudharysonal/aws-autoscale-using-jmeter", "src_encoding": "UTF-8", "text": "from flask import Flask, render_template, request, json, url_for, redirect\nfrom flaskext.mysql import MySQL\nimport plotly\n#plotly.tools.set_credentials_file(username='', api_key='')\n#import plotly.plotly as py\nfrom plotly.offline import plot\nimport plotly.graph_objs as go\n#py.sign_in('', '')\n\n#kmeans imports\nimport pandas as pd\nfrom scipy import stats\nfrom sklearn.cluster import KMeans\nfrom sklearn.decomposition import PCA\nimport matplotlib\nmatplotlib.use('agg')\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport pylab as pl\n\n\napp = Flask(__name__, static_url_path='')\n\nmysql = MySQL()\napp.config['MYSQL_DATABASE_USER'] = 'root'\napp.config['MYSQL_DATABASE_PASSWORD'] = 'sonal'\napp.config['MYSQL_DATABASE_DB'] = 'clouddb'\napp.config['MYSQL_DATABASE_HOST'] = 'localhost'\nmysql.init_app(app)\n\n@app.route('/')\n@app.route('/index')\ndef index():\n return \"Hello, World!\"\n\n@app.route('/hello/<user>')\ndef hello_name(user):\n return render_template('hello.html', name = user)\n\n\n@app.route('/listcoursesform')\ndef listcoursesform():\n return render_template('listcourses.html', data=[])\n\n@app.route('/display', methods=['GET'])\ndef display():\n data = request.args.get('data')\n print(data)\n resp = data.split()\n print(resp)\n return render_template('listcourses.html', data=resp, ip='http://ec2-54-221-110-14.compute-1.amazonaws.com/')\n\n@app.route('/listcourses', methods=['POST'])\ndef listcourses():\n # Get inputs from form\n day = request.form['day']\n q = \"select course from csefall where \"+day+\"='Y'\"\n print(q)\n # execute and get results\n cursor = mysql.connect().cursor()\n cursor.execute(q)\n results = cursor.fetchall()\n resp = []\n for i in range(len(results)):\n resp.append(str(results[i][0]))\n print(resp)\n data_to_D = ''.join(resp)\n return redirect(\"http://ec2-54-221-110-14.compute-1.amazonaws.com/display?data=\"+data_to_D, code=302)\n #return render_template('listcourses.html', data=resp)\n" }, { "alpha_fraction": 0.7452830076217651, "alphanum_fraction": 0.75, "avg_line_length": 18.363636016845703, "blob_id": "f5cfa2e0982be86fcf36fa420853051fd5fe75eb", "content_id": 
"4275acb011138357d96944cb9a78ec76c00c74e2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 212, "license_type": "no_license", "max_line_length": 48, "num_lines": 11, "path": "/main.wsgi", "repo_name": "choudharysonal/aws-autoscale-using-jmeter", "src_encoding": "UTF-8", "text": "import sys\n\n#Expand Python classes path with your app's path\nsys.path.insert(0, \"/var/www/demoflask\")\n\nfrom main import app\n\n#Put logging code (and imports) here ...\n\n#Initialize WSGI app object\napplication = app" } ]
3
xiaoyuezhuu/circuit_maker
https://github.com/xiaoyuezhuu/circuit_maker
7e48fc7e9e9e84d983e59167e6d5b371e0cbfcb1
2120c71db7f7bc81cb262c6d24ec5856d45db8ae
8c4fe7ac7fcd0807587f6be5d531a0378f590ee6
refs/heads/master
2022-04-03T14:40:52.714705
2020-02-07T16:02:58
2020-02-07T16:02:58
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5470842123031616, "alphanum_fraction": 0.5691436529159546, "avg_line_length": 45.469730377197266, "blob_id": "b0c67d04169cfcabc4a12f8a3e48cfca0d435d0f", "content_id": "1b5f59bf9bcdcfad1c8a60539cd2400cb2e5ffec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 22258, "license_type": "no_license", "max_line_length": 210, "num_lines": 479, "path": "/circuit_gui.py", "repo_name": "xiaoyuezhuu/circuit_maker", "src_encoding": "UTF-8", "text": "# GUI for a ciruit maker program. It creates a customized workout for you and times it.\n# Written by xiaoyuez, Jan 2020.\n\nimport tkinter as tk\nfrom pygame import mixer\nimport time\nimport pandas as pd\nimport numpy as np\nimport os\nimport sys\n\n# load workout dataframe\ndf = pd.read_csv(os.path.join(sys.path[0], \"workouts.csv\"))\n\n# load the beeps\nsingle_beep = os.path.join(sys.path[0], \"single_beep.mp3\")\n\nclass Circuits(object):\n # the welcome window\n def init(self, root):\n # default values\n #self.emphasis = \"Abs\" # primary focus\n #self.sec_emphasis = \"Butt\" # secondary focus\n self.emp_prob = 0.5 # chosen probabiliy for the primary focus\n self.sec_emp_prob = 0.3 # chosen probability for the secondary focus\n #self.intensity = 2 # medium intensity \n #self.weight = 1 # has weights\n #self.band = 1 # has band\n\n TESTING = 0 # for testing \n if TESTING == 1:\n self.circ_val = 4\n self.set_val = 4\n self.repeat_val = 1\n self.ht_val = 4\n self.lt_val = 2\n self.between_val = 2\n self.warmup_val = 2\n self.cool_val = 2\n else:\n self.circ_val = 4\n self.set_val = 4\n self.repeat_val = 2\n self.ht_val = 50\n self.lt_val = 10\n self.between_val = 45\n self.warmup_val = 60\n self.cool_val = 60\n\n self.frame = tk.Frame(root)\n self.frame.pack()\n\n bruh = tk.Label(self.frame, text = \" \")\n bruh.grid(row = 0, column = 1, pady = 8)\n\n bruh2 = tk.Label(self.frame, text = \" \")\n bruh2.grid(row = 1, column = 1, pady = 8)\n\n bruh3 = tk.Label(self.frame, text = \" \")\n bruh3.grid(row = 2, column = 1, pady = 8)\n\n title = tk.Label(self.frame, text = \"Circuit Maker \", font = (\"Avenir\", 50, 'bold'))\n title.grid(row = 3, column = 1, pady = 8)\n\n start_button = tk.Button(self.frame, text = 'Start', font = (\"Avenir\", 20), \n justify = 'center', command = self.window_1)\n start_button.grid(row = 4, column = 1, pady = 8)\n \n # the main circuit maker program\n\n def circuit_maker(self, num_circ, num_sets, intensity, \n emphasis, sec_emphasis, emp_prob, sec_emp_prob):\n \n # determine body parts and their chosen probability \n body_parts = ['Arms','Legs','Butt','Abs','Back']\n emp_inx = [i for i in range(len(body_parts)) if body_parts[i] == emphasis][0]\n sec_emp_inx = [i for i in range(len(body_parts)) if body_parts[i] == sec_emphasis][0]\n if emp_inx == sec_emp_inx: # just in case \n others_prob = (1 - emp_prob - sec_emp_prob) / (len(body_parts) - 1)\n prob_list = np.repeat(others_prob, len(body_parts))\n prob_list[emp_inx] = emp_prob + sec_emp_prob\n else:\n others_prob = (1 - emp_prob - sec_emp_prob) / (len(body_parts) - 2)\n prob_list = np.repeat(others_prob, len(body_parts))\n prob_list[emp_inx] = emp_prob\n prob_list[sec_emp_inx] = sec_emp_prob\n\n # choose workouts based on criteria without replacements\n circuits = pd.DataFrame()\n for i in range(num_circ * num_sets):\n chosen_type = np.random.choice(body_parts, p = prob_list)\n if i % num_sets == 0: # if this is a new circuit, start with cardio\n this_workout = df[(df.Type == 'Cardio') & 
(df.chosen == 0)].sample()\n df.chosen[this_workout.index] = 1 # tag this workout\n circuits = circuits.append(this_workout)\n else: \n try:\n if self.weight and self.band:\n this_workout = df[(df[chosen_type] == 1) & (df.Intensity <= intensity) & (df.chosen == 0)].sample()\n elif self.weight and not self.band:\n this_workout = df[(df[chosen_type] == 1) & (df.Intensity <= intensity) & (df.Band == 0) & (df.chosen == 0)].sample()\n elif not self.weight and self.band:\n this_workout = df[(df[chosen_type] == 1) & (df.Intensity <= intensity) & (df.Weight == 0) & (df.chosen == 0)].sample()\n else: # when there is no weight nor band, intensity constraint can be relaxed\n this_workout = df[(df[chosen_type] == 1) & (df.Band == 0) & (df.Weight == 0) & (df.chosen == 0)].sample()\n df.chosen[this_workout.index] = 1\n circuits = circuits.append(this_workout)\n except:\n print('Not enough non-repeating workouts')\n df.chosen = 0\n circuits = circuits.reset_index(drop = True)\n return circuits\n\n # number of circuits & sets window\n def window_1(self):\n self.frame.destroy()\n self.frame = tk.Frame(root)\n self.frame.pack()\n\n title = tk.Label(self.frame, text = \"Create Your Own Circuit Programs \", font = (\"Avenir\", 40, 'bold'))\n title.grid(row = 0, columnspan = 9, pady = 8)\n\n circ_response = tk.Label(self.frame, text = \"Number of circuits:\", font = (\"Avenir\", 20), justify = 'left')\n circ_response.grid(row = 1, column = 0, padx = 20, pady = 5, sticky = tk.W)\n\n set_response = tk.Label(self.frame, text = \"Number of sets:\", font = (\"Avenir\", 20), justify = 'left')\n set_response.grid(row = 2, column = 0, padx = 20, pady = 5, sticky = tk.W)\n\n repeat_response = tk.Label(self.frame, text = \"Number of repeats:\", font = (\"Avenir\", 20), justify = 'left')\n repeat_response.grid(row = 3, column = 0, padx = 20, pady = 5, sticky = tk.W) \n\n self.circ = tk.StringVar(value = self.circ_val)\n circ_proposal = tk.Entry(self.frame, font = (\"Avenir\", 20), textvariable = self.circ, width = 5)\n circ_proposal.grid(row = 1, column = 1, padx = 5, pady = 5, sticky = tk.W)\n\n self.set = tk.StringVar(value = self.set_val)\n set_proposal = tk.Entry(self.frame, font = (\"Avenir\", 20), textvariable = self.set, width = 5)\n set_proposal.grid(row = 2, column = 1, padx = 5, pady = 5, sticky = tk.W)\n\n self.repeat = tk.StringVar(value = self.repeat_val)\n repeat_proposal = tk.Entry(self.frame, font = (\"Avenir\", 20), textvariable = self.repeat, width = 5)\n repeat_proposal.grid(row = 3, column = 1, padx = 5, pady = 5, sticky = tk.W)\n\n emp_response = tk.Label(self.frame, text = \"Primary focus:\", font = (\"Avenir\", 20), justify = 'left')\n emp_response.grid(row = 4, column = 0, padx = 20, pady = 5, sticky = tk.W)\n \n self.v1 = tk.IntVar(None)\n radio_values = {\"Arms\": 1,\"Legs\": 2,\"Butt\": 3,\"Abs\": 4, \"Back\": 5}\n for (text, value) in radio_values.items(): \n tk.Radiobutton(self.frame, text = text, value = value, variable = self.v1, command = self.emp_chosen, font = (\"Avenir\", 20)).grid(row = 4, column = int(value), padx = 5, pady = 5, sticky = tk.W)\n\n sec_emp_response = tk.Label(self.frame, text = \"Secondary focus:\", font = (\"Avenir\", 20), justify = 'left')\n sec_emp_response.grid(row = 5, column = 0, padx = 20, pady = 5, sticky = tk.W)\n \n self.v7 = tk.IntVar(None)\n radio_values = {\"Arms\": 1,\"Legs\": 2,\"Butt\": 3,\"Abs\": 4, \"Back\": 5}\n for (text, value) in radio_values.items(): \n tk.Radiobutton(self.frame, text = text, value = value, variable = self.v7, command = 
self.sec_emp_chosen, font = (\"Avenir\", 20)).grid(row = 5, column = int(value), padx = 5, pady = 5, sticky = tk.W)\n\n intensity_response = tk.Label(self.frame, text = \"Training intensity:\", font = (\"Avenir\", 20), justify = 'left')\n intensity_response.grid(row = 6, column = 0, padx = 20, pady = 5, sticky = tk.W)\n\n self.v2 = tk.IntVar(None)\n radio_values = {\"Medium\": 1,\"High\": 2}\n for (text, value) in radio_values.items(): \n tk.Radiobutton(self.frame, text = text, value = value, variable = self.v2, command = self.int_chosen, font = (\"Avenir\", 20)).grid(row = 6, column = int(value), padx = 5, pady = 5, sticky = tk.W)\n\n weight_response = tk.Label(self.frame, text = \"Weights?\", font = (\"Avenir\", 20), justify = 'left')\n weight_response.grid(row = 7, column = 0, padx = 20, pady = 5, sticky = tk.W)\n\n band_response = tk.Label(self.frame, text = \"Resistance band?\", font = (\"Avenir\", 20), justify = 'left')\n band_response.grid(row = 8, column = 0, padx = 20, pady = 5, sticky = tk.W)\n\n self.v3 = tk.IntVar(None)\n yes_no = {\"Yes\": 1, \"No\": 2}\n for (text, value) in yes_no.items():\n tk.Radiobutton(self.frame, text = text, value = value, variable = self.v3, command = self.weight_chosen, font = (\"Avenir\", 20)).grid(row = 7, column = int(value), padx = 5, pady = 5, sticky = tk.W)\n\n self.v4 = tk.IntVar(None)\n for (text, value) in yes_no.items():\n tk.Radiobutton(self.frame, text = text, value = value, variable = self.v4, command = self.band_chosen, font = (\"Avenir\", 20)).grid(row = 8, column = int(value), padx = 5, pady = 5, sticky = tk.W)\n\n next_button = tk.Button(self.frame, text = 'Next', font = (\"Avenir\", 20), justify = 'center', command = self.circuit_wrapper)\n next_button.grid(row = 9, column = 7)\n\n # primary focus button commands\n def emp_chosen(self):\n body_parts = ['Arms','Legs','Butt','Abs','Back']\n self.emphasis = body_parts[self.v1.get() - 1]\n\n # secondary focus button commands\n def sec_emp_chosen(self):\n body_parts = ['Arms','Legs','Butt','Abs','Back']\n self.sec_emphasis = body_parts[self.v7.get() - 1]\n\n # training intensity button commands\n def int_chosen(self):\n intensities = ['Medium', 'High']\n self.intensity = intensities[self.v2.get() - 1]\n\n # weight yes-no radio button commands\n def weight_chosen(self):\n if self.v3.get() == 2:\n self.weight = 0\n else:\n self.weight = 1\n\n # band yes-no radio button commands\n def band_chosen(self):\n if self.v4.get() == 2:\n self.band = 0\n else:\n self.band = 1\n\n # the timer window \n def window_2(self):\n self.frame.destroy()\n self.frame = tk.Frame(root)\n self.frame.pack()\n \n title = tk.Label(self.frame, text = \"Create Your HIIT Timer (seconds)\", font = (\"Avenir\", 40, 'bold'))\n title.grid(row=0, columnspan=9, pady=8)\n\n ht_response = tk.Label(self.frame, text = \"High Intensity:\", font = (\"Avenir\", 20), justify = 'left')\n ht_response.grid(row = 1, column = 1, pady = 5, padx = 20, sticky = tk.W)\n\n lt_response = tk.Label(self.frame, text = \"Low Intensity:\", font = (\"Avenir\", 20), justify = 'left')\n lt_response.grid(row = 2, column = 1, pady = 5, padx = 20, sticky = tk.W)\n\n between_response = tk.Label(self.frame, text = \"Between Circuits:\", font = (\"Avenir\", 20), justify = 'left')\n between_response.grid(row = 3, column = 1, pady = 5, padx = 20, sticky = tk.W)\n\n warmup_response = tk.Label(self.frame, text = \"Warmup:\", font = (\"Avenir\", 20), justify = 'left')\n warmup_response.grid(row = 4, column = 1, pady = 5, padx = 20, sticky = tk.W)\n\n 
cool_response = tk.Label(self.frame, text = \"Cool Down:\", font = (\"Avenir\", 20), justify = 'left')\n cool_response.grid(row = 5, column = 1, pady = 5, padx = 20, sticky = tk.W)\n\n self.ht = tk.StringVar(root, value = self.ht_val)\n ht_proposal = tk.Entry(self.frame, font = (\"Avenir\", 20), textvariable = self.ht, width = 5)\n ht_proposal.grid(row = 1, column = 2, pady = 5, padx = 5, sticky = tk.W)\n\n self.lt = tk.StringVar(root, value = self.lt_val)\n lt_proposal = tk.Entry(self.frame, font = (\"Avenir\", 20), textvariable = self.lt, width = 5)\n lt_proposal.grid(row = 2, column = 2, pady = 5, padx = 5, sticky = tk.W)\n\n self.between = tk.StringVar(root, value = self.between_val)\n between_proposal = tk.Entry(self.frame, font = (\"Avenir\", 20), textvariable = self.between, width = 5)\n between_proposal.grid(row = 3, column = 2, pady = 5, padx = 5, sticky = tk.W)\n\n self.warmup = tk.StringVar(root, value = self.warmup_val)\n warmup_proposal = tk.Entry(self.frame, font = (\"Avenir\",20), textvariable = self.warmup, width = 5)\n warmup_proposal.grid(row = 4, column = 2, pady = 5, padx = 5, sticky = tk.W)\n\n self.cool = tk.StringVar(root, value = self.cool_val)\n cool_proposal = tk.Entry(self.frame, font = (\"Avenir\", 20), textvariable = self.cool, width = 5)\n cool_proposal.grid(row = 5, column = 2, pady = 5, padx = 5, sticky = tk.W)\n\n next_button = tk.Button(self.frame, text = 'Ready?', font = (\"Avenir\", 20), justify = 'center', command = self.countdown_init)\n next_button.grid(row = 6, column = 3)\n\n prev_button = tk.Button(self.frame, text = 'Previous', font = (\"Avenir\", 20), justify = 'center', command = self.window_1)\n prev_button.grid(row = 6, column = 0)\n \n # pass the entry info to the main circuit maker function\n def circuit_wrapper(self):\n today_circuit = self.circuit_maker(\n num_circ = int(self.circ.get()), \n num_sets = int(self.set.get()),\n emphasis = self.emphasis,\n sec_emphasis = self.sec_emphasis, \n intensity = self.v2.get(),\n emp_prob = self.emp_prob,\n sec_emp_prob = self.sec_emp_prob)\n # if not enough non-repeating workouts\n if today_circuit.shape[0] < int(self.circ.get()) * int(self.set.get()):\n self.error_popup()\n else:\n self.workout_list = today_circuit.Name\n self.workout_iter = 0\n self.circuit_iter = 1\n self.repeat_iter = 1\n self.window_2()\n\n def play_single_beep(self):\n mixer.init(44100)\n mixer.music.load(single_beep)\n mixer.music.play()\n time.sleep(0.7)\n mixer.music.stop()\n \n def error_popup(self):\n self.window_1()\n popup = tk.Toplevel()\n popup.geometry(\"350x100\")\n popup.wm_title(\"Error Message\")\n\n message = tk.Label(popup, text = \"Not enough non-repeating workouts.\", font = (\"Avenir\", 20))\n message.grid(row = 0, sticky = tk.W) \n\n message2 = tk.Label(popup, text = \"Please select again.\", font = (\"Avenir\", 20))\n message2.grid(row = 1, sticky = tk.W) \n\n b = tk.Button(popup, text = \"Okay\", command = popup.destroy)\n b.grid(row = 2,sticky = tk.E)\n \n # initialize the countdown from warmup\n def countdown_init(self):\n self.frame.destroy()\n self.frame = tk.Frame(root)\n self.frame.pack()\n\n self.countdown_title = tk.Label(self.frame, text = \"Warmup\", font = (\"Avenir\", 40, 'bold'), bg = '#ff8c00', bd = 4)\n self.countdown_title.grid(row = 0, column = 1, pady = 8)\n\n self.countdown_label = tk.Label(self.frame, text = \" \", font = (\"Avenir\", 35, 'bold'))\n self.countdown_label.grid(row = 2, column = 1, pady = 8 )\n\n self.countdown_next_label = tk.Label(self.frame, text = \" \", font = 
(\"Avenir\", 35), fg = \"#A9A9A9\")\n self.countdown_next_label.grid(row = 4, column = 1)\n\n self.countdown_clock = tk.Label(self.frame, text = \" \", font = (\"Avenir\", 200, 'bold'), fg = '#ff8c00')\n self.countdown_clock.grid(row = 3, columnspan = 3 )\n\n self.countdown_circ = tk.Label(self.frame, text = \" \", font = (\"Avenir\", 28, 'bold'))\n self.countdown_circ.grid(row = 0, column = 0, padx = 15, pady = 8 )\n\n self.countdown_repeat = tk.Label(self.frame, text = \" \", font = (\"Avenir\", 28, 'bold'))\n self.countdown_repeat.grid(row = 0, column = 2, padx = 15, pady = 8 )\n\n self.paused = 0\n self.pause_button = tk.Button(self.frame, text = \"Pause\", font = (\"Avenir\", 20, 'bold'), command = self.pause, width = 6)\n self.pause_button.grid(row = 7, column = 2, padx = 15, pady = 8 )\n\n self.play_single_beep()\n self.next_section = 'ht'\n self.countdown(int(self.warmup.get()))\n\n # little command function for the pause button \n def pause(self):\n if self.paused == 1:\n self.paused = 0\n self.pause_button.config(text = \"Pause\")\n elif self.paused == 0:\n self.paused = 1\n self.pause_button.config(text = \"Resume\")\n \n # the main countdown function\n def countdown(self, remaining = None):\n if remaining is not None:\n self.remaining = remaining\n\n # decide where to go once the countdown has finished\n if self.remaining <= 0:\n self.blank_window()\n else:\n minutes = '%02d' % int(self.remaining / 60)\n seconds = '%02d'% (round(self.remaining) % 60)\n clock = \"{}:{}\".format(minutes, seconds)\n self.countdown_clock.configure(text = clock)\n if self.paused == 1: # if pause button is pressed\n self.remaining = self.remaining\n else: \n self.remaining = self.remaining - 1\n root.after(1000, self.countdown)\n\n # to avoid flickering between transitions\n def blank_window(self):\n self.countdown_title.configure(text = \" \", fg = 'white')\n self.countdown_label.configure(text = \" \")\n self.countdown_next_label.configure(text = \" \")\n self.countdown_clock.configure(text = \" \", fg = 'white')\n self.countdown_circ.configure(text = \" \")\n self.countdown_repeat.configure(text = \" \")\n\n if self.next_section == 'ht':\n self.ht_init()\n elif self.next_section == 'lt':\n self.lt_init()\n elif self.next_section == 'between':\n self.between_init()\n elif self.next_section == 'cool':\n self.cool_init()\n else:\n self.last_window()\n\n # high intensity countdown initializer \n def ht_init(self): \n self.countdown_title.configure(text = \"High Intensity\", bg = '#d82525')\n self.countdown_label.configure(text = \"[ {} ]\".format(self.workout_list[self.workout_iter]))\n if self.workout_iter != len(self.workout_list):\n if (self.workout_iter + 1) % int(self.set.get()) == 0: # if the last one of the circ\n self.countdown_next_label.configure(text = \"Last one of the set!\")\n else:\n self.countdown_next_label.configure(text = \"Next: {}\".format(self.workout_list[self.workout_iter + 1]))\n else:\n self.countdown_next_label.configure(text = \" \")\n self.countdown_clock.configure(text = \" \", fg = '#d82525')\n self.countdown_circ.configure(text = \"Circuit: {}/{}\".format(self.circuit_iter, int(self.circ.get())))\n self.countdown_repeat.configure(text = \"Repeat: {}/{}\".format(self.repeat_iter, int(self.repeat.get())))\n\n self.workout_iter += 1\n if self.workout_iter % int(self.set.get()) == 0: # if a circuit is completed\n if self.repeat_iter == int(self.repeat.get()): # if this circuit has been repeated \n self.circuit_iter += 1\n self.repeat_iter = 1 # reset \n if self.workout_iter 
== len(self.workout_list): # if all the circuits are completed\n self.next_section = 'cool'\n else:\n self.next_section = 'between' # take a break \n else:\n self.repeat_iter += 1\n self.workout_iter = self.workout_iter - int(self.set.get()) # repeat the workout\n self.next_section = 'lt'\n else: # if still in the circuit\n self.next_section = 'lt'\n\n self.play_single_beep()\n self.countdown(remaining = int(self.ht.get()))\n\n # low intensity countdown initializer\n def lt_init(self): \n self.countdown_title.configure(text = \"Low Intensity\", bg = '#399d72')\n self.countdown_label.configure(text = \"[ Quick Rest ]\")\n self.countdown_clock.configure(text = \" \", fg = '#399d72')\n self.countdown_circ.configure(text = \"Circuit: {}/{}\".format(self.circuit_iter, int(self.circ.get())))\n self.countdown_repeat.configure(text = \"Repeat: {}/{}\".format(self.repeat_iter, int(self.repeat.get())))\n \n self.next_section = 'ht'\n self.play_single_beep()\n self.countdown(remaining = int(self.lt.get()))\n\n # mini break countdown initializer\n def between_init(self): \n str1 = \"Good job! You have completed circuit {} / {}\".format(int(self.workout_iter / int(self.set.get())), int(self.circ.get()))\n self.countdown_title.configure(text = \"Mini Break\", bg = '#6d9eeb')\n self.countdown_label.configure(text = str1)\n self.countdown_next_label.configure(text = \" \")\n self.countdown_clock.configure(text = \" \", fg = '#6d9eeb')\n\n self.next_section = 'ht'\n self.play_single_beep()\n self.countdown(remaining = int(self.between.get()))\n\n # cool down countdown initializer\n def cool_init(self): \n str1 = \"Excellent! All circuits completed.\"\n self.countdown_title.configure(text = \"Cool Down\", bg = '#597073')\n self.countdown_label.configure(text = str1)\n self.countdown_next_label.configure(text = \" \")\n self.countdown_clock.configure(text = \" \", fg = '#597073')\n\n self.next_section = 'out'\n self.play_single_beep()\n # use the cool-down duration here, not the low-intensity one\n self.countdown(remaining = int(self.cool.get()))\n \n def last_window(self):\n self.frame.destroy()\n self.frame = tk.Frame(root)\n self.frame.pack()\n\n bruh = tk.Label(self.frame, text = \" \")\n bruh.grid(row = 0, column = 1, pady = 8)\n\n bruh2 = tk.Label(self.frame, text = \" \")\n bruh2.grid(row = 1, column = 1, pady = 8)\n\n bruh3 = tk.Label(self.frame, text = \" \")\n bruh3.grid(row = 2, column = 1, pady = 8)\n\n title = tk.Label(self.frame, text = \"Please exit the program\", font = (\"Avenir\", 45, 'bold'))\n title.grid(row = 3, column = 1, pady = 8)\n\nroot = tk.Tk()\nroot.geometry(\"900x600\")\nroot.title('Circuit Maker')\napp = Circuits()\napp.init(root)\nroot.mainloop()" }, { "alpha_fraction": 0.7495978474617004, "alphanum_fraction": 0.765683650970459, "avg_line_length": 62.16666793823242, "blob_id": "eb81c4e603da9541b34b92639891234d819924f0", "content_id": "e9e4b91927e9c95f05bb42b41da3aeaa11d3c9db", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1865, "license_type": "no_license", "max_line_length": 738, "num_lines": 30, "path": "/README.md", "repo_name": "xiaoyuezhuu/circuit_maker", "src_encoding": "UTF-8", "text": "## Circuit Maker\nThis is a toy project that creates customized workout programs for you. It started with my frustration that it is hard to find flexible HIIT (High-Intensity Interval Training) apps. They either come with a few sets of pre-determined workouts, or do not allow you to create your own timer, or both. 
I soon realized that each workout has its own unique attributes and can be coded into a data frame. For example, a simple plank works on your abs and arms but not back and legs. It is of medium intensity and it does not require weights. Once you have a table of these workouts, you can select the workouts needed to train certain body parts with your desired training intensity. This is exactly what Circuit Maker is designed to do. \n\n## Requirements\n`python 2.7` or above\n\n`pandas`\n\n`numpy`\n\n`tkinter`\n\n`pygame`\n\n## How to use it\n1. Once you have cloned it, go to its directory and type `python circuit_gui.py`; it should start automatically.\n\n2. On this window, specify the number of circuits, the number of sets (n sets = 1 circuit) and the number of repeats (e.g. 2 means to repeat each circuit twice). Then select the body part to train and the desired training intensity.\n![img1](https://github.com/xiaoyuez/circuit_maker/blob/master/images/image1.png)<!-- .element height=\"50%\" width=\"50%\" -->\n\n3. On this window, specify the duration (in seconds) for each period in your workout. The default should work for most people.\n![img2|200x200,20%](https://github.com/xiaoyuez/circuit_maker/blob/master/images/image2.png)\n\n4. The program will start. You will see the following windows:\n![img3](https://github.com/xiaoyuez/circuit_maker/blob/master/images/image3.png)\n![img4](https://github.com/xiaoyuez/circuit_maker/blob/master/images/image4.png)\n\n## Future improvements\n1. Enlarge the workout database.\n2. Maybe add a restart button.\n" } ]
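The README above describes encoding each workout's attributes in a dataframe and filtering on them. Below is a minimal sketch of that selection step, reusing the column names circuit_gui.py reads (`Type`, the body-part flags, `Intensity`, `Weight`, `Band`, `chosen`); the sample rows themselves are made up:

```python
import pandas as pd

# Tiny stand-in for workouts.csv, with the columns circuit_gui.py expects.
df = pd.DataFrame({
    "Name": ["Plank", "Squat", "Jumping Jacks", "Bicep Curl"],
    "Type": ["Strength", "Strength", "Cardio", "Strength"],
    "Abs": [1, 0, 0, 0],
    "Legs": [0, 1, 1, 0],
    "Arms": [1, 0, 0, 1],
    "Intensity": [2, 2, 1, 1],
    "Weight": [0, 0, 0, 1],
    "Band": [0, 0, 0, 0],
    "chosen": [0, 0, 0, 0],
})

# The same filter circuit_maker applies for an abs workout with no equipment.
pool = df[(df.Abs == 1) & (df.Weight == 0) & (df.Band == 0) & (df.chosen == 0)]
pick = pool.sample()
df.loc[pick.index, "chosen"] = 1  # .loc avoids pandas' chained-assignment warning
print(pick.Name.iloc[0])
```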
2
mr-justice/python-coding-interview-prep
https://github.com/mr-justice/python-coding-interview-prep
d53a1350ad39b60e63729f07bba9b67b7b9d5b5e
3c4dac1d33c26c1269da8e3aedf1cc7767b738cc
4cb6fddc7ea259087b4f66e2a049dde5105bbb79
refs/heads/main
2023-02-25T20:36:33.607308
2021-01-31T19:44:05
2021-01-31T19:44:05
334,531,214
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.4503311216831207, "alphanum_fraction": 0.49558499455451965, "avg_line_length": 29.200000762939453, "blob_id": "80b99703bf185223bbe5854f3b0f7bf69b682bb2", "content_id": "1ec6db34ee291bd541eb9e8d0e667831a5393b50", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 906, "license_type": "permissive", "max_line_length": 69, "num_lines": 30, "path": "/pythonic_built_ins/range_vs_enumerate.py", "repo_name": "mr-justice/python-coding-interview-prep", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\ndef fizz_buzz(numbers):\n '''\n Given a list of integers:\n 1. Replace all integers that are evenly divisble by 3 with \"fizz\"\n 2. Replace all integers divisible by 5 with \"buzz\"\n 3. Replace all integers divisible by both 3 and 5 with \"fizzbuzz\"\n >>> numbers = [45, 22, 14, 65, 97, 72]\n >>> fizz_buzz(numbers)\n >>> numbers\n ['fizzbuzz', 22, 14, 'buzz', 97, 'fizz']\n '''\n\n \"\"\" for i in range(len(numbers)):\n num = numbers[i]\n if num % 3 == 0:\n numbers[i] = \"fizz\"\n if num % 5 == 0:\n numbers[i] = \"buzz\"\n if num % 3 == 0 and num % 5 == 0:\n numbers[i] = \"fizzbuzz\" \"\"\"\n\n for i, num in enumerate(numbers):\n if num % 3 == 0:\n numbers[i] = \"fizz\"\n if num % 5 == 0:\n numbers[i] = \"buzz\"\n if num % 3 == 0 and num % 5 == 0:\n numbers[i] = \"fizzbuzz\"\n" }, { "alpha_fraction": 0.4751381278038025, "alphanum_fraction": 0.4917127192020416, "avg_line_length": 19.22222137451172, "blob_id": "78fc01b78ceec9883cb78029540634267f38e160", "content_id": "7633541b829f6f2943c7a9acd6edbf20d5d64423", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 181, "license_type": "permissive", "max_line_length": 27, "num_lines": 9, "path": "/pythonic_built_ins/debugging.py", "repo_name": "mr-justice/python-coding-interview-prep", "src_encoding": "UTF-8", "text": "def max(list):\n max_num = -float('inf')\n for num in list:\n breakpoint()\n if num > max_num:\n max_num = num\n return max_num\n\nprint(max([-1, -2, -4]))" }, { "alpha_fraction": 0.5139999985694885, "alphanum_fraction": 0.515999972820282, "avg_line_length": 27.571428298950195, "blob_id": "2b7315d435f59914eb9bdd894a2b52141375c412", "content_id": "f3f760f34710a153b34b0780d0d62b45774fae47", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1000, "license_type": "permissive", "max_line_length": 52, "num_lines": 35, "path": "/leveraging_core_data_structures/deque.py", "repo_name": "mr-justice/python-coding-interview-prep", "src_encoding": "UTF-8", "text": "\nclass TicketQueue(object):\n def __init__(self):\n self.lst = []\n\n def add_person(self, name):\n \"\"\"\n >>> queue = TickerQueue()\n >>> queue.add_person(\"Jack\")\n Jack has been added to the Queue\n \"\"\"\n self.lst.append(name)\n print(f\"{name} has been added to the queue\")\n\n def service_person(self):\n \"\"\"\n >>> queue = TicketQueue()\n >>> queue.add_person(\"Jack\")\n Jack has been added to the queue\n >>> queue.service_person()\n Jack has been serviced\n \"\"\"\n name = self.lst.pop(0)\n print(f\"{name} has been serviced.\")\n\n def bypass_queue(self, name):\n \"\"\"\n >>> queue = TicketQueue()\n >>> queue.add_person(\"Jack\")\n Jack has been added to the queue\n >>> queue.bypass_queue(\"Jill\")\n Jill has bypassed the queue\n \"\"\"\n # self.lst = [name] + self.lst]\n self.lst.insert(0, name)\n print(f\"{name} has bypassed the queue\")" }, { "alpha_fraction": 
0.6037735939025879, "alphanum_fraction": 0.6163522005081177, "avg_line_length": 30.799999237060547, "blob_id": "d945ce95e66888c07d475c0639be34100c9dacf5", "content_id": "44d8dd865a0712bffcfab4f1a99d17dc6f0d291a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 954, "license_type": "permissive", "max_line_length": 73, "num_lines": 30, "path": "/leveraging_core_data_structures/counter.py", "repo_name": "mr-justice/python-coding-interview-prep", "src_encoding": "UTF-8", "text": "from collections import defaultdict, Counter\n\ndef top_three_letters(string):\n '''\n Given a string find the top three most frequent letters. This method \n should return a list of tuples, where the tuple contains \n the character and count.\n\n >>> top_three_letters(\"abbccc\")\n [('c', 3), ('b', 2), ('a', 1)]\n >>> top_three_letters(\"aabbccd\")\n [('a', 2), ('b', 2), ('c', 2)]\n '''\n\n \"\"\"\n 1) Loop through the string and store the count for each letter.\n 2) Sort the dictionary by the count and find the top three most\n frequent letters.\n 3) Return a formatted list to match the output.\n \"\"\"\n\n # counter = defaultdict(int)\n # for c in string:\n # counter[c] += 1\n # top_three_letters = sorted(\n # counter, key=lambda k: counter[k], reverse=True)[:3]\n # return [(c, counter[c]) for c in top_three_letters]\n\n\ndef top_three_letters_better(string):\n return Counter(string).most_common(3)\n" }, { "alpha_fraction": 0.5049505233764648, "alphanum_fraction": 0.5247524976730347, "avg_line_length": 28.5, "blob_id": "de9e07efabeac3ca1db81adbd9f2d19751cec17e", "content_id": "dfb8b3897eab41c9d77c84d832c6c92a92ead72d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 707, "license_type": "permissive", "max_line_length": 58, "num_lines": 24, "path": "/leveraging_core_data_structures/sets.py", "repo_name": "mr-justice/python-coding-interview-prep", "src_encoding": "UTF-8", "text": "def count_unique(seen):\n '''\n Count number of unique characters in seen\n >>> count_unique(\"aabb\")\n 2\n >>> count_unique(\"abcdef\")\n 6\n '''\n # seen_characters = [] # O(1)\n # # 0 + 1 + 2 + 3 + 4 + ... + n - 1 ~= n^2\n # for c in s: # O(n)\n # if c not in seen_characters: # O(n)\n # seen_characters.append(c) # O(n)\n # return len(seen_characters) # O(n^2)\n\n # seen_characters = set() # O(1)\n # for character in seen: # O(n)\n # if character not in seen_characters: # O(1)\n # seen_characters.add(character) O(1)\n # return len(seen_characters) O(n)\n\n # return len({character for character in seen}) # O(n)\n\n return len(set(seen)) # O(n)" }, { "alpha_fraction": 
This repository will be organized by the courses and lessons found with the learning path for easy navigation.\n" }, { "alpha_fraction": 0.6597937941551208, "alphanum_fraction": 0.6680412292480469, "avg_line_length": 20.55555534362793, "blob_id": "51bbc6762d32588c45c31810407872429e4a736c", "content_id": "706b84055752d522ad1fca061fc626307c7cf68d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 970, "license_type": "permissive", "max_line_length": 52, "num_lines": 45, "path": "/leveraging_core_data_structures/dictionary.py", "repo_name": "mr-justice/python-coding-interview-prep", "src_encoding": "UTF-8", "text": "from collections import defaultdict\n\nstudent_grades = {\n \"Jack\": [85, 90],\n \"Jill\": [80, 95]\n}\n\ndef get_grades_naive(name):\n if name in student_grades:\n return student_grades[name]\n return []\n\n\ndef get_grades_better(name):\n return student_grades.get(name, [])\n\n\ndef get_grades_with_assignment(name):\n if name not in student_grades:\n student_grades[name] = []\n return student_grades[name]\n\n\ndef get_grades_with_assignment_better(name):\n return student_grades.setdefault(name, [])\n\n\ndef set_grade_naive(name, score):\n if name in student_grades:\n grades = student_grades[name]\n else:\n student_grades[name] = []\n grades = student_grades[name]\n grades.append(score)\n\n\ndef set_grade_better(name, score):\n grades = get_grades_with_assignment_better(name)\n grades.append(score)\n\n\nstudent_grades = defaultdict(list, student_grades)\n\ndef set_grade_best(name, score):\n student_grades[name].append(score)\n" } ]
7
jakobkhansen/CattCommand
https://github.com/jakobkhansen/CattCommand
2d66886ec65cefc040039556fd46ea199d917e73
86a813a4a36625cad1ccf9c74baff26d1edf3124
3dec61f1afb546564e34a765ac3236a4fb17a3ff
refs/heads/master
2022-08-14T08:20:16.924353
2020-05-29T00:30:44
2020-05-29T00:30:44
267,718,402
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.639103889465332, "alphanum_fraction": 0.6501018404960632, "avg_line_length": 22.834951400756836, "blob_id": "b3dd5cac369cecd88dfe1a49f5ee458d6b0e9ab4", "content_id": "c4785c58265eebafcaca5e676b7fd80c0f0c62b1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2455, "license_type": "permissive", "max_line_length": 74, "num_lines": 103, "path": "/src/commands.py", "repo_name": "jakobkhansen/CattCommand", "src_encoding": "UTF-8", "text": "import os\nimport re\nimport json\n\n# Commands file\n\n# Values, modified by settings.json\nbasecommand = \"catt\"\nvolume_increment = 10\nrewind_amount = 15\nseek_amount = 15\nsaved_volume = 100\n\n# Loads settings into values from settings.json\ndef load_settings():\n global basecommand, volume_increment, rewind_amount, seek_amount\n filepath = os.path.dirname(__file__) + \"/settings.json\"\n config = json.load(open(filepath, \"r\"))\n\n basecommand = config[\"basecommand\"]\n volume_increment = config[\"volume_increment\"]\n rewind_amount = config[\"rewind_amount\"]\n seek_amount = config[\"seek_amount\"]\n\n# Gets info from catt\ndef get_info():\n return exec_command(\"info\")\n\n# Prints info from catt\ndef print_info():\n print(get_info())\n\n# Toggles play/pause\ndef play_toggle():\n exec_command(\"play_toggle\")\n\n# Turns volume down\ndef volumedown():\n command = \"volumedown {}\".format(volume_increment)\n exec_command(command)\n\n# Turns volume up\ndef volumeup():\n command = \"volumeup {}\".format(volume_increment)\n exec_command(command)\n\n# Rewinds the video\ndef rewind():\n command = \"rewind {}\".format(str(rewind_amount))\n exec_command(command)\n\n# Gets current volume\ndef get_volume():\n info = get_info()\n return float(re.findall(\"^volume_level: (.*)\", info, re.MULTILINE)[0])\n\n# Toggles mute\ndef toggle_mute():\n global saved_volume\n volume_level = get_volume()\n\n if (volume_level > 0):\n saved_volume = volume_level\n exec_command(\"volume 0\")\n\n else:\n new_volume = int(saved_volume*100) & 101\n exec_command(\"volume {}\".format(new_volume))\n\n# Gets current time\ndef get_time():\n info = get_info()\n return float(re.findall(\"^current_time: (.*)\", info, re.MULTILINE)[0])\n\n# Skips ahead\ndef skip():\n current_time = int(get_time())\n new_time = current_time + seek_amount\n command = \"seek {}\".format(new_time)\n exec_command(command)\n\n# Stops catt\ndef stop_stream():\n exec_command(\"stop\")\n\n# Executes a command in shell\ndef exec_command(command):\n full_command = \"{} {}\".format(basecommand, command)\n return os.popen(full_command).read()\n\n# List of commands with bindings.\ncommand_list = {\n \" \": [play_toggle, \"Toggling play\"],\n \"i\": [print_info, None],\n \"m\": [toggle_mute, \"Toggling mute\"],\n\n # Arrow keys\n 68: [rewind, \"Rewind\"],\n 65: [volumeup, \"Volume up\"],\n 66: [volumedown, \"Volume down\"],\n 67: [skip, \"Skip\"],\n \"x\": [stop_stream, \"Goodbye\"]\n}\n" }, { "alpha_fraction": 0.5664961934089661, "alphanum_fraction": 0.571611225605011, "avg_line_length": 19.05128288269043, "blob_id": "7bbf2a83a156b5774d48514cde8e5aada88ec380", "content_id": "89f82668abd4e420a11d5b8b570dc5dbdf29cef3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 782, "license_type": "permissive", "max_line_length": 74, "num_lines": 39, "path": "/src/main.py", "repo_name": "jakobkhansen/CattCommand", "src_encoding": "UTF-8", "text": "import commands\nimport os\nimport 
getch\nimport time\n\n\ndef main():\n commands.load_settings()\n input_loop()\n\n\n\ndef input_loop():\n\n user_input = \"\"\n while (user_input != \"q\" and user_input != \"x\"):\n user_input = getch.getch()\n if ord(user_input) == 27:\n getch.getch()\n user_input = ord(getch.getch())\n\n command_info = commands.command_list.get(user_input, [None, None])\n command = command_info[0]\n description = command_info[1]\n\n if (command is not None):\n command()\n\n if (description is not None):\n print(description)\n\n\ndef exec_command(command):\n full_command = \"{} {}\".format(commands.basecommand, command)\n os.system(full_command)\n\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.7386363744735718, "alphanum_fraction": 0.7386363744735718, "avg_line_length": 28.16666603088379, "blob_id": "a9c7261a1af2916972f6b9546212ed9ac075605d", "content_id": "c215b9f02934618bf6131bc4f07f12ead3b52c2c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 352, "license_type": "permissive", "max_line_length": 116, "num_lines": 12, "path": "/README.md", "repo_name": "jakobkhansen/CattCommand", "src_encoding": "UTF-8", "text": "# CattCommand\nTerminal UI for Catt, written in Python. WIP.\n\nRun main.py to start. It uses the default Chromecast set up in Catt (configurable in commands.py); a device selection menu may be implemented later.\n\n### Controls\nArrow up/down: Volume control \nArrow left/right: Rewind/Skip \nSpace: Toggle pause \nm: Toggle mute \nq: Exit (Does not close Catt) \nx: Close Catt and exit \n" } ]
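commands.py above shells out to catt through `os.popen`. A commonly preferred alternative is `subprocess.run`, which avoids spawning a shell; a minimal sketch, assuming `catt` is on PATH (this is not how the repo currently does it):

```python
import subprocess

def exec_command(command):
    # Same effect as the os.popen() call in commands.py, but the catt
    # arguments are passed as a list instead of a shell string.
    result = subprocess.run(["catt"] + command.split(),
                            capture_output=True, text=True)
    return result.stdout
```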
3
guinanseyebrows/pyspctl
https://github.com/guinanseyebrows/pyspctl
f12eeb564de732f000c63cd04f494a56e5a9ed3e
184bf49bc07b58fd0fe530a2996f18b238df1ab0
5133454d738a7874713061f24b1d14d3f0a21a8d
refs/heads/master
2021-05-20T01:19:29.115910
2020-04-01T09:12:50
2020-04-01T09:12:50
252,125,381
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7218044996261597, "alphanum_fraction": 0.7218044996261597, "avg_line_length": 20.66666603088379, "blob_id": "1ce68d98498738acc3efc59b66825626f2aad2de", "content_id": "11c4506ddb15fcece91b6d97cafd6038856eb5ba", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 133, "license_type": "permissive", "max_line_length": 54, "num_lines": 6, "path": "/README.md", "repo_name": "guinanseyebrows/pyspctl", "src_encoding": "UTF-8", "text": "# pyspctl\n## Control Spotify via Python/DBus\n### Usage\npyspctl *action*\n \nActions: playpause, previous, next, status, nowplaying\n \n" }, { "alpha_fraction": 0.6625258922576904, "alphanum_fraction": 0.6708074808120728, "avg_line_length": 28.5510196685791, "blob_id": "057484a6f3dd213c096ba0b3e27d30e4e58f4bdc", "content_id": "e6e02248fbf7991506fa8bb896e9713875835f7d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1449, "license_type": "permissive", "max_line_length": 105, "num_lines": 49, "path": "/pyspctl", "repo_name": "guinanseyebrows/pyspctl", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nfrom dbus import Interface, SessionBus \nfrom argparse import ArgumentParser \n\n\nparser = ArgumentParser(description='Control local Spotify via DBus')\nparser.add_argument('action', nargs=1, choices=['playpause', 'next', 'previous', 'status', 'nowplaying'])\naction = parser.parse_args().action[0]\n\nsession_bus = SessionBus()\n\nspotify_bus = session_bus.get_object(\"org.mpris.MediaPlayer2.spotify\",\n \"/org/mpris/MediaPlayer2\")\n\nspotify_properties = Interface(spotify_bus,\n \"org.freedesktop.DBus.Properties\")\n\nspotify_control = Interface(spotify_bus, dbus_interface='org.mpris.MediaPlayer2.Player')\n\ndef nowPlaying():\n metadata = spotify_properties.Get(\"org.mpris.MediaPlayer2.Player\", \"Metadata\")\n title = str(metadata['xesam:title'])\n title = (title[:32] + '...') if len(title) > 32 else title\n artist = str(metadata['xesam:artist'][0])\n print(artist + \" - \" + title)\n\ndef playbackStatus():\n print(spotify_properties.Get(\"org.mpris.MediaPlayer2.Player\", \"PlaybackStatus\"))\n\ndef togglePlayback():\n spotify_control.PlayPause()\n\ndef nextSong():\n spotify_control.Next()\n\ndef prevSong():\n spotify_control.Previous()\n\nif action == 'nowplaying':\n nowPlaying()\nelif action == 'status':\n playbackStatus()\nelif action == 'playpause':\n togglePlayback()\nelif action == 'previous':\n prevSong()\nelif action == 'next':\n nextSong()\n\n" } ]
2
automation-monkey/Stock-Tracker-App-Test-Framework
https://github.com/automation-monkey/Stock-Tracker-App-Test-Framework
301617ffd5dc7a463edd72019c3289551e9c674d
2d4b7a7c026ac82510bf52e8cdb1973fa8e4e5aa
0a0716125f8876520a97e9ca0797e8b1ea67fe4b
refs/heads/main
2023-04-08T04:22:48.164353
2021-04-18T18:50:54
2021-04-18T18:50:54
359,222,784
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.685915470123291, "alphanum_fraction": 0.6957746744155884, "avg_line_length": 24.81818199157715, "blob_id": "490442331081bb3a74eb4764951ccfbd562477df", "content_id": "d90db423611f9addd009c8a3f3bddd73a42e7428", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2840, "license_type": "no_license", "max_line_length": 160, "num_lines": 110, "path": "/README.md", "repo_name": "automation-monkey/Stock-Tracker-App-Test-Framework", "src_encoding": "UTF-8", "text": "# Stock-Tracker-App-tests\n\nThis project contains automated API testing framework for the [Stock Tracker App](https://github.com/automate-digital/stocktracker-py) written in Python3 using \n[pytest](https://docs.pytest.org/).\n\n### Setup & Running tests\n\nSetup Stock Tracker App according to their instructions.\n\n#### Install project dependencies:\n\nTests require Python3.\n \n pip3 install -r requirements.txt\n\nChange directory to `tests`\n\n```$ cd tests```\n\nTo run all the tests simply run\n\n```$ pytest```\n\nTo make it verbose use\n\n```$ pytest -v```\n\nYou can generate a html and xml reports using\n\n```$ pytest --html=html/report.html --junitxml=xml/report.xml```\n\nTo run tests in parallel specify the number of processes (N)\n\n```$ pytest -n 4```\n\nThe basic command which runs all the tests in parallel and generates a report\n\n```$ pytest -v --html=html/report.html --self-contained-html --junitxml=xml/report.xml tests/ -n 4```\n\n# Test Plan \n\n1. Test Scope:\n \n\n - Stock Tracker App and its functionalities:\n - Add/Update holding\n - Remove holding\n - Get Valuation\n - Get Portfolio\n\n\n2. Test Schedule:\n \n\n - Start date: Friday, April 16 18:00\n - Requirement Understanding\n - Test Plan creation\n - Test Cases creation\n - Test Execution in Different Environments\n - QA Sign-off\n - End date: Monday, April 19 18:00\n\n\n3. Test Types:\n \n\n - The overall application will include the following testing types/techniques:\n - Feature > Basic Feature Testing\n - GUI > Basic Validate look and feel of the application\n - Database > Basic Verification of DB interactions (csv file)\n - E2E > Validate flows\n - Business Rule > Validate rules with positive/negative conditions\n - Service Level Testing > Validate web service level features (API`s)\n - Error Handling > Verify Application's Error handling\n \n\n4. Test Environment\n \n\n - Local Environment\n \n5. Test Approach\n\n\n Test levels: Acceptance Testing\n Test types: Happy Path, Functional, E2E, Exploratory, Blackbox\n \n6. Exit Criteria\n \n\n - All features been verified and covered by tests\n - Manual tests have passed\n - No Critical or Blocker defects outstanding.\n - Automation Suite Successfully Passed.\n\n\n7. 
Open Risks/Issues\n\n - Risks: the app does not handle downtime of the third-party service it depends on.\n - Issues: None\n\n# Notes on improvements that could be considered to further develop the tests\nSince the app is based on a third-party API, I suggest:\n- Add tests that check the actual valuation of stocks added to the app and compare it with the portfolio created by the app.\n- Create cases that check how the app behaves during downtime, since the app relies on [alphavantage](https://www.alphavantage.co/) endpoints.\n" }, { "alpha_fraction": 0.5, "alphanum_fraction": 0.6857143044471741, "avg_line_length": 16.5, "blob_id": "1dfcf87b13e9b72fe8f1753be382620873b228a5", "content_id": "16f867a59f0b32990924af7780759700602b2da6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 70, "license_type": "no_license", "max_line_length": 19, "num_lines": 4, "path": "/requirements.txt", "repo_name": "automation-monkey/Stock-Tracker-App-Test-Framework", "src_encoding": "UTF-8", "text": "requests==2.25.1\npytest==6.2.3\npytest-xdist==2.2.1\npytest-html==3.1.1\n" }, { "alpha_fraction": 0.6095275282859802, "alphanum_fraction": 0.6157751083374023, "avg_line_length": 40.30644989013672, "blob_id": "cd492565cc6da95007202eb9846c45aa969ed589", "content_id": "9fc9f85d690507e7741b84a5cf59beb3da0e5d70", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2561, "license_type": "no_license", "max_line_length": 92, "num_lines": 62, "path": "/utils.py", "repo_name": "automation-monkey/Stock-Tracker-App-Test-Framework", "src_encoding": "UTF-8", "text": "import json\nimport requests\n\n\nclass BaseTest:\n\n BASE_URL = 'http://localhost:8080/api/'\n\n @classmethod\n def _get_request(cls, url=None, headers=None, params=None):\n request_url = '{}'.format(url)\n response = requests.get(url=request_url,\n headers=headers,\n params=params)\n print('Get request sent to {}'.format(request_url))\n print('Request headers {}'.format(headers))\n print('Content of the request {}'.format(response.content))\n print('Status code of the request {}'.format(response.status_code))\n print('*' * 100)\n return response\n\n @classmethod\n def _post_request(cls, url=None, headers=None, cookies=None, data=None):\n request_url = '{}'.format(url)\n response = requests.post(url, headers=headers, cookies=cookies, data=data)\n print('Post request sent to {}'.format(url))\n print('Request headers {}'.format(headers))\n print('Request data {}'.format(data))\n print('Content of the request {}'.format(response.content))\n print('Status code of the request {}'.format(response.status_code))\n print('*' * 100)\n return response\n\n @classmethod\n def _delete_request(cls, url=None, headers=None, cookies=None, data=None):\n request_url = '{}'.format(url)\n response = requests.delete(request_url, headers=headers, cookies=cookies, data=data)\n print('Delete request sent to {}'.format(request_url))\n print('Request headers {}'.format(headers))\n print('Request data {}'.format(data))\n print('Content of the request {}'.format(response.content))\n print('Status code of the request {}'.format(response.status_code))\n print('*' * 100)\n return response\n\n @classmethod\n def _put_request(cls, url=None, headers=None, cookies=None, data=None):\n request_url = '{}'.format(url)\n response = requests.put(request_url, headers=headers, cookies=cookies, data=data)\n print('Put request sent to {}'.format(request_url))\n print('Request headers 
{}'.format(headers))\n print('Request data {}'.format(data))\n print('Content of the request {}'.format(response.content))\n print('Status code of the request {}'.format(response.status_code))\n print('*' * 100)\n return response\n\n @classmethod\n def _get_user_portfolio(cls):\n r = cls._get_request(url=cls.BASE_URL+'portfolio')\n portfolio = json.loads(r.content)\n return portfolio\n" }, { "alpha_fraction": 0.6356589198112488, "alphanum_fraction": 0.6465116143226624, "avg_line_length": 41.43421173095703, "blob_id": "f8da3d1bbf375a505feee9a5cc9bf950bdb35a0a", "content_id": "d5b6fda5748a93f4cd5283ef3f51c9c077039c0f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3225, "license_type": "no_license", "max_line_length": 109, "num_lines": 76, "path": "/tests/test_stock_tracker_app.py", "repo_name": "automation-monkey/Stock-Tracker-App-Test-Framework", "src_encoding": "UTF-8", "text": "import json\nimport pytest\nfrom utils import BaseTest\n\n\nclass TestStockTrackerApp(BaseTest):\n\n VALUATION = {'valuation': float}\n\n USER_PORTFOLIO_EXPECTED = {'AMZN': 1}\n\n # Initiate portfolio with expected test data\n @pytest.fixture(autouse=True, scope='session')\n def create_user_portfolio(self):\n self._post_request(url='http://localhost:8080/api/holding', data={'ticker': 'AMZN', 'units': 1})\n\n @classmethod\n def setup_class(cls):\n cls.tracker_endpoint_url = BaseTest.BASE_URL\n cls.holding_endpoint = cls.tracker_endpoint_url + 'holding'\n cls.portfolio_endpoint = cls.tracker_endpoint_url + 'portfolio'\n cls.valuation_endpoint = cls.tracker_endpoint_url + 'valuation'\n\n def test_get_portfolio_check_data_type(self):\n portfolio = self._get_user_portfolio()\n # Compare response portfolio to expected result\n assert portfolio == self.USER_PORTFOLIO_EXPECTED\n\n def test_get_valuation_check_type_and_structure(self):\n r = self._get_request(url=self.valuation_endpoint)\n valuation = json.loads(r.content)\n assert r.status_code == 200\n assert valuation['valuation'] > 0\n for key in valuation:\n # Verify returned valuation dict types and structure\n assert isinstance(valuation[key], self.VALUATION.get(key)), '{} key incorrect format'.format(key)\n assert all(key in valuation for key in self.VALUATION), '{} key is missing'.format(key)\n\n def test_add_update_and_remove_holding(self):\n # This test adds the twitter stock to the portfolio,\n # updates and deletes it. 
Verification is made for the whole flow.\n ticker = 'TWTR'\n\n # Add new ticker\n r_add_ticker = self._post_request(url=self.holding_endpoint, data={'ticker': ticker, 'units': 5})\n assert r_add_ticker.status_code == 201\n\n # Check ticker is created\n user_portfolio = self._get_user_portfolio()\n assert ticker in user_portfolio and user_portfolio[ticker] == 5\n\n # Update ticker value\n r_update_ticker = self._post_request(url=self.holding_endpoint, data={'ticker': ticker, 'units': 6})\n assert r_update_ticker.status_code == 201\n\n # Check ticker is updated\n user_portfolio = self._get_user_portfolio()\n assert ticker in user_portfolio and user_portfolio[ticker] == 6\n\n # Delete ticker\n r_del_ticker = self._delete_request(url=self.holding_endpoint, data={'ticker': ticker})\n assert r_del_ticker.status_code == 204\n\n # Check ticker is deleted\n user_portfolio = self._get_user_portfolio()\n assert ticker not in user_portfolio\n\n @pytest.mark.parametrize('ticker', ('A A P L', '!@#$%', ' ', 'VOW3.DE', '1234'))\n def test_add_new_stock_using_invalid_ticker(self, ticker):\n r = self._post_request(url=self.holding_endpoint, data={'ticker': ticker, 'units': 1})\n assert r.status_code == 400\n\n @pytest.mark.parametrize('units', ('A', 'a', '@', ' ', '.'))\n def test_add_new_stock_using_invalid_units(self, units):\n r = self._post_request(url=self.holding_endpoint, data={'ticker': 'AAPL', 'units': units})\n assert r.status_code == 400\n" } ]
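The README's improvement notes suggest comparing valuation against the portfolio. Below is a hedged sketch of such a test built on the `BaseTest` helpers above; it is hypothetical, assumes the app and the third-party price feed are up, and only asserts a weak invariant (raising the AMZN holding from the fixture's 1 unit to 2 should not lower the valuation):

```python
import json

from utils import BaseTest


class TestValuationConsistency(BaseTest):
    def test_valuation_does_not_drop_when_units_increase(self):
        valuation_url = self.BASE_URL + 'valuation'
        holding_url = self.BASE_URL + 'holding'

        before = json.loads(self._get_request(url=valuation_url).content)
        # Bump AMZN from the fixture's 1 unit to 2 units.
        self._post_request(url=holding_url, data={'ticker': 'AMZN', 'units': 2})
        after = json.loads(self._get_request(url=valuation_url).content)

        assert after['valuation'] >= before['valuation']
```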
4
EssamKairy/contset2
https://github.com/EssamKairy/contset2
90ba6b0dd028e89cde6b005392eafb73683a216d
d920cf5b01e5d3af14922e9cbfbe777cb50d666a
7394bcc50396383d4dd41e2b9bd31aaf36273ace
refs/heads/master
2021-04-17T15:11:05.088891
2020-03-23T14:29:44
2020-03-23T14:29:44
249,453,752
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4906832277774811, "alphanum_fraction": 0.5031055808067322, "avg_line_length": 15.199999809265137, "blob_id": "e17673a02df87a3ccc29e8d2c5094e2d60a30322", "content_id": "a2b6417eadcb641fded7eec2ff5c3a29802f86ae", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 161, "license_type": "no_license", "max_line_length": 38, "num_lines": 10, "path": "/P_A.py", "repo_name": "EssamKairy/contset2", "src_encoding": "UTF-8", "text": "# l = list(input().split())\n\nt = int(input())\nres = []\nfor i in range(t):\n c = int(input())\n res.append(str(1)+ ' ' + str(c-1))\n\nfor s in res:\n print(s)" }, { "alpha_fraction": 0.49259260296821594, "alphanum_fraction": 0.5074074268341064, "avg_line_length": 17, "blob_id": "7ea322ffcc59e10f1ac7bb2556d389c87af0469e", "content_id": "d1417bf629cd4600e66324c1131d22306e30a583", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 270, "license_type": "no_license", "max_line_length": 30, "num_lines": 15, "path": "/A.py", "repo_name": "EssamKairy/contset2", "src_encoding": "UTF-8", "text": "t = int(input())\nl = list(input().split())\nmid = []\nres = []\nl = [int(i) for i in l]\nfor i in range(1, len(l) + 1):\n t = [0] + l[:(i)-1]\n #print(t)\n x = max(t)\n mid.append(x)\nprint(l)\nprint(mid)\nfor i in range(len(l)):\n res.append(l[i]-mid[i])\nprint(res)\n" } ]
2
LoganWolfe/Discord-Server-Bot
https://github.com/LoganWolfe/Discord-Server-Bot
c91fff828d7e427d27448d1b6e59bb0355cc7afc
079868d090d8b0926aa8677c3ce75899b1af5449
b1aae18c6b96dcb2fb9892931abfe0d2ef830df6
refs/heads/main
2023-04-29T08:22:50.915770
2021-05-15T08:13:25
2021-05-15T08:13:25
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6518987417221069, "alphanum_fraction": 0.6540084481239319, "avg_line_length": 25.38888931274414, "blob_id": "ce9e4d9a826f605af32dc82bc9cce512a3550d6f", "content_id": "7fab395abe747e339491df4d7d2946bd6ad6ed70", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 474, "license_type": "no_license", "max_line_length": 64, "num_lines": 18, "path": "/src/cogs/misc.py", "repo_name": "LoganWolfe/Discord-Server-Bot", "src_encoding": "UTF-8", "text": "import discord\nfrom discord.ext import commands\n\nclass Miscellaneous(commands.Cog):\n def __init__ (self, client):\n self.client = client\n\n\n @commands.command(pass_context=True)\n async def hb(self, ctx):\n \"\"\"Prints a Happy Birthday message. Use: '.hb <name>'\"\"\"\n message = ctx.message.content[3:]\n await ctx.message.delete()\n await ctx.send(f'Happy Birthday{message}')\n\n\ndef setup(client):\n client.add_cog(Miscellaneous(client))" }, { "alpha_fraction": 0.7951807379722595, "alphanum_fraction": 0.7951807379722595, "avg_line_length": 53.66666793823242, "blob_id": "8059ba22928ecf4fab737f8dd82121f5718aee4c", "content_id": "749e8a45bb7344836bf49846939c7af92f53feef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 166, "license_type": "no_license", "max_line_length": 79, "num_lines": 3, "path": "/README.md", "repo_name": "LoganWolfe/Discord-Server-Bot", "src_encoding": "UTF-8", "text": "# Discord-Server-Bot\nA Discord server bot that supports user commands and audio playback features. \nBuilt using Rapptz's Discord.py wrapper and deployed with AWS. \n" }, { "alpha_fraction": 0.6708004474639893, "alphanum_fraction": 0.6753100156784058, "avg_line_length": 25.477611541748047, "blob_id": "2c0beec60f3c187ecfc2baca60192fcff61646a8", "content_id": "c49295b5e4b7530671d74f8d8d30acad569c67eb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1780, "license_type": "no_license", "max_line_length": 61, "num_lines": 67, "path": "/src/discordBot.py", "repo_name": "LoganWolfe/Discord-Server-Bot", "src_encoding": "UTF-8", "text": "import asyncio\nimport os\nimport time\n\nimport discord\nfrom discord.ext import commands\n\nclient = commands.Bot(command_prefix = '.')\n\n@client.command()\nasync def load(ctx, extension):\n client.load_extension(f'cogs.{extension}')\n await ctx.send(f'{extension} extension loaded.')\n\n@client.command()\nasync def unload(ctx, extension):\n client.unload_extension(f'cogs.{extension}')\n await ctx.send(f'{extension} extension unloaded.')\n\n@client.command()\nasync def reload(ctx, extension):\n client.unload_extension(f'cogs.{extension}')\n client.load_extension(f'cogs.{extension}')\n await ctx.send(f'{extension} extension reloaded.')\n\n\nfor filename in os.listdir('./src/cogs'):\n if filename.endswith('.py'):\n client.load_extension(f'cogs.{filename[:-3]}')\n\n@client.event\nasync def on_ready():\n print('Bot is ready.')\n\n@client.command()\n@commands.has_guild_permissions()\nasync def clear(ctx, amount = 5):\n if len(ctx.message.content) > 6:\n try:\n amount = int(ctx.message.content[6:])\n except ValueError:\n await ctx.send('Proper format: .clear <integer>')\n\n await ctx.channel.purge(limit = amount + 1)\n await ctx.send(f\"{amount} message(s) cleared.\")\n\n await asyncio.sleep(5)\n await ctx.channel.purge(limit = 1)\n\n@client.command()\nasync def poll(ctx):\n channel = ctx.channel\n\n # Store message here and delete it from 
channel.\n message = ctx.message\n await ctx.message.delete()\n\n # Ping everyone and post poll.\n await channel.send('@everyone')\n msg = await channel.send(message.content[6:])\n\n # Add reactions to poll.\n await msg.add_reaction('👍')\n await msg.add_reaction('👎')\n\n# Private AUTH code. Do not share with anyone or change.\nclient.run('Removed for git upload...')\n" }, { "alpha_fraction": 0.5770609378814697, "alphanum_fraction": 0.5798771381378174, "avg_line_length": 34.180179595947266, "blob_id": "54c0c8b9b89c323fa1035709205828dde3e4c957", "content_id": "1d2f459659710429727d691fe37ac397506a134b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3906, "license_type": "no_license", "max_line_length": 113, "num_lines": 111, "path": "/src/cogs/voice.py", "repo_name": "LoganWolfe/Discord-Server-Bot", "src_encoding": "UTF-8", "text": "import youtube_dl\nimport discord\nimport os\nfrom discord.ext import commands\nfrom discord.utils import get\n\nclass Voice(commands.Cog):\n def __init__ (self, client):\n self.client = client\n self.voiceClients = {}\n self.players = {}\n\n # Commands\n @commands.command(pass_context=True, aliases=['j'])\n async def join(self, ctx):\n \"\"\"Bot joins current voice channel. Use: '.join'\"\"\"\n voice_channel = ctx.author.voice.channel\n voice_state = get(self.client.voice_clients, guild=ctx.guild)\n\n if voice_state and voice_state.is_connected():\n await voice_state.move_to(voice_channel)\n else:\n voice_state = await voice_channel.connect()\n\n await ctx.send(f'Connected to: {voice_channel}')\n\n #self.voiceClients[ctx.guild.id] = await voice_channel.connect()\n\n @commands.command(pass_context=True, aliases=['l'])\n async def leave(self, ctx):\n \"\"\"Bot leaves current voice channel. Use: '.leave'\"\"\"\n voice_channel = ctx.author.voice.channel\n voice_state = get(self.client.voice_clients, guild=ctx.guild)\n\n if voice_state and voice_state.is_connected():\n await voice_state.disconnect()\n await ctx.send(f'Disconnected from: {voice_channel}')\n else:\n await ctx.send(\"I don't think I'm in a voice channel...\")\n\n @commands.command(pass_context=True, aliases=['p'])\n async def play(self, ctx, url: str):\n \"\"\"W.I.P. -- Bot plays audio of given URL. 
Use: '.play <url>'\"\"\"\n song_there = os.path.isfile(\"song.mp3\")\n try:\n if song_there:\n os.remove(\"song.mp3\")\n print(\"Removed old song file\")\n except PermissionError:\n print(\"Tried to delete song file, but it's being played.\")\n await ctx.send(\"I can't do that: that music is playing.\")\n return\n\n await ctx.send(\"Preparing your song now.\")\n\n voice_state = get(self.client.voice_clients, guild=ctx.guild)\n\n ydl_opts = {\n 'format': 'bestaudio/best',\n 'postprocessors': [{\n 'key': 'FFmpegExtractAudio',\n 'preferredcodec': 'mp3',\n 'preferredquality': '192',\n }],\n }\n\n with youtube_dl.YoutubeDL(ydl_opts) as ydl:\n print(\"Downloading audio now\\n\")\n ydl.download([url])\n\n global name\n for file in os.listdir(\"./\"):\n if file.endswith(\".mp3\"):\n name = file\n print(f'Renamed file: {file}\\n')\n os.rename(file, \"song.mp3\")\n\n voice_state.play(discord.FFmpegPCMAudio(\"song.mp3\"), after=lambda e: print('Song has finished playing.'))\n voice_state.source = discord.PCMVolumeTransformer(voice_state.source)\n voice_state.source.volume = .1\n\n #nname = name.rsplit(\"-\", 2)\n #await ctx.send(f'Playing: {nname}')\n print('Playing.\\n')\n\n @commands.command(pass_context=True, aliases=['pa'])\n async def pause(self, ctx):\n voice_state = get(self.client.voice_clients, guild=ctx.guild)\n\n if voice_state and voice_state.is_playing():\n print(\"Music paused\")\n voice_state.pause()\n await ctx.send(\"Music paused\")\n else:\n print(\"Music not playing - failed pause.\")\n await ctx.send(\"Music not playing!\")\n\n @commands.command(pass_context=True, aliases=['r'])\n async def resume(self, ctx):\n voice_state = get(self.client.voice_clients, guild=ctx.guild)\n\n if voice_state and voice_state.is_paused():\n print(\"Music resumed.\")\n voice_state.resume()\n await ctx.send(\"Music resumed.\")\n else:\n print(\"Music not paused.\")\n await ctx.send(\"Music is not paused you dingus.\")\n\ndef setup(client):\n client.add_cog(Voice(client))\n\n" } ]
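discordBot.py above has its bot token stripped for the upload. A common pattern is to read the token from the environment instead of hard-coding it; a sketch, where the variable name `DISCORD_TOKEN` is an assumption, not something the repo defines:

```python
import os

# DISCORD_TOKEN is a hypothetical name; set it in the shell
# (e.g. export DISCORD_TOKEN=...) rather than committing the token.
token = os.environ.get("DISCORD_TOKEN")
if token is None:
    raise RuntimeError("DISCORD_TOKEN is not set")
client.run(token)
```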
4
tejastu/Python-Series-1-Print-Statements
https://github.com/tejastu/Python-Series-1-Print-Statements
45a519fb3b2ba4fe237e5b87bed1230a1c1eaf7c
7f12664c4e67b35fe75dbff8a32d0adbf12fb8a2
5f2863a6a403c9fbb23e4df240f2218babea1513
refs/heads/master
2023-02-15T03:48:09.297480
2021-01-15T06:55:10
2021-01-15T06:55:10
327,343,558
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7910447716712952, "alphanum_fraction": 0.7985074520111084, "avg_line_length": 43.66666793823242, "blob_id": "2d768eace3d755a1a78cce469145d5989f40d108", "content_id": "3bb5463e68f9488f6c29e8d7cbaa6790d78636cc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 134, "license_type": "no_license", "max_line_length": 97, "num_lines": 3, "path": "/README.md", "repo_name": "tejastu/Python-Series-1-Print-Statements", "src_encoding": "UTF-8", "text": "# Python-Series-1-Print-Statements\n\nThe print() function prints the specified message to the screen, or other standard output device.\n" }, { "alpha_fraction": 0.7421875, "alphanum_fraction": 0.7473958134651184, "avg_line_length": 37.400001525878906, "blob_id": "a5fbfab43c5ba849ea8ef03c845866771265a40d", "content_id": "fd968a293196d6e47a1e61858a8e0e680d7fbe65", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 384, "license_type": "no_license", "max_line_length": 82, "num_lines": 10, "path": "/1_hello.py", "repo_name": "tejastu/Python-Series-1-Print-Statements", "src_encoding": "UTF-8", "text": "# Single line print\nprint(\"Hello world\")\n\n\n# Multi line print\nprint('''Lorem ipsum, or lipsum as it is sometimes known,\n is dummy text used in laying out print, graphic or web designs.\n The passage is attributed to an unknown typesetter in the 15th century ,\n who is thought to have scrambled parts of Cicero's De Finibus Bonorum et Malorum\n for use in a type specimen book.''')\n" } ]
2
Quinbit/file_organizer
https://github.com/Quinbit/file_organizer
fc50e5fce138db8f8f2415b7cfcc68447efdcc3c
77cf26934111701eb40bd4f2cd5d7d80d58dc4b2
5d61333b3c489f04a623f2fe4f5e129dd2cab20d
refs/heads/master
2020-03-21T15:59:39.560552
2018-06-28T20:36:39
2018-06-28T20:36:39
138,744,716
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5581279993057251, "alphanum_fraction": 0.5728611946105957, "avg_line_length": 34.2707405090332, "blob_id": "dc8ddffafd00f4b84be4518c15441c66e4bc3f23", "content_id": "8f4a33228d60a4c7edda0fa095fa8fc2a0608320", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8077, "license_type": "no_license", "max_line_length": 145, "num_lines": 229, "path": "/build/lib/categorize/other.py", "repo_name": "Quinbit/file_organizer", "src_encoding": "UTF-8", "text": "import sys\nfrom PyQt5.QtWidgets import QApplication, QWidget, QPushButton\nfrom PyQt5.QtGui import QIcon\nfrom PyQt5 import QtGui\nfrom PyQt5.QtCore import pyqtSlot\nfrom PyQt5.QtWidgets import QInputDialog, QFileDialog, QLineEdit, QLabel\nfrom PyQt5.QtGui import QIcon\nimport os\nfrom functools import reduce\nfrom PyQt5.QtCore import pyqtSlot\nfrom PyQt5.QtWidgets import QMainWindow, QApplication, QPushButton, QWidget, QAction, QTabWidget,QVBoxLayout, QMessageBox\nfrom random import random\n\nclass App(QWidget):\n def __init__(self):\n #here we initialize the basic parameters for the gui\n super().__init__()\n os.chdir(\"/\")\n self.prev_text = \"\"\n self.title = 'HTML Highlighter'\n self.col_codes = {}\n self.left = 10\n self.top = 10\n self.width = 640\n self.height = 800\n\n self.initUI()\n\n def initUI(self):\n #We set the basic geometry of the window\n self.setWindowTitle(self.title)\n self.setGeometry(self.left, self.top, self.width, self.height)\n\n #Here we set up the button that will allow the user to find their html file of choice\n button = QPushButton('Find File', self)\n button.setToolTip('This button will allow you to select an html file to process')\n button.move(250,300)\n button.clicked.connect(self.on_click)\n\n #This button will allow the user to submit their predetermine color preferences\n col_button = QPushButton('Submit_color', self)\n col_button.move(250,550)\n col_button.clicked.connect(self.col_click)\n\n #This object will describe the drag and drop area for files\n drag_object = CustomLabel('Drag and file file here or press the find file button below\\n \\\n to browse for your html text file', self)\n drag_object.move(130,50)\n\n self.button = button\n self.drag_object = drag_object\n\n #This label will give the user the intructions needed\n instr = QLabel(self)\n instr.setText(\"To manually predefine a highlight color, type in the tag \\nand the desired \\\ncolour code in the two text boxes below\")\n instr.move(130, 350)\n\n #This label will list all of the label colours that the user has specified\n res = QLabel(self)\n res.setText(\"\")\n res.move(130, 600)\n res.resize(300, 200)\n\n self.res = res\n\n #Where the user will enter in the tag\n self.textbox = QLineEdit(self)\n self.textbox.setText(\"Type in html tag without brackettes\")\n self.textbox.move(160, 450)\n self.textbox.resize(280,40)\n\n #Where the user will enter in the colour of the tag\n self.textbox2 = QLineEdit(self)\n self.textbox2.setText(\"Type in color code\")\n self.textbox2.move(160, 500)\n self.textbox2.resize(280,40)\n\n self.show()\n\n #returns the file that the user specified\n def openFileNameDialog(self):\n options = QFileDialog.Options()\n options |= QFileDialog.DontUseNativeDialog\n fileName, _ = QFileDialog.getOpenFileName(self,\"QFileDialog.getOpenFileName()\", \"\",\"All Files (*);;Python Files (*.py)\", options=options)\n if fileName:\n print(fileName)\n return fileName\n else:\n return \"Error\"\n\n #looks for a file to get the html from\n 
@pyqtSlot()\n    def on_click(self):\n        print('Looking for file')\n        location = self.openFileNameDialog()\n        new_location = create_new_html(location, self)\n        self.drag_object.setText(\"New html text file saved to: \\n\" + new_location)\n\n    #Saves the user's input for tag colour\n    @pyqtSlot()\n    def col_click(self):\n        tag = self.textbox.text().upper()\n        col = self.textbox2.text().upper()\n        self.prev_text = self.prev_text + \"\\n\" + tag + \" will have colour code \" + col\n        print(self.prev_text)\n        self.res.setText(self.prev_text)\n        self.col_codes[tag] = col\n\n#Describes the hover area\nclass CustomLabel(QLabel):\n    def __init__(self, title, parent):\n        super().__init__(title, parent)\n        self.parent = parent\n        self.setAcceptDrops(True)\n        #Qt expects int geometry values; // with a float operand yields a float\n        self.setGeometry(parent.left // 2, parent.top // 2, int(parent.width // 1.5), int(parent.height // 3.0))\n\n    def dragEnterEvent(self, e):\n        print(\"File has entered the drop area\")\n        e.accept()\n\n    def dropEvent(self, e):\n        self.setText(e.mimeData().text())\n        self.move(100, 50)\n        location = e.mimeData().text()\n        new_location = create_new_html(location, self.parent)\n        self.setText(\"New html text file saved to: \\n\" + new_location)\n#Called when a new file is to be created\ndef create_new_html(location, gui_obj):\n    #Drag-and-drop hands over a file:// URL while the file dialog hands over\n    #a plain path, so only strip the scheme when it is actually present\n    if location.startswith(\"file://\"):\n        location = location[8:]\n\n    new_file = get_new_html_location(location)\n\n    f = open(location, \"r\")\n\n    text = f.read()\n\n    content = mod_string(text, gui_obj)\n\n    g = open(new_file, \"w\")\n\n    g.write(content)\n    g.close()\n    f.close()\n\n    return new_file\n\n#Modifies the input string to give the corrected html string\ndef mod_string(string, gui_obj):\n    code = [\"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"A\", \"B\", \"C\", \"D\", \"E\", \"F\"]\n    tag_stack = [None]\n    cur_color = \"\"\n    i = 0\n\n    #Cycles through all text in the file\n    while (i < len(string)):\n        new = \"\"\n        if string[i] == \"<\":\n            x = 1\n\n            #Identifies the tag\n            while(string[x+i] != \">\"):\n                new += string[x+i]\n                x += 1\n\n            #If the tag is the end tag end_statement will be true\n            end_statement = (\"/\" in new )\n            new = new.replace(\"/\", \"\").upper()\n\n            if (not end_statement):\n                #Since break doesn't have any ending tags, it is treated differently\n                if new == \"BR\":\n                    end_statement = True\n\n                #We essentially create a stack of all the tags to determine what color should be displayed\n                tag_stack.append(new)\n\n                #Determines if this tag has been encountered before\n                if (gui_obj.col_codes.get(new.upper(), False)):\n                    string = string[:i] + \"\\\\color[\" + gui_obj.col_codes.get(new) + ']' + string[i:]\n                    i += len(\"\\\\color[\" + gui_obj.col_codes.get(new) + ']')\n                else:\n                    col = \"\"\n                    for n in range(6):\n                        col += code[int(random() * 16)]\n\n                    gui_obj.col_codes[new.upper()] = col\n                    string = string[:i] + \"\\\\color[\" + col + ']' + string[i:]\n                    i += len(\"\\\\color[\" + col + ']')\n\n            #If this is the ending tag\n            if end_statement:\n                tag_stack.remove(tag_stack[-1])\n                new_tag = False\n\n                if (len(string) > x+i+1):\n                    test_string = string[x+i+1:]\n                    test_string = test_string.replace(\"\\n\",\"\")\n                    test_string = test_string.replace(\" \", \"\")\n                    #tests to see if there is another tag that will immediately take over the color scheme\n                    if test_string[0] == \"<\":\n                        if test_string[1] != '/':\n                            new_tag = True\n                    else:\n                        new_tag = True\n\n                #If there isn't any new tags coming up, we rely on the stack for the old tags for our color\n                if not new_tag:\n                    string = string[:i+x+1] + \"\\\\color[\" + gui_obj.col_codes.get(tag_stack[-1]) + ']' + string[1+i+x:]\n                    i += len(\"\\\\color[\" + 
gui_obj.col_codes.get(tag_stack[-1]) + ']')\n i += 1\n\n print(gui_obj.col_codes)\n return string\n\n#Gets the string for the location of the modified html file\ndef get_new_html_location(location):\n location = location.split(\"/\")\n location.remove(location[-1])\n location.append(\"output.txt\")\n location = reduce(lambda x,y: x+'/'+y, location)\n\n return location\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n ex = App()\n sys.exit(app.exec_())\n" }, { "alpha_fraction": 0.5814407467842102, "alphanum_fraction": 0.5969955325126648, "avg_line_length": 34.224998474121094, "blob_id": "80b13804bebf0b15f49024df4e7e98ecc561bf8b", "content_id": "f3ed1bc2da4b6e1e6cab16d5d0be61c9e9935bd4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 16908, "license_type": "no_license", "max_line_length": 202, "num_lines": 480, "path": "/categorize/main.py", "repo_name": "Quinbit/file_organizer", "src_encoding": "UTF-8", "text": "import sys\nfrom PyQt5.QtWidgets import QApplication, QWidget, QPushButton\nfrom PyQt5.QtGui import QIcon\nfrom PyQt5 import QtGui\nfrom PyQt5.QtCore import pyqtSlot\nfrom PyQt5.QtWidgets import QInputDialog, QFileDialog, QLineEdit, QLabel, QComboBox, QMessageBox\nfrom PyQt5.QtGui import QIcon\nimport os\nfrom functools import reduce\nfrom PyQt5.QtCore import pyqtSlot\nfrom PyQt5.QtWidgets import QMainWindow, QApplication, QPushButton, QWidget, QAction, QTabWidget,QVBoxLayout\nfrom random import random\n\ndef parse_for_hidden(files):\n deleted = 0\n for i in range(len(files)):\n if len(files[i-deleted]) < 1:\n del files[i-deleted]\n deleted += 1\n elif files[i-deleted][0]=='.':\n del files[i-deleted]\n deleted += 1\n\n return files\n\nclass App(QWidget):\n def __init__(self):\n #here we initialize the basic parameters for the gui\n super().__init__()\n self.prev_text = \"\"\n self.title = 'Categorizer'\n self.col_codes = {}\n self.left = 10\n self.top = 10\n self.width = 1200\n self.height = 800\n self.cur_dir = os.getcwd()\n self.title_font = QtGui.QFont(\"Times\", 15, QtGui.QFont.Bold)\n self.base_dir = \"\"\n\n self.initUI()\n\n def initUI(self):\n #We set the basic geometry of the window\n self.setWindowTitle(self.title)\n self.setGeometry(self.left, self.top, self.width, self.height)\n\n #Here we set up the button that will allow the user to find their html file of choice\n button = QPushButton('Find File', self)\n button.setToolTip('This button will allow you to select an html file to process')\n button.move(150,700)\n button.clicked.connect(self.on_click)\n\n #This button will allow the user to submit their predetermine color preferences\n\n drag_object = CustomLabel('', self)\n drag_object.move(75,150)\n\n self.button = button\n self.drag_object = drag_object\n\n self.cur_file = \"\"\n self.selected_file = QLabel(self)\n self.selected_file.setText(\"The selected file is: \" + self.cur_file)\n self.selected_file.setFont(self.title_font)\n self.selected_file.move(500, 100)\n\n self.directory_label = QLabel(self)\n self.directory_label.setText(\"Root Directory: \" + self.base_dir)\n self.directory_label.move(500, 40)\n\n self.change_dir = QPushButton(\"Change Directory\", self)\n self.change_dir.setToolTip(\"Click to change root directory\")\n self.change_dir.move(900 + len(self.directory_label.text()), 40)\n self.change_dir.clicked.connect(self.change_dir_function)\n\n self.combo = QComboBox(self)\n self.combo.move(500, 200)\n self.combo.setToolTip(\"Choose which folder to add the designated file 
to\")\n\n self.combo_hint = QLabel(self)\n self.combo_hint.setText(\"Accessible Folders\")\n self.combo_hint.move(500, 170)\n\n self.added_folders = AddedFiles(\"\", self)\n\n self.add_folder = QPushButton(\"Add-->\", self)\n self.add_folder.setToolTip(\"Press to add the folder to the collection of folders\")\n self.add_folder.move(650, 200)\n self.add_folder.clicked.connect(self.add_folder_func)\n\n self.added_folders_label = QLabel(self)\n self.added_folders_label.setText(\"Folders to add file to\")\n self.added_folders_label.move(800, 170)\n\n self.add_simlinks = QPushButton(\"Add Simlinks\", self)\n self.add_simlinks.setToolTip(\"Press to add the file to the given directories\")\n self.add_simlinks.move(500, 700)\n self.add_simlinks.clicked.connect(self.add_simlinks_function)\n\n l = QLabel(self)\n l.setText(\"Files/Directories\")\n l.move(100,100)\n\n self.show()\n self.getBaseDir()\n\n def add_simlinks_function(self):\n if self.cur_file == \"\":\n QMessageBox.question(self, \"Error\",'No file selected', QMessageBox.Ok, QMessageBox.Ok)\n return\n\n folders = []\n for i in range(len(self.added_folders.elem)):\n if self.added_folders.elem[i].text() != '':\n folders.append(self.added_folders.elem[i].text())\n\n file = self.cur_file\n file_base = file.split(\"/\")[-1]\n\n os.system(\"mv \" + file + \" \" + self.base_dir + \"/.hidden\")\n\n for folder in folders:\n print(\"ln -s \" + self.base_dir + \"/.hidden/\" + file_base + \" \" + self.base_dir + folder)\n os.system(\"ln -s \" + self.base_dir + \"/.hidden/\" + file_base + \" \" + self.base_dir + folder)\n\n QMessageBox.question(self, \"Completed\",'The file has been successfully linked. You can find the original file in the hidden folder ' + self.base_dir + \"/.hidden\", QMessageBox.Ok, QMessageBox.Ok)\n\n self.drag_object.update_dir()\n self.update_combo_box()\n self.cur_file = \"\"\n\n @pyqtSlot()\n def add_folder_func(self):\n self.added_folders.add_element(self.combo.currentText())\n\n @pyqtSlot()\n def on_click(self):\n print('Looking for file')\n location = self.openFileNameDialog()\n #do stuff\n\n def openFileNameDialog(self):\n options = QFileDialog.Options()\n options |= QFileDialog.DontUseNativeDialog\n fileName, _ = QFileDialog.getOpenFileName(self,\"QFileDialog.getOpenFileName()\", \"\",\"All Files (*);;Python Files (*.py)\", options=options)\n if fileName:\n print(fileName)\n\n self.cur_file = fileName\n self.selected_file.setText(\"The selected file is: \" + self.cur_file)\n self.selected_file.adjustSize()\n\n return fileName\n else:\n return \"Error\"\n\n def getBaseDir(self):\n if not os.path.isdir(os.getcwd() + \"/.hidden\"):\n self.buildDirPopup()\n else:\n self.base_dir = os.getcwd()\n self.directory_label.setText(\"Root Directory: \" + self.base_dir)\n self.directory_label.adjustSize()\n self.drag_object.update_dir()\n self.change_dir.move(900 + len(self.directory_label.text()), 40)\n self.update_combo_box()\n\n def buildDirPopup(self):\n self.dirPopup = DirectoryPopup(self)\n\n def change_dir_function(self):\n fileName = str(QFileDialog.getExistingDirectory(self,\"QFileDialog.getOpenFileName()\"))\n\n if fileName:\n print(fileName)\n\n if os.path.isdir(fileName):\n os.chdir(fileName)\n self.getBaseDir()\n\n def update_combo_box(self):\n self.combo.clear()\n if self.base_dir != \"\":\n files = self.return_directories(self.base_dir, \"\")\n print(files)\n self.combo.addItems(files)\n self.adjustSize()\n\n def return_directories(self, dir, prefix):\n files = parse_for_hidden(os.listdir(dir))\n deleted = 0\n\n for i in 
range(len(files)):\n if os.path.isdir(dir+\"/\"+files[i-deleted]):\n files += self.return_directories(dir+\"/\"+files[i-deleted], files[i-deleted])\n files[i - deleted] = prefix + \"/\" + files[i - deleted]\n else:\n del files[i - deleted]\n deleted += 1\n\n return files\n\n\nclass CustomLabel(QLabel):\n def __init__(self, title, parent):\n super().__init__(title, parent)\n self.parent = parent\n self.max_length = 15\n self.setAcceptDrops(True)\n self.setGeometry(20, 20, 300, 500)\n self.setStyleSheet(\"border:1px solid rgb(0, 0, 0);\")\n files = parse_for_hidden(os.listdir(os.getcwd()))\n self.elem = []\n\n if len(files) < self.max_length:\n files += [\"\"]*(self.max_length-len(files))\n\n for i in range(len(files)):\n self.elem.append(ListElement(files[i], self, (30, 30+30*i)))\n\n #add back button\n self.back = QPushButton(\" <--\", self)\n self.back.setToolTip('Go up a directory')\n self.back.move(0,0)\n self.back.clicked.connect(self.back_function)\n\n self.add_dir = QPushButton(\"Add Folder\", self)\n self.add_dir.setToolTip('Clock to add a new directory in the shown folder')\n self.add_dir.move(236, 0)\n self.add_dir.clicked.connect(self.create_directory)\n\n self.new_directory = None\n\n @pyqtSlot()\n def back_function(self):\n os.chdir(\"..\")\n self.parent.cur_dir = os.getcwd()\n self.update_dir()\n\n def dragEnterEvent(self, e):\n print(\"File has entered the drop area\")\n e.accept()\n\n def dropEvent(self, e):\n #e.mimeData().text()\n fileName = e.mimeData().text()\n self.parent.cur_file = fileName\n self.parent.selected_file.setText(\"The selected file is: \" + self.parent.cur_file)\n self.parent.selected_file.adjustSize()\n\n def update_dir(self):\n files = parse_for_hidden(os.listdir(os.getcwd()))\n\n if len(files) < self.max_length:\n files += [\"\"]*(self.max_length-len(files))\n\n for i in range(len(self.elem)):\n self.elem[i].setText(files[i])\n self.elem[i].adjustSize()\n\n def create_directory(self):\n self.new_directory = CreateDirectoryPopup(self)\n\nclass ListElement(QPushButton):\n def __init__(self, title, parent, pos):\n super().__init__(title, parent)\n self.parent = parent\n self.setAutoFillBackground(True)\n self.move(pos[0], pos[1])\n self.setStyleSheet(\"border:0px solid rgb(0, 0, 0);\")\n self.clicked.connect(self.click)\n\n def enterEvent(self, QEvent):\n if self.text() != \"\":\n self.setStyleSheet(\"color: rgb(230, 230, 230); background-color: rgb(0,0,0); border:0px solid rgb(0, 0, 0);\")\n\n def leaveEvent(self, QEvent):\n if self.text() != \"\":\n self.setStyleSheet(\"color: rgb(0,0,0); background-color: rgb(230,230,230); border:0px solid rgb(0, 0, 0);\")\n\n @pyqtSlot()\n def click(self):\n print(self.parent.parent.cur_file)\n\n if not os.path.isdir(self.parent.parent.cur_dir + \"/\" + self.text()):\n self.parent.parent.cur_file = self.parent.parent.cur_dir + \"/\" + self.text()\n self.parent.parent.selected_file.setText(\"The selected file is: \" + self.parent.parent.cur_file)\n print(\"The selected file is: \" + self.parent.parent.cur_file)\n self.parent.parent.selected_file.adjustSize()\n else:\n os.chdir(self.parent.parent.cur_dir + \"/\" + self.text())\n self.parent.parent.cur_dir = self.parent.parent.cur_dir + \"/\" + self.text()\n self.parent.update_dir()\n\n\nclass DirectoryPopup(QWidget):\n def __init__(self, parent):\n super().__init__()\n self.title = 'Select Root Directory'\n self.left = 100\n self.top = 100\n self.width = 600\n self.height = 400\n self.parent = parent\n self.directory = os.getcwd()\n\n self.initUI()\n\n self.show()\n\n 
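#Builds the popup layout: an instruction label, Confirm and Find buttons\n    #and a label echoing whichever directory is currently selected.\n    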
def initUI(self):\n        self.setWindowTitle(self.title)\n        self.setGeometry(self.left, self.top, self.width, self.height)\n\n        #set intro label\n        self.intro = QLabel(self)\n        self.intro.setText(\"This directory was not found to be a root directory\\n\\n please select a directory to use as the root\")\n        self.intro.move(self.width//4,10)\n\n        self.confirm = QPushButton(\"Confirm\", self)\n        self.confirm.setToolTip(\"Click to confirm your root directory. By default it will be your current directory\")\n        #QWidget.move() expects ints; // with a float operand returns a float\n        self.confirm.move(int(self.width // 3.5), int(self.height // (4 / 3)))\n        self.confirm.clicked.connect(self.confirm_func)\n\n        self.find = QPushButton(\"Find\", self)\n        self.find.setToolTip(\"Click to find a new directory to select as your root\")\n        self.find.move(int(self.width // 1.8), int(self.height // (4 / 3)))\n        self.find.clicked.connect(self.choose_dir)\n\n        self.chosen_dir = QLabel(self)\n        self.chosen_dir.setText(\"Selected Directory: \" + self.directory)\n        self.chosen_dir.move(self.width // 2 - 6*len(\"Selected Directory: \" + self.directory)//2, self.height//2)\n\n    def confirm_func(self, event):\n        self.parent.base_dir = self.directory\n        self.parent.cur_dir = self.directory\n        os.chdir(self.directory)\n        self.parent.drag_object.update_dir()\n        self.parent.directory_label.setText(\"Root Directory: \" + self.parent.base_dir)\n        self.parent.directory_label.adjustSize()\n        self.parent.update_combo_box()\n        if not os.path.isdir(self.directory + \"/.hidden\"):\n            os.system(\"mkdir \" + self.directory + \"/.hidden\")\n        self.close()\n\n    def choose_dir(self, event):\n        fileName = str(QFileDialog.getExistingDirectory(self,\"QFileDialog.getOpenFileName()\"))\n\n        if fileName:\n            print(fileName)\n\n        if os.path.isdir(fileName):\n            self.directory = fileName\n            self.chosen_dir.setText(\"Selected Directory: \" + self.directory)\n            self.chosen_dir.adjustSize()\n\nclass AddedFiles(QLabel):\n    def __init__(self, title, parent):\n        super().__init__(title, parent)\n        self.parent = parent\n        self.max_length = 20\n        self.width = 300\n        self.height = 600\n        self.top = 200\n        self.left = 800\n        self.elem = []\n        self.removes = []\n        self.active = [False]*self.max_length\n        self.setGeometry(self.left, self.top, self.width, self.height)\n        self.setStyleSheet(\"border:0px solid rgb(0, 0, 0);\")\n        self.initialize_elements()\n\n    def initialize_elements(self):\n        for i in range(self.max_length):\n            self.elem.append(QLabel(\"\", self))\n            self.elem[i].move(0, i*30)\n            self.elem[i].setStyleSheet(\"border:0px solid rgb(0, 0, 0);\")\n\n        for i in range(self.max_length):\n            self.removes.append(RemoveButton(\"X\", self, i))\n\n        self.update_removes()\n\n    def add_element(self, text):\n        all_elements = []\n        for i in range(self.max_length):\n            all_elements.append(self.elem[i].text())\n\n        if text in all_elements:\n            QMessageBox.question(self, \"Error\",'This folder has already been added', QMessageBox.Ok, QMessageBox.Ok)\n            return\n\n        for i in range(self.max_length):\n            if self.elem[i].text() == \"\":\n                self.elem[i].setText(text)\n                self.elem[i].adjustSize()\n                self.update_removes()\n                break\n            elif i == (self.max_length-1):\n                QMessageBox.question(self, \"Error\", \"We can't add the file to any more directories\", QMessageBox.Ok, QMessageBox.Ok)\n\n    def remove_elem(self, index):\n        self.elem[index].setText(\"\")\n        for i in range(index, self.max_length-1):\n            self.elem[i].setText(self.elem[i+1].text())\n\n        self.elem[-1].setText(\"\")\n        self.update_removes()\n\n    def update_removes(self):\n        for i in range(self.max_length):\n            if self.elem[i].text() == \"\":\n                self.removes[i].setText(\"\")\n                
self.removes[i].setStyleSheet(\"border:0px solid rgb(0, 0, 0);\")\n                self.removes[i].adjustSize()\n            else:\n                self.removes[i].setText(\"X\")\n                self.removes[i].setStyleSheet(\"border:1px solid rgb(0, 0, 0);\")\n                self.removes[i].adjustSize()\n\nclass RemoveButton(QPushButton):\n    def __init__(self, title, parent, index):\n        super().__init__(title, parent)\n        self.parent = parent\n        self.index = index\n        self.move(290, index*30)\n        self.clicked.connect(self.remove)\n\n    def remove(self):\n        self.parent.remove_elem(self.index)\n\nclass CreateDirectoryPopup(QWidget):\n    def __init__(self, parent):\n        super().__init__()\n        self.title = 'Create New Folder'\n        self.left = 400\n        self.top = 400\n        self.width = 400\n        self.height = 200\n        self.parent = parent\n\n        self.initUI()\n\n        self.show()\n\n    def initUI(self):\n        self.setWindowTitle(self.title)\n        self.setGeometry(self.left, self.top, self.width, self.height)\n\n        #set intro label\n        self.intro = QLabel(self)\n        self.intro.setText(\"Type in name of new folder\")\n        self.intro.move(20, 20)\n\n        self.textbox = QLineEdit(self)\n        self.textbox.move(20, 50)\n        self.textbox.resize(280,40)\n\n        self.confirm = QPushButton(\"Confirm\", self)\n        self.confirm.setToolTip(\"Click to confirm new folder name\")\n        self.confirm.move(170, 150)\n        self.confirm.clicked.connect(self.confirm_function)\n\n    def confirm_function(self):\n        text = self.textbox.text()\n\n        os.system(\"mkdir \" + self.parent.parent.cur_dir + \"/\" + text)\n\n        self.parent.parent.update_combo_box()\n        self.parent.update_dir()\n        self.close()\n\ndef main():\n    app = QApplication(sys.argv)\n    ex = App()\n    sys.exit(app.exec_())\n\nif __name__ == \"__main__\":\n    main()\n" } ]
2
hochu-leto/Django_hw
https://github.com/hochu-leto/Django_hw
791f252fae5df11dee2ff64bb30d57d791f0fd59
58a3d5bf7cd10127e8ab7170f5020eeaf7b93f74
9162e98cb330840f6b61cb6c53b66ca17dddb5b8
refs/heads/master
2023-09-02T04:04:48.785303
2021-11-08T21:49:17
2021-11-08T21:49:17
424,559,590
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5515425205230713, "alphanum_fraction": 0.5673438906669617, "avg_line_length": 36.97142791748047, "blob_id": "8aae745bcf22412e043679a587a408cc17455c54", "content_id": "a97ff951e37ec66cf4672b5df0b54b124b18cbb1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1350, "license_type": "no_license", "max_line_length": 115, "num_lines": 35, "path": "/databases_2/m2m-relations/articles/migrations/0002_object_relationship.py", "repo_name": "hochu-leto/Django_hw", "src_encoding": "UTF-8", "text": "# Generated by Django 3.1.2 on 2021-11-05 17:52\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('articles', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Object',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=50, verbose_name='Имя')),\n ('article', models.ManyToManyField(related_name='object', to='articles.Article')),\n ],\n options={\n 'verbose_name': 'Тег',\n 'verbose_name_plural': 'Теги',\n },\n ),\n migrations.CreateModel(\n name='Relationship',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('is_main', models.BooleanField(default=False, verbose_name='Основной тег')),\n ('article', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='articles.article')),\n ('object', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='articles.object')),\n ],\n ),\n ]\n" }, { "alpha_fraction": 0.5326530337333679, "alphanum_fraction": 0.5714285969734192, "avg_line_length": 22.33333396911621, "blob_id": "28e6cc4b06d861bd6b2e8cad2ab88911ccbf861b", "content_id": "b0de9cb56375da27f75f6270daa1a5cd71dbca22", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 498, "license_type": "no_license", "max_line_length": 76, "num_lines": 21, "path": "/databases_2/m2m-relations/articles/migrations/0003_auto_20211106_1024.py", "repo_name": "hochu-leto/Django_hw", "src_encoding": "UTF-8", "text": "# Generated by Django 3.1.2 on 2021-11-06 07:24\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('articles', '0002_object_relationship'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='relationship',\n options={'verbose_name': 'Тема', 'verbose_name_plural': 'Темы'},\n ),\n migrations.RemoveField(\n model_name='object',\n name='article',\n ),\n ]\n" }, { "alpha_fraction": 0.5833333134651184, "alphanum_fraction": 0.6164529919624329, "avg_line_length": 31.275861740112305, "blob_id": "b5a35c85d5f86e011c4a94e9e913524c05cd5f44", "content_id": "3ea2b97685c3a5c0480a7a34be16d833dd1e09e4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 940, "license_type": "no_license", "max_line_length": 123, "num_lines": 29, "path": "/databases_2/m2m-relations/articles/migrations/0009_auto_20211106_2125.py", "repo_name": "hochu-leto/Django_hw", "src_encoding": "UTF-8", "text": "# Generated by Django 3.1.2 on 2021-11-06 18:25\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('articles', '0008_auto_20211106_2120'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='relationship',\n 
name='article',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='articles.article'),\n ),\n migrations.AlterField(\n model_name='relationship',\n name='scopes',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='articles.scope', verbose_name='Теги'),\n ),\n migrations.AlterField(\n model_name='scope',\n name='article',\n field=models.ManyToManyField(related_name='scopes', through='articles.Relationship', to='articles.Article'),\n ),\n ]\n" }, { "alpha_fraction": 0.5751072764396667, "alphanum_fraction": 0.6194563508033752, "avg_line_length": 28.125, "blob_id": "0469ac73d74426bb6192d46161a123a8190ea388", "content_id": "3d78c206acd524fba4d01ff13d303ce7bdd225ff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 703, "license_type": "no_license", "max_line_length": 124, "num_lines": 24, "path": "/databases_2/m2m-relations/articles/migrations/0005_auto_20211106_1409.py", "repo_name": "hochu-leto/Django_hw", "src_encoding": "UTF-8", "text": "# Generated by Django 3.1.2 on 2021-11-06 11:09\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('articles', '0004_auto_20211106_1028'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='object',\n name='article',\n field=models.ManyToManyField(through='articles.Relationship', to='articles.Article'),\n ),\n migrations.AlterField(\n model_name='relationship',\n name='object',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='articles.object', verbose_name='Теги'),\n ),\n ]\n" }, { "alpha_fraction": 0.5205992460250854, "alphanum_fraction": 0.5786516666412354, "avg_line_length": 23.272727966308594, "blob_id": "5cfd280b33abf52b8cad67f1c7adab2f9699be19", "content_id": "bb7befcc4169cf4db2f9ede6558f90f994009dbb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 534, "license_type": "no_license", "max_line_length": 95, "num_lines": 22, "path": "/databases_2/m2m-relations/articles/migrations/0011_auto_20211108_2301.py", "repo_name": "hochu-leto/Django_hw", "src_encoding": "UTF-8", "text": "# Generated by Django 3.1.2 on 2021-11-08 20:01\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('articles', '0010_auto_20211108_0800'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='scope',\n name='articles',\n ),\n migrations.AddField(\n model_name='article',\n name='scope',\n field=models.ManyToManyField(through='articles.Relationship', to='articles.Scope'),\n ),\n ]\n" }, { "alpha_fraction": 0.5110456347465515, "alphanum_fraction": 0.5567010045051575, "avg_line_length": 24.148147583007812, "blob_id": "8b617d5f625642c9af1bd2cf42eba20c6225489d", "content_id": "6c50c85ac250955f4d1e470e5f1fa39f20eef06c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 679, "license_type": "no_license", "max_line_length": 97, "num_lines": 27, "path": "/databases_2/m2m-relations/articles/migrations/0010_auto_20211108_0800.py", "repo_name": "hochu-leto/Django_hw", "src_encoding": "UTF-8", "text": "# Generated by Django 3.1.2 on 2021-11-08 05:00\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('articles', '0009_auto_20211106_2125'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='relationship',\n 
old_name='scopes',\n            new_name='scope',\n        ),\n        migrations.RemoveField(\n            model_name='scope',\n            name='article',\n        ),\n        migrations.AddField(\n            model_name='scope',\n            name='articles',\n            field=models.ManyToManyField(through='articles.Relationship', to='articles.Article'),\n        ),\n    ]\n" }, { "alpha_fraction": 0.47991544008255005, "alphanum_fraction": 0.5454545617103577, "avg_line_length": 21.5, "blob_id": "1bbf4c4327a8be9335415954ba0a624746ce7645", "content_id": "dfdd30abcd88b6487c3b7337f41c030ed6363268", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 473, "license_type": "no_license", "max_line_length": 48, "num_lines": 22, "path": "/databases_2/m2m-relations/articles/migrations/0006_auto_20211106_1703.py", "repo_name": "hochu-leto/Django_hw", "src_encoding": "UTF-8", "text": "# Generated by Django 3.1.2 on 2021-11-06 14:03\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('articles', '0005_auto_20211106_1409'),\n    ]\n\n    operations = [\n        migrations.RenameModel(\n            old_name='Object',\n            new_name='Scope',\n        ),\n        migrations.RenameField(\n            model_name='relationship',\n            old_name='object',\n            new_name='scope',\n        ),\n    ]\n" }, { "alpha_fraction": 0.6967688202857971, "alphanum_fraction": 0.7009113430976868, "avg_line_length": 28.439023971557617, "blob_id": "93eb026b01119a0fe7b0bee234a16acf7c564ee1", "content_id": "b51fabbcfede9a097264209644ad91532721a044", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1362, "license_type": "no_license", "max_line_length": 79, "num_lines": 41, "path": "/databases_2/m2m-relations/articles/admin.py", "repo_name": "hochu-leto/Django_hw", "src_encoding": "UTF-8", "text": "from pprint import pprint\n\nfrom django.contrib import admin\nfrom django.core.exceptions import ValidationError\nfrom django.forms import BaseInlineFormSet\n\nfrom .models import Article, Scope\nfrom .models import Relationship\n\n\nclass RelationshipInlineFormset(BaseInlineFormSet):\n    def clean(self):\n        main_tags_count = 0\n        false_and_true = {False: 0, True: 1}\n        for form in self.forms:\n            # form.cleaned_data holds a dict with each individual\n            # form's data, which can be validated here\n            if form.cleaned_data:\n                main_tags_count += false_and_true[form.cleaned_data['is_main']]\n\n        if main_tags_count > 1:\n            raise ValidationError('There must be only one main tag')\n        elif main_tags_count == 0:\n            raise ValidationError('Please specify a main tag')\n\n        return super().clean()  # run the base implementation of the overridden method\n\n\nclass RelationshipInline(admin.TabularInline):\n    model = Relationship\n    formset = RelationshipInlineFormset\n\n\n@admin.register(Scope)\nclass ObjectAdmin(admin.ModelAdmin):\n    pass\n\n\n@admin.register(Article)\nclass ArticleAdmin(admin.ModelAdmin):\n    inlines = [RelationshipInline]\n" }, { "alpha_fraction": 0.5182291865348816, "alphanum_fraction": 0.5989583134651184, "avg_line_length": 22.58823585510254, "blob_id": "914ce11ab66aa3fb137041e70901546157cbb6e7", "content_id": "288142d8edd696c45c278369cea9d49957715417", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 391, "license_type": "no_license", "max_line_length": 75, "num_lines": 17, "path": "/databases_2/m2m-relations/articles/migrations/0004_auto_20211106_1028.py", "repo_name": "hochu-leto/Django_hw", "src_encoding": "UTF-8", "text": "# Generated by Django 3.1.2 on 2021-11-06 07:28\n\nfrom django.db import 
migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('articles', '0003_auto_20211106_1024'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='relationship',\n options={'verbose_name': 'Тег', 'verbose_name_plural': 'Теги'},\n ),\n ]\n" }, { "alpha_fraction": 0.5142315030097961, "alphanum_fraction": 0.5730550289154053, "avg_line_length": 22.954545974731445, "blob_id": "a9c79cb5c834282e2cb4d70d084f5c22a524b244", "content_id": "3e684772857d9137eb38a4a09eb918928b021156", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 538, "license_type": "no_license", "max_line_length": 82, "num_lines": 22, "path": "/databases_2/m2m-relations/articles/migrations/0013_auto_20211108_2312.py", "repo_name": "hochu-leto/Django_hw", "src_encoding": "UTF-8", "text": "# Generated by Django 3.1.2 on 2021-11-08 20:12\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('articles', '0012_auto_20211108_2305'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='scope',\n name='is_main',\n ),\n migrations.AddField(\n model_name='relationship',\n name='is_main',\n field=models.BooleanField(default=False, verbose_name='Основной тег'),\n ),\n ]\n" } ]
10
weex/Live-bitcoin-transactions
https://github.com/weex/Live-bitcoin-transactions
6eda0b7914e4cd8cc9103920908e3475b9f7f1b1
3f83ef594bf7d9019cc3a1ec0bd895d01c26361f
0c119467793f22603982c2b43c0ec8800d2138aa
refs/heads/master
2021-06-07T15:32:05.822025
2018-02-21T00:01:50
2018-02-21T00:01:50
4,072,145
3
7
null
2012-04-19T05:59:35
2017-08-07T06:05:40
2017-09-23T15:02:12
Python
[ { "alpha_fraction": 0.7438271641731262, "alphanum_fraction": 0.7654321193695068, "avg_line_length": 39.625, "blob_id": "ecac6c7277180ec65fe4df9753d2aa3906314f77", "content_id": "9796f954544ed9c48fa0d873e59446b3a9cf8f34", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 324, "license_type": "no_license", "max_line_length": 111, "num_lines": 8, "path": "/README.md", "repo_name": "weex/Live-bitcoin-transactions", "src_encoding": "UTF-8", "text": "Live-bitcoin-transactions\n=========================\n\nThis python script uses websockets to connect to blockchain.info's stream of unconfirmed transactions.\n\nSet raw = 1 to see the raw output from blockchain.info\n\nIf you found this useful or buggy donate to 1AtF6WBZbc3xwxcCPEmDDAR7Z3ThhWYz7d or create an issue respectively." }, { "alpha_fraction": 0.3586371839046478, "alphanum_fraction": 0.45188283920288086, "avg_line_length": 22.899999618530273, "blob_id": "157be0860c7ed74470c94354878a3a03724c1084", "content_id": "bfaf9722d1e88591739d391aad35fb2c7ce46858", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1673, "license_type": "no_license", "max_line_length": 114, "num_lines": 70, "path": "/ws.py", "repo_name": "weex/Live-bitcoin-transactions", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nfrom websocket import create_connection\nimport simplejson as json\nfrom decimal import *\n\nraw = 0\n\nws = create_connection(\"ws://ws.blockchain.info/inv\")\nws.send('{\"op\":\"unconfirmed_sub\"}')\nwhile ( 1 ) :\n\tresult = ws.recv()\n\n\tif raw :\n\t\tprint result\n\n\tresult = json.loads(result)\n\tmarker = \"\t*** new transaction ***\"\n\tif 'out' in result['x'] :\n\t\tfor out in result['x']['out'] :\n if 'addr' in out and out['addr']:\n print out['addr'] + ' got ' + str( Decimal( out['value'] ) / Decimal(100000000.0)) +marker\n\t\t marker = ''\n\t\t\nprint \"Done\"\nws.close()\n\n\"\"\" {\n 'x': {\n 'inputs': [\n {\n 'prev_out': {\n 'type': 0,\n 'addr': '14XGFnhBJQC2sKxwAvUqv8CAu43uezn4Xv',\n 'value': 7107830629\n }\n }\n ],\n 'lock_time': 'Unavailable',\n 'ver': 1,\n 'tx_index': 4062121,\n 'relayed_by': '65.49.73.51',\n 'vin_sz': 1,\n 'vout_sz': 2,\n 'time': 1334803494,\n 'hash': 'aabd272a9be5d6f2709da8e184f29b70d1f34f96aeead5a980a3cbde2863507e',\n 'out': [\n {\n 'type': 0,\n 'addr': '16bEdESzzZA2975qe7egGDZbBcXnQNMJ8X',\n 'value': 643133000\n },\n {\n 'type': 0,\n 'addr': '13A7Sz4YnxpYYj8UEoiXbp2S1bTXUSFjpn',\n 'value': 6464697629\n }\n ],\n 'size': 259\n },\n 'op': 'utx'\n}{\n 'x': {\n 'cc': 'us',\n 'lat': 37.3842,\n 'lon': -122.0196,\n 'id': 1093749043\n },\n 'op': 'marker'\n} \"\"\"\n" } ]
2
avenetj/kiwi
https://github.com/avenetj/kiwi
6ba10a9edac1b063986867743811cb189878eed5
5f82521f925a4b66f3f4509cda675eeed5c75d1c
1c8479435985fed39e6847a95864fec2eab0f70e
refs/heads/master
2021-07-01T07:13:02.471896
2017-09-22T13:12:05
2017-09-22T13:12:05
103,292,004
2
0
null
null
null
null
null
[ { "alpha_fraction": 0.6610268354415894, "alphanum_fraction": 0.7001166939735413, "avg_line_length": 23.485713958740234, "blob_id": "fea18c009392752fb95cb2e8d7d896dddb0c9525", "content_id": "c6451d2d4d87bc2b58a2e61584cccb4628e4fb36", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1714, "license_type": "no_license", "max_line_length": 169, "num_lines": 70, "path": "/README.md", "repo_name": "avenetj/kiwi", "src_encoding": "UTF-8", "text": "# Kiwi.ki \n# SRE Task - Application \n\n- [x] Part1 - Create a simple backend application which will store a counter of pings from users\n- [x] Part2 - Automate the deployment of the app created in part1. \n\nUsage | Tech\n------|------\nWeb app |Python 3.5 + Flask \nValue Storage | Redis\nLoadBalancing | HAproxy\n\n# Usage\n**Installation**\n>The instructions assume that you have already installed [Docker](https://docs.docker.com/installation/) and [Docker Compose](https://docs.docker.com/compose/install/). \n\n```bash\n git clone https://github.com/avenetj/kiwi.git .\n```\n**Part 1** \n\n> You must be root to use docker-compose commands and the docker deamon must be running\n\n```bash\n cd part1/app\n docker-compose up -d --build\n ```\nThe web application should be available on : http://127.0.0.1:5000/ping\n\n```bash\n curl 127.0.0.1:5000/ping\n curl 127.0.0.1:5000/total\n```\n\n\nThe following URL are implemented\n- /ping : increments the number of ping\n- /total : displays the total number of ping\n- /reset : resets the value stored to 0 \n\nShut down the application by running \n```bash\n docker-compose down \n```\n\n\n**Part 2**\n```bash\n cd ../../part2/app\n docker-compose up --build -d --scale app=2\n```\nThe web application should be available on : http://127.0.0.1/ping (no need for the port)\nThe **--scale app=2** runs 2 app containers. You can change the value to adjust your needs. \n\nThe same URL as before are implemented. \n\n```bash\n curl 127.0.0.1/ping\n curl 127.0.0.1/ping\n curl 127.0.0.1/total\n```\n\nYou should get two differents hostname (container name) when doing the curl commands. 
\n\nOnce you are done with the application, run this command to shut it down.\n```bash\n docker-compose down \n```\n\nEnjoy :octocat:\n" }, { "alpha_fraction": 0.5079030394554138, "alphanum_fraction": 0.5226554274559021, "avg_line_length": 19.60869598388672, "blob_id": "76e4cd745d4bd37e3e8d8cb06a823563e3ab7857", "content_id": "ff4f653f23166da098c5efcd66390d9fe99e8eb9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 949, "license_type": "no_license", "max_line_length": 63, "num_lines": 46, "path": "/part1/app/app.py", "repo_name": "avenetj/kiwi", "src_encoding": "UTF-8", "text": "#!/usr/bin/python3.5\nfrom flask import Flask, jsonify, abort, make_response, request\nfrom redis import Redis\nimport socket\n\nhost = socket.gethostname()\n\napp = Flask(__name__)\nredis_db = Redis(host='redis', port=6379)\n\n@app.route('/ping')\ndef ping():\n    answers = {\n        \"status\": \"ok\",\n        \"server\": host\n    }\n    redis_db.incr('hits')\n    return jsonify({'Answer': answers})\n\n@app.route('/total')\ndef total():\n    total = redis_db.get('hits')\n    # redis returns None when the key has never been set\n    if total is not None:\n        answers = {\n            \"status\": \"ok\",\n            \"total\": total.decode('utf-8')\n        }\n    else:\n        answers = {\n            \"status\": \"ok\",\n            \"total\": \"0\"\n        }\n    return jsonify({'Answer': answers})\n\n@app.route('/reset')\ndef reset():\n    redis_db.set('hits', 0)\n    return \"Reset value\" \n\n@app.route('/test')\ndef test():\n    return \"Test OK\" \n\n\nif __name__ == '__main__':\n    app.run(host='0.0.0.0', debug=True)\n\n" } ]
2
nizar/scripts
https://github.com/nizar/scripts
1a2c89eb244c052b12ddf9f6a442817e4a9d6907
e235636cfeba3476a1195394fae59c552f27e46f
5e792d568f1c4b0c552f1a7a83099791710a2b43
refs/heads/master
2023-04-07T12:34:00.830134
2023-01-10T00:26:37
2023-01-10T00:26:37
101,209,965
0
0
null
2017-08-23T17:58:19
2016-10-23T20:18:45
2017-07-22T21:45:26
null
[ { "alpha_fraction": 0.5493506789207458, "alphanum_fraction": 0.5670129656791687, "avg_line_length": 32.77193069458008, "blob_id": "7372388fdb986542ad6ac786d6ae2f7ec14cc951", "content_id": "90572d7e28a6e17a6e2afc442bb71478e1745a61", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3850, "license_type": "no_license", "max_line_length": 110, "num_lines": 114, "path": "/extract_overdrive_chapters.py", "repo_name": "nizar/scripts", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n#\n# encode with 64kbps stereo, HE, optimize for voice\n#\n\n# Set to true for test mode\ndryrun = True\n\nimport os, sys, re\nimport mutagen.id3 as id3\nfrom mutagen.mp3 import MP3\nfrom mutagen import File\n\nfrom collections import OrderedDict\n\ndef timestr(secs):\n (secs, ms) = str(secs).split('.')\n ms = float(ms[0:3] + '.' + ms[3:])\n secs = int(secs)\n hours = int(secs // 3600)\n secs = secs % 3600\n mins = int(secs // 60)\n secs = secs % 60\n return '{0:02}:{1:02}:{2:02}.{3:03.0f}'.format(hours, mins, secs, ms)\n\ndef load_mp3(total, dir, file):\n path = os.path.join(dir, file)\n #mfile = File(path)\n #file = File('some.mp3') # mutagen can automatically detect format and type of tags\n #artwork = file.tags['APIC:'].data # access APIC frame and grab the image\n #with open('image.jpg', 'wb') as img:\n # img.write(artwork) # write artwork to new image\n #artwork = mfile.tags['APIC:'].data # access APIC frame and grab the image\n #with open('{0}.jpg'.format(path), 'wb') as img:\n # img.write(artwork) # write artwork to new image\n audio = MP3(path)\n print audio.info.length #, audio.info.bitrate\n m = id3.ID3(path)\n\n data = m.get('TXXX:OverDrive MediaMarkers')\n if not data:\n print \"Can't find TXXX data point for {0}\".format(file)\n print m.keys()\n return\n info = data.text[0].encode(\"ascii\", \"ignore\")\n #print info\n file_chapters = re.findall(r\"<Name>\\s*([^>]+?)\\s*</Name><Time>\\s*([\\d:.]+)\\s*</Time>\", info, re.MULTILINE)\n chapters = []\n for chapter in file_chapters:\n (name, length) = chapter\n name = re.sub(r'^\"(.+)\"$', r'\\1', name)\n name = re.sub(r'^\\*(.+)\\*$', r'\\1', name)\n name = re.sub(r'\\s*\\([^)]*\\)$', '', name) # ignore any sub-chapter markers from Overdrive\n name = re.sub(r'\\s+\\(?continued\\)?$', '', name) # ignore any sub-chapter markers from Overdrive\n name = re.sub(r'\\s+-\\s*$', '', name) # ignore any sub-chapter markers from Overdrive\n name = re.sub(r'^Dis[kc]\\s+\\d+\\W*$', '', name) # ignore any disk markers from Overdrive\n name = name.strip()\n t_parts = list(length.split(':'))\n t_parts.reverse()\n seconds = total + float(t_parts[0])\n if len(t_parts) > 1:\n seconds += (int(t_parts[1]) * 60)\n if len(t_parts) > 2:\n seconds += (int(t_parts[2]) * 60 * 60)\n chapters.append([name, seconds])\n print name, seconds\n #chapters = re.search(r'(\\w+)', info)\n #print repr(chapters)\n return (total + audio.info.length, chapters)\n return\n\n\n # try:\n # if file.decode(\"utf-8\") == new.decode(\"utf-8\"):\n # new = None\n # except:\n # print \" FILE: \"+os.path.join(dirname, file)\n # raise\n # # Return\n # return (m, new, changed)\n\ndef visit(arg, dirname, names):\n print dirname\n os.chdir(dirname)\n #parent = os.path.dirname(dirname)\n #thisdir = os.path.basename(dirname)\n #print thisdir\n # Parse the files\n total = 0;\n all_chapters = OrderedDict()\n for file in sorted(names):\n if file.endswith('.mp3'):\n (total, chapters) = load_mp3(total, dirname, file)\n for chapter in chapters:\n if 
chapter[0] in all_chapters.keys():\n continue\n all_chapters[chapter[0]] = chapter[1]\n if len(all_chapters) > 0:\n with open('overdrive_chapters.txt', 'w') as file:\n for name, length in all_chapters.items():\n chapstr = u'{0} {1}'.format(timestr(length), name)\n print chapstr\n file.write(chapstr + '\\n')\n #print repr(all_chapters)\n\n\n\nif len(sys.argv) > 1:\n path = os.path.abspath(sys.argv[1])\nelse:\n path = os.path.abspath('.')\nprint path\n\nos.path.walk(path, visit, None)\n" }, { "alpha_fraction": 0.5542654395103455, "alphanum_fraction": 0.575713574886322, "avg_line_length": 26.375276565551758, "blob_id": "b5a1b819a9a52fbbfcde8b0c3349dc25b98d7129", "content_id": "3eeb31139ce724386455cf899916b0900bd4ca66", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 12402, "license_type": "no_license", "max_line_length": 140, "num_lines": 453, "path": "/.bashrc", "repo_name": "nizar/scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash\n#\n# This file is sourced by all *interactive* bash shells on startup,\n# including some apparently interactive shells such as scp and rcp\n# that can't tolerate any output.\n#\n# It attempts to keep all settings completely generic so that any\n# user can install this without fear of any unusual aliases or\n# preferences being forced upon them.\n#\n# In order to allow for customization (and to allow the authors to\n# use this file along with some private aliases), this file will\n# source other sub-files if they exist, in the following order:\n#\n# ~/.bashrc.d/*\n# A place to store extra configuration in order to keep this master .bashrc\n# file as clean and generic as possible. All files here will be included\n# unless they are \"location\" files as described below.\n#\n# ~/.bashrc.d/\"$LOCATION\".loc\n# This is intended for separating \"home\" and \"work\" settings. $LOCATION is\n# the main domain \"word\" for this host, i.e. example.loc for example.com.\n# Included when at $LOCATION.\n#\n# ~/.bashrc.d/\"$LOCATION\".notloc\n# Like *.loc but only included when not at $LOCATION.\n#\n# ~/.bashrc_custom\n# This behaves like the files in ~/.bashrc.d, but is intended for custom\n# settings specific to an individual host and as such will never be copied\n# or overwritten by host-setup routines.\n#\n\n###############################################################################\n# Test for an interactive shell. There is no need to set anything past this\n# point for scp and rcp, and it's important to refrain from outputting anything\n# in those cases. 
However, we should add a couple of extra paths in case this\n# is rsync (in case rsync itself is stored somewhere like /usr/local/bin).\n\n if [[ \"$-\" != *i* ]]; then\n for dir in /usr/*/bin/ /opt/*/bin/; do\n export PATH=\"$PATH:$dir\"\n done\n return\n fi\n\n###############################################################################\n# Source any global definitions that exist\n#\n for file in \\\n /etc/*bashrc /etc/profile /etc/bash/bashrc \\\n ~/.bash_aliases \\\n /etc/bash_completion \\\n /sw/bin/init.sh\n do\n [[ -f \"$file\" ]] && source \"$file\"\n done\n\n###############################################################################\n# Setup some global information about the environment\n#\n\n# Figure out what os/distro we are on\n IS_MAC=\n IS_LINUX=\n IS_SUN=\n DISTRO=\n OS=\n VERSION=\n\n if command -v uname &> /dev/null; then\n OS=`uname`\n fi\n\n if [[ \"$OS\" == 'Darwin' ]]; then\n DISTRO='OSX'\n IS_MAC=1\n elif [[ \"$OS\" == 'SunOS' ]]; then\n DISTRO='SunOS'\n IS_SUN=1\n elif [[ \"$OS\" == 'Linux' ]]; then\n IS_LINUX=1\n if [[ -f /etc/gentoo-release ]]; then\n DISTRO='Gentoo'\n elif [[ -f /etc/redhat-release ]]; then\n DISTRO=`awk '{ print $1 }' /etc/redhat-release`\n elif [[ -f /etc/debian_version ]]; then\n DISTRO='Debian'\n elif [[ -f /etc/lsb*release ]]; then\n eval `cat /etc/lsb*release`\n DISTRO=$DISTRIB_ID\n fi\n fi\n\n# In a root-capable group?\n ROOTGROUP=\n if [[ $IS_LINUX ]]; then\n groups | grep 'root\\|wheel' &> /dev/null\n if [[ \"$?\" == 0 ]]; then\n ROOTGROUP=1\n fi\n elif [[ $IS_MAC ]]; then\n groups | grep 'root\\|admin' &> /dev/null\n if [[ \"$?\" == 0 ]]; then\n ROOTGROUP=1\n fi\n fi\n\n# Local X server?\n LOCAL_X=\n if [[ $IS_LINUX ]]; then\n if [[ ':' == \"${DISPLAY:0:1}\" ]]; then\n LOCAL_X=1\n fi\n elif [[ $IS_MAC ]]; then\n if [[ '/tmp/launch' == \"${DISPLAY:0:11}\" ]]; then\n LOCAL_X=1\n fi\n fi\n\n# Get the primary domain for this host (minus any subdomains)\n if [[ $IS_LINUX ]]; then\n DOMAIN=`echo \\`hostname -d\\` | sed -e 's/^.\\+\\.\\([^\\.]\\+\\?\\.[^\\.]\\+\\)$/\\1/'`\n elif [[ $IS_MAC ]]; then\n DOMAIN=`echo \\`hostname -f\\` | sed -Ee 's/^.+\\.([^\\.]+\\.[^\\.]+)$/\\1/'`\n else\n DOMAIN=\n fi\n\n###############################################################################\n# Define useful functions that things below depend on\n#\n\n# Return the absolute/expanded pathname to the requested file or directory\n abspath() {\n dir=\"$1\"\n file=\"\"\n if [[ -f \"$dir\" ]]; then\n file=/`basename \"$dir\"`\n dir=`dirname \"$dir\"`\n fi\n echo `cd \"$dir\" && pwd -P`\"$file\"\n }\n\n#\n# Nice path functions with slight modifications from:\n#\n# http://stackoverflow.com/questions/370047/what-is-the-most-elegant-way-to-remove-a-path-from-the-path-variable-in-bash\n#\n append_path() { NEW=${1/%\\//}; [[ -d $NEW ]] || return; remove_path $NEW; export PATH=\"$PATH:$NEW\"; }\n prepend_path() { NEW=${1/%\\//}; [[ -d $NEW ]] || return; remove_path $NEW; export PATH=\"$NEW:$PATH\"; }\n remove_path() {\n # New format not supported by some old versions of awk\n # PATH=`echo -n \"$PATH\" | awk -v RS=: -v ORS=: '$0 != \"'$1'\"'`\n PATH=`echo -n \"$PATH\" | awk 'BEGIN { RS=\":\"; ORS=\":\" } $0 != \"'$1'\" '`\n export PATH=${PATH/%:/}\n }\n\n\n# Return the first program from the argument list that exists in the execution path\n find_program() {\n for file in $*; do\n if command -v \"$file\" &>/dev/null; then\n echo \"$file\"\n return\n fi\n done\n }\n\n###############################################################################\n# Basic 
environmental settings/changes that should go everywhere\n#\n\n#\n# ANSI colors\n#\n\n ANSI_RESET=\"\\[\\033[0m\\]\"\n ANSI_BRIGHT=\"\\[\\033[1m\\]\"\n ANSI_UNDERSCORE=\"\\[\\033[4m\\]\"\n\n FG_BLACK=\"\\[\\033[0;30m\\]\"\n FG_BLUE=\"\\[\\033[0;34m\\]\"\n FG_GREEN=\"\\[\\033[0;32m\\]\"\n FG_CYAN=\"\\[\\033[0;36m\\]\"\n FG_RED=\"\\[\\033[0;31m\\]\"\n FG_MAGENTA=\"\\[\\033[0;35m\\]\"\n FG_BROWN=\"\\[\\033[0;33m\\]\"\n FG_LIGHTGRAY=\"\\[\\033[0;37m\\]\"\n FG_DARKGRAY=\"\\[\\033[1;30m\\]\"\n FG_LIGHTBLUE=\"\\[\\033[1;34m\\]\"\n FG_LIGHTGREEN=\"\\[\\033[1;32m\\]\"\n FG_LIGHTCYAN=\"\\[\\033[1;36m\\]\"\n FG_LIGHTRED=\"\\[\\033[1;31m\\]\"\n FG_LIGHTMAGENTA=\"\\[\\033[1;35m\\]\"\n FG_YELLOW=\"\\[\\033[1;33m\\]\"\n FG_WHITE=\"\\[\\033[1;37m\\]\"\n\n BG_BLACK=\"\\[\\033[40m\\]\"\n BG_RED=\"\\[\\033[41m\\]\"\n BG_GREEN=\"\\[\\033[42m\\]\"\n BG_BROWN=\"\\[\\033[43m\\]\"\n BG_BLUE=\"\\[\\033[44m\\]\"\n BG_PURPLE=\"\\[\\033[45m\\]\"\n BG_CYAN=\"\\[\\033[46m\\]\"\n BG_WHITE=\"\\[\\033[47m\\]\"\n\n#\n# Commandline setup\n#\n\n# Colorize and customize the sudo prompt\n alias sudo='sudo -p \"`echo -e '\\''\\033[33msudo \\033[1;31m%U\\033[0;33m password for \\033[0;34m%u\\033[36m@\\033[34m%h\\033[0m: \\033[0m'\\''` \"'\n\n# Change PROMPT_COMMAND so that it will update window/tab titles automatically\n if [[ $IS_LINUX || $IS_MAC || $IS_SUN ]]; then\n case \"$TERM\" in\n xterm*|rxvt|Eterm|eterm|linux)\n PROMPT_COMMAND='echo -ne \"\\033]0;${USER}@${HOSTNAME%%.*}:${PWD/#$HOME/~}\\007\"'\n ;;\n screen)\n PROMPT_COMMAND='echo -ne \"\\033_${USER}@${HOSTNAME%%.*}:${PWD/#$HOME/~}\\033\\\\\"'\n ;;\n esac\n fi\n\n# Redraw the prompt to a better look. Red for Root (EUID zero)\n if [[ $EUID == 0 ]]; then\n PS1=\"${FG_RED}\\u${FG_LIGHTRED}@\\h${ANSI_RESET}: ${FG_GREEN}\\w ${FG_LIGHTRED}#${ANSI_RESET} \"\n else\n PS1=\"${FG_BLUE}\\u${FG_CYAN}@${FGBLUE}\\h${ANSI_RESET}: ${FG_GREEN}\\w ${FG_DARKGRAY}>${ANSI_RESET} \"\n fi\n\n# Allow control-D to log out\n unset ignoreeof\n\n# Enable hist-append\n shopt -s histappend\n\n# History length\n export HISTFILESIZE=100000\n export HISTSIZE=200000\n\n# Give the time in the history file\n export HISTTIMEFORMAT=\"%F %T \"\n\n# Ignore duplicate history entries and those starting with whitespace\n export HISTCONTROL=ignoreboth\n\n# Prevent certain commands from cluttering the history\n export HISTIGNORE=\"&:l:ls:ll:[bf]g:clear:exit:history:history *:history|*:cd:cd -:df\"\n\n# Update the bash history after every command rather then the end of the session\nif [[ \"${PROMPT_COMMAND}\" == *\\; ]]\nthen\n export PROMPT_COMMAND=\"${PROMPT_COMMAND} history -a\"\nelif [[ -n \"${PROMPT_COMMAND}\" ]]\nthen\n export PROMPT_COMMAND=\"${PROMPT_COMMAND}; history -a\"\nelse\n export PROMPT_COMMAND=\"history -a\"\nfi\n\n# Enable spellchecking/guessing for cd commands (useful for typo'd pathnames)\n shopt -s cdspell\n\n# Store multi-line commands as one line in the history\n shopt -s cmdhist\n\n# Turn on checkwinsize so we get $LINES and $COLUMNS\n shopt -s checkwinsize\n\n#\n# Update the search path with some more directories\n#\n\n if [[ $ROOTGROUP ]]; then\n for dir in \\\n /usr/*/sbin/ \\\n /opt/*/sbin/ \\\n /usr/lib/courier/*sbin \\\n ; do\n prepend_path \"$dir\"\n done\n prepend_path /usr/sbin\n prepend_path /sbin\n fi\n\n append_path ~/bin\n append_path ~/scripts\n for dir in \\\n /usr/*/bin/ \\\n /opt/*/bin/ \\\n /usr/java/*/bin/ \\\n ; do\n prepend_path \"$dir\"\n done\n\n#\n# Now that we have altered $PATH, make a few other environment-specific tweaks\n#\n\n# Use gnu utilities if they're 
available\n if [[ $IS_SUN || $IS_MAC ]]; then\n for APP in grep find tar sed xargs; do\n if command -v g$APP &> /dev/null; then\n alias $APP=g$APP\n fi\n done\n fi\n\n#\n# Setup Grep\n#\n export GREP_OPTIONS=\n\n# Ignore certain directory patterns\n export GREP_OPTIONS=\"--exclude-dir=.svn $GREP_OPTIONS\"\n export GREP_OPTIONS=\"--exclude-dir=.git $GREP_OPTIONS\"\n export GREP_OPTIONS=\"--exclude-dir=CVS $GREP_OPTIONS\"\n\n# Turn on grep colorization\n export GREP_OPTIONS=\"--color=auto $GREP_OPTIONS\"\n # export GREP_COLORS='mt=0;32'\n\n # Apply the options without using the now-deprecated env var\n if command -v ggrep &> /dev/null; then\n alias grep=\"ggrep $GREP_OPTIONS\"\n else\n alias grep=\"grep $GREP_OPTIONS\"\n fi\n export GREP_OPTIONS=\n\n# Prepare the ls color options\n if [[ $IS_MAC ]]; then\n export CLICOLOR=1\n else\n export CLICOLOR=true\n fi\n for file in /etc/DIR_COLORS ~/.dir_colors; do\n if [[ -f \"$file\" ]]; then\n eval `dircolors -b $file`\n fi\n done\n if [[ $IS_MAC ]]; then\n LS_OPTIONS='-G -v'\n elif [[ $IS_LINUX ]]; then\n LS_OPTIONS='-v --color=auto --show-control-chars'\n else\n LS_OPTIONS=\n fi\n\n#\n# Other settings specific to the OS\n#\n\n# Linux and Solaris settings\n if [[ $IS_LINUX || $IS_SUN ]]; then\n\n # Update JAVA_HOME, too\n JAVA_HOME=\"`dirname \\`dirname \\\\\\`command -v java2 2>/dev/null\\\\\\` 2>/dev/null\\` 2>/dev/null`\"\n\n export LC_ALL=$LANG\n\n #Export proper case-sensitive language sorting\n export LC_COLLATE=C\n\n\n # Preferred editor settings\n export EDITOR=`find_program vim vi nano`\n\n # Preferred pager\n export PAGER=`find_program less more cat`\n\n # Python-preferred browser\n export BROWSER=`find_program firefox mozilla iceweasel elinks lynx`\n\n# Mac settings\n elif [[ $IS_MAC ]]; then\n\n # Preferred editor settings\n export EDITOR=vim\n\n # Preferred pager\n export PAGER=less\n\n fi\n\n###############################################################################\n# Things very specific to MacOS\n#\n\nif [[ $IS_MAC ]]; then\n\n# Turn on bash-completion for macs\n if command -v brew > /dev/null; then\n for file in \\\n $(brew --prefix)/etc/bash_completion \\\n $(brew --prefix)/etc/bash_completion.d/brew\n do\n [[ -f \"$file\" ]] && source \"$file\"\n done\n fi\n\nfi\n\n###############################################################################\n# Execute any environment-specific bashrc files\n#\n\n# Get the location (useful for home vs. work separation)\n LOCATION=`echo \"$DOMAIN\" | awk -F. '{ print $1 }'`\n\n# Load any custom extensions\n if [[ -d ~/.bashrc.d ]]; then\n for file in ~/.bashrc.d/*; do\n if [[ -d \"$file\" ]]; then\n continue\n elif [[ ${file:$((${#file}-9)):9} == '.disabled' ]]; then\n continue\n elif [[ ${file:$((${#file}-4)):4} == '.loc' ]]; then\n if [[ $file == ~/.bashrc.d/\"$LOCATION\".loc ]]; then\n source \"$file\"\n fi\n elif [[ ${file:$((${#file}-7)):7} == '.notloc' ]]; then\n if [[ $file != ~/.bashrc.d/\"$LOCATION\".notloc ]]; then\n source \"$file\"\n fi\n else\n source \"$file\"\n fi\n done\n fi\n\n# And finally even more, just in case\n [[ -f ~/.bashrc_custom ]] && source ~/.bashrc_custom\n\n# Init some NVM stuff\n # export NVM_DIR=\"$HOME/.nvm\"\n # [ -s \"$NVM_DIR/nvm.sh\" ] && \\. \"$NVM_DIR/nvm.sh\" # This loads nvm\n # [ -s \"$NVM_DIR/bash_completion\" ] && \\. 
\"$NVM_DIR/bash_completion\" # This loads nvm bash_completion\n\n# init direnv\n if [[ $(command -v direnv) ]]; then\n eval \"$(direnv hook bash)\"\n fi\n\n# Lastly, init iterm2 shell integration\n if [[ -e \"${HOME}/.iterm2_shell_integration.bash\" ]]; then\n source \"${HOME}/.iterm2_shell_integration.bash\"\n else\n echo \"No iterm integration. Please install via iTerm2 menu\"\n fi\n\n" }, { "alpha_fraction": 0.644253671169281, "alphanum_fraction": 0.6485455632209778, "avg_line_length": 30.75757598876953, "blob_id": "c12a08182c7566c22894052055bad6a561b9168d", "content_id": "b16d27483fa55c9e2fcaba87131a2caf63efb04d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 2097, "license_type": "no_license", "max_line_length": 141, "num_lines": 66, "path": "/.bashrc.d/git", "repo_name": "nizar/scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n# Deal with macports/git splitting out the git-prompt utils into their own file\nif [ -f /opt/local/share/git-core/git-prompt.sh ]; then\n . /opt/local/share/git-core/git-prompt.sh\nfi\n\n# Add these if bash_completion is running\nif [[ $EUID > 0 && \"`type -t __git_ps1`\" == 'function' ]]; then\n export GIT_PS1_SHOWDIRTYSTATE=true\n export GIT_PS1_SHOWUNTRACKEDFILES=true\n export PS1=\"${FG_BLUE}\\u${FG_CYAN}@${FGBLUE}\\h${ANSI_RESET}: ${FG_GREEN}\\w ${FG_LIGHTRED}\\$(__git_ps1 '(%s) ')${FG_GREEN}>${ANSI_RESET} \"\n\nfi\n\n#\n# Git aliases\n#\n\n alias gitlog='git logn'\n alias gg='git grep --color=always'\n\n#\n# git_cleanup is intended to be attached to a git alias.\n# It will remove any remote (origin) branches that have already been merged into\n# master, and have remained untouched for at least a month.\n#\n# Credit to https://github.com/cxreg\n#\n git_cleanup() {\n for branch in $(git branch -r --merged origin/master | grep '\\<origin/' | grep -v '\\<origin/master\\>'); do\n if [[ -z $(git rev-list $branch --since='1 month') ]]; then\n name=$(echo $branch | sed 's/^origin\\///')\n echo git push --delete origin \"$name\"\n fi\n done\n }\n export -f git_cleanup\n\n#\n# git_only intended to be attached to a git alias.\n# Given a branch name, it will show you a log of commits that only exist on that one branch.\n#\n# Credit to https://github.com/cxreg\n#\n function git_only() {\n opts=$(git rev-parse --no-revs \"$@\" 2>/dev/null)\n rev=$(git rev-parse --revs-only \"$@\" 2>/dev/null)\n if [[ -z $rev ]]; then\n branch=$(git name-rev --name-only HEAD)\n else\n branch=$rev\n fi\n git log $(git rev-parse --not --remotes --branches | grep -v $(git rev-parse $branch)) $branch $opts\n }\n export -f git_only\n\n#\n# Install our functions. 
These do not auto-run because they are only needed once,\n# and in case users want to apply them only to a specific git clone.\n#\n# git config --global alias.only '!git_only'\n# git config --global alias.cleanup '!git_cleanup'\n\n# git config --global http.sslVerify false\n# git config --global merge.tool vimdiff\n\n" }, { "alpha_fraction": 0.6565656661987305, "alphanum_fraction": 0.6565656661987305, "avg_line_length": 11.375, "blob_id": "595f829d6303fdb5127744d9fb9fa1b719067453", "content_id": "2f3eea12a8d6b7c9d38c21e6f0be2559366d8a13", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 99, "license_type": "no_license", "max_line_length": 44, "num_lines": 8, "path": "/.bashrc.d/cvs", "repo_name": "nizar/scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n#\n# CVS settings\n#\n\n# Preferred CVS_RSH (it's either ssh or rsh)\n export CVS_RSH=ssh\n" }, { "alpha_fraction": 0.7449856996536255, "alphanum_fraction": 0.7507163286209106, "avg_line_length": 33.79999923706055, "blob_id": "e25af501298944c58feb1f27296386e90c496504", "content_id": "26f93b3147abe1e0759270177a0b604d962735a7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 349, "license_type": "no_license", "max_line_length": 55, "num_lines": 10, "path": "/chrome/export_searches.sh", "repo_name": "nizar/scripts", "src_encoding": "UTF-8", "text": "#!/bin/sh\n\nDESTINATION=${1:-./keywords.sql}\nTEMP_SQL_SCRIPT=/tmp/sync_chrome_sql_script\necho \"Exporting Chrome keywords to $DESTINATION...\"\ncd ~/Library/Application\\ Support/Google/Chrome/Default\necho .output $DESTINATION > $TEMP_SQL_SCRIPT\necho .dump keywords >> $TEMP_SQL_SCRIPT\nsqlite3 -init $TEMP_SQL_SCRIPT Web\\ Data .exit\nrm $TEMP_SQL_SCRIPT\n\n" }, { "alpha_fraction": 0.5796915292739868, "alphanum_fraction": 0.5886889696121216, "avg_line_length": 27.814815521240234, "blob_id": "cb6e6e4e90ce591bbb2d716134159040f3e05931", "content_id": "956a18c3401b4eba7198f5d51e96ba1376be7de3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 778, "license_type": "no_license", "max_line_length": 68, "num_lines": 27, "path": "/.bashrc.d/ssh", "repo_name": "nizar/scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n# Make sure that the SSH environment perms are correct, or warn that\n# it needs to be configured\n if [[ -s ~/.ssh/authorized_keys ]]; then\n chmod 700 ~/.ssh/\n chmod 600 ~/.ssh/authorized_keys\n else\n echo \"No SSH environment setup, please do so\"\n fi\n\n# Don't try to use a GUI ssh-askpass if there is no X\n [[ $LOCAL_X ]] || export SSH_ASKPASS=\"\"\n\n# Start the ssh agent if it isn't already running\n if [[ $IS_LINUX && (-f ~/.ssh/id_rsa || -f ~/.ssh/id_dsa) ]]; then\n if [[ ! -e \"$SSH_AUTH_SOCK\" ]]; then\n eval `ssh-agent` > /dev/null\n fi\n # If you would also like to auto-add your ssh key when you log in,\n # uncomment the following code:\n # ssh-add -l &> /dev/null\n # if [[ $? 
== 1 ]]; then\n # ssh-add\n # fi\n #fi\n fi\n" }, { "alpha_fraction": 0.5761618614196777, "alphanum_fraction": 0.5826106071472168, "avg_line_length": 37.1016960144043, "blob_id": "c05b5a2912e789eb81054ba2b7c5cd0d9034bf22", "content_id": "f7ad136877c686043ab35fa7fa0a1330aba48275", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 4497, "license_type": "no_license", "max_line_length": 393, "num_lines": 118, "path": "/.bashrc.d/svn", "repo_name": "nizar/scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n#\n# Subversion helpers for bash\n#\n\n# Aliases to `svn diff` that show things via the colordiff app (which you may\n# need to install via apt), both with and without whitespace changes shown.\n alias svndiff='svn diff --diff-cmd=colordiff'\n alias svndiffw='svn diff --diff-cmd=colordiff -x \"-buw\"'\n\n# If you don't like color (which doesn't work well to pipe into a file to\n# send out for code review, you could use something like the following, which\n# will also add 10 extra lines of context:\n# alias svndiff='svn diff --diff-cmd=diff -x \"-U 10\"'\n\n# Create a tag in SVN\n function svntag() {\n TAG=\"$1\"\n if [ -z \"$TAG\" ]; then\n echo \"Usage: svntag NEW_TAG_NAME\"\n return 1\n fi\n INFO=`svn info . | grep ^URL`\n if [[ \"$INFO\" =~ '((svn\\+ssh:.+?)/(trunk|tags|branches)(/([^/]+))?)' ]]; then\n BASE=${BASH_REMATCH[2]}\n TYPE=${BASH_REMATCH[3]}\n CUR=${BASH_REMATCH[1]}\n NEW=\"$BASE/tags/$TAG\"\n echo \"Creating tag $TAG:\"\n echo \" from: $CUR\"\n echo \" to: $NEW\"\n svn cp \"$CUR\" \"$NEW\"\n else\n echo \"Could not determine current working path.\"\n echo \"Please make sure you are in your SVN checkout directory.\"\n fi\n }\n\n# Create a branch in SVN\n function svnbranch() {\n TAG=\"$1\"\n if [ -z \"$TAG\" ]; then\n echo \"Usage: svnbranch NEW_BRANCH_NAME\"\n return 1\n fi\n INFO=`svn info . | grep ^URL`\n if [[ \"$INFO\" =~ '((svn\\+ssh:.+?)/(trunk|tags|branches)(/([^/]+))?)' ]]; then\n BASE=${BASH_REMATCH[2]}\n TYPE=${BASH_REMATCH[3]}\n CUR=${BASH_REMATCH[1]}\n NEW=\"$BASE/branches/$TAG\"\n echo \"Creating branch $TAG:\"\n echo \" from: $CUR\"\n echo \" to: $NEW\"\n svn cp \"$CUR\" \"$NEW\"\n echo \"Switching to $TAG\"\n svn switch \"$NEW\"\n else\n echo \"Could not determine current working path.\"\n echo \"Please make sure you are in your SVN checkout directory.\"\n fi\n }\n\n# Switch to a different branch in SVN\n function svnswitch() {\n TAG=\"$1\"\n if [ -z \"$TAG\" ]; then\n echo \"Usage: svnswitch {BRANCH_NAME,trunk,head}\"\n return 1\n fi\n LOWERTAG=`echo $TAG | tr [:upper:] [:lower:]`\n INFO=`svn info . | grep ^URL`\n if [[ \"$INFO\" =~ '((svn\\+ssh:.+?)/(trunk|tags|branches)(/([^/]+))?)' ]]; then\n BASE=${BASH_REMATCH[2]}\n TYPE=${BASH_REMATCH[3]}\n CUR=${BASH_REMATCH[1]}\n if [ \"$LOWERTAG\" = \"trunk\" -o \"$LOWERTAG\" = \"head\" ]; then\n NEW=\"$BASE/trunk\"\n else\n NEW=\"$BASE/branches/$TAG\"\n fi\n echo \"Switching to $TAG\"\n svn switch \"$NEW\"\n else\n echo \"Could not determine current working path.\"\n echo \"Please make sure you are in your SVN checkout directory.\"\n fi\n }\n\n# Fix svn properties for files that came in before my ~/.svn/config was created\n function fix_svn_props() {\n # First, fix the svn:keywords (We need -n 1 because svn craps out if it hits\n # an unversioned file, and won't process any following arguments)\n find . 
\\\n -regex '.+\\.\\(php\\|inc\\|pl\\|pm\\|py\\|sh\\|js\\|css\\|html?\\|java\\|vm\\|rb\\|rhtml\\|rjs\\|rxml\\|tt\\|xml\\|sql\\)$' \\\n -exec svn ps svn:keywords \"Id Date Revision Author HeadURL\" {} \\+ 2>&1 \\\n | grep -v 'is not a working copy'\n # And make sure we have UNIX linefeeds on all files, too.\n find . \\\n -regex '.+\\.\\(csv\\|php\\|inc\\|pl\\|pm\\|py\\|sh\\|js\\|css\\|html?\\|java\\|vm\\|rb\\|rhtml\\|rjs\\|rxml\\|tt\\|xml\\|sql\\)$' \\\n -exec svn ps svn:eol-style \"native\" {} \\+ 2>&1 \\\n | grep -v 'is not a working copy'\n # But not on files that shouldn't have them\n find . \\\n -regex '.+\\.\\(bmp\\|gif\\|ico\\|jpeg\\|jpg\\|png\\|svg\\|svgz\\|tif\\|tiff\\|eps\\|avi\\|mov\\|mp3\\|smil\\|swf\\|bz2\\|gpgkey\\|gtar\\|gz\\|tar\\|tar.bz2\\|tar.gz\\|tbz\\|tgz\\|vcf\\|zip\\|ai\\|csv\\|doc\\|docm\\|docx\\|dotm\\|dotx\\|odb\\|odc\\|odf\\|odg\\|odi\\|odm\\|odp\\|ods\\|odt\\|otg\\|oth\\|otp\\|ots\\|ott\\|pdf\\|pdf\\|potm\\|potx\\|ppam\\|ppsm\\|ppsx\\|ppt\\|pptm\\|pptx\\|ps\\|psd\\|rtf\\|xlam\\|xls\\|xlsb\\|xlsm\\|xlsx\\|xltm\\|xltx\\)$' \\\n -exec svn propdel svn:eol-style {} \\+ 2>&1 \\\n | grep -v 'is not a working copy'\n # Next, the mime types\n if which svn_apply_autoprops.py &> /dev/null; then\n svn_apply_autoprops.py\n else\n echo \"svn_apply_autoprops.py not found in \\$PATH\"\n fi\n # And now a handful of executable flags\n find . -regex '.+\\.\\(pl\\|py\\|sh\\)$' -exec svn ps svn:executable on {} \\+\n find . -regex '.+\\.\\(gif\\|jpe?g\\|png\\|txt\\|csv\\|inc\\|pm\\|pdf\\|php\\|class\\|java\\|js\\|css\\|html?\\|rb\\|rhtml\\|erb\\|vm\\|xml\\|sql\\)$' -exec svn pd svn:executable {} \\+\n }\n\n" }, { "alpha_fraction": 0.7017543911933899, "alphanum_fraction": 0.7017543911933899, "avg_line_length": 13.375, "blob_id": "d61cdc5dc7230b157b1829a3253a763fe0671c40", "content_id": "f2f17338711ac6430320d07ad601dcd8eaec3e92", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 114, "license_type": "no_license", "max_line_length": 37, "num_lines": 8, "path": "/.bashrc.d/fedora", "repo_name": "nizar/scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n#\n# Fedora Packaging settings\n#\n\n# Make Fedora's build system go async\n export KOJI_FLAGS=\"--nowait\"" }, { "alpha_fraction": 0.6422142386436462, "alphanum_fraction": 0.6795679330825806, "avg_line_length": 37.2931022644043, "blob_id": "9d1373c1d1aca0f679afa47dc0bcceb80ed7a4d6", "content_id": "845f54442b92b690c7ace15c6119e579cd17885a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 2222, "license_type": "no_license", "max_line_length": 392, "num_lines": 58, "path": "/.bashrc.d/setup", "repo_name": "nizar/scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n# Set up a host based on my preferred config (ssh keys, bashrc, etc). 
ssh key\n# setup taken in part from ssh-copy-id\n function host_setup {\n HOST=\"$1\"\n # The files we want to set up\n FILES=\"{.bashrc,.bashrc.d,.inputrc,.vimrc,.cvsrc,.ackrc,.gitconfig}\"\n # Make sure the files are set up on this host\n cd\n scp -r xris@mythtv.forevermore.net:$FILES .\n # Grab some other things that don't easily transfer with the earlier pattern\n scp -r xris@mythtv.forevermore.net:.ssh/config ~/.ssh/config\n scp -r xris@mythtv.forevermore.net:.subversion/config ~/.subversion/config\n chmod 700 ~ ~/.ssh\n chmod 600 ~/.ssh/config\n # Send them to a new host?\n if [[ $HOST ]]; then\n echo \"Enter password for $HOST\"\n { cat <<EOF\n# xris\nssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAp78fecgVTrYg8KtGgxArEnmr64UdVvE2Y8j7VywAP1sHlb9y0CfJ0OVS0QE4rWRMUAMSbWGj/hqvAaLccGPNFLbrjng4s7uM8kXumbSR97TKL29iCivYHqx+Cvl7VxAriT7jvcTNP6kYV13HZAgh35t1laI+hqpwZKLQ/mg4BHsn/B5eYiT9x8rBKNDgbsGlKssMa96oWvMf/ft8mr1ei3oVOFca9rARTrs6y6VoThFKAiG4p6YmXkh3WMyj/ci1VJ1Rhg5i/ml6MAGfTPMGZ1p2gF77EhKZSdFi+khECClWV+uEdrW/bvZh5anjlNb8yu65knOZ4p6mJXJ0zAdhiQ== xris@forevermore.net\nEOF\n } | ssh \"$HOST\" \"umask 077; test -d .ssh || mkdir .ssh ; cat > .ssh/authorized_keys\" || exit 1\n scp -r ~/$FILES \"$HOST\":\n #cd -\n fi\n }\n\n# Update the ssh config, but make sure we don't touch this stuff more\n# than once at a time (e.g. opening gnome-terminal with multiple tabs)\n# if [ ! -f ~/.ssh/bashrc_edit_lock ]; then\n# touch ~/.ssh/bashrc_edit_lock\n# /bin/rm -f ~/.ssh/config\n# if [ \"`hostname -f 2>/dev/null`\" != \"bumblebee.marchex.com\" ]; then\n# cat > ~/.ssh/config.$$ <<EOF\n#Host *\n# ServerAliveInterval 60\n# TCPKeepAlive yes\n# HashKnownHosts no\n#EOF\n# fi\n# cat >> ~/.ssh/config.$$ <<EOF\n#Host *.forevermore.net *.schedulesdirect.org *.mythtv.org *.percdata.com indra agni web mail dns myth mythtv\n# Port 22\n# User xris\n# ForwardAgent yes\n#Host blitzwing blitzwing.marchex.com\n# ForwardAgent no\n#Host *.marchex.com *.qa *.devint *.sad bumblebee blitzwing\n# Port 22\n# User ccpetersen\n# ForwardAgent yes\n#EOF\n# /bin/mv -f ~/.ssh/config.$$ ~/.ssh/config\n# chmod 600 ~/.ssh/config\n# /bin/rm -f ~/.ssh/bashrc_edit_lock\n# fi\n\n" }, { "alpha_fraction": 0.7383177280426025, "alphanum_fraction": 0.7570093274116516, "avg_line_length": 16.83333396911621, "blob_id": "3b09b3aba055e98725d41520da2f7d80834bdb8b", "content_id": "b21d3feed956abaea3bcc7a1307d9896e6cd6d40", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 107, "license_type": "no_license", "max_line_length": 37, "num_lines": 6, "path": "/urldecode", "repo_name": "nizar/scripts", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\nimport sys\nfrom urllib.parse import unquote_plus\n\nprint(unquote_plus(sys.argv[1]))\n" }, { "alpha_fraction": 0.6783266663551331, "alphanum_fraction": 0.7338690757751465, "avg_line_length": 23.888235092163086, "blob_id": "f5422d568ffd588e0aaef74974c228e06acbb000", "content_id": "4fe22b6999f15b97d0fd1e081ce9ae76d8aa8f9c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Ruby", "length_bytes": 4231, "license_type": "no_license", "max_line_length": 87, "num_lines": 170, "path": "/Brewfile", "repo_name": "nizar/scripts", "src_encoding": "UTF-8", "text": "#!/usr/bin/env brew bundle install\n# Yes, that shebang is magic. 
Just execute this file via ./Brewfile to make it work.\n\n# Lots of helpful setup info here: https://openfolder.sh/macos-migrations-with-brewfile\n\ntap 'homebrew/cask'\ntap 'homebrew/cask-versions'\ntap 'mas-cli/tap'\ntap 'homebrew/cask-fonts'\n\n# For mac app store stuff (including later in this file)\nbrew 'mas'\n\n# Core stuff\nbrew 'coreutils'\nbrew 'gnu-sed' # --with-default-names\nbrew 'findutils' # --with-default-names\nbrew 'grep' # --with-default-names\nbrew 'progress' # https://github.com/Xfennec/progress\nbrew 'direnv'\nbrew 'pv'\n#brew 'tee' # No brew for this (it's built-in)\nbrew 'pwgen'\n\nbrew 'git'\nbrew 'git-lfs'\nbrew 'hub' # \"unofficial\" github client, mimics git: https://hub.github.com/\nbrew 'gh' # official github client: https://cli.github.com/\nbrew 'bash'\nbrew 'bash-completion'\nbrew 'wget'\nbrew 'colordiff'\n\nbrew 'pyenv'\nbrew 'zlib' # for pyenv until BigSur issues are fixed\nbrew 'bzip2' # for pyenv until BigSur issues are fixed\n# FIXME: figure out how to run this via brew?\n# $(brew --prefix)/bin/pip install virtualenvwrapper ipython\n\nbrew 'imagemagick'\n\nbrew 'libdvdcss'\nbrew 'mp4v2'\nbrew 'gpac'\n\nbrew 'ssh-copy-id'\nbrew 'rename'\n\nbrew 'html-xml-utils'\n\nbrew 'hugo'\nbrew 'tree'\n\n# 3d printer stuff\ncask 'bossa'\n\n# Better than Apple's built-in one\nbrew 'nano'\n\n# for json parsing via bash\nbrew 'jq'\n\n# for photorec, memory card un-deleter\nbrew 'testdisk'\n\n# Install Mac App Store apps first, since we prefer these over casks\n\nmas 'Better Rename 9', id: 414209656\nmas 'Deliveries', id: 924726344\nmas 'Fantastical', id: 975937182\nmas 'Gemini 2', id: 1090488118\nmas 'PCalc', id: 403504866\nmas 'Pixelmator Pro', id: 1289583905\nmas 'Slack', id: 803453959\nmas 'SSH Tunnel Manager', id: 424470626\nmas 'Textual 7', id: 1262957439\nmas 'The Unarchiver', id: 425424353\n# mas 'Xcode', id: 497799835 # Don't really need xcode everywhere\n\n# mas 'Final Cut Pro', id: 424389933\n# mas 'iMovie', id: 408981434\n# mas 'Motion', id: 434290957\n# mas 'Compressor', id: 424390742\n# mas 'RBdigital', id: 491365225\n# mas 'Keynote', id: 409183694\n# mas 'Numbers', id: 409203825\n# mas 'Pages', id: 409201541\n# mas 'Wimoweh', id: 610341008\n# mas 'Moom', id: 419330170\n# mas 'Witch', id: 412485838\n# mas 'CleanMyDrive 2', id: 523620159\n# mas 'Sugarmate Glance', id: 1207352056\n\n# Fonts\n\ncask 'font-hack'\ncask 'font-fira-code'\ncask 'font-monoid'\ncask 'font-source-code-pro'\n# for seagl program\ncask 'font-dosis'\n\n# List of all cask apps (truncated by github display):\n# https://github.com/caskroom/homebrew-cask/tree/master/Casks\n# Search via: `brew search --casks`\n\ncask 'imageoptim'\ncask 'imagealpha'\ncask 'handbrake'\ncask 'makemkv'\ncask 'vlc'\ncask 'plex'\ncask 'steam'\n\ncask 'tor-browser'\n\n#cask '1password-cli'\n\n# cask 'bettertouchtool' # prefer to manage manually\n# cask 'iterm2-beta' # prefer to manage manually\n\n# cask 'prusaslicer' # prefer to install manually for betas\ncask 'openscad-snapshot'\ncask 'meshlab'\ncask 'meshmixer'\n\ncask 'discord'\n# cask 'openemu' # exists but I don't want it at the moment\n\ncask 'xquartz'\ncask 'inkscape'\ncask 'libreoffice'\n\ncask 'visual-studio-code'\ncask 'sublime-text'\n\n# This includes the betterzipql plugin, too\n# The app itself is shareware, and Unarchiver is free/easy but no QL plugin\n#cask 'betterzip'\n\n# This includes an app and an installer-package ql plugin\n# https://www.mothersruin.com/software/SuspiciousPackage/\ncask 'suspicious-package'\n\n# Great list of QL plugins via 
https://github.com/sindresorhus/quick-look-plugins\n# See also https://www.quicklookplugins.com/\ncask 'ProvisionQL'\ncask 'qlcolorcode'\ncask 'qlstephen'\ncask 'qlmarkdown'\ncask 'quicklook-json'\ncask 'qlprettypatch'\ncask 'quicklook-csv'\ncask 'qlimagesize'\ncask 'webpquicklook'\ncask 'quicklookase'\ncask 'qlvideo'\n# sad.. no longer available: cert-quicklook\n\n# Run these to silence warnings about quicklook plugins\n# xattr -cr ~/Library/QuickLook/QLColorCode.qlgenerator/\n# xattr -cr ~/Library/QuickLook/QLStephen.qlgenerator/\n# xattr -cr ~/Library/QuickLook/QLMarkdown.qlgenerator/\n# And OpenSCAD\n# xattr -cr /Applications/OpenSCAD.app/\n\n# for https://github.com/alex20465/deskbluez\n# commented out because deskbluez doesn't run\n# brew glib\n# brew dlib\n" } ]
11
kummu/samples
https://github.com/kummu/samples
051b2835ae14bafe24a73c43386b395676368393
e288df7deabb1dc9f82945487a05a326b218caf7
3c1d261cd3fcc45110047bb96ec371c623926088
refs/heads/master
2021-01-10T07:57:45.522616
2016-01-25T09:39:17
2016-01-25T09:39:17
50,340,262
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6728144884109497, "alphanum_fraction": 0.6769949793815613, "avg_line_length": 37.140846252441406, "blob_id": "20b5b8f5cf2f8f968bca78487f27e16ba5851ede", "content_id": "8bb680c33b46fd53baf5eaf1ac54c929800b3c58", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8133, "license_type": "no_license", "max_line_length": 156, "num_lines": 213, "path": "/example/ecommerce/views.py", "repo_name": "kummu/samples", "src_encoding": "UTF-8", "text": "from rest_framework import serializers\nfrom rest_framework import status\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\n\nfrom ecommerce.serializer import *\nfrom ecommerce.admin import ProductDetails\nfrom django.http.response import HttpResponse\n# Create your views here.\n\n# function to get all comments of a particular product\ndef allPrices(productId):\n from ecommerce.models import ProdcutDetials\n pricesList = []\n countryList = []\n \n db = ProdcutDetials.objects.get(pk=productId)\n \n productsSpecifications = db.price.all()\n \n for specs in productsSpecifications:\n pricesList.append(specs.price)\n countryList.append(specs.country)\n \n return pricesList,countryList\n\ndef allSpecs(productId):\n from ecommerce.models import ProdcutDetials\n \n specificationList = []\n \n db = ProdcutDetials.objects.get(pk=productId)\n \n productsSpecifications = db.specifications.all()\n \n for specs in productsSpecifications:\n specType = specs.specType\n specValue = specs.specValue\n responseObject = SpecResponseObject(specType=specType,specValue=specValue)\n specificationList.append(responseObject)\n \n return specificationList \n# specTypeList = []\n# specValueList = []\n# \n# db = ProdcutDetials.objects.get(pk=productId)\n# \n# productsSpecifications = db.specifications.all()\n# \n# for specs in productsSpecifications:\n# specTypeList.append(specs.specType)\n# specValueList.append(specs.specValue)\n# \n# return specTypeList,specValueList\n\ndef allComments(productId):\n from ecommerce.models import UserProductComments\n \n commentsList = []\n # accessing a foregin key refrenece field here product has foregin key of another model\n # use double underscorll to get particular field in foreign key model\n result = UserProductComments.objects.filter(product__productId=productId)\n for eachRow in result:\n commentsList.append(eachRow.comments)\n return commentsList\n\n@api_view(['GET'])\ndef getListOfProducts(request):\n from ecommerce.models import ProdcutDetials\n \n productsList = []\n \n db = ProdcutDetials.objects\n productsData = db.all()\n \n for eachProduct in productsData:\n name = eachProduct.productName\n p_id = eachProduct.productId\n description = eachProduct.productDescription\n# price = eachProduct.price\n priceList,countryList = allPrices(p_id)\n price = 50\n rating = eachProduct.rating\n thumbUrl = eachProduct.thumbUrl\n responseObject = Products_ResponseObject(productName=name,productId=p_id,productDescription=description,price=price,rating=rating,thumbUrl=thumbUrl)\n \n# responseObject = Products_ResponseObject(productName=name,productId=p_id,productDescription=description,rating=rating,thumbUrl=thumbUrl)\n productsList.append(responseObject)\n \n responseSerializer = Products_ResponseSerializer(productsList,many=True)\n return Response(data = responseSerializer.data,status = status.HTTP_200_OK)\n\n# ------------------second api\n@api_view(['POST'])\ndef getProductFullDetails(request):\n from ecommerce.models import 
\n# ------------------second api\n@api_view(['POST'])\ndef getProductFullDetails(request):\n    from ecommerce.models import ProdcutDetials\n    \n    requestSerializer = Product_RequestSerializer(data=request.data)\n    \n    if requestSerializer.is_valid():\n        requestObject = requestSerializer.save()\n        productId = requestObject.productId\n        \n        productsList = []\n        \n        db = ProdcutDetials.objects\n        eachProduct = db.get(productId=productId)\n        \n        name = eachProduct.productName\n        p_id = eachProduct.productId\n        description = eachProduct.productDescription\n        price = 40\n        rating = eachProduct.rating\n        bigUrl = eachProduct.bigUrl\n#         specType,specValue = allSpecs(p_id)\n        comments = allComments(p_id)\n        productSpecs = allSpecs(productId)\n        print productSpecs\n\n        responseObject = Product_ResponseObject(comments=comments,productName=name,\n                    productId=p_id,productDescription=description,price=price,rating=rating,\n                    bigUrl=bigUrl,\n#                     specType=specType,specValue=specValue,\n                    productSpecifications=productSpecs)\n        \n        responseSerializer = Product_ResponseSerializer(responseObject)\n        print responseSerializer.data\n        return Response(data = responseSerializer.data,status = status.HTTP_200_OK)\n    \n    else:\n        return Response(requestSerializer.errors,status=status.HTTP_400_BAD_REQUEST)\n\n# ------------------third api\n@api_view(['POST'])\ndef productComments(request):\n    from ecommerce.models import UserProductComments,UserProfile,ProdcutDetials\n    \n    requestSerializer = ProductComments_RequestSerializer(data=request.data)\n    if requestSerializer.is_valid():\n        print requestSerializer\n        requestObject = requestSerializer.save()\n        productId = requestObject.productId\n        userName = requestObject.userName\n        comments = requestObject.comments\n        \n        from django.core.exceptions import ObjectDoesNotExist\n        \n        try: \n            user = UserProfile.objects.get(name=userName)\n        except ObjectDoesNotExist:\n            content = {'user Id': 'given user id is not found'}\n            return Response(content, status=status.HTTP_417_EXPECTATION_FAILED)\n        try:\n            product = ProdcutDetials.objects.get(productId=productId)\n        except ObjectDoesNotExist:\n            content = {'product Id': 'given product id is not found'}\n            return Response(content, status=status.HTTP_417_EXPECTATION_FAILED)\n\n        upc = UserProductComments(user=user,product=product,comments=comments)\n        upc.save()\n        content = {'Comment': 'successfully commented'}\n        return Response(data=content,status=status.HTTP_200_OK)\n
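\n# Hypothetical alternative (illustrative only; the views above keep their explicit\n# try/except blocks): the same lookup via Django's shortcut, which raises Http404\n# instead of returning 417 when the user is missing.\ndef getUserOr404(userName):\n    from django.shortcuts import get_object_or_404\n    from ecommerce.models import UserProfile\n    return get_object_or_404(UserProfile, name=userName)\n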
\n# --------------fourth api to write Ratings\n@api_view(['POST'])\ndef userRatings(request):\n    from ecommerce.models import Ratings,UserProfile,ProdcutDetials\n    \n    requestSerializer = ProductRatings_RequestSerializer(data=request.data)\n    \n    if requestSerializer.is_valid():\n        requestObject = requestSerializer.save()\n        productId = requestObject.productId\n        userName = requestObject.userName\n        rating = requestObject.rating\n        \n        from django.core.exceptions import ObjectDoesNotExist\n        \n        try: \n            user = UserProfile.objects.get(name=userName)\n        except ObjectDoesNotExist:\n            content = {'user Id': 'given user id is not found'}\n            return Response(content, status=status.HTTP_417_EXPECTATION_FAILED)\n        try:\n            product = ProdcutDetials.objects.get(productId=productId)\n        except ObjectDoesNotExist:\n            content = {'product Id': 'given product id is not found'}\n            return Response(content, status=status.HTTP_417_EXPECTATION_FAILED)\n        try:\n            ratingsObject = Ratings.objects.get(user=user,product=product)\n            ratingsObject.ratings = rating\n            ratingsObject.save()\n            content = {'Ratings': 'successfully updated'}\n        except:\n            db_insert = Ratings(user=user,product=product,ratings=rating)\n            db_insert.save()\n            content = {'Ratings': 'successfully added'}\n        return Response(data=content,status=status.HTTP_200_OK)\n\n# --------------fifth api to retrieve all product specifications\n@api_view(['POST'])\ndef getSpecification(request):\n    from ecommerce.models import ProdcutDetials\n    requestSerializer = Product_RequestSerializer(data=request.data)\n    \n    if requestSerializer.is_valid():\n        requestObject = requestSerializer.save()\n        productId = requestObject.productId\n        \n        specificationList = allSpecs(productId)\n        \n        responseSerializer = SpecResponseSerializer(specificationList,many=True)\n        return Response(data = responseSerializer.data,status = status.HTTP_200_OK)\n    \n" }, { "alpha_fraction": 0.5623670220375061, "alphanum_fraction": 0.5737768411636353, "avg_line_length": 42.8220329284668, "blob_id": "510df19801c15ca4f5f13f641a6ae2adf4360179", "content_id": "2677543df2fb67cecf4e1c29287f5a84304a6006", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5171, "license_type": "no_license", "max_line_length": 123, "num_lines": 118, "path": "/example/ecommerce/migrations/0001_initial.py", "repo_name": "kummu/samples", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.1 on 2016-01-24 17:04\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n    initial = True\n\n    dependencies = [\n    ]\n\n    operations = [\n        migrations.CreateModel(\n            name='PricesCountry',\n            fields=[\n                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n                ('price', models.IntegerField()),\n                ('country', models.CharField(default='', max_length=200)),\n            ],\n        ),\n        migrations.CreateModel(\n            name='PriceSpecification',\n            fields=[\n                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n            ],\n        ),\n        migrations.CreateModel(\n            name='ProdcutDetials',\n            fields=[\n                ('productName', models.CharField(default='', max_length=120)),\n                ('productId', models.AutoField(primary_key=True, serialize=False)),\n                ('productDescription', models.CharField(default='', max_length=1200)),\n                ('rating', models.CharField(default='', max_length=120)),\n                ('thumbUrl', models.CharField(default='', max_length=240)),\n                ('bigUrl', models.CharField(default='', max_length=240)),\n                ('price', models.ManyToManyField(through='ecommerce.PriceSpecification', to='ecommerce.PricesCountry')),\n            ],\n        ),\n        migrations.CreateModel(\n            name='ProductSpecifications',\n            fields=[\n                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n                ('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='ecommerce.ProdcutDetials')),\n            ],\n        ),\n        migrations.CreateModel(\n            name='Ratings',\n            fields=[\n                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n                ('ratings', models.FloatField(default=2)),\n                ('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='ecommerce.ProdcutDetials')),\n            ],\n        ),\n        migrations.CreateModel(\n            name='SpecificationDetails',\n            fields=[\n                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n                ('specType', models.CharField(default='', max_length=200)),\n                ('specValue', models.CharField(default='', max_length=200)),\n            ],\n        ),\n        migrations.CreateModel(\n            name='UserProductComments',\n            fields=[\n                ('id', 
models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n                ('comments', models.CharField(default='', max_length=1900)),\n                ('ratings', models.FloatField(default=2.5)),\n                ('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='ecommerce.ProdcutDetials')),\n            ],\n        ),\n        migrations.CreateModel(\n            name='UserProfile',\n            fields=[\n                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n                ('name', models.CharField(default='', max_length=120)),\n                ('email', models.CharField(default='', max_length=120)),\n                ('mobileNumber', models.IntegerField(default=96428)),\n            ],\n        ),\n        migrations.AddField(\n            model_name='userproductcomments',\n            name='user',\n            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='ecommerce.UserProfile'),\n        ),\n        migrations.AddField(\n            model_name='ratings',\n            name='user',\n            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='ecommerce.UserProfile'),\n        ),\n        migrations.AddField(\n            model_name='productspecifications',\n            name='spec',\n            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='ecommerce.SpecificationDetails'),\n        ),\n        migrations.AddField(\n            model_name='prodcutdetials',\n            name='specifications',\n            field=models.ManyToManyField(through='ecommerce.ProductSpecifications', to='ecommerce.SpecificationDetails'),\n        ),\n        migrations.AddField(\n            model_name='pricespecification',\n            name='Product',\n            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='ecommerce.ProdcutDetials'),\n        ),\n        migrations.AddField(\n            model_name='pricespecification',\n            name='price',\n            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='ecommerce.PricesCountry'),\n        ),\n        migrations.AlterUniqueTogether(\n            name='ratings',\n            unique_together=set([('user', 'product')]),\n        ),\n    ]\n" }, { "alpha_fraction": 0.7547974586486816, "alphanum_fraction": 0.7547974586486816, "avg_line_length": 41.54545593261719, "blob_id": "2fbe024e3ad8c8a580efb1207df50573ef20a5fb", "content_id": "b59146bf256ceda750bc299f0bc9926a635ecfaf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 469, "license_type": "no_license", "max_line_length": 112, "num_lines": 11, "path": "/example/ecommerce/urls.py", "repo_name": "kummu/samples", "src_encoding": "UTF-8", "text": "\nfrom django.conf.urls import url\n# import our API view functions\nfrom ecommerce.views import getListOfProducts,getProductFullDetails,productComments,userRatings,getSpecification\n\nurlpatterns = [\n    url(r'^getListOfProducts/$',getListOfProducts),\n    url(r'^getProductFullDetails/$',getProductFullDetails),\n    url(r'^productComments/$',productComments),\n    url(r'^userRatings/$',userRatings),\n    url(r'^getSpecification/$',getSpecification)\n]\n" }, { "alpha_fraction": 0.7871939539909363, "alphanum_fraction": 0.7884494662284851, "avg_line_length": 32.914894104003906, "blob_id": "bb00f0316806ff73dfd5c372818a9b0d660647db", "content_id": "6f5dbb83960ab549b1cdfff5f819400e88a9d114", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1593, "license_type": "no_license", "max_line_length": 98, "num_lines": 47, "path": "/example/ecommerce/admin.py", "repo_name": "kummu/samples", "src_encoding": "UTF-8", "text": "from django.contrib import admin\n\nfrom ecommerce.models import UserProfile\nfrom ecommerce.models import ProdcutDetials\nfrom ecommerce.models import UserProductComments\nfrom 
ecommerce.models import ProductSpecifications\nfrom ecommerce.models import SpecificationDetails\nfrom ecommerce.models import PricesCountry\nfrom ecommerce.models import PriceSpecification\nfrom ecommerce.models import Ratings\n\n# Register your models here.\n\n@admin.register(UserProfile)\nclass UserProfile(admin.ModelAdmin):\n    list_display=['name','email','mobileNumber']\n\nclass PriceSpecification_inline(admin.StackedInline):\n    model = PriceSpecification\n    extra = 1\n\nclass ProductSpecifications_inline(admin.StackedInline):\n    model = ProductSpecifications\n    extra = 1\n\n@admin.register(ProdcutDetials) \nclass ProductDetails(admin.ModelAdmin):\n    list_display=[\"productName\",\"productId\", \"productDescription\", \"rating\", \"thumbUrl\", \"bigUrl\"]\n    inlines = [ProductSpecifications_inline,PriceSpecification_inline]\n# list all inlines in one comma-separated list; a second assignment would override the first\n    \n@admin.register(UserProductComments)\nclass UserProductComments(admin.ModelAdmin):\n    list_display=[\"user\",\"product\",\"comments\",\"ratings\"]\n    \n@admin.register(SpecificationDetails)\nclass SpecificationDetails(admin.ModelAdmin):\n    list_display = [\"specType\",\"specValue\"]\n\n@admin.register(PricesCountry)\nclass PriceCountry(admin.ModelAdmin):\n    list_display = [\"price\",\"country\"]\n    \n@admin.register(Ratings)\nclass Ratings(admin.ModelAdmin):\n    list_display = [\"user\",\"product\",\"ratings\"]" }, { "alpha_fraction": 0.6991465091705322, "alphanum_fraction": 0.6991465091705322, "avg_line_length": 35.63478088378906, "blob_id": "526abc0fc56daaad5b2b0117db877c5b7a993029", "content_id": "62d3d6b656a1ec45a710010b1e96d12ecc8709bb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4218, "license_type": "no_license", "max_line_length": 119, "num_lines": 115, "path": "/example/ecommerce/serializer.py", "repo_name": "kummu/samples", "src_encoding": "UTF-8", "text": "from rest_framework import serializers\n\n\n# -------------------fifth api\nclass SpecResponseObject(object):\n    def __init__(self,specType,specValue):\n        self.specType=specType\n        self.specValue=specValue\n\nclass SpecResponseSerializer(serializers.Serializer):\n    specType = serializers.CharField()\n    specValue = serializers.CharField()\n    \n    def create(self, validated_data):\n        return SpecResponseObject(**validated_data)\n\n# ----------------------first api\nclass Products_ResponseObject(object):\n    def __init__(self,productName,productId, productDescription, price, rating, thumbUrl):\n#     def __init__(self,productName,productId, productDescription, rating, thumbUrl):\n        self.productName = productName\n        self.productId = productId\n        self.productDescription = productDescription\n        self.price = price\n        self.rating=rating\n        self.thumbUrl = thumbUrl\n\nclass Products_ResponseSerializer(serializers.Serializer):\n    productName = serializers.CharField()\n    productId = serializers.IntegerField()\n    productDescription = serializers.CharField()\n    price = serializers.IntegerField()\n    rating = serializers.FloatField()\n    thumbUrl = serializers.URLField()\n    \n\n    def create(self,validated_data):\n        return Products_ResponseObject(**validated_data)\n    \n# --------------------second api\nclass Product_RequestObject(object):\n    def __init__(self,productId):\n        self.productId=productId\n\nclass Product_RequestSerializer(serializers.Serializer):\n    productId = serializers.IntegerField()\n    \n    def create(self,validated_data):\n        return 
Product_RequestObject(**validated_data)\n    \nclass Product_ResponseObject(object):\n    def __init__(self,comments,productName,productId, productDescription, price, rating, bigUrl,productSpecifications):\n        self.productName = productName\n        self.productId = productId\n        self.productDescription = productDescription\n        self.price = price\n        self.rating=rating\n        self.bigUrl = bigUrl\n        self.comments=comments\n#         self.specType=specType\n#         self.specValue=specValue\n        self.productSpecifications = productSpecifications\n        \n\nclass Product_ResponseSerializer(serializers.Serializer):\n    productName = serializers.CharField()\n    productId = serializers.IntegerField()\n    productDescription = serializers.CharField()\n    price = serializers.IntegerField()\n    rating = serializers.FloatField()\n    bigUrl = serializers.URLField()\n    comments = serializers.ListField(child = serializers.CharField())\n#     specType = serializers.ListField(child = serializers.CharField())\n#     specValue = serializers.ListField(child = serializers.CharField())\n    productSpecifications = SpecResponseSerializer(many=True)\n    \n    def create(self,validated_data):\n        specsList = validated_data.pop('productSpecifications')\n        psList = []\n        print specsList\n        for eachSpec in specsList:\n            psObject = SpecResponseObject(**eachSpec)\n            psList.append(psObject)\n        return Product_ResponseObject(productSpecifications=psList,**validated_data) \n\n# -------------------------third api\nclass ProductComments_RequestObject(object):\n    def __init__(self,productId,userName,comments):\n        self.productId=productId\n        self.userName = userName\n        self.comments=comments\n\nclass ProductComments_RequestSerializer(serializers.Serializer):\n    productId = serializers.IntegerField()\n    userName = serializers.CharField()\n    comments=serializers.CharField()\n    \n    def create(self,validated_data):\n        return ProductComments_RequestObject(**validated_data)\n\n# -------------------fourth api\nclass ProductRatings_RequestObject(object):\n    def __init__(self,productId,userName,rating):\n        self.productId=productId\n        self.userName = userName\n        self.rating=rating\n\nclass ProductRatings_RequestSerializer(serializers.Serializer):\n    productId = serializers.IntegerField()\n    userName = serializers.CharField()\n    rating=serializers.FloatField()\n    \n    def create(self,validated_data):\n        return ProductRatings_RequestObject(**validated_data)\n    \n" }, { "alpha_fraction": 0.7106969356536865, "alphanum_fraction": 0.7309562563896179, "avg_line_length": 36.40909194946289, "blob_id": "41c6219fb4376a26ad8febe3ebe0b78103a2879e", "content_id": "6ad5dde2634c511e08d0fd8229add5a427ac8364", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2468, "license_type": "no_license", "max_line_length": 95, "num_lines": 66, "path": "/example/ecommerce/models.py", "repo_name": "kummu/samples", "src_encoding": "UTF-8", "text": "from __future__ import unicode_literals\n\nfrom django.db import models\n\n# Create your models here.\n\nclass UserProfile(models.Model):\n    name=models.CharField(max_length=120,default='')\n    email = models.CharField(max_length=120,default='')\n    mobileNumber = models.IntegerField(default=96428)\n    \n    def __unicode__(self):\n        return unicode(self.name)\n\nclass SpecificationDetails(models.Model):\n    specType=models.CharField(max_length=200,default='')\n    specValue=models.CharField(max_length=200,default='')\n    \n    def __unicode__(self):\n        return 
unicode(self.specType+\":\"+self.specValue)\n \nclass PricesCountry(models.Model):\n price = models.IntegerField()\n country = models.CharField(max_length=200,default='')\n \n def __unicode__(self):\n return unicode(str(self.price)+\":\"+self.country)\n \nclass ProdcutDetials(models.Model):\n productName = models.CharField(max_length=120,default='')\n productId=models.AutoField(primary_key=True)\n productDescription=models.CharField(max_length=1200,default='')\n price=models.ManyToManyField(PricesCountry,through=\"PriceSpecification\")\n rating=models.CharField(max_length=120,default='')\n thumbUrl=models.CharField(max_length=240,default='')\n bigUrl=models.CharField(max_length=240,default='')\n# specType=models.CharField(max_length=500,default='')\n# specValue=models.CharField(max_length=1500,default='')\n specifications=models.ManyToManyField(SpecificationDetails,through=\"ProductSpecifications\")\n\n def __unicode__(self):\n return unicode(str(self.productId)+\" : \"+self.productName)\n \nclass UserProductComments(models.Model):\n user = models.ForeignKey(UserProfile)\n product = models.ForeignKey(ProdcutDetials)\n comments=models.CharField(max_length=1900,default='')\n ratings=models.FloatField(default=2.5)\n\nclass ProductSpecifications(models.Model):\n spec = models.ForeignKey(SpecificationDetails)\n product = models.ForeignKey(ProdcutDetials)\n \nclass PriceSpecification(models.Model):\n price = models.ForeignKey(PricesCountry) \n Product = models.ForeignKey(ProdcutDetials)\n\nclass Ratings(models.Model):\n user = models.ForeignKey(UserProfile)\n product = models.ForeignKey(ProdcutDetials)\n ratings=models.FloatField(default=2)\n \n class Meta:\n unique_together = ((\"user\", \"product\"))" } ]
6
byron1988/Medicar
https://github.com/byron1988/Medicar
5d8b89b648c15eb3943c06310f17d71e35e8fe30
ca59b5b6c9620c1836bb87de7893e31da088945e
c20cabfc7b4b79dc13c364772542f3199fe99216
refs/heads/master
2023-02-21T16:54:48.837402
2021-10-08T01:20:42
2021-10-08T01:20:42
242,256,893
1
1
MIT
2020-02-22T00:51:55
2021-10-08T01:20:46
2023-02-15T20:48:28
Python
[ { "alpha_fraction": 0.7385005354881287, "alphanum_fraction": 0.7493661642074585, "avg_line_length": 31.869047164916992, "blob_id": "ab41b067e301659fb72d02a7bdcbdc8a76694b67", "content_id": "648a61a571db480f8898a90d174e3a9bfdf00c00", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2832, "license_type": "permissive", "max_line_length": 235, "num_lines": 84, "path": "/README_FRONTEND.md", "repo_name": "byron1988/Medicar", "src_encoding": "UTF-8", "text": "# Especificações técnicas do frontend do Medicar\nA seguir estão descritas as especificações de implementação do frontend do Medicar\n\n## :art: Layout\nO layout do desafio foi desenvolvido no [Figma](https://www.figma.com/) está disponível nesse [link](https://www.figma.com/file/kJIvTRUJtKin3PFthaGXnj/Desafio-Full-Stack-Intmed?node-id=0%3A1). \n\nVocê deve usar este link para visualizar as telas da aplicação e as propriedades dos componentes do layout para guiar a sua implementação.\n\n## Fluxo na marcação de consultas\nO fluxo para que o paciente possa marcar uma consulta deve seguir os seguintes passsos:\n1. O paciente escolhe a especialidade desejada para a consulta (ex: Dermatologista)\n1. Com isso, deverão aparecer todos os médicos da especialidade escolhida para que o paciente possa selecionar\n1. Uma vez escolhido o médico desejado, deverão aparecer os dias em que o médico está disponível para realizar uma consulta\n1. Ao selecionar um dia específico, deverão aparecer os horário disponíveis do médico para a data escolhida\n1. Ao final deste processo, o paciente poderá confirmar a marcação da consulta e voltar para a tela de listagem\n\n## Utilizando API Medicar\nVocê pode utilizar uma API do Medicar hospedado no [Heroku](https://www.heroku.com/). \n\nO endereço da API é [https://intmed-api-medicar.herokuapp.com](https://intmed-api-medicar.herokuapp.com).\nPara ter acessos aos _endpoints_ é preciso possuir um usuário, conforme é descrito na sessão [backend](https://github.com/Intmed-Software/desafio/tree/master/backend#api) desse desafio. 
\n\n### Create user\nUser creation must be implemented so users can access the system\n\n#### Request\n\`\`\`\nPOST /users/\n\n{\n  \"username\": <string 150>,\n  \"email\": <string 255>,\n  \"password\": <string 128>\n}\n\`\`\`\n\n#### Response\n\`\`\`\ncode status 201\n{\n  \"username\": <string 150>,\n  \"email\": <string 255>\n}\n\`\`\`\n\n### Obtain token\nAfter creating a user, a login must be implemented to obtain the token used to call the API\n\n#### Request\n\`\`\`\nPOST /users/login\n\n{\n  \"username\":,\n  \"password\":\n}\n\`\`\`\n\n#### Response\n\`\`\`\ncode status 200\n{\n  \"token\": <string>\n}\n\`\`\`\n\nThen simply attach it to requests via the HTTP \`Authorization\` header, as in the example below:\n\`\`\`\nGET /especialidades/\nAuthorization: Token 9944b09199c62bcf9418ad846dd0e4bbdfc6ee4b\n\`\`\`\n\n### Notes\nIn this hosted API the data is returned **paginated**, so it is important to apply the filters as described in the [backend](https://github.com/Intmed-Software/desafio/tree/master/backend#api) section of this challenge.\n\n#### Pagination format\n\`\`\`\n{\n  \"count\": <total number of records>,\n  \"next\": \"<next page of data, or null if there is none>\",\n  \"previous\": \"<previous page of data, or null if there is none>\",\n  \"results\": [<array of results>]\n}\n\`\`\`\n
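\nAs a rough illustration only (not part of the official spec), the token flow and pagination above could be exercised from Python with the \`requests\` library; the credentials below are placeholders:\n\n\`\`\`python\nimport requests\n\nBASE = \"https://intmed-api-medicar.herokuapp.com\"\n\n# Log in with an existing user and grab the token\nresp = requests.post(BASE + \"/users/login\",\n                     json={\"username\": \"<your-user>\", \"password\": \"<your-password>\"})\ntoken = resp.json()[\"token\"]\n\n# Call a protected, paginated endpoint and follow \"next\" links until exhausted\nheaders = {\"Authorization\": \"Token \" + token}\npage = requests.get(BASE + \"/especialidades/\", headers=headers).json()\nresults = page[\"results\"]\nwhile page[\"next\"]:\n    page = requests.get(page[\"next\"], headers=headers).json()\n    results += page[\"results\"]\n\`\`\`\n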
" }, { "alpha_fraction": 0.7132866978645325, "alphanum_fraction": 0.7132866978645325, "avg_line_length": 25, "blob_id": "9dc879e62054201bda9df30b6f5ffe20f77b00c2", "content_id": "272018c4458ff72ab6243affc313f0e1e3c6240c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 286, "license_type": "permissive", "max_line_length": 72, "num_lines": 11, "path": "/source/back/medico/admin.py", "repo_name": "byron1988/Medicar", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom medico.models import Medico\n\n\nclass MedicoAdmin(admin.ModelAdmin):\n    list_display = ('nome', 'crm', 'email', 'telefone', 'especialidade')\n    list_filter = ['especialidade']\n    search_fields = ['nome']\n\n\nadmin.site.register(Medico, MedicoAdmin)\n" }, { "alpha_fraction": 0.7028470039367676, "alphanum_fraction": 0.7206405401229858, "avg_line_length": 21.31999969482422, "blob_id": "16f08a2edb68d0917733febfb21e7fd2ee772a9c", "content_id": "db0847d4555d1b469ca788350e5f906a8acc4367", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 568, "license_type": "permissive", "max_line_length": 60, "num_lines": 25, "path": "/README.md", "repo_name": "byron1988/Medicar", "src_encoding": "UTF-8", "text": "# Medicar\nAppointment management system for a medical clinic\n\n\nTo initialize the system you should **do the following**:\ninstall pipenv in your env, then build and start the containers.\n\t\n\t>>> docker-compose build --no-cache\n\t>>> docker-compose up\n\nthen enter the docker image\n\t>>> docker-compose exec back sh\n\t>>> python manage.py migrate\n\t>>> python manage.py createsuperuser\n\t\nthen create an access \"token\" for API requests:\n\t\n\t>>> python manage.py drf_create_token <username>\nthen run the system:\n\t\t\n\t>>> python manage.py runserver\n\nurl:\n\n\thttp://127.0.0.1:8000/admin/\n\n\n\n\n" }, { "alpha_fraction": 0.7861635088920593, "alphanum_fraction": 0.7987421154975891, "avg_line_length": 25.66666603088379, "blob_id": "ec3f7ca76a9720df1f745738aaf939b0e48c730c", "content_id": "d0df99bf7c96e9f3c7f3719962b3be2337a03da6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 159, "license_type": "permissive", "max_line_length": 53, "num_lines": 6, "path": "/dockers/back/Dockerfile", "repo_name": "byron1988/Medicar", "src_encoding": "UTF-8", "text": "FROM python:3\nENV PYTHONUNBUFFERED=1\nWORKDIR /source\nCOPY ./source/back/requirements.txt /requirements.txt\nRUN pip install -r /requirements.txt\nCOPY . /source/" }, { "alpha_fraction": 0.5889145731925964, "alphanum_fraction": 0.6112394332885742, "avg_line_length": 39.59375, "blob_id": "7ddb3a30e51961c221c304daefcb87d11519ab6a", "content_id": "43afaa9710b65a2814a6f6ba7ba89a04099eae39", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1306, "license_type": "permissive", "max_line_length": 181, "num_lines": 32, "path": "/source/back/medico/migrations/0001_initial.py", "repo_name": "byron1988/Medicar", "src_encoding": "UTF-8", "text": "# Generated by Django 3.2.7 on 2021-10-01 18:31\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\nimport phonenumber_field.modelfields\n\n\nclass Migration(migrations.Migration):\n\n    initial = True\n\n    dependencies = [\n        ('especialidade', '0001_initial'),\n    ]\n\n    operations = [\n        migrations.CreateModel(\n            name='Medico',\n            fields=[\n                ('id', models.AutoField(primary_key=True, serialize=False)),\n                ('nome', models.CharField(help_text='Digite a nome do médico', max_length=100)),\n                ('crm', models.CharField(help_text='Digite o CRM do médico', max_length=7, unique=True)),\n                ('email', models.EmailField(help_text='Digite o E-mail do médico', max_length=254, verbose_name='E-mail')),\n                ('telefone', phonenumber_field.modelfields.PhoneNumberField(blank=True, help_text='Digite o telefone do médico', max_length=128, region=None)),\n                ('especialidade', models.ForeignKey(help_text='Selecione a especialidade do médico', on_delete=django.db.models.deletion.PROTECT, to='especialidade.especialidade')),\n            ],\n        ),\n    ]\n" }, { "alpha_fraction": 0.6178489923477173, "alphanum_fraction": 0.6247139573097229, "avg_line_length": 22, "blob_id": "f38f021f70d8f7b657ba3987330dce54e5bcc072", "content_id": "424d57b73221f43c6d64be912aa61c481921409d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 437, "license_type": "permissive", "max_line_length": 46, "num_lines": 19, "path": "/source/back/especialidade/models.py", "repo_name": "byron1988/Medicar", "src_encoding": "UTF-8", "text": "from django.db import models\n\n\nclass Especialidade(models.Model):\n    \"\"\"\n    Class defining the medical specialties\n    \"\"\"\n    id = models.AutoField(primary_key=True)\n    nome = models.CharField(\n        max_length=100,\n        unique=True,\n
help_text='Digite a especialidade')\n\n class Meta:\n verbose_name = \"Especialidade\"\n verbose_name_plural = \"Especialidades\"\n\n def __str__(self):\n return self.nome\n" }, { "alpha_fraction": 0.5541211366653442, "alphanum_fraction": 0.5729891061782837, "avg_line_length": 32.56666564941406, "blob_id": "fcac1a944b38fb01182fe90de2df73c66ac2c735", "content_id": "16f657ec9e333901a42f74401415bdd0c08fea27", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1010, "license_type": "permissive", "max_line_length": 183, "num_lines": 30, "path": "/source/back/consulta/migrations/0001_initial.py", "repo_name": "byron1988/Medicar", "src_encoding": "UTF-8", "text": "# Generated by Django 3.2.7 on 2021-10-08 01:03\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ('medico', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Consulta',\n fields=[\n ('id', models.AutoField(primary_key=True, serialize=False)),\n ('dia', models.DateField()),\n ('horario', models.TimeField(verbose_name='horário')),\n ('data_agendamento', models.DateTimeField(auto_now_add=True, verbose_name='data de agandamento')),\n ('medico', models.ForeignKey(help_text='Selecione um médico para a consulta', on_delete=django.db.models.deletion.PROTECT, to='medico.medico', verbose_name='Médico')),\n ],\n options={\n 'verbose_name': 'consulta',\n 'verbose_name_plural': 'consultas',\n },\n ),\n ]\n" }, { "alpha_fraction": 0.7658227682113647, "alphanum_fraction": 0.7658227682113647, "avg_line_length": 25.33333396911621, "blob_id": "92a3a82105870c325ec7979d401402010cafe2cd", "content_id": "6576aa04c83004db2056ba3c3fecb99b51b0f3a3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 158, "license_type": "permissive", "max_line_length": 56, "num_lines": 6, "path": "/source/back/especialidade/apps.py", "repo_name": "byron1988/Medicar", "src_encoding": "UTF-8", "text": "from django.apps import AppConfig\n\n\nclass EspecialidadeConfig(AppConfig):\n default_auto_field = 'django.db.models.BigAutoField'\n name = 'especialidade'\n" }, { "alpha_fraction": 0.7306272983551025, "alphanum_fraction": 0.7306272983551025, "avg_line_length": 26.200000762939453, "blob_id": "9dfa63447429787cdeea5fcbe0c9478f3b39e2d1", "content_id": "e41fce4b0a9c5af9e82abf65c42a797fedc6ba5f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 271, "license_type": "permissive", "max_line_length": 70, "num_lines": 10, "path": "/source/back/especialidade/serializers.py", "repo_name": "byron1988/Medicar", "src_encoding": "UTF-8", "text": "from rest_framework import serializers\nfrom especialidade.models import Especialidade\n\n\nclass EspecialidadeSerializer(serializers.HyperlinkedModelSerializer):\n url_field_name = 'url'\n\n class Meta:\n model = Especialidade\n fields = ('url', 'id', 'nome')" }, { "alpha_fraction": 0.7394015192985535, "alphanum_fraction": 0.7394015192985535, "avg_line_length": 33.869564056396484, "blob_id": "7a702941853f39ff9edc8d9a2344fc23d770d92b", "content_id": "c577d4bf65875f8bfd0b0c33ff02d0617b6c9a94", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 802, "license_type": "permissive", "max_line_length": 62, "num_lines": 23, "path": "/source/back/consulta/views.py", "repo_name": 
"byron1988/Medicar", "src_encoding": "UTF-8", "text": "from rest_framework import viewsets\nfrom django_filters import rest_framework as filters\nfrom consulta.models import Consulta\nfrom medico.models import Medico\nfrom consulta.serializers import ConsultaSerializer\n\nclass ConsultaFilter(filters.FilterSet):\n medico = filters.ModelMultipleChoiceFilter(\n queryset=Medico.objects.all())\n data_inicio = filters.DateFilter(\"dia\", lookup_expr='gte')\n data_final = filters.DateFilter(\"dia\", lookup_expr='lte')\n\n class Meta:\n model = Consulta\n fields = ['medico', 'data_inicio', 'data_final']\n\n\nclass ConsultaViewSet(viewsets.ModelViewSet):\n queryset = Consulta.objects.all()\n serializer_class = ConsultaSerializer\n filter_backends = (filters.DjangoFilterBackend,)\n filter_class = ConsultaFilter\n ordering = ('dia')\n" }, { "alpha_fraction": 0.5888625383377075, "alphanum_fraction": 0.6212974190711975, "avg_line_length": 22.946807861328125, "blob_id": "002acbe01d02cbd07d2cdb19fa4ee425211f0be8", "content_id": "8df721cd1d4d19ee7ec042b2c7796fa2f2c7a34b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 6892, "license_type": "permissive", "max_line_length": 401, "num_lines": 282, "path": "/README_BACKEND.md", "repo_name": "byron1988/Medicar", "src_encoding": "UTF-8", "text": "# Especificações técnicas de backend do Medicar\n\n## Interface administrativa\nVocê deverá implementar uma interface administrativa na qual gestor da clínica (superusuário) poderá cadastrar especialidades, médicos e disponibilizar horários nos quais os clientes poderão marcar as consultas. Utilize a ferramenta de geração de interface administrativa automática do Django para criar esta interface (veja a [documentação](https://docs.djangoproject.com/en/3.0/ref/contrib/admin/)).\n\nA interface administrativa deve conter as funcionalidades a seguir:\n\n### Cadastrar especialidades\nDeve ser possível cadastrar as especialidades médicas (ex: CARDIOLOGIA, PEDIATRIA) que a clínica atende fornecendo as seguintes informações:\n\n* **Nome:** nome da especialidade médica (obrigatório)\n\n### Cadastrar médicos\nDeve ser possível cadastrar os médicos que podem atender na clínica fornecendo as seguintes informações:\n\n* **Nome:** Nome do médico (obrigatório)\n* **CRM:** Número do médico no conselho regional de medicina (obrigatório)\n* **E-mail:** Endereço de e-mail do médico\n* **Telefone:** Telefone do médico\n* **Especialidade:** Especialidade na qual o médico atende\n\n### Criar agenda para médico\nDeve ser possível criar uma agenda para um médico em um dia específico fornecendo as seguintes informações:\n\n* **Médico:** Médico que será alocado (obrigatório)\n* **Dia:** Data de alocação do médico (obrigatório)\n* **Horários:** Lista de horários na qual o médico deverá ser alocado para o dia especificado (obrigatório)\n\n#### Restrições:\n* Não deve ser possível criar mais de uma agenda para um médico em um mesmo dia\n* Não deve ser possível criar uma agenda para um médico em um dia passado\n\n## API\nVocê deverá construir uma API, seguindo os padrões e boas práticas do REST contendo os seguintes endpoints:\n\n### Autenticação\n\nCom exceção dos endpoints de login e cadastro de usuário, todos os endpoints da API devem ser protegidos por autenticação e necessitam receber token via cabeçalho HTTP `Authorization`. 
See an example request:\n\n\`\`\`\nGET /especialidades/\nAuthorization: Token 9944b09199c62bcf9418ad846dd0e4bbdfc6ee4b\n\`\`\`\n\n### List medical specialties\nLists every medical specialty available at the clinic\n\n#### Request\n\`\`\`\nGET /especialidades/\n\`\`\`\n\n#### Response\n\`\`\`json\n[\n  {\n    \"id\": 1,\n    \"nome\": \"Pediatria\"\n  },\n  {\n    \"id\": 2,\n    \"nome\": \"Ginecologia\"\n  },\n  {\n    \"id\": 3,\n    \"nome\": \"Cardiologia\"\n  },\n  {\n    \"id\": 4,\n    \"nome\": \"Clínico Geral\"\n  }\n]\n\`\`\`\n\n#### Filters\n* Specialty name (search term)\n\n\`\`\`\nGET /especialidades/?search=ped\n\`\`\`\n\n### List doctors\nLists every doctor who sees patients at the clinic\n\n#### Request\n\`\`\`\nGET /medicos/\n\`\`\`\n#### Response\n\`\`\`json\n[\n  {\n    \"id\": 1,\n    \"crm\": 3711,\n    \"nome\": \"Drauzio Varella\",\n    \"especialidade\": {\n      \"id\":2,\n      \"nome\": \"Pediatria\"\n    }\n  },\n  {\n    \"id\": 2,\n    \"crm\": 2544,\n    \"nome\": \"Gregory House\",\n    \"especialidade\": {\n      \"id\": 3,\n      \"nome\": \"Cardiologia\"\n    }\n  },\n  {\n    \"id\": 3,\n    \"crm\": 3087,\n    \"nome\": \"Tony Tony Chopper\",\n    \"especialidade\": {\n      \"id\":2,\n      \"nome\": \"Pediatria\"\n    }\n  }\n]\n\`\`\`\n\n#### Filters\n\n* Identifier of one or more specialties\n* Doctor's name (search term)\n\n\`\`\`\nGET /medicos/?search=maria&especialidade=1&especialidade=3\n\`\`\`\n\n### List booked appointments\nLists every appointment booked by the logged-in user\n\n#### Request\n\`\`\`\nGET /consultas/\n\`\`\`\n\n#### Response\n\`\`\`json\n[\n  {\n    \"id\": 1,\n    \"dia\": \"2020-02-05\",\n    \"horario\": \"12:00\",\n    \"data_agendamento\": \"2020-02-01T10:45:0-03:00\",\n    \"medico\": {\n      \"id\": 2,\n      \"crm\": 2544,\n      \"nome\": \"Gregory House\",\n      \"especialidade\": {\n        \"id\": 3,\n        \"nome\": \"Cardiologia\"\n      }\n    }\n  },\n  {\n    \"id\": 2,\n    \"dia\": \"2020-03-01\",\n    \"horario\": \"09:00\",\n    \"data_agendamento\": \"2020-02-01T10:45:0-03:00\",\n    \"medico\": {\n      \"id\": 1,\n      \"crm\": 3711,\n      \"nome\": \"Drauzio Varella\",\n      \"especialidade\": {\n        \"id\":2,\n        \"nome\": \"Pediatria\"\n      }\n    }\n  }\n]\n\`\`\`\n\n#### Business rules\n* The listing must not show appointments whose day and time have already passed\n* The items must be ordered by ascending appointment day and time\n\n### List available schedules\nLists every schedule available at the clinic\n\n\`\`\`json\n[\n  {\n    \"id\": 1,\n    \"medico\": {\n      \"id\": 3,\n      \"crm\": 3087,\n      \"nome\": \"Tony Tony Chopper\",\n      \"especialidade\": {\n        \"id\":2,\n        \"nome\": \"Pediatria\"\n      }\n    },\n    \"dia\": \"2020-02-10\",\n    \"horarios\": [\"14:00\", \"14:15\", \"16:00\"]\n  },\n  {\n    \"id\": 2,\n    \"medico\": {\n      \"id\": 2,\n      \"crm\": 2544,\n      \"nome\": \"Gregory House\",\n      \"especialidade\": {\n        \"id\": 3,\n        \"nome\": \"Cardiologia\"\n      }\n    },\n    \"dia\": \"2020-02-10\",\n    \"horarios\": [\"08:00\", \"08:30\", \"09:00\", \"09:30\", \"14:00\"]\n  }\n]\n\`\`\`\n\n#### Filters\n* Identifier of one or more doctors\n* Identifier of one or more specialties\n* Date range\n\n\`\`\`\nGET /agendas/?medico=1&especialidade=2&data_inicio=2020-01-01&data_final=2020-01-05\n\`\`\`\n\n#### Business rules\n* Schedules must be ordered by ascending date\n* Schedules for past dates, or whose time slots have all been filled, must be excluded from the listing\n* Time slots inside a schedule that have already passed, or that have been taken, must be excluded from the listing\n
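\nFor illustration only, one hypothetical shape for the slot filtering described above (the names \`dia\`, \`horarios\` and \`horarios_ocupados\` mirror this spec, but the helper itself is not part of it):\n\n\`\`\`python\nfrom datetime import datetime\n\ndef horarios_disponiveis(dia, horarios, horarios_ocupados):\n    \"\"\"Keep only \"HH:MM\" slots on date \`dia\` that are in the future and not taken.\"\"\"\n    agora = datetime.now()\n    livres = []\n    for h in horarios:\n        inicio = datetime.combine(dia, datetime.strptime(h, \"%H:%M\").time())\n        if inicio > agora and h not in horarios_ocupados:\n            livres.append(h)\n    return livres\n\`\`\`\n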
\"horario\": \"14:15\"\n}\n```\n#### Retorno\n\n```json\n{\n \"id\": 2,\n \"dia\": \"2020-03-01\",\n \"horario\": \"09:00\",\n \"data_agendamento\": \"2020-02-01T10:45:0-03:00\",\n \"medico\": {\n \"id\": 1,\n \"crm\": 3711,\n \"nome\": \"Drauzio Varella\",\n \"especialidade\": {\n \"id\":2,\n \"nome\": \"Pediatria\"\n }\n }\n}\n```\n\n#### Regras de negócio\n* A data em que o agendamento foi feito deve ser salva ao se marcar uma consulta\n* Não deve ser possível marcar uma consulta para um dia e horário passados\n* Não deve ser possível marcar uma consulta se o usuário já possui uma consulta marcada no mesmo dia e horário\n* Não deve ser possível marcar uma consulta se o dia e horário já foram preenchidos\n\n\n### Desmarcar consulta\nDesmarca uma consulta marcada pelo usuário\n\n#### Requisição\n```\nDELETE /consultas/<consulta_id>\n```\n\n#### Retorno\nNão há retorno (vazio)\n\n#### Regras de negócio\n* Não deve ser possível desmarcar uma consulta que não foi marcada pelo usuário logado\n* Não deve ser possível desmarcar uma consulta que nunca foi marcada (identificador inexistente)\n* Não deve ser possível desmarcar uma consulta que já aconteceu" }, { "alpha_fraction": 0.7456896305084229, "alphanum_fraction": 0.7456896305084229, "avg_line_length": 24.77777862548828, "blob_id": "4355e38f065b0d3902f0ae17732af1075817929d", "content_id": "a5f5e467082188511b68217dff9876635bda1398", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 232, "license_type": "permissive", "max_line_length": 47, "num_lines": 9, "path": "/source/back/agenda/admin.py", "repo_name": "byron1988/Medicar", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom agenda.models import Agenda\n\n\nclass AgendaAdmin(admin.ModelAdmin):\n list_display = ('medico','dia', 'horarios')\n autocomplete_fields = [\"medico\"]\n\nadmin.site.register(Agenda, AgendaAdmin)\n" }, { "alpha_fraction": 0.6582010388374329, "alphanum_fraction": 0.6624338626861572, "avg_line_length": 30.5, "blob_id": "87450f7fa74c930dffda999c1e40850e5d717d7e", "content_id": "3b9498bd8bcaea0eff0898c02e6718b01be52f2d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 953, "license_type": "permissive", "max_line_length": 70, "num_lines": 30, "path": "/source/back/medico/models.py", "repo_name": "byron1988/Medicar", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom especialidade.models import Especialidade\nfrom phonenumber_field.modelfields import PhoneNumberField\n\n\nclass Medico(models.Model):\n \"\"\"\n Classe para definir os médicos\n \"\"\"\n \n id = models.AutoField(primary_key=True)\n nome = models.CharField(\n max_length=100, help_text='Digite a nome do médico')\n crm = models.CharField(\n unique=True, max_length=7, help_text='Digite o CRM do médico')\n email = models.EmailField(\n help_text='Digite o E-mail do médico', verbose_name=\"E-mail\")\n telefone = PhoneNumberField(\n blank=True, help_text='Digite o telefone do médico')\n especialidade = models.ForeignKey(\n Especialidade,\n on_delete=models.PROTECT,\n help_text='Selecione a especialidade do médico')\n\n class Meta:\n verbose_name = \"Médico\"\n verbose_name_plural = \"Médicos\"\n\n def __str__(self):\n return f'{self.nome}'\n" }, { "alpha_fraction": 0.7519084215164185, "alphanum_fraction": 0.7519084215164185, "avg_line_length": 25.200000762939453, "blob_id": "b12f91714e4f866d51a38eb86c6bceb87a276c60", "content_id": 
"03092266d6bf59eb466d8b87e8fa0dc938e0519c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 262, "license_type": "permissive", "max_line_length": 54, "num_lines": 10, "path": "/source/back/especialidade/admin.py", "repo_name": "byron1988/Medicar", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom especialidade.models import Especialidade\n\n\nclass EspecialidadeAdmin(admin.ModelAdmin):\n list_display = ('nome',)\n search_fields = ['nome']\n fields = ['nome']\n\nadmin.site.register(Especialidade, EspecialidadeAdmin)\n" }, { "alpha_fraction": 0.7044444680213928, "alphanum_fraction": 0.7044444680213928, "avg_line_length": 29, "blob_id": "4d758036c43f1ce3c8bb200fd080d89f45036335", "content_id": "06247dcd5e92027ce3c9ecd69d54dd9dbf04588a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 450, "license_type": "permissive", "max_line_length": 77, "num_lines": 15, "path": "/source/back/agenda/serializers.py", "repo_name": "byron1988/Medicar", "src_encoding": "UTF-8", "text": "from rest_framework import serializers\nfrom agenda.models import Agenda\nfrom medico.serializers import MedicoSerializer\nfrom expander import ExpanderSerializerMixin\n\n\nclass AgendaSerializer(ExpanderSerializerMixin, serializers.ModelSerializer):\n url_field_name = 'url'\n\n class Meta:\n model = Agenda\n fields = ('url','id', 'dia', 'medico', 'horarios')\n expandable_fields = {\n 'medico': MedicoSerializer\n }\n" }, { "alpha_fraction": 0.7633689641952515, "alphanum_fraction": 0.7633689641952515, "avg_line_length": 32.772727966308594, "blob_id": "a0083ceecb625abb02ab3a6f41830e03dbb8b50a", "content_id": "cb291dcfcea3ab6d7ed22be1aedd996e9febf159", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 748, "license_type": "permissive", "max_line_length": 65, "num_lines": 22, "path": "/source/back/medico/views.py", "repo_name": "byron1988/Medicar", "src_encoding": "UTF-8", "text": "from rest_framework import viewsets\nfrom django_filters import rest_framework as filters\nfrom rest_framework.filters import SearchFilter\nfrom medico.models import Medico\nfrom especialidade.models import Especialidade\nfrom medico.serializers import MedicoSerializer\n\n\nclass MedicoFilter(filters.FilterSet):\n especialidade = filters.ModelMultipleChoiceFilter(\n queryset=Especialidade.objects.all())\n \n class Meta:\n model = Medico\n fields = ['especialidade']\n\nclass MedicoViewSet(viewsets.ReadOnlyModelViewSet):\n queryset = Medico.objects.all()\n serializer_class = MedicoSerializer\n filter_backends = (filters.DjangoFilterBackend, SearchFilter)\n search_fields = ('nome',)\n filter_class = MedicoFilter\n \n" }, { "alpha_fraction": 0.5377446413040161, "alphanum_fraction": 0.5657036304473877, "avg_line_length": 19.264150619506836, "blob_id": "7efc800526a4dab2a6b4d3efa026679f2ea9ea1b", "content_id": "089caf51861ffded8cf027b6b51f061803bad39c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "YAML", "length_bytes": 1073, "license_type": "permissive", "max_line_length": 52, "num_lines": 53, "path": "/docker-compose.yml", "repo_name": "byron1988/Medicar", "src_encoding": "UTF-8", "text": "version: \"3.9\"\nservices:\n db:\n image: postgres\n volumes:\n - ./data/db:/var/lib/postgresql/data\n environment:\n - POSTGRES_DB=postgres\n - POSTGRES_USER=postgres\n - POSTGRES_PASSWORD=postgres\n networks:\n - 
database\n\n back:\n build:\n context: .\n dockerfile: ./dockers/back/Dockerfile\n command: python manage.py runserver 0.0.0.0:8000\n volumes:\n - ./source/back/:/source\n ports:\n - \"8000:8000\"\n depends_on:\n - db\n environment:\n - POSTGRESQL_DATABASE=postgres\n - POSTGRESQL_USER=postgres\n - POSTGRESQL_PASSWORD=postgres\n - POSTGRESQL_HOST=db\n - POSTGRESQL_PORT=5432\n networks:\n - internal\n - database\n\n # front:\n # build:\n # context: .\n # dockerfile: ./docker/Dockerfile\n # image: front\n # command: npm run serve\n # volumes:\n # - ./source/front/:/app\n # env_file:\n # - .env\n # ports:\n # - \"8080:8080\"\n # environment: \n # - CHOKIDAR_USEPOLLING=true\n # - CI=true\n \nnetworks:\n internal:\n database:" }, { "alpha_fraction": 0.7579439282417297, "alphanum_fraction": 0.7579439282417297, "avg_line_length": 45.5217399597168, "blob_id": "636abc64af8b255a6618438e691a55a12aa55edb", "content_id": "0228c266e6c2d667817119de29df341ae9a7f574", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1070, "license_type": "permissive", "max_line_length": 160, "num_lines": 23, "path": "/source/back/especialidade/tests.py", "repo_name": "byron1988/Medicar", "src_encoding": "UTF-8", "text": "from django.test import TestCase\nfrom .models import Especialidade\nfrom django.core.exceptions import ValidationError\n\n# Create your tests here.\nclass EspecialidadeTestCase(TestCase):\n def setUp(self):\n pass\n # pivete = Especialidade.objects.create(nome='teste')\n\n # def test_create_especialidade(self):\n # pivete = Especialidade.objects.create(nome='teste')\n # self.assertEquals(pivete.get_nome_especialidade(), 'teste')\n\n def test_create_especialidade_com_mesmo_nome(self):\n especialidade_um = Especialidade.objects.create(nome=\"Oftamologista\")\n especialidade_dois = Especialidade.objects.create(nome=\"Odontologista\")\n self.assertNotEqual(especialidade_um.nome, especialidade_dois.nome)\n\n def test_create_especialidade_caracteres(self):\n especialidade_um = Especialidade()\n especialidade_um.nome = \"iyqoiueywoirquyeoriuqeyworiuqeyworiuqywoeriuqywoeiruqyowiueryoqiwueyroqiuweyoriuqywoeiurqoywiueryoqiwueyroqiuweyroiqwueyroqiwu\"\n self.assertRaises(ValidationError, especialidade_um.save)\n" }, { "alpha_fraction": 0.827235758304596, "alphanum_fraction": 0.827235758304596, "avg_line_length": 40.08333206176758, "blob_id": "4503c70ca941e0aa0cb481dec06447931cb91bf8", "content_id": "9d369d526f3d23ccc44a78e08c62fb410a44c854", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 492, "license_type": "permissive", "max_line_length": 65, "num_lines": 12, "path": "/source/back/especialidade/views.py", "repo_name": "byron1988/Medicar", "src_encoding": "UTF-8", "text": "from rest_framework import viewsets\nfrom django_filters import rest_framework as filters\nfrom rest_framework.filters import SearchFilter\nfrom especialidade.models import Especialidade\nfrom especialidade.serializers import EspecialidadeSerializer\n\n\nclass EspecialidadeViewSet(viewsets.ReadOnlyModelViewSet):\n queryset = Especialidade.objects.all()\n serializer_class = EspecialidadeSerializer\n filter_backends = (filters.DjangoFilterBackend, SearchFilter)\n search_fields = ('nome',)" }, { "alpha_fraction": 0.6465781331062317, "alphanum_fraction": 0.6465781331062317, "avg_line_length": 27.764705657958984, "blob_id": "cbf62eeed5907c21d03f6f83cc3d1e87d9193d54", "content_id": "3e47053e24a5076c137b2b2d70ce1b0f673e2c11", 
"detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 984, "license_type": "permissive", "max_line_length": 87, "num_lines": 34, "path": "/source/back/agenda/models.py", "repo_name": "byron1988/Medicar", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.contrib.postgres.fields import ArrayField\nfrom django.core.exceptions import ValidationError\nfrom medico.models import Medico\nfrom datetime import date\n\n\nclass Agenda(models.Model):\n \"\"\"\n classe para a agenda do médico\n \"\"\"\n id = models.AutoField(primary_key=True)\n medico = models.ForeignKey(\n Medico,\n on_delete=models.PROTECT,\n help_text='Selecione um médico')\n dia = models.DateField()\n horarios = ArrayField(models.TimeField(auto_now=False, auto_now_add=False))\n\n class Meta:\n unique_together = ('medico', 'dia')\n verbose_name = \"Agenda\"\n verbose_name_plural = \"Agendas\"\n ordering = ['dia']\n\n def __str__(self):\n return f'{self.medico}'\n\n\n def clean(self):\n today = date.today()\n if self.dia < today:\n raise ValidationError(\"Não é possível criar uma agenda em um dia passado.\")\n return super(Agenda, self).clean()\n\n" }, { "alpha_fraction": 0.7322834730148315, "alphanum_fraction": 0.7322834730148315, "avg_line_length": 32.93333435058594, "blob_id": "dbca39023b04f865e1000f23aec210b8d91507b1", "content_id": "41dbc45f15494e725317db2ce628bacc9d0a2deb", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 508, "license_type": "permissive", "max_line_length": 88, "num_lines": 15, "path": "/source/back/medico/serializers.py", "repo_name": "byron1988/Medicar", "src_encoding": "UTF-8", "text": "from rest_framework import serializers\nfrom medico.models import Medico\nfrom especialidade.serializers import EspecialidadeSerializer\nfrom expander import ExpanderSerializerMixin\n\n\nclass MedicoSerializer(ExpanderSerializerMixin, serializers.HyperlinkedModelSerializer):\n url_field_name = 'url'\n\n class Meta:\n model = Medico\n fields = ('url','id','nome','crm','email','telefone', 'especialidade')\n expandable_fields = {\n 'especialidade': EspecialidadeSerializer\n }" }, { "alpha_fraction": 0.5193199515342712, "alphanum_fraction": 0.5471406579017639, "avg_line_length": 24.8799991607666, "blob_id": "cd0e8caa207cf629dc2260ff06119ed1ef5945f1", "content_id": "f611fc6417c63a671ea0a073432772c4c7b1fa3a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 647, "license_type": "permissive", "max_line_length": 108, "num_lines": 25, "path": "/source/back/especialidade/migrations/0001_initial.py", "repo_name": "byron1988/Medicar", "src_encoding": "UTF-8", "text": "# Generated by Django 3.2.7 on 2021-10-01 14:08\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Especialidade',\n fields=[\n ('id', models.AutoField(primary_key=True, serialize=False)),\n ('nome', models.CharField(help_text='Digite a especialidade', max_length=100, unique=True)),\n ],\n options={\n 'verbose_name': 'Especialidade',\n 'verbose_name_plural': 'Especialidades',\n },\n ),\n ]\n" }, { "alpha_fraction": 0.6428571343421936, "alphanum_fraction": 0.7773109078407288, "avg_line_length": 20.727272033691406, "blob_id": "e440c6f02a095af3e2b1862700714f31d872c0e2", "content_id": "68b5d73a9041484098356fdad6d981906ce83f8a", 
"detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 238, "license_type": "permissive", "max_line_length": 35, "num_lines": 11, "path": "/source/back/requirements.txt", "repo_name": "byron1988/Medicar", "src_encoding": "UTF-8", "text": "coverage==5.0.3\nDjango==3.2.7\ndjangorestframework==3.12.4\ndjangorestframework-expander==0.2.3\npsycopg2==2.9.1\npsycopg2-binary==2.9.1\ndjango-filter==21.1\ndjango-phonenumber-field==5.2.0\nphonenumbers==8.12.33\ndjango-rest-auth\ndjango-allauth" }, { "alpha_fraction": 0.7176350951194763, "alphanum_fraction": 0.7176350951194763, "avg_line_length": 32.82758712768555, "blob_id": "f1128be2df16873275f72207ef7e0f034be2fa52", "content_id": "e8ff7c1362fa6b9e5885fa7c4817445fd4592763", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 981, "license_type": "permissive", "max_line_length": 73, "num_lines": 29, "path": "/source/back/agenda/views.py", "repo_name": "byron1988/Medicar", "src_encoding": "UTF-8", "text": "from rest_framework import viewsets\nfrom django_filters import rest_framework as filters\nfrom agenda.models import Agenda\nfrom medico.models import Medico\nfrom agenda.serializers import AgendaSerializer\n\n\nclass AgendaFilter(filters.FilterSet):\n medico = filters.ModelMultipleChoiceFilter(\n queryset=Medico.objects.all())\n especialidade = filters.ModelMultipleChoiceFilter(\n field_name='medico__especialidade',\n to_field_name='id',\n queryset=Agenda.objects.all(),\n )\n data_inicio = filters.DateFilter(\"dia\", lookup_expr='gte')\n data_final = filters.DateFilter(\"dia\", lookup_expr='lte')\n\n class Meta:\n model = Agenda\n fields = ['medico', 'especialidade', 'data_inicio', 'data_final']\n\n\nclass AgendaViewSet(viewsets.ReadOnlyModelViewSet):\n queryset = Agenda.objects.all()\n serializer_class = AgendaSerializer\n filter_backends = (filters.DjangoFilterBackend,)\n filter_class = AgendaFilter\n ordering = ['-dia']\n" }, { "alpha_fraction": 0.658777117729187, "alphanum_fraction": 0.6646943092346191, "avg_line_length": 22.090909957885742, "blob_id": "10828303eee971339dfda3b96571e90b0ff449f6", "content_id": "be75286ebc7a01903322a21f6b9cb051694cd9f7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 511, "license_type": "permissive", "max_line_length": 74, "num_lines": 22, "path": "/dockers/front/Dockerfile", "repo_name": "byron1988/Medicar", "src_encoding": "UTF-8", "text": "FROM node:16.4-alpine as develop-stage\n\nRUN set -eux \\\n & apk add \\\n --no-cache \\\n nodejs \\\n npm\n\n# faz da pasta 'app' o diretório atual de trabalho\nWORKDIR /app\n\n# copia os arquivos 'package.json' e 'package-lock.json' (se disponível)\nCOPY ./source/front/package*.json ./\n\n# copia arquivos e pastas para o diretório atual de trabalho (pasta 'app')\nCOPY ./source/front/ .\n\n# instala dependências do projeto\nRUN npm install -g @vue/cli\nRUN npm install\n\n# CMD [ \"npm\", \"run\", \"serve\" ]" }, { "alpha_fraction": 0.75, "alphanum_fraction": 0.75, "avg_line_length": 23.66666603088379, "blob_id": "5f9102179111541cfc31fbd458cb117b00fe6ffa", "content_id": "0dd4d93b37be114a0aa8fe56f86ef09c9c28c11f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 148, "license_type": "permissive", "max_line_length": 56, "num_lines": 6, "path": "/source/back/consulta/apps.py", "repo_name": "byron1988/Medicar", "src_encoding": 
"UTF-8", "text": "from django.apps import AppConfig\n\n\nclass ConsultaConfig(AppConfig):\n default_auto_field = 'django.db.models.BigAutoField'\n name = 'consulta'\n" }, { "alpha_fraction": 0.7214137315750122, "alphanum_fraction": 0.7214137315750122, "avg_line_length": 31.133333206176758, "blob_id": "89bb1f756eb8ac3d548b686b4bb34e0d759d0686", "content_id": "a31cbc19538777d3ec0d2d3c32f6e7fef1f9b86c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 481, "license_type": "permissive", "max_line_length": 90, "num_lines": 15, "path": "/source/back/consulta/serializers.py", "repo_name": "byron1988/Medicar", "src_encoding": "UTF-8", "text": "from rest_framework import serializers\nfrom expander import ExpanderSerializerMixin\nfrom consulta.models import Consulta\nfrom medico.serializers import MedicoSerializer\n\n\nclass ConsultaSerializer(ExpanderSerializerMixin, serializers.HyperlinkedModelSerializer):\n url_field_name = 'url'\n\n class Meta:\n model = Consulta\n fields = ('id', 'dia', 'horario', 'data_agendamento', 'medico')\n expandable_fields = {\n 'medico': MedicoSerializer\n }" }, { "alpha_fraction": 0.7428010702133179, "alphanum_fraction": 0.7480366230010986, "avg_line_length": 36.26829147338867, "blob_id": "32924140f2b47e8080315ad15040b10990e08ccf", "content_id": "7c7bbb62d78c68287d84b17bb22daac42a65aa72", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1532, "license_type": "permissive", "max_line_length": 77, "num_lines": 41, "path": "/source/back/medicar/urls.py", "repo_name": "byron1988/Medicar", "src_encoding": "UTF-8", "text": "\"\"\"medicar URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom django.conf.urls import url, include\nfrom rest_framework import routers\nfrom especialidade.views import EspecialidadeViewSet\nfrom medico.views import MedicoViewSet\nfrom agenda.views import AgendaViewSet\nfrom consulta.views import ConsultaViewSet\n\n\nrouter = routers.DefaultRouter()\nrouter.register(r'especialidades', EspecialidadeViewSet)\nrouter.register(r'medicos', MedicoViewSet)\nrouter.register(r'agendas', AgendaViewSet)\nrouter.register(r'consultas', ConsultaViewSet)\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n url(r'^api/', include(router.urls)),\n url(r'^rest-auth/', include('rest_auth.urls')),\n url(r'^rest-auth/registration/', include('rest_auth.registration.urls'))\n]\n\nadmin.site.index_title = 'Medicar'\nadmin.site.site_header = 'Administração Medicar'\nadmin.site.site_title = 'Administração Medicar'\n" }, { "alpha_fraction": 0.5950646996498108, "alphanum_fraction": 0.5950646996498108, "avg_line_length": 36.45454406738281, "blob_id": "d90892825a8dd30a50f1871b39c9d1d59d3eed0a", "content_id": "bb543d6470dc1a5c1ec044094973b46435f1763e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2491, "license_type": "permissive", "max_line_length": 87, "num_lines": 66, "path": "/source/back/consulta/models.py", "repo_name": "byron1988/Medicar", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.core.exceptions import ValidationError\nfrom medico.models import Medico\nfrom datetime import date\n\n\nclass Consulta(models.Model):\n \"\"\"\n Classe para definir as consultas\n \"\"\"\n id = models.AutoField(primary_key=True)\n dia = models.DateField()\n horario = models.TimeField(verbose_name='horário')\n data_agendamento = models.DateTimeField(\n auto_now_add=True, verbose_name='data de agendamento')\n medico = models.ForeignKey(Medico,\n verbose_name='Médico',\n on_delete=models.PROTECT,\n help_text='Selecione um médico para a consulta')\n\n class Meta:\n verbose_name = \"consulta\"\n verbose_name_plural = \"consultas\"\n ordering = ['dia']\n\n def __str__(self):\n return f'agendado horário {self.horario}, para o dia:{self.dia}'\n\n # def regra_dia_passado(self):\n # today = date.today()\n # if self.agenda.dia < today:\n # return False\n # return True\n\n # def regra_horario_passado(self):\n # today = date.today()\n # now = timezone.localtime()\n # if self.agenda.dia == today and (self.horario.hour < now.hour\n # or (self.horario.hour == now.hour\n # and self.horario.minute <= now.minute)):\n # return False\n # return True\n\n # def rn_usuario_agendado_em_dia_hora(self):\n # consulta = Consulta.objects.filter(\n # usuario=self.usuario, horario=self.horario, agenda__dia=self.agenda.dia)\n # return not consulta.exists()\n\n # def rn_dia_horario_disponveis(self):\n # horarioagenda = HorarioAgenda.objects.filter(\n # horario=self.horario, agenda=self.agenda, disponivel=True)\n # return horarioagenda.exists()\n\n # def clean(self):\n # if not self.rn_dia_passado():\n # raise ValidationError(\n # \"Não é possível agendar uma consulta em um dia passado.\")\n # if not self.rn_horario_passado():\n # raise ValidationError(\n # \"Não é possível agendar uma consulta em um horário passado.\")\n # if not self.rn_usuario_agendado_em_dia_hora():\n # raise ValidationError(\n # \"Usuário já está agendado para esse dia e horário.\")\n # if not 
self.rn_dia_horario_disponveis():\n # raise ValidationError(\n # \"Horário da agenda não está disponível.\")\n" } ]
29
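A note on the byron1988/Medicar record above: `ConsultaViewSet` in consulta/views.py exposes every `Consulta` row, while README_BACKEND.md requires GET /consultas/ to return only the logged-in user's future appointments in ascending order. Below is a minimal sketch of how that rule could be enforced; it assumes a hypothetical `usuario` foreign key that the dumped `Consulta` model does not declare, so this is an illustration, not the repo's code.

```python
from datetime import datetime

from django.db.models import Q
from rest_framework import viewsets

from consulta.models import Consulta
from consulta.serializers import ConsultaSerializer


class MinhasConsultasViewSet(viewsets.ModelViewSet):
    """Hypothetical variant: only the requesting user's future appointments."""

    serializer_class = ConsultaSerializer

    def get_queryset(self):
        now = datetime.now()
        return (
            Consulta.objects
            .filter(usuario=self.request.user)  # assumed FK, not in the dump
            .filter(Q(dia__gt=now.date())
                    | Q(dia=now.date(), horario__gte=now.time()))
            .order_by("dia", "horario")
        )
```

Filtering in `get_queryset` rather than in the `queryset` class attribute matters here: the attribute is evaluated once at import time, while `get_queryset` runs per request.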
jvazkback/TECSUP-DAE-2021-2
https://github.com/jvazkback/TECSUP-DAE-2021-2
eaaa3e2ba1c5aad8adc90ea8dae40120f7ea2fd4
28af4ebc1b9428f3ec9124fc2ac057d4866d46c9
8a57b4213435baa0c4431d77d0daacfaf97b8f87
refs/heads/main
2023-07-18T01:37:03.152916
2021-08-28T21:43:59
2021-08-28T21:43:59
397,650,863
0
0
MIT
2021-08-18T15:31:45
2021-08-18T23:19:01
2021-08-28T21:43:59
Python
[ { "alpha_fraction": 0.752293586730957, "alphanum_fraction": 0.752293586730957, "avg_line_length": 33.0625, "blob_id": "657b89d4fd8c072055ee67b26af572faecb1f34a", "content_id": "0d3ae1269efddd4224aed15a06ad6aca97e45058", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 545, "license_type": "permissive", "max_line_length": 74, "num_lines": 16, "path": "/lab01/encuesta/views.py", "repo_name": "jvazkback/TECSUP-DAE-2021-2", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nfrom django.http import HttpResponse\n# Create your views here.\n\ndef index(request):\n return HttpResponse(\"saludos desde la vista encuestas\")\n\ndef detalle(request, pregunta_id):\n return HttpResponse(\"Estas viendo la pregunta %s.\" % pregunta_id)\n\ndef resultados(request, pregunta_id):\n response = \"Estas viendo los resultados de la pregunta %s\"\n return HttpResponse(response % pregunta_id)\n\ndef votar(request, pregunta_id):\n return HttpResponse(\"Estas votando por la pregunta %s.\" % pregunta_id)\n" }, { "alpha_fraction": 0.7142857313156128, "alphanum_fraction": 0.8163265585899353, "avg_line_length": 48, "blob_id": "2974da58b63e1339724553cdd4a47b93f2ab3bee", "content_id": "4c924287982f99c3a44dc7233715770f947d605c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 98, "license_type": "permissive", "max_line_length": 77, "num_lines": 2, "path": "/README.md", "repo_name": "jvazkback/TECSUP-DAE-2021-2", "src_encoding": "UTF-8", "text": "# TECSUP-DAE-2021-2\nLaboratorios del curso desarrollo de aplicaciones empresariales 2021-2 Jesus Vilca\n" } ]
2
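The encuesta views in the jvazkback/TECSUP-DAE-2021-2 record above all take a `pregunta_id` argument, but the dump contains no URLconf for the app. A minimal sketch of the urls.py these view signatures imply; the route names and `app_name` are assumptions, not part of the repo.

```python
from django.urls import path

from . import views

app_name = "encuesta"
urlpatterns = [
    path("", views.index, name="index"),
    # <int:pregunta_id> is captured and passed to the view as pregunta_id
    path("<int:pregunta_id>/", views.detalle, name="detalle"),
    path("<int:pregunta_id>/resultados/", views.resultados, name="resultados"),
    path("<int:pregunta_id>/votar/", views.votar, name="votar"),
]
```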
jgferdinando/AntarcticBiodiversityIDW
https://github.com/jgferdinando/AntarcticBiodiversityIDW
1117cb15bdc3254f10030550ad77412d2265881a
c1c65f135c3ae3e7d60eb194ac124c99e5fe4093
0376a60a0b2bcb36a83f76686602b175bf779cef
refs/heads/master
2020-04-17T18:48:46.467782
2019-01-21T16:09:23
2019-01-21T16:09:23
166,842,513
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5514882206916809, "alphanum_fraction": 0.5927493572235107, "avg_line_length": 36.507938385009766, "blob_id": "21a0695c49c6fff3c8e3fdc6cccd500c789641ad", "content_id": "220017f50c30338eb6c02bfd22e0e23ecf154328", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14178, "license_type": "no_license", "max_line_length": 132, "num_lines": 378, "path": "/interpolation.py", "repo_name": "jgferdinando/AntarcticBiodiversityIDW", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Apr 26 17:05:29 2018\n@author: Joe\n\"\"\"\n\nfrom tkinter import *\nfrom math import *\nimport numpy\nfrom scipy import stats\n\nroot = Tk()\n\n################################################################################\n\ndef grabColumn(fileName,listName,index):\n file = open(fileName, 'r')\n text = file.readlines()\n file.close()\n for line in text:\n if line[0] == '2' and line.split()[1] != '03Labyrinth2':\n value = float(line.split()[index])\n listName.append(value)\n else:\n continue \n #print(listName)\n return(listName)\n\n################################################################################\n\ngridXvals = []\ngridYvals = []\ngridXvals2 = []\ngridYvals2 = []\ndef makeGrid(xVals,yVals,resolution):\n interval = (max(xVals)-min(xVals)) / resolution\n xVal = min(xVals)\n yVal = min(yVals)\n while xVal < max(xVals):\n gridXvals.append(xVal)\n xVal += interval*4\n while yVal < max(yVals):\n gridYvals.append(yVal)\n yVal += interval\n #print(gridXvals,gridYvals)\n for gridXval in gridXvals:\n for gridYval in gridYvals:\n gridYvals2.append(gridYval)\n for gridYval in gridYvals:\n for gridXval in gridXvals:\n gridXvals2.append(gridXval)\n \nzValsList = []\ndef createZvalsKern(gridXvals2,gridYvals2,dataXvals,dataYvals,searchRadius):\n for gridX,gridY in zip(gridXvals2,gridYvals2):\n score = 0.0\n for dataX,dataY in zip(dataXvals,dataYvals):\n pi = 3.1415926\n distance = acos( (sin(gridY*pi/180)*sin(dataY*pi/180)) \\\n + (cos(gridY*pi/180)*cos(dataY*pi/180) \\\n *cos((((gridX-dataX)*pi/180)**2)**0.5)) )\n if distance < searchRadius:\n kern = (1.0/(searchRadius*len(dataXvals)))*((distance/(searchRadius)))\n score += kern\n else:\n continue\n zValsList.append(score)\n #print(zValsList)\n \n# IDW algorithm from: https://www.e-education.psu.edu/geog486/node/1877\ndef createZvalsIDW(gridXvals2,gridYvals2,dataXvals,dataYvals,dataZvals,neighborhood):\n for gridX,gridY in zip(gridXvals2,gridYvals2):\n score = 0.0\n numer = 0.0\n denom = 0.0\n for dataX,dataY,dataZ in zip(dataXvals,dataYvals,dataZvals):\n pi = 3.1415926\n distance = acos( (sin(gridY*pi/180)*sin(dataY*pi/180)) \\\n + (cos(gridY*pi/180)*cos(dataY*pi/180) \\\n *cos((((gridX-dataX)*pi/180)**2)**0.5)) )\n if distance < neighborhood:\n weight = ( 1 / ( distance**2 ) )\n numer += ( weight * dataZ )\n denom += weight\n else:\n continue\n score = numer / denom\n zValsList.append(score)\n return(zValsList)\n \ncolors = []\ndef createColors(zValsList):\n ramp = ['#ffffd9','#edf8b1','#c7e9b4','#7fcdbb','#41b6c4','#1d91c0','#225ea8','#253494','#081d58']\n print('max = ',max(zValsList))\n print('min = ',min(zValsList))\n print('range = ',max(zValsList)-min(zValsList))\n transform = (max(zValsList)-min(zValsList))/8\n print('transform = ',transform)\n for zVal in zValsList:\n colorIndex = int( round( ( zVal - min(zValsList) ) / transform ) ) \n #print(colorIndex)\n color = ramp[colorIndex]\n colors.append(color)\n textValue = 
min(zValsList)\n rampLoc = canheight*0.8\n #for color in ramp:\n # can1.create_text(canwidth*0.95,rampLoc, font=\"Arial 11\",text=str(round(textValue,2)))\n # box = [canwidth*0.97,rampLoc,canwidth*0.99,rampLoc,canwidth*0.99,rampLoc+1,canwidth*0.97,rampLoc+1]\n # can1.create_line(box,fill=color)\n # rampLoc -= (canheight*0.6/len(ramp))\n # textValue += transform\n return(colors)\n \n################################################################################\n\ndef pearsons(first,second):\n firstMean =numpy.mean(first)\n secondMean = numpy.mean(second)\n i = 0\n s = 0.0\n t = 0.0\n u = 0.0\n while i < len(first):\n s += ((first[i] - firstMean)*(second[i] - secondMean))\n t += ((first[i] - firstMean)**2)\n u += ((second[i] - secondMean)**2)\n i += 1\n r = (s)/((t**0.5)*(u**0.5))\n print('The Pearsons correlation coefficient is ',str(r))\n return(r)\n\ndef scatterPlotWithRegres(indepVarSet,depVarSet,indepName,depName,title):\n \n #xaxislabel = can1.create_text((canwidth/2),(yAxisStart+25),\\\n # font=\"Arial 10\",text=indepName)\n #yaxislabel = can1.create_text((25),(canheight/2),\\\n # font=\"Arial 10\",text=depName)\n \n indepMax = max(indepVarSet)\n indepMin = min(indepVarSet) \n indepRange = indepMax - indepMin\n indepMean = numpy.mean(indepVarSet)\n \n depMax = max(depVarSet)\n depMin = min(depVarSet) \n depRange = depMax - depMin\n depMean = numpy.mean(depVarSet) \n \n u = 0.0\n v = 0.0\n w = 0.0\n x = 0.0\n y = 0.0\n z = 0.0\n n = len(indepVarSet)\n\n for ind,dep in zip(indepVarSet,depVarSet):\n u += dep\n v += ( ind ** 2 )\n w += ind\n x += (ind * dep )\n y += ( ind ** 2 )\n z += ind\n a = ( ( u * v ) - ( w * x ) ) / ( ( n * y ) - z ** 2 )\n b = ( ( n * x ) - (w * u ) ) / ( ( n * y ) - z ** 2 )\n slope, intercept, r_value, p_value, std_err = stats.linregress(indepVarSet,depVarSet)\n equationstring = 'y = {0}x + {1}\\nr squared = {2}'.format(round(b,3),round(a,3),round(r_value**2,3))\n print(equationstring)\n regStart = ( ( indepMin * b ) + a )\n regEnd = ( ( indepMax * b ) + a )\n plotRegXStart = xAxisStart + ( ((indepMin-indepMin)*(xAxisLength))/indepRange )\n plotRegYStart = yAxisStart - ( ((regStart-depMin)*(yAxisLength))/depRange ) \n plotRegXEnd = xAxisStart + ( ((indepMax-indepMin)*(xAxisLength))/indepRange )\n plotRegYEnd = yAxisStart - ( ((regEnd-depMin)*(yAxisLength))/depRange ) \n plotRegresLine = [plotRegXStart,plotRegYStart,plotRegXEnd,plotRegYEnd]\n #can1.create_line(plotRegresLine,fill='firebrick4',dash=(3, 4))\n #can1.create_text(xAxisEnd-(xAxisLength*1/10),(canheight/2),\\\n # font=\"Arial 11\", fill = 'firebrick4',text=equationstring)\n \n for ind,dep in zip(indepVarSet,depVarSet): \n plotX1 = xAxisStart + ( ((ind-indepMin)*(xAxisLength))/indepRange )\n plotY1 = yAxisStart - ( ((dep-depMin)*(yAxisLength))/depRange )\n plotX = 400 + ( 0.866 * plotX1 ) - ( 0.866 * plotY1 )\n plotY = 900 - ( 0.500 * plotX1 ) - ( 0.500 * plotY1 ) \n point = [plotX,plotY-1000,plotX,plotY+2000,plotX,plotY-1000]\n can1.create_line(point,fill='gray90')\n point = [plotX-2,plotY-2,plotX-2,plotY+2,plotX+2,plotY+2,plotX+2,plotY-2,plotX-2,plotY-2]\n can1.create_line(point,fill='gray75')\n \n j = xAxisStart\n jInterval = (indepRange) / xAxisLength \n k = indepMin\n i = 0\n #while j < xAxisEnd:\n #if i % 150 == 0:\n #can1.create_text(j+7,(yAxisStart+8), font=\"Arial 10\",text=str(round(k,2)))\n #can1.create_line( j, (yAxisStart-3), j, (yAxisStart+3) )\n #else:\n # k = k\n #j += 1\n #k += jInterval\n #i += 1\n \n j = yAxisStart\n jInterval = yAxisLength / 10\n k = depMin\n kInterval = depRange / 
10\n #while j >= yAxisEnd:\n #can1.create_line( (xAxisStart-3), j, (xAxisStart+3), j )\n #can1.create_text((xAxisStart-20),j, font=\"Arial 10\",text=str(round(k,2)))\n #j -= jInterval\n #k += kInterval\n\n title = can1.create_text((canwidth/2),(yAxisEnd*0.4),\\\n font=\"Arial 15 bold\",\\\n text=title )\n \n #can1.create_line( xAxisStart, yAxisStart, xAxisEnd, yAxisStart )\n #can1.create_line( xAxisStart, yAxisStart, xAxisStart, yAxisEnd )\n \ndef gridPlot(indepVarSet,depVarSet,Zs,colors,indepName,depName,title):\n \n #xaxislabel = can1.create_text((canwidth/2),(yAxisStart+25),\\\n # font=\"Arial 12\",text=indepName)\n #yaxislabel = can1.create_text((10),(canheight/2),\\\n # font=\"Arial 12\",text=depName)\n \n indepMax = max(indepVarSet)\n indepMin = min(indepVarSet) \n indepRange = indepMax - indepMin\n indepMean = numpy.mean(indepVarSet)\n \n depMax = max(depVarSet)\n depMin = min(depVarSet) \n depRange = depMax - depMin\n depMean = numpy.mean(depVarSet) \n \n for ind,dep,color,Z in zip(indepVarSet,depVarSet,colors,Zs): \n plotX1 = xAxisStart + ( ((ind-indepMin)*(xAxisLength))/indepRange )\n plotY1 = yAxisStart - ( ((dep-depMin)*(yAxisLength))/depRange )\n plotX = -300 + ( 1.35 * plotX1 ) + ( 0.3 * plotY1 ) #0.866\n plotY = 1800 - ( -0 * plotX1 ) + ( 0.5 * plotY1 ) + ( -220 * (Z**(0.9)) ) #0.5 ############################################\n size = 0.5+4*((Z-10)**2)\n point = [plotX,plotY-size,plotX,plotY,plotX+size,plotY,plotX+size,plotY-size,plotX,plotY-size]\n #point = [plotX-1,plotY-1,plotX+1,plotY-1,plotX+1,plotY+1,\\\n # plotX-1,plotY+1,plotX-1,plotY-1]\n can1.create_line(point,fill=color)\n \n #j = xAxisStart\n #jInterval = (indepRange) / xAxisLength \n #k = indepMin\n #i = 0\n #while j < xAxisEnd:\n # if i % 150 == 0:\n # can1.create_text(j+7,(yAxisStart+8), font=\"Arial 12\",text=str(round(k,2)))\n # can1.create_line( j, (yAxisStart-3), j, (yAxisStart+3) )\n # else:\n # k = k\n # j += 1\n # k += jInterval\n # i += 1\n \n j = yAxisStart\n jInterval = yAxisLength / 10\n k = depMin\n kInterval = depRange / 10\n #while j >= yAxisEnd:\n #can1.create_line( (xAxisStart-3), j, (xAxisStart+3), j )\n #can1.create_text((xAxisStart-20),j, font=\"Arial 12\",text=str(round(k,2)))\n #j -= jInterval\n #k += kInterval\n\n #title = can1.create_text((canwidth/2),(yAxisEnd*0.4),\\\n #font=\"Arial 16 bold\",\\\n #text=title )\n \n #can1.create_line( xAxisStart, yAxisStart, xAxisEnd, yAxisStart )\n #can1.create_line( xAxisStart, yAxisStart, xAxisStart, yAxisEnd )\n\n################################################################################ \n\nlons = []\nlats = []\nsoilMoisture = []\nchlorophyllA = []\norganicCarbon = []\nsoilCond = []\nsoilSalinity = []\nsoilpH = []\nelevation = []\ngrabColumn('DryValleyData.txt',lons,2)\ngrabColumn('DryValleyData.txt',lats,3)\ngrabColumn('DryValleyData.txt',soilMoisture,4)\ngrabColumn('DryValleyData.txt',chlorophyllA,5)\ngrabColumn('DryValleyData.txt',organicCarbon,6)\ngrabColumn('DryValleyData.txt',soilCond,7)\ngrabColumn('DryValleyData.txt',soilSalinity,8)\ngrabColumn('DryValleyData.txt',soilpH,9)\ngrabColumn('DryValleyData.txt',elevation,10)\n\nprint(elevation)\n\nspecies = [8,4,3,3,5,3,3,4,7,7,9,11,7,10]\n\nhabitabilitys = []\nfor moisture,chloro,carbon,conductivity,salinity,ph,elev \\\nin zip(soilMoisture,chlorophyllA,organicCarbon,soilCond,soilSalinity,soilpH,elevation):\n habitability = (moisture*chloro*carbon*ph)/(salinity*conductivity*elev+0.0000001)\n habitabilitys.append(habitability)\n\nweightHab = []\nfor 
moisture,chloro,carbon,conductivity,salinity,ph,elev \\\nin zip(soilMoisture,chlorophyllA,organicCarbon,soilCond,soilSalinity,soilpH,elevation):\n habitability = 1.274*0.565*moisture/max(soilMoisture)\\\n +0.062*0.123*chloro/max(chlorophyllA)\\\n +2.175*0.079*carbon/max(organicCarbon)\\\n +2.82*0.368*ph/max(soilpH)\\\n -1.437*0.193*salinity/max(soilSalinity)\\\n -0.832*0.198*conductivity/max(soilCond)\\\n -0.013*0.487*elev/max(elevation)\n weightHab.append(habitability)\nprint(weightHab)\n\nweightHab2 = []\nfor moisture,chloro,carbon,conductivity,salinity,ph,elev \\\nin zip(soilMoisture,chlorophyllA,organicCarbon,soilCond,soilSalinity,soilpH,elevation):\n habitability = pearsons(species,soilMoisture)*moisture/max(soilMoisture)\\\n +pearsons(species,chlorophyllA)*chloro/max(chlorophyllA)\\\n +pearsons(species,organicCarbon)*carbon/max(organicCarbon)\\\n +pearsons(species,soilpH)*ph/max(soilpH)\\\n +pearsons(species,soilSalinity)*salinity/max(soilSalinity)\\\n +pearsons(species,soilCond)*conductivity/max(soilCond)\\\n +pearsons(species,elevation)*elev/max(elevation)\n weightHab2.append(habitability)\nprint(weightHab2)\n\ncanheight = 800\nratio = (((max(lons)-min(lons)) / (max(lats)-min(lats))**2)**0.5)*0.2\ncanwidth = canheight * ratio\nxAxisFactor = 0.8\nyAxisFactor = 0.8\n\ncan1 = Canvas(root, width = canwidth, height = canheight )\nxAxisStart = canwidth - ( canwidth * xAxisFactor )\nxAxisEnd = canwidth * xAxisFactor \nxAxisLength = xAxisEnd - xAxisStart\nyAxisStart = canheight * yAxisFactor \nyAxisEnd = canheight - ( canheight * yAxisFactor )\nyAxisLength = yAxisStart - yAxisEnd\n\n#print(len(elevation))\n#print(len(species))\n\n\nmakeGrid(lons,lats,600)\n#print(elevation)\n#createZvalsKern(gridXvals2,gridYvals2,lons,lats,0.0005)\n#moistures = createZvalsIDW(gridXvals2,gridYvals2,lons,lats,soilMoisture,0.005)\n#chloros = createZvalsIDW(gridXvals2,gridYvals2,lons,lats,chlorophyllA,0.005)\n#carbons = createZvalsIDW(gridXvals2,gridYvals2,lons,lats,organicCarbon,0.005)\n#conductivitys = createZvalsIDW(gridXvals2,gridYvals2,lons,lats,soilCond,0.005)\n#salinitys = createZvalsIDW(gridXvals2,gridYvals2,lons,lats,soilSalinity,0.005)\nphs = createZvalsIDW(gridXvals2,gridYvals2,lons,lats,soilpH,0.005)\n#elevations = createZvalsIDW(gridXvals2,gridYvals2,lons,lats,elevation,0.02)\n#biodiv = createZvalsIDW(gridXvals2,gridYvals2,lons,lats,species,0.005)\n#createZvalsIDW(gridXvals2,gridYvals2,lons,lats,weightHab,0.005)\nprint(zValsList)\ncreateColors(zValsList)\n#print(colors)\ngridPlot(gridXvals2,gridYvals2,zValsList,colors,'Lon','Lat','Antarctic Dry Valley Expected Habitability v2')\n\n#scatterPlotWithRegres(lons,lats,\\\n#' ',' ',\\\n#' ')\n\ncan1.pack()\nroot.mainloop()\n" } ]
1
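One caveat about `createZvalsIDW` in the interpolation.py record above: when no sample point falls inside `neighborhood`, `denom` stays 0.0 and `score = numer / denom` raises ZeroDivisionError; a sample exactly at a grid node likewise makes `1 / distance**2` divide by zero. A hedged rewrite of the same weighting that guards both cases; the helper names are mine, not the script's.

```python
from math import acos, cos, sin, pi


def central_angle(lon1, lat1, lon2, lat2):
    """Great-circle central angle in radians, as computed in the script."""
    return acos(sin(lat1 * pi / 180) * sin(lat2 * pi / 180)
                + cos(lat1 * pi / 180) * cos(lat2 * pi / 180)
                * cos(abs(lon1 - lon2) * pi / 180))


def idw_at(grid_lon, grid_lat, lons, lats, zs, neighborhood, fill=float("nan")):
    """Inverse-distance-weighted estimate at one grid node.

    Returns `fill` instead of dividing by zero when no sample lies within
    `neighborhood`, and returns the sample value exactly when the grid
    node coincides with a sample point.
    """
    numer = denom = 0.0
    for lon, lat, z in zip(lons, lats, zs):
        d = central_angle(grid_lon, grid_lat, lon, lat)
        if d == 0.0:
            return z
        if d < neighborhood:
            w = 1.0 / d ** 2
            numer += w * z
            denom += w
    return numer / denom if denom else fill
```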
vdoddaguni12345/hiveVD
https://github.com/vdoddaguni12345/hiveVD
f081327c11a76f2d122ffa64d2b4d57e0a3b2932
c42747647c629df49f1075a2ef0f703463b65c51
7ee85c0a26c1b99f8064162a09954d43dbf8ad4c
refs/heads/main
2023-02-26T08:28:46.545571
2021-01-31T03:45:25
2021-01-31T03:45:25
334,562,567
0
0
null
2021-01-31T03:17:33
2021-01-31T03:23:46
2021-01-31T03:45:26
null
[ { "alpha_fraction": 0.7808219194412231, "alphanum_fraction": 0.7808219194412231, "avg_line_length": 23.33333396911621, "blob_id": "295909a6a970251348ec76f2c33a68765986c8cd", "content_id": "6dc4aa48e85f0fb436bb2bcf57492fe70300e4f5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 73, "license_type": "no_license", "max_line_length": 34, "num_lines": 3, "path": "/README.md", "repo_name": "vdoddaguni12345/hiveVD", "src_encoding": "UTF-8", "text": "# hiveVD\nThis is a hive test project \nI have created some python scripts\n" }, { "alpha_fraction": 0.5274040102958679, "alphanum_fraction": 0.5530029535293579, "avg_line_length": 21.25954246520996, "blob_id": "6e66e65ee61b9bf175daeda1dc9acf376dd768ef", "content_id": "7e9e0b85686a98b8fddd927d6cc1081babf8b8c8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3047, "license_type": "no_license", "max_line_length": 82, "num_lines": 131, "path": "/test.py", "repo_name": "vdoddaguni12345/hiveVD", "src_encoding": "UTF-8", "text": "#import modu\r\n# def compute(num1, num2):\r\n# return (num1 + num2)\r\n\r\n# from pyspark.sql import SparkSession\r\n#import json\r\n#from lib import functions, myClasses\r\n#import modules\r\n# spark = SparkSession.builder.appName(\"test\").getOrCreate()\r\n# spark.sql(\"show databases\").show()\r\n\r\nprint(\"hello world\")\r\n \r\n\r\n# ex1: format of numbers and strings \r\n# one = 1\r\n# two = 2\r\n# three = one + two\r\n# myString1 = \"welcome to python\"\r\n#print(\"the sum of %d and %d is %d\" %(one,two, three) ) #old\r\n# print(\"the sum of {} AND {} IS {}\".format(one, two , three)) # new\r\n# print(\"excitung python {} \".format(myString1) ) #new\r\n\r\n# ex2 :functions \r\n# x1=10\r\n# x2=20\r\n# x3=30; x4=40; x5=50\r\n# print('sum of {} and {} is {}'.format(x1,x2,functions.myAdd(x1,x2)))\r\n# print('Av. 
of five numbers is : {}'.format(functions.myAverage(x1,x2,x3,x4,x5)))\r\n# print('Factorial of {} is : {}'.format(x1,functions.myFact(x1)))\r\n\r\n#ex3 : classes \r\n# myRama = myClasses.myPersonBasic('baba', 2)\r\n# myRama.myDetailsDisplay()\r\n# myRama.name = 'SitaDevi'\r\n# myRama.age = 22\r\n# myRama.myDetailsDisplay()\r\n#del myRama\r\n# mySita = myClasses.myStudent()\r\n# mySita.myInheritanceDisplay\r\n# mySita.myDetailsDisplay('rama', 25)\r\n\r\n#ex4 - Lambda functions \r\n# x1 = lambda a: a+2\r\n# print(x1(5))\r\n# x2 = lambda a,b : a*b\r\n# print(x2(5,2))\r\n# def myfunc1 (n):\r\n# return lambda a: a*n\r\n\r\n# myDoubler = myfunc1(2) \r\n\r\n# print(myDoubler(4))\r\n\r\n#ex5 - for loops \r\n# fruits = ['apple', 'banana', 'coconut']\r\n# for fruit in fruits:\r\n# if fruit == 'banana':\r\n# continue\r\n# print(fruit)\r\n\r\n#functions.myfunc1('abhai', 'babu', 'chandra', 'david') \r\n\r\nimport json\r\n\r\n# dict = {'Python' : '.py', 'C++' : '.cpp', 'Java' : '.java'}\r\n\r\n# json = json.dumps(dict)\r\n# f = open(\"dict.json\",\"w\")\r\n# f.write(json)\r\n# f.close()\r\n\r\n# f1 = open(\"dict.txt\",\"w\")\r\n# f1.write(str(dict))\r\n# f1.close()\r\n\r\n#ex2 - Writing JSON to a File\r\n# data = {}\r\n# data['people'] = []\r\n# data['people'].append({\r\n# 'name': 'Scott',\r\n# 'website': 'stackabuse.com',\r\n# 'from': 'Nebraska'\r\n# })\r\n# data['people'].append({\r\n# 'name': 'Larry',\r\n# 'website': 'google.com',\r\n# 'from': 'Michigan'\r\n# })\r\n# data['people'].append({\r\n# 'name': 'Tim',\r\n# 'website': 'apple.com',\r\n# 'from': 'Alabama'\r\n# })\r\n\r\n# data['people'].append({\r\n# 'name': 'vivek',\r\n# 'website': 'vivek.com',\r\n# 'from': 'Alabama'\r\n# })\r\n\r\n# with open('data.txt', 'w') as outfile:\r\n# json.dump(data, outfile)\r\n\r\n# with open('data.txt') as json_file:\r\n# data = json.load(json_file)\r\n# for p in data['people']:\r\n# print('Name: ' + p['name'])\r\n# print('Website: ' + p['website'])\r\n# print('From: ' + p['from'])\r\n# print('')\r\n\r\n#ex 3\r\n# def square(func):\r\n# def inner(x):\r\n# return func(x) ** 2\r\n# return inner\r\n\r\n# #@square\r\n# def dbl(x):\r\n# return x*2\r\n\r\n# print(\"The value of x is with decorator \",dbl(2)) \r\nfor i in range(10):\r\n print(i)\r\ns = [1,2,3,4,5] \r\nprint(s[3]) \r\n\r\n\r\nx= 20.220 \r\nprint(f'value of x is {x}')\r\n" } ]
2
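test.py in the hiveVD record above leaves its json experiments commented out. For reference, a compact sketch of the dump/load round trip that block was exercising; the filename and keys are illustrative only, not taken from the repo.

```python
import json

people = {"people": [
    {"name": "Scott", "website": "stackabuse.com", "from": "Nebraska"},
]}

# Serialize the dict to disk as JSON...
with open("data.json", "w") as fh:
    json.dump(people, fh, indent=2)

# ...and read it back, iterating the same structure.
with open("data.json") as fh:
    for person in json.load(fh)["people"]:
        print(f"{person['name']} -> {person['website']}")
```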
Zed-chi/dvmn_frontend_ch3
https://github.com/Zed-chi/dvmn_frontend_ch3
078776d4a74f6719a16692bfc93a757076ccbf3d
65a3c32ef4e1e1c23b1ce41a6451d4076e651a5c
0c694b3daa476ca4c222fa9fa77bedee681554f5
refs/heads/master
2023-05-28T05:39:53.027324
2021-01-15T10:36:08
2021-01-15T10:36:08
232,918,274
0
0
null
2020-01-09T22:28:57
2021-01-15T10:36:12
2023-05-23T00:12:38
Python
[ { "alpha_fraction": 0.6308262944221497, "alphanum_fraction": 0.6337394118309021, "avg_line_length": 25.405593872070312, "blob_id": "65d998a0cc6b942dd22c3b58c0fcd8d585cbd322", "content_id": "a804b29bc8324d07716112b7e55b614dc8b5c7fd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3812, "license_type": "no_license", "max_line_length": 79, "num_lines": 143, "path": "/utils.py", "repo_name": "Zed-chi/dvmn_frontend_ch3", "src_encoding": "UTF-8", "text": "import json\nimport os\nimport re\nimport urllib\n\nfrom bs4 import BeautifulSoup\n\nfrom pathvalidate import sanitize_filepath\n\nimport requests\n\n\nBASE_URL = \"https://tululu.org\"\n\n\"\"\" helper errors \"\"\"\n\n\nclass EmptyBookError(ValueError):\n pass\n\n\nclass EmptyDetailsError(ValueError):\n pass\n\n\nclass EmptyImageError(ValueError):\n pass\n\n\nclass EmptyHTMLError(ValueError):\n pass\n\n\nclass URLParseError(ValueError):\n pass\n\n\n\"\"\" helper functions \"\"\"\n\n\ndef check_status_code(response):\n if response.status_code >= 300:\n message = f\"Site answered with {response.status_code} code\"\n raise requests.HTTPError(message)\n return True\n\n\ndef get_content_from_url(url, allow_redirects=False):\n response = requests.get(url, allow_redirects=allow_redirects, verify=False)\n check_status_code(response)\n return response.content\n\n\ndef get_text_from_url(url, urlparams=None, allow_redirects=False):\n response = requests.get(\n url, allow_redirects=allow_redirects, params=urlparams, verify=False\n )\n check_status_code(response)\n return response.text\n\n\ndef get_id_from_book_url(url):\n result = re.search(r\"b([0-9]+)\", url)\n if not result:\n raise URLParseError(f\"Cant get book id from {url}\")\n return result.group(1)\n\n\ndef get_book_details(html, base_url):\n soup = BeautifulSoup(html, \"lxml\")\n header = soup.select_one(\"#content > h1\").text\n title, author = [text.strip() for text in header.split(\"::\")]\n img = soup.select_one(\".bookimage img\")\n src = urllib.parse.urljoin(base_url, img.get(\"src\"))\n comments = [tag.text for tag in soup.select(\".texts span\")]\n genres = [tag.text for tag in soup.select(\"#content > .d_book > a\")]\n\n return {\n \"title\": title,\n \"author\": author,\n \"img_url\": src,\n \"comments\": comments,\n \"genres\": genres,\n }\n\n\ndef save_book(filepath, content):\n os.makedirs(os.path.dirname(filepath), exist_ok=True)\n if os.path.exists(filepath):\n raise FileExistsError(f\"Book {filepath} already saved\")\n with open(filepath, \"w\", encoding=\"utf-8\") as file:\n file.write(content)\n\n\ndef download_txt(from_=\"\", to=\"\", urlparams=None):\n try:\n path = sanitize_filepath(to, platform=\"auto\")\n content = get_text_from_url(from_, urlparams)\n if not content:\n raise EmptyBookError(f\"Got empty textfile from {from_}\")\n save_book(path, content)\n except Exception as e:\n print(e)\n\n\ndef print_book_details(details):\n print(\"\\n==========\")\n if details[\"title\"]:\n print(f\"=== Заголовок: {details['title']} ===\")\n if details[\"author\"]:\n print(f\"=== Автор: {details['author']} ===\")\n if details[\"comments\"]:\n comments = \"\\n \".join(details[\"comments\"])\n print(f\"=== Комментарии: \\n{comments} ===\")\n if details[\"genres\"]:\n print(f\"=== Жанры: {details['genres']} ===\")\n if details[\"img_url\"]:\n print(f\"=== Ссылка: {details['img_url']} ===\")\n print(\"==========\")\n\n\ndef save_image(filepath, content):\n os.makedirs(os.path.dirname(filepath), exist_ok=True)\n if 
os.path.exists(filepath):\n raise FileExistsError(f\"Image {filepath} is already saved.\")\n with open(filepath, \"wb\") as file:\n file.write(content)\n\n\ndef download_image(from_=None, to=None):\n try:\n path = sanitize_filepath(to, platform=\"auto\")\n content = get_content_from_url(from_)\n if not content:\n raise EmptyImageError(f\"Got empty image from {from_}\")\n save_image(path, content)\n except Exception as e:\n print(e)\n\n\ndef make_description(json_dict, filepath=\"./books.json\"):\n with open(filepath, \"w\", encoding=\"utf-8\") as write_file:\n json.dump(json_dict, write_file, indent=4, ensure_ascii=False)\n" }, { "alpha_fraction": 0.517241358757019, "alphanum_fraction": 0.7126436829566956, "avg_line_length": 16.399999618530273, "blob_id": "e7a8d9b9c08abd2f4ab768dc81de5f5baddc0a56", "content_id": "83f66b867bac3f9fdcd1e4db694678d639547188", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 87, "license_type": "no_license", "max_line_length": 21, "num_lines": 5, "path": "/requirements.txt", "repo_name": "Zed-chi/dvmn_frontend_ch3", "src_encoding": "UTF-8", "text": "beautifulsoup4==4.9.1\nlxml==4.6.2\nrequests==2.24.0\nargparse==1.4.0\npathvalidate==2.3.0\n" }, { "alpha_fraction": 0.5775432586669922, "alphanum_fraction": 0.5781295895576477, "avg_line_length": 27.424999237060547, "blob_id": "857bf7a9a1ca9b4e7c5d110a284b9161cf1cbde1", "content_id": "f185346a84577a467588ebc6012bde6cfe055062", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3411, "license_type": "no_license", "max_line_length": 79, "num_lines": 120, "path": "/app.py", "repo_name": "Zed-chi/dvmn_frontend_ch3", "src_encoding": "UTF-8", "text": "import argparse\nimport logging\nimport os\n\nfrom xml.etree.ElementTree import ParseError\n\nfrom parse_tululu_category import get_links_from_pages\n\nfrom requests import HTTPError\n\nfrom utils import (\n EmptyBookError,\n EmptyDetailsError,\n EmptyHTMLError,\n EmptyImageError,\n URLParseError,\n download_image,\n download_txt,\n get_book_details,\n get_id_from_book_url,\n get_text_from_url,\n make_description,\n)\n\nlogging.basicConfig(level=logging.INFO)\nBASE_URL = \"https://tululu.org\"\nBASE_BOOK_PAGE = \"https://tululu.org/b\"\nBASE_TXT_URL = \"https://tululu.org/txt.php\"\n\n\ndef get_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--start_page\", default=1, type=int)\n parser.add_argument(\"--end_page\", type=int)\n parser.add_argument(\"--dest_folder\", default=\"./\")\n parser.add_argument(\"--skip_imgs\", action=\"store_true\", default=False)\n parser.add_argument(\"--skip_txt\", action=\"store_true\", default=False)\n parser.add_argument(\"--json_path\")\n return parser.parse_args()\n\n\ndef get_name_from_url(url):\n return url.split(\"/\")[-1]\n\n\ndef main():\n args = get_args()\n\n books_dir = os.path.join(args.dest_folder, \"books\")\n images_dir = os.path.join(args.dest_folder, \"images\")\n json_filepath = args.json_path or os.path.join(\n args.dest_folder,\n \"books.json\",\n )\n\n links = get_links_from_pages(args.start_page, args.end_page)\n description = []\n\n if not links:\n logging.warning(\"No files to download :(\")\n return None\n else:\n logging.info(f\"Going to download {len(links)} files...\")\n\n for id, link in enumerate(links):\n try:\n html = get_text_from_url(link, allow_redirects=True)\n if not html:\n raise EmptyHTMLError(\"Book Page html is empty\")\n details = get_book_details(html, link)\n if not 
details:\n raise EmptyDetailsError(\"Details is empty\")\n\n if not args.skip_imgs:\n image_filename = get_name_from_url(details[\"img_url\"])\n path = os.path.normcase(\n os.path.abspath(os.path.join(images_dir, image_filename))\n )\n\n details[\"img_src\"] = path\n download_image(from_=details[\"img_url\"], to=details[\"img_src\"])\n\n if args.skip_txt:\n continue\n book_filename = f\"{id}.{details['title']}.txt\"\n path = os.path.normcase(\n os.path.abspath(os.path.join(books_dir, book_filename))\n )\n details[\"book_path\"] = path\n txt_id = get_id_from_book_url(link)\n if not txt_id:\n continue\n download_txt(\n from_=BASE_TXT_URL,\n to=details[\"book_path\"],\n urlparams={\"id\": txt_id},\n )\n\n logging.info(f\"File '{book_filename}' has been saved\")\n\n description.append(details)\n\n except (\n HTTPError,\n ParseError,\n ConnectionError,\n FileExistsError,\n EmptyBookError,\n EmptyImageError,\n EmptyHTMLError,\n URLParseError,\n ) as e:\n logging.error(e)\n\n make_description({\"books\": description}, json_filepath)\n logging.info(f\"Files are downloaded, description in {json_filepath}\")\n\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.5950000286102295, "alphanum_fraction": 0.5989999771118164, "avg_line_length": 24.64102554321289, "blob_id": "c7acac212e10a7909e8060bb40b2b8fc2018db37", "content_id": "d88db724a3eb3e9916851a5864b3e033b45c7688", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1000, "license_type": "no_license", "max_line_length": 78, "num_lines": 39, "path": "/parse_tululu_category.py", "repo_name": "Zed-chi/dvmn_frontend_ch3", "src_encoding": "UTF-8", "text": "import urllib\n\nfrom bs4 import BeautifulSoup\n\nfrom utils import get_text_from_url\n\nSFICTION_URL = \"https://tululu.org/l55/\"\n\n\ndef get_all_book_links_on_page(html):\n soup = BeautifulSoup(html, \"lxml\")\n hrefs = soup.select(\n \"div#content table.d_book tr:first-child td:first-child a\"\n )\n links = [urllib.parse.urljoin(SFICTION_URL, a.get(\"href\")) for a in hrefs]\n return links\n\n\ndef get_sfiction_list_books_page(page_num):\n url = f\"{SFICTION_URL}{page_num}/\"\n return get_text_from_url(url)\n\n\ndef get_links_from_pages(startpage, endpage=None):\n links = []\n page_num = startpage\n while True:\n try:\n if endpage and page_num == endpage:\n return links\n html = get_sfiction_list_books_page(page_num)\n if not html:\n return links\n links.extend(get_all_book_links_on_page(html))\n page_num += 1\n except Exception:\n print(page_num, \"error\")\n break\n return links\n" }, { "alpha_fraction": 0.7021883726119995, "alphanum_fraction": 0.7193149328231812, "avg_line_length": 34.03333282470703, "blob_id": "97306462a5225a70c7d584201aad32e03e49d98b", "content_id": "9fd9fa132aa05202f419790fa176371064fb9d06", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1532, "license_type": "no_license", "max_line_length": 170, "num_lines": 30, "path": "/README.md", "repo_name": "Zed-chi/dvmn_frontend_ch3", "src_encoding": "UTF-8", "text": "# Online library parser (science fiction genre)\nHelps download books and covers from [tululu.org](tululu.org)\n\n[![Maintainability](https://api.codeclimate.com/v1/badges/ac03e2881bf9a8a1e734/maintainability)](https://codeclimate.com/github/Zed-chi/dvmn_frontend_ch3/maintainability)\n\n\n## Requirements:\n- python 3.7\n\n## Installation:\n```\n>> pip install -r requirements.txt\n```\n\n## Running:\n```\n>> python3 app.py\n```\nAvailable arguments:\n* 
`--start_page` -> (value - integer, default=1) Which page to start downloading from.\n* `--end_page` -> (value - integer) Which page to download up to.\n* `--dest_folder` -> (value - string, default=\"./\") Path into which books and covers are downloaded and where the book description file is saved.\n* `--skip_imgs` -> (flag, takes no value) Do not download covers.\n* `--skip_txt` -> (flag, takes no value) Do not download books.\n* `--json_path` -> (value - string) Path where the description file for all books is saved.\n\n\n## Project goal\n\nThe code was written for educational purposes on the online course for web developers [dvmn.org](https://dvmn.org/)\n" } ]
5
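The Zed-chi/dvmn_frontend_ch3 downloader above treats any HTTP status >= 300 as an error (`check_status_code` in utils.py), which is how it detects tululu.org's redirect response for missing books, but app.py also gives up on the first transient network failure. A small retry wrapper one could layer on top of the repo's own `get_text_from_url`; the helper name and retry policy are assumptions, not part of the repo.

```python
import time

from requests import HTTPError

from utils import get_text_from_url


def get_text_with_retries(url, tries=3, delay=5):
    """Retry transient failures, then re-raise the last error.

    Note: a 302 for a genuinely missing book will fail on every
    attempt, so keep `tries` small.
    """
    for attempt in range(1, tries + 1):
        try:
            return get_text_from_url(url)
        except (HTTPError, ConnectionError):
            if attempt == tries:
                raise
            time.sleep(delay)  # back off before the next attempt
```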
zhou-zheng/technote
https://github.com/zhou-zheng/technote
45b7a13c382b179eb7e9fa8a578a50f73551405c
426658343338b0e017c796d18bffc87f1f152c43
788888b47e20846bfbdcabf384d10ee5c132d778
refs/heads/master
2020-03-21T17:25:11.826532
2018-06-27T06:34:51
2018-06-27T06:34:51
138,830,664
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6944444179534912, "alphanum_fraction": 0.75, "avg_line_length": 17.5, "blob_id": "730bfc26faff73b29b4dda6b3e0e71d2a3b21807", "content_id": "7d8df39dcb49c9d11b9e7725d0371f57927410cc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 36, "license_type": "no_license", "max_line_length": 24, "num_lines": 2, "path": "/python/MutuallyImportProblem/foo2.py", "repo_name": "zhou-zheng/technote", "src_encoding": "UTF-8", "text": "foo_var = 1\nfrom bar2 import bar_var" }, { "alpha_fraction": 0.6837233304977417, "alphanum_fraction": 0.7168937921524048, "avg_line_length": 26.785715103149414, "blob_id": "bd45d88ed3f1f69d2b9cab48106a7a3f65c65761", "content_id": "54d2e832c37290ed6399edd876e9656423d18883", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4905, "license_type": "no_license", "max_line_length": 163, "num_lines": 140, "path": "/python/MutuallyImportProblem/MutuallyImportProblem.md", "repo_name": "zhou-zheng/technote", "src_encoding": "UTF-8", "text": "# Python 中的循环导入依赖问题\n\n今天在阅读《Flask Web 开发》一书的 P69 时发现了“避免循环导入依赖”一词,在 google 之后初步搞懂这个问题,记录成此文。 \n\n### 问题描述\n这个问题简单来说,就是两个包产生了互相依赖之后导致 ImportError,举例说明最清晰明了。 \n[foo1.py](https://github.com/zhou-zheng/technote/blob/master/python/MutuallyImportProblem/foo1.py)\n``` Python\nfrom bar1 import bar_var\nfoo_var = 1\n```\n[bar1.py](https://github.com/zhou-zheng/technote/blob/master/python/MutuallyImportProblem/bar1.py)\n``` Python\nfrom foo1 import foo_var\nbar_var = 2\n```\n此时如果我们在 Python Shell 中导入 foo1.py 或 bar1.py 都会出错。 \n``` Shell\n>>> import foo1\nTraceback (most recent call last):\n File \"<stdin>\", line 1, in <module>\n File \"foo1.py\", line 1, in <module>\n from bar1 import bar_var\n File \"bar1.py\", line 1, in <module>\n from foo1 import foo_var\nImportError: cannot import name 'foo_var'\n>>>\n>>> import bar1\nTraceback (most recent call last):\n File \"<stdin>\", line 1, in <module>\n File \"bar1.py\", line 1, in <module>\n from foo1 import foo_var\n File \"foo1.py\", line 1, in <module>\n from bar1 import bar_var\nImportError: cannot import name 'bar_var'\n```\n---\n### 问题分析\n为什么会产生这样的 ImportError 呢?我们以 import foo1 来分析下整个流程就明白了。 \n- Python Shell 导入 foo1\n- 创建 foo1 的全局符号字典\n- foo1 被 Python 解释器解释执行\n- foo1 导入 bar1\n- 创建 bar1 的全局符号字典\n- bar1 被 Python 解释器解释执行\n- bar1 导入 foo1(此时已经加载过模块 foo1)\n- bar1 导入 foo1 的 foo_var,即 bar1.foo_var = foo1.foo_var\n\n在最后一步,因为 foo1 刚开始解释执行就转去了 bar1,所以 foo1 中的 foo_var 还没有被 Python 解释器解释执行出来,必然出现 cannot import name 'foo_var'!\n\n---\n### 问题解决\n找到了问题的原因,对症下药就可以了。一个简便取巧的做法就是调整代码顺序来打破这个循环依赖的怪圈。 \n[foo2.py](https://github.com/zhou-zheng/technote/blob/master/python/MutuallyImportProblem/foo2.py)\n``` Python\nfoo_var = 1\nfrom bar2 import bar_var\n```\n[bar2.py](https://github.com/zhou-zheng/technote/blob/master/python/MutuallyImportProblem/bar2.py)\n``` Python\nbar_var = 1\nfrom foo2 import foo_var\n```\n此时如果我们在 Python Shell 中导入 foo2.py 或 bar2.py 都会成功。\n``` Shell\n>>> import foo2\n>>> import bar2\n>>> foo2.foo_var, foo2.bar_var\n(1, 2)\n>>> bar2.foo_var, bar2.bar_var\n(1, 2)\n```\n为什么成功了呢?同样地我们仍然以 import foo2 为例再对流程分析一遍:\n- Python Shell 导入 foo2\n- 创建 foo2 的全局符号字典\n- foo2 被 Python 解释器解释执行\n- 创建 foo2.foo_var \n- foo2 导入 bar2\n- 创建 bar2 的全局符号字典\n- bar2 被 Python 解释器解释执行\n- 创建 bar2.bar_var\n- bar2 导入 foo2(此时已经加载过模块 foo2)\n- bar2 导入 foo2 的 foo_var,即 bar2.foo_var = foo2.foo_var\n- foo2 继续导入 bar2 的 bar_var,即 foo2.bar_var = bar2.bar_var\n\n至此全部成功!\n\n问题引申\n如果我们在 foo1/bar1 的基础上只修改 foo1 或 bar1 
的话,会发生什么呢? \n[foo3.py](https://github.com/zhou-zheng/technote/blob/master/python/MutuallyImportProblem/foo3.py)\n``` Python\nfoo_var = 1\nfrom bar3 import bar_var\n```\n[bar3.py](https://github.com/zhou-zheng/technote/blob/master/python/MutuallyImportProblem/bar3.py)\n``` Python\nfrom foo3 import foo_var\nbar_var = 2\n```\nPython Shell 导入结果\n``` Shell\n>>> import bar3\nTraceback (most recent call last):\n File \"<stdin>\", line 1, in <module>\n File \"bar3.py\", line 1, in <module>\n from foo3 import foo_var\n File \"foo3.py\", line 2, in <module>\n from bar3 import bar_var\nImportError: cannot import name 'bar_var'\n>>> import foo3\n>>> import bar3\n```\n[foo4.py](https://github.com/zhou-zheng/technote/blob/master/python/MutuallyImportProblem/foo4.py)\n``` Python\nfrom bar4 import bar_var\nfoo_var = 1\n```\n[bar4.py](https://github.com/zhou-zheng/technote/blob/master/python/MutuallyImportProblem/bar4.py)\n``` Python\nbar_var = 2\nfrom foo4 import foo_var\n```\nPython Shell 导入结果\n``` Shell\n>>> import foo4\nTraceback (most recent call last):\n File \"<stdin>\", line 1, in <module>\n File \"foo4.py\", line 1, in <module>\n from bar4 import bar_var\n File \"bar4.py\", line 2, in <module>\n from foo4 import foo_var\nImportError: cannot import name 'foo_var'\n>>> import bar4\n>>> import foo4\n```\n有没有发现执行的结果随着 import 的顺序的不同而不同了呢?具体原因相信只要搞懂了前两种分析的同学都会明白,我就不多啰嗦了。\n\n---\n参考文献: \n1. [How can I have modules that mutually import each other?](https://docs.python.org/2/faq/programming.html#how-can-i-have-modules-that-mutually-import-each-other)" }, { "alpha_fraction": 0.6944444179534912, "alphanum_fraction": 0.75, "avg_line_length": 17.5, "blob_id": "43d2d8da84e949756fc01604b1c4a747adaf5c75", "content_id": "febbee8dce639bef016174389c7928093ca7e764", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 36, "license_type": "no_license", "max_line_length": 24, "num_lines": 2, "path": "/python/MutuallyImportProblem/bar2.py", "repo_name": "zhou-zheng/technote", "src_encoding": "UTF-8", "text": "bar_var = 2\nfrom foo2 import foo_var" }, { "alpha_fraction": 0.6944444179534912, "alphanum_fraction": 0.75, "avg_line_length": 17.5, "blob_id": "c853f713cd302b2cd9bfc59bd7b783acdef17e55", "content_id": "d72628ec3e2889b51d8152c38e845502d183a98d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 36, "license_type": "no_license", "max_line_length": 24, "num_lines": 2, "path": "/python/MutuallyImportProblem/bar1.py", "repo_name": "zhou-zheng/technote", "src_encoding": "UTF-8", "text": "from foo1 import foo_var\nbar_var = 2" }, { "alpha_fraction": 0.6944444179534912, "alphanum_fraction": 0.75, "avg_line_length": 17.5, "blob_id": "53a4a88bb3cf53b6c67c2278d70c9a959279cebe", "content_id": "6f5ff1129748cbee5270418e97393d62a25f60d5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 36, "license_type": "no_license", "max_line_length": 24, "num_lines": 2, "path": "/python/MutuallyImportProblem/foo3.py", "repo_name": "zhou-zheng/technote", "src_encoding": "UTF-8", "text": "foo_var = 1\nfrom bar3 import bar_var" }, { "alpha_fraction": 0.6944444179534912, "alphanum_fraction": 0.75, "avg_line_length": 17.5, "blob_id": "96bbedcf686852d2675bee9b947b3b4e19b51a36", "content_id": "de2f827357add34335d9c9b4fc4a0e81b18b9886", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 36, "license_type": "no_license", 
"max_line_length": 24, "num_lines": 2, "path": "/python/MutuallyImportProblem/bar4.py", "repo_name": "zhou-zheng/technote", "src_encoding": "UTF-8", "text": "bar_var = 2\nfrom foo4 import foo_var" }, { "alpha_fraction": 0.6944444179534912, "alphanum_fraction": 0.75, "avg_line_length": 17.5, "blob_id": "d5104b34feb140efb322459b96cb5e264d644a28", "content_id": "9a9e63273b57929b1409445bb0fbb34678f75863", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 36, "license_type": "no_license", "max_line_length": 24, "num_lines": 2, "path": "/python/MutuallyImportProblem/bar3.py", "repo_name": "zhou-zheng/technote", "src_encoding": "UTF-8", "text": "from foo3 import foo_var\nbar_var = 2" }, { "alpha_fraction": 0.6944444179534912, "alphanum_fraction": 0.75, "avg_line_length": 17.5, "blob_id": "bff222747c3d4d33a973e210d47cddac598f7d78", "content_id": "7ed5b6cb45cf38541835aac69e245cc58eeff8db", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 36, "license_type": "no_license", "max_line_length": 24, "num_lines": 2, "path": "/python/MutuallyImportProblem/foo1.py", "repo_name": "zhou-zheng/technote", "src_encoding": "UTF-8", "text": "from bar1 import bar_var\nfoo_var = 1" }, { "alpha_fraction": 0.6944444179534912, "alphanum_fraction": 0.75, "avg_line_length": 17.5, "blob_id": "8b33e3ea26478fc01e110ed5728caca17823e1de", "content_id": "92efd612600cb68e5c4d61e2c099953a2aa79934", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 36, "license_type": "no_license", "max_line_length": 24, "num_lines": 2, "path": "/python/MutuallyImportProblem/foo4.py", "repo_name": "zhou-zheng/technote", "src_encoding": "UTF-8", "text": "from bar4 import bar_var\nfoo_var = 1" } ]
9
Guibrother32/M210
https://github.com/Guibrother32/M210
bacfdbe591a01fa65944bd80fbff747a4f955ce3
851bd85711d38b10c539c1ff65251b291efec6fe
19a878aab9c1815d6511f4464a6f0077d5a378e6
refs/heads/master
2020-09-12T05:12:20.434283
2019-11-18T15:04:14
2019-11-18T15:04:14
222,319,531
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4030456840991974, "alphanum_fraction": 0.4064297676086426, "avg_line_length": 25.635513305664062, "blob_id": "296365733603b1bac38b5aac796fdb1f29d6d9b7", "content_id": "96a8a48ea780082e7c5a24ea4db21ed99a2b96dc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2955, "license_type": "no_license", "max_line_length": 124, "num_lines": 107, "path": "/Simplex/model/F.py", "repo_name": "Guibrother32/M210", "src_encoding": "UTF-8", "text": "'''\r\nCreated on 6 Jul 2015\r\n@author: leo\r\n'''\r\nfrom fractions import Fraction\r\n\r\nclass F(Fraction):\r\n '''\r\n Classe que representa um numero fracionario, extendendo fractions.Fraction\r\n ''' \r\n def __init__(self,n,m=Fraction(0)):\r\n self.fraction = Fraction(n)\r\n self.m = Fraction(m)\r\n \r\n def __repr__(self):\r\n \"\"\"repr(self)\"\"\"\r\n return str(float(self.fraction)) if self.m == 0 else str(float(self.fraction)) + ' + (' + str(float(self.m)) + '*M)'\r\n\r\n\r\n def __str__(self):\r\n \"\"\"str(self)\"\"\"\r\n return str(float(self.fraction)) if self.m == 0 else str(float(self.fraction)) + ' + (' + str(float(self.m)) + '*M)'\r\n \r\n def __eq__(self, f):\r\n \"\"\"a == b\"\"\"\r\n if type(f) is not type(self):\r\n f = F(f)\r\n \r\n return self.fraction.__eq__(f.fraction) and self.m.__eq__(f.m)\r\n\r\n def __add__(self, f):\r\n \"\"\"a + b\"\"\"\r\n if type(f) is not type(self):\r\n f = F(f)\r\n \r\n return F(self.fraction.__add__(f.fraction),self.m.__add__(f.m))\r\n\r\n def ___sub__(self, f):\r\n \"\"\"a - b\"\"\"\r\n if type(f) is not type(self):\r\n f = F(f)\r\n \r\n return F(self.fraction.__sub__(f.fraction),self.m.___sub__(f.m))\r\n\r\n def __mul__(self, f):\r\n \"\"\"a * b\"\"\"\r\n if type(f) is not type(self):\r\n f = F(f)\r\n \r\n if f.m == 0:\r\n return F(self.fraction.__mul__(f.fraction))\r\n else:\r\n return F(self.fraction.__mul__(f.fraction),self.m.__mul__(f.m)) \r\n\r\n def __div__(self, f):\r\n \"\"\"a / b\"\"\"\r\n if type(f) is not type(self):\r\n f = F(f)\r\n \r\n if f.m == 0:\r\n return F(self.fraction.__div__(f.fraction))\r\n else:\r\n return F(self.fraction.__div__(f.fraction),self.m.__div__(f.m))\r\n \r\n def __lt__(self, f):\r\n \"\"\"a < b\"\"\"\r\n if type(f) is not type(self):\r\n f = F(f)\r\n \r\n if self.m == f.m:\r\n return self.fraction.__lt__(f.fraction)\r\n \r\n else: \r\n return self.m.__lt__(f.m)\r\n \r\n def __gt__(self, f):\r\n \"\"\"a > b\"\"\"\r\n if type(f) is not type(self):\r\n f = F(f)\r\n \r\n if self.m == f.m:\r\n return self.fraction.__gt__(f.fraction)\r\n \r\n else: \r\n return self.m.__gt__(f.m)\r\n\r\n def __le__(self, f):\r\n \"\"\"a <= b\"\"\"\r\n if type(f) is not type(self):\r\n f = F(f)\r\n \r\n if self.m == f.m:\r\n return self.fraction.__le__(f.fraction)\r\n \r\n else: \r\n return self.m.__le__(f.m)\r\n\r\n def __ge__(self, f):\r\n \"\"\"a >= b\"\"\"\r\n if type(f) is not type(self):\r\n f = F(f)\r\n \r\n if self.m == f.m:\r\n return self.fraction.__ge__(f.fraction)\r\n \r\n else: \r\n return self.m.__ge__(f.m)" }, { "alpha_fraction": 0.35421517491340637, "alphanum_fraction": 0.38612017035484314, "avg_line_length": 31.193798065185547, "blob_id": "c68ed1099d43f38ca9e72a16db815cc5d7a68a6d", "content_id": "3e54a8020d16074528e2fb9c66f10f9dc8042e66", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4295, "license_type": "no_license", "max_line_length": 121, "num_lines": 129, "path": "/SIMPLEXO.py", "repo_name": "Guibrother32/M210", "src_encoding": 
"UTF-8", "text": "##########################################################################################################\r\n# #\r\n# Criado em: 14 Nov 2019 #\r\n# Autores: Guilherme Costa e Gustavo Marins #\r\n# Discilplina: M210 #\r\n# Professor: Yvo #\r\n# #\r\n##########################################################################################################\r\n\r\n\r\nimport numpy as np\r\n\r\n#Entrada do numero de linhas e colunas\r\n\r\nprint(\"Entre com o numero de linhas:\")\r\nln = input()\r\nlinhas = int (ln)\r\n\r\nprint(\"Entre com o numero de colunas:\")\r\ncl = input()\r\ncolunas = int (cl)\r\n\r\ns = []\r\nk = 0\r\ntDaMatriz = linhas * colunas\r\ntabela = np.array(tDaMatriz)\r\n\r\nfor ln in range(0, linhas):\r\n for cln in range(0, colunas):\r\n print(\"Entre com os valores do PPl:\")\r\n k = input()\r\n s.append(float(k))\r\n \r\ntabela = s\r\ntabela = np.reshape(tabela, (linhas, colunas))\r\nspxPPL = tabela\r\n\r\n# Exemplo para teste retirado em: https://www.youtube.com/watch?v=uendv1Khpcw\r\n# [3,6] [1 -10 -12 0 0 0 0 1 1 1 0 100 0 1 3 0 1 270]\r\n\r\n# Lucro Total($) 1170.00\r\n# Shadow Price($) 9.00\r\n# Shadow Price($) 1.00\r\n# Money($) 15.00\r\n# Money($) 85.00\r\n# [['1' '-5' '-7' '-8' '0' '0' '0']\r\n #['0.0' '1.0' '1.0' '2.0' '1.0' '0.0' '1190.0']\r\n #['0.0' '3.0' '4.5' '1.0' '0.0' '1.0' '4000.0']]\r\n\r\n\r\n\r\n\r\n\r\ndef simplexoSolver(spxPPL, linhas, colunas):\r\n negativer = [0, None] #primeira coluna\r\n for cln in range(colunas):\r\n if spxPPL[0, cln] < 0:\r\n if abs(spxPPL[0, cln]) > negativer[0]:\r\n negativer[0] = spxPPL[0, cln]\r\n negativer[1] = cln\r\n colunaDoPivot = negativer[1]\r\n\r\n#----------------------------------------------------------------------------------------------------------------------#\r\n\r\n linhaDoPivot = [999, None]\r\n for linha in range(linhas):\r\n LD = spxPPL[linha, colunaDoPivot]\r\n\r\n if LD == 0:\r\n amount = 999\r\n\r\n else: amount = spxPPL[linha, colunas - 1] / LD\r\n\r\n if amount > 0:\r\n if amount < linhaDoPivot[0]:\r\n linhaDoPivot[0] = amount\r\n linhaDoPivot[1] = linha\r\n linhaDoPivot = linhaDoPivot[1]\r\n\r\n #o pivot é achado por:\r\n pivot = spxPPL[linhaDoPivot][colunaDoPivot]\r\n\r\n#----------------------------------------------------------------------------------------------------------------------#\r\n\r\n #achando a NLP:\r\n spxPPL[linhaDoPivot, :] = np.divide(spxPPL[linhaDoPivot, :], pivot)\r\n for j in range(linhas):\r\n if j == linhaDoPivot:\r\n continue\r\n #achando as NL:\r\n spxPPL[j, :] = np.add(spxPPL[j, :], spxPPL[linhaDoPivot, :] * -(spxPPL[j, colunaDoPivot])) #calculo feito\r\n\r\n# ----------------------------------------------------------------------------------------------------------------------#\r\n\r\n negatory = False\r\n for h in range(colunas):\r\n if spxPPL[0, h] < 0:\r\n negatory = True\r\n break\r\n\r\n if not negatory: return spxPPL\r\n else: return simplexoSolver(spxPPL, linhas, colunas)\r\n\r\nlinhas = np.size(spxPPL, 0)\r\ncolunas = np.size(spxPPL, 1)\r\ncolunaEND = colunas - 1\r\nresposta = simplexoSolver(spxPPL, linhas, colunas)\r\n\r\nlucro = resposta[0, colunaEND]\r\nprint(\"\\n\\n=======================================\")\r\nprint(\"Lucro Total R$: {:.2f}\".format(lucro))\r\n\r\naux=0;\r\n\r\nfor a in range(colunas - linhas, colunaEND):\r\n sombra = resposta[0, a]\r\n aux=aux+1;\r\n print(\"X\"+str(aux)+\"-> Preco Sombra R$: {:.2f}\".format(sombra))\r\n\r\naux=0;\r\nfor b in range(1, linhas):\r\n money = resposta[b, colunaEND]\r\n aux=aux+1;\r\n 
print(\"Investimento\"+\"-> R$ {:.2f}\".format(money))\r\n\r\nprint(\"=======================================\")\r\n\r\n\r\n#----------------------------------------------------------------------------------------------------------------------#\r\n\r\n\r\n\r\n\r\n\r\n\r\n" }, { "alpha_fraction": 0.4390983283519745, "alphanum_fraction": 0.4551805853843689, "avg_line_length": 32.59360885620117, "blob_id": "debf47d1371659239c0b233e5ffd6d6dcd734505", "content_id": "4cac88a690890290b150c025d42d05310ebd473f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7594, "license_type": "no_license", "max_line_length": 124, "num_lines": 219, "path": "/Simplex/sem título1.py", "repo_name": "Guibrother32/M210", "src_encoding": "UTF-8", "text": "##########################################################################################################\r\n# #\r\n# Criado em: 14 Nov 2019 #\r\n# Autores: Guilherme Costa e Gustavo Marins #\r\n# Discilplina: M210 Professor: Yvo #\r\n# #\r\n##########################################################################################################\r\nfrom numpy import matrix\r\nfrom model.F import F\r\n\r\nclass Tabela(object):\r\n\r\n #definicoes primarias\r\n \r\n def princiapl(self, matrix, matrix2):\r\n \r\n A = matrix('50 80;');\r\n B = matrix('3 2;1 1;1 2');\r\n C = matrix('600;240;300');\r\n \r\n np.concatenate((A, B));\r\n np.concatenate((B, C));\r\n \r\n \r\n return None\r\n \r\n #Funcao principal, usa do metodo da File F.py para auxiliar com fracionarios.\r\n def __init__(self, FO,restricoes=None):\r\n\r\n #fase de preenchimento\r\n #linhas da tabela das restrições\r\n self.linhaR = []\r\n \r\n numVE = len(restricoes) + len([(c,t,tr) for (c,t,tr) in restricoes if t!='<='])\r\n \r\n #linha da função objetivo\r\n self.linhaFO = [1] + [c*(-1) for c in FO] + [0]*numVE + [0] \r\n \r\n for i,(coef,tipo,termo) in enumerate(restricoes):\r\n colExtras = [0]*numVE\r\n \r\n if tipo == '<=':\r\n colExtras[i] = 1 \r\n \r\n elif tipo=='=':\r\n colExtras[i] = 1 \r\n self.linhaFO[1 + len(coef) + i] = F(0, F(1))\r\n \r\n elif tipo=='>=':\r\n colExtras[i] = -1\r\n colExtras[i+1] = 1 \r\n self.linhaFO[1 + len(coef) + i + 1] = F(0, F(1))\r\n \r\n self.linhaR.append(self._converteParaF([0] + coef + colExtras + [termo]))\r\n \r\n def _converteParaF(self,lista):\r\n return [F(e) for e in lista]\r\n \r\n def printTabela(self): \r\n tabela = [self.linhaFO] + self.linhaR \r\n print('\\n', matrix([[str(f) for f in l] for l in tabela]))\r\n \r\n #Pivotamento, elemento pivo é escolhido a partir da coluna mais negativa e a linha LD/elementos da coluna exceto o Z\r\n\r\n def _pivoteamento(self, pi, pj):\r\n #elemento pivo\r\n p = self.linhaR[pi][pj] \r\n #divide a linha do pivo pelo elemento pivo \r\n self.linhaR[pi] = [x/p for x in self.linhaR[pi]] \r\n \r\n #para cada elemento da linha correspondente a funcao objetivo multiplica cada elemento da linha pivo\r\n tempLinha = [self.linhaFO[pj]* x for x in self.linhaR[pi]] \r\n #subtrai cada elemendo da linha da funcao objetivo pelo fator calculado acima\r\n self.linhaFO = [self.linhaFO[i] - tempLinha[i] for i in range(len(tempLinha))]\r\n \r\n #para cara linha corresponte a restricao i repete o mesmo procedimento feito na linha fa F.O.\r\n for i,restricao in enumerate(self.linhaR):\r\n if i != pi: \r\n tempLinha = [restricao[pj]* x for x in self.linhaR[pi]] \r\n self.linhaR[i] = [restricao[i] - tempLinha[i] for i in range(len(tempLinha))] \r\n \r\n \r\n #Encontra a coluna da 
variavel que entra na base \r\n \r\n def elemento_Entra(self):\r\n menor_Coef = min(self.linhaFO[1:-1])\r\n \r\n if menor_Coef >= 0: \r\n return None\r\n else:\r\n #retorna seu indice\r\n return self.linhaFO[0:-1].index(menor_Coef)\r\n \r\n \r\n #Encontra a linha da variavel que sai da base \r\n \r\n def elemento_Sai(self, coluna_pivo): \r\n termos = [r[-1] for r in self.linhaR]\r\n coef_var_entra = [r[coluna_pivo] for r in self.linhaR]\r\n \r\n razoes = []\r\n for i,termo in enumerate(termos):\r\n if coef_var_entra[i] == 0:\r\n razoes.append(F(1,1)) \r\n else:\r\n razoes.append(termo/coef_var_entra[i])\r\n \r\n menorRazaoPositiva = min([r for r in razoes if r > 0]) \r\n #retorna seu indice\r\n return razoes.index(menorRazaoPositiva)\r\n \r\n @property\r\n def val_Otimo(self):\r\n if not self.solO_encontrada():\r\n self.executar()\r\n \r\n return self.linhaFO[-1]\r\n \r\n \r\n @property \r\n def sol_Otima(self):\r\n if not self.solO_encontrada():\r\n self.executar() \r\n \r\n dentro = self.varInBase\r\n fora = self.varOutBase\r\n \r\n solucao = []\r\n\r\n for val in dentro:\r\n for l in self.linhaR:\r\n if l[val] == F(1):\r\n solucao.append((val,l[-1]))\r\n break\r\n \r\n solucao += [(val,F(0)) for val in fora]\r\n\r\n return [(t[0],float(t[1])) for t in solucao]\r\n \r\n @property\r\n def varInBase(self): \r\n dentroDaBase = [] \r\n for c in range(1,len(self.linhaFO)-1):\r\n valoresColuna = [l[c] for l in self.linhaR]\r\n \r\n numDeZeros = len([z for z in valoresColuna if z==F(0)])\r\n numDeUms = len([u for u in valoresColuna if u==F(1)])\r\n \r\n if numDeUms == 1 and numDeZeros == len(self.linhaR) - 1 :\r\n dentroDaBase.append(c)\r\n\r\n return dentroDaBase\r\n \r\n @property\r\n def varOutBase(self):\r\n return [i for i in range(1,len(self.linhaFO)-1) if i not in self.varInBase]\r\n \r\n \r\n\r\n \r\n \r\n\r\n #pela funcao objetivo, solucao otima encontrada?\r\n def solO_encontrada(self):\r\n if min(self.linhaFO[1:-1]) >= 0: \r\n return True\r\n else:\r\n return False\r\n \r\n #comeco do metodo Simplex\r\n def executar(self):\r\n self.printTabela()\r\n \r\n while not self.solO_encontrada():\r\n c = self.elemento_Entra()\r\n r = self.elemento_Sai(c)\r\n \r\n self._pivoteamento(r,c)\r\n \r\n print('\\nColuna do pivo: %s\\nLinha do pivo: %s'%(c+1,r))\r\n \r\n self.printTabela()\r\n \r\n\r\n \r\ndef getNomeDeVariavel(index):\r\n return 'x' + str(index)\r\n\r\ndef toStringComNomes(lista):\r\n if type(lista[0]) is type(0):\r\n return [getNomeDeVariavel(i) for i in lista]\r\n \r\n elif type(lista[0]) is type(()):\r\n return [(getNomeDeVariavel(l[0]),l[1]) for l in lista]\r\n\r\nif __name__ == '__main__':\r\n \r\n #Como funciona a entrada da FO e restricoes?\r\n \r\n #1o passo FO, colocar o valor de cada argumento na primeira chave\r\n \r\n #2o passo Restricoes, caso seja nulo, colocar 0;\r\n\r\n #CHAPAS METALICAS\r\n t = Tabela([5,7,8],restricoes=[([1,1,2],\"<=\", 1190),([3,4.5,1],\"<=\", 4000)])\r\n \r\n #GOIABA\r\n #t = Tabela([5,7],restricoes=[([0.25,0.5],\"<=\", 50),([3,0],\"<=\", 250),([0,1.5],\"<=\", 100)])\r\n \r\n \r\n print(\"\\nValor otimo: R$%s (%s)\" % (float(t.val_Otimo),t.val_Otimo))\r\n\r\n print(\"\\nSolução otima: %s\" % (toStringComNomes(t.sol_Otima)))\r\n \r\n print(\"\\nPreço Sombra: R$\" % ())\r\n \r\n def printTabela2(self): \r\n tabela = [self.linhaFO] + self.linhaR \r\n print('\\n', matrix([[str(f) for f in l] for l in tabela]))\r\n \r\n" }, { "alpha_fraction": 0.7551020383834839, "alphanum_fraction": 0.8163265585899353, "avg_line_length": 23.5, "blob_id": 
"21dfb2f5cb9e535e95aafdd1f07b88078bb5a202", "content_id": "fb3c9921c5c973e2c145fa8f1d4454214fdf7f70", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 49, "license_type": "no_license", "max_line_length": 41, "num_lines": 2, "path": "/README.md", "repo_name": "Guibrother32/M210", "src_encoding": "UTF-8", "text": "# M210\nImplementacao do metodo simplex em python\n" } ]
4
nmeylan/zmeventnotification
https://github.com/nmeylan/zmeventnotification
1366c9120b2c0050219f4dedf19cc98a7338a195
1ec30a342745f5221a49f05ba8da7d4eed5bce20
633e2347ae46bbbb3bb68533407a32777017596f
refs/heads/master
2022-11-29T16:39:25.235847
2020-07-24T13:18:54
2020-07-28T15:24:18
282,207,149
0
0
null
2020-07-24T11:52:44
2020-07-24T11:49:39
2020-07-23T21:14:19
null
[ { "alpha_fraction": 0.5989583134651184, "alphanum_fraction": 0.6041666865348816, "avg_line_length": 32.82352828979492, "blob_id": "1c5ddcc743979ce0665854f3f085878df51c8292", "content_id": "9a98e00ccb7eaa6bb7aaccdc4e72a49d5ced6563", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 576, "license_type": "no_license", "max_line_length": 92, "num_lines": 17, "path": "/make_changelog.sh", "repo_name": "nmeylan/zmeventnotification", "src_encoding": "UTF-8", "text": "#!/bin/bash\nif [ -z \"$1\" ]; then\n echo \"Inferring version name from hooks/zmes_hook_helpers/__init__.py\"\n if [[ `cat hook/zmes_hook_helpers/__init__.py` =~ ^__version__\\ =\\ \\\"(.*)\\\" ]];\n then\n TAGVER=${BASH_REMATCH[1]}\n else\n echo \"Bad version parsing\"\n exit\n fi\nelse\n TAGVER=$1\nfi\nVER=\"${TAGVER/v/}\"\nread -p \"Future release is v${VER}. Please press any key to confirm...\"\ngithub_changelog_generator -u pliablepixels -p zmeventnotification --future-release v${VER}\n#github_changelog_generator --future-release v${VER}\n\n" }, { "alpha_fraction": 0.6781609058380127, "alphanum_fraction": 0.6798029541969299, "avg_line_length": 26.454545974731445, "blob_id": "d6f6d2ced4273cb0d512f895d1c5b275bfae0871", "content_id": "f94dc8e16bfd35c4b2c8ea65e998dfbeada6f58d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 609, "license_type": "permissive", "max_line_length": 103, "num_lines": 22, "path": "/hook/train_faces.py", "repo_name": "nmeylan/zmeventnotification", "src_encoding": "UTF-8", "text": "\n#!/usr/bin/python3\nimport argparse\nimport ssl\nimport zmes_hook_helpers.log as log\nimport zmes_hook_helpers.common_params as g\nimport zmes_hook_helpers.utils as utils\nimport zmes_hook_helpers.face_train as train\n\n\nif __name__ == \"__main__\":\n g.ctx = ssl.create_default_context()\n ap = argparse.ArgumentParser()\n ap.add_argument('-c', '--config',default='/etc/zm/objectconfig.ini' , help='config file with path')\n\n args, u = ap.parse_known_args()\n args = vars(args)\n\n log.init(process_name='zm_face_train', dump_console=True)\n utils.process_config(args, g.ctx)\n \n \n train.train()\n \n" }, { "alpha_fraction": 0.4883720874786377, "alphanum_fraction": 0.5813953280448914, "avg_line_length": 20.5, "blob_id": "cad96b87c39da0d07e675e341783273ea03f36a4", "content_id": "8025431701ffdb4bc25244ba1da39133aa836ae9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 43, "license_type": "permissive", "max_line_length": 22, "num_lines": 2, "path": "/hook/zmes_hook_helpers/__init__.py", "repo_name": "nmeylan/zmeventnotification", "src_encoding": "UTF-8", "text": "__version__ = \"5.15.7\"\nVERSION=__version__\n" }, { "alpha_fraction": 0.5833831429481506, "alphanum_fraction": 0.5887627005577087, "avg_line_length": 27.84482765197754, "blob_id": "394accf68ffa7f965b4da3e75e2da7a99263f5da", "content_id": "959b50dd9f395e3d367df92f5471e756a8d0fab2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1673, "license_type": "permissive", "max_line_length": 93, "num_lines": 58, "path": "/hook/zmes_hook_helpers/log.py", "repo_name": "nmeylan/zmeventnotification", "src_encoding": "UTF-8", "text": "import logging\nimport logging.handlers\nimport zmes_hook_helpers.common_params as g\nimport pyzm.ZMLog as zmlog\nfrom inspect import getframeinfo, stack\n\n\nclass wrapperLogger():\n def __init__(self, 
name, override, dump_console):\n zmlog.init(name=name, override=override)\n self.dump_console = dump_console\n\n \n\n def debug(self, msg, level=1):\n idx = min(len(stack()), 1)\n caller = getframeinfo(stack()[idx][0])\n zmlog.Debug(level, msg, caller)\n if (self.dump_console):\n print('CONSOLE:' + msg)\n\n def info(self, msg):\n idx = min(len(stack()), 1)\n caller = getframeinfo(stack()[idx][0])\n zmlog.Info(msg, caller)\n if (self.dump_console):\n print('CONSOLE:' + msg)\n\n def error(self, msg):\n idx = min(len(stack()), 1)\n caller = getframeinfo(stack()[idx][0])\n zmlog.Error(msg, caller)\n if (self.dump_console):\n print('CONSOLE:' + msg)\n\n def fatal(self, msg):\n idx = min(len(stack()), 1)\n caller = getframeinfo(stack()[idx][0])\n zmlog.Fatal(msg, caller)\n if (self.dump_console):\n print('CONSOLE:' + msg)\n\n def setLevel(self, level):\n pass\n \n # wrappers to work with pyzm\n # Guess I forgot capitalization\n def Debug (self,level, msg ):\n self.debug(msg, level)\n def Info (self, msg):\n self.info (msg)\n def Error (self, msg):\n self.error (msg)\n def Fatal (self, msg):\n self.fatal(msg)\n\ndef init(process_name=None, override={}, dump_console=False):\n g.logger = wrapperLogger(name=process_name, override=override, dump_console=dump_console)\n" }, { "alpha_fraction": 0.4986315071582794, "alphanum_fraction": 0.50995272397995, "avg_line_length": 40.01020431518555, "blob_id": "889f95850d772821c52fec1d197e7311111e7272", "content_id": "d0216832e6c48a27c0e9dc0c83aa3276bd98fbbc", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8038, "license_type": "permissive", "max_line_length": 274, "num_lines": 196, "path": "/hook/zmes_hook_helpers/yolo.py", "repo_name": "nmeylan/zmeventnotification", "src_encoding": "UTF-8", "text": "import numpy as np\nimport zmes_hook_helpers.common_params as g\nimport zmes_hook_helpers.log as log\nimport sys\nimport cv2\nimport time\nimport datetime\nimport re\n\n# Class to handle Yolo based detection\n\n\nclass Yolo:\n\n # The actual CNN object detection code\n # opencv DNN code credit: https://github.com/arunponnusamy/cvlib\n\n def __init__(self):\n self.initialize = True\n self.net = None\n self.classes = None\n\n def populate_class_labels(self):\n if g.config['yolo_type'] == 'tiny':\n class_file_abs_path = g.config['tiny_labels']\n else:\n class_file_abs_path = g.config['labels']\n f = open(class_file_abs_path, 'r')\n self.classes = [line.strip() for line in f.readlines()]\n\n def get_classes(self):\n return self.classes\n\n def get_output_layers(self):\n layer_names = self.net.getLayerNames()\n output_layers = [\n layer_names[i[0] - 1] for i in self.net.getUnconnectedOutLayers()\n ]\n return output_layers\n\n def detect(self, image):\n\n Height, Width = image.shape[:2]\n modelW = 416\n modelH = 416\n\n g.logger.debug(\n '|---------- YOLO (input image: {}w*{}h, resized to: {}w*{}h) ----------|'\n .format(Width, Height, modelW, modelH))\n scale = 0.00392 # 1/255, really. 
Normalize inputs.\n\n if g.config['yolo_type'] == 'tiny':\n config_file_abs_path = g.config['tiny_config']\n weights_file_abs_path = g.config['tiny_weights']\n else:\n config_file_abs_path = g.config['config']\n weights_file_abs_path = g.config['weights']\n\n if self.initialize:\n g.logger.debug('Initializing Yolo')\n g.logger.debug('config:{}, weights:{}'.format(\n config_file_abs_path, weights_file_abs_path),level=2)\n start = datetime.datetime.now()\n self.populate_class_labels()\n self.net = cv2.dnn.readNet(weights_file_abs_path,\n config_file_abs_path)\n #self.net = cv2.dnn.readNetFromDarknet(config_file_abs_path, weights_file_abs_path)\n\n if g.config['use_opencv_dnn_cuda'] == 'yes':\n (maj, minor, patch) = cv2.__version__.split('.')\n min_ver = int(maj + minor)\n if min_ver < 42:\n g.logger.error('Not setting CUDA backend for OpenCV DNN')\n g.logger.error(\n 'You are using OpenCV version {} which does not support CUDA for DNNs. A minimum of 4.2 is required. See https://www.pyimagesearch.com/2020/02/03/how-to-use-opencvs-dnn-module-with-nvidia-gpus-cuda-and-cudnn/ on how to compile and install openCV 4.2'\n .format(cv2.__version__))\n else:\n g.logger.debug(\n 'Setting CUDA backend for OpenCV. If you did not set your CUDA_ARCH_BIN correctly during OpenCV compilation, you will get errors during detection related to invalid device/make_policy'\n )\n self.net.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)\n self.net.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)\n else:\n g.logger.debug(\"Not using CUDA backend\")\n\n diff_time = (datetime.datetime.now() - start).microseconds / 1000\n g.logger.debug(\n 'YOLO initialization (loading model from disk) took: {} milliseconds'\n .format(diff_time))\n self.initialize = False\n\n start = datetime.datetime.now()\n ln = self.net.getLayerNames()\n ln = [ln[i[0] - 1] for i in self.net.getUnconnectedOutLayers()]\n blob = cv2.dnn.blobFromImage(image,\n scale, (modelW, modelH), (0, 0, 0),\n True,\n crop=False)\n self.net.setInput(blob)\n outs = self.net.forward(ln)\n\n diff_time = (datetime.datetime.now() - start).microseconds / 1000\n g.logger.debug(\n 'YOLO detection took: {} milliseconds'.format(diff_time))\n\n class_ids = []\n confidences = []\n boxes = []\n\n nms_threshold = 0.4\n conf_threshold = 0.2\n\n # first nms filter out with a yolo confidence of 0.2 (or less)\n if g.config['yolo_min_confidence'] < conf_threshold:\n conf_threshold = g.config['yolo_min_confidence']\n\n for out in outs:\n for detection in out:\n scores = detection[5:]\n class_id = np.argmax(scores)\n confidence = scores[class_id]\n center_x = int(detection[0] * Width)\n center_y = int(detection[1] * Height)\n w = int(detection[2] * Width)\n h = int(detection[3] * Height)\n x = center_x - w / 2\n y = center_y - h / 2\n class_ids.append(class_id)\n confidences.append(float(confidence))\n boxes.append([x, y, w, h])\n\n start = datetime.datetime.now()\n indices = cv2.dnn.NMSBoxes(boxes, confidences, conf_threshold,\n nms_threshold)\n diff_time = (datetime.datetime.now() - start).microseconds / 1000\n g.logger.debug(\n 'YOLO NMS filtering took: {} milliseconds'.format(diff_time))\n\n bbox = []\n label = []\n conf = []\n\n # now filter out with configured yolo confidence, so we can see rejections in log\n for i in indices:\n i = i[0]\n box = boxes[i]\n x = box[0]\n y = box[1]\n w = box[2]\n h = box[3]\n\n object_area = w * h\n max_object_area = object_area\n\n if g.config['max_object_size']:\n g.logger.debug('Max object size found to 
be: {}'.format(g.config['max_object_size']),level=3)\n # Let's make sure it's the right size\n m = re.match('(\\d*\\.?\\d*)(px|%)?$', g.config['max_object_size'],\n re.IGNORECASE)\n if m:\n max_object_area = float(m.group(1))\n if m.group(2) == '%':\n max_object_area = float(m.group(1))/100.0*(modelH * modelW)\n g.logger.debug ('Converted {}% to {}'.format(m.group(1), max_object_area), level=2);\n else:\n g.logger.error('max_object_size misformatted: {} - ignoring'.format(\n g.config['max_object_size']))\n \n if (object_area > max_object_area):\n g.logger.debug ('Ignoring object:{}, as its area: {}px exceeds max_object_area of {}px'.format(str(self.classes[class_ids[i]]), object_area, max_object_area))\n continue\n\n if confidences[i] >= g.config['yolo_min_confidence']:\n bbox.append([\n int(round(x)),\n int(round(y)),\n int(round(x + w)),\n int(round(y + h))\n ])\n label.append(str(self.classes[class_ids[i]]))\n conf.append(confidences[i])\n g.logger.info(\n 'object:{} at {} has an acceptable confidence:{} compared to min confidence of: {}, adding'\n .format(label[-1], bbox[-1], conf[-1],\n g.config['yolo_min_confidence']))\n else:\n g.logger.info(\n 'rejecting object:{} at {} because its confidence is :{} compared to min confidence of: {}'\n .format(str(self.classes[class_ids[i]]), [\n int(round(x)),\n int(round(y)),\n int(round(x + w)),\n int(round(y + h))\n ], confidences[i], g.config['yolo_min_confidence']))\n\n return bbox, label, conf\n" } ]
5
alikgs19/aazDB
https://github.com/alikgs19/aazDB
d1bdef15a29410d2908f4f48a3bb5ce9e75e25f7
58a8c267a922d0132321a4dd13e3f037fc459b06
23c49552063dbae5817e1d7af8b0098d8815ab88
refs/heads/master
2021-01-24T08:30:01.724047
2017-06-05T09:36:44
2017-06-05T09:36:44
93,384,045
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.49494948983192444, "alphanum_fraction": 0.4989020526409149, "avg_line_length": 22.010101318359375, "blob_id": "0ff20c254de77fa618584f75c457639c55f243dc", "content_id": "57e476cd3c653a3b63234d6949543c92f8b47ab7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2277, "license_type": "no_license", "max_line_length": 88, "num_lines": 99, "path": "/polls/static/polls/Main.js", "repo_name": "alikgs19/aazDB", "src_encoding": "UTF-8", "text": "var app = angular.module(\"footsi\", [\"ngRoute\"]);\n\napp.config(function ($routeProvider, $interpolateProvider) {\n\n\n $interpolateProvider.startSymbol('{$');\n $interpolateProvider.endSymbol('$}');\n\n $routeProvider\n .when(\"/\", {\n title : 'Footsi',\n templateUrl : \"../static/polls/Newclubs.htm\",\n controller : \"clubListController\"\n })\n .when(\"/clubinfo\", {\n templateUrl : \"../static/polls/Newclubinfo.htm\",\n controller : \"clubInfoController\"\n })\n .when(\"/newmatch\", {\n templateUrl : \"../static/polls/Newmatch.htm\",\n controller : \"matchController\"\n });\n});\n\napp.factory('sharedObject', function () {\n\n var state = {};\n\n return {\n getObject: function () {\n return state;\n },\n setObject: function (newObject) {\n state = newObject;\n }\n };\n\n});\n\napp.controller(\"clubListController\", function ($http, $location, $scope, sharedObject) {\n\n\n $scope.newMatch = function () {\n\n $http({\n url: \"getteams/\",\n method: \"GET\"\n }).then(function (response) {\n\n if(response.status !== 200) {\n\n alert('Something baaad happend');\n return;\n }\n\n sharedObject.setObject(response.data);\n $location.path(\"/newmatch\")\n\n console.log(response.data);\n })\n }\n $scope.goToClub = function(clubKey) {\n\n console.log(\"club key : \" + clubKey);\n $http({\n url: \"getclubinfo/\",\n method: \"GET\",\n params: {clubKey : clubKey}\n }).then(function (response) {\n if(response.status !== 200) {\n\n alert('Something baaad happend');\n return;\n }\n\n console.log(response.data);\n sharedObject.setObject(response.data);\n $location.path(\"/clubinfo\");\n });\n\n }\n\n $http({\n url: \"getteams/\",\n method: \"GET\"\n }).then(function (response) {\n\n if(response.status !== 200) {\n\n alert('Something baaad happend');\n return;\n }\n\n $scope.clubs = response.data.clubslist;\n\n console.log($scope.clubs);\n })\n\n});" }, { "alpha_fraction": 0.6615384817123413, "alphanum_fraction": 0.6615384817123413, "avg_line_length": 22.894737243652344, "blob_id": "fb00fe27d211310c729937d51338e118ad3fa27f", "content_id": "ac1102649dd03aa73da0dc0a8de883c7e8d0ca77", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 455, "license_type": "no_license", "max_line_length": 88, "num_lines": 19, "path": "/polls/static/polls/clubInfoController.js", "repo_name": "alikgs19/aazDB", "src_encoding": "UTF-8", "text": "var app = angular.module('footsi');\n\napp.controller(\"clubInfoController\", function ($http, $location, $scope, sharedObject) {\n\n\n\n\n $scope.clubMatchs = sharedObject.getObject().clubMatchs;\n $scope.stadiumInfo = sharedObject.getObject().stadiumInfo;\n $scope.clubInfo = sharedObject.getObject().clubInfo;\n\n console.log(\"response : \" );\n console.log($scope.response);\n\n $scope.goBack = function () {\n $location.path(\"/\");\n\n }\n});\n\n" }, { "alpha_fraction": 0.66557377576828, "alphanum_fraction": 0.66557377576828, "avg_line_length": 29.600000381469727, "blob_id": 
"ced74923b80c2e09384026f0e81c3e15e262f2a8", "content_id": "edbca6dd6c120bcb17dde65b8517efadd746adbf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 305, "license_type": "no_license", "max_line_length": 66, "num_lines": 10, "path": "/polls/urls.py", "repo_name": "alikgs19/aazDB", "src_encoding": "UTF-8", "text": "from django.conf.urls import url\n\nfrom . import views\n\nurlpatterns = [\n url(r'^$', views.donothing, name='index'),\n url(r'^getteams/$', views.getteams, name='getteams'),\n url(r'^getclubinfo/$', views.getclubinfo, name='getclubinfo'),\n url(r'^playmatch/$', views.playmatch, name='playmatch'),\n]" }, { "alpha_fraction": 0.6072289347648621, "alphanum_fraction": 0.6185542345046997, "avg_line_length": 31.170541763305664, "blob_id": "f281e7dcb1fe395455f456fc2bd4bec2cde24b07", "content_id": "cfc99ae105ddd813e784e2609ccbe96bc15c826b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4150, "license_type": "no_license", "max_line_length": 112, "num_lines": 129, "path": "/polls/views.py", "repo_name": "alikgs19/aazDB", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.shortcuts import render\nimport unicodedata\n# Create your views here.\nfrom django.http import HttpResponse\nfrom django.http import JsonResponse\nimport pymssql\nimport json\n\nconfig = { 'SERVERIP' : '192.168.1.8',\n 'PASSWORD' : 'kgsali1234',\n 'USER' : 'alikgs',\n 'DATABASE' : 'FootsiDB',\n 'SERVERPORT' : 1433}\n\ndef donothing(request):\n return render(request, 'polls/index.html')\n\n\n\n\ndef playmatch(request):\n\n conn = pymssql.connect(server= config['SERVERIP'], port= config['SERVERPORT'], user=config['USER'],\n password=config['PASSWORD'], database=config['DATABASE'])\n cursor = conn.cursor()\n\n homeKey = request.GET.get('homeKey', '')\n guestKey = request.GET.get('guestKey', '')\n\n strHomeKey = unicodedata.normalize('NFKD', homeKey).encode('ascii','ignore')\n strGuestKey = unicodedata.normalize('NFKD', guestKey).encode('ascii', 'ignore')\n # cursor.callproc('doMatch', (strHomeKey, strGuestKey))\n cursor.callproc('doMatch', ('6'.unicode('utf8'), '5'))\n result = cursor.fetchall()\n\n response = {}\n response['response'] = result[0]\n\n return JsonResponse(response)\n\ndef getteams(self):\n conn = pymssql.connect(server=config['SERVERIP'], port=config['SERVERPORT'], user=config['USER'],\n password=config['PASSWORD'], database=config['DATABASE'])\n cursor = conn.cursor()\n cursor.execute(\"SELECT * FROM club\")\n response = {}\n result = []\n\n for row in cursor.fetchall():\n tempres = {}\n tempres['budget'] = row[0]\n tempres['name'] = row[1]\n tempres['key'] = row[2]\n tempres['stadium'] = row[3]\n\n result.append(tempres)\n\n response['clubslist'] = result\n\n cursor.execute(\"SELECT league.\")\n return JsonResponse(response)\n\n\n\ndef getclubinfo(request):\n conn = pymssql.connect(server=config['SERVERIP'], port=config['SERVERPORT'], user=config['USER'],\n password=config['PASSWORD'], database=config['DATABASE'])\n response = {}\n\n\n cursor = conn.cursor()\n cursor.execute(\"SELECT stadium.*\"\n \" FROM stadium INNER JOIN club ON stadium.[key] = club.stadium \"\n \"WHERE club.[key] = %s\", (request.GET.get('clubKey', '')))\n\n stadium = cursor.fetchall()[0]\n\n stadiumInfo = {}\n stadiumInfo['key'] = stadium[0]\n stadiumInfo['capacity'] = stadium[1]\n stadiumInfo['feild'] = stadium[2]\n stadiumInfo['name'] = stadium[3]\n 
stadiumInfo['price'] = stadium[4]\n stadiumInfo['wc'] = stadium[5]\n\n response['stadiumInfo'] = stadiumInfo\n\n cursor.execute(\"SELECT * FROM club WHERE club.[key] = %s\", (request.GET.get('clubKey', '')))\n clubInfo = cursor.fetchall()\n clubInfoObject = {}\n clubInfoObject['budget'] = clubInfo[0][0]\n clubInfoObject['name'] = clubInfo[0][1]\n clubInfoObject['key'] = clubInfo[0][2]\n clubInfoObject['stadium'] = clubInfo[0][3]\n\n response['clubInfo'] = clubInfoObject\n\n cursor.execute(\"SELECT * FROM match INNER JOIN club ON club.[key] = match.host OR club.[key] = match.guest \"\n \"WHERE club.[key] = %s\", (request.GET.get('clubKey', '')))\n clubMatchs = cursor.fetchall()\n\n response['clubMatchs'] = []\n for match in clubMatchs:\n matchInfoObject = {}\n matchInfoObject['key'] = match[0]\n\n cursor.execute(\"SELECT club.name FROM club WHERE club.[key] = %s\", (match[1]))\n hostClubName = cursor.fetchall()[0]\n matchInfoObject['host'] = hostClubName[0]\n\n cursor.execute(\"SELECT club.name FROM club WHERE club.[key] = %s\", (match[2]))\n guestClubName = cursor.fetchall()[0]\n matchInfoObject['guest'] = guestClubName[0]\n\n matchInfoObject['time'] = match[3]\n\n matchInfoObject['score'] = match[4]\n\n cursor.execute(\"SELECT league.name FROM league WHERE league.[key] = %s\", (match[5]))\n leagueName = cursor.fetchall()[0]\n matchInfoObject['leagueName'] = leagueName\n response['clubMatchs'].append(matchInfoObject)\n\n\n\n return JsonResponse(response)\n" }, { "alpha_fraction": 0.5652642846107483, "alphanum_fraction": 0.5668824315071106, "avg_line_length": 27.07575798034668, "blob_id": "ba1e562413dd9b178e8f35431101f1e124411063", "content_id": "748ce4164f2aa75f1f718489823a981440ac22c1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1854, "license_type": "no_license", "max_line_length": 106, "num_lines": 66, "path": "/polls/static/polls/matchController.js", "repo_name": "alikgs19/aazDB", "src_encoding": "UTF-8", "text": "var app = angular.module('footsi');\n\napp.controller(\"matchController\", function ($http, $location, $scope, sharedObject) {\n\n\n\n $scope.hostTeams = sharedObject.getObject().clubslist;\n $scope.guestTeams = sharedObject.getObject().clubslist;\n\n $scope.currentHostSelected = '';\n $scope.currentGuestSelected = '';\n\n\n $scope.selectThisHost = function (selectedId) {\n $scope.currentHostSelected = selectedId;\n $scope.activePlayButton = $scope.currentGuestSelected != '' && $scope.currentHostSelected != '' ;\n\n }\n\n $scope.selectThisGuest = function (selectedId) {\n $scope.currentGuestSelected = selectedId;\n $scope.activePlayButton = $scope.currentGuestSelected != '' && $scope.currentHostSelected != '' ;\n\n }\n\n $scope.goBack = function () {\n $location.path(\"/\");\n\n }\n\n\n $scope.playMatch = function () {\n\n if ($scope.currentGuestSelected == $scope.currentHostSelected){\n console.log(\"the same team!!!\");\n return;\n }\n\n if ($scope.currentGuestSelected == '' || $scope.currentHostSelected == ''){\n console.log(\"select two team!!!\");\n return;\n }\n\n console.log(\"two team will be :\");\n console.log(\"host \" + $scope.currentHostSelected);\n console.log(\"guest \" + $scope.currentGuestSelected);\n\n $http({\n url: \"playmatch/\",\n method: \"GET\",\n params: {hostKey : $scope.currentHostSelected,\n guestKey : $scope.currentGuestSelected}\n }).then(function (response) {\n\n if (response.status !== 200) {\n\n alert('Something baaad happend');\n return;\n }\n\n console.log(\"the result of 
game is :\");\n console.log(response.data);\n $location.path(\"/\");\n });\n }\n});\n\n" } ]
5
diego1castroo/paralelismo-python
https://github.com/diego1castroo/paralelismo-python
87b79e912c0de62528f3fc447c2df34cdef0b332
5963f341c63ac58795970b8bc65b58a7c181be5a
59d92651f676a984897d05362f12f95113018b1c
refs/heads/main
2023-02-17T13:33:12.296689
2021-01-10T23:37:50
2021-01-10T23:37:50
328,501,781
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.575129508972168, "alphanum_fraction": 0.590673565864563, "avg_line_length": 19.44444465637207, "blob_id": "b5d28a6718bf3fc1e65dfb4bad04ac0ab90074bf", "content_id": "2aa73b8c88e266ed437e97d334c5e051c5834f7c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 388, "license_type": "no_license", "max_line_length": 49, "num_lines": 18, "path": "/funcionesquenoocuerrenenelinterpretedepython.py", "repo_name": "diego1castroo/paralelismo-python", "src_encoding": "UTF-8", "text": "from time import sleep, time\r\nfrom threading import Thread\r\nstart = time()\r\nfor _ in range(10):\r\n sleep(1)\r\nprint('Tomó {} segundos.'.format(time() - start))\r\n\r\n\r\nthreads = []\r\nstart = time()\r\nfor _ in range(10):\r\n t = Thread(target=sleep, args=(1,))\r\n t.start()\r\n threads.append(t)\r\n \r\nfor t in threads:\r\n t.join()\r\nprint('Tomó {} segundos.'.format(time() - start))\r\n" }, { "alpha_fraction": 0.5225653052330017, "alphanum_fraction": 0.5653206706047058, "avg_line_length": 26.066667556762695, "blob_id": "69b12150808231dc4d53fb2a93ca880f377a7aaf", "content_id": "b6dd1d63310520c891a3e7a0b590472d532b6a14", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 421, "license_type": "no_license", "max_line_length": 54, "num_lines": 15, "path": "/ccalcpi.py", "repo_name": "diego1castroo/paralelismo-python", "src_encoding": "UTF-8", "text": "from cython.parallel import parallel, prange\r\nimport openmp\r\nfrom libc.stdlib import malloc, free\r\nimport cython\r\ndef calcpi(int n):\r\n cdef double result = 0.0\r\n cdef int num_threads\r\n cdef int i, si\r\n\r\n with nogil,parallel(num_threads = 6):\r\n for i in prange (2, n * 2, 2):\r\n si = 1 if ((i/2) %2==1) else -1\r\n result += 4.0 * si /(i*(i+1.0)* (i + 2.0))\r\n\r\n return result + 3\r\n" }, { "alpha_fraction": 0.6086956262588501, "alphanum_fraction": 0.678260862827301, "avg_line_length": 26.75, "blob_id": "4b2edd2d14359d70a7bf90458f95bb079580f28f", "content_id": "fc1eb9d0b24060c27777e3115f4227e328985fc8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 115, "license_type": "no_license", "max_line_length": 43, "num_lines": 4, "path": "/testpi2.py", "repo_name": "diego1castroo/paralelismo-python", "src_encoding": "UTF-8", "text": "from ccalcpi import calcpi\r\nnPoints = 50000000\r\npi = calcpi(nPoints)\r\nprint('OpenMP pi = ', pi, ' for ', nPoints)\r\n" }, { "alpha_fraction": 0.5803757905960083, "alphanum_fraction": 0.5845511555671692, "avg_line_length": 24.61111068725586, "blob_id": "52bb214d111802cd18b418deed84b9e80957fae9", "content_id": "efa48830640074733deaabcddb88c13e7df4f87c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 479, "license_type": "no_license", "max_line_length": 74, "num_lines": 18, "path": "/setup2.py", "repo_name": "diego1castroo/paralelismo-python", "src_encoding": "UTF-8", "text": "from distutils.core import setup\r\nfrom Cython.Build import cythonize\r\nfrom distutils.extension import Extension\r\nfrom Cython.Distutils import build_ext\r\n\r\next_modules=[\r\n Extension(\"ccalcpi\",\r\n [\"ccalcpi.pyx\"],\r\n extra_compile_args = [\"-03\", \"-ffast-math\", \"-march-nativ\"],\r\n extra_link_args = ['-fopenmp']\r\n )\r\n]\r\n\r\nsetup(\r\n name = 'Calc Pi',\r\n cmdclass = {\"build_ext\": build_ext},\r\n ext_modules = ext_modules\r\n)\r\n" } ]
4
danielelinaro/dynasty
https://github.com/danielelinaro/dynasty
9b6933fa331403faa9c3895a3eab06a285244d25
52f3e3b26a08a4abcc14a9e1ecd511de721405b5
5ee0c4d10a58ecc14436622b6cc414f7d55fd75d
refs/heads/master
2020-12-26T20:06:14.312354
2020-12-11T11:04:52
2020-12-11T11:04:52
237,626,174
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.48058560490608215, "alphanum_fraction": 0.5257797837257385, "avg_line_length": 21.68115997314453, "blob_id": "2a9b303ac624152926c5ddbc7084da701869bea5", "content_id": "c64272b16639ead1b5b181941f116df147cfeda5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1571, "license_type": "no_license", "max_line_length": 95, "num_lines": 69, "path": "/dynasty.py", "repo_name": "danielelinaro/dynasty", "src_encoding": "UTF-8", "text": "\nimport os\nimport time\nimport ctypes\nfrom ctypes import CDLL, RTLD_GLOBAL, c_double, c_size_t, byref\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nif __name__ == '__main__':\n lib_path = '/home/daniele/dynasty/dynasty.so'\n try:\n lib = CDLL(lib_path, mode=RTLD_GLOBAL)\n except:\n print('Cannot load library from {}.'.format(lib_path))\n\n n_eq = 3\n n_pars = 7\n\n y0 = (c_double * n_eq)()\n y0[0] = 0.8\n y0[1] = 0.1\n y0[2] = 0.1\n\n R = 0.1\n E = 2.3\n B = 0.17\n D = 0.42\n G = 0.09\n H = 0.1\n Q = 0.4\n\n pars = (c_double * n_pars)()\n pars[0] = R\n pars[1] = E\n pars[2] = B\n pars[3] = D\n pars[4] = G\n pars[5] = H\n pars[6] = Q\n\n t_tran = c_double(1e4)\n t_stop = c_double(1e5)\n n_ev = c_size_t(1024)\n\n atol = (c_double * n_eq)()\n for i in range(n_eq):\n atol[i] = 1e-8\n rtol = c_double(1e-10)\n\n sol = (c_double * n_ev.value * n_eq)()\n \n start = time.time()\n n_ev = lib.integrate(pars, y0, t_tran, t_stop, n_ev, atol, byref(rtol), sol)\n stop = time.time()\n\n print('Elapsed time = {:.3f} s'.format(stop - start))\n\n data = np.array(sol)\n data = np.reshape(data, (data.shape[1], data.shape[0]), 'C')\n data = data[:n_ev,:]\n\n fig,ax = plt.subplots(1, 1, figsize=(4, 4))\n ax.plot(data[:,2], data[:,1], 'ko', markersize=3, markerfacecolor='w', markeredgewidth=0.5)\n ax.set_xlim([0, 1])\n ax.set_ylim([0, 1])\n ax.set_xlabel(r'$\\mathrm{x}_3$')\n ax.set_ylabel(r'$\\mathrm{x}_2$')\n ax.grid('on')\n fig.tight_layout()\n plt.show()\n \n" }, { "alpha_fraction": 0.4474589228630066, "alphanum_fraction": 0.49140235781669617, "avg_line_length": 23.22222137451172, "blob_id": "fe6f17afb473d3b03669202d9229b66ea5262856", "content_id": "4712da00d6648b33ba8970e4f1f265f77e3ce519", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2617, "license_type": "no_license", "max_line_length": 80, "num_lines": 108, "path": "/auto/dynasty.c", "repo_name": "danielelinaro/dynasty", "src_encoding": "UTF-8", "text": "#include \"auto_f2c.h\"\n\n#define B 0.17\n#define D 0.42\n#define G 0.09\n#define H 0.1\n#define Q 0.4\n\nint func (integer ndim, const doublereal *u, const integer *icp,\n const doublereal *par, integer ijac,\n doublereal *f, doublereal *dfdu, doublereal *dfdp)\n{\n /* System generated locals */\n integer dfdu_dim1 = ndim;\n integer dfdp_dim1 = ndim;\n\n double x, y, z;\n double r, e, b, d, g, h, q;\n double denx, deny, denxsquare, denysquare;\n \n x = u[0];\n y = u[1];\n z = u[2];\n\n r = par[0];\n e = par[1];\n b = par[2];\n d = par[3];\n g = par[4];\n h = par[5];\n q = par[6];\n \n f[0] = x*(1-x-y/(b+x)-h*z);\n f[1] = q*y*(e*x/(b+x)-1-z/(d+y));\n f[2] = r*(x*y/(b+x)-g*z);\n\n if (ijac == 0) {\n return 0;\n }\n\n denx = 1/(b+x);\n deny = 1/(d+y);\n denxsquare = denx*denx;\n denysquare = deny*deny;\n \n ARRAY2D(dfdu, 0, 0) = 1 - 2*x - b*y*denxsquare - h*z;\n ARRAY2D(dfdu, 0, 1) = - x*denx;\n ARRAY2D(dfdu, 0, 2) = - h*x;\n ARRAY2D(dfdu, 1, 0) = q*e*b*y*denxsquare;\n ARRAY2D(dfdu, 1, 1) = q*e*x*denx - q*d*z*denysquare - q;\n 
ARRAY2D(dfdu, 1, 2) = -q*y*deny;\n ARRAY2D(dfdu, 2, 0) = r*b*y*denxsquare;\n ARRAY2D(dfdu, 2, 1) = r*x*denx;\n ARRAY2D(dfdu, 2, 2) = -r*g;\n\n if (ijac == 1) {\n return 0;\n }\n\n return 0;\n}\n\nint stpnt (integer ndim, doublereal t,\n doublereal *u, doublereal *par)\n{\n par[0] = 0.1;\n par[1] = 1.5;\n par[2] = B;\n par[3] = D;\n par[4] = G;\n par[5] = H;\n par[6] = Q;\n\n u[0] = 0.97691713;\n u[1] = 0.01269461;\n u[2] = 0.12014408;\n\n return 0;\n}\n\nint pvls (integer ndim, const doublereal *u,\n doublereal *par)\n{\n\n return 0;\n}\n\nint bcnd (integer ndim, const doublereal *par, const integer *icp,\n integer nbc, const doublereal *u0, const doublereal *u1, integer ijac,\n doublereal *fb, doublereal *dbc)\n{\n return 0;\n}\n\nint icnd (integer ndim, const doublereal *par, const integer *icp,\n integer nint, const doublereal *u, const doublereal *uold,\n const doublereal *udot, const doublereal *upold, integer ijac,\n doublereal *fi, doublereal *dint)\n{\n return 0;\n}\n\nint fopt (integer ndim, const doublereal *u, const integer *icp,\n const doublereal *par, integer ijac,\n doublereal *fs, doublereal *dfdu, doublereal *dfdp)\n{\n return 0;\n}\n\n" }, { "alpha_fraction": 0.5182487964630127, "alphanum_fraction": 0.5464358925819397, "avg_line_length": 26.722089767456055, "blob_id": "4de0bfcd399041089ead814c165f4890ec2274d4", "content_id": "9a9804c46357f7c177626a082b461699e44d97d1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 11672, "license_type": "no_license", "max_line_length": 101, "num_lines": 421, "path": "/dynasty.c", "repo_name": "danielelinaro/dynasty", "src_encoding": "UTF-8", "text": "#include <stdlib.h>\n#include <stdio.h>\n#include <math.h>\n\n#include <cvode/cvode.h> /* prototypes for CVODE fcts., consts. */\n#include <nvector/nvector_serial.h> /* access to serial N_Vector */\n#include <sunmatrix/sunmatrix_dense.h> /* access to dense SUNMatrix */\n#include <sunlinsol/sunlinsol_dense.h> /* access to dense SUNLinearSolver */\n#include <sundials/sundials_types.h> /* defs. 
of realtype, sunindextype */\n\n#define Ith(v,i) NV_Ith_S(v,i) /* Ith numbers components 1..NEQ */\n#define IJth(A,i,j) SM_ELEMENT_D(A,i,j) /* IJth numbers rows,cols 1..NEQ */\n\n/* Problem Constants */\n\n#define NEQ 3 /* number of equations */\n#define NPARS 7 /* number of parameters */\n#define Y0 RCONST(0.8) /* initial y components */\n#define Y1 RCONST(0.1)\n#define Y2 RCONST(0.1)\n#define RTOL RCONST(1.0e-12) /* scalar relative tolerance */\n#define ATOL1 RCONST(1.0e-10) /* vector absolute tolerance components */\n#define ATOL2 RCONST(1.0e-10)\n#define ATOL3 RCONST(1.0e-10)\n#define TTRAN RCONST(1.0e4)\n#define TEND RCONST(1.0e5)\n\n#define EPS RCONST(1.0E-3)\n#define MAX_STEPS 50000\n\n#ifndef LIB\n#define R RCONST(0.07)\n#define E RCONST(2.0)\n#define B RCONST(0.17)\n#define D RCONST(0.42)\n#define G RCONST(0.09)\n#define H RCONST(0.1)\n#define Q RCONST(0.4)\n#endif\n\n/* Functions Called by the Solver */\n\nstatic int f(realtype t, N_Vector y, N_Vector ydot, void *user_data);\n\nstatic int g(realtype t, N_Vector y, realtype *gout, void *user_data);\n\nstatic int Jac(realtype t, N_Vector y, N_Vector fy, SUNMatrix J, \n void *user_data, N_Vector tmp1, N_Vector tmp2, N_Vector tmp3);\n\n/* Private function to check function return values */\n\nstatic int check_retval(void *returnvalue, const char *funcname, int opt);\n\n#ifdef TRIM_ZERO\nstatic int trim_zero(realtype *y1, realtype *y2, realtype *y3);\n#endif\n\n\n/*\n *-------------------------------\n * integrate function\n *-------------------------------\n */\n\nint integrate(realtype *parameters,\n realtype *y0,\n realtype ttran,\n realtype tend,\n size_t max_nev,\n realtype *atol,\n realtype *rtol,\n realtype *sol)\n{\n /* Create serial vector of length NEQ for I.C. and abstol */\n N_Vector y = N_VNew_Serial(NEQ);\n if (check_retval((void *) y, \"N_VNew_Serial\", 0)) {\n return(1);\n }\n\n N_Vector pars = N_VNew_Serial(NPARS);\n if (check_retval((void *) pars, \"N_VNew_Serial\", 0)) {\n N_VDestroy(y);\n return(1);\n }\n\n N_Vector abstol = N_VNew_Serial(NEQ); \n if (check_retval((void *) abstol, \"N_VNew_Serial\", 0)) {\n N_VDestroy(pars);\n N_VDestroy(y);\n return(1);\n }\n\n size_t i, j;\n\n /* Initialize y */\n for (i = 0; i < NEQ; i++)\n Ith(y, i) = y0[i];\n\n /* Initialize pars */\n for (i = 0; i < NPARS; i++)\n Ith(pars, i) = parameters[i];\n\n /* Set the scalar relative tolerance */\n realtype reltol;\n if (rtol != NULL)\n reltol = *rtol;\n else\n /* default value */\n reltol = RTOL;\n\n /* Set the vector absolute tolerance */\n if (atol != NULL) {\n for (i = 0; i < NEQ; i++)\n Ith(abstol, i) = atol[i];\n }\n else {\n /* default values */\n Ith(abstol,0) = ATOL1;\n Ith(abstol,1) = ATOL2;\n Ith(abstol,2) = ATOL3;\n }\n\n /* Call CVodeCreate to create the solver memory and specify the \n * Backward Differentiation Formula */\n int retval;\n void *cvode_mem = CVodeCreate(CV_BDF);\n if ( check_retval((void *) cvode_mem, \"CVodeCreate\", 0) ) {\n retval = 1;\n goto free_vec;\n }\n\n /* Call CVodeInit to initialize the integrator memory and specify the\n * user's right hand side function in y'=f(t,y), the inital time, and\n * the initial dependent variable vector y. 
*/\n retval = CVodeInit(cvode_mem, f, 0.0, y);\n if ( check_retval(&retval, \"CVodeInit\", 1) )\n goto free_mem;\n\n /* Call CVodeSVtolerances to specify the scalar relative tolerance\n * and vector absolute tolerances */\n retval = CVodeSVtolerances(cvode_mem, reltol, abstol);\n if ( check_retval(&retval, \"CVodeSVtolerances\", 1) )\n goto free_mem;\n\n /* Create dense SUNMatrix for use in linear solvers */\n SUNMatrix A = SUNDenseMatrix(NEQ, NEQ);\n if ( check_retval((void *) A, \"SUNDenseMatrix\", 0) ) {\n retval = 1;\n goto free_matrix;\n }\n\n /* Create dense SUNLinearSolver object for use by CVode */\n SUNLinearSolver LS = SUNLinSol_Dense(y, A);\n if ( check_retval((void *) LS, \"SUNLinSol_Dense\", 0) ) {\n retval = 1;\n goto free_solver;\n }\n\n /* Call CVodeSetLinearSolver to attach the matrix and linear solver to CVode */\n retval = CVodeSetLinearSolver(cvode_mem, LS, A);\n if( check_retval( &retval, \"CVodeSetLinearSolver\", 1) )\n goto free_solver;\n\n /* Set the user-supplied Jacobian routine Jac */\n retval = CVodeSetJacFn(cvode_mem, Jac);\n if( check_retval(&retval, \"CVodeSetJacFn\", 1) )\n goto free_solver;\n\n /* Set the parameters of the system */\n retval = CVodeSetUserData(cvode_mem, (void *) pars);\n if( check_retval(&retval, \"CVodeSetUserData\", 1) )\n goto free_solver;\n\n retval = CVodeSetMaxNumSteps(cvode_mem, MAX_STEPS);\n if( check_retval(&retval, \"CVodeSetMaxNumSteps\", 1) )\n goto free_solver;\n\n realtype t = 0.0;\n while(t < ttran) {\n retval = CVode(cvode_mem, ttran, y, &t, CV_NORMAL);\n if (check_retval(&retval, \"CVode\", 1))\n goto free_solver;\n }\n\n /* Call CVodeRootInit to specify the root function g with 1 component */\n retval = CVodeRootInit(cvode_mem, 1, g);\n if ( check_retval(&retval, \"CVodeRootInit\", 1) )\n goto free_solver;\n\n /* Call CVodeSetRootDirection to specify that only negative crossings of *\n * the Poincare' section should be reported */\n int rootdir = -1;\n retval = CVodeSetRootDirection(cvode_mem, &rootdir);\n if ( check_retval(&retval, \"CVodeRootInit\", 1) )\n goto free_solver;\n\n int retvalr, rootsfound[2];\n size_t nev = 0;\n while (nev < max_nev && t < tend) {\n retval = CVode(cvode_mem, tend, y, &t, CV_NORMAL);\n if (retval == CV_ROOT_RETURN) {\n retvalr = CVodeGetRootInfo(cvode_mem, rootsfound);\n if (check_retval(&retvalr, \"CVodeGetRootInfo\", 1))\n break;\n\t sol[nev * (NEQ + 1)] = t;\n for (j = 0; j < NEQ; j++)\n\t sol[nev * (NEQ + 1) + j + 1] = Ith(y, j);\n nev++;\n#ifdef DEBUG\n printf(\"[%03zu/%03zu] %12.4f %13.10f %13.10f %13.10f\\n\", \\\n nev, max_nev, t, Ith(y,0), Ith(y,1), Ith(y,2));\n#endif\n }\n if (check_retval(&retval, \"CVode\", 1))\n break;\n }\n retval = nev;\n\nfree_solver:\n /* Free the linear solver memory */\n SUNLinSolFree(LS);\n\nfree_matrix:\n /* Free the matrix memory */\n SUNMatDestroy(A);\n\nfree_mem:\n /* Free integrator memory */\n CVodeFree(&cvode_mem);\n\nfree_vec:\n /* Free y, pars and abstol vectors */\n N_VDestroy(abstol);\n N_VDestroy(pars);\n N_VDestroy(y);\n\n return(retval);\n}\n\n\n/*\n *-------------------------------\n * Functions called by the solver\n *-------------------------------\n */\n\n/*\n * f routine. Compute function f(t,y). 
\n */\n\nstatic int f(realtype t, N_Vector y, N_Vector ydot, void *user_data)\n{\n realtype y1, y2, y3;\n realtype r, e, b, d, g, h, q;\n N_Vector pars = (N_Vector) user_data;\n\n r = Ith(pars,0);\n e = Ith(pars,1);\n b = Ith(pars,2);\n d = Ith(pars,3);\n g = Ith(pars,4);\n h = Ith(pars,5);\n q = Ith(pars,6);\n\n y1 = Ith(y,0);\n y2 = Ith(y,1);\n y3 = Ith(y,2);\n\n#ifdef TRIM_ZERO\n if ( trim_zero(&y1, &y2, &y3) )\n return CV_ERR_FAILURE;\n#endif\n\n Ith(ydot,0) = y1 * ( 1 - y1 - y2 / ( b + y1 ) - h * y3 );\n Ith(ydot,1) = q * y2 * ( e * y1 / ( b + y1 ) - 1 - y3 / ( d + y2 ) );\n Ith(ydot,2) = r * ( y1 * y2 / ( b + y1 ) - g * y3 );\n\n return(0);\n}\n\n/*\n * g routine. Compute the root function g(t,y). \n */\n\nstatic int g(realtype t, N_Vector y, realtype *gout, void *user_data)\n{\n realtype y1, y2, y3;\n realtype q, e, b, d;\n N_Vector pars = (N_Vector) user_data;\n\n e = Ith(pars,1);\n b = Ith(pars,2);\n d = Ith(pars,3);\n q = Ith(pars,6);\n\n y1 = Ith(y,0);\n y2 = Ith(y,1);\n y3 = Ith(y,2);\n\n#ifdef TRIM_ZERO\n if ( trim_zero(&y1, &y2, &y3) )\n return CV_ERR_FAILURE;\n#endif\n\n gout[0] = q * y2 * ( e * y1 / ( b + y1 ) - 1 - y3 / ( d + y2 ) );\n\n return(0);\n}\n\n/*\n * Jacobian routine. Compute J(t,y) = df/dy. \n */\n\nstatic int Jac(realtype t, N_Vector y, N_Vector fy, SUNMatrix J, \n void *user_data, N_Vector tmp1, N_Vector tmp2, N_Vector tmp3)\n{\n realtype y1, y2, y3;\n realtype r, e, b, d, g, h, q;\n realtype deny1, deny2, deny1square, deny2square;\n N_Vector pars = (N_Vector) user_data;\n\n r = Ith(pars,0); /* must match f(), which uses the raw parameter value */\n e = Ith(pars,1);\n b = Ith(pars,2);\n d = Ith(pars,3);\n g = Ith(pars,4);\n h = Ith(pars,5);\n q = Ith(pars,6);\n\n y1 = Ith(y,0);\n y2 = Ith(y,1);\n y3 = Ith(y,2);\n\n#ifdef TRIM_ZERO\n if ( trim_zero(&y1, &y2, &y3) )\n return CV_ERR_FAILURE;\n#endif\n\n deny1 = 1 / ( b + y1 );\n deny2 = 1 / ( d + y2 );\n deny1square = deny1 * deny1;\n deny2square = deny2 * deny2;\n\n IJth (J, 0, 0) = 1 - 2 * y1 - b * y2 * deny1square - h * y3;\n IJth (J, 0, 1) = - y1 * deny1;\n IJth (J, 0, 2) = - h * y1;\n IJth (J, 1, 0) = q * e * b * y2 * deny1square;\n IJth (J, 1, 1) = q * e * y1 * deny1 - q * d * y3 * deny2square - q;\n IJth (J, 1, 2) = - q * y2 * deny2;\n IJth (J, 2, 0) = r * b * y2 * deny1square;\n IJth (J, 2, 1) = r * y1 * deny1;\n IJth (J, 2, 2) = -r * g;\n\n return(0);\n}\n\n/*\n * Check function return value...\n * opt == 0 means SUNDIALS function allocates memory so check if\n * returned NULL pointer\n * opt == 1 means SUNDIALS function returns an integer value so check if\n * retval < 0\n * opt == 2 means function allocates memory so check if returned\n * NULL pointer \n */\n\nstatic int check_retval(void *returnvalue, const char *funcname, int opt)\n{\n int *retval;\n\n /* Check if SUNDIALS function returned NULL pointer - no memory allocated */\n if (opt == 0 && returnvalue == NULL) {\n fprintf(stderr, \"\\nSUNDIALS_ERROR: %s() failed - returned NULL pointer\\n\\n\", funcname);\n return(1);\n }\n\n /* Check if retval < 0 */\n else if (opt == 1) {\n retval = (int *) returnvalue;\n if (*retval < 0) {\n fprintf(stderr, \"\\nSUNDIALS_ERROR: %s() failed with retval = %d\\n\\n\", funcname, *retval);\n return(1);\n }\n }\n\n /* Check if function returned NULL pointer - no memory allocated */\n else if (opt == 2 && returnvalue == NULL) {\n fprintf(stderr, \"\\nMEMORY_ERROR: %s() failed - returned NULL pointer\\n\\n\", funcname);\n return(1);\n }\n\n return(0);\n}\n\nstatic int trim_zero(realtype *y1, realtype *y2, realtype *y3) {\n /* small negative components are trimmed to 
zero */\n if(*y1 < 0.0 || *y2 < 0.0 || *y3 < 0.0) {\n if(*y1 > -EPS) {\n *y1 = 0.0;\n }\n else {\n fprintf(stderr, \"y1 = %g\\n\", *y1);\n return 1;\n }\n if(*y2 > -EPS) {\n *y2 = 0.0;\n }\n else {\n fprintf(stderr, \"y2 = %g\\n\", *y2);\n return 1;\n }\n if(*y3 > -EPS) {\n *y3 = 0.0;\n }\n else {\n fprintf(stderr, \"y3 = %g\\n\", *y3);\n return 1;\n }\n }\n return 0;\n}\n\n" }, { "alpha_fraction": 0.4205761253833771, "alphanum_fraction": 0.4485596716403961, "avg_line_length": 26.581396102905273, "blob_id": "a58d13ee209e4b382987d9d8df4542933305bda8", "content_id": "a7d6e6767e4540481ce3e6ad6029e75189c5bf9a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1215, "license_type": "no_license", "max_line_length": 66, "num_lines": 43, "path": "/reader.py", "repo_name": "danielelinaro/dynasty", "src_encoding": "UTF-8", "text": "\nimport numpy as np\n\nfilename = 'dynasty.dat'\n\nwith open(filename,'rb') as fid:\n for binary_line in fid:\n line = binary_line.decode('ascii').rstrip()\n if 'NEQ:' in line:\n neq = int(line.split(' ')[1])\n elif 'NPARS:' in line:\n npars = int(line.split(' ')[1])\n elif 'NEV:' in line:\n nev = int(line.split(' ')[1])\n elif 'NINT:' in line:\n nint = int(line.split(' ')[1])\n elif 'Binary:' in line:\n break\n\n print('neq =', neq)\n print('npars =', npars)\n print('nev =', nev)\n print('nint =', nint)\n\n cnt = 1\n\n for i in range(nint):\n if cnt != 0:\n num = np.fromfile(fid, np.int64, count=1)\n\n if num != 0:\n import ipdb\n ipdb.set_trace()\n\n pars = np.fromfile(fid, np.float64, count=npars)\n #print(pars)\n\n for j in range(nev):\n cnt = np.fromfile(fid, np.int64, count=1)\n if cnt == 0:\n break\n x = np.fromfile(fid, np.float64, count=neq+1)\n print('[{:03d}] {:8.2f} {:7.5f} {:7.5f} {:7.5f}' \\\n .format(j+1, x[0], x[1], x[2], x[3]))\n\n \n" }, { "alpha_fraction": 0.6304348111152649, "alphanum_fraction": 0.6304348111152649, "avg_line_length": 21.91666603088379, "blob_id": "69a7456f6a55b801a500ea79d395f150752cf49f", "content_id": "841a83c41a188bc740aa0cc796d4c3b0a48b3237", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 276, "license_type": "no_license", "max_line_length": 53, "num_lines": 12, "path": "/auto/dynasty.auto", "repo_name": "danielelinaro/dynasty", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n#\nprint('\\n***Compute a stationary solution family***')\neq = run(e='dynasty',c='eq')\nsave(eq,'eq')\n\nprint(\"\\n***Compute periodic solution families***\")\nfor solution in eq('HB'):\n cycle = run(solution, c='lc')\n\nprint('***Clean the directory***')\ncl()\n\n" }, { "alpha_fraction": 0.6123595237731934, "alphanum_fraction": 0.6151685118675232, "avg_line_length": 20.57575798034668, "blob_id": "e779b63a5498999a388c923cfdc31bbe9c77f996", "content_id": "919cfd91a2d4ea3e818629ba7824356aa927985e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 712, "license_type": "no_license", "max_line_length": 75, "num_lines": 33, "path": "/Makefile", "repo_name": "danielelinaro/dynasty", "src_encoding": "UTF-8", "text": "CC = gcc\nCXX = g++\nCFLAGS = -I/Users/daniele/local/include -I/usr/include -Wall -g -O3 -DDEBUG\nLDFLAGS = -L/Users/daniele/local/lib\nLIBS = -lsundials_cvode -lsundials_nvecserial -lm -ldl -lpthread\nOBJS = dynasty.o\n\n%.o : %.c\n\t$(CC) -c -o $@ $< $(CFLAGS)\n\n.PHONY : linuxlib\n\nsharedlib : $(OBJS)\n\t$(CXX) -g -o dynasty.so -fPIC -shared \\\n\t\t-static-libgcc 
-static-libstdc++ \\\n\t\t$(OBJS) \\\n\t\t-Wl,-zmuldefs \\\n\t\t-Wl,--whole-archive \\\n\t\t-Wl,--no-whole-archive \\\n\t\t$(shell python3-config --ldflags) \\\n\t\t-Wl,--library-path=/home/daniele/local/sundials/lib \\\n\t\t$(LIBS)\n\n.PHONY : maclib\n\nmaclib : $(OBJS)\n\t$(CXX) -g -o dynasty.dylib -fPIC -shared \\\n\t\t$(OBJS) \\\n\t\t$(LDFLAGS) \\\n\t\t$(LIBS)\n\nclean :\n\trm -f dynasty *.o *.dylib *.so\n" } ]
6
frong123nk/stock_pipeline
https://github.com/frong123nk/stock_pipeline
83e2c5f0cf44653976383680bd05838e243244de
ae360d7ca8f7d35e94e4012a0fa746ada26063d0
72e2b29cd5d56b54439fb496c97e715abff6ac6a
refs/heads/main
2023-07-05T05:28:02.916467
2021-08-15T13:09:34
2021-08-15T13:09:34
396,351,360
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7880434989929199, "alphanum_fraction": 0.79347825050354, "avg_line_length": 17.399999618530273, "blob_id": "1f57f407fa5d0d162502bd35b7d94bce0f7cd893", "content_id": "f5fbef2e071e042a7fdcf445766b2e3f34443232", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 184, "license_type": "no_license", "max_line_length": 50, "num_lines": 10, "path": "/README.md", "repo_name": "frong123nk/stock_pipeline", "src_encoding": "UTF-8", "text": "# Pipeline\nthis project run on google cloud composer\nAdd this library on pypi package of cloud composer\n```\nrequests\nbeautifulsoup4\npandas\npymysql\n```\nand Add this code to DAGs folder\n" }, { "alpha_fraction": 0.5993139147758484, "alphanum_fraction": 0.607890248298645, "avg_line_length": 27.578432083129883, "blob_id": "adfdecd14bcc89f7128d036fa71e83d901248cae", "content_id": "5320d50a8ae4304eafa70e2762fe5a83919dd08b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5830, "license_type": "no_license", "max_line_length": 128, "num_lines": 204, "path": "/pipeline_stock.py", "repo_name": "frong123nk/stock_pipeline", "src_encoding": "UTF-8", "text": "from airflow import DAG\nfrom airflow.operators.bash_operator import BashOperator\nfrom airflow.operators.python_operator import PythonOperator, BranchPythonOperator\nfrom airflow.operators.dummy_operator import DummyOperator\nfrom airflow.utils.dates import days_ago\nfrom bs4 import BeautifulSoup\nimport requests\nfrom datetime import datetime\nimport pandas as pd\nfrom datetime import timedelta\nfrom google.cloud import bigquery\nfrom google.cloud.exceptions import NotFound\n\n\ndef get_data_from_url(symblo):\n url_info_stock = f\"https://www.settrade.com/C04_02_stock_historical_p1.jsp?txtSymbol={symblo}&selectPage=2&max=200&offset=0\"\n res_stock = requests.get(url_info_stock)\n if res_stock.status_code == 200:\n print(\"Successful\")\n elif res_stock.status_code == 404:\n print(\"Error 404 page not found\")\n else:\n print(\"Not both 200 and 404\")\n\n soup_stock = BeautifulSoup(res_stock.content, \"html.parser\")\n table_stock = soup_stock.select_one(\"div.table-responsive\")\n data_all_stock = table_stock.find_all(\"tr\")\n return data_all_stock\n\n\ndef add_to_list(data_all_stock):\n ls = []\n for i in range(len(data_all_stock)):\n ls_data = []\n if i == 0:\n data = data_all_stock[0].find_all(\"th\")\n data = [\n \"Date\",\n \"Open\",\n \"High\",\n \"Low\",\n \"AveragePrice\",\n \"Close\",\n \"Change\",\n \"Percent_Change\",\n \"Volumex1000\",\n \"Value_MB\",\n \"SET_Index\",\n \"Percent_Change\",\n ]\n else:\n data = data_all_stock[i].find_all(\"td\")\n for j in range(len(data)):\n if i == 0:\n ls_data.append(data[j])\n elif j == 0:\n ls_data.append(\n str(datetime.strptime(data[j].text, \"%d/%m/%y\").isoformat())\n )\n else:\n ls_data.append(float(data[j].text.replace(\",\", \"\")))\n ls.append(ls_data)\n return ls\n\n\ndef list_to_json(ls):\n res_dict = {\n str(ls[i][0]): {ls[0][j]: ls[i][j] for j in range(len(ls[0]))}\n for i in range(1, len(ls))\n }\n return res_dict\n\n\ndef scraping_to_csv():\n symbol = \"A\"\n data_all_stock = get_data_from_url(symbol)\n ls = add_to_list(data_all_stock)\n json_stock = list_to_json(ls)\n stock_dict = [i for i in json_stock.values()]\n df = pd.DataFrame(stock_dict)\n df[\"Date\"] = pd.to_datetime(df[\"Date\"]).dt.date\n df.to_csv(\"/home/airflow/gcs/data/result.csv\", index=False)\n\n\ndef check_table():\n status = 
\"table_exists\"\n client = bigquery.Client()\n table_id = \"etlstock.pipeline_stock.testselect\"\n try:\n client.get_table(table_id)\n print(\"Table {} already exists.\".format(table_id))\n except NotFound:\n print(\"Table {} is not found.\".format(table_id))\n table = bigquery.Table(table_id)\n table = client.create_table(table)\n status = \"table_not_exists\"\n return status\n\n\ndef query_max_date(**context):\n client = bigquery.Client()\n query_job = \"\"\"\n SELECT max(Date) as maxdate\n FROM `etlstock.pipeline_stock.testselect`\n \"\"\"\n df = client.query(query_job).to_dataframe()\n maxdate = str(df[\"maxdate\"][0])\n context[\"ti\"].xcom_push(key=\"max_date\", value=maxdate)\n\n\ndef check_data_to_update(**context):\n max_date = context[\"ti\"].xcom_pull(\n task_ids=\"query_max_date\", key=\"max_date\", include_prior_dates=True\n )\n result_df = pd.read_csv(\"/home/airflow/gcs/data/result.csv\")\n check_data = result_df[\"Date\"].max() > max_date\n dump = \"no_data_to_update\"\n if check_data:\n result = result_df[result_df[\"Date\"] > max_date]\n result.to_csv(\"/home/airflow/gcs/data/new_result.csv\", index=False)\n dump = \"data_to_update\"\n return dump\n\n\ndefault_args = {\n \"owner\": \"iFrong\",\n \"depends_on_past\": False,\n \"start_date\": days_ago(1),\n \"email\": [\"airflow@example.com\"],\n \"email_on_failure\": False,\n \"email_on_retry\": False,\n \"retries\": 1,\n \"retry_delay\": timedelta(minutes=5),\n \"schedule_interval\": \"@daily\",\n}\n\ndag = DAG(\n \"pipeline_stock\",\n default_args=default_args,\n description=\"Pipeline for ETL stock\",\n)\n\nweb_to_csv = PythonOperator(\n task_id=\"web_to_csv\",\n python_callable=scraping_to_csv,\n dag=dag,\n)\n\nquery_max_date = PythonOperator(\n task_id=\"query_max_date\",\n python_callable=query_max_date,\n provide_context=True,\n dag=dag,\n)\n\n\ncheck_table = BranchPythonOperator(\n task_id=\"Check_table_exists\",\n python_callable=check_table,\n dag=dag,\n)\n\ncheck_data_to_update = BranchPythonOperator(\n task_id=\"check_data_to_update\",\n python_callable=check_data_to_update,\n dag=dag,\n provide_context=True,\n)\n\n\nload_to_bq = BashOperator(\n task_id=\"bq_load\",\n bash_command=\"bq load --source_format=CSV --autodetect \\\n pipeline_stock.testselect \\\n gs://asia-northeast1-etlstock-5cfdc9a7-bucket/data/result.csv\",\n dag=dag,\n)\n\ninsert_to_bq = BashOperator(\n task_id=\"insert_to_bq\",\n bash_command=\"bq load --source_format=CSV --autodetect \\\n pipeline_stock.testselect \\\n gs://asia-northeast1-etlstock-5cfdc9a7-bucket/data/new_result.csv\",\n dag=dag,\n)\n\ntable_exists = DummyOperator(task_id=\"table_exists\", dag=dag)\n\ntable_not_exists = DummyOperator(task_id=\"table_not_exists\", dag=dag)\n\ndata_to_update = DummyOperator(task_id=\"data_to_update\", dag=dag)\n\nno_data_to_update = DummyOperator(task_id=\"no_data_to_update\", dag=dag)\n\n\nweb_to_csv >> check_table >> [table_exists, table_not_exists]\n(\n table_exists\n >> query_max_date\n >> check_data_to_update\n >> [data_to_update, no_data_to_update]\n)\ntable_not_exists >> load_to_bq\ndata_to_update >> insert_to_bq\n" } ]
2
flyte/home-assistant
https://github.com/flyte/home-assistant
2ddbc0244de690c6ff59d9975c29f42f752c6cfb
f0479855bdfff3140ccf761fcb8c9ac247691157
e5fc0acbe44b787d241021e251f853b6df686f58
refs/heads/dev
2020-04-05T17:18:52.201672
2017-07-17T10:09:42
2017-07-17T10:09:42
50,265,453
1
1
null
2016-01-24T00:28:55
2016-01-23T22:06:58
2016-01-23T22:27:38
null
[ { "alpha_fraction": 0.5979326963424683, "alphanum_fraction": 0.5997879505157471, "avg_line_length": 29.427419662475586, "blob_id": "e2e04b011929a317450ca38d764b3e5bf3adf2c1", "content_id": "20906dd8df19f35892e6faea3faa3ae77061a7bc", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3773, "license_type": "permissive", "max_line_length": 76, "num_lines": 124, "path": "/homeassistant/components/switch/xiaomi_vacuum.py", "repo_name": "flyte/home-assistant", "src_encoding": "UTF-8", "text": "\"\"\"\nSupport for Xiaomi Vacuum cleaner robot.\n\nFor more details about this platform, please refer to the documentation\nhttps://home-assistant.io/components/switch.xiaomi_vacuum/\n\"\"\"\nimport logging\n\nimport voluptuous as vol\n\nimport homeassistant.helpers.config_validation as cv\nfrom homeassistant.components.switch import SwitchDevice, PLATFORM_SCHEMA\nfrom homeassistant.const import (DEVICE_DEFAULT_NAME,\n CONF_NAME, CONF_HOST, CONF_TOKEN)\n\n_LOGGER = logging.getLogger(__name__)\n\n\nPLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({\n vol.Required(CONF_HOST): cv.string,\n vol.Required(CONF_TOKEN): vol.All(str, vol.Length(min=32, max=32)),\n vol.Optional(CONF_NAME): cv.string,\n})\n\nREQUIREMENTS = ['python-mirobo==0.1.1']\n\n\n# pylint: disable=unused-argument\ndef setup_platform(hass, config, add_devices_callback, discovery_info=None):\n \"\"\"Set up the vacuum from config.\"\"\"\n host = config.get(CONF_HOST)\n name = config.get(CONF_NAME)\n token = config.get(CONF_TOKEN)\n\n add_devices_callback([MiroboSwitch(name, host, token)])\n\n\nclass MiroboSwitch(SwitchDevice):\n \"\"\"Representation of a Xiaomi Vacuum.\"\"\"\n\n def __init__(self, name, host, token):\n \"\"\"Initialize the vacuum switch.\"\"\"\n self._name = name or DEVICE_DEFAULT_NAME\n self._icon = 'mdi:broom'\n self.host = host\n self.token = token\n\n self._vacuum = None\n self._state = None\n self._state_attrs = {}\n self._is_on = False\n\n @property\n def name(self):\n \"\"\"Return the name of the device if any.\"\"\"\n return self._name\n\n @property\n def icon(self):\n \"\"\"Return the icon to use for device if any.\"\"\"\n return self._icon\n\n @property\n def available(self):\n \"\"\"Return true when state is known.\"\"\"\n return self._state is not None\n\n @property\n def device_state_attributes(self):\n \"\"\"Return the state attributes of the device.\"\"\"\n return self._state_attrs\n\n @property\n def is_on(self):\n \"\"\"Return true if switch is on.\"\"\"\n return self._is_on\n\n @property\n def vacuum(self):\n \"\"\"Property accessor for vacuum object.\"\"\"\n if not self._vacuum:\n from mirobo import Vacuum\n _LOGGER.info(\"initializing with host %s token %s\",\n self.host, self.token)\n self._vacuum = Vacuum(self.host, self.token)\n\n return self._vacuum\n\n def turn_on(self, **kwargs):\n \"\"\"Turn the vacuum on.\"\"\"\n from mirobo import VacuumException\n try:\n self.vacuum.start()\n self._is_on = True\n except VacuumException as ex:\n _LOGGER.error(\"Unable to start the vacuum: %s\", ex)\n\n def turn_off(self, **kwargs):\n \"\"\"Turn the vacuum off and return to home.\"\"\"\n from mirobo import VacuumException\n try:\n self.vacuum.stop()\n self.vacuum.home()\n self._is_on = False\n except VacuumException as ex:\n _LOGGER.error(\"Unable to turn off and return home: %s\", ex)\n\n def update(self):\n \"\"\"Fetch state from the device.\"\"\"\n from mirobo import VacuumException\n try:\n state = self.vacuum.status()\n _LOGGER.debug(\"got state from 
the vacuum: %s\", state)\n\n self._state_attrs = {\n 'Status': state.state, 'Error': state.error,\n 'Battery': state.battery, 'Fan': state.fanspeed,\n 'Cleaning time': str(state.clean_time),\n 'Cleaned area': state.clean_area}\n\n self._state = state.state_code\n self._is_on = state.is_on\n except VacuumException as ex:\n _LOGGER.error(\"Got exception while fetching the state: %s\", ex)\n" }, { "alpha_fraction": 0.5789104700088501, "alphanum_fraction": 0.5855666399002075, "avg_line_length": 34.681251525878906, "blob_id": "967bad8d43303882ab12a21091eb8e1f27038173", "content_id": "6d74f86132aef2983abca62dea311849d4aab192", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5709, "license_type": "permissive", "max_line_length": 79, "num_lines": 160, "path": "/homeassistant/components/notify/twitter.py", "repo_name": "flyte/home-assistant", "src_encoding": "UTF-8", "text": "\"\"\"\nTwitter platform for notify component.\n\nFor more details about this platform, please refer to the documentation at\nhttps://home-assistant.io/components/notify.twitter/\n\"\"\"\nimport json\nimport logging\nimport mimetypes\nimport os\n\nimport voluptuous as vol\n\nimport homeassistant.helpers.config_validation as cv\nfrom homeassistant.components.notify import (\n ATTR_DATA, PLATFORM_SCHEMA, BaseNotificationService)\nfrom homeassistant.const import CONF_ACCESS_TOKEN, CONF_USERNAME\n\nREQUIREMENTS = ['TwitterAPI==2.4.5']\n\n_LOGGER = logging.getLogger(__name__)\n\nCONF_CONSUMER_KEY = 'consumer_key'\nCONF_CONSUMER_SECRET = 'consumer_secret'\nCONF_ACCESS_TOKEN_SECRET = 'access_token_secret'\n\nATTR_MEDIA = 'media'\n\nPLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({\n vol.Required(CONF_ACCESS_TOKEN): cv.string,\n vol.Required(CONF_ACCESS_TOKEN_SECRET): cv.string,\n vol.Required(CONF_CONSUMER_KEY): cv.string,\n vol.Required(CONF_CONSUMER_SECRET): cv.string,\n vol.Optional(CONF_USERNAME): cv.string,\n})\n\n\ndef get_service(hass, config, discovery_info=None):\n \"\"\"Get the Twitter notification service.\"\"\"\n return TwitterNotificationService(\n hass,\n config[CONF_CONSUMER_KEY], config[CONF_CONSUMER_SECRET],\n config[CONF_ACCESS_TOKEN], config[CONF_ACCESS_TOKEN_SECRET],\n config.get(CONF_USERNAME)\n )\n\n\nclass TwitterNotificationService(BaseNotificationService):\n \"\"\"Implementation of a notification service for the Twitter service.\"\"\"\n\n def __init__(self, hass, consumer_key, consumer_secret, access_token_key,\n access_token_secret, username):\n \"\"\"Initialize the service.\"\"\"\n from TwitterAPI import TwitterAPI\n self.user = username\n self.hass = hass\n self.api = TwitterAPI(consumer_key, consumer_secret, access_token_key,\n access_token_secret)\n\n def send_message(self, message=\"\", **kwargs):\n \"\"\"Tweet a message, optionally with media.\"\"\"\n data = kwargs.get(ATTR_DATA)\n media = data.get(ATTR_MEDIA)\n if not self.hass.config.is_allowed_path(media):\n _LOGGER.warning(\"'%s' is not in a whitelisted area.\", media)\n return\n\n media_id = self.upload_media(media)\n\n if self.user:\n resp = self.api.request('direct_messages/new',\n {'text': message, 'user': self.user,\n 'media_ids': media_id})\n else:\n resp = self.api.request('statuses/update',\n {'status': message, 'media_ids': media_id})\n\n if resp.status_code != 200:\n self.log_error_resp(resp)\n\n def upload_media(self, media_path=None):\n \"\"\"Upload media.\"\"\"\n if not media_path:\n return None\n\n (media_type, _) = mimetypes.guess_type(media_path)\n total_bytes = 
os.path.getsize(media_path)\n\n        file = open(media_path, 'rb')\n        resp = self.upload_media_init(media_type, total_bytes)\n\n        if not 200 <= resp.status_code < 300:\n            self.log_error_resp(resp)\n            return None\n\n        media_id = resp.json()['media_id']\n        media_id = self.upload_media_chunked(file, total_bytes,\n                                             media_id)\n\n        resp = self.upload_media_finalize(media_id)\n        if not 200 <= resp.status_code < 300:\n            self.log_error_resp(resp)\n\n        return media_id\n\n    def upload_media_init(self, media_type, total_bytes):\n        \"\"\"Upload media, INIT phase.\"\"\"\n        resp = self.api.request('media/upload',\n                                {'command': 'INIT', 'media_type': media_type,\n                                 'total_bytes': total_bytes})\n        return resp\n\n    def upload_media_chunked(self, file, total_bytes, media_id):\n        \"\"\"Upload media, chunked append.\"\"\"\n        segment_id = 0\n        bytes_sent = 0\n        while bytes_sent < total_bytes:\n            chunk = file.read(4 * 1024 * 1024)\n            resp = self.upload_media_append(chunk, media_id, segment_id)\n            if resp.status_code not in range(200, 300):\n                self.log_error_resp_append(resp)\n                return None\n            segment_id = segment_id + 1\n            bytes_sent = file.tell()\n            self.log_bytes_sent(bytes_sent, total_bytes)\n        return media_id\n\n    def upload_media_append(self, chunk, media_id, segment_id):\n        \"\"\"Upload media, append phase.\"\"\"\n        return self.api.request('media/upload',\n                                {'command': 'APPEND', 'media_id': media_id,\n                                 'segment_index': segment_id},\n                                {'media': chunk})\n\n    def upload_media_finalize(self, media_id):\n        \"\"\"Upload media, finalize phase.\"\"\"\n        return self.api.request('media/upload',\n                                {'command': 'FINALIZE', 'media_id': media_id})\n\n    @staticmethod\n    def log_bytes_sent(bytes_sent, total_bytes):\n        \"\"\"Log upload progress.\"\"\"\n        _LOGGER.debug(\"%s of %s bytes uploaded\", str(bytes_sent),\n                      str(total_bytes))\n\n    @staticmethod\n    def log_error_resp(resp):\n        \"\"\"Log error response.\"\"\"\n        obj = json.loads(resp.text)\n        error_message = obj['error']\n        _LOGGER.error(\"Error %s : %s\", resp.status_code, error_message)\n\n    @staticmethod\n    def log_error_resp_append(resp):\n        \"\"\"Log error response, during upload append phase.\"\"\"\n        obj = json.loads(resp.text)\n        error_message = obj['errors'][0]['message']\n        error_code = obj['errors'][0]['code']\n        _LOGGER.error(\"Error %s : %s (Code %s)\", resp.status_code,\n                      error_message, error_code)\n" }, { "alpha_fraction": 0.6162093877792358, "alphanum_fraction": 0.6181015372276306, "avg_line_length": 30.396039962768555, "blob_id": "6e25ffef170fab2e7144aa5c17fb5b1e2a26ddda", "content_id": "c7d019973a34f1ff702f24927fa75a9334c6acb1", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3171, "license_type": "permissive", "max_line_length": 75, "num_lines": 101, "path": "/homeassistant/components/media_extractor.py", "repo_name": "flyte/home-assistant", "src_encoding": "UTF-8", "text": "\"\"\"\nDecorator service for the media_player.play_media service.\n\nFor more details about this component, please refer to the documentation at\nhttps://home-assistant.io/components/media_extractor/\n\"\"\"\nimport logging\nimport os\n\nfrom homeassistant.components.media_player import (\n    ATTR_MEDIA_CONTENT_ID, DOMAIN as MEDIA_PLAYER_DOMAIN,\n    MEDIA_PLAYER_PLAY_MEDIA_SCHEMA, SERVICE_PLAY_MEDIA)\nfrom homeassistant.config import load_yaml_config_file\n\nREQUIREMENTS = ['youtube_dl==2017.7.9']\n\n_LOGGER = logging.getLogger(__name__)\n\nDOMAIN = 'media_extractor'\nDEPENDENCIES = ['media_player']\n\n\ndef setup(hass, config):\n    \"\"\"Set up the media extractor 
service.\"\"\"\n descriptions = load_yaml_config_file(\n os.path.join(os.path.dirname(__file__),\n 'media_player', 'services.yaml'))\n\n def play_media(call):\n \"\"\"Get stream URL and send it to the media_player.play_media.\"\"\"\n media_url = call.data.get(ATTR_MEDIA_CONTENT_ID)\n\n try:\n stream_url = get_media_stream_url(media_url)\n except YDException:\n _LOGGER.error(\"Could not retrieve data for the URL: %s\",\n media_url)\n return\n else:\n data = {k: v for k, v in call.data.items()\n if k != ATTR_MEDIA_CONTENT_ID}\n data[ATTR_MEDIA_CONTENT_ID] = stream_url\n\n hass.async_add_job(\n hass.services.async_call(\n MEDIA_PLAYER_DOMAIN, SERVICE_PLAY_MEDIA, data)\n )\n\n hass.services.register(DOMAIN,\n SERVICE_PLAY_MEDIA,\n play_media,\n description=descriptions[SERVICE_PLAY_MEDIA],\n schema=MEDIA_PLAYER_PLAY_MEDIA_SCHEMA)\n\n return True\n\n\nclass YDException(Exception):\n \"\"\"General service exception.\"\"\"\n\n pass\n\n\ndef get_media_stream_url(media_url):\n \"\"\"Extract stream URL from the media URL.\"\"\"\n from youtube_dl import YoutubeDL\n from youtube_dl.utils import DownloadError, ExtractorError\n\n ydl = YoutubeDL({'quiet': True, 'logger': _LOGGER})\n\n try:\n all_media_streams = ydl.extract_info(media_url, process=False)\n except DownloadError:\n # This exception will be logged by youtube-dl itself\n raise YDException()\n\n if 'entries' in all_media_streams:\n _LOGGER.warning(\"Playlists are not supported, \"\n \"looking for the first video\")\n try:\n selected_stream = next(all_media_streams['entries'])\n except StopIteration:\n _LOGGER.error(\"Playlist is empty\")\n raise YDException()\n else:\n selected_stream = all_media_streams\n\n try:\n media_info = ydl.process_ie_result(selected_stream, download=False)\n except (ExtractorError, DownloadError):\n # This exception will be logged by youtube-dl itself\n raise YDException()\n\n format_selector = ydl.build_format_selector('best')\n\n try:\n best_quality_stream = next(format_selector(media_info))\n except (KeyError, StopIteration):\n best_quality_stream = media_info\n\n return best_quality_stream['url']\n" }, { "alpha_fraction": 0.5986292958259583, "alphanum_fraction": 0.5989273190498352, "avg_line_length": 26.508195877075195, "blob_id": "9afabb0704fc2fa3772e6e958708fc089ea35e9c", "content_id": "b123de481589696cd65452d96dde785851f72d59", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3356, "license_type": "permissive", "max_line_length": 78, "num_lines": 122, "path": "/homeassistant/components/snips.py", "repo_name": "flyte/home-assistant", "src_encoding": "UTF-8", "text": "\"\"\"\nSupport for Snips on-device ASR and NLU.\n\nFor more details about this component, please refer to the documentation at\nhttps://home-assistant.io/components/snips/\n\"\"\"\nimport asyncio\nimport copy\nimport json\nimport logging\nimport voluptuous as vol\nfrom homeassistant.helpers import template, script, config_validation as cv\nimport homeassistant.loader as loader\n\nDOMAIN = 'snips'\nDEPENDENCIES = ['mqtt']\nCONF_INTENTS = 'intents'\nCONF_ACTION = 'action'\n\nINTENT_TOPIC = 'hermes/nlu/intentParsed'\n\nLOGGER = logging.getLogger(__name__)\n\nCONFIG_SCHEMA = vol.Schema({\n DOMAIN: {\n CONF_INTENTS: {\n cv.string: {\n vol.Optional(CONF_ACTION): cv.SCRIPT_SCHEMA,\n }\n }\n }\n}, extra=vol.ALLOW_EXTRA)\n\nINTENT_SCHEMA = vol.Schema({\n vol.Required('input'): str,\n vol.Required('intent'): {\n vol.Required('intentName'): str\n },\n vol.Optional('slots'): [{\n 
vol.Required('slotName'): str,\n vol.Required('value'): {\n vol.Required('kind'): str,\n vol.Required('value'): cv.match_all\n }\n }]\n}, extra=vol.ALLOW_EXTRA)\n\n\n@asyncio.coroutine\ndef async_setup(hass, config):\n \"\"\"Activate Snips component.\"\"\"\n mqtt = loader.get_component('mqtt')\n intents = config[DOMAIN].get(CONF_INTENTS, {})\n handler = IntentHandler(hass, intents)\n\n @asyncio.coroutine\n def message_received(topic, payload, qos):\n \"\"\"Handle new messages on MQTT.\"\"\"\n LOGGER.debug(\"New intent: %s\", payload)\n yield from handler.handle_intent(payload)\n\n yield from mqtt.async_subscribe(hass, INTENT_TOPIC, message_received)\n\n return True\n\n\nclass IntentHandler(object):\n \"\"\"Help handling intents.\"\"\"\n\n def __init__(self, hass, intents):\n \"\"\"Initialize the intent handler.\"\"\"\n self.hass = hass\n intents = copy.deepcopy(intents)\n template.attach(hass, intents)\n\n for name, intent in intents.items():\n if CONF_ACTION in intent:\n intent[CONF_ACTION] = script.Script(\n hass, intent[CONF_ACTION], \"Snips intent {}\".format(name))\n\n self.intents = intents\n\n @asyncio.coroutine\n def handle_intent(self, payload):\n \"\"\"Handle an intent.\"\"\"\n try:\n response = json.loads(payload)\n except TypeError:\n LOGGER.error('Received invalid JSON: %s', payload)\n return\n\n try:\n response = INTENT_SCHEMA(response)\n except vol.Invalid as err:\n LOGGER.error('Intent has invalid schema: %s. %s', err, response)\n return\n\n intent = response['intent']['intentName'].split('__')[-1]\n config = self.intents.get(intent)\n\n if config is None:\n LOGGER.warning(\"Received unknown intent %s. %s\", intent, response)\n return\n\n action = config.get(CONF_ACTION)\n\n if action is not None:\n slots = self.parse_slots(response)\n yield from action.async_run(slots)\n\n # pylint: disable=no-self-use\n def parse_slots(self, response):\n \"\"\"Parse the intent slots.\"\"\"\n parameters = {}\n\n for slot in response.get('slots', []):\n key = slot['slotName']\n value = slot['value']['value']\n if value is not None:\n parameters[key] = value\n\n return parameters\n" }, { "alpha_fraction": 0.4671151041984558, "alphanum_fraction": 0.4693572521209717, "avg_line_length": 24.245283126831055, "blob_id": "56350ee6ce85b81c56bd02152f35345a0f188b4b", "content_id": "5687723e17aeed8b7518c1dbdac3c0cca385b424", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1338, "license_type": "permissive", "max_line_length": 68, "num_lines": 53, "path": "/tests/components/test_snips.py", "repo_name": "flyte/home-assistant", "src_encoding": "UTF-8", "text": "\"\"\"Test the Snips component.\"\"\"\nimport asyncio\n\nfrom homeassistant.bootstrap import async_setup_component\nfrom tests.common import async_fire_mqtt_message, async_mock_service\n\nEXAMPLE_MSG = \"\"\"\n{\n \"input\": \"turn the lights green\",\n \"intent\": {\n \"intentName\": \"Lights\",\n \"probability\": 1\n },\n \"slots\": [\n {\n \"slotName\": \"light_color\",\n \"value\": {\n \"kind\": \"Custom\",\n \"value\": \"blue\"\n }\n }\n ]\n}\n\"\"\"\n\n\n@asyncio.coroutine\ndef test_snips_call_action(hass, mqtt_mock):\n \"\"\"Test calling action via Snips.\"\"\"\n calls = async_mock_service(hass, 'test', 'service')\n\n result = yield from async_setup_component(hass, \"snips\", {\n \"snips\": {\n \"intents\": {\n \"Lights\": {\n \"action\": {\n \"service\": \"test.service\",\n \"data_template\": {\n \"color\": \"{{ light_color }}\"\n }\n }\n }\n }\n }\n })\n assert 
result\n\n async_fire_mqtt_message(hass, 'hermes/nlu/intentParsed',\n EXAMPLE_MSG)\n yield from hass.async_block_till_done()\n assert len(calls) == 1\n call = calls[0]\n assert call.data.get('color') == 'blue'\n" } ]
5
artemv/egis-build-tools
https://github.com/artemv/egis-build-tools
a14157abe340db59e8d11e93bf97a5d3266d9088
470a9b3ef8cba3e2ec4b7865ab54d6201c7fc4c2
bd40746c51764e28ae8b8474152b51b72fe2314b
refs/heads/master
2020-12-25T05:26:34.825641
2020-08-06T20:37:16
2020-08-06T20:37:16
41,791,501
0
0
null
2015-09-02T09:10:09
2020-08-06T20:37:51
2021-05-07T02:50:31
JavaScript
[ { "alpha_fraction": 0.7037037014961243, "alphanum_fraction": 0.7160493731498718, "avg_line_length": 19.25, "blob_id": "ab72af9409ce137ef26f95cb4995e02762e0bdd9", "content_id": "403441276f3635d9bac9305cea6aedb84f104208", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 81, "license_type": "no_license", "max_line_length": 43, "num_lines": 4, "path": "/update-build-tools-deps.sh", "repo_name": "artemv/egis-build-tools", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nBASEDIR=$(dirname \"$0\")\n$BASEDIR/scripts/update-build-tools-deps.sh\n" }, { "alpha_fraction": 0.572826087474823, "alphanum_fraction": 0.58152174949646, "avg_line_length": 24.55555534362793, "blob_id": "e4a945c08112195dd48c9d5952aa9d59385584e3", "content_id": "2486c3090472290012600bf1c54a4007efda03f8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 920, "license_type": "no_license", "max_line_length": 58, "num_lines": 36, "path": "/gulp/styles.js", "repo_name": "artemv/egis-build-tools", "src_encoding": "UTF-8", "text": "/**\n * Created by Nikolay Glushchenko <nick@nickalie.com> on 08.09.2015.\n */\n\n\nvar gulp = require('gulp');\nvar plumber = require('gulp-plumber');\nvar debug = require('gulp-debug');\nvar sourcemaps = require('gulp-sourcemaps');\nvar gzip = require('gulp-gzip');\nvar sass = require('gulp-sass');\nvar common = require('./common');\nvar rename = require('gulp-rename');\nvar utils = require('../utils');\n\nvar main = common.main;\nsass.compiler = require('sass');\ngulp.task('styles', ['sass']);\n\ngulp.task('sass', function ()\n{\n const MAIN = 'style/main.scss';\n if (!utils.exists(MAIN)) {\n return;\n }\n return gulp.src([MAIN])\n .pipe(plumber())\n .pipe(debug())\n .pipe(sourcemaps.init())\n .pipe(sass.sync({includePaths: ['node_modules']}))\n .pipe(rename(main + '.css'))\n .pipe(sourcemaps.write('.'))\n .pipe(gulp.dest('build'))\n .pipe(gzip())\n .pipe(gulp.dest('build'))\n});\n" }, { "alpha_fraction": 0.6336166262626648, "alphanum_fraction": 0.6449480652809143, "avg_line_length": 22.021739959716797, "blob_id": "06d36a0ef50249e0ecac6f3552b1be39b49053ae", "content_id": "786e914faaed407eeab21a199d989fdf5847e161", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1059, "license_type": "no_license", "max_line_length": 82, "num_lines": 46, "path": "/scripts/checkTestResults.py", "repo_name": "artemv/egis-build-tools", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python2\n\nimport os, sys\nimport glob\nimport xml.etree.ElementTree\n\nDIR = os.getenv('DIR', 'test-output')\nREPORTS='./' + DIR + '/junit'\nMIN_TESTS=int(sys.argv[1])\nMAX_FAIL=int(sys.argv[2])\nMAX_ERRORS=int(sys.argv[3])\nfailures = 0\nerrors = 0\ntests = 0\nprint \"MIN_TESTS=%s MAX_FAIL=%s MAX_ERRORS=%s\" % (MIN_TESTS, MAX_FAIL, MAX_ERRORS)\nfor file in glob.glob('%s/*.xml' % REPORTS):\n\ttry:\n\t\te = xml.etree.ElementTree.parse(file).getroot()\n\t\terror= int(e.get('errors'))\n\t\tif error > 0:\n\t\t\tprint file + \" errors \"\n\t\tfailures += int(e.get('failures'))\n\t\terrors += int(e.get('errors'))\n\t\ttests += int(e.get('tests'))\n\t\tfor tc in e.iter('testcase'):\n\t\t\ttests -= len(tc.findall('skipped'))\n\texcept Exception,e:\n\t\terrors += 1\n\t\tprint \"Failed to parse \" + file\n\nprint \"tests=%s failures=%s errors=%s\" % (tests, failures, errors)\n\n\nif (errors > MAX_ERRORS):\n\tprint \"too many errors\"\n\nif (tests < MIN_TESTS):\n\tprint \"too few 
tests run %s < %s \" % (tests, MIN_TESTS)\n\tsys.exit(1)\n\n\nif (failures > MAX_FAIL):\n\tprint \"too many failures\"\n\tsys.exit(1)\n\nsys.exit(0)\n" }, { "alpha_fraction": 0.43699392676353455, "alphanum_fraction": 0.44180160760879517, "avg_line_length": 34.28571319580078, "blob_id": "75077294c14ef401a0afe7842a85fa9d880f47ad", "content_id": "9d1418c545f1a32fb8966345da35a89f2af25762", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 3952, "license_type": "no_license", "max_line_length": 86, "num_lines": 112, "path": "/gulp/gulp-main-bower-files.js", "repo_name": "artemv/egis-build-tools", "src_encoding": "UTF-8", "text": "'use strict';\n\n// gulp-main-bower-files package code inlined and modified\n\nvar through = require('through2');\nvar PluginError = require('plugin-error');\nvar mainBowerFiles = require('main-bower-files');\nvar fs = require('fs');\nvar path = require('path');\n\nfunction getBowerFolder() {\n return 'node_modules' + path.sep;\n}\n\nmodule.exports = function(filter, opts, callback) {\n return through.obj(function(file, enc, cb) {\n if (file.isStream()) {\n console.warn(\"file is stream\");\n this.emit(\n 'error',\n new PluginError('gulp-main-bower-files', 'Streams are not supported!')\n );\n return cb();\n }\n\n if (file.isBuffer()) {\n var bowerFolder = getBowerFolder();\n\n if (filter.filter) {\n opts = filter;\n } else if (typeof filter === 'function') {\n callback = filter;\n opts = null;\n filter = null;\n } else if (\n typeof filter !== 'string' &&\n Array.isArray(filter) === false\n ) {\n if (typeof opts === 'function') {\n callback = opts;\n }\n opts = filter;\n filter = null;\n } else if (typeof opts === 'function') {\n callback = opts;\n opts = null;\n }\n\n opts = opts || {};\n opts.filter = opts.filter || filter;\n opts.paths = opts.path || {};\n opts.paths.bowerJson = file.path;\n opts.paths.bowerDirectory = file.base = path.join(file.base, bowerFolder);\n\n var fileNames = mainBowerFiles(opts, callback).sort(function (a, b) {\n function extractPackageName(jsPath) {\n var parts = jsPath.split('node_modules' + path.sep);\n var parts2 = parts[parts.length - 1].split(path.sep);\n var moduleName = parts2.shift();\n return {moduleName: moduleName, rest: parts2};\n }\n function compareByOrder(name1, name2, order) {\n order = order || [];\n var inameA = order.indexOf(name1);\n var inameB = order.indexOf(name2);\n const INFINITY = 10000;\n if (inameA < 0) {\n inameA = INFINITY;\n }\n if (inameB < 0) {\n inameB = INFINITY;\n }\n var res;\n if (inameA < inameB) {\n res = -1;\n } else if (inameA > inameB) {\n res = 1;\n } else {\n // names are equal\n res = 0;\n }\n return res;\n }\n var da = extractPackageName(a);\n var db = extractPackageName(b);\n var moduleNameA = da.moduleName;\n var moduleNameB = db.moduleName;\n var res;\n if (moduleNameA === moduleNameB) {\n var order = (opts.overrides[moduleNameA] || {}).main\n res = compareByOrder(da.rest.join('/'), db.rest.join('/'), order);\n } else {\n res = compareByOrder(moduleNameA, moduleNameB, opts.order);\n }\n\n return res;\n });\n\n fileNames.forEach(function(fileName) {\n var newFile = file.clone();\n newFile.path = fileName;\n newFile.contents = fs.readFileSync(newFile.path);\n newFile.stat = fs.statSync(newFile.path);\n this.push(newFile);\n }, this);\n } else {\n console.warn(\"it's not a stream and not a buffer\")\n }\n\n cb();\n });\n};\n" }, { "alpha_fraction": 0.527291476726532, "alphanum_fraction": 0.527291476726532, "avg_line_length": 
33.67856979370117, "blob_id": "4c25c31034d606d36f54c89dad55444b71177931", "content_id": "11b5280c57799c380521d21d33c8d0a1e632228b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 971, "license_type": "no_license", "max_line_length": 101, "num_lines": 28, "path": "/gulp/rollup/fix-sourcemaps.js", "repo_name": "artemv/egis-build-tools", "src_encoding": "UTF-8", "text": "var gulp = require('gulp');\nvar replace = require('gulp-replace');\nvar common = require('../common');\n\nmodule.exports = (function () {\n function fix(mapFile, srcDir, destDir) {\n return gulp.src(mapFile)\n .pipe(replace('../../' + srcDir + '/', ''))\n .pipe(gulp.dest(destDir));\n }\n\n return {\n distBundle: function (kind) {\n return fix(common.dist[kind] + '/' + common.bundles[kind] + '.map', common.srcDirs[kind],\n common.dist[kind]);\n },\n\n distBundleRollupIndex: function (kind) {\n return gulp.src(common.dist[kind] + '/' + common.bundles[kind] + '.map')\n .pipe(replace(\".rollup-index.js\", \"../\" + common.dist[kind] + \"/.rollup-index.js\"))\n .pipe(gulp.dest(common.dist[kind]));\n },\n\n endBundle: function (kind) {\n return fix('build/' + common.bundles[kind] + '.map', common.srcDirs[kind], 'build');\n }\n }\n})();\n" }, { "alpha_fraction": 0.5888338088989258, "alphanum_fraction": 0.5930081009864807, "avg_line_length": 24.898649215698242, "blob_id": "adf6346f50b9cdc595687bab82a9802dd679279d", "content_id": "fbf98e123635658c6560b93078087d5e1603a9e0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 3833, "license_type": "no_license", "max_line_length": 97, "num_lines": 148, "path": "/gulp/common.js", "repo_name": "artemv/egis-build-tools", "src_encoding": "UTF-8", "text": "/**\n * Created by Nikolay Glushchenko <nick@nickalie.com> on 08.09.2015.\n */\n\nvar fs = require('fs');\nvar minimist = require('minimist');\nvar lazypipe = require('lazypipe');\nvar replace = require('gulp-replace');\nvar utils = require('../utils');\nvar deploy = process.env.WORK_DIR;\nvar _ = require('lodash');\nvar argv = require('optimist').argv;\n\nvar knownOptions = {\n string: 'env',\n default: {\n env: process.env.NODE_ENV || 'development',\n watch: process.env.watch || false\n\n }\n};\n\nvar options = minimist(process.argv.slice(2), knownOptions);\n\nif (deploy != null)\n{\n deploy += '/work/';\n}\n\nvar pkg = JSON.parse(fs.readFileSync('./package.json', 'utf8'));\nvar mainFile = pkg.mainFile;\nif (!mainFile) {\n var parts = pkg.name.split('/');\n mainFile = parts[parts.length - 1];\n}\n\nvar main = '../build/' + mainFile;\nvar dependenciesJson = {};\n\nif (pkg.plugin != null)\n{\n deploy = deploy || \"build/\";\n deploy += \"plugins\";\n}\nelse\n{\n if (deploy != null)\n {\n deploy += \"webapps\";\n }\n var dPath = 'dependencies.json';\n dependenciesJson = {dependencies: pkg.dependencies};\n if (utils.exists(dPath)) {\n _.assign(dependenciesJson, JSON.parse(fs.readFileSync(dPath, 'utf8')));\n }\n}\n\ndependenciesJson.excludes = dependenciesJson.excludes || [];\ndependenciesJson.standalone = dependenciesJson.standalone || [];\ndependenciesJson.directories = dependenciesJson.directories || {};\ndependenciesJson.overrides = dependenciesJson.overrides || {};\nvar gitHash = (utils.exists('.git/') ? utils.sh('git rev-parse --short HEAD') : 'current');\nvar pkgVersion = (utils.exists('build/.version') ? 
utils.sh('cat build/.version') : '[unknown]');\nvar timestamp = utils.dateFormat(new Date(), '%Y-%m-%d %H:%M:%S')\nvar replaceAll = lazypipe()\n .pipe(function ()\n {\n return replace('@@version', pkgVersion + \" \" + gitHash)\n })\n .pipe(function ()\n {\n return replace('@@js_suffix', '.js?rel=' + gitHash)\n })\n .pipe(function ()\n {\n return replace('@@css_suffix', '.css?rel=' + gitHash)\n })\n .pipe(function ()\n {\n return replace('@@timestamp', timestamp)\n });\n\nvar distDir = 'dist';\nvar bundles = {\n main: mainFile + '.js',\n tests: 'tests-bundle.js',\n examples: 'examples-bundle.js'\n};\n\nvar bundleKinds = ['main', 'tests'];\nif (pkg.examples) bundleKinds.push('examples');\n\npkg = _.assign({build: {}}, pkg);\npkg.build = _.assign({web: true}, pkg.build); // set some defaults\n\nvar mainSrc = options.srcDir || 'src';\n\nvar config = {\n deploy: deploy,\n pkg: pkg,\n bundleKinds: bundleKinds,\n bundles: bundles,\n srcDirs: {\n main: mainSrc,\n tests: 'test',\n examples: 'examples'\n },\n dependenciesJson: dependenciesJson,\n watch: options.watch,\n scheme: argv.scheme || 'http',\n host: argv.host || pkg.host || 'localhost',\n port: argv.port || pkg.port || '8101',\n prod: !process.env.DEV && options.env === 'production',\n main: main,\n replaceAll: replaceAll,\n build: {\n autoImportAll: {\n main: pkg.build.autoImportAll,\n tests: true,\n examples: true\n }\n },\n dist: {\n dir: distDir,\n main: distDir + '/main',\n tests: distDir + '/test',\n examples: distDir + '/examples'\n },\n module: {\n main: pkg.moduleName,\n tests: 'Tests',\n examples: 'Examples'\n },\n egisUiPkgName: '@egis/egis-ui',\n egisUiModuleName: 'EgisUI',\n dependsOnEgisUi: function() {\n return pkg.devDependencies && pkg.devDependencies[config.egisUiPkgName] ||\n pkg.dependencies && pkg.dependencies[config.egisUiPkgName];\n },\n addWebserver: function(deps) {\n if (argv.serve !== 'false') {\n deps.push('webserver')\n }\n return deps\n }\n};\n\nmodule.exports = config;\n" }, { "alpha_fraction": 0.7105262875556946, "alphanum_fraction": 0.7105262875556946, "avg_line_length": 18, "blob_id": "b9c223c037bbf216e30b6f123bcc2b1dec663439", "content_id": "bbe7dbf0177323b5273d1f64e1075bda2a45e86f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 38, "license_type": "no_license", "max_line_length": 25, "num_lines": 2, "path": "/npm-install.sh", "repo_name": "artemv/egis-build-tools", "src_encoding": "UTF-8", "text": "#!/bin/bash\nnpm install --unsafe-perm\n" }, { "alpha_fraction": 0.5546971559524536, "alphanum_fraction": 0.5590234994888306, "avg_line_length": 29.242990493774414, "blob_id": "5a8337edcf14a484f865b4f7c8229fd2981fe0f1", "content_id": "961c9f81327af069c8b9b6a04d26ead1da1456e3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 3236, "license_type": "no_license", "max_line_length": 117, "num_lines": 107, "path": "/gulp/browsersync.js", "repo_name": "artemv/egis-build-tools", "src_encoding": "UTF-8", "text": "\"use strict\";\n\n/**\n * Please use Node 7.0.0+ to run this script\n */\n\nvar gulp = require('gulp');\nvar browserSync = require('browser-sync').create();\nvar common = require('./common');\nvar _ = require('lodash');\nvar glob = require('glob');\nvar utils = require('../utils');\nvar fs = require('fs');\nvar argv = require('optimist').argv;\nvar is = require('is_js');\n\nfunction loadAppConfig(pkg, pkgRootDir, serveStaticMap, watchedFiles) {\n if (pkg.routes) {\n _.each(pkg.routes, 
routeItem => {\n let route, routeDir;\n if (is.array(routeItem)) {\n [route, routeDir] = routeItem;\n } else {\n route = routeItem;\n routeDir = 'build'\n }\n if (!route.startsWith('/')) route = '/web/' + route;\n serveStaticMap[route] = serveStaticMap[route] || [];\n\n //serve css directly from app' build folder - this makes CSS injection work and persist on browser reload\n let path = '../' + pkgRootDir + '/' + routeDir;\n serveStaticMap[route].push(path);\n console.log(pkgRootDir + ' adds route:', `${route} => ${path}`);\n });\n }\n\n if (pkg.watchedFiles) {\n _.flatten([pkg.watchedFiles]).forEach(path => {\n path = '../' + pkgRootDir + '/' + path;\n console.log(pkgRootDir + ' adds watched path:', path);\n watchedFiles.push(path);\n });\n }\n}\n\nlet loadJsonConfig = function (fileName, filePath) {\n let cfg;\n fileName = filePath + '/' + fileName;\n if (utils.exists(fileName)) {\n try {\n cfg = JSON.parse(fs.readFileSync(fileName, 'utf8'));\n } catch (e) {\n console.error('Failed parsing config file' + fileName + ':', e);\n }\n }\n return cfg;\n};\n\nfunction loadApps(rootDir, config) {\n let serveStaticMap = {};\n let watchedFiles = [];\n\n glob.sync(rootDir + '/*/').forEach(filePath => {\n filePath = filePath.substring(0, filePath.length - 1);\n let pkg = loadJsonConfig('package.json', filePath);\n if (!pkg) return;\n let pkgDir = filePath.split('/');\n pkgDir = pkgDir[pkgDir.length - 1];\n if (pkg.plugin && pkgDir !== argv.plugin) {\n console.log(`ignoring plugin ${pkgDir} because it's not the requested one`);\n return\n }\n let cfg = loadJsonConfig('portal-browser-sync.json', filePath);\n if (cfg) loadAppConfig(cfg, pkgDir, serveStaticMap, watchedFiles);\n });\n\n let serveStatic = [];\n\n _.forEach(serveStaticMap, (value, key) => {\n serveStatic.push({route: key, dir: value});\n });\n\n config.files = watchedFiles;\n config.serveStatic = serveStatic;\n}\n\ngulp.task('browsersync', () => {\n let toProxy = (argv['proxied-host'] || 'localhost');\n if (!toProxy.startsWith('https')) {\n toProxy = toProxy + ':' + (argv['proxied-port'] || 8080);\n }\n\n let config = {\n proxy: toProxy,\n startPath: \"/web/portal\",\n open: 'external',\n ghostMode: false,\n reloadDebounce: 500\n };\n\n if (argv.tunnel) config.tunnel = argv.tunnel;\n if (common.port) config.port = common.port;\n\n loadApps('..', config);\n\n browserSync.init(config);\n});\n" }, { "alpha_fraction": 0.5773555636405945, "alphanum_fraction": 0.6056610941886902, "avg_line_length": 55.065216064453125, "blob_id": "61f420eb287ae07e1a4cf8589fa518058f4f019b", "content_id": "94b85993d363eb21024fe551e3e9b2df407fabc9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2579, "license_type": "no_license", "max_line_length": 103, "num_lines": 46, "path": "/gulp/rollup/rollup.js", "repo_name": "artemv/egis-build-tools", "src_encoding": "UTF-8", "text": "var betterRollup = require('gulp-better-rollup');\nvar babel = require('rollup-plugin-babel');\nvar common = require('../common');\nvar rollup = require('rollup');\n\nmodule.exports = function (moduleName) {\n var globals = {};\n globals[common.egisUiModuleName] = common.egisUiModuleName;\n if (common.module.main) {\n globals[common.module.main] = common.module.main; //allows importing own module for tests\n }\n return betterRollup({\n rollup,\n plugins: [babel({\n 'plugins': [\n [require(\"babel-plugin-transform-es2015-template-literals\"), {loose: true}],\n [require(\"babel-plugin-transform-es2015-literals\"), {loose: true}],\n 
[require(\"babel-plugin-transform-es2015-function-name\"), {loose: true}],\n [require(\"babel-plugin-transform-es2015-arrow-functions\"), {loose: true}],\n [require(\"babel-plugin-transform-es2015-block-scoped-functions\"), {loose: true}],\n [require(\"babel-plugin-transform-es2015-classes\"), {loose: true}],\n [require(\"babel-plugin-transform-es2015-object-super\"), {loose: true}],\n [require(\"babel-plugin-transform-es2015-shorthand-properties\"), {loose: true}],\n [require(\"babel-plugin-transform-es2015-computed-properties\"), {loose: true}],\n [require(\"babel-plugin-transform-es2015-for-of\"), {loose: true}],\n [require(\"babel-plugin-transform-es2015-sticky-regex\"), {loose: true}],\n [require(\"babel-plugin-transform-es2015-unicode-regex\"), {loose: true}],\n [require(\"babel-plugin-check-es2015-constants\"), {loose: true}],\n [require(\"babel-plugin-transform-es2015-spread\"), {loose: true}],\n [require(\"babel-plugin-transform-es2015-parameters\"), {loose: true}],\n [require(\"babel-plugin-transform-es2015-destructuring\"), {loose: true}],\n [require(\"babel-plugin-transform-es2015-block-scoping\"), {loose: true}],\n [require(\"babel-plugin-transform-es2015-typeof-symbol\"), {loose: true}],\n [require(\"babel-plugin-external-helpers-2\"), {loose: true}],\n [require(\"babel-plugin-transform-regenerator\"), {async: false, asyncGenerators: false}]\n ],\n 'highlightCode': true\n })]\n }, {\n // any option supported by rollup can be set here, including sourceMap\n format: common.pkg.bundleFormat || 'iife',\n // useStrict: false,\n name: moduleName,\n globals: globals\n })\n};\n" }, { "alpha_fraction": 0.6460176706314087, "alphanum_fraction": 0.6548672318458557, "avg_line_length": 17.83333396911621, "blob_id": "674150db16dcab922d3bbc256831b05ca7e46166", "content_id": "c7007f19041ea133ad12cb84f25e73f774c0840a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 113, "license_type": "no_license", "max_line_length": 45, "num_lines": 6, "path": "/scripts/install-if-not-there.sh", "repo_name": "artemv/egis-build-tools", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\npackage=$1\nif [ ! -d node_modules/$package ]; then\n yarn-retry -- add $package --ignore-engines\nfi\n" }, { "alpha_fraction": 0.5889570713043213, "alphanum_fraction": 0.5889570713043213, "avg_line_length": 22.285715103149414, "blob_id": "40d504fbe2145f376fe2af70ae4bc2769f7ef63f", "content_id": "9d0b42fc680505e3caf13def6a8d504927eb06f1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 163, "license_type": "no_license", "max_line_length": 67, "num_lines": 7, "path": "/gulp/del-dist.js", "repo_name": "artemv/egis-build-tools", "src_encoding": "UTF-8", "text": "var gulp = require('gulp');\nvar del = require('del');\n\nmodule.exports = function(distDir)\n{\n return del.sync([distDir + '/**', '!' 
+ distDir], {dot: true});\n};\n" }, { "alpha_fraction": 0.5495327115058899, "alphanum_fraction": 0.5570093393325806, "avg_line_length": 30.47058868408203, "blob_id": "de97b52b54b23eb00105fd1cc94a2cc359fcbdaa", "content_id": "af488433ed921319f84f445bf382bc2efcf8eb2b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1070, "license_type": "no_license", "max_line_length": 154, "num_lines": 34, "path": "/gulp/package.js", "repo_name": "artemv/egis-build-tools", "src_encoding": "UTF-8", "text": "/**\n * Created by Nikolay Glushchenko <nick@nickalie.com> on 08.09.2015.\n */\n\nvar gulp = require('gulp');\nvar addsrc = require('gulp-add-src');\nvar zip = require('gulp-zip');\nvar del = require('del');\n\nvar common = require('./common');\nvar utils = require('../utils');\nvar pkg = common.pkg;\nvar deploy = common.deploy;\nconsole.log('');\n\nmodule.exports = function()\n{\n    if (!common.module.main) return;\n    if (utils.exists('build/.version')) {\n        utils.sh('cp build/.version build/version');\n    } else {\n        console.log('No build/.version - are we in dev mode?');\n    }\n    var file = common.module.main + (pkg.plugin ? \".zip\" : \".war\");\n    del.sync('build/' + file);\n    console.log('Deploying to ' + (deploy ? deploy : '.') + \"/\" + file);\n    let res = gulp.src([\"build/**/*\", '!**/' + file, '!build/' + common.module.main + '/', '!build/' + common.module.main + '/**/*', '!build/test/**/**'])\n        .pipe(addsrc(common.dist.main + \"/*.png\"))\n        .pipe(zip(file));\n    if (deploy) {\n        res = res.pipe(gulp.dest(deploy));\n    }\n    return res.pipe(gulp.dest('.'));\n};\n" }, { "alpha_fraction": 0.7085116505622864, "alphanum_fraction": 0.7142177820205688, "avg_line_length": 27.41891860961914, "blob_id": "e60d519dd903f190f436cc776d25251a3b880338", "content_id": "33e16c3d43118efdd737423856be27bf27f89776", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2103, "license_type": "no_license", "max_line_length": 229, "num_lines": 74, "path": "/CODESTYLE.md", "repo_name": "artemv/egis-build-tools", "src_encoding": "UTF-8", "text": "## Git Commit Guidelines\n(stolen from https://github.com/angular/angular.js/blob/master/CONTRIBUTING.md)\n\nWe have very precise rules over how our git commit messages can be formatted. This leads to more readable messages that are easy to follow when looking through the project history.\n\nThe commit message formatting can be added using a typical git workflow or through the use of a CLI wizard (Commitizen). To use the wizard, run npm run commit in your terminal after staging your changes in git.\n\n### Commit Message Format\nEach commit message consists of a header, a body and a footer. The header has a special format that includes a type, a scope and a subject:\n\n`<type>: <subject>`\n\nAny line of the commit message cannot be longer than 100 characters! 
This allows the message to be easier to read on GitHub as well as in various git tools.\n\n## Revert\nIf the commit reverts a previous commit, it should begin with revert:, followed by the header of the reverted commit. In the body it should say: This reverts commit <hash>., where the hash is the SHA of the commit being reverted.\n\n### Type\nMust be one of the following:\n\nfeat: A new feature \nfix: A bug fix \nrevert: Revert a previous commit \ndocs: Documentation only changes \nstyle: Changes that do not affect the meaning of the code (white-space, formatting, missing semi-colons, etc) \nrefactor: A code change that neither fixes a bug nor adds a feature \nperf: A code change that improves performance \ntest: Adding missing tests \nchore: Changes to the build process or auxiliary tools and libraries such as documentation generation \n\n## Code Style\n```javascript\nclass CamelCaseClasses {\n\n\tconstructor(options) {\n\t\tsuper(options);\n\t}\n\t\n\tget value() {\n\t\treturn this._value;\n\t}\n\t\n\tset value(value) {\n\t\tthis._value = value;\n\t}\n\t\n\t/** @ignore **/\n\tprivateMethod() {\n\t\n\t}\n\t\n\tcamelCaseMethods(arg1, arg2) {\n\t\n\t\t// 2.D.1.1\n\t\tif (condition) {\n\t\t // statements\n\t\t}\n\n\t\twhile (condition) {\n\t\t // statements\n\t\t}\n\n\t\tfor (var i = 0; i < 100; i++) {\n\t\t // statements\n\t\t}\n\n\t\tif (true) {\n\t\t // statements\n\t\t} else {\n\t\t // statements\n\t\t} \n }\n}\n```\n" }, { "alpha_fraction": 0.5214646458625793, "alphanum_fraction": 0.5250421166419983, "avg_line_length": 39.26271057128906, "blob_id": "9aaf792f38f665688d564cfb3a425c16fa007a1e", "content_id": "9200519eed225e1decde75068c2cb04f6a62a852", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 4752, "license_type": "no_license", "max_line_length": 119, "num_lines": 118, "path": "/gulp/dev-bundle.js", "repo_name": "artemv/egis-build-tools", "src_encoding": "UTF-8", "text": "var gulp = require('gulp');\nvar sourcemaps = require('gulp-sourcemaps');\nvar concat = require('gulp-concat');\nvar replace = require('gulp-replace');\nvar babel = require('gulp-babel');\nvar debug = require('gulp-debug');\nvar print = require('gulp-print').default;\nvar changed = require('gulp-changed');\nvar plumber = require('gulp-plumber');\nvar _ = require('lodash');\nvar gzip = require('gulp-gzip');\nvar pseudoconcat = require('gulp-pseudoconcat-js');\nvar common = require('./common');\nvar utils = require('../utils');\n\n_.each(common.bundleKinds, function(kind) {\n gulp.task('dev-recompile-' + kind, function() {\n var srcDir = common.srcDirs[kind];\n var destDir = common.dist[kind];\n var t0 = {};\n\n let sources = [srcDir + '/**/*.js'];\n let s1 = srcDir + '/.lib-exports.js';\n if (utils.exists(s1)) {\n sources.push(s1);\n }\n sources.push('!' 
+ srcDir + '/**/*_scsslint_*');\n return gulp.src(sources)\n .pipe(changed(destDir))\n .pipe(print(function(filename) {\n var t = new Date().getTime();\n var fnKey = filename.replace(srcDir, '');\n t0[fnKey] = t;\n }))\n .pipe(common.replaceAll())\n .pipe(sourcemaps.init())\n .pipe(plumber())\n .pipe(debug())\n .pipe(babel({\n highlightCode: true,\n presets: ['es2015']\n }))\n .pipe(sourcemaps.write('.', {includeContent: true}))\n .pipe(gulp.dest(destDir))\n .pipe(print(function(filename) {\n var t = new Date().getTime();\n var fnKey = filename.replace(destDir, '').replace('.map', '');\n var res = t - (t0[fnKey] || 0);\n var report = ['done ', filename, ' in ', res, 'ms'];\n return report.join(\"\");\n }));\n });\n\n gulp.task('generate-systemjs-' + kind + '-index', ['gen-stage2-wildcard-exports-' + kind, 'dev-recompile-' + kind],\n function() {\n var destDir = common.dist[kind];\n let sources = utils.filterExistingFiles([destDir + '/.work/.rollup-wildcard-exports.js',\n destDir + '/.lib-exports.js']);\n if (sources.length === 0) {\n return;\n }\n return gulp.src(sources)\n .pipe(debug())\n .pipe(replace(/export \\* from '(.+)'/g, \"require('$1')\"))\n .pipe(concat('dev-index.js')) //not with dot 'cause Gulp webserver doesn't serve .dotfiles\n .pipe(gulp.dest(destDir + '/'))\n });\n\n gulp.task('dist-' + kind + '-systemjs', function() {\n var systemjsDir = 'node_modules/systemjs';\n return gulp.src([\n systemjsDir + '/dist/system-polyfills.js',\n systemjsDir + '/dist/system.js'\n ])\n .pipe(gulp.dest(common.dist[kind]))\n });\n\n var prepareDevLoaderTaskDeps = [];\n if (kind === 'main') prepareDevLoaderTaskDeps.push('dist-' + kind + '-systemjs');\n gulp.task('prepare-' + kind + '-dev-loader', prepareDevLoaderTaskDeps, function() {\n let s1 = common.srcDirs[kind] + '/.dev-loader.js';\n if (!utils.exists(s1)) {\n return;\n }\n return gulp.src(s1)\n .pipe(sourcemaps.init())\n .pipe(replace('http:', common.scheme + ':'))\n .pipe(replace('HOST', common.host))\n .pipe(replace('PORT', common.port))\n .pipe(concat('dev-loader.js')) //not with dot 'cause Gulp webserver doesn't serve .dotfiles\n .pipe(sourcemaps.write('.', {includeContent: true}))\n .pipe(gulp.dest(common.dist[kind]))\n });\n\n var devBundleTaskDeps = ['generate-systemjs-' + kind + '-index', 'dev-recompile-' + kind,\n 'prepare-' + kind + '-dev-loader'];\n if (kind === 'main') devBundleTaskDeps.push('templates');\n\n gulp.task('dev-bundle-' + kind, devBundleTaskDeps, function() {\n\n var destDir = common.dist[kind];\n let sources = utils.filterExistingFiles([common.dist[kind] + '/dev-loader.js']);\n sources.unshift(common.dist[kind] + '/templates/*.js');\n if (kind === 'main') {\n sources.unshift(common.dist['main'] + '/system.js');\n destDir = 'build';\n }\n return gulp.src(sources)\n .pipe(sourcemaps.init({loadMaps: true}))\n .pipe(pseudoconcat(common.bundles[kind], {\n host: common.scheme + '://' + common.host + ':' + common.port + '/'\n }))\n .pipe(sourcemaps.write('.', {includeContent: true}))\n .pipe(gulp.dest(destDir))\n .pipe(gzip())\n .pipe(gulp.dest(destDir));\n });\n});\n\n" }, { "alpha_fraction": 0.6229507923126221, "alphanum_fraction": 0.7049180269241333, "avg_line_length": 19.33333396911621, "blob_id": "ce6276ad255799e47e8cbe9fa1991eaa386e0e38", "content_id": "21e7c615f9f6d4e0502c0cc42081d99d593ae825", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 61, "license_type": "no_license", "max_line_length": 47, "num_lines": 3, "path": "/scripts/install-dont-break.sh", 
"repo_name": "artemv/egis-build-tools", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nyarn global add @artemv/dont-break@1.13.5-pre.1\n" }, { "alpha_fraction": 0.5367231369018555, "alphanum_fraction": 0.5593220591545105, "avg_line_length": 19.882352828979492, "blob_id": "b3bc00182bd4475da81d80ca511c01d54fbdc81c", "content_id": "f45b8ca8b64aee30641591f4b9a081dc6a210eff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 354, "license_type": "no_license", "max_line_length": 48, "num_lines": 17, "path": "/gulp/resources.js", "repo_name": "artemv/egis-build-tools", "src_encoding": "UTF-8", "text": "/**\n * Created by Nikolay Glushchenko <nick@nickalie.com> on 08.09.2015.\n */\n\nvar gulp = require('gulp');\nvar replaceAll = require('./common').replaceAll;\n\nmodule.exports = function ()\n{\n gulp.src('resources/**/*')\n .pipe(gulp.dest(\"build\"))\n .pipe(replaceAll());\n\n return gulp.src(\"*.html\")\n .pipe(replaceAll())\n .pipe(gulp.dest(\"build\"));\n};" }, { "alpha_fraction": 0.5855928659439087, "alphanum_fraction": 0.5855928659439087, "avg_line_length": 35.338233947753906, "blob_id": "5a2385b3afd54d11a6cb63bf8c00b36d529ae37a", "content_id": "bb84cdd4e913797c0112650316b82623e84b2beb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2471, "license_type": "no_license", "max_line_length": 133, "num_lines": 68, "path": "/gulp/plugin.js", "repo_name": "artemv/egis-build-tools", "src_encoding": "UTF-8", "text": "var gulp = require('gulp');\nvar debug = require('gulp-debug');\nvar gulpif = require('gulp-if');\nvar replace = require('gulp-replace');\nvar zip = require('gulp-zip');\nvar concat = require('gulp-concat');\nvar _ = require('lodash');\nvar path = require('path');\nvar shelljs = require('shelljs');\nvar watch = require('gulp-watch');\n\nvar common = require('./common');\n\ngulp.task('plugin_concat', ['compile-main', 'templates'], function() {\n return gulp.src([common.dist.main + \"/**/*.js\", common.dist.main + \"/templates/*.js\"])\n .pipe(common.replaceAll())\n .pipe(concat( common.bundles.main ))\n .pipe(gulpif(common.watch, replace('/dist/', '/')))\n .pipe(gulpif(common.watch, replace('http://localhost:' + common.port + '/../', 'http://localhost:' + common.port + '/')))\n .pipe(debug())\n .pipe(gulp.dest('build/'))\n});\n\nfunction packagePlugin() {\n var file = common.module.main + (common.pkg.plugin ? 
\".zip\" : \".war\");\n var distDir = 'tmp';\n shelljs.rm('-rf', distDir);\n var pluginDir = path.join(distDir, \"System\", \"plugins\", common.pkg.plugin);\n shelljs.mkdir(\"-p\", pluginDir);\n shelljs.cp(\"build/*.js\", pluginDir);\n var metaInfPluginDir = path.join(distDir, \"META-INF\");\n shelljs.mkdir(\"-p\", metaInfPluginDir);\n shelljs.cp(\"package.json\", metaInfPluginDir);\n console.log('Deploying to ' + common.deploy + \"/\" + file);\n return gulp.src([\"tmp/**/*\"])\n .pipe(zip(file))\n .pipe(gulp.dest(common.deploy))\n .pipe(gulp.dest('.'))\n}\n\ngulp.task('plugin', ['plugin_concat'], function() {\n return packagePlugin();\n});\n\ngulp.task('plugin-dev-package', ['dev-bundle-main', 'styles', 'resources'], function() {\n return packagePlugin();\n});\n\ngulp.task('plugin_watch', common.addWebserver(['plugin-dev-package', 'dev-bundle-tests']), function() {\n _.each(common.bundleKinds, function(kind) {\n watch([common.srcDirs[kind] + '/**/*.js'], function() {\n gulp.start('dev-recompile-' + kind);\n });\n watch([common.srcDirs[kind] + '/.lib-exports.js'], function() {\n gulp.start('dev-recompile-' + kind);\n gulp.start('generate-systemjs-' + kind + '-index');\n });\n });\n watch('src/.dev-loader.js', function() {\n gulp.start('plugin-dev-package');\n });\n watch('src/**/*.hbs', function() {\n gulp.start('templates');\n });\n watch('style/**/*.*', function() {\n gulp.start('styles');\n });\n});\n" }, { "alpha_fraction": 0.5120435357093811, "alphanum_fraction": 0.5146335363388062, "avg_line_length": 38, "blob_id": "e00ed773c95592a6fd280f149b2a40379caa95b6", "content_id": "3fbd9560aa7e64525aa0c6b80d097830a51cfe95", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 3861, "license_type": "no_license", "max_line_length": 116, "num_lines": 99, "path": "/gulp/rollup/generate-es6-index-tasks.js", "repo_name": "artemv/egis-build-tools", "src_encoding": "UTF-8", "text": "var gulp = require('gulp');\nvar jsonTransform = require('gulp-json-transform');\nvar directoryMap = require(\"gulp-directory-map\");\nvar concat = require('gulp-concat');\nvar _ = require('lodash');\nvar is = require('is');\nvar replace = require('gulp-replace');\nvar plumber = require('gulp-plumber');\nvar sourcemaps = require('gulp-sourcemaps');\nvar debug = require('gulp-debug');\nvar common = require('../common');\nvar utils = require('../../utils');\n\nmodule.exports = function(kind) {\n var srcDir = common.srcDirs[kind];\n var destDir = common.dist[kind];\n var up = '../../'; //let's improve when needed\n var workDir = destDir + '/.work';\n\n gulp.task('prepare-lib-exports-rollup-' + kind, function () {\n if (!utils.exists(srcDir + '/.lib-exports.js')) {\n return;\n }\n return gulp.src([srcDir + '/.lib-exports.js'])\n .pipe(sourcemaps.init())\n .pipe(replace('./', up + srcDir + '/'))\n .pipe(sourcemaps.write('.', {includeContent: false, sourceRoot: up + srcDir}))\n .pipe(gulp.dest(destDir + '/'));\n });\n\n gulp.task('copy-rollup-index-' + kind, ['prepare-lib-exports-rollup-' + kind], function () {\n let sources = utils.filterExistingFiles([destDir + '/.lib-exports.js']);\n if (common.build.autoImportAll[kind]) {\n sources.push(__dirname + '/propagate/.rollup-index-proto.js');\n }\n if (sources.length === 0) {\n return;\n }\n return gulp.src(sources, { base: 'src' })\n .pipe(sourcemaps.init({loadMaps: true}))\n .pipe(concat('.rollup-index.js'))\n .pipe(sourcemaps.write('.', {includeContent: false, sourceRoot: up + srcDir}))\n .pipe(gulp.dest(destDir + 
'/'));\n\n });\n\n gulp.task('gen-stage1-file-list-' + kind, function ()\n {\n return gulp.src([srcDir + '/**/*.js', '!' + srcDir + '/.lib-exports.js', '!' + srcDir + '/**/*_scsslint_*'])\n .pipe(plumber())\n .pipe(debug())\n .pipe(directoryMap({\n filename: 'modules.json'\n }))\n .pipe(gulp.dest(workDir))\n });\n\n gulp.task('gen-stage2-wildcard-exports-' + kind, ['gen-stage1-file-list-' + kind], function () {\n let sources = utils.filterExistingFiles([workDir + '/modules.json']);\n if (sources.length === 0) {\n return;\n }\n return gulp.src(sources)\n .pipe(jsonTransform(function(data) {\n var blacklist = [];\n var lines = [];\n var fillLines;\n fillLines = function(modulesPathes) {\n _.forIn(modulesPathes, function(modulePath) {\n if (is.string(modulePath)) {\n if (blacklist.indexOf(modulePath) === -1) {\n lines.push(\"export * from './\" + modulePath.replace(/\\.js$/, '') + \"';\");\n }\n } else {\n fillLines(modulePath);\n }\n });\n };\n fillLines(data);\n return lines.sort().join('\\n');\n }))\n .pipe(concat('.rollup-wildcard-exports.js'))\n .pipe(gulp.dest(workDir))\n });\n\n gulp.task('gen-stage3-finalize-exports-' + kind, ['gen-stage2-wildcard-exports-' + kind], function ()\n {\n return gulp.src([workDir + '/.rollup-wildcard-exports.js'])\n .pipe(replace('./', up + srcDir + '/'))\n .pipe(concat('.rollup-wildcard-exports.js'))\n .pipe(gulp.dest(destDir + '/'))\n });\n\n var lastDeps = ['copy-rollup-index-' + kind];\n if (common.build.autoImportAll[kind]) {\n lastDeps.push('gen-stage3-finalize-exports-' + kind);\n }\n gulp.task('generate-es6-index-' + kind, lastDeps);\n};\n" }, { "alpha_fraction": 0.5895153284072876, "alphanum_fraction": 0.6003956198692322, "avg_line_length": 30.59375, "blob_id": "dd0b631dcf133a644ecb97468f8702eb5e13d963", "content_id": "0fda8e154efc8332bc7b34793526d661d699d323", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1011, "license_type": "no_license", "max_line_length": 89, "num_lines": 32, "path": "/gulp/templates.js", "repo_name": "artemv/egis-build-tools", "src_encoding": "UTF-8", "text": "/**\n * Created by Nikolay Glushchenko <nick@nickalie.com> on 08.09.2015.\n */\n\nvar gulp = require('gulp');\nvar plumber = require('gulp-plumber');\nvar handlebars = require('gulp-handlebars');\nvar wrap = require('gulp-wrap');\nvar declare = require('gulp-declare');\nvar flatten = require('gulp-flatten');\nvar common = require('./common');\nvar sourcemaps = require('gulp-sourcemaps');\nvar handlebars0 = require('handlebars');\n\nmodule.exports = function()\n{\n console.log('compiling templates with handlebars', handlebars0.VERSION);\n return gulp.src(\"src/**/*.hbs\")\n .pipe(sourcemaps.init())\n .pipe(plumber())\n .pipe(handlebars({\n handlebars: handlebars0\n }))\n .pipe(wrap('Handlebars.template(<%= contents %>)'))\n .pipe(declare({\n namespace: 'TEMPLATES',\n root: 'window'\n }))\n .pipe(flatten())\n .pipe(sourcemaps.write('.', {includeContent: false, sourceRoot: '../../../src'}))\n .pipe(gulp.dest(common.dist.main + '/templates'));\n};\n" }, { "alpha_fraction": 0.7209302186965942, "alphanum_fraction": 0.7209302186965942, "avg_line_length": 33.400001525878906, "blob_id": "dcb44d11d3f8a655663fb279eb1791847bed1005", "content_id": "ae97782d20b184fad03f885326bcedc4c0d35b0b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 172, "license_type": "no_license", "max_line_length": 107, "num_lines": 5, "path": 
"/gulp/rollup/propagate/.rollup-index-proto.js", "repo_name": "artemv/egis-build-tools", "src_encoding": "UTF-8", "text": "/**\n * This file is autoexported from build-tools, be sure to write changes there, otherwise they will be lost.\n */\n\nimport * as exports from './.rollup-wildcard-exports';\n" }, { "alpha_fraction": 0.6204100847244263, "alphanum_fraction": 0.6242250800132751, "avg_line_length": 33.37704849243164, "blob_id": "ff91a02317a8546d7d3bf308ed4715ceb1b745b2", "content_id": "4b4ef9bd1f1f2b5e2ebbae180783f35784691d49", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2097, "license_type": "no_license", "max_line_length": 127, "num_lines": 61, "path": "/gulp/bundle.js", "repo_name": "artemv/egis-build-tools", "src_encoding": "UTF-8", "text": "/**\n * Created by Nikolay Glushchenko <nick@nickalie.com> on 08.09.2015.\n */\n\nvar gulp = require('gulp');\nvar sourcemaps = require('gulp-sourcemaps');\nvar concat = require('gulp-concat');\nvar uglify = require('gulp-uglify');\nvar gzip = require('gulp-gzip');\nvar common = require('./common');\nvar fixSourcemaps = require('./rollup/fix-sourcemaps');\nvar _ = require('lodash');\n\n_.each(common.bundleKinds, function(kind) {\n gulp.task('fix-' + kind + '-sourcemaps-rollup-index', ['compile-' + kind], function() {\n return fixSourcemaps.distBundleRollupIndex(kind);\n });\n gulp.task('fix-' + kind + '-sourcemaps-main', ['compile-' + kind, 'fix-' + kind + '-sourcemaps-rollup-index'], function() {\n return fixSourcemaps.distBundle(kind);\n });\n gulp.task('fix-' + kind + '-sourcemaps', ['fix-' + kind + '-sourcemaps-rollup-index', 'fix-' + kind + '-sourcemaps-main']);\n});\n\ngulp.task('fix-main-build-sourcemaps', ['do-bundle-main'], function() {\n return fixSourcemaps.endBundle('main');\n});\n\ngulp.task('do-bundle-main', ['compile-main', 'templates', 'fix-main-sourcemaps'], function() {\n var prod = common.prod;\n var res = gulp.src([common.dist.main + '/templates/*.js', common.dist.main + '/' + common.bundles.main],\n { base: common.dist.main })\n .pipe(sourcemaps.init({loadMaps: true}))\n .pipe(common.replaceAll());\n\n if (prod) {\n res = res.pipe(uglify({mangle: false}));\n }\n\n res = res.pipe(concat(common.bundles.main))\n .pipe(sourcemaps.write('.', {includeContent: false, sourceRoot: '../' + common.srcDirs.main}))\n .pipe(gulp.dest('build'));\n\n if (prod) {\n res = res.pipe(gzip())\n .pipe(gulp.dest('build'));\n }\n\n return res;\n});\n\ngulp.task('bundle-main', ['do-bundle-main', 'fix-main-build-sourcemaps']);\n\nvar bundleTaskDeps = ['bundle-main'];\nif (common.pkg.examples) {\n gulp.task('bundle-examples', ['compile-examples', 'fix-examples-sourcemaps']);\n bundleTaskDeps.push('bundle-examples')\n}\n\ngulp.task('bundle', bundleTaskDeps);\n\ngulp.task('bundle-tests', ['compile-tests', 'fix-tests-sourcemaps']);\n" }, { "alpha_fraction": 0.6213217377662659, "alphanum_fraction": 0.6251809000968933, "avg_line_length": 32.98360824584961, "blob_id": "5937329c652f77cea077ffdef54b8af4c430bd0e", "content_id": "6b07c82972c1f39a6562851d90bcb95b466b5866", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2073, "license_type": "no_license", "max_line_length": 89, "num_lines": 61, "path": "/Gulpfile.js", "repo_name": "artemv/egis-build-tools", "src_encoding": "UTF-8", "text": "/**\n * Created by Nikolay Glushchenko <nick@nickalie.com> on 08.09.2015.\n */\n\nvar gulp = require('gulp');\nvar _ = require('lodash');\nvar path = 
require('path');\nvar watch = require('gulp-watch');\n\nvar resources = require('./gulp/resources');\nvar dependencies = require('./gulp/dependencies');\nvar common = require('./gulp/common');\nvar pack = require('./gulp/package');\nvar partials = require('./gulp/partials');\nvar templates = require('./gulp/templates');\nvar webserver = require('./gulp/webserver');\n\nrequire('./gulp/styles');\nrequire('./gulp/bundle');\nrequire('./gulp/dev-bundle');\nrequire('./gulp/rollup/tasks');\nrequire('./gulp/plugin');\nrequire('./gulp/karma-testingbot');\n\ngulp.task('resources', resources);\ngulp.task('dependencies', ['resources'], dependencies);\ngulp.task('package', ['all'], pack);\ngulp.task('all', ['bundle', 'styles', 'resources']);\ngulp.task('templates', ['partials'], templates);\ngulp.task('partials', partials);\ngulp.task('default', ['package', 'webserver', 'watch']);\n\ngulp.task('webserver', webserver(common.host, common.port));\n\nvar devPackageTaskDeps = ['dev-bundle-main', 'styles', 'resources'];\nif (common.pkg.examples) devPackageTaskDeps.push('dev-bundle-examples');\n\ngulp.task('test', function() {});\n\ngulp.task('dev-package', devPackageTaskDeps, pack);\n\ngulp.task('watch', common.addWebserver(['dev-package', 'dev-bundle-tests']), function() {\n _.each(common.bundleKinds, function(kind) {\n watch(path.join(common.srcDirs[kind], '**', '*.js'), function() {\n gulp.start('dev-recompile-' + kind);\n });\n watch(path.join(common.srcDirs[kind], '.lib-exports.js'), function() {\n gulp.start('dev-recompile-' + kind);\n gulp.start('generate-systemjs-' + kind + '-index');\n });\n });\n watch(path.join(common.srcDirs.main, '.dev-loader.js'), function() {\n gulp.start('dev-package');\n });\n watch(path.join(common.srcDirs.main, '**', '*.hbs'), function() {\n gulp.start('templates');\n });\n watch('style/**/*.*', function() {\n gulp.start('styles');\n });\n});\n" }, { "alpha_fraction": 0.7631579041481018, "alphanum_fraction": 0.7631579041481018, "avg_line_length": 94, "blob_id": "aee69a9a2528e2f5b8278362f9583f5afa66832b", "content_id": "282461a2abd6d213bd43f03b63396672ab6a9975", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 190, "license_type": "no_license", "max_line_length": 112, "num_lines": 2, "path": "/scripts/dont-break-tests.sh", "repo_name": "artemv/egis-build-tools", "src_encoding": "UTF-8", "text": "# note that this is executed from client project folder, after `npm install`\nyarn dependencies && yarn build && yarn test:build && yarn test --browsers=Chrome --reporters=junit,html,verbose\n" } ]
24
q145492675/ADDA
https://github.com/q145492675/ADDA
6092d0b8e3ea57dff46cc33ea94dbe1f8f3da7ef
afb222bc95f6a76994d941559484926cbed159bc
ac60e5ff16f4831a2b21b00d4eea7d90f4496f29
refs/heads/master
2020-06-25T09:22:05.455195
2018-08-07T19:13:28
2018-08-07T19:13:28
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.648439884185791, "alphanum_fraction": 0.6547109484672546, "avg_line_length": 57.10222244262695, "blob_id": "5d4f032b8dcf3e1973d5bc190b5e35cf6d8f88ee", "content_id": "de2854b75e2a3e62eb8a55cb7b9d0c822745d77a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13076, "license_type": "no_license", "max_line_length": 183, "num_lines": 225, "path": "/train_adda.py", "repo_name": "q145492675/ADDA", "src_encoding": "UTF-8", "text": "import tensorflow as tf\nimport tensorflow.contrib.slim as slim\nfrom datasets.dataset_factory import get_dataset\nimport utils\nimport os\nimport argparse\nfrom nets.model import get_model_fn\nfrom nets.adversary import adversarial_discriminator\ndef parse_args():\n parser = argparse.ArgumentParser(description = \"Training Adversarial Discriminator Domain Adaptation\")\n #Dataset Configuration\n parser.add_argument(\"--source_dataset\", help = \"Source Dataset Name\", type = str, default = \"usps\")\n parser.add_argument(\"--target_dataset\", help = \"Target Dataset Name\", type = str, default = \"mnist\")\n\n parser.add_argument(\"--source_dataset_dir\", help = \"Source Dataset Directory\", type = str, default = \"data/usps\")\n parser.add_argument(\"--target_dataset_dir\", help = \"Target Dataset Directory\", type = str, default = \"data/mnist\")\n\n parser.add_argument(\"--source_gray2rgb\", help = \"Convert Between RGB and grayscale: 0: Keep; 1: gray2rgb; -1: rgb2gray\", type = int, default = 0)\n parser.add_argument(\"--target_gray2rgb\", help = \"Convert Between RGB and grayscale: 0: Keep; 1: gray2rgb; -1: rgb2gray\", type = int, default = 0)\n parser.add_argument(\"--num_readers\", help = \"Number of Readers\", type = int, default = 4)\n parser.add_argument(\"--image_size\", help = \"Image Size\", type = int, default = 224)\n parser.add_argument(\"--num_preprocessing_threads\", help = \"Number of Prerocessing Threads\", default = 4)\n parser.add_argument(\"--split\", help = \"Split of Dataset\", type = str, default = \"train\")\n\n #Model Path Configuration\n parser.add_argument(\"--source_model_path\", help = \"Source Model Path\", type = str, default = \"./model/pretrained\")\n parser.add_argument(\"--target_model_path\", help = \"Target Model Path\", type = str, default = \"./model/target\")\n parser.add_argument(\"--adversary_model_path\", help = \"Adeversarial Descriminator Model Path\", type = str, default = \"./model/adversary\")\n\n #Learning Configuration\n parser.add_argument(\"--model\", help = \"Model\", type = str, default = \"lenet\")\n parser.add_argument(\"--adversary_leaky\", help = \"Adversary Leaky\", type = bool, default = False)\n parser.add_argument(\"--learning_rate_discriminator\", help = \"Learning Rate\", type = float, default = 1e-5)\n parser.add_argument(\"--learning_rate_generator\", help = \"Learning Rate of Genertor\", type = float, default = 1e-6)\n parser.add_argument(\"--batch_size\", help = \"Batch Size\", type = int, default = 16)\n parser.add_argument(\"--num_iters\", help = \"Number of Iterations\", type = int, default = 30000)\n parser.add_argument(\"--solver\", help = \"Choice optimizer\", type = str, default = \"adam\")\n parser.add_argument(\"--checkpoint_steps\", help = \"Checkpoint Step\", type = int, default = 100)\n parser.add_argument(\"--lr_decay_steps\", help = \"Decay Steps of Learning Rate\", type = int, default = None)\n parser.add_argument(\"--lr_decay_rate\", help = \"Decay Rate\", type = float, default = 0.1)\n 
parser.add_argument(\"--feature_name\", help = \"Feature Name\", type = str, default = None)\n return parser.parse_args()\n\ndef main():\n #####################\n ## Parse Arguments ##\n #####################\n\n options = parse_args()\n print(\"Configurations\")\n for var in vars(options):\n print(\"\\t>> {}: {} \".format(var, getattr(options, var)))\n #print(\"Enter to Continue or Ctrl+C to Break\")\n\n ##################\n ## Load dataset ##\n ##################\n source_dataset = get_dataset(options.source_dataset, options.split, options.source_dataset_dir)\n target_dataset = get_dataset(options.target_dataset, options.split, options.target_dataset_dir)\n\n source_images, source_labels = utils.get_batch_from_dataset(dataset = source_dataset, batch_size = options.batch_size,\n num_readers = options.num_readers, is_training = True,\n num_preprocessing_threads = options.num_preprocessing_threads,\n gray2rgb = options.source_gray2rgb, verbose = True, image_size = options.image_size)\n\n target_images, target_labels = utils.get_batch_from_dataset(dataset = target_dataset, batch_size = options.batch_size,\n num_readers = options.num_readers, is_training = True,\n num_preprocessing_threads = options.num_preprocessing_threads,\n gray2rgb = options.target_gray2rgb, verbose = True, image_size = options.image_size)\n\n ##################\n ## Model Config ##\n ##################\n model_fn = get_model_fn(options.model)\n\n source_ft, source_layers = model_fn(source_images, scope = \"source_network\", is_training = False,\n num_classes = source_dataset.num_classes, image_size = options.image_size)\n target_ft, target_layers = model_fn(target_images, scope = \"target_network\", is_training = True,\n num_classes = target_dataset.num_classes, image_size = options.image_size)\n source_pred = source_ft\n target_pred = target_ft\n\n if options.feature_name == \"None\" or options.feature_name is None:\n feature_name = None\n else:\n feature_name = options.feature_name\n\n if (feature_name is not None) and (feature_name in source_layers) and (feature_name in target_layers):\n source_ft = source_layers[options.feature_name]\n target_ft = target_layers[options.feature_name]\n elif feature_name is not None:\n raise ValueError(\"{} was not found in scope\".format(feature_name))\n\n\n source_ft = tf.reshape(source_ft, [-1, int(source_ft.get_shape()[-1])])\n target_ft = tf.reshape(target_ft, [-1, int(target_ft.get_shape()[-1])])\n\n adversary_ft = tf.concat([source_ft, target_ft], 0)\n\n source_adversary_label = tf.zeros([tf.shape(source_ft)[0]], tf.int32)\n target_adversary_label = tf.ones([tf.shape(target_ft)[0]], tf.int32)\n adversary_label = tf.concat([source_adversary_label, target_adversary_label], 0)\n\n adversary_logits = adversarial_discriminator(adversary_ft, leaky = options.adversary_leaky, layers = [512, 500])\n mapping_loss = tf.losses.sparse_softmax_cross_entropy(1 - adversary_label, adversary_logits)\n adversary_loss = tf.losses.sparse_softmax_cross_entropy(adversary_label, adversary_logits)\n\n domain_accuracy = slim.metrics.accuracy(tf.argmax(adversary_logits, axis = 1), tf.cast(adversary_label, tf.int64))\n source_accuracy = slim.metrics.accuracy(tf.argmax(source_pred, axis = 1), tf.cast(source_labels, tf.int64))\n target_accuracy = slim.metrics.accuracy(tf.argmax(target_pred, axis = 1), tf.cast(target_labels, tf.int64))\n\n\n source_vars = utils.collect_vars(\"source_network\", prepend_scope = options.model)\n target_vars = utils.collect_vars(\"target_network\", prepend_scope = 
options.model)\n adversary_vars = utils.collect_vars(\"adversary\", prepend_scope = \"adversary\")\n\n source_model_path = tf.train.latest_checkpoint(options.source_model_path)\n target_model_path = tf.train.latest_checkpoint(options.target_model_path)\n adversary_model_path = tf.train.latest_checkpoint(options.adversary_model_path)\n if source_model_path is None:\n raise ValueError(\"{} not found to restore source model\".format(options.source_model_path))\n\n source_saver = tf.train.Saver(source_vars)\n target_saver = tf.train.Saver(target_vars)\n adversary_saver = tf.train.Saver(adversary_vars)\n\n learning_rate_generator_op = tf.Variable(options.learning_rate_generator, name='learning_rate_generator', trainable=False)\n learning_rate_discriminator_op = tf.Variable(options.learning_rate_discriminator, name='learning_rate_discriminator', trainable=False)\n\n if options.solver == 'sgd':\n generator_optimizer = tf.train.MomentumOptimizer(learning_rate_generator_op, 0.99, name = \"generator_discriminator\")\n discriminator_optimizer = tf.train.MomentumOptimizer(learning_rate_discriminator_op, 0.99, name = \"discriminator_optimizer\")\n elif options.solver == \"adam\":\n generator_optimizer = tf.train.AdamOptimizer(learning_rate_generator_op, 0.5, name = \"generator_discriminator\")\n discriminator_optimizer = tf.train.AdamOptimizer(learning_rate_discriminator_op, 0.5, name = \"discriminator_optimizer\")\n\n\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope = \"target_network\")\n if update_ops:\n with tf.control_dependencies(update_ops):\n mapping_step = generator_optimizer.minimize(mapping_loss, var_list=list(target_vars.values()))\n else:\n mapping_step = generator_optimizer.minimize(mapping_loss, var_list=list(target_vars.values()))\n adversary_step = discriminator_optimizer.minimize(adversary_loss, var_list=list(adversary_vars.values()))\n\n config = tf.ConfigProto(device_count=dict(GPU=1))\n config.gpu_options.allow_growth = True\n sess = tf.Session(config=config)\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n sess.run(tf.global_variables_initializer())\n\n source_saver.restore(sess, source_model_path)\n print(\"Restore Source Model from {}\".format(source_model_path))\n if target_model_path is not None:\n target_saver.restore(sess, target_model_path)\n print(\"Restore Target Model from {}\".format(target_model_path))\n else:\n target_saver.restore(sess, source_model_path)\n print(\"Restore Target Model from {}\".format(source_model_path))\n target_model_path = os.path.join(options.target_model_path, \"target_model_{}_to_{}_{}.ckpt\".format(options.source_dataset, options.target_dataset, options.model))\n\n if adversary_model_path is not None:\n adversary_saver.restore(sess, adversary_model_path)\n print(\"Restore Adversarial Discrimintor from {}\".format(adversary_model_path))\n else:\n print(\"Cannot Restore Adversarial Discriminator. 
It will be set to random.\")\n        adversary_model_path = os.path.join(options.adversary_model_path, \"discriminator_model_{}_to_{}_{}.ckpt\".format(options.source_dataset, options.target_dataset, options.model))\n\n\n    summaries = [\n        tf.summary.image(\"Image: Source Image\", source_images),\n        tf.summary.image(\"Image: Target Image\", target_images),\n        tf.summary.scalar(\"Loss: Mapping Loss\", mapping_loss),\n        tf.summary.scalar(\"Loss: Adversary Loss\", adversary_loss),\n        tf.summary.scalar(\"Accuracy: Domain Accuracy\", domain_accuracy),\n        tf.summary.scalar(\"Accuracy: Source Accuracy\", source_accuracy),\n        tf.summary.scalar(\"Accuracy: Target Accuracy\", target_accuracy)\n    ]\n    summary_op = tf.summary.merge(summaries)\n    summary_writer = tf.summary.FileWriter(options.adversary_model_path, graph=tf.get_default_graph())\n\n    ##############\n    ## Training ##\n    ##############\n    timer = utils.Timer()\n    current_learning_rate_generator = options.learning_rate_generator\n    current_learning_rate_discriminator = options.learning_rate_discriminator\n    for iter in xrange(1, options.num_iters + 1):\n        timer.tic()\n        _mapping_loss, _adversary_loss, _, _, src_acc, tgt_acc, dom_acc, summary, lr_gen, lr_dis = sess.run([\n            mapping_loss, adversary_loss,\n            mapping_step, adversary_step,\n            source_accuracy, target_accuracy, domain_accuracy,\n            summary_op, learning_rate_generator_op, learning_rate_discriminator_op])\n\n        summary_writer.add_summary(summary, iter)\n        print(\"Iteration [{}/{}]:\".format(iter, options.num_iters))\n        print(\"\\t>> Mapping Loss:\\t{}\".format(_mapping_loss))\n        print(\"\\t>> Adversary Loss:\\t{}\".format(_adversary_loss))\n        print(\"\\t>> Domain Accuracy:\\t{}\".format(dom_acc))\n        print(\"\\t>> Source Accuracy:\\t{}\".format(src_acc))\n        print(\"\\t>> Target Accuracy:\\t{}\".format(tgt_acc))\n        print(\"\\t>> Generator Learning Rate:\\t{}\".format(lr_gen))\n        print(\"\\t>> Discriminator Learning Rate:\\t{}\".format(lr_dis))\n        print(\"\\t>> Executed Time:\\t{} sec/iter\".format(timer.toc()))\n\n        if (iter % options.checkpoint_steps) == 0:\n            target_saver.save(sess, target_model_path)\n            adversary_saver.save(sess, adversary_model_path)\n            print(\"Saving model at iteration {}\".format(iter))\n\n\n        if options.lr_decay_steps is not None and (iter % options.lr_decay_steps) == 0:\n            current_learning_rate_generator = sess.run(learning_rate_generator_op.assign(\n                current_learning_rate_generator * options.lr_decay_rate))\n            current_learning_rate_discriminator = sess.run(learning_rate_discriminator_op.assign(\n                current_learning_rate_discriminator * options.lr_decay_rate))\n\n    coord.request_stop()\n    coord.join(threads)\n    sess.close()\n\nif __name__ == \"__main__\":\n    main()\n\n\n\n" }, { "alpha_fraction": 0.7372262477874756, "alphanum_fraction": 0.7591241002082825, "avg_line_length": 19.450000762939453, "blob_id": "f7dbfeaa3bef43222d815ea26b1f9c19f98e716", "content_id": "ffd96ebd45f74ef5ee71c8058c1b1b6f5f37f6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 411, "license_type": "no_license", "max_line_length": 243, "num_lines": 20, "path": "/README.md", "repo_name": "q145492675/ADDA", "src_encoding": "UTF-8", "text": "# Adversarial Discriminative Domain Adaptation\n\n## Introduction\n\nThis is an implementation of [ADDA](https://arxiv.org/abs/1702.05464). The original implementation of ADDA can be found [here](https://github.com/erictzeng/adda/tree/master/adda/models). This version also supports ResNet.\n\n## Requirements\n\n+ python\n\n+ tensorflow\n\n\n## About\n\nThanh-Dat Truong\n\nttdat@selab.hcmus.edu.vn\n\nUniversity of Science, VNU-HCM\n\n\n" }, { "alpha_fraction": 0.5604906678199768, "alphanum_fraction": 0.578045666217804, "avg_line_length": 25.148935317993164, "blob_id": "427e3fdead375712e2c5d66e896034c52cb0e69c", "content_id": "6e6e99fe35c7e2ffab7adc703840d310e6204c2f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 4728, "license_type": "no_license", "max_line_length": 73, "num_lines": 188, "path": "/run.sh", "repo_name": "q145492675/ADDA", "src_encoding": "UTF-8", "text": "#Training Configuration\ntrain_src=1\ntrain_tgt=0\ntrain_adda=1\ntest_src=0\ntest_tgt=0\ntest_adda=0\nclean_log=1\nclean_dir=0\n\n#Model Config\noutput=output\ndata=data\nsrc=visda_3d\ntgt=visda_2d\nmodel=resnet_v1_50\n\nsrc_data_dir=$data/$src\ntgt_data_dir=$data/$tgt\n\nsrc_model_dir=$output/$model/pretrained_${model}_${src}_only\ntgt_model_dir=$output/$model/pretrained_${model}_${tgt}_only\n\nadda_model_dir=$output/$model/adda_${model}_${src}_to_${tgt}/target\nadve_model_dir=$output/$model/adda_${model}_${src}_to_${tgt}/discriminator\n\nsrc_gray2rgb=0\ntgt_gray2rgb=0\nimage_size=224\nsplit_train=train\nsplit_test=test\nfeat_name=global_pool\n\n#Learning Config\nnum_iters_cls=7000\nnum_iters_adda=10000\n\nsolver_cls=adam\nsolver_adda=adam\n\nlr_cls=0.0001\nlr_adda_gen=0.000001\nlr_adda_dis=0.00001\n\nweight_decay=0.00002\nadv_leaky=True\n\nbatch_size_cls=64\nbatch_size_adda=32\n\n#Training Source Only\nif [[ $train_src -eq 1 ]]; then\n    if [ ! -d $src_model_dir ]; then #Create source model path\n        mkdir -p $src_model_dir\n    fi\n\n    if [[ $clean_log -eq 1 ]]; then #Delete Log\n        rm -rf $src_model_dir/events.*\n    fi\n\n    if [[ $clean_dir -eq 1 ]]; then\n        rm -rf $src_model_dir/*\n    fi\n\n    python train_classifier.py \\\n        --solver $solver_cls \\\n        --learning_rate $lr_cls \\\n        --weight_decay $weight_decay \\\n        --num_iters $num_iters_cls \\\n        --model $model \\\n        --model_path $src_model_dir \\\n        --dataset $src \\\n        --split $split_train \\\n        --dataset_dir $src_data_dir \\\n        --gray2rgb $src_gray2rgb \\\n        --batch_size $batch_size_cls \\\n        --image_size $image_size\nfi\n\n#Training Target Only\nif [[ $train_tgt -eq 1 ]]; then\n    if [ ! -d $tgt_model_dir ]; then #Create target model path\n        mkdir -p $tgt_model_dir\n    fi\n\n    if [[ $clean_log -eq 1 ]]; then #Delete Log\n        rm -rf $tgt_model_dir/events.*\n    fi\n\n    if [[ $clean_dir -eq 1 ]]; then\n        rm -rf $tgt_model_dir/*\n    fi\n\n    python train_classifier.py \\\n        --solver $solver_cls \\\n        --learning_rate $lr_cls \\\n        --weight_decay $weight_decay \\\n        --num_iters $num_iters_cls \\\n        --model $model \\\n        --model_path $tgt_model_dir \\\n        --dataset $tgt \\\n        --split $split_train \\\n        --dataset_dir $tgt_data_dir \\\n        --gray2rgb $tgt_gray2rgb \\\n        --batch_size $batch_size_cls \\\n        --image_size $image_size\nfi\n\n#Training Adversarial Discriminative Domain Adaptation\nif [[ $train_adda -eq 1 ]]; then\n    if [ ! -d $adda_model_dir ]; then #Create adda model path\n        mkdir -p $adda_model_dir\n    fi\n\n    if [ ! -d $adve_model_dir ]; then #Create adve model path\n        mkdir -p $adve_model_dir\n    fi\n\n    if [[ $clean_log -eq 1 ]]; then #Delete Log\n        rm -rf $adda_model_dir/events.*\n        rm -rf $adve_model_dir/events.*\n    fi\n\n    if [[ $clean_dir -eq 1 ]]; then\n        rm -rf $adda_model_dir/*\n        rm -rf $adve_model_dir/*\n    fi\n\n    python train_adda.py \\\n        --solver $solver_adda \\\n        --learning_rate_generator $lr_adda_gen \\\n        --learning_rate_discriminator $lr_adda_dis \\\n        --num_iters $num_iters_adda \\\n        --model $model \\\n        --source_model_path $src_model_dir \\\n        --target_model_path $adda_model_dir \\\n        --adversary_model_path $adve_model_dir \\\n        --source_dataset $src \\\n        --source_dataset_dir $src_data_dir \\\n        --source_gray2rgb $src_gray2rgb \\\n        --target_dataset $tgt \\\n        --target_dataset_dir $tgt_data_dir \\\n        --target_gray2rgb $tgt_gray2rgb \\\n        --split $split_train \\\n        --adversary_leaky $adv_leaky \\\n        --batch_size $batch_size_adda \\\n        --image_size $image_size \\\n        --feature_name $feat_name\nfi\n\n#Test Source Only\nif [[ $test_src -eq 1 ]]; then\n    python test_classifier.py \\\n        --model $model \\\n        --model_path $src_model_dir \\\n        --dataset $src \\\n        --dataset_dir $src_data_dir \\\n        --split $split_test \\\n        --gray2rgb $src_gray2rgb \\\n        --batch_size $batch_size_cls \\\n        --image_size $image_size\nfi\n\n#Test Target Only\nif [[ $test_tgt -eq 1 ]]; then\n    python test_classifier.py \\\n        --model $model \\\n        --model_path $tgt_model_dir \\\n        --dataset $tgt \\\n        --dataset_dir $tgt_data_dir \\\n        --split $split_test \\\n        --gray2rgb $tgt_gray2rgb \\\n        --batch_size $batch_size_cls \\\n        --image_size $image_size\nfi\n\n#Test ADDA\nif [[ $test_adda -eq 1 ]]; then\n    python test_classifier.py \\\n        --model $model \\\n        --model_path $adda_model_dir \\\n        --dataset $tgt \\\n        --dataset_dir $tgt_data_dir \\\n        --split $split_test \\\n        --gray2rgb $tgt_gray2rgb \\\n        --batch_size $batch_size_cls \\\n        --image_size $image_size\nfi\n" } ]
3
nt27web/statistical-calculator
https://github.com/nt27web/statistical-calculator
98b8d09aba5f6880bbdd40614d206a3f65f6fec6
526b971d4ab1bed70047f80284c9d8a3a4ed601c
d192fe6f9667a993fb6578af8ffdff4cbfb05968
refs/heads/main
2023-01-21T13:33:56.915295
2020-12-02T07:23:07
2020-12-02T07:23:07
309,568,801
1
2
MIT
2020-11-03T04:05:36
2020-11-25T06:37:10
2020-12-02T07:23:08
Python
[ { "alpha_fraction": 0.6813417077064514, "alphanum_fraction": 0.6834381818771362, "avg_line_length": 28.8125, "blob_id": "d63f6bea63b5a849d008363156ff7f215a09e3b7", "content_id": "50c23d68b362954239055acfd93f83c102e094b6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 477, "license_type": "permissive", "max_line_length": 63, "num_lines": 16, "path": "/Statistics/ZScore.py", "repo_name": "nt27web/statistical-calculator", "src_encoding": "UTF-8", "text": "from Calculator.Subtraction import subtraction\nfrom Calculator.Division import division\nfrom Statistics.Mean import get_mean\nfrom Statistics.StandardDeviation import get_standard_deviation\n\n\ndef get_z_score(data):\n if isinstance(data, float):\n data = [data]\n value_mean = get_mean(data)\n z = []\n for i in range(0, len(data)):\n a = subtraction(value_mean, data[i])\n b = division(get_standard_deviation(data), a)\n z.append(b)\n return z\n" }, { "alpha_fraction": 0.6334519386291504, "alphanum_fraction": 0.6835975646972656, "avg_line_length": 78.17948913574219, "blob_id": "1bbc495ca2ce83882213f75e67885bed7be4d2df", "content_id": "8efc8e1dc4091b75771d6b6a2a5a2c57c5d7485c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3093, "license_type": "permissive", "max_line_length": 447, "num_lines": 39, "path": "/README.md", "repo_name": "nt27web/statistical-calculator", "src_encoding": "UTF-8", "text": "[![Build Status](https://travis-ci.com/nt27web/statistical-calculator.svg?branch=main)](https://travis-ci.com/github/nt27web/statistical-calculator)\n\n# Statistical-calculator\n\n## Task list\nNo. |Task | Short Description | Developer \n------- | --------------- | ---------- | ----------- | \n1| Class Diagram | An outline showing how each of the statistical and calculator functions relates to each other | Nayana \n2| Connect Travis-CI with Github repo | Configure automation testing on all branches using Travis | Nayana\n3| Project management board | Development task breakdown to the function level | Nayana\n4| Baseline the Basic Calculator functions | Addition, Subtraction, Division, Multiplication, Square, Square Root | Nayana \n5| Descriptive Statistics functions | Functions- mean, median, mode, standard Deviation, Z-Score | Sourav\n6| Population Sampling | Simple random sampling, Confidence Interval For a Sample, Margin of Error, Cochran’s Sample Size Formula, How to Find a Sample Size Given a Confidence Interval and Width (unknown population standard deviation) | Nayana\n7| Random Generator | Generate a random number without a seed between a range of two numbers, Generate a random number with a seed between a range of two numbers, Generate a list of N random numbers with a seed and between a range of numbers, Select a random item from a list, Set a seed and randomly. select the same value from a list, Select N number of items from a list without a seed, Select N number of items from a list with a seed | Nayana\n8| Readme | Task breakdown, description, formulas and result | Nayana, Sourav\n\n## Function list\nNo. 
| Function | Formula / Short Description | Example | Result \n------- | --------------- | ---------- | ----------- | ----------- | \n1 | Addition | a + b | 12+3 | 15\n2 | Subtraction | b-a | 13-2 | 11\n3 | Multiplication | a * b | 5 * 2 | 10\n4 | Division | b/a | 12/4 | 3\n5 | Square | a * a | 5 * 5 | 25\n6 | Square Root | math.sqrt(a) | math.sqrt(635) | 25.19920633\n7 | Mean | sum(data)/len(data) | 15/3 | data=[1,2,3,4,5] Result = 3\n8 | Median | ((n + 1)/2)th element (odd number of elements), mean of the (n/2)th and (n/2 + 1)th elements (even number of elements), n is len(data) | (5+1)/2 | odd: data=[1,2,3,4,5] Result = 3, even: [1,2,3,4,5,6] Result = 3.5 \n9 | Mode | max(frequency) Mode is calculated by counting the frequencies of an element in a list and calculating the max of all frequencies. | max(2,3,1,1,1,1,1) | data=[1,2,5,1,2,3,6,2,9,10,2] Result = 2 (count 3)\n10 | Variance | s = sum((x - mean(data))**2) / (len(data) - 1) | sum((data[i] - mean(data))**2) / (len(data) - 1) | data=[1,2,3,4,5] Result = 2\n11 | Standard deviation | sqrt(variance) | sqrt(2) | data=[1,2,3,4,5] Result = 1.414\n12 | z_score | z = (data[i] - mean(data)) / standard deviation | z = (1-3)/1.414 | data=[1,2,3,4,5] Result = -1.414\n\n\n## Class Diagram of Statistics, Calculator and related classes \n<object data=\"Diagram_Nayana.pdf\" type=\"application/pdf\" width=\"700px\" height=\"700px\">\n    <embed src=\"Diagram_Nayana.pdf\">\n        <p>Click here to view/download the class diagram in PDF format: <a href=\"Diagram_Nayana.pdf\">Class Diagram</a>.</p>\n    </embed>\n</object>\n\n\n\n" }, { "alpha_fraction": 0.636970579624176, "alphanum_fraction": 0.6567911505699158, "avg_line_length": 40.68695831298828, "blob_id": "4b7c47798be7cde3950259d194316e5fe7391bcc", "content_id": "707dc6ea81cf198a9aaaa13a755aad926ee1b4bb", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4793, "license_type": "permissive", "max_line_length": 114, "num_lines": 115, "path": "/Tests/test_CalculatorTest.py", "repo_name": "nt27web/statistical-calculator", "src_encoding": "UTF-8", "text": "import unittest\n\nfrom Calculator.Calculator import Calculator\nfrom CsvReader.CsvReader import CsvReader\n\n\nclass MyTestCase(unittest.TestCase):\n\n    # default test\n    def setUp(self) -> None:\n        self.calculator = Calculator()\n\n    # instance check test\n    def test_instantiate_calculator(self):\n        self.assertIsInstance(self.calculator, Calculator)\n\n    # addition method test1\n    def test_add_method_calculator_success(self):\n        self.assertEqual(self.calculator.add(1.36, 2.78), 4.14)\n\n    # addition method test2\n    def test_add_method_calculator_zero(self):\n        self.assertEqual(self.calculator.add(-1.11, 1.11), 0)\n\n    # subtraction method test1\n    def test_subtract_method_calculator_success(self):\n        self.assertEqual(self.calculator.subtract(4, 10), 6)\n\n    # subtraction method test2\n    def test_subtract_method_calculator_zero(self):\n        self.assertEqual(self.calculator.subtract(4, 4), 0)\n\n    # multiplication method test1\n    def test_multiply_method_calculator_success(self):\n        self.assertEqual(self.calculator.multiply(5, 5), 25)\n\n    # multiplication method test2\n    def test_multiply_method_calculator_zero(self):\n        self.assertEqual(self.calculator.multiply(5, 0), 0)\n\n    # division method test1\n    def test_divide_method_calculator_success(self):\n        self.assertEqual(self.calculator.divide(5, 20), 4)\n\n    # division method test2\n    def test_divide_method_calculator_zero(self):\n        self.assertEqual(self.calculator.divide(5, 0), 0)\n\n    # square method test1\n    def test_square_method_calculator_success(self):\n        self.assertEqual(self.calculator.square(5), 25)\n\n    # square method test2\n    def test_square_method_calculator_negative(self):\n        self.assertEqual(self.calculator.square(-5), 25)\n\n    # square root test1\n    def test_square_root_method_calculator_success(self):\n        self.assertEqual(self.calculator.square_root(25), 5)\n\n    # square root test2 - accurate up to 9 decimal points\n    def test_square_root_method_calculator_success_decimal(self):\n        self.assertEqual(self.calculator.square_root(39.99), 6.323764702)\n\n    def test_subtraction(self):\n        test_data = CsvReader(\"Tests/Data/UnitTestSubtraction.csv\").data\n        for row in test_data:\n            result_float = float(row['Result'])\n            self.assertEqual(self.calculator.subtract(float(row['Value 1']), float(row['Value 2'])), result_float)\n            result_int = int(row['Result'])\n            self.assertEqual(self.calculator.subtract(int(row['Value 1']), int(row['Value 2'])), result_int)\n\n    def test_addition(self):\n        test_data = CsvReader(\"Tests/Data/UnitTestAddition.csv\").data\n        for row in test_data:\n            result_float = float(row['Result'])\n            self.assertEqual(self.calculator.add(float(row['Value 1']), float(row['Value 2'])), result_float)\n            result_int = int(row['Result'])\n            self.assertEqual(self.calculator.add(int(row['Value 1']), int(row['Value 2'])), result_int)\n\n    def test_multiplication(self):\n        test_data = CsvReader(\"Tests/Data/UnitTestMultiplication.csv\").data\n        for row in test_data:\n            result_float = float(row['Result'])\n            self.assertEqual(self.calculator.multiply(float(row['Value 1']), float(row['Value 2'])), result_float)\n            result_int = int(row['Result'])\n            self.assertEqual(self.calculator.multiply(int(row['Value 1']), int(row['Value 2'])), result_int)\n\n    def test_division(self):\n        test_data = CsvReader(\"Tests/Data/UnitTestDivision.csv\").data\n        for row in test_data:\n            result_float = float(row['Result'])\n            self.assertEqual(self.calculator.divide(float(row['Value 1']), float(row['Value 2'])), result_float)\n            result_int = float(row['Result'])\n            self.assertEqual(self.calculator.divide(int(row['Value 1']), int(row['Value 2'])), result_int)\n\n    def test_square(self):\n        test_data = CsvReader(\"Tests/Data/UnitTestSquare.csv\").data\n        for row in test_data:\n            result_float = float(row['Result'])\n            self.assertEqual(self.calculator.square(float(row['Value 1'])), result_float)\n            result_int = int(row['Result'])\n            self.assertEqual(self.calculator.square(int(row['Value 1'])), result_int)\n\n    def test_square_root(self):\n        test_data = CsvReader(\"Tests/Data/UnitTestSquareRoot.csv\").data\n        for row in test_data:\n            result_float = float(row['Result'])\n            self.assertEqual(round(self.calculator.square_root(float(row['Value 1'])), 8), result_float)\n            result_int = float(row['Result'])\n            self.assertEqual(round(self.calculator.square_root(int(row['Value 1'])), 8), result_int)\n\n\nif __name__ == '__main__':\n    unittest.main()" }, { "alpha_fraction": 0.6811521053314209, "alphanum_fraction": 0.6856998205184937, "avg_line_length": 34.33928680419922, "blob_id": "369272856a5b85f84b2bf8c66132c90634a86d81", "content_id": "25ef4f505142a6d004ea4118f16da672aceac1a9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1979, "license_type": "permissive", "max_line_length": 86, "num_lines": 56, "path": "/Tests/test_Statistics.py", "repo_name": "nt27web/statistical-calculator", "src_encoding": "UTF-8", "text": "import unittest\nfrom numpy.random import seed\nfrom Statistics.Statistics import Statistics\nimport random\nimport statistics\n\n\nclass MyTestCase(unittest.TestCase):\n    def setUp(self) -> None:\n        seed(5)\n        self.testData = []\n        for i in range(0, 10):\n            num = random.randint(0, 15)\n            self.testData.append(num)\n\n        self.mean_value = statistics.mean(self.testData)\n        self.median_value = statistics.median(self.testData)\n        self.mode_value = statistics.mode(self.testData)\n        self.variance_value = statistics.variance(self.testData)\n        self.standard_deviation_value = statistics.stdev(self.testData)\n        self.statistics = Statistics()\n\n    def test_instantiate_calculator(self):\n        self.assertIsInstance(self.statistics, Statistics)\n\n    def test_mean_calculator(self):\n        mean = self.statistics.stats_mean(self.testData)\n        self.assertEqual(mean, self.mean_value)\n\n    def test_median_calculator(self):\n        median = self.statistics.stats_median(self.testData)\n        self.assertEqual(median, self.median_value)\n\n    def test_mode_calculator(self):\n        mode = self.statistics.stats_mode(self.testData)\n        self.assertEqual(mode, self.mode_value)\n\n    def test_variance_calculator(self):\n        variance = self.statistics.stats_variance(self.testData)\n        self.assertEqual(variance, round(self.variance_value, 1))\n\n    def test_standard_deviation_calculator(self):\n        standard_deviation = self.statistics.stats_standard_deviation(self.testData)\n        self.assertEqual(standard_deviation, round(self.standard_deviation_value, 1))\n\n\nif __name__ == '__main__':\n    unittest.main()\n" }, { "alpha_fraction": 0.625, "alphanum_fraction": 0.6325757503509521, "avg_line_length": 25.299999237060547, "blob_id": "f6649a6182171c7aa9f1c3ff70b95c35c0b18eec", "content_id": "a1470aee70d7ae42bb80f931a2398f53e8e112d7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 264, "license_type": "permissive", "max_line_length": 77, "num_lines": 10, "path": "/Calculator/SquareRoot.py", "repo_name": "nt27web/statistical-calculator", "src_encoding": "UTF-8", "text": "import math\n\n\ndef square_root(a):\n    if isinstance(a, float):\n        return round(math.sqrt(float(a)), 9)\n    elif isinstance(a, int):\n        return round(math.sqrt(a), 9)\n    else:\n        raise Exception(\"Data type not supported for square root operation!\")\n\n" }, { "alpha_fraction": 0.5458613038063049, "alphanum_fraction": 0.5592840909957886, "avg_line_length": 24.83333396911621, "blob_id": "893af08969c6e0c921f5dd2301ce90abd52f9ced", "content_id": "c966500b03b66a588c7d71d2cd44a59c16e8dc6b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 447, "license_type": "permissive", "max_line_length": 44, "num_lines": 18, "path": "/Statistics/Median.py", "repo_name": "nt27web/statistical-calculator", "src_encoding": "UTF-8", "text": "from Calculator.Division import division\nfrom Calculator.Addition import addition\n\n\ndef get_median(data):\n    num_values = len(data)\n    if num_values % 2 == 0:\n        value = int(division(2, num_values))\n        a = data[value]\n        value = value - 1\n        b = data[value]\n        c = addition(b, a)\n        d = division(2, c)\n        return d\n    else:\n        value = int(division(2, num_values))\n        e = data[value]\n        return e\n" }, { "alpha_fraction": 0.5803936123847961, "alphanum_fraction": 0.6071295738220215, "avg_line_length": 37.38888931274414, "blob_id": "337df7ca74f594f2c2a3809acdc060524a8cb030",
"content_id": "aa01afc784ae2f1d9bcbfcaa21582dbf87b5bfa2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2695, "license_type": "permissive", "max_line_length": 109, "num_lines": 72, "path": "/Statistics/PopulationSampler.py", "repo_name": "nt27web/statistical-calculator", "src_encoding": "UTF-8", "text": "from Statistics.RandomGenerator import RandomGenerator\nfrom Statistics.Statistics import Statistics\n\nimport scipy.stats as st\n\n\nclass PopulationSampler(RandomGenerator):\n\n def __init__(self):\n self.stats = Statistics()\n pass\n\n # Simple random sampling\n def get_simple_random_sampling(self, size, seed, version, data):\n return self.get_rand_num_list_w_seed(size, seed, version, data)\n\n # Confidence Interval For a Sample\n def get_confidence_interval(self, data):\n\n st1_degree_of_freedom = self.stats.subtract(len(data), 1)\n st2_alpha = self.stats.divide(2, self.stats.subtract(1, 0.95))\n st3 = st.t.ppf(1 - st2_alpha, df=st1_degree_of_freedom)\n st4 = self.stats.divide(self.stats.square_root(len(data)), self.stats.stats_standard_deviation(data))\n st5 = self.stats.multiply(st3, st4)\n st6 = self.stats.subtract(self.stats.stats_mean(data), st5)\n st7 = self.stats.add(self.stats.stats_mean(data), st5)\n\n conf_interval = [st6, st7]\n return conf_interval\n\n # Margin of Error\n def get_margin_of_error(self, data, q):\n st1_z_critical_score = st.norm.ppf(1 - (1 - q) / 2)\n st2_sd = self.stats.stats_standard_deviation(data)\n #st3 = self.stats.multiply(st1_z_critical_score, st2_sd)\n se = self.stats.divide(self.stats.square_root(len(data)), st2_sd)\n margin_of_error = self.stats.multiply(st1_z_critical_score, se)\n\n return margin_of_error\n\n # Cochran’s Sample Size Formula\n def get_result_by_cochrans_sample_size(self, p, e, cl):\n z = st.norm.ppf(1-(1-cl)/2)\n print(self.stats.multiply(self.stats.multiply(self.stats.square(z), p), self.stats.square(e)))\n n = self.stats.multiply(self.stats.multiply(self.stats.square(z), p), self.stats.square(e))/(1-p)\n print(n)\n return round(n)\n\n # How to Find a Sample Size Given a Confidence Interval and Width (unknown population standard deviation)\n def get_sample_size_by_confidence_interval_and_width(self, data):\n # step 1\n cl = 0.95\n za_2 = st.norm.ppf(1 - (1 - cl) / 2)\n print(\"za2 - \" + str(za_2))\n e = 0.5\n print(\"e - \" + e)\n p = 0.5\n q = 1 - p\n # step 2\n s2 = self.stats.multiply(p, q)\n print(\"s2 - \" + s2)\n # step 3\n s3 = self.stats.divide(za_2, e)\n print(\"s3 - \" + s3)\n # step 4\n s4 = self.stats.square(s3)\n print(\"s4 - \" + s4)\n # step 5 ( final )\n s5 = self.stats.multiply(s2, s4)\n print(\"s5 - \" + s5)\n # this is the sample population size for an unknown population standard deviation\n return s5\n\n" }, { "alpha_fraction": 0.6366625428199768, "alphanum_fraction": 0.669954776763916, "avg_line_length": 51.89130401611328, "blob_id": "bbf2fea9a739df45d12e603ef8b78f982ee6bfcf", "content_id": "25b837d0f2cc27810fb5e2c2b07026b07a2d7788", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2433, "license_type": "permissive", "max_line_length": 133, "num_lines": 46, "path": "/Tests/test_RandomGenerator.py", "repo_name": "nt27web/statistical-calculator", "src_encoding": "UTF-8", "text": "import unittest\nfrom Statistics.RandomGenerator import RandomGenerator\n\n\nclass MyTestCase(unittest.TestCase):\n def setUp(self) -> None:\n self.random_generator = RandomGenerator()\n\n # Generate a 
random number without a seed between a range of two numbers - Both Integer and Decimal\n def test_generate_rand_num_by_range_wo_seed(self):\n self.assertLessEqual(self.random_generator.generate_rand_num_by_range_wo_seed(3, 5), 5)\n self.assertGreaterEqual(self.random_generator.generate_rand_num_by_range_wo_seed(3, 5), 3)\n\n # Generate a random number with a seed between a range of two numbers - Both Integer and Decimal\n def test_generate_rand_num_by_range_w_seed(self):\n self.assertLessEqual(self.random_generator.generate_rand_num_by_range_w_seed(5, 2, 3, 5), 5)\n self.assertGreaterEqual(self.random_generator.generate_rand_num_by_range_w_seed(5, 2, 3, 5), 3)\n\n # Generate a list of N random numbers with a seed and between a range of numbers - Both Integer and Decimal\n def test_get_rand_num_list_by_range_w_seed(self):\n data = self.random_generator.get_rand_num_list_by_range_w_seed(5, 2, 2, 1, 9)\n for num in range(len(data)):\n self.assertLessEqual(data[num], 9)\n self.assertGreaterEqual(data[num], 1)\n\n # Set a seed and randomly.select the same value from a list\n def test_set_seed_and_get_rand_from_list(self):\n self.assertEqual(self.random_generator.set_seed_and_get_rand_from_list(3, 2, [5, 3, 2, 1, 9]), 3)\n\n # Select a random item from a list\n def test_get_rand_item_from_list(self):\n self.assertTrue(self.random_generator.get_rand_item_from_list([5, 3, 2, 1, 9]) in [5, 3, 2, 1, 9])\n\n # Select N number of items from a list without a seed\n def test_get_rand_num_list_wo_seed(self):\n self.assertEqual(len(self.random_generator.get_rand_num_list_wo_seed(3, [5, 3, 2, 1, 9])), 3)\n self.assertTrue(set(self.random_generator.get_rand_num_list_wo_seed(3, [5, 3, 2, 1, 9])).issubset(set([5, 3, 2, 1, 9])))\n\n # Select N number of items from a list with a seed\n def test_get_rand_num_list_w_seed(self):\n self.assertEqual(len(self.random_generator.get_rand_num_list_w_seed(3, 2, 2, [5, 3, 2, 1, 9])), 3)\n self.assertTrue(set(self.random_generator.get_rand_num_list_w_seed(3, 2, 2, [5, 3, 2, 1, 9])).issubset(set([5, 3, 2, 1, 9])))\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.6290322542190552, "alphanum_fraction": 0.6580645442008972, "avg_line_length": 28.5238094329834, "blob_id": "54527df4730ff934ded7c108d052ab6be64bd29d", "content_id": "ca33a92dfeeed3fe58a90dee86b405832114f473", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 620, "license_type": "permissive", "max_line_length": 53, "num_lines": 21, "path": "/Statistics/Variance.py", "repo_name": "nt27web/statistical-calculator", "src_encoding": "UTF-8", "text": "from Statistics.Mean import get_mean\nfrom Calculator.Division import division\nfrom Calculator.Addition import addition\nfrom Calculator.Square import square\nfrom Calculator.Subtraction import subtraction\n\n\ndef get_variance(data):\n x1 = get_mean(data)\n num_values = len(data)\n total = 0\n total1 = 0\n data1 = []\n for i in range(0, len(data)):\n a = data[i - 1]\n total_sum = subtraction(a, x1)\n total = square(total_sum)\n data1.append(total)\n for i in range(0, len(data1)):\n total1 = total1 + addition(0, data1[i])\n return round(division(num_values - 1, total1), 1)\n" }, { "alpha_fraction": 0.5861456394195557, "alphanum_fraction": 0.5914742350578308, "avg_line_length": 42.230770111083984, "blob_id": "4fe5697ccffcd15f848165f4ac1112d9ce0a43fe", "content_id": "8463581cde54d0504f360c92c135cc51e0e6ce9f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, 
"language": "Python", "length_bytes": 563, "license_type": "permissive", "max_line_length": 108, "num_lines": 13, "path": "/Calculator/Division.py", "repo_name": "nt27web/statistical-calculator", "src_encoding": "UTF-8", "text": "def division(a, b):\n result = 0\n try:\n if (isinstance(a, int) and isinstance(b, int)) or (isinstance(a, float) and isinstance(b, int)) or (\n isinstance(a, int) and isinstance(b, float)):\n result = round(float(b) / float(a), 9)\n elif isinstance(a, float) and isinstance(b, float):\n result = round(int(b) / int(a), 9)\n else:\n raise Exception(\"Data type not supported for division operation!\")\n except ZeroDivisionError:\n raise Exception(\"Divide by Zero error\")\n return result\n\n" }, { "alpha_fraction": 0.6343825459480286, "alphanum_fraction": 0.6368038654327393, "avg_line_length": 50.5, "blob_id": "2987ac40666086a23b5ac3e68c5a884c745a62d4", "content_id": "8715dfc3c902d2f3edb1312aa83a9fcc6a29c540", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 413, "license_type": "permissive", "max_line_length": 108, "num_lines": 8, "path": "/Calculator/Multiplication.py", "repo_name": "nt27web/statistical-calculator", "src_encoding": "UTF-8", "text": "def multiplication(a, b):\n if (isinstance(a, float) and isinstance(b, float)) or (isinstance(a, float) and isinstance(b, int)) or (\n isinstance(a, int) and isinstance(b, float)):\n return round(float(a) * float(b), 9)\n elif isinstance(a, int) and isinstance(b, int):\n return int(a) * int(b)\n else:\n raise Exception(\"Data type not supported for multiplication operation!\")\n\n" }, { "alpha_fraction": 0.4475524425506592, "alphanum_fraction": 0.4860139787197113, "avg_line_length": 21, "blob_id": "4f07f7f554b787cf00c4cb7166337709891b787f", "content_id": "6e67488a38c84e2c7361d7bb20067450d0915555", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 286, "license_type": "permissive", "max_line_length": 36, "num_lines": 13, "path": "/Statistics/Mode.py", "repo_name": "nt27web/statistical-calculator", "src_encoding": "UTF-8", "text": "def get_mode(data):\n data1 = data\n maximum = data1.count(data1[0])\n m = data1[0]\n for i in range(1, len(data1)):\n freq = data1.count(data1[i])\n\n if freq > maximum:\n maximum = freq\n m = data1[i]\n else:\n pass\n return m\n" }, { "alpha_fraction": 0.582541823387146, "alphanum_fraction": 0.606965184211731, "avg_line_length": 35.24590301513672, "blob_id": "5b7f54ddcdee97fd1d2b4d146ee802c5d2a1151d", "content_id": "0f58be986ba5c48150587c6cb418ae77df1254eb", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2213, "license_type": "permissive", "max_line_length": 113, "num_lines": 61, "path": "/Tests/test_PopulationSampler.py", "repo_name": "nt27web/statistical-calculator", "src_encoding": "UTF-8", "text": "import unittest\nfrom Statistics.PopulationSampler import PopulationSampler\nimport statistics\nimport scipy.stats as st\n\n\nclass MyTestCase(unittest.TestCase):\n\n def setUp(self) -> None:\n self.population_sampler = PopulationSampler()\n self.data = [0, 1, 2, 5, 9, 11, 34, 55, 23, 19, 78, 99, 15]\n\n # Simple random sampling\n def test_get_simple_random_sampling(self):\n self.assertEqual(len(self.population_sampler.get_simple_random_sampling(5, 5, 2, self.data)), 5)\n self.assertTrue(\n set(self.population_sampler.get_simple_random_sampling(5, 5, 2, self.data)).issubset(set(self.data)))\n\n # 
Confidence Interval For a Sample\n def test_get_confidence_interval(self):\n conf_interval = st.t.interval(alpha=0.95\n , df=len(self.data) - 1\n , loc=statistics.mean(self.data)\n , scale=st.sem(self.data)\n )\n ci = self.population_sampler.get_confidence_interval(self.data)\n\n #print(\"Confidence interval\")\n #print(conf_interval)\n #print(ci)\n #self.assertTrue(set(conf_interval).issubset(ci))\n\n # Margin of Error\n def test_get_margin_of_error(self):\n q = 0.05 # assumption\n result = self.population_sampler.get_margin_of_error(self.data, q)\n\n sd = statistics.stdev(self.data)\n z = st.norm.ppf(1-(1-q)/2)\n se = sd / (len(self.data) ** 0.5)\n moe = z * se\n #print(\"Margin of Error\")\n #print(moe)\n #print(result)\n #self.assertTrue(result-moe >= 0.1)\n\n # Cochran’s Sample Size Formula\n def test_get_result_by_cochrans_sample_size(self):\n # n = 100000\n cl = 0.95\n e = 0.05\n p = 0.5\n #print(self.population_sampler.get_result_by_cochrans_sample_size(p, e, cl))\n\n # How to Find a Sample Size Given a Confidence Interval and Width (unknown population standard deviation)\n \"\"\"def test_get_sample_size_by_confidence_interval_and_width(self):\n print(self.population_sampler.get_sample_size_by_confidence_interval_and_width(self.data))\"\"\"\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.6338383555412292, "alphanum_fraction": 0.6338383555412292, "avg_line_length": 48.5, "blob_id": "2a1e85a2820e5064134b20c100113b9d4145edae", "content_id": "63ef6beb53284c2bd6f6488a3f6fcbb25d4c16ea", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 396, "license_type": "permissive", "max_line_length": 108, "num_lines": 8, "path": "/Calculator/Subtraction.py", "repo_name": "nt27web/statistical-calculator", "src_encoding": "UTF-8", "text": "def subtraction(a, b):\n if (isinstance(a, float) and isinstance(b, float)) or (isinstance(a, float) and isinstance(b, int)) or (\n isinstance(a, int) and isinstance(b, float)):\n return float(b) - float(a)\n elif isinstance(a, int) and isinstance(b, int):\n return int(b) - int(a)\n else:\n raise Exception(\"Data type not supported for subtraction operation!\")\n" }, { "alpha_fraction": 0.6223776340484619, "alphanum_fraction": 0.6389859914779663, "avg_line_length": 31.685714721679688, "blob_id": "ffbc7147bb81fb9a648b11a22b2b93261b8d7fc2", "content_id": "d404fe20fefeb1a4e84a005a531eb1cbbdc07f76", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1144, "license_type": "permissive", "max_line_length": 73, "num_lines": 35, "path": "/Tests/test_CsvReaderTest.py", "repo_name": "nt27web/statistical-calculator", "src_encoding": "UTF-8", "text": "import unittest\n\nfrom CsvReader.CsvReader import CsvReader, class_factory\n\n\nclass MyTestCase(unittest.TestCase):\n\n def setUp(self) -> None:\n self.csv_reader = CsvReader('Tests/Data/UnitTestSubtraction.csv')\n\n def test_return_data_as_objects(self):\n value1 = self.csv_reader.return_data_as_class('Value 1')\n value2 = self.csv_reader.return_data_as_class('Value 2')\n result = self.csv_reader.return_data_as_class('result')\n\n self.assertIsInstance(value1, list)\n self.assertIsInstance(value2, list)\n self.assertIsInstance(result, list)\n\n test_class1 = class_factory('Value 1', self.csv_reader.data[0])\n test_class2 = class_factory('Value 2', self.csv_reader.data[0])\n test_class3 = class_factory('result', self.csv_reader.data[0])\n\n for value in value1:\n
self.assertEqual(value.__name__, test_class1.__name__)\n\n for value in value2:\n self.assertEqual(value.__name__, test_class2.__name__)\n\n for value in result:\n self.assertEqual(value.__name__, test_class3.__name__)\n\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.7715736031532288, "alphanum_fraction": 0.7766497731208801, "avg_line_length": 27.14285659790039, "blob_id": "8f92195e454001793f45a5cba8f18b7214657bba", "content_id": "cfcfb339bcb627f3dd6dd2786273d8d2c0919abd", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 197, "license_type": "permissive", "max_line_length": 45, "num_lines": 7, "path": "/Statistics/StandardDeviation.py", "repo_name": "nt27web/statistical-calculator", "src_encoding": "UTF-8", "text": "from Statistics.Variance import get_variance\nfrom Calculator.SquareRoot import square_root\n\n\ndef get_standard_deviation(data):\n value = get_variance(data)\n return round(square_root(value),1)\n" }, { "alpha_fraction": 0.6913580298423767, "alphanum_fraction": 0.6954732537269592, "avg_line_length": 21.090909957885742, "blob_id": "99a0ec84640600925b2f9508bb590380d2181803", "content_id": "274cafa8296db339223e5f766aa5777f64b93c86", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 243, "license_type": "permissive", "max_line_length": 40, "num_lines": 11, "path": "/Statistics/Mean.py", "repo_name": "nt27web/statistical-calculator", "src_encoding": "UTF-8", "text": "from Calculator.Addition import addition\nfrom Calculator.Division import division\n\n\ndef get_mean(data):\n num_values = len(data)\n total = 0\n for num in data:\n total = addition(total, num)\n\n return division(num_values, total)\n" }, { "alpha_fraction": 0.6481481194496155, "alphanum_fraction": 0.6481481194496155, "avg_line_length": 52.85714340209961, "blob_id": "c24e0a7cfde44fe99068de36755d64d22dc3677c", "content_id": "81cef793715d5a4dc2ef1262d097dc98c51c1c2d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 378, "license_type": "permissive", "max_line_length": 153, "num_lines": 7, "path": "/Calculator/Addition.py", "repo_name": "nt27web/statistical-calculator", "src_encoding": "UTF-8", "text": "def addition(a, b):\n if (isinstance(a, float) and isinstance(b, float)) or (isinstance(a, float) and isinstance(b, int)) or (isinstance(a, int) and isinstance(b, float)):\n return float(a) + float(b)\n elif isinstance(a, int) and isinstance(b, int):\n return int(a) + int(b)\n else:\n raise Exception(\"Data type not supported for addition operation!\")\n\n" }, { "alpha_fraction": 0.680272102355957, "alphanum_fraction": 0.680272102355957, "avg_line_length": 26.078947067260742, "blob_id": "af15c926d47248aa19dcdd2590ca6094a0c08e08", "content_id": "4520b3cddaabe06596d7df6d873f47f16edf5e92", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1029, "license_type": "permissive", "max_line_length": 63, "num_lines": 38, "path": "/Statistics/Statistics.py", "repo_name": "nt27web/statistical-calculator", "src_encoding": "UTF-8", "text": "from Calculator.Calculator import Calculator\nfrom Statistics.Mean import get_mean\nfrom Statistics.Median import get_median\nfrom Statistics.Mode import get_mode\nfrom Statistics.Variance import get_variance\nfrom Statistics.StandardDeviation import get_standard_deviation\nfrom Statistics.ZScore import 
get_z_score\n\n\nclass Statistics(Calculator):\n\n def __init__(self):\n pass\n\n def stats_mean(self, data):\n self.result = get_mean(data)\n return self.result\n\n def stats_median(self, data):\n data.sort()\n self.result = get_median(data)\n return self.result\n\n def stats_mode(self, data):\n self.result = get_mode(data)\n return self.result\n\n def stats_variance(self, data):\n self.result = get_variance(data)\n return self.result\n\n def stats_standard_deviation(self, data):\n self.result = get_standard_deviation(data)\n return self.result\n\n def stats_z_score(self, data):\n self.result = get_z_score(data)\n return self.result\n" }, { "alpha_fraction": 0.6491477489471436, "alphanum_fraction": 0.6496211886405945, "avg_line_length": 36.71428680419922, "blob_id": "acfc82be2168def3373e21661888eafd78933b13", "content_id": "9edb677d69badd51fd143743cedb2e5dac6604ad", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2112, "license_type": "permissive", "max_line_length": 111, "num_lines": 56, "path": "/Statistics/RandomGenerator.py", "repo_name": "nt27web/statistical-calculator", "src_encoding": "UTF-8", "text": "import random\n\n\nclass RandomGenerator:\n result = 0\n\n def __init__(self):\n pass\n\n # Generate a random number without a seed between a range of two numbers - Both Integer and Decimal\n def generate_rand_num_by_range_wo_seed(self, low, high):\n self.result = random.uniform(low, high)\n return self.result\n\n # Generate a random number with a seed between a range of two numbers - Both Integer and Decimal\n def generate_rand_num_by_range_w_seed(self, seed, version, low, high):\n random.seed(seed, version)\n self.result = random.uniform(low, high)\n return self.result\n\n # Generate a list of N random numbers with a seed and between a range of numbers - Both Integer and Decimal\n def get_rand_num_list_by_range_w_seed(self, size, seed, version, low, high):\n random.seed(seed, version)\n try:\n self.result = random.sample(range(low, high), size)\n except ValueError:\n print('Sample size exceeded population size.')\n return self.result\n\n # Set a seed and randomly select the same value from a list\n def set_seed_and_get_rand_from_list(self, seed, version, number_list):\n random.seed(seed, version)\n self.result = random.choice(number_list)\n return self.result\n\n # Select a random item from a list\n def get_rand_item_from_list(self, number_list):\n self.result = random.choice(number_list)\n return self.result\n\n # Select N number of items from a list without a seed\n def get_rand_num_list_wo_seed(self, size, number_list):\n try:\n self.result = random.sample(number_list, size)\n except ValueError:\n print('Sample size exceeded population size.')\n return self.result\n\n # Select N number of items from a list with a seed\n def get_rand_num_list_w_seed(self, size, seed, version, number_list):\n random.seed(seed, version)\n try:\n self.result = random.sample(number_list, size)\n except ValueError:\n print('Sample size exceeded population size.')\n return self.result\n" }, { "alpha_fraction": 0.6017315983772278, "alphanum_fraction": 0.6060606241226196, "avg_line_length": 32.14285659790039, "blob_id": "615635ad2fc5f0d6619c9a250cd2c3c5fe279f7c", "content_id": "97960b78add39bf44f4fc438e2005bc9973354ca", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 231, "license_type": "permissive", "max_line_length": 72, "num_lines": 7, "path": "/Calculator/Square.py", 
"repo_name": "nt27web/statistical-calculator", "src_encoding": "UTF-8", "text": "def square(a):\n if isinstance(a, float):\n return round(float(a) * float(a), 9)\n elif isinstance(a, int):\n return int(a) * int(a)\n else:\n raise Exception(\"Data type not supported for square operation!\")" } ]
21
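Note on the nt27web/statistical-calculator records above: the archived get_result_by_cochrans_sample_size is meant to implement Cochran's formula n0 = z^2 * p * (1 - p) / e^2. The snippet below is a minimal standalone sanity check for that formula, not part of the archived repo; the helper name cochran_sample_size is hypothetical, and only scipy is assumed to be installed.

import math
import scipy.stats as st

def cochran_sample_size(p, e, cl):
    # z is the two-tailed critical value for confidence level cl
    z = st.norm.ppf(1 - (1 - cl) / 2)
    # Cochran's n0 = z^2 * p * (1 - p) / e^2
    return (z ** 2) * p * (1 - p) / (e ** 2)

# The textbook case (95% confidence, +/-5% margin, p = 0.5) gives
# n0 ~= 384.15; references typically round this up to 385.
print(math.ceil(cochran_sample_size(p=0.5, e=0.05, cl=0.95)))  # 385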
alexsyou/ArcadeActivities
https://github.com/alexsyou/ArcadeActivities
5f31f158aa55b566fa177de2130ce0d6e2731bea
251930ea87f1be3547c430676a8f57d639d3d708
2cae72feb9dd3502fbcecb068c5392c3a76cad2e
refs/heads/master
2020-09-06T01:44:06.881076
2019-11-07T18:55:00
2019-11-07T18:55:00
220,276,644
0
0
null
2019-11-07T16:06:53
2019-11-07T15:10:20
2019-11-07T15:10:18
null
[ { "alpha_fraction": 0.607949435710907, "alphanum_fraction": 0.6115627884864807, "avg_line_length": 20.288461685180664, "blob_id": "0cb164530abc0ab5475b948691dd2fd885481d41", "content_id": "10d1f086ca8f3eaf4c7753023dbfecff8681733d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1107, "license_type": "no_license", "max_line_length": 85, "num_lines": 52, "path": "/doggos.py", "repo_name": "alexsyou/ArcadeActivities", "src_encoding": "UTF-8", "text": "'''\nAnimals have a name, energy level, hunger level, and mood\n eat\n sleep\n\nDogs have breeds\n play\n\nCats have coat color\n hunt\n'''\n\n\nclass Dog:\n # Documentation\n name: str\n energy: int\n hungry: bool\n mood: str\n\n # Method because it is a function in a class\n # __init__ is a constructor (kind of)\n # Constructor constructs instances\n # Attributes are like name, energy, hungry, mood\n # Self is not passed in\n # The type of self is the same as the type of the class ex. self is type Dog here\n # class is making a type!\n def __init__(self, the_name: str):\n self.name = the_name\n self.energy = 0\n self.hungry = False\n self.mood = \"happy\"\n\n def feed(self):\n self.hungry = True\n\n def play_with_my_dog(self):\n self.mood = \"happy\"\n self.hungry = True\n self.energy -= 1\n\n def rest(self, length_of_time: int):\n self.energy += length_of_time\n# Creates an instance of Dog\nada = Dog(\"Ada\")\nbabbage = Dog(\"Babbage\")\n\nada.rest(8)\nada.play_with_my_dog()\nada.feed()\nada.rest(5)\nbabbage.mood = ada.mood\n" } ]
1
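Note on the doggos.py record above: the class-level lines (name: str, energy: int, ...) are only type annotations; the actual state is created per instance inside __init__, as the file's own comments about constructors suggest. Below is a hedged reworking, not the archived code: it keeps the same attribute names but lets feed() clear hunger, since the original feed() setting hungry = True appears inverted.

class Dog:
    name: str
    energy: int
    hungry: bool
    mood: str

    def __init__(self, name: str):
        self.name = name
        self.energy = 0
        self.hungry = False
        self.mood = "happy"

    def feed(self):
        self.hungry = False  # eating satisfies hunger

    def play(self):
        self.mood = "happy"
        self.hungry = True  # exercise works up an appetite
        self.energy -= 1

    def rest(self, hours: int):
        self.energy += hours

ada = Dog("Ada")
ada.rest(8)
ada.play()
ada.feed()
assert ada.energy == 7 and not ada.hungry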
ojbkxc/Python
https://github.com/ojbkxc/Python
acd16c1e04579218bce599296d1cc231ff5e1c7c
e26b14212ab0f437394abaa3d5c8cb5f5194ce32
f9e02e978de984d03ef4ca0eb6c5b21a665f76ea
refs/heads/master
2023-06-24T05:33:27.819253
2021-07-14T11:49:09
2021-07-14T11:49:09
383,366,094
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.566224992275238, "alphanum_fraction": 0.6017135381698608, "avg_line_length": 26.019607543945312, "blob_id": "0661e04c9d0401890bd43bb734bf0749e00708cb", "content_id": "dc6a6d6a72094bc05f021d8add775d6e5b1942f9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 55480, "license_type": "no_license", "max_line_length": 128, "num_lines": 1581, "path": "/pythonProject1/py1/mzy.py", "repo_name": "ojbkxc/Python", "src_encoding": "UTF-8", "text": "print(\"Hello, World!\")\t\t#这是一个注释\t\t#\nprint(\"Hello\\nWorld!\")\t\t#这是一个换行\t\t\\n\nprint(\"Hello\\\\World!\")\t #这是一个转义符\t\\\n#转义字符 特殊的字符 无法”看见“的字符,与语言本身语法有冲突的字符。\n# \\ 续航 \\n 换行 \\' 单引号 \\t\t\t表示空4个字符,就是缩进,就是按一下tab键 \\r 回车\nprint(r\"01234\\n56789\")\t #把字符变成一个原始字符串\tr\nprint(\"0123456\"[-1])\t\t\t#得倒数第一个字符\nprint(\"0123456\"[2:5])\t \t#代表步长 :默认1即是2:5:1\nprint(\"01234567890123\"[3:8:2])\t#开始索引结束索引,截取不包括结束索引\nprint(\"01234 6 8 0123\"[3::1])\t#默认无穷大或无穷小,方向看步长方向\nprint(type(8)) #查看数据类型\nnum=8\nprint(\"输出一个浮点型%f%%\"%num)\t#输出一个% %%\nxcc=\"sddfhasi\"\nprint(\"输出一个字符串%s\"%xcc)\nprint (\"我叫 %s 今年 %d 岁!\" % ('小明', 10))\n#python中%d表示格式化一个对象为十进制整数。使用后,在需要输出的长字符串中占位置。输出字符串时,可以依据变量的值,自动更新字符串的内容\n#%3d意思是打印结果为3位整数,当整数的位数不够3位时,在整数左侧补空格,所以%3d的打印结果是 14\nprint(\"num=%3d\" % num) # output: num= 8\n#其他进制转换十进制\nprint(\"num=%3d\" % num)\nprint(0b101) #0b(数字0和小写字母B)代表二进制\nprint(0o11) #0o(数字0和小写字母O)代表8进制\t0o11\t9\nprint(0x101) #0x(数字0和小写字母X)代表16进制\t0x101 \t257\nprint(bin(0xE)) #其他进制转换二进制(默认十进制)\nprint(int(0o111)) #其他进制转换十进制\nprint(hex(0o111)) #hex其他进制转换16进制\nprint(oct(0o111)) #oct其他进制转换8进制\n# user_name = input(\"请输入您的用户名:\") #input语句中结果打印输出后,都会变成字符串\n# if user_name == \"admin\":\n# password = input(\"请输入密码:\")\n# if password =='123456':\n# print(\"登陆成功\")\n# else:print(\"密码错误\")\n# else:\n# print(\"用户名错误\");\n# 位运算符(&与,|或,^异或,~取反,<<做移动,>>右移动)\na=(int(0b110101)) #53\nb=(int(0b101001)) #40\nprint(a,b)\nprint(bin(a))\nprint(bin(b))\nprint(bin(a&b))\nprint(bin(a|b))\nprint(bin(a^b))\nprint(bin(~a))\nprint(bin(a<<3))\nprint(bin(b>>2))\nprint(~a) #a取相反数-1=~a\n #110101加1为110110加上符号-\n# 练习题:如存在一个正整数为123,要求分别取出每个位数上的数值?\na=123;print(a//100,a%100//10,a%10)\n\n#索引,截取,步长加强篇\nprint(\"01234567890123\"[::1])\t #步长默认为1正向输出 [开始索引:结束索引:步长]\nprint(\"01234567890123\"[::-1])\t #反转输出(默认时输出方向随步长方向)\nprint(\"01234567890123\"[0::-1]) #0\nprint(\"01234567890123\"[1:2:-1])\t #输出方向和步长方向不一致则为空\nprint(\"01234567890123\"[0:-3:-1]) #空\nprint(\"01234567890123\"[-2:3]) #空\nprint(\"01234567890123\"[-2:3:-1]) #输出方向与步长方向一致\nprint(\"01234567890123\"[-1:-7:-2]) #319 步长为2每次跨越2步\n\n#字符串中常见的函数\nstrl = \"abc_df\"\nprint(strl.capitalize()) #把字符串的首字母变成大写\nprint(strl.title()) #当字符串中有下划线链接时,每段的开头都变为大写\nprint(strl.upper()) #把字符串全部变为大写\nprint(\"A\".lower()) #把字符串的大写字母便变成小写\nprint(\"aaAa\".count(\"a\")) #统计字符串出现的次数\nprint(\"abcdefg\".strip(\"g\")) #删除字符串的开头或结尾\n#删除字符串的右边/左边字符串或字符\nprint(\"abcdedcba\".lstrip(\"a\")) #左\nprint(\"abcdedcba\".rstrip(\"a\")) #右\nprint(str.split(\"_\")) #对字符串指定切割,以列表形式返回split('分割符','分割次数')\nstr1 = 's'\nprint(str1.join(strl)) #把字符串中的每个元素插入到另一个字符串中间\nprint(strl.startswith(\"a\")) #判断字符串是否以什么开头\nprint(strl.endswith(\"f\")) #判断字符串以什么结束\nprint(strl.isalnum()) #判断字符串是否为数字和字母组合\nprint(strl.isdigit()) #判断字符串是否为全数字\nprint(strl.isalpha()) #判断字符串是否为全字母\nprint(strl.istitle()) #判断字符串是否为大写字母开头\nprint(strl.isupper()) #判断字符串是否为大写字母\nprint(strl.islower()) #判断字符串是否为小写字母\n#指定 beg(开始) 和 end(结束) 范围,则检查是否包含在指定范围内,如果包含子字符串返回开始的索引值,否则返回-1\n#str.find(str, beg=0, end=len(string))\nprint(strl.find(\"c\")) 
#查找字符串出现某个字符的位置\nprint(strl.rfind(\"c\")) #返回字符串最后一次出现的位置,如果没有匹配项则返回 -1\nprint(strl.replace('c','z')) #替换字符串中的内容\n\n#定义列表的方法:\nlist1 = [1,2,3,'test']\nprint(list1)\nprint(type(list))\n#字符串转换为列表通过list\nstr2 = '123456'\nstr2 = list(str2)\nprint(str2)\n#修改\nlist1[2]='二'\nprint(list1)\n#添加,默认最后位置\nlist1.append('添加元素')\nprint(list1)\n#添加/合并两个列表,被添加列表元素默认添加在元列表的末尾\nlist2 = '789'\nlist1.extend(list2)\nprint(list1)\n\n#在指定索引位添加元素\nlist3 = '0123456'\nlist3 = list(list3)\nlist3.insert(3,'添加') #在索引位之前添加\nprint(list3)\n#移除列表中的元素\nlist3.remove('0')\nprint(list3) #一次只能移除一个\ndel list3[1] #删除索引位的函数\ndel list3[1:3] #删除索引位的函数\nlist3.pop(0) #选择索引位删除(默认最后一个)\nprint(list3.pop()) #删除并默认打印最后一个值\nprint(list3)\nlist4 = '135798642'\nlist4 = list(list4) #数字字符串会从int类型转换成char类型\nlist4.sort() #从小到大排序\nprint(list4)\nlist4 = sorted(list4,reverse=True) #从大到小排序\nprint(list4)\nlist4.reverse() #反转输出\nprint(list4)\nlist5=(1,2,3,4,5,6)\nprint(list5.index(3)) #查找一个值返回该元素索引位,若是char型则不能查找\nprint(type(list5))\n\n#tuple元组中的元素不能被修改 -----元组是列表的二次加工\ntuple1 = (1,2,3,4,5,6)\ntuple1 = list(tuple1) #把元组转换成列表\nprint(tuple1)\ntuple1 = tuple(tuple1) #把列表转换成元组\nprint(tuple1)\n#只能通过间接的方法,把元组转换成列表,修改列表中的值后再转换成元组\n#当元组中只有一个元素时,必须在该元素的后面添加一个逗号,否则该数据结构不是元组\n# tuple=('字符串')\n# tuple=('元组',)\n#元组常见函数:\ntuple2 = (1,2,2,2,4,3,56,8,'数')\nprint(tuple2.index('数')) #返回元组索引位\nprint(tuple2.count(2)) #统计\nprint(tuple2[5:8]) #打印\n\nz = '123456478914234567489'\nprint(z.index('4'))\n#找第8索引位后第一个4的索引位\nx=z.find('4',8)\n\n#定义字典的方法\ndic1 = {\"id\":202101,\"name\":\"zhangsan\",\"age\":18} #创建一个字典\nprint(dic1)\nlist1 = [(\"id\",202101),(\"name\",\"zhangsan\"),(\"age\",18)] #转换成字典\ndic1 = dict(list1)\nprint(dic1)\ndic2 = {} #定义一个空字典\nprint(dic2)\n#字典常见函数\ndic3 = {\"id\":202101,\"name\":\"zhangsan\",\"age\":18}\ndic3[\"sex\"] = 1 #添加键值,使用key:value形式\nprint(dic3)\ndic3[\"age\"] = 20 #更改\nprint(dic3)\n#使用setdefault()函数添加键值对\ndic3.setdefault('sex',1) #添加新的键值对\nprint(dic3)\nprint(dic3.keys()) #取出字典中的键\nprint(dic3.values()) #取出字典中的值\nprint(dic3['name']) #取出字典的键对应的值\n\n#遍历字典(依次取出所有的键和值) for ... 
in ...\nfor k in dic3:\n print(k,dic3[k])\nfor k,v in dic3.items():\n print(k,v)\ndic1.clear() #清空字典 clear()\ndel dic2 #删除字典\ndel (dic3['id']) #删除字典中的键值对\n#使用python中自带的del方法删除\ndel (dic3['name'])\ndic3.pop('age')\n\ndic4 = {\"id\":202101,\"name\":\"zhangsan\",\"age\":18}\n#判断字典_是否有某个键,返回布尔值\nprint(dic4.__contains__('name'))\n# if(dic4.__contains__('name')):\n# print('有这个键')\n# else:\n# print('没有这个键')\ndic4.popitem() #随机删除键值对 popitem() 默认删除最后一对键值对\ndic4.update(dic3) #更新一个字典_字典3加到字典4后面\nprint(dic4)\n#创建一个新字典,以序列 seq 中元素做字典的键,value 为字典所有键对应的初始值。\ndic4 = {}.fromkeys(['name'],'zhangsan')\nprint(dic4)\n\n# #集合_去重(可变集合与不可变集合)\n# #可变集合set\n# list5 = [1,2,5,8,1,1,1,1,2,2,56,84]\n# set1 = set(list5)\n# print(set1)\n# set1.add('添加')\n# set.remove('添加')\n# set1.pop()\n# 不可变集合\n# ste2 = frozenset\n\n#常见语句\n#if条件判断语句\n# 单分支语句\n# if 条件:\n# 代码块\n# else:\n# 代码块\n# 多分支语句\n# if 条件:\n# 代码块\n# elif 条件:\n# 代码块\n# elif 条件:\n# 代码块\n#if嵌套语句\n# if 条件:\n# if 条件:\n# 代码块\n# else :\n# 代码块\n# else:\n# 代码块\n# user = input('请输入你的用户名')\n# if user == 'admin':\n# passwprd = input(\"请输入密码:\")\n# if passwprd == '123456':\n# print('登陆成功!')\n# else :\n# print('密码错误')\n# else:\n# print('用户名错误');\n#三目运算\n# user_name = input('请输入用户名:')\n# print('管理员上线'if user_name == 'admin' else '普通用户上线')\n\n# while 循环语句\n# while 条件:\n# print()\n# 变化条件\n# print()\n# i = 1\n# while i<=10:\n# print(i);i+=1\n# # 1.打印出1到10的偶数\n# i = 1\n# while i<=10:\n# if i % 2 == 0:\n# print(i,end=' ')\n# i+=1\n# # 2.打印1到10的自然数之和\n# i=1;j=0\n# while i<=10:\n# if i % 1 == 0:\n# j+=i\n# i+=1\n# print('\\n1到10的自然数之和为%s'%j,end=' ')\n\n# for循环语句\n# for XXX in XXX:\n# 代码块\n# for循环语句自带一个变量\n# list6 = [1,2,3,4,5,6,7,8,9]\n# for i in list6: #语句中的i只是一个变量名,可以随意修改\n# print(i,end=' ')\n# sum=0\n# for i in range(1,11):\n# sum+=1\n# print('\\n1到10的自然数之和为%s'%sum,end=' ')\n\n# 1、求出1 / 1 + 1 / 3 + 1 / 5⋯⋯+1 / 99的和\nsum=0\nfor i in range(1,100):\n if i%2!=0:\n sum+=1/i\nprint(sum)\n# 2、用循环语句,计算2 - 10之间整数的循环相乘的值。\nsum=1\nfor i in range(2,11):\n sum*=i\nprint(sum)\n# 3、用for循环打印九九乘法表\n#方法一\n# for i in range(1,10):\n# for j in range(1,10):\n# if i>=j:\n# print(j,'x',i,'=',i*j,end=' ')\n# print()\n#方法二\nfor i in range(1,10):\n for j in range(1,i+1):\n print('{}*{}={}'.format(j,i,(i*j)),end=' ') #format()函数:用于格式化输出\n print()\n# 4、求每个字符串中字符出现的个数如:helloworld\n# 方法零:\nh = 'helloworld'\nfor i in list(set(h)):\n print(i,h.count(i),end=' ')\n # print('{}{}'.format(i, h.count(i)), end=' ')\n# 方法一:\nh = 'helloworld'\nfor i in range(len(set(h))): #序列的索引来迭代,可以用range()和len()组合\n print(list(set(h))[i],h.count(list(set(h))[i]),end=' ')\n# h = 'helloworld'\n# seth = list(set(h))\n# for i in range(len(seth)): #序列的索引来迭代,可以用range()和len()组合\n# print(seth[i],h.count(seth[i]),end=' ')\n# 方法二:\n# str1='helloworld'\n# dic={}\n# for i in str1:\n# if dic.__contains__(i):\n# dic[i]= dic[i]+1\n# else:\n# dic[i]=1\n# print(dic)\n# 方法三:\n# str1='helloworld'\n# list1=list(str1)\n# set1=set(list1)\n# for i in set1:\n# print(i,list1.count(i))\n# # 5、实现把字符串str = \"duoceshi\"中任意字母变为大写(用input语句实现)\n# 改变一个\n# str1 = \"duoceshi\"\n# a = input('请输入要变为大写的字母')\n# print(str1.replace(a,a.upper()))\n# 改变任意个\n# str1 = \"duoceshi\"\n# a = input('请输入要变为大写的字母')\n# for i in a:\n# b = i.upper()\n# if i in str1:\n# str1 = str1.replace(i,b)\n# print(str1)\n# 6、求出1900 - 2017年的闰年?‘\n# 方法一,分别输出:\ns = ['世纪闰年'];p = ['普通闰年']\nfor i in range(1900,2018):\n if i%400 == 0:\n s.append(i)\n else:\n if i%4 == 0 and i%100 != 0:\n p.append(i)\nprint(s,'\\n',p)\n# 方法二,一起输出:\nl1 = [] #创建一个空列表\nfor i in range(1900,2018):\n if i%4 == 0 and i%100 != 0:\n 
l1.append(i)\n elif i%400 == 0:\n l1.append(i)\nprint(l1)\n\n# 普通闰年:能被4整除但不能被100整除的年份为普通闰年。(如2004年就是闰年,1999年不是闰年)\n# 世纪闰年:能被400整除的为世纪闰年。(如2000年是世纪闰年,1900年不是世纪闰年)\n# 7、分别打印100以内的所有偶数和奇数并存入不同的列表当中\nj = []; o = []\nfor i in range(1,101):\n if i%2==0:\n j.append(i)\n else:\n o.append(i)\nprint(j,'\\n',o)\n# 8、请写一段Python代码实现删除一个list = [1, 3, 6, 9, 1, 8]# 里面的重复元素\n# 方法一:\nlist6 = [1, 3, 6, 9, 1, 8]\nprint(set(list6))\n# 方法二:\nlist6 = [1, 3, 6, 9, 1, 8]\nl1 = []\nfor i in list6:\n if i in l1:\n pass #表示满足此条件不进行任何操作\n else:\n l1.append(i)\nprint(l1)\n# 9、将字符串类似:\"k:1|k3:2|k2:9|...|kn:m\", 处理成key:value或json格式, 比如\n# {\"k\": \"1\", \"k3\": \"2\"}\na = \"k:1|k3:2|k2:9\"\ndic = {}\na = a.split('|')\nfor i in a:\n # print(i)\n b = i.split(':')\n # print(b) #查看列表\n dic.setdefault(b[0],b[1]) #字典添加键值对\nprint(dic)\n# 10、把字符串user_controller转换为驼峰命名UserController\n# 方法一:\nname = 'user_controller'\nprint(name.title().replace('_',''))\n# 方法二:\nname = 'user_controller'\na = name.title()\nb = a.split('_') #以列表的形式返回\n# print(b) #查看列表\nprint(b[0]+b[1]) #字符串相加\n# 方法三:\nname = 'user_controller'\na = name.split('_')\nres = ''\nfor i in a:\n b = i.capitalize()\n res+=b\nprint(res)\n# print(\"你的年龄是多少:\");age = input()\n# print('第二个人的年龄是',age)\n# print('第一个人的年龄是',age,end='*')\n# print('第一个人的年龄是%s'%age,end=' ')\n\n# python中的函数\n# 内置函数(内建函数):安装好python后就直接可以使用的函数;print();del;pass\n# 第三方函数:需要通过python安装语法或者工具执行安装第三方库后才能使用的函数;如:使用pip install 安装selenium后才能使用的find_element_by_id()函数\n# 自定义函数:用户根据自己业务功能需要实现的场景,自己编写的函数\n\n# 定义函数:def\n# 函数的调用\n\n# 1在当前模块直接调用\n\n# 定义函数\ndef printme(str):\n # 打印任何传入的字符串\n print(str)\n return\n# 调用函数\nprintme(\"我要调用用户自定义函数!\")\nprintme(\"再次调用同一函数\")\n\n# from pythonProject1.py1.cs import *\n\n# def login():\n# user = input('请输入用户名:')\n# if user == 'admin':\n# password = input('请输入密码:')\n# if password == '123456':\n# return '账户余额为86153人民币'\n# else:\n# return ' 密码错误!!!'\n# else:\n# return '请重新输入用户名:'\n# def login():\n# user = input('请输入用户名:')\n# if user == 'admin':\n# password = input('请输入密码:')\n# if password == '123456':\n# return '登录成功'\n# else:\n# return ' 密码错误!!!'\n# else:\n# return '请重新输入用户名:'\n#\n# def select_amount():\n# value = login() # 函数的传递\n# if value == \"登录成功\":\n# print(\"您的余额为:¥8888.88\")\n# else:\n# print(\"登录失败!\")\n# select_amount()\n\n# 练习题\n# 11、冒泡排序\n# 给一组无规律的数据从大到小或从小到大进行排序如:list = [2, 6, 9, 10, 18, 15, 1]\nlistl = [2, 6, 9, 10, 18, 15, 1]\nfor i in range(len(listl)):\n for j in range(i+1,len(listl)):\n if listl[i]>listl[j]:\n # print(listl) #验证步骤\n listl[i],listl[j] = listl[j],listl[i] #实现位置互换\nprint(listl)\n# 12、分析以下数字的规律,0 1 1 2 3 5 8 13 21 34用Python语言编程实现输出\nj=1\ni=0\nwhile i <=34:\n print(i,end=' ')\n i,j=j,i+j\n# 13、先定义一个字典来存放用户名\n# 和密码如dic = {'admin': '123456', 'dcs46': '654321'}\n# 要求如下:\n# 1、从字典中获取用户完成登入,登入时判断用户是否存在\n# 存在直接登入\n# 2、如果输入的登入用户判断不存在字典,则调用注册方法,完成该用户的注册,注册成功后写入字典\n# dic = {'admin': '123456', 'dcs46': '654321'}\n# def login():\n# user_name = input('请输入用户名:')\n# if dic.__contains__(user_name):\n# pwd = input('请输入密码:')\n# if pwd == dic[user_name]:\n# print('登陆成功!')\n# else:\n# print('密码错误,请重新输入')\n# else:\n# print('用户名不存在,请注册后再登陆')\n# register()\n# def register():\n# new_user_name = input('请输入要注册的用户名:')\n# if dic.__contains__(new_user_name):\n# print('用户名已存在,请重新输入')\n# else:\n# new_pwd = input('请输入同户名密码:')\n# new_pwd1 = input('请再次输入用户密码:')\n# if new_pwd==new_pwd1:\n# print('注册成功!')\n# dic.setdefault(new_user_name,new_pwd)\n# print(dic)\n# else:\n# print('两次密码不匹配,请重新输入')\n# login()\n# 14、用字符串aabbcdbaaabc,用你熟悉的语言实现去除\"ab\"子串\nprint()\n# 方法一\n# 
ab=input('请输入要去除的字符串')\nstrl='aabbcdbaaabc'\nab = 'ab'\nwhile ab in strl:\n strl = strl.replace(ab, '')\nprint(strl)\n# 方法二\n# def del_ab(str):\n# s = str.replace('ab','')\n# if s.count('ab')>0:\n# del_ab(s) #递归:在函数的内部调用函数本身\n# else:\n# print(s)\n# del_ab(str)\n\n# python中常见的内置函数\n# 对字符串的格式化输出\n# print('{}{}{}'.format(tr1,str2,a))\n# 索引位输出\n# print('{0} is number one'.format())\n# print(\"{0}{1}{0} is number one\".format(str2,str1))\nlist6=['姓名','男','18']\nprint('姓名:{0[0]} 性别:{0[1]} 年龄:{0[2]}'.format(list6))\nprint(list4)\n\n#zip()函数 通常用于列表合并转换为字典的键和值\n# 将可迭代的对象作为参数,将对象中对应的元素打包成一个个元组,然后返回由这些元组组成的对象\nlist7 = zip(list6,list4)\nprint(list7)\ndic = dict(list7)\nprint(dic)\n\n# open() 作用;用于操作Window系统内的TXT文件\n# 1.创建一个用于打开文件的对象\n# o = open(file=r'D:\\Users\\Administrator\\Desktop\\文档.txt',mode='r',encoding='utf-8') #utf-8或者gbk\n# all = o.read() #读取全部内容\n# all = o.readline() #默认读取文件第一行内容\n# all = o.readlines() #以列表的形式读取\n# print(all)\n# 写的模式 w write\n# o = open(file=r'D:\\Users\\Administrator\\Desktop\\文档.txt',mode='w',encoding='utf-8')\n# o.write('hello \\nword')\n# o.close() #关闭文件链接对象,使写入的内容能够直接执行,写入内部文件,而不是停留在系统缓存中:同时还起到释放资源的目的\n# 追加模式 a append\n# o = open(file=r'D:\\Users\\Administrator\\Desktop\\文档.txt',mode='a,encoding='utf-8')\n# open()函数的扩展用法\n# with open(file=)as f:\n# f.write('\\n写入内容')\n# isinstance() 作用:用来判断数据结构的类型\n# print(type(list7))\n# print(isinstance(list7,zip))\n\n# 15、题目:传入一个json串, 返回一个字典, 字典只取出json最底层的数据, 中间如果有字典格式的字符串也需要进行处\n# 理, 请以下面的数据为例, 请用递归的方法实现。\n# 数据输入json = {\"a\": \"aa\", \"b\": [{\"c\": \"cc\", \"d\": \"dd\"}, {\"f\": {\"e\": \"ee\"}}]}输出:dic = {'a': 'aa', 'c': 'cc', 'e': 'ee', 'd': 'dd'}\njson = {\"a\": \"aa\", \"b\": [{\"c\": \"cc\", \"d\": \"dd\"}, {\"f\": {\"e\": \"ee\"}}]}\ndic={}\ndef read_json(json):\n for k,v in json.items():\n if isinstance(v,list):\n for i in v:\n read_json(i)\n elif isinstance(v,dict):\n read_json(v)\n else:\n dic.setdefault(k,v)\n return dic\nprint(read_json(json))\n# 16、水仙花数:一个三位数,其按位立方之和等于该数本身,该数称为水仙花数。求出100 - 1000\n# 之间的水仙花数\n# (其实,水仙花数是“自幂数”中的一种;自幂数:一个n位数,其按位数字的n次方之和,等于该数本身。\ndef sxh():\n for i in range(100, 1000):\n x = i // 100\n y = i % 100 // 10\n z = i % 10\n if i == x**3 + y**3 + z**3:\n print(i, end=' ')\nsxh()\n# 17、用递归的方法求出n的阶乘?4的阶乘结果为?\n# n!=1×2×3×...×(n-1)×n。阶乘亦可以递归方式定义:0!=1,n!=(n-1)!×n\nprint()\ndef jc(n):\n if n==0 or n==1:\n return (1)\n else:\n return (n*jc(n-1))\nprint(jc(4))\n# 18、有如下url地址, 要求实现截取出\"?\"号后面的参数, 并将参数以\"key value\"的键值形式保存起来, 并最终通过#get(key)的方式取出对应的value值。\n# url地址如下:\n#http://ip:port/extername/get_account_trade_record.json?#page_size=20&page_index=1&user_id=203317&trade_type=0\"\nstr1='http://ip:port/extername/get_account_trade_record.json?#page_size=20&page_index=1&user_id=203317&trade_type=0'\ndef read_url(str):\n dic = {}\n for i in str1.split(\"?\")[1].strip('#').split('&'):\n dic.setdefault(i.split('=')[0],i.split('=')[1])\n # c=i.split('=')\n # dic.setdefault(c[0],c[1])\n print(dic)\nread_url(str1)\n\n# python模块内可以包含:变量、常量、函数(方法)、类\n# 1.直接导入整个模块\n# import time #导入time这个模块\n# print(time.time())\n# 2.导入模块内的单个方法\n# from time import time #从time模块内导入time方法\n# print(time())\n# 3.导入模块内的多个方法\n# from time import time,strftime,asctime #从time模块内导入time、strftime、asctime三个方法\n# #导入多个方法之间,用英文逗号间隔\n# 4.导入模块内的所有方法、变量等\n# from time import * #从time模块内导入的所有方法、变量等 *代表所有\n# from pythonProject1.py1.xxx.py #找不到往上层找\n# 5.同时导入多个模块\n# import time,string,re,random #同时导入多个模块\n\n# python中常见的模块\n# 1.time模块\nimport time\n# time模块中常见的方法:\nprint(time.time()) #1970到现在经过的秒数 time()\nprint(time.ctime()) #固定格式的当前时间 ctime()\n# 
time.sleep(3) #休眠-强制等待,使系统进程停止多少时间-单位秒 sleep()\nprint(time.asctime()) #转换为asc码显示当前时间 asctime()\nprint(time.strftime('%Y-%m-%d-%H-%M-%S')) #时间格式化 strftime()\n\n# 2.random模块\n# import random\n# 生成随机浮点数、整数、字符串,甚至帮助你随机选择列表序列中的一个元素,打乱一组数据等\n# random模块中常见的方法:\n# random.random() #该方法是生成0-1之间的浮点数,但是能取到0,但是取不到1\n# random.randint(x,y) #该方法生成指定范围内整数,包括开始和结束值\n# python中random模块\n# random.randrange(x,y,step) #生成指定范围内的奇数或偶数,不包括结束值 (开始值,范围,步长)\n# random.randrange(1,10,2)\n# random.randrange(0,10,2)\n\n# random.sample(seq,n) #从序列seq中选择n个随机且独立的元素\n# random.sample(seq,3) #返回的元素以列表形式存储\n\n# random.choice(list1) #从序列中随机选一个元素生成随机字符\n# print(random.choice(list1))\n# random.shuffle(list1) #洗牌(随机数列)\n# print(list1)\n\n# hashlib.md5() #MD5加密\n# import hashlib\n# md5 = hashlib.md5()#创建一个hashlib模块内的MD5方法的对象,赋值给到一个为md5的变量\n# str1 = '123456'\n# md5.update(str(str1).encode('utf-8'))\n# print(md5.hexdigest())\n\n# import string,random\n# print(string.digits)#输出0-9的数字\n# print(string.ascii_letters)#输出26个大小写字母\n# print(string.ascii_uppercase)#输出26个大写字母\n# print(string.ascii_lowercase)#输出26个小写字母\n\n# 1、使用random模块随机生成手机号码、自己定义手机号码开头的前三位\n# 方法一\n# def h():\n# str3 = input('请输入号码前三位数:')\n# import random\n# for i in range(8):\n# str3+=str(random.randint(0,9))\n# print(str3)\n# h()\n# 方法二\n# def h():\n# import random\n# str3 = input('请输入号码前三位数:')\n# str3 += ''.join(random.sample('0123456789',8))\n# print(str3)\n# h()\n# 方法三\n# str3 = '0123456789'\n# def ra_phone(str3):\n# a = '123'\n# import random\n# for i in range(8):\n# b =random.choice(str3)\n# a+=b\n# print(a)\n# ra_phone(str3)\n# 方法四\n# str3 = '0123456789'\n# def ra_phone(str3):\n# a = '123'\n# import random\n# l = random.sample(str3, 8)\n# for i in l:\n# a += i\n# print(a)\n# ra_phone(str3)\n# 2、用random模块随机生成6位数验证码\nimport string,random\nstr2 = string.digits+string.ascii_letters\n# print(str2)\ndef verify(n):\n res = ''\n for i in range(6):\n res+=random.choice(n)\n print(res)\nverify(str2)\n# 3、通过md5加密算法把随机生成的6位数验证码进行加密返回16进制的字符串\nimport hashlib\nstr1 = string.digits+string.ascii_letters\ndef verify(str):\n res = ''\n for i in range(6):\n res += random.choice(str)\n return\n\ndef v_md5(str2):\n md5 = hashlib.md5()\n md5.update(str(str2).encode('utf-8'))\n print(md5.hexdigest())\nv_md5(verify(strl))\n\n# 3.os模块\n# import os\n# os模块提供了多数操作系统的功能接口函数。当os模块被导入后,它会自适\n# 应于不同的操作系统平台,根据不同的平台进行相应的操作,在python编\n# 程时,经常和文件、目录打交道,所以离不了os模块。\n# os模块中常见的方法:\n# os.getcwd()获取当前执行命令所在目录\n# print(os.getcwd())\n# os.path.isfile()判断是否文件\n# path = 'D:\\Program Files\\python\\python.txt'\n# print(os.path.isfile(path1))\n# python中os模块\n# os.path.isdir() #判断是否是目录\n# os.path.exists() #判断文件或目录是否存在\n# os.listdir(dirname) #列出指定目录下的目录或文件\n# path1 = 'D:\\Program Files\\python\\python5'\n# print(os.listdir(path2))\n# os.path.split(name) #分割文件名与目录\n# print(os.path.split(path2))\n# os.path.join(path,name) #连接目录与文件名或目录\n# print(os.path.join(path1,'python.txt'))\n\n# os.mkdir(dir) #创建一个目录\n# os.rename(old,new) #更改目录名称\n# print(path2)\n# os.rename(path1,'python2')\n\n# 4.re模块\n# 实现一个编译查找,一般在日志处理或者文件处理时用的比较多,正则表达式主要用于模式匹配和替换工作。\n# 实现一个编译查找,一般在日志处理或者文件处理时用的比较多\n# 正则表达式主要用于模式匹配和替换工作。\n# 预定义字符集匹配:\n# \\d:数字0-9\n# \\D:非数字\n# \\s:空白字符\n# \\n:换行符\n# \\r:回车符\n# python中re正则模块\n# re模块数量词匹配:\n# 符号^:表示的匹配字符以什么开头\n# 符号$:表示的匹配字符以什么结尾\n# 符号*:匹配*前面的字符0次或n次\n# eg:ab* 能匹配a 匹配ab 匹配abb\n# 符号+:匹配+前面的字符1次或n次\n# 符号?:匹配?前面的字符0次或1次\n# 符号{m}:匹配前一个字符m次\n# 符号{m,n}:匹配前一个字符m到n次(包括n次),m或n可以省略,mn都是\n# 正整数\n# re模块相关函数\n# 1、match\n# 从第一个字符开始匹配,如果第一个字符不是要匹配的类型、则匹配失败并报错\n# 注意:如果规则带了'+',则匹配1次或者多次,无'+'只匹配一次\n# 2、search\n# 
从第一个字符开始查找、一找到就返回第一个字符串,找到就不往下找,找不到则报错\n# 3、findall\n# 从第一个字符开始查找,找到全部相关匹配为止,找不到返回一个列表[]\n# 4、compile\n# 编译模式生成对象,找到全部相关匹配为止,找不到返回一个列表[]\n\n# Python中常见的模块:\n# time模块\n# random模块\n# hashlib模块\n# os模块\n# re模块\n# string模块\n# xlrd模块\n# json模块\n# sys模块\n\n# 19、存在一个文件, 文件名test.txt\n# 内容如下:\n# 01 success\n# 02 fail\n# 03 fail\n# 04 success\n# ....请使用Python语言编写程序实现统计该文件中\n# 有多少个success\n# 多少个fail的功能?\n# path=r'D:\\Program Files\\python\\test.txt'\n# dic={}\n# def count(path):\n# o = open(file=path, mode='r', encoding='utf-8')\n# list1 = o.readlines()\n# print(list1)\n# for i in list1:\n# a=i.strip('\\n').split(' ')[1]\n# print(a)\n# if dic.__contains__(a):\n# dic[a]=dic[a]+1\n# else:\n# dic[a]=1\n# print(dic)\n# count(path)\ndef read_file1(file1):\n dic = {}\n file1 = open(file=r'D:\\Program Files\\python\\test.txt', mode='r', encoding='utf-8')\n all = file1.readlines()\n print(all)\n for i in all:\n a = i.strip('\\n')\n # print(a)\n b = a.split(' ')[1]\n # print(b)\n if b in dic:\n dic[b]=dic[b]+1\n else:\n dic[b]=1\n print(dic)\n print('出现次数\"success\"的次数为:%d次'%dic['success'])\n print('出现\"fail\"次数为:%d次'%dic['fail'])\nread_file1(r'D:\\Program Files\\python\\test.txt')\n\n# 20、一个txt文件中已知数据为:\n# C4D\n# C4D/maya\n# C4D\n# C4D/su\n# C4D/max/AE\n# 统计每个字段出现的次数,比如C4D,maya, 请用最熟悉的语言或者伪代码实现该需求\ndef read_file1(file1):\n dic = {}\n file1 = open(file=r'D:\\Program Files\\python\\test.txt', mode='r', encoding='utf-8')\n a = file1.readlines()\n print(a)\n for i in a:\n # print(i)\n b = i.strip('\\n').split('/')\n # print(b)\n for j in b:\n if j in dic:\n dic[j]=dic[j]+1\n else:\n dic[j]=1\n print(dic)\nread_file1(r'D:\\Program Files\\python\\test.txt')\npath=r'D:\\Program Files\\python\\test.txt'\ndef a(path):\n dic={}\n o=open(file=path,mode='r',encoding='utf-8')\n# 21、统计一个文件的行数,以e:\\\\write.txt文件为例(内容自己建)\npath=r'D:\\Program Files\\python\\test.txt'\ndef count_len(path):\n o=open(file=path,mode='r',encoding='utf-8')\n all=o.readlines()\n print(len(all))\ncount_len(path)\n\n# 22、登录和注册\n# 要求如下:\n# 1、调用本地文件(user.txt)完成登录,如果存在则调用本地文件中用户\n# 和对应的密码进行登录,用户在本地文件中的格式如:admin:123456 xiao:123123\n# 2、登录用户不存在则调注册函数,将注册好的用户写入本地user.txt文件中,写入不能覆盖已有用户。\n# 3、用户名的长度大于等于6位,小于等于8位,用户密码大于等于6位小于等于8位。\n# path=r'D:\\Program Files\\python\\test.txt'\n# def read_txt(path):\n# dict1 = {}\n# o=open(file=path,mode='r',encoding='utf-8')\n# all=o.readlines()\n# print(all)\n# a=all[0].split(' ')\n# for i in a:\n# b=i.split(':')\n# dict1.setdefault(b[0],b[1])\n# return dict1\n# def login():\n# dict2=read_txt(path)\n# user_name = input(\"请输入您的用户名:\")\n# if dict2.__contains__(user_name):\n# user_pwd = input(\"请输入您的用户密码:\")\n# if user_pwd == dict2[user_name]:\n# print(\"登录成功\")\n# else:\n# print(\"密码错误,请重新输入\")\n# else:\n# print(\"用户不存在,请注册后登录!\")\n# b()\n# def b():\n# dict2=read_txt(path)\n# new_user_name = input(\"请输入你要注册的用户名:\")\n# if not dict2.__contains__(new_user_name):\n# if len(new_user_name)>=6 and len(new_user_name)<=8:\n# new_user_pwd = input(\"请输入您注册的用户密码:\")\n# new_user_pwd1 = input(\"请再次输入您注册的用户密码:\")\n# if new_user_pwd == new_user_pwd1:\n# if len(new_user_pwd)>=6 and len(new_user_pwd)<=8:\n# print(\"注册成功!\")\n# o=open(file=path,mode='a',encoding='utf-8')\n# all=o.write('\\n'+new_user_pwd1+':'+new_user_pwd)\n# o.close()\n# # write_user(new_user_name,new_user_pwd)\n# else:\n# print('密码长度非法!')\n# else:\n# print('两次密码输入不一致!')\n# else:\n# print('注册的用户名长度非法!')\n# else:\n# print('用户名已存在,请重新输入!')\n# login()\n# 23、使用os模块写一个递归调用打印出e:\\\\home下的所有文件名的绝对路径?\nimport os\npath = r'D:\\Program Files\\python\\python'\ndef abs_path(path):\n a = 
os.listdir(path)\n for i in a:\n b = os.path.join(path,i)\n if os.path.isdir(b):\n abs_path(b)\n else:\n print(b)\nabs_path(path)\n\n# 24、用正则方法实现统计e:\\\\python文件中指定字符如\"python\"的行数?(文件中的python字符)\nimport re\npath = r'D:\\Program Files\\python\\test.txt'\ndef count_py(path):\n res = 0\n py_re = re.compile('python')\n o = open(file=path,mode='r',encoding='utf-8')\n all = o.readlines()\n for i in all:\n a = i.strip('\\n')\n b = py_re.findall(a)\n if len(b)>0:\n res+=1\n print(res)\ncount_py(path)\n# 25、使用正则完成市面上手机规则的编写?(手机号:11位)\nimport random,re,string\nstrl = string.digits\ndef ra_phone(strl):\n while True: #当条件成立是,无限循环\n p_re = re.compile('^[1][3456789]\\d{9}')\n res = ''\n for i in range(11):\n a = random.choice(strl)\n res += a\n b = p_re.findall(res)\n if len(b) > 0:\n print(b)\n break #当打印出结果后,则停止整个循环\nra_phone(strl)\n# 26、用正则实现写一段代码统计e:\\\\log文件中error和warning单词出现的次数分别为几次?\n# 文件内容如下:\n# warningabchelloerror\n# warningerror\n# warning\n# errorwarningwarning\npath = r'D:\\Program Files\\python\\test.txt'\ndef count(file):\n dic = {}\n o = open(file=file,mode='r',encoding='utf-8')\n all = o.read()\n a = all.split('\\n')\n w_re = re.compile('warning')\n e_re = re.compile('error')\n for i in a:\n # print(i)\n b = w_re.findall(i)\n c = e_re.findall(i)\n for j in b:\n if dic.__contains__(j):\n dic[j] = dic[j]+1\n else:\n dic[j] = 1\n for k in c:\n if dic.__contains__(k):\n dic[k] = dic[k]+1\n else:\n dic[k] = 1\n print(dic)\ncount(path)\n# #27、用字符串aabbcdbaaabc,用你熟悉的语言实现去除\"ab\"子串(用正则表达式来实现)\nimport re\nstr1 = 'aabbcdbaaabc'\ndef del_ab(str):\n a = str.replace('ab','')\n ab_re = re.compile('ab')\n b = ab_re.findall(a)\n # print()\n if len(b)>0:\n del_ab(a)\n else:\n print(a)\ndel_ab(str1)\n\n# python中的类class\n# 类的分类:\n# 新式类 class 类名(object):\n# 经典类 class 类名:\nclass people(object): #定义一个名为peopie的新式类,且继承了object这个基类\n #objeck是所有类的基类也即是类的起源\n head = 1 #head是类变量\n def __init__(self,name): #构造函数,用来初始化整个类:在定义类的时候可以不写,如果不写则python\n # 调用默认的构造函数\n self.name = name #name是实例变量\n\n def func(self): #实例化话方法\n print('打篮球')\n\n# 实例化对象\np = people('小明') #为people这个类创建一个名为p的实例化对象\np.func() #用p这个实例化对象调用people类中的func1这个方法\n# 类中的变量和方法\nclass Peole:\n head = 1\n def __init__(self,name):\n self.name = name\n\n def func1(self,name1):\n print(name,'打篮球')\n\np = Peole('xiaoming')\n# p.func1('小张')\n# 变量的调用\n# print(Peole(p),head) #类名的调用类变量,必须传入一个对象\n# print(p.head) #对象调用类的变量\n# print(Peole(p).name) #类名调用实际变量\n# print(p.name) #对象调用实例变量\n\n# 类的三大特性\n# 封装:把函数写在类的内部\nclass people:\n def __init__(self,name):\n pass\n\n def __init__(self,name):\n self.__name = name #__name 叫做私有变量\n\n def func2(self):\n print(self.__name)\np = people('zhangsan')\np.func2()\n\nclass Func:\n def __init__(self):\n pass\n def func1(self):\n for i in range(1,10):\n for j in range(1,i+1):\n print(j,'x',i,'=',j*i,end=' ')\n print(' ')\nf = Func()\nf.func1()\n\n# 继承\nclass Father(object):\n def ___init__(self):\n pass\n def drink(self):\n print('喜欢喝酒')\n\n# class Sun(Father): #定义一个类,同时继承Father这个类\n# 继承构造函数\n# 方法一、\n# def __init__(self):\n# Father.__init__(self):\n# 方法二、\n# def __init__(self):\n# super(Father,delf).__init__(self)\n\n# def car(self):\n# print('喜欢跑车')\n# def disco(self):\n# print('喜欢蹦迪')\n\n# s = Sun()\n# s.car()\n\n#\n# # 多态\n# class Animal(object):\n# def __init__(self):\n# pass\n#\n# def func(self):\n# print('这是一个动物类')\n#\n# class Dog(Animal):\n# def __init__(self):\n# Animal.__init__(seif)\n#\n# def func(self):\n# print('这是一只哈士奇')\n#\n# def eat(self,name):\n# print(name+'骨头')\n#\n# class Dog_1(Animal):\n# def __init__(self):\n# 
Animal.__init__(self)\n#\n# def func2(self):\n# print('这是一只阿拉斯加狗')\n#\n# def eat(self):\n# print('吃狗粮')\n\n# selenium UI自动化\n# 打开浏览器\n# 1.创建一个打开浏览器的驱动对象\nfrom selenium import webdriver\ndriver = webdriver.Chrome() #实际上打开一个空浏览器\n# 2.打开一个网页地址\ndriver.get('http://www.baidu.com') #通过使用driver对象调用get方法打开百度网页\n# 在同一个浏览器内同时打开两个不同的网页地址\n# w = 'window.open(\"http://www.baidu.com\")'\n# driver.execute_script(w)\n# 在打开的窗口内,重新打开一个新的地址,覆盖原有打开的网页\n# driver.get('http://www.qq.com')\n\n# UI自动化之元素定位\n# 1.id定位\n# ele = driver.find_element_by_id('kw') #使用driver对象调用find_element_by_id()放定位id值为kw的元素,并赋值变量给ele\n# ele.send_keys('selenium') #使用定位到的元素变量值调用send_key()方法,在输入框内输入selenium\n# driver.find_element_by_id('kw').send_keys('selenim')\n# 2.name定位\n# 使用driver对象调用find_element_by_name()放定位name元素为wd的元素,\n# 再调用send_key()方法在该元素位置输入selenium\n# driver.find_element_by_name('wd').send_keys('selennium')\n# 3.class定位\n# driver.find_element_by_class_name('s_ipt').send_keys('selenium')\n# 4.xpath定位\n#//*[@id='kw'] //表示相对路径,*号表示任意标签[]内部的内容表示元素定位中可用的标识符\n# driver.find_element_by_xpath('//*[@id=\"kw\"]').send_keys('selenim') #xpath中的id定位\n# driver.find_element_by_xpath('//*[@name=\"wd\"]').send_keys('selenim') #xpath中的name定位\n# driver.find_element_by_xpath('//*[@class=\"s_ipt\"]').send_keys('selenim') #xpath中的class定位\n# driver.find_element_by_xpath('//input[@autocomplete=\"off\"]').send_keys('selenim')\n# driver.find_element_by_xpath('//*[@id=\"kw\" and @name=\"wd\"]').send_keys('selenim') #组合定位\n# driver.find_element_by_xpath('//*[@id=\"form\"]/span[1]/input[1]').send_keys('selenim') #父子定位\n\n# 5.css定位\n# driver.find_element_by_css_selector(\"#kw\").send_keys('selenim') #css中的id定位\n# driver.find_element_by_css_selector('.s_ipt').send_keys('selenim') #css中的class定位\n# driver.find_element_by_css_selector('[id=\"kw\"][name=\"wd\"]').send_keys('selenim') #css中的组合定位\n# driver.find_element_by_css_selector('form>span>input').send_keys('selenim') #父子定位\n\n# 6.link定位\n# driver.find_element_by_link_text('新闻').click() #链接文本值定位\n\n# 7.partial_link定位:模糊匹配定位\n# driver.find_element_by_partial_link_text('hao').click()\n\n# 8.tag_name定位:标签名定位\n# eles = driver.find_elements_by_tag_name('input') #根据标签名找出\n# for i in eles:\n# if i.get_attribute('id')=='kw':\n# i.send_keys('selenim')\n# 9.执行javaScript脚本语法\n# js = 'document.getElementById(\"kw\").value=\"selenium\"'\n# driver.execute_script(js)\n\n# 页面常见控件\n# 1.输入框和按钮的操作\n# 如:实现论坛的登陆功能?\nfrom selenium import webdriver\nfrom time import sleep\n\ndriver = webdriver.Chrome()\ndriver.get('http://192.168.30.129/bbs')\nsleep(1) #休眠(强制等待):作用是使用系统进程强制等待多少秒\ndriver.find_element_by_id('ls_username').send_keys('admin')\nsleep(0)\ndriver.find_element_by_id('ls_password').send_keys('123456')\nsleep(0)\ndriver.find_element_by_class_name('pn').click()\n\n# pyinstaller -F cs.py #生成exe文件\n\n# 按钮、链接、隐藏框\n# 按钮\n# from selenium import webdriver\n# from time import sleep\n# sleep(1)\ndriver.get('http://www.baidu.com')\n# sleep(1)\n# driver.find_element_by_id('kw').send_keys('selenium')\n# sleep(1)\n# driver.find_element_by_id('su').click()\n\n# 链接\n# driver.find_element_by_link_text('hao123').click()\n\n# 隐藏框\n# from selenium.webdriver.common.action_chains import ActionChains\n# ele=driver.find_element_by_name('tj_briicon')\n# ActionChains(driver).move_to_element(ele).perform()\ndriver.find_element_by_class_name('pn').click()\n\n# python自动化中的三种等待方式\n# 1.强制等待:就是time模块内的sleep方法\n# sleep(20)\n# 2.隐式等待:selenium库内的webdriver模块内的implicitly_wait()方法。\n# driver.implicitly_wait(20)\n# 3.显式等待:针对页面的单个元素,明确等待某个元素在规定的时间内是否加载完成\n# from 
selenium.webdriver.common.by import By\n# from selenium.webdriver.support import expected_conditions as EC\n# from selenium.webdriver.support.wait import WebDriverWait\n# ele=driver.find_element_by_id('kw')\n# WebDriverWait(driver,10,0.5).until(EC.presence_of_element_located((By.ID,\"kw\")))\n\n# python自动化中的断言方法\n# from selenium import webdriver\n# from time import sleep\n# driver = webdriver.Chrome()\n# driver.get('http://www.baidu.com')\n# 1.获取页面元素的文本值\n# value = driver.find_element_by_id('s-usersetting-top').text\n# print(value)\n# if value =='设置':\n# print('页面打开成功')\n# else:\n# print('页面打开失败')\n# 2.获取页面元素的属性值\n# ele = driver.find_element_by_id('s-usersetting-top')\n# value = ele.get_attribute('name')\n# print(value)\n# assert value == 'tj_settingicon' #asser是python语言自带的断言关键字,当断言成功后,没有任何信息输出\n# #只有断言失败后,才会抛出异常信息\n# 3.获取页面的title\n# title = driver.title\n# # print(title)\n# assert title =='百度一下,你就知道'\n\n# UI页面的常见操作\nfrom selenium import webdriver\nfrom time import sleep\ndriver = webdriver.Chrome()\ndriver.get('http://www.baidu.com')\ndriver.implicitly_wait(20) #隐式等待,最大等待时间20秒\ndriver.maximize_window() #窗口最大化\nsleep(2)\ndriver.refresh() #刷新页面\nsleep(1)\ndriver.get('http://qq.com')\nsleep(1)\ndriver.back() #返回上一步\nsleep(1)\nwin = \"window.open('http://qq.com')\"\ndriver.execute_script(win)\nsleep(2)\n# driver.close() #关闭当前窗口\nsleep(1)\nsize = driver.get_window_size()\nprint(size) #{'width':1552,'height':840}\nsleep(1)\ndriver.quit() #关闭所有窗口\n# 窗口切换\ndriver = webdriver.Chrome()\ndriver.get('http://www.baidu.com')\ndriver.implicitly_wait(20)\ndriver.maximize_window()\nwin = 'window.open(\"http://qq.com\")'\ndriver.execute_script(win)\ntitle = driver.title\nprint(title) #百度一下,你就知道\n# hand = driver.current_window_handle #获取当前窗口的句柄\n# print(handle)\nhandles = driver.window_handles #获取所有窗口的句柄\n# print(handles)\ndriver.switch_to.window(handles[-1]) #切换窗口到最新打开的页面\ntitle = driver.title\nprint(title)\n# driver.find_element_by_xpath('/html/body/header/div[1]/div/div/u1/li[2]/a').click()\n\n# 下拉框的定位操作\nfrom selenium.webdriver.support.select import Select\ndriver = webdriver.Chrome()\ndriver.get('https://www.ctrip.com/')\ndriver.implicitly_wait(20)\ndriver.maximize_window()\nele = driver.find_element_by_id('J_roomCountList')\nsleep(1)\nSelect(ele).select_by_index(1) #通过下拉框的索引位选择\nsleep(1)\nSelect(ele).select_by_value('4') #通过option标签的value值选择\nsleep(1)\nSelect(ele).select_by_visible_text('7间') #通过option标签的文本值选择\n\n# 需求:完成携程酒店预订页面的所有选项填写\nfrom selenium import webdriver\nfrom selenium.webdriver.support.select import Select\ndriver = webdriver.Chrome()\nfrom time import sleep\n# driver.get('https://www.ctrip.com/')\n# driver.implicitly_wait(20)\n# driver.find_element_by_id('HD_CityName').send_keys('上海') #目的地\n# driver.find_element_by_id('HD_CheckIn').clear() # HD_CheckIn 入住时间\n# driver.find_element_by_id('HD_CheckIn').send_keys('2021-7-15')\n# driver.find_element_by_id('HD_CheckOut').clear() # HD_CheckOnt 退房时间\n# driver.find_element_by_id('HD_CheckOut').send_keys('2021-7-25')\n# fj = driver.find_element_by_id('J_roomCountList') #房间数\n# Select(fj).select_by_index(2)\n# jb = driver.find_element_by_id('searchHotelLevelSelect') #酒店级别\n# Select(jb).select_by_index(2)\n# zk = driver.find_element_by_id('J_RoomGuestInfoTxt').click() #住客数\n# driver.find_element_by_class_name('fl_wrap_close').click()\n# cr = driver.find_element_by_class_name('icon_numplus').click()\n# h = driver.find_element_by_id('J_RoomGuestInfoBtnOK').click()\n# gj = driver.find_element_by_id('HD_TxtKeyword').send_keys('静安区')\n# l = 
driver.find_element_by_id('hotelSwitch').click()\n# j = driver.find_element_by_id('HD_Btn').click()\n# driver.quit()\n\n# 弹框处理\n# driver.get('http://127.0.0.1:8848/xc/enter.html')\n# 1.alert弹框\n# alert = driver.switch_to.alert #切换至页面的alert弹框内\n# alert.accept() #点击确认\n# alert.dismiss() #点击取消\n\n# 2.确认型弹框\n# driver.get(http://127.0.0.1:8848/xc/enter.html)\n# driver.find_element_by_class_name('alert').click() #点击按钮,显示弹框\n# alert = driver.switch_to.alert\n# alert.accept()\n# alert.dismiss()\n\n# 3.输入型弹框\n# driver.get(http://127.0.0.1:8848/xc/prompt.html)\n# driver.find_element_by_xpath('/html/body/div[2]/input').click()\n# driver.implicitly_wait(20)\n# alert = driver.switch_to.alert\n# alert.send_keys('djsfj') #在让款的输入框输入内容\n# alert.accept()\n\n# iframe框处理--京东QQ登陆\n# driver.get('http://www.jd.com')\n# driver.find_element_by_class_name('link-login').click()\n# driver.find_element_by_class_name('QQ-icon').click()\n# driver.switch_to.frame('ptlogin_iframe')\n# driver.find_element_by_class_name('img_out_focus').click()\n\n\n# driver.get('http://mail.163.com')\n# driver.implicitly_wait(20)\n# ele = driver.find_element_by_xpath('//div[@id=\"loginDiv\"]/iframe')\n# driver.switch_to.frame(ele)\n# # driver.switch_to.frame(driver.find_element_by_xpath('//div[@id=\"loginDiv\"]/iframe'))\n# driver.find_element_by_name('email').send_keys('ojbkxc')\n\n# import pyautogui #模拟键盘鼠标\n# pyautogui.typewrite(message='ojbkxc',interval=0.1)\n# pyautogui.press('tab')\n# pyautogui.typewrite(message='qq1657642978',interval=0.1)\n# pyautogui.press('tab')\n# pyautogui.press('enter')\n# pyautogui.press('enter')\n\n# 页面滑动\n# 方法一:执行windows脚本\n# 向下滑动\nwin = 'windows.scrollTo(0,500)' #括号内的数字0表示页面的顶端,后面表示要滑动的像素位置\ndriver.execute_script(win)\n# 向上滑动\nwin1 = 'windows.scollTo(0,0)'\ndriver.execute_script(win1)\n# 方法二:执行JavaScript脚本\n# 向下滑动\njs = 'var a = ducument.ducumentElement.dcrollTop=2000'\ndriver.execute_script(js)\n# 向上滑动\njs = 'var a = ducument.ducumentElement.dcrollTop=0'\ndriver.execute_script(js)\n\n# 控制页面缓慢滑动\n\n# import pyautogui\n# pyautogui.keyDown('winleft')\n# pyautogui.keyDown('ctrl')\n# pyautogui.keyDown('c')\n\n# 模拟键盘鼠标操作\nfrom time import sleep\nfrom selenium import webdriver\n# from selenium.webdriver.common.keys import Keys\n# driver = webdriver.Chrome()\n# driver.get('http://www.baidu.com')\n# driver.implicitly_wait(20)\n# driver.maximize_window()\n# driver.find_element_by_id('kw').send_keys('123456')\n# sleep(1)\n# driver.find_element_by_id('kw').send_keys(Keys.BACKSPACE) #输出单个字符\n# sleep(1)\n# driver.find_element_by_id('kw').send_keys(Keys.CONTROL,'a') #全选\n# sleep(1)\n# driver.find_element_by_id('kw').send_keys(Keys.CONTROL,'x') #剪切\n# sleep(1)\n# driver.find_element_by_id('kw').send_keys(Keys.CONTROL,'v') #粘贴\n# sleep(1)\n# driver.find_element_by_id('kw').send_keys(Keys.CONTROL,'c') #复制\n# sleep(1)\n# driver.find_element_by_id('kw').send_keys(Keys.ENTER) #点击enter键\n\n# 把线性脚本封装到类中\n# from selenium.webdriver.common.keys import Keys\n# class Baidu_input:\n# def __init__(self,url):\n# self.driver = webdriver.Chrome()\n# self.driver.get(url)\n# self.driver.implicitly_wait(20)\n# self.driver.maximize_window()\n#\n# def bd_input(self,locator,value):\n# self.driver.find_element_by_id(locator).send_keys(value)\n# sleep(1)\n# def ba_clear(self,locator):\n# self.driver.find_element_by_id(locator).clear()\n# sleep(1)\n#\n# if __name__ == '__main__':\n# b = Baidu_input('http://www.baidu.com')\n# b.bd_input('kw','selenuim')\n# b.bd_input(\"kw\",Keys.BACK_SPACE)\n# b.bd_input(\"kw\",(Keys.CONTROL,'a'))\n# 
b.bd_input(\"kw\",(Keys.CONTROL,'x'))\n# b.bd_input(\"kw\",(Keys.CONTROL,'v'))\n# b.bd_input(\"kw\",(Keys.CONTROL,'a'))\n# b.bd_input(\"kw\",(Keys.CONTROL,'c'))\n# b.bd_clear(\"kw\")\n# b.bd_input(\"kw\",(Keys.CONTROL,'v'))\n# b.bd_input(\"kw\",Keys.ENTER)\n\n# 单元测试框架\n# python中可以用来做单元测试的框架:unittest pytest(复杂)\n# java中用来做单元测试的框架:unittet\n\nimport unittest\nclass Unit_test(unittest.TestCase):\n\n @classmethod #类方法的装饰器\n def setUpClass(cls) -> None:\n print('类的方法开始')\n\n @classmethod\n def tearDownClass(cls) -> None:\n print('类方法结束')\n\n def setUp(self) -> None:\n print('方法开始')\n\n def tearDown(self) -> None:\n print('方法结束')\n\n def test_02(self):\n print('222222')\n\n def test_01(self):\n print('111111')\n\n def test_03(self):\n print('333333')\n\n def test_a(self):\n print('4444444')\n\n def test_A(self):\n print('555555')\n\nif __name__ == '__main__':\n unittest.main()\n# 总结:\n# 1.unittest单元测试框架内的测试用例名称必须以test开头,否则无法识别,且不会执行\n# 2.unittest单元测试框架执行测试用例的顺序遵循的是ASCII码顺序,即是0-9 A-z a-z\n# 3.unittest单元测试框架内的setup()和teardown()方法在每一条测试用例执行的开始与结束都会被执行一次\n# 4.unittest单元测试框架中类方法开始和类方法结束,只会在所有的测试用例之前和执行结束之后运行一次\n\nfrom time import sleep,strftime\nfrom selenium import webdriver\nfrom HTMLTestRunner import HTMLTestRunner\nimport unittest\nclass Bbs(unittest.TestCase):\n\n # 单例模式设计方法\n # @classmethod\n # def set_driver(cls):\n # cls.driver = driver\n #\n # @classmethod\n # def get_driver(cls):\n # return cls.driver\n @classmethod\n def setUpClass(cls) -> None:\n cls.driver = webdriver.Chrome()\n cls.driver.get('http://192.168.30.129/bbs')\n cls.driver.implicitly_wait(20)\n cls.driver.maximize_window()\n\n @classmethod\n def tearDownClass(cls) -> None:\n cls.driver.quit()\n\n def test_01_bbs_login(self):\n self.driver.find_element_by_id('ls_username').send_keys('admin')\n sleep(1)\n self.driver.find_element_by_id('ls_password').send_keys('123456')\n sleep(1)\n self.driver.find_element_by_class_name('pn').click()\n sleep(1)\n\n def test_02_bbs_setting(self):\n self.driver.find_element_by_link_text('设置').click()\n sleep(1)\n\n # if __name__ == '__main__':\n # # 第一种调用方法(单元测试框架)\n # unittest.main()\n # sleep(1)\n # # 添加一个测试套件\n # def test_suite():\n # suite = unittest.TestSuite()\n # suite.addTest(Bbs('test_01_Bbs_login')) #每次只能添加一条测试用例\n # suite.addTests([Bbs('test_01_bbs_login'),Bbs('test_02_bbs_setting')])\n # return suite\n # # 第二种调用方法\n # runner = unittest.TextTestRunner()\n # runner.run(test_suite())\n # 第三种调用方法\n path = r'D:\\Program Files\\python\\python\\pythonProject1\\py1'\n discover = unittest.defaultTestLoader.discover(start_dir = path,pattern = '*in*.py')\n now = strftime('%Y-%m-%d-%H-%M-%S')\n filename = path+'\\\\'+str(now)+'_report.html'\n f = open(file = filename,mode = 'wb')\n runner = HTMLTestRunner(stream=f,\n # title='BBS论坛UI自动化测试报告',\n # description='用例执行情况如下:',\n # tester='pythonProject1'\n )\n runner.run(discover)\n f.close()\n\n # 调用邮箱脚本\n from mail import SendMail\n sendmail=SendMail(send_msg=filename,attachment=filename) #attachment=附件\n sendmail.send_mail()\n" } ]
1
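Note on the mzy.py record above: in its verification-code exercise, the second verify() ends in a bare return, so v_md5(verify(strl)) hashes the string "None" rather than the generated 6-character code. A corrected standalone sketch follows; the helper names make_code and md5_hex are hypothetical, not taken from the archived file.

import hashlib
import random
import string

def make_code(alphabet=string.digits + string.ascii_letters, length=6):
    # Build a random verification code from the given alphabet.
    return ''.join(random.choice(alphabet) for _ in range(length))

def md5_hex(text):
    # Return the hex MD5 digest of a UTF-8 encoded string.
    return hashlib.md5(text.encode('utf-8')).hexdigest()

code = make_code()
print(code, md5_hex(code))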